diff --git a/0001-riscv-kernel.patch b/0001-riscv-kernel.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b29267ce13f177db61f1ae20f91407719447bbee
--- /dev/null
+++ b/0001-riscv-kernel.patch
@@ -0,0 +1,620870 @@
+From 8ad6f5c4cffb1f0debb2a9e48bec85df12f99ff9 Mon Sep 17 00:00:00 2001
+From: Mingzheng Xing
+Date: Thu, 4 Sep 2025 01:37:12 +0800
+Subject: [PATCH] riscv kernel
+
+Signed-off-by: Mingzheng Xing
+---
+ Documentation/arch/index.rst | 2 +-
+ Documentation/{ => arch}/riscv/acpi.rst | 0
+ .../{ => arch}/riscv/boot-image-header.rst | 0
+ Documentation/{ => arch}/riscv/boot.rst | 0
+ Documentation/{ => arch}/riscv/features.rst | 0
+ Documentation/arch/riscv/hwprobe.rst | 271 +
+ Documentation/{ => arch}/riscv/index.rst | 0
+ .../{ => arch}/riscv/patch-acceptance.rst | 0
+ Documentation/{ => arch}/riscv/uabi.rst | 0
+ Documentation/{ => arch}/riscv/vector.rst | 0
+ Documentation/{ => arch}/riscv/vm-layout.rst | 0
+ .../hwlock/xuantie,th1520-hwspinlock.yaml | 34 +
+ .../bindings/iio/adc/thead,th1520-adc.yaml | 52 +
+ .../bindings/iio/adc/xuantie,th1520-adc.yaml | 52 +
+ .../interrupt-controller/riscv,aplic.yaml | 172 +
+ .../interrupt-controller/riscv,imsics.yaml | 172 +
+ .../thead,c900-aclint-sswi.yaml | 58 +
+ .../bindings/iommu/riscv,iommu.yaml | 147 +
+ .../mailbox/xuantie-th1520-mailbox.txt | 57 +
+ .../bindings/mmc/snps,dwcmshc-sdhci.yaml | 1 +
+ .../devicetree/bindings/net/snps,dwmac.yaml | 2 +
+ .../bindings/net/xuantie,dwmac.yaml | 77 +
+ .../bindings/nvmem/xuantie,th1520-efuse.txt | 18 +
+ .../pinctrl/thead,th1520-pinctrl.yaml | 374 +
+ .../bindings/pwm/xuantie,th1520-pwm.yaml | 44 +
+ .../bindings/reset/xuantie,th1520-reset.yaml | 45 +
+ .../devicetree/bindings/riscv/extensions.yaml | 6 +
+ .../devicetree/bindings/rtc/xgene-rtc.txt | 16 +
+ .../soc/xuantie/xuantie,th1520-event.yaml | 37 +
+ .../bindings/sound/everest,es7210.txt | 12 +
+ .../bindings/sound/everest,es8156.yaml | 42 +
+ .../bindings/sound/xuantie,th1520-i2s.yaml | 91 +
+ .../bindings/sound/xuantie,th1520-spdif.yaml | 77 +
+ .../bindings/sound/xuantie,th1520-tdm.yaml | 86 +
+ .../bindings/spi/xuantie,th1520-qspi.yaml | 52 +
+ .../bindings/spi/xuantie,th1520-spi.yaml | 58 +
+ .../bindings/usb/xuantie,th1520-usb.yaml | 76 +
+ .../bindings/watchdog/xuantie,th1520-wdt.yaml | 19 +
+ .../membarrier-sync-core/arch-support.txt | 18 +-
+ .../maintainer/maintainer-entry-profile.rst | 2 +-
+ Documentation/process/index.rst | 2 +-
+ Documentation/riscv/hwprobe.rst | 98 -
+ Documentation/scheduler/index.rst | 1 +
+ Documentation/scheduler/membarrier.rst | 39 +
+ .../it_IT/riscv/patch-acceptance.rst | 2 +-
+ .../translations/zh_CN/arch/index.rst | 2 +-
+ .../{ => arch}/riscv/boot-image-header.rst | 4 +-
+ .../zh_CN/{ => arch}/riscv/index.rst | 4 +-
+ .../{ => arch}/riscv/patch-acceptance.rst | 4 +-
+ .../zh_CN/{ => arch}/riscv/vm-layout.rst | 4 +-
+ .../maintainer/maintainer-entry-profile.rst | 2 +-
+ MAINTAINERS | 29 +-
+ arch/arm64/Kconfig | 1 -
+ arch/arm64/include/asm/tlb.h | 5 +-
+ arch/arm64/kernel/pci.c | 191 -
+ arch/ia64/Kconfig | 1 -
+ arch/loongarch/Kconfig | 1 -
+ arch/loongarch/include/asm/pgalloc.h | 1 +
+ arch/loongarch/kernel/dma.c | 9 +-
+ arch/mips/include/asm/pgalloc.h | 1 +
+ arch/riscv/Kconfig | 135 +-
+ arch/riscv/Kconfig.socs | 49 +
+ arch/riscv/Kconfig.vendor | 19 +
+ arch/riscv/Makefile | 23 +-
+ arch/riscv/Makefile.isa | 15 +
+ arch/riscv/boot/dts/Makefile | 2 +
+ arch/riscv/boot/dts/sophgo/Makefile | 7 +
+ .../riscv/boot/dts/sophgo/mango-2sockets.dtsi | 699 +
+ .../boot/dts/sophgo/mango-clock-socket0.dtsi | 124 +
+ .../boot/dts/sophgo/mango-clock-socket1.dtsi | 124 +
+ .../boot/dts/sophgo/mango-cpus-socket0.dtsi | 2089 ++
+ .../boot/dts/sophgo/mango-cpus-socket1.dtsi | 2090 ++
+ .../boot/dts/sophgo/mango-milkv-pioneer.dts | 170 +
+ .../riscv/boot/dts/sophgo/mango-pcie-2rc.dtsi | 81 +
+ .../dts/sophgo/mango-pcie-3rc-capricorn.dtsi | 116 +
+ .../boot/dts/sophgo/mango-pcie-3rc-v2.dtsi | 115 +
+ .../riscv/boot/dts/sophgo/mango-pcie-3rc.dtsi | 112 +
+ .../boot/dts/sophgo/mango-pcie-4rc-v2.dtsi | 155 +
+ .../riscv/boot/dts/sophgo/mango-pcie-4rc.dtsi | 151 +
+ arch/riscv/boot/dts/sophgo/mango-pinctrl.dtsi | 434 +
+ .../dts/sophgo/mango-sophgo-capricorn.dts | 57 +
+ .../boot/dts/sophgo/mango-sophgo-pisces.dts | 58 +
+ .../boot/dts/sophgo/mango-sophgo-x4evb.dts | 144 +
+ .../boot/dts/sophgo/mango-sophgo-x8evb.dts | 172 +
+ .../boot/dts/sophgo/mango-top-intc2.dtsi | 62 +
+ .../boot/dts/sophgo/mango-yixin-s2110.dts | 63 +
+ arch/riscv/boot/dts/sophgo/mango.dtsi | 938 +
+ arch/riscv/boot/dts/spacemit/Makefile | 2 +
+ .../boot/dts/spacemit/k1-bananapi-f3.dts | 448 +
+ arch/riscv/boot/dts/spacemit/k1-x.dtsi | 1221 ++
+ .../riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi | 1192 ++
+ arch/riscv/boot/dts/thead/Makefile | 3 +-
+ .../boot/dts/thead/th1520-beaglev-ahead.dts | 222 +-
+ .../dts/thead/th1520-lichee-module-4a.dtsi | 440 +-
+ .../dts/thead/th1520-lichee-pi-4a-16g.dts | 18 +
+ .../boot/dts/thead/th1520-lichee-pi-4a.dts | 1369 +-
+ .../boot/dts/thead/th1520-lpi4a-dsi0.dts | 63 +
+ .../boot/dts/thead/th1520-lpi4a-hx8279.dts | 63 +
+ arch/riscv/boot/dts/thead/th1520.dtsi | 2048 +-
+ arch/riscv/configs/defconfig | 23 +-
+ arch/riscv/configs/k1_defconfig | 31 +
+ arch/riscv/configs/openeuler_defconfig | 1968 +-
+ arch/riscv/configs/sg2042_defconfig | 9 +
+ arch/riscv/configs/th1520_defconfig | 470 +
+ arch/riscv/errata/andes/errata.c | 13 +-
+ arch/riscv/errata/sifive/errata.c | 3 +
+ arch/riscv/errata/thead/errata.c | 3 +
+ arch/riscv/include/asm/acpi.h | 21 +-
+ arch/riscv/include/asm/arch_hweight.h | 78 +
+ arch/riscv/include/asm/archrandom.h | 72 +
+ arch/riscv/include/asm/atomic.h | 17 +-
+ arch/riscv/include/asm/barrier.h | 58 +-
+ arch/riscv/include/asm/bitops.h | 258 +-
+ arch/riscv/include/asm/cmpxchg.h | 496 +-
+ arch/riscv/include/asm/compat.h | 1 -
+ arch/riscv/include/asm/cpufeature-macros.h | 66 +
+ arch/riscv/include/asm/cpufeature.h | 69 +
+ arch/riscv/include/asm/csr.h | 13 +
+ arch/riscv/include/asm/dmi.h | 24 +
+ arch/riscv/include/asm/elf.h | 2 +-
+ arch/riscv/include/asm/errata_list.h | 45 +-
+ arch/riscv/include/asm/fence.h | 10 +-
+ arch/riscv/include/asm/hwcap.h | 141 +-
+ arch/riscv/include/asm/hwprobe.h | 26 +-
+ arch/riscv/include/asm/insn-def.h | 4 +
+ arch/riscv/include/asm/io.h | 12 +-
+ arch/riscv/include/asm/irq.h | 60 +
+ arch/riscv/include/asm/kvm_aia_aplic.h | 58 -
+ arch/riscv/include/asm/kvm_aia_imsic.h | 38 -
+ arch/riscv/include/asm/membarrier.h | 19 +
+ arch/riscv/include/asm/mmio.h | 5 +-
+ arch/riscv/include/asm/mmiowb.h | 2 +-
+ arch/riscv/include/asm/pgalloc.h | 53 +-
+ arch/riscv/include/asm/pgtable-64.h | 14 +-
+ arch/riscv/include/asm/pgtable.h | 21 +-
+ arch/riscv/include/asm/processor.h | 6 +
+ arch/riscv/include/asm/sbi.h | 9 +
+ arch/riscv/include/asm/sparsemem.h | 2 +-
+ arch/riscv/include/asm/suspend.h | 5 +-
+ arch/riscv/include/asm/switch_to.h | 17 +-
+ arch/riscv/include/asm/sync_core.h | 29 +
+ arch/riscv/include/asm/tlb.h | 18 +
+ arch/riscv/include/asm/vdso/processor.h | 8 +-
+ arch/riscv/include/asm/vector.h | 12 +-
+ arch/riscv/include/asm/vendor_extensions.h | 103 +
+ .../include/asm/vendor_extensions/andes.h | 19 +
+ arch/riscv/include/asm/vendorid_list.h | 2 +-
+ arch/riscv/include/uapi/asm/hwprobe.h | 52 +-
+ arch/riscv/kernel/Makefile | 4 +
+ arch/riscv/kernel/acpi.c | 135 +-
+ arch/riscv/kernel/acpi_numa.c | 130 +
+ arch/riscv/kernel/alternative.c | 2 +-
+ arch/riscv/kernel/cpufeature.c | 579 +-
+ arch/riscv/kernel/module.c | 83 +-
+ arch/riscv/kernel/process.c | 3 +
+ arch/riscv/kernel/sbi-ipi.c | 46 +-
+ arch/riscv/kernel/setup.c | 8 +-
+ arch/riscv/kernel/smp.c | 17 +
+ arch/riscv/kernel/smpboot.c | 4 +-
+ arch/riscv/kernel/suspend.c | 100 +-
+ arch/riscv/kernel/sys_hwprobe.c | 349 +
+ arch/riscv/kernel/sys_riscv.c | 267 -
+ arch/riscv/kernel/vdso/hwprobe.c | 86 +-
+ arch/riscv/kernel/vector.c | 8 +-
+ arch/riscv/kernel/vendor_extensions.c | 56 +
+ arch/riscv/kernel/vendor_extensions/Makefile | 3 +
+ arch/riscv/kernel/vendor_extensions/andes.c | 18 +
+ arch/riscv/kvm/aia.c | 37 +-
+ arch/riscv/kvm/aia_aplic.c | 2 +-
+ arch/riscv/kvm/aia_device.c | 2 +-
+ arch/riscv/kvm/aia_imsic.c | 2 +-
+ arch/riscv/kvm/main.c | 2 +-
+ arch/riscv/kvm/tlb.c | 2 +-
+ arch/riscv/kvm/vcpu_fp.c | 2 +-
+ arch/riscv/kvm/vcpu_onereg.c | 2 +-
+ arch/riscv/kvm/vcpu_vector.c | 2 +-
+ arch/riscv/lib/Makefile | 1 +
+ arch/riscv/lib/crc32.c | 294 +
+ arch/riscv/mm/cacheflush.c | 25 +-
+ arch/riscv/mm/dma-noncoherent.c | 9 +-
+ arch/riscv/mm/pgtable.c | 2 +
+ arch/riscv/mm/tlbflush.c | 31 +
+ arch/sw_64/Kconfig | 1 -
+ arch/x86/include/asm/hw_irq.h | 2 -
+ arch/x86/mm/pgtable.c | 3 +
+ drivers/acpi/Kconfig | 2 +-
+ drivers/acpi/Makefile | 2 +-
+ drivers/acpi/acpi_apd.c | 21 +-
+ drivers/acpi/acpi_lpss.c | 15 +-
+ drivers/acpi/arm64/dma.c | 17 +-
+ drivers/acpi/arm64/iort.c | 20 +-
+ drivers/acpi/bus.c | 4 +
+ drivers/acpi/internal.h | 8 +
+ drivers/acpi/mipi-disco-img.c | 292 +
+ drivers/acpi/numa/Kconfig | 5 +-
+ drivers/acpi/numa/srat.c | 34 +-
+ drivers/acpi/pci_link.c | 2 +
+ drivers/acpi/pci_mcfg.c | 17 +
+ drivers/acpi/riscv/Makefile | 4 +-
+ drivers/acpi/riscv/cppc.c | 157 +
+ drivers/acpi/riscv/cpuidle.c | 81 +
+ drivers/acpi/riscv/init.c | 13 +
+ drivers/acpi/riscv/init.h | 4 +
+ drivers/acpi/riscv/irq.c | 335 +
+ drivers/acpi/riscv/rhct.c | 93 +-
+ drivers/acpi/scan.c | 151 +-
+ drivers/acpi/thermal.c | 56 +-
+ drivers/acpi/utils.c | 138 +-
+ drivers/base/arch_numa.c | 2 +-
+ drivers/base/platform-msi.c | 149 +-
+ drivers/char/ipmi/ipmi_si_hardcode.c | 26 +-
+ drivers/char/ipmi/ipmi_si_intf.c | 3 +-
+ drivers/char/ipmi/ipmi_si_pci.c | 6 +
+ drivers/clk/Kconfig | 2 +
+ drivers/clk/Makefile | 3 +
+ drivers/clk/sophgo/Makefile | 3 +
+ drivers/clk/sophgo/clk-dummy.c | 594 +
+ drivers/clk/sophgo/clk-mango.c | 977 +
+ drivers/clk/sophgo/clk.c | 881 +
+ drivers/clk/sophgo/clk.h | 152 +
+ drivers/clk/spacemit/Kconfig | 9 +
+ drivers/clk/spacemit/Makefile | 11 +
+ drivers/clk/spacemit/ccu-spacemit-k1x.c | 2123 ++
+ drivers/clk/spacemit/ccu-spacemit-k1x.h | 81 +
+ drivers/clk/spacemit/ccu_ddn.c | 161 +
+ drivers/clk/spacemit/ccu_ddn.h | 86 +
+ drivers/clk/spacemit/ccu_ddr.c | 272 +
+ drivers/clk/spacemit/ccu_ddr.h | 44 +
+ drivers/clk/spacemit/ccu_dpll.c | 124 +
+ drivers/clk/spacemit/ccu_dpll.h | 76 +
+ drivers/clk/spacemit/ccu_mix.c | 502 +
+ drivers/clk/spacemit/ccu_mix.h | 380 +
+ drivers/clk/spacemit/ccu_pll.c | 286 +
+ drivers/clk/spacemit/ccu_pll.h | 79 +
+ drivers/clk/xuantie/Kconfig | 12 +
+ drivers/clk/xuantie/Makefile | 7 +
+ drivers/clk/xuantie/clk-th1520-fm.c | 646 +
+ drivers/clk/xuantie/clk.c | 766 +
+ drivers/clk/xuantie/clk.h | 126 +
+ drivers/clk/xuantie/gate/Makefile | 3 +
+ drivers/clk/xuantie/gate/audiosys-gate.c | 124 +
+ drivers/clk/xuantie/gate/clk-gate.h | 35 +
+ drivers/clk/xuantie/gate/dspsys-gate.c | 123 +
+ drivers/clk/xuantie/gate/miscsys-gate.c | 108 +
+ drivers/clk/xuantie/gate/visys-gate.c | 144 +
+ drivers/clk/xuantie/gate/vosys-gate.c | 111 +
+ drivers/clk/xuantie/gate/vpsys-gate.c | 99 +
+ drivers/clk/xuantie/gate/xuantie-gate.c | 114 +
+ drivers/clocksource/timer-riscv.c | 6 +-
+ drivers/cpufreq/Kconfig | 38 +
+ drivers/cpufreq/Kconfig.arm | 26 -
+ drivers/cpufreq/Makefile | 1 +
+ drivers/cpufreq/th1520-cpufreq.c | 588 +
+ drivers/cpuidle/cpuidle-riscv-sbi.c | 49 +-
+ drivers/dma/Kconfig | 7 +
+ drivers/dma/Makefile | 1 +
+ .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 106 +-
+ drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 11 +
+ drivers/dma/mv_xor_v2.c | 8 +-
+ drivers/dma/qcom/hidma.c | 6 +-
+ drivers/dma/spacemit-k1-dma.c | 1515 ++
+ drivers/firmware/Kconfig | 3 +-
+ drivers/firmware/Makefile | 1 +
+ drivers/firmware/efi/libstub/Makefile | 2 +-
+ drivers/firmware/efi/riscv-runtime.c | 13 +
+ drivers/firmware/qemu_fw_cfg.c | 2 +-
+ drivers/firmware/xuantie/Kconfig | 23 +
+ drivers/firmware/xuantie/Makefile | 4 +
+ drivers/firmware/xuantie/th1520_aon.c | 341 +
+ drivers/firmware/xuantie/th1520_aon_pd.c | 414 +
+ drivers/firmware/xuantie/th1520_proc_debug.c | 173 +
+ drivers/gpio/Kconfig | 9 +
+ drivers/gpio/Makefile | 1 +
+ drivers/gpio/gpio-dwapb.c | 15 +-
+ drivers/gpio/gpio-k1x.c | 407 +
+ drivers/gpio/gpio-pca953x.c | 12 +-
+ drivers/gpu/drm/Kconfig | 4 +
+ drivers/gpu/drm/Makefile | 2 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +
+ drivers/gpu/drm/amd/amdkfd/Kconfig | 2 +-
+ drivers/gpu/drm/amd/display/Kconfig | 5 +-
+ .../gpu/drm/amd/display/amdgpu_dm/dc_fpu.c | 6 +-
+ drivers/gpu/drm/amd/display/dc/dml/Makefile | 6 +
+ drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 198 +-
+ drivers/gpu/drm/drm_fbdev_generic.c | 9 +
+ drivers/gpu/drm/drm_gem_vram_helper.c | 2 +-
+ drivers/gpu/drm/etnaviv/common.xml.h | 1 +
+ drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 7 +-
+ drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 16 +-
+ drivers/gpu/drm/etnaviv/etnaviv_hwdb.c | 31 +
+ drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 15 +-
+ drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 2 +
+ drivers/gpu/drm/img-rogue/Kconfig | 24 +
+ drivers/gpu/drm/img-rogue/Makefile | 18 +
+ drivers/gpu/drm/img-rogue/allocmem.c | 422 +
+ drivers/gpu/drm/img-rogue/allocmem.h | 224 +
+ drivers/gpu/drm/img-rogue/apollo/apollo.mk | 4 +
+ .../gpu/drm/img-rogue/apollo/apollo_regs.h | 108 +
+ drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h | 68 +
+ .../gpu/drm/img-rogue/apollo/drm_pdp_crtc.c | 1104 ++
+ .../drm/img-rogue/apollo/drm_pdp_debugfs.c | 184 +
+ .../gpu/drm/img-rogue/apollo/drm_pdp_drv.c | 866 +
+ .../gpu/drm/img-rogue/apollo/drm_pdp_drv.h | 241 +
+ .../gpu/drm/img-rogue/apollo/drm_pdp_dvi.c | 307 +
+ drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c | 312 +
+ .../gpu/drm/img-rogue/apollo/drm_pdp_gem.c | 780 +
+ .../gpu/drm/img-rogue/apollo/drm_pdp_gem.h | 157 +
+ .../drm/img-rogue/apollo/drm_pdp_modeset.c | 472 +
+ .../gpu/drm/img-rogue/apollo/drm_pdp_plane.c | 323 +
+ .../gpu/drm/img-rogue/apollo/drm_pdp_tmds.c | 143 +
+ drivers/gpu/drm/img-rogue/apollo/odin_defs.h | 326 +
+ .../gpu/drm/img-rogue/apollo/odin_pdp_regs.h | 8540 ++++++++
+ drivers/gpu/drm/img-rogue/apollo/odin_regs.h | 1026 +
+ drivers/gpu/drm/img-rogue/apollo/orion_defs.h | 183 +
+ drivers/gpu/drm/img-rogue/apollo/orion_regs.h | 439 +
+ drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c | 332 +
+ drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h | 88 +
+ drivers/gpu/drm/img-rogue/apollo/pdp_common.h | 107 +
+ drivers/gpu/drm/img-rogue/apollo/pdp_odin.c | 1230 ++
+ drivers/gpu/drm/img-rogue/apollo/pdp_odin.h | 95 +
+ drivers/gpu/drm/img-rogue/apollo/pdp_plato.c | 339 +
+ drivers/gpu/drm/img-rogue/apollo/pdp_plato.h | 86 +
+ drivers/gpu/drm/img-rogue/apollo/pdp_regs.h | 75 +
+ drivers/gpu/drm/img-rogue/apollo/pfim_defs.h | 69 +
+ drivers/gpu/drm/img-rogue/apollo/pfim_regs.h | 265 +
+ drivers/gpu/drm/img-rogue/apollo/sysconfig.c | 1116 ++
+ drivers/gpu/drm/img-rogue/apollo/sysinfo.h | 60 +
+ drivers/gpu/drm/img-rogue/apollo/tc_apollo.c | 1499 ++
+ drivers/gpu/drm/img-rogue/apollo/tc_apollo.h | 77 +
+ drivers/gpu/drm/img-rogue/apollo/tc_clocks.h | 158 +
+ drivers/gpu/drm/img-rogue/apollo/tc_drv.c | 943 +
+ drivers/gpu/drm/img-rogue/apollo/tc_drv.h | 183 +
+ .../drm/img-rogue/apollo/tc_drv_internal.h | 204 +
+ drivers/gpu/drm/img-rogue/apollo/tc_odin.c | 2220 +++
+ drivers/gpu/drm/img-rogue/apollo/tc_odin.h | 82 +
+ .../img-rogue/apollo/tc_odin_common_regs.h | 105 +
+ .../gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h | 1018 +
+ drivers/gpu/drm/img-rogue/apollo/tcf_pll.h | 311 +
+ .../drm/img-rogue/apollo/tcf_rgbpdp_regs.h | 559 +
+ drivers/gpu/drm/img-rogue/cache_km.c | 1630 ++
+ drivers/gpu/drm/img-rogue/cache_km.h | 151 +
+ drivers/gpu/drm/img-rogue/cache_ops.h | 61 +
+ .../gpu/drm/img-rogue/client_cache_bridge.h | 80 +
+ .../img-rogue/client_cache_direct_bridge.c | 112 +
+ .../client_devicememhistory_bridge.h | 111 +
+ .../client_devicememhistory_direct_bridge.c | 194 +
+ .../drm/img-rogue/client_htbuffer_bridge.h | 71 +
+ .../img-rogue/client_htbuffer_direct_bridge.c | 85 +
+ drivers/gpu/drm/img-rogue/client_mm_bridge.h | 265 +
+ .../drm/img-rogue/client_mm_direct_bridge.c | 804 +
+ .../gpu/drm/img-rogue/client_pvrtl_bridge.h | 93 +
+ .../img-rogue/client_pvrtl_direct_bridge.c | 175 +
+ drivers/gpu/drm/img-rogue/client_ri_bridge.h | 89 +
+ .../drm/img-rogue/client_ri_direct_bridge.c | 182 +
+ .../gpu/drm/img-rogue/client_sync_bridge.h | 102 +
+ .../drm/img-rogue/client_sync_direct_bridge.c | 262 +
+ .../img-rogue/client_synctracking_bridge.h | 68 +
+ .../client_synctracking_direct_bridge.c | 92 +
+ .../gpu/drm/img-rogue/common_cache_bridge.h | 126 +
+ drivers/gpu/drm/img-rogue/common_cmm_bridge.h | 114 +
+ .../common_devicememhistory_bridge.h | 185 +
+ drivers/gpu/drm/img-rogue/common_di_bridge.h | 153 +
+ .../gpu/drm/img-rogue/common_dmabuf_bridge.h | 150 +
+ .../drm/img-rogue/common_htbuffer_bridge.h | 104 +
+ drivers/gpu/drm/img-rogue/common_mm_bridge.h | 879 +
+ .../gpu/drm/img-rogue/common_pvrtl_bridge.h | 214 +
+ .../img-rogue/common_rgxbreakpoint_bridge.h | 149 +
+ .../gpu/drm/img-rogue/common_rgxcmp_bridge.h | 229 +
+ .../drm/img-rogue/common_rgxfwdbg_bridge.h | 200 +
+ .../drm/img-rogue/common_rgxhwperf_bridge.h | 172 +
+ .../drm/img-rogue/common_rgxkicksync_bridge.h | 143 +
+ .../img-rogue/common_rgxregconfig_bridge.h | 146 +
+ .../gpu/drm/img-rogue/common_rgxta3d_bridge.h | 404 +
+ .../img-rogue/common_rgxtimerquery_bridge.h | 112 +
+ .../gpu/drm/img-rogue/common_rgxtq2_bridge.h | 228 +
+ .../gpu/drm/img-rogue/common_rgxtq_bridge.h | 176 +
+ drivers/gpu/drm/img-rogue/common_ri_bridge.h | 225 +
+ .../gpu/drm/img-rogue/common_srvcore_bridge.h | 369 +
+ .../gpu/drm/img-rogue/common_sync_bridge.h | 254 +
+ .../img-rogue/common_synctracking_bridge.h | 97 +
+ drivers/gpu/drm/img-rogue/config_kernel.h | 163 +
+ drivers/gpu/drm/img-rogue/config_kernel.mk | 53 +
+ .../img-rogue/configs/rgxconfig_km_1.V.4.5.h | 80 +
+ .../configs/rgxconfig_km_36.V.104.182.h | 105 +
+ drivers/gpu/drm/img-rogue/connection_server.c | 491 +
+ drivers/gpu/drm/img-rogue/connection_server.h | 144 +
+ .../drm/img-rogue/cores/rgxcore_km_1.82.4.5.h | 69 +
+ .../cores/rgxcore_km_36.52.104.182.h | 74 +
+ drivers/gpu/drm/img-rogue/debug_common.c | 1646 ++
+ drivers/gpu/drm/img-rogue/debug_common.h | 55 +
+ drivers/gpu/drm/img-rogue/device.h | 540 +
+ drivers/gpu/drm/img-rogue/device_connection.h | 123 +
+ drivers/gpu/drm/img-rogue/devicemem.c | 2962 +++
+ drivers/gpu/drm/img-rogue/devicemem.h | 730 +
+ drivers/gpu/drm/img-rogue/devicemem_heapcfg.c | 184 +
+ drivers/gpu/drm/img-rogue/devicemem_heapcfg.h | 184 +
+ .../drm/img-rogue/devicemem_history_server.c | 1962 ++
+ .../drm/img-rogue/devicemem_history_server.h | 157 +
+ drivers/gpu/drm/img-rogue/devicemem_pdump.h | 363 +
+ drivers/gpu/drm/img-rogue/devicemem_server.c | 1813 ++
+ drivers/gpu/drm/img-rogue/devicemem_server.h | 633 +
+ .../drm/img-rogue/devicemem_server_utils.h | 198 +
+ .../gpu/drm/img-rogue/devicemem_typedefs.h | 142 +
+ drivers/gpu/drm/img-rogue/devicemem_utils.c | 1259 ++
+ drivers/gpu/drm/img-rogue/devicemem_utils.h | 605 +
+ drivers/gpu/drm/img-rogue/di_common.h | 236 +
+ drivers/gpu/drm/img-rogue/di_impl_brg.c | 889 +
+ drivers/gpu/drm/img-rogue/di_impl_brg.h | 92 +
+ .../gpu/drm/img-rogue/di_impl_brg_intern.h | 61 +
+ drivers/gpu/drm/img-rogue/di_server.c | 780 +
+ drivers/gpu/drm/img-rogue/di_server.h | 219 +
+ drivers/gpu/drm/img-rogue/dllist.h | 408 +
+ drivers/gpu/drm/img-rogue/dma_km.h | 83 +
+ drivers/gpu/drm/img-rogue/dma_support.c | 523 +
+ drivers/gpu/drm/img-rogue/dma_support.h | 117 +
+ drivers/gpu/drm/img-rogue/drm_netlink_gem.c | 143 +
+ drivers/gpu/drm/img-rogue/drm_netlink_gem.h | 61 +
+ drivers/gpu/drm/img-rogue/drm_nulldisp_drv.c | 2731 +++
+ drivers/gpu/drm/img-rogue/drm_nulldisp_drv.h | 97 +
+ drivers/gpu/drm/img-rogue/drm_nulldisp_gem.c | 678 +
+ drivers/gpu/drm/img-rogue/drm_nulldisp_gem.h | 160 +
+ .../gpu/drm/img-rogue/drm_nulldisp_netlink.c | 710 +
+ .../gpu/drm/img-rogue/drm_nulldisp_netlink.h | 77 +
+ drivers/gpu/drm/img-rogue/env_connection.h | 92 +
+ drivers/gpu/drm/img-rogue/event.c | 514 +
+ drivers/gpu/drm/img-rogue/event.h | 54 +
+ drivers/gpu/drm/img-rogue/fwload.c | 255 +
+ drivers/gpu/drm/img-rogue/fwload.h | 158 +
+ drivers/gpu/drm/img-rogue/fwtrace_string.h | 52 +
+ drivers/gpu/drm/img-rogue/gpu_trace_point.h | 39 +
+ drivers/gpu/drm/img-rogue/handle.c | 2484 +++
+ drivers/gpu/drm/img-rogue/handle.h | 206 +
+ drivers/gpu/drm/img-rogue/handle_idr.c | 440 +
+ drivers/gpu/drm/img-rogue/handle_impl.h | 89 +
+ drivers/gpu/drm/img-rogue/handle_types.h | 88 +
+ drivers/gpu/drm/img-rogue/hash.c | 734 +
+ drivers/gpu/drm/img-rogue/hash.h | 247 +
+ drivers/gpu/drm/img-rogue/htb_debug.c | 1190 ++
+ drivers/gpu/drm/img-rogue/htb_debug.h | 64 +
+ drivers/gpu/drm/img-rogue/htbserver.c | 857 +
+ drivers/gpu/drm/img-rogue/htbserver.h | 228 +
+ drivers/gpu/drm/img-rogue/htbuffer.c | 197 +
+ drivers/gpu/drm/img-rogue/htbuffer.h | 135 +
+ drivers/gpu/drm/img-rogue/htbuffer_init.h | 114 +
+ drivers/gpu/drm/img-rogue/htbuffer_sf.h | 241 +
+ drivers/gpu/drm/img-rogue/htbuffer_types.h | 118 +
+ drivers/gpu/drm/img-rogue/img_3dtypes.h | 248 +
+ drivers/gpu/drm/img-rogue/img_defs.h | 567 +
+ drivers/gpu/drm/img-rogue/img_elf.h | 111 +
+ drivers/gpu/drm/img-rogue/img_types.h | 324 +
+ drivers/gpu/drm/img-rogue/img_types_check.h | 58 +
+ drivers/gpu/drm/img-rogue/include/cache_ops.h | 61 +
+ .../img-rogue/include/devicemem_typedefs.h | 142 +
+ drivers/gpu/drm/img-rogue/include/dllist.h | 408 +
+ .../gpu/drm/img-rogue/include/drm/netlink.h | 147 +
+ .../drm/img-rogue/include/drm/nulldisp_drm.h | 105 +
+ .../gpu/drm/img-rogue/include/drm/pdp_drm.h | 105 +
+ .../gpu/drm/img-rogue/include/drm/pvr_drm.h | 146 +
+ .../gpu/drm/img-rogue/include/img_3dtypes.h | 248 +
+ drivers/gpu/drm/img-rogue/include/img_defs.h | 567 +
+ .../include/img_drm_fourcc_internal.h | 94 +
+ drivers/gpu/drm/img-rogue/include/img_elf.h | 111 +
+ drivers/gpu/drm/img-rogue/include/img_types.h | 324 +
+ .../gpu/drm/img-rogue/include/kernel_types.h | 137 +
+ .../gpu/drm/img-rogue/include/linux_sw_sync.h | 52 +
+ .../gpu/drm/img-rogue/include/lock_types.h | 92 +
+ drivers/gpu/drm/img-rogue/include/log2.h | 417 +
+ .../drm/img-rogue/include/multicore_defs.h | 53 +
+ .../gpu/drm/img-rogue/include/osfunc_common.h | 300 +
+ drivers/gpu/drm/img-rogue/include/pdumpdefs.h | 249 +
+ drivers/gpu/drm/img-rogue/include/pdumpdesc.h | 226 +
+ .../include/public/powervr/buffer_attribs.h | 193 +
+ .../include/public/powervr/img_drm_fourcc.h | 140 +
+ .../include/public/powervr/mem_types.h | 64 +
+ .../include/public/powervr/pvrsrv_sync_ext.h | 72 +
+ .../include/pvr_buffer_sync_shared.h | 57 +
+ drivers/gpu/drm/img-rogue/include/pvr_debug.h | 898 +
+ .../img-rogue/include/pvr_fd_sync_kernel.h | 64 +
+ .../drm/img-rogue/include/pvr_intrinsics.h | 70 +
+ drivers/gpu/drm/img-rogue/include/pvrmodule.h | 48 +
+ .../img-rogue/include/pvrsrv_device_types.h | 55 +
+ .../gpu/drm/img-rogue/include/pvrsrv_devvar.h | 291 +
+ .../gpu/drm/img-rogue/include/pvrsrv_error.h | 61 +
+ .../gpu/drm/img-rogue/include/pvrsrv_errors.h | 410 +
+ .../include/pvrsrv_memalloc_physheap.h | 170 +
+ .../img-rogue/include/pvrsrv_memallocflags.h | 969 +
+ .../include/pvrsrv_memallocflags_internal.h | 78 +
+ .../drm/img-rogue/include/pvrsrv_sync_km.h | 65 +
+ .../drm/img-rogue/include/pvrsrv_tlcommon.h | 260 +
+ .../drm/img-rogue/include/pvrsrv_tlstreams.h | 61 +
+ .../gpu/drm/img-rogue/include/pvrversion.h | 68 +
+ .../gpu/drm/img-rogue/include/rgx_common.h | 235 +
+ .../img-rogue/include/rgx_common_asserts.h | 73 +
+ .../drm/img-rogue/include/rgx_compat_bvnc.h | 140 +
+ .../include/rgx_fwif_resetframework.h | 70 +
+ .../gpu/drm/img-rogue/include/rgx_fwif_sf.h | 931 +
+ .../drm/img-rogue/include/rgx_heap_firmware.h | 120 +
+ .../drm/img-rogue/include/rgx_hwperf_common.h | 482 +
+ drivers/gpu/drm/img-rogue/include/rgx_meta.h | 385 +
+ drivers/gpu/drm/img-rogue/include/rgx_mips.h | 374 +
+ drivers/gpu/drm/img-rogue/include/rgx_riscv.h | 250 +
+ .../drm/img-rogue/include/rgxfw_log_helper.h | 79 +
+ .../gpu/drm/img-rogue/include/ri_typedefs.h | 52 +
+ .../include/rogue/rgx_fwif_alignchecks.h | 192 +
+ .../img-rogue/include/rogue/rgx_fwif_hwperf.h | 252 +
+ .../drm/img-rogue/include/rogue/rgx_fwif_km.h | 2341 +++
+ .../img-rogue/include/rogue/rgx_fwif_shared.h | 335 +
+ .../drm/img-rogue/include/rogue/rgx_heaps.h | 68 +
+ .../drm/img-rogue/include/rogue/rgx_hwperf.h | 1607 ++
+ .../drm/img-rogue/include/rogue/rgx_options.h | 304 +
+ .../img-rogue/include/rogue/rgxheapconfig.h | 290 +
+ .../include/rogue/rgxheapconfig_65273.h | 124 +
+ .../include/rogue/system/rgx_tc/tc_clocks.h | 158 +
+ .../gpu/drm/img-rogue/include/services_km.h | 180 +
+ .../gpu/drm/img-rogue/include/servicesext.h | 156 +
+ .../include/sync_checkpoint_external.h | 83 +
+ .../img-rogue/include/sync_prim_internal.h | 84 +
+ .../include/system/rgx_tc/apollo_regs.h | 108 +
+ .../include/system/rgx_tc/bonnie_tcf.h | 68 +
+ .../include/system/rgx_tc/odin_defs.h | 326 +
+ .../include/system/rgx_tc/odin_pdp_regs.h | 8540 ++++++++
+ .../include/system/rgx_tc/odin_regs.h | 1026 +
+ .../include/system/rgx_tc/orion_defs.h | 183 +
+ .../include/system/rgx_tc/orion_regs.h | 439 +
+ .../include/system/rgx_tc/pdp_regs.h | 75 +
+ .../include/system/rgx_tc/pfim_defs.h | 69 +
+ .../include/system/rgx_tc/pfim_regs.h | 265 +
+ .../include/system/rgx_tc/tcf_clk_ctrl.h | 1018 +
+ .../img-rogue/include/system/rgx_tc/tcf_pll.h | 311 +
+ .../include/system/rgx_tc/tcf_rgbpdp_regs.h | 559 +
+ .../img-rogue/include/virt_validation_defs.h | 63 +
+ .../include/volcanic/rgx_fwif_alignchecks.h | 191 +
+ .../include/volcanic/rgx_fwif_hwperf.h | 125 +
+ .../img-rogue/include/volcanic/rgx_fwif_km.h | 2331 +++
+ .../include/volcanic/rgx_fwif_shared.h | 376 +
+ .../img-rogue/include/volcanic/rgx_heaps.h | 65 +
+ .../img-rogue/include/volcanic/rgx_hwperf.h | 1424 ++
+ .../include/volcanic/rgx_hwperf_table.h | 511 +
+ .../img-rogue/include/volcanic/rgx_options.h | 294 +
+ .../include/volcanic/rgxheapconfig.h | 278 +
+ .../volcanic/system/rgx_tc/tc_clocks.h | 101 +
+ drivers/gpu/drm/img-rogue/info_page.h | 99 +
+ drivers/gpu/drm/img-rogue/info_page_client.h | 89 +
+ drivers/gpu/drm/img-rogue/info_page_defs.h | 91 +
+ drivers/gpu/drm/img-rogue/info_page_km.c | 138 +
+ drivers/gpu/drm/img-rogue/interrupt_support.c | 151 +
+ drivers/gpu/drm/img-rogue/interrupt_support.h | 103 +
+ .../gpu/drm/img-rogue/kernel_compatibility.h | 521 +
+ .../img-rogue/kernel_config_compatibility.h | 54 +
+ drivers/gpu/drm/img-rogue/kernel_nospec.h | 71 +
+ drivers/gpu/drm/img-rogue/kernel_types.h | 137 +
+ .../gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h | 377 +
+ .../gpu/drm/img-rogue/km/rgx_bvnc_table_km.h | 462 +
+ drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h | 8077 ++++++++
+ drivers/gpu/drm/img-rogue/km/rgxdefs_km.h | 338 +
+ drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h | 286 +
+ drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h | 216 +
+ drivers/gpu/drm/img-rogue/km_apphint.c | 1751 ++
+ drivers/gpu/drm/img-rogue/km_apphint.h | 99 +
+ drivers/gpu/drm/img-rogue/km_apphint_defs.h | 160 +
+ .../drm/img-rogue/km_apphint_defs_common.h | 280 +
+ drivers/gpu/drm/img-rogue/linkage.h | 52 +
+ drivers/gpu/drm/img-rogue/linux_sw_sync.h | 52 +
+ drivers/gpu/drm/img-rogue/lists.c | 60 +
+ drivers/gpu/drm/img-rogue/lists.h | 367 +
+ drivers/gpu/drm/img-rogue/lock.h | 431 +
+ drivers/gpu/drm/img-rogue/lock_types.h | 92 +
+ drivers/gpu/drm/img-rogue/log2.h | 417 +
+ drivers/gpu/drm/img-rogue/mem_utils.c | 449 +
+ drivers/gpu/drm/img-rogue/mmu_common.c | 4464 +++++
+ drivers/gpu/drm/img-rogue/mmu_common.h | 792 +
+ drivers/gpu/drm/img-rogue/module_common.c | 730 +
+ drivers/gpu/drm/img-rogue/module_common.h | 101 +
+ drivers/gpu/drm/img-rogue/multicore_defs.h | 53 +
+ drivers/gpu/drm/img-rogue/opaque_types.h | 56 +
+ drivers/gpu/drm/img-rogue/os_cpu_cache.h | 69 +
+ drivers/gpu/drm/img-rogue/os_srvinit_param.h | 328 +
+ .../gpu/drm/img-rogue/osconnection_server.c | 157 +
+ .../gpu/drm/img-rogue/osconnection_server.h | 133 +
+ drivers/gpu/drm/img-rogue/osdi_impl.h | 205 +
+ drivers/gpu/drm/img-rogue/osfunc.c | 2648 +++
+ drivers/gpu/drm/img-rogue/osfunc.h | 1690 ++
+ drivers/gpu/drm/img-rogue/osfunc_arm.c | 151 +
+ drivers/gpu/drm/img-rogue/osfunc_arm64.c | 290 +
+ drivers/gpu/drm/img-rogue/osfunc_common.h | 300 +
+ drivers/gpu/drm/img-rogue/osfunc_riscv.c | 428 +
+ drivers/gpu/drm/img-rogue/osfunc_x86.c | 134 +
+ drivers/gpu/drm/img-rogue/oskm_apphint.h | 186 +
+ drivers/gpu/drm/img-rogue/osmmap.h | 115 +
+ drivers/gpu/drm/img-rogue/osmmap_stub.c | 146 +
+ drivers/gpu/drm/img-rogue/ospvr_gputrace.h | 167 +
+ drivers/gpu/drm/img-rogue/pci_support.c | 726 +
+ drivers/gpu/drm/img-rogue/pci_support.h | 99 +
+ drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk | 13 +
+ drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h | 764 +
+ drivers/gpu/drm/img-rogue/pdp2_regs.h | 8565 ++++++++
+ drivers/gpu/drm/img-rogue/pdp_drm.h | 105 +
+ drivers/gpu/drm/img-rogue/pdump.h | 238 +
+ drivers/gpu/drm/img-rogue/pdump_km.h | 1136 ++
+ drivers/gpu/drm/img-rogue/pdump_mmu.h | 180 +
+ drivers/gpu/drm/img-rogue/pdump_physmem.h | 257 +
+ .../gpu/drm/img-rogue/pdump_symbolicaddr.h | 55 +
+ drivers/gpu/drm/img-rogue/pdumpdefs.h | 249 +
+ drivers/gpu/drm/img-rogue/pdumpdesc.h | 226 +
+ drivers/gpu/drm/img-rogue/physheap.c | 1184 ++
+ drivers/gpu/drm/img-rogue/physheap.h | 497 +
+ drivers/gpu/drm/img-rogue/physheap_config.h | 119 +
+ drivers/gpu/drm/img-rogue/physmem.c | 839 +
+ drivers/gpu/drm/img-rogue/physmem.h | 321 +
+ drivers/gpu/drm/img-rogue/physmem_dmabuf.c | 1302 ++
+ drivers/gpu/drm/img-rogue/physmem_dmabuf.h | 124 +
+ drivers/gpu/drm/img-rogue/physmem_hostmem.c | 206 +
+ drivers/gpu/drm/img-rogue/physmem_hostmem.h | 65 +
+ drivers/gpu/drm/img-rogue/physmem_lma.c | 2003 ++
+ drivers/gpu/drm/img-rogue/physmem_lma.h | 93 +
+ drivers/gpu/drm/img-rogue/physmem_osmem.h | 142 +
+ .../gpu/drm/img-rogue/physmem_osmem_linux.c | 3952 ++++
+ .../gpu/drm/img-rogue/physmem_osmem_linux.h | 49 +
+ drivers/gpu/drm/img-rogue/physmem_test.c | 710 +
+ drivers/gpu/drm/img-rogue/physmem_test.h | 51 +
+ drivers/gpu/drm/img-rogue/plato_drv.h | 416 +
+ drivers/gpu/drm/img-rogue/pmr.c | 3697 ++++
+ drivers/gpu/drm/img-rogue/pmr.h | 1023 +
+ drivers/gpu/drm/img-rogue/pmr_impl.h | 539 +
+ drivers/gpu/drm/img-rogue/pmr_os.c | 619 +
+ drivers/gpu/drm/img-rogue/pmr_os.h | 62 +
+ drivers/gpu/drm/img-rogue/power.c | 929 +
+ drivers/gpu/drm/img-rogue/power.h | 430 +
+ .../drm/img-rogue/powervr/buffer_attribs.h | 193 +
+ .../drm/img-rogue/powervr/img_drm_fourcc.h | 140 +
+ drivers/gpu/drm/img-rogue/powervr/mem_types.h | 64 +
+ .../drm/img-rogue/powervr/pvrsrv_sync_ext.h | 72 +
+ drivers/gpu/drm/img-rogue/private_data.h | 59 +
+ drivers/gpu/drm/img-rogue/proc_stats.h | 135 +
+ drivers/gpu/drm/img-rogue/process_stats.c | 3358 ++++
+ drivers/gpu/drm/img-rogue/process_stats.h | 223 +
+ drivers/gpu/drm/img-rogue/pvr_bridge.h | 457 +
+ drivers/gpu/drm/img-rogue/pvr_bridge_k.c | 582 +
+ drivers/gpu/drm/img-rogue/pvr_bridge_k.h | 103 +
+ drivers/gpu/drm/img-rogue/pvr_buffer_sync.c | 646 +
+ drivers/gpu/drm/img-rogue/pvr_buffer_sync.h | 125 +
+ .../drm/img-rogue/pvr_buffer_sync_shared.h | 57 +
+ .../gpu/drm/img-rogue/pvr_counting_timeline.c | 308 +
+ .../gpu/drm/img-rogue/pvr_counting_timeline.h | 68 +
+ drivers/gpu/drm/img-rogue/pvr_debug.c | 481 +
+ drivers/gpu/drm/img-rogue/pvr_debug.h | 898 +
+ drivers/gpu/drm/img-rogue/pvr_debugfs.c | 623 +
+ drivers/gpu/drm/img-rogue/pvr_debugfs.h | 50 +
+ drivers/gpu/drm/img-rogue/pvr_dicommon.h | 59 +
+ drivers/gpu/drm/img-rogue/pvr_dma_resv.h | 80 +
+ drivers/gpu/drm/img-rogue/pvr_drm.c | 336 +
+ drivers/gpu/drm/img-rogue/pvr_drm.h | 146 +
+ drivers/gpu/drm/img-rogue/pvr_drv.h | 106 +
+ .../gpu/drm/img-rogue/pvr_fd_sync_kernel.h | 64 +
+ drivers/gpu/drm/img-rogue/pvr_fence.c | 1149 ++
+ drivers/gpu/drm/img-rogue/pvr_fence.h | 248 +
+ drivers/gpu/drm/img-rogue/pvr_fence_trace.h | 225 +
+ drivers/gpu/drm/img-rogue/pvr_gputrace.c | 1281 ++
+ drivers/gpu/drm/img-rogue/pvr_intrinsics.h | 70 +
+ drivers/gpu/drm/img-rogue/pvr_ion_stats.h | 80 +
+ drivers/gpu/drm/img-rogue/pvr_linux_fence.h | 103 +
+ drivers/gpu/drm/img-rogue/pvr_notifier.c | 647 +
+ drivers/gpu/drm/img-rogue/pvr_notifier.h | 326 +
+ drivers/gpu/drm/img-rogue/pvr_platform_drv.c | 326 +
+ drivers/gpu/drm/img-rogue/pvr_procfs.h | 50 +
+ drivers/gpu/drm/img-rogue/pvr_ricommon.h | 68 +
+ drivers/gpu/drm/img-rogue/pvr_sw_fence.c | 199 +
+ drivers/gpu/drm/img-rogue/pvr_sw_fence.h | 60 +
+ drivers/gpu/drm/img-rogue/pvr_sync.h | 120 +
+ drivers/gpu/drm/img-rogue/pvr_sync_api.h | 61 +
+ drivers/gpu/drm/img-rogue/pvr_sync_file.c | 1094 ++
+ .../gpu/drm/img-rogue/pvr_sync_ioctl_common.c | 277 +
+ .../gpu/drm/img-rogue/pvr_sync_ioctl_common.h | 71 +
+ .../gpu/drm/img-rogue/pvr_sync_ioctl_drm.c | 168 +
+ .../gpu/drm/img-rogue/pvr_sync_ioctl_drm.h | 62 +
+ drivers/gpu/drm/img-rogue/pvr_uaccess.h | 99 +
+ drivers/gpu/drm/img-rogue/pvr_vmap.h | 83 +
+ drivers/gpu/drm/img-rogue/pvrmodule.h | 48 +
+ drivers/gpu/drm/img-rogue/pvrsrv.c | 3028 +++
+ drivers/gpu/drm/img-rogue/pvrsrv.h | 542 +
+ drivers/gpu/drm/img-rogue/pvrsrv_apphint.h | 71 +
+ .../gpu/drm/img-rogue/pvrsrv_bridge_init.c | 385 +
+ .../gpu/drm/img-rogue/pvrsrv_bridge_init.h | 53 +
+ drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h | 177 +
+ drivers/gpu/drm/img-rogue/pvrsrv_device.h | 401 +
+ .../gpu/drm/img-rogue/pvrsrv_device_types.h | 55 +
+ drivers/gpu/drm/img-rogue/pvrsrv_devvar.h | 291 +
+ drivers/gpu/drm/img-rogue/pvrsrv_error.c | 61 +
+ drivers/gpu/drm/img-rogue/pvrsrv_error.h | 61 +
+ drivers/gpu/drm/img-rogue/pvrsrv_errors.h | 410 +
+ .../gpu/drm/img-rogue/pvrsrv_firmware_boot.h | 87 +
+ .../drm/img-rogue/pvrsrv_memalloc_physheap.h | 170 +
+ .../gpu/drm/img-rogue/pvrsrv_memallocflags.h | 969 +
+ .../img-rogue/pvrsrv_memallocflags_internal.h | 78 +
+ drivers/gpu/drm/img-rogue/pvrsrv_pool.c | 260 +
+ drivers/gpu/drm/img-rogue/pvrsrv_pool.h | 135 +
+ drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h | 65 +
+ .../gpu/drm/img-rogue/pvrsrv_sync_server.h | 278 +
+ drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h | 260 +
+ drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h | 61 +
+ drivers/gpu/drm/img-rogue/pvrsrvkm.mk | 148 +
+ drivers/gpu/drm/img-rogue/pvrversion.h | 68 +
+ drivers/gpu/drm/img-rogue/ra.c | 2166 +++
+ drivers/gpu/drm/img-rogue/ra.h | 386 +
+ drivers/gpu/drm/img-rogue/rgx_bridge.h | 243 +
+ drivers/gpu/drm/img-rogue/rgx_bridge_init.c | 111 +
+ drivers/gpu/drm/img-rogue/rgx_bridge_init.h | 55 +
+ drivers/gpu/drm/img-rogue/rgx_common.h | 235 +
+ .../gpu/drm/img-rogue/rgx_common_asserts.h | 73 +
+ drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h | 140 +
+ drivers/gpu/drm/img-rogue/rgx_fw_info.h | 135 +
+ .../gpu/drm/img-rogue/rgx_fwif_alignchecks.h | 192 +
+ drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h | 252 +
+ drivers/gpu/drm/img-rogue/rgx_fwif_km.h | 2341 +++
+ .../drm/img-rogue/rgx_fwif_resetframework.h | 70 +
+ drivers/gpu/drm/img-rogue/rgx_fwif_sf.h | 931 +
+ drivers/gpu/drm/img-rogue/rgx_fwif_shared.h | 335 +
+ drivers/gpu/drm/img-rogue/rgx_heap_firmware.h | 120 +
+ drivers/gpu/drm/img-rogue/rgx_heaps.h | 68 +
+ drivers/gpu/drm/img-rogue/rgx_hwperf.h | 1607 ++
+ drivers/gpu/drm/img-rogue/rgx_hwperf_common.h | 482 +
+ drivers/gpu/drm/img-rogue/rgx_hwperf_table.c | 635 +
+ drivers/gpu/drm/img-rogue/rgx_hwperf_table.h | 116 +
+ drivers/gpu/drm/img-rogue/rgx_memallocflags.h | 58 +
+ drivers/gpu/drm/img-rogue/rgx_meta.h | 385 +
+ drivers/gpu/drm/img-rogue/rgx_mips.h | 374 +
+ drivers/gpu/drm/img-rogue/rgx_options.h | 304 +
+ drivers/gpu/drm/img-rogue/rgx_pdump_panics.h | 64 +
+ drivers/gpu/drm/img-rogue/rgx_riscv.h | 250 +
+ drivers/gpu/drm/img-rogue/rgx_tq_shared.h | 63 +
+ drivers/gpu/drm/img-rogue/rgxapi_km.h | 336 +
+ drivers/gpu/drm/img-rogue/rgxbreakpoint.c | 290 +
+ drivers/gpu/drm/img-rogue/rgxbreakpoint.h | 141 +
+ drivers/gpu/drm/img-rogue/rgxbvnc.c | 852 +
+ drivers/gpu/drm/img-rogue/rgxbvnc.h | 90 +
+ drivers/gpu/drm/img-rogue/rgxccb.c | 2803 +++
+ drivers/gpu/drm/img-rogue/rgxccb.h | 356 +
+ drivers/gpu/drm/img-rogue/rgxcompute.c | 1324 ++
+ drivers/gpu/drm/img-rogue/rgxcompute.h | 173 +
+ drivers/gpu/drm/img-rogue/rgxdebug.c | 5792 ++++++
+ drivers/gpu/drm/img-rogue/rgxdebug.h | 229 +
+ drivers/gpu/drm/img-rogue/rgxdevice.h | 828 +
+ drivers/gpu/drm/img-rogue/rgxfw_log_helper.h | 79 +
+ drivers/gpu/drm/img-rogue/rgxfwdbg.c | 282 +
+ drivers/gpu/drm/img-rogue/rgxfwdbg.h | 113 +
+ drivers/gpu/drm/img-rogue/rgxfwimageutils.c | 1082 ++
+ drivers/gpu/drm/img-rogue/rgxfwimageutils.h | 223 +
+ .../gpu/drm/img-rogue/rgxfwtrace_strings.c | 56 +
+ drivers/gpu/drm/img-rogue/rgxfwutils.c | 7825 ++++++++
+ drivers/gpu/drm/img-rogue/rgxfwutils.h | 1362 ++
+ drivers/gpu/drm/img-rogue/rgxheapconfig.h | 290 +
+ .../gpu/drm/img-rogue/rgxheapconfig_65273.h | 124 +
+ drivers/gpu/drm/img-rogue/rgxhwperf.c | 694 +
+ drivers/gpu/drm/img-rogue/rgxhwperf.h | 74 +
+ drivers/gpu/drm/img-rogue/rgxhwperf_common.c | 3715 ++++
+ drivers/gpu/drm/img-rogue/rgxhwperf_common.h | 512 +
+ drivers/gpu/drm/img-rogue/rgxinit.c | 5158 +++++
+ drivers/gpu/drm/img-rogue/rgxinit.h | 281 +
+ drivers/gpu/drm/img-rogue/rgxkicksync.c | 794 +
+ drivers/gpu/drm/img-rogue/rgxkicksync.h | 128 +
+ drivers/gpu/drm/img-rogue/rgxlayer.h | 812 +
+ drivers/gpu/drm/img-rogue/rgxlayer_impl.c | 1318 ++
+ drivers/gpu/drm/img-rogue/rgxlayer_impl.h | 67 +
+ drivers/gpu/drm/img-rogue/rgxmem.c | 947 +
+ drivers/gpu/drm/img-rogue/rgxmem.h | 147 +
+ drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c | 1045 +
+ drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h | 97 +
+ drivers/gpu/drm/img-rogue/rgxmmuinit.c | 1079 ++
+ drivers/gpu/drm/img-rogue/rgxmmuinit.h | 60 +
+ drivers/gpu/drm/img-rogue/rgxmulticore.c | 224 +
+ drivers/gpu/drm/img-rogue/rgxmulticore.h | 54 +
+ drivers/gpu/drm/img-rogue/rgxpower.c | 1628 ++
+ drivers/gpu/drm/img-rogue/rgxpower.h | 286 +
+ drivers/gpu/drm/img-rogue/rgxregconfig.c | 319 +
+ drivers/gpu/drm/img-rogue/rgxregconfig.h | 130 +
+ drivers/gpu/drm/img-rogue/rgxshader.c | 302 +
+ drivers/gpu/drm/img-rogue/rgxshader.h | 83 +
+ drivers/gpu/drm/img-rogue/rgxsrvinit.c | 1657 ++
+ drivers/gpu/drm/img-rogue/rgxstartstop.c | 1331 ++
+ drivers/gpu/drm/img-rogue/rgxstartstop.h | 84 +
+ drivers/gpu/drm/img-rogue/rgxsyncutils.c | 184 +
+ drivers/gpu/drm/img-rogue/rgxsyncutils.h | 76 +
+ drivers/gpu/drm/img-rogue/rgxta3d.c | 5426 ++++++
+ drivers/gpu/drm/img-rogue/rgxta3d.h | 502 +
+ drivers/gpu/drm/img-rogue/rgxtdmtransfer.c | 1329 ++
+ drivers/gpu/drm/img-rogue/rgxtdmtransfer.h | 132 +
+ drivers/gpu/drm/img-rogue/rgxtimecorr.c | 648 +
+ drivers/gpu/drm/img-rogue/rgxtimecorr.h | 272 +
+ drivers/gpu/drm/img-rogue/rgxtimerquery.c | 244 +
+ drivers/gpu/drm/img-rogue/rgxtimerquery.h | 123 +
+ drivers/gpu/drm/img-rogue/rgxtransfer.c | 1805 ++
+ drivers/gpu/drm/img-rogue/rgxtransfer.h | 153 +
+ .../gpu/drm/img-rogue/rgxtransfer_shader.h | 61 +
+ drivers/gpu/drm/img-rogue/rgxutils.c | 221 +
+ drivers/gpu/drm/img-rogue/rgxutils.h | 185 +
+ drivers/gpu/drm/img-rogue/ri_server.c | 2123 ++
+ drivers/gpu/drm/img-rogue/ri_server.h | 106 +
+ drivers/gpu/drm/img-rogue/ri_typedefs.h | 52 +
+ .../gpu/drm/img-rogue/rogue_trace_events.h | 543 +
+ .../gpu/drm/img-rogue/server_cache_bridge.c | 457 +
+ drivers/gpu/drm/img-rogue/server_cmm_bridge.c | 409 +
+ .../server_devicememhistory_bridge.c | 846 +
+ drivers/gpu/drm/img-rogue/server_di_bridge.c | 639 +
+ .../gpu/drm/img-rogue/server_dmabuf_bridge.c | 694 +
+ .../drm/img-rogue/server_htbuffer_bridge.c | 351 +
+ drivers/gpu/drm/img-rogue/server_mm_bridge.c | 3802 ++++
+ .../gpu/drm/img-rogue/server_pvrtl_bridge.c | 836 +
+ .../img-rogue/server_rgxbreakpoint_bridge.c | 370 +
+ .../gpu/drm/img-rogue/server_rgxcmp_bridge.c | 1171 ++
+ .../drm/img-rogue/server_rgxfwdbg_bridge.c | 305 +
+ .../drm/img-rogue/server_rgxhwperf_bridge.c | 651 +
+ .../drm/img-rogue/server_rgxkicksync_bridge.c | 579 +
+ .../img-rogue/server_rgxregconfig_bridge.c | 239 +
+ .../gpu/drm/img-rogue/server_rgxta3d_bridge.c | 2406 +++
+ .../img-rogue/server_rgxtimerquery_bridge.c | 167 +
+ .../gpu/drm/img-rogue/server_rgxtq2_bridge.c | 1210 ++
+ .../gpu/drm/img-rogue/server_rgxtq_bridge.c | 1212 ++
+ drivers/gpu/drm/img-rogue/server_ri_bridge.c | 760 +
+ .../gpu/drm/img-rogue/server_srvcore_bridge.c | 1072 +
+ .../gpu/drm/img-rogue/server_sync_bridge.c | 746 +
+ .../img-rogue/server_synctracking_bridge.c | 333 +
+ .../drm/img-rogue/services_kernel_client.h | 291 +
+ drivers/gpu/drm/img-rogue/services_km.h | 180 +
+ drivers/gpu/drm/img-rogue/servicesext.h | 156 +
+ drivers/gpu/drm/img-rogue/sofunc_pvr.h | 94 +
+ drivers/gpu/drm/img-rogue/srvcore.c | 1450 ++
+ drivers/gpu/drm/img-rogue/srvcore.h | 229 +
+ drivers/gpu/drm/img-rogue/srvinit.h | 68 +
+ drivers/gpu/drm/img-rogue/srvkm.h | 145 +
+ drivers/gpu/drm/img-rogue/sync.c | 907 +
+ drivers/gpu/drm/img-rogue/sync.h | 292 +
+ drivers/gpu/drm/img-rogue/sync_checkpoint.c | 2981 +++
+ drivers/gpu/drm/img-rogue/sync_checkpoint.h | 666 +
+ .../drm/img-rogue/sync_checkpoint_external.h | 83 +
+ .../gpu/drm/img-rogue/sync_checkpoint_init.h | 82 +
+ .../drm/img-rogue/sync_checkpoint_internal.h | 288 +
+ .../gpu/drm/img-rogue/sync_fallback_server.h | 204 +
+ drivers/gpu/drm/img-rogue/sync_internal.h | 127 +
+ .../gpu/drm/img-rogue/sync_prim_internal.h | 84 +
+ drivers/gpu/drm/img-rogue/sync_server.c | 1223 ++
+ drivers/gpu/drm/img-rogue/sync_server.h | 249 +
+ drivers/gpu/drm/img-rogue/syscommon.h | 146 +
+ drivers/gpu/drm/img-rogue/sysconfig.c | 462 +
+ drivers/gpu/drm/img-rogue/sysconfig.h | 59 +
+ drivers/gpu/drm/img-rogue/sysconfig_cmn.c | 132 +
+ drivers/gpu/drm/img-rogue/sysinfo.h | 58 +
+ drivers/gpu/drm/img-rogue/sysvalidation.h | 62 +
+ drivers/gpu/drm/img-rogue/tlclient.c | 500 +
+ drivers/gpu/drm/img-rogue/tlclient.h | 257 +
+ drivers/gpu/drm/img-rogue/tlintern.c | 473 +
+ drivers/gpu/drm/img-rogue/tlintern.h | 345 +
+ drivers/gpu/drm/img-rogue/tlserver.c | 747 +
+ drivers/gpu/drm/img-rogue/tlserver.h | 97 +
+ drivers/gpu/drm/img-rogue/tlstream.c | 1625 ++
+ drivers/gpu/drm/img-rogue/tlstream.h | 600 +
+ drivers/gpu/drm/img-rogue/trace_events.c | 265 +
+ drivers/gpu/drm/img-rogue/trace_events.h | 198 +
+ .../gpu/drm/img-rogue/uniq_key_splay_tree.c | 280 +
+ .../gpu/drm/img-rogue/uniq_key_splay_tree.h | 90 +
+ drivers/gpu/drm/img-rogue/vmm_impl.h | 186 +
+ drivers/gpu/drm/img-rogue/vmm_pvz_client.c | 138 +
+ drivers/gpu/drm/img-rogue/vmm_pvz_client.h | 77 +
+ drivers/gpu/drm/img-rogue/vmm_pvz_common.h | 65 +
+ drivers/gpu/drm/img-rogue/vmm_pvz_server.c | 245 +
+ drivers/gpu/drm/img-rogue/vmm_pvz_server.h | 121 +
+ drivers/gpu/drm/img-rogue/vmm_type_stub.c | 119 +
+ drivers/gpu/drm/img-rogue/vz_vm.h | 61 +
+ drivers/gpu/drm/img-rogue/vz_vmm_pvz.c | 183 +
+ drivers/gpu/drm/img-rogue/vz_vmm_pvz.h | 79 +
+ drivers/gpu/drm/img-rogue/vz_vmm_vm.c | 221 +
+ drivers/gpu/drm/img-rogue/xuantie_sys.c | 521 +
+ drivers/gpu/drm/img-rogue/xuantie_sys.h | 75 +
+ drivers/gpu/drm/panel/Kconfig | 9 +
+ drivers/gpu/drm/panel/Makefile | 3 +-
+ drivers/gpu/drm/panel/panel-himax-hx8279.c | 326 +
+ .../gpu/drm/panel/panel-jadard-jd9365da-h3.c | 37 +-
+ drivers/gpu/drm/panel/panel-jadard-jd9365da.c | 356 +
+ drivers/gpu/drm/radeon/radeon_irq_kms.c | 2 +
+ drivers/gpu/drm/ttm/ttm_bo_util.c | 5 +-
+ drivers/gpu/drm/ttm/ttm_module.c | 3 +-
+ drivers/gpu/drm/ttm/ttm_resource.c | 7 +-
+ drivers/gpu/drm/ttm/ttm_tt.c | 2 +-
+ drivers/gpu/drm/verisilicon/Kconfig | 62 +
+ drivers/gpu/drm/verisilicon/Makefile | 18 +
+ drivers/gpu/drm/verisilicon/dw_hdmi-th1520.c | 213 +
+ drivers/gpu/drm/verisilicon/dw_hdmi_th1520.h | 7 +
+ .../gpu/drm/verisilicon/dw_hdmi_tx_phy_gen2.h | 717 +
+ drivers/gpu/drm/verisilicon/dw_mipi_dsi.c | 1171 ++
+ drivers/gpu/drm/verisilicon/dw_mipi_dsi.h | 10 +
+ drivers/gpu/drm/verisilicon/vs_crtc.c | 462 +
+ drivers/gpu/drm/verisilicon/vs_crtc.h | 78 +
+ drivers/gpu/drm/verisilicon/vs_dc.c | 1395 ++
+ drivers/gpu/drm/verisilicon/vs_dc.h | 60 +
+ drivers/gpu/drm/verisilicon/vs_dc_dec.c | 386 +
+ drivers/gpu/drm/verisilicon/vs_dc_dec.h | 106 +
+ drivers/gpu/drm/verisilicon/vs_dc_hw.c | 2292 +++
+ drivers/gpu/drm/verisilicon/vs_dc_hw.h | 578 +
+ drivers/gpu/drm/verisilicon/vs_dc_mmu.c | 707 +
+ drivers/gpu/drm/verisilicon/vs_dc_mmu.h | 98 +
+ drivers/gpu/drm/verisilicon/vs_drv.c | 709 +
+ drivers/gpu/drm/verisilicon/vs_drv.h | 64 +
+ drivers/gpu/drm/verisilicon/vs_fb.c | 178 +
+ drivers/gpu/drm/verisilicon/vs_fb.h | 13 +
+ drivers/gpu/drm/verisilicon/vs_gem.c | 554 +
+ drivers/gpu/drm/verisilicon/vs_gem.h | 71 +
+ drivers/gpu/drm/verisilicon/vs_plane.c | 428 +
+ drivers/gpu/drm/verisilicon/vs_plane.h | 77 +
+ drivers/gpu/drm/verisilicon/vs_simple_enc.c | 292 +
+ drivers/gpu/drm/verisilicon/vs_simple_enc.h | 27 +
+ drivers/gpu/drm/verisilicon/vs_type.h | 70 +
+ drivers/gpu/drm/verisilicon/vs_virtual.c | 359 +
+ drivers/gpu/drm/verisilicon/vs_virtual.h | 37 +
+ drivers/hwmon/mr75203.c | 35 +-
+ drivers/hwspinlock/Kconfig | 8 +
+ drivers/hwspinlock/Makefile | 1 +
+ drivers/hwspinlock/th1520_hwspinlock.c | 129 +
+ drivers/i2c/busses/Kconfig | 8 +
+ drivers/i2c/busses/Makefile | 2 +
+ drivers/i2c/busses/i2c-designware-common.c | 27 +
+ drivers/i2c/busses/i2c-designware-core.h | 22 +-
+ drivers/i2c/busses/i2c-designware-master.c | 77 +-
+ .../i2c/busses/i2c-designware-master_dma.c | 348 +
+ .../i2c/busses/i2c-designware-master_dma.h | 6 +
+ drivers/i2c/busses/i2c-designware-platdrv.c | 3 +
+ drivers/i2c/busses/i2c-spacemit-k1.c | 1299 ++
+ drivers/i2c/busses/i2c-spacemit-k1.h | 225 +
+ drivers/iio/adc/Kconfig | 23 +
+ drivers/iio/adc/Makefile | 2 +
+ drivers/iio/adc/spacemit-p1-adc.c | 278 +
+ drivers/iio/adc/th1520-adc.c | 573 +
+ drivers/iio/adc/th1520-adc.h | 192 +
+ drivers/input/misc/Kconfig | 10 +
+ drivers/input/misc/Makefile | 1 +
+ drivers/input/misc/spacemit-p1-pwrkey.c | 211 +
+ drivers/iommu/Kconfig | 1 +
+ drivers/iommu/Makefile | 2 +-
+ drivers/iommu/apple-dart.c | 3 +-
+ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 8 +-
+ drivers/iommu/arm/arm-smmu/arm-smmu.c | 3 +-
+ drivers/iommu/arm/arm-smmu/qcom_iommu.c | 3 +-
+ drivers/iommu/exynos-iommu.c | 2 +-
+ drivers/iommu/intel/dmar.c | 16 +-
+ drivers/iommu/intel/iommu.c | 47 +-
+ drivers/iommu/intel/iommu.h | 2 -
+ drivers/iommu/intel/irq_remapping.c | 16 +-
+ drivers/iommu/intel/pasid.c | 18 +-
+ drivers/iommu/intel/svm.c | 11 +-
+ drivers/iommu/iommu-pages.h | 154 +
+ drivers/iommu/iommu.c | 2 +-
+ drivers/iommu/ipmmu-vmsa.c | 4 +-
+ drivers/iommu/msm_iommu.c | 4 +-
+ drivers/iommu/mtk_iommu.c | 3 +-
+ drivers/iommu/mtk_iommu_v1.c | 3 +-
+ drivers/iommu/riscv/Kconfig | 20 +
+ drivers/iommu/riscv/Makefile | 3 +
+ drivers/iommu/riscv/iommu-bits.h | 784 +
+ drivers/iommu/riscv/iommu-pci.c | 120 +
+ drivers/iommu/riscv/iommu-platform.c | 92 +
+ drivers/iommu/riscv/iommu.c | 1661 ++
+ drivers/iommu/riscv/iommu.h | 88 +
+ drivers/iommu/rockchip-iommu.c | 2 +-
+ drivers/iommu/sprd-iommu.c | 3 +-
+ drivers/iommu/sun50i-iommu.c | 2 +-
+ drivers/iommu/tegra-smmu.c | 4 +-
+ drivers/iommu/virtio-iommu.c | 3 +-
+ drivers/irqchip/Kconfig | 45 +
+ drivers/irqchip/Makefile | 5 +
+ drivers/irqchip/irq-riscv-aplic-direct.c | 329 +
+ drivers/irqchip/irq-riscv-aplic-main.c | 234 +
+ drivers/irqchip/irq-riscv-aplic-main.h | 53 +
+ drivers/irqchip/irq-riscv-aplic-msi.c | 285 +
+ drivers/irqchip/irq-riscv-imsic-early.c | 263 +
+ drivers/irqchip/irq-riscv-imsic-platform.c | 395 +
+ drivers/irqchip/irq-riscv-imsic-state.c | 891 +
+ drivers/irqchip/irq-riscv-imsic-state.h | 108 +
+ drivers/irqchip/irq-riscv-intc.c | 152 +-
+ drivers/irqchip/irq-sg2044-msi.c | 403 +
+ drivers/irqchip/irq-sifive-plic.c | 365 +-
+ drivers/irqchip/irq-thead-c900-aclint-sswi.c | 351 +
+ drivers/mailbox/Kconfig | 8 +
+ drivers/mailbox/Makefile | 2 +
+ drivers/mailbox/bcm-flexrm-mailbox.c | 8 +-
+ drivers/mailbox/th1520-mailbox.c | 614 +
+ drivers/mfd/Kconfig | 12 +
+ drivers/mfd/Makefile | 2 +
+ drivers/mfd/spacemit-p1.c | 481 +
+ drivers/mmc/host/Kconfig | 25 +
+ drivers/mmc/host/Makefile | 2 +
+ drivers/mmc/host/sdhci-of-dwcmshc.c | 649 +
+ drivers/mmc/host/sdhci-of-k1.c | 1475 ++
+ drivers/mmc/host/sdhci-sophgo.c | 619 +
+ drivers/mmc/host/sdhci-sophgo.h | 121 +
+ drivers/mmc/host/sdhci.c | 12 +-
+ drivers/mmc/host/sdhci.h | 4 +
+ drivers/mtd/spi-nor/controllers/Kconfig | 11 +
+ drivers/mtd/spi-nor/controllers/Makefile | 1 +
+ .../mtd/spi-nor/controllers/sophgo-spifmc.c | 445 +
+ drivers/mtd/spi-nor/gigadevice.c | 14 +
+ drivers/net/ethernet/Kconfig | 1 +
+ drivers/net/ethernet/Makefile | 1 +
+ drivers/net/ethernet/intel/i40e/i40e_common.c | 3 +-
+ drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 2 +-
+ drivers/net/ethernet/spacemit/Kconfig | 24 +
+ drivers/net/ethernet/spacemit/Makefile | 6 +
+ drivers/net/ethernet/spacemit/k1-emac.c | 2739 +++
+ drivers/net/ethernet/spacemit/k1-emac.h | 727 +
+ drivers/net/ethernet/stmicro/stmmac/Kconfig | 18 +
+ drivers/net/ethernet/stmicro/stmmac/Makefile | 2 +
+ .../ethernet/stmicro/stmmac/dwmac-sophgo.c | 268 +
+ .../ethernet/stmicro/stmmac/dwmac-xuantie.c | 584 +
+ .../net/ethernet/stmicro/stmmac/dwmac_lib.c | 2 +-
+ drivers/net/wireless/Kconfig | 2 +-
+ drivers/net/wireless/Makefile | 1 +
+ drivers/net/wireless/aic8800/Kconfig | 17 +
+ drivers/net/wireless/aic8800/Makefile | 84 +
+ .../wireless/aic8800/aic8800_bsp/.gitignore | 10 +
+ .../net/wireless/aic8800/aic8800_bsp/Makefile | 135 +
+ .../aic8800/aic8800_bsp/aic8800d80_compat.c | 252 +
+ .../aic8800/aic8800_bsp/aic8800d80_compat.h | 24 +
+ .../aic8800/aic8800_bsp/aic8800dc_compat.c | 2275 +++
+ .../aic8800/aic8800_bsp/aic8800dc_compat.h | 37 +
+ .../aic8800/aic8800_bsp/aic_bsp_driver.c | 2051 ++
+ .../aic8800/aic8800_bsp/aic_bsp_driver.h | 581 +
+ .../aic8800/aic8800_bsp/aic_bsp_export.h | 65 +
+ .../aic8800/aic8800_bsp/aic_bsp_main.c | 393 +
+ .../wireless/aic8800/aic8800_bsp/aicsdio.c | 1980 ++
+ .../wireless/aic8800/aic8800_bsp/aicsdio.h | 148 +
+ .../aic8800/aic8800_bsp/aicsdio_txrxif.c | 465 +
+ .../aic8800/aic8800_bsp/aicsdio_txrxif.h | 214 +
+ .../aic8800_bsp/aicwf_firmware_array.c | 16138 ++++++++++++++++
+ .../aic8800_bsp/aicwf_firmware_array.h | 3 +
+ .../aic8800/aic8800_bsp/aicwf_txq_prealloc.c | 62 +
+ .../aic8800/aic8800_bsp/aicwf_txq_prealloc.h | 4 +
+ .../net/wireless/aic8800/aic8800_bsp/md5.c | 161 +
+ .../net/wireless/aic8800/aic8800_bsp/md5.h | 48 +
+ .../aic8800/aic8800_bsp/rwnx_version_gen.h | 4 +
+ .../wireless/aic8800/aic8800_btlpm/.gitignore | 10 +
+ .../wireless/aic8800/aic8800_btlpm/Kconfig | 5 +
+ .../wireless/aic8800/aic8800_btlpm/Makefile | 81 +
+ .../aic8800/aic8800_btlpm/aic8800_btlpm.c | 1167 ++
+ .../aic8800_btlpm/aic_bluetooth_main.c | 88 +
+ .../aic8800/aic8800_btlpm/aic_bsp_export.h | 19 +
+ .../net/wireless/aic8800/aic8800_btlpm/lpm.c | 1111 ++
+ .../net/wireless/aic8800/aic8800_btlpm/lpm.h | 21 +
+ .../wireless/aic8800/aic8800_btlpm/rfkill.c | 81 +
+ .../wireless/aic8800/aic8800_btlpm/rfkill.h | 17 +
+ .../wireless/aic8800/aic8800_fdrv/.gitignore | 10 +
+ .../net/wireless/aic8800/aic8800_fdrv/Kconfig | 4 +
+ .../wireless/aic8800/aic8800_fdrv/Makefile | 376 +
+ .../aic8800/aic8800_fdrv/aic_br_ext.c | 1569 ++
+ .../aic8800/aic8800_fdrv/aic_br_ext.h | 73 +
+ .../aic8800/aic8800_fdrv/aic_bsp_export.h | 58 +
+ .../aic8800/aic8800_fdrv/aic_btsdio.c | 1310 ++
+ .../aic8800/aic8800_fdrv/aic_btsdio.h | 549 +
+ .../aic8800/aic8800_fdrv/aic_vendor.c | 909 +
+ .../aic8800/aic8800_fdrv/aic_vendor.h | 346 +
+ .../aic8800_fdrv/aicwf_compat_8800d80.c | 66 +
+ .../aic8800_fdrv/aicwf_compat_8800d80.h | 9 +
+ .../aic8800_fdrv/aicwf_compat_8800dc.c | 542 +
+ .../aic8800_fdrv/aicwf_compat_8800dc.h | 15 +
+ .../aic8800/aic8800_fdrv/aicwf_debug.h | 56 +
+ .../aic8800/aic8800_fdrv/aicwf_rx_prealloc.c | 97 +
+ .../aic8800/aic8800_fdrv/aicwf_rx_prealloc.h | 24 +
+ .../aic8800/aic8800_fdrv/aicwf_sdio.c | 2591 +++
+ .../aic8800/aic8800_fdrv/aicwf_sdio.h | 186 +
+ .../aic8800/aic8800_fdrv/aicwf_tcp_ack.c | 633 +
+ .../aic8800/aic8800_fdrv/aicwf_tcp_ack.h | 111 +
+ .../aic8800/aic8800_fdrv/aicwf_txrxif.c | 885 +
+ .../aic8800/aic8800_fdrv/aicwf_txrxif.h | 262 +
+ .../wireless/aic8800/aic8800_fdrv/aicwf_usb.c | 957 +
+ .../wireless/aic8800/aic8800_fdrv/aicwf_usb.h | 99 +
+ .../wireless/aic8800/aic8800_fdrv/hal_desc.h | 353 +
+ .../aic8800/aic8800_fdrv/ipc_compat.h | 25 +
+ .../wireless/aic8800/aic8800_fdrv/ipc_host.c | 52 +
+ .../wireless/aic8800/aic8800_fdrv/ipc_host.h | 168 +
+ .../aic8800/aic8800_fdrv/ipc_shared.h | 785 +
+ .../wireless/aic8800/aic8800_fdrv/lmac_mac.h | 564 +
+ .../wireless/aic8800/aic8800_fdrv/lmac_msg.h | 3082 +++
+ .../aic8800/aic8800_fdrv/lmac_types.h | 62 +
+ .../net/wireless/aic8800/aic8800_fdrv/md5.c | 161 +
+ .../net/wireless/aic8800/aic8800_fdrv/md5.h | 48 +
+ .../aic8800/aic8800_fdrv/reg_access.h | 148 +
+ .../net/wireless/aic8800/aic8800_fdrv/regdb.c | 2898 +++
+ .../aic8800/aic8800_fdrv/rwnx_bfmer.c | 105 +
+ .../aic8800/aic8800_fdrv/rwnx_bfmer.h | 100 +
+ .../aic8800/aic8800_fdrv/rwnx_cfgfile.c | 239 +
+ .../aic8800/aic8800_fdrv/rwnx_cfgfile.h | 35 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_cmds.c | 539 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_cmds.h | 124 +
+ .../aic8800/aic8800_fdrv/rwnx_compat.h | 451 +
+ .../aic8800/aic8800_fdrv/rwnx_debugfs.c | 2455 +++
+ .../aic8800/aic8800_fdrv/rwnx_debugfs.h | 202 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_defs.h | 746 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_dini.c | 297 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_dini.h | 20 +
+ .../aic8800/aic8800_fdrv/rwnx_events.h | 1326 ++
+ .../aic8800/aic8800_fdrv/rwnx_fw_trace.c | 48 +
+ .../aic8800/aic8800_fdrv/rwnx_fw_trace.h | 35 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_gki.c | 408 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_gki.h | 72 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_irqs.c | 65 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_irqs.h | 20 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_main.c | 7394 +++++++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_main.h | 40 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_mesh.c | 42 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_mesh.h | 45 +
+ .../aic8800/aic8800_fdrv/rwnx_mod_params.c | 1754 ++
+ .../aic8800/aic8800_fdrv/rwnx_mod_params.h | 70 +
+ .../aic8800/aic8800_fdrv/rwnx_msg_rx.c | 1567 ++
+ .../aic8800/aic8800_fdrv/rwnx_msg_rx.h | 19 +
+ .../aic8800/aic8800_fdrv/rwnx_msg_tx.c | 3677 ++++
+ .../aic8800/aic8800_fdrv/rwnx_msg_tx.h | 186 +
+ .../aic8800/aic8800_fdrv/rwnx_mu_group.c | 659 +
+ .../aic8800/aic8800_fdrv/rwnx_mu_group.h | 181 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_pci.c | 94 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_pci.h | 17 +
+ .../aic8800/aic8800_fdrv/rwnx_platform.c | 2108 ++
+ .../aic8800/aic8800_fdrv/rwnx_platform.h | 136 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_prof.h | 133 +
+ .../aic8800/aic8800_fdrv/rwnx_radar.c | 1644 ++
+ .../aic8800/aic8800_fdrv/rwnx_radar.h | 160 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_rx.c | 2501 +++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_rx.h | 392 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_strs.c | 266 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_strs.h | 31 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_tdls.c | 785 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_tdls.h | 54 +
+ .../aic8800/aic8800_fdrv/rwnx_testmode.c | 230 +
+ .../aic8800/aic8800_fdrv/rwnx_testmode.h | 64 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_tx.c | 1953 ++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_tx.h | 188 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_txq.c | 1370 ++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_txq.h | 402 +
+ .../aic8800/aic8800_fdrv/rwnx_utils.c | 39 +
+ .../aic8800/aic8800_fdrv/rwnx_utils.h | 133 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_v7.c | 195 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_v7.h | 20 +
+ .../aic8800/aic8800_fdrv/rwnx_version.h | 12 +
+ .../aic8800/aic8800_fdrv/rwnx_version_gen.h | 4 +
+ .../aic8800/aic8800_fdrv/rwnx_wakelock.c | 86 +
+ .../aic8800/aic8800_fdrv/rwnx_wakelock.h | 21 +
+ .../wireless/aic8800/aic8800_fdrv/sdio_host.c | 137 +
+ .../wireless/aic8800/aic8800_fdrv/sdio_host.h | 41 +
+ .../wireless/aic8800/aic8800_fdrv/usb_host.c | 146 +
+ .../wireless/aic8800/aic8800_fdrv/usb_host.h | 41 +
+ drivers/nvmem/Kconfig | 10 +
+ drivers/nvmem/Makefile | 2 +
+ drivers/nvmem/th1520-efuse.c | 1197 ++
+ drivers/of/device.c | 42 +-
+ drivers/pci/controller/cadence/Kconfig | 11 +
+ drivers/pci/controller/cadence/Makefile | 1 +
+ .../controller/cadence/pcie-cadence-sophgo.c | 973 +
+ .../controller/cadence/pcie-cadence-sophgo.h | 17 +
+ drivers/pci/controller/dwc/Kconfig | 11 +
+ drivers/pci/controller/dwc/Makefile | 1 +
+ drivers/pci/controller/dwc/pcie-dw-sophgo.c | 1687 ++
+ drivers/pci/controller/dwc/pcie-dw-sophgo.h | 251 +
+ drivers/pci/msi/msi.c | 97 +-
+ drivers/pci/pci-acpi.c | 248 +-
+ drivers/pci/pci.h | 4 +-
+ drivers/pci/pcie/portdrv.c | 2 +-
+ drivers/perf/Kconfig | 14 +
+ drivers/perf/arm_smmuv3_pmu.c | 4 +-
+ drivers/perf/riscv_pmu_sbi.c | 44 +-
+ drivers/phy/Kconfig | 1 +
+ drivers/phy/Makefile | 3 +-
+ drivers/phy/synopsys/Kconfig | 13 +
+ drivers/phy/synopsys/Makefile | 3 +
+ drivers/phy/synopsys/phy-dw-mipi-dphy.c | 824 +
+ drivers/pinctrl/Kconfig | 36 +-
+ drivers/pinctrl/Makefile | 4 +
+ drivers/pinctrl/pinctrl-spacemit-k1x.c | 2101 ++
+ drivers/pinctrl/pinctrl-spacemit-p1.c | 631 +
+ drivers/pinctrl/pinctrl-th1520.c | 1180 ++
+ drivers/pinctrl/sophgo/Makefile | 2 +
+ drivers/pinctrl/sophgo/pinctrl-mango.c | 453 +
+ drivers/pinctrl/sophgo/pinctrl-sophgo.c | 292 +
+ drivers/pinctrl/sophgo/pinctrl-sophgo.h | 70 +
+ .../platform/surface/surface_acpi_notify.c | 14 +-
+ drivers/pwm/Kconfig | 13 +-
+ drivers/pwm/Makefile | 2 +
+ drivers/pwm/pwm-pxa.c | 22 +-
+ drivers/pwm/pwm-sophgo.c | 276 +
+ drivers/pwm/pwm-xuantie.c | 270 +
+ drivers/regulator/Kconfig | 17 +
+ drivers/regulator/Makefile | 2 +
+ drivers/regulator/spacemit-p1-regulator.c | 268 +
+ drivers/regulator/th1520-aon-regulator.c | 770 +
+ drivers/reset/Kconfig | 16 +
+ drivers/reset/Makefile | 3 +
+ drivers/reset/reset-sophgo.c | 163 +
+ drivers/reset/reset-spacemit-k1x.c | 669 +
+ drivers/reset/reset-th1520.c | 170 +
+ drivers/rpmsg/Kconfig | 4 +
+ drivers/rpmsg/Makefile | 1 +
+ drivers/rpmsg/th1520_rpmsg.c | 958 +
+ drivers/rtc/Kconfig | 28 +-
+ drivers/rtc/Makefile | 2 +
+ drivers/rtc/rtc-astbmc.c | 535 +
+ drivers/rtc/rtc-spacemit-p1.c | 716 +
+ drivers/rtc/rtc-xgene.c | 32 +
+ drivers/soc/Kconfig | 2 +
+ drivers/soc/Makefile | 3 +
+ drivers/soc/sophgo/Makefile | 3 +
+ drivers/soc/sophgo/tach/sophgo-tach.c | 330 +
+ drivers/soc/sophgo/top/top_intc.c | 412 +
+ drivers/soc/sophgo/umcu/mcu.c | 1144 ++
+ drivers/soc/spacemit/Kconfig | 13 +
+ drivers/soc/spacemit/Makefile | 1 +
+ drivers/soc/spacemit/spacemit-mem-range.c | 39 +
+ drivers/soc/xuantie/Kconfig | 34 +
+ drivers/soc/xuantie/Makefile | 13 +
+ drivers/soc/xuantie/nna/GPLHEADER | 356 +
+ drivers/soc/xuantie/nna/Kconfig | 64 +
+ drivers/soc/xuantie/nna/Makefile | 7 +
+ drivers/soc/xuantie/nna/README | 29 +
+ drivers/soc/xuantie/nna/build.mk | 161 +
+ .../dmabuf_exporter/FindDmaBufExporter.cmake | 8 +
+ .../soc/xuantie/nna/dmabuf_exporter/Makefile | 36 +
+ .../soc/xuantie/nna/dmabuf_exporter/README | 17 +
+ .../xuantie/nna/dmabuf_exporter/de_common.c | 176 +
+ .../soc/xuantie/nna/dmabuf_exporter/de_heap.h | 28 +
+ .../nna/dmabuf_exporter/de_heap_carveout.c | 468 +
+ .../nna/dmabuf_exporter/de_heap_coherent.c | 303 +
+ .../xuantie/nna/dmabuf_exporter/de_heap_ion.c | 212 +
+ .../xuantie/nna/dmabuf_exporter/de_heap_ion.h | 46 +
+ .../nna/dmabuf_exporter/de_heap_ion_example.c | 205 +
+ .../nna/dmabuf_exporter/de_heap_noncoherent.c | 496 +
+ .../nna/dmabuf_exporter/test/dma-map.c | 77 +
+ .../nna/dmabuf_exporter/test/dma-test.c | 61 +
+ .../dmabuf_exporter/uapi/dmabuf_exporter.h | 20 +
+ .../dmabuf_exporter/uapi/kernel_4x14/ion.h | 136 +
+ .../nna/dmabuf_exporter/uapi/kernel_4x4/ion.h | 203 +
+ drivers/soc/xuantie/nna/fenrir_loki/Makefile | 3 +
+ .../soc/xuantie/nna/fenrir_loki/loki-intc.c | 159 +
+ .../soc/xuantie/nna/fenrir_loki/loki-main.c | 110 +
+ drivers/soc/xuantie/nna/fenrir_loki/loki.h | 44 +
+ drivers/soc/xuantie/nna/img_mem/Makefile | 34 +
+ .../xuantie/nna/img_mem/img_mem_anonymous.c | 349 +
+ .../xuantie/nna/img_mem/img_mem_carveout.c | 854 +
+ .../xuantie/nna/img_mem/img_mem_coherent.c | 204 +
+ .../soc/xuantie/nna/img_mem/img_mem_dmabuf.c | 551 +
+ drivers/soc/xuantie/nna/img_mem/img_mem_ion.c | 266 +
+ drivers/soc/xuantie/nna/img_mem/img_mem_man.c | 2768 +++
+ .../xuantie/nna/img_mem/img_mem_man_priv.h | 213 +
+ drivers/soc/xuantie/nna/img_mem/img_mem_ocm.c | 173 +
+ .../soc/xuantie/nna/img_mem/img_mem_unified.c | 1060 +
+ drivers/soc/xuantie/nna/img_mem/img_pdump.c | 194 +
+ .../soc/xuantie/nna/img_mem/imgmmu/imgmmu.c | 1449 ++
+ .../xuantie/nna/img_mem/imgmmu/kernel_heap.c | 307 +
+ .../soc/xuantie/nna/img_mem/imgmmu/mmu_defs.h | 146 +
+ .../xuantie/nna/img_mem/imgmmu/mmulib/heap.h | 159 +
+ .../xuantie/nna/img_mem/imgmmu/mmulib/mmu.h | 449 +
+ .../xuantie/nna/include/hwdefs/aura_system.h | 4 +
+ .../xuantie/nna/include/hwdefs/gyrus_system.h | 24 +
+ .../xuantie/nna/include/hwdefs/magna_system.h | 4 +
+ .../nna/include/hwdefs/mirage_system.h | 4 +
+ .../nna/include/hwdefs/nn_sys_cr_gyrus.h | 355 +
+ .../nna/include/hwdefs/nn_sys_cr_vagus.h | 364 +
+ .../xuantie/nna/include/hwdefs/vagus_system.h | 8 +
+ .../xuantie/nna/include/hwdefs/vha_cr_aura.h | 5471 ++++++
+ .../xuantie/nna/include/hwdefs/vha_cr_gyrus.h | 4998 +++++
+ .../xuantie/nna/include/hwdefs/vha_cr_magna.h | 6553 +++++++
+ .../nna/include/hwdefs/vha_cr_mirage.h | 3171 +++
+ .../soc/xuantie/nna/include/hwdefs/vha_tb.h | 101 +
+ drivers/soc/xuantie/nna/include/img_mem_man.h | 296 +
+ drivers/soc/xuantie/nna/include/nexef_plat.h | 55 +
+ .../xuantie/nna/include/uapi/img_mem_man.h | 118 +
+ .../soc/xuantie/nna/include/uapi/version.h | 49 +
+ drivers/soc/xuantie/nna/include/uapi/vha.h | 423 +
+ .../soc/xuantie/nna/include/uapi/vha_errors.h | 116 +
+ .../soc/xuantie/nna/include/vha_drv_common.h | 72 +
+ .../soc/xuantie/nna/include/vha_trace_point.h | 104 +
+ .../soc/xuantie/nna/nexef_platform/Makefile | 24 +
+ .../soc/xuantie/nna/nexef_platform/README.md | 110 +
+ .../xuantie/nna/nexef_platform/nexef_plat.c | 1799 ++
+ .../nna/nexef_platform/set_fpga_freq.py | 12 +
+ drivers/soc/xuantie/nna/vha/Makefile | 139 +
+ drivers/soc/xuantie/nna/vha/multi/vha_dev.c | 4213 ++++
+ drivers/soc/xuantie/nna/vha/multi/vha_mmu.c | 261 +
+ .../soc/xuantie/nna/vha/multi/vha_mt19937.c | 229 +
+ .../soc/xuantie/nna/vha/multi/vha_mt19937.h | 93 +
+ drivers/soc/xuantie/nna/vha/multi/vha_regs.h | 391 +
+ .../soc/xuantie/nna/vha/multi/vha_sc_dbg.c | 264 +
+ drivers/soc/xuantie/nna/vha/multi/vha_wm.c | 1896 ++
+ .../soc/xuantie/nna/vha/platform/vha_plat.h | 104 +
+ .../nna/vha/platform/vha_plat_apollo.c | 862 +
+ .../xuantie/nna/vha/platform/vha_plat_dt.c | 386 +
+ .../xuantie/nna/vha/platform/vha_plat_dt.h | 78 +
+ .../nna/vha/platform/vha_plat_dt_example.c | 156 +
+ .../nna/vha/platform/vha_plat_dt_example.dts | 60 +
+ .../nna/vha/platform/vha_plat_dt_fenrir.dts | 81 +
+ .../xuantie/nna/vha/platform/vha_plat_dummy.c | 361 +
+ .../xuantie/nna/vha/platform/vha_plat_emu.c | 641 +
+ .../xuantie/nna/vha/platform/vha_plat_frost.c | 1004 +
+ .../xuantie/nna/vha/platform/vha_plat_nexef.c | 491 +
+ .../xuantie/nna/vha/platform/vha_plat_odin.c | 1152 ++
+ .../xuantie/nna/vha/platform/vha_plat_orion.c | 1065 +
+ .../vha_plat_param_xuantie_th1520_fpga_c910.h | 36 +
+ .../xuantie/nna/vha/platform/vha_plat_pci.c | 483 +
+ .../nna/vha/platform/vha_plat_xuantie.c | 403 +
+ .../vha/platform/vha_plat_xuantie_th1520.c | 181 +
+ .../vha_plat_xuantie_th1520_fpga_c910.c | 122 +
+ drivers/soc/xuantie/nna/vha/single/vha_cnn.c | 761 +
+ drivers/soc/xuantie/nna/vha/single/vha_dev.c | 1615 ++
+ .../soc/xuantie/nna/vha/single/vha_dev_ax2.c | 190 +
+ .../soc/xuantie/nna/vha/single/vha_dev_ax3.c | 207 +
+ drivers/soc/xuantie/nna/vha/single/vha_mmu.c | 241 +
+ drivers/soc/xuantie/nna/vha/single/vha_regs.h | 191 +
+ drivers/soc/xuantie/nna/vha/vha_api.c | 1060 +
+ drivers/soc/xuantie/nna/vha/vha_common.c | 2783 +++
+ drivers/soc/xuantie/nna/vha/vha_common.h | 1174 ++
+ drivers/soc/xuantie/nna/vha/vha_dbg.c | 1913 ++
+ drivers/soc/xuantie/nna/vha/vha_devfreq.c | 665 +
+ drivers/soc/xuantie/nna/vha/vha_info.c | 191 +
+ drivers/soc/xuantie/nna/vha/vha_io.h | 373 +
+ drivers/soc/xuantie/nna/vha/vha_monitor.c | 125 +
+ drivers/soc/xuantie/nna/vha/vha_pdump.c | 347 +
+ drivers/soc/xuantie/th1520-iopmp.c | 707 +
+ drivers/soc/xuantie/th1520_event.c | 280 +
+ drivers/soc/xuantie/th1520_regdump.c | 198 +
+ drivers/soc/xuantie/th1520_system_monitor.c | 898 +
+ drivers/soc/xuantie/video_memory/Kconfig | 3 +
+ drivers/soc/xuantie/video_memory/Makefile | 116 +
+ .../soc/xuantie/video_memory/driver/Makefile | 47 +
+ .../xuantie/video_memory/driver/rsvmem_pool.c | 178 +
+ .../xuantie/video_memory/driver/rsvmem_pool.h | 36 +
+ .../video_memory/driver/video_memory.c | 1702 ++
+ .../video_memory/driver/video_memory.h | 69 +
+ drivers/soc/xuantie/video_memory/lib/Makefile | 16 +
+ .../soc/xuantie/video_memory/lib/video_mem.c | 331 +
+ .../soc/xuantie/video_memory/lib/video_mem.h | 85 +
+ .../soc/xuantie/video_memory/test/Makefile | 70 +
+ .../video_memory/test/video_memory_test.c | 175 +
+ .../soc/xuantie/vpu-vc8000d-kernel/COPYING | 674 +
+ .../soc/xuantie/vpu-vc8000d-kernel/Kconfig | 3 +
+ .../soc/xuantie/vpu-vc8000d-kernel/Makefile | 80 +
+ .../vpu-vc8000d-kernel/addons/ko/insmod.sh | 5 +
+ .../vpu-vc8000d-kernel/addons/ko/rmmod.sh | 3 +
+ .../vpu-vc8000d-kernel/linux/dwl/dwl_defs.h | 142 +
+ .../linux/memalloc/Makefile | 88 +
+ .../vpu-vc8000d-kernel/linux/memalloc/README | 44 +
+ .../linux/memalloc/build_for_pcie.sh | 22 +
+ .../linux/memalloc/memalloc.c | 369 +
+ .../linux/memalloc/memalloc.h | 88 +
+ .../linux/memalloc/memalloc_load.sh | 99 +
+ .../linux/memalloc/testbench_memalloc.c | 207 +
+ .../linux/subsys_driver/Makefile | 95 +
+ .../linux/subsys_driver/README | 49 +
+ .../linux/subsys_driver/bidirect_list.c | 220 +
+ .../linux/subsys_driver/bidirect_list.h | 107 +
+ .../linux/subsys_driver/build_for_mpcore.sh | 10 +
+ .../linux/subsys_driver/build_for_socle.sh | 10 +
+ .../linux/subsys_driver/build_for_vexpress.sh | 10 +
+ .../linux/subsys_driver/dec_devfreq.h | 55 +
+ .../linux/subsys_driver/driver_load.sh | 43 +
+ .../linux/subsys_driver/driver_load_sc.sh | 43 +
+ .../freertos/dev_common_freertos.h | 255 +
+ .../freertos/hantro_dec_freertos.c | 2065 ++
+ .../freertos/hantro_vcmd_freertos.c | 3843 ++++
+ .../linux/subsys_driver/freertos/io_tools.c | 86 +
+ .../linux/subsys_driver/freertos/io_tools.h | 89 +
+ .../freertos/memalloc_freertos.c | 484 +
+ .../freertos/memalloc_freertos.h | 104 +
+ .../subsys_driver/freertos/subsys_freertos.c | 192 +
+ .../subsys_driver/freertos/user_freertos.c | 151 +
+ .../subsys_driver/freertos/user_freertos.h | 79 +
+ .../linux/subsys_driver/hantro_axife.c | 87 +
+ .../linux/subsys_driver/hantro_dec.c | 3676 ++++
+ .../linux/subsys_driver/hantro_mmu.c | 1912 ++
+ .../linux/subsys_driver/hantro_vcmd.c | 4342 +++++
+ .../linux/subsys_driver/hantroaxife.h | 63 +
+ .../linux/subsys_driver/hantrodec.h | 199 +
+ .../linux/subsys_driver/hantrommu.h | 109 +
+ .../linux/subsys_driver/hantrovcmd.h | 210 +
+ .../linux/subsys_driver/kernel_allocator.c | 1296 ++
+ .../linux/subsys_driver/kernel_allocator.h | 121 +
+ .../linux/subsys_driver/subsys.c | 203 +
+ .../linux/subsys_driver/subsys.h | 208 +
.../linux/subsys_driver/vcmdregisterenum.h | 156 + + .../linux/subsys_driver/vcmdregistertable.h | 156 + + .../linux/subsys_driver/vcmdswhwregisters.c | 148 + + .../linux/subsys_driver/vcmdswhwregisters.h | 233 + + .../linux/subsys_driver/vdec_trace_point.h | 43 + + .../soc/xuantie/vpu-vc8000e-kernel/COPYING | 674 + + .../soc/xuantie/vpu-vc8000e-kernel/Kconfig | 3 + + .../soc/xuantie/vpu-vc8000e-kernel/Makefile | 91 + + .../vpu-vc8000e-kernel/addons/ko/insmod.sh | 5 + + .../vpu-vc8000e-kernel/addons/ko/rmmod.sh | 3 + + .../linux/kernel_module/Makefile | 213 + + .../linux/kernel_module/README | 42 + + .../linux/kernel_module/bidirect_list.c | 222 + + .../linux/kernel_module/bidirect_list.h | 116 + + .../linux/kernel_module/driver_load.sh | 61 + + .../linux/kernel_module/hantro_mmu.c | 1911 ++ + .../linux/kernel_module/hantrommu.h | 155 + + .../linux/kernel_module/vc8000_axife.c | 98 + + .../linux/kernel_module/vc8000_axife.h | 77 + + .../linux/kernel_module/vc8000_devfreq.h | 54 + + .../linux/kernel_module/vc8000_driver.c | 102 + + .../linux/kernel_module/vc8000_driver.h | 349 + + .../kernel_module/vc8000_normal_driver.c | 1459 ++ + .../linux/kernel_module/vc8000_vcmd_driver.c | 5594 ++++++ + .../linux/kernel_module/vcmdregisterenum.h | 157 + + .../linux/kernel_module/vcmdregistertable.h | 157 + + .../linux/kernel_module/vcmdswhwregisters.c | 180 + + .../linux/kernel_module/vcmdswhwregisters.h | 244 + + .../linux/kernel_module/venc_trace_point.h | 43 + + drivers/spi/Kconfig | 21 + + drivers/spi/Makefile | 3 + + drivers/spi/spi-dw-mmio-quad.c | 216 + + drivers/spi/spi-dw-mmio.c | 1 + + drivers/spi/spi-dw-quad.c | 830 + + drivers/spi/spi-dw-quad.h | 365 + + drivers/spi/spi-spacemit-k1-qspi.c | 1572 ++ + drivers/spi/spi-spacemit-k1.c | 1281 ++ + drivers/spi/spi-spacemit-k1.h | 281 + + drivers/spi/spidev.c | 40 + + drivers/tee/Kconfig | 2 +- + drivers/tee/optee/Kconfig | 2 +- + drivers/tee/optee/call.c | 2 + + drivers/tee/optee/smc_abi.c | 37 + + drivers/tty/serial/8250/8250_dma.c | 134 +- + drivers/tty/serial/8250/8250_dw.c | 3 +- + drivers/tty/serial/8250/8250_port.c | 12 +- + drivers/tty/serial/Kconfig | 19 +- + drivers/tty/serial/Makefile | 1 + + drivers/tty/serial/spacemit_k1x_uart.c | 1979 ++ + drivers/ufs/host/ufs-qcom.c | 9 +- + drivers/usb/dwc3/Kconfig | 20 + + drivers/usb/dwc3/Makefile | 2 + + drivers/usb/dwc3/core.c | 22 +- + drivers/usb/dwc3/dwc3-xuantie.c | 275 + + drivers/watchdog/Kconfig | 14 + + drivers/watchdog/Makefile | 1 + + drivers/watchdog/dw_wdt.c | 13 +- + drivers/watchdog/th1520_wdt.c | 393 + + include/acpi/acpi_bus.h | 37 +- + include/acpi/actbl3.h | 18 +- + include/asm-generic/pgalloc.h | 7 +- + include/drm/bridge/dw_hdmi.h | 5 + + .../dt-bindings/clock/sophgo-mango-clock.h | 165 + + include/dt-bindings/clock/sophgo.h | 15 + + .../dt-bindings/clock/spacemit-k1x-clock.h | 223 + + include/dt-bindings/clock/th1520-audiosys.h | 35 + + include/dt-bindings/clock/th1520-dspsys.h | 33 + + .../dt-bindings/clock/th1520-fm-ap-clock.h | 513 + + include/dt-bindings/clock/th1520-miscsys.h | 28 + + include/dt-bindings/clock/th1520-visys.h | 54 + + include/dt-bindings/clock/th1520-vosys.h | 41 + + include/dt-bindings/clock/th1520-vpsys.h | 26 + + include/dt-bindings/dma/spacemit-k1-dma.h | 54 + + include/dt-bindings/firmware/xuantie/rsrc.h | 18 + + include/dt-bindings/mmc/spacemit-k1-sdhci.h | 62 + + include/dt-bindings/pinctrl/k1-x-pinctrl.h | 198 + + .../dt-bindings/reset/sophgo-mango-resets.h | 96 + + .../dt-bindings/reset/spacemit-k1x-reset.h | 126 + + 
.../dt-bindings/reset/xuantie,th1520-reset.h | 28 + + .../dt-bindings/soc/th1520_system_status.h | 38 + + .../dt-bindings/soc/xuantie,th1520-iopmp.h | 41 + + include/linux/acpi.h | 15 + + include/linux/acpi_iort.h | 4 +- + include/linux/cpuhotplug.h | 3 + + include/linux/cpumask.h | 17 + + include/linux/crc32.h | 3 + + include/linux/dma-direct.h | 18 + + include/linux/find.h | 27 + + include/linux/firmware/xuantie/ipc.h | 167 + + include/linux/firmware/xuantie/th1520_event.h | 35 + + include/linux/iommu.h | 4 +- + include/linux/irqchip/riscv-aplic.h | 145 + + include/linux/irqchip/riscv-imsic.h | 96 + + include/linux/irqdomain.h | 17 + + include/linux/irqdomain_defs.h | 2 + + include/linux/mfd/spacemit_p1.h | 250 + + include/linux/mlx4/device.h | 2 +- + include/linux/mm.h | 16 + + include/linux/msi.h | 28 +- + include/linux/pci-ecam.h | 1 + + .../linux/platform_data/spacemit_k1_sdhci.h | 99 + + include/linux/sizes.h | 9 + + include/linux/string_choices.h | 11 + + include/linux/sync_core.h | 16 +- + include/linux/th1520_proc_debug.h | 13 + + include/linux/th1520_rpmsg.h | 99 + + include/soc/xuantie/th1520_system_monitor.h | 71 + + include/soc/xuantie/th1520_system_status.h | 36 + + include/uapi/drm/drm_fourcc.h | 90 + + include/uapi/drm/vs_drm.h | 50 + + init/Kconfig | 3 + + kernel/irq/irqdomain.c | 28 +- + kernel/irq/matrix.c | 28 +- + kernel/irq/msi.c | 184 +- + kernel/panic.c | 8 + + kernel/sched/core.c | 11 +- + kernel/sched/fair.c | 3 + + kernel/sched/membarrier.c | 13 +- + kernel/time/tick-oneshot.c | 2 +- + lib/find_bit.c | 12 + + mm/memblock.c | 6 +- + mm/pgtable-generic.c | 1 + + net/rfkill/Makefile | 1 + + net/rfkill/rfkill-bt.c | 244 + + net/rfkill/rfkill-wlan.c | 283 + + scripts/package/builddeb | 4 +- + scripts/package/kernel.spec | 10 + + sound/core/pcm_lib.c | 1 + + sound/pci/hda/hda_intel.c | 5 +- + sound/soc/Kconfig | 1 + + sound/soc/Makefile | 1 + + sound/soc/codecs/Kconfig | 19 + + sound/soc/codecs/Makefile | 6 + + sound/soc/codecs/aw87519.c | 787 + + sound/soc/codecs/aw87519.h | 105 + + sound/soc/codecs/es7210.c | 2019 ++ + sound/soc/codecs/es7210.h | 140 + + sound/soc/codecs/es8156.c | 929 + + sound/soc/codecs/es8156.h | 85 + + sound/soc/soc-generic-dmaengine-pcm.c | 4 +- + sound/soc/xuantie/Kconfig | 53 + + sound/soc/xuantie/Makefile | 17 + + sound/soc/xuantie/th1520-audio-cpr.h | 79 + + sound/soc/xuantie/th1520-hdmi-pcm.c | 109 + + sound/soc/xuantie/th1520-i2s-8ch.c | 754 + + sound/soc/xuantie/th1520-i2s-common.c | 53 + + sound/soc/xuantie/th1520-i2s.c | 834 + + sound/soc/xuantie/th1520-i2s.h | 548 + + sound/soc/xuantie/th1520-pcm-dma.c | 48 + + sound/soc/xuantie/th1520-pcm.h | 18 + + sound/soc/xuantie/th1520-spdif.c | 502 + + sound/soc/xuantie/th1520-spdif.h | 233 + + sound/soc/xuantie/th1520-tdm.c | 610 + + sound/soc/xuantie/th1520-tdm.h | 122 + + tools/lib/perf/cpumap.c | 10 +- + tools/perf/pmu-events/arch/riscv/mapfile.csv | 2 + + .../arch/riscv/thead/c900-legacy/cache.json | 67 + + .../riscv/thead/c900-legacy/firmware.json | 68 + + .../riscv/thead/c900-legacy/instruction.json | 72 + + .../riscv/thead/c900-legacy/microarch.json | 80 + + .../arch/riscv/thead/th1520-ddr/metrics.json | 713 + + .../thead/th1520-ddr/uncore-ddr-pmu.json | 1550 ++ + tools/testing/selftests/hid/Makefile | 10 +- + tools/testing/selftests/hid/progs/hid.c | 3 - + .../selftests/hid/progs/hid_bpf_helpers.h | 77 + + .../testing/selftests/riscv/hwprobe/Makefile | 9 +- + tools/testing/selftests/riscv/hwprobe/cbo.c | 228 + + .../testing/selftests/riscv/hwprobe/hwprobe.c | 64 +- + 
.../testing/selftests/riscv/hwprobe/hwprobe.h | 15 + + .../selftests/riscv/vector/vstate_prctl.c | 10 +- + 1549 files changed, 598823 insertions(+), 2526 deletions(-) + rename Documentation/{ => arch}/riscv/acpi.rst (100%) + rename Documentation/{ => arch}/riscv/boot-image-header.rst (100%) + rename Documentation/{ => arch}/riscv/boot.rst (100%) + rename Documentation/{ => arch}/riscv/features.rst (100%) + create mode 100644 Documentation/arch/riscv/hwprobe.rst + rename Documentation/{ => arch}/riscv/index.rst (100%) + rename Documentation/{ => arch}/riscv/patch-acceptance.rst (100%) + rename Documentation/{ => arch}/riscv/uabi.rst (100%) + rename Documentation/{ => arch}/riscv/vector.rst (100%) + rename Documentation/{ => arch}/riscv/vm-layout.rst (100%) + create mode 100644 Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml + create mode 100644 Documentation/devicetree/bindings/iio/adc/thead,th1520-adc.yaml + create mode 100644 Documentation/devicetree/bindings/iio/adc/xuantie,th1520-adc.yaml + create mode 100644 Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml + create mode 100644 Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml + create mode 100644 Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml + create mode 100644 Documentation/devicetree/bindings/iommu/riscv,iommu.yaml + create mode 100644 Documentation/devicetree/bindings/mailbox/xuantie-th1520-mailbox.txt + create mode 100644 Documentation/devicetree/bindings/net/xuantie,dwmac.yaml + create mode 100644 Documentation/devicetree/bindings/nvmem/xuantie,th1520-efuse.txt + create mode 100644 Documentation/devicetree/bindings/pinctrl/thead,th1520-pinctrl.yaml + create mode 100644 Documentation/devicetree/bindings/pwm/xuantie,th1520-pwm.yaml + create mode 100644 Documentation/devicetree/bindings/reset/xuantie,th1520-reset.yaml + create mode 100644 Documentation/devicetree/bindings/soc/xuantie/xuantie,th1520-event.yaml + create mode 100644 Documentation/devicetree/bindings/sound/everest,es7210.txt + create mode 100644 Documentation/devicetree/bindings/sound/everest,es8156.yaml + create mode 100644 Documentation/devicetree/bindings/sound/xuantie,th1520-i2s.yaml + create mode 100644 Documentation/devicetree/bindings/sound/xuantie,th1520-spdif.yaml + create mode 100644 Documentation/devicetree/bindings/sound/xuantie,th1520-tdm.yaml + create mode 100644 Documentation/devicetree/bindings/spi/xuantie,th1520-qspi.yaml + create mode 100644 Documentation/devicetree/bindings/spi/xuantie,th1520-spi.yaml + create mode 100644 Documentation/devicetree/bindings/usb/xuantie,th1520-usb.yaml + create mode 100644 Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml + delete mode 100644 Documentation/riscv/hwprobe.rst + create mode 100644 Documentation/scheduler/membarrier.rst + rename Documentation/translations/zh_CN/{ => arch}/riscv/boot-image-header.rst (96%) + rename Documentation/translations/zh_CN/{ => arch}/riscv/index.rst (79%) + rename Documentation/translations/zh_CN/{ => arch}/riscv/patch-acceptance.rst (93%) + rename Documentation/translations/zh_CN/{ => arch}/riscv/vm-layout.rst (98%) + create mode 100644 arch/riscv/Kconfig.vendor + create mode 100644 arch/riscv/Makefile.isa + create mode 100644 arch/riscv/boot/dts/sophgo/Makefile + create mode 100644 arch/riscv/boot/dts/sophgo/mango-2sockets.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-clock-socket0.dtsi + create mode 100644 
arch/riscv/boot/dts/sophgo/mango-clock-socket1.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-cpus-socket0.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-cpus-socket1.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-milkv-pioneer.dts + create mode 100644 arch/riscv/boot/dts/sophgo/mango-pcie-2rc.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-pcie-3rc-capricorn.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-pcie-3rc-v2.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-pcie-3rc.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-pcie-4rc-v2.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-pcie-4rc.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-pinctrl.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-sophgo-capricorn.dts + create mode 100644 arch/riscv/boot/dts/sophgo/mango-sophgo-pisces.dts + create mode 100644 arch/riscv/boot/dts/sophgo/mango-sophgo-x4evb.dts + create mode 100644 arch/riscv/boot/dts/sophgo/mango-sophgo-x8evb.dts + create mode 100644 arch/riscv/boot/dts/sophgo/mango-top-intc2.dtsi + create mode 100644 arch/riscv/boot/dts/sophgo/mango-yixin-s2110.dts + create mode 100644 arch/riscv/boot/dts/sophgo/mango.dtsi + create mode 100644 arch/riscv/boot/dts/spacemit/Makefile + create mode 100644 arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts + create mode 100644 arch/riscv/boot/dts/spacemit/k1-x.dtsi + create mode 100644 arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi + create mode 100644 arch/riscv/boot/dts/thead/th1520-lichee-pi-4a-16g.dts + create mode 100644 arch/riscv/boot/dts/thead/th1520-lpi4a-dsi0.dts + create mode 100644 arch/riscv/boot/dts/thead/th1520-lpi4a-hx8279.dts + create mode 100644 arch/riscv/configs/k1_defconfig + create mode 100644 arch/riscv/configs/sg2042_defconfig + create mode 100644 arch/riscv/configs/th1520_defconfig + create mode 100644 arch/riscv/include/asm/arch_hweight.h + create mode 100644 arch/riscv/include/asm/archrandom.h + create mode 100644 arch/riscv/include/asm/cpufeature-macros.h + create mode 100644 arch/riscv/include/asm/dmi.h + delete mode 100644 arch/riscv/include/asm/kvm_aia_aplic.h + delete mode 100644 arch/riscv/include/asm/kvm_aia_imsic.h + create mode 100644 arch/riscv/include/asm/sync_core.h + create mode 100644 arch/riscv/include/asm/vendor_extensions.h + create mode 100644 arch/riscv/include/asm/vendor_extensions/andes.h + create mode 100644 arch/riscv/kernel/acpi_numa.c + create mode 100644 arch/riscv/kernel/sys_hwprobe.c + create mode 100644 arch/riscv/kernel/vendor_extensions.c + create mode 100644 arch/riscv/kernel/vendor_extensions/Makefile + create mode 100644 arch/riscv/kernel/vendor_extensions/andes.c + create mode 100644 arch/riscv/lib/crc32.c + create mode 100644 drivers/acpi/mipi-disco-img.c + create mode 100644 drivers/acpi/riscv/cppc.c + create mode 100644 drivers/acpi/riscv/cpuidle.c + create mode 100644 drivers/acpi/riscv/init.c + create mode 100644 drivers/acpi/riscv/init.h + create mode 100644 drivers/acpi/riscv/irq.c + create mode 100644 drivers/clk/sophgo/Makefile + create mode 100644 drivers/clk/sophgo/clk-dummy.c + create mode 100644 drivers/clk/sophgo/clk-mango.c + create mode 100644 drivers/clk/sophgo/clk.c + create mode 100644 drivers/clk/sophgo/clk.h + create mode 100644 drivers/clk/spacemit/Kconfig + create mode 100644 drivers/clk/spacemit/Makefile + create mode 100644 drivers/clk/spacemit/ccu-spacemit-k1x.c + create mode 100644 drivers/clk/spacemit/ccu-spacemit-k1x.h + create mode 100644 
drivers/clk/spacemit/ccu_ddn.c + create mode 100644 drivers/clk/spacemit/ccu_ddn.h + create mode 100644 drivers/clk/spacemit/ccu_ddr.c + create mode 100644 drivers/clk/spacemit/ccu_ddr.h + create mode 100644 drivers/clk/spacemit/ccu_dpll.c + create mode 100644 drivers/clk/spacemit/ccu_dpll.h + create mode 100644 drivers/clk/spacemit/ccu_mix.c + create mode 100644 drivers/clk/spacemit/ccu_mix.h + create mode 100644 drivers/clk/spacemit/ccu_pll.c + create mode 100644 drivers/clk/spacemit/ccu_pll.h + create mode 100644 drivers/clk/xuantie/Kconfig + create mode 100644 drivers/clk/xuantie/Makefile + create mode 100644 drivers/clk/xuantie/clk-th1520-fm.c + create mode 100644 drivers/clk/xuantie/clk.c + create mode 100644 drivers/clk/xuantie/clk.h + create mode 100644 drivers/clk/xuantie/gate/Makefile + create mode 100644 drivers/clk/xuantie/gate/audiosys-gate.c + create mode 100644 drivers/clk/xuantie/gate/clk-gate.h + create mode 100644 drivers/clk/xuantie/gate/dspsys-gate.c + create mode 100644 drivers/clk/xuantie/gate/miscsys-gate.c + create mode 100644 drivers/clk/xuantie/gate/visys-gate.c + create mode 100644 drivers/clk/xuantie/gate/vosys-gate.c + create mode 100644 drivers/clk/xuantie/gate/vpsys-gate.c + create mode 100644 drivers/clk/xuantie/gate/xuantie-gate.c + create mode 100644 drivers/cpufreq/th1520-cpufreq.c + create mode 100644 drivers/dma/spacemit-k1-dma.c + create mode 100644 drivers/firmware/xuantie/Kconfig + create mode 100644 drivers/firmware/xuantie/Makefile + create mode 100644 drivers/firmware/xuantie/th1520_aon.c + create mode 100644 drivers/firmware/xuantie/th1520_aon_pd.c + create mode 100644 drivers/firmware/xuantie/th1520_proc_debug.c + create mode 100644 drivers/gpio/gpio-k1x.c + create mode 100644 drivers/gpu/drm/img-rogue/Kconfig + create mode 100644 drivers/gpu/drm/img-rogue/Makefile + create mode 100644 drivers/gpu/drm/img-rogue/allocmem.c + create mode 100644 drivers/gpu/drm/img-rogue/allocmem.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/apollo.mk + create mode 100644 drivers/gpu/drm/img-rogue/apollo/apollo_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_plane.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/odin_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/odin_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/orion_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/orion_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/pdp_common.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/pdp_odin.c + create mode 100644 
drivers/gpu/drm/img-rogue/apollo/pdp_odin.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/pdp_plato.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/pdp_plato.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/pdp_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/pfim_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/pfim_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/sysconfig.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/sysinfo.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tc_apollo.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tc_apollo.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tc_clocks.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tc_drv.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tc_drv.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tc_odin.c + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tc_odin.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tc_odin_common_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tcf_pll.h + create mode 100644 drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/cache_km.c + create mode 100644 drivers/gpu/drm/img-rogue/cache_km.h + create mode 100644 drivers/gpu/drm/img-rogue/cache_ops.h + create mode 100644 drivers/gpu/drm/img-rogue/client_cache_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/client_mm_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/client_ri_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/client_sync_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/client_synctracking_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/client_synctracking_direct_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/common_cache_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_cmm_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_di_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_mm_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_rgxbreakpoint_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_rgxfwdbg_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h + create mode 100644 
drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_rgxregconfig_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_rgxtimerquery_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_ri_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_srvcore_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_sync_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/common_synctracking_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/config_kernel.h + create mode 100644 drivers/gpu/drm/img-rogue/config_kernel.mk + create mode 100644 drivers/gpu/drm/img-rogue/configs/rgxconfig_km_1.V.4.5.h + create mode 100644 drivers/gpu/drm/img-rogue/configs/rgxconfig_km_36.V.104.182.h + create mode 100644 drivers/gpu/drm/img-rogue/connection_server.c + create mode 100644 drivers/gpu/drm/img-rogue/connection_server.h + create mode 100644 drivers/gpu/drm/img-rogue/cores/rgxcore_km_1.82.4.5.h + create mode 100644 drivers/gpu/drm/img-rogue/cores/rgxcore_km_36.52.104.182.h + create mode 100644 drivers/gpu/drm/img-rogue/debug_common.c + create mode 100644 drivers/gpu/drm/img-rogue/debug_common.h + create mode 100644 drivers/gpu/drm/img-rogue/device.h + create mode 100644 drivers/gpu/drm/img-rogue/device_connection.h + create mode 100644 drivers/gpu/drm/img-rogue/devicemem.c + create mode 100644 drivers/gpu/drm/img-rogue/devicemem.h + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_heapcfg.c + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_heapcfg.h + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_history_server.c + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_history_server.h + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_pdump.h + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_server.c + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_server.h + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_server_utils.h + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_typedefs.h + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_utils.c + create mode 100644 drivers/gpu/drm/img-rogue/devicemem_utils.h + create mode 100644 drivers/gpu/drm/img-rogue/di_common.h + create mode 100644 drivers/gpu/drm/img-rogue/di_impl_brg.c + create mode 100644 drivers/gpu/drm/img-rogue/di_impl_brg.h + create mode 100644 drivers/gpu/drm/img-rogue/di_impl_brg_intern.h + create mode 100644 drivers/gpu/drm/img-rogue/di_server.c + create mode 100644 drivers/gpu/drm/img-rogue/di_server.h + create mode 100644 drivers/gpu/drm/img-rogue/dllist.h + create mode 100644 drivers/gpu/drm/img-rogue/dma_km.h + create mode 100644 drivers/gpu/drm/img-rogue/dma_support.c + create mode 100644 drivers/gpu/drm/img-rogue/dma_support.h + create mode 100644 drivers/gpu/drm/img-rogue/drm_netlink_gem.c + create mode 100644 drivers/gpu/drm/img-rogue/drm_netlink_gem.h + create mode 100644 drivers/gpu/drm/img-rogue/drm_nulldisp_drv.c + create mode 100644 drivers/gpu/drm/img-rogue/drm_nulldisp_drv.h + create mode 100644 drivers/gpu/drm/img-rogue/drm_nulldisp_gem.c + create mode 100644 drivers/gpu/drm/img-rogue/drm_nulldisp_gem.h + create mode 100644 drivers/gpu/drm/img-rogue/drm_nulldisp_netlink.c + create mode 100644 drivers/gpu/drm/img-rogue/drm_nulldisp_netlink.h + create mode 100644 
drivers/gpu/drm/img-rogue/env_connection.h + create mode 100644 drivers/gpu/drm/img-rogue/event.c + create mode 100644 drivers/gpu/drm/img-rogue/event.h + create mode 100644 drivers/gpu/drm/img-rogue/fwload.c + create mode 100644 drivers/gpu/drm/img-rogue/fwload.h + create mode 100644 drivers/gpu/drm/img-rogue/fwtrace_string.h + create mode 100644 drivers/gpu/drm/img-rogue/gpu_trace_point.h + create mode 100644 drivers/gpu/drm/img-rogue/handle.c + create mode 100644 drivers/gpu/drm/img-rogue/handle.h + create mode 100644 drivers/gpu/drm/img-rogue/handle_idr.c + create mode 100644 drivers/gpu/drm/img-rogue/handle_impl.h + create mode 100644 drivers/gpu/drm/img-rogue/handle_types.h + create mode 100644 drivers/gpu/drm/img-rogue/hash.c + create mode 100644 drivers/gpu/drm/img-rogue/hash.h + create mode 100644 drivers/gpu/drm/img-rogue/htb_debug.c + create mode 100644 drivers/gpu/drm/img-rogue/htb_debug.h + create mode 100644 drivers/gpu/drm/img-rogue/htbserver.c + create mode 100644 drivers/gpu/drm/img-rogue/htbserver.h + create mode 100644 drivers/gpu/drm/img-rogue/htbuffer.c + create mode 100644 drivers/gpu/drm/img-rogue/htbuffer.h + create mode 100644 drivers/gpu/drm/img-rogue/htbuffer_init.h + create mode 100644 drivers/gpu/drm/img-rogue/htbuffer_sf.h + create mode 100644 drivers/gpu/drm/img-rogue/htbuffer_types.h + create mode 100644 drivers/gpu/drm/img-rogue/img_3dtypes.h + create mode 100644 drivers/gpu/drm/img-rogue/img_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/img_elf.h + create mode 100644 drivers/gpu/drm/img-rogue/img_types.h + create mode 100644 drivers/gpu/drm/img-rogue/img_types_check.h + create mode 100644 drivers/gpu/drm/img-rogue/include/cache_ops.h + create mode 100644 drivers/gpu/drm/img-rogue/include/devicemem_typedefs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/dllist.h + create mode 100644 drivers/gpu/drm/img-rogue/include/drm/netlink.h + create mode 100644 drivers/gpu/drm/img-rogue/include/drm/nulldisp_drm.h + create mode 100644 drivers/gpu/drm/img-rogue/include/drm/pdp_drm.h + create mode 100644 drivers/gpu/drm/img-rogue/include/drm/pvr_drm.h + create mode 100644 drivers/gpu/drm/img-rogue/include/img_3dtypes.h + create mode 100644 drivers/gpu/drm/img-rogue/include/img_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/img_drm_fourcc_internal.h + create mode 100644 drivers/gpu/drm/img-rogue/include/img_elf.h + create mode 100644 drivers/gpu/drm/img-rogue/include/img_types.h + create mode 100644 drivers/gpu/drm/img-rogue/include/kernel_types.h + create mode 100644 drivers/gpu/drm/img-rogue/include/linux_sw_sync.h + create mode 100644 drivers/gpu/drm/img-rogue/include/lock_types.h + create mode 100644 drivers/gpu/drm/img-rogue/include/log2.h + create mode 100644 drivers/gpu/drm/img-rogue/include/multicore_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/osfunc_common.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pdumpdefs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pdumpdesc.h + create mode 100644 drivers/gpu/drm/img-rogue/include/public/powervr/buffer_attribs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/public/powervr/img_drm_fourcc.h + create mode 100644 drivers/gpu/drm/img-rogue/include/public/powervr/mem_types.h + create mode 100644 drivers/gpu/drm/img-rogue/include/public/powervr/pvrsrv_sync_ext.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvr_buffer_sync_shared.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvr_debug.h + create mode 100644 
drivers/gpu/drm/img-rogue/include/pvr_fd_sync_kernel.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvr_intrinsics.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrmodule.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_device_types.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_devvar.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_error.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_errors.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_memalloc_physheap.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_memallocflags.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_memallocflags_internal.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_sync_km.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_tlcommon.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrsrv_tlstreams.h + create mode 100644 drivers/gpu/drm/img-rogue/include/pvrversion.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_common.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_common_asserts.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_compat_bvnc.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_fwif_resetframework.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_fwif_sf.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_heap_firmware.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_hwperf_common.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_meta.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_mips.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgx_riscv.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rgxfw_log_helper.h + create mode 100644 drivers/gpu/drm/img-rogue/include/ri_typedefs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_alignchecks.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_hwperf.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_km.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_shared.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/rgx_heaps.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/rgx_hwperf.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/rgx_options.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/rgxheapconfig.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/rgxheapconfig_65273.h + create mode 100644 drivers/gpu/drm/img-rogue/include/rogue/system/rgx_tc/tc_clocks.h + create mode 100644 drivers/gpu/drm/img-rogue/include/services_km.h + create mode 100644 drivers/gpu/drm/img-rogue/include/servicesext.h + create mode 100644 drivers/gpu/drm/img-rogue/include/sync_checkpoint_external.h + create mode 100644 drivers/gpu/drm/img-rogue/include/sync_prim_internal.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/apollo_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/bonnie_tcf.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_pdp_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/orion_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/orion_regs.h + create mode 100644 
drivers/gpu/drm/img-rogue/include/system/rgx_tc/pdp_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/pfim_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/pfim_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_clk_ctrl.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_pll.h + create mode 100644 drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_rgbpdp_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/virt_validation_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_alignchecks.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_hwperf.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_km.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_shared.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/rgx_heaps.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/rgx_hwperf.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/rgx_hwperf_table.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/rgx_options.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/rgxheapconfig.h + create mode 100644 drivers/gpu/drm/img-rogue/include/volcanic/system/rgx_tc/tc_clocks.h + create mode 100644 drivers/gpu/drm/img-rogue/info_page.h + create mode 100644 drivers/gpu/drm/img-rogue/info_page_client.h + create mode 100644 drivers/gpu/drm/img-rogue/info_page_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/info_page_km.c + create mode 100644 drivers/gpu/drm/img-rogue/interrupt_support.c + create mode 100644 drivers/gpu/drm/img-rogue/interrupt_support.h + create mode 100644 drivers/gpu/drm/img-rogue/kernel_compatibility.h + create mode 100644 drivers/gpu/drm/img-rogue/kernel_config_compatibility.h + create mode 100644 drivers/gpu/drm/img-rogue/kernel_nospec.h + create mode 100644 drivers/gpu/drm/img-rogue/kernel_types.h + create mode 100644 drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h + create mode 100644 drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h + create mode 100644 drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h + create mode 100644 drivers/gpu/drm/img-rogue/km/rgxdefs_km.h + create mode 100644 drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h + create mode 100644 drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h + create mode 100644 drivers/gpu/drm/img-rogue/km_apphint.c + create mode 100644 drivers/gpu/drm/img-rogue/km_apphint.h + create mode 100644 drivers/gpu/drm/img-rogue/km_apphint_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/km_apphint_defs_common.h + create mode 100644 drivers/gpu/drm/img-rogue/linkage.h + create mode 100644 drivers/gpu/drm/img-rogue/linux_sw_sync.h + create mode 100644 drivers/gpu/drm/img-rogue/lists.c + create mode 100644 drivers/gpu/drm/img-rogue/lists.h + create mode 100644 drivers/gpu/drm/img-rogue/lock.h + create mode 100644 drivers/gpu/drm/img-rogue/lock_types.h + create mode 100644 drivers/gpu/drm/img-rogue/log2.h + create mode 100644 drivers/gpu/drm/img-rogue/mem_utils.c + create mode 100644 drivers/gpu/drm/img-rogue/mmu_common.c + create mode 100644 drivers/gpu/drm/img-rogue/mmu_common.h + create mode 100644 drivers/gpu/drm/img-rogue/module_common.c + create mode 100644 drivers/gpu/drm/img-rogue/module_common.h + create mode 100644 drivers/gpu/drm/img-rogue/multicore_defs.h + create mode 100644 drivers/gpu/drm/img-rogue/opaque_types.h + create mode 100644 
drivers/gpu/drm/img-rogue/os_cpu_cache.h + create mode 100644 drivers/gpu/drm/img-rogue/os_srvinit_param.h + create mode 100644 drivers/gpu/drm/img-rogue/osconnection_server.c + create mode 100644 drivers/gpu/drm/img-rogue/osconnection_server.h + create mode 100644 drivers/gpu/drm/img-rogue/osdi_impl.h + create mode 100644 drivers/gpu/drm/img-rogue/osfunc.c + create mode 100644 drivers/gpu/drm/img-rogue/osfunc.h + create mode 100644 drivers/gpu/drm/img-rogue/osfunc_arm.c + create mode 100644 drivers/gpu/drm/img-rogue/osfunc_arm64.c + create mode 100644 drivers/gpu/drm/img-rogue/osfunc_common.h + create mode 100644 drivers/gpu/drm/img-rogue/osfunc_riscv.c + create mode 100644 drivers/gpu/drm/img-rogue/osfunc_x86.c + create mode 100644 drivers/gpu/drm/img-rogue/oskm_apphint.h + create mode 100644 drivers/gpu/drm/img-rogue/osmmap.h + create mode 100644 drivers/gpu/drm/img-rogue/osmmap_stub.c + create mode 100644 drivers/gpu/drm/img-rogue/ospvr_gputrace.h + create mode 100644 drivers/gpu/drm/img-rogue/pci_support.c + create mode 100644 drivers/gpu/drm/img-rogue/pci_support.h + create mode 100644 drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk + create mode 100644 drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/pdp2_regs.h + create mode 100644 drivers/gpu/drm/img-rogue/pdp_drm.h + create mode 100644 drivers/gpu/drm/img-rogue/pdump.h + create mode 100644 drivers/gpu/drm/img-rogue/pdump_km.h + create mode 100644 drivers/gpu/drm/img-rogue/pdump_mmu.h + create mode 100644 drivers/gpu/drm/img-rogue/pdump_physmem.h + create mode 100644 drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h + create mode 100644 drivers/gpu/drm/img-rogue/pdumpdefs.h + create mode 100644 drivers/gpu/drm/img-rogue/pdumpdesc.h + create mode 100644 drivers/gpu/drm/img-rogue/physheap.c + create mode 100644 drivers/gpu/drm/img-rogue/physheap.h + create mode 100644 drivers/gpu/drm/img-rogue/physheap_config.h + create mode 100644 drivers/gpu/drm/img-rogue/physmem.c + create mode 100644 drivers/gpu/drm/img-rogue/physmem.h + create mode 100644 drivers/gpu/drm/img-rogue/physmem_dmabuf.c + create mode 100644 drivers/gpu/drm/img-rogue/physmem_dmabuf.h + create mode 100644 drivers/gpu/drm/img-rogue/physmem_hostmem.c + create mode 100644 drivers/gpu/drm/img-rogue/physmem_hostmem.h + create mode 100644 drivers/gpu/drm/img-rogue/physmem_lma.c + create mode 100644 drivers/gpu/drm/img-rogue/physmem_lma.h + create mode 100644 drivers/gpu/drm/img-rogue/physmem_osmem.h + create mode 100644 drivers/gpu/drm/img-rogue/physmem_osmem_linux.c + create mode 100644 drivers/gpu/drm/img-rogue/physmem_osmem_linux.h + create mode 100644 drivers/gpu/drm/img-rogue/physmem_test.c + create mode 100644 drivers/gpu/drm/img-rogue/physmem_test.h + create mode 100644 drivers/gpu/drm/img-rogue/plato_drv.h + create mode 100644 drivers/gpu/drm/img-rogue/pmr.c + create mode 100644 drivers/gpu/drm/img-rogue/pmr.h + create mode 100644 drivers/gpu/drm/img-rogue/pmr_impl.h + create mode 100644 drivers/gpu/drm/img-rogue/pmr_os.c + create mode 100644 drivers/gpu/drm/img-rogue/pmr_os.h + create mode 100644 drivers/gpu/drm/img-rogue/power.c + create mode 100644 drivers/gpu/drm/img-rogue/power.h + create mode 100644 drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h + create mode 100644 drivers/gpu/drm/img-rogue/powervr/img_drm_fourcc.h + create mode 100644 drivers/gpu/drm/img-rogue/powervr/mem_types.h + create mode 100644 drivers/gpu/drm/img-rogue/powervr/pvrsrv_sync_ext.h + create mode 100644 drivers/gpu/drm/img-rogue/private_data.h + create mode 
100644 drivers/gpu/drm/img-rogue/proc_stats.h + create mode 100644 drivers/gpu/drm/img-rogue/process_stats.c + create mode 100644 drivers/gpu/drm/img-rogue/process_stats.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_bridge_k.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_bridge_k.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_buffer_sync.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_buffer_sync.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_counting_timeline.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_counting_timeline.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_debug.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_debug.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_debugfs.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_debugfs.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_dicommon.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_dma_resv.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_drm.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_drm.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_drv.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_fd_sync_kernel.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_fence.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_fence.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_fence_trace.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_gputrace.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_intrinsics.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_ion_stats.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_linux_fence.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_notifier.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_notifier.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_platform_drv.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_procfs.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_ricommon.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_sw_fence.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_sw_fence.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_sync.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_sync_api.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_sync_file.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.c + create mode 100644 drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_uaccess.h + create mode 100644 drivers/gpu/drm/img-rogue/pvr_vmap.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrmodule.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv.c + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_apphint.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.c + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_device.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_device_types.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_devvar.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_error.c + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_error.h + create mode 100644 
drivers/gpu/drm/img-rogue/pvrsrv_errors.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_firmware_boot.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_memalloc_physheap.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_memallocflags_internal.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_pool.c + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_pool.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h + create mode 100644 drivers/gpu/drm/img-rogue/pvrsrvkm.mk + create mode 100644 drivers/gpu/drm/img-rogue/pvrversion.h + create mode 100644 drivers/gpu/drm/img-rogue/ra.c + create mode 100644 drivers/gpu/drm/img-rogue/ra.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_bridge.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_bridge_init.c + create mode 100644 drivers/gpu/drm/img-rogue/rgx_bridge_init.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_common.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_common_asserts.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_fw_info.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_fwif_km.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_fwif_sf.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_fwif_shared.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_heap_firmware.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_heaps.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_hwperf.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_hwperf_common.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_hwperf_table.c + create mode 100644 drivers/gpu/drm/img-rogue/rgx_hwperf_table.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_memallocflags.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_meta.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_mips.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_options.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_pdump_panics.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_riscv.h + create mode 100644 drivers/gpu/drm/img-rogue/rgx_tq_shared.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxapi_km.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxbreakpoint.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxbreakpoint.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxbvnc.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxbvnc.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxccb.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxccb.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxcompute.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxcompute.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxdebug.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxdebug.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxdevice.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxfw_log_helper.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxfwdbg.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxfwdbg.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxfwimageutils.c + 
create mode 100644 drivers/gpu/drm/img-rogue/rgxfwimageutils.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxfwtrace_strings.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxfwutils.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxfwutils.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxheapconfig.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxheapconfig_65273.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxhwperf.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxhwperf.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxhwperf_common.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxhwperf_common.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxinit.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxinit.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxkicksync.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxkicksync.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxlayer.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxlayer_impl.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxlayer_impl.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxmem.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxmem.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxmmuinit.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxmmuinit.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxmulticore.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxmulticore.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxpower.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxpower.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxregconfig.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxregconfig.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxshader.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxshader.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxsrvinit.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxstartstop.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxstartstop.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxsyncutils.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxsyncutils.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxta3d.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxta3d.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxtdmtransfer.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxtdmtransfer.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxtimecorr.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxtimecorr.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxtimerquery.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxtimerquery.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxtransfer.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxtransfer.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxtransfer_shader.h + create mode 100644 drivers/gpu/drm/img-rogue/rgxutils.c + create mode 100644 drivers/gpu/drm/img-rogue/rgxutils.h + create mode 100644 drivers/gpu/drm/img-rogue/ri_server.c + create mode 100644 drivers/gpu/drm/img-rogue/ri_server.h + create mode 100644 drivers/gpu/drm/img-rogue/ri_typedefs.h + create mode 100644 drivers/gpu/drm/img-rogue/rogue_trace_events.h + create mode 100644 drivers/gpu/drm/img-rogue/server_cache_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_cmm_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_di_bridge.c + create mode 100644 
drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_mm_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxbreakpoint_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxfwdbg_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxregconfig_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxtimerquery_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_ri_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_srvcore_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_sync_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/server_synctracking_bridge.c + create mode 100644 drivers/gpu/drm/img-rogue/services_kernel_client.h + create mode 100644 drivers/gpu/drm/img-rogue/services_km.h + create mode 100644 drivers/gpu/drm/img-rogue/servicesext.h + create mode 100644 drivers/gpu/drm/img-rogue/sofunc_pvr.h + create mode 100644 drivers/gpu/drm/img-rogue/srvcore.c + create mode 100644 drivers/gpu/drm/img-rogue/srvcore.h + create mode 100644 drivers/gpu/drm/img-rogue/srvinit.h + create mode 100644 drivers/gpu/drm/img-rogue/srvkm.h + create mode 100644 drivers/gpu/drm/img-rogue/sync.c + create mode 100644 drivers/gpu/drm/img-rogue/sync.h + create mode 100644 drivers/gpu/drm/img-rogue/sync_checkpoint.c + create mode 100644 drivers/gpu/drm/img-rogue/sync_checkpoint.h + create mode 100644 drivers/gpu/drm/img-rogue/sync_checkpoint_external.h + create mode 100644 drivers/gpu/drm/img-rogue/sync_checkpoint_init.h + create mode 100644 drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h + create mode 100644 drivers/gpu/drm/img-rogue/sync_fallback_server.h + create mode 100644 drivers/gpu/drm/img-rogue/sync_internal.h + create mode 100644 drivers/gpu/drm/img-rogue/sync_prim_internal.h + create mode 100644 drivers/gpu/drm/img-rogue/sync_server.c + create mode 100644 drivers/gpu/drm/img-rogue/sync_server.h + create mode 100644 drivers/gpu/drm/img-rogue/syscommon.h + create mode 100644 drivers/gpu/drm/img-rogue/sysconfig.c + create mode 100644 drivers/gpu/drm/img-rogue/sysconfig.h + create mode 100644 drivers/gpu/drm/img-rogue/sysconfig_cmn.c + create mode 100644 drivers/gpu/drm/img-rogue/sysinfo.h + create mode 100644 drivers/gpu/drm/img-rogue/sysvalidation.h + create mode 100644 drivers/gpu/drm/img-rogue/tlclient.c + create mode 100644 drivers/gpu/drm/img-rogue/tlclient.h + create mode 100644 drivers/gpu/drm/img-rogue/tlintern.c + create mode 100644 drivers/gpu/drm/img-rogue/tlintern.h + create mode 100644 drivers/gpu/drm/img-rogue/tlserver.c + create mode 100644 drivers/gpu/drm/img-rogue/tlserver.h + create mode 100644 drivers/gpu/drm/img-rogue/tlstream.c + create mode 100644 drivers/gpu/drm/img-rogue/tlstream.h + create mode 100644 drivers/gpu/drm/img-rogue/trace_events.c + create mode 100644 drivers/gpu/drm/img-rogue/trace_events.h + create mode 100644 drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c + create 
mode 100644 drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h + create mode 100644 drivers/gpu/drm/img-rogue/vmm_impl.h + create mode 100644 drivers/gpu/drm/img-rogue/vmm_pvz_client.c + create mode 100644 drivers/gpu/drm/img-rogue/vmm_pvz_client.h + create mode 100644 drivers/gpu/drm/img-rogue/vmm_pvz_common.h + create mode 100644 drivers/gpu/drm/img-rogue/vmm_pvz_server.c + create mode 100644 drivers/gpu/drm/img-rogue/vmm_pvz_server.h + create mode 100644 drivers/gpu/drm/img-rogue/vmm_type_stub.c + create mode 100644 drivers/gpu/drm/img-rogue/vz_vm.h + create mode 100644 drivers/gpu/drm/img-rogue/vz_vmm_pvz.c + create mode 100644 drivers/gpu/drm/img-rogue/vz_vmm_pvz.h + create mode 100644 drivers/gpu/drm/img-rogue/vz_vmm_vm.c + create mode 100644 drivers/gpu/drm/img-rogue/xuantie_sys.c + create mode 100644 drivers/gpu/drm/img-rogue/xuantie_sys.h + create mode 100644 drivers/gpu/drm/panel/panel-himax-hx8279.c + create mode 100644 drivers/gpu/drm/panel/panel-jadard-jd9365da.c + create mode 100644 drivers/gpu/drm/verisilicon/Kconfig + create mode 100644 drivers/gpu/drm/verisilicon/Makefile + create mode 100644 drivers/gpu/drm/verisilicon/dw_hdmi-th1520.c + create mode 100644 drivers/gpu/drm/verisilicon/dw_hdmi_th1520.h + create mode 100644 drivers/gpu/drm/verisilicon/dw_hdmi_tx_phy_gen2.h + create mode 100644 drivers/gpu/drm/verisilicon/dw_mipi_dsi.c + create mode 100644 drivers/gpu/drm/verisilicon/dw_mipi_dsi.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_crtc.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_crtc.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_dc.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_dc.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_dc_dec.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_dc_dec.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_dc_hw.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_dc_hw.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_dc_mmu.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_dc_mmu.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_drv.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_drv.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_fb.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_fb.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_gem.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_gem.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_plane.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_plane.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_simple_enc.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_simple_enc.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_type.h + create mode 100644 drivers/gpu/drm/verisilicon/vs_virtual.c + create mode 100644 drivers/gpu/drm/verisilicon/vs_virtual.h + create mode 100644 drivers/hwspinlock/th1520_hwspinlock.c + create mode 100644 drivers/i2c/busses/i2c-designware-master_dma.c + create mode 100644 drivers/i2c/busses/i2c-designware-master_dma.h + create mode 100644 drivers/i2c/busses/i2c-spacemit-k1.c + create mode 100644 drivers/i2c/busses/i2c-spacemit-k1.h + create mode 100644 drivers/iio/adc/spacemit-p1-adc.c + create mode 100644 drivers/iio/adc/th1520-adc.c + create mode 100644 drivers/iio/adc/th1520-adc.h + create mode 100644 drivers/input/misc/spacemit-p1-pwrkey.c + create mode 100644 drivers/iommu/iommu-pages.h + create mode 100644 drivers/iommu/riscv/Kconfig + create mode 100644 drivers/iommu/riscv/Makefile + create mode 100644 drivers/iommu/riscv/iommu-bits.h + create mode 
100644 drivers/iommu/riscv/iommu-pci.c + create mode 100644 drivers/iommu/riscv/iommu-platform.c + create mode 100644 drivers/iommu/riscv/iommu.c + create mode 100644 drivers/iommu/riscv/iommu.h + create mode 100644 drivers/irqchip/irq-riscv-aplic-direct.c + create mode 100644 drivers/irqchip/irq-riscv-aplic-main.c + create mode 100644 drivers/irqchip/irq-riscv-aplic-main.h + create mode 100644 drivers/irqchip/irq-riscv-aplic-msi.c + create mode 100644 drivers/irqchip/irq-riscv-imsic-early.c + create mode 100644 drivers/irqchip/irq-riscv-imsic-platform.c + create mode 100644 drivers/irqchip/irq-riscv-imsic-state.c + create mode 100644 drivers/irqchip/irq-riscv-imsic-state.h + create mode 100644 drivers/irqchip/irq-sg2044-msi.c + create mode 100644 drivers/irqchip/irq-thead-c900-aclint-sswi.c + create mode 100644 drivers/mailbox/th1520-mailbox.c + create mode 100644 drivers/mfd/spacemit-p1.c + create mode 100644 drivers/mmc/host/sdhci-of-k1.c + create mode 100644 drivers/mmc/host/sdhci-sophgo.c + create mode 100644 drivers/mmc/host/sdhci-sophgo.h + create mode 100644 drivers/mtd/spi-nor/controllers/sophgo-spifmc.c + create mode 100644 drivers/net/ethernet/spacemit/Kconfig + create mode 100644 drivers/net/ethernet/spacemit/Makefile + create mode 100644 drivers/net/ethernet/spacemit/k1-emac.c + create mode 100644 drivers/net/ethernet/spacemit/k1-emac.h + create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c + create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-xuantie.c + create mode 100644 drivers/net/wireless/aic8800/Kconfig + create mode 100644 drivers/net/wireless/aic8800/Makefile + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/.gitignore + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/Makefile + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aic8800d80_compat.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aic8800d80_compat.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aic8800dc_compat.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aic8800dc_compat.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aic_bsp_driver.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aic_bsp_driver.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aic_bsp_export.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aic_bsp_main.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aicsdio.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aicsdio.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aicsdio_txrxif.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aicsdio_txrxif.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aicwf_firmware_array.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aicwf_firmware_array.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aicwf_txq_prealloc.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/aicwf_txq_prealloc.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/md5.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/md5.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_bsp/rwnx_version_gen.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_btlpm/.gitignore + create mode 100644 drivers/net/wireless/aic8800/aic8800_btlpm/Kconfig + create mode 100644 drivers/net/wireless/aic8800/aic8800_btlpm/Makefile + create mode 100644 
drivers/net/wireless/aic8800/aic8800_btlpm/aic8800_btlpm.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_btlpm/aic_bluetooth_main.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_btlpm/aic_bsp_export.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_btlpm/lpm.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_btlpm/lpm.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_btlpm/rfkill.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_btlpm/rfkill.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/.gitignore + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/Kconfig + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/Makefile + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_br_ext.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_br_ext.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_bsp_export.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_btsdio.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_btsdio.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_vendor.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_vendor.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800d80.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800d80.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800dc.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800dc.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_debug.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_rx_prealloc.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_rx_prealloc.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_sdio.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_sdio.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_tcp_ack.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_tcp_ack.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_txrxif.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_txrxif.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_usb.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_usb.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/hal_desc.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/ipc_compat.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/ipc_host.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/ipc_host.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/ipc_shared.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/lmac_mac.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/lmac_msg.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/lmac_types.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/md5.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/md5.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/reg_access.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/regdb.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_bfmer.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_bfmer.h + create mode 100644 
drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cfgfile.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cfgfile.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cmds.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cmds.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_compat.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_debugfs.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_debugfs.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_defs.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_dini.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_dini.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_events.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_fw_trace.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_fw_trace.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_gki.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_gki.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_irqs.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_irqs.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_main.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_main.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mesh.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mesh.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mod_params.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mod_params.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_rx.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_rx.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_tx.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_tx.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mu_group.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mu_group.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_pci.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_pci.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_platform.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_platform.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_prof.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_radar.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_radar.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_rx.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_rx.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_strs.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_strs.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tdls.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tdls.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_testmode.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_testmode.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tx.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tx.h + create mode 100644 
drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_txq.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_txq.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_utils.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_utils.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_v7.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_v7.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_version.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_version_gen.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_wakelock.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_wakelock.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/sdio_host.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/sdio_host.h + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/usb_host.c + create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/usb_host.h + create mode 100644 drivers/nvmem/th1520-efuse.c + create mode 100644 drivers/pci/controller/cadence/pcie-cadence-sophgo.c + create mode 100644 drivers/pci/controller/cadence/pcie-cadence-sophgo.h + create mode 100644 drivers/pci/controller/dwc/pcie-dw-sophgo.c + create mode 100644 drivers/pci/controller/dwc/pcie-dw-sophgo.h + create mode 100644 drivers/phy/synopsys/Kconfig + create mode 100644 drivers/phy/synopsys/Makefile + create mode 100644 drivers/phy/synopsys/phy-dw-mipi-dphy.c + create mode 100644 drivers/pinctrl/pinctrl-spacemit-k1x.c + create mode 100644 drivers/pinctrl/pinctrl-spacemit-p1.c + create mode 100644 drivers/pinctrl/pinctrl-th1520.c + create mode 100644 drivers/pinctrl/sophgo/Makefile + create mode 100644 drivers/pinctrl/sophgo/pinctrl-mango.c + create mode 100644 drivers/pinctrl/sophgo/pinctrl-sophgo.c + create mode 100644 drivers/pinctrl/sophgo/pinctrl-sophgo.h + create mode 100644 drivers/pwm/pwm-sophgo.c + create mode 100644 drivers/pwm/pwm-xuantie.c + create mode 100644 drivers/regulator/spacemit-p1-regulator.c + create mode 100644 drivers/regulator/th1520-aon-regulator.c + create mode 100644 drivers/reset/reset-sophgo.c + create mode 100644 drivers/reset/reset-spacemit-k1x.c + create mode 100644 drivers/reset/reset-th1520.c + create mode 100644 drivers/rpmsg/th1520_rpmsg.c + create mode 100644 drivers/rtc/rtc-astbmc.c + create mode 100644 drivers/rtc/rtc-spacemit-p1.c + create mode 100644 drivers/soc/sophgo/Makefile + create mode 100644 drivers/soc/sophgo/tach/sophgo-tach.c + create mode 100644 drivers/soc/sophgo/top/top_intc.c + create mode 100644 drivers/soc/sophgo/umcu/mcu.c + create mode 100644 drivers/soc/spacemit/Kconfig + create mode 100644 drivers/soc/spacemit/Makefile + create mode 100644 drivers/soc/spacemit/spacemit-mem-range.c + create mode 100644 drivers/soc/xuantie/Kconfig + create mode 100644 drivers/soc/xuantie/Makefile + create mode 100644 drivers/soc/xuantie/nna/GPLHEADER + create mode 100644 drivers/soc/xuantie/nna/Kconfig + create mode 100644 drivers/soc/xuantie/nna/Makefile + create mode 100644 drivers/soc/xuantie/nna/README + create mode 100644 drivers/soc/xuantie/nna/build.mk + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/FindDmaBufExporter.cmake + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/Makefile + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/README + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/de_common.c + create mode 100644 
drivers/soc/xuantie/nna/dmabuf_exporter/de_heap.h + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/de_heap_carveout.c + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/de_heap_coherent.c + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/de_heap_ion.c + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/de_heap_ion.h + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/de_heap_ion_example.c + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/de_heap_noncoherent.c + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/test/dma-map.c + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/test/dma-test.c + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/uapi/dmabuf_exporter.h + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/uapi/kernel_4x14/ion.h + create mode 100644 drivers/soc/xuantie/nna/dmabuf_exporter/uapi/kernel_4x4/ion.h + create mode 100644 drivers/soc/xuantie/nna/fenrir_loki/Makefile + create mode 100644 drivers/soc/xuantie/nna/fenrir_loki/loki-intc.c + create mode 100644 drivers/soc/xuantie/nna/fenrir_loki/loki-main.c + create mode 100644 drivers/soc/xuantie/nna/fenrir_loki/loki.h + create mode 100644 drivers/soc/xuantie/nna/img_mem/Makefile + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_mem_anonymous.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_mem_carveout.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_mem_coherent.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_mem_dmabuf.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_mem_ion.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_mem_man.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_mem_man_priv.h + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_mem_ocm.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_mem_unified.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/img_pdump.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/imgmmu/imgmmu.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/imgmmu/kernel_heap.c + create mode 100644 drivers/soc/xuantie/nna/img_mem/imgmmu/mmu_defs.h + create mode 100644 drivers/soc/xuantie/nna/img_mem/imgmmu/mmulib/heap.h + create mode 100644 drivers/soc/xuantie/nna/img_mem/imgmmu/mmulib/mmu.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/aura_system.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/gyrus_system.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/magna_system.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/mirage_system.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/nn_sys_cr_gyrus.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/nn_sys_cr_vagus.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/vagus_system.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/vha_cr_aura.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/vha_cr_gyrus.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/vha_cr_magna.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/vha_cr_mirage.h + create mode 100644 drivers/soc/xuantie/nna/include/hwdefs/vha_tb.h + create mode 100644 drivers/soc/xuantie/nna/include/img_mem_man.h + create mode 100644 drivers/soc/xuantie/nna/include/nexef_plat.h + create mode 100644 drivers/soc/xuantie/nna/include/uapi/img_mem_man.h + create mode 100644 drivers/soc/xuantie/nna/include/uapi/version.h + create mode 100644 drivers/soc/xuantie/nna/include/uapi/vha.h 
+ create mode 100644 drivers/soc/xuantie/nna/include/uapi/vha_errors.h + create mode 100644 drivers/soc/xuantie/nna/include/vha_drv_common.h + create mode 100644 drivers/soc/xuantie/nna/include/vha_trace_point.h + create mode 100644 drivers/soc/xuantie/nna/nexef_platform/Makefile + create mode 100644 drivers/soc/xuantie/nna/nexef_platform/README.md + create mode 100644 drivers/soc/xuantie/nna/nexef_platform/nexef_plat.c + create mode 100644 drivers/soc/xuantie/nna/nexef_platform/set_fpga_freq.py + create mode 100644 drivers/soc/xuantie/nna/vha/Makefile + create mode 100644 drivers/soc/xuantie/nna/vha/multi/vha_dev.c + create mode 100644 drivers/soc/xuantie/nna/vha/multi/vha_mmu.c + create mode 100644 drivers/soc/xuantie/nna/vha/multi/vha_mt19937.c + create mode 100644 drivers/soc/xuantie/nna/vha/multi/vha_mt19937.h + create mode 100644 drivers/soc/xuantie/nna/vha/multi/vha_regs.h + create mode 100644 drivers/soc/xuantie/nna/vha/multi/vha_sc_dbg.c + create mode 100644 drivers/soc/xuantie/nna/vha/multi/vha_wm.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat.h + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_apollo.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_dt.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_dt.h + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_dt_example.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_dt_example.dts + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_dt_fenrir.dts + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_dummy.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_emu.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_frost.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_nexef.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_odin.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_orion.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_param_xuantie_th1520_fpga_c910.h + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_pci.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_xuantie.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_xuantie_th1520.c + create mode 100644 drivers/soc/xuantie/nna/vha/platform/vha_plat_xuantie_th1520_fpga_c910.c + create mode 100644 drivers/soc/xuantie/nna/vha/single/vha_cnn.c + create mode 100644 drivers/soc/xuantie/nna/vha/single/vha_dev.c + create mode 100644 drivers/soc/xuantie/nna/vha/single/vha_dev_ax2.c + create mode 100644 drivers/soc/xuantie/nna/vha/single/vha_dev_ax3.c + create mode 100644 drivers/soc/xuantie/nna/vha/single/vha_mmu.c + create mode 100644 drivers/soc/xuantie/nna/vha/single/vha_regs.h + create mode 100644 drivers/soc/xuantie/nna/vha/vha_api.c + create mode 100644 drivers/soc/xuantie/nna/vha/vha_common.c + create mode 100644 drivers/soc/xuantie/nna/vha/vha_common.h + create mode 100644 drivers/soc/xuantie/nna/vha/vha_dbg.c + create mode 100644 drivers/soc/xuantie/nna/vha/vha_devfreq.c + create mode 100644 drivers/soc/xuantie/nna/vha/vha_info.c + create mode 100644 drivers/soc/xuantie/nna/vha/vha_io.h + create mode 100644 drivers/soc/xuantie/nna/vha/vha_monitor.c + create mode 100644 drivers/soc/xuantie/nna/vha/vha_pdump.c + create mode 100644 drivers/soc/xuantie/th1520-iopmp.c + create mode 100644 drivers/soc/xuantie/th1520_event.c + create mode 100644 drivers/soc/xuantie/th1520_regdump.c 
+ create mode 100644 drivers/soc/xuantie/th1520_system_monitor.c + create mode 100644 drivers/soc/xuantie/video_memory/Kconfig + create mode 100644 drivers/soc/xuantie/video_memory/Makefile + create mode 100644 drivers/soc/xuantie/video_memory/driver/Makefile + create mode 100644 drivers/soc/xuantie/video_memory/driver/rsvmem_pool.c + create mode 100644 drivers/soc/xuantie/video_memory/driver/rsvmem_pool.h + create mode 100644 drivers/soc/xuantie/video_memory/driver/video_memory.c + create mode 100644 drivers/soc/xuantie/video_memory/driver/video_memory.h + create mode 100644 drivers/soc/xuantie/video_memory/lib/Makefile + create mode 100644 drivers/soc/xuantie/video_memory/lib/video_mem.c + create mode 100644 drivers/soc/xuantie/video_memory/lib/video_mem.h + create mode 100644 drivers/soc/xuantie/video_memory/test/Makefile + create mode 100644 drivers/soc/xuantie/video_memory/test/video_memory_test.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/COPYING + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/Kconfig + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/Makefile + create mode 100755 drivers/soc/xuantie/vpu-vc8000d-kernel/addons/ko/insmod.sh + create mode 100755 drivers/soc/xuantie/vpu-vc8000d-kernel/addons/ko/rmmod.sh + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/dwl/dwl_defs.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/memalloc/Makefile + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/memalloc/README + create mode 100755 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/memalloc/build_for_pcie.sh + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/memalloc/memalloc.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/memalloc/memalloc.h + create mode 100755 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/memalloc/memalloc_load.sh + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/memalloc/testbench_memalloc.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/Makefile + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/README + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/bidirect_list.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/bidirect_list.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/build_for_mpcore.sh + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/build_for_socle.sh + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/build_for_vexpress.sh + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/dec_devfreq.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/driver_load.sh + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/driver_load_sc.sh + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/dev_common_freertos.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/hantro_dec_freertos.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/hantro_vcmd_freertos.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/io_tools.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/io_tools.h + create mode 100644 
drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/memalloc_freertos.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/memalloc_freertos.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/subsys_freertos.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/user_freertos.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/freertos/user_freertos.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/hantro_axife.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/hantro_dec.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/hantro_mmu.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/hantro_vcmd.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/hantroaxife.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/hantrodec.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/hantrommu.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/hantrovcmd.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/kernel_allocator.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/kernel_allocator.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/subsys.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/subsys.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/vcmdregisterenum.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/vcmdregistertable.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/vcmdswhwregisters.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/vcmdswhwregisters.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000d-kernel/linux/subsys_driver/vdec_trace_point.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/COPYING + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/Kconfig + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/Makefile + create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/insmod.sh + create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/addons/ko/rmmod.sh + create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/Makefile + create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/README + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/bidirect_list.h + create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/driver_load.sh + create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantro_mmu.c + create mode 100755 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/hantrommu.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_axife.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_devfreq.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.c + create mode 100644 
drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_driver.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_normal_driver.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vc8000_vcmd_driver.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregisterenum.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdregistertable.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.c + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/vcmdswhwregisters.h + create mode 100644 drivers/soc/xuantie/vpu-vc8000e-kernel/linux/kernel_module/venc_trace_point.h + create mode 100644 drivers/spi/spi-dw-mmio-quad.c + create mode 100644 drivers/spi/spi-dw-quad.c + create mode 100644 drivers/spi/spi-dw-quad.h + create mode 100644 drivers/spi/spi-spacemit-k1-qspi.c + create mode 100644 drivers/spi/spi-spacemit-k1.c + create mode 100644 drivers/spi/spi-spacemit-k1.h + create mode 100644 drivers/tty/serial/spacemit_k1x_uart.c + create mode 100644 drivers/usb/dwc3/dwc3-xuantie.c + create mode 100644 drivers/watchdog/th1520_wdt.c + create mode 100644 include/dt-bindings/clock/sophgo-mango-clock.h + create mode 100644 include/dt-bindings/clock/sophgo.h + create mode 100644 include/dt-bindings/clock/spacemit-k1x-clock.h + create mode 100644 include/dt-bindings/clock/th1520-audiosys.h + create mode 100644 include/dt-bindings/clock/th1520-dspsys.h + create mode 100644 include/dt-bindings/clock/th1520-fm-ap-clock.h + create mode 100644 include/dt-bindings/clock/th1520-miscsys.h + create mode 100644 include/dt-bindings/clock/th1520-visys.h + create mode 100644 include/dt-bindings/clock/th1520-vosys.h + create mode 100644 include/dt-bindings/clock/th1520-vpsys.h + create mode 100644 include/dt-bindings/dma/spacemit-k1-dma.h + create mode 100644 include/dt-bindings/firmware/xuantie/rsrc.h + create mode 100644 include/dt-bindings/mmc/spacemit-k1-sdhci.h + create mode 100644 include/dt-bindings/pinctrl/k1-x-pinctrl.h + create mode 100644 include/dt-bindings/reset/sophgo-mango-resets.h + create mode 100644 include/dt-bindings/reset/spacemit-k1x-reset.h + create mode 100644 include/dt-bindings/reset/xuantie,th1520-reset.h + create mode 100644 include/dt-bindings/soc/th1520_system_status.h + create mode 100644 include/dt-bindings/soc/xuantie,th1520-iopmp.h + create mode 100644 include/linux/firmware/xuantie/ipc.h + create mode 100644 include/linux/firmware/xuantie/th1520_event.h + create mode 100644 include/linux/irqchip/riscv-aplic.h + create mode 100644 include/linux/irqchip/riscv-imsic.h + create mode 100644 include/linux/mfd/spacemit_p1.h + create mode 100644 include/linux/platform_data/spacemit_k1_sdhci.h + create mode 100644 include/linux/th1520_proc_debug.h + create mode 100644 include/linux/th1520_rpmsg.h + create mode 100644 include/soc/xuantie/th1520_system_monitor.h + create mode 100644 include/soc/xuantie/th1520_system_status.h + create mode 100644 include/uapi/drm/vs_drm.h + create mode 100644 net/rfkill/rfkill-bt.c + create mode 100644 net/rfkill/rfkill-wlan.c + create mode 100644 sound/soc/codecs/aw87519.c + create mode 100644 sound/soc/codecs/aw87519.h + create mode 100644 sound/soc/codecs/es7210.c + create mode 100644 sound/soc/codecs/es7210.h + create mode 100644 sound/soc/codecs/es8156.c + create mode 100644 sound/soc/codecs/es8156.h + create mode 100644 sound/soc/xuantie/Kconfig + create 
mode 100644 sound/soc/xuantie/Makefile + create mode 100644 sound/soc/xuantie/th1520-audio-cpr.h + create mode 100644 sound/soc/xuantie/th1520-hdmi-pcm.c + create mode 100644 sound/soc/xuantie/th1520-i2s-8ch.c + create mode 100644 sound/soc/xuantie/th1520-i2s-common.c + create mode 100644 sound/soc/xuantie/th1520-i2s.c + create mode 100644 sound/soc/xuantie/th1520-i2s.h + create mode 100644 sound/soc/xuantie/th1520-pcm-dma.c + create mode 100644 sound/soc/xuantie/th1520-pcm.h + create mode 100644 sound/soc/xuantie/th1520-spdif.c + create mode 100644 sound/soc/xuantie/th1520-spdif.h + create mode 100644 sound/soc/xuantie/th1520-tdm.c + create mode 100644 sound/soc/xuantie/th1520-tdm.h + create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json + create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json + create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json + create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json + create mode 100644 tools/perf/pmu-events/arch/riscv/thead/th1520-ddr/metrics.json + create mode 100644 tools/perf/pmu-events/arch/riscv/thead/th1520-ddr/uncore-ddr-pmu.json + create mode 100644 tools/testing/selftests/riscv/hwprobe/cbo.c + create mode 100644 tools/testing/selftests/riscv/hwprobe/hwprobe.h + +diff --git a/Documentation/arch/index.rst b/Documentation/arch/index.rst +index 84b80255b851..f4794117e56b 100644 +--- a/Documentation/arch/index.rst ++++ b/Documentation/arch/index.rst +@@ -20,7 +20,7 @@ implementation. + openrisc/index + parisc/index + ../powerpc/index +- ../riscv/index ++ riscv/index + s390/index + sh/index + sparc/index +diff --git a/Documentation/riscv/acpi.rst b/Documentation/arch/riscv/acpi.rst +similarity index 100% +rename from Documentation/riscv/acpi.rst +rename to Documentation/arch/riscv/acpi.rst +diff --git a/Documentation/riscv/boot-image-header.rst b/Documentation/arch/riscv/boot-image-header.rst +similarity index 100% +rename from Documentation/riscv/boot-image-header.rst +rename to Documentation/arch/riscv/boot-image-header.rst +diff --git a/Documentation/riscv/boot.rst b/Documentation/arch/riscv/boot.rst +similarity index 100% +rename from Documentation/riscv/boot.rst +rename to Documentation/arch/riscv/boot.rst +diff --git a/Documentation/riscv/features.rst b/Documentation/arch/riscv/features.rst +similarity index 100% +rename from Documentation/riscv/features.rst +rename to Documentation/arch/riscv/features.rst +diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst +new file mode 100644 +index 000000000000..971370894bfd +--- /dev/null ++++ b/Documentation/arch/riscv/hwprobe.rst +@@ -0,0 +1,271 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++RISC-V Hardware Probing Interface ++--------------------------------- ++ ++The RISC-V hardware probing interface is based around a single syscall, which ++is defined in :: ++ ++ struct riscv_hwprobe { ++ __s64 key; ++ __u64 value; ++ }; ++ ++ long sys_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, cpu_set_t *cpus, ++ unsigned int flags); ++ ++The arguments are split into three groups: an array of key-value pairs, a CPU ++set, and some flags. The key-value pairs are supplied with a count. Userspace ++must prepopulate the key field for each element, and the kernel will fill in the ++value if the key is recognized. If a key is unknown to the kernel, its key field ++will be cleared to -1, and its value set to 0. 
The CPU set is defined by ++CPU_SET(3) with size ``cpusetsize`` bytes. For value-like keys (e.g. vendor, ++arch, impl), the returned value will only be valid if all CPUs in the given set ++have the same value. Otherwise -1 will be returned. For boolean-like keys, the ++value returned will be a logical AND of the values for the specified CPUs. ++Usermode can supply NULL for ``cpus`` and 0 for ``cpusetsize`` as a shortcut for ++all online CPUs. The currently supported flags are: ++ ++* :c:macro:`RISCV_HWPROBE_WHICH_CPUS`: This flag basically reverses the behavior ++ of sys_riscv_hwprobe(). Instead of populating the values of keys for a given ++ set of CPUs, the values of each key are given and the set of CPUs is reduced ++ by sys_riscv_hwprobe() to only those which match each of the key-value pairs. ++ How matching is done depends on the key type. For value-like keys, matching ++ means to be the exact same as the value. For boolean-like keys, matching ++ means the result of a logical AND of the pair's value with the CPU's value is ++ exactly the same as the pair's value. Additionally, when ``cpus`` is an empty ++ set, then it is initialized to all online CPUs which fit within it, i.e. the ++ CPU set returned is the reduction of all the online CPUs which can be ++ represented with a CPU set of size ``cpusetsize``. ++ ++All other flags are reserved for future compatibility and must be zero. ++ ++On success 0 is returned, on failure a negative error code is returned. ++ ++The following keys are defined: ++ ++* :c:macro:`RISCV_HWPROBE_KEY_MVENDORID`: Contains the value of ``mvendorid``, ++ as defined by the RISC-V privileged architecture specification. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_MARCHID`: Contains the value of ``marchid``, as ++ defined by the RISC-V privileged architecture specification. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_MIMPID`: Contains the value of ``mimpid``, as ++ defined by the RISC-V privileged architecture specification. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_BASE_BEHAVIOR`: A bitmask containing the base ++ user-visible behavior that this kernel supports. The following base user ABIs ++ are defined: ++ ++ * :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: Support for rv32ima or ++ rv64ima, as defined by version 2.2 of the user ISA and version 1.10 of the ++ privileged ISA, with the following known exceptions (more exceptions may be ++ added, but only if it can be demonstrated that the user ABI is not broken): ++ ++ * The ``fence.i`` instruction cannot be directly executed by userspace ++ programs (it may still be executed in userspace via a ++ kernel-controlled mechanism such as the vDSO). ++ ++* :c:macro:`RISCV_HWPROBE_KEY_IMA_EXT_0`: A bitmask containing the extensions ++ that are compatible with the :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA` ++ base system behavior. ++ ++ * :c:macro:`RISCV_HWPROBE_IMA_FD`: The F and D extensions are supported, as ++ defined by commit cd20cee ("FMIN/FMAX now implement ++ minimumNumber/maximumNumber, not minNum/maxNum") of the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_IMA_C`: The C extension is supported, as defined ++ by version 2.2 of the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_IMA_V`: The V extension is supported, as defined by ++ version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBA`: The Zba address generation extension is ++ supported, as defined in version 1.0 of the Bit-Manipulation ISA ++ extensions.
++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBB`: The Zbb extension is supported, as defined ++ in version 1.0 of the Bit-Manipulation ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBS`: The Zbs extension is supported, as defined ++ in version 1.0 of the Bit-Manipulation ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZICBOZ`: The Zicboz extension is supported, as ++ ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBC`: The Zbc extension is supported, as defined ++ in version 1.0 of the Bit-Manipulation ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBKB`: The Zbkb extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBKC`: The Zbkc extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZBKX`: The Zbkx extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKND`: The Zknd extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKNE`: The Zkne extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKNH`: The Zknh extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKSED`: The Zksed extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKSH`: The Zksh extension is supported, as ++ defined in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZKT`: The Zkt extension is supported, as defined ++ in version 1.0 of the Scalar Crypto ISA extensions. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVBB`: The Zvbb extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVBC`: The Zvbc extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKB`: The Zvkb extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKG`: The Zvkg extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKNED`: The Zvkned extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKNHA`: The Zvknha extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKNHB`: The Zvknhb extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKSED`: The Zvksed extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKSH`: The Zvksh extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVKT`: The Zvkt extension is supported as ++ defined in version 1.0 of the RISC-V Cryptography Extensions Volume II. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFH`: The Zfh extension version 1.0 is supported ++ as defined in the RISC-V ISA manual.
++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFHMIN`: The Zfhmin extension version 1.0 is ++ supported as defined in the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIHINTNTL`: The Zihintntl extension version 1.0 ++ is supported as defined in the RISC-V ISA manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVFH`: The Zvfh extension is supported as ++ defined in the RISC-V Vector manual starting from commit e2ccd0548d6c ++ ("Remove draft warnings from Zvfh[min]"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVFHMIN`: The Zvfhmin extension is supported as ++ defined in the RISC-V Vector manual starting from commit e2ccd0548d6c ++ ("Remove draft warnings from Zvfh[min]"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFA`: The Zfa extension is supported as ++ defined in the RISC-V ISA manual starting from commit 056b6ff467c7 ++ ("Zfa is ratified"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZTSO`: The Ztso extension is supported as ++ defined in the RISC-V ISA manual starting from commit 5618fb5a216b ++ ("Ztso is now ratified.") ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZACAS`: The Zacas extension is supported as ++ defined in the Atomic Compare-and-Swap (CAS) instructions manual starting ++ from commit 5059e0ca641c ("update to ratified"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZICOND`: The Zicond extension is supported as ++ defined in the RISC-V Integer Conditional (Zicond) operations extension ++ manual starting from commit 95cf1f9 ("Add changes requested by Ved ++ during signoff") ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIHINTPAUSE`: The Zihintpause extension is ++ supported as defined in the RISC-V ISA manual starting from commit ++ d8ab5c78c207 ("Zihintpause is ratified"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE32X`: The Vector sub-extension Zve32x is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE32F`: The Vector sub-extension Zve32f is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE64X`: The Vector sub-extension Zve64x is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE64F`: The Vector sub-extension Zve64f is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVE64D`: The Vector sub-extension Zve64d is ++ supported, as defined by version 1.0 of the RISC-V Vector extension manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIMOP`: The Zimop May-Be-Operations extension is ++ supported as defined in the RISC-V ISA manual starting from commit ++ 58220614a5f ("Zimop is ratified/1.0"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCA`: The Zca extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCB`: The Zcb extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCD`: The Zcd extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. 
++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCF`: The Zcf extension part of Zc* standard ++ extensions for code size reduction, as ratified in commit 8be3419c1c0 ++ ("Zcf doesn't exist on RV64 as it contains no instructions") of ++ riscv-code-size-reduction. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZCMOP`: The Zcmop May-Be-Operations extension is ++ supported as defined in the RISC-V ISA manual starting from commit ++ c732a4f39a4 ("Zcmop is ratified/1.0"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZAWRS`: The Zawrs extension is supported as ++ ratified in commit 98918c844281 ("Merge pull request #1217 from ++ riscv/zawrs") of riscv-isa-manual. ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_SUPM`: The Supm extension is supported as ++ defined in version 1.0 of the RISC-V Pointer Masking extensions. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance ++ information about the selected set of processors. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNKNOWN`: The performance of misaligned ++ accesses is unknown. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_EMULATED`: Misaligned accesses are ++ emulated via software, either in or below the kernel. These accesses are ++ always extremely slow. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower ++ than equivalent byte accesses. Misaligned accesses may be supported ++ directly in hardware, or trapped and emulated by software. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster ++ than equivalent byte accesses. ++ ++ * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are ++ not supported at all and will generate a misaligned address fault. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE`: An unsigned int which ++ represents the size of the Zicboz block in bytes. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS`: An unsigned long which ++ represents the highest usable userspace virtual address. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_TIME_CSR_FREQ`: Frequency (in Hz) of the ``time`` CSR.
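[Editorial aside, not part of the patch: as a minimal usage sketch of the interface described above, assuming uapi headers that provide ``struct riscv_hwprobe``, the key/value macros, and ``__NR_riscv_hwprobe``, a query across all online CPUs could look like this in C::]

    /* Illustrative sketch only: probe the base user ABI, the IMA extension
     * bitmask, and misaligned-access performance for all online CPUs
     * (cpus = NULL, cpusetsize = 0 is the documented all-online-CPUs shortcut). */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/hwprobe.h>    /* keys, values, struct riscv_hwprobe */

    int main(void)
    {
            struct riscv_hwprobe pairs[] = {
                    { .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
                    { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
                    { .key = RISCV_HWPROBE_KEY_CPUPERF_0 },
            };

            /* Returns 0 on success; unknown keys come back with key == -1. */
            if (syscall(__NR_riscv_hwprobe, pairs, 3, 0, NULL, 0) != 0)
                    return 1;

            if (pairs[0].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA)
                    printf("rv[32|64]ima base user ABI supported\n");
            if (pairs[1].value & RISCV_HWPROBE_IMA_V)
                    printf("V extension usable from userspace\n");
            if ((pairs[2].value & RISCV_HWPROBE_MISALIGNED_MASK) ==
                RISCV_HWPROBE_MISALIGNED_FAST)
                    printf("misaligned accesses are fast\n");
            return 0;
    }

[Passing :c:macro:`RISCV_HWPROBE_WHICH_CPUS` in ``flags`` reverses the direction: the caller fills in both keys and values, and the kernel shrinks the supplied CPU set to the CPUs that match, as described earlier.]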
+diff --git a/Documentation/riscv/index.rst b/Documentation/arch/riscv/index.rst +similarity index 100% +rename from Documentation/riscv/index.rst +rename to Documentation/arch/riscv/index.rst +diff --git a/Documentation/riscv/patch-acceptance.rst b/Documentation/arch/riscv/patch-acceptance.rst +similarity index 100% +rename from Documentation/riscv/patch-acceptance.rst +rename to Documentation/arch/riscv/patch-acceptance.rst +diff --git a/Documentation/riscv/uabi.rst b/Documentation/arch/riscv/uabi.rst +similarity index 100% +rename from Documentation/riscv/uabi.rst +rename to Documentation/arch/riscv/uabi.rst +diff --git a/Documentation/riscv/vector.rst b/Documentation/arch/riscv/vector.rst +similarity index 100% +rename from Documentation/riscv/vector.rst +rename to Documentation/arch/riscv/vector.rst +diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/arch/riscv/vm-layout.rst +similarity index 100% +rename from Documentation/riscv/vm-layout.rst +rename to Documentation/arch/riscv/vm-layout.rst +diff --git a/Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml b/Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml +new file mode 100644 +index 000000000000..8d36beae9676 +--- /dev/null ++++ b/Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml +@@ -0,0 +1,34 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/hwlock/xuantie,th1520-hwspinlock.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: XuanTie TH1520 SoC HwSpinlock ++ ++maintainers: ++ - Liu Yibin ++ ++properties: ++ compatible: ++ items: ++ - const: th1520,hwspinlock ++ ++ reg: ++ maxItems: 1 ++ ++ ++required: ++ - compatible ++ - reg ++ ++additionalProperties: false ++ ++examples: ++ ++ - | ++ hwspinlock: hwspinlock@ffefc10000 { ++ compatible = "th1520,hwspinlock"; ++ reg = <0xff 0xefc10000 0x0 0x10000>; ++ status = "disabled"; ++ }; +diff --git a/Documentation/devicetree/bindings/iio/adc/thead,th1520-adc.yaml b/Documentation/devicetree/bindings/iio/adc/thead,th1520-adc.yaml +new file mode 100644 +index 000000000000..a4bb8f1b0e17 +--- /dev/null ++++ b/Documentation/devicetree/bindings/iio/adc/thead,th1520-adc.yaml +@@ -0,0 +1,52 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/iio/adc/xuantie,th1520-adc.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: XuanTie TH1520 Analog to Digital Converter (ADC) ++ ++maintainers: ++ - Fugang Duan ++ - Xiangyi Zeng ++ - Wei Fu ++ ++description: | ++ 12-Bit Analog to Digital Converter (ADC) on XuanTie TH1520 ++properties: ++ compatible: ++ const: xuantie,th1520-adc ++ ++ reg: ++ maxItems: 1 ++ ++ interrupts: ++ maxItems: 1 ++ ++ clocks: ++ maxItems: 1 ++ ++ clock-names: ++ const: adc ++ ++required: ++ - compatible ++ - reg ++ - interrupts ++ - clocks ++ - clock-names ++ - status ++ ++additionalProperties: false ++ ++examples: ++ - | ++ adc: adc@fffff51000 { ++ compatible = "xuantie,th1520-adc"; ++ reg = <0xff 0xfff51000 0x0 0x1000>; ++ interrupts = <61 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&aonsys_clk>; ++ clock-names = "adc"; ++ /* ADC pin is proprietary, no need to configure pinctrl */ ++ status = "disabled"; ++ }; +diff --git a/Documentation/devicetree/bindings/iio/adc/xuantie,th1520-adc.yaml b/Documentation/devicetree/bindings/iio/adc/xuantie,th1520-adc.yaml +new file mode 100644 +index 000000000000..a4bb8f1b0e17 +--- /dev/null ++++
b/Documentation/devicetree/bindings/iio/adc/xuantie,th1520-adc.yaml +@@ -0,0 +1,52 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/iio/adc/xuantie,th1520-adc.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: XuanTie TH1520 Analog to Digital Converter (ADC) ++ ++maintainers: ++ - Fugang Duan ++ - Xiangyi Zeng ++ - Wei Fu ++ ++description: | ++ 12-Bit Analog to Digital Converter (ADC) on XuanTie TH1520 ++properties: ++ compatible: ++ const: xuantie,th1520-adc ++ ++ reg: ++ maxItems: 1 ++ ++ interrupts: ++ maxItems: 1 ++ ++ clocks: ++ maxItems: 1 ++ ++ clock-names: ++ const: adc ++ ++required: ++ - compatible ++ - reg ++ - interrupts ++ - clocks ++ - clock-names ++ - status ++ ++additionalProperties: false ++ ++examples: ++ - | ++ adc: adc@fffff51000 { ++ compatible = "xuantie,th1520-adc"; ++ reg = <0xff 0xfff51000 0x0 0x1000>; ++ interrupts = <61 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&aonsys_clk>; ++ clock-names = "adc"; ++ /* ADC pin is proprietary, no need to configure pinctrl */ ++ status = "disabled"; ++ }; +diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml b/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml +new file mode 100644 +index 000000000000..190a6499c932 +--- /dev/null ++++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml +@@ -0,0 +1,172 @@ ++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/interrupt-controller/riscv,aplic.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: RISC-V Advanced Platform Level Interrupt Controller (APLIC) ++ ++maintainers: ++ - Anup Patel ++ ++description: ++ The RISC-V advanced interrupt architecture (AIA) defines an advanced ++ platform level interrupt controller (APLIC) for handling wired interrupts ++ in a RISC-V platform. The RISC-V AIA specification can be found at ++ https://github.com/riscv/riscv-aia. ++ ++ The RISC-V APLIC is implemented as hierarchical APLIC domains where all ++ interrupt sources connect to the root APLIC domain and a parent APLIC ++ domain can delegate interrupt sources to its child APLIC domains. There ++ is one device tree node for each APLIC domain. ++ ++allOf: ++ - $ref: /schemas/interrupt-controller.yaml# ++ ++properties: ++ compatible: ++ items: ++ - enum: ++ - qemu,aplic ++ - const: riscv,aplic ++ ++ reg: ++ maxItems: 1 ++ ++ interrupt-controller: true ++ ++ "#interrupt-cells": ++ const: 2 ++ ++ interrupts-extended: ++ minItems: 1 ++ maxItems: 16384 ++ description: ++ Given APLIC domain directly injects external interrupts to a set of ++ RISC-V HARTs (or CPUs). Each node pointed to should be a riscv,cpu-intc ++ node, which has a CPU node (i.e. RISC-V HART) as parent. ++ ++ msi-parent: ++ description: ++ Given APLIC domain forwards wired interrupts as MSIs to an AIA incoming ++ message signaled interrupt controller (IMSIC). If both "msi-parent" and ++ "interrupts-extended" properties are present then it means the APLIC ++ domain supports both MSI mode and Direct mode in HW. In this case, the ++ APLIC driver has to choose between MSI mode or Direct mode. ++ ++ riscv,num-sources: ++ $ref: /schemas/types.yaml#/definitions/uint32 ++ minimum: 1 ++ maximum: 1023 ++ description: ++ Specifies the number of wired interrupt sources supported by this ++ APLIC domain.
++
++  riscv,children:
++    $ref: /schemas/types.yaml#/definitions/phandle-array
++    minItems: 1
++    maxItems: 1024
++    items:
++      maxItems: 1
++    description:
++      A list of child APLIC domains for the given APLIC domain. Each child
++      APLIC domain is assigned a child index in increasing order, with the
++      first child APLIC domain assigned child index 0. The APLIC domain child
++      index is used by firmware to delegate interrupts from the given APLIC
++      domain to a particular child APLIC domain.
++
++  riscv,delegation:
++    $ref: /schemas/types.yaml#/definitions/phandle-array
++    minItems: 1
++    maxItems: 1024
++    items:
++      items:
++        - description: child APLIC domain phandle
++        - description: first interrupt number of the parent APLIC domain (inclusive)
++        - description: last interrupt number of the parent APLIC domain (inclusive)
++    description:
++      An interrupt delegation list where each entry is a triple consisting
++      of a child APLIC domain phandle, the first interrupt number of the
++      parent APLIC domain, and the last interrupt number of the parent APLIC
++      domain. Firmware must configure the interrupt delegation registers
++      based on the interrupt delegation list.
++
++dependencies:
++  riscv,delegation: [ "riscv,children" ]
++
++required:
++  - compatible
++  - reg
++  - interrupt-controller
++  - "#interrupt-cells"
++  - riscv,num-sources
++
++anyOf:
++  - required:
++      - interrupts-extended
++  - required:
++      - msi-parent
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    // Example 1 (APLIC domains directly injecting interrupt to HARTs):
++
++    interrupt-controller@c000000 {
++        compatible = "qemu,aplic", "riscv,aplic";
++        interrupts-extended = <&cpu1_intc 11>,
++                              <&cpu2_intc 11>,
++                              <&cpu3_intc 11>,
++                              <&cpu4_intc 11>;
++        reg = <0xc000000 0x4080>;
++        interrupt-controller;
++        #interrupt-cells = <2>;
++        riscv,num-sources = <63>;
++        riscv,children = <&aplic1>, <&aplic2>;
++        riscv,delegation = <&aplic1 1 63>;
++    };
++
++    aplic1: interrupt-controller@d000000 {
++        compatible = "qemu,aplic", "riscv,aplic";
++        interrupts-extended = <&cpu1_intc 9>,
++                              <&cpu2_intc 9>;
++        reg = <0xd000000 0x4080>;
++        interrupt-controller;
++        #interrupt-cells = <2>;
++        riscv,num-sources = <63>;
++    };
++
++    aplic2: interrupt-controller@e000000 {
++        compatible = "qemu,aplic", "riscv,aplic";
++        interrupts-extended = <&cpu3_intc 9>,
++                              <&cpu4_intc 9>;
++        reg = <0xe000000 0x4080>;
++        interrupt-controller;
++        #interrupt-cells = <2>;
++        riscv,num-sources = <63>;
++    };
++
++  - |
++    // Example 2 (APLIC domains forwarding interrupts as MSIs):
++
++    interrupt-controller@c000000 {
++        compatible = "qemu,aplic", "riscv,aplic";
++        msi-parent = <&imsic_mlevel>;
++        reg = <0xc000000 0x4000>;
++        interrupt-controller;
++        #interrupt-cells = <2>;
++        riscv,num-sources = <63>;
++        riscv,children = <&aplic3>;
++        riscv,delegation = <&aplic3 1 63>;
++    };
++
++    aplic3: interrupt-controller@d000000 {
++        compatible = "qemu,aplic", "riscv,aplic";
++        msi-parent = <&imsic_slevel>;
++        reg = <0xd000000 0x4000>;
++        interrupt-controller;
++        #interrupt-cells = <2>;
++        riscv,num-sources = <63>;
++    };
++...
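++
++# A minimal sketch of how a consumer device would reference an APLIC domain
++# through the two-cell specifier defined above (the device node, addresses
++# and source number here are hypothetical, shown for illustration only):
++#
++#   serial@10000000 {
++#       compatible = "ns16550a";
++#       reg = <0x10000000 0x100>;
++#       interrupt-parent = <&aplic1>;
++#       interrupts = <10 4>;    /* wired source 10, IRQ_TYPE_LEVEL_HIGH */
++#   };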
+diff --git a/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml b/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml
+new file mode 100644
+index 000000000000..84976f17a4a1
+--- /dev/null
++++ b/Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml
+@@ -0,0 +1,172 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/interrupt-controller/riscv,imsics.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: RISC-V Incoming MSI Controller (IMSIC)
++
++maintainers:
++  - Anup Patel
++
++description: |
++  The RISC-V advanced interrupt architecture (AIA) defines a per-CPU incoming
++  MSI controller (IMSIC) for handling MSIs in a RISC-V platform. The RISC-V
++  AIA specification can be found at https://github.com/riscv/riscv-aia.
++
++  The IMSIC is a per-CPU (or per-HART) device with a separate interrupt file
++  for each privilege level (machine or supervisor). The configuration of
++  an IMSIC interrupt file is done using AIA CSRs, and it also has a 4KB MMIO
++  space to receive MSIs from devices. Each IMSIC interrupt file supports a
++  fixed number of interrupt identities (to distinguish MSIs from devices)
++  which is the same for a given privilege level across CPUs (or HARTs).
++
++  The device tree of a RISC-V platform will have one IMSIC device tree node
++  for each privilege level (machine or supervisor); together these nodes
++  describe the IMSIC interrupt files at that privilege level across CPUs
++  (or HARTs).
++
++  The arrangement of IMSIC interrupt files in the MMIO space of a RISC-V
++  platform follows a particular scheme defined by the RISC-V AIA
++  specification. An IMSIC group is a set of IMSIC interrupt files co-located
++  in MMIO space, and we can have multiple IMSIC groups (i.e. clusters,
++  sockets, chiplets, etc) in a RISC-V platform. The MSI target address of an
++  IMSIC interrupt file at a given privilege level (machine or supervisor)
++  encodes group index, HART index, and guest index (shown below).
++
++  XLEN-1 >       (HART Index MSB)               12    0
++  |              |                              |     |
++  -------------------------------------------------------------
++  |xxxxxx|Group Index|xxxxxxxxxxx|HART Index|Guest Index|  0  |
++  -------------------------------------------------------------
++
++allOf:
++  - $ref: /schemas/interrupt-controller.yaml#
++  - $ref: /schemas/interrupt-controller/msi-controller.yaml#
++
++properties:
++  compatible:
++    items:
++      - enum:
++          - qemu,imsics
++      - const: riscv,imsics
++
++  reg:
++    minItems: 1
++    maxItems: 16384
++    description:
++      Base address of each IMSIC group.
++
++  interrupt-controller: true
++
++  "#interrupt-cells":
++    const: 0
++
++  msi-controller: true
++
++  "#msi-cells":
++    const: 0
++
++  interrupts-extended:
++    minItems: 1
++    maxItems: 16384
++    description:
++      This property represents the set of CPUs (or HARTs) for which the given
++      device tree node describes the IMSIC interrupt files. Each node pointed
++      to should be a riscv,cpu-intc node, which has a CPU node (i.e. RISC-V
++      HART) as parent.
++
++  riscv,num-ids:
++    $ref: /schemas/types.yaml#/definitions/uint32
++    minimum: 63
++    maximum: 2047
++    description:
++      Number of interrupt identities supported by an IMSIC interrupt file.
++
++  riscv,num-guest-ids:
++    $ref: /schemas/types.yaml#/definitions/uint32
++    minimum: 63
++    maximum: 2047
++    description:
++      Number of interrupt identities supported by an IMSIC guest interrupt
++      file. When not specified, it is assumed to be the same as specified by
++      the riscv,num-ids property.
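++
++  # Worked example of the MSI target address encoding described above, using
++  # the values from Example 2 at the end of this file: with
++  # riscv,group-index-bits = <1>, riscv,group-index-shift = <24> and zero
++  # guest index bits, the interrupt files of group 1 sit at
++  # base + (1 << 24), which is why the two reg entries in that example are
++  # 0x1000000 apart (0x28000000 and 0x29000000).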
++
++  riscv,guest-index-bits:
++    minimum: 0
++    maximum: 7
++    default: 0
++    description:
++      Number of guest index bits in the MSI target address.
++
++  riscv,hart-index-bits:
++    minimum: 0
++    maximum: 15
++    description:
++      Number of HART index bits in the MSI target address. When not
++      specified it is calculated based on the interrupts-extended property.
++
++  riscv,group-index-bits:
++    minimum: 0
++    maximum: 7
++    default: 0
++    description:
++      Number of group index bits in the MSI target address.
++
++  riscv,group-index-shift:
++    $ref: /schemas/types.yaml#/definitions/uint32
++    minimum: 0
++    maximum: 55
++    default: 24
++    description:
++      The least significant bit position of the group index bits in the
++      MSI target address.
++
++required:
++  - compatible
++  - reg
++  - interrupt-controller
++  - msi-controller
++  - "#msi-cells"
++  - interrupts-extended
++  - riscv,num-ids
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    // Example 1 (Machine-level IMSIC files with just one group):
++
++    interrupt-controller@28000000 {
++        compatible = "qemu,imsics", "riscv,imsics";
++        interrupts-extended = <&cpu1_intc 11>,
++                              <&cpu2_intc 11>,
++                              <&cpu3_intc 11>,
++                              <&cpu4_intc 11>;
++        reg = <0x28000000 0x4000>;
++        interrupt-controller;
++        #interrupt-cells = <0>;
++        msi-controller;
++        #msi-cells = <0>;
++        riscv,num-ids = <127>;
++    };
++
++  - |
++    // Example 2 (Supervisor-level IMSIC files with two groups):
++
++    interrupt-controller@28000000 {
++        compatible = "qemu,imsics", "riscv,imsics";
++        interrupts-extended = <&cpu1_intc 9>,
++                              <&cpu2_intc 9>,
++                              <&cpu3_intc 9>,
++                              <&cpu4_intc 9>;
++        reg = <0x28000000 0x2000>, /* Group0 IMSICs */
++              <0x29000000 0x2000>; /* Group1 IMSICs */
++        interrupt-controller;
++        #interrupt-cells = <0>;
++        msi-controller;
++        #msi-cells = <0>;
++        riscv,num-ids = <127>;
++        riscv,group-index-bits = <1>;
++        riscv,group-index-shift = <24>;
++    };
++...
+diff --git a/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml
+new file mode 100644
+index 000000000000..8d330906bbbd
+--- /dev/null
++++ b/Documentation/devicetree/bindings/interrupt-controller/thead,c900-aclint-sswi.yaml
+@@ -0,0 +1,58 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/interrupt-controller/thead,c900-aclint-sswi.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: T-HEAD C900 ACLINT Supervisor-level Software Interrupt Device
++
++maintainers:
++  - Inochi Amaoto
++
++description:
++  The SSWI device is a part of the T-HEAD ACLINT device. It provides
++  supervisor-level IPI functionality for a set of HARTs on a T-HEAD
++  platform. It provides a register to set an IPI (SETSSIP) for each
++  HART connected to the SSWI device.
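++
++# Each interrupts-extended entry below points at a hart's riscv,cpu-intc
++# node with cause 1, the supervisor software interrupt that is raised when
++# the corresponding SETSSIP register is written (see the example at the
++# bottom of this file).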
++
++properties:
++  compatible:
++    items:
++      - enum:
++          - sophgo,sg2044-aclint-sswi
++      - const: thead,c900-aclint-sswi
++
++  reg:
++    maxItems: 1
++
++  "#interrupt-cells":
++    const: 0
++
++  interrupt-controller: true
++
++  interrupts-extended:
++    minItems: 1
++    maxItems: 4095
++
++additionalProperties: false
++
++required:
++  - compatible
++  - reg
++  - "#interrupt-cells"
++  - interrupt-controller
++  - interrupts-extended
++
++examples:
++  - |
++    interrupt-controller@94000000 {
++        compatible = "sophgo,sg2044-aclint-sswi", "thead,c900-aclint-sswi";
++        reg = <0x94000000 0x00004000>;
++        #interrupt-cells = <0>;
++        interrupt-controller;
++        interrupts-extended = <&cpu1intc 1>,
++                              <&cpu2intc 1>,
++                              <&cpu3intc 1>,
++                              <&cpu4intc 1>;
++    };
++...
+diff --git a/Documentation/devicetree/bindings/iommu/riscv,iommu.yaml b/Documentation/devicetree/bindings/iommu/riscv,iommu.yaml
+new file mode 100644
+index 000000000000..5d015eeb06d0
+--- /dev/null
++++ b/Documentation/devicetree/bindings/iommu/riscv,iommu.yaml
+@@ -0,0 +1,147 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/iommu/riscv,iommu.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: RISC-V IOMMU Architecture Implementation
++
++maintainers:
++  - Tomasz Jeznach
++
++description: |
++  The RISC-V IOMMU provides memory address translation and isolation for
++  input and output devices, supporting per-device translation context,
++  shared process address spaces including the ATS and PRI components of
++  the PCIe specification, two-stage address translation and MSI remapping.
++  It supports a translation table format identical to the RISC-V address
++  translation tables, with page-level access and protection attributes.
++  Hardware uses in-memory command and fault reporting queues with wired
++  interrupt or MSI notifications.
++
++  Visit https://github.com/riscv-non-isa/riscv-iommu for more details.
++
++  For information on assigning the RISC-V IOMMU to its peripheral devices,
++  see generic IOMMU bindings.
++
++properties:
++  # For PCIe IOMMU hardware the compatible property should contain the vendor
++  # and device ID according to the PCI Bus Binding specification.
++  # Since PCI provides built-in identification methods, compatible is not
++  # actually required. For non-PCIe hardware implementations 'riscv,iommu'
++  # should be specified along with a 'reg' property providing the MMIO
++  # location.
++  compatible:
++    oneOf:
++      - items:
++          - enum:
++              - qemu,riscv-iommu
++          - const: riscv,iommu
++      - items:
++          - enum:
++              - pci1efd,edf1
++          - const: riscv,pci-iommu
++
++  reg:
++    maxItems: 1
++    description:
++      For non-PCI devices this represents the base address and size of the
++      IOMMU memory-mapped register interface.
++      For PCI IOMMU hardware implementations this should represent the
++      address of the IOMMU, as defined in the PCI Bus Binding reference.
++
++  '#iommu-cells':
++    const: 1
++    description:
++      The single cell describes the requester id emitted by a master to the
++      IOMMU.
++
++  interrupts:
++    minItems: 1
++    maxItems: 4
++    description:
++      Wired interrupt vectors available for the RISC-V IOMMU to notify the
++      RISC-V HARTs. The mapping of interrupt causes to vectors is software
++      defined using the IVEC IOMMU register.
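++
++  # The up-to-four vectors typically serve the command-queue, fault-queue,
++  # page-request-queue and performance-monitoring interrupt causes; the
++  # IVEC register maps each cause to one of the wired vectors.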
++
++  msi-parent: true
++
++  power-domains:
++    maxItems: 1
++
++required:
++  - compatible
++  - reg
++  - '#iommu-cells'
++
++additionalProperties: false
++
++examples:
++  - |+
++    /* Example 1 (IOMMU device with wired interrupts) */
++    #include <dt-bindings/interrupt-controller/irq.h>
++
++    iommu1: iommu@1bccd000 {
++        compatible = "qemu,riscv-iommu", "riscv,iommu";
++        reg = <0x1bccd000 0x1000>;
++        interrupt-parent = <&aplic_smode>;
++        interrupts = <32 IRQ_TYPE_LEVEL_HIGH>,
++                     <33 IRQ_TYPE_LEVEL_HIGH>,
++                     <34 IRQ_TYPE_LEVEL_HIGH>,
++                     <35 IRQ_TYPE_LEVEL_HIGH>;
++        #iommu-cells = <1>;
++    };
++
++    /* Device with two IOMMU device IDs, 0 and 7 */
++    master1 {
++        iommus = <&iommu1 0>, <&iommu1 7>;
++    };
++
++  - |+
++    /* Example 2 (IOMMU device with shared wired interrupt) */
++    #include <dt-bindings/interrupt-controller/irq.h>
++
++    iommu2: iommu@1bccd000 {
++        compatible = "qemu,riscv-iommu", "riscv,iommu";
++        reg = <0x1bccd000 0x1000>;
++        interrupt-parent = <&aplic_smode>;
++        interrupts = <32 IRQ_TYPE_LEVEL_HIGH>;
++        #iommu-cells = <1>;
++    };
++
++  - |+
++    /* Example 3 (IOMMU device with MSIs) */
++    iommu3: iommu@1bcdd000 {
++        compatible = "qemu,riscv-iommu", "riscv,iommu";
++        reg = <0x1bcdd000 0x1000>;
++        msi-parent = <&imsics_smode>;
++        #iommu-cells = <1>;
++    };
++
++  - |+
++    /* Example 4 (IOMMU PCIe device with MSIs) */
++    bus {
++        #address-cells = <2>;
++        #size-cells = <2>;
++
++        pcie@30000000 {
++            device_type = "pci";
++            #address-cells = <3>;
++            #size-cells = <2>;
++            reg = <0x0 0x30000000 0x0 0x1000000>;
++            ranges = <0x02000000 0x0 0x41000000 0x0 0x41000000 0x0 0x0f000000>;
++
++            /*
++             * The IOMMU manages all functions in this PCI domain except
++             * itself. Omit BDF 00:01.0.
++             */
++            iommu-map = <0x0 &iommu0 0x0 0x8>,
++                        <0x9 &iommu0 0x9 0xfff7>;
++
++            /* The IOMMU programming interface uses slot 00:01.0 */
++            iommu0: iommu@1,0 {
++                compatible = "pci1efd,edf1", "riscv,pci-iommu";
++                reg = <0x800 0 0 0 0>;
++                #iommu-cells = <1>;
++            };
++        };
++    };
+diff --git a/Documentation/devicetree/bindings/mailbox/xuantie-th1520-mailbox.txt b/Documentation/devicetree/bindings/mailbox/xuantie-th1520-mailbox.txt
+new file mode 100644
+index 000000000000..e93195bdb651
+--- /dev/null
++++ b/Documentation/devicetree/bindings/mailbox/xuantie-th1520-mailbox.txt
+@@ -0,0 +1,57 @@
++XuanTie TH1520 mailbox controller
++--------------------------------------------------------------------
++
++The XuanTie TH1520 mailbox controller enables two cores within the SoC to
++communicate and coordinate by passing messages (e.g. data, status
++and control) through mailbox channels. It also provides the ability
++for one core to signal the other processor using interrupts.
++
++TH1520 mailbox Device Node:
++=============================
++
++Required properties:
++-------------------
++- compatible : Should be "xuantie,th1520-mbox".
++- reg : Should contain the registers location and length
++- interrupts : Interrupt number. The interrupt specifier format depends
++  on the interrupt controller parent.
++- icu_cpu_id : Should be the ICU CPU ID defined in SOC.
++- #mbox-cells: Must be 2.
++	<&phandle type channel>
++	  phandle : Label name of controller
++	  type    : Channel type
++	  channel : Channel number
++
++  The TH1520 mailbox supports 4 channels and 2 types.
++  channels supported:
++    4 channels: 0, 1, 2, 3
++
++  types supported:
++    0 - TX & RX share the same channel, with 7 info registers to
++        carry data, and with IRQ support.
++    1 - TX & RX doorbell channel, without its own info registers and
++        without ACK support.
++
++Optional properties:
++-------------------
++- clocks : phandle to the input clock.
++
++Examples:
++--------
++mbox_910t: mbox@ffffc38000 {
++	compatible = "xuantie,th1520-mbox";
++	reg = <0xff 0xffc38000 0x0 0x4000>,
++	      <0xff 0xffc44000 0x0 0x1000>,
++	      <0xff 0xffc4c000 0x0 0x1000>,
++	      <0xff 0xffc54000 0x0 0x1000>;
++	reg-names = "local_base",
++		    "remote_icu0",
++		    "remote_icu1",
++		    "remote_icu2";
++	interrupt-parent = <&intc>;
++	interrupts = <28>;
++	clocks = <&dummy_clock_apb>;
++	clock-names = "ipg";
++	icu_cpu_id = <0>;
++	#mbox-cells = <2>;
++};
+diff --git a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
+index a43eb837f8da..d5941917dc4d 100644
+--- a/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
++++ b/Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
+@@ -19,6 +19,7 @@ properties:
+       - rockchip,rk3568-dwcmshc
+       - rockchip,rk3588-dwcmshc
+       - snps,dwcmshc-sdhci
++      - xuantie,th1520-dwcmshc
+ 
+   reg:
+     maxItems: 1
+diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+index 5c2769dc689a..1290ad38e127 100644
+--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
++++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+@@ -96,6 +96,7 @@ properties:
+         - snps,dwxgmac
+         - snps,dwxgmac-2.10
+         - starfive,jh7110-dwmac
++        - xuantie,th1520-dwmac
+ 
+   reg:
+     minItems: 1
+@@ -591,6 +592,7 @@ allOf:
+               - qcom,sa8775p-ethqos
+               - qcom,sc8280xp-ethqos
+               - snps,dwmac-3.50a
++              - snps,dwmac-3.70a
+               - snps,dwmac-4.10a
+               - snps,dwmac-4.20a
+               - snps,dwmac-5.20
+diff --git a/Documentation/devicetree/bindings/net/xuantie,dwmac.yaml b/Documentation/devicetree/bindings/net/xuantie,dwmac.yaml
+new file mode 100644
+index 000000000000..4eac1448f94b
+--- /dev/null
++++ b/Documentation/devicetree/bindings/net/xuantie,dwmac.yaml
+@@ -0,0 +1,77 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/net/xuantie,dwmac.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie TH1520 DWMAC Ethernet controller
++
++maintainers:
++  - Jisheng Zhang
++
++select:
++  properties:
++    compatible:
++      contains:
++        enum:
++          - xuantie,th1520-dwmac
++  required:
++    - compatible
++
++properties:
++  compatible:
++    items:
++      - enum:
++          - xuantie,th1520-dwmac
++      - const: snps,dwmac-3.70a
++
++  reg:
++    maxItems: 1
++
++  th1520,gmacapb:
++    $ref: /schemas/types.yaml#/definitions/phandle
++    description:
++      The phandle of the syscon node that controls the ethernet
++      interface and timing delay.
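++
++  # The referenced node is expected to be a plain syscon covering the GMAC
++  # APB registers; a minimal sketch (the compatible string and addresses
++  # here are hypothetical, shown for illustration only):
++  #
++  #   gmacapb_syscon: syscon@ec003000 {
++  #       compatible = "xuantie,th1520-gmac-apb", "syscon";
++  #       reg = <0xec003000 0x1000>;
++  #   };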
++
++required:
++  - compatible
++  - reg
++  - clocks
++  - clock-names
++  - interrupts
++  - interrupt-names
++  - phy-mode
++  - th1520,gmacapb
++
++allOf:
++  - $ref: snps,dwmac.yaml#
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    gmac0: ethernet@e7070000 {
++        compatible = "xuantie,th1520-dwmac";
++        reg = <0xe7070000 0x2000>;
++        clocks = <&clk 1>, <&clk 2>;
++        clock-names = "stmmaceth", "pclk";
++        interrupts = <66>;
++        interrupt-names = "macirq";
++        phy-mode = "rgmii-id";
++        snps,fixed-burst;
++        snps,axi-config = <&stmmac_axi_setup>;
++        snps,pbl = <32>;
++        th1520,gmacapb = <&gmacapb_syscon>;
++        phy-handle = <&phy0>;
++
++        mdio {
++            #address-cells = <1>;
++            #size-cells = <0>;
++            compatible = "snps,dwmac-mdio";
++
++            phy0: ethernet-phy@0 {
++                reg = <0>;
++            };
++        };
++    };
+diff --git a/Documentation/devicetree/bindings/nvmem/xuantie,th1520-efuse.txt b/Documentation/devicetree/bindings/nvmem/xuantie,th1520-efuse.txt
+new file mode 100644
+index 000000000000..f99d6913ef86
+--- /dev/null
++++ b/Documentation/devicetree/bindings/nvmem/xuantie,th1520-efuse.txt
+@@ -0,0 +1,18 @@
++= XuanTie TH1520-EFUSE device tree bindings =
++
++This binding is intended to represent the TH1520-EFUSE, which is found in
++XuanTie SoCs.
++
++Required properties:
++- compatible: should be
++  "xuantie,th1520-efuse": for TH1520 fullmask
++- reg: should contain registers location and length
++- xuantie,secsys: the syscon phandle to read and write sec system registers
++
++Example:
++
++	nvmem_controller: efuse@ffff210000 {
++		compatible = "xuantie,th1520-efuse";
++		reg = <0xff 0xff210000 0x0 0x10000>;
++		xuantie,secsys = <&secsys_reg>;
++	};
+diff --git a/Documentation/devicetree/bindings/pinctrl/thead,th1520-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/thead,th1520-pinctrl.yaml
+new file mode 100644
+index 000000000000..12a23f185577
+--- /dev/null
++++ b/Documentation/devicetree/bindings/pinctrl/thead,th1520-pinctrl.yaml
+@@ -0,0 +1,374 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/pinctrl/thead,th1520-pinctrl.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie TH1520 SoC pin controller
++
++maintainers:
++  - Emil Renner Berthing
++
++description: |
++  Pinmux and pinconf controller in the XuanTie TH1520 RISC-V SoC.
++
++  The TH1520 has 4 groups of pads, each controlled from different memory
++  ranges. Confusingly the memory ranges are named
++    PADCTRL_AOSYS     -> PAD Group 1
++    PADCTRL1_APSYS    -> PAD Group 2
++    PADCTRL0_APSYS    -> PAD Group 3
++    PADCTRL_AUDIOSYS  -> PAD Group 4
++
++  Each pad can be muxed individually to up to 6 different functions. For most
++  pads only a few of those 6 configurations are valid though, and a few pads
++  in group 1 do not support muxing at all.
++
++  Pinconf is fairly regular except for a few pads in group 1 that either
++  can't be configured or have some special functions. The rest have
++  configurable drive strength, input enable, schmitt trigger, slew rate,
++  pull-up and pull-down in addition to a special strong pull-up.
++
++  Certain pads in group 1 can be muxed to AUDIO_PA0 - AUDIO_PA30 functions
++  and are then meant to be used by the audio co-processor. Each such pad can
++  then be further muxed to either audio GPIO or one of 4 functions such as
++  UART, I2C and I2S. If the audio pad is muxed to one of the 4 functions
++  then pinconf is also configured in different registers.
All of this is done from a different ++ AUDIO_IOCTRL memory range and is left to the audio co-processor for now. ++ ++properties: ++ compatible: ++ enum: ++ - thead,th1520-group1-pinctrl ++ - thead,th1520-group2-pinctrl ++ - thead,th1520-group3-pinctrl ++ - thead,th1520-group4-pinctrl ++ ++ reg: ++ maxItems: 1 ++ ++ clocks: ++ maxItems: 1 ++ ++patternProperties: ++ '-[0-9]+$': ++ type: object ++ ++ additionalProperties: false ++ ++ patternProperties: ++ '-pins$': ++ type: object ++ $ref: /schemas/pinctrl/pincfg-node.yaml ++ ++ additionalProperties: false ++ ++ description: ++ A pinctrl node should contain at least one subnode describing one ++ or more pads and their associated pinmux and pinconf settings. ++ ++ properties: ++ pins: ++ $ref: /schemas/pinctrl/pinmux-node.yaml#/properties/pins ++ description: List of pads that properties in the node apply to. ++ ++ function: ++ $ref: /schemas/pinctrl/pinmux-node.yaml#/properties/function ++ enum: [ gpio, pwm, uart, ir, i2c, spi, qspi, sdio, audio, i2s, ++ gmac0, gmac1, dpu0, dpu1, isp, hdmi, bootsel, debug, ++ clock, jtag, iso7816, efuse, reset ] ++ description: The mux function to select for the given pins. ++ ++ bias-disable: true ++ ++ bias-pull-up: ++ oneOf: ++ - type: boolean ++ description: Enable the regular 48kOhm pull-up ++ - enum: [ 2100, 48000 ] ++ description: Enable the strong 2.1kOhm pull-up or regular 48kOhm pull-up ++ ++ bias-pull-down: ++ oneOf: ++ - type: boolean ++ - const: 44000 ++ description: Enable the regular 44kOhm pull-down ++ ++ drive-strength: ++ enum: [ 1, 2, 3, 5, 7, 8, 10, 12, 13, 15, 16, 18, 20, 21, 23, 25 ] ++ description: Drive strength in mA ++ ++ input-enable: true ++ ++ input-disable: true ++ ++ input-schmitt-enable: true ++ ++ input-schmitt-disable: true ++ ++ slew-rate: ++ maximum: 1 ++ ++ required: ++ - pins ++ ++required: ++ - compatible ++ - reg ++ - clocks ++ ++additionalProperties: false ++ ++allOf: ++ - $ref: pinctrl.yaml# ++ - if: ++ properties: ++ compatible: ++ const: thead,th1520-group1-pinctrl ++ then: ++ patternProperties: ++ '-[0-9]+$': ++ patternProperties: ++ '-pins$': ++ properties: ++ pins: ++ items: ++ enum: ++ - OSC_CLK_IN ++ - OSC_CLK_OUT ++ - SYS_RST_N ++ - RTC_CLK_IN ++ - RTC_CLK_OUT ++ - TEST_MODE ++ - DEBUG_MODE ++ - POR_SEL ++ - I2C_AON_SCL ++ - I2C_AON_SDA ++ - CPU_JTG_TCLK ++ - CPU_JTG_TMS ++ - CPU_JTG_TDI ++ - CPU_JTG_TDO ++ - CPU_JTG_TRST ++ - AOGPIO_7 ++ - AOGPIO_8 ++ - AOGPIO_9 ++ - AOGPIO_10 ++ - AOGPIO_11 ++ - AOGPIO_12 ++ - AOGPIO_13 ++ - AOGPIO_14 ++ - AOGPIO_15 ++ - AUDIO_PA0 ++ - AUDIO_PA1 ++ - AUDIO_PA2 ++ - AUDIO_PA3 ++ - AUDIO_PA4 ++ - AUDIO_PA5 ++ - AUDIO_PA6 ++ - AUDIO_PA7 ++ - AUDIO_PA8 ++ - AUDIO_PA9 ++ - AUDIO_PA10 ++ - AUDIO_PA11 ++ - AUDIO_PA12 ++ - AUDIO_PA13 ++ - AUDIO_PA14 ++ - AUDIO_PA15 ++ - AUDIO_PA16 ++ - AUDIO_PA17 ++ - AUDIO_PA27 ++ - AUDIO_PA28 ++ - AUDIO_PA29 ++ - AUDIO_PA30 ++ - if: ++ properties: ++ compatible: ++ const: thead,th1520-group2-pinctrl ++ then: ++ patternProperties: ++ '-[0-9]+$': ++ patternProperties: ++ '-pins$': ++ properties: ++ pins: ++ items: ++ enum: ++ - QSPI1_SCLK ++ - QSPI1_CSN0 ++ - QSPI1_D0_MOSI ++ - QSPI1_D1_MISO ++ - QSPI1_D2_WP ++ - QSPI1_D3_HOLD ++ - I2C0_SCL ++ - I2C0_SDA ++ - I2C1_SCL ++ - I2C1_SDA ++ - UART1_TXD ++ - UART1_RXD ++ - UART4_TXD ++ - UART4_RXD ++ - UART4_CTSN ++ - UART4_RTSN ++ - UART3_TXD ++ - UART3_RXD ++ - GPIO0_18 ++ - GPIO0_19 ++ - GPIO0_20 ++ - GPIO0_21 ++ - GPIO0_22 ++ - GPIO0_23 ++ - GPIO0_24 ++ - GPIO0_25 ++ - GPIO0_26 ++ - GPIO0_27 ++ - GPIO0_28 ++ - GPIO0_29 ++ - GPIO0_30 ++ - GPIO0_31 ++ - 
GPIO1_0
++                      - GPIO1_1
++                      - GPIO1_2
++                      - GPIO1_3
++                      - GPIO1_4
++                      - GPIO1_5
++                      - GPIO1_6
++                      - GPIO1_7
++                      - GPIO1_8
++                      - GPIO1_9
++                      - GPIO1_10
++                      - GPIO1_11
++                      - GPIO1_12
++                      - GPIO1_13
++                      - GPIO1_14
++                      - GPIO1_15
++                      - GPIO1_16
++                      - CLK_OUT_0
++                      - CLK_OUT_1
++                      - CLK_OUT_2
++                      - CLK_OUT_3
++                      - GPIO1_21
++                      - GPIO1_22
++                      - GPIO1_23
++                      - GPIO1_24
++                      - GPIO1_25
++                      - GPIO1_26
++                      - GPIO1_27
++                      - GPIO1_28
++                      - GPIO1_29
++                      - GPIO1_30
++  - if:
++      properties:
++        compatible:
++          const: thead,th1520-group3-pinctrl
++    then:
++      patternProperties:
++        '-[0-9]+$':
++          patternProperties:
++            '-pins$':
++              properties:
++                pins:
++                  items:
++                    enum:
++                      - UART0_TXD
++                      - UART0_RXD
++                      - QSPI0_SCLK
++                      - QSPI0_CSN0
++                      - QSPI0_CSN1
++                      - QSPI0_D0_MOSI
++                      - QSPI0_D1_MISO
++                      - QSPI0_D2_WP
++                      - QSPI0_D3_HOLD
++                      - I2C2_SCL
++                      - I2C2_SDA
++                      - I2C3_SCL
++                      - I2C3_SDA
++                      - GPIO2_13
++                      - SPI_SCLK
++                      - SPI_CSN
++                      - SPI_MOSI
++                      - SPI_MISO
++                      - GPIO2_18
++                      - GPIO2_19
++                      - GPIO2_20
++                      - GPIO2_21
++                      - GPIO2_22
++                      - GPIO2_23
++                      - GPIO2_24
++                      - GPIO2_25
++                      - SDIO0_WPRTN
++                      - SDIO0_DETN
++                      - SDIO1_WPRTN
++                      - SDIO1_DETN
++                      - GPIO2_30
++                      - GPIO2_31
++                      - GPIO3_0
++                      - GPIO3_1
++                      - GPIO3_2
++                      - GPIO3_3
++                      - HDMI_SCL
++                      - HDMI_SDA
++                      - HDMI_CEC
++                      - GMAC0_TX_CLK
++                      - GMAC0_RX_CLK
++                      - GMAC0_TXEN
++                      - GMAC0_TXD0
++                      - GMAC0_TXD1
++                      - GMAC0_TXD2
++                      - GMAC0_TXD3
++                      - GMAC0_RXDV
++                      - GMAC0_RXD0
++                      - GMAC0_RXD1
++                      - GMAC0_RXD2
++                      - GMAC0_RXD3
++                      - GMAC0_MDC
++                      - GMAC0_MDIO
++                      - GMAC0_COL
++                      - GMAC0_CRS
++
++examples:
++  - |
++    padctrl0_apsys: pinctrl@ec007000 {
++        compatible = "thead,th1520-group3-pinctrl";
++        reg = <0xec007000 0x1000>;
++        clocks = <&apb_clk>;
++
++        uart0_pins: uart0-0 {
++            tx-pins {
++                pins = "UART0_TXD";
++                function = "uart";
++                bias-disable;
++                drive-strength = <3>;
++                input-disable;
++                input-schmitt-disable;
++                slew-rate = <0>;
++            };
++
++            rx-pins {
++                pins = "UART0_RXD";
++                function = "uart";
++                bias-disable;
++                drive-strength = <1>;
++                input-enable;
++                input-schmitt-enable;
++                slew-rate = <0>;
++            };
++        };
++    };
++
++    padctrl1_apsys: pinctrl@e7f3c000 {
++        compatible = "thead,th1520-group2-pinctrl";
++        reg = <0xe7f3c000 0x1000>;
++        clocks = <&apb_clk>;
++
++        i2c5_pins: i2c5-0 {
++            i2c-pins {
++                pins = "QSPI1_CSN0",    /* I2C5_SCL */
++                       "QSPI1_D0_MOSI"; /* I2C5_SDA */
++                function = "i2c";
++                bias-pull-up = <2100>;
++                drive-strength = <7>;
++                input-enable;
++                input-schmitt-enable;
++                slew-rate = <0>;
++            };
++        };
++    };
+diff --git a/Documentation/devicetree/bindings/pwm/xuantie,th1520-pwm.yaml b/Documentation/devicetree/bindings/pwm/xuantie,th1520-pwm.yaml
+new file mode 100644
+index 000000000000..087b0584887e
+--- /dev/null
++++ b/Documentation/devicetree/bindings/pwm/xuantie,th1520-pwm.yaml
+@@ -0,0 +1,44 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/pwm/xuantie,th1520-pwm.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie TH1520 PWM
++
++maintainers:
++  - Jisheng Zhang
++
++allOf:
++  - $ref: pwm.yaml#
++
++properties:
++  compatible:
++    enum:
++      - xuantie,th1520-pwm
++
++  reg:
++    maxItems: 1
++
++  clocks:
++    maxItems: 1
++
++  "#pwm-cells":
++    const: 3
++
++required:
++  - compatible
++  - reg
++  - clocks
++
++additionalProperties: false
++
++examples:
++  - |
++    pwm@ec01c000 {
++        compatible = "xuantie,th1520-pwm";
++        reg = <0xec01c000 0x1000>;
++        clocks = <&clk 1>;
++        #pwm-cells = <3>;
++    };
+diff --git a/Documentation/devicetree/bindings/reset/xuantie,th1520-reset.yaml b/Documentation/devicetree/bindings/reset/xuantie,th1520-reset.yaml
+new file mode 100644
+index 000000000000..6182bf93d2dc
+--- /dev/null
++++ b/Documentation/devicetree/bindings/reset/xuantie,th1520-reset.yaml
+@@ -0,0 +1,45 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/reset/xuantie,th1520-reset.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie TH1520 SoC Reset Controller
++
++maintainers:
++  - Kwanghoon Son
++  - Hengqiang Ming
++
++properties:
++  compatible:
++    items:
++      - const: xuantie,th1520-reset
++      - const: syscon
++
++  reg:
++    maxItems: 1
++
++  '#reset-cells':
++    const: 1
++
++required:
++  - compatible
++  - reg
++  - '#reset-cells'
++
++additionalProperties: false
++
++examples:
++  - |
++    soc {
++        #address-cells = <2>;
++        #size-cells = <2>;
++
++        reset-controller@ffef014000 {
++            compatible = "xuantie,th1520-reset", "syscon";
++            reg = <0xff 0xef014000 0x0 0x1000>;
++            #reset-cells = <1>;
++        };
++    };
+diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml
+index cc1f546fdbdc..36ff6749fbba 100644
+--- a/Documentation/devicetree/bindings/riscv/extensions.yaml
++++ b/Documentation/devicetree/bindings/riscv/extensions.yaml
+@@ -128,6 +128,12 @@ properties:
+             changes to interrupts as frozen at commit ccbddab ("Merge pull
+             request #42 from riscv/jhauser-2023-RC4") of riscv-aia.
+ 
++        - const: smstateen
++          description: |
++            The standard Smstateen extension for controlling access to CSRs
++            added by other RISC-V extensions in H/S/VS/U/VU modes and as
++            ratified at commit a28bfae (Ratified (#7)) of riscv-state-enable.
++
+         - const: ssaia
+           description: |
+             The standard Ssaia supervisor-level extension for the advanced
+diff --git a/Documentation/devicetree/bindings/rtc/xgene-rtc.txt b/Documentation/devicetree/bindings/rtc/xgene-rtc.txt
+index fd195c358446..25ba8cf0cc31 100644
+--- a/Documentation/devicetree/bindings/rtc/xgene-rtc.txt
++++ b/Documentation/devicetree/bindings/rtc/xgene-rtc.txt
+@@ -10,6 +10,9 @@ Required properties:
+ - #clock-cells: Should be 1.
+ - clocks: Reference to the clock entry.
+ 
++Optional properties:
++- prescaler: Value of the counter prescaler.
++
+ Example:
+ 
+ rtcclk: rtcclk {
+@@ -26,3 +29,16 @@ rtc: rtc@10510000 {
+ 	#clock-cells = <1>;
+ 	clocks = <&rtcclk 0>;
+ };
++
++Example XuanTie TH1520 RTC node with counter prescaler (prescaler):
++
++rtc: rtc@fffff40000 {
++	compatible = "snps,dw-apb-rtc";
++	reg = <0xff 0xfff40000 0x0 0x1000>;
++	interrupts = <74 IRQ_TYPE_LEVEL_HIGH>;
++	clocks = <&osc_32k>;
++	clock-names = "osc_32k";
++	wakeup-source;
++	prescaler = <0x8000>;
++	status = "okay";
++};
+diff --git a/Documentation/devicetree/bindings/soc/xuantie/xuantie,th1520-event.yaml b/Documentation/devicetree/bindings/soc/xuantie/xuantie,th1520-event.yaml
+new file mode 100644
+index 000000000000..0448f9897cd4
+--- /dev/null
++++ b/Documentation/devicetree/bindings/soc/xuantie/xuantie,th1520-event.yaml
+@@ -0,0 +1,37 @@
++# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/soc/xuantie/xuantie,th1520-event.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie TH1520 SoC Rebootmode Feature
++
++maintainers:
++  - Hao Li
++  - Xiangyi Zeng
++
++description:
++  This binding describes the rebootmode check feature on the TH1520
++  fullmask (FM) platform.
++  When the system reboots, the event driver records the reboot cause in a
++  region of always-on RAM, whose contents are retained across the reboot.
++
++properties:
++  compatible:
++    items:
++      - const: xuantie,th1520-event
++
++  aon-iram-regmap:
++    description:
++      The always-on RAM address range used by the driver to save the
++      rebootmode.
++
++required:
++  - compatible
++  - aon-iram-regmap
++
++examples:
++  - |
++    th1520_event: th1520-event {
++        compatible = "xuantie,th1520-event";
++        aon-iram-regmap = <&aon_iram>;
++        status = "okay";
++    };
+diff --git a/Documentation/devicetree/bindings/sound/everest,es7210.txt b/Documentation/devicetree/bindings/sound/everest,es7210.txt
+new file mode 100644
+index 000000000000..76ac2a820eff
+--- /dev/null
++++ b/Documentation/devicetree/bindings/sound/everest,es7210.txt
+@@ -0,0 +1,12 @@
++ES7210 I2S audio ADC
++
++Required properties:
++- compatible : should be "everest,es7210"
++- VDD-supply : regulator phandle for the VDD supply
++- PVDD-supply: regulator phandle for the PVDD supply for the es7210
++
++Example:
++i2s_codec: external-codec {
++	compatible = "everest,es7210";
++	VDD-supply = <&vcc_5v>;
++};
+diff --git a/Documentation/devicetree/bindings/sound/everest,es8156.yaml b/Documentation/devicetree/bindings/sound/everest,es8156.yaml
+new file mode 100644
+index 000000000000..87412a7ee7c7
+--- /dev/null
++++ b/Documentation/devicetree/bindings/sound/everest,es8156.yaml
+@@ -0,0 +1,42 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/sound/everest,es8156.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Everest ES8156 audio CODEC
++
++maintainers:
++  - shuofeng.ren
++
++allOf:
++  - $ref: dai-common.yaml#
++
++properties:
++  compatible:
++    const: everest,es8156
++
++  reg:
++    maxItems: 1
++
++  "#sound-dai-cells":
++    const: 0
++
++required:
++  - compatible
++  - reg
++  - "#sound-dai-cells"
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    i2c {
++        #address-cells = <1>;
++        #size-cells = <0>;
++        es8156: codec@8 {
++            compatible = "everest,es8156";
++            reg = <0x08>;
++            #sound-dai-cells = <0>;
++        };
++    };
+diff --git a/Documentation/devicetree/bindings/sound/xuantie,th1520-i2s.yaml b/Documentation/devicetree/bindings/sound/xuantie,th1520-i2s.yaml
+new file mode 100644
+index 000000000000..b051fef08431
+--- /dev/null
++++ b/Documentation/devicetree/bindings/sound/xuantie,th1520-i2s.yaml
+@@ -0,0 +1,91 @@
++# SPDX-License-Identifier: GPL-2.0
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/sound/xuantie,th1520-i2s.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie TH1520 I2S controller
++
++maintainers:
++  - David Li
++
++properties:
++  compatible:
++    const: xuantie,th1520-i2s
++
++  reg:
++    maxItems: 1
++
++  interrupts:
++    maxItems: 1
++
++  clocks:
++    items:
++      - description: clock for I2S controller
++
++  clock-names:
++    items:
++      - const: i2s_pclk
++
++  dmas:
++    minItems: 1
++    maxItems: 2
++
++  dma-names:
++    oneOf:
++      - const: rx
++      - items:
++          - const: tx
++          - const: rx
++
++  pinctrl-names:
++    oneOf:
++      - const: default
++      - items:
++          - const: bclk
++          - const: lrck
++          - const: dout
++          - const: din
++
++  reset-names:
++    items:
++      - const: reset_i2s
++
++  resets:
++    maxItems: 1
++
++  audio-cpr-regmap:
++    description:
++      The phandle of the syscon node for the CPR register.
++
++  "#sound-dai-cells":
++    const: 1
++
++required:
++  - compatible
++  - reg
++  - interrupts
++  - clocks
++  - clock-names
++  - dmas
++  - dma-names
++  - resets
++  - "#sound-dai-cells"
++
++examples:
++  - |
++    i2s1: i2s@ffcb015000 {
++        compatible = "xuantie,th1520-i2s";
++        reg = <0xff 0xcb015000 0x0 0x1000>;
++        pinctrl-names = "default";
++        interrupts = <175 IRQ_TYPE_LEVEL_HIGH>;
++        dmas = <&dmac2 11>, <&dmac2 10>;
++        dma-names = "tx", "rx";
++        clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_I2S1>;
++        clock-names = "i2s_pclk";
++        resets = <&audiosys_rst TH1520_RESET_AUD_I2S1>;
++        audio-cpr-regmap = <&audio_cpr>;
++        #sound-dai-cells = <1>;
++    };
+diff --git a/Documentation/devicetree/bindings/sound/xuantie,th1520-spdif.yaml b/Documentation/devicetree/bindings/sound/xuantie,th1520-spdif.yaml
+new file mode 100644
+index 000000000000..00934c46e1cf
+--- /dev/null
++++ b/Documentation/devicetree/bindings/sound/xuantie,th1520-spdif.yaml
+@@ -0,0 +1,77 @@
++# SPDX-License-Identifier: GPL-2.0
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/sound/xuantie,th1520-spdif.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: TH1520 SPDIF transceiver
++
++description:
++  The S/PDIF audio block is a stereo transceiver that allows the
++  processor to receive and transmit digital audio via a coaxial or
++  fibre cable.
++
++maintainers:
++  - nanli.yd
++
++properties:
++  compatible:
++    const: xuantie,th1520-spdif
++
++  reg:
++    maxItems: 1
++
++  interrupts:
++    maxItems: 1
++
++  clocks:
++    items:
++      - description: clock for SPDIF controller
++
++  clock-names:
++    items:
++      - const: pclk
++
++  dmas:
++    minItems: 1
++    maxItems: 2
++
++  dma-names:
++    oneOf:
++      - const: tx
++      - items:
++          - const: tx
++          - const: rx
++
++  audio-cpr-regmap:
++    description:
++      The phandle of the syscon node for the CPR register.
++
++  "#sound-dai-cells":
++    const: 0
++
++required:
++  - compatible
++  - reg
++  - interrupts
++  - clocks
++  - clock-names
++  - dmas
++  - dma-names
++  - "#sound-dai-cells"
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    spdif: spdif@ffcb018000 {
++        compatible = "xuantie,th1520-spdif";
++        reg = <0xff 0xcb018000 0x0 0x1000>;
++        interrupts = <179 IRQ_TYPE_LEVEL_HIGH>;
++        clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_SPDIF0>;
++        clock-names = "pclk";
++        dmas = <&dmac2 25>, <&dmac2 24>;
++        dma-names = "tx", "rx";
++        #sound-dai-cells = <0>;
++    };
+diff --git a/Documentation/devicetree/bindings/sound/xuantie,th1520-tdm.yaml b/Documentation/devicetree/bindings/sound/xuantie,th1520-tdm.yaml
+new file mode 100644
+index 000000000000..17835597e8ec
+--- /dev/null
++++ b/Documentation/devicetree/bindings/sound/xuantie,th1520-tdm.yaml
+@@ -0,0 +1,86 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/sound/xuantie,th1520-tdm.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: TH1520 TDM Controller
++
++description:
++  The TH1520 TDM Controller is a Time Division Multiplexed
++  audio interface in TH1520 SoCs, allowing up to 8 channels
++  of audio over a serial interface.
++
++maintainers:
++  - nanli.yd
++
++properties:
++  compatible:
++    enum:
++      - xuantie,th1520-tdm
++
++  reg:
++    maxItems: 1
++
++  interrupts:
++    maxItems: 1
++
++  dmas:
++    maxItems: 1
++
++  dma-names:
++    maxItems: 1
++    items:
++      enum:
++        - rx
++
++  clocks:
++    minItems: 1
++    items:
++      - description: tdm_pclk
++
++  clock-names:
++    minItems: 1
++    items:
++      - const: pclk
++
++  resets:
++    maxItems: 1
++    description: reset tdm
++
++  reset-names:
++    maxItems: 1
++
++  audio-cpr-regmap:
++    description:
++      The phandle of the syscon node for the CPR register.
++
++  "#sound-dai-cells":
++    const: 0
++
++required:
++  - compatible
++  - reg
++  - interrupts
++  - dmas
++  - dma-names
++  - clocks
++  - clock-names
++  - resets
++  - reset-names
++  - "#sound-dai-cells"
++
++examples:
++  - |
++    tdm: tdm@ffcb012000 {
++        compatible = "xuantie,th1520-tdm";
++        reg = <0xff 0xcb012000 0x0 0x1000>;
++        audio-cpr-regmap = <&audio_cpr>;
++        interrupts = <178 IRQ_TYPE_LEVEL_HIGH>;
++        dmas = <&dmac2 28>;
++        dma-names = "rx";
++        clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_TDM>;
++        clock-names = "pclk";
++        resets = <&audiosys_rst TH1520_RESET_AUD_TDM>;
++        #sound-dai-cells = <0>;
++    };
+diff --git a/Documentation/devicetree/bindings/spi/xuantie,th1520-qspi.yaml b/Documentation/devicetree/bindings/spi/xuantie,th1520-qspi.yaml
+new file mode 100644
+index 000000000000..2b69bf1b7c46
+--- /dev/null
++++ b/Documentation/devicetree/bindings/spi/xuantie,th1520-qspi.yaml
+@@ -0,0 +1,52 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/spi/xuantie,th1520-qspi.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie TH1520 Quad Serial Peripheral Interface (QSPI)
++
++maintainers:
++  - linghui zeng
++  - Xiangyi Zeng
++
++properties:
++  compatible:
++    const: xuantie,th1520
++
++  reg:
++    maxItems: 1
++
++  clocks:
++    maxItems: 2
++
++  clock-names:
++    items:
++      - const: sclk
++      - const: pclk
++
++  interrupts:
++    maxItems: 1
++
++required:
++  - compatible
++  - reg
++  - clocks
++  - clock-names
++  - interrupts
++  - status
++
++unevaluatedProperties: false
++
++examples:
++  - |
++    qspi0: qspi@ffea000000 {
++        compatible = "snps,dw-apb-ssi-quad";
++        reg = <0xff 0xea000000 0x0 0x1000>;
++        interrupts = <52 IRQ_TYPE_LEVEL_HIGH>;
++        clocks = <&clk CLKGEN_QSPI0_SSI_CLK>,
++                 <&clk CLKGEN_QSPI0_PCLK>;
++        clock-names = "sclk", "pclk";
++        #address-cells = <1>;
++        #size-cells = <0>;
++        status = "disabled";
++    };
+diff --git a/Documentation/devicetree/bindings/spi/xuantie,th1520-spi.yaml b/Documentation/devicetree/bindings/spi/xuantie,th1520-spi.yaml
+new file mode 100644
+index 000000000000..660996424d81
+--- /dev/null
++++ b/Documentation/devicetree/bindings/spi/xuantie,th1520-spi.yaml
+@@ -0,0 +1,58 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/spi/xuantie,th1520-spi.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie TH1520 SPI Controller
++
++description: |
++  The XuanTie TH1520 SPI controller is used to communicate with external
++  devices using the Serial Peripheral Interface. It supports full-duplex,
++  half-duplex and simplex synchronous serial communication with external
++  devices. It supports data sizes from 4 to 32 bits.
++
++maintainers:
++  - linghui zeng
++  - Xiangyi Zeng
++
++properties:
++  compatible:
++    const: xuantie,th1520
++
++  reg:
++    maxItems: 1
++
++  clocks:
++    maxItems: 2
++
++  clock-names:
++    items:
++      - const: sclk
++      - const: pclk
++
++  interrupts:
++    maxItems: 1
++
++required:
++  - compatible
++  - reg
++  - clocks
++  - clock-names
++  - interrupts
++  - status
++
++additionalProperties: false
++
++examples:
++  - |
++    spi: spi@ffe700c000 {
++        compatible = "snps,dw-apb-ssi";
++        reg = <0xff 0xe700c000 0x0 0x1000>;
++        interrupts = <54 IRQ_TYPE_LEVEL_HIGH>;
++        clocks = <&clk CLKGEN_SPI_SSI_CLK>,
++                 <&clk CLKGEN_SPI_PCLK>;
++        clock-names = "sclk", "pclk";
++        #address-cells = <1>;
++        #size-cells = <0>;
++        status = "disabled";
++    };
+diff --git a/Documentation/devicetree/bindings/usb/xuantie,th1520-usb.yaml b/Documentation/devicetree/bindings/usb/xuantie,th1520-usb.yaml
+new file mode 100644
+index 000000000000..f4a63904c3bc
+--- /dev/null
++++ b/Documentation/devicetree/bindings/usb/xuantie,th1520-usb.yaml
+@@ -0,0 +1,76 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/usb/xuantie,th1520-usb.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie TH1520 DWC3 USB Controller Glue
++
++maintainers:
++  - Jisheng Zhang
++
++properties:
++  compatible:
++    const: xuantie,th1520-usb
++
++  reg:
++    maxItems: 1
++
++  clocks:
++    maxItems: 4
++
++  clock-names:
++    items:
++      - const: ref
++      - const: bus_early
++      - const: phy
++      - const: suspend
++
++  xuantie,misc-sysreg:
++    $ref: /schemas/types.yaml#/definitions/phandle
++    description: Phandle of the misc system-register syscon.
++
++  ranges: true
++
++  '#address-cells':
++    enum: [ 1, 2 ]
++
++  '#size-cells':
++    enum: [ 1, 2 ]
++
++# Required child node:
++
++patternProperties:
++  "^usb@[0-9a-f]+$":
++    $ref: snps,dwc3.yaml#
++
++required:
++  - compatible
++  - reg
++  - clocks
++  - clock-names
++  - ranges
++
++additionalProperties: false
++
++examples:
++  - |
++    usb: usb@ffec03f000 {
++        compatible = "xuantie,th1520-usb";
++        reg = <0xff 0xec03f000 0x0 0x1000>;
++        xuantie,misc-sysreg = <&misc_sysreg>;
++        clocks = <&miscsys_clk_gate CLKGEN_MISCSYS_USB3_DRD_CLK>,
++                 <&miscsys_clk_gate CLKGEN_MISCSYS_USB3_DRD_CTRL_REF_CLK>,
++                 <&miscsys_clk_gate CLKGEN_MISCSYS_USB3_DRD_PHY_REF_CLK>,
++                 <&miscsys_clk_gate CLKGEN_MISCSYS_USB3_DRD_SUSPEND_CLK>;
++        clock-names = "ref", "bus_early", "phy", "suspend";
++        #address-cells = <2>;
++        #size-cells = <2>;
++        ranges;
++
++        usb_dwc3: usb@ffe7040000 {
++            compatible = "snps,dwc3";
++            reg = <0xff 0xe7040000 0x0 0x10000>;
++            interrupts = <68 IRQ_TYPE_LEVEL_HIGH>;
++            dr_mode = "host";
++            snps,usb3_lpm_capable;
++            status = "disabled";
++        };
++    };
+\ No newline at end of file
+diff --git a/Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml b/Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml
+new file mode 100644
+index 000000000000..23a2bc07210b
+--- /dev/null
++++ b/Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml
+@@ -0,0 +1,19 @@
++# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/watchdog/xuantie,th1520-wdt.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: XuanTie PMIC Watchdog for TH1520 SoC
++
++maintainers:
++  - Wei.Liu
++  - Xiangyi Zeng
++
++description:
++  This describes the hardware watchdog on the TH1520 board. The watchdog
++  watches the kernel to make sure it doesn't freeze and, if it does,
++  resets the system after a certain amount of time.
++ ++allOf: ++ - $ref: watchdog.yaml# +diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt +index 23260ca44946..76597adfb7d5 100644 +--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt ++++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt +@@ -10,6 +10,22 @@ + # Rely on implicit context synchronization as a result of exception return + # when returning from IPI handler, and when returning to user-space. + # ++# * riscv ++# ++# riscv uses xRET as return from interrupt and to return to user-space. ++# ++# Given that xRET is not core serializing, we rely on FENCE.I for providing ++# core serialization: ++# ++# - by calling sync_core_before_usermode() on return from interrupt (cf. ++# ipi_sync_core()), ++# ++# - via switch_mm() and sync_core_before_usermode() (respectively, for ++# uthread->uthread and kthread->uthread transitions) before returning ++# to user-space. ++# ++# The serialization in switch_mm() is activated by prepare_sync_core_cmd(). ++# + # * x86 + # + # x86-32 uses IRET as return from interrupt, which takes care of the IPI. +@@ -44,7 +60,7 @@ + | openrisc: | TODO | + | parisc: | TODO | + | powerpc: | ok | +- | riscv: | TODO | ++ | riscv: | ok | + | s390: | ok | + | sh: | TODO | + | sparc: | TODO | +diff --git a/Documentation/maintainer/maintainer-entry-profile.rst b/Documentation/maintainer/maintainer-entry-profile.rst +index 6b64072d4bf2..7ad4bfc2cc03 100644 +--- a/Documentation/maintainer/maintainer-entry-profile.rst ++++ b/Documentation/maintainer/maintainer-entry-profile.rst +@@ -101,7 +101,7 @@ to do something different in the near future. + + ../doc-guide/maintainer-profile + ../nvdimm/maintainer-entry-profile +- ../riscv/patch-acceptance ++ ../arch/riscv/patch-acceptance + ../driver-api/media/maintainer-entry-profile + ../driver-api/vfio-pci-device-specific-driver-acceptance + ../nvme/feature-and-quirk-policy +diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst +index b501cd977053..db09a81d474b 100644 +--- a/Documentation/process/index.rst ++++ b/Documentation/process/index.rst +@@ -71,7 +71,7 @@ lack of a better place. + volatile-considered-harmful + botching-up-ioctls + clang-format +- ../riscv/patch-acceptance ++ ../arch/riscv/patch-acceptance + ../core-api/unaligned-memory-access + + .. only:: subproject and html +diff --git a/Documentation/riscv/hwprobe.rst b/Documentation/riscv/hwprobe.rst +deleted file mode 100644 +index a52996b22f75..000000000000 +--- a/Documentation/riscv/hwprobe.rst ++++ /dev/null +@@ -1,98 +0,0 @@ +-.. SPDX-License-Identifier: GPL-2.0 +- +-RISC-V Hardware Probing Interface +---------------------------------- +- +-The RISC-V hardware probing interface is based around a single syscall, which +-is defined in :: +- +- struct riscv_hwprobe { +- __s64 key; +- __u64 value; +- }; +- +- long sys_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, cpu_set_t *cpus, +- unsigned int flags); +- +-The arguments are split into three groups: an array of key-value pairs, a CPU +-set, and some flags. The key-value pairs are supplied with a count. Userspace +-must prepopulate the key field for each element, and the kernel will fill in the +-value if the key is recognized. If a key is unknown to the kernel, its key field +-will be cleared to -1, and its value set to 0. The CPU set is defined by +-CPU_SET(3). For value-like keys (eg. 
vendor/arch/impl), the returned value will +-be only be valid if all CPUs in the given set have the same value. Otherwise -1 +-will be returned. For boolean-like keys, the value returned will be a logical +-AND of the values for the specified CPUs. Usermode can supply NULL for cpus and +-0 for cpu_count as a shortcut for all online CPUs. There are currently no flags, +-this value must be zero for future compatibility. +- +-On success 0 is returned, on failure a negative error code is returned. +- +-The following keys are defined: +- +-* :c:macro:`RISCV_HWPROBE_KEY_MVENDORID`: Contains the value of ``mvendorid``, +- as defined by the RISC-V privileged architecture specification. +- +-* :c:macro:`RISCV_HWPROBE_KEY_MARCHID`: Contains the value of ``marchid``, as +- defined by the RISC-V privileged architecture specification. +- +-* :c:macro:`RISCV_HWPROBE_KEY_MIMPLID`: Contains the value of ``mimplid``, as +- defined by the RISC-V privileged architecture specification. +- +-* :c:macro:`RISCV_HWPROBE_KEY_BASE_BEHAVIOR`: A bitmask containing the base +- user-visible behavior that this kernel supports. The following base user ABIs +- are defined: +- +- * :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: Support for rv32ima or +- rv64ima, as defined by version 2.2 of the user ISA and version 1.10 of the +- privileged ISA, with the following known exceptions (more exceptions may be +- added, but only if it can be demonstrated that the user ABI is not broken): +- +- * The ``fence.i`` instruction cannot be directly executed by userspace +- programs (it may still be executed in userspace via a +- kernel-controlled mechanism such as the vDSO). +- +-* :c:macro:`RISCV_HWPROBE_KEY_IMA_EXT_0`: A bitmask containing the extensions +- that are compatible with the :c:macro:`RISCV_HWPROBE_BASE_BEHAVIOR_IMA`: +- base system behavior. +- +- * :c:macro:`RISCV_HWPROBE_IMA_FD`: The F and D extensions are supported, as +- defined by commit cd20cee ("FMIN/FMAX now implement +- minimumNumber/maximumNumber, not minNum/maxNum") of the RISC-V ISA manual. +- +- * :c:macro:`RISCV_HWPROBE_IMA_C`: The C extension is supported, as defined +- by version 2.2 of the RISC-V ISA manual. +- +- * :c:macro:`RISCV_HWPROBE_IMA_V`: The V extension is supported, as defined by +- version 1.0 of the RISC-V Vector extension manual. +- +- * :c:macro:`RISCV_HWPROBE_EXT_ZBA`: The Zba address generation extension is +- supported, as defined in version 1.0 of the Bit-Manipulation ISA +- extensions. +- +- * :c:macro:`RISCV_HWPROBE_EXT_ZBB`: The Zbb extension is supported, as defined +- in version 1.0 of the Bit-Manipulation ISA extensions. +- +- * :c:macro:`RISCV_HWPROBE_EXT_ZBS`: The Zbs extension is supported, as defined +- in version 1.0 of the Bit-Manipulation ISA extensions. +- +-* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance +- information about the selected set of processors. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNKNOWN`: The performance of misaligned +- accesses is unknown. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_EMULATED`: Misaligned accesses are +- emulated via software, either in or below the kernel. These accesses are +- always extremely slow. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_SLOW`: Misaligned accesses are slower +- than equivalent byte accesses. Misaligned accesses may be supported +- directly in hardware, or trapped and emulated by software. +- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_FAST`: Misaligned accesses are faster +- than equivalent byte accesses. 
+- +- * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are +- not supported at all and will generate a misaligned address fault. +diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst +index 3170747226f6..43bd8a145b7a 100644 +--- a/Documentation/scheduler/index.rst ++++ b/Documentation/scheduler/index.rst +@@ -7,6 +7,7 @@ Scheduler + + + completion ++ membarrier + sched-arch + sched-bwc + sched-deadline +diff --git a/Documentation/scheduler/membarrier.rst b/Documentation/scheduler/membarrier.rst +new file mode 100644 +index 000000000000..2387804b1c63 +--- /dev/null ++++ b/Documentation/scheduler/membarrier.rst +@@ -0,0 +1,39 @@ ++.. SPDX-License-Identifier: GPL-2.0 ++ ++======================== ++membarrier() System Call ++======================== ++ ++MEMBARRIER_CMD_{PRIVATE,GLOBAL}_EXPEDITED - Architecture requirements ++===================================================================== ++ ++Memory barriers before updating rq->curr ++---------------------------------------- ++ ++The commands MEMBARRIER_CMD_PRIVATE_EXPEDITED and MEMBARRIER_CMD_GLOBAL_EXPEDITED ++require each architecture to have a full memory barrier after coming from ++user-space, before updating rq->curr. This barrier is implied by the sequence ++rq_lock(); smp_mb__after_spinlock() in __schedule(). The barrier matches a full ++barrier in the proximity of the membarrier system call exit, cf. ++membarrier_{private,global}_expedited(). ++ ++Memory barriers after updating rq->curr ++--------------------------------------- ++ ++The commands MEMBARRIER_CMD_PRIVATE_EXPEDITED and MEMBARRIER_CMD_GLOBAL_EXPEDITED ++require each architecture to have a full memory barrier after updating rq->curr, ++before returning to user-space. The schemes providing this barrier on the various ++architectures are as follows. ++ ++ - alpha, arc, arm, hexagon, mips rely on the full barrier implied by ++ spin_unlock() in finish_lock_switch(). ++ ++ - arm64 relies on the full barrier implied by switch_to(). ++ ++ - powerpc, riscv, s390, sparc, x86 rely on the full barrier implied by ++ switch_mm(), if mm is not NULL; they rely on the full barrier implied ++ by mmdrop(), otherwise. On powerpc and riscv, switch_mm() relies on ++ membarrier_arch_switch_mm(). ++ ++The barrier matches a full barrier in the proximity of the membarrier system call ++entry, cf. membarrier_{private,global}_expedited(). +diff --git a/Documentation/translations/it_IT/riscv/patch-acceptance.rst b/Documentation/translations/it_IT/riscv/patch-acceptance.rst +index edf67252b3fb..2d7afb1f6959 100644 +--- a/Documentation/translations/it_IT/riscv/patch-acceptance.rst ++++ b/Documentation/translations/it_IT/riscv/patch-acceptance.rst +@@ -1,6 +1,6 @@ + .. 
include:: ../disclaimer-ita.rst + +-:Original: :doc:`../../../riscv/patch-acceptance` ++:Original: :doc:`../../../arch/riscv/patch-acceptance` + :Translator: Federico Vaga + + arch/riscv linee guida alla manutenzione per gli sviluppatori +diff --git a/Documentation/translations/zh_CN/arch/index.rst b/Documentation/translations/zh_CN/arch/index.rst +index e3d273d7d599..c4c2e16f629c 100644 +--- a/Documentation/translations/zh_CN/arch/index.rst ++++ b/Documentation/translations/zh_CN/arch/index.rst +@@ -10,7 +10,7 @@ + + mips/index + arm64/index +- ../riscv/index ++ ../arch/riscv/index + openrisc/index + parisc/index + loongarch/index +diff --git a/Documentation/translations/zh_CN/riscv/boot-image-header.rst b/Documentation/translations/zh_CN/arch/riscv/boot-image-header.rst +similarity index 96% +rename from Documentation/translations/zh_CN/riscv/boot-image-header.rst +rename to Documentation/translations/zh_CN/arch/riscv/boot-image-header.rst +index 0234c28a7114..779b5172fe24 100644 +--- a/Documentation/translations/zh_CN/riscv/boot-image-header.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/boot-image-header.rst +@@ -1,6 +1,6 @@ +-.. include:: ../disclaimer-zh_CN.rst ++.. include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/boot-image-header.rst ++:Original: Documentation/arch/riscv/boot-image-header.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/riscv/index.rst b/Documentation/translations/zh_CN/arch/riscv/index.rst +similarity index 79% +rename from Documentation/translations/zh_CN/riscv/index.rst +rename to Documentation/translations/zh_CN/arch/riscv/index.rst +index 131e405aa857..3b041c116169 100644 +--- a/Documentation/translations/zh_CN/riscv/index.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/index.rst +@@ -1,8 +1,8 @@ + .. SPDX-License-Identifier: GPL-2.0 + +-.. include:: ../disclaimer-zh_CN.rst ++.. include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/index.rst ++:Original: Documentation/arch/riscv/index.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/riscv/patch-acceptance.rst b/Documentation/translations/zh_CN/arch/riscv/patch-acceptance.rst +similarity index 93% +rename from Documentation/translations/zh_CN/riscv/patch-acceptance.rst +rename to Documentation/translations/zh_CN/arch/riscv/patch-acceptance.rst +index d180d24717bf..c8eb230ca8ee 100644 +--- a/Documentation/translations/zh_CN/riscv/patch-acceptance.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/patch-acceptance.rst +@@ -1,8 +1,8 @@ + .. SPDX-License-Identifier: GPL-2.0 + +-.. include:: ../disclaimer-zh_CN.rst ++.. include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/patch-acceptance.rst ++:Original: Documentation/arch/riscv/patch-acceptance.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/riscv/vm-layout.rst b/Documentation/translations/zh_CN/arch/riscv/vm-layout.rst +similarity index 98% +rename from Documentation/translations/zh_CN/riscv/vm-layout.rst +rename to Documentation/translations/zh_CN/arch/riscv/vm-layout.rst +index 91884e2dfff8..4b9f4dcf6c19 100644 +--- a/Documentation/translations/zh_CN/riscv/vm-layout.rst ++++ b/Documentation/translations/zh_CN/arch/riscv/vm-layout.rst +@@ -1,7 +1,7 @@ + .. SPDX-License-Identifier: GPL-2.0 +-.. include:: ../disclaimer-zh_CN.rst ++.. 
include:: ../../disclaimer-zh_CN.rst + +-:Original: Documentation/riscv/vm-layout.rst ++:Original: Documentation/arch/riscv/vm-layout.rst + + :翻译: + +diff --git a/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst b/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst +index a1ee99c4786e..0f5acfb1012e 100644 +--- a/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst ++++ b/Documentation/translations/zh_CN/maintainer/maintainer-entry-profile.rst +@@ -89,4 +89,4 @@ + + ../doc-guide/maintainer-profile + ../../../nvdimm/maintainer-entry-profile +- ../../../riscv/patch-acceptance ++ ../../../arch/riscv/patch-acceptance +diff --git a/MAINTAINERS b/MAINTAINERS +index 61baf2cfc4e1..b0f5d1363843 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -13805,7 +13805,9 @@ M: Mathieu Desnoyers + M: "Paul E. McKenney" + L: linux-kernel@vger.kernel.org + S: Supported ++F: Documentation/scheduler/membarrier.rst + F: arch/*/include/asm/membarrier.h ++F: arch/*/include/asm/sync_core.h + F: include/uapi/linux/membarrier.h + F: kernel/sched/membarrier.c + +@@ -18554,6 +18556,20 @@ S: Maintained + F: drivers/mtd/nand/raw/r852.c + F: drivers/mtd/nand/raw/r852.h + ++RISC-V AIA DRIVERS ++M: Anup Patel ++L: linux-riscv@lists.infradead.org ++S: Maintained ++F: Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml ++F: Documentation/devicetree/bindings/interrupt-controller/riscv,imsics.yaml ++F: drivers/irqchip/irq-riscv-aplic-*.c ++F: drivers/irqchip/irq-riscv-aplic-*.h ++F: drivers/irqchip/irq-riscv-imsic-*.c ++F: drivers/irqchip/irq-riscv-imsic-*.h ++F: drivers/irqchip/irq-riscv-intc.c ++F: include/linux/irqchip/riscv-aplic.h ++F: include/linux/irqchip/riscv-imsic.h ++ + RISC-V ARCHITECTURE + M: Paul Walmsley + M: Palmer Dabbelt +@@ -18562,12 +18578,21 @@ L: linux-riscv@lists.infradead.org + S: Supported + Q: https://patchwork.kernel.org/project/linux-riscv/list/ + C: irc://irc.libera.chat/riscv +-P: Documentation/riscv/patch-acceptance.rst ++P: Documentation/arch/riscv/patch-acceptance.rst + T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git + F: arch/riscv/ + N: riscv + K: riscv + ++RISC-V IOMMU ++M: Tomasz Jeznach ++L: iommu@lists.linux.dev ++L: linux-riscv@lists.infradead.org ++S: Maintained ++T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git ++F: Documentation/devicetree/bindings/iommu/riscv,iommu.yaml ++F: drivers/iommu/riscv/ ++ + RISC-V MICROCHIP FPGA SUPPORT + M: Conor Dooley + M: Daire McNamara +@@ -18625,6 +18650,8 @@ M: Fu Wei + L: linux-riscv@lists.infradead.org + S: Maintained + F: arch/riscv/boot/dts/thead/ ++F: drivers/pinctrl/pinctrl-th1520.c ++F: drivers/usb/dwc3/dwc3-thead.c + + RNBD BLOCK DRIVERS + M: Md. 
Haris Iqbal +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 5422d1502fd6..5cd9a47c9604 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -1596,7 +1596,6 @@ config ARM64_BOOTPARAM_HOTPLUG_CPU0 + config NUMA + bool "NUMA Memory Allocation and Scheduler Support" + select GENERIC_ARCH_NUMA +- select ACPI_NUMA if ACPI + select OF_NUMA + select HAVE_SETUP_PER_CPU_AREA + select NEED_PER_CPU_EMBED_FIRST_CHUNK +diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h +index 2c29239d05c3..846c563689a8 100644 +--- a/arch/arm64/include/asm/tlb.h ++++ b/arch/arm64/include/asm/tlb.h +@@ -96,7 +96,10 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, + static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, + unsigned long addr) + { +- tlb_remove_ptdesc(tlb, virt_to_ptdesc(pudp)); ++ struct ptdesc *ptdesc = virt_to_ptdesc(pudp); ++ ++ pagetable_pud_dtor(ptdesc); ++ tlb_remove_ptdesc(tlb, ptdesc); + } + #endif + +diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c +index f872c57e9909..fd9a7bed83ce 100644 +--- a/arch/arm64/kernel/pci.c ++++ b/arch/arm64/kernel/pci.c +@@ -6,28 +6,7 @@ + * Copyright (C) 2014 ARM Ltd. + */ + +-#include +-#include +-#include +-#include +-#include + #include +-#include +-#include +-#include +- +-#ifdef CONFIG_ACPI +-/* +- * Try to assign the IRQ number when probing a new device +- */ +-int pcibios_alloc_irq(struct pci_dev *dev) +-{ +- if (!acpi_disabled) +- acpi_pci_irq_enable(dev); +- +- return 0; +-} +-#endif + + /* + * raw_pci_read/write - Platform-specific PCI config space access. +@@ -61,173 +40,3 @@ int pcibus_to_node(struct pci_bus *bus) + EXPORT_SYMBOL(pcibus_to_node); + + #endif +- +-#ifdef CONFIG_ACPI +- +-struct acpi_pci_generic_root_info { +- struct acpi_pci_root_info common; +- struct pci_config_window *cfg; /* config space mapping */ +-}; +- +-int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +-{ +- struct pci_config_window *cfg = bus->sysdata; +- struct acpi_device *adev = to_acpi_device(cfg->parent); +- struct acpi_pci_root *root = acpi_driver_data(adev); +- +- return root->segment; +-} +- +-int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +-{ +- struct pci_config_window *cfg; +- struct acpi_device *adev; +- struct device *bus_dev; +- +- if (acpi_disabled) +- return 0; +- +- cfg = bridge->bus->sysdata; +- +- /* +- * On Hyper-V there is no corresponding ACPI device for a root bridge, +- * therefore ->parent is set as NULL by the driver. And set 'adev' as +- * NULL in this case because there is no proper ACPI device. +- */ +- if (!cfg->parent) +- adev = NULL; +- else +- adev = to_acpi_device(cfg->parent); +- +- bus_dev = &bridge->bus->dev; +- +- ACPI_COMPANION_SET(&bridge->dev, adev); +- set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev))); +- +- return 0; +-} +- +-static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci) +-{ +- struct resource_entry *entry, *tmp; +- int status; +- +- status = acpi_pci_probe_root_resources(ci); +- resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { +- if (!(entry->res->flags & IORESOURCE_WINDOW)) +- resource_list_destroy_entry(entry); +- } +- return status; +-} +- +-/* +- * Lookup the bus range for the domain in MCFG, and set up config space +- * mapping. 
+- */ +-static struct pci_config_window * +-pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) +-{ +- struct device *dev = &root->device->dev; +- struct resource *bus_res = &root->secondary; +- u16 seg = root->segment; +- const struct pci_ecam_ops *ecam_ops; +- struct resource cfgres; +- struct acpi_device *adev; +- struct pci_config_window *cfg; +- int ret; +- +- ret = pci_mcfg_lookup(root, &cfgres, &ecam_ops); +- if (ret) { +- dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); +- return NULL; +- } +- +- adev = acpi_resource_consumer(&cfgres); +- if (adev) +- dev_info(dev, "ECAM area %pR reserved by %s\n", &cfgres, +- dev_name(&adev->dev)); +- else +- dev_warn(dev, FW_BUG "ECAM area %pR not reserved in ACPI namespace\n", +- &cfgres); +- +- cfg = pci_ecam_create(dev, &cfgres, bus_res, ecam_ops); +- if (IS_ERR(cfg)) { +- dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, +- PTR_ERR(cfg)); +- return NULL; +- } +- +- return cfg; +-} +- +-/* release_info: free resources allocated by init_info */ +-static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci) +-{ +- struct acpi_pci_generic_root_info *ri; +- +- ri = container_of(ci, struct acpi_pci_generic_root_info, common); +- pci_ecam_free(ri->cfg); +- kfree(ci->ops); +- kfree(ri); +-} +- +-/* Interface called from ACPI code to setup PCI host controller */ +-struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +-{ +- struct acpi_pci_generic_root_info *ri; +- struct pci_bus *bus, *child; +- struct acpi_pci_root_ops *root_ops; +- struct pci_host_bridge *host; +- +- ri = kzalloc(sizeof(*ri), GFP_KERNEL); +- if (!ri) +- return NULL; +- +- root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); +- if (!root_ops) { +- kfree(ri); +- return NULL; +- } +- +- ri->cfg = pci_acpi_setup_ecam_mapping(root); +- if (!ri->cfg) { +- kfree(ri); +- kfree(root_ops); +- return NULL; +- } +- +- root_ops->release_info = pci_acpi_generic_release_info; +- root_ops->prepare_resources = pci_acpi_root_prepare_resources; +- root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops; +- bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg); +- if (!bus) +- return NULL; +- +- /* If we must preserve the resource configuration, claim now */ +- host = pci_find_host_bridge(bus); +- if (host->preserve_config) +- pci_bus_claim_resources(bus); +- +- /* +- * Assign whatever was left unassigned. If we didn't claim above, +- * this will reassign everything. 
+- */ +- pci_assign_unassigned_root_bus_resources(bus); +- +- list_for_each_entry(child, &bus->children, node) +- pcie_bus_configure_settings(child); +- +- return bus; +-} +- +-void pcibios_add_bus(struct pci_bus *bus) +-{ +- acpi_pci_add_bus(bus); +-} +- +-void pcibios_remove_bus(struct pci_bus *bus) +-{ +- acpi_pci_remove_bus(bus); +-} +- +-#endif +diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig +index 53faa122b0f4..88182df75060 100644 +--- a/arch/ia64/Kconfig ++++ b/arch/ia64/Kconfig +@@ -16,7 +16,6 @@ config IA64 + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_MIGHT_HAVE_PC_SERIO + select ACPI +- select ACPI_NUMA if NUMA + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_SUPPORTS_ACPI +diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig +index 8739e15c137b..4ce4b491edcd 100644 +--- a/arch/loongarch/Kconfig ++++ b/arch/loongarch/Kconfig +@@ -468,7 +468,6 @@ config NR_CPUS + config NUMA + bool "NUMA Support" + select SMP +- select ACPI_NUMA if ACPI + help + Say Y to compile the kernel with NUMA (Non-Uniform Memory Access) + support. This option improves performance on systems with more +diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h +index c9f9895f237d..a7b9c9e73593 100644 +--- a/arch/loongarch/include/asm/pgalloc.h ++++ b/arch/loongarch/include/asm/pgalloc.h +@@ -95,6 +95,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) + + if (!ptdesc) + return NULL; ++ pagetable_pud_ctor(ptdesc); + pud = ptdesc_address(ptdesc); + + pud_init(pud); +diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c +index 34836408b15a..e24b6f9ea208 100644 +--- a/arch/loongarch/kernel/dma.c ++++ b/arch/loongarch/kernel/dma.c +@@ -17,7 +17,7 @@ EXPORT_SYMBOL_GPL(node_id_offset); + void acpi_arch_dma_setup(struct device *dev) + { + int ret; +- u64 mask, end = 0; ++ u64 mask, end; + const struct bus_dma_region *map = NULL; + + if (node_id_offset == 0) { +@@ -27,12 +27,7 @@ void acpi_arch_dma_setup(struct device *dev) + + ret = acpi_dma_get_range(dev, &map); + if (!ret && map) { +- const struct bus_dma_region *r = map; +- +- for (end = 0; r->size; r++) { +- if (r->dma_start + r->size - 1 > end) +- end = r->dma_start + r->size - 1; +- } ++ end = dma_range_map_max(map); + + mask = DMA_BIT_MASK(ilog2(end) + 1); + dev->bus_dma_limit = end; +diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h +index 40e40a7eb94a..f4440edcd8fe 100644 +--- a/arch/mips/include/asm/pgalloc.h ++++ b/arch/mips/include/asm/pgalloc.h +@@ -95,6 +95,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) + + if (!ptdesc) + return NULL; ++ pagetable_pud_ctor(ptdesc); + pud = ptdesc_address(ptdesc); + + pud_init(pud); +diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig +index 3be10e723b2c..b20dc27077c2 100644 +--- a/arch/riscv/Kconfig ++++ b/arch/riscv/Kconfig +@@ -13,7 +13,10 @@ config 32BIT + config RISCV + def_bool y + select ACPI_GENERIC_GSI if ACPI ++ select ACPI_PPTT if ACPI ++ select ACPI_MCFG if (ACPI && PCI) + select ACPI_REDUCED_HARDWARE_ONLY if ACPI ++ select ACPI_SPCR_TABLE if ACPI + select ARCH_DMA_DEFAULT_COHERENT + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 +@@ -26,20 +29,25 @@ config RISCV + select ARCH_HAS_FORTIFY_SOURCE + select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_GIGANTIC_PAGE ++ select ARCH_HAS_HW_PTE_YOUNG + select ARCH_HAS_KCOV + select 
ARCH_HAS_MEMBARRIER_CALLBACKS ++ select ARCH_HAS_MEMBARRIER_SYNC_CORE + select ARCH_HAS_MMIOWB + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + select ARCH_HAS_PMEM_API ++ select ARCH_HAS_PREPARE_SYNC_CORE_CMD + select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SET_DIRECT_MAP if MMU + select ARCH_HAS_SET_MEMORY if MMU + select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL + select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL ++ select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE + select ARCH_HAS_SYSCALL_WRAPPER + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAS_VDSO_DATA ++ select ARCH_KEEP_MEMBLOCK if ACPI + select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX + select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT + select ARCH_STACKWALK +@@ -64,7 +72,7 @@ config RISCV + select CLINT_TIMER if !MMU + select CLONE_BACKWARDS + select COMMON_CLK +- select CPU_PM if CPU_IDLE || HIBERNATION ++ select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND + select EDAC_SUPPORT + select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE) + select GENERIC_ARCH_TOPOLOGY +@@ -119,6 +127,7 @@ config RISCV + select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION + select HAVE_EBPF_JIT if MMU ++ select HAVE_FAST_GUP if MMU + select HAVE_FUNCTION_ARG_ACCESS_API + select HAVE_FUNCTION_ERROR_INJECTION + select HAVE_GCC_PLUGINS +@@ -147,14 +156,18 @@ config RISCV + select IRQ_FORCED_THREADING + select KASAN_VMALLOC if KASAN + select LOCK_MM_AND_FIND_VMA ++ select MMU_GATHER_RCU_TABLE_FREE if SMP && MMU + select MODULES_USE_ELF_RELA if MODULES + select MODULE_SECTIONS if MODULES + select OF + select OF_EARLY_FLATTREE + select OF_IRQ + select PCI_DOMAINS_GENERIC if PCI ++ select PCI_ECAM if (ACPI && PCI) + select PCI_MSI if PCI + select RISCV_ALTERNATIVE if !XIP_KERNEL ++ select RISCV_APLIC ++ select RISCV_IMSIC + select RISCV_INTC + select RISCV_TIMER if RISCV_SBI + select SIFIVE_PLIC +@@ -223,6 +236,20 @@ config KASAN_SHADOW_OFFSET + default 0xdfffffff00000000 if 64BIT + default 0xffffffff if 32BIT + ++config ARCH_FORCE_MAX_ORDER ++ int "Maximum zone order" ++ default 10 ++ help ++ The kernel memory allocator divides physically contiguous memory ++ blocks into "zones", where each zone is a power of two number of ++ pages. This option selects the largest power of two that the kernel ++ keeps in the memory allocator. If you need to allocate very large ++ blocks of physically contiguous memory, then you may need to ++ increase this value. ++ ++ The page size is not necessarily 4KB. Keep this in mind ++ when choosing a value for this option. ++ + config ARCH_FLATMEM_ENABLE + def_bool !NUMA + +@@ -281,6 +308,7 @@ config RISCV_DMA_NONCOHERENT + select ARCH_HAS_SYNC_DMA_FOR_CPU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB ++ select ARCH_HAS_DMA_WRITE_COMBINE + + config RISCV_NONSTANDARD_CACHE_OPS + bool +@@ -298,6 +326,7 @@ config AS_HAS_OPTION_ARCH + + source "arch/riscv/Kconfig.socs" + source "arch/riscv/Kconfig.errata" ++source "arch/riscv/Kconfig.vendor" + + menu "Platform type" + +@@ -507,7 +536,7 @@ config RISCV_ISA_V + depends on TOOLCHAIN_HAS_V + depends on FPU + select DYNAMIC_SIGFRAME +- default y ++ default n + help + Say N here if you want to disable all vector related procedure + in the kernel. +@@ -525,6 +554,53 @@ config RISCV_ISA_V_DEFAULT_ENABLE + + If you don't know what to do here, say Y. 
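The ARCH_HAS_MEMBARRIER_SYNC_CORE, ARCH_HAS_PREPARE_SYNC_CORE_CMD and ARCH_HAS_SYNC_CORE_BEFORE_USERMODE selections above are what let user-space (JITs in particular) rely on the sync-core membarrier commands described in the Documentation/scheduler/membarrier.rst file added earlier in this patch. As a rough illustration only, not part of the patch, a minimal user-space caller might look like this (the membarrier() wrapper is ours; glibc provides no stub)::

  #include <linux/membarrier.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <stdio.h>

  /* glibc has no membarrier() wrapper; cmd/flags/cpu_id mirror the uapi. */
  static int membarrier(int cmd, unsigned int flags, int cpu_id)
  {
          return syscall(__NR_membarrier, cmd, flags, cpu_id);
  }

  int main(void)
  {
          /* A process must register before using the private command. */
          if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0)) {
                  perror("membarrier register");
                  return 1;
          }
          /* After this returns, every running thread of the process is
           * guaranteed a core-serializing instruction before it next
           * executes user code. */
          if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0)) {
                  perror("membarrier sync-core");
                  return 1;
          }
          return 0;
  }

On riscv the core-serializing step is expected to be a fence.i-equivalent on each core, which is what the new Kconfig selections wire up.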
+ ++config RISCV_ISA_ZAWRS ++ bool "Zawrs extension support for more efficient busy waiting" ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ The Zawrs extension defines instructions to be used in polling loops ++ which allow a hart to enter a low-power state or to trap to the ++ hypervisor while waiting on a store to a memory location. Enable the ++ use of these instructions in the kernel when the Zawrs extension is ++ detected at boot. ++ ++config TOOLCHAIN_HAS_ZABHA ++ bool ++ default y ++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zabha) ++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zabha) ++ depends on AS_HAS_OPTION_ARCH ++ ++config RISCV_ISA_ZABHA ++ bool "Zabha extension support for atomic byte/halfword operations" ++ depends on TOOLCHAIN_HAS_ZABHA ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ Enable the use of the Zabha ISA-extension to implement kernel ++ byte/halfword atomic memory operations when it is detected at boot. ++ ++ If you don't know what to do here, say Y. ++ ++config TOOLCHAIN_HAS_ZACAS ++ bool ++ default y ++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zacas) ++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zacas) ++ depends on AS_HAS_OPTION_ARCH ++ ++config RISCV_ISA_ZACAS ++ bool "Zacas extension support for atomic CAS" ++ depends on TOOLCHAIN_HAS_ZACAS ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ Enable the use of the Zacas ISA-extension to implement kernel atomic ++ cmpxchg operations when it is detected at boot. ++ ++ If you don't know what to do here, say Y. ++ + config TOOLCHAIN_HAS_ZBB + bool + default y +@@ -549,6 +625,29 @@ config RISCV_ISA_ZBB + + If you don't know what to do here, say Y. + ++config TOOLCHAIN_HAS_ZBC ++ bool ++ default y ++ depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zbc) ++ depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zbc) ++ depends on LLD_VERSION >= 150000 || LD_VERSION >= 23900 ++ depends on AS_HAS_OPTION_ARCH ++ ++config RISCV_ISA_ZBC ++ bool "Zbc extension support for carry-less multiplication instructions" ++ depends on TOOLCHAIN_HAS_ZBC ++ depends on MMU ++ depends on RISCV_ALTERNATIVE ++ default y ++ help ++ Adds support to dynamically detect the presence of the Zbc ++ extension (carry-less multiplication) and enable its usage. ++ ++ The Zbc extension could accelerate CRC (cyclic redundancy check) ++ calculations. ++ ++ If you don't know what to do here, say Y. ++ + config RISCV_ISA_ZICBOM + bool "Zicbom extension support for non-coherent DMA operation" + depends on MMU +@@ -579,13 +678,6 @@ config RISCV_ISA_ZICBOZ + + If you don't know what to do here, say Y. + +-config TOOLCHAIN_HAS_ZIHINTPAUSE +- bool +- default y +- depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zihintpause) +- depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause) +- depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600 +- + config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI + def_bool y + # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc +@@ -697,6 +789,20 @@ config ARCH_SUPPORTS_KEXEC_PURGATORY + config ARCH_SUPPORTS_CRASH_DUMP + def_bool y + ++config ARCH_FORCE_MAX_ORDER ++ int "Maximum zone order" ++ default "10" ++ help ++ The kernel memory allocator divides physically contiguous memory ++ blocks into "zones", where each zone is a power of two number of ++ pages. This option selects the largest power of two that the kernel ++ keeps in the memory allocator. 
If you need to allocate very large ++ blocks of physically contiguous memory, then you may need to ++ increase this value. ++ ++ The page size is not necessarily 4KB. Keep this in mind ++ when choosing a value for this option. ++ + config COMPAT + bool "Kernel support for 32-bit U-mode" + default 64BIT +@@ -811,6 +917,17 @@ config EFI + allow the kernel to be booted as an EFI application. This + is only useful on systems that have UEFI firmware. + ++config DMI ++ bool "Enable support for SMBIOS (DMI) tables" ++ depends on EFI ++ default y ++ help ++ This enables SMBIOS/DMI feature for systems. ++ ++ This option is only useful on systems that have UEFI firmware. ++ However, even with this option, the resultant kernel should ++ continue to boot on existing non-UEFI platforms. ++ + config CC_HAVE_STACKPROTECTOR_TLS + def_bool $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=tp -mstack-protector-guard-offset=0) + +diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs +index 30fd6a512828..d468306a1a5c 100644 +--- a/arch/riscv/Kconfig.socs ++++ b/arch/riscv/Kconfig.socs +@@ -22,6 +22,11 @@ config SOC_SIFIVE + help + This enables support for SiFive SoC platform hardware. + ++config ARCH_SOPHGO ++ bool "Sophgo SoCs" ++ help ++ This enables support for Sophgo SoC platform hardware. ++ + config ARCH_STARFIVE + def_bool SOC_STARFIVE + +@@ -49,6 +54,13 @@ config ARCH_THEAD + help + This enables support for the RISC-V based T-HEAD SoCs. + ++config ARCH_XUANTIE ++ bool "XuanTie RISC-V SoCs" ++ depends on MMU && !XIP_KERNEL ++ select ERRATA_THEAD ++ help ++ This enables support for the RISC-V based XuanTie SoCs. ++ + config ARCH_VIRT + def_bool SOC_VIRT + +@@ -111,4 +123,41 @@ config SOC_CANAAN_K210_DTB_SOURCE + + endif # ARCH_CANAAN + ++config SOC_SPACEMIT ++ bool "Spacemit SoCs" ++ select SIFIVE_PLIC ++ help ++ This enables support for Spacemit SoC platform hardware. ++ ++if SOC_SPACEMIT ++ ++choice ++ prompt "Spacemit SoC platform" ++ default SOC_SPACEMIT_K1 ++ help ++ Choose the Spacemit SoC platform to be built. ++ ++ config SOC_SPACEMIT_K1 ++ bool "k1" ++ help ++ This enables support for the Spacemit K1 platform SoCs. ++endchoice ++ ++if SOC_SPACEMIT_K1 ++ ++choice ++ prompt "Spacemit K1 series SoCs" ++ default SOC_SPACEMIT_K1X ++ help ++ Choose the Spacemit K1 series SoC. ++ ++ config SOC_SPACEMIT_K1X ++ bool "k1-x" ++ help ++ This enables support for Spacemit K1-X platform hardware. ++endchoice ++ ++endif ++endif ++ + endmenu # "SoC selection" +diff --git a/arch/riscv/Kconfig.vendor b/arch/riscv/Kconfig.vendor +new file mode 100644 +index 000000000000..6f1cdd32ed29 +--- /dev/null ++++ b/arch/riscv/Kconfig.vendor +@@ -0,0 +1,19 @@ ++menu "Vendor extensions" ++ ++config RISCV_ISA_VENDOR_EXT ++ bool ++ ++menu "Andes" ++config RISCV_ISA_VENDOR_EXT_ANDES ++ bool "Andes vendor extension support" ++ select RISCV_ISA_VENDOR_EXT ++ default y ++ help ++ Say N here if you want to disable all Andes vendor extension ++ support. This will cause any Andes vendor extensions that are ++ requested by hardware probing to be ignored. ++ ++ If you don't know what to do here, say Y. 
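The ARCH_FORCE_MAX_ORDER help text above leaves its arithmetic implicit: the largest physically contiguous buddy allocation is PAGE_SIZE << order. A throwaway user-space sketch, not part of this patch, with the order hard-coded to the default of 10 chosen above::

  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          long page_size = sysconf(_SC_PAGESIZE); /* not necessarily 4KB */
          int max_order = 10;                     /* the default chosen above */

          /* Largest physically contiguous block the buddy allocator serves. */
          printf("max contiguous allocation: %ld bytes\n",
                 page_size << max_order);
          return 0;
  }

With 4KB pages this prints 4194304 (4 MiB); with 64KB pages the same order already permits 64 MiB, which is why the help text warns that the page size is not necessarily 4KB.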
++endmenu ++ ++endmenu +diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile +index b43a6bb7e4dc..30099b367479 100644 +--- a/arch/riscv/Makefile ++++ b/arch/riscv/Makefile +@@ -54,22 +54,13 @@ endif + endif + endif + +-# ISA string setting +-riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima +-riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima +-riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd +-riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c +-riscv-march-$(CONFIG_RISCV_ISA_V) := $(riscv-march-y)v +- +-ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC +-KBUILD_CFLAGS += -Wa,-misa-spec=2.2 +-KBUILD_AFLAGS += -Wa,-misa-spec=2.2 +-else +-riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei +-endif ++include $(srctree)/arch/riscv/Makefile.isa ++ ++# Check if the toolchain supports Zacas ++riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZACAS) := $(riscv-march-y)_zacas + +-# Check if the toolchain supports Zihintpause extension +-riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause ++# Check if the toolchain supports Zabha ++riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZABHA) := $(riscv-march-y)_zabha + + # Remove F,D,V from isa string for all. Keep extensions between "fd" and "v" by + # matching non-v and non-multi-letter extensions out with the filter ([^v_]*) +@@ -152,7 +143,7 @@ ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy) + KBUILD_IMAGE := $(boot)/loader.bin + else + ifeq ($(CONFIG_EFI_ZBOOT),) +-KBUILD_IMAGE := $(boot)/Image.gz ++KBUILD_IMAGE := $(boot)/Image + else + KBUILD_IMAGE := $(boot)/vmlinuz.efi + endif +diff --git a/arch/riscv/Makefile.isa b/arch/riscv/Makefile.isa +new file mode 100644 +index 000000000000..279f24f32763 +--- /dev/null ++++ b/arch/riscv/Makefile.isa +@@ -0,0 +1,15 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++# ISA string setting ++riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima ++riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima ++riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd ++riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c ++riscv-march-$(CONFIG_RISCV_ISA_V) := $(riscv-march-y)v ++ ++ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC ++KBUILD_CFLAGS += -Wa,-misa-spec=2.2 ++KBUILD_AFLAGS += -Wa,-misa-spec=2.2 ++else ++riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei ++endif +diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile +index f60a280abb15..513e9f338637 100644 +--- a/arch/riscv/boot/dts/Makefile ++++ b/arch/riscv/boot/dts/Makefile +@@ -4,6 +4,8 @@ subdir-y += canaan + subdir-y += microchip + subdir-y += renesas + subdir-y += sifive ++subdir-y += sophgo ++subdir-y += spacemit + subdir-y += starfive + subdir-y += thead + +diff --git a/arch/riscv/boot/dts/sophgo/Makefile b/arch/riscv/boot/dts/sophgo/Makefile +new file mode 100644 +index 000000000000..6e7c7763b0a9 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/Makefile +@@ -0,0 +1,7 @@ ++# SPDX-License-Identifier: GPL-2.0 ++dtb-$(CONFIG_ARCH_SOPHGO) += mango-sophgo-x4evb.dtb ++dtb-$(CONFIG_ARCH_SOPHGO) += mango-sophgo-x8evb.dtb ++dtb-$(CONFIG_ARCH_SOPHGO) += mango-sophgo-pisces.dtb ++dtb-$(CONFIG_ARCH_SOPHGO) += mango-sophgo-capricorn.dtb ++dtb-$(CONFIG_ARCH_SOPHGO) += mango-milkv-pioneer.dtb ++dtb-$(CONFIG_ARCH_SOPHGO) += mango-yixin-s2110.dtb +diff --git a/arch/riscv/boot/dts/sophgo/mango-2sockets.dtsi b/arch/riscv/boot/dts/sophgo/mango-2sockets.dtsi +new file mode 100644 +index 000000000000..8c6e22c33cef +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-2sockets.dtsi +@@ -0,0 +1,699 @@ ++#define 
NR_CPUS 128 ++ ++#include "mango.dtsi" ++#if NR_CPUS > 64 ++#include "mango-cpus-socket1.dtsi" ++#endif ++#include "mango-clock-socket1.dtsi" ++ ++/ { ++ /delete-node/ distance-map; ++ distance-map { ++ compatible = "numa-distance-map-v1"; ++ distance-matrix = <0 0 10>, //chip0 ++ <0 1 15>, ++ <0 2 25>, ++ <0 3 30>, ++ <0 4 110>, ++ <0 5 115>, ++ <0 6 125>, ++ <0 7 130>, ++ <1 0 15>, ++ <1 1 10>, ++ <1 2 30>, ++ <1 3 25>, ++ <1 4 115>, ++ <1 5 110>, ++ <1 6 130>, ++ <1 7 125>, ++ <2 0 25>, ++ <2 1 30>, ++ <2 2 10>, ++ <2 3 15>, ++ <2 4 125>, ++ <2 5 130>, ++ <2 6 110>, ++ <2 7 115>, ++ <3 0 30>, ++ <3 1 25>, ++ <3 2 15>, ++ <3 3 10>, ++ <3 4 130>, ++ <3 5 125>, ++ <3 6 115>, ++ <3 7 110>, ++ <4 0 110>, //chip1 ++ <4 1 115>, ++ <4 2 125>, ++ <4 3 130>, ++ <4 4 10>, ++ <4 5 15>, ++ <4 6 25>, ++ <4 7 30>, ++ <5 0 115>, ++ <5 1 110>, ++ <5 2 130>, ++ <5 3 125>, ++ <5 4 15>, ++ <5 5 10>, ++ <5 6 30>, ++ <5 7 25>, ++ <6 0 125>, ++ <6 1 130>, ++ <6 2 110>, ++ <6 3 115>, ++ <6 4 25>, ++ <6 5 30>, ++ <6 6 10>, ++ <6 7 15>, ++ <7 0 130>, ++ <7 1 125>, ++ <7 2 115>, ++ <7 3 110>, ++ <7 4 30>, ++ <7 5 25>, ++ <7 6 15>, ++ <7 7 10>; ++ }; ++ ++ soc { ++#if NR_CPUS > 64 ++ /delete-node/ clint-mswi@7094000000; ++ clint_mswi: clint-mswi@7094000000 { ++ compatible = "thead,c900-clint-mswi"; ++ reg = <0x00000070 0x94000000 0x00000000 0x00004000>; ++ interrupts-extended = < ++ &cpu0_intc 3 ++ &cpu1_intc 3 ++ &cpu2_intc 3 ++ &cpu3_intc 3 ++ &cpu4_intc 3 ++ &cpu5_intc 3 ++ &cpu6_intc 3 ++ &cpu7_intc 3 ++ &cpu8_intc 3 ++ &cpu9_intc 3 ++ &cpu10_intc 3 ++ &cpu11_intc 3 ++ &cpu12_intc 3 ++ &cpu13_intc 3 ++ &cpu14_intc 3 ++ &cpu15_intc 3 ++ &cpu16_intc 3 ++ &cpu17_intc 3 ++ &cpu18_intc 3 ++ &cpu19_intc 3 ++ &cpu20_intc 3 ++ &cpu21_intc 3 ++ &cpu22_intc 3 ++ &cpu23_intc 3 ++ &cpu24_intc 3 ++ &cpu25_intc 3 ++ &cpu26_intc 3 ++ &cpu27_intc 3 ++ &cpu28_intc 3 ++ &cpu29_intc 3 ++ &cpu30_intc 3 ++ &cpu31_intc 3 ++ &cpu32_intc 3 ++ &cpu33_intc 3 ++ &cpu34_intc 3 ++ &cpu35_intc 3 ++ &cpu36_intc 3 ++ &cpu37_intc 3 ++ &cpu38_intc 3 ++ &cpu39_intc 3 ++ &cpu40_intc 3 ++ &cpu41_intc 3 ++ &cpu42_intc 3 ++ &cpu43_intc 3 ++ &cpu44_intc 3 ++ &cpu45_intc 3 ++ &cpu46_intc 3 ++ &cpu47_intc 3 ++ &cpu48_intc 3 ++ &cpu49_intc 3 ++ &cpu50_intc 3 ++ &cpu51_intc 3 ++ &cpu52_intc 3 ++ &cpu53_intc 3 ++ &cpu54_intc 3 ++ &cpu55_intc 3 ++ &cpu56_intc 3 ++ &cpu57_intc 3 ++ &cpu58_intc 3 ++ &cpu59_intc 3 ++ &cpu60_intc 3 ++ &cpu61_intc 3 ++ &cpu62_intc 3 ++ &cpu63_intc 3 ++ ++ // chip 1 ++ &cpu64_intc 3 ++ &cpu65_intc 3 ++ &cpu66_intc 3 ++ &cpu67_intc 3 ++ &cpu68_intc 3 ++ &cpu69_intc 3 ++ &cpu70_intc 3 ++ &cpu71_intc 3 ++ &cpu72_intc 3 ++ &cpu73_intc 3 ++ &cpu74_intc 3 ++ &cpu75_intc 3 ++ &cpu76_intc 3 ++ &cpu77_intc 3 ++ &cpu78_intc 3 ++ &cpu79_intc 3 ++ &cpu80_intc 3 ++ &cpu81_intc 3 ++ &cpu82_intc 3 ++ &cpu83_intc 3 ++ &cpu84_intc 3 ++ &cpu85_intc 3 ++ &cpu86_intc 3 ++ &cpu87_intc 3 ++ &cpu88_intc 3 ++ &cpu89_intc 3 ++ &cpu90_intc 3 ++ &cpu91_intc 3 ++ &cpu92_intc 3 ++ &cpu93_intc 3 ++ &cpu94_intc 3 ++ &cpu95_intc 3 ++ &cpu96_intc 3 ++ &cpu97_intc 3 ++ &cpu98_intc 3 ++ &cpu99_intc 3 ++ &cpu100_intc 3 ++ &cpu101_intc 3 ++ &cpu102_intc 3 ++ &cpu103_intc 3 ++ &cpu104_intc 3 ++ &cpu105_intc 3 ++ &cpu106_intc 3 ++ &cpu107_intc 3 ++ &cpu108_intc 3 ++ &cpu109_intc 3 ++ &cpu110_intc 3 ++ &cpu111_intc 3 ++ &cpu112_intc 3 ++ &cpu113_intc 3 ++ &cpu114_intc 3 ++ &cpu115_intc 3 ++ &cpu116_intc 3 ++ &cpu117_intc 3 ++ &cpu118_intc 3 ++ &cpu119_intc 3 ++ &cpu120_intc 3 ++ &cpu121_intc 3 ++ &cpu122_intc 3 ++ &cpu123_intc 3 ++ &cpu124_intc 3 ++ &cpu125_intc 3 ++ 
&cpu126_intc 3 ++ &cpu127_intc 3 ++ >; ++ }; ++ ++ clint_mtimer16: clint-mtimer@70ac100000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac100000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu64_intc 7 ++ &cpu65_intc 7 ++ &cpu66_intc 7 ++ &cpu67_intc 7 ++ >; ++ }; ++ ++ clint_mtimer17: clint-mtimer@70ac110000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac110000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu68_intc 7 ++ &cpu69_intc 7 ++ &cpu70_intc 7 ++ &cpu71_intc 7 ++ >; ++ }; ++ ++ clint_mtimer18: clint-mtimer@70ac120000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac120000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu72_intc 7 ++ &cpu73_intc 7 ++ &cpu74_intc 7 ++ &cpu75_intc 7 ++ >; ++ }; ++ ++ clint_mtimer19: clint-mtimer@70ac130000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac130000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu76_intc 7 ++ &cpu77_intc 7 ++ &cpu78_intc 7 ++ &cpu79_intc 7 ++ >; ++ }; ++ ++ clint_mtimer20: clint-mtimer@70ac140000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac140000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu80_intc 7 ++ &cpu81_intc 7 ++ &cpu82_intc 7 ++ &cpu83_intc 7 ++ >; ++ }; ++ ++ clint_mtimer21: clint-mtimer@70ac150000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac150000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu84_intc 7 ++ &cpu85_intc 7 ++ &cpu86_intc 7 ++ &cpu87_intc 7 ++ >; ++ }; ++ ++ clint_mtimer22: clint-mtimer@70ac160000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac160000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu88_intc 7 ++ &cpu89_intc 7 ++ &cpu90_intc 7 ++ &cpu91_intc 7 ++ >; ++ }; ++ ++ clint_mtimer23: clint-mtimer@70ac170000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac170000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu92_intc 7 ++ &cpu93_intc 7 ++ &cpu94_intc 7 ++ &cpu95_intc 7 ++ >; ++ }; ++ ++ clint_mtimer24: clint-mtimer@70ac180000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac180000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu96_intc 7 ++ &cpu97_intc 7 ++ &cpu98_intc 7 ++ &cpu99_intc 7 ++ >; ++ }; ++ ++ clint_mtimer25: clint-mtimer@70ac190000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac190000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu100_intc 7 ++ &cpu101_intc 7 ++ &cpu102_intc 7 ++ &cpu103_intc 7 ++ >; ++ }; ++ ++ clint_mtimer26: clint-mtimer@70ac1a0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac1a0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu104_intc 7 ++ &cpu105_intc 7 ++ &cpu106_intc 7 ++ &cpu107_intc 7 ++ >; ++ }; ++ ++ clint_mtimer27: clint-mtimer@70ac1b0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac1b0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu108_intc 7 ++ &cpu109_intc 7 ++ &cpu110_intc 7 ++ &cpu111_intc 7 ++ >; ++ }; ++ ++ clint_mtimer28: clint-mtimer@70ac1c0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac1c0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu112_intc 7 ++ &cpu113_intc 7 ++ &cpu114_intc 7 ++ &cpu115_intc 7 ++ >; ++ }; ++ ++ clint_mtimer29: clint-mtimer@70ac1d0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac1d0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu116_intc 7 
++ &cpu117_intc 7 ++ &cpu118_intc 7 ++ &cpu119_intc 7 ++ >; ++ }; ++ ++ clint_mtimer30: clint-mtimer@70ac1e0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac1e0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu120_intc 7 ++ &cpu121_intc 7 ++ &cpu122_intc 7 ++ &cpu123_intc 7 ++ >; ++ }; ++ ++ clint_mtimer31: clint-mtimer@70ac1f0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac1f0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu124_intc 7 ++ &cpu125_intc 7 ++ &cpu126_intc 7 ++ &cpu127_intc 7 ++ >; ++ }; ++#endif ++ ++ /delete-node/ interrupt-controller@7090000000; ++ intc: interrupt-controller@7090000000 { ++ #address-cells = <0>; ++ #interrupt-cells = <2>; ++ compatible = "thead,c900-plic"; ++ interrupt-controller; ++ interrupts-extended = < ++ &cpu0_intc 11 &cpu0_intc 9 ++ &cpu1_intc 11 &cpu1_intc 9 ++ &cpu2_intc 11 &cpu2_intc 9 ++ &cpu3_intc 11 &cpu3_intc 9 ++ &cpu4_intc 11 &cpu4_intc 9 ++ &cpu5_intc 11 &cpu5_intc 9 ++ &cpu6_intc 11 &cpu6_intc 9 ++ &cpu7_intc 11 &cpu7_intc 9 ++ &cpu8_intc 11 &cpu8_intc 9 ++ &cpu9_intc 11 &cpu9_intc 9 ++ &cpu10_intc 11 &cpu10_intc 9 ++ &cpu11_intc 11 &cpu11_intc 9 ++ &cpu12_intc 11 &cpu12_intc 9 ++ &cpu13_intc 11 &cpu13_intc 9 ++ &cpu14_intc 11 &cpu14_intc 9 ++ &cpu15_intc 11 &cpu15_intc 9 ++ &cpu16_intc 11 &cpu16_intc 9 ++ &cpu17_intc 11 &cpu17_intc 9 ++ &cpu18_intc 11 &cpu18_intc 9 ++ &cpu19_intc 11 &cpu19_intc 9 ++ &cpu20_intc 11 &cpu20_intc 9 ++ &cpu21_intc 11 &cpu21_intc 9 ++ &cpu22_intc 11 &cpu22_intc 9 ++ &cpu23_intc 11 &cpu23_intc 9 ++ &cpu24_intc 11 &cpu24_intc 9 ++ &cpu25_intc 11 &cpu25_intc 9 ++ &cpu26_intc 11 &cpu26_intc 9 ++ &cpu27_intc 11 &cpu27_intc 9 ++ &cpu28_intc 11 &cpu28_intc 9 ++ &cpu29_intc 11 &cpu29_intc 9 ++ &cpu30_intc 11 &cpu30_intc 9 ++ &cpu31_intc 11 &cpu31_intc 9 ++ &cpu32_intc 11 &cpu32_intc 9 ++ &cpu33_intc 11 &cpu33_intc 9 ++ &cpu34_intc 11 &cpu34_intc 9 ++ &cpu35_intc 11 &cpu35_intc 9 ++ &cpu36_intc 11 &cpu36_intc 9 ++ &cpu37_intc 11 &cpu37_intc 9 ++ &cpu38_intc 11 &cpu38_intc 9 ++ &cpu39_intc 11 &cpu39_intc 9 ++ &cpu40_intc 11 &cpu40_intc 9 ++ &cpu41_intc 11 &cpu41_intc 9 ++ &cpu42_intc 11 &cpu42_intc 9 ++ &cpu43_intc 11 &cpu43_intc 9 ++ &cpu44_intc 11 &cpu44_intc 9 ++ &cpu45_intc 11 &cpu45_intc 9 ++ &cpu46_intc 11 &cpu46_intc 9 ++ &cpu47_intc 11 &cpu47_intc 9 ++ &cpu48_intc 11 &cpu48_intc 9 ++ &cpu49_intc 11 &cpu49_intc 9 ++ &cpu50_intc 11 &cpu50_intc 9 ++ &cpu51_intc 11 &cpu51_intc 9 ++ &cpu52_intc 11 &cpu52_intc 9 ++ &cpu53_intc 11 &cpu53_intc 9 ++ &cpu54_intc 11 &cpu54_intc 9 ++ &cpu55_intc 11 &cpu55_intc 9 ++ &cpu56_intc 11 &cpu56_intc 9 ++ &cpu57_intc 11 &cpu57_intc 9 ++ &cpu58_intc 11 &cpu58_intc 9 ++ &cpu59_intc 11 &cpu59_intc 9 ++ &cpu60_intc 11 &cpu60_intc 9 ++ &cpu61_intc 11 &cpu61_intc 9 ++ &cpu62_intc 11 &cpu62_intc 9 ++ &cpu63_intc 11 &cpu63_intc 9 ++ ++#if NR_CPUS > 64 ++ //chip 1 ++ &cpu64_intc 11 &cpu64_intc 9 ++ &cpu65_intc 11 &cpu65_intc 9 ++ &cpu66_intc 11 &cpu66_intc 9 ++ &cpu67_intc 11 &cpu67_intc 9 ++ &cpu68_intc 11 &cpu68_intc 9 ++ &cpu69_intc 11 &cpu69_intc 9 ++ &cpu70_intc 11 &cpu70_intc 9 ++ &cpu71_intc 11 &cpu71_intc 9 ++ &cpu72_intc 11 &cpu72_intc 9 ++ &cpu73_intc 11 &cpu73_intc 9 ++ &cpu74_intc 11 &cpu74_intc 9 ++ &cpu75_intc 11 &cpu75_intc 9 ++ &cpu76_intc 11 &cpu76_intc 9 ++ &cpu77_intc 11 &cpu77_intc 9 ++ &cpu78_intc 11 &cpu78_intc 9 ++ &cpu79_intc 11 &cpu79_intc 9 ++ &cpu80_intc 11 &cpu80_intc 9 ++ &cpu81_intc 11 &cpu81_intc 9 ++ &cpu82_intc 11 &cpu82_intc 9 ++ &cpu83_intc 11 &cpu83_intc 9 ++ &cpu84_intc 11 
&cpu84_intc 9 ++ &cpu85_intc 11 &cpu85_intc 9 ++ &cpu86_intc 11 &cpu86_intc 9 ++ &cpu87_intc 11 &cpu87_intc 9 ++ &cpu88_intc 11 &cpu88_intc 9 ++ &cpu89_intc 11 &cpu89_intc 9 ++ &cpu90_intc 11 &cpu90_intc 9 ++ &cpu91_intc 11 &cpu91_intc 9 ++ &cpu92_intc 11 &cpu92_intc 9 ++ &cpu93_intc 11 &cpu93_intc 9 ++ &cpu94_intc 11 &cpu94_intc 9 ++ &cpu95_intc 11 &cpu95_intc 9 ++ &cpu96_intc 11 &cpu96_intc 9 ++ &cpu97_intc 11 &cpu97_intc 9 ++ &cpu98_intc 11 &cpu98_intc 9 ++ &cpu99_intc 11 &cpu99_intc 9 ++ &cpu100_intc 11 &cpu100_intc 9 ++ &cpu101_intc 11 &cpu101_intc 9 ++ &cpu102_intc 11 &cpu102_intc 9 ++ &cpu103_intc 11 &cpu103_intc 9 ++ &cpu104_intc 11 &cpu104_intc 9 ++ &cpu105_intc 11 &cpu105_intc 9 ++ &cpu106_intc 11 &cpu106_intc 9 ++ &cpu107_intc 11 &cpu107_intc 9 ++ &cpu108_intc 11 &cpu108_intc 9 ++ &cpu109_intc 11 &cpu109_intc 9 ++ &cpu110_intc 11 &cpu110_intc 9 ++ &cpu111_intc 11 &cpu111_intc 9 ++ &cpu112_intc 11 &cpu112_intc 9 ++ &cpu113_intc 11 &cpu113_intc 9 ++ &cpu114_intc 11 &cpu114_intc 9 ++ &cpu115_intc 11 &cpu115_intc 9 ++ &cpu116_intc 11 &cpu116_intc 9 ++ &cpu117_intc 11 &cpu117_intc 9 ++ &cpu118_intc 11 &cpu118_intc 9 ++ &cpu119_intc 11 &cpu119_intc 9 ++ &cpu120_intc 11 &cpu120_intc 9 ++ &cpu121_intc 11 &cpu121_intc 9 ++ &cpu122_intc 11 &cpu122_intc 9 ++ &cpu123_intc 11 &cpu123_intc 9 ++ &cpu124_intc 11 &cpu124_intc 9 ++ &cpu125_intc 11 &cpu125_intc 9 ++ &cpu126_intc 11 &cpu126_intc 9 ++ &cpu127_intc 11 &cpu127_intc 9 ++#endif ++ >; ++ reg = <0x00000070 0x90000000 0x00000000 0x04000000>; ++ reg-names = "control"; ++ riscv,max-priority = <7>; ++ riscv,ndev = <448>; ++ }; ++ ++ top1_misc: top_misc_ctrl@f030010000 { ++ compatible = "syscon"; ++ reg = <0xf0 0x30010000 0x0 0x8000>; ++ }; ++ ++ rst1: reset1-controller { ++ #reset-cells = <1>; ++ compatible = "bitmain,reset"; ++ subctrl-syscon = <&top1_misc>; ++ top_rst_offset = <0x3000>; ++ nr_resets = ; ++ }; ++ ++ gpio3: gpio@f030009000 { ++ compatible = "snps,dw-apb-gpio"; ++ reg = <0xf0 0x30009000 0x0 0x400>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ clocks = <&s1_div_clk GATE_CLK_APB_GPIO>, ++ <&s1_div_clk GATE_CLK_GPIO_DB>; ++ clock-names = "bus", "db"; ++ ++ port3a: gpio-controller@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ bank-name = "port0a"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <32>; ++ reg = <0>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ }; ++ }; ++ ++ ethernet1: ethernet@f040026000 { ++ compatible = "bitmain,ethernet"; ++ reg = <0xf0 0x40026000 0x0 0x4000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clock-names = "clk_tx", "gate_clk_tx", "stmmaceth", "ptp_ref", "gate_clk_ref"; ++ clocks = <&s1_div_clk DIV_CLK_FPLL_TX_ETH0>, ++ <&s1_div_clk GATE_CLK_TX_ETH0>, ++ <&s1_div_clk GATE_CLK_AXI_ETH0>, ++ <&s1_div_clk GATE_CLK_PTP_REF_I_ETH0>, ++ <&s1_div_clk GATE_CLK_REF_ETH0>; ++ ++ /* no hash filter and perfect filter support */ ++ snps,multicast-filter-bins = <0>; ++ snps,perfect-filter-entries = <1>; ++ ++ snps,txpbl = <32>; ++ snps,rxpbl = <32>; ++ snps,aal; ++ ++ snps,axi-config = <&stmmac_axi_setup>; ++ snps,mtl-rx-config = <&mtl_rx_setup>; ++ snps,mtl-tx-config = <&mtl_tx_setup>; ++ ++ phy-mode = "rgmii-txid"; ++ phy-reset-gpios = <&port3a 27 0>; ++ phy-handle = <&phy1>; ++ mdio { ++ #address-cells = <0x1>; ++ #size-cells = <0x0>; ++ compatible = "snps,dwmac-mdio"; ++ phy1: phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ device_type = "ethernet-phy"; ++ reg = <0x0>; ++ }; ++ }; ++ }; 
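The snps,txpbl, snps,rxpbl and snps,aal properties on the ethernet node above are the usual dwmac burst-tuning knobs, consumed by the driver through the generic OF helpers. A hedged kernel-side sketch, not part of this patch and with an invented function name, of how such a node is parsed::

  #include <linux/of.h>
  #include <linux/printk.h>

  /* Invented helper name, for illustration only. */
  static void example_read_dwmac_knobs(struct device_node *np)
  {
          u32 txpbl = 0, rxpbl = 0;
          bool aal;

          /* u32 properties leave the output untouched when absent,
           * so pre-load the desired defaults. */
          of_property_read_u32(np, "snps,txpbl", &txpbl);
          of_property_read_u32(np, "snps,rxpbl", &rxpbl);

          /* Boolean property: mere presence means enabled. */
          aal = of_property_read_bool(np, "snps,aal");

          pr_debug("pbl tx=%u rx=%u aal=%d\n", txpbl, rxpbl, aal);
  }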
++ ++ emmc1: bm-emmc@f04002A000 { ++ compatible = "bitmain,bm-emmc"; ++ reg = <0xf0 0x4002A000 0x0 0x1000>; ++ reg-names = "core_mem"; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ bus-width = <4>; ++ non-removable; ++ no-sdio; ++ no-sd; ++ resets = <&rst1 RST_EMMC>; ++ reset-names = "emmc"; ++ clocks = ++ <&s1_div_clk GATE_CLK_EMMC_100M>, ++ <&s1_div_clk GATE_CLK_AXI_EMMC>, ++ <&s1_div_clk GATE_CLK_100K_EMMC>; ++ clock-names = ++ "clk_gate_emmc", ++ "clk_gate_axi_emmc", ++ "clk_gate_100k_emmc"; ++ }; ++ ++ sd1: bm-sd@f04002B000 { ++ compatible = "bitmain,bm-sd"; ++ reg = <0xf0 0x4002B000 0x0 0x1000>; ++ reg-names = "core_mem"; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ bus-width = <4>; ++ no-sdio; ++ no-mmc; ++ resets = <&rst1 RST_SD>; ++ reset-names = "sdio"; ++ clocks = ++ <&s1_div_clk GATE_CLK_SD_100M>, ++ <&s1_div_clk GATE_CLK_AXI_SD>, ++ <&s1_div_clk GATE_CLK_100K_SD>; ++ clock-names = ++ "clk_gate_sd", ++ "clk_gate_axi_sd", ++ "clk_gate_100k_sd"; ++ }; ++ }; ++ ++ spifmc2: flash-controller@f000180000 { ++ compatible = "sophgo,spifmc"; ++ reg = <0xf0 0x00180000 0x0 0x1000000>; ++ reg-names = "memory"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <100000000>; ++ clocks = <&s1_div_clk GATE_CLK_AHB_SF>; ++ flash@0 { ++ reg = <0>; ++ compatible = "jedec,spi-nor"; ++ }; ++ }; ++ ++ spifmc3: flash-controller@f002180000 { ++ compatible = "sophgo,spifmc"; ++ reg = <0xf0 0x02180000 0x0 0x1000000>; ++ reg-names = "memory"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <100000000>; ++ clocks = <&s1_div_clk GATE_CLK_AHB_SF>; ++ flash@0 { ++ reg = <0>; ++ compatible = "jedec,spi-nor"; ++ }; ++ }; ++ ++ aliases { ++ serial0 = &uart0; ++ ethernet0 = &ethernet0; ++ ethernet1 = &ethernet1; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-clock-socket0.dtsi b/arch/riscv/boot/dts/sophgo/mango-clock-socket0.dtsi +new file mode 100644 +index 000000000000..af3380412f1d +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-clock-socket0.dtsi +@@ -0,0 +1,124 @@ ++/ { ++ socket0-clocks { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ cgi: ctrystal { ++ compatible = "fixed-clock"; ++ clock-frequency = <25000000>; ++ clock-output-names = "cgi"; ++ #clock-cells = <0>; ++ }; ++ ++ /* pll clock */ ++ mpll: mpll { ++ compatible = "mango, pll-clock"; ++ #clock-cells = <0>; ++ id = ; ++ mode = ; ++ subctrl-syscon = <&top_misc>; ++ clocks = <&cgi>; ++ clock-output-names = "mpll_clock"; ++ }; ++ ++ fpll: fpll { ++ compatible = "mango, pll-clock"; ++ #clock-cells = <0>; ++ id = ; ++ mode = ; ++ subctrl-syscon = <&top_misc>; ++ clocks = <&cgi>; ++ clock-output-names = "fpll_clock"; ++ }; ++ ++ dpll0: dpll0 { ++ compatible = "mango, pll-clock"; ++ #clock-cells = <0>; ++ id = ; ++ mode = ; ++ subctrl-syscon = <&top_misc>; ++ clocks = <&cgi>; ++ clock-output-names = "dpll0_clock"; ++ }; ++ ++ dpll1: dpll1 { ++ compatible = "mango, pll-clock"; ++ #clock-cells = <0>; ++ mode = ; ++ subctrl-syscon = <&top_misc>; ++ clocks = <&cgi>; ++ id = ; ++ clock-output-names = "dpll1_clock"; ++ }; ++ ++ div_clk: div_clk { ++ compatible = "mango, pll-child-clock"; ++ #clock-cells = <1>; ++ id = ; ++ subctrl-syscon = <&top_misc>; ++ }; ++ ++ mux_clk: mux_clk { ++ compatible = "mango, pll-mux-clock"; ++ #clock-cells = <1>; ++ id = ; ++ subctrl-syscon = <&top_misc>; ++ }; ++ ++ socket0_default_rates { ++ compatible = "mango, clk-default-rates"; ++ #clock-cells = 
<1>; ++ subctrl-syscon = <&top_misc>; ++ clocks = \ ++ <&mpll>, <&fpll>, ++ ++ <&div_clk DIV_CLK_FPLL_RP_CPU_NORMAL_1>, ++ <&div_clk DIV_CLK_FPLL_50M_A53>, ++ <&div_clk DIV_CLK_FPLL_TOP_RP_CMN_DIV2>, ++ <&div_clk DIV_CLK_FPLL_UART_500M>, ++ <&div_clk DIV_CLK_FPLL_AHB_LPC>, ++ <&div_clk DIV_CLK_FPLL_EFUSE>, ++ <&div_clk DIV_CLK_FPLL_TX_ETH0>, ++ <&div_clk DIV_CLK_FPLL_PTP_REF_I_ETH0>, ++ <&div_clk DIV_CLK_FPLL_REF_ETH0>, ++ <&div_clk DIV_CLK_FPLL_EMMC>, ++ <&div_clk DIV_CLK_FPLL_SD>, ++ <&div_clk DIV_CLK_FPLL_TOP_AXI0>, ++ <&div_clk DIV_CLK_FPLL_TOP_AXI_HSPERI>, ++ <&div_clk DIV_CLK_FPLL_AXI_DDR_1>, ++ <&div_clk DIV_CLK_FPLL_DIV_TIMER1>, ++ <&div_clk DIV_CLK_FPLL_DIV_TIMER2>, ++ <&div_clk DIV_CLK_FPLL_DIV_TIMER3>, ++ <&div_clk DIV_CLK_FPLL_DIV_TIMER4>, ++ <&div_clk DIV_CLK_FPLL_DIV_TIMER5>, ++ <&div_clk DIV_CLK_FPLL_DIV_TIMER6>, ++ <&div_clk DIV_CLK_FPLL_DIV_TIMER7>, ++ <&div_clk DIV_CLK_FPLL_DIV_TIMER8>, ++ <&div_clk DIV_CLK_FPLL_100K_EMMC>, ++ <&div_clk DIV_CLK_FPLL_100K_SD>, ++ <&div_clk DIV_CLK_FPLL_GPIO_DB>, ++ ++ <&div_clk DIV_CLK_MPLL_RP_CPU_NORMAL_0>, ++ <&div_clk DIV_CLK_MPLL_AXI_DDR_0>; ++ ++ clock-rates = \ ++ <2000000000>, <1000000000>, ++ ++ <2000000000>, <50000000>, ++ <1000000000>, <500000000>, ++ <200000000>, <25000000>, ++ <125000000>, <50000000>, ++ <25000000>, <100000000>, ++ <100000000>, <100000000>, ++ <250000000>, <1000000000>, ++ <50000000>, <50000000>, ++ <50000000>, <50000000>, ++ <50000000>, <50000000>, ++ <50000000>, <50000000>, ++ <100000>, <100000>, <100000>, ++ ++ <2000000001>, <1000000001>; ++ }; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-clock-socket1.dtsi b/arch/riscv/boot/dts/sophgo/mango-clock-socket1.dtsi +new file mode 100644 +index 000000000000..cfe34495e4fd +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-clock-socket1.dtsi +@@ -0,0 +1,124 @@ ++/ { ++ socket1-clocks { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ cgi1: ctrystal1 { ++ compatible = "fixed-clock"; ++ clock-frequency = <25000000>; ++ clock-output-names = "s1_cgi"; ++ #clock-cells = <0>; ++ }; ++ ++ /* pll clock */ ++ mpll1: mpll1 { ++ compatible = "mango, pll-clock"; ++ #clock-cells = <0>; ++ id = ; ++ mode = ; ++ subctrl-syscon = <&top1_misc>; ++ clocks = <&cgi1>; ++ clock-output-names = "s1_mpll_clock"; ++ }; ++ ++ fpll1: fpll1 { ++ compatible = "mango, pll-clock"; ++ #clock-cells = <0>; ++ id = ; ++ mode = ; ++ subctrl-syscon = <&top1_misc>; ++ clocks = <&cgi1>; ++ clock-output-names = "s1_fpll_clock"; ++ }; ++ ++ dpll01: dpll01 { ++ compatible = "mango, pll-clock"; ++ #clock-cells = <0>; ++ id = ; ++ mode = ; ++ subctrl-syscon = <&top1_misc>; ++ clocks = <&cgi1>; ++ clock-output-names = "s1_dpll0_clock"; ++ }; ++ ++ dpll11: dpll11 { ++ compatible = "mango, pll-clock"; ++ #clock-cells = <0>; ++ mode = ; ++ subctrl-syscon = <&top1_misc>; ++ clocks = <&cgi1>; ++ id = ; ++ clock-output-names = "s1_dpll1_clock"; ++ }; ++ ++ s1_div_clk: s1_div_clk { ++ compatible = "mango, pll-child-clock"; ++ #clock-cells = <1>; ++ id = ; ++ subctrl-syscon = <&top1_misc>; ++ }; ++ ++ s1_mux_clk: s1_mux_clk { ++ compatible = "mango, pll-mux-clock"; ++ #clock-cells = <1>; ++ id = ; ++ subctrl-syscon = <&top1_misc>; ++ }; ++ ++ socket1_default_rates { ++ compatible = "mango, clk-default-rates"; ++ #clock-cells = <1>; ++ subctrl-syscon = <&top1_misc>; ++ clocks = \ ++ <&mpll1>, <&fpll1>, ++ ++ <&s1_div_clk DIV_CLK_FPLL_RP_CPU_NORMAL_1>, ++ <&s1_div_clk DIV_CLK_FPLL_50M_A53>, ++ <&s1_div_clk DIV_CLK_FPLL_TOP_RP_CMN_DIV2>, ++ <&s1_div_clk DIV_CLK_FPLL_UART_500M>, ++ 
<&s1_div_clk DIV_CLK_FPLL_AHB_LPC>, ++ <&s1_div_clk DIV_CLK_FPLL_EFUSE>, ++ <&s1_div_clk DIV_CLK_FPLL_TX_ETH0>, ++ <&s1_div_clk DIV_CLK_FPLL_PTP_REF_I_ETH0>, ++ <&s1_div_clk DIV_CLK_FPLL_REF_ETH0>, ++ <&s1_div_clk DIV_CLK_FPLL_EMMC>, ++ <&s1_div_clk DIV_CLK_FPLL_SD>, ++ <&s1_div_clk DIV_CLK_FPLL_TOP_AXI0>, ++ <&s1_div_clk DIV_CLK_FPLL_TOP_AXI_HSPERI>, ++ <&s1_div_clk DIV_CLK_FPLL_AXI_DDR_1>, ++ <&s1_div_clk DIV_CLK_FPLL_DIV_TIMER1>, ++ <&s1_div_clk DIV_CLK_FPLL_DIV_TIMER2>, ++ <&s1_div_clk DIV_CLK_FPLL_DIV_TIMER3>, ++ <&s1_div_clk DIV_CLK_FPLL_DIV_TIMER4>, ++ <&s1_div_clk DIV_CLK_FPLL_DIV_TIMER5>, ++ <&s1_div_clk DIV_CLK_FPLL_DIV_TIMER6>, ++ <&s1_div_clk DIV_CLK_FPLL_DIV_TIMER7>, ++ <&s1_div_clk DIV_CLK_FPLL_DIV_TIMER8>, ++ <&s1_div_clk DIV_CLK_FPLL_100K_EMMC>, ++ <&s1_div_clk DIV_CLK_FPLL_100K_SD>, ++ <&s1_div_clk DIV_CLK_FPLL_GPIO_DB>, ++ ++ <&s1_div_clk DIV_CLK_MPLL_RP_CPU_NORMAL_0>, ++ <&s1_div_clk DIV_CLK_MPLL_AXI_DDR_0>; ++ ++ clock-rates = \ ++ <2000000000>, <1000000000>, ++ ++ <2000000000>, <50000000>, ++ <1000000000>, <500000000>, ++ <200000000>, <25000000>, ++ <125000000>, <50000000>, ++ <25000000>, <100000000>, ++ <100000000>, <100000000>, ++ <250000000>, <1000000000>, ++ <50000000>, <50000000>, ++ <50000000>, <50000000>, ++ <50000000>, <50000000>, ++ <50000000>, <50000000>, ++ <100000>, <100000>, <100000>, ++ ++ <2000000001>, <1000000001>; ++ }; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-cpus-socket0.dtsi b/arch/riscv/boot/dts/sophgo/mango-cpus-socket0.dtsi +new file mode 100644 +index 000000000000..5e3e697f1daa +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-cpus-socket0.dtsi +@@ -0,0 +1,2089 @@ ++// SPDX-License-Identifier: (GPL-2.0 OR MIT) ++/* ++ * Copyright (C) 2022 Sophgo Technology Inc. All rights reserved. ++ */ ++ ++/ { ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ timebase-frequency = <50000000>; ++ ++ cpu-map { ++ socket0 { ++ cluster0 { ++ core0 { ++ cpu = <&cpu0>; ++ }; ++ core1 { ++ cpu = <&cpu1>; ++ }; ++ core2 { ++ cpu = <&cpu2>; ++ }; ++ core3 { ++ cpu = <&cpu3>; ++ }; ++ }; ++ ++ cluster1 { ++ core0 { ++ cpu = <&cpu4>; ++ }; ++ core1 { ++ cpu = <&cpu5>; ++ }; ++ core2 { ++ cpu = <&cpu6>; ++ }; ++ core3 { ++ cpu = <&cpu7>; ++ }; ++ }; ++ ++ cluster2 { ++ core0 { ++ cpu = <&cpu16>; ++ }; ++ core1 { ++ cpu = <&cpu17>; ++ }; ++ core2 { ++ cpu = <&cpu18>; ++ }; ++ core3 { ++ cpu = <&cpu19>; ++ }; ++ }; ++ ++ cluster3 { ++ core0 { ++ cpu = <&cpu20>; ++ }; ++ core1 { ++ cpu = <&cpu21>; ++ }; ++ core2 { ++ cpu = <&cpu22>; ++ }; ++ core3 { ++ cpu = <&cpu23>; ++ }; ++ }; ++ ++ cluster4 { ++ core0 { ++ cpu = <&cpu8>; ++ }; ++ core1 { ++ cpu = <&cpu9>; ++ }; ++ core2 { ++ cpu = <&cpu10>; ++ }; ++ core3 { ++ cpu = <&cpu11>; ++ }; ++ }; ++ ++ cluster5 { ++ core0 { ++ cpu = <&cpu12>; ++ }; ++ core1 { ++ cpu = <&cpu13>; ++ }; ++ core2 { ++ cpu = <&cpu14>; ++ }; ++ core3 { ++ cpu = <&cpu15>; ++ }; ++ }; ++ ++ cluster6 { ++ core0 { ++ cpu = <&cpu24>; ++ }; ++ core1 { ++ cpu = <&cpu25>; ++ }; ++ core2 { ++ cpu = <&cpu26>; ++ }; ++ core3 { ++ cpu = <&cpu27>; ++ }; ++ }; ++ ++ cluster7 { ++ core0 { ++ cpu = <&cpu28>; ++ }; ++ core1 { ++ cpu = <&cpu29>; ++ }; ++ core2 { ++ cpu = <&cpu30>; ++ }; ++ core3 { ++ cpu = <&cpu31>; ++ }; ++ }; ++ ++ cluster8 { ++ core0 { ++ cpu = <&cpu32>; ++ }; ++ core1 { ++ cpu = <&cpu33>; ++ }; ++ core2 { ++ cpu = <&cpu34>; ++ }; ++ core3 { ++ cpu = <&cpu35>; ++ }; ++ }; ++ ++ cluster9 { ++ core0 { ++ cpu = <&cpu36>; ++ }; ++ core1 { ++ cpu = <&cpu37>; ++ }; ++ core2 { ++ cpu = <&cpu38>; ++ }; ++ core3 { ++ cpu = 
<&cpu39>; ++ }; ++ }; ++ ++ cluster10 { ++ core0 { ++ cpu = <&cpu48>; ++ }; ++ core1 { ++ cpu = <&cpu49>; ++ }; ++ core2 { ++ cpu = <&cpu50>; ++ }; ++ core3 { ++ cpu = <&cpu51>; ++ }; ++ }; ++ ++ cluster11 { ++ core0 { ++ cpu = <&cpu52>; ++ }; ++ core1 { ++ cpu = <&cpu53>; ++ }; ++ core2 { ++ cpu = <&cpu54>; ++ }; ++ core3 { ++ cpu = <&cpu55>; ++ }; ++ }; ++ ++ cluster12 { ++ core0 { ++ cpu = <&cpu40>; ++ }; ++ core1 { ++ cpu = <&cpu41>; ++ }; ++ core2 { ++ cpu = <&cpu42>; ++ }; ++ core3 { ++ cpu = <&cpu43>; ++ }; ++ }; ++ ++ cluster13 { ++ core0 { ++ cpu = <&cpu44>; ++ }; ++ core1 { ++ cpu = <&cpu45>; ++ }; ++ core2 { ++ cpu = <&cpu46>; ++ }; ++ core3 { ++ cpu = <&cpu47>; ++ }; ++ }; ++ ++ cluster14 { ++ core0 { ++ cpu = <&cpu56>; ++ }; ++ core1 { ++ cpu = <&cpu57>; ++ }; ++ core2 { ++ cpu = <&cpu58>; ++ }; ++ core3 { ++ cpu = <&cpu59>; ++ }; ++ }; ++ ++ cluster15 { ++ core0 { ++ cpu = <&cpu60>; ++ }; ++ core1 { ++ cpu = <&cpu61>; ++ }; ++ core2 { ++ cpu = <&cpu62>; ++ }; ++ core3 { ++ cpu = <&cpu63>; ++ }; ++ }; ++ }; ++ }; ++ ++ cpu0: cpu@0 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <0>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache0>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu0_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu1: cpu@1 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <1>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache0>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu1_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu2: cpu@2 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <2>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache0>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu2_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu3: cpu@3 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <3>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache0>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu3_intc: interrupt-controller { ++ compatible = 
"riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu4: cpu@4 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <4>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache1>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu4_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu5: cpu@5 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <5>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache1>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu5_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu6: cpu@6 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <6>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache1>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu6_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu7: cpu@7 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <7>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache1>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu7_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu8: cpu@8 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <8>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache4>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu8_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu9: cpu@9 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; 
++ reg = <9>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache4>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu9_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu10: cpu@10 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <10>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache4>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu10_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu11: cpu@11 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <11>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache4>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu11_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu12: cpu@12 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <12>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache5>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu12_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu13: cpu@13 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <13>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache5>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu13_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu14: cpu@14 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <14>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache5>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu14_intc: interrupt-controller { 
++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu15: cpu@15 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <15>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache5>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu15_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu16: cpu@16 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <16>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache2>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu16_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu17: cpu@17 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <17>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache2>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu17_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu18: cpu@18 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <18>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache2>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu18_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu19: cpu@19 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <19>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache2>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu19_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu20: cpu@20 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ 
"zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <20>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache3>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu20_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu21: cpu@21 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <21>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache3>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu21_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu22: cpu@22 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <22>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache3>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu22_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu23: cpu@23 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <23>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache3>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <0>; ++ ++ cpu23_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu24: cpu@24 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <24>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache6>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu24_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu25: cpu@25 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <25>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache6>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = 
<1>; ++ ++ cpu25_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu26: cpu@26 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <26>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache6>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu26_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu27: cpu@27 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <27>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache6>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu27_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu28: cpu@28 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <28>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache7>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu28_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu29: cpu@29 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <29>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache7>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu29_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu30: cpu@30 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <30>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache7>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu30_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu31: cpu@31 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ 
riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <31>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache7>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <1>; ++ ++ cpu31_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu32: cpu@32 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <32>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache8>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu32_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu33: cpu@33 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <33>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache8>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu33_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu34: cpu@34 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <34>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache8>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu34_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu35: cpu@35 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <35>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache8>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu35_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu36: cpu@36 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <36>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = 
<&l2_cache9>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu36_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu37: cpu@37 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <37>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache9>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu37_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu38: cpu@38 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <38>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache9>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu38_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu39: cpu@39 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <39>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache9>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu39_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu40: cpu@40 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <40>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache12>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu40_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu41: cpu@41 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <41>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache12>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu41_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu42: cpu@42 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa 
= "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <42>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache12>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu42_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu43: cpu@43 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <43>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache12>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu43_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu44: cpu@44 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <44>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache13>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu44_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu45: cpu@45 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <45>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache13>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu45_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu46: cpu@46 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <46>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache13>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu46_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu47: cpu@47 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <47>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = 
<65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache13>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu47_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu48: cpu@48 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <48>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache10>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu48_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu49: cpu@49 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <49>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache10>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu49_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu50: cpu@50 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <50>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache10>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu50_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu51: cpu@51 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <51>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache10>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu51_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu52: cpu@52 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <52>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache11>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu52_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu53: cpu@53 { ++ compatible = 
"thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <53>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache11>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu53_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu54: cpu@54 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <54>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache11>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu54_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu55: cpu@55 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <55>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache11>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <2>; ++ ++ cpu55_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu56: cpu@56 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <56>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache14>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu56_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu57: cpu@57 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <57>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache14>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu57_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu58: cpu@58 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <58>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = 
<512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache14>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu58_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu59: cpu@59 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <59>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache14>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu59_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu60: cpu@60 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <60>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache15>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu60_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu61: cpu@61 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <61>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache15>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu61_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu62: cpu@62 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <62>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache15>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu62_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu63: cpu@63 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdc"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <63>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache15>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <3>; ++ ++ cpu63_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = 
<1>;
++			};
++		};
++
++		l2_cache0: cache-controller-0 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache1: cache-controller-1 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache2: cache-controller-2 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache3: cache-controller-3 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache4: cache-controller-4 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache5: cache-controller-5 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache6: cache-controller-6 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache7: cache-controller-7 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache8: cache-controller-8 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache9: cache-controller-9 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache10: cache-controller-10 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache11: cache-controller-11 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache12: cache-controller-12 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache13: cache-controller-13 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache14: cache-controller-14 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l2_cache15: cache-controller-15 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <2>;
++			cache-size = <1048576>;
++			cache-sets = <1024>;
++			cache-unified;
++			next-level-cache = <&l3_cache0>;
++		};
++
++		l3_cache0: cache-controller-130 {
++			compatible = "cache";
++			cache-block-size = <64>;
++			cache-level = <3>;
++			cache-size = <67108864>;
++			cache-sets = <4096>;
++			cache-unified;
++		};
++	};
++};
+diff --git a/arch/riscv/boot/dts/sophgo/mango-cpus-socket1.dtsi b/arch/riscv/boot/dts/sophgo/mango-cpus-socket1.dtsi
+new file mode 100644
+index 000000000000..f8e2800c51b9
+--- /dev/null
++++ b/arch/riscv/boot/dts/sophgo/mango-cpus-socket1.dtsi
+@@ -0,0 +1,2090 @@
++// SPDX-License-Identifier: (GPL-2.0 OR MIT)
++/*
++ * Copyright (C) 2022 Sophgo Technology Inc. All rights reserved.
++ */
++
++/ {
++	cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++		timebase-frequency = <50000000>;
++
++		cpu-map {
++			socket1 {
++				cluster0 {
++					core0 {
++						cpu = <&cpu64>;
++					};
++					core1 {
++						cpu = <&cpu65>;
++					};
++					core2 {
++						cpu = <&cpu66>;
++					};
++					core3 {
++						cpu = <&cpu67>;
++					};
++				};
++
++				cluster1 {
++					core0 {
++						cpu = <&cpu68>;
++					};
++					core1 {
++						cpu = <&cpu69>;
++					};
++					core2 {
++						cpu = <&cpu70>;
++					};
++					core3 {
++						cpu = <&cpu71>;
++					};
++				};
++
++				cluster2 {
++					core0 {
++						cpu = <&cpu80>;
++					};
++					core1 {
++						cpu = <&cpu81>;
++					};
++					core2 {
++						cpu = <&cpu82>;
++					};
++					core3 {
++						cpu = <&cpu83>;
++					};
++				};
++
++				cluster3 {
++					core0 {
++						cpu = <&cpu84>;
++					};
++					core1 {
++						cpu = <&cpu85>;
++					};
++					core2 {
++						cpu = <&cpu86>;
++					};
++					core3 {
++						cpu = <&cpu87>;
++					};
++				};
++
++				cluster4 {
++					core0 {
++						cpu = <&cpu72>;
++					};
++					core1 {
++						cpu = <&cpu73>;
++					};
++					core2 {
++						cpu = <&cpu74>;
++					};
++					core3 {
++						cpu = <&cpu75>;
++					};
++				};
++
++				cluster5 {
++					core0 {
++						cpu = <&cpu76>;
++					};
++					core1 {
++						cpu = <&cpu77>;
++					};
++					core2 {
++						cpu = <&cpu78>;
++					};
++					core3 {
++						cpu = <&cpu79>;
++					};
++				};
++
++				cluster6 {
++					core0 {
++						cpu = <&cpu88>;
++					};
++					core1 {
++						cpu = <&cpu89>;
++					};
++					core2 {
++						cpu = <&cpu90>;
++					};
++					core3 {
++						cpu = <&cpu91>;
++					};
++				};
++
++				cluster7 {
++					core0 {
++						cpu = <&cpu92>;
++					};
++					core1 {
++						cpu = <&cpu93>;
++					};
++					core2 {
++						cpu = <&cpu94>;
++					};
++					core3 {
++						cpu = <&cpu95>;
++					};
++				};
++
++				cluster8 {
++					core0 {
++						cpu = <&cpu96>;
++					};
++					core1 {
++						cpu = <&cpu97>;
++					};
++					core2 {
++						cpu = <&cpu98>;
++					};
++					core3 {
++						cpu = <&cpu99>;
++					};
++				};
++
++				cluster9 {
++					core0 {
++						cpu = <&cpu100>;
++					};
++					core1 {
++						cpu = <&cpu101>;
++					};
++					core2 {
++						cpu = <&cpu102>;
++					};
++					core3 {
++						cpu = <&cpu103>;
++					};
++				};
++
++				cluster10 {
++					core0 {
++						cpu = <&cpu112>;
++					};
++					core1 {
++						cpu = <&cpu113>;
++					};
++					core2 {
++						cpu = <&cpu114>;
++					};
++					core3 {
++						cpu = <&cpu115>;
++					};
++				};
++
++				cluster11 {
++					core0 {
++						cpu = <&cpu116>;
++					};
++					core1 {
++						cpu = <&cpu117>;
++					};
++					core2 {
++						cpu = <&cpu118>;
++					};
++					core3 {
++						cpu = <&cpu119>;
++					};
++				};
++
++				cluster12 {
++					core0 {
++						cpu = <&cpu104>;
++					};
++					core1 {
++						cpu = <&cpu105>;
++					};
++					core2 {
++						cpu = <&cpu106>;
++					};
++					core3 {
++						cpu = <&cpu107>;
++					};
++				};
++
++				cluster13 {
++					core0 {
++						cpu = <&cpu108>;
++					};
++					core1 {
++						cpu = <&cpu109>;
++					};
++					core2 {
++						cpu = <&cpu110>;
++					};
++					core3 {
++						cpu = <&cpu111>;
++					};
++				};
++
++				cluster14 {
++					core0 {
++						cpu = <&cpu120>;
++					};
++					core1 {
++						cpu = <&cpu121>;
++					};
++					core2 {
++						cpu = <&cpu122>;
++					};
++					core3 {
++						cpu = <&cpu123>;
++					};
++				};
++
++				cluster15 {
++					core0 {
++						cpu = <&cpu124>;
++					};
++					core1 {
++						cpu = <&cpu125>;
++					};
++					core2 {
++						cpu = <&cpu126>;
++					};
++					core3 {
++						cpu = <&cpu127>;
++					};
++				};
++			};
++		};
++
++		cpu64: cpu@64 {
++			compatible = "thead,c920",
"riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache16>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu64_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu65: cpu@65 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <65>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache16>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu65_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu66: cpu@66 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <66>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache16>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu66_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu67: cpu@67 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <67>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache16>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu67_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu68: cpu@68 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <68>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache17>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu68_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu69: cpu@69 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <69>; ++ i-cache-block-size = <64>; ++ i-cache-size = 
<65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache17>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu69_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu70: cpu@70 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <70>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache17>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu70_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu71: cpu@71 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <71>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache17>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu71_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu72: cpu@72 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <72>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache20>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu72_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu73: cpu@73 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <73>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache20>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu73_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu74: cpu@74 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <74>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache20>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu74_intc: interrupt-controller { ++ compatible = 
"riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu75: cpu@75 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <75>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache20>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu75_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu76: cpu@76 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <76>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache21>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu76_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu77: cpu@77 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <77>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache21>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu77_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu78: cpu@78 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <78>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache21>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu78_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu79: cpu@79 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <79>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache21>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu79_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu80: cpu@80 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", 
"f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <80>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache18>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu80_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu81: cpu@81 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <81>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache18>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu81_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu82: cpu@82 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <82>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache18>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu82_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu83: cpu@83 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <83>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache18>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu83_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu84: cpu@84 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <84>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache19>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu84_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu85: cpu@85 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <85>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = 
<&l2_cache19>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu85_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu86: cpu@86 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <86>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache19>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu86_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu87: cpu@87 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <87>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache19>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <4>; ++ ++ cpu87_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu88: cpu@88 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <88>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache22>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu88_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu89: cpu@89 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <89>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache22>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu89_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu90: cpu@90 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <90>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache22>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu90_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu91: cpu@91 { ++ compatible = "thead,c920", "riscv"; ++ 
device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <91>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache22>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu91_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu92: cpu@92 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <92>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache23>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu92_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu93: cpu@93 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <93>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache23>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu93_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu94: cpu@94 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <94>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache23>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu94_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu95: cpu@95 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <95>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache23>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <5>; ++ ++ cpu95_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu96: cpu@96 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <96>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ 
i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache24>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu96_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu97: cpu@97 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <97>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache24>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu97_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu98: cpu@98 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <98>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache24>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu98_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu99: cpu@99 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <99>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache24>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu99_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu100: cpu@100 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <100>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache25>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu100_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu101: cpu@101 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <101>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache25>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu101_intc: interrupt-controller { ++ compatible = 
"riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu102: cpu@102 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <102>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache25>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu102_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu103: cpu@103 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <103>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache25>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu103_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu104: cpu@104 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <104>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache28>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu104_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu105: cpu@105 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <105>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache28>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu105_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu106: cpu@106 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <106>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache28>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu106_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu107: cpu@107 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ 
riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <107>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache28>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu107_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu108: cpu@108 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <108>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache29>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu108_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu109: cpu@109 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <109>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache29>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu109_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu110: cpu@110 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <110>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache29>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu110_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu111: cpu@111 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <111>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache29>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu111_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu112: cpu@112 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <112>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = 
<65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache26>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu112_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu113: cpu@113 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <113>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache26>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu113_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu114: cpu@114 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <114>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache26>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu114_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu115: cpu@115 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <115>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache26>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu115_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu116: cpu@116 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <116>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache27>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu116_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu117: cpu@117 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <117>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache27>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu117_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; 
++ }; ++ }; ++ ++ cpu118: cpu@118 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <118>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache27>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu118_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu119: cpu@119 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <119>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache27>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <6>; ++ ++ cpu119_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu120: cpu@120 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <120>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache30>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu120_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu121: cpu@121 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <121>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache30>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu121_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu122: cpu@122 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <122>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache30>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu122_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu123: cpu@123 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", 
"zifencei", ++ "zihpm"; ++ reg = <123>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache30>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu123_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu124: cpu@124 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <124>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache31>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu124_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu125: cpu@125 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <125>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache31>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu125_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu126: cpu@126 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <126>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache31>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu126_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ cpu127: cpu@127 { ++ compatible = "thead,c920", "riscv"; ++ device_type = "cpu"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicntr", "zicsr", "zifencei", ++ "zihpm"; ++ reg = <127>; ++ i-cache-block-size = <64>; ++ i-cache-size = <65536>; ++ i-cache-sets = <512>; ++ d-cache-block-size = <64>; ++ d-cache-size = <65536>; ++ d-cache-sets = <512>; ++ next-level-cache = <&l2_cache31>; ++ mmu-type = "riscv,sv39"; ++ numa-node-id = <7>; ++ ++ cpu127_intc: interrupt-controller { ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ #interrupt-cells = <1>; ++ }; ++ }; ++ ++ l2_cache16: cache-controller-16 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache17: cache-controller-17 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache18: 
cache-controller-18 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache19: cache-controller-19 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache20: cache-controller-20 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache21: cache-controller-21 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache22: cache-controller-22 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache23: cache-controller-23 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache24: cache-controller-24 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache25: cache-controller-25 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache26: cache-controller-26 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache27: cache-controller-27 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache28: cache-controller-28 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache29: cache-controller-29 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache30: cache-controller-30 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l2_cache31: cache-controller-31 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <1048576>; ++ cache-sets = <1024>; ++ cache-unified; ++ next-level-cache = <&l3_cache1>; ++ }; ++ ++ l3_cache1: cache-controller-131 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <3>; ++ cache-size = <67108864>; ++ cache-sets = <4096>; ++ cache-unified; ++ }; ++ ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-milkv-pioneer.dts b/arch/riscv/boot/dts/sophgo/mango-milkv-pioneer.dts +new file mode 100644 +index 000000000000..3e9bd7ca6793 +--- /dev/null ++++ 
b/arch/riscv/boot/dts/sophgo/mango-milkv-pioneer.dts +@@ -0,0 +1,170 @@ ++#include "mango.dtsi" ++#include "mango-pcie-4rc.dtsi" ++ ++/ { ++ info { ++ file-name = "mango-milkv-pioneer.dts"; ++ }; ++}; ++ ++&i2c0 { ++ rtc: rtc@68 { ++ compatible = "dallas,ds1307"; ++ reg = <0x68>; ++ }; ++}; ++ ++&i2c1 { ++ mcu: sg2042mcu@17 { ++ compatible = "sophgo,sg20xx-mcu"; ++ reg = <0x17>; ++ #thermal-sensor-cells = <1>; ++ }; ++ ++ mango_srst: mango-reset@17 { ++ compatible = "mango,reset"; ++ reg = <0x17>; ++ }; ++}; ++ ++&i2c2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c2_acquire>; ++}; ++ ++&soc { ++ /delete-node/ ethernet@7040026000; ++ gpio-poweroff { ++ compatible = "gpio-keys"; ++ input-name = "gpio-keys"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pwr_key>; ++ ++ power { ++ label = "GPIO Key Power"; ++ linux,code = <KEY_POWER>; ++ gpios = <&port0a 22 GPIO_ACTIVE_HIGH>; ++ linux,input-type = <1>; ++ debounce-interval = <100>; ++ }; ++ }; ++ ++ gpio-restart { ++ compatible = "gpio-keys"; ++ input-name = "gpio-keys"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&restart_key>; ++ ++ restart { ++ label = "GPIO Key Restart"; ++ linux,code = <KEY_RESTART>; ++ gpios = <&port0a 23 GPIO_ACTIVE_HIGH>; ++ linux,input-type = <1>; ++ debounce-interval = <100>; ++ }; ++ }; ++}; ++ ++&tach0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&fan0_acquire>; ++}; ++ ++&tach1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&fan1_acquire>; ++}; ++ ++/ { ++ pwmfan: pwm-fan { ++ compatible = "pwm-fan"; ++ pwms = <&pwm 0 40000>, <&pwm 1 40000>; // period_ns ++ pwm-names = "pwm0","pwm1"; ++ pwm_inuse = "pwm0"; ++ #cooling-cells = <2>; ++ cooling-levels = <1 1 1 1 1>; //total 255 ++ }; ++ ++ thermal_zones: thermal-zones { ++ soc { ++ polling-delay-passive = <1000>; /* milliseconds */ ++ polling-delay = <1000>; /* milliseconds */ ++ thermal-sensors = <&mcu 0>; ++ ++ trips { ++ soc_pwmfan_trip1: soc_pwmfan_trip@1 { ++ temperature = <30000>; /* millicelsius */ ++ hysteresis = <8000>; /* millicelsius */ ++ type = "active"; ++ }; ++ ++ soc_pwmfan_trip2: soc_pwmfan_trip@2 { ++ temperature = <40000>; /* millicelsius */ ++ hysteresis = <12000>; /* millicelsius */ ++ type = "active"; ++ }; ++ ++ soc_pwmfan_trip3: soc_pwmfan_trip@3 { ++ temperature = <50000>; /* millicelsius */ ++ hysteresis = <10000>; /* millicelsius */ ++ type = "active"; ++ }; ++ ++ soc_pwmfan_trip4: soc_pwmfan_trip@4 { ++ temperature = <60000>; /* millicelsius */ ++ hysteresis = <5000>; /* millicelsius */ ++ type = "active"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&soc_pwmfan_trip1>; ++ cooling-device = <&pwmfan 0 1>; ++ }; ++ ++ map1 { ++ trip = <&soc_pwmfan_trip2>; ++ cooling-device = <&pwmfan 1 2>; ++ }; ++ ++ map2 { ++ trip = <&soc_pwmfan_trip3>; ++ cooling-device = <&pwmfan 2 3>; ++ }; ++ ++ map3 { ++ trip = <&soc_pwmfan_trip4>; ++ cooling-device = <&pwmfan 3 4>; ++ }; ++ }; ++ ++ }; ++ ++ board { ++ polling-delay-passive = <1000>; /* milliseconds */ ++ polling-delay = <1000>; /* milliseconds */ ++ thermal-sensors = <&mcu 1>; ++ ++ trips { ++ board_pwmfan_trip1: board_pwmfan_trip@1 { ++ temperature = <75000>; /* millicelsius */ ++ hysteresis = <8000>; /* millicelsius */ ++ type = "active"; ++ }; ++ }; ++ ++ cooling-maps { ++ map4 { ++ trip = <&board_pwmfan_trip1>; ++ cooling-device = <&pwmfan 3 4>; ++ }; ++ }; ++ }; ++ }; ++ ++}; ++ ++&chosen { ++ bootargs = "console=ttyS0,115200 console=tty1 earlycon maxcpus=1"; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-pcie-2rc.dtsi b/arch/riscv/boot/dts/sophgo/mango-pcie-2rc.dtsi +new file mode 100644
+index 000000000000..e39b3a80bf06 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-pcie-2rc.dtsi +@@ -0,0 +1,81 @@ ++#include ++ ++#define SOC_PERIPHERAL_IRQ(nr) (nr) ++ ++/ { ++ pcie@7062000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x00 0x3f>; ++ linux,pci-domain = <0>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x1>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <1>; ++ top-intc-id = <0>; ++ msix-supported = <1>; ++ interrupt-parent = <&intc1>; ++ //interrupts = ; ++ //interrupt-names = "msi"; ++ reg = <0x70 0x62000000 0x0 0x02000000>, ++ <0x48 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0000000 0x48 0xc0000000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x48 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x48 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x4a 0x00000000 0x4a 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x49 0x00000000 0x49 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@f060000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x80 0xff>; ++ linux,pci-domain = <1>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <1>; ++ top-intc-id = <1>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc2>; ++ //interrupts = ; ++ //interrupt-names = "msi"; ++ reg = <0xf0 0x60000000 0x0 0x02000000>, ++ <0xc0 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0800000 0xc0 0xc0800000 0x0 0x00800000>, ++ <0x42000000 0x0 0xd0000000 0xc0 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0xc0 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0xc2 0x00000000 0xc2 0x00000000 0x2 0x00000000>, ++ <0x03000000 0xc1 0x00000000 0xc1 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-pcie-3rc-capricorn.dtsi b/arch/riscv/boot/dts/sophgo/mango-pcie-3rc-capricorn.dtsi +new file mode 100644 +index 000000000000..776889585272 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-pcie-3rc-capricorn.dtsi +@@ -0,0 +1,116 @@ ++#include ++ ++#define SOC_PERIPHERAL_IRQ(nr) (nr) ++ ++/ { ++ pcie@7060000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x0 0x3f>; ++ linux,pci-domain = <0>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <1>; ++ top-intc-id = <0>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc1>; ++ reg = <0x70 0x60000000 0x0 0x02000000>, ++ <0x40 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit 
prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0000000 0x40 0xc0000000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x40 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x40 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x42 0x00000000 0x42 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x41 0x00000000 0x41 0x00000000 0x1 0x00000000>; ++ //dma-ranges = <0x03000000 0x0 0x0 0x0 0x0 0x1f 0x0>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@7060800000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x40 0x7f>; ++ linux,pci-domain = <1>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x1>; ++ top-intc-used = <0>; ++ top-intc-id = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ interrupt-names = "msi"; ++ reg = <0x44 0x00000000 0x0 0x00001000>; ++ reg-names = "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0400000 0x44 0xc0400000 0x0 0x00400000>, ++ <0x42000000 0x0 0xe0000000 0x44 0xe0000000 0x0 0x20000000>, ++ <0x02000000 0x0 0xd0000000 0x44 0xd0000000 0x0 0x10000000>, ++ <0x43000000 0x46 0x00000000 0x46 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x45 0x00000000 0x45 0x00000000 0x1 0x00000000>; ++ //dma-ranges = <0x03000000 0x0 0x0 0x0 0x0 0x1f 0x0>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@7062000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x80 0xff>; ++ linux,pci-domain = <2>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x1>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ interrupt-names = "msi"; ++ reg = <0x70 0x62000000 0x0 0x02000000>, ++ <0x48 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0800000 0x48 0xc0800000 0x0 0x00800000>, ++ <0x42000000 0x0 0xd0000000 0x48 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x48 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x4a 0x00000000 0x4a 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x49 0x00000000 0x49 0x00000000 0x1 0x00000000>; ++ //dma-ranges = <0x03000000 0x0 0x0 0x0 0x0 0x1f 0x0>; ++ ++ status = "okay"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-pcie-3rc-v2.dtsi b/arch/riscv/boot/dts/sophgo/mango-pcie-3rc-v2.dtsi +new file mode 100644 +index 000000000000..9c4c9641e1c0 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-pcie-3rc-v2.dtsi +@@ -0,0 +1,115 @@ ++#include ++ ++#define SOC_PERIPHERAL_IRQ(nr) (nr) ++ ++/ { ++ pcie@7060000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x0 0x3f>; ++ linux,pci-domain = <0>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ 
pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <0>; ++ top-intc-id = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ interrupt-names = "msi"; ++ reg = <0x70 0x60000000 0x0 0x02000000>, ++ <0x40 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0000000 0x40 0xc0000000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x40 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x40 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x42 0x00000000 0x42 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x41 0x00000000 0x41 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@7060800000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x40 0x7f>; ++ linux,pci-domain = <1>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x1>; ++ top-intc-used = <1>; ++ top-intc-id = <0>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc1>; ++ //interrupts = ; ++ //interrupt-names = "msi"; ++ reg = <0x44 0x00000000 0x0 0x00001000>; ++ reg-names = "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0400000 0x44 0xc0400000 0x0 0x00400000>, ++ <0x42000000 0x0 0xe0000000 0x44 0xe0000000 0x0 0x20000000>, ++ <0x02000000 0x0 0xd0000000 0x44 0xd0000000 0x0 0x10000000>, ++ <0x43000000 0x46 0x00000000 0x46 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x45 0x00000000 0x45 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@7062000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x80 0xff>; ++ linux,pci-domain = <2>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x1>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ interrupt-names = "msi"; ++ reg = <0x70 0x62000000 0x0 0x02000000>, ++ <0x48 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0800000 0x48 0xc0800000 0x0 0x00800000>, ++ <0x42000000 0x0 0xd0000000 0x48 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x48 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x4a 0x00000000 0x4a 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x49 0x00000000 0x49 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-pcie-3rc.dtsi b/arch/riscv/boot/dts/sophgo/mango-pcie-3rc.dtsi +new file mode 100644 +index 000000000000..63fb41b43809 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-pcie-3rc.dtsi +@@ -0,0 +1,112 @@ ++#include ++ ++#define SOC_PERIPHERAL_IRQ(nr) (nr) ++ ++/ { ++ pcie@7060000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = 
<0x0 0x3f>; ++ linux,pci-domain = <0>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ interrupt-names = "msi"; ++ reg = <0x70 0x60000000 0x0 0x02000000>, ++ <0x40 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0000000 0x40 0xc0000000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x40 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x40 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x42 0x00000000 0x42 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x41 0x00000000 0x41 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@7060800000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x40 0x7f>; ++ linux,pci-domain = <1>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x1>; ++ top-intc-used = <1>; ++ top-intc-id = <0>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc1>; ++ reg = <0x44 0x00000000 0x0 0x00001000>; ++ reg-names = "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0400000 0x44 0xc0400000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x44 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x44 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x46 0x00000000 0x46 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x45 0x00000000 0x45 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@7062000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x80 0xff>; ++ linux,pci-domain = <2>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x1>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ interrupt-names = "msi"; ++ reg = <0x70 0x62000000 0x0 0x02000000>, ++ <0x48 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0800000 0x48 0xc0800000 0x0 0x00800000>, ++ <0x42000000 0x0 0xd0000000 0x48 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x48 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x49 0x00000000 0x49 0x00000000 0x1 0x00000000>, ++ <0x03000000 0x4a 0x00000000 0x4a 0x00000000 0x2 0x00000000>; ++ ++ status = "okay"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-pcie-4rc-v2.dtsi b/arch/riscv/boot/dts/sophgo/mango-pcie-4rc-v2.dtsi +new file mode 100644 +index 000000000000..efbcc5c04740 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-pcie-4rc-v2.dtsi +@@ -0,0 +1,155 @@ ++#include ++ ++#define SOC_PERIPHERAL_IRQ(nr) (nr) ++ ++/ { ++ pcie@7062000000 { ++ 
compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x00 0x3f>; ++ linux,pci-domain = <0>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x1>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <1>; ++ top-intc-id = <0>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc1>; ++ reg = <0x70 0x62000000 0x0 0x02000000>, ++ <0x48 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ /* ++ * IO, check IO_SPACE_LIMIT ++ * 32bit prefetchable memory ++ * 32bit non-prefetchable memory ++ * 64bit prefetchable memory ++ * 64bit non-prefetchable memory ++ */ ++ ranges = <0x01000000 0x0 0xc0000000 0x48 0xc0000000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x48 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x48 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x4a 0x00000000 0x4a 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x49 0x00000000 0x49 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@7062800000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x40 0x7f>; ++ linux,pci-domain = <1>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x1>; ++ link-id = /bits/ 16 <0x1>; ++ top-intc-used = <1>; ++ top-intc-id = <0>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc1>; ++ reg = <0x4c 0x00000000 0x0 0x00001000>; ++ reg-names = "cfg"; ++ ++ /* ++ * IO, check IO_SPACE_LIMIT ++ * 32bit prefetchable memory ++ * 32bit non-prefetchable memory ++ * 64bit prefetchable memory ++ * 64bit non-prefetchable memory ++ */ ++ ranges = <0x01000000 0x0 0xc0000000 0x4c 0xc0000000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x4c 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x4c 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x4e 0x00000000 0x4e 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x4d 0x00000000 0x4d 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@f060000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x80 0xbf>; ++ linux,pci-domain = <2>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <1>; ++ top-intc-id = <1>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc2>; ++ reg = <0xf0 0x60000000 0x0 0x02000000>, ++ <0xc0 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ /* ++ * IO, check IO_SPACE_LIMIT ++ * 32bit prefetchable memory ++ * 32bit non-prefetchable memory ++ * 64bit prefetchable memory ++ * 64bit non-prefetchable memory ++ */ ++ ranges = <0x01000000 0x0 0xc0000000 0xc0 0xc0000000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0xc0 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0xc0 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0xc2 0x00000000 0xc2 0x00000000 0x2 0x00000000>, ++ <0x03000000 0xc1 0x00000000 0xc1 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@f068000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0xc0 0xff>; ++ linux,pci-domain = <3>; ++ 
cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x1>; ++ top-intc-used = <1>; ++ top-intc-id = <1>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc2>; ++ reg = <0xc4 0x00000000 0x0 0x00001000>; ++ reg-names = "cfg"; ++ ++ /* ++ * IO, check IO_SPACE_LIMIT ++ * 32bit prefetchable memory ++ * 32bit non-prefetchable memory ++ * 64bit prefetchable memory ++ * 64bit non-prefetchable memory ++ */ ++ ranges = <0x01000000 0x0 0xc0000000 0xc4 0xc0000000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0xc4 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0xc4 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0xc6 0x00000000 0xc6 0x00000000 0x2 0x00000000>, ++ <0x03000000 0xc5 0x00000000 0xc5 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-pcie-4rc.dtsi b/arch/riscv/boot/dts/sophgo/mango-pcie-4rc.dtsi +new file mode 100644 +index 000000000000..22bc466757bf +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-pcie-4rc.dtsi +@@ -0,0 +1,151 @@ ++#include ++ ++#define SOC_PERIPHERAL_IRQ(nr) (nr) ++ ++/ { ++ pcie@7060000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x0 0x3f>; ++ linux,pci-domain = <0>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <1>; ++ top-intc-id = <0>; ++ msix-supported = <1>; ++ interrupt-parent = <&intc1>; ++ //top-intc-used = <0>; ++ //interrupt-parent = <&intc>; ++ //interrupts = ; ++ //interrupt-names = "msi"; ++ reg = <0x70 0x60000000 0x0 0x02000000>, ++ <0x40 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0000000 0x40 0xc0000000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x40 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x40 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x42 0x00000000 0x42 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x41 0x00000000 0x41 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++#if 0 ++ pcie@7060800000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x40 0x7f>; ++ linux,pci-domain = <1>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x0>; ++ link-id = /bits/ 16 <0x1>; ++ top-intc-used = <1>; ++ top-intc-id = <0>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc1>; ++ reg = <0x44 0x00000000 0x0 0x00001000>; ++ reg-names = "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0400000 0x44 0xc0400000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x44 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x44 0xe0000000 0x0 0x20000000>, ++ <0x43000000 0x46 0x00000000 0x46 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x45 0x00000000 0x45 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; 
++#endif ++ pcie@7062000000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0x80 0xbf>; ++ linux,pci-domain = <1>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x1>; ++ link-id = /bits/ 16 <0x0>; ++ top-intc-used = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ interrupt-names = "msi"; ++ reg = <0x70 0x62000000 0x0 0x02000000>, ++ <0x48 0x00000000 0x0 0x00001000>; ++ reg-names = "reg", "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0800000 0x48 0xc0800000 0x0 0x00400000>, ++ <0x42000000 0x0 0xd0000000 0x48 0xd0000000 0x0 0x10000000>, ++ <0x02000000 0x0 0xe0000000 0x48 0xe0000000 0x0 0x20000000>, ++ <0x03000000 0x49 0x00000000 0x49 0x00000000 0x1 0x00000000>, ++ <0x43000000 0x4a 0x00000000 0x4a 0x00000000 0x2 0x00000000>; ++ ++ status = "okay"; ++ }; ++ ++ pcie@7062800000 { ++ compatible = "sophgo,cdns-pcie-host"; ++ device_type = "pci"; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ ++ bus-range = <0xc0 0xff>; ++ linux,pci-domain = <2>; ++ cdns,max-outbound-regions = <16>; ++ cdns,no-bar-match-nbits = <48>; ++ vendor-id = /bits/ 16 <0x1E30>; ++ device-id = /bits/ 16 <0x2042>; ++ pcie-id = /bits/ 16 <0x1>; ++ link-id = /bits/ 16 <0x1>; ++ top-intc-used = <1>; ++ top-intc-id = <0>; ++ msix-supported = <0>; ++ interrupt-parent = <&intc1>; ++ reg = <0x4c 0x00000000 0x0 0x00001000>; ++ reg-names = "cfg"; ++ ++ // IO, check IO_SPACE_LIMIT ++ // 32bit prefetchable memory ++ // 32bit non-prefetchable memory ++ // 64bit prefetchable memory ++ // 64bit non-prefetchable memory ++ ranges = <0x01000000 0x0 0xc0c00000 0x4c 0xc0c00000 0x0 0x00400000>, ++ <0x42000000 0x0 0xf8000000 0x4c 0xf8000000 0x0 0x04000000>, ++ <0x02000000 0x0 0xfc000000 0x4c 0xfc000000 0x0 0x04000000>, ++ <0x43000000 0x4e 0x00000000 0x4e 0x00000000 0x2 0x00000000>, ++ <0x03000000 0x4d 0x00000000 0x4d 0x00000000 0x1 0x00000000>; ++ ++ status = "okay"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-pinctrl.dtsi b/arch/riscv/boot/dts/sophgo/mango-pinctrl.dtsi +new file mode 100644 +index 000000000000..f3fb2e39af26 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-pinctrl.dtsi +@@ -0,0 +1,434 @@ ++/ { ++ bmpctrl: pinctrl@50010400 { ++ compatible = "sophgo, pinctrl-mango"; ++ subctrl-syscon = <&top_misc>; ++ top_pinctl_offset = <0x1000>; ++ ++ lpc_acquire: lpc_acquire { ++ mux { ++ groups = "lpc_grp"; ++ function = "lpc_a"; ++ }; ++ }; ++ ++ lpc_release: lpc_release{ ++ mux { ++ groups = "lpc_grp"; ++ function = "lpc_r"; ++ }; ++ }; ++ ++ pcie_acquire: pcie_acquire { ++ mux { ++ groups = "pcie_grp"; ++ function = "pcie_a"; ++ }; ++ }; ++ ++ pcie_release: pcie_release{ ++ mux { ++ groups = "pcie_grp"; ++ function = "pcie_r"; ++ }; ++ }; ++ ++ spif_acquire: spif_acquire { ++ mux { ++ groups = "spif_grp"; ++ function = "spif_a"; ++ }; ++ }; ++ ++ spif_release: spif_release{ ++ mux { ++ groups = "spif_grp"; ++ function = "spif_r"; ++ }; ++ }; ++ ++ emmc_acquire: emmc_acquire { ++ mux { ++ groups = "emmc_grp"; ++ function = "emmc_a"; ++ }; ++ }; ++ ++ emmc_release: emmc_release{ ++ mux { ++ groups = "emmc_grp"; ++ function = "emmc_r"; ++ }; ++ }; ++ ++ sdio_acquire: sdio_acquire { ++ mux { ++ groups = "sdio_grp"; ++ function = "sdio_a"; ++ }; ++ }; ++ 
++ sdio_release: sdio_release{ ++ mux { ++ groups = "sdio_grp"; ++ function = "sdio_r"; ++ }; ++ }; ++ ++ eth0_acquire: eth0_acquire { ++ mux { ++ groups = "eth0_grp"; ++ function = "eth0_a"; ++ }; ++ }; ++ ++ eth0_release: eth0_release{ ++ mux { ++ groups = "eth0_grp"; ++ function = "eth0_r"; ++ }; ++ }; ++ ++ pwm0_acquire: pwm0_acquire { ++ mux { ++ groups = "pwm0_grp"; ++ function = "pwm0_a"; ++ }; ++ }; ++ ++ pwm0_release: pwm0_release{ ++ mux { ++ groups = "pwm0_grp"; ++ function = "pwm0_r"; ++ }; ++ }; ++ ++ pwm1_acquire: pwm1_acquire { ++ mux { ++ groups = "pwm1_grp"; ++ function = "pwm1_a"; ++ }; ++ }; ++ ++ pwm1_release: pwm1_release{ ++ mux { ++ groups = "pwm1_grp"; ++ function = "pwm1_r"; ++ }; ++ }; ++ ++ pwm2_acquire: pwm2_acquire { ++ mux { ++ groups = "pwm2_grp"; ++ function = "pwm2_a"; ++ }; ++ }; ++ ++ pwm2_release: pwm2_release{ ++ mux { ++ groups = "pwm2_grp"; ++ function = "pwm2_r"; ++ }; ++ }; ++ ++ pwm3_acquire: pwm3_acquire { ++ mux { ++ groups = "pwm3_grp"; ++ function = "pwm3_a"; ++ }; ++ }; ++ ++ pwm3_release: pwm3_release{ ++ mux { ++ groups = "pwm3_grp"; ++ function = "pwm3_r"; ++ }; ++ }; ++ ++ fan0_acquire: fan0_acquire { ++ mux { ++ groups = "fan0_grp"; ++ function = "fan0_a"; ++ }; ++ }; ++ ++ fan0_release: fan0_release{ ++ mux { ++ groups = "fan0_grp"; ++ function = "fan0_r"; ++ }; ++ }; ++ ++ fan1_acquire: fan1_acquire { ++ mux { ++ groups = "fan1_grp"; ++ function = "fan1_a"; ++ }; ++ }; ++ ++ fan1_release: fan1_release{ ++ mux { ++ groups = "fan1_grp"; ++ function = "fan1_r"; ++ }; ++ }; ++ ++ fan2_acquire: fan2_acquire { ++ mux { ++ groups = "fan2_grp"; ++ function = "fan2_a"; ++ }; ++ }; ++ ++ fan2_release: fan2_release{ ++ mux { ++ groups = "fan2_grp"; ++ function = "fan2_r"; ++ }; ++ }; ++ ++ fan3_acquire: fan3_acquire { ++ mux { ++ groups = "fan3_grp"; ++ function = "fan3_a"; ++ }; ++ }; ++ ++ fan3_release: fan3_release{ ++ mux { ++ groups = "fan3_grp"; ++ function = "fan3_r"; ++ }; ++ }; ++ ++ i2c0_acquire: i2c0_acquire { ++ mux { ++ groups = "i2c0_grp"; ++ function = "i2c0_a"; ++ }; ++ }; ++ ++ i2c0_release: i2c0_release{ ++ mux { ++ groups = "i2c0_grp"; ++ function = "i2c0_r"; ++ }; ++ }; ++ ++ i2c1_acquire: i2c1_acquire { ++ mux { ++ groups = "i2c1_grp"; ++ function = "i2c1_a"; ++ }; ++ }; ++ ++ i2c1_release: i2c1_release{ ++ mux { ++ groups = "i2c1_grp"; ++ function = "i2c1_r"; ++ }; ++ }; ++ ++ i2c2_acquire: i2c2_acquire { ++ mux { ++ groups = "i2c2_grp"; ++ function = "i2c2_a"; ++ }; ++ }; ++ ++ i2c2_release: i2c2_release{ ++ mux { ++ groups = "i2c2_grp"; ++ function = "i2c2_r"; ++ }; ++ }; ++ ++ i2c3_acquire: i2c3_acquire { ++ mux { ++ groups = "i2c3_grp"; ++ function = "i2c3_a"; ++ }; ++ }; ++ ++ i2c3_release: i2c3_release{ ++ mux { ++ groups = "i2c3_grp"; ++ function = "i2c3_r"; ++ }; ++ }; ++ ++ uart0_acquire: uart0_acquire { ++ mux { ++ groups = "uart0_grp"; ++ function = "uart0_a"; ++ }; ++ }; ++ ++ uart0_release: uart0_release{ ++ mux { ++ groups = "uart0_grp"; ++ function = "uart0_r"; ++ }; ++ }; ++ ++ uart1_acquire: uart1_acquire { ++ mux { ++ groups = "uart1_grp"; ++ function = "uart1_a"; ++ }; ++ }; ++ ++ uart1_release: uart1_release{ ++ mux { ++ groups = "uart1_grp"; ++ function = "uart1_r"; ++ }; ++ }; ++ ++ uart2_acquire: uart2_acquire { ++ mux { ++ groups = "uart2_grp"; ++ function = "uart2_a"; ++ }; ++ }; ++ ++ uart2_release: uart2_release{ ++ mux { ++ groups = "uart2_grp"; ++ function = "uart2_r"; ++ }; ++ }; ++ ++ uart3_acquire: uart3_acquire { ++ mux { ++ groups = "uart3_grp"; ++ function = "uart3_a"; ++ }; ++ }; ++ ++
uart3_release: uart3_release{ ++ mux { ++ groups = "uart3_grp"; ++ function = "uart3_r"; ++ }; ++ }; ++ ++ spi0_acquire: spi0_acquire { ++ mux { ++ groups = "spi0_grp"; ++ function = "spi0_a"; ++ }; ++ }; ++ ++ spi0_release: spi0_release{ ++ mux { ++ groups = "spi0_grp"; ++ function = "spi0_r"; ++ }; ++ }; ++ ++ spi1_acquire: spi1_acquire { ++ mux { ++ groups = "spi1_grp"; ++ function = "spi1_a"; ++ }; ++ }; ++ ++ spi1_release: spi1_release{ ++ mux { ++ groups = "spi1_grp"; ++ function = "spi1_r"; ++ }; ++ }; ++ ++ jtag0_acquire: jtag0_acquire { ++ mux { ++ groups = "jtag0_grp"; ++ function = "jtag0_a"; ++ }; ++ }; ++ ++ jtag0_release: jtag0_release{ ++ mux { ++ groups = "jtag0_grp"; ++ function = "jtag0_r"; ++ }; ++ }; ++ ++ jtag1_acquire: jtag1_acquire { ++ mux { ++ groups = "jtag1_grp"; ++ function = "jtag1_a"; ++ }; ++ }; ++ ++ jtag1_release: jtag1_release{ ++ mux { ++ groups = "jtag1_grp"; ++ function = "jtag1_r"; ++ }; ++ }; ++ ++ jtag2_acquire: jtag2_acquire { ++ mux { ++ groups = "jtag2_grp"; ++ function = "jtag2_a"; ++ }; ++ }; ++ ++ jtag2_release: jtag2_release{ ++ mux { ++ groups = "jtag2_grp"; ++ function = "jtag2_r"; ++ }; ++ }; ++ ++ gpio2_acquire: gpio2_acquire { ++ mux { ++ pins = <127>; ++ function = "gpio0_a"; ++ }; ++ }; ++ ++ gpio3_release: gpio3_release { ++ mux { ++ pins = <128>; ++ function = "gpio0_r"; ++ }; ++ }; ++ ++ gpio5_release: gpio5_release { ++ mux { ++ pins = <130>; ++ function = "gpio0_r"; ++ }; ++ }; ++ ++ pwr_key: pwr-key { ++ mux { ++ pins = <147>; ++ function = "gpio0_a"; ++ }; ++ }; ++ ++ restart_key: restart-key { ++ mux { ++ pins = <148>; ++ function = "gpio0_a"; ++ }; ++ }; ++ ++ dbgi2c_acquire: dbgi2c_acquire { ++ mux { ++ groups = "dbgi2c_grp"; ++ function = "dbgi2c_a"; ++ }; ++ }; ++ ++ dbgi2c_release: dbgi2c_release{ ++ mux { ++ groups = "dbgi2c_grp"; ++ function = "dbgi2c_r"; ++ }; ++ }; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-sophgo-capricorn.dts b/arch/riscv/boot/dts/sophgo/mango-sophgo-capricorn.dts +new file mode 100644 +index 000000000000..94892b74467f +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-sophgo-capricorn.dts +@@ -0,0 +1,57 @@ ++#include "mango.dtsi" ++#include "mango-pcie-3rc-capricorn.dtsi" ++ ++/ { ++ info { ++ file-name = "mango-sophgo-capricorn.dts"; ++ }; ++}; ++ ++ðernet0 { ++ max-speed = <1000>; ++ eth-sophgo-config { ++ autoneg = "enable"; ++ }; ++}; ++ ++&soc { ++ gpio-poweroff { ++ compatible = "gpio-keys"; ++ input-name = "gpio-keys"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&gpio2_acquire>; ++ ++ power { ++ label = "GPIO Key Power"; ++ linux,code = ; ++ gpios = <&port0a 2 GPIO_ACTIVE_HIGH>; ++ linux,input-type = <1>; ++ debounce-interval = <100>; ++ }; ++ }; ++}; ++ ++&port0a { ++ compatible = "snps,dw-apb-gpio-port", "sophgo,gpio0"; ++ ++ cpld_poweroff: cpld-poweroff { ++ compatible = "mango,cpld-poweroff"; ++ gpios = <&port0a 3 GPIO_ACTIVE_HIGH>; ++ }; ++ ++ cpld_reboot: cpld-reboot { ++ compatible = "mango,cpld-reboot"; ++ gpios = <&port0a 5 GPIO_ACTIVE_HIGH>; ++ }; ++}; ++ ++/ { ++ board-info { ++ /* compatible MUST be sophgo,board-info */ ++ compatible = "sophgo,board-info"; ++ /* valid values are: full-function, xmr */ ++ chip-package = "full-function"; ++ /* valid values are: x4, x8 */ ++ ddr-pcb-type = "x4"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-sophgo-pisces.dts b/arch/riscv/boot/dts/sophgo/mango-sophgo-pisces.dts +new file mode 100644 +index 000000000000..98761cbf42e8 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-sophgo-pisces.dts +@@ -0,0 +1,58 
@@ ++#include "mango-2sockets.dtsi" ++#include "mango-top-intc2.dtsi" ++#include "mango-pcie-2rc.dtsi" ++ ++/ { ++ info { ++ file-name = "mango-sophgo-pisces.dts"; ++ }; ++}; ++ ++ðernet0 { ++ max-speed = <1000>; ++ eth-sophgo-config { ++ autoneg = "enable"; ++ }; ++}; ++ ++&soc { ++ gpio-poweroff { ++ compatible = "gpio-keys"; ++ input-name = "gpio-keys"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&gpio2_acquire>; ++ ++ power { ++ label = "GPIO Key Power"; ++ linux,code = ; ++ gpios = <&port0a 2 GPIO_ACTIVE_HIGH>; ++ linux,input-type = <1>; ++ debounce-interval = <100>; ++ }; ++ }; ++}; ++ ++&port0a { ++ compatible = "snps,dw-apb-gpio-port", "sophgo,gpio0"; ++ ++ cpld_poweroff: cpld-poweroff { ++ compatible = "mango,cpld-poweroff"; ++ gpios = <&port0a 3 GPIO_ACTIVE_HIGH>; ++ }; ++ ++ cpld_reboot: cpld-reboot { ++ compatible = "mango,cpld-reboot"; ++ gpios = <&port0a 5 GPIO_ACTIVE_HIGH>; ++ }; ++}; ++ ++/ { ++ board-info { ++ /* compatible MUST be sophgo,board-info */ ++ compatible = "sophgo,board-info"; ++ /* valid values are: full-function, xmr */ ++ chip-package = "full-function"; ++ /* valid values are: x4, x8 */ ++ ddr-pcb-type = "x4"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-sophgo-x4evb.dts b/arch/riscv/boot/dts/sophgo/mango-sophgo-x4evb.dts +new file mode 100644 +index 000000000000..3fe655eaf69a +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-sophgo-x4evb.dts +@@ -0,0 +1,144 @@ ++#include "mango.dtsi" ++#include "mango-pcie-3rc-v2.dtsi" ++ ++/ { ++ info { ++ file-name = "mango-sophgo-x4evb.dts"; ++ }; ++}; ++ ++&i2c0 { ++ rtc: rtc@68 { ++ compatible = "dallas,ds1307"; ++ reg = <0x68>; ++ }; ++}; ++ ++&i2c1 { ++ mcu: sg2042mcu@17 { ++ compatible = "sophgo,sg20xx-mcu"; ++ reg = <0x17>; ++ #thermal-sensor-cells = <1>; ++ }; ++ ++ mango_srst: mango-reset@17 { ++ compatible = "mango,reset"; ++ reg = <0x17>; ++ }; ++}; ++ ++&i2c2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c2_acquire>; ++}; ++ ++&tach0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&fan0_acquire>; ++}; ++ ++&tach1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&fan1_acquire>; ++}; ++ ++ðernet0 { ++ max-speed = <1000>; ++ eth-sophgo-config { ++ autoneg = "enable"; ++ }; ++}; ++ ++&soc { ++ /delete-node/ flash-controller@7000180000; ++}; ++ ++/ { ++ pwmfan: pwm-fan { ++ compatible = "pwm-fan"; ++ pwms = <&pwm 0 40000>, <&pwm 1 40000>; // period_ns ++ pwm-names = "pwm0","pwm1"; ++ pwm_inuse = "pwm0"; ++ #cooling-cells = <2>; ++ cooling-levels = <102 127 178 229 254>; //total 255 ++ }; ++ ++ thermal_zones: thermal-zones { ++ soc { ++ polling-delay-passive = <1000>; /* milliseconds */ ++ polling-delay = <1000>; /* milliseconds */ ++ thermal-sensors = <&mcu 0>; ++ ++ trips { ++ soc_pwmfan_trip1: soc_pwmfan_trip@1 { ++ temperature = <40000>; /* millicelsius */ ++ hysteresis = <8000>; /* millicelsius */ ++ type = "active"; ++ }; ++ ++ soc_pwmfan_trip2: soc_pwmfan_trip@2 { ++ temperature = <58000>; /* millicelsius */ ++ hysteresis = <12000>; /* millicelsius */ ++ type = "active"; ++ }; ++ ++ soc_pwmfan_trip3: soc_pwmfan_trip@3 { ++ temperature = <70000>; /* millicelsius */ ++ hysteresis = <10000>; /* millicelsius */ ++ type = "active"; ++ }; ++ ++ soc_pwmfan_trip4: soc_pwmfan_trip@4 { ++ temperature = <85000>; /* millicelsius */ ++ hysteresis = <5000>; /* millicelsius */ ++ type = "active"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&soc_pwmfan_trip1>; ++ cooling-device = <&pwmfan 0 1>; ++ }; ++ ++ map1 { ++ trip = <&soc_pwmfan_trip2>; ++ cooling-device = <&pwmfan 1 2>; ++ }; ++ ++ 
map2 { ++ trip = <&soc_pwmfan_trip3>; ++ cooling-device = <&pwmfan 2 3>; ++ }; ++ ++ map3 { ++ trip = <&soc_pwmfan_trip4>; ++ cooling-device = <&pwmfan 3 4>; ++ }; ++ }; ++ ++ }; ++ ++ board { ++ polling-delay-passive = <1000>; /* milliseconds */ ++ polling-delay = <1000>; /* milliseconds */ ++ thermal-sensors = <&mcu 1>; ++ ++ trips { ++ board_pwmfan_trip1: board_pwmfan_trip@1 { ++ temperature = <75000>; /* millicelsius */ ++ hysteresis = <8000>; /* millicelsius */ ++ type = "active"; ++ }; ++ }; ++ ++ cooling-maps { ++ map4 { ++ trip = <&board_pwmfan_trip1>; ++ cooling-device = <&pwmfan 3 4>; ++ }; ++ }; ++ }; ++ }; ++ ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-sophgo-x8evb.dts b/arch/riscv/boot/dts/sophgo/mango-sophgo-x8evb.dts +new file mode 100644 +index 000000000000..9e0cf5348051 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-sophgo-x8evb.dts +@@ -0,0 +1,172 @@ ++#include "mango.dtsi" ++#include "mango-pcie-3rc.dtsi" ++ ++/ { ++ info { ++ file-name = "mango-sophgo-x8evb.dts"; ++ }; ++}; ++ ++&i2c0 { ++ rtc: rtc@68 { ++ compatible = "dallas,ds1307"; ++ reg = <0x68>; ++ }; ++}; ++ ++&i2c1 { ++ mcu: sg2042mcu@17 { ++ compatible = "sophgo,sg20xx-mcu"; ++ reg = <0x17>; ++ #thermal-sensor-cells = <1>; ++ }; ++ ++ mango_srst: mango-reset@17 { ++ compatible = "mango,reset"; ++ reg = <0x17>; ++ }; ++}; ++ ++&i2c2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c2_acquire>; ++}; ++ ++&soc { ++ gpio-poweroff { ++ compatible = "gpio-keys"; ++ input-name = "gpio-keys"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pwr_key>; ++ ++ power { ++ label = "GPIO Key Power"; ++ linux,code = ; ++ gpios = <&port0a 22 GPIO_ACTIVE_HIGH>; ++ linux,input-type = <1>; ++ debounce-interval = <100>; ++ }; ++ }; ++ ++ gpio-restart { ++ compatible = "gpio-keys"; ++ input-name = "gpio-keys"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&restart_key>; ++ ++ restart { ++ label = "GPIO Key Restart"; ++ linux,code = ; ++ gpios = <&port0a 23 GPIO_ACTIVE_HIGH>; ++ linux,input-type = <1>; ++ debounce-interval = <100>; ++ }; ++ }; ++}; ++ ++&tach0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&fan0_acquire>; ++}; ++ ++&tach1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&fan1_acquire>; ++}; ++ ++ðernet0 { ++ max-speed = <1000>; ++ eth-sophgo-config { ++ autoneg = "enable"; ++ }; ++}; ++ ++/ { ++ pwmfan: pwm-fan { ++ compatible = "pwm-fan"; ++ pwms = <&pwm 0 40000>, <&pwm 1 40000>; // period_ns ++ pwm-names = "pwm0","pwm1"; ++ pwm_inuse = "pwm0"; ++ #cooling-cells = <2>; ++ cooling-levels = <153 128 77 26 1>; //total 255 ++ }; ++ ++ thermal_zones: thermal-zones { ++ soc { ++ polling-delay-passive = <1000>; /* milliseconds */ ++ polling-delay = <1000>; /* milliseconds */ ++ thermal-sensors = <&mcu 0>; ++ ++ trips { ++ soc_pwmfan_trip1: soc_pwmfan_trip@1 { ++ temperature = <40000>; /* millicelsius */ ++ hysteresis = <8000>; /* millicelsius */ ++ type = "active"; ++ }; ++ ++ soc_pwmfan_trip2: soc_pwmfan_trip@2 { ++ temperature = <58000>; /* millicelsius */ ++ hysteresis = <12000>; /* millicelsius */ ++ type = "active"; ++ }; ++ ++ soc_pwmfan_trip3: soc_pwmfan_trip@3 { ++ temperature = <70000>; /* millicelsius */ ++ hysteresis = <10000>; /* millicelsius */ ++ type = "active"; ++ }; ++ ++ soc_pwmfan_trip4: soc_pwmfan_trip@4 { ++ temperature = <85000>; /* millicelsius */ ++ hysteresis = <5000>; /* millicelsius */ ++ type = "active"; ++ }; ++ }; ++ ++ cooling-maps { ++ map0 { ++ trip = <&soc_pwmfan_trip1>; ++ cooling-device = <&pwmfan 0 1>; ++ }; ++ ++ map1 { ++ trip = <&soc_pwmfan_trip2>; ++ cooling-device 
= <&pwmfan 1 2>; ++ }; ++ ++ map2 { ++ trip = <&soc_pwmfan_trip3>; ++ cooling-device = <&pwmfan 2 3>; ++ }; ++ ++ map3 { ++ trip = <&soc_pwmfan_trip4>; ++ cooling-device = <&pwmfan 3 4>; ++ }; ++ }; ++ ++ }; ++ ++ board { ++ polling-delay-passive = <1000>; /* milliseconds */ ++ polling-delay = <1000>; /* milliseconds */ ++ thermal-sensors = <&mcu 1>; ++ ++ trips { ++ board_pwmfan_trip1: board_pwmfan_trip@1 { ++ temperature = <75000>; /* millicelsius */ ++ hysteresis = <8000>; /* millicelsius */ ++ type = "active"; ++ }; ++ }; ++ ++ cooling-maps { ++ map4 { ++ trip = <&board_pwmfan_trip1>; ++ cooling-device = <&pwmfan 3 4>; ++ }; ++ }; ++ }; ++ }; ++ ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-top-intc2.dtsi b/arch/riscv/boot/dts/sophgo/mango-top-intc2.dtsi +new file mode 100644 +index 000000000000..6d364cf6b3c5 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-top-intc2.dtsi +@@ -0,0 +1,62 @@ ++#include ++ ++#define SOC_PERIPHERAL_IRQ(nr) (nr) ++ ++/ { ++ intc2: top_intc@f030010300 { ++ compatible = "sophgo,top-intc"; ++ reg = <0xf0 0x300102E0 0x0 0x4>, ++ <0xf0 0x30010300 0x0 0x4>, ++ <0xf0 0x30010304 0x0 0x4>; ++ reg-names = "sta", "set", "clr"; ++ reg-bitwidth = <32>; ++ top-intc-id = <1>; ++ interrupt-controller; ++ #interrupt-cells = <0x1>; // only applies to child node ++ for-msi; ++ ++ interrupt-parent = <&intc>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ interrupt-names = "msi0", "msi1", "msi2", "msi3", ++ "msi4", "msi5", "msi6", "msi7", ++ "msi8", "msi9", "msi10", "msi11", ++ "msi12", "msi13", "msi14", "msi15", ++ "msi16", "msi17", "msi18", "msi19", ++ "msi20", "msi21", "msi22", "msi23", ++ "msi24", "msi25", "msi26", "msi27", ++ "msi28", "msi29", "msi30", "msi31"; ++ ++ }; ++ ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango-yixin-s2110.dts b/arch/riscv/boot/dts/sophgo/mango-yixin-s2110.dts +new file mode 100644 +index 000000000000..172421ffc196 +--- /dev/null ++++ b/arch/riscv/boot/dts/sophgo/mango-yixin-s2110.dts +@@ -0,0 +1,63 @@ ++#include "mango-2sockets.dtsi" ++#include "mango-top-intc2.dtsi" ++#include "mango-pcie-4rc-v2.dtsi" ++ ++/ { ++ info { ++ file-name = "mango-yixin-s2110.dts"; ++ }; ++}; ++ ++ðernet0 { ++ max-speed = <1000>; ++ eth-sophgo-config { ++ autoneg = "enable"; ++ }; ++}; ++ ++&soc { ++ gpio-poweroff { ++ compatible = "gpio-keys"; ++ input-name = "gpio-keys"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&gpio2_acquire>; ++ ++ power { ++ label = "GPIO Key Power"; ++ linux,code = ; ++ gpios = <&port0a 2 GPIO_ACTIVE_HIGH>; ++ linux,input-type = <1>; ++ debounce-interval = <100>; ++ }; ++ }; ++}; ++ ++&gpio0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&dbgi2c_release>; ++}; ++ ++&port0a { ++ compatible = "snps,dw-apb-gpio-port", "sophgo,gpio0"; ++ ++ cpld_poweroff: cpld-poweroff { ++ compatible = "mango,cpld-poweroff"; ++ gpios = <&port0a 3 GPIO_ACTIVE_HIGH>; ++ }; ++ ++ cpld_reboot: cpld-reboot { ++ compatible = "mango,cpld-reboot"; ++ gpios = <&port0a 29 GPIO_ACTIVE_HIGH>; ++ }; ++}; ++ ++/ { ++ board-info { ++ /* compatible MUST be sophgo,board-info */ ++ compatible = "sophgo,board-info"; ++ /* valid values are: full-function, xmr */ ++ chip-package = "full-function"; ++ /* valid values are: x4, x8 */ ++ ddr-pcb-type = "x8"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/sophgo/mango.dtsi b/arch/riscv/boot/dts/sophgo/mango.dtsi +new file mode 100644 +index 000000000000..57f304fc778f +--- /dev/null ++++ 
b/arch/riscv/boot/dts/sophgo/mango.dtsi +@@ -0,0 +1,938 @@ ++/dts-v1/; ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "mango-cpus-socket0.dtsi" ++#include "mango-clock-socket0.dtsi" ++#include "mango-pinctrl.dtsi" ++ ++#define SOC_PERIPHERAL_IRQ(nr) (nr) ++ ++/ { ++ model = "Sophgo Mango"; ++ compatible = "sophgo,mango"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-noncoherent; ++ ++ distance-map { ++ compatible = "numa-distance-map-v1"; ++ distance-matrix = <0 0 10>, ++ <0 1 15>, ++ <0 2 25>, ++ <0 3 30>, ++ <1 0 15>, ++ <1 1 10>, ++ <1 2 30>, ++ <1 3 25>, ++ <2 0 25>, ++ <2 1 30>, ++ <2 2 10>, ++ <2 3 15>, ++ <3 0 30>, ++ <3 1 25>, ++ <3 2 15>, ++ <3 3 10>; ++ }; ++ ++ pmu { ++ compatible = "riscv,pmu"; ++ riscv,event-to-mhpmevent = ++ <0x00003 0x00000000 0x00000010>, ++ <0x00004 0x00000000 0x00000011>, ++ <0x00005 0x00000000 0x00000007>, ++ <0x00006 0x00000000 0x00000006>, ++ <0x00008 0x00000000 0x00000027>, ++ <0x00009 0x00000000 0x00000028>, ++ <0x10000 0x00000000 0x0000000c>, ++ <0x10001 0x00000000 0x0000000d>, ++ <0x10002 0x00000000 0x0000000e>, ++ <0x10003 0x00000000 0x0000000f>, ++ <0x10008 0x00000000 0x00000001>, ++ <0x10009 0x00000000 0x00000002>, ++ <0x10010 0x00000000 0x00000010>, ++ <0x10011 0x00000000 0x00000011>, ++ <0x10012 0x00000000 0x00000012>, ++ <0x10013 0x00000000 0x00000013>, ++ <0x10019 0x00000000 0x00000004>, ++ <0x10021 0x00000000 0x00000003>, ++ <0x10030 0x00000000 0x0000001c>, ++ <0x10031 0x00000000 0x0000001b>; ++ riscv,event-to-mhpmcounters = ++ <0x00003 0x00003 0xfffffff8>, ++ <0x00004 0x00004 0xfffffff8>, ++ <0x00005 0x00005 0xfffffff8>, ++ <0x00006 0x00006 0xfffffff8>, ++ <0x00007 0x00007 0xfffffff8>, ++ <0x00008 0x00008 0xfffffff8>, ++ <0x00009 0x00009 0xfffffff8>, ++ <0x0000a 0x0000a 0xfffffff8>, ++ <0x10000 0x10000 0xfffffff8>, ++ <0x10001 0x10001 0xfffffff8>, ++ <0x10002 0x10002 0xfffffff8>, ++ <0x10003 0x10003 0xfffffff8>, ++ <0x10008 0x10008 0xfffffff8>, ++ <0x10009 0x10009 0xfffffff8>, ++ <0x10010 0x10010 0xfffffff8>, ++ <0x10011 0x10011 0xfffffff8>, ++ <0x10012 0x10012 0xfffffff8>, ++ <0x10013 0x10013 0xfffffff8>, ++ <0x10019 0x10019 0xfffffff8>, ++ <0x10021 0x10021 0xfffffff8>, ++ <0x10030 0x10030 0xfffffff8>, ++ <0x10031 0x10031 0xfffffff8>; ++ riscv,raw-event-to-mhpmcounters = ++ <0x00000000 0x00000001 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000002 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000003 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000004 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000005 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000006 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000007 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000008 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000009 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000000a 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000000b 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000000c 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000000d 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000000e 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000000f 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000010 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000011 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000012 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000013 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000014 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000015 
0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000016 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000017 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000018 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000019 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000001a 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000001b 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000001c 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000001d 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000001e 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000001f 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000020 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000021 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000022 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000023 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000024 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000025 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000026 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000027 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000028 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x00000029 0xffffffff 0xffffffff 0xfffffff8>, ++ <0x00000000 0x0000002a 0xffffffff 0xffffffff 0xfffffff8>; ++ }; ++ ++ soc: soc { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ compatible = "simple-bus"; ++ ranges; ++ dma-ranges = <0x0 0x0 0x0 0x0 0x1f 0x0>; ++ ++ clint_mswi: clint-mswi@7094000000 { ++ compatible = "thead,c900-clint-mswi"; ++ reg = <0x00000070 0x94000000 0x00000000 0x00004000>; ++ interrupts-extended = < ++ &cpu0_intc 3 ++ &cpu1_intc 3 ++ &cpu2_intc 3 ++ &cpu3_intc 3 ++ &cpu4_intc 3 ++ &cpu5_intc 3 ++ &cpu6_intc 3 ++ &cpu7_intc 3 ++ &cpu8_intc 3 ++ &cpu9_intc 3 ++ &cpu10_intc 3 ++ &cpu11_intc 3 ++ &cpu12_intc 3 ++ &cpu13_intc 3 ++ &cpu14_intc 3 ++ &cpu15_intc 3 ++ &cpu16_intc 3 ++ &cpu17_intc 3 ++ &cpu18_intc 3 ++ &cpu19_intc 3 ++ &cpu20_intc 3 ++ &cpu21_intc 3 ++ &cpu22_intc 3 ++ &cpu23_intc 3 ++ &cpu24_intc 3 ++ &cpu25_intc 3 ++ &cpu26_intc 3 ++ &cpu27_intc 3 ++ &cpu28_intc 3 ++ &cpu29_intc 3 ++ &cpu30_intc 3 ++ &cpu31_intc 3 ++ &cpu32_intc 3 ++ &cpu33_intc 3 ++ &cpu34_intc 3 ++ &cpu35_intc 3 ++ &cpu36_intc 3 ++ &cpu37_intc 3 ++ &cpu38_intc 3 ++ &cpu39_intc 3 ++ &cpu40_intc 3 ++ &cpu41_intc 3 ++ &cpu42_intc 3 ++ &cpu43_intc 3 ++ &cpu44_intc 3 ++ &cpu45_intc 3 ++ &cpu46_intc 3 ++ &cpu47_intc 3 ++ &cpu48_intc 3 ++ &cpu49_intc 3 ++ &cpu50_intc 3 ++ &cpu51_intc 3 ++ &cpu52_intc 3 ++ &cpu53_intc 3 ++ &cpu54_intc 3 ++ &cpu55_intc 3 ++ &cpu56_intc 3 ++ &cpu57_intc 3 ++ &cpu58_intc 3 ++ &cpu59_intc 3 ++ &cpu60_intc 3 ++ &cpu61_intc 3 ++ &cpu62_intc 3 ++ &cpu63_intc 3 ++ >; ++ }; ++ ++ clint_mtimer0: clint-mtimer@70ac000000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac000000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu0_intc 7 ++ &cpu1_intc 7 ++ &cpu2_intc 7 ++ &cpu3_intc 7 ++ >; ++ }; ++ ++ clint_mtimer1: clint-mtimer@70ac010000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac010000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu4_intc 7 ++ &cpu5_intc 7 ++ &cpu6_intc 7 ++ &cpu7_intc 7 ++ >; ++ }; ++ ++ clint_mtimer2: clint-mtimer@70ac020000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac020000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu8_intc 7 ++ &cpu9_intc 7 ++ &cpu10_intc 7 ++ &cpu11_intc 7 ++ >; ++ }; ++ ++ clint_mtimer3: clint-mtimer@70ac030000 { ++ compatible = 
"thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac030000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu12_intc 7 ++ &cpu13_intc 7 ++ &cpu14_intc 7 ++ &cpu15_intc 7 ++ >; ++ }; ++ ++ clint_mtimer4: clint-mtimer@70ac040000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac040000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu16_intc 7 ++ &cpu17_intc 7 ++ &cpu18_intc 7 ++ &cpu19_intc 7 ++ >; ++ }; ++ ++ clint_mtimer5: clint-mtimer@70ac050000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac050000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu20_intc 7 ++ &cpu21_intc 7 ++ &cpu22_intc 7 ++ &cpu23_intc 7 ++ >; ++ }; ++ ++ clint_mtimer6: clint-mtimer@70ac060000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac060000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu24_intc 7 ++ &cpu25_intc 7 ++ &cpu26_intc 7 ++ &cpu27_intc 7 ++ >; ++ }; ++ ++ clint_mtimer7: clint-mtimer@70ac070000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac070000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu28_intc 7 ++ &cpu29_intc 7 ++ &cpu30_intc 7 ++ &cpu31_intc 7 ++ >; ++ }; ++ ++ clint_mtimer8: clint-mtimer@70ac080000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac080000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu32_intc 7 ++ &cpu33_intc 7 ++ &cpu34_intc 7 ++ &cpu35_intc 7 ++ >; ++ }; ++ ++ clint_mtimer9: clint-mtimer@70ac090000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac090000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu36_intc 7 ++ &cpu37_intc 7 ++ &cpu38_intc 7 ++ &cpu39_intc 7 ++ >; ++ }; ++ ++ clint_mtimer10: clint-mtimer@70ac0a0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac0a0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu40_intc 7 ++ &cpu41_intc 7 ++ &cpu42_intc 7 ++ &cpu43_intc 7 ++ >; ++ }; ++ ++ clint_mtimer11: clint-mtimer@70ac0b0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac0b0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu44_intc 7 ++ &cpu45_intc 7 ++ &cpu46_intc 7 ++ &cpu47_intc 7 ++ >; ++ }; ++ ++ clint_mtimer12: clint-mtimer@70ac0c0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac0c0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu48_intc 7 ++ &cpu49_intc 7 ++ &cpu50_intc 7 ++ &cpu51_intc 7 ++ >; ++ }; ++ ++ clint_mtimer13: clint-mtimer@70ac0d0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac0d0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu52_intc 7 ++ &cpu53_intc 7 ++ &cpu54_intc 7 ++ &cpu55_intc 7 ++ >; ++ }; ++ ++ clint_mtimer14: clint-mtimer@70ac0e0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac0e0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu56_intc 7 ++ &cpu57_intc 7 ++ &cpu58_intc 7 ++ &cpu59_intc 7 ++ >; ++ }; ++ ++ clint_mtimer15: clint-mtimer@70ac0f0000 { ++ compatible = "thead,c900-clint-mtimer"; ++ reg = <0x00000070 0xac0f0000 0x00000000 0x00007ff8>; ++ interrupts-extended = < ++ &cpu60_intc 7 ++ &cpu61_intc 7 ++ &cpu62_intc 7 ++ &cpu63_intc 7 ++ >; ++ }; ++ ++ intc: interrupt-controller@7090000000 { ++ #address-cells = <0>; ++ #interrupt-cells = <2>; ++ compatible = "thead,c900-plic"; ++ interrupt-controller; ++ interrupts-extended = < ++ &cpu0_intc 11 &cpu0_intc 9 ++ &cpu1_intc 11 &cpu1_intc 9 ++ &cpu2_intc 11 &cpu2_intc 9 ++ &cpu3_intc 11 &cpu3_intc 9 ++ &cpu4_intc 11 
&cpu4_intc 9 ++ &cpu5_intc 11 &cpu5_intc 9 ++ &cpu6_intc 11 &cpu6_intc 9 ++ &cpu7_intc 11 &cpu7_intc 9 ++ &cpu8_intc 11 &cpu8_intc 9 ++ &cpu9_intc 11 &cpu9_intc 9 ++ &cpu10_intc 11 &cpu10_intc 9 ++ &cpu11_intc 11 &cpu11_intc 9 ++ &cpu12_intc 11 &cpu12_intc 9 ++ &cpu13_intc 11 &cpu13_intc 9 ++ &cpu14_intc 11 &cpu14_intc 9 ++ &cpu15_intc 11 &cpu15_intc 9 ++ &cpu16_intc 11 &cpu16_intc 9 ++ &cpu17_intc 11 &cpu17_intc 9 ++ &cpu18_intc 11 &cpu18_intc 9 ++ &cpu19_intc 11 &cpu19_intc 9 ++ &cpu20_intc 11 &cpu20_intc 9 ++ &cpu21_intc 11 &cpu21_intc 9 ++ &cpu22_intc 11 &cpu22_intc 9 ++ &cpu23_intc 11 &cpu23_intc 9 ++ &cpu24_intc 11 &cpu24_intc 9 ++ &cpu25_intc 11 &cpu25_intc 9 ++ &cpu26_intc 11 &cpu26_intc 9 ++ &cpu27_intc 11 &cpu27_intc 9 ++ &cpu28_intc 11 &cpu28_intc 9 ++ &cpu29_intc 11 &cpu29_intc 9 ++ &cpu30_intc 11 &cpu30_intc 9 ++ &cpu31_intc 11 &cpu31_intc 9 ++ &cpu32_intc 11 &cpu32_intc 9 ++ &cpu33_intc 11 &cpu33_intc 9 ++ &cpu34_intc 11 &cpu34_intc 9 ++ &cpu35_intc 11 &cpu35_intc 9 ++ &cpu36_intc 11 &cpu36_intc 9 ++ &cpu37_intc 11 &cpu37_intc 9 ++ &cpu38_intc 11 &cpu38_intc 9 ++ &cpu39_intc 11 &cpu39_intc 9 ++ &cpu40_intc 11 &cpu40_intc 9 ++ &cpu41_intc 11 &cpu41_intc 9 ++ &cpu42_intc 11 &cpu42_intc 9 ++ &cpu43_intc 11 &cpu43_intc 9 ++ &cpu44_intc 11 &cpu44_intc 9 ++ &cpu45_intc 11 &cpu45_intc 9 ++ &cpu46_intc 11 &cpu46_intc 9 ++ &cpu47_intc 11 &cpu47_intc 9 ++ &cpu48_intc 11 &cpu48_intc 9 ++ &cpu49_intc 11 &cpu49_intc 9 ++ &cpu50_intc 11 &cpu50_intc 9 ++ &cpu51_intc 11 &cpu51_intc 9 ++ &cpu52_intc 11 &cpu52_intc 9 ++ &cpu53_intc 11 &cpu53_intc 9 ++ &cpu54_intc 11 &cpu54_intc 9 ++ &cpu55_intc 11 &cpu55_intc 9 ++ &cpu56_intc 11 &cpu56_intc 9 ++ &cpu57_intc 11 &cpu57_intc 9 ++ &cpu58_intc 11 &cpu58_intc 9 ++ &cpu59_intc 11 &cpu59_intc 9 ++ &cpu60_intc 11 &cpu60_intc 9 ++ &cpu61_intc 11 &cpu61_intc 9 ++ &cpu62_intc 11 &cpu62_intc 9 ++ &cpu63_intc 11 &cpu63_intc 9 ++ >; ++ reg = <0x00000070 0x90000000 0x00000000 0x04000000>; ++ reg-names = "control"; ++ riscv,max-priority = <7>; ++ riscv,ndev = <224>; ++ }; ++ ++ timer0: dw-apb-timer0@7030003000 { ++ compatible = "snps,dw-apb-timer"; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ reg = <0x70 0x30003000 0x0 0x14>; ++ clocks = <&div_clk GATE_CLK_TIMER1>, ++ <&div_clk GATE_CLK_APB_TIMER>; ++ clock-names = "timer", "pclk"; ++ clk-drv-rating = <300>; ++ }; ++ ++ timer1: dw-apb-timer1@7030003014 { ++ compatible = "snps,dw-apb-timer"; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ reg = <0x70 0x30003014 0x0 0x10>; ++ clocks = <&div_clk GATE_CLK_TIMER2>, ++ <&div_clk GATE_CLK_APB_TIMER>; ++ clock-names = "timer", "pclk"; ++ clk-drv-rating = <300>; ++ }; ++ ++ top_misc: top_misc_ctrl@7030010000 { ++ compatible = "syscon"; ++ reg = <0x70 0x30010000 0x0 0x8000>; ++ }; ++ ++ rst: reset-controller { ++ #reset-cells = <1>; ++ compatible = "bitmain,reset"; ++ subctrl-syscon = <&top_misc>; ++ top_rst_offset = <0x3000>; ++ nr_resets = ; ++ }; ++ ++ i2c0: i2c@7030005000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ clocks = <&div_clk GATE_CLK_APB_I2C>; ++ clock-names = "clk_gate_apb_i2c"; ++ reg = <0x70 0x30005000 0x0 0x1000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ resets = <&rst RST_I2C0>; ++ reset-names = "i2c0"; ++ }; ++ ++ i2c1: i2c@7030006000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ clocks = <&div_clk GATE_CLK_APB_I2C>; ++ clock-names = "clk_gate_apb_i2c"; ++ reg = <0x70 0x30006000 0x0 0x1000>; ++ 
interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ resets = <&rst RST_I2C1>; ++ reset-names = "i2c1"; ++ }; ++ ++ i2c2: i2c@7030007000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ clocks = <&div_clk GATE_CLK_APB_I2C>; ++ clock-names = "clk_gate_apb_i2c"; ++ reg = <0x70 0x30007000 0x0 0x1000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ resets = <&rst RST_I2C2>; ++ reset-names = "i2c2"; ++ }; ++ ++ i2c3: i2c@7030008000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "snps,designware-i2c"; ++ clocks = <&div_clk GATE_CLK_APB_I2C>; ++ clock-names = "clk_gate_apb_i2c"; ++ reg = <0x70 0x30008000 0x0 0x1000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ resets = <&rst RST_I2C3>; ++ reset-names = "i2c3"; ++ }; ++ ++ gpio0: gpio@7030009000 { ++ compatible = "snps,dw-apb-gpio"; ++ reg = <0x70 0x30009000 0x0 0x400>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ clocks = <&div_clk GATE_CLK_APB_GPIO>, ++ <&div_clk GATE_CLK_GPIO_DB>; ++ clock-names = "bus", "db"; ++ ++ port0a: gpio-controller@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ bank-name = "port0a"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <32>; ++ reg = <0>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ }; ++ }; ++ ++ gpio1: gpio@703000a000 { ++ compatible = "snps,dw-apb-gpio"; ++ reg = <0x70 0x3000a000 0x0 0x400>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ clocks = <&div_clk GATE_CLK_APB_GPIO>, ++ <&div_clk GATE_CLK_GPIO_DB>; ++ clock-names = "bus", "db"; ++ ++ port1a: gpio-controller@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ bank-name = "port0a"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <32>; ++ reg = <0>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ }; ++ }; ++ ++ gpio2: gpio@703000b000 { ++ compatible = "snps,dw-apb-gpio"; ++ reg = <0x70 0x3000b000 0x0 0x400>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ clocks = <&div_clk GATE_CLK_APB_GPIO>, ++ <&div_clk GATE_CLK_GPIO_DB>; ++ clock-names = "bus", "db"; ++ ++ port2a: gpio-controller@0 { ++ compatible = "snps,dw-apb-gpio-port"; ++ bank-name = "port0a"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ snps,nr-gpios = <32>; ++ reg = <0>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ }; ++ }; ++ ++ pwm: pwm@703000C000 { ++ compatible = "sophgo,sophgo-pwm"; ++ reg = <0x70 0x3000C000 0x0 0x20>; ++ clocks = <&div_clk GATE_CLK_APB_PWM>; ++ clock-names = "clk_gate_apb_pwm"; ++ #pwm-cells = <2>; ++ pwm-num = <2>; ++ no-polarity; ++ }; ++ ++ tach0: tach@703000C020 { ++ compatible = "sophgo,sophgo-tach"; ++ reg = <0x70 0x3000C020 0x0 0x8>; ++ }; ++ ++ tach1: tach@703000C028 { ++ compatible = "sophgo,sophgo-tach"; ++ reg = <0x70 0x3000C028 0x0 0x8>; ++ }; ++ ++ uart0: serial@7040000000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x00000070 0x40000000 0x00000000 0x00001000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <500000000>; ++ clocks = <&div_clk GATE_CLK_UART_500M>, ++ <&div_clk GATE_CLK_APB_UART>; ++ clock-names = "baudclk", "apb_pclk"; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ }; ++ ++ uart1: serial@7040001000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x00000070 0x40001000 0x00000000 0x00001000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ 
clock-frequency = <500000000>; ++ clocks = <&div_clk GATE_CLK_UART_500M>, ++ <&div_clk GATE_CLK_APB_UART>; ++ clock-names = "baudclk", "apb_pclk"; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ }; ++ ++ uart2: serial@7040002000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x00000070 0x40002000 0x00000000 0x00001000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <500000000>; ++ clocks = <&div_clk GATE_CLK_UART_500M>, ++ <&div_clk GATE_CLK_APB_UART>; ++ clock-names = "baudclk", "apb_pclk"; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ }; ++ ++ uart3: serial@7040003000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x00000070 0x40003000 0x00000000 0x00001000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <500000000>; ++ clocks = <&div_clk GATE_CLK_UART_500M>, ++ <&div_clk GATE_CLK_APB_UART>; ++ clock-names = "baudclk", "apb_pclk"; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ }; ++ ++ emmc: bm-emmc@704002A000 { ++ compatible = "bitmain,bm-emmc"; ++ reg = <0x70 0x4002A000 0x0 0x1000>; ++ reg-names = "core_mem"; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ bus-width = <4>; ++ non-removable; ++ no-sdio; ++ no-sd; ++ resets = <&rst RST_EMMC>; ++ reset-names = "emmc"; ++ clocks = ++ <&div_clk GATE_CLK_EMMC_100M>, ++ <&div_clk GATE_CLK_AXI_EMMC>, ++ <&div_clk GATE_CLK_100K_EMMC>; ++ clock-names = ++ "clk_gate_emmc", ++ "clk_gate_axi_emmc", ++ "clk_gate_100k_emmc"; ++ }; ++ ++ sd: bm-sd@704002B000 { ++ compatible = "bitmain,bm-sd"; ++ reg = <0x70 0x4002B000 0x0 0x1000>; ++ reg-names = "core_mem"; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ bus-width = <4>; ++ no-sdio; ++ no-mmc; ++ resets = <&rst RST_SD>; ++ reset-names = "sdio"; ++ clocks = ++ <&div_clk GATE_CLK_SD_100M>, ++ <&div_clk GATE_CLK_AXI_SD>, ++ <&div_clk GATE_CLK_100K_SD>; ++ clock-names = ++ "clk_gate_sd", ++ "clk_gate_axi_sd", ++ "clk_gate_100k_sd"; ++ }; ++ ++ spifmc0: flash-controller@7000180000 { ++ compatible = "sophgo,spifmc"; ++ reg = <0x70 0x00180000 0x0 0x1000000>; ++ reg-names = "memory"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <100000000>; ++ clocks = <&div_clk GATE_CLK_AHB_SF>; ++ flash@0 { ++ reg = <0>; ++ compatible = "jedec,spi-nor"; ++ }; ++ }; ++ ++ spifmc1: flash-controller@7002180000 { ++ compatible = "sophgo,spifmc"; ++ reg = <0x70 0x02180000 0x0 0x1000000>; ++ reg-names = "memory"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clock-frequency = <100000000>; ++ clocks = <&div_clk GATE_CLK_AHB_SF>; ++ flash@0 { ++ reg = <0>; ++ compatible = "jedec,spi-nor"; ++ }; ++ }; ++ ++ spi0: spi@7040004000 { ++ compatible = "snps,dw-apb-ssi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x70 0x40004000 0x0 0x1000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clocks = <&div_clk GATE_CLK_APB_SPI>, ++ <&div_clk GATE_CLK_SYSDMA_AXI>; ++ clock-frequency = <250000000>; ++ resets = <&rst RST_SPI0>; ++ reset-names = "spi0"; ++ num-cs = <2>; ++ status = "okay"; ++ }; ++ ++ spi1: spi@7040005000 { ++ compatible = "snps,dw-apb-ssi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x70 0x40005000 0x0 0x1000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ clocks = <&div_clk GATE_CLK_APB_SPI>, ++ <&div_clk GATE_CLK_SYSDMA_AXI>; ++ clock-frequency = <250000000>; ++ resets = <&rst RST_SPI1>; ++ reset-names = "spi1"; ++ num-cs = <2>; ++ status = "okay"; ++ }; ++ ++ stmmac_axi_setup: stmmac-axi-config { ++ 
snps,wr_osr_lmt = <1>; ++ snps,rd_osr_lmt = <2>; ++ snps,blen = <4 8 16 0 0 0 0>; ++ }; ++ ++ mtl_rx_setup: rx-queues-config { ++ snps,rx-queues-to-use = <8>; ++ queue0 {}; ++ queue1 {}; ++ queue2 {}; ++ queue3 {}; ++ queue4 {}; ++ queue5 {}; ++ queue6 {}; ++ queue7 {}; ++ }; ++ ++ mtl_tx_setup: tx-queues-config { ++ snps,tx-queues-to-use = <8>; ++ queue0 {}; ++ queue1 {}; ++ queue2 {}; ++ queue3 {}; ++ queue4 {}; ++ queue5 {}; ++ queue6 {}; ++ queue7 {}; ++ }; ++ ++ ethernet0: ethernet@7040026000 { ++ compatible = "bitmain,ethernet"; ++ reg = <0x70 0x40026000 0x0 0x4000>; ++ interrupt-parent = <&intc>; ++ interrupts = ; ++ interrupt-names = "macirq"; ++ clock-names = "clk_tx", "gate_clk_tx", "stmmaceth", "ptp_ref", "gate_clk_ref"; ++ clocks = <&div_clk DIV_CLK_FPLL_TX_ETH0>, ++ <&div_clk GATE_CLK_TX_ETH0>, ++ <&div_clk GATE_CLK_AXI_ETH0>, ++ <&div_clk GATE_CLK_PTP_REF_I_ETH0>, ++ <&div_clk GATE_CLK_REF_ETH0>; ++ ++ /* no hash filter and perfect filter support */ ++ snps,multicast-filter-bins = <0>; ++ snps,perfect-filter-entries = <1>; ++ ++ snps,txpbl = <32>; ++ snps,rxpbl = <32>; ++ snps,aal; ++ ++ snps,axi-config = <&stmmac_axi_setup>; ++ snps,mtl-rx-config = <&mtl_rx_setup>; ++ snps,mtl-tx-config = <&mtl_tx_setup>; ++ ++ phy-mode = "rgmii-txid"; ++ phy-reset-gpios = <&port0a 27 0>; ++ phy-handle = <&phy0>; ++ mdio { ++ #address-cells = <0x1>; ++ #size-cells = <0x0>; ++ compatible = "snps,dwmac-mdio"; ++ phy0: phy@0 { ++ compatible = "ethernet-phy-ieee802.3-c22"; ++ device_type = "ethernet-phy"; ++ reg = <0x0>; ++ }; ++ }; ++ }; ++ }; ++ ++ intc1: top_intc@7030010300 { ++ compatible = "sophgo,top-intc"; ++ reg = <0x70 0x300102E0 0x0 0x4>, ++ <0x70 0x30010300 0x0 0x4>, ++ <0x70 0x30010304 0x0 0x4>; ++ reg-names = "sta", "set", "clr"; ++ reg-bitwidth = <32>; ++ top_intc_id = <0>; ++ interrupt-controller; ++ #interrupt-cells = <0x1>; // only applies to child node ++ for-msi; ++ ++ interrupt-parent = <&intc>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ interrupt-names = "msi0", "msi1", "msi2", "msi3", ++ "msi4", "msi5", "msi6", "msi7", ++ "msi8", "msi9", "msi10", "msi11", ++ "msi12", "msi13", "msi14", "msi15", ++ "msi16", "msi17", "msi18", "msi19", ++ "msi20", "msi21", "msi22", "msi23", ++ "msi24", "msi25", "msi26", "msi27", ++ "msi28", "msi29", "msi30", "msi31"; ++ ++ }; ++ ++ aliases { ++ serial0 = &uart0; ++ }; ++ ++ chosen: chosen { ++ bootargs = "console=ttyS0,115200 earlycon maxcpus=1"; ++ stdout-path = "serial0"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/spacemit/Makefile b/arch/riscv/boot/dts/spacemit/Makefile +new file mode 100644 +index 000000000000..bc18f5f5cec9 +--- /dev/null ++++ b/arch/riscv/boot/dts/spacemit/Makefile +@@ -0,0 +1,2 @@ ++dtb-$(CONFIG_SOC_SPACEMIT_K1X) += k1-bananapi-f3.dtb ++obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) +diff --git a/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts +new file mode 100644 +index 000000000000..16f7a19f701f +--- /dev/null ++++ b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts +@@ -0,0 +1,448 @@ ++// SPDX-License-Identifier: (GPL-2.0 OR MIT) ++/* Copyright (c) 2023 Spacemit, Inc */ ++ ++/dts-v1/; ++ ++#include "k1-x.dtsi" ++#include "k1-x_pinctrl.dtsi" ++ ++/ { ++ model = "Banana Pi BPI-F3"; ++ ++ memory@0 { ++ device_type = "memory"; ++ reg = <0x0 0x00000000 0x0 0x80000000>; ++ }; ++ ++ memory@100000000 { ++ device_type = "memory"; ++ reg = 
<0x1 0x00000000 0x0 0x80000000>; ++ }; ++ ++ chosen { ++ bootargs = "earlycon=sbi console=ttySP0,115200n8 loglevel=8 rdinit=/init"; ++ stdout-path = "serial0:115200n8"; ++ }; ++}; ++ ++&pinctrl { ++ pinctrl-single,gpio-range = < ++ &range GPIO_49 2 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ &range GPIO_58 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_63 2 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_65 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_67 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ &range PRI_TDI 2 (MUX_MODE1 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range PRI_TCK 1 (MUX_MODE1 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range PRI_TDO 1 (MUX_MODE1 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_74 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_80 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ &range GPIO_81 3 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_90 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_91 2 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range DVL0 1 (MUX_MODE1 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range DVL1 1 (MUX_MODE1 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS0) ++ &range GPIO_110 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_111 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_113 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_114 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_115 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ &range GPIO_116 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_118 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_123 1 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS0) ++ &range GPIO_124 1 (MUX_MODE0 | EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ &range GPIO_125 3 (MUX_MODE0 | EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ >; ++}; ++ ++&gpio{ ++ gpio-ranges = < ++ &pinctrl 49 GPIO_49 2 ++ &pinctrl 58 GPIO_58 1 ++ &pinctrl 63 GPIO_63 3 ++ &pinctrl 67 GPIO_67 1 ++ &pinctrl 70 PRI_TDI 4 ++ &pinctrl 74 GPIO_74 1 ++ &pinctrl 80 GPIO_80 4 ++ &pinctrl 90 GPIO_90 3 ++ &pinctrl 96 DVL0 2 ++ &pinctrl 110 GPIO_110 1 ++ &pinctrl 111 GPIO_111 1 ++ &pinctrl 113 GPIO_113 1 ++ &pinctrl 114 GPIO_114 3 ++ &pinctrl 118 GPIO_118 1 ++ &pinctrl 123 GPIO_123 5 ++ >; ++}; ++ ++&uart0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_uart0_2>; ++ status = "okay"; ++}; ++ ++&spi3 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_ssp3_0>; ++ status = "okay"; ++}; ++ ++&qspi { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_qspi>; ++ status = "okay"; ++ ++ flash@0 { ++ compatible = "jedec,spi-nor"; ++ reg = <0>; ++ spi-max-frequency = <26500000>; ++ m25p,fast-read; ++ broken-flash-reset; ++ status = "okay"; ++ }; ++}; ++ ++&i2c2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c2_0>; ++ spacemit,i2c-fast-mode; ++ status = "okay"; ++}; ++ ++&i2c8 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_i2c8>; ++ status = "okay"; ++ ++ spmp1@41 { ++ compatible = "spacemit,p1"; ++ reg = <0x41>; ++ interrupt-parent = <&intc>; ++ interrupts = <64>; ++ status = "okay"; ++ ++ regulators { ++ compatible = "spacemit,p1,regulator"; ++ ++ dcdc_1: DCDC_REG1 { ++ regulator-name = "dcdc1"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3450000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ 
regulator-suspend-microvolt = <650000>; ++ }; ++ }; ++ ++ dcdc_2: DCDC_REG2 { ++ regulator-name = "dcdc2"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3450000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ }; ++ ++ dcdc_3: DCDC_REG3 { ++ regulator-name = "dcdc3"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <1800000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ }; ++ ++ dcdc_4: DCDC_REG4 { ++ regulator-name = "dcdc4"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3300000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <3300000>; ++ }; ++ }; ++ ++ dcdc_5: DCDC_REG5 { ++ regulator-name = "dcdc5"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3450000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ }; ++ ++ dcdc_6: DCDC_REG6 { ++ regulator-name = "dcdc6"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3450000>; ++ regulator-ramp-delay = <5000>; ++ regulator-always-on; ++ }; ++ ++ ldo_1: LDO_REG1 { ++ regulator-name = "ldo1"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-boot-on; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_2: LDO_REG2 { ++ regulator-name = "ldo2"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_3: LDO_REG3 { ++ regulator-name = "ldo3"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_4: LDO_REG4 { ++ regulator-name = "ldo4"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_5: LDO_REG5 { ++ regulator-name = "ldo5"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-boot-on; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_6: LDO_REG6 { ++ regulator-name = "ldo6"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_7: LDO_REG7 { ++ regulator-name = "ldo7"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-state-mem { ++ regulator-off-in-suspend; ++ regulator-suspend-microvolt = <500000>; ++ }; ++ }; ++ ++ ldo_8: LDO_REG8 { ++ regulator-name = "ldo8"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-always-on; ++ }; ++ ++ ldo_9: LDO_REG9 { ++ regulator-name = "ldo9"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ }; ++ ++ ldo_10: LDO_REG10 { ++ regulator-name = "ldo10"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ regulator-always-on; ++ }; ++ ++ ldo_11: LDO_REG11 { ++ regulator-name = "ldo11"; ++ regulator-min-microvolt = <500000>; ++ regulator-max-microvolt = <3400000>; ++ }; ++ ++ sw_1: SWITCH_REG1 { ++ regulator-name = "switch1"; ++ }; ++ }; ++ ++ 
pmic_pinctrl: pinctrl {
++			compatible = "spacemit,p1,pinctrl";
++			gpio-controller;
++			#gpio-cells = <2>;
++			spm_pmic,npins = <6>;
++		};
++
++		pwr_key: key {
++			compatible = "spacemit,p1,pwrkey";
++		};
++
++		ext_rtc: rtc {
++			compatible = "spacemit,p1,rtc";
++		};
++
++		ext_adc: adc {
++			compatible = "spacemit,p1,adc";
++		};
++	};
++};
++
++&pwm7 {
++	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_pwm7_0>;
++	status = "okay";
++};
++
++/* SDCard */
++&sdhci0 {
++	pinctrl-names = "default","fast";
++	pinctrl-0 = <&pinctrl_mmc1>;
++	pinctrl-1 = <&pinctrl_mmc1_fast>;
++	bus-width = <4>;
++	cd-gpios = <&gpio 80 0>;
++	cd-inverted;
++	vmmc-supply = <&dcdc_4>;
++	vqmmc-supply = <&ldo_1>;
++	no-mmc;
++	no-sdio;
++	spacemit,sdh-host-caps-disable = <(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25)>;
++	spacemit,sdh-quirks = <(SDHCI_QUIRK_BROKEN_CARD_DETECTION |
++			SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
++			SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)>;
++	spacemit,sdh-quirks2 = <(SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++			SDHCI_QUIRK2_BROKEN_PHY_MODULE |
++			SDHCI_QUIRK2_SET_AIB_MMC)>;
++	spacemit,aib_mmc1_io_reg = <0xD401E81C>;
++	spacemit,apbc_asfar_reg = <0xD4015050>;
++	spacemit,apbc_assar_reg = <0xD4015054>;
++	spacemit,rx_dline_reg = <0x0>;
++	spacemit,tx_dline_reg = <0x0>;
++	spacemit,tx_delaycode = <0x5f>;
++	spacemit,rx_tuning_limit = <50>;
++	spacemit,sdh-freq = <204800000>;
++	status = "okay";
++};
++
++/* SDIO */
++&sdhci1 {
++	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_mmc2>;
++	bus-width = <4>;
++	non-removable;
++	vqmmc-supply = <&dcdc_3>;
++	no-mmc;
++	no-sd;
++	spacemit,sdh-host-caps-disable = <(MMC_CAP_UHS_DDR50 | MMC_CAP_NEEDS_POLL)>;
++	spacemit,sdh-quirks = <(SDHCI_QUIRK_BROKEN_CARD_DETECTION |
++			SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)>;
++	spacemit,sdh-quirks2 = <(SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
++			SDHCI_QUIRK2_BROKEN_PHY_MODULE)>;
++	spacemit,rx_dline_reg = <0x0>;
++	spacemit,tx_delaycode = <0x8f 0x5f>;
++	spacemit,rx_tuning_limit = <50>;
++	spacemit,sdh-freq = <375000000>;
++	status = "okay";
++};
++
++/* eMMC */
++&sdhci2 {
++	bus-width = <8>;
++	non-removable;
++	mmc-hs400-1_8v;
++	mmc-hs400-enhanced-strobe;
++	no-sd;
++	no-sdio;
++	spacemit,sdh-quirks = <(SDHCI_QUIRK_BROKEN_CARD_DETECTION |
++			SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)>;
++	spacemit,sdh-quirks2 = ;
++	spacemit,sdh-freq = <375000000>;
++	status = "okay";
++};
++
++&eth0 {
++	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_gmac0>;
++
++	emac,reset-gpio = <&gpio 110 0>;
++	emac,reset-active-low;
++	emac,reset-delays-us = <0 10000 100000>;
++
++	/* store forward mode */
++	tx-threshold = <1518>;
++	rx-threshold = <12>;
++	tx-ring-num = <1024>;
++	rx-ring-num = <1024>;
++	dma-burst-len = <5>;
++
++	ref-clock-from-phy;
++	clk-tuning-enable;
++	clk-tuning-by-delayline;
++	tx-phase = <60>;
++	rx-phase = <73>;
++	phy-handle = <&rgmii0>;
++	status = "okay";
++
++	mdio-bus {
++		#address-cells = <0x1>;
++		#size-cells = <0x0>;
++		rgmii0: phy@0 {
++			compatible = "ethernet-phy-id001c.c916";
++			device_type = "ethernet-phy";
++			reg = <0x1>;
++			phy-mode = "rgmii";
++		};
++	};
++};
++
++&eth1 {
++	pinctrl-names = "default";
++	pinctrl-0 = <&pinctrl_gmac1>;
++
++	emac,reset-gpio = <&gpio 115 0>;
++	emac,reset-active-low;
++	emac,reset-delays-us = <0 10000 100000>;
++
++	/* store forward mode */
++	tx-threshold = <1518>;
++	rx-threshold = <12>;
++	tx-ring-num = <1024>;
++	rx-ring-num = <1024>;
++	dma-burst-len = <5>;
++
++	ref-clock-from-phy;
++	clk-tuning-enable;
++	clk-tuning-by-delayline;
++	tx-phase = <90>;
++	rx-phase = <73>;
++	phy-handle = <&rgmii1>;
++	status = "okay";
++
++	mdio-bus {
++		#address-cells = <0x1>;
++		#size-cells = <0x0>;
++		rgmii1: phy@1 {
++			compatible = "ethernet-phy-id001c.c916";
++			device_type = "ethernet-phy";
++			reg = <0x1>;
++			phy-mode = "rgmii";
++		};
++	};
++};
+diff --git a/arch/riscv/boot/dts/spacemit/k1-x.dtsi b/arch/riscv/boot/dts/spacemit/k1-x.dtsi
+new file mode 100644
+index 000000000000..3c7e2ad81529
+--- /dev/null
++++ b/arch/riscv/boot/dts/spacemit/k1-x.dtsi
+@@ -0,0 +1,1221 @@
++// SPDX-License-Identifier: (GPL-2.0 OR MIT)
++/* Copyright (c) 2022 Spacemit, Inc */
++
++/dts-v1/;
++
++#include
++#include
++#include
++#include
++
++/ {
++	compatible = "spacemit,k1-x";
++	#address-cells = <2>;
++	#size-cells = <2>;
++
++	aliases {
++		serial0 = &uart0;
++		serial1 = &uart2;
++		serial2 = &uart3;
++		serial3 = &uart4;
++		serial4 = &uart5;
++		serial5 = &uart6;
++		serial6 = &uart7;
++		serial7 = &uart8;
++		serial8 = &uart9;
++		mmc0 = &sdhci0;
++		mmc1 = &sdhci1;
++		mmc2 = &sdhci2;
++		ethernet0 = &eth0;
++		ethernet1 = &eth1;
++	};
++
++	cpus: cpus {
++		#address-cells = <1>;
++		#size-cells = <0>;
++		timebase-frequency = <24000000>;
++		cpu_0: cpu@0 {
++			compatible = "spacemit,x60", "riscv";
++			device_type = "cpu";
++			model = "Spacemit(R) X60";
++			reg = <0>;
++			status = "okay";
++			riscv,isa = "rv64imafdcv";
++			riscv,isa-base = "rv64i";
++			riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v",
++					"zicbom", "zicboz", "zicntr", "zicond",
++					"zicsr", "zifencei", "zihintpause", "zihpm",
++					"zfh", "zfhmin", "zba", "zbb", "zbc", "zbs",
++					"zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf",
++					"sstc", "svinval", "svnapot", "svpbmt" ;
++			riscv,cbom-block-size = <64>;
++			riscv,cboz-block-size = <64>;
++			i-cache-block-size = <64>;
++			i-cache-size = <32768>;
++			i-cache-sets = <128>;
++			d-cache-block-size = <64>;
++			d-cache-size = <32768>;
++			d-cache-sets = <128>;
++			next-level-cache = <&clst0_l2_cache>;
++			mmu-type = "riscv,sv39";
++			cpu-ai = "true";
++
++			cpu0_intc: interrupt-controller {
++				#interrupt-cells = <1>;
++				compatible = "riscv,cpu-intc";
++				interrupt-controller;
++			};
++		};
++
++		cpu_1: cpu@1 {
++			device_type = "cpu";
++			reg = <1>;
++			status = "okay";
++			compatible = "spacemit,x60", "riscv";
++			model = "Spacemit(R) X60";
++			riscv,isa = "rv64imafdcv";
++			riscv,isa-base = "rv64i";
++			riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v",
++					"zicbom", "zicboz", "zicntr", "zicond",
++					"zicsr", "zifencei", "zihintpause", "zihpm",
++					"zfh", "zfhmin", "zba", "zbb", "zbc", "zbs",
++					"zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf",
++					"sstc", "svinval", "svnapot", "svpbmt" ;
++			riscv,cbom-block-size = <64>;
++			riscv,cboz-block-size = <64>;
++			i-cache-block-size = <64>;
++			i-cache-size = <32768>;
++			i-cache-sets = <128>;
++			d-cache-block-size = <64>;
++			d-cache-size = <32768>;
++			d-cache-sets = <128>;
++			next-level-cache = <&clst0_l2_cache>;
++			mmu-type = "riscv,sv39";
++			cpu-ai = "true";
++
++			cpu1_intc: interrupt-controller {
++				#interrupt-cells = <1>;
++				compatible = "riscv,cpu-intc";
++				interrupt-controller;
++			};
++		};
++
++		cpu_2: cpu@2 {
++			device_type = "cpu";
++			reg = <2>;
++			status = "okay";
++			compatible = "spacemit,x60", "riscv";
++			model = "Spacemit(R) X60";
++			riscv,isa = "rv64imafdcv";
++			riscv,isa-base = "rv64i";
++			riscv,isa-extensions = "i", "m", "a", "f", "d",
riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst0_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ cpu-ai = "true"; ++ ++ cpu2_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_3: cpu@3 { ++ device_type = "cpu"; ++ reg = <3>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst0_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ cpu-ai = "true"; ++ ++ cpu3_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_4: cpu@4 { ++ device_type = "cpu"; ++ reg = <4>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst1_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ ++ cpu4_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_5: cpu@5 { ++ device_type = "cpu"; ++ reg = <5>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst1_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ ++ cpu5_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_6: cpu@6 { ++ device_type = "cpu"; ++ reg = <6>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", 
"c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst1_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ ++ cpu6_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu_7: cpu@7 { ++ device_type = "cpu"; ++ reg = <7>; ++ status = "okay"; ++ compatible = "spacemit,x60", "riscv"; ++ model = "Spacemit(R) X60"; ++ riscv,isa = "rv64imafdcv"; ++ riscv,isa-base = "rv64i"; ++ riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "v", ++ "zicbom", "zicboz", "zicntr", "zicond", ++ "zicsr", "zifencei", "zihintpause", "zihpm", ++ "zfh", "zfhmin", "zba", "zbb", "zbc", "zbs", ++ "zkt", "zvfh", "zvfhmin", "zvkt", "sscofpmf", ++ "sstc", "svinval", "svnapot", "svpbmt" ; ++ riscv,cbom-block-size = <64>; ++ riscv,cboz-block-size = <64>; ++ i-cache-block-size = <64>; ++ i-cache-size = <32768>; ++ i-cache-sets = <128>; ++ d-cache-block-size = <64>; ++ d-cache-size = <32768>; ++ d-cache-sets = <128>; ++ next-level-cache = <&clst1_l2_cache>; ++ mmu-type = "riscv,sv39"; ++ ++ cpu7_intc: interrupt-controller { ++ #interrupt-cells = <1>; ++ compatible = "riscv,cpu-intc"; ++ interrupt-controller; ++ }; ++ }; ++ ++ cpu-map { ++ cluster0 { ++ core0 { ++ cpu = <&cpu_0>; ++ }; ++ ++ core1 { ++ cpu = <&cpu_1>; ++ }; ++ ++ core2 { ++ cpu = <&cpu_2>; ++ }; ++ ++ core3 { ++ cpu = <&cpu_3>; ++ }; ++ }; ++ ++ cluster1 { ++ core0 { ++ cpu = <&cpu_4>; ++ }; ++ ++ core1 { ++ cpu = <&cpu_5>; ++ }; ++ ++ core2 { ++ cpu = <&cpu_6>; ++ }; ++ ++ core3 { ++ cpu = <&cpu_7>; ++ }; ++ }; ++ }; ++ ++ clst0_l2_cache: l2-cache0 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <524288>; ++ cache-sets = <512>; ++ cache-unified; ++ }; ++ ++ clst1_l2_cache: l2-cache1 { ++ compatible = "cache"; ++ cache-block-size = <64>; ++ cache-level = <2>; ++ cache-size = <524288>; ++ cache-sets = <512>; ++ cache-unified; ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <0x2>; ++ #size-cells = <0x2>; ++ ranges; ++ ++ vctcxo_24: clock-vctcxo_24 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <24000000>; ++ clock-output-names = "vctcxo_24"; ++ }; ++ ++ vctcxo_3: clock-vctcxo_3 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <3000000>; ++ clock-output-names = "vctcxo_3"; ++ }; ++ ++ vctcxo_1: clock-vctcxo_1 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <1000000>; ++ clock-output-names = "vctcxo_1"; ++ }; ++ ++ pll1_2457p6_vco: clock-pll1_2457p6_vco { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <2457600000>; ++ clock-output-names = "pll1_2457p6_vco"; ++ }; ++ ++ clk_32k: clock-clk32k { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <32000>; ++ clock-output-names = "clk_32k"; ++ }; ++ ++ pll_clk_cluster0: clock-pll_clk_cluster0 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <10000000>; ++ clock-output-names = "pll_clk_cluster0"; ++ }; ++ ++ pll_clk_cluster1: clock-pll_clk_cluster1 { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ 
clock-frequency = <10000000>;
++			clock-output-names = "pll_clk_cluster1";
++		};
++	};
++
++	soc: soc {
++		compatible = "simple-bus";
++		#address-cells = <2>;
++		#size-cells = <2>;
++		dma-noncoherent;
++		ranges;
++
++		/*
++		 * DRAM mapping for devices such as DMA, USB and SDH;
++		 * only 2GB is available to these devices.
++		 */
++		dram_range0: dram_range@0 {
++			compatible = "spacemit-dram-bus";
++			#address-cells = <2>;
++			#size-cells = <2>;
++			dma-ranges = <0x0 0x00000000 0x0 0x00000000 0x0 0x80000000>;
++			#interconnect-cells = <0>;
++			status = "okay";
++		};
++
++		/*
++		 * DRAM mapping for devices such as VPU/GPU/DPU/V2D/ISP/CSI/VI/CPP,
++		 * and ETH/crypto/JPU.
++		 * 4GB of space is available to devices, and the mapping is:
++		 * 0~2GB of the device's address space maps to 0~2GB of the cpu's
++		 * 2~4GB of the device's address space maps to 4~6GB of the cpu's
++		 */
++		dram_range1: dram_range@1 {
++			compatible = "spacemit-dram-bus";
++			#address-cells = <2>;
++			#size-cells = <2>;
++			dma-ranges = <0x0 0x00000000 0x0 0x00000000 0x0 0x80000000>,
++				     <0x0 0x80000000 0x1 0x00000000 0x0 0x80000000>;
++			#interconnect-cells = <0>;
++			status = "okay";
++		};
++
++		/*
++		 * DRAM mapping for devices such as PCIe0/PCIe1/PCIe2.
++		 * 14GB of space is available to devices, and the mapping is:
++		 * 0~2GB of the device's address space maps to 0~2GB of the cpu's
++		 * 4~16GB of the device's address space maps to 6~18GB of the cpu's
++		 * (per the second dma-ranges entry below, e.g. device address
++		 * 0x1_00000000 maps to cpu address 0x1_80000000);
++		 * the 2~4GB of the device's address space is the I/O area.
++		 */
++		dram_range2: dram_range@2 {
++			compatible = "spacemit-dram-bus";
++			#address-cells = <2>;
++			#size-cells = <2>;
++			dma-ranges = <0x0 0x00000000 0x0 0x00000000 0x0 0x80000000>,
++				     <0x1 0x00000000 0x1 0x80000000 0x3 0x00000000>;
++			#interconnect-cells = <0>;
++			status = "okay";
++		};
++
++		clint0: clint@e4000000 {
++			compatible = "riscv,clint0";
++			interrupts-extended = <
++				&cpu0_intc 3 &cpu0_intc 7
++				&cpu1_intc 3 &cpu1_intc 7
++				&cpu2_intc 3 &cpu2_intc 7
++				&cpu3_intc 3 &cpu3_intc 7
++				&cpu4_intc 3 &cpu4_intc 7
++				&cpu5_intc 3 &cpu5_intc 7
++				&cpu6_intc 3 &cpu6_intc 7
++				&cpu7_intc 3 &cpu7_intc 7
++			>;
++			reg = <0x0 0xE4000000 0x0 0x00010000>;
++		};
++
++		ccu: clock-controller@d4050000 {
++			compatible = "spacemit,k1x-clock";
++			reg = <0x0 0xd4050000 0x0 0x209c>,
++			      <0x0 0xd4282800 0x0 0x400>,
++			      <0x0 0xd4015000 0x0 0x1000>,
++			      <0x0 0xd4090000 0x0 0x1000>,
++			      <0x0 0xd4282c00 0x0 0x400>,
++			      <0x0 0xd8440000 0x0 0x98>,
++			      <0x0 0xc0000000 0x0 0x4280>,
++			      <0x0 0xf0610000 0x0 0x20>,
++			      <0x0 0xc0880000 0x0 0x2050>,
++			      <0x0 0xc0888000 0x0 0x30>;
++			reg-names = "mpmu", "apmu", "apbc", "apbs", "ciu",
++				    "dciu", "ddrc", "apbc2", "rcpu", "rcpu2";
++			clocks = <&vctcxo_24>, <&vctcxo_3>, <&vctcxo_1>,
++				 <&pll1_2457p6_vco>, <&clk_32k>;
++			clock-names = "vctcxo_24", "vctcxo_3", "vctcxo_1",
++				      "pll1_2457p6_vco",
++				      "clk_32k";
++			#clock-cells = <1>;
++			status = "okay";
++		};
++
++		reset: reset-controller@d4050000 {
++			compatible = "spacemit,k1x-reset";
++			reg = <0x0 0xd4050000 0x0 0x209c>,
++			      <0x0 0xd4282800 0x0 0x400>,
++			      <0x0 0xd4015000 0x0 0x1000>,
++			      <0x0 0xd4090000 0x0 0x1000>,
++			      <0x0 0xd4282c00 0x0 0x400>,
++			      <0x0 0xd8440000 0x0 0x98>,
++			      <0x0 0xc0000000 0x0 0x4280>,
++			      <0x0 0xf0610000 0x0 0x20>,
++			      <0x0 0xc0880000 0x0 0x2050>,
++			      <0x0 0xc0888000 0x0 0x30>;
++			reg-names = "mpmu", "apmu", "apbc", "apbs", "ciu",
++				    "dciu", "ddrc", "apbc2", "rcpu", "rcpu2";
++			#reset-cells = <1>;
++			status = "okay";
++		};
++
++		intc: interrupt-controller@e0000000 {
++			#interrupt-cells = <1>;
++			compatible = "riscv,plic0";
++			interrupt-controller;
++			interrupts-extended = <
++				&cpu0_intc 11 &cpu0_intc 9
++				&cpu1_intc 11 &cpu1_intc 9
++				&cpu2_intc 11 &cpu2_intc 9
++				&cpu3_intc 11 &cpu3_intc 9
++				&cpu4_intc 11 &cpu4_intc 9
++				&cpu5_intc 11 &cpu5_intc 9
++				&cpu6_intc 11 &cpu6_intc 9
++				&cpu7_intc 11 &cpu7_intc 9
++			>;
++			reg = <0x0 0xE0000000 0x0 0x04000000>;
++			reg-names = "control";
++			riscv,max-priority = <7>;
++			riscv,ndev = <159>;
++		};
++
++		pinctrl: pinctrl@d401e000 {
++			compatible = "pinctrl-spacemit-k1x";
++			reg = <0x0 0xd401e000 0x0 0x250>,
++			      <0x0 0xd4019800 0x0 0x10>,
++			      <0x0 0xd4019000 0x0 0x800>;
++			#address-cells = <1>;
++			#size-cells = <1>;
++			#pinctrl-cells = <2>;
++			#gpio-range-cells = <3>;
++
++			pinctrl-single,register-width = <32>;
++			pinctrl-single,function-mask = <0xff77>;
++
++			clocks = <&ccu CLK_AIB>;
++			clock-names = "clk_aib";
++			resets = <&reset RESET_AIB>;
++			reset-names = "aib_rst";
++
++			interrupt-parent = <&intc>;
++			interrupts = <60>;
++			interrupt-controller;
++			#interrupt-cells = <1>;
++
++			range: gpio-range {
++				#pinctrl-single,gpio-range-cells = <3>;
++			};
++		};
++
++		pdma0: pdma@d4000000 {
++			compatible = "spacemit,k1-pdma";
++			reg = <0x0 0xd4000000 0x0 0x4000>;
++			interrupts = <72>;
++			interrupt-parent = <&intc>;
++			clocks = <&ccu CLK_DMA>;
++			resets = <&reset RESET_DMA>;
++			#dma-cells = <2>;
++			#dma-channels = <16>;
++			max-burst-size = <64>;
++			reserved-channels = <15 45>;
++			interconnects = <&dram_range0>;
++			interconnect-names = "dma-mem";
++			status = "okay";
++		};
++
++		uart0: serial@d4017000 {
++			compatible = "spacemit,k1x-uart";
++			reg = <0x0 0xd4017000 0x0 0x100>;
++			interrupt-parent = <&intc>;
++			interrupts = <42>;
++			clocks = <&ccu CLK_UART1>, <&ccu CLK_SLOW_UART>;
++			clock-names = "func", "gate";
++			resets = <&reset RESET_UART1>;
++			reg-shift = <2>;
++			reg-io-width = <4>;
++			status = "disabled";
++		};
++
++		uart2: uart@d4017100 {
++			compatible = "spacemit,k1x-uart";
++			reg = <0x0 0xd4017100 0x0 0x100>;
++			interrupt-parent = <&intc>;
++			interrupts = <44>;
++			clocks = <&ccu CLK_UART2>, <&ccu CLK_SLOW_UART>;
++			clock-names = "func", "gate";
++			resets = <&reset RESET_UART2>;
++			status = "disabled";
++		};
++
++		uart3: uart@d4017200 {
++			compatible = "spacemit,k1x-uart";
++			reg = <0x0 0xd4017200 0x0 0x100>;
++			interrupt-parent = <&intc>;
++			interrupts = <45>;
++			clocks = <&ccu CLK_UART3>, <&ccu CLK_SLOW_UART>;
++			clock-names = "func", "gate";
++			resets = <&reset RESET_UART3>;
++			status = "disabled";
++		};
++
++		uart4: uart@d4017300 {
++			compatible = "spacemit,k1x-uart";
++			interrupt-parent = <&intc>;
++			reg = <0x0 0xd4017300 0x0 0x100>;
++			interrupts = <46>;
++			clocks = <&ccu CLK_UART4>, <&ccu CLK_SLOW_UART>;
++			clock-names = "func", "gate";
++			resets = <&reset RESET_UART4>;
++			status = "disabled";
++		};
++
++		uart5: uart@d4017400 {
++			compatible = "spacemit,k1x-uart";
++			interrupt-parent = <&intc>;
++			reg = <0x0 0xd4017400 0x0 0x100>;
++			interrupts = <47>;
++			clocks = <&ccu CLK_UART5>, <&ccu CLK_SLOW_UART>;
++			clock-names = "func", "gate";
++			resets = <&reset RESET_UART5>;
++			status = "disabled";
++		};
++
++		uart6: uart@d4017500 {
++			compatible = "spacemit,k1x-uart";
++			interrupt-parent = <&intc>;
++			reg = <0x0 0xd4017500 0x0 0x100>;
++			interrupts = <48>;
++			clocks = <&ccu CLK_UART6>, <&ccu CLK_SLOW_UART>;
++			clock-names = "func", "gate";
++			resets = <&reset RESET_UART6>;
++			status = "disabled";
++		};
++
++		uart7: uart@d4017600 {
++			compatible = "spacemit,k1x-uart";
++			interrupt-parent = <&intc>;
++			reg = <0x0 0xd4017600 0x0 0x100>;
++			interrupts = <49>;
++			clocks = <&ccu CLK_UART7>, <&ccu CLK_SLOW_UART>;
++			clock-names = "func", "gate";
++			resets =
<&reset RESET_UART7>; ++ status = "disabled"; ++ }; ++ ++ uart8: uart@d4017700 { ++ compatible = "spacemit,k1x-uart"; ++ interrupt-parent = <&intc>; ++ reg = <0x0 0xd4017700 0x0 0x100>; ++ interrupts = <50>; ++ clocks = <&ccu CLK_UART8>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART8>; ++ status = "disabled"; ++ }; ++ ++ uart9: uart@d4017800 { ++ compatible = "spacemit,k1x-uart"; ++ interrupt-parent = <&intc>; ++ reg = <0x0 0xd4017800 0x0 0x100>; ++ interrupts = <51>; ++ clocks = <&ccu CLK_UART9>, <&ccu CLK_SLOW_UART>; ++ clock-names = "func", "gate"; ++ resets = <&reset RESET_UART9>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@d4010800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <0>; ++ reg = <0x0 0xd4010800 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <36>; ++ clocks = <&ccu CLK_TWSI0>; ++ resets = <&reset RESET_TWSI0>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@d4011000 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <1>; ++ reg = <0x0 0xd4011000 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <37>; ++ clocks = <&ccu CLK_TWSI1>; ++ resets = <&reset RESET_TWSI1>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c2: i2c@d4012000 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <2>; ++ reg = <0x0 0xd4012000 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <38>; ++ clocks = <&ccu CLK_TWSI2>; ++ resets = <&reset RESET_TWSI2>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c4: i2c@d4012800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <4>; ++ reg = <0x0 0xd4012800 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <40>; ++ clocks = <&ccu CLK_TWSI4>; ++ resets = <&reset RESET_TWSI4>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c5: i2c@d4013800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <5>; ++ reg = <0x0 0xd4013800 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <41>; ++ clocks = <&ccu CLK_TWSI5>; ++ resets = <&reset RESET_TWSI5>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c6: i2c@d4018800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <6>; ++ reg = <0x0 0xd4018800 0x0 0x38>; ++ #address-cells = <1>; ++ 
#size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <70>; ++ clocks = <&ccu CLK_TWSI6>; ++ resets = <&reset RESET_TWSI6>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c7: i2c@d401d000 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <7>; ++ reg = <0x0 0xd401d000 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <18>; ++ clocks = <&ccu CLK_TWSI7>; ++ resets = <&reset RESET_TWSI7>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ i2c8: i2c@d401d800 { ++ compatible = "spacemit,k1-i2c"; ++ spacemit,adapter-id = <8>; ++ reg = <0x0 0xd401d800 0x0 0x38>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupt-parent = <&intc>; ++ interrupts = <19>; ++ clocks = <&ccu CLK_TWSI8>; ++ resets = <&reset RESET_TWSI8>; ++ spacemit,i2c-master-code = /bits/ 8 <0x0e>; ++ spacemit,i2c-clk-rate = <32000000>; ++ spacemit,i2c-lcr = <0x82c469f>; ++ spacemit,i2c-wcr = <0x142a>; ++ /* apb clock: 26MHz or 52MHz */ ++ spacemit,apb_clock = <52000000>; ++ status = "disabled"; ++ }; ++ ++ spi0: spi@d4026000 { ++ compatible = "spacemit,k1-spi"; ++ reg = <0x0 0xd4026000 0x0 0x30>; ++ k1,spi-id = <0>; ++ k1,spi-clock-rate = <26000000>; ++ dmas = <&pdma0 DMA_SSPA0_RX 1 ++ &pdma0 DMA_SSPA0_TX 1>; ++ dma-names = "rx", "tx"; ++ interrupt-parent = <&intc>; ++ interrupts = <56>; ++ clocks = <&ccu CLK_SSPA0>; ++ resets = <&reset RESET_SSPA0>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ spi1: spi@d4026800 { ++ compatible = "spacemit,k1-spi"; ++ reg = <0x0 0xd4026800 0x0 0x30>; ++ k1,spi-id = <1>; ++ k1,spi-clock-rate = <26000000>; ++ dmas = <&pdma0 DMA_SSPA1_RX 1 ++ &pdma0 DMA_SSPA1_TX 1>; ++ dma-names = "rx", "tx"; ++ interrupt-parent = <&intc>; ++ interrupts = <57>; ++ clocks = <&ccu CLK_SSPA1>; ++ resets = <&reset RESET_SSPA1>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ spi3: spi@d401c000 { ++ compatible = "spacemit,k1-spi"; ++ reg = <0x0 0xd401c000 0x0 0x30>; ++ k1,spi-id = <3>; ++ k1,spi-clock-rate = <26000000>; ++ dmas = <&pdma0 DMA_SSP3_RX 1 ++ &pdma0 DMA_SSP3_TX 1>; ++ dma-names = "rx", "tx"; ++ interrupt-parent = <&intc>; ++ interrupts = <55>; ++ clocks = <&ccu CLK_SSP3>; ++ resets = <&reset RESET_SSP3>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ qspi: spi@d420c000 { ++ compatible = "spacemit,k1-qspi"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0x0 0xd420c000 0x0 0x1000>, ++ <0x0 0xb8000000 0x0 0xc00000>; ++ reg-names = "qspi-base", "qspi-mmap"; ++ k1,qspi-sfa1ad = <0x4000000>; ++ k1,qspi-sfa2ad = <0x100000>; ++ k1,qspi-sfb1ad = <0x100000>; ++ k1,qspi-sfb2ad = <0x100000>; ++ clocks = <&ccu CLK_QSPI>, ++ <&ccu CLK_QSPI_BUS>; ++ clock-names = "qspi_clk", "qspi_bus_clk"; ++ resets = <&reset RESET_QSPI>, ++ <&reset RESET_QSPI_BUS>; ++ reset-names = "qspi_reset", "qspi_bus_reset"; ++ 
k1,qspi-pmuap-reg = <0xd4282860>; ++ k1,qspi-mpmu-acgr-reg = <0xd4051024>; ++ k1,qspi-freq = <26500000>; ++ k1,qspi-id = <4>; ++ interrupts = <117>; ++ interrupt-parent = <&intc>; ++ k1,qspi-tx-dma = <1>; ++ k1,qspi-rx-dma = <1>; ++ dmas = <&pdma0 DMA_QSPI_TX 1>; ++ dma-names = "tx-dma"; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ pwm0: pwm@d401a000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401a000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM0>; ++ resets = <&reset RESET_PWM0>; ++ status = "disabled"; ++ }; ++ ++ pwm1: pwm@d401a400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401a400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM1>; ++ resets = <&reset RESET_PWM1>; ++ status = "disabled"; ++ }; ++ ++ pwm2: pwm@d401a800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401a800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM2>; ++ resets = <&reset RESET_PWM2>; ++ status = "disabled"; ++ }; ++ ++ pwm3: pwm@d401ac00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401ac00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM3>; ++ resets = <&reset RESET_PWM3>; ++ status = "disabled"; ++ }; ++ ++ pwm4: pwm@d401b000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401b000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM4>; ++ resets = <&reset RESET_PWM4>; ++ status = "disabled"; ++ }; ++ ++ pwm5: pwm@d401b400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401b400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM5>; ++ resets = <&reset RESET_PWM5>; ++ status = "disabled"; ++ }; ++ ++ pwm6: pwm@d401b800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401b800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM6>; ++ resets = <&reset RESET_PWM6>; ++ status = "disabled"; ++ }; ++ ++ pwm7: pwm@d401bc00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd401bc00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM7>; ++ resets = <&reset RESET_PWM7>; ++ status = "disabled"; ++ }; ++ ++ pwm8: pwm@d4020000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4020000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM8>; ++ resets = <&reset RESET_PWM8>; ++ status = "disabled"; ++ }; ++ ++ pwm9: pwm@d4020400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4020400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM9>; ++ resets = <&reset RESET_PWM9>; ++ status = "disabled"; ++ }; ++ ++ pwm10: pwm@d4020800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4020800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM10>; ++ resets = <&reset RESET_PWM10>; ++ status = "disabled"; ++ }; ++ ++ pwm11: pwm@d4020c00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4020c00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM11>; ++ resets = <&reset RESET_PWM11>; ++ status = "disabled"; ++ }; ++ ++ pwm12: pwm@d4021000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4021000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM12>; ++ resets = <&reset RESET_PWM12>; ++ status = "disabled"; ++ }; ++ ++ pwm13: pwm@d4021400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4021400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM13>; ++ resets = <&reset RESET_PWM13>; ++ status = "disabled"; ++ }; ++ ++ pwm14: pwm@d4021800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4021800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM14>; ++ resets = <&reset RESET_PWM14>; ++ status = 
"disabled"; ++ }; ++ ++ pwm15: pwm@d4021c00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4021c00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM15>; ++ resets = <&reset RESET_PWM15>; ++ status = "disabled"; ++ }; ++ ++ pwm16: pwm@d4022000 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4022000 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM16>; ++ resets = <&reset RESET_PWM16>; ++ status = "disabled"; ++ }; ++ ++ pwm17: pwm@d4022400 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4022400 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM17>; ++ resets = <&reset RESET_PWM17>; ++ status = "disabled"; ++ }; ++ ++ pwm18: pwm@d4022800 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4022800 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM18>; ++ resets = <&reset RESET_PWM18>; ++ status = "disabled"; ++ }; ++ ++ pwm19: pwm@d4022c00 { ++ compatible = "spacemit,k1-pwm"; ++ reg = <0x0 0xd4022c00 0x0 0x10>; ++ #pwm-cells = <1>; ++ clocks = <&ccu CLK_PWM19>; ++ resets = <&reset RESET_PWM19>; ++ status = "disabled"; ++ }; ++ ++ gpio: gpio@d4019000 { ++ compatible = "spacemit,k1x-gpio"; ++ reg = <0x0 0xd4019000 0x0 0x800>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ interrupts = <58>; ++ clocks = <&ccu CLK_GPIO>; ++ interrupt-names = "gpio_mux"; ++ interrupt-parent = <&intc>; ++ interrupt-controller; ++ #interrupt-cells = <2>; ++ ++ gcb0: gpio0 { ++ reg-offset = <0x0>; ++ }; ++ ++ gcb1: gpio1 { ++ reg-offset = <0x4>; ++ }; ++ ++ gcb2: gpio2 { ++ reg-offset = <0x8>; ++ }; ++ ++ gcb3: gpio3 { ++ reg-offset = <0x100>; ++ }; ++ }; ++ ++ sdhci0: sdh@d4280000 { ++ compatible = "spacemit,k1-sdhci"; ++ reg = <0x0 0xd4280000 0x0 0x200>; ++ interrupts = <99>; ++ interrupt-parent = <&intc>; ++ resets = <&reset RESET_SDH_AXI>, ++ <&reset RESET_SDH0>; ++ reset-names = "sdh_axi", "sdh0"; ++ clocks = <&ccu CLK_SDH0>, ++ <&ccu CLK_SDH_AXI>, ++ <&ccu CLK_AIB>; ++ clock-names = "sdh-io", "sdh-core","aib-clk"; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ sdhci1: sdh@d4280800 { ++ compatible = "spacemit,k1-sdhci"; ++ reg = <0x0 0xd4280800 0x0 0x200>; ++ interrupts = <100>; ++ interrupt-parent = <&intc>; ++ resets = <&reset RESET_SDH_AXI>, ++ <&reset RESET_SDH1>; ++ reset-names = "sdh_axi", "sdh1"; ++ clocks = <&ccu CLK_SDH1>, ++ <&ccu CLK_SDH_AXI>; ++ clock-names = "sdh-io", "sdh-core"; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ sdhci2: sdh@d4281000 { ++ compatible = "spacemit,k1-sdhci"; ++ reg = <0x0 0xd4281000 0x0 0x200>; ++ interrupts = <101>; ++ interrupt-parent = <&intc>; ++ resets = <&reset RESET_SDH_AXI>, ++ <&reset RESET_SDH2>; ++ reset-names = "sdh_axi", "sdh2"; ++ clocks = <&ccu CLK_SDH2>, ++ <&ccu CLK_SDH_AXI>; ++ clock-names = "sdh-io", "sdh-core"; ++ interconnects = <&dram_range0>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ eth0: ethernet@cac80000 { ++ compatible = "spacemit,k1-emac"; ++ reg = <0x0 0xCAC80000 0x0 0x420>; ++ k1,apmu-base-reg = <0xD4282800>; ++ ctrl-reg = <0x3e4>; ++ dline-reg = <0x3e8>; ++ clocks = <&ccu CLK_EMAC0_BUS>, <&ccu CLK_EMAC0_PTP>; ++ clock-names = "emac-clk", "ptp-clk"; ++ resets = <&reset RESET_EMAC0>; ++ reset-names = "emac-reset"; ++ interrupts-extended = <&intc 131>; ++ mac-address = [ 00 00 00 00 00 00 ]; ++ ptp-support; ++ ptp-clk-rate = <10000000>; ++ interconnects = <&dram_range1>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ ++ eth1: 
ethernet@cac81000 { ++ compatible = "spacemit,k1-emac"; ++ reg = <0x0 0xCAC81000 0x0 0x420>; ++ k1,apmu-base-reg = <0xD4282800>; ++ ctrl-reg = <0x3ec>; ++ dline-reg = <0x3f0>; ++ clocks = <&ccu CLK_EMAC1_BUS>, <&ccu CLK_EMAC1_PTP>; ++ clock-names = "emac-clk", "ptp-clk"; ++ resets = <&reset RESET_EMAC1>; ++ reset-names = "emac-reset"; ++ interrupts-extended = <&intc 133>; ++ mac-address = [ 00 00 00 00 00 00 ]; ++ ptp-support; ++ ptp-clk-rate = <10000000>; ++ interconnects = <&dram_range1>; ++ interconnect-names = "dma-mem"; ++ status = "disabled"; ++ }; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi b/arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi +new file mode 100644 +index 000000000000..46b826f6b681 +--- /dev/null ++++ b/arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi +@@ -0,0 +1,1192 @@ ++// SPDX-License-Identifier: (GPL-2.0 OR MIT) ++/* Copyright (c) 2023 Spacemit, Inc */ ++ ++#include ++/* Pin Configuration Node: */ ++/* Format: */ ++&pinctrl { ++ pinctrl_uart0_0: uart0_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT3, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_DAT2, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart0_1: uart0_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_CMD, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_80, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart0_2: uart0_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_68, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_69, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart2: uart2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_21, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_22, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_23, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_24, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart3_0: uart3_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_81, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_82, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_83, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_84, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart3_1: uart3_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_18, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_19, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_20, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_21, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart3_2: uart3_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_53, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_54, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_56, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart4_0: uart4_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(QSPI_DAT1, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_DAT0, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart4_1: uart4_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_81, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_82, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_83, 
MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_84, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart4_2: uart4_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_23, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_24, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart4_3: uart4_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_33, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_34, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_35, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_36, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart4_4: uart4_4_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_111, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_112, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_113, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_114, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart5_0: uart5_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(QSPI_CLK, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_CSI, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart5_1: uart5_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_25, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_26, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_27, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_28, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart5_2: uart5_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_42, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_43, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_44, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_45, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart5_3: uart5_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PRI_TDI, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TMS, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TCK, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TDO, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart6_0: uart6_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_85, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_86, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_87, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_90, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart6_1: uart6_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_00, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_01, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_02, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_03, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart6_2: uart6_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_56, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_57, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart7_0: uart7_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_88, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_89, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; 
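++
++	/*
++	 * Each controller above is exposed on several alternative pad
++	 * groups (pinctrl_uart4_0 ... pinctrl_uart4_4, and so on); a board
++	 * .dts enables exactly one of them through pinctrl-0. A minimal
++	 * sketch, assuming the board routes UART4 over GPIO_23/GPIO_24,
++	 * i.e. the pinctrl_uart4_2 group above (the uart4 controller node
++	 * itself comes from k1-x.dtsi):
++	 *
++	 * &uart4 {
++	 *	pinctrl-names = "default";
++	 *	pinctrl-0 = <&pinctrl_uart4_2>;
++	 *	status = "okay";
++	 * };
++	 */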
++ ++ pinctrl_uart7_1: uart7_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_04, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_05, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_06, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_07, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart8_0: uart8_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_82, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_83, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart8_1: uart8_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_08, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_09, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_10, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_11, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart8_2: uart8_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_75, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_76, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_77, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_78, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_uart9_0: uart9_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_12, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_13, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart9_1: uart9_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_110, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_115, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_116, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_117, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_uart9_2: uart9_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PRI_TCK, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TDO, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c0: i2c0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_54, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_r_uart1: r_uart1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_49, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_50, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_51, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_52, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_i2c1: i2c1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_56, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_57, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c2_0: i2c2_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_84, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_85, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c2_1: i2c2_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PRI_TDI, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TMS, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c2_2: i2c2_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_68, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_69, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ 
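	/*
++	 * The I2C pad groups are consumed the same way, with the bus
++	 * devices then described as children of the enabled controller.
++	 * A sketch under stated assumptions: i2c0 is the controller from
++	 * k1-x.dtsi, pinctrl_i2c0 is the group above, and the eeprom@50
++	 * child is purely illustrative, not part of this file:
++	 *
++	 * &i2c0 {
++	 *	pinctrl-names = "default";
++	 *	pinctrl-0 = <&pinctrl_i2c0>;
++	 *	status = "okay";
++	 *
++	 *	eeprom@50 {
++	 *		compatible = "atmel,24c02";
++	 *		reg = <0x50>;
++	 *	};
++	 * };
++	 */
++
++	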
pinctrl_i2c3_0: i2c3_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_38, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_39, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c3_1: i2c3_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_47, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_48, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c3_2: i2c3_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_77, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_78, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c4_0: i2c4_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_40, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_41, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c4_1: i2c4_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_75, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ K1X_PADCONF(GPIO_76, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c4_2: i2c4_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_51, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ K1X_PADCONF(GPIO_52, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c5_0: i2c5_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_81, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_82, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c5_1: i2c5_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_54, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c6_0: i2c6_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_83, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_90, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c6_1: i2c6_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_118, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_119, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c6_2: i2c6_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_56, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_57, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c7: i2c7_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_118, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_119, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_i2c8: i2c8_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PWR_SCL, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PWR_SDA, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_one_wire_0: one_wire_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_110, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_one_wire_1: one_wire_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_47, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ir_rx_0: ir_rx_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(DVL1, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_ir_rx_1: ir_rx_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_79, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ir_rx_2: ir_rx_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_58, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; 
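++
++	/*
++	 * Anatomy of one entry, on the assumption that K1X_PADCONF(pin,
++	 * mux, conf) expands to the <register-offset mux-value conf-value>
++	 * triple consumed by the pinctrl-single style driver (the pinctrl
++	 * node in k1-x.dtsi sets #pinctrl-cells = <2>): the two value
++	 * cells are OR'd together and written to the pad register at the
++	 * given offset, with pinctrl-single,function-mask = <0xff77>
++	 * selecting which bits the driver owns. The 0x100 offset below is
++	 * hypothetical:
++	 *
++	 * pinctrl-single,pins = <
++	 *	0x100 MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2)
++	 * >;
++	 */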
++ ++ pinctrl_r_ir_rx_0: r_ir_rx_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_48, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_r_ir_rx_1: r_ir_rx_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_44, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm0_0: pwm0_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT3, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm0_1: pwm0_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_14, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm0_2: pwm0_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_22, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm1_0: pwm1_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT2, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm1_1: pwm1_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_29, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm1_2: pwm1_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_23, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm2_0: pwm2_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT1, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm2_1: pwm2_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_22, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm2_2: pwm2_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_30, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm2_3: pwm2_3_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_24, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm3_0: pwm3_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT0, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm3_1: pwm3_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_33, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm3_2: pwm3_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_25, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm4_0: pwm4_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_CMD, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm4_1: pwm4_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_34, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm5_0: pwm5_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_CLK, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_pwm5_1: pwm5_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_35, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm6_0: pwm6_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_88, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm6_1: pwm6_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_36, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm7_0: pwm7_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_92, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm7_1: pwm7_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_37, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm8_0: pwm8_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_00, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm8_1: pwm8_1_grp { ++ 
pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_38, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm9_0: pwm9_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_01, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm9_1: pwm9_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_39, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm10_0: pwm10_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_02, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm10_1: pwm10_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_40, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm11_0: pwm11_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_03, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm11_1: pwm11_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_41, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm12_0: pwm12_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_04, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm12_1: pwm12_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_42, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm13_0: pwm13_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_05, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm13_1: pwm13_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_43, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm14_0: pwm14_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_06, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm14_1: pwm14_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_44, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm15_0: pwm15_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_07, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm15_1: pwm15_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_45, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm16_0: pwm16_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_09, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm16_1: pwm16_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_46, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm17_0: pwm17_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_10, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm17_1: pwm17_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_53, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm18_0: pwm18_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_11, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm18_1: pwm18_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_57, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm19_0: pwm19_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_13, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pwm19_1: pwm19_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_63, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_rpwm2_0: rpwm2_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_79, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_rpwm9_0: rpwm9_0_grp { ++ pinctrl-single,pins =< ++ 
K1X_PADCONF(GPIO_74, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_sspa0_0: sspa0_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_118, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_119, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_120, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_121, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_122, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ >; ++ }; ++ ++ pinctrl_sspa0_1: sspa0_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_58, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_111, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_112, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_113, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_114, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ >; ++ }; ++ ++ pinctrl_sspa1: sspa1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_24, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_25, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_26, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_27, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1X_PADCONF(GPIO_28, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ >; ++ }; ++ ++ pinctrl_ssp2_0: ssp2_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_75, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_76, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_77, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_78, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ssp2_1: ssp2_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_64, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_65, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_66, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_67, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ssp3_0: ssp3_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_75, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_76, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_77, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_78, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_ssp3_1: ssp3_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_59, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_60, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_61, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_62, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_qspi: qspi_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(QSPI_DAT3, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_DAT2, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_DAT1, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_DAT0, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_CLK, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1X_PADCONF(QSPI_CSI, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_mmc1: mmc1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT3, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_DAT2, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) 
++ K1X_PADCONF(MMC1_DAT1, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_DAT0, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_CMD, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(MMC1_CLK, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_mmc1_fast: mmc1_fast_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(MMC1_DAT3, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_DAT2, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_DAT1, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_DAT0, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_CMD, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) ++ K1X_PADCONF(MMC1_CLK, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS3)) ++ >; ++ }; ++ ++ pinctrl_mmc2: mmc2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_15, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_16, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_17, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_18, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_19, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_20, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb0_0: usb0_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_125, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_126, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_127, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb0_1: usb0_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_64, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_65, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_63, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb1_0: usb1_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_124, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb1_1: usb1_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_66, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb2_0: usb2_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_121, MUX_MODE2, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_122, MUX_MODE2, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_123, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_usb2_1: usb2_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_68, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_69, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_67, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie0_0: pcie0_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_15, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_16, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_17, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie0_1: pcie0_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_29, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_30, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_31, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie0_2: pcie0_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_110, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ 
K1X_PADCONF(GPIO_115, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_116, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie0_3: pcie0_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_53, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_54, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie1_0: pcie1_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_15, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_16, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_17, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie1_1: pcie1_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_32, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_33, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_34, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie1_2: pcie1_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_56, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_57, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_58, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie1_3: pcie1_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_59, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_60, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_61, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_0: pcie2_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_18, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_19, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_20, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_1: pcie2_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_35, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_36, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_37, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_2: pcie2_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_62, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_74, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_117, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_3: pcie2_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_111, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_112, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_113, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pcie2_4: pcie2_4_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_62, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_112, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_117, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_gmac0: gmac0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_00, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_01, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_02, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_03, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_04, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_05, 
MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_06, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_07, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_08, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_09, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_10, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_11, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_12, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_13, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_14, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_45, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_gmac1: gmac1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_29, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_30, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_31, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_32, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_33, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_34, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_35, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_36, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_37, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_38, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_39, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_40, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_41, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_42, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_43, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_46, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_can_0: can_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_75, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1X_PADCONF(GPIO_76, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ >; ++ }; ++ ++ pinctrl_can_1: can_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_54, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_55, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_r_can_0: r_can_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_47, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_48, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_r_can_1: r_can_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_110, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_115, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_hdmi_0: hdmi_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_86, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_87, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_88, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_89, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_hdmi_1: hdmi_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_59, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_60, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_61, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ 
K1X_PADCONF(GPIO_62, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_spi_lcd_0: spi_lcd_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_86, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_87, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_88, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_89, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_90, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_91, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_92, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_spi_lcd_1: spi_lcd_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(PRI_TDI, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TMS, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TCK, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TDO, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_74, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_114, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_63, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_camera0: camera0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_53, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_camera1: camera1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_58, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_camera2: camera2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_120, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pmic: pmic_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(VCXO_EN, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(DVL0, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1X_PADCONF(DVL1, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_0: mn_clk_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_92, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_1: mn_clk_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_81, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_2: mn_clk_2_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_44, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_3: mn_clk_3_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_20, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_4: mn_clk_4_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_23, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk_5: mn_clk_5_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_32, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk2_0: mn_clk2_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_91, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_mn_clk2_1: mn_clk2_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_85, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_0: vcxo_0_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(DVL0, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(DVL1, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_1: vcxo_1_grp { ++ pinctrl-single,pins =< ++ K1X_PADCONF(GPIO_16, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ 
K1X_PADCONF(GPIO_17, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_2: vcxo_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_89, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1X_PADCONF(GPIO_90, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_out_0: vcxo_out_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_91, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_vcxo_out_1: vcxo_out_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_12, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_32k_out_0: 32k_out_0_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_21, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_32k_out_1: 32k_out_1_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_31, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_32k_out_2: 32k_out_2_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(GPIO_28, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ >; ++ }; ++ ++ pinctrl_pri: pri_grp { ++ pinctrl-single,pins = < ++ K1X_PADCONF(PRI_TDI, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TMS, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TCK, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1X_PADCONF(PRI_TDO, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ >; ++ }; ++}; ++ +diff --git a/arch/riscv/boot/dts/thead/Makefile b/arch/riscv/boot/dts/thead/Makefile +index b55a17127c2b..3e6311bc9976 100644 +--- a/arch/riscv/boot/dts/thead/Makefile ++++ b/arch/riscv/boot/dts/thead/Makefile +@@ -1,2 +1,3 @@ + # SPDX-License-Identifier: GPL-2.0 +-dtb-$(CONFIG_ARCH_THEAD) += th1520-lichee-pi-4a.dtb th1520-beaglev-ahead.dtb ++dtb-$(CONFIG_ARCH_XUANTIE) += th1520-lichee-pi-4a.dtb th1520-beaglev-ahead.dtb th1520-lichee-pi-4a-16g.dtb ++dtb-$(CONFIG_ARCH_XUANTIE) += th1520-lpi4a-dsi0.dtb th1520-lpi4a-hx8279.dtb +diff --git a/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts b/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts +index 70e8042c8304..6d03eb1d7318 100644 +--- a/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts ++++ b/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts +@@ -7,23 +7,13 @@ + /dts-v1/; + + #include "th1520.dtsi" ++#include <dt-bindings/gpio/gpio.h> ++#include <dt-bindings/leds/common.h> + + / { + model = "BeagleV Ahead"; + compatible = "beagle,beaglev-ahead", "thead,th1520"; + +- aliases { +- gpio0 = &gpio0; +- gpio1 = &gpio1; +- gpio2 = &gpio2; +- gpio3 = &gpio3; +- serial0 = &uart0; +- serial1 = &uart1; +- serial2 = &uart2; +- serial3 = &uart3; +- serial4 = &uart4; +- serial5 = &uart5; +- }; + + chosen { + stdout-path = "serial0:115200n8"; +@@ -31,8 +21,43 @@ chosen { + + memory@0 { + device_type = "memory"; +- reg = <0x0 0x00000000 0x1 0x00000000>; ++ reg = <0x0 0x200000 0x0 0xffe00000>; ++ }; ++ ++ leds { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&led_pins>; ++ compatible = "gpio-leds"; ++ ++ led-1 { ++ gpios = <&gpio4 8 GPIO_ACTIVE_LOW>; ++ color = ; ++ label = "led1"; ++ }; ++ ++ led-2 { ++ gpios = <&gpio4 9 GPIO_ACTIVE_LOW>; ++ color = ; ++ label = "led2"; ++ }; ++ ++ led-3 { ++ gpios = <&gpio4 10 GPIO_ACTIVE_LOW>; ++ color = ; ++ label = "led3"; ++ }; + ++ led-4 { ++ gpios = <&gpio4 11 GPIO_ACTIVE_LOW>; ++ color = ; ++ label = "led4"; ++ }; ++ ++ led-5 { ++ gpios = <&gpio4 12 GPIO_ACTIVE_LOW>; ++ color = ; ++ label = "led5"; ++ }; + }; + }; + +@@ -44,10 +69,22 @@ &osc_32k { + clock-frequency = <32768>; + }; + ++&rc_24m { ++ clock-frequency = <24000000>; ++}; ++ ++&aonsys_clk { ++ 
clock-frequency = <73728000>; ++}; ++ + &apb_clk { + clock-frequency = <62500000>; + }; + ++&sdhci_clk { ++ clock-frequency = <198000000>; ++}; ++ + &uart_sclk { + clock-frequency = <100000000>; + }; +@@ -56,6 +93,165 @@ &dmac0 { + status = "okay"; + }; + ++&gmac_clk { ++ clock-frequency = <500000000>; ++}; ++ ++&gmac_axi_clk { ++ clock-frequency = <100000000>; ++}; ++ ++&emmc { ++ bus-width = <8>; ++ max-frequency = <198000000>; ++ mmc-hs400-1_8v; ++ non-removable; ++ no-sdio; ++ no-sd; ++ status = "okay"; ++}; ++ ++&gmac0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&gmac0_pins>; ++ phy-mode = "rgmii-id"; ++ status = "okay"; ++}; ++ ++&mdio0 { ++ phy0: ethernet-phy@1 { ++ reg = <1>; ++ }; ++}; ++ ++&padctrl_aosys { ++ led_pins: led-0 { ++ led-pins { ++ pins = "AUDIO_PA8", /* GPIO4_8 */ ++ "AUDIO_PA9", /* GPIO4_9 */ ++ "AUDIO_PA10", /* GPIO4_10 */ ++ "AUDIO_PA11", /* GPIO4_11 */ ++ "AUDIO_PA12"; /* GPIO4_12 */ ++ function = "gpio"; ++ bias-disable; ++ drive-strength = <3>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ }; ++}; ++ ++&padctrl0_apsys { ++ gmac0_pins: gmac0-0 { ++ tx-pins { ++ pins = "GMAC0_TX_CLK", ++ "GMAC0_TXEN", ++ "GMAC0_TXD0", ++ "GMAC0_TXD1", ++ "GMAC0_TXD2", ++ "GMAC0_TXD3"; ++ function = "gmac0"; ++ bias-disable; ++ drive-strength = <25>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ ++ rx-pins { ++ pins = "GMAC0_RX_CLK", ++ "GMAC0_RXDV", ++ "GMAC0_RXD0", ++ "GMAC0_RXD1", ++ "GMAC0_RXD2", ++ "GMAC0_RXD3"; ++ function = "gmac0"; ++ bias-disable; ++ drive-strength = <1>; ++ input-enable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ ++ mdc-pins { ++ pins = "GMAC0_MDC"; ++ function = "gmac0"; ++ bias-disable; ++ drive-strength = <13>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ ++ mdio-pins { ++ pins = "GMAC0_MDIO"; ++ function = "gmac0"; ++ bias-disable; ++ drive-strength = <13>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ sdio0_pins: sdio0-0 { ++ detn-pins { ++ pins = "SDIO0_DETN"; ++ function = "sdio"; ++ bias-disable; /* external pull-up */ ++ drive-strength = <1>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ uart0_pins: uart0-0 { ++ tx-pins { ++ pins = "UART0_TXD"; ++ function = "uart"; ++ bias-disable; ++ drive-strength = <3>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ ++ rx-pins { ++ pins = "UART0_RXD"; ++ function = "uart"; ++ bias-disable; ++ drive-strength = <1>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++}; ++ ++&sdio0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&sdio0_pins>; ++ bus-width = <4>; ++ max-frequency = <198000000>; ++ wprtn_ignore; ++ no-sdio; ++ status = "okay"; ++}; ++ + &uart0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart0_pins>; + status = "okay"; + }; +diff --git a/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi b/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi +index a802ab110429..aa98efe6f62b 100644 +--- a/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi ++++ b/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi +@@ -13,10 +13,186 @@ / { + + memory@0 { + device_type = "memory"; +- reg = <0x0 0x00000000 0x2 0x00000000>; ++ reg = <0x0 0x200000 0x1 0xffe00000>; + }; + }; + ++&cpus { ++ c910_0: cpu@0 { ++ dvdd-supply = <&dvdd_cpu_reg>; ++ 
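/* Each core carries two OPP tables below: operating-points for the dvdd supply and th1520,dvddm-operating-points for the companion dvddm supply; entries are kHz/uV pairs. */ ++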
dvddm-supply = <&dvddm_cpu_reg>; ++ ++ operating-points = < ++ /* kHz uV */ ++ 300000 600000 ++ 400000 700000 ++ 500000 700000 ++ 600000 700000 ++ 702000 700000 ++ 800000 700000 ++ 900000 800000 ++ 1000000 800000 ++ 1104000 800000 ++ 1200000 800000 ++ 1296000 800000 ++ 1404000 800000 ++ 1500000 800000 ++ 1608000 1000000 ++ 1704000 1000000 ++ 1848000 1000000 ++ >; ++ th1520,dvddm-operating-points = < ++ /* kHz uV */ ++ 300000 800000 ++ 400000 800000 ++ 500000 800000 ++ 600000 800000 ++ 702000 800000 ++ 800000 800000 ++ 900000 800000 ++ 1000000 800000 ++ 1104000 800000 ++ 1200000 800000 ++ 1296000 800000 ++ 1404000 800000 ++ 1500000 800000 ++ 1608000 1000000 ++ 1704000 1000000 ++ 1848000 1000000 ++ >; ++ }; ++ c910_1: cpu@1 { ++ dvdd-supply = <&dvdd_cpu_reg>; ++ dvddm-supply = <&dvddm_cpu_reg>; ++ ++ operating-points = < ++ /* kHz uV */ ++ 300000 600000 ++ 400000 700000 ++ 500000 700000 ++ 600000 700000 ++ 702000 700000 ++ 800000 700000 ++ 900000 800000 ++ 1000000 800000 ++ 1104000 800000 ++ 1200000 800000 ++ 1296000 800000 ++ 1404000 800000 ++ 1500000 800000 ++ 1608000 1000000 ++ 1704000 1000000 ++ 1848000 1000000 ++ >; ++ th1520,dvddm-operating-points = < ++ /* kHz uV */ ++ 300000 800000 ++ 400000 800000 ++ 500000 800000 ++ 600000 800000 ++ 702000 800000 ++ 800000 800000 ++ 900000 800000 ++ 1000000 800000 ++ 1104000 800000 ++ 1200000 800000 ++ 1296000 800000 ++ 1404000 800000 ++ 1500000 800000 ++ 1608000 1000000 ++ 1704000 1000000 ++ 1848000 1000000 ++ >; ++ }; ++ c910_2: cpu@2 { ++ dvdd-supply = <&dvdd_cpu_reg>; ++ dvddm-supply = <&dvddm_cpu_reg>; ++ ++ operating-points = < ++ /* kHz uV */ ++ 300000 600000 ++ 400000 700000 ++ 500000 700000 ++ 600000 700000 ++ 702000 700000 ++ 800000 700000 ++ 900000 800000 ++ 1000000 800000 ++ 1104000 800000 ++ 1200000 800000 ++ 1296000 800000 ++ 1404000 800000 ++ 1500000 800000 ++ 1608000 1000000 ++ 1704000 1000000 ++ 1848000 1000000 ++ >; ++ th1520,dvddm-operating-points = < ++ /* kHz uV */ ++ 300000 800000 ++ 400000 800000 ++ 500000 800000 ++ 600000 800000 ++ 702000 800000 ++ 800000 800000 ++ 900000 800000 ++ 1000000 800000 ++ 1104000 800000 ++ 1200000 800000 ++ 1296000 800000 ++ 1404000 800000 ++ 1500000 800000 ++ 1608000 1000000 ++ 1704000 1000000 ++ 1848000 1000000 ++ >; ++ }; ++ c910_3: cpu@3 { ++ dvdd-supply = <&dvdd_cpu_reg>; ++ dvddm-supply = <&dvddm_cpu_reg>; ++ ++ operating-points = < ++ /* kHz uV */ ++ 300000 600000 ++ 400000 700000 ++ 500000 700000 ++ 600000 700000 ++ 702000 700000 ++ 800000 700000 ++ 900000 800000 ++ 1000000 800000 ++ 1104000 800000 ++ 1200000 800000 ++ 1296000 800000 ++ 1404000 800000 ++ 1500000 800000 ++ 1608000 1000000 ++ 1704000 1000000 ++ 1848000 1000000 ++ >; ++ th1520,dvddm-operating-points = < ++ /* kHz uV */ ++ 300000 800000 ++ 400000 800000 ++ 500000 800000 ++ 600000 800000 ++ 702000 800000 ++ 800000 800000 ++ 900000 800000 ++ 1000000 800000 ++ 1104000 800000 ++ 1200000 800000 ++ 1296000 800000 ++ 1404000 800000 ++ 1500000 800000 ++ 1608000 1000000 ++ 1704000 1000000 ++ 1848000 1000000 ++ >; ++ }; ++}; ++ ++ + &osc { + clock-frequency = <24000000>; + }; +@@ -25,14 +201,276 @@ &osc_32k { + clock-frequency = <32768>; + }; + ++&rc_24m { ++ clock-frequency = <24000000>; ++}; ++ ++&aonsys_clk { ++ clock-frequency = <73728000>; ++}; ++ ++&audiosys_clk { ++ clock-frequency = <294912000>; ++}; ++ + &apb_clk { + clock-frequency = <62500000>; + }; + ++&sdhci_clk { ++ clock-frequency = <198000000>; ++}; ++ + &uart_sclk { + clock-frequency = <100000000>; + }; + ++&aogpio { ++ gpio-line-names = "", "", "", ++ "GPIO00", ++ 
"GPIO04"; ++}; ++ ++&gmac_clk { ++ clock-frequency = <500000000>; ++}; ++ ++&gmac_axi_clk { ++ clock-frequency = <100000000>; ++}; ++ ++&aon { ++ dvdd_cpu_reg: appcpu_dvdd { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "appcpu_dvdd"; ++ regulator-min-microvolt = <300000>; ++ regulator-max-microvolt = <1570000>; ++ regulator-type = "dvdd"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ dvddm_cpu_reg: appcpu_dvddm { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "appcpu_dvddm"; ++ regulator-min-microvolt = <300000>; ++ regulator-max-microvolt = <1570000>; ++ regulator-type = "dvddm"; ++ regulator-dual-rail; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_dvdd18_aon_reg: soc_dvdd18_aon { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_dvdd18_aon"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_avdd33_usb3_reg: soc_avdd33_usb3 { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_avdd33_usb3"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_dvdd08_aon_reg: soc_dvdd08_aon { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_dvdd08_aon"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_dvdd08_ddr_reg: soc_dvdd08_ddr { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_dvdd08_ddr"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_vdd_ddr_1v8_reg: soc_vdd_ddr_1v8 { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_vdd_ddr_1v8"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_vdd_ddr_1v1_reg: soc_vdd_ddr_1v1 { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_vdd_ddr_1v1"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_vdd_ddr_0v6_reg: soc_vdd_ddr_0v6 { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_vdd_ddr_0v6"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_dvdd18_ap_reg: soc_dvdd18_ap { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_dvdd18_ap"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_dvdd08_ap_reg: soc_dvdd08_ap { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_dvdd08_ap"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_avdd08_mipi_hdmi_reg: soc_avdd08_mipi_hdmi { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_avdd08_mipi_hdmi"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_avdd18_mipi_hdmi_reg: soc_avdd18_mipi_hdmi { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_avdd18_mipi_hdmi"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_vdd33_emmc_reg: soc_vdd33_emmc { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_dvdd33_emmc"; ++ regulator-type = "common"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_vdd18_emmc_reg: soc_vdd18_emmc { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_dvdd18_emmc"; ++ regulator-type = "gpio"; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ soc_dovdd18_scan_reg: soc_dovdd18_scan { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = 
"soc_dovdd18_scan"; ++ regulator-type = "common"; ++ regulator-min-microvolt = <900000>; ++ regulator-max-microvolt = <3600000>; ++ }; ++ ++ soc_dvdd12_scan_reg: soc_dvdd12_scan { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_dvdd12_scan"; ++ regulator-type = "common"; ++ regulator-min-microvolt = <900000>; ++ regulator-max-microvolt = <3600000>; ++ }; ++ ++ soc_avdd28_scan_en_reg: soc_avdd28_scan_en { ++ compatible = "xuantie,th1520-aon-pmic"; ++ regulator-name = "soc_avdd28_scan_en"; ++ regulator-type = "common"; ++ regulator-min-microvolt = <900000>; ++ regulator-max-microvolt = <3600000>; ++ }; ++}; ++ + &dmac0 { + status = "okay"; + }; ++ ++&dmac2 { ++ status = "okay"; ++}; ++ ++&emmc { ++ bus-width = <8>; ++ max-frequency = <198000000>; ++ mmc-hs400-1_8v; ++ non-removable; ++ no-sdio; ++ no-sd; ++ status = "okay"; ++}; ++ ++&padctrl0_apsys { ++ sdio0_pins: sdio0-0 { ++ detn-pins { ++ pins = "SDIO0_DETN"; ++ function = "sdio"; ++ bias-disable; /* external pull-up */ ++ drive-strength = <1>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++}; ++ ++&sdio0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&sdio0_pins>; ++ bus-width = <4>; ++ max-frequency = <198000000>; ++ no-sdio; ++ status = "okay"; ++}; ++ ++&gpio0 { ++ gpio-line-names = "", "", "", "", "", "", "", "", "", "", ++ "", "", "", "", "", "", "", "", "", "", ++ "", "", "", "", ++ "GPIO07", ++ "GPIO08", ++ "", ++ "GPIO01", ++ "GPIO02"; ++}; ++ ++&gpio1 { ++ gpio-line-names = "", "", "", ++ "GPIO11", ++ "GPIO12", ++ "GPIO13", ++ "GPIO14", ++ "", "", "", "", "", "", "", "", "", "", ++ "", "", "", "", "", ++ "GPIO06"; ++}; ++ ++&gpio2 { ++ gpio-line-names = "GPIO03", ++ "GPIO05"; ++}; ++ ++&gpio3 { ++ gpio-line-names = "", "", ++ "GPIO09", ++ "GPIO10"; ++}; ++ ++&npu { ++ vha_clk_rate = <1000000000>; ++ status = "okay"; ++}; ++ ++&npu_opp_table { ++ opp-1000000000 { ++ opp-suspend; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a-16g.dts b/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a-16g.dts +new file mode 100644 +index 000000000000..a3a991baf716 +--- /dev/null ++++ b/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a-16g.dts +@@ -0,0 +1,18 @@ ++// SPDX-License-Identifier: (GPL-2.0 OR MIT) ++/* ++ * Copyright (C) 2023 Han Gao ++ */ ++ ++/dts-v1/; ++ ++#include "th1520-lichee-pi-4a.dts" ++ ++/ { ++ model = "Sipeed Lichee Pi 4A 16G"; ++ compatible = "sipeed,lichee-pi-4a", "sipeed,lichee-module-4a", "thead,th1520"; ++ ++ memory@0 { ++ device_type = "memory"; ++ reg = <0x0 0x00000000 0x4 0x00000000>; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts b/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts +index 9a3884a73e13..3fd09dc4ba28 100644 +--- a/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts ++++ b/arch/riscv/boot/dts/thead/th1520-lichee-pi-4a.dts +@@ -4,29 +4,1378 @@ + */ + + #include "th1520-lichee-module-4a.dtsi" ++#include ++#include + + / { + model = "Sipeed Lichee Pi 4A"; + compatible = "sipeed,lichee-pi-4a", "sipeed,lichee-module-4a", "thead,th1520"; + + aliases { +- gpio0 = &gpio0; +- gpio1 = &gpio1; +- gpio2 = &gpio2; +- gpio3 = &gpio3; +- serial0 = &uart0; +- serial1 = &uart1; +- serial2 = &uart2; +- serial3 = &uart3; +- serial4 = &uart4; +- serial5 = &uart5; ++ vivcam3 = &vvcam_sensor3; ++ viv_video2 = &video2; ++ viv_video3 = &video3; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; ++ ++ fan: pwm-fan { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&fan_pins>; ++ compatible = "pwm-fan"; ++ #cooling-cells = <2>; ++ 
pwms = <&pwm 1 10000000 0>; ++ cooling-levels = <0 66 196 255>; ++ }; ++ ++ lcd0_backlight: pwm-backlight@0 { ++ compatible = "pwm-backlight"; ++ pwms = <&pwm 0 5000000 0>; ++ brightness-levels = <0 4 8 16 32 64 128 255>; ++ default-brightness-level = <7>; ++ }; ++ ++ th1520_iopmp: iopmp { ++ compatible = "xuantie,th1520-iopmp"; ++ ++ /* config#1: multiple valid regions */ ++ iopmp_emmc: IOPMP_EMMC { ++ attr = <0xFFFFFFFF>; ++ is_default_region; ++ }; ++ ++ /* config#2: iopmp bypass */ ++ iopmp_sdio0: IOPMP_SDIO0 { ++ bypass_en; ++ }; ++ ++ /* config#3: iopmp default region set */ ++ iopmp_sdio1: IOPMP_SDIO1 { ++ attr = <0xFFFFFFFF>; ++ is_default_region; ++ }; ++ ++ iopmp_usb0: IOPMP_USB0 { ++ attr = <0xFFFFFFFF>; ++ is_default_region; ++ }; ++ ++ iopmp_ao: IOPMP_AO { ++ is_default_region; ++ }; ++ ++ iopmp_aud: IOPMP_AUD { ++ is_default_region; ++ }; ++ ++ iopmp_chip_dbg: IOPMP_CHIP_DBG { ++ is_default_region; ++ }; ++ ++ iopmp_eip120i: IOPMP_EIP120I { ++ is_default_region; ++ }; ++ ++ iopmp_eip120ii: IOPMP_EIP120II { ++ is_default_region; ++ }; ++ ++ iopmp_eip120iii: IOPMP_EIP120III { ++ is_default_region; ++ }; ++ ++ iopmp_isp0: IOPMP_ISP0 { ++ is_default_region; ++ }; ++ ++ iopmp_isp1: IOPMP_ISP1 { ++ is_default_region; ++ }; ++ ++ iopmp_dw200: IOPMP_DW200 { ++ is_default_region; ++ }; ++ ++ iopmp_vipre: IOPMP_VIPRE { ++ is_default_region; ++ }; ++ ++ iopmp_venc: IOPMP_VENC { ++ is_default_region; ++ }; ++ ++ iopmp_vdec: IOPMP_VDEC { ++ is_default_region; ++ }; ++ ++ iopmp_g2d: IOPMP_G2D { ++ is_default_region; ++ }; ++ ++ iopmp_fce: IOPMP_FCE { ++ is_default_region; ++ }; ++ ++ iopmp_npu: IOPMP_NPU { ++ is_default_region; ++ }; ++ ++ iopmp0_dpu: IOPMP0_DPU { ++ bypass_en; ++ }; ++ ++ iopmp1_dpu: IOPMP1_DPU { ++ bypass_en; ++ }; ++ ++ iopmp_gpu: IOPMP_GPU { ++ is_default_region; ++ }; ++ ++ iopmp_gmac1: IOPMP_GMAC1 { ++ is_default_region; ++ }; ++ ++ iopmp_gmac2: IOPMP_GMAC2 { ++ is_default_region; ++ }; ++ ++ iopmp_dmac: IOPMP_DMAC { ++ is_default_region; ++ }; ++ ++ iopmp_tee_dmac: IOPMP_TEE_DMAC { ++ is_default_region; ++ }; ++ ++ iopmp_dsp0: IOPMP_DSP0 { ++ is_default_region; ++ }; ++ ++ iopmp_dsp1: IOPMP_DSP1 { ++ is_default_region; ++ }; ++ ++ iopmp_audio0: IOPMP_AUDIO0 { ++ is_default_region; ++ }; ++ ++ iopmp_audio1: IOPMP_AUDIO1 { ++ is_default_region; ++ }; ++ }; ++ ++ reg_tp0_pwr: regulator-tp0-pwr { ++ compatible = "regulator-fixed"; ++ regulator-name = "tp0-pwr"; ++ regulator-min-microvolt = <2800000>; ++ regulator-max-microvolt = <2800000>; ++ gpio = <&ioexp3 4 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ regulator-always-on; ++ }; ++ ++ reg_hub_1v2: regulator-hub-1v2 { ++ compatible = "regulator-fixed"; ++ regulator-name = "HUB_1V2"; ++ regulator-min-microvolt = <1200000>; ++ regulator-max-microvolt = <1200000>; ++ gpio = <&ioexp3 2 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ regulator-always-on; ++ }; ++ ++ reg_hub_5v: regulator-hub-5v { ++ compatible = "regulator-fixed"; ++ regulator-name = "HUB_5V"; ++ regulator-min-microvolt = <5000000>; ++ regulator-max-microvolt = <5000000>; ++ gpio = <&ioexp3 3 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ regulator-always-on; ++ }; ++ ++ reg_vcc5v_usb: regulator-vcc5v-usb { ++ compatible = "regulator-fixed"; ++ regulator-name = "VCC5V_USB"; ++ regulator-min-microvolt = <5000000>; ++ regulator-max-microvolt = <5000000>; ++ gpio = <&gpio1 22 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ regulator-always-on; ++ }; ++ ++ reg_vdd33_lcd0: regulator-vdd33-lcd0 { ++ compatible = "regulator-fixed"; ++ regulator-name = "lcd0_vdd33"; ++ 
regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpio = <&ioexp3 5 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ reg_vdd18_lcd0: regulator-vdd18-lcd0 { ++ compatible = "regulator-fixed"; ++ regulator-name = "lcd0_vdd18"; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ gpio = <&ioexp3 6 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ reg_vdd_3v3: regulator-vdd-3v3 { ++ compatible = "regulator-fixed"; ++ regulator-name = "vdd_3v3"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ gpio = <&gpio1 24 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ regulator-boot-on; ++ regulator-always-on; ++ }; ++ ++ reg_vref_1v8: regulator-adc-vref { ++ compatible = "regulator-fixed"; ++ regulator-name = "vref-1v8"; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ regulator-always-on; ++ vin-supply = <&reg_vdd_3v3>; ++ }; ++ ++ reg_aud_3v3: regulator-aud-3v3 { ++ compatible = "regulator-fixed"; ++ regulator-name = "aud_3v3"; ++ regulator-min-microvolt = <3300000>; ++ regulator-max-microvolt = <3300000>; ++ enable-active-high; ++ regulator-always-on; ++ }; ++ ++ reg_aud_1v8: regulator-aud-1v8 { ++ compatible = "regulator-fixed"; ++ regulator-name = "aud_1v8"; ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ enable-active-high; ++ regulator-always-on; ++ }; ++ ++ reg_cam0_dvdd12: regulator-cam0-dvdd12 { ++ compatible = "regulator-fixed"; ++ regulator-name = "dvdd12_cam0"; ++ regulator-min-microvolt = <1200000>; ++ regulator-max-microvolt = <1200000>; ++ gpio = <&ioexp1 0 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ reg_cam0_avdd28: regulator-cam0-avdd28 { ++ compatible = "regulator-fixed"; ++ regulator-name = "avdd28_cam0"; ++ regulator-min-microvolt = <2800000>; ++ regulator-max-microvolt = <2800000>; ++ gpio = <&ioexp1 1 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ reg_cam0_dovdd18: regulator-cam0-dovdd18 { ++ compatible = "regulator-fixed"; ++ regulator-name = "dovdd18_cam0"; ++ regulator-min-microvolt = <2800000>; ++ regulator-max-microvolt = <2800000>; ++ gpio = <&ioexp1 2 GPIO_ACTIVE_HIGH>; ++ enable-active-high; ++ }; ++ ++ mbox_910t_client2: mbox_910t_client2 { ++ compatible = "xuantie,th1520-mbox-client"; ++ mbox-names = "906"; ++ mboxes = <&mbox_910t 2 0>; ++ audio-mbox-regmap = <&audio_mbox>; ++ status = "okay"; ++ }; ++ ++ th1520_rpmsg: th1520_rpmsg { ++ compatible = "th1520,rpmsg-bus", "simple-bus"; ++ memory-region = <&rpmsgmem>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ rpmsg: rpmsg { ++ vdev-nums = <1>; ++ reg = <0x0 0x1E000000 0 0x10000>; ++ compatible = "th1520,th1520-rpmsg"; ++ log-memory-region = <&audio_log_mem>; ++ audio-text-memory-region = <&audio_text_mem>; ++ status = "okay"; ++ }; ++ }; ++ ++ hdmi_codec: hdmi_codec@1 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-hdmi-pcm"; ++ status = "okay"; ++ sound-name-prefix = "DUMMY"; ++ }; ++ ++ th1520_sound: soundcard@1 { ++ compatible = "simple-audio-card"; ++ simple-audio-card,name = "TH1520-Sound-Card"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ simple-audio-card,dai-link@0 { ++ reg = <0>; ++ format = "i2s"; ++ cpu { ++ sound-dai = <&ap_i2s 1>; ++ }; ++ codec { ++ sound-dai = <&hdmi_codec>; ++ }; ++ }; ++ simple-audio-card,dai-link@1 { ++ reg = <1>; ++ format = "i2s"; ++ cpu { ++ sound-dai = <&i2s1 0>; ++ }; ++ codec { ++ sound-dai = <&es7210_audio_codec>; ++ }; ++ }; ++ simple-audio-card,dai-link@2 { ++ 
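/* playback link: I2S1 out to the ES8156 codec */ ++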
reg = <2>; ++ format = "i2s"; ++ cpu { ++ sound-dai = <&i2s1 0>; ++ }; ++ codec { ++ sound-dai = <&es8156_audio_codec>; ++ }; ++ }; ++ }; ++ ++ usb-hub { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ hub_2_0: hub@1 { ++ compatible = "usb2109,2817"; ++ reg = <1>; ++ peer-hub = <&hub_3_0>; ++ vdd-supply = <&reg_hub_1v2>; ++ vbus-supply = <&reg_vcc5v_usb>; ++ }; ++ ++ hub_3_0: hub@2 { ++ compatible = "usb2109,817"; ++ reg = <2>; ++ peer-hub = <&hub_2_0>; ++ vbus-supply = <&reg_vcc5v_usb>; ++ vdd-supply = <&reg_vcc5v_usb>; ++ }; ++ }; ++ ++ wcn_wifi: wireless-wlan { ++ compatible = "wlan-platdata"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_wifi_wake>; ++ WIFI,poweren-gpios = <&ioexp2 4 0>; ++ power_on_after_init; ++ power_on_when_resume; ++ status = "okay"; ++ }; ++ ++ wcn_bt: wireless-bluetooth { ++ compatible = "bluetooth-platdata"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&pinctrl_bt_wake>; ++ BT,power-gpios = <&ioexp2 5 0>; ++ status = "okay"; ++ }; ++ ++ gpio-keys { ++ compatible = "gpio-keys"; ++ key-volumedown { ++ label = "Volume Down Key"; ++ linux,code = <KEY_VOLUMEDOWN>; ++ debounce-interval = <1>; ++ gpios = <&gpio1 19 GPIO_ACTIVE_LOW>; ++ }; ++ key-volumeup { ++ label = "Volume Up Key"; ++ linux,code = <KEY_VOLUMEUP>; ++ debounce-interval = <1>; ++ gpios = <&gpio1 0 GPIO_ACTIVE_LOW>; ++ }; ++ key-sleep { ++ label = "Sleep Wake Key"; ++ wakeup-source; ++ linux,code = <KEY_SLEEP>; ++ debounce-interval = <1>; ++ gpios = <&aogpio 2 GPIO_ACTIVE_LOW>; ++ }; ++ }; ++ ++ thermal-zones { ++ cpu-thermal { ++ sustainable-power = <1600>; ++ ++ trips { ++ trip_active0: active-0 { ++ temperature = <39000>; ++ hysteresis = <5000>; ++ type = "active"; ++ }; ++ ++ trip_active1: active-1 { ++ temperature = <50000>; ++ hysteresis = <5000>; ++ type = "active"; ++ }; ++ ++ trip_active2: active-2 { ++ temperature = <60000>; ++ hysteresis = <5000>; ++ type = "active"; ++ }; ++ }; ++ ++ cooling-maps { ++ map-active-0 { ++ cooling-device = <&fan 1 1>; ++ trip = <&trip_active0>; ++ }; ++ ++ map-active-1 { ++ cooling-device = <&fan 2 2>; ++ trip = <&trip_active1>; ++ }; ++ ++ map-active-2 { ++ cooling-device = <&fan 3 3>; ++ trip = <&trip_active2>; ++ }; ++ }; ++ }; ++ ++ dev-thermal { ++ sustainable-power = <3000>; ++ }; ++ }; ++ ++ soc { ++ compatible = "simple-bus"; ++ interrupt-parent = <&plic>; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ dma-noncoherent; ++ ranges; ++ ++ ++ vvcam_sensor3: vvcam_sensor@3 { /* cam3 on csi0 */ ++ sensor_name = "OV5693"; ++ compatible = "xuantie,th1520-vvcam-sensor"; ++ sensor_pdn = <&gpio1 28 0>; //powerdown pin / shutdown pin ++ sensor_rst = <&ioexp2 1 0>; ++ sensor_pdn_delay_us = <4000>; //delay from powerdown/shutdown release until I2C is ready ++ sensor_regulators = "DOVDD18", "DVDD12", "AVDD28"; ++ sensor_regulator_timing_us = <70 50 20>; ++ DOVDD18-supply = <&reg_cam0_dovdd18>; ++ DVDD12-supply = <&reg_cam0_dvdd12>; ++ AVDD28-supply = <&reg_cam0_avdd28>; ++ i2c_reg_width = /bits/ 8 <2>; ++ i2c_data_width = /bits/ 8 <1>; ++ i2c_addr = /bits/ 8 <0x36>; ++ i2c_bus = /bits/ 8 <0>; ++ status = "okay"; ++ }; ++ ++ video2: cam_dev@300 { ++ compatible = "xuantie,th1520-video"; ++ vi_mem_pool_region = <0>; // vi_mem: framebuffer, region[0] ++ status = "okay"; ++ channel0 { ++ channel_id = <0>; ++ status = "okay"; ++ sensor1 { ++ subdev_name = "vivcam"; ++ idx = <3>; ++ csi_idx = <0>; ++ mode_idx = <1>; ++ path_type = "SENSOR_2592x1944_LINER"; ++ }; ++ dma { ++ subdev_name = "vipre"; ++ idx = <0>; ++ path_type = "VIPRE_CSI0_ISP1"; ++ }; ++ isp { ++ subdev_name = "isp"; ++ idx = <1>; ++ 
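/* capture route: sensor idx 3 on CSI0, through VIPRE, into ISP1 */ ++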
path_type = "ISP_MI_PATH_MP"; ++ output { ++ max_width = <2600>; ++ max_height = <2000>; ++ bit_per_pixel = <12>; ++ frame_count = <3>; ++ }; ++ }; ++ }; ++ channel1 { ++ sensor1 { ++ subdev_name = "vivcam"; ++ idx = <3>; ++ csi_idx = <0>; ++ mode_idx = <1>; ++ path_type = "SENSOR_2592x1944_LINER"; ++ }; ++ dma { ++ subdev_name = "vipre"; ++ idx = <0>; ++ path_type = "VIPRE_CSI0_ISP1"; ++ }; ++ isp { ++ subdev_name = "isp"; ++ idx = <1>; ++ path_type = "ISP_MI_PATH_SP"; ++ output { ++ max_width = <2600>; ++ max_height = <2000>; ++ bit_per_pixel = <12>; ++ frame_count = <3>; ++ }; ++ }; ++ }; ++ channel2 { ++ sensor1 { ++ subdev_name = "vivcam"; ++ idx = <3>; ++ csi_idx = <0>; ++ mode_idx = <1>; ++ path_type = "SENSOR_2592x1944_LINER"; ++ }; ++ dma { ++ subdev_name = "vipre"; ++ idx = <0>; ++ path_type = "VIPRE_CSI0_ISP1"; ++ }; ++ isp { ++ subdev_name = "isp"; ++ idx = <1>; ++ path_type = "ISP_MI_PATH_SP2_BP"; ++ output { ++ max_width = <2600>; ++ max_height = <2000>; ++ bit_per_pixel = <12>; ++ frame_count = <3>; ++ }; ++ }; ++ }; ++ }; ++ ++ video3: cam_dev@400{ ++ compatible = "xuantie,th1520-video"; ++ vi_mem_pool_region = <0>; // vi_mem: framebuffer, region[0] ++ status = "okay"; ++ channel0 { ++ sensor1 { ++ subdev_name = "vivcam"; ++ idx = <3>; ++ csi_idx = <0>; ++ mode_idx = <1>; ++ path_type = "SENSOR_2592x1944_LINER"; ++ }; ++ dma { ++ subdev_name = "vipre"; ++ idx = <0>; ++ path_type = "VIPRE_CSI0_ISP1"; ++ ++ }; ++ isp { ++ subdev_name = "isp"; ++ idx = <1>; ++ path_type = "ISP_MI_PATH_MP"; ++ output { ++ max_width = <2600>; ++ max_height = <2000>; ++ bit_per_pixel = <12>; ++ frame_count = <3>; ++ }; ++ }; ++ dw { ++ subdev_name = "dw"; ++ idx = <0>; ++ path_type = "DW_DWE_VSE0"; ++ dw_dst_depth = <2>; ++ }; ++ }; ++ channel1 { ++ sensor1 { ++ subdev_name = "vivcam"; ++ idx = <3>; ++ csi_idx = <0>; ++ mode_idx = <1>; ++ path_type = "SENSOR_2592x1944_LINER"; ++ }; ++ dma { ++ subdev_name = "vipre"; ++ idx = <0>; ++ path_type = "VIPRE_CSI0_ISP1"; ++ ++ }; ++ isp { ++ subdev_name = "isp"; ++ idx = <1>; ++ path_type = "ISP_MI_PATH_MP"; ++ output { ++ max_width = <2600>; ++ max_height = <2000>; ++ bit_per_pixel = <12>; ++ frame_count = <3>; ++ }; ++ }; ++ dw { ++ subdev_name = "dw"; ++ idx = <0>; ++ path_type = "DW_DWE_VSE1"; ++ dw_dst_depth = <2>; ++ }; ++ }; ++ channel2 { ++ sensor1 { ++ subdev_name = "vivcam"; ++ idx = <3>; ++ csi_idx = <0>; ++ mode_idx = <1>; ++ path_type = "SENSOR_2592x1944_LINER"; ++ }; ++ dma { ++ subdev_name = "vipre"; ++ idx = <0>; ++ path_type = "VIPRE_CSI0_ISP1"; ++ ++ }; ++ isp { ++ subdev_name = "isp"; ++ idx = <1>; ++ path_type = "ISP_MI_PATH_MP"; ++ output { ++ max_width = <2600>; ++ max_height = <2000>; ++ bit_per_pixel = <12>; ++ frame_count = <3>; ++ }; ++ }; ++ dw { ++ subdev_name = "dw"; ++ idx = <0>; ++ path_type = "DW_DWE_VSE2"; ++ dw_dst_depth = <2>; ++ }; ++ }; ++ }; ++ }; ++ ++}; ++ ++&gmac0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&gmac0_pins>; ++ phy-handle = <&phy0>; ++ phy-mode = "rgmii-id"; ++ status = "okay"; ++}; ++ ++&gmac1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&gmac1_pins>; ++ phy-handle = <&phy1>; ++ phy-mode = "rgmii-id"; ++ status = "okay"; ++}; ++ ++&i2c0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c0_pins>; ++ clock-frequency = <100000>; ++ i2c-sda-hold-time-ns = <300>; ++ i2c-sda-falling-time-ns = <510>; ++ i2c-scl-falling-time-ns = <510>; ++ status = "okay"; ++ ++ ioexp1: gpio@18 { ++ compatible = "nxp,pca9557"; ++ reg = <0x18>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ gpio-line-names = 
"cam0_dvdd12", ++ "cam0_avdd28", ++ "cam0_dovdd18"; ++ }; ++}; ++ ++&i2c1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c1_pins>; ++ clock-frequency = <100000>; ++ i2c-sda-hold-time-ns = <300>; ++ i2c-sda-falling-time-ns = <510>; ++ i2c-scl-falling-time-ns = <510>; ++ status = "okay"; ++ ++ ioexp2: gpio@18 { ++ compatible = "nxp,pca9557"; ++ reg = <0x18>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ gpio-line-names = "", ++ "cam0_reset", ++ "cam1_reset", ++ "cam2_reset", ++ "wl_host_wake", ++ "bt_resetn", ++ "", ++ "bt_host_wake"; ++ }; ++}; ++ ++&i2c3 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c3_pins>; ++ clock-frequency = <100000>; ++ i2c-sda-hold-time-ns = <300>; ++ i2c-sda-falling-time-ns = <510>; ++ i2c-scl-falling-time-ns = <510>; ++ status = "okay"; ++ ++ ioexp3: gpio@18 { ++ compatible = "nxp,pca9557"; ++ reg = <0x18>; ++ gpio-controller; ++ #gpio-cells = <2>; ++ gpio-line-names = "tp0_rst", ++ "", ++ "", ++ "vcc5v_usb", ++ "vdd28_tp0", ++ "vdd33_lcd0", ++ "vdd18_lcd0", ++ "lcd0_reset"; ++ }; ++}; ++ ++&mdio0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&mdio0_pins>; ++ ++ phy0: ethernet-phy@1 { ++ reg = <1>; ++ }; ++ ++ phy1: ethernet-phy@2 { ++ reg = <2>; ++ }; ++}; ++ ++&padctrl0_apsys { ++ fan_pins: fan-0 { ++ pwm1-pins { ++ pins = "GPIO3_3"; /* PWM1 */ ++ function = "pwm"; ++ bias-disable; ++ drive-strength = <25>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ gmac0_pins: gmac0-0 { ++ tx-pins { ++ pins = "GMAC0_TX_CLK", ++ "GMAC0_TXEN", ++ "GMAC0_TXD0", ++ "GMAC0_TXD1", ++ "GMAC0_TXD2", ++ "GMAC0_TXD3"; ++ function = "gmac0"; ++ bias-disable; ++ drive-strength = <25>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ ++ rx-pins { ++ pins = "GMAC0_RX_CLK", ++ "GMAC0_RXDV", ++ "GMAC0_RXD0", ++ "GMAC0_RXD1", ++ "GMAC0_RXD2", ++ "GMAC0_RXD3"; ++ function = "gmac0"; ++ bias-disable; ++ drive-strength = <1>; ++ input-enable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ gmac1_pins: gmac1-0 { ++ tx-pins { ++ pins = "GPIO2_18", /* GMAC1_TX_CLK */ ++ "GPIO2_20", /* GMAC1_TXEN */ ++ "GPIO2_21", /* GMAC1_TXD0 */ ++ "GPIO2_22", /* GMAC1_TXD1 */ ++ "GPIO2_23", /* GMAC1_TXD2 */ ++ "GPIO2_24"; /* GMAC1_TXD3 */ ++ function = "gmac1"; ++ bias-disable; ++ drive-strength = <25>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ ++ rx-pins { ++ pins = "GPIO2_19", /* GMAC1_RX_CLK */ ++ "GPIO2_25", /* GMAC1_RXDV */ ++ "GPIO2_30", /* GMAC1_RXD0 */ ++ "GPIO2_31", /* GMAC1_RXD1 */ ++ "GPIO3_0", /* GMAC1_RXD2 */ ++ "GPIO3_1"; /* GMAC1_RXD3 */ ++ function = "gmac1"; ++ bias-disable; ++ drive-strength = <1>; ++ input-enable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ i2c3_pins: i2c3-0 { ++ i2c-pins { ++ pins = "I2C3_SCL", "I2C3_SDA"; ++ function = "i2c"; ++ bias-disable; ++ drive-strength = <7>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ mdio0_pins: mdio0-0 { ++ mdc-pins { ++ pins = "GMAC0_MDC"; ++ function = "gmac0"; ++ bias-disable; ++ drive-strength = <13>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ ++ mdio-pins { ++ pins = "GMAC0_MDIO"; ++ function = "gmac0"; ++ bias-disable; ++ drive-strength = <13>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ uart0_pins: uart0-0 { ++ tx-pins { ++ pins = "UART0_TXD"; ++ function = "uart"; ++ bias-disable; ++ drive-strength = <3>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ ++ rx-pins { ++ pins = 
"UART0_RXD"; ++ function = "uart"; ++ bias-disable; ++ drive-strength = <1>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ spi_pins: spi-0 { ++ spi-pins { ++ pins = "SPI_SCLK", "SPI_MOSI", "SPI_MISO"; ++ function = "spi"; ++ bias-disable; ++ drive-strength = <7>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ hdmi_tx_pins: hdmi-tx-0 { ++ hdmi-pins { ++ pins = "HDMI_SCL", "HDMI_SDA", "HDMI_CEC"; ++ function = "hdmi"; ++ bias-disable; ++ drive-strength = <3>; ++ input-enable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ }; ++}; ++ ++&padctrl1_apsys { ++ i2c0_pins: i2c0-0 { ++ i2c-pins { ++ pins = "I2C0_SCL", "I2C0_SDA"; ++ function = "i2c"; ++ bias-disable; ++ drive-strength = <7>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ i2c1_pins: i2c1-0 { ++ i2c-pins { ++ pins = "I2C1_SCL", "I2C1_SDA"; ++ function = "i2c"; ++ bias-disable; ++ drive-strength = <7>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ qspi1_pins: qspi1-0 { ++ qspi-pins { ++ pins = "QSPI1_SCLK", "QSPI1_D0_MOSI", "QSPI1_D1_MISO"; ++ function = "qspi"; ++ bias-disable; ++ drive-strength = <7>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ uart4_pins: uart4-0 { ++ tx-pins { ++ pins = "UART4_TXD"; ++ function = "uart"; ++ bias-disable; ++ drive-strength = <3>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ ++ rx-pins { ++ pins = "UART4_RXD"; ++ function = "uart"; ++ bias-disable; ++ drive-strength = <1>; ++ input-enable; ++ input-schmitt-enable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ pinctrl_wifi_wake: wifi_grp { ++ wifi-pins { ++ pins = "GPIO0_27"; ++ function = "gpio"; ++ bias-disable; ++ drive-strength = <7>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ }; ++ ++ pinctrl_bt_wake: bt_grp { ++ bt-pins { ++ pins = "GPIO0_28"; ++ function = "gpio"; ++ bias-disable; ++ drive-strength = <7>; ++ input-disable; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ }; ++}; ++ ++&padctrl_aosys { ++ i2s1_pa_pins: i2s1-pa-0 { ++ i2s-pa-pins { ++ pins = "AUDIO_PA14", "AUDIO_PA15", "AUDIO_PA16", "AUDIO_PA17"; ++ function = "audio"; ++ bias-disable; ++ drive-strength = <0>; ++ slew-rate = <0>; ++ }; ++ }; ++ aud_i2c0_pa_pins: aud-i2c0-pa-0 { ++ aud-i2c-pa-pins { ++ pins = "AUDIO_PA29", "AUDIO_PA30"; ++ function = "audio"; ++ bias-disable; ++ drive-strength = <0>; ++ slew-rate = <0>; ++ }; ++ }; ++}; ++ ++&padctrl_audiosys { ++ aud_i2c0_pins: aud-i2c0-0 { ++ i2c-pins { ++ pins = "PA29_FUNC", "PA30_FUNC"; ++ function = "aud_i2c0"; ++ bias-disable; ++ drive-strength = <7>; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ }; ++ i2s1_pins: i2s1-0 { ++ i2s-pins { ++ pins = "PA14_FUNC", "PA15_FUNC", "PA16_FUNC", "PA17_FUNC"; ++ function = "aud_i2s1"; ++ bias-disable; ++ drive-strength = <13>; ++ input-schmitt-disable; ++ slew-rate = <0>; ++ }; ++ }; + }; + + &uart0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart0_pins>; ++ status = "okay"; ++}; ++ ++&uart4 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart4_pins>; ++ status = "okay"; ++}; ++ ++&usb { ++ status = "okay"; ++ hubswitch-gpio = <&aogpio 4 0>; ++}; ++ ++&usb_dwc3 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++}; ++ ++&adc { ++ vref-supply = <®_vref_1v8>; ++ #io-channel-cells = <1>; ++ status = "okay"; ++}; ++ ++&spi { ++ num-cs = <1>; ++ cs-gpios = <&gpio2 15 0>; ++ rx-sample-delay-ns = <10>; ++ pinctrl-names = "default"; 
++ pinctrl-0 = <&spi_pins>; ++ status = "okay"; ++ ++ spi_norflash@0 { ++ status = "okay"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "winbond,w25q64jwm", "jedec,spi-nor"; ++ reg = <0>; ++ spi-max-frequency = <50000000>; ++ w25q,fast-read; ++ }; ++}; ++ ++&qspi1 { ++ // use single-line (standard SPI) mode ++ compatible = "snps,dw-apb-ssi"; ++ num-cs = <1>; ++ cs-gpios = <&gpio0 1 0>; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&qspi1_pins>; ++ status = "okay"; ++ ++ spidev@0 { ++ compatible = "spidev"; ++ #address-cells = <0x1>; ++ #size-cells = <0x1>; ++ reg = <0x0>; ++ spi-max-frequency = <50000000>; ++ }; ++}; ++ ++&sdio1 { ++ max-frequency = <100000000>; ++ bus-width = <4>; ++ pull_up; ++ no-sd; ++ no-mmc; ++ broken-cd; ++ io_fixed_1v8; ++ post-power-on-delay-ms = <50>; ++ wprtn_ignore; ++ cap-sd-highspeed; ++ wakeup-source; ++ keep-power-in-suspend; ++ status = "okay"; ++}; ++ ++&aon { ++ log-memory-region = <&aon_log_mem>; ++ status = "okay"; ++}; ++ ++&resmem { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ /* global autoconfigured region for contiguous allocations */ ++ cmamem: linux,cma { ++ compatible = "shared-dma-pool"; ++ reusable; ++ size = <0 0x30000000>; // 768MB on lpi4a (SOM) ++ alloc-ranges = <0 0xc8000000 0 0x30000000>; // [0x0C800_0000 ~ 0x0F800_0000] ++ linux,cma-default; ++ }; ++ dsp0_mem: memory@20000000 { /**0x2000_0000~0x2040_0000 4M**/ ++ reg = <0x0 0x20000000 0x0 0x00280000 /* DSP FW code&data section 2.5M*/ ++ 0x0 0x20280000 0x0 0x00001000 /* DSP communication area 4K*/ ++ 0x0 0x20281000 0x0 0x00007000 /* Panic/log page 28K */ ++ 0x0 0x20288000 0x0 0x00178000>; /* DSP shared memory 1.5M-32K*/ ++ }; ++ dsp1_mem: memory@20400000 { /**0x2040_0000~0x2080_0000 4M**/ ++ reg = <0x0 0x20400000 0x0 0x00280000 /* DSP FW code&data section */ ++ 0x0 0x20680000 0x0 0x00001000 /* DSP communication area */ ++ 0x0 0x20681000 0x0 0x00007000 /* Panic/log page*/ ++ 0x0 0x20688000 0x0 0x00178000>; /* DSP shared memory */ ++ }; ++ vi_mem: framebuffer@10000000 { ++ reg = <0x0 0x10000000 0x0 0x6700000>; /* vi_mem_pool_region[0] 44 MB (default) */ ++ //0x0 0x12C00000 0x0 0x01D00000 /* vi_mem_pool_region[1] 29 MB */ ++ //0x0 0x14900000 0x0 0x01E00000>; /* vi_mem_pool_region[2] 30 MB */ ++ }; ++ ++ audio_text_mem: memory@32000000 { ++ reg = <0x0 0x32000000 0x0 0xE00000>; ++ //no-map; ++ }; ++ audio_data_mem: memory@32E00000 { ++ reg = <0x0 0x32E00000 0x0 0x600000>; ++ //no-map; ++ }; ++ audio_log_mem: memory@33400000 { ++ reg = <0x0 0x33400000 0x0 0x200000>; ++ }; ++ // Note: with "no-map", reserved memory is not preserved across hibernation ++ rpmsgmem: memory@1E000000 { ++ reg = <0x0 0x1E000000 0x0 0x10000>; ++ }; ++ aon_log_mem: memory@33600000 { ++ reg = <0x0 0x33600000 0x0 0x200000>; ++ }; ++ regdump_mem: memory@38400000 { ++ reg = <0x0 0x38400000 0x0 0x1400000>; ++ no-map; ++ }; ++}; ++ ++&regdump { ++ memory-region = <&regdump_mem>; ++ status = "okay"; ++}; ++ ++&audio_i2c0 { ++ clock-frequency = <100000>; ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&aud_i2c0_pa_pins>, ++ <&aud_i2c0_pins>; ++ ++ es8156_audio_codec: es8156@8 { ++ #sound-dai-cells = <0>; ++ compatible = "everest,es8156"; ++ reg = <0x08>; ++ sound-name-prefix = "ES8156"; ++ AVDD-supply = <&reg_aud_3v3>; ++ DVDD-supply = <&reg_aud_1v8>; ++ PVDD-supply = <&reg_aud_1v8>; ++ }; ++ ++ es7210_audio_codec: es7210@40 { ++ #sound-dai-cells = <0>; ++ compatible = "MicArray_0"; ++ reg = <0x40>; ++ sound-name-prefix = "ES7210"; ++ MVDD-supply = <&reg_aud_3v3>; ++ AVDD-supply = <&reg_aud_3v3>; ++ 
DVDD-supply = <&reg_aud_1v8>; ++ PVDD-supply = <&reg_aud_1v8>; ++ }; ++}; ++ ++&ap_i2s { ++ status = "okay"; ++}; ++ ++&i2s1 { ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2s1_pa_pins>, <&i2s1_pins>; ++}; ++ ++&isp0 { ++ status = "okay"; ++}; ++ ++&isp1 { ++ status = "okay"; ++}; ++ ++&isp_ry0 { ++ status = "okay"; ++}; ++ ++&dewarp { ++ status = "okay"; ++}; ++ ++&dec400_isp0 { ++ status = "okay"; ++}; ++ ++&dec400_isp1 { ++ status = "okay"; ++}; ++ ++&dec400_isp2 { ++ status = "okay"; ++}; ++ ++&bm_visys { ++ status = "okay"; ++}; ++ ++&bm_csi0 { ++ status = "okay"; ++}; ++ ++&bm_csi1 { ++ status = "okay"; ++}; ++ ++&bm_csi2 { ++ status = "okay"; ++}; ++ ++&vidmem { ++ status = "okay"; ++ memory-region = <&vi_mem>; ++}; ++ ++&vi_pre { ++ status = "okay"; ++}; ++&xtensa_dsp { ++ status = "okay"; ++}; ++ ++&xtensa_dsp0 { ++ status = "okay"; ++ memory-region = <&dsp0_mem>; ++}; ++ ++&xtensa_dsp1 { ++ status = "okay"; ++ memory-region = <&dsp1_mem>; ++}; ++ ++&npu { ++ vha_clk_rate = <1000000000>; ++ status = "okay"; ++}; ++ ++&npu_opp_table { ++ opp-1000000000 { ++ opp-suspend; ++ }; ++}; ++ ++&dpu_enc1 { ++ ports { ++ /delete-node/ port@0; ++ }; ++}; ++ ++&disp1_out { ++ remote-endpoint = <&hdmi_tx_in>; ++}; ++ ++&hdmi_tx { ++ status = "okay"; ++ pinctrl-names = "default"; ++ pinctrl-0 = <&hdmi_tx_pins>; ++ ++ port@0 { ++ /* input */ ++ hdmi_tx_in: endpoint { ++ remote-endpoint = <&disp1_out>; ++ }; ++ }; ++}; ++ ++&eip_28 { + status = "okay"; + }; +diff --git a/arch/riscv/boot/dts/thead/th1520-lpi4a-dsi0.dts b/arch/riscv/boot/dts/thead/th1520-lpi4a-dsi0.dts +new file mode 100644 +index 000000000000..bf53fbcbca56 +--- /dev/null ++++ b/arch/riscv/boot/dts/thead/th1520-lpi4a-dsi0.dts +@@ -0,0 +1,63 @@ ++// SPDX-License-Identifier: (GPL-2.0 OR MIT) ++/* ++ * Copyright (C) 2021-2024 Alibaba Group Holding Limited. ++ */ ++ ++#include "th1520-lichee-pi-4a.dts" ++ ++&dpu_enc0 { ++ status = "okay"; ++ ++ ports { ++ /* output */ ++ port@1 { ++ reg = <1>; ++ ++ enc0_out: endpoint { ++ remote-endpoint = <&dsi0_in>; ++ }; ++ }; ++ }; ++}; ++ ++&dhost_0 { ++ ports { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ port@0 { ++ reg = <0>; ++ ++ dsi0_in: endpoint { ++ remote-endpoint = <&enc0_out>; ++ }; ++ }; ++ ++ port@1 { ++ reg = <1>; ++ ++ dsi0_out: endpoint { ++ remote-endpoint = <&panel0_in>; ++ }; ++ }; ++ }; ++ ++ panel0@0 { ++ compatible = "jadard,jd9365da-h3"; ++ reg = <0>; ++ backlight = <&lcd0_backlight>; ++ reset-gpio = <&ioexp3 7 0>; /* active low */ ++ hsvcc-supply = <&reg_vdd18_lcd0>; ++ vspn3v3-supply = <&reg_vdd33_lcd0>; ++ ++ port { ++ panel0_in: endpoint { ++ remote-endpoint = <&dsi0_out>; ++ }; ++ }; ++ }; ++}; ++ ++&dsi0 { ++ status = "okay"; ++}; +\ No newline at end of file +diff --git a/arch/riscv/boot/dts/thead/th1520-lpi4a-hx8279.dts b/arch/riscv/boot/dts/thead/th1520-lpi4a-hx8279.dts +new file mode 100644 +index 000000000000..44641bb31bbe +--- /dev/null ++++ b/arch/riscv/boot/dts/thead/th1520-lpi4a-hx8279.dts +@@ -0,0 +1,63 @@ ++// SPDX-License-Identifier: (GPL-2.0 OR MIT) ++/* ++ * Copyright (C) 2021-2024 Alibaba Group Holding Limited. 
++ */ ++ ++#include "th1520-lichee-pi-4a.dts" ++ ++&dpu_enc0 { ++ status = "okay"; ++ ++ ports { ++ /* output */ ++ port@1 { ++ reg = <1>; ++ ++ enc0_out: endpoint { ++ remote-endpoint = <&dsi0_in>; ++ }; ++ }; ++ }; ++}; ++ ++&dhost_0 { ++ ports { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ port@0 { ++ reg = <0>; ++ ++ dsi0_in: endpoint { ++ remote-endpoint = <&enc0_out>; ++ }; ++ }; ++ ++ port@1 { ++ reg = <1>; ++ ++ dsi0_out: endpoint { ++ remote-endpoint = <&panel0_in>; ++ }; ++ }; ++ }; ++ ++ panel0@0 { ++ compatible = "himax,hx8279"; ++ reg = <0>; ++ backlight = <&lcd0_backlight>; ++ reset-gpio = <&ioexp3 7 0>; /* active low */ ++ hsvcc-supply = <&reg_vdd18_lcd0>; ++ vspn3v3-supply = <&reg_vdd33_lcd0>; ++ ++ port { ++ panel0_in: endpoint { ++ remote-endpoint = <&dsi0_out>; ++ }; ++ }; ++ }; ++}; ++ ++&dsi0 { ++ status = "okay"; ++}; +\ No newline at end of file +diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi +index ff364709a6df..a47bf9f15d9a 100644 +--- a/arch/riscv/boot/dts/thead/th1520.dtsi ++++ b/arch/riscv/boot/dts/thead/th1520.dtsi +@@ -5,12 +5,59 @@ + */ + + #include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + + / { + compatible = "thead,th1520"; + #address-cells = <2>; + #size-cells = <2>; + ++ aliases { ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ gpio0 = &gpio0; ++ gpio1 = &gpio1; ++ gpio2 = &gpio2; ++ gpio3 = &gpio3; ++ i2c0 = &i2c0; ++ i2c1 = &i2c1; ++ i2c2 = &i2c2; ++ i2c3 = &i2c3; ++ i2c4 = &i2c4; ++ i2c5 = &audio_i2c0; ++ i2c6 = &audio_i2c1; ++ mmc0 = &emmc; ++ mmc1 = &sdio0; ++ mmc2 = &sdio1; ++ serial0 = &uart0; ++ serial1 = &uart1; ++ serial2 = &uart2; ++ serial3 = &uart3; ++ serial4 = &uart4; ++ serial5 = &uart5; ++ spi0 = &spi; ++ spi1 = &qspi0; ++ spi2 = &qspi1; ++ ap_i2s = &ap_i2s; ++ i2s0 = &i2s0; ++ i2s1 = &i2s1; ++ i2s2 = &i2s2; ++ }; ++ ++ system_monitor: system-monitor { ++ compatible = "th1520,system-monitor"; ++ }; ++ + cpus: cpus { + #address-cells = <1>; + #size-cells = <0>; +@@ -29,6 +76,18 @@ c910_0: cpu@0 { + d-cache-sets = <512>; + next-level-cache = <&l2_cache>; + mmu-type = "riscv,sv39"; ++ video-4k-minfreq = <1848000000>; ++ qos-mid-minfreq = <750000000>; ++ #cooling-cells = <2>; ++ dynamic-power-coefficient = <500>; ++ ++ clock-latency = <61036>; ++ clocks = <&clk C910_CCLK>, ++ <&clk C910_CCLK_I0>, ++ <&clk CPU_PLL1_FOUTPOSTDIV>, ++ <&clk CPU_PLL0_FOUTPOSTDIV>; ++ clock-names = "c910_cclk", "c910_cclk_i0", ++ "cpu_pll1_foutpostdiv", "cpu_pll0_foutpostdiv"; + + cpu0_intc: interrupt-controller { + compatible = "riscv,cpu-intc"; +@@ -50,6 +109,16 @@ c910_1: cpu@1 { + d-cache-sets = <512>; + next-level-cache = <&l2_cache>; + mmu-type = "riscv,sv39"; ++ #cooling-cells = <2>; ++ dynamic-power-coefficient = <500>; ++ ++ clock-latency = <61036>; ++ clocks = <&clk C910_CCLK>, ++ <&clk C910_CCLK_I0>, ++ <&clk CPU_PLL1_FOUTPOSTDIV>, ++ <&clk CPU_PLL0_FOUTPOSTDIV>; ++ clock-names = "c910_cclk", "c910_cclk_i0", ++ "cpu_pll1_foutpostdiv", "cpu_pll0_foutpostdiv"; + + cpu1_intc: interrupt-controller { + compatible = "riscv,cpu-intc"; +@@ -71,6 +140,16 @@ c910_2: cpu@2 { + d-cache-sets = <512>; + next-level-cache = <&l2_cache>; + mmu-type = "riscv,sv39"; ++ #cooling-cells = <2>; ++ dynamic-power-coefficient = <500>; ++ ++ clock-latency = <61036>; ++ clocks = <&clk C910_CCLK>, ++ <&clk C910_CCLK_I0>, ++ <&clk CPU_PLL1_FOUTPOSTDIV>, ++ <&clk CPU_PLL0_FOUTPOSTDIV>; ++ clock-names = "c910_cclk", "c910_cclk_i0", ++ "cpu_pll1_foutpostdiv", 
"cpu_pll0_foutpostdiv"; + + cpu2_intc: interrupt-controller { + compatible = "riscv,cpu-intc"; +@@ -92,6 +171,16 @@ c910_3: cpu@3 { + d-cache-sets = <512>; + next-level-cache = <&l2_cache>; + mmu-type = "riscv,sv39"; ++ #cooling-cells = <2>; ++ dynamic-power-coefficient = <500>; ++ ++ clock-latency = <61036>; ++ clocks = <&clk C910_CCLK>, ++ <&clk C910_CCLK_I0>, ++ <&clk CPU_PLL1_FOUTPOSTDIV>, ++ <&clk CPU_PLL0_FOUTPOSTDIV>; ++ clock-names = "c910_cclk", "c910_cclk_i0", ++ "cpu_pll1_foutpostdiv", "cpu_pll0_foutpostdiv"; + + cpu3_intc: interrupt-controller { + compatible = "riscv,cpu-intc"; +@@ -110,6 +199,93 @@ l2_cache: l2-cache { + }; + }; + ++ resmem: reserved-memory { ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ }; ++ ++ pmu { ++ compatible = "riscv,pmu"; ++ riscv,event-to-mhpmcounters = ++ <0x00003 0x00003 0x0007fff8>, ++ <0x00004 0x00004 0x0007fff8>, ++ <0x00005 0x00005 0x0007fff8>, ++ <0x00006 0x00006 0x0007fff8>, ++ <0x00007 0x00007 0x0007fff8>, ++ <0x00008 0x00008 0x0007fff8>, ++ <0x00009 0x00009 0x0007fff8>, ++ <0x0000a 0x0000a 0x0007fff8>, ++ <0x10000 0x10000 0x0007fff8>, ++ <0x10001 0x10001 0x0007fff8>, ++ <0x10002 0x10002 0x0007fff8>, ++ <0x10003 0x10003 0x0007fff8>, ++ <0x10010 0x10010 0x0007fff8>, ++ <0x10011 0x10011 0x0007fff8>, ++ <0x10012 0x10012 0x0007fff8>, ++ <0x10013 0x10013 0x0007fff8>; ++ riscv,event-to-mhpmevent = ++ <0x00003 0x00000000 0x00000001>, ++ <0x00004 0x00000000 0x00000002>, ++ <0x00006 0x00000000 0x00000006>, ++ <0x00005 0x00000000 0x00000007>, ++ <0x00007 0x00000000 0x00000008>, ++ <0x00008 0x00000000 0x00000009>, ++ <0x00009 0x00000000 0x0000000a>, ++ <0x0000a 0x00000000 0x0000000b>, ++ <0x10000 0x00000000 0x0000000c>, ++ <0x10001 0x00000000 0x0000000d>, ++ <0x10002 0x00000000 0x0000000e>, ++ <0x10003 0x00000000 0x0000000f>, ++ <0x10010 0x00000000 0x00000010>, ++ <0x10011 0x00000000 0x00000011>, ++ <0x10012 0x00000000 0x00000012>, ++ <0x10013 0x00000000 0x00000013>; ++ riscv,raw-event-to-mhpmcounters = ++ <0x00000000 0x00000001 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000002 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000003 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000004 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000005 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000006 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000007 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000008 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000009 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000000a 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000000b 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000000c 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000000d 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000000e 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000000f 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000010 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000011 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000012 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000013 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000014 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000015 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000016 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000017 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000018 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000019 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000001a 0xffffffff 
0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000001b 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000001c 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000001d 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000001e 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000001f 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000020 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000021 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000022 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000023 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000024 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000025 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000026 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000027 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000028 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x00000029 0xffffffff 0xffffffff 0x0007fff8>, ++ <0x00000000 0x0000002a 0xffffffff 0xffffffff 0x0007fff8>; ++ }; ++ + osc: oscillator { + compatible = "fixed-clock"; + clock-output-names = "osc_24m"; +@@ -122,6 +298,24 @@ osc_32k: 32k-oscillator { + #clock-cells = <0>; + }; + ++ rc_24m: clock-rc-24m { ++ compatible = "fixed-clock"; ++ clock-output-names = "rc_24m"; ++ #clock-cells = <0>; ++ }; ++ ++ aonsys_clk: aonsys-clk { ++ compatible = "fixed-clock"; ++ clock-output-names = "aonsys_clk"; ++ #clock-cells = <0>; ++ }; ++ ++ audiosys_clk: audiosys-clk { ++ compatible = "fixed-clock"; ++ clock-output-names = "audiosys_clk"; ++ #clock-cells = <0>; ++ }; ++ + apb_clk: apb-clk-clock { + compatible = "fixed-clock"; + clock-output-names = "apb_clk"; +@@ -134,6 +328,197 @@ uart_sclk: uart-sclk-clock { + #clock-cells = <0>; + }; + ++ sdhci_clk: sdhci-clock { ++ compatible = "fixed-clock"; ++ clock-frequency = <198000000>; ++ clock-output-names = "sdhci_clk"; ++ #clock-cells = <0>; ++ }; ++ ++ gmac_axi_clk: gmac-axi-clock { ++ compatible = "fixed-clock"; ++ clock-output-names = "gmac_axi_clk"; ++ #clock-cells = <0>; ++ }; ++ ++ gmac_clk: gmac-clock { ++ compatible = "fixed-clock"; ++ clock-output-names = "gmac_clk"; ++ #clock-cells = <0>; ++ }; ++ ++ stmmac_axi_config: stmmac-axi-config { ++ snps,wr_osr_lmt = <15>; ++ snps,rd_osr_lmt = <15>; ++ snps,blen = <0 0 64 32 0 0 0>; ++ }; ++ ++ aon: aon_subsys { ++ compatible = "xuantie,th1520-aon"; ++ mbox-names = "aon"; ++ mboxes = <&mbox_910t 1 0>; ++ opensbi-mboxes = <&mbox_910r>; ++ status = "okay"; ++ ++ pd: th1520-aon-pd { ++ compatible = "xuantie,th1520-aon-pd"; ++ #power-domain-cells = <1>; ++ }; ++ ++ cpufreq: c910_cpufreq { ++ compatible = "xuantie,th1520-cpufreq"; ++ status = "okay"; ++ }; ++ }; ++ ++ aon_iram: aon-iram@ffffef8000 { ++ compatible = "syscon"; ++ reg = <0xff 0xffef8000 0x0 0x10000>; ++ }; ++ ++ thermal-zones { ++ cpu-thermal { ++ polling-delay-passive = <250>; ++ polling-delay = <2000>; ++ thermal-sensors = <&pvt 0>; ++ ++ trips { ++ cpu_threshold: trip0 { ++ temperature = <80000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ ++ cpu_target: trip1 { ++ temperature = <85000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ ++ cpu_crit: trip2 { ++ temperature = <110000>; ++ hysteresis = <2000>; ++ type = "critical"; ++ }; ++ }; ++ ++ cooling-maps { ++ cpu_cdev { ++ trip = <&cpu_target>; ++ cooling-device = ++ <&c910_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, ++ <&c910_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, ++ <&c910_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, ++ <&c910_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; ++ contribution = <1024>; ++ }; ++ }; ++ }; ++ 
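++ /* dev-thermal below mirrors the cpu-thermal trip points but reads PVT sensor 1 and throttles the NPU/DSP devfreq devices instead of the CPU OPPs. */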
++ dev-thermal { ++ polling-delay-passive = <250>; ++ polling-delay = <2000>; ++ thermal-sensors = <&pvt 1>; ++ ++ trips { ++ dev_threshold: trip0 { ++ temperature = <80000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ ++ dev_target: trip1 { ++ temperature = <85000>; ++ hysteresis = <2000>; ++ type = "passive"; ++ }; ++ ++ dev_crit: trip2 { ++ temperature = <110000>; ++ hysteresis = <2000>; ++ type = "critical"; ++ }; ++ }; ++ ++ cooling-maps { ++ npu_devfreq { ++ trip = <&dev_target>; ++ cooling-device = ++ <&npu THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; ++ contribution = <1024>; ++ }; ++ ++ dsp0_devfreq { ++ trip = <&dev_target>; ++ cooling-device = ++ <&xtensa_dsp0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; ++ contribution = <1024>; ++ }; ++ ++ dsp1_devfreq { ++ trip = <&dev_target>; ++ cooling-device = ++ <&xtensa_dsp1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; ++ contribution = <1024>; ++ }; ++ }; ++ }; ++ }; ++ ++ display-subsystem { ++ compatible = "verisilicon,display-subsystem"; ++ ports = <&dpu_disp0>, <&dpu_disp1>; ++ status = "okay"; ++ }; ++ ++ dpu-encoders { ++ compatible = "simple-bus"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ dpu_enc0: dpu-encoder@0 { ++ /* default encoder is DSI */ ++ compatible = "verisilicon,dsi-encoder"; ++ reg = <0>; ++ status = "disabled"; ++ ++ ports { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ /* input */ ++ port@0 { ++ reg = <0>; ++ ++ enc0_in: endpoint { ++ remote-endpoint = <&disp0_out>; ++ }; ++ }; ++ }; ++ }; ++ ++ dpu_enc1: dpu-encoder@1 { ++ /* default encoder is DSI */ ++ compatible = "verisilicon,dsi-encoder"; ++ reg = <1>; ++ status = "disabled"; ++ ++ ports { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ /* input */ ++ port@0 { ++ reg = <0>; ++ ++ enc1_in: endpoint { ++ remote-endpoint = <&disp1_out>; ++ }; ++ }; ++ }; ++ }; ++ }; ++ + soc { + compatible = "simple-bus"; + interrupt-parent = <&plic>; +@@ -142,6 +527,346 @@ soc { + dma-noncoherent; + ranges; + ++ audio_i2c0: i2c@ffcb01a000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0xff 0xcb01a000 0x0 0x1000>; ++ clocks = <&apb_clk>; ++ interrupts = <182 IRQ_TYPE_LEVEL_HIGH>; ++ clock-frequency = <100000>; ++ i2c_mode = "dma"; ++ dmas = <&dmac2 21>, <&dmac2 20>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ ss_hcnt = /bits/ 16 <0x82>; ++ ss_lcnt = /bits/ 16 <0x78>; ++ fs_hcnt = /bits/ 16 <0x37>; ++ fs_lcnt = /bits/ 16 <0x42>; ++ fp_hcnt = /bits/ 16 <0x14>; ++ fp_lcnt = /bits/ 16 <0x1a>; ++ hs_hcnt = /bits/ 16 <0x5>; ++ hs_lcnt = /bits/ 16 <0x15>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ audio_i2c1: i2c@ffcb01b000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0xff 0xcb01b000 0x0 0x1000>; ++ clocks = <&apb_clk>; ++ interrupts = <183 IRQ_TYPE_LEVEL_HIGH>; ++ clock-frequency = <100000>; ++ i2c_mode = "dma"; ++ dmas = <&dmac2 23>, <&dmac2 22>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ ss_hcnt = /bits/ 16 <0x82>; ++ ss_lcnt = /bits/ 16 <0x78>; ++ fs_hcnt = /bits/ 16 <0x37>; ++ fs_lcnt = /bits/ 16 <0x42>; ++ fp_hcnt = /bits/ 16 <0x14>; ++ fp_lcnt = /bits/ 16 <0x1a>; ++ hs_hcnt = /bits/ 16 <0x5>; ++ hs_lcnt = /bits/ 16 <0x15>; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ ap_i2s: ap-i2s@ffe7034000 { ++ #sound-dai-cells = <1>; ++ compatible = "xuantie,th1520-i2s"; ++ reg = <0xff 0xe7034000 0x0 0x4000>; ++ interrupts = <70 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac0 35>, <&dmac0 40>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&vosys_clk_gate TH1520_CLKGEN_HDMI_I2S_CLK>; ++ clock-names = 
"pclk"; ++ resets = <&rst TH1520_RESET_HDMI_I2S>; ++ status = "disabled"; ++ }; ++ ++ i2s0: audio-i2s0@ffcb014000 { ++ #sound-dai-cells = <1>; ++ compatible = "xuantie,th1520-i2s"; ++ reg = <0xff 0xcb014000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ interrupts = <174 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 9>, <&dmac2 8>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_I2S0>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_I2S0>; ++ status = "disabled"; ++ }; ++ ++ i2s1: audio-i2s1@ffcb015000 { ++ #sound-dai-cells = <1>; ++ compatible = "xuantie,th1520-i2s"; ++ reg = <0xff 0xcb015000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ interrupts = <175 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 11>, <&dmac2 10>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_I2S1>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_I2S1>; ++ status = "disabled"; ++ }; ++ ++ i2s2: audio-i2s2@ffcb016000 { ++ #sound-dai-cells = <1>; ++ compatible = "xuantie,th1520-i2s"; ++ reg = <0xff 0xcb016000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ interrupts = <176 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 13>, <&dmac2 12>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_I2S2>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_I2S2>; ++ status = "disabled"; ++ }; ++ ++ i2s_8ch_sd0: i2s-8ch-sd0@ffcb017000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-i2s-8ch"; ++ reg = <0xff 0xcb017000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ interrupts = <177 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 36>, <&dmac2 14>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_I2S8CH>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_I2S8CH>; ++ status = "disabled"; ++ }; ++ ++ i2s_8ch_sd1: i2s-8ch-sd1@ffcb017000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-i2s-8ch"; ++ reg = <0xff 0xcb017000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ interrupts = <177 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 37>, <&dmac2 15>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_I2S8CH>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_I2S8CH>; ++ status = "disabled"; ++ }; ++ ++ i2s_8ch_sd2: i2s-8ch-sd2@ffcb017000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-i2s-8ch"; ++ reg = <0xff 0xcb017000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ interrupts = <177 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 38>, <&dmac2 16>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_I2S8CH>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_I2S8CH>; ++ status = "disabled"; ++ }; ++ ++ i2s_8ch_sd3: i2s-8ch-sd3@ffcb017000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-i2s-8ch"; ++ reg = <0xff 0xcb017000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ interrupts = <177 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 39>, <&dmac2 17>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_I2S8CH>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_I2S8CH>; ++ status = "disabled"; ++ }; ++ ++ tdm_slot1: tdm1@ffcb012000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-tdm"; ++ reg = <0xff 0xcb012000 0x0 
0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ th1520,tdm_slots = <8>; ++ th1520,tdm_slot_num = <1>; ++ interrupts = <178 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 28>; ++ dma-names = "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_TDM>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_TDM>; ++ status = "disabled"; ++ }; ++ ++ tdm_slot2: tdm2@ffcb012000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-tdm"; ++ reg = <0xff 0xcb012000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ th1520,tdm_slots = <8>; ++ th1520,tdm_slot_num = <2>; ++ interrupts = <178 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 29>; ++ dma-names = "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_TDM>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_TDM>; ++ status = "disabled"; ++ }; ++ ++ tdm_slot3: tdm3@ffcb012000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-tdm"; ++ reg = <0xff 0xcb012000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ th1520,tdm_slots = <8>; ++ th1520,tdm_slot_num = <3>; ++ interrupts = <178 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 30>; ++ dma-names = "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_TDM>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_TDM>; ++ status = "disabled"; ++ }; ++ ++ tdm_slot4: tdm4@ffcb012000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-tdm"; ++ reg = <0xff 0xcb012000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ th1520,tdm_slots = <8>; ++ th1520,tdm_slot_num = <4>; ++ interrupts = <178 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 31>; ++ dma-names = "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_TDM>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_TDM>; ++ status = "disabled"; ++ }; ++ ++ tdm_slot5: tdm5@ffcb012000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-tdm"; ++ reg = <0xff 0xcb012000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ th1520,tdm_slots = <8>; ++ th1520,tdm_slot_num = <5>; ++ interrupts = <178 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 32>; ++ dma-names = "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_TDM>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_TDM>; ++ status = "disabled"; ++ }; ++ ++ tdm_slot6: tdm6@ffcb012000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-tdm"; ++ reg = <0xff 0xcb012000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ th1520,tdm_slots = <8>; ++ th1520,tdm_slot_num = <6>; ++ interrupts = <178 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 33>; ++ dma-names = "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_TDM>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_TDM>; ++ status = "disabled"; ++ }; ++ ++ tdm_slot7: tdm7@ffcb012000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-tdm"; ++ reg = <0xff 0xcb012000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ th1520,tdm_slots = <8>; ++ th1520,tdm_slot_num = <7>; ++ interrupts = <178 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 34>; ++ dma-names = "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_TDM>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst 
TH1520_RESET_AUD_TDM>; ++ status = "disabled"; ++ }; ++ ++ tdm_slot8: tdm8@ffcb012000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-tdm"; ++ reg = <0xff 0xcb012000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ th1520,tdm_slots = <8>; ++ th1520,tdm_slot_num = <8>; ++ interrupts = <178 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 35>; ++ dma-names = "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_TDM>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_TDM>; ++ status = "disabled"; ++ }; ++ ++ spdif0: spdif@ffcb018000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-spdif"; ++ reg = <0xff 0xcb018000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ interrupts = <179 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 25>, <&dmac2 24>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_SPDIF0>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_SPDIF0>; ++ status = "disabled"; ++ }; ++ ++ spdif1: spdif@ffcb019000 { ++ #sound-dai-cells = <0>; ++ compatible = "xuantie,th1520-spdif"; ++ reg = <0xff 0xcb019000 0x0 0x1000>; ++ audio-cpr-regmap = <&audio_cpr>; ++ pinctrl-names = "default"; ++ interrupts = <180 IRQ_TYPE_LEVEL_HIGH>; ++ dmas = <&dmac2 27>, <&dmac2 26>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ clocks = <&audiosys_clk_gate TH1520_CLKGEN_AUDIO_SPDIF1>; ++ clock-names = "pclk"; ++ resets = <&audiosys_rst TH1520_RESET_AUD_SPDIF1>; ++ status = "disabled"; ++ }; ++ + plic: interrupt-controller@ffd8000000 { + compatible = "thead,th1520-plic", "thead,c900-plic"; + reg = <0xff 0xd8000000 0x0 0x01000000>; +@@ -164,6 +889,85 @@ clint: timer@ffdc000000 { + <&cpu3_intc 3>, <&cpu3_intc 7>; + }; + ++ aclint_sswi: aclint-sswi@ffdc00c000 { ++ compatible = "riscv,aclint-sswi"; ++ reg = <0xff 0xdc00c000 0x0 0x00010000>; ++ interrupt-controller; ++ }; ++ ++ secsys_reg: secsys-reg@ffff200000 { ++ compatible = "syscon"; ++ reg = <0xff 0xff200000 0x0 0x10000>; ++ }; ++ ++ nvmem_controller: efuse@ffff210000 { ++ compatible = "xuantie,th1520-efuse", "syscon"; ++ reg = <0xff 0xff210000 0x0 0x10000>; ++ xuantie,secsys = <&secsys_reg>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ clocks = <&miscsys_clk_gate CLKGEN_MISCSYS_EFUSE_PCLK>; ++ clock-names = "pclk"; ++ ++ gmac0_mac_address: mac-address@176 { ++ reg = <0xb0 6>; ++ }; ++ ++ gmac1_mac_address: mac-address@184 { ++ reg = <0xb8 6>; ++ }; ++ }; ++ ++ gmac0: ethernet@ffe7070000 { ++ compatible = "xuantie,th1520-dwmac"; ++ reg = <0xff 0xe7070000 0x0 0x2000>; ++ interrupts = <66 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-names = "macirq"; ++ clocks = <&clk CLKGEN_GMAC0_CCLK>, ++ <&clk CLKGEN_GMAC0_PCLK>, ++ <&clk CLKGEN_GMAC_AXI_ACLK>, ++ <&clk CLKGEN_GMAC_AXI_PCLK>; ++ clock-names = "stmmaceth", "pclk", "axi_aclk","axi_pclk"; ++ snps,pbl = <32>; ++ snps,fixed-burst; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <32>; ++ snps,axi-config = <&stmmac_axi_config>; ++ th1520,gmacapb = <&gmac0_apb>; ++ status = "disabled"; ++ ++ mdio0: mdio { ++ compatible = "snps,dwmac-mdio"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ }; ++ ++ gmac1: ethernet@ffe7060000 { ++ compatible = "xuantie,th1520-dwmac"; ++ reg = <0xff 0xe7060000 0x0 0x2000>; ++ interrupts = <67 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-names = "macirq"; ++ clocks = <&clk CLKGEN_GMAC1_CCLK>, ++ <&clk CLKGEN_GMAC1_PCLK>, ++ <&clk CLKGEN_GMAC_AXI_ACLK>, ++ <&clk 
CLKGEN_GMAC_AXI_PCLK>; ++ clock-names = "stmmaceth", "pclk","axi_aclk","axi_pclk"; ++ snps,pbl = <32>; ++ snps,fixed-burst; ++ snps,multicast-filter-bins = <64>; ++ snps,perfect-filter-entries = <32>; ++ snps,axi-config = <&stmmac_axi_config>; ++ th1520,gmacapb = <&gmac1_apb>; ++ status = "disabled"; ++ ++ mdio1: mdio { ++ compatible = "snps,dwmac-mdio"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ }; ++ + uart0: serial@ffe7014000 { + compatible = "snps,dw-apb-uart"; + reg = <0xff 0xe7014000 0x0 0x100>; +@@ -184,6 +988,16 @@ uart1: serial@ffe7f00000 { + status = "disabled"; + }; + ++ uart2: serial@ffec010000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0xff 0xec010000 0x0 0x100>; ++ interrupts = <38 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&uart_sclk>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ + uart3: serial@ffe7f04000 { + compatible = "snps,dw-apb-uart"; + reg = <0xff 0xe7f04000 0x0 0x100>; +@@ -194,17 +1008,165 @@ uart3: serial@ffe7f04000 { + status = "disabled"; + }; + +- gpio2: gpio@ffe7f34000 { ++ uart4: serial@fff7f08000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0xff 0xf7f08000 0x0 0x100>; ++ interrupts = <40 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&uart_sclk>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ uart5: serial@fff7f0c000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0xff 0xf7f0c000 0x0 0x100>; ++ interrupts = <41 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&uart_sclk>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@ffe7f20000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0xff 0xe7f20000 0x0 0x1000>; ++ interrupts = <44 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_I2C0_PCLK>; ++ clock-names = "pclk"; ++ clock-frequency = <100000>; ++ i2c_mode = "dma"; ++ dmas = <&dmac0 12>, <&dmac0 13>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ ss_hcnt = /bits/ 16 <0x104>; ++ ss_lcnt = /bits/ 16 <0xec>; ++ fs_hcnt = /bits/ 16 <0x37>; ++ fs_lcnt = /bits/ 16 <0x42>; ++ fp_hcnt = /bits/ 16 <0x14>; ++ fp_lcnt = /bits/ 16 <0x1a>; ++ hs_hcnt = /bits/ 16 <0x9>; ++ hs_lcnt = /bits/ 16 <0x11>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ i2c1: i2c@ffe7f24000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0xff 0xe7f24000 0x0 0x1000>; ++ interrupts = <45 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_I2C1_PCLK>; ++ clock-names = "pclk"; ++ clock-frequency = <100000>; ++ i2c_mode = "dma"; ++ dmas = <&dmac0 14>, <&dmac0 15>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ ss_hcnt = /bits/ 16 <0x104>; ++ ss_lcnt = /bits/ 16 <0xec>; ++ fs_hcnt = /bits/ 16 <0x37>; ++ fs_lcnt = /bits/ 16 <0x42>; ++ fp_hcnt = /bits/ 16 <0x14>; ++ fp_lcnt = /bits/ 16 <0x1a>; ++ hs_hcnt = /bits/ 16 <0x9>; ++ hs_lcnt = /bits/ 16 <0x11>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ i2c2: i2c@ffec00c000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0xff 0xec00c000 0x0 0x1000>; ++ interrupts = <46 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_I2C2_PCLK>; ++ clock-names = "pclk"; ++ clock-frequency = <100000>; ++ i2c_mode = "dma"; ++ dmas = <&dmac0 16>, <&dmac0 17>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ ss_hcnt = /bits/ 16 <0x104>; ++ ss_lcnt = /bits/ 16 <0xec>; ++ fs_hcnt = /bits/ 16 <0x37>; ++ fs_lcnt = /bits/ 16 <0x42>; ++ fp_hcnt = /bits/ 16 <0x14>; ++ fp_lcnt = /bits/ 16 <0x1a>; ++ hs_hcnt = /bits/ 16 <0x9>; ++ hs_lcnt = /bits/ 16 <0x11>; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ i2c3: i2c@ffec014000 { ++ 
compatible = "snps,designware-i2c"; ++ reg = <0xff 0xec014000 0x0 0x1000>; ++ interrupts = <47 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_I2C3_PCLK>; ++ clock-names = "pclk"; ++ clock-frequency = <100000>; ++ i2c_mode = "dma"; ++ dmas = <&dmac0 18>, <&dmac0 19>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ ss_hcnt = /bits/ 16 <0x104>; ++ ss_lcnt = /bits/ 16 <0xec>; ++ fs_hcnt = /bits/ 16 <0x37>; ++ fs_lcnt = /bits/ 16 <0x42>; ++ fp_hcnt = /bits/ 16 <0x14>; ++ fp_lcnt = /bits/ 16 <0x1a>; ++ hs_hcnt = /bits/ 16 <0x9>; ++ hs_lcnt = /bits/ 16 <0x11>; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ i2c4: i2c@ffe7f28000 { ++ compatible = "snps,designware-i2c"; ++ reg = <0xff 0xe7f28000 0x0 0x1000>; ++ interrupts = <48 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_I2C4_PCLK>; ++ clock-names = "pclk"; ++ clock-frequency = <100000>; ++ i2c_mode = "dma"; ++ dmas = <&dmac0 20>, <&dmac0 21>; ++ dma-names = "tx", "rx"; ++ #dma-cells = <1>; ++ ss_hcnt = /bits/ 16 <0x104>; ++ ss_lcnt = /bits/ 16 <0xec>; ++ fs_hcnt = /bits/ 16 <0x37>; ++ fs_lcnt = /bits/ 16 <0x42>; ++ fp_hcnt = /bits/ 16 <0x14>; ++ fp_lcnt = /bits/ 16 <0x1a>; ++ hs_hcnt = /bits/ 16 <0x9>; ++ hs_lcnt = /bits/ 16 <0x11>; ++ status = "disabled"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ ++ audio_mbox: audio_mbox@0xffefc48000 { ++ compatible = "xuantie,th1520-audio-mbox-reg", "syscon"; ++ reg = <0xff 0xefc48000 0x0 0x1000>; ++ status = "okay"; ++ }; ++ ++ gpio@ffe7f34000 { + compatible = "snps,dw-apb-gpio"; + reg = <0xff 0xe7f34000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; ++ clocks = <&clk CLKGEN_GPIO2_PCLK>, ++ <&clk CLKGEN_GPIO2_DBCLK>; ++ clock-names = "bus", "db"; + +- portc: gpio-controller@0 { ++ gpio2: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; +- ngpios = <32>; ++ ngpios = <31>; ++ gpio-ranges = <&padctrl0_apsys 0 0 32>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; +@@ -212,17 +1174,21 @@ portc: gpio-controller@0 { + }; + }; + +- gpio3: gpio@ffe7f38000 { ++ gpio@ffe7f38000 { + compatible = "snps,dw-apb-gpio"; + reg = <0xff 0xe7f38000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; ++ clocks = <&clk CLKGEN_GPIO3_PCLK>, ++ <&clk CLKGEN_GPIO3_DBCLK>; ++ clock-names = "bus", "db"; + +- portd: gpio-controller@0 { ++ gpio3: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + ngpios = <32>; ++ gpio-ranges = <&padctrl0_apsys 0 32 23>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; +@@ -230,17 +1196,38 @@ portd: gpio-controller@0 { + }; + }; + +- gpio0: gpio@ffec005000 { +- compatible = "snps,dw-apb-gpio"; ++ padctrl1_apsys: pinctrl@ffe7f3c000 { ++ compatible = "thead,th1520-group2-pinctrl"; ++ reg = <0xff 0xe7f3c000 0x0 0x1000>; ++ clocks = <&clk CLKGEN_PADCTRL1_APSYS_PCLK>; ++ clock-names = "pclk"; ++ }; ++ ++ gmac0_apb: syscon@ffec003000 { ++ compatible = "xuantie,th1520-gmac-apb", "syscon"; ++ reg = <0xff 0xec003000 0x0 0x1000>; ++ }; ++ ++ gmac1_apb: syscon@ffec004000 { ++ compatible = "xuantie,th1520-gmac-apb", "syscon"; ++ reg = <0xff 0xec004000 0x0 0x1000>; ++ }; ++ ++ gpio@ffec005000 { ++ compatible = "snps,dw-apb-gpio"; + reg = <0xff 0xec005000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; ++ clocks = <&clk CLKGEN_GPIO0_PCLK>, ++ <&clk CLKGEN_GPIO0_DBCLK>; ++ clock-names = "bus", "db"; + +- porta: gpio-controller@0 { ++ gpio0: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + 
gpio-controller; + #gpio-cells = <2>; +- ngpios = <32>; ++ ngpios = <23>; ++ gpio-ranges = <&padctrl1_apsys 0 0 32>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; +@@ -248,17 +1235,21 @@ porta: gpio-controller@0 { + }; + }; + +- gpio1: gpio@ffec006000 { ++ gpio@ffec006000 { + compatible = "snps,dw-apb-gpio"; + reg = <0xff 0xec006000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; ++ clocks = <&clk CLKGEN_GPIO1_PCLK>, ++ <&clk CLKGEN_GPIO1_DBCLK>; ++ clock-names = "bus", "db"; + +- portb: gpio-controller@0 { ++ gpio1: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + ngpios = <32>; ++ gpio-ranges = <&padctrl1_apsys 0 32 31>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; +@@ -266,21 +1257,55 @@ portb: gpio-controller@0 { + }; + }; + +- uart2: serial@ffec010000 { +- compatible = "snps,dw-apb-uart"; +- reg = <0xff 0xec010000 0x0 0x4000>; +- interrupts = <38 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&uart_sclk>; +- reg-shift = <2>; +- reg-io-width = <4>; +- status = "disabled"; ++ padctrl0_apsys: pinctrl@ffec007000 { ++ compatible = "thead,th1520-group3-pinctrl"; ++ reg = <0xff 0xec007000 0x0 0x1000>; ++ clocks = <&clk CLKGEN_PADCTRL0_APSYS_PCLK>; ++ clock-names = "pclk"; ++ }; ++ ++ pwm: pwm@ffec01c000 { ++ compatible = "xuantie,th1520-pwm"; ++ reg = <0xff 0xec01c000 0x0 0x4000>; ++ #pwm-cells = <3>; ++ clocks = <&osc>; ++ }; ++ ++ misc_sysreg: misc_sysreg@ffec02c000 { ++ compatible = "xuantie,th1520-misc-sysreg", "syscon"; ++ reg = <0xff 0xec02c000 0x0 0x1000>; ++ }; ++ ++ usb: usb@ffec03f000 { ++ compatible = "xuantie,th1520-usb"; ++ usb3-misc-regmap = <&misc_sysreg>; ++ reg = <0xff 0xec03f000 0x0 0x1000>; ++ xuantie,misc-sysreg = <&misc_sysreg>; ++ clocks = <&miscsys_clk_gate CLKGEN_MISCSYS_USB3_DRD_CLK>, ++ <&miscsys_clk_gate CLKGEN_MISCSYS_USB3_DRD_CTRL_REF_CLK>, ++ <&miscsys_clk_gate CLKGEN_MISCSYS_USB3_DRD_PHY_REF_CLK>, ++ <&miscsys_clk_gate CLKGEN_MISCSYS_USB3_DRD_SUSPEND_CLK>; ++ clock-names = "drd", "ctrl", "phy", "suspend"; ++ #address-cells = <2>; ++ #size-cells = <2>; ++ ranges; ++ ++ usb_dwc3: usb@ffe7040000 { ++ compatible = "snps,dwc3"; ++ reg = <0xff 0xe7040000 0x0 0x10000>; ++ interrupts = <68 IRQ_TYPE_LEVEL_HIGH>; ++ dr_mode = "host"; ++ snps,dis_u2_susphy_quirk; ++ snps,usb3_lpm_capable; ++ status = "disabled"; ++ }; + }; + + dmac0: dma-controller@ffefc00000 { +- compatible = "snps,axi-dma-1.01a"; ++ compatible = "xuantie,th1520-axi-dma"; + reg = <0xff 0xefc00000 0x0 0x1000>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&apb_clk>, <&apb_clk>; ++ clocks = <&clk CLKGEN_DMAC_CPUSYS_ACLK>, <&clk CLKGEN_DMAC_CPUSYS_HCLK>; + clock-names = "core-clk", "cfgr-clk"; + #dma-cells = <1>; + dma-channels = <4>; +@@ -292,6 +1317,71 @@ dmac0: dma-controller@ffefc00000 { + status = "disabled"; + }; + ++ dmac1: dma-controller@ffff340000 { ++ compatible = "snps,axi-dma-1.01a"; ++ reg = <0xff 0xff340000 0x0 0x1000>; ++ interrupts = <150 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_DMAC_CPUSYS_ACLK>, <&clk CLKGEN_DMAC_CPUSYS_HCLK>; ++ clock-names = "core-clk", "cfgr-clk"; ++ #dma-cells = <1>; ++ dma-channels = <4>; ++ snps,block-size = <65536 65536 65536 65536>; ++ snps,priority = <0 1 2 3>; ++ snps,dma-masters = <1>; ++ snps,data-width = <4>; ++ snps,axi-max-burst-len = <16>; ++ status = "disabled"; ++ }; ++ ++ dmac2: dma-controller@ffc8000000 { ++ compatible = "xuantie,th1520-axi-dma"; ++ reg = <0xff 0xc8000000 0x0 0x2000>; ++ interrupts = <167 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk 
CLKGEN_DMAC_CPUSYS_ACLK>, <&clk CLKGEN_DMAC_CPUSYS_HCLK>; ++ clock-names = "core-clk", "cfgr-clk"; ++ #dma-cells = <1>; ++ dma-channels = <16>; ++ snps,block-size = <65536 65536 65536 65536 ++ 65536 65536 65536 65536 ++ 65536 65536 65536 65536 ++ 65536 65536 65536 65536>; ++ snps,priority = <0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0>; // <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15>; ++ snps,dma-masters = <1>; ++ snps,data-width = <4>; ++ snps,axi-max-burst-len = <16>; ++ status = "disabled"; ++ }; ++ ++ emmc: mmc@ffe7080000 { ++ compatible = "xuantie,th1520-dwcmshc"; ++ reg = <0xff 0xe7080000 0x0 0x10000>; ++ interrupts = <62 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_EMMC_SDIO_REF_CLK>, ++ <&miscsys_clk_gate CLKGEN_MISCSYS_EMMC_CLK>; ++ clock-names = "core", "bus"; ++ status = "disabled"; ++ }; ++ ++ sdio0: mmc@ffe7090000 { ++ compatible = "xuantie,th1520-dwcmshc"; ++ reg = <0xff 0xe7090000 0x0 0x10000>; ++ interrupts = <64 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_EMMC_SDIO_REF_CLK>, ++ <&miscsys_clk_gate CLKGEN_MISCSYS_EMMC_CLK>; ++ clock-names = "core", "bus"; ++ status = "disabled"; ++ }; ++ ++ sdio1: mmc@ffe70a0000 { ++ compatible = "xuantie,th1520-dwcmshc"; ++ reg = <0xff 0xe70a0000 0x0 0x10000>; ++ interrupts = <71 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_EMMC_SDIO_REF_CLK>, ++ <&miscsys_clk_gate CLKGEN_MISCSYS_EMMC_CLK>; ++ clock-names = "core", "bus"; ++ status = "disabled"; ++ }; ++ + timer0: timer@ffefc32000 { + compatible = "snps,dw-apb-timer"; + reg = <0xff 0xefc32000 0x0 0x14>; +@@ -328,26 +1418,6 @@ timer3: timer@ffefc3203c { + status = "disabled"; + }; + +- uart4: serial@fff7f08000 { +- compatible = "snps,dw-apb-uart"; +- reg = <0xff 0xf7f08000 0x0 0x4000>; +- interrupts = <40 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&uart_sclk>; +- reg-shift = <2>; +- reg-io-width = <4>; +- status = "disabled"; +- }; +- +- uart5: serial@fff7f0c000 { +- compatible = "snps,dw-apb-uart"; +- reg = <0xff 0xf7f0c000 0x0 0x4000>; +- interrupts = <41 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&uart_sclk>; +- reg-shift = <2>; +- reg-io-width = <4>; +- status = "disabled"; +- }; +- + timer4: timer@ffffc33000 { + compatible = "snps,dw-apb-timer"; + reg = <0xff 0xffc33000 0x0 0x14>; +@@ -384,17 +1454,29 @@ timer7: timer@ffffc3303c { + status = "disabled"; + }; + +- ao_gpio0: gpio@fffff41000 { ++ rtc: rtc@fffff40000 { ++ compatible = "snps,dw-apb-rtc"; ++ reg = <0xff 0xfff40000 0x0 0x1000>; ++ interrupts = <74 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&osc_32k>; ++ clock-names = "osc_32k"; ++ wakeup-source; ++ prescaler = <0x8000>; ++ status = "okay"; ++ }; ++ ++ gpio@fffff41000 { + compatible = "snps,dw-apb-gpio"; + reg = <0xff 0xfff41000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + +- porte: gpio-controller@0 { ++ aogpio: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; +- ngpios = <32>; ++ ngpios = <23>; ++ gpio-ranges = <&padctrl_aosys 0 9 16>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; +@@ -402,22 +1484,894 @@ porte: gpio-controller@0 { + }; + }; + +- ao_gpio1: gpio@fffff52000 { ++ padctrl_aosys: pinctrl@fffff4a000 { ++ compatible = "thead,th1520-group1-pinctrl"; ++ reg = <0xff 0xfff4a000 0x0 0x2000>; ++ clocks = <&aonsys_clk>; ++ }; ++ ++ padctrl_audiosys: pinctrl@ffcb01d000 { ++ compatible = "thead,th1520-group4-pinctrl"; ++ reg = <0xff 0xcb01d000 0x0 0x2000>; ++ clocks = <&audiosys_clk>; ++ }; ++ ++ pvt: pvt@fffff4e000 { ++ compatible = "moortec,mr75203"; ++ reg = <0xff 0xfff4e000 0x0 0x80>, ++ <0xff 0xfff4e080 0x0 0x100>, ++ 
<0xff 0xfff4e180 0x0 0x680>, ++ <0xff 0xfff4e800 0x0 0x600>; ++ reg-names = "common", "ts", "pd", "vm"; ++ clocks = <&aonsys_clk>; ++ #thermal-sensor-cells = <1>; ++ moortec,ts-coeff-h = <220000>; ++ moortec,ts-coeff-g = <42740>; ++ moortec,ts-coeff-j = <0xFFFFFF60>; // -160 ++ moortec,ts-coeff-cal5 = <4094>; ++ }; ++ ++ gpio@fffff52000 { + compatible = "snps,dw-apb-gpio"; + reg = <0xff 0xfff52000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + +- portf: gpio-controller@0 { ++ gpio4: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; +- ngpios = <32>; ++ ngpios = <16>; ++ gpio-ranges = <&padctrl_aosys 0 25 22>, <&padctrl_aosys 22 7 1>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <55 IRQ_TYPE_LEVEL_HIGH>; + }; + }; ++ ++ bmu: ddr-pmu@ffff008000 { ++ compatible = "xuantie,th1520-ddr-pmu"; ++ reg = <0xff 0xff008000 0x0 0x800 ++ 0xff 0xff008800 0x0 0x800 ++ 0xff 0xff009000 0x0 0x800 ++ 0xff 0xff009800 0x0 0x800 ++ 0xff 0xff00a000 0x0 0x800>; ++ interrupts = <87 IRQ_TYPE_LEVEL_HIGH>; ++ }; ++ ++ mbox_910t: mbox@ffffc38000 { ++ compatible = "xuantie,th1520-mbox"; ++ reg = <0xff 0xffc38000 0x0 0x4000>, ++ <0xff 0xffc44000 0x0 0x1000>, ++ <0xff 0xffc4c000 0x0 0x1000>, ++ <0xff 0xffc54000 0x0 0x1000>; ++ reg-names = "local_base", ++ "remote_icu0", ++ "remote_icu1", ++ "remote_icu2"; ++ interrupt-controller; ++ interrupts = <28 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&apb_clk>; ++ clock-names = "ipg"; ++ icu_cpu_id = <0>; ++ #mbox-cells = <2>; ++ }; ++ ++ mbox_910r: mbox@ffefc53000 { ++ compatible = "xuantie,th1520-mbox-r"; ++ reg = <0xff 0xefc53000 0x0 0x4000>, ++ <0xff 0xefc3f000 0x0 0x1000>, ++ <0xff 0xefc47000 0x0 0x1000>, ++ <0xff 0xefc4f000 0x0 0x1000>; ++ reg-names = "local_base", ++ "remote_icu0", ++ "remote_icu1", ++ "remote_icu2"; ++ interrupt-controller; ++ clocks = <&apb_clk>; ++ clock-names = "ipg"; ++ icu_cpu_id = <3>; ++ #mbox-cells = <2>; ++ }; ++ ++ adc: adc@fffff51000 { ++ compatible = "xuantie,th1520-adc"; ++ reg = <0xff 0xfff51000 0x0 0x1000>; ++ interrupts = <61 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&aonsys_clk>; ++ clock-names = "adc"; ++ /* ADC pin is proprietary, no need to configure pinctrl */ ++ status = "disabled"; ++ }; ++ ++ visys_reg: visys-reg@ffe4040000 { ++ compatible = "xuantie,th1520-visys-reg", "syscon"; ++ reg = <0xff 0xe4040000 0x0 0x1000>; ++ status = "disabled"; ++ }; ++ ++ vosys_reg: vosys-reg@ffef528000 { ++ compatible = "xuantie,th1520-vosys-reg", "syscon"; ++ reg = <0xff 0xef528000 0x0 0x1000>; ++ status = "okay"; ++ }; ++ ++ dsi0: dw-mipi-dsi0@ffef500000 { ++ compatible = "xuantie,th1520-mipi-dsi", "simple-bus", "syscon"; ++ reg = <0xff 0xef500000 0x0 0x10000>; ++ status = "disabled"; ++ ++ dphy_0: dsi0-dphy { ++ compatible = "xuantie,th1520-mipi-dphy"; ++ regmap = <&dsi0>; ++ vosys-regmap = <&vosys_reg>; ++ clocks = <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI0_REFCLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI0_CFG_CLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI0_PCLK>, ++ <&clk OSC_24M>, ++ <&clk OSC_24M>; ++ clock-names = "refclk", "cfgclk", "pclk", "prefclk", "pcfgclk"; ++ #phy-cells = <0>; ++ }; ++ ++ dhost_0: dsi0-host { ++ compatible = "verisilicon,dw-mipi-dsi"; ++ regmap = <&dsi0>; ++ interrupts = <129 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI0_PCLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI0_PIXCLK>; ++ clock-names = "pclk", "pixclk"; ++ phys = <&dphy_0>; ++ phy-names = "dphy"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ };
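++ ++ /* ++ * Descriptive note (editorial, mirrors the visible structure): dsi1 below ++ * repeats the dsi0 layout one-for-one -- a XuanTie MIPI DPHY plus a ++ * VeriSilicon-wrapped DesignWare DSI host ("verisilicon,dw-mipi-dsi") ++ * sharing the controller's syscon window, fed by the matching ++ * TH1520_CLKGEN_MIPIDSI1_* gates from vosys_clk_gate. ++ */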
++ ++ dsi1: dw-mipi-dsi1@ffef510000 { ++ compatible = "xuantie,th1520-mipi-dsi", "simple-bus", "syscon"; ++ reg = <0xff 0xef510000 0x0 0x10000>; ++ status = "disabled"; ++ ++ dphy_1: dsi1-dphy { ++ compatible = "xuantie,th1520-mipi-dphy"; ++ regmap = <&dsi1>; ++ vosys-regmap = <&vosys_reg>; ++ clocks = <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI1_REFCLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI1_CFG_CLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI1_PCLK>, ++ <&clk OSC_24M>, ++ <&clk OSC_24M>; ++ clock-names = "refclk", "cfgclk", "pclk", "prefclk", "pcfgclk"; ++ #phy-cells = <0>; ++ }; ++ ++ dhost_1: dsi1-host { ++ compatible = "verisilicon,dw-mipi-dsi"; ++ regmap = <&dsi1>; ++ interrupts = <129 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI1_PCLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_MIPIDSI1_PIXCLK>; ++ clock-names = "pclk", "pixclk"; ++ phys = <&dphy_1>; ++ phy-names = "dphy"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ }; ++ }; ++ ++ hdmi_tx: dw-hdmi-tx@ffef540000 { ++ compatible = "xuantie,th1520-hdmi-tx"; ++ reg = <0xff 0xef540000 0x0 0x40000>; ++ interrupts = <111 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&vosys_clk_gate TH1520_CLKGEN_HDMI_PCLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_HDMI_SFR_CLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_HDMI_CEC_CLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_HDMI_PIXCLK>; ++ clock-names = "iahb", "isfr", "cec", "pixclk"; ++ reg-io-width = <4>; ++ phy_version = <301>; ++ /* TODO: add phy property */ ++ status = "disabled"; ++ }; ++ ++ dpu: dc8200@ffef600000 { ++ compatible = "verisilicon,dc8200"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ reg = <0xff 0xef600000 0x0 0x100>, ++ <0xff 0xef600800 0x0 0x2000>, ++ <0xff 0xef630010 0x0 0x60>; ++ interrupts = <93 IRQ_TYPE_LEVEL_HIGH>; ++ vosys-regmap = <&vosys_reg>; ++ clocks = <&vosys_clk_gate TH1520_CLKGEN_DPU_CCLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_DPU_PIXCLK0>, ++ <&vosys_clk_gate TH1520_CLKGEN_DPU_PIXCLK1>, ++ <&vosys_clk_gate TH1520_CLKGEN_DPU_ACLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_DPU_HCLK>, ++ <&clk DPU0_PLL_DIV_CLK>, ++ <&clk DPU1_PLL_DIV_CLK>, ++ <&clk DPU0_PLL_FOUTPOSTDIV>, ++ <&clk DPU1_PLL_FOUTPOSTDIV>; ++ clock-names = "core_clk", "pix_clk0", "pix_clk1", ++ "axi_clk", "cfg_clk", "pixclk0", ++ "pixclk1", "dpu0_pll_foutpostdiv", ++ "dpu1_pll_foutpostdiv"; ++ status = "okay"; ++ ++ dpu_disp0: port@0 { ++ reg = <0>; ++ ++ disp0_out: endpoint { ++ remote-endpoint = <&enc0_in>; ++ }; ++ }; ++ ++ dpu_disp1: port@1 { ++ reg = <1>; ++ ++ disp1_out: endpoint { ++ remote-endpoint = <&enc1_in>; ++ }; ++ }; ++ }; ++ ++ gpu: gpu@ffef400000 { ++ compatible = "img,gpu"; ++ reg = <0xff 0xef400000 0x0 0x100000>; ++ interrupts = <102 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-names = "gpuirq"; ++ vosys-regmap = <&vosys_reg>; ++ power-domains = <&pd TH1520_AON_GPU_PD>; ++ clocks = <&vosys_clk_gate TH1520_CLKGEN_GPU_CORE_CLK>, ++ <&vosys_clk_gate TH1520_CLKGEN_GPU_CFG_ACLK>; ++ clock-names = "cclk", "aclk"; ++ gpu_clk_rate = <18000000>; ++ dma-mask = <0xf 0xffffffff>; ++ status = "okay"; ++ }; ++ ++ aon_suspend_ctrl: aon_suspend_ctrl { ++ compatible = "xuantie,th1520-aon-suspend-ctrl"; ++ status = "okay"; ++ }; ++ ++ sys_reg: sys-reg@ffef010100 { ++ compatible = "xuantie,th1520-sys-reg"; ++ reg = <0xff 0xef010100 0x0 0x100>; ++ status = "okay"; ++ }; ++ ++ dspsys_reg: dspsys-reg@ffef040000 { ++ compatible = "xuantie,th1520-dspsys-reg", "syscon"; ++ reg = <0xff 0xef040000 0x0 0x1000>; ++ status = "okay"; ++ }; ++ ++ miscsys_reg: miscsys-reg@ffec02c000 { ++ compatible = "xuantie,th1520-miscsys-reg", "syscon"; ++ 
reg = <0xff 0xec02c000 0x0 0x1000>; ++ status = "okay"; ++ }; ++ ++ tee_miscsys_reg: tee_miscsys-reg@fffc02d000 { ++ compatible = "xuantie,th1520-miscsys-reg", "syscon"; ++ reg = <0xff 0xfc02d000 0x0 0x1000>; ++ status = "okay"; ++ }; ++ ++ audio_cpr: audio_cpr@ffcb000000 { ++ compatible = "xuantie,th1520-audio-cpr-reg", "syscon"; ++ reg = <0xff 0xcb000000 0x0 0x1000>; ++ status = "okay"; ++ }; ++ ++ clk: clock-controller@ffef010000 { ++ compatible = "xuantie,th1520-fm-ree-clk"; ++ reg = <0xff 0xef010000 0x0 0x1000>; ++ #clock-cells = <1>; ++ clocks = <&osc_32k>, <&osc>, <&rc_24m>; ++ clock-names = "osc_32k", "osc_24m", "rc_24m"; ++ status = "okay"; ++ }; ++ ++ visys_clk_gate: visys-clk-gate { /* VI_SYSREG_R */ ++ compatible = "xuantie,visys-gate-controller"; ++ visys-regmap = <&visys_reg>; ++ #clock-cells = <1>; ++ status = "okay"; ++ }; ++ ++ vpsys_clk_gate: vpsys-clk-gate@ffecc30000 { /* VP_SYSREG_R */ ++ compatible = "xuantie,vpsys-gate-controller"; ++ reg = <0xff 0xecc30000 0x0 0x1000>; ++ #clock-cells = <1>; ++ status = "okay"; ++ }; ++ ++ vosys_clk_gate: vosys-clk-gate@ffef528000 { /* VO_SYSREG_R */ ++ compatible = "xuantie,vosys-gate-controller"; ++ reg = <0xff 0xef528000 0x0 0x1000>; ++ #clock-cells = <1>; ++ status = "okay"; ++ }; ++ ++ dspsys_clk_gate: dspsys-clk-gate { ++ compatible = "xuantie,dspsys-gate-controller"; ++ dspsys-regmap = <&dspsys_reg>; ++ #clock-cells = <1>; ++ status = "okay"; ++ }; ++ ++ audiosys_clk_gate: audiosys-clk-gate { ++ compatible = "xuantie,audiosys-gate-controller"; ++ audiosys-regmap = <&audio_cpr>; ++ #clock-cells = <1>; ++ status = "okay"; ++ }; ++ ++ miscsys_clk_gate: miscsys-clk-gate { ++ compatible = "xuantie,miscsys-gate-controller"; ++ miscsys-regmap = <&miscsys_reg>; ++ tee-miscsys-regmap = <&tee_miscsys_reg>; ++ #clock-cells = <1>; ++ status = "okay"; ++ }; ++ ++ rst: reset-controller@ffef014000 { ++ compatible = "xuantie,th1520-reset", "syscon"; ++ reg = <0xff 0xef014000 0x0 0x1000>; ++ #reset-cells = <1>; ++ status = "okay"; ++ }; ++ ++ vpsys_rst: vpsys-reset-controller@ffecc30000 { ++ compatible = "xuantie,th1520-vpsys-reset","syscon"; ++ reg = <0xff 0xecc30000 0x0 0x1000>; ++ #reset-cells = <1>; ++ status = "okay"; ++ }; ++ ++ audiosys_rst: audiosys-reset-controller@ffcb000000 { ++ compatible = "xuantie,th1520-audiosys-reset","syscon"; ++ reg = <0xff 0xcb000000 0x0 0x1000>; ++ #reset-cells = <1>; ++ status = "okay"; ++ }; ++ ++ spi: spi@ffe700c000 { ++ compatible = "snps,dw-apb-ssi"; ++ reg = <0xff 0xe700c000 0x0 0x1000>; ++ interrupts = <54 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_SPI_SSI_CLK>, ++ <&clk CLKGEN_SPI_PCLK>; ++ clock-names = "sclk", "pclk"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ }; ++ ++ qspi0: qspi@ffea000000 { ++ compatible = "snps,dw-apb-ssi-quad"; ++ reg = <0xff 0xea000000 0x0 0x1000>; ++ interrupts = <52 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_QSPI0_SSI_CLK>, ++ <&clk CLKGEN_QSPI0_PCLK>; ++ clock-names = "sclk", "pclk"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ }; ++ ++ qspi1: qspi@fff8000000 { ++ compatible = "snps,dw-apb-ssi-quad"; ++ reg = <0xff 0xf8000000 0x0 0x1000>; ++ interrupts = <53 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_QSPI1_SSI_CLK>, ++ <&clk CLKGEN_QSPI1_PCLK>; ++ clock-names = "sclk", "pclk"; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ }; ++ ++ th1520_event: th1520-event { ++ compatible = "xuantie,th1520-event"; ++ aon-iram-regmap = <&aon_iram>; ++ status = "okay"; ++ }; ++ ++ watchdog0: 
watchdog@ffefc30000 { ++ compatible = "snps,dw-wdt"; ++ reg = <0xff 0xefc30000 0x0 0x1000>; ++ interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_WDT0_PCLK>; ++ clock-names = "tclk"; ++ resets = <&rst TH1520_RESET_WDT0>; ++ status = "okay"; ++ }; ++ ++ watchdog1: watchdog@ffefc31000 { ++ compatible = "snps,dw-wdt"; ++ reg = <0xff 0xefc31000 0x0 0x1000>; ++ interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&clk CLKGEN_WDT1_PCLK>; ++ clock-names = "tclk"; ++ resets = <&rst TH1520_RESET_WDT1>; ++ status = "okay"; ++ }; ++ ++ regdump: th1520-regdump { ++ compatible = "xuantie,th1520-regdump"; ++ status = "disabled"; ++ }; ++ ++ vdec_opp_table: opp_table_vdec { ++ compatible = "operating-points-v2"; ++ video-4k-minfreq = <594000000>; ++ qos-mid-minfreq = <297000000>; ++ ++ opp00 { ++ opp-hz = /bits/ 64 <158400000>; ++ opp-microvolt = <875000>; ++ }; ++ opp01 { ++ opp-hz = /bits/ 64 <198000000>; ++ opp-microvolt = <875000>; ++ }; ++ opp02 { ++ opp-hz = /bits/ 64 <237600000>; ++ opp-microvolt = <875000>; ++ }; ++ opp03 { ++ opp-hz = /bits/ 64 <264000000>; ++ opp-microvolt = <887500>; ++ }; ++ opp04 { ++ opp-hz = /bits/ 64 <297000000>; ++ opp-microvolt = <937500>; ++ }; ++ opp05 { ++ opp-hz = /bits/ 64 <396000000>; ++ opp-microvolt = <1012500>; ++ }; ++ opp06 { ++ opp-hz = /bits/ 64 <475200000>; ++ opp-microvolt = <1037500>; ++ }; ++ opp07 { ++ opp-hz = /bits/ 64 <594000000>; ++ opp-microvolt = <1050000>; ++ }; ++ }; ++ ++ venc_opp_table: opp_table_venc { ++ compatible = "operating-points-v2"; ++ qos-mid-minfreq = <250000000>; ++ ++ opp00 { ++ opp-hz = /bits/ 64 <200000000>; ++ }; ++ opp01 { ++ opp-hz = /bits/ 64 <250000000>; ++ }; ++ opp02 { ++ opp-hz = /bits/ 64 <333300000>; ++ }; ++ opp03 { ++ opp-hz = /bits/ 64 <500000000>; ++ }; ++ }; ++ ++ vdec: vdec@ffecc00000 { ++ compatible = "xuantie,th1520-vc8000d"; ++ reg = <0xff 0xecc00000 0x0 0x8000>; ++ interrupts = <131 IRQ_TYPE_LEVEL_HIGH>; ++ power-domains = <&pd TH1520_AON_VDEC_PD>; ++ clocks = <&vpsys_clk_gate TH1520_VPSYS_VDEC_ACLK>, ++ <&vpsys_clk_gate TH1520_VPSYS_VDEC_CCLK>, ++ <&vpsys_clk_gate TH1520_VPSYS_VDEC_PCLK>; ++ clock-names = "aclk", "cclk", "pclk"; ++ operating-points-v2 = <&vdec_opp_table>; ++ status = "okay"; ++ }; ++ ++ venc: venc@ffecc10000 { ++ compatible = "xuantie,th1520-vc8000e"; ++ reg = <0xff 0xecc10000 0x0 0x8000>; ++ interrupts = <133 IRQ_TYPE_LEVEL_HIGH>; ++ power-domains = <&pd TH1520_AON_VENC_PD>; ++ clocks = <&vpsys_clk_gate TH1520_VPSYS_VENC_ACLK>, ++ <&vpsys_clk_gate TH1520_VPSYS_VENC_CCLK>, ++ <&vpsys_clk_gate TH1520_VPSYS_VENC_PCLK>; ++ clock-names = "aclk", "cclk", "pclk"; ++ operating-points-v2 = <&venc_opp_table>; ++ status = "okay"; ++ }; ++ ++ g2d_opp_table: g2d-opp-table { ++ compatible = "operating-points-v2"; ++ video-4k-minfreq = <396000000>; ++ qos-mid-minfreq = <198000000>; ++ ++ opp-49500000 { ++ opp-hz = /bits/ 64 <49500000>; ++ }; ++ opp-99000000 { ++ opp-hz = /bits/ 64 <99000000>; ++ }; ++ opp-198000000 { ++ opp-hz = /bits/ 64 <198000000>; ++ }; ++ opp-396000000 { ++ opp-hz = /bits/ 64 <396000000>; ++ }; ++ }; ++ ++ g2d: gc620@ffecc80000 { ++ compatible = "vivante,gc"; ++ reg = <0xff 0xecc80000 0x0 0x40000>; ++ interrupt-parent = <&plic>; ++ interrupts = <101 IRQ_TYPE_LEVEL_HIGH>; ++ ++ clocks = <&vpsys_clk_gate TH1520_VPSYS_G2D_PCLK>, ++ <&vpsys_clk_gate TH1520_VPSYS_G2D_ACLK>, ++ <&vpsys_clk_gate TH1520_VPSYS_G2D_CCLK>; ++ clock-names = "bus", "core", "shader"; ++ operating-points-v2 = <&g2d_opp_table>; ++ status = "okay"; ++ }; ++ ++ vidmem: vidmem@ffecc08000 { ++ compatible = 
"xuantie,th1520-vidmem"; ++ reg = <0xff 0xecc08000 0x0 0x1000>; ++ status = "okay"; ++ }; ++ ++ bm_visys: bm_visys@ffe4040000 { ++ compatible = "xuantie,th1520-bm-visys"; ++ reg = <0xff 0xe4040000 0x0 0x1000>; ++ status = "disabled"; ++ }; ++ ++ bm_csi0: csi@ffe4000000{ //CSI2 ++ compatible = "xuantie,th1520-bm-csi"; ++ reg = < 0xff 0xe4000000 0x0 0x10000>; ++ interrupt-parent = <&plic>; ++ interrupts = <128 IRQ_TYPE_LEVEL_HIGH>; ++ dphyglueiftester = <0x180>; ++ sysreg_mipi_csi_ctrl = <0x140>; ++ clocks = <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI0_PCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI0_PIXCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI0_CFG_CLK>; ++ clock-names = "pclk", "pixclk", "cfg_clk"; ++ phy_name = "CSI_4LANE"; ++ status = "disabled"; ++ }; ++ ++ csia_reg: visys-reg@ffe4020000 { ++ compatible = "xuantie,th1520-visys-reg", "syscon"; ++ reg = < 0xff 0xe4020000 0x0 0x10000>; ++ status = "okay"; ++ }; ++ ++ csib_reg: visys-reg@ffe4010000{ ++ compatible = "xuantie,th1520-visys-reg", "syscon"; ++ reg = < 0xff 0xe4010000 0x0 0x10000>; ++ status = "okay"; ++ }; ++ ++ bm_csi1: csi@ffe4010000{ //CSI2X2_B ++ compatible = "xuantie,th1520-bm-csi"; ++ reg = < 0xff 0xe4010000 0x0 0x10000>; ++ interrupt-parent = <&plic>; ++ interrupts = <126 IRQ_TYPE_LEVEL_HIGH>; // 110 + 16 int_mipi_csi2x2_int0 ++ dphyglueiftester = <0x182>; // for FPGA PHY only. ASIC not needed. ++ sysreg_mipi_csi_ctrl = <0x148>; ++ visys-regmap = <&visys_reg>; ++ csia-regmap = <&csia_reg>; ++ clocks = <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI1_PCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI1_PIXCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI1_CFG_CLK>; ++ clock-names = "pclk", "pixclk", "cfg_clk"; ++ phy_name = "CSI_B"; ++ status = "disabled"; ++ }; ++ ++ bm_csi2: csi@ffe4020000{ //CSI2X2_A ++ compatible = "xuantie,th1520-bm-csi"; ++ reg = < 0xff 0xe4020000 0x0 0x10000>; ++ interrupt-parent = <&plic>; ++ interrupts = <127 IRQ_TYPE_LEVEL_HIGH>; ++ dphyglueiftester = <0x184>; ++ sysreg_mipi_csi_ctrl = <0x144>; ++ sysreg_mipi_csi_fifo_ctrl = <0x14c>; ++ csib-regmap = <&csib_reg>; ++ clocks = <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI2_PCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI2_PIXCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI2_CFG_CLK>, ++ <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI1_PCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI1_PIXCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_MIPI_CSI1_CFG_CLK>; ++ clock-names = "pclk", "pixclk", "cfg_clk", "pclk1", "pixclk1", "cfg_clk1"; ++ phy_name = "CSI_A"; ++ status = "disabled"; ++ }; ++ ++ isp0: isp@ffe4100000 { ++ compatible = "xuantie,th1520-isp"; ++ reg = <0xff 0xe4100000 0x0 0x10000>; ++ interrupt-parent = <&plic>; ++ interrupts = <117 IRQ_TYPE_LEVEL_HIGH>,<118 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&visys_clk_gate TH1520_CLKGEN_ISP0_ACLK>, ++ <&visys_clk_gate TH1520_CLKGEN_ISP0_HCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_ISP0_PIXELCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_ISP0_CLK>; ++ clock-names = "aclk", "hclk", "isp0_pclk", "cclk"; ++ status = "disabled"; ++ }; ++ ++ isp1: isp@ffe4110000 { ++ compatible = "xuantie,th1520-isp"; ++ reg = <0xff 0xe4110000 0x0 0x10000>; ++ interrupt-parent = <&plic>; ++ interrupts = <120 IRQ_TYPE_LEVEL_HIGH>,<121 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&visys_clk_gate TH1520_CLKGEN_ISP0_ACLK>, ++ <&visys_clk_gate TH1520_CLKGEN_ISP0_HCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_ISP0_PIXELCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_ISP1_CLK>, ++ <&visys_clk_gate TH1520_CLKGEN_ISP1_PIXELCLK>; ++ clock-names = "aclk", "hclk", "isp0_pclk", "cclk", "isp1_pclk"; ++ 
status = "disabled"; ++ }; ++ ++ isp_ry0: isp_ry@ffe4120000 { ++ compatible = "xuantie,th1520-isp_ry"; ++ reg = <0xff 0xe4120000 0x0 0x10000>; ++ interrupt-parent = <&plic>; ++ interrupts = <123 IRQ_TYPE_LEVEL_HIGH>,<124 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&visys_clk_gate TH1520_CLKGEN_ISP_RY_ACLK>, ++ <&visys_clk_gate TH1520_CLKGEN_ISP_RY_HCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_ISP_RY_CCLK>; ++ clock-names = "aclk", "hclk", "cclk"; ++ status = "disabled"; ++ }; ++ ++ dewarp: dewarp@ffe4130000 { ++ compatible = "xuantie,th1520-dewarp"; ++ reg = <0xff 0xe4130000 0x0 0x10000>; ++ interrupt-parent = <&plic>; ++ interrupts = <98 IRQ_TYPE_LEVEL_HIGH>,<99 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&visys_clk_gate TH1520_CLKGEN_DW200_ACLK>, ++ <&visys_clk_gate TH1520_CLKGEN_DW200_HCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_DW200_CLK_VSE>, ++ <&visys_clk_gate TH1520_CLKGEN_DW200_CLK_DWE>; ++ clock-names = "aclk", "hclk", "vseclk", "dweclk"; ++ status = "disabled"; ++ }; ++ ++ dec400_isp0: dec400@ffe4060000 { ++ compatible = "xuantie,th1520-dec400"; ++ reg = <0xff 0xe4060000 0x0 0x8000>; ++ status = "disabled"; ++ }; ++ ++ dec400_isp1: dec400@ffe4068000 { ++ compatible = "xuantie,th1520-dec400"; ++ reg = <0xff 0xe4068000 0x0 0x8000>; ++ status = "disabled"; ++ }; ++ ++ dec400_isp2: dec400@ffe4070000 { ++ compatible = "xuantie,th1520-dec400"; ++ reg = <0xff 0xe4070000 0x0 0x8000>; ++ status = "disabled"; ++ }; ++ ++ ++ vi_pre: vi_pre@ffe4030000 { ++ compatible = "xuantie,th1520-vi_pre"; ++ reg = <0xff 0xe4030000 0x0 0x1000>; ++ interrupt-parent = <&plic>; ++ interrupts = <134 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&visys_clk_gate TH1520_CLKGEN_VIPRE_ACLK>, ++ <&visys_clk_gate TH1520_CLKGEN_VIPRE_PCLK>, ++ <&visys_clk_gate TH1520_CLKGEN_VIPRE_PIXELCLK>; ++ clock-names ="aclk", "pclk", "pixclk"; ++ status = "disabled"; ++ }; ++ ++ xtensa_dsp: dsp@01{ ++ compatible = "xuantie,dsp-hw-common"; ++ reg = <0xff 0xef040000 0x0 0x001000 >; /*DSP_SYSREG(0x0000-0xFFF) */ ++ status = "disabled"; ++ }; ++ ++ xtensa_dsp0: dsp@0 { ++ compatible = "cdns,xrp-hw-simple"; ++ reg = <0xff 0xe4040190 0x0 0x000010 /* host irq DSP->CPU INT Register */ ++ 0xff 0xe40401e0 0x0 0x000010 /* device irq CPU->DSP INT Register */ ++ 0xff 0xef048000 0x0 0x008000 /* DSP shared memory */ ++ 0xff 0xe0180000 0x0 0x040000>; /* DSP TCM*/ ++ dsp = <0>; ++ dspsys-rst-bit = <8>; /*bit# in DSP_SYSREG*/ ++ dspsys-bus-offset = <0x90>; /*in DSP_SYSREG*/ ++ device-irq = <0x4 1 24>; /*0xff 0xe40401e4 offset to clear DSP I]RQ, bit#, IRQ# */ ++ device-irq-host-offset = <0x8>; /*0xff 0xe40401e8 offset to trigger DSP IRQ*/ ++ device-irq-mode = <1>; /*level trigger*/ ++ host-irq = <0x4 1>; /*0xff 0xe4040194 offset to clear, bit# */ ++ host-irq-mode = <1>; /*level trigger */ ++ host-irq-offset = <0x8>; /* 0xff 0xe4040198 offset to trigger ,device side*/ ++ interrupt-parent = <&plic>; ++ interrupts = <156 IRQ_TYPE_LEVEL_HIGH>; ++ #cooling-cells = <2>; ++ firmware-name = "xrp0.elf"; ++ clocks = <&dspsys_clk_gate CLKGEN_DSP0_PCLK>, ++ <&dspsys_clk_gate CLKGEN_DSP0_CCLK>; ++ clock-names = "pclk", "cclk"; ++ status = "disabled"; ++ operating-points-v2 = <&dsp_opp_table>; ++ dynamic-power-coefficient = <1000>; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ranges = <0x00 0x00000000 0x00 0x00000000 0xe0180000 ++ 0x00 0xe01c0000 0x00 0xe01c0000 0x19E40000 ++ 0x00 0xfa000000 0xff 0xe0000000 0x00180000 ++ 0x00 0xe0180000 0xff 0xe0180000 0x00040000 ++ 0x00 0xffc00000 0xff 0xe4000000 0x00200000 >; /* VISYS_R */ ++ dsp@0 { ++ ranges = <0x00 0x00000000 0x00 0x00000000 0xe0180000 
++ 0x00 0xe01c0000 0x00 0xe01c0000 0x19E40000 ++ 0x00 0xfa000000 0xff 0xe0000000 0x00180000 ++ 0x00 0xe0180000 0xff 0xe0180000 0x00040000 ++ 0x00 0xffc00000 0xff 0xe4000000 0x00200000>; /* VISYS_R */ ++ }; ++ }; ++ ++ xtensa_dsp1: dsp@1 { ++ compatible = "cdns,xrp-hw-simple"; ++ reg = <0xff 0xe40401a0 0x0 0x000010 /* host irq DSP->CPU INT Register */ ++ 0xff 0xe40401d0 0x0 0x000010 /* device irq CPU->DSP INT Register */ ++ 0xff 0xef050000 0x0 0x008000 /* DSP shared memory */ ++ 0xff 0xe01C0000 0x0 0x040000>; /* DSP TCM*/ ++ dsp = <1>; ++ dspsys-rst-bit = <8>; /*bit# in DSP_SYSREG*/ ++ dspsys-bus-offset = <0x90>; /*in DSP_SYSREG*/ ++ device-irq = <0x4 1 24>; /*0xff 0xe40401e4 offset to clear DSP IRQ, bit#, IRQ# */ ++ device-irq-host-offset = <0x8>; /*0xff 0xe40401e8 offset to trigger DSP IRQ*/ ++ device-irq-mode = <1>; /*level trigger*/ ++ host-irq = <0x4 1>; /*0xff 0xe4040194 offset to clear, bit# */ ++ host-irq-mode = <1>; /*level trigger */ ++ host-irq-offset = <0x8>; /* 0xff 0xe4040198 offset to trigger, device side */ ++ interrupt-parent = <&plic>; ++ interrupts = <157 IRQ_TYPE_LEVEL_HIGH>; ++ firmware-name = "xrp1.elf"; ++ #cooling-cells = <2>; ++ clocks = <&dspsys_clk_gate CLKGEN_DSP1_PCLK>, ++ <&dspsys_clk_gate CLKGEN_DSP1_CCLK>; ++ clock-names = "pclk", "cclk"; ++ status = "disabled"; ++ operating-points-v2 = <&dsp_opp_table>; ++ dynamic-power-coefficient = <1000>; ++ #address-cells = <2>; ++ #size-cells = <1>; ++ ranges = <0x00 0x00000000 0x00 0x00000000 0xe0180000 ++ 0x00 0xe01c0000 0x00 0xe01c0000 0x19E40000 ++ 0x00 0xfa000000 0xff 0xe0000000 0x00180000 ++ 0x00 0xe0180000 0xff 0xe01C0000 0x00040000 ++ 0x00 0xffc00000 0xff 0xe4000000 0x00200000>; /* VISYS_R */ ++ dsp@0 { ++ ranges = <0x00 0x00000000 0x00 0x00000000 0xe0180000 ++ 0x00 0xe01c0000 0x00 0xe01c0000 0x19E40000 ++ 0x00 0xfa000000 0xff 0xe0000000 0x00180000 ++ 0x00 0xe0180000 0xff 0xe01C0000 0x00040000 ++ 0x00 0xffc00000 0xff 0xe4000000 0x00200000>; /* VISYS_R */ ++ }; ++ }; ++ ++ ++ dsp_opp_table: dsp_opp_table { ++ compatible = "operating-points-v2"; ++ qos-mid-minfreq = <500000000>; ++ ++ opp-125000000 { ++ opp-hz = /bits/ 64 <125000000>; ++ opp-microvolt = <800000>; ++ }; ++ ++ opp-250000000 { ++ opp-hz = /bits/ 64 <250000000>; ++ opp-microvolt = <800000>; ++ }; ++ ++ opp-500000000 { ++ opp-hz = /bits/ 64 <500000000>; ++ opp-microvolt = <800000>; ++ }; ++ opp-1000000000 { ++ opp-hz = /bits/ 64 <1000000000>; ++ opp-microvolt = <800000>; ++ opp-suspend; ++ }; ++ }; ++ ++ npu: vha@fffc800000 { ++ compatible = "img,ax3386-nna"; ++ reg = <0xff 0xfc800000 0x0 0x100000>; ++ interrupts = <113 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-names = "npuirq"; ++ #cooling-cells = <2>; ++ dynamic-power-coefficient = <1600>; ++ power-domains = <&pd TH1520_AON_NPU_PD>; ++ clocks = <&clk CLKGEN_TOP_APB_SX_PCLK>, ++ <&clk CLKGEN_TOP_AXI4S_ACLK>, ++ <&clk NPU_CCLK>, ++ <&clk GMAC_PLL_FOUTPOSTDIV>, ++ <&clk NPU_CCLK_OUT_DIV>; ++ clock-names = "pclk", "aclk", "cclk", ++ "gmac_pll_foutpostdiv", ++ "npu_cclk_out_div"; ++ operating-points-v2 = <&npu_opp_table>; ++ vha_clk_rate = <1000000000>; ++ ldo_vha-supply = <&npu>; ++ dma-mask = <0xff 0xffffffff>; ++ resets = <&rst TH1520_RESET_NPU>; ++ status = "disabled"; ++ }; ++ ++ npu_opp_table: opp-table { ++ compatible = "operating-points-v2"; ++ qos-mid-minfreq = <594000000>; ++ ++ opp-1000000000 { ++ opp-hz = /bits/ 64 <1000000000>; ++ opp-microvolt = <800000>; ++ }; ++ opp-792000000 { ++ opp-hz = /bits/ 64 <792000000>; ++ opp-microvolt = <800000>; ++ }; ++ opp-594000000 { ++ opp-hz = /bits/ 64 
<594000000>; ++ opp-microvolt = <800000>; ++ }; ++ opp-475200000 { ++ opp-hz = /bits/ 64 <475200000>; ++ opp-microvolt = <800000>; ++ }; ++ opp-396000000 { ++ opp-hz = /bits/ 64 <396000000>; ++ opp-microvolt = <800000>; ++ }; ++ }; ++ ++ iso7816: iso7816-card@fff7f30000 { ++ compatible = "xuantie,th1520-iso7816-card"; ++ reg = <0xff 0xf7f30000 0x0 0x4000>; ++ interrupts = <69 IRQ_TYPE_LEVEL_HIGH>; ++ status = "disabled"; ++ }; ++ ++ eip_28: eip-28@ffff300000 { ++ compatible = "xlnx,sunrise-fpga-1.0", "safexcel-eip-28"; ++ reg = <0xff 0xff300000 0x0 0x40000>; ++ interrupts = <144 IRQ_TYPE_LEVEL_HIGH>, ++ <145 IRQ_TYPE_LEVEL_HIGH>, ++ <146 IRQ_TYPE_LEVEL_HIGH>, ++ <147 IRQ_TYPE_LEVEL_HIGH>; ++ clocks = <&miscsys_clk_gate CLKGEN_MISCSYS_EIP120SI_CLK>, ++ <&miscsys_clk_gate CLKGEN_MISCSYS_EIP120SII_CLK>, ++ <&miscsys_clk_gate CLKGEN_MISCSYS_EIP120SIII_CLK>, ++ <&miscsys_clk_gate CLKGEN_MISCSYS_EIP150B_HCLK>; ++ clock-names = "120si_clk","120sii_clk","120siii_clk","hclk"; ++ status = "disabled"; ++ }; ++ ++ hwspinlock: hwspinlock@ffefc10000 { ++ compatible = "th1520,hwspinlock"; ++ reg = <0xff 0xefc10000 0x0 0x10000>; ++ status = "disabled"; ++ }; + }; + }; +diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig +index ab86ec3b9eab..33159406ee58 100644 +--- a/arch/riscv/configs/defconfig ++++ b/arch/riscv/configs/defconfig +@@ -28,6 +28,7 @@ CONFIG_PROFILING=y + CONFIG_SOC_MICROCHIP_POLARFIRE=y + CONFIG_ARCH_RENESAS=y + CONFIG_ARCH_THEAD=y ++CONFIG_ARCH_XUANTIE=y + CONFIG_SOC_SIFIVE=y + CONFIG_SOC_STARFIVE=y + CONFIG_ARCH_SUNXI=y +@@ -36,6 +37,7 @@ CONFIG_SMP=y + CONFIG_HOTPLUG_CPU=y + CONFIG_PM=y + CONFIG_CPU_IDLE=y ++CONFIG_ACPI_CPPC_CPUFREQ=m + CONFIG_VIRTUALIZATION=y + CONFIG_KVM=m + CONFIG_ACPI=y +@@ -142,6 +144,13 @@ CONFIG_SPI_SUN6I=y + # CONFIG_PTP_1588_CLOCK is not set + CONFIG_GPIO_SIFIVE=y + CONFIG_WATCHDOG=y ++CONFIG_DW_WATCHDOG=y ++CONFIG_WATCHDOG_PRETIMEOUT_GOV=y ++CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=y ++CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y ++#CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y ++CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y ++CONFIG_WATCHDOG_OPEN_TIMEOUT=32 + CONFIG_SUNXI_WATCHDOG=y + CONFIG_REGULATOR=y + CONFIG_REGULATOR_FIXED_VOLTAGE=y +@@ -168,21 +177,25 @@ CONFIG_MMC=y + CONFIG_MMC_SDHCI=y + CONFIG_MMC_SDHCI_PLTFM=y + CONFIG_MMC_SDHCI_CADENCE=y ++CONFIG_MMC_SDHCI_OF_DWCMSHC=y + CONFIG_MMC_SPI=y + CONFIG_MMC_SUNXI=y + CONFIG_RTC_CLASS=y + CONFIG_RTC_DRV_SUN6I=y + CONFIG_DMADEVICES=y + CONFIG_DMA_SUN6I=m ++CONFIG_DW_AXI_DMAC=y + CONFIG_VIRTIO_PCI=y + CONFIG_VIRTIO_BALLOON=y + CONFIG_VIRTIO_INPUT=y + CONFIG_VIRTIO_MMIO=y + CONFIG_SUN8I_DE2_CCU=m + CONFIG_SUN50I_IOMMU=y ++CONFIG_MAILBOX=y + CONFIG_RPMSG_CHAR=y + CONFIG_RPMSG_CTRL=y + CONFIG_RPMSG_VIRTIO=y ++CONFIG_RPMSG_TH1520=y + CONFIG_ARCH_R9A07G043=y + CONFIG_PHY_SUN4I_USB=m + CONFIG_LIBNVDIMM=y +@@ -238,5 +251,13 @@ CONFIG_DEBUG_SG=y + # CONFIG_RCU_TRACE is not set + CONFIG_RCU_EQS_DEBUG=y + # CONFIG_FTRACE is not set +-# CONFIG_RUNTIME_TESTING_MENU is not set + CONFIG_MEMTEST=y ++# Enable TEE ++CONFIG_TEE=y ++CONFIG_OPTEE=y ++# TH1520 CLOCK ++CONFIG_CLK_TH1520_FM=y ++# TH1520 MAILBOX ++CONFIG_MAILBOX=y ++# TH1520 PMIC_WDT ++CONFIG_TH1520_PMIC_WATCHDOG=y +diff --git a/arch/riscv/configs/k1_defconfig b/arch/riscv/configs/k1_defconfig +new file mode 100644 +index 000000000000..72df9883c25c +--- /dev/null ++++ b/arch/riscv/configs/k1_defconfig +@@ -0,0 +1,31 @@ ++# ++# Spacemit k1 SoC support ++# ++CONFIG_SOC_SPACEMIT=y ++CONFIG_SOC_SPACEMIT_K1=y ++CONFIG_SOC_SPACEMIT_K1X=y 
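++# Editorial note, assumed rationale: Zicbom supplies the cbo.clean/cbo.flush/
++# cbo.inval cache-block management ops that non-coherent DMA maintenance uses.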
++CONFIG_RISCV_ISA_ZICBOM=y ++CONFIG_SPACEMIT_K1X_CCU=y ++CONFIG_RESET_K1X_SPACEMIT=y ++CONFIG_PINCTRL_SPACEMIT_K1X=y ++CONFIG_GPIO_K1X=y ++CONFIG_SERIAL_SPACEMIT_K1X=y ++CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE=y ++CONFIG_SERIAL_DEV_BUS=y ++CONFIG_SPACEMIT_MEM_RANGE=y ++CONFIG_SPACEMIT_K1_DMA=y ++CONFIG_I2C_SPACEMIT_K1=y ++CONFIG_SPI_SPACEMIT_K1=y ++CONFIG_SPI_SPACEMIT_K1_QSPI=y ++CONFIG_PWM_PXA=m ++CONFIG_MFD_CORE=y ++CONFIG_MFD_SPACEMIT_P1=y ++CONFIG_REGULATOR_SPACEMIT_P1=y ++CONFIG_INPUT_SPACEMIT_P1_PWRKEY=m ++CONFIG_PINCTRL_SPACEMIT_P1=m ++CONFIG_RTC_DRV_SPACEMIT_P1=m ++CONFIG_SPACEMIT_P1_ADC=m ++CONFIG_MMC_SDHCI_OF_K1=y ++CONFIG_NET_VENDOR_SPACEMIT=y ++CONFIG_K1_EMAC=m ++ +diff --git a/arch/riscv/configs/openeuler_defconfig b/arch/riscv/configs/openeuler_defconfig +index 61f2b2f12589..a09cebedc1c8 100644 +--- a/arch/riscv/configs/openeuler_defconfig ++++ b/arch/riscv/configs/openeuler_defconfig +@@ -2,6 +2,7 @@ + # Automatically generated file; DO NOT EDIT. + # Linux/riscv 6.6.0 Kernel Configuration + # ++CONFIG_GCC_ASM_GOTO_OUTPUT_BROKEN=y + CONFIG_IRQ_WORK=y + CONFIG_BUILDTIME_TABLE_SORT=y + CONFIG_THREAD_INFO_IN_TASK=y +@@ -44,6 +45,7 @@ CONFIG_IRQ_DOMAIN_HIERARCHY=y + CONFIG_GENERIC_IRQ_IPI=y + CONFIG_GENERIC_IRQ_IPI_MUX=y + CONFIG_GENERIC_MSI_IRQ=y ++CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y + CONFIG_IRQ_FORCED_THREADING=y + CONFIG_SPARSE_IRQ=y + # CONFIG_GENERIC_IRQ_DEBUGFS is not set +@@ -90,9 +92,10 @@ CONFIG_BPF_JIT_DEFAULT_ON=y + # CONFIG_BPF_SCHED is not set + # end of BPF subsystem + +-CONFIG_PREEMPT_VOLUNTARY_BUILD=y +-# CONFIG_PREEMPT_NONE is not set +-CONFIG_PREEMPT_VOLUNTARY=y ++# CONFIG_BPF_RVI is not set ++CONFIG_PREEMPT_NONE_BUILD=y ++CONFIG_PREEMPT_NONE=y ++# CONFIG_PREEMPT_VOLUNTARY is not set + # CONFIG_PREEMPT is not set + CONFIG_PREEMPT_COUNT=y + # CONFIG_PREEMPT_DYNAMIC is not set +@@ -148,7 +151,7 @@ CONFIG_GENERIC_SCHED_CLOCK=y + + CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y + CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +-CONFIG_GCC11_NO_ARRAY_BOUNDS=y ++CONFIG_GCC10_NO_ARRAY_BOUNDS=y + CONFIG_CC_NO_ARRAY_BOUNDS=y + CONFIG_ARCH_SUPPORTS_INT128=y + CONFIG_NUMA_BALANCING=y +@@ -160,6 +163,7 @@ CONFIG_MEMCG=y + # CONFIG_MEMCG_V1_RECLAIM is not set + # CONFIG_MEMCG_MEMFS_INFO is not set + CONFIG_MEMCG_KMEM=y ++# CONFIG_MEMCG_KMEM_STOCK is not set + CONFIG_BLK_CGROUP=y + CONFIG_CGROUP_WRITEBACK=y + # CONFIG_CGROUP_V1_WRITEBACK is not set +@@ -169,7 +173,6 @@ CONFIG_FAIR_GROUP_SCHED=y + CONFIG_CFS_BANDWIDTH=y + CONFIG_RT_GROUP_SCHED=y + # CONFIG_QOS_SCHED_DYNAMIC_AFFINITY is not set +-CONFIG_SCHED_MM_CID=y + CONFIG_CGROUP_PIDS=y + CONFIG_CGROUP_RDMA=y + CONFIG_CGROUP_FREEZER=y +@@ -186,6 +189,8 @@ CONFIG_SOCK_CGROUP_DATA=y + # CONFIG_CGROUP_V1_KILL is not set + # CONFIG_CGROUP_V1_STAT is not set + # CONFIG_CGROUP_FILES is not set ++# CONFIG_CGROUP_IFS is not set ++# CONFIG_UCOUNTS_PERCPU_COUNTER is not set + CONFIG_NAMESPACES=y + CONFIG_UTS_NS=y + CONFIG_TIME_NS=y +@@ -244,6 +249,8 @@ CONFIG_KALLSYMS=y + # CONFIG_KALLSYMS_SELFTEST is not set + CONFIG_KALLSYMS_ALL=y + CONFIG_KALLSYMS_BASE_RELATIVE=y ++CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS=y ++CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y + CONFIG_KCMP=y + CONFIG_RSEQ=y + CONFIG_CACHESTAT_SYSCALL=y +@@ -262,8 +269,6 @@ CONFIG_DEBUG_PERF_USE_VMALLOC=y + CONFIG_SYSTEM_DATA_VERIFICATION=y + CONFIG_PROFILING=y + CONFIG_TRACEPOINTS=y +-CONFIG_KABI_RESERVE=y +-CONFIG_KABI_SIZE_ALIGN_CHECKS=y + + # + # Kexec and crash features +@@ -288,6 +293,7 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=17 + CONFIG_RISCV_SBI=y + CONFIG_MMU=y + 
CONFIG_PAGE_OFFSET=0xff60000000000000 ++CONFIG_ARCH_FORCE_MAX_ORDER=15 + CONFIG_ARCH_SPARSEMEM_ENABLE=y + CONFIG_ARCH_SELECT_MEMORY_MODEL=y + CONFIG_ARCH_SUPPORTS_UPROBES=y +@@ -298,6 +304,7 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y + CONFIG_GENERIC_CSUM=y + CONFIG_GENERIC_HWEIGHT=y + CONFIG_FIX_EARLYCON_MEM=y ++CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 + CONFIG_PGTABLE_LEVELS=5 + CONFIG_LOCKDEP_SUPPORT=y + CONFIG_RISCV_DMA_NONCOHERENT=y +@@ -306,15 +313,20 @@ CONFIG_RISCV_DMA_NONCOHERENT=y + # SoC selection + # + # CONFIG_SOC_MICROCHIP_POLARFIRE is not set +-# CONFIG_ARCH_RENESAS is not set ++CONFIG_ARCH_RENESAS=y + CONFIG_ARCH_SIFIVE=y + CONFIG_SOC_SIFIVE=y ++CONFIG_ARCH_SOPHGO=y + CONFIG_ARCH_STARFIVE=y + CONFIG_SOC_STARFIVE=y +-# CONFIG_ARCH_SUNXI is not set ++CONFIG_ARCH_SUNXI=y + CONFIG_ARCH_THEAD=y ++CONFIG_ARCH_XUANTIE=y + CONFIG_ARCH_VIRT=y + CONFIG_SOC_VIRT=y ++CONFIG_SOC_SPACEMIT=y ++CONFIG_SOC_SPACEMIT_K1=y ++CONFIG_SOC_SPACEMIT_K1X=y + # end of SoC selection + + # +@@ -330,6 +342,18 @@ CONFIG_ERRATA_THEAD_CMO=y + CONFIG_ERRATA_THEAD_PMU=y + # end of CPU errata selection + ++# ++# Vendor extensions ++# ++CONFIG_RISCV_ISA_VENDOR_EXT=y ++ ++# ++# Andes ++# ++CONFIG_RISCV_ISA_VENDOR_EXT_ANDES=y ++# end of Andes ++# end of Vendor extensions ++ + # + # Platform type + # +@@ -339,7 +363,7 @@ CONFIG_ARCH_RV64I=y + CONFIG_CMODEL_MEDANY=y + CONFIG_MODULE_SECTIONS=y + CONFIG_SMP=y +-# CONFIG_SCHED_MC is not set ++CONFIG_SCHED_MC=y + CONFIG_NR_CPUS=512 + CONFIG_HOTPLUG_CPU=y + CONFIG_TUNE_GENERIC=y +@@ -351,11 +375,14 @@ CONFIG_RISCV_ISA_C=y + CONFIG_RISCV_ISA_SVNAPOT=y + CONFIG_RISCV_ISA_SVPBMT=y + CONFIG_TOOLCHAIN_HAS_V=y +-CONFIG_RISCV_ISA_V=y +-CONFIG_RISCV_ISA_V_DEFAULT_ENABLE=y ++# CONFIG_RISCV_ISA_V is not set ++CONFIG_RISCV_ISA_ZAWRS=y ++CONFIG_TOOLCHAIN_HAS_ZBB=y ++CONFIG_RISCV_ISA_ZBB=y ++CONFIG_TOOLCHAIN_HAS_ZBC=y ++CONFIG_RISCV_ISA_ZBC=y + CONFIG_RISCV_ISA_ZICBOM=y + CONFIG_RISCV_ISA_ZICBOZ=y +-CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE=y + CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI=y + CONFIG_FPU=y + CONFIG_IRQ_STACKS=y +@@ -390,6 +417,7 @@ CONFIG_COMPAT=y + CONFIG_CMDLINE="" + CONFIG_EFI_STUB=y + CONFIG_EFI=y ++CONFIG_DMI=y + CONFIG_CC_HAVE_STACKPROTECTOR_TLS=y + CONFIG_STACKPROTECTOR_PER_TASK=y + CONFIG_RISCV_ISA_FALLBACK=y +@@ -420,7 +448,7 @@ CONFIG_PM_GENERIC_DOMAINS=y + CONFIG_PM_GENERIC_DOMAINS_SLEEP=y + CONFIG_PM_GENERIC_DOMAINS_OF=y + CONFIG_CPU_PM=y +-# CONFIG_ENERGY_MODEL is not set ++CONFIG_ENERGY_MODEL=y + CONFIG_ARCH_SUSPEND_POSSIBLE=y + # end of Power management options + +@@ -436,6 +464,7 @@ CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y + # CONFIG_CPU_IDLE_GOV_LADDER is not set + CONFIG_CPU_IDLE_GOV_MENU=y + CONFIG_CPU_IDLE_GOV_TEO=y ++# CONFIG_CPU_IDLE_GOV_HALTPOLL is not set + CONFIG_DT_IDLE_STATES=y + CONFIG_DT_IDLE_GENPD=y + +@@ -471,6 +500,8 @@ CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + # + CONFIG_CPUFREQ_DT=y + CONFIG_CPUFREQ_DT_PLATDEV=y ++CONFIG_RISCV_XUANTIE_TH1520_CPUFREQ=y ++# CONFIG_ACPI_CPPC_CPUFREQ is not set + # end of CPU Frequency scaling + # end of CPU Power Management + +@@ -485,9 +516,52 @@ CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y + CONFIG_KVM_XFER_TO_GUEST_WORK=y + CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y + CONFIG_VIRTUALIZATION=y +-CONFIG_KVM=m ++CONFIG_KVM=y + CONFIG_ARCH_SUPPORTS_ACPI=y +-# CONFIG_ACPI is not set ++CONFIG_ACPI=y ++CONFIG_ACPI_GENERIC_GSI=y ++# CONFIG_ACPI_DEBUGGER is not set ++CONFIG_ACPI_SPCR_TABLE=y ++# CONFIG_ACPI_EC_DEBUGFS is not set ++CONFIG_ACPI_AC=y ++CONFIG_ACPI_BATTERY=y ++CONFIG_ACPI_BUTTON=y ++CONFIG_ACPI_VIDEO=m ++CONFIG_ACPI_FAN=y ++# 
CONFIG_ACPI_TAD is not set ++# CONFIG_ACPI_DOCK is not set ++CONFIG_ACPI_PROCESSOR_IDLE=y ++CONFIG_ACPI_MCFG=y ++CONFIG_ACPI_PROCESSOR=y ++# CONFIG_ACPI_IPMI is not set ++CONFIG_ACPI_THERMAL=y ++# CONFIG_ACPI_DEBUG is not set ++# CONFIG_ACPI_PCI_SLOT is not set ++# CONFIG_ACPI_CONTAINER is not set ++# CONFIG_ACPI_HED is not set ++# CONFIG_ACPI_CUSTOM_METHOD is not set ++CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y ++# CONFIG_ACPI_NFIT is not set ++CONFIG_ACPI_NUMA=y ++# CONFIG_ACPI_HMAT is not set ++# CONFIG_ACPI_CONFIGFS is not set ++# CONFIG_ACPI_PFRUT is not set ++CONFIG_ACPI_PPTT=y ++# CONFIG_ACPI_FFH is not set ++# CONFIG_PMIC_OPREGION is not set ++CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y ++ ++# ++# Enable Livepatch ++# ++CONFIG_LIVEPATCH=y ++CONFIG_LIVEPATCH_WO_FTRACE=y ++CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y ++CONFIG_LIVEPATCH_STACK=y ++CONFIG_LIVEPATCH_RESTRICT_KPROBE=y ++# end of Enable Livepatch ++ ++CONFIG_CPU_MITIGATIONS=y + + # + # General architecture-dependent options +@@ -524,6 +598,8 @@ CONFIG_HAVE_PERF_REGS=y + CONFIG_HAVE_PERF_USER_STACK_DUMP=y + CONFIG_HAVE_ARCH_JUMP_LABEL=y + CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y ++CONFIG_MMU_GATHER_TABLE_FREE=y ++CONFIG_MMU_GATHER_RCU_TABLE_FREE=y + CONFIG_MMU_LAZY_TLB_REFCOUNT=y + CONFIG_HAVE_ARCH_SECCOMP=y + CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +@@ -565,7 +641,7 @@ CONFIG_VMAP_STACK=y + CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y + CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y + CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +-CONFIG_STRICT_KERNEL_RWX=y ++# CONFIG_STRICT_KERNEL_RWX is not set + CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y + CONFIG_STRICT_MODULE_RWX=y + CONFIG_ARCH_USE_MEMREMAP_PROT=y +@@ -576,7 +652,6 @@ CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y + CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y + CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y + CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +-CONFIG_DYNAMIC_SIGFRAME=y + + # + # GCOV-based kernel profiling +@@ -585,6 +660,11 @@ CONFIG_DYNAMIC_SIGFRAME=y + CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y + # end of GCOV-based kernel profiling + ++# ++# Profile Guided Optimization (PGO) ++# ++# end of Profile Guided Optimization (PGO) ++ + CONFIG_HAVE_GCC_PLUGINS=y + CONFIG_FUNCTION_ALIGNMENT=0 + # end of General architecture-dependent options +@@ -646,6 +726,7 @@ CONFIG_BLK_INLINE_ENCRYPTION=y + CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y + # CONFIG_BLK_DEV_DETECT_WRITING_PART0 is not set + # CONFIG_BLK_DEV_WRITE_MOUNTED_DUMP is not set ++CONFIG_BLK_IO_HUNG_TASK_CHECK=y + # CONFIG_BLK_IO_HIERARCHY_STATS is not set + + # +@@ -706,6 +787,8 @@ CONFIG_QUEUED_RWLOCKS=y + CONFIG_ARCH_HAS_MMIOWB=y + CONFIG_MMIOWB=y + CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y ++CONFIG_ARCH_HAS_PREPARE_SYNC_CORE_CMD=y ++CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y + CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y + # CONFIG_PID_MAX_PER_NAMESPACE is not set + CONFIG_FREEZER=y +@@ -771,6 +854,8 @@ CONFIG_SPARSEMEM_EXTREME=y + CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y + CONFIG_SPARSEMEM_VMEMMAP=y + CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y ++CONFIG_HAVE_FAST_GUP=y ++CONFIG_ARCH_KEEP_MEMBLOCK=y + CONFIG_MEMORY_ISOLATION=y + CONFIG_EXCLUSIVE_SYSTEM_RAM=y + CONFIG_SPLIT_PTLOCK_CPUS=4 +@@ -795,13 +880,14 @@ CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y + # CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set + CONFIG_THP_SWAP=y + # CONFIG_READ_ONLY_THP_FOR_FS is not set ++CONFIG_PGTABLE_HAS_HUGE_LEAVES=y + CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y + CONFIG_USE_PERCPU_NUMA_NODE_ID=y + CONFIG_CMA=y + # CONFIG_CMA_DEBUG is not set +-# CONFIG_CMA_DEBUGFS is not set +-CONFIG_CMA_SYSFS=y +-CONFIG_CMA_AREAS=19 ++CONFIG_CMA_DEBUGFS=y ++# 
CONFIG_CMA_SYSFS is not set ++CONFIG_CMA_AREAS=7 + CONFIG_GENERIC_EARLY_IOREMAP=y + # CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set + CONFIG_PAGE_IDLE_FLAG=y +@@ -833,6 +919,8 @@ CONFIG_LOCK_MM_AND_FIND_VMA=y + # + # CONFIG_DAMON is not set + # end of Data Access Monitoring ++ ++# CONFIG_THP_CONTROL is not set + # end of Memory Management options + + CONFIG_NET=y +@@ -1346,10 +1434,10 @@ CONFIG_L2TP_DEBUGFS=m + CONFIG_L2TP_V3=y + CONFIG_L2TP_IP=m + CONFIG_L2TP_ETH=m +-CONFIG_STP=m ++CONFIG_STP=y + CONFIG_GARP=m + CONFIG_MRP=m +-CONFIG_BRIDGE=m ++CONFIG_BRIDGE=y + CONFIG_BRIDGE_IGMP_SNOOPING=y + CONFIG_BRIDGE_VLAN_FILTERING=y + # CONFIG_BRIDGE_MRP is not set +@@ -1358,7 +1446,7 @@ CONFIG_BRIDGE_VLAN_FILTERING=y + CONFIG_VLAN_8021Q=m + CONFIG_VLAN_8021Q_GVRP=y + CONFIG_VLAN_8021Q_MVRP=y +-CONFIG_LLC=m ++CONFIG_LLC=y + # CONFIG_LLC2 is not set + # CONFIG_ATALK is not set + # CONFIG_X25 is not set +@@ -1513,7 +1601,54 @@ CONFIG_CAN_BCM=m + CONFIG_CAN_GW=m + # CONFIG_CAN_J1939 is not set + # CONFIG_CAN_ISOTP is not set +-# CONFIG_BT is not set ++CONFIG_BT=y ++CONFIG_BT_BREDR=y ++CONFIG_BT_RFCOMM=y ++CONFIG_BT_RFCOMM_TTY=y ++CONFIG_BT_BNEP=y ++# CONFIG_BT_BNEP_MC_FILTER is not set ++# CONFIG_BT_BNEP_PROTO_FILTER is not set ++CONFIG_BT_HIDP=y ++CONFIG_BT_LE=y ++CONFIG_BT_LE_L2CAP_ECRED=y ++# CONFIG_BT_6LOWPAN is not set ++# CONFIG_BT_LEDS is not set ++# CONFIG_BT_MSFTEXT is not set ++# CONFIG_BT_AOSPEXT is not set ++CONFIG_BT_DEBUGFS=y ++# CONFIG_BT_SELFTEST is not set ++ ++# ++# Bluetooth device drivers ++# ++# CONFIG_BT_HCIBTUSB is not set ++# CONFIG_BT_HCIBTSDIO is not set ++CONFIG_BT_HCIUART=y ++CONFIG_BT_HCIUART_SERDEV=y ++CONFIG_BT_HCIUART_H4=y ++# CONFIG_BT_HCIUART_NOKIA is not set ++# CONFIG_BT_HCIUART_BCSP is not set ++# CONFIG_BT_HCIUART_ATH3K is not set ++# CONFIG_BT_HCIUART_LL is not set ++# CONFIG_BT_HCIUART_3WIRE is not set ++# CONFIG_BT_HCIUART_INTEL is not set ++# CONFIG_BT_HCIUART_BCM is not set ++# CONFIG_BT_HCIUART_RTL is not set ++# CONFIG_BT_HCIUART_QCA is not set ++# CONFIG_BT_HCIUART_AG6XX is not set ++# CONFIG_BT_HCIUART_MRVL is not set ++# CONFIG_BT_HCIBCM203X is not set ++# CONFIG_BT_HCIBCM4377 is not set ++# CONFIG_BT_HCIBPA10X is not set ++# CONFIG_BT_HCIBFUSB is not set ++# CONFIG_BT_HCIVHCI is not set ++# CONFIG_BT_MRVL is not set ++# CONFIG_BT_MTKSDIO is not set ++# CONFIG_BT_MTKUART is not set ++# CONFIG_BT_VIRTIO is not set ++# CONFIG_BT_NXPUART is not set ++# end of Bluetooth device drivers ++ + # CONFIG_AF_RXRPC is not set + # CONFIG_AF_KCM is not set + CONFIG_STREAM_PARSER=y +@@ -1522,7 +1657,7 @@ CONFIG_FIB_RULES=y + CONFIG_WIRELESS=y + CONFIG_WEXT_CORE=y + CONFIG_WEXT_PROC=y +-CONFIG_CFG80211=m ++CONFIG_CFG80211=y + # CONFIG_NL80211_TESTMODE is not set + # CONFIG_CFG80211_DEVELOPER_WARNINGS is not set + # CONFIG_CFG80211_CERTIFICATION_ONUS is not set +@@ -1532,7 +1667,7 @@ CONFIG_CFG80211_DEFAULT_PS=y + # CONFIG_CFG80211_DEBUGFS is not set + CONFIG_CFG80211_CRDA_SUPPORT=y + CONFIG_CFG80211_WEXT=y +-CONFIG_MAC80211=m ++CONFIG_MAC80211=y + CONFIG_MAC80211_HAS_RC=y + CONFIG_MAC80211_RC_MINSTREL=y + CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +@@ -1543,7 +1678,7 @@ CONFIG_MAC80211_DEBUGFS=y + # CONFIG_MAC80211_MESSAGE_TRACING is not set + # CONFIG_MAC80211_DEBUG_MENU is not set + CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +-CONFIG_RFKILL=m ++CONFIG_RFKILL=y + CONFIG_RFKILL_LEDS=y + CONFIG_RFKILL_INPUT=y + CONFIG_RFKILL_GPIO=m +@@ -1573,6 +1708,7 @@ CONFIG_FAILOVER=y + CONFIG_ETHTOOL_NETLINK=y + CONFIG_NETACC_BPF=y + CONFIG_NETACC_TERRACE=y ++# CONFIG_ETH_CAQM is not set + + # + 
# Device Drivers +@@ -1595,6 +1731,7 @@ CONFIG_PCIEASPM_DEFAULT=y + CONFIG_PCIE_PME=y + CONFIG_PCIE_DPC=y + # CONFIG_PCIE_PTM is not set ++# CONFIG_PCIE_EDR is not set + CONFIG_PCI_MSI=y + CONFIG_PCI_QUIRKS=y + # CONFIG_PCI_DEBUG is not set +@@ -1606,6 +1743,7 @@ CONFIG_PCI_ECAM=y + CONFIG_PCI_IOV=y + CONFIG_PCI_PRI=y + CONFIG_PCI_PASID=y ++CONFIG_PCI_LABEL=y + # CONFIG_PCI_DYNAMIC_OF_NODES is not set + # CONFIG_PCIE_BUS_TUNE_OFF is not set + CONFIG_PCIE_BUS_DEFAULT=y +@@ -1615,6 +1753,7 @@ CONFIG_PCIE_BUS_DEFAULT=y + CONFIG_VGA_ARB=y + CONFIG_VGA_ARB_MAX_GPUS=64 + CONFIG_HOTPLUG_PCI=y ++# CONFIG_HOTPLUG_PCI_ACPI is not set + # CONFIG_HOTPLUG_PCI_CPCI is not set + CONFIG_HOTPLUG_PCI_SHPC=y + +@@ -1625,6 +1764,8 @@ CONFIG_HOTPLUG_PCI_SHPC=y + CONFIG_PCI_HOST_COMMON=y + CONFIG_PCI_HOST_GENERIC=y + CONFIG_PCIE_MICROCHIP_HOST=y ++# CONFIG_PCIE_RCAR_HOST is not set ++# CONFIG_PCIE_RCAR_EP is not set + CONFIG_PCIE_XILINX=y + + # +@@ -1636,6 +1777,7 @@ CONFIG_PCIE_CADENCE_EP=y + CONFIG_PCIE_CADENCE_PLAT=y + CONFIG_PCIE_CADENCE_PLAT_HOST=y + CONFIG_PCIE_CADENCE_PLAT_EP=y ++CONFIG_PCIE_CADENCE_SOPHGO=y + CONFIG_PCI_J721E=y + CONFIG_PCI_J721E_HOST=y + # CONFIG_PCI_J721E_EP is not set +@@ -1647,6 +1789,7 @@ CONFIG_PCI_J721E_HOST=y + CONFIG_PCIE_DW=y + CONFIG_PCIE_DW_HOST=y + CONFIG_PCIE_DW_EP=y ++CONFIG_PCIE_DW_SOPHGO=y + # CONFIG_PCI_MESON is not set + CONFIG_PCIE_DW_PLAT=y + CONFIG_PCIE_DW_PLAT_HOST=y +@@ -1697,7 +1840,9 @@ CONFIG_FW_LOADER=y + CONFIG_FW_LOADER_DEBUG=y + CONFIG_EXTRA_FIRMWARE="" + # CONFIG_FW_LOADER_USER_HELPER is not set +-# CONFIG_FW_LOADER_COMPRESS is not set ++CONFIG_FW_LOADER_COMPRESS=y ++# CONFIG_FW_LOADER_COMPRESS_XZ is not set ++CONFIG_FW_LOADER_COMPRESS_ZSTD=y + CONFIG_FW_CACHE=y + # CONFIG_FW_UPLOAD is not set + # end of Firmware loader +@@ -1709,10 +1854,12 @@ CONFIG_WANT_DEV_COREDUMP=y + # CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set + # CONFIG_TEST_ASYNC_DRIVER_PROBE is not set + CONFIG_GENERIC_CPU_DEVICES=y ++CONFIG_SOC_BUS=y + CONFIG_REGMAP=y +-CONFIG_REGMAP_I2C=m +-CONFIG_REGMAP_SPI=m ++CONFIG_REGMAP_I2C=y ++CONFIG_REGMAP_SPI=y + CONFIG_REGMAP_MMIO=y ++CONFIG_REGMAP_IRQ=y + CONFIG_DMA_SHARED_BUFFER=y + # CONFIG_DMA_FENCE_TRACE is not set + CONFIG_GENERIC_ARCH_TOPOLOGY=y +@@ -1724,6 +1871,8 @@ CONFIG_GENERIC_ARCH_NUMA=y + # Bus devices + # + # CONFIG_MOXTET is not set ++# CONFIG_SUN50I_DE2_BUS is not set ++# CONFIG_SUNXI_RSB is not set + # CONFIG_MHI_BUS is not set + # CONFIG_MHI_BUS_EP is not set + # end of Bus devices +@@ -1747,6 +1896,10 @@ CONFIG_PROC_EVENTS=y + # end of ARM System Control and Management Interface Protocol + + # CONFIG_FIRMWARE_MEMMAP is not set ++CONFIG_DMIID=y ++# CONFIG_DMI_SYSFS is not set ++# CONFIG_ISCSI_IBFT is not set ++# CONFIG_FW_CFG_SYSFS is not set + CONFIG_SYSFB=y + CONFIG_SYSFB_SIMPLEFB=y + # CONFIG_GOOGLE_FIRMWARE is not set +@@ -1767,6 +1920,7 @@ CONFIG_EFI_GENERIC_STUB=y + # CONFIG_RESET_ATTACK_MITIGATION is not set + # CONFIG_EFI_DISABLE_PCI_DMA is not set + CONFIG_EFI_EARLYCON=y ++# CONFIG_EFI_CUSTOM_SSDT_OVERLAYS is not set + # CONFIG_EFI_DISABLE_RUNTIME is not set + # CONFIG_EFI_COCO_SECRET is not set + # end of EFI (Extensible Firmware Interface) Support +@@ -1775,11 +1929,14 @@ CONFIG_EFI_EARLYCON=y + # Tegra firmware driver + # + # end of Tegra firmware driver ++ ++CONFIG_TH1520_AON=y ++CONFIG_TH1520_AON_PD=y + # end of Firmware Drivers + + # CONFIG_GNSS is not set +-CONFIG_MTD=m +-# CONFIG_MTD_TESTS is not set ++CONFIG_MTD=y ++CONFIG_MTD_TESTS=m + + # + # Partition parsers +@@ -1793,9 +1950,8 @@ CONFIG_MTD_OF_PARTS=m + # + 
# User Modules And Translation Layers + # +-CONFIG_MTD_BLKDEVS=m +-CONFIG_MTD_BLOCK=m +-# CONFIG_MTD_BLOCK_RO is not set ++CONFIG_MTD_BLKDEVS=y ++CONFIG_MTD_BLOCK=y + + # + # Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. +@@ -1862,8 +2018,8 @@ CONFIG_MTD_PHYSMAP_OF=y + # CONFIG_MTD_MCHP23K256 is not set + # CONFIG_MTD_MCHP48L640 is not set + # CONFIG_MTD_SST25L is not set +-# CONFIG_MTD_SLRAM is not set +-# CONFIG_MTD_PHRAM is not set ++CONFIG_MTD_SLRAM=m ++CONFIG_MTD_PHRAM=m + # CONFIG_MTD_MTDRAM is not set + CONFIG_MTD_BLOCK2MTD=m + +@@ -1876,13 +2032,15 @@ CONFIG_MTD_BLOCK2MTD=m + # + # NAND + # ++CONFIG_MTD_NAND_CORE=y + # CONFIG_MTD_ONENAND is not set + # CONFIG_MTD_RAW_NAND is not set +-# CONFIG_MTD_SPI_NAND is not set ++CONFIG_MTD_SPI_NAND=y + + # + # ECC engine support + # ++CONFIG_MTD_NAND_ECC=y + # CONFIG_MTD_NAND_ECC_SW_HAMMING is not set + # CONFIG_MTD_NAND_ECC_SW_BCH is not set + # CONFIG_MTD_NAND_ECC_MXIC is not set +@@ -1895,12 +2053,13 @@ CONFIG_MTD_BLOCK2MTD=m + # CONFIG_MTD_LPDDR is not set + # end of LPDDR & LPDDR2 PCM memory drivers + +-CONFIG_MTD_SPI_NOR=m ++CONFIG_MTD_SPI_NOR=y + CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y + # CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set + CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y + # CONFIG_MTD_SPI_NOR_SWP_KEEP is not set +-CONFIG_MTD_UBI=m ++CONFIG_SPI_SOPHGO_SPIFMC=m ++CONFIG_MTD_UBI=y + CONFIG_MTD_UBI_WL_THRESHOLD=4096 + CONFIG_MTD_UBI_BEB_LIMIT=20 + # CONFIG_MTD_UBI_FASTMAP is not set +@@ -1921,6 +2080,13 @@ CONFIG_OF_RESOLVE=y + CONFIG_OF_OVERLAY=y + CONFIG_OF_NUMA=y + # CONFIG_PARPORT is not set ++CONFIG_PNP=y ++CONFIG_PNP_DEBUG_MESSAGES=y ++ ++# ++# Protocols ++# ++CONFIG_PNPACPI=y + CONFIG_BLK_DEV=y + CONFIG_BLK_DEV_NULL_BLK=m + CONFIG_CDROM=y +@@ -1939,7 +2105,7 @@ CONFIG_BLK_DEV_LOOP=y + CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 + CONFIG_BLK_DEV_DRBD=m + # CONFIG_DRBD_FAULT_INJECTION is not set +-CONFIG_BLK_DEV_NBD=m ++CONFIG_BLK_DEV_NBD=y + CONFIG_BLK_DEV_RAM=m + CONFIG_BLK_DEV_RAM_COUNT=16 + CONFIG_BLK_DEV_RAM_SIZE=16384 +@@ -2008,7 +2174,7 @@ CONFIG_MISC_RTSX=m + # + # EEPROM support + # +-# CONFIG_EEPROM_AT24 is not set ++CONFIG_EEPROM_AT24=y + # CONFIG_EEPROM_AT25 is not set + CONFIG_EEPROM_LEGACY=m + CONFIG_EEPROM_MAX6875=m +@@ -2028,7 +2194,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y + # CONFIG_TI_ST is not set + # end of Texas Instruments shared transport line discipline + +-# CONFIG_SENSORS_LIS3_SPI is not set + CONFIG_SENSORS_LIS3_I2C=m + CONFIG_ALTERA_STAPL=m + # CONFIG_GENWQE is not set +@@ -2109,7 +2274,9 @@ CONFIG_SCSI_MPT3SAS=m + CONFIG_SCSI_MPT2SAS_MAX_SGE=128 + CONFIG_SCSI_MPT3SAS_MAX_SGE=128 + CONFIG_SCSI_MPT2SAS=m ++# CONFIG_SCSI_PS3STOR is not set + # CONFIG_SCSI_MPI3MR is not set ++# CONFIG_SCSI_LEAPIORAID is not set + CONFIG_SCSI_SMARTPQI=m + # CONFIG_SCSI_HPTIOP is not set + # CONFIG_SCSI_BUSLOGIC is not set +@@ -2156,8 +2323,11 @@ CONFIG_SCSI_DH_ALUA=y + + CONFIG_ATA=y + CONFIG_SATA_HOST=y ++CONFIG_PATA_TIMINGS=y + CONFIG_ATA_VERBOSE_ERROR=y + CONFIG_ATA_FORCE=y ++CONFIG_ATA_ACPI=y ++# CONFIG_SATA_ZPODD is not set + CONFIG_SATA_PMP=y + + # +@@ -2168,6 +2338,7 @@ CONFIG_SATA_MOBILE_LPM_POLICY=0 + CONFIG_SATA_AHCI_PLATFORM=y + # CONFIG_AHCI_DWC is not set + # CONFIG_AHCI_CEVA is not set ++# CONFIG_AHCI_SUNXI is not set + # CONFIG_SATA_INIC162X is not set + # CONFIG_SATA_ACARD_AHCI is not set + # CONFIG_SATA_SIL24 is not set +@@ -2189,6 +2360,7 @@ CONFIG_ATA_PIIX=m + # CONFIG_SATA_MV is not set + # CONFIG_SATA_NV is not set + # CONFIG_SATA_PROMISE is not set ++# CONFIG_SATA_RCAR is not set + # 
CONFIG_SATA_SIL is not set + # CONFIG_SATA_SIS is not set + # CONFIG_SATA_SVW is not set +@@ -2247,6 +2419,7 @@ CONFIG_ATA_PIIX=m + # + # Generic fallback / legacy drivers + # ++# CONFIG_PATA_ACPI is not set + CONFIG_ATA_GENERIC=m + # CONFIG_PATA_LEGACY is not set + CONFIG_MD=y +@@ -2265,14 +2438,14 @@ CONFIG_BCACHE=m + # CONFIG_BCACHE_CLOSURES_DEBUG is not set + # CONFIG_BCACHE_ASYNC_REGISTRATION is not set + CONFIG_BLK_DEV_DM_BUILTIN=y +-CONFIG_BLK_DEV_DM=m ++CONFIG_BLK_DEV_DM=y + CONFIG_DM_DEBUG=y + CONFIG_DM_BUFIO=m + # CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set + CONFIG_DM_BIO_PRISON=m + CONFIG_DM_PERSISTENT_DATA=m + # CONFIG_DM_UNSTRIPED is not set +-CONFIG_DM_CRYPT=m ++CONFIG_DM_CRYPT=y + CONFIG_DM_SNAPSHOT=m + CONFIG_DM_THIN_PROVISIONING=m + CONFIG_DM_CACHE=m +@@ -2292,6 +2465,7 @@ CONFIG_DM_MULTIPATH_ST=m + # CONFIG_DM_MULTIPATH_IOA is not set + CONFIG_DM_DELAY=m + # CONFIG_DM_DUST is not set ++# CONFIG_DM_INIT is not set + CONFIG_DM_UEVENT=y + CONFIG_DM_FLAKEY=m + CONFIG_DM_VERITY=m +@@ -2322,7 +2496,7 @@ CONFIG_ISCSI_TARGET_CXGB4=m + # end of IEEE 1394 (FireWire) support + + CONFIG_NETDEVICES=y +-CONFIG_MII=m ++CONFIG_MII=y + CONFIG_NET_CORE=y + CONFIG_BONDING=m + CONFIG_DUMMY=m +@@ -2366,10 +2540,13 @@ CONFIG_VSOCKMON=m + CONFIG_ETHERNET=y + CONFIG_MDIO=m + # CONFIG_NET_VENDOR_3COM is not set ++# CONFIG_NET_VENDOR_3SNIC is not set + # CONFIG_NET_VENDOR_ADAPTEC is not set + # CONFIG_NET_VENDOR_AGERE is not set + CONFIG_NET_VENDOR_ALACRITECH=y + # CONFIG_SLICOSS is not set ++CONFIG_NET_VENDOR_ALLWINNER=y ++# CONFIG_SUN4I_EMAC is not set + # CONFIG_NET_VENDOR_ALTEON is not set + # CONFIG_ALTERA_TSE is not set + CONFIG_NET_VENDOR_AMAZON=y +@@ -2406,14 +2583,13 @@ CONFIG_BNXT_DCB=y + # CONFIG_BNXT_HWMON is not set + CONFIG_NET_VENDOR_CADENCE=y + CONFIG_MACB=y +-CONFIG_MACB_USE_HWSTAMP=y + # CONFIG_MACB_PCI is not set + CONFIG_NET_VENDOR_CAVIUM=y + CONFIG_THUNDER_NIC_PF=m + CONFIG_THUNDER_NIC_VF=m + CONFIG_THUNDER_NIC_BGX=m + CONFIG_THUNDER_NIC_RGX=m +-CONFIG_CAVIUM_PTP=y ++# CONFIG_CAVIUM_PTP is not set + CONFIG_LIQUIDIO_CORE=m + CONFIG_LIQUIDIO=m + CONFIG_LIQUIDIO_VF=m +@@ -2441,7 +2617,10 @@ CONFIG_NET_VENDOR_ENGLEDER=y + CONFIG_NET_VENDOR_FUNGIBLE=y + # CONFIG_FUN_ETH is not set + CONFIG_NET_VENDOR_GOOGLE=y ++CONFIG_NET_VENDOR_HISILICON=y ++# CONFIG_HIBMCGE is not set + CONFIG_NET_VENDOR_HUAWEI=y ++# CONFIG_BMA is not set + # CONFIG_NET_VENDOR_I825XX is not set + CONFIG_NET_VENDOR_INTEL=y + # CONFIG_E100 is not set +@@ -2464,8 +2643,13 @@ CONFIG_ICE=m + CONFIG_ICE_SWITCHDEV=y + CONFIG_FM10K=m + # CONFIG_IGC is not set ++CONFIG_NET_VENDOR_LINKDATA=y + CONFIG_NET_VENDOR_MUCSE=y + # CONFIG_MXGBE is not set ++# CONFIG_MXGBEVF is not set ++# CONFIG_MXGBEM is not set ++# CONFIG_MGBE is not set ++# CONFIG_MGBEVF is not set + # CONFIG_JME is not set + CONFIG_NET_VENDOR_ADI=y + # CONFIG_ADIN1110 is not set +@@ -2506,6 +2690,8 @@ CONFIG_MLXFW=m + CONFIG_NET_VENDOR_MICROSEMI=y + # CONFIG_MSCC_OCELOT_SWITCH is not set + CONFIG_NET_VENDOR_MICROSOFT=y ++CONFIG_NET_VENDOR_MOTORCOMM=y ++# CONFIG_YT6801 is not set + CONFIG_NET_VENDOR_MYRI=y + # CONFIG_MYRI10GE is not set + # CONFIG_FEALNX is not set +@@ -2539,6 +2725,7 @@ CONFIG_QED_OOO=y + # CONFIG_NET_VENDOR_BROCADE is not set + CONFIG_NET_VENDOR_QUALCOMM=y + # CONFIG_QCA7000_SPI is not set ++# CONFIG_QCA7000_UART is not set + CONFIG_QCOM_EMAC=m + # CONFIG_RMNET is not set + # CONFIG_NET_VENDOR_RDC is not set +@@ -2564,13 +2751,24 @@ CONFIG_SFC_MCDI_MON=y + CONFIG_SFC_SRIOV=y + CONFIG_SFC_MCDI_LOGGING=y + # CONFIG_SFC_FALCON is not set 
+-# CONFIG_SFC_SIENA is not set + CONFIG_NET_VENDOR_SMSC=y + CONFIG_EPIC100=m + CONFIG_SMSC911X=m + CONFIG_SMSC9420=m + # CONFIG_NET_VENDOR_SOCIONEXT is not set +-# CONFIG_NET_VENDOR_STMICRO is not set ++CONFIG_NET_VENDOR_STMICRO=y ++CONFIG_STMMAC_ETH=y ++# CONFIG_STMMAC_SELFTESTS is not set ++CONFIG_STMMAC_PLATFORM=y ++# CONFIG_DWMAC_DWC_QOS_ETH is not set ++CONFIG_DWMAC_GENERIC=y ++CONFIG_DWMAC_STARFIVE=m ++CONFIG_DWMAC_SUNXI=y ++CONFIG_DWMAC_SUN8I=y ++CONFIG_DWMAC_XUANTIE=y ++CONFIG_DWMAC_SOPHGO=y ++# CONFIG_DWMAC_INTEL_PLAT is not set ++# CONFIG_STMMAC_PCI is not set + # CONFIG_NET_VENDOR_SUN is not set + # CONFIG_NET_VENDOR_SYNOPSYS is not set + # CONFIG_NET_VENDOR_TEHUTI is not set +@@ -2584,8 +2782,14 @@ CONFIG_NGBE=m + CONFIG_TXGBE=m + # CONFIG_NET_VENDOR_WIZNET is not set + # CONFIG_NET_VENDOR_XILINX is not set ++CONFIG_NET_VENDOR_BZWX=y ++# CONFIG_NCE is not set ++CONFIG_NET_VENDOR_SPACEMIT=y ++CONFIG_K1_EMAC=m ++CONFIG_NET_VENDOR_NEBULA_MATRIX=y + # CONFIG_FDDI is not set + # CONFIG_HIPPI is not set ++# CONFIG_NET_SB1000 is not set + CONFIG_PHYLINK=y + CONFIG_PHYLIB=y + CONFIG_SWPHY=y +@@ -2661,6 +2865,7 @@ CONFIG_CAN_CALC_BITTIMING=y + # CONFIG_CAN_GRCAN is not set + # CONFIG_CAN_KVASER_PCIEFD is not set + CONFIG_CAN_SLCAN=m ++# CONFIG_CAN_SUN4I is not set + CONFIG_CAN_C_CAN=m + CONFIG_CAN_C_CAN_PLATFORM=m + CONFIG_CAN_C_CAN_PCI=m +@@ -2672,6 +2877,8 @@ CONFIG_CAN_CC770_PLATFORM=m + # CONFIG_CAN_IFI_CANFD is not set + # CONFIG_CAN_M_CAN is not set + # CONFIG_CAN_PEAK_PCIEFD is not set ++# CONFIG_CAN_RCAR is not set ++# CONFIG_CAN_RCAR_CANFD is not set + CONFIG_CAN_SJA1000=m + CONFIG_CAN_EMS_PCI=m + # CONFIG_CAN_F81601 is not set +@@ -2711,7 +2918,9 @@ CONFIG_MDIO_DEVICE=y + CONFIG_MDIO_BUS=y + CONFIG_FWNODE_MDIO=y + CONFIG_OF_MDIO=y ++CONFIG_ACPI_MDIO=y + CONFIG_MDIO_DEVRES=y ++# CONFIG_MDIO_SUN4I is not set + CONFIG_MDIO_BITBANG=m + CONFIG_MDIO_BCM_UNIMAC=m + CONFIG_MDIO_CAVIUM=m +@@ -2728,6 +2937,7 @@ CONFIG_MDIO_THUNDER=m + # + # MDIO Multiplexers + # ++CONFIG_MDIO_BUS_MUX=y + # CONFIG_MDIO_BUS_MUX_GPIO is not set + # CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set + # CONFIG_MDIO_BUS_MUX_MMIOREG is not set +@@ -2735,7 +2945,7 @@ CONFIG_MDIO_THUNDER=m + # + # PCS device drivers + # +-CONFIG_PCS_XPCS=m ++CONFIG_PCS_XPCS=y + # end of PCS device drivers + + CONFIG_PPP=m +@@ -2768,8 +2978,8 @@ CONFIG_USB_RTL8150=m + CONFIG_USB_RTL8152=m + CONFIG_USB_LAN78XX=m + CONFIG_USB_USBNET=m +-CONFIG_USB_NET_AX8817X=m +-CONFIG_USB_NET_AX88179_178A=m ++# CONFIG_USB_NET_AX8817X is not set ++# CONFIG_USB_NET_AX88179_178A is not set + CONFIG_USB_NET_CDCETHER=m + CONFIG_USB_NET_CDC_EEM=m + CONFIG_USB_NET_CDC_NCM=m +@@ -2781,7 +2991,7 @@ CONFIG_USB_NET_SR9700=m + CONFIG_USB_NET_SMSC75XX=m + CONFIG_USB_NET_SMSC95XX=m + CONFIG_USB_NET_GL620A=m +-CONFIG_USB_NET_NET1080=m ++# CONFIG_USB_NET_NET1080 is not set + CONFIG_USB_NET_PLUSB=m + CONFIG_USB_NET_MCS7830=m + CONFIG_USB_NET_RNDIS_HOST=m +@@ -2865,7 +3075,39 @@ CONFIG_RT2X00_LIB_CRYPTO=y + CONFIG_RT2X00_LIB_LEDS=y + # CONFIG_RT2X00_LIB_DEBUGFS is not set + # CONFIG_RT2X00_DEBUG is not set +-# CONFIG_WLAN_VENDOR_REALTEK is not set ++CONFIG_WLAN_VENDOR_REALTEK=y ++# CONFIG_RTL8180 is not set ++# CONFIG_RTL8187 is not set ++CONFIG_RTL_CARDS=m ++# CONFIG_RTL8192CE is not set ++# CONFIG_RTL8192SE is not set ++# CONFIG_RTL8192DE is not set ++# CONFIG_RTL8723AE is not set ++# CONFIG_RTL8723BE is not set ++# CONFIG_RTL8188EE is not set ++# CONFIG_RTL8192EE is not set ++# CONFIG_RTL8821AE is not set ++# CONFIG_RTL8192CU is not set ++# CONFIG_RTL8XXXU is not set 
++CONFIG_RTW88=m ++CONFIG_RTW88_CORE=m ++CONFIG_RTW88_SDIO=m ++CONFIG_RTW88_8723D=m ++# CONFIG_RTW88_8822BE is not set ++# CONFIG_RTW88_8822BS is not set ++# CONFIG_RTW88_8822BU is not set ++# CONFIG_RTW88_8822CE is not set ++# CONFIG_RTW88_8822CS is not set ++# CONFIG_RTW88_8822CU is not set ++# CONFIG_RTW88_8723DE is not set ++CONFIG_RTW88_8723DS=m ++# CONFIG_RTW88_8723DU is not set ++# CONFIG_RTW88_8821CE is not set ++# CONFIG_RTW88_8821CS is not set ++# CONFIG_RTW88_8821CU is not set ++# CONFIG_RTW88_DEBUG is not set ++# CONFIG_RTW88_DEBUGFS is not set ++# CONFIG_RTW89 is not set + # CONFIG_WLAN_VENDOR_RSI is not set + CONFIG_WLAN_VENDOR_SILABS=y + # CONFIG_WFX is not set +@@ -2876,6 +3118,10 @@ CONFIG_WLAN_VENDOR_SILABS=y + # CONFIG_USB_NET_RNDIS_WLAN is not set + # CONFIG_MAC80211_HWSIM is not set + # CONFIG_VIRT_WIFI is not set ++CONFIG_AIC_WLAN_SUPPORT=m ++CONFIG_AIC_FW_PATH="/lib/firmware/aic8800" ++CONFIG_AIC8800_WLAN_SUPPORT=m ++CONFIG_AIC8800_BTLPM_SUPPORT=m + CONFIG_WAN=y + CONFIG_HDLC=m + CONFIG_HDLC_RAW=m +@@ -2900,6 +3146,7 @@ CONFIG_HDLC_PPP=m + # end of Wireless WAN + + # CONFIG_VMXNET3 is not set ++# CONFIG_FUJITSU_ES is not set + CONFIG_USB4_NET=m + # CONFIG_NETDEVSIM is not set + CONFIG_NET_FAILOVER=y +@@ -2930,6 +3177,7 @@ CONFIG_INPUT_EVDEV=y + # Input Device Drivers + # + CONFIG_INPUT_KEYBOARD=y ++# CONFIG_KEYBOARD_ADC is not set + # CONFIG_KEYBOARD_ADP5588 is not set + # CONFIG_KEYBOARD_ADP5589 is not set + CONFIG_KEYBOARD_ATKBD=y +@@ -2955,6 +3203,7 @@ CONFIG_KEYBOARD_GPIO=y + # CONFIG_KEYBOARD_GOLDFISH_EVENTS is not set + # CONFIG_KEYBOARD_STOWAWAY is not set + # CONFIG_KEYBOARD_SUNKBD is not set ++CONFIG_KEYBOARD_SUN4I_LRADC=m + # CONFIG_KEYBOARD_OMAP4 is not set + # CONFIG_KEYBOARD_TM2_TOUCHKEY is not set + # CONFIG_KEYBOARD_XTKBD is not set +@@ -2987,7 +3236,83 @@ CONFIG_MOUSE_SYNAPTICS_I2C=m + CONFIG_MOUSE_SYNAPTICS_USB=m + # CONFIG_INPUT_JOYSTICK is not set + # CONFIG_INPUT_TABLET is not set +-# CONFIG_INPUT_TOUCHSCREEN is not set ++CONFIG_INPUT_TOUCHSCREEN=y ++# CONFIG_TOUCHSCREEN_ADS7846 is not set ++# CONFIG_TOUCHSCREEN_AD7877 is not set ++# CONFIG_TOUCHSCREEN_AD7879 is not set ++# CONFIG_TOUCHSCREEN_ADC is not set ++# CONFIG_TOUCHSCREEN_AR1021_I2C is not set ++# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set ++# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set ++# CONFIG_TOUCHSCREEN_BU21013 is not set ++# CONFIG_TOUCHSCREEN_BU21029 is not set ++# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set ++# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set ++# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set ++# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set ++# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set ++# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set ++# CONFIG_TOUCHSCREEN_CYTTSP5 is not set ++# CONFIG_TOUCHSCREEN_DYNAPRO is not set ++# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set ++# CONFIG_TOUCHSCREEN_EETI is not set ++# CONFIG_TOUCHSCREEN_EGALAX is not set ++# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set ++# CONFIG_TOUCHSCREEN_EXC3000 is not set ++# CONFIG_TOUCHSCREEN_FUJITSU is not set ++# CONFIG_TOUCHSCREEN_GOODIX is not set ++# CONFIG_TOUCHSCREEN_HIDEEP is not set ++# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set ++# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set ++# CONFIG_TOUCHSCREEN_ILI210X is not set ++# CONFIG_TOUCHSCREEN_ILITEK is not set ++# CONFIG_TOUCHSCREEN_S6SY761 is not set ++# CONFIG_TOUCHSCREEN_GUNZE is not set ++# CONFIG_TOUCHSCREEN_EKTF2127 is not set ++# CONFIG_TOUCHSCREEN_ELAN is not set ++# CONFIG_TOUCHSCREEN_ELO is not set ++# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set ++# 
CONFIG_TOUCHSCREEN_WACOM_I2C is not set ++# CONFIG_TOUCHSCREEN_MAX11801 is not set ++# CONFIG_TOUCHSCREEN_MCS5000 is not set ++# CONFIG_TOUCHSCREEN_MMS114 is not set ++# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set ++# CONFIG_TOUCHSCREEN_MSG2638 is not set ++# CONFIG_TOUCHSCREEN_MTOUCH is not set ++# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set ++# CONFIG_TOUCHSCREEN_IMAGIS is not set ++# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set ++# CONFIG_TOUCHSCREEN_INEXIO is not set ++# CONFIG_TOUCHSCREEN_PENMOUNT is not set ++# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set ++# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set ++# CONFIG_TOUCHSCREEN_TOUCHWIN is not set ++# CONFIG_TOUCHSCREEN_PIXCIR is not set ++# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set ++# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set ++# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set ++# CONFIG_TOUCHSCREEN_TSC_SERIO is not set ++# CONFIG_TOUCHSCREEN_TSC2004 is not set ++# CONFIG_TOUCHSCREEN_TSC2005 is not set ++# CONFIG_TOUCHSCREEN_TSC2007 is not set ++# CONFIG_TOUCHSCREEN_RM_TS is not set ++# CONFIG_TOUCHSCREEN_SILEAD is not set ++# CONFIG_TOUCHSCREEN_SIS_I2C is not set ++# CONFIG_TOUCHSCREEN_ST1232 is not set ++# CONFIG_TOUCHSCREEN_STMFTS is not set ++# CONFIG_TOUCHSCREEN_SUN4I is not set ++# CONFIG_TOUCHSCREEN_SUR40 is not set ++# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set ++# CONFIG_TOUCHSCREEN_SX8654 is not set ++# CONFIG_TOUCHSCREEN_TPS6507X is not set ++# CONFIG_TOUCHSCREEN_ZET6223 is not set ++# CONFIG_TOUCHSCREEN_ZFORCE is not set ++# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set ++# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set ++# CONFIG_TOUCHSCREEN_IQS5XX is not set ++# CONFIG_TOUCHSCREEN_IQS7211 is not set ++# CONFIG_TOUCHSCREEN_ZINITIX is not set ++# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set + CONFIG_INPUT_MISC=y + # CONFIG_INPUT_AD714X is not set + # CONFIG_INPUT_ATMEL_CAPTOUCH is not set +@@ -3004,7 +3329,7 @@ CONFIG_INPUT_MISC=y + # CONFIG_INPUT_YEALINK is not set + # CONFIG_INPUT_CM109 is not set + # CONFIG_INPUT_REGULATOR_HAPTIC is not set +-CONFIG_INPUT_UINPUT=m ++CONFIG_INPUT_UINPUT=y + # CONFIG_INPUT_PCF8574 is not set + # CONFIG_INPUT_PWM_BEEPER is not set + # CONFIG_INPUT_PWM_VIBRA is not set +@@ -3017,9 +3342,11 @@ CONFIG_INPUT_UINPUT=m + # CONFIG_INPUT_IQS626A is not set + # CONFIG_INPUT_IQS7222 is not set + # CONFIG_INPUT_CMA3000 is not set ++# CONFIG_INPUT_SOC_BUTTON_ARRAY is not set + # CONFIG_INPUT_DRV260X_HAPTICS is not set + # CONFIG_INPUT_DRV2665_HAPTICS is not set + # CONFIG_INPUT_DRV2667_HAPTICS is not set ++CONFIG_INPUT_SPACEMIT_P1_PWRKEY=m + CONFIG_RMI4_CORE=m + CONFIG_RMI4_I2C=m + CONFIG_RMI4_SPI=m +@@ -3048,6 +3375,7 @@ CONFIG_SERIO_ALTERA_PS2=m + # CONFIG_SERIO_PS2MULT is not set + CONFIG_SERIO_ARC_PS2=m + # CONFIG_SERIO_APBPS2 is not set ++# CONFIG_SERIO_SUN4I_PS2 is not set + # CONFIG_SERIO_GPIO_PS2 is not set + # CONFIG_USERIO is not set + # CONFIG_GAMEPORT is not set +@@ -3075,6 +3403,7 @@ CONFIG_LDISC_AUTOLOAD=y + CONFIG_SERIAL_EARLYCON=y + CONFIG_SERIAL_8250=y + # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set ++CONFIG_SERIAL_8250_PNP=y + CONFIG_SERIAL_8250_16550A_VARIANTS=y + # CONFIG_SERIAL_8250_FINTEK is not set + CONFIG_SERIAL_8250_CONSOLE=y +@@ -3082,8 +3411,8 @@ CONFIG_SERIAL_8250_DMA=y + CONFIG_SERIAL_8250_PCILIB=y + CONFIG_SERIAL_8250_PCI=y + CONFIG_SERIAL_8250_EXAR=y +-CONFIG_SERIAL_8250_NR_UARTS=32 +-CONFIG_SERIAL_8250_RUNTIME_UARTS=4 ++CONFIG_SERIAL_8250_NR_UARTS=6 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=6 + CONFIG_SERIAL_8250_EXTENDED=y + CONFIG_SERIAL_8250_MANY_PORTS=y + # CONFIG_SERIAL_8250_PCI1XXXX is 
not set +@@ -3092,6 +3421,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y + CONFIG_SERIAL_8250_RSA=y + CONFIG_SERIAL_8250_DWLIB=y + CONFIG_SERIAL_8250_DW=y ++# CONFIG_SERIAL_8250_EM is not set + CONFIG_SERIAL_8250_RT288X=y + CONFIG_SERIAL_8250_PERICOM=y + CONFIG_SERIAL_OF_PLATFORM=y +@@ -3105,7 +3435,14 @@ CONFIG_SERIAL_OF_PLATFORM=y + # CONFIG_SERIAL_KGDB_NMI is not set + # CONFIG_SERIAL_MAX3100 is not set + # CONFIG_SERIAL_MAX310X is not set ++CONFIG_SERIAL_SPACEMIT_K1X=y ++CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE=y + # CONFIG_SERIAL_UARTLITE is not set ++CONFIG_SERIAL_SH_SCI=y ++CONFIG_SERIAL_SH_SCI_NR_UARTS=18 ++CONFIG_SERIAL_SH_SCI_CONSOLE=y ++CONFIG_SERIAL_SH_SCI_EARLYCON=y ++CONFIG_SERIAL_SH_SCI_DMA=y + CONFIG_SERIAL_CORE=y + CONFIG_SERIAL_CORE_CONSOLE=y + CONFIG_CONSOLE_POLL=y +@@ -3136,10 +3473,12 @@ CONFIG_N_GSM=m + # CONFIG_NULL_TTY is not set + CONFIG_HVC_DRIVER=y + # CONFIG_RPMSG_TTY is not set +-# CONFIG_SERIAL_DEV_BUS is not set ++CONFIG_SERIAL_DEV_BUS=y ++CONFIG_SERIAL_DEV_CTRL_TTYPORT=y + # CONFIG_TTY_PRINTK is not set + CONFIG_VIRTIO_CONSOLE=y + CONFIG_IPMI_HANDLER=m ++CONFIG_IPMI_DMI_DECODE=y + CONFIG_IPMI_PLAT_DATA=y + # CONFIG_IPMI_PANIC_EVENT is not set + CONFIG_IPMI_DEVICE_INTERFACE=m +@@ -3154,6 +3493,7 @@ CONFIG_HW_RANDOM=y + CONFIG_HW_RANDOM_TIMERIOMEM=m + # CONFIG_HW_RANDOM_BA431 is not set + CONFIG_HW_RANDOM_VIRTIO=y ++CONFIG_HW_RANDOM_OPTEE=y + # CONFIG_HW_RANDOM_CCTRNG is not set + # CONFIG_HW_RANDOM_XIPHERA is not set + # CONFIG_HW_RANDOM_JH7110 is not set +@@ -3172,7 +3512,10 @@ CONFIG_TCG_TIS_I2C_ATMEL=m + CONFIG_TCG_TIS_I2C_INFINEON=m + CONFIG_TCG_TIS_I2C_NUVOTON=m + CONFIG_TCG_ATMEL=m ++# CONFIG_TCG_INFINEON is not set ++CONFIG_TCG_CRB=y + # CONFIG_TCG_VTPM_PROXY is not set ++# CONFIG_TCG_FTPM_TEE is not set + CONFIG_TCG_TIS_ST33ZP24=m + CONFIG_TCG_TIS_ST33ZP24_I2C=m + CONFIG_TCG_TIS_ST33ZP24_SPI=m +@@ -3184,6 +3527,7 @@ CONFIG_TCG_TIS_ST33ZP24_SPI=m + # I2C support + # + CONFIG_I2C=y ++CONFIG_ACPI_I2C_OPREGION=y + CONFIG_I2C_BOARDINFO=y + CONFIG_I2C_COMPAT=y + CONFIG_I2C_CHARDEV=y +@@ -3221,6 +3565,7 @@ CONFIG_I2C_CCGX_UCSI=m + # CONFIG_I2C_ALI15X3 is not set + # CONFIG_I2C_AMD756 is not set + # CONFIG_I2C_AMD8111 is not set ++# CONFIG_I2C_AMD_MP2 is not set + # CONFIG_I2C_I801 is not set + # CONFIG_I2C_ISCH is not set + # CONFIG_I2C_PIIX4 is not set +@@ -3229,9 +3574,15 @@ CONFIG_I2C_NFORCE2=m + # CONFIG_I2C_SIS5595 is not set + # CONFIG_I2C_SIS630 is not set + # CONFIG_I2C_SIS96X is not set ++CONFIG_I2C_SPACEMIT_K1=y + # CONFIG_I2C_VIA is not set + # CONFIG_I2C_VIAPRO is not set + ++# ++# ACPI drivers ++# ++# CONFIG_I2C_SCMI is not set ++ + # + # I2C system bus drivers (mostly embedded / system-on-chip) + # +@@ -3243,12 +3594,17 @@ CONFIG_I2C_DESIGNWARE_PCI=m + # CONFIG_I2C_EMEV2 is not set + CONFIG_I2C_GPIO=m + # CONFIG_I2C_GPIO_FAULT_INJECTOR is not set ++CONFIG_I2C_MV64XXX=y + # CONFIG_I2C_NOMADIK is not set + # CONFIG_I2C_OCORES is not set + CONFIG_I2C_PCA_PLATFORM=m ++# CONFIG_I2C_RIIC is not set + # CONFIG_I2C_RK3X is not set ++# CONFIG_I2C_RZV2M is not set ++# CONFIG_I2C_SH_MOBILE is not set + CONFIG_I2C_SIMTEC=m + # CONFIG_I2C_XILINX is not set ++# CONFIG_I2C_RCAR is not set + + # + # External I2C/SMBus adapter drivers +@@ -3290,6 +3646,7 @@ CONFIG_SPI_MEM=y + CONFIG_SPI_CADENCE=m + # CONFIG_SPI_CADENCE_QUADSPI is not set + # CONFIG_SPI_CADENCE_XSPI is not set ++CONFIG_SPI_DW_QUAD=y + CONFIG_SPI_DESIGNWARE=y + # CONFIG_SPI_DW_DMA is not set + CONFIG_SPI_DW_PCI=m +@@ -3302,9 +3659,17 @@ CONFIG_SPI_DW_MMIO=y + # CONFIG_SPI_PCI1XXXX is not set + # CONFIG_SPI_PL022 
is not set + # CONFIG_SPI_PXA2XX is not set ++# CONFIG_SPI_RSPI is not set ++# CONFIG_SPI_RZV2M_CSI is not set + # CONFIG_SPI_SC18IS602 is not set ++# CONFIG_SPI_SH_MSIOF is not set ++# CONFIG_SPI_SH_HSPI is not set + CONFIG_SPI_SIFIVE=y + # CONFIG_SPI_SN_F_OSPI is not set ++CONFIG_SPI_SPACEMIT_K1=y ++CONFIG_SPI_SPACEMIT_K1_QSPI=y ++# CONFIG_SPI_SUN4I is not set ++CONFIG_SPI_SUN6I=y + # CONFIG_SPI_MXIC is not set + # CONFIG_SPI_XCOMM is not set + # CONFIG_SPI_XILINX is not set +@@ -3319,7 +3684,7 @@ CONFIG_SPI_SIFIVE=y + # + # SPI Protocol Masters + # +-# CONFIG_SPI_SPIDEV is not set ++CONFIG_SPI_SPIDEV=y + # CONFIG_SPI_LOOPBACK_TEST is not set + # CONFIG_SPI_TLE62X0 is not set + # CONFIG_SPI_SLAVE is not set +@@ -3343,14 +3708,8 @@ CONFIG_PPS_CLIENT_GPIO=m + # + # PTP clock support + # +-CONFIG_PTP_1588_CLOCK=y ++# CONFIG_PTP_1588_CLOCK is not set + CONFIG_PTP_1588_CLOCK_OPTIONAL=y +-CONFIG_DP83640_PHY=m +-# CONFIG_PTP_1588_CLOCK_INES is not set +-# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +-# CONFIG_PTP_1588_CLOCK_IDTCM is not set +-# CONFIG_PTP_1588_CLOCK_MOCK is not set +-# CONFIG_PTP_1588_CLOCK_OCP is not set + # end of PTP clock support + + CONFIG_PINCTRL=y +@@ -3360,26 +3719,57 @@ CONFIG_GENERIC_PINMUX_FUNCTIONS=y + CONFIG_PINCONF=y + CONFIG_GENERIC_PINCONF=y + # CONFIG_DEBUG_PINCTRL is not set ++# CONFIG_PINCTRL_AMD is not set + # CONFIG_PINCTRL_CY8C95X0 is not set + # CONFIG_PINCTRL_MCP23S08 is not set + # CONFIG_PINCTRL_MICROCHIP_SGPIO is not set + # CONFIG_PINCTRL_OCELOT is not set + # CONFIG_PINCTRL_SINGLE is not set ++CONFIG_PINCTRL_SPACEMIT_P1=m + # CONFIG_PINCTRL_STMFX is not set + # CONFIG_PINCTRL_SX150X is not set ++CONFIG_PINCTRL_TH1520=y ++CONFIG_PINCTRL_SPACEMIT_K1X=y + + # + # Renesas pinctrl drivers + # ++CONFIG_PINCTRL_RENESAS=y + # end of Renesas pinctrl drivers + + CONFIG_PINCTRL_STARFIVE_JH7100=y + CONFIG_PINCTRL_STARFIVE_JH7110=y + CONFIG_PINCTRL_STARFIVE_JH7110_SYS=y + CONFIG_PINCTRL_STARFIVE_JH7110_AON=y ++CONFIG_PINCTRL_SUNXI=y ++# CONFIG_PINCTRL_SUN4I_A10 is not set ++# CONFIG_PINCTRL_SUN5I is not set ++# CONFIG_PINCTRL_SUN6I_A31 is not set ++# CONFIG_PINCTRL_SUN6I_A31_R is not set ++# CONFIG_PINCTRL_SUN8I_A23 is not set ++# CONFIG_PINCTRL_SUN8I_A33 is not set ++# CONFIG_PINCTRL_SUN8I_A83T is not set ++# CONFIG_PINCTRL_SUN8I_A83T_R is not set ++# CONFIG_PINCTRL_SUN8I_A23_R is not set ++# CONFIG_PINCTRL_SUN8I_H3 is not set ++# CONFIG_PINCTRL_SUN8I_H3_R is not set ++# CONFIG_PINCTRL_SUN8I_V3S is not set ++# CONFIG_PINCTRL_SUN9I_A80 is not set ++# CONFIG_PINCTRL_SUN9I_A80_R is not set ++CONFIG_PINCTRL_SUN20I_D1=y ++# CONFIG_PINCTRL_SUN50I_A64 is not set ++# CONFIG_PINCTRL_SUN50I_A64_R is not set ++# CONFIG_PINCTRL_SUN50I_A100 is not set ++# CONFIG_PINCTRL_SUN50I_A100_R is not set ++# CONFIG_PINCTRL_SUN50I_H5 is not set ++# CONFIG_PINCTRL_SUN50I_H6 is not set ++# CONFIG_PINCTRL_SUN50I_H6_R is not set ++# CONFIG_PINCTRL_SUN50I_H616 is not set ++# CONFIG_PINCTRL_SUN50I_H616_R is not set + CONFIG_GPIOLIB=y + CONFIG_GPIOLIB_FASTPATH_LIMIT=512 + CONFIG_OF_GPIO=y ++CONFIG_GPIO_ACPI=y + CONFIG_GPIOLIB_IRQCHIP=y + # CONFIG_DEBUG_GPIO is not set + CONFIG_GPIO_SYSFS=y +@@ -3392,6 +3782,7 @@ CONFIG_GPIO_GENERIC=y + # + # CONFIG_GPIO_74XX_MMIO is not set + # CONFIG_GPIO_ALTERA is not set ++# CONFIG_GPIO_AMDPT is not set + CONFIG_GPIO_CADENCE=m + CONFIG_GPIO_DWAPB=y + # CONFIG_GPIO_EXAR is not set +@@ -3402,6 +3793,7 @@ CONFIG_GPIO_GENERIC_PLATFORM=m + # CONFIG_GPIO_LOGICVC is not set + # CONFIG_GPIO_MB86S7X is not set + # CONFIG_GPIO_PL061 is not set ++# CONFIG_GPIO_RCAR is 
not set + CONFIG_GPIO_SIFIVE=y + # CONFIG_GPIO_SYSCON is not set + # CONFIG_GPIO_XILINX is not set +@@ -3417,7 +3809,8 @@ CONFIG_GPIO_SIFIVE=y + # CONFIG_GPIO_GW_PLD is not set + # CONFIG_GPIO_MAX7300 is not set + # CONFIG_GPIO_MAX732X is not set +-# CONFIG_GPIO_PCA953X is not set ++CONFIG_GPIO_PCA953X=y ++CONFIG_GPIO_PCA953X_IRQ=y + # CONFIG_GPIO_PCA9570 is not set + # CONFIG_GPIO_PCF857X is not set + # CONFIG_GPIO_TPIC2810 is not set +@@ -3461,6 +3854,7 @@ CONFIG_GPIO_SIFIVE=y + # CONFIG_GPIO_MOCKUP is not set + # CONFIG_GPIO_VIRTIO is not set + # CONFIG_GPIO_SIM is not set ++CONFIG_GPIO_K1X=y + # end of Virtual GPIO drivers + + # CONFIG_W1 is not set +@@ -3477,6 +3871,7 @@ CONFIG_POWER_RESET_SYSCON_POWEROFF=y + CONFIG_POWER_SUPPLY=y + # CONFIG_POWER_SUPPLY_DEBUG is not set + CONFIG_POWER_SUPPLY_HWMON=y ++# CONFIG_GENERIC_ADC_BATTERY is not set + # CONFIG_IP5XXX_POWER is not set + # CONFIG_TEST_POWER is not set + # CONFIG_CHARGER_ADP5061 is not set +@@ -3565,6 +3960,7 @@ CONFIG_SENSORS_G762=m + # CONFIG_SENSORS_HS3001 is not set + CONFIG_SENSORS_IBMAEM=m + CONFIG_SENSORS_IBMPEX=m ++# CONFIG_SENSORS_IIO_HWMON is not set + CONFIG_SENSORS_IT87=m + CONFIG_SENSORS_JC42=m + CONFIG_SENSORS_POWR1220=m +@@ -3600,7 +3996,7 @@ CONFIG_SENSORS_MAX31790=m + CONFIG_SENSORS_MCP3021=m + # CONFIG_SENSORS_TC654 is not set + # CONFIG_SENSORS_TPS23861 is not set +-# CONFIG_SENSORS_MR75203 is not set ++CONFIG_SENSORS_MR75203=m + CONFIG_SENSORS_ADCXX=m + CONFIG_SENSORS_LM63=m + CONFIG_SENSORS_LM70=m +@@ -3620,6 +4016,7 @@ CONFIG_SENSORS_LM95241=m + CONFIG_SENSORS_LM95245=m + CONFIG_SENSORS_PC87360=m + CONFIG_SENSORS_PC87427=m ++# CONFIG_SENSORS_NTC_THERMISTOR is not set + CONFIG_SENSORS_NCT6683=m + # CONFIG_SENSORS_NCT6775 is not set + # CONFIG_SENSORS_NCT6775_I2C is not set +@@ -3680,7 +4077,7 @@ CONFIG_SENSORS_UCD9200=m + # CONFIG_SENSORS_XDPE152 is not set + # CONFIG_SENSORS_XDPE122 is not set + CONFIG_SENSORS_ZL6100=m +-CONFIG_SENSORS_PWM_FAN=y ++CONFIG_SENSORS_PWM_FAN=m + # CONFIG_SENSORS_SBTSI is not set + # CONFIG_SENSORS_SBRMI is not set + CONFIG_SENSORS_SHT15=m +@@ -3733,9 +4130,14 @@ CONFIG_SENSORS_W83L785TS=m + CONFIG_SENSORS_W83L786NG=m + CONFIG_SENSORS_W83627HF=m + CONFIG_SENSORS_W83627EHF=m ++ ++# ++# ACPI drivers ++# ++# CONFIG_SENSORS_ACPI_POWER is not set + CONFIG_THERMAL=y + # CONFIG_THERMAL_NETLINK is not set +-# CONFIG_THERMAL_STATISTICS is not set ++CONFIG_THERMAL_STATISTICS=y + CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 + CONFIG_THERMAL_HWMON=y + CONFIG_THERMAL_OF=y +@@ -3743,41 +4145,62 @@ CONFIG_THERMAL_OF=y + CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y + # CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set + # CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set ++# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set + CONFIG_THERMAL_GOV_FAIR_SHARE=y + CONFIG_THERMAL_GOV_STEP_WISE=y + # CONFIG_THERMAL_GOV_BANG_BANG is not set + CONFIG_THERMAL_GOV_USER_SPACE=y ++CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y + CONFIG_CPU_THERMAL=y + CONFIG_CPU_FREQ_THERMAL=y ++CONFIG_DEVFREQ_THERMAL=y + CONFIG_THERMAL_EMULATION=y + # CONFIG_THERMAL_MMIO is not set ++# CONFIG_SUN8I_THERMAL is not set ++# CONFIG_RCAR_THERMAL is not set ++# CONFIG_RCAR_GEN3_THERMAL is not set ++# CONFIG_RZG2L_THERMAL is not set ++# CONFIG_GENERIC_ADC_THERMAL is not set + CONFIG_WATCHDOG=y + CONFIG_WATCHDOG_CORE=y + # CONFIG_WATCHDOG_NOWAYOUT is not set + CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +-CONFIG_WATCHDOG_OPEN_TIMEOUT=0 ++CONFIG_WATCHDOG_OPEN_TIMEOUT=32 + CONFIG_WATCHDOG_SYSFS=y + # CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + + # + 
# Watchdog Pretimeout Governors + # +-# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set ++CONFIG_WATCHDOG_PRETIMEOUT_GOV=y ++CONFIG_WATCHDOG_PRETIMEOUT_GOV_SEL=m ++CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=y ++CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y ++CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y ++# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC is not set + + # + # Watchdog Device Drivers + # + CONFIG_SOFT_WATCHDOG=m ++# CONFIG_SOFT_WATCHDOG_PRETIMEOUT is not set + CONFIG_GPIO_WATCHDOG=m ++# CONFIG_WDAT_WDT is not set + # CONFIG_XILINX_WATCHDOG is not set + # CONFIG_ZIIRAVE_WATCHDOG is not set + # CONFIG_CADENCE_WATCHDOG is not set +-# CONFIG_DW_WATCHDOG is not set ++CONFIG_DW_WATCHDOG=y ++CONFIG_SUNXI_WATCHDOG=y + # CONFIG_MAX63XX_WATCHDOG is not set ++# CONFIG_RENESAS_WDT is not set ++# CONFIG_RENESAS_RZAWDT is not set ++# CONFIG_RENESAS_RZN1WDT is not set ++# CONFIG_RENESAS_RZG2LWDT is not set + CONFIG_ALIM7101_WDT=m + CONFIG_I6300ESB_WDT=m + # CONFIG_MEN_A21_WDT is not set + CONFIG_STARFIVE_WATCHDOG=y ++CONFIG_TH1520_PMIC_WATCHDOG=y + + # + # PCI-based Watchdog Cards +@@ -3804,8 +4227,9 @@ CONFIG_BCMA_DRIVER_GPIO=y + # + # Multifunction device drivers + # +-CONFIG_MFD_CORE=m ++CONFIG_MFD_CORE=y + # CONFIG_MFD_ACT8945A is not set ++# CONFIG_MFD_SUN4I_GPADC is not set + # CONFIG_MFD_AS3711 is not set + # CONFIG_MFD_SMPRO is not set + # CONFIG_MFD_AS3722 is not set +@@ -3877,8 +4301,8 @@ CONFIG_MFD_CORE=m + # CONFIG_MFD_SM501 is not set + # CONFIG_MFD_SKY81452 is not set + # CONFIG_MFD_STMPE is not set ++# CONFIG_MFD_SUN6I_PRCM is not set + CONFIG_MFD_SYSCON=y +-# CONFIG_MFD_TI_AM335X_TSCADC is not set + # CONFIG_MFD_LP3943 is not set + # CONFIG_MFD_LP8788 is not set + # CONFIG_MFD_TI_LMU is not set +@@ -3921,6 +4345,8 @@ CONFIG_MFD_SYSCON=y + # CONFIG_MFD_STMFX is not set + # CONFIG_MFD_ATC260X_I2C is not set + # CONFIG_MFD_QCOM_PM8008 is not set ++CONFIG_MFD_SPACEMIT_P1=y ++# CONFIG_RAVE_SP_CORE is not set + # CONFIG_MFD_INTEL_M10_BMC_SPI is not set + # CONFIG_MFD_RSMU_I2C is not set + # CONFIG_MFD_RSMU_SPI is not set +@@ -3987,6 +4413,7 @@ CONFIG_REGULATOR_PWM=y + # CONFIG_REGULATOR_RTQ6752 is not set + # CONFIG_REGULATOR_RTQ2208 is not set + # CONFIG_REGULATOR_SLG51000 is not set ++CONFIG_REGULATOR_SPACEMIT_P1=y + # CONFIG_REGULATOR_SY8106A is not set + # CONFIG_REGULATOR_SY8824X is not set + # CONFIG_REGULATOR_SY8827N is not set +@@ -3999,6 +4426,7 @@ CONFIG_REGULATOR_PWM=y + # CONFIG_REGULATOR_TPS65132 is not set + # CONFIG_REGULATOR_TPS6524X is not set + # CONFIG_REGULATOR_VCTRL is not set ++CONFIG_REGULATOR_TH1520_AON=y + # CONFIG_RC_CORE is not set + + # +@@ -4007,7 +4435,7 @@ CONFIG_REGULATOR_PWM=y + # CONFIG_MEDIA_CEC_SUPPORT is not set + # end of CEC support + +-CONFIG_MEDIA_SUPPORT=m ++CONFIG_MEDIA_SUPPORT=y + # CONFIG_MEDIA_SUPPORT_FILTER is not set + # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set + +@@ -4131,10 +4559,12 @@ CONFIG_RADIO_ADAPTERS=m + # CONFIG_USB_RAREMONO is not set + # CONFIG_RADIO_SI470X is not set + CONFIG_MEDIA_PLATFORM_DRIVERS=y +-# CONFIG_V4L_PLATFORM_DRIVERS is not set ++CONFIG_V4L_PLATFORM_DRIVERS=y + # CONFIG_SDR_PLATFORM_DRIVERS is not set + # CONFIG_DVB_PLATFORM_DRIVERS is not set +-# CONFIG_V4L_MEM2MEM_DRIVERS is not set ++CONFIG_V4L_MEM2MEM_DRIVERS=y ++# CONFIG_VIDEO_MEM2MEM_DEINTERLACE is not set ++# CONFIG_VIDEO_MUX is not set + + # + # Allegro DVT media platform drivers +@@ -4173,6 +4603,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y + # + # Marvell media platform drivers + # ++# CONFIG_VIDEO_CAFE_CCIC is not set + + # + # Mediatek media platform drivers +@@ -4197,6 
+4628,15 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y + # + # Renesas media platform drivers + # ++# CONFIG_VIDEO_RCAR_ISP is not set ++# CONFIG_VIDEO_RCAR_CSI2 is not set ++# CONFIG_VIDEO_RCAR_VIN is not set ++# CONFIG_VIDEO_RZG2L_CSI2 is not set ++# CONFIG_VIDEO_RZG2L_CRU is not set ++# CONFIG_VIDEO_RENESAS_FCP is not set ++# CONFIG_VIDEO_RENESAS_FDP1 is not set ++# CONFIG_VIDEO_RENESAS_JPU is not set ++# CONFIG_VIDEO_RENESAS_VSP1 is not set + + # + # Rockchip media platform drivers +@@ -4213,6 +4653,11 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y + # + # Sunxi media platform drivers + # ++# CONFIG_VIDEO_SUN4I_CSI is not set ++# CONFIG_VIDEO_SUN6I_CSI is not set ++# CONFIG_VIDEO_SUN8I_A83T_MIPI_CSI2 is not set ++# CONFIG_VIDEO_SUN8I_DEINTERLACE is not set ++# CONFIG_VIDEO_SUN8I_ROTATE is not set + + # + # Texas Instruments drivers +@@ -4221,6 +4666,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y + # + # Verisilicon media platform drivers + # ++# CONFIG_VIDEO_HANTRO is not set + + # + # VIA media platform drivers +@@ -4229,6 +4675,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y + # + # Xilinx media platform drivers + # ++# CONFIG_VIDEO_XILINX is not set + + # + # MMC/SDIO DVB adapters +@@ -4283,6 +4730,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y + # CONFIG_VIDEO_OV2659 is not set + # CONFIG_VIDEO_OV2680 is not set + # CONFIG_VIDEO_OV2685 is not set ++# CONFIG_VIDEO_OV2740 is not set + # CONFIG_VIDEO_OV4689 is not set + # CONFIG_VIDEO_OV5640 is not set + # CONFIG_VIDEO_OV5645 is not set +@@ -4304,6 +4752,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y + # CONFIG_VIDEO_OV9282 is not set + # CONFIG_VIDEO_OV9640 is not set + # CONFIG_VIDEO_OV9650 is not set ++# CONFIG_VIDEO_OV9734 is not set + # CONFIG_VIDEO_RDACM20 is not set + # CONFIG_VIDEO_RDACM21 is not set + # CONFIG_VIDEO_RJ54N1 is not set +@@ -4341,6 +4790,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y + # CONFIG_VIDEO_CS53L32A is not set + # CONFIG_VIDEO_MSP3400 is not set + # CONFIG_VIDEO_SONY_BTF_MPX is not set ++# CONFIG_VIDEO_TDA1997X is not set + # CONFIG_VIDEO_TDA7432 is not set + # CONFIG_VIDEO_TDA9840 is not set + # CONFIG_VIDEO_TEA6415C is not set +@@ -4451,7 +4901,7 @@ CONFIG_CXD2880_SPI_DRV=m + # CONFIG_VIDEO_GS1662 is not set + # end of Media SPI Adapters + +-CONFIG_MEDIA_TUNER=m ++CONFIG_MEDIA_TUNER=y + + # + # Customize TV tuners +@@ -4668,6 +5118,7 @@ CONFIG_DVB_SP2=m + # Graphics support + # + CONFIG_APERTURE_HELPERS=y ++CONFIG_SCREEN_INFO=y + CONFIG_VIDEO_CMDLINE=y + CONFIG_VIDEO_NOMODESET=y + CONFIG_AUXDISPLAY=y +@@ -4679,6 +5130,7 @@ CONFIG_AUXDISPLAY=y + # CONFIG_CHARLCD_BL_ON is not set + CONFIG_CHARLCD_BL_FLASH=y + CONFIG_DRM=y ++CONFIG_DRM_MIPI_DSI=y + # CONFIG_DRM_DEBUG_MM is not set + CONFIG_DRM_KMS_HELPER=y + # CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set +@@ -4687,7 +5139,7 @@ CONFIG_DRM_FBDEV_EMULATION=y + CONFIG_DRM_FBDEV_OVERALLOC=100 + # CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set + CONFIG_DRM_LOAD_EDID_FIRMWARE=y +-CONFIG_DRM_DISPLAY_HELPER=m ++CONFIG_DRM_DISPLAY_HELPER=y + CONFIG_DRM_DISPLAY_DP_HELPER=y + CONFIG_DRM_DISPLAY_HDCP_HELPER=y + CONFIG_DRM_DISPLAY_HDMI_HELPER=y +@@ -4720,7 +5172,7 @@ CONFIG_DRM_I2C_NXP_TDA998X=m + CONFIG_DRM_RADEON=m + CONFIG_DRM_RADEON_USERPTR=y + CONFIG_DRM_AMDGPU=m +-# CONFIG_DRM_AMDGPU_SI is not set ++CONFIG_DRM_AMDGPU_SI=y + CONFIG_DRM_AMDGPU_CIK=y + CONFIG_DRM_AMDGPU_USERPTR=y + # CONFIG_DRM_AMDGPU_WERROR is not set +@@ -4735,9 +5187,13 @@ CONFIG_DRM_AMDGPU_USERPTR=y + # Display Engine Configuration + # + CONFIG_DRM_AMD_DC=y ++CONFIG_DRM_AMD_DC_FP=y ++# CONFIG_DRM_AMD_DC_SI is not set + # CONFIG_DEBUG_KERNEL_DC is not set ++# 
CONFIG_DRM_AMD_SECURE_DISPLAY is not set + # end of Display Engine Configuration + ++CONFIG_HSA_AMD=y + CONFIG_DRM_NOUVEAU=m + CONFIG_NOUVEAU_DEBUG=5 + CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +@@ -4749,6 +5205,9 @@ CONFIG_DRM_NOUVEAU_BACKLIGHT=y + CONFIG_DRM_UDL=m + CONFIG_DRM_AST=m + CONFIG_DRM_MGAG200=m ++# CONFIG_DRM_RZG2L_MIPI_DSI is not set ++# CONFIG_DRM_SHMOBILE is not set ++# CONFIG_DRM_SUN4I is not set + CONFIG_DRM_QXL=m + CONFIG_DRM_VIRTIO_GPU=m + CONFIG_DRM_VIRTIO_GPU_KMS=y +@@ -4759,36 +5218,89 @@ CONFIG_DRM_PANEL=y + # + # CONFIG_DRM_PANEL_ABT_Y030XX067A is not set + # CONFIG_DRM_PANEL_ARM_VERSATILE is not set ++# CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596 is not set + # CONFIG_DRM_PANEL_AUO_A030JTN01 is not set ++# CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0 is not set ++# CONFIG_DRM_PANEL_BOE_HIMAX8279D is not set ++# CONFIG_DRM_PANEL_BOE_TV101WUM_NL6 is not set ++# CONFIG_DRM_PANEL_DSI_CM is not set + # CONFIG_DRM_PANEL_LVDS is not set +-# CONFIG_DRM_PANEL_SIMPLE is not set ++CONFIG_DRM_PANEL_SIMPLE=y + # CONFIG_DRM_PANEL_EDP is not set ++# CONFIG_DRM_PANEL_EBBG_FT8719 is not set ++# CONFIG_DRM_PANEL_ELIDA_KD35T133 is not set ++# CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02 is not set ++# CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D is not set ++# CONFIG_DRM_PANEL_HIMAX_HX8394 is not set + # CONFIG_DRM_PANEL_ILITEK_IL9322 is not set + # CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set ++# CONFIG_DRM_PANEL_ILITEK_ILI9881C is not set + # CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set ++# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set ++CONFIG_DRM_PANEL_JADARD_JD9365DA_H3=y ++# CONFIG_DRM_PANEL_JDI_LT070ME05000 is not set ++# CONFIG_DRM_PANEL_JDI_R63452 is not set ++# CONFIG_DRM_PANEL_KHADAS_TS050 is not set ++# CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04 is not set ++# CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W is not set ++# CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829 is not set + # CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set + # CONFIG_DRM_PANEL_LG_LB035Q02 is not set + # CONFIG_DRM_PANEL_LG_LG4573 is not set ++# CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966 is not set + # CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set ++# CONFIG_DRM_PANEL_NEWVISION_NV3051D is not set + # CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set ++# CONFIG_DRM_PANEL_NOVATEK_NT35510 is not set ++# CONFIG_DRM_PANEL_NOVATEK_NT35560 is not set ++# CONFIG_DRM_PANEL_NOVATEK_NT35950 is not set ++# CONFIG_DRM_PANEL_NOVATEK_NT36523 is not set ++# CONFIG_DRM_PANEL_NOVATEK_NT36672A is not set + # CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set ++# CONFIG_DRM_PANEL_MANTIX_MLAF057WE51 is not set + # CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set + # CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set ++# CONFIG_DRM_PANEL_ORISETECH_OTM8009A is not set ++# CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS is not set ++# CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00 is not set ++# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set ++# CONFIG_DRM_PANEL_RAYDIUM_RM67191 is not set ++# CONFIG_DRM_PANEL_RAYDIUM_RM68200 is not set ++# CONFIG_DRM_PANEL_RONBO_RB070D30 is not set + # CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set + # CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set ++# CONFIG_DRM_PANEL_SAMSUNG_S6D16D0 is not set + # CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set + # CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set ++# CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set ++# CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set + # CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set + # CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set + # CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set ++# CONFIG_DRM_PANEL_SAMSUNG_SOFEF00 is not set + # 
CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set ++# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set + # CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set ++# CONFIG_DRM_PANEL_SHARP_LS043T1LE01 is not set ++# CONFIG_DRM_PANEL_SHARP_LS060T1SX01 is not set ++# CONFIG_DRM_PANEL_SITRONIX_ST7701 is not set ++# CONFIG_DRM_PANEL_SITRONIX_ST7703 is not set + # CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set + # CONFIG_DRM_PANEL_SONY_ACX565AKM is not set ++# CONFIG_DRM_PANEL_SONY_TD4353_JDI is not set ++# CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521 is not set ++# CONFIG_DRM_PANEL_STARTEK_KD070FHFID015 is not set ++# CONFIG_DRM_PANEL_TDO_TL070WSH30 is not set + # CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set + # CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set + # CONFIG_DRM_PANEL_TPO_TPG110 is not set ++# CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA is not set ++# CONFIG_DRM_PANEL_VISIONOX_RM69299 is not set ++# CONFIG_DRM_PANEL_VISIONOX_VTDR6130 is not set ++# CONFIG_DRM_PANEL_VISIONOX_R66451 is not set + # CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set ++# CONFIG_DRM_PANEL_XINPENG_XPP055C272 is not set ++# CONFIG_DRM_PANEL_HX8279 is not set + # end of Display Panels + + CONFIG_DRM_BRIDGE=y +@@ -4834,10 +5346,16 @@ CONFIG_DRM_PANEL_BRIDGE=y + # CONFIG_DRM_I2C_ADV7511 is not set + # CONFIG_DRM_CDNS_DSI is not set + # CONFIG_DRM_CDNS_MHDP8546 is not set ++CONFIG_DRM_DW_HDMI=y ++# CONFIG_DRM_DW_HDMI_AHB_AUDIO is not set ++# CONFIG_DRM_DW_HDMI_I2S_AUDIO is not set ++# CONFIG_DRM_DW_HDMI_GP_AUDIO is not set ++# CONFIG_DRM_DW_HDMI_CEC is not set + # end of Display Interface Bridges + + # CONFIG_DRM_LOONGSON is not set +-# CONFIG_DRM_ETNAVIV is not set ++CONFIG_DRM_ETNAVIV=m ++CONFIG_DRM_ETNAVIV_THERMAL=y + # CONFIG_DRM_LOGICVC is not set + # CONFIG_DRM_ARCPGU is not set + CONFIG_DRM_BOCHS=m +@@ -4856,6 +5374,14 @@ CONFIG_DRM_CIRRUS_QEMU=m + # CONFIG_TINYDRM_ST7735R is not set + # CONFIG_DRM_GUD is not set + # CONFIG_DRM_SSD130X is not set ++CONFIG_DRM_VERISILICON=y ++# CONFIG_VERISILICON_VIRTUAL_DISPLAY is not set ++CONFIG_VERISILICON_DW_MIPI_DSI=y ++CONFIG_VERISILICON_DW_HDMI_TH1520=y ++# CONFIG_VERISILICON_MMU is not set ++# CONFIG_VERISILICON_DEC is not set ++CONFIG_DRM_POWERVR_ROGUE=m ++# CONFIG_DRM_POWERVR_ROGUE_DEBUG is not set + # CONFIG_DRM_LEGACY is not set + CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +@@ -4894,6 +5420,7 @@ CONFIG_FB_RADEON_BACKLIGHT=y + # CONFIG_FB_ARK is not set + # CONFIG_FB_PM3 is not set + # CONFIG_FB_CARMINE is not set ++# CONFIG_FB_SH_MOBILE_LCDC is not set + # CONFIG_FB_SMSCUFX is not set + # CONFIG_FB_UDL is not set + # CONFIG_FB_IBM_GXT4500 is not set +@@ -4919,6 +5446,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y + # CONFIG_FB_FOREIGN_ENDIAN is not set + CONFIG_FB_SYS_FOPS=y + CONFIG_FB_DEFERRED_IO=y ++CONFIG_FB_IOMEM_FOPS=y + CONFIG_FB_IOMEM_HELPERS=y + CONFIG_FB_SYSMEM_HELPERS=y + CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +@@ -4946,7 +5474,7 @@ CONFIG_LCD_PLATFORM=m + CONFIG_BACKLIGHT_CLASS_DEVICE=y + # CONFIG_BACKLIGHT_KTD253 is not set + # CONFIG_BACKLIGHT_KTZ8866 is not set +-CONFIG_BACKLIGHT_PWM=m ++CONFIG_BACKLIGHT_PWM=y + # CONFIG_BACKLIGHT_QCOM_WLED is not set + # CONFIG_BACKLIGHT_ADP8860 is not set + # CONFIG_BACKLIGHT_ADP8870 is not set +@@ -4960,6 +5488,7 @@ CONFIG_BACKLIGHT_GPIO=m + # CONFIG_BACKLIGHT_LED is not set + # end of Backlight & LCD device support + ++CONFIG_VIDEOMODE_HELPERS=y + CONFIG_HDMI=y + + # +@@ -4983,10 +5512,13 @@ CONFIG_LOGO_LINUX_CLUT224=y + # end of Graphics support + + # CONFIG_DRM_ACCEL is not set +-CONFIG_SOUND=m +-CONFIG_SND=m +-CONFIG_SND_TIMER=m +-CONFIG_SND_PCM=m ++CONFIG_SOUND=y 
++CONFIG_SND=y ++CONFIG_SND_TIMER=y ++CONFIG_SND_PCM=y ++CONFIG_SND_PCM_ELD=y ++CONFIG_SND_PCM_IEC958=y ++CONFIG_SND_DMAENGINE_PCM=y + CONFIG_SND_HWDEP=m + CONFIG_SND_RAWMIDI=m + CONFIG_SND_JACK=y +@@ -5010,6 +5542,7 @@ CONFIG_SND_ALOOP=m + # CONFIG_SND_PCMTEST is not set + # CONFIG_SND_MTPAV is not set + # CONFIG_SND_SERIAL_U16550 is not set ++# CONFIG_SND_SERIAL_GENERIC is not set + # CONFIG_SND_MPU401 is not set + CONFIG_SND_PCI=y + # CONFIG_SND_AD1889 is not set +@@ -5074,6 +5607,11 @@ CONFIG_SND_HDA_INTEL=m + # CONFIG_SND_HDA_RECONFIG is not set + # CONFIG_SND_HDA_INPUT_BEEP is not set + # CONFIG_SND_HDA_PATCH_LOADER is not set ++# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set ++# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set ++# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set ++# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set ++# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set + # CONFIG_SND_HDA_CODEC_REALTEK is not set + # CONFIG_SND_HDA_CODEC_ANALOG is not set + # CONFIG_SND_HDA_CODEC_SIGMATEL is not set +@@ -5095,7 +5633,9 @@ CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 + CONFIG_SND_HDA_CORE=m + CONFIG_SND_HDA_COMPONENT=y + CONFIG_SND_HDA_PREALLOC_SIZE=64 ++CONFIG_SND_INTEL_NHLT=y + CONFIG_SND_INTEL_DSP_CONFIG=m ++CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m + CONFIG_SND_SPI=y + CONFIG_SND_USB=y + CONFIG_SND_USB_AUDIO=m +@@ -5110,7 +5650,273 @@ CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y + # CONFIG_SND_USB_PODHD is not set + # CONFIG_SND_USB_TONEPORT is not set + # CONFIG_SND_USB_VARIAX is not set +-# CONFIG_SND_SOC is not set ++CONFIG_SND_SOC=y ++CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y ++# CONFIG_SND_SOC_ADI is not set ++# CONFIG_SND_SOC_AMD_ACP is not set ++# CONFIG_SND_AMD_ACP_CONFIG is not set ++# CONFIG_SND_ATMEL_SOC is not set ++# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set ++# CONFIG_SND_DESIGNWARE_I2S is not set ++ ++# ++# SoC Audio for Freescale CPUs ++# ++ ++# ++# Common SoC Audio options for Freescale CPUs: ++# ++# CONFIG_SND_SOC_FSL_ASRC is not set ++# CONFIG_SND_SOC_FSL_SAI is not set ++# CONFIG_SND_SOC_FSL_AUDMIX is not set ++# CONFIG_SND_SOC_FSL_SSI is not set ++# CONFIG_SND_SOC_FSL_SPDIF is not set ++# CONFIG_SND_SOC_FSL_ESAI is not set ++# CONFIG_SND_SOC_FSL_MICFIL is not set ++# CONFIG_SND_SOC_FSL_XCVR is not set ++# CONFIG_SND_SOC_FSL_RPMSG is not set ++# CONFIG_SND_SOC_IMX_AUDMUX is not set ++# end of SoC Audio for Freescale CPUs ++ ++# CONFIG_SND_SOC_CHV3_I2S is not set ++# CONFIG_SND_I2S_HI6210_I2S is not set ++# CONFIG_SND_SOC_IMG is not set ++# CONFIG_SND_SOC_MTK_BTCVSD is not set ++ ++# ++# SoC Audio support for Renesas SoCs ++# ++# CONFIG_SND_SOC_SH4_FSI is not set ++# CONFIG_SND_SOC_RCAR is not set ++# end of SoC Audio support for Renesas SoCs ++ ++# CONFIG_SND_SOC_SOF_TOPLEVEL is not set ++# CONFIG_SND_SOC_STARFIVE is not set ++ ++# ++# STMicroelectronics STM32 SOC audio support ++# ++# end of STMicroelectronics STM32 SOC audio support ++ ++# ++# Allwinner SoC Audio support ++# ++# CONFIG_SND_SUN4I_CODEC is not set ++# CONFIG_SND_SUN4I_I2S is not set ++# CONFIG_SND_SUN4I_SPDIF is not set ++# CONFIG_SND_SUN50I_DMIC is not set ++# end of Allwinner SoC Audio support ++ ++# CONFIG_SND_SOC_XILINX_I2S is not set ++# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set ++# CONFIG_SND_SOC_XILINX_SPDIF is not set ++# CONFIG_SND_SOC_XTFPGA_I2S is not set ++CONFIG_SND_SOC_XUANTIE=y ++CONFIG_SND_SOC_XUANTIE_TH1520_I2S=y ++# CONFIG_SND_SOC_XUANTIE_TH1520_I2S_CH8 is not set ++CONFIG_SND_SOC_XUANTIE_TH1520_HDMI_PCM=y ++CONFIG_SND_SOC_XUANTIE_TH1520_TDM=y ++CONFIG_SND_SOC_XUANTIE_TH1520_SPDIF=y 
++CONFIG_SND_SOC_I2C_AND_SPI=y ++ ++# ++# CODEC drivers ++# ++# CONFIG_SND_SOC_AC97_CODEC is not set ++# CONFIG_SND_SOC_ADAU1372_I2C is not set ++# CONFIG_SND_SOC_ADAU1372_SPI is not set ++# CONFIG_SND_SOC_ADAU1701 is not set ++# CONFIG_SND_SOC_ADAU1761_I2C is not set ++# CONFIG_SND_SOC_ADAU1761_SPI is not set ++# CONFIG_SND_SOC_ADAU7002 is not set ++# CONFIG_SND_SOC_ADAU7118_HW is not set ++# CONFIG_SND_SOC_ADAU7118_I2C is not set ++# CONFIG_SND_SOC_AK4104 is not set ++# CONFIG_SND_SOC_AK4118 is not set ++# CONFIG_SND_SOC_AK4375 is not set ++# CONFIG_SND_SOC_AK4458 is not set ++# CONFIG_SND_SOC_AK4554 is not set ++# CONFIG_SND_SOC_AK4613 is not set ++# CONFIG_SND_SOC_AK4642 is not set ++# CONFIG_SND_SOC_AK5386 is not set ++# CONFIG_SND_SOC_AK5558 is not set ++# CONFIG_SND_SOC_ALC5623 is not set ++# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set ++# CONFIG_SND_SOC_AW8738 is not set ++# CONFIG_SND_SOC_AW87519 is not set ++# CONFIG_SND_SOC_AW88395 is not set ++# CONFIG_SND_SOC_AW88261 is not set ++# CONFIG_SND_SOC_BD28623 is not set ++# CONFIG_SND_SOC_BT_SCO is not set ++# CONFIG_SND_SOC_CHV3_CODEC is not set ++# CONFIG_SND_SOC_CS35L32 is not set ++# CONFIG_SND_SOC_CS35L33 is not set ++# CONFIG_SND_SOC_CS35L34 is not set ++# CONFIG_SND_SOC_CS35L35 is not set ++# CONFIG_SND_SOC_CS35L36 is not set ++# CONFIG_SND_SOC_CS35L41_SPI is not set ++# CONFIG_SND_SOC_CS35L41_I2C is not set ++# CONFIG_SND_SOC_CS35L45_SPI is not set ++# CONFIG_SND_SOC_CS35L45_I2C is not set ++# CONFIG_SND_SOC_CS35L56_I2C is not set ++# CONFIG_SND_SOC_CS35L56_SPI is not set ++# CONFIG_SND_SOC_CS42L42 is not set ++# CONFIG_SND_SOC_CS42L51_I2C is not set ++# CONFIG_SND_SOC_CS42L52 is not set ++# CONFIG_SND_SOC_CS42L56 is not set ++# CONFIG_SND_SOC_CS42L73 is not set ++# CONFIG_SND_SOC_CS42L83 is not set ++# CONFIG_SND_SOC_CS4234 is not set ++# CONFIG_SND_SOC_CS4265 is not set ++# CONFIG_SND_SOC_CS4270 is not set ++# CONFIG_SND_SOC_CS4271_I2C is not set ++# CONFIG_SND_SOC_CS4271_SPI is not set ++# CONFIG_SND_SOC_CS42XX8_I2C is not set ++# CONFIG_SND_SOC_CS43130 is not set ++# CONFIG_SND_SOC_CS4341 is not set ++# CONFIG_SND_SOC_CS4349 is not set ++# CONFIG_SND_SOC_CS53L30 is not set ++# CONFIG_SND_SOC_CX2072X is not set ++# CONFIG_SND_SOC_DA7213 is not set ++# CONFIG_SND_SOC_DMIC is not set ++CONFIG_SND_SOC_HDMI_CODEC=m ++# CONFIG_SND_SOC_ES7134 is not set ++CONFIG_SND_SOC_ES7210=y ++# CONFIG_SND_SOC_ES7241 is not set ++CONFIG_SND_SOC_ES8156=y ++# CONFIG_SND_SOC_ES8316 is not set ++# CONFIG_SND_SOC_ES8326 is not set ++# CONFIG_SND_SOC_ES8328_I2C is not set ++# CONFIG_SND_SOC_ES8328_SPI is not set ++# CONFIG_SND_SOC_GTM601 is not set ++# CONFIG_SND_SOC_HDA is not set ++# CONFIG_SND_SOC_ICS43432 is not set ++# CONFIG_SND_SOC_IDT821034 is not set ++# CONFIG_SND_SOC_INNO_RK3036 is not set ++# CONFIG_SND_SOC_MAX98088 is not set ++# CONFIG_SND_SOC_MAX98090 is not set ++# CONFIG_SND_SOC_MAX98357A is not set ++# CONFIG_SND_SOC_MAX98504 is not set ++# CONFIG_SND_SOC_MAX9867 is not set ++# CONFIG_SND_SOC_MAX98927 is not set ++# CONFIG_SND_SOC_MAX98520 is not set ++# CONFIG_SND_SOC_MAX98373_I2C is not set ++# CONFIG_SND_SOC_MAX98388 is not set ++# CONFIG_SND_SOC_MAX98390 is not set ++# CONFIG_SND_SOC_MAX98396 is not set ++# CONFIG_SND_SOC_MAX9860 is not set ++# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set ++# CONFIG_SND_SOC_PCM1681 is not set ++# CONFIG_SND_SOC_PCM1789_I2C is not set ++# CONFIG_SND_SOC_PCM179X_I2C is not set ++# CONFIG_SND_SOC_PCM179X_SPI is not set ++# CONFIG_SND_SOC_PCM186X_I2C is not set ++# CONFIG_SND_SOC_PCM186X_SPI is 
not set ++# CONFIG_SND_SOC_PCM3060_I2C is not set ++# CONFIG_SND_SOC_PCM3060_SPI is not set ++# CONFIG_SND_SOC_PCM3168A_I2C is not set ++# CONFIG_SND_SOC_PCM3168A_SPI is not set ++# CONFIG_SND_SOC_PCM5102A is not set ++# CONFIG_SND_SOC_PCM512x_I2C is not set ++# CONFIG_SND_SOC_PCM512x_SPI is not set ++# CONFIG_SND_SOC_PEB2466 is not set ++# CONFIG_SND_SOC_RK3328 is not set ++# CONFIG_SND_SOC_RT5616 is not set ++# CONFIG_SND_SOC_RT5631 is not set ++# CONFIG_SND_SOC_RT5640 is not set ++# CONFIG_SND_SOC_RT5659 is not set ++# CONFIG_SND_SOC_RT9120 is not set ++# CONFIG_SND_SOC_SGTL5000 is not set ++# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set ++# CONFIG_SND_SOC_SIMPLE_MUX is not set ++# CONFIG_SND_SOC_SMA1303 is not set ++# CONFIG_SND_SOC_SPDIF is not set ++# CONFIG_SND_SOC_SRC4XXX_I2C is not set ++# CONFIG_SND_SOC_SSM2305 is not set ++# CONFIG_SND_SOC_SSM2518 is not set ++# CONFIG_SND_SOC_SSM2602_SPI is not set ++# CONFIG_SND_SOC_SSM2602_I2C is not set ++# CONFIG_SND_SOC_SSM3515 is not set ++# CONFIG_SND_SOC_SSM4567 is not set ++# CONFIG_SND_SOC_STA32X is not set ++# CONFIG_SND_SOC_STA350 is not set ++# CONFIG_SND_SOC_STI_SAS is not set ++# CONFIG_SND_SOC_TAS2552 is not set ++# CONFIG_SND_SOC_TAS2562 is not set ++# CONFIG_SND_SOC_TAS2764 is not set ++# CONFIG_SND_SOC_TAS2770 is not set ++# CONFIG_SND_SOC_TAS2780 is not set ++# CONFIG_SND_SOC_TAS2781_I2C is not set ++# CONFIG_SND_SOC_TAS5086 is not set ++# CONFIG_SND_SOC_TAS571X is not set ++# CONFIG_SND_SOC_TAS5720 is not set ++# CONFIG_SND_SOC_TAS5805M is not set ++# CONFIG_SND_SOC_TAS6424 is not set ++# CONFIG_SND_SOC_TDA7419 is not set ++# CONFIG_SND_SOC_TFA9879 is not set ++# CONFIG_SND_SOC_TFA989X is not set ++# CONFIG_SND_SOC_TLV320ADC3XXX is not set ++# CONFIG_SND_SOC_TLV320AIC23_I2C is not set ++# CONFIG_SND_SOC_TLV320AIC23_SPI is not set ++# CONFIG_SND_SOC_TLV320AIC31XX is not set ++# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set ++# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set ++# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set ++# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set ++# CONFIG_SND_SOC_TLV320ADCX140 is not set ++# CONFIG_SND_SOC_TS3A227E is not set ++# CONFIG_SND_SOC_TSCS42XX is not set ++# CONFIG_SND_SOC_TSCS454 is not set ++# CONFIG_SND_SOC_UDA1334 is not set ++# CONFIG_SND_SOC_WM8510 is not set ++# CONFIG_SND_SOC_WM8523 is not set ++# CONFIG_SND_SOC_WM8524 is not set ++# CONFIG_SND_SOC_WM8580 is not set ++# CONFIG_SND_SOC_WM8711 is not set ++# CONFIG_SND_SOC_WM8728 is not set ++# CONFIG_SND_SOC_WM8731_I2C is not set ++# CONFIG_SND_SOC_WM8731_SPI is not set ++# CONFIG_SND_SOC_WM8737 is not set ++# CONFIG_SND_SOC_WM8741 is not set ++# CONFIG_SND_SOC_WM8750 is not set ++# CONFIG_SND_SOC_WM8753 is not set ++# CONFIG_SND_SOC_WM8770 is not set ++# CONFIG_SND_SOC_WM8776 is not set ++# CONFIG_SND_SOC_WM8782 is not set ++# CONFIG_SND_SOC_WM8804_I2C is not set ++# CONFIG_SND_SOC_WM8804_SPI is not set ++# CONFIG_SND_SOC_WM8903 is not set ++# CONFIG_SND_SOC_WM8904 is not set ++# CONFIG_SND_SOC_WM8940 is not set ++# CONFIG_SND_SOC_WM8960 is not set ++# CONFIG_SND_SOC_WM8961 is not set ++# CONFIG_SND_SOC_WM8962 is not set ++# CONFIG_SND_SOC_WM8974 is not set ++# CONFIG_SND_SOC_WM8978 is not set ++# CONFIG_SND_SOC_WM8985 is not set ++# CONFIG_SND_SOC_ZL38060 is not set ++# CONFIG_SND_SOC_MAX9759 is not set ++# CONFIG_SND_SOC_MT6351 is not set ++# CONFIG_SND_SOC_MT6358 is not set ++# CONFIG_SND_SOC_MT6660 is not set ++# CONFIG_SND_SOC_NAU8315 is not set ++# CONFIG_SND_SOC_NAU8540 is not set ++# CONFIG_SND_SOC_NAU8810 is not set ++# 
CONFIG_SND_SOC_NAU8821 is not set ++# CONFIG_SND_SOC_NAU8822 is not set ++# CONFIG_SND_SOC_NAU8824 is not set ++# CONFIG_SND_SOC_TPA6130A2 is not set ++# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set ++# CONFIG_SND_SOC_LPASS_VA_MACRO is not set ++# CONFIG_SND_SOC_LPASS_RX_MACRO is not set ++# CONFIG_SND_SOC_LPASS_TX_MACRO is not set ++# end of CODEC drivers ++ ++CONFIG_SND_SIMPLE_CARD_UTILS=y ++CONFIG_SND_SIMPLE_CARD=y ++# CONFIG_SND_AUDIO_GRAPH_CARD is not set ++# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set ++# CONFIG_SND_TEST_COMPONENT is not set + # CONFIG_SND_VIRTIO is not set + CONFIG_HID_SUPPORT=y + CONFIG_HID=y +@@ -5195,6 +6001,7 @@ CONFIG_HID_MULTITOUCH=m + # CONFIG_HID_NINTENDO is not set + # CONFIG_HID_NTI is not set + CONFIG_HID_NTRIG=y ++# CONFIG_HID_NVIDIA_SHIELD is not set + CONFIG_HID_ORTEK=m + CONFIG_HID_PANTHERLORD=m + # CONFIG_PANTHERLORD_FF is not set +@@ -5261,6 +6068,7 @@ CONFIG_USB_HIDDEV=y + # end of USB HID support + + CONFIG_I2C_HID=y ++# CONFIG_I2C_HID_ACPI is not set + # CONFIG_I2C_HID_OF is not set + # CONFIG_I2C_HID_OF_ELAN is not set + # CONFIG_I2C_HID_OF_GOODIX is not set +@@ -5297,6 +6105,7 @@ CONFIG_USB_XHCI_HCD=y + CONFIG_USB_XHCI_PCI=y + # CONFIG_USB_XHCI_PCI_RENESAS is not set + CONFIG_USB_XHCI_PLATFORM=y ++CONFIG_USB_XHCI_RCAR=y + CONFIG_USB_EHCI_HCD=y + CONFIG_USB_EHCI_ROOT_HUB_TT=y + CONFIG_USB_EHCI_TT_NEWSCHED=y +@@ -5314,6 +6123,7 @@ CONFIG_USB_UHCI_HCD=y + # CONFIG_USB_R8A66597_HCD is not set + # CONFIG_USB_HCD_BCMA is not set + # CONFIG_USB_HCD_TEST_MODE is not set ++# CONFIG_USB_RENESAS_USBHS is not set + + # + # USB Device Class drivers +@@ -5338,8 +6148,8 @@ CONFIG_USB_STORAGE_DATAFAB=m + CONFIG_USB_STORAGE_FREECOM=m + CONFIG_USB_STORAGE_ISD200=m + CONFIG_USB_STORAGE_USBAT=m +-CONFIG_USB_STORAGE_SDDR09=m +-CONFIG_USB_STORAGE_SDDR55=m ++CONFIG_USB_STORAGE_SDDR09=y ++CONFIG_USB_STORAGE_SDDR55=y + CONFIG_USB_STORAGE_JUMPSHOT=m + CONFIG_USB_STORAGE_ALAUDA=m + CONFIG_USB_STORAGE_ONETOUCH=m +@@ -5360,7 +6170,19 @@ CONFIG_USB_MICROTEK=m + # + # CONFIG_USB_CDNS_SUPPORT is not set + # CONFIG_USB_MUSB_HDRC is not set +-# CONFIG_USB_DWC3 is not set ++CONFIG_USB_DWC3=m ++# CONFIG_USB_DWC3_ULPI is not set ++# CONFIG_USB_DWC3_HOST is not set ++# CONFIG_USB_DWC3_GADGET is not set ++CONFIG_USB_DWC3_DUAL_ROLE=y ++ ++# ++# Platform Glue Driver Support ++# ++CONFIG_USB_DWC3_PCI=m ++CONFIG_USB_DWC3_HAPS=m ++# CONFIG_USB_DWC3_OF_SIMPLE is not set ++CONFIG_USB_DWC3_XUANTIE=m + # CONFIG_USB_DWC2 is not set + # CONFIG_USB_CHIPIDEA is not set + # CONFIG_USB_ISP1760 is not set +@@ -5452,7 +6274,7 @@ CONFIG_USB_HSIC_USB3503=m + # CONFIG_USB_HSIC_USB4604 is not set + # CONFIG_USB_LINK_LAYER_TEST is not set + CONFIG_USB_CHAOSKEY=m +-# CONFIG_USB_ONBOARD_HUB is not set ++CONFIG_USB_ONBOARD_HUB=m + CONFIG_USB_ATM=m + # CONFIG_USB_SPEEDTOUCH is not set + CONFIG_USB_CXACRU=m +@@ -5467,7 +6289,101 @@ CONFIG_USB_XUSBATM=m + # CONFIG_USB_ISP1301 is not set + # end of USB Physical Layer drivers + +-# CONFIG_USB_GADGET is not set ++CONFIG_USB_GADGET=m ++# CONFIG_USB_GADGET_DEBUG is not set ++# CONFIG_USB_GADGET_DEBUG_FILES is not set ++# CONFIG_USB_GADGET_DEBUG_FS is not set ++CONFIG_USB_GADGET_VBUS_DRAW=2 ++CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 ++# CONFIG_U_SERIAL_CONSOLE is not set ++ ++# ++# USB Peripheral Controller ++# ++# CONFIG_USB_GR_UDC is not set ++# CONFIG_USB_R8A66597 is not set ++# CONFIG_USB_RENESAS_USB3 is not set ++# CONFIG_USB_RENESAS_USBF is not set ++# CONFIG_USB_PXA27X is not set ++# CONFIG_USB_MV_UDC is not set ++# CONFIG_USB_MV_U3D is not set ++# 
CONFIG_USB_SNP_UDC_PLAT is not set ++# CONFIG_USB_M66592 is not set ++# CONFIG_USB_BDC_UDC is not set ++# CONFIG_USB_AMD5536UDC is not set ++# CONFIG_USB_NET2272 is not set ++# CONFIG_USB_NET2280 is not set ++# CONFIG_USB_GOKU is not set ++# CONFIG_USB_EG20T is not set ++# CONFIG_USB_GADGET_XILINX is not set ++# CONFIG_USB_MAX3420_UDC is not set ++# CONFIG_USB_CDNS2_UDC is not set ++# CONFIG_USB_DUMMY_HCD is not set ++# end of USB Peripheral Controller ++ ++CONFIG_USB_LIBCOMPOSITE=m ++CONFIG_USB_F_ACM=m ++CONFIG_USB_F_SS_LB=m ++CONFIG_USB_U_SERIAL=m ++CONFIG_USB_U_ETHER=m ++CONFIG_USB_F_SERIAL=m ++CONFIG_USB_F_OBEX=m ++CONFIG_USB_F_NCM=m ++CONFIG_USB_F_ECM=m ++CONFIG_USB_F_EEM=m ++CONFIG_USB_F_SUBSET=m ++CONFIG_USB_F_RNDIS=m ++CONFIG_USB_F_MASS_STORAGE=m ++CONFIG_USB_F_FS=m ++CONFIG_USB_CONFIGFS=m ++# CONFIG_USB_CONFIGFS_SERIAL is not set ++# CONFIG_USB_CONFIGFS_ACM is not set ++# CONFIG_USB_CONFIGFS_OBEX is not set ++# CONFIG_USB_CONFIGFS_NCM is not set ++# CONFIG_USB_CONFIGFS_ECM is not set ++# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set ++# CONFIG_USB_CONFIGFS_RNDIS is not set ++# CONFIG_USB_CONFIGFS_EEM is not set ++# CONFIG_USB_CONFIGFS_MASS_STORAGE is not set ++# CONFIG_USB_CONFIGFS_F_LB_SS is not set ++CONFIG_USB_CONFIGFS_F_FS=y ++# CONFIG_USB_CONFIGFS_F_UAC1 is not set ++# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set ++# CONFIG_USB_CONFIGFS_F_UAC2 is not set ++# CONFIG_USB_CONFIGFS_F_MIDI is not set ++# CONFIG_USB_CONFIGFS_F_MIDI2 is not set ++# CONFIG_USB_CONFIGFS_F_HID is not set ++# CONFIG_USB_CONFIGFS_F_UVC is not set ++# CONFIG_USB_CONFIGFS_F_PRINTER is not set ++# CONFIG_USB_CONFIGFS_F_TCM is not set ++ ++# ++# USB Gadget precomposed configurations ++# ++CONFIG_USB_ZERO=m ++# CONFIG_USB_AUDIO is not set ++CONFIG_USB_ETH=m ++CONFIG_USB_ETH_RNDIS=y ++CONFIG_USB_ETH_EEM=y ++CONFIG_USB_G_NCM=m ++# CONFIG_USB_GADGETFS is not set ++# CONFIG_USB_FUNCTIONFS is not set ++CONFIG_USB_MASS_STORAGE=m ++# CONFIG_USB_GADGET_TARGET is not set ++CONFIG_USB_G_SERIAL=m ++# CONFIG_USB_MIDI_GADGET is not set ++# CONFIG_USB_G_PRINTER is not set ++# CONFIG_USB_CDC_COMPOSITE is not set ++# CONFIG_USB_G_ACM_MS is not set ++CONFIG_USB_G_MULTI=m ++CONFIG_USB_G_MULTI_RNDIS=y ++CONFIG_USB_G_MULTI_CDC=y ++# CONFIG_USB_G_HID is not set ++# CONFIG_USB_G_DBGP is not set ++# CONFIG_USB_G_WEBCAM is not set ++# CONFIG_USB_RAW_GADGET is not set ++# end of USB Gadget precomposed configurations ++ + CONFIG_TYPEC=m + CONFIG_TYPEC_TCPM=m + CONFIG_TYPEC_TCPCI=m +@@ -5476,6 +6392,7 @@ CONFIG_TYPEC_TCPCI=m + # CONFIG_TYPEC_FUSB302 is not set + CONFIG_TYPEC_UCSI=m + # CONFIG_UCSI_CCG is not set ++# CONFIG_UCSI_ACPI is not set + # CONFIG_UCSI_STM32G0 is not set + # CONFIG_TYPEC_TPS6598X is not set + # CONFIG_TYPEC_ANX7411 is not set +@@ -5500,7 +6417,7 @@ CONFIG_TYPEC_DP_ALTMODE=m + # CONFIG_TYPEC_NVIDIA_ALTMODE is not set + # end of USB Type-C Alternate Mode drivers + +-CONFIG_USB_ROLE_SWITCH=y ++CONFIG_USB_ROLE_SWITCH=m + CONFIG_MMC=y + CONFIG_PWRSEQ_EMMC=m + CONFIG_PWRSEQ_SIMPLE=m +@@ -5519,15 +6436,19 @@ CONFIG_MMC_SDHCI=y + CONFIG_MMC_SDHCI_IO_ACCESSORS=y + CONFIG_MMC_SDHCI_PCI=m + CONFIG_MMC_RICOH_MMC=y ++# CONFIG_MMC_SDHCI_ACPI is not set + CONFIG_MMC_SDHCI_PLTFM=y + # CONFIG_MMC_SDHCI_OF_ARASAN is not set + # CONFIG_MMC_SDHCI_OF_AT91 is not set +-# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set ++CONFIG_MMC_SDHCI_OF_DWCMSHC=y ++CONFIG_MMC_SDHCI_OF_K1=y + CONFIG_MMC_SDHCI_CADENCE=y + # CONFIG_MMC_SDHCI_F_SDH30 is not set + # CONFIG_MMC_SDHCI_MILBEAUT is not set + CONFIG_MMC_TIFM_SD=m + CONFIG_MMC_SPI=y 
++CONFIG_MMC_SDHCI_SOPHGO=y ++# CONFIG_MMC_SDHI is not set + CONFIG_MMC_CB710=m + CONFIG_MMC_VIA_SDMMC=m + CONFIG_MMC_DW=m +@@ -5538,18 +6459,18 @@ CONFIG_MMC_DW_BLUEFIELD=m + # CONFIG_MMC_DW_K3 is not set + CONFIG_MMC_DW_PCI=m + # CONFIG_MMC_DW_STARFIVE is not set ++# CONFIG_MMC_SH_MMCIF is not set + CONFIG_MMC_VUB300=m + CONFIG_MMC_USHC=m + # CONFIG_MMC_USDHI6ROL0 is not set + CONFIG_MMC_REALTEK_PCI=m + CONFIG_MMC_REALTEK_USB=m ++CONFIG_MMC_SUNXI=y + CONFIG_MMC_CQHCI=y + CONFIG_MMC_HSQ=m + CONFIG_MMC_TOSHIBA_PCI=m + CONFIG_MMC_MTK=m + CONFIG_MMC_SDHCI_XENON=m +-# CONFIG_MMC_SDHCI_OMAP is not set +-# CONFIG_MMC_SDHCI_AM654 is not set + # CONFIG_SCSI_UFSHCD is not set + CONFIG_MEMSTICK=m + # CONFIG_MEMSTICK_DEBUG is not set +@@ -5590,7 +6511,7 @@ CONFIG_LEDS_LM3530=m + # CONFIG_LEDS_LM3642 is not set + # CONFIG_LEDS_LM3692X is not set + # CONFIG_LEDS_PCA9532 is not set +-# CONFIG_LEDS_GPIO is not set ++CONFIG_LEDS_GPIO=y + CONFIG_LEDS_LP3944=m + # CONFIG_LEDS_LP3952 is not set + # CONFIG_LEDS_LP50XX is not set +@@ -5672,6 +6593,7 @@ CONFIG_INFINIBAND_USER_MEM=y + CONFIG_INFINIBAND_ON_DEMAND_PAGING=y + CONFIG_INFINIBAND_ADDR_TRANS=y + CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y ++# CONFIG_INFINIBAND_PEER_MEMORY is not set + CONFIG_INFINIBAND_VIRT_DMA=y + CONFIG_INFINIBAND_BNXT_RE=m + CONFIG_INFINIBAND_CXGB4=m +@@ -5753,6 +6675,7 @@ CONFIG_RTC_DRV_EM3027=m + # CONFIG_RTC_DRV_RV3032 is not set + CONFIG_RTC_DRV_RV8803=m + # CONFIG_RTC_DRV_SD3078 is not set ++CONFIG_RTC_DRV_SPACEMIT_P1=m + + # + # SPI RTC drivers +@@ -5804,21 +6727,28 @@ CONFIG_RTC_DRV_M48T35=m + CONFIG_RTC_DRV_M48T59=m + CONFIG_RTC_DRV_MSM6242=m + CONFIG_RTC_DRV_RP5C01=m ++# CONFIG_RTC_DRV_OPTEE is not set + # CONFIG_RTC_DRV_ZYNQMP is not set + + # + # on-CPU RTC drivers + # ++# CONFIG_RTC_DRV_SH is not set + # CONFIG_RTC_DRV_PL030 is not set + # CONFIG_RTC_DRV_PL031 is not set ++CONFIG_RTC_DRV_SUN6I=y + # CONFIG_RTC_DRV_CADENCE is not set + # CONFIG_RTC_DRV_FTRTC010 is not set ++CONFIG_RTC_DRV_XGENE=m ++CONFIG_RTC_DRV_XGENE_PRESCALER=y + # CONFIG_RTC_DRV_R7301 is not set + + # + # HID Sensor RTC drivers + # ++# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set + CONFIG_RTC_DRV_GOLDFISH=y ++# CONFIG_RTC_DRV_ASTBMC is not set + CONFIG_DMADEVICES=y + # CONFIG_DMADEVICES_DEBUG is not set + +@@ -5826,12 +6756,16 @@ CONFIG_DMADEVICES=y + # DMA Devices + # + CONFIG_DMA_ENGINE=y ++CONFIG_DMA_VIRTUAL_CHANNELS=y ++CONFIG_DMA_ACPI=y + CONFIG_DMA_OF=y + # CONFIG_ALTERA_MSGDMA is not set + # CONFIG_AMBA_PL08X is not set +-# CONFIG_DW_AXI_DMAC is not set ++CONFIG_DMA_SUN6I=m ++CONFIG_DW_AXI_DMAC=y + # CONFIG_FSL_EDMA is not set + # CONFIG_INTEL_IDMA64 is not set ++CONFIG_SPACEMIT_K1_DMA=y + # CONFIG_PL330_DMA is not set + # CONFIG_PLX_DMA is not set + # CONFIG_XILINX_DMA is not set +@@ -5844,6 +6778,8 @@ CONFIG_DW_DMAC=m + CONFIG_DW_DMAC_PCI=m + # CONFIG_DW_EDMA is not set + # CONFIG_SF_PDMA is not set ++# CONFIG_RCAR_DMAC is not set ++# CONFIG_RENESAS_USB_DMAC is not set + + # + # DMA Clients +@@ -5855,11 +6791,11 @@ CONFIG_ASYNC_TX_DMA=y + # DMABUF options + # + CONFIG_SYNC_FILE=y +-# CONFIG_SW_SYNC is not set +-# CONFIG_UDMABUF is not set ++CONFIG_SW_SYNC=y ++CONFIG_UDMABUF=y + # CONFIG_DMABUF_MOVE_NOTIFY is not set + # CONFIG_DMABUF_DEBUG is not set +-# CONFIG_DMABUF_SELFTESTS is not set ++CONFIG_DMABUF_SELFTESTS=m + # CONFIG_DMABUF_HEAPS is not set + # CONFIG_DMABUF_SYSFS_STATS is not set + # end of DMABUF options +@@ -5879,6 +6815,7 @@ CONFIG_VFIO_GROUP=y + CONFIG_VFIO_CONTAINER=y + CONFIG_VFIO_NOIOMMU=y + CONFIG_VFIO_VIRQFD=y ++# 
CONFIG_VFIO_DEBUGFS is not set + + # + # VFIO support for PCI devices +@@ -5948,8 +6885,11 @@ CONFIG_COMMON_CLK=y + # CONFIG_COMMON_CLK_VC7 is not set + # CONFIG_COMMON_CLK_FIXED_MMIO is not set + CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC=y ++CONFIG_CLK_RENESAS=y ++# CONFIG_CLK_RCAR_USB2_CLOCK_SEL is not set + CONFIG_CLK_SIFIVE=y + CONFIG_CLK_SIFIVE_PRCI=y ++CONFIG_SPACEMIT_K1X_CCU=y + CONFIG_CLK_STARFIVE_JH71X0=y + CONFIG_CLK_STARFIVE_JH7100=y + CONFIG_CLK_STARFIVE_JH7100_AUDIO=m +@@ -5959,15 +6899,27 @@ CONFIG_CLK_STARFIVE_JH7110_AON=m + CONFIG_CLK_STARFIVE_JH7110_STG=m + CONFIG_CLK_STARFIVE_JH7110_ISP=m + CONFIG_CLK_STARFIVE_JH7110_VOUT=m ++CONFIG_SUNXI_CCU=y ++CONFIG_SUN20I_D1_CCU=y ++CONFIG_SUN20I_D1_R_CCU=y ++CONFIG_SUN6I_RTC_CCU=y ++CONFIG_SUN8I_DE2_CCU=m + # CONFIG_XILINX_VCU is not set + # CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set ++CONFIG_XUANTIE_CLK=y ++CONFIG_CLK_TH1520_FM=y + CONFIG_HWSPINLOCK=y ++# CONFIG_HWSPINLOCK_SUN6I is not set ++CONFIG_HWSPINLOCK_TH1520=y + + # + # Clock Source drivers + # + CONFIG_TIMER_OF=y + CONFIG_TIMER_PROBE=y ++CONFIG_CLKSRC_MMIO=y ++CONFIG_SUN4I_TIMER=y ++# CONFIG_RENESAS_OSTM is not set + CONFIG_RISCV_TIMER=y + # end of Clock Source drivers + +@@ -5976,8 +6928,11 @@ CONFIG_MAILBOX=y + # CONFIG_ARM_MHU_V2 is not set + # CONFIG_PLATFORM_MHU is not set + # CONFIG_PL320_MBOX is not set ++# CONFIG_PCC is not set + # CONFIG_ALTERA_MBOX is not set + # CONFIG_MAILBOX_TEST is not set ++CONFIG_SUN6I_MSGBOX=y ++CONFIG_TH1520_MBOX=y + CONFIG_IOMMU_API=y + CONFIG_IOMMU_SUPPORT=y + +@@ -5992,6 +6947,9 @@ CONFIG_IOMMU_DEFAULT_DMA_LAZY=y + # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set + CONFIG_OF_IOMMU=y + # CONFIG_IOMMUFD is not set ++CONFIG_RISCV_IOMMU=y ++CONFIG_RISCV_IOMMU_PCI=y ++CONFIG_SUN50I_IOMMU=y + + # + # Remoteproc drivers +@@ -6007,6 +6965,7 @@ CONFIG_RPMSG_CHAR=y + CONFIG_RPMSG_CTRL=y + CONFIG_RPMSG_NS=y + # CONFIG_RPMSG_QCOM_GLINK_RPM is not set ++CONFIG_RPMSG_TH1520=y + CONFIG_RPMSG_VIRTIO=y + # end of Rpmsg drivers + +@@ -6055,22 +7014,72 @@ CONFIG_RPMSG_VIRTIO=y + # CONFIG_QCOM_PMIC_GLINK is not set + # end of Qualcomm SoC drivers + ++CONFIG_SOC_RENESAS=y + CONFIG_SIFIVE_CCACHE=y ++ ++# ++# Spacemit SoC drivers ++# ++CONFIG_SPACEMIT_MEM_RANGE=y ++# end of Spacemit SoC drivers ++ + CONFIG_JH71XX_PMU=y ++CONFIG_SUNXI_SRAM=y ++# CONFIG_SUN20I_PPU is not set + # CONFIG_SOC_TI is not set + + # + # Xilinx SoC drivers + # + # end of Xilinx SoC drivers ++ ++# ++# XuanTie SoC drivers ++# ++CONFIG_TH1520_REBOOTMODE=y ++CONFIG_VHA=m ++# CONFIG_VHA_XUANTIE_TH1520_FPGA_C910 is not set ++CONFIG_VHA_XUANTIE_TH1520=y ++# CONFIG_VHA_DUMMY is not set ++# CONFIG_HW_AX2 is not set ++CONFIG_HW_AX3=y ++# CONFIG_HW_AX3_MC is not set ++CONFIG_TARGET_OSID=0 ++CONFIG_VHA_MMU_MIRRORED_CTX=y ++CONFIG_VHA_SYS_AURA=y ++# CONFIG_VHA_SYS_VAGUS is not set ++CONFIG_VHA_LO_PRI_SUBSEGS=y ++CONFIG_VIDEO_VC8000D=m ++CONFIG_VIDEO_VC8000E=m ++# CONFIG_VIDEO_MEMORY is not set ++CONFIG_TH1520_SYSTEM_MONITOR=y ++CONFIG_TH1520_REGDUMP=y ++CONFIG_TH1520_IOPMP=y ++# end of XuanTie SoC drivers + # end of SOC (System On Chip) specific Drivers + +-# CONFIG_PM_DEVFREQ is not set ++CONFIG_PM_DEVFREQ=y ++ ++# ++# DEVFREQ Governors ++# ++CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y ++CONFIG_DEVFREQ_GOV_PERFORMANCE=y ++CONFIG_DEVFREQ_GOV_POWERSAVE=y ++CONFIG_DEVFREQ_GOV_USERSPACE=y ++CONFIG_DEVFREQ_GOV_PASSIVE=y ++ ++# ++# DEVFREQ Drivers ++# ++# CONFIG_ARM_SUN8I_A33_MBUS_DEVFREQ is not set ++CONFIG_PM_DEVFREQ_EVENT=y + CONFIG_EXTCON=y + + # + # Extcon Device Drivers + # ++# CONFIG_EXTCON_ADC_JACK is not set 
+ # CONFIG_EXTCON_FSA9480 is not set + CONFIG_EXTCON_GPIO=m + # CONFIG_EXTCON_MAX3355 is not set +@@ -6080,7 +7089,540 @@ CONFIG_EXTCON_GPIO=m + # CONFIG_EXTCON_USB_GPIO is not set + # CONFIG_EXTCON_USBC_TUSB320 is not set + # CONFIG_MEMORY is not set +-# CONFIG_IIO is not set ++CONFIG_IIO=y ++# CONFIG_IIO_BUFFER is not set ++CONFIG_IIO_CONFIGFS=y ++# CONFIG_IIO_TRIGGER is not set ++CONFIG_IIO_SW_DEVICE=y ++# CONFIG_IIO_SW_TRIGGER is not set ++# CONFIG_IIO_TRIGGERED_EVENT is not set ++ ++# ++# Accelerometers ++# ++# CONFIG_ADIS16201 is not set ++# CONFIG_ADIS16209 is not set ++# CONFIG_ADXL313_I2C is not set ++# CONFIG_ADXL313_SPI is not set ++# CONFIG_ADXL345_I2C is not set ++# CONFIG_ADXL345_SPI is not set ++# CONFIG_ADXL355_I2C is not set ++# CONFIG_ADXL355_SPI is not set ++# CONFIG_ADXL367_SPI is not set ++# CONFIG_ADXL367_I2C is not set ++# CONFIG_ADXL372_SPI is not set ++# CONFIG_ADXL372_I2C is not set ++# CONFIG_BMA180 is not set ++# CONFIG_BMA220 is not set ++# CONFIG_BMA400 is not set ++# CONFIG_BMC150_ACCEL is not set ++# CONFIG_BMI088_ACCEL is not set ++# CONFIG_DA280 is not set ++# CONFIG_DA311 is not set ++# CONFIG_DMARD06 is not set ++# CONFIG_DMARD09 is not set ++# CONFIG_DMARD10 is not set ++# CONFIG_FXLS8962AF_I2C is not set ++# CONFIG_FXLS8962AF_SPI is not set ++# CONFIG_HID_SENSOR_ACCEL_3D is not set ++# CONFIG_IIO_ST_ACCEL_3AXIS is not set ++# CONFIG_IIO_KX022A_SPI is not set ++# CONFIG_IIO_KX022A_I2C is not set ++# CONFIG_KXSD9 is not set ++# CONFIG_KXCJK1013 is not set ++# CONFIG_MC3230 is not set ++# CONFIG_MMA7455_I2C is not set ++# CONFIG_MMA7455_SPI is not set ++# CONFIG_MMA7660 is not set ++# CONFIG_MMA8452 is not set ++# CONFIG_MMA9551 is not set ++# CONFIG_MMA9553 is not set ++# CONFIG_MSA311 is not set ++# CONFIG_MXC4005 is not set ++# CONFIG_MXC6255 is not set ++# CONFIG_SCA3000 is not set ++# CONFIG_SCA3300 is not set ++# CONFIG_STK8312 is not set ++# CONFIG_STK8BA50 is not set ++# end of Accelerometers ++ ++# ++# Analog to digital converters ++# ++# CONFIG_AD4130 is not set ++# CONFIG_AD7091R5 is not set ++# CONFIG_AD7124 is not set ++# CONFIG_AD7192 is not set ++# CONFIG_AD7266 is not set ++# CONFIG_AD7280 is not set ++# CONFIG_AD7291 is not set ++# CONFIG_AD7292 is not set ++# CONFIG_AD7298 is not set ++# CONFIG_AD7476 is not set ++# CONFIG_AD7606_IFACE_PARALLEL is not set ++# CONFIG_AD7606_IFACE_SPI is not set ++# CONFIG_AD7766 is not set ++# CONFIG_AD7768_1 is not set ++# CONFIG_AD7780 is not set ++# CONFIG_AD7791 is not set ++# CONFIG_AD7793 is not set ++# CONFIG_AD7887 is not set ++# CONFIG_AD7923 is not set ++# CONFIG_AD7949 is not set ++# CONFIG_AD799X is not set ++# CONFIG_AD9467 is not set ++# CONFIG_ADI_AXI_ADC is not set ++# CONFIG_CC10001_ADC is not set ++# CONFIG_ENVELOPE_DETECTOR is not set ++# CONFIG_HI8435 is not set ++# CONFIG_HX711 is not set ++# CONFIG_INA2XX_ADC is not set ++# CONFIG_LTC2471 is not set ++# CONFIG_LTC2485 is not set ++# CONFIG_LTC2496 is not set ++# CONFIG_LTC2497 is not set ++# CONFIG_MAX1027 is not set ++# CONFIG_MAX11100 is not set ++# CONFIG_MAX1118 is not set ++# CONFIG_MAX11205 is not set ++# CONFIG_MAX11410 is not set ++# CONFIG_MAX1241 is not set ++# CONFIG_MAX1363 is not set ++# CONFIG_MAX9611 is not set ++# CONFIG_MCP320X is not set ++# CONFIG_MCP3422 is not set ++# CONFIG_MCP3911 is not set ++# CONFIG_NAU7802 is not set ++# CONFIG_RICHTEK_RTQ6056 is not set ++CONFIG_SPACEMIT_P1_ADC=m ++# CONFIG_SD_ADC_MODULATOR is not set ++# CONFIG_SUN20I_GPADC is not set ++# CONFIG_TI_ADC081C is not set ++# CONFIG_TI_ADC0832 
is not set ++# CONFIG_TI_ADC084S021 is not set ++# CONFIG_TI_ADC12138 is not set ++# CONFIG_TI_ADC108S102 is not set ++# CONFIG_TI_ADC128S052 is not set ++# CONFIG_TI_ADC161S626 is not set ++# CONFIG_TI_ADS1015 is not set ++# CONFIG_TI_ADS7924 is not set ++# CONFIG_TI_ADS1100 is not set ++# CONFIG_TI_ADS7950 is not set ++# CONFIG_TI_ADS8344 is not set ++# CONFIG_TI_ADS8688 is not set ++# CONFIG_TI_ADS124S08 is not set ++# CONFIG_TI_ADS131E08 is not set ++# CONFIG_TI_LMP92064 is not set ++# CONFIG_TI_TLC4541 is not set ++# CONFIG_TI_TSC2046 is not set ++# CONFIG_VF610_ADC is not set ++# CONFIG_XILINX_XADC is not set ++CONFIG_XUANTIE_TH1520_ADC=m ++# end of Analog to digital converters ++ ++# ++# Analog to digital and digital to analog converters ++# ++# CONFIG_AD74115 is not set ++# CONFIG_AD74413R is not set ++# end of Analog to digital and digital to analog converters ++ ++# ++# Analog Front Ends ++# ++# CONFIG_IIO_RESCALE is not set ++# end of Analog Front Ends ++ ++# ++# Amplifiers ++# ++# CONFIG_AD8366 is not set ++# CONFIG_ADA4250 is not set ++# CONFIG_HMC425 is not set ++# end of Amplifiers ++ ++# ++# Capacitance to digital converters ++# ++# CONFIG_AD7150 is not set ++# CONFIG_AD7746 is not set ++# end of Capacitance to digital converters ++ ++# ++# Chemical Sensors ++# ++# CONFIG_ATLAS_PH_SENSOR is not set ++# CONFIG_ATLAS_EZO_SENSOR is not set ++# CONFIG_BME680 is not set ++# CONFIG_CCS811 is not set ++# CONFIG_IAQCORE is not set ++# CONFIG_PMS7003 is not set ++# CONFIG_SCD30_CORE is not set ++# CONFIG_SCD4X is not set ++# CONFIG_SENSIRION_SGP30 is not set ++# CONFIG_SENSIRION_SGP40 is not set ++# CONFIG_SPS30_I2C is not set ++# CONFIG_SPS30_SERIAL is not set ++# CONFIG_SENSEAIR_SUNRISE_CO2 is not set ++# CONFIG_VZ89X is not set ++# end of Chemical Sensors ++ ++# ++# Hid Sensor IIO Common ++# ++# CONFIG_HID_SENSOR_IIO_COMMON is not set ++# end of Hid Sensor IIO Common ++ ++# ++# IIO SCMI Sensors ++# ++# end of IIO SCMI Sensors ++ ++# ++# SSP Sensor Common ++# ++# CONFIG_IIO_SSP_SENSORHUB is not set ++# end of SSP Sensor Common ++ ++# ++# Digital to analog converters ++# ++# CONFIG_AD3552R is not set ++# CONFIG_AD5064 is not set ++# CONFIG_AD5360 is not set ++# CONFIG_AD5380 is not set ++# CONFIG_AD5421 is not set ++# CONFIG_AD5446 is not set ++# CONFIG_AD5449 is not set ++# CONFIG_AD5592R is not set ++# CONFIG_AD5593R is not set ++# CONFIG_AD5504 is not set ++# CONFIG_AD5624R_SPI is not set ++# CONFIG_LTC2688 is not set ++# CONFIG_AD5686_SPI is not set ++# CONFIG_AD5696_I2C is not set ++# CONFIG_AD5755 is not set ++# CONFIG_AD5758 is not set ++# CONFIG_AD5761 is not set ++# CONFIG_AD5764 is not set ++# CONFIG_AD5766 is not set ++# CONFIG_AD5770R is not set ++# CONFIG_AD5791 is not set ++# CONFIG_AD7293 is not set ++# CONFIG_AD7303 is not set ++# CONFIG_AD8801 is not set ++# CONFIG_DPOT_DAC is not set ++# CONFIG_DS4424 is not set ++# CONFIG_LTC1660 is not set ++# CONFIG_LTC2632 is not set ++# CONFIG_M62332 is not set ++# CONFIG_MAX517 is not set ++# CONFIG_MAX5522 is not set ++# CONFIG_MAX5821 is not set ++# CONFIG_MCP4725 is not set ++# CONFIG_MCP4728 is not set ++# CONFIG_MCP4922 is not set ++# CONFIG_TI_DAC082S085 is not set ++# CONFIG_TI_DAC5571 is not set ++# CONFIG_TI_DAC7311 is not set ++# CONFIG_TI_DAC7612 is not set ++# CONFIG_VF610_DAC is not set ++# end of Digital to analog converters ++ ++# ++# IIO dummy driver ++# ++# CONFIG_IIO_SIMPLE_DUMMY is not set ++# end of IIO dummy driver ++ ++# ++# Filters ++# ++# CONFIG_ADMV8818 is not set ++# end of Filters ++ ++# ++# 
Frequency Synthesizers DDS/PLL ++# ++ ++# ++# Clock Generator/Distribution ++# ++# CONFIG_AD9523 is not set ++# end of Clock Generator/Distribution ++ ++# ++# Phase-Locked Loop (PLL) frequency synthesizers ++# ++# CONFIG_ADF4350 is not set ++# CONFIG_ADF4371 is not set ++# CONFIG_ADF4377 is not set ++# CONFIG_ADMV1013 is not set ++# CONFIG_ADMV1014 is not set ++# CONFIG_ADMV4420 is not set ++# CONFIG_ADRF6780 is not set ++# end of Phase-Locked Loop (PLL) frequency synthesizers ++# end of Frequency Synthesizers DDS/PLL ++ ++# ++# Digital gyroscope sensors ++# ++# CONFIG_ADIS16080 is not set ++# CONFIG_ADIS16130 is not set ++# CONFIG_ADIS16136 is not set ++# CONFIG_ADIS16260 is not set ++# CONFIG_ADXRS290 is not set ++# CONFIG_ADXRS450 is not set ++# CONFIG_BMG160 is not set ++# CONFIG_FXAS21002C is not set ++# CONFIG_HID_SENSOR_GYRO_3D is not set ++# CONFIG_MPU3050_I2C is not set ++# CONFIG_IIO_ST_GYRO_3AXIS is not set ++# CONFIG_ITG3200 is not set ++# end of Digital gyroscope sensors ++ ++# ++# Health Sensors ++# ++ ++# ++# Heart Rate Monitors ++# ++# CONFIG_AFE4403 is not set ++# CONFIG_AFE4404 is not set ++# CONFIG_MAX30100 is not set ++# CONFIG_MAX30102 is not set ++# end of Heart Rate Monitors ++# end of Health Sensors ++ ++# ++# Humidity sensors ++# ++# CONFIG_AM2315 is not set ++# CONFIG_DHT11 is not set ++# CONFIG_HDC100X is not set ++# CONFIG_HDC2010 is not set ++# CONFIG_HID_SENSOR_HUMIDITY is not set ++# CONFIG_HTS221 is not set ++# CONFIG_HTU21 is not set ++# CONFIG_SI7005 is not set ++# CONFIG_SI7020 is not set ++# end of Humidity sensors ++ ++# ++# Inertial measurement units ++# ++# CONFIG_ADIS16400 is not set ++# CONFIG_ADIS16460 is not set ++# CONFIG_ADIS16475 is not set ++# CONFIG_ADIS16480 is not set ++# CONFIG_BMI160_I2C is not set ++# CONFIG_BMI160_SPI is not set ++# CONFIG_BOSCH_BNO055_SERIAL is not set ++# CONFIG_BOSCH_BNO055_I2C is not set ++# CONFIG_FXOS8700_I2C is not set ++# CONFIG_FXOS8700_SPI is not set ++# CONFIG_KMX61 is not set ++# CONFIG_INV_ICM42600_I2C is not set ++# CONFIG_INV_ICM42600_SPI is not set ++# CONFIG_INV_MPU6050_I2C is not set ++# CONFIG_INV_MPU6050_SPI is not set ++# CONFIG_IIO_ST_LSM6DSX is not set ++# CONFIG_IIO_ST_LSM9DS0 is not set ++# end of Inertial measurement units ++ ++# ++# Light sensors ++# ++# CONFIG_ACPI_ALS is not set ++# CONFIG_ADJD_S311 is not set ++# CONFIG_ADUX1020 is not set ++# CONFIG_AL3010 is not set ++# CONFIG_AL3320A is not set ++# CONFIG_APDS9300 is not set ++# CONFIG_APDS9960 is not set ++# CONFIG_AS73211 is not set ++# CONFIG_BH1750 is not set ++# CONFIG_BH1780 is not set ++# CONFIG_CM32181 is not set ++# CONFIG_CM3232 is not set ++# CONFIG_CM3323 is not set ++# CONFIG_CM3605 is not set ++# CONFIG_CM36651 is not set ++# CONFIG_GP2AP002 is not set ++# CONFIG_GP2AP020A00F is not set ++# CONFIG_SENSORS_ISL29018 is not set ++# CONFIG_SENSORS_ISL29028 is not set ++# CONFIG_ISL29125 is not set ++# CONFIG_HID_SENSOR_ALS is not set ++# CONFIG_HID_SENSOR_PROX is not set ++# CONFIG_JSA1212 is not set ++# CONFIG_ROHM_BU27008 is not set ++# CONFIG_ROHM_BU27034 is not set ++# CONFIG_RPR0521 is not set ++# CONFIG_LTR501 is not set ++# CONFIG_LTRF216A is not set ++# CONFIG_LV0104CS is not set ++# CONFIG_MAX44000 is not set ++# CONFIG_MAX44009 is not set ++# CONFIG_NOA1305 is not set ++# CONFIG_OPT3001 is not set ++# CONFIG_OPT4001 is not set ++# CONFIG_PA12203001 is not set ++# CONFIG_SI1133 is not set ++# CONFIG_SI1145 is not set ++# CONFIG_STK3310 is not set ++# CONFIG_ST_UVIS25 is not set ++# CONFIG_TCS3414 is not set ++# 
CONFIG_TCS3472 is not set ++# CONFIG_SENSORS_TSL2563 is not set ++# CONFIG_TSL2583 is not set ++# CONFIG_TSL2591 is not set ++# CONFIG_TSL2772 is not set ++# CONFIG_TSL4531 is not set ++# CONFIG_US5182D is not set ++# CONFIG_VCNL4000 is not set ++# CONFIG_VCNL4035 is not set ++# CONFIG_VEML6030 is not set ++# CONFIG_VEML6070 is not set ++# CONFIG_VL6180 is not set ++# CONFIG_ZOPT2201 is not set ++# end of Light sensors ++ ++# ++# Magnetometer sensors ++# ++# CONFIG_AK8974 is not set ++# CONFIG_AK8975 is not set ++# CONFIG_AK09911 is not set ++# CONFIG_BMC150_MAGN_I2C is not set ++# CONFIG_BMC150_MAGN_SPI is not set ++# CONFIG_MAG3110 is not set ++# CONFIG_HID_SENSOR_MAGNETOMETER_3D is not set ++# CONFIG_MMC35240 is not set ++# CONFIG_IIO_ST_MAGN_3AXIS is not set ++# CONFIG_SENSORS_HMC5843_I2C is not set ++# CONFIG_SENSORS_HMC5843_SPI is not set ++# CONFIG_SENSORS_RM3100_I2C is not set ++# CONFIG_SENSORS_RM3100_SPI is not set ++# CONFIG_TI_TMAG5273 is not set ++# CONFIG_YAMAHA_YAS530 is not set ++# end of Magnetometer sensors ++ ++# ++# Multiplexers ++# ++# CONFIG_IIO_MUX is not set ++# end of Multiplexers ++ ++# ++# Inclinometer sensors ++# ++# CONFIG_HID_SENSOR_INCLINOMETER_3D is not set ++# CONFIG_HID_SENSOR_DEVICE_ROTATION is not set ++# end of Inclinometer sensors ++ ++# ++# Linear and angular position sensors ++# ++# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set ++# end of Linear and angular position sensors ++ ++# ++# Digital potentiometers ++# ++# CONFIG_AD5110 is not set ++# CONFIG_AD5272 is not set ++# CONFIG_DS1803 is not set ++# CONFIG_MAX5432 is not set ++# CONFIG_MAX5481 is not set ++# CONFIG_MAX5487 is not set ++# CONFIG_MCP4018 is not set ++# CONFIG_MCP4131 is not set ++# CONFIG_MCP4531 is not set ++# CONFIG_MCP41010 is not set ++# CONFIG_TPL0102 is not set ++# CONFIG_X9250 is not set ++# end of Digital potentiometers ++ ++# ++# Digital potentiostats ++# ++# CONFIG_LMP91000 is not set ++# end of Digital potentiostats ++ ++# ++# Pressure sensors ++# ++# CONFIG_ABP060MG is not set ++# CONFIG_BMP280 is not set ++# CONFIG_DLHL60D is not set ++# CONFIG_DPS310 is not set ++# CONFIG_HID_SENSOR_PRESS is not set ++# CONFIG_HP03 is not set ++# CONFIG_ICP10100 is not set ++# CONFIG_MPL115_I2C is not set ++# CONFIG_MPL115_SPI is not set ++# CONFIG_MPL3115 is not set ++# CONFIG_MPRLS0025PA is not set ++# CONFIG_MS5611 is not set ++# CONFIG_MS5637 is not set ++# CONFIG_IIO_ST_PRESS is not set ++# CONFIG_T5403 is not set ++# CONFIG_HP206C is not set ++# CONFIG_ZPA2326 is not set ++# end of Pressure sensors ++ ++# ++# Lightning sensors ++# ++# CONFIG_AS3935 is not set ++# end of Lightning sensors ++ ++# ++# Proximity and distance sensors ++# ++# CONFIG_IRSD200 is not set ++# CONFIG_ISL29501 is not set ++# CONFIG_LIDAR_LITE_V2 is not set ++# CONFIG_MB1232 is not set ++# CONFIG_PING is not set ++# CONFIG_RFD77402 is not set ++# CONFIG_SRF04 is not set ++# CONFIG_SX9310 is not set ++# CONFIG_SX9324 is not set ++# CONFIG_SX9360 is not set ++# CONFIG_SX9500 is not set ++# CONFIG_SRF08 is not set ++# CONFIG_VCNL3020 is not set ++# CONFIG_VL53L0X_I2C is not set ++# end of Proximity and distance sensors ++ ++# ++# Resolver to digital converters ++# ++# CONFIG_AD2S90 is not set ++# CONFIG_AD2S1200 is not set ++# end of Resolver to digital converters ++ ++# ++# Temperature sensors ++# ++# CONFIG_LTC2983 is not set ++# CONFIG_MAXIM_THERMOCOUPLE is not set ++# CONFIG_HID_SENSOR_TEMP is not set ++# CONFIG_MLX90614 is not set ++# CONFIG_MLX90632 is not set ++# CONFIG_TMP006 is not set ++# CONFIG_TMP007 
is not set ++# CONFIG_TMP117 is not set ++# CONFIG_TSYS01 is not set ++# CONFIG_TSYS02D is not set ++# CONFIG_MAX30208 is not set ++# CONFIG_MAX31856 is not set ++# CONFIG_MAX31865 is not set ++# end of Temperature sensors ++ + # CONFIG_NTB is not set + CONFIG_PWM=y + CONFIG_PWM_SYSFS=y +@@ -6090,7 +7632,12 @@ CONFIG_PWM_SYSFS=y + # CONFIG_PWM_DWC is not set + # CONFIG_PWM_FSL_FTM is not set + # CONFIG_PWM_PCA9685 is not set ++CONFIG_PWM_PXA=m ++# CONFIG_PWM_RCAR is not set ++# CONFIG_PWM_RENESAS_TPU is not set + CONFIG_PWM_SIFIVE=m ++# CONFIG_PWM_SUN4I is not set ++CONFIG_PWM_XUANTIE=y + # CONFIG_PWM_XILINX is not set + + # +@@ -6099,15 +7646,24 @@ CONFIG_PWM_SIFIVE=m + CONFIG_IRQCHIP=y + # CONFIG_AL_FIC is not set + # CONFIG_XILINX_INTC is not set ++CONFIG_SOPHGO_SG2044_MSI=y + CONFIG_RISCV_INTC=y ++CONFIG_RISCV_APLIC=y ++CONFIG_RISCV_APLIC_MSI=y ++CONFIG_RISCV_IMSIC=y ++CONFIG_RISCV_IMSIC_PCI=y + CONFIG_SIFIVE_PLIC=y ++CONFIG_THEAD_C900_ACLINT_SSWI=y + # end of IRQ chip support + + # CONFIG_IPACK_BUS is not set + CONFIG_RESET_CONTROLLER=y + CONFIG_RESET_SIMPLE=y ++CONFIG_RESET_SUNXI=y ++CONFIG_RESET_TH1520=y + # CONFIG_RESET_TI_SYSCON is not set + # CONFIG_RESET_TI_TPS380X is not set ++CONFIG_RESET_K1X_SPACEMIT=y + CONFIG_RESET_STARFIVE_JH71X0=y + CONFIG_RESET_STARFIVE_JH7100=y + CONFIG_RESET_STARFIVE_JH7110=y +@@ -6116,7 +7672,12 @@ CONFIG_RESET_STARFIVE_JH7110=y + # PHY Subsystem + # + CONFIG_GENERIC_PHY=y ++CONFIG_GENERIC_PHY_MIPI_DPHY=y + # CONFIG_PHY_CAN_TRANSCEIVER is not set ++CONFIG_PHY_SUN4I_USB=m ++# CONFIG_PHY_SUN6I_MIPI_DPHY is not set ++# CONFIG_PHY_SUN9I_USB is not set ++# CONFIG_PHY_SUN50I_USB3 is not set + + # + # PHY drivers for Broadcom platforms +@@ -6132,14 +7693,21 @@ CONFIG_GENERIC_PHY=y + # CONFIG_PHY_PXA_28NM_HSIC is not set + # CONFIG_PHY_PXA_28NM_USB2 is not set + # CONFIG_PHY_LAN966X_SERDES is not set ++# CONFIG_PHY_CPCAP_USB is not set + # CONFIG_PHY_MAPPHONE_MDM6600 is not set + # CONFIG_PHY_OCELOT_SERDES is not set + # CONFIG_PHY_QCOM_USB_HS is not set + # CONFIG_PHY_QCOM_USB_HSIC is not set ++# CONFIG_PHY_R8A779F0_ETHERNET_SERDES is not set ++# CONFIG_PHY_RCAR_GEN2 is not set ++# CONFIG_PHY_RCAR_GEN3_PCIE is not set ++# CONFIG_PHY_RCAR_GEN3_USB2 is not set ++# CONFIG_PHY_RCAR_GEN3_USB3 is not set + # CONFIG_PHY_STARFIVE_JH7110_DPHY_RX is not set + # CONFIG_PHY_STARFIVE_JH7110_PCIE is not set + # CONFIG_PHY_STARFIVE_JH7110_USB is not set + # CONFIG_PHY_TUSB1210 is not set ++CONFIG_PHY_DW_DPHY=y + # end of PHY Subsystem + + # CONFIG_POWERCAP is not set +@@ -6151,6 +7719,7 @@ CONFIG_GENERIC_PHY=y + CONFIG_RISCV_PMU=y + CONFIG_RISCV_PMU_LEGACY=y + CONFIG_RISCV_PMU_SBI=y ++CONFIG_ANDES_CUSTOM_PMU=y + # end of Performance monitor support + + CONFIG_RAS=y +@@ -6191,7 +7760,9 @@ CONFIG_NVMEM_SYSFS=y + # end of Layout Types + + # CONFIG_NVMEM_RMEM is not set ++CONFIG_NVMEM_SUNXI_SID=y + # CONFIG_NVMEM_U_BOOT_ENV is not set ++CONFIG_NVMEM_XUANTIE_TH1520_EFUSE=y + + # + # HW tracing support +@@ -6202,6 +7773,8 @@ CONFIG_NVMEM_SYSFS=y + + # CONFIG_FPGA is not set + # CONFIG_FSI is not set ++CONFIG_TEE=y ++CONFIG_OPTEE=y + CONFIG_PM_OPP=y + # CONFIG_SIOX is not set + # CONFIG_SLIMBUS is not set +@@ -6223,6 +7796,7 @@ CONFIG_INTERCONNECT=y + # + CONFIG_VALIDATE_FS_PARSER=y + CONFIG_FS_IOMAP=y ++CONFIG_FS_STACK=y + CONFIG_BUFFER_HEAD=y + CONFIG_LEGACY_DIRECT_IO=y + # CONFIG_EXT2_FS is not set +@@ -6235,6 +7809,7 @@ CONFIG_EXT4_FS_POSIX_ACL=y + CONFIG_EXT4_FS_SECURITY=y + # CONFIG_EXT4_DEBUG is not set + # CONFIG_EXT4_ERROR_REPORT is not set ++# 
CONFIG_EXT4_DIOREAD_NOLOCK_PARAM is not set + CONFIG_JBD2=y + # CONFIG_JBD2_DEBUG is not set + CONFIG_FS_MBCACHE=y +@@ -6289,10 +7864,11 @@ CONFIG_QUOTA_TREE=y + CONFIG_QFMT_V2=y + CONFIG_QUOTACTL=y + CONFIG_AUTOFS_FS=y +-CONFIG_FUSE_FS=m ++CONFIG_FUSE_FS=y + CONFIG_CUSE=m + CONFIG_VIRTIO_FS=m +-CONFIG_OVERLAY_FS=m ++CONFIG_FUSE_PASSTHROUGH=y ++CONFIG_OVERLAY_FS=y + # CONFIG_OVERLAY_FS_REDIRECT_DIR is not set + CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y + # CONFIG_OVERLAY_FS_INDEX is not set +@@ -6363,9 +7939,9 @@ CONFIG_TMPFS_XATTR=y + # CONFIG_TMPFS_QUOTA is not set + CONFIG_ARCH_SUPPORTS_HUGETLBFS=y + CONFIG_HUGETLBFS=y ++# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set + CONFIG_HUGETLB_PAGE=y + CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +-# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set + # CONFIG_HUGETLB_ALLOC_LIMIT is not set + CONFIG_ARCH_HAS_GIGANTIC_PAGE=y + CONFIG_CONFIGFS_FS=y +@@ -6382,8 +7958,24 @@ CONFIG_MISC_FILESYSTEMS=y + # CONFIG_BEFS_FS is not set + # CONFIG_BFS_FS is not set + # CONFIG_EFS_FS is not set +-# CONFIG_JFFS2_FS is not set +-# CONFIG_UBIFS_FS is not set ++CONFIG_JFFS2_FS=y ++CONFIG_JFFS2_FS_DEBUG=0 ++CONFIG_JFFS2_FS_WRITEBUFFER=y ++# CONFIG_JFFS2_FS_WBUF_VERIFY is not set ++# CONFIG_JFFS2_SUMMARY is not set ++# CONFIG_JFFS2_FS_XATTR is not set ++# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set ++CONFIG_JFFS2_ZLIB=y ++CONFIG_JFFS2_RTIME=y ++CONFIG_UBIFS_FS=y ++# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set ++CONFIG_UBIFS_FS_LZO=y ++CONFIG_UBIFS_FS_ZLIB=y ++CONFIG_UBIFS_FS_ZSTD=y ++# CONFIG_UBIFS_ATIME_SUPPORT is not set ++CONFIG_UBIFS_FS_XATTR=y ++CONFIG_UBIFS_FS_SECURITY=y ++# CONFIG_UBIFS_FS_AUTHENTICATION is not set + CONFIG_CRAMFS=m + CONFIG_CRAMFS_BLOCKDEV=y + # CONFIG_CRAMFS_MTD is not set +@@ -6432,7 +8024,7 @@ CONFIG_NFS_V4=y + CONFIG_NFS_V4_1=y + CONFIG_NFS_V4_2=y + CONFIG_PNFS_FILE_LAYOUT=y +-CONFIG_PNFS_BLOCK=m ++CONFIG_PNFS_BLOCK=y + CONFIG_PNFS_FLEXFILE_LAYOUT=m + CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" + # CONFIG_NFS_V4_1_MIGRATION is not set +@@ -6518,7 +8110,7 @@ CONFIG_NLS_ISO8859_8=m + CONFIG_NLS_CODEPAGE_1250=m + CONFIG_NLS_CODEPAGE_1251=m + CONFIG_NLS_ASCII=y +-CONFIG_NLS_ISO8859_1=m ++CONFIG_NLS_ISO8859_1=y + CONFIG_NLS_ISO8859_2=m + CONFIG_NLS_ISO8859_3=m + CONFIG_NLS_ISO8859_4=m +@@ -6557,6 +8149,7 @@ CONFIG_KEYS=y + CONFIG_PERSISTENT_KEYRINGS=y + CONFIG_TRUSTED_KEYS=y + CONFIG_TRUSTED_KEYS_TPM=y ++CONFIG_TRUSTED_KEYS_TEE=y + CONFIG_ENCRYPTED_KEYS=y + # CONFIG_USER_DECRYPTED_DATA is not set + # CONFIG_KEY_DH_OPERATIONS is not set +@@ -6635,6 +8228,7 @@ CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y + CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y + # CONFIG_IMA_DISABLE_HTABLE is not set + # CONFIG_IMA_DIGEST_LIST is not set ++# CONFIG_IMA_OVERLAYFS_DETECTION_BYPASS is not set + CONFIG_EVM=y + # CONFIG_EVM_DEFAULT_HASH_SHA1 is not set + CONFIG_EVM_DEFAULT_HASH_SHA256=y +@@ -6657,6 +8251,8 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,appar + # Memory initialization + # + CONFIG_INIT_STACK_NONE=y ++# CONFIG_INIT_STACK_ALL_PATTERN is not set ++# CONFIG_INIT_STACK_ALL_ZERO is not set + # CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set + # CONFIG_INIT_ON_FREE_DEFAULT_ON is not set + # CONFIG_ZERO_CALL_USED_REGS is not set +@@ -6671,8 +8267,6 @@ CONFIG_LIST_HARDENED=y + + CONFIG_RANDSTRUCT_NONE=y + # end of Kernel hardening options +- +-# CONFIG_SECURITY_BOOT_INIT is not set + # end of Security options + + CONFIG_XOR_BLOCKS=m +@@ -6693,6 +8287,7 @@ CONFIG_CRYPTO_ALGAPI=y + CONFIG_CRYPTO_ALGAPI2=y + 
CONFIG_CRYPTO_AEAD=y + CONFIG_CRYPTO_AEAD2=y ++CONFIG_CRYPTO_SIG=y + CONFIG_CRYPTO_SIG2=y + CONFIG_CRYPTO_SKCIPHER=y + CONFIG_CRYPTO_SKCIPHER2=y +@@ -6704,18 +8299,18 @@ CONFIG_CRYPTO_RNG_DEFAULT=y + CONFIG_CRYPTO_AKCIPHER2=y + CONFIG_CRYPTO_AKCIPHER=y + CONFIG_CRYPTO_KPP2=y +-CONFIG_CRYPTO_KPP=m ++CONFIG_CRYPTO_KPP=y + CONFIG_CRYPTO_ACOMP2=y + CONFIG_CRYPTO_MANAGER=y + CONFIG_CRYPTO_MANAGER2=y +-CONFIG_CRYPTO_USER=m ++CONFIG_CRYPTO_USER=y + # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set + # CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set + CONFIG_CRYPTO_NULL=y + CONFIG_CRYPTO_NULL2=y + CONFIG_CRYPTO_PCRYPT=m + CONFIG_CRYPTO_CRYPTD=m +-CONFIG_CRYPTO_AUTHENC=m ++CONFIG_CRYPTO_AUTHENC=y + CONFIG_CRYPTO_TEST=m + CONFIG_CRYPTO_ENGINE=y + # end of Crypto core or helper +@@ -6724,14 +8319,14 @@ CONFIG_CRYPTO_ENGINE=y + # Public-key cryptography + # + CONFIG_CRYPTO_RSA=y +-CONFIG_CRYPTO_DH=m ++CONFIG_CRYPTO_DH=y + # CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +-CONFIG_CRYPTO_ECC=m +-CONFIG_CRYPTO_ECDH=m ++CONFIG_CRYPTO_ECC=y ++CONFIG_CRYPTO_ECDH=y + # CONFIG_CRYPTO_ECDSA is not set + # CONFIG_CRYPTO_ECRDSA is not set + CONFIG_CRYPTO_SM2=y +-CONFIG_CRYPTO_CURVE25519=m ++CONFIG_CRYPTO_CURVE25519=y + # end of Public-key cryptography + + # +@@ -6747,7 +8342,7 @@ CONFIG_CRYPTO_CAMELLIA=m + CONFIG_CRYPTO_CAST_COMMON=m + CONFIG_CRYPTO_CAST5=m + CONFIG_CRYPTO_CAST6=m +-CONFIG_CRYPTO_DES=m ++CONFIG_CRYPTO_DES=y + CONFIG_CRYPTO_FCRYPT=m + CONFIG_CRYPTO_KHAZAD=m + CONFIG_CRYPTO_SEED=m +@@ -6764,7 +8359,7 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m + # + # CONFIG_CRYPTO_ADIANTUM is not set + CONFIG_CRYPTO_ARC4=m +-CONFIG_CRYPTO_CHACHA20=m ++CONFIG_CRYPTO_CHACHA20=y + CONFIG_CRYPTO_CBC=y + # CONFIG_CRYPTO_CFB is not set + CONFIG_CRYPTO_CTR=y +@@ -6773,35 +8368,35 @@ CONFIG_CRYPTO_ECB=y + # CONFIG_CRYPTO_HCTR2 is not set + # CONFIG_CRYPTO_KEYWRAP is not set + CONFIG_CRYPTO_LRW=m +-# CONFIG_CRYPTO_OFB is not set ++CONFIG_CRYPTO_OFB=y + CONFIG_CRYPTO_PCBC=m +-CONFIG_CRYPTO_XTS=m ++CONFIG_CRYPTO_XTS=y + # end of Length-preserving ciphers and modes + + # + # AEAD (authenticated encryption with associated data) ciphers + # + # CONFIG_CRYPTO_AEGIS128 is not set +-CONFIG_CRYPTO_CHACHA20POLY1305=m +-CONFIG_CRYPTO_CCM=m ++CONFIG_CRYPTO_CHACHA20POLY1305=y ++CONFIG_CRYPTO_CCM=y + CONFIG_CRYPTO_GCM=y + CONFIG_CRYPTO_GENIV=y + CONFIG_CRYPTO_SEQIV=y + CONFIG_CRYPTO_ECHAINIV=m +-CONFIG_CRYPTO_ESSIV=m ++CONFIG_CRYPTO_ESSIV=y + # end of AEAD (authenticated encryption with associated data) ciphers + + # + # Hashes, digests, and MACs + # + CONFIG_CRYPTO_BLAKE2B=m +-CONFIG_CRYPTO_CMAC=m ++CONFIG_CRYPTO_CMAC=y + CONFIG_CRYPTO_GHASH=y + CONFIG_CRYPTO_HMAC=y + CONFIG_CRYPTO_MD4=m + CONFIG_CRYPTO_MD5=y + CONFIG_CRYPTO_MICHAEL_MIC=m +-CONFIG_CRYPTO_POLY1305=m ++CONFIG_CRYPTO_POLY1305=y + CONFIG_CRYPTO_RMD160=m + CONFIG_CRYPTO_SHA1=y + CONFIG_CRYPTO_SHA256=y +@@ -6864,6 +8459,10 @@ CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y + + CONFIG_CRYPTO_HASH_INFO=y + CONFIG_CRYPTO_HW=y ++CONFIG_CRYPTO_DEV_ALLWINNER=y ++# CONFIG_CRYPTO_DEV_SUN4I_SS is not set ++# CONFIG_CRYPTO_DEV_SUN8I_CE is not set ++# CONFIG_CRYPTO_DEV_SUN8I_SS is not set + # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set + # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set + # CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +@@ -6871,6 +8470,7 @@ CONFIG_CRYPTO_HW=y + # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set + # CONFIG_CRYPTO_DEV_QAT_C62X is not set + # CONFIG_CRYPTO_DEV_QAT_4XXX is not set ++# CONFIG_CRYPTO_DEV_QAT_420XX is not set + # CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set + # 
CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set + # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +@@ -6933,16 +8533,16 @@ CONFIG_GENERIC_PCI_IOMAP=y + # + CONFIG_CRYPTO_LIB_UTILS=y + CONFIG_CRYPTO_LIB_AES=y +-CONFIG_CRYPTO_LIB_ARC4=m ++CONFIG_CRYPTO_LIB_ARC4=y + CONFIG_CRYPTO_LIB_GF128MUL=y + CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +-CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m ++CONFIG_CRYPTO_LIB_CHACHA_GENERIC=y + # CONFIG_CRYPTO_LIB_CHACHA is not set +-CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m ++CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=y + # CONFIG_CRYPTO_LIB_CURVE25519 is not set +-CONFIG_CRYPTO_LIB_DES=m ++CONFIG_CRYPTO_LIB_DES=y + CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +-CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m ++CONFIG_CRYPTO_LIB_POLY1305_GENERIC=y + # CONFIG_CRYPTO_LIB_POLY1305 is not set + # CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set + CONFIG_CRYPTO_LIB_SHA1=y +@@ -7013,6 +8613,7 @@ CONFIG_HAS_IOPORT_MAP=y + CONFIG_HAS_DMA=y + CONFIG_NEED_DMA_MAP_STATE=y + CONFIG_ARCH_DMA_ADDR_T_64BIT=y ++CONFIG_ARCH_HAS_DMA_WRITE_COMBINE=y + CONFIG_DMA_DECLARE_COHERENT=y + CONFIG_ARCH_HAS_SETUP_DMA_OPS=y + CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +@@ -7032,7 +8633,7 @@ CONFIG_DMA_CMA=y + # + # Default contiguous memory area size: + # +-CONFIG_CMA_SIZE_MBYTES=64 ++CONFIG_CMA_SIZE_MBYTES=32 + CONFIG_CMA_SIZE_SEL_MBYTES=y + # CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set + # CONFIG_CMA_SIZE_SEL_MIN is not set +@@ -7043,7 +8644,6 @@ CONFIG_DMA_MAP_BENCHMARK=y + CONFIG_SGL_ALLOC=y + CONFIG_CHECK_SIGNATURE=y + # CONFIG_CPUMASK_OFFSTACK is not set +-# CONFIG_FORCE_NR_CPUS is not set + CONFIG_CPU_RMAP=y + CONFIG_DQL=y + CONFIG_GLOB=y +@@ -7113,6 +8713,7 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y + # CONFIG_DEBUG_INFO_REDUCED is not set + CONFIG_DEBUG_INFO_COMPRESSED_NONE=y + # CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set ++# CONFIG_DEBUG_INFO_SPLIT is not set + CONFIG_DEBUG_INFO_BTF=y + CONFIG_PAHOLE_HAS_SPLIT_BTF=y + CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y +@@ -7179,7 +8780,6 @@ CONFIG_SLUB_DEBUG=y + # CONFIG_PAGE_TABLE_CHECK is not set + # CONFIG_PAGE_POISONING is not set + # CONFIG_DEBUG_PAGE_REF is not set +-# CONFIG_DEBUG_RODATA_TEST is not set + CONFIG_ARCH_HAS_DEBUG_WX=y + # CONFIG_DEBUG_WX is not set + CONFIG_GENERIC_PTDUMP=y +@@ -7257,7 +8857,7 @@ CONFIG_LOCK_DEBUGGING_SUPPORT=y + # CONFIG_DEBUG_LOCK_ALLOC is not set + CONFIG_DEBUG_ATOMIC_SLEEP=y + # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +-# CONFIG_LOCK_TORTURE_TEST is not set ++CONFIG_LOCK_TORTURE_TEST=m + # CONFIG_WW_MUTEX_SELFTEST is not set + # CONFIG_SCF_TORTURE_TEST is not set + # CONFIG_CSD_LOCK_WAIT_DEBUG is not set +@@ -7281,8 +8881,9 @@ CONFIG_DEBUG_LIST=y + # + # RCU Debugging + # ++CONFIG_TORTURE_TEST=m + # CONFIG_RCU_SCALE_TEST is not set +-# CONFIG_RCU_TORTURE_TEST is not set ++CONFIG_RCU_TORTURE_TEST=m + # CONFIG_RCU_REF_SCALE_TEST is not set + CONFIG_RCU_CPU_STALL_TIMEOUT=60 + CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +@@ -7358,7 +8959,38 @@ CONFIG_RING_BUFFER_BENCHMARK=m + # CONFIG_SYNTH_EVENT_GEN_TEST is not set + # CONFIG_KPROBE_EVENT_GEN_TEST is not set + # CONFIG_RV is not set +-# CONFIG_SAMPLES is not set ++CONFIG_SAMPLES=y ++# CONFIG_SAMPLE_AUXDISPLAY is not set ++# CONFIG_SAMPLE_TRACE_EVENTS is not set ++# CONFIG_SAMPLE_TRACE_CUSTOM_EVENTS is not set ++# CONFIG_SAMPLE_TRACE_PRINTK is not set ++# CONFIG_SAMPLE_FTRACE_OPS is not set ++# CONFIG_SAMPLE_TRACE_ARRAY is not set ++# CONFIG_SAMPLE_KOBJECT is not set ++# CONFIG_SAMPLE_KPROBES is not set ++# CONFIG_SAMPLE_KFIFO is not set ++# CONFIG_SAMPLE_KDB is not set ++# CONFIG_SAMPLE_RPMSG_CLIENT is not set 
++CONFIG_SAMPLE_LIVEPATCH=m ++# CONFIG_SAMPLE_CONFIGFS is not set ++# CONFIG_SAMPLE_CONNECTOR is not set ++# CONFIG_SAMPLE_FANOTIFY_ERROR is not set ++# CONFIG_SAMPLE_HIDRAW is not set ++# CONFIG_SAMPLE_LANDLOCK is not set ++# CONFIG_SAMPLE_PIDFD is not set ++# CONFIG_SAMPLE_SECCOMP is not set ++# CONFIG_SAMPLE_TIMER is not set ++# CONFIG_SAMPLE_UHID is not set ++# CONFIG_SAMPLE_VFIO_MDEV_MTTY is not set ++# CONFIG_SAMPLE_VFIO_MDEV_MDPY is not set ++# CONFIG_SAMPLE_VFIO_MDEV_MDPY_FB is not set ++# CONFIG_SAMPLE_VFIO_MDEV_MBOCHS is not set ++# CONFIG_SAMPLE_ANDROID_BINDERFS is not set ++# CONFIG_SAMPLE_VFS is not set ++# CONFIG_SAMPLE_TPS6594_PFSM is not set ++# CONFIG_SAMPLE_WATCHDOG is not set ++# CONFIG_SAMPLE_WATCH_QUEUE is not set ++# CONFIG_SAMPLE_KMEMLEAK is not set + CONFIG_STRICT_DEVMEM=y + CONFIG_IO_STRICT_DEVMEM=y + +@@ -7376,7 +9008,47 @@ CONFIG_FUNCTION_ERROR_INJECTION=y + # CONFIG_FAULT_INJECTION is not set + CONFIG_ARCH_HAS_KCOV=y + # CONFIG_KCOV is not set +-# CONFIG_RUNTIME_TESTING_MENU is not set ++CONFIG_RUNTIME_TESTING_MENU=y ++# CONFIG_TEST_DHRY is not set ++# CONFIG_LKDTM is not set ++# CONFIG_TEST_MIN_HEAP is not set ++# CONFIG_TEST_DIV64 is not set ++# CONFIG_BACKTRACE_SELF_TEST is not set ++# CONFIG_TEST_REF_TRACKER is not set ++# CONFIG_RBTREE_TEST is not set ++# CONFIG_REED_SOLOMON_TEST is not set ++# CONFIG_INTERVAL_TREE_TEST is not set ++# CONFIG_PERCPU_TEST is not set ++# CONFIG_ATOMIC64_SELFTEST is not set ++# CONFIG_ASYNC_RAID6_TEST is not set ++# CONFIG_TEST_HEXDUMP is not set ++# CONFIG_STRING_SELFTEST is not set ++# CONFIG_TEST_STRING_HELPERS is not set ++# CONFIG_TEST_KSTRTOX is not set ++# CONFIG_TEST_PRINTF is not set ++# CONFIG_TEST_SCANF is not set ++# CONFIG_TEST_BITMAP is not set ++# CONFIG_TEST_UUID is not set ++# CONFIG_TEST_XARRAY is not set ++# CONFIG_TEST_MAPLE_TREE is not set ++# CONFIG_TEST_RHASHTABLE is not set ++# CONFIG_TEST_IDA is not set ++# CONFIG_TEST_LKM is not set ++# CONFIG_TEST_BITOPS is not set ++# CONFIG_TEST_VMALLOC is not set ++# CONFIG_TEST_USER_COPY is not set ++CONFIG_TEST_BPF=m ++# CONFIG_TEST_BLACKHOLE_DEV is not set ++# CONFIG_FIND_BIT_BENCHMARK is not set ++# CONFIG_TEST_FIRMWARE is not set ++# CONFIG_TEST_SYSCTL is not set ++# CONFIG_TEST_UDELAY is not set ++# CONFIG_TEST_STATIC_KEYS is not set ++# CONFIG_TEST_DYNAMIC_DEBUG is not set ++# CONFIG_TEST_KMOD is not set ++# CONFIG_TEST_MEMCAT_P is not set ++# CONFIG_TEST_MEMINIT is not set ++# CONFIG_TEST_FREE_PAGES is not set + CONFIG_ARCH_USE_MEMTEST=y + # CONFIG_MEMTEST is not set + # end of Kernel Testing and Coverage +@@ -7388,9 +9060,3 @@ CONFIG_ARCH_USE_MEMTEST=y + # end of Kernel hacking + + # CONFIG_KWORKER_NUMA_AFFINITY is not set +- +-# enable openEuler livepatch +-CONFIG_LIVEPATCH=y +-CONFIG_LIVEPATCH_WO_FTRACE=y +-CONFIG_SAMPLES=y +-CONFIG_SAMPLE_LIVEPATCH=m +diff --git a/arch/riscv/configs/sg2042_defconfig b/arch/riscv/configs/sg2042_defconfig +new file mode 100644 +index 000000000000..7c6d2e3ddeaa +--- /dev/null ++++ b/arch/riscv/configs/sg2042_defconfig +@@ -0,0 +1,9 @@ ++# ++# sg2042 SoC support ++# ++CONFIG_ARCH_SOPHGO=y ++CONFIG_MMC_SDHCI_SOPHGO=y ++CONFIG_PCIE_CADENCE_SOPHGO=y ++CONFIG_RISCV_ISA_ZICBOM=n ++CONFIG_SPI_SOPHGO_SPIFMC=m ++ +diff --git a/arch/riscv/configs/th1520_defconfig b/arch/riscv/configs/th1520_defconfig +new file mode 100644 +index 000000000000..10d67d6ff963 +--- /dev/null ++++ b/arch/riscv/configs/th1520_defconfig +@@ -0,0 +1,470 @@ ++CONFIG_SYSVIPC=y ++CONFIG_POSIX_MQUEUE=y ++CONFIG_NO_HZ_IDLE=y ++CONFIG_HIGH_RES_TIMERS=y 
++CONFIG_BPF_SYSCALL=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_CGROUPS=y
++CONFIG_NAMESPACES=y
++CONFIG_USER_NS=y
++CONFIG_CHECKPOINT_RESTORE=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_ARCH_RENESAS=y
++CONFIG_ARCH_THEAD=y
++CONFIG_ARCH_XUANTIE=y
++CONFIG_SOC_SIFIVE=y
++CONFIG_SOC_STARFIVE=y
++CONFIG_ARCH_SUNXI=y
++CONFIG_ARCH_CANAAN=y
++CONFIG_SMP=y
++CONFIG_HOTPLUG_CPU=y
++CONFIG_PM=y
++CONFIG_CPU_IDLE=y
++CONFIG_VIRTUALIZATION=y
++CONFIG_KVM=y
++CONFIG_ACPI=y
++CONFIG_MODULES=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_BLK_DEV_THROTTLING=y
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_UNIX=y
++CONFIG_INET=y
++CONFIG_IP_MULTICAST=y
++CONFIG_IP_ADVANCED_ROUTER=y
++CONFIG_IP_PNP=y
++CONFIG_IP_PNP_DHCP=y
++CONFIG_IP_PNP_BOOTP=y
++CONFIG_IP_PNP_RARP=y
++CONFIG_BRIDGE_NETFILTER=m
++CONFIG_NF_CONNTRACK=m
++CONFIG_NF_CONNTRACK_FTP=m
++CONFIG_NF_CONNTRACK_TFTP=m
++CONFIG_NETFILTER_XT_MARK=m
++CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
++CONFIG_NETFILTER_XT_MATCH_IPVS=m
++CONFIG_IP_VS=m
++CONFIG_IP_VS_PROTO_TCP=y
++CONFIG_IP_VS_PROTO_UDP=y
++CONFIG_IP_VS_RR=m
++CONFIG_IP_VS_NFCT=y
++CONFIG_NF_LOG_ARP=m
++CONFIG_NF_LOG_IPV4=m
++CONFIG_IP_NF_IPTABLES=m
++CONFIG_IP_NF_FILTER=m
++CONFIG_IP_NF_TARGET_REJECT=m
++CONFIG_IP_NF_NAT=m
++CONFIG_IP_NF_TARGET_MASQUERADE=m
++CONFIG_IP_NF_TARGET_REDIRECT=m
++CONFIG_IP_NF_MANGLE=m
++CONFIG_NF_LOG_IPV6=m
++CONFIG_IP6_NF_IPTABLES=m
++CONFIG_IP6_NF_MATCH_IPV6HEADER=m
++CONFIG_IP6_NF_FILTER=m
++CONFIG_IP6_NF_TARGET_REJECT=m
++CONFIG_IP6_NF_MANGLE=m
++CONFIG_BRIDGE=y
++CONFIG_BRIDGE_VLAN_FILTERING=y
++CONFIG_NET_CLS_CGROUP=m
++CONFIG_NETLINK_DIAG=y
++CONFIG_NET_9P=y
++CONFIG_NET_9P_VIRTIO=y
++CONFIG_PCIE_FU740=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_VIRTIO_BLK=y
++CONFIG_BLK_DEV_NVME=m
++CONFIG_BLK_DEV_SD=y
++CONFIG_MD=y
++CONFIG_BLK_DEV_DM=y
++CONFIG_BLK_DEV_DM_BUILTIN=y
++CONFIG_DM_THIN_PROVISIONING=m
++CONFIG_NETDEVICES=y
++CONFIG_VIRTIO_NET=y
++CONFIG_MACB=y
++CONFIG_STMMAC_ETH=y
++CONFIG_MICROSEMI_PHY=y
++CONFIG_INPUT_MOUSEDEV=y
++CONFIG_KEYBOARD_SUN4I_LRADC=m
++CONFIG_SERIAL_8250=y
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_DW=y
++CONFIG_SERIAL_OF_PLATFORM=y
++CONFIG_SERIAL_SH_SCI=y
++CONFIG_VIRTIO_CONSOLE=y
++CONFIG_HW_RANDOM=y
++CONFIG_HW_RANDOM_VIRTIO=y
++CONFIG_I2C_MV64XXX=y
++CONFIG_I2C_DESIGNWARE_PLATFORM=y
++CONFIG_I2C_CHARDEV=y
++CONFIG_SPI=y
++CONFIG_SPI_DW_QUAD=y
++CONFIG_SPI_DESIGNWARE=y
++CONFIG_SPI_DW_MMIO=y
++CONFIG_SPI_SPIDEV=y
++CONFIG_SPI_SUN6I=y
++# CONFIG_PTP_1588_CLOCK is not set
++CONFIG_WATCHDOG=y
++CONFIG_DW_WATCHDOG=y
++CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
++CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP=y
++CONFIG_WATCHDOG_PRETIMEOUT_GOV_PANIC=y
++# CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_PANIC=y
++CONFIG_WATCHDOG_PRETIMEOUT_DEFAULT_GOV_NOOP=y
++CONFIG_WATCHDOG_OPEN_TIMEOUT=32
++CONFIG_SUNXI_WATCHDOG=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_DRM=y
++CONFIG_DRM_PANEL_SIMPLE=y
++CONFIG_DRM_PANEL_JADARD_JD9365DA_H3=y
++CONFIG_DRM_PANEL_HX8279=y
++CONFIG_DRM_VERISILICON=y
++CONFIG_BACKLIGHT_CLASS_DEVICE=y
++CONFIG_BACKLIGHT_PWM=y
++CONFIG_FB=y
++CONFIG_FRAMEBUFFER_CONSOLE=y
++CONFIG_ARCH_FORCE_MAX_ORDER=15
++CONFIG_LOGO=y
++CONFIG_USB=y
++CONFIG_USB_XHCI_HCD=y
++CONFIG_USB_XHCI_PLATFORM=y
++CONFIG_USB_OHCI_HCD=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_MUSB_SUNXI=m
++CONFIG_MMC=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_OF_DWCMSHC=y
++CONFIG_MMC_SUNXI=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_SUN6I=y
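The defconfig above turns on the watchdog pretimeout governors for the TH1520's DesignWare watchdog, with noop selected as the default and panic available. A minimal userspace sketch of how the pretimeout is exercised through the standard watchdog chardev ioctls follows; the `/dev/watchdog0` node and the 30 s/10 s values are assumptions for illustration, not taken from this patch.

/*
 * Minimal sketch, assuming /dev/watchdog0 is the TH1520 DW watchdog:
 * arm the timer, request a pretimeout so the selected governor (noop
 * by default here, panic if reconfigured) fires before the hardware
 * reset, ping once, then magic-close so the board is not reset when
 * the program exits.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30, pretimeout = 10;
	int fd = open("/dev/watchdog0", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
	ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout);
	ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ping the watchdog once */
	write(fd, "V", 1);		/* magic close */
	close(fd);
	return 0;
}

Keeping noop as the default governor matches the config above: the pretimeout is merely reported, and the panic governor only takes over if chosen at runtime via sysfs.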
++CONFIG_RTC_DRV_XGENE=m ++CONFIG_RTC_DRV_XGENE_PRESCALER=y ++CONFIG_DMADEVICES=y ++CONFIG_DMA_SUN6I=m ++CONFIG_DW_AXI_DMAC=y ++CONFIG_DMATEST=y ++CONFIG_VIRTIO_MMIO=y ++CONFIG_SUN8I_DE2_CCU=m ++CONFIG_SUN50I_IOMMU=y ++CONFIG_RPMSG=y ++CONFIG_RPMSG_CHAR=y ++CONFIG_RPMSG_CTRL=y ++CONFIG_RPMSG_VIRTIO=y ++CONFIG_RPMSG_TH1520=y ++CONFIG_ARCH_R9A07G043=y ++CONFIG_PHY_SUN4I_USB=m ++CONFIG_NVMEM_SUNXI_SID=y ++CONFIG_EXT4_FS=y ++CONFIG_EXT4_FS_POSIX_ACL=y ++CONFIG_BTRFS_FS_POSIX_ACL=y ++CONFIG_AUTOFS_FS=y ++CONFIG_OVERLAY_FS=y ++CONFIG_JOLIET=y ++CONFIG_ZISOFS=y ++CONFIG_MSDOS_FS=y ++CONFIG_VFAT_FS=y ++CONFIG_TMPFS=y ++CONFIG_TMPFS_POSIX_ACL=y ++CONFIG_HUGETLBFS=y ++CONFIG_NFS_FS=y ++CONFIG_NFS_V4=y ++CONFIG_NFS_V4_1=y ++CONFIG_NFS_V4_2=y ++CONFIG_ROOT_NFS=y ++CONFIG_9P_FS=y ++CONFIG_NLS_CODEPAGE_437=y ++CONFIG_NLS_ISO8859_1=y ++CONFIG_SECURITY_SELINUX=y ++CONFIG_SECURITY_APPARMOR=y ++CONFIG_DEFAULT_SECURITY_DAC=y ++CONFIG_CRYPTO_USER_API_HASH=y ++CONFIG_PRINTK_TIME=y ++CONFIG_DEBUG_FS=y ++CONFIG_SOFTLOCKUP_DETECTOR=y ++CONFIG_DEBUG_ATOMIC_SLEEP=y ++# CONFIG_RCU_TRACE is not set ++# Enable TEE ++CONFIG_TEE=y ++CONFIG_OPTEE=y ++CONFIG_RCU_TORTURE_TEST=m ++CONFIG_LOCK_TORTURE_TEST=m ++CONFIG_TORTURE_TEST=m ++CONFIG_BLK_DEV_NBD=y ++CONFIG_FUSE_FS=y ++CONFIG_STRICT_KERNEL_RWX=n ++# Enable BPF_JIT & TEST_BPF ++CONFIG_BPF_JIT=y ++CONFIG_RUNTIME_TESTING_MENU=y ++CONFIG_TEST_BPF=m ++# TH1520 ++CONFIG_PINCTRL=y ++CONFIG_PINCTRL_TH1520=y ++# TH1520 PWM ++CONFIG_PWM=y ++CONFIG_PWM_XUANTIE=y ++# TH1520 dwmac ++CONFIG_DWMAC_XUANTIE=y ++# TH1520 GPIO ++CONFIG_GPIO_DWAPB=y ++CONFIG_GPIO_PCA953X=y ++CONFIG_GPIO_PCA953X_IRQ=y ++# TH1520 cpu thermal ++CONFIG_CPU_THERMAL=y ++CONFIG_THERMAL_EMULATION=y ++CONFIG_SENSORS_MR75203=m ++CONFIG_SENSORS_PWM_FAN=m ++# TH1520 USB ++CONFIG_USB_DWC3=m ++CONFIG_USB_DWC3_DUAL_ROLE=y ++CONFIG_USB_DWC3_XUANTIE=m ++CONFIG_USB_ROLE_SWITCH=m ++CONFIG_USB_ONBOARD_HUB=m ++CONFIG_USB_GADGET=y ++CONFIG_USB_CONFIGFS=y ++CONFIG_USB_CONFIGFS_F_FS=y ++CONFIG_USB_CONFIGFS_F_ACC=y ++CONFIG_USB_ZERO=m ++CONFIG_USB_ETH=m ++CONFIG_USB_ETH_EEM=y ++CONFIG_USB_G_NCM=m ++CONFIG_USB_MASS_STORAGE=m ++CONFIG_USB_G_SERIAL=m ++CONFIG_USB_G_MULTI=m ++CONFIG_USB_G_MULTI_CDC=y ++CONFIG_TYPEC=m ++CONFIG_IIO=y ++CONFIG_XUANTIE_TH1520_ADC=m ++# TH1520 CLOCK ++CONFIG_CLK_TH1520_FM=y ++CONFIG_MTD=y ++CONFIG_MTD_TESTS=m ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_SLRAM=m ++CONFIG_MTD_PHRAM=m ++CONFIG_MTD_SPI_NAND=y ++CONFIG_MTD_SPI_NOR=y ++CONFIG_MTD_UBI=y ++# TH1520 MAILBOX ++CONFIG_MAILBOX=y ++# TH1520 AON_SUBSYS ++CONFIG_TH1520_AON_PD=y ++# TH1520 CPU DVFS ++CONFIG_CPU_FREQ=y ++CONFIG_CPU_FREQ_STAT=y ++CONFIG_CPU_FREQ_GOV_POWERSAVE=y ++CONFIG_CPU_FREQ_GOV_USERSPACE=y ++CONFIG_CPU_FREQ_GOV_ONDEMAND=y ++CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y ++CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y ++CONFIG_CPUFREQ_DT=y ++CONFIG_RISCV_XUANTIE_TH1520_CPUFREQ=y ++# TH1520 PMIC_WDT ++CONFIG_TH1520_PMIC_WATCHDOG=y ++# TH1520 audio ++CONFIG_SOUND=y ++CONFIG_SND=y ++CONFIG_SND_SIMPLE_CARD=y ++CONFIG_SND_USB_AUDIO=m ++CONFIG_SND_SOC=y ++CONFIG_SND_SOC_XUANTIE=y ++# TH1520 i2s ++CONFIG_SND_SOC_XUANTIE_TH1520_I2S=y ++CONFIG_SND_SOC_XUANTIE_TH1520_I2S_CH8=y ++CONFIG_SND_SOC_XUANTIE_TH1520_HDMI_PCM=y ++# TH1520 codec ++CONFIG_SND_SOC_ES7210=y ++CONFIG_SND_SOC_ES8156=y ++CONFIG_SND_SOC_AW87519=y ++# TH1520 devfreq thermal ++CONFIG_PM_DEVFREQ=y ++CONFIG_PM_DEVFREQ_EVENT=y ++CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y ++CONFIG_DEVFREQ_GOV_PERFORMANCE=y ++CONFIG_DEVFREQ_GOV_POWERSAVE=y ++CONFIG_DEVFREQ_GOV_USERSPACE=y ++CONFIG_DEVFREQ_GOV_PASSIVE=y ++CONFIG_THERMAL=y 
++CONFIG_THERMAL_STATISTICS=y ++CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y ++# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set ++CONFIG_DEVFREQ_THERMAL=y ++CONFIG_ABX500_CORE=y ++CONFIG_CRASH_DUMP=y ++# CONFIG_BUG is not set ++CONFIG_DEBUG_INFO=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_DMABUF_SELFTESTS=m ++CONFIG_DYNAMIC_DEBUG=y ++CONFIG_EEPROM_AT24=y ++# CONFIG_GCC_PLUGINS is not set ++CONFIG_HID_PID=y ++CONFIG_IIO_SW_DEVICE=y ++CONFIG_JFFS2_FS=y ++CONFIG_KEXEC=y ++CONFIG_LOCKUP_DETECTOR=y ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_PERF_EVENTS=y ++CONFIG_POWER_SUPPLY=y ++CONFIG_PREEMPT=y ++CONFIG_REALTEK_PHY=y ++CONFIG_TASK_DELAY_ACCT=y ++CONFIG_TASK_IO_ACCOUNTING=y ++CONFIG_TASKSTATS=y ++CONFIG_TASK_XACCT=y ++CONFIG_V4L_MEM2MEM_DRIVERS=y ++CONFIG_V4L_PLATFORM_DRIVERS=y ++CONFIG_SCHED_INFO=y ++CONFIG_SCHED_MC=y ++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set ++CONFIG_SW_SYNC=y ++CONFIG_UBIFS_FS=y ++CONFIG_UDMABUF=y ++CONFIG_CRYPTO_AUTHENC=y ++CONFIG_CRYPTO_CBC=y ++CONFIG_CRYPTO_CHACHA20POLY1305=y ++CONFIG_CRYPTO_CTR=y ++CONFIG_CRYPTO_CURVE25519=y ++CONFIG_CRYPTO_DES=y ++CONFIG_CRYPTO_DH=y ++CONFIG_CRYPTO_ESSIV=y ++CONFIG_CRYPTO_MD5=y ++CONFIG_CRYPTO_OFB=y ++CONFIG_CRYPTO_SHA3=y ++CONFIG_CRYPTO_SHA512=y ++CONFIG_CRYPTO_SM3=y ++CONFIG_CRYPTO_SM4=y ++CONFIG_CRYPTO_USER_API_AEAD=y ++# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set ++CONFIG_CRYPTO_USER_API_RNG=y ++CONFIG_CRYPTO_USER_API_SKCIPHER=y ++CONFIG_CRYPTO_USER=y ++CONFIG_CRYPTO_XTS=y ++# CONFIG_DM_BOW is not set ++# CONFIG_DM_CACHE is not set ++# CONFIG_DM_CLONE is not set ++CONFIG_DM_CRYPT=y ++# CONFIG_DM_DEBUG is not set ++# CONFIG_DM_DELAY is not set ++# CONFIG_DM_DUST is not set ++# CONFIG_DM_EBS is not set ++# CONFIG_DM_ERA is not set ++# CONFIG_DM_FLAKEY is not set ++# CONFIG_DM_INIT is not set ++# CONFIG_DM_INTEGRITY is not set ++# CONFIG_DM_LOG_WRITES is not set ++# CONFIG_DM_MIRROR is not set ++# CONFIG_DM_MULTIPATH is not set ++# CONFIG_DM_RAID is not set ++# CONFIG_DM_SNAPSHOT is not set ++# CONFIG_DM_SWITCH is not set ++# CONFIG_DM_THIN_PROVISIONING is not set ++# CONFIG_DM_UEVENT is not set ++# CONFIG_DM_UNSTRIPED is not set ++CONFIG_DM_USER=y ++# CONFIG_DM_VERITY is not set ++# CONFIG_DM_WRITECACHE is not set ++# CONFIG_DM_ZERO is not set ++CONFIG_ENERGY_MODEL=y ++CONFIG_GPIOLIB=y ++CONFIG_INPUT_EVDEV=y ++CONFIG_INPUT_TOUCHSCREEN=y ++CONFIG_KEYBOARD_GPIO=y ++CONFIG_PNFS_BLOCK=y ++# CONFIG_PM_SLEEP is not set ++# CONFIG_SUSPEND is not set ++CONFIG_INPUT_MISC=y ++CONFIG_INPUT_UINPUT=y ++CONFIG_SCSI=y ++# CONFIG_USB_DWC3_OF_SIMPLE is not set ++CONFIG_USB_HIDDEV=y ++# CONFIG_USB_NET_AX88179_178A is not set ++# CONFIG_USB_NET_AX8817X is not set ++# CONFIG_USB_NET_NET1080 is not set ++CONFIG_USB_STORAGE_SDDR09=y ++CONFIG_USB_STORAGE_SDDR55=y ++CONFIG_USB_USBNET=m ++CONFIG_USB_VIDEO_CLASS=m ++CONFIG_VIDEO_ASPEED=y ++# 8723ds wifi ++CONFIG_CFG80211=m ++CONFIG_MAC80211=m ++CONFIG_RTW88=m ++CONFIG_RTW88_8723DS=m ++# PowerVR Rogue GPU ++CONFIG_DRM_POWERVR_ROGUE=m ++# npu-ax3386 ++CONFIG_VHA=m ++CONFIG_VHA_XUANTIE_TH1520=y ++# vc8000d vc8000e ++CONFIG_VIDEO_VC8000D=m ++CONFIG_VIDEO_VC8000E=m ++# video_memory ++CONFIG_VIDEO_MEMORY=m ++# TH1520 tdm ++CONFIG_SND_SOC_XUANTIE_TH1520_TDM=y ++# TH1520 spdif ++CONFIG_SND_SOC_XUANTIE_TH1520_SPDIF=y ++# AIC8800 ++CONFIG_BT=y ++CONFIG_BT_RFCOMM=y ++CONFIG_BT_RFCOMM_TTY=y ++CONFIG_BT_BNEP=y ++CONFIG_BT_HIDP=y ++CONFIG_BT_HCIUART=y ++CONFIG_BT_HCIUART_H4=y ++CONFIG_CFG80211=y ++CONFIG_CFG80211_WEXT=y ++CONFIG_MAC80211=y ++CONFIG_RFKILL=y ++CONFIG_HOSTAP=y ++CONFIG_HOSTAP_FIRMWARE=y 
++CONFIG_HOSTAP_FIRMWARE_NVRAM=y ++CONFIG_SERIAL_8250_NR_UARTS=6 ++CONFIG_SERIAL_8250_RUNTIME_UARTS=6 ++CONFIG_I2C=y ++CONFIG_GPIO_SYSFS=y ++CONFIG_NEW_LEDS=y ++CONFIG_LEDS_CLASS=y ++CONFIG_LEDS_GPIO=y ++# AIC8800 modules ++CONFIG_AIC_WLAN_SUPPORT=m ++CONFIG_AIC8800_WLAN_SUPPORT=m ++CONFIG_AIC8800_BTLPM_SUPPORT=m ++# add MAX_ORDER config for riscv and th1520 ++CONFIG_ARCH_FORCE_MAX_ORDER=15 ++# enable cma config ++CONFIG_MEMORY_ISOLATION=y ++CONFIG_CONTIG_ALLOC=y ++CONFIG_CMA=y ++# CONFIG_CMA_DEBUG is not set ++CONFIG_CMA_DEBUGFS=y ++# CONFIG_CMA_SYSFS is not set ++CONFIG_CMA_AREAS=7 ++CONFIG_DMA_CMA=y ++# ++# Default contiguous memory area size: ++# ++CONFIG_CMA_SIZE_MBYTES=32 ++CONFIG_CMA_SIZE_SEL_MBYTES=y ++# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set ++# CONFIG_CMA_SIZE_SEL_MIN is not set ++# CONFIG_CMA_SIZE_SEL_MAX is not set ++CONFIG_CMA_ALIGNMENT=8 ++# TH1520 efuse ++CONFIG_NVMEM_XUANTIE_TH1520_EFUSE=y ++# TH1520 hwspinlock ++CONFIG_HWSPINLOCK=y ++CONFIG_HWSPINLOCK_TH1520=y ++# TH1520 regdump ++CONFIG_TH1520_REGDUMP=y ++# TH1520 media ++CONFIG_MEDIA_SUPPORT=y ++CONFIG_MEDIA_USB_SUPPORT=y ++CONFIG_V4L_PLATFORM_DRIVERS=y ++CONFIG_V4L_MEM2MEM_DRIVERS=y +diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c +index 17a904869724..fc1a34faa5f3 100644 +--- a/arch/riscv/errata/andes/errata.c ++++ b/arch/riscv/errata/andes/errata.c +@@ -17,10 +17,11 @@ + #include + #include + #include ++#include + +-#define ANDESTECH_AX45MP_MARCHID 0x8000000000008a45UL +-#define ANDESTECH_AX45MP_MIMPID 0x500UL +-#define ANDESTECH_SBI_EXT_ANDES 0x0900031E ++#define ANDES_AX45MP_MARCHID 0x8000000000008a45UL ++#define ANDES_AX45MP_MIMPID 0x500UL ++#define ANDES_SBI_EXT_ANDES 0x0900031E + + #define ANDES_SBI_EXT_IOCP_SW_WORKAROUND 1 + +@@ -32,7 +33,7 @@ static long ax45mp_iocp_sw_workaround(void) + * ANDES_SBI_EXT_IOCP_SW_WORKAROUND SBI EXT checks if the IOCP is missing and + * cache is controllable only then CMO will be applied to the platform. + */ +- ret = sbi_ecall(ANDESTECH_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND, ++ ret = sbi_ecall(ANDES_SBI_EXT_ANDES, ANDES_SBI_EXT_IOCP_SW_WORKAROUND, + 0, 0, 0, 0, 0, 0); + + return ret.error ? 
0 : ret.value; +@@ -50,7 +51,7 @@ static void errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigne + + done = true; + +- if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID) ++ if (arch_id != ANDES_AX45MP_MARCHID || impid != ANDES_AX45MP_MIMPID) + return; + + if (!ax45mp_iocp_sw_workaround()) +@@ -65,6 +66,8 @@ void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct al + unsigned long archid, unsigned long impid, + unsigned int stage) + { ++ BUILD_BUG_ON(ERRATA_ANDES_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ + if (stage == RISCV_ALTERNATIVES_BOOT) + errata_probe_iocp(stage, archid, impid); + +diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c +index 3d9a32d791f7..b68b023115c2 100644 +--- a/arch/riscv/errata/sifive/errata.c ++++ b/arch/riscv/errata/sifive/errata.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + + struct errata_info_t { + char name[32]; +@@ -91,6 +92,8 @@ void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + u32 cpu_apply_errata = 0; + u32 tmp; + ++ BUILD_BUG_ON(ERRATA_SIFIVE_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ + if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) + return; + +diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c +index 0554ed4bf087..928d8f7fe288 100644 +--- a/arch/riscv/errata/thead/errata.c ++++ b/arch/riscv/errata/thead/errata.c +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + + static bool errata_probe_pbmt(unsigned int stage, + unsigned long arch_id, unsigned long impid) +@@ -95,6 +96,8 @@ void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + u32 tmp; + void *oldptr, *altptr; + ++ BUILD_BUG_ON(ERRATA_THEAD_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ + for (alt = begin; alt < end; alt++) { + if (alt->vendor_id != THEAD_VENDOR_ID) + continue; +diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h +index d5604d2073bc..e0a1f84404f3 100644 +--- a/arch/riscv/include/asm/acpi.h ++++ b/arch/riscv/include/asm/acpi.h +@@ -61,11 +61,16 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { } + + void acpi_init_rintc_map(void); + struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu); +-u32 get_acpi_id_for_cpu(int cpu); ++static inline u32 get_acpi_id_for_cpu(int cpu) ++{ ++ return acpi_cpu_get_madt_rintc(cpu)->uid; ++} ++ + int acpi_get_riscv_isa(struct acpi_table_header *table, + unsigned int cpu, const char **isa); + +-static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } ++void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size, ++ u32 *cboz_size, u32 *cbop_size); + #else + static inline void acpi_init_rintc_map(void) { } + static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu) +@@ -79,6 +84,18 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table, + return -EINVAL; + } + ++static inline void acpi_get_cbo_block_size(struct acpi_table_header *table, ++ u32 *cbom_size, u32 *cboz_size, ++ u32 *cbop_size) { } ++ + #endif /* CONFIG_ACPI */ + ++#ifdef CONFIG_ACPI_NUMA ++int acpi_numa_get_nid(unsigned int cpu); ++void acpi_map_cpus_to_nodes(void); ++#else ++static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; } ++static inline void acpi_map_cpus_to_nodes(void) { } ++#endif /* CONFIG_ACPI_NUMA */ ++ + #endif /*_ASM_ACPI_H*/ +diff --git a/arch/riscv/include/asm/arch_hweight.h 
b/arch/riscv/include/asm/arch_hweight.h +new file mode 100644 +index 000000000000..85b2c443823e +--- /dev/null ++++ b/arch/riscv/include/asm/arch_hweight.h +@@ -0,0 +1,78 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Based on arch/x86/include/asm/arch_hweight.h ++ */ ++ ++#ifndef _ASM_RISCV_HWEIGHT_H ++#define _ASM_RISCV_HWEIGHT_H ++ ++#include ++#include ++ ++#if (BITS_PER_LONG == 64) ++#define CPOPW "cpopw " ++#elif (BITS_PER_LONG == 32) ++#define CPOPW "cpop " ++#else ++#error "Unexpected BITS_PER_LONG" ++#endif ++ ++static __always_inline unsigned int __arch_hweight32(unsigned int w) ++{ ++#ifdef CONFIG_RISCV_ISA_ZBB ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm (".option push\n" ++ ".option arch,+zbb\n" ++ CPOPW "%0, %0\n" ++ ".option pop\n" ++ : "+r" (w) : :); ++ ++ return w; ++ ++legacy: ++#endif ++ return __sw_hweight32(w); ++} ++ ++static inline unsigned int __arch_hweight16(unsigned int w) ++{ ++ return __arch_hweight32(w & 0xffff); ++} ++ ++static inline unsigned int __arch_hweight8(unsigned int w) ++{ ++ return __arch_hweight32(w & 0xff); ++} ++ ++#if BITS_PER_LONG == 64 ++static __always_inline unsigned long __arch_hweight64(__u64 w) ++{ ++# ifdef CONFIG_RISCV_ISA_ZBB ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm (".option push\n" ++ ".option arch,+zbb\n" ++ "cpop %0, %0\n" ++ ".option pop\n" ++ : "+r" (w) : :); ++ ++ return w; ++ ++legacy: ++# endif ++ return __sw_hweight64(w); ++} ++#else /* BITS_PER_LONG == 64 */ ++static inline unsigned long __arch_hweight64(__u64 w) ++{ ++ return __arch_hweight32((u32)w) + ++ __arch_hweight32((u32)(w >> 32)); ++} ++#endif /* !(BITS_PER_LONG == 64) */ ++ ++#endif /* _ASM_RISCV_HWEIGHT_H */ +diff --git a/arch/riscv/include/asm/archrandom.h b/arch/riscv/include/asm/archrandom.h +new file mode 100644 +index 000000000000..5345360adfb9 +--- /dev/null ++++ b/arch/riscv/include/asm/archrandom.h +@@ -0,0 +1,72 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Kernel interface for the RISCV arch_random_* functions ++ * ++ * Copyright (c) 2023 Rivos Inc. ++ * ++ */ ++ ++#ifndef ASM_RISCV_ARCHRANDOM_H ++#define ASM_RISCV_ARCHRANDOM_H ++ ++#include ++#include ++ ++#define SEED_RETRY_LOOPS 100 ++ ++static inline bool __must_check csr_seed_long(unsigned long *v) ++{ ++ unsigned int retry = SEED_RETRY_LOOPS, valid_seeds = 0; ++ const int needed_seeds = sizeof(long) / sizeof(u16); ++ u16 *entropy = (u16 *)v; ++ ++ do { ++ /* ++ * The SEED CSR must be accessed with a read-write instruction. ++ */ ++ unsigned long csr_seed = csr_swap(CSR_SEED, 0); ++ unsigned long opst = csr_seed & SEED_OPST_MASK; ++ ++ switch (opst) { ++ case SEED_OPST_ES16: ++ entropy[valid_seeds++] = csr_seed & SEED_ENTROPY_MASK; ++ if (valid_seeds == needed_seeds) ++ return true; ++ break; ++ ++ case SEED_OPST_DEAD: ++ pr_err_once("archrandom: Unrecoverable error\n"); ++ return false; ++ ++ case SEED_OPST_BIST: ++ case SEED_OPST_WAIT: ++ default: ++ cpu_relax(); ++ continue; ++ } ++ } while (--retry); ++ ++ return false; ++} ++ ++static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs) ++{ ++ return 0; ++} ++ ++static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs) ++{ ++ if (!max_longs) ++ return 0; ++ ++ /* ++ * If Zkr is supported and csr_seed_long succeeds, we return one long ++ * worth of entropy. 
++ */ ++ if (riscv_has_extension_likely(RISCV_ISA_EXT_ZKR) && csr_seed_long(v)) ++ return 1; ++ ++ return 0; ++} ++ ++#endif /* ASM_RISCV_ARCHRANDOM_H */ +diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h +index f5dfef6c2153..0e0522e588ca 100644 +--- a/arch/riscv/include/asm/atomic.h ++++ b/arch/riscv/include/asm/atomic.h +@@ -17,7 +17,6 @@ + #endif + + #include +-#include + + #define __atomic_acquire_fence() \ + __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory") +@@ -207,7 +206,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int + " add %[rc], %[p], %[a]\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : [a]"r" (a), [u]"r" (u) +@@ -228,7 +227,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, + " add %[rc], %[p], %[a]\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : [a]"r" (a), [u]"r" (u) +@@ -248,7 +247,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v) + " addi %[rc], %[p], 1\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -268,7 +267,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v) + " addi %[rc], %[p], -1\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -288,7 +287,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v) + " bltz %[rc], 1f\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -310,7 +309,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v) + " addi %[rc], %[p], 1\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -331,7 +330,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v) + " addi %[rc], %[p], -1\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +@@ -352,7 +351,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) + " bltz %[rc], 1f\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" +- " fence rw, rw\n" ++ RISCV_FULL_BARRIER + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : +diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h +index 110752594228..feebe8e02ae0 100644 +--- a/arch/riscv/include/asm/barrier.h ++++ b/arch/riscv/include/asm/barrier.h +@@ -11,13 +11,13 @@ + #define _ASM_RISCV_BARRIER_H + + #ifndef __ASSEMBLY__ ++#include ++#include + + #define nop() __asm__ __volatile__ ("nop") + #define __nops(n) ".rept " #n "\nnop\n.endr\n" + #define nops(n) __asm__ __volatile__ (__nops(n)) + +-#define RISCV_FENCE(p, s) \ +- __asm__ __volatile__ ("fence " #p "," #s : : : "memory") + + /* These barriers need to enforce ordering on both devices or memory. 
*/ + #define mb() RISCV_FENCE(iorw,iorw) +@@ -29,21 +29,6 @@ + #define __smp_rmb() RISCV_FENCE(r,r) + #define __smp_wmb() RISCV_FENCE(w,w) + +-#define __smp_store_release(p, v) \ +-do { \ +- compiletime_assert_atomic_type(*p); \ +- RISCV_FENCE(rw,w); \ +- WRITE_ONCE(*p, v); \ +-} while (0) +- +-#define __smp_load_acquire(p) \ +-({ \ +- typeof(*p) ___p1 = READ_ONCE(*p); \ +- compiletime_assert_atomic_type(*p); \ +- RISCV_FENCE(r,rw); \ +- ___p1; \ +-}) +- + /* + * This is a very specific barrier: it's currently only used in two places in + * the kernel, both in the scheduler. See include/linux/spinlock.h for the two +@@ -71,6 +56,45 @@ do { \ + */ + #define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) + ++#ifdef CONFIG_ARCH_SOPHGO ++#define __smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ RISCV_FENCE(rw,w); \ ++ WRITE_ONCE(*p, v); \ ++ RISCV_FENCE(w,rw); \ ++} while (0) ++#else ++#define __smp_store_release(p, v) \ ++do { \ ++ compiletime_assert_atomic_type(*p); \ ++ RISCV_FENCE(rw,w); \ ++ WRITE_ONCE(*p, v); \ ++} while (0) ++#endif ++ ++#define __smp_load_acquire(p) \ ++({ \ ++ typeof(*p) ___p1 = READ_ONCE(*p); \ ++ compiletime_assert_atomic_type(*p); \ ++ RISCV_FENCE(r, rw); \ ++ ___p1; \ ++}) ++ ++#ifdef CONFIG_RISCV_ISA_ZAWRS ++#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ ++ typeof(ptr) __PTR = (ptr); \ ++ __unqual_scalar_typeof(*ptr) VAL; \ ++ for (;;) { \ ++ VAL = READ_ONCE(*__PTR); \ ++ if (cond_expr) \ ++ break; \ ++ __cmpwait_relaxed(ptr, VAL); \ ++ } \ ++ (typeof(*ptr))VAL; \ ++}) ++#endif ++ + #include + + #endif /* __ASSEMBLY__ */ +diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h +index 3540b690944b..3cdcc2bbaaf5 100644 +--- a/arch/riscv/include/asm/bitops.h ++++ b/arch/riscv/include/asm/bitops.h +@@ -15,15 +15,265 @@ + #include + #include + ++#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) + #include +-#include +-#include + #include ++#include ++#include ++ ++#else ++#include ++#include ++ ++#if (BITS_PER_LONG == 64) ++#define CTZW "ctzw " ++#define CLZW "clzw " ++#elif (BITS_PER_LONG == 32) ++#define CTZW "ctz " ++#define CLZW "clz " ++#else ++#error "Unexpected BITS_PER_LONG" ++#endif ++ ++static __always_inline unsigned long variable__ffs(unsigned long word) ++{ ++ int num; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbb\n" ++ "ctz %0, %1\n" ++ ".option pop\n" ++ : "=r" (word) : "r" (word) :); ++ ++ return word; ++ ++legacy: ++ num = 0; ++#if BITS_PER_LONG == 64 ++ if ((word & 0xffffffff) == 0) { ++ num += 32; ++ word >>= 32; ++ } ++#endif ++ if ((word & 0xffff) == 0) { ++ num += 16; ++ word >>= 16; ++ } ++ if ((word & 0xff) == 0) { ++ num += 8; ++ word >>= 8; ++ } ++ if ((word & 0xf) == 0) { ++ num += 4; ++ word >>= 4; ++ } ++ if ((word & 0x3) == 0) { ++ num += 2; ++ word >>= 2; ++ } ++ if ((word & 0x1) == 0) ++ num += 1; ++ return num; ++} ++ ++/** ++ * __ffs - find first set bit in a long word ++ * @word: The word to search ++ * ++ * Undefined if no set bit exists, so code should check against 0 first. ++ */ ++#define __ffs(word) \ ++ (__builtin_constant_p(word) ? 
\ ++ (unsigned long)__builtin_ctzl(word) : \ ++ variable__ffs(word)) ++ ++static __always_inline unsigned long variable__fls(unsigned long word) ++{ ++ int num; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbb\n" ++ "clz %0, %1\n" ++ ".option pop\n" ++ : "=r" (word) : "r" (word) :); ++ ++ return BITS_PER_LONG - 1 - word; ++ ++legacy: ++ num = BITS_PER_LONG - 1; ++#if BITS_PER_LONG == 64 ++ if (!(word & (~0ul << 32))) { ++ num -= 32; ++ word <<= 32; ++ } ++#endif ++ if (!(word & (~0ul << (BITS_PER_LONG - 16)))) { ++ num -= 16; ++ word <<= 16; ++ } ++ if (!(word & (~0ul << (BITS_PER_LONG - 8)))) { ++ num -= 8; ++ word <<= 8; ++ } ++ if (!(word & (~0ul << (BITS_PER_LONG - 4)))) { ++ num -= 4; ++ word <<= 4; ++ } ++ if (!(word & (~0ul << (BITS_PER_LONG - 2)))) { ++ num -= 2; ++ word <<= 2; ++ } ++ if (!(word & (~0ul << (BITS_PER_LONG - 1)))) ++ num -= 1; ++ return num; ++} ++ ++/** ++ * __fls - find last set bit in a long word ++ * @word: the word to search ++ * ++ * Undefined if no set bit exists, so code should check against 0 first. ++ */ ++#define __fls(word) \ ++ (__builtin_constant_p(word) ? \ ++ (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) : \ ++ variable__fls(word)) ++ ++static __always_inline int variable_ffs(int x) ++{ ++ int r; ++ ++ if (!x) ++ return 0; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbb\n" ++ CTZW "%0, %1\n" ++ ".option pop\n" ++ : "=r" (r) : "r" (x) :); ++ ++ return r + 1; ++ ++legacy: ++ r = 1; ++ if (!(x & 0xffff)) { ++ x >>= 16; ++ r += 16; ++ } ++ if (!(x & 0xff)) { ++ x >>= 8; ++ r += 8; ++ } ++ if (!(x & 0xf)) { ++ x >>= 4; ++ r += 4; ++ } ++ if (!(x & 3)) { ++ x >>= 2; ++ r += 2; ++ } ++ if (!(x & 1)) { ++ x >>= 1; ++ r += 1; ++ } ++ return r; ++} ++ ++/** ++ * ffs - find first set bit in a word ++ * @x: the word to search ++ * ++ * This is defined the same way as the libc and compiler builtin ffs routines. ++ * ++ * ffs(value) returns 0 if value is 0 or the position of the first set bit if ++ * value is nonzero. The first (least significant) bit is at position 1. ++ */ ++#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x)) ++ ++static __always_inline int variable_fls(unsigned int x) ++{ ++ int r; ++ ++ if (!x) ++ return 0; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBB, 1) ++ : : : : legacy); ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbb\n" ++ CLZW "%0, %1\n" ++ ".option pop\n" ++ : "=r" (r) : "r" (x) :); ++ ++ return 32 - r; ++ ++legacy: ++ r = 32; ++ if (!(x & 0xffff0000u)) { ++ x <<= 16; ++ r -= 16; ++ } ++ if (!(x & 0xff000000u)) { ++ x <<= 8; ++ r -= 8; ++ } ++ if (!(x & 0xf0000000u)) { ++ x <<= 4; ++ r -= 4; ++ } ++ if (!(x & 0xc0000000u)) { ++ x <<= 2; ++ r -= 2; ++ } ++ if (!(x & 0x80000000u)) { ++ x <<= 1; ++ r -= 1; ++ } ++ return r; ++} ++ ++/** ++ * fls - find last set bit in a word ++ * @x: the word to search ++ * ++ * This is defined in a similar way as ffs, but returns the position of the most ++ * significant set bit. ++ * ++ * fls(value) returns 0 if value is 0 or the position of the last set bit if ++ * value is nonzero. The last (most significant) bit is at position 32. ++ */ ++#define fls(x) \ ++({ \ ++ typeof(x) x_ = (x); \ ++ __builtin_constant_p(x_) ? \ ++ (int)((x_ != 0) ? 
(32 - __builtin_clz(x_)) : 0) \ ++ : \ ++ variable_fls(x_); \ ++}) ++ ++#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */ ++ ++#include + #include + #include +-#include + +-#include ++#include ++ ++#include + + #if (BITS_PER_LONG == 64) + #define __AMO(op) "amo" #op ".d" +diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h +index 2f4726d3cfcc..1f4cd12e4664 100644 +--- a/arch/riscv/include/asm/cmpxchg.h ++++ b/arch/riscv/include/asm/cmpxchg.h +@@ -8,143 +8,87 @@ + + #include + +-#include ++#include + #include ++#include ++#include ++#include + +-#define __xchg_relaxed(ptr, new, size) \ ++#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \ + ({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- " amoswap.w %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- " amoswap.d %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ ++ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ ++ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ ++ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ ++ << __s; \ ++ ulong __newx = (ulong)(n) << __s; \ ++ ulong __retx; \ ++ ulong __rc; \ ++ \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ "0: lr.w %0, %2\n" \ ++ " and %1, %0, %z4\n" \ ++ " or %1, %1, %z3\n" \ ++ " sc.w" sc_sfx " %1, %1, %2\n" \ ++ " bnez %1, 0b\n" \ ++ append \ ++ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ ++ : "rJ" (__newx), "rJ" (~__mask) \ ++ : "memory"); \ ++ \ ++ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ + }) + +-#define arch_xchg_relaxed(ptr, x) \ ++#define __arch_xchg(sfx, prepend, append, r, p, n) \ + ({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __xchg_relaxed((ptr), \ +- _x_, sizeof(*(ptr))); \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ " amoswap" sfx " %0, %2, %1\n" \ ++ append \ ++ : "=r" (r), "+A" (*(p)) \ ++ : "r" (n) \ ++ : "memory"); \ + }) + +-#define __xchg_acquire(ptr, new, size) \ ++#define _arch_xchg(ptr, new, sc_sfx, swap_sfx, prepend, \ ++ sc_append, swap_append) \ + ({ \ + __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ ++ __typeof__(*(__ptr)) __new = (new); \ ++ __typeof__(*(__ptr)) __ret; \ ++ \ ++ switch (sizeof(*__ptr)) { \ ++ case 1: \ ++ case 2: \ ++ __arch_xchg_masked(sc_sfx, prepend, sc_append, \ ++ __ret, __ptr, __new); \ ++ break; \ + case 4: \ +- __asm__ __volatile__ ( \ +- " amoswap.w %0, %2, %1\n" \ +- RISCV_ACQUIRE_BARRIER \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ ++ __arch_xchg(".w" swap_sfx, prepend, swap_append, \ ++ __ret, __ptr, __new); \ + break; \ + case 8: \ +- __asm__ __volatile__ ( \ +- " amoswap.d %0, %2, %1\n" \ +- RISCV_ACQUIRE_BARRIER \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ ++ __arch_xchg(".d" swap_sfx, prepend, swap_append, \ ++ __ret, __ptr, __new); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +- __ret; \ ++ (__typeof__(*(__ptr)))__ret; \ + }) + +-#define arch_xchg_acquire(ptr, x) \ +-({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __xchg_acquire((ptr), \ +- _x_, sizeof(*(ptr))); \ +-}) ++#define arch_xchg_relaxed(ptr, x) \ ++ _arch_xchg(ptr, x, "", "", "", "", "") + 
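/*
 * Editorial sketch, not part of the patch: the rework above collapses the
 * four hand-written __xchg_*() variants into a single _arch_xchg() template
 * whose only variation is the barrier/suffix strings passed in. A userspace
 * C11 analogue of the resulting ordering menu (hypothetical example, not
 * kernel API; assumes a C11 compiler):
 */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	_Atomic unsigned long v = 1;

	/* relaxed / acquire / release / fully ordered, mirroring
	 * arch_xchg_relaxed(), arch_xchg_acquire(), arch_xchg_release()
	 * and arch_xchg() above. */
	unsigned long r0 = atomic_exchange_explicit(&v, 2, memory_order_relaxed);
	unsigned long r1 = atomic_exchange_explicit(&v, 3, memory_order_acquire);
	unsigned long r2 = atomic_exchange_explicit(&v, 4, memory_order_release);
	unsigned long r3 = atomic_exchange_explicit(&v, 5, memory_order_seq_cst);

	printf("%lu %lu %lu %lu\n", r0, r1, r2, r3);	/* prints: 1 2 3 4 */
	return 0;
}
/*
 * arch_xchg() itself is stronger than seq_cst here (.aqrl plus a trailing
 * full fence), but the one-template-many-orderings structure is the same.
 */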
+-#define __xchg_release(ptr, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- " amoswap.w %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- " amoswap.d %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) ++#define arch_xchg_acquire(ptr, x) \ ++ _arch_xchg(ptr, x, "", "", "", \ ++ RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER) + + #define arch_xchg_release(ptr, x) \ +-({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __xchg_release((ptr), \ +- _x_, sizeof(*(ptr))); \ +-}) +- +-#define __arch_xchg(ptr, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(new) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- " amoswap.w.aqrl %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- " amoswap.d.aqrl %0, %2, %1\n" \ +- : "=r" (__ret), "+A" (*__ptr) \ +- : "r" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) ++ _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "") + + #define arch_xchg(ptr, x) \ +-({ \ +- __typeof__(*(ptr)) _x_ = (x); \ +- (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \ +-}) ++ _arch_xchg(ptr, x, ".rl", ".aqrl", "", RISCV_FULL_BARRIER, "") + + #define xchg32(ptr, x) \ + ({ \ +@@ -163,190 +107,128 @@ + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. 
+ */ +-#define __cmpxchg_relaxed(ptr, old, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ +- : "memory"); \ +- break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ +- : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) + +-#define arch_cmpxchg_relaxed(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ ++#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \ ++({ \ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ ++ IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ ++ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA) && \ ++ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ ++ r = o; \ ++ \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ " amocas" cas_sfx " %0, %z2, %1\n" \ ++ append \ ++ : "+&r" (r), "+A" (*(p)) \ ++ : "rJ" (n) \ ++ : "memory"); \ ++ } else { \ ++ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ ++ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ ++ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ ++ << __s; \ ++ ulong __newx = (ulong)(n) << __s; \ ++ ulong __oldx = (ulong)(o) << __s; \ ++ ulong __retx; \ ++ ulong __rc; \ ++ \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ "0: lr.w %0, %2\n" \ ++ " and %1, %0, %z5\n" \ ++ " bne %1, %z3, 1f\n" \ ++ " and %1, %0, %z6\n" \ ++ " or %1, %1, %z4\n" \ ++ " sc.w" sc_sfx " %1, %1, %2\n" \ ++ " bnez %1, 0b\n" \ ++ append \ ++ "1:\n" \ ++ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ ++ : "rJ" ((long)__oldx), "rJ" (__newx), \ ++ "rJ" (__mask), "rJ" (~__mask) \ ++ : "memory"); \ ++ \ ++ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ ++ } \ + }) + +-#define __cmpxchg_acquire(ptr, old, new, size) \ ++#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ + ({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ +- case 4: \ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ ++ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \ ++ r = o; \ ++ \ + __asm__ __volatile__ ( \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- RISCV_ACQUIRE_BARRIER \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ ++ prepend \ ++ " amocas" sc_cas_sfx " %0, %z2, %1\n" \ ++ append \ ++ : "+&r" (r), "+A" (*(p)) \ ++ : "rJ" (n) \ + : "memory"); \ +- break; \ +- case 8: \ ++ } else { \ ++ register unsigned int __rc; \ ++ \ + __asm__ __volatile__ ( \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d %1, %z4, %2\n" \ ++ prepend \ ++ "0: lr" lr_sfx " %0, %2\n" \ ++ " bne %0, %z3, 1f\n" \ ++ " sc" sc_cas_sfx " %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ +- RISCV_ACQUIRE_BARRIER \ ++ append \ + "1:\n" \ 
+- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ ++ : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \ ++ : "rJ" (co o), "rJ" (n) \ + : "memory"); \ +- break; \ +- default: \ +- BUILD_BUG(); \ + } \ +- __ret; \ + }) + +-#define arch_cmpxchg_acquire(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ +-}) +- +-#define __cmpxchg_release(ptr, old, new, size) \ ++#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \ + ({ \ + __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ +- case 4: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ +- : "memory"); \ ++ __typeof__(*(__ptr)) __old = (old); \ ++ __typeof__(*(__ptr)) __new = (new); \ ++ __typeof__(*(__ptr)) __ret; \ ++ \ ++ switch (sizeof(*__ptr)) { \ ++ case 1: \ ++ __arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \ ++ prepend, append, \ ++ __ret, __ptr, __old, __new); \ + break; \ +- case 8: \ +- __asm__ __volatile__ ( \ +- RISCV_RELEASE_BARRIER \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ +- : "memory"); \ ++ case 2: \ ++ __arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \ ++ prepend, append, \ ++ __ret, __ptr, __old, __new); \ + break; \ +- default: \ +- BUILD_BUG(); \ +- } \ +- __ret; \ +-}) +- +-#define arch_cmpxchg_release(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ = (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg_release((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ +-}) +- +-#define __cmpxchg(ptr, old, new, size) \ +-({ \ +- __typeof__(ptr) __ptr = (ptr); \ +- __typeof__(*(ptr)) __old = (old); \ +- __typeof__(*(ptr)) __new = (new); \ +- __typeof__(*(ptr)) __ret; \ +- register unsigned int __rc; \ +- switch (size) { \ + case 4: \ +- __asm__ __volatile__ ( \ +- "0: lr.w %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.w.rl %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- " fence rw, rw\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" ((long)__old), "rJ" (__new) \ +- : "memory"); \ ++ __arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \ ++ __ret, __ptr, (long), __old, __new); \ + break; \ + case 8: \ +- __asm__ __volatile__ ( \ +- "0: lr.d %0, %2\n" \ +- " bne %0, %z3, 1f\n" \ +- " sc.d.rl %1, %z4, %2\n" \ +- " bnez %1, 0b\n" \ +- " fence rw, rw\n" \ +- "1:\n" \ +- : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ +- : "rJ" (__old), "rJ" (__new) \ +- : "memory"); \ ++ __arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \ ++ __ret, __ptr, /**/, __old, __new); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +- __ret; \ ++ (__typeof__(*(__ptr)))__ret; \ + }) + ++#define arch_cmpxchg_relaxed(ptr, o, n) \ ++ _arch_cmpxchg((ptr), (o), (n), "", "", "") ++ ++#define arch_cmpxchg_acquire(ptr, o, n) \ ++ _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER) ++ ++#define arch_cmpxchg_release(ptr, o, n) \ ++ _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "") ++ + #define arch_cmpxchg(ptr, o, n) \ +-({ \ +- __typeof__(*(ptr)) _o_ 
= (o); \ +- __typeof__(*(ptr)) _n_ = (n); \ +- (__typeof__(*(ptr))) __cmpxchg((ptr), \ +- _o_, _n_, sizeof(*(ptr))); \ +-}) ++ _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n") + + #define arch_cmpxchg_local(ptr, o, n) \ +- (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) ++ arch_cmpxchg_relaxed((ptr), (o), (n)) + + #define arch_cmpxchg64(ptr, o, n) \ + ({ \ +@@ -360,4 +242,82 @@ + arch_cmpxchg_relaxed((ptr), (o), (n)); \ + }) + ++#define arch_cmpxchg64_relaxed(ptr, o, n) \ ++({ \ ++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ ++ arch_cmpxchg_relaxed((ptr), (o), (n)); \ ++}) ++ ++#define arch_cmpxchg64_acquire(ptr, o, n) \ ++({ \ ++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ ++ arch_cmpxchg_acquire((ptr), (o), (n)); \ ++}) ++ ++#define arch_cmpxchg64_release(ptr, o, n) \ ++({ \ ++ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ ++ arch_cmpxchg_release((ptr), (o), (n)); \ ++}) ++ ++#ifdef CONFIG_RISCV_ISA_ZAWRS ++/* ++ * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to ++ * @val we expect it to still terminate within a "reasonable" amount of time ++ * for an implementation-specific other reason, a pending, locally-enabled ++ * interrupt, or because it has been configured to raise an illegal ++ * instruction exception. ++ */ ++static __always_inline void __cmpwait(volatile void *ptr, ++ unsigned long val, ++ int size) ++{ ++ unsigned long tmp; ++ ++ asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop", ++ 0, RISCV_ISA_EXT_ZAWRS, 1) ++ : : : : no_zawrs); ++ ++ switch (size) { ++ case 1: ++ fallthrough; ++ case 2: ++ /* RISC-V doesn't have lr instructions on byte and half-word. */ ++ goto no_zawrs; ++ case 4: ++ asm volatile( ++ " lr.w %0, %1\n" ++ " xor %0, %0, %2\n" ++ " bnez %0, 1f\n" ++ ZAWRS_WRS_NTO "\n" ++ "1:" ++ : "=&r" (tmp), "+A" (*(u32 *)ptr) ++ : "r" (val)); ++ break; ++#if __riscv_xlen == 64 ++ case 8: ++ asm volatile( ++ " lr.d %0, %1\n" ++ " xor %0, %0, %2\n" ++ " bnez %0, 1f\n" ++ ZAWRS_WRS_NTO "\n" ++ "1:" ++ : "=&r" (tmp), "+A" (*(u64 *)ptr) ++ : "r" (val)); ++ break; ++#endif ++ default: ++ BUILD_BUG(); ++ } ++ ++ return; ++ ++no_zawrs: ++ asm volatile(RISCV_PAUSE : : : "memory"); ++} ++ ++#define __cmpwait_relaxed(ptr, val) \ ++ __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr))) ++#endif ++ + #endif /* _ASM_RISCV_CMPXCHG_H */ +diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h +index 2ac955b51148..6b79287baecc 100644 +--- a/arch/riscv/include/asm/compat.h ++++ b/arch/riscv/include/asm/compat.h +@@ -9,7 +9,6 @@ + */ + #include + #include +-#include + #include + + static inline int is_compat_task(void) +diff --git a/arch/riscv/include/asm/cpufeature-macros.h b/arch/riscv/include/asm/cpufeature-macros.h +new file mode 100644 +index 000000000000..a8103edbf51f +--- /dev/null ++++ b/arch/riscv/include/asm/cpufeature-macros.h +@@ -0,0 +1,66 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright 2022-2024 Rivos, Inc ++ */ ++ ++#ifndef _ASM_CPUFEATURE_MACROS_H ++#define _ASM_CPUFEATURE_MACROS_H ++ ++#include ++#include ++ ++#define STANDARD_EXT 0 ++ ++bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); ++#define riscv_isa_extension_available(isa_bitmap, ext) \ ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) ++ ++static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1) ++ : ++ : [vendor] "i" (vendor), [ext] "i" (ext) ++ : ++ : l_no); ++ ++ return 
true; ++l_no: ++ return false; ++} ++ ++static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1) ++ : ++ : [vendor] "i" (vendor), [ext] "i" (ext) ++ : ++ : l_yes); ++ ++ return false; ++l_yes: ++ return true; ++} ++ ++static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_unlikely(STANDARD_EXT, ext); ++ ++ return __riscv_isa_extension_available(NULL, ext); ++} ++ ++static __always_inline bool riscv_has_extension_likely(const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_likely(STANDARD_EXT, ext); ++ ++ return __riscv_isa_extension_available(NULL, ext); ++} ++ ++#endif /* _ASM_CPUFEATURE_MACROS_H */ +diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h +index d0345bd659c9..c8346dc0bed8 100644 +--- a/arch/riscv/include/asm/cpufeature.h ++++ b/arch/riscv/include/asm/cpufeature.h +@@ -7,7 +7,12 @@ + #define _ASM_CPUFEATURE_H + + #include ++#include ++#include ++#include ++#include + #include ++#include + + /* + * These are probed via a device_initcall(), via either the SBI or directly +@@ -31,5 +36,69 @@ DECLARE_PER_CPU(long, misaligned_access_speed); + extern struct riscv_isainfo hart_isa[NR_CPUS]; + + void check_unaligned_access(int cpu); ++void __init riscv_user_isa_enable(void); ++ ++#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \ ++ .name = #_name, \ ++ .property = #_name, \ ++ .id = _id, \ ++ .subset_ext_ids = _subset_exts, \ ++ .subset_ext_size = _subset_exts_size, \ ++ .validate = _validate \ ++} ++ ++#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, NULL) ++ ++#define __RISCV_ISA_EXT_DATA_VALIDATE(_name, _id, _validate) \ ++ _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0, _validate) ++ ++/* Used to declare pure "lasso" extension (Zk for instance) */ ++#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \ ++ _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, \ ++ ARRAY_SIZE(_bundled_exts), NULL) ++ ++/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */ ++#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \ ++ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), NULL) ++#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \ ++ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate) ++ ++unsigned long riscv_get_elf_hwcap(void); ++ ++struct riscv_isa_ext_data { ++ const unsigned int id; ++ const char *name; ++ const char *property; ++ const unsigned int *subset_ext_ids; ++ const unsigned int subset_ext_size; ++ int (*validate)(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap); ++}; ++ ++extern const struct riscv_isa_ext_data riscv_isa_ext[]; ++extern const size_t riscv_isa_ext_count; ++extern bool riscv_isa_fallback; ++ ++unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); ++static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if 
(IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_likely(STANDARD_EXT, ext)) ++ return true; ++ ++ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); ++} ++ ++static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext) ++{ ++ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX"); ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_unlikely(STANDARD_EXT, ext)) ++ return true; ++ ++ return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); ++} + + #endif +diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h +index 777cb8299551..1fd1bc2f220b 100644 +--- a/arch/riscv/include/asm/csr.h ++++ b/arch/riscv/include/asm/csr.h +@@ -194,6 +194,7 @@ + /* xENVCFG flags */ + #define ENVCFG_STCE (_AC(1, ULL) << 63) + #define ENVCFG_PBMTE (_AC(1, ULL) << 62) ++#define ENVCFG_ADUE (_AC(1, ULL) << 61) + #define ENVCFG_CBZE (_AC(1, UL) << 7) + #define ENVCFG_CBCFE (_AC(1, UL) << 6) + #define ENVCFG_CBIE_SHIFT 4 +@@ -275,6 +276,7 @@ + #define CSR_SIE 0x104 + #define CSR_STVEC 0x105 + #define CSR_SCOUNTEREN 0x106 ++#define CSR_SENVCFG 0x10a + #define CSR_SSCRATCH 0x140 + #define CSR_SEPC 0x141 + #define CSR_SCAUSE 0x142 +@@ -393,10 +395,20 @@ + #define CSR_VTYPE 0xc21 + #define CSR_VLENB 0xc22 + ++/* Scalar Crypto Extension - Entropy */ ++#define CSR_SEED 0x015 ++#define SEED_OPST_MASK _AC(0xC0000000, UL) ++#define SEED_OPST_BIST _AC(0x00000000, UL) ++#define SEED_OPST_WAIT _AC(0x40000000, UL) ++#define SEED_OPST_ES16 _AC(0x80000000, UL) ++#define SEED_OPST_DEAD _AC(0xC0000000, UL) ++#define SEED_ENTROPY_MASK _AC(0xFFFF, UL) ++ + #ifdef CONFIG_RISCV_M_MODE + # define CSR_STATUS CSR_MSTATUS + # define CSR_IE CSR_MIE + # define CSR_TVEC CSR_MTVEC ++# define CSR_ENVCFG CSR_MENVCFG + # define CSR_SCRATCH CSR_MSCRATCH + # define CSR_EPC CSR_MEPC + # define CSR_CAUSE CSR_MCAUSE +@@ -421,6 +433,7 @@ + # define CSR_STATUS CSR_SSTATUS + # define CSR_IE CSR_SIE + # define CSR_TVEC CSR_STVEC ++# define CSR_ENVCFG CSR_SENVCFG + # define CSR_SCRATCH CSR_SSCRATCH + # define CSR_EPC CSR_SEPC + # define CSR_CAUSE CSR_SCAUSE +diff --git a/arch/riscv/include/asm/dmi.h b/arch/riscv/include/asm/dmi.h +new file mode 100644 +index 000000000000..ca7cce557ef7 +--- /dev/null ++++ b/arch/riscv/include/asm/dmi.h +@@ -0,0 +1,24 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2024 Intel Corporation ++ * ++ * based on arch/arm64/include/asm/dmi.h ++ * ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. ++ */ ++ ++#ifndef __ASM_DMI_H ++#define __ASM_DMI_H ++ ++#include ++#include ++ ++#define dmi_early_remap(x, l) memremap(x, l, MEMREMAP_WB) ++#define dmi_early_unmap(x, l) memunmap(x) ++#define dmi_remap(x, l) memremap(x, l, MEMREMAP_WB) ++#define dmi_unmap(x) memunmap(x) ++#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) ++ ++#endif +diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h +index b3b2dfbdf945..06c236bfab53 100644 +--- a/arch/riscv/include/asm/elf.h ++++ b/arch/riscv/include/asm/elf.h +@@ -14,7 +14,7 @@ + #include + #include + #include +-#include ++#include + + /* + * These are used to set parameters in the core dumps. 
+diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h +index d3f3c237adad..5469341b60ce 100644 +--- a/arch/riscv/include/asm/errata_list.h ++++ b/arch/riscv/include/asm/errata_list.h +@@ -12,8 +12,8 @@ + #include + + #ifdef CONFIG_ERRATA_ANDES +-#define ERRATA_ANDESTECH_NO_IOCP 0 +-#define ERRATA_ANDESTECH_NUMBER 1 ++#define ERRATA_ANDES_NO_IOCP 0 ++#define ERRATA_ANDES_NUMBER 1 + #endif + + #ifdef CONFIG_ERRATA_SIFIVE +@@ -128,9 +128,12 @@ asm volatile(ALTERNATIVE( \ + * 0000000 11001 00000 000 00000 0001011 + */ + #define THEAD_inval_A0 ".long 0x0265000b" +-#define THEAD_clean_A0 ".long 0x0255000b" ++#define THEAD_clean_A0 ".long 0x0275000b" + #define THEAD_flush_A0 ".long 0x0275000b" + #define THEAD_SYNC_S ".long 0x0190000b" ++#define THEAD_inval_PA_A0 ".long 0x02a5000b" ++#define THEAD_clean_PA_A0 ".long 0x02b5000b" ++#define THEAD_flush_PA_A0 ".long 0x02b5000b" + + #define ALT_CMO_OP(_op, _start, _size, _cachesize) \ + asm volatile(ALTERNATIVE_2( \ +@@ -157,18 +160,36 @@ asm volatile(ALTERNATIVE_2( \ + "r"((unsigned long)(_start) + (_size)) \ + : "a0") + ++#define ALT_CMO_OP_VPA(_op, _vaddr, _paddr, _size, _cachesize) \ ++asm volatile(ALTERNATIVE_2( \ ++ __nops(6), \ ++ "mv a0, %1\n\t" \ ++ "j 2f\n\t" \ ++ "3:\n\t" \ ++ CBO_##_op(a0) \ ++ "add a0, a0, %0\n\t" \ ++ "2:\n\t" \ ++ "bltu a0, %2, 3b\n\t" \ ++ "nop", 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \ ++ "mv a0, %3\n\t" \ ++ "j 2f\n\t" \ ++ "3:\n\t" \ ++ THEAD_##_op##_PA_A0 "\n\t" \ ++ "add a0, a0, %0\n\t" \ ++ "2:\n\t" \ ++ "bltu a0, %4, 3b\n\t" \ ++ THEAD_SYNC_S, THEAD_VENDOR_ID, \ ++ ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO) \ ++ : : "r"(_cachesize), \ ++ "r"((unsigned long)(_vaddr) & ~((_cachesize) - 1UL)), \ ++ "r"((unsigned long)(_vaddr) + (_size)), \ ++ "r"((unsigned long)(_paddr) & ~((_cachesize) - 1UL)), \ ++ "r"((unsigned long)(_paddr) + (_size)) \ ++ : "a0") ++ + #define THEAD_C9XX_RV_IRQ_PMU 17 + #define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5 + +-#define ALT_SBI_PMU_OVERFLOW(__ovl) \ +-asm volatile(ALTERNATIVE( \ +- "csrr %0, " __stringify(CSR_SSCOUNTOVF), \ +- "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \ +- THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \ +- CONFIG_ERRATA_THEAD_PMU) \ +- : "=r" (__ovl) : \ +- : "memory") +- + #endif /* __ASSEMBLY__ */ + + #endif +diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h +index 2b443a3a487f..6bcd80325dfc 100644 +--- a/arch/riscv/include/asm/fence.h ++++ b/arch/riscv/include/asm/fence.h +@@ -1,12 +1,18 @@ + #ifndef _ASM_RISCV_FENCE_H + #define _ASM_RISCV_FENCE_H + ++#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n" ++#define RISCV_FENCE(p, s) \ ++ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); }) ++ + #ifdef CONFIG_SMP +-#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n" +-#define RISCV_RELEASE_BARRIER "\tfence rw, w\n" ++#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw) ++#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w) ++#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw) + #else + #define RISCV_ACQUIRE_BARRIER + #define RISCV_RELEASE_BARRIER ++#define RISCV_FULL_BARRIER + #endif + + #endif /* _ASM_RISCV_FENCE_H */ +diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h +index f4157034efa9..869da082252a 100644 +--- a/arch/riscv/include/asm/hwcap.h ++++ b/arch/riscv/include/asm/hwcap.h +@@ -8,25 +8,16 @@ + #ifndef _ASM_RISCV_HWCAP_H + #define _ASM_RISCV_HWCAP_H + +-#include +-#include +-#include + #include + + #define RISCV_ISA_EXT_a ('a' - 'a') +-#define 
RISCV_ISA_EXT_b ('b' - 'a') + #define RISCV_ISA_EXT_c ('c' - 'a') + #define RISCV_ISA_EXT_d ('d' - 'a') + #define RISCV_ISA_EXT_f ('f' - 'a') + #define RISCV_ISA_EXT_h ('h' - 'a') + #define RISCV_ISA_EXT_i ('i' - 'a') +-#define RISCV_ISA_EXT_j ('j' - 'a') +-#define RISCV_ISA_EXT_k ('k' - 'a') + #define RISCV_ISA_EXT_m ('m' - 'a') +-#define RISCV_ISA_EXT_p ('p' - 'a') + #define RISCV_ISA_EXT_q ('q' - 'a') +-#define RISCV_ISA_EXT_s ('s' - 'a') +-#define RISCV_ISA_EXT_u ('u' - 'a') + #define RISCV_ISA_EXT_v ('v' - 'a') + + /* +@@ -58,85 +49,69 @@ + #define RISCV_ISA_EXT_ZICSR 40 + #define RISCV_ISA_EXT_ZIFENCEI 41 + #define RISCV_ISA_EXT_ZIHPM 42 +- +-#define RISCV_ISA_EXT_MAX 64 ++#define RISCV_ISA_EXT_SMSTATEEN 43 ++#define RISCV_ISA_EXT_ZICOND 44 ++#define RISCV_ISA_EXT_ZBC 45 ++#define RISCV_ISA_EXT_ZBKB 46 ++#define RISCV_ISA_EXT_ZBKC 47 ++#define RISCV_ISA_EXT_ZBKX 48 ++#define RISCV_ISA_EXT_ZKND 49 ++#define RISCV_ISA_EXT_ZKNE 50 ++#define RISCV_ISA_EXT_ZKNH 51 ++#define RISCV_ISA_EXT_ZKR 52 ++#define RISCV_ISA_EXT_ZKSED 53 ++#define RISCV_ISA_EXT_ZKSH 54 ++#define RISCV_ISA_EXT_ZKT 55 ++#define RISCV_ISA_EXT_ZVBB 56 ++#define RISCV_ISA_EXT_ZVBC 57 ++#define RISCV_ISA_EXT_ZVKB 58 ++#define RISCV_ISA_EXT_ZVKG 59 ++#define RISCV_ISA_EXT_ZVKNED 60 ++#define RISCV_ISA_EXT_ZVKNHA 61 ++#define RISCV_ISA_EXT_ZVKNHB 62 ++#define RISCV_ISA_EXT_ZVKSED 63 ++#define RISCV_ISA_EXT_ZVKSH 64 ++#define RISCV_ISA_EXT_ZVKT 65 ++#define RISCV_ISA_EXT_ZFH 66 ++#define RISCV_ISA_EXT_ZFHMIN 67 ++#define RISCV_ISA_EXT_ZIHINTNTL 68 ++#define RISCV_ISA_EXT_ZVFH 69 ++#define RISCV_ISA_EXT_ZVFHMIN 70 ++#define RISCV_ISA_EXT_ZFA 71 ++#define RISCV_ISA_EXT_ZTSO 72 ++#define RISCV_ISA_EXT_ZACAS 73 ++#define RISCV_ISA_EXT_ZVE32X 74 ++#define RISCV_ISA_EXT_ZVE32F 75 ++#define RISCV_ISA_EXT_ZVE64X 76 ++#define RISCV_ISA_EXT_ZVE64F 77 ++#define RISCV_ISA_EXT_ZVE64D 78 ++#define RISCV_ISA_EXT_ZIMOP 79 ++#define RISCV_ISA_EXT_ZCA 80 ++#define RISCV_ISA_EXT_ZCB 81 ++#define RISCV_ISA_EXT_ZCD 82 ++#define RISCV_ISA_EXT_ZCF 83 ++#define RISCV_ISA_EXT_ZCMOP 84 ++#define RISCV_ISA_EXT_ZAWRS 85 ++#define RISCV_ISA_EXT_SVVPTC 86 ++#define RISCV_ISA_EXT_SMMPM 87 ++#define RISCV_ISA_EXT_SMNPM 88 ++#define RISCV_ISA_EXT_SSNPM 89 ++#define RISCV_ISA_EXT_ZABHA 90 ++#define RISCV_ISA_EXT_ZICCRSE 91 ++#define RISCV_ISA_EXT_SVADE 92 ++#define RISCV_ISA_EXT_SVADU 93 ++ ++#define RISCV_ISA_EXT_XLINUXENVCFG 127 ++ ++#define RISCV_ISA_EXT_MAX 128 ++#define RISCV_ISA_EXT_INVALID U32_MAX + + #ifdef CONFIG_RISCV_M_MODE + #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA ++#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SMNPM + #else + #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA +-#endif +- +-#ifndef __ASSEMBLY__ +- +-#include +- +-unsigned long riscv_get_elf_hwcap(void); +- +-struct riscv_isa_ext_data { +- const unsigned int id; +- const char *name; +- const char *property; +-}; +- +-extern const struct riscv_isa_ext_data riscv_isa_ext[]; +-extern const size_t riscv_isa_ext_count; +-extern bool riscv_isa_fallback; +- +-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); +- +-#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext) +- +-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); +-#define riscv_isa_extension_available(isa_bitmap, ext) \ +- __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) +- +-static __always_inline bool +-riscv_has_extension_likely(const unsigned long ext) +-{ +- compiletime_assert(ext < RISCV_ISA_EXT_MAX, +- "ext must be < RISCV_ISA_EXT_MAX"); +- 
+- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { +- asm goto( +- ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1) +- : +- : [ext] "i" (ext) +- : +- : l_no); +- } else { +- if (!__riscv_isa_extension_available(NULL, ext)) +- goto l_no; +- } +- +- return true; +-l_no: +- return false; +-} +- +-static __always_inline bool +-riscv_has_extension_unlikely(const unsigned long ext) +-{ +- compiletime_assert(ext < RISCV_ISA_EXT_MAX, +- "ext must be < RISCV_ISA_EXT_MAX"); +- +- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { +- asm goto( +- ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1) +- : +- : [ext] "i" (ext) +- : +- : l_yes); +- } else { +- if (__riscv_isa_extension_available(NULL, ext)) +- goto l_yes; +- } +- +- return false; +-l_yes: +- return true; +-} +- ++#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SSNPM + #endif + + #endif /* _ASM_RISCV_HWCAP_H */ +diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h +index 7cad513538d8..ef01c182af2b 100644 +--- a/arch/riscv/include/asm/hwprobe.h ++++ b/arch/riscv/include/asm/hwprobe.h +@@ -8,11 +8,35 @@ + + #include + +-#define RISCV_HWPROBE_MAX_KEY 5 ++#define RISCV_HWPROBE_MAX_KEY 8 + + static inline bool riscv_hwprobe_key_is_valid(__s64 key) + { + return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY; + } + ++static inline bool hwprobe_key_is_bitmask(__s64 key) ++{ ++ switch (key) { ++ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: ++ case RISCV_HWPROBE_KEY_IMA_EXT_0: ++ case RISCV_HWPROBE_KEY_CPUPERF_0: ++ return true; ++ } ++ ++ return false; ++} ++ ++static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair, ++ struct riscv_hwprobe *other_pair) ++{ ++ if (pair->key != other_pair->key) ++ return false; ++ ++ if (hwprobe_key_is_bitmask(pair->key)) ++ return (pair->value & other_pair->value) == other_pair->value; ++ ++ return pair->value == other_pair->value; ++} ++ + #endif +diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h +index 6960beb75f32..cbd51bfdf527 100644 +--- a/arch/riscv/include/asm/insn-def.h ++++ b/arch/riscv/include/asm/insn-def.h +@@ -196,4 +196,8 @@ + INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ + RS1(base), SIMM12(4)) + ++#define RISCV_PAUSE ".4byte 0x100000f" ++#define ZAWRS_WRS_NTO ".4byte 0x00d00073" ++#define ZAWRS_WRS_STO ".4byte 0x01d00073" ++ + #endif /* __ASM_INSN_DEF_H */ +diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h +index 42497d487a17..8118363494e0 100644 +--- a/arch/riscv/include/asm/io.h ++++ b/arch/riscv/include/asm/io.h +@@ -47,10 +47,10 @@ + * sufficient to ensure this works sanely on controllers that support I/O + * writes. + */ +-#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory"); +-#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory"); +-#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory"); +-#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory"); ++#define __io_pbr() RISCV_FENCE(io, i) ++#define __io_par(v) RISCV_FENCE(i, ior) ++#define __io_pbw() RISCV_FENCE(iow, o) ++#define __io_paw() RISCV_FENCE(o, io) + + /* + * Accesses from a single hart to a single I/O address must be ordered. 
This +@@ -140,4 +140,8 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) + ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) + #endif + ++#undef ioremap_wc ++#define ioremap_wc(addr, size) \ ++ ioremap_prot((addr), (size), _PAGE_IOREMAP_WC) ++ + #endif /* _ASM_RISCV_IO_H */ +diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h +index 8e10a94430a2..dba0359f029e 100644 +--- a/arch/riscv/include/asm/irq.h ++++ b/arch/riscv/include/asm/irq.h +@@ -12,8 +12,68 @@ + + #include + ++#define INVALID_CONTEXT UINT_MAX ++ ++#ifdef CONFIG_SMP ++bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu); ++#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace ++#endif ++ + void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void)); + + struct fwnode_handle *riscv_get_intc_hwnode(void); + ++#ifdef CONFIG_ACPI ++ ++enum riscv_irqchip_type { ++ ACPI_RISCV_IRQCHIP_INTC = 0x00, ++ ACPI_RISCV_IRQCHIP_IMSIC = 0x01, ++ ACPI_RISCV_IRQCHIP_PLIC = 0x02, ++ ACPI_RISCV_IRQCHIP_APLIC = 0x03, ++}; ++ ++int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, ++ u32 *id, u32 *nr_irqs, u32 *nr_idcs); ++struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi); ++unsigned long acpi_rintc_index_to_hartid(u32 index); ++unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, unsigned int ctxt_idx); ++unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id); ++unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx); ++int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res); ++ ++#else ++static inline int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, ++ u32 *id, u32 *nr_irqs, u32 *nr_idcs) ++{ ++ return 0; ++} ++ ++static inline unsigned long acpi_rintc_index_to_hartid(u32 index) ++{ ++ return INVALID_HARTID; ++} ++ ++static inline unsigned long acpi_rintc_ext_parent_to_hartid(unsigned int plic_id, ++ unsigned int ctxt_idx) ++{ ++ return INVALID_HARTID; ++} ++ ++static inline unsigned int acpi_rintc_get_plic_nr_contexts(unsigned int plic_id) ++{ ++ return INVALID_CONTEXT; ++} ++ ++static inline unsigned int acpi_rintc_get_plic_context(unsigned int plic_id, unsigned int ctxt_idx) ++{ ++ return INVALID_CONTEXT; ++} ++ ++static inline int __init acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res) ++{ ++ return 0; ++} ++ ++#endif /* CONFIG_ACPI */ ++ + #endif /* _ASM_RISCV_IRQ_H */ +diff --git a/arch/riscv/include/asm/kvm_aia_aplic.h b/arch/riscv/include/asm/kvm_aia_aplic.h +deleted file mode 100644 +index 6dd1a4809ec1..000000000000 +--- a/arch/riscv/include/asm/kvm_aia_aplic.h ++++ /dev/null +@@ -1,58 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0-only */ +-/* +- * Copyright (C) 2021 Western Digital Corporation or its affiliates. +- * Copyright (C) 2022 Ventana Micro Systems Inc. 
+- */ +-#ifndef __KVM_RISCV_AIA_IMSIC_H +-#define __KVM_RISCV_AIA_IMSIC_H +- +-#include +- +-#define APLIC_MAX_IDC BIT(14) +-#define APLIC_MAX_SOURCE 1024 +- +-#define APLIC_DOMAINCFG 0x0000 +-#define APLIC_DOMAINCFG_RDONLY 0x80000000 +-#define APLIC_DOMAINCFG_IE BIT(8) +-#define APLIC_DOMAINCFG_DM BIT(2) +-#define APLIC_DOMAINCFG_BE BIT(0) +- +-#define APLIC_SOURCECFG_BASE 0x0004 +-#define APLIC_SOURCECFG_D BIT(10) +-#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff +-#define APLIC_SOURCECFG_SM_MASK 0x00000007 +-#define APLIC_SOURCECFG_SM_INACTIVE 0x0 +-#define APLIC_SOURCECFG_SM_DETACH 0x1 +-#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4 +-#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5 +-#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6 +-#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7 +- +-#define APLIC_IRQBITS_PER_REG 32 +- +-#define APLIC_SETIP_BASE 0x1c00 +-#define APLIC_SETIPNUM 0x1cdc +- +-#define APLIC_CLRIP_BASE 0x1d00 +-#define APLIC_CLRIPNUM 0x1ddc +- +-#define APLIC_SETIE_BASE 0x1e00 +-#define APLIC_SETIENUM 0x1edc +- +-#define APLIC_CLRIE_BASE 0x1f00 +-#define APLIC_CLRIENUM 0x1fdc +- +-#define APLIC_SETIPNUM_LE 0x2000 +-#define APLIC_SETIPNUM_BE 0x2004 +- +-#define APLIC_GENMSI 0x3000 +- +-#define APLIC_TARGET_BASE 0x3004 +-#define APLIC_TARGET_HART_IDX_SHIFT 18 +-#define APLIC_TARGET_HART_IDX_MASK 0x3fff +-#define APLIC_TARGET_GUEST_IDX_SHIFT 12 +-#define APLIC_TARGET_GUEST_IDX_MASK 0x3f +-#define APLIC_TARGET_IPRIO_MASK 0xff +-#define APLIC_TARGET_EIID_MASK 0x7ff +- +-#endif +diff --git a/arch/riscv/include/asm/kvm_aia_imsic.h b/arch/riscv/include/asm/kvm_aia_imsic.h +deleted file mode 100644 +index da5881d2bde0..000000000000 +--- a/arch/riscv/include/asm/kvm_aia_imsic.h ++++ /dev/null +@@ -1,38 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0-only */ +-/* +- * Copyright (C) 2021 Western Digital Corporation or its affiliates. +- * Copyright (C) 2022 Ventana Micro Systems Inc. +- */ +-#ifndef __KVM_RISCV_AIA_IMSIC_H +-#define __KVM_RISCV_AIA_IMSIC_H +- +-#include +-#include +- +-#define IMSIC_MMIO_PAGE_SHIFT 12 +-#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT) +-#define IMSIC_MMIO_PAGE_LE 0x00 +-#define IMSIC_MMIO_PAGE_BE 0x04 +- +-#define IMSIC_MIN_ID 63 +-#define IMSIC_MAX_ID 2048 +- +-#define IMSIC_EIDELIVERY 0x70 +- +-#define IMSIC_EITHRESHOLD 0x72 +- +-#define IMSIC_EIP0 0x80 +-#define IMSIC_EIP63 0xbf +-#define IMSIC_EIPx_BITS 32 +- +-#define IMSIC_EIE0 0xc0 +-#define IMSIC_EIE63 0xff +-#define IMSIC_EIEx_BITS 32 +- +-#define IMSIC_FIRST IMSIC_EIDELIVERY +-#define IMSIC_LAST IMSIC_EIE63 +- +-#define IMSIC_MMIO_SETIPNUM_LE 0x00 +-#define IMSIC_MMIO_SETIPNUM_BE 0x04 +- +-#endif +diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h +index 6c016ebb5020..47b240d0d596 100644 +--- a/arch/riscv/include/asm/membarrier.h ++++ b/arch/riscv/include/asm/membarrier.h +@@ -22,6 +22,25 @@ static inline void membarrier_arch_switch_mm(struct mm_struct *prev, + /* + * The membarrier system call requires a full memory barrier + * after storing to rq->curr, before going back to user-space. 
++ *
++ * This barrier is also needed for the SYNC_CORE command when
++ * switching between processes; in particular, on a transition
++ * from a thread belonging to another mm to a thread belonging
++ * to the mm for which a membarrier SYNC_CORE is done on CPU0:
++ *
++ * - [CPU0] sets all bits in the mm icache_stale_mask (in
++ * prepare_sync_core_cmd());
++ *
++ * - [CPU1] stores to rq->curr (by the scheduler);
++ *
++ * - [CPU0] loads rq->curr within membarrier and observes
++ * cpu_rq(1)->curr->mm != mm, so the IPI is skipped on
++ * CPU1; this means membarrier relies on switch_mm() to
++ * issue the sync-core;
++ *
++ * - [CPU1] switch_mm() loads icache_stale_mask; if the bit
++ * is zero, switch_mm() may incorrectly skip the sync-core.
++ *
+ * Matches a full barrier in the proximity of the membarrier
+ * system call entry.
+ */
+diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
+index 4c58ee7f95ec..06cadfd7a237 100644
+--- a/arch/riscv/include/asm/mmio.h
++++ b/arch/riscv/include/asm/mmio.h
+@@ -12,6 +12,7 @@
+ #define _ASM_RISCV_MMIO_H
+
+ #include <linux/types.h>
++#include <asm/fence.h>
+ #include <asm/mmiowb.h>
+
+ /* Generic IO read/write. These perform native-endian accesses. */
+@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
+ * doesn't define any ordering between the memory space and the I/O space.
+ */
+ #define __io_br() do {} while (0)
+-#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
+-#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
++#define __io_ar(v) RISCV_FENCE(i, ir)
++#define __io_bw() RISCV_FENCE(w, o)
+ #define __io_aw() mmiowb_set_pending()
+
+ #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
+index 0b2333e71fdc..52ce4a399d9b 100644
+--- a/arch/riscv/include/asm/mmiowb.h
++++ b/arch/riscv/include/asm/mmiowb.h
+@@ -7,7 +7,7 @@
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
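+ *
+ * (Editor's note: RISCV_FENCE(p, s) is assumed here to expand to the
+ * single instruction "fence p, s" behind a "memory" clobber, roughly
+ *
+ *   #define RISCV_FENCE(p, s) \
+ *       ({ __asm__ __volatile__ ("fence " #p ", " #s : : : "memory"); })
+ *
+ * so the mmiowb() below still compiles to exactly the "fence o,w" that
+ * the open-coded asm used to emit.)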
+ */ +-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory"); ++#define mmiowb() RISCV_FENCE(o, w) + + #include + #include +diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h +index d169a4f41a2e..deaf971253a2 100644 +--- a/arch/riscv/include/asm/pgalloc.h ++++ b/arch/riscv/include/asm/pgalloc.h +@@ -95,7 +95,19 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) + __pud_free(mm, pud); + } + +-#define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud) ++static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, ++ unsigned long addr) ++{ ++ if (pgtable_l4_enabled) { ++ struct ptdesc *ptdesc = virt_to_ptdesc(pud); ++ ++ pagetable_pud_dtor(ptdesc); ++ if (riscv_use_ipi_for_rfence()) ++ tlb_remove_page_ptdesc(tlb, ptdesc); ++ else ++ tlb_remove_ptdesc(tlb, ptdesc); ++ } ++} + + #define p4d_alloc_one p4d_alloc_one + static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) +@@ -124,7 +136,16 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) + __p4d_free(mm, p4d); + } + +-#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d) ++static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, ++ unsigned long addr) ++{ ++ if (pgtable_l5_enabled) { ++ if (riscv_use_ipi_for_rfence()) ++ tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d)); ++ else ++ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d)); ++ } ++} + #endif /* __PAGETABLE_PMD_FOLDED */ + + static inline void sync_kernel_mappings(pgd_t *pgd) +@@ -149,15 +170,31 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) + + #ifndef __PAGETABLE_PMD_FOLDED + +-#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) ++static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, ++ unsigned long addr) ++{ ++ struct ptdesc *ptdesc = virt_to_ptdesc(pmd); ++ ++ pagetable_pmd_dtor(ptdesc); ++ if (riscv_use_ipi_for_rfence()) ++ tlb_remove_page_ptdesc(tlb, ptdesc); ++ else ++ tlb_remove_ptdesc(tlb, ptdesc); ++} + + #endif /* __PAGETABLE_PMD_FOLDED */ + +-#define __pte_free_tlb(tlb, pte, buf) \ +-do { \ +- pagetable_pte_dtor(page_ptdesc(pte)); \ +- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));\ +-} while (0) ++static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, ++ unsigned long addr) ++{ ++ struct ptdesc *ptdesc = page_ptdesc(pte); ++ ++ pagetable_pte_dtor(ptdesc); ++ if (riscv_use_ipi_for_rfence()) ++ tlb_remove_page_ptdesc(tlb, ptdesc); ++ else ++ tlb_remove_ptdesc(tlb, ptdesc); ++} + #endif /* CONFIG_MMU */ + + #endif /* _ASM_RISCV_PGALLOC_H */ +diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h +index 3272ca7a5270..b99bd66107a6 100644 +--- a/arch/riscv/include/asm/pgtable-64.h ++++ b/arch/riscv/include/asm/pgtable-64.h +@@ -126,14 +126,18 @@ enum napot_cont_order { + + /* + * [63:59] T-Head Memory Type definitions: +- * +- * 00000 - NC Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable ++ * bit[63] SO - Strong Order ++ * bit[62] C - Cacheable ++ * bit[61] B - Bufferable ++ * bit[60] SH - Shareable ++ * bit[59] Sec - Trustable ++ * 00110 - NC Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable + * 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable +- * 10000 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable ++ * 10010 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable + */ + #define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60)) 
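++/*
++ * Editor's worked example of the bit table above: _PAGE_IO_THEAD sets
++ * bits 63 (SO) and 60 (SH), i.e. "10010", and _PAGE_NOCACHE_THEAD sets
++ * bits 61 (B) and 60 (SH), i.e. "00110", matching the comment.
++ */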
+-#define _PAGE_NOCACHE_THEAD 0UL +-#define _PAGE_IO_THEAD (1UL << 63) ++#define _PAGE_NOCACHE_THEAD ((1UL << 61) | (1UL << 60)) ++#define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60)) + #define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59)) + + static inline u64 riscv_page_mtmask(void) +diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h +index e58315cedfd3..e93155c2c200 100644 +--- a/arch/riscv/include/asm/pgtable.h ++++ b/arch/riscv/include/asm/pgtable.h +@@ -117,6 +117,7 @@ + #include + #include + #include ++#include + + #define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT) + +@@ -205,7 +206,8 @@ extern struct pt_alloc_ops pt_ops __initdata; + + #define PAGE_TABLE __pgprot(_PAGE_TABLE) + +-#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO) ++#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO) ++#define _PAGE_IOREMAP_WC ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_NOCACHE) + #define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP) + + extern pgd_t swapper_pg_dir[]; +@@ -620,6 +622,17 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) + return __pgprot(prot); + } + ++/* ++ * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By ++ * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in ++ * DT. ++ */ ++#define arch_has_hw_pte_young arch_has_hw_pte_young ++static inline bool arch_has_hw_pte_young(void) ++{ ++ return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU); ++} ++ + /* + * THP functions + */ +@@ -663,6 +676,12 @@ static inline int pmd_write(pmd_t pmd) + return pte_write(pmd_pte(pmd)); + } + ++#define pud_write pud_write ++static inline int pud_write(pud_t pud) ++{ ++ return pte_write(pud_pte(pud)); ++} ++ + static inline int pmd_dirty(pmd_t pmd) + { + return pte_dirty(pmd_pte(pmd)); +diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h +index 4f6af8c6cfa0..0de0e2e29a82 100644 +--- a/arch/riscv/include/asm/processor.h ++++ b/arch/riscv/include/asm/processor.h +@@ -57,6 +57,12 @@ + + #define STACK_TOP DEFAULT_MAP_WINDOW + ++#ifdef CONFIG_MMU ++#define user_max_virt_addr() arch_get_mmap_end(ULONG_MAX, 0, 0) ++#else ++#define user_max_virt_addr() 0 ++#endif /* CONFIG_MMU */ ++ + /* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. 
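/*
 * Editor's sketch, not part of the patch: with ioremap_wc() redirected to
 * _PAGE_IOREMAP_WC above, a driver can map a frame buffer write-combined;
 * on T-Head parts this picks the weakly-ordered, bufferable encoding
 * instead of the strongly-ordered IO one. Names below are hypothetical.
 */
#include <linux/io.h>

static void __iomem *fb_map_wc(phys_addr_t base, size_t len)
{
        /* resolves to ioremap_prot(base, len, _PAGE_IOREMAP_WC) on riscv */
        return ioremap_wc(base, len);
}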
+diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h +index 3ed853b8a8c8..8d261a317175 100644 +--- a/arch/riscv/include/asm/sbi.h ++++ b/arch/riscv/include/asm/sbi.h +@@ -29,6 +29,7 @@ enum sbi_ext_id { + SBI_EXT_RFENCE = 0x52464E43, + SBI_EXT_HSM = 0x48534D, + SBI_EXT_SRST = 0x53525354, ++ SBI_EXT_SUSP = 0x53555350, + SBI_EXT_PMU = 0x504D55, + + /* Experimentals extensions must lie within this range */ +@@ -113,6 +114,14 @@ enum sbi_srst_reset_reason { + SBI_SRST_RESET_REASON_SYS_FAILURE, + }; + ++enum sbi_ext_susp_fid { ++ SBI_EXT_SUSP_SYSTEM_SUSPEND = 0, ++}; ++ ++enum sbi_ext_susp_sleep_type { ++ SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0, ++}; ++ + enum sbi_ext_pmu_fid { + SBI_EXT_PMU_NUM_COUNTERS = 0, + SBI_EXT_PMU_COUNTER_GET_INFO, +diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h +index 2f901a410586..87ab782be702 100644 +--- a/arch/riscv/include/asm/sparsemem.h ++++ b/arch/riscv/include/asm/sparsemem.h +@@ -5,7 +5,7 @@ + + #ifdef CONFIG_SPARSEMEM + #ifdef CONFIG_64BIT +-#define MAX_PHYSMEM_BITS 56 ++#define MAX_PHYSMEM_BITS 44 + #else + #define MAX_PHYSMEM_BITS 32 + #endif /* CONFIG_64BIT */ +diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h +index 02f87867389a..4ffb022b097f 100644 +--- a/arch/riscv/include/asm/suspend.h ++++ b/arch/riscv/include/asm/suspend.h +@@ -13,7 +13,7 @@ struct suspend_context { + /* Saved and restored by low-level functions */ + struct pt_regs regs; + /* Saved and restored by high-level functions */ +- unsigned long scratch; ++ unsigned long envcfg; + unsigned long tvec; + unsigned long ie; + #ifdef CONFIG_MMU +@@ -55,4 +55,7 @@ int hibernate_resume_nonboot_cpu_disable(void); + asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp, + unsigned long cpu_resume); + asmlinkage int hibernate_core_restore_code(void); ++bool riscv_sbi_hsm_is_supported(void); ++bool riscv_sbi_suspend_state_is_valid(u32 state); ++int riscv_sbi_hart_suspend(u32 state); + #endif +diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h +index a727be723c56..7508f3ec8063 100644 +--- a/arch/riscv/include/asm/switch_to.h ++++ b/arch/riscv/include/asm/switch_to.h +@@ -9,7 +9,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + #include +@@ -63,6 +63,21 @@ static __always_inline bool has_fpu(void) + return riscv_has_extension_likely(RISCV_ISA_EXT_f) || + riscv_has_extension_likely(RISCV_ISA_EXT_d); + } ++ ++ ++static inline void kernel_fpu_begin(void) ++{ ++ preempt_disable(); ++ fstate_save(current, task_pt_regs(current)); ++ csr_set(CSR_SSTATUS, SR_FS); ++} ++ ++static inline void kernel_fpu_end(void) ++{ ++ csr_clear(CSR_SSTATUS, SR_FS); ++ fstate_restore(current, task_pt_regs(current)); ++ preempt_enable(); ++} + #else + static __always_inline bool has_fpu(void) { return false; } + #define fstate_save(task, regs) do { } while (0) +diff --git a/arch/riscv/include/asm/sync_core.h b/arch/riscv/include/asm/sync_core.h +new file mode 100644 +index 000000000000..9153016da8f1 +--- /dev/null ++++ b/arch/riscv/include/asm/sync_core.h +@@ -0,0 +1,29 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_RISCV_SYNC_CORE_H ++#define _ASM_RISCV_SYNC_CORE_H ++ ++/* ++ * RISC-V implements return to user-space through an xRET instruction, ++ * which is not core serializing. 
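++ * Executing "fence.i" here guarantees that instructions published
++ * before the barrier are fetched correctly once the hart returns to
++ * user mode; xRET alone does not synchronize the instruction stream.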
++ */
++static inline void sync_core_before_usermode(void)
++{
++ asm volatile ("fence.i" ::: "memory");
++}
++
++#ifdef CONFIG_SMP
++/*
++ * Ensure the next switch_mm() on every CPU issues a core serializing
++ * instruction for the given @mm.
++ */
++static inline void prepare_sync_core_cmd(struct mm_struct *mm)
++{
++ cpumask_setall(&mm->context.icache_stale_mask);
++}
++#else
++static inline void prepare_sync_core_cmd(struct mm_struct *mm)
++{
++}
++#endif /* CONFIG_SMP */
++
++#endif /* _ASM_RISCV_SYNC_CORE_H */
+diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
+index 50b63b5c15bd..1f6c38420d8e 100644
+--- a/arch/riscv/include/asm/tlb.h
++++ b/arch/riscv/include/asm/tlb.h
+@@ -10,6 +10,24 @@ struct mmu_gather;
+
+ static void tlb_flush(struct mmu_gather *tlb);
+
++#ifdef CONFIG_MMU
++#include <linux/swap.h>
++
++/*
++ * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
++ * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
++ * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
++ * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
++ * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
++ * for more details.
++ */
++static inline void __tlb_remove_table(void *table)
++{
++ free_page_and_swap_cache(table);
++}
++
++#endif /* CONFIG_MMU */
++
+ #define tlb_flush tlb_flush
+ #include <asm-generic/tlb.h>
+
+diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
+index 96b65a5396df..8f383f05a290 100644
+--- a/arch/riscv/include/asm/vdso/processor.h
++++ b/arch/riscv/include/asm/vdso/processor.h
+@@ -5,6 +5,7 @@
+ #ifndef __ASSEMBLY__
+
+ #include <asm/barrier.h>
++#include <asm/insn-def.h>
+
+ static inline void cpu_relax(void)
+ {
+@@ -14,16 +15,11 @@ static inline void cpu_relax(void)
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+ #endif
+
+-#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
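+ * (Editor's note: RISCV_PAUSE is assumed to emit the Zihintpause
+ * encoding 0x0100000f, which sits in the fence hint space and is
+ * therefore harmless on harts without the extension.)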
+ */ +- __asm__ __volatile__ ("pause"); +-#else +- /* Encoding of the pause instruction */ +- __asm__ __volatile__ (".4byte 0x100000F"); +-#endif ++ __asm__ __volatile__ (RISCV_PAUSE); + barrier(); + } + +diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h +index c5ee07b3df07..be77ae870829 100644 +--- a/arch/riscv/include/asm/vector.h ++++ b/arch/riscv/include/asm/vector.h +@@ -15,7 +15,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +@@ -25,7 +25,7 @@ bool riscv_v_first_use_handler(struct pt_regs *regs); + + static __always_inline bool has_vector(void) + { +- return riscv_has_extension_unlikely(RISCV_ISA_EXT_v); ++ return riscv_has_extension_unlikely(RISCV_ISA_EXT_ZVE32X); + } + + static inline void __riscv_v_vstate_clean(struct pt_regs *regs) +@@ -79,7 +79,7 @@ static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src + { + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvl x0, %2, %1\n\t" + ".option pop\n\t" + "csrw " __stringify(CSR_VSTART) ", %0\n\t" +@@ -97,7 +97,7 @@ static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to, + __vstate_csr_save(save_to); + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vse8.v v0, (%1)\n\t" + "add %1, %1, %0\n\t" +@@ -119,7 +119,7 @@ static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_ + riscv_v_enable(); + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vle8.v v0, (%1)\n\t" + "add %1, %1, %0\n\t" +@@ -141,7 +141,7 @@ static inline void __riscv_v_vstate_discard(void) + riscv_v_enable(); + asm volatile ( + ".option push\n\t" +- ".option arch, +v\n\t" ++ ".option arch, +zve32x\n\t" + "vsetvli %0, x0, e8, m8, ta, ma\n\t" + "vmv.v.i v0, -1\n\t" + "vmv.v.i v8, -1\n\t" +diff --git a/arch/riscv/include/asm/vendor_extensions.h b/arch/riscv/include/asm/vendor_extensions.h +new file mode 100644 +index 000000000000..0517ce38c5be +--- /dev/null ++++ b/arch/riscv/include/asm/vendor_extensions.h +@@ -0,0 +1,103 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright 2024 Rivos, Inc ++ */ ++ ++#ifndef _ASM_VENDOR_EXTENSIONS_H ++#define _ASM_VENDOR_EXTENSIONS_H ++ ++#include ++ ++#include ++ ++/* ++ * The extension keys of each vendor must be strictly less than this value. ++ */ ++#define RISCV_ISA_VENDOR_EXT_MAX 32 ++ ++struct riscv_isavendorinfo { ++ DECLARE_BITMAP(isa, RISCV_ISA_VENDOR_EXT_MAX); ++}; ++ ++struct riscv_isa_vendor_ext_data_list { ++ bool is_initialized; ++ const size_t ext_data_count; ++ const struct riscv_isa_ext_data *ext_data; ++ struct riscv_isavendorinfo per_hart_isa_bitmap[NR_CPUS]; ++ struct riscv_isavendorinfo all_harts_isa_bitmap; ++}; ++ ++extern struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[]; ++ ++extern const size_t riscv_isa_vendor_ext_list_size; ++ ++/* ++ * The alternatives need some way of distinguishing between vendor extensions ++ * and errata. Incrementing all of the vendor extension keys so they are at ++ * least 0x8000 accomplishes that. 
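++ *
++ * For example, the first vendor extension key (0) is emitted into
++ * alternative entries as 0x8000 and can never collide with a standard
++ * extension id, which stays below RISCV_ISA_EXT_MAX.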
++ */ ++#define RISCV_VENDOR_EXT_ALTERNATIVES_BASE 0x8000 ++ ++#define VENDOR_EXT_ALL_CPUS -1 ++ ++bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit); ++#define riscv_cpu_isa_vendor_extension_available(cpu, vendor, ext) \ ++ __riscv_isa_vendor_extension_available(cpu, vendor, RISCV_ISA_VENDOR_EXT_##ext) ++#define riscv_isa_vendor_extension_available(vendor, ext) \ ++ __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, \ ++ RISCV_ISA_VENDOR_EXT_##ext) ++ ++static __always_inline bool riscv_has_vendor_extension_likely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_likely(vendor, ++ ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ ++ return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext); ++} ++ ++static __always_inline bool riscv_has_vendor_extension_unlikely(const unsigned long vendor, ++ const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) ++ return __riscv_has_extension_unlikely(vendor, ++ ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE); ++ ++ return __riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ext); ++} ++ ++static __always_inline bool riscv_cpu_has_vendor_extension_likely(const unsigned long vendor, ++ int cpu, const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_likely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) ++ return true; ++ ++ return __riscv_isa_vendor_extension_available(cpu, vendor, ext); ++} ++ ++static __always_inline bool riscv_cpu_has_vendor_extension_unlikely(const unsigned long vendor, ++ int cpu, ++ const unsigned long ext) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return false; ++ ++ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && ++ __riscv_has_extension_unlikely(vendor, ext + RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) ++ return true; ++ ++ return __riscv_isa_vendor_extension_available(cpu, vendor, ext); ++} ++ ++#endif /* _ASM_VENDOR_EXTENSIONS_H */ +diff --git a/arch/riscv/include/asm/vendor_extensions/andes.h b/arch/riscv/include/asm/vendor_extensions/andes.h +new file mode 100644 +index 000000000000..7bb2fc43438f +--- /dev/null ++++ b/arch/riscv/include/asm/vendor_extensions/andes.h +@@ -0,0 +1,19 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H ++#define _ASM_RISCV_VENDOR_EXTENSIONS_ANDES_H ++ ++#include ++ ++#include ++ ++#define RISCV_ISA_VENDOR_EXT_XANDESPMU 0 ++ ++/* ++ * Extension keys should be strictly less than max. ++ * It is safe to increment this when necessary. 
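++ *
++ * Illustrative use (editor's note, not in this patch):
++ *
++ *   if (riscv_isa_vendor_extension_available(ANDES_VENDOR_ID, XANDESPMU))
++ *           ...take the Andes PMU driver path...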
++ */ ++#define RISCV_ISA_VENDOR_EXT_MAX_ANDES 32 ++ ++extern struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes; ++ ++#endif +diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h +index e55407ace0c3..2f2bb0c84f9a 100644 +--- a/arch/riscv/include/asm/vendorid_list.h ++++ b/arch/riscv/include/asm/vendorid_list.h +@@ -5,7 +5,7 @@ + #ifndef ASM_VENDOR_LIST_H + #define ASM_VENDOR_LIST_H + +-#define ANDESTECH_VENDOR_ID 0x31e ++#define ANDES_VENDOR_ID 0x31e + #define SIFIVE_VENDOR_ID 0x489 + #define THEAD_VENDOR_ID 0x5b7 + +diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h +index 006bfb48343d..6fdaefa62e14 100644 +--- a/arch/riscv/include/uapi/asm/hwprobe.h ++++ b/arch/riscv/include/uapi/asm/hwprobe.h +@@ -10,7 +10,7 @@ + + /* + * Interface for probing hardware capabilities from userspace, see +- * Documentation/riscv/hwprobe.rst for more information. ++ * Documentation/arch/riscv/hwprobe.rst for more information. + */ + struct riscv_hwprobe { + __s64 key; +@@ -29,6 +29,50 @@ struct riscv_hwprobe { + #define RISCV_HWPROBE_EXT_ZBA (1 << 3) + #define RISCV_HWPROBE_EXT_ZBB (1 << 4) + #define RISCV_HWPROBE_EXT_ZBS (1 << 5) ++#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6) ++#define RISCV_HWPROBE_EXT_ZBC (1 << 7) ++#define RISCV_HWPROBE_EXT_ZBKB (1 << 8) ++#define RISCV_HWPROBE_EXT_ZBKC (1 << 9) ++#define RISCV_HWPROBE_EXT_ZBKX (1 << 10) ++#define RISCV_HWPROBE_EXT_ZKND (1 << 11) ++#define RISCV_HWPROBE_EXT_ZKNE (1 << 12) ++#define RISCV_HWPROBE_EXT_ZKNH (1 << 13) ++#define RISCV_HWPROBE_EXT_ZKSED (1 << 14) ++#define RISCV_HWPROBE_EXT_ZKSH (1 << 15) ++#define RISCV_HWPROBE_EXT_ZKT (1 << 16) ++#define RISCV_HWPROBE_EXT_ZVBB (1 << 17) ++#define RISCV_HWPROBE_EXT_ZVBC (1 << 18) ++#define RISCV_HWPROBE_EXT_ZVKB (1 << 19) ++#define RISCV_HWPROBE_EXT_ZVKG (1 << 20) ++#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21) ++#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22) ++#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23) ++#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24) ++#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25) ++#define RISCV_HWPROBE_EXT_ZVKT (1 << 26) ++#define RISCV_HWPROBE_EXT_ZFH (1 << 27) ++#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28) ++#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29) ++#define RISCV_HWPROBE_EXT_ZVFH (1 << 30) ++#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31) ++#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32) ++#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33) ++#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34) ++#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35) ++#define RISCV_HWPROBE_EXT_ZIHINTPAUSE (1ULL << 36) ++#define RISCV_HWPROBE_EXT_ZVE32X (1ULL << 37) ++#define RISCV_HWPROBE_EXT_ZVE32F (1ULL << 38) ++#define RISCV_HWPROBE_EXT_ZVE64X (1ULL << 39) ++#define RISCV_HWPROBE_EXT_ZVE64F (1ULL << 40) ++#define RISCV_HWPROBE_EXT_ZVE64D (1ULL << 41) ++#define RISCV_HWPROBE_EXT_ZIMOP (1ULL << 42) ++#define RISCV_HWPROBE_EXT_ZCA (1ULL << 43) ++#define RISCV_HWPROBE_EXT_ZCB (1ULL << 44) ++#define RISCV_HWPROBE_EXT_ZCD (1ULL << 45) ++#define RISCV_HWPROBE_EXT_ZCF (1ULL << 46) ++#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) ++#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) ++#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49) + #define RISCV_HWPROBE_KEY_CPUPERF_0 5 + #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) + #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) +@@ -36,6 +80,12 @@ struct riscv_hwprobe { + #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0) + #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0) + #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0) 
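++/*
++ * Editor's illustration, not part of this patch: user space consumes
++ * these keys through the riscv_hwprobe(2) syscall/vDSO, e.g.
++ *
++ *   struct riscv_hwprobe pairs[] = {
++ *           { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
++ *           { .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE },
++ *   };
++ *   syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0);
++ *   if (pairs[0].value & RISCV_HWPROBE_EXT_ZBB)
++ *           run_zbb_path();
++ */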
++#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6 ++#define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7 ++#define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8 + /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ + ++/* Flags */ ++#define RISCV_HWPROBE_WHICH_CPUS (1 << 0) ++ + #endif +diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile +index a2499fcc1cf3..70d04e1875f0 100644 +--- a/arch/riscv/kernel/Makefile ++++ b/arch/riscv/kernel/Makefile +@@ -52,12 +52,15 @@ obj-y += setup.o + obj-y += signal.o + obj-y += syscall_table.o + obj-y += sys_riscv.o ++obj-y += sys_hwprobe.o + obj-y += time.o + obj-y += traps.o + obj-y += riscv_ksyms.o + obj-y += stacktrace.o + obj-y += cacheinfo.o + obj-y += patch.o ++obj-y += vendor_extensions.o ++obj-y += vendor_extensions/ + obj-y += probes/ + obj-$(CONFIG_MMU) += vdso.o vdso/ + +@@ -104,3 +107,4 @@ obj-$(CONFIG_COMPAT) += compat_vdso/ + obj-$(CONFIG_64BIT) += pi/ + obj-$(CONFIG_ACPI) += acpi.o + obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o ++obj-$(CONFIG_ACPI_NUMA) += acpi_numa.o +diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c +index 07a43843368d..2fd29695a788 100644 +--- a/arch/riscv/kernel/acpi.c ++++ b/arch/riscv/kernel/acpi.c +@@ -14,9 +14,12 @@ + */ + + #include ++#include + #include ++#include ++#include + #include +-#include ++#include + + int acpi_noirq = 1; /* skip ACPI IRQ initialization */ + int acpi_disabled = 1; +@@ -130,7 +133,7 @@ void __init acpi_boot_table_init(void) + if (param_acpi_off || + (!param_acpi_on && !param_acpi_force && + efi.acpi20 == EFI_INVALID_TABLE_ADDR)) +- return; ++ goto done; + + /* + * ACPI is disabled at this point. Enable it in order to parse +@@ -150,6 +153,14 @@ void __init acpi_boot_table_init(void) + if (!param_acpi_force) + disable_acpi(); + } ++ ++done: ++ if (acpi_disabled) { ++ if (earlycon_acpi_spcr_enable) ++ early_init_dt_scan_chosen_stdout(); ++ } else { ++ acpi_parse_spcr(earlycon_acpi_spcr_enable, true); ++ } + } + + static int acpi_parse_madt_rintc(union acpi_subtable_headers *header, const unsigned long end) +@@ -190,11 +201,6 @@ struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu) + return &cpu_madt_rintc[cpu]; + } + +-u32 get_acpi_id_for_cpu(int cpu) +-{ +- return acpi_cpu_get_madt_rintc(cpu)->uid; +-} +- + /* + * __acpi_map_table() will be called before paging_init(), so early_ioremap() + * or early_memremap() should be called here to for ACPI table mapping. +@@ -217,35 +223,114 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size) + + void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) + { +- return (void __iomem *)memremap(phys, size, MEMREMAP_WB); ++ efi_memory_desc_t *md, *region = NULL; ++ pgprot_t prot; ++ ++ if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP))) ++ return NULL; ++ ++ for_each_efi_memory_desc(md) { ++ u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT); ++ ++ if (phys < md->phys_addr || phys >= end) ++ continue; ++ ++ if (phys + size > end) { ++ pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n"); ++ return NULL; ++ } ++ region = md; ++ break; ++ } ++ ++ /* ++ * It is fine for AML to remap regions that are not represented in the ++ * EFI memory map at all, as it only describes normal memory, and MMIO ++ * regions that require a virtual mapping to make them accessible to ++ * the EFI runtime services. 
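++ *
++ * (Editor's summary of the logic below: EFI_MEMORY_WB regions map as
++ * PAGE_KERNEL, WC/WT as pgprot_writecombine(PAGE_KERNEL), runtime
++ * services code as read-only, and anything else keeps the default
++ * strongly-ordered PAGE_KERNEL_IO.)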
++ */ ++ prot = PAGE_KERNEL_IO; ++ if (region) { ++ switch (region->type) { ++ case EFI_LOADER_CODE: ++ case EFI_LOADER_DATA: ++ case EFI_BOOT_SERVICES_CODE: ++ case EFI_BOOT_SERVICES_DATA: ++ case EFI_CONVENTIONAL_MEMORY: ++ case EFI_PERSISTENT_MEMORY: ++ if (memblock_is_map_memory(phys) || ++ !memblock_is_region_memory(phys, size)) { ++ pr_warn(FW_BUG "requested region covers kernel memory\n"); ++ return NULL; ++ } ++ ++ /* ++ * Mapping kernel memory is permitted if the region in ++ * question is covered by a single memblock with the ++ * NOMAP attribute set: this enables the use of ACPI ++ * table overrides passed via initramfs. ++ * This particular use case only requires read access. ++ */ ++ fallthrough; ++ ++ case EFI_RUNTIME_SERVICES_CODE: ++ /* ++ * This would be unusual, but not problematic per se, ++ * as long as we take care not to create a writable ++ * mapping for executable code. ++ */ ++ prot = PAGE_KERNEL_RO; ++ break; ++ ++ case EFI_ACPI_RECLAIM_MEMORY: ++ /* ++ * ACPI reclaim memory is used to pass firmware tables ++ * and other data that is intended for consumption by ++ * the OS only, which may decide it wants to reclaim ++ * that memory and use it for something else. We never ++ * do that, but we usually add it to the linear map ++ * anyway, in which case we should use the existing ++ * mapping. ++ */ ++ if (memblock_is_map_memory(phys)) ++ return (void __iomem *)__va(phys); ++ fallthrough; ++ ++ default: ++ if (region->attribute & EFI_MEMORY_WB) ++ prot = PAGE_KERNEL; ++ else if ((region->attribute & EFI_MEMORY_WC) || ++ (region->attribute & EFI_MEMORY_WT)) ++ prot = pgprot_writecombine(PAGE_KERNEL); ++ } ++ } ++ ++ return ioremap_prot(phys, size, pgprot_val(prot)); + } + + #ifdef CONFIG_PCI + + /* +- * These interfaces are defined just to enable building ACPI core. +- * TODO: Update it with actual implementation when external interrupt +- * controller support is added in RISC-V ACPI. ++ * raw_pci_read/write - Platform-specific PCI config space access. + */ +-int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, +- int reg, int len, u32 *val) ++int raw_pci_read(unsigned int domain, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 *val) + { +- return PCIBIOS_DEVICE_NOT_FOUND; +-} ++ struct pci_bus *b = pci_find_bus(domain, bus); + +-int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, +- int reg, int len, u32 val) +-{ +- return PCIBIOS_DEVICE_NOT_FOUND; ++ if (!b) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ return b->ops->read(b, devfn, reg, len, val); + } + +-int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) ++int raw_pci_write(unsigned int domain, unsigned int bus, ++ unsigned int devfn, int reg, int len, u32 val) + { +- return -1; +-} ++ struct pci_bus *b = pci_find_bus(domain, bus); + +-struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +-{ +- return NULL; ++ if (!b) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ return b->ops->write(b, devfn, reg, len, val); + } ++ + #endif /* CONFIG_PCI */ +diff --git a/arch/riscv/kernel/acpi_numa.c b/arch/riscv/kernel/acpi_numa.c +new file mode 100644 +index 000000000000..1a97cbdafd01 +--- /dev/null ++++ b/arch/riscv/kernel/acpi_numa.c +@@ -0,0 +1,130 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * ACPI 6.6 based NUMA setup for RISCV ++ * Lots of code was borrowed from arch/arm64/kernel/acpi_numa.c ++ * ++ * Copyright 2004 Andi Kleen, SuSE Labs. ++ * Copyright (C) 2013-2016, Linaro Ltd. ++ * Author: Hanjun Guo ++ * Copyright (C) 2024 Intel Corporation. 
++ * ++ * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs. ++ * ++ * Called from acpi_numa_init while reading the SRAT and SLIT tables. ++ * Assumes all memory regions belonging to a single proximity domain ++ * are in one chunk. Holes between them will be included in the node. ++ */ ++ ++#define pr_fmt(fmt) "ACPI: NUMA: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++static int acpi_early_node_map[NR_CPUS] __initdata = { NUMA_NO_NODE }; ++ ++int __init acpi_numa_get_nid(unsigned int cpu) ++{ ++ return acpi_early_node_map[cpu]; ++} ++ ++static inline int get_cpu_for_acpi_id(u32 uid) ++{ ++ int cpu; ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) ++ if (uid == get_acpi_id_for_cpu(cpu)) ++ return cpu; ++ ++ return -EINVAL; ++} ++ ++static int __init acpi_parse_rintc_pxm(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_srat_rintc_affinity *pa; ++ int cpu, pxm, node; ++ ++ if (srat_disabled()) ++ return -EINVAL; ++ pa = (struct acpi_srat_rintc_affinity *)header; ++ if (!pa) ++ return -EINVAL; ++ ++ if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED)) ++ return 0; ++ ++ pxm = pa->proximity_domain; ++ node = pxm_to_node(pxm); ++ ++ /* ++ * If we can't map the UID to a logical cpu this ++ * means that the UID is not part of possible cpus ++ * so we do not need a NUMA mapping for it, skip ++ * the SRAT entry and keep parsing. ++ */ ++ cpu = get_cpu_for_acpi_id(pa->acpi_processor_uid); ++ if (cpu < 0) ++ return 0; ++ ++ acpi_early_node_map[cpu] = node; ++ pr_info("SRAT: PXM %d -> HARTID 0x%lx -> Node %d\n", pxm, ++ cpuid_to_hartid_map(cpu), node); ++ ++ return 0; ++} ++ ++void __init acpi_map_cpus_to_nodes(void) ++{ ++ int i; ++ ++ /* ++ * In ACPI, SMP and CPU NUMA information is provided in separate ++ * static tables, namely the MADT and the SRAT. ++ * ++ * Thus, it is simpler to first create the cpu logical map through ++ * an MADT walk and then map the logical cpus to their node ids ++ * as separate steps. 
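++ *
++ * (Editor's note: this also means the MADT walk must have populated
++ * the logical cpu map already, since get_cpu_for_acpi_id() above
++ * resolves SRAT entries through get_acpi_id_for_cpu().)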
++ */ ++ acpi_table_parse_entries(ACPI_SIG_SRAT, sizeof(struct acpi_table_srat), ++ ACPI_SRAT_TYPE_RINTC_AFFINITY, acpi_parse_rintc_pxm, 0); ++ ++ for (i = 0; i < nr_cpu_ids; i++) ++ early_map_cpu_to_node(i, acpi_numa_get_nid(i)); ++} ++ ++/* Callback for Proximity Domain -> logical node ID mapping */ ++void __init acpi_numa_rintc_affinity_init(struct acpi_srat_rintc_affinity *pa) ++{ ++ int pxm, node; ++ ++ if (srat_disabled()) ++ return; ++ ++ if (pa->header.length < sizeof(struct acpi_srat_rintc_affinity)) { ++ pr_err("SRAT: Invalid SRAT header length: %d\n", pa->header.length); ++ bad_srat(); ++ return; ++ } ++ ++ if (!(pa->flags & ACPI_SRAT_RINTC_ENABLED)) ++ return; ++ ++ pxm = pa->proximity_domain; ++ node = acpi_map_pxm_to_node(pxm); ++ ++ if (node == NUMA_NO_NODE) { ++ pr_err("SRAT: Too many proximity domains %d\n", pxm); ++ bad_srat(); ++ return; ++ } ++ ++ node_set(node, numa_nodes_parsed); ++} +diff --git a/arch/riscv/kernel/alternative.c b/arch/riscv/kernel/alternative.c +index 319a1da0358b..0128b161bfda 100644 +--- a/arch/riscv/kernel/alternative.c ++++ b/arch/riscv/kernel/alternative.c +@@ -43,7 +43,7 @@ static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info + + switch (cpu_mfr_info->vendor_id) { + #ifdef CONFIG_ERRATA_ANDES +- case ANDESTECH_VENDOR_ID: ++ case ANDES_VENDOR_ID: + cpu_mfr_info->patch_func = andes_errata_patch_func; + break; + #endif +diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c +index bb5fb2b820a2..820f579e4581 100644 +--- a/arch/riscv/kernel/cpufeature.c ++++ b/arch/riscv/kernel/cpufeature.c +@@ -23,6 +23,7 @@ + #include + #include + #include ++#include + + #include "copy-unaligned.h" + +@@ -32,6 +33,8 @@ + #define MISALIGNED_BUFFER_SIZE 0x4000 + #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80) + ++static bool any_cpu_has_zicboz; ++ + unsigned long elf_hwcap __read_mostly; + + /* Host ISA bitmap */ +@@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base); + * + * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used. + */ +-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) ++bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit) + { + const unsigned long *bmap = (isa_bitmap) ? 
isa_bitmap : riscv_isa; + +@@ -80,37 +83,204 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) + } + EXPORT_SYMBOL_GPL(__riscv_isa_extension_available); + +-static bool riscv_isa_extension_check(int id) ++static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) + { +- switch (id) { +- case RISCV_ISA_EXT_ZICBOM: +- if (!riscv_cbom_block_size) { +- pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n"); +- return false; +- } else if (!is_power_of_2(riscv_cbom_block_size)) { +- pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n"); +- return false; +- } +- return true; +- case RISCV_ISA_EXT_ZICBOZ: +- if (!riscv_cboz_block_size) { +- pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n"); +- return false; +- } else if (!is_power_of_2(riscv_cboz_block_size)) { +- pr_err("cboz-block-size present, but is not a power-of-2\n"); +- return false; +- } +- return true; ++ if (!riscv_cbom_block_size) { ++ pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n"); ++ return -EINVAL; ++ } ++ if (!is_power_of_2(riscv_cbom_block_size)) { ++ pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n"); ++ return -EINVAL; + } ++ return 0; ++} + +- return true; ++static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (!riscv_cboz_block_size) { ++ pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n"); ++ return -EINVAL; ++ } ++ if (!is_power_of_2(riscv_cboz_block_size)) { ++ pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n"); ++ return -EINVAL; ++ } ++ any_cpu_has_zicboz = true; ++ return 0; + } + +-#define __RISCV_ISA_EXT_DATA(_name, _id) { \ +- .name = #_name, \ +- .property = #_name, \ +- .id = _id, \ ++static int riscv_ext_zca_depends(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA)) ++ return 0; ++ ++ return -EPROBE_DEFER; + } ++static int riscv_ext_zcd_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA) && ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_d)) ++ return 0; ++ ++ return -EPROBE_DEFER; ++} ++ ++static int riscv_ext_zcf_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (IS_ENABLED(CONFIG_64BIT)) ++ return -EINVAL; ++ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA) && ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_f)) ++ return 0; ++ ++ return -EPROBE_DEFER; ++} ++ ++static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ /* SVADE has already been detected, use SVADE only */ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_SVADE)) ++ return -EOPNOTSUPP; ++ ++ return 0; ++} ++ ++static const unsigned int riscv_zk_bundled_exts[] = { ++ RISCV_ISA_EXT_ZBKB, ++ RISCV_ISA_EXT_ZBKC, ++ RISCV_ISA_EXT_ZBKX, ++ RISCV_ISA_EXT_ZKND, ++ RISCV_ISA_EXT_ZKNE, ++ RISCV_ISA_EXT_ZKR, ++ RISCV_ISA_EXT_ZKT, ++}; ++ ++static const unsigned int riscv_zkn_bundled_exts[] = { ++ RISCV_ISA_EXT_ZBKB, ++ RISCV_ISA_EXT_ZBKC, ++ RISCV_ISA_EXT_ZBKX, ++ RISCV_ISA_EXT_ZKND, ++ RISCV_ISA_EXT_ZKNE, ++ RISCV_ISA_EXT_ZKNH, ++}; ++ ++static const 
unsigned int riscv_zks_bundled_exts[] = { ++ RISCV_ISA_EXT_ZBKB, ++ RISCV_ISA_EXT_ZBKC, ++ RISCV_ISA_EXT_ZKSED, ++ RISCV_ISA_EXT_ZKSH ++}; ++ ++#define RISCV_ISA_EXT_ZVKN \ ++ RISCV_ISA_EXT_ZVKNED, \ ++ RISCV_ISA_EXT_ZVKNHB, \ ++ RISCV_ISA_EXT_ZVKB, \ ++ RISCV_ISA_EXT_ZVKT ++ ++static const unsigned int riscv_zvkn_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKN ++}; ++ ++static const unsigned int riscv_zvknc_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKN, ++ RISCV_ISA_EXT_ZVBC ++}; ++ ++static const unsigned int riscv_zvkng_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKN, ++ RISCV_ISA_EXT_ZVKG ++}; ++ ++#define RISCV_ISA_EXT_ZVKS \ ++ RISCV_ISA_EXT_ZVKSED, \ ++ RISCV_ISA_EXT_ZVKSH, \ ++ RISCV_ISA_EXT_ZVKB, \ ++ RISCV_ISA_EXT_ZVKT ++ ++static const unsigned int riscv_zvks_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKS ++}; ++ ++static const unsigned int riscv_zvksc_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKS, ++ RISCV_ISA_EXT_ZVBC ++}; ++ ++static const unsigned int riscv_zvksg_bundled_exts[] = { ++ RISCV_ISA_EXT_ZVKS, ++ RISCV_ISA_EXT_ZVKG ++}; ++ ++static const unsigned int riscv_zvbb_exts[] = { ++ RISCV_ISA_EXT_ZVKB ++}; ++ ++#define RISCV_ISA_EXT_ZVE64F_IMPLY_LIST \ ++ RISCV_ISA_EXT_ZVE64X, \ ++ RISCV_ISA_EXT_ZVE32F, \ ++ RISCV_ISA_EXT_ZVE32X ++ ++#define RISCV_ISA_EXT_ZVE64D_IMPLY_LIST \ ++ RISCV_ISA_EXT_ZVE64F, \ ++ RISCV_ISA_EXT_ZVE64F_IMPLY_LIST ++ ++#define RISCV_ISA_EXT_V_IMPLY_LIST \ ++ RISCV_ISA_EXT_ZVE64D, \ ++ RISCV_ISA_EXT_ZVE64D_IMPLY_LIST ++ ++static const unsigned int riscv_zve32f_exts[] = { ++ RISCV_ISA_EXT_ZVE32X ++}; ++ ++static const unsigned int riscv_zve64f_exts[] = { ++ RISCV_ISA_EXT_ZVE64F_IMPLY_LIST ++}; ++ ++static const unsigned int riscv_zve64d_exts[] = { ++ RISCV_ISA_EXT_ZVE64D_IMPLY_LIST ++}; ++ ++static const unsigned int riscv_v_exts[] = { ++ RISCV_ISA_EXT_V_IMPLY_LIST ++}; ++ ++static const unsigned int riscv_zve64x_exts[] = { ++ RISCV_ISA_EXT_ZVE32X, ++ RISCV_ISA_EXT_ZVE64X ++}; ++ ++/* ++ * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V ++ * privileged ISA, the existence of the CSRs is implied by any extension which ++ * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the ++ * existence of the CSR, and treat it as a subset of those other extensions. ++ */ ++static const unsigned int riscv_xlinuxenvcfg_exts[] = { ++ RISCV_ISA_EXT_XLINUXENVCFG ++}; ++ ++/* ++ * Zc* spec states that: ++ * - C always implies Zca ++ * - C+F implies Zcf (RV32 only) ++ * - C+D implies Zcd ++ * ++ * These extensions will be enabled and then validated depending on the ++ * availability of F/D RV32. 
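++ *
++ * Worked example (editor's note): "rv32imafdc" resolves C into
++ * Zca+Zcf+Zcd, while "rv64imac" yields Zca alone: Zcf is rejected
++ * outright on 64-bit and Zcd keeps deferring without D, so neither
++ * bit is ever set.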
++ */ ++static const unsigned int riscv_c_exts[] = { ++ RISCV_ISA_EXT_ZCA, ++ RISCV_ISA_EXT_ZCF, ++ RISCV_ISA_EXT_ZCD, ++}; + + /* + * The canonical order of ISA extension names in the ISA string is defined in +@@ -158,36 +328,177 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { + __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f), + __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d), + __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q), +- __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c), +- __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b), +- __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k), +- __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j), +- __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p), +- __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v), ++ __RISCV_ISA_EXT_SUPERSET(c, RISCV_ISA_EXT_c, riscv_c_exts), ++ __RISCV_ISA_EXT_SUPERSET(v, RISCV_ISA_EXT_v, riscv_v_exts), + __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h), +- __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM), +- __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ), ++ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts, ++ riscv_ext_zicbom_validate), ++ __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, ++ riscv_ext_zicboz_validate), ++ __RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE), + __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), ++ __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), + __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), + __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI), ++ __RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL), + __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), + __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), ++ __RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP), ++ __RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA), ++ __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), ++ __RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS), ++ __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), ++ __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH), ++ __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN), ++ __RISCV_ISA_EXT_DATA(zca, RISCV_ISA_EXT_ZCA), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcb, RISCV_ISA_EXT_ZCB, riscv_ext_zca_depends), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcd, RISCV_ISA_EXT_ZCD, riscv_ext_zcd_validate), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcf, RISCV_ISA_EXT_ZCF, riscv_ext_zcf_validate), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zcmop, RISCV_ISA_EXT_ZCMOP, riscv_ext_zca_depends), + __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA), + __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB), ++ __RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC), ++ __RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB), ++ __RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC), ++ __RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX), + __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS), ++ __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts), ++ __RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND), ++ __RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE), ++ __RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH), ++ __RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR), ++ __RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT), ++ __RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED), ++ __RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH), ++ __RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO), ++ __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts), ++ __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC), ++ __RISCV_ISA_EXT_SUPERSET(zve32f, RISCV_ISA_EXT_ZVE32F, riscv_zve32f_exts), ++ 
__RISCV_ISA_EXT_DATA(zve32x, RISCV_ISA_EXT_ZVE32X), ++ __RISCV_ISA_EXT_SUPERSET(zve64d, RISCV_ISA_EXT_ZVE64D, riscv_zve64d_exts), ++ __RISCV_ISA_EXT_SUPERSET(zve64f, RISCV_ISA_EXT_ZVE64F, riscv_zve64f_exts), ++ __RISCV_ISA_EXT_SUPERSET(zve64x, RISCV_ISA_EXT_ZVE64X, riscv_zve64x_exts), ++ __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH), ++ __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN), ++ __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB), ++ __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG), ++ __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts), ++ __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED), ++ __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA), ++ __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB), ++ __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts), ++ __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED), ++ __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH), ++ __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts), ++ __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT), + __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA), ++ __RISCV_ISA_EXT_DATA(smmpm, RISCV_ISA_EXT_SMMPM), ++ __RISCV_ISA_EXT_SUPERSET(smnpm, RISCV_ISA_EXT_SMNPM, riscv_xlinuxenvcfg_exts), ++ __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN), + __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA), + __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), ++ __RISCV_ISA_EXT_SUPERSET(ssnpm, RISCV_ISA_EXT_SSNPM, riscv_xlinuxenvcfg_exts), + __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), ++ __RISCV_ISA_EXT_DATA(svade, RISCV_ISA_EXT_SVADE), ++ __RISCV_ISA_EXT_DATA_VALIDATE(svadu, RISCV_ISA_EXT_SVADU, riscv_ext_svadu_validate), + __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), + __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), + __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), ++ __RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC), + }; + + const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext); + +-static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo, +- unsigned long *isa2hwcap, const char *isa) ++static void riscv_isa_set_ext(const struct riscv_isa_ext_data *ext, unsigned long *bitmap) ++{ ++ if (ext->id != RISCV_ISA_EXT_INVALID) ++ set_bit(ext->id, bitmap); ++ ++ for (int i = 0; i < ext->subset_ext_size; i++) { ++ if (ext->subset_ext_ids[i] != RISCV_ISA_EXT_INVALID) ++ set_bit(ext->subset_ext_ids[i], bitmap); ++ } ++} ++ ++static const struct riscv_isa_ext_data *riscv_get_isa_ext_data(unsigned int ext_id) ++{ ++ for (int i = 0; i < riscv_isa_ext_count; i++) { ++ if (riscv_isa_ext[i].id == ext_id) ++ return &riscv_isa_ext[i]; ++ } ++ ++ return NULL; ++} ++ ++/* ++ * "Resolve" a source ISA bitmap into one that matches kernel configuration as ++ * well as correct extension dependencies. Some extensions depends on specific ++ * kernel configuration to be usable (V needs CONFIG_RISCV_ISA_V for instance) ++ * and this function will actually validate all the extensions provided in ++ * source_isa into the resolved_isa based on extensions validate() callbacks. 
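++ *
++ * (Editor's walk-through: with source_isa = {Zcb, Zca}, a pass may
++ * visit Zcb first and get -EPROBE_DEFER because Zca is not resolved
++ * yet; Zca resolves later in the same pass and the retry loop then
++ * settles Zcb. Iteration stops once a full pass changes nothing.)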
++ */ ++static void __init riscv_resolve_isa(unsigned long *source_isa, ++ unsigned long *resolved_isa, unsigned long *this_hwcap, ++ unsigned long *isa2hwcap) ++{ ++ bool loop; ++ const struct riscv_isa_ext_data *ext; ++ DECLARE_BITMAP(prev_resolved_isa, RISCV_ISA_EXT_MAX); ++ int max_loop_count = riscv_isa_ext_count, ret; ++ unsigned int bit; ++ ++ do { ++ loop = false; ++ if (max_loop_count-- < 0) { ++ pr_err("Failed to reach a stable ISA state\n"); ++ return; ++ } ++ bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX); ++ for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) { ++ ext = riscv_get_isa_ext_data(bit); ++ ++ if (ext && ext->validate) { ++ ret = ext->validate(ext, resolved_isa); ++ if (ret == -EPROBE_DEFER) { ++ loop = true; ++ continue; ++ } else if (ret) { ++ /* Disable the extension entirely */ ++ clear_bit(bit, source_isa); ++ continue; ++ } ++ } ++ ++ set_bit(bit, resolved_isa); ++ /* No need to keep it in source isa now that it is enabled */ ++ clear_bit(bit, source_isa); ++ ++ /* Single letter extensions get set in hwcap */ ++ if (bit < RISCV_ISA_EXT_BASE) ++ *this_hwcap |= isa2hwcap[bit]; ++ } ++ } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa))); ++} ++ ++static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap) ++{ ++ for (int i = 0; i < riscv_isa_ext_count; i++) { ++ const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; ++ ++ if ((name_end - name == strlen(ext->name)) && ++ !strncasecmp(name, ext->name, name_end - name)) { ++ riscv_isa_set_ext(ext, bitmap); ++ break; ++ } ++ } ++} ++ ++static void __init riscv_parse_isa_string(const char *isa, unsigned long *bitmap) + { + /* + * For all possible cpus, we have already validated in +@@ -200,15 +511,31 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + while (*isa) { + const char *ext = isa++; + const char *ext_end = isa; +- bool ext_long = false, ext_err = false; ++ bool ext_err = false; + + switch (*ext) { ++ case 'x': ++ case 'X': ++ if (acpi_disabled) ++ pr_warn_once("Vendor extensions are ignored in riscv,isa. Use riscv,isa-extensions instead."); ++ /* ++ * To skip an extension, we find its end. ++ * As multi-letter extensions must be split from other multi-letter ++ * extensions with an "_", the end of a multi-letter extension will ++ * either be the null character or the "_" at the start of the next ++ * multi-letter extension. ++ */ ++ for (; *isa && *isa != '_'; ++isa) ++ ; ++ ext_err = true; ++ break; + case 's': + /* +- * Workaround for invalid single-letter 's' & 'u'(QEMU). ++ * Workaround for invalid single-letter 's' & 'u' (QEMU). + * No need to set the bit in riscv_isa as 's' & 'u' are +- * not valid ISA extensions. It works until multi-letter +- * extension starting with "Su" appears. ++ * not valid ISA extensions. It works unless the first ++ * multi-letter extension in the ISA string begins with ++ * "Su" and is not prefixed with an underscore. + */ + if (ext[-1] != '_' && ext[1] == 'u') { + ++isa; +@@ -217,8 +544,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + } + fallthrough; + case 'S': +- case 'x': +- case 'X': + case 'z': + case 'Z': + /* +@@ -239,7 +564,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + * character itself while eliminating the extensions version number. + * A simple re-increment solves this problem. 
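+ * (Editor's example: in "..._zicbom1p0_zicboz...", the version digits
+ * "1p0" are consumed with the name and parsing resumes cleanly at the
+ * next extension after the underscore.)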
+ */ +- ext_long = true; + for (; *isa && *isa != '_'; ++isa) + if (unlikely(!isalnum(*isa))) + ext_err = true; +@@ -317,29 +641,10 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc + if (*isa == '_') + ++isa; + +-#define SET_ISA_EXT_MAP(name, bit) \ +- do { \ +- if ((ext_end - ext == strlen(name)) && \ +- !strncasecmp(ext, name, strlen(name)) && \ +- riscv_isa_extension_check(bit)) \ +- set_bit(bit, isainfo->isa); \ +- } while (false) \ +- + if (unlikely(ext_err)) + continue; +- if (!ext_long) { +- int nr = tolower(*ext) - 'a'; + +- if (riscv_isa_extension_check(nr)) { +- *this_hwcap |= isa2hwcap[nr]; +- set_bit(nr, isainfo->isa); +- } +- } else { +- for (int i = 0; i < riscv_isa_ext_count; i++) +- SET_ISA_EXT_MAP(riscv_isa_ext[i].name, +- riscv_isa_ext[i].id); +- } +-#undef SET_ISA_EXT_MAP ++ match_isa_ext(ext, ext_end, bitmap); + } + } + +@@ -366,6 +671,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + for_each_possible_cpu(cpu) { + struct riscv_isainfo *isainfo = &hart_isa[cpu]; + unsigned long this_hwcap = 0; ++ DECLARE_BITMAP(source_isa, RISCV_ISA_EXT_MAX) = { 0 }; + + if (acpi_disabled) { + node = of_cpu_device_node_get(cpu); +@@ -388,7 +694,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + } + } + +- riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa); ++ riscv_parse_isa_string(isa, source_isa); + + /* + * These ones were as they were part of the base ISA when the +@@ -396,10 +702,10 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + * unconditionally where `i` is in riscv,isa on DT systems. + */ + if (acpi_disabled) { +- set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa); +- set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa); +- set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa); +- set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa); ++ set_bit(RISCV_ISA_EXT_ZICSR, source_isa); ++ set_bit(RISCV_ISA_EXT_ZIFENCEI, source_isa); ++ set_bit(RISCV_ISA_EXT_ZICNTR, source_isa); ++ set_bit(RISCV_ISA_EXT_ZIHPM, source_isa); + } + + /* +@@ -412,9 +718,11 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + */ + if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) { + this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v]; +- clear_bit(RISCV_ISA_EXT_v, isainfo->isa); ++ clear_bit(RISCV_ISA_EXT_v, source_isa); + } + ++ riscv_resolve_isa(source_isa, isainfo->isa, &this_hwcap, isa2hwcap); ++ + /* + * All "okay" hart should have same isa. Set HWCAP based on + * common capabilities of every "okay" hart, in case they don't +@@ -435,6 +743,61 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) + acpi_put_table((struct acpi_table_header *)rhct); + } + ++static void __init riscv_fill_cpu_vendor_ext(struct device_node *cpu_node, int cpu) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return; ++ ++ for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) { ++ struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i]; ++ ++ for (int j = 0; j < ext_list->ext_data_count; j++) { ++ const struct riscv_isa_ext_data ext = ext_list->ext_data[j]; ++ struct riscv_isavendorinfo *isavendorinfo = &ext_list->per_hart_isa_bitmap[cpu]; ++ ++ if (of_property_match_string(cpu_node, "riscv,isa-extensions", ++ ext.property) < 0) ++ continue; ++ ++ /* ++ * Assume that subset extensions are all members of the ++ * same vendor. 
++ */ ++ if (ext.subset_ext_size) ++ for (int k = 0; k < ext.subset_ext_size; k++) ++ set_bit(ext.subset_ext_ids[k], isavendorinfo->isa); ++ ++ set_bit(ext.id, isavendorinfo->isa); ++ } ++ } ++} ++ ++/* ++ * Populate all_harts_isa_bitmap for each vendor with all of the extensions that ++ * are shared across CPUs for that vendor. ++ */ ++static void __init riscv_fill_vendor_ext_list(int cpu) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) ++ return; ++ ++ for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) { ++ struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i]; ++ ++ if (!ext_list->is_initialized) { ++ bitmap_copy(ext_list->all_harts_isa_bitmap.isa, ++ ext_list->per_hart_isa_bitmap[cpu].isa, ++ RISCV_ISA_VENDOR_EXT_MAX); ++ ext_list->is_initialized = true; ++ } else { ++ bitmap_and(ext_list->all_harts_isa_bitmap.isa, ++ ext_list->all_harts_isa_bitmap.isa, ++ ext_list->per_hart_isa_bitmap[cpu].isa, ++ RISCV_ISA_VENDOR_EXT_MAX); ++ } ++ } ++} ++ + static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + { + unsigned int cpu; +@@ -443,6 +806,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + unsigned long this_hwcap = 0; + struct device_node *cpu_node; + struct riscv_isainfo *isainfo = &hart_isa[cpu]; ++ DECLARE_BITMAP(source_isa, RISCV_ISA_EXT_MAX) = { 0 }; + + cpu_node = of_cpu_device_node_get(cpu); + if (!cpu_node) { +@@ -456,20 +820,18 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + } + + for (int i = 0; i < riscv_isa_ext_count; i++) { +- if (of_property_match_string(cpu_node, "riscv,isa-extensions", +- riscv_isa_ext[i].property) < 0) +- continue; ++ const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; + +- if (!riscv_isa_extension_check(riscv_isa_ext[i].id)) ++ if (of_property_match_string(cpu_node, "riscv,isa-extensions", ++ ext->property) < 0) + continue; + +- /* Only single letter extensions get set in hwcap */ +- if (strnlen(riscv_isa_ext[i].name, 2) == 1) +- this_hwcap |= isa2hwcap[riscv_isa_ext[i].id]; +- +- set_bit(riscv_isa_ext[i].id, isainfo->isa); ++ riscv_isa_set_ext(ext, source_isa); + } + ++ riscv_resolve_isa(source_isa, isainfo->isa, &this_hwcap, isa2hwcap); ++ riscv_fill_cpu_vendor_ext(cpu_node, cpu); ++ + of_node_put(cpu_node); + + /* +@@ -485,6 +847,8 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) + bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); + else + bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); ++ ++ riscv_fill_vendor_ext_list(cpu); + } + + if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX)) +@@ -539,8 +903,14 @@ void __init riscv_fill_hwcap(void) + elf_hwcap &= ~COMPAT_HWCAP_ISA_F; + } + +- if (elf_hwcap & COMPAT_HWCAP_ISA_V) { ++ if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZVE32X)) { ++ /* ++ * This cannot fail when called on the boot hart ++ */ + riscv_v_setup_vsize(); ++ } ++ ++ if (elf_hwcap & COMPAT_HWCAP_ISA_V) { + /* + * ISA string in device tree might have 'v' flag, but + * CONFIG_RISCV_ISA_V is disabled in kernel. 
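+ * (Editor's note: riscv_v_setup_vsize() above now keys off Zve32x,
+ * the minimal vector subset, so the vector length is still probed
+ * even when the full 'v' hwcap bit is masked out here.)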
+@@ -668,7 +1038,7 @@ void check_unaligned_access(int cpu) + __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE)); + } + +-static int check_unaligned_access_boot_cpu(void) ++static int __init check_unaligned_access_boot_cpu(void) + { + check_unaligned_access(0); + return 0; +@@ -676,6 +1046,14 @@ static int check_unaligned_access_boot_cpu(void) + + arch_initcall(check_unaligned_access_boot_cpu); + ++void __init riscv_user_isa_enable(void) ++{ ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) ++ csr_set(CSR_ENVCFG, ENVCFG_CBZE); ++ else if (any_cpu_has_zicboz) ++ pr_warn("Zicboz disabled as it is unavailable on some harts\n"); ++} ++ + #ifdef CONFIG_RISCV_ALTERNATIVE + /* + * Alternative patch sites consider 48 bits when determining when to patch +@@ -716,28 +1094,45 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, + { + struct alt_entry *alt; + void *oldptr, *altptr; +- u16 id, value; ++ u16 id, value, vendor; + + if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) + return; + + for (alt = begin; alt < end; alt++) { +- if (alt->vendor_id != 0) +- continue; +- + id = PATCH_ID_CPUFEATURE_ID(alt->patch_id); ++ vendor = PATCH_ID_CPUFEATURE_ID(alt->vendor_id); + +- if (id >= RISCV_ISA_EXT_MAX) { +- WARN(1, "This extension id:%d is not in ISA extension list", id); +- continue; +- } ++ /* ++ * Any alternative with a patch_id that is less than ++ * RISCV_ISA_EXT_MAX is interpreted as a standard extension. ++ * ++ * Any alternative with patch_id that is greater than or equal ++ * to RISCV_VENDOR_EXT_ALTERNATIVES_BASE is interpreted as a ++ * vendor extension. ++ */ ++ if (id < RISCV_ISA_EXT_MAX) { ++ /* ++ * This patch should be treated as errata so skip ++ * processing here. ++ */ ++ if (alt->vendor_id != 0) ++ continue; + +- if (!__riscv_isa_extension_available(NULL, id)) +- continue; ++ if (!__riscv_isa_extension_available(NULL, id)) ++ continue; + +- value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id); +- if (!riscv_cpufeature_patch_check(id, value)) ++ value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id); ++ if (!riscv_cpufeature_patch_check(id, value)) ++ continue; ++ } else if (id >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE) { ++ if (!__riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, ++ id - RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) ++ continue; ++ } else { ++ WARN(1, "This extension id:%d is not in ISA extension list", id); + continue; ++ } + + oldptr = ALT_OLD_PTR(alt); + altptr = ALT_ALT_PTR(alt); +diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c +index df4f6fec5d17..ced5a09abaaa 100644 +--- a/arch/riscv/kernel/module.c ++++ b/arch/riscv/kernel/module.c +@@ -337,6 +337,45 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, + [R_RISCV_SUB64] = apply_r_riscv_sub64_rela, + }; + ++static inline unsigned int apply_calc_pcrel_lo12(Elf_Shdr *sechdrs, ++ Elf_Rela *rel, Elf_Sym *sym, unsigned int idx, ++ unsigned int symindex, unsigned int relsec, ++ struct module *me, Elf_Addr *v) ++{ ++ unsigned long hi20_loc = ++ sechdrs[sechdrs[relsec].sh_info].sh_addr ++ + rel[idx].r_offset; ++ u32 hi20_type = ELF_RISCV_R_TYPE(rel[idx].r_info); ++ unsigned int found = 0; ++ ++ /* Find the corresponding HI20 relocation entry */ ++ if (hi20_loc == sym->st_value ++ && (hi20_type == R_RISCV_PCREL_HI20 ++ || hi20_type == R_RISCV_GOT_HI20)) { ++ s32 hi20, lo12; ++ Elf_Sym *hi20_sym = ++ (Elf_Sym *)sechdrs[symindex].sh_addr ++ + ELF_RISCV_R_SYM(rel[idx].r_info); ++ unsigned long hi20_sym_val = ++ hi20_sym->st_value + rel[idx].r_addend; ++ ++ /* 
Calculate lo12 */ ++ size_t offset = hi20_sym_val - hi20_loc; ++ if (IS_ENABLED(CONFIG_MODULE_SECTIONS) ++ && hi20_type == R_RISCV_GOT_HI20) { ++ offset = module_emit_got_entry(me, hi20_sym_val); ++ offset = offset - hi20_loc; ++ } ++ hi20 = (offset + 0x800) & 0xfffff000; ++ lo12 = offset - hi20; ++ *v = (Elf_Addr)lo12; ++ ++ found = 1; ++ } ++ ++ return found; ++} ++ + int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +@@ -385,40 +424,24 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, + + if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) { + unsigned int j; ++ unsigned int found = 0; + +- for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) { +- unsigned long hi20_loc = +- sechdrs[sechdrs[relsec].sh_info].sh_addr +- + rel[j].r_offset; +- u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info); +- +- /* Find the corresponding HI20 relocation entry */ +- if (hi20_loc == sym->st_value +- && (hi20_type == R_RISCV_PCREL_HI20 +- || hi20_type == R_RISCV_GOT_HI20)) { +- s32 hi20, lo12; +- Elf_Sym *hi20_sym = +- (Elf_Sym *)sechdrs[symindex].sh_addr +- + ELF_RISCV_R_SYM(rel[j].r_info); +- unsigned long hi20_sym_val = +- hi20_sym->st_value +- + rel[j].r_addend; +- +- /* Calculate lo12 */ +- size_t offset = hi20_sym_val - hi20_loc; +- if (IS_ENABLED(CONFIG_MODULE_SECTIONS) +- && hi20_type == R_RISCV_GOT_HI20) { +- offset = module_emit_got_entry( +- me, hi20_sym_val); +- offset = offset - hi20_loc; +- } +- hi20 = (offset + 0x800) & 0xfffff000; +- lo12 = offset - hi20; +- v = lo12; ++ if (i > 0) { ++ j = i - 1; ++ found = apply_calc_pcrel_lo12(sechdrs, rel, sym, j, ++ symindex, relsec, me, &v); ++ } + +- break; ++ if (found == 0) { ++ for (j = 0; j < sechdrs[relsec].sh_size/sizeof(*rel); j++) { ++ found = apply_calc_pcrel_lo12(sechdrs, rel, sym, ++ j, symindex, relsec, me, &v); ++ if (found) { ++ break; ++ } + } + } ++ + if (j == sechdrs[relsec].sh_size / sizeof(*rel)) { + pr_err( + "%s: Can not find HI20 relocation information\n", +diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c +index 83e223318822..dd973216e31c 100644 +--- a/arch/riscv/kernel/process.c ++++ b/arch/riscv/kernel/process.c +@@ -204,3 +204,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) + p->thread.sp = (unsigned long)childregs; /* kernel sp */ + return 0; + } ++ ++EXPORT_SYMBOL_GPL(__fstate_save); ++EXPORT_SYMBOL_GPL(__fstate_restore); +diff --git a/arch/riscv/kernel/sbi-ipi.c b/arch/riscv/kernel/sbi-ipi.c +index a4559695ce62..0e2e19352469 100644 +--- a/arch/riscv/kernel/sbi-ipi.c ++++ b/arch/riscv/kernel/sbi-ipi.c +@@ -3,18 +3,35 @@ + * Multiplex several IPIs over a single HW IPI. + * + * Copyright (c) 2022 Ventana Micro Systems Inc. ++ * Copyright (C) 2024 Alibaba Group Holding Limited. 
+ */ + + #define pr_fmt(fmt) "riscv: " fmt + #include + #include + #include ++#include + #include + #include ++#include ++#include ++#include + #include + + static int sbi_ipi_virq; + ++static u32 __iomem *sswi_base; ++ ++static void sswi_send_ipi(unsigned int cpu) ++{ ++ writel(1, sswi_base + cpuid_to_hartid_map(cpu)); ++} ++ ++static void sswi_clear_ipi(void) ++{ ++ writel(0, sswi_base + cpuid_to_hartid_map(smp_processor_id())); ++} ++ + static void sbi_ipi_handle(struct irq_desc *desc) + { + struct irq_chip *chip = irq_desc_get_chip(desc); +@@ -22,6 +39,9 @@ static void sbi_ipi_handle(struct irq_desc *desc) + chained_irq_enter(chip, desc); + + csr_clear(CSR_IP, IE_SIE); ++ if (sswi_base) ++ sswi_clear_ipi(); ++ + ipi_mux_process(); + + chained_irq_exit(chip, desc); +@@ -54,7 +74,8 @@ void __init sbi_ipi_init(void) + return; + } + +- virq = ipi_mux_create(BITS_PER_BYTE, sbi_send_ipi); ++ virq = ipi_mux_create(BITS_PER_BYTE, sswi_base ? sswi_send_ipi ++ : sbi_send_ipi); + if (virq <= 0) { + pr_err("unable to create muxed IPIs\n"); + irq_dispose_mapping(sbi_ipi_virq); +@@ -68,10 +89,27 @@ void __init sbi_ipi_init(void) + * the masking/unmasking of virtual IPIs is done + * via generic IPI-Mux + */ +- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, ++ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING, ++ sswi_base ? ++ "irqchip/sswi-ipi:starting" : + "irqchip/sbi-ipi:starting", + sbi_ipi_starting_cpu, NULL); + +- riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, false); +- pr_info("providing IPIs using SBI IPI extension\n"); ++ riscv_ipi_set_virq_range(virq, BITS_PER_BYTE, ++ sswi_base ? true : false); ++ pr_info("providing IPIs using %s IPI extension\n", ++ sswi_base ? "ACLINT SSWI" : "SBI"); ++} ++ ++static int __init aclint_sswi_probe(struct device_node *node, ++ struct device_node *parent) ++{ ++ sswi_base = of_iomap(node, 0); ++ if (!sswi_base) { ++ pr_err("RISC-V ACLINT SSWI device probe failure\n"); ++ return -ENODEV; ++ } ++ ++ return 0; + } ++IRQCHIP_DECLARE(riscv_aclint_sswi, "riscv,aclint-sswi", aclint_sswi_probe); +diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c +index c2cdf812ebd0..d949fd3c0884 100644 +--- a/arch/riscv/kernel/setup.c ++++ b/arch/riscv/kernel/setup.c +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -299,17 +300,22 @@ void __init setup_arch(char **cmdline_p) + setup_smp(); + #endif + +- if (!acpi_disabled) ++ if (!acpi_disabled) { + acpi_init_rintc_map(); ++ acpi_map_cpus_to_nodes(); ++ } + + riscv_init_cbo_blocksizes(); + riscv_fill_hwcap(); + init_rt_signal_env(); + apply_boot_alternatives(); ++ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) && + riscv_isa_extension_available(NULL, ZICBOM)) + riscv_noncoherent_supported(); + riscv_set_dma_cache_alignment(); ++ ++ riscv_user_isa_enable(); + } + + bool arch_cpu_is_hotpluggable(int cpu) +diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c +index 40420afbb1a0..ccb0f93c9786 100644 +--- a/arch/riscv/kernel/smp.c ++++ b/arch/riscv/kernel/smp.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + enum ipi_message_type { + IPI_RESCHEDULE, +@@ -33,6 +34,7 @@ enum ipi_message_type { + IPI_CPU_CRASH_STOP, + IPI_IRQ_WORK, + IPI_TIMER, ++ IPI_CPU_BACKTRACE, + IPI_MAX + }; + +@@ -136,6 +138,9 @@ static irqreturn_t handle_IPI(int irq, void *data) + tick_receive_broadcast(); + break; + #endif ++ case IPI_CPU_BACKTRACE: ++ nmi_cpu_backtrace(get_irq_regs()); ++ break; + default: + pr_warn("CPU%d: unhandled IPI%d\n", smp_processor_id(), ipi); + break; 
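Whether the doorbell is the SBI IPI extension or the memory-mapped ACLINT SSWI registers, there is still only one hardware software interrupt per hart; ipi_mux_create()/ipi_mux_process() fan the logical IPI types (reschedule, backtrace, and so on) out over it. A rough sketch of that multiplexing idea under simplified assumptions; ring_doorbell() and handle_one_ipi() are stand-ins, not kernel APIs:

#include <stdatomic.h>
#include <stdint.h>

#define NR_CPUS	8			/* placeholder size */

enum ipi_type { IPI_RESCHEDULE, IPI_CPU_BACKTRACE, IPI_NR };

static _Atomic uint32_t ipi_pending[NR_CPUS];

/* Stand-in for the HW doorbell: writel(1, sswi_base + hartid) or an SBI ecall. */
static void ring_doorbell(int cpu) { (void)cpu; }
/* Stand-in for the per-type dispatch done by handle_IPI(). */
static void handle_one_ipi(int type) { (void)type; }

static void send_ipi(int cpu, enum ipi_type t)
{
	/* Record the logical IPI first, then raise the single HW interrupt. */
	atomic_fetch_or(&ipi_pending[cpu], 1u << t);
	ring_doorbell(cpu);
}

static void ipi_mux_handler(int cpu)	/* runs from the chained irq handler */
{
	/* Drain everything posted before or while the doorbell fired. */
	uint32_t pending = atomic_exchange(&ipi_pending[cpu], 0);

	for (int t = 0; t < IPI_NR; t++)
		if (pending & (1u << t))
			handle_one_ipi(t);
}

int main(void)
{
	send_ipi(1, IPI_CPU_BACKTRACE);
	ipi_mux_handler(1);
	return 0;
}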
+@@ -212,6 +217,7 @@ static const char * const ipi_names[] = { + [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", + [IPI_IRQ_WORK] = "IRQ work interrupts", + [IPI_TIMER] = "Timer broadcast interrupts", ++ [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", + }; + + void show_ipi_stats(struct seq_file *p, int prec) +@@ -332,3 +338,14 @@ void arch_smp_send_reschedule(int cpu) + send_ipi_single(cpu, IPI_RESCHEDULE); + } + EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); ++ ++static void riscv_backtrace_ipi(cpumask_t *mask) ++{ ++ send_ipi_mask(mask, IPI_CPU_BACKTRACE); ++} ++ ++bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu) ++{ ++ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, riscv_backtrace_ipi); ++ return true; ++} +diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c +index 1b8da4e40a4d..3f37eec7a790 100644 +--- a/arch/riscv/kernel/smpboot.c ++++ b/arch/riscv/kernel/smpboot.c +@@ -25,6 +25,8 @@ + #include + #include + #include ++ ++#include + #include + #include + #include +@@ -105,7 +107,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un + if (hart == cpuid_to_hartid_map(0)) { + BUG_ON(found_boot_cpu); + found_boot_cpu = true; +- early_map_cpu_to_node(0, acpi_numa_get_nid(cpu_count)); + return 0; + } + +@@ -115,7 +116,6 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un + } + + cpuid_to_hartid_map(cpu_count) = hart; +- early_map_cpu_to_node(cpu_count, acpi_numa_get_nid(cpu_count)); + cpu_count++; + + return 0; +diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c +index 3c89b8ec69c4..9a8a0dc035b2 100644 +--- a/arch/riscv/kernel/suspend.c ++++ b/arch/riscv/kernel/suspend.c +@@ -4,13 +4,18 @@ + * Copyright (c) 2022 Ventana Micro Systems Inc. 
+ */ + ++#define pr_fmt(fmt) "suspend: " fmt ++ + #include ++#include + #include ++#include + #include + + void suspend_save_csrs(struct suspend_context *context) + { +- context->scratch = csr_read(CSR_SCRATCH); ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) ++ context->envcfg = csr_read(CSR_ENVCFG); + context->tvec = csr_read(CSR_TVEC); + context->ie = csr_read(CSR_IE); + +@@ -31,7 +36,9 @@ void suspend_save_csrs(struct suspend_context *context) + + void suspend_restore_csrs(struct suspend_context *context) + { +- csr_write(CSR_SCRATCH, context->scratch); ++ csr_write(CSR_SCRATCH, 0); ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG)) ++ csr_write(CSR_ENVCFG, context->envcfg); + csr_write(CSR_TVEC, context->tvec); + csr_write(CSR_IE, context->ie); + +@@ -85,3 +92,92 @@ int cpu_suspend(unsigned long arg, + + return rc; + } ++ ++#ifdef CONFIG_RISCV_SBI ++static int sbi_system_suspend(unsigned long sleep_type, ++ unsigned long resume_addr, ++ unsigned long opaque) ++{ ++ struct sbiret ret; ++ ++ ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND, ++ sleep_type, resume_addr, opaque, 0, 0, 0); ++ if (ret.error) ++ return sbi_err_map_linux_errno(ret.error); ++ ++ return ret.value; ++} ++ ++static int sbi_system_suspend_enter(suspend_state_t state) ++{ ++ return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend); ++} ++ ++static const struct platform_suspend_ops sbi_system_suspend_ops = { ++ .valid = suspend_valid_only_mem, ++ .enter = sbi_system_suspend_enter, ++}; ++ ++static int __init sbi_system_suspend_init(void) ++{ ++ if (sbi_spec_version >= sbi_mk_version(2, 0) && ++ sbi_probe_extension(SBI_EXT_SUSP) > 0) { ++ pr_info("SBI SUSP extension detected\n"); ++ if (IS_ENABLED(CONFIG_SUSPEND)) ++ suspend_set_ops(&sbi_system_suspend_ops); ++ } ++ ++ return 0; ++} ++ ++arch_initcall(sbi_system_suspend_init); ++ ++static int sbi_suspend_finisher(unsigned long suspend_type, ++ unsigned long resume_addr, ++ unsigned long opaque) ++{ ++ struct sbiret ret; ++ ++ ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, ++ suspend_type, resume_addr, opaque, 0, 0, 0); ++ ++ return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0; ++} ++ ++int riscv_sbi_hart_suspend(u32 state) ++{ ++ if (state & SBI_HSM_SUSP_NON_RET_BIT) ++ return cpu_suspend(state, sbi_suspend_finisher); ++ else ++ return sbi_suspend_finisher(state, 0, 0); ++} ++ ++bool riscv_sbi_suspend_state_is_valid(u32 state) ++{ ++ if (state > SBI_HSM_SUSPEND_RET_DEFAULT && ++ state < SBI_HSM_SUSPEND_RET_PLATFORM) ++ return false; ++ ++ if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT && ++ state < SBI_HSM_SUSPEND_NON_RET_PLATFORM) ++ return false; ++ ++ return true; ++} ++ ++bool riscv_sbi_hsm_is_supported(void) ++{ ++ /* ++ * The SBI HSM suspend function is only available when: ++ * 1) SBI version is 0.3 or higher ++ * 2) SBI HSM extension is available ++ */ ++ if (sbi_spec_version < sbi_mk_version(0, 3) || ++ !sbi_probe_extension(SBI_EXT_HSM)) { ++ pr_info("HSM suspend not available\n"); ++ return false; ++ } ++ ++ return true; ++} ++#endif /* CONFIG_RISCV_SBI */ +diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c +new file mode 100644 +index 000000000000..052a41f53dc2 +--- /dev/null ++++ b/arch/riscv/kernel/sys_hwprobe.c +@@ -0,0 +1,349 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * The hwprobe interface, for allowing userspace to probe to see which features ++ * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for ++ * more details. 
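++ *
++ * Illustrative userspace usage (not part of this file); a cpusetsize
++ * of 0 with a NULL cpu mask means "all online CPUs":
++ *
++ *	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };
++ *
++ *	syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
++ *	if (pair.value & RISCV_HWPROBE_EXT_ZBB)
++ *		printf("Zbb is usable on every online hart\n");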
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++static void hwprobe_arch_id(struct riscv_hwprobe *pair, ++ const struct cpumask *cpus) ++{ ++ u64 id = -1ULL; ++ bool first = true; ++ int cpu; ++ ++ for_each_cpu(cpu, cpus) { ++ u64 cpu_id; ++ ++ switch (pair->key) { ++ case RISCV_HWPROBE_KEY_MVENDORID: ++ cpu_id = riscv_cached_mvendorid(cpu); ++ break; ++ case RISCV_HWPROBE_KEY_MIMPID: ++ cpu_id = riscv_cached_mimpid(cpu); ++ break; ++ case RISCV_HWPROBE_KEY_MARCHID: ++ cpu_id = riscv_cached_marchid(cpu); ++ break; ++ } ++ ++ if (first) { ++ id = cpu_id; ++ first = false; ++ } ++ ++ /* ++ * If there's a mismatch for the given set, return -1 in the ++ * value. ++ */ ++ if (id != cpu_id) { ++ id = -1ULL; ++ break; ++ } ++ } ++ ++ pair->value = id; ++} ++ ++static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, ++ const struct cpumask *cpus) ++{ ++ int cpu; ++ u64 missing = 0; ++ ++ pair->value = 0; ++ if (has_fpu()) ++ pair->value |= RISCV_HWPROBE_IMA_FD; ++ ++ if (riscv_isa_extension_available(NULL, c)) ++ pair->value |= RISCV_HWPROBE_IMA_C; ++ ++ if (has_vector()) ++ pair->value |= RISCV_HWPROBE_IMA_V; ++ ++ /* ++ * Loop through and record extensions that 1) anyone has, and 2) anyone ++ * doesn't have. ++ */ ++ for_each_cpu(cpu, cpus) { ++ struct riscv_isainfo *isainfo = &hart_isa[cpu]; ++ ++#define EXT_KEY(ext) \ ++ do { \ ++ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \ ++ pair->value |= RISCV_HWPROBE_EXT_##ext; \ ++ else \ ++ missing |= RISCV_HWPROBE_EXT_##ext; \ ++ } while (false) ++ ++ /* ++ * Only use EXT_KEY() for extensions which can be exposed to userspace, ++ * regardless of the kernel's configuration, as no other checks, besides ++ * presence in the hart_isa bitmap, are made. ++ */ ++ EXT_KEY(ZACAS); ++ EXT_KEY(ZAWRS); ++ EXT_KEY(ZBA); ++ EXT_KEY(ZBB); ++ EXT_KEY(ZBC); ++ EXT_KEY(ZBKB); ++ EXT_KEY(ZBKC); ++ EXT_KEY(ZBKX); ++ EXT_KEY(ZBS); ++ EXT_KEY(ZCA); ++ EXT_KEY(ZCB); ++ EXT_KEY(ZCMOP); ++ EXT_KEY(ZICBOZ); ++ EXT_KEY(ZICOND); ++ EXT_KEY(ZIHINTNTL); ++ EXT_KEY(ZIHINTPAUSE); ++ EXT_KEY(ZIMOP); ++ EXT_KEY(ZKND); ++ EXT_KEY(ZKNE); ++ EXT_KEY(ZKNH); ++ EXT_KEY(ZKSED); ++ EXT_KEY(ZKSH); ++ EXT_KEY(ZKT); ++ EXT_KEY(ZTSO); ++ ++ if (has_vector()) { ++ EXT_KEY(ZVBB); ++ EXT_KEY(ZVBC); ++ EXT_KEY(ZVE32F); ++ EXT_KEY(ZVE32X); ++ EXT_KEY(ZVE64D); ++ EXT_KEY(ZVE64F); ++ EXT_KEY(ZVE64X); ++ EXT_KEY(ZVFH); ++ EXT_KEY(ZVFHMIN); ++ EXT_KEY(ZVKB); ++ EXT_KEY(ZVKG); ++ EXT_KEY(ZVKNED); ++ EXT_KEY(ZVKNHA); ++ EXT_KEY(ZVKNHB); ++ EXT_KEY(ZVKSED); ++ EXT_KEY(ZVKSH); ++ EXT_KEY(ZVKT); ++ } ++ ++ if (has_fpu()) { ++ EXT_KEY(ZCD); ++ EXT_KEY(ZCF); ++ EXT_KEY(ZFA); ++ EXT_KEY(ZFH); ++ EXT_KEY(ZFHMIN); ++ } ++ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM)) ++ EXT_KEY(SUPM); ++#undef EXT_KEY ++ } ++ ++ /* Now turn off reporting features if any CPU is missing it. 
*/ ++ pair->value &= ~missing; ++} ++ ++static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext) ++{ ++ struct riscv_hwprobe pair; ++ ++ hwprobe_isa_ext0(&pair, cpus); ++ return (pair.value & ext); ++} ++ ++static u64 hwprobe_misaligned(const struct cpumask *cpus) ++{ ++ int cpu; ++ u64 perf = -1ULL; ++ ++ for_each_cpu(cpu, cpus) { ++ int this_perf = per_cpu(misaligned_access_speed, cpu); ++ ++ if (perf == -1ULL) ++ perf = this_perf; ++ ++ if (perf != this_perf) { ++ perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; ++ break; ++ } ++ } ++ ++ if (perf == -1ULL) ++ return RISCV_HWPROBE_MISALIGNED_UNKNOWN; ++ ++ return perf; ++} ++ ++static void hwprobe_one_pair(struct riscv_hwprobe *pair, ++ const struct cpumask *cpus) ++{ ++ switch (pair->key) { ++ case RISCV_HWPROBE_KEY_MVENDORID: ++ case RISCV_HWPROBE_KEY_MARCHID: ++ case RISCV_HWPROBE_KEY_MIMPID: ++ hwprobe_arch_id(pair, cpus); ++ break; ++ /* ++ * The kernel already assumes that the base single-letter ISA ++ * extensions are supported on all harts, and only supports the ++ * IMA base, so just cheat a bit here and tell that to ++ * userspace. ++ */ ++ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: ++ pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; ++ break; ++ ++ case RISCV_HWPROBE_KEY_IMA_EXT_0: ++ hwprobe_isa_ext0(pair, cpus); ++ break; ++ ++ case RISCV_HWPROBE_KEY_CPUPERF_0: ++ pair->value = hwprobe_misaligned(cpus); ++ break; ++ ++ case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: ++ pair->value = 0; ++ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) ++ pair->value = riscv_cboz_block_size; ++ break; ++ case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS: ++ pair->value = user_max_virt_addr(); ++ break; ++ ++ case RISCV_HWPROBE_KEY_TIME_CSR_FREQ: ++ pair->value = riscv_timebase; ++ break; ++ ++ /* ++ * For forward compatibility, unknown keys don't fail the whole ++ * call, but get their element key set to -1 and value set to 0 ++ * indicating they're unrecognized. ++ */ ++ default: ++ pair->key = -1; ++ pair->value = 0; ++ break; ++ } ++} ++ ++static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, ++ size_t pair_count, size_t cpusetsize, ++ unsigned long __user *cpus_user, ++ unsigned int flags) ++{ ++ size_t out; ++ int ret; ++ cpumask_t cpus; ++ ++ /* Check the reserved flags. */ ++ if (flags != 0) ++ return -EINVAL; ++ ++ /* ++ * The interface supports taking in a CPU mask, and returns values that ++ * are consistent across that mask. Allow userspace to specify NULL and ++ * 0 as a shortcut to all online CPUs. ++ */ ++ cpumask_clear(&cpus); ++ if (!cpusetsize && !cpus_user) { ++ cpumask_copy(&cpus, cpu_online_mask); ++ } else { ++ if (cpusetsize > cpumask_size()) ++ cpusetsize = cpumask_size(); ++ ++ ret = copy_from_user(&cpus, cpus_user, cpusetsize); ++ if (ret) ++ return -EFAULT; ++ ++ /* ++ * Userspace must provide at least one online CPU, without that ++ * there's no way to define what is supported. 
++ */ ++ cpumask_and(&cpus, &cpus, cpu_online_mask); ++ if (cpumask_empty(&cpus)) ++ return -EINVAL; ++ } ++ ++ for (out = 0; out < pair_count; out++, pairs++) { ++ struct riscv_hwprobe pair; ++ ++ if (get_user(pair.key, &pairs->key)) ++ return -EFAULT; ++ ++ pair.value = 0; ++ hwprobe_one_pair(&pair, &cpus); ++ ret = put_user(pair.key, &pairs->key); ++ if (ret == 0) ++ ret = put_user(pair.value, &pairs->value); ++ ++ if (ret) ++ return -EFAULT; ++ } ++ ++ return 0; ++} ++ ++#ifdef CONFIG_MMU ++ ++static int __init init_hwprobe_vdso_data(void) ++{ ++ struct vdso_data *vd = __arch_get_k_vdso_data(); ++ struct arch_vdso_data *avd = &vd->arch_data; ++ u64 id_bitsmash = 0; ++ struct riscv_hwprobe pair; ++ int key; ++ ++ /* ++ * Initialize vDSO data with the answers for the "all CPUs" case, to ++ * save a syscall in the common case. ++ */ ++ for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { ++ pair.key = key; ++ hwprobe_one_pair(&pair, cpu_online_mask); ++ ++ WARN_ON_ONCE(pair.key < 0); ++ ++ avd->all_cpu_hwprobe_values[key] = pair.value; ++ /* ++ * Smash together the vendor, arch, and impl IDs to see if ++ * they're all 0 or any negative. ++ */ ++ if (key <= RISCV_HWPROBE_KEY_MIMPID) ++ id_bitsmash |= pair.value; ++ } ++ ++ /* ++ * If the arch, vendor, and implementation ID are all the same across ++ * all harts, then assume all CPUs are the same, and allow the vDSO to ++ * answer queries for arbitrary masks. However if all values are 0 (not ++ * populated) or any value returns -1 (varies across CPUs), then the ++ * vDSO should defer to the kernel for exotic cpu masks. ++ */ ++ avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; ++ return 0; ++} ++ ++arch_initcall_sync(init_hwprobe_vdso_data); ++ ++#endif /* CONFIG_MMU */ ++ ++SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, ++ size_t, pair_count, size_t, cpusetsize, unsigned long __user *, ++ cpus, unsigned int, flags) ++{ ++ return do_riscv_hwprobe(pairs, pair_count, cpusetsize, ++ cpus, flags); ++} +diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c +index 473159b5f303..f1c1416a9f1e 100644 +--- a/arch/riscv/kernel/sys_riscv.c ++++ b/arch/riscv/kernel/sys_riscv.c +@@ -7,15 +7,7 @@ + + #include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include + #include +-#include + + static long riscv_sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, +@@ -77,265 +69,6 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, + return 0; + } + +-/* +- * The hwprobe interface, for allowing userspace to probe to see which features +- * are supported by the hardware. See Documentation/riscv/hwprobe.rst for more +- * details. +- */ +-static void hwprobe_arch_id(struct riscv_hwprobe *pair, +- const struct cpumask *cpus) +-{ +- u64 id = -1ULL; +- bool first = true; +- int cpu; +- +- for_each_cpu(cpu, cpus) { +- u64 cpu_id; +- +- switch (pair->key) { +- case RISCV_HWPROBE_KEY_MVENDORID: +- cpu_id = riscv_cached_mvendorid(cpu); +- break; +- case RISCV_HWPROBE_KEY_MIMPID: +- cpu_id = riscv_cached_mimpid(cpu); +- break; +- case RISCV_HWPROBE_KEY_MARCHID: +- cpu_id = riscv_cached_marchid(cpu); +- break; +- } +- +- if (first) { +- id = cpu_id; +- first = false; +- } +- +- /* +- * If there's a mismatch for the given set, return -1 in the +- * value. 
+- */ +- if (id != cpu_id) { +- id = -1ULL; +- break; +- } +- } +- +- pair->value = id; +-} +- +-static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, +- const struct cpumask *cpus) +-{ +- int cpu; +- u64 missing = 0; +- +- pair->value = 0; +- if (has_fpu()) +- pair->value |= RISCV_HWPROBE_IMA_FD; +- +- if (riscv_isa_extension_available(NULL, c)) +- pair->value |= RISCV_HWPROBE_IMA_C; +- +- if (has_vector()) +- pair->value |= RISCV_HWPROBE_IMA_V; +- +- /* +- * Loop through and record extensions that 1) anyone has, and 2) anyone +- * doesn't have. +- */ +- for_each_cpu(cpu, cpus) { +- struct riscv_isainfo *isainfo = &hart_isa[cpu]; +- +- if (riscv_isa_extension_available(isainfo->isa, ZBA)) +- pair->value |= RISCV_HWPROBE_EXT_ZBA; +- else +- missing |= RISCV_HWPROBE_EXT_ZBA; +- +- if (riscv_isa_extension_available(isainfo->isa, ZBB)) +- pair->value |= RISCV_HWPROBE_EXT_ZBB; +- else +- missing |= RISCV_HWPROBE_EXT_ZBB; +- +- if (riscv_isa_extension_available(isainfo->isa, ZBS)) +- pair->value |= RISCV_HWPROBE_EXT_ZBS; +- else +- missing |= RISCV_HWPROBE_EXT_ZBS; +- } +- +- /* Now turn off reporting features if any CPU is missing it. */ +- pair->value &= ~missing; +-} +- +-static u64 hwprobe_misaligned(const struct cpumask *cpus) +-{ +- int cpu; +- u64 perf = -1ULL; +- +- for_each_cpu(cpu, cpus) { +- int this_perf = per_cpu(misaligned_access_speed, cpu); +- +- if (perf == -1ULL) +- perf = this_perf; +- +- if (perf != this_perf) { +- perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; +- break; +- } +- } +- +- if (perf == -1ULL) +- return RISCV_HWPROBE_MISALIGNED_UNKNOWN; +- +- return perf; +-} +- +-static void hwprobe_one_pair(struct riscv_hwprobe *pair, +- const struct cpumask *cpus) +-{ +- switch (pair->key) { +- case RISCV_HWPROBE_KEY_MVENDORID: +- case RISCV_HWPROBE_KEY_MARCHID: +- case RISCV_HWPROBE_KEY_MIMPID: +- hwprobe_arch_id(pair, cpus); +- break; +- /* +- * The kernel already assumes that the base single-letter ISA +- * extensions are supported on all harts, and only supports the +- * IMA base, so just cheat a bit here and tell that to +- * userspace. +- */ +- case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: +- pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; +- break; +- +- case RISCV_HWPROBE_KEY_IMA_EXT_0: +- hwprobe_isa_ext0(pair, cpus); +- break; +- +- case RISCV_HWPROBE_KEY_CPUPERF_0: +- pair->value = hwprobe_misaligned(cpus); +- break; +- +- /* +- * For forward compatibility, unknown keys don't fail the whole +- * call, but get their element key set to -1 and value set to 0 +- * indicating they're unrecognized. +- */ +- default: +- pair->key = -1; +- pair->value = 0; +- break; +- } +-} +- +-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, +- size_t pair_count, size_t cpu_count, +- unsigned long __user *cpus_user, +- unsigned int flags) +-{ +- size_t out; +- int ret; +- cpumask_t cpus; +- +- /* Check the reserved flags. */ +- if (flags != 0) +- return -EINVAL; +- +- /* +- * The interface supports taking in a CPU mask, and returns values that +- * are consistent across that mask. Allow userspace to specify NULL and +- * 0 as a shortcut to all online CPUs. +- */ +- cpumask_clear(&cpus); +- if (!cpu_count && !cpus_user) { +- cpumask_copy(&cpus, cpu_online_mask); +- } else { +- if (cpu_count > cpumask_size()) +- cpu_count = cpumask_size(); +- +- ret = copy_from_user(&cpus, cpus_user, cpu_count); +- if (ret) +- return -EFAULT; +- +- /* +- * Userspace must provide at least one online CPU, without that +- * there's no way to define what is supported. 
+- */ +- cpumask_and(&cpus, &cpus, cpu_online_mask); +- if (cpumask_empty(&cpus)) +- return -EINVAL; +- } +- +- for (out = 0; out < pair_count; out++, pairs++) { +- struct riscv_hwprobe pair; +- +- if (get_user(pair.key, &pairs->key)) +- return -EFAULT; +- +- pair.value = 0; +- hwprobe_one_pair(&pair, &cpus); +- ret = put_user(pair.key, &pairs->key); +- if (ret == 0) +- ret = put_user(pair.value, &pairs->value); +- +- if (ret) +- return -EFAULT; +- } +- +- return 0; +-} +- +-#ifdef CONFIG_MMU +- +-static int __init init_hwprobe_vdso_data(void) +-{ +- struct vdso_data *vd = __arch_get_k_vdso_data(); +- struct arch_vdso_data *avd = &vd->arch_data; +- u64 id_bitsmash = 0; +- struct riscv_hwprobe pair; +- int key; +- +- /* +- * Initialize vDSO data with the answers for the "all CPUs" case, to +- * save a syscall in the common case. +- */ +- for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { +- pair.key = key; +- hwprobe_one_pair(&pair, cpu_online_mask); +- +- WARN_ON_ONCE(pair.key < 0); +- +- avd->all_cpu_hwprobe_values[key] = pair.value; +- /* +- * Smash together the vendor, arch, and impl IDs to see if +- * they're all 0 or any negative. +- */ +- if (key <= RISCV_HWPROBE_KEY_MIMPID) +- id_bitsmash |= pair.value; +- } +- +- /* +- * If the arch, vendor, and implementation ID are all the same across +- * all harts, then assume all CPUs are the same, and allow the vDSO to +- * answer queries for arbitrary masks. However if all values are 0 (not +- * populated) or any value returns -1 (varies across CPUs), then the +- * vDSO should defer to the kernel for exotic cpu masks. +- */ +- avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; +- return 0; +-} +- +-arch_initcall_sync(init_hwprobe_vdso_data); +- +-#endif /* CONFIG_MMU */ +- +-SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, +- size_t, pair_count, size_t, cpu_count, unsigned long __user *, +- cpus, unsigned int, flags) +-{ +- return do_riscv_hwprobe(pairs, pair_count, cpu_count, +- cpus, flags); +-} +- + /* Not defined using SYSCALL_DEFINE0 to avoid error injection */ + asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused) + { +diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c +index cadf725ef798..1e926e4b5881 100644 +--- a/arch/riscv/kernel/vdso/hwprobe.c ++++ b/arch/riscv/kernel/vdso/hwprobe.c +@@ -3,26 +3,22 @@ + * Copyright 2023 Rivos, Inc + */ + ++#include + #include + #include + #include + + extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, ++ size_t cpusetsize, unsigned long *cpus, + unsigned int flags); + +-/* Add a prototype to avoid -Wmissing-prototypes warning. */ +-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, +- unsigned int flags); +- +-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, +- size_t cpu_count, unsigned long *cpus, +- unsigned int flags) ++static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags) + { + const struct vdso_data *vd = __arch_get_vdso_data(); + const struct arch_vdso_data *avd = &vd->arch_data; +- bool all_cpus = !cpu_count && !cpus; ++ bool all_cpus = !cpusetsize && !cpus; + struct riscv_hwprobe *p = pairs; + struct riscv_hwprobe *end = pairs + pair_count; + +@@ -33,7 +29,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + * masks. 
+ */ + if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus)) +- return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags); ++ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); + + /* This is something we can handle, fill out the pairs. */ + while (p < end) { +@@ -50,3 +46,71 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + + return 0; + } ++ ++static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags) ++{ ++ const struct vdso_data *vd = __arch_get_vdso_data(); ++ const struct arch_vdso_data *avd = &vd->arch_data; ++ struct riscv_hwprobe *p = pairs; ++ struct riscv_hwprobe *end = pairs + pair_count; ++ unsigned char *c = (unsigned char *)cpus; ++ bool empty_cpus = true; ++ bool clear_all = false; ++ int i; ++ ++ if (!cpusetsize || !cpus) ++ return -EINVAL; ++ ++ for (i = 0; i < cpusetsize; i++) { ++ if (c[i]) { ++ empty_cpus = false; ++ break; ++ } ++ } ++ ++ if (empty_cpus || flags != RISCV_HWPROBE_WHICH_CPUS || !avd->homogeneous_cpus) ++ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); ++ ++ while (p < end) { ++ if (riscv_hwprobe_key_is_valid(p->key)) { ++ struct riscv_hwprobe t = { ++ .key = p->key, ++ .value = avd->all_cpu_hwprobe_values[p->key], ++ }; ++ ++ if (!riscv_hwprobe_pair_cmp(&t, p)) ++ clear_all = true; ++ } else { ++ clear_all = true; ++ p->key = -1; ++ p->value = 0; ++ } ++ p++; ++ } ++ ++ if (clear_all) { ++ for (i = 0; i < cpusetsize; i++) ++ c[i] = 0; ++ } ++ ++ return 0; ++} ++ ++/* Add a prototype to avoid -Wmissing-prototypes warning. */ ++int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags); ++ ++int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, ++ size_t cpusetsize, unsigned long *cpus, ++ unsigned int flags) ++{ ++ if (flags & RISCV_HWPROBE_WHICH_CPUS) ++ return riscv_vdso_get_cpus(pairs, pair_count, cpusetsize, ++ cpus, flags); ++ ++ return riscv_vdso_get_values(pairs, pair_count, cpusetsize, ++ cpus, flags); ++} +diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c +index 81886fc36ed6..a0d1841c4388 100644 +--- a/arch/riscv/kernel/vector.c ++++ b/arch/riscv/kernel/vector.c +@@ -83,7 +83,8 @@ static bool insn_is_vector(u32 insn_buf) + static int riscv_v_thread_zalloc(void) + { + void *datap; +- ++ if (!riscv_v_vsize) ++ return -EINVAL; + datap = kzalloc(riscv_v_vsize, GFP_KERNEL); + if (!datap) + return -ENOMEM; +@@ -136,8 +137,11 @@ bool riscv_v_first_use_handler(struct pt_regs *regs) + u32 __user *epc = (u32 __user *)regs->epc; + u32 insn = (u32)regs->badaddr; + ++ if (!has_vector()) ++ return false; ++ + /* Do not handle if V is not supported, or disabled */ +- if (!(ELF_HWCAP & COMPAT_HWCAP_ISA_V)) ++ if (!riscv_v_vstate_ctrl_user_allowed()) + return false; + + /* If V has been enabled then it is not the first-use trap */ +diff --git a/arch/riscv/kernel/vendor_extensions.c b/arch/riscv/kernel/vendor_extensions.c +new file mode 100644 +index 000000000000..aeb8839d2f8a +--- /dev/null ++++ b/arch/riscv/kernel/vendor_extensions.c +@@ -0,0 +1,56 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright 2024 Rivos, Inc ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++struct riscv_isa_vendor_ext_data_list *riscv_isa_vendor_ext_list[] = { ++#ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES ++ &riscv_isa_vendor_ext_list_andes, ++#endif ++}; ++ ++const size_t 
riscv_isa_vendor_ext_list_size = ARRAY_SIZE(riscv_isa_vendor_ext_list); ++ ++/** ++ * __riscv_isa_vendor_extension_available() - Check whether given vendor ++ * extension is available or not. ++ * ++ * @cpu: check if extension is available on this cpu ++ * @vendor: vendor that the extension is a member of ++ * @bit: bit position of the desired extension ++ * Return: true or false ++ * ++ * NOTE: When cpu is -1, will check if extension is available on all cpus ++ */ ++bool __riscv_isa_vendor_extension_available(int cpu, unsigned long vendor, unsigned int bit) ++{ ++ struct riscv_isavendorinfo *bmap; ++ struct riscv_isavendorinfo *cpu_bmap; ++ ++ switch (vendor) { ++ #ifdef CONFIG_RISCV_ISA_VENDOR_EXT_ANDES ++ case ANDES_VENDOR_ID: ++ bmap = &riscv_isa_vendor_ext_list_andes.all_harts_isa_bitmap; ++ cpu_bmap = &riscv_isa_vendor_ext_list_andes.per_hart_isa_bitmap[cpu]; ++ break; ++ #endif ++ default: ++ return false; ++ } ++ ++ if (cpu != -1) ++ bmap = &cpu_bmap[cpu]; ++ ++ if (bit >= RISCV_ISA_VENDOR_EXT_MAX) ++ return false; ++ ++ return test_bit(bit, bmap->isa) ? true : false; ++} ++EXPORT_SYMBOL_GPL(__riscv_isa_vendor_extension_available); +diff --git a/arch/riscv/kernel/vendor_extensions/Makefile b/arch/riscv/kernel/vendor_extensions/Makefile +new file mode 100644 +index 000000000000..6a61aed944f1 +--- /dev/null ++++ b/arch/riscv/kernel/vendor_extensions/Makefile +@@ -0,0 +1,3 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++ ++obj-$(CONFIG_RISCV_ISA_VENDOR_EXT_ANDES) += andes.o +diff --git a/arch/riscv/kernel/vendor_extensions/andes.c b/arch/riscv/kernel/vendor_extensions/andes.c +new file mode 100644 +index 000000000000..4d8dfc974f00 +--- /dev/null ++++ b/arch/riscv/kernel/vendor_extensions/andes.c +@@ -0,0 +1,18 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++/* All Andes vendor extensions supported in Linux */ ++const struct riscv_isa_ext_data riscv_isa_vendor_ext_andes[] = { ++ __RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_VENDOR_EXT_XANDESPMU), ++}; ++ ++struct riscv_isa_vendor_ext_data_list riscv_isa_vendor_ext_list_andes = { ++ .ext_data_count = ARRAY_SIZE(riscv_isa_vendor_ext_andes), ++ .ext_data = riscv_isa_vendor_ext_andes, ++}; +diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c +index 74bb27440527..596209f1a6ff 100644 +--- a/arch/riscv/kvm/aia.c ++++ b/arch/riscv/kvm/aia.c +@@ -10,12 +10,12 @@ + #include + #include + #include ++#include + #include + #include + #include + #include +-#include +-#include ++#include + + struct aia_hgei_control { + raw_spinlock_t lock; +@@ -394,6 +394,8 @@ int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner, + { + int ret = -ENOENT; + unsigned long flags; ++ const struct imsic_global_config *gc; ++ const struct imsic_local_config *lc; + struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu); + + if (!kvm_riscv_aia_available() || !hgctrl) +@@ -409,11 +411,14 @@ int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner, + + raw_spin_unlock_irqrestore(&hgctrl->lock, flags); + +- /* TODO: To be updated later by AIA IMSIC HW guest file support */ +- if (hgei_va) +- *hgei_va = NULL; +- if (hgei_pa) +- *hgei_pa = 0; ++ gc = imsic_get_global_config(); ++ lc = (gc) ? 
per_cpu_ptr(gc->local, cpu) : NULL; ++ if (lc && ret > 0) { ++ if (hgei_va) ++ *hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ); ++ if (hgei_pa) ++ *hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ); ++ } + + return ret; + } +@@ -600,9 +605,11 @@ void kvm_riscv_aia_disable(void) + int kvm_riscv_aia_init(void) + { + int rc; ++ const struct imsic_global_config *gc; + + if (!riscv_isa_extension_available(NULL, SxAIA)) + return -ENODEV; ++ gc = imsic_get_global_config(); + + /* Figure-out number of bits in HGEIE */ + csr_write(CSR_HGEIE, -1UL); +@@ -614,17 +621,17 @@ int kvm_riscv_aia_init(void) + /* + * Number of usable HGEI lines should be minimum of per-HART + * IMSIC guest files and number of bits in HGEIE +- * +- * TODO: To be updated later by AIA IMSIC HW guest file support + */ +- kvm_riscv_aia_nr_hgei = 0; ++ if (gc) ++ kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei, ++ BIT(gc->guest_index_bits) - 1); ++ else ++ kvm_riscv_aia_nr_hgei = 0; + +- /* +- * Find number of guest MSI IDs +- * +- * TODO: To be updated later by AIA IMSIC HW guest file support +- */ ++ /* Find number of guest MSI IDs */ + kvm_riscv_aia_max_ids = IMSIC_MAX_ID; ++ if (gc && kvm_riscv_aia_nr_hgei) ++ kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1; + + /* Initialize guest external interrupt line management */ + rc = aia_hgei_init(); +diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c +index 9d5b04c971c4..f59d1c0c8c43 100644 +--- a/arch/riscv/kvm/aia_aplic.c ++++ b/arch/riscv/kvm/aia_aplic.c +@@ -7,12 +7,12 @@ + * Anup Patel + */ + ++#include + #include + #include + #include + #include + #include +-#include + + struct aplic_irq { + raw_spinlock_t lock; +diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c +index 5cd407c6a8e4..39cd26af5a69 100644 +--- a/arch/riscv/kvm/aia_device.c ++++ b/arch/riscv/kvm/aia_device.c +@@ -8,9 +8,9 @@ + */ + + #include ++#include + #include + #include +-#include + + static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) + { +diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c +index c1585444f856..a8085cd8215e 100644 +--- a/arch/riscv/kvm/aia_imsic.c ++++ b/arch/riscv/kvm/aia_imsic.c +@@ -9,13 +9,13 @@ + + #include + #include ++#include + #include + #include + #include + #include + #include + #include +-#include + + #define IMSIC_MAX_EIX (IMSIC_MAX_ID / BITS_PER_TYPE(u64)) + +diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c +index 48ae0d4b3932..225a435d9c9a 100644 +--- a/arch/riscv/kvm/main.c ++++ b/arch/riscv/kvm/main.c +@@ -11,7 +11,7 @@ + #include + #include + #include +-#include ++#include + #include + + long kvm_arch_dev_ioctl(struct file *filp, +diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c +index 44bc324aeeb0..23c0e82b5103 100644 +--- a/arch/riscv/kvm/tlb.c ++++ b/arch/riscv/kvm/tlb.c +@@ -12,7 +12,7 @@ + #include + #include + #include +-#include ++#include + #include + + #define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL) +diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c +index 08ba48a395aa..030904d82b58 100644 +--- a/arch/riscv/kvm/vcpu_fp.c ++++ b/arch/riscv/kvm/vcpu_fp.c +@@ -11,7 +11,7 @@ + #include + #include + #include +-#include ++#include + + #ifdef CONFIG_FPU + void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) +diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c +index d520b25d8561..9e7e755163a9 100644 +--- a/arch/riscv/kvm/vcpu_onereg.c ++++ b/arch/riscv/kvm/vcpu_onereg.c +@@ -13,7 +13,7 @@ + #include + #include + #include 
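Back in the kvm/aia.c hunk above, the HGEI allocation reduces to simple page arithmetic: guest interrupt file n of a hart lives n MMIO pages past that hart's IMSIC base, and at most 2^guest_index_bits - 1 files are usable (file 0 is the host's own S-level file). A small worked sketch, with the base address and index bits as made-up inputs and IMSIC_MMIO_PAGE_SZ assumed to be the usual 4 KiB:

#include <stdint.h>
#include <stdio.h>

#define IMSIC_MMIO_PAGE_SZ	0x1000UL	/* assumed: one 4 KiB page per file */

int main(void)
{
	uint64_t msi_pa = 0x28000000UL;		/* hypothetical per-hart IMSIC base */
	unsigned int guest_index_bits = 3;	/* hypothetical gc->guest_index_bits */
	unsigned int nr_hgei = (1u << guest_index_bits) - 1;	/* usable guest files */

	for (unsigned int n = 1; n <= nr_hgei; n++)	/* hgei numbers start at 1 */
		printf("guest file %u -> 0x%llx\n", n,
		       (unsigned long long)(msi_pa + n * IMSIC_MMIO_PAGE_SZ));
	return 0;
}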
+-#include ++#include + #include + #include + +diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c +index b430cbb69521..b339a2682f25 100644 +--- a/arch/riscv/kvm/vcpu_vector.c ++++ b/arch/riscv/kvm/vcpu_vector.c +@@ -11,7 +11,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile +index 26cb2502ecf8..183bf2097d57 100644 +--- a/arch/riscv/lib/Makefile ++++ b/arch/riscv/lib/Makefile +@@ -9,5 +9,6 @@ lib-y += strncmp.o + lib-$(CONFIG_MMU) += uaccess.o + lib-$(CONFIG_64BIT) += tishift.o + lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o ++lib-$(CONFIG_RISCV_ISA_ZBC) += crc32.o + + obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o +diff --git a/arch/riscv/lib/crc32.c b/arch/riscv/lib/crc32.c +new file mode 100644 +index 000000000000..d7dc599af3ef +--- /dev/null ++++ b/arch/riscv/lib/crc32.c +@@ -0,0 +1,294 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Accelerated CRC32 implementation with Zbc extension. ++ * ++ * Copyright (C) 2024 Intel Corporation ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * Refer to https://www.corsix.org/content/barrett-reduction-polynomials for ++ * better understanding of how this math works. ++ * ++ * let "+" denotes polynomial add (XOR) ++ * let "-" denotes polynomial sub (XOR) ++ * let "*" denotes polynomial multiplication ++ * let "/" denotes polynomial floor division ++ * let "S" denotes source data, XLEN bit wide ++ * let "P" denotes CRC32 polynomial ++ * let "T" denotes 2^(XLEN+32) ++ * let "QT" denotes quotient of T/P, with the bit for 2^XLEN being implicit ++ * ++ * crc32(S, P) ++ * => S * (2^32) - S * (2^32) / P * P ++ * => lowest 32 bits of: S * (2^32) / P * P ++ * => lowest 32 bits of: S * (2^32) * (T / P) / T * P ++ * => lowest 32 bits of: S * (2^32) * quotient / T * P ++ * => lowest 32 bits of: S * quotient / 2^XLEN * P ++ * => lowest 32 bits of: (clmul_high_part(S, QT) + S) * P ++ * => clmul_low_part(clmul_high_part(S, QT) + S, P) ++ * ++ * In terms of below implementations, the BE case is more intuitive, since the ++ * higher order bit sits at more significant position. ++ */ ++ ++#if __riscv_xlen == 64 ++/* Slide by XLEN bits per iteration */ ++# define STEP_ORDER 3 ++ ++/* Each below polynomial quotient has an implicit bit for 2^XLEN */ ++ ++/* Polynomial quotient of (2^(XLEN+32))/CRC32_POLY, in LE format */ ++# define CRC32_POLY_QT_LE 0x5a72d812fb808b20 ++ ++/* Polynomial quotient of (2^(XLEN+32))/CRC32C_POLY, in LE format */ ++# define CRC32C_POLY_QT_LE 0xa434f61c6f5389f8 ++ ++/* Polynomial quotient of (2^(XLEN+32))/CRC32_POLY, in BE format, it should be ++ * the same as the bit-reversed version of CRC32_POLY_QT_LE ++ */ ++# define CRC32_POLY_QT_BE 0x04d101df481b4e5a ++ ++static inline u64 crc32_le_prep(u32 crc, unsigned long const *ptr) ++{ ++ return (u64)crc ^ (__force u64)__cpu_to_le64(*ptr); ++} ++ ++static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt) ++{ ++ u32 crc; ++ ++ /* We don't have a "clmulrh" insn, so use clmul + slli instead. 
*/ ++ asm volatile (".option push\n" ++ ".option arch,+zbc\n" ++ "clmul %0, %1, %2\n" ++ "slli %0, %0, 1\n" ++ "xor %0, %0, %1\n" ++ "clmulr %0, %0, %3\n" ++ "srli %0, %0, 32\n" ++ ".option pop\n" ++ : "=&r" (crc) ++ : "r" (s), ++ "r" (poly_qt), ++ "r" ((u64)poly << 32) ++ :); ++ return crc; ++} ++ ++static inline u64 crc32_be_prep(u32 crc, unsigned long const *ptr) ++{ ++ return ((u64)crc << 32) ^ (__force u64)__cpu_to_be64(*ptr); ++} ++ ++#elif __riscv_xlen == 32 ++# define STEP_ORDER 2 ++/* Each quotient should match the upper half of its analog in RV64 */ ++# define CRC32_POLY_QT_LE 0xfb808b20 ++# define CRC32C_POLY_QT_LE 0x6f5389f8 ++# define CRC32_POLY_QT_BE 0x04d101df ++ ++static inline u32 crc32_le_prep(u32 crc, unsigned long const *ptr) ++{ ++ return crc ^ (__force u32)__cpu_to_le32(*ptr); ++} ++ ++static inline u32 crc32_le_zbc(unsigned long s, u32 poly, unsigned long poly_qt) ++{ ++ u32 crc; ++ ++ /* We don't have a "clmulrh" insn, so use clmul + slli instead. */ ++ asm volatile (".option push\n" ++ ".option arch,+zbc\n" ++ "clmul %0, %1, %2\n" ++ "slli %0, %0, 1\n" ++ "xor %0, %0, %1\n" ++ "clmulr %0, %0, %3\n" ++ ".option pop\n" ++ : "=&r" (crc) ++ : "r" (s), ++ "r" (poly_qt), ++ "r" (poly) ++ :); ++ return crc; ++} ++ ++static inline u32 crc32_be_prep(u32 crc, unsigned long const *ptr) ++{ ++ return crc ^ (__force u32)__cpu_to_be32(*ptr); ++} ++ ++#else ++# error "Unexpected __riscv_xlen" ++#endif ++ ++static inline u32 crc32_be_zbc(unsigned long s) ++{ ++ u32 crc; ++ ++ asm volatile (".option push\n" ++ ".option arch,+zbc\n" ++ "clmulh %0, %1, %2\n" ++ "xor %0, %0, %1\n" ++ "clmul %0, %0, %3\n" ++ ".option pop\n" ++ : "=&r" (crc) ++ : "r" (s), ++ "r" (CRC32_POLY_QT_BE), ++ "r" (CRC32_POLY_BE) ++ :); ++ return crc; ++} ++ ++#define STEP (1 << STEP_ORDER) ++#define OFFSET_MASK (STEP - 1) ++ ++typedef u32 (*fallback)(u32 crc, unsigned char const *p, size_t len); ++ ++static inline u32 crc32_le_unaligned(u32 crc, unsigned char const *p, ++ size_t len, u32 poly, ++ unsigned long poly_qt) ++{ ++ size_t bits = len * 8; ++ unsigned long s = 0; ++ u32 crc_low = 0; ++ ++ for (int i = 0; i < len; i++) ++ s = ((unsigned long)*p++ << (__riscv_xlen - 8)) | (s >> 8); ++ ++ s ^= (unsigned long)crc << (__riscv_xlen - bits); ++ if (__riscv_xlen == 32 || len < sizeof(u32)) ++ crc_low = crc >> bits; ++ ++ crc = crc32_le_zbc(s, poly, poly_qt); ++ crc ^= crc_low; ++ ++ return crc; ++} ++ ++static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p, ++ size_t len, u32 poly, ++ unsigned long poly_qt, ++ fallback crc_fb) ++{ ++ size_t offset, head_len, tail_len; ++ unsigned long const *p_ul; ++ unsigned long s; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBC, 1) ++ : : : : legacy); ++ ++ /* Handle the unaligned head. */ ++ offset = (unsigned long)p & OFFSET_MASK; ++ if (offset && len) { ++ head_len = min(STEP - offset, len); ++ crc = crc32_le_unaligned(crc, p, head_len, poly, poly_qt); ++ p += head_len; ++ len -= head_len; ++ } ++ ++ tail_len = len & OFFSET_MASK; ++ len = len >> STEP_ORDER; ++ p_ul = (unsigned long const *)p; ++ ++ for (int i = 0; i < len; i++) { ++ s = crc32_le_prep(crc, p_ul); ++ crc = crc32_le_zbc(s, poly, poly_qt); ++ p_ul++; ++ } ++ ++ /* Handle the tail bytes. 
*/ ++ p = (unsigned char const *)p_ul; ++ if (tail_len) ++ crc = crc32_le_unaligned(crc, p, tail_len, poly, poly_qt); ++ ++ return crc; ++ ++legacy: ++ return crc_fb(crc, p, len); ++} ++ ++u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) ++{ ++ return crc32_le_generic(crc, p, len, CRC32_POLY_LE, CRC32_POLY_QT_LE, ++ crc32_le_base); ++} ++ ++u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) ++{ ++ return crc32_le_generic(crc, p, len, CRC32C_POLY_LE, ++ CRC32C_POLY_QT_LE, __crc32c_le_base); ++} ++ ++static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p, ++ size_t len) ++{ ++ size_t bits = len * 8; ++ unsigned long s = 0; ++ u32 crc_low = 0; ++ ++ s = 0; ++ for (int i = 0; i < len; i++) ++ s = *p++ | (s << 8); ++ ++ if (__riscv_xlen == 32 || len < sizeof(u32)) { ++ s ^= crc >> (32 - bits); ++ crc_low = crc << bits; ++ } else { ++ s ^= (unsigned long)crc << (bits - 32); ++ } ++ ++ crc = crc32_be_zbc(s); ++ crc ^= crc_low; ++ ++ return crc; ++} ++ ++u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) ++{ ++ size_t offset, head_len, tail_len; ++ unsigned long const *p_ul; ++ unsigned long s; ++ ++ asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, ++ RISCV_ISA_EXT_ZBC, 1) ++ : : : : legacy); ++ ++ /* Handle the unaligned head. */ ++ offset = (unsigned long)p & OFFSET_MASK; ++ if (offset && len) { ++ head_len = min(STEP - offset, len); ++ crc = crc32_be_unaligned(crc, p, head_len); ++ p += head_len; ++ len -= head_len; ++ } ++ ++ tail_len = len & OFFSET_MASK; ++ len = len >> STEP_ORDER; ++ p_ul = (unsigned long const *)p; ++ ++ for (int i = 0; i < len; i++) { ++ s = crc32_be_prep(crc, p_ul); ++ crc = crc32_be_zbc(s); ++ p_ul++; ++ } ++ ++ /* Handle the tail bytes. */ ++ p = (unsigned char const *)p_ul; ++ if (tail_len) ++ crc = crc32_be_unaligned(crc, p, tail_len); ++ ++ return crc; ++ ++legacy: ++ return crc32_be_base(crc, p, len); ++} +diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c +index f1387272a551..55a34f2020a8 100644 +--- a/arch/riscv/mm/cacheflush.c ++++ b/arch/riscv/mm/cacheflush.c +@@ -3,7 +3,9 @@ + * Copyright (C) 2017 SiFive + */ + ++#include + #include ++#include + #include + + #ifdef CONFIG_SMP +@@ -124,13 +126,24 @@ void __init riscv_init_cbo_blocksizes(void) + unsigned long cbom_hartid, cboz_hartid; + u32 cbom_block_size = 0, cboz_block_size = 0; + struct device_node *node; ++ struct acpi_table_header *rhct; ++ acpi_status status; ++ ++ if (acpi_disabled) { ++ for_each_of_cpu_node(node) { ++ /* set block-size for cbom and/or cboz extension if available */ ++ cbo_get_block_size(node, "riscv,cbom-block-size", ++ &cbom_block_size, &cbom_hartid); ++ cbo_get_block_size(node, "riscv,cboz-block-size", ++ &cboz_block_size, &cboz_hartid); ++ } ++ } else { ++ status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct); ++ if (ACPI_FAILURE(status)) ++ return; + +- for_each_of_cpu_node(node) { +- /* set block-size for cbom and/or cboz extension if available */ +- cbo_get_block_size(node, "riscv,cbom-block-size", +- &cbom_block_size, &cbom_hartid); +- cbo_get_block_size(node, "riscv,cboz-block-size", +- &cboz_block_size, &cboz_hartid); ++ acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL); ++ acpi_put_table((struct acpi_table_header *)rhct); + } + + if (cbom_block_size) +diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c +index a77342eb3489..32031a7d96d4 100644 +--- a/arch/riscv/mm/dma-noncoherent.c ++++ b/arch/riscv/mm/dma-noncoherent.c +@@ -25,7 +25,7 @@ static inline void 
arch_dma_cache_wback(phys_addr_t paddr, size_t size) + return; + } + #endif +- ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size); ++ ALT_CMO_OP_VPA(clean, vaddr, paddr, size, riscv_cbom_block_size); + } + + static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size) +@@ -39,7 +39,7 @@ static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size) + } + #endif + +- ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size); ++ ALT_CMO_OP_VPA(inval, vaddr, paddr, size, riscv_cbom_block_size); + } + + static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size) +@@ -53,7 +53,7 @@ static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size) + } + #endif + +- ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size); ++ ALT_CMO_OP_VPA(flush, vaddr, paddr, size, riscv_cbom_block_size); + } + + static inline bool arch_sync_dma_clean_before_fromdevice(void) +@@ -117,6 +117,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, + void arch_dma_prep_coherent(struct page *page, size_t size) + { + void *flush_addr = page_address(page); ++ phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + #ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS + if (unlikely(noncoherent_cache_ops.wback_inv)) { +@@ -125,7 +126,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) + } + #endif + +- ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size); ++ ALT_CMO_OP_VPA(flush, flush_addr, paddr, size, riscv_cbom_block_size); + } + + void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, +diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c +index ef887efcb679..21ea6ed76470 100644 +--- a/arch/riscv/mm/pgtable.c ++++ b/arch/riscv/mm/pgtable.c +@@ -36,6 +36,7 @@ pud_t *pud_offset(p4d_t *p4d, unsigned long address) + + return (pud_t *)p4d; + } ++EXPORT_SYMBOL_GPL(pud_offset); + + p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) + { +@@ -44,6 +45,7 @@ p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) + + return (p4d_t *)pgd; + } ++EXPORT_SYMBOL_GPL(p4d_offset); + #endif + + #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c +index 324e8cd9b502..a9f4af9f7f3f 100644 +--- a/arch/riscv/mm/tlbflush.c ++++ b/arch/riscv/mm/tlbflush.c +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + /* + * Flush entire TLB if number of entries to be flushed is greater +@@ -12,6 +13,26 @@ + */ + static unsigned long tlb_flush_all_threshold __read_mostly = 64; + ++#define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL) ++ ++static inline void local_sfence_inval_ir(void) ++{ ++ asm volatile(SFENCE_INVAL_IR() ::: "memory"); ++} ++ ++static inline void local_sfence_w_inval(void) ++{ ++ asm volatile(SFENCE_W_INVAL() ::: "memory"); ++} ++ ++static inline void local_sinval_vma(unsigned long vma, unsigned long asid) ++{ ++ if (asid != FLUSH_TLB_NO_ASID) ++ asm volatile(SINVAL_VMA( %0, %1) : : "r" (vma), "r" (asid) : "memory"); ++ else ++ asm volatile(SINVAL_VMA( %0, zero) : : "r" (vma) : "memory"); ++} ++ + static void local_flush_tlb_range_threshold_asid(unsigned long start, + unsigned long size, + unsigned long stride, +@@ -25,6 +46,16 @@ static void local_flush_tlb_range_threshold_asid(unsigned long start, + return; + } + ++ if (has_svinval()) { ++ local_sfence_w_inval(); ++ for (i = 0; i < nr_ptes_in_range; ++i) { ++ local_sinval_vma(start, asid); ++ start += stride; ++ } ++ local_sfence_inval_ir(); ++ return; ++ } ++ + for (i = 0; i < nr_ptes_in_range; ++i) { + 
local_flush_tlb_page_asid(start, asid); + start += stride; +diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig +index ca5f37673985..28efc9b30f91 100644 +--- a/arch/sw_64/Kconfig ++++ b/arch/sw_64/Kconfig +@@ -428,7 +428,6 @@ source "kernel/livepatch/Kconfig" + config NUMA + bool "NUMA Support" + depends on SMP && !FLATMEM +- select ACPI_NUMA if ACPI + select OF_NUMA + help + Say Y to compile the kernel to support NUMA (Non-Uniform Memory +diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h +index 551829884734..dcfaa3812306 100644 +--- a/arch/x86/include/asm/hw_irq.h ++++ b/arch/x86/include/asm/hw_irq.h +@@ -16,8 +16,6 @@ + + #include + +-#define IRQ_MATRIX_BITS NR_VECTORS +- + #ifndef __ASSEMBLY__ + + #include +diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c +index 8e1ef5345b7a..a67bb8f982bd 100644 +--- a/arch/x86/mm/pgtable.c ++++ b/arch/x86/mm/pgtable.c +@@ -76,6 +76,9 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) + #if CONFIG_PGTABLE_LEVELS > 3 + void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) + { ++ struct ptdesc *ptdesc = virt_to_ptdesc(pud); ++ ++ pagetable_pud_dtor(ptdesc); + paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); + paravirt_tlb_remove_table(tlb, virt_to_page(pud)); + } +diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig +index c4746869d67b..34dfe1430d75 100644 +--- a/drivers/acpi/Kconfig ++++ b/drivers/acpi/Kconfig +@@ -281,7 +281,7 @@ config ACPI_CPPC_LIB + + config ACPI_PROCESSOR + tristate "Processor" +- depends on X86 || IA64 || ARM64 || LOONGARCH ++ depends on X86 || IA64 || ARM64 || LOONGARCH || RISCV + select ACPI_PROCESSOR_IDLE + select ACPI_CPU_FREQ_PSS if X86 || IA64 || LOONGARCH + select THERMAL +diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile +index eaa09bf52f17..d367e649714f 100644 +--- a/drivers/acpi/Makefile ++++ b/drivers/acpi/Makefile +@@ -37,7 +37,7 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o + # ACPI Bus and Device Drivers + # + acpi-y += bus.o glue.o +-acpi-y += scan.o ++acpi-y += scan.o mipi-disco-img.o + acpi-y += resource.o + acpi-y += acpi_processor.o + acpi-y += processor_core.o +diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c +index 6b1b31eabfdd..96d70d640a21 100644 +--- a/drivers/acpi/acpi_apd.c ++++ b/drivers/acpi/acpi_apd.c +@@ -40,8 +40,9 @@ struct apd_private_data { + const struct apd_device_desc *dev_desc; + }; + +-#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || \ +-defined(CONFIG_ARM64) || defined(CONFIG_SW64) ++#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || defined(CONFIG_ARM64) || \ ++ defined(CONFIG_SW64) || defined(CONFIG_RISCV) ++ + #define APD_ADDR(desc) ((unsigned long)&desc) + + static int acpi_apd_setup(struct apd_private_data *pdata) +@@ -205,6 +206,18 @@ static int sw64_acpi_apd_setup(struct apd_private_data *pdata) + } + #endif /* CONFIG_SW64 */ + ++#ifdef CONFIG_RISCV ++static const struct apd_device_desc sophgo_i2c_desc = { ++ .setup = acpi_apd_setup, ++ .fixed_clk_rate = 100000000, ++}; ++ ++static const struct apd_device_desc sophgo_spi_desc = { ++ .setup = acpi_apd_setup, ++ .fixed_clk_rate = 250000000, ++}; ++#endif /* CONFIG_RISCV */ ++ + #endif + + /* +@@ -277,6 +290,10 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { + #ifdef CONFIG_SW64 + { "SUNW0005", APD_ADDR(sunway_i2c_desc) }, + { "SUNW0008", APD_ADDR(sunway_spi_desc) }, ++#endif ++#ifdef CONFIG_RISCV ++ { "SOPH0003", APD_ADDR(sophgo_i2c_desc) }, ++ { "SOPH0004", APD_ADDR(sophgo_spi_desc) }, + #endif + { } + }; +diff --git a/drivers/acpi/acpi_lpss.c 
b/drivers/acpi/acpi_lpss.c +index 98a2ab3b6844..1a418424d250 100644 +--- a/drivers/acpi/acpi_lpss.c ++++ b/drivers/acpi/acpi_lpss.c +@@ -579,25 +579,26 @@ static struct device *acpi_lpss_find_device(const char *hid, const char *uid) + static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle) + { + struct acpi_handle_list dep_devices; +- acpi_status status; ++ bool ret = false; + int i; + + if (!acpi_has_method(adev->handle, "_DEP")) + return false; + +- status = acpi_evaluate_reference(adev->handle, "_DEP", NULL, +- &dep_devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(adev->handle, "_DEP", NULL, &dep_devices)) { + dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n"); + return false; + } + + for (i = 0; i < dep_devices.count; i++) { +- if (dep_devices.handles[i] == handle) +- return true; ++ if (dep_devices.handles[i] == handle) { ++ ret = true; ++ break; ++ } + } + +- return false; ++ acpi_handle_list_free(&dep_devices); ++ return ret; + } + + static void acpi_lpss_link_consumer(struct device *dev1, +diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c +index 93d796531af3..52b2abf88689 100644 +--- a/drivers/acpi/arm64/dma.c ++++ b/drivers/acpi/arm64/dma.c +@@ -8,7 +8,6 @@ void acpi_arch_dma_setup(struct device *dev) + { + int ret; + u64 end, mask; +- u64 size = 0; + const struct bus_dma_region *map = NULL; + + /* +@@ -23,31 +22,23 @@ void acpi_arch_dma_setup(struct device *dev) + } + + if (dev->coherent_dma_mask) +- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); ++ end = dev->coherent_dma_mask; + else +- size = 1ULL << 32; ++ end = (1ULL << 32) - 1; + + ret = acpi_dma_get_range(dev, &map); + if (!ret && map) { +- const struct bus_dma_region *r = map; +- +- for (end = 0; r->size; r++) { +- if (r->dma_start + r->size - 1 > end) +- end = r->dma_start + r->size - 1; +- } +- +- size = end + 1; ++ end = dma_range_map_max(map); + dev->dma_range_map = map; + } + + if (ret == -ENODEV) +- ret = iort_dma_get_ranges(dev, &size); ++ ret = iort_dma_get_ranges(dev, &end); + if (!ret) { + /* + * Limit coherent and dma mask based on size retrieved from + * firmware. + */ +- end = size - 1; + mask = DMA_BIT_MASK(ilog2(end) + 1); + dev->bus_dma_limit = end; + dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); +diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c +index e4848287cc85..e3d6f3af211c 100644 +--- a/drivers/acpi/arm64/iort.c ++++ b/drivers/acpi/arm64/iort.c +@@ -1464,7 +1464,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id) + { return -ENODEV; } + #endif + +-static int nc_dma_get_range(struct device *dev, u64 *size) ++static int nc_dma_get_range(struct device *dev, u64 *limit) + { + struct acpi_iort_node *node; + struct acpi_iort_named_component *ncomp; +@@ -1481,13 +1481,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size) + return -EINVAL; + } + +- *size = ncomp->memory_address_limit >= 64 ? U64_MAX : +- 1ULL<memory_address_limit; ++ *limit = ncomp->memory_address_limit >= 64 ? U64_MAX : ++ (1ULL << ncomp->memory_address_limit) - 1; + + return 0; + } + +-static int rc_dma_get_range(struct device *dev, u64 *size) ++static int rc_dma_get_range(struct device *dev, u64 *limit) + { + struct acpi_iort_node *node; + struct acpi_iort_root_complex *rc; +@@ -1505,8 +1505,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size) + return -EINVAL; + } + +- *size = rc->memory_address_limit >= 64 ? U64_MAX : +- 1ULL<memory_address_limit; ++ *limit = rc->memory_address_limit >= 64 ? 
U64_MAX : ++ (1ULL << rc->memory_address_limit) - 1; + + return 0; + } +@@ -1514,16 +1514,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size) + /** + * iort_dma_get_ranges() - Look up DMA addressing limit for the device + * @dev: device to lookup +- * @size: DMA range size result pointer ++ * @limit: DMA limit result pointer + * + * Return: 0 on success, an error otherwise. + */ +-int iort_dma_get_ranges(struct device *dev, u64 *size) ++int iort_dma_get_ranges(struct device *dev, u64 *limit) + { + if (dev_is_pci(dev)) +- return rc_dma_get_range(dev, size); ++ return rc_dma_get_range(dev, limit); + else +- return nc_dma_get_range(dev, size); ++ return nc_dma_get_range(dev, limit); + } + + static void __init acpi_iort_register_irq(int hwirq, const char *name, +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c +index e0800d1f8ff7..5ae0f1aa57ce 100644 +--- a/drivers/acpi/bus.c ++++ b/drivers/acpi/bus.c +@@ -1179,6 +1179,9 @@ static int __init acpi_bus_init_irq(void) + message = "SWPIC"; + break; + #endif ++ case ACPI_IRQ_MODEL_RINTC: ++ message = "RINTC"; ++ break; + default: + pr_info("Unknown interrupt routing model\n"); + return -ENODEV; +@@ -1435,6 +1438,7 @@ static int __init acpi_init(void) + acpi_hest_init(); + acpi_ghes_init(); + acpi_arm_init(); ++ acpi_riscv_init(); + acpi_scan_init(); + acpi_ec_init(); + acpi_debugfs_init(); +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index 0592aebe0c39..510b2ee3c71c 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -274,4 +274,12 @@ void acpi_init_lpit(void); + static inline void acpi_init_lpit(void) { } + #endif + ++/*-------------------------------------------------------------------------- ++ ACPI _CRS CSI-2 and MIPI DisCo for Imaging ++ -------------------------------------------------------------------------- */ ++ ++void acpi_mipi_check_crs_csi2(acpi_handle handle); ++void acpi_mipi_scan_crs_csi2(void); ++void acpi_mipi_crs_csi2_cleanup(void); ++ + #endif /* _ACPI_INTERNAL_H_ */ +diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c +new file mode 100644 +index 000000000000..91281c8cb4f2 +--- /dev/null ++++ b/drivers/acpi/mipi-disco-img.c +@@ -0,0 +1,292 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * MIPI DisCo for Imaging support. ++ * ++ * Copyright (C) 2023 Intel Corporation ++ * ++ * Support MIPI DisCo for Imaging by parsing ACPI _CRS CSI-2 records defined in ++ * Section 6.4.3.8.2.4 "Camera Serial Interface (CSI-2) Connection Resource ++ * Descriptor" of ACPI 6.5. ++ * ++ * The implementation looks for the information in the ACPI namespace (CSI-2 ++ * resource descriptors in _CRS) and constructs software nodes compatible with ++ * Documentation/firmware-guide/acpi/dsd/graph.rst to represent the CSI-2 ++ * connection graph. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "internal.h" ++ ++static LIST_HEAD(acpi_mipi_crs_csi2_list); ++ ++static void acpi_mipi_data_tag(acpi_handle handle, void *context) ++{ ++} ++ ++/* Connection data extracted from one _CRS CSI-2 resource descriptor. */ ++struct crs_csi2_connection { ++ struct list_head entry; ++ struct acpi_resource_csi2_serialbus csi2_data; ++ acpi_handle remote_handle; ++ char remote_name[]; ++}; ++ ++/* Data extracted from _CRS CSI-2 resource descriptors for one device. 
*/ ++struct crs_csi2 { ++ struct list_head entry; ++ acpi_handle handle; ++ struct acpi_device_software_nodes *swnodes; ++ struct list_head connections; ++ u32 port_count; ++}; ++ ++struct csi2_resources_walk_data { ++ acpi_handle handle; ++ struct list_head connections; ++}; ++ ++static acpi_status parse_csi2_resource(struct acpi_resource *res, void *context) ++{ ++ struct csi2_resources_walk_data *crwd = context; ++ struct acpi_resource_csi2_serialbus *csi2_res; ++ struct acpi_resource_source *csi2_res_src; ++ u16 csi2_res_src_length; ++ struct crs_csi2_connection *conn; ++ acpi_handle remote_handle; ++ ++ if (res->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) ++ return AE_OK; ++ ++ csi2_res = &res->data.csi2_serial_bus; ++ ++ if (csi2_res->type != ACPI_RESOURCE_SERIAL_TYPE_CSI2) ++ return AE_OK; ++ ++ csi2_res_src = &csi2_res->resource_source; ++ if (ACPI_FAILURE(acpi_get_handle(NULL, csi2_res_src->string_ptr, ++ &remote_handle))) { ++ acpi_handle_debug(crwd->handle, ++ "unable to find resource source\n"); ++ return AE_OK; ++ } ++ csi2_res_src_length = csi2_res_src->string_length; ++ if (!csi2_res_src_length) { ++ acpi_handle_debug(crwd->handle, ++ "invalid resource source string length\n"); ++ return AE_OK; ++ } ++ ++ conn = kmalloc(struct_size(conn, remote_name, csi2_res_src_length + 1), ++ GFP_KERNEL); ++ if (!conn) ++ return AE_OK; ++ ++ conn->csi2_data = *csi2_res; ++ strscpy(conn->remote_name, csi2_res_src->string_ptr, csi2_res_src_length); ++ conn->csi2_data.resource_source.string_ptr = conn->remote_name; ++ conn->remote_handle = remote_handle; ++ ++ list_add(&conn->entry, &crwd->connections); ++ ++ return AE_OK; ++} ++ ++static struct crs_csi2 *acpi_mipi_add_crs_csi2(acpi_handle handle, ++ struct list_head *list) ++{ ++ struct crs_csi2 *csi2; ++ ++ csi2 = kzalloc(sizeof(*csi2), GFP_KERNEL); ++ if (!csi2) ++ return NULL; ++ ++ csi2->handle = handle; ++ INIT_LIST_HEAD(&csi2->connections); ++ csi2->port_count = 1; ++ ++ if (ACPI_FAILURE(acpi_attach_data(handle, acpi_mipi_data_tag, csi2))) { ++ kfree(csi2); ++ return NULL; ++ } ++ ++ list_add(&csi2->entry, list); ++ ++ return csi2; ++} ++ ++static struct crs_csi2 *acpi_mipi_get_crs_csi2(acpi_handle handle) ++{ ++ struct crs_csi2 *csi2; ++ ++ if (ACPI_FAILURE(acpi_get_data_full(handle, acpi_mipi_data_tag, ++ (void **)&csi2, NULL))) ++ return NULL; ++ ++ return csi2; ++} ++ ++static void csi_csr2_release_connections(struct list_head *list) ++{ ++ struct crs_csi2_connection *conn, *conn_tmp; ++ ++ list_for_each_entry_safe(conn, conn_tmp, list, entry) { ++ list_del(&conn->entry); ++ kfree(conn); ++ } ++} ++ ++static void acpi_mipi_del_crs_csi2(struct crs_csi2 *csi2) ++{ ++ list_del(&csi2->entry); ++ acpi_detach_data(csi2->handle, acpi_mipi_data_tag); ++ kfree(csi2->swnodes); ++ csi_csr2_release_connections(&csi2->connections); ++ kfree(csi2); ++} ++ ++/** ++ * acpi_mipi_check_crs_csi2 - Look for CSI-2 resources in _CRS ++ * @handle: Device object handle to evaluate _CRS for. ++ * ++ * Find all CSI-2 resource descriptors in the given device's _CRS ++ * and collect them into a list. ++ */ ++void acpi_mipi_check_crs_csi2(acpi_handle handle) ++{ ++ struct csi2_resources_walk_data crwd = { ++ .handle = handle, ++ .connections = LIST_HEAD_INIT(crwd.connections), ++ }; ++ struct crs_csi2 *csi2; ++ ++ /* ++ * Avoid allocating _CRS CSI-2 objects for devices without any CSI-2 ++ * resource descriptions in _CRS to reduce overhead. 
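++ * parse_csi2_resource() below appends one connection entry per CSI-2
++ * serial-bus descriptor it finds, so an empty list afterwards means the
++ * device has no CSI-2 resources at all.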
++ */ ++ acpi_walk_resources(handle, METHOD_NAME__CRS, parse_csi2_resource, &crwd); ++ if (list_empty(&crwd.connections)) ++ return; ++ ++ /* ++ * Create a _CRS CSI-2 entry to store the extracted connection ++ * information and add it to the global list. ++ */ ++ csi2 = acpi_mipi_add_crs_csi2(handle, &acpi_mipi_crs_csi2_list); ++ if (!csi2) { ++ csi_csr2_release_connections(&crwd.connections); ++ return; /* Nothing really can be done about this. */ ++ } ++ ++ list_replace(&crwd.connections, &csi2->connections); ++} ++ ++#define NO_CSI2_PORT (UINT_MAX - 1) ++ ++static void alloc_crs_csi2_swnodes(struct crs_csi2 *csi2) ++{ ++ size_t port_count = csi2->port_count; ++ struct acpi_device_software_nodes *swnodes; ++ size_t alloc_size; ++ unsigned int i; ++ ++ /* ++ * Allocate memory for ports, node pointers (number of nodes + ++ * 1 (guardian), nodes (root + number of ports * 2 (because for ++ * every port there is an endpoint)). ++ */ ++ if (check_mul_overflow(sizeof(*swnodes->ports) + ++ sizeof(*swnodes->nodes) * 2 + ++ sizeof(*swnodes->nodeptrs) * 2, ++ port_count, &alloc_size) || ++ check_add_overflow(sizeof(*swnodes) + ++ sizeof(*swnodes->nodes) + ++ sizeof(*swnodes->nodeptrs) * 2, ++ alloc_size, &alloc_size)) { ++ acpi_handle_info(csi2->handle, ++ "too many _CRS CSI-2 resource handles (%zu)", ++ port_count); ++ return; ++ } ++ ++ swnodes = kmalloc(alloc_size, GFP_KERNEL); ++ if (!swnodes) ++ return; ++ ++ swnodes->ports = (struct acpi_device_software_node_port *)(swnodes + 1); ++ swnodes->nodes = (struct software_node *)(swnodes->ports + port_count); ++ swnodes->nodeptrs = (const struct software_node **)(swnodes->nodes + 1 + ++ 2 * port_count); ++ swnodes->num_ports = port_count; ++ ++ for (i = 0; i < 2 * port_count + 1; i++) ++ swnodes->nodeptrs[i] = &swnodes->nodes[i]; ++ ++ swnodes->nodeptrs[i] = NULL; ++ ++ for (i = 0; i < port_count; i++) ++ swnodes->ports[i].port_nr = NO_CSI2_PORT; ++ ++ csi2->swnodes = swnodes; ++} ++ ++/** ++ * acpi_mipi_scan_crs_csi2 - Create ACPI _CRS CSI-2 software nodes ++ * ++ * Note that this function must be called before any struct acpi_device objects ++ * are bound to any ACPI drivers or scan handlers, so it cannot assume the ++ * existence of struct acpi_device objects for every device present in the ACPI ++ * namespace. ++ * ++ * acpi_scan_lock in scan.c must be held when calling this function. ++ */ ++void acpi_mipi_scan_crs_csi2(void) ++{ ++ struct crs_csi2 *csi2; ++ LIST_HEAD(aux_list); ++ ++ /* Count references to each ACPI handle in the CSI-2 connection graph. */ ++ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) { ++ struct crs_csi2_connection *conn; ++ ++ list_for_each_entry(conn, &csi2->connections, entry) { ++ struct crs_csi2 *remote_csi2; ++ ++ csi2->port_count++; ++ ++ remote_csi2 = acpi_mipi_get_crs_csi2(conn->remote_handle); ++ if (remote_csi2) { ++ remote_csi2->port_count++; ++ continue; ++ } ++ /* ++ * The remote endpoint has no _CRS CSI-2 list entry yet, ++ * so create one for it and add it to the list. ++ */ ++ acpi_mipi_add_crs_csi2(conn->remote_handle, &aux_list); ++ } ++ } ++ list_splice(&aux_list, &acpi_mipi_crs_csi2_list); ++ ++ /* Allocate software nodes for representing the CSI-2 information. 
*/ ++ list_for_each_entry(csi2, &acpi_mipi_crs_csi2_list, entry) ++ alloc_crs_csi2_swnodes(csi2); ++} ++ ++/** ++ * acpi_mipi_crs_csi2_cleanup - Free _CRS CSI-2 temporary data ++ */ ++void acpi_mipi_crs_csi2_cleanup(void) ++{ ++ struct crs_csi2 *csi2, *csi2_tmp; ++ ++ list_for_each_entry_safe(csi2, csi2_tmp, &acpi_mipi_crs_csi2_list, entry) ++ acpi_mipi_del_crs_csi2(csi2); ++} +diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig +index 67d1f40bfa9f..f33194d1e43f 100644 +--- a/drivers/acpi/numa/Kconfig ++++ b/drivers/acpi/numa/Kconfig +@@ -1,9 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0 + config ACPI_NUMA +- bool "NUMA support" +- depends on NUMA +- depends on (X86 || IA64 || ARM64 || LOONGARCH || SW64) +- default y if IA64 || ARM64 ++ def_bool NUMA && !X86 + + config ACPI_HMAT + bool "ACPI Heterogeneous Memory Attribute Table Support" +diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c +index aad85ccae2e0..e146e1b46806 100644 +--- a/drivers/acpi/numa/srat.c ++++ b/drivers/acpi/numa/srat.c +@@ -165,6 +165,19 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header) + } + } + break; ++ ++ case ACPI_SRAT_TYPE_RINTC_AFFINITY: ++ { ++ struct acpi_srat_rintc_affinity *p = ++ (struct acpi_srat_rintc_affinity *)header; ++ pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n", ++ p->acpi_processor_uid, ++ p->proximity_domain, ++ (p->flags & ACPI_SRAT_RINTC_ENABLED) ? ++ "enabled" : "disabled"); ++ } ++ break; ++ + default: + pr_warn("Found unsupported SRAT entry (type = 0x%x)\n", + header->type); +@@ -206,7 +219,7 @@ int __init srat_disabled(void) + return acpi_numa < 0; + } + +-#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) ++#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) || defined(CONFIG_RISCV) + /* + * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for + * I/O localities since SRAT does not list them. 
I/O localities are +@@ -466,6 +479,21 @@ acpi_parse_memory_affinity(union acpi_subtable_headers * header, + return 0; + } + ++static int __init ++acpi_parse_rintc_affinity(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_srat_rintc_affinity *rintc_affinity; ++ ++ rintc_affinity = (struct acpi_srat_rintc_affinity *)header; ++ acpi_table_print_srat_entry(&header->common); ++ ++ /* let architecture-dependent part to do it */ ++ acpi_numa_rintc_affinity_init(rintc_affinity); ++ ++ return 0; ++} ++ + static int __init acpi_parse_srat(struct acpi_table_header *table) + { + struct acpi_table_srat *srat = (struct acpi_table_srat *)table; +@@ -501,7 +529,7 @@ int __init acpi_numa_init(void) + + /* SRAT: System Resource Affinity Table */ + if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { +- struct acpi_subtable_proc srat_proc[4]; ++ struct acpi_subtable_proc srat_proc[5]; + + memset(srat_proc, 0, sizeof(srat_proc)); + srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY; +@@ -512,6 +540,8 @@ int __init acpi_numa_init(void) + srat_proc[2].handler = acpi_parse_gicc_affinity; + srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY; + srat_proc[3].handler = acpi_parse_gi_affinity; ++ srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY; ++ srat_proc[4].handler = acpi_parse_rintc_affinity; + + acpi_table_parse_entries_array(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), +diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c +index aa1038b8aec4..b727db968f33 100644 +--- a/drivers/acpi/pci_link.c ++++ b/drivers/acpi/pci_link.c +@@ -748,6 +748,8 @@ static int acpi_pci_link_add(struct acpi_device *device, + if (result) + kfree(link); + ++ acpi_dev_clear_dependencies(device); ++ + return result < 0 ? result : 1; + } + +diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c +index f5cb96ff8768..82f049627306 100644 +--- a/drivers/acpi/pci_mcfg.c ++++ b/drivers/acpi/pci_mcfg.c +@@ -225,6 +225,23 @@ static struct mcfg_fixup mcfg_quirks[] = { + SW64_ECAM_QUIRK("SUNWAY ", 1, 0x06, &sunway_pci_ecam_ops), + SW64_ECAM_QUIRK("SUNWAY ", 1, 0x07, &sunway_pci_ecam_ops), + #endif /* SW64 */ ++ ++#ifdef CONFIG_RISCV ++#define RISCV_ECAM_MCFG(table_id, seg) \ ++ { "SOPHGO", table_id, 1, seg, MCFG_BUS_ANY, &sophgo_pci_ecam_ops } ++ ++ RISCV_ECAM_MCFG("2044 ", 0), ++ RISCV_ECAM_MCFG("2044 ", 1), ++ RISCV_ECAM_MCFG("2044 ", 2), ++ RISCV_ECAM_MCFG("2044 ", 3), ++ RISCV_ECAM_MCFG("2044 ", 4), ++ RISCV_ECAM_MCFG("2044 ", 5), ++ RISCV_ECAM_MCFG("2044 ", 6), ++ RISCV_ECAM_MCFG("2044 ", 7), ++ RISCV_ECAM_MCFG("2044 ", 8), ++ RISCV_ECAM_MCFG("2044 ", 9), ++#endif /* RISCV */ ++ + }; + + static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; +diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile +index 8b3b126e0b94..a96fdf1e2cb8 100644 +--- a/drivers/acpi/riscv/Makefile ++++ b/drivers/acpi/riscv/Makefile +@@ -1,2 +1,4 @@ + # SPDX-License-Identifier: GPL-2.0-only +-obj-y += rhct.o ++obj-y += rhct.o init.o irq.o ++obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o ++obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o +diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c +new file mode 100644 +index 000000000000..4cdff387deff +--- /dev/null ++++ b/drivers/acpi/riscv/cppc.c +@@ -0,0 +1,157 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Implement CPPC FFH helper routines for RISC-V. ++ * ++ * Copyright (C) 2024 Ventana Micro Systems Inc. 
++ */
++
++#include <acpi/cppc_acpi.h>
++#include <asm/csr.h>
++#include <asm/sbi.h>
++
++#define SBI_EXT_CPPC 0x43505043
++
++/* CPPC interfaces defined in SBI spec */
++#define SBI_CPPC_PROBE 0x0
++#define SBI_CPPC_READ 0x1
++#define SBI_CPPC_READ_HI 0x2
++#define SBI_CPPC_WRITE 0x3
++
++/* RISC-V FFH definitions from RISC-V FFH spec */
++#define FFH_CPPC_TYPE(r) (((r) & GENMASK_ULL(63, 60)) >> 60)
++#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK(31, 0))
++#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK(11, 0))
++
++#define FFH_CPPC_SBI 0x1
++#define FFH_CPPC_CSR 0x2
++
++struct sbi_cppc_data {
++ u64 val;
++ u32 reg;
++ struct sbiret ret;
++};
++
++static bool cppc_ext_present;
++
++static int __init sbi_cppc_init(void)
++{
++ if (sbi_spec_version >= sbi_mk_version(2, 0) &&
++ sbi_probe_extension(SBI_EXT_CPPC) > 0) {
++ pr_info("SBI CPPC extension detected\n");
++ cppc_ext_present = true;
++ } else {
++ pr_info("SBI CPPC extension NOT detected!!\n");
++ cppc_ext_present = false;
++ }
++
++ return 0;
++}
++device_initcall(sbi_cppc_init);
++
++static void sbi_cppc_read(void *read_data)
++{
++ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
++
++ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_READ,
++ data->reg, 0, 0, 0, 0, 0);
++}
++
++static void sbi_cppc_write(void *write_data)
++{
++ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
++
++ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_WRITE,
++ data->reg, data->val, 0, 0, 0, 0);
++}
++
++static void cppc_ffh_csr_read(void *read_data)
++{
++ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
++
++ switch (data->reg) {
++ /* Support only TIME CSR for now */
++ case CSR_TIME:
++ data->ret.value = csr_read(CSR_TIME);
++ data->ret.error = 0;
++ break;
++ default:
++ data->ret.error = -EINVAL;
++ break;
++ }
++}
++
++static void cppc_ffh_csr_write(void *write_data)
++{
++ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
++
++ data->ret.error = -EINVAL;
++}
++
++/*
++ * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
++ * below.
++ */
++bool cpc_ffh_supported(void)
++{
++ return true;
++}
++
++int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
++{
++ struct sbi_cppc_data data;
++
++ if (WARN_ON_ONCE(irqs_disabled()))
++ return -EPERM;
++
++ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
++ if (!cppc_ext_present)
++ return -EINVAL;
++
++ data.reg = FFH_CPPC_SBI_REG(reg->address);
++
++ smp_call_function_single(cpu, sbi_cppc_read, &data, 1);
++
++ *val = data.ret.value;
++
++ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
++ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
++ data.reg = FFH_CPPC_CSR_NUM(reg->address);
++
++ smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1);
++
++ *val = data.ret.value;
++
++ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
++ }
++
++ return -EINVAL;
++}
++
++int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
++{
++ struct sbi_cppc_data data;
++
++ if (WARN_ON_ONCE(irqs_disabled()))
++ return -EPERM;
++
++ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
++ if (!cppc_ext_present)
++ return -EINVAL;
++
++ data.reg = FFH_CPPC_SBI_REG(reg->address);
++ data.val = val;
++
++ smp_call_function_single(cpu, sbi_cppc_write, &data, 1);
++
++ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
++ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
++ data.reg = FFH_CPPC_CSR_NUM(reg->address);
++ data.val = val;
++
++ smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
++
++ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
++ }
++
++ return -EINVAL;
++}
+diff --git a/drivers/acpi/riscv/cpuidle.c b/drivers/acpi/riscv/cpuidle.c
+new file mode 100644
+index 000000000000..624f9bbdb58c
+--- /dev/null
++++ b/drivers/acpi/riscv/cpuidle.c
+@@ -0,0 +1,81 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (C) 2024, Ventana Micro Systems Inc
++ * Author: Sunil V L <sunilvl@ventanamicro.com>
++ *
++ */
++
++#include <linux/acpi.h>
++#include <acpi/processor.h>
++#include <linux/cpu_pm.h>
++#include <linux/cpuidle.h>
++#include <linux/suspend.h>
++#include <asm/cpuidle.h>
++#include <asm/sbi.h>
++#include <asm/suspend.h>
++
++#define RISCV_FFH_LPI_TYPE_MASK GENMASK_ULL(63, 60)
++#define RISCV_FFH_LPI_RSVD_MASK GENMASK_ULL(59, 32)
++
++#define RISCV_FFH_LPI_TYPE_SBI BIT_ULL(60)
++
++static int acpi_cpu_init_idle(unsigned int cpu)
++{
++ int i;
++ struct acpi_lpi_state *lpi;
++ struct acpi_processor *pr = per_cpu(processors, cpu);
++
++ if (unlikely(!pr || !pr->flags.has_lpi))
++ return -EINVAL;
++
++ if (!riscv_sbi_hsm_is_supported())
++ return -ENODEV;
++
++ if (pr->power.count <= 1)
++ return -ENODEV;
++
++ for (i = 1; i < pr->power.count; i++) {
++ u32 state;
++
++ lpi = &pr->power.lpi_states[i];
++
++ /*
++ * Validate Entry Method as per FFH spec.
++ * bits[63:60] should be 0x1
++ * bits[59:32] should be 0x0
++ * bits[31:0] represent a SBI power_state
++ */
++ if (((lpi->address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI) ||
++ (lpi->address & RISCV_FFH_LPI_RSVD_MASK)) {
++ pr_warn("Invalid LPI entry method %#llx\n", lpi->address);
++ return -EINVAL;
++ }
++
++ state = lpi->address;
++ if (!riscv_sbi_suspend_state_is_valid(state)) {
++ pr_warn("Invalid SBI power state %#x\n", state);
++ return -EINVAL;
++ }
++ }
++
++ return 0;
++}
++
++int acpi_processor_ffh_lpi_probe(unsigned int cpu)
++{
++ return acpi_cpu_init_idle(cpu);
++}
++
++int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
++{
++ u32 state = lpi->address;
++
++ if (state & SBI_HSM_SUSP_NON_RET_BIT)
++ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend,
++ lpi->index,
++ state);
++ else
++ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
++ lpi->index,
++ state);
++}
+diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c
+new file mode 100644
+index 000000000000..5ef97905a727
+--- /dev/null
++++ b/drivers/acpi/riscv/init.c
+@@ -0,0 +1,13 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (C) 2023-2024, Ventana Micro Systems Inc
++ * Author: Sunil V L <sunilvl@ventanamicro.com>
++ */
++
++#include <linux/acpi.h>
++#include "init.h"
++
++void __init acpi_riscv_init(void)
++{
++ riscv_acpi_init_gsi_mapping();
++}
+diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h
+new file mode 100644
+index 000000000000..0b9a07e4031f
+--- /dev/null
++++ b/drivers/acpi/riscv/init.h
+@@ -0,0 +1,4 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++#include <linux/acpi.h>
++
++void __init riscv_acpi_init_gsi_mapping(void);
+diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c
+new file mode 100644
+index 000000000000..cced960c2aef
+--- /dev/null
++++ b/drivers/acpi/riscv/irq.c
+@@ -0,0 +1,335 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (C) 2023-2024, Ventana Micro Systems Inc
++ * Author: Sunil V L <sunilvl@ventanamicro.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/sort.h>
++#include <linux/irq.h>
++
++#include "init.h"
++
++struct riscv_ext_intc_list {
++ acpi_handle handle;
++ u32 gsi_base;
++ u32
nr_irqs; ++ u32 nr_idcs; ++ u32 id; ++ u32 type; ++ struct list_head list; ++}; ++ ++struct acpi_irq_dep_ctx { ++ int rc; ++ unsigned int index; ++ acpi_handle handle; ++}; ++ ++LIST_HEAD(ext_intc_list); ++ ++static int irqchip_cmp_func(const void *in0, const void *in1) ++{ ++ struct acpi_probe_entry *elem0 = (struct acpi_probe_entry *)in0; ++ struct acpi_probe_entry *elem1 = (struct acpi_probe_entry *)in1; ++ ++ return (elem0->type > elem1->type) - (elem0->type < elem1->type); ++} ++ ++/* ++ * On RISC-V, RINTC structures in MADT should be probed before any other ++ * interrupt controller structures and IMSIC before APLIC. The interrupt ++ * controller subtypes in MADT of ACPI spec for RISC-V are defined in ++ * the incremental order like RINTC(24)->IMSIC(25)->APLIC(26)->PLIC(27). ++ * Hence, simply sorting the subtypes in incremental order will ++ * establish the required order. ++ */ ++void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) ++{ ++ struct acpi_probe_entry *ape = ap_head; ++ ++ if (nr == 1 || !ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) ++ return; ++ sort(ape, nr, sizeof(*ape), irqchip_cmp_func, NULL); ++} ++ ++static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct list_head *i, *tmp; ++ ++ list_for_each_safe(i, tmp, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (gsi_base == ext_intc_element->gsi_base) { ++ ext_intc_element->handle = handle; ++ return AE_OK; ++ } ++ } ++ ++ return AE_NOT_FOUND; ++} ++ ++int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base, ++ u32 *id, u32 *nr_irqs, u32 *nr_idcs) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct list_head *i; ++ ++ list_for_each(i, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (ext_intc_element->handle == ACPI_HANDLE_FWNODE(fwnode)) { ++ *gsi_base = ext_intc_element->gsi_base; ++ *id = ext_intc_element->id; ++ *nr_irqs = ext_intc_element->nr_irqs; ++ if (nr_idcs) ++ *nr_idcs = ext_intc_element->nr_idcs; ++ ++ return 0; ++ } ++ } ++ ++ return -ENODEV; ++} ++ ++struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct acpi_device *adev; ++ struct list_head *i; ++ ++ list_for_each(i, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (gsi >= ext_intc_element->gsi_base && ++ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) { ++ adev = acpi_fetch_acpi_dev(ext_intc_element->handle); ++ if (!adev) ++ return NULL; ++ ++ return acpi_fwnode_handle(adev); ++ } ++ } ++ ++ return NULL; ++} ++ ++static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs, ++ u32 id, u32 type) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ ++ ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL); ++ if (!ext_intc_element) ++ return -ENOMEM; ++ ++ ext_intc_element->gsi_base = gsi_base; ++ ext_intc_element->nr_irqs = nr_irqs; ++ ext_intc_element->nr_idcs = nr_idcs; ++ ext_intc_element->id = id; ++ list_add_tail(&ext_intc_element->list, &ext_intc_list); ++ return 0; ++} ++ ++static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level, ++ void *context, void **return_value) ++{ ++ acpi_status status; ++ u64 gbase; ++ ++ if (!acpi_has_method(handle, "_GSB")) { ++ acpi_handle_err(handle, "_GSB method not found\n"); ++ return AE_ERROR; ++ } 
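++
++ /*
++ * _GSB evaluates to the GSI base of this interrupt controller; use it
++ * to attach this namespace handle to the matching entry created from
++ * the MADT by riscv_acpi_register_ext_intc().
++ */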
++ ++ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase); ++ if (ACPI_FAILURE(status)) { ++ acpi_handle_err(handle, "failed to evaluate _GSB method\n"); ++ return status; ++ } ++ ++ status = riscv_acpi_update_gsi_handle((u32)gbase, handle); ++ if (ACPI_FAILURE(status)) { ++ acpi_handle_err(handle, "failed to find the GSI mapping entry\n"); ++ return status; ++ } ++ ++ return AE_OK; ++} ++ ++static int __init riscv_acpi_aplic_parse_madt(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_aplic *aplic = (struct acpi_madt_aplic *)header; ++ ++ return riscv_acpi_register_ext_intc(aplic->gsi_base, aplic->num_sources, aplic->num_idcs, ++ aplic->id, ACPI_RISCV_IRQCHIP_APLIC); ++} ++ ++static int __init riscv_acpi_plic_parse_madt(union acpi_subtable_headers *header, ++ const unsigned long end) ++{ ++ struct acpi_madt_plic *plic = (struct acpi_madt_plic *)header; ++ ++ return riscv_acpi_register_ext_intc(plic->gsi_base, plic->num_irqs, 0, ++ plic->id, ACPI_RISCV_IRQCHIP_PLIC); ++} ++ ++void __init riscv_acpi_init_gsi_mapping(void) ++{ ++ /* There can be either PLIC or APLIC */ ++ if (acpi_table_parse_madt(ACPI_MADT_TYPE_PLIC, riscv_acpi_plic_parse_madt, 0) > 0) { ++ acpi_get_devices("RSCV0001", riscv_acpi_create_gsi_map, NULL, NULL); ++ return; ++ } ++ ++ if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0) ++ acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL); ++} ++ ++static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi) ++{ ++ struct riscv_ext_intc_list *ext_intc_element; ++ struct list_head *i; ++ ++ list_for_each(i, &ext_intc_list) { ++ ext_intc_element = list_entry(i, struct riscv_ext_intc_list, list); ++ if (gsi >= ext_intc_element->gsi_base && ++ gsi < (ext_intc_element->gsi_base + ext_intc_element->nr_irqs)) ++ return ext_intc_element->handle; ++ } ++ ++ return NULL; ++} ++ ++static acpi_status riscv_acpi_irq_get_parent(struct acpi_resource *ares, void *context) ++{ ++ struct acpi_irq_dep_ctx *ctx = context; ++ struct acpi_resource_irq *irq; ++ struct acpi_resource_extended_irq *eirq; ++ ++ switch (ares->type) { ++ case ACPI_RESOURCE_TYPE_IRQ: ++ irq = &ares->data.irq; ++ if (ctx->index >= irq->interrupt_count) { ++ ctx->index -= irq->interrupt_count; ++ return AE_OK; ++ } ++ ctx->handle = riscv_acpi_get_gsi_handle(irq->interrupts[ctx->index]); ++ return AE_CTRL_TERMINATE; ++ case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: ++ eirq = &ares->data.extended_irq; ++ if (eirq->producer_consumer == ACPI_PRODUCER) ++ return AE_OK; ++ ++ if (ctx->index >= eirq->interrupt_count) { ++ ctx->index -= eirq->interrupt_count; ++ return AE_OK; ++ } ++ ++ /* Support GSIs only */ ++ if (eirq->resource_source.string_length) ++ return AE_OK; ++ ++ ctx->handle = riscv_acpi_get_gsi_handle(eirq->interrupts[ctx->index]); ++ return AE_CTRL_TERMINATE; ++ } ++ ++ return AE_OK; ++} ++ ++static int riscv_acpi_irq_get_dep(acpi_handle handle, unsigned int index, acpi_handle *gsi_handle) ++{ ++ struct acpi_irq_dep_ctx ctx = {-EINVAL, index, NULL}; ++ ++ if (!gsi_handle) ++ return 0; ++ ++ acpi_walk_resources(handle, METHOD_NAME__CRS, riscv_acpi_irq_get_parent, &ctx); ++ *gsi_handle = ctx.handle; ++ if (*gsi_handle) ++ return 1; ++ ++ return 0; ++} ++ ++static u32 riscv_acpi_add_prt_dep(acpi_handle handle) ++{ ++ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; ++ struct acpi_pci_routing_table *entry; ++ struct acpi_handle_list dep_devices; ++ acpi_handle gsi_handle; ++ acpi_handle link_handle; ++ acpi_status status; ++ u32 
count = 0; ++ ++ status = acpi_get_irq_routing_table(handle, &buffer); ++ if (ACPI_FAILURE(status)) { ++ acpi_handle_err(handle, "failed to get IRQ routing table\n"); ++ kfree(buffer.pointer); ++ return 0; ++ } ++ ++ entry = buffer.pointer; ++ while (entry && (entry->length > 0)) { ++ if (entry->source[0]) { ++ acpi_get_handle(handle, entry->source, &link_handle); ++ dep_devices.count = 1; ++ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); ++ if (!dep_devices.handles) { ++ acpi_handle_err(handle, "failed to allocate memory\n"); ++ continue; ++ } ++ ++ dep_devices.handles[0] = link_handle; ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ } else { ++ gsi_handle = riscv_acpi_get_gsi_handle(entry->source_index); ++ dep_devices.count = 1; ++ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); ++ if (!dep_devices.handles) { ++ acpi_handle_err(handle, "failed to allocate memory\n"); ++ continue; ++ } ++ ++ dep_devices.handles[0] = gsi_handle; ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ } ++ ++ entry = (struct acpi_pci_routing_table *) ++ ((unsigned long)entry + entry->length); ++ } ++ ++ kfree(buffer.pointer); ++ return count; ++} ++ ++static u32 riscv_acpi_add_irq_dep(acpi_handle handle) ++{ ++ struct acpi_handle_list dep_devices; ++ acpi_handle gsi_handle; ++ u32 count = 0; ++ int i; ++ ++ for (i = 0; ++ riscv_acpi_irq_get_dep(handle, i, &gsi_handle); ++ i++) { ++ dep_devices.count = 1; ++ dep_devices.handles = kcalloc(1, sizeof(*dep_devices.handles), GFP_KERNEL); ++ if (!dep_devices.handles) { ++ acpi_handle_err(handle, "failed to allocate memory\n"); ++ continue; ++ } ++ ++ dep_devices.handles[0] = gsi_handle; ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ } ++ ++ return count; ++} ++ ++u32 arch_acpi_add_auto_dep(acpi_handle handle) ++{ ++ if (acpi_has_method(handle, "_PRT")) ++ return riscv_acpi_add_prt_dep(handle); ++ ++ return riscv_acpi_add_irq_dep(handle); ++} +diff --git a/drivers/acpi/riscv/rhct.c b/drivers/acpi/riscv/rhct.c +index b280b3e9c7d9..caa2c16e1697 100644 +--- a/drivers/acpi/riscv/rhct.c ++++ b/drivers/acpi/riscv/rhct.c +@@ -8,8 +8,9 @@ + #define pr_fmt(fmt) "ACPI: RHCT: " fmt + + #include ++#include + +-static struct acpi_table_header *acpi_get_rhct(void) ++static struct acpi_table_rhct *acpi_get_rhct(void) + { + static struct acpi_table_header *rhct; + acpi_status status; +@@ -26,7 +27,7 @@ static struct acpi_table_header *acpi_get_rhct(void) + } + } + +- return rhct; ++ return (struct acpi_table_rhct *)rhct; + } + + /* +@@ -48,7 +49,7 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const + BUG_ON(acpi_disabled); + + if (!table) { +- rhct = (struct acpi_table_rhct *)acpi_get_rhct(); ++ rhct = acpi_get_rhct(); + if (!rhct) + return -ENOENT; + } else { +@@ -81,3 +82,89 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const + + return -1; + } ++ ++static void acpi_parse_hart_info_cmo_node(struct acpi_table_rhct *rhct, ++ struct acpi_rhct_hart_info *hart_info, ++ u32 *cbom_size, u32 *cboz_size, u32 *cbop_size) ++{ ++ u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info); ++ u32 size_hdr = sizeof(struct acpi_rhct_node_header); ++ struct acpi_rhct_node_header *ref_node; ++ struct acpi_rhct_cmo_node *cmo_node; ++ u32 *hart_info_node_offset; ++ ++ hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo); ++ for (int i = 0; i < hart_info->num_offsets; i++) { ++ ref_node = ACPI_ADD_PTR(struct acpi_rhct_node_header, ++ rhct, 
hart_info_node_offset[i]); ++ if (ref_node->type == ACPI_RHCT_NODE_TYPE_CMO) { ++ cmo_node = ACPI_ADD_PTR(struct acpi_rhct_cmo_node, ++ ref_node, size_hdr); ++ if (cbom_size && cmo_node->cbom_size <= 30) { ++ if (!*cbom_size) ++ *cbom_size = BIT(cmo_node->cbom_size); ++ else if (*cbom_size != BIT(cmo_node->cbom_size)) ++ pr_warn("CBOM size is not the same across harts\n"); ++ } ++ ++ if (cboz_size && cmo_node->cboz_size <= 30) { ++ if (!*cboz_size) ++ *cboz_size = BIT(cmo_node->cboz_size); ++ else if (*cboz_size != BIT(cmo_node->cboz_size)) ++ pr_warn("CBOZ size is not the same across harts\n"); ++ } ++ ++ if (cbop_size && cmo_node->cbop_size <= 30) { ++ if (!*cbop_size) ++ *cbop_size = BIT(cmo_node->cbop_size); ++ else if (*cbop_size != BIT(cmo_node->cbop_size)) ++ pr_warn("CBOP size is not the same across harts\n"); ++ } ++ } ++ } ++} ++ ++/* ++ * During early boot, the caller should call acpi_get_table() and pass its pointer to ++ * these functions (and free up later). At run time, since this table can be used ++ * multiple times, pass NULL so that the table remains in memory. ++ */ ++void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size, ++ u32 *cboz_size, u32 *cbop_size) ++{ ++ u32 size_hdr = sizeof(struct acpi_rhct_node_header); ++ struct acpi_rhct_node_header *node, *end; ++ struct acpi_rhct_hart_info *hart_info; ++ struct acpi_table_rhct *rhct; ++ ++ if (acpi_disabled) ++ return; ++ ++ if (table) { ++ rhct = (struct acpi_table_rhct *)table; ++ } else { ++ rhct = acpi_get_rhct(); ++ if (!rhct) ++ return; ++ } ++ ++ if (cbom_size) ++ *cbom_size = 0; ++ ++ if (cboz_size) ++ *cboz_size = 0; ++ ++ if (cbop_size) ++ *cbop_size = 0; ++ ++ end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length); ++ for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset); ++ node < end; ++ node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) { ++ if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) { ++ hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr); ++ acpi_parse_hart_info_cmo_node(rhct, hart_info, cbom_size, ++ cboz_size, cbop_size); ++ } ++ } ++} +diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c +index 9a40052d31f3..84dcd3b5ce83 100644 +--- a/drivers/acpi/scan.c ++++ b/drivers/acpi/scan.c +@@ -870,6 +870,9 @@ static const char * const acpi_honor_dep_ids[] = { + "INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */ + "INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */ + "INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */ ++ "RSCV0001", /* RISC-V PLIC */ ++ "RSCV0002", /* RISC-V APLIC */ ++ "PNP0C0F", /* PCI Link Device */ + NULL + }; + +@@ -2034,54 +2037,18 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val) + mutex_unlock(&acpi_scan_lock); + } + +-static void acpi_scan_init_hotplug(struct acpi_device *adev) +-{ +- struct acpi_hardware_id *hwid; +- +- if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) { +- acpi_dock_add(adev); +- return; +- } +- list_for_each_entry(hwid, &adev->pnp.ids, list) { +- struct acpi_scan_handler *handler; +- +- handler = acpi_scan_match_handler(hwid->id, NULL); +- if (handler) { +- adev->flags.hotplug_notify = true; +- break; +- } +- } +-} +- +-static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) ++int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices) + { +- struct acpi_handle_list 
dep_devices;
+- acpi_status status;
+ u32 count;
+ int i;
+
+- /*
+- * Check for _HID here to avoid deferring the enumeration of:
+- * 1. PCI devices.
+- * 2. ACPI nodes describing USB ports.
+- * Still, checking for _HID catches more then just these cases ...
+- */
+- if (!check_dep || !acpi_has_method(handle, "_DEP") ||
+- !acpi_has_method(handle, "_HID"))
+- return 0;
+-
+- status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
+- if (ACPI_FAILURE(status)) {
+- acpi_handle_debug(handle, "Failed to evaluate _DEP.\n");
+- return 0;
+- }
+-
+- for (count = 0, i = 0; i < dep_devices.count; i++) {
++ for (count = 0, i = 0; i < dep_devices->count; i++) {
+ struct acpi_device_info *info;
+ struct acpi_dep_data *dep;
+ bool skip, honor_dep;
++ acpi_status status;
+
+- status = acpi_get_object_info(dep_devices.handles[i], &info);
++ status = acpi_get_object_info(dep_devices->handles[i], &info);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(handle, "Error reading _DEP device info\n");
+ continue;
+@@ -2100,19 +2067,79 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
+
+ count++;
+
+- dep->supplier = dep_devices.handles[i];
++ dep->supplier = dep_devices->handles[i];
+ dep->consumer = handle;
+ dep->honor_dep = honor_dep;
+
+ mutex_lock(&acpi_dep_list_lock);
+- list_add_tail(&dep->node , &acpi_dep_list);
++ list_add_tail(&dep->node, &acpi_dep_list);
+ mutex_unlock(&acpi_dep_list_lock);
+ }
+
++ acpi_handle_list_free(dep_devices);
+ return count;
+ }
+
+-static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
++static void acpi_scan_init_hotplug(struct acpi_device *adev)
++{
++ struct acpi_hardware_id *hwid;
++
++ if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) {
++ acpi_dock_add(adev);
++ return;
++ }
++ list_for_each_entry(hwid, &adev->pnp.ids, list) {
++ struct acpi_scan_handler *handler;
++
++ handler = acpi_scan_match_handler(hwid->id, NULL);
++ if (handler) {
++ adev->flags.hotplug_notify = true;
++ break;
++ }
++ }
++}
++
++u32 __weak arch_acpi_add_auto_dep(acpi_handle handle) { return 0; }
++
++static u32 acpi_scan_check_dep(acpi_handle handle)
++{
++ struct acpi_handle_list dep_devices;
++ u32 count = 0;
++
++ /*
++ * Some architectures like RISC-V need to add dependencies for
++ * all devices which use GSI to the interrupt controller so that
++ * the interrupt controller is probed before any of those devices.
++ * Instead of mandating _DEP on all the devices, detect the
++ * dependency and add it automatically.
++ */
++ count += arch_acpi_add_auto_dep(handle);
++
++ /*
++ * Check for _HID here to avoid deferring the enumeration of:
++ * 1. PCI devices.
++ * 2. ACPI nodes describing USB ports.
++ * Still, checking for _HID catches more than just these cases ...
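++ * Devices deferred here are not lost; they are enumerated later, by
++ * acpi_scan_postponed(), in the second scan pass.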
++ */ ++ if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID")) ++ return count; ++ ++ if (!acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices)) { ++ acpi_handle_debug(handle, "Failed to evaluate _DEP.\n"); ++ return count; ++ } ++ ++ count += acpi_scan_add_dep(handle, &dep_devices); ++ return count; ++} ++ ++static acpi_status acpi_scan_check_crs_csi2_cb(acpi_handle handle, u32 a, void *b, void **c) ++{ ++ acpi_mipi_check_crs_csi2(handle); ++ return AE_OK; ++} ++ ++static acpi_status acpi_bus_check_add(acpi_handle handle, bool first_pass, + struct acpi_device **adev_p) + { + struct acpi_device *device = acpi_fetch_acpi_dev(handle); +@@ -2130,9 +2157,25 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, + if (acpi_device_should_be_hidden(handle)) + return AE_OK; + +- /* Bail out if there are dependencies. */ +- if (acpi_scan_check_dep(handle, check_dep) > 0) +- return AE_CTRL_DEPTH; ++ if (first_pass) { ++ acpi_mipi_check_crs_csi2(handle); ++ ++ /* Bail out if there are dependencies. */ ++ if (acpi_scan_check_dep(handle) > 0) { ++ /* ++ * The entire CSI-2 connection graph needs to be ++ * extracted before any drivers or scan handlers ++ * are bound to struct device objects, so scan ++ * _CRS CSI-2 resource descriptors for all ++ * devices below the current handle. ++ */ ++ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, ++ ACPI_UINT32_MAX, ++ acpi_scan_check_crs_csi2_cb, ++ NULL, NULL, NULL); ++ return AE_CTRL_DEPTH; ++ } ++ } + + fallthrough; + case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ +@@ -2155,10 +2198,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, + } + + /* +- * If check_dep is true at this point, the device has no dependencies, ++ * If first_pass is true at this point, the device has no dependencies, + * or the creation of the device object would have been postponed above. + */ +- acpi_add_single_object(&device, handle, type, !check_dep); ++ acpi_add_single_object(&device, handle, type, !first_pass); + if (!device) + return AE_CTRL_DEPTH; + +@@ -2581,12 +2624,21 @@ int acpi_bus_scan(acpi_handle handle) + if (!device) + return -ENODEV; + ++ /* ++ * Allocate ACPI _CRS CSI-2 software nodes using information extracted ++ * from the _CRS CSI-2 resource descriptors during the ACPI namespace ++ * walk above. ++ */ ++ acpi_mipi_scan_crs_csi2(); ++ + acpi_bus_attach(device, (void *)true); + + /* Pass 2: Enumerate all of the remaining devices. 
*/ + + acpi_scan_postponed(); + ++ acpi_mipi_crs_csi2_cleanup(); ++ + return 0; + } + EXPORT_SYMBOL(acpi_bus_scan); +@@ -2735,6 +2787,8 @@ static int __init acpi_match_madt(union acpi_subtable_headers *header, + return 0; + } + ++void __weak arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr) { } ++ + int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) + { + int count = 0; +@@ -2743,6 +2797,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) + return 0; + + mutex_lock(&acpi_probe_mutex); ++ arch_sort_irqchip_probe(ap_head, nr); + for (ape = ap_head; nr; ape++, nr--) { + if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) { + acpi_probe_count = 0; +diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c +index 8263508415a8..9b5a1c786230 100644 +--- a/drivers/acpi/thermal.c ++++ b/drivers/acpi/thermal.c +@@ -297,9 +297,8 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + } + if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.trip.valid) { + memset(&devices, 0, sizeof(struct acpi_handle_list)); +- status = acpi_evaluate_reference(tz->device->handle, "_PSL", +- NULL, &devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(tz->device->handle, "_PSL", ++ NULL, &devices)) { + acpi_handle_info(tz->device->handle, + "Invalid passive threshold\n"); + tz->trips.passive.trip.valid = false; +@@ -307,10 +306,10 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + tz->trips.passive.trip.valid = true; + } + +- if (memcmp(&tz->trips.passive.devices, &devices, +- sizeof(struct acpi_handle_list))) { +- memcpy(&tz->trips.passive.devices, &devices, +- sizeof(struct acpi_handle_list)); ++ if (acpi_handle_list_equal(&tz->trips.passive.devices, &devices)) { ++ acpi_handle_list_free(&devices); ++ } else { ++ acpi_handle_list_replace(&tz->trips.passive.devices, &devices); + ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); + } + } +@@ -362,9 +361,8 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + name[2] = 'L'; + if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].trip.valid) { + memset(&devices, 0, sizeof(struct acpi_handle_list)); +- status = acpi_evaluate_reference(tz->device->handle, +- name, NULL, &devices); +- if (ACPI_FAILURE(status)) { ++ if (!acpi_evaluate_reference(tz->device->handle, ++ name, NULL, &devices)) { + acpi_handle_info(tz->device->handle, + "Invalid active%d threshold\n", i); + tz->trips.active[i].trip.valid = false; +@@ -372,10 +370,10 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + tz->trips.active[i].trip.valid = true; + } + +- if (memcmp(&tz->trips.active[i].devices, &devices, +- sizeof(struct acpi_handle_list))) { +- memcpy(&tz->trips.active[i].devices, &devices, +- sizeof(struct acpi_handle_list)); ++ if (acpi_handle_list_equal(&tz->trips.active[i].devices, &devices)) { ++ acpi_handle_list_free(&devices); ++ } else { ++ acpi_handle_list_replace(&tz->trips.active[i].devices, &devices); + ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); + } + } +@@ -389,12 +387,14 @@ static void __acpi_thermal_trips_update(struct acpi_thermal *tz, int flag) + + if (flag & ACPI_TRIPS_DEVICES) { + memset(&devices, 0, sizeof(devices)); +- status = acpi_evaluate_reference(tz->device->handle, "_TZD", +- NULL, &devices); +- if (ACPI_SUCCESS(status) && +- memcmp(&tz->devices, &devices, sizeof(devices))) { +- tz->devices = devices; +- ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); ++ if 
(acpi_evaluate_reference(tz->device->handle, "_TZD", ++ NULL, &devices)) { ++ if (acpi_handle_list_equal(&tz->devices, &devices)) { ++ acpi_handle_list_free(&devices); ++ } else { ++ acpi_handle_list_replace(&tz->devices, &devices); ++ ACPI_THERMAL_TRIPS_EXCEPTION(flag, tz, "device"); ++ } + } + } + } +@@ -920,6 +920,18 @@ static void acpi_thermal_check_fn(struct work_struct *work) + mutex_unlock(&tz->thermal_check_lock); + } + ++static void acpi_thermal_free_thermal_zone(struct acpi_thermal *tz) ++{ ++ int i; ++ ++ acpi_handle_list_free(&tz->trips.passive.devices); ++ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) ++ acpi_handle_list_free(&tz->trips.active[i].devices); ++ acpi_handle_list_free(&tz->devices); ++ ++ kfree(tz); ++} ++ + static int acpi_thermal_add(struct acpi_device *device) + { + struct acpi_thermal *tz; +@@ -966,7 +978,7 @@ static int acpi_thermal_add(struct acpi_device *device) + flush_workqueue(acpi_thermal_pm_queue); + acpi_thermal_unregister_thermal_zone(tz); + free_memory: +- kfree(tz); ++ acpi_thermal_free_thermal_zone(tz); + + return result; + } +@@ -986,7 +998,7 @@ static void acpi_thermal_remove(struct acpi_device *device) + flush_workqueue(acpi_thermal_pm_queue); + acpi_thermal_unregister_thermal_zone(tz); + kfree(tz->trip_table); +- kfree(tz); ++ acpi_thermal_free_thermal_zone(tz); + } + + #ifdef CONFIG_PM_SLEEP +diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c +index 2ea14648a661..e84106a4ef59 100644 +--- a/drivers/acpi/utils.c ++++ b/drivers/acpi/utils.c +@@ -329,22 +329,18 @@ const char *acpi_get_subsystem_id(acpi_handle handle) + } + EXPORT_SYMBOL_GPL(acpi_get_subsystem_id); + +-acpi_status +-acpi_evaluate_reference(acpi_handle handle, +- acpi_string pathname, +- struct acpi_object_list *arguments, +- struct acpi_handle_list *list) ++bool acpi_evaluate_reference(acpi_handle handle, acpi_string pathname, ++ struct acpi_object_list *arguments, ++ struct acpi_handle_list *list) + { +- acpi_status status = AE_OK; +- union acpi_object *package = NULL; +- union acpi_object *element = NULL; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; +- u32 i = 0; +- ++ union acpi_object *package; ++ acpi_status status; ++ bool ret = false; ++ u32 i; + +- if (!list) { +- return AE_BAD_PARAMETER; +- } ++ if (!list) ++ return false; + + /* Evaluate object. */ + +@@ -354,64 +350,106 @@ acpi_evaluate_reference(acpi_handle handle, + + package = buffer.pointer; + +- if ((buffer.length == 0) || !package) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- goto end; +- } +- if (package->type != ACPI_TYPE_PACKAGE) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- goto end; +- } +- if (!package->package.count) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- goto end; +- } ++ if (buffer.length == 0 || !package || ++ package->type != ACPI_TYPE_PACKAGE || !package->package.count) ++ goto err; + +- if (package->package.count > ACPI_MAX_HANDLES) { +- kfree(package); +- return AE_NO_MEMORY; +- } + list->count = package->package.count; ++ list->handles = kcalloc(list->count, sizeof(*list->handles), GFP_KERNEL); ++ if (!list->handles) ++ goto err_clear; + + /* Extract package data. 
*/ + + for (i = 0; i < list->count; i++) { ++ union acpi_object *element = &(package->package.elements[i]); + +- element = &(package->package.elements[i]); ++ if (element->type != ACPI_TYPE_LOCAL_REFERENCE || ++ !element->reference.handle) ++ goto err_free; + +- if (element->type != ACPI_TYPE_LOCAL_REFERENCE) { +- status = AE_BAD_DATA; +- acpi_util_eval_error(handle, pathname, status); +- break; +- } +- +- if (!element->reference.handle) { +- status = AE_NULL_ENTRY; +- acpi_util_eval_error(handle, pathname, status); +- break; +- } + /* Get the acpi_handle. */ + + list->handles[i] = element->reference.handle; + acpi_handle_debug(list->handles[i], "Found in reference list\n"); + } + +- end: +- if (ACPI_FAILURE(status)) { +- list->count = 0; +- //kfree(list->handles); +- } ++ ret = true; + ++end: + kfree(buffer.pointer); + +- return status; ++ return ret; ++ ++err_free: ++ kfree(list->handles); ++ list->handles = NULL; ++ ++err_clear: ++ list->count = 0; ++ ++err: ++ acpi_util_eval_error(handle, pathname, status); ++ goto end; + } + + EXPORT_SYMBOL(acpi_evaluate_reference); + ++/** ++ * acpi_handle_list_equal - Check if two ACPI handle lists are the same ++ * @list1: First list to compare. ++ * @list2: Second list to compare. ++ * ++ * Return true if the given ACPI handle lists are of the same size and ++ * contain the same ACPI handles in the same order. Otherwise, return false. ++ */ ++bool acpi_handle_list_equal(struct acpi_handle_list *list1, ++ struct acpi_handle_list *list2) ++{ ++ return list1->count == list2->count && ++ !memcmp(list1->handles, list2->handles, ++ list1->count * sizeof(*list1->handles)); ++} ++EXPORT_SYMBOL_GPL(acpi_handle_list_equal); ++ ++/** ++ * acpi_handle_list_replace - Replace one ACPI handle list with another ++ * @dst: ACPI handle list to replace. ++ * @src: Source ACPI handle list. ++ * ++ * Free the handles table in @dst, move the handles table from @src to @dst, ++ * copy count from @src to @dst and clear @src. ++ */ ++void acpi_handle_list_replace(struct acpi_handle_list *dst, ++ struct acpi_handle_list *src) ++{ ++ if (dst->count) ++ kfree(dst->handles); ++ ++ dst->count = src->count; ++ dst->handles = src->handles; ++ ++ src->handles = NULL; ++ src->count = 0; ++} ++EXPORT_SYMBOL_GPL(acpi_handle_list_replace); ++ ++/** ++ * acpi_handle_list_free - Free the handles table in an ACPI handle list ++ * @list: ACPI handle list to free. ++ * ++ * Free the handles table in @list and clear its count field. ++ */ ++void acpi_handle_list_free(struct acpi_handle_list *list) ++{ ++ if (!list->count) ++ return; ++ ++ kfree(list->handles); ++ list->count = 0; ++} ++EXPORT_SYMBOL_GPL(acpi_handle_list_free); ++ + acpi_status + acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld) + { +diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c +index 96281de7010d..7f3ea78722fa 100644 +--- a/drivers/base/arch_numa.c ++++ b/drivers/base/arch_numa.c +@@ -530,7 +530,7 @@ static int __init arch_acpi_numa_init(void) + + ret = acpi_numa_init(); + if (ret) { +- pr_info("Failed to initialise from firmware\n"); ++ pr_debug("Failed to initialise from firmware\n"); + return ret; + } + +diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c +index 3348d4db5f1b..0d01890160f3 100644 +--- a/drivers/base/platform-msi.c ++++ b/drivers/base/platform-msi.c +@@ -13,6 +13,8 @@ + #include + #include + ++/* Begin of removal area. Once everything is converted over. Cleanup the includes too! 
*/ ++ + #define DEV_ID_SHIFT 21 + #define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT)) + +@@ -204,8 +206,8 @@ static void platform_msi_free_priv_data(struct device *dev) + * Returns: + * Zero for success, or an error code in case of failure + */ +-int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, +- irq_write_msi_msg_t write_msi_msg) ++static int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, ++ irq_write_msi_msg_t write_msi_msg) + { + int err; + +@@ -219,48 +221,6 @@ int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, + + return err; + } +-EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs); +- +-#ifdef CONFIG_HISI_VIRTCCA_CODA +-/** +- * platform_msi_domain_alloc_range_irqs - Allocate specific scope MSI interrupts for @dev +- * @dev: The device for which to allocate interrupts +- * @start: The start index of msi +- * @end: The end index of msi +- * @write_msi_msg: The function for writing msi message +- * +- * %0 if alloc irqs success +- * %error_code if alloc irqs failed +- * %-EINVAL if platform_data is null +- */ +-int platform_msi_domain_alloc_range_irqs(struct device *dev, unsigned int start, +- unsigned int end, irq_write_msi_msg_t write_msi_msg) +-{ +- int err; +- +- if (!dev->msi.data->platform_data) +- return -EINVAL; +- +- dev->msi.data->platform_data->write_msg = write_msi_msg; +- err = msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, start, end); +- if (err) +- platform_msi_free_priv_data(dev); +- +- return err; +-} +-EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_range_irqs); +-#endif +- +-/** +- * platform_msi_domain_free_irqs - Free MSI interrupts for @dev +- * @dev: The device for which to free interrupts +- */ +-void platform_msi_domain_free_irqs(struct device *dev) +-{ +- msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); +- platform_msi_free_priv_data(dev); +-} +-EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs); + + /** + * platform_msi_get_host_data - Query the private data associated with +@@ -380,3 +340,104 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir + + return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg); + } ++ ++/* End of removal area */ ++ ++/* Real per device domain interfaces */ ++ ++/* ++ * This indirection can go when platform_device_msi_init_and_alloc_irqs() ++ * is switched to a proper irq_chip::irq_write_msi_msg() callback. Keep it ++ * simple for now. 
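++ * msi_create_device_irq_domain() stores the callback as the irq chip data,
++ * and platform_msi_write_msi_msg() below fetches it back through
++ * irq_data::chip_data.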
++ */ ++static void platform_msi_write_msi_msg(struct irq_data *d, struct msi_msg *msg) ++{ ++ irq_write_msi_msg_t cb = d->chip_data; ++ ++ cb(irq_data_get_msi_desc(d), msg); ++} ++ ++static void platform_msi_set_desc_byindex(msi_alloc_info_t *arg, struct msi_desc *desc) ++{ ++ arg->desc = desc; ++ arg->hwirq = desc->msi_index; ++} ++ ++static const struct msi_domain_template platform_msi_template = { ++ .chip = { ++ .name = "pMSI", ++ .irq_mask = irq_chip_mask_parent, ++ .irq_unmask = irq_chip_unmask_parent, ++ .irq_write_msi_msg = platform_msi_write_msi_msg, ++ /* The rest is filled in by the platform MSI parent */ ++ }, ++ ++ .ops = { ++ .set_desc = platform_msi_set_desc_byindex, ++ }, ++ ++ .info = { ++ .bus_token = DOMAIN_BUS_DEVICE_MSI, ++ }, ++}; ++ ++/** ++ * platform_device_msi_init_and_alloc_irqs - Initialize platform device MSI ++ * and allocate interrupts for @dev ++ * @dev: The device for which to allocate interrupts ++ * @nvec: The number of interrupts to allocate ++ * @write_msi_msg: Callback to write an interrupt message for @dev ++ * ++ * Returns: ++ * Zero for success, or an error code in case of failure ++ * ++ * This creates a MSI domain on @dev which has @dev->msi.domain as ++ * parent. The parent domain sets up the new domain. The domain has ++ * a fixed size of @nvec. The domain is managed by devres and will ++ * be removed when the device is removed. ++ * ++ * Note: For migration purposes this falls back to the original platform_msi code ++ * up to the point where all platforms have been converted to the MSI ++ * parent model. ++ */ ++int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nvec, ++ irq_write_msi_msg_t write_msi_msg) ++{ ++ struct irq_domain *domain = dev->msi.domain; ++ ++ if (!domain || !write_msi_msg) ++ return -EINVAL; ++ ++ /* Migration support. Will go away once everything is converted */ ++ if (!irq_domain_is_msi_parent(domain)) ++ return platform_msi_domain_alloc_irqs(dev, nvec, write_msi_msg); ++ ++ /* ++ * @write_msi_msg is stored in the resulting msi_domain_info::data. ++ * The underlying domain creation mechanism will assign that ++ * callback to the resulting irq chip. ++ */ ++ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN, ++ &platform_msi_template, ++ nvec, NULL, write_msi_msg)) ++ return -ENODEV; ++ ++ return msi_domain_alloc_irqs_range(dev, MSI_DEFAULT_DOMAIN, 0, nvec - 1); ++} ++EXPORT_SYMBOL_GPL(platform_device_msi_init_and_alloc_irqs); ++ ++/** ++ * platform_device_msi_free_irqs_all - Free all interrupts for @dev ++ * @dev: The device for which to free interrupts ++ */ ++void platform_device_msi_free_irqs_all(struct device *dev) ++{ ++ struct irq_domain *domain = dev->msi.domain; ++ ++ msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN); ++ ++ /* Migration support. Will go away once everything is converted */ ++ if (!irq_domain_is_msi_parent(domain)) ++ platform_msi_free_priv_data(dev); ++} ++EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all); +diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c +index 0c92fa3eee88..3cb4ceb53635 100644 +--- a/drivers/char/ipmi/ipmi_si_hardcode.c ++++ b/drivers/char/ipmi/ipmi_si_hardcode.c +@@ -6,7 +6,7 @@ + #include + #include "ipmi_si.h" + #include "ipmi_plat_data.h" +- ++#include + /* + * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, + * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS. 
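/*
 * A minimal usage sketch for the per-device MSI interfaces introduced in
 * platform-msi.c above. The "foo" driver, foo_write_msi_msg() and the
 * vector count are assumptions for illustration only; an MSI parent
 * domain is presumed to have been set up for the device by its irqchip.
 */
static void foo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* Program the device's MSI address/data registers from @msg here. */
}

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Create the per-device MSI domain and allocate four vectors. */
	ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 4,
						      foo_write_msi_msg);
	if (ret)
		return ret;

	/* msi_get_virq(&pdev->dev, 0..3) now yields the Linux IRQ numbers. */
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	platform_device_msi_free_irqs_all(&pdev->dev);
}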
+@@ -90,6 +90,24 @@ static void __init ipmi_hardcode_init_one(const char *si_type_str, + ipmi_platform_add("hardcode-ipmi-si", i, &p); + } + ++#ifdef CONFIG_ARCH_SOPHGO ++static void variable_init(struct pci_dev *pdev) ++{ ++ unsigned long addr_data = pci_resource_start(pdev, 1) + 0x0e80; ++ // printk("addr_data=0x%lx\n", addr_data); ++ strcpy(si_type_str, "kcs"); ++ addrs[0] = addr_data; ++ num_addrs = 1; ++ regspacings[0] = 4; ++ num_regspacings = 1; ++ regsizes[0] = 4; ++ num_regsizes = 1; ++ irqs[0] = 0; ++ num_irqs = 1; ++ slave_addrs[0] = 0; ++ num_slave_addrs = 1; ++} ++#endif + void __init ipmi_hardcode_init(void) + { + unsigned int i; +@@ -97,7 +115,11 @@ void __init ipmi_hardcode_init(void) + char *si_type[SI_MAX_PARMS]; + + memset(si_type, 0, sizeof(si_type)); +- ++#ifdef CONFIG_ARCH_SOPHGO ++ struct pci_dev *pdev = pci_get_device(0x1A03, 0x2402, NULL); ++ if (pdev != NULL) ++ variable_init(pdev); ++#endif + /* Parse out the si_type string into its components. */ + str = si_type_str; + if (*str != '\0') { +diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c +index 373ee71811e3..8e316d49bce1 100644 +--- a/drivers/char/ipmi/ipmi_si_intf.c ++++ b/drivers/char/ipmi/ipmi_si_intf.c +@@ -2158,7 +2158,8 @@ static int __init init_ipmi_si(void) + return 0; + } + } +-module_init(init_ipmi_si); ++// module_init(init_ipmi_si); ++late_initcall(init_ipmi_si); + + static void wait_msg_processed(struct smi_info *smi_info) + { +diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c +index 74fa2055868b..6a935100e1ae 100644 +--- a/drivers/char/ipmi/ipmi_si_pci.c ++++ b/drivers/char/ipmi/ipmi_si_pci.c +@@ -111,6 +111,12 @@ static int ipmi_pci_probe(struct pci_dev *pdev, + io.regsize = DEFAULT_REGSIZE; + io.regshift = 0; + ++#ifdef CONFIG_ARCH_SOPHGO ++ io.addr_data = pci_resource_start(pdev, 1) + 0x0e80; ++ io.slave_addr = 0x20; ++ io.regspacing = 4; ++ io.regsize = 4; ++#endif + io.irq = pdev->irq; + if (io.irq) + io.irq_setup = ipmi_std_irq_setup; +diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig +index c30099866174..83976f7b9755 100644 +--- a/drivers/clk/Kconfig ++++ b/drivers/clk/Kconfig +@@ -490,6 +490,7 @@ source "drivers/clk/rockchip/Kconfig" + source "drivers/clk/samsung/Kconfig" + source "drivers/clk/sifive/Kconfig" + source "drivers/clk/socfpga/Kconfig" ++source "drivers/clk/spacemit/Kconfig" + source "drivers/clk/sprd/Kconfig" + source "drivers/clk/starfive/Kconfig" + source "drivers/clk/sunxi/Kconfig" +@@ -501,6 +502,7 @@ source "drivers/clk/visconti/Kconfig" + source "drivers/clk/x86/Kconfig" + source "drivers/clk/xilinx/Kconfig" + source "drivers/clk/zynqmp/Kconfig" ++source "drivers/clk/xuantie/Kconfig" + + # Kunit test cases + config CLK_KUNIT_TEST +diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile +index 18969cbd4bb1..1e4e2e292f5d 100644 +--- a/drivers/clk/Makefile ++++ b/drivers/clk/Makefile +@@ -117,6 +117,7 @@ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ + obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/ + obj-$(CONFIG_CLK_SIFIVE) += sifive/ + obj-y += socfpga/ ++obj-$(CONFIG_SOC_SPACEMIT) += spacemit/ + obj-$(CONFIG_PLAT_SPEAR) += spear/ + obj-y += sprd/ + obj-$(CONFIG_ARCH_STI) += st/ +@@ -124,6 +125,7 @@ obj-$(CONFIG_ARCH_STM32) += stm32/ + obj-y += starfive/ + obj-$(CONFIG_ARCH_SUNXI) += sunxi/ + obj-y += sunxi-ng/ ++obj-$(CONFIG_ARCH_SOPHGO) += sophgo/ + obj-$(CONFIG_ARCH_TEGRA) += tegra/ + obj-y += ti/ + obj-$(CONFIG_CLK_UNIPHIER) += uniphier/ +@@ -136,3 +138,4 @@ endif + obj-y += xilinx/ + obj-$(CONFIG_ARCH_ZYNQ) += 
zynq/
+ obj-$(CONFIG_COMMON_CLK_ZYNQMP)	+= zynqmp/
++obj-$(CONFIG_ARCH_XUANTIE)		+= xuantie/
+diff --git a/drivers/clk/sophgo/Makefile b/drivers/clk/sophgo/Makefile
+new file mode 100644
+index 000000000000..55997fc07b5b
+--- /dev/null
++++ b/drivers/clk/sophgo/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_ARCH_SOPHGO) += clk-dummy.o
++obj-$(CONFIG_ARCH_SOPHGO) += clk.o
++obj-$(CONFIG_ARCH_SOPHGO) += clk-mango.o
+diff --git a/drivers/clk/sophgo/clk-dummy.c b/drivers/clk/sophgo/clk-dummy.c
+new file mode 100644
+index 000000000000..ddbbcf55b964
+--- /dev/null
++++ b/drivers/clk/sophgo/clk-dummy.c
+@@ -0,0 +1,594 @@
++/*
++ * Copyright (c) 2022 SOPHGO
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "clk.h"
++
++/*
++ * @hw:		handle between common and hardware-specific interfaces
++ * @reg:	register containing divider
++ * @shift:	shift to the divider bit field
++ * @width:	width of the divider bit field
++ * @initial_val: initial value of the divider
++ * @table:	the div table that the divider supports
++ * @lock:	register lock
++ */
++struct mango_clk_divider {
++	struct clk_hw hw;
++	void __iomem *reg;
++	u8 shift;
++	u8 width;
++	u8 flags;
++	u32 initial_val;
++	const struct clk_div_table *table;
++	spinlock_t *lock;
++};
++
++static unsigned long mango_clk_divider_recalc_rate(struct clk_hw *hw,
++		unsigned long parent_rate)
++{
++	struct device_node *node;
++	struct of_phandle_args clkspec;
++	int rc, index = 0;
++	u32 rate;
++	struct clk *clk;
++
++	node = of_find_node_by_name(NULL, "default_rates");
++
++	of_property_for_each_u32(node, "clock-rates", rate) {
++		if (rate) {
++			rc = of_parse_phandle_with_args(node, "clocks",
++					"#clock-cells", index, &clkspec);
++			if (rc < 0) {
++				/* skip empty (null) phandles */
++				if (rc == -ENOENT)
++					continue;
++				else
++					return rc;
++			}
++
++			clk = of_clk_get_from_provider(&clkspec);
++			if (IS_ERR(clk))
++				return PTR_ERR(clk);
++			if (!strcmp(clk_hw_get_name(hw), __clk_get_name(clk)))
++				return rate;
++		}
++		index++;
++	}
++	return 0;
++}
++
++static long mango_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
++		unsigned long *prate)
++{
++	return rate;
++}
++
++static int mango_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
++		unsigned long parent_rate)
++{
++	return 0;
++}
++
++/*
++ * @hw:		clk_hw handle the CCF uses to get at the mango pll clock
++ * @parent_rate: parent rate
++ *
++ * This function is called through clk_get_rate() and returns
++ * the current rate after decoding the reg value.
++ */
++static unsigned long mango_clk_pll_recalc_rate(struct clk_hw *hw,
++		unsigned long parent_rate)
++{
++	struct device_node *node;
++	struct of_phandle_args clkspec;
++	int rc, index = 0;
++	u32 rate;
++
++	node = of_find_node_by_name(NULL, "default_rates");
++
++	of_property_for_each_u32(node, "clock-rates", rate) {
++		if (rate) {
++			rc = of_parse_phandle_with_args(node, "clocks",
++					"#clock-cells", index, &clkspec);
++			if (rc < 0) {
++				/* skip empty (null) phandles */
++				if (rc == -ENOENT)
++					continue;
++				else
++					return rc;
++			}
++
++			if (!strncmp(clk_hw_get_name(hw), clkspec.np->name, 4))
++				return rate;
++		}
++		index++;
++	}
++	return 0;
++}
++
++static long mango_clk_pll_round_rate(struct clk_hw *hw,
++		unsigned long req_rate, unsigned long *prate)
++{
++	return req_rate;
++}
++
++static int mango_clk_pll_determine_rate(struct clk_hw *hw,
++		struct clk_rate_request *req)
++{
++	req->rate = mango_clk_pll_round_rate(hw, min(req->rate, req->max_rate),
++			&req->best_parent_rate);
++	return 0;
++}
++
++static int mango_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
++		unsigned long parent_rate)
++{
++	return 0;
++}
++
++const struct clk_ops dm_mango_clk_divider_ops = {
++	.recalc_rate = mango_clk_divider_recalc_rate,
++	.round_rate = mango_clk_divider_round_rate,
++	.set_rate = mango_clk_divider_set_rate,
++};
++
++const struct clk_ops dm_mango_clk_divider_ro_ops = {
++	.recalc_rate = mango_clk_divider_recalc_rate,
++	.round_rate = mango_clk_divider_round_rate,
++};
++
++const struct clk_ops dm_mango_clk_pll_ops = {
++	.recalc_rate = mango_clk_pll_recalc_rate,
++	.round_rate = mango_clk_pll_round_rate,
++	.determine_rate = mango_clk_pll_determine_rate,
++	.set_rate = mango_clk_pll_set_rate,
++};
++
++const struct clk_ops dm_mango_clk_pll_ro_ops = {
++	.recalc_rate = mango_clk_pll_recalc_rate,
++	.round_rate = mango_clk_pll_round_rate,
++};
++
++struct mux_cb_clk_name {
++	const char *name;
++	struct list_head node;
++};
++
++static struct list_head mux_cb_clk_name_list =
++	LIST_HEAD_INIT(mux_cb_clk_name_list);
++static int mux_notifier_cb(struct notifier_block *nb,
++		unsigned long event, void *data)
++{
++	int ret = 0;
++	static unsigned char mux_id = 1;
++	struct clk_notifier_data *ndata = data;
++	struct clk_hw *hw = __clk_get_hw(ndata->clk);
++	const struct clk_ops *ops = &clk_mux_ops;
++	struct mux_cb_clk_name *cb_list;
++
++	if (event == PRE_RATE_CHANGE) {
++		struct clk_hw *hw_p = clk_hw_get_parent(hw);
++
++		cb_list = kmalloc(sizeof(*cb_list), GFP_KERNEL);
++		if (cb_list) {
++			INIT_LIST_HEAD(&cb_list->node);
++			list_add_tail(&cb_list->node, &mux_cb_clk_name_list);
++		} else {
++			pr_err("mux cb: kmalloc failed\n");
++			goto out;
++		}
++
++		cb_list->name = clk_hw_get_name(hw_p);
++		mux_id = ops->get_parent(hw);
++		if (mux_id > 1) {
++			ret = 1;
++			goto out;
++		}
++		ops->set_parent(hw, !mux_id);
++	} else if (event == POST_RATE_CHANGE) {
++		struct clk_hw *hw_p = clk_hw_get_parent(hw);
++
++		cb_list = list_first_entry_or_null(&mux_cb_clk_name_list,
++				typeof(*cb_list), node);
++		if (cb_list) {
++			const char *pre_name = cb_list->name;
++
++			list_del_init(&cb_list->node);
++			kfree(cb_list);
++			if (strcmp(clk_hw_get_name(hw_p), pre_name))
++				goto out;
++		}
++
++		ops->set_parent(hw, mux_id);
++	}
++
++out:
++	return notifier_from_errno(ret);
++}
++
++int dm_set_default_clk_rates(struct device_node *node)
++{
++	struct of_phandle_args clkspec;
++	int rc, index = 0;
++	struct clk *clk;
++	u32 rate;
++
++	of_property_for_each_u32(node, "clock-rates", rate) {
++		if (rate) {
++			rc = of_parse_phandle_with_args(node, "clocks",
++					"#clock-cells", index, &clkspec);
++			if (rc < 0) {
++				/* skip empty (null) phandles */
++				if (rc == -ENOENT)
++					continue;
++				else
++					return rc;
++			}
++
++			clk = of_clk_get_from_provider(&clkspec);
++			if (IS_ERR(clk)) {
++				pr_warn("clk: couldn't get clock %d for %s\n",
++					index, node->full_name);
++				return PTR_ERR(clk);
++			}
++
++			rc = clk_set_rate(clk, rate);
++			if (rc < 0)
++				pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n",
++					__clk_get_name(clk), rate, rc,
++					clk_get_rate(clk));
++			clk_put(clk);
++		}
++		index++;
++	}
++
++	return 0;
++}
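/*
 * For reference, a sketch of the devicetree node that
 * dm_set_default_clk_rates() and the recalc callbacks above expect
 * (hypothetical phandles and values; the recalc callbacks look the node
 * up by name, so any "default_rates" node in the tree matches):
 *
 *	default_rates {
 *		clocks = <&div_clk 10>, <&pll_clk 0>;
 *		clock-rates = <100000000>, <1000000000>;
 *	};
 *
 * Each non-zero "clock-rates" entry is applied to the "clocks" phandle
 * at the same index; zero rates and empty phandles are skipped.
 */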
++
++static struct clk *__register_divider_clks(struct device *dev, const char *name,
++		const char *parent_name, unsigned long flags,
++		void __iomem *reg, u8 shift,
++		u8 width, u32 initial_val,
++		u8 clk_divider_flags,
++		const struct clk_div_table *table,
++		spinlock_t *lock)
++{
++	struct mango_clk_divider *div;
++	struct clk_hw *hw;
++	struct clk_init_data init;
++	int ret;
++
++	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
++		if (width + shift > 16) {
++			pr_warn("divider value exceeds LOWORD field\n");
++			return ERR_PTR(-EINVAL);
++		}
++	}
++
++	/* allocate the divider */
++	div = kzalloc(sizeof(*div), GFP_KERNEL);
++	if (!div)
++		return ERR_PTR(-ENOMEM);
++
++	init.name = name;
++	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
++		init.ops = &dm_mango_clk_divider_ro_ops;
++	else
++		init.ops = &dm_mango_clk_divider_ops;
++	init.flags = flags;
++	init.parent_names = (parent_name ? &parent_name : NULL);
++	init.num_parents = (parent_name ? 1 : 0);
++
++	/* struct mango_clk_divider assignments */
++	div->reg = reg;
++	div->shift = shift;
++	div->width = width;
++	div->flags = clk_divider_flags;
++	div->lock = lock;
++	div->hw.init = &init;
++	div->table = table;
++	div->initial_val = initial_val;
++
++	/* register the clock */
++	hw = &div->hw;
++	ret = clk_hw_register(dev, hw);
++	if (ret) {
++		kfree(div);
++		/* propagate the real error code */
++		return ERR_PTR(ret);
++	}
++
++	return hw->clk;
++}
++
++static inline int register_provider_clks
++(struct device_node *node, struct mango_clk_data *clk_data, int clk_num)
++{
++	return of_clk_add_provider(node, of_clk_src_onecell_get,
++			&clk_data->clk_data);
++}
++
++static int register_gate_clks(struct device *dev, struct mango_clk_data *clk_data)
++{
++	struct clk *clk;
++	const struct mango_clk_table *table = clk_data->table;
++	const struct mango_gate_clock *gate_clks = table->gate_clks;
++	void __iomem *base = clk_data->base;
++	int clk_num = table->gate_clks_num;
++	int i;
++
++	for (i = 0; i < clk_num; i++) {
++		clk = clk_register_gate(
++			dev, gate_clks[i].name, gate_clks[i].parent_name,
++			gate_clks[i].flags | CLK_IS_CRITICAL, base + gate_clks[i].offset,
++			gate_clks[i].bit_idx, gate_clks[i].gate_flags,
++			&clk_data->lock);
++		if (IS_ERR(clk)) {
++			pr_err("%s: failed to register clock %s\n", __func__,
++				gate_clks[i].name);
++			goto err;
++		}
++
++		if (gate_clks[i].alias)
++			clk_register_clkdev(clk, gate_clks[i].alias, NULL);
++
++		clk_data->clk_data.clks[gate_clks[i].id] = clk;
++	}
++
++	return 0;
++
++err:
++	while (i--)
++		clk_unregister_gate(clk_data->clk_data.clks[gate_clks[i].id]);
++
++	return PTR_ERR(clk);
++}
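/*
 * A sketch of how one row of the gate tables in clk-mango.c below feeds
 * the clk_register_gate() call above. For example the row
 *
 *	{ GATE_CLK_AHB_ROM, "clk_gate_ahb_rom", "clk_gate_top_axi0",
 *	  0, 0x2000, 8, 0 },
 *
 * registers "clk_gate_ahb_rom" with parent "clk_gate_top_axi0", gated by
 * bit 8 of the register at base + 0x2000. Note that this dummy variant
 * forces CLK_IS_CRITICAL onto every gate's flags.
 */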
++
++static int register_divider_clks(struct device *dev,
++		struct mango_clk_data *clk_data)
++{
++	struct clk *clk;
++	const struct mango_clk_table *table = clk_data->table;
++	const struct mango_divider_clock *div_clks = table->div_clks;
++	void __iomem *base = clk_data->base;
++	int clk_num = table->div_clks_num;
++	int i, val;
++
++	for (i = 0; i < clk_num; i++) {
++		clk = __register_divider_clks(
++			NULL, div_clks[i].name, div_clks[i].parent_name,
++			div_clks[i].flags, base + div_clks[i].offset,
++			div_clks[i].shift, div_clks[i].width,
++			div_clks[i].initial_val,
++			(div_clks[i].initial_sel & MANGO_CLK_USE_INIT_VAL) ?
++				div_clks[i].div_flags | CLK_DIVIDER_READ_ONLY :
++				div_clks[i].div_flags,
++			div_clks[i].table, &clk_data->lock);
++		if (IS_ERR(clk)) {
++			pr_err("%s: failed to register clock %s\n", __func__,
++				div_clks[i].name);
++			goto err;
++		}
++
++		clk_data->clk_data.clks[div_clks[i].id] = clk;
++
++		if (div_clks[i].initial_sel == MANGO_CLK_USE_REG_VAL) {
++			regmap_read(clk_data->syscon_top, div_clks[i].offset,
++				&val);
++
++			/*
++			 * Set a default divider factor. The clk driver should not
++			 * select the divider clock as the clock source before the
++			 * divider has been set up by the proper sequence (assert
++			 * div, set div factor, de-assert div).
++			 */
++			if (div_clks[i].initial_val > 0)
++				val |= (div_clks[i].initial_val << 16 | 1 << 3);
++			else {
++				/*
++				 * The div register is configured to use the divider factor, so don't change it.
++				 */
++				if (!(val >> 3 & 0x1))
++					val |= 1 << 16;
++			}
++
++			regmap_write(clk_data->syscon_top, div_clks[i].offset,
++				val);
++		}
++	}
++
++	return 0;
++
++err:
++	while (i--)
++		clk_unregister_divider(clk_data->clk_data.clks[div_clks[i].id]);
++
++	return PTR_ERR(clk);
++}
++
++static int register_mux_clks(struct device *dev, struct mango_clk_data *clk_data)
++{
++	struct clk *clk;
++	const struct mango_clk_table *table = clk_data->table;
++	const struct mango_mux_clock *mux_clks = table->mux_clks;
++	void __iomem *base = clk_data->base;
++	int clk_num = table->mux_clks_num;
++	int i;
++
++	for (i = 0; i < clk_num; i++) {
++		u32 mask = BIT(mux_clks[i].width) - 1;
++
++		clk = clk_register_mux_table(
++			dev, mux_clks[i].name, mux_clks[i].parent_names,
++			mux_clks[i].num_parents, mux_clks[i].flags,
++			base + mux_clks[i].offset, mux_clks[i].shift, mask,
++			mux_clks[i].mux_flags, mux_clks[i].table,
++			&clk_data->lock);
++		if (IS_ERR(clk)) {
++			pr_err("%s: failed to register clock %s\n", __func__,
++				mux_clks[i].name);
++			goto err;
++		}
++
++		clk_data->clk_data.clks[mux_clks[i].id] = clk;
++
++		if (!(mux_clks[i].flags & CLK_MUX_READ_ONLY)) {
++			struct clk *parent;
++			struct notifier_block *clk_nb;
++
++			/* Set the mux clock's default parent here; its parent
++			 * index value is read from the mux clock reg. The dts
++			 * can override the mux clock parent later.
++			 */
++			parent = clk_get_parent(clk);
++			clk_set_parent(clk, parent);
++
++			/* add a notify callback function */
++			clk_nb = kzalloc(sizeof(*clk_nb), GFP_KERNEL);
++			if (!clk_nb)
++				goto err;
++			clk_nb->notifier_call = mux_notifier_cb;
++			if (clk_notifier_register(clk, clk_nb))
++				pr_err("%s: failed to register clock notifier for %s\n",
++					__func__, mux_clks[i].name);
++		}
++	}
++
++	return 0;
++
++err:
++	while (i--)
++		clk_unregister_mux(clk_data->clk_data.clks[mux_clks[i].id]);
++
++	return PTR_ERR(clk);
++}
++
++/* pll clock init */
++int dm_mango_register_pll_clks(struct device_node *node,
++		struct mango_clk_data *clk_data, const char *clk_name)
++{
++	struct clk *clk = NULL;
++	struct mango_pll_clock *pll_clks;
++	int i, ret = 0;
++	const struct clk_ops *local_ops;
++
++	pll_clks = (struct mango_pll_clock *)clk_data->table->pll_clks;
++	for (i = 0; i < clk_data->table->pll_clks_num; i++) {
++		if (!strcmp(clk_name, pll_clks[i].name)) {
++			/* have to assign pll_clks.syscon_top first
++			 * since clk_register_composite will need it
++			 * to calculate the current rate.
++ */ ++ pll_clks[i].syscon_top = clk_data->syscon_top; ++ pll_clks[i].lock = &clk_data->lock; ++ if (pll_clks[i].ini_flags & MANGO_CLK_RO) ++ local_ops = &dm_mango_clk_pll_ro_ops; ++ else ++ local_ops = &dm_mango_clk_pll_ops; ++ clk = clk_register_composite( ++ NULL, pll_clks[i].name, &pll_clks[i].parent_name, ++ 1, NULL, NULL, &pll_clks[i].hw, local_ops, ++ NULL, NULL, pll_clks[i].flags); ++ ++ if (IS_ERR(clk)) { ++ pr_err("%s: failed to register clock %s\n", __func__, ++ pll_clks[i].name); ++ ret = -EINVAL; ++ goto out; ++ } ++ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk); ++ if (ret) ++ clk_unregister(clk); ++ } else { ++ continue; ++ } ++ } ++ ++out: ++ return ret; ++} ++ ++/* mux clk init */ ++int dm_mango_register_mux_clks(struct device_node *node, struct mango_clk_data *clk_data) ++{ ++ int ret; ++ int count; ++ struct clk **clk_table; ++ ++ count = clk_data->table->mux_clks_num + clk_data->table->gate_clks_num; ++ clk_table = kcalloc(count, sizeof(*clk_table), GFP_KERNEL); ++ if (!clk_table) ++ return -ENOMEM; ++ ++ clk_data->clk_data.clks = clk_table; ++ clk_data->clk_data.clk_num = count; ++ ++ ret = register_mux_clks(NULL, clk_data); ++ if (ret) ++ goto err; ++ ++ ret = register_gate_clks(NULL, clk_data); ++ if (ret) ++ goto err; ++ ++ ret = register_provider_clks(node, clk_data, count); ++ if (ret) ++ goto err; ++ ++ return 0; ++err: ++ kfree(clk_table); ++ return ret; ++} ++ ++/* pll divider init */ ++int dm_mango_register_div_clks(struct device_node *node, struct mango_clk_data *clk_data) ++{ ++ int ret; ++ int count; ++ ++ struct clk **clk_table; ++ ++ count = clk_data->table->div_clks_num + clk_data->table->gate_clks_num; ++ clk_table = kcalloc(count, sizeof(*clk_table), GFP_KERNEL); ++ if (!clk_table) ++ return -ENOMEM; ++ ++ clk_data->clk_data.clks = clk_table; ++ clk_data->clk_data.clk_num = count; ++ ++ ret = register_divider_clks(NULL, clk_data); ++ if (ret) ++ goto err; ++ ++ ret = register_gate_clks(NULL, clk_data); ++ if (ret) ++ goto err; ++ ++ ret = register_provider_clks(node, clk_data, count); ++ if (ret) ++ goto err; ++ ++ ++ return 0; ++err: ++ kfree(clk_table); ++ pr_err("%s error %d\n", __func__, ret); ++ return ret; ++} +diff --git a/drivers/clk/sophgo/clk-mango.c b/drivers/clk/sophgo/clk-mango.c +new file mode 100644 +index 000000000000..7f386092f764 +--- /dev/null ++++ b/drivers/clk/sophgo/clk-mango.c +@@ -0,0 +1,977 @@ ++/* ++ * Copyright (c) 2022 SOPHGO ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ */ ++ ++#include ++#include ++#include ++ ++#include "clk.h" ++ ++/* fixed clocks */ ++struct mango_pll_clock mango_root_pll_clks[] = { ++ { ++ .id = FPLL_CLK, ++ .name = "fpll_clock", ++ .parent_name = "cgi", ++ .flags = CLK_GET_RATE_NOCACHE | CLK_GET_ACCURACY_NOCACHE, ++ .ini_flags = MANGO_CLK_RO, ++ }, { ++ .id = DPLL0_CLK, ++ .name = "dpll0_clock", ++ .parent_name = "cgi", ++ .flags = CLK_GET_RATE_NOCACHE | CLK_GET_ACCURACY_NOCACHE, ++ .ini_flags = MANGO_CLK_RO, ++ .status_offset = 0xc0, ++ .enable_offset = 0xc4, ++ }, { ++ .id = DPLL1_CLK, ++ .name = "dpll1_clock", ++ .parent_name = "cgi", ++ .flags = CLK_GET_RATE_NOCACHE | CLK_GET_ACCURACY_NOCACHE, ++ .ini_flags = MANGO_CLK_RO, ++ .status_offset = 0xc0, ++ .enable_offset = 0xc4, ++ }, { ++ .id = MPLL_CLK, ++ .name = "mpll_clock", ++ .parent_name = "cgi", ++ .flags = CLK_GET_RATE_NOCACHE | CLK_GET_ACCURACY_NOCACHE, ++ .status_offset = 0xc0, ++ .enable_offset = 0xc4, ++ },{ ++ .id = FPLL_CLK, ++ .name = "s1_fpll_clock", ++ .parent_name = "s1_cgi", ++ .flags = CLK_GET_RATE_NOCACHE | CLK_GET_ACCURACY_NOCACHE, ++ .ini_flags = MANGO_CLK_RO, ++ }, { ++ .id = DPLL0_CLK, ++ .name = "s1_dpll0_clock", ++ .parent_name = "s1_cgi", ++ .flags = CLK_GET_RATE_NOCACHE | CLK_GET_ACCURACY_NOCACHE, ++ .ini_flags = MANGO_CLK_RO, ++ .status_offset = 0xc0, ++ .enable_offset = 0xc4, ++ }, { ++ .id = DPLL1_CLK, ++ .name = "s1_dpll1_clock", ++ .parent_name = "s1_cgi", ++ .flags = CLK_GET_RATE_NOCACHE | CLK_GET_ACCURACY_NOCACHE, ++ .ini_flags = MANGO_CLK_RO, ++ .status_offset = 0xc0, ++ .enable_offset = 0xc4, ++ }, { ++ .id = MPLL_CLK, ++ .name = "s1_mpll_clock", ++ .parent_name = "s1_cgi", ++ .flags = CLK_GET_RATE_NOCACHE | CLK_GET_ACCURACY_NOCACHE, ++ .status_offset = 0xc0, ++ .enable_offset = 0xc4, ++ } ++}; ++ ++/* divider clocks */ ++static const struct mango_divider_clock s0_div_clks[] = { ++ { DIV_CLK_MPLL_RP_CPU_NORMAL_0, "clk_div_rp_cpu_normal_0", "clk_gate_rp_cpu_normal_div0", ++ 0, 0x2044, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_MPLL_AXI_DDR_0, "clk_div_axi_ddr_0", "clk_gate_axi_ddr_div0", ++ 0, 0x20a8, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, 5}, ++ { DIV_CLK_FPLL_DDR01_1, "clk_div_ddr01_1", "clk_gate_ddr01_div1", ++ 0, 0x20b0, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_INIT_VAL, }, ++ { DIV_CLK_FPLL_DDR23_1, "clk_div_ddr23_1", "clk_gate_ddr23_div1", ++ 0, 0x20b8, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_INIT_VAL, }, ++ { DIV_CLK_FPLL_RP_CPU_NORMAL_1, "clk_div_rp_cpu_normal_1", "clk_gate_rp_cpu_normal_div1", ++ 0, 0x2040, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_50M_A53, "clk_div_50m_a53", "fpll_clock", ++ 0, 0x2048, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_TOP_RP_CMN_DIV2, "clk_div_top_rp_cmn_div2", "clk_mux_rp_cpu_normal", ++ 0, 0x204c, 16, 16, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_UART_500M, "clk_div_uart_500m", "fpll_clock", ++ 0, 0x2050, 16, 7, CLK_DIVIDER_READ_ONLY, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_AHB_LPC, "clk_div_ahb_lpc", "fpll_clock", ++ 0, 0x2054, 16, 16, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_EFUSE, "clk_div_efuse", "fpll_clock", ++ 0, 0x2078, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { 
DIV_CLK_FPLL_TX_ETH0, "clk_div_tx_eth0", "fpll_clock", ++ 0, 0x2080, 16, 11, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_PTP_REF_I_ETH0, "clk_div_ptp_ref_i_eth0", "fpll_clock", ++ 0, 0x2084, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_REF_ETH0, "clk_div_ref_eth0", "fpll_clock", ++ 0, 0x2088, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_EMMC, "clk_div_emmc", "fpll_clock", ++ 0, 0x208c, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_SD, "clk_div_sd", "fpll_clock", ++ 0, 0x2094, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_TOP_AXI0, "clk_div_top_axi0", "fpll_clock", ++ 0, 0x209c, 16, 5, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_TOP_AXI_HSPERI, "clk_div_top_axi_hsperi", "fpll_clock", ++ 0, 0x20a0, 16, 5, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_AXI_DDR_1, "clk_div_axi_ddr_1", "clk_gate_axi_ddr_div1", ++ 0, 0x20a4, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, 5}, ++ { DIV_CLK_FPLL_DIV_TIMER1, "clk_div_timer1", "clk_div_50m_a53", ++ 0, 0x2058, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER2, "clk_div_timer2", "clk_div_50m_a53", ++ 0, 0x205c, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER3, "clk_div_timer3", "clk_div_50m_a53", ++ 0, 0x2060, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER4, "clk_div_timer4", "clk_div_50m_a53", ++ 0, 0x2064, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER5, "clk_div_timer5", "clk_div_50m_a53", ++ 0, 0x2068, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER6, "clk_div_timer6", "clk_div_50m_a53", ++ 0, 0x206c, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER7, "clk_div_timer7", "clk_div_50m_a53", ++ 0, 0x2070, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER8, "clk_div_timer8", "clk_div_50m_a53", ++ 0, 0x2074, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_100K_EMMC, "clk_div_100k_emmc", "clk_div_top_axi0", ++ 0, 0x2090, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_100K_SD, "clk_div_100k_sd", "clk_div_top_axi0", ++ 0, 0x2098, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_GPIO_DB, "clk_div_gpio_db", "clk_div_top_axi0", ++ 0, 0x207c, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_DPLL0_DDR01_0, "clk_div_ddr01_0", "clk_gate_ddr01_div0", ++ 0, 0x20ac, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_INIT_VAL, }, ++ { DIV_CLK_DPLL1_DDR23_0, "clk_div_ddr23_0", "clk_gate_ddr23_div0", ++ 0, 0x20b4, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_INIT_VAL, }, ++}; ++ ++/* gate clocks */ ++static const struct mango_gate_clock s0_gate_clks[] = { ++ { 
GATE_CLK_RP_CPU_NORMAL_DIV0, "clk_gate_rp_cpu_normal_div1", "mpll_clock", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2000, 0, 0 }, ++ { GATE_CLK_AXI_DDR_DIV0, "clk_gate_axi_ddr_div1", "mpll_clock", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 13, 0 }, ++ { GATE_CLK_DDR01_DIV0, "clk_gate_ddr01_div0", "fpll_clock", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 14, 0 }, ++ { GATE_CLK_DDR23_DIV0, "clk_gate_ddr23_div0", "fpll_clock", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 15, 0 }, ++ { GATE_CLK_RP_CPU_NORMAL_DIV1, "clk_gate_rp_cpu_normal_div0", "fpll_clock", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2000, 0, 0 }, ++ { GATE_CLK_AXI_DDR_DIV1, "clk_gate_axi_ddr_div0", "fpll_clock", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 13, 0 }, ++ { GATE_CLK_DDR01_DIV1, "clk_gate_ddr01_div1", "dpll0_clock", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2004, 14, 0 }, ++ { GATE_CLK_DDR23_DIV1, "clk_gate_ddr23_div1", "dpll1_clock", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2004, 15, 0 }, ++ { GATE_CLK_A53_50M, "clk_gate_a53_50m", "clk_div_50m_a53", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 1, 0 }, ++ { GATE_CLK_TOP_RP_CMN_DIV2, "clk_gate_top_rp_cmn_div2", "clk_gate_rp_cpu_normal", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 2, 0 }, ++ { GATE_CLK_AXI_PCIE0, "clk_gate_axi_pcie0", "clk_gate_rp_cpu_normal", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2004, 8, 0 }, ++ { GATE_CLK_AXI_PCIE1, "clk_gate_axi_pcie1", "clk_gate_rp_cpu_normal", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2004, 9, 0 }, ++ { GATE_CLK_HSDMA, "clk_gate_hsdma", "clk_gate_top_rp_cmn_div2", ++ CLK_SET_RATE_PARENT, 0x2004, 10, 0 }, ++ { GATE_CLK_EMMC_100M, "clk_gate_emmc", "clk_div_emmc", ++ CLK_SET_RATE_PARENT, 0x2004, 3, 0 }, ++ { GATE_CLK_SD_100M, "clk_gate_sd", "clk_div_sd", ++ CLK_SET_RATE_PARENT, 0x2004, 6, 0 }, ++ { GATE_CLK_TX_ETH0, "clk_gate_tx_eth0", "clk_div_tx_eth0", ++ CLK_SET_RATE_PARENT, 0x2000, 30, 0 }, ++ { GATE_CLK_PTP_REF_I_ETH0, "clk_gate_ptp_ref_i_eth0", "clk_div_ptp_ref_i_eth0", ++ CLK_SET_RATE_PARENT, 0x2004, 0, 0 }, ++ { GATE_CLK_REF_ETH0, "clk_gate_ref_eth0", "clk_div_ref_eth0", ++ CLK_SET_RATE_PARENT, 0x2004, 1, 0 }, ++ { GATE_CLK_UART_500M, "clk_gate_uart_500m", "clk_div_uart_500m", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 4, 0 }, ++ { GATE_CLK_AHB_LPC, "clk_gate_ahb_lpc", "clk_div_ahb_lpc", ++ CLK_SET_RATE_PARENT, 0x2000, 7, 0 }, ++ { GATE_CLK_EFUSE, "clk_gate_efuse", "clk_div_efuse", ++ CLK_SET_RATE_PARENT, 0x2000, 20, 0}, ++ { GATE_CLK_TOP_AXI0, "clk_gate_top_axi0", "clk_div_top_axi0", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 11, 0 }, ++ { GATE_CLK_TOP_AXI_HSPERI, "clk_gate_top_axi_hsperi", "clk_div_top_axi_hsperi", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 12, 0 }, ++ { GATE_CLK_AHB_ROM, "clk_gate_ahb_rom", "clk_gate_top_axi0", ++ 0, 0x2000, 8, 0 }, ++ { GATE_CLK_AHB_SF, "clk_gate_ahb_sf", "clk_gate_top_axi0", ++ 0, 0x2000, 9, 0 }, ++ { GATE_CLK_AXI_SRAM, "clk_gate_axi_sram", "clk_gate_top_axi0", ++ CLK_IGNORE_UNUSED, 0x2000, 10, 0 }, ++ { GATE_CLK_APB_TIMER, "clk_gate_apb_timer", "clk_gate_top_axi0", ++ CLK_IGNORE_UNUSED, 0x2000, 11, 0 }, ++ { GATE_CLK_APB_EFUSE, "clk_gate_apb_efuse", "clk_gate_top_axi0", ++ 0, 0x2000, 21, 0 }, ++ { GATE_CLK_APB_GPIO, "clk_gate_apb_gpio", "clk_gate_top_axi0", ++ 0, 0x2000, 22, 0 }, ++ { GATE_CLK_APB_GPIO_INTR, "clk_gate_apb_gpio_intr", "clk_gate_top_axi0", ++ CLK_IS_CRITICAL, 0x2000, 23, 0 }, ++ { GATE_CLK_APB_I2C, 
"clk_gate_apb_i2c", "clk_gate_top_axi0", ++ 0, 0x2000, 26, 0 }, ++ { GATE_CLK_APB_WDT, "clk_gate_apb_wdt", "clk_gate_top_axi0", ++ 0, 0x2000, 27, 0 }, ++ { GATE_CLK_APB_PWM, "clk_gate_apb_pwm", "clk_gate_top_axi0", ++ 0, 0x2000, 28, 0 }, ++ { GATE_CLK_APB_RTC, "clk_gate_apb_rtc", "clk_gate_top_axi0", ++ 0, 0x2000, 29, 0 }, ++ { GATE_CLK_SYSDMA_AXI, "clk_gate_sysdma_axi", "clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 3, 0 }, ++ { GATE_CLK_APB_UART, "clk_gate_apb_uart", "clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 5, 0 }, ++ { GATE_CLK_AXI_DBG_I2C, "clk_gate_axi_dbg_i2c", "clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 6, 0 }, ++ { GATE_CLK_APB_SPI, "clk_gate_apb_spi", "clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 25, 0 }, ++ { GATE_CLK_AXI_ETH0, "clk_gate_axi_eth0", "clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 31, 0 }, ++ { GATE_CLK_AXI_EMMC, "clk_gate_axi_emmc", "clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2004, 2, 0 }, ++ { GATE_CLK_AXI_SD, "clk_gate_axi_sd", "clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2004, 5, 0 }, ++ { GATE_CLK_TIMER1, "clk_gate_timer1", "clk_div_timer1", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 12, 0 }, ++ { GATE_CLK_TIMER2, "clk_gate_timer2", "clk_div_timer2", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 13, 0 }, ++ { GATE_CLK_TIMER3, "clk_gate_timer3", "clk_div_timer3", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 14, 0 }, ++ { GATE_CLK_TIMER4, "clk_gate_timer4", "clk_div_timer4", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 15, 0 }, ++ { GATE_CLK_TIMER5, "clk_gate_timer5", "clk_div_timer5", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 16, 0 }, ++ { GATE_CLK_TIMER6, "clk_gate_timer6", "clk_div_timer6", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 17, 0 }, ++ { GATE_CLK_TIMER7, "clk_gate_timer7", "clk_div_timer7", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 18, 0 }, ++ { GATE_CLK_TIMER8, "clk_gate_timer8", "clk_div_timer8", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 19, 0 }, ++ { GATE_CLK_100K_EMMC, "clk_gate_100k_emmc", "clk_div_100k_emmc", ++ CLK_SET_RATE_PARENT, 0x2004, 4, 0 }, ++ { GATE_CLK_100K_SD, "clk_gate_100k_sd", "clk_div_100k_sd", ++ CLK_SET_RATE_PARENT, 0x2004, 7, 0 }, ++ { GATE_CLK_GPIO_DB, "clk_gate_gpio_db", "clk_div_gpio_db", ++ CLK_SET_RATE_PARENT, 0x2000, 24, 0 }, ++ { GATE_CLK_DDR01, "clk_gate_ddr01", "clk_mux_ddr01", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 14, 0 }, ++ { GATE_CLK_DDR23, "clk_gate_ddr23", "clk_mux_ddr23", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 15, 0 }, ++ { GATE_CLK_RP_CPU_NORMAL, "clk_gate_rp_cpu_normal", "clk_mux_rp_cpu_normal", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2000, 0, 0 }, ++ { GATE_CLK_AXI_DDR, "clk_gate_axi_ddr", "clk_mux_axi_ddr", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 13, 0 }, ++ { GATE_CLK_RXU0, "clk_gate_rxu0", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 0, 0 }, ++ { GATE_CLK_RXU1, "clk_gate_rxu1", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 1, 0 }, ++ { GATE_CLK_RXU2, "clk_gate_rxu2", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 2, 0 }, ++ { GATE_CLK_RXU3, "clk_gate_rxu3", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 3, 0 }, ++ { GATE_CLK_RXU4, "clk_gate_rxu4", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 4, 0 }, ++ { GATE_CLK_RXU5, "clk_gate_rxu5", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 5, 0 }, ++ { GATE_CLK_RXU6, "clk_gate_rxu6", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 6, 0 }, ++ { 
GATE_CLK_RXU7, "clk_gate_rxu7", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 7, 0 }, ++ { GATE_CLK_RXU8, "clk_gate_rxu8", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 8, 0 }, ++ { GATE_CLK_RXU9, "clk_gate_rxu9", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 9, 0 }, ++ { GATE_CLK_RXU10, "clk_gate_rxu10", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 10, 0 }, ++ { GATE_CLK_RXU11, "clk_gate_rxu11", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 11, 0 }, ++ { GATE_CLK_RXU12, "clk_gate_rxu12", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 12, 0 }, ++ { GATE_CLK_RXU13, "clk_gate_rxu13", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 13, 0 }, ++ { GATE_CLK_RXU14, "clk_gate_rxu14", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 14, 0 }, ++ { GATE_CLK_RXU15, "clk_gate_rxu15", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 15, 0 }, ++ { GATE_CLK_RXU16, "clk_gate_rxu16", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 16, 0 }, ++ { GATE_CLK_RXU17, "clk_gate_rxu17", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 17, 0 }, ++ { GATE_CLK_RXU18, "clk_gate_rxu18", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 18, 0 }, ++ { GATE_CLK_RXU19, "clk_gate_rxu19", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 19, 0 }, ++ { GATE_CLK_RXU20, "clk_gate_rxu20", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 20, 0 }, ++ { GATE_CLK_RXU21, "clk_gate_rxu21", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 21, 0 }, ++ { GATE_CLK_RXU22, "clk_gate_rxu22", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 22, 0 }, ++ { GATE_CLK_RXU23, "clk_gate_rxu23", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 23, 0 }, ++ { GATE_CLK_RXU24, "clk_gate_rxu24", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 24, 0 }, ++ { GATE_CLK_RXU25, "clk_gate_rxu25", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 25, 0 }, ++ { GATE_CLK_RXU26, "clk_gate_rxu26", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 26, 0 }, ++ { GATE_CLK_RXU27, "clk_gate_rxu27", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 27, 0 }, ++ { GATE_CLK_RXU28, "clk_gate_rxu28", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 28, 0 }, ++ { GATE_CLK_RXU29, "clk_gate_rxu29", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 29, 0 }, ++ { GATE_CLK_RXU30, "clk_gate_rxu30", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 30, 0 }, ++ { GATE_CLK_RXU31, "clk_gate_rxu31", "clk_gate_rp_cpu_normal", ++ 0, 0x368, 31, 0 }, ++ { GATE_CLK_MP0, "clk_gate_mp0", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x384, 0, 0 }, ++ { GATE_CLK_MP1, "clk_gate_mp1", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x38c, 0, 0 }, ++ { GATE_CLK_MP2, "clk_gate_mp2", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x394, 0, 0 }, ++ { GATE_CLK_MP3, "clk_gate_mp3", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x39c, 0, 0 }, ++ { GATE_CLK_MP4, "clk_gate_mp4", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3a4, 0, 0 }, ++ { GATE_CLK_MP5, "clk_gate_mp5", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3ac, 0, 0 }, ++ { GATE_CLK_MP6, "clk_gate_mp6", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3b4, 0, 0 }, ++ { GATE_CLK_MP7, "clk_gate_mp7", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3bc, 0, 0 }, ++ { GATE_CLK_MP8, "clk_gate_mp8", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3c4, 0, 0 }, ++ { GATE_CLK_MP9, "clk_gate_mp9", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3cc, 0, 0 }, ++ { GATE_CLK_MP10, "clk_gate_mp10", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3d4, 0, 0 }, ++ { GATE_CLK_MP11, "clk_gate_mp11", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3dc, 0, 0 
}, ++ { GATE_CLK_MP12, "clk_gate_mp12", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3e4, 0, 0 }, ++ { GATE_CLK_MP13, "clk_gate_mp13", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3ec, 0, 0 }, ++ { GATE_CLK_MP14, "clk_gate_mp14", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3f4, 0, 0 }, ++ { GATE_CLK_MP15, "clk_gate_mp15", "clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3fc, 0, 0 }, ++}; ++ ++static const struct mango_divider_clock s1_div_clks[] = { ++ { DIV_CLK_MPLL_RP_CPU_NORMAL_0, "s1_clk_div_rp_cpu_normal_0", "s1_clk_gate_rp_cpu_normal_div0", ++ 0, 0x2044, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_MPLL_AXI_DDR_0, "s1_clk_div_axi_ddr_0", "s1_clk_gate_axi_ddr_div0", ++ 0, 0x20a8, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, 5}, ++ { DIV_CLK_FPLL_DDR01_1, "s1_clk_div_ddr01_1", "s1_clk_gate_ddr01_div1", ++ 0, 0x20b0, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_INIT_VAL, }, ++ { DIV_CLK_FPLL_DDR23_1, "s1_clk_div_ddr23_1", "s1_clk_gate_ddr23_div1", ++ 0, 0x20b8, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_INIT_VAL, }, ++ { DIV_CLK_FPLL_RP_CPU_NORMAL_1, "s1_clk_div_rp_cpu_normal_1", "s1_clk_gate_rp_cpu_normal_div1", ++ 0, 0x2040, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_50M_A53, "s1_clk_div_50m_a53", "s1_fpll_clock", ++ 0, 0x2048, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_TOP_RP_CMN_DIV2, "s1_clk_div_top_rp_cmn_div2", "s1_clk_mux_rp_cpu_normal", ++ 0, 0x204c, 16, 16, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_UART_500M, "s1_clk_div_uart_500m", "s1_fpll_clock", ++ 0, 0x2050, 16, 7, CLK_DIVIDER_READ_ONLY, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_AHB_LPC, "s1_clk_div_ahb_lpc", "s1_fpll_clock", ++ 0, 0x2054, 16, 16, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_EFUSE, "s1_clk_div_efuse", "s1_fpll_clock", ++ 0, 0x2078, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_TX_ETH0, "s1_clk_div_tx_eth0", "s1_fpll_clock", ++ 0, 0x2080, 16, 11, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_PTP_REF_I_ETH0, "s1_clk_div_ptp_ref_i_eth0", "s1_fpll_clock", ++ 0, 0x2084, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_REF_ETH0, "s1_clk_div_ref_eth0", "s1_fpll_clock", ++ 0, 0x2088, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_EMMC, "s1_clk_div_emmc", "s1_fpll_clock", ++ 0, 0x208c, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_SD, "s1_clk_div_sd", "s1_fpll_clock", ++ 0, 0x2094, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_TOP_AXI0, "s1_clk_div_top_axi0", "s1_fpll_clock", ++ 0, 0x209c, 16, 5, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_TOP_AXI_HSPERI, "s1_clk_div_top_axi_hsperi", "s1_fpll_clock", ++ 0, 0x20a0, 16, 5, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_AXI_DDR_1, "s1_clk_div_axi_ddr_1", "s1_clk_gate_axi_ddr_div1", ++ 0, 0x20a4, 16, 8, 
CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, 5}, ++ { DIV_CLK_FPLL_DIV_TIMER1, "s1_clk_div_timer1", "s1_clk_div_50m_a53", ++ 0, 0x2058, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER2, "s1_clk_div_timer2", "s1_clk_div_50m_a53", ++ 0, 0x205c, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER3, "s1_clk_div_timer3", "s1_clk_div_50m_a53", ++ 0, 0x2060, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER4, "s1_clk_div_timer4", "s1_clk_div_50m_a53", ++ 0, 0x2064, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER5, "s1_clk_div_timer5", "s1_clk_div_50m_a53", ++ 0, 0x2068, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER6, "s1_clk_div_timer6", "s1_clk_div_50m_a53", ++ 0, 0x206c, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER7, "s1_clk_div_timer7", "s1_clk_div_50m_a53", ++ 0, 0x2070, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_DIV_TIMER8, "s1_clk_div_timer8", "s1_clk_div_50m_a53", ++ 0, 0x2074, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_100K_EMMC, "s1_clk_div_100k_emmc", "s1_clk_div_top_axi0", ++ 0, 0x2090, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_100K_SD, "s1_clk_div_100k_sd", "s1_clk_div_top_axi0", ++ 0, 0x2098, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_FPLL_GPIO_DB, "s1_clk_div_gpio_db", "s1_clk_div_top_axi0", ++ 0, 0x207c, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_REG_VAL, }, ++ { DIV_CLK_DPLL0_DDR01_0, "s1_clk_div_ddr01_0", "s1_clk_gate_ddr01_div0", ++ 0, 0x20ac, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_INIT_VAL, }, ++ { DIV_CLK_DPLL1_DDR23_0, "s1_clk_div_ddr23_0", "s1_clk_gate_ddr23_div0", ++ 0, 0x20b4, 16, 8, CLK_DIVIDER_ONE_BASED | ++ CLK_DIVIDER_ALLOW_ZERO, MANGO_CLK_USE_INIT_VAL, }, ++}; ++ ++static const struct mango_gate_clock s1_gate_clks[] = { ++ { GATE_CLK_RP_CPU_NORMAL_DIV0, "s1_clk_gate_rp_cpu_normal_div1", "s1_mpll_clock", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2000, 0, 0 }, ++ { GATE_CLK_AXI_DDR_DIV0, "s1_clk_gate_axi_ddr_div1", "s1_mpll_clock", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 13, 0 }, ++ { GATE_CLK_DDR01_DIV0, "s1_clk_gate_ddr01_div0", "s1_fpll_clock", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 14, 0 }, ++ { GATE_CLK_DDR23_DIV0, "s1_clk_gate_ddr23_div0", "s1_fpll_clock", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 15, 0 }, ++ { GATE_CLK_RP_CPU_NORMAL_DIV1, "s1_clk_gate_rp_cpu_normal_div0", "s1_fpll_clock", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2000, 0, 0 }, ++ { GATE_CLK_AXI_DDR_DIV1, "s1_clk_gate_axi_ddr_div0", "s1_fpll_clock", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 13, 0 }, ++ { GATE_CLK_DDR01_DIV1, "s1_clk_gate_ddr01_div1", "s1_dpll0_clock", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2004, 14, 0 }, ++ { GATE_CLK_DDR23_DIV1, "s1_clk_gate_ddr23_div1", "s1_dpll1_clock", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2004, 15, 0 }, ++ { GATE_CLK_A53_50M, "s1_clk_gate_a53_50m", "s1_clk_div_50m_a53", ++ 
CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 1, 0 }, ++ { GATE_CLK_TOP_RP_CMN_DIV2, "s1_clk_gate_top_rp_cmn_div2", "s1_clk_gate_rp_cpu_normal", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 2, 0 }, ++ { GATE_CLK_AXI_PCIE0, "s1_clk_gate_axi_pcie0", "s1_clk_gate_rp_cpu_normal", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2004, 8, 0 }, ++ { GATE_CLK_AXI_PCIE1, "s1_clk_gate_axi_pcie1", "s1_clk_gate_rp_cpu_normal", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2004, 9, 0 }, ++ { GATE_CLK_HSDMA, "s1_clk_gate_hsdma", "s1_clk_gate_top_rp_cmn_div2", ++ CLK_SET_RATE_PARENT, 0x2004, 10, 0 }, ++ { GATE_CLK_EMMC_100M, "s1_clk_gate_emmc", "s1_clk_div_emmc", ++ CLK_SET_RATE_PARENT, 0x2004, 3, 0 }, ++ { GATE_CLK_SD_100M, "s1_clk_gate_sd", "s1_clk_div_sd", ++ CLK_SET_RATE_PARENT, 0x2004, 6, 0 }, ++ { GATE_CLK_TX_ETH0, "s1_clk_gate_tx_eth0", "s1_clk_div_tx_eth0", ++ CLK_SET_RATE_PARENT, 0x2000, 30, 0 }, ++ { GATE_CLK_PTP_REF_I_ETH0, "s1_clk_gate_ptp_ref_i_eth0", "s1_clk_div_ptp_ref_i_eth0", ++ CLK_SET_RATE_PARENT, 0x2004, 0, 0 }, ++ { GATE_CLK_REF_ETH0, "s1_clk_gate_ref_eth0", "s1_clk_div_ref_eth0", ++ CLK_SET_RATE_PARENT, 0x2004, 1, 0 }, ++ { GATE_CLK_UART_500M, "s1_clk_gate_uart_500m", "s1_clk_div_uart_500m", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 4, 0 }, ++ { GATE_CLK_AHB_LPC, "s1_clk_gate_ahb_lpc", "s1_clk_div_ahb_lpc", ++ CLK_SET_RATE_PARENT, 0x2000, 7, 0 }, ++ { GATE_CLK_EFUSE, "s1_clk_gate_efuse", "s1_clk_div_efuse", ++ CLK_SET_RATE_PARENT, 0x2000, 20, 0}, ++ { GATE_CLK_TOP_AXI0, "s1_clk_gate_top_axi0", "s1_clk_div_top_axi0", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 11, 0 }, ++ { GATE_CLK_TOP_AXI_HSPERI, "s1_clk_gate_top_axi_hsperi", "s1_clk_div_top_axi_hsperi", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 12, 0 }, ++ { GATE_CLK_AHB_ROM, "s1_clk_gate_ahb_rom", "s1_clk_gate_top_axi0", ++ 0, 0x2000, 8, 0 }, ++ { GATE_CLK_AHB_SF, "s1_clk_gate_ahb_sf", "s1_clk_gate_top_axi0", ++ 0, 0x2000, 9, 0 }, ++ { GATE_CLK_AXI_SRAM, "s1_clk_gate_axi_sram", "s1_clk_gate_top_axi0", ++ CLK_IGNORE_UNUSED, 0x2000, 10, 0 }, ++ { GATE_CLK_APB_TIMER, "s1_clk_gate_apb_timer", "s1_clk_gate_top_axi0", ++ CLK_IGNORE_UNUSED, 0x2000, 11, 0 }, ++ { GATE_CLK_APB_EFUSE, "s1_clk_gate_apb_efuse", "s1_clk_gate_top_axi0", ++ 0, 0x2000, 21, 0 }, ++ { GATE_CLK_APB_GPIO, "s1_clk_gate_apb_gpio", "s1_clk_gate_top_axi0", ++ 0, 0x2000, 22, 0 }, ++ { GATE_CLK_APB_GPIO_INTR, "s1_clk_gate_apb_gpio_intr", "s1_clk_gate_top_axi0", ++ CLK_IS_CRITICAL, 0x2000, 23, 0 }, ++ { GATE_CLK_APB_I2C, "s1_clk_gate_apb_i2c", "s1_clk_gate_top_axi0", ++ 0, 0x2000, 26, 0 }, ++ { GATE_CLK_APB_WDT, "s1_clk_gate_apb_wdt", "s1_clk_gate_top_axi0", ++ 0, 0x2000, 27, 0 }, ++ { GATE_CLK_APB_PWM, "s1_clk_gate_apb_pwm", "s1_clk_gate_top_axi0", ++ 0, 0x2000, 28, 0 }, ++ { GATE_CLK_APB_RTC, "s1_clk_gate_apb_rtc", "s1_clk_gate_top_axi0", ++ 0, 0x2000, 29, 0 }, ++ { GATE_CLK_SYSDMA_AXI, "s1_clk_gate_sysdma_axi", "s1_clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 3, 0 }, ++ { GATE_CLK_APB_UART, "s1_clk_gate_apb_uart", "s1_clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 5, 0 }, ++ { GATE_CLK_AXI_DBG_I2C, "s1_clk_gate_axi_dbg_i2c", "s1_clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 6, 0 }, ++ { GATE_CLK_APB_SPI, "s1_clk_gate_apb_spi", "s1_clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 25, 0 }, ++ { GATE_CLK_AXI_ETH0, "s1_clk_gate_axi_eth0", "s1_clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2000, 31, 0 }, ++ { GATE_CLK_AXI_EMMC, "s1_clk_gate_axi_emmc", 
"s1_clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2004, 2, 0 }, ++ { GATE_CLK_AXI_SD, "s1_clk_gate_axi_sd", "s1_clk_gate_top_axi_hsperi", ++ CLK_SET_RATE_PARENT, 0x2004, 5, 0 }, ++ { GATE_CLK_TIMER1, "s1_clk_gate_timer1", "s1_clk_div_timer1", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 12, 0 }, ++ { GATE_CLK_TIMER2, "s1_clk_gate_timer2", "s1_clk_div_timer2", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 13, 0 }, ++ { GATE_CLK_TIMER3, "s1_clk_gate_timer3", "s1_clk_div_timer3", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 14, 0 }, ++ { GATE_CLK_TIMER4, "s1_clk_gate_timer4", "s1_clk_div_timer4", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 15, 0 }, ++ { GATE_CLK_TIMER5, "s1_clk_gate_timer5", "s1_clk_div_timer5", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 16, 0 }, ++ { GATE_CLK_TIMER6, "s1_clk_gate_timer6", "s1_clk_div_timer6", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 17, 0 }, ++ { GATE_CLK_TIMER7, "s1_clk_gate_timer7", "s1_clk_div_timer7", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 18, 0 }, ++ { GATE_CLK_TIMER8, "s1_clk_gate_timer8", "s1_clk_div_timer8", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2000, 19, 0 }, ++ { GATE_CLK_100K_EMMC, "s1_clk_gate_100k_emmc", "s1_clk_div_100k_emmc", ++ CLK_SET_RATE_PARENT, 0x2004, 4, 0 }, ++ { GATE_CLK_100K_SD, "s1_clk_gate_100k_sd", "s1_clk_div_100k_sd", ++ CLK_SET_RATE_PARENT, 0x2004, 7, 0 }, ++ { GATE_CLK_GPIO_DB, "s1_clk_gate_gpio_db", "s1_clk_div_gpio_db", ++ CLK_SET_RATE_PARENT, 0x2000, 24, 0 }, ++ { GATE_CLK_DDR01, "s1_clk_gate_ddr01", "s1_clk_mux_ddr01", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 14, 0 }, ++ { GATE_CLK_DDR23, "s1_clk_gate_ddr23", "s1_clk_mux_ddr23", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 15, 0 }, ++ { GATE_CLK_RP_CPU_NORMAL, "s1_clk_gate_rp_cpu_normal", "s1_clk_mux_rp_cpu_normal", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2000, 0, 0 }, ++ { GATE_CLK_AXI_DDR, "s1_clk_gate_axi_ddr", "s1_clk_mux_axi_ddr", ++ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x2004, 13, 0 }, ++ { GATE_CLK_RXU0, "s1_clk_gate_rxu0", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 0, 0 }, ++ { GATE_CLK_RXU1, "s1_clk_gate_rxu1", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 1, 0 }, ++ { GATE_CLK_RXU2, "s1_clk_gate_rxu2", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 2, 0 }, ++ { GATE_CLK_RXU3, "s1_clk_gate_rxu3", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 3, 0 }, ++ { GATE_CLK_RXU4, "s1_clk_gate_rxu4", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 4, 0 }, ++ { GATE_CLK_RXU5, "s1_clk_gate_rxu5", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 5, 0 }, ++ { GATE_CLK_RXU6, "s1_clk_gate_rxu6", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 6, 0 }, ++ { GATE_CLK_RXU7, "s1_clk_gate_rxu7", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 7, 0 }, ++ { GATE_CLK_RXU8, "s1_clk_gate_rxu8", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 8, 0 }, ++ { GATE_CLK_RXU9, "s1_clk_gate_rxu9", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 9, 0 }, ++ { GATE_CLK_RXU10, "s1_clk_gate_rxu10", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 10, 0 }, ++ { GATE_CLK_RXU11, "s1_clk_gate_rxu11", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 11, 0 }, ++ { GATE_CLK_RXU12, "s1_clk_gate_rxu12", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 12, 0 }, ++ { GATE_CLK_RXU13, "s1_clk_gate_rxu13", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 13, 0 }, ++ { GATE_CLK_RXU14, "s1_clk_gate_rxu14", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 14, 0 }, ++ { GATE_CLK_RXU15, "s1_clk_gate_rxu15", "s1_clk_gate_rp_cpu_normal", ++ 
0, 0x368, 15, 0 }, ++ { GATE_CLK_RXU16, "s1_clk_gate_rxu16", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 16, 0 }, ++ { GATE_CLK_RXU17, "s1_clk_gate_rxu17", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 17, 0 }, ++ { GATE_CLK_RXU18, "s1_clk_gate_rxu18", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 18, 0 }, ++ { GATE_CLK_RXU19, "s1_clk_gate_rxu19", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 19, 0 }, ++ { GATE_CLK_RXU20, "s1_clk_gate_rxu20", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 20, 0 }, ++ { GATE_CLK_RXU21, "s1_clk_gate_rxu21", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 21, 0 }, ++ { GATE_CLK_RXU22, "s1_clk_gate_rxu22", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 22, 0 }, ++ { GATE_CLK_RXU23, "s1_clk_gate_rxu23", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 23, 0 }, ++ { GATE_CLK_RXU24, "s1_clk_gate_rxu24", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 24, 0 }, ++ { GATE_CLK_RXU25, "s1_clk_gate_rxu25", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 25, 0 }, ++ { GATE_CLK_RXU26, "s1_clk_gate_rxu26", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 26, 0 }, ++ { GATE_CLK_RXU27, "s1_clk_gate_rxu27", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 27, 0 }, ++ { GATE_CLK_RXU28, "s1_clk_gate_rxu28", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 28, 0 }, ++ { GATE_CLK_RXU29, "s1_clk_gate_rxu29", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 29, 0 }, ++ { GATE_CLK_RXU30, "s1_clk_gate_rxu30", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 30, 0 }, ++ { GATE_CLK_RXU31, "s1_clk_gate_rxu31", "s1_clk_gate_rp_cpu_normal", ++ 0, 0x368, 31, 0 }, ++ { GATE_CLK_MP0, "s1_clk_gate_mp0", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x384, 0, 0 }, ++ { GATE_CLK_MP1, "s1_clk_gate_mp1", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x38c, 0, 0 }, ++ { GATE_CLK_MP2, "s1_clk_gate_mp2", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x394, 0, 0 }, ++ { GATE_CLK_MP3, "s1_clk_gate_mp3", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x39c, 0, 0 }, ++ { GATE_CLK_MP4, "s1_clk_gate_mp4", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3a4, 0, 0 }, ++ { GATE_CLK_MP5, "s1_clk_gate_mp5", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3ac, 0, 0 }, ++ { GATE_CLK_MP6, "s1_clk_gate_mp6", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3b4, 0, 0 }, ++ { GATE_CLK_MP7, "s1_clk_gate_mp7", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3bc, 0, 0 }, ++ { GATE_CLK_MP8, "s1_clk_gate_mp8", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3c4, 0, 0 }, ++ { GATE_CLK_MP9, "s1_clk_gate_mp9", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3cc, 0, 0 }, ++ { GATE_CLK_MP10, "s1_clk_gate_mp10", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3d4, 0, 0 }, ++ { GATE_CLK_MP11, "s1_clk_gate_mp11", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3dc, 0, 0 }, ++ { GATE_CLK_MP12, "s1_clk_gate_mp12", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3e4, 0, 0 }, ++ { GATE_CLK_MP13, "s1_clk_gate_mp13", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3ec, 0, 0 }, ++ { GATE_CLK_MP14, "s1_clk_gate_mp14", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3f4, 0, 0 }, ++ { GATE_CLK_MP15, "s1_clk_gate_mp15", "s1_clk_gate_rp_cpu_normal", ++ CLK_IGNORE_UNUSED | CLK_IS_CRITICAL, 0x3fc, 0, 0 }, ++}; ++ ++/* socket0 mux clocks */ ++static const char *const clk_mux_ddr01_p[] = { 
++ "clk_div_ddr01_0", "clk_div_ddr01_1"}; ++static const char *const clk_mux_ddr23_p[] = { ++ "clk_div_ddr23_0", "clk_div_ddr23_1"}; ++static const char *const clk_mux_rp_cpu_normal_p[] = { ++ "clk_div_rp_cpu_normal_0", "clk_div_rp_cpu_normal_1"}; ++static const char *const clk_mux_axi_ddr_p[] = { ++ "clk_div_axi_ddr_0", "clk_div_axi_ddr_1"}; ++ ++struct mango_mux_clock s0_mux_clks[] = { ++ { ++ MUX_CLK_DDR01, "clk_mux_ddr01", clk_mux_ddr01_p, ++ ARRAY_SIZE(clk_mux_ddr01_p), ++ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT | ++ CLK_MUX_READ_ONLY, ++ 0x2020, 2, 1, 0, ++ }, ++ { ++ MUX_CLK_DDR23, "clk_mux_ddr23", clk_mux_ddr23_p, ++ ARRAY_SIZE(clk_mux_ddr23_p), ++ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT | ++ CLK_MUX_READ_ONLY, ++ 0x2020, 3, 1, 0, ++ }, ++ { ++ MUX_CLK_RP_CPU_NORMAL, "clk_mux_rp_cpu_normal", clk_mux_rp_cpu_normal_p, ++ ARRAY_SIZE(clk_mux_rp_cpu_normal_p), ++ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, ++ 0x2020, 0, 1, 0, ++ }, ++ { ++ MUX_CLK_AXI_DDR, "clk_mux_axi_ddr", clk_mux_axi_ddr_p, ++ ARRAY_SIZE(clk_mux_axi_ddr_p), ++ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, ++ 0x2020, 1, 1, 0, ++ }, ++}; ++ ++/* socket1 mux clocks */ ++static const char *const s1_clk_mux_ddr01_p[] = { ++ "s1_clk_div_ddr01_0", "s1_clk_div_ddr01_1"}; ++static const char *const s1_clk_mux_ddr23_p[] = { ++ "s1_clk_div_ddr23_0", "s1_clk_div_ddr23_1"}; ++static const char *const s1_clk_mux_rp_cpu_normal_p[] = { ++ "s1_clk_div_rp_cpu_normal_0", "s1_clk_div_rp_cpu_normal_1"}; ++static const char *const s1_clk_mux_axi_ddr_p[] = { ++ "s1_clk_div_axi_ddr_0", "s1_clk_div_axi_ddr_1"}; ++ ++struct mango_mux_clock s1_mux_clks[] = { ++ { ++ MUX_CLK_DDR01, "s1_clk_mux_ddr01", s1_clk_mux_ddr01_p, ++ ARRAY_SIZE(s1_clk_mux_ddr01_p), ++ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT | ++ CLK_MUX_READ_ONLY, ++ 0x2020, 2, 1, 0, ++ }, ++ { ++ MUX_CLK_DDR23, "s1_clk_mux_ddr23", s1_clk_mux_ddr23_p, ++ ARRAY_SIZE(s1_clk_mux_ddr23_p), ++ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT | ++ CLK_MUX_READ_ONLY, ++ 0x2020, 3, 1, 0, ++ }, ++ { ++ MUX_CLK_RP_CPU_NORMAL, "s1_clk_mux_rp_cpu_normal", s1_clk_mux_rp_cpu_normal_p, ++ ARRAY_SIZE(s1_clk_mux_rp_cpu_normal_p), ++ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, ++ 0x2020, 0, 1, 0, ++ }, ++ { ++ MUX_CLK_AXI_DDR, "s1_clk_mux_axi_ddr", s1_clk_mux_axi_ddr_p, ++ ARRAY_SIZE(s1_clk_mux_axi_ddr_p), ++ CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, ++ 0x2020, 1, 1, 0, ++ }, ++}; ++ ++struct mango_clk_table pll_clk_tables = { ++ .pll_clks_num = ARRAY_SIZE(mango_root_pll_clks), ++ .pll_clks = mango_root_pll_clks, ++}; ++ ++struct mango_clk_table div_clk_tables[] = { ++ { ++ .id = S0_DIV_CLK_TABLE, ++ .div_clks_num = ARRAY_SIZE(s0_div_clks), ++ .div_clks = s0_div_clks, ++ .gate_clks_num = ARRAY_SIZE(s0_gate_clks), ++ .gate_clks = s0_gate_clks, ++ },{ ++ .id = S1_DIV_CLK_TABLE, ++ .div_clks_num = ARRAY_SIZE(s1_div_clks), ++ .div_clks = s1_div_clks, ++ .gate_clks_num = ARRAY_SIZE(s1_gate_clks), ++ .gate_clks = s1_gate_clks, ++ }, ++}; ++ ++struct mango_clk_table mux_clk_tables[] = { ++ { ++ .id = S0_MUX_CLK_TABLE, ++ .mux_clks_num = ARRAY_SIZE(s0_mux_clks), ++ .mux_clks = s0_mux_clks, ++ },{ ++ .id = S1_MUX_CLK_TABLE, ++ .mux_clks_num = ARRAY_SIZE(s1_mux_clks), ++ .mux_clks = s1_mux_clks, ++ }, ++}; ++ ++static const struct of_device_id mango_clk_match_ids_tables[] = { ++ { ++ .compatible = "mango, pll-clock", ++ .data = &pll_clk_tables, ++ }, ++ { ++ .compatible = "mango, pll-child-clock", ++ .data = div_clk_tables, ++ }, ++ { ++ .compatible = "mango, pll-mux-clock", ++ 
.data = mux_clk_tables,
++	},
++	{
++		.compatible = "mango, clk-default-rates",
++	},
++	{
++		.compatible = "mango, dm-pll-clock",
++		.data = &pll_clk_tables,
++	},
++	{
++		.compatible = "mango, dm-pll-child-clock",
++		.data = div_clk_tables,
++	},
++	{
++		.compatible = "mango, dm-pll-mux-clock",
++		.data = mux_clk_tables,
++	},
++	{
++		.compatible = "mango, dm-clk-default-rates",
++	},
++	{}
++};
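++
++/*
++ * Illustrative DT layout consumed by mango_clk_init() below; node and
++ * phandle names here are hypothetical, only the compatibles and
++ * properties are taken from the parsing code:
++ *
++ *	mpll: mpll-clock {
++ *		compatible = "mango, pll-clock";
++ *		subctrl-syscon = <&top_syscon>;
++ *		#clock-cells = <0>;
++ *		clock-output-names = "mpll_clock";
++ *	};
++ *
++ *	div_clk_s0: div-clock-socket0 {
++ *		compatible = "mango, pll-child-clock";
++ *		subctrl-syscon = <&top_syscon>;
++ *		#clock-cells = <1>;
++ *		id = <0>;	/* must match an id in div_clk_tables[] */
++ *	};
++ */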
++
++static void __init mango_clk_init(struct device_node *node)
++{
++	struct device_node *np_top;
++	struct mango_clk_data *clk_data = NULL;
++	const struct mango_clk_table *dev_data;
++	struct regmap *syscon;
++	void __iomem *base;
++	int i, ret = 0;
++	unsigned int id;
++	const char *clk_name;
++	const struct of_device_id *match = NULL;
++
++	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
++	if (!clk_data) {
++		ret = -ENOMEM;
++		goto out;
++	}
++	match = of_match_node(mango_clk_match_ids_tables, node);
++	if (match) {
++		dev_data = (struct mango_clk_table *)match->data;
++	} else {
++		pr_err("%s didn't match node data\n", __func__);
++		ret = -ENODEV;
++		goto no_match_data;
++	}
++
++	np_top = of_parse_phandle(node, "subctrl-syscon", 0);
++	if (!np_top) {
++		pr_err("%s can't get subctrl-syscon node\n",
++			__func__);
++		ret = -EINVAL;
++		goto no_match_data;
++	}
++
++	syscon = device_node_to_regmap(np_top);
++	if (IS_ERR_OR_NULL(syscon)) {
++		pr_err("%s cannot get regmap %ld\n", __func__, PTR_ERR(syscon));
++		ret = -ENODEV;
++		goto no_match_data;
++	}
++	base = of_iomap(np_top, 0);
++
++	spin_lock_init(&clk_data->lock);
++	if (of_device_is_compatible(node, "mango, pll-clock") ||
++	    of_device_is_compatible(node, "mango, dm-pll-clock")) {
++		if (!dev_data->pll_clks_num) {
++			ret = -EINVAL;
++			goto no_match_data;
++		}
++
++		clk_data->table = dev_data;
++		clk_data->base = base;
++		clk_data->syscon_top = syscon;
++
++		if (of_property_read_string(node, "clock-output-names", &clk_name)) {
++			pr_err("%s cannot get pll name for %s\n",
++				__func__, node->full_name);
++			ret = -ENODEV;
++			goto no_match_data;
++		}
++		if (of_device_is_compatible(node, "mango, pll-clock"))
++			ret = mango_register_pll_clks(node, clk_data, clk_name);
++		else
++			ret = dm_mango_register_pll_clks(node, clk_data, clk_name);
++	}
++
++	if (of_device_is_compatible(node, "mango, pll-child-clock") ||
++	    of_device_is_compatible(node, "mango, dm-pll-child-clock")) {
++		ret = of_property_read_u32(node, "id", &id);
++		if (ret) {
++			pr_err("not assigned id for %s\n", node->full_name);
++			ret = -ENODEV;
++			goto no_match_data;
++		}
++
++		/* brute-force search: match the DT "id" property against
++		 * the id of each entry in div_clk_tables[]
++		 */
++		for (i = 0; i < ARRAY_SIZE(div_clk_tables); i++) {
++			if (id == dev_data[i].id)
++				break; /* found */
++		}
++		if (i == ARRAY_SIZE(div_clk_tables)) {
++			pr_err("no div clk table with id %u for %s\n",
++				id, node->full_name);
++			ret = -EINVAL;
++			goto no_match_data;
++		}
++		clk_data->table = &dev_data[i];
++		clk_data->base = base;
++		clk_data->syscon_top = syscon;
++		if (of_device_is_compatible(node, "mango, pll-child-clock"))
++			ret = mango_register_div_clks(node, clk_data);
++		else
++			ret = dm_mango_register_div_clks(node, clk_data);
++	}
++
++	if (of_device_is_compatible(node, "mango, pll-mux-clock") ||
++	    of_device_is_compatible(node, "mango, dm-pll-mux-clock")) {
++		ret = of_property_read_u32(node, "id", &id);
++		if (ret) {
++			pr_err("not assigned id for %s\n", node->full_name);
++			ret = -ENODEV;
++			goto no_match_data;
++		}
++
++		/* brute-force search: match the DT "id" property against
++		 * the id of each entry in mux_clk_tables[]
++		 */
++		for (i = 0; i < ARRAY_SIZE(mux_clk_tables); i++) {
++			if (id == dev_data[i].id)
++				break; /* found */
++		}
++		if (i == ARRAY_SIZE(mux_clk_tables)) {
++			pr_err("no mux clk table with id %u for %s\n",
++				id, node->full_name);
++			ret = -EINVAL;
++			goto no_match_data;
++		}
++		clk_data->table = &dev_data[i];
++		clk_data->base = base;
++		clk_data->syscon_top = syscon;
++		if (of_device_is_compatible(node, "mango, pll-mux-clock"))
++			ret = mango_register_mux_clks(node, clk_data);
++		else
++			ret = dm_mango_register_mux_clks(node, clk_data);
++	}
++
++	if (of_device_is_compatible(node, "mango, clk-default-rates"))
++		ret = set_default_clk_rates(node);
++
++	if (of_device_is_compatible(node, "mango, dm-clk-default-rates"))
++		ret = dm_set_default_clk_rates(node);
++
++	if (!ret)
++		return;
++
++no_match_data:
++	kfree(clk_data);
++
++out:
++	pr_err("%s failed error number %d\n", __func__, ret);
++}
++
++CLK_OF_DECLARE(mango_clk_pll, "mango, pll-clock", mango_clk_init);
++CLK_OF_DECLARE(mango_clk_pll_child, "mango, pll-child-clock", mango_clk_init);
++CLK_OF_DECLARE(mango_clk_pll_mux, "mango, pll-mux-clock", mango_clk_init);
++CLK_OF_DECLARE(mango_clk_default_rate, "mango, clk-default-rates", mango_clk_init);
++CLK_OF_DECLARE(dm_mango_clk_pll, "mango, dm-pll-clock", mango_clk_init);
++CLK_OF_DECLARE(dm_mango_clk_pll_child, "mango, dm-pll-child-clock", mango_clk_init);
++CLK_OF_DECLARE(dm_mango_clk_pll_mux, "mango, dm-pll-mux-clock", mango_clk_init);
++CLK_OF_DECLARE(dm_mango_clk_default_rate, "mango, dm-clk-default-rates", mango_clk_init);
+diff --git a/drivers/clk/sophgo/clk.c b/drivers/clk/sophgo/clk.c
+new file mode 100644
+index 000000000000..c77f2f631a8c
+--- /dev/null
++++ b/drivers/clk/sophgo/clk.c
+@@ -0,0 +1,881 @@
++/*
++ * Copyright (c) 2022 SOPHGO
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/clk-provider.h>
++#include <linux/clkdev.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/regmap.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++
++#include "clk.h"
++
++/*
++ * @hw:		handle between common and hardware-specific interfaces
++ * @reg:	register containing the divider
++ * @shift:	shift to the divider bit field
++ * @width:	width of the divider bit field
++ * @flags:	divider flags
++ * @initial_val: initial value of the divider
++ * @table:	the div table that the divider supports
++ * @lock:	register lock
++ */
++struct mango_clk_divider {
++	struct clk_hw hw;
++	void __iomem *reg;
++	u8 shift;
++	u8 width;
++	u8 flags;
++	u32 initial_val;
++	const struct clk_div_table *table;
++	spinlock_t *lock;
++};
++
++static inline int mango_pll_enable(struct regmap *map,
++			struct mango_pll_clock *pll, bool en)
++{
++	unsigned int value;
++	unsigned long enter;
++	unsigned int id = pll->id;
++
++	if (en) {
++		/* wait for the PLL to lock */
++		enter = jiffies;
++		regmap_read(map, pll->status_offset, &value);
++		while (!((value >> (PLL_STAT_LOCK_OFFSET + id)) & 0x1)) {
++			regmap_read(map, pll->status_offset, &value);
++			if (time_after(jiffies, enter + HZ / 10)) {
++				/* give up after 100ms instead of spinning forever */
++				pr_warn("%s not locked\n", pll->name);
++				break;
++			}
++		}
++		/* wait for any pending PLL update to finish */
++		enter = jiffies;
++		regmap_read(map, pll->status_offset, &value);
++		while (((value >> id) & 0x1)) {
++			regmap_read(map, pll->status_offset, &value);
++			if (time_after(jiffies, enter + HZ / 10)) {
++				pr_warn("%s still updating\n", pll->name);
++				break;
++			}
++		}
++		/* enable pll */
++		regmap_read(map, pll->enable_offset, &value);
++		regmap_write(map, pll->enable_offset, value | (1 << id));
++	} else {
++		/* disable pll */
++		regmap_read(map, pll->enable_offset, &value);
++		regmap_write(map, pll->enable_offset, value & (~(1 << id)));
++	}
++
++	return 0;
++}
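++
++/*
++ * A minimal equivalent of the enable/disable read-modify-write above,
++ * using regmap_update_bits() (sketch only, not wired into the driver):
++ *
++ *	static int mango_pll_set_enable(struct regmap *map,
++ *					struct mango_pll_clock *pll, bool en)
++ *	{
++ *		return regmap_update_bits(map, pll->enable_offset,
++ *					  BIT(pll->id), en ? BIT(pll->id) : 0);
++ *	}
++ */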
++
++static inline int mango_pll_write(struct regmap *map, int id, int value)
++{
++	return regmap_write(map, PLL_CTRL_OFFSET + (id << 2), value);
++}
++
++static inline int mango_pll_read(struct regmap *map, int id, unsigned int *pvalue)
++{
++	return regmap_read(map, PLL_CTRL_OFFSET + (id << 2), pvalue);
++}
++
++static unsigned int _get_table_div(const struct clk_div_table *table,
++			unsigned int val)
++{
++	const struct clk_div_table *clkt;
++
++	for (clkt = table; clkt->div; clkt++)
++		if (clkt->val == val)
++			return clkt->div;
++	return 0;
++}
++
++static unsigned int _get_div(const struct clk_div_table *table,
++			unsigned int val, unsigned long flags, u8 width)
++{
++	if (flags & CLK_DIVIDER_ONE_BASED)
++		return val;
++	if (flags & CLK_DIVIDER_POWER_OF_TWO)
++		return 1 << val;
++	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
++		return val ? val : div_mask(width) + 1;
++	if (table)
++		return _get_table_div(table, val);
++	return val + 1;
++}
++
++static unsigned long mango_clk_divider_recalc_rate(struct clk_hw *hw,
++			unsigned long parent_rate)
++{
++	struct mango_clk_divider *divider = to_mango_clk_divider(hw);
++	unsigned int val;
++
++	val = readl(divider->reg) >> divider->shift;
++	val &= div_mask(divider->width);
++
++#ifdef CONFIG_ARCH_BM1880
++	/* bit 3 clear: the divider still uses its initial divide factor */
++	if (!(readl(divider->reg) & BIT(3)))
++		val = divider->initial_val;
++#endif
++
++	return divider_recalc_rate(hw, parent_rate, val, divider->table,
++				   divider->flags, divider->width);
++}
++
++static long mango_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
++			unsigned long *prate)
++{
++	int bestdiv;
++	struct mango_clk_divider *divider = to_mango_clk_divider(hw);
++
++	/* if read only, just return current value */
++	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
++		bestdiv = readl(divider->reg) >> divider->shift;
++		bestdiv &= div_mask(divider->width);
++		bestdiv = _get_div(divider->table, bestdiv, divider->flags,
++				   divider->width);
++		return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
++	}
++
++	return divider_round_rate(hw, rate, prate, divider->table,
++				  divider->width, divider->flags);
++}
++
++static int mango_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
++			unsigned long parent_rate)
++{
++	unsigned int value;
++	unsigned int val;
++	unsigned long flags = 0;
++	struct mango_clk_divider *divider = to_mango_clk_divider(hw);
++
++	value = divider_get_val(rate, parent_rate, divider->table,
++				divider->width, divider->flags);
++
++	if (divider->lock)
++		spin_lock_irqsave(divider->lock, flags);
++	else
++		__acquire(divider->lock);
++
++	/* assert the divider (clear bit 0) before reprogramming */
++	val = readl(divider->reg);
++	val &= ~0x1;
++	writel(val, divider->reg);
++
++	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
++		val = div_mask(divider->width) << (divider->shift + 16);
++	} else {
++		val = readl(divider->reg);
++		val &= ~(div_mask(divider->width) << divider->shift);
++	}
++
++	val |= value << divider->shift;
++	writel(val, divider->reg);
++
++	/* select the programmed factor (bit 3), then de-assert (bit 0) */
++	if (!(divider->flags & CLK_DIVIDER_READ_ONLY))
++		val |= 1 << 3;
++
++	val |= 1;
++	writel(val, divider->reg);
++	if (divider->lock)
++		spin_unlock_irqrestore(divider->lock, flags);
++	else
++		__release(divider->lock);
++
++	return 0;
++}
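++
++/*
++ * Divider register layout, as implied by the set_rate sequence above
++ * (sketch; field positions inferred from this file only):
++ *	bit 0			- divider assert: cleared while the factor
++ *				  is reprogrammed, set again to apply it
++ *	bit 3			- select the programmed factor instead of
++ *				  the initial (power-on) factor
++ *	bits [shift, shift+width) - the divide factor itself
++ */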
++
++/* The array below lists all supported combinations of POSTDIV1 and
++ * POSTDIV2, sorted by ascending product, for example:
++ *	postdiv1_2[0] = {2, 4, 8}
++ *		==> POSTDIV2 = 2, POSTDIV1 = 4, product = 8
++ *	postdiv1_2[16] = {6, 7, 42}
++ *		==> POSTDIV2 = 6, POSTDIV1 = 7, product = 42
++ *
++ * POSTDIV_RESULT_INDEX points to the 3rd element (the product).
++ */
++#define POSTDIV_RESULT_INDEX	2
++static const int postdiv1_2[][3] = {
++	{2, 4, 8}, {3, 3, 9}, {2, 5, 10}, {2, 6, 12},
++	{2, 7, 14}, {3, 5, 15}, {4, 4, 16}, {3, 6, 18},
++	{4, 5, 20}, {3, 7, 21}, {4, 6, 24}, {5, 5, 25},
++	{4, 7, 28}, {5, 6, 30}, {5, 7, 35}, {6, 6, 36},
++	{6, 7, 42}, {7, 7, 49}
++};
++
++/*
++ * @reg_value: current register value
++ * @parent_rate: parent frequency
++ *
++ * Calculate the output "rate" from the equation:
++ *	rate = (parent_rate / REFDIV) x FBDIV / POSTDIV1 / POSTDIV2
++ *	     = (parent_rate x FBDIV) / (REFDIV x POSTDIV1 x POSTDIV2)
++ */
++static unsigned long __pll_recalc_rate(unsigned int reg_value,
++			unsigned long parent_rate)
++{
++	unsigned int fbdiv, refdiv;
++	unsigned int postdiv1, postdiv2;
++	u64 rate, numerator, denominator;
++
++	fbdiv = (reg_value >> 16) & 0xfff;
++	refdiv = reg_value & 0x3f;
++	postdiv1 = (reg_value >> 8) & 0x7;
++	postdiv2 = (reg_value >> 12) & 0x7;
++
++	numerator = parent_rate * fbdiv;
++	denominator = refdiv * postdiv1 * postdiv2;
++	do_div(numerator, denominator);
++	rate = numerator;
++
++	return rate;
++}
++
++/*
++ * @rate: requested rate
++ * @prate: parent rate
++ * @fbdiv: feedback divider to evaluate
++ * @refdiv: reference divider to evaluate
++ * @postdiv1, @postdiv2: computed post-dividers
++ *
++ * The post-divider product is derived from
++ *	POSTDIV = (parent_rate / REFDIV) x FBDIV / input_rate
++ * where POSTDIV = POSTDIV1 * POSTDIV2
++ */
++static int __pll_get_postdiv_1_2(unsigned long rate, unsigned long prate,
++	unsigned int fbdiv, unsigned int refdiv, unsigned int *postdiv1,
++	unsigned int *postdiv2)
++{
++	int index = 0;
++	int ret = 0;
++	u64 tmp0;
++
++	/* calculate (parent_rate / refdiv),
++	 * result kept in tmp0
++	 */
++	tmp0 = prate;
++	do_div(tmp0, refdiv);
++
++	/* calculate ((parent_rate / REFDIV) x FBDIV),
++	 * result kept in tmp0
++	 */
++	tmp0 *= fbdiv;
++
++	/* calculate (((parent_rate / REFDIV) x FBDIV) / input_rate);
++	 * tmp0 now holds the required POSTDIV1 * POSTDIV2 product
++	 */
++	do_div(tmp0, rate);
++
++	/* calculate div1 and div2 value */
++	if (tmp0 <= 7) {
++		/* (div1 * div2) <= 7, no need to search the array;
++		 * a request above the VCO rate truncates to 0, clamp to 1
++		 */
++		*postdiv1 = tmp0 ? tmp0 : 1;
++		*postdiv2 = 1;
++	} else {
++		/* (div1 * div2) > 7, search the array for the first
++		 * entry whose product covers tmp0
++		 */
++		for (index = 0; index < ARRAY_SIZE(postdiv1_2); index++) {
++			if (tmp0 <= postdiv1_2[index][POSTDIV_RESULT_INDEX])
++				break; /* found it */
++		}
++		if (index < ARRAY_SIZE(postdiv1_2)) {
++			*postdiv1 = postdiv1_2[index][1];
++			*postdiv2 = postdiv1_2[index][0];
++		} else {
++			pr_debug("%s out of postdiv array range!\n", __func__);
++			ret = -ESPIPE;
++		}
++	}
++
++	return ret;
++}
++
++static int __get_pll_ctl_setting(struct mango_pll_ctrl *best,
++			unsigned long req_rate, unsigned long parent_rate)
++{
++	int ret;
++	unsigned int fbdiv, refdiv, fref, postdiv1, postdiv2;
++	unsigned long tmp = 0, foutvco;
++
++	fref = parent_rate;
++
++	for (refdiv = REFDIV_MIN; refdiv < REFDIV_MAX + 1; refdiv++) {
++		for (fbdiv = FBDIV_MIN; fbdiv < FBDIV_MAX + 1; fbdiv++) {
++			foutvco = fref * fbdiv / refdiv;
++			/* keep the VCO output and the PFD input in range */
++			if (foutvco < PLL_FREQ_MIN || foutvco > PLL_FREQ_MAX
++					|| (fref / refdiv) < 10)
++				continue;
++
++			ret = __pll_get_postdiv_1_2(req_rate, fref, fbdiv,
++					refdiv, &postdiv1, &postdiv2);
++			if (ret)
++				continue;
++
++			tmp = foutvco / (postdiv1 * postdiv2);
++			if (abs_diff(tmp, req_rate) < abs_diff(best->freq, req_rate)) {
++				best->freq = tmp;
++				best->refdiv = refdiv;
++				best->fbdiv = fbdiv;
++				best->postdiv1 = postdiv1;
++				best->postdiv2 = postdiv2;
++				if (tmp == req_rate)
++					return 0;
++			}
++		}
++	}
++
++	return 0;
++}
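++
++/*
++ * Worked example (illustrative values; the reference frequency is an
++ * assumption, not taken from this file): TOP_PLL_CTRL(96, 2, 1, 1)
++ * packs FBDIV=96, POSTDIV1=2, POSTDIV2=1, REFDIV=1 into 0x00601201,
++ * and with a 25 MHz parent __pll_recalc_rate() returns
++ *	(25 MHz x 96) / (1 x 2 x 1) = 1200 MHz.
++ */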
++
++/*
++ * @hw: clk_hw handle used to look up the owning mango_pll_clock
++ * @parent_rate: parent rate
++ *
++ * This function is called through clk_get_rate and returns the
++ * current rate after decoding the register value.
++ */
++static unsigned long mango_clk_pll_recalc_rate(struct clk_hw *hw,
++			unsigned long parent_rate)
++{
++	unsigned int value;
++	unsigned long rate;
++	struct mango_pll_clock *mango_pll = to_mango_pll_clk(hw);
++
++	mango_pll_read(mango_pll->syscon_top, mango_pll->id, &value);
++	rate = __pll_recalc_rate(value, parent_rate);
++	return rate;
++}
++
++static long mango_clk_pll_round_rate(struct clk_hw *hw,
++			unsigned long req_rate, unsigned long *prate)
++{
++	unsigned int value;
++	struct mango_pll_ctrl pctrl_table;
++	struct mango_pll_clock *mango_pll = to_mango_pll_clk(hw);
++	long proper_rate;
++
++	memset(&pctrl_table, 0, sizeof(struct mango_pll_ctrl));
++
++	/* search the {refdiv, fbdiv, postdiv1, postdiv2} combination
++	 * that best approximates req_rate for the given parent rate
++	 */
++	mango_pll_read(mango_pll->syscon_top, mango_pll->id, &value);
++	__get_pll_ctl_setting(&pctrl_table, req_rate, *prate);
++	if (!pctrl_table.freq) {
++		proper_rate = 0;
++		goto out;
++	}
++
++	value = TOP_PLL_CTRL(pctrl_table.fbdiv, pctrl_table.postdiv1,
++			pctrl_table.postdiv2, pctrl_table.refdiv);
++	proper_rate = (long)__pll_recalc_rate(value, *prate);
++
++out:
++	return proper_rate;
++}
++
++static int mango_clk_pll_determine_rate(struct clk_hw *hw,
++			struct clk_rate_request *req)
++{
++	req->rate = mango_clk_pll_round_rate(hw, min(req->rate, req->max_rate),
++			&req->best_parent_rate);
++	return 0;
++}
++
++static int mango_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
++			unsigned long parent_rate)
++{
++	unsigned long flags;
++	unsigned int value;
++	int ret = 0;
++	struct mango_pll_ctrl pctrl_table;
++	struct mango_pll_clock *mango_pll = to_mango_pll_clk(hw);
++
++	memset(&pctrl_table, 0, sizeof(struct mango_pll_ctrl));
++	spin_lock_irqsave(mango_pll->lock, flags);
++	if (mango_pll_enable(mango_pll->syscon_top, mango_pll, 0)) {
++		pr_warn("Can't disable pll(%s), status error\n", mango_pll->name);
++		goto out;
++	}
++	mango_pll_read(mango_pll->syscon_top, mango_pll->id, &value);
++	__get_pll_ctl_setting(&pctrl_table, rate, parent_rate);
++	if (!pctrl_table.freq) {
++		pr_warn("%s: Can't find a proper pll setting\n", mango_pll->name);
++		goto out;
++	}
++
++	value = TOP_PLL_CTRL(pctrl_table.fbdiv, pctrl_table.postdiv1,
++			pctrl_table.postdiv2, pctrl_table.refdiv);
++
++	/* write the value to top register */
++	mango_pll_write(mango_pll->syscon_top, mango_pll->id, value);
++	mango_pll_enable(mango_pll->syscon_top, mango_pll, 1);
++out:
++	spin_unlock_irqrestore(mango_pll->lock, flags);
++	return ret;
++}
++
++const struct clk_ops mango_clk_divider_ops = {
++	.recalc_rate = mango_clk_divider_recalc_rate,
++	.round_rate = mango_clk_divider_round_rate,
++	.set_rate = mango_clk_divider_set_rate,
++};
++
++const struct clk_ops mango_clk_divider_ro_ops = {
++	.recalc_rate = mango_clk_divider_recalc_rate,
++	.round_rate = mango_clk_divider_round_rate,
++};
++
++const struct clk_ops mango_clk_pll_ops = {
++	.recalc_rate = mango_clk_pll_recalc_rate,
++	.round_rate = mango_clk_pll_round_rate,
++	.determine_rate = mango_clk_pll_determine_rate,
++	.set_rate = mango_clk_pll_set_rate,
++};
++
++const struct clk_ops mango_clk_pll_ro_ops = {
++	.recalc_rate = mango_clk_pll_recalc_rate,
++	.round_rate = mango_clk_pll_round_rate,
++};
++
++struct mux_cb_clk_name {
++	const char *name;
++	struct list_head node;
++};
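++
++/*
++ * The notifier below is attached to every writable mux registered in
++ * register_mux_clks(). Sketch of the sequence driven by the clk core
++ * (clock name hypothetical):
++ *
++ *	clk_set_rate(rp_cpu_clk, new_rate);
++ *	  -> PRE_RATE_CHANGE: record the current parent name and switch
++ *	     the two-input mux to the spare input (set_parent(hw, !mux_id))
++ *	  -> the original parent divider chain is reprogrammed
++ *	  -> POST_RATE_CHANGE: switch the mux back (set_parent(hw, mux_id))
++ */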
++
++static struct list_head mux_cb_clk_name_list =
++	LIST_HEAD_INIT(mux_cb_clk_name_list);
++static int mux_notifier_cb(struct notifier_block *nb,
++		unsigned long event, void *data)
++{
++	int ret = 0;
++	static unsigned char mux_id = 1;
++	struct clk_notifier_data *ndata = data;
++	struct clk_hw *hw = __clk_get_hw(ndata->clk);
++	const struct clk_ops *ops = &clk_mux_ops;
++	struct mux_cb_clk_name *cb_list;
++
++	if (event == PRE_RATE_CHANGE) {
++		struct clk_hw *hw_p = clk_hw_get_parent(hw);
++
++		cb_list = kmalloc(sizeof(*cb_list), GFP_KERNEL);
++		if (cb_list) {
++			INIT_LIST_HEAD(&cb_list->node);
++			list_add_tail(&cb_list->node, &mux_cb_clk_name_list);
++		} else {
++			pr_err("failed to allocate mux callback entry\n");
++			ret = -ENOMEM;
++			goto out;
++		}
++
++		cb_list->name = clk_hw_get_name(hw_p);
++		mux_id = ops->get_parent(hw);
++		if (mux_id > 1) {
++			ret = 1;
++			goto out;
++		}
++		ops->set_parent(hw, !mux_id);
++	} else if (event == POST_RATE_CHANGE) {
++		struct clk_hw *hw_p = clk_hw_get_parent(hw);
++
++		cb_list = list_first_entry_or_null(&mux_cb_clk_name_list,
++				typeof(*cb_list), node);
++		if (cb_list) {
++			const char *pre_name = cb_list->name;
++
++			list_del_init(&cb_list->node);
++			kfree(cb_list);
++			if (strcmp(clk_hw_get_name(hw_p), pre_name))
++				goto out;
++		}
++
++		ops->set_parent(hw, mux_id);
++	}
++
++out:
++	return notifier_from_errno(ret);
++}
++
++int set_default_clk_rates(struct device_node *node)
++{
++	struct of_phandle_args clkspec;
++	int rc, index = 0;
++	struct clk *clk;
++	u32 rate;
++
++	of_property_for_each_u32(node, "clock-rates", rate) {
++		if (rate) {
++			rc = of_parse_phandle_with_args(node, "clocks",
++					"#clock-cells", index, &clkspec);
++			if (rc < 0) {
++				/* skip empty (null) phandles */
++				if (rc == -ENOENT)
++					continue;
++				else
++					return rc;
++			}
++
++			clk = of_clk_get_from_provider(&clkspec);
++			if (IS_ERR(clk)) {
++				pr_warn("clk: couldn't get clock %d for %s\n",
++					index, node->full_name);
++				return PTR_ERR(clk);
++			}
++
++			rc = clk_set_rate(clk, rate);
++			if (rc < 0)
++				pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
++					__clk_get_name(clk), rate, rc,
++					clk_get_rate(clk));
++			clk_put(clk);
++		}
++		index++;
++	}
++
++	return 0;
++}
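++
++/*
++ * Illustrative DT node handled by set_default_clk_rates() above
++ * (phandles and indices hypothetical; a rate of 0 skips the entry):
++ *
++ *	clock-init {
++ *		compatible = "mango, clk-default-rates";
++ *		clocks = <&div_clk GATE_CLK_MP0>, <&mux_clk 0>;
++ *		clock-rates = <2000000000>, <0>;
++ *	};
++ */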
++
++static struct clk *__register_divider_clks(struct device *dev, const char *name,
++					   const char *parent_name,
++					   unsigned long flags,
++					   void __iomem *reg, u8 shift,
++					   u8 width, u32 initial_val,
++					   u8 clk_divider_flags,
++					   const struct clk_div_table *table,
++					   spinlock_t *lock)
++{
++	struct mango_clk_divider *div;
++	struct clk_hw *hw;
++	struct clk_init_data init;
++	int ret;
++
++	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
++		if (width + shift > 16) {
++			pr_warn("divider value exceeds LOWORD field\n");
++			return ERR_PTR(-EINVAL);
++		}
++	}
++
++	/* allocate the divider */
++	div = kzalloc(sizeof(*div), GFP_KERNEL);
++	if (!div)
++		return ERR_PTR(-ENOMEM);
++
++	init.name = name;
++	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
++		init.ops = &mango_clk_divider_ro_ops;
++	else
++		init.ops = &mango_clk_divider_ops;
++	init.flags = flags;
++	init.parent_names = (parent_name ? &parent_name : NULL);
++	init.num_parents = (parent_name ? 1 : 0);
++
++	/* struct mango_clk_divider assignments */
++	div->reg = reg;
++	div->shift = shift;
++	div->width = width;
++	div->flags = clk_divider_flags;
++	div->lock = lock;
++	div->hw.init = &init;
++	div->table = table;
++	div->initial_val = initial_val;
++
++	/* register the clock */
++	hw = &div->hw;
++	ret = clk_hw_register(dev, hw);
++	if (ret) {
++		kfree(div);
++		return ERR_PTR(ret);
++	}
++
++	return hw->clk;
++}
++
++static inline int register_provider_clks(struct device_node *node,
++		struct mango_clk_data *clk_data, int clk_num)
++{
++	return of_clk_add_provider(node, of_clk_src_onecell_get,
++				   &clk_data->clk_data);
++}
++
++static int register_gate_clks(struct device *dev, struct mango_clk_data *clk_data)
++{
++	struct clk *clk;
++	const struct mango_clk_table *table = clk_data->table;
++	const struct mango_gate_clock *gate_clks = table->gate_clks;
++	void __iomem *base = clk_data->base;
++	int clk_num = table->gate_clks_num;
++	int i;
++
++	for (i = 0; i < clk_num; i++) {
++		clk = clk_register_gate(
++			dev, gate_clks[i].name, gate_clks[i].parent_name,
++			gate_clks[i].flags, base + gate_clks[i].offset,
++			gate_clks[i].bit_idx, gate_clks[i].gate_flags,
++			&clk_data->lock);
++		if (IS_ERR(clk)) {
++			pr_err("%s: failed to register clock %s\n", __func__,
++				gate_clks[i].name);
++			goto err;
++		}
++
++		if (gate_clks[i].alias)
++			clk_register_clkdev(clk, gate_clks[i].alias, NULL);
++
++		clk_data->clk_data.clks[gate_clks[i].id] = clk;
++	}
++
++	return 0;
++
++err:
++	while (i--)
++		clk_unregister_gate(clk_data->clk_data.clks[gate_clks[i].id]);
++
++	return PTR_ERR(clk);
++}
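++
++/*
++ * initial_sel semantics for the divider entries registered below:
++ *	MANGO_CLK_USE_INIT_VAL - register the divider read-only, keeping
++ *				 the initial (power-on) divide factor;
++ *	MANGO_CLK_USE_REG_VAL  - trust the factor already in the register
++ *				 and program a sane default via the syscon.
++ */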
++
++static int register_divider_clks(struct device *dev,
++				 struct mango_clk_data *clk_data)
++{
++	struct clk *clk;
++	const struct mango_clk_table *table = clk_data->table;
++	const struct mango_divider_clock *div_clks = table->div_clks;
++	void __iomem *base = clk_data->base;
++	int clk_num = table->div_clks_num;
++	int i;
++	unsigned int val;
++
++	for (i = 0; i < clk_num; i++) {
++		clk = __register_divider_clks(
++			NULL, div_clks[i].name, div_clks[i].parent_name,
++			div_clks[i].flags, base + div_clks[i].offset,
++			div_clks[i].shift, div_clks[i].width,
++			div_clks[i].initial_val,
++			(div_clks[i].initial_sel & MANGO_CLK_USE_INIT_VAL) ?
++				div_clks[i].div_flags | CLK_DIVIDER_READ_ONLY :
++				div_clks[i].div_flags,
++			div_clks[i].table, &clk_data->lock);
++		if (IS_ERR(clk)) {
++			pr_err("%s: failed to register clock %s\n", __func__,
++				div_clks[i].name);
++			goto err;
++		}
++
++		clk_data->clk_data.clks[div_clks[i].id] = clk;
++
++		if (div_clks[i].initial_sel == MANGO_CLK_USE_REG_VAL) {
++			regmap_read(clk_data->syscon_top, div_clks[i].offset,
++				    &val);
++
++			/*
++			 * Set a default divide factor. The divider output
++			 * must not be selected as a clock source before the
++			 * divider has been programmed through the proper
++			 * sequence (assert, set the factor, de-assert).
++			 */
++			if (div_clks[i].initial_val > 0)
++				val |= (div_clks[i].initial_val << 16 | 1 << 3);
++			else {
++				/*
++				 * the register is not yet set to use a
++				 * programmed divide factor; default the
++				 * factor field to 1
++				 */
++				if (!(val >> 3 & 0x1))
++					val |= 1 << 16;
++			}
++
++			regmap_write(clk_data->syscon_top, div_clks[i].offset,
++				     val);
++		}
++	}
++
++	return 0;
++
++err:
++	while (i--)
++		clk_unregister_divider(clk_data->clk_data.clks[div_clks[i].id]);
++
++	return PTR_ERR(clk);
++}
++
++static int register_mux_clks(struct device *dev, struct mango_clk_data *clk_data)
++{
++	struct clk *clk;
++	const struct mango_clk_table *table = clk_data->table;
++	const struct mango_mux_clock *mux_clks = table->mux_clks;
++	void __iomem *base = clk_data->base;
++	int clk_num = table->mux_clks_num;
++	int i;
++
++	for (i = 0; i < clk_num; i++) {
++		u32 mask = BIT(mux_clks[i].width) - 1;
++
++		clk = clk_register_mux_table(
++			dev, mux_clks[i].name, mux_clks[i].parent_names,
++			mux_clks[i].num_parents, mux_clks[i].flags,
++			base + mux_clks[i].offset, mux_clks[i].shift, mask,
++			mux_clks[i].mux_flags, mux_clks[i].table,
++			&clk_data->lock);
++		if (IS_ERR(clk)) {
++			pr_err("%s: failed to register clock %s\n", __func__,
++				mux_clks[i].name);
++			goto err;
++		}
++
++		clk_data->clk_data.clks[mux_clks[i].id] = clk;
++
++		if (!(mux_clks[i].flags & CLK_MUX_READ_ONLY)) {
++			struct clk *parent;
++			struct notifier_block *clk_nb;
++
++			/* Set the mux clock's default parent here; its
++			 * parent index is read back from the mux register.
++			 * The DT can still override the mux parent later.
++			 */
++			parent = clk_get_parent(clk);
++			clk_set_parent(clk, parent);
++
++			/* add a notifier callback */
++			clk_nb = kzalloc(sizeof(*clk_nb), GFP_KERNEL);
++			if (!clk_nb) {
++				clk = ERR_PTR(-ENOMEM);
++				goto err;
++			}
++			clk_nb->notifier_call = mux_notifier_cb;
++			if (clk_notifier_register(clk, clk_nb))
++				pr_err("%s: failed to register clock notifier for %s\n",
++					__func__, mux_clks[i].name);
++		}
++	}
++
++	return 0;
++
++err:
++	while (i--)
++		clk_unregister_mux(clk_data->clk_data.clks[mux_clks[i].id]);
++
++	return PTR_ERR(clk);
++}
++
++/* pll clock init */
++int mango_register_pll_clks(struct device_node *node,
++		struct mango_clk_data *clk_data, const char *clk_name)
++{
++	struct clk *clk = NULL;
++	struct mango_pll_clock *pll_clks;
++	int i, ret = 0;
++	const struct clk_ops *local_ops;
++
++	pll_clks = (struct mango_pll_clock *)clk_data->table->pll_clks;
++	for (i = 0; i < clk_data->table->pll_clks_num; i++) {
++		if (!strcmp(clk_name, pll_clks[i].name)) {
++			/* pll_clks[i].syscon_top has to be assigned first,
++			 * since clk_register_composite will need it
++			 * to calculate the current rate.
++			 */
++			pll_clks[i].syscon_top = clk_data->syscon_top;
++			pll_clks[i].lock = &clk_data->lock;
++			if (pll_clks[i].ini_flags & MANGO_CLK_RO)
++				local_ops = &mango_clk_pll_ro_ops;
++			else
++				local_ops = &mango_clk_pll_ops;
++			clk = clk_register_composite(
++				NULL, pll_clks[i].name, &pll_clks[i].parent_name,
++				1, NULL, NULL, &pll_clks[i].hw, local_ops,
++				NULL, NULL, pll_clks[i].flags);
++
++			if (IS_ERR(clk)) {
++				pr_err("%s: failed to register clock %s\n", __func__,
++					pll_clks[i].name);
++				ret = -EINVAL;
++				goto out;
++			}
++			ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
++			if (ret)
++				clk_unregister(clk);
++		}
++	}
++
++out:
++	return ret;
++}
++
++/* mux clk init */
++int mango_register_mux_clks(struct device_node *node, struct mango_clk_data *clk_data)
++{
++	int ret;
++	int count;
++	struct clk **clk_table;
++
++	count = clk_data->table->mux_clks_num + clk_data->table->gate_clks_num;
++	clk_table = kcalloc(count, sizeof(*clk_table), GFP_KERNEL);
++	if (!clk_table)
++		return -ENOMEM;
++
++	clk_data->clk_data.clks = clk_table;
++	clk_data->clk_data.clk_num = count;
++
++	ret = register_mux_clks(NULL, clk_data);
++	if (ret)
++		goto err;
++
++	ret = register_gate_clks(NULL, clk_data);
++	if (ret)
++		goto err;
++
++	ret = register_provider_clks(node, clk_data, count);
++	if (ret)
++		goto err;
++
++	return 0;
++err:
++	kfree(clk_table);
++	return ret;
++}
++
++/* pll divider init */
++int mango_register_div_clks(struct device_node *node, struct mango_clk_data *clk_data)
++{
++	int ret;
++	int count;
++	struct clk **clk_table;
++
++	count = clk_data->table->div_clks_num + clk_data->table->gate_clks_num;
++	clk_table = kcalloc(count, sizeof(*clk_table), GFP_KERNEL);
++	if (!clk_table)
++		return -ENOMEM;
++
++	clk_data->clk_data.clks = clk_table;
++	clk_data->clk_data.clk_num = count;
++
++	ret = register_divider_clks(NULL, clk_data);
++	if (ret)
++		goto err;
++
++	ret = register_gate_clks(NULL, clk_data);
++	if (ret)
++		goto err;
++
++	ret = register_provider_clks(node, clk_data, count);
++	if (ret)
++		goto err;
++
++	return 0;
++err:
++	kfree(clk_table);
++	pr_err("%s error %d\n", __func__, ret);
++	return ret;
++}
+diff --git a/drivers/clk/sophgo/clk.h b/drivers/clk/sophgo/clk.h
+new file mode 100644
+index 000000000000..81e9f9eb1b20
+--- /dev/null
++++ b/drivers/clk/sophgo/clk.h
+@@ -0,0 +1,152 @@
++#ifndef __SOPHGO_CLOCK__
++#define __SOPHGO_CLOCK__
++
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include
++
++#define KHZ 1000L
++#define MHZ (KHZ * KHZ)
++
++#define MANGO_CLK_USE_INIT_VAL	BIT(0) /* use the initial divide factor */
++#define MANGO_CLK_USE_REG_VAL	BIT(1) /* use the register divide factor */
++#define MANGO_CLK_RO		BIT(2) /* register the clock read-only */
++
++#define CLK_PLL	BIT(0)
++#define CLK_MUX	BIT(1)
++
++#define PLL_CTRL_OFFSET		0xE8
++#define PLL_STAT_LOCK_OFFSET	0x8
++#define CLK_MODE		0x4
++#define CLK_MODE_MASK		0x3
++
++#define REFDIV_MIN	1
++#define REFDIV_MAX	64
++#define FBDIV_MIN	16
++#define FBDIV_MAX	321
++
++#define PLL_FREQ_MIN	(16 * MHZ)
++#define PLL_FREQ_MAX	(3200 * MHZ)
++
++#define div_mask(width) ((1 << (width)) - 1)
++#define TOP_PLL_CTRL(fbdiv, p1, p2, refdiv) \
++	(((fbdiv & 0xfff) << 16) | ((p2 & 0x7) << 12) | ((p1 & 0x7) << 8) | (refdiv & 0x3f))
++
++struct mango_pll_ctrl {
++	unsigned int mode;
++	unsigned long freq;
++
++	unsigned int fbdiv;
++	unsigned int postdiv1;
++	unsigned int postdiv2;
++	unsigned int refdiv;
++};
++
++struct mango_pll_clock {
++	unsigned int id;
++	char *name;
++	const char *parent_name;
++	unsigned long flags;
++	struct clk_hw hw;
++	struct regmap *syscon_top;
++
++	/* lock protecting the PLL top register during writes */
++	spinlock_t *lock;
++	u32 ini_flags;
++
++	u32 status_offset;
++	u32 enable_offset;
++
++	struct mango_pll_ctrl pctrl_table[4];
++};
++
++#define to_mango_pll_clk(_hw) container_of(_hw, struct mango_pll_clock, hw)
++
++#define to_mango_clk_divider(_hw) \
++	container_of(_hw, struct mango_clk_divider, hw)
++
++#define to_mango_clk_mux(nb) \
++	container_of(nb, struct mango_mux_clock, clk_nb)
++
++struct mango_divider_clock {
++	unsigned int id;
++	const char *name;
++	const char *parent_name;
++	unsigned long flags;
++	unsigned long offset;
++	u8 shift;
++	u8 width;
++	u8 div_flags;
++	u32 initial_sel;
++	u32 initial_val;
++	struct clk_div_table *table;
++};
++
++struct mango_mux_clock {
++	unsigned int id;
++	const char *name;
++	const char *const *parent_names;
++	u8 num_parents;
++	unsigned long flags;
++	unsigned long offset;
++	u8 shift;
++	u8 width;
++	u8 mux_flags;
++	u32 *table;
++
++	struct notifier_block clk_nb;
++};
++
++struct mango_gate_clock {
++	unsigned int id;
++	const char *name;
++	const char *parent_name;
++	unsigned long flags;
++	unsigned long offset;
++	u8 bit_idx;
++	u8 gate_flags;
++	const char *alias;
++};
++
++struct mango_clk_table {
++	u32 id;
++	u32 pll_clks_num;
++	u32 div_clks_num;
++	u32 gate_clks_num;
++	u32 mux_clks_num;
++
++	const struct mango_pll_clock *pll_clks;
++	const struct mango_divider_clock *div_clks;
++	const struct mango_gate_clock *gate_clks;
++	const struct mango_mux_clock *mux_clks;
++};
++
++struct mango_clk_data {
++	void __iomem *base;
++	spinlock_t lock;
++	struct regmap *syscon_top;
++	struct clk_onecell_data clk_data;
++	const struct mango_clk_table *table;
++};
++
++int mango_register_mux_clks(struct device_node *node,
++			    struct mango_clk_data *clk_data);
++int mango_register_div_clks(struct device_node *node,
++			    struct mango_clk_data *clk_data);
++int mango_register_pll_clks(struct device_node *node,
++			    struct mango_clk_data *clk_data, const char *clk_name);
++int set_default_clk_rates(struct device_node *node);
++
++int dm_mango_register_mux_clks(struct device_node *node,
++			       struct mango_clk_data *clk_data);
++int dm_mango_register_div_clks(struct device_node *node,
++			       struct mango_clk_data *clk_data);
++int dm_mango_register_pll_clks(struct device_node *node,
++			       struct mango_clk_data *clk_data, const char *name);
++int dm_set_default_clk_rates(struct device_node *node);
++#endif
+diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig
+new file mode 100644
+index 000000000000..fe905e7cf2d3
+--- /dev/null
++++ b/drivers/clk/spacemit/Kconfig
+@@ -0,0 +1,9 @@
++# SPDX-License-Identifier: GPL-2.0
++# common clock support for SPACEMIT SoC family.
++
++config SPACEMIT_K1X_CCU
++	tristate "Clock support for Spacemit k1x SoCs"
++	depends on SOC_SPACEMIT_K1X
++	help
++	  Build the clock controller driver for Spacemit K1x SoCs.
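++
++# For reference, a minimal config fragment that enables this driver
++# (illustrative; assumes the SoC option is selected elsewhere):
++#   CONFIG_SOC_SPACEMIT_K1X=y
++#   CONFIG_SPACEMIT_K1X_CCU=y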
++ +diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile +new file mode 100644 +index 000000000000..6bfb749658d7 +--- /dev/null ++++ b/drivers/clk/spacemit/Makefile +@@ -0,0 +1,11 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Spacemit Clock specific Makefile ++# ++ ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu-spacemit-k1x.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_mix.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_pll.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_dpll.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_ddn.o ++obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_ddr.o +diff --git a/drivers/clk/spacemit/ccu-spacemit-k1x.c b/drivers/clk/spacemit/ccu-spacemit-k1x.c +new file mode 100644 +index 000000000000..e4b176b39247 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu-spacemit-k1x.c +@@ -0,0 +1,2123 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit k1x clock controller driver ++ * ++ * Copyright (c) 2023, spacemit Corporation. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "ccu-spacemit-k1x.h" ++#include "ccu_mix.h" ++#include "ccu_pll.h" ++#include "ccu_ddn.h" ++#include "ccu_dpll.h" ++#include "ccu_ddr.h" ++ ++DEFINE_SPINLOCK(g_cru_lock); ++ ++/* APBS register offset */ ++/* pll1 */ ++#define APB_SPARE1_REG 0x100 ++#define APB_SPARE2_REG 0x104 ++#define APB_SPARE3_REG 0x108 ++/* pll2 */ ++#define APB_SPARE7_REG 0x118 ++#define APB_SPARE8_REG 0x11c ++#define APB_SPARE9_REG 0x120 ++/* pll3 */ ++#define APB_SPARE10_REG 0x124 ++#define APB_SPARE11_REG 0x128 ++#define APB_SPARE12_REG 0x12c ++/* end of APBS register offset */ ++ ++/* APBC register offset */ ++#define APBC_UART1_CLK_RST 0x0 ++#define APBC_UART2_CLK_RST 0x4 ++#define APBC_GPIO_CLK_RST 0x8 ++#define APBC_PWM0_CLK_RST 0xc ++#define APBC_PWM1_CLK_RST 0x10 ++#define APBC_PWM2_CLK_RST 0x14 ++#define APBC_PWM3_CLK_RST 0x18 ++#define APBC_TWSI8_CLK_RST 0x20 ++#define APBC_UART3_CLK_RST 0x24 ++#define APBC_RTC_CLK_RST 0x28 ++#define APBC_TWSI0_CLK_RST 0x2c ++#define APBC_TWSI1_CLK_RST 0x30 ++#define APBC_TIMERS1_CLK_RST 0x34 ++#define APBC_TWSI2_CLK_RST 0x38 ++#define APBC_AIB_CLK_RST 0x3c ++#define APBC_TWSI4_CLK_RST 0x40 ++#define APBC_TIMERS2_CLK_RST 0x44 ++#define APBC_ONEWIRE_CLK_RST 0x48 ++#define APBC_TWSI5_CLK_RST 0x4c ++#define APBC_DRO_CLK_RST 0x58 ++#define APBC_IR_CLK_RST 0x5c ++#define APBC_TWSI6_CLK_RST 0x60 ++#define APBC_COUNTER_CLK_SEL 0x64 ++ ++#define APBC_TWSI7_CLK_RST 0x68 ++#define APBC_TSEN_CLK_RST 0x6c ++ ++#define APBC_UART4_CLK_RST 0x70 ++#define APBC_UART5_CLK_RST 0x74 ++#define APBC_UART6_CLK_RST 0x78 ++#define APBC_SSP3_CLK_RST 0x7c ++ ++#define APBC_SSPA0_CLK_RST 0x80 ++#define APBC_SSPA1_CLK_RST 0x84 ++ ++#define APBC_IPC_AP2AUD_CLK_RST 0x90 ++#define APBC_UART7_CLK_RST 0x94 ++#define APBC_UART8_CLK_RST 0x98 ++#define APBC_UART9_CLK_RST 0x9c ++ ++#define APBC_CAN0_CLK_RST 0xa0 ++#define APBC_PWM4_CLK_RST 0xa8 ++#define APBC_PWM5_CLK_RST 0xac ++#define APBC_PWM6_CLK_RST 0xb0 ++#define APBC_PWM7_CLK_RST 0xb4 ++#define APBC_PWM8_CLK_RST 0xb8 ++#define APBC_PWM9_CLK_RST 0xbc ++#define APBC_PWM10_CLK_RST 0xc0 ++#define APBC_PWM11_CLK_RST 0xc4 ++#define APBC_PWM12_CLK_RST 0xc8 ++#define APBC_PWM13_CLK_RST 0xcc ++#define APBC_PWM14_CLK_RST 0xd0 ++#define APBC_PWM15_CLK_RST 0xd4 ++#define APBC_PWM16_CLK_RST 0xd8 ++#define APBC_PWM17_CLK_RST 0xdc ++#define APBC_PWM18_CLK_RST 0xe0 ++#define APBC_PWM19_CLK_RST 0xe4 ++/* end of APBC register offset */ ++ ++/* MPMU register offset */ ++#define MPMU_POSR 0x10 ++#define POSR_PLL1_LOCK BIT(27) ++#define 
POSR_PLL2_LOCK BIT(28)
++#define POSR_PLL3_LOCK BIT(29)
++
++#define MPMU_VRCR 0x18
++#define MPMU_VRCR_REQ_EN0 BIT(0)
++#define MPMU_VRCR_REQ_EN2 BIT(2)
++#define MPMU_VRCR_REQ_POL2 BIT(6)
++#define MPMU_VRCR_VCXO_OUT_REQ_EN2 BIT(14)
++
++#define MPMU_WDTPCR 0x200
++#define MPMU_RIPCCR 0x210
++#define MPMU_ACGR 0x1024
++#define MPMU_SUCCR 0x14
++#define MPMU_ISCCR 0x44
++#define MPMU_SUCCR_1 0x10b0
++#define MPMU_APBCSCR 0x1050
++
++/* end of MPMU register offset */
++
++/* APMU register offset */
++#define APMU_JPG_CLK_RES_CTRL 0x20
++#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x24
++#define APMU_ISP_CLK_RES_CTRL 0x38
++#define APMU_LCD_CLK_RES_CTRL1 0x44
++#define APMU_LCD_SPI_CLK_RES_CTRL 0x48
++#define APMU_LCD_CLK_RES_CTRL2 0x4c
++#define APMU_CCIC_CLK_RES_CTRL 0x50
++#define APMU_SDH0_CLK_RES_CTRL 0x54
++#define APMU_SDH1_CLK_RES_CTRL 0x58
++#define APMU_USB_CLK_RES_CTRL 0x5c
++#define APMU_QSPI_CLK_RES_CTRL 0x60
++#define APMU_DMA_CLK_RES_CTRL 0x64
++#define APMU_AES_CLK_RES_CTRL 0x68
++#define APMU_VPU_CLK_RES_CTRL 0xa4
++#define APMU_GPU_CLK_RES_CTRL 0xcc
++#define APMU_SDH2_CLK_RES_CTRL 0xe0
++#define APMU_PMUA_MC_CTRL 0xe8
++#define APMU_PMU_CC2_AP 0x100
++#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
++
++#define APMU_AUDIO_CLK_RES_CTRL 0x14c
++#define APMU_HDMI_CLK_RES_CTRL 0x1B8
++#define APMU_CCI550_CLK_CTRL 0x300
++#define APMU_ACLK_CLK_CTRL 0x388
++#define APMU_CPU_C0_CLK_CTRL 0x38C
++#define APMU_CPU_C1_CLK_CTRL 0x390
++
++#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
++#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
++#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
++
++#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
++#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
++
++#define APMU_DFC_AP 0x180
++#define APMU_DFC_STATUS 0x188
++
++#define APMU_DFC_LEVEL0 0x190
++#define APMU_DFC_LEVEL1 0x194
++#define APMU_DFC_LEVEL2 0x198
++#define APMU_DFC_LEVEL3 0x19c
++#define APMU_DFC_LEVEL4 0x1a0
++#define APMU_DFC_LEVEL5 0x1a4
++#define APMU_DFC_LEVEL6 0x1a8
++#define APMU_DFC_LEVEL7 0x1ac
++
++#define APMU_DPLL1_CLK_CTRL1 0x39c
++#define APMU_DPLL1_CLK_CTRL2 0x3a0
++#define APMU_DPLL2_CLK_CTRL1 0x3a8
++#define APMU_DPLL2_CLK_CTRL2 0x3ac
++/* end of APMU register offset */
++
++/* APBC2 register offset */
++#define APBC2_UART1_CLK_RST 0x00
++#define APBC2_SSP2_CLK_RST 0x04
++#define APBC2_TWSI3_CLK_RST 0x08
++#define APBC2_RTC_CLK_RST 0x0c
++#define APBC2_TIMERS0_CLK_RST 0x10
++#define APBC2_KPC_CLK_RST 0x14
++#define APBC2_GPIO_CLK_RST 0x1c
++/* end of APBC2 register offset */
++
++/* RCPU register offset */
++#define RCPU_HDMI_CLK_RST 0x2044
++#define RCPU_CAN_CLK_RST 0x4c
++#define RCPU_I2C0_CLK_RST 0x30
++
++#define RCPU_SSP0_CLK_RST 0x28
++#define RCPU_IR_CLK_RST 0x48
++#define RCPU_UART0_CLK_RST 0xd8
++#define RCPU_UART1_CLK_RST 0x3c
++/* end of RCPU register offset */
++
++/* RCPU2 register offset */
++#define RCPU2_PWM0_CLK_RST 0x00
++#define RCPU2_PWM1_CLK_RST 0x04
++#define RCPU2_PWM2_CLK_RST 0x08
++#define RCPU2_PWM3_CLK_RST 0x0c
++#define RCPU2_PWM4_CLK_RST 0x10
++#define RCPU2_PWM5_CLK_RST 0x14
++#define RCPU2_PWM6_CLK_RST 0x18
++#define RCPU2_PWM7_CLK_RST 0x1c
++#define RCPU2_PWM8_CLK_RST 0x20
++#define RCPU2_PWM9_CLK_RST 0x24
++/* end of RCPU2 register offset */
++
++struct spacemit_k1x_clk k1x_clock_controller;
++
++static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = {
++	PLL_RATE(3000000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3f, 0xe00000),
++	PLL_RATE(3200000000UL, 0x67, 0xdd, 0x50, 0x00, 0x43, 0xeaaaab),
++	PLL_RATE(2457600000UL, 0x64, 0xdd, 0x50, 0x00, 0x33, 0x0ccccd),
++
PLL_RATE(2800000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3a, 0x155555), ++}; ++ ++static const struct ccu_pll_rate_tbl pll3_rate_tbl[] = { ++ PLL_RATE(1600000000UL, 0x61, 0xcd, 0x50, 0x00, 0x43, 0xeaaaab), ++ PLL_RATE(1800000000UL, 0x61, 0xcd, 0x50, 0x00, 0x4b, 0x000000), ++ PLL_RATE(2000000000UL, 0x62, 0xdd, 0x50, 0x00, 0x2a, 0xeaaaab), ++ PLL_RATE(3000000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3f, 0xe00000), ++ PLL_RATE(3200000000UL, 0x67, 0xdd, 0x50, 0x00, 0x43, 0xeaaaab), ++ PLL_RATE(2457600000UL, 0x64, 0xdd, 0x50, 0x00, 0x33, 0x0ccccd), ++}; ++ ++static SPACEMIT_CCU_PLL(pll2, "pll2", &pll2_rate_tbl, ++ ARRAY_SIZE(pll2_rate_tbl), ++ BASE_TYPE_APBS, APB_SPARE7_REG, APB_SPARE8_REG, APB_SPARE9_REG, ++ MPMU_POSR, POSR_PLL2_LOCK, 1, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_PLL(pll3, "pll3", &pll3_rate_tbl, ++ ARRAY_SIZE(pll3_rate_tbl), ++ BASE_TYPE_APBS, APB_SPARE10_REG, APB_SPARE11_REG, APB_SPARE12_REG, ++ MPMU_POSR, POSR_PLL3_LOCK, 1, ++ CLK_IGNORE_UNUSED); ++ ++/* pll1 */ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d2, "pll1_d2", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(1), BIT(1), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d3, "pll1_d3", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(2), BIT(2), 0x0, ++ 3, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d4, "pll1_d4", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(3), BIT(3), 0x0, ++ 4, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d5, "pll1_d5", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(4), BIT(4), 0x0, ++ 5, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d6, "pll1_d6", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(5), BIT(5), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d7, "pll1_d7", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(6), BIT(6), 0x0, ++ 7, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d8, "pll1_d8", "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(7), BIT(7), 0x0, ++ 8, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d11_223p4, "pll1_d11_223p4", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(15), BIT(15), 0x0, ++ 11, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d13_189, "pll1_d13_189", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(16), BIT(16), 0x0, ++ 13, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d23_106p8, "pll1_d23_106p8", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(20), BIT(20), 0x0, ++ 23, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d64_38p4, "pll1_d64_38p4", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(0), BIT(0), 0x0, ++ 64, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_aud_245p7, "pll1_aud_245p7", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(10), BIT(10), 0x0, ++ 10, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_aud_24p5, "pll1_aud_24p5", ++ "pll1_2457p6_vco", ++ BASE_TYPE_APBS, APB_SPARE2_REG, ++ BIT(11), BIT(11), 0x0, ++ 100, 1, CLK_IGNORE_UNUSED); ++ ++/* pll2 */ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d1, "pll2_d1", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(0), BIT(0), 0x0, ++ 1, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d2, "pll2_d2", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(1), BIT(1), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++static 
SPACEMIT_CCU_GATE_FACTOR(pll2_d3, "pll2_d3", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(2), BIT(2), 0x0, ++ 3, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d4, "pll2_d4", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(3), BIT(3), 0x0, ++ 4, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d5, "pll2_d5", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(4), BIT(4), 0x0, ++ 5, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d6, "pll2_d6", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(5), BIT(5), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d7, "pll2_d7", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(6), BIT(6), 0x0, ++ 7, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll2_d8, "pll2_d8", "pll2", ++ BASE_TYPE_APBS, APB_SPARE8_REG, ++ BIT(7), BIT(7), 0x0, ++ 8, 1, CLK_IGNORE_UNUSED); ++ ++/* pll3 */ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d1, "pll3_d1", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(0), BIT(0), 0x0, ++ 1, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d2, "pll3_d2", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(1), BIT(1), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d3, "pll3_d3", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(2), BIT(2), 0x0, ++ 3, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d4, "pll3_d4", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(3), BIT(3), 0x0, ++ 4, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d5, "pll3_d5", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(4), BIT(4), 0x0, ++ 5, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d6, "pll3_d6", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(5), BIT(5), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d7, "pll3_d7", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(6), BIT(6), 0x0, ++ 7, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll3_d8, "pll3_d8", "pll3", ++ BASE_TYPE_APBS, APB_SPARE11_REG, ++ BIT(7), BIT(7), 0x0, ++ 8, 1, CLK_IGNORE_UNUSED); ++ ++/* pll3_div */ ++static SPACEMIT_CCU_FACTOR(pll3_80, "pll3_80", "pll3_d8", ++ 5, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll3_40, "pll3_40", "pll3_d8", ++ 10, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll3_20, "pll3_20", "pll3_d8", ++ 20, 1); ++ ++/* pll1_d8 */ ++static SPACEMIT_CCU_GATE(pll1_d8_307p2, "pll1_d8_307p2", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(13), BIT(13), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d32_76p8, ++ "pll1_d32_76p8", "pll1_d8_307p2", ++ 4, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d40_61p44, ++ "pll1_d40_61p44", "pll1_d8_307p2", ++ 5, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d16_153p6, ++ "pll1_d16_153p6", "pll1_d8", ++ 2, 1); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d24_102p4, ++ "pll1_d24_102p4", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(12), BIT(12), 0x0, ++ 3, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d48_51p2, ++ "pll1_d48_51p2", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(7), BIT(7), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d48_51p2_ap, ++ "pll1_d48_51p2_ap", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(11), BIT(11), 0x0, ++ 6, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_m3d128_57p6, ++ "pll1_m3d128_57p6", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(8), BIT(8), 0x0, ++ 16, 3, CLK_IGNORE_UNUSED); ++ ++static 
SPACEMIT_CCU_GATE_FACTOR(pll1_d96_25p6, ++ "pll1_d96_25p6", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(4), BIT(4), 0x0, ++ 12, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d192_12p8, ++ "pll1_d192_12p8", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(3), BIT(3), 0x0, ++ 24, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d192_12p8_wdt, ++ "pll1_d192_12p8_wdt", ++ "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(19), BIT(19), 0x0, ++ 24, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d384_6p4, ++ "pll1_d384_6p4", "pll1_d8", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(2), BIT(2), 0x0, ++ 48, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d768_3p2, ++ "pll1_d768_3p2", "pll1_d384_6p4", ++ 2, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d1536_1p6, ++ "pll1_d1536_1p6", "pll1_d384_6p4", ++ 4, 1); ++ ++static SPACEMIT_CCU_FACTOR(pll1_d3072_0p8, ++ "pll1_d3072_0p8", "pll1_d384_6p4", ++ 8, 1); ++ ++/* pll1_d7 */ ++static SPACEMIT_CCU_FACTOR(pll1_d7_351p08, ++ "pll1_d7_351p08", "pll1_d7", ++ 1, 1); ++ ++/* pll1_d6 */ ++static SPACEMIT_CCU_GATE(pll1_d6_409p6, ++ "pll1_d6_409p6", "pll1_d6", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(0), BIT(0), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d12_204p8, ++ "pll1_d12_204p8", "pll1_d6", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(5), BIT(5), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++/* pll1_d5 */ ++static SPACEMIT_CCU_GATE(pll1_d5_491p52, ++ "pll1_d5_491p52", "pll1_d5", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(21), BIT(21), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d10_245p76, ++ "pll1_d10_245p76", "pll1_d5", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(18), BIT(18), 0x0, ++ 2, 1, CLK_IGNORE_UNUSED); ++ ++/* pll1_d4 */ ++static SPACEMIT_CCU_GATE(pll1_d4_614p4, ++ "pll1_d4_614p4", "pll1_d4", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(15), BIT(15), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d52_47p26, ++ "pll1_d52_47p26", "pll1_d4", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(10), BIT(10), 0x0, ++ 13, 1, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_GATE_FACTOR(pll1_d78_31p5, ++ "pll1_d78_31p5", "pll1_d4", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(6), BIT(6), 0x0, ++ 39, 2, CLK_IGNORE_UNUSED); ++ ++/* pll1_d3 */ ++static SPACEMIT_CCU_GATE(pll1_d3_819p2, ++ "pll1_d3_819p2", "pll1_d3", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(14), BIT(14), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++/* pll1_d2 */ ++static SPACEMIT_CCU_GATE(pll1_d2_1228p8, ++ "pll1_d2_1228p8", "pll1_d2", ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(16), BIT(16), 0x0, ++ CLK_IGNORE_UNUSED); ++ ++/* dpll */ ++static const struct ccu_dpll_rate_tbl dpll1_rate_tbl[] = { ++ DPLL_RATE(2400000000UL, 0x00, 0x00, 0x20, ++ 0x2a, 0x32, 0x64, 0xdd, 0x50), ++ DPLL_RATE(2400000000UL, 0x00, 0x3b, 0x20, ++ 0x2a, 0x32, 0x64, 0xdd, 0x50), ++}; ++ ++static const struct ccu_dpll_rate_tbl dpll2_rate_tbl[] = { ++ DPLL_RATE(3200000000UL, 0x55, 0x55, 0x3d, ++ 0x2a, 0x43, 0x67, 0xdd, 0x50), ++}; ++ ++static SPACEMIT_CCU_DPLL(dpll1, "dpll1", &dpll1_rate_tbl, ++ ARRAY_SIZE(dpll1_rate_tbl), ++ BASE_TYPE_APMU, APMU_DPLL1_CLK_CTRL1, APMU_DPLL1_CLK_CTRL2, ++ 0, CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_DPLL(dpll2, "dpll2", &dpll2_rate_tbl, ++ ARRAY_SIZE(dpll2_rate_tbl), ++ BASE_TYPE_APMU, APMU_DPLL2_CLK_CTRL1, APMU_DPLL2_CLK_CTRL2, ++ 0, CLK_IGNORE_UNUSED); ++ ++static const char * const dfc_lvl_parents[] = { ++ "dpll2", "dpll1" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl0, "dfc_lvl0", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL0, ++ 14, 2, 8, 
1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl1, "dfc_lvl1", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL1, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl2, "dfc_lvl2", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL2, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl3, "dfc_lvl3", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL3, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl4, "dfc_lvl4", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL4, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl5, "dfc_lvl5", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL5, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl6, "dfc_lvl6", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL6, ++ 14, 2, 8, 1, 0); ++ ++static SPACEMIT_CCU_DIV_MUX(dfc_lvl7, "dfc_lvl7", ++ dfc_lvl_parents, BASE_TYPE_APMU, APMU_DFC_LEVEL7, ++ 14, 2, 8, 1, 0); ++ ++static const char * const ddr_clk_parents[] = { ++ "dfc_lvl0", "dfc_lvl1", "dfc_lvl2", "dfc_lvl3", ++ "dfc_lvl4", "dfc_lvl5", "dfc_lvl6", "dfc_lvl7" ++}; ++ ++static SPACEMIT_CCU_DDR_FC(ddr, "ddr", ddr_clk_parents, ++ BASE_TYPE_APMU, APMU_DFC_AP, BIT(0), ++ 1, 3, 0); ++ ++static struct ccu_ddn_info uart_ddn_mask_info = { ++ .factor = 2, ++ .num_mask = 0x1fff, ++ .den_mask = 0x1fff, ++ .num_shift = 16, ++ .den_shift = 0, ++}; ++ ++static struct ccu_ddn_tbl slow_uart1_tbl[] = { ++ {.num = 125, .den = 24}, ++}; ++ ++static struct ccu_ddn_tbl slow_uart2_tbl[] = { ++ {.num = 6144, .den = 960}, ++}; ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(slow_uart, ++ "slow_uart", NULL, ++ BASE_TYPE_MPMU, MPMU_ACGR, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DDN(slow_uart1_14p74, ++ "slow_uart1_14p74", "pll1_d16_153p6", ++ &uart_ddn_mask_info, &slow_uart1_tbl, ++ ARRAY_SIZE(slow_uart1_tbl), ++ BASE_TYPE_MPMU, MPMU_SUCCR, ++ CLK_IGNORE_UNUSED); ++ ++static SPACEMIT_CCU_DDN(slow_uart2_48, ++ "slow_uart2_48", "pll1_d4_614p4", ++ &uart_ddn_mask_info, &slow_uart2_tbl, ++ ARRAY_SIZE(slow_uart2_tbl), ++ BASE_TYPE_MPMU, MPMU_SUCCR_1, ++ CLK_IGNORE_UNUSED); ++ ++static const char * const uart_parent_names[] = { ++ "pll1_m3d128_57p6", "slow_uart1_14p74", "slow_uart2_48" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(uart1_clk, "uart1_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart2_clk, "uart2_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART2_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart3_clk, "uart3_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART3_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart4_clk, "uart4_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART4_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart5_clk, "uart5_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART5_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart6_clk, "uart6_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART6_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart7_clk, "uart7_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART7_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart8_clk, "uart8_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART8_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(uart9_clk, "uart9_clk", ++ uart_parent_names, BASE_TYPE_APBC, APBC_UART9_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(gpio_clk, 
"gpio_clk", "vctcxo_24", ++ BASE_TYPE_APBC, APBC_GPIO_CLK_RST, ++ 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const pwm_parent_names[] = { ++ "pll1_d192_12p8", "clk_32k" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(pwm0_clk, "pwm0_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM0_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm1_clk, "pwm1_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM1_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm2_clk, "pwm2_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM2_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm3_clk, "pwm3_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM3_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm4_clk, "pwm4_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM4_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm5_clk, "pwm5_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM5_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm6_clk, "pwm6_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM6_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm7_clk, "pwm7_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM7_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm8_clk, "pwm8_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM8_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm9_clk, "pwm9_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM9_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm10_clk, "pwm10_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM10_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm11_clk, "pwm11_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM11_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm12_clk, "pwm12_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM12_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm13_clk, "pwm13_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM13_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm14_clk, "pwm14_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM14_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm15_clk, "pwm15_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM15_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm16_clk, "pwm16_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM16_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm17_clk, "pwm17_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM17_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm18_clk, "pwm18_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM18_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(pwm19_clk, "pwm19_clk", ++ pwm_parent_names, BASE_TYPE_APBC, APBC_PWM19_CLK_RST, ++ 4, 3, 0x2, 0x2, 0x0, ++ 0); ++ ++static const char * const ssp_parent_names[] = { ++ "pll1_d384_6p4", "pll1_d192_12p8", "pll1_d96_25p6", ++ "pll1_d48_51p2", "pll1_d768_3p2", "pll1_d1536_1p6", ++ "pll1_d3072_0p8" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ssp3_clk, ++ "ssp3_clk", ssp_parent_names, ++ BASE_TYPE_APBC, APBC_SSP3_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(rtc_clk, "rtc_clk", "clk_32k", ++ BASE_TYPE_APBC, APBC_RTC_CLK_RST, ++ 0x83, 0x83, 0x0, 0); ++ ++static const char * 
const twsi_parent_names[] = { ++ "pll1_d78_31p5", "pll1_d48_51p2", "pll1_d40_61p44" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(twsi0_clk, "twsi0_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI0_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi1_clk, "twsi1_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi2_clk, "twsi2_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI2_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi4_clk, "twsi4_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI4_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi5_clk, "twsi5_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI5_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi6_clk, "twsi6_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI6_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi7_clk, "twsi7_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI7_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(twsi8_clk, "twsi8_clk", ++ twsi_parent_names, BASE_TYPE_APBC, APBC_TWSI8_CLK_RST, ++ 4, 3, 0x7, 0x3, 0x4, ++ 0); ++ ++static const char * const timer_parent_names[] = { ++ "pll1_d192_12p8", "clk_32k", "pll1_d384_6p4", ++ "vctcxo_3", "vctcxo_1" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(timers1_clk, "timers1_clk", ++ timer_parent_names, BASE_TYPE_APBC, APBC_TIMERS1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(timers2_clk, ++ "timers2_clk", timer_parent_names, ++ BASE_TYPE_APBC, APBC_TIMERS2_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(aib_clk, "aib_clk", "vctcxo_24", ++ BASE_TYPE_APBC, APBC_AIB_CLK_RST, ++ 0x3, 0x3, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(onewire_clk, ++ "onewire_clk", NULL, ++ BASE_TYPE_APBC, APBC_ONEWIRE_CLK_RST, ++ 0x3, 0x3, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_FACTOR(i2s_sysclk, ++ "i2s_sysclk", "pll1_d8_307p2", ++ BASE_TYPE_MPMU, MPMU_ISCCR, ++ BIT(31), BIT(31), 0x0, ++ 200, 1, 0); ++ ++static SPACEMIT_CCU_GATE_FACTOR(i2s_bclk, ++ "i2s_bclk", "i2s_sysclk", ++ BASE_TYPE_MPMU, MPMU_ISCCR, ++ BIT(29), BIT(29), 0x0, ++ 1, 1, 0); ++ ++static const char * const sspa_parent_names[] = { ++ "pll1_d384_6p4", "pll1_d192_12p8", "pll1_d96_25p6", ++ "pll1_d48_51p2", "pll1_d768_3p2", "pll1_d1536_1p6", ++ "pll1_d3072_0p8", "i2s_bclk" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(sspa0_clk, "sspa0_clk", sspa_parent_names, ++ BASE_TYPE_APBC, APBC_SSPA0_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_MUX_GATE(sspa1_clk, "sspa1_clk", sspa_parent_names, ++ BASE_TYPE_APBC, APBC_SSPA1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dro_clk, "dro_clk", NULL, ++ BASE_TYPE_APBC, APBC_DRO_CLK_RST, ++ 0x1, 0x1, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(ir_clk, "ir_clk", NULL, ++ BASE_TYPE_APBC, APBC_IR_CLK_RST, ++ 0x1, 0x1, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(tsen_clk, "tsen_clk", NULL, ++ BASE_TYPE_APBC, APBC_TSEN_CLK_RST, ++ 0x3, 0x3, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(ipc_ap2aud_clk, "ipc_ap2aud_clk", ++ NULL, BASE_TYPE_APBC, APBC_IPC_AP2AUD_CLK_RST, ++ 0x3, 0x3, 0x0, 0); ++ ++static const char * const can_parent_names[] = { ++ "pll3_20", "pll3_40", "pll3_80" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(can0_clk, "can0_clk", can_parent_names, ++ BASE_TYPE_APBC, APBC_CAN0_CLK_RST, ++ 4, 3, BIT(1), BIT(1), 0x0, ++ 0); ++ ++static 
SPACEMIT_CCU_GATE_NO_PARENT(can0_bus_clk, "can0_bus_clk", NULL, ++ BASE_TYPE_APBC, APBC_CAN0_CLK_RST, ++ BIT(0), BIT(0), 0x0, 0); ++ ++static SPACEMIT_CCU_GATE(wdt_clk, "wdt_clk", "pll1_d96_25p6", ++ BASE_TYPE_MPMU, MPMU_WDTPCR, ++ 0x3, 0x3, 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(ripc_clk, "ripc_clk", NULL, ++ BASE_TYPE_MPMU, MPMU_RIPCCR, ++ 0x3, 0x3, 0x0, 0); ++ ++static const char * const jpg_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d6_409p6", "pll1_d5_491p52", ++ "pll1_d3_819p2", "pll1_d2_1228p8", "pll2_d4", "pll2_d3" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(jpg_clk, "jpg_clk", ++ jpg_parent_names, BASE_TYPE_APMU, APMU_JPG_CLK_RES_CTRL, ++ 5, 3, BIT(15), ++ 2, 3, BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(jpg_4kafbc_clk, "jpg_4kafbc_clk", ++ NULL, BASE_TYPE_APMU, APMU_JPG_CLK_RES_CTRL, ++ BIT(16), BIT(16), 0x0, 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(jpg_2kafbc_clk, "jpg_2kafbc_clk", ++ NULL, BASE_TYPE_APMU, APMU_JPG_CLK_RES_CTRL, ++ BIT(17), BIT(17), 0x0, 0); ++ ++static const char * const ccic2phy_parent_names[] = { ++ "pll1_d24_102p4", "pll1_d48_51p2_ap" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ccic2phy_clk, "ccic2phy_clk", ++ ccic2phy_parent_names, ++ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 7, 1, BIT(5), BIT(5), 0x0, ++ 0); ++ ++static const char * const ccic3phy_parent_names[] = { ++ "pll1_d24_102p4", "pll1_d48_51p2_ap" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ccic3phy_clk, "ccic3phy_clk", ++ ccic3phy_parent_names, ++ BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 31, 1, BIT(30), BIT(30), 0x0, 0); ++ ++static const char * const csi_parent_names[] = { ++ "pll1_d5_491p52", "pll1_d6_409p6", "pll1_d4_614p4", ++ "pll1_d3_819p2", "pll2_d2", "pll2_d3", "pll2_d4", ++ "pll1_d2_1228p8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(csi_clk, "csi_clk", ++ csi_parent_names, BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 20, 3, BIT(15), ++ 16, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static const char * const camm_parent_names[] = { ++ "pll1_d8_307p2", "pll2_d5", "pll1_d6_409p6", "vctcxo_24" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(camm0_clk, "camm0_clk", ++ camm_parent_names, BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 23, 4, 8, 2, ++ BIT(28), BIT(28), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(camm1_clk, "camm1_clk", ++ camm_parent_names, BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 23, 4, 8, 2, ++ BIT(6), BIT(6), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(camm2_clk, "camm2_clk", ++ camm_parent_names, BASE_TYPE_APMU, APMU_CSI_CCIC2_CLK_RES_CTRL, ++ 23, 4, 8, 2, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static const char * const isp_cpp_parent_names[] = { ++ "pll1_d8_307p2", "pll1_d6_409p6" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(isp_cpp_clk, "isp_cpp_clk", ++ isp_cpp_parent_names, ++ BASE_TYPE_APMU, APMU_ISP_CLK_RES_CTRL, ++ 24, 2, 26, 1, ++ BIT(28), BIT(28), 0x0, ++ 0); ++ ++static const char * const isp_bus_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d8_307p2", ++ "pll1_d10_245p76" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(isp_bus_clk, "isp_bus_clk", ++ isp_bus_parent_names, ++ BASE_TYPE_APMU, APMU_ISP_CLK_RES_CTRL, ++ 18, 3, BIT(23), ++ 21, 2, BIT(17), BIT(17), 0x0, ++ 0); ++ ++static const char * const isp_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", ++ "pll1_d8_307p2" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(isp_clk, "isp_clk", ++ isp_parent_names, BASE_TYPE_APMU, APMU_ISP_CLK_RES_CTRL, ++ 4, 3, BIT(7), ++ 8, 2, BIT(1), BIT(1), 0x0, ++ 0); ++ ++static const char * 
const dpumclk_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", ++ "pll1_d8_307p2" ++}; ++ ++static SPACEMIT_CCU_DIV2_FC_MUX_GATE(dpu_mclk, "dpu_mclk", ++ dpumclk_parent_names, BASE_TYPE_APMU, ++ APMU_LCD_CLK_RES_CTRL1, APMU_LCD_CLK_RES_CTRL2, ++ 1, 4, BIT(29), ++ 5, 3, BIT(0), BIT(0), 0x0, ++ 0); ++ ++static const char * const dpuesc_parent_names[] = { ++ "pll1_d48_51p2_ap", "pll1_d52_47p26", "pll1_d96_25p6", ++ "pll1_d32_76p8" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(dpu_esc_clk, "dpu_esc_clk", dpuesc_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, ++ 0, 2, BIT(2), BIT(2), 0x0, ++ 0); ++ ++static const char * const dpubit_parent_names[] = { ++ "pll1_d3_819p2", "pll2_d2", "pll2_d3", "pll1_d2_1228p8", ++ "pll2_d4", "pll2_d5", "pll2_d8", "pll2_d8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(dpu_bit_clk, "dpu_bit_clk", ++ dpubit_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, ++ 17, 3, BIT(31), ++ 20, 3, BIT(16), BIT(16), 0x0, ++ 0); ++ ++static const char * const dpupx_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", ++ "pll1_d8_307p2", "pll2_d7", "pll2_d8" ++}; ++ ++static SPACEMIT_CCU_DIV2_FC_MUX_GATE(dpu_pxclk, "dpu_pxclk", dpupx_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, APMU_LCD_CLK_RES_CTRL2, ++ 17, 4, BIT(30), ++ 21, 3, BIT(16), BIT(16), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dpu_hclk, "dpu_hclk", NULL, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, ++ BIT(5), BIT(5), 0x0, ++ 0); ++ ++static const char * const dpu_spi_parent_names[] = { ++ "pll1_d8_307p2", "pll1_d6_409p6", "pll1_d10_245p76", ++ "pll1_d11_223p4", "pll1_d13_189", "pll1_d23_106p8", ++ "pll2_d3", "pll2_d5" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(dpu_spi_clk, "dpu_spi_clk", ++ dpu_spi_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, ++ 8, 3, BIT(7), ++ 12, 3, BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dpu_spi_hbus_clk, "dpu_spi_hbus_clk", NULL, ++ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dpu_spi_bus_clk, "dpu_spi_bus_clk", NULL, ++ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, ++ BIT(5), BIT(5), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dpu_spi_aclk, "dpu_spi_aclk", NULL, ++ BASE_TYPE_APMU, APMU_LCD_SPI_CLK_RES_CTRL, ++ BIT(6), BIT(6), 0x0, ++ 0); ++ ++static const char * const v2d_parent_names[] = { ++ "pll1_d5_491p52", "pll1_d6_409p6", "pll1_d8_307p2", ++ "pll1_d4_614p4", ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(v2d_clk, "v2d_clk", v2d_parent_names, ++ BASE_TYPE_APMU, APMU_LCD_CLK_RES_CTRL1, ++ 9, 3, BIT(28), ++ 12, 2, BIT(8), BIT(8), 0x0, ++ 0); ++ ++static const char * const ccic_4x_parent_names[] = { ++ "pll1_d5_491p52", "pll1_d6_409p6", "pll1_d4_614p4", ++ "pll1_d3_819p2", "pll2_d2", "pll2_d3", "pll2_d4", ++ "pll1_d2_1228p8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(ccic_4x_clk, "ccic_4x_clk", ++ ccic_4x_parent_names, ++ BASE_TYPE_APMU, APMU_CCIC_CLK_RES_CTRL, ++ 18, 3, BIT(15), ++ 23, 2, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static const char * const ccic1phy_parent_names[] = { ++ "pll1_d24_102p4", "pll1_d48_51p2_ap" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ccic1phy_clk, "ccic1phy_clk", ++ ccic1phy_parent_names, ++ BASE_TYPE_APMU, APMU_CCIC_CLK_RES_CTRL, ++ 7, 1, BIT(5), BIT(5), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(sdh_axi_aclk, "sdh_axi_aclk", NULL, ++ BASE_TYPE_APMU, APMU_SDH0_CLK_RES_CTRL, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static const char * const sdh01_parent_names[] = { ++ 
"pll1_d6_409p6", "pll1_d4_614p4", "pll2_d8", "pll2_d5", ++ "pll1_d11_223p4", "pll1_d13_189", "pll1_d23_106p8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(sdh0_clk, "sdh0_clk", sdh01_parent_names, ++ BASE_TYPE_APMU, APMU_SDH0_CLK_RES_CTRL, ++ 8, 3, BIT(11), ++ 5, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(sdh1_clk, "sdh1_clk", sdh01_parent_names, ++ BASE_TYPE_APMU, APMU_SDH1_CLK_RES_CTRL, ++ 8, 3, BIT(11), ++ 5, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static const char * const sdh2_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d4_614p4", "pll2_d8", ++ "pll1_d3_819p2", "pll1_d11_223p4", "pll1_d13_189", ++ "pll1_d23_106p8" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(sdh2_clk, "sdh2_clk", sdh2_parent_names, ++ BASE_TYPE_APMU, APMU_SDH2_CLK_RES_CTRL, ++ 8, 3, BIT(11), ++ 5, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(usb_axi_clk, "usb_axi_clk", NULL, ++ BASE_TYPE_APMU, APMU_USB_CLK_RES_CTRL, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(usb_p1_aclk, "usb_p1_aclk", NULL, ++ BASE_TYPE_APMU, APMU_USB_CLK_RES_CTRL, ++ BIT(5), BIT(5), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(usb30_clk, "usb30_clk", NULL, ++ BASE_TYPE_APMU, APMU_USB_CLK_RES_CTRL, ++ BIT(8), BIT(8), 0x0, ++ 0); ++ ++static const char * const qspi_parent_names[] = { ++ "pll1_d6_409p6", "pll2_d8", "pll1_d8_307p2", ++ "pll1_d10_245p76", "pll1_d11_223p4", "pll1_d23_106p8", ++ "pll1_d5_491p52", "pll1_d13_189" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(qspi_clk, "qspi_clk", qspi_parent_names, ++ BASE_TYPE_APMU, APMU_QSPI_CLK_RES_CTRL, ++ 9, 3, ++ 6, 3, BIT(4), BIT(4), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(qspi_bus_clk, "qspi_bus_clk", NULL, ++ BASE_TYPE_APMU, APMU_QSPI_CLK_RES_CTRL, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(dma_clk, "dma_clk", NULL, ++ BASE_TYPE_APMU, APMU_DMA_CLK_RES_CTRL, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static const char * const aes_parent_names[] = { ++ "pll1_d12_204p8", "pll1_d24_102p4" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(aes_clk, "aes_clk", aes_parent_names, ++ BASE_TYPE_APMU, APMU_AES_CLK_RES_CTRL, ++ 6, 1, BIT(5), BIT(5), 0x0, ++ 0); ++ ++static const char * const vpu_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d5_491p52", "pll1_d3_819p2", ++ "pll1_d6_409p6", "pll3_d6", "pll2_d3", "pll2_d4", "pll2_d5" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(vpu_clk, "vpu_clk", vpu_parent_names, ++ BASE_TYPE_APMU, APMU_VPU_CLK_RES_CTRL, ++ 13, 3, BIT(21), ++ 10, 3, ++ BIT(3), BIT(3), 0x0, ++ 0); ++ ++static const char * const gpu_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d5_491p52", "pll1_d3_819p2", "pll1_d6_409p6", ++ "pll3_d6", "pll2_d3", "pll2_d4", "pll2_d5" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(gpu_clk, "gpu_clk", gpu_parent_names, ++ BASE_TYPE_APMU, APMU_GPU_CLK_RES_CTRL, ++ 12, 3, BIT(15), ++ 18, 3, ++ BIT(4), BIT(4), 0x0, ++ 0); ++ ++static const char * const emmc_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d4_614p4", "pll1_d52_47p26", "pll1_d3_819p2" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(emmc_clk, "emmc_clk", emmc_parent_names, ++ BASE_TYPE_APMU, APMU_PMUA_EM_CLK_RES_CTRL, ++ 8, 3, BIT(11), ++ 6, 2, ++ 0x18, 0x18, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_GATE(emmc_x_clk, "emmc_x_clk", "pll1_d2_1228p8", ++ BASE_TYPE_APMU, APMU_PMUA_EM_CLK_RES_CTRL, ++ 12, 3, BIT(15), BIT(15), 0x0, ++ 0); ++ ++static const char * const audio_parent_names[] = { ++ "pll1_aud_245p7", "pll1_d8_307p2", "pll1_d6_409p6" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(audio_clk, 
"audio_clk", audio_parent_names, ++ BASE_TYPE_APMU, APMU_AUDIO_CLK_RES_CTRL, ++ 4, 3, BIT(15), ++ 7, 3, ++ BIT(12), BIT(12), 0x0, ++ 0); ++ ++static const char * const hdmi_parent_names[] = { ++ "pll1_d6_409p6", "pll1_d5_491p52", "pll1_d4_614p4", "pll1_d8_307p2" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX_GATE(hdmi_mclk, "hdmi_mclk", hdmi_parent_names, ++ BASE_TYPE_APMU, APMU_HDMI_CLK_RES_CTRL, ++ 1, 4, BIT(29), ++ 5, 3, ++ BIT(0), BIT(0), 0x0, ++ 0); ++ ++static const char * const cci550_parent_names[] = { ++ "pll1_d5_491p52", "pll1_d4_614p4", "pll1_d3_819p2", "pll2_d3" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX(cci550_clk, "cci550_clk", cci550_parent_names, ++ BASE_TYPE_APMU, APMU_CCI550_CLK_CTRL, ++ 8, 3, BIT(12), ++ 0, 2, ++ 0); ++ ++static const char * const pmua_aclk_parent_names[] = { ++ "pll1_d10_245p76", "pll1_d8_307p2" ++}; ++ ++static SPACEMIT_CCU_DIV_FC_MUX(pmua_aclk, "pmua_aclk", pmua_aclk_parent_names, ++ BASE_TYPE_APMU, APMU_ACLK_CLK_CTRL, ++ 1, 2, BIT(4), ++ 0, 1, ++ 0); ++ ++static const char * const cpu_c0_hi_parent_names[] = { ++ "pll3_d2", "pll3_d1" ++}; ++ ++static SPACEMIT_CCU_MUX(cpu_c0_hi_clk, "cpu_c0_hi_clk", cpu_c0_hi_parent_names, ++ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, ++ 13, 1, ++ 0); ++ ++static const char * const cpu_c0_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d3_819p2", "pll1_d6_409p6", ++ "pll1_d5_491p52", "pll1_d2_1228p8", "pll3_d3", ++ "pll2_d3", "cpu_c0_hi_clk" ++}; ++ ++static SPACEMIT_CCU_MUX_FC(cpu_c0_core_clk, "cpu_c0_core_clk", ++ cpu_c0_parent_names, ++ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, ++ BIT(12), ++ 0, 3, ++ 0); ++ ++static SPACEMIT_CCU_DIV(cpu_c0_ace_clk, "cpu_c0_ace_clk", "cpu_c0_core_clk", ++ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, ++ 6, 3, ++ 0); ++ ++static SPACEMIT_CCU_DIV(cpu_c0_tcm_clk, "cpu_c0_tcm_clk", "cpu_c0_core_clk", ++ BASE_TYPE_APMU, APMU_CPU_C0_CLK_CTRL, ++ 9, 3, ++ 0); ++ ++static const char * const cpu_c1_hi_parent_names[] = { ++ "pll3_d2", "pll3_d1" ++}; ++ ++static SPACEMIT_CCU_MUX(cpu_c1_hi_clk, "cpu_c1_hi_clk", cpu_c1_hi_parent_names, ++ BASE_TYPE_APMU, APMU_CPU_C1_CLK_CTRL, ++ 13, 1, ++ 0); ++ ++static const char * const cpu_c1_parent_names[] = { ++ "pll1_d4_614p4", "pll1_d3_819p2", "pll1_d6_409p6", ++ "pll1_d5_491p52", "pll1_d2_1228p8", "pll3_d3", ++ "pll2_d3", "cpu_c1_hi_clk" ++}; ++ ++static SPACEMIT_CCU_MUX_FC(cpu_c1_pclk, "cpu_c1_pclk", cpu_c1_parent_names, ++ BASE_TYPE_APMU, APMU_CPU_C1_CLK_CTRL, ++ BIT(12), ++ 0, 3, ++ 0); ++ ++static SPACEMIT_CCU_DIV(cpu_c1_ace_clk, "cpu_c1_ace_clk", "cpu_c1_pclk", ++ BASE_TYPE_APMU, APMU_CPU_C1_CLK_CTRL, ++ 6, 3, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(pcie0_clk, "pcie0_clk", NULL, ++ BASE_TYPE_APMU, APMU_PCIE_CLK_RES_CTRL_0, ++ 0x7, 0x7, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(pcie1_clk, "pcie1_clk", NULL, ++ BASE_TYPE_APMU, APMU_PCIE_CLK_RES_CTRL_1, ++ 0x7, 0x7, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(pcie2_clk, "pcie2_clk", NULL, ++ BASE_TYPE_APMU, APMU_PCIE_CLK_RES_CTRL_2, ++ 0x7, 0x7, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(emac0_bus_clk, "emac0_bus_clk", NULL, ++ BASE_TYPE_APMU, APMU_EMAC0_CLK_RES_CTRL, ++ BIT(0), BIT(0), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(emac0_ptp_clk, "emac0_ptp_clk", "pll2_d6", ++ BASE_TYPE_APMU, APMU_EMAC0_CLK_RES_CTRL, ++ BIT(15), BIT(15), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(emac1_bus_clk, "emac1_bus_clk", NULL, ++ BASE_TYPE_APMU, APMU_EMAC1_CLK_RES_CTRL, ++ BIT(0), BIT(0), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(emac1_ptp_clk, "emac1_ptp_clk", "pll2_d6", ++ BASE_TYPE_APMU, 
APMU_EMAC1_CLK_RES_CTRL, ++ BIT(15), BIT(15), 0x0, ++ 0); ++ ++static const char * const uart1_sec_parent_names[] = { ++ "pll1_m3d128_57p6", "slow_uart1_14p74", "slow_uart2_48" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(uart1_sec_clk, "uart1_sec_clk", ++ uart1_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_UART1_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const ssp2_sec_parent_names[] = { ++ "pll1_d384_6p4", "pll1_d192_12p8", "pll1_d96_25p6", ++ "pll1_d48_51p2", "pll1_d768_3p2", "pll1_d1536_1p6", ++ "pll1_d3072_0p8" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(ssp2_sec_clk, "ssp2_sec_clk", ++ ssp2_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_SSP2_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const twsi3_sec_parent_names[] = { ++ "pll1_d78_31p5", "pll1_d48_51p2", "pll1_d40_61p44" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(twsi3_sec_clk, "twsi3_sec_clk", ++ twsi3_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_TWSI3_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(rtc_sec_clk, "rtc_sec_clk", "clk_32k", ++ BASE_TYPE_APBC2, APBC2_RTC_CLK_RST, ++ 0x83, 0x83, 0x0, 0); ++ ++static const char * const timer_sec_parent_names[] = { ++ "pll1_d192_12p8", "clk_32k", "pll1_d384_6p4", "vctcxo_3", "vctcxo_1" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(timers0_sec_clk, "timers0_sec_clk", ++ timer_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_TIMERS0_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const kpc_sec_parent_names[] = { ++ "pll1_d192_12p8", "clk_32k", "pll1_d384_6p4", "vctcxo_3", "vctcxo_1" ++}; ++ ++static SPACEMIT_CCU_MUX_GATE(kpc_sec_clk, "kpc_sec_clk", kpc_sec_parent_names, ++ BASE_TYPE_APBC2, APBC2_KPC_CLK_RST, ++ 4, 3, 0x3, 0x3, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE(gpio_sec_clk, "gpio_sec_clk", "vctcxo_24", ++ BASE_TYPE_APBC2, APBC2_GPIO_CLK_RST, ++ 0x3, 0x3, 0x0, ++ 0); ++ ++static const char * const apb_parent_names[] = { ++ "pll1_d96_25p6", "pll1_d48_51p2", "pll1_d96_25p6", "pll1_d24_102p4" ++}; ++ ++static SPACEMIT_CCU_MUX(apb_clk, "apb_clk", apb_parent_names, ++ BASE_TYPE_MPMU, MPMU_APBCSCR, ++ 0, 2, 0); ++ ++static const char * const rhdmi_audio_parent_names[] = { ++ "pll1_aud_24p5", "pll1_aud_245p7" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rhdmi_audio_clk, "rhdmi_audio_clk", ++ rhdmi_audio_parent_names, ++ BASE_TYPE_RCPU, RCPU_HDMI_CLK_RST, ++ 4, 11, 16, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static const char * const rcan_parent_names[] = { ++ "pll3_20", "pll3_40", "pll3_80" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rcan_clk, "rcan_clk", rcan_parent_names, ++ BASE_TYPE_RCPU, RCPU_CAN_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(rcan_bus_clk, "rcan_bus_clk", NULL, ++ BASE_TYPE_RCPU, RCPU_CAN_CLK_RST, ++ BIT(2), BIT(2), 0x0, 0); ++ ++static const char * const rpwm_parent_names[] = { ++ "pll1_aud_245p7", "pll1_aud_24p5" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm0_clk, "rpwm0_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM0_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm1_clk, "rpwm1_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM1_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm2_clk, "rpwm2_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM2_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm3_clk, "rpwm3_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM3_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ 
++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm4_clk, "rpwm4_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM4_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm5_clk, "rpwm5_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM5_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm6_clk, "rpwm6_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM6_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm7_clk, "rpwm7_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM7_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm8_clk, "rpwm8_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM8_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rpwm9_clk, "rpwm9_clk", rpwm_parent_names, ++ BASE_TYPE_RCPU2, RCPU2_PWM9_CLK_RST, ++ 8, 11, 4, 2, ++ BIT(1), BIT(1), 0x0, ++ 0); ++ ++static const char * const ri2c_parent_names[] = { ++ "pll1_d40_61p44", "pll1_d96_25p6", "pll1_d192_12p8", "vctcxo_3" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(ri2c0_clk, "ri2c0_clk", ri2c_parent_names, ++ BASE_TYPE_RCPU, RCPU_I2C0_CLK_RST, ++ 8, 11, 4, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static const char * const rssp0_parent_names[] = { ++ "pll1_d40_61p44", "pll1_d96_25p6", "pll1_d192_12p8", "vctcxo_3" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(rssp0_clk, "rssp0_clk", rssp0_parent_names, ++ BASE_TYPE_RCPU, RCPU_SSP0_CLK_RST, ++ 8, 11, 4, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static SPACEMIT_CCU_GATE_NO_PARENT(rir_clk, "rir_clk", NULL, ++ BASE_TYPE_RCPU, RCPU_IR_CLK_RST, ++ BIT(2), BIT(2), 0x0, ++ 0); ++ ++static const char * const ruart0_parent_names[] = { ++ "pll1_aud_24p5", "pll1_aud_245p7", "vctcxo_24", "vctcxo_3" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(ruart0_clk, "ruart0_clk", ruart0_parent_names, ++ BASE_TYPE_RCPU, RCPU_UART0_CLK_RST, ++ 8, 11, 4, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static const char * const ruart1_parent_names[] = { ++ "pll1_aud_24p5", "pll1_aud_245p7", "vctcxo_24", "vctcxo_3" ++}; ++ ++static SPACEMIT_CCU_DIV_MUX_GATE(ruart1_clk, "ruart1_clk", ruart1_parent_names, ++ BASE_TYPE_RCPU, RCPU_UART1_CLK_RST, ++ 8, 11, 4, 2, ++ 0x6, 0x6, 0x0, ++ 0); ++ ++static struct clk_hw_onecell_data spacemit_k1x_hw_clks = { ++ .hws = { ++ [CLK_PLL2] = &pll2.common.hw, ++ [CLK_PLL3] = &pll3.common.hw, ++ [CLK_PLL1_D2] = &pll1_d2.common.hw, ++ [CLK_PLL1_D3] = &pll1_d3.common.hw, ++ [CLK_PLL1_D4] = &pll1_d4.common.hw, ++ [CLK_PLL1_D5] = &pll1_d5.common.hw, ++ [CLK_PLL1_D6] = &pll1_d6.common.hw, ++ [CLK_PLL1_D7] = &pll1_d7.common.hw, ++ [CLK_PLL1_D8] = &pll1_d8.common.hw, ++ [CLK_PLL1_D11] = &pll1_d11_223p4.common.hw, ++ [CLK_PLL1_D13] = &pll1_d13_189.common.hw, ++ [CLK_PLL1_D23] = &pll1_d23_106p8.common.hw, ++ [CLK_PLL1_D64] = &pll1_d64_38p4.common.hw, ++ [CLK_PLL1_D10_AUD] = &pll1_aud_245p7.common.hw, ++ [CLK_PLL1_D100_AUD] = &pll1_aud_24p5.common.hw, ++ [CLK_PLL2_D1] = &pll2_d1.common.hw, ++ [CLK_PLL2_D2] = &pll2_d2.common.hw, ++ [CLK_PLL2_D3] = &pll2_d3.common.hw, ++ [CLK_PLL2_D4] = &pll2_d4.common.hw, ++ [CLK_PLL2_D5] = &pll2_d5.common.hw, ++ [CLK_PLL2_D6] = &pll2_d6.common.hw, ++ [CLK_PLL2_D7] = &pll2_d7.common.hw, ++ [CLK_PLL2_D8] = &pll2_d8.common.hw, ++ [CLK_PLL3_D1] = &pll3_d1.common.hw, ++ [CLK_PLL3_D2] = &pll3_d2.common.hw, ++ [CLK_PLL3_D3] = &pll3_d3.common.hw, ++ [CLK_PLL3_D4] = &pll3_d4.common.hw, ++ [CLK_PLL3_D5] = &pll3_d5.common.hw, ++ [CLK_PLL3_D6] = &pll3_d6.common.hw, ++ 
[CLK_PLL3_D7] = &pll3_d7.common.hw, ++ [CLK_PLL3_D8] = &pll3_d8.common.hw, ++ [CLK_PLL3_80] = &pll3_80.common.hw, ++ [CLK_PLL3_40] = &pll3_40.common.hw, ++ [CLK_PLL3_20] = &pll3_20.common.hw, ++ [CLK_PLL1_307P2] = &pll1_d8_307p2.common.hw, ++ [CLK_PLL1_76P8] = &pll1_d32_76p8.common.hw, ++ [CLK_PLL1_61P44] = &pll1_d40_61p44.common.hw, ++ [CLK_PLL1_153P6] = &pll1_d16_153p6.common.hw, ++ [CLK_PLL1_102P4] = &pll1_d24_102p4.common.hw, ++ [CLK_PLL1_51P2] = &pll1_d48_51p2.common.hw, ++ [CLK_PLL1_51P2_AP] = &pll1_d48_51p2_ap.common.hw, ++ [CLK_PLL1_57P6] = &pll1_m3d128_57p6.common.hw, ++ [CLK_PLL1_25P6] = &pll1_d96_25p6.common.hw, ++ [CLK_PLL1_12P8] = &pll1_d192_12p8.common.hw, ++ [CLK_PLL1_12P8_WDT] = &pll1_d192_12p8_wdt.common.hw, ++ [CLK_PLL1_6P4] = &pll1_d384_6p4.common.hw, ++ [CLK_PLL1_3P2] = &pll1_d768_3p2.common.hw, ++ [CLK_PLL1_1P6] = &pll1_d1536_1p6.common.hw, ++ [CLK_PLL1_0P8] = &pll1_d3072_0p8.common.hw, ++ [CLK_PLL1_351] = &pll1_d7_351p08.common.hw, ++ [CLK_PLL1_409P6] = &pll1_d6_409p6.common.hw, ++ [CLK_PLL1_204P8] = &pll1_d12_204p8.common.hw, ++ [CLK_PLL1_491] = &pll1_d5_491p52.common.hw, ++ [CLK_PLL1_245P76] = &pll1_d10_245p76.common.hw, ++ [CLK_PLL1_614] = &pll1_d4_614p4.common.hw, ++ [CLK_PLL1_47P26] = &pll1_d52_47p26.common.hw, ++ [CLK_PLL1_31P5] = &pll1_d78_31p5.common.hw, ++ [CLK_PLL1_819] = &pll1_d3_819p2.common.hw, ++ [CLK_PLL1_1228] = &pll1_d2_1228p8.common.hw, ++ [CLK_SLOW_UART1] = &slow_uart1_14p74.common.hw, ++ [CLK_SLOW_UART2] = &slow_uart2_48.common.hw, ++ [CLK_UART1] = &uart1_clk.common.hw, ++ [CLK_UART2] = &uart2_clk.common.hw, ++ [CLK_UART3] = &uart3_clk.common.hw, ++ [CLK_UART4] = &uart4_clk.common.hw, ++ [CLK_UART5] = &uart5_clk.common.hw, ++ [CLK_UART6] = &uart6_clk.common.hw, ++ [CLK_UART7] = &uart7_clk.common.hw, ++ [CLK_UART8] = &uart8_clk.common.hw, ++ [CLK_UART9] = &uart9_clk.common.hw, ++ [CLK_GPIO] = &gpio_clk.common.hw, ++ [CLK_PWM0] = &pwm0_clk.common.hw, ++ [CLK_PWM1] = &pwm1_clk.common.hw, ++ [CLK_PWM2] = &pwm2_clk.common.hw, ++ [CLK_PWM3] = &pwm3_clk.common.hw, ++ [CLK_PWM4] = &pwm4_clk.common.hw, ++ [CLK_PWM5] = &pwm5_clk.common.hw, ++ [CLK_PWM6] = &pwm6_clk.common.hw, ++ [CLK_PWM7] = &pwm7_clk.common.hw, ++ [CLK_PWM8] = &pwm8_clk.common.hw, ++ [CLK_PWM9] = &pwm9_clk.common.hw, ++ [CLK_PWM10] = &pwm10_clk.common.hw, ++ [CLK_PWM11] = &pwm11_clk.common.hw, ++ [CLK_PWM12] = &pwm12_clk.common.hw, ++ [CLK_PWM13] = &pwm13_clk.common.hw, ++ [CLK_PWM14] = &pwm14_clk.common.hw, ++ [CLK_PWM15] = &pwm15_clk.common.hw, ++ [CLK_PWM16] = &pwm16_clk.common.hw, ++ [CLK_PWM17] = &pwm17_clk.common.hw, ++ [CLK_PWM18] = &pwm18_clk.common.hw, ++ [CLK_PWM19] = &pwm19_clk.common.hw, ++ [CLK_SSP3] = &ssp3_clk.common.hw, ++ [CLK_RTC] = &rtc_clk.common.hw, ++ [CLK_TWSI0] = &twsi0_clk.common.hw, ++ [CLK_TWSI1] = &twsi1_clk.common.hw, ++ [CLK_TWSI2] = &twsi2_clk.common.hw, ++ [CLK_TWSI4] = &twsi4_clk.common.hw, ++ [CLK_TWSI5] = &twsi5_clk.common.hw, ++ [CLK_TWSI6] = &twsi6_clk.common.hw, ++ [CLK_TWSI7] = &twsi7_clk.common.hw, ++ [CLK_TWSI8] = &twsi8_clk.common.hw, ++ [CLK_TIMERS1] = &timers1_clk.common.hw, ++ [CLK_TIMERS2] = &timers2_clk.common.hw, ++ [CLK_AIB] = &aib_clk.common.hw, ++ [CLK_ONEWIRE] = &onewire_clk.common.hw, ++ [CLK_SSPA0] = &sspa0_clk.common.hw, ++ [CLK_SSPA1] = &sspa1_clk.common.hw, ++ [CLK_DRO] = &dro_clk.common.hw, ++ [CLK_IR] = &ir_clk.common.hw, ++ [CLK_TSEN] = &tsen_clk.common.hw, ++ [CLK_IPC_AP2AUD] = &ipc_ap2aud_clk.common.hw, ++ [CLK_CAN0] = &can0_clk.common.hw, ++ [CLK_CAN0_BUS] = &can0_bus_clk.common.hw, ++ [CLK_WDT] = &wdt_clk.common.hw, ++ 
[CLK_RIPC] = &ripc_clk.common.hw, ++ [CLK_JPG] = &jpg_clk.common.hw, ++ [CLK_JPF_4KAFBC] = &jpg_4kafbc_clk.common.hw, ++ [CLK_JPF_2KAFBC] = &jpg_2kafbc_clk.common.hw, ++ [CLK_CCIC2PHY] = &ccic2phy_clk.common.hw, ++ [CLK_CCIC3PHY] = &ccic3phy_clk.common.hw, ++ [CLK_CSI] = &csi_clk.common.hw, ++ [CLK_CAMM0] = &camm0_clk.common.hw, ++ [CLK_CAMM1] = &camm1_clk.common.hw, ++ [CLK_CAMM2] = &camm2_clk.common.hw, ++ [CLK_ISP_CPP] = &isp_cpp_clk.common.hw, ++ [CLK_ISP_BUS] = &isp_bus_clk.common.hw, ++ [CLK_ISP] = &isp_clk.common.hw, ++ [CLK_DPU_MCLK] = &dpu_mclk.common.hw, ++ [CLK_DPU_ESC] = &dpu_esc_clk.common.hw, ++ [CLK_DPU_BIT] = &dpu_bit_clk.common.hw, ++ [CLK_DPU_PXCLK] = &dpu_pxclk.common.hw, ++ [CLK_DPU_HCLK] = &dpu_hclk.common.hw, ++ [CLK_DPU_SPI] = &dpu_spi_clk.common.hw, ++ [CLK_DPU_SPI_HBUS] = &dpu_spi_hbus_clk.common.hw, ++ [CLK_DPU_SPIBUS] = &dpu_spi_bus_clk.common.hw, ++ [CLK_SPU_SPI_ACLK] = &dpu_spi_aclk.common.hw, ++ [CLK_V2D] = &v2d_clk.common.hw, ++ [CLK_CCIC_4X] = &ccic_4x_clk.common.hw, ++ [CLK_CCIC1PHY] = &ccic1phy_clk.common.hw, ++ [CLK_SDH_AXI] = &sdh_axi_aclk.common.hw, ++ [CLK_SDH0] = &sdh0_clk.common.hw, ++ [CLK_SDH1] = &sdh1_clk.common.hw, ++ [CLK_SDH2] = &sdh2_clk.common.hw, ++ [CLK_USB_P1] = &usb_p1_aclk.common.hw, ++ [CLK_USB_AXI] = &usb_axi_clk.common.hw, ++ [CLK_USB30] = &usb30_clk.common.hw, ++ [CLK_QSPI] = &qspi_clk.common.hw, ++ [CLK_QSPI_BUS] = &qspi_bus_clk.common.hw, ++ [CLK_DMA] = &dma_clk.common.hw, ++ [CLK_AES] = &aes_clk.common.hw, ++ [CLK_VPU] = &vpu_clk.common.hw, ++ [CLK_GPU] = &gpu_clk.common.hw, ++ [CLK_EMMC] = &emmc_clk.common.hw, ++ [CLK_EMMC_X] = &emmc_x_clk.common.hw, ++ [CLK_AUDIO] = &audio_clk.common.hw, ++ [CLK_HDMI] = &hdmi_mclk.common.hw, ++ [CLK_CCI550] = &cci550_clk.common.hw, ++ [CLK_PMUA_ACLK] = &pmua_aclk.common.hw, ++ [CLK_CPU_C0_HI] = &cpu_c0_hi_clk.common.hw, ++ [CLK_CPU_C0_CORE] = &cpu_c0_core_clk.common.hw, ++ [CLK_CPU_C0_ACE] = &cpu_c0_ace_clk.common.hw, ++ [CLK_CPU_C0_TCM] = &cpu_c0_tcm_clk.common.hw, ++ [CLK_CPU_C1_HI] = &cpu_c1_hi_clk.common.hw, ++ [CLK_CPU_C1_CORE] = &cpu_c1_pclk.common.hw, ++ [CLK_CPU_C1_ACE] = &cpu_c1_ace_clk.common.hw, ++ [CLK_PCIE0] = &pcie0_clk.common.hw, ++ [CLK_PCIE1] = &pcie1_clk.common.hw, ++ [CLK_PCIE2] = &pcie2_clk.common.hw, ++ [CLK_EMAC0_BUS] = &emac0_bus_clk.common.hw, ++ [CLK_EMAC0_PTP] = &emac0_ptp_clk.common.hw, ++ [CLK_EMAC1_BUS] = &emac1_bus_clk.common.hw, ++ [CLK_EMAC1_PTP] = &emac1_ptp_clk.common.hw, ++ [CLK_SEC_UART1] = &uart1_sec_clk.common.hw, ++ [CLK_SEC_SSP2] = &ssp2_sec_clk.common.hw, ++ [CLK_SEC_TWSI3] = &twsi3_sec_clk.common.hw, ++ [CLK_SEC_RTC] = &rtc_sec_clk.common.hw, ++ [CLK_SEC_TIMERS0] = &timers0_sec_clk.common.hw, ++ [CLK_SEC_KPC] = &kpc_sec_clk.common.hw, ++ [CLK_SEC_GPIO] = &gpio_sec_clk.common.hw, ++ [CLK_APB] = &apb_clk.common.hw, ++ [CLK_SLOW_UART] = &slow_uart.common.hw, ++ [CLK_I2S_SYSCLK] = &i2s_sysclk.common.hw, ++ [CLK_I2S_BCLK] = &i2s_bclk.common.hw, ++ [CLK_RCPU_HDMIAUDIO] = &rhdmi_audio_clk.common.hw, ++ [CLK_RCPU_CAN] = &rcan_clk.common.hw, ++ [CLK_RCPU_CAN_BUS] = &rcan_bus_clk.common.hw, ++ [CLK_RCPU_I2C0] = &ri2c0_clk.common.hw, ++ [CLK_RCPU_SSP0] = &rssp0_clk.common.hw, ++ [CLK_RCPU_IR] = &rir_clk.common.hw, ++ [CLK_RCPU_UART0] = &ruart0_clk.common.hw, ++ [CLK_RCPU_UART1] = &ruart1_clk.common.hw, ++ [CLK_DPLL1] = &dpll1.common.hw, ++ [CLK_DPLL2] = &dpll2.common.hw, ++ [CLK_DFC_LVL0] = &dfc_lvl0.common.hw, ++ [CLK_DFC_LVL1] = &dfc_lvl1.common.hw, ++ [CLK_DFC_LVL2] = &dfc_lvl2.common.hw, ++ [CLK_DFC_LVL3] = &dfc_lvl3.common.hw, ++ [CLK_DFC_LVL4] = 
&dfc_lvl4.common.hw, ++ [CLK_DFC_LVL5] = &dfc_lvl5.common.hw, ++ [CLK_DFC_LVL6] = &dfc_lvl6.common.hw, ++ [CLK_DFC_LVL7] = &dfc_lvl7.common.hw, ++ [CLK_DDR] = &ddr.common.hw, ++ [CLK_RCPU2_PWM0] = &rpwm0_clk.common.hw, ++ [CLK_RCPU2_PWM1] = &rpwm1_clk.common.hw, ++ [CLK_RCPU2_PWM2] = &rpwm2_clk.common.hw, ++ [CLK_RCPU2_PWM3] = &rpwm3_clk.common.hw, ++ [CLK_RCPU2_PWM4] = &rpwm4_clk.common.hw, ++ [CLK_RCPU2_PWM5] = &rpwm5_clk.common.hw, ++ [CLK_RCPU2_PWM6] = &rpwm6_clk.common.hw, ++ [CLK_RCPU2_PWM7] = &rpwm7_clk.common.hw, ++ [CLK_RCPU2_PWM8] = &rpwm8_clk.common.hw, ++ [CLK_RCPU2_PWM9] = &rpwm9_clk.common.hw, ++ }, ++ .num = CLK_MAX_NO, ++}; ++ ++static struct clk_hw_table bootup_enable_clk_table[] = { ++ {"pll1_d8_307p2", CLK_PLL1_307P2}, ++ {"pll1_d6_409p6", CLK_PLL1_409P6}, ++ {"pll1_d5_491p52", CLK_PLL1_491}, ++ {"pll1_d4_614p4", CLK_PLL1_614}, ++ {"pll1_d3_819p2", CLK_PLL1_819}, ++ {"pll1_d2_1228p8", CLK_PLL1_1228}, ++ {"pll1_d10_245p76", CLK_PLL1_245P76}, ++ {"pll1_d48_51p2", CLK_PLL1_51P2}, ++ {"pll1_d48_51p2_ap", CLK_PLL1_51P2_AP}, ++ {"pll1_d96_25p6", CLK_PLL1_25P6}, ++ {"pll3_d1", CLK_PLL3_D1}, ++ {"pll3_d2", CLK_PLL3_D2}, ++ {"pll3_d3", CLK_PLL3_D3}, ++ {"pll2_d3", CLK_PLL2_D3}, ++ {"apb_clk", CLK_APB}, ++ {"pmua_aclk", CLK_PMUA_ACLK}, ++ {"dma_clk", CLK_DMA}, ++}; ++ ++void spacemit_clocks_enable(struct clk_hw_table *tbl, int tbl_size) ++{ ++ int i; ++ struct clk *clk; ++ struct clk_hw *hw_clk; ++ ++ for (i = 0; i < tbl_size; i++) { ++ hw_clk = spacemit_k1x_hw_clks.hws[tbl[i].clk_hw_id]; ++ clk = clk_hw_get_clk(hw_clk, tbl[i].name); ++ if (!IS_ERR_OR_NULL(clk)) ++ clk_prepare_enable(clk); ++ else ++ pr_err("%s : can't find clk %s\n", ++ __func__, tbl[i].name); ++ } ++} ++ ++unsigned long spacemit_k1x_ddr_freq_tbl[MAX_FREQ_LV + 1] = {0}; ++ ++void spacemit_fill_ddr_freq_tbl(void) ++{ ++ int i; ++ struct clk *clk; ++ struct clk_hw *hw_clk; ++ ++ for (i = 0; i < ARRAY_SIZE(spacemit_k1x_ddr_freq_tbl); i++) { ++ hw_clk = spacemit_k1x_hw_clks.hws[CLK_DFC_LVL0 + i]; ++ clk = clk_hw_get_clk(hw_clk, ddr_clk_parents[i]); ++ ++ if (!IS_ERR_OR_NULL(clk)) ++ spacemit_k1x_ddr_freq_tbl[i] = clk_get_rate(clk); ++ else ++ pr_err("%s : can't find clk %s\n", ++ __func__, ddr_clk_parents[i]); ++ } ++} ++ ++int ccu_common_init(struct clk_hw *hw, struct spacemit_k1x_clk *clk_info) ++{ ++ struct ccu_common *common = hw_to_ccu_common(hw); ++ struct ccu_pll *pll = hw_to_ccu_pll(hw); ++ ++ if (!common) ++ return -1; ++ ++ common->lock = &g_cru_lock; ++ ++ switch (common->base_type) { ++ case BASE_TYPE_MPMU: ++ common->base = clk_info->mpmu_base; ++ break; ++ case BASE_TYPE_APMU: ++ common->base = clk_info->apmu_base; ++ break; ++ case BASE_TYPE_APBC: ++ common->base = clk_info->apbc_base; ++ break; ++ case BASE_TYPE_APBS: ++ common->base = clk_info->apbs_base; ++ break; ++ case BASE_TYPE_CIU: ++ common->base = clk_info->ciu_base; ++ break; ++ case BASE_TYPE_DCIU: ++ common->base = clk_info->dciu_base; ++ break; ++ case BASE_TYPE_DDRC: ++ common->base = clk_info->ddrc_base; ++ break; ++ case BASE_TYPE_AUDC: ++ common->base = clk_info->audio_ctrl_base; ++ break; ++ case BASE_TYPE_APBC2: ++ common->base = clk_info->apbc2_base; ++ break; ++ case BASE_TYPE_RCPU: ++ common->base = clk_info->rcpu_base; ++ break; ++ case BASE_TYPE_RCPU2: ++ common->base = clk_info->rcpu2_base; ++ break; ++ default: ++ common->base = clk_info->apbc_base; ++ break; ++ } ++ ++ if (common->is_pll) ++ pll->pll.lock_base = clk_info->mpmu_base; ++ ++ return 0; ++} ++ ++int spacemit_ccu_probe(struct device_node *node, ++ struct 
spacemit_k1x_clk *clk_info,
++		       struct clk_hw_onecell_data *hw_clks)
++{
++	int i, ret;
++
++	for (i = 0; i < hw_clks->num; i++) {
++		struct clk_hw *hw = hw_clks->hws[i];
++		const char *name;
++
++		if (!hw)
++			continue;
++		if (!hw->init)
++			continue;
++
++		ccu_common_init(hw, clk_info);
++		name = hw->init->name;
++
++		ret = of_clk_hw_register(node, hw);
++		if (ret) {
++			pr_err("Couldn't register clock %d - %s\n", i, name);
++			goto err_clk_unreg;
++		}
++	}
++	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, hw_clks);
++	if (ret)
++		goto err_clk_unreg;
++
++	spacemit_clocks_enable(bootup_enable_clk_table,
++			       ARRAY_SIZE(bootup_enable_clk_table));
++	spacemit_fill_ddr_freq_tbl();
++
++	return 0;
++
++err_clk_unreg:
++	while (--i >= 0) {
++		struct clk_hw *hw = hw_clks->hws[i];
++
++		if (!hw)
++			continue;
++		clk_hw_unregister(hw);
++	}
++
++	return ret;
++}
++
++static void spacemit_k1x_ccu_probe(struct device_node *np)
++{
++	int ret;
++	struct spacemit_k1x_clk *clk_info;
++	struct clk_hw_onecell_data *hw_clks = &spacemit_k1x_hw_clks;
++
++	if (of_device_is_compatible(np, "spacemit,k1x-clock")) {
++		clk_info = &k1x_clock_controller;
++
++		clk_info->mpmu_base = of_iomap(np, 0);
++		if (!clk_info->mpmu_base) {
++			pr_err("failed to map mpmu registers\n");
++			goto out;
++		}
++
++		clk_info->apmu_base = of_iomap(np, 1);
++		if (!clk_info->apmu_base) {
++			pr_err("failed to map apmu registers\n");
++			goto out;
++		}
++
++		clk_info->apbc_base = of_iomap(np, 2);
++		if (!clk_info->apbc_base) {
++			pr_err("failed to map apbc registers\n");
++			goto out;
++		}
++
++		clk_info->apbs_base = of_iomap(np, 3);
++		if (!clk_info->apbs_base) {
++			pr_err("failed to map apbs registers\n");
++			goto out;
++		}
++
++		clk_info->ciu_base = of_iomap(np, 4);
++		if (!clk_info->ciu_base) {
++			pr_err("failed to map ciu registers\n");
++			goto out;
++		}
++
++		clk_info->dciu_base = of_iomap(np, 5);
++		if (!clk_info->dciu_base) {
++			pr_err("failed to map dragon ciu registers\n");
++			goto out;
++		}
++
++		clk_info->ddrc_base = of_iomap(np, 6);
++		if (!clk_info->ddrc_base) {
++			pr_err("failed to map ddrc registers\n");
++			goto out;
++		}
++
++		clk_info->apbc2_base = of_iomap(np, 7);
++		if (!clk_info->apbc2_base) {
++			pr_err("failed to map apbc2 registers\n");
++			goto out;
++		}
++
++		clk_info->rcpu_base = of_iomap(np, 8);
++		if (!clk_info->rcpu_base) {
++			pr_err("failed to map rcpu registers\n");
++			goto out;
++		}
++
++		clk_info->rcpu2_base = of_iomap(np, 9);
++		if (!clk_info->rcpu2_base) {
++			pr_err("failed to map rcpu2 registers\n");
++			goto out;
++		}
++	} else {
++		pr_err("not spacemit,k1x-clock\n");
++		goto out;
++	}
++	ret = spacemit_ccu_probe(np, clk_info, hw_clks);
++	if (ret)
++		return;
++out:
++	return;
++}
++
++void *spacemit_get_ddr_freq_tbl(void)
++{
++	return spacemit_k1x_ddr_freq_tbl;
++}
++EXPORT_SYMBOL_GPL(spacemit_get_ddr_freq_tbl);
++
++u32 spacemit_get_ddr_freq_level(void)
++{
++	u32 ddr_freq_lvl = 0;
++	struct clk_hw *hw = spacemit_k1x_hw_clks.hws[CLK_DDR];
++
++	ddr_freq_lvl = clk_hw_get_parent_index(hw);
++
++	return ddr_freq_lvl;
++}
++EXPORT_SYMBOL_GPL(spacemit_get_ddr_freq_level);
++
++int spacemit_set_ddr_freq_level(u32 level)
++{
++	int ret = 0;
++	struct clk_hw *hw = spacemit_k1x_hw_clks.hws[CLK_DDR];
++
++	/* level is unsigned, so only the upper bound can be exceeded */
++	if (level > MAX_FREQ_LV)
++		return -EINVAL;
++
++	ret = clk_hw_set_parent(hw, clk_hw_get_parent_by_index(hw, level));
++	if (ret)
++		pr_err("%s : set ddr freq fail\n", __func__);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(spacemit_set_ddr_freq_level);
++
++CLK_OF_DECLARE(k1x_clock, "spacemit,k1x-clock", spacemit_k1x_ccu_probe);
++
+diff --git a/drivers/clk/spacemit/ccu-spacemit-k1x.h b/drivers/clk/spacemit/ccu-spacemit-k1x.h
+new file mode 100644
+index 000000000000..2662b9e40400
+--- /dev/null
++++ b/drivers/clk/spacemit/ccu-spacemit-k1x.h
+@@ -0,0 +1,81 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/* Copyright (c) 2023, spacemit Corporation. */
++
++#ifndef _CCU_SPACEMIT_K1X_H_
++#define _CCU_SPACEMIT_K1X_H_
++
++#include <linux/clk-provider.h>
++#include <linux/spinlock.h>
++
++enum ccu_base_type {
++	BASE_TYPE_MPMU = 0,
++	BASE_TYPE_APMU = 1,
++	BASE_TYPE_APBC = 2,
++	BASE_TYPE_APBS = 3,
++	BASE_TYPE_CIU = 4,
++	BASE_TYPE_DCIU = 5,
++	BASE_TYPE_DDRC = 6,
++	BASE_TYPE_AUDC = 7,
++	BASE_TYPE_APBC2 = 8,
++	BASE_TYPE_RCPU = 9,
++	BASE_TYPE_RCPU2 = 10,
++};
++
++enum {
++	CLK_DIV_TYPE_1REG_NOFC_V1 = 0,
++	CLK_DIV_TYPE_1REG_FC_V2,
++	CLK_DIV_TYPE_2REG_NOFC_V3,
++	CLK_DIV_TYPE_2REG_FC_V4,
++	CLK_DIV_TYPE_1REG_FC_DIV_V5,
++	CLK_DIV_TYPE_1REG_FC_MUX_V6,
++};
++
++struct ccu_common {
++	void __iomem *base;
++	enum ccu_base_type base_type;
++	u32 reg_type;
++	u32 reg_ctrl;
++	u32 reg_sel;
++	u32 reg_xtc;
++	u32 fc;
++	bool is_pll;
++	const char *name;
++	const struct clk_ops *ops;
++	const char * const *parent_names;
++	u8 num_parents;
++	unsigned long flags;
++	spinlock_t *lock;
++	struct clk_hw hw;
++};
++
++struct spacemit_k1x_clk {
++	void __iomem *mpmu_base;
++	void __iomem *apmu_base;
++	void __iomem *apbc_base;
++	void __iomem *apbs_base;
++	void __iomem *ciu_base;
++	void __iomem *dciu_base;
++	void __iomem *ddrc_base;
++	void __iomem *audio_ctrl_base;
++	void __iomem *apbc2_base;
++	void __iomem *rcpu_base;
++	void __iomem *rcpu2_base;
++};
++
++struct clk_hw_table {
++	char *name;
++	u32 clk_hw_id;
++};
++
++extern spinlock_t g_cru_lock;
++
++static inline struct ccu_common *hw_to_ccu_common(struct clk_hw *hw)
++{
++	return container_of(hw, struct ccu_common, hw);
++}
++
++int spacemit_ccu_probe(struct device_node *node,
++		       struct spacemit_k1x_clk *clk_info,
++		       struct clk_hw_onecell_data *desc);
++
++#endif /* _CCU_SPACEMIT_K1X_H_ */
+diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c
+new file mode 100644
+index 000000000000..a23d9dad8e32
+--- /dev/null
++++ b/drivers/clk/spacemit/ccu_ddn.c
+@@ -0,0 +1,161 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Spacemit clock type ddn
++ *
++ * Copyright (c) 2023, spacemit Corporation.
++ *
++ */
++
++#include <linux/clk-provider.h>
++#include <linux/io.h>
++#include "ccu_ddn.h"
++
++/*
++ * This is an M/N (fractional divider) clock, programmed from:
++ *   numerator / denominator = Fin / (Fout * factor)
++ * e.g. slow_uart1_14p74: 153.6 MHz * 24 / (125 * 2) = 14.7456 MHz
++ */
++
++static void ccu_ddn_disable(struct clk_hw *hw)
++{
++	struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
++	struct ccu_common *common = &ddn->common;
++	unsigned long flags;
++	u32 reg;
++
++	if (!ddn->gate)
++		return;
++
++	spin_lock_irqsave(common->lock, flags);
++	reg = readl(common->base + common->reg_sel);
++	writel(reg & ~ddn->gate, common->base + common->reg_sel);
++	spin_unlock_irqrestore(common->lock, flags);
++}
++
++static int ccu_ddn_enable(struct clk_hw *hw)
++{
++	struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
++	struct ccu_common *common = &ddn->common;
++	unsigned long flags;
++	u32 reg;
++
++	if (!ddn->gate)
++		return 0;
++
++	spin_lock_irqsave(common->lock, flags);
++	reg = readl(common->base + common->reg_sel);
++	writel(reg | ddn->gate, common->base + common->reg_sel);
++	spin_unlock_irqrestore(common->lock, flags);
++
++	return 0;
++}
++
++static int ccu_ddn_is_enabled(struct clk_hw *hw)
++{
++	struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
++	struct ccu_common *common = &ddn->common;
++
++	if (!ddn->gate)
++		return 1;
++
++	return readl(common->base + common->reg_sel) & ddn->gate;
++}
++
++static long clk_ddn_round_rate(struct clk_hw *hw, unsigned long drate,
++			       unsigned long *prate)
++{
++	struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
++	struct ccu_ddn_config *params = &ddn->ddn;
++	unsigned long rate = 0, prev_rate;
++	unsigned long result;
++	int i;
++
++	for (i = 0; i < params->tbl_size; i++) {
++		prev_rate = rate;
++		rate = (((*prate / 10000) * params->tbl[i].den) /
++			(params->tbl[i].num * params->info->factor)) * 10000;
++		if (rate > drate)
++			break;
++	}
++
++	if (i == 0 || i == params->tbl_size) {
++		result = rate;
++	} else {
++		if ((drate - prev_rate) > (rate - drate))
++			result = rate;
++		else
++			result = prev_rate;
++	}
++	return result;
++}
++
++static unsigned long clk_ddn_recalc_rate(struct clk_hw *hw,
++					 unsigned long parent_rate)
++{
++	struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
++	struct ccu_ddn_config *params = &ddn->ddn;
++	unsigned int val, num, den;
++	unsigned long rate;
++
++	val = readl(ddn->common.base + ddn->common.reg_ctrl);
++	num = (val >> params->info->num_shift) & params->info->num_mask;
++	den = (val >> params->info->den_shift) & params->info->den_mask;
++	if (!den)
++		return 0;
++
++	rate = (((parent_rate / 10000) * den) /
++		(num * params->info->factor)) * 10000;
++
++	return rate;
++}
++
++/* Configure the new clock rate */
++static int clk_ddn_set_rate(struct clk_hw *hw, unsigned long drate,
++			    unsigned long prate)
++{
++	struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
++	struct ccu_ddn_config *params = &ddn->ddn;
++	int i;
++	unsigned long val;
++	unsigned long prev_rate, rate = 0;
++	unsigned long flags = 0;
++
++	for (i = 0; i < params->tbl_size; i++) {
++		prev_rate = rate;
++		rate = (((prate / 10000) * params->tbl[i].den) /
++			(params->tbl[i].num * params->info->factor)) * 10000;
++		if (rate > drate)
++			break;
++	}
++
++	if (i > 0)
++		i--;
++
++	if (ddn->common.lock)
++		spin_lock_irqsave(ddn->common.lock, flags);
++
++	val = readl(ddn->common.base + ddn->common.reg_ctrl);
++	val &= ~(params->info->num_mask << params->info->num_shift);
++	val |= (params->tbl[i].num & params->info->num_mask)
++		<< params->info->num_shift;
++	val &= ~(params->info->den_mask << params->info->den_shift);
++	val |= (params->tbl[i].den & params->info->den_mask)
++		<< params->info->den_shift;
++	writel(val, ddn->common.base + ddn->common.reg_ctrl);
++
++	if (ddn->common.lock)
++		spin_unlock_irqrestore(ddn->common.lock, flags);
++
++	return 0;
++}
++
++const struct clk_ops ccu_ddn_ops = {
++	.disable = ccu_ddn_disable,
++	.enable = ccu_ddn_enable,
++	.is_enabled = ccu_ddn_is_enabled,
++	.recalc_rate = clk_ddn_recalc_rate,
++	.round_rate = clk_ddn_round_rate,
++	.set_rate = clk_ddn_set_rate,
++};
++
+diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h
+new file mode 100644
+index 000000000000..577f25250a11
+--- /dev/null
++++ b/drivers/clk/spacemit/ccu_ddn.h
+@@ -0,0 +1,86 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/* Copyright (c) 2023, spacemit Corporation. */
++
++#ifndef _CCU_DDN_H_
++#define _CCU_DDN_H_
++
++#include <linux/clk-provider.h>
++#include <linux/types.h>
++
++#include "ccu-spacemit-k1x.h"
++
++struct ccu_ddn_tbl {
++	unsigned int num;
++	unsigned int den;
++};
++
++struct ccu_ddn_info {
++	unsigned int factor;
++	unsigned int num_mask;
++	unsigned int den_mask;
++	unsigned int num_shift;
++	unsigned int den_shift;
++};
++
++struct ccu_ddn_config {
++	struct ccu_ddn_info *info;
++	struct ccu_ddn_tbl *tbl;
++	u32 tbl_size;
++};
++
++#define PLL_DDN_TBL(_num, _den)						\
++	{								\
++		.num = (_num),						\
++		.den = (_den),						\
++	}
++
++struct ccu_ddn {
++	u32 gate;
++	struct ccu_ddn_config ddn;
++	struct ccu_common common;
++};
++
++#define _SPACEMIT_CCU_DDN_CONFIG(_info, _table, _size)			\
++	{								\
++		.info = (struct ccu_ddn_info *)_info,			\
++		.tbl = (struct ccu_ddn_tbl *)_table,			\
++		.tbl_size = _size,					\
++	}
++
++#define SPACEMIT_CCU_DDN(_struct, _name, _parent, _info, _table,	\
++			 _size, _base_type, _reg_ctrl, _flags)		\
++	struct ccu_ddn _struct = {					\
++		.ddn = _SPACEMIT_CCU_DDN_CONFIG(_info, _table, _size),	\
++		.common = {						\
++			.reg_ctrl = _reg_ctrl,				\
++			.base_type = _base_type,			\
++			.hw.init = CLK_HW_INIT(_name,			\
++				_parent, &ccu_ddn_ops, _flags),		\
++		}							\
++	}
++
++#define SPACEMIT_CCU_DDN_GATE(_struct, _name, _parent, _info,		\
++			      _table, _size, _base_type, _reg_ddn,	\
++			      __reg_gate, _gate_mask, _flags)		\
++	struct ccu_ddn _struct = {					\
++		.gate = _gate_mask,					\
++		.ddn = _SPACEMIT_CCU_DDN_CONFIG(_info, _table, _size),	\
++		.common = {						\
++			.reg_ctrl = _reg_ddn,				\
++			.reg_sel = __reg_gate,				\
++			.base_type = _base_type,			\
++			.hw.init = CLK_HW_INIT(_name,			\
++				_parent, &ccu_ddn_ops, _flags),		\
++		}							\
++	}
++
++static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw)
++{
++	struct ccu_common *common = hw_to_ccu_common(hw);
++
++	return container_of(common, struct ccu_ddn, common);
++}
++
++extern const struct clk_ops ccu_ddn_ops;
++
++#endif
+diff --git a/drivers/clk/spacemit/ccu_ddr.c b/drivers/clk/spacemit/ccu_ddr.c
+new file mode 100644
+index 000000000000..ffd8650a6e79
+--- /dev/null
++++ b/drivers/clk/spacemit/ccu_ddr.c
+@@ -0,0 +1,272 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Spacemit clock type ddr
++ *
++ * Copyright (c) 2023, spacemit Corporation.
++ *
++ */
++#include <linux/clk-provider.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/math.h>
++#include <linux/spinlock.h>
++#include "ccu_ddr.h"
++
++#define PMU_AP_IMR			(0x098)
++#define AP_DCLK_FC_DONE_INT_MSK		BIT(15)
++#define DCLK_FC_DONE_INT_MSK		BIT(4)
++
++#define PMU_AP_ISR			(0x0a0)
++#define AP_DCLK_FC_DONE_INT_STS		BIT(15)
++#define DCLK_FC_DONE_INT_STS		BIT(4)
++#define AP_FC_STS			BIT(1)
++
++#define DFC_AP				(0x180)
++#define DFC_FREQ_LV			0x1
++#define DFC_REQ				BIT(0)
++
++#define DFC_STATUS			(0x188)
++#define DFC_CAUSE_SHIFT			0x7
++#define DFC_STS				BIT(0)
++
++/* enable/disable ddr frequency change done interrupt */
++static void ccu_ddr_enable_dfc_int(struct ccu_common *common, bool enable)
++{
++	u32 val;
++	unsigned long flags;
++
++	spin_lock_irqsave(common->lock, flags);
++	val = readl(common->base + PMU_AP_IMR);
++	if (enable)
++		val |= AP_DCLK_FC_DONE_INT_MSK;
++	else
++		val &= ~AP_DCLK_FC_DONE_INT_MSK;
++
++	writel(val, common->base + PMU_AP_IMR);
++	spin_unlock_irqrestore(common->lock, flags);
++}
++
++/* clear ddr frequency change done interrupt status */
++static void ccu_ddr_clear_dfc_int_status(struct ccu_common *common)
++{
++	u32 val;
++	unsigned long flags;
++
++	spin_lock_irqsave(common->lock, flags);
++	val = readl(common->base + PMU_AP_ISR);
++	val &= ~(AP_DCLK_FC_DONE_INT_STS | AP_FC_STS);
++	writel(val, common->base + PMU_AP_ISR);
++	spin_unlock_irqrestore(common->lock, flags);
++}
++
++static int ccu_ddr_wait_freq_change_done(struct ccu_common *common)
++{
++	int timeout = 100;
++	u32 val;
++
++	while (--timeout) {
++		udelay(10);
++		val = readl(common->base + PMU_AP_ISR);
++		if (val & AP_DCLK_FC_DONE_INT_STS)
++			break;
++	}
++	if (!timeout) {
++		pr_err("%s: wait dfc done timeout!\n", __func__);
++		return -EBUSY;
++	}
++	return 0;
++}
++
++static int ccu_ddr_freq_chg(struct ccu_common *common,
++			    struct ccu_mux_config *mux, u8 level)
++{
++	u32 reg;
++	u32 timeout;
++	unsigned long flags;
++
++	if (level > MAX_FREQ_LV) {
++		pr_err("%s: invalid %d freq level\n", __func__, level);
++		return -EINVAL;
++	}
++
++	/* check if dfc in progress */
++	timeout = 1000;
++	while (--timeout) {
++		if (!(readl(common->base + DFC_STATUS) & DFC_STS))
++			break;
++		udelay(10);
++	}
++
++	if (!timeout) {
++		pr_err("%s: another dfc is in progress. status:0x%x\n",
++		       __func__, readl(common->base + DFC_STATUS));
++		return -EBUSY;
++	}
++
++	spin_lock_irqsave(common->lock, flags);
++	reg = readl(common->base + common->reg_sel);
++	reg &= ~GENMASK(mux->width + mux->shift - 1, mux->shift);
++	writel(reg | (level << mux->shift) | common->fc,
++	       common->base + common->reg_sel);
++	spin_unlock_irqrestore(common->lock, flags);
++
++	timeout = 1000;
++	while (--timeout) {
++		udelay(10);
++		if (!(readl(common->base + DFC_STATUS) & DFC_STS))
++			break;
++	}
++
++	if (!timeout) {
++		pr_err("dfc error! status:0x%x\n",
++		       readl(common->base + DFC_STATUS));
++		return -EBUSY;
++	}
++
++	return 0;
++}
++
++static unsigned long ccu_ddr_recalc_rate(struct clk_hw *hw,
++					 unsigned long parent_rate)
++{
++	return parent_rate;
++}
++
++static long ccu_ddr_round_rate(struct clk_hw *hw, unsigned long rate,
++			       unsigned long *prate)
++{
++	return rate;
++}
++
++unsigned long ccu_ddr_calc_best_rate(struct clk_hw *hw, unsigned long rate,
++				     u32 *mux_val)
++{
++	struct ccu_ddr *ddr = hw_to_ccu_ddr(hw);
++	struct ccu_common *common = &ddr->common;
++	struct clk_hw *parent;
++	unsigned long parent_rate = 0, best_rate = 0;
++	u32 i;
++
++	for (i = 0; i < common->num_parents; i++) {
++		parent = clk_hw_get_parent_by_index(hw, i);
++		if (!parent)
++			continue;
++		/* use the cached hw rate; clk_hw_get_clk() would leak a struct clk */
++		parent_rate = clk_hw_get_rate(parent);
++		/* abs_diff() avoids wrap-around on unsigned subtraction */
++		if (abs_diff(parent_rate, rate) < abs_diff(best_rate, rate)) {
++			best_rate = parent_rate;
++			*mux_val = i;
++		}
++	}
++	return best_rate;
++}
++
++static int ccu_ddr_set_rate(struct clk_hw *hw, unsigned long rate,
++			    unsigned long parent_rate)
++{
++	struct ccu_ddr *ddr = hw_to_ccu_ddr(hw);
++	struct ccu_common *common = &ddr->common;
++	struct ccu_mux_config *mux = ddr->mux;
++	u32 cur_mux, mux_val = 0;
++	u32 reg = 0;
++
++	if (!mux)
++		return 0;
++
++	ccu_ddr_calc_best_rate(hw, rate, &mux_val);
++
++	reg = readl(common->base + common->reg_sel);
++	cur_mux = reg >> mux->shift;
++	cur_mux &= (1 << mux->width) - 1;
++	if (cur_mux != mux_val)
++		clk_hw_set_parent(hw, clk_hw_get_parent_by_index(hw, mux_val));
++
++	return 0;
++}
++
++static u8 ccu_ddr_get_parent(struct clk_hw *hw)
++{
++	struct ccu_ddr *ddr = hw_to_ccu_ddr(hw);
++	struct ccu_common *common = &ddr->common;
++	struct ccu_mux_config *mux = ddr->mux;
++	u32 reg;
++	u8 parent;
++
++	if (!mux)
++		return 0;
++
++	reg = readl(common->base + common->reg_sel);
++
++	parent = reg >> mux->shift;
++	parent &= (1 << mux->width) - 1;
++
++	if (mux->table) {
++		int num_parents = clk_hw_get_num_parents(&common->hw);
++		int i;
++
++		for (i = 0; i < num_parents; i++)
++			if (mux->table[i] == parent)
++				return i;
++	}
++	return parent;
++}
++
++static int ccu_ddr_set_parent(struct clk_hw *hw, u8 index)
++{
++	struct ccu_ddr *ddr = hw_to_ccu_ddr(hw);
++	struct ccu_common *common = &ddr->common;
++	struct ccu_mux_config *mux = ddr->mux;
++	int ret = 0;
++
++	if (!mux)
++		return 0;
++
++	if (mux->table)
++		index = mux->table[index];
++
++	/* request change begin */
++	ccu_ddr_enable_dfc_int(common, true);
++
++	/* change parent */
++	ret = ccu_ddr_freq_chg(common, mux, index);
++	if (ret < 0) {
++		pr_err("%s: ddr_freq_chg fail. ret = %d\n", __func__, ret);
++		return ret;
++	}
++
++	/* wait for frequency change done */
++	ret = ccu_ddr_wait_freq_change_done(common);
++	if (ret < 0) {
++		pr_err("%s: wait_freq_change_done timeout. ret = %d\n",
ret = %d\n",
++ __func__, ret);
++ return ret;
++ }
++ ccu_ddr_clear_dfc_int_status(common);
++ ccu_ddr_enable_dfc_int(common, false);
++
++ return 0;
++}
++
++static int ccu_ddr_determine_rate(struct clk_hw *hw,
++ struct clk_rate_request *req)
++{
++ unsigned long best_rate = req->rate;
++ u32 mux_val = 0;
++
++ best_rate = ccu_ddr_calc_best_rate(hw, req->rate, &mux_val);
++ req->rate = best_rate;
++ return 0;
++}
++
++const struct clk_ops ccu_ddr_ops = {
++ .get_parent = ccu_ddr_get_parent,
++ .set_parent = ccu_ddr_set_parent,
++ .determine_rate = ccu_ddr_determine_rate,
++ .round_rate = ccu_ddr_round_rate,
++ .recalc_rate = ccu_ddr_recalc_rate,
++ .set_rate = ccu_ddr_set_rate,
++};
++
+diff --git a/drivers/clk/spacemit/ccu_ddr.h b/drivers/clk/spacemit/ccu_ddr.h
+new file mode 100644
+index 000000000000..960ca3456796
+--- /dev/null
++++ b/drivers/clk/spacemit/ccu_ddr.h
+@@ -0,0 +1,44 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2023, spacemit Corporation. */
++
++#ifndef _CCU_DDR_H_
++#define _CCU_DDR_H_
++
++#include <linux/clk-provider.h>
++#include "ccu-spacemit-k1x.h"
++#include "ccu_mix.h"
++
++struct ccu_ddr {
++ struct ccu_mux_config *mux;
++ struct ccu_common common;
++};
++
++#define MAX_FREQ_LV 7
++
++#define SPACEMIT_CCU_DDR_FC(_struct, _name, _parents, _base_type, \
++ _reg, _fc, _shift, _width, _flags) \
++ struct ccu_ddr _struct = { \
++ .mux = CCU_MUX_INIT(_shift, _width, NULL, 0), \
++ .common = { \
++ .reg_sel = _reg, \
++ .fc = _fc, \
++ .base_type = _base_type, \
++ .name = _name, \
++ .parent_names = _parents, \
++ .num_parents = ARRAY_SIZE(_parents), \
++ .hw.init = CLK_HW_INIT_PARENTS(_name, \
++ _parents, &ccu_ddr_ops, \
++ (_flags) | CLK_GET_RATE_NOCACHE), \
++ } \
++ }
++
++static inline struct ccu_ddr *hw_to_ccu_ddr(struct clk_hw *hw)
++{
++ struct ccu_common *common = hw_to_ccu_common(hw);
++
++ return container_of(common, struct ccu_ddr, common);
++}
++
++extern const struct clk_ops ccu_ddr_ops;
++
++#endif /* _CCU_DDR_H_ */
+diff --git a/drivers/clk/spacemit/ccu_dpll.c b/drivers/clk/spacemit/ccu_dpll.c
+new file mode 100644
+index 000000000000..ff8b699e1ba2
+--- /dev/null
++++ b/drivers/clk/spacemit/ccu_dpll.c
+@@ -0,0 +1,124 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Spacemit clock type dpll
++ *
++ * Copyright (c) 2023, spacemit Corporation.
++ *
++ */
++
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++
++#include "ccu_dpll.h"
++
++#define DPLL_MIN_FREQ 1700000000
++#define DPLL_MAX_FREQ 3400000000
++
++#define pll_readl(reg) readl(reg)
++#define pll_readl_pll_swcr1(p) pll_readl(p.base + p.reg_ctrl)
++#define pll_readl_pll_swcr2(p) pll_readl(p.base + p.reg_sel)
++
++#define pll_writel(val, reg) writel(val, reg)
++#define pll_writel_pll_swcr1(val, p) pll_writel(val, p.base + p.reg_ctrl)
++#define pll_writel_pll_swcr2(val, p) pll_writel(val, p.base + p.reg_sel)
++
++/* unified dpllx_swcr1 for dpll1~2 */
++union dpllx_swcr1 {
++ struct {
++ unsigned int reg0:8;
++ unsigned int reg1:8;
++ unsigned int reg2:8;
++ unsigned int reg3:8;
++ } b;
++ unsigned int v;
++};
++
++/* unified dpllx_swcr2 for dpll1~2 */
++union dpllx_swcr2 {
++ struct {
++ unsigned int reg4:8;
++ unsigned int reg5:8;
++ unsigned int reg6:8;
++ unsigned int reg7:8;
++ } b;
++ unsigned int v;
++};
++
++/* frequency unit MHz, return pll vco freq */
++static unsigned long __get_vco_freq(struct clk_hw *hw)
++{
++ unsigned int reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7, size, i;
++ struct ccu_dpll_rate_tbl *freq_pll_regs_table, *pll_reg;
++ struct ccu_dpll *p = hw_to_ccu_dpll(hw);
++ union dpllx_swcr1 swcr1;
++ union dpllx_swcr2 swcr2;
++
++ swcr1.v = pll_readl_pll_swcr1(p->common);
++ swcr2.v = pll_readl_pll_swcr2(p->common);
++
++ reg0 = swcr1.b.reg0;
++ reg1 = swcr1.b.reg1;
++ reg2 = swcr1.b.reg2;
++ reg3 = swcr1.b.reg3;
++ reg4 = swcr2.b.reg4;
++ reg5 = swcr2.b.reg5;
++ reg6 = swcr2.b.reg6;
++ reg7 = swcr2.b.reg7;
++
++ freq_pll_regs_table = p->dpll.rate_tbl;
++ size = p->dpll.tbl_size;
++
++ for (i = 0; i < size; i++) {
++ pll_reg = &freq_pll_regs_table[i];
++ if (pll_reg->reg0 == reg0 && pll_reg->reg1 == reg1 &&
++ pll_reg->reg2 == reg2 && pll_reg->reg3 == reg3 &&
++ pll_reg->reg4 == reg4 && pll_reg->reg5 == reg5 &&
++ pll_reg->reg6 == reg6 && pll_reg->reg7 == reg7)
++ return pll_reg->rate;
++ }
++
++ pr_err("Unknown rate for clock %s\n", __clk_get_name(hw->clk));
++ return 0;
++}
++
++static unsigned long ccu_dpll_recalc_rate(struct clk_hw *hw,
++ unsigned long parent_rate)
++{
++ return __get_vco_freq(hw);
++}
++
++static long ccu_dpll_round_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long *prate)
++{
++ struct ccu_dpll *p = hw_to_ccu_dpll(hw);
++ unsigned long max_rate = 0;
++ unsigned int i;
++ struct ccu_dpll_config *params = &p->dpll;
++
++ if (rate > DPLL_MAX_FREQ || rate < DPLL_MIN_FREQ) {
++ pr_err("%lu rate out of range!\n", rate);
++ return -EINVAL;
++ }
++
++ if (params->rate_tbl) {
++ for (i = 0; i < params->tbl_size; i++) {
++ if (params->rate_tbl[i].rate <= rate) {
++ if (max_rate < params->rate_tbl[i].rate)
++ max_rate = params->rate_tbl[i].rate;
++ }
++ }
++ } else {
++ pr_err("no freq table found for pll\n");
++ }
++
++ return max_rate;
++}
++
++const struct clk_ops ccu_dpll_ops = {
++ .recalc_rate = ccu_dpll_recalc_rate,
++ .round_rate = ccu_dpll_round_rate,
++};
++
+diff --git a/drivers/clk/spacemit/ccu_dpll.h b/drivers/clk/spacemit/ccu_dpll.h
+new file mode 100644
+index 000000000000..d5632528dc1f
+--- /dev/null
++++ b/drivers/clk/spacemit/ccu_dpll.h
+@@ -0,0 +1,76 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2023, spacemit Corporation.
*/ ++ ++#ifndef _CCU_DPLL_H_ ++#define _CCU_DPLL_H_ ++ ++#include ++#include ++#include "ccu-spacemit-k1x.h" ++ ++struct ccu_dpll_rate_tbl { ++ unsigned long long rate; ++ u32 reg0; ++ u32 reg1; ++ u32 reg2; ++ u32 reg3; ++ u32 reg4; ++ u32 reg5; ++ u32 reg6; ++ u32 reg7; ++}; ++ ++struct ccu_dpll_config { ++ struct ccu_dpll_rate_tbl *rate_tbl; ++ u32 tbl_size; ++}; ++ ++#define DPLL_RATE(_rate, _reg0, _reg1, _reg2, _reg3, _reg4, \ ++ _reg5, _reg6, _reg7) \ ++ { \ ++ .rate = (_rate), \ ++ .reg0 = (_reg0), \ ++ .reg1 = (_reg1), \ ++ .reg2 = (_reg2), \ ++ .reg3 = (_reg3), \ ++ .reg4 = (_reg4), \ ++ .reg5 = (_reg5), \ ++ .reg6 = (_reg6), \ ++ .reg7 = (_reg7), \ ++ } ++ ++struct ccu_dpll { ++ struct ccu_dpll_config dpll; ++ struct ccu_common common; ++}; ++ ++#define _SPACEMIT_CCU_DPLL_CONFIG(_table, _size) \ ++ { \ ++ .rate_tbl = (struct ccu_dpll_rate_tbl *)_table, \ ++ .tbl_size = _size, \ ++ } ++ ++#define SPACEMIT_CCU_DPLL(_struct, _name, _table, _size, _base_type, \ ++ _reg_ctrl, _reg_sel, _is_pll, _flags) \ ++ struct ccu_dpll _struct = { \ ++ .dpll = _SPACEMIT_CCU_DPLL_CONFIG(_table, _size), \ ++ .common = { \ ++ .reg_ctrl = _reg_ctrl, \ ++ .reg_sel = _reg_sel, \ ++ .base_type = _base_type, \ ++ .is_pll = 0, \ ++ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ ++ &ccu_dpll_ops, _flags), \ ++ } \ ++ } ++ ++static inline struct ccu_dpll *hw_to_ccu_dpll(struct clk_hw *hw) ++{ ++ struct ccu_common *common = hw_to_ccu_common(hw); ++ ++ return container_of(common, struct ccu_dpll, common); ++} ++ ++extern const struct clk_ops ccu_dpll_ops; ++ ++#endif +diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c +new file mode 100644 +index 000000000000..baa341090f53 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_mix.c +@@ -0,0 +1,502 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Spacemit clock type mix(div/mux/gate/factor) ++ * ++ * Copyright (c) 2023, spacemit Corporation. 
++ *
++ */
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include "ccu_mix.h"
++
++#define TIMEOUT_LIMIT (20000)
++static int twsi8_reg_val = 0x04;
++static const char *tswi8_clk_name = "twsi8_clk";
++
++static void ccu_mix_disable(struct clk_hw *hw)
++{
++ struct ccu_mix *mix = hw_to_ccu_mix(hw);
++ struct ccu_common *common = &mix->common;
++ struct ccu_gate_config *gate = mix->gate;
++ unsigned long flags = 0;
++ unsigned long rate;
++ u32 tmp;
++
++ if (!gate)
++ return;
++
++ if (!strcmp(common->name, tswi8_clk_name)) {
++ twsi8_reg_val &= ~gate->gate_mask;
++ twsi8_reg_val |= gate->val_disable;
++ tmp = twsi8_reg_val;
++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 ||
++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4)
++ writel(tmp, common->base + common->reg_sel);
++ else
++ writel(tmp, common->base + common->reg_ctrl);
++ return;
++ }
++
++ if (common->lock)
++ spin_lock_irqsave(common->lock, flags);
++
++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 ||
++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4)
++ tmp = readl(common->base + common->reg_sel);
++ else
++ tmp = readl(common->base + common->reg_ctrl);
++
++ tmp &= ~gate->gate_mask;
++ tmp |= gate->val_disable;
++
++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 ||
++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4)
++ writel(tmp, common->base + common->reg_sel);
++ else
++ writel(tmp, common->base + common->reg_ctrl);
++
++ if (common->lock)
++ spin_unlock_irqrestore(common->lock, flags);
++
++ if (gate->flags & SPACEMIT_CLK_GATE_NEED_DELAY) {
++ rate = clk_hw_get_rate(&common->hw);
++
++ if (rate == 0)
++ pr_err("clock rate of %s is 0.\n",
++ clk_hw_get_name(&common->hw));
++ else
++ udelay(DIV_ROUND_UP(2000000, rate));
++ }
++}
++
++static int ccu_mix_enable(struct clk_hw *hw)
++{
++ struct ccu_mix *mix = hw_to_ccu_mix(hw);
++ struct ccu_common *common = &mix->common;
++ struct ccu_gate_config *gate = mix->gate;
++ unsigned long flags = 0;
++ unsigned long rate;
++ u32 tmp;
++ u32 val = 0;
++ int timeout_power = 1;
++
++ if (!gate)
++ return 0;
++
++ if (!strcmp(common->name, tswi8_clk_name)) {
++ twsi8_reg_val &= ~gate->gate_mask;
++ twsi8_reg_val |= gate->val_enable;
++ tmp = twsi8_reg_val;
++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 ||
++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4)
++ writel(tmp, common->base + common->reg_sel);
++ else
++ writel(tmp, common->base + common->reg_ctrl);
++ return 0;
++ }
++
++ if (common->lock)
++ spin_lock_irqsave(common->lock, flags);
++
++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 ||
++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4)
++ tmp = readl(common->base + common->reg_sel);
++ else
++ tmp = readl(common->base + common->reg_ctrl);
++
++ tmp &= ~gate->gate_mask;
++ tmp |= gate->val_enable;
++
++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 ||
++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4)
++ writel(tmp, common->base + common->reg_sel);
++ else
++ writel(tmp, common->base + common->reg_ctrl);
++
++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 ||
++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4)
++ val = readl(common->base + common->reg_sel);
++ else
++ val = readl(common->base + common->reg_ctrl);
++
++ if (common->lock)
++ spin_unlock_irqrestore(common->lock, flags);
++
++ while ((val & gate->gate_mask) != gate->val_enable &&
++ (timeout_power < TIMEOUT_LIMIT)) {
++ udelay(timeout_power);
++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 ||
++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4)
++ val = readl(common->base + common->reg_sel);
++
else ++ val = readl(common->base + common->reg_ctrl); ++ timeout_power *= 10; ++ } ++ ++ if (timeout_power > 1) { ++ if (val == tmp) ++ pr_err("write clk_gate %s timeout occur, read pass after %d us delay\n", ++ clk_hw_get_name(&common->hw), timeout_power); ++ else ++ pr_err("write clk_gate %s timeout after %d us!\n", ++ clk_hw_get_name(&common->hw), timeout_power); ++ } ++ ++ if (gate->flags & SPACEMIT_CLK_GATE_NEED_DELAY) { ++ rate = clk_hw_get_rate(&common->hw); ++ ++ if (rate == 0) ++ pr_err("clock rate of %s is 0.\n", ++ clk_hw_get_name(&common->hw)); ++ else ++ udelay(DIV_ROUND_UP(2000000, rate)); ++ } ++ ++ return 0; ++} ++ ++static int ccu_mix_is_enabled(struct clk_hw *hw) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_gate_config *gate = mix->gate; ++ unsigned long flags = 0; ++ u32 tmp; ++ ++ if (!gate) ++ return 1; ++ ++ if (!strcmp(common->name, tswi8_clk_name)) ++ return (twsi8_reg_val & gate->gate_mask) == gate->val_enable; ++ ++ if (common->lock) ++ spin_lock_irqsave(common->lock, flags); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ tmp = readl(common->base + common->reg_sel); ++ else ++ tmp = readl(common->base + common->reg_ctrl); ++ ++ if (common->lock) ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ return (tmp & gate->gate_mask) == gate->val_enable; ++} ++ ++static unsigned long ccu_mix_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_div_config *div = mix->div; ++ unsigned long val; ++ u32 reg; ++ ++ if (!div) { ++ if (mix->factor) ++ parent_rate = parent_rate * mix->factor->mul / mix->factor->div; ++ return parent_rate; ++ } ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ val = reg >> div->shift; ++ val &= (1 << div->width) - 1; ++ ++ val = divider_recalc_rate(hw, parent_rate, val, div->table, ++ div->flags, div->width); ++ ++ return val; ++} ++ ++static int ccu_mix_trigger_fc(struct clk_hw *hw) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ unsigned long val = 0; ++ ++ int ret = 0, timeout = 50; ++ ++ if (common->reg_type == CLK_DIV_TYPE_1REG_FC_V2 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4 || ++ common->reg_type == CLK_DIV_TYPE_1REG_FC_DIV_V5 || ++ common->reg_type == CLK_DIV_TYPE_1REG_FC_MUX_V6) { ++ timeout = 50; ++ val = readl(common->base + common->reg_ctrl); ++ val |= common->fc; ++ writel(val, common->base + common->reg_ctrl); ++ ++ do { ++ val = readl(common->base + common->reg_ctrl); ++ timeout--; ++ if (!(val & common->fc)) ++ break; ++ } while (timeout); ++ ++ if (timeout == 0) { ++ timeout = 5000; ++ do { ++ val = readl(common->base + common->reg_ctrl); ++ timeout--; ++ if (!(val & common->fc)) ++ break; ++ } while (timeout); ++ if (timeout != 0) ++ ret = 0; ++ else ++ ret = -1; ++ } ++ } ++ ++ return ret; ++} ++ ++static long ccu_mix_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *prate) ++{ ++ return rate; ++} ++ ++unsigned long ccu_mix_calc_best_rate(struct clk_hw *hw, ++ unsigned long rate, u32 *mux_val, ++ u32 *div_val) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_div_config *div = mix->div ? 
mix->div : NULL; ++ struct clk *clk; ++ struct clk_hw *parent; ++ unsigned long parent_rate = 0, best_rate = 0; ++ u32 i, j, div_max; ++ ++ for (i = 0; i < common->num_parents; i++) { ++ parent = clk_hw_get_parent_by_index(hw, i); ++ if (!parent) ++ continue; ++ clk = clk_hw_get_clk(parent, common->name); ++ parent_rate = clk_get_rate(clk); ++ ++ if (div) ++ div_max = 1 << div->width; ++ else ++ div_max = 1; ++ ++ for (j = 1; j <= div_max; j++) { ++ if (abs(parent_rate / j - rate) ++ < abs(best_rate - rate)) { ++ best_rate = DIV_ROUND_UP_ULL(parent_rate, j); ++ *mux_val = i; ++ *div_val = j - 1; ++ } ++ } ++ } ++ ++ return best_rate; ++} ++ ++static int ccu_mix_determine_rate(struct clk_hw *hw, ++ struct clk_rate_request *req) ++{ ++ return 0; ++} ++ ++static int ccu_mix_set_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long parent_rate) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_div_config *div = mix->div ? mix->div : NULL; ++ struct ccu_mux_config *mux = mix->mux ? mix->mux : NULL; ++ struct clk_hw *parent; ++ unsigned long best_rate = 0; ++ unsigned long flags; ++ u32 cur_mux, cur_div, mux_val = 0, div_val = 0; ++ u32 reg = 0; ++ int ret = 0; ++ ++ if (!div && !mux) ++ return 0; ++ ++ best_rate = ccu_mix_calc_best_rate(hw, rate, &mux_val, &div_val); ++ if (!strcmp(common->name, tswi8_clk_name)) { ++ if (mux) { ++ cur_mux = twsi8_reg_val >> mux->shift; ++ cur_mux &= (1 << mux->width) - 1; ++ parent = clk_hw_get_parent_by_index(hw, mux_val); ++ if (cur_mux != mux_val) ++ clk_hw_set_parent(hw, parent); ++ } ++ return 0; ++ } ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ if (mux) { ++ cur_mux = reg >> mux->shift; ++ cur_mux &= (1 << mux->width) - 1; ++ parent = clk_hw_get_parent_by_index(hw, mux_val); ++ if (cur_mux != mux_val) ++ clk_hw_set_parent(hw, parent); ++ } ++ ++ if (div) { ++ cur_div = reg >> div->shift; ++ cur_div &= (1 << div->width) - 1; ++ if (cur_div == div_val) ++ return 0; ++ } else { ++ return 0; ++ } ++ ++ spin_lock_irqsave(common->lock, flags); ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ reg &= ~GENMASK(div->width + div->shift - 1, div->shift); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(reg | (div_val << div->shift), ++ common->base + common->reg_sel); ++ else ++ writel(reg | (div_val << div->shift), ++ common->base + common->reg_ctrl); ++ ++ if (common->reg_type == CLK_DIV_TYPE_1REG_FC_V2 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4 || ++ common->reg_type == CLK_DIV_TYPE_1REG_FC_DIV_V5) { ++ ret = ccu_mix_trigger_fc(hw); ++ } ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ if (ret) ++ pr_err("%s of %s timeout\n", __func__, ++ clk_hw_get_name(&common->hw)); ++ ++ return ret; ++} ++ ++static u8 ccu_mix_get_parent(struct clk_hw *hw) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_mux_config *mux = mix->mux; ++ u32 reg; ++ u8 parent; ++ ++ if (!mux) ++ return 0; ++ ++ if (!strcmp(common->name, tswi8_clk_name)) { ++ parent = twsi8_reg_val >> mux->shift; ++ parent &= (1 << mux->width) - 1; ++ return parent; ++ } ++ ++ if 
(common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ parent = reg >> mux->shift; ++ parent &= (1 << mux->width) - 1; ++ ++ if (mux->table) { ++ int num_parents = clk_hw_get_num_parents(&common->hw); ++ int i; ++ ++ for (i = 0; i < num_parents; i++) ++ if (mux->table[i] == parent) ++ return i; ++ } ++ return parent; ++} ++ ++static int ccu_mix_set_parent(struct clk_hw *hw, u8 index) ++{ ++ struct ccu_mix *mix = hw_to_ccu_mix(hw); ++ struct ccu_common *common = &mix->common; ++ struct ccu_mux_config *mux = mix->mux; ++ unsigned long flags; ++ u32 reg = 0; ++ int ret = 0; ++ ++ if (!mux) ++ return 0; ++ ++ if (mux->table) ++ index = mux->table[index]; ++ ++ if (!strcmp(common->name, tswi8_clk_name)) { ++ twsi8_reg_val &= ~GENMASK(mux->width ++ + mux->shift - 1, mux->shift); ++ twsi8_reg_val |= (index << mux->shift); ++ reg = twsi8_reg_val; ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(reg, common->base + common->reg_sel); ++ else ++ writel(reg, common->base + common->reg_ctrl); ++ return 0; ++ } ++ ++ spin_lock_irqsave(common->lock, flags); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ reg = readl(common->base + common->reg_sel); ++ else ++ reg = readl(common->base + common->reg_ctrl); ++ ++ reg &= ~GENMASK(mux->width + mux->shift - 1, mux->shift); ++ ++ if (common->reg_type == CLK_DIV_TYPE_2REG_NOFC_V3 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4) ++ writel(reg | (index << mux->shift), ++ common->base + common->reg_sel); ++ else ++ writel(reg | (index << mux->shift), ++ common->base + common->reg_ctrl); ++ ++ if (common->reg_type == CLK_DIV_TYPE_1REG_FC_V2 || ++ common->reg_type == CLK_DIV_TYPE_2REG_FC_V4 || ++ common->reg_type == CLK_DIV_TYPE_1REG_FC_MUX_V6) { ++ ret = ccu_mix_trigger_fc(hw); ++ } ++ spin_unlock_irqrestore(common->lock, flags); ++ ++ if (ret) ++ pr_err("%s of %s timeout\n", __func__, ++ clk_hw_get_name(&common->hw)); ++ ++ return 0; ++} ++ ++const struct clk_ops ccu_mix_ops = { ++ .disable = ccu_mix_disable, ++ .enable = ccu_mix_enable, ++ .is_enabled = ccu_mix_is_enabled, ++ .get_parent = ccu_mix_get_parent, ++ .set_parent = ccu_mix_set_parent, ++ .determine_rate = ccu_mix_determine_rate, ++ .round_rate = ccu_mix_round_rate, ++ .recalc_rate = ccu_mix_recalc_rate, ++ .set_rate = ccu_mix_set_rate, ++}; ++ +diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h +new file mode 100644 +index 000000000000..4b7d67cb0225 +--- /dev/null ++++ b/drivers/clk/spacemit/ccu_mix.h +@@ -0,0 +1,380 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2023, spacemit Corporation. 
*/ ++ ++#ifndef _CCU_MIX_H_ ++#define _CCU_MIX_H_ ++ ++#include ++#include "ccu-spacemit-k1x.h" ++ ++#define SPACEMIT_CLK_GATE_NEED_DELAY BIT(0) ++ ++struct ccu_gate_config { ++ u32 gate_mask; ++ u32 val_enable; ++ u32 val_disable; ++ u32 flags; ++}; ++ ++struct ccu_factor_config { ++ u32 div; ++ u32 mul; ++}; ++ ++struct ccu_mux_config { ++ u8 shift; ++ u8 width; ++ const u8 *table; ++ u32 flags; ++}; ++ ++struct ccu_div_config { ++ u8 shift; ++ u8 width; ++ u32 max; ++ u32 offset; ++ u32 flags; ++ struct clk_div_table *table; ++}; ++ ++struct ccu_mix { ++ struct ccu_gate_config *gate; ++ struct ccu_factor_config *factor; ++ struct ccu_div_config *div; ++ struct ccu_mux_config *mux; ++ struct ccu_common common; ++}; ++ ++#define CCU_GATE_INIT(_gate_mask, _val_enable, _val_disable, _flags) \ ++ (&(struct ccu_gate_config) { \ ++ .gate_mask = _gate_mask, \ ++ .val_enable = _val_enable, \ ++ .val_disable = _val_disable, \ ++ .flags = _flags, \ ++ }) ++ ++#define CCU_FACTOR_INIT(_div, _mul) \ ++ (&(struct ccu_factor_config) { \ ++ .div = _div, \ ++ .mul = _mul, \ ++ }) ++ ++#define CCU_MUX_INIT(_shift, _width, _table, _flags) \ ++ (&(struct ccu_mux_config) { \ ++ .shift = _shift, \ ++ .width = _width, \ ++ .table = _table, \ ++ .flags = _flags, \ ++ }) ++ ++#define CCU_DIV_INIT(_shift, _width, _table, _flags) \ ++ (&(struct ccu_div_config) { \ ++ .shift = _shift, \ ++ .width = _width, \ ++ .flags = _flags, \ ++ .table = _table, \ ++ }) ++ ++#define SPACEMIT_CCU_GATE(_struct, _name, _parent, _base_type, _reg, \ ++ _gate_mask, _val_enable, _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, _parent, \ ++ &ccu_mix_ops, _flags), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_GATE_NO_PARENT(_struct, _name, _parent, \ ++ _base_type, _reg, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 0, \ ++ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \ ++ &ccu_mix_ops, _flags), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_FACTOR(_struct, _name, _parent, _div, _mul) \ ++ struct ccu_mix _struct = { \ ++ .factor = CCU_FACTOR_INIT(_div, _mul), \ ++ .common = { \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, \ ++ _parent, &ccu_mix_ops, 0), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_MUX(_struct, _name, _parents, _base_type, \ ++ _reg, _shift, _width, _flags) \ ++ struct ccu_mix _struct = { \ ++ .mux = CCU_MUX_INIT(_shift, _width, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_DIV(_struct, _name, _parent, _base_type, \ ++ _reg, _shift, _width, _flags) \ ++ struct ccu_mix _struct = { \ ++ .div = CCU_DIV_INIT(_shift, _width, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, _parent, \ ++ &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ } \ ++ } ++ ++#define 
SPACEMIT_CCU_GATE_FACTOR(_struct, _name, _parent, _base_type, \ ++ _reg, _gate_mask, _val_enable, _val_disable, \ ++ _div, _mul, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .factor = CCU_FACTOR_INIT(_div, _mul), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, _parent, \ ++ &ccu_mix_ops, _flags), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_MUX_GATE(_struct, _name, _parents, _base_type, \ ++ _reg, _shift, _width, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .mux = CCU_MUX_INIT(_shift, _width, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_DIV_GATE(_struct, _name, _parent, _base_type, \ ++ _reg, _shift, _width, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_shift, _width, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, _parent, \ ++ &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ } \ ++ } ++ ++#define SPACEMIT_CCU_DIV_MUX_GATE(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _muxshift, \ ++ _muxwidth, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg_ctrl, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV2_FC_MUX_GATE(_struct, _name, _parents, \ ++ _base_type, _reg_ctrl, _reg_sel, _mshift, \ ++ _mwidth, _fc, _muxshift, _muxwidth, _gate_mask, \ ++ _val_enable, _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_2REG_FC_V4, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .reg_sel = _reg_sel, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_FC_MUX_GATE(_struct, _name, _parents, \ ++ _base_type, _reg_ctrl, _mshift, _mwidth, _fc, \ ++ _muxshift, _muxwidth, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ 
.common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_MFC_MUX_GATE(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _fc, _muxshift, \ ++ _muxwidth, _gate_mask, _val_enable, \ ++ _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, \ ++ _val_enable, _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_MUX_V6, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_FC_WITH_GATE(_struct, _name, _parent, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _fc, _gate_mask, \ ++ _val_enable, _val_disable, _flags) \ ++ struct ccu_mix _struct = { \ ++ .gate = CCU_GATE_INIT(_gate_mask, _val_enable, \ ++ _val_disable, 0), \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .num_parents = 1, \ ++ .hw.init = CLK_HW_INIT(_name, \ ++ _parent, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_MUX(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _muxshift, _muxwidth, _flags) \ ++ struct ccu_mix _struct = { \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_ctrl = _reg_ctrl, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_DIV_FC_MUX(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _mshift, _mwidth, _fc, _muxshift, \ ++ _muxwidth, _flags) \ ++ struct ccu_mix _struct = { \ ++ .div = CCU_DIV_INIT(_mshift, _mwidth, NULL, 0), \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++#define SPACEMIT_CCU_MUX_FC(_struct, _name, _parents, _base_type, \ ++ _reg_ctrl, _fc, _muxshift, _muxwidth, _flags) \ ++ struct ccu_mix _struct = { \ ++ .mux = CCU_MUX_INIT(_muxshift, _muxwidth, NULL, 0), \ ++ .common = { \ ++ .reg_type = CLK_DIV_TYPE_1REG_FC_V2, \ ++ .reg_ctrl = _reg_ctrl, \ ++ .fc = _fc, \ ++ .base_type = _base_type, \ ++ .name = _name, \ ++ .parent_names = _parents, \ ++ .num_parents = ARRAY_SIZE(_parents), \ ++ .hw.init = CLK_HW_INIT_PARENTS(_name, \ ++ _parents, &ccu_mix_ops, \ ++ (_flags) | CLK_GET_RATE_NOCACHE), \ ++ }, \ ++ } ++ ++static inline struct ccu_mix 
*hw_to_ccu_mix(struct clk_hw *hw)
++{
++ struct ccu_common *common = hw_to_ccu_common(hw);
++
++ return container_of(common, struct ccu_mix, common);
++}
++
++extern const struct clk_ops ccu_mix_ops;
++
++#endif /* _CCU_MIX_H_ */
+diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
+new file mode 100644
+index 000000000000..9bc4d1de8b33
+--- /dev/null
++++ b/drivers/clk/spacemit/ccu_pll.c
+@@ -0,0 +1,286 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Spacemit clock type pll
++ *
++ * Copyright (c) 2023, spacemit Corporation.
++ *
++ */
++
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++
++#include "ccu_pll.h"
++
++#define PLL_MIN_FREQ 600000000
++#define PLL_MAX_FREQ 3400000000
++#define PLL_DELAYTIME 590
++
++#define pll_readl(reg) readl(reg)
++#define pll_readl_pll_swcr1(p) pll_readl(p.base + p.reg_ctrl)
++#define pll_readl_pll_swcr2(p) pll_readl(p.base + p.reg_sel)
++#define pll_readl_pll_swcr3(p) pll_readl(p.base + p.reg_xtc)
++
++#define pll_writel(val, reg) writel(val, reg)
++#define pll_writel_pll_swcr1(val, p) pll_writel(val, p.base + p.reg_ctrl)
++#define pll_writel_pll_swcr2(val, p) pll_writel(val, p.base + p.reg_sel)
++#define pll_writel_pll_swcr3(val, p) pll_writel(val, p.base + p.reg_xtc)
++
++/* unified pllx_swcr1 for pll1~3 */
++union pllx_swcr1 {
++ struct {
++ unsigned int reg5:8;
++ unsigned int reg6:8;
++ unsigned int reg7:8;
++ unsigned int reg8:8;
++ } b;
++ unsigned int v;
++};
++
++/* unified pllx_swcr2 for pll1~3 */
++union pllx_swcr2 {
++ struct {
++ unsigned int div1_en:1;
++ unsigned int div2_en:1;
++ unsigned int div3_en:1;
++ unsigned int div4_en:1;
++ unsigned int div5_en:1;
++ unsigned int div6_en:1;
++ unsigned int div7_en:1;
++ unsigned int div8_en:1;
++ unsigned int reserved1:4;
++ unsigned int atest_en:1;
++ unsigned int cktest_en:1;
++ unsigned int dtest_en:1;
++ unsigned int rdo:2;
++ unsigned int mon_cfg:4;
++ unsigned int reserved2:11;
++ } b;
++ unsigned int v;
++};
++
++union pllx_swcr3 {
++ struct {
++ unsigned int div_frc:24;
++ unsigned int div_int:7;
++ unsigned int pll_en:1;
++ } b;
++
++ unsigned int v;
++};
++
++static int ccu_pll_is_enabled(struct clk_hw *hw)
++{
++ struct ccu_pll *p = hw_to_ccu_pll(hw);
++ union pllx_swcr3 swcr3;
++ unsigned int enabled;
++
++ swcr3.v = pll_readl_pll_swcr3(p->common);
++ enabled = swcr3.b.pll_en;
++
++ return enabled;
++}
++
++static unsigned long __get_vco_freq(struct clk_hw *hw)
++{
++ unsigned int reg5, reg6, reg7, reg8, size, i;
++ unsigned int div_int, div_frc;
++ struct ccu_pll_rate_tbl *freq_pll_regs_table, *pll_regs;
++ struct ccu_pll *p = hw_to_ccu_pll(hw);
++ union pllx_swcr1 swcr1;
++ union pllx_swcr3 swcr3;
++
++ swcr1.v = pll_readl_pll_swcr1(p->common);
++ swcr3.v = pll_readl_pll_swcr3(p->common);
++
++ reg5 = swcr1.b.reg5;
++ reg6 = swcr1.b.reg6;
++ reg7 = swcr1.b.reg7;
++ reg8 = swcr1.b.reg8;
++
++ div_int = swcr3.b.div_int;
++ div_frc = swcr3.b.div_frc;
++
++ freq_pll_regs_table = p->pll.rate_tbl;
++ size = p->pll.tbl_size;
++
++ for (i = 0; i < size; i++) {
++ pll_regs = &freq_pll_regs_table[i];
++ if (pll_regs->reg5 == reg5 && pll_regs->reg6 == reg6 &&
++ pll_regs->reg7 == reg7 && pll_regs->reg8 == reg8 &&
++ pll_regs->div_int == div_int &&
++ pll_regs->div_frac == div_frc)
++ return pll_regs->rate;
++ }
++
++ pr_err("Unknown rate for clock %s\n", __clk_get_name(hw->clk));
++
++ return 0;
++}
++
++static int ccu_pll_enable(struct clk_hw *hw)
++{
++ unsigned int delaytime = PLL_DELAYTIME;
++ unsigned long flags;
++ struct ccu_pll *p =
hw_to_ccu_pll(hw);
++ union pllx_swcr3 swcr3;
++
++ if (ccu_pll_is_enabled(hw))
++ return 0;
++
++ spin_lock_irqsave(p->common.lock, flags);
++ swcr3.v = pll_readl_pll_swcr3(p->common);
++ swcr3.b.pll_en = 1;
++ pll_writel_pll_swcr3(swcr3.v, p->common);
++ spin_unlock_irqrestore(p->common.lock, flags);
++
++ /* check lock status */
++ udelay(50);
++
++ while ((!(readl(p->pll.lock_base + p->pll.reg_lock)
++ & p->pll.lock_enable_bit)) && delaytime) {
++ udelay(5);
++ delaytime--;
++ }
++
++ if (unlikely(!delaytime)) {
++ pr_err("%s enabling didn't stabilize within 3000us!\n",
++ __clk_get_name(hw->clk));
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void ccu_pll_disable(struct clk_hw *hw)
++{
++ unsigned long flags;
++ struct ccu_pll *p = hw_to_ccu_pll(hw);
++ union pllx_swcr3 swcr3;
++
++ spin_lock_irqsave(p->common.lock, flags);
++ swcr3.v = pll_readl_pll_swcr3(p->common);
++ swcr3.b.pll_en = 0;
++ pll_writel_pll_swcr3(swcr3.v, p->common);
++ spin_unlock_irqrestore(p->common.lock, flags);
++}
++
++/*
++ * pll rate change requires sequence:
++ * clock off -> change rate setting -> clock on
++ * This function doesn't really change rate, but cache the config
++ */
++static int ccu_pll_set_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long parent_rate)
++{
++ unsigned int i, reg5 = 0, reg6 = 0, reg7 = 0, reg8 = 0;
++ unsigned int div_int = 0, div_frc = 0;
++ unsigned long flags;
++ unsigned long new_rate = rate, old_rate;
++ struct ccu_pll *p = hw_to_ccu_pll(hw);
++ struct ccu_pll_config *params = &p->pll;
++ union pllx_swcr1 swcr1;
++ union pllx_swcr3 swcr3;
++ bool found = false;
++ bool pll_enabled = false;
++
++ if (ccu_pll_is_enabled(hw)) {
++ pll_enabled = true;
++ ccu_pll_disable(hw);
++ }
++
++ old_rate = __get_vco_freq(hw);
++
++ /* step 1: calculate fbd frcd kvco and band */
++ if (params->rate_tbl) {
++ for (i = 0; i < params->tbl_size; i++) {
++ if (rate == params->rate_tbl[i].rate) {
++ found = true;
++
++ reg5 = params->rate_tbl[i].reg5;
++ reg6 = params->rate_tbl[i].reg6;
++ reg7 = params->rate_tbl[i].reg7;
++ reg8 = params->rate_tbl[i].reg8;
++ div_int = params->rate_tbl[i].div_int;
++ div_frc = params->rate_tbl[i].div_frac;
++ break;
++ }
++ }
++
++ WARN_ON_ONCE(!found);
++ } else {
++ pr_err("no freq table found for pll\n");
++ if (pll_enabled)
++ ccu_pll_enable(hw);
++ return -EINVAL;
++ }
++
++ spin_lock_irqsave(p->common.lock, flags);
++
++ /* step 2: set pll kvco/band and fbd/frcd setting */
++ swcr1.v = pll_readl_pll_swcr1(p->common);
++ swcr1.b.reg5 = reg5;
++ swcr1.b.reg6 = reg6;
++ swcr1.b.reg7 = reg7;
++ swcr1.b.reg8 = reg8;
++ pll_writel_pll_swcr1(swcr1.v, p->common);
++
++ swcr3.v = pll_readl_pll_swcr3(p->common);
++ swcr3.b.div_int = div_int;
++ swcr3.b.div_frc = div_frc;
++ pll_writel_pll_swcr3(swcr3.v, p->common);
++
++ spin_unlock_irqrestore(p->common.lock, flags);
++
++ if (pll_enabled)
++ ccu_pll_enable(hw);
++
++ pr_debug("%s %s rate %lu->%lu!\n", __func__,
++ __clk_get_name(hw->clk), old_rate, new_rate);
++ return 0;
++}
++
++static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
++ unsigned long parent_rate)
++{
++ return __get_vco_freq(hw);
++}
++
++static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long *prate)
++{
++ struct ccu_pll *p = hw_to_ccu_pll(hw);
++ unsigned long max_rate = 0;
++ unsigned int i;
++ struct ccu_pll_config *params = &p->pll;
++
++ if (rate > PLL_MAX_FREQ || rate < PLL_MIN_FREQ) {
++ pr_err("%lu rate out of range!\n", rate);
++ return -EINVAL;
++ }
++
++ if (params->rate_tbl) {
++ for (i = 0; i < params->tbl_size; i++) {
++ if (params->rate_tbl[i].rate <= rate) {
++ if (max_rate < params->rate_tbl[i].rate)
++ max_rate = params->rate_tbl[i].rate;
++ }
++ }
++ } else {
++ pr_err("no freq table found for pll\n");
++ }
++
++ return max_rate;
++}
++
++const struct clk_ops ccu_pll_ops = {
++ .enable = ccu_pll_enable,
++ .disable = ccu_pll_disable,
++ .set_rate = ccu_pll_set_rate,
++ .recalc_rate = ccu_pll_recalc_rate,
++ .round_rate = ccu_pll_round_rate,
++ .is_enabled = ccu_pll_is_enabled,
++};
++
+diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h
+new file mode 100644
+index 000000000000..0f6f2ed397da
+--- /dev/null
++++ b/drivers/clk/spacemit/ccu_pll.h
+@@ -0,0 +1,79 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/* Copyright (c) 2023, spacemit Corporation. */
++
++#ifndef _CCU_PLL_H_
++#define _CCU_PLL_H_
++
++#include <linux/clk-provider.h>
++#include <linux/io.h>
++#include "ccu-spacemit-k1x.h"
++
++struct ccu_pll_rate_tbl {
++ unsigned long long rate;
++ u32 reg5;
++ u32 reg6;
++ u32 reg7;
++ u32 reg8;
++ unsigned int div_int;
++ unsigned int div_frac;
++};
++
++struct ccu_pll_config {
++ struct ccu_pll_rate_tbl *rate_tbl;
++ u32 tbl_size;
++ void __iomem *lock_base;
++ u32 reg_lock;
++ u32 lock_enable_bit;
++};
++
++#define PLL_RATE(_rate, _reg5, _reg6, _reg7, _reg8, _div_int, _div_frac) \
++ { \
++ .rate = (_rate), \
++ .reg5 = (_reg5), \
++ .reg6 = (_reg6), \
++ .reg7 = (_reg7), \
++ .reg8 = (_reg8), \
++ .div_int = (_div_int), \
++ .div_frac = (_div_frac), \
++ }
++
++struct ccu_pll {
++ struct ccu_pll_config pll;
++ struct ccu_common common;
++};
++
++#define _SPACEMIT_CCU_PLL_CONFIG(_table, _size, _reg_lock, _lock_enable_bit) \
++ { \
++ .rate_tbl = (struct ccu_pll_rate_tbl *)_table, \
++ .tbl_size = _size, \
++ .reg_lock = _reg_lock, \
++ .lock_enable_bit = _lock_enable_bit, \
++ }
++
++#define SPACEMIT_CCU_PLL(_struct, _name, _table, _size, _base_type, \
++ _reg_ctrl, _reg_sel, _reg_xtc, _reg_lock, \
++ _lock_enable_bit, _is_pll, _flags) \
++ struct ccu_pll _struct = { \
++ .pll = _SPACEMIT_CCU_PLL_CONFIG(_table, _size, \
++ _reg_lock, _lock_enable_bit), \
++ .common = { \
++ .reg_ctrl = _reg_ctrl, \
++ .reg_sel = _reg_sel, \
++ .reg_xtc = _reg_xtc, \
++ .base_type = _base_type, \
++ .is_pll = _is_pll, \
++ .hw.init = CLK_HW_INIT_NO_PARENT(_name, \
++ &ccu_pll_ops, _flags), \
++ } \
++ }
++
++static inline struct ccu_pll *hw_to_ccu_pll(struct clk_hw *hw)
++{
++ struct ccu_common *common = hw_to_ccu_common(hw);
++
++ return container_of(common, struct ccu_pll, common);
++}
++
++extern const struct clk_ops ccu_pll_ops;
++
++#endif
+diff --git a/drivers/clk/xuantie/Kconfig b/drivers/clk/xuantie/Kconfig
+new file mode 100644
+index 000000000000..9a2ee8c01bf3
+--- /dev/null
++++ b/drivers/clk/xuantie/Kconfig
+@@ -0,0 +1,12 @@
++# SPDX-License-Identifier: GPL-2.0
++
++config XUANTIE_CLK
++ bool
++ def_bool ARCH_XUANTIE
++
++config CLK_TH1520_FM
++ bool "XuanTie Th1520 Fullmask Clock Driver"
++ depends on ARCH_XUANTIE
++ default n
++ help
++ Build the clock driver for the XuanTie TH1520 fullmask SoC.
+diff --git a/drivers/clk/xuantie/Makefile b/drivers/clk/xuantie/Makefile
+new file mode 100644
+index 000000000000..58e0ab431ae5
+--- /dev/null
++++ b/drivers/clk/xuantie/Makefile
+@@ -0,0 +1,7 @@
++# SPDX-License-Identifier: GPL-2.0
++
++obj-$(CONFIG_XUANTIE_CLK) += \
++ clk.o
++
++obj-$(CONFIG_CLK_TH1520_FM) += clk-th1520-fm.o
++obj-$(CONFIG_CLK_TH1520_FM) += gate/
+diff --git a/drivers/clk/xuantie/clk-th1520-fm.c b/drivers/clk/xuantie/clk-th1520-fm.c
+new file mode 100644
+index 000000000000..33b5aa6127fa +--- /dev/null ++++ b/drivers/clk/xuantie/clk-th1520-fm.c +@@ -0,0 +1,646 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2021 Alibaba Group Holding Limited. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "clk.h" ++ ++static struct clk *clks[CLK_END]; ++static struct clk_onecell_data clk_data; ++ ++/* Th1520 Fullmask */ ++static u32 share_cnt_x2h_cpusys_clk_en; ++static u32 share_cnt_dmac_cpusys_clk_en; ++static u32 share_cnt_timer0_clk_en; ++static u32 share_cnt_timer1_clk_en; ++static u32 share_cnt_axi4_cpusys2_clk_en; ++static u32 share_cnt_bmu_c910_clk_en; ++static u32 share_cnt_aon2cpu_a2x_clk_en; ++static u32 share_cnt_chip_dbg_clk_en; ++static u32 share_cnt_x2x_cpusys_clk_en; ++static u32 share_cnt_cfg2tee_x2h_clk_en; ++static u32 share_cnt_cpu2aon_x2h_clk_en; ++static u32 share_cnt_cpu2vp_x2p_clk_en; ++static u32 share_cnt_npu_core_clk_en; ++static u32 share_cnt_cpu2peri_x2h_clk_en; ++static u32 share_cnt_cpu2vi_x2h_clk_en; ++static u32 share_cnt_vpsys_axi_aclk_en; ++static u32 share_cnt_gmac1_clk_en; ++static u32 share_cnt_gmac0_clk_en; ++static u32 share_cnt_perisys_apb3_hclk_en; ++static u32 share_cnt_qspi0_clk_en; ++static u32 share_cnt_gmac_axi_clk_en; ++static u32 share_cnt_gpio0_clk_en; ++static u32 share_cnt_gpio1_clk_en; ++static u32 share_cnt_pwm_clk_en; ++static u32 share_cnt_spi_clk_en; ++static u32 share_cnt_uart0_clk_en; ++static u32 share_cnt_uart2_clk_en; ++static u32 share_cnt_i2c2_clk_en; ++static u32 share_cnt_i2c3_clk_en; ++static u32 share_cnt_peri_i2s_clk_en; ++static u32 share_cnt_qspi1_clk_en; ++static u32 share_cnt_uart1_clk_en; ++static u32 share_cnt_uart3_clk_en; ++static u32 share_cnt_uart4_clk_en; ++static u32 share_cnt_uart5_clk_en; ++static u32 share_cnt_i2c0_clk_en; ++static u32 share_cnt_i2c1_clk_en; ++static u32 share_cnt_i2c4_clk_en; ++static u32 share_cnt_i2c5_clk_en; ++static u32 share_cnt_gpio2_clk_en; ++static u32 share_cnt_gpio3_clk_en; ++static u32 share_cnt_vosys_axi_aclk_en; ++ ++/* Th1520 Fullmask PLL Bypass */ ++static const char * const cpu_pll0_bypass_sels[] = {"cpu_pll0_foutpostdiv", "osc_24m", }; ++static const char * const cpu_pll1_bypass_sels[] = {"cpu_pll1_foutpostdiv", "osc_24m", }; ++static const char * const gmac_pll_bypass_sels[] = {"gmac_pll_foutpostdiv", "osc_24m", }; ++static const char * const video_pll_bypass_sels[] = {"video_pll_foutpostdiv", "osc_24m", }; ++static const char * const tee_pll_bypass_sels[] = {"tee_pll_foutpostdiv", "osc_24m"}; ++static const char * const dpu0_pll_bypass_sels[] = {"dpu0_pll_foutpostdiv", "osc_24m"}; ++static const char * const dpu1_pll_bypass_sels[] = {"dpu1_pll_foutpostdiv", "osc_24m"}; ++ ++/* th1520 fullmask mux */ ++static const char * const ahb2_cpusys_hclk_sels[] = {"ahb2_cpusys_hclk_out_div", "osc_24m"}; ++static const char * const c910_cclk_i0_sels[] = {"cpu_pll0_foutpostdiv", "osc_24m"}; ++static const char * const c910_cclk_sels[] = {"c910_cclk_i0", "cpu_pll1_foutpostdiv"}; ++static const char * const cfg_axi_aclk_sels[] = {"cfg_axi_aclk_out_div", "osc_24m"}; ++static const char * const teesys_hclk_sels[] = {"teesys_i1_hclk", "teesys_i0_hclk"}; ++static const char * const perisys_ahb_hclk_sels[] = {"perisys_ahb_hclk_out_div", "osc_24m"}; ++static const char * const clk_out_1_sels[] = {"osc_24m", "clk_out_1_out_div"}; ++static const char * const clk_out_2_sels[] = {"osc_24m", "clk_out_2_out_div"}; ++static const char * const 
clk_out_3_sels[] = {"osc_24m", "clk_out_3_out_div"}; ++static const char * const clk_out_4_sels[] = {"osc_24m", "clk_out_4_out_div"}; ++static const char * const peri_i2s_src_clk_sels[] = {"clkgen_peri_i2s_src_clk_0", "clkgen_peri_i2s_src_clk_1"}; ++static const char * const npu_cclk_sels[] = {"gmac_pll_foutpostdiv", "npu_cclk_out_div"}; ++static const char * const cfg_apb_pclk_sels[] = {"cfg_apb_pclk_out_div", "osc_24m"}; ++static const char * const uart_sclk_sels[] = {"clk_100m", "osc_24m"}; ++ ++static const struct th1520_pll_rate_table th1520_cpupll_tbl[] = { ++ TH1520_PLL_RATE(2616000000U, 2616000000U, 1, 109, 0, 1, 1), ++ TH1520_PLL_RATE(2592000000U, 2592000000U, 1, 108, 0, 1, 1), ++ TH1520_PLL_RATE(2568000000U, 2568000000U, 1, 107, 0, 1, 1), ++ TH1520_PLL_RATE(2544000000U, 2544000000U, 1, 106, 0, 1, 1), ++ TH1520_PLL_RATE(2520000000U, 2520000000U, 1, 105, 0, 1, 1), ++ TH1520_PLL_RATE(2496000000U, 2496000000U, 1, 104, 0, 1, 1), ++ TH1520_PLL_RATE(2472000000U, 2472000000U, 1, 103, 0, 1, 1), ++ TH1520_PLL_RATE(2448000000U, 2448000000U, 1, 102, 0, 1, 1), ++ TH1520_PLL_RATE(2424000000U, 2424000000U, 1, 101, 0, 1, 1), ++ TH1520_PLL_RATE(2400000000U, 2400000000U, 1, 100, 0, 1, 1), ++ TH1520_PLL_RATE(2376000000U, 2376000000U, 1, 99, 0, 1, 1), ++ TH1520_PLL_RATE(2352000000U, 2352000000U, 1, 98, 0, 1, 1), ++ TH1520_PLL_RATE(2328000000U, 2328000000U, 1, 97, 0, 1, 1), ++ TH1520_PLL_RATE(2304000000U, 2304000000U, 1, 96, 0, 1, 1), ++ TH1520_PLL_RATE(2280000000U, 2280000000U, 1, 95, 0, 1, 1), ++ TH1520_PLL_RATE(2256000000U, 2256000000U, 1, 94, 0, 1, 1), ++ TH1520_PLL_RATE(2232000000U, 2232000000U, 1, 93, 0, 1, 1), ++ TH1520_PLL_RATE(2208000000U, 2208000000U, 1, 92, 0, 1, 1), ++ TH1520_PLL_RATE(2184000000U, 2184000000U, 1, 91, 0, 1, 1), ++ TH1520_PLL_RATE(2160000000U, 2160000000U, 1, 90, 0, 1, 1), ++ TH1520_PLL_RATE(2136000000U, 2136000000U, 1, 89, 0, 1, 1), ++ TH1520_PLL_RATE(2112000000U, 2112000000U, 1, 88, 0, 1, 1), ++ TH1520_PLL_RATE(2088000000U, 2088000000U, 1, 87, 0, 1, 1), ++ TH1520_PLL_RATE(2064000000U, 2064000000U, 1, 86, 0, 1, 1), ++ TH1520_PLL_RATE(2040000000U, 2040000000U, 1, 85, 0, 1, 1), ++ TH1520_PLL_RATE(2016000000U, 2016000000U, 1, 84, 0, 1, 1), ++ TH1520_PLL_RATE(1992000000U, 1992000000U, 1, 83, 0, 1, 1), ++ TH1520_PLL_RATE(1968000000U, 1968000000U, 1, 82, 0, 1, 1), ++ TH1520_PLL_RATE(1944000000U, 1944000000U, 1, 81, 0, 1, 1), ++ TH1520_PLL_RATE(1920000000U, 1920000000U, 1, 80, 0, 1, 1), ++ TH1520_PLL_RATE(1896000000U, 1896000000U, 1, 79, 0, 1, 1), ++ TH1520_PLL_RATE(1872000000U, 1872000000U, 1, 78, 0, 1, 1), ++ TH1520_PLL_RATE(1848000000U, 1848000000U, 1, 77, 0, 1, 1), ++ TH1520_PLL_RATE(1824000000U, 1824000000U, 1, 76, 0, 1, 1), ++ TH1520_PLL_RATE(1800000000U, 1800000000U, 1, 75, 0, 1, 1), ++ TH1520_PLL_RATE(1776000000U, 1776000000U, 1, 74, 0, 1, 1), ++ TH1520_PLL_RATE(1752000000U, 1752000000U, 1, 73, 0, 1, 1), ++ TH1520_PLL_RATE(1728000000U, 1728000000U, 1, 72, 0, 1, 1), ++ TH1520_PLL_RATE(1704000000U, 1704000000U, 1, 71, 0, 1, 1), ++ TH1520_PLL_RATE(1680000000U, 1680000000U, 1, 70, 0, 1, 1), ++ TH1520_PLL_RATE(1656000000U, 1656000000U, 1, 69, 0, 1, 1), ++ TH1520_PLL_RATE(1632000000U, 1632000000U, 1, 68, 0, 1, 1), ++ TH1520_PLL_RATE(1608000000U, 1608000000U, 1, 67, 0, 1, 1), ++ TH1520_PLL_RATE(1584000000U, 1584000000U, 1, 66, 0, 1, 1), ++ TH1520_PLL_RATE(1560000000U, 1560000000U, 1, 65, 0, 1, 1), ++ TH1520_PLL_RATE(1536000000U, 1536000000U, 1, 64, 0, 1, 1), ++ TH1520_PLL_RATE(1512000000U, 1512000000U, 1, 63, 0, 1, 1), ++ TH1520_PLL_RATE(3000000000U, 1500000000U, 1, 125, 0, 2, 
1), ++ TH1520_PLL_RATE(2976000000U, 1488000000U, 1, 124, 0, 2, 1), ++ TH1520_PLL_RATE(2952000000U, 1476000000U, 1, 123, 0, 2, 1), ++ TH1520_PLL_RATE(2928000000U, 1464000000U, 1, 122, 0, 2, 1), ++ TH1520_PLL_RATE(2904000000U, 1452000000U, 1, 121, 0, 2, 1), ++ TH1520_PLL_RATE(2880000000U, 1440000000U, 1, 120, 0, 2, 1), ++ TH1520_PLL_RATE(2856000000U, 1428000000U, 1, 119, 0, 2, 1), ++ TH1520_PLL_RATE(2832000000U, 1416000000U, 1, 118, 0, 2, 1), ++ TH1520_PLL_RATE(2808000000U, 1404000000U, 1, 117, 0, 2, 1), ++ TH1520_PLL_RATE(2784000000U, 1392000000U, 1, 116, 0, 2, 1), ++ TH1520_PLL_RATE(2760000000U, 1380000000U, 1, 115, 0, 2, 1), ++ TH1520_PLL_RATE(2736000000U, 1368000000U, 1, 114, 0, 2, 1), ++ TH1520_PLL_RATE(2712000000U, 1356000000U, 1, 113, 0, 2, 1), ++ TH1520_PLL_RATE(2688000000U, 1344000000U, 1, 112, 0, 2, 1), ++ TH1520_PLL_RATE(2664000000U, 1332000000U, 1, 111, 0, 2, 1), ++ TH1520_PLL_RATE(2640000000U, 1320000000U, 1, 110, 0, 2, 1), ++ TH1520_PLL_RATE(2616000000U, 1308000000U, 1, 109, 0, 2, 1), ++ TH1520_PLL_RATE(2592000000U, 1296000000U, 1, 108, 0, 2, 1), ++ TH1520_PLL_RATE(2568000000U, 1284000000U, 1, 107, 0, 2, 1), ++ TH1520_PLL_RATE(2544000000U, 1272000000U, 1, 106, 0, 2, 1), ++ TH1520_PLL_RATE(2520000000U, 1260000000U, 1, 105, 0, 2, 1), ++ TH1520_PLL_RATE(2496000000U, 1248000000U, 1, 104, 0, 2, 1), ++ TH1520_PLL_RATE(2472000000U, 1236000000U, 1, 103, 0, 2, 1), ++ TH1520_PLL_RATE(2448000000U, 1224000000U, 1, 102, 0, 2, 1), ++ TH1520_PLL_RATE(2424000000U, 1212000000U, 1, 101, 0, 2, 1), ++ TH1520_PLL_RATE(2400000000U, 1200000000U, 1, 100, 0, 2, 1), ++ TH1520_PLL_RATE(2376000000U, 1188000000U, 1, 99, 0, 2, 1), ++ TH1520_PLL_RATE(2352000000U, 1176000000U, 1, 98, 0, 2, 1), ++ TH1520_PLL_RATE(2328000000U, 1164000000U, 1, 97, 0, 2, 1), ++ TH1520_PLL_RATE(2304000000U, 1152000000U, 1, 96, 0, 2, 1), ++ TH1520_PLL_RATE(2280000000U, 1140000000U, 1, 95, 0, 2, 1), ++ TH1520_PLL_RATE(2256000000U, 1128000000U, 1, 94, 0, 2, 1), ++ TH1520_PLL_RATE(2232000000U, 1116000000U, 1, 93, 0, 2, 1), ++ TH1520_PLL_RATE(2208000000U, 1104000000U, 1, 92, 0, 2, 1), ++ TH1520_PLL_RATE(2184000000U, 1092000000U, 1, 91, 0, 2, 1), ++ TH1520_PLL_RATE(2160000000U, 1080000000U, 1, 90, 0, 2, 1), ++ TH1520_PLL_RATE(2136000000U, 1068000000U, 1, 89, 0, 2, 1), ++ TH1520_PLL_RATE(2112000000U, 1056000000U, 1, 88, 0, 2, 1), ++ TH1520_PLL_RATE(2088000000U, 1044000000U, 1, 87, 0, 2, 1), ++ TH1520_PLL_RATE(2064000000U, 1032000000U, 1, 86, 0, 2, 1), ++ TH1520_PLL_RATE(2040000000U, 1020000000U, 1, 85, 0, 2, 1), ++ TH1520_PLL_RATE(2016000000U, 1008000000U, 1, 84, 0, 2, 1), ++ TH1520_PLL_RATE(3000000000U, 1000000000U, 1, 125, 0, 3, 1), ++ TH1520_PLL_RATE(2976000000U, 992000000U, 1, 124, 0, 3, 1), ++ TH1520_PLL_RATE(2952000000U, 984000000U, 1, 123, 0, 3, 1), ++ TH1520_PLL_RATE(2928000000U, 976000000U, 1, 122, 0, 3, 1), ++ TH1520_PLL_RATE(2904000000U, 968000000U, 1, 121, 0, 3, 1), ++ TH1520_PLL_RATE(2880000000U, 960000000U, 1, 120, 0, 3, 1), ++ TH1520_PLL_RATE(2856000000U, 952000000U, 1, 119, 0, 3, 1), ++ TH1520_PLL_RATE(2832000000U, 944000000U, 1, 118, 0, 3, 1), ++ TH1520_PLL_RATE(2808000000U, 936000000U, 1, 117, 0, 3, 1), ++ TH1520_PLL_RATE(2784000000U, 928000000U, 1, 116, 0, 3, 1), ++ TH1520_PLL_RATE(2760000000U, 920000000U, 1, 115, 0, 3, 1), ++ TH1520_PLL_RATE(2736000000U, 912000000U, 1, 114, 0, 3, 1), ++ TH1520_PLL_RATE(2712000000U, 904000000U, 1, 113, 0, 3, 1), ++ TH1520_PLL_RATE(1800000000U, 900000000U, 1, 75, 0, 2, 1), ++ TH1520_PLL_RATE(2688000000U, 896000000U, 1, 112, 0, 3, 1), ++ TH1520_PLL_RATE(2664000000U, 888000000U, 1, 111, 
0, 3, 1), ++ TH1520_PLL_RATE(2640000000U, 880000000U, 1, 110, 0, 3, 1), ++ TH1520_PLL_RATE(2616000000U, 872000000U, 1, 109, 0, 3, 1), ++ TH1520_PLL_RATE(2592000000U, 864000000U, 1, 108, 0, 3, 1), ++ TH1520_PLL_RATE(2568000000U, 856000000U, 1, 107, 0, 3, 1), ++ TH1520_PLL_RATE(2544000000U, 848000000U, 1, 106, 0, 3, 1), ++ TH1520_PLL_RATE(2520000000U, 840000000U, 1, 105, 0, 3, 1), ++ TH1520_PLL_RATE(2496000000U, 832000000U, 1, 104, 0, 3, 1), ++ TH1520_PLL_RATE(2472000000U, 824000000U, 1, 103, 0, 3, 1), ++ TH1520_PLL_RATE(2448000000U, 816000000U, 1, 102, 0, 3, 1), ++ TH1520_PLL_RATE(2424000000U, 808000000U, 1, 101, 0, 3, 1), ++ TH1520_PLL_RATE(2400000000U, 800000000U, 1, 100, 0, 3, 1), ++ TH1520_PLL_RATE(2376000000U, 792000000U, 1, 99, 0, 3, 1), ++ TH1520_PLL_RATE(2352000000U, 784000000U, 1, 98, 0, 3, 1), ++ TH1520_PLL_RATE(2328000000U, 776000000U, 1, 97, 0, 3, 1), ++ TH1520_PLL_RATE(2304000000U, 768000000U, 1, 96, 0, 3, 1), ++ TH1520_PLL_RATE(2280000000U, 760000000U, 1, 95, 0, 3, 1), ++ TH1520_PLL_RATE(2256000000U, 752000000U, 1, 94, 0, 3, 1), ++ TH1520_PLL_RATE(2232000000U, 744000000U, 1, 93, 0, 3, 1), ++ TH1520_PLL_RATE(2208000000U, 736000000U, 1, 92, 0, 3, 1), ++ TH1520_PLL_RATE(2184000000U, 728000000U, 1, 91, 0, 3, 1), ++ TH1520_PLL_RATE(2160000000U, 720000000U, 1, 90, 0, 3, 1), ++ TH1520_PLL_RATE(2136000000U, 712000000U, 1, 89, 0, 3, 1), ++ TH1520_PLL_RATE(2808000000U, 702000000U, 1, 117, 0, 4, 1), ++ TH1520_PLL_RATE(2760000000U, 690000000U, 1, 115, 0, 4, 1), ++ TH1520_PLL_RATE(2712000000U, 678000000U, 1, 113, 0, 4, 1), ++ TH1520_PLL_RATE(2664000000U, 666000000U, 1, 111, 0, 4, 1), ++ TH1520_PLL_RATE(2616000000U, 654000000U, 1, 109, 0, 4, 1), ++ TH1520_PLL_RATE(2568000000U, 642000000U, 1, 107, 0, 4, 1), ++ TH1520_PLL_RATE(2520000000U, 630000000U, 1, 105, 0, 4, 1), ++ TH1520_PLL_RATE(2472000000U, 618000000U, 1, 103, 0, 4, 1), ++ TH1520_PLL_RATE(2424000000U, 606000000U, 1, 101, 0, 4, 1), ++ TH1520_PLL_RATE(3000000000U, 600000000U, 1, 125, 0, 5, 1), ++ TH1520_PLL_RATE(2952000000U, 590400000U, 1, 123, 0, 5, 1), ++ TH1520_PLL_RATE(2904000000U, 580800000U, 1, 121, 0, 5, 1), ++ TH1520_PLL_RATE(2856000000U, 571200000U, 1, 119, 0, 5, 1), ++ TH1520_PLL_RATE(2808000000U, 561600000U, 1, 117, 0, 5, 1), ++ TH1520_PLL_RATE(2760000000U, 552000000U, 1, 115, 0, 5, 1), ++ TH1520_PLL_RATE(2712000000U, 542400000U, 1, 113, 0, 5, 1), ++ TH1520_PLL_RATE(2664000000U, 532800000U, 1, 111, 0, 5, 1), ++ TH1520_PLL_RATE(2616000000U, 523200000U, 1, 109, 0, 5, 1), ++ TH1520_PLL_RATE(2568000000U, 513600000U, 1, 107, 0, 5, 1), ++ TH1520_PLL_RATE(2520000000U, 504000000U, 1, 105, 0, 5, 1), ++ TH1520_PLL_RATE(3000000000U, 500000000U, 1, 125, 0, 6, 1), ++ TH1520_PLL_RATE(2952000000U, 492000000U, 1, 123, 0, 6, 1), ++ TH1520_PLL_RATE(2904000000U, 484000000U, 1, 121, 0, 6, 1), ++ TH1520_PLL_RATE(2856000000U, 476000000U, 1, 119, 0, 6, 1), ++ TH1520_PLL_RATE(2808000000U, 468000000U, 1, 117, 0, 6, 1), ++ TH1520_PLL_RATE(2760000000U, 460000000U, 1, 115, 0, 6, 1), ++ TH1520_PLL_RATE(2712000000U, 452000000U, 1, 113, 0, 6, 1), ++ TH1520_PLL_RATE(2664000000U, 444000000U, 1, 111, 0, 6, 1), ++ TH1520_PLL_RATE(2616000000U, 436000000U, 1, 109, 0, 6, 1), ++ TH1520_PLL_RATE(2568000000U, 428000000U, 1, 107, 0, 6, 1), ++ TH1520_PLL_RATE(2520000000U, 420000000U, 1, 105, 0, 6, 1), ++ TH1520_PLL_RATE(2472000000U, 412000000U, 1, 103, 0, 6, 1), ++ TH1520_PLL_RATE(2400000000U, 400000000U, 1, 100, 0, 3, 2), ++ TH1520_PLL_RATE(2352000000U, 392000000U, 1, 98, 0, 3, 2), ++ TH1520_PLL_RATE(2304000000U, 384000000U, 1, 96, 0, 3, 2), ++ 
TH1520_PLL_RATE(2256000000U, 376000000U, 1, 94, 0, 3, 2), ++ TH1520_PLL_RATE(2208000000U, 368000000U, 1, 92, 0, 3, 2), ++ TH1520_PLL_RATE(2160000000U, 360000000U, 1, 90, 0, 3, 2), ++ TH1520_PLL_RATE(2112000000U, 352000000U, 1, 88, 0, 3, 2), ++ TH1520_PLL_RATE(2064000000U, 344000000U, 1, 86, 0, 3, 2), ++ TH1520_PLL_RATE(2016000000U, 336000000U, 1, 84, 0, 3, 2), ++ TH1520_PLL_RATE(1968000000U, 328000000U, 1, 82, 0, 3, 2), ++ TH1520_PLL_RATE(1920000000U, 320000000U, 1, 80, 0, 3, 2), ++ TH1520_PLL_RATE(1872000000U, 312000000U, 1, 78, 0, 3, 2), ++ TH1520_PLL_RATE(1824000000U, 304000000U, 1, 76, 0, 3, 2), ++ TH1520_PLL_RATE(3000000000U, 300000000U, 1, 125, 0, 5, 2), ++ TH1520_PLL_RATE(2880000000U, 288000000U, 1, 120, 0, 5, 2), ++ TH1520_PLL_RATE(2760000000U, 276000000U, 1, 115, 0, 5, 2), ++ TH1520_PLL_RATE(2640000000U, 264000000U, 1, 110, 0, 5, 2), ++ TH1520_PLL_RATE(2520000000U, 252000000U, 1, 105, 0, 5, 2), ++ TH1520_PLL_RATE(2400000000U, 240000000U, 1, 100, 0, 5, 2), ++ TH1520_PLL_RATE(2280000000U, 228000000U, 1, 95, 0, 5, 2), ++ TH1520_PLL_RATE(2160000000U, 216000000U, 1, 90, 0, 5, 2), ++ TH1520_PLL_RATE(2040000000U, 204000000U, 1, 85, 0, 5, 2), ++ TH1520_PLL_RATE(3000000000U, 200000000U, 1, 125, 0, 5, 3), ++ TH1520_PLL_RATE(2880000000U, 192000000U, 1, 120, 0, 5, 3), ++ TH1520_PLL_RATE(2760000000U, 184000000U, 1, 115, 0, 5, 3), ++ TH1520_PLL_RATE(2640000000U, 176000000U, 1, 110, 0, 5, 3), ++ TH1520_PLL_RATE(2520000000U, 168000000U, 1, 105, 0, 5, 3), ++ TH1520_PLL_RATE(2400000000U, 160000000U, 1, 100, 0, 5, 3), ++ TH1520_PLL_RATE(2280000000U, 152000000U, 1, 95, 0, 5, 3), ++ TH1520_PLL_RATE(2160000000U, 144000000U, 1, 90, 0, 5, 3), ++ TH1520_PLL_RATE(2040000000U, 136000000U, 1, 85, 0, 5, 3), ++ TH1520_PLL_RATE(1920000000U, 128000000U, 1, 80, 0, 5, 3), ++ TH1520_PLL_RATE(3000000000U, 125000000U, 1, 125, 0, 6, 4), ++ TH1520_PLL_RATE(2760000000U, 115000000U, 1, 115, 0, 6, 4), ++ TH1520_PLL_RATE(2520000000U, 105000000U, 1, 105, 0, 6, 4), ++ TH1520_PLL_RATE(2280000000U, 95000000U, 1, 95, 0, 6, 4), ++ TH1520_PLL_RATE(2040000000U, 85000000U, 1, 85, 0, 6, 4), ++ TH1520_PLL_RATE(1800000000U, 75000000U, 1, 75, 0, 6, 4), ++ TH1520_PLL_RATE(1560000000U, 65000000U, 1, 65, 0, 6, 4), ++ TH1520_PLL_RATE(1320000000U, 55000000U, 1, 55, 0, 6, 4), ++}; ++ ++static const struct th1520_pll_rate_table th1520_dpupll_tbl[] = { ++ TH1520_PLL_RATE(2376000000U, 1188000000U, 1, 99, 0, 2, 1), ++ TH1520_PLL_RATE(1980000000U, 990000000U, 2, 165, 0, 2, 1), ++ TH1520_PLL_RATE(2970000000U, 742500000U, 4, 495, 0, 4, 1), ++ TH1520_PLL_RATE(2304000000U, 1152000000U, 1, 96, 0, 2, 1), ++ TH1520_PLL_RATE(1512000000U, 504000000U, 1, 63, 0, 3, 1), ++ TH1520_PLL_RATE(1512000000U, 503500000U, 1, 63, 0, 3, 1), ++ TH1520_PLL_RATE(2898000000U, 483000000U, 4, 483, 0, 6, 1), ++ TH1520_PLL_RATE(2592000000U, 648000000U, 1, 108, 0, 4, 1), ++ TH1520_PLL_RATE(2772000000U, 924000000U, 2, 231, 0, 3, 1), ++ TH1520_PLL_RATE(2856000000U, 476000000U, 1, 119, 0, 6, 1), ++ TH1520_PLL_RATE(2130000000U, 355000000U, 4, 355, 0, 6, 1), ++ TH1520_PLL_RATE(3192000000U, 456000000U, 1, 133, 0, 7, 1), ++ TH1520_PLL_RATE(2730000000U, 390000000U, 4, 455, 0, 7, 1), ++ TH1520_PLL_RATE(1680000000U, 240000000U, 1, 70, 0, 7, 1), ++ TH1520_PLL_RATE(2832000000U, 708000000U, 1, 118, 0, 4, 1), ++ TH1520_PLL_RATE(1026000000U, 342000000U, 4, 171, 0, 3, 1), ++ TH1520_PLL_RATE(1260000000U, 630000000U, 4, 210, 0, 2, 1), ++}; ++ ++static struct th1520_pll_clk th1520_cpu_pll0div = { ++ .out_type = TH1520_PLL_DIV, ++ .clk_type = TH1520_CPU_PLL0, ++ .rate_table = th1520_cpupll_tbl, ++ 
.rate_count = ARRAY_SIZE(th1520_cpupll_tbl), ++}; ++ ++static struct th1520_pll_clk th1520_cpu_pll1div = { ++ .out_type = TH1520_PLL_DIV, ++ .clk_type = TH1520_CPU_PLL1, ++ .rate_table = th1520_cpupll_tbl, ++ .rate_count = ARRAY_SIZE(th1520_cpupll_tbl), ++}; ++ ++static struct th1520_pll_clk th1520_dpu0_plldiv = { ++ .out_type = TH1520_PLL_DIV, ++ .clk_type = TH1520_DPU0_PLL, ++ .rate_table = th1520_dpupll_tbl, ++ .rate_count = ARRAY_SIZE(th1520_dpupll_tbl), ++}; ++ ++static struct th1520_pll_clk th1520_dpu1_plldiv = { ++ .out_type = TH1520_PLL_DIV, ++ .clk_type = TH1520_DPU1_PLL, ++ .rate_table = th1520_dpupll_tbl, ++ .rate_count = ARRAY_SIZE(th1520_dpupll_tbl), ++}; ++ ++static int th1520_clocks_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *np = dev->of_node; ++ void __iomem *ap_base; ++ int ret; ++ const bool *teesys = of_device_get_match_data(dev); ++ ++ /* Clock source */ ++ clks[CLK_DUMMY] = xuantie_clk_fixed("dummy", 0); ++ clks[OSC_32K] = of_clk_get_by_name(np, "osc_32k"); ++ clks[OSC_24M] = of_clk_get_by_name(np, "osc_24m"); ++ clks[RC_24M] = of_clk_get_by_name(np, "rc_24m"); ++ ++ np = dev->of_node; ++ ap_base = devm_platform_ioremap_resource(pdev, 0); ++ if (WARN_ON(IS_ERR(ap_base))) { ++ ret = PTR_ERR(ap_base); ++ goto unregister_clks; ++ } ++ ++ /* Th1520 Fullmask AP PLL clocks */ ++ clks[CPU_PLL0_FOUTPOSTDIV] = xuantie_th1520_pll("cpu_pll0_foutpostdiv", "osc_24m", ap_base, &th1520_cpu_pll0div); ++ clks[CPU_PLL1_FOUTPOSTDIV] = xuantie_th1520_pll("cpu_pll1_foutpostdiv", "osc_24m", ap_base, &th1520_cpu_pll1div); ++ ++ clks[DPU0_PLL_FOUTPOSTDIV] = xuantie_th1520_pll("dpu0_pll_foutpostdiv", "osc_24m", ap_base, &th1520_dpu0_plldiv); ++ clks[DPU1_PLL_FOUTPOSTDIV] = xuantie_th1520_pll("dpu1_pll_foutpostdiv", "osc_24m", ap_base, &th1520_dpu1_plldiv); ++ ++ /* Th1520 Fullmask AP Fixed PLL */ ++ clks[GMAC_PLL_FOUTPOSTDIV] = xuantie_clk_fixed("gmac_pll_foutpostdiv", 1000000000); ++ clks[VIDEO_PLL_FOUTPOSTDIV] = xuantie_clk_fixed("video_pll_foutpostdiv", 792000000); ++ clks[VIDEO_PLL_FOUTVCO] = xuantie_clk_fixed("video_pll_foutvco", 2376000000); ++ clks[TEE_PLL_FOUTPOSTDIV] = xuantie_clk_fixed("tee_pll_foutpostdiv", 792000000); ++ clks[CLKGEN_PERI_I2S_SRC_CLK_0] = xuantie_clk_fixed("clkgen_peri_i2s_src_clk_0", 294912000); //from audio_pll_foutpostdiv ++ clks[CLKGEN_PERI_I2S_SRC_CLK_1] = xuantie_clk_fixed("clkgen_peri_i2s_src_clk_1", 135475200); //from sys_pll_foutpostdiv ++ clks[CLKGEN_C910_BUS_CLK_NO_ICG] = xuantie_clk_fixed("clkgen_c910_bus_clk_no_icg", 750000000); ++ clks[AONSYS_BUS_CLK] = xuantie_clk_fixed("aonsys_hclk", 101606400); //from sys_pll, maybe change ? 
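++
++	/*
++	 * The PLL rate tables above encode, for the 24 MHz reference:
++	 * FOUTVCO = FREF * fbdiv / refdiv and
++	 * FOUTPOSTDIV = FOUTVCO / (postdiv1 * postdiv2)
++	 * (matches clk_th1520_pll_recalc_rate() in clk.c). For example,
++	 * TH1520_PLL_RATE(2256000000U, 376000000U, 1, 94, 0, 3, 2):
++	 * 24 MHz * 94 / 1 = 2256 MHz, and 2256 MHz / (3 * 2) = 376 MHz.
++	 */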
++ ++ /* Th1520 Fullmask AP MUX */ ++ clks[CPU_PLL0_BYPASS] = xuantie_th1520_clk_mux_flags("cpu_pll0_bypass", ap_base + 0x4, 30, 1, cpu_pll0_bypass_sels, ARRAY_SIZE(cpu_pll0_bypass_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[CPU_PLL1_BYPASS] = xuantie_th1520_clk_mux_flags("cpu_pll1_bypass", ap_base + 0x14, 30, 1, cpu_pll1_bypass_sels, ARRAY_SIZE(cpu_pll1_bypass_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[GMAC_PLL_BYPASS] = xuantie_th1520_clk_mux_flags("gmac_pll_bypass", ap_base + 0x24, 30, 1, gmac_pll_bypass_sels, ARRAY_SIZE(gmac_pll_bypass_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[VIDEO_PLL_BYPASS] = xuantie_th1520_clk_mux_flags("video_pll_bypass", ap_base + 0x34, 30, 1, video_pll_bypass_sels, ARRAY_SIZE(video_pll_bypass_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[TEE_PLL_BYPASS] = xuantie_th1520_clk_mux_flags("tee_pll_bypass", ap_base + 0x64, 30, 1, tee_pll_bypass_sels, ARRAY_SIZE(tee_pll_bypass_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[DPU0_PLL_BYPASS] = xuantie_th1520_clk_mux_flags("dpu0_pll_bypass", ap_base + 0x44, 30, 1, dpu0_pll_bypass_sels, ARRAY_SIZE(dpu0_pll_bypass_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[DPU1_PLL_BYPASS] = xuantie_th1520_clk_mux_flags("dpu1_pll_bypass", ap_base + 0x54, 30, 1, dpu1_pll_bypass_sels, ARRAY_SIZE(dpu1_pll_bypass_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ ++ clks[AHB2_CPUSYS_HCLK] = xuantie_th1520_clk_mux_flags("ahb2_cpusys_hclk", ap_base + 0x120, 5, 1, ahb2_cpusys_hclk_sels, ARRAY_SIZE(ahb2_cpusys_hclk_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[C910_CCLK_I0] = xuantie_th1520_clk_mux_flags("c910_cclk_i0", ap_base + 0x100, 1, 1, c910_cclk_i0_sels, ARRAY_SIZE(c910_cclk_i0_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[C910_CCLK] = xuantie_th1520_clk_mux_flags("c910_cclk", ap_base + 0x100, 0, 1, c910_cclk_sels, ARRAY_SIZE(c910_cclk_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[CFG_AXI_ACLK] = xuantie_th1520_clk_mux_flags("cfg_axi_aclk", ap_base + 0x138, 5, 1, cfg_axi_aclk_sels, ARRAY_SIZE(cfg_axi_aclk_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ ++ if (teesys) ++ clks[TEESYS_HCLK] = xuantie_th1520_clk_mux_flags("teesys_hclk", ap_base + 0x1cc, 13, 1, teesys_hclk_sels, ARRAY_SIZE(teesys_hclk_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); //just for teesys!!! 
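++
++	/*
++	 * Each *_pll_bypass mux above drives bit 30 of that PLL's CFG1
++	 * register (TH1520_BYPASS_MASK in clk.c); its two parents are
++	 * those listed in the *_bypass_sels tables defined earlier in
++	 * this file, i.e. the PLL output and its bypass source.
++	 */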
++ ++ clks[PERISYS_AHB_HCLK] = xuantie_th1520_clk_mux_flags("perisys_ahb_hclk", ap_base + 0x140, 5, 1, perisys_ahb_hclk_sels, ARRAY_SIZE(perisys_ahb_hclk_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[CLK_OUT_1] = xuantie_th1520_clk_mux_flags("clk_out_1", ap_base + 0x1b4, 4, 1, clk_out_1_sels, ARRAY_SIZE(clk_out_1_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[CLK_OUT_2] = xuantie_th1520_clk_mux_flags("clk_out_2", ap_base + 0x1b8, 4, 1, clk_out_2_sels, ARRAY_SIZE(clk_out_2_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[CLK_OUT_3] = xuantie_th1520_clk_mux_flags("clk_out_3", ap_base + 0x1bc, 4, 1, clk_out_3_sels, ARRAY_SIZE(clk_out_3_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[CLK_OUT_4] = xuantie_th1520_clk_mux_flags("clk_out_4", ap_base + 0x1c0, 4, 1, clk_out_4_sels, ARRAY_SIZE(clk_out_4_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[PERI_I2S_SRC_CLK] = xuantie_th1520_clk_mux_flags("peri_i2s_src_clk", ap_base + 0x1f0, 0, 1, peri_i2s_src_clk_sels, ARRAY_SIZE(peri_i2s_src_clk_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[NPU_CCLK] = xuantie_th1520_clk_mux_flags("npu_cclk", ap_base + 0x1c8, 6, 1, npu_cclk_sels, ARRAY_SIZE(npu_cclk_sels), CLK_SET_RATE_PARENT); ++ clks[CFG_APB_PCLK] = xuantie_th1520_clk_mux_flags("cfg_apb_pclk", ap_base + 0x1c4, 7, 1, cfg_apb_pclk_sels, ARRAY_SIZE(cfg_apb_pclk_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ clks[UART_SCLK] = xuantie_th1520_clk_mux_flags("uart_sclk", ap_base + 0x210, 0, 1, uart_sclk_sels, ARRAY_SIZE(uart_sclk_sels), CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT); ++ ++ /* Th1520 Fullmask AP Divider */ ++ clks[AHB2_CPUSYS_HCLK_OUT_DIV] = xuantie_clk_th1520_divider("ahb2_cpusys_hclk_out_div", "gmac_pll_fout1ph0", ap_base + 0x120, 0, 3, 4, MUX_TYPE_DIV, 2, 7); ++ clks[APB3_CPUSYS_PCLK] = xuantie_clk_th1520_divider("apb3_cpusys_pclk", "ahb2_cpusys_hclk", ap_base + 0x130, 0, 3, 3, MUX_TYPE_CDE, 1, 7); ++ clks[AXI4_CPUSYS2_ACLK] = xuantie_clk_th1520_divider("axi4_cpusys2_aclk", "gmac_pll_foutpostdiv", ap_base + 0x134, 0, 3, 4, MUX_TYPE_DIV, 2, 7); ++ clks[CFG_AXI_ACLK_OUT_DIV] = xuantie_clk_th1520_divider("cfg_axi_aclk_out_div", "video_pll_foutpostdiv", ap_base + 0x138, 0, 4, 4, MUX_TYPE_DIV, 2, 15); ++ ++ if (teesys) { ++ clks[TEESYS_I0_HCLK] = xuantie_clk_th1520_divider("teesys_i0_hclk", "tee_pll_foutpostdiv", ap_base + 0x1cc, 0, 4, 4, MUX_TYPE_DIV, 2, 15); //just for teesys!!! ++ clks[TEESYS_I1_HCLK] = xuantie_clk_th1520_divider("teesys_i1_hclk", "video_pll_foutpostdiv", ap_base + 0x1cc, 8, 4, 12, MUX_TYPE_DIV, 2, 15); //just for teesys!!! 
++ } ++ ++ clks[PERISYS_AHB_HCLK_OUT_DIV] = xuantie_clk_th1520_divider("perisys_ahb_hclk_out_div", "gmac_pll_fout1ph0", ap_base + 0x140, 0, 4, 4, MUX_TYPE_DIV, 2, 7); ++ clks[PERISYS_APB_PCLK] = xuantie_clk_th1520_divider("perisys_apb_pclk", "perisys_ahb_hclk", ap_base + 0x150, 0, 3, 3, MUX_TYPE_CDE, 3, 7); ++ clks[PERI2SYS_APB_PCLK] = xuantie_clk_th1520_divider("peri2sys_apb_pclk", "gmac_pll_fout4", ap_base + 0x150, 4, 3, 8, MUX_TYPE_DIV, 2, 7); ++ clks[CLK_OUT_1_OUT_DIV] = xuantie_clk_th1520_divider("clk_out_1_out_div", "osc_24m", ap_base + 0x1b4, 0, 3, 3, MUX_TYPE_DIV, 2, 4); ++ clks[CLK_OUT_2_OUT_DIV] = xuantie_clk_th1520_divider("clk_out_2_out_div", "osc_24m", ap_base + 0x1b8, 0, 3, 3, MUX_TYPE_DIV, 2, 4); ++ clks[CLK_OUT_3_OUT_DIV] = xuantie_clk_th1520_divider("clk_out_3_out_div", "osc_24m", ap_base + 0x1bc, 0, 3, 3, MUX_TYPE_DIV, 2, 4); ++ clks[CLK_OUT_4_OUT_DIV] = xuantie_clk_th1520_divider("clk_out_4_out_div", "osc_24m", ap_base + 0x1c0, 0, 3, 3, MUX_TYPE_DIV, 2, 4); ++ clks[VOSYS_ACLK_M] = xuantie_clk_th1520_divider("vosys_aclk_m", "video_pll_foutvco", ap_base + 0x1dc, 0, 4, 4, MUX_TYPE_DIV, 3, 15); ++ clks[NPU_CCLK_OUT_DIV] = xuantie_clk_th1520_divider("npu_cclk_out_div", "video_pll_foutvco", ap_base + 0x1c8, 0, 3, 3, MUX_TYPE_DIV, 3, 7); ++ clks[CFG_APB_PCLK_OUT_DIV] = xuantie_clk_th1520_divider("cfg_apb_pclk_out_div", "gmac_pll_foutpostdiv", ap_base + 0x1c4, 0, 4, 4, MUX_TYPE_DIV, 4, 15); ++ clks[VISYS_ACLK_M] = xuantie_clk_th1520_divider("visys_aclk_m", "video_pll_foutvco", ap_base + 0x1d0, 16, 4, 20, MUX_TYPE_DIV, 3, 15); ++ clks[VISYS_AHB_HCLK] = xuantie_clk_th1520_divider("visys_ahb_hclk", "video_pll_foutvco", ap_base + 0x1d0, 0, 4, 4, MUX_TYPE_DIV, 6, 15); ++ clks[VPSYS_APB_PCLK] = xuantie_clk_th1520_divider("vpsys_apb_pclk", "gmac_pll_fout1ph0", ap_base + 0x1e0, 0, 3, 4, MUX_TYPE_DIV, 2, 7); ++ clks[VPSYS_AXI_ACLK] = xuantie_clk_th1520_divider("vpsys_axi_aclk", "video_pll_foutvco", ap_base + 0x1e0, 8, 4, 12, MUX_TYPE_DIV, 3, 15); ++ clks[VENC_CCLK] = xuantie_clk_th1520_divider_closest("venc_cclk", "gmac_pll_foutpostdiv", ap_base + 0x1e4, 0, 3, 4, MUX_TYPE_DIV, 2, 7); ++ clks[DPU0_PLL_DIV_CLK] = xuantie_clk_th1520_divider("dpu0_pll_div_clk", "dpu0_pll_foutpostdiv", ap_base + 0x1e8, 0, 8, 8, MUX_TYPE_DIV, 2, 214); ++ clks[DPU1_PLL_DIV_CLK] = xuantie_clk_th1520_divider("dpu1_pll_div_clk", "dpu1_pll_foutpostdiv", ap_base + 0x1ec, 0, 8, 8, MUX_TYPE_DIV, 2, 214); ++ ++ /* Th1520 Fullmask PLL FOUT */ ++ clks[GMAC_PLL_FOUT1PH0] = xuantie_th1520_clk_fixed_factor("gmac_pll_fout1ph0", "gmac_pll_bypass", 1, 2); ++ clks[GMAC_PLL_FOUT4] = xuantie_th1520_clk_fixed_factor("gmac_pll_fout4", "gmac_pll_bypass", 1, 8); ++ clks[VIDEO_PLL_FOUT1PH0] = xuantie_th1520_clk_fixed_factor("video_pll_fout1ph0", "video_pll_bypass", 1, 2); ++ clks[VIDEO_PLL_FOUT4] = xuantie_th1520_clk_fixed_factor("video_pll_fout4", "video_pll_bypass", 1, 8); ++ clks[TEE_PLL_FOUT4] = xuantie_th1520_clk_fixed_factor("tee_pll_fout4", "tee_pll_bypass", 1, 8); ++ clks[CPU_PLL0_FOUT4] = xuantie_th1520_clk_fixed_factor("cpu_pll0_fout4", "cpu_pll0_bypass", 1, 8); ++ clks[CPU_PLL1_FOUT4] = xuantie_th1520_clk_fixed_factor("cpu_pll1_fout4", "cpu_pll1_bypass", 1, 8); ++ clks[DPU0_PLL_FOUT4] = xuantie_th1520_clk_fixed_factor("dpu0_pll_fout4", "dpu0_pll_bypass", 1, 8); ++ clks[DPU1_PLL_FOUT4] = xuantie_th1520_clk_fixed_factor("dpu1_pll_fout4", "dpu1_pll_bypass", 1, 8); ++ ++ /* Th1520 Fullmask Fixed Factor */ ++ clks[C910_OSC_CLK] = xuantie_th1520_clk_fixed_factor("c910_osc_clk", "osc_24m", 1, 1); ++ clks[QSPI_SSI_CLK] = 
xuantie_th1520_clk_fixed_factor("qspi_ssi_clk", "video_pll_foutpostdiv", 1, 1); /* Note: no mux to select, use default value */ ++ clks[QSPI0_SSI_CLK] = xuantie_th1520_clk_fixed_factor("qspi0_ssi_clk", "qspi_ssi_clk", 1, 1); ++ clks[QSPI1_SSI_CLK] = xuantie_th1520_clk_fixed_factor("qspi1_ssi_clk", "video_pll_fout1ph0", 1, 1); ++ clks[SPI_SSI_CLK] = xuantie_th1520_clk_fixed_factor("spi_ssi_clk", "video_pll_fout1ph0", 1, 1); ++ clks[EMMC_SDIO_REF_CLK] = xuantie_th1520_clk_fixed_factor("emmc_sdio_ref_clk", "video_pll_foutpostdiv", 1, 4); /* Note: base clk is div 4 to 198M*/ ++ clks[PWM_CCLK] = xuantie_th1520_clk_fixed_factor("pwm_cclk", "osc_24m", 1, 1); ++ clks[CHIP_DBG_CCLK] = xuantie_th1520_clk_fixed_factor("chip_dbg_cclk", "osc_24m", 1, 1); ++ clks[GMAC_CCLK] = xuantie_th1520_clk_fixed_factor("gmac_cclk", "gmac_pll_fout1ph0", 1, 1); ++ clks[GPIO0_DBCLK] = xuantie_th1520_clk_fixed_factor("gpio0_dbclk", "pad_rtc_clk", 1, 1); ++ clks[GPIO1_DBCLK] = xuantie_th1520_clk_fixed_factor("gpio1_dbclk", "pad_rtc_clk", 1, 1); ++ clks[GPIO2_DBCLK] = xuantie_th1520_clk_fixed_factor("gpio2_dbclk", "pad_rtc_clk", 1, 1); ++ clks[GPIO3_DBCLK] = xuantie_th1520_clk_fixed_factor("gpio3_dbclk", "pad_rtc_clk", 1, 1); ++ clks[CLK_100M] = xuantie_th1520_clk_fixed_factor("clk_100m", "gmac_pll_foutpostdiv", 1, 10); ++ clks[I2C_IC_CLK] = xuantie_th1520_clk_fixed_factor("i2c_ic_clk", "clk_100m", 1, 2); ++ clks[TIMER_CCLK] = xuantie_th1520_clk_fixed_factor("timer_cclk", "osc_24m", 1, 1); ++ clks[AXI4_CPUSYS1_ACLK] = xuantie_th1520_clk_fixed_factor("axi4_cpusys1_aclk", "clkgen_c910_bus_clk_no_icg", 1, 1); ++ clks[CPU_BUS_DFTCLK] = xuantie_th1520_clk_fixed_factor("cpu_bus_dftclk", "cpu_pll0_foutpostdiv", 1, 2); ++ clks[CPU_PLL0_TEST_CLK] = xuantie_th1520_clk_fixed_factor("cpu_pll0_test_clk", "cpu_pll0_fout4", 1, 8); ++ clks[CPU_PLL1_TEST_CLK] = xuantie_th1520_clk_fixed_factor("cpu_pll1_test_clk", "cpu_pll1_fout4", 1, 8); ++ clks[DPU0_PLL_TEST_CLK] = xuantie_th1520_clk_fixed_factor("dpu0_pll_test_clk", "dpu0_pll_fout4", 1, 8); ++ clks[DPU1_PLL_TEST_CLK] = xuantie_th1520_clk_fixed_factor("dpu1_pll_test_clk", "dpu1_pll_fout4", 1, 8); ++ clks[GMAC_PLL_TEST_CLK] = xuantie_th1520_clk_fixed_factor("gmac_pll_test_clk", "gmac_pll_fout4", 1, 8); ++ clks[VIDEO_PLL_TEST_CLK] = xuantie_th1520_clk_fixed_factor("video_pll_test_clk", "video_pll_fout4", 1, 8); ++ clks[TEE_PLL_TEST_CLK] = xuantie_th1520_clk_fixed_factor("tee_pll_test_clk", "tee_pll_fout4", 1, 8); ++ clks[AONSYS_BUS_CLK] = xuantie_th1520_clk_fixed_factor("aonsys_bus_clk", "aonsys_hclk", 1, 1); ++ ++ /* Th1520 Fullmask Clock Gate */ ++ clks[CLKGEN_AHB2_CPUSYS_HCLK] = xuantie_clk_th1520_gate("clkgen_ahb2_cpusys_hclk", "ahb2_cpusys_hclk", ap_base + 0x120, 6); ++ clks[CLKGEN_APB3_CPUSYS_HCLK] = xuantie_clk_th1520_gate("clkgen_apb3_cpusys_hclk", "ahb2_cpusys_hclk", ap_base + 0x130, 4); ++ clks[CLKGEN_C910_BROM_HCLK] = xuantie_clk_th1520_gate("clkgen_c910_brom_hclk", "ahb2_cpusys_hclk", ap_base + 0x100, 4); ++ clks[CLKGEN_SPINLOCK_HCLK] = xuantie_clk_th1520_gate("clkgen_spinlock_hclk", "ahb2_cpusys_hclk", ap_base + 0x208, 10); ++ clks[CLKGEN_MBOX0_PCLK] = xuantie_clk_th1520_gate("clkgen_mbox0_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 7); ++ clks[CLKGEN_MBOX1_PCLK] = xuantie_clk_th1520_gate("clkgen_mbox1_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 6); ++ clks[CLKGEN_MBOX2_PCLK] = xuantie_clk_th1520_gate("clkgen_mbox2_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 5); ++ clks[CLKGEN_MBOX3_PCLK] = xuantie_clk_th1520_gate("clkgen_mbox3_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 4); 
++ clks[CLKGEN_WDT0_PCLK] = xuantie_clk_th1520_gate("clkgen_wdt0_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 3); ++ clks[CLKGEN_WDT1_PCLK] = xuantie_clk_th1520_gate("clkgen_wdt1_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 2); ++ ++ if (teesys) ++ clks[CLKGEN_MISCSYS_TEE_CCLK] = xuantie_clk_th1520_gate("clkgen_miscsys_tee_cclk", "teesys_hclk", ap_base + 0x1cc, 25); //just for teesys!!! ++ ++ clks[CLKGEN_SRAM_AXI_ACLK_2] = xuantie_clk_th1520_gate("clkgen_sram_axi_aclk_2", "axi4_cpusys1_aclk", ap_base + 0x20c, 2); ++ clks[CLKGEN_PERISYS_AHB_HCLK] = xuantie_clk_th1520_gate("clkgen_perisys_ahb_hclk", "perisys_ahb_hclk", ap_base + 0x140, 6); ++ clks[CLKGEN_PERISYS_APB1_HCLK] = xuantie_clk_th1520_gate("clkgen_perisys_apb1_hclk", "perisys_ahb_hclk", ap_base + 0x150, 9); ++ clks[CLKGEN_PERISYS_APB2_HCLK] = xuantie_clk_th1520_gate("clkgen_perisys_apb2_hclk", "perisys_ahb_hclk", ap_base + 0x150, 10); ++ clks[CLKGEN_PERISYS_APB4_HCLK] = xuantie_clk_th1520_gate("clkgen_perisys_apb4_hclk", "perisys_ahb_hclk", ap_base + 0x150, 12); ++ clks[CLKGEN_PADCTRL0_APSYS_PCLK] = xuantie_clk_th1520_gate("clkgen_padctrl0_apsys_pclk", "perisys_ahb_hclk", ap_base + 0x204, 22); ++ clks[CLKGEN_DSMART_PCLK] = xuantie_clk_th1520_gate("clkgen_dsmart_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 23); ++ clks[CLKGEN_PADCTRL1_APSYS_PCLK] = xuantie_clk_th1520_gate("clkgen_padctrl1_apsys_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 24); ++ clks[CLKGEN_CLK_OUT_1_CLK] = xuantie_clk_th1520_gate("clkgen_clk_out_1_clk", "clk_out_1", ap_base + 0x1b4, 5); ++ clks[CLKGEN_CLK_OUT_2_CLK] = xuantie_clk_th1520_gate("clkgen_clk_out_2_clk", "clk_out_2", ap_base + 0x1b8, 5); ++ clks[CLKGEN_CLK_OUT_3_CLK] = xuantie_clk_th1520_gate("clkgen_clk_out_3_clk", "clk_out_3", ap_base + 0x1bc, 5); ++ clks[CLKGEN_CLK_OUT_4_CLK] = xuantie_clk_th1520_gate("clkgen_clk_out_4_clk", "clk_out_4", ap_base + 0x1c0, 5); ++ clks[CLKGEN_NPUSYS_AXI_ACLK] = xuantie_clk_th1520_gate("clkgen_npusys_axi_aclk", "npu_cclk", ap_base + 0x1c8, 5); ++ clks[CLKGEN_SRAM_AXI_ACLK_0] = xuantie_clk_th1520_gate("clkgen_sram_axi_aclk_0", "npu_cclk", ap_base + 0x20c, 4); ++ clks[CLKGEN_APB_CPU2CFG_HCLK] = xuantie_clk_th1520_gate("clkgen_apb_cpu2cfg_hclk", "cfg_apb_pclk", ap_base + 0x1c4, 5); ++ clks[CLKGEN_SRAM_AXI_ACLK_1] = xuantie_clk_th1520_gate("clkgen_sram_axi_aclk_1", "visys_aclk_m", ap_base + 0x20c, 3); ++ clks[CLKGEN_SRAM_AXI_ACLK_3] = xuantie_clk_th1520_gate("clkgen_sram_axi_aclk_3", "vpsys_axi_aclk", ap_base + 0x20c, 1); ++ clks[CLKGEN_VPSYS_VENC_CCLK] = xuantie_clk_th1520_gate("clkgen_vpsys_venc_cclk", "venc_cclk", ap_base + 0x1e4, 5); ++ clks[CLKGEN_EMMC_SDIO_REF_CLK] = xuantie_clk_th1520_gate("clkgen_emmc_sdio_ref_clk", "emmc_sdio_ref_clk", ap_base + 0x204, 30); ++ ++ clks[CLKGEN_X2H_CPUSYS_MHCLK] = xuantie_clk_th1520_gate_shared("clkgen_x2h_cpusys_mhclk", "ahb2_cpusys_hclk", ap_base + 0x120, 7, &share_cnt_x2h_cpusys_clk_en); ++ clks[CLKGEN_X2H_CPUSYS_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_x2h_cpusys_aclk", "cfg_axi_aclk", ap_base + 0x120, 7, &share_cnt_x2h_cpusys_clk_en); ++ clks[CLKGEN_DMAC_CPUSYS_HCLK] = xuantie_clk_th1520_gate_shared("clkgen_dmac_cpusys_hclk", "ahb2_cpusys_hclk", ap_base + 0x208, 8, &share_cnt_dmac_cpusys_clk_en); ++ clks[CLKGEN_IOPMP_DMAC_CPUSYS_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_iopmp_dmac_cpusys_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 8, &share_cnt_dmac_cpusys_clk_en); ++ clks[CLKGEN_DMAC_CPUSYS_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_dmac_cpusys_aclk", "axi4_cpusys2_aclk", ap_base + 0x208, 8, 
&share_cnt_dmac_cpusys_clk_en); ++ clks[CLKGEN_TIMER0_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_timer0_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 1, &share_cnt_timer0_clk_en); ++ clks[CLKGEN_TIMER0_CCLK] = xuantie_clk_th1520_gate_shared("clkgen_timer0_cclk", "timer_cclk", ap_base + 0x208, 1, &share_cnt_timer0_clk_en); ++ clks[CLKGEN_TIMER1_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_timer1_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 0, &share_cnt_timer1_clk_en); ++ clks[CLKGEN_TIMER1_CCLK] = xuantie_clk_th1520_gate_shared("clkgen_timer1_cclk", "timer_cclk", ap_base + 0x208, 0, &share_cnt_timer1_clk_en); ++ clks[CLKGEN_AXI4_CPUSYS2_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_axi4_cpusys2_pclk", "apb3_cpusys_pclk", ap_base + 0x134, 5, &share_cnt_axi4_cpusys2_clk_en); ++ clks[CLKGEN_AXI4_CPUSYS2_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_axi4_cpusys2_aclk", "axi4_cpusys2_aclk", ap_base + 0x134, 5, &share_cnt_axi4_cpusys2_clk_en); ++ clks[CLKGEN_BMU_C910_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_bmu_c910_pclk", "apb3_cpusys_pclk", ap_base + 0x100, 5, &share_cnt_bmu_c910_clk_en); ++ clks[CLKGEN_BMU_C910_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_bmu_c910_aclk", "axi4_cpusys1_aclk", ap_base + 0x100, 5, &share_cnt_bmu_c910_clk_en); ++ clks[CLKGEN_IOPMP_AON_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_iopmp_aon_pclk", "apb3_cpusys_pclk", ap_base + 0x134, 8, &share_cnt_aon2cpu_a2x_clk_en); ++ clks[CLKGEN_AON2CPU_A2X_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_aon2cpu_a2x_aclk", "axi4_cpusys2_aclk", ap_base + 0x134, 8, &share_cnt_aon2cpu_a2x_clk_en); ++ clks[CLKGEN_AON2CPU_A2X_HCLK] = xuantie_clk_th1520_gate_shared("clkgen_aon2cpu_a2x_hclk", "aonsys_bus_clk", ap_base + 0x134, 8, &share_cnt_aon2cpu_a2x_clk_en); ++ clks[CLKGEN_IOPMP_CHIP_DBG_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_iopmp_chip_dbg_pclk", "apb3_cpusys_pclk", ap_base + 0x208, 9, &share_cnt_chip_dbg_clk_en); ++ clks[CLKGEN_CHIP_DBG_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_chip_dbg_aclk", "axi4_cpusys2_aclk", ap_base + 0x208, 9, &share_cnt_chip_dbg_clk_en); ++ clks[CLKGEN_CHIP_DBG_CCLK] = xuantie_clk_th1520_gate_shared("clkgen_chip_dbg_cclk", "chip_dbg_cclk", ap_base + 0x208, 9, &share_cnt_chip_dbg_clk_en); ++ clks[CLKGEN_X2X_CPUSYS_ACLK_M] = xuantie_clk_th1520_gate_shared("clkgen_x2x_cpusys_aclk_m", "axi4_cpusys2_aclk", ap_base + 0x134, 7, &share_cnt_x2x_cpusys_clk_en); ++ clks[CLKGEN_X2X_CPUSYS_ACLK_S] = xuantie_clk_th1520_gate_shared("clkgen_x2x_cpusys_aclk_s", "axi4_cpusys1_aclk", ap_base + 0x134, 7, &share_cnt_x2x_cpusys_clk_en); ++ clks[CLKGEN_CPU2PERI_X2H_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_cpu2peri_x2h_aclk", "axi4_cpusys1_aclk", ap_base + 0x140, 9, &share_cnt_cpu2peri_x2h_clk_en); ++ clks[CLKGEN_CPU2PERI_X2H_MHCLK] = xuantie_clk_th1520_gate_shared("clkgen_cpu2peri_x2h_mhclk", "perisys_ahb_hclk", ap_base + 0x140, 9, &share_cnt_cpu2peri_x2h_clk_en); ++ clks[CLKGEN_CPU2VI_X2H_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_cpu2vi_x2h_aclk", "axi4_cpusys1_aclk", ap_base + 0x1d0, 21, &share_cnt_cpu2vi_x2h_clk_en); ++ clks[CLKGEN_CPU2VI_X2H_MHCLK] = xuantie_clk_th1520_gate_shared("clkgen_cpu2vi_x2h_mhclk", "visys_ahb_hclk", ap_base + 0x1d0, 21, &share_cnt_cpu2vi_x2h_clk_en); ++ clks[CLKGEN_CFG2TEE_X2H_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_cfg2tee_x2h_aclk", "cfg_axi_aclk", ap_base + 0x1cc, 24, &share_cnt_cfg2tee_x2h_clk_en); // just for teesys!!! 
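++
++	/*
++	 * The xuantie_clk_th1520_gate_shared() registrations in this block
++	 * map several logical clocks onto one enable bit. The shared
++	 * counter (see clk_th1520_gate_share_enable/_disable in clk.c)
++	 * flips the bit only on the first enable and the last disable, so
++	 * e.g. clkgen_gmac0_hclk, clkgen_gmac0_pclk and clkgen_gmac0_cclk
++	 * keep bit 19 of register 0x204 set while any of them is in use.
++	 */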
++ clks[CLKGEN_CFG2TEE_X2H_MHCLK] = xuantie_clk_th1520_gate_shared("clkgen_cfg2tee_x2h_mhclk", "teesys_hclk", ap_base + 0x1cc, 24, &share_cnt_cfg2tee_x2h_clk_en); // just for teesys!!! ++ clks[CLKGEN_CPU2AON_X2H_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_cpu2aon_x2h_aclk", "cfg_axi_aclk", ap_base + 0x138, 8, &share_cnt_cpu2aon_x2h_clk_en); ++ clks[CLKGEN_CPU2AON_X2H_MHCLK] = xuantie_clk_th1520_gate_shared("clkgen_cpu2aon_x2h_mhclk", "aonsys_bus_clk", ap_base + 0x138, 8, &share_cnt_cpu2aon_x2h_clk_en); ++ clks[CLKGEN_CPU2VP_X2P_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_cpu2vp_x2p_aclk", "cfg_axi_aclk", ap_base + 0x1e0, 13, &share_cnt_cpu2vp_x2p_clk_en); ++ clks[CLKGEN_CPU2VP_X2P_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_cpu2vp_x2p_pclk", "vpsys_apb_pclk", ap_base + 0x1e0, 13, &share_cnt_cpu2vp_x2p_clk_en); ++ clks[CLKGEN_TOP_AXI4S_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_top_axi4s_aclk", "cfg_axi_aclk", ap_base + 0x1c8, 4, &share_cnt_npu_core_clk_en); ++ clks[CLKGEN_TOP_APB_SX_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_top_apb_sx_pclk", "cfg_apb_pclk", ap_base + 0x1c8, 4, &share_cnt_npu_core_clk_en); ++ clks[CLKGEN_MISC2VP_X2X_ACLK_M] = xuantie_clk_th1520_gate_shared("clkgen_misc2vp_x2x_aclk_m", "perisys_ahb_hclk", ap_base + 0x1e0, 15, &share_cnt_vpsys_axi_aclk_en); ++ clks[CLKGEN_VPSYS_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_vpsys_aclk", "vpsys_axi_aclk", ap_base + 0x1e0, 15, &share_cnt_vpsys_axi_aclk_en); ++ clks[CLKGEN_GMAC1_HCLK] = xuantie_clk_th1520_gate_shared("clkgen_gmac1_hclk", "perisys_ahb_hclk", ap_base + 0x204, 26, &share_cnt_gmac1_clk_en); ++ clks[CLKGEN_GMAC1_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_gmac1_pclk", "perisys_ahb_hclk", ap_base + 0x204, 26, &share_cnt_gmac1_clk_en); ++ clks[CLKGEN_GMAC1_CCLK] = xuantie_clk_th1520_gate_shared("clkgen_gmac1_cclk", "gmac_cclk", ap_base + 0x204, 26, &share_cnt_gmac1_clk_en); ++ clks[CLKGEN_GMAC0_HCLK] = xuantie_clk_th1520_gate_shared("clkgen_gmac0_hclk", "perisys_ahb_hclk", ap_base + 0x204, 19, &share_cnt_gmac0_clk_en); ++ clks[CLKGEN_GMAC0_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_gmac0_pclk", "perisys_ahb_hclk", ap_base + 0x204, 19, &share_cnt_gmac0_clk_en); ++ clks[CLKGEN_GMAC0_CCLK] = xuantie_clk_th1520_gate_shared("clkgen_gmac0_cclk", "gmac_cclk", ap_base + 0x204, 19, &share_cnt_gmac0_clk_en); ++ clks[CLKGEN_PERI2PERI1_APB_HCLK] = xuantie_clk_th1520_gate_shared("clkgen_peri2peri1_apb_hclk", "perisys_ahb_hclk", ap_base + 0x150, 11, &share_cnt_perisys_apb3_hclk_en); ++ clks[CLKGEN_PERI2PERI1_APB_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_peri2peri1_apb_pclk", "peri2sys_apb_pclk", ap_base + 0x150, 11, &share_cnt_perisys_apb3_hclk_en); ++ clks[CLKGEN_QSPI0_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_qspi0_pclk", "perisys_ahb_hclk", ap_base + 0x204, 17, &share_cnt_qspi0_clk_en); ++ clks[CLKGEN_QSPI0_SSI_CLK] = xuantie_clk_th1520_gate_shared("clkgen_qspi0_ssi_clk", "qspi0_ssi_clk", ap_base + 0x204, 17, &share_cnt_qspi0_clk_en); ++ clks[CLKGEN_GMAC_AXI_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_gmac_axi_aclk", "perisys_ahb_hclk", ap_base + 0x204, 21, &share_cnt_gmac_axi_clk_en); ++ clks[CLKGEN_GMAC_AXI_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_gmac_axi_pclk", "perisys_ahb_hclk", ap_base + 0x204, 21, &share_cnt_gmac_axi_clk_en); ++ clks[CLKGEN_GPIO0_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_gpio0_pclk", "perisys_ahb_hclk", ap_base + 0x204, 8, &share_cnt_gpio0_clk_en); ++ clks[CLKGEN_GPIO0_DBCLK] = xuantie_clk_th1520_gate_shared("clkgen_gpio0_dbclk", "gpio0_dbclk", ap_base + 
0x204, 8, &share_cnt_gpio0_clk_en);
++	clks[CLKGEN_GPIO1_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_gpio1_pclk", "perisys_ahb_hclk", ap_base + 0x204, 7, &share_cnt_gpio1_clk_en);
++	clks[CLKGEN_GPIO1_DBCLK] = xuantie_clk_th1520_gate_shared("clkgen_gpio1_dbclk", "gpio1_dbclk", ap_base + 0x204, 7, &share_cnt_gpio1_clk_en);
++	clks[CLKGEN_PWM_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_pwm_pclk", "perisys_apb_pclk", ap_base + 0x204, 18, &share_cnt_pwm_clk_en);
++	clks[CLKGEN_PWM_CCLK] = xuantie_clk_th1520_gate_shared("clkgen_pwm_cclk", "pwm_cclk", ap_base + 0x204, 18, &share_cnt_pwm_clk_en);
++	clks[CLKGEN_SPI_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_spi_pclk", "perisys_apb_pclk", ap_base + 0x204, 15, &share_cnt_spi_clk_en);
++	clks[CLKGEN_SPI_SSI_CLK] = xuantie_clk_th1520_gate_shared("clkgen_spi_ssi_clk", "spi_ssi_clk", ap_base + 0x204, 15, &share_cnt_spi_clk_en);
++	clks[CLKGEN_UART0_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart0_pclk", "perisys_apb_pclk", ap_base + 0x204, 14, &share_cnt_uart0_clk_en);
++	clks[CLKGEN_UART0_SCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart0_sclk", "uart_sclk", ap_base + 0x204, 14, &share_cnt_uart0_clk_en);
++	clks[CLKGEN_UART2_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart2_pclk", "perisys_apb_pclk", ap_base + 0x204, 12, &share_cnt_uart2_clk_en);
++	clks[CLKGEN_UART2_SCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart2_sclk", "uart_sclk", ap_base + 0x204, 12, &share_cnt_uart2_clk_en);
++	clks[CLKGEN_I2C2_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c2_pclk", "perisys_apb_pclk", ap_base + 0x204, 3, &share_cnt_i2c2_clk_en);
++	clks[CLKGEN_I2C2_IC_CLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c2_ic_clk", "i2c_ic_clk", ap_base + 0x204, 3, &share_cnt_i2c2_clk_en);
++	clks[CLKGEN_I2C3_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c3_pclk", "perisys_apb_pclk", ap_base + 0x204, 2, &share_cnt_i2c3_clk_en);
++	clks[CLKGEN_I2C3_IC_CLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c3_ic_clk", "i2c_ic_clk", ap_base + 0x204, 2, &share_cnt_i2c3_clk_en);
++	clks[CLKGEN_I2S_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_i2s_pclk", "perisys_apb_pclk", ap_base + 0x1f0, 1, &share_cnt_peri_i2s_clk_en);
++	clks[CLKGEN_I2S_SRC_CLK] = xuantie_clk_th1520_gate_shared("clkgen_i2s_src_clk", "peri_i2s_src_clk", ap_base + 0x1f0, 1, &share_cnt_peri_i2s_clk_en);
++	clks[CLKGEN_QSPI1_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_qspi1_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 16, &share_cnt_qspi1_clk_en);
++	clks[CLKGEN_QSPI1_SSI_CLK] = xuantie_clk_th1520_gate_shared("clkgen_qspi1_ssi_clk", "qspi1_ssi_clk", ap_base + 0x204, 16, &share_cnt_qspi1_clk_en);
++	clks[CLKGEN_UART1_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart1_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 13, &share_cnt_uart1_clk_en);
++	clks[CLKGEN_UART1_SCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart1_sclk", "uart_sclk", ap_base + 0x204, 13, &share_cnt_uart1_clk_en);
++	clks[CLKGEN_UART3_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart3_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 11, &share_cnt_uart3_clk_en);
++	clks[CLKGEN_UART3_SCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart3_sclk", "uart_sclk", ap_base + 0x204, 11, &share_cnt_uart3_clk_en);
++	clks[CLKGEN_UART4_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart4_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 10, &share_cnt_uart4_clk_en);
++	clks[CLKGEN_UART4_SCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart4_sclk", "uart_sclk", ap_base + 0x204, 10, &share_cnt_uart4_clk_en);
++	clks[CLKGEN_UART5_PCLK] =
xuantie_clk_th1520_gate_shared("clkgen_uart5_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 9, &share_cnt_uart5_clk_en); ++ clks[CLKGEN_UART5_SCLK] = xuantie_clk_th1520_gate_shared("clkgen_uart5_sclk", "uart_sclk", ap_base + 0x204, 9, &share_cnt_uart5_clk_en); ++ clks[CLKGEN_I2C0_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c0_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 5, &share_cnt_i2c0_clk_en); ++ clks[CLKGEN_I2C0_IC_CLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c0_ic_clk", "i2c_ic_clk", ap_base + 0x204, 5, &share_cnt_i2c0_clk_en); ++ clks[CLKGEN_I2C1_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c1_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 4, &share_cnt_i2c1_clk_en); ++ clks[CLKGEN_I2C1_IC_CLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c1_ic_clk", "i2c_ic_clk", ap_base + 0x204, 4, &share_cnt_i2c1_clk_en); ++ clks[CLKGEN_I2C4_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c4_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 1, &share_cnt_i2c4_clk_en); ++ clks[CLKGEN_I2C4_IC_CLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c4_ic_clk", "i2c_ic_clk", ap_base + 0x204, 1, &share_cnt_i2c4_clk_en); ++ clks[CLKGEN_I2C5_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c5_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 0, &share_cnt_i2c5_clk_en); ++ clks[CLKGEN_I2C5_IC_CLK] = xuantie_clk_th1520_gate_shared("clkgen_i2c5_ic_clk", "i2c_ic_clk", ap_base + 0x204, 0, &share_cnt_i2c5_clk_en); ++ clks[CLKGEN_GPIO2_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_gpio2_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 6, &share_cnt_gpio2_clk_en); ++ clks[CLKGEN_GPIO2_DBCLK] = xuantie_clk_th1520_gate_shared("clkgen_gpio2_dbclk", "gpio2_dbclk", ap_base + 0x204, 6, &share_cnt_gpio2_clk_en); ++ clks[CLKGEN_GPIO3_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_gpio3_pclk", "peri2sys_apb_pclk", ap_base + 0x204, 6, &share_cnt_gpio2_clk_en); //!!! 
gpio3 pclk is controlled by gpio2_clk_en
++	clks[CLKGEN_GPIO3_DBCLK] = xuantie_clk_th1520_gate_shared("clkgen_gpio3_dbclk", "gpio3_dbclk", ap_base + 0x204, 20, &share_cnt_gpio3_clk_en);
++	clks[CLKGEN_VOSYS_AXI_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_vosys_axi_aclk", "vosys_aclk_m", ap_base + 0x1dc, 5, &share_cnt_vosys_axi_aclk_en);
++	clks[CLKGEN_VOSYS_X2X_ACLK_S] = xuantie_clk_th1520_gate_shared("clkgen_vosys_x2x_aclk_s", "npu_cclk", ap_base + 0x1dc, 5, &share_cnt_vosys_axi_aclk_en);
++
++	clk_data.clks = clks;
++	clk_data.clk_num = ARRAY_SIZE(clks);
++
++	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
++	if (ret < 0) {
++		dev_err(dev, "failed to register clks for th1520\n");
++		goto unregister_clks;
++	}
++
++#ifndef FPGA_EMU
++	/* HW default */
++	clk_set_parent(clks[C910_CCLK], clks[CPU_PLL1_FOUTPOSTDIV]);
++#else
++	clk_set_parent(clks[C910_CCLK_I0], clks[OSC_24M]);
++	clk_set_parent(clks[C910_CCLK], clks[C910_CCLK_I0]);
++#endif
++	dev_info(dev, "registered th1520 fullmask clock driver\n");
++
++	return 0;
++
++unregister_clks:
++	xuantie_unregister_clocks(clks, ARRAY_SIZE(clks));
++	return ret;
++}
++
++static const bool tee_sys_flag = true;
++
++static const struct of_device_id th1520_clk_of_match[] = {
++	{ .compatible = "xuantie,th1520-fm-ree-clk" },
++	{ .compatible = "xuantie,th1520-fm-tee-clk", .data = &tee_sys_flag, },
++	{ /* Sentinel */ },
++};
++MODULE_DEVICE_TABLE(of, th1520_clk_of_match);
++
++static struct platform_driver th1520_clk_driver = {
++	.probe = th1520_clocks_probe,
++	.driver = {
++		.name = "th1520-fm-clk",
++		.of_match_table = of_match_ptr(th1520_clk_of_match),
++	},
++};
++
++module_platform_driver(th1520_clk_driver);
++MODULE_AUTHOR("wei.liu ");
++MODULE_DESCRIPTION("XuanTie Th1520 Fullmask clock driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/clk/xuantie/clk.c b/drivers/clk/xuantie/clk.c
+new file mode 100644
+index 000000000000..c53814078684
+--- /dev/null
++++ b/drivers/clk/xuantie/clk.c
+@@ -0,0 +1,766 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2021 Alibaba Group Holding Limited.
++ */
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/iopoll.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++
++#include "clk.h"
++
++#define TH1520_PLL_CFG0		0x0
++#define TH1520_PLL_CFG1		0x04
++#define TH1520_PLL_CFG2		0x8
++#define TH1520_POSTDIV2_SHIFT	24
++#define TH1520_POSTDIV2_MASK	GENMASK(26, 24)
++#define TH1520_POSTDIV1_SHIFT	20
++#define TH1520_POSTDIV1_MASK	GENMASK(22, 20)
++#define TH1520_FBDIV_SHIFT	8
++#define TH1520_FBDIV_MASK	GENMASK(19, 8)
++#define TH1520_REFDIV_SHIFT	0
++#define TH1520_REFDIV_MASK	GENMASK(5, 0)
++#define TH1520_BYPASS_MASK	BIT(30)
++#define TH1520_RST_MASK		BIT(29)
++#define TH1520_DSMPD_MASK	BIT(24)
++#define TH1520_DACPD_MASK	BIT(25)
++#define TH1520_FRAC_MASK	GENMASK(23, 0)
++#define TH1520_FRAC_SHIFT	0
++#define TH1520_FRAC_DIV		BIT(24)
++
++#define LOCK_TIMEOUT_US		10000
++
++#define div_mask(d)	((1 << (d->width)) - 1)
++
++DEFINE_SPINLOCK(xuantie_th1520_clk_lock);
++
++enum th1520_pll_mode {
++	PLL_MODE_FRAC,
++	PLL_MODE_INT,
++};
++
++struct clk_th1520pll {
++	struct clk_hw hw;
++	void __iomem *base;
++	enum th1520_pll_clktype clk_type;
++	enum th1520_pll_outtype out_type;
++	enum th1520_pll_mode pll_mode;
++	const struct th1520_pll_rate_table *rate_table;
++	int rate_count;
++
++	u32 cfg0_reg_off;
++	u32 pll_sts_off;
++	int pll_lock_bit;
++
++	/* On Th1520 MPW the AON/DDR PLLs define the bypass:rst bits as
++	 * 31:30, while the AP PLLs define them as 30:29.
++	 *
++	 * Th1520 Fullmask aligns these register fields: all PLLs define
++	 * the bypass:rst bits as 30:29.
++	 */
++	int pll_rst_bit;
++	int pll_bypass_bit;
++};
++
++struct clk_th1520div {
++	struct clk_divider divider;
++	enum th1520_div_type div_type;
++	u16 min_div;
++	u16 max_div;
++	u8 sync_en;
++	const struct clk_ops *ops;
++};
++
++struct clk_th1520gate {
++	struct clk_gate gate;
++	unsigned int *share_count;
++	const struct clk_ops *ops;
++};
++
++#define to_clk_th1520pll(_hw) container_of(_hw, struct clk_th1520pll, hw)
++
++void xuantie_unregister_clocks(struct clk *clks[], unsigned int count)
++{
++	unsigned int i;
++
++	for (i = 0; i < count; i++)
++		clk_unregister(clks[i]);
++}
++
++static void clk_th1520_pll_cfg_init(struct clk_th1520pll *pll)
++{
++	switch (pll->clk_type) {
++	case TH1520_AUDIO_PLL:
++		pll->cfg0_reg_off = 0x0;
++		pll->pll_sts_off = 0x90;
++		pll->pll_lock_bit = BIT(0);
++		pll->pll_bypass_bit = BIT(31);
++		pll->pll_rst_bit = BIT(30);
++		pll->pll_mode = PLL_MODE_FRAC;
++		break;
++	case TH1520_SYS_PLL:
++		pll->cfg0_reg_off = 0x10;
++		pll->pll_sts_off = 0x90;
++		pll->pll_lock_bit = BIT(1);
++		pll->pll_bypass_bit = BIT(31);
++		pll->pll_rst_bit = BIT(30);
++		pll->pll_mode = PLL_MODE_FRAC;
++		break;
++	case TH1520_CPU_PLL0:
++		pll->cfg0_reg_off = 0x0;
++		pll->pll_sts_off = 0x80;
++		pll->pll_lock_bit = BIT(1);
++		pll->pll_bypass_bit = BIT(30);
++		pll->pll_rst_bit = BIT(29);
++		pll->pll_mode = PLL_MODE_INT;
++		break;
++	case TH1520_CPU_PLL1:
++		pll->cfg0_reg_off = 0x10;
++		pll->pll_sts_off = 0x80;
++		pll->pll_lock_bit = BIT(4);
++		pll->pll_bypass_bit = BIT(30);
++		pll->pll_rst_bit = BIT(29);
++		pll->pll_mode = PLL_MODE_INT;
++		break;
++	case TH1520_GMAC_PLL:
++		pll->cfg0_reg_off = 0x20;
++		pll->pll_sts_off = 0x80;
++		pll->pll_lock_bit = BIT(3);
++		pll->pll_bypass_bit = BIT(30);
++		pll->pll_rst_bit = BIT(29);
++		pll->pll_mode = PLL_MODE_INT;
++		break;
++	case TH1520_VIDEO_PLL:
++		pll->cfg0_reg_off = 0x30;
++		pll->pll_sts_off = 0x80;
++		pll->pll_lock_bit = BIT(7);
++		pll->pll_bypass_bit = BIT(30);
++		pll->pll_rst_bit = BIT(29);
++		pll->pll_mode = PLL_MODE_INT;
++		break;
++	case TH1520_DDR_PLL:
pll->cfg0_reg_off = 0x8;
++		pll->pll_sts_off = 0x18;
++		pll->pll_lock_bit = BIT(0);
++		pll->pll_bypass_bit = BIT(31);
++		pll->pll_rst_bit = BIT(30);
++		pll->pll_mode = PLL_MODE_INT;
++		break;
++	case TH1520_DPU0_PLL:
++		pll->cfg0_reg_off = 0x40;
++		pll->pll_sts_off = 0x80;
++		pll->pll_lock_bit = BIT(8);
++		pll->pll_bypass_bit = BIT(30);
++		pll->pll_rst_bit = BIT(29);
++		pll->pll_mode = PLL_MODE_INT;
++		break;
++	case TH1520_DPU1_PLL:
++		pll->cfg0_reg_off = 0x50;
++		pll->pll_sts_off = 0x80;
++		pll->pll_lock_bit = BIT(9);
++		pll->pll_bypass_bit = BIT(30);
++		pll->pll_rst_bit = BIT(29);
++		pll->pll_mode = PLL_MODE_INT;
++		break;
++	default:
++		pr_err("%s: Unknown pll type\n", __func__);
++	}
++}
++
++static int clk_th1520_pll_wait_lock(struct clk_th1520pll *pll)
++{
++	u32 val;
++
++	return readl_poll_timeout(pll->base + pll->pll_sts_off, val,
++				  val & pll->pll_lock_bit, 0,
++				  LOCK_TIMEOUT_US);
++}
++
++static int clk_th1520_pll_prepare(struct clk_hw *hw)
++{
++	struct clk_th1520pll *pll = to_clk_th1520pll(hw);
++	void __iomem *cfg1_off;
++	u32 val;
++	int ret;
++
++	cfg1_off = pll->base + pll->cfg0_reg_off + TH1520_PLL_CFG1;
++	val = readl_relaxed(cfg1_off);
++	if (!(val & pll->pll_rst_bit))
++		return 0;
++
++	/* Enable RST */
++	val |= pll->pll_rst_bit;
++	writel_relaxed(val, cfg1_off);
++
++	udelay(3);
++
++	/* Disable RST */
++	val &= ~pll->pll_rst_bit;
++	writel_relaxed(val, cfg1_off);
++
++	ret = clk_th1520_pll_wait_lock(pll);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++static int clk_th1520_pll_is_prepared(struct clk_hw *hw)
++{
++	struct clk_th1520pll *pll = to_clk_th1520pll(hw);
++	u32 val;
++
++	val = readl_relaxed(pll->base + pll->cfg0_reg_off + TH1520_PLL_CFG1);
++
++	return (val & pll->pll_rst_bit) ? 0 : 1;
++}
++
++static void clk_th1520_pll_unprepare(struct clk_hw *hw)
++{
++	struct clk_th1520pll *pll = to_clk_th1520pll(hw);
++	u32 val;
++
++	val = readl_relaxed(pll->base + pll->cfg0_reg_off + TH1520_PLL_CFG1);
++	val |= pll->pll_rst_bit;
++	writel_relaxed(val, pll->base + pll->cfg0_reg_off + TH1520_PLL_CFG1);
++}
++
++static unsigned long clk_th1520_pll_recalc_rate(struct clk_hw *hw,
++						unsigned long parent_rate)
++{
++#ifndef CONFIG_TH1520_CLK_EMU
++	struct clk_th1520pll *pll = to_clk_th1520pll(hw);
++	u32 refdiv, fbdiv, postdiv1, postdiv2, frac;
++	u32 pll_cfg0, pll_cfg1;
++	u64 fvco = 0;
++
++	pll_cfg0 = readl_relaxed(pll->base + pll->cfg0_reg_off);
++	pll_cfg1 = readl_relaxed(pll->base + pll->cfg0_reg_off + TH1520_PLL_CFG1);
++	refdiv = (pll_cfg0 & TH1520_REFDIV_MASK) >> TH1520_REFDIV_SHIFT;
++	fbdiv = (pll_cfg0 & TH1520_FBDIV_MASK) >> TH1520_FBDIV_SHIFT;
++	postdiv1 = (pll_cfg0 & TH1520_POSTDIV1_MASK) >> TH1520_POSTDIV1_SHIFT;
++	postdiv2 = (pll_cfg0 & TH1520_POSTDIV2_MASK) >> TH1520_POSTDIV2_SHIFT;
++	frac = (pll_cfg1 & TH1520_FRAC_MASK) >> TH1520_FRAC_SHIFT;
++
++	/* rate calculation:
++	 * INT mode:  FOUTVCO = FREF * FBDIV / REFDIV
++	 * FRAC mode: FOUTVCO = (FREF * FBDIV + FREF * FRAC / BIT(24)) / REFDIV
++	 */
++	if (pll->pll_mode == PLL_MODE_FRAC)
++		fvco = (parent_rate * frac) / TH1520_FRAC_DIV;
++
++	fvco += (parent_rate * fbdiv);
++	do_div(fvco, refdiv);
++
++	if (pll->out_type == TH1520_PLL_DIV)
++		do_div(fvco, postdiv1 * postdiv2);
++
++	return fvco;
++#else
++
++	struct clk_th1520pll *pll = to_clk_th1520pll(hw);
++	const struct th1520_pll_rate_table *rate_table = pll->rate_table;
++
++	/* return the first supported rate in the table */
++	if (pll->out_type == TH1520_PLL_DIV)
++		return rate_table[0].rate;
++
++	return rate_table[0].vco_rate;
++#endif
++}
++
++static const
struct th1520_pll_rate_table *th1520_get_pll_div_settings( ++ struct clk_th1520pll *pll, unsigned long rate) ++{ ++ const struct th1520_pll_rate_table *rate_table = pll->rate_table; ++ int i; ++ ++ for (i = 0; i < pll->rate_count; i++) ++ if (rate == rate_table[i].rate) ++ return &rate_table[i]; ++ ++ return NULL; ++} ++ ++static const struct th1520_pll_rate_table *th1520_get_pll_vco_settings( ++ struct clk_th1520pll *pll, unsigned long rate) ++{ ++ const struct th1520_pll_rate_table *rate_table = pll->rate_table; ++ int i; ++ ++ for (i = 0; i < pll->rate_count; i++) ++ if (rate == rate_table[i].vco_rate) ++ return &rate_table[i]; ++ ++ return NULL; ++} ++ ++static inline bool clk_th1520_pll_change(struct clk_th1520pll *pll, ++ const struct th1520_pll_rate_table *rate) ++{ ++ u32 refdiv_old, fbdiv_old, postdiv1_old, postdiv2_old, frac_old; ++ u32 cfg0, cfg1; ++ bool pll_changed; ++ ++ cfg0 = readl_relaxed(pll->base + pll->cfg0_reg_off); ++ cfg1 = readl_relaxed(pll->base + pll->cfg0_reg_off + TH1520_PLL_CFG1); ++ ++ refdiv_old = (cfg0 & TH1520_REFDIV_MASK) >> TH1520_REFDIV_SHIFT; ++ fbdiv_old = (cfg0 & TH1520_FBDIV_MASK) >> TH1520_FBDIV_SHIFT; ++ postdiv1_old = (cfg0 & TH1520_POSTDIV1_MASK) >> TH1520_POSTDIV1_SHIFT; ++ postdiv2_old = (cfg0 & TH1520_POSTDIV2_MASK) >> TH1520_POSTDIV2_SHIFT; ++ frac_old = (cfg1 & TH1520_FRAC_MASK) >> TH1520_FRAC_SHIFT; ++ ++ pll_changed = rate->refdiv != refdiv_old || rate->fbdiv != fbdiv_old || ++ rate->postdiv1 != postdiv1_old || rate->postdiv2 != postdiv2_old; ++ if (pll->pll_mode == PLL_MODE_FRAC) ++ pll_changed |= (rate->frac != frac_old); ++ ++ return pll_changed; ++} ++ ++static int clk_th1520_pll_set_rate(struct clk_hw *hw, unsigned long drate, ++ unsigned long prate) ++{ ++ struct clk_th1520pll *pll = to_clk_th1520pll(hw); ++ const struct th1520_pll_rate_table *rate; ++ void __iomem *cfg1_off; ++ u32 tmp, div_val; ++ int ret; ++ ++ if (pll->out_type == TH1520_PLL_VCO) { ++ rate = th1520_get_pll_vco_settings(pll, drate); ++ if (!rate) { ++ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, ++ drate, clk_hw_get_name(hw)); ++ return -EINVAL; ++ } ++ } else { ++ rate = th1520_get_pll_div_settings(pll, drate); ++ if (!rate) { ++ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, ++ drate, clk_hw_get_name(hw)); ++ return -EINVAL; ++ } ++ } ++ ++ if (!clk_th1520_pll_change(pll, rate)) ++ return 0; ++ ++ /* Enable RST */ ++ cfg1_off = pll->base + pll->cfg0_reg_off + TH1520_PLL_CFG1; ++ tmp = readl_relaxed(cfg1_off); ++ tmp |= pll->pll_rst_bit; ++ writel_relaxed(tmp, cfg1_off); ++ ++ div_val = (rate->refdiv << TH1520_REFDIV_SHIFT) | ++ (rate->fbdiv << TH1520_FBDIV_SHIFT) | ++ (rate->postdiv1 << TH1520_POSTDIV1_SHIFT) | ++ (rate->postdiv2 << TH1520_POSTDIV2_SHIFT); ++ writel_relaxed(div_val, pll->base + pll->cfg0_reg_off); ++ ++ if (pll->pll_mode == PLL_MODE_FRAC) { ++ tmp &= ~(TH1520_FRAC_MASK << TH1520_FRAC_SHIFT); ++ tmp |= rate->frac; ++ writel_relaxed(tmp, cfg1_off); ++ } ++ ++ udelay(3); ++ ++ /* Disable RST */ ++ tmp &= ~pll->pll_rst_bit; ++ writel_relaxed(tmp, cfg1_off); ++ ++ /* Wait Lock, ~20us cost */ ++ ret = clk_th1520_pll_wait_lock(pll); ++ if (ret) ++ return ret; ++ ++ /* HW requires 30us for pll stable */ ++ udelay(30); ++ ++ return 0; ++} ++ ++static long clk_th1520_pllvco_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *prate) ++{ ++ struct clk_th1520pll *pll = to_clk_th1520pll(hw); ++ const struct th1520_pll_rate_table *rate_table = pll->rate_table; ++ unsigned long best = 0, now = 0; ++ unsigned int i, 
best_i = 0;
++
++	for (i = 0; i < pll->rate_count; i++) {
++		now = rate_table[i].vco_rate;
++
++		if (rate == now) {
++			return rate_table[i].vco_rate;
++		} else if (abs_diff(now, rate) < abs_diff(best, rate)) {
++			best = now;
++			best_i = i;
++		}
++	}
++
++	/* no exact match: return the closest supported VCO rate */
++	return rate_table[best_i].vco_rate;
++}
++
++static long clk_th1520_plldiv_round_rate(struct clk_hw *hw, unsigned long rate,
++					 unsigned long *prate)
++{
++	struct clk_th1520pll *pll = to_clk_th1520pll(hw);
++	const struct th1520_pll_rate_table *rate_table = pll->rate_table;
++	unsigned long best = 0, now = 0;
++	unsigned int i, best_i = 0;
++
++	for (i = 0; i < pll->rate_count; i++) {
++		now = rate_table[i].rate;
++
++		if (rate == now) {
++			return rate_table[i].rate;
++		} else if (abs_diff(now, rate) < abs_diff(best, rate)) {
++			best = now;
++			best_i = i;
++		}
++	}
++
++	/* no exact match: return the closest supported rate */
++	return rate_table[best_i].rate;
++}
++
++static const struct clk_ops clk_th1520_pll_def_ops = {
++	.recalc_rate = clk_th1520_pll_recalc_rate,
++};
++
++static const struct clk_ops clk_th1520_pllvco_ops = {
++	.prepare = clk_th1520_pll_prepare,
++	.unprepare = clk_th1520_pll_unprepare,
++	.is_prepared = clk_th1520_pll_is_prepared,
++	.recalc_rate = clk_th1520_pll_recalc_rate,
++	.round_rate = clk_th1520_pllvco_round_rate,
++	.set_rate = clk_th1520_pll_set_rate,
++};
++
++static const struct clk_ops clk_th1520_plldiv_ops = {
++	.prepare = clk_th1520_pll_prepare,
++	.unprepare = clk_th1520_pll_unprepare,
++	.is_prepared = clk_th1520_pll_is_prepared,
++	.recalc_rate = clk_th1520_pll_recalc_rate,
++	.round_rate = clk_th1520_plldiv_round_rate,
++	.set_rate = clk_th1520_pll_set_rate,
++};
++
++struct clk *xuantie_th1520_pll(const char *name, const char *parent_name,
++			       void __iomem *base,
++			       const struct th1520_pll_clk *pll_clk)
++{
++	struct clk_th1520pll *pll;
++	struct clk *clk;
++	struct clk_init_data init;
++	u32 val;
++
++	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
++	if (!pll)
++		return ERR_PTR(-ENOMEM);
++
++	init.name = name;
++	init.flags = pll_clk->flags;
++	init.parent_names = &parent_name;
++	init.num_parents = 1;
++
++	switch (pll_clk->out_type) {
++	case TH1520_PLL_VCO:
++		if (pll_clk->rate_table)
++			init.ops = &clk_th1520_pllvco_ops;
++		break;
++	case TH1520_PLL_DIV:
++		if (pll_clk->rate_table)
++			init.ops = &clk_th1520_plldiv_ops;
++		break;
++	default:
++		pr_err("%s: Unknown pll out type for pll clk %s\n",
++		       __func__, name);
++	}
++
++	if (!pll_clk->rate_table)
++		init.ops = &clk_th1520_pll_def_ops;
++
++	pll->base = base;
++	pll->hw.init = &init;
++	pll->out_type = pll_clk->out_type;
++	pll->clk_type = pll_clk->clk_type;
++	pll->rate_table = pll_clk->rate_table;
++	pll->rate_count = pll_clk->rate_count;
++
++	clk_th1520_pll_cfg_init(pll);
++
++	val = readl_relaxed(pll->base + pll->cfg0_reg_off + TH1520_PLL_CFG1);
++	val &= ~pll->pll_bypass_bit;
++	val |= TH1520_DACPD_MASK;
++	val |= TH1520_DSMPD_MASK;
++	if (pll->pll_mode == PLL_MODE_FRAC) {
++		val &= ~TH1520_DSMPD_MASK;
++		val &= ~TH1520_DACPD_MASK;
++	}
++	writel_relaxed(val, pll->base + pll->cfg0_reg_off + TH1520_PLL_CFG1);
++
++	clk = clk_register(NULL, &pll->hw);
++	if (IS_ERR(clk)) {
++		pr_err("%s: failed to register pll %s %ld\n",
++		       __func__, name, PTR_ERR(clk));
++		kfree(pll);
++	}
++
++	return clk;
++}
++
++static inline struct clk_th1520div *to_clk_th1520div(struct clk_hw *hw)
++{
++	struct clk_divider *divider = to_clk_divider(hw);
++
++	return container_of(divider, struct clk_th1520div, divider);
++}
++
++static unsigned long
clk_th1520div_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ struct clk_th1520div *th1520_div = to_clk_th1520div(hw); ++ ++ return th1520_div->ops->recalc_rate(&th1520_div->divider.hw, parent_rate); ++} ++ ++static long clk_th1520div_round_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long *prate) ++{ ++ struct clk_th1520div *th1520_div = to_clk_th1520div(hw); ++ ++ return th1520_div->ops->round_rate(&th1520_div->divider.hw, rate, prate); ++} ++ ++static int clk_th1520div_set_rate(struct clk_hw *hw, unsigned long rate, ++ unsigned long parent_rate) ++{ ++ struct clk_th1520div *th1520_div = to_clk_th1520div(hw); ++ struct clk_divider *div = to_clk_divider(hw); ++ unsigned int divider, value; ++ unsigned long flags = 0; ++ u32 val; ++ ++ /** ++ * The clk-divider will calculate the node frequency by rounding up ++ * based on the parent frequency and the target divider. ++ * This calculation is to restore accurate frequency divider. ++ */ ++ divider = DIV64_U64_ROUND_CLOSEST(parent_rate, rate); ++ ++ /* DIV is zero based divider, but CDE is not */ ++ if (th1520_div->div_type == MUX_TYPE_DIV) ++ value = divider; ++ else ++ value = divider - 1; ++ ++ /* handle the div valid range */ ++ if (value > th1520_div->max_div) ++ value = th1520_div->max_div; ++ if (value < th1520_div->min_div) ++ value = th1520_div->min_div; ++ ++ spin_lock_irqsave(div->lock, flags); ++ ++ val = readl(div->reg); ++ val &= ~BIT(th1520_div->sync_en); ++ writel(val, div->reg); ++ ++ udelay(1); ++ ++ val &= ~(div_mask(div) << div->shift); ++ val |= value << div->shift; ++ writel(val, div->reg); ++ ++ udelay(1); ++ ++ val |= BIT(th1520_div->sync_en); ++ writel(val, div->reg); ++ ++ spin_unlock_irqrestore(div->lock, flags); ++ ++ return 0; ++} ++ ++static const struct clk_ops clk_th1520div_ops = { ++ .recalc_rate = clk_th1520div_recalc_rate, ++ .round_rate = clk_th1520div_round_rate, ++ .set_rate = clk_th1520div_set_rate, ++}; ++ ++static struct clk *xuantie_clk_th1520_divider_internal(const char *name, const char *parent, ++ void __iomem *reg, u8 shift, u8 width, ++ u8 sync, enum th1520_div_type div_type, ++ u16 min, u16 max, bool closest) ++{ ++ struct clk_th1520div *th1520_div; ++ struct clk_hw *hw; ++ struct clk_init_data init; ++ int ret; ++ ++ th1520_div = kzalloc(sizeof(*th1520_div), GFP_KERNEL); ++ if (!th1520_div) ++ return ERR_PTR(-ENOMEM); ++ ++ init.name = name; ++ init.ops = &clk_th1520div_ops; ++ init.flags = CLK_SET_RATE_PARENT; ++ init.parent_names = parent ? &parent : NULL; ++ init.num_parents = parent ? 1 : 0; ++ ++ th1520_div->divider.reg = reg; ++ th1520_div->divider.shift = shift; ++ th1520_div->divider.width = width; ++ th1520_div->divider.lock = &xuantie_th1520_clk_lock; ++ th1520_div->divider.hw.init = &init; ++ th1520_div->ops = &clk_divider_ops; ++ th1520_div->sync_en = sync; ++ th1520_div->div_type = div_type; ++ if (th1520_div->div_type == MUX_TYPE_DIV) ++ th1520_div->divider.flags = CLK_DIVIDER_ONE_BASED; ++ ++ if (closest) ++ th1520_div->divider.flags |= CLK_DIVIDER_ROUND_CLOSEST; ++ ++ th1520_div->min_div = min > ((1 << width) - 1) ? ++ ((1 << width) - 1) : min; ++ th1520_div->max_div = max > ((1 << width) - 1) ? 
++			((1 << width) - 1) : max;
++
++	hw = &th1520_div->divider.hw;
++
++	ret = clk_hw_register(NULL, hw);
++	if (ret) {
++		kfree(th1520_div);
++		return ERR_PTR(ret);
++	}
++
++	return hw->clk;
++}
++
++struct clk *xuantie_clk_th1520_divider(const char *name, const char *parent,
++				       void __iomem *reg, u8 shift, u8 width,
++				       u8 sync, enum th1520_div_type div_type,
++				       u16 min, u16 max)
++{
++	return xuantie_clk_th1520_divider_internal(name, parent, reg, shift, width,
++						   sync, div_type, min, max, false);
++}
++
++struct clk *xuantie_clk_th1520_divider_closest(const char *name, const char *parent,
++					       void __iomem *reg, u8 shift, u8 width,
++					       u8 sync, enum th1520_div_type div_type,
++					       u16 min, u16 max)
++{
++	return xuantie_clk_th1520_divider_internal(name, parent, reg, shift, width,
++						   sync, div_type, min, max, true);
++}
++
++static inline struct clk_th1520gate *to_clk_th1520gate(struct clk_hw *hw)
++{
++	struct clk_gate *gate = to_clk_gate(hw);
++
++	return container_of(gate, struct clk_th1520gate, gate);
++}
++
++static int clk_th1520_gate_share_is_enabled(struct clk_hw *hw)
++{
++	struct clk_th1520gate *th1520_gate = to_clk_th1520gate(hw);
++
++	return th1520_gate->ops->is_enabled(hw);
++}
++
++static int clk_th1520_gate_share_enable(struct clk_hw *hw)
++{
++	struct clk_th1520gate *th1520_gate = to_clk_th1520gate(hw);
++
++	if (th1520_gate->share_count && (*th1520_gate->share_count)++ > 0) {
++		pr_debug("[%s,%d]share_count = %d\n", __func__, __LINE__, (*th1520_gate->share_count));
++		return 0;
++	}
++
++	if (th1520_gate->share_count)
++		pr_debug("[%s,%d]share_count = %d\n", __func__, __LINE__, (*th1520_gate->share_count));
++
++	return th1520_gate->ops->enable(hw);
++}
++
++static void clk_th1520_gate_share_disable(struct clk_hw *hw)
++{
++	struct clk_th1520gate *th1520_gate = to_clk_th1520gate(hw);
++
++	if (th1520_gate->share_count) {
++		if (WARN_ON(*th1520_gate->share_count == 0))
++			return;
++		else if (--(*th1520_gate->share_count) > 0) {
++			pr_debug("[%s,%d]share_count = %d\n", __func__, __LINE__, (*th1520_gate->share_count));
++			return;
++		}
++		pr_debug("[%s,%d]share_count = %d\n", __func__, __LINE__, (*th1520_gate->share_count));
++	}
++
++	th1520_gate->ops->disable(hw);
++}
++
++static void clk_th1520_gate_share_disable_unused(struct clk_hw *hw)
++{
++	struct clk_th1520gate *th1520_gate = to_clk_th1520gate(hw);
++
++	if (!th1520_gate->share_count || *th1520_gate->share_count == 0)
++		th1520_gate->ops->disable(hw);
++}
++
++static const struct clk_ops clk_th1520gate_share_ops = {
++	.enable = clk_th1520_gate_share_enable,
++	.disable = clk_th1520_gate_share_disable,
++	.disable_unused = clk_th1520_gate_share_disable_unused,
++	.is_enabled = clk_th1520_gate_share_is_enabled,
++};
++
++struct clk *xuantie_clk_th1520_register_gate_shared(const char *name, const char *parent,
++						    unsigned long flags, void __iomem *reg,
++						    u8 shift, spinlock_t *lock,
++						    unsigned int *share_count)
++{
++	struct clk_th1520gate *th1520_gate;
++	struct clk_hw *hw;
++	struct clk_init_data init;
++	int ret;
++
++	th1520_gate = kzalloc(sizeof(*th1520_gate), GFP_KERNEL);
++	if (!th1520_gate)
++		return ERR_PTR(-ENOMEM);
++
++	th1520_gate->gate.reg = reg;
++	th1520_gate->gate.bit_idx = shift;
++	th1520_gate->gate.flags = 0;
++	th1520_gate->gate.lock = lock;
++	th1520_gate->gate.hw.init = &init;
++	th1520_gate->ops = &clk_gate_ops;
++	th1520_gate->share_count = share_count;
++
++	init.name = name;
++	init.ops = &clk_th1520gate_share_ops;
++	init.flags = flags;
++	init.parent_names = parent ? &parent : NULL;
++	init.num_parents = parent ? 1 : 0;
++
++	hw = &th1520_gate->gate.hw;
++
++	ret = clk_hw_register(NULL, hw);
++	if (ret) {
++		kfree(th1520_gate);
++		return ERR_PTR(ret);
++	}
++
++	return hw->clk;
++}
+diff --git a/drivers/clk/xuantie/clk.h b/drivers/clk/xuantie/clk.h
+new file mode 100644
+index 000000000000..14c30bcc635e
+--- /dev/null
++++ b/drivers/clk/xuantie/clk.h
+@@ -0,0 +1,126 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2021 Alibaba Group Holding Limited.
++ */
++
++#ifndef __MACH_XUANTIE_CLK_H
++#define __MACH_XUANTIE_CLK_H
++
++#include <linux/clk-provider.h>
++#include <linux/spinlock.h>
++
++extern spinlock_t xuantie_th1520_clk_lock;
++
++#define TH1520_PLL_RATE(_vco, _rate, _r, _b, _f, _p, _k)	\
++	{							\
++		.vco_rate = (_vco),				\
++		.rate = (_rate),				\
++		.refdiv = (_r),					\
++		.fbdiv = (_b),					\
++		.frac = (_f),					\
++		.postdiv1 = (_p),				\
++		.postdiv2 = (_k),				\
++	}
++
++enum th1520_pll_outtype {
++	TH1520_PLL_VCO,
++	TH1520_PLL_DIV,
++};
++
++enum th1520_div_type {
++	MUX_TYPE_DIV,
++	MUX_TYPE_CDE,
++};
++
++enum th1520_pll_clktype {
++	TH1520_AUDIO_PLL,
++	TH1520_SYS_PLL,
++	TH1520_CPU_PLL0,
++	TH1520_CPU_PLL1,
++	TH1520_GMAC_PLL,
++	TH1520_VIDEO_PLL,
++	TH1520_DDR_PLL,
++	TH1520_DPU0_PLL,
++	TH1520_DPU1_PLL,
++};
++
++struct th1520_pll_rate_table {
++	unsigned long vco_rate;
++	unsigned long rate;
++	unsigned int refdiv;
++	unsigned int fbdiv;
++	unsigned int frac;
++	unsigned int postdiv1;
++	unsigned int postdiv2;
++};
++
++struct th1520_pll_clk {
++	enum th1520_pll_outtype out_type;
++	enum th1520_pll_clktype clk_type;
++	const struct th1520_pll_rate_table *rate_table;
++	int rate_count;
++	int flags;
++};
++
++static inline struct clk *xuantie_th1520_clk_fixed_factor(const char *name,
++		const char *parent, unsigned int mult, unsigned int div)
++{
++	return clk_register_fixed_factor(NULL, name, parent,
++					 CLK_SET_RATE_PARENT, mult, div);
++}
++
++struct clk *xuantie_th1520_pll(const char *name, const char *parent_name,
++			       void __iomem *base,
++			       const struct th1520_pll_clk *pll_clk);
++
++static inline struct clk *xuantie_clk_th1520_gate(const char *name, const char *parent,
++						  void __iomem *reg, u8 shift)
++{
++	return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
++				 shift, 0, &xuantie_th1520_clk_lock);
++}
++
++struct clk *xuantie_clk_th1520_register_gate_shared(const char *name, const char *parent,
++						    unsigned long flags, void __iomem *reg,
++						    u8 shift, spinlock_t *lock,
++						    unsigned int *share_count);
++
++struct clk *xuantie_clk_th1520_divider(const char *name, const char *parent,
++				       void __iomem *reg, u8 shift, u8 width,
++				       u8 sync, enum th1520_div_type div_type,
++				       u16 min, u16 max);
++
++/**
++* By default, the clk framework calculates rates by rounding downwards;
++* this variant returns the closest achievable frequency instead.
++*/ ++struct clk *xuantie_clk_th1520_divider_closest(const char *name, const char *parent, ++ void __iomem *reg, u8 shift, u8 width, ++ u8 sync, enum th1520_div_type div_type, ++ u16 min, u16 max); ++ ++void xuantie_unregister_clocks(struct clk *clks[], unsigned int count); ++ ++static inline struct clk *xuantie_clk_fixed(const char *name, unsigned long rate) ++{ ++ return clk_register_fixed_rate(NULL, name, NULL, 0, rate); ++} ++ ++static inline struct clk *xuantie_clk_th1520_gate_shared(const char *name, const char *parent, ++ void __iomem *reg, u8 shift, ++ unsigned int *share_count) ++{ ++ return xuantie_clk_th1520_register_gate_shared(name, parent, CLK_SET_RATE_PARENT, reg, ++ shift, &xuantie_th1520_clk_lock, share_count); ++} ++ ++static inline struct clk *xuantie_th1520_clk_mux_flags(const char *name, ++ void __iomem *reg, u8 shift, u8 width, ++ const char * const *parents, int num_parents, ++ unsigned long flags) ++{ ++ return clk_register_mux(NULL, name, parents, num_parents, ++ flags , reg, shift, width, 0, ++ &xuantie_th1520_clk_lock); ++} ++#endif +diff --git a/drivers/clk/xuantie/gate/Makefile b/drivers/clk/xuantie/gate/Makefile +new file mode 100644 +index 000000000000..b50a7f6b16a2 +--- /dev/null ++++ b/drivers/clk/xuantie/gate/Makefile +@@ -0,0 +1,3 @@ ++# SPDX-License-Identifier: GPL-2.0 ++ ++obj-$(CONFIG_CLK_TH1520_FM) += xuantie-gate.o visys-gate.o vpsys-gate.o vosys-gate.o dspsys-gate.o audiosys-gate.o miscsys-gate.o +diff --git a/drivers/clk/xuantie/gate/audiosys-gate.c b/drivers/clk/xuantie/gate/audiosys-gate.c +new file mode 100644 +index 000000000000..a2de4c2430d8 +--- /dev/null ++++ b/drivers/clk/xuantie/gate/audiosys-gate.c +@@ -0,0 +1,124 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2022 Alibaba Group Holding Limited. 
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include "clk-gate.h"
++#include "../clk.h"
++
++static struct clk *gates[TH1520_CLKGEN_AUDIO_CLK_END];
++static struct clk_onecell_data clk_gate_data;
++
++static int th1520_audiosys_clk_probe(struct platform_device *pdev)
++{
++	struct regmap *audiosys_regmap;
++	struct device *dev = &pdev->dev;
++	struct device_node *np = dev->of_node;
++	int ret;
++
++	audiosys_regmap = syscon_regmap_lookup_by_phandle(np, "audiosys-regmap");
++	if (IS_ERR(audiosys_regmap)) {
++		dev_err(&pdev->dev, "cannot find regmap for audio system register\n");
++		return PTR_ERR(audiosys_regmap);
++	}
++
++	dev_dbg(dev, "audiosys_regmap=0x%px\n", audiosys_regmap);
++
++	/* we assume that the gate clock is a root clock */
++	gates[TH1520_CLKGEN_AUDIO_CPU] = xuantie_gate_clk_register("clkgen_audiosys_cpu_clk", NULL,
++						audiosys_regmap, 0x10, 0, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_SRAM0] = xuantie_gate_clk_register("clkgen_audiosys_sram0_clk", NULL,
++						audiosys_regmap, 0x10, 1, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_SRAM1] = xuantie_gate_clk_register("clkgen_audiosys_sram1_clk", NULL,
++						audiosys_regmap, 0x10, 2, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_DMA] = xuantie_gate_clk_register("clkgen_audiosys_dma_clk", NULL,
++						audiosys_regmap, 0x10, 3, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_BSM] = xuantie_gate_clk_register("clkgen_audiosys_bsm_clk", NULL,
++						audiosys_regmap, 0x10, 4, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_TIMER] = xuantie_gate_clk_register("clkgen_audiosys_timer_clk", NULL,
++						audiosys_regmap, 0x10, 8, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_TIMER_CNT1] = xuantie_gate_clk_register("clkgen_audiosys_timer_cnt1_clk", NULL,
++						audiosys_regmap, 0x10, 9, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_TIMER_CNT2] = xuantie_gate_clk_register("clkgen_audiosys_timer_cnt2_clk", NULL,
++						audiosys_regmap, 0x10, 10, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_TIMER_CNT3] = xuantie_gate_clk_register("clkgen_audiosys_timer_cnt3_clk", NULL,
++						audiosys_regmap, 0x10, 11, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_TIMER_CNT4] = xuantie_gate_clk_register("clkgen_audiosys_timer_cnt4_clk", NULL,
++						audiosys_regmap, 0x10, 12, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_WDR] = xuantie_gate_clk_register("clkgen_audiosys_wdr_clk", NULL,
++						audiosys_regmap, 0x10, 13, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_I2C0] = xuantie_gate_clk_register("clkgen_audiosys_i2c0_clk", NULL,
++						audiosys_regmap, 0x10, 14, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_I2C1] = xuantie_gate_clk_register("clkgen_audiosys_i2c1_clk", NULL,
++						audiosys_regmap, 0x10, 15, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_UART] = xuantie_gate_clk_register("clkgen_audiosys_uart_clk", NULL,
++						audiosys_regmap, 0x10, 16, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_I2S0] = xuantie_gate_clk_register("clkgen_audiosys_i2s0_clk", NULL,
++						audiosys_regmap, 0x10, 17, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_I2S1] = xuantie_gate_clk_register("clkgen_audiosys_i2s1_clk", NULL,
++						audiosys_regmap, 0x10, 18, GATE_NOT_SHARED, NULL, dev);
++	gates[TH1520_CLKGEN_AUDIO_I2S2] = xuantie_gate_clk_register("clkgen_audiosys_i2s2_clk", NULL,
++						audiosys_regmap, 0x10, 19, GATE_NOT_SHARED, NULL, dev);
++
gates[TH1520_CLKGEN_AUDIO_I2S8CH] = xuantie_gate_clk_register("clkgen_audiosys_i2s8ch_clk", NULL, ++ audiosys_regmap, 0x10, 20, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_AUDIO_TDM] = xuantie_gate_clk_register("clkgen_audiosys_tdm_clk", NULL, ++ audiosys_regmap, 0x10, 21, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_AUDIO_GPIO] = xuantie_gate_clk_register("clkgen_audiosys_gpio_clk", NULL, ++ audiosys_regmap, 0x10, 22, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_AUDIO_SPDIF0] = xuantie_gate_clk_register("clkgen_audiosys_spdif0_clk", NULL, ++ audiosys_regmap, 0x10, 23, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_AUDIO_SPDIF1] = xuantie_gate_clk_register("clkgen_audiosys_spdif1_clk", NULL, ++ audiosys_regmap, 0x10, 24, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_AUDIO_VAD] = xuantie_gate_clk_register("clkgen_audiosys_vad_clk", NULL, ++ audiosys_regmap, 0x10, 25, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_AUDIO_IOMUX] = xuantie_gate_clk_register("clkgen_audiosys_iomux_clk", NULL, ++ audiosys_regmap, 0x10, 26, GATE_NOT_SHARED, NULL, dev); ++ ++ clk_gate_data.clks = gates; ++ clk_gate_data.clk_num = ARRAY_SIZE(gates); ++ ++ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_gate_data); ++ if (ret < 0) { ++ dev_err(dev, "failed to register gate clks for th1520 audiosys\n"); ++ goto unregister_clks; ++ } ++ ++ dev_info(dev, "succeed to register audiosys gate clock provider\n"); ++ ++ return 0; ++ ++unregister_clks: ++ xuantie_unregister_clocks(gates, ARRAY_SIZE(gates)); ++ return ret; ++} ++ ++static const struct of_device_id audiosys_clk_gate_of_match[] = { ++ { .compatible = "xuantie,audiosys-gate-controller" }, ++ { /* sentinel */ }, ++}; ++MODULE_DEVICE_TABLE(of, audiosys_clk_gate_of_match); ++ ++static struct platform_driver th1520_audiosys_clk_driver = { ++ .probe = th1520_audiosys_clk_probe, ++ .driver = { ++ .name = "audiosys-clk-gate-provider", ++ .of_match_table = of_match_ptr(audiosys_clk_gate_of_match), ++ }, ++}; ++ ++module_platform_driver(th1520_audiosys_clk_driver); ++MODULE_AUTHOR("nanli.yd "); ++MODULE_DESCRIPTION("XuanTie Th1520 Fullmask audiosys clock gate provider"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/clk/xuantie/gate/clk-gate.h b/drivers/clk/xuantie/gate/clk-gate.h +new file mode 100644 +index 000000000000..c6452896fcc5 +--- /dev/null ++++ b/drivers/clk/xuantie/gate/clk-gate.h +@@ -0,0 +1,35 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2022 Alibaba Group Holding Limited. ++ */ ++ ++#ifndef CLK_GATE_H ++#define CLK_GATE_H ++ ++#include ++#include ++ ++enum clk_gate_type { ++ GATE_NOT_SHARED, ++ GATE_SHARED, ++}; ++ ++struct xuantie_clk_gate { ++ struct clk_hw hw; ++ struct regmap *regmap; ++ u32 offset; ++ u8 bit; ++ bool shared; ++ u32 *share_count; ++}; ++ ++struct clk *xuantie_gate_clk_register(const char *name, ++ const char *parent_name, ++ struct regmap *regmap, ++ int offset, ++ u8 bit, ++ bool shared, ++ u32 *share_count, ++ struct device *dev); ++ ++#endif +diff --git a/drivers/clk/xuantie/gate/dspsys-gate.c b/drivers/clk/xuantie/gate/dspsys-gate.c +new file mode 100644 +index 000000000000..6f36b1d0488d +--- /dev/null ++++ b/drivers/clk/xuantie/gate/dspsys-gate.c +@@ -0,0 +1,123 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2022 Alibaba Group Holding Limited. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "clk-gate.h" ++#include "../clk.h" ++ ++static struct clk *gates[TH1520_CLKGEN_DSPSYS_CLK_END]; ++static struct clk_onecell_data clk_gate_data; ++static const char * const dsp0_cclk_sels[] = {"gmac_pll_foutpostdiv", "dspsys_dsp_clk"}; ++static const char * const dsp1_cclk_sels[] = {"gmac_pll_foutpostdiv", "dspsys_dsp_clk"}; ++ ++static int xuantie_dspsys_clk_probe(struct platform_device *pdev) ++{ ++ struct regmap *dspsys_regmap, *tee_dspsys_regmap; ++ struct device *dev = &pdev->dev; ++ struct device_node *np = dev->of_node; ++ struct device_node *np_reg = of_parse_phandle(np, "dspsys-regmap", 0); ++ void __iomem *gate_base; ++ int ret; ++ ++ dspsys_regmap = syscon_regmap_lookup_by_phandle(np, "dspsys-regmap"); ++ if (IS_ERR(dspsys_regmap)) { ++ dev_err(&pdev->dev, "cannot find regmap for dsp system register\n"); ++ return PTR_ERR(dspsys_regmap); ++ } ++ ++ tee_dspsys_regmap = syscon_regmap_lookup_by_phandle(np, "tee-dspsys-regmap"); ++ if (IS_ERR(tee_dspsys_regmap)) { ++ dev_warn(&pdev->dev, "cannot find regmap for tee dsp system register\n"); ++ tee_dspsys_regmap = NULL; ++ } ++ gate_base = of_iomap(np_reg,0); ++ // MUX ++ gates[DSPSYS_DSP0_CLK_SWITCH] = xuantie_th1520_clk_mux_flags("dspsys_dsp0_clk_switch", gate_base + 0x1c, 0, 1, dsp0_cclk_sels, ARRAY_SIZE(dsp0_cclk_sels), 0); ++ gates[DSPSYS_DSP1_CLK_SWITCH] = xuantie_th1520_clk_mux_flags("dspsys_dsp1_clk_switch", gate_base + 0x20, 0, 1, dsp1_cclk_sels, ARRAY_SIZE(dsp1_cclk_sels), 0); ++ ++ // DIV & CDE ++ gates[DSPSYS_DSP_CLK] = xuantie_th1520_clk_fixed_factor("dspsys_dsp_clk", "video_pll_foutvco", 1, 3); ++ gates[DSPSYS_DSP0_CLK_CDE] = xuantie_clk_th1520_divider("dspsys_dsp0_clk_cde", "dspsys_dsp0_clk_switch", gate_base + 0x0, 0, 3, 4, MUX_TYPE_CDE, 0, 7); ++ gates[DSPSYS_DSP1_CLK_CDE] = xuantie_clk_th1520_divider("dspsys_dsp1_clk_cde", "dspsys_dsp1_clk_switch", gate_base + 0x4, 0, 3, 4, MUX_TYPE_CDE, 0, 7); ++ ++ // gate ++ gates[CLKGEN_DSP0_PCLK] = xuantie_gate_clk_register("clkgen_dsp0_pclk", NULL, dspsys_regmap, ++ 0x24, 0, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_DSP1_PCLK] = xuantie_gate_clk_register("clkgen_dsp1_pclk", NULL, dspsys_regmap, ++ 0x24, 1, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_DSP1_CCLK] = xuantie_gate_clk_register("clkgen_dsp1_cclk", "dspsys_dsp1_clk_cde", dspsys_regmap, ++ 0x24, 2, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_DSP0_CCLK] = xuantie_gate_clk_register("clkgen_dsp0_cclk", "dspsys_dsp0_clk_cde", dspsys_regmap, ++ 0x24, 3, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_X2X_DSP2_ACLK_S] = xuantie_gate_clk_register("clkgen_x2x_dsp2_aclk_s", NULL, dspsys_regmap, ++ 0x24, 4, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_X2X_DSP0_ACLK_S] = xuantie_gate_clk_register("clkgen_x2x_dsp0_aclk_s", NULL, dspsys_regmap, ++ 0x24, 5, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_X2X_X4_DSPSLV_DSP1_ACLK_M] = xuantie_gate_clk_register("clkgen_x2x_x4_dspslv_dsp1_aclk_m", ++ NULL, dspsys_regmap, 0x24, 6, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_X2X_X4_DSPSLV_DSP0_ACLK_M] = xuantie_gate_clk_register("clkgen_x2x_x4_dspslv_dsp0_aclk_m", ++ NULL, dspsys_regmap, 0x24, 7, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_AXI4_DSPSYS_SLV_ACLK] = xuantie_gate_clk_register("clkgen_axi4_dspsys_slv_aclk", NULL, dspsys_regmap, ++ 0x24, 20, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_AXI4_DSPSYS_SLV_PCLK] = xuantie_gate_clk_register("clkgen_axi4_dspsys_slv_pclk", NULL, 
dspsys_regmap, ++ 0x24, 21, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_AXI4_DSPSYS_ACLK] = xuantie_gate_clk_register("clkgen_axi4_dspsys_aclk", NULL, dspsys_regmap, ++ 0x24, 23, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_AXI4_DSPSYS_PCLK] = xuantie_gate_clk_register("clkgen_axi4_dspsys_pclk", NULL, dspsys_regmap, ++ 0x24, 24, GATE_NOT_SHARED, NULL, dev); ++ if (tee_dspsys_regmap) { ++ gates[CLKGEN_IOPMP_DSP1_PCLK] = xuantie_gate_clk_register("clkgen_iopmp_dsp1_pclk", NULL, tee_dspsys_regmap, ++ 0x24, 25, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_IOPMP_DSP0_PCLK] = xuantie_gate_clk_register("clkgen_iopmp_dsp0_pclk", NULL, tee_dspsys_regmap, ++ 0x24, 26, GATE_NOT_SHARED, NULL, dev); ++ } ++ ++ clk_gate_data.clks = gates; ++ clk_gate_data.clk_num = ARRAY_SIZE(gates); ++ ++ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_gate_data); ++ if (ret < 0) { ++ dev_err(dev, "failed to register gate clks for th1520 dspsys\n"); ++ goto unregister_clks; ++ } ++ ++ dev_info(dev, "succeed to register dspsys gate clock provider\n"); ++ ++ return 0; ++ ++unregister_clks: ++ xuantie_unregister_clocks(gates, ARRAY_SIZE(gates)); ++ return ret; ++} ++ ++static const struct of_device_id dspsys_clk_gate_of_match[] = { ++ { .compatible = "xuantie,dspsys-gate-controller" }, ++ { /* sentinel */ }, ++}; ++MODULE_DEVICE_TABLE(of, dspsys_clk_gate_of_match); ++ ++static struct platform_driver xuantie_dspsys_clk_driver = { ++ .probe = xuantie_dspsys_clk_probe, ++ .driver = { ++ .name = "dspsys-clk-gate-provider", ++ .of_match_table = of_match_ptr(dspsys_clk_gate_of_match), ++ }, ++}; ++ ++module_platform_driver(xuantie_dspsys_clk_driver); ++MODULE_AUTHOR("wei.liu "); ++MODULE_DESCRIPTION("XuanTie Th1520 Fullmask dspsys clock gate provider"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/clk/xuantie/gate/miscsys-gate.c b/drivers/clk/xuantie/gate/miscsys-gate.c +new file mode 100644 +index 000000000000..21c3a355b3ef +--- /dev/null ++++ b/drivers/clk/xuantie/gate/miscsys-gate.c +@@ -0,0 +1,108 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2022 Alibaba Group Holding Limited. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "clk-gate.h" ++#include "../clk.h" ++static struct clk *gates[CLKGEN_MISCSYS_CLK_END]; ++static struct clk_onecell_data clk_gate_data; ++static int xuantie_miscsys_clk_probe(struct platform_device *pdev) ++{ ++ struct regmap *miscsys_regmap, *tee_miscsys_regmap = NULL; ++ struct device *dev = &pdev->dev; ++ struct device_node *np = dev->of_node; ++ int ret; ++ ++ miscsys_regmap = syscon_regmap_lookup_by_phandle(np, "miscsys-regmap"); ++ if (IS_ERR(miscsys_regmap)) { ++ dev_err(&pdev->dev, "cannot find regmap for misc system register\n"); ++ return PTR_ERR(miscsys_regmap); ++ } ++ tee_miscsys_regmap = syscon_regmap_lookup_by_phandle(np, "tee-miscsys-regmap"); ++ if (IS_ERR(tee_miscsys_regmap)) { ++ dev_err(&pdev->dev, "cannot find regmap for tee misc system register\n"); ++ return PTR_ERR(tee_miscsys_regmap); ++ } ++ /* we assume that the gate clock is a root clock */ ++ gates[CLKGEN_MISCSYS_MISCSYS_ACLK] = xuantie_gate_clk_register("clkgen_missys_aclk", NULL, ++ miscsys_regmap, 0x100, 0, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_USB3_DRD_CLK] = xuantie_gate_clk_register("clkgen_usb3_drd_clk", NULL, ++ miscsys_regmap, 0x104, 0, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_USB3_DRD_CTRL_REF_CLK] = xuantie_gate_clk_register("clkgen_usb3_drd_ctrl_ref_clk", "osc_24m", ++ miscsys_regmap, 0x104, 1, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_USB3_DRD_PHY_REF_CLK] = xuantie_gate_clk_register("clkgen_usb3_drd_phy_ref_clk", "osc_24m", ++ miscsys_regmap, 0x104, 2, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_USB3_DRD_SUSPEND_CLK] = xuantie_gate_clk_register("clkgen_usb3_drd_suspend_clk", NULL, ++ miscsys_regmap, 0x104, 3, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_EMMC_CLK] = xuantie_gate_clk_register("clkgen_emmc_clk", "osc_24m", ++ miscsys_regmap, 0x108, 0, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_SDIO0_CLK] = xuantie_gate_clk_register("clkgen_sdio0_clk", "osc_24m", ++ miscsys_regmap, 0x10c, 0, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_SDIO1_CLK] = xuantie_gate_clk_register("clkgen_sdio1_clk", "osc_24m", ++ miscsys_regmap, 0x110, 0, GATE_NOT_SHARED, NULL, dev); ++ if (tee_miscsys_regmap) { ++ gates[CLKGEN_MISCSYS_AHB2_TEESYS_HCLK] = xuantie_gate_clk_register("clkgen_ahb2_teesys_hclk", NULL, ++ tee_miscsys_regmap, 0x120, 0, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_APB3_TEESYS_HCLK] = xuantie_gate_clk_register("clkgen_apb3_teesys_hclk", NULL, ++ tee_miscsys_regmap, 0x120, 1, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_AXI4_TEESYS_ACLK] = xuantie_gate_clk_register("clkgen_axi4_teesys_aclk", NULL, ++ tee_miscsys_regmap, 0x120, 2, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_EIP120SI_CLK] = xuantie_gate_clk_register("clkgen_eip120si_clk", NULL, ++ tee_miscsys_regmap, 0x120, 3, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_EIP120SII_CLK] = xuantie_gate_clk_register("clkgen_eip120sii_clk", NULL, ++ tee_miscsys_regmap, 0x120, 4, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_EIP120SIII_CLK] = xuantie_gate_clk_register("clkgen_eip120siii_clk", NULL, ++ tee_miscsys_regmap, 0x120, 5, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_TEEDMAC_CLK] = xuantie_gate_clk_register("clkgen_teedmac_clk", NULL, ++ tee_miscsys_regmap, 0x120, 6, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_EIP150B_HCLK] = xuantie_gate_clk_register("clkgen_eip150b_hclk", NULL, ++ 
tee_miscsys_regmap, 0x120, 7, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_OCRAM_HCLK] = xuantie_gate_clk_register("clkgen_ocram_hclk", NULL, ++ tee_miscsys_regmap, 0x120, 8, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_EFUSE_PCLK] = xuantie_gate_clk_register("clkgen_efuse_pclk", NULL, ++ tee_miscsys_regmap, 0x120, 9, GATE_NOT_SHARED, NULL, dev); ++ gates[CLKGEN_MISCSYS_TEE_SYSREG_PCLK] = xuantie_gate_clk_register("clkgen_tee_sysreg_pclk", NULL, ++ tee_miscsys_regmap, 0x120, 10, GATE_NOT_SHARED, NULL, dev); ++ } ++ clk_gate_data.clks = gates; ++ clk_gate_data.clk_num = ARRAY_SIZE(gates); ++ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_gate_data); ++ if (ret < 0) { ++ dev_err(dev, "failed to register gate clks for th1520 miscsys\n"); ++ goto unregister_clks; ++ } ++ dev_info(dev, "succeed to register miscsys gate clock provider\n"); ++ return 0; ++unregister_clks: ++ xuantie_unregister_clocks(gates, ARRAY_SIZE(gates)); ++ return ret; ++} ++static const struct of_device_id miscsys_clk_gate_of_match[] = { ++ { .compatible = "xuantie,miscsys-gate-controller" }, ++ { /* sentinel */ }, ++}; ++MODULE_DEVICE_TABLE(of, miscsys_clk_gate_of_match); ++static struct platform_driver xuantie_miscsys_clk_driver = { ++ .probe = xuantie_miscsys_clk_probe, ++ .driver = { ++ .name = "miscsys-clk-gate-provider", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(miscsys_clk_gate_of_match), ++ }, ++}; ++module_platform_driver(xuantie_miscsys_clk_driver); ++MODULE_AUTHOR("wei.liu "); ++MODULE_AUTHOR("Esther.Z "); ++MODULE_DESCRIPTION("XuanTie th1520 Fullmask miscsys clock gate provider"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/clk/xuantie/gate/visys-gate.c b/drivers/clk/xuantie/gate/visys-gate.c +new file mode 100644 +index 000000000000..0ea20d669cae +--- /dev/null ++++ b/drivers/clk/xuantie/gate/visys-gate.c +@@ -0,0 +1,144 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2022 Alibaba Group Holding Limited. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "clk-gate.h" ++#include "../clk.h" ++ ++static struct clk *gates[TH1520_CLKGEN_VISYS_CLK_END]; ++static struct clk_onecell_data clk_gate_data; ++ ++static u32 share_cnt_isp0_hclk_en; ++static u32 share_cnt_isp0_aclk_en; ++ ++static int xuantie_visys_clk_probe(struct platform_device *pdev) ++{ ++ struct regmap *visys_regmap; ++ struct device *dev = &pdev->dev; ++ struct device_node *np = dev->of_node; ++ int ret; ++ ++ visys_regmap = syscon_regmap_lookup_by_phandle(np, "visys-regmap"); ++ if (IS_ERR(visys_regmap)) { ++ dev_err(&pdev->dev, "cannot find regmap for vi system register\n"); ++ return PTR_ERR(visys_regmap); ++ } ++ ++ /* we assume that the gate clock is a root clock */ ++ gates[TH1520_CLKGEN_DW200_ACLK] = xuantie_gate_clk_register("clkgen_dw200_aclk", NULL, ++ visys_regmap, 0xa0, 27, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_AXI4_VISYS1_ACLK] = xuantie_gate_clk_register("clkgen_axi4_visys1_aclk", NULL, ++ visys_regmap, 0xa0, 26, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_AXI4_VISYS2_ACLK] = xuantie_gate_clk_register("clkgen_axi4_visys2_aclk", NULL, ++ visys_regmap, 0xa0, 25, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_AXI4_VISYS3_ACLK] = xuantie_gate_clk_register("clkgen_axi4_visys3_aclk", NULL, ++ visys_regmap, 0xa0, 24, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP_RY_ACLK] = xuantie_gate_clk_register("clkgen_isp_ry_aclk", NULL, ++ visys_regmap, 0xa0, 22, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP_VENC_SHAKE_ACLK] = xuantie_gate_clk_register("clkgen_isp_venc_shake_aclk", NULL, ++ visys_regmap, 0xa0, 30, GATE_NOT_SHARED, NULL, dev); ++ ++ gates[TH1520_CLKGEN_VIPRE_ACLK] = xuantie_gate_clk_register("clkgen_vipre_aclk", NULL, ++ visys_regmap, 0xa0, 31, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_DW200_HCLK] = xuantie_gate_clk_register("clkgen_dw200_hclk", NULL, ++ visys_regmap, 0xa0, 13, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP_RY_HCLK] = xuantie_gate_clk_register("clkgen_isp_ry_hclk", NULL, ++ visys_regmap, 0xa0, 12, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_MIPI_CSI0_PCLK] = xuantie_gate_clk_register("clkgen_mipi_csi0_pclk", NULL, ++ visys_regmap, 0xa0, 18, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_MIPI_CSI1_PCLK] = xuantie_gate_clk_register("clkgen_mipi_csi1_pclk", NULL, ++ visys_regmap, 0xa0, 17, GATE_NOT_SHARED, NULL, dev); ++ ++ gates[TH1520_CLKGEN_MIPI_CSI2_PCLK] = xuantie_gate_clk_register("clkgen_mipi_csi2_pclk", NULL, ++ visys_regmap, 0xa0, 16, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_VIPRE_PCLK] = xuantie_gate_clk_register("clkgen_vipre_pclk", NULL, ++ visys_regmap, 0xa0, 15, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP_VENC_SHAKE_PCLK] = xuantie_gate_clk_register("clkgen_isp_venc_shake_pclk", NULL, ++ visys_regmap, 0xa0, 29, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_MIPI_CSI0_PIXCLK] = xuantie_gate_clk_register("clkgen_mipi_csi0_pixclk", NULL, ++ visys_regmap, 0xa0, 11, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_MIPI_CSI1_PIXCLK] = xuantie_gate_clk_register("clkgen_mipi_csi1_pixclk", NULL, ++ visys_regmap, 0xa0, 10, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_MIPI_CSI2_PIXCLK] = xuantie_gate_clk_register("clkgen_mipi_csi2_pixclk", NULL, ++ visys_regmap, 0xa0, 9, GATE_NOT_SHARED, NULL, dev); ++ ++ gates[TH1520_CLKGEN_VIPRE_PIXELCLK] = 
xuantie_gate_clk_register("clkgen_vipre_pixelclk", NULL, ++ visys_regmap, 0xa4, 23, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_MIPI_CSI0_CFG_CLK] = xuantie_gate_clk_register("clkgen_mipi_csi0_cfg_clk", NULL, ++ visys_regmap, 0xa0, 8, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_MIPI_CSI1_CFG_CLK] = xuantie_gate_clk_register("clkgen_mipi_csi1_cfg_clk", NULL, ++ visys_regmap, 0xa0, 6, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_MIPI_CSI2_CFG_CLK] = xuantie_gate_clk_register("clkgen_mipi_csi2_cfg_clk", NULL, ++ visys_regmap, 0xa0, 7, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_DW200_CLK_VSE] = xuantie_gate_clk_register("clkgen_dw200_clk_vse", NULL, ++ visys_regmap, 0xa0, 5, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_DW200_CLK_DWE] = xuantie_gate_clk_register("clkgen_dw200_clk_dwe", NULL, ++ visys_regmap, 0xa0, 4, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP0_CLK] = xuantie_gate_clk_register("clkgen_isp_clk_0", NULL, ++ visys_regmap, 0xa4, 31, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP1_CLK] = xuantie_gate_clk_register("clkgen_isp_clk_1", NULL, ++ visys_regmap, 0xa4, 30, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP_RY_CCLK] = xuantie_gate_clk_register("clkgen_isp_ry_cclk", NULL, ++ visys_regmap, 0xa0, 21, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP1_PIXELCLK] = xuantie_gate_clk_register("clkgen_isp1_pixelclk", NULL, ++ visys_regmap, 0xa4, 28, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP0_PIXELCLK] = xuantie_gate_clk_register("clkgen_isp0_pixelclk", NULL, ++ visys_regmap, 0xa4, 29, GATE_NOT_SHARED, NULL, dev); ++ gates[TH1520_CLKGEN_ISP1_HCLK] = xuantie_gate_clk_register("clkgen_isp1_hclk", NULL, ++ visys_regmap, 0xa0, 1, GATE_SHARED, &share_cnt_isp0_hclk_en, dev); ++ gates[TH1520_CLKGEN_ISP0_HCLK] = xuantie_gate_clk_register("clkgen_isp0_hclk", NULL, ++ visys_regmap, 0xa0, 1, GATE_SHARED, &share_cnt_isp0_hclk_en, dev); ++ gates[TH1520_CLKGEN_ISP1_ACLK] = xuantie_gate_clk_register("clkgen_isp1_aclk", NULL, ++ visys_regmap, 0xa0, 3, GATE_SHARED, &share_cnt_isp0_aclk_en, dev); ++ gates[TH1520_CLKGEN_ISP0_ACLK] = xuantie_gate_clk_register("clkgen_isp0_aclk", NULL, ++ visys_regmap, 0xa0, 3, GATE_SHARED, &share_cnt_isp0_aclk_en, dev); ++ ++ clk_gate_data.clks = gates; ++ clk_gate_data.clk_num = ARRAY_SIZE(gates); ++ ++ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_gate_data); ++ if (ret < 0) { ++ dev_err(dev, "failed to register gate clks for th1520 visys\n"); ++ goto unregister_clks; ++ } ++ ++ dev_info(dev, "succeed to register visys gate clock provider\n"); ++ ++ return 0; ++ ++unregister_clks: ++ xuantie_unregister_clocks(gates, ARRAY_SIZE(gates)); ++ return ret; ++} ++ ++static const struct of_device_id visys_clk_gate_of_match[] = { ++ { .compatible = "xuantie,visys-gate-controller" }, ++ { /* sentinel */ }, ++}; ++MODULE_DEVICE_TABLE(of, visys_clk_gate_of_match); ++ ++static struct platform_driver xuantie_visys_clk_driver = { ++ .probe = xuantie_visys_clk_probe, ++ .driver = { ++ .name = "visys-clk-gate-provider", ++ .of_match_table = of_match_ptr(visys_clk_gate_of_match), ++ }, ++}; ++ ++module_platform_driver(xuantie_visys_clk_driver); ++MODULE_AUTHOR("wei.liu "); ++MODULE_DESCRIPTION("XuanTie Th1520 Fullmask visys clock gate provider"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/clk/xuantie/gate/vosys-gate.c b/drivers/clk/xuantie/gate/vosys-gate.c +new file mode 100644 +index 000000000000..96aff9980e82 +--- /dev/null ++++ b/drivers/clk/xuantie/gate/vosys-gate.c +@@ -0,0 
+1,111 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2022 Alibaba Group Holding Limited. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../clk.h" ++ ++static struct clk *gates[TH1520_CLKGEN_VOSYS_CLK_END]; ++static struct clk_onecell_data clk_gate_data; ++ ++static int xuantie_vosys_clk_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *np = dev->of_node; ++ void __iomem *gate_base; ++ int ret; ++ ++ gate_base = devm_platform_ioremap_resource(pdev, 0); ++ if (WARN_ON(IS_ERR(gate_base))) ++ return PTR_ERR(gate_base); ++ ++ /* we assume that the gate clock is a root clock */ ++ gates[TH1520_CLKGEN_AXI4_VO_PCLK] = xuantie_clk_th1520_gate("clkgen_axi4_vo_pclk", NULL, ++ gate_base + 0x50, 22); ++ gates[TH1520_CLKGEN_IOPMP_VOSYS_DPU_PCLK] = xuantie_clk_th1520_gate("clkgen_iopmp_dpu_pclk", NULL, ++ gate_base + 0x50, 23); ++ gates[TH1520_CLKGEN_IOPMP_VOSYS_DPU1_PCLK] = xuantie_clk_th1520_gate("clkgen_iopmp_dpu1_pclk", NULL, ++ gate_base + 0x50, 24); ++ gates[TH1520_CLKGEN_IOPMP_VOSYS_GPU_PCLK] = xuantie_clk_th1520_gate("clkgen_iopmp_gpu_pclk", NULL, ++ gate_base + 0x50, 25); ++ gates[TH1520_CLKGEN_HDMI_PCLK] = xuantie_clk_th1520_gate("clkgen_hdmi_pclk", NULL, gate_base + 0x50, 11); ++ gates[TH1520_CLKGEN_MIPIDSI0_PCLK] = xuantie_clk_th1520_gate("clkgen_mipidsi0_pclk", NULL, ++ gate_base + 0x50, 13); ++ gates[TH1520_CLKGEN_MIPIDSI1_PCLK] = xuantie_clk_th1520_gate("clkgen_mipidsi1_pclk", NULL, ++ gate_base + 0x50, 14); ++ gates[TH1520_CLKGEN_AXI4_VO_ACLK] = xuantie_clk_th1520_gate("clkgen_axi4_vo_aclk", NULL, ++ gate_base + 0x50, 0); ++ gates[TH1520_CLKGEN_IOPMP_GPU_ACLK] = xuantie_clk_th1520_gate("clkgen_iopmp_gpu_aclk", NULL, ++ gate_base + 0x50, 29); ++ gates[TH1520_CLKGEN_IOPMP_DPU_ACLK] = xuantie_clk_th1520_gate("clkgen_iopmp_dpu_aclk", NULL, ++ gate_base + 0x50, 28); ++ gates[TH1520_CLKGEN_IOPMP_DPU1_ACLK] = xuantie_clk_th1520_gate("clkgen_iopmp_dpu1_aclk", NULL, ++ gate_base + 0x50, 27); ++ gates[TH1520_CLKGEN_X2H_DPU_ACLK] = xuantie_clk_th1520_gate("clkgen_x2h_dpu_aclk", NULL, gate_base + 0x50, 21); ++ gates[TH1520_CLKGEN_X2H_DPU1_ACLK] = xuantie_clk_th1520_gate("clkgen_x2h_dpu1_aclk", NULL, gate_base + 0x50, 20); ++ gates[TH1520_CLKGEN_MIPIDSI0_PIXCLK] = xuantie_clk_th1520_gate("clkgen_mipidsi0_pixclk", NULL, gate_base + 0x50, 30); ++ gates[TH1520_CLKGEN_HDMI_PIXCLK] = xuantie_clk_th1520_gate("clkgen_hdmi_pixclk", NULL, gate_base + 0x54, 0); ++ gates[TH1520_CLKGEN_MIPIDSI1_PIXCLK] = xuantie_clk_th1520_gate("clkgen_mipidsi1_pixclk", NULL, gate_base + 0x50, 31); ++ gates[TH1520_CLKGEN_HDMI_SFR_CLK] = xuantie_clk_th1520_gate("clkgen_hdmi_sfr_clk", NULL, gate_base + 0x50, 10); ++ gates[TH1520_CLKGEN_HDMI_CEC_CLK] = xuantie_clk_th1520_gate("clkgen_hdmi_cec_cclk", NULL, gate_base + 0x50, 12); ++ gates[TH1520_CLKGEN_HDMI_I2S_CLK] = xuantie_clk_th1520_gate("clkgen_hdmi_i2s_clk", NULL, gate_base + 0x50, 19); ++ gates[TH1520_CLKGEN_MIPIDSI0_CFG_CLK] = xuantie_clk_th1520_gate("clkgen_mipidsi0_cfg_clk", NULL, gate_base + 0x50, 15); ++ gates[TH1520_CLKGEN_MIPIDSI1_CFG_CLK] = xuantie_clk_th1520_gate("clkgen_mipidsi1_cfg_clk", NULL, gate_base + 0x50, 16); ++ gates[TH1520_CLKGEN_MIPIDSI0_REFCLK] = xuantie_clk_th1520_gate("clkgen_mipidsi0_refclk", NULL, gate_base + 0x50, 17); ++ gates[TH1520_CLKGEN_MIPIDSI1_REFCLK] = xuantie_clk_th1520_gate("clkgen_mipidsi1_refclk", NULL, gate_base + 0x50, 18); ++ gates[TH1520_CLKGEN_GPU_CORE_CLK] = 
xuantie_clk_th1520_gate("clkgen_gpu_core_clk", "vosys_aclk_m", gate_base + 0x50, 3); ++ gates[TH1520_CLKGEN_GPU_CFG_ACLK] = xuantie_clk_th1520_gate("clkgen_gpu_cfg_aclk", NULL, gate_base + 0x50, 4); ++ gates[TH1520_CLKGEN_DPU_HCLK] = xuantie_clk_th1520_gate("clkgen_dpu_hclk", NULL, gate_base + 0x50, 7); ++ gates[TH1520_CLKGEN_DPU_ACLK] = xuantie_clk_th1520_gate("clkgen_dpu_aclk", NULL, gate_base + 0x50, 8); ++ gates[TH1520_CLKGEN_DPU_CCLK] = xuantie_clk_th1520_gate("clkgen_dpu_cclk", NULL, gate_base + 0x50, 9); ++ gates[TH1520_CLKGEN_DPU_PIXCLK0] = xuantie_clk_th1520_gate("clkgen_dpu_pixclk0", NULL, gate_base + 0x50, 5); ++ gates[TH1520_CLKGEN_DPU_PIXCLK1] = xuantie_clk_th1520_gate("clkgen_dpu_pixclk1", NULL, gate_base + 0x50, 6); ++ ++ clk_gate_data.clks = gates; ++ clk_gate_data.clk_num = ARRAY_SIZE(gates); ++ ++ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_gate_data); ++ if (ret < 0) { ++ dev_err(dev, "failed to register gate clks for th1520 vosys\n"); ++ goto unregister_clks; ++ } ++ ++ dev_info(dev, "succeed to register vosys gate clock provider\n"); ++ ++ return 0; ++ ++unregister_clks: ++ xuantie_unregister_clocks(gates, ARRAY_SIZE(gates)); ++ return ret; ++} ++ ++static const struct of_device_id vosys_clk_gate_of_match[] = { ++ { .compatible = "xuantie,vosys-gate-controller" }, ++ { /* sentinel */ }, ++}; ++MODULE_DEVICE_TABLE(of, vosys_clk_gate_of_match); ++ ++static struct platform_driver xuantie_vosys_clk_driver = { ++ .probe = xuantie_vosys_clk_probe, ++ .driver = { ++ .name = "vosys-clk-gate-provider", ++ .of_match_table = of_match_ptr(vosys_clk_gate_of_match), ++ }, ++}; ++ ++module_platform_driver(xuantie_vosys_clk_driver); ++MODULE_AUTHOR("wei.liu "); ++MODULE_DESCRIPTION("XuanTie Th1520 Fullmask vosys clock gate provider"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/clk/xuantie/gate/vpsys-gate.c b/drivers/clk/xuantie/gate/vpsys-gate.c +new file mode 100644 +index 000000000000..a656d0fc6842 +--- /dev/null ++++ b/drivers/clk/xuantie/gate/vpsys-gate.c +@@ -0,0 +1,99 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2022 Alibaba Group Holding Limited. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "../clk.h" ++ ++static struct clk *gates[TH1520_VPSYS_CLK_END]; ++static struct clk_onecell_data clk_gate_data; ++ ++static u32 share_cnt_g2d_clk_en; ++static u32 share_cnt_fce_clk_en; ++ ++static int xuantie_vpsys_clk_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *np = dev->of_node; ++ void __iomem *gate_base; ++ int ret; ++ ++ gate_base = devm_platform_ioremap_resource(pdev, 0); ++ if (WARN_ON(IS_ERR(gate_base))) ++ return PTR_ERR(gate_base); ++ ++ // DIV & CDE ++ gates[TH1520_VPSYS_G2D_CCLK_DIV] = xuantie_clk_th1520_divider("clkgen_vpsys_g2d_cclk_div", "video_pll_foutvco", gate_base + 0x30, 0, 4, 4, MUX_TYPE_DIV, 3, 15); ++ gates[TH1520_VPSYS_DEC_CCLK_DIV] = xuantie_clk_th1520_divider("clkgen_vpsys_dec_cclk_div", "video_pll_foutvco", gate_base + 0x24, 0, 4, 4, MUX_TYPE_DIV, 4, 15); ++ ++ /* G2D clock configuration : Completed the upward configuration of CCLK */ ++ gates[TH1520_VPSYS_G2D_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_vpsys_g2d_pclk", NULL, ++ gate_base + 0x20, 3, &share_cnt_g2d_clk_en); ++ gates[TH1520_VPSYS_G2D_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_vpsys_g2d_aclk", NULL, ++ gate_base + 0x20, 3, &share_cnt_g2d_clk_en); ++ gates[TH1520_VPSYS_G2D_CCLK] = xuantie_clk_th1520_gate_shared("clkgen_vpsys_g2d_cclk", "clkgen_vpsys_g2d_cclk_div", ++ gate_base + 0x20, 3, &share_cnt_g2d_clk_en); ++ ++ /* we assume that the gate clock is a root clock */ ++ gates[TH1520_VPSYS_FCE_PCLK] = xuantie_clk_th1520_gate_shared("clkgen_vpsys_fce_pclk", NULL, ++ gate_base + 0x20, 2, &share_cnt_fce_clk_en); ++ gates[TH1520_VPSYS_FCE_ACLK] = xuantie_clk_th1520_gate_shared("clkgen_vpsys_fce_aclk", NULL, ++ gate_base + 0x20, 2, &share_cnt_fce_clk_en); ++ ++ /* VENC&VDEC clock configuration : Completed the upward configuration of CCLK */ ++ gates[TH1520_VPSYS_VDEC_ACLK] = xuantie_clk_th1520_gate("clkgen_vdec_aclk", NULL, gate_base + 0x20, 4); ++ gates[TH1520_VPSYS_VDEC_CCLK] = xuantie_clk_th1520_gate("clkgen_vdec_cclk", "clkgen_vpsys_dec_cclk_div", gate_base + 0x20, 5); ++ gates[TH1520_VPSYS_VDEC_PCLK] = xuantie_clk_th1520_gate("clkgen_vdec_pclk", NULL, gate_base + 0x20, 6); ++ ++ gates[TH1520_VPSYS_VENC_CCLK] = xuantie_clk_th1520_gate("clkgen_venc_cclk", "clkgen_vpsys_venc_cclk", gate_base + 0x20, 8); ++ gates[TH1520_VPSYS_VENC_PCLK] = xuantie_clk_th1520_gate("clkgen_venc_pclk", NULL, gate_base + 0x20, 9); ++ gates[TH1520_VPSYS_VENC_ACLK] = xuantie_clk_th1520_gate("clkgen_venc_aclk", NULL, gate_base + 0x20, 7); ++ ++ clk_gate_data.clks = gates; ++ clk_gate_data.clk_num = ARRAY_SIZE(gates); ++ ++ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_gate_data); ++ if (ret < 0) { ++ dev_err(dev, "failed to register gate clks for th1520 vpsys\n"); ++ goto unregister_clks; ++ } ++ ++ dev_info(dev, "succeed to register vpsys gate clock provider\n"); ++ ++ return 0; ++ ++unregister_clks: ++ xuantie_unregister_clocks(gates, ARRAY_SIZE(gates)); ++ return ret; ++} ++ ++static const struct of_device_id vpsys_clk_gate_of_match[] = { ++ { .compatible = "xuantie,vpsys-gate-controller" }, ++ { /* sentinel */ }, ++}; ++MODULE_DEVICE_TABLE(of, vpsys_clk_gate_of_match); ++ ++static struct platform_driver xuantie_vpsys_clk_driver = { ++ .probe = xuantie_vpsys_clk_probe, ++ .driver = { ++ .name = "vpsys-clk-gate-provider", ++ .of_match_table = of_match_ptr(vpsys_clk_gate_of_match), ++ }, ++}; ++ 
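++/*
++ * Note on the *_gate_shared clocks registered above: the G2D PCLK/ACLK/CCLK
++ * group and the FCE PCLK/ACLK pair each pass one common share_count, so the
++ * shared enable bit is only written on the group's first enable and last
++ * disable, mirroring the regmap-based shared gates in xuantie-gate.c below.
++ */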
++module_platform_driver(xuantie_vpsys_clk_driver);
++MODULE_AUTHOR("wei.liu ");
++MODULE_DESCRIPTION("XuanTie Th1520 Fullmask vpsys clock gate provider");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/clk/xuantie/gate/xuantie-gate.c b/drivers/clk/xuantie/gate/xuantie-gate.c
+new file mode 100644
+index 000000000000..8bf7a18776f8
+--- /dev/null
++++ b/drivers/clk/xuantie/gate/xuantie-gate.c
+@@ -0,0 +1,114 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2022 Alibaba Group Holding Limited.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include "clk-gate.h"
++
++#define to_xuantie_clk_gate(_hw) container_of(_hw, struct xuantie_clk_gate, hw)
++
++static int xuantie_clk_gate_is_enabled(struct clk_hw *hw)
++{
++	struct xuantie_clk_gate *tcg = to_xuantie_clk_gate(hw);
++	u32 val;
++
++	regmap_read(tcg->regmap, tcg->offset, &val);
++
++	val &= BIT(tcg->bit);
++
++	return val != 0;
++}
++
++static void xuantie_clk_gate_disable(struct clk_hw *hw)
++{
++	struct xuantie_clk_gate *tcg = to_xuantie_clk_gate(hw);
++
++	if (!tcg->shared)
++		goto out;
++
++	if (tcg->share_count) {
++		if (WARN_ON(*tcg->share_count == 0))
++			return;
++		else if (--(*tcg->share_count) > 0) {
++			pr_debug("[%s,%d]share_count = %d\n", __func__, __LINE__,
++				 (*tcg->share_count));
++			return;
++		}
++	}
++
++out:
++	regmap_update_bits(tcg->regmap, tcg->offset,
++			   BIT(tcg->bit), 0);
++}
++
++static int xuantie_clk_gate_enable(struct clk_hw *hw)
++{
++	struct xuantie_clk_gate *tcg = to_xuantie_clk_gate(hw);
++
++	if (!tcg->shared)
++		goto out;
++
++	if (tcg->share_count && (*tcg->share_count)++ > 0) {
++		pr_debug("[%s,%d]share_count = %d\n", __func__, __LINE__, (*tcg->share_count));
++		return 0;
++	}
++
++out:
++	return regmap_update_bits(tcg->regmap, tcg->offset,
++				  BIT(tcg->bit), BIT(tcg->bit));
++}
++
++static const struct clk_ops xuantie_gate_clk_ops = {
++	.enable = xuantie_clk_gate_enable,
++	.disable = xuantie_clk_gate_disable,
++	.is_enabled = xuantie_clk_gate_is_enabled,
++};
++
++struct clk *xuantie_gate_clk_register(const char *name,
++				      const char *parent_name,
++				      struct regmap *regmap,
++				      int offset,
++				      u8 bit,
++				      bool shared,
++				      u32 *share_count,
++				      struct device *dev)
++{
++	struct xuantie_clk_gate *tcg;
++	struct clk *clk;
++	struct clk_init_data init = {};
++
++	tcg = kzalloc(sizeof(*tcg), GFP_KERNEL);
++	if (!tcg)
++		return ERR_PTR(-ENOMEM);
++
++	tcg->regmap = regmap;
++	tcg->offset = offset;
++	tcg->bit = bit;
++	tcg->shared = shared;
++	tcg->share_count = share_count;
++
++	init.name = name;
++	init.flags = CLK_SET_RATE_PARENT;
++	init.parent_names = parent_name ? &parent_name : NULL;
++	init.num_parents = parent_name ?
1 : 0; ++ init.ops = &xuantie_gate_clk_ops; ++ ++ tcg->hw.init = &init; ++ ++ clk = clk_register(dev, &tcg->hw); ++ if (IS_ERR(clk)) ++ kfree(tcg); ++ ++ return clk; ++} +diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c +index da3071b387eb..16cf855f55c5 100644 +--- a/drivers/clocksource/timer-riscv.c ++++ b/drivers/clocksource/timer-riscv.c +@@ -24,7 +24,7 @@ + #include + #include + #include +-#include ++#include + #include + #include + +@@ -212,6 +212,10 @@ TIMER_OF_DECLARE(riscv_timer, "riscv", riscv_timer_init_dt); + #ifdef CONFIG_ACPI + static int __init riscv_timer_acpi_init(struct acpi_table_header *table) + { ++ struct acpi_table_rhct *rhct = (struct acpi_table_rhct *)table; ++ ++ riscv_timer_cannot_wake_cpu = rhct->flags & ACPI_RHCT_TIMER_CANNOT_WAKEUP_CPU; ++ + return riscv_timer_init_common(); + } + +diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig +index fd709abd3d0e..d1173f9c318f 100644 +--- a/drivers/cpufreq/Kconfig ++++ b/drivers/cpufreq/Kconfig +@@ -356,5 +356,43 @@ config QORIQ_CPUFREQ + This adds the CPUFreq driver support for Freescale QorIQ SoCs + which are capable of changing the CPU's frequency dynamically. + ++config RISCV_XUANTIE_TH1520_CPUFREQ ++ tristate "CPU frequency scaling driver for XuanTie th1520 SoCs" ++ depends on OF && COMMON_CLK && TH1520_AON && REGULATOR_TH1520_AON ++ select XUANTIE_CLK ++ select PM_OPP ++ help ++ This adds the CPUFreq driver support for XuanTie th1520 SoCs ++ which are capable of changing the CPU's frequency dynamically. ++ + endif ++ ++config ACPI_CPPC_CPUFREQ ++ tristate "CPUFreq driver based on the ACPI CPPC spec" ++ depends on ACPI_PROCESSOR ++ depends on ARM || ARM64 || RISCV ++ select ACPI_CPPC_LIB ++ help ++ This adds a CPUFreq driver which uses CPPC methods ++ as described in the ACPIv5.1 spec. CPPC stands for ++ Collaborative Processor Performance Controls. It ++ is based on an abstract continuous scale of CPU ++ performance values which allows the remote power ++ processor to flexibly optimize for power and ++ performance. CPPC relies on power management firmware ++ support for its operation. ++ ++ If in doubt, say N. ++ ++config ACPI_CPPC_CPUFREQ_FIE ++ bool "Frequency Invariance support for CPPC cpufreq driver" ++ depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY ++ depends on ARM || ARM64 || RISCV ++ default y ++ help ++ This extends frequency invariance support in the CPPC cpufreq driver, ++ by using CPPC delivered and reference performance counters. ++ ++ If in doubt, say N. ++ + endmenu +diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm +index 6265c91fbf6b..8045a6d928a4 100644 +--- a/drivers/cpufreq/Kconfig.arm ++++ b/drivers/cpufreq/Kconfig.arm +@@ -3,22 +3,6 @@ + # ARM CPU Frequency scaling drivers + # + +-config ACPI_CPPC_CPUFREQ +- tristate "CPUFreq driver based on the ACPI CPPC spec" +- depends on ACPI_PROCESSOR +- select ACPI_CPPC_LIB +- help +- This adds a CPUFreq driver which uses CPPC methods +- as described in the ACPIv5.1 spec. CPPC stands for +- Collaborative Processor Performance Controls. It +- is based on an abstract continuous scale of CPU +- performance values which allows the remote power +- processor to flexibly optimize for power and +- performance. CPPC relies on power management firmware +- support for its operation. +- +- If in doubt, say N. 
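The help text above describes CPPC's abstract performance scale. As a rough illustration of how a cpufreq driver maps an abstract performance level back to a frequency, here is a minimal sketch assuming the kernel's CPPC library (cppc_get_perf_caps() and struct cppc_perf_caps from <acpi/cppc_acpi.h>); the perf_to_khz() helper itself is hypothetical and not part of this patch:

#include <linux/math64.h>
#include <acpi/cppc_acpi.h>

/* Hypothetical helper: scale an abstract CPPC performance level to kHz
 * against the nominal operating point from the _CPC package
 * (nominal_freq is reported in MHz). */
static unsigned int perf_to_khz(int cpu, u64 perf)
{
	struct cppc_perf_caps caps;

	if (cppc_get_perf_caps(cpu, &caps) || !caps.nominal_perf)
		return 0;

	return (unsigned int)div64_u64(perf * caps.nominal_freq * 1000,
				       caps.nominal_perf);
}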
+- + config CPPC_CPUFREQ_SYSFS_INTERFACE + bool "Enable CPPC CPUFreq sysfs tuning interfaces" + depends on ACPI_CPPC_CPUFREQ && ARM64 +@@ -30,16 +14,6 @@ config CPPC_CPUFREQ_SYSFS_INTERFACE + + If unsure, say N. + +-config ACPI_CPPC_CPUFREQ_FIE +- bool "Frequency Invariance support for CPPC cpufreq driver" +- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY +- default y +- help +- This extends frequency invariance support in the CPPC cpufreq driver, +- by using CPPC delivered and reference performance counters. +- +- If in doubt, say N. +- + config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM + tristate "Allwinner nvmem based SUN50I CPUFreq driver" + depends on ARCH_SUNXI +diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile +index 46c3aa314f97..63f81fbda8ba 100644 +--- a/drivers/cpufreq/Makefile ++++ b/drivers/cpufreq/Makefile +@@ -110,3 +110,4 @@ obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o + obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o + obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o + obj-$(CONFIG_SW64_CPUFREQ) += sunway-cpufreq.o ++obj-$(CONFIG_RISCV_XUANTIE_TH1520_CPUFREQ) += th1520-cpufreq.o +diff --git a/drivers/cpufreq/th1520-cpufreq.c b/drivers/cpufreq/th1520-cpufreq.c +new file mode 100644 +index 000000000000..ef157fd3cdf5 +--- /dev/null ++++ b/drivers/cpufreq/th1520-cpufreq.c +@@ -0,0 +1,588 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (C) 2021 Alibaba Group Holding Limited. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef CONFIG_TH1520_SYSTEM_MONITOR ++#include ++ ++struct monitor_dev_info *mdev_info = NULL; ++#endif ++ ++static DEFINE_MUTEX(cpufreq_lock); ++ ++bool cpufreq_denied; ++ ++struct regulator *dvdd_cpu_reg; ++struct regulator *dvddm_cpu_reg; ++ ++enum TH1520_MPW_CPUFREQ_CLKS { ++ TH1520_C910_CCLK, ++ TH1520_C910_CCLK_I0, ++ TH1520_CPU_PLL1_FOUTPOSTDIV, ++ TH1520_CPU_PLL0_FOUTPOSTDIV, ++}; ++ ++#define TH1520_MPW_CPUFREQ_CLK_NUM 4 ++#define TH1520_CPUFREQ_THRE 1500000 ++#define TH1520_C910_BUS_CLK_SYNC BIT(11) ++#define TH1520_C910_BUS_CLK_RATIO_MASK 0x700 ++#define TH1520_C910_BUS_CLK_DIV_RATIO_2 0x100 ++#define TH1520_C910_BUS_CLK_DIV_RATIO_3 0x200 ++ ++#define TH1520_CPU_PLL_IDX(x) (x) ++#define TH1520_CPU_PLL_COUNT 2 ++ ++static int num_clks; ++static struct clk_bulk_data clks[] = { ++ { .id = "c910_cclk" }, ++ { .id = "c910_cclk_i0" }, ++ { .id = "cpu_pll1_foutpostdiv" }, ++ { .id = "cpu_pll0_foutpostdiv" }, ++}; ++ ++static struct device *cpu_dev; ++static struct cpufreq_frequency_table *freq_table; ++static unsigned int max_freq; ++static unsigned int min_freq; ++static unsigned int transition_latency; ++static void __iomem *ap_sys_reg; ++static bool th1520_dvfs_sv; ++ ++static u32 *th1520_dvddm_volt; ++static u32 soc_opp_count; ++ ++static int _th1520_get_pllid(void) ++{ ++ int ret; ++ ++ if (!strcmp(__clk_get_name(clk_get_parent(clks[TH1520_C910_CCLK].clk)), ++ __clk_get_name(clks[TH1520_C910_CCLK_I0].clk))) // pll index 0 ++ ret = TH1520_CPU_PLL_IDX(0); ++ else // pll index 1 ++ ret = TH1520_CPU_PLL_IDX(1); ++ ++ return ret; ++} ++ ++static int _th1520_switch_pllid(int pllid, int target_freq) ++{ ++ int ret; ++ ++ pr_debug("[%s] switch to pll[%d], freq[%u]\n", __func__, pllid, target_freq); ++ if (pllid == TH1520_CPU_PLL_IDX(1)) { ++ clk_prepare_enable(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk); ++ clk_set_rate(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk, target_freq * 1000); 
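++		/*
++		 * The core keeps running from the currently selected PLL
++		 * while the other PLL is enabled and reprogrammed above;
++		 * only then is c910_cclk reparented onto it.
++		 */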
++ ret = clk_set_parent(clks[TH1520_C910_CCLK].clk, clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk); ++ udelay(1); ++ if (ret) ++ clk_disable_unprepare(clks[TH1520_CPU_PLL0_FOUTPOSTDIV].clk); ++ } else { ++ clk_prepare_enable(clks[TH1520_CPU_PLL0_FOUTPOSTDIV].clk); ++ clk_set_rate(clks[TH1520_CPU_PLL0_FOUTPOSTDIV].clk, target_freq * 1000); ++ ret = clk_set_parent(clks[TH1520_C910_CCLK].clk, clks[TH1520_C910_CCLK_I0].clk); ++ udelay(1); ++ if (ret) ++ clk_disable_unprepare(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk); ++ } ++ ++ return 0; ++} ++ ++ ++static int th1520_set_target(struct cpufreq_policy *policy, unsigned int index) ++{ ++ struct dev_pm_opp *opp; ++ unsigned long freq_hz; ++ int volt, volt_old; ++ unsigned int old_freq, new_freq; ++ int ret; ++ u32 val; ++ u32 re_modify_bus_freq = 0; ++ ++ mutex_lock(&cpufreq_lock); ++ ++ if (cpufreq_denied) { ++ dev_emerg(cpu_dev, "Denied to set cpu frequency temporarily on reboot\n"); ++ mutex_unlock(&cpufreq_lock); ++ return 0; ++ } ++ new_freq = freq_table[index].frequency; ++ freq_hz = new_freq * 1000; ++ old_freq = policy->cur; ++ ++ opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); ++ if (IS_ERR(opp)) { ++ dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz); ++ mutex_unlock(&cpufreq_lock); ++ return PTR_ERR(opp); ++ } ++ ++ volt = dev_pm_opp_get_voltage(opp); ++ dev_pm_opp_put(opp); ++ ++ volt_old = regulator_get_voltage(dvdd_cpu_reg); ++ if (volt_old < 0) { ++ dev_err(cpu_dev, "failed to get cpu voltage\n"); ++ mutex_unlock(&cpufreq_lock); ++ return volt_old; ++ } ++ ++ dev_dbg(cpu_dev, "%u MHz, %d mV --> %u MHz, %d mV\n", ++ old_freq / 1000, volt_old / 1000, ++ new_freq / 1000, volt / 1000); ++ ++ /* change AXI bus clock ratio to match: BUS_CLK = CPU_CCLK/ratio <= 750MHz */ ++ val = readl(ap_sys_reg); ++ if (new_freq > TH1520_CPUFREQ_THRE) { ++ val &= ~TH1520_C910_BUS_CLK_RATIO_MASK; ++ val |= TH1520_C910_BUS_CLK_DIV_RATIO_3; ++ } else { ++ val &= ~TH1520_C910_BUS_CLK_RATIO_MASK; ++ ++ if (old_freq > TH1520_CPUFREQ_THRE) { ++ re_modify_bus_freq = 1; ++ val |= TH1520_C910_BUS_CLK_DIV_RATIO_3; ++ } else ++ val |= TH1520_C910_BUS_CLK_DIV_RATIO_2; ++ } ++ ++ writel(val, ap_sys_reg); ++ val &= ~TH1520_C910_BUS_CLK_SYNC; ++ writel(val, ap_sys_reg); ++ udelay(1); ++ val |= TH1520_C910_BUS_CLK_SYNC; ++ writel(val, ap_sys_reg); ++ udelay(1); ++ ++ /* scaling up? scale voltage before frequency */ ++ if (new_freq > old_freq && !th1520_dvfs_sv) { ++ ret = regulator_set_voltage_tol(dvddm_cpu_reg, th1520_dvddm_volt[index], 0); ++ if (ret) { ++ dev_err(cpu_dev, "failed to scale vddsoc up: %d\n", ret); ++ mutex_unlock(&cpufreq_lock); ++ return ret; ++ } ++ ret = regulator_set_voltage_tol(dvdd_cpu_reg, volt, 0); ++ if (ret) { ++ dev_err(cpu_dev, ++ "failed to scale vddarm up: %d\n", ret); ++ mutex_unlock(&cpufreq_lock); ++ return ret; ++ } ++ } ++ ++ /* switch pll */ ++ _th1520_switch_pllid((_th1520_get_pllid()+1)&(TH1520_CPU_PLL_COUNT-1), new_freq); ++ ++ /*add delay for clk-switch*/ ++ udelay(1); ++ ++ /* Ensure the c910_cclk clock divider is what we expect */ ++ ret = clk_set_rate(clks[TH1520_C910_CCLK].clk, new_freq * 1000); ++ if (ret) { ++ int ret1; ++ ++ dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); ++ ret1 = regulator_set_voltage_tol(dvdd_cpu_reg, volt_old, 0); ++ if (ret1) ++ dev_err(cpu_dev, "failed to restore dvdd_cpu voltage: %d\n", ret1); ++ mutex_unlock(&cpufreq_lock); ++ return ret; ++ } ++ ++ /* scaling down? 
scale voltage after frequency */
++	if (new_freq < old_freq && !th1520_dvfs_sv) {
++		ret = regulator_set_voltage_tol(dvddm_cpu_reg, th1520_dvddm_volt[index], 0);
++		if (ret)
++			dev_err(cpu_dev, "failed to scale dvddm down: %d\n", ret);
++		ret = regulator_set_voltage_tol(dvdd_cpu_reg, volt, 0);
++		if (ret)
++			dev_err(cpu_dev, "failed to scale dvdd_cpu down: %d\n", ret);
++	}
++
++	val = readl(ap_sys_reg);
++	if (re_modify_bus_freq) {
++		val &= ~TH1520_C910_BUS_CLK_RATIO_MASK;
++		val |= TH1520_C910_BUS_CLK_DIV_RATIO_2;
++
++		writel(val, ap_sys_reg);
++		val &= ~TH1520_C910_BUS_CLK_SYNC;
++		writel(val, ap_sys_reg);
++		udelay(1);
++		val |= TH1520_C910_BUS_CLK_SYNC;
++		writel(val, ap_sys_reg);
++		udelay(1);
++	}
++
++	mutex_unlock(&cpufreq_lock);
++
++	return 0;
++}
++
++static int th1520_cpufreq_suspend(struct cpufreq_policy *policy)
++{
++	int ret;
++
++	pr_debug("%s: cpu: %d, %u kHz to %u kHz\n",
++		 __func__, policy->cpu, policy->cur, policy->suspend_freq);
++
++	ret = cpufreq_generic_suspend(policy);
++	if (ret) {
++		pr_err("%s: failed\n", __func__);
++		return ret;
++	}
++
++	/*
++	 * Skip switching the PLL during the reboot process.
++	 */
++	mutex_lock(&cpufreq_lock);
++	if (cpufreq_denied) {
++		pr_debug("Denied to switch CPU PLL temporarily on reboot\n");
++		mutex_unlock(&cpufreq_lock);
++		return 0;
++	}
++	/*
++	 * Only CPU PLL0 is active after STR resume, so switch the CPU over
++	 * to PLL0 once the policy has been stopped.
++	 */
++	if (_th1520_get_pllid() == TH1520_CPU_PLL_IDX(1))
++		_th1520_switch_pllid(TH1520_CPU_PLL_IDX(0), policy->suspend_freq);
++
++	/*
++	 * Set PLL1 to min_freq as well: PLL1 must also be left at a known
++	 * rate, or unexpected errors can occur during recovery.
++	 */
++	clk_prepare_enable(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk);
++	clk_set_rate(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk, min_freq * 1000);
++	clk_disable_unprepare(clks[TH1520_CPU_PLL1_FOUTPOSTDIV].clk);
++	mutex_unlock(&cpufreq_lock);
++	return 0;
++}
++
++static int th1520_cpufreq_resume(struct cpufreq_policy *policy)
++{
++	int ret;
++
++	ret = __cpufreq_driver_target(policy, min_freq, CPUFREQ_RELATION_H);
++	if (ret)
++		pr_err("%s: unable to set restore-freq: %u. err: %d\n",
++		       __func__, min_freq, ret);
++	/*
++	 * CPU PLL0 runs at 300 MHz after STR resume. Since suspend switched
++	 * the CPU to PLL0 at the highest frequency, reprogram PLL0 to the
++	 * correct rate after resume.
++	 */
++	mutex_lock(&cpufreq_lock);
++	if (_th1520_get_pllid() == TH1520_CPU_PLL_IDX(1))
++		_th1520_switch_pllid(TH1520_CPU_PLL_IDX(0), min_freq);
++	mutex_unlock(&cpufreq_lock);
++
++	return ret;
++}
++
++static int th1520_cpufreq_init(struct cpufreq_policy *policy)
++{
++	policy->clk = clks[TH1520_C910_CCLK].clk;
++	policy->cur = clk_get_rate(policy->clk) / 1000;
++	cpufreq_generic_init(policy, freq_table, transition_latency);
++	policy->suspend_freq = max_freq;
++
++	return 0;
++}
++
++static int th1520_cpufreq_reboot_notifier(struct notifier_block *this,
++					  unsigned long event, void *ptr)
++{
++	mutex_lock(&cpufreq_lock);
++	cpufreq_denied = true;
++	mutex_unlock(&cpufreq_lock);
++
++	return NOTIFY_DONE;
++}
++
++static struct notifier_block cpufreq_reboot_notifier = {
++	.notifier_call = th1520_cpufreq_reboot_notifier,
++};
++
++static struct cpufreq_driver th1520_cpufreq_driver = {
++	.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
++		 CPUFREQ_IS_COOLING_DEV,
++	.verify = cpufreq_generic_frequency_table_verify,
++	.target_index = th1520_set_target,
++	.get = cpufreq_generic_get,
++	.init = th1520_cpufreq_init,
++	.register_em = cpufreq_register_em_with_opp,
++	.name = "th1520-cpufreq",
++	.attr = cpufreq_generic_attr,
++	.suspend = th1520_cpufreq_suspend,
++	.resume = th1520_cpufreq_resume,
++};
++
++static int th1520_cpufreq_pm_notify(struct notifier_block *nb,
++				    unsigned long event, void *dummy)
++{
++	switch (event) {
++	case PM_SUSPEND_PREPARE:
++		/* TBD */
++		break;
++	case PM_POST_SUSPEND:
++		/* TBD */
++		break;
++	default:
++		break;
++	}
++
++	return NOTIFY_OK;
++}
++
++static struct notifier_block th1520_cpufreq_pm_notifier = {
++	.notifier_call = th1520_cpufreq_pm_notify,
++};
++
++/*
++ * Set CPU PLL1's frequency to the minimum on panic.
++ */
++static int panic_cpufreq_notifier_call(struct notifier_block *nb,
++				       unsigned long action, void *data)
++{
++	int cpu = smp_processor_id();
++	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
++
++	pr_info("[%s]Match the clock sw status to the hw after rst\n", __func__);
++	/*
++	 * Set CPU PLL1's frequency to the minimum so it stays compatible
++	 * with the voltage; be careful: if PLL1 is currently serving the
++	 * CPU core, switch to PLL0 first.
++	 */
++	if (_th1520_get_pllid() == TH1520_CPU_PLL_IDX(1))
++		_th1520_switch_pllid(TH1520_CPU_PLL_IDX(0), policy->min);
++
++	/*
++	 * Since the clk driver will use PLL1 as the default clock source,
++	 * and the voltage at that point is unpredictable, set PLL1's
++	 * frequency to the minimum in advance; otherwise the system may
++	 * crash in the crash-kernel stage.
++ */ ++ _th1520_switch_pllid(TH1520_CPU_PLL_IDX(1), policy->min); ++ ++ return 0; ++} ++ ++static struct notifier_block panic_cpufreq_notifier = { ++ .notifier_call = panic_cpufreq_notifier_call, ++}; ++ ++#ifdef CONFIG_TH1520_SYSTEM_MONITOR ++static struct monitor_dev_profile cpu_status_monitor = { ++ .type = MONITOR_TPYE_CPU, ++}; ++#endif ++ ++static int th1520_cpufreq_probe(struct platform_device *pdev) ++{ ++ struct device_node *np; ++ int num, ret; ++ const struct property *prop; ++ const __be32 *val; ++ u32 nr, i, j; ++ ++ np = of_find_compatible_node(NULL, NULL, "xuantie,th1520-sys-reg"); ++ if (!np) ++ return -ENOENT; ++ ap_sys_reg = of_iomap(np, 0); ++ WARN_ON(!ap_sys_reg); ++ ++ cpu_dev = get_cpu_device(0); ++ if (!cpu_dev) { ++ pr_err("failed to get cpu0 device\n"); ++ return -ENODEV; ++ } ++ ++ np = of_node_get(cpu_dev->of_node); ++ if (!np) { ++ dev_err(cpu_dev, "failed to find cpu0 node\n"); ++ return -ENOENT; ++ } ++ ++ num_clks = TH1520_MPW_CPUFREQ_CLK_NUM; ++ ret = clk_bulk_get(cpu_dev, num_clks, clks); ++ if (ret) ++ goto put_node; ++ ++ dvdd_cpu_reg = regulator_get(cpu_dev, "dvdd"); ++ dvddm_cpu_reg = regulator_get(cpu_dev, "dvddm"); ++ if (PTR_ERR(dvdd_cpu_reg) == -EPROBE_DEFER || ++ PTR_ERR(dvddm_cpu_reg) == -EPROBE_DEFER) { ++ ret = -EPROBE_DEFER; ++ dev_dbg(cpu_dev, "regulators not ready, defer\n"); ++ goto put_reg; ++ } ++ ++ if (IS_ERR(dvdd_cpu_reg) || IS_ERR(dvddm_cpu_reg)) { ++ dev_err(cpu_dev, "failed to get regulators\n"); ++ ret = -ENOENT; ++ goto put_reg; ++ } ++ ++ ret = dev_pm_opp_of_add_table(cpu_dev); ++ if (ret < 0) { ++ dev_err(cpu_dev, "failed to init OPP table: %d\n", ret); ++ goto put_reg; ++ } ++ ++ num = dev_pm_opp_get_opp_count(cpu_dev); ++ if (num < 0) { ++ ret = num; ++ dev_err(cpu_dev, "no OPP table is found: %d\n", ret); ++ goto out_free_opp; ++ } ++ ++ ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); ++ if (ret) { ++ dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); ++ goto out_free_opp; ++ } ++ ++ /* Make th1520_dvddm_volt array's size same as dvdd opp number */ ++ th1520_dvddm_volt = devm_kcalloc(cpu_dev, num, sizeof(*th1520_dvddm_volt), ++ GFP_KERNEL); ++ if (th1520_dvddm_volt == NULL) { ++ ret = -ENOMEM; ++ goto free_freq_table; ++ } ++ ++ if (of_get_property(np, "dvfs_sv", NULL)) ++ th1520_dvfs_sv = true; ++ else ++ th1520_dvfs_sv = false; ++ ++ prop = of_find_property(np, "th1520,dvddm-operating-points", NULL); ++ if (!prop || !prop->value) ++ goto soc_opp_out; ++ ++ nr = prop->length / sizeof(u32); ++ if (nr % 2 || (nr / 2) < num) ++ goto soc_opp_out; ++ ++ for (j = 0; j < num; j++) { ++ val = prop->value; ++ for (i = 0; i < nr / 2; i++) { ++ unsigned long freq = be32_to_cpup(val++); ++ unsigned long volt = be32_to_cpup(val++); ++ ++ if (freq_table[j].frequency == freq) { ++ th1520_dvddm_volt[soc_opp_count++] = volt; ++ break; ++ } ++ } ++ } ++ ++soc_opp_out: ++ if (soc_opp_count != num) ++ dev_warn(cpu_dev, "Not find valid th1520,dvddm-operating-points property\n"); ++ ++ if (of_property_read_u32(np, "clock-latency", &transition_latency)) ++ transition_latency = CPUFREQ_ETERNAL; ++ ++ max_freq = freq_table[--num].frequency; ++ min_freq = freq_table[0].frequency; ++ ++ ret = cpufreq_register_driver(&th1520_cpufreq_driver); ++ if (ret) { ++ dev_err(cpu_dev, "failed register driver: %d\n", ret); ++ goto free_freq_table; ++ } ++ ++ register_pm_notifier(&th1520_cpufreq_pm_notifier); ++ ++ of_node_put(np); ++ ++ ret = atomic_notifier_chain_register(&panic_notifier_list, ++ &panic_cpufreq_notifier); ++ if (ret) { ++ 
pr_err("unable to register notifier(%d)\n", ret); ++ goto free_freq_table; ++ } ++ ++ register_reboot_notifier(&cpufreq_reboot_notifier); ++ ++#ifdef CONFIG_TH1520_SYSTEM_MONITOR ++ mdev_info = th1520_system_monitor_register(cpu_dev, &cpu_status_monitor); ++ if (IS_ERR(mdev_info)) { ++ mdev_info = NULL; ++ dev_err(cpu_dev, "failed to register system monitor\n"); ++ } ++#endif ++ dev_info(cpu_dev, "finish to register cpufreq driver\n"); ++ ++ return 0; ++ ++free_freq_table: ++ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); ++out_free_opp: ++ dev_pm_opp_of_remove_table(cpu_dev); ++put_reg: ++ if (!IS_ERR(dvdd_cpu_reg)) ++ regulator_put(dvdd_cpu_reg); ++ if (!IS_ERR(dvddm_cpu_reg)) ++ regulator_put(dvddm_cpu_reg); ++ ++ clk_bulk_put(num_clks, clks); ++put_node: ++ of_node_put(np); ++ ++ return ret; ++} ++ ++static int th1520_cpufreq_remove(struct platform_device *pdev) ++{ ++ cpufreq_unregister_driver(&th1520_cpufreq_driver); ++ dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); ++ dev_pm_opp_of_remove_table(cpu_dev); ++ regulator_put(dvdd_cpu_reg); ++ regulator_put(dvddm_cpu_reg); ++ ++#ifdef CONFIG_TH1520_SYSTEM_MONITOR ++ if (mdev_info) ++ th1520_system_monitor_unregister(mdev_info); ++#endif ++ clk_bulk_put(num_clks, clks); ++ ++ return 0; ++} ++ ++static const struct of_device_id th1520_cpufreq_match[] = { ++ { .compatible = "xuantie,th1520-cpufreq" }, ++ {}, ++}; ++ ++static struct platform_driver th1520_cpufreq_platdrv = { ++ .driver = { ++ .name = "xuantie,th1520-cpufreq", ++ .of_match_table = th1520_cpufreq_match, ++ }, ++ .probe = th1520_cpufreq_probe, ++ .remove = th1520_cpufreq_remove, ++}; ++module_platform_driver(th1520_cpufreq_platdrv); ++ ++MODULE_ALIAS("platform:th1520-cpufreq"); ++MODULE_AUTHOR("fugang.duan "); ++MODULE_DESCRIPTION("XuanTie TH1520 cpufreq driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c +index 71d433bb0ce6..50d128a4b343 100644 +--- a/drivers/cpuidle/cpuidle-riscv-sbi.c ++++ b/drivers/cpuidle/cpuidle-riscv-sbi.c +@@ -74,26 +74,6 @@ static inline bool sbi_is_domain_state_available(void) + return data->available; + } + +-static int sbi_suspend_finisher(unsigned long suspend_type, +- unsigned long resume_addr, +- unsigned long opaque) +-{ +- struct sbiret ret; +- +- ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, +- suspend_type, resume_addr, opaque, 0, 0, 0); +- +- return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0; +-} +- +-static int sbi_suspend(u32 state) +-{ +- if (state & SBI_HSM_SUSP_NON_RET_BIT) +- return cpu_suspend(state, sbi_suspend_finisher); +- else +- return sbi_suspend_finisher(state, 0, 0); +-} +- + static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) + { +@@ -101,9 +81,9 @@ static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev, + u32 state = states[idx]; + + if (state & SBI_HSM_SUSP_NON_RET_BIT) +- return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state); ++ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state); + else +- return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend, ++ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend, + idx, state); + } + +@@ -134,7 +114,7 @@ static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev, + else + state = states[idx]; + +- ret = sbi_suspend(state) ? -1 : idx; ++ ret = riscv_sbi_hart_suspend(state) ? 
-1 : idx; + + ct_cpuidle_exit(); + +@@ -207,17 +187,6 @@ static const struct of_device_id sbi_cpuidle_state_match[] = { + { }, + }; + +-static bool sbi_suspend_state_is_valid(u32 state) +-{ +- if (state > SBI_HSM_SUSPEND_RET_DEFAULT && +- state < SBI_HSM_SUSPEND_RET_PLATFORM) +- return false; +- if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT && +- state < SBI_HSM_SUSPEND_NON_RET_PLATFORM) +- return false; +- return true; +-} +- + static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) + { + int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state); +@@ -227,7 +196,7 @@ static int sbi_dt_parse_state_node(struct device_node *np, u32 *state) + return err; + } + +- if (!sbi_suspend_state_is_valid(*state)) { ++ if (!riscv_sbi_suspend_state_is_valid(*state)) { + pr_warn("Invalid SBI suspend state %#x\n", *state); + return -EINVAL; + } +@@ -600,16 +569,8 @@ static int __init sbi_cpuidle_init(void) + int ret; + struct platform_device *pdev; + +- /* +- * The SBI HSM suspend function is only available when: +- * 1) SBI version is 0.3 or higher +- * 2) SBI HSM extension is available +- */ +- if ((sbi_spec_version < sbi_mk_version(0, 3)) || +- !sbi_probe_extension(SBI_EXT_HSM)) { +- pr_info("HSM suspend not available\n"); ++ if (!riscv_sbi_hsm_is_supported()) + return 0; +- } + + ret = platform_driver_register(&sbi_cpuidle_driver); + if (ret) +diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig +index e36506471a4f..83510ecf37c3 100644 +--- a/drivers/dma/Kconfig ++++ b/drivers/dma/Kconfig +@@ -489,6 +489,13 @@ config OWL_DMA + help + Enable support for the Actions Semi Owl SoCs DMA controller. + ++config SPACEMIT_K1_DMA ++ bool "Spacemit k1 SoCs DMA support" ++ depends on SOC_SPACEMIT_K1X ++ depends on DMA_ENGINE ++ help ++ Enable support for the Spacemit k1 SoCs DMA controller. 
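++	  The driver plugs into the dmaengine framework and provides
++	  slave, memcpy and cyclic transfers.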
++ + config PCH_DMA + tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" + depends on PCI && (X86_32 || COMPILE_TEST) +diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile +index 83553a97a010..2c9e8b993375 100644 +--- a/drivers/dma/Makefile ++++ b/drivers/dma/Makefile +@@ -66,6 +66,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ + obj-$(CONFIG_PXA_DMA) += pxa_dma.o + obj-$(CONFIG_RENESAS_DMA) += sh/ + obj-$(CONFIG_SF_PDMA) += sf-pdma/ ++obj-$(CONFIG_SPACEMIT_K1_DMA) += spacemit-k1-dma.o + obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o + obj-$(CONFIG_STM32_DMA) += stm32-dma.o + obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o +diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +index 72fb40de58b3..2235a15930bc 100644 +--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c ++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +@@ -114,37 +114,49 @@ static inline void axi_chan_config_write(struct axi_dma_chan *chan, + static inline void axi_dma_disable(struct axi_dma_chip *chip) + { + u32 val; ++ unsigned long flags; + ++ spin_lock_irqsave(&chip->lock, flags); + val = axi_dma_ioread32(chip, DMAC_CFG); + val &= ~DMAC_EN_MASK; + axi_dma_iowrite32(chip, DMAC_CFG, val); ++ spin_unlock_irqrestore(&chip->lock, flags); + } + + static inline void axi_dma_enable(struct axi_dma_chip *chip) + { + u32 val; ++ unsigned long flags; + ++ spin_lock_irqsave(&chip->lock, flags); + val = axi_dma_ioread32(chip, DMAC_CFG); + val |= DMAC_EN_MASK; + axi_dma_iowrite32(chip, DMAC_CFG, val); ++ spin_unlock_irqrestore(&chip->lock, flags); + } + + static inline void axi_dma_irq_disable(struct axi_dma_chip *chip) + { + u32 val; ++ unsigned long flags; + ++ spin_lock_irqsave(&chip->lock, flags); + val = axi_dma_ioread32(chip, DMAC_CFG); + val &= ~INT_EN_MASK; + axi_dma_iowrite32(chip, DMAC_CFG, val); ++ spin_unlock_irqrestore(&chip->lock, flags); + } + + static inline void axi_dma_irq_enable(struct axi_dma_chip *chip) + { + u32 val; ++ unsigned long flags; + ++ spin_lock_irqsave(&chip->lock, flags); + val = axi_dma_ioread32(chip, DMAC_CFG); + val |= INT_EN_MASK; + axi_dma_iowrite32(chip, DMAC_CFG, val); ++ spin_unlock_irqrestore(&chip->lock, flags); + } + + static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask) +@@ -489,7 +501,7 @@ static int dma_chan_alloc_chan_resources(struct dma_chan *dchan) + } + dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan)); + +- pm_runtime_get(chan->chip->dev); ++ pm_runtime_get_sync(chan->chip->dev); + + return 0; + } +@@ -514,7 +526,7 @@ static void dma_chan_free_chan_resources(struct dma_chan *dchan) + "%s: free resources, descriptor still allocated: %u\n", + axi_chan_name(chan), atomic_read(&chan->descs_allocated)); + +- pm_runtime_put(chan->chip->dev); ++ pm_runtime_put_sync(chan->chip->dev); + } + + static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set) +@@ -523,7 +535,7 @@ static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set) + unsigned long reg_value, val; + + if (!chip->apb_regs) { +- dev_err(chip->dev, "apb_regs not initialized\n"); ++ dev_dbg(chip->dev, "apb_regs not initialized\n"); + return; + } + +@@ -894,6 +906,7 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr, + goto err_desc_get; + + desc->chan = chan; ++ chan->direction = DMA_MEM_TO_MEM; + num = 0; + desc->length = 0; + while (len) { +@@ -1141,6 +1154,9 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) + int ret; + 
LIST_HEAD(head);
+
++	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_ALL);
++	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
++
+ 	axi_chan_disable(chan);
+
+ 	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
+@@ -1174,6 +1190,8 @@ static int dma_chan_pause(struct dma_chan *dchan)
+ 	unsigned long flags;
+ 	unsigned int timeout = 20;	/* timeout iterations */
+ 	u32 val;
++	int ret;
++	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
+
+ 	spin_lock_irqsave(&chan->vc.lock, flags);
+@@ -1202,13 +1220,44 @@ static int dma_chan_pause(struct dma_chan *dchan)
+
+ 	spin_unlock_irqrestore(&chan->vc.lock, flags);
+
++	/* save the channel registers so axi_chan_resume() can restore them */
++	chan->ch_sar = axi_chan_ioread32(chan, CH_SAR);
++	chan->ch_dar = axi_chan_ioread32(chan, CH_DAR);
++	chan->ch_dar_h = axi_chan_ioread32(chan, CH_DAR_H);
++	chan->ch_block_ts = axi_chan_ioread32(chan, CH_BLOCK_TS);
++	chan->ch_ctl_l = axi_chan_ioread32(chan, CH_CTL_L);
++	chan->ch_ctl_h = axi_chan_ioread32(chan, CH_CTL_H);
++	chan->ch_cfg_l = axi_chan_ioread32(chan, CH_CFG_L);
++	chan->ch_cfg_h = axi_chan_ioread32(chan, CH_CFG_H);
++	chan->ch_llp = axi_chan_ioread32(chan, CH_LLP);
++
++	axi_chan_disable(chan);
++	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
++					!(val & chan_active), 1000, 100000);
++	if (ret == -ETIMEDOUT)
++		pr_err("%s %s failed to stop\n", __func__, axi_chan_name(chan));
++
+ 	return timeout ? 0 : -EAGAIN;
+ }
+
+ /* Called in chan locked context */
+ static inline void axi_chan_resume(struct axi_dma_chan *chan)
+ {
+-	u32 val;
++	u32 val, irq_mask;
++
++	/* restore the channel state captured in dma_chan_pause() */
++	axi_chan_iowrite32(chan, CH_SAR, chan->ch_sar);
++	axi_chan_iowrite32(chan, CH_DAR, chan->ch_dar);
++	axi_chan_iowrite32(chan, CH_DAR_H, chan->ch_dar_h);
++	axi_chan_iowrite32(chan, CH_BLOCK_TS, chan->ch_block_ts);
++	axi_chan_iowrite32(chan, CH_CTL_L, chan->ch_ctl_l);
++	axi_chan_iowrite32(chan, CH_CTL_H, chan->ch_ctl_h);
++	axi_chan_iowrite32(chan, CH_CFG_L, chan->ch_cfg_l);
++	axi_chan_iowrite32(chan, CH_CFG_H, chan->ch_cfg_h);
++	axi_chan_iowrite32(chan, CH_LLP, chan->ch_llp);
++	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
++	axi_chan_irq_sig_set(chan, irq_mask);
++	/* Generate 'suspend' status but don't generate interrupt */
++	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
++	axi_chan_irq_set(chan, irq_mask);
+
+ 	if (chan->chip->dw->hdata->reg_map_8_channels) {
+ 		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
+@@ -1222,7 +1271,11 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan)
+ 		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
+ 	}
+
++	axi_chan_enable(chan);
++
+ 	chan->is_paused = false;
+ }
+
+ static int dma_chan_resume(struct dma_chan *dchan)
+@@ -1283,6 +1336,40 @@ static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
+ 	return axi_dma_resume(chip);
+ }
+
++static int __maybe_unused axi_dma_sleep_suspend(struct device *dev)
++{
++	/*
++	 * Currently a no-op: the controller, its interrupts and its clocks
++	 * are left untouched across system sleep, and axi_dma_sleep_resume()
++	 * re-enables them unconditionally.
++	 */
++	dev_dbg(dev, "%s, %d\n", __func__, __LINE__);
++
++	return 0;
++}
++
++static int __maybe_unused axi_dma_sleep_resume(struct device *dev)
++{
++	struct axi_dma_chip *chip = dev_get_drvdata(dev);
++	int ret = 0;
++
++	ret = clk_prepare_enable(chip->cfgr_clk);
++	if (ret < 0)
++		return ret;
++
++	ret = clk_prepare_enable(chip->core_clk);
++	if (ret < 0)
++		return ret;
++
++	axi_dma_enable(chip);
++	axi_dma_irq_enable(chip);
++	dev_dbg(dev, "%s, %d\n", __func__, __LINE__);
++
++	return 0;
++}
++
+ static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) + { +@@ -1395,6 +1482,7 @@ static int dw_probe(struct platform_device *pdev) + chip->dev = &pdev->dev; + chip->dw->hdata = hdata; + ++ spin_lock_init(&chip->lock); + chip->irq = platform_get_irq(pdev, 0); + if (chip->irq < 0) + return chip->irq; +@@ -1566,9 +1654,16 @@ static int dw_remove(struct platform_device *pdev) + return 0; + } + ++#ifdef CONFIG_PM ++static const struct dev_pm_ops dw_axi_dma_pm_ops = { ++ SET_LATE_SYSTEM_SLEEP_PM_OPS(axi_dma_sleep_suspend, axi_dma_sleep_resume) ++ SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL) ++}; ++#else + static const struct dev_pm_ops dw_axi_dma_pm_ops = { + SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL) + }; ++#endif + + static const struct of_device_id dw_dma_of_id_table[] = { + { +@@ -1579,6 +1674,9 @@ static const struct of_device_id dw_dma_of_id_table[] = { + }, { + .compatible = "starfive,jh7110-axi-dma", + .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2), ++ }, { ++ .compatible = "xuantie,th1520-axi-dma", ++ .data = (void *)(AXI_DMA_FLAG_USE_CFG2), + }, + {} + }; +diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +index 8521530a34ec..1e4080928e02 100644 +--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h ++++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +@@ -52,6 +52,15 @@ struct axi_dma_chan { + bool cyclic; + /* these other elements are all protected by vc.lock */ + bool is_paused; ++ u32 ch_sar; ++ u32 ch_dar; ++ u32 ch_dar_h; ++ u32 ch_block_ts; ++ u32 ch_ctl_l; ++ u32 ch_ctl_h; ++ u32 ch_cfg_l; ++ u32 ch_cfg_h; ++ u32 ch_llp; + }; + + struct dw_axi_dma { +@@ -71,6 +80,7 @@ struct axi_dma_chip { + struct clk *core_clk; + struct clk *cfgr_clk; + struct dw_axi_dma *dw; ++ spinlock_t lock; + }; + + /* LLI == Linked List Item */ +@@ -166,6 +176,7 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) + /* DMA channel registers offset */ + #define CH_SAR 0x000 /* R/W Chan Source Address */ + #define CH_DAR 0x008 /* R/W Chan Destination Address */ ++#define CH_DAR_H 0x00C + #define CH_BLOCK_TS 0x010 /* R/W Chan Block Transfer Size */ + #define CH_CTL 0x018 /* R/W Chan Control */ + #define CH_CTL_L 0x018 /* R/W Chan Control 00-31 */ +diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c +index 0e1e9ca1c005..cd2b9a6ab621 100644 +--- a/drivers/dma/mv_xor_v2.c ++++ b/drivers/dma/mv_xor_v2.c +@@ -747,8 +747,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev) + if (IS_ERR(xor_dev->clk)) + return PTR_ERR(xor_dev->clk); + +- ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1, +- mv_xor_v2_set_msi_msg); ++ ret = platform_device_msi_init_and_alloc_irqs(&pdev->dev, 1, ++ mv_xor_v2_set_msi_msg); + if (ret) + return ret; + +@@ -851,7 +851,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev) + xor_dev->desc_size * MV_XOR_V2_DESC_NUM, + xor_dev->hw_desq_virt, xor_dev->hw_desq); + free_msi_irqs: +- platform_msi_domain_free_irqs(&pdev->dev); ++ platform_device_msi_free_irqs_all(&pdev->dev); + return ret; + } + +@@ -867,7 +867,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev) + + devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev); + +- platform_msi_domain_free_irqs(&pdev->dev); ++ platform_device_msi_free_irqs_all(&pdev->dev); + + tasklet_kill(&xor_dev->irq_tasklet); + +diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c +index 834ae519c15d..f2b299c23b1e 100644 +--- 
a/drivers/dma/qcom/hidma.c ++++ b/drivers/dma/qcom/hidma.c +@@ -696,7 +696,7 @@ static void hidma_free_msis(struct hidma_dev *dmadev) + devm_free_irq(dev, virq, &dmadev->lldev); + } + +- platform_msi_domain_free_irqs(dev); ++ platform_device_msi_free_irqs_all(dev); + #endif + } + +@@ -706,8 +706,8 @@ static int hidma_request_msi(struct hidma_dev *dmadev, + #ifdef CONFIG_GENERIC_MSI_IRQ + int rc, i, virq; + +- rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS, +- hidma_write_msi_msg); ++ rc = platform_device_msi_init_and_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS, ++ hidma_write_msi_msg); + if (rc) + return rc; + +diff --git a/drivers/dma/spacemit-k1-dma.c b/drivers/dma/spacemit-k1-dma.c +new file mode 100644 +index 000000000000..d730ad085e0b +--- /dev/null ++++ b/drivers/dma/spacemit-k1-dma.c +@@ -0,0 +1,1515 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * mmp dma controller driver ++ * Copyright 2012 Marvell International Ltd. ++ * Copyright (c) 2023, spacemit Corporation. ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "dmaengine.h" ++ ++#define DDADRH(n) (0x0300 + ((n) << 4)) ++#define DSADRH(n) (0x0304 + ((n) << 4)) ++#define DTADRH(n) (0x0308 + ((n) << 4)) ++#define DCSR_LPAEEN BIT(21) ++#define DRCMR_INVALID 100 ++#define DCMD_BURST64 (4 << 16) ++ ++#define DCSR 0x0000 ++#define DALGN 0x00a0 ++#define DINT 0x00f0 ++#define DDADR 0x0200 ++#define DSADR(n) (0x0204 + ((n) << 4)) ++#define DTADR(n) (0x0208 + ((n) << 4)) ++#define DCMD 0x020c ++ ++#define DCSR_RUN BIT(31) /* Run Bit (read / write) */ ++#define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */ ++#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */ ++#define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */ ++#define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */ ++#define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */ ++#define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */ ++#define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */ ++ ++#define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */ ++#define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */ ++#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */ ++#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */ ++#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */ ++#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */ ++#define DCSR_EORINTR BIT(9) /* The end of Receive */ ++ ++#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2)) ++#define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */ ++#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ ++ ++#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ ++#define DDADR_STOP BIT(0) /* Stop (read / write) */ ++ ++#define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */ ++#define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */ ++#define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */ ++#define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */ ++#define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */ ++#define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */ ++#define DCMD_ENDIAN BIT(18) /* Device Endian-ness. 
*/ ++#define DCMD_BURST8 (1 << 16) /* 8 byte burst */ ++#define DCMD_BURST16 (2 << 16) /* 16 byte burst */ ++#define DCMD_BURST32 (3 << 16) /* 32 byte burst */ ++#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ ++#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ ++#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ ++#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ ++ ++#define PDMA_MAX_DESC_BYTES DCMD_LENGTH ++#define PDMA_RESRV_CHAN_ARGS_NUM (2) /* reserved channel arguments count in dts */ ++ ++struct mmp_pdma_desc_hw { ++ u32 ddadr; /* Points to the next descriptor + flags */ ++ u32 dsadr; /* DSADR value for the current transfer */ ++ u32 dtadr; /* DTADR value for the current transfer */ ++ u32 dcmd; /* DCMD value for the current transfer */ ++ u32 ddadrh; /* Points to the next descriptor + flags */ ++ u32 dsadrh; /* DSADR value for the current transfer */ ++ u32 dtadrh; /* DTADR value for the current transfer */ ++ u32 rsvd; /* DCMD value for the current transfer */ ++} __aligned(64); ++ ++struct mmp_pdma_desc_sw { ++ struct mmp_pdma_desc_hw desc; ++ struct list_head node; ++ struct list_head tx_list; ++ struct dma_async_tx_descriptor async_tx; ++}; ++ ++struct mmp_pdma_phy; ++ ++struct mmp_pdma_chan { ++ struct device *dev; ++ struct dma_chan chan; ++ struct dma_async_tx_descriptor desc; ++ struct mmp_pdma_phy *phy; ++ enum dma_transfer_direction dir; ++ struct dma_slave_config slave_config; ++ struct mmp_pdma_desc_sw *cyclic_first; ++ ++ /* channel's basic info */ ++ struct tasklet_struct tasklet; ++ u32 dcmd; ++ u32 drcmr; ++ u32 dev_addr; ++ ++ /* list for desc */ ++ spinlock_t desc_lock; /* Descriptor list lock */ ++ struct list_head chain_pending; /* Link descriptors queue for pending */ ++ struct list_head chain_running; /* Link descriptors queue for running */ ++ bool idle; /* channel statue machine */ ++ bool byte_align; ++ ++ int user_do_qos; ++ int qos_count; /* Per-channel qos count */ ++ enum dma_status status; /* channel state machine */ ++ u32 bytes_residue; ++ ++ struct dma_pool *desc_pool; /* Descriptors pool */ ++}; ++ ++struct mmp_pdma_phy { ++ int idx; ++ void __iomem *base; ++ struct mmp_pdma_chan *vchan; ++}; ++ ++struct reserved_chan { ++ int chan_id; ++ int drcmr; ++}; ++ ++struct mmp_pdma_device { ++ int dma_channels; ++ int nr_reserved_channels; ++ struct reserved_chan *reserved_channels; ++ s32 lpm_qos; ++ struct clk *clk; ++ struct reset_control *resets; ++ int max_burst_size; ++ void __iomem *base; ++ struct device *dev; ++ struct dma_device device; ++ struct mmp_pdma_phy *phy; ++ spinlock_t phy_lock; /* protect alloc/free phy channels */ ++}; ++ ++#define tx_to_mmp_pdma_desc(tx) \ ++ container_of(tx, struct mmp_pdma_desc_sw, async_tx) ++#define to_mmp_pdma_desc(lh) \ ++ container_of(lh, struct mmp_pdma_desc_sw, node) ++#define to_mmp_pdma_chan(dchan) \ ++ container_of(dchan, struct mmp_pdma_chan, chan) ++#define to_mmp_pdma_dev(dmadev) \ ++ container_of(dmadev, struct mmp_pdma_device, device) ++ ++static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) ++{ ++ u32 ddadrh; ++ u32 reg = (phy->idx << 4) + DDADR; ++ ++ writel(addr & 0xffffffff, phy->base + reg); ++ /* config higher bits for desc address */ ++ ddadrh = (addr >> 32); ++ writel(ddadrh, phy->base + DDADRH(phy->idx)); ++} ++ ++static void enable_chan(struct mmp_pdma_phy *phy) ++{ ++ u32 reg, dalgn; ++ u32 dcsr; ++ unsigned long flags; ++ struct mmp_pdma_device *pdev; ++ ++ if (phy == NULL) ++ return; ++ ++ if (!phy->vchan) ++ return; ++ ++ pdev = 
to_mmp_pdma_dev(phy->vchan->chan.device); ++ ++ spin_lock_irqsave(&pdev->phy_lock, flags); ++ ++ reg = DRCMR(phy->vchan->drcmr); ++ writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); ++ ++ dalgn = readl(phy->base + DALGN); ++ if (phy->vchan->byte_align) ++ dalgn |= 1 << phy->idx; ++ else ++ dalgn &= ~(1 << phy->idx); ++ writel(dalgn, phy->base + DALGN); ++ ++ reg = (phy->idx << 2) + DCSR; ++ ++ dcsr = readl(phy->base + reg); ++ dcsr |= (DCSR_RUN | DCSR_EORIRQEN | DCSR_EORSTOPEN); ++ /* use long descriptor mode: set DCSR_LPAEEN bit */ ++ dcsr |= DCSR_LPAEEN; ++ writel(dcsr, phy->base + reg); ++ ++ spin_unlock_irqrestore(&pdev->phy_lock, flags); ++} ++ ++static void disable_chan(struct mmp_pdma_phy *phy) ++{ ++ u32 reg; ++ u32 dcsr, cnt = 1000; ++ ++ if (!phy) ++ return; ++ ++ reg = (phy->idx << 2) + DCSR; ++ ++ dcsr = readl(phy->base + reg); ++ dcsr &= ~(DCSR_RUN | DCSR_EORIRQEN | DCSR_EORSTOPEN); ++ /* use long descriptor mode: set DCSR_LPAEEN bit */ ++ dcsr &= ~DCSR_LPAEEN; ++ writel(dcsr, phy->base + reg); ++ ++ /* ensure dma is stopped. */ ++ dcsr = readl(phy->base + reg); ++ while (!(dcsr & (0x1 << 3)) && --cnt) { ++ udelay(10); ++ dcsr = readl(phy->base + reg); ++ } ++ ++ WARN_ON(!cnt); ++} ++ ++static int clear_chan_irq(struct mmp_pdma_phy *phy) ++{ ++ u32 dcsr; ++ u32 dint = readl(phy->base + DINT); ++ u32 reg = (phy->idx << 2) + DCSR; ++ ++ if (!(dint & BIT(phy->idx))) ++ return -EAGAIN; ++ ++ /* clear irq */ ++ dcsr = readl(phy->base + reg); ++ writel(dcsr, phy->base + reg); ++ if ((dcsr & DCSR_BUSERR) && (phy->vchan)) ++ dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); ++ ++ return 0; ++} ++ ++static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) ++{ ++ struct mmp_pdma_phy *phy = dev_id; ++ struct mmp_pdma_chan *pchan = phy->vchan; ++ ++ if (clear_chan_irq(phy) != 0) ++ return IRQ_NONE; ++ ++ if (pchan) ++ tasklet_schedule(&pchan->tasklet); ++ ++ return IRQ_HANDLED; ++} ++ ++static bool is_channel_reserved(struct mmp_pdma_device *pdev, int chan_id) ++{ ++ int i; ++ ++ for (i = 0; i < pdev->nr_reserved_channels; i++) { ++ if (chan_id == pdev->reserved_channels[i].chan_id) ++ return true; ++ } ++ ++ return false; ++} ++ ++static struct mmp_pdma_phy *lookup_phy_for_drcmr(struct mmp_pdma_device *pdev, int drcmr) ++{ ++ int i; ++ int chan_id; ++ struct mmp_pdma_phy *phy; ++ ++ for (i = 0; i < pdev->nr_reserved_channels; i++) { ++ if (drcmr == pdev->reserved_channels[i].drcmr) { ++ chan_id = pdev->reserved_channels[i].chan_id; ++ phy = &pdev->phy[chan_id]; ++ return phy; ++ } ++ } ++ ++ return NULL; ++} ++ ++static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) ++{ ++ struct mmp_pdma_device *pdev = dev_id; ++ struct mmp_pdma_phy *phy; ++ u32 dint = readl(pdev->base + DINT); ++ int i, ret; ++ int irq_num = 0; ++ unsigned long flags; ++ ++ while (dint) { ++ i = __ffs(dint); ++ /* only handle interrupts belonging to pdma driver*/ ++ if (i >= pdev->dma_channels) ++ break; ++ ++ dint &= (dint - 1); ++ phy = &pdev->phy[i]; ++ spin_lock_irqsave(&pdev->phy_lock, flags); ++ ++ ret = mmp_pdma_chan_handler(irq, phy); ++ ++ spin_unlock_irqrestore(&pdev->phy_lock, flags); ++ if (ret == IRQ_HANDLED) ++ irq_num++; ++ } ++ ++ if (irq_num) ++ return IRQ_HANDLED; ++ ++ return IRQ_NONE; ++} ++ ++/* ++ * lookup free phy channel as descending priority ++ * dma channel priorities ++ * ch 0 - 3, 16 - 19 <--> (0) ++ * ch 4 - 7, 20 - 23 <--> (1) ++ * ch 8 - 11, 24 - 27 <--> (2) ++ * ch 12 - 15, 28 - 31 <--> (3) ++ */ ++static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) ++{ ++ int 
prio, i; ++ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); ++ struct mmp_pdma_phy *phy, *found = NULL; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&pdev->phy_lock, flags); ++ ++ phy = lookup_phy_for_drcmr(pdev, pchan->drcmr); ++ ++ if (phy != NULL) { ++ if (!phy->vchan) { ++ phy->vchan = pchan; ++ found = phy; ++ } ++ ++ goto out_unlock; ++ } ++ ++ for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) { ++ for (i = 0; i < pdev->dma_channels; i++) { ++ if (prio != (i & 0xf) >> 2) ++ continue; ++ ++ if (is_channel_reserved(pdev, i)) ++ continue; ++ phy = &pdev->phy[i]; ++ if (!phy->vchan) { ++ phy->vchan = pchan; ++ found = phy; ++ goto out_unlock; ++ } ++ } ++ } ++ ++out_unlock: ++ spin_unlock_irqrestore(&pdev->phy_lock, flags); ++ return found; ++} ++ ++static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan) ++{ ++ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); ++ unsigned long flags; ++ u32 reg; ++ ++ if (!pchan->phy) ++ return; ++ ++ /* clear the channel mapping in DRCMR */ ++ reg = DRCMR(pchan->drcmr); ++ writel(0, pchan->phy->base + reg); ++ ++ spin_lock_irqsave(&pdev->phy_lock, flags); ++ pchan->phy->vchan = NULL; ++ pchan->phy = NULL; ++ ++ spin_unlock_irqrestore(&pdev->phy_lock, flags); ++} ++ ++/* ++ * start_pending_queue - transfer any pending transactions ++ * pending list ==> running list ++ */ ++static int start_pending_queue(struct mmp_pdma_chan *chan) ++{ ++ struct mmp_pdma_desc_sw *desc; ++ struct mmp_pdma_desc_sw *_desc; ++ ++ /* still in running, irq will start the pending list */ ++ if (chan->status == DMA_IN_PROGRESS) { ++ dev_dbg(chan->dev, "DMA controller still busy\n"); ++ return -1; ++ } ++ ++ if (list_empty(&chan->chain_pending)) { ++ /* chance to re-fetch phy channel with higher prio */ ++ mmp_pdma_free_phy(chan); ++ dev_dbg(chan->dev, "no pending list\n"); ++ ++ return -1; ++ } ++ ++ if (!chan->phy) { ++ chan->phy = lookup_phy(chan); ++ if (!chan->phy) { ++ dev_dbg(chan->dev, "no free dma channel\n"); ++ ++ return -1; ++ } ++ } ++ ++ /* ++ * pending -> running ++ * reintilize pending list ++ */ ++ list_for_each_entry_safe(desc, _desc, &chan->chain_pending, node) { ++ list_del(&desc->node); ++ list_add_tail(&desc->node, &chan->chain_running); ++ if (desc->desc.ddadr & DDADR_STOP) ++ break; ++ } ++ ++ desc = list_first_entry(&chan->chain_running, ++ struct mmp_pdma_desc_sw, node); ++ ++ /* ++ * Program the descriptor's address into the DMA controller, ++ * then start the DMA transaction ++ */ ++ set_desc(chan->phy, desc->async_tx.phys); ++ enable_chan(chan->phy); ++ chan->idle = false; ++ chan->status = DMA_IN_PROGRESS; ++ chan->bytes_residue = 0; ++ return 0; ++} ++ ++/* desc->tx_list ==> pending list */ ++static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); ++ struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx); ++ struct mmp_pdma_desc_sw *child; ++ unsigned long flags; ++ dma_cookie_t cookie = -EBUSY; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ ++ list_for_each_entry(child, &desc->tx_list, node) { ++ cookie = dma_cookie_assign(&child->async_tx); ++ } ++ ++ /* softly link to pending list - desc->tx_list ==> pending list */ ++ list_splice_tail_init(&desc->tx_list, &chan->chain_pending); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ return cookie; ++} ++ ++static int mmp_pdma_config_write(struct dma_chan *dchan, ++ struct dma_slave_config *cfg, ++ enum dma_transfer_direction direction) ++{ 
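++	/*
++	 * Translate the generic dma_slave_config into the controller's DCMD
++	 * encoding: the direction selects flow control and address increment,
++	 * the bus width maps to a DCMD_WIDTHx field, the maxburst value to a
++	 * DCMD_BURSTx field, and the device-side address is latched into
++	 * chan->dev_addr.
++	 */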
++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ u32 maxburst = 0, addr = 0; ++ enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; ++ ++ if (!dchan) ++ return -EINVAL; ++ ++ if (direction == DMA_DEV_TO_MEM) { ++ chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; ++ maxburst = cfg->src_maxburst; ++ width = cfg->src_addr_width; ++ addr = cfg->src_addr; ++ } else if (direction == DMA_MEM_TO_DEV) { ++ chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; ++ maxburst = cfg->dst_maxburst; ++ width = cfg->dst_addr_width; ++ addr = cfg->dst_addr; ++ } ++ ++ if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) ++ chan->dcmd |= DCMD_WIDTH1; ++ else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) ++ chan->dcmd |= DCMD_WIDTH2; ++ else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) ++ chan->dcmd |= DCMD_WIDTH4; ++ ++ if (maxburst == 8) ++ chan->dcmd |= DCMD_BURST8; ++ else if (maxburst == 16) ++ chan->dcmd |= DCMD_BURST16; ++ else if (maxburst == 32) ++ chan->dcmd |= DCMD_BURST32; ++ ++ chan->dir = direction; ++ chan->dev_addr = addr; ++ ++ return 0; ++} ++ ++static struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) ++{ ++ struct mmp_pdma_desc_sw *desc; ++ dma_addr_t pdesc; ++ ++ desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc); ++ if (!desc) { ++ dev_err(chan->dev, "out of memory for link descriptor\n"); ++ return NULL; ++ } ++ ++ INIT_LIST_HEAD(&desc->tx_list); ++ dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); ++ /* each desc has submit */ ++ desc->async_tx.tx_submit = mmp_pdma_tx_submit; ++ desc->async_tx.phys = pdesc; ++ ++ return desc; ++} ++ ++/* ++ * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel. ++ * ++ * This function will create a dma pool for descriptor allocation. ++ * Request irq only when channel is requested ++ * Return - The number of allocated descriptors. ++ */ ++static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ ++ if (chan->desc_pool) ++ return 1; ++ ++ chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device), ++ chan->dev, ++ sizeof(struct mmp_pdma_desc_sw), ++ __alignof__(struct mmp_pdma_desc_sw), ++ 0); ++ if (!chan->desc_pool) { ++ dev_err(chan->dev, "unable to allocate descriptor pool\n"); ++ return -ENOMEM; ++ } ++ ++ chan->status = DMA_COMPLETE; ++ chan->dir = 0; ++ chan->dcmd = 0; ++ ++ mmp_pdma_free_phy(chan); ++ ++ chan->idle = true; ++ chan->dev_addr = 0; ++ return 1; ++} ++ ++static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, ++ struct list_head *list) ++{ ++ struct mmp_pdma_desc_sw *desc, *_desc; ++ ++ list_for_each_entry_safe(desc, _desc, list, node) { ++ list_del(&desc->node); ++ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); ++ } ++} ++ ++static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ unsigned long flags; ++ ++ /* wait until task ends if necessary */ ++ tasklet_kill(&chan->tasklet); ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ mmp_pdma_free_desc_list(chan, &chan->chain_pending); ++ mmp_pdma_free_desc_list(chan, &chan->chain_running); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ dma_pool_destroy(chan->desc_pool); ++ chan->desc_pool = NULL; ++ chan->idle = true; ++ chan->dev_addr = 0; ++ ++ chan->status = DMA_COMPLETE; ++ chan->dir = 0; ++ chan->dcmd = 0; ++ ++ mmp_pdma_free_phy(chan); ++} ++ ++/* ++ * Per-channel qos get/put function. 
This function ensures that pm_ ++ * runtime_get/put are not called multi times for one channel. ++ * This guarantees pm_runtime_get/put always match for the entire device. ++ */ ++static void mmp_pdma_qos_get(struct mmp_pdma_chan *chan) ++{ ++ unsigned long flags; ++ ++ if (chan->user_do_qos) ++ return; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ if (chan->qos_count == 0) { ++ chan->qos_count = 1; ++ /* ++ * Safe in spin_lock because it's marked as irq safe. ++ * Similar case for mmp_pdma_qos_put(). ++ */ ++ pm_runtime_get_sync(chan->dev); ++ } ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++} ++ ++static void mmp_pdma_qos_put(struct mmp_pdma_chan *chan) ++{ ++ unsigned long flags; ++ ++ if (chan->user_do_qos) ++ return; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ if (chan->qos_count == 1) { ++ chan->qos_count = 0; ++ pm_runtime_put_autosuspend(chan->dev); ++ } ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++} ++ ++#define INVALID_BURST_SETTING -1 ++#define DEFAULT_MAX_BURST_SIZE 32 ++ ++static int get_max_burst_setting(unsigned int max_burst_size) ++{ ++ switch (max_burst_size) { ++ case 8: ++ return DCMD_BURST8; ++ case 16: ++ return DCMD_BURST16; ++ case 32: ++ return DCMD_BURST32; ++ case 64: ++ return DCMD_BURST64; ++ default: ++ return INVALID_BURST_SETTING; ++ } ++} ++ ++static struct dma_async_tx_descriptor * ++mmp_pdma_prep_memcpy(struct dma_chan *dchan, ++ dma_addr_t dma_dst, dma_addr_t dma_src, ++ size_t len, unsigned long flags) ++{ ++ struct mmp_pdma_chan *chan; ++ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; ++ size_t copy = 0; ++ struct mmp_pdma_device *dev; ++ int value; ++ ++ if (!dchan) ++ return NULL; ++ ++ if (!len) ++ return NULL; ++ ++ chan = to_mmp_pdma_chan(dchan); ++ chan->byte_align = false; ++ ++ if (!chan->dir) { ++ chan->dir = DMA_MEM_TO_MEM; ++ chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; ++ dev = to_mmp_pdma_dev(dchan->device); ++ value = get_max_burst_setting(dev->max_burst_size); ++ ++ WARN_ON(value == INVALID_BURST_SETTING); ++ ++ chan->dcmd |= value; ++ } ++ ++ do { ++ /* Allocate the link descriptor from DMA pool */ ++ new = mmp_pdma_alloc_descriptor(chan); ++ if (!new) { ++ dev_err(chan->dev, "no memory for desc\n"); ++ goto fail; ++ } ++ ++ copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); ++ if (dma_src & 0x7 || dma_dst & 0x7) ++ chan->byte_align = true; ++ ++ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); ++ ++ /* ++ * Check whether descriptor/source-addr/target-addr is in ++ * region higher than 4G. If so, set related higher bits to 1. 
++ */ ++ if (chan->dir == DMA_MEM_TO_DEV) { ++ new->desc.dsadr = dma_src & 0xffffffff; ++ new->desc.dtadr = dma_dst; ++ new->desc.dsadrh = (dma_src >> 32); ++ new->desc.dtadrh = 0; ++ } else if (chan->dir == DMA_DEV_TO_MEM) { ++ new->desc.dsadr = dma_src; ++ new->desc.dtadr = dma_dst & 0xffffffff; ++ new->desc.dsadrh = 0; ++ new->desc.dtadrh = (dma_dst >> 32); ++ } else if (chan->dir == DMA_MEM_TO_MEM) { ++ new->desc.dsadr = dma_src & 0xffffffff; ++ new->desc.dtadr = dma_dst & 0xffffffff; ++ new->desc.dsadrh = (dma_src >> 32); ++ new->desc.dtadrh = (dma_dst >> 32); ++ } else { ++ dev_err(chan->dev, "wrong direction: 0x%x\n", chan->dir); ++ goto fail; ++ } ++ ++ if (!first) ++ first = new; ++ else { ++ prev->desc.ddadr = new->async_tx.phys; ++ prev->desc.ddadrh = (new->async_tx.phys >> 32); ++ } ++ ++ new->async_tx.cookie = 0; ++ async_tx_ack(&new->async_tx); ++ ++ prev = new; ++ len -= copy; ++ ++ if (chan->dir == DMA_MEM_TO_DEV) { ++ dma_src += copy; ++ } else if (chan->dir == DMA_DEV_TO_MEM) { ++ dma_dst += copy; ++ } else if (chan->dir == DMA_MEM_TO_MEM) { ++ dma_src += copy; ++ dma_dst += copy; ++ } ++ ++ /* Insert the link descriptor to the LD ring */ ++ list_add_tail(&new->node, &first->tx_list); ++ } while (len); ++ ++ first->async_tx.flags = flags; /* client is in control of this ack */ ++ first->async_tx.cookie = -EBUSY; ++ ++ /* last desc and fire IRQ */ ++ new->desc.ddadr = DDADR_STOP; ++ new->desc.dcmd |= DCMD_ENDIRQEN; ++ ++ chan->cyclic_first = NULL; ++ ++ return &first->async_tx; ++ ++fail: ++ if (first) ++ mmp_pdma_free_desc_list(chan, &first->tx_list); ++ return NULL; ++} ++ ++static struct dma_async_tx_descriptor * ++mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, ++ unsigned int sg_len, enum dma_transfer_direction dir, ++ unsigned long flags, void *context) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; ++ size_t len, avail; ++ struct scatterlist *sg; ++ dma_addr_t addr; ++ int i; ++ ++ if ((sgl == NULL) || (sg_len == 0)) ++ return NULL; ++ ++ chan->byte_align = true; ++ ++ mmp_pdma_config_write(dchan, &chan->slave_config, dir); ++ ++ for_each_sg(sgl, sg, sg_len, i) { ++ addr = sg_dma_address(sg); ++ avail = sg_dma_len(sgl); ++ ++ do { ++ len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); ++ if (addr & 0x7) ++ chan->byte_align = true; ++ ++ /* allocate and populate the descriptor */ ++ new = mmp_pdma_alloc_descriptor(chan); ++ if (!new) { ++ dev_err(chan->dev, "no memory for desc\n"); ++ goto fail; ++ } ++ ++ new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); ++ ++ /* ++ * Check whether descriptor/source-addr/target-addr is in ++ * region higher than 4G. If so, set related higher bits to 1. 
++ */ ++ if (dir == DMA_MEM_TO_DEV) { ++ new->desc.dsadr = addr & 0xffffffff; ++ new->desc.dtadr = chan->dev_addr; ++ new->desc.dsadrh = (addr >> 32); ++ new->desc.dtadrh = 0; ++ } else if (dir == DMA_DEV_TO_MEM) { ++ new->desc.dsadr = chan->dev_addr; ++ new->desc.dtadr = addr & 0xffffffff; ++ new->desc.dsadrh = 0; ++ new->desc.dtadrh = (addr >> 32); ++ } else { ++ dev_err(chan->dev, "wrong direction: 0x%x\n", chan->dir); ++ goto fail; ++ } ++ ++ if (!first) ++ first = new; ++ else { ++ prev->desc.ddadr = new->async_tx.phys; ++ prev->desc.ddadrh = (new->async_tx.phys >> 32); ++ } ++ ++ new->async_tx.cookie = 0; ++ async_tx_ack(&new->async_tx); ++ prev = new; ++ ++ /* Insert the link descriptor to the LD ring */ ++ list_add_tail(&new->node, &first->tx_list); ++ ++ /* update metadata */ ++ addr += len; ++ avail -= len; ++ } while (avail); ++ } ++ ++ first->async_tx.cookie = -EBUSY; ++ first->async_tx.flags = flags; ++ ++ /* last desc and fire IRQ */ ++ new->desc.ddadr = DDADR_STOP; ++ new->desc.dcmd |= DCMD_ENDIRQEN; ++ ++ chan->dir = dir; ++ chan->cyclic_first = NULL; ++ ++ return &first->async_tx; ++ ++fail: ++ if (first) ++ mmp_pdma_free_desc_list(chan, &first->tx_list); ++ return NULL; ++} ++ ++static struct dma_async_tx_descriptor * ++mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, ++ dma_addr_t buf_addr, size_t len, size_t period_len, ++ enum dma_transfer_direction direction, ++ unsigned long flags) ++{ ++ struct mmp_pdma_chan *chan; ++ struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; ++ dma_addr_t dma_src, dma_dst; ++ dma_addr_t dma_srch, dma_dsth; ++ ++ if (!dchan || !len || !period_len) ++ return NULL; ++ ++ /* the buffer length must be a multiple of period_len */ ++ if (len % period_len != 0) ++ return NULL; ++ ++ if (period_len > PDMA_MAX_DESC_BYTES) ++ return NULL; ++ ++ chan = to_mmp_pdma_chan(dchan); ++ mmp_pdma_config_write(dchan, &chan->slave_config, direction); ++ ++ switch (direction) { ++ case DMA_MEM_TO_DEV: ++ dma_src = buf_addr & 0xffffffff; ++ dma_dst = chan->dev_addr; ++ dma_srch = (buf_addr >> 32); ++ dma_dsth = 0; ++ break; ++ case DMA_DEV_TO_MEM: ++ dma_dst = buf_addr & 0xffffffff; ++ dma_src = chan->dev_addr; ++ dma_dsth = (buf_addr >> 32); ++ dma_srch = 0; ++ break; ++ default: ++ dev_err(chan->dev, "Unsupported direction for cyclic DMA\n"); ++ return NULL; ++ } ++ ++ chan->dir = direction; ++ ++ do { ++ /* Allocate the link descriptor from DMA pool */ ++ new = mmp_pdma_alloc_descriptor(chan); ++ if (!new) { ++ dev_err(chan->dev, "no memory for desc\n"); ++ goto fail; ++ } ++ ++ new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | ++ (DCMD_LENGTH & period_len)); ++ new->desc.dsadr = dma_src; ++ new->desc.dtadr = dma_dst; ++ new->desc.dsadrh = dma_dsth; ++ new->desc.dtadrh = dma_srch; ++ ++ if (!first) ++ first = new; ++ else { ++ prev->desc.ddadr = new->async_tx.phys; ++ prev->desc.ddadrh = (new->async_tx.phys >> 32); ++ } ++ ++ new->async_tx.cookie = 0; ++ async_tx_ack(&new->async_tx); ++ ++ prev = new; ++ len -= period_len; ++ ++ if (chan->dir == DMA_MEM_TO_DEV) ++ dma_src += period_len; ++ else ++ dma_dst += period_len; ++ ++ /* Insert the link descriptor to the LD ring */ ++ list_add_tail(&new->node, &first->tx_list); ++ } while (len); ++ ++ first->async_tx.flags = flags; /* client is in control of this ack */ ++ first->async_tx.cookie = -EBUSY; ++ ++ /* make the cyclic link */ ++ new->desc.ddadr = first->async_tx.phys; ++ chan->cyclic_first = first; ++ ++ return &first->async_tx; ++ ++fail: ++ if (first) ++ mmp_pdma_free_desc_list(chan, 
&first->tx_list); ++ return NULL; ++} ++ ++static int mmp_pdma_pause_chan(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ ++ if (!chan->phy) ++ return -1; ++ ++ disable_chan(chan->phy); ++ chan->status = DMA_PAUSED; ++ ++ return 0; ++} ++ ++static int mmp_pdma_config(struct dma_chan *dchan, ++ struct dma_slave_config *cfg) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ ++ memcpy(&chan->slave_config, cfg, sizeof(*cfg)); ++ return 0; ++} ++ ++static int mmp_pdma_terminate_all(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ unsigned long flags; ++ ++ if (!dchan) ++ return -EINVAL; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ disable_chan(chan->phy); ++ chan->status = DMA_COMPLETE; ++ mmp_pdma_free_phy(chan); ++ ++ mmp_pdma_free_desc_list(chan, &chan->chain_pending); ++ mmp_pdma_free_desc_list(chan, &chan->chain_running); ++ chan->bytes_residue = 0; ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ chan->idle = true; ++ ++ mmp_pdma_qos_put(chan); ++ ++ return 0; ++} ++ ++static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan, ++ dma_cookie_t cookie) ++{ ++ struct mmp_pdma_desc_sw *sw; ++ u32 curr, residue = 0; ++ bool passed = false; ++ bool cyclic = chan->cyclic_first != NULL; ++ ++ /* ++ * If the channel does not have a phy pointer anymore, it has already ++ * been completed. Therefore, its residue is 0. ++ */ ++ if (!chan->phy) ++ return chan->bytes_residue; /* special case for EORIRQEN */ ++ ++ if (chan->dir == DMA_DEV_TO_MEM) ++ curr = readl(chan->phy->base + DTADR(chan->phy->idx)); ++ else ++ curr = readl(chan->phy->base + DSADR(chan->phy->idx)); ++ ++ list_for_each_entry(sw, &chan->chain_running, node) { ++ u32 start, end, len; ++ ++ if (chan->dir == DMA_DEV_TO_MEM) ++ start = sw->desc.dtadr; ++ else ++ start = sw->desc.dsadr; ++ ++ len = sw->desc.dcmd & DCMD_LENGTH; ++ end = start + len; ++ ++ /* ++ * 'passed' will be latched once we found the descriptor which ++ * lies inside the boundaries of the curr pointer. All ++ * descriptors that occur in the list _after_ we found that ++ * partially handled descriptor are still to be processed and ++ * are hence added to the residual bytes counter. ++ */ ++ if (passed) { ++ residue += len; ++ } else if (curr >= start && curr <= end) { ++ residue += end - curr; ++ passed = true; ++ } ++ ++ /* ++ * Descriptors that have the ENDIRQEN bit set mark the end of a ++ * transaction chain, and the cookie assigned with it has been ++ * returned previously from mmp_pdma_tx_submit(). ++ * ++ * In case we have multiple transactions in the running chain, ++ * and the cookie does not match the one the user asked us ++ * about, reset the state variables and start over. ++ * ++ * This logic does not apply to cyclic transactions, where all ++ * descriptors have the ENDIRQEN bit set, and for which we ++ * can't have multiple transactions on one channel anyway. 
++ */ ++ if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN)) ++ continue; ++ ++ if (sw->async_tx.cookie == cookie) ++ return residue; ++ ++ residue = 0; ++ passed = false; ++ } ++ ++ /* We should only get here in case of cyclic transactions */ ++ return residue; ++} ++ ++static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, ++ dma_cookie_t cookie, ++ struct dma_tx_state *txstate) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ enum dma_status ret; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ ret = dma_cookie_status(dchan, cookie, txstate); ++ if (likely(ret != DMA_ERROR)) ++ dma_set_residue(txstate, mmp_pdma_residue(chan, cookie)); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ if (ret == DMA_COMPLETE) ++ return ret; ++ else ++ return chan->status; ++} ++ ++/* ++ * mmp_pdma_issue_pending - Issue the DMA start command ++ * pending list ==> running list ++ */ ++static void mmp_pdma_issue_pending(struct dma_chan *dchan) ++{ ++ struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); ++ unsigned long flags; ++ int ret = 0; ++ ++ mmp_pdma_qos_get(chan); ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ ret = start_pending_queue(chan); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ if (ret) ++ mmp_pdma_qos_put(chan); ++} ++ ++/* ++ * dma_do_tasklet ++ * Do call back ++ * Start pending list ++ */ ++static void dma_do_tasklet(struct tasklet_struct *t) ++{ ++ struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet); ++ struct mmp_pdma_desc_sw *desc, *_desc; ++ LIST_HEAD(chain_cleanup); ++ unsigned long flags; ++ struct dmaengine_desc_callback cb; ++ ++ int ret = 0; ++ ++ /* return if this channel has been stopped */ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ if (chan->status == DMA_COMPLETE) { ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ return; ++ } ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ if (chan->cyclic_first) { ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ desc = chan->cyclic_first; ++ dmaengine_desc_get_callback(&desc->async_tx, &cb); ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ dmaengine_desc_callback_invoke(&cb, NULL); ++ ++ return; ++ } ++ ++ /* submit pending list; callback for each desc; free desc */ ++ spin_lock_irqsave(&chan->desc_lock, flags); ++ ++ /* special for the EORIRQEN case, residue is not 0 */ ++ list_for_each_entry(desc, &chan->chain_running, node) { ++ if (desc->desc.dcmd & DCMD_ENDIRQEN) { ++ chan->bytes_residue = ++ mmp_pdma_residue(chan, desc->async_tx.cookie); ++ break; ++ } ++ } ++ ++ list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) { ++ /* ++ * move the descriptors to a temporary list so we can drop ++ * the lock during the entire cleanup operation ++ */ ++ list_move(&desc->node, &chain_cleanup); ++ ++ /* ++ * Look for the first list entry which has the ENDIRQEN flag ++ * set. That is the descriptor we got an interrupt for, so ++ * complete that transaction and its cookie. ++ */ ++ if (desc->desc.dcmd & DCMD_ENDIRQEN) { ++ dma_cookie_t cookie = desc->async_tx.cookie; ++ ++ dma_cookie_complete(&desc->async_tx); ++ dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); ++ break; ++ } ++ } ++ ++ /* ++ * The hardware is idle and ready for more when the ++ * chain_running list is empty. ++ */ ++ chan->status = list_empty(&chan->chain_running) ? 
++ DMA_COMPLETE : DMA_IN_PROGRESS; ++ ++ /* Start any pending transactions automatically */ ++ ret = start_pending_queue(chan); ++ ++ spin_unlock_irqrestore(&chan->desc_lock, flags); ++ ++ /* restart pending transactions failed, do not need qos anymore */ ++ if (ret) ++ mmp_pdma_qos_put(chan); ++ ++ /* Run the callback for each descriptor, in order */ ++ list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) { ++ struct dma_async_tx_descriptor *txd = &desc->async_tx; ++ ++ /* Remove from the list of transactions */ ++ list_del(&desc->node); ++ /* Run the link descriptor callback function */ ++ dmaengine_desc_get_callback(txd, &cb); ++ dmaengine_desc_callback_invoke(&cb, NULL); ++ ++ dma_pool_free(chan->desc_pool, desc, txd->phys); ++ } ++} ++ ++static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx) ++{ ++ struct mmp_pdma_phy *phy = &pdev->phy[idx]; ++ struct mmp_pdma_chan *chan; ++ ++ chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL); ++ if (chan == NULL) ++ return -ENOMEM; ++ ++ phy->idx = idx; ++ phy->base = pdev->base; ++ ++ spin_lock_init(&chan->desc_lock); ++ chan->dev = pdev->dev; ++ chan->chan.device = &pdev->device; ++ tasklet_setup(&chan->tasklet, dma_do_tasklet); ++ INIT_LIST_HEAD(&chan->chain_pending); ++ INIT_LIST_HEAD(&chan->chain_running); ++ ++ chan->status = DMA_COMPLETE; ++ chan->bytes_residue = 0; ++ chan->qos_count = 0; ++ chan->user_do_qos = 1; ++ ++ /* register virt channel to dma engine */ ++ list_add_tail(&chan->chan.device_node, &pdev->device.channels); ++ ++ return 0; ++} ++ ++static const struct of_device_id mmp_pdma_dt_ids[] = { ++ { .compatible = "spacemit,k1-pdma", }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); ++ ++static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec, ++ struct of_dma *ofdma) ++{ ++ struct mmp_pdma_device *d = ofdma->of_dma_data; ++ struct dma_chan *chan; ++ ++ chan = dma_get_any_slave_channel(&d->device); ++ if (!chan) ++ return NULL; ++ ++ to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0]; ++ ++ return chan; ++} ++ ++static int mmp_pdma_probe(struct platform_device *op) ++{ ++ struct mmp_pdma_device *pdev; ++ const struct of_device_id *of_id; ++ struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); ++ struct resource *iores; ++ int i, ret, value; ++ int irq = 0, dma_channels = 0; ++ const enum dma_slave_buswidth widths = ++ DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | ++ DMA_SLAVE_BUSWIDTH_4_BYTES; ++ ++ int nr_reserved_channels; ++ const int *list; ++ unsigned int max_burst_size = DEFAULT_MAX_BURST_SIZE; ++ ++ pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); ++ if (!pdev) ++ return -ENOMEM; ++ ++ pdev->dev = &op->dev; ++ ++ spin_lock_init(&pdev->phy_lock); ++ ++ iores = platform_get_resource(op, IORESOURCE_MEM, 0); ++ pdev->base = devm_ioremap_resource(pdev->dev, iores); ++ if (IS_ERR(pdev->base)) ++ return PTR_ERR(pdev->base); ++ ++ pdev->clk = devm_clk_get(pdev->dev, NULL); ++ if (IS_ERR(pdev->clk)) ++ return PTR_ERR(pdev->clk); ++ ++ ret = clk_prepare_enable(pdev->clk); ++ if (ret) ++ return dev_err_probe(pdev->dev, ret, "could not enable dma bus clock\n"); ++ ++ pdev->resets = devm_reset_control_get_optional(pdev->dev, NULL); ++ if (IS_ERR(pdev->resets)) { ++ ret = PTR_ERR(pdev->resets); ++ goto err_rst; ++ } ++ ret = reset_control_deassert(pdev->resets); ++ if (ret) ++ goto err_rst; ++ ++ of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); ++ ++ if (of_id) { ++ int n; ++ ++ of_property_read_u32(pdev->dev->of_node, "#dma-channels", &dma_channels); 
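++		/*
++		 * "reserved-channels" is a flat array of (chan_id, drcmr)
++		 * pairs; each pair dedicates one physical channel to a fixed
++		 * request line so that lookup_phy() never hands it to another
++		 * client.
++		 */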
++ list = of_get_property(pdev->dev->of_node, "reserved-channels", &n); ++ if (of_property_read_u32(pdev->dev->of_node, "max-burst-size", &max_burst_size)) { ++ dev_err(pdev->dev, ++ "No max-burst-size node in the device tree, set it to %d\n", ++ DEFAULT_MAX_BURST_SIZE); ++ max_burst_size = DEFAULT_MAX_BURST_SIZE; ++ } ++ ++ if (get_max_burst_setting(max_burst_size) == INVALID_BURST_SETTING) { ++ dev_err(pdev->dev, "Unsupported max-burst-size value %d set it to %d\n", ++ max_burst_size, DEFAULT_MAX_BURST_SIZE); ++ max_burst_size = DEFAULT_MAX_BURST_SIZE; ++ } ++ ++ if (list) { ++ nr_reserved_channels = n / (sizeof(u32) * PDMA_RESRV_CHAN_ARGS_NUM); ++ pdev->nr_reserved_channels = nr_reserved_channels; ++ pdev->reserved_channels = devm_kcalloc(pdev->dev, ++ nr_reserved_channels, ++ sizeof(struct reserved_chan), ++ GFP_KERNEL); ++ if (pdev->reserved_channels == NULL) { ++ ret = -ENOMEM; ++ goto err_out; ++ } ++ ++ for (i = 0; i < nr_reserved_channels; i++) { ++ of_property_read_u32_index(pdev->dev->of_node, ++ "reserved-channels", ++ i * PDMA_RESRV_CHAN_ARGS_NUM, ++ &value); ++ pdev->reserved_channels[i].chan_id = value; ++ of_property_read_u32_index(pdev->dev->of_node, ++ "reserved-channels", ++ i * PDMA_RESRV_CHAN_ARGS_NUM + 1, ++ &value); ++ pdev->reserved_channels[i].drcmr = value; ++ } ++ } ++ } else if (pdata && pdata->dma_channels) ++ dma_channels = pdata->dma_channels; ++ else ++ dma_channels = 32; ++ pdev->dma_channels = dma_channels; ++ ++ pdev->max_burst_size = max_burst_size; ++ dev_dbg(pdev->dev, "set max burst size to %d\n", max_burst_size); ++ ++ pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy), ++ GFP_KERNEL); ++ if (pdev->phy == NULL) { ++ ret = -ENOMEM; ++ goto err_out; ++ } ++ ++ INIT_LIST_HEAD(&pdev->device.channels); ++ ++ /* all chan share one irq, demux inside */ ++ irq = platform_get_irq(op, 0); ++ ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, ++ IRQF_SHARED, "pdma", pdev); ++ if (ret) ++ goto err_out; ++ ++ for (i = 0; i < dma_channels; i++) { ++ ret = mmp_pdma_chan_init(pdev, i); ++ if (ret) ++ goto err_out; ++ } ++ ++ dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); ++ dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); ++ dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask); ++ dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask); ++ pdev->device.dev = &op->dev; ++ pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; ++ pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; ++ pdev->device.device_tx_status = mmp_pdma_tx_status; ++ pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; ++ pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; ++ pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; ++ pdev->device.device_issue_pending = mmp_pdma_issue_pending; ++ pdev->device.device_config = mmp_pdma_config; ++ pdev->device.device_pause = mmp_pdma_pause_chan; ++ pdev->device.device_terminate_all = mmp_pdma_terminate_all; ++ pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES; ++ pdev->device.src_addr_widths = widths; ++ pdev->device.dst_addr_widths = widths; ++ pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); ++ pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; ++ ++ dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); ++ ++ ret = dma_async_device_register(&pdev->device); ++ if (ret) { ++ dev_err(pdev->device.dev, "unable to register\n"); ++ goto err_out; ++ } ++ ++ if (op->dev.of_node) { ++ /* Device-tree DMA controller registration */ ++ ret = 
of_dma_controller_register(op->dev.of_node, ++ mmp_pdma_dma_xlate, pdev); ++ if (ret < 0) { ++ dev_err(&op->dev, "of_dma_controller_register failed\n"); ++ dma_async_device_unregister(&pdev->device); ++ goto err_out; ++ } ++ } ++ ++ platform_set_drvdata(op, pdev); ++ dev_dbg(pdev->device.dev, "initialized %d channels\n", dma_channels); ++ return 0; ++ ++err_out: ++ reset_control_assert(pdev->resets); ++err_rst: ++ clk_disable_unprepare(pdev->clk); ++ return ret; ++} ++ ++static int mmp_pdma_remove(struct platform_device *op) ++{ ++ struct mmp_pdma_device *pdev = platform_get_drvdata(op); ++ int irq = 0; ++ ++ if (op->dev.of_node) ++ of_dma_controller_free(op->dev.of_node); ++ ++ irq = platform_get_irq(op, 0); ++ devm_free_irq(&op->dev, irq, pdev); ++ ++ dma_async_device_unregister(&pdev->device); ++ ++ reset_control_assert(pdev->resets); ++ clk_disable_unprepare(pdev->clk); ++ ++ kfree(pdev->reserved_channels); ++ platform_set_drvdata(op, NULL); ++ ++ return 0; ++} ++ ++static struct platform_driver mmp_pdma_driver = { ++ .driver = { ++ .name = "spacemit-k1-pdma", ++ .of_match_table = mmp_pdma_dt_ids, ++ }, ++ .probe = mmp_pdma_probe, ++ .remove = mmp_pdma_remove, ++}; ++ ++static int __init spacemit_k1_pdma_init(void) ++{ ++ return platform_driver_register(&mmp_pdma_driver); ++} ++ ++static void __exit spacemit_k1_pdma_exit(void) ++{ ++ platform_driver_unregister(&mmp_pdma_driver); ++} ++ ++subsys_initcall(spacemit_k1_pdma_init); ++module_exit(spacemit_k1_pdma_exit); ++ ++MODULE_DESCRIPTION("Spacemit K1 Peripheral DMA Controller Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig +index 0f3cd1b05ae3..d6aaccfffb01 100644 +--- a/drivers/firmware/Kconfig ++++ b/drivers/firmware/Kconfig +@@ -155,7 +155,7 @@ config RASPBERRYPI_FIRMWARE + + config FW_CFG_SYSFS + tristate "QEMU fw_cfg device support in sysfs" +- depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86 || SW64) ++ depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || RISCV || SPARC || SW64 || X86) + depends on HAS_IOPORT_MAP + default n + help +@@ -315,5 +315,6 @@ source "drivers/firmware/psci/Kconfig" + source "drivers/firmware/smccc/Kconfig" + source "drivers/firmware/tegra/Kconfig" + source "drivers/firmware/xilinx/Kconfig" ++source "drivers/firmware/xuantie/Kconfig" + + endmenu +diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile +index 28fcddcd688f..c549817a4b42 100644 +--- a/drivers/firmware/Makefile ++++ b/drivers/firmware/Makefile +@@ -38,3 +38,4 @@ obj-y += psci/ + obj-y += smccc/ + obj-y += tegra/ + obj-y += xilinx/ ++obj-y += xuantie/ +diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile +index da8eac6dfc0f..08a44140087f 100644 +--- a/drivers/firmware/efi/libstub/Makefile ++++ b/drivers/firmware/efi/libstub/Makefile +@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ + -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ + -DEFI_HAVE_STRCMP -fno-builtin -fpic \ + $(call cc-option,-mno-single-pic-base) +-cflags-$(CONFIG_RISCV) += -fpic -mno-relax ++cflags-$(CONFIG_RISCV) += -fpic -mno-relax -DNO_ALTERNATIVE + cflags-$(CONFIG_LOONGARCH) += -fpie + + cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt +diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c +index 01f0f90ea418..fa71cd898120 100644 +--- a/drivers/firmware/efi/riscv-runtime.c ++++ b/drivers/firmware/efi/riscv-runtime.c +@@ -152,3 +152,16 @@ void 
arch_efi_call_virt_teardown(void)
+ {
+ 	efi_virtmap_unload();
+ }
++
++static int __init riscv_dmi_init(void)
++{
++	/*
++	 * On riscv, DMI depends on UEFI, and dmi_setup() needs to
++	 * be called early because dmi_id_init(), which is an arch_initcall
++	 * itself, depends on dmi_scan_machine() having been called already.
++	 */
++	dmi_setup();
++
++	return 0;
++}
++core_initcall(riscv_dmi_init);
+diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
+index f4fea1ec3201..24b302c9e212 100644
+--- a/drivers/firmware/qemu_fw_cfg.c
++++ b/drivers/firmware/qemu_fw_cfg.c
+@@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void)
+
+ /* arch-specific ctrl & data register offsets are not available in ACPI, DT */
+ #if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
+-# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_SW64))
++# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_SW64) || defined(CONFIG_RISCV))
+ #  define FW_CFG_CTRL_OFF 0x08
+ #  define FW_CFG_DATA_OFF 0x00
+ #  define FW_CFG_DMA_OFF 0x10
+diff --git a/drivers/firmware/xuantie/Kconfig b/drivers/firmware/xuantie/Kconfig
+new file mode 100644
+index 000000000000..b10c0416067f
+--- /dev/null
++++ b/drivers/firmware/xuantie/Kconfig
+@@ -0,0 +1,23 @@
++# SPDX-License-Identifier: GPL-2.0-only
++config TH1520_AON
++	bool "XuanTie TH1520 Aon Protocol driver"
++	depends on TH1520_MBOX
++	default y
++	help
++	  XuanTie TH1520 Aon is a low-level system function which runs on a
++	  dedicated XuanTie RISC-V E902 core to provide power, clock and
++	  resource management.
++
++	  This driver manages the IPC interface between the host CPU and the
++	  Aon firmware running on the XuanTie RISC-V E902 core.
++
++config TH1520_AON_PD
++	bool "XuanTie TH1520 Aon Power Domain driver"
++	depends on TH1520_AON
++	help
++	  The XuanTie TH1520 Aon based power domain virtual driver.
++	  When selected, this option adds kernel support for dynamically
++	  configuring power to various peripherals and functional blocks.
++
++	  Note: Enabling this option requires the "XuanTie TH1520 Aon
++	  Protocol driver" (TH1520_AON) to be selected first, as this driver
++	  is built upon the foundational Aon infrastructure provided by that
++	  option.
+diff --git a/drivers/firmware/xuantie/Makefile b/drivers/firmware/xuantie/Makefile
+new file mode 100644
+index 000000000000..7e24382b122d
+--- /dev/null
++++ b/drivers/firmware/xuantie/Makefile
+@@ -0,0 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
++obj-$(CONFIG_TH1520_AON) += th1520_aon.o
++obj-$(CONFIG_TH1520_AON_PD) += th1520_aon_pd.o
++obj-y += th1520_proc_debug.o
+diff --git a/drivers/firmware/xuantie/th1520_aon.c b/drivers/firmware/xuantie/th1520_aon.c
+new file mode 100644
+index 000000000000..7cec53f10496
+--- /dev/null
++++ b/drivers/firmware/xuantie/th1520_aon.c
+@@ -0,0 +1,341 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright (C) 2021 Alibaba Group Holding Limited.
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++/* FIXME: wait for the response for 3000 ms instead of 300 ms */
++#define MAX_RX_TIMEOUT		(msecs_to_jiffies(3000))
++#define MAX_TX_TIMEOUT		(msecs_to_jiffies(500))
++
++struct th1520_aon_chan {
++	struct th1520_aon_ipc *aon_ipc;
++
++	struct mbox_client cl;
++	struct mbox_chan *ch;
++	struct completion tx_done;
++	/* for the log procfs interface */
++	phys_addr_t log_phy;
++	size_t log_size;
++	void __iomem *log_mem;
++	void *log_ctrl;
++	struct proc_dir_entry *proc_dir;
++};
++
++struct th1520_aon_ipc {
++	struct th1520_aon_chan chans;
++	struct device *dev;
++	struct mutex lock;
++	struct completion done;
++	u32 *msg;
++};
++
++/*
++ * This type is used to indicate an error response for most functions.
++ */
++enum th1520_aon_error_codes {
++	TH1520_AON_ERR_NONE = 0,	/* Success */
++	TH1520_AON_ERR_VERSION = 1,	/* Incompatible API version */
++	TH1520_AON_ERR_CONFIG = 2,	/* Configuration error */
++	TH1520_AON_ERR_PARM = 3,	/* Bad parameter */
++	TH1520_AON_ERR_NOACCESS = 4,	/* Permission error (no access) */
++	TH1520_AON_ERR_LOCKED = 5,	/* Permission error (locked) */
++	TH1520_AON_ERR_UNAVAILABLE = 6,	/* Unavailable (out of resources) */
++	TH1520_AON_ERR_NOTFOUND = 7,	/* Not found */
++	TH1520_AON_ERR_NOPOWER = 8,	/* No power */
++	TH1520_AON_ERR_IPC = 9,		/* Generic IPC error */
++	TH1520_AON_ERR_BUSY = 10,	/* Resource is currently busy/active */
++	TH1520_AON_ERR_FAIL = 11,	/* General I/O failure */
++	TH1520_AON_ERR_LAST
++};
++
++static int th1520_aon_linux_errmap[TH1520_AON_ERR_LAST] = {
++	0,	 /* TH1520_AON_ERR_NONE */
++	-EINVAL, /* TH1520_AON_ERR_VERSION */
++	-EINVAL, /* TH1520_AON_ERR_CONFIG */
++	-EINVAL, /* TH1520_AON_ERR_PARM */
++	-EACCES, /* TH1520_AON_ERR_NOACCESS */
++	-EACCES, /* TH1520_AON_ERR_LOCKED */
++	-ERANGE, /* TH1520_AON_ERR_UNAVAILABLE */
++	-EEXIST, /* TH1520_AON_ERR_NOTFOUND */
++	-EPERM,	 /* TH1520_AON_ERR_NOPOWER */
++	-EPIPE,	 /* TH1520_AON_ERR_IPC */
++	-EBUSY,	 /* TH1520_AON_ERR_BUSY */
++	-EIO,	 /* TH1520_AON_ERR_FAIL */
++};
++
++static struct th1520_aon_ipc *th1520_aon_ipc_handle;
++
++static inline int th1520_aon_to_linux_errno(int errno)
++{
++	if (errno >= TH1520_AON_ERR_NONE && errno < TH1520_AON_ERR_LAST)
++		return th1520_aon_linux_errmap[errno];
++	return -EIO;
++}
++
++/*
++ * Get the default handle used to talk to the Aon firmware.
++ */
++int th1520_aon_get_handle(struct th1520_aon_ipc **ipc)
++{
++	if (!th1520_aon_ipc_handle)
++		return -EPROBE_DEFER;
++
++	*ipc = th1520_aon_ipc_handle;
++	return 0;
++}
++EXPORT_SYMBOL(th1520_aon_get_handle);
++
++static void th1520_aon_tx_done(struct mbox_client *cl, void *mssg, int r)
++{
++	struct th1520_aon_chan *aon_chan =
++		container_of(cl, struct th1520_aon_chan, cl);
++
++	complete(&aon_chan->tx_done);
++}
++
++static void th1520_aon_rx_callback(struct mbox_client *c, void *msg)
++{
++	struct th1520_aon_chan *aon_chan =
++		container_of(c, struct th1520_aon_chan, cl);
++	struct th1520_aon_ipc *aon_ipc = aon_chan->aon_ipc;
++	struct th1520_aon_rpc_msg_hdr *hdr =
++		(struct th1520_aon_rpc_msg_hdr *)msg;
++	uint8_t recv_size = sizeof(struct th1520_aon_rpc_msg_hdr) + hdr->size;
++
++	memcpy(aon_ipc->msg, msg, recv_size);
++	dev_dbg(aon_ipc->dev, "msg head: 0x%x, size:%d\n", *((u32 *)msg),
++		recv_size);
++	complete(&aon_ipc->done);
++}
++
++static int th1520_aon_ipc_write(struct th1520_aon_ipc *aon_ipc, void *msg)
++{
++	struct th1520_aon_rpc_msg_hdr *hdr = msg;
++	struct th1520_aon_chan *aon_chan;
th1520_aon_chan *aon_chan; ++ u32 *data = msg; ++ int ret; ++ ++ /* check size, currently it requires 7 MSG in one transfer */ ++ if (hdr->size != TH1520_AON_RPC_MSG_NUM) ++ return -EINVAL; ++ ++ dev_dbg(aon_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc, ++ hdr->func, hdr->size); ++ ++ aon_chan = &aon_ipc->chans; ++ ++ if (!wait_for_completion_timeout(&aon_chan->tx_done, MAX_TX_TIMEOUT)) { ++ dev_err(aon_ipc->dev, "tx_done timeout\n"); ++ return -ETIMEDOUT; ++ } ++ reinit_completion(&aon_chan->tx_done); ++ ++ ret = mbox_send_message(aon_chan->ch, data); ++ if (ret < 0) ++ return ret; ++ ++ return 0; ++} ++ ++/* ++ * RPC command/response ++ */ ++int th1520_aon_call_rpc(struct th1520_aon_ipc *aon_ipc, void *msg, ++ void *ack_msg, bool have_resp) ++{ ++ struct th1520_aon_rpc_msg_hdr *hdr = msg; ++ struct th1520_aon_rpc_ack_common *ack; ++ int ret = 0; ++ ++ if (WARN_ON(!aon_ipc || !msg)) ++ return -EINVAL; ++ ++ if (have_resp && WARN_ON(!ack_msg)) ++ return -EINVAL; ++ mutex_lock(&aon_ipc->lock); ++ reinit_completion(&aon_ipc->done); ++ ++ RPC_SET_VER(hdr, TH1520_AON_RPC_VERSION); ++ /* svc id uses 6 bits in version 2 */ ++ RPC_SET_SVC_ID(hdr, hdr->svc); ++ RPC_SET_SVC_FLAG_MSG_TYPE(hdr, RPC_SVC_MSG_TYPE_DATA); ++ ++ if (have_resp) { ++ aon_ipc->msg = ack_msg; ++ RPC_SET_SVC_FLAG_ACK_TYPE(hdr, RPC_SVC_MSG_NEED_ACK); ++ } else { ++ RPC_SET_SVC_FLAG_ACK_TYPE(hdr, RPC_SVC_MSG_NO_NEED_ACK); ++ } ++ ++ ret = th1520_aon_ipc_write(aon_ipc, msg); ++ if (ret < 0) { ++ dev_err(aon_ipc->dev, "RPC send msg failed: %d\n", ret); ++ goto out; ++ } ++ ++ if (have_resp) { ++ if (!wait_for_completion_timeout(&aon_ipc->done, ++ MAX_RX_TIMEOUT)) { ++ dev_err(aon_ipc->dev, "RPC send msg timeout\n"); ++ mutex_unlock(&aon_ipc->lock); ++ return -ETIMEDOUT; ++ } ++ ++ /* response status is stored in the msg data[0] field */ ++ ack = ack_msg; ++ ret = ack->err_code; ++ } ++ ++out: ++ mutex_unlock(&aon_ipc->lock); ++ ++ dev_dbg(aon_ipc->dev, "RPC SVC done\n"); ++ ++ return th1520_aon_to_linux_errno(ret); ++} ++EXPORT_SYMBOL(th1520_aon_call_rpc); ++
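Not part of the patch, orientation only: th1520_aon_call_rpc() above is the whole consumer-facing API — get the global handle, fill a fixed-size header, and optionally wait for an ack whose err_code is mapped through th1520_aon_to_linux_errno(). A minimal sketch of a caller, modeled on th1520_aon_pd_power() later in this patch; the message struct is a stand-in, since the real th1520_aon_rpc_msg_hdr header file is not shown here:

    /* Sketch only: layout and sizes are assumptions mirroring the AON ABI used below. */
    struct sketch_msg {
            struct th1520_aon_rpc_msg_hdr hdr;
            u16 payload[11];                        /* pads out the fixed 7-word transfer */
    };

    static int sketch_aon_request(void)
    {
            struct th1520_aon_ipc *ipc;
            struct sketch_msg msg = { 0 };
            struct th1520_aon_rpc_ack_common ack;
            int ret;

            ret = th1520_aon_get_handle(&ipc);      /* -EPROBE_DEFER until th1520_aon_probe() ran */
            if (ret)
                    return ret;

            msg.hdr.svc = TH1520_AON_RPC_SVC_PM;
            msg.hdr.func = TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE;
            msg.hdr.size = TH1520_AON_RPC_MSG_NUM;  /* enforced by th1520_aon_ipc_write() */

            /* blocking; ack.err_code comes back mapped to a Linux errno */
            return th1520_aon_call_rpc(ipc, &msg, &ack, true);
    }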
++int get_aon_log_mem(struct device *dev, phys_addr_t *mem, size_t *mem_size) ++{ ++ struct resource r; ++ struct device_node *node; ++ int ret; ++ ++ *mem = 0; ++ *mem_size = 0; ++ ++ node = of_parse_phandle(dev->of_node, "log-memory-region", 0); ++ if (!node) { ++ dev_err(dev, "no log-memory-region specified\n"); ++ return -EINVAL; ++ } ++ ++ ret = of_address_to_resource(node, 0, &r); ++ if (ret) { ++ dev_err(dev, "failed to get memory-region resource\n"); ++ return -EINVAL; ++ } ++ ++ *mem = r.start; ++ *mem_size = resource_size(&r); ++ return 0; ++} ++ ++static int th1520_aon_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct th1520_aon_ipc *aon_ipc; ++ struct th1520_aon_chan *aon_chan; ++ struct mbox_client *cl; ++ char dir_name[32] = { 0x0 }; ++ int ret; ++ ++ aon_ipc = devm_kzalloc(dev, sizeof(*aon_ipc), GFP_KERNEL); ++ if (!aon_ipc) ++ return -ENOMEM; ++ ++ aon_chan = &aon_ipc->chans; ++ cl = &aon_chan->cl; ++ cl->dev = dev; ++ cl->tx_block = false; ++ cl->knows_txdone = true; ++ cl->rx_callback = th1520_aon_rx_callback; ++ ++ /* Initialize the tx_done completion as "done" */ ++ cl->tx_done = th1520_aon_tx_done; ++ init_completion(&aon_chan->tx_done); ++ complete(&aon_chan->tx_done); ++ ++ aon_chan->aon_ipc = aon_ipc; ++ aon_chan->ch = mbox_request_channel_byname(cl, "aon"); ++ if (IS_ERR(aon_chan->ch)) { ++ ret = PTR_ERR(aon_chan->ch); ++ if (ret != -EPROBE_DEFER) ++ dev_err(dev, "Failed to request aon mbox chan ret %d\n", ++ ret); ++ return ret; ++ } ++ ++ dev_dbg(dev, "request th1520 mbox chan: aon\n"); ++ ++ aon_ipc->dev = dev; ++ mutex_init(&aon_ipc->lock); ++ init_completion(&aon_ipc->done); ++ aon_chan->log_ctrl = NULL; ++ ++ ret = get_aon_log_mem(dev, &aon_chan->log_phy, &aon_chan->log_size); ++ if (ret) ++ return ret; ++ ++ /* ioremap() returns NULL on failure, not an ERR_PTR */ ++ aon_chan->log_mem = ioremap(aon_chan->log_phy, aon_chan->log_size); ++ if (!aon_chan->log_mem) { ++ dev_err(dev, "%s: failed to map aon log region\n", __func__); ++ return -ENOMEM; ++ } ++ pr_info("virtual_log_mem=0x%p, phy base=%pa\n", aon_chan->log_mem, &aon_chan->log_phy); ++ ++ sprintf(dir_name, "aon_proc"); ++ aon_chan->proc_dir = proc_mkdir(dir_name, NULL); ++ if (!aon_chan->proc_dir) { ++ dev_err(dev, "failed to create %s\n", dir_name); ++ return -ENOMEM; ++ } ++ aon_chan->log_ctrl = th1520_create_panic_log_proc( ++ aon_chan->log_phy, aon_chan->proc_dir, ++ aon_chan->log_mem, aon_chan->log_size); ++ ++ th1520_aon_ipc_handle = aon_ipc; ++ ++ return devm_of_platform_populate(dev); ++} ++ ++static const struct of_device_id th1520_aon_match[] = { ++ { ++ .compatible = "xuantie,th1520-aon", ++ }, ++ { /* Sentinel */ } ++}; ++ ++static int __maybe_unused th1520_aon_resume_noirq(struct device *dev) ++{ ++ struct th1520_aon_chan *aon_chan; ++ ++ aon_chan = &th1520_aon_ipc_handle->chans; ++ ++ complete(&aon_chan->tx_done); ++ return 0; ++} ++ ++static const struct dev_pm_ops th1520_aon_pm_ops = { ++ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, th1520_aon_resume_noirq) ++}; ++ ++static struct platform_driver th1520_aon_driver = { ++ .driver = { ++ .name = "th1520-aon", ++ .of_match_table = th1520_aon_match, ++ .pm = &th1520_aon_pm_ops, ++ }, ++ .probe = th1520_aon_probe, ++}; ++builtin_platform_driver(th1520_aon_driver); ++ ++MODULE_AUTHOR("fugang.duan "); ++MODULE_DESCRIPTION("XuanTie TH1520 firmware protocol driver"); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/firmware/xuantie/th1520_aon_pd.c b/drivers/firmware/xuantie/th1520_aon_pd.c +new file mode 100644 +index 000000000000..77617e70abad +--- /dev/null ++++ b/drivers/firmware/xuantie/th1520_aon_pd.c +@@ -0,0 +1,414 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Copyright (C) 2021 Alibaba Group Holding Limited.
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct th1520_aon_msg_req_set_resource_power_mode { ++ struct th1520_aon_rpc_msg_hdr hdr; ++ u16 resource; ++ u16 mode; ++ u16 reserved[10]; ++} __packed __aligned(1); ++ ++#define TH1520_AONU_PD_NAME_SIZE 20 ++#define TH1520_AONU_PD_STATE_NAME_SIZE 10 ++ ++struct th1520_aon_pm_domain { ++ struct generic_pm_domain pd; ++ char name[TH1520_AONU_PD_NAME_SIZE]; ++ u16 rsrc; ++}; ++ ++struct th1520_aon_pd_range { ++ char *name; ++ u32 rsrc; ++ u8 num; ++ ++ /* add domain index */ ++ bool postfix; ++ u8 start_from; ++}; ++ ++struct th1520_aon_pd_soc { ++ const struct th1520_aon_pd_range *pd_ranges; ++ u8 num_ranges; ++}; ++ ++static const struct th1520_aon_pd_range th1520_aon_pd_ranges[] = { ++ /* AUDIO SS */ ++ { "audio", TH1520_AON_AUDIO_PD, 1, false, 0 }, ++ { "vdec", TH1520_AON_VDEC_PD, 1, false, 0}, ++ { "npu", TH1520_AON_NPU_PD, 1, false, 0}, ++ { "venc", TH1520_AON_VENC_PD, 1, false, 0}, ++ { "gpu", TH1520_AON_GPU_PD, 1, false, 0}, ++ { "dsp0", TH1520_AON_DSP0_PD, 1, false, 0}, ++ { "dsp1", TH1520_AON_DSP1_PD, 1, false, 0}, ++ {}, ++}; ++ ++static const struct th1520_aon_pd_soc th1520_aon_pd = { ++ .pd_ranges = th1520_aon_pd_ranges, ++ .num_ranges = ARRAY_SIZE(th1520_aon_pd_ranges), ++}; ++ ++static struct th1520_aon_ipc *pm_ipc_handle; ++static struct dentry *pd_debugfs_root; ++struct dentry *pd_pde; ++struct genpd_onecell_data *genpd_data; ++ ++static inline struct th1520_aon_pm_domain *to_th1520_aon_pd(struct generic_pm_domain *genpd) ++{ ++ return container_of(genpd, struct th1520_aon_pm_domain, pd); ++} ++ ++static int th1520_aon_pd_power(struct generic_pm_domain *domain, bool power_on) ++{ ++ struct th1520_aon_msg_req_set_resource_power_mode msg; ++ struct th1520_aon_rpc_ack_common ack_msg; ++ struct th1520_aon_rpc_msg_hdr *hdr = &msg.hdr; ++ struct th1520_aon_pm_domain *pd; ++ int ret; ++ ++ pd = to_th1520_aon_pd(domain); ++ ++ hdr->svc = TH1520_AON_RPC_SVC_PM; ++ hdr->func = TH1520_AON_PM_FUNC_SET_RESOURCE_POWER_MODE; ++ hdr->size = TH1520_AON_RPC_MSG_NUM; ++ ++ RPC_SET_BE16(&msg.resource, 0, pd->rsrc); ++ RPC_SET_BE16(&msg.resource, 2, ++ (power_on ? TH1520_AON_PM_PW_MODE_ON : TH1520_AON_PM_PW_MODE_OFF)); ++ ++ ret = th1520_aon_call_rpc(pm_ipc_handle, &msg, &ack_msg, true); ++ if (ret) ++ dev_err(&domain->dev, "failed to power %s resource %d ret %d\n", ++ power_on ? 
"up" : "off", pd->rsrc, ret); ++ ++ return ret; ++} ++ ++static int th1520_aon_pd_power_on(struct generic_pm_domain *domain) ++{ ++ return th1520_aon_pd_power(domain, true); ++} ++ ++static int th1520_aon_pd_power_off(struct generic_pm_domain *domain) ++{ ++ return th1520_aon_pd_power(domain, false); ++} ++ ++static struct generic_pm_domain *th1520_aon_pd_xlate(struct of_phandle_args *spec, ++ void *data) ++{ ++ struct generic_pm_domain *domain = ERR_PTR(-ENOENT); ++ struct genpd_onecell_data *pd_data = data; ++ unsigned int i; ++ ++ for (i = 0; i < pd_data->num_domains; i++) { ++ struct th1520_aon_pm_domain *aon_pd; ++ ++ aon_pd = to_th1520_aon_pd(pd_data->domains[i]); ++ if (aon_pd->rsrc == spec->args[0]) { ++ domain = &aon_pd->pd; ++ break; ++ } ++ } ++ ++ return domain; ++} ++ ++static struct th1520_aon_pm_domain * ++th1520_aon_add_pm_domain(struct device *dev, int idx, ++ const struct th1520_aon_pd_range *pd_ranges) ++{ ++ struct th1520_aon_pm_domain *aon_pd; ++ int ret; ++ ++ aon_pd = devm_kzalloc(dev, sizeof(*aon_pd), GFP_KERNEL); ++ if (!aon_pd) ++ return ERR_PTR(-ENOMEM); ++ ++ aon_pd->rsrc = pd_ranges->rsrc + idx; ++ aon_pd->pd.power_off = th1520_aon_pd_power_off; ++ aon_pd->pd.power_on = th1520_aon_pd_power_on; ++ ++ if (pd_ranges->postfix) ++ snprintf(aon_pd->name, sizeof(aon_pd->name), ++ "%s%i", pd_ranges->name, pd_ranges->start_from + idx); ++ else ++ snprintf(aon_pd->name, sizeof(aon_pd->name), ++ "%s", pd_ranges->name); ++ ++ aon_pd->pd.name = aon_pd->name; ++ ++ if (aon_pd->rsrc >= TH1520_AON_R_LAST) { ++ dev_warn(dev, "invalid pd %s rsrc id %d found", ++ aon_pd->name, aon_pd->rsrc); ++ ++ devm_kfree(dev, aon_pd); ++ return NULL; ++ } ++ ++ ret = pm_genpd_init(&aon_pd->pd, NULL, true); ++ if (ret) { ++ dev_warn(dev, "failed to init pd %s rsrc id %d", ++ aon_pd->name, aon_pd->rsrc); ++ devm_kfree(dev, aon_pd); ++ return NULL; ++ } ++ ++ return aon_pd; ++} ++ ++static int th1520_aon_init_pm_domains(struct device *dev, ++ const struct th1520_aon_pd_soc *pd_soc) ++{ ++ const struct th1520_aon_pd_range *pd_ranges = pd_soc->pd_ranges; ++ struct generic_pm_domain **domains; ++ struct genpd_onecell_data *pd_data; ++ struct th1520_aon_pm_domain *aon_pd; ++ u32 count = 0; ++ int i, j; ++ ++ for (i = 0; i < pd_soc->num_ranges; i++) ++ count += pd_ranges[i].num; ++ ++ domains = devm_kcalloc(dev, count, sizeof(*domains), GFP_KERNEL); ++ if (!domains) ++ return -ENOMEM; ++ ++ pd_data = devm_kzalloc(dev, sizeof(*pd_data), GFP_KERNEL); ++ if (!pd_data) ++ return -ENOMEM; ++ ++ count = 0; ++ for (i = 0; i < pd_soc->num_ranges; i++) { ++ for (j = 0; j < pd_ranges[i].num; j++) { ++ aon_pd = th1520_aon_add_pm_domain(dev, j, &pd_ranges[i]); ++ if (IS_ERR_OR_NULL(aon_pd)) ++ continue; ++ ++ domains[count++] = &aon_pd->pd; ++ dev_dbg(dev, "added power domain %s\n", aon_pd->pd.name); ++ } ++ } ++ ++ pd_data->domains = domains; ++ pd_data->num_domains = count; ++ pd_data->xlate = th1520_aon_pd_xlate; ++ genpd_data = pd_data; ++ ++ of_genpd_add_provider_onecell(dev->of_node, pd_data); ++ ++ return 0; ++} ++ ++static char *pd_get_user_string(const char __user *userbuf, size_t userlen) ++{ ++ char *buffer; ++ ++ buffer = vmalloc(userlen + 1); ++ if (!buffer) ++ return ERR_PTR(-ENOMEM); ++ ++ if (copy_from_user(buffer, userbuf, userlen) != 0) { ++ vfree(buffer); ++ return ERR_PTR(-EFAULT); ++ } ++ ++ /* got the string, now strip linefeed. 
*/ ++ if (buffer[userlen - 1] == '\n') ++ buffer[userlen - 1] = '\0'; ++ else ++ buffer[userlen] = '\0'; ++ ++ pr_debug("buffer = %s\n", buffer); ++ ++ return buffer; ++} ++ ++static ssize_t th1520_power_domain_write(struct file *file, ++ const char __user *userbuf, ++ size_t userlen, loff_t *ppos) ++{ ++ char *buffer, *start, *end; ++ struct seq_file *m = (struct seq_file *)file->private_data; ++ struct genpd_onecell_data *aon_pds_data = m->private; ++ struct generic_pm_domain *matched_pm_genpd = NULL; ++ struct generic_pm_domain *domain; ++ char pd_name[TH1520_AONU_PD_NAME_SIZE]; ++ char pd_state[TH1520_AONU_PD_STATE_NAME_SIZE]; ++ int idx, ret; ++ size_t origin_len = userlen; ++ ++ buffer = pd_get_user_string(userbuf, userlen); ++ if (IS_ERR(buffer)) ++ return PTR_ERR(buffer); ++ ++ start = skip_spaces(buffer); ++ end = start; ++ while (!isspace(*end) && *end != '\0') ++ end++; ++ ++ *end = '\0'; ++ strcpy(pd_name, start); ++ pr_debug("power domain name: %s\n", pd_name); ++ ++ /* find the target power domain */ ++ for (idx = 0; idx < aon_pds_data->num_domains; idx++) { ++ domain = aon_pds_data->domains[idx]; ++ pr_debug("generic pm domain name: %s, pd_name: %s, ret = %d\n", ++ domain->name, pd_name, strcmp(pd_name, domain->name)); ++ if (strcmp(pd_name, domain->name)) ++ continue; ++ matched_pm_genpd = aon_pds_data->domains[idx]; ++ pr_debug("target pm power domain-%s found, index: %d\n", ++ matched_pm_genpd->name, idx); ++ break; ++ } ++ ++ if (idx >= aon_pds_data->num_domains) { ++ pr_err("no target power domain-%s found, idx = %d, total pd numbers = %d\n", ++ pd_name, idx, aon_pds_data->num_domains); ++ userlen = -EINVAL; ++ goto out; ++ } ++ ++ if (!matched_pm_genpd->power_on && !matched_pm_genpd->power_off) { ++ pr_err("no power operations registered for power domain-%s\n", pd_name); ++ userlen = -EINVAL; ++ goto out; ++ } ++ ++ end = end + 1; ++ start = skip_spaces(end); ++ end = start; ++ while (!isspace(*end) && *end != '\0') ++ end++; ++ ++ *end = '\0'; ++ strcpy(pd_state, start); ++ pr_debug("power domain target state: %s\n", pd_state); ++ ++ if (!strcmp(pd_state, "on")) { ++ ret = matched_pm_genpd->power_on(matched_pm_genpd); ++ if (ret) { ++ userlen = ret; ++ goto out; ++ } ++ } else if (!strcmp(pd_state, "off")) { ++ ret = matched_pm_genpd->power_off(matched_pm_genpd); ++ if (ret) { ++ userlen = ret; ++ goto out; ++ } ++ } else { ++ pr_err("invalid power domain target state, not 'on' or 'off'\n"); ++ userlen = -EINVAL; ++ goto out; ++ } ++ ++out: ++ memset(buffer, 0, origin_len); ++ vfree(buffer); ++ ++ return userlen; ++} ++ ++static int th1520_power_domain_show(struct seq_file *m, void *v) ++{ ++ struct genpd_onecell_data *pd_data = m->private; ++ u32 count = pd_data->num_domains; ++ int idx; ++ ++ seq_puts(m, "[Power domain name list]: "); ++ for (idx = 0; idx < count; idx++) ++ seq_printf(m, "%s ", pd_data->domains[idx]->name); ++ seq_puts(m, "\n"); ++ seq_puts(m, "[Power on domain usage]: echo power_name on > domain\n"); ++ seq_puts(m, "[Power off domain usage]: echo power_name off > domain\n"); ++ ++ return 0; ++} ++ ++static int th1520_power_domain_open(struct inode *inode, struct file *file) ++{ ++ struct genpd_onecell_data *pd_data = inode->i_private; ++ ++ return single_open(file, th1520_power_domain_show, pd_data); ++} ++ ++static const struct file_operations th1520_power_domain_fops = { ++ .owner = THIS_MODULE, ++ .write = th1520_power_domain_write, ++ .read = seq_read, ++ .open = th1520_power_domain_open, ++ .llseek = generic_file_llseek, ++}; ++
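The show() handler above doubles as usage documentation: `echo <power_name> <on|off> > /sys/kernel/debug/power_domain/domain`. Not part of the patch: a standalone sketch of the same two-token parse the write handler performs with skip_spaces()/isspace(), in plain userspace C:

    #include <ctype.h>
    #include <stdio.h>

    /* Split "name state" into two NUL-terminated tokens, as the write handler does. */
    static int parse_pd_cmd(char *buf, char **name, char **state)
    {
            char *end;

            while (isspace((unsigned char)*buf))
                    buf++;
            *name = buf;
            for (end = buf; *end && !isspace((unsigned char)*end); end++)
                    ;
            if (!*end)
                    return -1;              /* no state token */
            *end = '\0';

            buf = end + 1;
            while (isspace((unsigned char)*buf))
                    buf++;
            *state = buf;
            for (end = buf; *end && !isspace((unsigned char)*end); end++)
                    ;
            *end = '\0';
            return 0;
    }

    int main(void)
    {
            char cmd[] = "npu on";
            char *name, *state;

            if (!parse_pd_cmd(cmd, &name, &state))
                    printf("domain=%s state=%s\n", name, state); /* domain=npu state=on */
            return 0;
    }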
++static void pd_debugfs_init(struct genpd_onecell_data *aon_pds_data) ++{ ++ pd_debugfs_root = debugfs_create_dir("power_domain", NULL); ++ if (IS_ERR_OR_NULL(pd_debugfs_root)) ++ return; ++ ++ pd_pde = debugfs_create_file("domain", 0600, pd_debugfs_root, ++ (void *)aon_pds_data, &th1520_power_domain_fops); ++} ++ ++static int th1520_aon_pd_probe(struct platform_device *pdev) ++{ ++ const struct th1520_aon_pd_soc *pd_soc; ++ int ret; ++ ++ ret = th1520_aon_get_handle(&pm_ipc_handle); ++ if (ret) ++ return ret; ++ ++ pd_soc = of_device_get_match_data(&pdev->dev); ++ if (!pd_soc) ++ return -ENODEV; ++ ++ ret = th1520_aon_init_pm_domains(&pdev->dev, pd_soc); ++ if (ret) ++ return ret; ++ ++ pd_debugfs_init(genpd_data); ++ ++ return 0; ++} ++ ++static const struct of_device_id th1520_aon_pd_match[] = { ++ { .compatible = "xuantie,th1520-aon-pd", .data = &th1520_aon_pd }, ++ { /* sentinel */ } ++}; ++ ++static struct platform_driver th1520_aon_pd_driver = { ++ .driver = { ++ .name = "th1520-aon-pd", ++ .of_match_table = th1520_aon_pd_match, ++ }, ++ .probe = th1520_aon_pd_probe, ++}; ++builtin_platform_driver(th1520_aon_pd_driver); ++ ++MODULE_AUTHOR("fugang.duan "); ++MODULE_DESCRIPTION("XuanTie TH1520 Aon power domain driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/firmware/xuantie/th1520_proc_debug.c b/drivers/firmware/xuantie/th1520_proc_debug.c +new file mode 100644 +index 000000000000..20d216522c81 +--- /dev/null ++++ b/drivers/firmware/xuantie/th1520_proc_debug.c +@@ -0,0 +1,173 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * System log support for the th1520 c906 and e902 cores ++ * Copyright (C) 2021 Alibaba Group Holding Limited. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define GET_PAGE_NUM(size, offset) \ ++ ((((size) + ((offset) & ~PAGE_MASK)) + PAGE_SIZE - 1) >> PAGE_SHIFT) ++ ++struct th1520_log_ring_buffer { ++ __u32 read; ++ __u32 write; ++ __u32 size; ++ __u32 reserved[1]; ++ __u8 data[]; ++}; ++ ++struct th1520_hw_log { ++ __u32 panic; ++ __u32 reserved[2]; ++ struct th1520_log_ring_buffer rb; ++}; ++ ++struct th1520_proc_log_ctrl { ++ struct th1520_hw_log __iomem *log; ++ struct proc_dir_entry *log_proc_file; ++ phys_addr_t log_phy; ++}; ++ ++static void dump_regs(const char *fn, void *hw_arg) ++{ ++ struct th1520_proc_log_ctrl *log_ctrl = hw_arg; ++ ++ if (!log_ctrl->log) ++ return; ++ ++ pr_debug("%s: panic = 0x%08x\n", fn, ++ __raw_readl(&log_ctrl->log->panic)); ++ pr_debug("%s: read = 0x%08x, write = 0x%08x, size = 0x%08x\n", fn, ++ __raw_readl(&log_ctrl->log->rb.read), ++ __raw_readl(&log_ctrl->log->rb.write), ++ __raw_readl(&log_ctrl->log->rb.size)); ++} ++ ++static int log_proc_show(struct seq_file *file, void *v) ++{ ++ struct th1520_proc_log_ctrl *log_ctrl = file->private; ++ char *buf; ++ size_t i; ++ ++ /* dcache clean and invalidate */ ++ ALT_CMO_OP(flush, (phys_to_virt(log_ctrl->log_phy)), ++ sizeof(struct th1520_hw_log), ++ riscv_cbom_block_size); ++ ++ uint32_t write = __raw_readl(&log_ctrl->log->rb.write); ++ uint32_t read = __raw_readl(&log_ctrl->log->rb.read); ++ uint32_t size = __raw_readl(&log_ctrl->log->rb.size); ++ size_t log_size = write >= read ? write - read : size + write - read; ++ ++ seq_printf(file, "****************** device log >>>>>>>>>>>>>>>>>\n"); ++ dump_regs(__func__, log_ctrl); ++ if (!log_size) { ++ seq_printf( ++ file, ++ "****************** end device log <<<<<<<<<<<<<<<<<\n"); ++ return 0; ++ } ++ ++ int page_num = GET_PAGE_NUM(log_size, 0); ++ ++ int log_patch_1 = -1, log_patch_2 = -1; ++ ++ buf = kmalloc(PAGE_SIZE * page_num, GFP_KERNEL); ++ if (!buf) { ++ pr_debug("failed to alloc buf\n"); ++ return -ENOMEM; ++ } ++ ++ if (read + log_size >= size) { ++ log_patch_2 = read + log_size - size + 1; ++ log_patch_1 = log_size - log_patch_2; ++ } else { ++ log_patch_1 = log_size; ++ } ++ ++ memcpy_fromio(buf, &log_ctrl->log->rb.data[read], log_patch_1); ++ if (log_patch_2 > 0) { ++ /* wrapped tail is appended after the first segment */ ++ memcpy_fromio(buf + log_patch_1, &log_ctrl->log->rb.data[0], ++ log_patch_2); ++ } ++ ++ uint8_t last_frame_size = log_size % 64; ++ ++ for (i = 0; i < log_size - last_frame_size; i += 64) { ++ seq_printf(file, " %*pEp", 64, buf + i); ++ } ++ if (last_frame_size) { ++ seq_printf(file, " %*pEp", last_frame_size, ++ buf + log_size - last_frame_size); ++ } ++ ++ __raw_writel(write, &log_ctrl->log->rb.read); ++ kfree(buf); ++ /* dcache clean */ ++ ALT_CMO_OP(clean, (phys_to_virt(log_ctrl->log_phy)), ++ sizeof(struct th1520_hw_log), riscv_cbom_block_size); ++ //seq_printf(file,"\n%d %d %d %d %d\n",log_patch_1, log_patch_2, log_size ,last_frame_size, read); ++ seq_printf( ++ file, ++ "\n****************** end device log <<<<<<<<<<<<<<<<<\n"); ++ return 0; ++} ++
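Not part of the patch: log_proc_show() above drains a producer/consumer ring in which `read`/`write` are offsets into a `size`-byte buffer, so a wrapped range has to be copied in two pieces. The generic pattern looks like this (the driver's own split differs in off-by-one details around its `+ 1`):

    #include <string.h>

    /* Copy len bytes starting at offset `read` out of a ring of `size` bytes. */
    static void ring_copy(char *dst, const char *ring, unsigned int size,
                          unsigned int read, unsigned int len)
    {
            unsigned int first = len;

            if (read + len > size)          /* range wraps past the end */
                    first = size - read;

            memcpy(dst, ring + read, first);
            if (len > first)                /* wrapped tail restarts at offset 0 */
                    memcpy(dst + first, ring, len - first);
    }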
++static bool th1520_panic_init(struct th1520_hw_log *hw_log, size_t size) ++{ ++ if (size < sizeof(struct th1520_hw_log)) ++ return false; ++ ++ hw_log->rb.read = 0; ++ hw_log->rb.size = size - sizeof(struct th1520_hw_log); ++ return true; ++} ++ ++void *th1520_create_panic_log_proc(phys_addr_t log_phy, void *dir, ++ void *log_info_addr, size_t size) ++{ ++ struct th1520_proc_log_ctrl *log_ctrl = ++ kmalloc(sizeof(struct th1520_proc_log_ctrl), GFP_KERNEL); ++ ++ if (log_ctrl == NULL) ++ return NULL; ++ ++ log_ctrl->log = log_info_addr; ++ log_ctrl->log_phy = log_phy; ++ ++ th1520_panic_init(log_ctrl->log, size); ++ ++ log_ctrl->log_proc_file = proc_create_single_data( ++ "proc_log", 0644, dir, &log_proc_show, log_ctrl); ++ if (log_ctrl->log_proc_file == NULL) { ++ /* don't touch log_ctrl again after freeing it */ ++ pr_debug("Error: Could not initialize %s\n", "proc_log"); ++ kfree(log_ctrl); ++ return NULL; ++ } ++ pr_debug("%s created successfully\n", "proc_log"); ++ return log_ctrl; ++} ++ ++void th1520_remove_panic_log_proc(void *arg) ++{ ++ struct th1520_proc_log_ctrl *log_ctrl = ++ (struct th1520_proc_log_ctrl *)arg; ++ ++ proc_remove(log_ctrl->log_proc_file); ++ kfree(log_ctrl); ++ pr_debug("th1520 proc log removed\n"); ++} +\ No newline at end of file +diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig +index 904b71c06eba..77d2d065a1b2 100644 +--- a/drivers/gpio/Kconfig ++++ b/drivers/gpio/Kconfig +@@ -1828,6 +1828,15 @@ config GPIO_SIM + This enables the GPIO simulator - a configfs-based GPIO testing + driver. + ++config GPIO_K1X ++ bool "Spacemit k1x GPIO support" ++ depends on PINCTRL_SPACEMIT_K1X ++ help ++ Say yes here to support the k1x GPIO device. ++ The k1x GPIO device may have several banks, and each ++ bank controls at most 32 GPIO pins. The number of banks ++ is passed via device tree or platform data.
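Not part of the patch: the bank layout this help text describes is a plain power-of-two split — the same arithmetic the k1x_gpio_to_bank_idx()/k1x_gpio_to_bank_offset() macros in the driver below implement. A minimal sketch, assuming the driver's 32 GPIOs per bank:

    #define BANK_SIZE 32u
    #define BANK_MASK (BANK_SIZE - 1)

    /* Global GPIO number -> (bank index, bit within bank). */
    static unsigned int gpio_bank(unsigned int gpio)
    {
            return gpio / BANK_SIZE;        /* e.g. gpio 70 -> bank 2 */
    }

    static unsigned int gpio_bit(unsigned int gpio)
    {
            return gpio & BANK_MASK;        /* e.g. gpio 70 -> bit 6 */
    }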
++ + endmenu + + endif +diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile +index e44a700ec7d3..06e2c4fcb6c3 100644 +--- a/drivers/gpio/Makefile ++++ b/drivers/gpio/Makefile +@@ -78,6 +78,7 @@ obj-$(CONFIG_GPIO_IMX_SCU) += gpio-imx-scu.o + obj-$(CONFIG_GPIO_IT87) += gpio-it87.o + obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o + obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o ++obj-$(CONFIG_GPIO_K1X) += gpio-k1x.o + obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o + obj-$(CONFIG_GPIO_LATCH) += gpio-latch.o + obj-$(CONFIG_GPIO_LJCA) += gpio-ljca.o +diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c +index 6b7d47a52b10..8a63ff1e5f73 100644 +--- a/drivers/gpio/gpio-dwapb.c ++++ b/drivers/gpio/gpio-dwapb.c +@@ -415,13 +415,12 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc, + static int dwapb_gpio_set_config(struct gpio_chip *gc, unsigned offset, + unsigned long config) + { +- u32 debounce; +- +- if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) +- return -ENOTSUPP; ++ if (pinconf_to_config_param(config) == PIN_CONFIG_INPUT_DEBOUNCE) { ++ u32 debounce = pinconf_to_config_argument(config); ++ return dwapb_gpio_set_debounce(gc, offset, debounce); ++ } + +- debounce = pinconf_to_config_argument(config); +- return dwapb_gpio_set_debounce(gc, offset, debounce); ++ return gpiochip_generic_config(gc, offset, config); + } + + static int dwapb_convert_irqs(struct dwapb_gpio_port_irqchip *pirq, +@@ -531,10 +530,14 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio, + port->gc.fwnode = pp->fwnode; + port->gc.ngpio = pp->ngpio; + port->gc.base = pp->gpio_base; ++ port->gc.request = gpiochip_generic_request; ++ port->gc.free = gpiochip_generic_free; + + /* Only port A support debounce */ + if (pp->idx == 0) + port->gc.set_config = dwapb_gpio_set_config; ++ else ++ port->gc.set_config = gpiochip_generic_config; + + /* Only port A can provide interrupts in all configurations of the IP */ + if (pp->idx == 0) +diff --git a/drivers/gpio/gpio-k1x.c b/drivers/gpio/gpio-k1x.c +new file mode 100644 +index 000000000000..4491a9ca4169 +--- /dev/null ++++ b/drivers/gpio/gpio-k1x.c +@@ -0,0 +1,407 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * spacemit-k1x gpio driver file ++ * ++ * Copyright (C) 2023 Spacemit ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define GPLR 0x0 ++#define GPDR 0xc ++#define GPSR 0x18 ++#define GPCR 0x24 ++#define GRER 0x30 ++#define GFER 0x3c ++#define GEDR 0x48 ++#define GSDR 0x54 ++#define GCDR 0x60 ++#define GSRER 0x6c ++#define GCRER 0x78 ++#define GSFER 0x84 ++#define GCFER 0x90 ++#define GAPMASK 0x9c ++#define GCPMASK 0xa8 ++ ++#define K1X_BANK_GPIO_NUMBER (32) ++#define BANK_GPIO_MASK (K1X_BANK_GPIO_NUMBER - 1) ++ ++#define k1x_gpio_to_bank_idx(gpio) ((gpio) / K1X_BANK_GPIO_NUMBER) ++#define k1x_gpio_to_bank_offset(gpio) ((gpio) & BANK_GPIO_MASK) ++#define k1x_bank_to_gpio(idx, offset) (((idx) * K1X_BANK_GPIO_NUMBER) | \ ++ ((offset) & BANK_GPIO_MASK)) ++ ++struct k1x_gpio_bank { ++ void __iomem *reg_bank; ++ u32 irq_mask; ++ u32 irq_rising_edge; ++ u32 irq_falling_edge; ++}; ++ ++struct k1x_gpio_chip { ++ struct gpio_chip chip; ++ void __iomem *reg_base; ++ int irq; ++ struct irq_domain *domain; ++ unsigned int ngpio; ++ unsigned int nbank; ++ struct k1x_gpio_bank *banks; ++}; ++ ++static int k1x_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ ++ 
return irq_create_mapping(k1x_chip->domain, offset); ++} ++ ++static int k1x_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ ++ writel(bit, bank->reg_bank + GCDR); ++ ++ return 0; ++} ++ ++static int k1x_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) ++{ ++ struct k1x_gpio_chip *k1x_chip = ++ container_of(chip, struct k1x_gpio_chip, chip); ++ struct k1x_gpio_bank *bank = ++ &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ ++ /* Set value first. */ ++ writel(bit, bank->reg_bank + (value ? GPSR : GPCR)); ++ writel(bit, bank->reg_bank + GSDR); ++ ++ return 0; ++} ++ ++static int k1x_gpio_get(struct gpio_chip *chip, unsigned int offset) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ u32 gplr; ++ ++ gplr = readl(bank->reg_bank + GPLR); ++ ++ return !!(gplr & bit); ++} ++ ++static void k1x_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ u32 gpdr; ++ ++ gpdr = readl(bank->reg_bank + GPDR); ++ /* Is it configured as output? */ ++ if (gpdr & bit) ++ writel(bit, bank->reg_bank + (value ? GPSR : GPCR)); ++} ++ ++#ifdef CONFIG_OF_GPIO ++static int k1x_gpio_of_xlate(struct gpio_chip *chip, ++ const struct of_phandle_args *gpiospec, ++ u32 *flags) ++{ ++ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ ++ /* GPIO index start from 0. 
*/ ++ if (gpiospec->args[0] >= k1x_chip->ngpio) ++ return -EINVAL; ++ ++ if (flags) ++ *flags = gpiospec->args[1]; ++ ++ return gpiospec->args[0]; ++} ++#endif ++ ++static int k1x_gpio_irq_type(struct irq_data *d, unsigned int type) ++{ ++ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ int gpio = irqd_to_hwirq(d); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ ++ if (type & IRQ_TYPE_EDGE_RISING) { ++ bank->irq_rising_edge |= bit; ++ writel(bit, bank->reg_bank + GSRER); ++ } else { ++ bank->irq_rising_edge &= ~bit; ++ writel(bit, bank->reg_bank + GCRER); ++ } ++ ++ if (type & IRQ_TYPE_EDGE_FALLING) { ++ bank->irq_falling_edge |= bit; ++ writel(bit, bank->reg_bank + GSFER); ++ } else { ++ bank->irq_falling_edge &= ~bit; ++ writel(bit, bank->reg_bank + GCFER); ++ } ++ ++ return 0; ++} ++ ++static irqreturn_t k1x_gpio_demux_handler(int irq, void *data) ++{ ++ int i, n; ++ u32 gedr; ++ unsigned long pending = 0; ++ unsigned int irq_num, irqs_handled = 0; ++ struct k1x_gpio_bank *bank; ++ struct k1x_gpio_chip *k1x_chip = (struct k1x_gpio_chip *)data; ++ ++ for (i = 0; i < k1x_chip->nbank; i++) { ++ bank = &k1x_chip->banks[i]; ++ ++ gedr = readl(bank->reg_bank + GEDR); ++ if (!gedr) ++ continue; ++ ++ writel(gedr, bank->reg_bank + GEDR); ++ gedr = gedr & bank->irq_mask; ++ ++ if (!gedr) ++ continue; ++ pending = gedr; ++ for_each_set_bit(n, &pending, BITS_PER_LONG) { ++ irq_num = irq_find_mapping(k1x_chip->domain, ++ k1x_bank_to_gpio(i, n)); ++ generic_handle_irq(irq_num); ++ } ++ irqs_handled++; ++ } ++ ++ return irqs_handled ? IRQ_HANDLED : IRQ_NONE; ++} ++ ++static void k1x_ack_muxed_gpio(struct irq_data *d) ++{ ++ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ int gpio = irqd_to_hwirq(d); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ ++ writel(bit, bank->reg_bank + GEDR); ++} ++ ++static void k1x_mask_muxed_gpio(struct irq_data *d) ++{ ++ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ int gpio = irqd_to_hwirq(d); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ ++ bank->irq_mask &= ~bit; ++ ++ /* Clear the bit of rising and falling edge detection. */ ++ writel(bit, bank->reg_bank + GCRER); ++ writel(bit, bank->reg_bank + GCFER); ++} ++ ++static void k1x_unmask_muxed_gpio(struct irq_data *d) ++{ ++ int gpio = irqd_to_hwirq(d); ++ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; ++ ++ bank->irq_mask |= bit; ++ ++ /* Set the bit of rising and falling edge detection if the gpio has. 
*/ ++ writel(bit & bank->irq_rising_edge, bank->reg_bank + GSRER); ++ writel(bit & bank->irq_falling_edge, bank->reg_bank + GSFER); ++} ++ ++static struct irq_chip k1x_muxed_gpio_chip = { ++ .name = "k1x-gpio-irqchip", ++ .irq_ack = k1x_ack_muxed_gpio, ++ .irq_mask = k1x_mask_muxed_gpio, ++ .irq_unmask = k1x_unmask_muxed_gpio, ++ .irq_set_type = k1x_gpio_irq_type, ++ .flags = IRQCHIP_SKIP_SET_WAKE, ++}; ++ ++static const struct of_device_id k1x_gpio_dt_ids[] = { ++ { .compatible = "spacemit,k1x-gpio"}, ++ {} ++}; ++ ++static int k1x_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) ++{ ++ irq_set_chip_and_handler(irq, &k1x_muxed_gpio_chip, handle_edge_irq); ++ irq_set_chip_data(irq, d->host_data); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops k1x_gpio_irq_domain_ops = { ++ .map = k1x_irq_domain_map, ++ .xlate = irq_domain_xlate_twocell, ++}; ++ ++static int k1x_gpio_probe_dt(struct platform_device *pdev, struct k1x_gpio_chip *k1x_chip) ++{ ++ u32 offset; ++ int i, nbank, ret; ++ struct device_node *child; ++ struct device_node *np = pdev->dev.of_node; ++ ++ nbank = of_get_child_count(np); ++ if (nbank == 0) ++ return -EINVAL; ++ ++ k1x_chip->banks = devm_kcalloc(&pdev->dev, nbank, ++ sizeof(*k1x_chip->banks), ++ GFP_KERNEL); ++ if (!k1x_chip->banks) ++ return -ENOMEM; ++ ++ i = 0; ++ for_each_child_of_node(np, child) { ++ ret = of_property_read_u32(child, "reg-offset", &offset); ++ if (ret) { ++ of_node_put(child); ++ return ret; ++ } ++ k1x_chip->banks[i].reg_bank = k1x_chip->reg_base + offset; ++ i++; ++ } ++ ++ k1x_chip->nbank = nbank; ++ k1x_chip->ngpio = k1x_chip->nbank * K1X_BANK_GPIO_NUMBER; ++ ++ return 0; ++} ++ ++static int k1x_gpio_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *np; ++ struct k1x_gpio_chip *k1x_chip; ++ struct k1x_gpio_bank *bank; ++ struct resource *res; ++ struct irq_domain *domain; ++ struct clk *clk; ++ ++ int irq, i, ret; ++ void __iomem *base; ++ ++ np = pdev->dev.of_node; ++ if (!np) ++ return -EINVAL; ++ ++ k1x_chip = devm_kzalloc(dev, sizeof(*k1x_chip), GFP_KERNEL); ++ if (!k1x_chip) ++ return -ENOMEM; ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return irq; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) ++ return -EINVAL; ++ base = devm_ioremap_resource(dev, res); ++ if (IS_ERR(base)) ++ /* devm_ioremap_resource() returns an ERR_PTR, never NULL */ ++ return PTR_ERR(base); ++ ++ k1x_chip->irq = irq; ++ k1x_chip->reg_base = base; ++ ++ ret = k1x_gpio_probe_dt(pdev, k1x_chip); ++ if (ret) { ++ dev_err(dev, "Fail to initialize gpio unit, error %d.\n", ret); ++ return ret; ++ } ++ ++ clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(clk)) { ++ dev_err(dev, "Fail to get gpio clock, error %ld.\n", ++ PTR_ERR(clk)); ++ return PTR_ERR(clk); ++ } ++ ret = clk_prepare_enable(clk); ++ if (ret) { ++ dev_err(dev, "Fail to enable gpio clock, error %d.\n", ret); ++ return ret; ++ } ++ ++ domain = irq_domain_add_linear(np, k1x_chip->ngpio, &k1x_gpio_irq_domain_ops, k1x_chip); ++ if (!domain) ++ return -EINVAL; ++ ++ k1x_chip->domain = domain; ++ ++ /* Initialize the gpio chip */ ++ k1x_chip->chip.label = "k1x-gpio"; ++ k1x_chip->chip.request = gpiochip_generic_request; ++ k1x_chip->chip.free = gpiochip_generic_free; ++ k1x_chip->chip.direction_input = k1x_gpio_direction_input; ++ k1x_chip->chip.direction_output = k1x_gpio_direction_output; ++ k1x_chip->chip.get = k1x_gpio_get; ++ k1x_chip->chip.set = k1x_gpio_set; ++ k1x_chip->chip.to_irq = k1x_gpio_to_irq; ++#ifdef CONFIG_OF_GPIO ++ k1x_chip->chip.fwnode = of_fwnode_handle(np); ++ k1x_chip->chip.of_xlate = k1x_gpio_of_xlate; ++ k1x_chip->chip.of_gpio_n_cells = 2; ++#endif ++ k1x_chip->chip.ngpio = k1x_chip->ngpio; ++ ++ if (devm_request_irq(&pdev->dev, irq, k1x_gpio_demux_handler, 0, ++ k1x_chip->chip.label, k1x_chip)) { ++ dev_err(&pdev->dev, "failed to request high IRQ\n"); ++ ret = -ENOENT; ++ goto err; ++ } ++ ++ ret = gpiochip_add(&k1x_chip->chip); ++ if (ret) ++ goto err; ++ ++ /* clear all GPIO edge detects */ ++ for (i = 0; i < k1x_chip->nbank; i++) { ++ bank = &k1x_chip->banks[i]; ++ writel(0xffffffff, bank->reg_bank + GCFER); ++ writel(0xffffffff, bank->reg_bank + GCRER); ++ /* Unmask edge detection to AP. */ ++ writel(0xffffffff, bank->reg_bank + GAPMASK); ++ } ++ ++ return 0; ++err: ++ irq_domain_remove(domain); ++ return ret; ++} ++
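Not part of the patch: k1x_gpio_of_xlate() above implements the usual two-cell GPIO specifier, where cell 0 is the chip-relative GPIO index and cell 1 carries the flags. A standalone sketch of that translation under the same bounds check:

    struct gpiospec { unsigned int args[2]; }; /* stand-in for of_phandle_args */

    static int xlate_twocell(const struct gpiospec *spec,
                             unsigned int ngpio, unsigned int *flags)
    {
            if (spec->args[0] >= ngpio)
                    return -1;              /* -EINVAL in the driver */
            if (flags)
                    *flags = spec->args[1];
            return spec->args[0];           /* GPIO offset within the chip */
    }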
++static struct platform_driver k1x_gpio_driver = { ++ .probe = k1x_gpio_probe, ++ .driver = { ++ .name = "k1x-gpio", ++ .of_match_table = k1x_gpio_dt_ids, ++ }, ++}; ++ ++static int __init k1x_gpio_init(void) ++{ ++ return platform_driver_register(&k1x_gpio_driver); ++} ++subsys_initcall(k1x_gpio_init); +diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c +index b882b26ab500..d502bb36434b 100644 +--- a/drivers/gpio/gpio-pca953x.c ++++ b/drivers/gpio/gpio-pca953x.c +@@ -1322,12 +1322,20 @@ static const struct of_device_id pca953x_dt_ids[] = { + + MODULE_DEVICE_TABLE(of, pca953x_dt_ids); + +-static SIMPLE_DEV_PM_OPS(pca953x_pm_ops, pca953x_suspend, pca953x_resume); ++#ifdef CONFIG_PM_SLEEP ++static const struct dev_pm_ops pca953x_pm_ops = { ++ SET_LATE_SYSTEM_SLEEP_PM_OPS(pca953x_suspend, ++ pca953x_resume) ++}; + ++#define PCA953X_PM_OPS &pca953x_pm_ops ++#else ++#define PCA953X_PM_OPS NULL ++#endif + static struct i2c_driver pca953x_driver = { + .driver = { + .name = "pca953x", +- .pm = &pca953x_pm_ops, ++ .pm = PCA953X_PM_OPS, + .of_match_table = pca953x_dt_ids, + .acpi_match_table = pca953x_acpi_ids, + }, +diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig +index d1cad875d2f7..191c700fde97 100644 +--- a/drivers/gpu/drm/Kconfig ++++ b/drivers/gpu/drm/Kconfig +@@ -390,6 +390,10 @@ source "drivers/gpu/drm/sprd/Kconfig" + + source "drivers/gpu/drm/phytium/Kconfig" + ++source "drivers/gpu/drm/verisilicon/Kconfig" ++ ++source "drivers/gpu/drm/img-rogue/Kconfig" ++ + config DRM_HYPERV + tristate "DRM Support for Hyper-V synthetic video device" + depends on DRM && PCI && MMU && HYPERV +diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile +index f93fd0ac8661..caff5405c5f4 100644 +--- a/drivers/gpu/drm/Makefile ++++ b/drivers/gpu/drm/Makefile +@@ -184,6 +184,7 @@ obj-y += hisilicon/ + obj-y += mxsfb/ + obj-y += tiny/ + obj-$(CONFIG_DRM_PL111) += pl111/ ++obj-$(CONFIG_DRM_POWERVR_ROGUE) += img-rogue/ + obj-$(CONFIG_DRM_TVE200) += tve200/ + obj-$(CONFIG_DRM_XEN) += xen/ + obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ +@@ -199,3 +200,4 @@ obj-y += solomon/ + obj-$(CONFIG_DRM_SPRD) += sprd/ + obj-y += loongson/ + obj-$(CONFIG_DRM_PHYTIUM) += phytium/ ++obj-$(CONFIG_DRM_VERISILICON) += verisilicon/ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index 8136e49cb6d1..9a5b5dc210ba 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -1109,6 +1109,8 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) + u16 cmd; + int r; + ++ return 0; ++ + if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) + return 0; + +diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig +index 
d3c3d3ab7225..9d4a5f8ef43f 100644 +--- a/drivers/gpu/drm/amd/amdkfd/Kconfig ++++ b/drivers/gpu/drm/amd/amdkfd/Kconfig +@@ -5,7 +5,7 @@ + + config HSA_AMD + bool "HSA kernel driver for AMD GPU devices" +- depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64) ++ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || RISCV) + select HMM_MIRROR + select MMU_NOTIFIER + select DRM_AMDGPU_USERPTR +diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig +index 901d1961b739..49b33b2f6701 100644 +--- a/drivers/gpu/drm/amd/display/Kconfig ++++ b/drivers/gpu/drm/amd/display/Kconfig +@@ -8,7 +8,10 @@ config DRM_AMD_DC + depends on BROKEN || !CC_IS_CLANG || ARM64 || RISCV || SPARC64 || X86_64 + select SND_HDA_COMPONENT if SND_HDA_CORE + # !CC_IS_CLANG: https://github.com/ClangBuiltLinux/linux/issues/1752 +- select DRM_AMD_DC_FP if (X86 || LOONGARCH || (PPC64 && ALTIVEC) || (ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG)) ++ select DRM_AMD_DC_FP if ARM64 && KERNEL_MODE_NEON && !CC_IS_CLANG ++ select DRM_AMD_DC_FP if PPC64 && ALTIVEC ++ select DRM_AMD_DC_FP if RISCV && FPU ++ select DRM_AMD_DC_FP if LOONGARCH || X86 + help + Choose this option if you want to use the new display engine + support for AMDGPU. This adds required support for Vega and +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +index 172aa10a8800..53a7122ba98d 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +@@ -35,6 +35,8 @@ + #include + #elif defined(CONFIG_LOONGARCH) + #include ++#elif defined(CONFIG_RISCV) ++#include + #endif + + /** +@@ -90,7 +92,7 @@ void dc_fpu_begin(const char *function_name, const int line) + *pcpu += 1; + + if (*pcpu == 1) { +-#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) ++#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) || defined(CONFIG_RISCV) + migrate_disable(); + kernel_fpu_begin(); + #elif defined(CONFIG_PPC64) +@@ -130,7 +132,7 @@ void dc_fpu_end(const char *function_name, const int line) + pcpu = get_cpu_ptr(&fpu_recursion_depth); + *pcpu -= 1; + if (*pcpu <= 0) { +-#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) ++#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH) || defined(CONFIG_RISCV) + kernel_fpu_end(); + migrate_enable(); + #elif defined(CONFIG_PPC64) +diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile +index 0ba9a7997d56..abd04d13997d 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile ++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile +@@ -43,6 +43,12 @@ dml_ccflags := -mfpu=64 + dml_rcflags := -msoft-float + endif + ++ifdef CONFIG_RISCV ++include $(srctree)/arch/riscv/Makefile.isa ++# Remove V from the ISA string, like in arch/riscv/Makefile, but keep F and D. 
++dml_ccflags := -march=$(subst v0p7,,$(riscv-march-y)) ++endif ++ + ifdef CONFIG_CC_IS_GCC + ifneq ($(call gcc-min-version, 70100),y) + IS_OLD_GCC = 1 +diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +index 6c1d79474505..ae258bfe6b38 100644 +--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c ++++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +@@ -19,6 +19,8 @@ + #include + #include + #include ++#include ++#include + + #include + +@@ -49,6 +51,10 @@ + + #define HDMI14_MAX_TMDSCLK 340000000 + ++#define HDMI_DDC_CHECK_MAX_RETRIES 100 ++#define HDMI_DDC_CHECK_NORMAL 2 ++#define HDMI_SCRAMBLING_RETRIES 20 ++ + static const u16 csc_coeff_default[3][4] = { + { 0x2000, 0x0000, 0x0000, 0x0000 }, + { 0x0000, 0x2000, 0x0000, 0x0000 }, +@@ -140,6 +146,7 @@ struct dw_hdmi { + struct clk *isfr_clk; + struct clk *iahb_clk; + struct clk *cec_clk; ++ struct clk *pix_clk; + struct dw_hdmi_i2c *i2c; + + struct hdmi_data_info hdmi_data; +@@ -197,7 +204,10 @@ struct dw_hdmi { + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + enum drm_connector_status last_connector_result; ++ ++ struct notifier_block pm_notify; /*Used to receive STD notification*/ + }; ++static bool g_is_hdmi_std_suspend __nosavedata; + + #define HDMI_IH_PHY_STAT0_RX_SENSE \ + (HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \ +@@ -676,10 +686,12 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi, + cts = 0; + } + ++ hdmi->audio_enable = true; + spin_lock_irq(&hdmi->audio_lock); + hdmi->audio_n = n; + hdmi->audio_cts = cts; + hdmi_set_cts_n(hdmi, cts, hdmi->audio_enable ? n : 0); ++ hdmi_writeb(hdmi, 0x4, HDMI_AUD_INPUTCLKFS); + spin_unlock_irq(&hdmi->audio_lock); + } + +@@ -1067,18 +1079,6 @@ static void hdmi_video_sample(struct dw_hdmi *hdmi) + hdmi_writeb(hdmi, 0x0, HDMI_TX_BCBDATA1); + } + +-static int is_color_space_conversion(struct dw_hdmi *hdmi) +-{ +- struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data; +- bool is_input_rgb, is_output_rgb; +- +- is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_in_bus_format); +- is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format); +- +- return (is_input_rgb != is_output_rgb) || +- (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range); +-} +- + static int is_color_space_decimation(struct dw_hdmi *hdmi) + { + if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) +@@ -1103,13 +1103,6 @@ static int is_color_space_interpolation(struct dw_hdmi *hdmi) + return 0; + } + +-static bool is_csc_needed(struct dw_hdmi *hdmi) +-{ +- return is_color_space_conversion(hdmi) || +- is_color_space_decimation(hdmi) || +- is_color_space_interpolation(hdmi); +-} +- + static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi) + { + const u16 (*csc_coeff)[3][4] = &csc_coeff_default; +@@ -1534,6 +1527,11 @@ static int dw_hdmi_phy_power_on(struct dw_hdmi *hdmi) + unsigned int i; + u8 val; + ++ if (g_is_hdmi_std_suspend) { ++ pr_info("%s under std mod, do not resume\n", __func__); ++ return 0; ++ } ++ + if (phy->gen == 1) { + dw_hdmi_phy_enable_powerdown(hdmi, false); + +@@ -1978,6 +1976,45 @@ static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi, + HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN); + } + ++static bool dw_hdmi_ddc_debounce(struct dw_hdmi *hdmi) ++{ ++ u8 config, val, orig; ++ int ret, count = 0, check = 0; ++ ++ drm_scdc_readb(hdmi->ddc, SCDC_TMDS_CONFIG, &orig); ++ ++ do { ++ drm_scdc_readb(hdmi->ddc, SCDC_TMDS_CONFIG, &config); ++ if (count & 0x1) ++ config |= 
SCDC_SCRAMBLING_ENABLE; ++ else ++ config &= ~SCDC_SCRAMBLING_ENABLE; ++ drm_scdc_writeb(hdmi->ddc, SCDC_TMDS_CONFIG, config); ++ drm_scdc_readb(hdmi->ddc, SCDC_TMDS_CONFIG, &val); ++ ++ if (val != config) ++ check = 0; ++ else ++ check++; ++ if (check >= HDMI_DDC_CHECK_NORMAL) { ++ ret = true; ++ goto out; ++ } ++ ++ if (count++ >= HDMI_DDC_CHECK_MAX_RETRIES) { ++ dev_err(hdmi->dev, "exceed max retries:%d\n", HDMI_DDC_CHECK_MAX_RETRIES); ++ ret = false; ++ goto out; ++ } ++ ++ usleep_range(10000, 15000); ++ } while (1); ++ ++out: ++ drm_scdc_writeb(hdmi->ddc, SCDC_TMDS_CONFIG, orig); ++ return ret; ++} ++ + static void hdmi_av_composer(struct dw_hdmi *hdmi, + const struct drm_display_info *display, + const struct drm_display_mode *mode) +@@ -2096,6 +2133,7 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, + * Source Devices compliant shall set the + * Source Version = 1. + */ ++ dw_hdmi_ddc_debounce(hdmi); + drm_scdc_readb(hdmi->ddc, SCDC_SINK_VERSION, + &bytes); + drm_scdc_writeb(hdmi->ddc, SCDC_SOURCE_VERSION, +@@ -2120,6 +2158,11 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi, + HDMI_MC_SWRSTZ); + drm_scdc_set_scrambling(hdmi->curr_conn, 0); + } ++ } else { ++ hdmi_writeb(hdmi, 0, HDMI_FC_SCRAMBLER_CTRL); ++ hdmi_writeb(hdmi, (u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, ++ HDMI_MC_SWRSTZ); ++ drm_scdc_set_scrambling(hdmi->curr_conn, 0); + } + + /* Set up horizontal active pixel width */ +@@ -2164,33 +2207,6 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi) + hdmi_writeb(hdmi, 0x0B, HDMI_FC_CH0PREAM); + hdmi_writeb(hdmi, 0x16, HDMI_FC_CH1PREAM); + hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM); +- +- /* Enable pixel clock and tmds data path */ +- hdmi->mc_clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE | +- HDMI_MC_CLKDIS_CSCCLK_DISABLE | +- HDMI_MC_CLKDIS_AUDCLK_DISABLE | +- HDMI_MC_CLKDIS_PREPCLK_DISABLE | +- HDMI_MC_CLKDIS_TMDSCLK_DISABLE; +- hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE; +- hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); +- +- hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE; +- hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); +- +- /* Enable csc path */ +- if (is_csc_needed(hdmi)) { +- hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE; +- hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); +- +- hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH, +- HDMI_MC_FLOWCTRL); +- } else { +- hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CSCCLK_DISABLE; +- hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); +- +- hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS, +- HDMI_MC_FLOWCTRL); +- } + } + + /* Workaround to clear the overflow condition */ +@@ -2238,8 +2254,30 @@ static void hdmi_disable_overflow_interrupts(struct dw_hdmi *hdmi) + HDMI_IH_MUTE_FC_STAT2); + } + ++static void hdmi_check_scrambling_status(struct dw_hdmi *hdmi, ++ struct drm_connector *connector) ++{ ++ int count = 0; ++ ++ if (!dw_hdmi_support_scdc(hdmi, &connector->display_info)) ++ return; ++ ++ do { ++ if (drm_scdc_get_scrambling_status(connector)) ++ break; ++ ++ /* polling scrambling_status up to a maximum of 200ms */ ++ if (count++ >= HDMI_SCRAMBLING_RETRIES) { ++ dev_err(hdmi->dev, ++ "TMDS link of scrambling_status is not ready\n"); ++ break; ++ } ++ usleep_range(10000, 11000); ++ } while (1); ++} ++ + static int dw_hdmi_setup(struct dw_hdmi *hdmi, +- const struct drm_connector *connector, ++ struct drm_connector *connector, + const struct drm_display_mode *mode) + { + int ret; +@@ -2324,6 +2362,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, + 
hdmi_video_csc(hdmi); + hdmi_video_sample(hdmi); + hdmi_tx_hdcp_config(hdmi); ++ hdmi_check_scrambling_status(hdmi, connector); + + dw_hdmi_clear_overflow(hdmi); + +@@ -2952,6 +2991,7 @@ static void dw_hdmi_bridge_atomic_disable(struct drm_bridge *bridge, + dw_hdmi_update_phy_mask(hdmi); + handle_plugged_change(hdmi, false); + mutex_unlock(&hdmi->mutex); ++ pm_runtime_put(hdmi->dev); + } + + static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge, +@@ -2964,6 +3004,7 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge, + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + ++ pm_runtime_get_sync(hdmi->dev); + mutex_lock(&hdmi->mutex); + hdmi->disabled = false; + hdmi->curr_conn = connector; +@@ -3038,6 +3079,8 @@ static irqreturn_t dw_hdmi_hardirq(int irq, void *dev_id) + + intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); + if (intr_stat) { ++ hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0); ++ hdmi_writeb(hdmi, 0xff, HDMI_PHY_MASK0); + hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0); + return IRQ_WAKE_THREAD; + } +@@ -3079,6 +3122,7 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) + u8 intr_stat, phy_int_pol, phy_pol_mask, phy_stat; + enum drm_connector_status status = connector_status_unknown; + ++ msleep(50); + intr_stat = hdmi_readb(hdmi, HDMI_IH_PHY_STAT0); + phy_int_pol = hdmi_readb(hdmi, HDMI_PHY_POL0); + phy_stat = hdmi_readb(hdmi, HDMI_PHY_STAT0); +@@ -3138,6 +3182,7 @@ static irqreturn_t dw_hdmi_irq(int irq, void *dev_id) + hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0); + hdmi_writeb(hdmi, ~(HDMI_IH_PHY_STAT0_HPD | HDMI_IH_PHY_STAT0_RX_SENSE), + HDMI_IH_MUTE_PHY_STAT0); ++ hdmi_writeb(hdmi, (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE), HDMI_PHY_MASK0); + + return IRQ_HANDLED; + } +@@ -3278,6 +3323,27 @@ static void dw_hdmi_init_hw(struct dw_hdmi *hdmi) + hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data); + } + ++static int hdmi_light_notify(struct notifier_block *notify_block, ++ unsigned long mode, void *unused) ++{ ++ pr_info("pm_notify: mode (%ld)\n", mode); ++ ++ switch (mode) { ++ case PM_HIBERNATION_PREPARE: ++ pr_info("hdmi_pm_notify PM_HIBERNATION_PREPARE\n"); ++ g_is_hdmi_std_suspend = true; ++ break; ++ case PM_POST_HIBERNATION: ++ pr_info("hdmi_pm_notify PM_POST_HIBERNATION\n"); ++ g_is_hdmi_std_suspend = false; ++ break; ++ default: ++ break; ++ } ++ ++ return NOTIFY_DONE; ++} ++ + /* ----------------------------------------------------------------------------- + * Probe/remove API, used from platforms based on the DRM bridge API. 
+ */ +@@ -3362,7 +3428,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, + hdmi->disabled = true; + hdmi->rxsense = true; + hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE); +- hdmi->mc_clkdis = 0x7f; ++ hdmi->mc_clkdis = 0x0; + hdmi->last_connector_result = connector_status_disconnected; + + mutex_init(&hdmi->mutex); +@@ -3467,6 +3533,13 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, + } + } + ++ hdmi->pix_clk = devm_clk_get(hdmi->dev, "pixclk"); ++ if (IS_ERR(hdmi->pix_clk)) { ++ ret = PTR_ERR(hdmi->pix_clk); ++ dev_err(hdmi->dev, "Unable to get HDMI pix clk: %d\n", ret); ++ goto err_iahb; ++ } ++ + /* Product and revision IDs */ + hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8) + | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0); +@@ -3618,6 +3691,13 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, + + drm_bridge_add(&hdmi->bridge); + ++ if (IS_ENABLED(CONFIG_PM)) ++ hdmi->pm_notify.notifier_call = hdmi_light_notify; ++ ++ ret = register_pm_notifier(&hdmi->pm_notify); ++ if (ret) ++ dev_err(dev, "register_pm_notifier failed: %d\n", ret); ++ + return hdmi; + + err_iahb: +@@ -3634,6 +3714,8 @@ EXPORT_SYMBOL_GPL(dw_hdmi_probe); + + void dw_hdmi_remove(struct dw_hdmi *hdmi) + { ++ unregister_pm_notifier(&hdmi->pm_notify); ++ + drm_bridge_remove(&hdmi->bridge); + + if (hdmi->audio && !IS_ERR(hdmi->audio)) +@@ -3672,6 +3754,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev, + ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0); + if (ret) { + dw_hdmi_remove(hdmi); ++ DRM_ERROR("Failed to initialize bridge with drm\n"); + return ERR_PTR(ret); + } + +@@ -3688,9 +3771,28 @@ EXPORT_SYMBOL_GPL(dw_hdmi_unbind); + void dw_hdmi_resume(struct dw_hdmi *hdmi) + { + dw_hdmi_init_hw(hdmi); ++ hdmi_init_clk_regenerator(hdmi); + } + EXPORT_SYMBOL_GPL(dw_hdmi_resume); + ++#ifdef CONFIG_PM ++int dw_hdmi_runtime_suspend(struct dw_hdmi *hdmi) ++{ ++ clk_disable_unprepare(hdmi->pix_clk); ++ clk_disable_unprepare(hdmi->cec_clk); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(dw_hdmi_runtime_suspend); ++ ++int dw_hdmi_runtime_resume(struct dw_hdmi *hdmi) ++{ ++ clk_prepare_enable(hdmi->cec_clk); ++ clk_prepare_enable(hdmi->pix_clk); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(dw_hdmi_runtime_resume); ++#endif ++ + MODULE_AUTHOR("Sascha Hauer "); + MODULE_AUTHOR("Andy Yan "); + MODULE_AUTHOR("Yakir Yang "); +diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c +index b4659cd6285a..b624a67ff7cf 100644 +--- a/drivers/gpu/drm/drm_fbdev_generic.c ++++ b/drivers/gpu/drm/drm_fbdev_generic.c +@@ -79,6 +79,7 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper, + void *screen_buffer; + u32 format; + int ret; ++ struct fb_fillrect region; + + drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", + sizes->surface_width, sizes->surface_height, +@@ -124,6 +125,14 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper, + if (ret) + goto err_drm_fb_helper_release_info; + ++ region.color = 0; ++ region.rop = ROP_COPY; ++ region.dx = 0; ++ region.dy = 0; ++ region.width = sizes->surface_width; ++ region.height = sizes->surface_height; ++ info->fbops->fb_fillrect(info, ®ion); ++ + return 0; + + err_drm_fb_helper_release_info: +diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c +index b67eafa55715..5ebe418bd383 100644 +--- a/drivers/gpu/drm/drm_gem_vram_helper.c ++++ b/drivers/gpu/drm/drm_gem_vram_helper.c +@@ -870,7 +870,7 @@ static struct 
ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo, + if (!tt) + return NULL; + +- ret = ttm_tt_init(tt, bo, page_flags, ttm_cached, 0); ++ ret = ttm_tt_init(tt, bo, page_flags, ttm_write_combined, 0); + if (ret < 0) + goto err_ttm_tt_init; + +diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h +index 001faea80fef..3555e714f014 100644 +--- a/drivers/gpu/drm/etnaviv/common.xml.h ++++ b/drivers/gpu/drm/etnaviv/common.xml.h +@@ -65,6 +65,7 @@ DEALINGS IN THE SOFTWARE. + #define chipModel_GC520 0x00000520 + #define chipModel_GC530 0x00000530 + #define chipModel_GC600 0x00000600 ++#define chipModel_GC620 0x00000620 + #define chipModel_GC700 0x00000700 + #define chipModel_GC800 0x00000800 + #define chipModel_GC860 0x00000860 +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +index b13a17276d07..8c347ab34f28 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +@@ -417,8 +417,11 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, + + if (switch_mmu_context && + gpu->sec_mode == ETNA_SEC_KERNEL) { +- unsigned short id = +- etnaviv_iommuv2_get_pta_id(gpu->mmu_context); ++ unsigned short id; ++ ++ etnaviv_iommuv2_update_pta_entry(gpu->mmu_context); ++ ++ id = etnaviv_iommuv2_get_pta_id(gpu->mmu_context); + CMD_LOAD_STATE(buffer, + VIVS_MMUv2_PTA_CONFIG, + VIVS_MMUv2_PTA_CONFIG_INDEX(id)); +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +index ad543a7cbf07..8cdc7cf65825 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +@@ -522,7 +522,11 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) + control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU; + gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control); + +- if (gpu->sec_mode == ETNA_SEC_KERNEL) { ++ if (etnaviv_is_model_rev(gpu, GC620, 0x5552)) { ++ gpu_write(gpu, 0x00800, 0x10); ++ } ++ ++ if (gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) { + gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, + VIVS_MMUv2_AHB_CONTROL_RESET); + } else { +@@ -754,7 +758,13 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) + gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config); + } + +- if (gpu->sec_mode == ETNA_SEC_KERNEL) { ++ /* FIXME: use feature bit 5 of minor features 12, G2D_DEC400EX */ ++ if (etnaviv_is_model_rev(gpu, GC620, 0x5552)) { ++ gpu_write(gpu, 0x800, 0x2010188); ++ gpu_write(gpu, 0x808, 0x3fc104); ++ } ++ ++ if (gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) { + u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL); + val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS; + gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val); +@@ -804,7 +814,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) + * On cores with security features supported, we claim control over the + * security states. 
+ */ +- if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) && ++ if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) || + (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB)) + gpu->sec_mode = ETNA_SEC_KERNEL; + +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +index 8665f2658d51..6a56f1ab4444 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +@@ -69,6 +69,37 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { + .minor_features10 = 0x00000000, + .minor_features11 = 0x00000000, + }, ++ { ++ .model = 0x620, ++ .revision = 0x5552, ++ .product_id = 0x6200, ++ .customer_id = 0x20b, ++ .eco_id = 0, ++ .stream_count = 1, ++ .register_max = 64, ++ .thread_count = 256, ++ .shader_core_count = 1, ++ .vertex_cache_size = 8, ++ .vertex_output_buffer_size = 512, ++ .pixel_pipes = 1, ++ .instruction_count = 256, ++ .num_constants = 168, ++ .buffer_size = 0, ++ .varyings_count = 8, ++ .features = 0x001b4a40, ++ .minor_features0 = 0xa0600080, ++ .minor_features1 = 0x18050000, ++ .minor_features2 = 0x04f30000, ++ .minor_features3 = 0x00060005, ++ .minor_features4 = 0x20629000, ++ .minor_features5 = 0x0003380c, ++ .minor_features6 = 0x00000000, ++ .minor_features7 = 0x00001000, ++ .minor_features8 = 0x00000000, ++ .minor_features9 = 0x00000180, ++ .minor_features10 = 0x00004000, ++ .minor_features11 = 0x00000000, ++ }, + { + .model = 0x7000, + .revision = 0x6202, +diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +index d664ae29ae20..33a28201c863 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c ++++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +@@ -186,6 +186,14 @@ static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu, + gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE); + } + ++void etnaviv_iommuv2_update_pta_entry(struct etnaviv_iommu_context *context) ++{ ++ struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); ++ ++ context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma | ++ VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K; ++} ++ + static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu, + struct etnaviv_iommu_context *context) + { +@@ -216,8 +224,7 @@ static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu, + VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH( + upper_32_bits(context->global->bad_page_dma))); + +- context->global->v2.pta_cpu[v2_context->id] = v2_context->mtlb_dma | +- VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K; ++ etnaviv_iommuv2_update_pta_entry(context); + + /* trigger a PTA load through the FE */ + prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id); +@@ -241,6 +248,7 @@ unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context) + + return v2_context->id; + } ++ + static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu, + struct etnaviv_iommu_context *context) + { +@@ -276,6 +284,8 @@ etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global) + if (!v2_context) + return NULL; + ++ v2_context->id = 0; ++#if 0 + mutex_lock(&global->lock); + v2_context->id = find_first_zero_bit(global->v2.pta_alloc, + ETNAVIV_PTA_ENTRIES); +@@ -286,6 +296,7 @@ etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global) + goto out_free; + } + mutex_unlock(&global->lock); ++#endif + + v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K, + &v2_context->mtlb_dma, GFP_KERNEL); +diff 
--git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +index c01a147f0dfd..195ef1bf8288 100644 +--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h ++++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +@@ -124,4 +124,6 @@ etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global); + u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context); + unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context); + ++void etnaviv_iommuv2_update_pta_entry(struct etnaviv_iommu_context *context); ++ + #endif /* __ETNAVIV_MMU_H__ */ +diff --git a/drivers/gpu/drm/img-rogue/Kconfig b/drivers/gpu/drm/img-rogue/Kconfig +new file mode 100644 +index 000000000000..6862c7539b6d +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/Kconfig +@@ -0,0 +1,24 @@ ++config DRM_POWERVR_ROGUE ++ tristate "PowerVR Rogue" ++ depends on HAS_IOMEM ++ depends on DRM ++ select DRM_KMS_HELPER ++ select PM_DEVFREQ ++ select DEVFREQ_GOV_SIMPLE_ONDEMAND ++ select PM_OPP ++ select DEVFREQ_THERMAL ++ select SYNC_FILE ++ help ++ Driver for PowerVR Rogue graphics hardware. ++ ++ Say Y here if your SoC contains a PowerVR Rogue GPU. For more ++ information, see . ++ ++config DRM_POWERVR_ROGUE_DEBUG ++ bool "Enable PowerVR Rogue debug features" ++ depends on DRM_POWERVR_ROGUE ++ default n ++ help ++ Add additional debug features to the PowerVR Rogue driver. ++ To build a matching userspace, enable the following build options: ++ BUILD=debug SUPPORT_PAGE_FAULT_DEBUG=1 PVRSRV_ENABLE_GPU_MEMORY_INFO=1 +diff --git a/drivers/gpu/drm/img-rogue/Makefile b/drivers/gpu/drm/img-rogue/Makefile +new file mode 100644 +index 000000000000..e35b3386c4e4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/Makefile +@@ -0,0 +1,18 @@ ++img_basedir := $(srctree)/$(src) ++include $(img_basedir)/config_kernel.mk ++ ++obj-$(CONFIG_DRM_POWERVR_ROGUE) += pvrsrvkm.o ++ ++ccflags-y += \ ++ -include config_kernel.h \ ++ -I$(img_basedir)/include/drm \ ++ -I$(img_basedir) \ ++ -I$(img_basedir)/include \ ++ -I$(img_basedir)/km \ ++ -D__linux__ ++ ++include $(img_basedir)/pvrsrvkm.mk ++ ++obj-$(CONFIG_DRM_POWERVR_ROGUE) += drm_nulldisp.o ++ ++drm_nulldisp-y += drm_nulldisp_drv.o drm_nulldisp_netlink.o drm_netlink_gem.o drm_nulldisp_gem.o +diff --git a/drivers/gpu/drm/img-rogue/allocmem.c b/drivers/gpu/drm/img-rogue/allocmem.c +new file mode 100644 +index 000000000000..5d7c85da1f98 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/allocmem.c +@@ -0,0 +1,422 @@ ++/*************************************************************************/ /*! ++@File ++@Title Host memory management implementation for Linux ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include <linux/mm.h> ++#include <linux/slab.h> ++#include <linux/spinlock.h> ++#include <linux/vmalloc.h> ++ ++#include "img_defs.h" ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "process_stats.h" ++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) ++#include "pvrsrv.h" ++#endif ++#include "osfunc.h" ++ ++ ++/* ++ * When memory statistics are disabled, memory records are used instead. ++ * In order for these to work, the PID of the process that requested the ++ * allocation needs to be stored at the end of the kmalloc'd memory, making ++ * sure 4 extra bytes are allocated to fit the PID. ++ * ++ * There is no need for this extra allocation when memory statistics are ++ * enabled, since all allocations are tracked in DebugFS mem_area files.
++ */ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS) ++#define ALLOCMEM_MEMSTATS_PADDING sizeof(IMG_UINT32) ++#else ++#define ALLOCMEM_MEMSTATS_PADDING 0UL ++#endif ++ ++/* How many times kmalloc can fail before the allocation threshold is reduced */ ++static const IMG_UINT32 g_ui32kmallocFailLimit = 10; ++/* How many kmalloc failures have happened since the last allocation threshold change */ ++static IMG_UINT32 g_ui32kmallocFailCount = 0; ++/* Current kmalloc threshold value in bytes */ ++static IMG_UINT32 g_ui32kmallocThreshold = PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD; ++/* Spinlock protecting the global variables above from concurrent modification */ ++static DEFINE_SPINLOCK(kmalloc_lock); ++ ++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) ++static DEFINE_SPINLOCK(kmalloc_leak_lock); ++static IMG_UINT32 g_ui32kmallocLeakCounter = 0; ++#endif ++ ++static inline void OSTryDecreaseKmallocThreshold(void) ++{ ++ unsigned long flags; ++ spin_lock_irqsave(&kmalloc_lock, flags); ++ ++ g_ui32kmallocFailCount++; ++ ++ if (g_ui32kmallocFailCount >= g_ui32kmallocFailLimit) ++ { ++ g_ui32kmallocFailCount = 0; ++ if (g_ui32kmallocThreshold > PAGE_SIZE) ++ { ++ g_ui32kmallocThreshold >>= 1; ++ printk(KERN_INFO "Threshold is now set to %u\n", g_ui32kmallocThreshold); ++ } ++ } ++ ++ spin_unlock_irqrestore(&kmalloc_lock, flags); ++} ++ ++static inline void OSResetKmallocFailCount(void) ++{ ++ unsigned long flags; ++ spin_lock_irqsave(&kmalloc_lock, flags); ++ ++ g_ui32kmallocFailCount = 0; ++ ++ spin_unlock_irqrestore(&kmalloc_lock, flags); ++} ++ ++static inline void _pvr_vfree(const void* pvAddr) ++{ ++#if defined(DEBUG) ++ /* The exact size is harder to come by for vmalloc, and since vmalloc ++ * allocates a whole number of pages, poison the minimum size known ++ * to have been allocated.
++ */ ++ OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, ++ PAGE_SIZE); ++#endif ++ vfree(pvAddr); ++} ++ ++static inline void _pvr_kfree(const void* pvAddr) ++{ ++#if defined(DEBUG) ++ /* Poison whole memory block */ ++ OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, ++ ksize(pvAddr)); ++#endif ++ kfree(pvAddr); ++} ++ ++static inline void _pvr_alloc_stats_add(void *pvAddr, IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) ++{ ++#if !defined(PVRSRV_ENABLE_PROCESS_STATS) ++ PVR_UNREFERENCED_PARAMETER(pvAddr); ++#else ++ if (!is_vmalloc_addr(pvAddr)) ++ { ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++ IMG_CPU_PHYADDR sCpuPAddr; ++ sCpuPAddr.uiAddr = 0; ++ ++ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ++ pvAddr, ++ sCpuPAddr, ++ ksize(pvAddr), ++ NULL, ++ OSGetCurrentClientProcessIDKM() ++ DEBUG_MEMSTATS_ARGS); ++#else ++ { ++ /* Store the PID in the final additional 4 bytes allocated */ ++ IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING); ++ *puiTemp = OSGetCurrentClientProcessIDKM(); ++ } ++ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvAddr), OSGetCurrentClientProcessIDKM()); ++#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ ++ } ++ else ++ { ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++ IMG_CPU_PHYADDR sCpuPAddr; ++ sCpuPAddr.uiAddr = 0; ++ ++ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, ++ pvAddr, ++ sCpuPAddr, ++ ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)), ++ NULL, ++ OSGetCurrentClientProcessIDKM() ++ DEBUG_MEMSTATS_ARGS); ++#else ++ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, ++ ((ui32Size + PAGE_SIZE-1) & ~(PAGE_SIZE-1)), ++ (IMG_UINT64)(uintptr_t) pvAddr, ++ OSGetCurrentClientProcessIDKM()); ++#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ ++ } ++#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */ ++} ++ ++static inline void _pvr_alloc_stats_remove(void *pvAddr) ++{ ++#if !defined(PVRSRV_ENABLE_PROCESS_STATS) ++ PVR_UNREFERENCED_PARAMETER(pvAddr); ++#else ++ if (!is_vmalloc_addr(pvAddr)) ++ { ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ { ++ IMG_UINT32 *puiTemp = IMG_OFFSET_ADDR(pvAddr, ksize(pvAddr) - ALLOCMEM_MEMSTATS_PADDING); ++ PVRSRVStatsDecrMemKAllocStat(ksize(pvAddr), *puiTemp); ++ } ++#else ++ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ++ (IMG_UINT64)(uintptr_t) pvAddr, ++ OSGetCurrentClientProcessIDKM()); ++#endif ++ } ++ else ++ { ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, ++ (IMG_UINT64)(uintptr_t) pvAddr); ++#else ++ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC, ++ (IMG_UINT64)(uintptr_t) pvAddr, ++ OSGetCurrentClientProcessIDKM()); ++#endif ++ } ++#endif /* !defined(PVRSRV_ENABLE_PROCESS_STATS) */ ++} ++ ++void *(OSAllocMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) ++{ ++ void *pvRet = NULL; ++ ++ if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold) ++ { ++ pvRet = kmalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL); ++ if (pvRet == NULL) ++ { ++ OSTryDecreaseKmallocThreshold(); ++ } ++ else ++ { ++ OSResetKmallocFailCount(); ++ } ++ } ++ ++ if (pvRet == NULL) ++ { ++ pvRet = vmalloc(ui32Size); ++ } ++ ++ if (pvRet != NULL) ++ { ++ _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS); ++ } ++ ++ return pvRet; ++} ++ ++void *(OSAllocZMem)(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS) ++{ ++ void *pvRet = NULL; ++ ++ if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) <= g_ui32kmallocThreshold) ++ { ++ pvRet = 
kzalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL); ++ if (pvRet == NULL) ++ { ++ OSTryDecreaseKmallocThreshold(); ++ } ++ else ++ { ++ OSResetKmallocFailCount(); ++ } ++ } ++ ++ if (pvRet == NULL) ++ { ++ pvRet = vzalloc(ui32Size); ++ } ++ ++ if (pvRet != NULL) ++ { ++ _pvr_alloc_stats_add(pvRet, ui32Size DEBUG_MEMSTATS_ARGS); ++ } ++ ++ return pvRet; ++} ++ ++/* ++ * The parentheses around OSFreeMem prevent the macro in allocmem.h from ++ * applying, as it would break the function's definition. ++ */ ++void (OSFreeMem)(void *pvMem) ++{ ++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) ++ unsigned long flags; ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ if (psPVRSRVData) ++ { ++ IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc; ++ ++ spin_lock_irqsave(&kmalloc_leak_lock, flags); ++ ++ g_ui32kmallocLeakCounter++; ++ if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax)) ++ { ++ g_ui32kmallocLeakCounter = 0; ++ spin_unlock_irqrestore(&kmalloc_leak_lock, flags); ++ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Skipped freeing of pointer 0x%p to trigger memory leak.", ++ __func__, ++ pvMem)); ++ return; ++ } ++ ++ spin_unlock_irqrestore(&kmalloc_leak_lock, flags); ++ } ++#endif ++ if (pvMem != NULL) ++ { ++ _pvr_alloc_stats_remove(pvMem); ++ ++ if (!is_vmalloc_addr(pvMem)) ++ { ++ _pvr_kfree(pvMem); ++ } ++ else ++ { ++ _pvr_vfree(pvMem); ++ } ++ } ++} ++ ++void *OSAllocMemNoStats(IMG_UINT32 ui32Size) ++{ ++ void *pvRet = NULL; ++ ++ if (ui32Size <= g_ui32kmallocThreshold) ++ { ++ pvRet = kmalloc(ui32Size, GFP_KERNEL); ++ if (pvRet == NULL) ++ { ++ OSTryDecreaseKmallocThreshold(); ++ } ++ else ++ { ++ OSResetKmallocFailCount(); ++ } ++ } ++ ++ if (pvRet == NULL) ++ { ++ pvRet = vmalloc(ui32Size); ++ } ++ ++ return pvRet; ++} ++ ++void *OSAllocZMemNoStats(IMG_UINT32 ui32Size) ++{ ++ void *pvRet = NULL; ++ ++ if (ui32Size <= g_ui32kmallocThreshold) ++ { ++ pvRet = kzalloc(ui32Size, GFP_KERNEL); ++ if (pvRet == NULL) ++ { ++ OSTryDecreaseKmallocThreshold(); ++ } ++ else ++ { ++ OSResetKmallocFailCount(); ++ } ++ } ++ ++ if (pvRet == NULL) ++ { ++ pvRet = vzalloc(ui32Size); ++ } ++ ++ return pvRet; ++} ++ ++/* ++ * The parentheses around OSFreeMemNoStats prevent the macro in allocmem.h from ++ * applying, as it would break the function's definition. ++ */ ++void (OSFreeMemNoStats)(void *pvMem) ++{ ++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) ++ unsigned long flags; ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ if (psPVRSRVData) ++ { ++ IMG_UINT32 ui32kmallocLeakMax = psPVRSRVData->sMemLeakIntervals.ui32OSAlloc; ++ ++ spin_lock_irqsave(&kmalloc_leak_lock, flags); ++ ++ g_ui32kmallocLeakCounter++; ++ if (ui32kmallocLeakMax && (g_ui32kmallocLeakCounter >= ui32kmallocLeakMax)) ++ { ++ g_ui32kmallocLeakCounter = 0; ++ spin_unlock_irqrestore(&kmalloc_leak_lock, flags); ++ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Skipped freeing of pointer 0x%p to trigger memory leak.", ++ __func__, ++ pvMem)); ++ return; ++ } ++ ++ spin_unlock_irqrestore(&kmalloc_leak_lock, flags); ++ } ++#endif ++ if (pvMem != NULL) ++ { ++ if (!is_vmalloc_addr(pvMem)) ++ { ++ _pvr_kfree(pvMem); ++ } ++ else ++ { ++ _pvr_vfree(pvMem); ++ } ++ } ++} +diff --git a/drivers/gpu/drm/img-rogue/allocmem.h b/drivers/gpu/drm/img-rogue/allocmem.h +new file mode 100644 +index 000000000000..3de9e6781fbd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/allocmem.h +@@ -0,0 +1,224 @@ ++/*************************************************************************/ /*! 
++@File allocmem.h ++@Title memory allocation header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Memory-Allocation API definitions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef ALLOCMEM_H ++#define ALLOCMEM_H ++ ++#include "img_types.h" ++#include "pvr_debug.h" ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* ++ * PVRSRV_ENABLE_PROCESS_STATS enables process statistics regarding events, ++ * resources and memory across all processes ++ * PVRSRV_ENABLE_MEMORY_STATS enables recording of Linux kernel memory ++ * allocations, provided that PVRSRV_ENABLE_PROCESS_STATS is enabled ++ * - Output can be found in: ++ * /(sys/kernel/debug|proc)/pvr/proc_stats/[live|retired]_pids_stats/mem_area ++ * PVRSRV_DEBUG_LINUX_MEMORY_STATS provides more details about memory ++ * statistics in conjunction with PVRSRV_ENABLE_MEMORY_STATS ++ * PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON is defined to encompass both memory ++ * allocation statistics functionalities described above in a single macro ++ */ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS) && defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG) ++#define PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON ++#endif ++ ++/* ++ * When using detailed memory allocation statistics, the line number and ++ * file name where the allocation happened are also provided. 
++ * When this feature is not used, these parameters are not needed. ++ */ ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) ++#define DEBUG_MEMSTATS_PARAMS ,void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine ++#define DEBUG_MEMSTATS_ARGS ,pvAllocFromFile, ui32AllocFromLine ++#define DEBUG_MEMSTATS_UNREF (void)pvAllocFromFile; (void)ui32AllocFromLine; ++#define DEBUG_MEMSTATS_VALUES ,__FILE__, __LINE__ ++#else ++#define DEBUG_MEMSTATS_PARAMS /*!< ++ * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON ++ * build option. */ ++#define DEBUG_MEMSTATS_ARGS /*!< ++ * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON ++ * build option. */ ++#define DEBUG_MEMSTATS_UNREF /*!< ++ * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON ++ * build option. */ ++#define DEBUG_MEMSTATS_VALUES /*!< ++ * Used for PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON ++ * build option. */ ++#endif ++ ++ ++/**************************************************************************/ /*! ++@Function OSAllocMem ++@Description Allocates CPU memory. Contents are uninitialized. ++ If passed a size of zero, function should not assert, ++ but just return a NULL pointer. ++@Input ui32Size Size of required allocation (in bytes) ++@Return Pointer to allocated memory on success. ++ Otherwise NULL. ++ */ /**************************************************************************/ ++#if defined(DOXYGEN) ++void *OSAllocMem(IMG_UINT32 ui32Size); ++#else ++void *OSAllocMem(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS); ++#define OSAllocMem(_size) (OSAllocMem)((_size) DEBUG_MEMSTATS_VALUES) ++#endif ++ ++/**************************************************************************/ /*! ++@Function OSAllocZMem ++@Description Allocates CPU memory and initializes the contents to zero. ++ If passed a size of zero, function should not assert, ++ but just return a NULL pointer. ++@Input ui32Size Size of required allocation (in bytes) ++@Return Pointer to allocated memory on success. ++ Otherwise NULL. ++ */ /**************************************************************************/ ++#if defined(DOXYGEN) ++void *OSAllocZMem(IMG_UINT32 ui32Size); ++#else ++void *OSAllocZMem(IMG_UINT32 ui32Size DEBUG_MEMSTATS_PARAMS); ++#define OSAllocZMem(_size) (OSAllocZMem)((_size) DEBUG_MEMSTATS_VALUES) ++#endif ++ ++ ++/**************************************************************************/ /*! ++@Function OSAllocMemNoStats ++@Description Allocates CPU memory. Contents are uninitialized. ++ If passed a size of zero, function should not assert, ++ but just return a NULL pointer. ++ The allocated memory is not accounted for by process stats. ++ Process stats are an optional feature (enabled only when ++ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount ++ of memory allocated to help in debugging. Where this is not ++ required, OSAllocMem() and OSAllocMemNoStats() equate to ++ the same operation. ++@Input ui32Size Size of required allocation (in bytes) ++@Return Pointer to allocated memory on success. ++ Otherwise NULL. ++ */ /**************************************************************************/ ++void *OSAllocMemNoStats(IMG_UINT32 ui32Size); ++ ++/**************************************************************************/ /*! ++@Function OSAllocZMemNoStats ++@Description Allocates CPU memory and initializes the contents to zero. ++ If passed a size of zero, function should not assert, ++ but just return a NULL pointer. ++ The allocated memory is not accounted for by process stats. 
++ Process stats are an optional feature (enabled only when ++ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount ++ of memory allocated to help in debugging. Where this is not ++ required, OSAllocZMem() and OSAllocZMemNoStats() equate to ++ the same operation. ++@Input ui32Size Size of required allocation (in bytes) ++@Return Pointer to allocated memory on success. ++ Otherwise NULL. ++ */ /**************************************************************************/ ++void *OSAllocZMemNoStats(IMG_UINT32 ui32Size); ++ ++/**************************************************************************/ /*! ++@Function OSFreeMem ++@Description Frees previously allocated CPU memory. ++@Input pvCpuVAddr Pointer to the memory to be freed. ++@Return None. ++ */ /**************************************************************************/ ++void OSFreeMem(void *pvCpuVAddr); ++ ++/**************************************************************************/ /*! ++@Function OSFreeMemNoStats ++@Description Frees previously allocated CPU memory. ++ The freed memory does not update the figures in process stats. ++ Process stats are an optional feature (enabled only when ++ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount ++ of memory allocated to help in debugging. Where this is not ++ required, OSFreeMem() and OSFreeMemNoStats() equate to the ++ same operation. ++@Input pvCpuVAddr Pointer to the memory to be freed. ++@Return None. ++ */ /**************************************************************************/ ++void OSFreeMemNoStats(void *pvCpuVAddr); ++ ++/* ++ * These macros allow us to catch double-free bugs on DEBUG builds and ++ * prevent crashes on RELEASE builds. ++ */ ++ ++/*! @cond Doxygen_Suppress */ ++#if defined(DEBUG) ++#define double_free_sentinel ((void *)&OSFreeMem) ++#define ALLOCMEM_ASSERT(exp) PVR_ASSERT(exp) ++#else ++#define double_free_sentinel NULL ++#define ALLOCMEM_ASSERT(exp) do {} while (0) ++#endif ++/*! @endcond */ ++ ++/*! Frees memory allocated by OSAllocMem(). */ ++#define OSFreeMem(_ptr) do { \ ++ ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \ ++ (OSFreeMem)(_ptr); \ ++ (_ptr) = double_free_sentinel; \ ++ MSC_SUPPRESS_4127 \ ++ } while (0) ++ ++/*! Frees memory allocated by OSAllocMemNoStats(). */ ++#define OSFreeMemNoStats(_ptr) do { \ ++ ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \ ++ (OSFreeMemNoStats)(_ptr); \ ++ (_ptr) = double_free_sentinel; \ ++ MSC_SUPPRESS_4127 \ ++ } while (0) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* ALLOCMEM_H */ ++ ++/****************************************************************************** ++ End of file (allocmem.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/apollo/apollo.mk b/drivers/gpu/drm/img-rogue/apollo/apollo.mk +new file mode 100644 +index 000000000000..6d34673e40be +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/apollo.mk +@@ -0,0 +1,4 @@ ++apollo-y += \ ++ tc_apollo.o \ ++ tc_drv.o \ ++ tc_odin.o +diff --git a/drivers/gpu/drm/img-rogue/apollo/apollo_regs.h b/drivers/gpu/drm/img-rogue/apollo/apollo_regs.h +new file mode 100644 +index 000000000000..4081e2123ac7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/apollo_regs.h +@@ -0,0 +1,108 @@ ++/*************************************************************************/ /*! ++@File ++@Title System Description Header ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description This header provides system-specific declarations and macros ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(APOLLO_REGS_H) ++#define APOLLO_REGS_H ++ ++#include "tc_clocks.h" ++ ++/* TC TCF5 */ ++#define TC5_SYS_APOLLO_REG_PCI_BASENUM (1) ++#define TC5_SYS_APOLLO_REG_PDP2_OFFSET (0x800000) ++#define TC5_SYS_APOLLO_REG_PDP2_SIZE (0x7C4) ++ ++#define TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET (0xA00000) ++#define TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE (0x14) ++ ++#define TC5_SYS_APOLLO_REG_HDMI_OFFSET (0xC00000) ++#define TC5_SYS_APOLLO_REG_HDMI_SIZE (0x1C) ++ ++/* TC ES2 */ ++#define TCF_TEMP_SENSOR_SPI_OFFSET 0xe ++#define TCF_TEMP_SENSOR_TO_C(raw) (((raw) * 248 / 4096) - 54) ++ ++/* Number of bytes that are broken */ ++#define SYS_DEV_MEM_BROKEN_BYTES (1024 * 1024) ++#define SYS_DEV_MEM_REGION_SIZE (0x40000000 - SYS_DEV_MEM_BROKEN_BYTES) ++ ++/* Apollo reg on base register 0 */ ++#define SYS_APOLLO_REG_PCI_BASENUM (0) ++#define SYS_APOLLO_REG_REGION_SIZE (0x00010000) ++ ++#define SYS_APOLLO_REG_SYS_OFFSET (0x0000) ++#define SYS_APOLLO_REG_SYS_SIZE (0x0400) ++ ++#define SYS_APOLLO_REG_PLL_OFFSET (0x1000) ++#define SYS_APOLLO_REG_PLL_SIZE (0x0400) ++ ++#define SYS_APOLLO_REG_HOST_OFFSET (0x4050) ++#define SYS_APOLLO_REG_HOST_SIZE (0x0014) ++ ++#define SYS_APOLLO_REG_PDP1_OFFSET (0xC000) ++#define SYS_APOLLO_REG_PDP1_SIZE (0x2000) ++ ++/* Offsets for flashing Apollo PROMs from base 0 */ ++#define APOLLO_FLASH_STAT_OFFSET (0x4058) ++#define APOLLO_FLASH_DATA_WRITE_OFFSET (0x4050) ++#define APOLLO_FLASH_RESET_OFFSET (0x4060) ++ ++#define APOLLO_FLASH_FIFO_STATUS_MASK (0xF) ++#define APOLLO_FLASH_FIFO_STATUS_SHIFT (0) ++#define APOLLO_FLASH_PROGRAM_STATUS_MASK (0xF) ++#define APOLLO_FLASH_PROGRAM_STATUS_SHIFT (16) ++ ++#define APOLLO_FLASH_PROG_COMPLETE_BIT (0x1) ++#define APOLLO_FLASH_PROG_PROGRESS_BIT (0x2) ++#define APOLLO_FLASH_PROG_FAILED_BIT (0x4) ++#define APOLLO_FLASH_INV_FILETYPE_BIT (0x8) ++ ++#define APOLLO_FLASH_FIFO_SIZE (8) ++ ++/* RGX reg on base register 1 */ ++#define SYS_RGX_REG_PCI_BASENUM (1) ++#define SYS_RGX_REG_REGION_SIZE (0x7FFFF) ++ ++/* Device memory (including HP mapping) on base register 2 */ ++#define SYS_DEV_MEM_PCI_BASENUM (2) ++ ++#endif /* APOLLO_REGS_H */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h b/drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h +new file mode 100644 +index 000000000000..fc87ec790df9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/bonnie_tcf.h +@@ -0,0 +1,68 @@ ++/*************************************************************************/ /*! ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* bonnie_tcf.h - Bonnie TCF register definitions */ ++ ++/* tab size 4 */ ++ ++#ifndef BONNIE_TCF_DEFS_H ++#define BONNIE_TCF_DEFS_H ++ ++#define BONNIE_TCF_OFFSET_BONNIETC_REGBANK 0x00000000 ++#define BONNIE_TCF_OFFSET_TC_IFACE_COUNTERS 0x00004000 ++#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_IMGV4_RTM_TOP 0x00008000 ++#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_SECN 0x0000C000 ++#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_DBG 0x00010000 ++#define BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN 0x00014000 ++#define BONNIE_TCF_OFFSET_ALIGN_DATA_TX 0x00018000 ++#define BONNIE_TCF_OFFSET_SAI_RX_1 0x0001C000 ++#define BONNIE_TCF_OFFSET_SAI_RX_SDR 0x00040000 ++#define BONNIE_TCF_OFFSET_SAI_TX_1 0x00044000 ++#define BONNIE_TCF_OFFSET_SAI_TX_SDR 0x00068000 ++ ++#define BONNIE_TCF_OFFSET_SAI_RX_DELTA 0x00004000 ++#define BONNIE_TCF_OFFSET_SAI_TX_DELTA 0x00004000 ++ ++#define BONNIE_TCF_OFFSET_SAI_CLK_TAPS 0x0000000C ++#define BONNIE_TCF_OFFSET_SAI_EYES 0x00000010 ++#define BONNIE_TCF_OFFSET_SAI_TRAIN_ACK 0x00000018 ++ ++ ++#endif /* BONNIE_TCF_DEFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c +new file mode 100644 +index 000000000000..76895df4559f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_crtc.c +@@ -0,0 +1,1104 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. 
++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "pvr_linux_fence.h" ++#include "drm_pdp_drv.h" ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#else ++#include ++#endif ++ ++#include ++#include ++#include ++ ++#include "pvr_dma_resv.h" ++#include "drm_pdp_gem.h" ++ ++#include "pdp_apollo.h" ++#include "pdp_odin.h" ++#include "pdp_plato.h" ++ ++#include "plato_drv.h" ++ ++#if defined(PDP_USE_ATOMIC) ++#include ++#include ++#endif ++ ++#include "kernel_compatibility.h" ++ ++enum pdp_crtc_flip_status { ++ PDP_CRTC_FLIP_STATUS_NONE = 0, ++ PDP_CRTC_FLIP_STATUS_PENDING, ++ PDP_CRTC_FLIP_STATUS_DONE, ++}; ++ ++struct pdp_flip_data { ++ struct dma_fence_cb base; ++ struct drm_crtc *crtc; ++ struct dma_fence *wait_fence; ++}; ++ ++/* returns true for ok, false for fail */ ++static bool pdp_clocks_set(struct drm_crtc *crtc, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ bool res; ++ ++ switch (dev_priv->version) { ++ case PDP_VERSION_ODIN: { ++ pdp_odin_set_updates_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, false); ++ res = pdp_odin_clocks_set(crtc->dev->dev, ++ pdp_crtc->pdp_reg, pdp_crtc->pll_reg, ++ 0, /* apollo only */ ++ dev_priv->outdev - 1, ++ pdp_crtc->odn_core_reg, /* odin only */ ++ adjusted_mode->hdisplay, ++ adjusted_mode->vdisplay, ++ dev_priv->subversion); ++ pdp_odin_set_updates_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, true); ++ ++ break; ++ } ++ case PDP_VERSION_APOLLO: { ++ int clock_in_mhz = adjusted_mode->clock / 1000; ++ ++ pdp_apollo_set_updates_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, false); ++ res = pdp_apollo_clocks_set(crtc->dev->dev, ++ pdp_crtc->pdp_reg, pdp_crtc->pll_reg, ++ clock_in_mhz, /* apollo only */ ++ NULL, /* odin only */ ++ adjusted_mode->hdisplay, ++ adjusted_mode->vdisplay); ++ pdp_apollo_set_updates_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, true); ++ ++ DRM_DEBUG_DRIVER("pdp clock set to %dMhz\n", clock_in_mhz); ++ ++ break; ++ } ++ case PDP_VERSION_PLATO: ++#if defined(SUPPORT_PLATO_DISPLAY) ++ 
plato_enable_pdp_clock(dev_priv->dev->dev->parent); ++ res = true; ++#else ++ DRM_ERROR("Trying to enable plato PDP clock on non-Plato build\n"); ++ res = false; ++#endif ++ break; ++ default: ++ BUG(); ++ } ++ ++ return res; ++} ++ ++void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable) ++{ ++ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ ++ switch (dev_priv->version) { ++ case PDP_VERSION_ODIN: ++ pdp_odin_set_plane_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ 0, enable); ++ break; ++ case PDP_VERSION_APOLLO: ++ pdp_apollo_set_plane_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ 0, enable); ++ break; ++ case PDP_VERSION_PLATO: ++ pdp_plato_set_plane_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ 0, enable); ++ break; ++ default: ++ BUG(); ++ } ++} ++ ++static void pdp_crtc_set_syncgen_enabled(struct drm_crtc *crtc, bool enable) ++{ ++ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ ++ switch (dev_priv->version) { ++ case PDP_VERSION_ODIN: ++ pdp_odin_set_syncgen_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ enable); ++ break; ++ case PDP_VERSION_APOLLO: ++ pdp_apollo_set_syncgen_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ enable); ++ break; ++ case PDP_VERSION_PLATO: ++ pdp_plato_set_syncgen_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ enable); ++ break; ++ default: ++ BUG(); ++ } ++} ++ ++static void pdp_crtc_set_enabled(struct drm_crtc *crtc, bool enable) ++{ ++ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; ++ ++ if (enable) { ++ pdp_crtc_set_syncgen_enabled(crtc, enable); ++ pdp_crtc_set_plane_enabled(crtc, dev_priv->display_enabled); ++ drm_crtc_vblank_on(crtc); ++ } else { ++ drm_crtc_vblank_off(crtc); ++ pdp_crtc_set_plane_enabled(crtc, enable); ++ pdp_crtc_set_syncgen_enabled(crtc, enable); ++ } ++} ++ ++static void pdp_crtc_mode_set(struct drm_crtc *crtc, ++ struct drm_display_mode *adjusted_mode) ++{ ++ /* ++ * ht = horizontal total ++ * hbps = horizontal back porch start ++ * has = horizontal active start ++ * hlbs = horizontal left border start ++ * hfps = horizontal front porch start ++ * hrbs = horizontal right border start ++ * ++ * vt = vertical total ++ * vbps = vertical back porch start ++ * vas = vertical active start ++ * vtbs = vertical top border start ++ * vfps = vertical front porch start ++ * vbbs = vertical bottom border start ++ */ ++ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ uint32_t ht = adjusted_mode->htotal; ++ uint32_t hbps = adjusted_mode->hsync_end - adjusted_mode->hsync_start; ++ uint32_t has = (adjusted_mode->htotal - adjusted_mode->hsync_start); ++ uint32_t hlbs = has; ++ uint32_t hfps = (hlbs + adjusted_mode->hdisplay); ++ uint32_t hrbs = hfps; ++ uint32_t vt = adjusted_mode->vtotal; ++ uint32_t vbps = adjusted_mode->vsync_end - adjusted_mode->vsync_start; ++ uint32_t vas = (adjusted_mode->vtotal - adjusted_mode->vsync_start); ++ uint32_t vtbs = vas; ++ uint32_t vfps = (vtbs + adjusted_mode->vdisplay); ++ uint32_t vbbs = vfps; ++ bool ok; ++ ++ ok = pdp_clocks_set(crtc, adjusted_mode); ++ ++ if (!ok) { ++ dev_info(crtc->dev->dev, "%s failed\n", __func__); ++ return; ++ } ++ ++ switch (dev_priv->version) { ++ case PDP_VERSION_ODIN: ++ pdp_odin_set_updates_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, false); ++ pdp_odin_reset_planes(crtc->dev->dev, ++ pdp_crtc->pdp_reg); ++ pdp_odin_mode_set(crtc->dev->dev, ++ 
pdp_crtc->pdp_reg, ++ adjusted_mode->hdisplay, adjusted_mode->vdisplay, ++ hbps, ht, has, ++ hlbs, hfps, hrbs, ++ vbps, vt, vas, ++ vtbs, vfps, vbbs, ++ adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC, ++ adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC, ++ pdp_crtc->pfim_reg); ++ pdp_odin_set_powerdwn_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, false); ++ pdp_odin_set_updates_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, true); ++ break; ++ case PDP_VERSION_APOLLO: ++ pdp_apollo_set_updates_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, false); ++ pdp_apollo_reset_planes(crtc->dev->dev, ++ pdp_crtc->pdp_reg); ++ pdp_apollo_mode_set(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ adjusted_mode->hdisplay, adjusted_mode->vdisplay, ++ hbps, ht, has, ++ hlbs, hfps, hrbs, ++ vbps, vt, vas, ++ vtbs, vfps, vbbs, ++ adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC, ++ adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC); ++ pdp_apollo_set_powerdwn_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, false); ++ pdp_apollo_set_updates_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, true); ++ break; ++ case PDP_VERSION_PLATO: ++ pdp_plato_mode_set(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ adjusted_mode->hdisplay, ++ adjusted_mode->vdisplay, ++ hbps, ht, has, ++ hlbs, hfps, hrbs, ++ vbps, vt, vas, ++ vtbs, vfps, vbbs, ++ adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC, ++ adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC); ++ break; ++ default: ++ BUG(); ++ } ++} ++ ++ ++static bool pdp_crtc_helper_mode_fixup(struct drm_crtc *crtc, ++ const struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; ++ ++ if (dev_priv->version == PDP_VERSION_ODIN ++ && mode->hdisplay == 1920 ++ && mode->vdisplay == 1080) { ++ ++ /* 1080p 60Hz */ ++ const int h_total = 2200; ++ const int h_active_start = 192; ++ const int h_back_porch_start = 44; ++ const int v_total = 1125; ++ const int v_active_start = 41; ++ const int v_back_porch_start = 5; ++ ++ adjusted_mode->htotal = h_total; ++ adjusted_mode->hsync_start = adjusted_mode->htotal - ++ h_active_start; ++ adjusted_mode->hsync_end = adjusted_mode->hsync_start + ++ h_back_porch_start; ++ adjusted_mode->vtotal = v_total; ++ adjusted_mode->vsync_start = adjusted_mode->vtotal - ++ v_active_start; ++ adjusted_mode->vsync_end = adjusted_mode->vsync_start + ++ v_back_porch_start; ++ } ++ return true; ++} ++ ++static void pdp_crtc_flip_complete(struct drm_crtc *crtc); ++ ++#if defined(PDP_USE_ATOMIC) ++static void pdp_crtc_helper_mode_set_nofb(struct drm_crtc *crtc) ++{ ++ pdp_crtc_mode_set(crtc, &crtc->state->adjusted_mode); ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) ++static void pdp_crtc_helper_atomic_flush(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_crtc_state) ++{ ++#else ++static void pdp_crtc_helper_atomic_flush(struct drm_crtc *crtc, ++ struct drm_atomic_state *state) ++{ ++ struct drm_crtc_state *old_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ ++ struct drm_crtc_state *new_crtc_state = crtc->state; ++ ++ if (!new_crtc_state->active || !old_crtc_state->active) ++ return; ++ ++ if (crtc->state->event) { ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ unsigned long flags; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++ pdp_crtc->flip_async = new_crtc_state->async_flip; ++#else ++ pdp_crtc->flip_async = !!(new_crtc_state->pageflip_flags ++ & DRM_MODE_PAGE_FLIP_ASYNC); ++#endif ++ if (pdp_crtc->flip_async) ++ 
WARN_ON(drm_crtc_vblank_get(crtc) != 0); ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ pdp_crtc->flip_event = crtc->state->event; ++ crtc->state->event = NULL; ++ ++ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE); ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++ ++ if (pdp_crtc->flip_async) ++ pdp_crtc_flip_complete(crtc); ++ } ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) ++static void pdp_crtc_helper_atomic_enable(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_crtc_state) ++#else ++static void pdp_crtc_helper_atomic_enable(struct drm_crtc *crtc, ++ struct drm_atomic_state *state) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ ++{ ++ pdp_crtc_set_enabled(crtc, true); ++ ++ if (crtc->state->event) { ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ unsigned long flags; ++ ++ WARN_ON(drm_crtc_vblank_get(crtc) != 0); ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ pdp_crtc->flip_event = crtc->state->event; ++ crtc->state->event = NULL; ++ ++ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE); ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++ } ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) ++static void pdp_crtc_helper_atomic_disable(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_crtc_state) ++#else ++static void pdp_crtc_helper_atomic_disable(struct drm_crtc *crtc, ++ struct drm_atomic_state *state) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ ++{ ++ pdp_crtc_set_enabled(crtc, false); ++ ++ if (crtc->state->event) { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ drm_crtc_send_vblank_event(crtc, crtc->state->event); ++ crtc->state->event = NULL; ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++ } ++} ++#else ++static void pdp_crtc_helper_dpms(struct drm_crtc *crtc, int mode) ++{ ++} ++ ++static void pdp_crtc_helper_prepare(struct drm_crtc *crtc) ++{ ++ pdp_crtc_set_enabled(crtc, false); ++} ++ ++static void pdp_crtc_helper_commit(struct drm_crtc *crtc) ++{ ++ pdp_crtc_set_enabled(crtc, true); ++} ++ ++static int pdp_crtc_helper_mode_set_base_atomic(struct drm_crtc *crtc, ++ struct drm_framebuffer *fb, ++ int x, int y, ++ enum mode_set_atomic atomic) ++{ ++ if (x < 0 || y < 0) ++ return -EINVAL; ++ ++ pdp_plane_set_surface(crtc, crtc->primary, fb, ++ (uint32_t) x, (uint32_t) y); ++ ++ return 0; ++} ++ ++static int pdp_crtc_helper_mode_set_base(struct drm_crtc *crtc, ++ int x, int y, ++ struct drm_framebuffer *old_fb) ++{ ++ if (!crtc->primary->fb) { ++ DRM_ERROR("no framebuffer\n"); ++ return 0; ++ } ++ ++ return pdp_crtc_helper_mode_set_base_atomic(crtc, ++ crtc->primary->fb, ++ x, y, ++ 0); ++} ++ ++static int pdp_crtc_helper_mode_set(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode, ++ int x, int y, ++ struct drm_framebuffer *old_fb) ++{ ++ pdp_crtc_mode_set(crtc, adjusted_mode); ++ ++ return pdp_crtc_helper_mode_set_base(crtc, x, y, old_fb); ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++static void pdp_crtc_helper_load_lut(struct drm_crtc *crtc) ++{ ++} ++#endif ++ ++static void pdp_crtc_helper_disable(struct drm_crtc *crtc) ++{ ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ enum pdp_crtc_flip_status status; ++ ++ pdp_crtc_set_enabled(crtc, false); ++ ++ status = atomic_read(&pdp_crtc->flip_status); ++ if (status != PDP_CRTC_FLIP_STATUS_NONE) { ++ long lerr; ++ ++ lerr = wait_event_timeout( ++ 
pdp_crtc->flip_pending_wait_queue, ++ atomic_read(&pdp_crtc->flip_status) ++ != PDP_CRTC_FLIP_STATUS_PENDING, ++ 30 * HZ); ++ if (!lerr) ++ DRM_ERROR("Failed to wait for pending flip\n"); ++ else if (!pdp_crtc->flip_async) ++ pdp_crtc_flip_complete(crtc); ++ } ++} ++#endif /* defined(PDP_USE_ATOMIC) */ ++ ++static int pfim_init(struct drm_device *dev, ++ struct pdp_crtc *pdp_crtc, ++ const char *crtc_name) ++{ ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ struct resource *regs; ++ int err; ++ ++ if (!dev_priv->pfim_capable) { ++ pdp_crtc->pfim_reg = NULL; ++ return 0; ++ } ++ ++ regs = platform_get_resource_byname(to_platform_device(dev->dev), ++ IORESOURCE_MEM, ++ "pfim-regs"); ++ if (!regs) { ++ DRM_ERROR("missing pfim register info\n"); ++ return -ENXIO; ++ } ++ ++ pdp_crtc->pfim_reg_phys_base = regs->start; ++ pdp_crtc->pfim_reg_size = resource_size(regs); ++ ++ if (!request_mem_region(pdp_crtc->pfim_reg_phys_base, ++ pdp_crtc->pfim_reg_size, ++ crtc_name)) { ++ DRM_ERROR("failed to reserve pfim registers\n"); ++ return -EBUSY; ++ } ++ ++ pdp_crtc->pfim_reg = ++ ioremap(pdp_crtc->pfim_reg_phys_base, pdp_crtc->pfim_reg_size); ++ if (!pdp_crtc->pfim_reg) { ++ DRM_ERROR("failed to map pfim registers\n"); ++ err = -ENOMEM; ++ goto err_release_mem; ++ } ++ return 0; ++ ++err_release_mem: ++ release_mem_region(pdp_crtc->pfim_reg_phys_base, ++ pdp_crtc->pfim_reg_size); ++ pdp_crtc->pfim_reg = NULL; ++ return err; ++} ++ ++static void pfim_deinit(struct pdp_crtc *pdp_crtc) ++{ ++ if (pdp_crtc->pfim_reg) { ++ iounmap(pdp_crtc->pfim_reg); ++ release_mem_region(pdp_crtc->pfim_reg_phys_base, ++ pdp_crtc->pfim_reg_size); ++ pdp_crtc->pfim_reg = NULL; ++ } ++} ++ ++static void pdp_crtc_destroy(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ ++ DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id); ++ ++ drm_crtc_cleanup(crtc); ++ ++ iounmap(pdp_crtc->pll_reg); ++ ++ iounmap(pdp_crtc->pdp_reg); ++ release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size); ++ ++ pfim_deinit(pdp_crtc); ++ ++ kfree(pdp_crtc); ++ dev_priv->crtc = NULL; ++} ++ ++static void pdp_crtc_flip_complete(struct drm_crtc *crtc) ++{ ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ ++ /* The flipping process has been completed so reset the flip state */ ++ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE); ++ pdp_crtc->flip_async = false; ++ ++#if !defined(PDP_USE_ATOMIC) ++ if (pdp_crtc->flip_data) { ++ dma_fence_put(pdp_crtc->flip_data->wait_fence); ++ kfree(pdp_crtc->flip_data); ++ pdp_crtc->flip_data = NULL; ++ } ++#endif ++ ++ if (pdp_crtc->flip_event) { ++ drm_crtc_send_vblank_event(crtc, pdp_crtc->flip_event); ++ pdp_crtc->flip_event = NULL; ++ } ++ ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++} ++ ++#if !defined(PDP_USE_ATOMIC) ++static void pdp_crtc_flip(struct drm_crtc *crtc) ++{ ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ struct drm_framebuffer *old_fb; ++ ++ WARN_ON(atomic_read(&to_pdp_crtc(crtc)->flip_status) ++ != PDP_CRTC_FLIP_STATUS_PENDING); ++ ++ old_fb = pdp_crtc->old_fb; ++ pdp_crtc->old_fb = NULL; ++ ++ /* ++ * The graphics stream registers latch on vsync so we can go ahead and ++ * do the flip now. 
++ */ ++ (void) pdp_crtc_helper_mode_set_base(crtc, crtc->x, crtc->y, old_fb); ++ ++ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_DONE); ++ wake_up(&pdp_crtc->flip_pending_wait_queue); ++ ++ if (pdp_crtc->flip_async) ++ pdp_crtc_flip_complete(crtc); ++} ++ ++static void pdp_crtc_flip_cb(struct dma_fence *fence, struct dma_fence_cb *cb) ++{ ++ struct pdp_flip_data *flip_data = ++ container_of(cb, struct pdp_flip_data, base); ++ ++ pdp_crtc_flip(flip_data->crtc); ++} ++ ++static void pdp_crtc_flip_schedule_cb(struct dma_fence *fence, ++ struct dma_fence_cb *cb) ++{ ++ struct pdp_flip_data *flip_data = ++ container_of(cb, struct pdp_flip_data, base); ++ int err = 0; ++ ++ if (flip_data->wait_fence) ++ err = dma_fence_add_callback(flip_data->wait_fence, ++ &flip_data->base, ++ pdp_crtc_flip_cb); ++ ++ if (!flip_data->wait_fence || err) { ++ if (err && err != -ENOENT) ++ DRM_ERROR("flip failed to wait on old buffer\n"); ++ pdp_crtc_flip_cb(flip_data->wait_fence, &flip_data->base); ++ } ++} ++ ++static int pdp_crtc_flip_schedule(struct drm_crtc *crtc, ++ struct drm_gem_object *obj, ++ struct drm_gem_object *old_obj) ++{ ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ struct dma_resv *resv = pdp_gem_get_resv(obj); ++ struct dma_resv *old_resv = pdp_gem_get_resv(old_obj); ++ struct pdp_flip_data *flip_data; ++ struct dma_fence *fence; ++ int err; ++ ++ flip_data = kmalloc(sizeof(*flip_data), GFP_KERNEL); ++ if (!flip_data) ++ return -ENOMEM; ++ ++ flip_data->crtc = crtc; ++ ++ ww_mutex_lock(&old_resv->lock, NULL); ++ flip_data->wait_fence = ++ dma_fence_get(dma_resv_get_excl(old_resv)); ++ ++ if (old_resv != resv) { ++ ww_mutex_unlock(&old_resv->lock); ++ ww_mutex_lock(&resv->lock, NULL); ++ } ++ ++ fence = dma_fence_get(dma_resv_get_excl(resv)); ++ ww_mutex_unlock(&resv->lock); ++ ++ pdp_crtc->flip_data = flip_data; ++ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_PENDING); ++ ++ if (fence) { ++ err = dma_fence_add_callback(fence, &flip_data->base, ++ pdp_crtc_flip_schedule_cb); ++ dma_fence_put(fence); ++ if (err && err != -ENOENT) ++ goto err_set_flip_status_none; ++ } ++ ++ if (!fence || err == -ENOENT) { ++ pdp_crtc_flip_schedule_cb(fence, &flip_data->base); ++ err = 0; ++ } ++ ++ return err; ++ ++err_set_flip_status_none: ++ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE); ++ dma_fence_put(flip_data->wait_fence); ++ kfree(flip_data); ++ return err; ++} ++ ++static int pdp_crtc_page_flip(struct drm_crtc *crtc, ++ struct drm_framebuffer *fb, ++ struct drm_pending_vblank_event *event, ++ uint32_t page_flip_flags ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) ++ , struct drm_modeset_acquire_ctx *ctx ++#endif ++ ) ++{ ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); ++ struct pdp_framebuffer *pdp_old_fb = ++ to_pdp_framebuffer(crtc->primary->fb); ++ enum pdp_crtc_flip_status status; ++ unsigned long flags; ++ int err; ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ status = atomic_read(&pdp_crtc->flip_status); ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++ ++ if (status != PDP_CRTC_FLIP_STATUS_NONE) ++ return -EBUSY; ++ ++ if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) { ++ err = drm_crtc_vblank_get(crtc); ++ if (err) ++ return err; ++ } ++ ++ pdp_crtc->old_fb = crtc->primary->fb; ++ pdp_crtc->flip_event = event; ++ pdp_crtc->flip_async = !!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC); ++ ++ /* Set the crtc primary plane to point to the new framebuffer */ ++ 
crtc->primary->fb = fb; ++ ++ err = pdp_crtc_flip_schedule(crtc, pdp_fb->obj[0], pdp_old_fb->obj[0]); ++ if (err) { ++ crtc->primary->fb = pdp_crtc->old_fb; ++ pdp_crtc->old_fb = NULL; ++ pdp_crtc->flip_event = NULL; ++ pdp_crtc->flip_async = false; ++ ++ DRM_ERROR("failed to schedule flip (err=%d)\n", err); ++ goto err_vblank_put; ++ } ++ ++ return 0; ++ ++err_vblank_put: ++ if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) ++ drm_crtc_vblank_put(crtc); ++ return err; ++} ++#endif /* !defined(PDP_USE_ATOMIC) */ ++ ++static const struct drm_crtc_helper_funcs pdp_crtc_helper_funcs = { ++ .mode_fixup = pdp_crtc_helper_mode_fixup, ++#if defined(PDP_USE_ATOMIC) ++ .mode_set_nofb = pdp_crtc_helper_mode_set_nofb, ++ .atomic_flush = pdp_crtc_helper_atomic_flush, ++ .atomic_enable = pdp_crtc_helper_atomic_enable, ++ .atomic_disable = pdp_crtc_helper_atomic_disable, ++#else ++ .dpms = pdp_crtc_helper_dpms, ++ .prepare = pdp_crtc_helper_prepare, ++ .commit = pdp_crtc_helper_commit, ++ .mode_set = pdp_crtc_helper_mode_set, ++ .mode_set_base = pdp_crtc_helper_mode_set_base, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ .load_lut = pdp_crtc_helper_load_lut, ++#endif ++ .mode_set_base_atomic = pdp_crtc_helper_mode_set_base_atomic, ++ .disable = pdp_crtc_helper_disable, ++#endif ++}; ++ ++static const struct drm_crtc_funcs pdp_crtc_funcs = { ++ .destroy = pdp_crtc_destroy, ++#if defined(PDP_USE_ATOMIC) ++ .reset = drm_atomic_helper_crtc_reset, ++ .set_config = drm_atomic_helper_set_config, ++ .page_flip = drm_atomic_helper_page_flip, ++ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, ++ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, ++#else ++ .set_config = drm_crtc_helper_set_config, ++ .page_flip = pdp_crtc_page_flip, ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++ .enable_vblank = pdp_enable_vblank, ++ .disable_vblank = pdp_disable_vblank, ++#endif ++}; ++ ++ ++struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number, ++ struct drm_plane *primary_plane) ++{ ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ struct pdp_crtc *pdp_crtc; ++ const char *crtc_name = "crtc-0"; ++ int err; ++ ++ pdp_crtc = kzalloc(sizeof(*pdp_crtc), GFP_KERNEL); ++ if (!pdp_crtc) { ++ err = -ENOMEM; ++ goto err_exit; ++ } ++ ++ init_waitqueue_head(&pdp_crtc->flip_pending_wait_queue); ++ atomic_set(&pdp_crtc->flip_status, PDP_CRTC_FLIP_STATUS_NONE); ++ pdp_crtc->number = number; ++ ++ switch (number) { ++ case 0: ++ { ++ struct resource *regs; ++ const char *pdp_resname = NULL; ++ ++ if (dev_priv->version == PDP_VERSION_ODIN) { ++ switch (dev_priv->outdev) { ++ case PDP_OUTPUT_PDP1: { ++ pdp_resname = "pdp-regs"; ++ break; ++ } ++ case PDP_OUTPUT_PDP2: { ++ pdp_resname = "pdp2-regs"; ++ break; ++ } ++ default: ++ DRM_ERROR("wrong PDP output device\n"); ++ err = -ENODEV; ++ goto err_exit; ++ } ++ } else { ++ pdp_resname = "pdp-regs"; ++ } ++ ++ regs = platform_get_resource_byname( ++ to_platform_device(dev->dev), ++ IORESOURCE_MEM, ++ pdp_resname); ++ if (!regs) { ++ DRM_ERROR("missing pdp register info\n"); ++ err = -ENXIO; ++ goto err_crtc_free; ++ } ++ ++ pdp_crtc->pdp_reg_phys_base = regs->start; ++ pdp_crtc->pdp_reg_size = resource_size(regs); ++ ++ if (dev_priv->version == PDP_VERSION_ODIN || ++ dev_priv->version == PDP_VERSION_APOLLO) { ++ regs = platform_get_resource_byname( ++ to_platform_device(dev->dev), ++ IORESOURCE_MEM, ++ "pll-regs"); ++ if (!regs) { ++ DRM_ERROR("missing pll register info\n"); ++ err = -ENXIO; ++ goto 
err_crtc_free; ++ } ++ ++ pdp_crtc->pll_reg_phys_base = regs->start; ++ pdp_crtc->pll_reg_size = resource_size(regs); ++ ++ pdp_crtc->pll_reg = ioremap(pdp_crtc->pll_reg_phys_base, ++ pdp_crtc->pll_reg_size); ++ if (!pdp_crtc->pll_reg) { ++ DRM_ERROR("failed to map pll registers\n"); ++ err = -ENOMEM; ++ goto err_crtc_free; ++ } ++ } else if (dev_priv->version == PDP_VERSION_PLATO) { ++ regs = platform_get_resource_byname( ++ to_platform_device(dev->dev), ++ IORESOURCE_MEM, ++ PLATO_PDP_RESOURCE_BIF_REGS); ++ if (!regs) { ++ DRM_ERROR("missing pdp-bif register info\n"); ++ err = -ENXIO; ++ goto err_crtc_free; ++ } ++ ++ pdp_crtc->pdp_bif_reg_phys_base = regs->start; ++ pdp_crtc->pdp_bif_reg_size = resource_size(regs); ++ ++ if (!request_mem_region(pdp_crtc->pdp_bif_reg_phys_base, ++ pdp_crtc->pdp_bif_reg_size, ++ crtc_name)) { ++ DRM_ERROR("failed to reserve pdp-bif registers\n"); ++ err = -EBUSY; ++ goto err_crtc_free; ++ } ++ ++ pdp_crtc->pdp_bif_reg = ++ ioremap(pdp_crtc->pdp_bif_reg_phys_base, ++ pdp_crtc->pdp_bif_reg_size); ++ if (!pdp_crtc->pdp_bif_reg) { ++ DRM_ERROR("failed to map pdp-bif registers\n"); ++ err = -ENOMEM; ++ goto err_iounmap_regs; ++ } ++ } ++ ++ if (dev_priv->version == PDP_VERSION_ODIN) { ++ regs = platform_get_resource_byname( ++ to_platform_device(dev->dev), ++ IORESOURCE_MEM, ++ "odn-core"); ++ if (!regs) { ++ DRM_ERROR("missing odn-core info\n"); ++ err = -ENXIO; ++ goto err_crtc_free; ++ } ++ ++ pdp_crtc->odn_core_phys_base = regs->start; ++ pdp_crtc->odn_core_size = resource_size(regs); ++ ++ pdp_crtc->odn_core_reg ++ = ioremap(pdp_crtc->odn_core_phys_base, ++ pdp_crtc->odn_core_size); ++ if (!pdp_crtc->odn_core_reg) { ++ DRM_ERROR("failed to map pdp reset register\n"); ++ err = -ENOMEM; ++ goto err_iounmap_regs; ++ } ++ ++ err = pfim_init(dev, pdp_crtc, crtc_name); ++ if (err) { ++ DRM_ERROR("failed to initialise PFIM\n"); ++ goto err_iounmap_regs; ++ } ++ } ++ ++ break; ++ } ++ default: ++ DRM_ERROR("invalid crtc number %u\n", number); ++ err = -EINVAL; ++ goto err_crtc_free; ++ } ++ ++ if (!request_mem_region(pdp_crtc->pdp_reg_phys_base, ++ pdp_crtc->pdp_reg_size, ++ crtc_name)) { ++ DRM_ERROR("failed to reserve pdp registers\n"); ++ err = -EBUSY; ++ goto err_crtc_free; ++ } ++ ++ pdp_crtc->pdp_reg = ioremap(pdp_crtc->pdp_reg_phys_base, ++ pdp_crtc->pdp_reg_size); ++ if (!pdp_crtc->pdp_reg) { ++ DRM_ERROR("failed to map pdp registers\n"); ++ err = -ENOMEM; ++ goto err_release_mem_region; ++ } ++ ++ err = drm_crtc_init_with_planes(dev, &pdp_crtc->base, primary_plane, ++ NULL, &pdp_crtc_funcs, NULL); ++ if (err) { ++ DRM_ERROR("CRTC init with planes failed"); ++ goto err_iounmap_regs; ++ } ++ ++ drm_crtc_helper_add(&pdp_crtc->base, &pdp_crtc_helper_funcs); ++ ++ DRM_DEBUG_DRIVER("[CRTC:%d]\n", pdp_crtc->base.base.id); ++ ++ return &pdp_crtc->base; ++ ++err_iounmap_regs: ++ iounmap(pdp_crtc->pdp_reg); ++ if (pdp_crtc->odn_core_reg) ++ iounmap(pdp_crtc->odn_core_reg); ++ if (pdp_crtc->pdp_bif_reg) ++ iounmap(pdp_crtc->pdp_bif_reg); ++err_release_mem_region: ++ release_mem_region(pdp_crtc->pdp_reg_phys_base, pdp_crtc->pdp_reg_size); ++ pfim_deinit(pdp_crtc); ++err_crtc_free: ++ kfree(pdp_crtc); ++err_exit: ++ return ERR_PTR(err); ++} ++ ++void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable) ++{ ++ struct pdp_drm_private *dev_priv = crtc->dev->dev_private; ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ ++ switch (dev_priv->version) { ++ case PDP_VERSION_ODIN: ++ pdp_odin_set_vblank_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ enable); 
++ break; ++ case PDP_VERSION_APOLLO: ++ pdp_apollo_set_vblank_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ enable); ++ break; ++ case PDP_VERSION_PLATO: ++ pdp_plato_set_vblank_enabled(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ enable); ++ break; ++ default: ++ BUG(); ++ } ++} ++ ++void pdp_crtc_irq_handler(struct drm_crtc *crtc) ++{ ++ struct drm_device *dev = crtc->dev; ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ bool handled; ++ ++ switch (dev_priv->version) { ++ case PDP_VERSION_ODIN: ++ handled = pdp_odin_check_and_clear_vblank(dev->dev, ++ pdp_crtc->pdp_reg); ++ break; ++ case PDP_VERSION_APOLLO: ++ handled = pdp_apollo_check_and_clear_vblank(dev->dev, ++ pdp_crtc->pdp_reg); ++ break; ++ case PDP_VERSION_PLATO: ++ handled = pdp_plato_check_and_clear_vblank(dev->dev, ++ pdp_crtc->pdp_reg); ++ break; ++ default: ++ handled = false; ++ break; ++ } ++ ++ if (handled) { ++ enum pdp_crtc_flip_status status; ++ ++ drm_handle_vblank(dev, pdp_crtc->number); ++ ++ status = atomic_read(&pdp_crtc->flip_status); ++ if (status == PDP_CRTC_FLIP_STATUS_DONE) { ++ if (!pdp_crtc->flip_async) { ++ pdp_crtc_flip_complete(crtc); ++#if !defined(PDP_USE_ATOMIC) ++ drm_crtc_vblank_put(crtc); ++#endif ++ } ++ } ++ } ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) ++void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file) ++{ ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ ++ if (pdp_crtc->flip_event && ++ pdp_crtc->flip_event->base.file_priv == file) { ++ pdp_crtc->flip_event->base.destroy(&pdp_crtc->flip_event->base); ++ pdp_crtc->flip_event = NULL; ++ } ++ ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++} ++#endif +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c +new file mode 100644 +index 000000000000..5725fd173800 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_debugfs.c +@@ -0,0 +1,184 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. 
++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#include ++#endif ++ ++#include "drm_pdp_drv.h" ++ ++#define PDP_DEBUGFS_DISPLAY_ENABLED "display_enabled" ++ ++static int display_enabled_open(struct inode *inode, struct file *file) ++{ ++ file->private_data = inode->i_private; ++ ++ return 0; ++} ++ ++static ssize_t display_enabled_read(struct file *file, ++ char __user *user_buffer, ++ size_t count, ++ loff_t *position_ptr) ++{ ++ struct drm_device *dev = file->private_data; ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ loff_t position = *position_ptr; ++ char buffer[] = "N\n"; ++ size_t buffer_size = ARRAY_SIZE(buffer); ++ int err; ++ ++ if (position < 0) ++ return -EINVAL; ++ else if (position >= buffer_size || count == 0) ++ return 0; ++ ++ if (dev_priv->display_enabled) ++ buffer[0] = 'Y'; ++ ++ if (count > buffer_size - position) ++ count = buffer_size - position; ++ ++ err = copy_to_user(user_buffer, &buffer[position], count); ++ if (err) ++ return -EFAULT; ++ ++ *position_ptr = position + count; ++ ++ return count; ++} ++ ++static ssize_t display_enabled_write(struct file *file, ++ const char __user *user_buffer, ++ size_t count, ++ loff_t *position) ++{ ++ struct drm_device *dev = file->private_data; ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ char buffer[3]; ++ int err; ++ ++ count = min(count, ARRAY_SIZE(buffer) - 1); ++ ++ err = copy_from_user(buffer, user_buffer, count); ++ if (err) ++ return -EFAULT; ++ buffer[count] = '\0'; ++ ++ if (!strtobool(buffer, &dev_priv->display_enabled) && dev_priv->crtc) ++ pdp_crtc_set_plane_enabled(dev_priv->crtc, dev_priv->display_enabled); ++ ++ return count; ++} ++ ++static const struct file_operations pdp_display_enabled_fops = { ++ .owner = THIS_MODULE, ++ .open = display_enabled_open, ++ .read = display_enabled_read, ++ .write = display_enabled_write, ++ .llseek = default_llseek, ++}; ++ ++static int pdp_debugfs_create(struct drm_minor *minor, const char *name, ++ umode_t mode, const struct file_operations *fops) ++{ ++ struct drm_info_node *node; ++ ++ /* ++ * We can't get access to our driver private data when this function is ++ * called so we fake up a node so that we can clean up entries later on. 
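++ * The fops pointer is stashed in node->info_ent so the entry can later
++ * be matched and removed again by drm_debugfs_remove_files().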
++ */ ++ node = kzalloc(sizeof(*node), GFP_KERNEL); ++ if (!node) ++ return -ENOMEM; ++ ++ node->dent = debugfs_create_file(name, mode, minor->debugfs_root, ++ minor->dev, fops); ++ if (!node->dent) { ++ kfree(node); ++ return -ENOMEM; ++ } ++ ++ node->minor = minor; ++ node->info_ent = (void *) fops; ++ ++ mutex_lock(&minor->debugfs_lock); ++ list_add(&node->list, &minor->debugfs_list); ++ mutex_unlock(&minor->debugfs_lock); ++ ++ return 0; ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) ++int pdp_debugfs_init(struct drm_minor *minor) ++#else ++void pdp_debugfs_init(struct drm_minor *minor) ++#endif ++{ ++ int err; ++ ++ err = pdp_debugfs_create(minor, PDP_DEBUGFS_DISPLAY_ENABLED, ++ 0100644, ++ &pdp_display_enabled_fops); ++ if (err) { ++ DRM_INFO("failed to create '%s' debugfs entry\n", ++ PDP_DEBUGFS_DISPLAY_ENABLED); ++ } ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) ++ return err; ++#endif ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++void pdp_debugfs_cleanup(struct drm_minor *minor) ++{ ++ drm_debugfs_remove_files((struct drm_info_list *) &pdp_display_enabled_fops, ++ 1, minor); ++} ++#endif +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c +new file mode 100644 +index 000000000000..5c2d63889d89 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.c +@@ -0,0 +1,866 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#include ++#include ++#include ++#include ++#else ++#include ++#endif ++ ++#include "tc_drv.h" ++#include "pvrversion.h" ++ ++#include "drm_pdp_drv.h" ++#include "drm_pdp_gem.h" ++#include "pdp_drm.h" ++ ++#include "odin_defs.h" ++ ++#if defined(SUPPORT_PLATO_DISPLAY) ++#include "plato_drv.h" ++#include "pdp2_regs.h" ++#include "pdp2_mmu_regs.h" ++#endif ++ ++#define DRIVER_NAME "pdp" ++#define DRIVER_DESC "Imagination Technologies PDP DRM Display Driver" ++#define DRIVER_DATE "20150612" ++ ++#if defined(PDP_USE_ATOMIC) ++#include ++ ++#define PVR_DRIVER_ATOMIC DRIVER_ATOMIC ++#else ++#define PVR_DRIVER_ATOMIC 0 ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++#define PVR_DRIVER_PRIME 0 ++#else ++#define PVR_DRIVER_PRIME DRIVER_PRIME ++#endif ++ ++/* This header must always be included last */ ++#include "kernel_compatibility.h" ++ ++static bool display_enable = true; ++static unsigned int output_device = 1; ++ ++module_param(display_enable, bool, 0444); ++MODULE_PARM_DESC(display_enable, "Enable all displays (default: Y)"); ++ ++module_param(output_device, uint, 0444); ++MODULE_PARM_DESC(output_device, "PDP output device (default: PDP1)"); ++ ++static void pdp_irq_handler(void *data) ++{ ++ struct drm_device *dev = data; ++ struct drm_crtc *crtc; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) ++ pdp_crtc_irq_handler(crtc); ++} ++ ++static int pdp_early_load(struct drm_device *dev) ++{ ++ struct pdp_drm_private *dev_priv; ++ int err; ++ ++ DRM_DEBUG("loading %s device\n", to_platform_device(dev->dev)->name); ++ ++ platform_set_drvdata(to_platform_device(dev->dev), dev); ++ ++ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); ++ if (!dev_priv) ++ return -ENOMEM; ++ ++ dev->dev_private = dev_priv; ++ dev_priv->dev = dev; ++ dev_priv->version = (enum pdp_version) ++ to_platform_device(dev->dev)->id_entry->driver_data; ++ dev_priv->display_enabled = display_enable; ++ ++#if !defined(SUPPORT_PLATO_DISPLAY) ++ /* PDP output device selection */ ++ dev_priv->outdev = (enum pdp_output_device)output_device; ++ if (dev_priv->outdev == PDP_OUTPUT_PDP2 && ++ !tc_pdp2_compatible(dev->dev->parent)) { ++ DRM_ERROR("TC doesn't support PDP2\n"); ++ err = -ENODEV; ++ goto err_dev_priv_free; ++ } ++ ++ if (dev_priv->outdev == PDP_OUTPUT_PDP1) { ++ dev_priv->pdp_interrupt = TC_INTERRUPT_PDP; ++ } else if (dev_priv->outdev == PDP_OUTPUT_PDP2) { ++ dev_priv->pdp_interrupt = TC_INTERRUPT_PDP2; ++ } else { ++ DRM_ERROR("wrong PDP device number (outdev=%u)\n", ++ dev_priv->outdev); ++ err = -ENODEV; ++ goto err_dev_priv_free; ++ } ++ ++ /* PDP FBC module support detection */ ++ dev_priv->pfim_capable = (dev_priv->outdev == PDP_OUTPUT_PDP2 && ++ tc_pfim_capable(dev->dev->parent)); ++#endif ++ ++ if (dev_priv->version == PDP_VERSION_APOLLO || ++ dev_priv->version == PDP_VERSION_ODIN) { 
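++ /*
++ * Apollo and Odin PDPs sit behind the tc parent device, which
++ * must be enabled before any PDP registers are touched.
++ */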
++#if !defined(SUPPORT_PLATO_DISPLAY) ++ err = tc_enable(dev->dev->parent); ++ if (err) { ++ DRM_ERROR("failed to enable parent device (err=%d)\n", err); ++ goto err_dev_priv_free; ++ } ++ ++ /* ++ * check whether it's Orion PDP for picking ++ * the right display mode list later on ++ */ ++ if (dev_priv->version == PDP_VERSION_ODIN) ++ dev_priv->subversion = (enum pdp_odin_subversion) ++ tc_odin_subvers(dev->dev->parent); ++#endif ++ } ++ ++#if defined(SUPPORT_PLATO_DISPLAY) ++ else if (dev_priv->version == PDP_VERSION_PLATO) { ++// XXX do we we need to do this? Plato driver has already enabled device. ++ err = plato_enable(dev->dev->parent); ++ if (err) { ++ DRM_ERROR("failed to enable parent device (err=%d)\n", err); ++ goto err_dev_priv_free; ++ } ++ } ++#endif ++ ++ dev_priv->gem_priv = pdp_gem_init(dev); ++ if (!dev_priv->gem_priv) { ++ DRM_ERROR("gem initialisation failed\n"); ++ err = -ENOMEM; ++ goto err_disable_parent_device; ++ } ++ ++ err = pdp_modeset_early_init(dev_priv); ++ if (err) { ++ DRM_ERROR("early modeset initialisation failed (err=%d)\n", ++ err); ++ goto err_gem_cleanup; ++ } ++ ++ err = drm_vblank_init(dev_priv->dev, 1); ++ if (err) { ++ DRM_ERROR("failed to complete vblank init (err=%d)\n", err); ++ goto err_modeset_late_cleanup; ++ } ++ ++ if (dev_priv->version == PDP_VERSION_APOLLO || ++ dev_priv->version == PDP_VERSION_ODIN) { ++#if !defined(SUPPORT_PLATO_DISPLAY) ++ err = tc_set_interrupt_handler(dev->dev->parent, ++ dev_priv->pdp_interrupt, ++ pdp_irq_handler, ++ dev); ++ if (err) { ++ DRM_ERROR("failed to set interrupt handler (err=%d)\n", ++ err); ++ goto err_vblank_cleanup; ++ } ++ ++ err = tc_enable_interrupt(dev->dev->parent, ++ dev_priv->pdp_interrupt); ++ if (err) { ++ DRM_ERROR("failed to enable pdp interrupts (err=%d)\n", ++ err); ++ goto err_uninstall_interrupt_handle; ++ } ++#endif ++ } ++#if defined(SUPPORT_PLATO_DISPLAY) ++ else if (dev_priv->version == PDP_VERSION_PLATO) { ++ err = plato_set_interrupt_handler(dev->dev->parent, ++ PLATO_INTERRUPT_PDP, ++ pdp_irq_handler, ++ dev); ++ if (err) { ++ DRM_ERROR("failed to set interrupt handler (err=%d)\n", ++ err); ++ goto err_vblank_cleanup; ++ } ++ ++ err = plato_enable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP); ++ if (err) { ++ DRM_ERROR("failed to enable pdp interrupts (err=%d)\n", ++ err); ++ goto err_uninstall_interrupt_handle; ++ } ++ } ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)) ++ dev->irq_enabled = true; ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) ++ dev->vblank_disable_allowed = 1; ++#endif ++ ++ return 0; ++ ++err_uninstall_interrupt_handle: ++ if (dev_priv->version == PDP_VERSION_APOLLO || ++ dev_priv->version == PDP_VERSION_ODIN) { ++#if !defined(SUPPORT_PLATO_DISPLAY) ++ tc_set_interrupt_handler(dev->dev->parent, ++ dev_priv->pdp_interrupt, ++ NULL, ++ NULL); ++#endif ++ } ++#if defined(SUPPORT_PLATO_DISPLAY) ++ else if (dev_priv->version == PDP_VERSION_PLATO) { ++ plato_set_interrupt_handler(dev->dev->parent, ++ PLATO_INTERRUPT_PDP, ++ NULL, ++ NULL); ++ } ++#endif ++err_vblank_cleanup: ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ /* Called by drm_dev_fini in Linux 4.11.0 and later */ ++ drm_vblank_cleanup(dev_priv->dev); ++#endif ++err_modeset_late_cleanup: ++ pdp_modeset_late_cleanup(dev_priv); ++err_gem_cleanup: ++ pdp_gem_cleanup(dev_priv->gem_priv); ++err_disable_parent_device: ++ if (dev_priv->version == PDP_VERSION_APOLLO || ++ dev_priv->version == PDP_VERSION_ODIN) { ++#if !defined(SUPPORT_PLATO_DISPLAY) ++ 
tc_disable(dev->dev->parent); ++#endif ++ } ++#if defined(SUPPORT_PLATO_DISPLAY) ++ else if (dev_priv->version == PDP_VERSION_PLATO) ++ plato_disable(dev->dev->parent); ++#endif ++err_dev_priv_free: ++ kfree(dev_priv); ++ return err; ++} ++ ++static int pdp_late_load(struct drm_device *dev) ++{ ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ int err; ++ ++ err = pdp_modeset_late_init(dev_priv); ++ if (err) { ++ DRM_ERROR("late modeset initialisation failed (err=%d)\n", ++ err); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static void pdp_early_unload(struct drm_device *dev) ++{ ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ ++#if defined(CONFIG_DRM_FBDEV_EMULATION) && defined(PDP_USE_ATOMIC) ++ drm_atomic_helper_shutdown(dev); ++#endif ++ pdp_modeset_early_cleanup(dev_priv); ++} ++ ++static void pdp_late_unload(struct drm_device *dev) ++{ ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ ++ DRM_INFO("unloading %s device.\n", to_platform_device(dev->dev)->name); ++ if (dev_priv->version == PDP_VERSION_APOLLO || ++ dev_priv->version == PDP_VERSION_ODIN) { ++#if !defined(SUPPORT_PLATO_DISPLAY) ++ tc_disable_interrupt(dev->dev->parent, dev_priv->pdp_interrupt); ++ tc_set_interrupt_handler(dev->dev->parent, ++ dev_priv->pdp_interrupt, ++ NULL, ++ NULL); ++#endif ++ } ++#if defined(SUPPORT_PLATO_DISPLAY) ++ else if (dev_priv->version == PDP_VERSION_PLATO) { ++ plato_disable_interrupt(dev->dev->parent, PLATO_INTERRUPT_PDP); ++ plato_set_interrupt_handler(dev->dev->parent, ++ PLATO_INTERRUPT_PDP, ++ NULL, ++ NULL); ++ } ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ /* Called by drm_dev_fini in Linux 4.11.0 and later */ ++ drm_vblank_cleanup(dev_priv->dev); ++#endif ++ pdp_modeset_late_cleanup(dev_priv); ++ pdp_gem_cleanup(dev_priv->gem_priv); ++ ++ if (dev_priv->version == PDP_VERSION_APOLLO || ++ dev_priv->version == PDP_VERSION_ODIN) { ++#if !defined(SUPPORT_PLATO_DISPLAY) ++ tc_disable(dev->dev->parent); ++#endif ++ } ++#if defined(SUPPORT_PLATO_DISPLAY) ++ else if (dev_priv->version == PDP_VERSION_PLATO) ++ plato_disable(dev->dev->parent); ++#endif ++ ++ kfree(dev_priv); ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) ++static int pdp_load(struct drm_device *dev, unsigned long flags) ++{ ++ int err; ++ ++ err = pdp_early_load(dev); ++ if (err) ++ return err; ++ ++ err = pdp_late_load(dev); ++ if (err) { ++ pdp_late_unload(dev); ++ return err; ++ } ++ ++ return 0; ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++static int pdp_unload(struct drm_device *dev) ++#else ++static void pdp_unload(struct drm_device *dev) ++#endif ++{ ++ pdp_early_unload(dev); ++ pdp_late_unload(dev); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++ return 0; ++#endif ++} ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) ++static void pdp_preclose(struct drm_device *dev, struct drm_file *file) ++{ ++ struct drm_crtc *crtc; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) ++ pdp_crtc_flip_event_cancel(crtc, file); ++} ++#endif ++ ++#if !defined(CONFIG_DRM_FBDEV_EMULATION) ++static inline void pdp_teardown_drm_config(struct drm_device *dev) ++{ ++#if defined(PDP_USE_ATOMIC) ++ drm_atomic_helper_shutdown(dev); ++#else ++ struct drm_crtc *crtc; ++ ++ DRM_INFO("%s: %s device\n", __func__, to_platform_device(dev->dev)->name); ++ ++ /* ++ * When non atomic driver is in use, manually trigger ->set_config ++ * with an empty mode set associated to this crtc. 
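++ * An empty mode set (no framebuffer and no connectors) causes the
++ * helper to fully disable the crtc.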
++ */ ++ drm_modeset_lock_all(dev); ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ if (crtc->primary->fb) { ++ struct drm_mode_set mode_set = { .crtc = crtc }; ++ int err; ++ ++ err = drm_mode_set_config_internal(&mode_set); ++ if (err) ++ DRM_ERROR("failed to disable crtc %p (err=%d)\n", ++ crtc, err); ++ } ++ } ++ drm_modeset_unlock_all(dev); ++#endif ++} ++#endif /* !defined(CONFIG_DRM_FBDEV_EMULATION) */ ++ ++static void pdp_lastclose(struct drm_device *dev) ++{ ++#if defined(CONFIG_DRM_FBDEV_EMULATION) ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ struct pdp_fbdev *fbdev = dev_priv->fbdev; ++ int err; ++ ++ if (fbdev) { ++ /* ++ * This is a fbdev driver, therefore never attempt to shutdown ++ * on a client disconnecting. ++ */ ++ err = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->helper); ++ if (err) ++ DRM_ERROR("failed to restore mode (err=%d)\n", err); ++ } ++#else ++ pdp_teardown_drm_config(dev); ++#endif ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++int pdp_enable_vblank(struct drm_crtc *crtc) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) ++static int pdp_enable_vblank(struct drm_device *dev, unsigned int pipe) ++#else ++static int pdp_enable_vblank(struct drm_device *dev, int pipe) ++#endif ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++ struct drm_device *dev = crtc->dev; ++ unsigned int pipe = drm_crtc_index(crtc); ++#endif ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ ++ switch (pipe) { ++ case 0: ++ pdp_crtc_set_vblank_enabled(dev_priv->crtc, true); ++ break; ++ default: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) ++ DRM_ERROR("invalid crtc %u\n", pipe); ++#else ++ DRM_ERROR("invalid crtc %d\n", pipe); ++#endif ++ return -EINVAL; ++ } ++ ++ DRM_DEBUG("vblank interrupts enabled for crtc %d\n", pipe); ++ ++ return 0; ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++void pdp_disable_vblank(struct drm_crtc *crtc) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) ++static void pdp_disable_vblank(struct drm_device *dev, unsigned int pipe) ++#else ++static void pdp_disable_vblank(struct drm_device *dev, int pipe) ++#endif ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++ struct drm_device *dev = crtc->dev; ++ unsigned int pipe = drm_crtc_index(crtc); ++#endif ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ ++ switch (pipe) { ++ case 0: ++ pdp_crtc_set_vblank_enabled(dev_priv->crtc, false); ++ break; ++ default: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) ++ DRM_ERROR("invalid crtc %u\n", pipe); ++#else ++ DRM_ERROR("invalid crtc %d\n", pipe); ++#endif ++ return; ++ } ++ ++ DRM_DEBUG("vblank interrupts disabled for crtc %d\n", pipe); ++} ++ ++static int pdp_gem_object_create_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file) ++{ ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ ++ return pdp_gem_object_create_ioctl_priv(dev, ++ dev_priv->gem_priv, ++ data, ++ file); ++} ++ ++static int pdp_gem_dumb_create(struct drm_file *file, ++ struct drm_device *dev, ++ struct drm_mode_create_dumb *args) ++{ ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ ++ return pdp_gem_dumb_create_priv(file, ++ dev, ++ dev_priv->gem_priv, ++ args); ++} ++ ++static const struct drm_ioctl_desc pdp_ioctls[] = { ++ DRM_IOCTL_DEF_DRV(PDP_GEM_CREATE, pdp_gem_object_create_ioctl, ++ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), ++ DRM_IOCTL_DEF_DRV(PDP_GEM_MMAP, pdp_gem_object_mmap_ioctl, ++ DRM_AUTH | DRM_UNLOCKED), ++ 
DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_PREP, pdp_gem_object_cpu_prep_ioctl, ++ DRM_AUTH | DRM_UNLOCKED), ++ DRM_IOCTL_DEF_DRV(PDP_GEM_CPU_FINI, pdp_gem_object_cpu_fini_ioctl, ++ DRM_AUTH | DRM_UNLOCKED), ++}; ++ ++static const struct file_operations pdp_driver_fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .unlocked_ioctl = drm_ioctl, ++ .mmap = drm_gem_mmap, ++ .poll = drm_poll, ++ .read = drm_read, ++ .llseek = noop_llseek, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = drm_compat_ioctl, ++#endif ++}; ++ ++static struct drm_driver pdp_drm_driver = { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ .load = NULL, ++ .unload = NULL, ++#else ++ .load = pdp_load, ++ .unload = pdp_unload, ++#endif ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) ++ .preclose = pdp_preclose, ++#endif ++ .lastclose = pdp_lastclose, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) ++ .set_busid = drm_platform_set_busid, ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) ++ .get_vblank_counter = drm_vblank_count, ++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ .get_vblank_counter = drm_vblank_no_hw_counter, ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)) ++ .enable_vblank = pdp_enable_vblank, ++ .disable_vblank = pdp_disable_vblank, ++#endif ++ ++ .debugfs_init = pdp_debugfs_init, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ .debugfs_cleanup = pdp_debugfs_cleanup, ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) ++ .gem_prime_export = pdp_gem_prime_export, ++ .gem_free_object = pdp_gem_object_free, ++ .gem_vm_ops = &pdp_gem_vm_ops, ++#endif ++ ++ .gem_prime_import = pdp_gem_prime_import, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) ++ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, ++ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, ++#endif ++ .gem_prime_import_sg_table = pdp_gem_prime_import_sg_table, ++ ++ // Set dumb_create to NULL to avoid xorg owning the display (if xorg is running). 
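++ // It is left hooked up here, as generic KMS clients rely on dumb
++ // buffer allocation.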
++ .dumb_create = pdp_gem_dumb_create, ++ .dumb_map_offset = pdp_gem_dumb_map_offset, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ .dumb_destroy = drm_gem_dumb_destroy, ++#endif ++ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = PVRVERSION_MAJ, ++ .minor = PVRVERSION_MIN, ++ .patchlevel = PVRVERSION_BUILD, ++ ++ .driver_features = DRIVER_GEM | ++ DRIVER_MODESET | ++ PVR_DRIVER_PRIME | ++ PVR_DRIVER_ATOMIC, ++ .ioctls = pdp_ioctls, ++ .num_ioctls = ARRAY_SIZE(pdp_ioctls), ++ .fops = &pdp_driver_fops, ++}; ++ ++#if defined(SUPPORT_PLATO_DISPLAY) ++ ++static int compare_parent_dev(struct device *dev, void *data) ++{ ++ struct device *pdp_dev = data; ++ ++ return dev->parent && dev->parent == pdp_dev->parent; ++} ++ ++static int pdp_component_bind(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct drm_device *ddev; ++ int ret; ++ ++ dev_info(dev, "Loading platform device\n"); ++ ddev = drm_dev_alloc(&pdp_drm_driver, &pdev->dev); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) ++ if (IS_ERR(ddev)) ++ return PTR_ERR(ddev); ++#else ++ if (!ddev) ++ return -ENOMEM; ++#endif ++ ++ // XXX no need to do this as happens in pdp_early_load ++ platform_set_drvdata(pdev, ddev); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) ++ /* Needed by drm_platform_set_busid */ ++ ddev->platformdev = pdev; ++#endif ++ BUG_ON(pdp_drm_driver.load != NULL); ++ ++ ret = pdp_early_load(ddev); ++ if (ret) ++ goto err_drm_dev_put; ++ ++ DRM_DEBUG_DRIVER("Binding other components\n"); ++ /* Bind other components, including HDMI encoder/connector */ ++ ret = component_bind_all(dev, ddev); ++ if (ret) { ++ DRM_ERROR("Failed to bind other components (ret=%d)\n", ret); ++ goto err_drm_dev_late_unload; ++ } ++ ++ ret = drm_dev_register(ddev, 0); ++ if (ret) ++ goto err_drm_dev_late_unload; ++ ++ ret = pdp_late_load(ddev); ++ if (ret) ++ goto err_drm_dev_unregister; ++ ++ return 0; ++ ++err_drm_dev_unregister: ++ drm_dev_unregister(ddev); ++err_drm_dev_late_unload: ++ pdp_late_unload(ddev); ++err_drm_dev_put: ++ drm_dev_put(ddev); ++ return ret; ++} ++ ++static void pdp_component_unbind(struct device *dev) ++{ ++ struct drm_device *ddev = dev_get_drvdata(dev); ++ ++ dev_info(dev, "Unloading platform device\n"); ++ BUG_ON(pdp_drm_driver.unload != NULL); ++ pdp_early_unload(ddev); ++ drm_dev_unregister(ddev); ++ pdp_late_unload(ddev); ++ component_unbind_all(dev, ddev); ++ drm_dev_put(ddev); ++} ++ ++static const struct component_master_ops pdp_component_ops = { ++ .bind = pdp_component_bind, ++ .unbind = pdp_component_unbind, ++}; ++ ++ ++static int pdp_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct component_match *match = NULL; ++ ++ component_match_add(dev, &match, compare_parent_dev, dev); ++ return component_master_add_with_match(dev, &pdp_component_ops, match); ++} ++ ++static int pdp_remove(struct platform_device *pdev) ++{ ++ component_master_del(&pdev->dev, &pdp_component_ops); ++ return 0; ++} ++ ++#else // !SUPPORT_PLATO_DISPLAY ++ ++static int pdp_probe(struct platform_device *pdev) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ struct drm_device *ddev; ++ int ret; ++ ++ ddev = drm_dev_alloc(&pdp_drm_driver, &pdev->dev); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) ++ if (IS_ERR(ddev)) ++ return PTR_ERR(ddev); ++#else ++ if (!ddev) ++ return -ENOMEM; ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) ++ /* Needed by drm_platform_set_busid */ 
++ ddev->platformdev = pdev; ++#endif ++ /* ++ * The load callback, called from drm_dev_register, is deprecated, ++ * because of potential race conditions. ++ */ ++ BUG_ON(pdp_drm_driver.load != NULL); ++ ++ ret = pdp_early_load(ddev); ++ if (ret) ++ goto err_drm_dev_put; ++ ++ ret = drm_dev_register(ddev, 0); ++ if (ret) ++ goto err_drm_dev_late_unload; ++ ++ ret = pdp_late_load(ddev); ++ if (ret) ++ goto err_drm_dev_unregister; ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", ++ pdp_drm_driver.name, ++ pdp_drm_driver.major, ++ pdp_drm_driver.minor, ++ pdp_drm_driver.patchlevel, ++ pdp_drm_driver.date, ++ ddev->primary->index); ++#endif ++ return 0; ++ ++err_drm_dev_unregister: ++ drm_dev_unregister(ddev); ++err_drm_dev_late_unload: ++ pdp_late_unload(ddev); ++err_drm_dev_put: ++ drm_dev_put(ddev); ++ return ret; ++#else ++ return drm_platform_init(&pdp_drm_driver, pdev); ++#endif ++} ++ ++static int pdp_remove(struct platform_device *pdev) ++{ ++ struct drm_device *ddev = platform_get_drvdata(pdev); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ /* ++ * The unload callback, called from drm_dev_unregister, is ++ * deprecated. ++ */ ++ BUG_ON(pdp_drm_driver.unload != NULL); ++ ++ pdp_early_unload(ddev); ++ ++ drm_dev_unregister(ddev); ++ ++ pdp_late_unload(ddev); ++ ++ drm_dev_put(ddev); ++#else ++ drm_put_dev(ddev); ++#endif ++ return 0; ++} ++ ++#endif // SUPPORT_PLATO_DISPLAY ++ ++static void pdp_shutdown(struct platform_device *pdev) ++{ ++} ++ ++static struct platform_device_id pdp_platform_device_id_table[] = { ++ { .name = APOLLO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_APOLLO }, ++ { .name = ODN_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_ODIN }, ++#if defined(SUPPORT_PLATO_DISPLAY) ++ { .name = PLATO_DEVICE_NAME_PDP, .driver_data = PDP_VERSION_PLATO }, ++#endif // SUPPORT_PLATO_DISPLAY ++ { }, ++}; ++ ++static struct platform_driver pdp_platform_driver = { ++ .probe = pdp_probe, ++ .remove = pdp_remove, ++ .shutdown = pdp_shutdown, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = DRIVER_NAME, ++ }, ++ .id_table = pdp_platform_device_id_table, ++}; ++ ++module_platform_driver(pdp_platform_driver); ++ ++MODULE_AUTHOR("Imagination Technologies Ltd. "); ++MODULE_DESCRIPTION(DRIVER_DESC); ++//MODULE_DEVICE_TABLE(platform, pdp_platform_device_id_table); ++MODULE_LICENSE("Dual MIT/GPL"); +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h +new file mode 100644 +index 000000000000..9306ff01b251 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_drv.h +@@ -0,0 +1,241 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. 
++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#if !defined(__DRM_PDP_DRV_H__) ++#define __DRM_PDP_DRV_H__ ++ ++#include ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#else ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) ++#include ++#endif ++ ++#include "pdp_common.h" ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && \ ++ !defined(PVR_ANDROID_USE_PDP_LEGACY) ++#define PDP_USE_ATOMIC ++#endif ++ ++struct pdp_gem_context; ++enum pdp_crtc_flip_status; ++struct pdp_flip_data; ++struct pdp_gem_private; ++ ++#if !defined(SUPPORT_PLATO_DISPLAY) ++struct tc_pdp_platform_data; ++#else ++struct plato_pdp_platform_data; ++#endif ++ ++struct pdp_drm_private { ++ struct drm_device *dev; ++#if defined(CONFIG_DRM_FBDEV_EMULATION) ++ struct pdp_fbdev *fbdev; ++#endif ++ ++ enum pdp_version version; ++ ++ /* differentiate Orion from base Odin PDP */ ++ enum pdp_odin_subversion subversion; ++ ++ /* created by pdp_gem_init */ ++ struct pdp_gem_private *gem_priv; ++ ++ /* preferred output device */ ++ enum pdp_output_device outdev; ++ uint32_t pdp_interrupt; ++ ++ /* PDP FBC Decompression module support */ ++ bool pfim_capable; ++ ++ /* initialised by pdp_modeset_early_init */ ++ struct drm_plane *plane; ++ struct drm_crtc *crtc; ++ struct drm_connector *connector; ++ struct drm_encoder *encoder; ++ ++ bool display_enabled; ++}; ++ ++struct pdp_crtc { ++ struct drm_crtc base; ++ ++ uint32_t number; ++ ++ resource_size_t pdp_reg_size; ++ resource_size_t pdp_reg_phys_base; ++ void __iomem *pdp_reg; ++ ++ resource_size_t pdp_bif_reg_size; ++ resource_size_t pdp_bif_reg_phys_base; ++ void __iomem *pdp_bif_reg; ++ ++ resource_size_t pll_reg_size; ++ resource_size_t pll_reg_phys_base; ++ void __iomem *pll_reg; ++ ++ resource_size_t odn_core_size; /* needed for odin pdp clk reset */ ++ resource_size_t odn_core_phys_base; ++ void __iomem *odn_core_reg; ++ ++ resource_size_t pfim_reg_size; ++ resource_size_t pfim_reg_phys_base; ++ void __iomem *pfim_reg; ++ ++ wait_queue_head_t flip_pending_wait_queue; ++ ++ /* Reuse the 
drm_device event_lock to protect these */ ++ atomic_t flip_status; ++ struct drm_pending_vblank_event *flip_event; ++ struct drm_framebuffer *old_fb; ++ struct pdp_flip_data *flip_data; ++ bool flip_async; ++}; ++ ++#define to_pdp_crtc(crtc) container_of(crtc, struct pdp_crtc, base) ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++struct drm_gem_object; ++ ++struct pdp_framebuffer { ++ struct drm_framebuffer base; ++ struct drm_gem_object *obj[1]; ++}; ++ ++#define to_pdp_framebuffer(fb) container_of(fb, struct pdp_framebuffer, base) ++#define to_drm_framebuffer(fb) (&(fb)->base) ++#else ++#define pdp_framebuffer drm_framebuffer ++#define to_pdp_framebuffer(fb) (fb) ++#define to_drm_framebuffer(fb) (fb) ++#endif ++ ++#if defined(CONFIG_DRM_FBDEV_EMULATION) ++struct pdp_fbdev { ++ struct drm_fb_helper helper; ++ struct pdp_framebuffer fb; ++ struct pdp_drm_private *priv; ++ u8 preferred_bpp; ++}; ++#endif ++ ++static inline u32 pdp_drm_fb_cpp(struct drm_framebuffer *fb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++ return fb->format->cpp[0]; ++#else ++ return fb->bits_per_pixel / 8; ++#endif ++} ++ ++static inline u32 pdp_drm_fb_format(struct drm_framebuffer *fb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++ return fb->format->format; ++#else ++ return fb->pixel_format; ++#endif ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) ++int pdp_debugfs_init(struct drm_minor *minor); ++#else ++void pdp_debugfs_init(struct drm_minor *minor); ++#endif ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++void pdp_debugfs_cleanup(struct drm_minor *minor); ++#endif ++ ++struct drm_plane *pdp_plane_create(struct drm_device *dev, ++ enum drm_plane_type type); ++void pdp_plane_set_surface(struct drm_crtc *crtc, struct drm_plane *plane, ++ struct drm_framebuffer *fb, ++ const uint32_t src_x, const uint32_t src_y); ++ ++struct drm_crtc *pdp_crtc_create(struct drm_device *dev, uint32_t number, ++ struct drm_plane *primary_plane); ++void pdp_crtc_set_plane_enabled(struct drm_crtc *crtc, bool enable); ++void pdp_crtc_set_vblank_enabled(struct drm_crtc *crtc, bool enable); ++void pdp_crtc_irq_handler(struct drm_crtc *crtc); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) ++void pdp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file); ++#endif ++ ++struct drm_connector *pdp_dvi_connector_create(struct drm_device *dev); ++ ++struct drm_encoder *pdp_tmds_encoder_create(struct drm_device *dev); ++ ++int pdp_modeset_early_init(struct pdp_drm_private *dev_priv); ++int pdp_modeset_late_init(struct pdp_drm_private *dev_priv); ++void pdp_modeset_early_cleanup(struct pdp_drm_private *dev_priv); ++void pdp_modeset_late_cleanup(struct pdp_drm_private *dev_priv); ++ ++#if defined(CONFIG_DRM_FBDEV_EMULATION) ++struct pdp_fbdev *pdp_fbdev_create(struct pdp_drm_private *dev); ++void pdp_fbdev_destroy(struct pdp_fbdev *fbdev); ++#endif ++ ++int pdp_modeset_validate_init(struct pdp_drm_private *dev_priv, ++ struct drm_mode_fb_cmd2 *mode_cmd, ++ struct pdp_framebuffer *pdp_fb, ++ struct drm_gem_object *obj); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++int pdp_enable_vblank(struct drm_crtc *crtc); ++void pdp_disable_vblank(struct drm_crtc *crtc); ++#endif ++ ++#endif /* !defined(__DRM_PDP_DRV_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c +new file mode 100644 +index 000000000000..12089ff2f6d6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_dvi.c +@@ 
-0,0 +1,307 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drm_pdp_drv.h" ++ ++#include ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#else ++#include ++#endif ++ ++#include ++#include ++ ++#if defined(PDP_USE_ATOMIC) ++#include ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)) ++#include ++#endif ++ ++#include "kernel_compatibility.h" ++ ++struct pdp_mode_data { ++ int hdisplay; ++ int vdisplay; ++ int vrefresh; ++ bool reduced_blanking; ++ bool interlaced; ++ bool margins; ++}; ++ ++static const struct pdp_mode_data pdp_extra_modes[] = { ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) ++ { ++ .hdisplay = 1280, ++ .vdisplay = 720, ++ .vrefresh = 60, ++ .reduced_blanking = false, ++ .interlaced = false, ++ .margins = false, ++ }, ++ { ++ .hdisplay = 1920, ++ .vdisplay = 1080, ++ .vrefresh = 60, ++ .reduced_blanking = false, ++ .interlaced = false, ++ .margins = false, ++ }, ++#endif ++}; ++ ++static char preferred_mode_name[DRM_DISPLAY_MODE_LEN] = "\0"; ++ ++module_param_string(dvi_preferred_mode, ++ preferred_mode_name, ++ DRM_DISPLAY_MODE_LEN, ++ 0444); ++ ++MODULE_PARM_DESC(dvi_preferred_mode, ++ "Specify the preferred mode (if supported), e.g. 
1280x1024."); ++ ++ ++static int pdp_dvi_add_extra_modes(struct drm_connector *connector) ++{ ++ struct drm_display_mode *mode; ++ int num_modes; ++ int i; ++ ++ for (i = 0, num_modes = 0; i < ARRAY_SIZE(pdp_extra_modes); i++) { ++ mode = drm_cvt_mode(connector->dev, ++ pdp_extra_modes[i].hdisplay, ++ pdp_extra_modes[i].vdisplay, ++ pdp_extra_modes[i].vrefresh, ++ pdp_extra_modes[i].reduced_blanking, ++ pdp_extra_modes[i].interlaced, ++ pdp_extra_modes[i].margins); ++ if (mode) { ++ drm_mode_probed_add(connector, mode); ++ num_modes++; ++ } ++ } ++ ++ return num_modes; ++} ++ ++static int pdp_dvi_connector_helper_get_modes(struct drm_connector *connector) ++{ ++ struct drm_device *dev = connector->dev; ++ int num_modes; ++ int len = strlen(preferred_mode_name); ++ ++ if (len) ++ dev_info(dev->dev, "detected dvi_preferred_mode=%s\n", ++ preferred_mode_name); ++ else ++ dev_info(dev->dev, "no dvi_preferred_mode\n"); ++ ++ num_modes = drm_add_modes_noedid(connector, ++ dev->mode_config.max_width, ++ dev->mode_config.max_height); ++ ++ num_modes += pdp_dvi_add_extra_modes(connector); ++ if (num_modes) { ++ struct drm_display_mode *pref_mode = NULL; ++ ++ if (len) { ++ struct drm_display_mode *mode; ++ struct list_head *entry; ++ ++ list_for_each(entry, &connector->probed_modes) { ++ mode = list_entry(entry, ++ struct drm_display_mode, ++ head); ++ if (!strcmp(mode->name, preferred_mode_name)) { ++ pref_mode = mode; ++ break; ++ } ++ } ++ } ++ ++ if (pref_mode) ++ pref_mode->type |= DRM_MODE_TYPE_PREFERRED; ++ else ++ drm_set_preferred_mode(connector, ++ dev->mode_config.max_width, ++ dev->mode_config.max_height); ++ } ++ ++ drm_mode_sort(&connector->probed_modes); ++ ++ DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s] found %d modes\n", ++ connector->base.id, ++ connector->name, ++ num_modes); ++ ++ return num_modes; ++} ++ ++static enum drm_mode_status ++pdp_dvi_connector_helper_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ if (mode->flags & DRM_MODE_FLAG_INTERLACE) ++ return MODE_NO_INTERLACE; ++ else if (mode->flags & DRM_MODE_FLAG_DBLSCAN) ++ return MODE_NO_DBLESCAN; ++ ++ return MODE_OK; ++} ++ ++#if !defined(PDP_USE_ATOMIC) ++static struct drm_encoder * ++pdp_dvi_connector_helper_best_encoder(struct drm_connector *connector) ++{ ++ /* Pick the first encoder we find */ ++ if (connector->encoder_ids[0] != 0) { ++ struct drm_encoder *encoder; ++ ++ encoder = drm_encoder_find(connector->dev, ++ NULL, ++ connector->encoder_ids[0]); ++ if (encoder) { ++ DRM_DEBUG_DRIVER("[ENCODER:%d:%s] best for [CONNECTOR:%d:%s]\n", ++ encoder->base.id, ++ encoder->name, ++ connector->base.id, ++ connector->name); ++ return encoder; ++ } ++ } ++ ++ return NULL; ++} ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) ++static enum drm_connector_status ++pdp_dvi_connector_detect(struct drm_connector *connector, ++ bool force) ++{ ++ /* ++ * It appears that there is no way to determine if a monitor ++ * is connected. This needs to be set to connected otherwise ++ * DPMS never gets set to ON. 
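++ * On kernels where .detect may be omitted, the DRM core treats a
++ * missing hook as always connected, hence the version guard here.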
++ */ ++ return connector_status_connected; ++} ++#endif ++ ++static void pdp_dvi_connector_destroy(struct drm_connector *connector) ++{ ++ struct pdp_drm_private *dev_priv = connector->dev->dev_private; ++ ++ DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", ++ connector->base.id, ++ connector->name); ++ ++ drm_connector_cleanup(connector); ++ ++ kfree(connector); ++ dev_priv->connector = NULL; ++} ++ ++static void pdp_dvi_connector_force(struct drm_connector *connector) ++{ ++} ++ ++static struct drm_connector_helper_funcs pdp_dvi_connector_helper_funcs = { ++ .get_modes = pdp_dvi_connector_helper_get_modes, ++ .mode_valid = pdp_dvi_connector_helper_mode_valid, ++ /* ++ * For atomic, don't set atomic_best_encoder or best_encoder. This will ++ * cause the DRM core to fallback to drm_atomic_helper_best_encoder(). ++ * This is fine as we only have a single connector and encoder. ++ */ ++#if !defined(PDP_USE_ATOMIC) ++ .best_encoder = pdp_dvi_connector_helper_best_encoder, ++#endif ++}; ++ ++static const struct drm_connector_funcs pdp_dvi_connector_funcs = { ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) ++ .detect = pdp_dvi_connector_detect, ++#endif ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .destroy = pdp_dvi_connector_destroy, ++ .force = pdp_dvi_connector_force, ++#if defined(PDP_USE_ATOMIC) ++ .reset = drm_atomic_helper_connector_reset, ++ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, ++ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, ++#else ++ .dpms = drm_helper_connector_dpms, ++#endif ++}; ++ ++ ++struct drm_connector * ++pdp_dvi_connector_create(struct drm_device *dev) ++{ ++ struct drm_connector *connector; ++ ++ connector = kzalloc(sizeof(*connector), GFP_KERNEL); ++ if (!connector) ++ return ERR_PTR(-ENOMEM); ++ ++ drm_connector_init(dev, ++ connector, ++ &pdp_dvi_connector_funcs, ++ DRM_MODE_CONNECTOR_DVID); ++ drm_connector_helper_add(connector, &pdp_dvi_connector_helper_funcs); ++ ++ connector->dpms = DRM_MODE_DPMS_OFF; ++ connector->interlace_allowed = false; ++ connector->doublescan_allowed = false; ++ connector->display_info.subpixel_order = SubPixelHorizontalRGB; ++ ++ DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", ++ connector->base.id, ++ connector->name); ++ ++ return connector; ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c +new file mode 100644 +index 000000000000..8cbe85590107 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_fb.c +@@ -0,0 +1,312 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. 
++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#if defined(CONFIG_DRM_FBDEV_EMULATION) ++#include ++#include ++#include ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) ++#include ++#endif ++#include ++#include ++ ++#include "drm_pdp_gem.h" ++#include "kernel_compatibility.h" ++ ++#define FBDEV_NAME "pdpdrmfb" ++ ++static struct fb_ops pdp_fbdev_ops = { ++ .owner = THIS_MODULE, ++ .fb_check_var = drm_fb_helper_check_var, ++ .fb_set_par = drm_fb_helper_set_par, ++ .fb_fillrect = cfb_fillrect, ++ .fb_copyarea = cfb_copyarea, ++ .fb_imageblit = cfb_imageblit, ++ .fb_pan_display = drm_fb_helper_pan_display, ++ .fb_blank = drm_fb_helper_blank, ++ .fb_setcmap = drm_fb_helper_setcmap, ++ .fb_debug_enter = drm_fb_helper_debug_enter, ++ .fb_debug_leave = drm_fb_helper_debug_leave, ++}; ++ ++ ++static struct fb_info * ++pdp_fbdev_helper_alloc(struct drm_fb_helper *helper) ++{ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) ++ struct device *dev = helper->dev->dev; ++ struct fb_info *info; ++ int ret; ++ ++ info = framebuffer_alloc(0, dev); ++ if (!info) ++ return ERR_PTR(-ENOMEM); ++ ++ ret = fb_alloc_cmap(&info->cmap, 256, 0); ++ if (ret) ++ goto err_release; ++ ++ info->apertures = alloc_apertures(1); ++ if (!info->apertures) { ++ ret = -ENOMEM; ++ goto err_free_cmap; ++ } ++ ++ helper->fbdev = info; ++ ++ return info; ++ ++err_free_cmap: ++ fb_dealloc_cmap(&info->cmap); ++err_release: ++ framebuffer_release(info); ++ return ERR_PTR(ret); ++#else ++ return drm_fb_helper_alloc_fbi(helper); ++#endif ++} ++ ++static inline void ++pdp_fbdev_helper_fill_info(struct drm_fb_helper *helper, ++ struct drm_fb_helper_surface_size *sizes, ++ struct fb_info *info, ++ struct drm_mode_fb_cmd2 __maybe_unused *mode_cmd) ++{ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++ drm_fb_helper_fill_fix(info, mode_cmd->pitches[0], helper->fb->depth); ++ drm_fb_helper_fill_var(info, helper, sizes->fb_width, ++ sizes->fb_height); ++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ drm_fb_helper_fill_fix(info, mode_cmd->pitches[0], ++ helper->fb->format->depth); ++ drm_fb_helper_fill_var(info, helper, helper->fb->width, ++ 
helper->fb->height);
++#else
++	drm_fb_helper_fill_info(info, helper, sizes);
++#endif
++}
++
++static int pdp_fbdev_probe(struct drm_fb_helper *helper,
++			   struct drm_fb_helper_surface_size *sizes)
++{
++	struct pdp_fbdev *pdp_fbdev =
++		container_of(helper, struct pdp_fbdev, helper);
++	struct drm_framebuffer *fb =
++		to_drm_framebuffer(&pdp_fbdev->fb);
++	struct pdp_gem_private *gem_priv = pdp_fbdev->priv->gem_priv;
++	struct drm_device *dev = helper->dev;
++	struct drm_mode_fb_cmd2 mode_cmd;
++	struct pdp_gem_object *pdp_obj;
++	struct drm_gem_object *obj;
++	struct fb_info *info;
++	void __iomem *vaddr;
++	size_t obj_size;
++	int err;
++
++	if (helper->fb)
++		return 0;
++
++	mutex_lock(&dev->struct_mutex);
++
++	/* Create a framebuffer */
++	info = pdp_fbdev_helper_alloc(helper);
++	if (IS_ERR(info)) {
++		err = PTR_ERR(info);
++		goto err_unlock_dev;
++	}
++
++	memset(&mode_cmd, 0, sizeof(mode_cmd));
++	mode_cmd.pitches[0] =
++		sizes->surface_width * DIV_ROUND_UP(sizes->surface_bpp, 8);
++	mode_cmd.width = sizes->surface_width;
++	mode_cmd.height = sizes->surface_height;
++	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
++							  sizes->surface_depth);
++	obj_size = PAGE_ALIGN(mode_cmd.height * mode_cmd.pitches[0]);
++
++	obj = pdp_gem_object_create(dev, gem_priv, obj_size, 0);
++	if (IS_ERR(obj)) {
++		err = PTR_ERR(obj);
++		goto err_unlock_dev;
++	}
++
++	pdp_obj = to_pdp_obj(obj);
++
++	/* ioremap returns NULL on failure, not an error pointer */
++	vaddr = ioremap(pdp_obj->cpu_addr, obj->size);
++	if (!vaddr) {
++		err = -ENOMEM;
++		goto err_gem_destroy;
++	}
++
++	/* Zero fb memory, fb_memset accounts for iomem address space */
++	fb_memset(vaddr, 0, obj_size);
++
++	err = pdp_modeset_validate_init(pdp_fbdev->priv, &mode_cmd,
++					&pdp_fbdev->fb, obj);
++	if (err)
++		goto err_gem_unmap;
++
++	helper->fb = fb;
++	helper->fbdev = info;
++
++	/* Fill out the Linux framebuffer info */
++	strlcpy(info->fix.id, FBDEV_NAME, sizeof(info->fix.id));
++	pdp_fbdev_helper_fill_info(helper, sizes, info, &mode_cmd);
++	info->par = helper;
++	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0))
++	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
++#endif
++	info->fbops = &pdp_fbdev_ops;
++	info->fix.smem_start = pdp_obj->cpu_addr;
++	info->fix.smem_len = obj_size;
++	info->screen_base = vaddr;
++	info->screen_size = obj_size;
++	info->apertures->ranges[0].base = pdp_obj->cpu_addr;
++	info->apertures->ranges[0].size = obj_size;
++
++	mutex_unlock(&dev->struct_mutex);
++	return 0;
++
++err_gem_unmap:
++	iounmap(vaddr);
++
++err_gem_destroy:
++	pdp_gem_object_free_priv(gem_priv, obj);
++
++err_unlock_dev:
++	mutex_unlock(&dev->struct_mutex);
++
++	DRM_ERROR(FBDEV_NAME " - %s failed (err=%d)\n", __func__, err);
++	return err;
++}
++
++static const struct drm_fb_helper_funcs pdp_fbdev_helper_funcs = {
++	.fb_probe = pdp_fbdev_probe,
++};
++
++struct pdp_fbdev *pdp_fbdev_create(struct pdp_drm_private *dev_priv)
++{
++	struct pdp_fbdev *pdp_fbdev;
++	int err;
++
++	pdp_fbdev = kzalloc(sizeof(*pdp_fbdev), GFP_KERNEL);
++	if (!pdp_fbdev)
++		return ERR_PTR(-ENOMEM);
++
++	drm_fb_helper_prepare(dev_priv->dev, &pdp_fbdev->helper,
++			      &pdp_fbdev_helper_funcs);
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
++	err = drm_fb_helper_init(dev_priv->dev, &pdp_fbdev->helper, 1, 1);
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0))
++	err = drm_fb_helper_init(dev_priv->dev, &pdp_fbdev->helper, 1);
++#else
++	err = drm_fb_helper_init(dev_priv->dev, &pdp_fbdev->helper);
++#endif
++	if (err)
++		goto err_free_fbdev;
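++	/*
++	 * Note on the gated calls above: drm_fb_helper_init() dropped its
++	 * crtc_count argument in 4.11 and its max_conn_count argument in
++	 * 5.7, hence the three signatures. As a worked example of the
++	 * sizing done in pdp_fbdev_probe(): a 1920x1080 surface at 32 bpp
++	 * gives pitches[0] = 1920 * 4 = 7680 bytes and
++	 * obj_size = PAGE_ALIGN(1080 * 7680) = 8294400 bytes of PDP memory.
++	 */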
++ ++ pdp_fbdev->priv = dev_priv; ++ pdp_fbdev->preferred_bpp = 32; ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)) ++ drm_fb_helper_single_add_all_connectors(&pdp_fbdev->helper); ++#endif ++ ++ /* Call ->fb_probe() */ ++ err = drm_fb_helper_initial_config(&pdp_fbdev->helper, pdp_fbdev->preferred_bpp); ++ if (err) ++ goto err_fb_helper_fini; ++ ++ DRM_DEBUG_DRIVER(FBDEV_NAME " - fb device registered\n"); ++ return pdp_fbdev; ++ ++err_fb_helper_fini: ++ drm_fb_helper_fini(&pdp_fbdev->helper); ++ ++err_free_fbdev: ++ kfree(pdp_fbdev); ++ ++ DRM_ERROR(FBDEV_NAME " - %s, failed (err=%d)\n", __func__, err); ++ return ERR_PTR(err); ++} ++ ++void pdp_fbdev_destroy(struct pdp_fbdev *pdp_fbdev) ++{ ++ struct pdp_framebuffer *pdp_fb; ++ struct pdp_gem_object *pdp_obj; ++ struct drm_framebuffer *fb; ++ struct fb_info *info; ++ ++ if (!pdp_fbdev) ++ return; ++ ++ drm_fb_helper_unregister_fbi(&pdp_fbdev->helper); ++ pdp_fb = &pdp_fbdev->fb; ++ ++ pdp_obj = to_pdp_obj(pdp_fb->obj[0]); ++ if (pdp_obj) { ++ info = pdp_fbdev->helper.fbdev; ++ iounmap((void __iomem *)info->screen_base); ++ } ++ ++ drm_gem_object_put(pdp_fb->obj[0]); ++ ++ drm_fb_helper_fini(&pdp_fbdev->helper); ++ ++ fb = to_drm_framebuffer(pdp_fb); ++ ++ /** ++ * If the driver's probe function hasn't been called ++ * (due to deferred setup of the framebuffer device), ++ * then the framebuffer won't have been initialised. ++ * Check this before attempting to clean it up. ++ */ ++ if (fb && fb->dev) ++ drm_framebuffer_cleanup(fb); ++ ++ kfree(pdp_fbdev); ++} ++#endif /* CONFIG_DRM_FBDEV_EMULATION */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c +new file mode 100644 +index 000000000000..b7013210e7af +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.c +@@ -0,0 +1,780 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. 
++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++#if defined(SUPPORT_PLATO_DISPLAY) ++#include "plato_drv.h" ++#else ++#include "tc_drv.h" ++#endif ++ ++#include "drm_pdp_gem.h" ++#include "pdp_drm.h" ++#include "kernel_compatibility.h" ++ ++#if defined(SUPPORT_PLATO_DISPLAY) ++#define pdp_gem_platform_data plato_pdp_platform_data ++#else ++#define pdp_gem_platform_data tc_pdp_platform_data ++#endif ++ ++const struct vm_operations_struct pdp_gem_vm_ops = { ++ .fault = pdp_gem_object_vm_fault, ++ .open = drm_gem_vm_open, ++ .close = drm_gem_vm_close, ++}; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) ++const struct drm_gem_object_funcs pdp_gem_funcs = { ++ .export = pdp_gem_prime_export, ++ .free = pdp_gem_object_free, ++ .vm_ops = &pdp_gem_vm_ops, ++}; ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) */ ++ ++struct pdp_gem_private { ++ struct mutex vram_lock; ++ struct drm_mm vram; ++}; ++ ++static struct pdp_gem_object * ++pdp_gem_private_object_create(struct drm_device *dev, ++ size_t size, ++ struct dma_resv *resv) ++{ ++ struct pdp_gem_object *pdp_obj; ++ ++ WARN_ON(PAGE_ALIGN(size) != size); ++ ++ pdp_obj = kzalloc(sizeof(*pdp_obj), GFP_KERNEL); ++ if (!pdp_obj) ++ return ERR_PTR(-ENOMEM); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ if (!resv) ++ dma_resv_init(&pdp_obj->_resv); ++#else ++ pdp_obj->base.resv = resv; ++#endif ++ drm_gem_private_object_init(dev, &pdp_obj->base, size); ++ ++ return pdp_obj; ++} ++ ++struct drm_gem_object *pdp_gem_object_create(struct drm_device *dev, ++ struct pdp_gem_private *gem_priv, ++ size_t size, ++ u32 flags) ++{ ++ struct pdp_gem_platform_data *pdata = ++ to_platform_device(dev->dev)->dev.platform_data; ++ struct pdp_gem_object *pdp_obj; ++ struct drm_mm_node *node; ++ int err = 0; ++ ++ pdp_obj = pdp_gem_private_object_create(dev, size, NULL); ++ if (!pdp_obj) { ++ err = -ENOMEM; ++ goto err_exit; ++ } ++ ++ node = kzalloc(sizeof(*node), GFP_KERNEL); ++ if (!node) { ++ err = -ENOMEM; ++ goto err_unref; ++ } ++ ++ mutex_lock(&gem_priv->vram_lock); ++ err = drm_mm_insert_node(&gem_priv->vram, node, size); ++ mutex_unlock(&gem_priv->vram_lock); ++ if (err) ++ goto err_free_node; ++ ++ pdp_obj->vram = node; ++ pdp_obj->dev_addr = pdp_obj->vram->start; ++ pdp_obj->cpu_addr = pdata->memory_base + pdp_obj->dev_addr; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ pdp_obj->resv = &pdp_obj->_resv; ++#else ++ pdp_obj->resv = pdp_obj->base.resv; ++#endif ++ pdp_obj->dma_map_export_host_addr = pdata->dma_map_export_host_addr; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) ++ pdp_obj->base.funcs = &pdp_gem_funcs; ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) */ ++ ++ return &pdp_obj->base; ++ ++err_free_node: 
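++	/*
++	 * drm_mm_insert_node() failed, so the node was never added to the
++	 * VRAM range manager; a plain kfree() is the only unwind needed
++	 * before dropping the object itself below.
++	 */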
++ kfree(node); ++err_unref: ++ pdp_gem_object_free_priv(gem_priv, &pdp_obj->base); ++err_exit: ++ return ERR_PTR(err); ++} ++ ++void pdp_gem_object_free(struct drm_gem_object *obj) ++{ ++ struct pdp_drm_private *dev_priv = obj->dev->dev_private; ++ ++ pdp_gem_object_free_priv(dev_priv->gem_priv, obj); ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++vm_fault_t pdp_gem_object_vm_fault(struct vm_fault *vmf) ++#else ++int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++#endif ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++ struct vm_area_struct *vma = vmf->vma; ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++ unsigned long addr = vmf->address; ++#else ++ unsigned long addr = (unsigned long)vmf->virtual_address; ++#endif ++ struct drm_gem_object *obj = vma->vm_private_data; ++ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); ++ unsigned long off; ++ unsigned long pfn; ++ ++ off = addr - vma->vm_start; ++ pfn = (pdp_obj->cpu_addr + off) >> PAGE_SHIFT; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) ++ return vmf_insert_pfn(vma, addr, pfn); ++#else ++ { ++ int err; ++ ++ err = vm_insert_pfn(vma, addr, pfn); ++ switch (err) { ++ case 0: ++ case -EBUSY: ++ return VM_FAULT_NOPAGE; ++ case -ENOMEM: ++ return VM_FAULT_OOM; ++ default: ++ return VM_FAULT_SIGBUS; ++ } ++ } ++#endif ++} ++ ++void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv, ++ struct drm_gem_object *obj) ++{ ++ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); ++ ++ drm_gem_free_mmap_offset(obj); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ if (&pdp_obj->_resv == pdp_obj->resv) ++ dma_resv_fini(&pdp_obj->_resv); ++#endif ++ if (pdp_obj->vram) { ++ mutex_lock(&gem_priv->vram_lock); ++ drm_mm_remove_node(pdp_obj->vram); ++ mutex_unlock(&gem_priv->vram_lock); ++ ++ kfree(pdp_obj->vram); ++ } else if (obj->import_attach) { ++ drm_prime_gem_destroy(obj, pdp_obj->sgt); ++ } ++ ++ drm_gem_object_release(&pdp_obj->base); ++ kfree(pdp_obj); ++} ++ ++static int pdp_gem_prime_attach(struct dma_buf *dma_buf, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) ++ struct device *dev, ++#endif ++ struct dma_buf_attachment *attach) ++{ ++ struct drm_gem_object *obj = dma_buf->priv; ++ ++ /* Restrict access to Rogue */ ++ if (WARN_ON(!obj->dev->dev->parent) || ++ obj->dev->dev->parent != attach->dev->parent) ++ return -EPERM; ++ ++ return 0; ++} ++ ++static struct sg_table * ++pdp_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, ++ enum dma_data_direction dir) ++{ ++ struct drm_gem_object *obj = attach->dmabuf->priv; ++ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); ++ struct sg_table *sgt; ++ ++ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); ++ if (!sgt) ++ return NULL; ++ ++ if (sg_alloc_table(sgt, 1, GFP_KERNEL)) ++ goto err_free_sgt; ++ ++ if (pdp_obj->dma_map_export_host_addr) ++ sg_dma_address(sgt->sgl) = pdp_obj->cpu_addr; ++ else ++ sg_dma_address(sgt->sgl) = pdp_obj->dev_addr; ++ ++ sg_dma_len(sgt->sgl) = obj->size; ++ ++ return sgt; ++ ++err_free_sgt: ++ kfree(sgt); ++ return NULL; ++} ++ ++static void pdp_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, ++ struct sg_table *sgt, ++ enum dma_data_direction dir) ++{ ++ sg_free_table(sgt); ++ kfree(sgt); ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) ++static void *pdp_gem_prime_kmap_atomic(struct dma_buf *dma_buf, ++ unsigned long page_num) ++{ ++ return NULL; ++} ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) ++static void 
*pdp_gem_prime_kmap(struct dma_buf *dma_buf,
++				unsigned long page_num)
++{
++	return NULL;
++}
++#endif
++
++static int pdp_gem_prime_mmap(struct dma_buf *dma_buf,
++			      struct vm_area_struct *vma)
++{
++	struct drm_gem_object *obj = dma_buf->priv;
++	int err;
++
++	mutex_lock(&obj->dev->struct_mutex);
++	err = drm_gem_mmap_obj(obj, obj->size, vma);
++	mutex_unlock(&obj->dev->struct_mutex);
++
++	return err;
++}
++
++#if defined(CONFIG_X86)
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
++static void *pdp_gem_prime_vmap(struct dma_buf *dma_buf)
++#else
++static int pdp_gem_prime_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */
++{
++	struct drm_gem_object *obj = dma_buf->priv;
++	struct pdp_gem_object *pdp_obj = to_pdp_obj(obj);
++	void __iomem *vaddr;
++	__maybe_unused int ret = 0;
++
++	mutex_lock(&obj->dev->struct_mutex);
++
++	/*
++	 * On x86 platforms, the pointer returned by ioremap can be
++	 * dereferenced directly. As such, explicitly cast away the
++	 * __iomem qualifier.
++	 */
++	vaddr = ioremap(pdp_obj->cpu_addr, obj->size);
++	if (!vaddr) {
++		DRM_DEBUG_DRIVER("ioremap failed");
++		ret = -ENOMEM;
++	}
++
++	mutex_unlock(&obj->dev->struct_mutex);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0))
++	if (ret == 0)
++		dma_buf_map_set_vaddr_iomem(map, vaddr);
++	return ret;
++#else
++	return (void __force *) vaddr;
++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) */
++}
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
++static void pdp_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
++#else
++static void pdp_gem_prime_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */
++{
++	struct drm_gem_object *obj = dma_buf->priv;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0))
++	void __iomem *vaddr = map->vaddr_iomem;
++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) */
++
++	mutex_lock(&obj->dev->struct_mutex);
++	iounmap((void __iomem *)vaddr);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0))
++	dma_buf_map_clear(map);
++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0) */
++
++	mutex_unlock(&obj->dev->struct_mutex);
++}
++#endif
++
++static const struct dma_buf_ops pdp_gem_prime_dmabuf_ops = {
++	.attach = pdp_gem_prime_attach,
++	.map_dma_buf = pdp_gem_prime_map_dma_buf,
++	.unmap_dma_buf = pdp_gem_prime_unmap_dma_buf,
++	.release = drm_gem_dmabuf_release,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
++	.map_atomic = pdp_gem_prime_kmap_atomic,
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))
++	.map = pdp_gem_prime_kmap,
++#endif
++#else
++	.kmap_atomic = pdp_gem_prime_kmap_atomic,
++	.kmap = pdp_gem_prime_kmap,
++#endif
++	.mmap = pdp_gem_prime_mmap,
++#if defined(CONFIG_X86)
++	.vmap = pdp_gem_prime_vmap,
++	.vunmap = pdp_gem_prime_vunmap
++#endif
++};
++
++
++static int
++pdp_gem_lookup_our_object(struct drm_file *file, u32 handle,
++			  struct drm_gem_object **objp)
++{
++	struct drm_gem_object *obj;
++
++	obj = drm_gem_object_lookup(file, handle);
++	if (!obj)
++		return -ENOENT;
++
++	if (obj->import_attach) {
++		/*
++		 * The dmabuf associated with the object is not one of ours.
++		 * Our own buffers are handled differently on import.
++ */ ++ drm_gem_object_put(obj); ++ return -EINVAL; ++ } ++ ++ *objp = obj; ++ return 0; ++} ++ ++struct dma_buf *pdp_gem_prime_export( ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++ struct drm_device *dev, ++#endif ++ struct drm_gem_object *obj, ++ int flags) ++{ ++ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ DEFINE_DMA_BUF_EXPORT_INFO(export_info); ++ ++ export_info.ops = &pdp_gem_prime_dmabuf_ops; ++ export_info.size = obj->size; ++ export_info.flags = flags; ++ export_info.resv = pdp_obj->resv; ++ export_info.priv = obj; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++ return drm_gem_dmabuf_export(obj->dev, &export_info); ++#else ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) ++ return drm_gem_dmabuf_export(dev, &export_info); ++#else ++ return dma_buf_export(&export_info); ++#endif ++#endif ++#else ++ return dma_buf_export(obj, &pdp_gem_prime_dmabuf_ops, obj->size, ++ flags, pdp_obj->resv); ++#endif ++} ++ ++struct drm_gem_object * ++pdp_gem_prime_import(struct drm_device *dev, ++ struct dma_buf *dma_buf) ++{ ++ struct drm_gem_object *obj = dma_buf->priv; ++ ++ if (obj->dev == dev) { ++ BUG_ON(dma_buf->ops != &pdp_gem_prime_dmabuf_ops); ++ ++ /* ++ * The dmabuf is one of ours, so return the associated ++ * PDP GEM object, rather than create a new one. ++ */ ++ drm_gem_object_get(obj); ++ ++ return obj; ++ } ++ ++ return drm_gem_prime_import(dev, dma_buf); ++} ++ ++struct drm_gem_object * ++pdp_gem_prime_import_sg_table(struct drm_device *dev, ++ struct dma_buf_attachment *attach, ++ struct sg_table *sgt) ++{ ++ struct pdp_gem_platform_data *pdata = ++ to_platform_device(dev->dev)->dev.platform_data; ++ struct pdp_gem_object *pdp_obj; ++ int err; ++ ++ pdp_obj = pdp_gem_private_object_create(dev, ++ attach->dmabuf->size, ++ attach->dmabuf->resv); ++ if (!pdp_obj) { ++ err = -ENOMEM; ++ goto err_exit; ++ } ++ ++ pdp_obj->sgt = sgt; ++ ++ /* We only expect a single entry for card memory */ ++ if (pdp_obj->sgt->nents != 1) { ++ err = -EINVAL; ++ goto err_obj_unref; ++ } ++ ++ pdp_obj->dev_addr = sg_dma_address(pdp_obj->sgt->sgl); ++ pdp_obj->cpu_addr = pdata->memory_base + pdp_obj->dev_addr; ++ pdp_obj->resv = attach->dmabuf->resv; ++ ++ return &pdp_obj->base; ++ ++err_obj_unref: ++ drm_gem_object_put(&pdp_obj->base); ++err_exit: ++ return ERR_PTR(err); ++} ++ ++int pdp_gem_dumb_create_priv(struct drm_file *file, ++ struct drm_device *dev, ++ struct pdp_gem_private *gem_priv, ++ struct drm_mode_create_dumb *args) ++{ ++ struct drm_gem_object *obj; ++ u32 handle; ++ u32 pitch; ++ size_t size; ++ int err; ++ ++ pitch = args->width * (ALIGN(args->bpp, 8) >> 3); ++ size = PAGE_ALIGN(pitch * args->height); ++ ++ obj = pdp_gem_object_create(dev, gem_priv, size, 0); ++ if (IS_ERR(obj)) ++ return PTR_ERR(obj); ++ ++ err = drm_gem_handle_create(file, obj, &handle); ++ if (err) ++ goto exit; ++ ++ args->handle = handle; ++ args->pitch = pitch; ++ args->size = size; ++ ++exit: ++ drm_gem_object_put(obj); ++ return err; ++} ++ ++int pdp_gem_dumb_map_offset(struct drm_file *file, ++ struct drm_device *dev, ++ uint32_t handle, ++ uint64_t *offset) ++{ ++ struct drm_gem_object *obj; ++ int err; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ err = pdp_gem_lookup_our_object(file, handle, &obj); ++ if (err) ++ goto exit_unlock; ++ ++ err = drm_gem_create_mmap_offset(obj); ++ if (err) ++ goto exit_obj_unref; ++ ++ *offset = drm_vma_node_offset_addr(&obj->vma_node); ++ ++exit_obj_unref: ++ drm_gem_object_put(obj); 
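++#if 0
++	/*
++	 * Usage sketch, not compiled into the driver: the typical userspace
++	 * path into pdp_gem_dumb_create_priv(), pdp_gem_dumb_map_offset()
++	 * and pdp_gem_object_vm_fault(). Assumes fd is an open DRM device
++	 * node and the standard uapi headers (drm/drm_mode.h) are in scope.
++	 */
++	struct drm_mode_create_dumb creq = {
++		.width = 1920, .height = 1080, .bpp = 32,
++	};
++	struct drm_mode_map_dumb mreq = { 0 };
++	void *map;
++
++	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
++	mreq.handle = creq.handle;
++	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
++	map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
++		   fd, mreq.offset);
++#endif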
++exit_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return err; ++} ++ ++struct pdp_gem_private *pdp_gem_init(struct drm_device *dev) ++{ ++#if !defined(SUPPORT_ION) || defined(SUPPORT_GEM_ALLOC) ++ struct pdp_gem_platform_data *pdata = ++ to_platform_device(dev->dev)->dev.platform_data; ++#endif ++ struct pdp_gem_private *gem_priv = ++ kmalloc(sizeof(*gem_priv), GFP_KERNEL); ++ ++ if (!gem_priv) ++ return NULL; ++ ++ mutex_init(&gem_priv->vram_lock); ++ ++ memset(&gem_priv->vram, 0, sizeof(gem_priv->vram)); ++ ++#if defined(SUPPORT_ION) && !defined(SUPPORT_GEM_ALLOC) ++ drm_mm_init(&gem_priv->vram, 0, 0); ++ DRM_INFO("%s has no directly allocatable memory; the memory is managed by ION\n", ++ dev->driver->name); ++#else ++ drm_mm_init(&gem_priv->vram, ++ pdata->pdp_heap_memory_base - pdata->memory_base, ++ pdata->pdp_heap_memory_size); ++ ++ DRM_INFO("%s has %pa bytes of allocatable memory at 0x%llx = (0x%llx - 0x%llx)\n", ++ dev->driver->name, &pdata->pdp_heap_memory_size, ++ (u64)(pdata->pdp_heap_memory_base - pdata->memory_base), ++ (u64)pdata->pdp_heap_memory_base, (u64)pdata->memory_base); ++#endif ++ return gem_priv; ++} ++ ++void pdp_gem_cleanup(struct pdp_gem_private *gem_priv) ++{ ++ drm_mm_takedown(&gem_priv->vram); ++ mutex_destroy(&gem_priv->vram_lock); ++ ++ kfree(gem_priv); ++} ++ ++struct dma_resv *pdp_gem_get_resv(struct drm_gem_object *obj) ++{ ++ return (to_pdp_obj(obj)->resv); ++} ++ ++u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj) ++{ ++ struct pdp_gem_object *pdp_obj = to_pdp_obj(obj); ++ ++ return pdp_obj->dev_addr; ++} ++ ++int pdp_gem_object_create_ioctl_priv(struct drm_device *dev, ++ struct pdp_gem_private *gem_priv, ++ void *data, ++ struct drm_file *file) ++{ ++ struct drm_pdp_gem_create *args = data; ++ struct drm_gem_object *obj; ++ int err; ++ ++ if (args->flags) { ++ DRM_ERROR("invalid flags: %#08x\n", args->flags); ++ return -EINVAL; ++ } ++ ++ if (args->handle) { ++ DRM_ERROR("invalid handle (this should always be 0)\n"); ++ return -EINVAL; ++ } ++ ++ obj = pdp_gem_object_create(dev, ++ gem_priv, ++ PAGE_ALIGN(args->size), ++ args->flags); ++ if (IS_ERR(obj)) ++ return PTR_ERR(obj); ++ ++ err = drm_gem_handle_create(file, obj, &args->handle); ++ drm_gem_object_put(obj); ++ ++ return err; ++ ++} ++ ++int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file) ++{ ++ struct drm_pdp_gem_mmap *args = (struct drm_pdp_gem_mmap *)data; ++ ++ if (args->pad) { ++ DRM_ERROR("invalid pad (this should always be 0)\n"); ++ return -EINVAL; ++ } ++ ++ if (args->offset) { ++ DRM_ERROR("invalid offset (this should always be 0)\n"); ++ return -EINVAL; ++ } ++ ++ return pdp_gem_dumb_map_offset(file, dev, args->handle, &args->offset); ++} ++ ++int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file) ++{ ++ struct drm_pdp_gem_cpu_prep *args = (struct drm_pdp_gem_cpu_prep *)data; ++ struct drm_gem_object *obj; ++ struct pdp_gem_object *pdp_obj; ++ bool write = !!(args->flags & PDP_GEM_CPU_PREP_WRITE); ++ bool wait = !(args->flags & PDP_GEM_CPU_PREP_NOWAIT); ++ int err = 0; ++ ++ if (args->flags & ~(PDP_GEM_CPU_PREP_READ | ++ PDP_GEM_CPU_PREP_WRITE | ++ PDP_GEM_CPU_PREP_NOWAIT)) { ++ DRM_ERROR("invalid flags: %#08x\n", args->flags); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ err = pdp_gem_lookup_our_object(file, args->handle, &obj); ++ if (err) ++ goto exit_unlock; ++ ++ pdp_obj = to_pdp_obj(obj); ++ ++ if (pdp_obj->cpu_prep) { ++ err = -EBUSY; ++ goto exit_unref; ++ } ++ ++ 
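++	/*
++	 * Wait on (or, with NOWAIT, poll) the buffer's reservation object.
++	 * The second argument selects wait-for-all-fences when preparing
++	 * for CPU writes and only the exclusive fence for reads; the 30
++	 * second timeout guards against stalled GPU work.
++	 */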
if (wait) { ++ long lerr; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)) ++ lerr = dma_resv_wait_timeout(pdp_obj->resv, write, ++ true, 30 * HZ); ++#else ++ lerr = dma_resv_wait_timeout_rcu(pdp_obj->resv, write, ++ true, 30 * HZ); ++#endif ++ if (!lerr) ++ err = -EBUSY; ++ else if (lerr < 0) ++ err = lerr; ++ } else { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)) ++ if (!dma_resv_test_signaled(pdp_obj->resv, write)) ++#else ++ if (!dma_resv_test_signaled_rcu(pdp_obj->resv, write)) ++#endif ++ err = -EBUSY; ++ } ++ ++ if (!err) ++ pdp_obj->cpu_prep = true; ++ ++exit_unref: ++ drm_gem_object_put(obj); ++exit_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return err; ++} ++ ++int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file) ++{ ++ struct drm_pdp_gem_cpu_fini *args = (struct drm_pdp_gem_cpu_fini *)data; ++ struct drm_gem_object *obj; ++ struct pdp_gem_object *pdp_obj; ++ int err = 0; ++ ++ if (args->pad) { ++ DRM_ERROR("invalid pad (this should always be 0)\n"); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ err = pdp_gem_lookup_our_object(file, args->handle, &obj); ++ if (err) ++ goto exit_unlock; ++ ++ pdp_obj = to_pdp_obj(obj); ++ ++ if (!pdp_obj->cpu_prep) { ++ err = -EINVAL; ++ goto exit_unref; ++ } ++ ++ pdp_obj->cpu_prep = false; ++ ++exit_unref: ++ drm_gem_object_put(obj); ++exit_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return err; ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h +new file mode 100644 +index 000000000000..181b0c3b8c2e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_gem.h +@@ -0,0 +1,157 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#if !defined(__DRM_PDP_GEM_H__) ++#define __DRM_PDP_GEM_H__ ++ ++#include ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#else ++#include ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++#include ++#endif ++ ++#include "drm_pdp_drv.h" ++#include "pvr_dma_resv.h" ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) ++extern const struct drm_gem_object_funcs pdp_gem_funcs; ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) */ ++extern const struct vm_operations_struct pdp_gem_vm_ops; ++ ++struct pdp_gem_private; ++ ++struct pdp_gem_object { ++ struct drm_gem_object base; ++ ++ /* Non-null if backing originated from this driver */ ++ struct drm_mm_node *vram; ++ ++ /* Non-null if backing was imported */ ++ struct sg_table *sgt; ++ ++ bool dma_map_export_host_addr; ++ phys_addr_t cpu_addr; ++ dma_addr_t dev_addr; ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ struct dma_resv _resv; ++#endif ++ struct dma_resv *resv; ++ ++ bool cpu_prep; ++}; ++ ++#define to_pdp_obj(obj) container_of(obj, struct pdp_gem_object, base) ++ ++struct pdp_gem_private *pdp_gem_init(struct drm_device *dev); ++ ++void pdp_gem_cleanup(struct pdp_gem_private *dev_priv); ++ ++/* ioctl functions */ ++int pdp_gem_object_create_ioctl_priv(struct drm_device *dev, ++ struct pdp_gem_private *gem_priv, ++ void *data, ++ struct drm_file *file); ++int pdp_gem_object_mmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file); ++int pdp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file); ++int pdp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file); ++ ++/* drm driver functions */ ++struct drm_gem_object *pdp_gem_object_create(struct drm_device *dev, ++ struct pdp_gem_private *gem_priv, ++ size_t size, ++ u32 flags); ++ ++void pdp_gem_object_free_priv(struct pdp_gem_private *gem_priv, ++ struct drm_gem_object *obj); ++ ++void pdp_gem_object_free(struct drm_gem_object *obj); ++ ++struct dma_buf *pdp_gem_prime_export( ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++ struct drm_device *dev, ++#endif ++ struct drm_gem_object *obj, ++ int flags); ++ ++struct drm_gem_object *pdp_gem_prime_import(struct drm_device *dev, ++ struct dma_buf *dma_buf); ++ ++struct drm_gem_object * ++pdp_gem_prime_import_sg_table(struct drm_device *dev, ++ struct dma_buf_attachment *attach, ++ struct sg_table *sgt); ++ ++int pdp_gem_dumb_create_priv(struct drm_file *file, ++ struct drm_device *dev, ++ struct pdp_gem_private *gem_priv, ++ struct drm_mode_create_dumb *args); ++ ++int pdp_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, ++ uint32_t handle, uint64_t *offset); ++ ++/* vm operation functions */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) ++typedef int vm_fault_t; ++#endif ++vm_fault_t pdp_gem_object_vm_fault(struct vm_fault 
*vmf); ++#else ++int pdp_gem_object_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf); ++#endif ++ ++/* internal interfaces */ ++struct dma_resv *pdp_gem_get_resv(struct drm_gem_object *obj); ++u64 pdp_gem_get_dev_addr(struct drm_gem_object *obj); ++ ++#endif /* !defined(__DRM_PDP_GEM_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c +new file mode 100644 +index 000000000000..0e6563d3e334 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_modeset.c +@@ -0,0 +1,472 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "drm_pdp_drv.h" ++ ++#include ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#else ++#include ++#endif ++ ++#include ++#include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++#include ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++#define drm_gem_fb_create(...) 
pdp_framebuffer_create(__VA_ARGS__) ++#else ++#include ++#endif ++ ++#if defined(PDP_USE_ATOMIC) ++#include ++#endif ++ ++#include ++ ++#include "kernel_compatibility.h" ++ ++#define PDP_WIDTH_MIN 640 ++#define PDP_WIDTH_MAX 1280 ++#define PDP_HEIGHT_MIN 480 ++#define PDP_HEIGHT_MAX 1024 ++ ++#define ODIN_PDP_WIDTH_MAX 1920 ++#define ODIN_PDP_HEIGHT_MAX 1080 ++ ++#define ORION_PDP_WIDTH_MAX 1280 ++#define ORION_PDP_HEIGHT_MAX 720 ++ ++#define PLATO_PDP_WIDTH_MAX 1920 ++#define PLATO_PDP_HEIGHT_MAX 1080 ++ ++static bool async_flip_enable = true; ++ ++module_param(async_flip_enable, bool, 0444); ++ ++MODULE_PARM_DESC(async_flip_enable, ++ "Enable support for 'faked' async flipping (default: Y)"); ++ ++static inline int ++drm_mode_fb_cmd2_validate(const struct drm_mode_fb_cmd2 *mode_cmd) ++{ ++ switch (mode_cmd->pixel_format) { ++ case DRM_FORMAT_ARGB8888: ++ case DRM_FORMAT_XRGB8888: ++ case DRM_FORMAT_RGB565: ++ break; ++ default: ++ DRM_ERROR_RATELIMITED("pixel format not supported (format = %u)\n", ++ mode_cmd->pixel_format); ++ return -EINVAL; ++ } ++ ++ if (mode_cmd->flags & DRM_MODE_FB_INTERLACED) { ++ DRM_ERROR_RATELIMITED("interlaced framebuffers not supported\n"); ++ return -EINVAL; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ switch (mode_cmd->modifier[0]) { ++ case DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12: ++ case DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12: ++ case DRM_FORMAT_MOD_LINEAR: ++ break; ++ default: ++ DRM_ERROR_RATELIMITED("format modifier 0x%llx is not supported\n", ++ mode_cmd->modifier[0]); ++ return -EINVAL; ++ } ++#endif ++ ++ return 0; ++} ++ ++static void pdp_framebuffer_destroy(struct drm_framebuffer *fb) ++{ ++ struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); ++ ++ DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); ++ ++ drm_framebuffer_cleanup(fb); ++ ++ drm_gem_object_put(pdp_fb->obj[0]); ++ ++ kfree(pdp_fb); ++} ++ ++static int pdp_framebuffer_create_handle(struct drm_framebuffer *fb, ++ struct drm_file *file, ++ unsigned int *handle) ++{ ++ struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); ++ ++ DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); ++ ++ return drm_gem_handle_create(file, pdp_fb->obj[0], handle); ++} ++ ++static const struct drm_framebuffer_funcs pdp_framebuffer_funcs = { ++ .destroy = pdp_framebuffer_destroy, ++ .create_handle = pdp_framebuffer_create_handle, ++ .dirty = NULL, ++}; ++ ++static inline int ++pdp_framebuffer_init(struct pdp_drm_private *dev_priv, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ ++ (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) ++ const ++#endif ++ struct drm_mode_fb_cmd2 *mode_cmd, ++ struct pdp_framebuffer *pdp_fb, ++ struct drm_gem_object *obj) ++{ ++ struct drm_framebuffer *fb; ++ ++ if (!pdp_fb) ++ return -EINVAL; ++ ++ fb = to_drm_framebuffer(pdp_fb); ++ pdp_fb->obj[0] = obj; ++ ++ drm_helper_mode_fill_fb_struct(dev_priv->dev, fb, mode_cmd); ++ ++ return drm_framebuffer_init(dev_priv->dev, fb, &pdp_framebuffer_funcs); ++} ++ ++int pdp_modeset_validate_init(struct pdp_drm_private *dev_priv, ++ struct drm_mode_fb_cmd2 *mode_cmd, ++ struct pdp_framebuffer *pdp_fb, ++ struct drm_gem_object *obj) ++{ ++ int err; ++ ++ err = drm_mode_fb_cmd2_validate(mode_cmd); ++ if (err) ++ return err; ++ ++ return pdp_framebuffer_init(dev_priv, mode_cmd, pdp_fb, obj); ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++static struct drm_framebuffer * ++pdp_framebuffer_create(struct drm_device *dev, ++ struct drm_file *file, ++#if (LINUX_VERSION_CODE >= 
KERNEL_VERSION(4, 5, 0)) || \ ++ (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) ++ const ++#endif ++ struct drm_mode_fb_cmd2 *mode_cmd) ++{ ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ struct drm_gem_object *obj; ++ struct pdp_framebuffer *pdp_fb; ++ int err; ++ ++ obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); ++ if (!obj) { ++ DRM_ERROR("failed to find buffer with handle %u\n", ++ mode_cmd->handles[0]); ++ err = -ENOENT; ++ goto err_out; ++ } ++ ++ pdp_fb = kzalloc(sizeof(*pdp_fb), GFP_KERNEL); ++ if (!pdp_fb) { ++ err = -ENOMEM; ++ goto err_obj_put; ++ } ++ ++ err = pdp_framebuffer_init(dev_priv, mode_cmd, pdp_fb, obj); ++ if (err) { ++ DRM_ERROR("failed to initialise framebuffer (err=%d)\n", err); ++ goto err_free_fb; ++ } ++ ++ DRM_DEBUG_DRIVER("[FB:%d]\n", pdp_fb->base.base.id); ++ ++ return &pdp_fb->base; ++ ++err_free_fb: ++ kfree(pdp_fb); ++err_obj_put: ++ drm_gem_object_put(obj); ++err_out: ++ return ERR_PTR(err); ++} ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */ ++ ++ ++/************************************************************************* ++ * DRM mode config callbacks ++ **************************************************************************/ ++ ++static struct drm_framebuffer * ++pdp_fb_create(struct drm_device *dev, ++ struct drm_file *file, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ ++ (defined(CHROMIUMOS_KERNEL) && \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) ++ const ++#endif ++ struct drm_mode_fb_cmd2 *mode_cmd) ++{ ++ struct drm_framebuffer *fb; ++ int err; ++ ++ err = drm_mode_fb_cmd2_validate(mode_cmd); ++ if (err) ++ return ERR_PTR(err); ++ ++ fb = drm_gem_fb_create(dev, file, mode_cmd); ++ if (IS_ERR(fb)) ++ goto out; ++ ++ DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); ++ ++out: ++ return fb; ++} ++ ++static const struct drm_mode_config_funcs pdp_mode_config_funcs = { ++ .fb_create = pdp_fb_create, ++ .output_poll_changed = NULL, ++#if defined(PDP_USE_ATOMIC) ++ .atomic_check = drm_atomic_helper_check, ++ .atomic_commit = drm_atomic_helper_commit, ++#endif ++}; ++ ++ ++int pdp_modeset_early_init(struct pdp_drm_private *dev_priv) ++{ ++ struct drm_device *dev = dev_priv->dev; ++ int err; ++ ++ drm_mode_config_init(dev); ++ ++ dev->mode_config.funcs = &pdp_mode_config_funcs; ++ dev->mode_config.min_width = PDP_WIDTH_MIN; ++ dev->mode_config.min_height = PDP_HEIGHT_MIN; ++ ++ switch (dev_priv->version) { ++ case PDP_VERSION_APOLLO: ++ dev->mode_config.max_width = PDP_WIDTH_MAX; ++ dev->mode_config.max_height = PDP_HEIGHT_MAX; ++ break; ++ case PDP_VERSION_ODIN: ++ if (dev_priv->subversion == PDP_ODIN_ORION) { ++ dev->mode_config.max_width = ORION_PDP_WIDTH_MAX; ++ dev->mode_config.max_height = ORION_PDP_HEIGHT_MAX; ++ } else { ++ dev->mode_config.max_width = ODIN_PDP_WIDTH_MAX; ++ dev->mode_config.max_height = ODIN_PDP_HEIGHT_MAX; ++ } ++ break; ++ case PDP_VERSION_PLATO: ++ dev->mode_config.max_width = PLATO_PDP_WIDTH_MAX; ++ dev->mode_config.max_height = PLATO_PDP_HEIGHT_MAX; ++ break; ++ default: ++ BUG(); ++ } ++ ++ DRM_INFO("max_width is %d\n", ++ dev->mode_config.max_width); ++ DRM_INFO("max_height is %d\n", ++ dev->mode_config.max_height); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) ++ dev->mode_config.fb_base = 0; ++#endif ++ dev->mode_config.async_page_flip = async_flip_enable; ++ ++ DRM_INFO("%s async flip support is %s\n", ++ dev->driver->name, async_flip_enable ? 
"enabled" : "disabled"); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ dev->mode_config.fb_modifiers_not_supported = false; ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ dev->mode_config.allow_fb_modifiers = true; ++#endif ++ ++ dev_priv->plane = pdp_plane_create(dev, DRM_PLANE_TYPE_PRIMARY); ++ if (IS_ERR(dev_priv->plane)) { ++ DRM_ERROR("failed to create a primary plane\n"); ++ err = PTR_ERR(dev_priv->plane); ++ goto err_config_cleanup; ++ } ++ ++ dev_priv->crtc = pdp_crtc_create(dev, 0, dev_priv->plane); ++ if (IS_ERR(dev_priv->crtc)) { ++ DRM_ERROR("failed to create a CRTC\n"); ++ err = PTR_ERR(dev_priv->crtc); ++ goto err_config_cleanup; ++ } ++ ++ switch (dev_priv->version) { ++ case PDP_VERSION_APOLLO: ++ case PDP_VERSION_ODIN: ++ dev_priv->connector = pdp_dvi_connector_create(dev); ++ if (IS_ERR(dev_priv->connector)) { ++ DRM_ERROR("failed to create a connector\n"); ++ err = PTR_ERR(dev_priv->connector); ++ goto err_config_cleanup; ++ } ++ ++ dev_priv->encoder = pdp_tmds_encoder_create(dev); ++ if (IS_ERR(dev_priv->encoder)) { ++ DRM_ERROR("failed to create an encoder\n"); ++ err = PTR_ERR(dev_priv->encoder); ++ goto err_config_cleanup; ++ } ++ ++ err = drm_connector_attach_encoder(dev_priv->connector, ++ dev_priv->encoder); ++ if (err) { ++ DRM_ERROR("can't attach [ENCODER:%d:%s] to [CONNECTOR:%d:%s] (err=%d)\n", ++ dev_priv->encoder->base.id, ++ dev_priv->encoder->name, ++ dev_priv->connector->base.id, ++ dev_priv->connector->name, ++ err); ++ goto err_config_cleanup; ++ } ++ break; ++ case PDP_VERSION_PLATO: ++ // PLATO connectors are created in HDMI component driver ++ break; ++ default: ++ BUG(); ++ } ++ ++ DRM_DEBUG_DRIVER("initialised\n"); ++ ++ return 0; ++ ++err_config_cleanup: ++ drm_mode_config_cleanup(dev); ++ ++ return err; ++} ++ ++static inline int pdp_modeset_init_fbdev(struct pdp_drm_private *dev_priv) ++{ ++#if defined(CONFIG_DRM_FBDEV_EMULATION) ++ struct pdp_fbdev *fbdev; ++ int err; ++ ++ fbdev = pdp_fbdev_create(dev_priv); ++ if (IS_ERR(fbdev)) { ++ DRM_ERROR("failed to create a fb device"); ++ return PTR_ERR(fbdev); ++ } ++ dev_priv->fbdev = fbdev; ++ ++ /* ++ * pdpdrmfb is registered and available for userspace to use. If this ++ * is the only or primary device, fbcon has already bound a tty to it, ++ * and the following call will take no effect. However, this may be ++ * essential in order to sync the display when fbcon was already bound ++ * to a different tty (and fbdev). This triggers ->set_config() which ++ * will in turn set up a config and then do a modeset. 
++ */ ++ err = drm_fb_helper_restore_fbdev_mode_unlocked(&dev_priv->fbdev->helper); ++ if (err) { ++ DRM_ERROR("failed to set mode (err=%d)\n", err); ++ return err; ++ } ++#endif ++ return 0; ++} ++ ++int pdp_modeset_late_init(struct pdp_drm_private *dev_priv) ++{ ++ struct drm_device *ddev = dev_priv->dev; ++ int err; ++ ++ drm_mode_config_reset(ddev); ++ ++ err = pdp_modeset_init_fbdev(dev_priv); ++ if (err) ++ DRM_INFO("fbdev init failure is not fatal, continue anyway.\n"); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) ++ if (dev_priv->connector != NULL) { ++ err = drm_connector_register(dev_priv->connector); ++ if (err) { ++ DRM_ERROR("[CONNECTOR:%d:%s] failed to register (err=%d)\n", ++ dev_priv->connector->base.id, ++ dev_priv->connector->name, ++ err); ++ return err; ++ } ++ } ++#endif ++ return 0; ++} ++ ++void pdp_modeset_early_cleanup(struct pdp_drm_private *dev_priv) ++{ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) ++ if (dev_priv->connector != NULL) ++ drm_connector_unregister(dev_priv->connector); ++#endif ++} ++ ++void pdp_modeset_late_cleanup(struct pdp_drm_private *dev_priv) ++{ ++#if defined(CONFIG_DRM_FBDEV_EMULATION) ++ pdp_fbdev_destroy(dev_priv->fbdev); ++#endif ++ drm_mode_config_cleanup(dev_priv->dev); ++ ++ DRM_DEBUG_DRIVER("cleaned up\n"); ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_plane.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_plane.c +new file mode 100644 +index 000000000000..2e683425d579 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_plane.c +@@ -0,0 +1,323 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++ ++#include "drm_pdp_drv.h" ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) ++#include ++#endif ++ ++#include ++ ++#if defined(PDP_USE_ATOMIC) ++#include ++#include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) ++#include ++#else ++#include ++#endif ++#endif ++ ++#include ++ ++#include "drm_pdp_gem.h" ++#include "pdp_apollo.h" ++#include "pdp_odin.h" ++#include "pdp_plato.h" ++#include "pfim_defs.h" ++ ++#include "kernel_compatibility.h" ++ ++ ++#if defined(PDP_USE_ATOMIC) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) ++static int pdp_plane_helper_atomic_check(struct drm_plane *plane, ++ struct drm_atomic_state *atomic_state) ++#else ++static int pdp_plane_helper_atomic_check(struct drm_plane *plane, ++ struct drm_plane_state *state) ++#endif ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) ++ struct drm_plane_state *state = drm_atomic_get_new_plane_state(atomic_state, ++ plane); ++#endif ++ struct drm_crtc_state *crtc_new_state; ++ ++ if (!state->crtc) ++ return 0; ++ ++ crtc_new_state = drm_atomic_get_new_crtc_state(state->state, ++ state->crtc); ++ ++ return drm_atomic_helper_check_plane_state(state, crtc_new_state, ++ DRM_PLANE_HELPER_NO_SCALING, ++ DRM_PLANE_HELPER_NO_SCALING, ++ false, true); ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) ++static void pdp_plane_helper_atomic_update(struct drm_plane *plane, ++ struct drm_atomic_state *atomic_state) ++#else ++static void pdp_plane_helper_atomic_update(struct drm_plane *plane, ++ struct drm_plane_state *old_state) ++#endif ++{ ++ struct drm_plane_state *plane_state = plane->state; ++ struct drm_framebuffer *fb = plane_state->fb; ++ ++ if (fb) { ++ pdp_plane_set_surface(plane_state->crtc, plane, fb, ++ plane_state->src_x, plane_state->src_y); ++ } ++} ++ ++static const struct drm_plane_helper_funcs pdp_plane_helper_funcs = { ++ .prepare_fb = drm_gem_plane_helper_prepare_fb, ++ .atomic_check = pdp_plane_helper_atomic_check, ++ .atomic_update = pdp_plane_helper_atomic_update, ++}; ++ ++static const struct drm_plane_funcs pdp_plane_funcs = { ++ .update_plane = drm_atomic_helper_update_plane, ++ .disable_plane = drm_atomic_helper_disable_plane, ++ .destroy = drm_primary_helper_destroy, ++ .reset = drm_atomic_helper_plane_reset, ++ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, ++ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, ++}; ++#else ++#define pdp_plane_funcs drm_primary_helper_funcs ++#endif ++ ++struct drm_plane *pdp_plane_create(struct drm_device *dev, ++ enum drm_plane_type type) ++{ ++ struct pdp_drm_private *dev_priv = dev->dev_private; ++ struct drm_plane *plane; ++ const uint32_t *supported_formats; ++ uint32_t num_supported_formats; ++ const uint32_t apollo_plato_formats[] = { ++ DRM_FORMAT_XRGB8888, ++ DRM_FORMAT_ARGB8888, ++ }; ++ const uint32_t odin_formats[] = { ++ DRM_FORMAT_XRGB8888, ++ DRM_FORMAT_ARGB8888, ++ DRM_FORMAT_RGB565, ++ }; ++ int err; ++ ++ 
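++	/*
++	 * Pick the scanout formats this PDP variant can fetch: Odin also
++	 * handles RGB565, matching the checks in drm_mode_fb_cmd2_validate()
++	 * and pdp_plane_set_surface().
++	 */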
switch (dev_priv->version) { ++ case PDP_VERSION_ODIN: ++ supported_formats = odin_formats; ++ num_supported_formats = ARRAY_SIZE(odin_formats); ++ break; ++ case PDP_VERSION_APOLLO: ++ case PDP_VERSION_PLATO: ++ supported_formats = apollo_plato_formats; ++ num_supported_formats = ARRAY_SIZE(apollo_plato_formats); ++ break; ++ default: ++ DRM_ERROR("Unsupported PDP version\n"); ++ err = -EINVAL; ++ goto err_exit; ++ } ++ ++ plane = kzalloc(sizeof(*plane), GFP_KERNEL); ++ if (!plane) { ++ err = -ENOMEM; ++ goto err_exit; ++ } ++ ++ err = drm_universal_plane_init(dev, plane, 0, &pdp_plane_funcs, ++ supported_formats, ++ num_supported_formats, ++ NULL, type, NULL); ++ if (err) ++ goto err_plane_free; ++ ++#if defined(PDP_USE_ATOMIC) ++ drm_plane_helper_add(plane, &pdp_plane_helper_funcs); ++#endif ++ ++ DRM_DEBUG_DRIVER("[PLANE:%d]\n", plane->base.id); ++ ++ return plane; ++ ++err_plane_free: ++ kfree(plane); ++err_exit: ++ return ERR_PTR(err); ++} ++ ++void pdp_plane_set_surface(struct drm_crtc *crtc, struct drm_plane *plane, ++ struct drm_framebuffer *fb, ++ const uint32_t src_x, const uint32_t src_y) ++{ ++ struct pdp_drm_private *dev_priv = plane->dev->dev_private; ++ struct pdp_crtc *pdp_crtc = to_pdp_crtc(crtc); ++ struct pdp_framebuffer *pdp_fb = to_pdp_framebuffer(fb); ++ unsigned int pitch = fb->pitches[0]; ++ uint64_t address = pdp_gem_get_dev_addr(pdp_fb->obj[0]); ++ uint64_t modifier = 0; ++ uint32_t format; ++ uint32_t fbc_mode; ++ ++ /* ++ * User space specifies 'x' and 'y' and this is used to tell the display ++ * to scan out from part way through a buffer. ++ */ ++ address += ((src_y * pitch) + (src_x * (pdp_drm_fb_cpp(fb)))); ++ ++ /* ++ * NOTE: If the buffer dimensions are less than the current mode then ++ * the output will appear in the top left of the screen. This can be ++ * centered by adjusting horizontal active start, right border start, ++ * vertical active start and bottom border start. At this point it's ++ * not entirely clear where this should be done. On the one hand it's ++ * related to pdp_crtc_helper_mode_set but on the other hand there ++ * might not always be a call to pdp_crtc_helper_mode_set. This needs ++ * to be investigated. 
++ */ ++ switch (dev_priv->version) { ++ case PDP_VERSION_APOLLO: ++ switch (pdp_drm_fb_format(fb)) { ++ case DRM_FORMAT_ARGB8888: ++ case DRM_FORMAT_XRGB8888: ++ format = 0xE; ++ break; ++ default: ++ DRM_ERROR("unsupported pixel format (format = %d)\n", ++ pdp_drm_fb_format(fb)); ++ return; ++ } ++ ++ pdp_apollo_set_surface(plane->dev->dev, ++ pdp_crtc->pdp_reg, ++ 0, ++ address, ++ 0, 0, ++ fb->width, fb->height, pitch, ++ format, ++ 255, ++ false); ++ break; ++ case PDP_VERSION_ODIN: ++ switch (pdp_drm_fb_format(fb)) { ++ case DRM_FORMAT_ARGB8888: ++ case DRM_FORMAT_XRGB8888: ++ format = ODN_PDP_SURF_PIXFMT_ARGB8888; ++ break; ++ case DRM_FORMAT_RGB565: ++ format = ODN_PDP_SURF_PIXFMT_RGB565; ++ break; ++ default: ++ DRM_ERROR("unsupported pixel format (format = %d)\n", ++ pdp_drm_fb_format(fb)); ++ return; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++ modifier = fb->modifier; ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ modifier = fb->modifier[0]; ++#endif ++ ++ switch (modifier) { ++ case DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12: ++ fbc_mode = ODIN_PFIM_FBCDC_8X8_V12; ++ break; ++ case DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12: ++ fbc_mode = ODIN_PFIM_FBCDC_16X4_V12; ++ break; ++ case DRM_FORMAT_MOD_LINEAR: ++ fbc_mode = ODIN_PFIM_MOD_LINEAR; ++ break; ++ default: ++ DRM_ERROR("unsupported fbc format (format = %llu)\n", ++ modifier); ++ return; ++ } ++ ++ pdp_odin_set_surface(plane->dev->dev, ++ pdp_crtc->pdp_reg, ++ 0, ++ address, fb->offsets[0], ++ 0, 0, ++ fb->width, fb->height, pitch, ++ format, ++ 255, ++ false, ++ pdp_crtc->pfim_reg, fbc_mode); ++ break; ++ case PDP_VERSION_PLATO: ++ switch (pdp_drm_fb_format(fb)) { ++ case DRM_FORMAT_ARGB8888: ++ case DRM_FORMAT_XRGB8888: ++ format = PLATO_PDP_PIXEL_FORMAT_ARGB8; ++ break; ++ default: ++ DRM_ERROR("unsupported pixel format (format = %d)\n", ++ pdp_drm_fb_format(fb)); ++ return; ++ } ++ ++ pdp_plato_set_surface(crtc->dev->dev, ++ pdp_crtc->pdp_reg, ++ pdp_crtc->pdp_bif_reg, ++ 0, ++ address, ++ 0, 0, ++ fb->width, fb->height, pitch, ++ format, ++ 255, ++ false); ++ break; ++ default: ++ BUG(); ++ } ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c +new file mode 100644 +index 000000000000..baaa9363866e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/drm_pdp_tmds.c +@@ -0,0 +1,143 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. 
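The scan-out offset computed at the top of pdp_plane_set_surface() is plain byte arithmetic over the framebuffer. A self-contained sketch with made-up panning values (base address, pitch and cpp are illustrative; the formula is the one the driver uses):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0x80000000ULL;	/* illustrative GEM device address */
	uint32_t pitch = 1920 * 4;	/* bytes per line, XRGB8888 at 1920 wide */
	uint32_t cpp = 4;		/* bytes per pixel */
	uint32_t src_x = 16, src_y = 32;

	/* as in the driver: skip src_y whole lines, then src_x pixels */
	uint64_t address = base + ((uint64_t)src_y * pitch) +
			   ((uint64_t)src_x * cpp);

	printf("scan-out starts at %#llx\n", (unsigned long long)address);
	return 0;
}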
++ *
++ * If you wish to allow use of your version of this file only under the terms of
++ * GPL, and not to allow others to use your version of this file under the terms
++ * of the MIT license, indicate your decision by deleting the provisions above
++ * and replace them with the notice and other provisions required by GPL as set
++ * out in the file called "GPL-COPYING" included in this distribution. If you do
++ * not delete the provisions above, a recipient may use your version of this file
++ * under the terms of either the MIT license or GPL.
++ *
++ * This License is also included in this distribution in the file called
++ * "MIT-COPYING".
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0))
++#include <drm/drmP.h>
++#endif
++
++#include <drm/drm_encoder.h>
++
++#include "drm_pdp_drv.h"
++
++#include "kernel_compatibility.h"
++
++static void pdp_tmds_encoder_helper_dpms(struct drm_encoder *encoder, int mode)
++{
++}
++
++static bool
++pdp_tmds_encoder_helper_mode_fixup(struct drm_encoder *encoder,
++				   const struct drm_display_mode *mode,
++				   struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++static void pdp_tmds_encoder_helper_prepare(struct drm_encoder *encoder)
++{
++}
++
++static void pdp_tmds_encoder_helper_commit(struct drm_encoder *encoder)
++{
++}
++
++static void
++pdp_tmds_encoder_helper_mode_set(struct drm_encoder *encoder,
++				 struct drm_display_mode *mode,
++				 struct drm_display_mode *adjusted_mode)
++{
++}
++
++static void pdp_tmds_encoder_destroy(struct drm_encoder *encoder)
++{
++	struct pdp_drm_private *dev_priv = encoder->dev->dev_private;
++
++	DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n",
++			 encoder->base.id,
++			 encoder->name);
++
++	drm_encoder_cleanup(encoder);
++
++	kfree(encoder);
++	dev_priv->encoder = NULL;
++}
++
++static const struct drm_encoder_helper_funcs pdp_tmds_encoder_helper_funcs = {
++	.dpms = pdp_tmds_encoder_helper_dpms,
++	.mode_fixup = pdp_tmds_encoder_helper_mode_fixup,
++	.prepare = pdp_tmds_encoder_helper_prepare,
++	.commit = pdp_tmds_encoder_helper_commit,
++	.mode_set = pdp_tmds_encoder_helper_mode_set,
++	.detect = NULL,
++	.disable = NULL,
++};
++
++static const struct drm_encoder_funcs pdp_tmds_encoder_funcs = {
++	.reset = NULL,
++	.destroy = pdp_tmds_encoder_destroy,
++};
++
++struct drm_encoder *
++pdp_tmds_encoder_create(struct drm_device *dev)
++{
++	struct drm_encoder *encoder;
++	int err;
++
++	encoder = kzalloc(sizeof(*encoder), GFP_KERNEL);
++	if (!encoder)
++		return ERR_PTR(-ENOMEM);
++
++	err = drm_encoder_init(dev,
++			       encoder,
++			       &pdp_tmds_encoder_funcs,
++			       DRM_MODE_ENCODER_TMDS,
++			       NULL);
++	if (err) {
++		DRM_ERROR("Failed to initialise encoder\n");
++		kfree(encoder);
++		return ERR_PTR(err);
++	}
++	drm_encoder_helper_add(encoder, &pdp_tmds_encoder_helper_funcs);
++
++	/*
++	 * This is a bit field that's used to determine which
++	 * CRTCs can drive this encoder.
++ */ ++ encoder->possible_crtcs = 0x1; ++ ++ DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", ++ encoder->base.id, ++ encoder->name); ++ ++ return encoder; ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/odin_defs.h b/drivers/gpu/drm/img-rogue/apollo/odin_defs.h +new file mode 100644 +index 000000000000..6234887a1bfd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/odin_defs.h +@@ -0,0 +1,326 @@ ++/**************************************************************************** ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Odin Memory Map - View from PCIe ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
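The hard-coded possible_crtcs = 0x1 above follows the standard DRM convention: bit N set means the encoder may be driven by the CRTC registered at index N. A hedged sketch of how the mask generalises (the helper name is hypothetical, not from this driver):

#include <stdint.h>

/* bit N => CRTC index N; one CRTC gives the 0x1 used above, two give 0x3 */
static uint32_t example_possible_crtcs(unsigned int num_crtcs)
{
	return (num_crtcs >= 32) ? 0xFFFFFFFFu : ((1u << num_crtcs) - 1u);
}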
++****************************************************************************/ ++ ++#ifndef _ODIN_DEFS_H_ ++#define _ODIN_DEFS_H_ ++ ++/* These defines have not been autogenerated */ ++ ++#define PCI_VENDOR_ID_ODIN (0x1AEE) ++#define DEVICE_ID_ODIN (0x1010) ++#define DEVICE_ID_TBA (0x1CF2) ++ ++/* PCI BAR 0 contains the PDP regs and the Odin system regs */ ++#define ODN_SYS_BAR 0 ++#define ODN_SYS_REGION_SIZE 0x000800000 /* 8MB */ ++ ++#define ODN_SYS_REGS_OFFSET 0 ++#define ODN_SYS_REGS_SIZE 0x000400000 /* 4MB */ ++ ++#define ODN_PDP_REGS_OFFSET 0x000440000 ++#define ODN_PDP_REGS_SIZE 0x000040000 /* 256k */ ++ ++#define ODN_PDP2_REGS_OFFSET 0x000480000 ++#define ODN_PDP2_REGS_SIZE 0x000040000 /* 256k */ ++ ++#define ODN_PDP2_PFIM_OFFSET 0x000500000 ++#define ODN_PDP2_PFIM_SIZE 0x000040000 /* 256k */ ++ ++#define ODIN_DMA_REGS_OFFSET 0x0004C0000 ++#define ODIN_DMA_REGS_SIZE 0x000040000 /* 256k */ ++ ++#define ODIN_DMA_CHAN_REGS_SIZE 0x000001000 /* 4k */ ++ ++/* PCI BAR 2 contains the Device Under Test SOCIF 64MB region */ ++#define ODN_DUT_SOCIF_BAR 2 ++#define ODN_DUT_SOCIF_OFFSET 0x000000000 ++#define ODN_DUT_SOCIF_SIZE 0x004000000 /* 64MB */ ++ ++/* PCI BAR 4 contains the on-board 1GB DDR memory */ ++#define ODN_DDR_BAR 4 ++#define ODN_DDR_MEM_OFFSET 0x000000000 ++#define ODN_DDR_MEM_SIZE 0x040000000 /* 1GB */ ++ ++/* Odin system register banks */ ++#define ODN_REG_BANK_CORE 0x00000 ++#define ODN_REG_BANK_TCF_SPI_MASTER 0x02000 ++#define ODN_REG_BANK_ODN_CLK_BLK 0x0A000 ++#define ODN_REG_BANK_ODN_MCU_COMMUNICATOR 0x0C000 ++#define ODN_REG_BANK_DB_TYPE_ID 0x0C200 ++#define ODN_REG_BANK_DB_TYPE_ID_TYPE_TCFVUOCTA 0x000000C6U ++#define ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK 0x000000C0U ++#define ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT 0x6 ++#define ODN_REG_BANK_ODN_I2C 0x0E000 ++#define ODN_REG_BANK_MULTI_CLK_ALIGN 0x20000 ++#define ODN_REG_BANK_ALIGN_DATA_TX 0x22000 ++#define ODN_REG_BANK_SAI_RX_DDR_0 0x24000 ++#define ODN_REG_BANK_SAI_RX_DDR(n) (ODN_REG_BANK_SAI_RX_DDR_0 + (0x02000*n)) ++#define ODN_REG_BANK_SAI_TX_DDR_0 0x3A000 ++#define ODN_REG_BANK_SAI_TX_DDR(n) (ODN_REG_BANK_SAI_TX_DDR_0 + (0x02000*n)) ++#define ODN_REG_BANK_SAI_TX_SDR 0x4E000 ++ ++/* Odin SPI regs */ ++#define ODN_SPI_MST_ADDR_RDNWR 0x0000 ++#define ODN_SPI_MST_WDATA 0x0004 ++#define ODN_SPI_MST_RDATA 0x0008 ++#define ODN_SPI_MST_STATUS 0x000C ++#define ODN_SPI_MST_GO 0x0010 ++ ++ ++/* ++ Odin CLK regs - the odn_clk_blk module defs are not auto generated ++ */ ++#define ODN_PDP_P_CLK_OUT_DIVIDER_REG1 0x620 ++#define ODN_PDP_PCLK_ODIV1_LO_TIME_MASK 0x0000003FU ++#define ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT 0 ++#define ODN_PDP_PCLK_ODIV1_HI_TIME_MASK 0x00000FC0U ++#define ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT 6 ++ ++#define ODN_PDP_P_CLK_OUT_DIVIDER_REG2 0x624 ++#define ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK 0x00000040U ++#define ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT 6 ++#define ODN_PDP_PCLK_ODIV2_EDGE_MASK 0x00000080U ++#define ODN_PDP_PCLK_ODIV2_EDGE_SHIFT 7 ++ ++#define ODN_PDP_P_CLK_OUT_DIVIDER_REG3 0x61C ++ ++#define ODN_PDP_M_CLK_OUT_DIVIDER_REG1 0x628 ++#define ODN_PDP_MCLK_ODIV1_LO_TIME_MASK 0x0000003FU ++#define ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT 0 ++#define ODN_PDP_MCLK_ODIV1_HI_TIME_MASK 0x00000FC0U ++#define ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT 6 ++ ++#define ODN_PDP_M_CLK_OUT_DIVIDER_REG2 0x62C ++#define ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK 0x00000040U ++#define ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT 6 ++#define ODN_PDP_MCLK_ODIV2_EDGE_MASK 0x00000080U ++#define ODN_PDP_MCLK_ODIV2_EDGE_SHIFT 7 ++ ++#define ODN_PDP_P_CLK_MULTIPLIER_REG1 0x650 ++#define 
ODN_PDP_PCLK_MUL1_LO_TIME_MASK 0x0000003FU ++#define ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT 0 ++#define ODN_PDP_PCLK_MUL1_HI_TIME_MASK 0x00000FC0U ++#define ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT 6 ++ ++#define ODN_PDP_P_CLK_MULTIPLIER_REG2 0x654 ++#define ODN_PDP_PCLK_MUL2_NOCOUNT_MASK 0x00000040U ++#define ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT 6 ++#define ODN_PDP_PCLK_MUL2_EDGE_MASK 0x00000080U ++#define ODN_PDP_PCLK_MUL2_EDGE_SHIFT 7 ++ ++#define ODN_PDP_P_CLK_MULTIPLIER_REG3 0x64C ++ ++#define ODN_PDP_P_CLK_IN_DIVIDER_REG 0x658 ++#define ODN_PDP_PCLK_IDIV_LO_TIME_MASK 0x0000003FU ++#define ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT 0 ++#define ODN_PDP_PCLK_IDIV_HI_TIME_MASK 0x00000FC0U ++#define ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT 6 ++#define ODN_PDP_PCLK_IDIV_NOCOUNT_MASK 0x00001000U ++#define ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT 12 ++#define ODN_PDP_PCLK_IDIV_EDGE_MASK 0x00002000U ++#define ODN_PDP_PCLK_IDIV_EDGE_SHIFT 13 ++ ++/* ++ * DUT core clock input divider, multiplier and out divider. ++ */ ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1 (0x0028) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0) ++ ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2 (0x002C) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK (0x00000080U) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT (7) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK (0x00000040U) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6) ++ ++#define ODN_DUT_CORE_CLK_MULTIPLIER1 (0x0050) ++#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_SHIFT (0) ++ ++#define ODN_DUT_CORE_CLK_MULTIPLIER2 (0x0054) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_MASK (0x00007000U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_SHIFT (12) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_MASK (0x00000800U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_SHIFT (11) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_MASK (0x00000080U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_SHIFT (7) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_MASK (0x00000040U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_SHIFT (6) ++ ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1 (0x0058) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_MASK (0x00002000U) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_SHIFT (13) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_MASK (0x00001000U) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT (12) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_SHIFT (0) ++ ++/* ++ * DUT interface clock input divider, multiplier and out divider. 
++ */ ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1 (0x0220) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0) ++ ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2 (0x0224) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_MASK (0x00000080U) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_SHIFT (7) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_MASK (0x00000040U) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6) ++ ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1 (0x0250) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_SHIFT (0) ++ ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2 (0x0254) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_MASK (0x00007000U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_SHIFT (12) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_MASK (0x00000800U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_SHIFT (11) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_MASK (0x00000080U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_SHIFT (7) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_MASK (0x00000040U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_SHIFT (6) ++ ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1 (0x0258) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_MASK (0x00002000U) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_SHIFT (13) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_MASK (0x00001000U) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT (12) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_SHIFT (0) ++ ++ ++/* ++ * Min max values from Xilinx Virtex7 data sheet DS183, for speed grade 2 ++ * All in Hz ++ */ ++#define ODN_INPUT_CLOCK_SPEED (100000000U) ++#define ODN_INPUT_CLOCK_SPEED_MIN (10000000U) ++#define ODN_INPUT_CLOCK_SPEED_MAX (933000000U) ++#define ODN_OUTPUT_CLOCK_SPEED_MIN (4690000U) ++#define ODN_OUTPUT_CLOCK_SPEED_MAX (933000000U) ++#define ODN_VCO_MIN (600000000U) ++#define ODN_VCO_MAX (1440000000U) ++#define ODN_PFD_MIN (10000000U) ++#define ODN_PFD_MAX (500000000U) ++ ++/* ++ * Max values that can be set in DRP registers ++ */ ++#define ODN_OREG_VALUE_MAX (126.875f) ++#define ODN_MREG_VALUE_MAX (126.875f) ++#define ODN_DREG_VALUE_MAX (126U) ++ ++ ++#define ODN_MMCM_LOCK_STATUS_DUT_CORE (0x00000001U) ++#define ODN_MMCM_LOCK_STATUS_DUT_IF (0x00000002U) ++#define ODN_MMCM_LOCK_STATUS_PDPP (0x00000008U) ++ ++/* ++ Odin interrupt flags ++*/ ++#define ODN_INTERRUPT_ENABLE_PDP1 (1 << ODN_INTERRUPT_ENABLE_PDP1_SHIFT) ++#define ODN_INTERRUPT_ENABLE_PDP2 (1 << ODN_INTERRUPT_ENABLE_PDP2_SHIFT) ++#define ODN_INTERRUPT_ENABLE_DUT (1 << ODN_INTERRUPT_ENABLE_DUT_SHIFT) ++#define ODN_INTERRUPT_STATUS_PDP1 (1 << ODN_INTERRUPT_STATUS_PDP1_SHIFT) ++#define ODN_INTERRUPT_STATUS_PDP2 (1 << ODN_INTERRUPT_STATUS_PDP2_SHIFT) ++#define ODN_INTERRUPT_STATUS_DUT (1 << ODN_INTERRUPT_STATUS_DUT_SHIFT) ++#define ODN_INTERRUPT_CLEAR_PDP1 (1 << ODN_INTERRUPT_CLR_PDP1_SHIFT) ++#define ODN_INTERRUPT_CLEAR_PDP2 (1 << ODN_INTERRUPT_CLR_PDP2_SHIFT) ++#define ODN_INTERRUPT_CLEAR_DUT (1 << ODN_INTERRUPT_CLR_DUT_SHIFT) ++ ++#define 
ODN_INTERRUPT_ENABLE_CDMA (1 << ODN_INTERRUPT_ENABLE_CDMA_SHIFT) ++#define ODN_INTERRUPT_STATUS_CDMA (1 << ODN_INTERRUPT_STATUS_CDMA_SHIFT) ++#define ODN_INTERRUPT_CLEAR_CDMA (1 << ODN_INTERRUPT_CLR_CDMA_SHIFT) ++ ++#define ODN_INTERRUPT_ENABLE_CDMA2 (1 << (ODN_INTERRUPT_ENABLE_CDMA_SHIFT + 1)) ++#define ODN_INTERRUPT_STATUS_CDMA2 (1 << (ODN_INTERRUPT_STATUS_CDMA_SHIFT + 1)) ++#define ODN_INTERRUPT_CLEAR_CDMA2 (1 << (ODN_INTERRUPT_CLR_CDMA_SHIFT + 1)) ++ ++/* ++ Other defines ++*/ ++#define ODN_STREAM_OFF 0 ++#define ODN_STREAM_ON 1 ++#define ODN_SYNC_GEN_DISABLE 0 ++#define ODN_SYNC_GEN_ENABLE 1 ++#define ODN_INTERLACE_DISABLE 0 ++#define ODN_INTERLACE_ENABLE 1 ++#define ODN_PIXEL_CLOCK_INVERTED 1 ++#define ODN_HSYNC_POLARITY_ACTIVE_HIGH 1 ++ ++#define ODN_PDP_INTCLR_ALL 0x000FFFFFU ++#define ODN_PDP_INTSTAT_ALL_OURUN_MASK 0x000FFFF0U ++ ++/* ++ DMA defs ++*/ ++#define ODN_CDMA_ADDR_WIDTH 35 ++#define ODN_DMA_HW_DESC_HEAP_SIZE 0x100000 ++#define ODN_DMA_CHAN_RX 0 ++#define ODN_DMA_CHAN_TX 1 ++ ++#define ODIN_DMA_TX_CHAN_NAME "tx" ++#define ODIN_DMA_RX_CHAN_NAME "rx" ++ ++/* ++ FBC defs ++*/ ++#define ODIN_PFIM_RELNUM (005U) ++ ++#endif /* _ODIN_DEFS_H_ */ ++ ++/***************************************************************************** ++ End of file (odn_defs.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h b/drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h +new file mode 100644 +index 000000000000..da47a253db31 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/odin_pdp_regs.h +@@ -0,0 +1,8540 @@ ++/*************************************************************************/ /*! ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
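The DS183 limits collected in odin_defs.h constrain the usual MMCM frequency relation f_out = f_in * M / (D * O): the VCO frequency f_in * M / D must lie in [ODN_VCO_MIN, ODN_VCO_MAX] and the PFD frequency f_in / D in [ODN_PFD_MIN, ODN_PFD_MAX]. A standalone check using the 100 MHz Odin input clock (the D/M/O choices below are illustrative, not taken from the driver; the constants are copied from the header above):

#include <stdint.h>
#include <stdio.h>

#define ODN_INPUT_CLOCK_SPEED (100000000U)
#define ODN_VCO_MIN (600000000U)
#define ODN_VCO_MAX (1440000000U)
#define ODN_PFD_MIN (10000000U)
#define ODN_PFD_MAX (500000000U)

int main(void)
{
	uint32_t d = 1, m = 12, o = 8;	/* illustrative divider/multiplier */
	uint64_t pfd = ODN_INPUT_CLOCK_SPEED / d;
	uint64_t vco = (uint64_t)ODN_INPUT_CLOCK_SPEED * m / d;
	uint64_t fout = vco / o;

	if (pfd < ODN_PFD_MIN || pfd > ODN_PFD_MAX ||
	    vco < ODN_VCO_MIN || vco > ODN_VCO_MAX) {
		puts("D/M choice violates the MMCM limits");
		return 1;
	}

	printf("f_out = %llu Hz\n", (unsigned long long)fout); /* 150 MHz */
	return 0;
}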
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* tab size 4 */ ++ ++#ifndef ODN_PDP_REGS_H ++#define ODN_PDP_REGS_H ++ ++/* Odin-PDP hardware register definitions */ ++ ++ ++#define ODN_PDP_GRPH1SURF_OFFSET (0x0000) ++ ++/* PDP, GRPH1SURF, GRPH1PIXFMT ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT (27) ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH (5) ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1USEGAMMA ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT (26) ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH (1) ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1USECSC ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_MASK (0x02000000) ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SHIFT (25) ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LENGTH (1) ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK (0x01000000) ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT (24) ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH (1) ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1USELUT ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_MASK (0x00800000) ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SHIFT (23) ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LENGTH (1) ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2SURF_OFFSET (0x0004) ++ ++/* PDP, GRPH2SURF, GRPH2PIXFMT ++*/ ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT (27) ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH (5) ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2USEGAMMA ++*/ ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT (26) ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH (1) ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2USECSC ++*/ ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_MASK (0x02000000) ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SHIFT (25) ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LENGTH (1) ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE ++*/ 
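Every field in odin_pdp_regs.h is described by the same _MASK/_LSBMASK/_SHIFT/_LENGTH/_SIGNED_FIELD quintet. A minimal sketch of the pack/extract helpers such a table feeds (the ODN_FIELD_* macro names are illustrative, not from the driver; the two register constants are copied from the header):

#include <stdint.h>
#include <assert.h>

#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK	(0xF8000000)
#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT	(27)

/* pack a field value into a register word, and extract it back out */
#define ODN_FIELD_PREP(field, val) \
	(((uint32_t)(val) << field##_SHIFT) & field##_MASK)
#define ODN_FIELD_GET(field, reg) \
	(((uint32_t)(reg) & field##_MASK) >> field##_SHIFT)

int main(void)
{
	/* write an arbitrary 5-bit pixel-format code into GRPH1SURF */
	uint32_t reg = ODN_FIELD_PREP(ODN_PDP_GRPH1SURF_GRPH1PIXFMT, 0xE);

	assert(ODN_FIELD_GET(ODN_PDP_GRPH1SURF_GRPH1PIXFMT, reg) == 0xE);
	return 0;
}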
++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK (0x01000000) ++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT (24) ++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH (1) ++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2USELUT ++*/ ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_MASK (0x00800000) ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SHIFT (23) ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LENGTH (1) ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3SURF_OFFSET (0x0008) ++ ++/* PDP, GRPH3SURF, GRPH3PIXFMT ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT (27) ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH (5) ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3USEGAMMA ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT (26) ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH (1) ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3USECSC ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_MASK (0x02000000) ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SHIFT (25) ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LENGTH (1) ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK (0x01000000) ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT (24) ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH (1) ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3USELUT ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_MASK (0x00800000) ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SHIFT (23) ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LENGTH (1) ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4SURF_OFFSET (0x000C) ++ ++/* PDP, GRPH4SURF, GRPH4PIXFMT ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT (27) ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH (5) ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4USEGAMMA ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT (26) ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH (1) ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4USECSC ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_MASK (0x02000000) ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SHIFT (25) ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LENGTH (1) ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK (0x01000000) ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK 
(0x00000001) ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT (24) ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH (1) ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4USELUT ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_MASK (0x00800000) ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SHIFT (23) ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LENGTH (1) ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SURF_OFFSET (0x0010) ++ ++/* PDP, VID1SURF, VID1PIXFMT ++*/ ++#define ODN_PDP_VID1SURF_VID1PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_VID1SURF_VID1PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT (27) ++#define ODN_PDP_VID1SURF_VID1PIXFMT_LENGTH (5) ++#define ODN_PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEGAMMA ++*/ ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_SHIFT (26) ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USECSC ++*/ ++#define ODN_PDP_VID1SURF_VID1USECSC_MASK (0x02000000) ++#define ODN_PDP_VID1SURF_VID1USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USECSC_SHIFT (25) ++#define ODN_PDP_VID1SURF_VID1USECSC_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEI2P ++*/ ++#define ODN_PDP_VID1SURF_VID1USEI2P_MASK (0x01000000) ++#define ODN_PDP_VID1SURF_VID1USEI2P_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USEI2P_SHIFT (24) ++#define ODN_PDP_VID1SURF_VID1USEI2P_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1COSITED ++*/ ++#define ODN_PDP_VID1SURF_VID1COSITED_MASK (0x00800000) ++#define ODN_PDP_VID1SURF_VID1COSITED_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1COSITED_SHIFT (23) ++#define ODN_PDP_VID1SURF_VID1COSITED_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEHQCD ++*/ ++#define ODN_PDP_VID1SURF_VID1USEHQCD_MASK (0x00400000) ++#define ODN_PDP_VID1SURF_VID1USEHQCD_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USEHQCD_SHIFT (22) ++#define ODN_PDP_VID1SURF_VID1USEHQCD_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEINSTREAM ++*/ ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_MASK (0x00200000) ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SHIFT (21) ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SURF_OFFSET (0x0014) ++ ++/* PDP, VID2SURF, VID2PIXFMT ++*/ ++#define ODN_PDP_VID2SURF_VID2PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_VID2SURF_VID2PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_VID2SURF_VID2PIXFMT_SHIFT (27) ++#define ODN_PDP_VID2SURF_VID2PIXFMT_LENGTH (5) ++#define ODN_PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SURF, VID2COSITED ++*/ ++#define ODN_PDP_VID2SURF_VID2COSITED_MASK (0x00800000) ++#define ODN_PDP_VID2SURF_VID2COSITED_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SURF_VID2COSITED_SHIFT (23) ++#define ODN_PDP_VID2SURF_VID2COSITED_LENGTH (1) ++#define ODN_PDP_VID2SURF_VID2COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SURF, VID2USEGAMMA ++*/ ++#define 
ODN_PDP_VID2SURF_VID2USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_VID2SURF_VID2USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SURF_VID2USEGAMMA_SHIFT (26) ++#define ODN_PDP_VID2SURF_VID2USEGAMMA_LENGTH (1) ++#define ODN_PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SURF, VID2USECSC ++*/ ++#define ODN_PDP_VID2SURF_VID2USECSC_MASK (0x02000000) ++#define ODN_PDP_VID2SURF_VID2USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SURF_VID2USECSC_SHIFT (25) ++#define ODN_PDP_VID2SURF_VID2USECSC_LENGTH (1) ++#define ODN_PDP_VID2SURF_VID2USECSC_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SURF_OFFSET (0x0018) ++ ++/* PDP, VID3SURF, VID3PIXFMT ++*/ ++#define ODN_PDP_VID3SURF_VID3PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_VID3SURF_VID3PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_VID3SURF_VID3PIXFMT_SHIFT (27) ++#define ODN_PDP_VID3SURF_VID3PIXFMT_LENGTH (5) ++#define ODN_PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SURF, VID3COSITED ++*/ ++#define ODN_PDP_VID3SURF_VID3COSITED_MASK (0x00800000) ++#define ODN_PDP_VID3SURF_VID3COSITED_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SURF_VID3COSITED_SHIFT (23) ++#define ODN_PDP_VID3SURF_VID3COSITED_LENGTH (1) ++#define ODN_PDP_VID3SURF_VID3COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SURF, VID3USEGAMMA ++*/ ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_SHIFT (26) ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_LENGTH (1) ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SURF, VID3USECSC ++*/ ++#define ODN_PDP_VID3SURF_VID3USECSC_MASK (0x02000000) ++#define ODN_PDP_VID3SURF_VID3USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SURF_VID3USECSC_SHIFT (25) ++#define ODN_PDP_VID3SURF_VID3USECSC_LENGTH (1) ++#define ODN_PDP_VID3SURF_VID3USECSC_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SURF_OFFSET (0x001C) ++ ++/* PDP, VID4SURF, VID4PIXFMT ++*/ ++#define ODN_PDP_VID4SURF_VID4PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_VID4SURF_VID4PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_VID4SURF_VID4PIXFMT_SHIFT (27) ++#define ODN_PDP_VID4SURF_VID4PIXFMT_LENGTH (5) ++#define ODN_PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SURF, VID4COSITED ++*/ ++#define ODN_PDP_VID4SURF_VID4COSITED_MASK (0x00800000) ++#define ODN_PDP_VID4SURF_VID4COSITED_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SURF_VID4COSITED_SHIFT (23) ++#define ODN_PDP_VID4SURF_VID4COSITED_LENGTH (1) ++#define ODN_PDP_VID4SURF_VID4COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SURF, VID4USEGAMMA ++*/ ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_SHIFT (26) ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_LENGTH (1) ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SURF, VID4USECSC ++*/ ++#define ODN_PDP_VID4SURF_VID4USECSC_MASK (0x02000000) ++#define ODN_PDP_VID4SURF_VID4USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SURF_VID4USECSC_SHIFT (25) ++#define ODN_PDP_VID4SURF_VID4USECSC_LENGTH (1) ++#define ODN_PDP_VID4SURF_VID4USECSC_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1CTRL_OFFSET (0x0020) ++ ++/* PDP, GRPH1CTRL, GRPH1STREN ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK (0x80000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT (31) ++#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LENGTH (1) ++#define 
ODN_PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1CKEYEN ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_MASK (0x40000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT (30) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH (1) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1CKEYSRC ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT (29) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH (1) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1BLEND ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK (0x18000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT (27) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LENGTH (2) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1BLENDPOS ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT (24) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH (3) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1DITHEREN ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_MASK (0x00800000) ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT (23) ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH (1) ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2CTRL_OFFSET (0x0024) ++ ++/* PDP, GRPH2CTRL, GRPH2STREN ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK (0x80000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT (31) ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LENGTH (1) ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2CKEYEN ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_MASK (0x40000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT (30) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH (1) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2CKEYSRC ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT (29) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH (1) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2BLEND ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK (0x18000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT (27) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LENGTH (2) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2BLENDPOS ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT (24) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH (3) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2DITHEREN ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_MASK (0x00800000) ++#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK (0x00000001) ++#define 
ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT (23) ++#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH (1) ++#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3CTRL_OFFSET (0x0028) ++ ++/* PDP, GRPH3CTRL, GRPH3STREN ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_MASK (0x80000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SHIFT (31) ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LENGTH (1) ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3CKEYEN ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_MASK (0x40000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT (30) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH (1) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3CKEYSRC ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT (29) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH (1) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3BLEND ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_MASK (0x18000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SHIFT (27) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LENGTH (2) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3BLENDPOS ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT (24) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH (3) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3DITHEREN ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_MASK (0x00800000) ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT (23) ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH (1) ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4CTRL_OFFSET (0x002C) ++ ++/* PDP, GRPH4CTRL, GRPH4STREN ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK (0x80000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT (31) ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LENGTH (1) ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4CKEYEN ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_MASK (0x40000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT (30) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH (1) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4CKEYSRC ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT (29) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH (1) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4BLEND ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK (0x18000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT (27) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LENGTH (2) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4BLENDPOS 
++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT (24) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH (3) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4DITHEREN ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_MASK (0x00800000) ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT (23) ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH (1) ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1CTRL_OFFSET (0x0030) ++ ++/* PDP, VID1CTRL, VID1STREN ++*/ ++#define ODN_PDP_VID1CTRL_VID1STREN_MASK (0x80000000) ++#define ODN_PDP_VID1CTRL_VID1STREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1CTRL_VID1STREN_SHIFT (31) ++#define ODN_PDP_VID1CTRL_VID1STREN_LENGTH (1) ++#define ODN_PDP_VID1CTRL_VID1STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1CKEYEN ++*/ ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_MASK (0x40000000) ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_SHIFT (30) ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_LENGTH (1) ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1CKEYSRC ++*/ ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SHIFT (29) ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LENGTH (1) ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1BLEND ++*/ ++#define ODN_PDP_VID1CTRL_VID1BLEND_MASK (0x18000000) ++#define ODN_PDP_VID1CTRL_VID1BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_VID1CTRL_VID1BLEND_SHIFT (27) ++#define ODN_PDP_VID1CTRL_VID1BLEND_LENGTH (2) ++#define ODN_PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1BLENDPOS ++*/ ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT (24) ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LENGTH (3) ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1DITHEREN ++*/ ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_MASK (0x00800000) ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_SHIFT (23) ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_LENGTH (1) ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2CTRL_OFFSET (0x0034) ++ ++/* PDP, VID2CTRL, VID2STREN ++*/ ++#define ODN_PDP_VID2CTRL_VID2STREN_MASK (0x80000000) ++#define ODN_PDP_VID2CTRL_VID2STREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2CTRL_VID2STREN_SHIFT (31) ++#define ODN_PDP_VID2CTRL_VID2STREN_LENGTH (1) ++#define ODN_PDP_VID2CTRL_VID2STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2CKEYEN ++*/ ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_MASK (0x40000000) ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_SHIFT (30) ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_LENGTH (1) ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2CKEYSRC ++*/ ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SHIFT (29) ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LENGTH (1) ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD 
IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2BLEND ++*/ ++#define ODN_PDP_VID2CTRL_VID2BLEND_MASK (0x18000000) ++#define ODN_PDP_VID2CTRL_VID2BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_VID2CTRL_VID2BLEND_SHIFT (27) ++#define ODN_PDP_VID2CTRL_VID2BLEND_LENGTH (2) ++#define ODN_PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2BLENDPOS ++*/ ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SHIFT (24) ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LENGTH (3) ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2DITHEREN ++*/ ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_MASK (0x00800000) ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_SHIFT (23) ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_LENGTH (1) ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3CTRL_OFFSET (0x0038) ++ ++/* PDP, VID3CTRL, VID3STREN ++*/ ++#define ODN_PDP_VID3CTRL_VID3STREN_MASK (0x80000000) ++#define ODN_PDP_VID3CTRL_VID3STREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3CTRL_VID3STREN_SHIFT (31) ++#define ODN_PDP_VID3CTRL_VID3STREN_LENGTH (1) ++#define ODN_PDP_VID3CTRL_VID3STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3CKEYEN ++*/ ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_MASK (0x40000000) ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_SHIFT (30) ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_LENGTH (1) ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3CKEYSRC ++*/ ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SHIFT (29) ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LENGTH (1) ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3BLEND ++*/ ++#define ODN_PDP_VID3CTRL_VID3BLEND_MASK (0x18000000) ++#define ODN_PDP_VID3CTRL_VID3BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_VID3CTRL_VID3BLEND_SHIFT (27) ++#define ODN_PDP_VID3CTRL_VID3BLEND_LENGTH (2) ++#define ODN_PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3BLENDPOS ++*/ ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SHIFT (24) ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LENGTH (3) ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3DITHEREN ++*/ ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_MASK (0x00800000) ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_SHIFT (23) ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_LENGTH (1) ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4CTRL_OFFSET (0x003C) ++ ++/* PDP, VID4CTRL, VID4STREN ++*/ ++#define ODN_PDP_VID4CTRL_VID4STREN_MASK (0x80000000) ++#define ODN_PDP_VID4CTRL_VID4STREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4CTRL_VID4STREN_SHIFT (31) ++#define ODN_PDP_VID4CTRL_VID4STREN_LENGTH (1) ++#define ODN_PDP_VID4CTRL_VID4STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4CKEYEN ++*/ ++#define ODN_PDP_VID4CTRL_VID4CKEYEN_MASK (0x40000000) ++#define ODN_PDP_VID4CTRL_VID4CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4CTRL_VID4CKEYEN_SHIFT (30) ++#define ODN_PDP_VID4CTRL_VID4CKEYEN_LENGTH (1) ++#define 
ODN_PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4CKEYSRC ++*/ ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SHIFT (29) ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LENGTH (1) ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4BLEND ++*/ ++#define ODN_PDP_VID4CTRL_VID4BLEND_MASK (0x18000000) ++#define ODN_PDP_VID4CTRL_VID4BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_VID4CTRL_VID4BLEND_SHIFT (27) ++#define ODN_PDP_VID4CTRL_VID4BLEND_LENGTH (2) ++#define ODN_PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4BLENDPOS ++*/ ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SHIFT (24) ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LENGTH (3) ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4DITHEREN ++*/ ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_MASK (0x00800000) ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_SHIFT (23) ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_LENGTH (1) ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1UCTRL_OFFSET (0x0050) ++ ++/* PDP, VID1UCTRL, VID1UVHALFSTR ++*/ ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_MASK (0xC0000000) ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK (0x00000003) ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT (30) ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH (2) ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2UCTRL_OFFSET (0x0054) ++ ++/* PDP, VID2UCTRL, VID2UVHALFSTR ++*/ ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_MASK (0xC0000000) ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK (0x00000003) ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT (30) ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH (2) ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3UCTRL_OFFSET (0x0058) ++ ++/* PDP, VID3UCTRL, VID3UVHALFSTR ++*/ ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_MASK (0xC0000000) ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK (0x00000003) ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT (30) ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH (2) ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4UCTRL_OFFSET (0x005C) ++ ++/* PDP, VID4UCTRL, VID4UVHALFSTR ++*/ ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_MASK (0xC0000000) ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK (0x00000003) ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT (30) ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH (2) ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1STRIDE_OFFSET (0x0060) ++ ++/* PDP, GRPH1STRIDE, GRPH1STRIDE ++*/ ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT (22) ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH (10) ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2STRIDE_OFFSET (0x0064) ++ ++/* PDP, GRPH2STRIDE, GRPH2STRIDE ++*/ ++#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT (22) ++#define 
ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH (10) ++#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3STRIDE_OFFSET (0x0068) ++ ++/* PDP, GRPH3STRIDE, GRPH3STRIDE ++*/ ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT (22) ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH (10) ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4STRIDE_OFFSET (0x006C) ++ ++/* PDP, GRPH4STRIDE, GRPH4STRIDE ++*/ ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT (22) ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH (10) ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1STRIDE_OFFSET (0x0070) ++ ++/* PDP, VID1STRIDE, VID1STRIDE ++*/ ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT (22) ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH (10) ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2STRIDE_OFFSET (0x0074) ++ ++/* PDP, VID2STRIDE, VID2STRIDE ++*/ ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_SHIFT (22) ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_LENGTH (10) ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3STRIDE_OFFSET (0x0078) ++ ++/* PDP, VID3STRIDE, VID3STRIDE ++*/ ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_SHIFT (22) ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_LENGTH (10) ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4STRIDE_OFFSET (0x007C) ++ ++/* PDP, VID4STRIDE, VID4STRIDE ++*/ ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_SHIFT (22) ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_LENGTH (10) ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1SIZE_OFFSET (0x0080) ++ ++/* PDP, GRPH1SIZE, GRPH1WIDTH ++*/ ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT (16) ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH (12) ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SIZE, GRPH1HEIGHT ++*/ ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT (0) ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH (12) ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2SIZE_OFFSET (0x0084) ++ ++/* PDP, GRPH2SIZE, GRPH2WIDTH ++*/ ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT (16) ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH (12) ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SIZE, GRPH2HEIGHT ++*/ ++#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK (0x00000FFF) ++#define 
ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT (0) ++#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH (12) ++#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3SIZE_OFFSET (0x0088) ++ ++/* PDP, GRPH3SIZE, GRPH3WIDTH ++*/ ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT (16) ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH (12) ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SIZE, GRPH3HEIGHT ++*/ ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT (0) ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH (12) ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4SIZE_OFFSET (0x008C) ++ ++/* PDP, GRPH4SIZE, GRPH4WIDTH ++*/ ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT (16) ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH (12) ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SIZE, GRPH4HEIGHT ++*/ ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT (0) ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH (12) ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SIZE_OFFSET (0x0090) ++ ++/* PDP, VID1SIZE, VID1WIDTH ++*/ ++#define ODN_PDP_VID1SIZE_VID1WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT (16) ++#define ODN_PDP_VID1SIZE_VID1WIDTH_LENGTH (12) ++#define ODN_PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SIZE, VID1HEIGHT ++*/ ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT (0) ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_LENGTH (12) ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SIZE_OFFSET (0x0094) ++ ++/* PDP, VID2SIZE, VID2WIDTH ++*/ ++#define ODN_PDP_VID2SIZE_VID2WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID2SIZE_VID2WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SIZE_VID2WIDTH_SHIFT (16) ++#define ODN_PDP_VID2SIZE_VID2WIDTH_LENGTH (12) ++#define ODN_PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SIZE, VID2HEIGHT ++*/ ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_SHIFT (0) ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_LENGTH (12) ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SIZE_OFFSET (0x0098) ++ ++/* PDP, VID3SIZE, VID3WIDTH ++*/ ++#define ODN_PDP_VID3SIZE_VID3WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID3SIZE_VID3WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SIZE_VID3WIDTH_SHIFT (16) ++#define ODN_PDP_VID3SIZE_VID3WIDTH_LENGTH (12) ++#define ODN_PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SIZE, VID3HEIGHT ++*/ ++#define ODN_PDP_VID3SIZE_VID3HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID3SIZE_VID3HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SIZE_VID3HEIGHT_SHIFT (0) ++#define ODN_PDP_VID3SIZE_VID3HEIGHT_LENGTH (12) ++#define 
ODN_PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SIZE_OFFSET (0x009C) ++ ++/* PDP, VID4SIZE, VID4WIDTH ++*/ ++#define ODN_PDP_VID4SIZE_VID4WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID4SIZE_VID4WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SIZE_VID4WIDTH_SHIFT (16) ++#define ODN_PDP_VID4SIZE_VID4WIDTH_LENGTH (12) ++#define ODN_PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SIZE, VID4HEIGHT ++*/ ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_SHIFT (0) ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_LENGTH (12) ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1POSN_OFFSET (0x00A0) ++ ++/* PDP, GRPH1POSN, GRPH1XSTART ++*/ ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT (16) ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LENGTH (12) ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1POSN, GRPH1YSTART ++*/ ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK (0x00000FFF) ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT (0) ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LENGTH (12) ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2POSN_OFFSET (0x00A4) ++ ++/* PDP, GRPH2POSN, GRPH2XSTART ++*/ ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT (16) ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LENGTH (12) ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2POSN, GRPH2YSTART ++*/ ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK (0x00000FFF) ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT (0) ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LENGTH (12) ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3POSN_OFFSET (0x00A8) ++ ++/* PDP, GRPH3POSN, GRPH3XSTART ++*/ ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SHIFT (16) ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LENGTH (12) ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3POSN, GRPH3YSTART ++*/ ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_MASK (0x00000FFF) ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SHIFT (0) ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LENGTH (12) ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4POSN_OFFSET (0x00AC) ++ ++/* PDP, GRPH4POSN, GRPH4XSTART ++*/ ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT (16) ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LENGTH (12) ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4POSN, GRPH4YSTART ++*/ ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK (0x00000FFF) ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT (0) ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LENGTH (12) ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1POSN_OFFSET 
(0x00B0) ++ ++/* PDP, VID1POSN, VID1XSTART ++*/ ++#define ODN_PDP_VID1POSN_VID1XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_VID1POSN_VID1XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1POSN_VID1XSTART_SHIFT (16) ++#define ODN_PDP_VID1POSN_VID1XSTART_LENGTH (12) ++#define ODN_PDP_VID1POSN_VID1XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1POSN, VID1YSTART ++*/ ++#define ODN_PDP_VID1POSN_VID1YSTART_MASK (0x00000FFF) ++#define ODN_PDP_VID1POSN_VID1YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1POSN_VID1YSTART_SHIFT (0) ++#define ODN_PDP_VID1POSN_VID1YSTART_LENGTH (12) ++#define ODN_PDP_VID1POSN_VID1YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2POSN_OFFSET (0x00B4) ++ ++/* PDP, VID2POSN, VID2XSTART ++*/ ++#define ODN_PDP_VID2POSN_VID2XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_VID2POSN_VID2XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2POSN_VID2XSTART_SHIFT (16) ++#define ODN_PDP_VID2POSN_VID2XSTART_LENGTH (12) ++#define ODN_PDP_VID2POSN_VID2XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2POSN, VID2YSTART ++*/ ++#define ODN_PDP_VID2POSN_VID2YSTART_MASK (0x00000FFF) ++#define ODN_PDP_VID2POSN_VID2YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2POSN_VID2YSTART_SHIFT (0) ++#define ODN_PDP_VID2POSN_VID2YSTART_LENGTH (12) ++#define ODN_PDP_VID2POSN_VID2YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3POSN_OFFSET (0x00B8) ++ ++/* PDP, VID3POSN, VID3XSTART ++*/ ++#define ODN_PDP_VID3POSN_VID3XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_VID3POSN_VID3XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3POSN_VID3XSTART_SHIFT (16) ++#define ODN_PDP_VID3POSN_VID3XSTART_LENGTH (12) ++#define ODN_PDP_VID3POSN_VID3XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3POSN, VID3YSTART ++*/ ++#define ODN_PDP_VID3POSN_VID3YSTART_MASK (0x00000FFF) ++#define ODN_PDP_VID3POSN_VID3YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3POSN_VID3YSTART_SHIFT (0) ++#define ODN_PDP_VID3POSN_VID3YSTART_LENGTH (12) ++#define ODN_PDP_VID3POSN_VID3YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4POSN_OFFSET (0x00BC) ++ ++/* PDP, VID4POSN, VID4XSTART ++*/ ++#define ODN_PDP_VID4POSN_VID4XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_VID4POSN_VID4XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4POSN_VID4XSTART_SHIFT (16) ++#define ODN_PDP_VID4POSN_VID4XSTART_LENGTH (12) ++#define ODN_PDP_VID4POSN_VID4XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4POSN, VID4YSTART ++*/ ++#define ODN_PDP_VID4POSN_VID4YSTART_MASK (0x00000FFF) ++#define ODN_PDP_VID4POSN_VID4YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4POSN_VID4YSTART_SHIFT (0) ++#define ODN_PDP_VID4POSN_VID4YSTART_LENGTH (12) ++#define ODN_PDP_VID4POSN_VID4YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1GALPHA_OFFSET (0x00C0) ++ ++/* PDP, GRPH1GALPHA, GRPH1GALPHA ++*/ ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK (0x000003FF) ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT (0) ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH (10) ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2GALPHA_OFFSET (0x00C4) ++ ++/* PDP, GRPH2GALPHA, GRPH2GALPHA ++*/ ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK (0x000003FF) ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT (0) ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH (10) ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3GALPHA_OFFSET (0x00C8) ++ ++/* PDP, GRPH3GALPHA, GRPH3GALPHA ++*/ ++#define 
ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_MASK (0x000003FF) ++#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT (0) ++#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH (10) ++#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4GALPHA_OFFSET (0x00CC) ++ ++/* PDP, GRPH4GALPHA, GRPH4GALPHA ++*/ ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK (0x000003FF) ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT (0) ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH (10) ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1GALPHA_OFFSET (0x00D0) ++ ++/* PDP, VID1GALPHA, VID1GALPHA ++*/ ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_MASK (0x000003FF) ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT (0) ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_LENGTH (10) ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2GALPHA_OFFSET (0x00D4) ++ ++/* PDP, VID2GALPHA, VID2GALPHA ++*/ ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_MASK (0x000003FF) ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_SHIFT (0) ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_LENGTH (10) ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3GALPHA_OFFSET (0x00D8) ++ ++/* PDP, VID3GALPHA, VID3GALPHA ++*/ ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_MASK (0x000003FF) ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_SHIFT (0) ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_LENGTH (10) ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4GALPHA_OFFSET (0x00DC) ++ ++/* PDP, VID4GALPHA, VID4GALPHA ++*/ ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_MASK (0x000003FF) ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_SHIFT (0) ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_LENGTH (10) ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1CKEY_R_OFFSET (0x00E0) ++ ++/* PDP, GRPH1CKEY_R, GRPH1CKEY_R ++*/ ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT (0) ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH (10) ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1CKEY_GB_OFFSET (0x00E4) ++ ++/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G ++*/ ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT (16) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH (10) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B ++*/ ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT (0) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH (10) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2CKEY_R_OFFSET (0x00E8) ++ ++/* PDP, GRPH2CKEY_R, GRPH2CKEY_R ++*/ ++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT (0) 
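++/* Illustrative usage sketch (hypothetical, not part of the generated
++ * header): every PDP field in this file is described by the same
++ * generated set of _MASK, _LSBMASK, _SHIFT, _LENGTH and _SIGNED_FIELD
++ * macros, where, as generated here, MASK equals LSBMASK shifted left
++ * by SHIFT, and SIGNED_FIELD == IMG_FALSE marks the field as unsigned.
++ * A typical read-modify-write of one field, assuming hypothetical
++ * pdp_read()/pdp_write() MMIO accessors and a mapped register base:
++ *
++ *   u32 v = pdp_read(base + ODN_PDP_GRPH1CKEY_GB_OFFSET);
++ *   v &= ~ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK;
++ *   v |= (key_g & ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK)
++ *        << ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT;
++ *   pdp_write(base + ODN_PDP_GRPH1CKEY_GB_OFFSET, v);
++ */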
++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH (10) ++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2CKEY_GB_OFFSET (0x00EC) ++ ++/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G ++*/ ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT (16) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH (10) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B ++*/ ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT (0) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH (10) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3CKEY_R_OFFSET (0x00F0) ++ ++/* PDP, GRPH3CKEY_R, GRPH3CKEY_R ++*/ ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT (0) ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH (10) ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3CKEY_GB_OFFSET (0x00F4) ++ ++/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G ++*/ ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT (16) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH (10) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B ++*/ ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT (0) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH (10) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4CKEY_R_OFFSET (0x00F8) ++ ++/* PDP, GRPH4CKEY_R, GRPH4CKEY_R ++*/ ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT (0) ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH (10) ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4CKEY_GB_OFFSET (0x00FC) ++ ++/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G ++*/ ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT (16) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH (10) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B ++*/ ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT (0) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH (10) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1CKEY_R_OFFSET (0x0100) ++ ++/* PDP, VID1CKEY_R, VID1CKEY_R ++*/ ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SHIFT (0) ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LENGTH (10) ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1CKEY_GB_OFFSET (0x0104) ++ ++/* PDP, VID1CKEY_GB, 
VID1CKEY_G ++*/ ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT (16) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH (10) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CKEY_GB, VID1CKEY_B ++*/ ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT (0) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH (10) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2CKEY_R_OFFSET (0x0108) ++ ++/* PDP, VID2CKEY_R, VID2CKEY_R ++*/ ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SHIFT (0) ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LENGTH (10) ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2CKEY_GB_OFFSET (0x010C) ++ ++/* PDP, VID2CKEY_GB, VID2CKEY_G ++*/ ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT (16) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH (10) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CKEY_GB, VID2CKEY_B ++*/ ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT (0) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH (10) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3CKEY_R_OFFSET (0x0110) ++ ++/* PDP, VID3CKEY_R, VID3CKEY_R ++*/ ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SHIFT (0) ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LENGTH (10) ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3CKEY_GB_OFFSET (0x0114) ++ ++/* PDP, VID3CKEY_GB, VID3CKEY_G ++*/ ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT (16) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH (10) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CKEY_GB, VID3CKEY_B ++*/ ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT (0) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH (10) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4CKEY_R_OFFSET (0x0118) ++ ++/* PDP, VID4CKEY_R, VID4CKEY_R ++*/ ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SHIFT (0) ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LENGTH (10) ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4CKEY_GB_OFFSET (0x011C) ++ ++/* PDP, VID4CKEY_GB, VID4CKEY_G ++*/ ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT (16) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH (10) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD 
IMG_FALSE ++ ++/* PDP, VID4CKEY_GB, VID4CKEY_B ++*/ ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT (0) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH (10) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1BLND2_R_OFFSET (0x0120) ++ ++/* PDP, GRPH1BLND2_R, GRPH1PIXDBL ++*/ ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK (0x80000000) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT (31) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH (1) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1BLND2_R, GRPH1LINDBL ++*/ ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK (0x20000000) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT (29) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH (1) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R ++*/ ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1BLND2_GB_OFFSET (0x0124) ++ ++/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G ++*/ ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B ++*/ ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2BLND2_R_OFFSET (0x0128) ++ ++/* PDP, GRPH2BLND2_R, GRPH2PIXDBL ++*/ ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK (0x80000000) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT (31) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH (1) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2BLND2_R, GRPH2LINDBL ++*/ ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK (0x20000000) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT (29) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH (1) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R ++*/ ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2BLND2_GB_OFFSET (0x012C) ++ ++/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G ++*/ ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK (0x03FF0000) ++#define 
ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B ++*/ ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3BLND2_R_OFFSET (0x0130) ++ ++/* PDP, GRPH3BLND2_R, GRPH3PIXDBL ++*/ ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK (0x80000000) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT (31) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH (1) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3BLND2_R, GRPH3LINDBL ++*/ ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK (0x20000000) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT (29) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH (1) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R ++*/ ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3BLND2_GB_OFFSET (0x0134) ++ ++/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G ++*/ ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B ++*/ ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4BLND2_R_OFFSET (0x0138) ++ ++/* PDP, GRPH4BLND2_R, GRPH4PIXDBL ++*/ ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK (0x80000000) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT (31) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH (1) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4BLND2_R, GRPH4LINDBL ++*/ ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK (0x20000000) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT (29) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH (1) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R ++*/ ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH 
(10) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4BLND2_GB_OFFSET (0x013C) ++ ++/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G ++*/ ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B ++*/ ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1BLND2_R_OFFSET (0x0140) ++ ++/* PDP, VID1BLND2_R, VID1CKEYMASK_R ++*/ ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1BLND2_GB_OFFSET (0x0144) ++ ++/* PDP, VID1BLND2_GB, VID1CKEYMASK_G ++*/ ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1BLND2_GB, VID1CKEYMASK_B ++*/ ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2BLND2_R_OFFSET (0x0148) ++ ++/* PDP, VID2BLND2_R, VID2CKEYMASK_R ++*/ ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2BLND2_GB_OFFSET (0x014C) ++ ++/* PDP, VID2BLND2_GB, VID2CKEYMASK_G ++*/ ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2BLND2_GB, VID2CKEYMASK_B ++*/ ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3BLND2_R_OFFSET (0x0150) ++ ++/* PDP, VID3BLND2_R, VID3CKEYMASK_R ++*/ ++#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT (0) ++#define 
ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3BLND2_GB_OFFSET (0x0154) ++ ++/* PDP, VID3BLND2_GB, VID3CKEYMASK_G ++*/ ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3BLND2_GB, VID3CKEYMASK_B ++*/ ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4BLND2_R_OFFSET (0x0158) ++ ++/* PDP, VID4BLND2_R, VID4CKEYMASK_R ++*/ ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4BLND2_GB_OFFSET (0x015C) ++ ++/* PDP, VID4BLND2_GB, VID4CKEYMASK_G ++*/ ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4BLND2_GB, VID4CKEYMASK_B ++*/ ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET (0x0160) ++ ++/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD ++*/ ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK (0x00000001) ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0) ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1) ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET (0x0164) ++ ++/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD ++*/ ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001) ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0) ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1) ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_OFFSET (0x0168) ++ ++/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD ++*/ ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001) ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0) ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1) ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET (0x016C) ++ ++/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD 
++*/ ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001) ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0) ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1) ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET (0x0170) ++ ++/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD ++*/ ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001) ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0) ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1) ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2INTERLEAVE_CTRL_OFFSET (0x0174) ++ ++/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD ++*/ ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001) ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0) ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1) ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3INTERLEAVE_CTRL_OFFSET (0x0178) ++ ++/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD ++*/ ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001) ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0) ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1) ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4INTERLEAVE_CTRL_OFFSET (0x017C) ++ ++/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD ++*/ ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001) ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0) ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1) ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1BASEADDR_OFFSET (0x0180) ++ ++/* PDP, GRPH1BASEADDR, GRPH1BASEADDR ++*/ ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT (5) ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH (27) ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2BASEADDR_OFFSET (0x0184) ++ ++/* PDP, GRPH2BASEADDR, GRPH2BASEADDR ++*/ ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT (5) ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH (27) ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3BASEADDR_OFFSET (0x0188) ++ ++/* PDP, GRPH3BASEADDR, GRPH3BASEADDR ++*/ ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT (5) ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH (27) ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4BASEADDR_OFFSET (0x018C) ++ ++/* PDP, GRPH4BASEADDR, GRPH4BASEADDR ++*/ ++#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK (0xFFFFFFE0) ++#define 
ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT (5) ++#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH (27) ++#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1BASEADDR_OFFSET (0x0190) ++ ++/* PDP, VID1BASEADDR, VID1BASEADDR ++*/ ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SHIFT (5) ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH (27) ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2BASEADDR_OFFSET (0x0194) ++ ++/* PDP, VID2BASEADDR, VID2BASEADDR ++*/ ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SHIFT (5) ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LENGTH (27) ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3BASEADDR_OFFSET (0x0198) ++ ++/* PDP, VID3BASEADDR, VID3BASEADDR ++*/ ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SHIFT (5) ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LENGTH (27) ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4BASEADDR_OFFSET (0x019C) ++ ++/* PDP, VID4BASEADDR, VID4BASEADDR ++*/ ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SHIFT (5) ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LENGTH (27) ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1UBASEADDR_OFFSET (0x01B0) ++ ++/* PDP, VID1UBASEADDR, VID1UBASEADDR ++*/ ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT (5) ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH (27) ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2UBASEADDR_OFFSET (0x01B4) ++ ++/* PDP, VID2UBASEADDR, VID2UBASEADDR ++*/ ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT (5) ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH (27) ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3UBASEADDR_OFFSET (0x01B8) ++ ++/* PDP, VID3UBASEADDR, VID3UBASEADDR ++*/ ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT (5) ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH (27) ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4UBASEADDR_OFFSET (0x01BC) ++ ++/* PDP, VID4UBASEADDR, VID4UBASEADDR ++*/ ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT (5) ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH (27) ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VBASEADDR_OFFSET (0x01D0) ++ ++/* PDP, 
VID1VBASEADDR, VID1VBASEADDR ++*/ ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT (5) ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH (27) ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VBASEADDR_OFFSET (0x01D4) ++ ++/* PDP, VID2VBASEADDR, VID2VBASEADDR ++*/ ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT (5) ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH (27) ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VBASEADDR_OFFSET (0x01D8) ++ ++/* PDP, VID3VBASEADDR, VID3VBASEADDR ++*/ ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT (5) ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH (27) ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VBASEADDR_OFFSET (0x01DC) ++ ++/* PDP, VID4VBASEADDR, VID4VBASEADDR ++*/ ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT (5) ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH (27) ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1POSTSKIPCTRL_OFFSET (0x0230) ++ ++/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP ++*/ ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK (0x007F0000) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK (0x0000007F) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT (16) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH (7) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP ++*/ ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK (0x0000003F) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK (0x0000003F) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT (0) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH (6) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2POSTSKIPCTRL_OFFSET (0x0234) ++ ++/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP ++*/ ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK (0x007F0000) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK (0x0000007F) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT (16) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH (7) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP ++*/ ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK (0x0000003F) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK (0x0000003F) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT (0) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH (6) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3POSTSKIPCTRL_OFFSET (0x0238) ++ ++/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP ++*/ ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK (0x007F0000) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK (0x0000007F) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT (16) ++#define 
ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH (7) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP ++*/ ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK (0x0000003F) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK (0x0000003F) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT (0) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH (6) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4POSTSKIPCTRL_OFFSET (0x023C) ++ ++/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP ++*/ ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK (0x007F0000) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK (0x0000007F) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT (16) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH (7) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP ++*/ ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK (0x0000003F) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK (0x0000003F) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT (0) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH (6) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_OFFSET (0x0240) ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN ++*/ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_OFFSET (0x0244) ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define 
ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN ++*/ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_OFFSET (0x0248) ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN ++*/ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define 
ODN_PDP_GRPH4DECIMATE_CTRL_OFFSET (0x024C) ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN ++*/ ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1DECIMATE_CTRL_OFFSET (0x0250) ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN ++*/ ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK (0x00000001) ++#define 
ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2DECIMATE_CTRL_OFFSET (0x0254) ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN ++*/ ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3DECIMATE_CTRL_OFFSET (0x0258) ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define 
ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN ++*/ ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4DECIMATE_CTRL_OFFSET (0x025C) ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN ++*/ ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SKIPCTRL_OFFSET (0x0270) ++ ++/* PDP, VID1SKIPCTRL, VID1HSKIP ++*/ ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK (0x0FFF0000) ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT (16) ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH (12) ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SKIPCTRL, VID1VSKIP ++*/ ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK (0x00000FFF) ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT (0) ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH (12) ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SKIPCTRL_OFFSET (0x0274) ++ ++/* PDP, VID2SKIPCTRL, VID2HSKIP ++*/ ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_MASK (0x0FFF0000) ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT (16) ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH (12) ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SKIPCTRL, VID2VSKIP ++*/ ++#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_MASK (0x00000FFF) ++#define 
ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT (0) ++#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH (12) ++#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SKIPCTRL_OFFSET (0x0278) ++ ++/* PDP, VID3SKIPCTRL, VID3HSKIP ++*/ ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_MASK (0x0FFF0000) ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT (16) ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH (12) ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SKIPCTRL, VID3VSKIP ++*/ ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_MASK (0x00000FFF) ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT (0) ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH (12) ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SKIPCTRL_OFFSET (0x027C) ++ ++/* PDP, VID4SKIPCTRL, VID4HSKIP ++*/ ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_MASK (0x0FFF0000) ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT (16) ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH (12) ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SKIPCTRL, VID4VSKIP ++*/ ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_MASK (0x00000FFF) ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT (0) ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH (12) ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SCALECTRL_OFFSET (0x0460) ++ ++/* PDP, VID1SCALECTRL, VID1HSCALEBP ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_MASK (0x80000000) ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT (31) ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VSCALEBP ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_MASK (0x40000000) ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT (30) ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1HSBEFOREVS ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK (0x20000000) ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT (29) ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VSURUNCTRL ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK (0x08000000) ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT (27) ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1PAN_EN ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_MASK (0x00040000) ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT (18) ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VORDER ++*/ ++#define 
ODN_PDP_VID1SCALECTRL_VID1VORDER_MASK (0x00030000) ++#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LSBMASK (0x00000003) ++#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SHIFT (16) ++#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LENGTH (2) ++#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VPITCH ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SHIFT (0) ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LENGTH (16) ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VSINIT_OFFSET (0x0464) ++ ++/* PDP, VID1VSINIT, VID1VINITIAL1 ++*/ ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_MASK (0xFFFF0000) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SHIFT (16) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LENGTH (16) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1VSINIT, VID1VINITIAL0 ++*/ ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_MASK (0x0000FFFF) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SHIFT (0) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LENGTH (16) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF0_OFFSET (0x0468) ++ ++/* PDP, VID1VCOEFF0, VID1VCOEFF0 ++*/ ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF1_OFFSET (0x046C) ++ ++/* PDP, VID1VCOEFF1, VID1VCOEFF1 ++*/ ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF2_OFFSET (0x0470) ++ ++/* PDP, VID1VCOEFF2, VID1VCOEFF2 ++*/ ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF3_OFFSET (0x0474) ++ ++/* PDP, VID1VCOEFF3, VID1VCOEFF3 ++*/ ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF4_OFFSET (0x0478) ++ ++/* PDP, VID1VCOEFF4, VID1VCOEFF4 ++*/ ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF5_OFFSET (0x047C) ++ ++/* PDP, VID1VCOEFF5, VID1VCOEFF5 ++*/ ++#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define 
ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF6_OFFSET (0x0480) ++ ++/* PDP, VID1VCOEFF6, VID1VCOEFF6 ++*/ ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF7_OFFSET (0x0484) ++ ++/* PDP, VID1VCOEFF7, VID1VCOEFF7 ++*/ ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF8_OFFSET (0x0488) ++ ++/* PDP, VID1VCOEFF8, VID1VCOEFF8 ++*/ ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_MASK (0x000000FF) ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH (8) ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HSINIT_OFFSET (0x048C) ++ ++/* PDP, VID1HSINIT, VID1HINITIAL ++*/ ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_MASK (0xFFFF0000) ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SHIFT (16) ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LENGTH (16) ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1HSINIT, VID1HPITCH ++*/ ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_SHIFT (0) ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_LENGTH (16) ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF0_OFFSET (0x0490) ++ ++/* PDP, VID1HCOEFF0, VID1HCOEFF0 ++*/ ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF1_OFFSET (0x0494) ++ ++/* PDP, VID1HCOEFF1, VID1HCOEFF1 ++*/ ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF2_OFFSET (0x0498) ++ ++/* PDP, VID1HCOEFF2, VID1HCOEFF2 ++*/ ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF3_OFFSET (0x049C) ++ ++/* PDP, VID1HCOEFF3, VID1HCOEFF3 ++*/ ++#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH (32) ++#define 
ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF4_OFFSET (0x04A0) ++ ++/* PDP, VID1HCOEFF4, VID1HCOEFF4 ++*/ ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF5_OFFSET (0x04A4) ++ ++/* PDP, VID1HCOEFF5, VID1HCOEFF5 ++*/ ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF6_OFFSET (0x04A8) ++ ++/* PDP, VID1HCOEFF6, VID1HCOEFF6 ++*/ ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF7_OFFSET (0x04AC) ++ ++/* PDP, VID1HCOEFF7, VID1HCOEFF7 ++*/ ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF8_OFFSET (0x04B0) ++ ++/* PDP, VID1HCOEFF8, VID1HCOEFF8 ++*/ ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF9_OFFSET (0x04B4) ++ ++/* PDP, VID1HCOEFF9, VID1HCOEFF9 ++*/ ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF10_OFFSET (0x04B8) ++ ++/* PDP, VID1HCOEFF10, VID1HCOEFF10 ++*/ ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF11_OFFSET (0x04BC) ++ ++/* PDP, VID1HCOEFF11, VID1HCOEFF11 ++*/ ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF12_OFFSET (0x04C0) ++ ++/* PDP, VID1HCOEFF12, VID1HCOEFF12 ++*/ ++#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH (32) ++#define 
ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF13_OFFSET (0x04C4) ++ ++/* PDP, VID1HCOEFF13, VID1HCOEFF13 ++*/ ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF14_OFFSET (0x04C8) ++ ++/* PDP, VID1HCOEFF14, VID1HCOEFF14 ++*/ ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF15_OFFSET (0x04CC) ++ ++/* PDP, VID1HCOEFF15, VID1HCOEFF15 ++*/ ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF16_OFFSET (0x04D0) ++ ++/* PDP, VID1HCOEFF16, VID1HCOEFF16 ++*/ ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_MASK (0x000000FF) ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH (8) ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SCALESIZE_OFFSET (0x04D4) ++ ++/* PDP, VID1SCALESIZE, VID1SCALEWIDTH ++*/ ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT (16) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH (12) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT ++*/ ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT (0) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH (12) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CORE_ID_OFFSET (0x04E0) ++ ++/* PDP, PVR_ODN_PDP_CORE_ID, GROUP_ID ++*/ ++#define ODN_PDP_CORE_ID_GROUP_ID_MASK (0xFF000000) ++#define ODN_PDP_CORE_ID_GROUP_ID_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_ID_GROUP_ID_SHIFT (24) ++#define ODN_PDP_CORE_ID_GROUP_ID_LENGTH (8) ++#define ODN_PDP_CORE_ID_GROUP_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_ODN_PDP_CORE_ID, CORE_ID ++*/ ++#define ODN_PDP_CORE_ID_CORE_ID_MASK (0x00FF0000) ++#define ODN_PDP_CORE_ID_CORE_ID_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_ID_CORE_ID_SHIFT (16) ++#define ODN_PDP_CORE_ID_CORE_ID_LENGTH (8) ++#define ODN_PDP_CORE_ID_CORE_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_ODN_PDP_CORE_ID, CONFIG_ID ++*/ ++#define ODN_PDP_CORE_ID_CONFIG_ID_MASK (0x0000FFFF) ++#define ODN_PDP_CORE_ID_CONFIG_ID_LSBMASK (0x0000FFFF) ++#define ODN_PDP_CORE_ID_CONFIG_ID_SHIFT (0) ++#define ODN_PDP_CORE_ID_CONFIG_ID_LENGTH (16) ++#define ODN_PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CORE_REV_OFFSET (0x04F0) ++ ++/* PDP, PVR_ODN_PDP_CORE_REV, MAJOR_REV ++*/ ++#define ODN_PDP_CORE_REV_MAJOR_REV_MASK 
(0x00FF0000) ++#define ODN_PDP_CORE_REV_MAJOR_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_REV_MAJOR_REV_SHIFT (16) ++#define ODN_PDP_CORE_REV_MAJOR_REV_LENGTH (8) ++#define ODN_PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_ODN_PDP_CORE_REV, MINOR_REV ++*/ ++#define ODN_PDP_CORE_REV_MINOR_REV_MASK (0x0000FF00) ++#define ODN_PDP_CORE_REV_MINOR_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_REV_MINOR_REV_SHIFT (8) ++#define ODN_PDP_CORE_REV_MINOR_REV_LENGTH (8) ++#define ODN_PDP_CORE_REV_MINOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_ODN_PDP_CORE_REV, MAINT_REV ++*/ ++#define ODN_PDP_CORE_REV_MAINT_REV_MASK (0x000000FF) ++#define ODN_PDP_CORE_REV_MAINT_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_REV_MAINT_REV_SHIFT (0) ++#define ODN_PDP_CORE_REV_MAINT_REV_LENGTH (8) ++#define ODN_PDP_CORE_REV_MAINT_REV_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SCALECTRL_OFFSET (0x0500) ++ ++/* PDP, VID2SCALECTRL, VID2HSCALEBP ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_MASK (0x80000000) ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT (31) ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VSCALEBP ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_MASK (0x40000000) ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT (30) ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2HSBEFOREVS ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK (0x20000000) ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT (29) ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VSURUNCTRL ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK (0x08000000) ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT (27) ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2PAN_EN ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_MASK (0x00040000) ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT (18) ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VORDER ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_MASK (0x00030000) ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LSBMASK (0x00000003) ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SHIFT (16) ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LENGTH (2) ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VPITCH ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SHIFT (0) ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LENGTH (16) ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VSINIT_OFFSET (0x0504) ++ ++/* PDP, VID2VSINIT, VID2VINITIAL1 ++*/ ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_MASK (0xFFFF0000) ++#define 
ODN_PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SHIFT (16) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LENGTH (16) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2VSINIT, VID2VINITIAL0 ++*/ ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_MASK (0x0000FFFF) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SHIFT (0) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LENGTH (16) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF0_OFFSET (0x0508) ++ ++/* PDP, VID2VCOEFF0, VID2VCOEFF0 ++*/ ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF1_OFFSET (0x050C) ++ ++/* PDP, VID2VCOEFF1, VID2VCOEFF1 ++*/ ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF2_OFFSET (0x0510) ++ ++/* PDP, VID2VCOEFF2, VID2VCOEFF2 ++*/ ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF3_OFFSET (0x0514) ++ ++/* PDP, VID2VCOEFF3, VID2VCOEFF3 ++*/ ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF4_OFFSET (0x0518) ++ ++/* PDP, VID2VCOEFF4, VID2VCOEFF4 ++*/ ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF5_OFFSET (0x051C) ++ ++/* PDP, VID2VCOEFF5, VID2VCOEFF5 ++*/ ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF6_OFFSET (0x0520) ++ ++/* PDP, VID2VCOEFF6, VID2VCOEFF6 ++*/ ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF7_OFFSET (0x0524) ++ ++/* PDP, VID2VCOEFF7, VID2VCOEFF7 ++*/ ++#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT (0) ++#define 
ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF8_OFFSET (0x0528) ++ ++/* PDP, VID2VCOEFF8, VID2VCOEFF8 ++*/ ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_MASK (0x000000FF) ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH (8) ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HSINIT_OFFSET (0x052C) ++ ++/* PDP, VID2HSINIT, VID2HINITIAL ++*/ ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_MASK (0xFFFF0000) ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SHIFT (16) ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LENGTH (16) ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2HSINIT, VID2HPITCH ++*/ ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_SHIFT (0) ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_LENGTH (16) ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF0_OFFSET (0x0530) ++ ++/* PDP, VID2HCOEFF0, VID2HCOEFF0 ++*/ ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF1_OFFSET (0x0534) ++ ++/* PDP, VID2HCOEFF1, VID2HCOEFF1 ++*/ ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF2_OFFSET (0x0538) ++ ++/* PDP, VID2HCOEFF2, VID2HCOEFF2 ++*/ ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF3_OFFSET (0x053C) ++ ++/* PDP, VID2HCOEFF3, VID2HCOEFF3 ++*/ ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF4_OFFSET (0x0540) ++ ++/* PDP, VID2HCOEFF4, VID2HCOEFF4 ++*/ ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF5_OFFSET (0x0544) ++ ++/* PDP, VID2HCOEFF5, VID2HCOEFF5 ++*/ ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define 
ODN_PDP_VID2HCOEFF6_OFFSET (0x0548) ++ ++/* PDP, VID2HCOEFF6, VID2HCOEFF6 ++*/ ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF7_OFFSET (0x054C) ++ ++/* PDP, VID2HCOEFF7, VID2HCOEFF7 ++*/ ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF8_OFFSET (0x0550) ++ ++/* PDP, VID2HCOEFF8, VID2HCOEFF8 ++*/ ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF9_OFFSET (0x0554) ++ ++/* PDP, VID2HCOEFF9, VID2HCOEFF9 ++*/ ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF10_OFFSET (0x0558) ++ ++/* PDP, VID2HCOEFF10, VID2HCOEFF10 ++*/ ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF11_OFFSET (0x055C) ++ ++/* PDP, VID2HCOEFF11, VID2HCOEFF11 ++*/ ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF12_OFFSET (0x0560) ++ ++/* PDP, VID2HCOEFF12, VID2HCOEFF12 ++*/ ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF13_OFFSET (0x0564) ++ ++/* PDP, VID2HCOEFF13, VID2HCOEFF13 ++*/ ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF14_OFFSET (0x0568) ++ ++/* PDP, VID2HCOEFF14, VID2HCOEFF14 ++*/ ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define 
ODN_PDP_VID2HCOEFF15_OFFSET (0x056C) ++ ++/* PDP, VID2HCOEFF15, VID2HCOEFF15 ++*/ ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF16_OFFSET (0x0570) ++ ++/* PDP, VID2HCOEFF16, VID2HCOEFF16 ++*/ ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_MASK (0x000000FF) ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH (8) ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SCALESIZE_OFFSET (0x0574) ++ ++/* PDP, VID2SCALESIZE, VID2SCALEWIDTH ++*/ ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT (16) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH (12) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT ++*/ ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT (0) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH (12) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SCALECTRL_OFFSET (0x0578) ++ ++/* PDP, VID3SCALECTRL, VID3HSCALEBP ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_MASK (0x80000000) ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT (31) ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VSCALEBP ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_MASK (0x40000000) ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT (30) ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3HSBEFOREVS ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK (0x20000000) ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT (29) ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VSURUNCTRL ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK (0x08000000) ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT (27) ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3PAN_EN ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_MASK (0x00040000) ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT (18) ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VORDER ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3VORDER_MASK (0x00030000) ++#define 
ODN_PDP_VID3SCALECTRL_VID3VORDER_LSBMASK (0x00000003) ++#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SHIFT (16) ++#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LENGTH (2) ++#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VPITCH ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SHIFT (0) ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LENGTH (16) ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VSINIT_OFFSET (0x057C) ++ ++/* PDP, VID3VSINIT, VID3VINITIAL1 ++*/ ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_MASK (0xFFFF0000) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SHIFT (16) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LENGTH (16) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3VSINIT, VID3VINITIAL0 ++*/ ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_MASK (0x0000FFFF) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SHIFT (0) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LENGTH (16) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF0_OFFSET (0x0580) ++ ++/* PDP, VID3VCOEFF0, VID3VCOEFF0 ++*/ ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF1_OFFSET (0x0584) ++ ++/* PDP, VID3VCOEFF1, VID3VCOEFF1 ++*/ ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF2_OFFSET (0x0588) ++ ++/* PDP, VID3VCOEFF2, VID3VCOEFF2 ++*/ ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF3_OFFSET (0x058C) ++ ++/* PDP, VID3VCOEFF3, VID3VCOEFF3 ++*/ ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF4_OFFSET (0x0590) ++ ++/* PDP, VID3VCOEFF4, VID3VCOEFF4 ++*/ ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF5_OFFSET (0x0594) ++ ++/* PDP, VID3VCOEFF5, VID3VCOEFF5 ++*/ ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH 
(32) ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF6_OFFSET (0x0598) ++ ++/* PDP, VID3VCOEFF6, VID3VCOEFF6 ++*/ ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF7_OFFSET (0x059C) ++ ++/* PDP, VID3VCOEFF7, VID3VCOEFF7 ++*/ ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF8_OFFSET (0x05A0) ++ ++/* PDP, VID3VCOEFF8, VID3VCOEFF8 ++*/ ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_MASK (0x000000FF) ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH (8) ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HSINIT_OFFSET (0x05A4) ++ ++/* PDP, VID3HSINIT, VID3HINITIAL ++*/ ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_MASK (0xFFFF0000) ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SHIFT (16) ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LENGTH (16) ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3HSINIT, VID3HPITCH ++*/ ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_SHIFT (0) ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_LENGTH (16) ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF0_OFFSET (0x05A8) ++ ++/* PDP, VID3HCOEFF0, VID3HCOEFF0 ++*/ ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF1_OFFSET (0x05AC) ++ ++/* PDP, VID3HCOEFF1, VID3HCOEFF1 ++*/ ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF2_OFFSET (0x05B0) ++ ++/* PDP, VID3HCOEFF2, VID3HCOEFF2 ++*/ ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF3_OFFSET (0x05B4) ++ ++/* PDP, VID3HCOEFF3, VID3HCOEFF3 ++*/ ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF4_OFFSET (0x05B8) ++ ++/* PDP, 
VID3HCOEFF4, VID3HCOEFF4 ++*/ ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF5_OFFSET (0x05BC) ++ ++/* PDP, VID3HCOEFF5, VID3HCOEFF5 ++*/ ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF6_OFFSET (0x05C0) ++ ++/* PDP, VID3HCOEFF6, VID3HCOEFF6 ++*/ ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF7_OFFSET (0x05C4) ++ ++/* PDP, VID3HCOEFF7, VID3HCOEFF7 ++*/ ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF8_OFFSET (0x05C8) ++ ++/* PDP, VID3HCOEFF8, VID3HCOEFF8 ++*/ ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF9_OFFSET (0x05CC) ++ ++/* PDP, VID3HCOEFF9, VID3HCOEFF9 ++*/ ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF10_OFFSET (0x05D0) ++ ++/* PDP, VID3HCOEFF10, VID3HCOEFF10 ++*/ ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF11_OFFSET (0x05D4) ++ ++/* PDP, VID3HCOEFF11, VID3HCOEFF11 ++*/ ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF12_OFFSET (0x05D8) ++ ++/* PDP, VID3HCOEFF12, VID3HCOEFF12 ++*/ ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF13_OFFSET (0x05DC) ++ ++/* PDP, VID3HCOEFF13, VID3HCOEFF13 ++*/ 
++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF14_OFFSET (0x05E0) ++ ++/* PDP, VID3HCOEFF14, VID3HCOEFF14 ++*/ ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF15_OFFSET (0x05E4) ++ ++/* PDP, VID3HCOEFF15, VID3HCOEFF15 ++*/ ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF16_OFFSET (0x05E8) ++ ++/* PDP, VID3HCOEFF16, VID3HCOEFF16 ++*/ ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_MASK (0x000000FF) ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH (8) ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SCALESIZE_OFFSET (0x05EC) ++ ++/* PDP, VID3SCALESIZE, VID3SCALEWIDTH ++*/ ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT (16) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH (12) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT ++*/ ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT (0) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH (12) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SCALECTRL_OFFSET (0x05F0) ++ ++/* PDP, VID4SCALECTRL, VID4HSCALEBP ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_MASK (0x80000000) ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT (31) ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VSCALEBP ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_MASK (0x40000000) ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT (30) ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4HSBEFOREVS ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK (0x20000000) ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT (29) ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VSURUNCTRL ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK (0x08000000) ++#define 
ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT (27) ++#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4PAN_EN ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_MASK (0x00040000) ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT (18) ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VORDER ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_MASK (0x00030000) ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LSBMASK (0x00000003) ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SHIFT (16) ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LENGTH (2) ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VPITCH ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SHIFT (0) ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LENGTH (16) ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VSINIT_OFFSET (0x05F4) ++ ++/* PDP, VID4VSINIT, VID4VINITIAL1 ++*/ ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_MASK (0xFFFF0000) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SHIFT (16) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LENGTH (16) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4VSINIT, VID4VINITIAL0 ++*/ ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_MASK (0x0000FFFF) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SHIFT (0) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LENGTH (16) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF0_OFFSET (0x05F8) ++ ++/* PDP, VID4VCOEFF0, VID4VCOEFF0 ++*/ ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF1_OFFSET (0x05FC) ++ ++/* PDP, VID4VCOEFF1, VID4VCOEFF1 ++*/ ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF2_OFFSET (0x0600) ++ ++/* PDP, VID4VCOEFF2, VID4VCOEFF2 ++*/ ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF3_OFFSET (0x0604) ++ ++/* PDP, VID4VCOEFF3, VID4VCOEFF3 ++*/ ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD IMG_FALSE ++ 
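++/* Usage note (illustrative sketch only; these helpers are not part of
++ * this header): each register field above is described by a _MASK and
++ * _SHIFT pair, which are typically combined as below to extract or
++ * update the field within a 32-bit register value. The helper names
++ * PDP_FIELD_GET/PDP_FIELD_SET are hypothetical, used only for this
++ * example.
++ */
++#define PDP_FIELD_GET(val, field) \
++	(((val) & field##_MASK) >> field##_SHIFT)
++#define PDP_FIELD_SET(val, field, x) \
++	(((val) & ~field##_MASK) | (((x) << field##_SHIFT) & field##_MASK))
++/* e.g. PDP_FIELD_GET(reg, ODN_PDP_VID4SCALECTRL_VID4VPITCH) yields the
++ * 16-bit vertical pitch field from a VID4SCALECTRL register read.
++ */
++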
++#define ODN_PDP_VID4VCOEFF4_OFFSET (0x0608) ++ ++/* PDP, VID4VCOEFF4, VID4VCOEFF4 ++*/ ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF5_OFFSET (0x060C) ++ ++/* PDP, VID4VCOEFF5, VID4VCOEFF5 ++*/ ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF6_OFFSET (0x0610) ++ ++/* PDP, VID4VCOEFF6, VID4VCOEFF6 ++*/ ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF7_OFFSET (0x0614) ++ ++/* PDP, VID4VCOEFF7, VID4VCOEFF7 ++*/ ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF8_OFFSET (0x0618) ++ ++/* PDP, VID4VCOEFF8, VID4VCOEFF8 ++*/ ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_MASK (0x000000FF) ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH (8) ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HSINIT_OFFSET (0x061C) ++ ++/* PDP, VID4HSINIT, VID4HINITIAL ++*/ ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_MASK (0xFFFF0000) ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SHIFT (16) ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LENGTH (16) ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4HSINIT, VID4HPITCH ++*/ ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_SHIFT (0) ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_LENGTH (16) ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF0_OFFSET (0x0620) ++ ++/* PDP, VID4HCOEFF0, VID4HCOEFF0 ++*/ ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF1_OFFSET (0x0624) ++ ++/* PDP, VID4HCOEFF1, VID4HCOEFF1 ++*/ ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF2_OFFSET (0x0628) ++ ++/* PDP, VID4HCOEFF2, VID4HCOEFF2 ++*/ ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_MASK 
(0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF3_OFFSET (0x062C) ++ ++/* PDP, VID4HCOEFF3, VID4HCOEFF3 ++*/ ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF4_OFFSET (0x0630) ++ ++/* PDP, VID4HCOEFF4, VID4HCOEFF4 ++*/ ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF5_OFFSET (0x0634) ++ ++/* PDP, VID4HCOEFF5, VID4HCOEFF5 ++*/ ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF6_OFFSET (0x0638) ++ ++/* PDP, VID4HCOEFF6, VID4HCOEFF6 ++*/ ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF7_OFFSET (0x063C) ++ ++/* PDP, VID4HCOEFF7, VID4HCOEFF7 ++*/ ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF8_OFFSET (0x0640) ++ ++/* PDP, VID4HCOEFF8, VID4HCOEFF8 ++*/ ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF9_OFFSET (0x0644) ++ ++/* PDP, VID4HCOEFF9, VID4HCOEFF9 ++*/ ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF10_OFFSET (0x0648) ++ ++/* PDP, VID4HCOEFF10, VID4HCOEFF10 ++*/ ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF11_OFFSET (0x064C) ++ ++/* PDP, VID4HCOEFF11, VID4HCOEFF11 ++*/ ++#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_MASK (0xFFFFFFFF) ++#define 
ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF12_OFFSET (0x0650) ++ ++/* PDP, VID4HCOEFF12, VID4HCOEFF12 ++*/ ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF13_OFFSET (0x0654) ++ ++/* PDP, VID4HCOEFF13, VID4HCOEFF13 ++*/ ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF14_OFFSET (0x0658) ++ ++/* PDP, VID4HCOEFF14, VID4HCOEFF14 ++*/ ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF15_OFFSET (0x065C) ++ ++/* PDP, VID4HCOEFF15, VID4HCOEFF15 ++*/ ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF16_OFFSET (0x0660) ++ ++/* PDP, VID4HCOEFF16, VID4HCOEFF16 ++*/ ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_MASK (0x000000FF) ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH (8) ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SCALESIZE_OFFSET (0x0664) ++ ++/* PDP, VID4SCALESIZE, VID4SCALEWIDTH ++*/ ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT (16) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH (12) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT ++*/ ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT (0) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH (12) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND0_OFFSET (0x0668) ++ ++/* PDP, PORTER_BLND0, BLND0BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND0, BLND0PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK (0x0000000F) ++#define 
ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND1_OFFSET (0x066C) ++ ++/* PDP, PORTER_BLND1, BLND1BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND1, BLND1PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND2_OFFSET (0x0670) ++ ++/* PDP, PORTER_BLND2, BLND2BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND2, BLND2PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND3_OFFSET (0x0674) ++ ++/* PDP, PORTER_BLND3, BLND3BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND3, BLND3PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND4_OFFSET (0x0678) ++ ++/* PDP, PORTER_BLND4, BLND4BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND4, BLND4PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND5_OFFSET (0x067C) ++ ++/* PDP, PORTER_BLND5, BLND5BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK (0x00000010) ++#define 
ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND5, BLND5PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND6_OFFSET (0x0680) ++ ++/* PDP, PORTER_BLND6, BLND6BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND6, BLND6PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND7_OFFSET (0x0684) ++ ++/* PDP, PORTER_BLND7, BLND7BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND7, BLND7PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06C8) ++ ++/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS ++*/ ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT (16) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE ++*/ ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06CC) ++ ++/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX ++*/ ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK (0x03FF0000) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define 
ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT (16) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN ++*/ ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT (0) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1LUMAKEY_C_RG_OFFSET (0x06D0) ++ ++/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK (0x0FFF0000) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT (16) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH (12) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT (0) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH (12) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1LUMAKEY_C_B_OFFSET (0x06D4) ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK (0x20000000) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT (29) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH (1) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK (0x10000000) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT (28) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH (1) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT (16) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT (0) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH (12) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06D8) ++ ++/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS ++*/ ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT (16) ++#define 
ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE ++*/ ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06DC) ++ ++/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX ++*/ ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK (0x03FF0000) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT (16) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN ++*/ ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT (0) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2LUMAKEY_C_RG_OFFSET (0x06E0) ++ ++/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK (0x0FFF0000) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT (16) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH (12) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT (0) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH (12) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2LUMAKEY_C_B_OFFSET (0x06E4) ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK (0x20000000) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT (29) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH (1) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK (0x10000000) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT (28) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH (1) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT 
(16) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT (0) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH (12) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06E8) ++ ++/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS ++*/ ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT (16) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE ++*/ ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06EC) ++ ++/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX ++*/ ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK (0x03FF0000) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT (16) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN ++*/ ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT (0) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3LUMAKEY_C_RG_OFFSET (0x06F0) ++ ++/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK (0x0FFF0000) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT (16) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH (12) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT (0) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH (12) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3LUMAKEY_C_B_OFFSET (0x06F4) ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT ++*/ ++#define 
ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK (0x20000000) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT (29) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH (1) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK (0x10000000) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT (28) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH (1) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT (16) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT (0) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH (12) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06F8) ++ ++/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS ++*/ ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT (16) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE ++*/ ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06FC) ++ ++/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX ++*/ ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK (0x03FF0000) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT (16) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN ++*/ ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT (0) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ 
++#define ODN_PDP_VID4LUMAKEY_C_RG_OFFSET (0x0700) ++ ++/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK (0x0FFF0000) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT (16) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH (12) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT (0) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH (12) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4LUMAKEY_C_B_OFFSET (0x0704) ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK (0x20000000) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT (29) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH (1) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK (0x10000000) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT (28) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH (1) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT (16) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT (0) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH (12) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF0_OFFSET (0x0708) ++ ++/* PDP, CSCCOEFF0, CSCCOEFFRU ++*/ ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_MASK (0x003FF800) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT (11) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH (11) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF0, CSCCOEFFRY ++*/ ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT (0) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH (11) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF1_OFFSET (0x070C) ++ ++/* PDP, CSCCOEFF1, CSCCOEFFGY ++*/ ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_MASK (0x003FF800) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT (11) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH (11) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF1, CSCCOEFFRV ++*/ ++#define 
ODN_PDP_CSCCOEFF1_CSCCOEFFRV_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT (0) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH (11) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF2_OFFSET (0x0710) ++ ++/* PDP, CSCCOEFF2, CSCCOEFFGV ++*/ ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_MASK (0x003FF800) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT (11) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH (11) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF2, CSCCOEFFGU ++*/ ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT (0) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH (11) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF3_OFFSET (0x0714) ++ ++/* PDP, CSCCOEFF3, CSCCOEFFBU ++*/ ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_MASK (0x003FF800) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT (11) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH (11) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF3, CSCCOEFFBY ++*/ ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT (0) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH (11) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF4_OFFSET (0x0718) ++ ++/* PDP, CSCCOEFF4, CSCCOEFFBV ++*/ ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT (0) ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH (11) ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_BGNDCOL_AR_OFFSET (0x071C) ++ ++/* PDP, BGNDCOL_AR, BGNDCOL_A ++*/ ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK (0x03FF0000) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT (16) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH (10) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, BGNDCOL_AR, BGNDCOL_R ++*/ ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT (0) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH (10) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_BGNDCOL_GB_OFFSET (0x0720) ++ ++/* PDP, BGNDCOL_GB, BGNDCOL_G ++*/ ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK (0x03FF0000) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT (16) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH (10) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, BGNDCOL_GB, BGNDCOL_B ++*/ ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT (0) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH (10) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_BORDCOL_R_OFFSET (0x0724) ++ ++/* PDP, BORDCOL_R, BORDCOL_R ++*/ ++#define ODN_PDP_BORDCOL_R_BORDCOL_R_MASK (0x000003FF) ++#define 
ODN_PDP_BORDCOL_R_BORDCOL_R_LSBMASK (0x000003FF) ++#define ODN_PDP_BORDCOL_R_BORDCOL_R_SHIFT (0) ++#define ODN_PDP_BORDCOL_R_BORDCOL_R_LENGTH (10) ++#define ODN_PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_BORDCOL_GB_OFFSET (0x0728) ++ ++/* PDP, BORDCOL_GB, BORDCOL_G ++*/ ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_MASK (0x03FF0000) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LSBMASK (0x000003FF) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SHIFT (16) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LENGTH (10) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, BORDCOL_GB, BORDCOL_B ++*/ ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_MASK (0x000003FF) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LSBMASK (0x000003FF) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SHIFT (0) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LENGTH (10) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_LINESTAT_OFFSET (0x0734) ++ ++/* PDP, LINESTAT, LINENO ++*/ ++#define ODN_PDP_LINESTAT_LINENO_MASK (0x00001FFF) ++#define ODN_PDP_LINESTAT_LINENO_LSBMASK (0x00001FFF) ++#define ODN_PDP_LINESTAT_LINENO_SHIFT (0) ++#define ODN_PDP_LINESTAT_LINENO_LENGTH (13) ++#define ODN_PDP_LINESTAT_LINENO_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_OFFSET (0x0738) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C12 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C11 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_OFFSET (0x073C) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C21 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C13 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_OFFSET (0x0740) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C23 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH (14) ++#define 
ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C22 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_OFFSET (0x0744) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C32 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C31 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_OFFSET (0x0748) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_C33 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_RANGE ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK (0x00000030) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK (0x00000003) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT (4) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH (2) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_EN ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK (0x00000001) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK (0x00000001) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH (1) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_OFFSET (0x074C) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK (0x0FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH (12) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK (0x00000FFF) ++#define 
ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH (12) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_OFFSET (0x0750) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK (0x00000FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH (12) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_OFFSET (0x0754) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK (0x03FF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH (10) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH (10) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_OFFSET (0x0758) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH (10) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_SIGNAT_R_OFFSET (0x075C) ++ ++/* PDP, SIGNAT_R, SIGNATURE_R ++*/ ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_MASK (0x000003FF) ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LSBMASK (0x000003FF) ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SHIFT (0) ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LENGTH (10) ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_SIGNAT_GB_OFFSET (0x0760) ++ ++/* PDP, SIGNAT_GB, SIGNATURE_G ++*/ ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_MASK (0x03FF0000) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK (0x000003FF) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SHIFT (16) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LENGTH (10) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SIGNAT_GB, SIGNATURE_B ++*/ ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_MASK (0x000003FF) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK (0x000003FF) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SHIFT (0) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LENGTH (10) ++#define 
ODN_PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET (0x0764) ++ ++/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING ++*/ ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK (0x00000004) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT (2) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH (1) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID ++*/ ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK (0x00000002) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT (1) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH (1) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK ++*/ ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT (0) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH (1) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGISTER_UPDATE_STATUS_OFFSET (0x0768) ++ ++/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED ++*/ ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK (0x00000002) ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT (1) ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH (1) ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DBGCTRL_OFFSET (0x076C) ++ ++/* PDP, DBGCTRL, DBG_READ ++*/ ++#define ODN_PDP_DBGCTRL_DBG_READ_MASK (0x00000002) ++#define ODN_PDP_DBGCTRL_DBG_READ_LSBMASK (0x00000001) ++#define ODN_PDP_DBGCTRL_DBG_READ_SHIFT (1) ++#define ODN_PDP_DBGCTRL_DBG_READ_LENGTH (1) ++#define ODN_PDP_DBGCTRL_DBG_READ_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DBGCTRL, DBG_ENAB ++*/ ++#define ODN_PDP_DBGCTRL_DBG_ENAB_MASK (0x00000001) ++#define ODN_PDP_DBGCTRL_DBG_ENAB_LSBMASK (0x00000001) ++#define ODN_PDP_DBGCTRL_DBG_ENAB_SHIFT (0) ++#define ODN_PDP_DBGCTRL_DBG_ENAB_LENGTH (1) ++#define ODN_PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DBGDATA_R_OFFSET (0x0770) ++ ++/* PDP, DBGDATA_R, DBG_DATA_R ++*/ ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_MASK (0x000003FF) ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LSBMASK (0x000003FF) ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SHIFT (0) ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LENGTH (10) ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DBGDATA_GB_OFFSET (0x0774) ++ ++/* PDP, DBGDATA_GB, DBG_DATA_G ++*/ ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_MASK (0x03FF0000) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK (0x000003FF) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SHIFT (16) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LENGTH (10) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DBGDATA_GB, DBG_DATA_B ++*/ ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_MASK (0x000003FF) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK (0x000003FF) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SHIFT (0) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LENGTH (10) ++#define 
ODN_PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DBGSIDE_OFFSET (0x0778) ++ ++/* PDP, DBGSIDE, DBG_VAL ++*/ ++#define ODN_PDP_DBGSIDE_DBG_VAL_MASK (0x00000008) ++#define ODN_PDP_DBGSIDE_DBG_VAL_LSBMASK (0x00000001) ++#define ODN_PDP_DBGSIDE_DBG_VAL_SHIFT (3) ++#define ODN_PDP_DBGSIDE_DBG_VAL_LENGTH (1) ++#define ODN_PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DBGSIDE, DBG_SIDE ++*/ ++#define ODN_PDP_DBGSIDE_DBG_SIDE_MASK (0x00000007) ++#define ODN_PDP_DBGSIDE_DBG_SIDE_LSBMASK (0x00000007) ++#define ODN_PDP_DBGSIDE_DBG_SIDE_SHIFT (0) ++#define ODN_PDP_DBGSIDE_DBG_SIDE_LENGTH (3) ++#define ODN_PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_OUTPUT_OFFSET (0x077C) ++ ++/* PDP, OUTPUT, EIGHT_BIT_OUTPUT ++*/ ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK (0x00000002) ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK (0x00000001) ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT (1) ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH (1) ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OUTPUT, OUTPUT_CONFIG ++*/ ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_MASK (0x00000001) ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK (0x00000001) ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SHIFT (0) ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LENGTH (1) ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_SYNCCTRL_OFFSET (0x0780) ++ ++/* PDP, SYNCCTRL, SYNCACTIVE ++*/ ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK (0x80000000) ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT (31) ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, ODN_PDP_RST ++*/ ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_MASK (0x20000000) ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SHIFT (29) ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, POWERDN ++*/ ++#define ODN_PDP_SYNCCTRL_POWERDN_MASK (0x10000000) ++#define ODN_PDP_SYNCCTRL_POWERDN_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_POWERDN_SHIFT (28) ++#define ODN_PDP_SYNCCTRL_POWERDN_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_POWERDN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, LOWPWRMODE ++*/ ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_MASK (0x08000000) ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SHIFT (27) ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDSYNCTRL ++*/ ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_MASK (0x04000000) ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SHIFT (26) ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDINTCTRL ++*/ ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_MASK (0x02000000) ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SHIFT (25) ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDCTRL ++*/ ++#define ODN_PDP_SYNCCTRL_UPDCTRL_MASK (0x01000000) ++#define ODN_PDP_SYNCCTRL_UPDCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_UPDCTRL_SHIFT (24) ++#define ODN_PDP_SYNCCTRL_UPDCTRL_LENGTH (1) ++#define 
ODN_PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDWAIT ++*/ ++#define ODN_PDP_SYNCCTRL_UPDWAIT_MASK (0x000F0000) ++#define ODN_PDP_SYNCCTRL_UPDWAIT_LSBMASK (0x0000000F) ++#define ODN_PDP_SYNCCTRL_UPDWAIT_SHIFT (16) ++#define ODN_PDP_SYNCCTRL_UPDWAIT_LENGTH (4) ++#define ODN_PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, FIELD_EN ++*/ ++#define ODN_PDP_SYNCCTRL_FIELD_EN_MASK (0x00002000) ++#define ODN_PDP_SYNCCTRL_FIELD_EN_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_FIELD_EN_SHIFT (13) ++#define ODN_PDP_SYNCCTRL_FIELD_EN_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, CSYNC_EN ++*/ ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_MASK (0x00001000) ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_SHIFT (12) ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, CLKPOL ++*/ ++#define ODN_PDP_SYNCCTRL_CLKPOL_MASK (0x00000800) ++#define ODN_PDP_SYNCCTRL_CLKPOL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_CLKPOL_SHIFT (11) ++#define ODN_PDP_SYNCCTRL_CLKPOL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, VS_SLAVE ++*/ ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_MASK (0x00000080) ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_SHIFT (7) ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, HS_SLAVE ++*/ ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_MASK (0x00000040) ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_SHIFT (6) ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, BLNKPOL ++*/ ++#define ODN_PDP_SYNCCTRL_BLNKPOL_MASK (0x00000020) ++#define ODN_PDP_SYNCCTRL_BLNKPOL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT (5) ++#define ODN_PDP_SYNCCTRL_BLNKPOL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, BLNKDIS ++*/ ++#define ODN_PDP_SYNCCTRL_BLNKDIS_MASK (0x00000010) ++#define ODN_PDP_SYNCCTRL_BLNKDIS_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_BLNKDIS_SHIFT (4) ++#define ODN_PDP_SYNCCTRL_BLNKDIS_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, VSPOL ++*/ ++#define ODN_PDP_SYNCCTRL_VSPOL_MASK (0x00000008) ++#define ODN_PDP_SYNCCTRL_VSPOL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_VSPOL_SHIFT (3) ++#define ODN_PDP_SYNCCTRL_VSPOL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_VSPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, VSDIS ++*/ ++#define ODN_PDP_SYNCCTRL_VSDIS_MASK (0x00000004) ++#define ODN_PDP_SYNCCTRL_VSDIS_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_VSDIS_SHIFT (2) ++#define ODN_PDP_SYNCCTRL_VSDIS_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_VSDIS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, HSPOL ++*/ ++#define ODN_PDP_SYNCCTRL_HSPOL_MASK (0x00000002) ++#define ODN_PDP_SYNCCTRL_HSPOL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_HSPOL_SHIFT (1) ++#define ODN_PDP_SYNCCTRL_HSPOL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_HSPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, HSDIS ++*/ ++#define ODN_PDP_SYNCCTRL_HSDIS_MASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_HSDIS_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_HSDIS_SHIFT (0) ++#define ODN_PDP_SYNCCTRL_HSDIS_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_HSDIS_SIGNED_FIELD IMG_FALSE ++ 
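++/* Usage sketch (illustrative only; these helpers are an assumption and not
++ * part of this register map): every field above is described by a _MASK,
++ * _SHIFT, _LENGTH and _SIGNED_FIELD macro, so a field can be read from or
++ * written into a cached 32-bit register value as follows.
++ */
++#if 0 /* example only */
++static inline u32 odn_pdp_field_get(u32 reg, u32 mask, u32 shift)
++{
++	/* Isolate the field bits, then right-align them. */
++	return (reg & mask) >> shift;
++}
++
++static inline u32 odn_pdp_field_set(u32 reg, u32 mask, u32 shift, u32 val)
++{
++	/* Clear the field, then merge in the new left-shifted value. */
++	return (reg & ~mask) | ((val << shift) & mask);
++}
++
++/* e.g. mark horizontal sync as slave in a cached SYNCCTRL value:
++ *
++ *	sync = odn_pdp_field_set(sync, ODN_PDP_SYNCCTRL_HS_SLAVE_MASK,
++ *				 ODN_PDP_SYNCCTRL_HS_SLAVE_SHIFT, 1);
++ */
++#endif
++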
++#define ODN_PDP_HSYNC1_OFFSET (0x0784) ++ ++/* PDP, HSYNC1, HBPS ++*/ ++#define ODN_PDP_HSYNC1_HBPS_MASK (0x1FFF0000) ++#define ODN_PDP_HSYNC1_HBPS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC1_HBPS_SHIFT (16) ++#define ODN_PDP_HSYNC1_HBPS_LENGTH (13) ++#define ODN_PDP_HSYNC1_HBPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HSYNC1, HT ++*/ ++#define ODN_PDP_HSYNC1_HT_MASK (0x00001FFF) ++#define ODN_PDP_HSYNC1_HT_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC1_HT_SHIFT (0) ++#define ODN_PDP_HSYNC1_HT_LENGTH (13) ++#define ODN_PDP_HSYNC1_HT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_HSYNC2_OFFSET (0x0788) ++ ++/* PDP, HSYNC2, HAS ++*/ ++#define ODN_PDP_HSYNC2_HAS_MASK (0x1FFF0000) ++#define ODN_PDP_HSYNC2_HAS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC2_HAS_SHIFT (16) ++#define ODN_PDP_HSYNC2_HAS_LENGTH (13) ++#define ODN_PDP_HSYNC2_HAS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HSYNC2, HLBS ++*/ ++#define ODN_PDP_HSYNC2_HLBS_MASK (0x00001FFF) ++#define ODN_PDP_HSYNC2_HLBS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC2_HLBS_SHIFT (0) ++#define ODN_PDP_HSYNC2_HLBS_LENGTH (13) ++#define ODN_PDP_HSYNC2_HLBS_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_HSYNC3_OFFSET (0x078C) ++ ++/* PDP, HSYNC3, HFPS ++*/ ++#define ODN_PDP_HSYNC3_HFPS_MASK (0x1FFF0000) ++#define ODN_PDP_HSYNC3_HFPS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC3_HFPS_SHIFT (16) ++#define ODN_PDP_HSYNC3_HFPS_LENGTH (13) ++#define ODN_PDP_HSYNC3_HFPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HSYNC3, HRBS ++*/ ++#define ODN_PDP_HSYNC3_HRBS_MASK (0x00001FFF) ++#define ODN_PDP_HSYNC3_HRBS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC3_HRBS_SHIFT (0) ++#define ODN_PDP_HSYNC3_HRBS_LENGTH (13) ++#define ODN_PDP_HSYNC3_HRBS_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VSYNC1_OFFSET (0x0790) ++ ++/* PDP, VSYNC1, VBPS ++*/ ++#define ODN_PDP_VSYNC1_VBPS_MASK (0x1FFF0000) ++#define ODN_PDP_VSYNC1_VBPS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC1_VBPS_SHIFT (16) ++#define ODN_PDP_VSYNC1_VBPS_LENGTH (13) ++#define ODN_PDP_VSYNC1_VBPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VSYNC1, VT ++*/ ++#define ODN_PDP_VSYNC1_VT_MASK (0x00001FFF) ++#define ODN_PDP_VSYNC1_VT_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC1_VT_SHIFT (0) ++#define ODN_PDP_VSYNC1_VT_LENGTH (13) ++#define ODN_PDP_VSYNC1_VT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VSYNC2_OFFSET (0x0794) ++ ++/* PDP, VSYNC2, VAS ++*/ ++#define ODN_PDP_VSYNC2_VAS_MASK (0x1FFF0000) ++#define ODN_PDP_VSYNC2_VAS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC2_VAS_SHIFT (16) ++#define ODN_PDP_VSYNC2_VAS_LENGTH (13) ++#define ODN_PDP_VSYNC2_VAS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VSYNC2, VTBS ++*/ ++#define ODN_PDP_VSYNC2_VTBS_MASK (0x00001FFF) ++#define ODN_PDP_VSYNC2_VTBS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC2_VTBS_SHIFT (0) ++#define ODN_PDP_VSYNC2_VTBS_LENGTH (13) ++#define ODN_PDP_VSYNC2_VTBS_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VSYNC3_OFFSET (0x0798) ++ ++/* PDP, VSYNC3, VFPS ++*/ ++#define ODN_PDP_VSYNC3_VFPS_MASK (0x1FFF0000) ++#define ODN_PDP_VSYNC3_VFPS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC3_VFPS_SHIFT (16) ++#define ODN_PDP_VSYNC3_VFPS_LENGTH (13) ++#define ODN_PDP_VSYNC3_VFPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VSYNC3, VBBS ++*/ ++#define ODN_PDP_VSYNC3_VBBS_MASK (0x00001FFF) ++#define ODN_PDP_VSYNC3_VBBS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC3_VBBS_SHIFT (0) ++#define ODN_PDP_VSYNC3_VBBS_LENGTH (13) ++#define ODN_PDP_VSYNC3_VBBS_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_INTSTAT_OFFSET (0x079C) ++ ++/* PDP, INTSTAT, INTS_VID4ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_MASK (0x00080000) 
++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SHIFT (19) ++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID3ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_MASK (0x00040000) ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SHIFT (18) ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID2ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_MASK (0x00020000) ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SHIFT (17) ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID1ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_MASK (0x00010000) ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SHIFT (16) ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH4ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_MASK (0x00008000) ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT (15) ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH3ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_MASK (0x00004000) ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT (14) ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH2ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_MASK (0x00002000) ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT (13) ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH1ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_MASK (0x00001000) ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT (12) ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID4URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_MASK (0x00000800) ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_SHIFT (11) ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID3URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_MASK (0x00000400) ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_SHIFT (10) ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID2URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_MASK (0x00000200) ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_SHIFT (9) ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID1URUN ++*/ ++#define 
ODN_PDP_INTSTAT_INTS_VID1URUN_MASK (0x00000100) ++#define ODN_PDP_INTSTAT_INTS_VID1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID1URUN_SHIFT (8) ++#define ODN_PDP_INTSTAT_INTS_VID1URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH4URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_MASK (0x00000080) ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SHIFT (7) ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH3URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_MASK (0x00000040) ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SHIFT (6) ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH2URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_MASK (0x00000020) ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SHIFT (5) ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH1URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_MASK (0x00000010) ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SHIFT (4) ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VBLNK1 ++*/ ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_MASK (0x00000008) ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_SHIFT (3) ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VBLNK0 ++*/ ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_MASK (0x00000004) ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT (2) ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_HBLNK1 ++*/ ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_MASK (0x00000002) ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_SHIFT (1) ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_HBLNK0 ++*/ ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_MASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_SHIFT (0) ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_INTENAB_OFFSET (0x07A0) ++ ++/* PDP, INTENAB, INTEN_VID4ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_MASK (0x00080000) ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SHIFT (19) ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID3ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_MASK (0x00040000) ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SHIFT (18) ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD IMG_FALSE ++ 
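INTSTAT, INTENAB and the INTCLR register defined just below use matching bit positions (VBLNK0 is bit 2 in all three), which suggests the usual status/enable/clear interrupt handling pattern. A minimal stand-alone sketch of that pattern follows; it is not from the patch. The offsets and the VBLNK0 mask are copied from this header, the rd()/wr() mock MMIO helpers and pdp_irq_handler() are ours, and the assumption that INTCLR is write-one-to-clear is implied by the field names but not guaranteed by this header alone.

#include <stdint.h>
#include <stdio.h>

/* Values copied from the definitions in this header. */
#define ODN_PDP_INTSTAT_OFFSET           (0x079C)
#define ODN_PDP_INTENAB_OFFSET           (0x07A0)
#define ODN_PDP_INTCLR_OFFSET            (0x07A4)
#define ODN_PDP_INTSTAT_INTS_VBLNK0_MASK (0x00000004)

/* Mock MMIO backed by an array so the sketch runs stand-alone. */
static uint32_t regs[0x1000 / 4];
static uint32_t rd(uint32_t off)             { return regs[off / 4]; }
static void     wr(uint32_t off, uint32_t v) { regs[off / 4] = v; }

static void pdp_irq_handler(void)
{
	/* Only act on status bits whose interrupt is enabled. */
	uint32_t pending = rd(ODN_PDP_INTSTAT_OFFSET) &
			   rd(ODN_PDP_INTENAB_OFFSET);

	if (pending & ODN_PDP_INTSTAT_INTS_VBLNK0_MASK)
		printf("vblank on pipe 0\n");

	/* Acknowledge what was handled (assumed write-one-to-clear). */
	wr(ODN_PDP_INTCLR_OFFSET, pending);
}

int main(void)
{
	wr(ODN_PDP_INTENAB_OFFSET, ODN_PDP_INTSTAT_INTS_VBLNK0_MASK);
	wr(ODN_PDP_INTSTAT_OFFSET, ODN_PDP_INTSTAT_INTS_VBLNK0_MASK);
	pdp_irq_handler();
	return 0;
}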
++/* PDP, INTENAB, INTEN_VID2ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_MASK (0x00020000) ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SHIFT (17) ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID1ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_MASK (0x00010000) ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SHIFT (16) ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH4ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_MASK (0x00008000) ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT (15) ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH3ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_MASK (0x00004000) ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT (14) ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH2ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_MASK (0x00002000) ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT (13) ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH1ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_MASK (0x00001000) ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT (12) ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID4URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_MASK (0x00000800) ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_SHIFT (11) ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID3URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_MASK (0x00000400) ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_SHIFT (10) ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID2URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_MASK (0x00000200) ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_SHIFT (9) ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID1URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_MASK (0x00000100) ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_SHIFT (8) ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH4URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_MASK (0x00000080) ++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SHIFT (7) 
++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH3URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_MASK (0x00000040) ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SHIFT (6) ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH2URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_MASK (0x00000020) ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SHIFT (5) ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH1URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_MASK (0x00000010) ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SHIFT (4) ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VBLNK1 ++*/ ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_MASK (0x00000008) ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_SHIFT (3) ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VBLNK0 ++*/ ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_MASK (0x00000004) ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT (2) ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_HBLNK1 ++*/ ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_MASK (0x00000002) ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_SHIFT (1) ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_HBLNK0 ++*/ ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_MASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_SHIFT (0) ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_INTCLR_OFFSET (0x07A4) ++ ++/* PDP, INTCLR, INTCLR_VID4ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_MASK (0x00080000) ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SHIFT (19) ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID3ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_MASK (0x00040000) ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SHIFT (18) ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID2ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_MASK (0x00020000) ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SHIFT (17) ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID1ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_MASK (0x00010000) ++#define 
ODN_PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SHIFT (16) ++#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH4ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_MASK (0x00008000) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT (15) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH3ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_MASK (0x00004000) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT (14) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH2ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_MASK (0x00002000) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT (13) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH1ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_MASK (0x00001000) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT (12) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID4URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_MASK (0x00000800) ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SHIFT (11) ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID3URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_MASK (0x00000400) ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SHIFT (10) ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID2URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_MASK (0x00000200) ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SHIFT (9) ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID1URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_MASK (0x00000100) ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SHIFT (8) ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH4URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_MASK (0x00000080) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT (7) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH3URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_MASK (0x00000040) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT (6) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD 
IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH2URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_MASK (0x00000020) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT (5) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH1URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_MASK (0x00000010) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT (4) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VBLNK1 ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_MASK (0x00000008) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SHIFT (3) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VBLNK0 ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_MASK (0x00000004) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT (2) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_HBLNK1 ++*/ ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_MASK (0x00000002) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SHIFT (1) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_HBLNK0 ++*/ ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_MASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SHIFT (0) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_MEMCTRL_OFFSET (0x07A8) ++ ++/* PDP, MEMCTRL, MEMREFRESH ++*/ ++#define ODN_PDP_MEMCTRL_MEMREFRESH_MASK (0xC0000000) ++#define ODN_PDP_MEMCTRL_MEMREFRESH_LSBMASK (0x00000003) ++#define ODN_PDP_MEMCTRL_MEMREFRESH_SHIFT (30) ++#define ODN_PDP_MEMCTRL_MEMREFRESH_LENGTH (2) ++#define ODN_PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, MEMCTRL, BURSTLEN ++*/ ++#define ODN_PDP_MEMCTRL_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_MEMCTRL_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_MEMCTRL_BURSTLEN_SHIFT (0) ++#define ODN_PDP_MEMCTRL_BURSTLEN_LENGTH (8) ++#define ODN_PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_MEM_THRESH_OFFSET (0x07AC) ++ ++/* PDP, MEM_THRESH, UVTHRESHOLD ++*/ ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, MEM_THRESH, YTHRESHOLD ++*/ ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, MEM_THRESH, THRESHOLD ++*/ ++#define ODN_PDP_MEM_THRESH_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_MEM_THRESH_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_MEM_THRESH_THRESHOLD_SHIFT (0) ++#define 
ODN_PDP_MEM_THRESH_THRESHOLD_LENGTH (9) ++#define ODN_PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_ALTERNATE_3D_CTRL_OFFSET (0x07B0) ++ ++/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON ++*/ ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK (0x00000010) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK (0x00000001) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT (4) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH (1) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL ++*/ ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK (0x00000007) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK (0x00000007) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT (0) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH (3) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA0_R_OFFSET (0x07B4) ++ ++/* PDP, GAMMA0_R, GAMMA0_R ++*/ ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_SHIFT (0) ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_LENGTH (10) ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA0_GB_OFFSET (0x07B8) ++ ++/* PDP, GAMMA0_GB, GAMMA0_G ++*/ ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SHIFT (16) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LENGTH (10) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA0_GB, GAMMA0_B ++*/ ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SHIFT (0) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LENGTH (10) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA1_R_OFFSET (0x07BC) ++ ++/* PDP, GAMMA1_R, GAMMA1_R ++*/ ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_SHIFT (0) ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_LENGTH (10) ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA1_GB_OFFSET (0x07C0) ++ ++/* PDP, GAMMA1_GB, GAMMA1_G ++*/ ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SHIFT (16) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LENGTH (10) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA1_GB, GAMMA1_B ++*/ ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SHIFT (0) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LENGTH (10) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA2_R_OFFSET (0x07C4) ++ ++/* PDP, GAMMA2_R, GAMMA2_R ++*/ ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_SHIFT (0) ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_LENGTH (10) ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA2_GB_OFFSET (0x07C8) ++ ++/* PDP, GAMMA2_GB, GAMMA2_G ++*/ ++#define ODN_PDP_GAMMA2_GB_GAMMA2_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SHIFT (16) ++#define 
ODN_PDP_GAMMA2_GB_GAMMA2_G_LENGTH (10) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA2_GB, GAMMA2_B ++*/ ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SHIFT (0) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LENGTH (10) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA3_R_OFFSET (0x07CC) ++ ++/* PDP, GAMMA3_R, GAMMA3_R ++*/ ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_SHIFT (0) ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_LENGTH (10) ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA3_GB_OFFSET (0x07D0) ++ ++/* PDP, GAMMA3_GB, GAMMA3_G ++*/ ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SHIFT (16) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LENGTH (10) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA3_GB, GAMMA3_B ++*/ ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SHIFT (0) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LENGTH (10) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA4_R_OFFSET (0x07D4) ++ ++/* PDP, GAMMA4_R, GAMMA4_R ++*/ ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_SHIFT (0) ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_LENGTH (10) ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA4_GB_OFFSET (0x07D8) ++ ++/* PDP, GAMMA4_GB, GAMMA4_G ++*/ ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SHIFT (16) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LENGTH (10) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA4_GB, GAMMA4_B ++*/ ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SHIFT (0) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LENGTH (10) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA5_R_OFFSET (0x07DC) ++ ++/* PDP, GAMMA5_R, GAMMA5_R ++*/ ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_SHIFT (0) ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_LENGTH (10) ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA5_GB_OFFSET (0x07E0) ++ ++/* PDP, GAMMA5_GB, GAMMA5_G ++*/ ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SHIFT (16) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LENGTH (10) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA5_GB, GAMMA5_B ++*/ ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SHIFT (0) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LENGTH (10) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA6_R_OFFSET (0x07E4) ++ ++/* PDP, GAMMA6_R, GAMMA6_R ++*/ ++#define 
ODN_PDP_GAMMA6_R_GAMMA6_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA6_R_GAMMA6_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA6_R_GAMMA6_R_SHIFT (0) ++#define ODN_PDP_GAMMA6_R_GAMMA6_R_LENGTH (10) ++#define ODN_PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA6_GB_OFFSET (0x07E8) ++ ++/* PDP, GAMMA6_GB, GAMMA6_G ++*/ ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SHIFT (16) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LENGTH (10) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA6_GB, GAMMA6_B ++*/ ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SHIFT (0) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LENGTH (10) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA7_R_OFFSET (0x07EC) ++ ++/* PDP, GAMMA7_R, GAMMA7_R ++*/ ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_SHIFT (0) ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_LENGTH (10) ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA7_GB_OFFSET (0x07F0) ++ ++/* PDP, GAMMA7_GB, GAMMA7_G ++*/ ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SHIFT (16) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LENGTH (10) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA7_GB, GAMMA7_B ++*/ ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SHIFT (0) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LENGTH (10) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA8_R_OFFSET (0x07F4) ++ ++/* PDP, GAMMA8_R, GAMMA8_R ++*/ ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_SHIFT (0) ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_LENGTH (10) ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA8_GB_OFFSET (0x07F8) ++ ++/* PDP, GAMMA8_GB, GAMMA8_G ++*/ ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SHIFT (16) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LENGTH (10) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA8_GB, GAMMA8_B ++*/ ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SHIFT (0) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LENGTH (10) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA9_R_OFFSET (0x07FC) ++ ++/* PDP, GAMMA9_R, GAMMA9_R ++*/ ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_SHIFT (0) ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_LENGTH (10) ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA9_GB_OFFSET (0x0800) ++ ++/* PDP, GAMMA9_GB, GAMMA9_G ++*/ ++#define ODN_PDP_GAMMA9_GB_GAMMA9_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SHIFT (16) ++#define 
ODN_PDP_GAMMA9_GB_GAMMA9_G_LENGTH (10) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA9_GB, GAMMA9_B ++*/ ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SHIFT (0) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LENGTH (10) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA10_R_OFFSET (0x0804) ++ ++/* PDP, GAMMA10_R, GAMMA10_R ++*/ ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_SHIFT (0) ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_LENGTH (10) ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA10_GB_OFFSET (0x0808) ++ ++/* PDP, GAMMA10_GB, GAMMA10_G ++*/ ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SHIFT (16) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LENGTH (10) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA10_GB, GAMMA10_B ++*/ ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SHIFT (0) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LENGTH (10) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA11_R_OFFSET (0x080C) ++ ++/* PDP, GAMMA11_R, GAMMA11_R ++*/ ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_SHIFT (0) ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_LENGTH (10) ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA11_GB_OFFSET (0x0810) ++ ++/* PDP, GAMMA11_GB, GAMMA11_G ++*/ ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SHIFT (16) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LENGTH (10) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA11_GB, GAMMA11_B ++*/ ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SHIFT (0) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LENGTH (10) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA12_R_OFFSET (0x0814) ++ ++/* PDP, GAMMA12_R, GAMMA12_R ++*/ ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_SHIFT (0) ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_LENGTH (10) ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA12_GB_OFFSET (0x0818) ++ ++/* PDP, GAMMA12_GB, GAMMA12_G ++*/ ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SHIFT (16) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LENGTH (10) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA12_GB, GAMMA12_B ++*/ ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SHIFT (0) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LENGTH (10) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD IMG_FALSE ++ 
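The GAMMA<n>_R/GAMMA<n>_GB pairs sit at a regular 8-byte stride (GAMMA0_R at 0x07B4, GAMMA0_GB at 0x07B8, GAMMA1_R at 0x07BC, and so on up to GAMMA32), each component being a 10-bit field with G in bits [25:16] and B in bits [9:0] of the _GB word, so the whole lookup table can be programmed in a loop rather than one register at a time. The sketch below is ours, not the driver's: the base offsets, stride, entry count and shifts are taken from the definitions in this header, while the wr() helper and pdp_write_gamma_ramp() are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define ODN_PDP_GAMMA0_R_OFFSET  (0x07B4)
#define ODN_PDP_GAMMA0_GB_OFFSET (0x07B8)
#define GAMMA_STRIDE  8      /* one R/GB register pair per table entry */
#define GAMMA_ENTRIES 33     /* GAMMA0 .. GAMMA32 in this header */
#define GAMMA_G_SHIFT 16     /* ODN_PDP_GAMMAn_GB_GAMMAn_G_SHIFT */
#define GAMMA_LSBMASK 0x3FF  /* all gamma components are 10 bit */

/* Stand-in for an MMIO write; prints instead of touching hardware. */
static void wr(uint32_t off, uint32_t v)
{
	printf("[0x%04x] = 0x%08x\n", (unsigned)off, (unsigned)v);
}

static void pdp_write_gamma_ramp(void)
{
	for (uint32_t i = 0; i < GAMMA_ENTRIES; i++) {
		/* Linear 10-bit ramp: 0 .. 1023 across 33 entries. */
		uint32_t v = (i * GAMMA_LSBMASK) / (GAMMA_ENTRIES - 1);

		wr(ODN_PDP_GAMMA0_R_OFFSET + i * GAMMA_STRIDE, v);
		wr(ODN_PDP_GAMMA0_GB_OFFSET + i * GAMMA_STRIDE,
		   (v << GAMMA_G_SHIFT) | v); /* G in [25:16], B in [9:0] */
	}
}

int main(void) { pdp_write_gamma_ramp(); return 0; }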
++#define ODN_PDP_GAMMA13_R_OFFSET (0x081C) ++ ++/* PDP, GAMMA13_R, GAMMA13_R ++*/ ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_SHIFT (0) ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_LENGTH (10) ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA13_GB_OFFSET (0x0820) ++ ++/* PDP, GAMMA13_GB, GAMMA13_G ++*/ ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SHIFT (16) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LENGTH (10) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA13_GB, GAMMA13_B ++*/ ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SHIFT (0) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LENGTH (10) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA14_R_OFFSET (0x0824) ++ ++/* PDP, GAMMA14_R, GAMMA14_R ++*/ ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_SHIFT (0) ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_LENGTH (10) ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA14_GB_OFFSET (0x0828) ++ ++/* PDP, GAMMA14_GB, GAMMA14_G ++*/ ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SHIFT (16) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LENGTH (10) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA14_GB, GAMMA14_B ++*/ ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SHIFT (0) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LENGTH (10) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA15_R_OFFSET (0x082C) ++ ++/* PDP, GAMMA15_R, GAMMA15_R ++*/ ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_SHIFT (0) ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_LENGTH (10) ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA15_GB_OFFSET (0x0830) ++ ++/* PDP, GAMMA15_GB, GAMMA15_G ++*/ ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SHIFT (16) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LENGTH (10) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA15_GB, GAMMA15_B ++*/ ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SHIFT (0) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LENGTH (10) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA16_R_OFFSET (0x0834) ++ ++/* PDP, GAMMA16_R, GAMMA16_R ++*/ ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_SHIFT (0) ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_LENGTH (10) ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA16_GB_OFFSET (0x0838) ++ ++/* 
PDP, GAMMA16_GB, GAMMA16_G ++*/ ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SHIFT (16) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LENGTH (10) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA16_GB, GAMMA16_B ++*/ ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SHIFT (0) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LENGTH (10) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA17_R_OFFSET (0x083C) ++ ++/* PDP, GAMMA17_R, GAMMA17_R ++*/ ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_SHIFT (0) ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_LENGTH (10) ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA17_GB_OFFSET (0x0840) ++ ++/* PDP, GAMMA17_GB, GAMMA17_G ++*/ ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SHIFT (16) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LENGTH (10) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA17_GB, GAMMA17_B ++*/ ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SHIFT (0) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LENGTH (10) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA18_R_OFFSET (0x0844) ++ ++/* PDP, GAMMA18_R, GAMMA18_R ++*/ ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_SHIFT (0) ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_LENGTH (10) ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA18_GB_OFFSET (0x0848) ++ ++/* PDP, GAMMA18_GB, GAMMA18_G ++*/ ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SHIFT (16) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LENGTH (10) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA18_GB, GAMMA18_B ++*/ ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SHIFT (0) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LENGTH (10) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA19_R_OFFSET (0x084C) ++ ++/* PDP, GAMMA19_R, GAMMA19_R ++*/ ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_SHIFT (0) ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_LENGTH (10) ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA19_GB_OFFSET (0x0850) ++ ++/* PDP, GAMMA19_GB, GAMMA19_G ++*/ ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SHIFT (16) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LENGTH (10) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA19_GB, GAMMA19_B ++*/ ++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_MASK (0x000003FF) 
++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SHIFT (0) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LENGTH (10) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA20_R_OFFSET (0x0854) ++ ++/* PDP, GAMMA20_R, GAMMA20_R ++*/ ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_SHIFT (0) ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_LENGTH (10) ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA20_GB_OFFSET (0x0858) ++ ++/* PDP, GAMMA20_GB, GAMMA20_G ++*/ ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SHIFT (16) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LENGTH (10) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA20_GB, GAMMA20_B ++*/ ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SHIFT (0) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LENGTH (10) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA21_R_OFFSET (0x085C) ++ ++/* PDP, GAMMA21_R, GAMMA21_R ++*/ ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_SHIFT (0) ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_LENGTH (10) ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA21_GB_OFFSET (0x0860) ++ ++/* PDP, GAMMA21_GB, GAMMA21_G ++*/ ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SHIFT (16) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LENGTH (10) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA21_GB, GAMMA21_B ++*/ ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SHIFT (0) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LENGTH (10) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA22_R_OFFSET (0x0864) ++ ++/* PDP, GAMMA22_R, GAMMA22_R ++*/ ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_SHIFT (0) ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_LENGTH (10) ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA22_GB_OFFSET (0x0868) ++ ++/* PDP, GAMMA22_GB, GAMMA22_G ++*/ ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SHIFT (16) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LENGTH (10) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA22_GB, GAMMA22_B ++*/ ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SHIFT (0) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LENGTH (10) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA23_R_OFFSET (0x086C) ++ ++/* PDP, GAMMA23_R, GAMMA23_R ++*/ ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_LSBMASK 
(0x000003FF) ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_SHIFT (0) ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_LENGTH (10) ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA23_GB_OFFSET (0x0870) ++ ++/* PDP, GAMMA23_GB, GAMMA23_G ++*/ ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SHIFT (16) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LENGTH (10) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA23_GB, GAMMA23_B ++*/ ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SHIFT (0) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LENGTH (10) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA24_R_OFFSET (0x0874) ++ ++/* PDP, GAMMA24_R, GAMMA24_R ++*/ ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_SHIFT (0) ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_LENGTH (10) ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA24_GB_OFFSET (0x0878) ++ ++/* PDP, GAMMA24_GB, GAMMA24_G ++*/ ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SHIFT (16) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LENGTH (10) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA24_GB, GAMMA24_B ++*/ ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SHIFT (0) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LENGTH (10) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA25_R_OFFSET (0x087C) ++ ++/* PDP, GAMMA25_R, GAMMA25_R ++*/ ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_SHIFT (0) ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_LENGTH (10) ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA25_GB_OFFSET (0x0880) ++ ++/* PDP, GAMMA25_GB, GAMMA25_G ++*/ ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SHIFT (16) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LENGTH (10) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA25_GB, GAMMA25_B ++*/ ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SHIFT (0) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LENGTH (10) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA26_R_OFFSET (0x0884) ++ ++/* PDP, GAMMA26_R, GAMMA26_R ++*/ ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_SHIFT (0) ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_LENGTH (10) ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA26_GB_OFFSET (0x0888) ++ ++/* PDP, GAMMA26_GB, GAMMA26_G ++*/ ++#define ODN_PDP_GAMMA26_GB_GAMMA26_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LSBMASK (0x000003FF) ++#define 
ODN_PDP_GAMMA26_GB_GAMMA26_G_SHIFT (16) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LENGTH (10) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA26_GB, GAMMA26_B ++*/ ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SHIFT (0) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LENGTH (10) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA27_R_OFFSET (0x088C) ++ ++/* PDP, GAMMA27_R, GAMMA27_R ++*/ ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_SHIFT (0) ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_LENGTH (10) ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA27_GB_OFFSET (0x0890) ++ ++/* PDP, GAMMA27_GB, GAMMA27_G ++*/ ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SHIFT (16) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LENGTH (10) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA27_GB, GAMMA27_B ++*/ ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SHIFT (0) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LENGTH (10) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA28_R_OFFSET (0x0894) ++ ++/* PDP, GAMMA28_R, GAMMA28_R ++*/ ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_SHIFT (0) ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_LENGTH (10) ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA28_GB_OFFSET (0x0898) ++ ++/* PDP, GAMMA28_GB, GAMMA28_G ++*/ ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SHIFT (16) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LENGTH (10) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA28_GB, GAMMA28_B ++*/ ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SHIFT (0) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LENGTH (10) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA29_R_OFFSET (0x089C) ++ ++/* PDP, GAMMA29_R, GAMMA29_R ++*/ ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_SHIFT (0) ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_LENGTH (10) ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA29_GB_OFFSET (0x08A0) ++ ++/* PDP, GAMMA29_GB, GAMMA29_G ++*/ ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SHIFT (16) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LENGTH (10) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA29_GB, GAMMA29_B ++*/ ++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SHIFT (0) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LENGTH (10) 
++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA30_R_OFFSET (0x08A4) ++ ++/* PDP, GAMMA30_R, GAMMA30_R ++*/ ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_SHIFT (0) ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_LENGTH (10) ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA30_GB_OFFSET (0x08A8) ++ ++/* PDP, GAMMA30_GB, GAMMA30_G ++*/ ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SHIFT (16) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LENGTH (10) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA30_GB, GAMMA30_B ++*/ ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SHIFT (0) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LENGTH (10) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA31_R_OFFSET (0x08AC) ++ ++/* PDP, GAMMA31_R, GAMMA31_R ++*/ ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_SHIFT (0) ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_LENGTH (10) ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA31_GB_OFFSET (0x08B0) ++ ++/* PDP, GAMMA31_GB, GAMMA31_G ++*/ ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SHIFT (16) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LENGTH (10) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA31_GB, GAMMA31_B ++*/ ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SHIFT (0) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LENGTH (10) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA32_R_OFFSET (0x08B4) ++ ++/* PDP, GAMMA32_R, GAMMA32_R ++*/ ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_SHIFT (0) ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_LENGTH (10) ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA32_GB_OFFSET (0x08B8) ++ ++/* PDP, GAMMA32_GB, GAMMA32_G ++*/ ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SHIFT (16) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LENGTH (10) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA32_GB, GAMMA32_B ++*/ ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SHIFT (0) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LENGTH (10) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VEVENT_OFFSET (0x08BC) ++ ++/* PDP, VEVENT, VEVENT ++*/ ++#define ODN_PDP_VEVENT_VEVENT_MASK (0x1FFF0000) ++#define ODN_PDP_VEVENT_VEVENT_LSBMASK (0x00001FFF) ++#define ODN_PDP_VEVENT_VEVENT_SHIFT (16) ++#define ODN_PDP_VEVENT_VEVENT_LENGTH (13) ++#define ODN_PDP_VEVENT_VEVENT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VEVENT, VFETCH 
++*/ ++#define ODN_PDP_VEVENT_VFETCH_MASK (0x00001FFF) ++#define ODN_PDP_VEVENT_VFETCH_LSBMASK (0x00001FFF) ++#define ODN_PDP_VEVENT_VFETCH_SHIFT (0) ++#define ODN_PDP_VEVENT_VFETCH_LENGTH (13) ++#define ODN_PDP_VEVENT_VFETCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_HDECTRL_OFFSET (0x08C0) ++ ++/* PDP, HDECTRL, HDES ++*/ ++#define ODN_PDP_HDECTRL_HDES_MASK (0x1FFF0000) ++#define ODN_PDP_HDECTRL_HDES_LSBMASK (0x00001FFF) ++#define ODN_PDP_HDECTRL_HDES_SHIFT (16) ++#define ODN_PDP_HDECTRL_HDES_LENGTH (13) ++#define ODN_PDP_HDECTRL_HDES_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HDECTRL, HDEF ++*/ ++#define ODN_PDP_HDECTRL_HDEF_MASK (0x00001FFF) ++#define ODN_PDP_HDECTRL_HDEF_LSBMASK (0x00001FFF) ++#define ODN_PDP_HDECTRL_HDEF_SHIFT (0) ++#define ODN_PDP_HDECTRL_HDEF_LENGTH (13) ++#define ODN_PDP_HDECTRL_HDEF_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VDECTRL_OFFSET (0x08C4) ++ ++/* PDP, VDECTRL, VDES ++*/ ++#define ODN_PDP_VDECTRL_VDES_MASK (0x1FFF0000) ++#define ODN_PDP_VDECTRL_VDES_LSBMASK (0x00001FFF) ++#define ODN_PDP_VDECTRL_VDES_SHIFT (16) ++#define ODN_PDP_VDECTRL_VDES_LENGTH (13) ++#define ODN_PDP_VDECTRL_VDES_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VDECTRL, VDEF ++*/ ++#define ODN_PDP_VDECTRL_VDEF_MASK (0x00001FFF) ++#define ODN_PDP_VDECTRL_VDEF_LSBMASK (0x00001FFF) ++#define ODN_PDP_VDECTRL_VDEF_SHIFT (0) ++#define ODN_PDP_VDECTRL_VDEF_LENGTH (13) ++#define ODN_PDP_VDECTRL_VDEF_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_OPMASK_R_OFFSET (0x08C8) ++ ++/* PDP, OPMASK_R, MASKLEVEL ++*/ ++#define ODN_PDP_OPMASK_R_MASKLEVEL_MASK (0x80000000) ++#define ODN_PDP_OPMASK_R_MASKLEVEL_LSBMASK (0x00000001) ++#define ODN_PDP_OPMASK_R_MASKLEVEL_SHIFT (31) ++#define ODN_PDP_OPMASK_R_MASKLEVEL_LENGTH (1) ++#define ODN_PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OPMASK_R, BLANKLEVEL ++*/ ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_MASK (0x40000000) ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_LSBMASK (0x00000001) ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_SHIFT (30) ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_LENGTH (1) ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OPMASK_R, MASKR ++*/ ++#define ODN_PDP_OPMASK_R_MASKR_MASK (0x000003FF) ++#define ODN_PDP_OPMASK_R_MASKR_LSBMASK (0x000003FF) ++#define ODN_PDP_OPMASK_R_MASKR_SHIFT (0) ++#define ODN_PDP_OPMASK_R_MASKR_LENGTH (10) ++#define ODN_PDP_OPMASK_R_MASKR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_OPMASK_GB_OFFSET (0x08CC) ++ ++/* PDP, OPMASK_GB, MASKG ++*/ ++#define ODN_PDP_OPMASK_GB_MASKG_MASK (0x03FF0000) ++#define ODN_PDP_OPMASK_GB_MASKG_LSBMASK (0x000003FF) ++#define ODN_PDP_OPMASK_GB_MASKG_SHIFT (16) ++#define ODN_PDP_OPMASK_GB_MASKG_LENGTH (10) ++#define ODN_PDP_OPMASK_GB_MASKG_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OPMASK_GB, MASKB ++*/ ++#define ODN_PDP_OPMASK_GB_MASKB_MASK (0x000003FF) ++#define ODN_PDP_OPMASK_GB_MASKB_LSBMASK (0x000003FF) ++#define ODN_PDP_OPMASK_GB_MASKB_SHIFT (0) ++#define ODN_PDP_OPMASK_GB_MASKB_LENGTH (10) ++#define ODN_PDP_OPMASK_GB_MASKB_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGLD_ADDR_CTRL_OFFSET (0x08D0) ++ ++/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN ++*/ ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK (0xFFFFFFF0) ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK (0x0FFFFFFF) ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT (4) ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH (28) ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGLD_ADDR_STAT_OFFSET (0x08D4) ++ ++/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT ++*/ ++#define 
ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK (0xFFFFFFF0)
++#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK (0x0FFFFFFF)
++#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT (4)
++#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH (28)
++#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD IMG_FALSE
++
++#define ODN_PDP_REGLD_STAT_OFFSET (0x08D8)
++
++/* PDP, REGLD_STAT, REGLD_ADDREN
++*/
++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_MASK (0x00800000)
++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK (0x00000001)
++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SHIFT (23)
++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LENGTH (1)
++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD IMG_FALSE
++
++#define ODN_PDP_REGLD_CTRL_OFFSET (0x08DC)
++
++/* PDP, REGLD_CTRL, REGLD_ADDRLEN
++*/
++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK (0xFF000000)
++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK (0x000000FF)
++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT (24)
++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH (8)
++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD IMG_FALSE
++
++/* PDP, REGLD_CTRL, REGLD_VAL
++*/
++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_MASK (0x00800000)
++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LSBMASK (0x00000001)
++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SHIFT (23)
++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LENGTH (1)
++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD IMG_FALSE
++
++#define ODN_PDP_UPDCTRL_OFFSET (0x08E0)
++
++/* PDP, UPDCTRL, UPDFIELD
++*/
++#define ODN_PDP_UPDCTRL_UPDFIELD_MASK (0x00000001)
++#define ODN_PDP_UPDCTRL_UPDFIELD_LSBMASK (0x00000001)
++#define ODN_PDP_UPDCTRL_UPDFIELD_SHIFT (0)
++#define ODN_PDP_UPDCTRL_UPDFIELD_LENGTH (1)
++#define ODN_PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD IMG_FALSE
++
++#define ODN_PDP_INTCTRL_OFFSET (0x08E4)
++
++/* PDP, INTCTRL, HBLNK_LINE
++*/
++#define ODN_PDP_INTCTRL_HBLNK_LINE_MASK (0x00010000)
++#define ODN_PDP_INTCTRL_HBLNK_LINE_LSBMASK (0x00000001)
++#define ODN_PDP_INTCTRL_HBLNK_LINE_SHIFT (16)
++#define ODN_PDP_INTCTRL_HBLNK_LINE_LENGTH (1)
++#define ODN_PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD IMG_FALSE
++
++/* PDP, INTCTRL, HBLNK_LINENO
++*/
++#define ODN_PDP_INTCTRL_HBLNK_LINENO_MASK (0x00001FFF)
++#define ODN_PDP_INTCTRL_HBLNK_LINENO_LSBMASK (0x00001FFF)
++#define ODN_PDP_INTCTRL_HBLNK_LINENO_SHIFT (0)
++#define ODN_PDP_INTCTRL_HBLNK_LINENO_LENGTH (13)
++#define ODN_PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD IMG_FALSE
++
++#define ODN_PDP_PDISETUP_OFFSET (0x0900)
++
++/* PDP, PDISETUP, PDI_BLNKLVL
++*/
++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_MASK (0x00000040)
++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LSBMASK (0x00000001)
++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SHIFT (6)
++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LENGTH (1)
++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD IMG_FALSE
++
++/* PDP, PDISETUP, PDI_BLNK
++*/
++#define ODN_PDP_PDISETUP_PDI_BLNK_MASK (0x00000020)
++#define ODN_PDP_PDISETUP_PDI_BLNK_LSBMASK (0x00000001)
++#define ODN_PDP_PDISETUP_PDI_BLNK_SHIFT (5)
++#define ODN_PDP_PDISETUP_PDI_BLNK_LENGTH (1)
++#define ODN_PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD IMG_FALSE
++
++/* PDP, PDISETUP, PDI_PWR
++*/
++#define ODN_PDP_PDISETUP_PDI_PWR_MASK (0x00000010)
++#define ODN_PDP_PDISETUP_PDI_PWR_LSBMASK (0x00000001)
++#define ODN_PDP_PDISETUP_PDI_PWR_SHIFT (4)
++#define ODN_PDP_PDISETUP_PDI_PWR_LENGTH (1)
++#define ODN_PDP_PDISETUP_PDI_PWR_SIGNED_FIELD IMG_FALSE
++
++/* PDP, PDISETUP, PDI_EN
++*/
++#define ODN_PDP_PDISETUP_PDI_EN_MASK (0x00000008)
++#define ODN_PDP_PDISETUP_PDI_EN_LSBMASK (0x00000001)
++#define
ODN_PDP_PDISETUP_PDI_EN_SHIFT (3) ++#define ODN_PDP_PDISETUP_PDI_EN_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_GDEN ++*/ ++#define ODN_PDP_PDISETUP_PDI_GDEN_MASK (0x00000004) ++#define ODN_PDP_PDISETUP_PDI_GDEN_LSBMASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_GDEN_SHIFT (2) ++#define ODN_PDP_PDISETUP_PDI_GDEN_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_NFEN ++*/ ++#define ODN_PDP_PDISETUP_PDI_NFEN_MASK (0x00000002) ++#define ODN_PDP_PDISETUP_PDI_NFEN_LSBMASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_NFEN_SHIFT (1) ++#define ODN_PDP_PDISETUP_PDI_NFEN_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_CR ++*/ ++#define ODN_PDP_PDISETUP_PDI_CR_MASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_CR_LSBMASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_CR_SHIFT (0) ++#define ODN_PDP_PDISETUP_PDI_CR_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_CR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PDITIMING0_OFFSET (0x0904) ++ ++/* PDP, PDITIMING0, PDI_PWRSVGD ++*/ ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_MASK (0x0F000000) ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK (0x0000000F) ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SHIFT (24) ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LENGTH (4) ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDITIMING0, PDI_LSDEL ++*/ ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_MASK (0x007F0000) ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_LSBMASK (0x0000007F) ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_SHIFT (16) ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_LENGTH (7) ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDITIMING0, PDI_PWRSV2GD2 ++*/ ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_MASK (0x000003FF) ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK (0x000003FF) ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT (0) ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH (10) ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PDITIMING1_OFFSET (0x0908) ++ ++/* PDP, PDITIMING1, PDI_NLDEL ++*/ ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_MASK (0x000F0000) ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_LSBMASK (0x0000000F) ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_SHIFT (16) ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_LENGTH (4) ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDITIMING1, PDI_ACBDEL ++*/ ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_MASK (0x000003FF) ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LSBMASK (0x000003FF) ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SHIFT (0) ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LENGTH (10) ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PDICOREID_OFFSET (0x090C) ++ ++/* PDP, PDICOREID, PDI_GROUP_ID ++*/ ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_MASK (0xFF000000) ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SHIFT (24) ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LENGTH (8) ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREID, PDI_CORE_ID ++*/ ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_MASK (0x00FF0000) ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_SHIFT (16) ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_LENGTH (8) ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREID, PDI_CONFIG_ID ++*/ ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_MASK 
(0x0000FFFF) ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK (0x0000FFFF) ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SHIFT (0) ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LENGTH (16) ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PDICOREREV_OFFSET (0x0910) ++ ++/* PDP, PDICOREREV, PDI_MAJOR_REV ++*/ ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_MASK (0x00FF0000) ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT (16) ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH (8) ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREREV, PDI_MINOR_REV ++*/ ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_MASK (0x0000FF00) ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SHIFT (8) ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LENGTH (8) ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREREV, PDI_MAINT_REV ++*/ ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_MASK (0x000000FF) ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SHIFT (0) ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LENGTH (8) ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX2_OFFSET (0x0920) ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK (0x000000C0) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK (0x00000030) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT (4) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK (0x0000000C) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX4_0_OFFSET (0x0924) ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK (0xF0000000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT (28) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK (0x0F000000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK (0x0000000F) 
++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK (0x00F00000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT (20) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK (0x000F0000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT (16) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK (0x0000F000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK (0x00000F00) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT (8) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK (0x000000F0) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX4_1_OFFSET (0x0928) ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK (0xF0000000) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT (28) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK (0x0F000000) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK (0x00F00000) 
++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT (20) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK (0x000F0000) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT (16) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK (0x0000F000) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK (0x00000F00) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT (8) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK (0x000000F0) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_0_OFFSET (0x092C) ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0 ++*/ 
++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_1_OFFSET (0x0930) ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_2_OFFSET (0x0934) ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH (6) 
++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_3_OFFSET (0x0938) ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_4_OFFSET (0x093C) ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK (0x0000003F) 
++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_5_OFFSET (0x0940) ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK (0x0000003F) 
++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_6_OFFSET (0x0944) ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_7_OFFSET (0x0948) ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD IMG_FALSE 
++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_8_OFFSET (0x094C) ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_9_OFFSET (0x0950) ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT (18) ++#define 
ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_10_OFFSET (0x0954) ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_11_OFFSET (0x0958) ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK 
(0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_12_OFFSET (0x095C) ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD 
IMG_FALSE ++ ++#define ODN_PDP_GRPH1_MEMCTRL_OFFSET (0x0960) ++ ++/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN ++*/ ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT (0) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH (8) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1_MEM_THRESH_OFFSET (0x0964) ++ ++/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD ++*/ ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD ++*/ ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD ++*/ ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT (0) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2_MEMCTRL_OFFSET (0x0968) ++ ++/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN ++*/ ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT (0) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH (8) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2_MEM_THRESH_OFFSET (0x096C) ++ ++/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD ++*/ ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD ++*/ ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK (0x001FF000) 
++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD ++*/ ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT (0) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3_MEMCTRL_OFFSET (0x0970) ++ ++/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN ++*/ ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT (0) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH (8) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3_MEM_THRESH_OFFSET (0x0974) ++ ++/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD ++*/ ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD ++*/ ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD ++*/ ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT (0) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4_MEMCTRL_OFFSET (0x0978) ++ ++/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN ++*/ ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT (0) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH (8) 
++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4_MEM_THRESH_OFFSET (0x097C) ++ ++/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD ++*/ ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD ++*/ ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD ++*/ ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT (0) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1_MEMCTRL_OFFSET (0x0980) ++ ++/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_MEMCTRL, VID1_BURSTLEN ++*/ ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT (0) ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH (8) ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1_MEM_THRESH_OFFSET (0x0984) ++ ++/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD ++*/ ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD ++*/ ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD ++*/ ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT (0) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH (9) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2_MEMCTRL_OFFSET (0x0988) ++ ++/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define 
ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_MEMCTRL, VID2_BURSTLEN ++*/ ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT (0) ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH (8) ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2_MEM_THRESH_OFFSET (0x098C) ++ ++/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD ++*/ ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD ++*/ ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD ++*/ ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT (0) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH (9) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3_MEMCTRL_OFFSET (0x0990) ++ ++/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_MEMCTRL, VID3_BURSTLEN ++*/ ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT (0) ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH (8) ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3_MEM_THRESH_OFFSET (0x0994) ++ ++/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD ++*/ ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD ++*/ ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD ++*/ 
++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT (0) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH (9) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4_MEMCTRL_OFFSET (0x0998) ++ ++/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_MEMCTRL, VID4_BURSTLEN ++*/ ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT (0) ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH (8) ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4_MEM_THRESH_OFFSET (0x099C) ++ ++/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD ++*/ ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD ++*/ ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD ++*/ ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT (0) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH (9) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1_PANIC_THRESH_OFFSET (0x09A0) ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define 
ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2_PANIC_THRESH_OFFSET (0x09A4) ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ 
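++/* A minimal usage sketch (illustrative helpers, not part of the generated
++ * register list): every PDP field in this header is described by the same
++ * MASK/LSBMASK/SHIFT/LENGTH/SIGNED_FIELD quintuple, where MASK selects the
++ * field in place, LSBMASK is the same mask moved down to bit 0, and LENGTH
++ * is the field width in bits, so LSBMASK == (1 << LENGTH) - 1. The helper
++ * names below are hypothetical; the quintuple is consumed like this:
++ */
++static inline unsigned int odn_pdp_field_get(unsigned int reg_val,
++					     unsigned int mask,
++					     unsigned int shift)
++{
++	/* Mask the field in place, then shift it down to bit 0. */
++	return (reg_val & mask) >> shift;
++}
++
++static inline unsigned int odn_pdp_field_set(unsigned int reg_val,
++					     unsigned int mask,
++					     unsigned int shift,
++					     unsigned int value)
++{
++	/* Clear the field, then merge in the new value, clipped to the field. */
++	return (reg_val & ~mask) | ((value << shift) & mask);
++}
++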
++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3_PANIC_THRESH_OFFSET (0x09A8) ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define 
ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4_PANIC_THRESH_OFFSET (0x09AC) ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1_PANIC_THRESH_OFFSET (0x09B0) ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ 
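++/* For example, with the sketch helpers introduced above, the UV alert on
++ * GRPH4 would be enabled by read-modify-writing the register (reg_val is a
++ * hypothetical copy of GRPH4_PANIC_THRESH; the surrounding driver supplies
++ * the actual register read and write):
++ *
++ *	reg_val = odn_pdp_field_set(reg_val,
++ *		ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK,
++ *		ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT, 1);
++ */
++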
++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2_PANIC_THRESH_OFFSET (0x09B4) ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define 
ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3_PANIC_THRESH_OFFSET (0x09B8) ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define 
ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4_PANIC_THRESH_OFFSET (0x09BC) ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define 
ODN_PDP_BURST_BOUNDARY_OFFSET (0x09C0) ++ ++/* PDP, BURST_BOUNDARY, BURST_BOUNDARY ++*/ ++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK (0x0000003F) ++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK (0x0000003F) ++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT (0) ++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH (6) ++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD IMG_FALSE ++ ++ ++/* ---------------------- End of register definitions ---------------------- */ ++ ++/* NUMREG defines the extent of register address space. ++*/ ++ ++#define ODN_PDP_NUMREG ((0x09C0 >> 2)+1) ++ ++/* Info about video plane addresses */ ++#define ODN_PDP_YADDR_BITS (ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH) ++#define ODN_PDP_YADDR_ALIGN 5 ++#define ODN_PDP_UADDR_BITS (ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH) ++#define ODN_PDP_UADDR_ALIGN 5 ++#define ODN_PDP_VADDR_BITS (ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH) ++#define ODN_PDP_VADDR_ALIGN 5 ++ ++#define ODN_PDP_YSTRIDE_BITS (ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH) ++#define ODN_PDP_YSTRIDE_ALIGN 5 ++ ++#define ODN_PDP_MAX_INPUT_WIDTH (ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1) ++#define ODN_PDP_MAX_INPUT_HEIGHT (ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1) ++ ++/* Maximum 6 bytes per pixel for RGB161616 */ ++#define ODN_PDP_MAX_IMAGE_BYTES (ODN_PDP_MAX_INPUT_WIDTH * ODN_PDP_MAX_INPUT_HEIGHT * 6) ++ ++/* Round up */ ++#define ODN_PDP_MAX_IMAGE_PAGES ((ODN_PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE) ++ ++#define ODN_PDP_YADDR_MAX (((1 << ODN_PDP_YADDR_BITS) - 1) << ODN_PDP_YADDR_ALIGN) ++#define ODN_PDP_UADDR_MAX (((1 << ODN_PDP_UADDR_BITS) - 1) << ODN_PDP_UADDR_ALIGN) ++#define ODN_PDP_VADDR_MAX (((1 << ODN_PDP_VADDR_BITS) - 1) << ODN_PDP_VADDR_ALIGN) ++#define ODN_PDP_YSTRIDE_MAX ((1 << ODN_PDP_YSTRIDE_BITS) << ODN_PDP_YSTRIDE_ALIGN) ++#define ODN_PDP_YADDR_ALIGNMASK ((1 << ODN_PDP_YADDR_ALIGN) - 1) ++#define ODN_PDP_UADDR_ALIGNMASK ((1 << ODN_PDP_UADDR_ALIGN) - 1) ++#define ODN_PDP_VADDR_ALIGNMASK ((1 << ODN_PDP_VADDR_ALIGN) - 1) ++#define ODN_PDP_YSTRIDE_ALIGNMASK ((1 << ODN_PDP_YSTRIDE_ALIGN) - 1) ++ ++/* Field Values (some are reserved for future use) */ ++#define ODN_PDP_SURF_PIXFMT_RGB332 0x3 ++#define ODN_PDP_SURF_PIXFMT_ARGB4444 0x4 ++#define ODN_PDP_SURF_PIXFMT_ARGB1555 0x5 ++#define ODN_PDP_SURF_PIXFMT_RGB888 0x6 ++#define ODN_PDP_SURF_PIXFMT_RGB565 0x7 ++#define ODN_PDP_SURF_PIXFMT_ARGB8888 0x8 ++#define ODN_PDP_SURF_PIXFMT_420_PL8 0x9 ++#define ODN_PDP_SURF_PIXFMT_420_PL8IVU 0xA ++#define ODN_PDP_SURF_PIXFMT_420_PL8IUV 0xB ++#define ODN_PDP_SURF_PIXFMT_422_UY0VY1_8888 0xC ++#define ODN_PDP_SURF_PIXFMT_422_VY0UY1_8888 0xD ++#define ODN_PDP_SURF_PIXFMT_422_Y0UY1V_8888 0xE ++#define ODN_PDP_SURF_PIXFMT_422_Y0VY1U_8888 0xF ++#define ODN_PDP_SURF_PIXFMT_AYUV8888 0x10 ++#define ODN_PDP_SURF_PIXFMT_YUV101010 0x15 ++#define ODN_PDP_SURF_PIXFMT_RGB101010 0x17 ++#define ODN_PDP_SURF_PIXFMT_420_PL10IUV 0x18 ++#define ODN_PDP_SURF_PIXFMT_420_PL10IVU 0x19 ++#define ODN_PDP_SURF_PIXFMT_422_PL10IUV 0x1A ++#define ODN_PDP_SURF_PIXFMT_422_PL10IVU 0x1B ++#define ODN_PDP_SURF_PIXFMT_RGB121212 0x1E ++#define ODN_PDP_SURF_PIXFMT_RGB161616 0x1F ++ ++#define ODN_PDP_CTRL_CKEYSRC_PREV 0x0 ++#define ODN_PDP_CTRL_CKEYSRC_CUR 0x1 ++ ++#define ODN_PDP_MEMCTRL_MEMREFRESH_ALWAYS 0x0 ++#define ODN_PDP_MEMCTRL_MEMREFRESH_HBLNK 0x1 ++#define ODN_PDP_MEMCTRL_MEMREFRESH_VBLNK 0x2 ++#define ODN_PDP_MEMCTRL_MEMREFRESH_BOTH 0x3 ++ ++#define ODN_PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0 0x0 ++#define ODN_PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1 0x1 ++#define 
ODN_PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2 0x2 ++#define ODN_PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3 0x3 ++#define ODN_PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4 0x4 ++#define ODN_PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5 0x5 ++#define ODN_PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6 0x6 ++#define ODN_PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7 0x7 ++ ++#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE 0x0 ++#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1 ++#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2 ++ ++#define ODN_PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1 ++#define ODN_PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS 10 ++ ++/*---------------------------------------------------------------------------*/ ++ ++#endif /* ODN_PDP_REGS_H */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/odin_regs.h b/drivers/gpu/drm/img-rogue/apollo/odin_regs.h +new file mode 100644 +index 000000000000..5d5821623ac7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/odin_regs.h +@@ -0,0 +1,1026 @@ ++/****************************************************************************** ++@Title Odin system control register definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Odin FPGA register defs for IMG 3rd generation TCF ++ ++ Auto generated headers, eg. odn_core.h: ++ regconv -d . -a 8 odn_core.def ++ ++ Source files : ++ odn_core.def ++ mca_debug.def ++ sai_rx_debug.def ++ sai_tx_debug.def ++ ad_tx.def ++ ++ Changes: ++ Removed obsolete copyright dates ++ Changed lower case to upper case ++ (eg. odn_core changed to ODN_CORE) ++ Changed PVR5__ to ODN_ ++ Merged multiple .def files into one header ++ ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++******************************************************************************/ ++ ++/* tab size 4 */ ++ ++#ifndef _ODIN_REGS_H_ ++#define _ODIN_REGS_H_ ++ ++/****************************** ++ Generated from: odn_core.def ++*******************************/ ++ ++/* ++ Register ID ++*/ ++#define ODN_CORE_ID 0x0000 ++#define ODN_ID_VARIANT_MASK 0x0000FFFFU ++#define ODN_ID_VARIANT_SHIFT 0 ++#define ODN_ID_VARIANT_SIGNED 0 ++ ++#define ODN_ID_ID_MASK 0xFFFF0000U ++#define ODN_ID_ID_SHIFT 16 ++#define ODN_ID_ID_SIGNED 0 ++ ++/* ++ Register REL ++*/ ++#define ODN_CORE_REL 0x0004 ++#define ODN_REL_MINOR_MASK 0x0000FFFFU ++#define ODN_REL_MINOR_SHIFT 0 ++#define ODN_REL_MINOR_SIGNED 0 ++ ++#define ODN_REL_MAJOR_MASK 0xFFFF0000U ++#define ODN_REL_MAJOR_SHIFT 16 ++#define ODN_REL_MAJOR_SIGNED 0 ++ ++/* ++ Register CHANGE_SET ++*/ ++#define ODN_CORE_CHANGE_SET 0x0008 ++#define ODN_CHANGE_SET_SET_MASK 0xFFFFFFFFU ++#define ODN_CHANGE_SET_SET_SHIFT 0 ++#define ODN_CHANGE_SET_SET_SIGNED 0 ++ ++/* ++ Register USER_ID ++*/ ++#define ODN_CORE_USER_ID 0x000C ++#define ODN_USER_ID_ID_MASK 0x000000FFU ++#define ODN_USER_ID_ID_SHIFT 0 ++#define ODN_USER_ID_ID_SIGNED 0 ++ ++/* ++ Register USER_BUILD ++*/ ++#define ODN_CORE_USER_BUILD 0x0010 ++#define ODN_USER_BUILD_BUILD_MASK 0xFFFFFFFFU ++#define ODN_USER_BUILD_BUILD_SHIFT 0 ++#define ODN_USER_BUILD_BUILD_SIGNED 0 ++ ++/* ++ Register SW_IF_VERSION ++*/ ++#define ODN_CORE_SW_IF_VERSION 0x0014 ++#define ODN_SW_IF_VERSION_VERSION_MASK 0x0000FFFFU ++#define ODN_SW_IF_VERSION_VERSION_SHIFT 0 ++#define ODN_SW_IF_VERSION_VERSION_SIGNED 0 ++ ++/* ++ Register INTERNAL_RESETN ++*/ ++#define ODN_CORE_INTERNAL_RESETN 0x0080 ++#define ODN_INTERNAL_RESETN_DDR_MASK 0x00000001U ++#define ODN_INTERNAL_RESETN_DDR_SHIFT 0 ++#define ODN_INTERNAL_RESETN_DDR_SIGNED 0 ++ ++#define ODN_INTERNAL_RESETN_MIG0_MASK 0x00000002U ++#define ODN_INTERNAL_RESETN_MIG0_SHIFT 1 ++#define ODN_INTERNAL_RESETN_MIG0_SIGNED 0 ++ ++#define ODN_INTERNAL_RESETN_MIG1_MASK 0x00000004U ++#define ODN_INTERNAL_RESETN_MIG1_SHIFT 2 ++#define ODN_INTERNAL_RESETN_MIG1_SIGNED 0 ++ ++#define ODN_INTERNAL_RESETN_PDP1_MASK 0x00000008U ++#define ODN_INTERNAL_RESETN_PDP1_SHIFT 3 ++#define ODN_INTERNAL_RESETN_PDP1_SIGNED 0 ++ ++#define ODN_INTERNAL_RESETN_PDP2_MASK 0x00000010U ++#define ODN_INTERNAL_RESETN_PDP2_SHIFT 4 ++#define ODN_INTERNAL_RESETN_PDP2_SIGNED 0 ++ ++#define ODN_INTERNAL_RESETN_PERIP_MASK 0x00000020U ++#define ODN_INTERNAL_RESETN_PERIP_SHIFT 5 ++#define ODN_INTERNAL_RESETN_PERIP_SIGNED 0 ++ ++#define ODN_INTERNAL_RESETN_GIST_MASK 0x00000040U ++#define ODN_INTERNAL_RESETN_GIST_SHIFT 6 ++#define ODN_INTERNAL_RESETN_GIST_SIGNED 0 ++ ++#define ODN_INTERNAL_RESETN_PIKE_MASK 0x00000080U ++#define ODN_INTERNAL_RESETN_PIKE_SHIFT 7 ++#define ODN_INTERNAL_RESETN_PIKE_SIGNED 0 ++ ++/* ++ Register EXTERNAL_RESETN ++*/ ++#define ODN_CORE_EXTERNAL_RESETN 0x0084 ++#define ODN_EXTERNAL_RESETN_DUT_MASK 0x00000001U ++#define ODN_EXTERNAL_RESETN_DUT_SHIFT 0 ++#define 
ODN_EXTERNAL_RESETN_DUT_SIGNED 0 ++ ++#define ODN_EXTERNAL_RESETN_DUT_SPI_MASK 0x00000002U ++#define ODN_EXTERNAL_RESETN_DUT_SPI_SHIFT 1 ++#define ODN_EXTERNAL_RESETN_DUT_SPI_SIGNED 0 ++ ++#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_MASK 0x00000004U ++#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_SHIFT 2 ++#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_SIGNED 0 ++ ++#define ODN_EXTERNAL_RESETN_DUT_IF_MASK 0x00000008U ++#define ODN_EXTERNAL_RESETN_DUT_IF_SHIFT 3 ++#define ODN_EXTERNAL_RESETN_DUT_IF_SIGNED 0 ++ ++#define ODN_EXTERNAL_RESETN_DUT1_MASK 0x00000010U ++#define ODN_EXTERNAL_RESETN_DUT1_SHIFT 4 ++#define ODN_EXTERNAL_RESETN_DUT1_SIGNED 0 ++ ++#define ODN_EXTERNAL_RESETN_DUT2_MASK 0x00000020U ++#define ODN_EXTERNAL_RESETN_DUT2_SHIFT 5 ++#define ODN_EXTERNAL_RESETN_DUT2_SIGNED 0 ++ ++/* ++ Register EXTERNAL_RESET ++*/ ++#define ODN_CORE_EXTERNAL_RESET 0x0088 ++#define ODN_EXTERNAL_RESET_PVT_CAL_MASK 0x00000001U ++#define ODN_EXTERNAL_RESET_PVT_CAL_SHIFT 0 ++#define ODN_EXTERNAL_RESET_PVT_CAL_SIGNED 0 ++ ++#define ODN_EXTERNAL_RESET_PLL_MASK 0x00000002U ++#define ODN_EXTERNAL_RESET_PLL_SHIFT 1 ++#define ODN_EXTERNAL_RESET_PLL_SIGNED 0 ++ ++/* ++ Register INTERNAL_AUTO_RESETN ++*/ ++#define ODN_CORE_INTERNAL_AUTO_RESETN 0x008C ++#define ODN_INTERNAL_AUTO_RESETN_AUX_MASK 0x00000001U ++#define ODN_INTERNAL_AUTO_RESETN_AUX_SHIFT 0 ++#define ODN_INTERNAL_AUTO_RESETN_AUX_SIGNED 0 ++ ++/* ++ Register CLK_GEN_RESET ++*/ ++#define ODN_CORE_CLK_GEN_RESET 0x0090 ++#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK 0x00000001U ++#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT 0 ++#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED 0 ++ ++#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK 0x00000002U ++#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT 1 ++#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED 0 ++ ++#define ODN_CLK_GEN_RESET_MULTI_MMCM_MASK 0x00000004U ++#define ODN_CLK_GEN_RESET_MULTI_MMCM_SHIFT 2 ++#define ODN_CLK_GEN_RESET_MULTI_MMCM_SIGNED 0 ++ ++#define ODN_CLK_GEN_RESET_PDP_MMCM_MASK 0x00000008U ++#define ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT 3 ++#define ODN_CLK_GEN_RESET_PDP_MMCM_SIGNED 0 ++ ++/* ++ Register INTERRUPT_STATUS ++*/ ++#define ODN_CORE_INTERRUPT_STATUS 0x0100 ++#define ODN_INTERRUPT_STATUS_DUT_MASK 0x00000001U ++#define ODN_INTERRUPT_STATUS_DUT_SHIFT 0 ++#define ODN_INTERRUPT_STATUS_DUT_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_PDP1_MASK 0x00000002U ++#define ODN_INTERRUPT_STATUS_PDP1_SHIFT 1 ++#define ODN_INTERRUPT_STATUS_PDP1_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_PDP2_MASK 0x00000004U ++#define ODN_INTERRUPT_STATUS_PDP2_SHIFT 2 ++#define ODN_INTERRUPT_STATUS_PDP2_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_PERIP_MASK 0x00000008U ++#define ODN_INTERRUPT_STATUS_PERIP_SHIFT 3 ++#define ODN_INTERRUPT_STATUS_PERIP_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_UART_MASK 0x00000010U ++#define ODN_INTERRUPT_STATUS_UART_SHIFT 4 ++#define ODN_INTERRUPT_STATUS_UART_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_MASK 0x00000020U ++#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SHIFT 5 ++#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_MASK 0x00000040U ++#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SHIFT 6 ++#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_MASK 0x00000080U ++#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SHIFT 7 ++#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_MASK 0x00000100U ++#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SHIFT 8 
++#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_DUT2_MASK 0x00000200U ++#define ODN_INTERRUPT_STATUS_DUT2_SHIFT 9 ++#define ODN_INTERRUPT_STATUS_DUT2_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_MASK 0x00000400U ++#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_SHIFT 10 ++#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_CDMA_MASK 0x00001800U ++#define ODN_INTERRUPT_STATUS_CDMA_SHIFT 11 ++#define ODN_INTERRUPT_STATUS_CDMA_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_OS_IRQ_MASK 0x001FE000U ++#define ODN_INTERRUPT_STATUS_OS_IRQ_SHIFT 13 ++#define ODN_INTERRUPT_STATUS_OS_IRQ_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_IRQ_TEST_MASK 0x40000000U ++#define ODN_INTERRUPT_STATUS_IRQ_TEST_SHIFT 30 ++#define ODN_INTERRUPT_STATUS_IRQ_TEST_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U ++#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31 ++#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0 ++ ++/* ++ Register INTERRUPT_ENABLE ++*/ ++#define ODN_CORE_INTERRUPT_ENABLE 0x0104 ++#define ODN_INTERRUPT_ENABLE_DUT_MASK 0x00000001U ++#define ODN_INTERRUPT_ENABLE_DUT_SHIFT 0 ++#define ODN_INTERRUPT_ENABLE_DUT_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_PDP1_MASK 0x00000002U ++#define ODN_INTERRUPT_ENABLE_PDP1_SHIFT 1 ++#define ODN_INTERRUPT_ENABLE_PDP1_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_PDP2_MASK 0x00000004U ++#define ODN_INTERRUPT_ENABLE_PDP2_SHIFT 2 ++#define ODN_INTERRUPT_ENABLE_PDP2_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_PERIP_MASK 0x00000008U ++#define ODN_INTERRUPT_ENABLE_PERIP_SHIFT 3 ++#define ODN_INTERRUPT_ENABLE_PERIP_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_UART_MASK 0x00000010U ++#define ODN_INTERRUPT_ENABLE_UART_SHIFT 4 ++#define ODN_INTERRUPT_ENABLE_UART_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_MASK 0x00000020U ++#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SHIFT 5 ++#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_MASK 0x00000040U ++#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SHIFT 6 ++#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_MASK 0x00000080U ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SHIFT 7 ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_MASK 0x00000100U ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SHIFT 8 ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_DUT2_MASK 0x00000200U ++#define ODN_INTERRUPT_ENABLE_DUT2_SHIFT 9 ++#define ODN_INTERRUPT_ENABLE_DUT2_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_MASK 0x00000400U ++#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_SHIFT 10 ++#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_CDMA_MASK 0x00001800U ++#define ODN_INTERRUPT_ENABLE_CDMA_SHIFT 11 ++#define ODN_INTERRUPT_ENABLE_CDMA_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_OS_IRQ_MASK 0x001FE000U ++#define ODN_INTERRUPT_ENABLE_OS_IRQ_SHIFT 13 ++#define ODN_INTERRUPT_ENABLE_OS_IRQ_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_IRQ_TEST_MASK 0x40000000U ++#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SHIFT 30 ++#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U ++#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31 ++#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_CLR 
++*/
++#define ODN_CORE_INTERRUPT_CLR 0x010C
++#define ODN_INTERRUPT_CLR_DUT_MASK 0x00000001U
++#define ODN_INTERRUPT_CLR_DUT_SHIFT 0
++#define ODN_INTERRUPT_CLR_DUT_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_PDP1_MASK 0x00000002U
++#define ODN_INTERRUPT_CLR_PDP1_SHIFT 1
++#define ODN_INTERRUPT_CLR_PDP1_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_PDP2_MASK 0x00000004U
++#define ODN_INTERRUPT_CLR_PDP2_SHIFT 2
++#define ODN_INTERRUPT_CLR_PDP2_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_PERIP_MASK 0x00000008U
++#define ODN_INTERRUPT_CLR_PERIP_SHIFT 3
++#define ODN_INTERRUPT_CLR_PERIP_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_UART_MASK 0x00000010U
++#define ODN_INTERRUPT_CLR_UART_SHIFT 4
++#define ODN_INTERRUPT_CLR_UART_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_MASK 0x00000020U
++#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SHIFT 5
++#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_MASK 0x00000040U
++#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SHIFT 6
++#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_MASK 0x00000080U
++#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SHIFT 7
++#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_MASK 0x00000100U
++#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SHIFT 8
++#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_DUT2_MASK 0x00000200U
++#define ODN_INTERRUPT_CLR_DUT2_SHIFT 9
++#define ODN_INTERRUPT_CLR_DUT2_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_MASK 0x00000400U
++#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_SHIFT 10
++#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_CDMA_MASK 0x00001800U
++#define ODN_INTERRUPT_CLR_CDMA_SHIFT 11
++#define ODN_INTERRUPT_CLR_CDMA_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_OS_IRQ_MASK 0x001FE000U
++#define ODN_INTERRUPT_CLR_OS_IRQ_SHIFT 13
++#define ODN_INTERRUPT_CLR_OS_IRQ_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_IRQ_TEST_MASK 0x40000000U
++#define ODN_INTERRUPT_CLR_IRQ_TEST_SHIFT 30
++#define ODN_INTERRUPT_CLR_IRQ_TEST_SIGNED 0
++
++#define ODN_INTERRUPT_CLR_MASTER_CLEAR_MASK 0x80000000U
++#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SHIFT 31
++#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SIGNED 0
++
++/*
++ Register INTERRUPT_TEST
++*/
++#define ODN_CORE_INTERRUPT_TEST 0x0110
++#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_MASK 0x00000001U
++#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0
++#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0
++
++/*
++ Register INTERRUPT_TIMEOUT_CLR
++*/
++#define ODN_CORE_INTERRUPT_TIMEOUT_CLR 0x0114
++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK 0x00000002U
++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SHIFT 1
++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SIGNED 0
++
++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_MASK 0x00000001U
++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SHIFT 0
++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SIGNED 0
++
++/*
++ Register INTERRUPT_TIMEOUT
++*/
++#define ODN_CORE_INTERRUPT_TIMEOUT 0x0118
++#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_MASK 0xFFFFFFFFU
++#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SHIFT 0
++#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SIGNED 0
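++
++/* A minimal acknowledge sketch for the interrupt registers above
++ * (illustrative only: the helper name, the iomem base pointer and the use
++ * of ioread32()/iowrite32() from <linux/io.h> are assumptions, not part of
++ * this header). The usual pattern is to read INTERRUPT_STATUS, keep only
++ * the sources the driver enabled via INTERRUPT_ENABLE, then acknowledge
++ * them by writing the same bits to INTERRUPT_CLR.
++ */
++static inline unsigned int odn_core_ack_irqs(void __iomem *core_regs)
++{
++	unsigned int status = ioread32(core_regs + ODN_CORE_INTERRUPT_STATUS);
++	unsigned int enabled = ioread32(core_regs + ODN_CORE_INTERRUPT_ENABLE);
++	unsigned int pending = status & enabled;
++
++	/* Write-one-to-clear acknowledge of the pending sources. */
++	if (pending)
++		iowrite32(pending, core_regs + ODN_CORE_INTERRUPT_CLR);
++
++	return pending;
++}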
++
++/*
++ Register SYSTEM_ID
++*/
++#define ODN_CORE_SYSTEM_ID 0x011C
++#define ODN_SYSTEM_ID_ID_MASK 0x0000FFFFU
++#define ODN_SYSTEM_ID_ID_SHIFT 0
++#define ODN_SYSTEM_ID_ID_SIGNED 0
++
++/*
++ Register SUPPORTED_FEATURES
++*/
++#define ODN_CORE_SUPPORTED_FEATURES 0x0120
++#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FEATURES_MASK 0xFFFFFFFEU
++#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FEATURES_SHIFT 1
++#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FEATURES_SIGNED 0
++
++#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_MASK 0x00000001U
++#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_SHIFT 0
++#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_SIGNED 0
++
++/*
++ Register NUM_GPIO
++*/
++#define ODN_CORE_NUM_GPIO 0x0180
++#define ODN_NUM_GPIO_NUMBER_MASK 0x0000000FU
++#define ODN_NUM_GPIO_NUMBER_SHIFT 0
++#define ODN_NUM_GPIO_NUMBER_SIGNED 0
++
++/*
++ Register GPIO_EN
++*/
++#define ODN_CORE_GPIO_EN 0x0184
++#define ODN_GPIO_EN_DIRECTION_MASK 0x000000FFU
++#define ODN_GPIO_EN_DIRECTION_SHIFT 0
++#define ODN_GPIO_EN_DIRECTION_SIGNED 0
++
++/*
++ Register GPIO
++*/
++#define ODN_CORE_GPIO 0x0188
++#define ODN_GPIO_GPIO_MASK 0x000000FFU
++#define ODN_GPIO_GPIO_SHIFT 0
++#define ODN_GPIO_GPIO_SIGNED 0
++
++/*
++ Register NUM_DUT_CTRL
++*/
++#define ODN_CORE_NUM_DUT_CTRL 0x0190
++#define ODN_NUM_DUT_CTRL_NUM_PINS_MASK 0xFFFFFFFFU
++#define ODN_NUM_DUT_CTRL_NUM_PINS_SHIFT 0
++#define ODN_NUM_DUT_CTRL_NUM_PINS_SIGNED 0
++
++/*
++ Register DUT_CTRL1
++*/
++#define ODN_CORE_DUT_CTRL1 0x0194
++#define ODN_DUT_CTRL1_CONTROL1_MASK 0x3FFFFFFFU
++#define ODN_DUT_CTRL1_CONTROL1_SHIFT 0
++#define ODN_DUT_CTRL1_CONTROL1_SIGNED 0
++
++#define ODN_DUT_CTRL1_FBDC_BYPASS_MASK 0x40000000U
++#define ODN_DUT_CTRL1_FBDC_BYPASS_SHIFT 30
++#define ODN_DUT_CTRL1_FBDC_BYPASS_SIGNED 0
++
++#define ODN_DUT_CTRL1_DUT_MST_OFFSET_MASK 0x80000000U
++#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SHIFT 31
++#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SIGNED 0
++
++/*
++ Register DUT_CTRL2
++*/
++#define ODN_CORE_DUT_CTRL2 0x0198
++#define ODN_DUT_CTRL2_CONTROL2_MASK 0xFFFFFFFFU
++#define ODN_DUT_CTRL2_CONTROL2_SHIFT 0
++#define ODN_DUT_CTRL2_CONTROL2_SIGNED 0
++
++/*
++ Register NUM_DUT_STAT
++*/
++#define ODN_CORE_NUM_DUT_STAT 0x019C
++#define ODN_NUM_DUT_STAT_NUM_PINS_MASK 0xFFFFFFFFU
++#define ODN_NUM_DUT_STAT_NUM_PINS_SHIFT 0
++#define ODN_NUM_DUT_STAT_NUM_PINS_SIGNED 0
++
++/*
++ Register DUT_STAT1
++*/
++#define ODN_CORE_DUT_STAT1 0x01A0
++#define ODN_DUT_STAT1_STATUS1_MASK 0xFFFFFFFFU
++#define ODN_DUT_STAT1_STATUS1_SHIFT 0
++#define ODN_DUT_STAT1_STATUS1_SIGNED 0
++
++/*
++ Register DUT_STAT2
++*/
++#define ODN_CORE_DUT_STAT2 0x01A4
++#define ODN_DUT_STAT2_STATUS2_MASK 0xFFFFFFFFU
++#define ODN_DUT_STAT2_STATUS2_SHIFT 0
++#define ODN_DUT_STAT2_STATUS2_SIGNED 0
++
++/*
++ Register DASH_LEDS
++*/
++#define ODN_CORE_DASH_LEDS 0x01A8
++#define ODN_DASH_LEDS_REPA_MASK 0xFFF00000U
++#define ODN_DASH_LEDS_REPA_SHIFT 20
++#define ODN_DASH_LEDS_REPA_SIGNED 0
++
++#define ODN_DASH_LEDS_PIKE_MASK 0x00000FFFU
++#define ODN_DASH_LEDS_PIKE_SHIFT 0
++#define ODN_DASH_LEDS_PIKE_SIGNED 0
++
++/*
++ Register DUT_CLK_INFO
++*/
++#define ODN_CORE_DUT_CLK_INFO 0x01B0
++#define ODN_DUT_CLK_INFO_CORE_MASK 0x0000FFFFU
++#define ODN_DUT_CLK_INFO_CORE_SHIFT 0
++#define ODN_DUT_CLK_INFO_CORE_SIGNED 0
++
++#define ODN_DUT_CLK_INFO_MEM_MASK 0xFFFF0000U
++#define ODN_DUT_CLK_INFO_MEM_SHIFT 16
++#define ODN_DUT_CLK_INFO_MEM_SIGNED 0
++
++/*
++ Register DUT_CLK_PHSE
++*/
++#define ODN_CORE_DUT_CLK_PHSE 0x01B4
++#define ODN_DUT_CLK_PHSE_MEM_REQ_MASK 0x0000FFFFU
++#define ODN_DUT_CLK_PHSE_MEM_REQ_SHIFT 0
++#define ODN_DUT_CLK_PHSE_MEM_REQ_SIGNED 0
++
++#define 
ODN_DUT_CLK_PHSE_MEM_RD_MASK 0xFFFF0000U ++#define ODN_DUT_CLK_PHSE_MEM_RD_SHIFT 16 ++#define ODN_DUT_CLK_PHSE_MEM_RD_SIGNED 0 ++ ++/* ++ Register CORE_STATUS ++*/ ++#define ODN_CORE_CORE_STATUS 0x0200 ++#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_MASK 0x00000001U ++#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SHIFT 0 ++#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SIGNED 0 ++ ++#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_MASK 0x00000010U ++#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SHIFT 4 ++#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SIGNED 0 ++ ++#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_MASK 0x00000020U ++#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SHIFT 5 ++#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SIGNED 0 ++ ++#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_MASK 0x00000040U ++#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SHIFT 6 ++#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SIGNED 0 ++ ++#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_MASK 0x00000080U ++#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SHIFT 7 ++#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SIGNED 0 ++ ++#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_MASK 0x00000100U ++#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SHIFT 8 ++#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SIGNED 0 ++ ++#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_MASK 0x00000200U ++#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SHIFT 9 ++#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SIGNED 0 ++ ++#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00001000U ++#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 12 ++#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0 ++ ++#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00002000U ++#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 13 ++#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0 ++ ++/* ++ Register CORE_CONTROL ++*/ ++#define ODN_CORE_CORE_CONTROL 0x0204 ++#define ODN_CORE_CONTROL_BAR4_OFFSET_MASK 0x0000001FU ++#define ODN_CORE_CONTROL_BAR4_OFFSET_SHIFT 0 ++#define ODN_CORE_CONTROL_BAR4_OFFSET_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U ++#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8 ++#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_HDMI_MODULE_EN_MASK 0x00001C00U ++#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT 10 ++#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_MASK 0x00002000U ++#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT 13 ++#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_PDP1_OFFSET_MASK 0x00070000U ++#define ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT 16 ++#define ODN_CORE_CONTROL_PDP1_OFFSET_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_PDP2_OFFSET_MASK 0x00700000U ++#define ODN_CORE_CONTROL_PDP2_OFFSET_SHIFT 20 ++#define ODN_CORE_CONTROL_PDP2_OFFSET_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_DUT_OFFSET_MASK 0x07000000U ++#define ODN_CORE_CONTROL_DUT_OFFSET_SHIFT 24 ++#define ODN_CORE_CONTROL_DUT_OFFSET_SIGNED 0 ++ ++/* ++ Register REG_BANK_STATUS ++*/ ++#define ODN_CORE_REG_BANK_STATUS 0x0208 ++#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU ++#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0 ++#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0 ++ ++/* ++ Register MMCM_LOCK_STATUS ++*/ ++#define ODN_CORE_MMCM_LOCK_STATUS 0x020C ++#define ODN_MMCM_LOCK_STATUS_DUT_CORE_MASK 0x00000001U ++#define ODN_MMCM_LOCK_STATUS_DUT_CORE_SHIFT 0 ++#define 
ODN_MMCM_LOCK_STATUS_DUT_CORE_SIGNED 0 ++ ++#define ODN_MMCM_LOCK_STATUS_DUT_IF_MASK 0x00000002U ++#define ODN_MMCM_LOCK_STATUS_DUT_IF_SHIFT 1 ++#define ODN_MMCM_LOCK_STATUS_DUT_IF_SIGNED 0 ++ ++#define ODN_MMCM_LOCK_STATUS_MULTI_MASK 0x00000004U ++#define ODN_MMCM_LOCK_STATUS_MULTI_SHIFT 2 ++#define ODN_MMCM_LOCK_STATUS_MULTI_SIGNED 0 ++ ++#define ODN_MMCM_LOCK_STATUS_PDPP_MASK 0x00000008U ++#define ODN_MMCM_LOCK_STATUS_PDPP_SHIFT 3 ++#define ODN_MMCM_LOCK_STATUS_PDPP_SIGNED 0 ++ ++/* ++ Register GIST_STATUS ++*/ ++#define ODN_CORE_GIST_STATUS 0x0210 ++#define ODN_GIST_STATUS_MST_MASK 0x000001FFU ++#define ODN_GIST_STATUS_MST_SHIFT 0 ++#define ODN_GIST_STATUS_MST_SIGNED 0 ++ ++#define ODN_GIST_STATUS_SLV_MASK 0x001FF000U ++#define ODN_GIST_STATUS_SLV_SHIFT 12 ++#define ODN_GIST_STATUS_SLV_SIGNED 0 ++ ++#define ODN_GIST_STATUS_SLV_OUT_MASK 0x03000000U ++#define ODN_GIST_STATUS_SLV_OUT_SHIFT 24 ++#define ODN_GIST_STATUS_SLV_OUT_SIGNED 0 ++ ++#define ODN_GIST_STATUS_MST_OUT_MASK 0x70000000U ++#define ODN_GIST_STATUS_MST_OUT_SHIFT 28 ++#define ODN_GIST_STATUS_MST_OUT_SIGNED 0 ++ ++/* ++ Register DUT_MST_ADD ++*/ ++#define ODN_CORE_DUT_MST_ADD 0x0214 ++#define ODN_DUT_MST_ADD_SLV_OUT_MASK 0x0000003FU ++#define ODN_DUT_MST_ADD_SLV_OUT_SHIFT 0 ++#define ODN_DUT_MST_ADD_SLV_OUT_SIGNED 0 ++ ++/* ++ Register DUT_MULTIPLX_INFO ++*/ ++#define ODN_CORE_DUT_MULTIPLX_INFO 0x0218 ++#define ODN_DUT_MULTIPLX_INFO_MEM_MASK 0x000000FFU ++#define ODN_DUT_MULTIPLX_INFO_MEM_SHIFT 0 ++#define ODN_DUT_MULTIPLX_INFO_MEM_SIGNED 0 ++ ++/**************************** ++ Generated from: ad_tx.def ++*****************************/ ++ ++/* ++ Register ADT_CONTROL ++*/ ++#define ODN_AD_TX_DEBUG_ADT_CONTROL 0x0000 ++#define ODN_SET_ADTX_READY_MASK 0x00000004U ++#define ODN_SET_ADTX_READY_SHIFT 2 ++#define ODN_SET_ADTX_READY_SIGNED 0 ++ ++#define ODN_SEND_ALIGN_DATA_MASK 0x00000002U ++#define ODN_SEND_ALIGN_DATA_SHIFT 1 ++#define ODN_SEND_ALIGN_DATA_SIGNED 0 ++ ++#define ODN_ENABLE_FLUSHING_MASK 0x00000001U ++#define ODN_ENABLE_FLUSHING_SHIFT 0 ++#define ODN_ENABLE_FLUSHING_SIGNED 0 ++ ++/* ++ Register ADT_STATUS ++*/ ++#define ODN_AD_TX_DEBUG_ADT_STATUS 0x0004 ++#define ODN_REQUEST_COMPLETE_MASK 0x00000001U ++#define ODN_REQUEST_COMPLETE_SHIFT 0 ++#define ODN_REQUEST_COMPLETE_SIGNED 0 ++ ++ ++/****************************** ++ Generated from: mca_debug.def ++*******************************/ ++ ++/* ++ Register MCA_CONTROL ++*/ ++#define ODN_MCA_DEBUG_MCA_CONTROL 0x0000 ++#define ODN_ALIGN_START_MASK 0x00000001U ++#define ODN_ALIGN_START_SHIFT 0 ++#define ODN_ALIGN_START_SIGNED 0 ++ ++/* ++ Register MCA_STATUS ++*/ ++#define ODN_MCA_DEBUG_MCA_STATUS 0x0004 ++#define ODN_TCHECK_SDEBUG_MASK 0x40000000U ++#define ODN_TCHECK_SDEBUG_SHIFT 30 ++#define ODN_TCHECK_SDEBUG_SIGNED 0 ++ ++#define ODN_CHECK_SDEBUG_MASK 0x20000000U ++#define ODN_CHECK_SDEBUG_SHIFT 29 ++#define ODN_CHECK_SDEBUG_SIGNED 0 ++ ++#define ODN_ALIGN_SDEBUG_MASK 0x10000000U ++#define ODN_ALIGN_SDEBUG_SHIFT 28 ++#define ODN_ALIGN_SDEBUG_SIGNED 0 ++ ++#define ODN_FWAIT_SDEBUG_MASK 0x08000000U ++#define ODN_FWAIT_SDEBUG_SHIFT 27 ++#define ODN_FWAIT_SDEBUG_SIGNED 0 ++ ++#define ODN_IDLE_SDEBUG_MASK 0x04000000U ++#define ODN_IDLE_SDEBUG_SHIFT 26 ++#define ODN_IDLE_SDEBUG_SIGNED 0 ++ ++#define ODN_FIFO_FULL_MASK 0x03FF0000U ++#define ODN_FIFO_FULL_SHIFT 16 ++#define ODN_FIFO_FULL_SIGNED 0 ++ ++#define ODN_FIFO_EMPTY_MASK 0x0000FFC0U ++#define ODN_FIFO_EMPTY_SHIFT 6 ++#define ODN_FIFO_EMPTY_SIGNED 0 ++ ++#define ODN_TAG_CHECK_ERROR_MASK 0x00000020U ++#define 
ODN_TAG_CHECK_ERROR_SHIFT 5 ++#define ODN_TAG_CHECK_ERROR_SIGNED 0 ++ ++#define ODN_ALIGN_CHECK_ERROR_MASK 0x00000010U ++#define ODN_ALIGN_CHECK_ERROR_SHIFT 4 ++#define ODN_ALIGN_CHECK_ERROR_SIGNED 0 ++ ++#define ODN_ALIGN_ERROR_MASK 0x00000008U ++#define ODN_ALIGN_ERROR_SHIFT 3 ++#define ODN_ALIGN_ERROR_SIGNED 0 ++ ++#define ODN_TAG_CHECKING_OK_MASK 0x00000004U ++#define ODN_TAG_CHECKING_OK_SHIFT 2 ++#define ODN_TAG_CHECKING_OK_SIGNED 0 ++ ++#define ODN_ALIGN_CHECK_OK_MASK 0x00000002U ++#define ODN_ALIGN_CHECK_OK_SHIFT 1 ++#define ODN_ALIGN_CHECK_OK_SIGNED 0 ++ ++#define ODN_ALIGNMENT_FOUND_MASK 0x00000001U ++#define ODN_ALIGNMENT_FOUND_SHIFT 0 ++#define ODN_ALIGNMENT_FOUND_SIGNED 0 ++ ++ ++/********************************* ++ Generated from: sai_rx_debug.def ++**********************************/ ++ ++/* ++ Register SIG_RESULT ++*/ ++#define ODN_SAI_RX_DEBUG_SIG_RESULT 0x0000 ++#define ODN_SIG_RESULT_VALUE_MASK 0xFFFFFFFFU ++#define ODN_SIG_RESULT_VALUE_SHIFT 0 ++#define ODN_SIG_RESULT_VALUE_SIGNED 0 ++ ++/* ++ Register INIT_SIG ++*/ ++#define ODN_SAI_RX_DEBUG_INIT_SIG 0x0004 ++#define ODN_INIT_SIG_VALUE_MASK 0x00000001U ++#define ODN_INIT_SIG_VALUE_SHIFT 0 ++#define ODN_INIT_SIG_VALUE_SIGNED 0 ++ ++/* ++ Register SAI_BYPASS ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_BYPASS 0x0008 ++#define ODN_BYPASS_CLK_TAPS_VALUE_MASK 0x000003FFU ++#define ODN_BYPASS_CLK_TAPS_VALUE_SHIFT 0 ++#define ODN_BYPASS_CLK_TAPS_VALUE_SIGNED 0 ++ ++#define ODN_BYPASS_SET_MASK 0x00010000U ++#define ODN_BYPASS_SET_SHIFT 16 ++#define ODN_BYPASS_SET_SIGNED 0 ++ ++#define ODN_BYPASS_EN_MASK 0x00100000U ++#define ODN_BYPASS_EN_SHIFT 20 ++#define ODN_BYPASS_EN_SIGNED 0 ++ ++#define ODN_EN_STATUS_MASK 0x01000000U ++#define ODN_EN_STATUS_SHIFT 24 ++#define ODN_EN_STATUS_SIGNED 0 ++ ++/* ++ Register SAI_CLK_TAPS ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CLK_TAPS 0x000C ++#define ODN_CLK_TAPS_VALUE_MASK 0x000003FFU ++#define ODN_CLK_TAPS_VALUE_SHIFT 0 ++#define ODN_CLK_TAPS_VALUE_SIGNED 0 ++ ++#define ODN_TRAINING_COMPLETE_MASK 0x00010000U ++#define ODN_TRAINING_COMPLETE_SHIFT 16 ++#define ODN_TRAINING_COMPLETE_SIGNED 0 ++ ++/* ++ Register SAI_EYES ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_EYES 0x0010 ++#define ODN_MIN_EYE_END_MASK 0x0000FFFFU ++#define ODN_MIN_EYE_END_SHIFT 0 ++#define ODN_MIN_EYE_END_SIGNED 0 ++ ++#define ODN_MAX_EYE_START_MASK 0xFFFF0000U ++#define ODN_MAX_EYE_START_SHIFT 16 ++#define ODN_MAX_EYE_START_SIGNED 0 ++ ++/* ++ Register SAI_DDR_INVERT ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_DDR_INVERT 0x0014 ++#define ODN_DDR_INVERT_MASK 0x00000001U ++#define ODN_DDR_INVERT_SHIFT 0 ++#define ODN_DDR_INVERT_SIGNED 0 ++ ++#define ODN_OVERIDE_VALUE_MASK 0x00010000U ++#define ODN_OVERIDE_VALUE_SHIFT 16 ++#define ODN_OVERIDE_VALUE_SIGNED 0 ++ ++#define ODN_INVERT_OVERIDE_MASK 0x00100000U ++#define ODN_INVERT_OVERIDE_SHIFT 20 ++#define ODN_INVERT_OVERIDE_SIGNED 0 ++ ++/* ++ Register SAI_TRAIN_ACK ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK 0x0018 ++#define ODN_TRAIN_ACK_FAIL_MASK 0x00000001U ++#define ODN_TRAIN_ACK_FAIL_SHIFT 0 ++#define ODN_TRAIN_ACK_FAIL_SIGNED 0 ++ ++#define ODN_TRAIN_ACK_FAIL_COUNT_MASK 0x000000F0U ++#define ODN_TRAIN_ACK_FAIL_COUNT_SHIFT 4 ++#define ODN_TRAIN_ACK_FAIL_COUNT_SIGNED 0 ++ ++#define ODN_TRAIN_ACK_COMPLETE_MASK 0x00000100U ++#define ODN_TRAIN_ACK_COMPLETE_SHIFT 8 ++#define ODN_TRAIN_ACK_COMPLETE_SIGNED 0 ++ ++#define ODN_TRAIN_ACK_OVERIDE_MASK 0x00001000U ++#define ODN_TRAIN_ACK_OVERIDE_SHIFT 12 ++#define ODN_TRAIN_ACK_OVERIDE_SIGNED 0 ++ ++/* ++ Register SAI_TRAIN_ACK_COUNT ++*/ ++#define 
ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK_COUNT 0x001C ++#define ODN_TRAIN_COUNT_MASK 0xFFFFFFFFU ++#define ODN_TRAIN_COUNT_SHIFT 0 ++#define ODN_TRAIN_COUNT_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_NUMBER ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_NUMBER 0x0020 ++#define ODN_CHANNEL_NUMBER_MASK 0x0000FFFFU ++#define ODN_CHANNEL_NUMBER_SHIFT 0 ++#define ODN_CHANNEL_NUMBER_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_EYE_START ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_START 0x0024 ++#define ODN_CHANNEL_EYE_START_MASK 0xFFFFFFFFU ++#define ODN_CHANNEL_EYE_START_SHIFT 0 ++#define ODN_CHANNEL_EYE_START_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_EYE_END ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_END 0x0028 ++#define ODN_CHANNEL_EYE_END_MASK 0xFFFFFFFFU ++#define ODN_CHANNEL_EYE_END_SHIFT 0 ++#define ODN_CHANNEL_EYE_END_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_EYE_PATTERN ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_PATTERN 0x002C ++#define ODN_CHANNEL_EYE_PATTERN_MASK 0xFFFFFFFFU ++#define ODN_CHANNEL_EYE_PATTERN_SHIFT 0 ++#define ODN_CHANNEL_EYE_PATTERN_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_EYE_DEBUG ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_DEBUG 0x0030 ++#define ODN_CHANNEL_EYE_SENSE_MASK 0x00000001U ++#define ODN_CHANNEL_EYE_SENSE_SHIFT 0 ++#define ODN_CHANNEL_EYE_SENSE_SIGNED 0 ++ ++#define ODN_CHANNEL_EYE_COMPLETE_MASK 0x00000002U ++#define ODN_CHANNEL_EYE_COMPLETE_SHIFT 1 ++#define ODN_CHANNEL_EYE_COMPLETE_SIGNED 0 ++ ++ ++/********************************* ++ Generated from: sai_tx_debug.def ++**********************************/ ++ ++/* ++ Register SIG_RESULT ++*/ ++#define ODN_SAI_TX_DEBUG_SIG_RESULT 0x0000 ++#define ODN_TX_SIG_RESULT_VALUE_MASK 0xFFFFFFFFU ++#define ODN_TX_SIG_RESULT_VALUE_SHIFT 0 ++#define ODN_TX_SIG_RESULT_VALUE_SIGNED 0 ++ ++/* ++ Register INIT_SIG ++*/ ++#define ODN_SAI_TX_DEBUG_INIT_SIG 0x0004 ++#define ODN_TX_INIT_SIG_VALUE_MASK 0x00000001U ++#define ODN_TX_INIT_SIG_VALUE_SHIFT 0 ++#define ODN_TX_INIT_SIG_VALUE_SIGNED 0 ++ ++/* ++ Register SAI_BYPASS ++*/ ++#define ODN_SAI_TX_DEBUG_SAI_BYPASS 0x0008 ++#define ODN_TX_BYPASS_EN_MASK 0x00000001U ++#define ODN_TX_BYPASS_EN_SHIFT 0 ++#define ODN_TX_BYPASS_EN_SIGNED 0 ++ ++#define ODN_TX_ACK_RESEND_MASK 0x00000002U ++#define ODN_TX_ACK_RESEND_SHIFT 1 ++#define ODN_TX_ACK_RESEND_SIGNED 0 ++ ++#define ODN_TX_DISABLE_ACK_SEND_MASK 0x00000004U ++#define ODN_TX_DISABLE_ACK_SEND_SHIFT 2 ++#define ODN_TX_DISABLE_ACK_SEND_SIGNED 0 ++ ++/* ++ Register SAI_STATUS ++*/ ++#define ODN_SAI_TX_DEBUG_SAI_STATUS 0x000C ++#define ODN_TX_TRAINING_COMPLETE_MASK 0x00000001U ++#define ODN_TX_TRAINING_COMPLETE_SHIFT 0 ++#define ODN_TX_TRAINING_COMPLETE_SIGNED 0 ++ ++#define ODN_TX_TRAINING_ACK_COMPLETE_MASK 0x00000002U ++#define ODN_TX_TRAINING_ACK_COMPLETE_SHIFT 1 ++#define ODN_TX_TRAINING_ACK_COMPLETE_SIGNED 0 ++ ++ ++ ++#endif /* _ODIN_REGS_H_ */ ++ ++/****************************************************************************** ++ End of file (odin_regs.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/apollo/orion_defs.h b/drivers/gpu/drm/img-rogue/apollo/orion_defs.h +new file mode 100644 +index 000000000000..1691151de58d +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/orion_defs.h +@@ -0,0 +1,183 @@ ++/**************************************************************************** ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Orion Memory Map - View from PCIe ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++****************************************************************************/ ++ ++#ifndef _ORION_DEFS_H_ ++#define _ORION_DEFS_H_ ++ ++/* ++ * These defines have not been autogenerated ++ * Only values different from Odin will be included here ++ */ ++ ++#define DEVICE_ID_ORION 0x1020 ++ ++/* Odin system register banks */ ++#define SRS_REG_BANK_ODN_CLK_BLK 0x02000 ++ ++/* ++ * Orion CLK regs - the srs_clk_blk module defs are not auto generated ++ */ ++#define SRS_PDP_P_CLK_OUT_DIVIDER_REG1 0x620 ++#define SRS_PDP_PCLK_ODIV1_LO_TIME_MASK 0x0000003FU ++#define SRS_PDP_PCLK_ODIV1_LO_TIME_SHIFT 0 ++#define SRS_PDP_PCLK_ODIV1_HI_TIME_MASK 0x00000FC0U ++#define SRS_PDP_PCLK_ODIV1_HI_TIME_SHIFT 6 ++ ++#define SRS_PDP_P_CLK_OUT_DIVIDER_REG2 0x624 ++#define SRS_PDP_PCLK_ODIV2_NOCOUNT_MASK 0x00000040U ++#define SRS_PDP_PCLK_ODIV2_NOCOUNT_SHIFT 6 ++#define SRS_PDP_PCLK_ODIV2_EDGE_MASK 0x00000080U ++#define SRS_PDP_PCLK_ODIV2_EDGE_SHIFT 7 ++#define SRS_PDP_PCLK_ODIV2_FRAC_MASK 0x00007C00U ++#define SRS_PDP_PCLK_ODIV2_FRAC_SHIFT 10 ++ ++#define SRS_PDP_P_CLK_OUT_DIVIDER_REG3 0x61C ++ ++#define SRS_PDP_M_CLK_OUT_DIVIDER_REG1 0x628 ++#define SRS_PDP_MCLK_ODIV1_LO_TIME_MASK 0x0000003FU ++#define SRS_PDP_MCLK_ODIV1_LO_TIME_SHIFT 0 ++#define SRS_PDP_MCLK_ODIV1_HI_TIME_MASK 0x00000FC0U ++#define SRS_PDP_MCLK_ODIV1_HI_TIME_SHIFT 6 ++ ++#define SRS_PDP_M_CLK_OUT_DIVIDER_REG2 0x62C ++#define SRS_PDP_MCLK_ODIV2_NOCOUNT_MASK 0x00000040U ++#define SRS_PDP_MCLK_ODIV2_NOCOUNT_SHIFT 6 ++#define SRS_PDP_MCLK_ODIV2_EDGE_MASK 0x00000080U ++#define SRS_PDP_MCLK_ODIV2_EDGE_SHIFT 7 ++ ++#define SRS_PDP_P_CLK_MULTIPLIER_REG1 0x650 ++#define SRS_PDP_PCLK_MUL1_LO_TIME_MASK 0x0000003FU ++#define SRS_PDP_PCLK_MUL1_LO_TIME_SHIFT 0 ++#define SRS_PDP_PCLK_MUL1_HI_TIME_MASK 0x00000FC0U ++#define SRS_PDP_PCLK_MUL1_HI_TIME_SHIFT 6 ++ ++#define SRS_PDP_P_CLK_MULTIPLIER_REG2 0x654 ++#define SRS_PDP_PCLK_MUL2_NOCOUNT_MASK 0x00000040U ++#define SRS_PDP_PCLK_MUL2_NOCOUNT_SHIFT 6 ++#define SRS_PDP_PCLK_MUL2_EDGE_MASK 0x00000080U ++#define SRS_PDP_PCLK_MUL2_EDGE_SHIFT 7 ++#define SRS_PDP_PCLK_MUL2_FRAC_MASK 0x00007C00U ++#define SRS_PDP_PCLK_MUL2_FRAC_SHIFT 10 ++ ++#define SRS_PDP_P_CLK_MULTIPLIER_REG3 0x64C ++ ++#define SRS_PDP_P_CLK_IN_DIVIDER_REG 0x658 ++#define SRS_PDP_PCLK_IDIV_LO_TIME_MASK 0x0000003FU ++#define SRS_PDP_PCLK_IDIV_LO_TIME_SHIFT 0 ++#define SRS_PDP_PCLK_IDIV_HI_TIME_MASK 0x00000FC0U ++#define SRS_PDP_PCLK_IDIV_HI_TIME_SHIFT 6 ++#define SRS_PDP_PCLK_IDIV_NOCOUNT_MASK 0x00001000U ++#define SRS_PDP_PCLK_IDIV_NOCOUNT_SHIFT 12 ++#define SRS_PDP_PCLK_IDIV_EDGE_MASK 0x00002000U ++#define SRS_PDP_PCLK_IDIV_EDGE_SHIFT 13 ++ ++/* ++ * DUT core clock input divider, DUT reference clock input divider ++ */ ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1 0x0020 ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 ++ ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2 0x0024 ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 ++ ++#define SRS_DUT_REF_CLK_OUT_DIVIDER1 0x0028 ++#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U ++#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 ++#define SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU ++#define 
SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 ++ ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2 0x002C ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 ++ ++/* ++ * DUT interface reference clock input divider ++ */ ++ ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1 0x0228 ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 ++ ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2 0x022C ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 ++ ++/* ++ * Min max values from Xilinx Virtex Ultrascale data sheet DS893, ++ * for speed grade 1. All in Hz. ++ */ ++#define SRS_INPUT_CLOCK_SPEED 100000000U ++#define SRS_INPUT_CLOCK_SPEED_MIN 10000000U ++#define SRS_INPUT_CLOCK_SPEED_MAX 800000000U ++#define SRS_OUTPUT_CLOCK_SPEED_MIN 4690000U ++#define SRS_OUTPUT_CLOCK_SPEED_MAX 630000000U ++#define SRS_VCO_MIN 600000000U ++#define SRS_VCO_MAX 1200000000U ++#define SRS_PFD_MIN 10000000U ++#define SRS_PFD_MAX 450000000U ++ ++/* ++ * Orion interrupt flags ++ */ ++#define SRS_INTERRUPT_ENABLE_PDP1 (1 << SRS_INTERRUPT_ENABLE_PDP_SHIFT) ++#define SRS_INTERRUPT_ENABLE_DUT (1 << SRS_INTERRUPT_ENABLE_DUT_SHIFT) ++#define SRS_INTERRUPT_STATUS_PDP1 (1 << SRS_INTERRUPT_STATUS_PDP_SHIFT) ++#define SRS_INTERRUPT_STATUS_DUT (1 << SRS_INTERRUPT_STATUS_DUT_SHIFT) ++#define SRS_INTERRUPT_CLEAR_PDP1 (1 << SRS_INTERRUPT_CLR_PDP_SHIFT) ++#define SRS_INTERRUPT_CLEAR_DUT (1 << SRS_INTERRUPT_CLR_DUT_SHIFT) ++ ++#endif /* _ORION_DEFS_H_ */ ++ ++/***************************************************************************** ++ End of file (orion_defs.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/apollo/orion_regs.h b/drivers/gpu/drm/img-rogue/apollo/orion_regs.h +new file mode 100644 +index 000000000000..2a626bd2b70a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/orion_regs.h +@@ -0,0 +1,439 @@ ++/****************************************************************************** ++@Title Orion system control register definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Orion FPGA register defs for Sirius RTL ++@Author Autogenerated ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++******************************************************************************/ ++ ++#ifndef _OUT_DRV_H_ ++#define _OUT_DRV_H_ ++ ++/* ++ Register ID ++*/ ++#define SRS_CORE_ID 0x0000 ++#define SRS_ID_VARIANT_MASK 0x0000FFFFU ++#define SRS_ID_VARIANT_SHIFT 0 ++#define SRS_ID_VARIANT_SIGNED 0 ++ ++#define SRS_ID_ID_MASK 0xFFFF0000U ++#define SRS_ID_ID_SHIFT 16 ++#define SRS_ID_ID_SIGNED 0 ++ ++/* ++ Register REVISION ++*/ ++#define SRS_CORE_REVISION 0x0004 ++#define SRS_REVISION_MINOR_MASK 0x000000FFU ++#define SRS_REVISION_MINOR_SHIFT 0 ++#define SRS_REVISION_MINOR_SIGNED 0 ++ ++#define SRS_REVISION_MAJOR_MASK 0x00000F00U ++#define SRS_REVISION_MAJOR_SHIFT 8 ++#define SRS_REVISION_MAJOR_SIGNED 0 ++ ++/* ++ Register CHANGE_SET ++*/ ++#define SRS_CORE_CHANGE_SET 0x0008 ++#define SRS_CHANGE_SET_SET_MASK 0xFFFFFFFFU ++#define SRS_CHANGE_SET_SET_SHIFT 0 ++#define SRS_CHANGE_SET_SET_SIGNED 0 ++ ++/* ++ Register USER_ID ++*/ ++#define SRS_CORE_USER_ID 0x000C ++#define SRS_USER_ID_ID_MASK 0x0000000FU ++#define SRS_USER_ID_ID_SHIFT 0 ++#define SRS_USER_ID_ID_SIGNED 0 ++ ++/* ++ Register USER_BUILD ++*/ ++#define SRS_CORE_USER_BUILD 0x0010 ++#define SRS_USER_BUILD_BUILD_MASK 0xFFFFFFFFU ++#define SRS_USER_BUILD_BUILD_SHIFT 0 ++#define SRS_USER_BUILD_BUILD_SIGNED 0 ++ ++/* ++ Register SOFT_RESETN ++*/ ++#define SRS_CORE_SOFT_RESETN 0x0080 ++#define SRS_SOFT_RESETN_DDR_MASK 0x00000001U ++#define SRS_SOFT_RESETN_DDR_SHIFT 0 ++#define SRS_SOFT_RESETN_DDR_SIGNED 0 ++ ++#define SRS_SOFT_RESETN_USB_MASK 0x00000002U ++#define SRS_SOFT_RESETN_USB_SHIFT 1 ++#define SRS_SOFT_RESETN_USB_SIGNED 0 ++ ++#define SRS_SOFT_RESETN_PDP_MASK 0x00000004U ++#define SRS_SOFT_RESETN_PDP_SHIFT 2 ++#define SRS_SOFT_RESETN_PDP_SIGNED 0 ++ ++#define SRS_SOFT_RESETN_GIST_MASK 0x00000008U ++#define SRS_SOFT_RESETN_GIST_SHIFT 3 ++#define SRS_SOFT_RESETN_GIST_SIGNED 0 ++ ++/* ++ Register DUT_SOFT_RESETN ++*/ ++#define SRS_CORE_DUT_SOFT_RESETN 0x0084 ++#define SRS_DUT_SOFT_RESETN_EXTERNAL_MASK 0x00000001U ++#define SRS_DUT_SOFT_RESETN_EXTERNAL_SHIFT 0 ++#define SRS_DUT_SOFT_RESETN_EXTERNAL_SIGNED 0 ++ ++/* ++ Register SOFT_AUTO_RESETN ++*/ ++#define SRS_CORE_SOFT_AUTO_RESETN 0x0088 
++#define SRS_SOFT_AUTO_RESETN_CFG_MASK 0x00000001U ++#define SRS_SOFT_AUTO_RESETN_CFG_SHIFT 0 ++#define SRS_SOFT_AUTO_RESETN_CFG_SIGNED 0 ++ ++/* ++ Register CLK_GEN_RESET ++*/ ++#define SRS_CORE_CLK_GEN_RESET 0x0090 ++#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_MASK 0x00000001U ++#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT 0 ++#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED 0 ++ ++#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_MASK 0x00000002U ++#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT 1 ++#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED 0 ++ ++#define SRS_CLK_GEN_RESET_MULTI_MMCM_MASK 0x00000004U ++#define SRS_CLK_GEN_RESET_MULTI_MMCM_SHIFT 2 ++#define SRS_CLK_GEN_RESET_MULTI_MMCM_SIGNED 0 ++ ++#define SRS_CLK_GEN_RESET_PDP_MMCM_MASK 0x00000008U ++#define SRS_CLK_GEN_RESET_PDP_MMCM_SHIFT 3 ++#define SRS_CLK_GEN_RESET_PDP_MMCM_SIGNED 0 ++ ++/* ++ Register DUT_MEM ++*/ ++#define SRS_CORE_DUT_MEM 0x0120 ++#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_MASK 0x0000FFFFU ++#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SHIFT 0 ++#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SIGNED 0 ++ ++#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_MASK 0xFFFF0000U ++#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SHIFT 16 ++#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SIGNED 0 ++ ++/* ++ Register APM ++*/ ++#define SRS_CORE_APM 0x0150 ++#define SRS_APM_RESET_EVENT_MASK 0x00000001U ++#define SRS_APM_RESET_EVENT_SHIFT 0 ++#define SRS_APM_RESET_EVENT_SIGNED 0 ++ ++#define SRS_APM_CAPTURE_EVENT_MASK 0x00000002U ++#define SRS_APM_CAPTURE_EVENT_SHIFT 1 ++#define SRS_APM_CAPTURE_EVENT_SIGNED 0 ++ ++/* ++ Register NUM_GPIO ++*/ ++#define SRS_CORE_NUM_GPIO 0x0180 ++#define SRS_NUM_GPIO_NUMBER_MASK 0x0000000FU ++#define SRS_NUM_GPIO_NUMBER_SHIFT 0 ++#define SRS_NUM_GPIO_NUMBER_SIGNED 0 ++ ++/* ++ Register GPIO_EN ++*/ ++#define SRS_CORE_GPIO_EN 0x0184 ++#define SRS_GPIO_EN_DIRECTION_MASK 0x000000FFU ++#define SRS_GPIO_EN_DIRECTION_SHIFT 0 ++#define SRS_GPIO_EN_DIRECTION_SIGNED 0 ++ ++/* ++ Register GPIO ++*/ ++#define SRS_CORE_GPIO 0x0188 ++#define SRS_GPIO_GPIO_MASK 0x000000FFU ++#define SRS_GPIO_GPIO_SHIFT 0 ++#define SRS_GPIO_GPIO_SIGNED 0 ++ ++/* ++ Register SPI_MASTER_IFACE ++*/ ++#define SRS_CORE_SPI_MASTER_IFACE 0x018C ++#define SRS_SPI_MASTER_IFACE_ENABLE_MASK 0x00000001U ++#define SRS_SPI_MASTER_IFACE_ENABLE_SHIFT 0 ++#define SRS_SPI_MASTER_IFACE_ENABLE_SIGNED 0 ++ ++/* ++ Register SRS_IP_STATUS ++*/ ++#define SRS_CORE_SRS_IP_STATUS 0x0200 ++#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_MASK 0x00000001U ++#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SHIFT 0 ++#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SIGNED 0 ++ ++#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_MASK 0x00000002U ++#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SHIFT 1 ++#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SIGNED 0 ++ ++#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00000004U ++#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 2 ++#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0 ++ ++#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00000008U ++#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 3 ++#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0 ++ ++/* ++ Register CORE_CONTROL ++*/ ++#define SRS_CORE_CORE_CONTROL 0x0204 ++#define SRS_CORE_CONTROL_BAR4_OFFSET_MASK 0x0000001FU ++#define SRS_CORE_CONTROL_BAR4_OFFSET_SHIFT 0 ++#define SRS_CORE_CONTROL_BAR4_OFFSET_SIGNED 0 ++ ++#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U ++#define 
SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8 ++#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0 ++ ++#define SRS_CORE_CONTROL_HDMI_MODULE_EN_MASK 0x00001C00U ++#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SHIFT 10 ++#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SIGNED 0 ++ ++/* ++ Register REG_BANK_STATUS ++*/ ++#define SRS_CORE_REG_BANK_STATUS 0x0208 ++#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU ++#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0 ++#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0 ++ ++/* ++ Register MMCM_LOCK_STATUS ++*/ ++#define SRS_CORE_MMCM_LOCK_STATUS 0x020C ++#define SRS_MMCM_LOCK_STATUS_DUT_CORE_MASK 0x00000001U ++#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SHIFT 0 ++#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SIGNED 0 ++ ++#define SRS_MMCM_LOCK_STATUS_DUT_IF_MASK 0x00000002U ++#define SRS_MMCM_LOCK_STATUS_DUT_IF_SHIFT 1 ++#define SRS_MMCM_LOCK_STATUS_DUT_IF_SIGNED 0 ++ ++#define SRS_MMCM_LOCK_STATUS_MULTI_MASK 0x00000004U ++#define SRS_MMCM_LOCK_STATUS_MULTI_SHIFT 2 ++#define SRS_MMCM_LOCK_STATUS_MULTI_SIGNED 0 ++ ++#define SRS_MMCM_LOCK_STATUS_PDP_MASK 0x00000008U ++#define SRS_MMCM_LOCK_STATUS_PDP_SHIFT 3 ++#define SRS_MMCM_LOCK_STATUS_PDP_SIGNED 0 ++ ++/* ++ Register GIST_STATUS ++*/ ++#define SRS_CORE_GIST_STATUS 0x0210 ++#define SRS_GIST_STATUS_MST_MASK 0x000001FFU ++#define SRS_GIST_STATUS_MST_SHIFT 0 ++#define SRS_GIST_STATUS_MST_SIGNED 0 ++ ++#define SRS_GIST_STATUS_SLV_MASK 0x001FF000U ++#define SRS_GIST_STATUS_SLV_SHIFT 12 ++#define SRS_GIST_STATUS_SLV_SIGNED 0 ++ ++#define SRS_GIST_STATUS_SLV_OUT_MASK 0x03000000U ++#define SRS_GIST_STATUS_SLV_OUT_SHIFT 24 ++#define SRS_GIST_STATUS_SLV_OUT_SIGNED 0 ++ ++#define SRS_GIST_STATUS_MST_OUT_MASK 0x70000000U ++#define SRS_GIST_STATUS_MST_OUT_SHIFT 28 ++#define SRS_GIST_STATUS_MST_OUT_SIGNED 0 ++ ++/* ++ Register SENSOR_BOARD ++*/ ++#define SRS_CORE_SENSOR_BOARD 0x0214 ++#define SRS_SENSOR_BOARD_ID_MASK 0x00000003U ++#define SRS_SENSOR_BOARD_ID_SHIFT 0 ++#define SRS_SENSOR_BOARD_ID_SIGNED 0 ++ ++/* ++ Register INTERRUPT_STATUS ++*/ ++#define SRS_CORE_INTERRUPT_STATUS 0x0218 ++#define SRS_INTERRUPT_STATUS_DUT_MASK 0x00000001U ++#define SRS_INTERRUPT_STATUS_DUT_SHIFT 0 ++#define SRS_INTERRUPT_STATUS_DUT_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_PDP_MASK 0x00000002U ++#define SRS_INTERRUPT_STATUS_PDP_SHIFT 1 ++#define SRS_INTERRUPT_STATUS_PDP_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_I2C_MASK 0x00000004U ++#define SRS_INTERRUPT_STATUS_I2C_SHIFT 2 ++#define SRS_INTERRUPT_STATUS_I2C_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_SPI_MASK 0x00000008U ++#define SRS_INTERRUPT_STATUS_SPI_SHIFT 3 ++#define SRS_INTERRUPT_STATUS_SPI_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_APM_MASK 0x00000010U ++#define SRS_INTERRUPT_STATUS_APM_SHIFT 4 ++#define SRS_INTERRUPT_STATUS_APM_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_OS_IRQ_MASK 0x00001FE0U ++#define SRS_INTERRUPT_STATUS_OS_IRQ_SHIFT 5 ++#define SRS_INTERRUPT_STATUS_OS_IRQ_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_IRQ_TEST_MASK 0x40000000U ++#define SRS_INTERRUPT_STATUS_IRQ_TEST_SHIFT 30 ++#define SRS_INTERRUPT_STATUS_IRQ_TEST_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U ++#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31 ++#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0 ++ ++/* ++ Register INTERRUPT_ENABLE ++*/ ++#define SRS_CORE_INTERRUPT_ENABLE 0x021C ++#define SRS_INTERRUPT_ENABLE_DUT_MASK 0x00000001U ++#define SRS_INTERRUPT_ENABLE_DUT_SHIFT 0 ++#define SRS_INTERRUPT_ENABLE_DUT_SIGNED 0 ++ ++#define 
SRS_INTERRUPT_ENABLE_PDP_MASK 0x00000002U ++#define SRS_INTERRUPT_ENABLE_PDP_SHIFT 1 ++#define SRS_INTERRUPT_ENABLE_PDP_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_I2C_MASK 0x00000004U ++#define SRS_INTERRUPT_ENABLE_I2C_SHIFT 2 ++#define SRS_INTERRUPT_ENABLE_I2C_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_SPI_MASK 0x00000008U ++#define SRS_INTERRUPT_ENABLE_SPI_SHIFT 3 ++#define SRS_INTERRUPT_ENABLE_SPI_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_APM_MASK 0x00000010U ++#define SRS_INTERRUPT_ENABLE_APM_SHIFT 4 ++#define SRS_INTERRUPT_ENABLE_APM_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_OS_IRQ_MASK 0x00001FE0U ++#define SRS_INTERRUPT_ENABLE_OS_IRQ_SHIFT 5 ++#define SRS_INTERRUPT_ENABLE_OS_IRQ_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_IRQ_TEST_MASK 0x40000000U ++#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SHIFT 30 ++#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U ++#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31 ++#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_CLR ++*/ ++#define SRS_CORE_INTERRUPT_CLR 0x0220 ++#define SRS_INTERRUPT_CLR_DUT_MASK 0x00000001U ++#define SRS_INTERRUPT_CLR_DUT_SHIFT 0 ++#define SRS_INTERRUPT_CLR_DUT_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_PDP_MASK 0x00000002U ++#define SRS_INTERRUPT_CLR_PDP_SHIFT 1 ++#define SRS_INTERRUPT_CLR_PDP_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_I2C_MASK 0x00000004U ++#define SRS_INTERRUPT_CLR_I2C_SHIFT 2 ++#define SRS_INTERRUPT_CLR_I2C_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_SPI_MASK 0x00000008U ++#define SRS_INTERRUPT_CLR_SPI_SHIFT 3 ++#define SRS_INTERRUPT_CLR_SPI_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_APM_MASK 0x00000010U ++#define SRS_INTERRUPT_CLR_APM_SHIFT 4 ++#define SRS_INTERRUPT_CLR_APM_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_OS_IRQ_MASK 0x00001FE0U ++#define SRS_INTERRUPT_CLR_OS_IRQ_SHIFT 5 ++#define SRS_INTERRUPT_CLR_OS_IRQ_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_IRQ_TEST_MASK 0x40000000U ++#define SRS_INTERRUPT_CLR_IRQ_TEST_SHIFT 30 ++#define SRS_INTERRUPT_CLR_IRQ_TEST_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_MASTER_CLEAR_MASK 0x80000000U ++#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SHIFT 31 ++#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SIGNED 0 ++ ++/* ++ Register INTERRUPT_TEST ++*/ ++#define SRS_CORE_INTERRUPT_TEST 0x0224 ++#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_MASK 0x00000001U ++#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0 ++#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0 ++ ++/* ++ Register INTERRUPT_TIMEOUT_CLR ++*/ ++#define SRS_CORE_INTERRUPT_TIMEOUT_CLR 0x0228 ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK 0x00000002U ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SHIFT 1 ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SIGNED 0 ++ ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_MASK 0x00000001U ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SHIFT 0 ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SIGNED 0 ++ ++/* ++ Register INTERRUPT_TIMEOUT ++*/ ++#define SRS_CORE_INTERRUPT_TIMEOUT 0x022C ++#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_MASK 0xFFFFFFFFU ++#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SHIFT 0 ++#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SIGNED 0 ++ ++#endif /* _OUT_DRV_H_ */ ++ ++/****************************************************************************** ++ End of file (orion_regs.h) ++******************************************************************************/ 
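[Editorial note: the clock-constraint constants in orion_defs.h above (SRS_INPUT_CLOCK_SPEED, SRS_VCO_MIN/MAX, SRS_PFD_MIN/MAX, SRS_OUTPUT_CLOCK_SPEED_MIN/MAX) bound every input-divider/multiplier/output-divider (id, m, od) choice the PDP code later makes, via the standard Xilinx MMCM relations fPFD = fIN / id, fVCO = fIN * m / id, fOUT = fVCO / od. Below is a minimal sketch of that range check, assuming those relations hold; the helper name srs_pll_params_valid is illustrative and not part of the patch.]

#include <linux/math64.h>
#include <linux/types.h>

#include "orion_defs.h"

/*
 * Editorial sketch, not part of the patch: range-check one PLL setting
 * against the DS893 limits from orion_defs.h. For example, Orion's
 * 1280x720 entry (id = 5, m = 37, od1 = 10) gives fPFD = 20 MHz,
 * fVCO = 740 MHz and fOUT = 74 MHz, all within range.
 */
static bool srs_pll_params_valid(u32 fin, u32 id, u32 m, u32 od)
{
	u32 fpfd = fin / id;			/* phase-frequency detector */
	u64 fvco = div_u64((u64)fin * m, id);	/* internal VCO */
	u64 fout = div_u64(fvco, od);		/* divided output clock */

	return fpfd >= SRS_PFD_MIN && fpfd <= SRS_PFD_MAX &&
	       fvco >= SRS_VCO_MIN && fvco <= SRS_VCO_MAX &&
	       fout >= SRS_OUTPUT_CLOCK_SPEED_MIN &&
	       fout <= SRS_OUTPUT_CLOCK_SPEED_MAX;
}

[Called as srs_pll_params_valid(SRS_INPUT_CLOCK_SPEED, 5, 37, 10) this returns true. Note that Odin's 1080p setting (id = 5, m = 37, od1 = 5, i.e. 148 MHz) would also pass these static limits, so Orion's missing 1920x1080 support is presumably down to something other than the DS893 ranges.]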
+diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c b/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c +new file mode 100644 +index 000000000000..712d9aa01087 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.c +@@ -0,0 +1,332 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include <linux/delay.h> ++ ++#include "pdp_apollo.h" ++#include "pdp_common.h" ++#include "pdp_regs.h" ++#include "tcf_rgbpdp_regs.h" ++#include "tcf_pll.h" ++ ++/* Map a register to the "pll-regs" region */ ++#define PLL_REG(n) ((n) - TCF_PLL_PLL_PDP_CLK0) ++ ++bool pdp_apollo_clocks_set(struct device *dev, ++ void __iomem *pdp_reg, void __iomem *pll_reg, ++ u32 clock_in_mhz, ++ void __iomem *odn_core_reg, ++ u32 hdisplay, u32 vdisplay) ++{ ++ /* ++ * Set up TCF_CR_PLL_PDP_CLK1TO5 based on the main clock speed ++ * (clock 0 or 3) ++ */ ++ const u32 clock = (clock_in_mhz >= 50) ? 
0 : 0x3; ++ ++ /* Set phase 0, ratio 50:50 and frequency in MHz */ ++ pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_CLK0), clock_in_mhz); ++ ++ pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_CLK1TO5), clock); ++ ++ /* Now initiate reprogramming of the PLLs */ ++ pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO), 0x1); ++ ++ udelay(1000); ++ ++ pll_wreg32(pll_reg, PLL_REG(TCF_PLL_PLL_PDP_DRP_GO), 0x0); ++ ++ return true; ++} ++ ++void pdp_apollo_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set updates: %s\n", enable ? "enable" : "disable"); ++#endif ++ /* nothing to do here */ ++} ++ ++void pdp_apollo_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable"); ++#endif ++ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL); ++ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, ++ SYNCACTIVE_SHIFT, SYNCACTIVE_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value); ++} ++ ++void pdp_apollo_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set powerdwn: %s\n", enable ? "enable" : "disable"); ++#endif ++ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL); ++ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, ++ POWERDN_SHIFT, POWERDN_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value); ++} ++ ++void pdp_apollo_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable"); ++#endif ++ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB); ++ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, ++ INTEN_VBLNK0_SHIFT, INTEN_VBLNK0_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB, value); ++} ++ ++bool pdp_apollo_check_and_clear_vblank(struct device *dev, ++ void __iomem *pdp_reg) ++{ ++ u32 value; ++ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT); ++ ++ if (REG_VALUE_GET(value, INTS_VBLNK0_SHIFT, INTS_VBLNK0_MASK)) { ++ value = REG_VALUE_SET(0, 0x1, ++ INTCLR_VBLNK0_SHIFT, INTCLR_VBLNK0_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR, value); ++ return true; ++ } ++ return false; ++} ++ ++void pdp_apollo_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, ++ u32 plane, bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set plane %u: %s\n", ++ plane, enable ? "enable" : "disable"); ++#endif ++ ++ if (plane > 0) { ++ dev_err(dev, "Maximum of 1 plane is supported\n"); ++ return; ++ } ++ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL); ++ value = REG_VALUE_SET(value, enable ? 
0x1 : 0x0, ++ STR1STREN_SHIFT, STR1STREN_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, value); ++} ++ ++void pdp_apollo_reset_planes(struct device *dev, void __iomem *pdp_reg) ++{ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Reset planes\n"); ++#endif ++ ++ pdp_apollo_set_plane_enabled(dev, pdp_reg, 0, false); ++} ++ ++void pdp_apollo_set_surface(struct device *dev, void __iomem *pdp_reg, ++ u32 plane, u32 address, ++ u32 posx, u32 posy, ++ u32 width, u32 height, u32 stride, ++ u32 format, u32 alpha, bool blend) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, ++ "Set surface: size=%dx%d stride=%d format=%d address=0x%x\n", ++ width, height, stride, format, address); ++#endif ++ ++ if (plane > 0) { ++ dev_err(dev, "Maximum of 1 plane is supported\n"); ++ return; ++ } ++ ++ /* Size & format */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF); ++ value = REG_VALUE_SET(value, width - 1, ++ STR1WIDTH_SHIFT, STR1WIDTH_MASK); ++ value = REG_VALUE_SET(value, height - 1, ++ STR1HEIGHT_SHIFT, STR1HEIGHT_MASK); ++ value = REG_VALUE_SET(value, format, ++ STR1PIXFMT_SHIFT, STR1PIXFMT_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF, value); ++ /* Stride */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_PDP_STR1POSN); ++ value = REG_VALUE_SET(value, ++ (stride >> DCPDP_STR1POSN_STRIDE_SHIFT) - 1, ++ STR1STRIDE_SHIFT, STR1STRIDE_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_PDP_STR1POSN, value); ++ /* Disable interlaced output */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL); ++ value = REG_VALUE_SET(value, 0x0, ++ STR1INTFIELD_SHIFT, ++ STR1INTFIELD_MASK); ++ /* Frame buffer base address */ ++ value = REG_VALUE_SET(value, ++ address >> DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT, ++ STR1BASE_SHIFT, STR1BASE_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, value); ++} ++ ++void pdp_apollo_mode_set(struct device *dev, void __iomem *pdp_reg, ++ u32 h_display, u32 v_display, ++ u32 hbps, u32 ht, u32 has, ++ u32 hlbs, u32 hfps, u32 hrbs, ++ u32 vbps, u32 vt, u32 vas, ++ u32 vtbs, u32 vfps, u32 vbbs, ++ bool nhsync, bool nvsync) ++{ ++ u32 value; ++ ++ dev_info(dev, "Set mode: %dx%d\n", h_display, v_display); ++#ifdef PDP_VERBOSE ++ dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n", ++ ht, hbps, has, hlbs, hfps, hrbs); ++ dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n", ++ vt, vbps, vas, vtbs, vfps, vbbs); ++#endif ++ ++#if 0 ++ /* I don't really know what this is doing but it was in the Android ++ * implementation (not in the Linux one). Seems not to be necessary ++ * though! 
++ */ ++ if (pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL) ++ != 0x0000C010) { ++ /* Buffer request threshold */ ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL, ++ 0x00001C10); ++ } ++#endif ++ ++ /* Border colour */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL); ++ value = REG_VALUE_SET(value, 0x0, BORDCOL_SHIFT, BORDCOL_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL, value); ++ ++ /* Update control */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL); ++ value = REG_VALUE_SET(value, 0x0, UPDFIELD_SHIFT, UPDFIELD_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL, value); ++ ++ /* Set hsync timings */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1); ++ value = REG_VALUE_SET(value, hbps, HBPS_SHIFT, HBPS_MASK); ++ value = REG_VALUE_SET(value, ht, HT_SHIFT, HT_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1, value); ++ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2); ++ value = REG_VALUE_SET(value, has, HAS_SHIFT, HAS_MASK); ++ value = REG_VALUE_SET(value, hlbs, HLBS_SHIFT, HLBS_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2, value); ++ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3); ++ value = REG_VALUE_SET(value, hfps, HFPS_SHIFT, HFPS_MASK); ++ value = REG_VALUE_SET(value, hrbs, HRBS_SHIFT, HRBS_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3, value); ++ ++ /* Set vsync timings */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1); ++ value = REG_VALUE_SET(value, vbps, VBPS_SHIFT, VBPS_MASK); ++ value = REG_VALUE_SET(value, vt, VT_SHIFT, VT_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1, value); ++ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2); ++ value = REG_VALUE_SET(value, vas, VAS_SHIFT, VAS_MASK); ++ value = REG_VALUE_SET(value, vtbs, VTBS_SHIFT, VTBS_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2, value); ++ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3); ++ value = REG_VALUE_SET(value, vfps, VFPS_SHIFT, VFPS_MASK); ++ value = REG_VALUE_SET(value, vbbs, VBBS_SHIFT, VBBS_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3, value); ++ ++ /* Horizontal data enable */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL); ++ value = REG_VALUE_SET(value, hlbs, HDES_SHIFT, HDES_MASK); ++ value = REG_VALUE_SET(value, hfps, HDEF_SHIFT, HDEF_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL, value); ++ ++ /* Vertical data enable */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL); ++ value = REG_VALUE_SET(value, vtbs, VDES_SHIFT, VDES_MASK); ++ value = REG_VALUE_SET(value, vfps, VDEF_SHIFT, VDEF_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL, value); ++ ++ /* Vertical event start and vertical fetch start */ ++ value = pdp_rreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT); ++ value = REG_VALUE_SET(value, vbps, VFETCH_SHIFT, VFETCH_MASK); ++ value = REG_VALUE_SET(value, vfps, VEVENT_SHIFT, VEVENT_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT, value); ++ ++ /* Set up polarities of sync/blank */ ++ value = REG_VALUE_SET(0, 0x1, BLNKPOL_SHIFT, BLNKPOL_MASK); ++ ++ /* ++ * Enable this if you want vblnk1. You also need to change to vblnk1 ++ * in the interrupt handler. 
++ */ ++#if 0 ++ value = REG_VALUE_SET(value, 0x1, FIELDPOL_SHIFT, FIELDPOL_MASK); ++#endif ++ if (nhsync) ++ value = REG_VALUE_SET(value, 0x1, HSPOL_SHIFT, HSPOL_MASK); ++ if (nvsync) ++ value = REG_VALUE_SET(value, 0x1, VSPOL_SHIFT, VSPOL_MASK); ++ pdp_wreg32(pdp_reg, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, value); ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h b/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h +new file mode 100644 +index 000000000000..fc7db08a041e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/pdp_apollo.h +@@ -0,0 +1,88 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#if !defined(__PDP_APOLLO_H__) ++#define __PDP_APOLLO_H__ ++ ++#include <linux/device.h> ++#include <linux/types.h> ++ ++bool pdp_apollo_clocks_set(struct device *dev, ++ void __iomem *pdp_reg, void __iomem *pll_reg, ++ u32 clock_in_mhz, ++ void __iomem *odn_core_reg, ++ u32 hdisplay, u32 vdisplay); ++ ++void pdp_apollo_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable); ++ ++void pdp_apollo_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable); ++ ++void pdp_apollo_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable); ++ ++void pdp_apollo_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable); ++ ++bool pdp_apollo_check_and_clear_vblank(struct device *dev, ++ void __iomem *pdp_reg); ++ ++void pdp_apollo_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, ++ u32 plane, bool enable); ++ ++void pdp_apollo_reset_planes(struct device *dev, void __iomem *pdp_reg); ++ ++void pdp_apollo_set_surface(struct device *dev, void __iomem *pdp_reg, ++ u32 plane, u32 address, ++ u32 posx, u32 posy, ++ u32 width, u32 height, u32 stride, ++ u32 format, u32 alpha, bool blend); ++ ++void pdp_apollo_mode_set(struct device *dev, void __iomem *pdp_reg, ++ u32 h_display, u32 v_display, ++ u32 hbps, u32 ht, u32 has, ++ u32 hlbs, u32 hfps, u32 hrbs, ++ u32 vbps, u32 vt, u32 vas, ++ u32 vtbs, u32 vfps, u32 vbbs, ++ bool nhsync, bool nvsync); ++ ++#endif /* __PDP_APOLLO_H__ */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_common.h b/drivers/gpu/drm/img-rogue/apollo/pdp_common.h +new file mode 100644 +index 000000000000..68e7130ecd9c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/pdp_common.h +@@ -0,0 +1,107 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#if !defined(__PDP_COMMON_H__) ++#define __PDP_COMMON_H__ ++ ++#include <linux/io.h> ++ ++/*#define PDP_VERBOSE*/ ++ ++#define REG_VALUE_GET(v, s, m) \ ++ (u32)(((v) & (m)) >> (s)) ++#define REG_VALUE_SET(v, b, s, m) \ ++ (u32)(((v) & (u32)~(m)) | (u32)(((b) << (s)) & (m))) ++/* Active low */ ++#define REG_VALUE_LO(v, b, s, m) \ ++ (u32)((v) & ~(u32)(((b) << (s)) & (m))) ++ ++enum pdp_version { ++ PDP_VERSION_APOLLO, ++ PDP_VERSION_ODIN, ++ PDP_VERSION_PLATO, ++}; ++ ++enum pdp_odin_subversion { ++ PDP_ODIN_NONE = 0, ++ PDP_ODIN_ORION, ++}; ++ ++enum pdp_output_device { ++ PDP_OUTPUT_PDP1 = 1, ++ PDP_OUTPUT_PDP2, ++}; ++ ++/* Register R-W */ ++static inline u32 core_rreg32(void __iomem *base, resource_size_t reg) ++{ ++ return ioread32(base + reg); ++} ++ ++static inline void core_wreg32(void __iomem *base, resource_size_t reg, ++ u32 value) ++{ ++ iowrite32(value, base + reg); ++} ++ ++static inline u32 pdp_rreg32(void __iomem *base, resource_size_t reg) ++{ ++ return ioread32(base + reg); ++} ++ ++static inline void pdp_wreg32(void __iomem *base, resource_size_t reg, ++ u32 value) ++{ ++ iowrite32(value, base + reg); ++} ++ ++static inline u32 pll_rreg32(void __iomem *base, resource_size_t reg) ++{ ++ return ioread32(base + reg); ++} ++ ++static inline void pll_wreg32(void __iomem *base, resource_size_t reg, ++ u32 value) ++{ ++ iowrite32(value, base + reg); ++} ++ ++#endif /* __PDP_COMMON_H__ */
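[Editorial note: everything below is built on pdp_common.h's REG_VALUE_GET/SET/LO read-modify-write macros. As a concrete illustration, this is the idiom pdp_odin.c applies to the active-low Orion PDP soft reset (SRS_CORE_SOFT_RESETN bit 2, defined in orion_regs.h above). A sketch only: the wrapper name srs_pdp_reset_assert is an assumption, not a function from the patch.]

#include <linux/io.h>
#include <linux/types.h>

#include "pdp_common.h"
#include "orion_regs.h"

/*
 * Editorial sketch, not part of the patch. With SRS_CORE_SOFT_RESETN
 * holding 0x0000000F: asserting computes REG_VALUE_LO(0xF, 1, 2, 0x4)
 * == 0x0000000B (PDP held in reset, all other bits preserved);
 * releasing computes REG_VALUE_SET(0xB, 1, 2, 0x4) == 0x0000000F.
 */
static void srs_pdp_reset_assert(void __iomem *srs_core_reg, bool assert)
{
	u32 value = core_rreg32(srs_core_reg, SRS_CORE_SOFT_RESETN);

	if (assert)	/* active low: clear the bit to hold in reset */
		value = REG_VALUE_LO(value, 1, SRS_SOFT_RESETN_PDP_SHIFT,
				     SRS_SOFT_RESETN_PDP_MASK);
	else		/* set the bit to release the reset */
		value = REG_VALUE_SET(value, 1, SRS_SOFT_RESETN_PDP_SHIFT,
				      SRS_SOFT_RESETN_PDP_MASK);
	core_wreg32(srs_core_reg, SRS_CORE_SOFT_RESETN, value);
}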
+diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_odin.c b/drivers/gpu/drm/img-rogue/apollo/pdp_odin.c +new file mode 100644 +index 000000000000..6c30de91a059 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/pdp_odin.c +@@ -0,0 +1,1230 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include <linux/delay.h> ++#include <linux/io.h> ++ ++#include "pdp_common.h" ++#include "pdp_odin.h" ++#include "odin_defs.h" ++#include "odin_regs.h" ++#include "orion_defs.h" ++#include "orion_regs.h" ++#include "pfim_defs.h" ++#include "pfim_regs.h" ++ ++#define ODIN_PLL_REG(n) ((n) - ODN_PDP_P_CLK_OUT_DIVIDER_REG1) ++ ++struct odin_displaymode { ++ int w; /* display width */ ++ int h; /* display height */ ++ int id; /* pixel clock input divider */ ++ int m; /* pixel clock multiplier */ ++ int od1; /* pixel clock output divider */ ++ int od2; /* mem clock output divider */ ++}; ++ ++struct pfim_property { ++ u32 tiles_per_line; ++ u32 tile_type; ++ u32 tile_xsize; ++ u32 tile_ysize; ++}; ++ ++/* ++ * For Odin, only the listed modes below are supported. ++ * 1080p id=5, m=37, od1=5, od2=5 ++ * 720p id=5, m=37, od1=10, od2=5 ++ * 1280x1024 id=1, m=14, od1=13, od2=8 ++ * 1440x900 id=5, m=53, od1=10, od2=8 ++ * 1280x960 id=3, m=40, od1=13, od2=9 ++ * 1024x768 id=1, m=13, od1=20, od2=10 ++ * 800x600 id=2, m=20, od1=25, od2=7 ++ * 640x480 id=1, m=12, od1=48, od2=9 ++ * ... where id is the PDP_P_CLK input divider, ++ * m is PDP_P_CLK multiplier regs 1 to 3 ++ * od1 is PDP_P_clk output divider regs 1 to 3 ++ * od2 is PDP_M_clk output divider regs 1 to 2 ++ */ ++static const struct odin_displaymode odin_modes[] = { ++ {.w = 1920, .h = 1080, .id = 5, .m = 37, .od1 = 5, .od2 = 5}, ++ {.w = 1280, .h = 720, .id = 5, .m = 37, .od1 = 10, .od2 = 5}, ++ {.w = 1280, .h = 1024, .id = 1, .m = 14, .od1 = 13, .od2 = 10}, ++ {.w = 1440, .h = 900, .id = 5, .m = 53, .od1 = 10, .od2 = 8}, ++ {.w = 1280, .h = 960, .id = 3, .m = 40, .od1 = 13, .od2 = 9}, ++ {.w = 1024, .h = 768, .id = 1, .m = 13, .od1 = 20, .od2 = 10}, ++ {.w = 800, .h = 600, .id = 2, .m = 20, .od1 = 25, .od2 = 7}, ++ {.w = 640, .h = 480, .id = 1, .m = 12, .od1 = 48, .od2 = 9}, ++ {.w = 0, .h = 0, .id = 0, .m = 0, .od1 = 0, .od2 = 0} ++}; ++ ++/* ++ * For Orion, only the listed modes below are supported. ++ * 1920x1080 mode is currently not supported. 
++ */ ++static const struct odin_displaymode orion_modes[] = { ++ {.w = 1280, .h = 720, .id = 5, .m = 37, .od1 = 10, .od2 = 7}, ++ {.w = 1280, .h = 1024, .id = 1, .m = 12, .od1 = 11, .od2 = 10}, ++ {.w = 1440, .h = 900, .id = 5, .m = 53, .od1 = 10, .od2 = 9}, ++ {.w = 1280, .h = 960, .id = 5, .m = 51, .od1 = 10, .od2 = 9}, ++ {.w = 1024, .h = 768, .id = 3, .m = 33, .od1 = 17, .od2 = 10}, ++ {.w = 800, .h = 600, .id = 2, .m = 24, .od1 = 31, .od2 = 12}, ++ {.w = 640, .h = 480, .id = 1, .m = 12, .od1 = 50, .od2 = 12}, ++ {.w = 0, .h = 0, .id = 0, .m = 0, .od1 = 0, .od2 = 0} ++}; ++ ++static const struct pfim_property pfim_properties[] = { ++ [ODIN_PFIM_MOD_LINEAR] = {0}, ++ [ODIN_PFIM_FBCDC_8X8_V12] = {.tiles_per_line = 8, ++ .tile_type = ODN_PFIM_TILETYPE_8X8, ++ .tile_xsize = 8, ++ .tile_ysize = 8}, ++ [ODIN_PFIM_FBCDC_16X4_V12] = {.tiles_per_line = 16, ++ .tile_type = ODN_PFIM_TILETYPE_16X4, ++ .tile_xsize = 16, ++ .tile_ysize = 4}, ++}; ++ ++static const u32 GRPH_SURF_OFFSET[] = { ++ ODN_PDP_GRPH1SURF_OFFSET, ++ ODN_PDP_GRPH2SURF_OFFSET, ++ ODN_PDP_VID1SURF_OFFSET, ++ ODN_PDP_GRPH4SURF_OFFSET ++}; ++static const u32 GRPH_SURF_GRPH_PIXFMT_SHIFT[] = { ++ ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT, ++ ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT, ++ ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT, ++ ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT ++}; ++static const u32 GRPH_SURF_GRPH_PIXFMT_MASK[] = { ++ ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK, ++ ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK, ++ ODN_PDP_VID1SURF_VID1PIXFMT_MASK, ++ ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK ++}; ++static const u32 GRPH_GALPHA_OFFSET[] = { ++ ODN_PDP_GRPH1GALPHA_OFFSET, ++ ODN_PDP_GRPH2GALPHA_OFFSET, ++ ODN_PDP_VID1GALPHA_OFFSET, ++ ODN_PDP_GRPH4GALPHA_OFFSET ++}; ++static const u32 GRPH_GALPHA_GRPH_GALPHA_SHIFT[] = { ++ ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT, ++ ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT, ++ ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT, ++ ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT ++}; ++static const u32 GRPH_GALPHA_GRPH_GALPHA_MASK[] = { ++ ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK, ++ ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK, ++ ODN_PDP_VID1GALPHA_VID1GALPHA_MASK, ++ ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK ++}; ++static const u32 GRPH_CTRL_OFFSET[] = { ++ ODN_PDP_GRPH1CTRL_OFFSET, ++ ODN_PDP_GRPH2CTRL_OFFSET, ++ ODN_PDP_VID1CTRL_OFFSET, ++ ODN_PDP_GRPH4CTRL_OFFSET, ++}; ++static const u32 GRPH_CTRL_GRPH_BLEND_SHIFT[] = { ++ ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT, ++ ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT, ++ ODN_PDP_VID1CTRL_VID1BLEND_SHIFT, ++ ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT ++}; ++static const u32 GRPH_CTRL_GRPH_BLEND_MASK[] = { ++ ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK, ++ ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK, ++ ODN_PDP_VID1CTRL_VID1BLEND_MASK, ++ ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK ++}; ++static const u32 GRPH_CTRL_GRPH_BLENDPOS_SHIFT[] = { ++ ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT, ++ ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT, ++ ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT, ++ ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT ++}; ++static const u32 GRPH_CTRL_GRPH_BLENDPOS_MASK[] = { ++ ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK, ++ ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK, ++ ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK, ++ ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK ++}; ++static const u32 GRPH_CTRL_GRPH_STREN_SHIFT[] = { ++ ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT, ++ ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT, ++ ODN_PDP_VID1CTRL_VID1STREN_SHIFT, ++ ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT ++}; ++static const u32 GRPH_CTRL_GRPH_STREN_MASK[] = { ++ ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK, ++ ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK, ++ ODN_PDP_VID1CTRL_VID1STREN_MASK, ++ 
ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK ++}; ++static const u32 GRPH_POSN_OFFSET[] = { ++ ODN_PDP_GRPH1POSN_OFFSET, ++ ODN_PDP_GRPH2POSN_OFFSET, ++ ODN_PDP_VID1POSN_OFFSET, ++ ODN_PDP_GRPH4POSN_OFFSET ++}; ++static const u32 GRPH_POSN_GRPH_XSTART_SHIFT[] = { ++ ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT, ++ ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT, ++ ODN_PDP_VID1POSN_VID1XSTART_SHIFT, ++ ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT, ++}; ++static const u32 GRPH_POSN_GRPH_XSTART_MASK[] = { ++ ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK, ++ ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK, ++ ODN_PDP_VID1POSN_VID1XSTART_MASK, ++ ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK, ++}; ++static const u32 GRPH_POSN_GRPH_YSTART_SHIFT[] = { ++ ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT, ++ ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT, ++ ODN_PDP_VID1POSN_VID1YSTART_SHIFT, ++ ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT, ++}; ++static const u32 GRPH_POSN_GRPH_YSTART_MASK[] = { ++ ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK, ++ ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK, ++ ODN_PDP_VID1POSN_VID1YSTART_MASK, ++ ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK, ++}; ++static const u32 GRPH_SIZE_OFFSET[] = { ++ ODN_PDP_GRPH1SIZE_OFFSET, ++ ODN_PDP_GRPH2SIZE_OFFSET, ++ ODN_PDP_VID1SIZE_OFFSET, ++ ODN_PDP_GRPH4SIZE_OFFSET, ++}; ++static const u32 GRPH_SIZE_GRPH_WIDTH_SHIFT[] = { ++ ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT, ++ ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT, ++ ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT, ++ ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT ++}; ++static const u32 GRPH_SIZE_GRPH_WIDTH_MASK[] = { ++ ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK, ++ ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK, ++ ODN_PDP_VID1SIZE_VID1WIDTH_MASK, ++ ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK ++}; ++static const u32 GRPH_SIZE_GRPH_HEIGHT_SHIFT[] = { ++ ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT, ++ ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT, ++ ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT, ++ ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT ++}; ++static const u32 GRPH_SIZE_GRPH_HEIGHT_MASK[] = { ++ ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK, ++ ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK, ++ ODN_PDP_VID1SIZE_VID1HEIGHT_MASK, ++ ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK ++}; ++static const u32 GRPH_STRIDE_OFFSET[] = { ++ ODN_PDP_GRPH1STRIDE_OFFSET, ++ ODN_PDP_GRPH2STRIDE_OFFSET, ++ ODN_PDP_VID1STRIDE_OFFSET, ++ ODN_PDP_GRPH4STRIDE_OFFSET ++}; ++static const u32 GRPH_STRIDE_GRPH_STRIDE_SHIFT[] = { ++ ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT, ++ ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT, ++ ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT, ++ ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT ++}; ++static const u32 GRPH_STRIDE_GRPH_STRIDE_MASK[] = { ++ ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK, ++ ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK, ++ ODN_PDP_VID1STRIDE_VID1STRIDE_MASK, ++ ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK ++}; ++static const u32 GRPH_INTERLEAVE_CTRL_OFFSET[] = { ++ ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET, ++ ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET, ++ ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET, ++ ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET ++}; ++static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[] = { ++ ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT, ++ ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT, ++ ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT, ++ ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT ++}; ++static const u32 GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[] = { ++ ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK, ++ ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK, ++ ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK, ++ ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK ++}; ++static const u32 GRPH_BASEADDR_OFFSET[] = { ++ ODN_PDP_GRPH1BASEADDR_OFFSET, ++ ODN_PDP_GRPH2BASEADDR_OFFSET, 
++ ODN_PDP_VID1BASEADDR_OFFSET,
++ ODN_PDP_GRPH4BASEADDR_OFFSET
++};
++
++static const u32 ODN_INTERNAL_RESETN_PDP_MASK[] = {
++ ODN_INTERNAL_RESETN_PDP1_MASK,
++ ODN_INTERNAL_RESETN_PDP2_MASK
++};
++
++static const u32 ODN_INTERNAL_RESETN_PDP_SHIFT[] = {
++ ODN_INTERNAL_RESETN_PDP1_SHIFT,
++ ODN_INTERNAL_RESETN_PDP2_SHIFT
++};
++
++static void get_odin_clock_settings(u32 value, u32 *lo_time, u32 *hi_time,
++ u32 *no_count, u32 *edge)
++{
++ u32 lt, ht;
++
++ /* If the value is 1, High Time & Low Time are both set to 1
++ * and the NOCOUNT bit is set to 1.
++ */
++ if (value == 1) {
++ *lo_time = 1;
++ *hi_time = 1;
++
++ /* If od is an odd number then write 1 to NO_COUNT
++ * otherwise write 0.
++ */
++ *no_count = 1;
++
++ /* If m is an odd number then write 1 to EDGE bit of MR2
++ * otherwise write 0.
++ * If id is an odd number then write 1 to EDGE bit of ID
++ * otherwise write 0.
++ */
++ *edge = 0;
++ return;
++ }
++ *no_count = 0;
++
++ /* High Time & Low Time are half the value listed for each PDP mode */
++ lt = value >> 1;
++ ht = lt;
++
++ /* If the value is odd, Low Time is rounded up to the nearest integer,
++ * High Time is rounded down, and Edge is set to 1,
++ * e.g. a value of 5 gives lo_time = 3, hi_time = 2 and edge = 1.
++ */
++ if (value & 1) {
++ lt++;
++
++ /* If m is an odd number then write 1 to EDGE bit of MR2
++ * otherwise write 0.
++ * If id is an odd number then write 1 to EDGE bit of ID
++ * otherwise write 0.
++ */
++ *edge = 1;
++
++ } else {
++ *edge = 0;
++ }
++ *hi_time = ht;
++ *lo_time = lt;
++}
++
++static const struct odin_displaymode *get_odin_mode(int w, int h,
++ enum pdp_odin_subversion pv)
++{
++ const struct odin_displaymode *pdp_modes;
++ int n = 0;
++
++ if (pv == PDP_ODIN_ORION)
++ pdp_modes = orion_modes;
++ else
++ pdp_modes = odin_modes;
++
++ do {
++ if ((pdp_modes[n].w == w) && (pdp_modes[n].h == h))
++ return pdp_modes + n;
++
++ } while (pdp_modes[n++].w);
++
++ return NULL;
++}
++
++bool pdp_odin_clocks_set(struct device *dev,
++ void __iomem *pdp_reg, void __iomem *pll_reg,
++ u32 clock_freq, u32 dev_num,
++ void __iomem *odn_core_reg,
++ u32 hdisplay, u32 vdisplay,
++ enum pdp_odin_subversion pdpsubv)
++{
++ u32 value;
++ const struct odin_displaymode *odispl;
++ u32 hi_time, lo_time, no_count, edge;
++ u32 core_id, core_rev;
++
++ core_id = pdp_rreg32(pdp_reg, ODN_PDP_CORE_ID_OFFSET);
++ dev_info(dev, "Odin-PDP CORE_ID %08X\n", core_id);
++
++ core_rev = pdp_rreg32(odn_core_reg, ODN_PDP_CORE_REV_OFFSET);
++ dev_info(dev, "Odin-PDP CORE_REV %08X\n", core_rev);
++
++ odispl = get_odin_mode(hdisplay, vdisplay, pdpsubv);
++ if (!odispl) {
++ dev_err(dev, "Display mode not supported.\n");
++ return false;
++ }
++
++ /*
++ * The PDP uses a Xilinx clock that requires read
++ * modify write for all registers.
++ * It is essential that only the specified bits are changed
++ * because other bits are in use.
++ * To change the PDP clocks, reset the PDP & PDP MMCM (PLL) first,
++ * then apply the changes and un-reset the MMCM & PDP.
++ * A warm reset will keep the changes.
++ * wr 0x000080 0x1f7 ; # reset pdp
++ * wr 0x000090 8 ; # reset pdp mmcm
++ * then apply clock changes, then
++ * wr 0x000090 0x0 ; # un-reset pdp mmcm
++ * wr 0x000080 0x1ff ; # un-reset pdp
++ */
++
++ /*
++ * Hold Odin PDP in reset while changing the clock regs.
++ * Set the PDP bit of ODN_CORE_INTERNAL_RESETN low to reset.
++ * set bit 3 to 0 (active low)
++ */
++ if (pdpsubv == PDP_ODIN_ORION) {
++ value = core_rreg32(odn_core_reg, SRS_CORE_SOFT_RESETN);
++ value = REG_VALUE_LO(value, 1, SRS_SOFT_RESETN_PDP_SHIFT,
++ SRS_SOFT_RESETN_PDP_MASK);
++ core_wreg32(odn_core_reg, SRS_CORE_SOFT_RESETN, value);
++ } else {
++ value = core_rreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN);
++ value = REG_VALUE_LO(value, 1,
++ ODN_INTERNAL_RESETN_PDP_SHIFT[dev_num],
++ ODN_INTERNAL_RESETN_PDP_MASK[dev_num]);
++ core_wreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN, value);
++ }
++
++ /*
++ * Hold the PDP MMCM in reset while changing the clock regs.
++ * Set the PDP bit of ODN_CORE_CLK_GEN_RESET high to reset.
++ */
++ value = core_rreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET);
++ value = REG_VALUE_SET(value, 0x1,
++ ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT,
++ ODN_CLK_GEN_RESET_PDP_MMCM_MASK);
++ core_wreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET, value);
++
++ /* Pixel clock Input divider */
++ get_odin_clock_settings(odispl->id, &lo_time, &hi_time,
++ &no_count, &edge);
++
++ value = pll_rreg32(pll_reg,
++ ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG));
++ value = REG_VALUE_SET(value, lo_time,
++ ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT,
++ ODN_PDP_PCLK_IDIV_LO_TIME_MASK);
++ value = REG_VALUE_SET(value, hi_time,
++ ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT,
++ ODN_PDP_PCLK_IDIV_HI_TIME_MASK);
++ value = REG_VALUE_SET(value, no_count,
++ ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT,
++ ODN_PDP_PCLK_IDIV_NOCOUNT_MASK);
++ value = REG_VALUE_SET(value, edge,
++ ODN_PDP_PCLK_IDIV_EDGE_SHIFT,
++ ODN_PDP_PCLK_IDIV_EDGE_MASK);
++ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_IN_DIVIDER_REG),
++ value);
++
++ /* Pixel clock Output divider */
++ get_odin_clock_settings(odispl->od1, &lo_time, &hi_time,
++ &no_count, &edge);
++
++ /* Pixel clock Output divider reg1 */
++ value = pll_rreg32(pll_reg,
++ ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1));
++ value = REG_VALUE_SET(value, lo_time,
++ ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT,
++ ODN_PDP_PCLK_ODIV1_LO_TIME_MASK);
++ value = REG_VALUE_SET(value, hi_time,
++ ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT,
++ ODN_PDP_PCLK_ODIV1_HI_TIME_MASK);
++ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG1),
++ value);
++
++ /* Pixel clock Output divider reg2 */
++ value = pll_rreg32(pll_reg,
++ ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2));
++ value = REG_VALUE_SET(value, no_count,
++ ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT,
++ ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK);
++ value = REG_VALUE_SET(value, edge,
++ ODN_PDP_PCLK_ODIV2_EDGE_SHIFT,
++ ODN_PDP_PCLK_ODIV2_EDGE_MASK);
++ if (pdpsubv == PDP_ODIN_ORION) {
++ /*
++ * Fractional divide for PLL registers currently does not work
++ * on Sirius, as noted in the TRM. However, owing to
++ * what most likely is a design flaw in the RTL, the
++ * following register and a later one have their fractional
++ * divide fields set to values other than 0 by default,
++ * unlike on Odin. This prevents the PDP device from working
++ * on Orion.
++ */
++ value = REG_VALUE_LO(value, 0x1F, SRS_PDP_PCLK_ODIV2_FRAC_SHIFT,
++ SRS_PDP_PCLK_ODIV2_FRAC_MASK);
++ }
++ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_OUT_DIVIDER_REG2),
++ value);
++
++ /* Pixel clock Multiplier */
++ get_odin_clock_settings(odispl->m, &lo_time, &hi_time,
++ &no_count, &edge);
++
++ /* Pixel clock Multiplier reg1 */
++ value = pll_rreg32(pll_reg,
++ ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1));
++ value = REG_VALUE_SET(value, lo_time,
++ ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT,
++ ODN_PDP_PCLK_MUL1_LO_TIME_MASK);
++ value = REG_VALUE_SET(value, hi_time,
++ ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT,
++ ODN_PDP_PCLK_MUL1_HI_TIME_MASK);
++ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG1),
++ value);
++
++ /* Pixel clock Multiplier reg2 */
++ value = pll_rreg32(pll_reg,
++ ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2));
++ value = REG_VALUE_SET(value, no_count,
++ ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT,
++ ODN_PDP_PCLK_MUL2_NOCOUNT_MASK);
++ value = REG_VALUE_SET(value, edge,
++ ODN_PDP_PCLK_MUL2_EDGE_SHIFT,
++ ODN_PDP_PCLK_MUL2_EDGE_MASK);
++ if (pdpsubv == PDP_ODIN_ORION) {
++ /* Zero out fractional divide fields */
++ value = REG_VALUE_LO(value, 0x1F, SRS_PDP_PCLK_MUL2_FRAC_SHIFT,
++ SRS_PDP_PCLK_MUL2_FRAC_MASK);
++ }
++ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_P_CLK_MULTIPLIER_REG2),
++ value);
++
++ /* Mem clock Output divider */
++ get_odin_clock_settings(odispl->od2, &lo_time, &hi_time,
++ &no_count, &edge);
++
++ /* Mem clock Output divider reg1 */
++ value = pll_rreg32(pll_reg,
++ ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1));
++ value = REG_VALUE_SET(value, lo_time,
++ ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT,
++ ODN_PDP_MCLK_ODIV1_LO_TIME_MASK);
++ value = REG_VALUE_SET(value, hi_time,
++ ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT,
++ ODN_PDP_MCLK_ODIV1_HI_TIME_MASK);
++ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG1),
++ value);
++
++ /* Mem clock Output divider reg2 */
++ value = pll_rreg32(pll_reg,
++ ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2));
++ value = REG_VALUE_SET(value, no_count,
++ ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT,
++ ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK);
++ value = REG_VALUE_SET(value, edge,
++ ODN_PDP_MCLK_ODIV2_EDGE_SHIFT,
++ ODN_PDP_MCLK_ODIV2_EDGE_MASK);
++ pll_wreg32(pll_reg, ODIN_PLL_REG(ODN_PDP_M_CLK_OUT_DIVIDER_REG2),
++ value);
++
++ /*
++ * Take the PDP MMCM out of reset.
++ * Set the PDP bit of ODN_CORE_CLK_GEN_RESET to 0.
++ */
++ value = core_rreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET);
++ value = REG_VALUE_LO(value, 1, ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT,
++ ODN_CLK_GEN_RESET_PDP_MMCM_MASK);
++ core_wreg32(odn_core_reg, ODN_CORE_CLK_GEN_RESET, value);
++
++ /*
++ * Wait until the MMCM_LOCK_STATUS_PDPP bit is '1' in register
++ * MMCM_LOCK_STATUS. Issue an error if this does not
++ * go to '1' within 500ms.
++ */
++ {
++ int count;
++ bool locked = false;
++
++ for (count = 0; count < 10; count++) {
++ value = core_rreg32(odn_core_reg,
++ ODN_CORE_MMCM_LOCK_STATUS);
++ if (value & ODN_MMCM_LOCK_STATUS_PDPP) {
++ locked = true;
++ break;
++ }
++ msleep(50);
++ }
++
++ if (!locked) {
++ dev_err(dev, "The MMCM PLL did not lock\n");
++ return false;
++ }
++ }
++
++ /*
++ * Take Odin-PDP out of reset:
++ * Set the PDP bit of ODN_CORE_INTERNAL_RESETN to 1.
++ */ ++ if (pdpsubv == PDP_ODIN_ORION) { ++ value = core_rreg32(odn_core_reg, SRS_CORE_SOFT_RESETN); ++ value = REG_VALUE_SET(value, 1, SRS_SOFT_RESETN_PDP_SHIFT, ++ SRS_SOFT_RESETN_PDP_MASK); ++ core_wreg32(odn_core_reg, SRS_CORE_SOFT_RESETN, value); ++ } else { ++ value = core_rreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN); ++ value = REG_VALUE_SET(value, 1, ++ ODN_INTERNAL_RESETN_PDP_SHIFT[dev_num], ++ ODN_INTERNAL_RESETN_PDP_MASK[dev_num]); ++ core_wreg32(odn_core_reg, ODN_CORE_INTERNAL_RESETN, value); ++ } ++ ++ return true; ++} ++ ++void pdp_odin_set_updates_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++ u32 value = enable ? ++ (1 << ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT | ++ 1 << ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT) : ++ 0x0; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set updates: %s\n", enable ? "enable" : "disable"); ++#endif ++ ++ pdp_wreg32(pdp_reg, ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET, value); ++} ++ ++void pdp_odin_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable"); ++#endif ++ ++ value = pdp_rreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET); ++ ++ value = REG_VALUE_SET(value, ++ enable ? ODN_SYNC_GEN_ENABLE : ODN_SYNC_GEN_DISABLE, ++ ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT, ++ ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK); ++ ++ /* Invert the pixel clock */ ++ value = REG_VALUE_SET(value, ODN_PIXEL_CLOCK_INVERTED, ++ ODN_PDP_SYNCCTRL_CLKPOL_SHIFT, ++ ODN_PDP_SYNCCTRL_CLKPOL_MASK); ++ ++ /* Set the Horizontal Sync Polarity to active high */ ++ value = REG_VALUE_LO(value, ODN_HSYNC_POLARITY_ACTIVE_HIGH, ++ ODN_PDP_SYNCCTRL_HSPOL_SHIFT, ++ ODN_PDP_SYNCCTRL_HSPOL_MASK); ++ ++ pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value); ++ ++ /* Check for underruns when the sync generator ++ * is being turned off. ++ */ ++ if (!enable) { ++ value = pdp_rreg32(pdp_reg, ODN_PDP_INTSTAT_OFFSET); ++ value &= ODN_PDP_INTSTAT_ALL_OURUN_MASK; ++ ++ if (value) { ++ dev_warn(dev, "underruns detected. status=0x%08X\n", ++ value); ++ } else { ++ dev_info(dev, "no underruns detected\n"); ++ } ++ } ++} ++ ++void pdp_odin_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set powerdwn: %s\n", enable ? "enable" : "disable"); ++#endif ++ ++ value = pdp_rreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET); ++ ++ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, ++ ODN_PDP_SYNCCTRL_POWERDN_SHIFT, ++ ODN_PDP_SYNCCTRL_POWERDN_MASK); ++ ++ pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value); ++} ++ ++void pdp_odin_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable"); ++#endif ++ ++ pdp_wreg32(pdp_reg, ODN_PDP_INTCLR_OFFSET, ODN_PDP_INTCLR_ALL); ++ ++ value = pdp_rreg32(pdp_reg, ODN_PDP_INTENAB_OFFSET); ++ value = REG_VALUE_SET(value, enable ? 
0x1 : 0x0,
++ ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT,
++ ODN_PDP_INTENAB_INTEN_VBLNK0_MASK);
++ pdp_wreg32(pdp_reg, ODN_PDP_INTENAB_OFFSET, value);
++}
++
++bool pdp_odin_check_and_clear_vblank(struct device *dev,
++ void __iomem *pdp_reg)
++{
++ u32 value;
++
++ value = pdp_rreg32(pdp_reg, ODN_PDP_INTSTAT_OFFSET);
++
++ if (REG_VALUE_GET(value,
++ ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT,
++ ODN_PDP_INTSTAT_INTS_VBLNK0_MASK)) {
++ pdp_wreg32(pdp_reg, ODN_PDP_INTCLR_OFFSET,
++ (1 << ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT));
++
++ return true;
++ }
++ return false;
++}
++
++void pdp_odin_set_plane_enabled(struct device *dev, void __iomem *pdp_reg,
++ u32 plane, bool enable)
++{
++ u32 value;
++
++#ifdef PDP_VERBOSE
++ dev_info(dev, "Set plane %u: %s\n",
++ plane, enable ? "enable" : "disable");
++#endif
++
++ if (plane > 3) {
++ dev_err(dev, "A maximum of 4 planes is supported\n");
++ return;
++ }
++
++ value = pdp_rreg32(pdp_reg, GRPH_CTRL_OFFSET[plane]);
++ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0,
++ GRPH_CTRL_GRPH_STREN_SHIFT[plane],
++ GRPH_CTRL_GRPH_STREN_MASK[plane]);
++ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[plane], value);
++}
++
++void pdp_odin_reset_planes(struct device *dev, void __iomem *pdp_reg)
++{
++#ifdef PDP_VERBOSE
++ dev_info(dev, "Reset planes\n");
++#endif
++
++ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[0], 0x00000000);
++ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[1], 0x01000000);
++ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[2], 0x02000000);
++ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[3], 0x03000000);
++}
++
++static unsigned int pfim_pixel_format(u32 pdp_format)
++{
++ u32 pfim_pixformat;
++
++ switch (pdp_format) {
++ case ODN_PDP_SURF_PIXFMT_ARGB8888:
++ pfim_pixformat = ODN_PFIM_PIXFMT_ARGB8888;
++ break;
++ case ODN_PDP_SURF_PIXFMT_RGB565:
++ pfim_pixformat = ODN_PFIM_PIXFMT_RGB565;
++ break;
++ default:
++ WARN(true, "Unknown Odin pixel format: %u, defaulting to ARGB8888\n",
++ pdp_format);
++ pfim_pixformat = ODN_PFIM_PIXFMT_ARGB8888;
++ }
++
++ return pfim_pixformat;
++}
++
++static unsigned int pfim_tiles_line(u32 width,
++ u32 pfim_format,
++ u32 fbc_mode)
++{
++ u32 bpp;
++ u32 tpl;
++
++ switch (pfim_format) {
++ case ODN_PFIM_PIXFMT_ARGB8888:
++ bpp = 32;
++ break;
++ case ODN_PFIM_PIXFMT_RGB565:
++ bpp = 16;
++ break;
++ default:
++ WARN(true, "Unknown PFIM pixel format: %u, defaulting to 32 bpp\n",
++ pfim_format);
++ bpp = 32;
++ }
++
++ if (fbc_mode < ODIN_PFIM_FBCDC_MAX) {
++ tpl = pfim_properties[fbc_mode].tiles_per_line;
++ } else {
++ WARN(true, "Unknown FBC compression format: %u, defaulting to 8X8_V12\n",
++ fbc_mode);
++ tpl = pfim_properties[ODIN_PFIM_FBCDC_8X8_V12].tiles_per_line;
++ }
++
++ return (width / tpl) / (32 / bpp);
++}
++
++static void pfim_modeset(void __iomem *pfim_reg)
++{
++ u32 value;
++
++ /*
++ * Odin PDP can address up to 32 bits of PCI BAR4,
++ * so this register is not necessary
++ */
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB, 0x00);
++
++ /*
++ * The following registers are only used with YUV buffers,
++ * which we currently do not support
++ */
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_UV_BASE_ADDR_LSB, 0x00);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_UV_BASE_ADDR_MSB, 0x00);
++ pdp_wreg32(pfim_reg, CR_PFIM_PDP_Y_BASE_ADDR, 0x00);
++ pdp_wreg32(pfim_reg, CR_PFIM_PDP_UV_BASE_ADDR, 0x00);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_Y_VAL0, 0x00);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_UV_VAL0, 0x00);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_Y_VAL1, 0x00);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_UV_VAL1, 0x00);
++
++ /*
++ * PFIM tags are used for distinguishing between Y and UV plane
++ * requests when such a format is in use. Thus, any
++ * random value will do, as explained in the TRM
++ */
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_REQ_CONTEXT, 0x00);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_REQ_TAG, PFIM_RND_TAG);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_REQ_SB_TAG, 0x00);
++
++ /* Default tile value if a tile is found to be corrupted */
++ value = REG_VALUE_SET(0, 0x01,
++ CR_PFIM_FBDC_FILTER_ENABLE_SHIFT,
++ CR_PFIM_FBDC_FILTER_ENABLE_MASK);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_FILTER_ENABLE, value);
++
++ /* Recommended values for corrupt tile substitution */
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_CH0123_VAL0, 0x00);
++ value = REG_VALUE_SET(0, 0x01000000,
++ CR_PFIM_FBDC_CR_CH0123_VAL1_SHIFT,
++ CR_PFIM_FBDC_CR_CH0123_VAL1_MASK);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CR_CH0123_VAL1, value);
++
++ /* Only used when requesting a clear tile */
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CLEAR_COLOUR_LSB, 0x00);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_CLEAR_COLOUR_MSB, 0x00);
++
++ /* Current PDP revision does not support lossy formats */
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_REQ_LOSSY, 0x00);
++
++ /* Force invalidation of FBC headers at beginning of render */
++ value = REG_VALUE_SET(0, 0x01,
++ CR_PFIM_FBDC_HDR_INVAL_REQ_SHIFT,
++ CR_PFIM_FBDC_HDR_INVAL_REQ_MASK);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_HDR_INVAL_REQ, value);
++}
++
++static unsigned int pfim_num_tiles(struct device *dev, u32 width, u32 height,
++ u32 pfim_format, u32 fbc_mode)
++{
++ u32 phys_width, phys_height;
++ u32 walign, halign;
++ u32 tile_mult;
++ u32 num_tiles;
++ u32 bpp;
++
++ switch (pfim_format) {
++ case ODN_PFIM_PIXFMT_ARGB8888:
++ bpp = 32;
++ tile_mult = 4;
++ break;
++ case ODN_PFIM_PIXFMT_RGB565:
++ bpp = 16;
++ tile_mult = 2;
++ break;
++ default:
++ dev_warn(dev, "WARNING: Wrong PFIM pixel format: %u\n",
++ pfim_format);
++ return 0;
++ }
++
++ switch (fbc_mode) {
++ case ODIN_PFIM_FBCDC_8X8_V12:
++ switch (bpp) {
++ case 16: /* 16x8 */
++ walign = 16;
++ break;
++ case 32: /* 8x8 */
++ walign = 8;
++ break;
++ default:
++ dev_warn(dev, "WARNING: Wrong bit depth: %u\n",
++ bpp);
++ return 0;
++ }
++ halign = 8;
++ break;
++ case ODIN_PFIM_FBCDC_16X4_V12:
++ switch (bpp) {
++ case 16: /* 32x4 */
++ walign = 32;
++ break;
++ case 32: /* 16x4 */
++ walign = 16;
++ break;
++ default:
++ dev_warn(dev, "WARNING: Wrong bit depth: %u\n",
++ bpp);
++ return 0;
++ }
++ halign = 4;
++ break;
++ default:
++ dev_warn(dev, "WARNING: Wrong FBC compression format: %u\n",
++ fbc_mode);
++ return 0;
++ }
++
++ phys_width = ALIGN(width, walign);
++ phys_height = ALIGN(height, halign);
++ num_tiles = phys_width / pfim_properties[fbc_mode].tile_xsize;
++ num_tiles *= phys_height / pfim_properties[fbc_mode].tile_ysize;
++ num_tiles *= tile_mult;
++ num_tiles /= 4;
++
++ return num_tiles ? 
num_tiles : 1;
++}
++
++static void pfim_set_surface(struct device *dev,
++ void __iomem *pfim_reg,
++ u32 width,
++ u32 height,
++ u32 pdp_format,
++ u32 fbc_mode)
++{
++ u32 pfim_pixformat = pfim_pixel_format(pdp_format);
++ u32 tiles_line = pfim_tiles_line(width, pfim_pixformat, fbc_mode);
++ u32 tile_type = pfim_properties[fbc_mode].tile_type;
++ u32 num_tiles = pfim_num_tiles(dev, width, height,
++ pfim_pixformat, fbc_mode);
++
++ pdp_wreg32(pfim_reg, CR_PFIM_NUM_TILES, num_tiles);
++ pdp_wreg32(pfim_reg, CR_PFIM_TILES_PER_LINE, tiles_line);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_PIX_FORMAT, pfim_pixformat);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_TILE_TYPE, tile_type);
++}
++
++void pdp_odin_set_surface(struct device *dev, void __iomem *pdp_reg,
++ u32 plane, u32 address, u32 offset,
++ u32 posx, u32 posy,
++ u32 width, u32 height, u32 stride,
++ u32 format, u32 alpha, bool blend,
++ void __iomem *pfim_reg, u32 fbcm)
++{
++ /*
++ * Use a blender based on the plane number (this defines the Z
++ * ordering)
++ */
++ static const int GRPH_BLEND_POS[] = { 0x0, 0x1, 0x2, 0x3 };
++ u32 blend_mode;
++ u32 value;
++
++#ifdef PDP_VERBOSE
++ dev_info(dev,
++ "Set surface: plane=%d pos=%d:%d size=%dx%d stride=%d format=%d alpha=%d address=0x%x\n",
++ plane, posx, posy, width, height, stride,
++ format, alpha, address);
++#endif
++
++ if (plane > 3) {
++ dev_err(dev, "A maximum of 4 planes is supported\n");
++ return;
++ }
++
++ if (address & 0xf)
++ dev_warn(dev, "The frame buffer address is not aligned\n");
++
++ if (fbcm && pfim_reg) {
++ pfim_set_surface(dev, pfim_reg,
++ width, height,
++ format, fbcm);
++ pdp_wreg32(pfim_reg, CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB,
++ (address + offset) >> 6);
++ } else {
++ pdp_wreg32(pdp_reg, GRPH_BASEADDR_OFFSET[plane], address);
++ }
++
++ /* Pos */
++ value = REG_VALUE_SET(0x0, posx,
++ GRPH_POSN_GRPH_XSTART_SHIFT[plane],
++ GRPH_POSN_GRPH_XSTART_MASK[plane]);
++ value = REG_VALUE_SET(value, posy,
++ GRPH_POSN_GRPH_YSTART_SHIFT[plane],
++ GRPH_POSN_GRPH_YSTART_MASK[plane]);
++ pdp_wreg32(pdp_reg, GRPH_POSN_OFFSET[plane], value);
++
++ /* Size */
++ value = REG_VALUE_SET(0x0, width - 1,
++ GRPH_SIZE_GRPH_WIDTH_SHIFT[plane],
++ GRPH_SIZE_GRPH_WIDTH_MASK[plane]);
++ value = REG_VALUE_SET(value, height - 1,
++ GRPH_SIZE_GRPH_HEIGHT_SHIFT[plane],
++ GRPH_SIZE_GRPH_HEIGHT_MASK[plane]);
++ pdp_wreg32(pdp_reg, GRPH_SIZE_OFFSET[plane], value);
++
++ /* Stride */
++ value = REG_VALUE_SET(0x0, (stride >> 4) - 1,
++ GRPH_STRIDE_GRPH_STRIDE_SHIFT[plane],
++ GRPH_STRIDE_GRPH_STRIDE_MASK[plane]);
++ pdp_wreg32(pdp_reg, GRPH_STRIDE_OFFSET[plane], value);
++
++ /* Interlace mode: progressive */
++ value = REG_VALUE_SET(0x0, ODN_INTERLACE_DISABLE,
++ GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_SHIFT[plane],
++ GRPH_INTERLEAVE_CTRL_GRPH_INTFIELD_MASK[plane]);
++ pdp_wreg32(pdp_reg, GRPH_INTERLEAVE_CTRL_OFFSET[plane], value);
++
++ /* Format */
++ value = REG_VALUE_SET(0x0, format,
++ GRPH_SURF_GRPH_PIXFMT_SHIFT[plane],
++ GRPH_SURF_GRPH_PIXFMT_MASK[plane]);
++ pdp_wreg32(pdp_reg, GRPH_SURF_OFFSET[plane], value);
++
++ /* Global alpha (0...1023): scale the 8-bit alpha so 255 maps to 1023 */
++ value = REG_VALUE_SET(0x0, ((1024 * 256) / 255 * alpha) / 256,
++ GRPH_GALPHA_GRPH_GALPHA_SHIFT[plane],
++ GRPH_GALPHA_GRPH_GALPHA_MASK[plane]);
++ pdp_wreg32(pdp_reg, GRPH_GALPHA_OFFSET[plane], value);
++ value = pdp_rreg32(pdp_reg, GRPH_CTRL_OFFSET[plane]);
++
++ /* Blend mode */
++ if (blend) {
++ if (alpha != 255)
++ blend_mode = 0x2; /* 0b10 = global alpha blending */
++ else
++ blend_mode = 0x3; /* 0b11 = pixel alpha blending */
++ } else {
++ blend_mode = 0x0; /* 0b00 = no blending */
++ }
++ value = REG_VALUE_SET(value, blend_mode,
++ GRPH_CTRL_GRPH_BLEND_SHIFT[plane],
++ GRPH_CTRL_GRPH_BLEND_MASK[plane]);
++
++ /* Blend position */
++ value = REG_VALUE_SET(value, GRPH_BLEND_POS[plane],
++ GRPH_CTRL_GRPH_BLENDPOS_SHIFT[plane],
++ GRPH_CTRL_GRPH_BLENDPOS_MASK[plane]);
++ pdp_wreg32(pdp_reg, GRPH_CTRL_OFFSET[plane], value);
++}
++
++void pdp_odin_mode_set(struct device *dev, void __iomem *pdp_reg,
++ u32 h_display, u32 v_display,
++ u32 hbps, u32 ht, u32 has,
++ u32 hlbs, u32 hfps, u32 hrbs,
++ u32 vbps, u32 vt, u32 vas,
++ u32 vtbs, u32 vfps, u32 vbbs,
++ bool nhsync, bool nvsync,
++ void __iomem *pfim_reg)
++{
++ u32 value;
++
++ dev_info(dev, "Set mode: %dx%d\n", h_display, v_display);
++#ifdef PDP_VERBOSE
++ dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n",
++ ht, hbps, has, hlbs, hfps, hrbs);
++ dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n",
++ vt, vbps, vas, vtbs, vfps, vbbs);
++#endif
++
++ /* Border colour: 10 bits per channel */
++ pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_R_OFFSET, 0x0);
++ pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_GB_OFFSET, 0x0);
++
++ /* Background: 10 bits per channel */
++ value = pdp_rreg32(pdp_reg, ODN_PDP_BGNDCOL_AR_OFFSET);
++ value = REG_VALUE_SET(value, 0x3ff,
++ ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT,
++ ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK);
++ value = REG_VALUE_SET(value, 0x0,
++ ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT,
++ ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK);
++ pdp_wreg32(pdp_reg, ODN_PDP_BGNDCOL_AR_OFFSET, value);
++
++ value = pdp_rreg32(pdp_reg, ODN_PDP_BGNDCOL_GB_OFFSET);
++ value = REG_VALUE_SET(value, 0x0,
++ ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT,
++ ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK);
++ value = REG_VALUE_SET(value, 0x0,
++ ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT,
++ ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK);
++ pdp_wreg32(pdp_reg, ODN_PDP_BGNDCOL_GB_OFFSET, value);
++ pdp_wreg32(pdp_reg, ODN_PDP_BORDCOL_GB_OFFSET, 0x0);
++
++ /* Update control */
++ value = pdp_rreg32(pdp_reg, ODN_PDP_UPDCTRL_OFFSET);
++ value = REG_VALUE_SET(value, 0x0,
++ ODN_PDP_UPDCTRL_UPDFIELD_SHIFT,
++ ODN_PDP_UPDCTRL_UPDFIELD_MASK);
++ pdp_wreg32(pdp_reg, ODN_PDP_UPDCTRL_OFFSET, value);
++
++ /* Horizontal timing */
++ value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC1_OFFSET);
++ value = REG_VALUE_SET(value, hbps,
++ ODN_PDP_HSYNC1_HBPS_SHIFT,
++ ODN_PDP_HSYNC1_HBPS_MASK);
++ value = REG_VALUE_SET(value, ht,
++ ODN_PDP_HSYNC1_HT_SHIFT,
++ ODN_PDP_HSYNC1_HT_MASK);
++ pdp_wreg32(pdp_reg, ODN_PDP_HSYNC1_OFFSET, value);
++
++ value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC2_OFFSET);
++ value = REG_VALUE_SET(value, has,
++ ODN_PDP_HSYNC2_HAS_SHIFT,
++ ODN_PDP_HSYNC2_HAS_MASK);
++ value = REG_VALUE_SET(value, hlbs,
++ ODN_PDP_HSYNC2_HLBS_SHIFT,
++ ODN_PDP_HSYNC2_HLBS_MASK);
++ pdp_wreg32(pdp_reg, ODN_PDP_HSYNC2_OFFSET, value);
++
++ value = pdp_rreg32(pdp_reg, ODN_PDP_HSYNC3_OFFSET);
++ value = REG_VALUE_SET(value, hfps,
++ ODN_PDP_HSYNC3_HFPS_SHIFT,
++ ODN_PDP_HSYNC3_HFPS_MASK);
++ value = REG_VALUE_SET(value, hrbs,
++ ODN_PDP_HSYNC3_HRBS_SHIFT,
++ ODN_PDP_HSYNC3_HRBS_MASK);
++ pdp_wreg32(pdp_reg, ODN_PDP_HSYNC3_OFFSET, value);
++
++ /* Vertical timing */
++ value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC1_OFFSET);
++ value = REG_VALUE_SET(value, vbps,
++ ODN_PDP_VSYNC1_VBPS_SHIFT,
++ ODN_PDP_VSYNC1_VBPS_MASK);
++ value = REG_VALUE_SET(value, vt,
++ ODN_PDP_VSYNC1_VT_SHIFT,
++ ODN_PDP_VSYNC1_VT_MASK);
++ pdp_wreg32(pdp_reg, ODN_PDP_VSYNC1_OFFSET, value);
++
++ value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC2_OFFSET);
++ value = 
REG_VALUE_SET(value, vas, ++ ODN_PDP_VSYNC2_VAS_SHIFT, ++ ODN_PDP_VSYNC2_VAS_MASK); ++ value = REG_VALUE_SET(value, vtbs, ++ ODN_PDP_VSYNC2_VTBS_SHIFT, ++ ODN_PDP_VSYNC2_VTBS_MASK); ++ pdp_wreg32(pdp_reg, ODN_PDP_VSYNC2_OFFSET, value); ++ ++ value = pdp_rreg32(pdp_reg, ODN_PDP_VSYNC3_OFFSET); ++ value = REG_VALUE_SET(value, vfps, ++ ODN_PDP_VSYNC3_VFPS_SHIFT, ++ ODN_PDP_VSYNC3_VFPS_MASK); ++ value = REG_VALUE_SET(value, vbbs, ++ ODN_PDP_VSYNC3_VBBS_SHIFT, ++ ODN_PDP_VSYNC3_VBBS_MASK); ++ pdp_wreg32(pdp_reg, ODN_PDP_VSYNC3_OFFSET, value); ++ ++ /* Horizontal data enable */ ++ value = pdp_rreg32(pdp_reg, ODN_PDP_HDECTRL_OFFSET); ++ value = REG_VALUE_SET(value, hlbs, ++ ODN_PDP_HDECTRL_HDES_SHIFT, ++ ODN_PDP_HDECTRL_HDES_MASK); ++ value = REG_VALUE_SET(value, hfps, ++ ODN_PDP_HDECTRL_HDEF_SHIFT, ++ ODN_PDP_HDECTRL_HDEF_MASK); ++ pdp_wreg32(pdp_reg, ODN_PDP_HDECTRL_OFFSET, value); ++ ++ /* Vertical data enable */ ++ value = pdp_rreg32(pdp_reg, ODN_PDP_VDECTRL_OFFSET); ++ value = REG_VALUE_SET(value, vtbs, ++ ODN_PDP_VDECTRL_VDES_SHIFT, ++ ODN_PDP_VDECTRL_VDES_MASK); ++ value = REG_VALUE_SET(value, vfps, ++ ODN_PDP_VDECTRL_VDEF_SHIFT, ++ ODN_PDP_VDECTRL_VDEF_MASK); ++ pdp_wreg32(pdp_reg, ODN_PDP_VDECTRL_OFFSET, value); ++ ++ /* Vertical event start and vertical fetch start */ ++ value = pdp_rreg32(pdp_reg, ODN_PDP_VEVENT_OFFSET); ++ value = REG_VALUE_SET(value, vbps, ++ ODN_PDP_VEVENT_VFETCH_SHIFT, ++ ODN_PDP_VEVENT_VFETCH_MASK); ++ pdp_wreg32(pdp_reg, ODN_PDP_VEVENT_OFFSET, value); ++ ++ /* Set up polarities of sync/blank */ ++ value = REG_VALUE_SET(0, 0x1, ++ ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT, ++ ODN_PDP_SYNCCTRL_BLNKPOL_MASK); ++ if (nhsync) ++ value = REG_VALUE_SET(value, 0x1, ++ ODN_PDP_SYNCCTRL_HSPOL_SHIFT, ++ ODN_PDP_SYNCCTRL_HSPOL_MASK); ++ if (nvsync) ++ value = REG_VALUE_SET(value, 0x1, ++ ODN_PDP_SYNCCTRL_VSPOL_SHIFT, ++ ODN_PDP_SYNCCTRL_VSPOL_MASK); ++ pdp_wreg32(pdp_reg, ODN_PDP_SYNCCTRL_OFFSET, value); ++ ++ /* PDP framebuffer compression setup */ ++ if (pfim_reg) ++ pfim_modeset(pfim_reg); ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_odin.h b/drivers/gpu/drm/img-rogue/apollo/pdp_odin.h +new file mode 100644 +index 000000000000..0a625e231147 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/pdp_odin.h +@@ -0,0 +1,95 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. 
++ *
++ * If you wish to allow use of your version of this file only under the terms of
++ * GPL, and not to allow others to use your version of this file under the terms
++ * of the MIT license, indicate your decision by deleting the provisions above
++ * and replace them with the notice and other provisions required by GPL as set
++ * out in the file called "GPL-COPYING" included in this distribution. If you do
++ * not delete the provisions above, a recipient may use your version of this file
++ * under the terms of either the MIT license or GPL.
++ *
++ * This License is also included in this distribution in the file called
++ * "MIT-COPYING".
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#if !defined(__PDP_ODIN_H__)
++#define __PDP_ODIN_H__
++
++#include <linux/device.h>
++#include <linux/types.h>
++
++/* include here for ODN_PDP_SURF_PIXFMT_ARGB8888 as this is part of the API */
++#include "odin_pdp_regs.h"
++#include "pdp_common.h"
++
++bool pdp_odin_clocks_set(struct device *dev,
++ void __iomem *pdp_reg, void __iomem *pll_reg,
++ u32 clock_freq, u32 dev_num,
++ void __iomem *odn_core_reg,
++ u32 hdisplay, u32 vdisplay,
++ enum pdp_odin_subversion);
++
++void pdp_odin_set_updates_enabled(struct device *dev, void __iomem *pdp_reg,
++ bool enable);
++
++void pdp_odin_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg,
++ bool enable);
++
++void pdp_odin_set_powerdwn_enabled(struct device *dev, void __iomem *pdp_reg,
++ bool enable);
++
++void pdp_odin_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg,
++ bool enable);
++
++bool pdp_odin_check_and_clear_vblank(struct device *dev,
++ void __iomem *pdp_reg);
++
++void pdp_odin_set_plane_enabled(struct device *dev, void __iomem *pdp_reg,
++ u32 plane, bool enable);
++
++void pdp_odin_reset_planes(struct device *dev, void __iomem *pdp_reg);
++
++void pdp_odin_set_surface(struct device *dev, void __iomem *pdp_reg,
++ u32 plane, u32 address, u32 offset,
++ u32 posx, u32 posy,
++ u32 width, u32 height, u32 stride,
++ u32 format, u32 alpha, bool blend,
++ void __iomem *pfim_reg, u32 fbcm);
++
++void pdp_odin_mode_set(struct device *dev, void __iomem *pdp_reg,
++ u32 h_display, u32 v_display,
++ u32 hbps, u32 ht, u32 has,
++ u32 hlbs, u32 hfps, u32 hrbs,
++ u32 vbps, u32 vt, u32 vas,
++ u32 vtbs, u32 vfps, u32 vbbs,
++ bool nhsync, bool nvsync,
++ void __iomem *pfim_reg);
++
++#endif /* __PDP_ODIN_H__ */
+diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_plato.c b/drivers/gpu/drm/img-rogue/apollo/pdp_plato.c
+new file mode 100644
+index 000000000000..df214f97bc6d
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/apollo/pdp_plato.c
+@@ -0,0 +1,339 @@
++/*
++ * @Codingstyle LinuxKernel
++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++ * @License Dual MIT/GPLv2
++ *
++ * The contents of this file are subject to the MIT license as set out below.
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "pdp_common.h" ++#include "pdp_plato.h" ++#include "pdp2_mmu_regs.h" ++#include "pdp2_regs.h" ++ ++#define PLATO_PDP_STRIDE_SHIFT 5 ++ ++ ++void pdp_plato_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set syncgen: %s\n", enable ? "enable" : "disable"); ++#endif ++ ++ value = pdp_rreg32(pdp_reg, PDP_SYNCCTRL_OFFSET); ++ /* Starts Sync Generator. */ ++ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, ++ PDP_SYNCCTRL_SYNCACTIVE_SHIFT, ++ PDP_SYNCCTRL_SYNCACTIVE_MASK); ++ /* Controls polarity of pixel clock: Pixel clock is inverted */ ++ value = REG_VALUE_SET(value, 0x01, ++ PDP_SYNCCTRL_CLKPOL_SHIFT, ++ PDP_SYNCCTRL_CLKPOL_MASK); ++ pdp_wreg32(pdp_reg, PDP_SYNCCTRL_OFFSET, value); ++} ++ ++void pdp_plato_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg, ++ bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set vblank: %s\n", enable ? "enable" : "disable"); ++#endif ++ ++ pdp_wreg32(pdp_reg, PDP_INTCLR_OFFSET, 0xFFFFFFFF); ++ ++ value = pdp_rreg32(pdp_reg, PDP_INTENAB_OFFSET); ++ value = REG_VALUE_SET(value, enable ? 
0x1 : 0x0, ++ PDP_INTENAB_INTEN_VBLNK0_SHIFT, ++ PDP_INTENAB_INTEN_VBLNK0_MASK); ++ pdp_wreg32(pdp_reg, PDP_INTENAB_OFFSET, value); ++} ++ ++bool pdp_plato_check_and_clear_vblank(struct device *dev, ++ void __iomem *pdp_reg) ++{ ++ u32 value; ++ ++ value = pdp_rreg32(pdp_reg, PDP_INTSTAT_OFFSET); ++ ++ if (REG_VALUE_GET(value, ++ PDP_INTSTAT_INTS_VBLNK0_SHIFT, ++ PDP_INTSTAT_INTS_VBLNK0_MASK)) { ++ pdp_wreg32(pdp_reg, PDP_INTCLR_OFFSET, ++ (1 << PDP_INTCLR_INTCLR_VBLNK0_SHIFT)); ++ return true; ++ } ++ ++ return false; ++} ++ ++void pdp_plato_set_plane_enabled(struct device *dev, void __iomem *pdp_reg, ++ u32 plane, bool enable) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, "Set plane %u: %s\n", ++ plane, enable ? "enable" : "disable"); ++#endif ++ value = pdp_rreg32(pdp_reg, PDP_GRPH1CTRL_OFFSET); ++ value = REG_VALUE_SET(value, enable ? 0x1 : 0x0, ++ PDP_GRPH1CTRL_GRPH1STREN_SHIFT, ++ PDP_GRPH1CTRL_GRPH1STREN_MASK); ++ pdp_wreg32(pdp_reg, PDP_GRPH1CTRL_OFFSET, value); ++} ++ ++void pdp_plato_set_surface(struct device *dev, ++ void __iomem *pdp_reg, void __iomem *pdp_bif_reg, ++ u32 plane, u64 address, ++ u32 posx, u32 posy, ++ u32 width, u32 height, u32 stride, ++ u32 format, u32 alpha, bool blend) ++{ ++ u32 value; ++ ++#ifdef PDP_VERBOSE ++ dev_info(dev, ++ "Set surface: size=%dx%d stride=%d format=%d address=0x%llx\n", ++ width, height, stride, format, address); ++#endif ++ ++ pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, 0x0); ++ /* ++ * Set the offset position to (0,0) as we've already added any offset ++ * to the base address. ++ */ ++ pdp_wreg32(pdp_reg, PDP_GRPH1POSN_OFFSET, 0); ++ ++ /* Set the frame buffer base address */ ++ if (address & 0xF) ++ dev_warn(dev, "The frame buffer address is not aligned\n"); ++ ++ pdp_wreg32(pdp_reg, PDP_GRPH1BASEADDR_OFFSET, ++ (u32)address & PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK); ++ ++ /* ++ * Write 8 msb of the address to address extension bits in the PDP ++ * MMU control register. 
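++ * The MMU itself is put into bypass below, so the fixed
++ * upper-address field supplies the high bits of the address
++ * directly.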
++ */
++ value = pdp_rreg32(pdp_bif_reg, PDP_BIF_ADDRESS_CONTROL_OFFSET);
++ value = REG_VALUE_SET(value, address >> 32,
++ PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT,
++ PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK);
++ value = REG_VALUE_SET(value, 0x00,
++ PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT,
++ PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK);
++ value = REG_VALUE_SET(value, 0x01,
++ PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT,
++ PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK);
++ pdp_wreg32(pdp_bif_reg, PDP_BIF_ADDRESS_CONTROL_OFFSET, value);
++
++ /* Set the framebuffer pixel format */
++ value = pdp_rreg32(pdp_reg, PDP_GRPH1SURF_OFFSET);
++ value = REG_VALUE_SET(value, format,
++ PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT,
++ PDP_GRPH1SURF_GRPH1PIXFMT_MASK);
++ pdp_wreg32(pdp_reg, PDP_GRPH1SURF_OFFSET, value);
++ /*
++ * Set the framebuffer size (this might be smaller than the resolution)
++ */
++ value = REG_VALUE_SET(0, width - 1,
++ PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT,
++ PDP_GRPH1SIZE_GRPH1WIDTH_MASK);
++ value = REG_VALUE_SET(value, height - 1,
++ PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT,
++ PDP_GRPH1SIZE_GRPH1HEIGHT_MASK);
++ pdp_wreg32(pdp_reg, PDP_GRPH1SIZE_OFFSET, value);
++
++ /* Set the framebuffer stride in 32-byte units (PLATO_PDP_STRIDE_SHIFT) */
++ value = REG_VALUE_SET(0, (stride >> PLATO_PDP_STRIDE_SHIFT) - 1,
++ PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT,
++ PDP_GRPH1STRIDE_GRPH1STRIDE_MASK);
++ pdp_wreg32(pdp_reg, PDP_GRPH1STRIDE_OFFSET, value);
++
++ /* Enable the register writes on the next vblank */
++ pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, 0x3);
++
++ /*
++ * Issues with NoC sending interleaved read responses to PDP require
++ * burst to be 1.
++ */
++ value = REG_VALUE_SET(0, 0x02,
++ PDP_MEMCTRL_MEMREFRESH_SHIFT,
++ PDP_MEMCTRL_MEMREFRESH_MASK);
++ value = REG_VALUE_SET(value, 0x01,
++ PDP_MEMCTRL_BURSTLEN_SHIFT,
++ PDP_MEMCTRL_BURSTLEN_MASK);
++ pdp_wreg32(pdp_reg, PDP_MEMCTRL_OFFSET, value);
++}
++
++void pdp_plato_mode_set(struct device *dev, void __iomem *pdp_reg,
++ u32 h_display, u32 v_display,
++ u32 hbps, u32 ht, u32 has,
++ u32 hlbs, u32 hfps, u32 hrbs,
++ u32 vbps, u32 vt, u32 vas,
++ u32 vtbs, u32 vfps, u32 vbbs,
++ bool nhsync, bool nvsync)
++{
++ u32 value;
++
++ dev_info(dev, "Set mode: %dx%d\n", h_display, v_display);
++#ifdef PDP_VERBOSE
++ dev_info(dev, " ht: %d hbps %d has %d hlbs %d hfps %d hrbs %d\n",
++ ht, hbps, has, hlbs, hfps, hrbs);
++ dev_info(dev, " vt: %d vbps %d vas %d vtbs %d vfps %d vbbs %d\n",
++ vt, vbps, vas, vtbs, vfps, vbbs);
++#endif
++
++ /* Update control */
++ value = pdp_rreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET);
++ value = REG_VALUE_SET(value, 0x0,
++ PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT,
++ PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK);
++ pdp_wreg32(pdp_reg, PDP_REGISTER_UPDATE_CTRL_OFFSET, value);
++
++ /* Set hsync timings */
++ value = pdp_rreg32(pdp_reg, PDP_HSYNC1_OFFSET);
++ value = REG_VALUE_SET(value, hbps,
++ PDP_HSYNC1_HBPS_SHIFT,
++ PDP_HSYNC1_HBPS_MASK);
++ value = REG_VALUE_SET(value, ht,
++ PDP_HSYNC1_HT_SHIFT,
++ PDP_HSYNC1_HT_MASK);
++ pdp_wreg32(pdp_reg, PDP_HSYNC1_OFFSET, value);
++
++ value = pdp_rreg32(pdp_reg, PDP_HSYNC2_OFFSET);
++ value = REG_VALUE_SET(value, has,
++ PDP_HSYNC2_HAS_SHIFT,
++ PDP_HSYNC2_HAS_MASK);
++ value = REG_VALUE_SET(value, hlbs,
++ PDP_HSYNC2_HLBS_SHIFT,
++ PDP_HSYNC2_HLBS_MASK);
++ pdp_wreg32(pdp_reg, PDP_HSYNC2_OFFSET, value);
++
++ value = pdp_rreg32(pdp_reg, PDP_HSYNC3_OFFSET);
++ value = REG_VALUE_SET(value, hfps,
++ PDP_HSYNC3_HFPS_SHIFT,
++ PDP_HSYNC3_HFPS_MASK);
++ value = REG_VALUE_SET(value, hrbs, ++ PDP_HSYNC3_HRBS_SHIFT, ++ PDP_HSYNC3_HRBS_MASK); ++ pdp_wreg32(pdp_reg, PDP_HSYNC3_OFFSET, value); ++ ++ /* Set vsync timings */ ++ value = pdp_rreg32(pdp_reg, PDP_VSYNC1_OFFSET); ++ value = REG_VALUE_SET(value, vbps, ++ PDP_VSYNC1_VBPS_SHIFT, ++ PDP_VSYNC1_VBPS_MASK); ++ value = REG_VALUE_SET(value, vt, ++ PDP_VSYNC1_VT_SHIFT, ++ PDP_VSYNC1_VT_MASK); ++ pdp_wreg32(pdp_reg, PDP_VSYNC1_OFFSET, value); ++ ++ value = pdp_rreg32(pdp_reg, PDP_VSYNC2_OFFSET); ++ value = REG_VALUE_SET(value, vas, ++ PDP_VSYNC2_VAS_SHIFT, ++ PDP_VSYNC2_VAS_MASK); ++ value = REG_VALUE_SET(value, vtbs, ++ PDP_VSYNC2_VTBS_SHIFT, ++ PDP_VSYNC2_VTBS_MASK); ++ pdp_wreg32(pdp_reg, PDP_VSYNC2_OFFSET, value); ++ ++ value = pdp_rreg32(pdp_reg, PDP_VSYNC3_OFFSET); ++ value = REG_VALUE_SET(value, vfps, ++ PDP_VSYNC3_VFPS_SHIFT, ++ PDP_VSYNC3_VFPS_MASK); ++ value = REG_VALUE_SET(value, vbbs, ++ PDP_VSYNC3_VBBS_SHIFT, ++ PDP_VSYNC3_VBBS_MASK); ++ pdp_wreg32(pdp_reg, PDP_VSYNC3_OFFSET, value); ++ ++ /* Horizontal data enable */ ++ value = pdp_rreg32(pdp_reg, PDP_HDECTRL_OFFSET); ++ value = REG_VALUE_SET(value, has, ++ PDP_HDECTRL_HDES_SHIFT, ++ PDP_HDECTRL_HDES_MASK); ++ value = REG_VALUE_SET(value, hrbs, ++ PDP_HDECTRL_HDEF_SHIFT, ++ PDP_HDECTRL_HDEF_MASK); ++ pdp_wreg32(pdp_reg, PDP_HDECTRL_OFFSET, value); ++ ++ /* Vertical data enable */ ++ value = pdp_rreg32(pdp_reg, PDP_VDECTRL_OFFSET); ++ value = REG_VALUE_SET(value, vtbs, /* XXX: we're setting this to VAS */ ++ PDP_VDECTRL_VDES_SHIFT, ++ PDP_VDECTRL_VDES_MASK); ++ value = REG_VALUE_SET(value, vfps, /* XXX: set to VBBS */ ++ PDP_VDECTRL_VDEF_SHIFT, ++ PDP_VDECTRL_VDEF_MASK); ++ pdp_wreg32(pdp_reg, PDP_VDECTRL_OFFSET, value); ++ ++ /* Vertical event start and vertical fetch start */ ++ value = 0; ++ value = REG_VALUE_SET(value, 0, ++ PDP_VEVENT_VEVENT_SHIFT, ++ PDP_VEVENT_VEVENT_MASK); ++ value = REG_VALUE_SET(value, vbps, ++ PDP_VEVENT_VFETCH_SHIFT, ++ PDP_VEVENT_VFETCH_MASK); ++ value = REG_VALUE_SET(value, vfps, ++ PDP_VEVENT_VEVENT_SHIFT, ++ PDP_VEVENT_VEVENT_MASK); ++ pdp_wreg32(pdp_reg, PDP_VEVENT_OFFSET, value); ++ ++ /* Set up polarities of sync/blank */ ++ value = REG_VALUE_SET(0, 0x1, ++ PDP_SYNCCTRL_BLNKPOL_SHIFT, ++ PDP_SYNCCTRL_BLNKPOL_MASK); ++ ++ if (nhsync) ++ value = REG_VALUE_SET(value, 0x1, ++ PDP_SYNCCTRL_HSPOL_SHIFT, ++ PDP_SYNCCTRL_HSPOL_MASK); ++ ++ if (nvsync) ++ value = REG_VALUE_SET(value, 0x1, ++ PDP_SYNCCTRL_VSPOL_SHIFT, ++ PDP_SYNCCTRL_VSPOL_MASK); ++ ++ pdp_wreg32(pdp_reg, ++ PDP_SYNCCTRL_OFFSET, ++ value); ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_plato.h b/drivers/gpu/drm/img-rogue/apollo/pdp_plato.h +new file mode 100644 +index 000000000000..3275259673c0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/pdp_plato.h +@@ -0,0 +1,86 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#if !defined(__PDP_PLATO_H__)
++#define __PDP_PLATO_H__
++
++#include <linux/device.h>
++#include <linux/types.h>
++
++#define PLATO_PDP_PIXEL_FORMAT_G (0x00)
++#define PLATO_PDP_PIXEL_FORMAT_ARGB4 (0x04)
++#define PLATO_PDP_PIXEL_FORMAT_ARGB1555 (0x05)
++#define PLATO_PDP_PIXEL_FORMAT_RGB8 (0x06)
++#define PLATO_PDP_PIXEL_FORMAT_RGB565 (0x07)
++#define PLATO_PDP_PIXEL_FORMAT_ARGB8 (0x08)
++#define PLATO_PDP_PIXEL_FORMAT_AYUV8 (0x10)
++#define PLATO_PDP_PIXEL_FORMAT_YUV10 (0x15)
++#define PLATO_PDP_PIXEL_FORMAT_RGBA8 (0x16)
++
++
++void pdp_plato_set_syncgen_enabled(struct device *dev, void __iomem *pdp_reg,
++ bool enable);
++
++void pdp_plato_set_vblank_enabled(struct device *dev, void __iomem *pdp_reg,
++ bool enable);
++
++bool pdp_plato_check_and_clear_vblank(struct device *dev,
++ void __iomem *pdp_reg);
++
++void pdp_plato_set_plane_enabled(struct device *dev, void __iomem *pdp_reg,
++ u32 plane, bool enable);
++
++void pdp_plato_set_surface(struct device *dev,
++ void __iomem *pdp_reg, void __iomem *pdp_bif_reg,
++ u32 plane, u64 address,
++ u32 posx, u32 posy,
++ u32 width, u32 height, u32 stride,
++ u32 format, u32 alpha, bool blend);
++
++void pdp_plato_mode_set(struct device *dev, void __iomem *pdp_reg,
++ u32 h_display, u32 v_display,
++ u32 hbps, u32 ht, u32 has,
++ u32 hlbs, u32 hfps, u32 hrbs,
++ u32 vbps, u32 vt, u32 vas,
++ u32 vtbs, u32 vfps, u32 vbbs,
++ bool nhsync, bool nvsync);
++
++#endif /* __PDP_PLATO_H__ */
+diff --git a/drivers/gpu/drm/img-rogue/apollo/pdp_regs.h b/drivers/gpu/drm/img-rogue/apollo/pdp_regs.h
+new file mode 100644
+index 000000000000..bd26b0617f95
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/apollo/pdp_regs.h
+@@ -0,0 +1,75 @@
++/*************************************************************************/ /*!
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(__PDP_REGS_H__) ++#define __PDP_REGS_H__ ++ ++/*************************************************************************/ /*! ++ PCI Device Information ++*/ /**************************************************************************/ ++ ++#define DCPDP_VENDOR_ID_POWERVR (0x1010) ++ ++#define DCPDP_DEVICE_ID_PCI_APOLLO_FPGA (0x1CF1) ++#define DCPDP_DEVICE_ID_PCIE_APOLLO_FPGA (0x1CF2) ++ ++/*************************************************************************/ /*! ++ PCI Device Base Address Information ++*/ /**************************************************************************/ ++ ++/* PLL and PDP registers on base address register 0 */ ++#define DCPDP_REG_PCI_BASENUM (0) ++ ++#define DCPDP_PCI_PLL_REG_OFFSET (0x1000) ++#define DCPDP_PCI_PLL_REG_SIZE (0x0400) ++ ++#define DCPDP_PCI_PDP_REG_OFFSET (0xC000) ++#define DCPDP_PCI_PDP_REG_SIZE (0x2000) ++ ++/*************************************************************************/ /*! ++ Misc register information ++*/ /**************************************************************************/ ++ ++/* This information isn't captured in tcf_rgbpdp_regs.h so define it here */ ++#define DCPDP_STR1SURF_FORMAT_ARGB8888 (0xE) ++#define DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT (4) ++#define DCPDP_STR1POSN_STRIDE_SHIFT (4) ++ ++#endif /* !defined(__PDP_REGS_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/pfim_defs.h b/drivers/gpu/drm/img-rogue/apollo/pfim_defs.h +new file mode 100644 +index 000000000000..d39c06f703bd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/pfim_defs.h +@@ -0,0 +1,69 @@ ++/****************************************************************************** ++@Title Odin PFIM definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Odin register defs for PDP-FBDC Interface Module ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++******************************************************************************/ ++ ++#ifndef _PFIM_DEFS_H_ ++#define _PFIM_DEFS_H_ ++ ++/* Supported FBC modes */ ++#define ODIN_PFIM_MOD_LINEAR (0x00) ++#define ODIN_PFIM_FBCDC_8X8_V12 (0x01) ++#define ODIN_PFIM_FBCDC_16X4_V12 (0x02) ++#define ODIN_PFIM_FBCDC_MAX (0x03) ++ ++/* Supported pixel formats */ ++#define ODN_PFIM_PIXFMT_NONE (0x00) ++#define ODN_PFIM_PIXFMT_ARGB8888 (0x0C) ++#define ODN_PFIM_PIXFMT_RGB565 (0x05) ++ ++/* Tile types */ ++#define ODN_PFIM_TILETYPE_8X8 (0x01) ++#define ODN_PFIM_TILETYPE_16X4 (0x02) ++#define ODN_PFIM_TILETYPE_32x2 (0x03) ++ ++#define PFIM_ROUNDUP(X, Y) (((X) + ((Y) - 1U)) & ~((Y) - 1U)) ++#define PFIM_RND_TAG (0x10) ++ ++#endif /* _PFIM_DEFS_H_ */ ++ ++/****************************************************************************** ++ End of file (pfim_defs.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/apollo/pfim_regs.h b/drivers/gpu/drm/img-rogue/apollo/pfim_regs.h +new file mode 100644 +index 000000000000..4b8ff82138b4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/pfim_regs.h +@@ -0,0 +1,265 @@ ++/****************************************************************************** ++@Title Odin PFIM control register definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Odin register defs for PDP-FBDC Interface Module ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++******************************************************************************/ ++#ifndef _PFIM_REGS_H_ ++#define _PFIM_REGS_H_ ++ ++/* ++ Register CR_PFIM_NUM_TILES ++*/ ++#define CR_PFIM_NUM_TILES 0x0000 ++#define CR_PFIM_NUM_TILES_MASK 0x007FFFFFU ++#define CR_PFIM_NUM_TILES_SHIFT 0 ++#define CR_PFIM_NUM_TILES_SIGNED 0 ++ ++/* ++ Register CR_PFIM_TILES_PER_LINE ++*/ ++#define CR_PFIM_TILES_PER_LINE 0x0004 ++#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_MASK 0x000000FFU ++#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_SHIFT 0 ++#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB ++*/ ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB 0x0008 ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_SHIFT 0 ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB ++*/ ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB 0x000C ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_MASK 0x00000003U ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_SHIFT 0 ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_UV_BASE_ADDR_LSB ++*/ ++#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB 0x0010 ++#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_SHIFT 0 ++#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_UV_BASE_ADDR_MSB ++*/ ++#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB 0x0014 ++#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_MASK 0x00000003U ++#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_SHIFT 0 ++#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_PDP_Y_BASE_ADDR ++*/ ++#define CR_PFIM_PDP_Y_BASE_ADDR 0x0018 ++#define CR_PFIM_PDP_Y_BASE_ADDR_MASK 0xFFFFFFFFU ++#define CR_PFIM_PDP_Y_BASE_ADDR_SHIFT 0 ++#define CR_PFIM_PDP_Y_BASE_ADDR_SIGNED 0 ++ ++/* ++ Register CR_PFIM_PDP_UV_BASE_ADDR ++*/ ++#define CR_PFIM_PDP_UV_BASE_ADDR 0x001C ++#define CR_PFIM_PDP_UV_BASE_ADDR_MASK 0xFFFFFFFFU ++#define CR_PFIM_PDP_UV_BASE_ADDR_SHIFT 0 ++#define CR_PFIM_PDP_UV_BASE_ADDR_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_REQ_CONTEXT ++*/ ++#define CR_PFIM_FBDC_REQ_CONTEXT 0x0020 ++#define CR_PFIM_FBDC_REQ_CONTEXT_MASK 0x00000007U ++#define CR_PFIM_FBDC_REQ_CONTEXT_SHIFT 0 ++#define CR_PFIM_FBDC_REQ_CONTEXT_SIGNED 0 ++ ++/* ++ Register 
CR_PFIM_FBDC_REQ_TAG ++*/ ++#define CR_PFIM_FBDC_REQ_TAG 0x0024 ++#define CR_PFIM_FBDC_REQ_TAG_YARGB_MASK 0x00000003U ++#define CR_PFIM_FBDC_REQ_TAG_YARGB_SHIFT 0 ++#define CR_PFIM_FBDC_REQ_TAG_YARGB_SIGNED 0 ++ ++#define CR_PFIM_FBDC_REQ_TAG_UV_MASK 0x00000030U ++#define CR_PFIM_FBDC_REQ_TAG_UV_SHIFT 4 ++#define CR_PFIM_FBDC_REQ_TAG_UV_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_REQ_SB_TAG ++*/ ++#define CR_PFIM_FBDC_REQ_SB_TAG 0x0028 ++#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_MASK 0x00000003U ++#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_SHIFT 0 ++#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_SIGNED 0 ++ ++#define CR_PFIM_FBDC_REQ_SB_TAG_UV_MASK 0x00000030U ++#define CR_PFIM_FBDC_REQ_SB_TAG_UV_SHIFT 4 ++#define CR_PFIM_FBDC_REQ_SB_TAG_UV_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_HDR_INVAL_REQ ++*/ ++#define CR_PFIM_FBDC_HDR_INVAL_REQ 0x002C ++#define CR_PFIM_FBDC_HDR_INVAL_REQ_MASK 0x00000001U ++#define CR_PFIM_FBDC_HDR_INVAL_REQ_SHIFT 0 ++#define CR_PFIM_FBDC_HDR_INVAL_REQ_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_PIX_FORMAT ++*/ ++#define CR_PFIM_FBDC_PIX_FORMAT 0x0030 ++#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_MASK 0x0000007FU ++#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_SHIFT 0 ++#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_CH0123_VAL0 ++*/ ++#define CR_PFIM_FBDC_CR_CH0123_VAL0 0x0034 ++#define CR_PFIM_FBDC_CR_CH0123_VAL0_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_CR_CH0123_VAL0_SHIFT 0 ++#define CR_PFIM_FBDC_CR_CH0123_VAL0_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_CH0123_VAL1 ++*/ ++#define CR_PFIM_FBDC_CR_CH0123_VAL1 0x0038 ++#define CR_PFIM_FBDC_CR_CH0123_VAL1_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_CR_CH0123_VAL1_SHIFT 0 ++#define CR_PFIM_FBDC_CR_CH0123_VAL1_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_Y_VAL0 ++*/ ++#define CR_PFIM_FBDC_CR_Y_VAL0 0x003C ++#define CR_PFIM_FBDC_CR_Y_VAL0_MASK 0x000003FFU ++#define CR_PFIM_FBDC_CR_Y_VAL0_SHIFT 0 ++#define CR_PFIM_FBDC_CR_Y_VAL0_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_UV_VAL0 ++*/ ++#define CR_PFIM_FBDC_CR_UV_VAL0 0x0040 ++#define CR_PFIM_FBDC_CR_UV_VAL0_MASK 0x000003FFU ++#define CR_PFIM_FBDC_CR_UV_VAL0_SHIFT 0 ++#define CR_PFIM_FBDC_CR_UV_VAL0_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_Y_VAL1 ++*/ ++#define CR_PFIM_FBDC_CR_Y_VAL1 0x0044 ++#define CR_PFIM_FBDC_CR_Y_VAL1_MASK 0x000003FFU ++#define CR_PFIM_FBDC_CR_Y_VAL1_SHIFT 0 ++#define CR_PFIM_FBDC_CR_Y_VAL1_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_UV_VAL1 ++*/ ++#define CR_PFIM_FBDC_CR_UV_VAL1 0x0048 ++#define CR_PFIM_FBDC_CR_UV_VAL1_MASK 0x000003FFU ++#define CR_PFIM_FBDC_CR_UV_VAL1_SHIFT 0 ++#define CR_PFIM_FBDC_CR_UV_VAL1_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_FILTER_ENABLE ++*/ ++#define CR_PFIM_FBDC_FILTER_ENABLE 0x004C ++#define CR_PFIM_FBDC_FILTER_ENABLE_MASK 0x00000001U ++#define CR_PFIM_FBDC_FILTER_ENABLE_SHIFT 0 ++#define CR_PFIM_FBDC_FILTER_ENABLE_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_FILTER_STATUS ++*/ ++#define CR_PFIM_FBDC_FILTER_STATUS 0x0050 ++#define CR_PFIM_FBDC_FILTER_STATUS_MASK 0x0000000FU ++#define CR_PFIM_FBDC_FILTER_STATUS_SHIFT 0 ++#define CR_PFIM_FBDC_FILTER_STATUS_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_FILTER_CLEAR ++*/ ++#define CR_PFIM_FBDC_FILTER_CLEAR 0x0054 ++#define CR_PFIM_FBDC_FILTER_CLEAR_MASK 0x0000000FU ++#define CR_PFIM_FBDC_FILTER_CLEAR_SHIFT 0 ++#define CR_PFIM_FBDC_FILTER_CLEAR_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_TILE_TYPE ++*/ ++#define CR_PFIM_FBDC_TILE_TYPE 0x0058 ++#define CR_PFIM_FBDC_TILE_TYPE_MASK 0x00000003U ++#define CR_PFIM_FBDC_TILE_TYPE_SHIFT 0 ++#define 
CR_PFIM_FBDC_TILE_TYPE_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CLEAR_COLOUR_LSB ++*/ ++#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB 0x005C ++#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_SHIFT 0 ++#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CLEAR_COLOUR_MSB ++*/ ++#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB 0x0060 ++#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_SHIFT 0 ++#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_REQ_LOSSY ++*/ ++#define CR_PFIM_FBDC_REQ_LOSSY 0x0064 ++#define CR_PFIM_FBDC_REQ_LOSSY_MASK 0x00000001U ++#define CR_PFIM_FBDC_REQ_LOSSY_SHIFT 0 ++#define CR_PFIM_FBDC_REQ_LOSSY_SIGNED 0 ++ ++#endif /* _PFIM_REGS_H_ */ ++ ++/****************************************************************************** ++ End of file (pfim_regs.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/apollo/sysconfig.c b/drivers/gpu/drm/img-rogue/apollo/sysconfig.c +new file mode 100644 +index 000000000000..155181e3d76c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/sysconfig.c +@@ -0,0 +1,1116 @@ ++/*************************************************************************/ /*! ++@File ++@Title System Configuration ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description System Configuration functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
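Every register in pfim_regs.h above follows the same _MASK/_SHIFT naming convention, so field packing is mechanical. A hedged sketch of composing the two request-tag fields; pfim_set_req_tags is hypothetical, while iowrite32 is the standard kernel MMIO write:

    static void pfim_set_req_tags(void __iomem *regs, u32 yargb, u32 uv)
    {
            u32 val = 0;

            /* Shift each field into position, then trim it to its mask */
            val |= (yargb << CR_PFIM_FBDC_REQ_TAG_YARGB_SHIFT) &
                   CR_PFIM_FBDC_REQ_TAG_YARGB_MASK;
            val |= (uv << CR_PFIM_FBDC_REQ_TAG_UV_SHIFT) &
                   CR_PFIM_FBDC_REQ_TAG_UV_MASK;

            iowrite32(val, regs + CR_PFIM_FBDC_REQ_TAG);
    }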
++*/ /**************************************************************************/
++
++#include <linux/version.h> /* assumed: angle-bracket name lost in extraction; needed for the KERNEL_VERSION() checks below */
++
++#include "sysinfo.h"
++#include "apollo_regs.h"
++
++#include "pvrsrv.h"
++#include "pvrsrv_device.h"
++#include "rgxdevice.h"
++#include "syscommon.h"
++#include "allocmem.h"
++#include "pvr_debug.h"
++
++#if defined(SUPPORT_ION)
++#include PVR_ANDROID_ION_HEADER
++#include "ion_support.h"
++#include "ion_sys.h"
++#endif
++
++#include "tc_drv.h"
++
++#include <linux/platform_device.h> /* assumed: for to_platform_device() */
++#include <linux/dma-mapping.h> /* assumed: for dma_set_mask()/DMA_BIT_MASK() */
++
++#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10)
++
++#define UI64_TOPWORD_IS_ZERO(ui64) ((ui64 >> 32) == 0)
++
++#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS)
++
++/* Fake DVFS configuration used purely for testing purposes */
++
++static const IMG_OPP asOPPTable[] =
++{
++	{ 8, 25000000},
++	{ 16, 50000000},
++	{ 32, 75000000},
++	{ 64, 100000000},
++};
++
++#define LEVEL_COUNT (sizeof(asOPPTable) / sizeof(IMG_OPP))
++
++static void SetFrequency(IMG_UINT32 ui32Frequency)
++{
++	PVR_DPF((PVR_DBG_ERROR, "SetFrequency %u", ui32Frequency));
++}
++
++static void SetVoltage(IMG_UINT32 ui32Voltage)
++{
++	PVR_DPF((PVR_DBG_ERROR, "SetVoltage %u", ui32Voltage));
++}
++
++#endif
++
++static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
++				      IMG_UINT32 ui32NumOfAddr,
++				      IMG_DEV_PHYADDR *psDevPAddr,
++				      IMG_CPU_PHYADDR *psCpuPAddr);
++
++static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
++				      IMG_UINT32 ui32NumOfAddr,
++				      IMG_CPU_PHYADDR *psCpuPAddr,
++				      IMG_DEV_PHYADDR *psDevPAddr);
++
++static PHYS_HEAP_FUNCTIONS gsLocalPhysHeapFuncs =
++{
++	.pfnCpuPAddrToDevPAddr = TCLocalCpuPAddrToDevPAddr,
++	.pfnDevPAddrToCpuPAddr = TCLocalDevPAddrToCpuPAddr,
++};
++
++static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
++				     IMG_UINT32 ui32NumOfAddr,
++				     IMG_DEV_PHYADDR *psDevPAddr,
++				     IMG_CPU_PHYADDR *psCpuPAddr);
++
++static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
++				     IMG_UINT32 ui32NumOfAddr,
++				     IMG_CPU_PHYADDR *psCpuPAddr,
++				     IMG_DEV_PHYADDR *psDevPAddr);
++
++static PHYS_HEAP_FUNCTIONS gsHostPhysHeapFuncs =
++{
++	.pfnCpuPAddrToDevPAddr = TCHostCpuPAddrToDevPAddr,
++	.pfnDevPAddrToCpuPAddr = TCHostDevPAddrToCpuPAddr,
++};
++
++typedef struct _SYS_DATA_ SYS_DATA;
++
++struct _SYS_DATA_
++{
++	struct platform_device *pdev;
++
++	struct tc_rogue_platform_data *pdata;
++
++	struct resource *registers;
++
++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++	struct ion_client *ion_client;
++	struct ion_handle *ion_rogue_allocation;
++#endif
++
++#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__)
++	PVRSRV_DEVICE_CONFIG *psDevConfig;
++
++	PHYS_HEAP_ITERATOR *psHeapIter;
++	void *pvS3Buffer;
++#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */
++};
++
++#define SYSTEM_INFO_FORMAT_STRING "FPGA Revision: %s - TCF Core Revision: %s - TCF Core Target Build ID: %s - PCI Version: %s - Macro Version: %s"
++#define FPGA_REV_MAX_LEN 8 /* current longest format: "x.y.z" */
++#define TCF_CORE_REV_MAX_LEN 8 /* current longest format: "x.y.z" */
++#define TCF_CORE_CFG_MAX_LEN 4 /* current longest format: "x" */
++#define PCI_VERSION_MAX_LEN 4 /* current longest format: "x" */
++#define MACRO_VERSION_MAX_LEN 8 /* current longest format: "x.yz" */
++
++static IMG_CHAR *GetDeviceVersionString(SYS_DATA *psSysData)
++{
++	int err;
++	char str_fpga_rev[FPGA_REV_MAX_LEN]={0};
++	char str_tcf_core_rev[TCF_CORE_REV_MAX_LEN]={0};
++	char str_tcf_core_target_build_id[TCF_CORE_CFG_MAX_LEN]={0};
++	char str_pci_ver[PCI_VERSION_MAX_LEN]={0};
++	char 
str_macro_ver[MACRO_VERSION_MAX_LEN]={0}; ++ ++ IMG_CHAR *pszVersion; ++ IMG_UINT32 ui32StringLength; ++ ++ err = tc_sys_strings(psSysData->pdev->dev.parent, ++ str_fpga_rev, sizeof(str_fpga_rev), ++ str_tcf_core_rev, sizeof(str_tcf_core_rev), ++ str_tcf_core_target_build_id, sizeof(str_tcf_core_target_build_id), ++ str_pci_ver, sizeof(str_pci_ver), ++ str_macro_ver, sizeof(str_macro_ver)); ++ if (err) ++ { ++ return NULL; ++ } ++ ++ /* Calculate how much space we need to allocate for the string */ ++ ui32StringLength = OSStringLength(SYSTEM_INFO_FORMAT_STRING); ++ ui32StringLength += OSStringLength(str_fpga_rev); ++ ui32StringLength += OSStringLength(str_tcf_core_rev); ++ ui32StringLength += OSStringLength(str_tcf_core_target_build_id); ++ ui32StringLength += OSStringLength(str_pci_ver); ++ ui32StringLength += OSStringLength(str_macro_ver); ++ ++ /* Create the version string */ ++ pszVersion = OSAllocMem(ui32StringLength * sizeof(IMG_CHAR)); ++ if (pszVersion) ++ { ++ OSSNPrintf(&pszVersion[0], ui32StringLength, ++ SYSTEM_INFO_FORMAT_STRING, ++ str_fpga_rev, ++ str_tcf_core_rev, ++ str_tcf_core_target_build_id, ++ str_pci_ver, ++ str_macro_ver); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to create format string", __func__)); ++ } ++ ++ return pszVersion; ++} ++ ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++static SYS_DATA *gpsIonPrivateData; ++ ++PVRSRV_ERROR IonInit(void *pvPrivateData) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ SYS_DATA *psSysData = pvPrivateData; ++ gpsIonPrivateData = psSysData; ++ ++ psSysData->ion_client = ion_client_create(psSysData->pdata->ion_device, SYS_RGX_DEV_NAME); ++ if (IS_ERR(psSysData->ion_client)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create ION client (%ld)", __func__, PTR_ERR(psSysData->ion_client))); ++ eError = PVRSRV_ERROR_ION_NO_CLIENT; ++ goto err_out; ++ } ++ /* Allocate the whole rogue ion heap and pass that to services to manage */ ++ psSysData->ion_rogue_allocation = ion_alloc(psSysData->ion_client, psSysData->pdata->rogue_heap_memory_size, 4096, (1 << psSysData->pdata->ion_heap_id), 0); ++ if (IS_ERR(psSysData->ion_rogue_allocation)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate ION rogue buffer (%ld)", __func__, PTR_ERR(psSysData->ion_rogue_allocation))); ++ eError = PVRSRV_ERROR_ION_FAILED_TO_ALLOC; ++ goto err_destroy_client; ++ ++ } ++ ++ return PVRSRV_OK; ++err_destroy_client: ++ ion_client_destroy(psSysData->ion_client); ++ psSysData->ion_client = NULL; ++err_out: ++ return eError; ++} ++ ++void IonDeinit(void) ++{ ++ SYS_DATA *psSysData = gpsIonPrivateData; ++ ion_free(psSysData->ion_client, psSysData->ion_rogue_allocation); ++ psSysData->ion_rogue_allocation = NULL; ++ ion_client_destroy(psSysData->ion_client); ++ psSysData->ion_client = NULL; ++} ++ ++struct ion_device *IonDevAcquire(void) ++{ ++ return gpsIonPrivateData->pdata->ion_device; ++} ++ ++void IonDevRelease(struct ion_device *ion_device) ++{ ++ PVR_ASSERT(ion_device == gpsIonPrivateData->pdata->ion_device); ++} ++#endif /* defined(SUPPORT_ION) */ ++ ++static void TCLocalCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, ++ IMG_UINT32 ui32NumOfAddr, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_CPU_PHYADDR *psCpuPAddr) ++{ ++ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; ++ SYS_DATA *psSysData = psDevConfig->hSysData; ++ IMG_UINT32 ui32Idx; ++ ++ for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) ++ { ++ psDevPAddr[ui32Idx].uiAddr = ++ psCpuPAddr[ui32Idx].uiAddr - psSysData->pdata->tc_memory_base; 
++ } ++} ++ ++static void TCLocalDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, ++ IMG_UINT32 ui32NumOfAddr, ++ IMG_CPU_PHYADDR *psCpuPAddr, ++ IMG_DEV_PHYADDR *psDevPAddr) ++{ ++ PVRSRV_DEVICE_CONFIG *psDevConfig = (PVRSRV_DEVICE_CONFIG *)hPrivData; ++ SYS_DATA *psSysData = psDevConfig->hSysData; ++ IMG_UINT32 ui32Idx; ++ ++ for (ui32Idx = 0; ui32Idx < ui32NumOfAddr; ui32Idx++) ++ { ++ psCpuPAddr[ui32Idx].uiAddr = ++ psDevPAddr[ui32Idx].uiAddr + psSysData->pdata->tc_memory_base; ++ } ++} ++ ++static void TCHostCpuPAddrToDevPAddr(IMG_HANDLE hPrivData, ++ IMG_UINT32 uiNumOfAddr, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_CPU_PHYADDR *psCpuPAddr) ++{ ++ if (sizeof(*psDevPAddr) == sizeof(*psCpuPAddr)) ++ { ++ OSCachedMemCopy(psDevPAddr, psCpuPAddr, uiNumOfAddr * sizeof(*psDevPAddr)); ++ return; ++ } ++ ++ /* In this case we may have a 32bit host, so we can't do a memcpy */ ++ /* Optimise common case */ ++ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr; ++ if (uiNumOfAddr > 1) ++ { ++ IMG_UINT32 ui32Idx; ++ for (ui32Idx = 1; ui32Idx < uiNumOfAddr; ++ui32Idx) ++ { ++ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr; ++ } ++ } ++} ++ ++static void TCHostDevPAddrToCpuPAddr(IMG_HANDLE hPrivData, ++ IMG_UINT32 uiNumOfAddr, ++ IMG_CPU_PHYADDR *psCpuPAddr, ++ IMG_DEV_PHYADDR *psDevPAddr) ++{ ++ if (sizeof(*psCpuPAddr) == sizeof(*psDevPAddr)) ++ { ++ OSCachedMemCopy(psCpuPAddr, psDevPAddr, uiNumOfAddr * sizeof(*psCpuPAddr)); ++ return; ++ } ++ ++ /* In this case we may have a 32bit host, so we can't do a memcpy. ++ * Check we are not dropping any data from the 64bit dev addr */ ++ PVR_ASSERT(UI64_TOPWORD_IS_ZERO(psDevPAddr[0].uiAddr)); ++ /* Optimise common case */ ++ psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr); ++ if (uiNumOfAddr > 1) ++ { ++ IMG_UINT32 ui32Idx; ++ for (ui32Idx = 1; ui32Idx < uiNumOfAddr; ++ui32Idx) ++ { ++ PVR_ASSERT(UI64_TOPWORD_IS_ZERO(psDevPAddr[ui32Idx].uiAddr)); ++ psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr); ++ } ++ } ++ ++} ++ ++static PVRSRV_ERROR ++InitLocalHeap(PHYS_HEAP_CONFIG *psPhysHeap, ++ IMG_UINT64 uiBaseAddr, IMG_UINT64 uiStartAddr, ++ IMG_UINT64 uiSize, PHYS_HEAP_FUNCTIONS *psFuncs, ++ PHYS_HEAP_USAGE_FLAGS ui32Flags) ++{ ++ psPhysHeap->sCardBase.uiAddr = uiBaseAddr; ++ psPhysHeap->sStartAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(uiStartAddr); ++ psPhysHeap->uiSize = uiSize; ++ ++ psPhysHeap->eType = PHYS_HEAP_TYPE_LMA; ++ psPhysHeap->pszPDumpMemspaceName = "LMA"; ++ psPhysHeap->psMemFuncs = psFuncs; ++ psPhysHeap->ui32UsageFlags = ui32Flags; ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++InitLocalHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 *pui32PhysHeapCount) ++{ ++ struct tc_rogue_platform_data *pdata = psSysData->pdata; ++ PHYS_HEAP_FUNCTIONS *psHeapFuncs; ++ IMG_UINT64 uiLocalCardBase; ++ PVRSRV_ERROR eError; ++ ++ if (psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) ++ { ++ psHeapFuncs = &gsHostPhysHeapFuncs; ++ uiLocalCardBase = psSysData->pdata->tc_memory_base; ++ } ++ else ++ { ++ psHeapFuncs = &gsLocalPhysHeapFuncs; ++ uiLocalCardBase = 0; ++ } ++ ++ eError = InitLocalHeap(&pasPhysHeaps[(*pui32PhysHeapCount)++], ++ uiLocalCardBase, pdata->rogue_heap_memory_base, ++ pdata->rogue_heap_memory_size, psHeapFuncs, ++ PHYS_HEAP_USAGE_GPU_LOCAL); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ ++#if TC_DISPLAY_MEM_SIZE != 0 ++ eError = InitLocalHeap(&pasPhysHeaps[(*pui32PhysHeapCount)++], ++ uiLocalCardBase, pdata->pdp_heap_memory_base, ++ 
pdata->pdp_heap_memory_size, psHeapFuncs, ++ PHYS_HEAP_USAGE_EXTERNAL | PHYS_HEAP_USAGE_DISPLAY); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++InitHostHeaps(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, IMG_UINT32 *pui32PhysHeapCount) ++{ ++ if (psSysData->pdata->mem_mode != TC_MEMORY_LOCAL) ++ { ++ pasPhysHeaps[*pui32PhysHeapCount].eType = PHYS_HEAP_TYPE_UMA; ++ pasPhysHeaps[*pui32PhysHeapCount].pszPDumpMemspaceName = "SYSMEM"; ++ pasPhysHeaps[*pui32PhysHeapCount].psMemFuncs = &gsHostPhysHeapFuncs; ++ pasPhysHeaps[*pui32PhysHeapCount].ui32UsageFlags = PHYS_HEAP_USAGE_CPU_LOCAL; ++ ++ (*pui32PhysHeapCount)++; ++ ++ PVR_DPF((PVR_DBG_WARNING, ++ "Initialising CPU_LOCAL UMA Host PhysHeaps with memory mode: %d", ++ psSysData->pdata->mem_mode)); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++PhysHeapsInit(const SYS_DATA *psSysData, PHYS_HEAP_CONFIG *pasPhysHeaps, ++ void *pvPrivData, IMG_UINT32 *pui32PhysHeapCount) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 i; ++ ++ eError = InitLocalHeaps(psSysData, pasPhysHeaps, pui32PhysHeapCount); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ ++ eError = InitHostHeaps(psSysData, pasPhysHeaps, pui32PhysHeapCount); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ ++ /* Initialise fields that don't change between memory modes. ++ * Fix up heap IDs. This is needed for multi-testchip systems to ++ * ensure the heap IDs are unique as this is what Services expects. ++ */ ++ for (i = 0; i < *pui32PhysHeapCount; i++) ++ { ++ pasPhysHeaps[i].hPrivData = pvPrivData; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++PhysHeapsCreate(const SYS_DATA *psSysData, void *pvPrivData, ++ PHYS_HEAP_CONFIG **ppasPhysHeapsOut, ++ IMG_UINT32 *puiPhysHeapCountOut) ++{ ++ PHYS_HEAP_CONFIG *pasPhysHeaps; ++ IMG_UINT32 ui32NumPhysHeaps; ++ IMG_UINT32 ui32PhysHeapCount = 0; ++ PVRSRV_ERROR eError; ++ ++ switch (psSysData->pdata->mem_mode) ++ { ++ case TC_MEMORY_LOCAL: ui32NumPhysHeaps = 1U; break; ++ case TC_MEMORY_HYBRID: ui32NumPhysHeaps = 2U; break; ++ default: ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: unsupported memory mode %d", __func__, psSysData->pdata->mem_mode)); ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++ } ++ } ++ ++#if TC_DISPLAY_MEM_SIZE != 0 ++ if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL || ++ psSysData->pdata->mem_mode == TC_MEMORY_HYBRID) ++ { ++ ui32NumPhysHeaps += 1U; ++ } ++#endif ++ ++ pasPhysHeaps = OSAllocMem(sizeof(*pasPhysHeaps) * ui32NumPhysHeaps); ++ if (!pasPhysHeaps) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ eError = PhysHeapsInit(psSysData, pasPhysHeaps, pvPrivData, &ui32PhysHeapCount); ++ if (eError != PVRSRV_OK) ++ { ++ OSFreeMem(pasPhysHeaps); ++ return eError; ++ } ++ ++ PVR_ASSERT(ui32PhysHeapCount == ui32NumPhysHeaps); ++ ++ *ppasPhysHeapsOut = pasPhysHeaps; ++ *puiPhysHeapCountOut = ui32PhysHeapCount; ++ ++ return PVRSRV_OK; ++} ++ ++static void DeviceConfigDestroy(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ if (psDevConfig->pszVersion) ++ { ++ OSFreeMem(psDevConfig->pszVersion); ++ } ++ ++ OSFreeMem(psDevConfig->pasPhysHeaps); ++ ++ OSFreeMem(psDevConfig); ++} ++ ++static void odinTCDevPhysAddr2DmaAddr(PVRSRV_DEVICE_CONFIG *psDevConfig, ++ IMG_DMA_ADDR *psDmaAddr, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_BOOL *pbValid, ++ IMG_UINT32 ui32NumAddr, ++ IMG_BOOL bSparseAlloc) ++{ ++ IMG_CPU_PHYADDR sCpuPAddr = {0}; ++ IMG_UINT32 ui32Idx; ++ ++ /* Fast path */ ++ if (!bSparseAlloc) ++ { ++ /* In Odin, DMA address 
space is the same as host CPU */
++		TCLocalDevPAddrToCpuPAddr(psDevConfig,
++					  1,
++					  &sCpuPAddr,
++					  psDevPAddr);
++		psDmaAddr->uiAddr = sCpuPAddr.uiAddr;
++	}
++	else
++	{
++		for (ui32Idx = 0; ui32Idx < ui32NumAddr; ui32Idx++)
++		{
++			if (pbValid[ui32Idx])
++			{
++				TCLocalDevPAddrToCpuPAddr(psDevConfig,
++							  1,
++							  &sCpuPAddr,
++							  &psDevPAddr[ui32Idx]);
++				psDmaAddr[ui32Idx].uiAddr = sCpuPAddr.uiAddr;
++			}
++			else
++			{
++				/* Invalid DMA address marker */
++				psDmaAddr[ui32Idx].uiAddr = ~((IMG_UINT64)0x0);
++			}
++		}
++	}
++}
++
++static void * odinTCgetCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig, char *name)
++{
++	struct device* psDev = (struct device*) psDevConfig->pvOSDevice;
++	return tc_dma_chan(psDev->parent, name);
++}
++
++static void odinTCFreeCDMAChan(PVRSRV_DEVICE_CONFIG *psDevConfig,
++			       void* channel)
++{
++
++	struct device* psDev = (struct device*) psDevConfig->pvOSDevice;
++	struct dma_chan *chan = (struct dma_chan*) channel;
++
++	tc_dma_chan_free(psDev->parent, chan);
++}
++
++static PVRSRV_ERROR DeviceConfigCreate(SYS_DATA *psSysData,
++				       PVRSRV_DEVICE_CONFIG **ppsDevConfigOut)
++{
++	PVRSRV_DEVICE_CONFIG *psDevConfig;
++	RGX_DATA *psRGXData;
++	RGX_TIMING_INFORMATION *psRGXTimingInfo;
++	PHYS_HEAP_CONFIG *pasPhysHeaps;
++	IMG_UINT32 uiPhysHeapCount;
++	PVRSRV_ERROR eError;
++
++	psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
++				  sizeof(*psRGXData) +
++				  sizeof(*psRGXTimingInfo));
++	if (!psDevConfig)
++	{
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++
++	psRGXData = (RGX_DATA *) IMG_OFFSET_ADDR(psDevConfig, sizeof(*psDevConfig));
++	psRGXTimingInfo = (RGX_TIMING_INFORMATION *) IMG_OFFSET_ADDR(psRGXData, sizeof(*psRGXData));
++
++	eError = PhysHeapsCreate(psSysData, psDevConfig, &pasPhysHeaps, &uiPhysHeapCount);
++	if (eError != PVRSRV_OK)
++	{
++		goto ErrorFreeDevConfig;
++	}
++
++	/* Setup RGX specific timing data */
++#if defined(TC_APOLLO_BONNIE)
++	/* For BonnieTC there seems to be an additional 5x multiplier applied to the clock, as the measured speed is 540MHz rather than 108MHz.
*/ ++ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6 * 5; ++#elif defined(TC_APOLLO_ES2) ++ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) * 6; ++#else ++ psRGXTimingInfo->ui32CoreClockSpeed = tc_core_clock_speed(&psSysData->pdev->dev) / ++ tc_core_clock_multiplex(&psSysData->pdev->dev); ++#endif ++ psRGXTimingInfo->bEnableActivePM = IMG_FALSE; ++ psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE; ++ psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS; ++ ++ /* Set up the RGX data */ ++ psRGXData->psRGXTimingInfo = psRGXTimingInfo; ++ ++ /* Setup the device config */ ++ psDevConfig->pvOSDevice = &psSysData->pdev->dev; ++ psDevConfig->pszName = "tc"; ++ psDevConfig->pszVersion = GetDeviceVersionString(psSysData); ++ ++ psDevConfig->sRegsCpuPBase.uiAddr = psSysData->registers->start; ++ psDevConfig->ui32RegsSize = resource_size(psSysData->registers); ++ psDevConfig->eDefaultHeap = PVRSRV_PHYS_HEAP_GPU_LOCAL; ++ ++ psDevConfig->ui32IRQ = TC_INTERRUPT_EXT; ++ ++ psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE; ++ ++ psDevConfig->pasPhysHeaps = pasPhysHeaps; ++ psDevConfig->ui32PhysHeapCount = uiPhysHeapCount; ++ ++ /* Only required for LMA but having this always set shouldn't be a problem */ ++ psDevConfig->bDevicePA0IsValid = IMG_TRUE; ++ ++ psDevConfig->hDevData = psRGXData; ++ psDevConfig->hSysData = psSysData; ++ ++#if defined(SUPPORT_ALT_REGBASE) ++ if (psSysData->pdata->mem_mode != TC_MEMORY_LOCAL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: alternative GPU register base is " ++ "supported only in LMA mode", __func__)); ++ goto ErrorFreeDevConfig; ++ } ++ ++ /* Using display memory base as the alternative GPU register base, ++ * since the display memory range is not used by the firmware. */ ++ TCLocalCpuPAddrToDevPAddr(psDevConfig, 1, ++ &psDevConfig->sAltRegsGpuPBase, ++ &pasPhysHeaps[PHY_HEAP_CARD_EXT].sStartAddr); ++#endif ++ ++#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) ++ /* Fake DVFS configuration used purely for testing purposes */ ++ psDevConfig->sDVFS.sDVFSDeviceCfg.pasOPPTable = asOPPTable; ++ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32OPPTableSize = LEVEL_COUNT; ++ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency; ++ psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage; ++#endif ++#if defined(SUPPORT_LINUX_DVFS) ++ psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 1000; ++ psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE; ++ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90; ++ psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10; ++#endif ++ ++ /* DMA channel config */ ++ psDevConfig->pfnSlaveDMAGetChan = odinTCgetCDMAChan; ++ psDevConfig->pfnSlaveDMAFreeChan = odinTCFreeCDMAChan; ++ psDevConfig->pfnDevPhysAddr2DmaAddr = odinTCDevPhysAddr2DmaAddr; ++ psDevConfig->pszDmaTxChanName = psSysData->pdata->tc_dma_tx_chan_name; ++ psDevConfig->pszDmaRxChanName = psSysData->pdata->tc_dma_rx_chan_name; ++ psDevConfig->bHasDma = IMG_TRUE; ++ /* Following two values are expressed in number of bytes */ ++ psDevConfig->ui32DmaTransferUnit = 1; ++ psDevConfig->ui32DmaAlignment = 1; ++ ++ *ppsDevConfigOut = psDevConfig; ++ ++ return PVRSRV_OK; ++ ++ErrorFreeDevConfig: ++ OSFreeMem(psDevConfig); ++ return eError; ++} ++ ++#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) ++/* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */ ++#define _DBG(...) 
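The `_DBG` pair above is the usual compile-time trace toggle: swapping which definition is commented in routes every `_DBG(...)` call either to PVR_LOG or to nothing, with no #ifdef at each call site. Spelled out with an explicit switch (ENABLE_S3_TRACE is hypothetical; the file itself just comments one line in or out):

    #if defined(ENABLE_S3_TRACE)    /* hypothetical switch */
    #define _DBG(...) PVR_LOG((__VA_ARGS__))
    #else
    #define _DBG(...) /* expands to nothing, so the trace calls compile away */
    #endif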
++ ++static PVRSRV_ERROR PrePower(IMG_HANDLE hSysData, ++ PVRSRV_SYS_POWER_STATE eNewPowerState, ++ PVRSRV_SYS_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ SYS_DATA *psSysData = (SYS_DATA *) hSysData; ++ IMG_DEV_PHYADDR sDevPAddr = {0}; ++ IMG_UINT64 uiHeapTotalSize, uiHeapUsedSize, uiHeapFreeSize; ++ IMG_UINT64 uiSize = 0, uiOffset = 0; ++ PVRSRV_ERROR eError; ++ ++ _DBG("(%s()) state: current=%d, new=%d; flags: 0x%08x", __func__, ++ eCurrentPowerState, eNewPowerState, ePwrFlags); ++ ++ if (eNewPowerState == eCurrentPowerState || ++ eNewPowerState != PVRSRV_SYS_POWER_STATE_OFF || ++ !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_SUSPEND)) ++ { ++ return PVRSRV_OK; ++ } ++ ++ eError = LMA_HeapIteratorCreate(psSysData->psDevConfig->psDevNode, ++ PHYS_HEAP_USAGE_GPU_LOCAL, ++ &psSysData->psHeapIter); ++ PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorCreate", return_error); ++ ++ eError = LMA_HeapIteratorGetHeapStats(psSysData->psHeapIter, &uiHeapTotalSize, ++ &uiHeapUsedSize); ++ PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorGetHeapStats", ++ return_error); ++ uiHeapFreeSize = uiHeapTotalSize - uiHeapUsedSize; ++ ++ _DBG("(%s()) heap stats: total=0x%" IMG_UINT64_FMTSPECx ", " ++ "used=0x%" IMG_UINT64_FMTSPECx ", free=0x%" IMG_UINT64_FMTSPECx, ++ __func__, uiHeapTotalSize, uiHeapUsedSize, uiHeapFreeSize); ++ ++ psSysData->pvS3Buffer = OSAllocMem(uiHeapUsedSize); ++ PVR_LOG_GOTO_IF_NOMEM(psSysData->pvS3Buffer, eError, destroy_iterator); ++ ++ while (LMA_HeapIteratorNext(psSysData->psHeapIter, &sDevPAddr, &uiSize)) ++ { ++ void *pvCpuVAddr; ++ IMG_CPU_PHYADDR sCpuPAddr = {0}; ++ ++ if (uiOffset + uiSize > uiHeapUsedSize) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "uiOffset = %" IMG_UINT64_FMTSPECx ", " ++ "uiSize = %" IMG_UINT64_FMTSPECx, uiOffset, uiSize)); ++ ++ PVR_LOG_GOTO_WITH_ERROR("LMA_HeapIteratorNext", eError, ++ PVRSRV_ERROR_INVALID_OFFSET, ++ free_buffer); ++ } ++ ++ TCLocalDevPAddrToCpuPAddr(psSysData->psDevConfig, 1, &sCpuPAddr, ++ &sDevPAddr); ++ ++ pvCpuVAddr = OSMapPhysToLin(sCpuPAddr, uiSize, ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); ++ ++ _DBG("(%s()) iterator: dev_paddr=%px, cpu_paddr=%px, cpu_vaddr=%px, " ++ "size=0x%05" IMG_UINT64_FMTSPECx, __func__, ++ (void *) sCpuPAddr.uiAddr, ++ pvCpuVAddr, (IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, ++ uiSize); ++ ++ /* copy memory */ ++ memcpy((IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, pvCpuVAddr, ++ uiSize); ++ /* and now poison it */ ++ memset(pvCpuVAddr, 0x9b, uiSize); ++ ++ uiOffset += uiSize; ++ ++ OSUnMapPhysToLin(pvCpuVAddr, uiSize); ++ } ++ ++ return PVRSRV_OK; ++ ++free_buffer: ++ OSFreeMem(psSysData->pvS3Buffer); ++ psSysData->pvS3Buffer = NULL; ++destroy_iterator: ++ LMA_HeapIteratorDestroy(psSysData->psHeapIter); ++ psSysData->psHeapIter = NULL; ++return_error: ++ return eError; ++} ++ ++static PVRSRV_ERROR PostPower(IMG_HANDLE hSysData, ++ PVRSRV_SYS_POWER_STATE eNewPowerState, ++ PVRSRV_SYS_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ SYS_DATA *psSysData = (SYS_DATA *) hSysData; ++ IMG_DEV_PHYADDR sDevPAddr = {0}; ++ IMG_UINT64 uiSize = 0, uiOffset = 0; ++ PVRSRV_ERROR eError; ++ ++ _DBG("(%s()) state: current=%d, new=%d; flags=0x%08x; buffer null?=%d", __func__, ++ eCurrentPowerState, eNewPowerState, ePwrFlags, psSysData->pvS3Buffer == NULL); ++ ++ if (eNewPowerState == eCurrentPowerState || ++ eCurrentPowerState != PVRSRV_SYS_POWER_STATE_OFF || ++ !BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_SUSPEND) || ++ psSysData->pvS3Buffer == NULL) ++ { ++ return PVRSRV_OK; ++ } ++ ++ 
eError = LMA_HeapIteratorReset(psSysData->psHeapIter); ++ PVR_LOG_GOTO_IF_ERROR(eError, "LMA_HeapIteratorReset", free_buffer); ++ ++ while (LMA_HeapIteratorNext(psSysData->psHeapIter, &sDevPAddr, &uiSize)) ++ { ++ void *pvCpuVAddr; ++ IMG_CPU_PHYADDR sCpuPAddr = {0}; ++ ++ TCLocalDevPAddrToCpuPAddr(psSysData->psDevConfig, 1, &sCpuPAddr, ++ &sDevPAddr); ++ ++ pvCpuVAddr = OSMapPhysToLin(sCpuPAddr, uiSize, ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); ++ ++ _DBG("(%s()) iterator: dev_paddr=%px, cpu_paddr=%px, cpu_vaddr=%px, " ++ "size=0x%05" IMG_UINT64_FMTSPECx, __func__, ++ (void *) sCpuPAddr.uiAddr, ++ pvCpuVAddr, (IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, ++ uiSize); ++ ++ /* copy memory */ ++ memcpy(pvCpuVAddr, (IMG_BYTE *) psSysData->pvS3Buffer + uiOffset, ++ uiSize); ++ ++ uiOffset += uiSize; ++ ++ OSUnMapPhysToLin(pvCpuVAddr, uiSize); ++ } ++ ++ LMA_HeapIteratorDestroy(psSysData->psHeapIter); ++ psSysData->psHeapIter = NULL; ++ ++ OSFreeMem(psSysData->pvS3Buffer); ++ psSysData->pvS3Buffer = NULL; ++ ++ return PVRSRV_OK; ++ ++free_buffer: ++ OSFreeMem(psSysData->pvS3Buffer); ++ psSysData->pvS3Buffer = NULL; ++ ++ return eError; ++} ++#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */ ++ ++PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig) ++{ ++ PVRSRV_DEVICE_CONFIG *psDevConfig; ++ SYS_DATA *psSysData; ++ resource_size_t uiRegistersSize; ++ PVRSRV_ERROR eError; ++ int err = 0; ++ ++ PVR_ASSERT(pvOSDevice); ++ ++ psSysData = OSAllocZMem(sizeof(*psSysData)); ++ if (psSysData == NULL) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ psSysData->pdev = to_platform_device((struct device *)pvOSDevice); ++ psSysData->pdata = psSysData->pdev->dev.platform_data; ++ ++ /* ++ * The device cannot address system memory, so there is no DMA ++ * limitation. ++ */ ++ if (psSysData->pdata->mem_mode == TC_MEMORY_LOCAL) ++ { ++ dma_set_mask(pvOSDevice, DMA_BIT_MASK(64)); ++ } ++ else ++ { ++ dma_set_mask(pvOSDevice, DMA_BIT_MASK(32)); ++ } ++ ++ err = tc_enable(psSysData->pdev->dev.parent); ++ if (err) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to enable PCI device (%d)", __func__, err)); ++ eError = PVRSRV_ERROR_PCI_CALL_FAILED; ++ goto ErrFreeSysData; ++ } ++ ++ psSysData->registers = platform_get_resource_byname(psSysData->pdev, ++ IORESOURCE_MEM, ++ "rogue-regs"); ++ if (!psSysData->registers) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to get Rogue register information", ++ __func__)); ++ eError = PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; ++ goto ErrorDevDisable; ++ } ++ ++ /* Check the address range is large enough. 
*/ ++ uiRegistersSize = resource_size(psSysData->registers); ++ if (uiRegistersSize < SYS_RGX_REG_REGION_SIZE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Rogue register region isn't big enough (was %pa, required 0x%08x)", ++ __func__, &uiRegistersSize, SYS_RGX_REG_REGION_SIZE)); ++ ++ eError = PVRSRV_ERROR_PCI_REGION_TOO_SMALL; ++ goto ErrorDevDisable; ++ } ++ ++ /* Reserve the address range */ ++ if (!request_mem_region(psSysData->registers->start, ++ resource_size(psSysData->registers), ++ SYS_RGX_DEV_NAME)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Rogue register memory region not available", ++ __func__)); ++ eError = PVRSRV_ERROR_PCI_CALL_FAILED; ++ ++ goto ErrorDevDisable; ++ } ++ ++ eError = DeviceConfigCreate(psSysData, &psDevConfig); ++ if (eError != PVRSRV_OK) ++ { ++ goto ErrorReleaseMemRegion; ++ } ++ ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ eError = IonInit(psSysData); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise ION", __func__)); ++ goto ErrorDeviceConfigDestroy; ++ } ++#endif ++ ++ /* Set psDevConfig->pfnSysDevErrorNotify callback */ ++ psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify; ++ ++#if defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) ++ /* power functions */ ++ psDevConfig->pfnPrePowerState = PrePower; ++ psDevConfig->pfnPostPowerState = PostPower; ++ ++ psSysData->psDevConfig = psDevConfig; ++#endif /* defined(SUPPORT_LMA_SUSPEND_TO_RAM) && defined(__x86_64__) */ ++ ++ *ppsDevConfig = psDevConfig; ++ ++ return PVRSRV_OK; ++ ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ErrorDeviceConfigDestroy: ++ DeviceConfigDestroy(psDevConfig); ++#endif ++ErrorReleaseMemRegion: ++ release_mem_region(psSysData->registers->start, ++ resource_size(psSysData->registers)); ++ErrorDevDisable: ++ tc_disable(psSysData->pdev->dev.parent); ++ErrFreeSysData: ++ OSFreeMem(psSysData); ++ return eError; ++} ++ ++void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ SYS_DATA *psSysData = (SYS_DATA *)psDevConfig->hSysData; ++ ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ IonDeinit(); ++#endif ++ ++ DeviceConfigDestroy(psDevConfig); ++ ++ release_mem_region(psSysData->registers->start, ++ resource_size(psSysData->registers)); ++ tc_disable(psSysData->pdev->dev.parent); ++ ++ OSFreeMem(psSysData); ++} ++ ++PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++#if defined(TC_APOLLO_TCF5) ++ PVR_UNREFERENCED_PARAMETER(psDevConfig); ++ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); ++ return PVRSRV_OK; ++#else ++ SYS_DATA *psSysData = psDevConfig->hSysData; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ u32 tmp = 0; ++ u32 pll; ++ ++ PVR_DUMPDEBUG_LOG("------[ rgx_tc system debug ]------"); ++ ++ if (tc_sys_info(psSysData->pdev->dev.parent, &tmp, &pll)) ++ goto err_out; ++ ++ if (tmp > 0) ++ PVR_DUMPDEBUG_LOG("Chip temperature: %d degrees C", tmp); ++ PVR_DUMPDEBUG_LOG("PLL status: %x", pll); ++ ++err_out: ++ return eError; ++#endif ++} ++ ++typedef struct ++{ ++ struct device *psDev; ++ int iInterruptID; ++ void *pvData; ++ PFN_LISR pfnLISR; ++} LISR_DATA; ++ ++static void TCInterruptHandler(void* pvData) ++{ ++ LISR_DATA *psLISRData = pvData; ++ psLISRData->pfnLISR(psLISRData->pvData); ++} ++ ++PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, ++ IMG_UINT32 ui32IRQ, ++ const IMG_CHAR *pszName, ++ PFN_LISR pfnLISR, ++ void *pvData, ++ IMG_HANDLE 
*phLISRData) ++{ ++ SYS_DATA *psSysData = (SYS_DATA *)hSysData; ++ LISR_DATA *psLISRData; ++ PVRSRV_ERROR eError; ++ int err; ++ ++ if (ui32IRQ != TC_INTERRUPT_EXT) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: No device matching IRQ %d", __func__, ui32IRQ)); ++ return PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; ++ } ++ ++ psLISRData = OSAllocZMem(sizeof(*psLISRData)); ++ if (!psLISRData) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_out; ++ } ++ ++ psLISRData->pfnLISR = pfnLISR; ++ psLISRData->pvData = pvData; ++ psLISRData->iInterruptID = ui32IRQ; ++ psLISRData->psDev = psSysData->pdev->dev.parent; ++ ++ err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, TCInterruptHandler, psLISRData); ++ if (err) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err)); ++ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; ++ goto err_free_data; ++ } ++ ++ err = tc_enable_interrupt(psLISRData->psDev, psLISRData->iInterruptID); ++ if (err) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: tc_enable_interrupt() failed (%d)", __func__, err)); ++ eError = PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR; ++ goto err_unset_interrupt_handler; ++ } ++ ++ *phLISRData = psLISRData; ++ eError = PVRSRV_OK; ++ ++ PVR_TRACE(("Installed device LISR " IMG_PFN_FMTSPEC " with tc module to ID %u", ++ pfnLISR, ui32IRQ)); ++ ++err_out: ++ return eError; ++err_unset_interrupt_handler: ++ tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); ++err_free_data: ++ OSFreeMem(psLISRData); ++ goto err_out; ++} ++ ++PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData) ++{ ++ LISR_DATA *psLISRData = (LISR_DATA *) hLISRData; ++ int err; ++ ++ err = tc_disable_interrupt(psLISRData->psDev, psLISRData->iInterruptID); ++ if (err) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: tc_disable_interrupt() failed (%d)", __func__, err)); ++ } ++ ++ err = tc_set_interrupt_handler(psLISRData->psDev, psLISRData->iInterruptID, NULL, NULL); ++ if (err) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: tc_set_interrupt_handler() failed (%d)", __func__, err)); ++ } ++ ++ PVR_TRACE(("Uninstalled device LISR " IMG_PFN_FMTSPEC " with tc module from ID %u", ++ psLISRData->pfnLISR, psLISRData->iInterruptID)); ++ ++ OSFreeMem(psLISRData); ++ ++ return PVRSRV_OK; ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/sysinfo.h b/drivers/gpu/drm/img-rogue/apollo/sysinfo.h +new file mode 100644 +index 000000000000..b71df887b113 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/sysinfo.h +@@ -0,0 +1,60 @@ ++/*************************************************************************/ /*! ++@File ++@Title System Description Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This header provides system-specific declarations and macros ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
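For reference, an illustrative use of the SysInstallDeviceLISR/SysUninstallDeviceLISR pair above; MyLISR and pvMyData are placeholders, and error handling is elided:

    IMG_HANDLE hLISR;

    if (SysInstallDeviceLISR(hSysData, TC_INTERRUPT_EXT, "rgx",
                             MyLISR, pvMyData, &hLISR) == PVRSRV_OK)
    {
            /* MyLISR(pvMyData) now runs, via the TCInterruptHandler
             * trampoline, on each testchip interrupt. */
            SysUninstallDeviceLISR(hLISR);
    }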
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(__SYSINFO_H__) ++#define __SYSINFO_H__ ++ ++/*!< System specific poll/timeout details */ ++#if defined(VIRTUAL_PLATFORM) || defined(FPGA) ++#define MAX_HW_TIME_US (240000000) ++#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (120000) ++#else ++#define MAX_HW_TIME_US (500000) ++#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000) ++#endif ++#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) ++#define WAIT_TRY_COUNT (10000) ++ ++#define SYS_RGX_DEV_NAME "tc_rogue" ++ ++#endif /* !defined(__SYSINFO_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_apollo.c b/drivers/gpu/drm/img-rogue/apollo/tc_apollo.c +new file mode 100644 +index 000000000000..f664b3f2e22d +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/tc_apollo.c +@@ -0,0 +1,1499 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. 
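The poll/timeout constants in sysinfo.h above parameterise busy-wait loops in the services framework; a representative (hypothetical) consumer, with hw_ready() standing in for a real status check:

    IMG_UINT32 uiTry;

    for (uiTry = 0; uiTry < WAIT_TRY_COUNT; uiTry++)
    {
            if (hw_ready())
                    break;
            /* Spread the total budget over the tries: with the non-FPGA
             * values, 500000us / 10000 = 50us per attempt. */
            udelay(MAX_HW_TIME_US / WAIT_TRY_COUNT);
    }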
++ *
++ * If you wish to allow use of your version of this file only under the terms of
++ * GPL, and not to allow others to use your version of this file under the terms
++ * of the MIT license, indicate your decision by deleting the provisions above
++ * and replace them with the notice and other provisions required by GPL as set
++ * out in the file called "GPL-COPYING" included in this distribution. If you do
++ * not delete the provisions above, a recipient may use your version of this file
++ * under the terms of either the MIT license or GPL.
++ *
++ * This License is also included in this distribution in the file called
++ * "MIT-COPYING".
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/*
++ * This is a device driver for the apollo testchip framework. It creates
++ * platform devices for the pdp and ext sub-devices, and exports functions to
++ * manage the shared interrupt handling
++ */
++
++/* assumed: these six angle-bracket header names were lost in extraction and
++ * are inferred from the delay, MMIO, PCI, platform-device and thermal usage
++ * in this file */
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/thermal.h>
++
++#include "tc_drv_internal.h"
++#include "tc_apollo.h"
++
++#if defined(SUPPORT_DMA_HEAP)
++#include "tc_dmabuf_heap.h"
++#elif defined(SUPPORT_ION)
++#include "tc_ion.h"
++#endif
++
++#include "apollo_regs.h"
++#include "tcf_clk_ctrl.h"
++#include "tcf_pll.h"
++#include "tc_clocks.h"
++
++#if defined(SUPPORT_APOLLO_FPGA)
++#include "tc_apollo_debugfs.h"
++#endif /* defined(SUPPORT_APOLLO_FPGA) */
++
++#define TC_INTERRUPT_FLAG_PDP (1 << PDP1_INT_SHIFT)
++#define TC_INTERRUPT_FLAG_EXT (1 << EXT_INT_SHIFT)
++
++#define PCI_VENDOR_ID_POWERVR 0x1010
++#define DEVICE_ID_PCI_APOLLO_FPGA 0x1CF1
++#define DEVICE_ID_PCIE_APOLLO_FPGA 0x1CF2
++
++#define APOLLO_MEM_PCI_BASENUM (2)
++
++static struct {
++	struct thermal_zone_device *thermal_zone;
++
++#if defined(SUPPORT_APOLLO_FPGA)
++	struct tc_io_region fpga;
++	struct apollo_debugfs_fpga_entries fpga_entries;
++#endif
++} apollo_pdata;
++
++#if defined(SUPPORT_APOLLO_FPGA)
++
++#define APOLLO_DEVICE_NAME_FPGA "apollo_fpga"
++
++struct apollo_fpga_platform_data {
++	/* The testchip memory mode (LMA, HOST or HYBRID) */
++	int mem_mode;
++
++	resource_size_t tc_memory_base;
++
++	resource_size_t pdp_heap_memory_base;
++	resource_size_t pdp_heap_memory_size;
++};
++
++#endif /* defined(SUPPORT_APOLLO_FPGA) */
++
++static void spi_write(struct tc_device *tc, u32 off, u32 val)
++{
++	iowrite32(off, tc->tcf.registers
++		  + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR);
++	iowrite32(val, tc->tcf.registers
++		  + TCF_CLK_CTRL_TCF_SPI_MST_WDATA);
++	iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers
++		  + TCF_CLK_CTRL_TCF_SPI_MST_GO);
++	udelay(1000);
++}
++
++static int spi_read(struct tc_device *tc, u32 off, u32 *val)
++{
++	int cnt = 0;
++	u32 spi_mst_status;
++
++	iowrite32(0x40000 | off, tc->tcf.registers
++		  + TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR);
++	iowrite32(TCF_SPI_MST_GO_MASK, tc->tcf.registers
++		  + TCF_CLK_CTRL_TCF_SPI_MST_GO);
++
++	udelay(100);
++
++	do {
++		spi_mst_status = ioread32(tc->tcf.registers
++					  + TCF_CLK_CTRL_TCF_SPI_MST_STATUS);
++
++		if (cnt++ > 10000) {
++			dev_err(&tc->pdev->dev,
++				"%s: Timed out reading SPI reg (0x%x)\n",
++				__func__, off);
++			return -1;
++		}
++
++	} while (spi_mst_status != 0x08);
++
++	*val = ioread32(tc->tcf.registers
++			+ TCF_CLK_CTRL_TCF_SPI_MST_RDATA);
++
++	return 0;
++}
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
++static int apollo_thermal_get_temp(struct thermal_zone_device *thermal,
++				   unsigned long *t)
++#else
++static int apollo_thermal_get_temp(struct thermal_zone_device *thermal,
++				   int *t)
++#endif
++{
++	struct tc_device *tc;
++	int err = -ENODEV;
++	u32 tmp;
++
++	if (!thermal)
++		goto err_out;
++
++	tc = (struct tc_device *)thermal->devdata;
++
++	if (!tc)
++		goto err_out;
++
++	if (spi_read(tc, TCF_TEMP_SENSOR_SPI_OFFSET, &tmp)) {
++		dev_err(&tc->pdev->dev,
++			"Failed to read apollo temperature sensor\n");
++
++		goto err_out;
++	}
++
++	/* Report this in millidegree Celsius */
++	*t = TCF_TEMP_SENSOR_TO_C(tmp) * 1000;
++
++	err = 0;
++
++err_out:
++	return err;
++}
++
++static struct thermal_zone_device_ops apollo_thermal_dev_ops = {
++	.get_temp = apollo_thermal_get_temp,
++};
++
++#if defined(SUPPORT_RGX)
++
++static void pll_write_reg(struct tc_device *tc,
++			  resource_size_t reg_offset, u32 reg_value)
++{
++	BUG_ON(reg_offset < TCF_PLL_PLL_CORE_CLK0);
++	BUG_ON(reg_offset > tc->tcf_pll.region.size +
++	       TCF_PLL_PLL_CORE_CLK0 - 4);
++
++	/* Tweak the offset because we haven't mapped the full pll region */
++	iowrite32(reg_value, tc->tcf_pll.registers +
++		  reg_offset - TCF_PLL_PLL_CORE_CLK0);
++}
++
++static u32 sai_read_es2(struct tc_device *tc, u32 addr)
++{
++	iowrite32(0x200 | addr, tc->tcf.registers + 0x300);
++	iowrite32(0x1 | addr, tc->tcf.registers + 0x318);
++	return ioread32(tc->tcf.registers + 0x310);
++}
++
++static int apollo_align_interface_es2(struct tc_device *tc)
++{
++	u32 reg = 0;
++	u32 reg_reset_n;
++	int reset_cnt = 0;
++	int err = -EFAULT;
++	bool aligned = false;
++
++	/* Try to enable the core clock PLL */
++	spi_write(tc, 0x1, 0x0);
++	reg = ioread32(tc->tcf.registers + 0x320);
++	reg |= 0x1;
++	iowrite32(reg, tc->tcf.registers + 0x320);
++	reg &= 0xfffffffe;
++	iowrite32(reg, tc->tcf.registers + 0x320);
++	msleep(1000);
++
++	if (spi_read(tc, 0x2, &reg)) {
++		dev_err(&tc->pdev->dev,
++			"Unable to read PLL status\n");
++		goto err_out;
++	}
++
++	if (reg == 0x1) {
++		/* Select DUT PLL as core clock */
++		reg = ioread32(tc->tcf.registers +
++			       TCF_CLK_CTRL_DUT_CONTROL_1);
++		reg &= 0xfffffff7;
++		iowrite32(reg, tc->tcf.registers +
++			  TCF_CLK_CTRL_DUT_CONTROL_1);
++	} else {
++		dev_err(&tc->pdev->dev,
++			"PLL has failed to lock, status = %x\n", reg);
++		goto err_out;
++	}
++
++	reg_reset_n = ioread32(tc->tcf.registers +
++			       TCF_CLK_CTRL_CLK_AND_RST_CTRL);
++
++	while (!aligned && reset_cnt < 10 &&
++	       tc->version != APOLLO_VERSION_TCF_5) {
++		int bank;
++		u32 eyes;
++		u32 clk_taps;
++		u32 train_ack;
++
++		++reset_cnt;
++
++		/* Reset the DUT to allow the SAI to retrain */
++		reg_reset_n &= ~(0x1 << DUT_RESETN_SHIFT);
++		iowrite32(reg_reset_n, tc->tcf.registers +
++			  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
++		udelay(100);
++		reg_reset_n |= (0x1 << DUT_RESETN_SHIFT);
++		iowrite32(reg_reset_n, tc->tcf.registers +
++			  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
++		udelay(100);
++
++		/* Assume alignment passed; if any bank fails on either DUT or
++		 * FPGA we will set this to false and try again for a max of 10
++		 * times.
++		 */
++		aligned = true;
++
++		/* For each of the banks */
++		for (bank = 0; bank < 10; bank++) {
++			int bank_aligned = 0;
++			/* Check alignment on the DUT */
++			u32 bank_base = 0x7000 + (0x1000 * bank);
++
++			spi_read(tc, bank_base + 0x4, &eyes);
++			spi_read(tc, bank_base + 0x3, &clk_taps);
++			spi_read(tc, bank_base + 0x6, &train_ack);
++
++			bank_aligned = tc_is_interface_aligned(
++					eyes, clk_taps, train_ack);
++			if (!bank_aligned) {
++				dev_warn(&tc->pdev->dev,
++					 "Alignment check failed, retrying\n");
++				aligned = false;
++				break;
++			}
++
++			/* Check alignment on the FPGA */
++			bank_base = 0xb0 + (0x10 * bank);
++
++			eyes = sai_read_es2(tc, bank_base + 0x4);
++			clk_taps = sai_read_es2(tc, bank_base + 0x3);
++			train_ack = sai_read_es2(tc, bank_base + 0x6);
++
++			bank_aligned = tc_is_interface_aligned(
++					eyes, clk_taps, train_ack);
++
++			if (!bank_aligned) {
++				dev_warn(&tc->pdev->dev,
++					 "Alignment check failed, retrying\n");
++				aligned = false;
++				break;
++			}
++		}
++	}
++
++	if (!aligned) {
++		dev_err(&tc->pdev->dev, "Unable to initialise the testchip (interface alignment failure); please restart the system.\n");
++		/* We are not returning an error here, because the VP doesn't
++		 * implement the necessary registers although they claim to be
++		 * TC compatible.
++		 */
++	}
++
++	if (reset_cnt > 1) {
++		dev_dbg(&tc->pdev->dev, "Note: The testchip required more than one reset to find a good interface alignment!\n");
++		dev_dbg(&tc->pdev->dev, " This should be harmless, but if you do suspect foul play, please reset the machine.\n");
++		dev_dbg(&tc->pdev->dev, " If you continue to see this message you may want to report it to PowerVR Verification Platforms.\n");
++	}
++
++	err = 0;
++err_out:
++	return err;
++}
++
++static void apollo_set_clocks(struct tc_device *tc,
++			      int core_clock, int mem_clock, int sys_clock)
++{
++	u32 val;
++
++	/* This is disabled for TCF2 since the current FPGA builds do not
++	 * like their core clocks being set (it takes apollo down).
++	 */
++	if (tc->version != APOLLO_VERSION_TCF_2) {
++		val = core_clock / 1000000;
++		pll_write_reg(tc, TCF_PLL_PLL_CORE_CLK0, val);
++
++		val = 0x1 << PLL_CORE_DRP_GO_SHIFT;
++		pll_write_reg(tc, TCF_PLL_PLL_CORE_DRP_GO, val);
++	}
++
++	val = mem_clock / 1000000;
++	pll_write_reg(tc, TCF_PLL_PLL_MEMIF_CLK0, val);
++
++	val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
++	pll_write_reg(tc, TCF_PLL_PLL_MEM_DRP_GO, val);
++
++	if (tc->version == APOLLO_VERSION_TCF_5) {
++		val = sys_clock / 1000000;
++		pll_write_reg(tc, TCF_PLL_PLL_SYSIF_CLK0, val);
++
++		val = 0x1 << PLL_MEM_DRP_GO_SHIFT;
++		pll_write_reg(tc, TCF_PLL_PLL_SYS_DRP_GO, val);
++	}
++
++	udelay(400);
++}
++
++static void apollo_set_mem_latency(struct tc_device *tc,
++				   int mem_latency, int mem_wresp_latency)
++{
++	u32 regval = 0;
++
++	if (mem_latency <= 4) {
++		/* The total memory read latency cannot be lower than the
++		 * amount of cycles consumed by the hardware to do a read.
++		 * Set the memory read latency to 0 cycles.
++		 */
++		mem_latency = 0;
++	} else {
++		mem_latency -= 4;
++
++		dev_info(&tc->pdev->dev,
++			 "Setting memory read latency to %i cycles\n",
++			 mem_latency);
++	}
++
++	if (mem_wresp_latency <= 2) {
++		/* The total memory write latency cannot be lower than the
++		 * amount of cycles consumed by the hardware to do a write.
++		 * Set the memory write latency to 0 cycles.
++		 */
++		mem_wresp_latency = 0;
++	} else {
++		mem_wresp_latency -= 2;
++
++		dev_info(&tc->pdev->dev,
++			 "Setting memory write response latency to %i cycles\n",
++			 mem_wresp_latency);
++	}
++
++	mem_latency |= mem_wresp_latency << 16;
++
++	spi_write(tc, 0x1009, mem_latency);
++
++	if (spi_read(tc, 0x1009, &regval) != 0) {
++		dev_err(&tc->pdev->dev,
++			"Failed to read back memory latency register");
++		return;
++	}
++
++	if (mem_latency != regval) {
++		dev_err(&tc->pdev->dev,
++			"Memory latency register doesn't match requested value (actual: %#08x, expected: %#08x)\n",
++			regval, mem_latency);
++	}
++}
++
++static void apollo_fpga_update_dut_clk_freq(struct tc_device *tc,
++	int *core_clock, int *mem_clock, int *clock_multiplex)
++{
++	struct device *dev = &tc->pdev->dev;
++	u32 reg = 0;
++
++#if defined(SUPPORT_FPGA_DUT_CLK_INFO)
++	/* DUT_CLK_INFO available only if SW_IF_VERSION >= 1 */
++	reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION);
++	reg = (reg & VERSION_MASK) >> VERSION_SHIFT;
++#endif
++	if (reg >= 1) {
++		reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_DUT_CLK_INFO);
++
++		if ((reg != 0) && (reg != 0xbaadface) && (reg != 0xffffffff)) {
++			dev_info(dev, "TCF_CLK_CTRL_DUT_CLK_INFO = %08x\n", reg);
++
++			if (*core_clock == 0) {
++				*core_clock = ((reg & CORE_MASK) >> CORE_SHIFT) * 1000000;
++				dev_info(dev, "Using register DUT core clock value: %i\n",
++					 *core_clock);
++			} else {
++				dev_info(dev, "Using module param DUT core clock value: %i\n",
++					 *core_clock);
++			}
++
++			if (*mem_clock == 0) {
++				*mem_clock = ((reg & MEM_MASK) >> MEM_SHIFT) * 1000000;
++				dev_info(dev, "Using register DUT mem clock value: %i\n",
++					 *mem_clock);
++			} else {
++				dev_info(dev, "Using module param DUT mem clock value: %i\n",
++					 *mem_clock);
++			}
++
++			return;
++		}
++	}
++
++	if (*core_clock == 0) {
++		*core_clock = RGX_TC_CORE_CLOCK_SPEED;
++		dev_info(dev, "Using default DUT core clock value: %i\n",
++			 *core_clock);
++	} else {
++		dev_info(dev, "Using module param DUT core clock value: %i\n",
++			 *core_clock);
++	}
++
++	if (*mem_clock == 0) {
++		*mem_clock = RGX_TC_MEM_CLOCK_SPEED;
++		dev_info(dev, "Using default DUT mem clock value: %i\n",
++			 *mem_clock);
++	} else {
++		dev_info(dev, "Using module param DUT mem clock value: %i\n",
++			 *mem_clock);
++	}
++
++	if (*clock_multiplex == 0) {
++		*clock_multiplex = RGX_TC_CLOCK_MULTIPLEX;
++		dev_info(dev, "Using default DUT clock multiplex: %i\n",
++			 *clock_multiplex);
++	} else {
++		dev_info(dev, "Using module param DUT clock multiplex: %i\n",
++			 *clock_multiplex);
++	}
++}
++
++#endif /* defined(SUPPORT_RGX) */
++
++static int apollo_hard_reset(struct tc_device *tc,
++	int *core_clock, int *mem_clock, int sys_clock, int *clock_multiplex)
++{
++	u32 reg;
++	u32 reg_reset_n = 0;
++
++	int err = 0;
++
++	/* This is required for SPI reset which is not yet implemented.
++	 */
++	/*u32 aux_reset_n;*/
++
++	if (tc->version == APOLLO_VERSION_TCF_2) {
++		/* Power down */
++		reg = ioread32(tc->tcf.registers +
++			       TCF_CLK_CTRL_DUT_CONTROL_1);
++		reg &= ~DUT_CTRL_VCC_0V9EN;
++		reg &= ~DUT_CTRL_VCC_1V8EN;
++		reg |= DUT_CTRL_VCC_IO_INH;
++		reg |= DUT_CTRL_VCC_CORE_INH;
++		iowrite32(reg, tc->tcf.registers +
++			  TCF_CLK_CTRL_DUT_CONTROL_1);
++		msleep(500);
++	}
++
++	/* Put everything into reset */
++	iowrite32(reg_reset_n, tc->tcf.registers +
++		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
++
++	/* Take PDP1 and PDP2 out of reset */
++	reg_reset_n |= (0x1 << PDP1_RESETN_SHIFT);
++	reg_reset_n |= (0x1 << PDP2_RESETN_SHIFT);
++
++	iowrite32(reg_reset_n, tc->tcf.registers +
++		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
++	msleep(100);
++
++	/* Take DDR out of reset */
++	reg_reset_n |= (0x1 << DDR_RESETN_SHIFT);
++	iowrite32(reg_reset_n, tc->tcf.registers +
++		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
++
++#if defined(SUPPORT_RGX)
++	if (tc->version == APOLLO_VERSION_TCF_5) {
++		apollo_fpga_update_dut_clk_freq(tc, core_clock, mem_clock, clock_multiplex);
++	} else {
++		struct device *dev = &tc->pdev->dev;
++
++		if (*core_clock == 0) {
++			*core_clock = RGX_TC_CORE_CLOCK_SPEED;
++			dev_info(dev, "Using default DUT core clock value: %i\n",
++				 *core_clock);
++		} else {
++			dev_info(dev, "Using module param DUT core clock value: %i\n",
++				 *core_clock);
++		}
++
++		if (*mem_clock == 0) {
++			*mem_clock = RGX_TC_MEM_CLOCK_SPEED;
++			dev_info(dev, "Using default DUT mem clock value: %i\n",
++				 *mem_clock);
++		} else {
++			dev_info(dev, "Using module param DUT mem clock value: %i\n",
++				 *mem_clock);
++		}
++
++		if (*clock_multiplex == 0) {
++			*clock_multiplex = RGX_TC_CLOCK_MULTIPLEX;
++			dev_info(dev, "Using default DUT clock multiplex: %i\n",
++				 *clock_multiplex);
++		} else {
++			dev_info(dev, "Using module param DUT clock multiplex: %i\n",
++				 *clock_multiplex);
++		}
++	}
++
++	/* Set clock speed here, before reset. */
++	apollo_set_clocks(tc, *core_clock, *mem_clock, sys_clock);
++
++	/* Take GLB_CLKG and SCB out of reset */
++	reg_reset_n |= (0x1 << GLB_CLKG_EN_SHIFT);
++	reg_reset_n |= (0x1 << SCB_RESETN_SHIFT);
++	iowrite32(reg_reset_n, tc->tcf.registers +
++		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
++	msleep(100);
++
++	if (tc->version == APOLLO_VERSION_TCF_2) {
++		/* Enable the voltage control regulators on DUT */
++		reg = ioread32(tc->tcf.registers +
++			       TCF_CLK_CTRL_DUT_CONTROL_1);
++		reg |= DUT_CTRL_VCC_0V9EN;
++		reg |= DUT_CTRL_VCC_1V8EN;
++		reg &= ~DUT_CTRL_VCC_IO_INH;
++		reg &= ~DUT_CTRL_VCC_CORE_INH;
++		iowrite32(reg, tc->tcf.registers +
++			  TCF_CLK_CTRL_DUT_CONTROL_1);
++		msleep(300);
++	}
++
++	/* Take DUT_DCM out of reset */
++	reg_reset_n |= (0x1 << DUT_DCM_RESETN_SHIFT);
++	iowrite32(reg_reset_n, tc->tcf.registers +
++		  TCF_CLK_CTRL_CLK_AND_RST_CTRL);
++	msleep(100);
++
++
++	err = tc_iopol32_nonzero(DCM_LOCK_STATUS_MASK,
++		tc->tcf.registers + TCF_CLK_CTRL_DCM_LOCK_STATUS);
++
++	if (err != 0)
++		goto err_out;
++
++	if (tc->version == APOLLO_VERSION_TCF_2) {
++		/* Set ODT to a specific value that seems to provide the most
++		 * stable signals.
++ */ ++ spi_write(tc, 0x11, 0x413130); ++ } ++ ++ /* Take DUT out of reset */ ++ reg_reset_n |= (0x1 << DUT_RESETN_SHIFT); ++ iowrite32(reg_reset_n, tc->tcf.registers + ++ TCF_CLK_CTRL_CLK_AND_RST_CTRL); ++ msleep(100); ++ ++ if (tc->version != APOLLO_VERSION_TCF_5) { ++ u32 hood_ctrl; ++ ++ err = apollo_align_interface_es2(tc); ++ if (err) ++ goto err_out; ++ ++ spi_read(tc, 0xF, &hood_ctrl); ++ hood_ctrl |= 0x1; ++ spi_write(tc, 0xF, hood_ctrl); ++ } ++ ++#endif /* defined(SUPPORT_RGX) */ ++ ++ if (tc->version == APOLLO_VERSION_TCF_2) { ++ /* Enable the temperature sensor */ ++ spi_write(tc, 0xc, 0); /* power up */ ++ spi_write(tc, 0xc, 2); /* reset */ ++ spi_write(tc, 0xc, 6); /* init & run */ ++ ++ /* Register a new thermal zone */ ++ apollo_pdata.thermal_zone = ++ thermal_zone_device_register("apollo", 0, 0, tc, ++ &apollo_thermal_dev_ops, ++ NULL, 0, 0); ++ if (IS_ERR(apollo_pdata.thermal_zone)) { ++ dev_warn(&tc->pdev->dev, "Couldn't register thermal zone"); ++ apollo_pdata.thermal_zone = NULL; ++ } ++ } ++ ++ reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION); ++ reg = (reg & VERSION_MASK) >> VERSION_SHIFT; ++ ++ if (reg == 0) { ++ u32 build_inc; ++ u32 build_owner; ++ ++ /* Check the build */ ++ reg = ioread32(tc->tcf.registers + TCF_CLK_CTRL_FPGA_DES_REV_1); ++ build_inc = (reg >> 12) & 0xff; ++ build_owner = (reg >> 20) & 0xf; ++ ++ if (build_inc) { ++ dev_alert(&tc->pdev->dev, ++ "BE WARNED: You are not running a tagged release of the FPGA!\n"); ++ ++ dev_alert(&tc->pdev->dev, "Owner: 0x%01x, Inc: 0x%02x\n", ++ build_owner, build_inc); ++ } ++ ++ dev_info(&tc->pdev->dev, "FPGA Release: %u.%02u\n", ++ reg >> 8 & 0xf, reg & 0xff); ++ } ++ ++#if defined(SUPPORT_RGX) ++err_out: ++#endif /* defined(SUPPORT_RGX) */ ++ return err; ++} ++ ++static void apollo_set_mem_mode_lma(struct tc_device *tc) ++{ ++ u32 val; ++ ++ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); ++ val &= ~(ADDRESS_FORCE_MASK | PCI_TEST_MODE_MASK | HOST_ONLY_MODE_MASK ++ | HOST_PHY_MODE_MASK); ++ val |= (0x1 << ADDRESS_FORCE_SHIFT); ++ iowrite32(val, tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); ++} ++ ++static void apollo_set_mem_mode_hybrid(struct tc_device *tc) ++{ ++ u32 val; ++ ++ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); ++ val &= ~(ADDRESS_FORCE_MASK | PCI_TEST_MODE_MASK | HOST_ONLY_MODE_MASK ++ | HOST_PHY_MODE_MASK); ++ val |= ((0x1 << HOST_ONLY_MODE_SHIFT) | (0x1 << HOST_PHY_MODE_SHIFT)); ++ iowrite32(val, tc->tcf.registers + TCF_CLK_CTRL_TEST_CTRL); ++ ++ /* Setup apollo to pass 1GB window of address space to the local memory. ++ * This is a sub-mode of the host only mode, meaning that the apollo TC ++ * can address the system memory with a 1GB window of address space ++ * routed to the device local memory. The simplest approach is to mirror ++ * the CPU physical address space, by moving the device local memory ++ * window where it is mapped in the CPU physical address space. 
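++	 * For example, if tc_mem.base were 0x80000000 (purely illustrative),
++	 * the write below would make the device's 1GB window cover
++	 * 0x80000000..0xbfffffff, the same range the CPU uses for the card
++	 * memory.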
++ */ ++ iowrite32(tc->tc_mem.base, ++ tc->tcf.registers + TCF_CLK_CTRL_HOST_PHY_OFFSET); ++} ++ ++static int apollo_set_mem_mode(struct tc_device *tc, int mem_mode) ++{ ++ switch (mem_mode) { ++ case TC_MEMORY_HYBRID: ++ apollo_set_mem_mode_hybrid(tc); ++ dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_HYBRID\n"); ++ break; ++ case TC_MEMORY_LOCAL: ++ apollo_set_mem_mode_lma(tc); ++ dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_LOCAL\n"); ++ break; ++ default: ++ dev_err(&tc->pdev->dev, "unsupported memory mode = %d\n", ++ mem_mode); ++ return -EINVAL; ++ }; ++ ++ tc->mem_mode = mem_mode; ++ ++ return 0; ++} ++ ++static bool apollo_pdp_export_host_addr(struct tc_device *tc) ++{ ++ return tc->mem_mode == TC_MEMORY_HYBRID; ++} ++ ++static u64 apollo_get_pdp_dma_mask(struct tc_device *tc) ++{ ++ /* The PDP does not access system memory, so there is no ++ * DMA limitation. ++ */ ++ if ((tc->mem_mode == TC_MEMORY_LOCAL) || ++ (tc->mem_mode == TC_MEMORY_HYBRID)) ++ return DMA_BIT_MASK(64); ++ ++ return DMA_BIT_MASK(32); ++} ++ ++#if defined(SUPPORT_RGX) || defined(SUPPORT_APOLLO_FPGA) ++#if defined(SUPPORT_RGX) ++static u64 apollo_get_rogue_dma_mask(struct tc_device *tc) ++#else /* SUPPORT_APOLLO_FPGA */ ++static u64 apollo_get_fpga_dma_mask(struct tc_device *tc) ++#endif /* defined(SUPPORT_RGX) */ ++{ ++ /* Does not access system memory, so there is no DMA limitation */ ++ if (tc->mem_mode == TC_MEMORY_LOCAL) ++ return DMA_BIT_MASK(64); ++ ++ return DMA_BIT_MASK(32); ++} ++#endif /* defined(SUPPORT_RGX) || defined(SUPPORT_APOLLO_FPGA) */ ++ ++static int apollo_hw_init(struct tc_device *tc, ++ int *core_clock, int *mem_clock, int sys_clock, int *clock_multiplex, ++ int mem_latency, int mem_wresp_latency, int mem_mode) ++{ ++ int err = 0; ++ ++ err = apollo_hard_reset(tc, core_clock, mem_clock, sys_clock, clock_multiplex); ++ if (err) ++ goto err_out; ++ ++ err = apollo_set_mem_mode(tc, mem_mode); ++ if (err) ++ goto err_out; ++ ++#if defined(SUPPORT_RGX) ++ if (tc->version == APOLLO_VERSION_TCF_BONNIE) { ++ u32 reg; ++ /* Enable ASTC via SPI */ ++ if (spi_read(tc, 0xf, ®)) { ++ dev_err(&tc->pdev->dev, ++ "Failed to read apollo ASTC register\n"); ++ err = -ENODEV; ++ goto err_out; ++ } ++ ++ reg |= 0x1 << 4; ++ spi_write(tc, 0xf, reg); ++ } else if (tc->version == APOLLO_VERSION_TCF_5) { ++ apollo_set_mem_latency(tc, mem_latency, mem_wresp_latency); ++ } ++#endif /* defined(SUPPORT_RGX) */ ++ ++err_out: ++ return err; ++} ++ ++static int apollo_enable_irq(struct tc_device *tc) ++{ ++ int err = 0; ++ ++#if defined(TC_FAKE_INTERRUPTS) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) ++ timer_setup(&tc->timer, tc_irq_fake_wrapper, 0); ++#else ++ setup_timer(&tc->timer, tc_irq_fake_wrapper, (unsigned long)tc); ++#endif ++ ++ mod_timer(&tc->timer, ++ jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS)); ++#else ++ { ++ u32 val; ++ ++ iowrite32(0, tc->tcf.registers + ++ TCF_CLK_CTRL_INTERRUPT_ENABLE); ++ iowrite32(0xffffffff, tc->tcf.registers + ++ TCF_CLK_CTRL_INTERRUPT_CLEAR); ++ ++ /* Set sense to active high */ ++ val = ioread32(tc->tcf.registers + ++ TCF_CLK_CTRL_INTERRUPT_OP_CFG) & ~(INT_SENSE_MASK); ++ iowrite32(val, tc->tcf.registers + ++ TCF_CLK_CTRL_INTERRUPT_OP_CFG); ++ ++ err = request_irq(tc->pdev->irq, apollo_irq_handler, ++ IRQF_SHARED, DRV_NAME, tc); ++ } ++#endif ++ return err; ++} ++ ++static void apollo_disable_irq(struct tc_device *tc) ++{ ++#if defined(TC_FAKE_INTERRUPTS) ++ del_timer_sync(&tc->timer); ++#else ++ iowrite32(0, tc->tcf.registers + ++ 
		  TCF_CLK_CTRL_INTERRUPT_ENABLE);
++	iowrite32(0xffffffff, tc->tcf.registers +
++		  TCF_CLK_CTRL_INTERRUPT_CLEAR);
++
++	free_irq(tc->pdev->irq, tc);
++#endif
++}
++
++static enum tc_version_t
++apollo_detect_tc_version(struct tc_device *tc)
++{
++	u32 val = ioread32(tc->tcf.registers +
++			   TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG);
++
++	switch (val) {
++	default:
++		dev_err(&tc->pdev->dev,
++			"Unknown TCF core target build ID (0x%x) - assuming Hood ES2 - PLEASE REPORT TO ANDROID TEAM\n",
++			val);
++		/* Fall-through */
++	case 5:
++		dev_err(&tc->pdev->dev, "Looks like a Hood ES2 TC\n");
++		return APOLLO_VERSION_TCF_2;
++	case 1:
++		dev_err(&tc->pdev->dev, "Looks like a TCF5\n");
++		return APOLLO_VERSION_TCF_5;
++	case 6:
++		dev_err(&tc->pdev->dev, "Looks like a Bonnie TC\n");
++		return APOLLO_VERSION_TCF_BONNIE;
++	}
++}
++
++static u32 apollo_interrupt_id_to_flag(int interrupt_id)
++{
++	switch (interrupt_id) {
++	case TC_INTERRUPT_PDP:
++		return TC_INTERRUPT_FLAG_PDP;
++	case TC_INTERRUPT_EXT:
++		return TC_INTERRUPT_FLAG_EXT;
++	default:
++		BUG();
++	}
++}
++
++static int apollo_dev_init(struct tc_device *tc, struct pci_dev *pdev,
++			   int pdp_mem_size, int secure_mem_size)
++{
++	int err;
++
++	/* Reserve and map the tcf_clk / "sys" registers */
++	err = setup_io_region(pdev, &tc->tcf,
++		SYS_APOLLO_REG_PCI_BASENUM,
++		SYS_APOLLO_REG_SYS_OFFSET, SYS_APOLLO_REG_SYS_SIZE);
++	if (err)
++		goto err_out;
++
++	/* Reserve and map the tcf_pll registers */
++	err = setup_io_region(pdev, &tc->tcf_pll,
++		SYS_APOLLO_REG_PCI_BASENUM,
++		SYS_APOLLO_REG_PLL_OFFSET + TCF_PLL_PLL_CORE_CLK0,
++		TCF_PLL_PLL_DRP_STATUS - TCF_PLL_PLL_CORE_CLK0 + 4);
++	if (err)
++		goto err_unmap_sys_registers;
++
++#if defined(SUPPORT_APOLLO_FPGA)
++#define FPGA_REGISTERS_SIZE 4
++	/* If this is a special 'fpga' build, have the apollo driver manage
++	 * the second register bar.
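++	 * (i.e. map SYS_RGX_REG_PCI_BASENUM below ourselves instead of
++	 * handing it to a Rogue platform device, as the SUPPORT_RGX build
++	 * does in apollo_register_ext_device()).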
++ */ ++ err = setup_io_region(pdev, &apollo_pdata.fpga, ++ SYS_RGX_REG_PCI_BASENUM, 0, FPGA_REGISTERS_SIZE); ++ if (err) ++ goto err_unmap_pll_registers; ++#endif ++ ++ /* Detect testchip version */ ++ tc->version = apollo_detect_tc_version(tc); ++ ++ /* Setup card memory */ ++ tc->tc_mem.base = ++ pci_resource_start(pdev, APOLLO_MEM_PCI_BASENUM); ++ tc->tc_mem.size = ++ pci_resource_len(pdev, APOLLO_MEM_PCI_BASENUM); ++ ++ if (tc->tc_mem.size < pdp_mem_size) { ++ dev_err(&pdev->dev, ++ "Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu", ++ APOLLO_MEM_PCI_BASENUM, ++ (unsigned long)tc->tc_mem.size, ++ (unsigned long)pdp_mem_size); ++ err = -EIO; ++ goto err_unmap_fpga_registers; ++ } ++ ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++ if (tc->tc_mem.size < ++ (pdp_mem_size + secure_mem_size)) { ++ dev_err(&pdev->dev, ++ "Apollo MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu plus the requested secure heap size %lu", ++ APOLLO_MEM_PCI_BASENUM, ++ (unsigned long)tc->tc_mem.size, ++ (unsigned long)pdp_mem_size, ++ (unsigned long)secure_mem_size); ++ err = -EIO; ++ goto err_unmap_fpga_registers; ++ } ++#endif ++ ++ err = tc_mtrr_setup(tc); ++ if (err) ++ goto err_unmap_fpga_registers; ++ ++ /* Setup ranges for the device heaps */ ++ tc->pdp_heap_mem_size = pdp_mem_size; ++ ++ /* We know ext_heap_mem_size won't underflow as we've compared ++ * tc_mem.size against the pdp_mem_size value earlier ++ */ ++ tc->ext_heap_mem_size = ++ tc->tc_mem.size - tc->pdp_heap_mem_size; ++ ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++ tc->ext_heap_mem_size -= secure_mem_size; ++#endif ++ ++ if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) { ++ dev_warn(&pdev->dev, ++ "Apollo MEM region (bar %d) has size of %lu, with %lu pdp_mem_size only %lu bytes are left for ext device, which looks too small", ++ APOLLO_MEM_PCI_BASENUM, ++ (unsigned long)tc->tc_mem.size, ++ (unsigned long)pdp_mem_size, ++ (unsigned long)tc->ext_heap_mem_size); ++ /* Continue as this is only a 'helpful warning' not a hard ++ * requirement ++ */ ++ } ++ ++ tc->ext_heap_mem_base = tc->tc_mem.base; ++ tc->pdp_heap_mem_base = ++ tc->tc_mem.base + tc->ext_heap_mem_size; ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++ tc->secure_heap_mem_base = tc->pdp_heap_mem_base + ++ tc->pdp_heap_mem_size; ++ tc->secure_heap_mem_size = secure_mem_size; ++#endif ++ ++#if defined(SUPPORT_DMA_HEAP) ++ err = tc_dmabuf_heap_init(tc, APOLLO_MEM_PCI_BASENUM); ++ if (err) { ++ dev_err(&pdev->dev, "Failed to initialise DMA heap\n"); ++ goto err_unmap_fpga_registers; ++ } ++#elif defined(SUPPORT_ION) ++ err = tc_ion_init(tc, APOLLO_MEM_PCI_BASENUM); ++ if (err) { ++ dev_err(&pdev->dev, "Failed to initialise ION\n"); ++ goto err_unmap_fpga_registers; ++ } ++#endif /* defined(SUPPORT_ION) */ ++ ++#if defined(SUPPORT_APOLLO_FPGA) ++ apollo_debugfs_add_fpga_entries(tc, &apollo_pdata.fpga, ++ &apollo_pdata.fpga_entries); ++#endif /* defined(SUPPORT_APOLLO_FPGA) */ ++ ++err_out: ++ return err; ++err_unmap_fpga_registers: ++#if defined(SUPPORT_APOLLO_FPGA) ++ iounmap(apollo_pdata.fpga.registers); ++ release_pci_io_addr(pdev, SYS_RGX_REG_PCI_BASENUM, ++ apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size); ++err_unmap_pll_registers: ++#endif /* defined(SUPPORT_APOLLO_FPGA) */ ++ iounmap(tc->tcf_pll.registers); ++ release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM, ++ tc->tcf_pll.region.base, tc->tcf_pll.region.size); ++err_unmap_sys_registers: ++ iounmap(tc->tcf.registers); ++ 
release_pci_io_addr(pdev, SYS_APOLLO_REG_PCI_BASENUM, ++ tc->tcf.region.base, tc->tcf.region.size); ++ goto err_out; ++} ++ ++static void apollo_dev_cleanup(struct tc_device *tc) ++{ ++#if defined(SUPPORT_APOLLO_FPGA) ++ apollo_debugfs_remove_fpga_entries(&apollo_pdata.fpga_entries); ++#endif ++ ++#if defined(SUPPORT_DMA_HEAP) ++ tc_dmabuf_heap_deinit(tc, APOLLO_MEM_PCI_BASENUM); ++#elif defined(SUPPORT_ION) ++ tc_ion_deinit(tc, APOLLO_MEM_PCI_BASENUM); ++#endif ++ ++ tc_mtrr_cleanup(tc); ++ ++#if defined(SUPPORT_APOLLO_FPGA) ++ iounmap(apollo_pdata.fpga.registers); ++ release_pci_io_addr(tc->pdev, SYS_RGX_REG_PCI_BASENUM, ++ apollo_pdata.fpga.region.base, apollo_pdata.fpga.region.size); ++#endif ++ ++ iounmap(tc->tcf_pll.registers); ++ release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM, ++ tc->tcf_pll.region.base, tc->tcf_pll.region.size); ++ ++ iounmap(tc->tcf.registers); ++ release_pci_io_addr(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM, ++ tc->tcf.region.base, tc->tcf.region.size); ++ ++ if (apollo_pdata.thermal_zone) ++ thermal_zone_device_unregister(apollo_pdata.thermal_zone); ++} ++ ++int apollo_init(struct tc_device *tc, struct pci_dev *pdev, ++ int *core_clock, int *mem_clock, int sys_clock, int *clock_multiplex, ++ int pdp_mem_size, int secure_mem_size, ++ int mem_latency, int mem_wresp_latency, int mem_mode) ++{ ++ int err = 0; ++ ++ err = apollo_dev_init(tc, pdev, pdp_mem_size, secure_mem_size); ++ if (err) { ++ dev_err(&pdev->dev, "apollo_dev_init failed\n"); ++ goto err_out; ++ } ++ ++ err = apollo_hw_init(tc, core_clock, mem_clock, sys_clock, clock_multiplex, ++ mem_latency, mem_wresp_latency, mem_mode); ++ if (err) { ++ dev_err(&pdev->dev, "apollo_hw_init failed\n"); ++ goto err_dev_cleanup; ++ } ++ ++ err = apollo_enable_irq(tc); ++ if (err) { ++ dev_err(&pdev->dev, ++ "Failed to initialise IRQ\n"); ++ goto err_dev_cleanup; ++ } ++ ++err_out: ++ return err; ++ ++err_dev_cleanup: ++ apollo_dev_cleanup(tc); ++ goto err_out; ++} ++ ++int apollo_cleanup(struct tc_device *tc) ++{ ++ apollo_disable_irq(tc); ++ apollo_dev_cleanup(tc); ++ ++ return 0; ++} ++ ++int apollo_register_pdp_device(struct tc_device *tc) ++{ ++ int err = 0; ++ resource_size_t reg_start = ++ pci_resource_start(tc->pdev, ++ SYS_APOLLO_REG_PCI_BASENUM); ++ struct resource pdp_resources_es2[] = { ++ DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET, ++ SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"), ++ DEFINE_RES_MEM_NAMED(reg_start + ++ SYS_APOLLO_REG_PLL_OFFSET + ++ TCF_PLL_PLL_PDP_CLK0, ++ TCF_PLL_PLL_PDP2_DRP_GO - ++ TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"), ++ }; ++ struct resource pdp_resources_tcf5[] = { ++ DEFINE_RES_MEM_NAMED(reg_start + SYS_APOLLO_REG_PDP1_OFFSET, ++ SYS_APOLLO_REG_PDP1_SIZE, "pdp-regs"), ++ DEFINE_RES_MEM_NAMED(reg_start + ++ SYS_APOLLO_REG_PLL_OFFSET + ++ TCF_PLL_PLL_PDP_CLK0, ++ TCF_PLL_PLL_PDP2_DRP_GO - ++ TCF_PLL_PLL_PDP_CLK0 + 4, "pll-regs"), ++ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, ++ TC5_SYS_APOLLO_REG_PCI_BASENUM) ++ + TC5_SYS_APOLLO_REG_PDP2_OFFSET, ++ TC5_SYS_APOLLO_REG_PDP2_SIZE, "tc5-pdp2-regs"), ++ ++ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, ++ TC5_SYS_APOLLO_REG_PCI_BASENUM) ++ + TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET, ++ TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE, ++ "tc5-pdp2-fbdc-regs"), ++ ++ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, ++ TC5_SYS_APOLLO_REG_PCI_BASENUM) ++ + TC5_SYS_APOLLO_REG_HDMI_OFFSET, ++ TC5_SYS_APOLLO_REG_HDMI_SIZE, ++ "tc5-adv5711-regs"), ++ }; ++ ++ struct tc_pdp_platform_data pdata = { ++#if defined(SUPPORT_ION) && 
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ .ion_device = tc->ion_device, ++ .ion_heap_id = ION_HEAP_TC_PDP, ++#endif ++ .memory_base = tc->tc_mem.base, ++ .pdp_heap_memory_base = tc->pdp_heap_mem_base, ++ .pdp_heap_memory_size = tc->pdp_heap_mem_size, ++ .dma_map_export_host_addr = apollo_pdp_export_host_addr(tc), ++ }; ++ struct platform_device_info pdp_device_info = { ++ .parent = &tc->pdev->dev, ++ .name = APOLLO_DEVICE_NAME_PDP, ++ .id = -2, ++ .data = &pdata, ++ .size_data = sizeof(pdata), ++ .dma_mask = apollo_get_pdp_dma_mask(tc), ++ }; ++ ++ if (tc->version == APOLLO_VERSION_TCF_5) { ++ pdp_device_info.res = pdp_resources_tcf5; ++ pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_tcf5); ++ } else if (tc->version == APOLLO_VERSION_TCF_2 || ++ tc->version == APOLLO_VERSION_TCF_BONNIE) { ++ pdp_device_info.res = pdp_resources_es2; ++ pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_es2); ++ } else { ++ dev_err(&tc->pdev->dev, ++ "Unable to set PDP resource info for unknown apollo device\n"); ++ } ++ ++ tc->pdp_dev = platform_device_register_full(&pdp_device_info); ++ if (IS_ERR(tc->pdp_dev)) { ++ err = PTR_ERR(tc->pdp_dev); ++ dev_err(&tc->pdev->dev, ++ "Failed to register PDP device (%d)\n", err); ++ tc->pdp_dev = NULL; ++ goto err; ++ } ++err: ++ return err; ++} ++ ++#if defined(SUPPORT_RGX) ++ ++int apollo_register_ext_device(struct tc_device *tc) ++{ ++ int err = 0; ++ struct resource rogue_resources[] = { ++ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, ++ SYS_RGX_REG_PCI_BASENUM), ++ SYS_RGX_REG_REGION_SIZE, "rogue-regs"), ++ }; ++ struct tc_rogue_platform_data pdata = { ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ .ion_device = tc->ion_device, ++ .ion_heap_id = ION_HEAP_TC_ROGUE, ++#endif ++ .mem_mode = tc->mem_mode, ++ .tc_memory_base = tc->tc_mem.base, ++ .pdp_heap_memory_base = tc->pdp_heap_mem_base, ++ .pdp_heap_memory_size = tc->pdp_heap_mem_size, ++ .rogue_heap_memory_base = tc->ext_heap_mem_base, ++ .rogue_heap_memory_size = tc->ext_heap_mem_size, ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++ .secure_heap_memory_base = tc->secure_heap_mem_base, ++ .secure_heap_memory_size = tc->secure_heap_mem_size, ++#endif ++ }; ++ struct platform_device_info rogue_device_info = { ++ .parent = &tc->pdev->dev, ++ .name = TC_DEVICE_NAME_ROGUE, ++ .id = -2, ++ .res = rogue_resources, ++ .num_res = ARRAY_SIZE(rogue_resources), ++ .data = &pdata, ++ .size_data = sizeof(pdata), ++ .dma_mask = apollo_get_rogue_dma_mask(tc), ++ }; ++ ++ tc->ext_dev ++ = platform_device_register_full(&rogue_device_info); ++ ++ if (IS_ERR(tc->ext_dev)) { ++ err = PTR_ERR(tc->ext_dev); ++ dev_err(&tc->pdev->dev, ++ "Failed to register rogue device (%d)\n", err); ++ tc->ext_dev = NULL; ++ } ++ return err; ++} ++ ++#elif defined(SUPPORT_APOLLO_FPGA) ++ ++int apollo_register_ext_device(struct tc_device *tc) ++{ ++ int err = 0; ++ struct resource fpga_resources[] = { ++ /* For the 'fpga' build, we don't use the Rogue, but reuse the ++ * define that mentions RGX. 
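++		 * The FPGA device therefore claims the same PCI BAR,
++		 * SYS_RGX_REG_PCI_BASENUM, that a Rogue build would use.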
++	 */
++	DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev,
++					SYS_RGX_REG_PCI_BASENUM),
++			     SYS_RGX_REG_REGION_SIZE, "fpga-regs"),
++	};
++	struct apollo_fpga_platform_data pdata = {
++		.mem_mode = tc->mem_mode,
++		.tc_memory_base = tc->tc_mem.base,
++		.pdp_heap_memory_base = tc->pdp_heap_mem_base,
++		.pdp_heap_memory_size = tc->pdp_heap_mem_size,
++	};
++	struct platform_device_info fpga_device_info = {
++		.parent = &tc->pdev->dev,
++		.name = APOLLO_DEVICE_NAME_FPGA,
++		.id = -1,
++		.res = fpga_resources,
++		.num_res = ARRAY_SIZE(fpga_resources),
++		.data = &pdata,
++		.size_data = sizeof(pdata),
++		.dma_mask = apollo_get_fpga_dma_mask(tc),
++	};
++
++	tc->ext_dev = platform_device_register_full(&fpga_device_info);
++	if (IS_ERR(tc->ext_dev)) {
++		err = PTR_ERR(tc->ext_dev);
++		dev_err(&tc->pdev->dev,
++			"Failed to register fpga device (%d)\n", err);
++		tc->ext_dev = NULL;
++		/* Fall through */
++	}
++
++	return err;
++}
++
++#else /* defined(SUPPORT_APOLLO_FPGA) */
++
++int apollo_register_ext_device(struct tc_device *tc)
++{
++	return 0;
++}
++
++#endif /* defined(SUPPORT_RGX) */
++
++void apollo_enable_interrupt_register(struct tc_device *tc,
++				      int interrupt_id)
++{
++	u32 val;
++
++	if (interrupt_id == TC_INTERRUPT_PDP ||
++	    interrupt_id == TC_INTERRUPT_EXT) {
++		val = ioread32(
++			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
++		val |= apollo_interrupt_id_to_flag(interrupt_id);
++		iowrite32(val,
++			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
++	}
++}
++
++void apollo_disable_interrupt_register(struct tc_device *tc,
++				       int interrupt_id)
++{
++	u32 val;
++
++	if (interrupt_id == TC_INTERRUPT_PDP ||
++	    interrupt_id == TC_INTERRUPT_EXT) {
++		val = ioread32(
++			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
++		val &= ~(apollo_interrupt_id_to_flag(interrupt_id));
++		iowrite32(val,
++			tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_ENABLE);
++	}
++}
++
++irqreturn_t apollo_irq_handler(int irq, void *data)
++{
++	u32 interrupt_status;
++	u32 interrupt_clear = 0;
++	unsigned long flags;
++	irqreturn_t ret = IRQ_NONE;
++	struct tc_device *tc = (struct tc_device *)data;
++
++	spin_lock_irqsave(&tc->interrupt_handler_lock, flags);
++
++#if defined(TC_FAKE_INTERRUPTS)
++	/* If we're faking interrupts pretend we got both ext and PDP ints */
++	interrupt_status = TC_INTERRUPT_FLAG_EXT
++		| TC_INTERRUPT_FLAG_PDP;
++#else
++	interrupt_status = ioread32(tc->tcf.registers
++				    + TCF_CLK_CTRL_INTERRUPT_STATUS);
++#endif
++
++	if (interrupt_status & TC_INTERRUPT_FLAG_EXT) {
++		struct tc_interrupt_handler *ext_int =
++			&tc->interrupt_handlers[TC_INTERRUPT_EXT];
++
++		if (ext_int->enabled && ext_int->handler_function) {
++			ext_int->handler_function(ext_int->handler_data);
++			interrupt_clear |= TC_INTERRUPT_FLAG_EXT;
++		}
++		ret = IRQ_HANDLED;
++	}
++	if (interrupt_status & TC_INTERRUPT_FLAG_PDP) {
++		struct tc_interrupt_handler *pdp_int =
++			&tc->interrupt_handlers[TC_INTERRUPT_PDP];
++
++		if (pdp_int->enabled && pdp_int->handler_function) {
++			pdp_int->handler_function(pdp_int->handler_data);
++			interrupt_clear |= TC_INTERRUPT_FLAG_PDP;
++		}
++		ret = IRQ_HANDLED;
++	}
++
++	if (tc->version == APOLLO_VERSION_TCF_5) {
++		/* On TC5 the interrupt is not generated by the TC framework,
++		 * but by the PDP itself, so we always have to call back into
++		 * the TC5 PDP code regardless of the interrupt status of the
++		 * TCF.
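++		 * As a consequence TC_INTERRUPT_TC5_PDP is never added to
++		 * interrupt_clear; the TCF status register below knows
++		 * nothing about it.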
++ */ ++ struct tc_interrupt_handler *pdp_int = ++ &tc->interrupt_handlers[TC_INTERRUPT_TC5_PDP]; ++ ++ if (pdp_int->enabled && pdp_int->handler_function) { ++ pdp_int->handler_function(pdp_int->handler_data); ++ ret = IRQ_HANDLED; ++ } ++ } ++ ++ if (interrupt_clear) ++ iowrite32(0xffffffff, ++ tc->tcf.registers + TCF_CLK_CTRL_INTERRUPT_CLEAR); ++ ++ spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags); ++ ++ return ret; ++} ++ ++int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll) ++{ ++ int err = 0; ++ ++ *tmp = 0; ++ *pll = 0; ++ ++ if (tc->version == APOLLO_VERSION_TCF_5) ++ /* Not implemented on TCF5 */ ++ goto err_out; ++ else if (tc->version == APOLLO_VERSION_TCF_2) { ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)) ++ unsigned long t; ++#else ++ int t; ++#endif ++ ++ err = apollo_thermal_get_temp(apollo_pdata.thermal_zone, &t); ++ if (err) ++ goto err_out; ++ *tmp = t / 1000; ++ } ++ ++ if (spi_read(tc, 0x2, pll)) { ++ dev_err(&tc->pdev->dev, "Failed to read PLL status\n"); ++ err = -ENODEV; ++ goto err_out; ++ } ++ ++err_out: ++ return err; ++} ++ ++int apollo_sys_strings(struct tc_device *tc, ++ char *str_fpga_rev, size_t size_fpga_rev, ++ char *str_tcf_core_rev, size_t size_tcf_core_rev, ++ char *str_tcf_core_target_build_id, ++ size_t size_tcf_core_target_build_id, ++ char *str_pci_ver, size_t size_pci_ver, ++ char *str_macro_ver, size_t size_macro_ver) ++{ ++ int err = 0; ++ u32 val; ++ resource_size_t host_fpga_base; ++ void __iomem *host_fpga_registers; ++ ++ /* To get some of the version information we need to read from a ++ * register that we don't normally have mapped. Map it temporarily ++ * (without trying to reserve it) to get the information we need. ++ */ ++ host_fpga_base = ++ pci_resource_start(tc->pdev, SYS_APOLLO_REG_PCI_BASENUM) ++ + 0x40F0; ++ ++ host_fpga_registers = ioremap(host_fpga_base, 0x04); ++ if (!host_fpga_registers) { ++ dev_err(&tc->pdev->dev, ++ "Failed to map host fpga registers\n"); ++ err = -EIO; ++ goto err_out; ++ } ++ ++ /* Create the components of the PCI and macro versions */ ++ val = ioread32(host_fpga_registers); ++ snprintf(str_pci_ver, size_pci_ver, "%d", ++ HEX2DEC((val & 0x00FF0000) >> 16)); ++ snprintf(str_macro_ver, size_macro_ver, "%d.%d", ++ (val & 0x00000F00) >> 8, ++ HEX2DEC((val & 0x000000FF) >> 0)); ++ ++ /* Unmap the register now that we no longer need it */ ++ iounmap(host_fpga_registers); ++ ++ /* ++ * Check bits 7:0 of register 0x28 (TCF_CORE_REV_REG or SW_IF_VERSION ++ * depending on its own value) to find out how the driver should ++ * generate the strings for FPGA and core revision. 
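++	 * A value of 0 selects the old layout (three-part revisions read
++	 * from TCF_CORE_REV_REG and FPGA_REV_REG); 1 selects the newer
++	 * scheme where TCF_CLK_CTRL_REL holds a MAJOR.MINOR release;
++	 * anything else is unrecognised and reported as such below.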
++ */ ++ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_SW_IF_VERSION); ++ val = (val & VERSION_MASK) >> VERSION_SHIFT; ++ ++ if (val == 0) { ++ /* Create the components of the TCF core revision number */ ++ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_TCF_CORE_REV_REG); ++ snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d.%d.%d", ++ HEX2DEC((val & TCF_CORE_REV_REG_MAJOR_MASK) ++ >> TCF_CORE_REV_REG_MAJOR_SHIFT), ++ HEX2DEC((val & TCF_CORE_REV_REG_MINOR_MASK) ++ >> TCF_CORE_REV_REG_MINOR_SHIFT), ++ HEX2DEC((val & TCF_CORE_REV_REG_MAINT_MASK) ++ >> TCF_CORE_REV_REG_MAINT_SHIFT)); ++ ++ /* Create the components of the FPGA revision number */ ++ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_FPGA_REV_REG); ++ snprintf(str_fpga_rev, size_fpga_rev, "%d.%d.%d", ++ HEX2DEC((val & FPGA_REV_REG_MAJOR_MASK) ++ >> FPGA_REV_REG_MAJOR_SHIFT), ++ HEX2DEC((val & FPGA_REV_REG_MINOR_MASK) ++ >> FPGA_REV_REG_MINOR_SHIFT), ++ HEX2DEC((val & FPGA_REV_REG_MAINT_MASK) ++ >> FPGA_REV_REG_MAINT_SHIFT)); ++ } else if (val == 1) { ++ /* Create the components of the TCF core revision number */ ++ snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d", val); ++ ++ /* Create the components of the FPGA revision number */ ++ val = ioread32(tc->tcf.registers + TCF_CLK_CTRL_REL); ++ snprintf(str_fpga_rev, size_fpga_rev, "%d.%d", ++ HEX2DEC((val & MAJOR_MASK) >> MAJOR_SHIFT), ++ HEX2DEC((val & MINOR_MASK) >> MINOR_SHIFT)); ++ } else { ++ dev_warn(&tc->pdev->dev, ++ "%s: unrecognised SW_IF_VERSION %#08x\n", ++ __func__, val); ++ ++ /* Create the components of the TCF core revision number */ ++ snprintf(str_tcf_core_rev, size_tcf_core_rev, "%d", val); ++ ++ /* Create the components of the FPGA revision number */ ++ snprintf(str_fpga_rev, size_fpga_rev, "N/A"); ++ } ++ ++ /* Create the component of the TCF core target build ID */ ++ val = ioread32(tc->tcf.registers + ++ TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG); ++ snprintf(str_tcf_core_target_build_id, size_tcf_core_target_build_id, ++ "%d", ++ (val & TCF_CORE_TARGET_BUILD_ID_MASK) ++ >> TCF_CORE_TARGET_BUILD_ID_SHIFT); ++ ++err_out: ++ return err; ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_apollo.h b/drivers/gpu/drm/img-rogue/apollo/tc_apollo.h +new file mode 100644 +index 000000000000..0090dd2e570e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/tc_apollo.h +@@ -0,0 +1,77 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. 
++ *
++ * If you wish to allow use of your version of this file only under the terms of
++ * GPL, and not to allow others to use your version of this file under the terms
++ * of the MIT license, indicate your decision by deleting the provisions above
++ * and replace them with the notice and other provisions required by GPL as set
++ * out in the file called "GPL-COPYING" included in this distribution. If you do
++ * not delete the provisions above, a recipient may use your version of this file
++ * under the terms of either the MIT license or GPL.
++ *
++ * This License is also included in this distribution in the file called
++ * "MIT-COPYING".
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _APOLLO_DRV_H
++#define _APOLLO_DRV_H
++
++#include "tc_drv_internal.h"
++#include "apollo_regs.h"
++
++#if defined(SUPPORT_RGX) && defined(SUPPORT_APOLLO_FPGA)
++#error Define either SUPPORT_RGX or SUPPORT_APOLLO_FPGA, not both
++#endif
++
++int apollo_init(struct tc_device *tc, struct pci_dev *pdev,
++	int *core_clock, int *mem_clock, int sys_clock, int *clock_multiplex,
++	int pdp_mem_size, int secure_mem_size,
++	int mem_latency, int mem_wresp_latency, int mem_mode);
++int apollo_cleanup(struct tc_device *tc);
++
++int apollo_register_pdp_device(struct tc_device *tc);
++int apollo_register_ext_device(struct tc_device *tc);
++
++void apollo_enable_interrupt_register(struct tc_device *tc,
++				      int interrupt_id);
++void apollo_disable_interrupt_register(struct tc_device *tc,
++				       int interrupt_id);
++
++irqreturn_t apollo_irq_handler(int irq, void *data);
++
++int apollo_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll);
++int apollo_sys_strings(struct tc_device *tc,
++		       char *str_fpga_rev, size_t size_fpga_rev,
++		       char *str_tcf_core_rev, size_t size_tcf_core_rev,
++		       char *str_tcf_core_target_build_id,
++		       size_t size_tcf_core_target_build_id,
++		       char *str_pci_ver, size_t size_pci_ver,
++		       char *str_macro_ver, size_t size_macro_ver);
++
++#endif /* _APOLLO_DRV_H */
+diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_clocks.h b/drivers/gpu/drm/img-rogue/apollo/tc_clocks.h
+new file mode 100644
+index 000000000000..431273de0827
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/apollo/tc_clocks.h
+@@ -0,0 +1,158 @@
++/*************************************************************************/ /*!
++@File
++@Title          System Description Header
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    This header provides system-specific declarations and macros
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(TC_CLOCKS_H)
++#define TC_CLOCKS_H
++
++/*
++ * The core clock speed is passed through a multiplier depending on the TC
++ * version.
++ *
++ * On TC_ES1: Multiplier = x3, final speed = 270MHz
++ * On TC_ES2: Multiplier = x6, final speed = 540MHz
++ * On TCF5:   Multiplier = x1, final speed = 45MHz
++ *
++ *
++ * The base (unmultiplied) speed can be adjusted using a module parameter
++ * called "sys_core_clk_speed", a number in Hz.
++ * As an example:
++ *
++ * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start
++ *
++ * would result in a core speed of 60MHz x Multiplier.
++ *
++ *
++ * The memory clock is unmultiplied and can be adjusted using a module
++ * parameter called "sys_mem_clk_speed"; this should be the number in Hz for
++ * the memory clock speed.
++ * As an example:
++ *
++ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start
++ *
++ * would attempt to start the driver with the memory clock speed set to 100MHz.
++ *
++ *
++ * The same applies to the system interface clock speed, "sys_sysif_clk_speed".
++ * Needed for TCF5 but not for TC_ES2/ES1.
++ * As an example:
++ *
++ * PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start
++ *
++ * would attempt to start the driver with the system clock speed set to 45MHz.
++ * ++ * ++ * All parameters can be specified at once, e.g., ++ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start ++ */ ++ ++#define RGX_TC_SYS_CLOCK_SPEED (25000000) /*< At the moment just used for TCF5 */ ++#define RGX_TC_CLOCK_MULTIPLEX (1) ++ ++#if defined(TC_APOLLO_TCF5_22_46_54_330) ++ #undef RGX_TC_SYS_CLOCK_SPEED ++ #define RGX_TC_CORE_CLOCK_SPEED (100000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (45000000) ++ #define RGX_TC_SYS_CLOCK_SPEED (45000000) ++#elif defined(TC_APOLLO_TCF5_22_49_21_16) || \ ++ defined(TC_APOLLO_TCF5_22_60_22_29) || \ ++ defined(TC_APOLLO_TCF5_22_75_22_25) ++ #define RGX_TC_CORE_CLOCK_SPEED (20000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (50000000) ++#elif defined(TC_APOLLO_TCF5_22_67_54_30) ++ #define RGX_TC_CORE_CLOCK_SPEED (100000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (45000000) ++#elif defined(TC_APOLLO_TCF5_22_89_204_18) ++ #define RGX_TC_CORE_CLOCK_SPEED (50000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (25000000) ++#elif defined(TC_APOLLO_TCF5_22_86_104_218) ++ #define RGX_TC_CORE_CLOCK_SPEED (30000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (40000000) ++#elif defined(TC_APOLLO_TCF5_22_88_104_318) ++ #define RGX_TC_CORE_CLOCK_SPEED (28000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (40000000) ++#elif defined(TC_APOLLO_TCF5_22_98_54_230) ++ #define RGX_TC_CORE_CLOCK_SPEED (100000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (40000000) ++#elif defined(TC_APOLLO_TCF5_22_102_54_38) ++ #define RGX_TC_CORE_CLOCK_SPEED (80000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (25000000) ++#elif defined(TC_APOLLO_TCF5_BVNC_NOT_SUPPORTED) ++ /* TC TCF5 (22.*) fallback frequencies */ ++ #undef RGX_TC_SYS_CLOCK_SPEED ++ #define RGX_TC_CORE_CLOCK_SPEED (20000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (50000000) ++ #define RGX_TC_SYS_CLOCK_SPEED (25000000) ++#elif defined(TC_APOLLO_TCF5_33_8_22_1) ++ #define RGX_TC_CORE_CLOCK_SPEED (25000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (45000000) ++#elif defined(TC_APOLLO_TCF5_REFERENCE) ++ /* TC TCF5 (Reference bitfile) */ ++ #undef RGX_TC_SYS_CLOCK_SPEED ++ #define RGX_TC_CORE_CLOCK_SPEED (50000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (50000000) ++ #define RGX_TC_SYS_CLOCK_SPEED (45000000) ++#elif defined(TC_APOLLO_BONNIE) ++ /* TC Bonnie */ ++ #define RGX_TC_CORE_CLOCK_SPEED (18000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (65000000) ++#elif defined(TC_APOLLO_ES2) ++ /* TC ES2 */ ++ #define RGX_TC_CORE_CLOCK_SPEED (90000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (104000000) ++#elif defined(TC_ORION) ++ #define RGX_TC_CORE_CLOCK_SPEED (40000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (100000000) ++ #define RGX_TC_SYS_CLOCK_SPEED (25000000) ++#elif defined(TC_APOLLO_TCF5_29_19_52_202) ++ #define RGX_TC_CORE_CLOCK_SPEED (25000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (40000000) ++#elif defined(TC_APOLLO_TCF5_29_18_204_508) ++ #define RGX_TC_CORE_CLOCK_SPEED (15000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (35000000) ++#else ++ /* TC ES1 */ ++ #define RGX_TC_CORE_CLOCK_SPEED (90000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (65000000) ++#endif ++ ++#endif /* if !defined(TC_CLOCKS_H) */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_drv.c b/drivers/gpu/drm/img-rogue/apollo/tc_drv.c +new file mode 100644 +index 000000000000..c69498fdf8b2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/tc_drv.c +@@ -0,0 +1,943 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++/* ++ * This is a device driver for the testchip framework. It creates platform ++ * devices for the pdp and ext sub-devices, and exports functions to manage the ++ * shared interrupt handling ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#if defined(CONFIG_MTRR) ++#include ++#endif ++ ++#include "pvrmodule.h" ++ ++#include "tc_apollo.h" ++#include "tc_odin.h" ++ ++/* How much memory to give to the PDP heap (used for pdp buffers). */ ++#define TC_PDP_MEM_SIZE_BYTES ((TC_DISPLAY_MEM_SIZE)*1024*1024) ++ ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++/* How much memory to give to the secure heap. 
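++ * (TC_SECURE_MEM_SIZE, like TC_DISPLAY_MEM_SIZE above, is given in
++ * megabytes, hence the *1024*1024 conversion below.)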
*/ ++#define TC_SECURE_MEM_SIZE_BYTES ((TC_SECURE_MEM_SIZE)*1024*1024) ++#endif ++ ++#define PCI_VENDOR_ID_POWERVR 0x1010 ++#define DEVICE_ID_PCI_APOLLO_FPGA 0x1CF1 ++#define DEVICE_ID_PCIE_APOLLO_FPGA 0x1CF2 ++ ++MODULE_DESCRIPTION("PowerVR testchip framework driver"); ++ ++static int tc_core_clock; ++module_param(tc_core_clock, int, 0444); ++MODULE_PARM_DESC(tc_core_clock, "TC core clock speed"); ++ ++static int tc_mem_clock; ++module_param(tc_mem_clock, int, 0444); ++MODULE_PARM_DESC(tc_mem_clock, "TC memory clock speed"); ++ ++static int tc_clock_multiplex; ++module_param(tc_clock_multiplex, int, 0444); ++MODULE_PARM_DESC(tc_clock_multiplex, "TC core clock multiplex"); ++ ++static int tc_sys_clock = RGX_TC_SYS_CLOCK_SPEED; ++module_param(tc_sys_clock, int, 0444); ++MODULE_PARM_DESC(tc_sys_clock, "TC system clock speed (TCF5 only)"); ++ ++static int tc_mem_latency; ++module_param(tc_mem_latency, int, 0444); ++MODULE_PARM_DESC(tc_mem_latency, "TC memory read latency in cycles (TCF5 only)"); ++ ++static unsigned long tc_mem_mode = TC_MEMORY_CONFIG; ++module_param(tc_mem_mode, ulong, 0444); ++MODULE_PARM_DESC(tc_mem_mode, "TC memory mode (local = 1, hybrid = 2, host = 3)"); ++ ++static int tc_wresp_latency; ++module_param(tc_wresp_latency, int, 0444); ++MODULE_PARM_DESC(tc_wresp_latency, "TC memory write response latency in cycles (TCF5 only)"); ++ ++static unsigned long tc_pdp_mem_size = TC_PDP_MEM_SIZE_BYTES; ++module_param(tc_pdp_mem_size, ulong, 0444); ++MODULE_PARM_DESC(tc_pdp_mem_size, ++ "TC PDP reserved memory size in bytes"); ++ ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++static unsigned long tc_secure_mem_size = TC_SECURE_MEM_SIZE_BYTES; ++module_param(tc_secure_mem_size, ulong, 0444); ++MODULE_PARM_DESC(tc_secure_mem_size, ++ "TC secure reserved memory size in bytes"); ++#endif ++ ++static bool fbc_bypass; ++module_param(fbc_bypass, bool, 0444); ++MODULE_PARM_DESC(fbc_bypass, "Force bypass of PDP2 FBC decompression"); ++ ++static struct debugfs_blob_wrapper tc_debugfs_rogue_name_blobs[] = { ++ [APOLLO_VERSION_TCF_2] = { ++ .data = "hood", /* probably */ ++ .size = sizeof("hood") - 1, ++ }, ++ [APOLLO_VERSION_TCF_5] = { ++ .data = "fpga (unknown)", ++ .size = sizeof("fpga (unknown)") - 1, ++ }, ++ [APOLLO_VERSION_TCF_BONNIE] = { ++ .data = "bonnie", ++ .size = sizeof("bonnie") - 1, ++ }, ++ [ODIN_VERSION_TCF_BONNIE] = { ++ .data = "bonnie", ++ .size = sizeof("bonnie") - 1, ++ }, ++ [ODIN_VERSION_FPGA] = { ++ .data = "fpga (unknown)", ++ .size = sizeof("fpga (unknown)") - 1, ++ }, ++ [ODIN_VERSION_ORION] = { ++ .data = "orion", ++ .size = sizeof("orion") - 1, ++ }, ++}; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++/* forward declaration */ ++static void tc_devres_release(struct device *dev, void *res); ++ ++static ssize_t rogue_name_show(struct device_driver *drv, char *buf) ++{ ++ struct pci_dev *pci_dev; ++ struct tc_device *tc; ++ struct device *dev; ++ ++ dev = driver_find_next_device(drv, NULL); ++ if (!dev) ++ return -ENODEV; ++ ++ pci_dev = to_pci_dev(dev); ++ if (!pci_dev) ++ return -ENODEV; ++ ++ tc = devres_find(&pci_dev->dev, tc_devres_release, NULL, NULL); ++ if (!tc) ++ return -ENODEV; ++ ++ return sprintf(buf, "%s\n", (const char *) ++ tc_debugfs_rogue_name_blobs[tc->version].data); ++} ++ ++static DRIVER_ATTR_RO(rogue_name); ++ ++static struct attribute *tc_attrs[] = { ++ &driver_attr_rogue_name.attr, ++ NULL, ++}; ++ ++static struct attribute_group tc_attr_group = { ++ .attrs = tc_attrs, ++}; ++ ++static const struct attribute_group 
*tc_attr_groups[] = {
++	&tc_attr_group,
++	NULL,
++};
++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) */
++
++#if defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
++/*
++ * A return value of:
++ *      0 or more means success
++ *     -1 means we were unable to add an mtrr but we should continue
++ *     -2 means we were unable to add an mtrr but we shouldn't continue
++ */
++static int mtrr_setup(struct pci_dev *pdev,
++		      resource_size_t mem_start,
++		      resource_size_t mem_size)
++{
++	int err;
++	int mtrr;
++
++	/* Reset MTRR */
++	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_UNCACHABLE, 0);
++	if (mtrr < 0) {
++		dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
++			__LINE__, __func__, mtrr);
++		mtrr = -2;
++		goto err_out;
++	}
++
++	err = mtrr_del(mtrr, mem_start, mem_size);
++	if (err < 0) {
++		dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
++			__LINE__, __func__, err);
++		mtrr = -2;
++		goto err_out;
++	}
++
++	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRBACK, 0);
++	if (mtrr < 0) {
++		/* Stop, but not an error as this may already be set up */
++		dev_dbg(&pdev->dev,
++			"%d - %s: mtrr_add failed (%d) - probably means the mtrr is already setup\n",
++			__LINE__, __func__, mtrr);
++		mtrr = -1;
++		goto err_out;
++	}
++
++	err = mtrr_del(mtrr, mem_start, mem_size);
++	if (err < 0) {
++		dev_err(&pdev->dev, "%d - %s: mtrr_del failed (%d)\n",
++			__LINE__, __func__, err);
++		mtrr = -2;
++		goto err_out;
++	}
++
++	if (mtrr == 0) {
++		/* Replace 0 with a non-overlapping WRBACK mtrr */
++		err = mtrr_add(0, mem_start, MTRR_TYPE_WRBACK, 0);
++		if (err < 0) {
++			dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
++				__LINE__, __func__, err);
++			mtrr = -2;
++			goto err_out;
++		}
++	}
++
++	mtrr = mtrr_add(mem_start, mem_size, MTRR_TYPE_WRCOMB, 0);
++	if (mtrr < 0) {
++		dev_err(&pdev->dev, "%d - %s: mtrr_add failed (%d)\n",
++			__LINE__, __func__, mtrr);
++		mtrr = -1;
++	}
++
++err_out:
++	return mtrr;
++}
++#endif /* defined(CONFIG_MTRR) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) */
++
++int tc_mtrr_setup(struct tc_device *tc)
++{
++	int err = 0;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
++	/* Register the LMA as write combined */
++	err = arch_io_reserve_memtype_wc(tc->tc_mem.base,
++					 tc->tc_mem.size);
++	if (err)
++		return -ENODEV;
++#endif
++	/* Enable write combining */
++	tc->mtrr = arch_phys_wc_add(tc->tc_mem.base,
++				    tc->tc_mem.size);
++	if (tc->mtrr < 0) {
++		err = -ENODEV;
++		goto err_out;
++	}
++
++#elif defined(CONFIG_MTRR)
++	/* Enable mtrr region caching */
++	tc->mtrr = mtrr_setup(tc->pdev,
++			      tc->tc_mem.base,
++			      tc->tc_mem.size);
++	if (tc->mtrr == -2) {
++		err = -ENODEV;
++		goto err_out;
++	}
++#endif
++	return err;
++
++err_out:
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
++	arch_io_free_memtype_wc(tc->tc_mem.base,
++				tc->tc_mem.size);
++#endif
++	return err;
++}
++
++void tc_mtrr_cleanup(struct tc_device *tc)
++{
++	if (tc->mtrr >= 0) {
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
++		arch_phys_wc_del(tc->mtrr);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
++		arch_io_free_memtype_wc(tc->tc_mem.base,
++					tc->tc_mem.size);
++#endif
++#elif defined(CONFIG_MTRR)
++		int err;
++
++		err = mtrr_del(tc->mtrr,
++			       tc->tc_mem.base,
++			       tc->tc_mem.size);
++		if (err < 0)
++			dev_err(&tc->pdev->dev,
++				"mtrr_del failed (%d)\n", err);
++#endif
++	}
++}
++
++int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack)
++{
++	u32 max_eye_start = eyes >> 16;
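++	/* The 'eyes' word packs the measured data-eye window in delay taps:
++	 * the top 16 bits hold the start of the eye and the bottom 16 bits
++	 * its end. The checks below require start < end and a window of at
++	 * least 7 taps.
++	 */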
++	u32 min_eye_end = eyes & 0xffff;
++
++	/* If either the training or training ack failed, we haven't aligned */
++	if (!(clk_taps & 0x10000) || !(train_ack & 0x100))
++		return 0;
++
++	/* If the max eye >= min eye it means the readings are nonsense */
++	if (max_eye_start >= min_eye_end)
++		return 0;
++
++	/* If we failed the ack pattern more than 4 times */
++	if (((train_ack & 0xf0) >> 4) > 4)
++		return 0;
++
++	/* If there are fewer than 7 taps (240ps @ 40ps/tap; this number
++	 * should be lower for the FPGA, since its taps are bigger). We
++	 * should really calculate the "7" based on the interface clock
++	 * speed.
++	 */
++	if ((min_eye_end - max_eye_start) < 7)
++		return 0;
++
++	return 1;
++}
++
++int tc_iopol32_nonzero(u32 mask, void __iomem *addr)
++{
++	int polnum;
++	u32 read_value;
++
++	for (polnum = 0; polnum < 50; polnum++) {
++		read_value = ioread32(addr) & mask;
++		if (read_value != 0)
++			break;
++		msleep(20);
++	}
++	if (polnum == 50) {
++		pr_err(DRV_NAME " iopol32_nonzero timeout\n");
++		return -ETIME;
++	}
++	return 0;
++}
++
++int request_pci_io_addr(struct pci_dev *pdev, u32 index,
++			resource_size_t offset, resource_size_t length)
++{
++	resource_size_t start, end;
++
++	start = pci_resource_start(pdev, index);
++	end = pci_resource_end(pdev, index);
++
++	if ((start + offset + length - 1) > end)
++		return -EIO;
++	if (pci_resource_flags(pdev, index) & IORESOURCE_IO) {
++		if (request_region(start + offset, length, DRV_NAME) == NULL)
++			return -EIO;
++	} else {
++		if (request_mem_region(start + offset, length, DRV_NAME)
++		    == NULL)
++			return -EIO;
++	}
++	return 0;
++}
++
++void release_pci_io_addr(struct pci_dev *pdev, u32 index,
++			 resource_size_t start, resource_size_t length)
++{
++	if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
++		release_region(start, length);
++	else
++		release_mem_region(start, length);
++}
++
++int setup_io_region(struct pci_dev *pdev,
++		    struct tc_io_region *region, u32 index,
++		    resource_size_t offset, resource_size_t size)
++{
++	int err;
++	resource_size_t pci_phys_addr;
++
++	err = request_pci_io_addr(pdev, index, offset, size);
++	if (err) {
++		dev_err(&pdev->dev,
++			"Failed to request tc registers (err=%d)\n", err);
++		return -EIO;
++	}
++	pci_phys_addr = pci_resource_start(pdev, index);
++	region->region.base = pci_phys_addr + offset;
++	region->region.size = size;
++
++	region->registers = ioremap(region->region.base, region->region.size);
++
++	if (!region->registers) {
++		dev_err(&pdev->dev, "Failed to map tc registers\n");
++		release_pci_io_addr(pdev, index,
++			region->region.base, region->region.size);
++		return -EIO;
++	}
++	return 0;
++}
++
++#if defined(TC_FAKE_INTERRUPTS)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
++void tc_irq_fake_wrapper(struct timer_list *t)
++{
++	struct tc_device *tc = from_timer(tc, t, timer);
++#else
++void tc_irq_fake_wrapper(unsigned long data)
++{
++	struct tc_device *tc = (struct tc_device *)data;
++#endif
++
++	if (tc->odin)
++		odin_irq_handler(0, tc);
++	else
++		apollo_irq_handler(0, tc);
++
++	mod_timer(&tc->timer,
++		  jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
++}
++#endif
++
++static int tc_register_pdp_device(struct tc_device *tc)
++{
++	int err = 0;
++
++	if (tc->odin || tc->orion)
++		err = odin_register_pdp_device(tc);
++	else
++		err = apollo_register_pdp_device(tc);
++
++	return err;
++}
++
++static int tc_register_ext_device(struct tc_device *tc)
++{
++	int err = 0;
++
++	if (tc->odin || tc->orion)
++		err = odin_register_ext_device(tc);
++	else
++		err =
static int tc_cleanup(struct pci_dev *pdev)
++{
++ struct tc_device *tc = devres_find(&pdev->dev,
++ tc_devres_release, NULL, NULL);
++ int i, err = 0;
++
++ if (!tc) {
++ dev_err(&pdev->dev, "No tc device resources found\n");
++ return -ENODEV;
++ }
++
++ debugfs_remove(tc->debugfs_rogue_name);
++
++ for (i = 0; i < TC_INTERRUPT_COUNT; i++)
++ if (tc->interrupt_handlers[i].enabled)
++ tc_disable_interrupt(&pdev->dev, i);
++
++ if (tc->odin || tc->orion)
++ err = odin_cleanup(tc);
++ else
++ err = apollo_cleanup(tc);
++
++ debugfs_remove(tc->debugfs_tc_dir);
++
++ return err;
++}
++
++static int tc_init(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ struct tc_device *tc;
++ int err = 0;
++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
++ int sec_mem_size = TC_SECURE_MEM_SIZE_BYTES;
++#else /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */
++ int sec_mem_size = 0;
++#endif /* defined(SUPPORT_FAKE_SECURE_ION_HEAP) */
++
++ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
++ return -ENOMEM;
++
++ tc = devres_alloc(tc_devres_release,
++ sizeof(*tc), GFP_KERNEL);
++ if (!tc) {
++ err = -ENOMEM;
++ goto err_out;
++ }
++
++ devres_add(&pdev->dev, tc);
++
++ err = tc_enable(&pdev->dev);
++ if (err) {
++ dev_err(&pdev->dev,
++ "tc_enable failed %d\n", err);
++ goto err_release;
++ }
++
++ tc->pdev = pdev;
++
++ spin_lock_init(&tc->interrupt_handler_lock);
++ spin_lock_init(&tc->interrupt_enable_lock);
++
++ tc->debugfs_tc_dir = debugfs_create_dir(DRV_NAME, NULL);
++
++ if (pdev->vendor == PCI_VENDOR_ID_ODIN) {
++ if (pdev->device == DEVICE_ID_ODIN)
++ tc->odin = true;
++ else if (pdev->device == DEVICE_ID_ORION)
++ tc->orion = true;
++
++ dev_info(&pdev->dev, "%s detected\n", odin_tc_name(tc));
++
++ err = odin_init(tc, pdev,
++ &tc_core_clock, &tc_mem_clock, &tc_clock_multiplex,
++ tc_pdp_mem_size, sec_mem_size,
++ tc_mem_latency, tc_wresp_latency,
++ tc_mem_mode, fbc_bypass);
++ if (err)
++ goto err_dev_cleanup;
++
++ } else {
++ dev_info(&pdev->dev, "Apollo detected\n");
++ tc->odin = false;
++
++ err = apollo_init(tc, pdev,
++ &tc_core_clock, &tc_mem_clock, tc_sys_clock, &tc_clock_multiplex,
++ tc_pdp_mem_size, sec_mem_size,
++ tc_mem_latency, tc_wresp_latency,
++ tc_mem_mode);
++ if (err)
++ goto err_dev_cleanup;
++ }
++
++ /* Add the rogue name debugfs entry */
++ tc->debugfs_rogue_name =
++ debugfs_create_blob("rogue-name", 0444,
++ tc->debugfs_tc_dir,
++ &tc_debugfs_rogue_name_blobs[tc->version]);
++
++#if defined(TC_FAKE_INTERRUPTS)
++ dev_warn(&pdev->dev, "WARNING: Faking interrupts every %d ms\n",
++ FAKE_INTERRUPT_TIME_MS);
++#endif
++
++ /* Register pdp and ext platform devices */
++ err = tc_register_pdp_device(tc);
++ if (err)
++ goto err_dev_cleanup;
++
++ err = tc_register_ext_device(tc);
++ if (err)
++ goto err_dev_cleanup;
++
++ err = tc_register_dma_device(tc);
++ if (err)
++ goto err_dev_cleanup;
++
++ devres_remove_group(&pdev->dev, NULL);
++
++ pci_set_master(pdev);
++
++err_out:
++ if (err)
++ dev_err(&pdev->dev, "%s: failed\n", __func__);
++
++ return err;
++
++err_dev_cleanup:
++ tc_cleanup(pdev);
++ tc_disable(&pdev->dev);
++err_release:
++ devres_release_group(&pdev->dev, NULL);
++ goto err_out;
++}
++
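++/* Mirror of tc_init(): unregister the child platform devices first,
++ * then tear down the core testchip resources.
++ */
++static void tc_exit(struct pci_dev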
*pdev) ++{ ++ struct tc_device *tc = devres_find(&pdev->dev, ++ tc_devres_release, NULL, NULL); ++ ++ if (!tc) { ++ dev_err(&pdev->dev, "No tc device resources found\n"); ++ return; ++ } ++ ++ if (tc->pdp_dev) ++ platform_device_unregister(tc->pdp_dev); ++ ++ if (tc->ext_dev) ++ platform_device_unregister(tc->ext_dev); ++ ++ if (tc->dma_dev) ++ platform_device_unregister(tc->dma_dev); ++ ++ pci_clear_master(pdev); ++ ++ tc_cleanup(pdev); ++ ++ tc_disable(&pdev->dev); ++} ++ ++static struct pci_device_id tc_pci_tbl[] = { ++ { PCI_VDEVICE(POWERVR, DEVICE_ID_PCI_APOLLO_FPGA) }, ++ { PCI_VDEVICE(POWERVR, DEVICE_ID_PCIE_APOLLO_FPGA) }, ++ { PCI_VDEVICE(POWERVR, DEVICE_ID_TBA) }, ++ { PCI_VDEVICE(ODIN, DEVICE_ID_ODIN) }, ++ { PCI_VDEVICE(ODIN, DEVICE_ID_ORION) }, ++ { }, ++}; ++ ++static struct pci_driver tc_pci_driver = { ++ .name = DRV_NAME, ++ .id_table = tc_pci_tbl, ++ .probe = tc_init, ++ .remove = tc_exit, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++ .groups = tc_attr_groups, ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) */ ++}; ++ ++module_pci_driver(tc_pci_driver); ++ ++//MODULE_DEVICE_TABLE(pci, tc_pci_tbl); ++ ++int tc_enable(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ ++ return pci_enable_device(pdev); ++} ++EXPORT_SYMBOL(tc_enable); ++ ++void tc_disable(struct device *dev) ++{ ++ struct pci_dev *pdev = to_pci_dev(dev); ++ ++ pci_disable_device(pdev); ++} ++EXPORT_SYMBOL(tc_disable); ++ ++int tc_set_interrupt_handler(struct device *dev, int interrupt_id, ++ void (*handler_function)(void *), void *data) ++{ ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); ++ int err = 0; ++ unsigned long flags; ++ ++ if (!tc) { ++ dev_err(dev, "No tc device resources found\n"); ++ err = -ENODEV; ++ goto err_out; ++ } ++ ++ if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) { ++ dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); ++ err = -EINVAL; ++ goto err_out; ++ } ++ ++ spin_lock_irqsave(&tc->interrupt_handler_lock, flags); ++ ++ tc->interrupt_handlers[interrupt_id].handler_function = ++ handler_function; ++ tc->interrupt_handlers[interrupt_id].handler_data = data; ++ ++ spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags); ++ ++err_out: ++ return err; ++} ++EXPORT_SYMBOL(tc_set_interrupt_handler); ++ ++int tc_enable_interrupt(struct device *dev, int interrupt_id) ++{ ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); ++ int err = 0; ++ unsigned long flags; ++ ++ if (!tc) { ++ dev_err(dev, "No tc device resources found\n"); ++ err = -ENODEV; ++ goto err_out; ++ } ++ if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) { ++ dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); ++ err = -EINVAL; ++ goto err_out; ++ } ++ spin_lock_irqsave(&tc->interrupt_enable_lock, flags); ++ ++ if (tc->interrupt_handlers[interrupt_id].enabled) { ++ dev_warn(dev, "Interrupt ID %d already enabled\n", ++ interrupt_id); ++ err = -EEXIST; ++ goto err_unlock; ++ } ++ tc->interrupt_handlers[interrupt_id].enabled = true; ++ ++ if (tc->odin || tc->orion) ++ odin_enable_interrupt_register(tc, interrupt_id); ++ else ++ apollo_enable_interrupt_register(tc, interrupt_id); ++ ++err_unlock: ++ spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags); ++err_out: ++ return err; ++} ++EXPORT_SYMBOL(tc_enable_interrupt); ++ ++int tc_disable_interrupt(struct device *dev, int interrupt_id) ++{ ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); ++ int err = 0; ++ 
unsigned long flags; ++ ++ if (!tc) { ++ dev_err(dev, "No tc device resources found\n"); ++ err = -ENODEV; ++ goto err_out; ++ } ++ if (interrupt_id < 0 || interrupt_id >= TC_INTERRUPT_COUNT) { ++ dev_err(dev, "Invalid interrupt ID (%d)\n", interrupt_id); ++ err = -EINVAL; ++ goto err_out; ++ } ++ spin_lock_irqsave(&tc->interrupt_enable_lock, flags); ++ ++ if (!tc->interrupt_handlers[interrupt_id].enabled) { ++ dev_warn(dev, "Interrupt ID %d already disabled\n", ++ interrupt_id); ++ } ++ tc->interrupt_handlers[interrupt_id].enabled = false; ++ ++ if (tc->odin || tc->orion) ++ odin_disable_interrupt_register(tc, interrupt_id); ++ else ++ apollo_disable_interrupt_register(tc, interrupt_id); ++ ++ spin_unlock_irqrestore(&tc->interrupt_enable_lock, flags); ++err_out: ++ return err; ++} ++EXPORT_SYMBOL(tc_disable_interrupt); ++ ++int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll) ++{ ++ int err = -ENODEV; ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); ++ ++ if (!tc) { ++ dev_err(dev, "No tc device resources found\n"); ++ goto err_out; ++ } ++ ++ if (tc->odin || tc->orion) ++ err = odin_sys_info(tc, tmp, pll); ++ else ++ err = apollo_sys_info(tc, tmp, pll); ++ ++err_out: ++ return err; ++} ++EXPORT_SYMBOL(tc_sys_info); ++ ++int tc_sys_strings(struct device *dev, ++ char *str_fpga_rev, size_t size_fpga_rev, ++ char *str_tcf_core_rev, size_t size_tcf_core_rev, ++ char *str_tcf_core_target_build_id, ++ size_t size_tcf_core_target_build_id, ++ char *str_pci_ver, size_t size_pci_ver, ++ char *str_macro_ver, size_t size_macro_ver) ++{ ++ int err = -ENODEV; ++ ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); ++ ++ if (!tc) { ++ dev_err(dev, "No tc device resources found\n"); ++ goto err_out; ++ } ++ ++ if (!str_fpga_rev || ++ !size_fpga_rev || ++ !str_tcf_core_rev || ++ !size_tcf_core_rev || ++ !str_tcf_core_target_build_id || ++ !size_tcf_core_target_build_id || ++ !str_pci_ver || ++ !size_pci_ver || ++ !str_macro_ver || ++ !size_macro_ver) { ++ ++ err = -EINVAL; ++ goto err_out; ++ } ++ ++ if (tc->odin || tc->orion) { ++ err = odin_sys_strings(tc, ++ str_fpga_rev, size_fpga_rev, ++ str_tcf_core_rev, size_tcf_core_rev, ++ str_tcf_core_target_build_id, ++ size_tcf_core_target_build_id, ++ str_pci_ver, size_pci_ver, ++ str_macro_ver, size_macro_ver); ++ } else { ++ err = apollo_sys_strings(tc, ++ str_fpga_rev, size_fpga_rev, ++ str_tcf_core_rev, size_tcf_core_rev, ++ str_tcf_core_target_build_id, ++ size_tcf_core_target_build_id, ++ str_pci_ver, size_pci_ver, ++ str_macro_ver, size_macro_ver); ++ } ++ ++err_out: ++ return err; ++} ++EXPORT_SYMBOL(tc_sys_strings); ++ ++int tc_core_clock_speed(struct device *dev) ++{ ++ return tc_core_clock; ++} ++EXPORT_SYMBOL(tc_core_clock_speed); ++ ++int tc_core_clock_multiplex(struct device *dev) ++{ ++ return tc_clock_multiplex; ++} ++EXPORT_SYMBOL(tc_core_clock_multiplex); ++ ++unsigned int tc_odin_subvers(struct device *dev) ++{ ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); ++ ++ if (tc->orion) ++ return 1; ++ else ++ return 0; ++} ++EXPORT_SYMBOL(tc_odin_subvers); ++ ++bool tc_pfim_capable(struct device *dev) ++{ ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); ++ ++ if (tc->odin && !tc->orion) ++ return (!tc->fbc_bypass && ++ odin_pfim_compatible(tc)); ++ ++ return false; ++} ++EXPORT_SYMBOL(tc_pfim_capable); ++ ++bool tc_pdp2_compatible(struct device *dev) ++{ ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); 
++ ++ /* PDP2 is available in all versions of Sleipnir PCB / Odin RTL */ ++ return (tc->odin && !tc->orion); ++} ++EXPORT_SYMBOL(tc_pdp2_compatible); ++ ++struct dma_chan *tc_dma_chan(struct device *dev, char *name) ++ ++{ ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); ++ ++ if (tc->odin) ++ return odin_cdma_chan(tc, name); ++ else ++ return NULL; ++} ++EXPORT_SYMBOL(tc_dma_chan); ++ ++void tc_dma_chan_free(struct device *dev, ++ void *chan_prv) ++{ ++ struct tc_device *tc = devres_find(dev, tc_devres_release, ++ NULL, NULL); ++ ++ if (tc->odin) ++ odin_cdma_chan_free(tc, chan_prv); ++ ++ return; ++} ++EXPORT_SYMBOL(tc_dma_chan_free); +diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_drv.h b/drivers/gpu/drm/img-rogue/apollo/tc_drv.h +new file mode 100644 +index 000000000000..d8f5f9328af3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/tc_drv.h +@@ -0,0 +1,183 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#ifndef _TC_DRV_H
++#define _TC_DRV_H
++
++/*
++ * This contains the hooks for the testchip driver, as used by the Rogue and
++ * PDP sub-devices, and the platform data passed to each of their drivers
++ */
++
++#include <linux/version.h>
++#include <linux/device.h>
++#include <linux/types.h>
++
++/* Valid values for the TC_MEMORY_CONFIG configuration option */
++#define TC_MEMORY_LOCAL 1
++#define TC_MEMORY_HOST 2
++#define TC_MEMORY_HYBRID 3
++
++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++
++#include PVR_ANDROID_ION_HEADER
++
++/* NOTE: This should be kept in sync with the user side (in buffer_generic.c) */
++#if defined(SUPPORT_RGX)
++#define ION_HEAP_TC_ROGUE (ION_HEAP_TYPE_CUSTOM+1)
++#endif
++#define ION_HEAP_TC_PDP (ION_HEAP_TYPE_CUSTOM+2)
++
++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
++#define ION_HEAP_TC_SECURE (ION_HEAP_TYPE_CUSTOM+3)
++#endif
++
++#endif /* defined(SUPPORT_ION) */
++
++#define TC_INTERRUPT_PDP 0
++#define TC_INTERRUPT_EXT 1
++#define TC_INTERRUPT_TC5_PDP 2
++#define TC_INTERRUPT_PDP2 3
++#define TC_INTERRUPT_CDMA 4
++#define TC_INTERRUPT_CDMA2 5
++#define TC_INTERRUPT_COUNT 6
++
++int tc_enable(struct device *dev);
++void tc_disable(struct device *dev);
++
++int tc_enable_interrupt(struct device *dev, int interrupt_id);
++int tc_disable_interrupt(struct device *dev, int interrupt_id);
++
++int tc_set_interrupt_handler(struct device *dev, int interrupt_id,
++ void (*handler_function)(void *), void *handler_data);
++
++int tc_sys_info(struct device *dev, u32 *tmp, u32 *pll);
++int tc_sys_strings(struct device *dev,
++ char *str_fpga_rev, size_t size_fpga_rev, char *str_tcf_core_rev,
++ size_t size_tcf_core_rev, char *str_tcf_core_target_build_id,
++ size_t size_tcf_core_target_build_id, char *str_pci_ver,
++ size_t size_pci_ver, char *str_macro_ver, size_t size_macro_ver);
++int tc_core_clock_speed(struct device *dev);
++int tc_core_clock_multiplex(struct device *dev);
++
++unsigned int tc_odin_subvers(struct device *dev);
++
++bool tc_pfim_capable(struct device *dev);
++bool tc_pdp2_compatible(struct device *dev);
++
++void tc_dma_chan_free(struct device *dev, void *chandata);
++struct dma_chan *tc_dma_chan(struct device *dev, char *name);
++
++#define APOLLO_DEVICE_NAME_PDP "apollo_pdp"
++#define ODN_DEVICE_NAME_PDP "odin_pdp"
++#define ODN_DEVICE_NAME_CDMA "odin-cdma"
++
++/* The following structs are initialised and passed down by the parent tc
++ * driver to the respective sub-drivers
++ */
++
++struct tc_pdp_platform_data {
++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++ struct ion_device *ion_device;
++ int ion_heap_id;
++#endif
++ resource_size_t memory_base;
++
++ /* The following is used by the drm_pdp driver as it manages the
++ * pdp memory
++ */
++ resource_size_t pdp_heap_memory_base;
++ resource_size_t pdp_heap_memory_size;
++
++ /* Used to export host address instead of pdp address, depends on the
++ * TC memory mode.
++ *
++ * PDP phys address space is from 0 to end of local device memory,
++ * however if the TC is configured to operate in hybrid mode then the
++ * GPU is configured to match the CPU phys address space view.
++ */ ++ bool dma_map_export_host_addr; ++}; ++ ++struct tc_dma_platform_data { ++ u32 addr_width; ++ u32 num_dmas; ++ bool has_dre; ++ bool has_sg; ++}; ++ ++#if defined(SUPPORT_RGX) ++ ++#define TC_DEVICE_NAME_ROGUE "tc_rogue" ++ ++struct tc_rogue_platform_data { ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ struct ion_device *ion_device; ++ int ion_heap_id; ++#endif ++ /* The testchip memory mode (LOCAL, HOST or HYBRID) */ ++ int mem_mode; ++ ++ /* The base address of the testchip memory (CPU physical address) - ++ * used to convert from CPU-Physical to device-physical addresses ++ */ ++ resource_size_t tc_memory_base; ++ ++ /* The following is used to setup the services heaps that map to the ++ * ion heaps ++ */ ++ resource_size_t pdp_heap_memory_base; ++ resource_size_t pdp_heap_memory_size; ++ resource_size_t rogue_heap_memory_base; ++ resource_size_t rogue_heap_memory_size; ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++ resource_size_t secure_heap_memory_base; ++ resource_size_t secure_heap_memory_size; ++#endif ++ ++ /* DMA channel names for RGX usage */ ++ char *tc_dma_tx_chan_name; ++ char *tc_dma_rx_chan_name; ++}; ++ ++#endif /* defined(SUPPORT_RGX) */ ++ ++#endif /* _TC_DRV_H */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h b/drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h +new file mode 100644 +index 000000000000..97d6ecf4c33c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/tc_drv_internal.h +@@ -0,0 +1,204 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _TC_DRV_INTERNAL_H
++#define _TC_DRV_INTERNAL_H
++
++#include "tc_drv.h"
++
++#include <linux/pci.h>
++
++#if defined(TC_FAKE_INTERRUPTS)
++#define FAKE_INTERRUPT_TIME_MS 20
++#include <linux/timer.h>
++#include <linux/jiffies.h>
++#endif
++
++#define DRV_NAME "tc"
++
++/* This is a guess of what's a minimum sensible size for the ext heap.
++ * It is only used for a warning if the ext heap is smaller, and does
++ * not affect the functional logic in any way
++ */
++#define TC_EXT_MINIMUM_MEM_SIZE (10*1024*1024)
++
++#if defined(SUPPORT_DMA_HEAP)
++ #if defined(SUPPORT_FAKE_SECURE_DMA_HEAP)
++ #define TC_DMA_HEAP_COUNT 3
++ #else
++ #define TC_DMA_HEAP_COUNT 2
++ #endif
++#elif defined(SUPPORT_ION)
++ #if defined(SUPPORT_RGX) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++ #define TC_ION_HEAP_BASE_COUNT 3
++ #else
++ #define TC_ION_HEAP_BASE_COUNT 2
++ #endif
++
++ #if defined(SUPPORT_FAKE_SECURE_ION_HEAP)
++ #define TC_ION_HEAP_COUNT (TC_ION_HEAP_BASE_COUNT + 1)
++ #else
++ #define TC_ION_HEAP_COUNT TC_ION_HEAP_BASE_COUNT
++ #endif
++#endif /* defined(SUPPORT_ION) */
++
++/* Convert a byte offset to a 32 bit dword offset */
++#define DWORD_OFFSET(byte_offset) ((byte_offset)>>2)
++
++/* Convert a BCD-coded byte to decimal, e.g. 0x45 -> 45 */
++#define HEX2DEC(v) ((((v) >> 4) * 10) + ((v) & 0x0F))
++
++enum tc_version_t {
++ TC_INVALID_VERSION,
++ APOLLO_VERSION_TCF_2,
++ APOLLO_VERSION_TCF_5,
++ APOLLO_VERSION_TCF_BONNIE,
++ ODIN_VERSION_TCF_BONNIE,
++ ODIN_VERSION_FPGA,
++ ODIN_VERSION_ORION,
++};
++
++struct tc_interrupt_handler {
++ bool enabled;
++ void (*handler_function)(void *data);
++ void *handler_data;
++};
++
++struct tc_region {
++ resource_size_t base;
++ resource_size_t size;
++};
++
++struct tc_io_region {
++ struct tc_region region;
++ void __iomem *registers;
++};
++
++struct tc_device {
++ struct pci_dev *pdev;
++
++ enum tc_version_t version;
++ bool odin;
++ bool orion;
++
++ int mem_mode;
++
++ struct tc_io_region tcf;
++ struct tc_io_region tcf_pll;
++
++ struct tc_region tc_mem;
++
++ struct platform_device *pdp_dev;
++
++ resource_size_t pdp_heap_mem_base;
++ resource_size_t pdp_heap_mem_size;
++
++ struct platform_device *ext_dev;
++
++ resource_size_t ext_heap_mem_base;
++ resource_size_t ext_heap_mem_size;
++
++ struct platform_device *dma_dev;
++
++ struct dma_chan *dma_chans[2];
++ unsigned int dma_refcnt[2];
++ unsigned int dma_nchan;
++ struct mutex dma_mutex;
++
++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) || \
++ defined(SUPPORT_FAKE_SECURE_DMA_HEAP)
++ resource_size_t secure_heap_mem_base;
++ resource_size_t secure_heap_mem_size;
++#endif
++
++#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
++ int mtrr;
++#endif
++ spinlock_t interrupt_handler_lock;
++ spinlock_t interrupt_enable_lock;
++
++ struct tc_interrupt_handler
++ interrupt_handlers[TC_INTERRUPT_COUNT];
++
++#if defined(TC_FAKE_INTERRUPTS)
++ struct timer_list timer;
++#endif
++
++#if defined(SUPPORT_DMA_HEAP)
++ struct dma_heap *dma_heaps[TC_DMA_HEAP_COUNT];
++#elif defined(SUPPORT_ION)
++#if
(LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ struct ion_device *ion_device; ++#endif ++ struct ion_heap *ion_heaps[TC_ION_HEAP_COUNT]; ++ int ion_heap_count; ++#endif /* defined(SUPPORT_ION) */ ++ ++ bool fbc_bypass; ++ ++ struct dentry *debugfs_tc_dir; ++ struct dentry *debugfs_rogue_name; ++}; ++ ++int tc_mtrr_setup(struct tc_device *tc); ++void tc_mtrr_cleanup(struct tc_device *tc); ++ ++int tc_is_interface_aligned(u32 eyes, u32 clk_taps, u32 train_ack); ++ ++int tc_iopol32_nonzero(u32 mask, void __iomem *addr); ++ ++int request_pci_io_addr(struct pci_dev *pdev, u32 index, ++ resource_size_t offset, resource_size_t length); ++void release_pci_io_addr(struct pci_dev *pdev, u32 index, ++ resource_size_t start, resource_size_t length); ++ ++int setup_io_region(struct pci_dev *pdev, ++ struct tc_io_region *region, u32 index, ++ resource_size_t offset, resource_size_t size); ++ ++#if defined(TC_FAKE_INTERRUPTS) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) ++void tc_irq_fake_wrapper(struct timer_list *t); ++#else ++void tc_irq_fake_wrapper(unsigned long data); ++#endif ++#endif /* defined(TC_FAKE_INTERRUPTS) */ ++ ++#endif /* _TC_DRV_INTERNAL_H */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_odin.c b/drivers/gpu/drm/img-rogue/apollo/tc_odin.c +new file mode 100644 +index 000000000000..4d939994171e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/tc_odin.c +@@ -0,0 +1,2220 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/*
++ * This is a device driver for the odin testchip framework. It creates
++ * platform devices for the pdp and ext sub-devices, and exports functions
++ * to manage the shared interrupt handling
++ */
++
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/io.h>
++#include <linux/delay.h>
++
++#include "tc_drv_internal.h"
++#include "tc_odin.h"
++#if defined(SUPPORT_DMA_HEAP)
++#include "tc_dmabuf_heap.h"
++#elif defined(SUPPORT_ION)
++#include "tc_ion.h"
++#endif
++
++/* Odin (3rd gen TCF FPGA) */
++#include "odin_defs.h"
++#include "odin_regs.h"
++#include "bonnie_tcf.h"
++#include "tc_clocks.h"
++
++/* Orion demo platform */
++#include "orion_defs.h"
++#include "orion_regs.h"
++
++/* Odin/Orion common registers */
++#include "tc_odin_common_regs.h"
++
++/* Macros to set and get register fields */
++#define REG_FIELD_GET(v, str) \
++ (u32)(((v) & (str##_MASK)) >> (str##_SHIFT))
++#define REG_FIELD_SET(v, f, str) \
++ (v = (u32)(((v) & (u32)~(str##_MASK)) | \
++ (u32)(((f) << (str##_SHIFT)) & (str##_MASK))))
++
++#define SAI_STATUS_UNALIGNED 0
++#define SAI_STATUS_ALIGNED 1
++#define SAI_STATUS_ERROR 2
++
++/* Odin/Orion shared masks */
++static const u32 CHANGE_SET_SET_MASK[] = {
++ ODN_CHANGE_SET_SET_MASK,
++ SRS_CHANGE_SET_SET_MASK
++};
++static const u32 CHANGE_SET_SET_SHIFT[] = {
++ ODN_CHANGE_SET_SET_SHIFT,
++ SRS_CHANGE_SET_SET_SHIFT
++};
++static const u32 USER_ID_ID_MASK[] = {
++ ODN_USER_ID_ID_MASK,
++ SRS_USER_ID_ID_MASK
++};
++static const u32 USER_ID_ID_SHIFT[] = {
++ ODN_USER_ID_ID_SHIFT,
++ SRS_USER_ID_ID_SHIFT
++};
++static const u32 USER_BUILD_BUILD_MASK[] = {
++ ODN_USER_BUILD_BUILD_MASK,
++ SRS_USER_BUILD_BUILD_MASK
++};
++static const u32 USER_BUILD_BUILD_SHIFT[] = {
++ ODN_USER_BUILD_BUILD_SHIFT,
++ SRS_USER_BUILD_BUILD_SHIFT
++};
++static const u32 INPUT_CLOCK_SPEED_MIN[] = {
++ ODN_INPUT_CLOCK_SPEED_MIN,
++ SRS_INPUT_CLOCK_SPEED_MIN
++};
++static const u32 INPUT_CLOCK_SPEED_MAX[] = {
++ ODN_INPUT_CLOCK_SPEED_MAX,
++ SRS_INPUT_CLOCK_SPEED_MAX
++};
++static const u32 OUTPUT_CLOCK_SPEED_MIN[] = {
++ ODN_OUTPUT_CLOCK_SPEED_MIN,
++ SRS_OUTPUT_CLOCK_SPEED_MIN
++};
++static const u32 OUTPUT_CLOCK_SPEED_MAX[] = {
++ ODN_OUTPUT_CLOCK_SPEED_MAX,
++ SRS_OUTPUT_CLOCK_SPEED_MAX
++};
++static const u32 VCO_MIN[] = {
++ ODN_VCO_MIN,
++ SRS_VCO_MIN
++};
++static const u32 VCO_MAX[] = {
++ ODN_VCO_MAX,
++ SRS_VCO_MAX
++};
++static const u32 PFD_MIN[] = {
++ ODN_PFD_MIN,
++ SRS_PFD_MIN
++};
++static const u32 PFD_MAX[] = {
++ ODN_PFD_MAX,
++ SRS_PFD_MAX
++};
++
++#if defined(SUPPORT_RGX)
++
++static void spi_write(struct tc_device *tc, u32 off, u32 val)
++{
++ iowrite32(off, tc->tcf.registers
++ + ODN_REG_BANK_TCF_SPI_MASTER
++ + ODN_SPI_MST_ADDR_RDNWR);
++ iowrite32(val, tc->tcf.registers
++ + ODN_REG_BANK_TCF_SPI_MASTER
++ + ODN_SPI_MST_WDATA);
++ iowrite32(0x1, tc->tcf.registers
++ + ODN_REG_BANK_TCF_SPI_MASTER
++ + ODN_SPI_MST_GO);
++ udelay(1000);
++}
++
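++/* Read a DUT register via the SPI master: 0x40000 OR'd into the
++ * ADDR_RDNWR word appears to be the read-not-write flag, and a status
++ * value of 0x08 signals completion; both magic numbers come from this
++ * code rather than a documented register map.
++ */
++static int spi_read(struct tc_device *tc, u32 off, u32 *val)
++{
++ int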
cnt = 0; ++ u32 spi_mst_status; ++ ++ iowrite32(0x40000 | off, tc->tcf.registers ++ + ODN_REG_BANK_TCF_SPI_MASTER ++ + ODN_SPI_MST_ADDR_RDNWR); ++ iowrite32(0x1, tc->tcf.registers ++ + ODN_REG_BANK_TCF_SPI_MASTER ++ + ODN_SPI_MST_GO); ++ udelay(100); ++ ++ do { ++ spi_mst_status = ioread32(tc->tcf.registers ++ + ODN_REG_BANK_TCF_SPI_MASTER ++ + ODN_SPI_MST_STATUS); ++ ++ if (cnt++ > 10000) { ++ dev_err(&tc->pdev->dev, ++ "%s: Time out reading SPI reg (0x%x)\n", ++ __func__, off); ++ return -1; ++ } ++ ++ } while (spi_mst_status != 0x08); ++ ++ *val = ioread32(tc->tcf.registers ++ + ODN_REG_BANK_TCF_SPI_MASTER ++ + ODN_SPI_MST_RDATA); ++ ++ return 0; ++} ++ ++/* Returns 1 for aligned, 0 for unaligned */ ++static int get_odin_sai_status(struct tc_device *tc, int bank) ++{ ++ void __iomem *bank_addr = tc->tcf.registers ++ + ODN_REG_BANK_SAI_RX_DDR(bank); ++ void __iomem *reg_addr; ++ u32 eyes; ++ u32 clk_taps; ++ u32 train_ack; ++ ++ reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_EYES; ++ eyes = ioread32(reg_addr); ++ ++ reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_CLK_TAPS; ++ clk_taps = ioread32(reg_addr); ++ ++ reg_addr = bank_addr + ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK; ++ train_ack = ioread32(reg_addr); ++ ++#if 0 /* enable this to get debug info if the board is not aligning */ ++ dev_info(&tc->pdev->dev, ++ "odin bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n", ++ bank, eyes, clk_taps, train_ack); ++#endif ++ ++ if (tc_is_interface_aligned(eyes, clk_taps, train_ack)) ++ return SAI_STATUS_ALIGNED; ++ ++ dev_warn(&tc->pdev->dev, "odin bank %d is unaligned\n", bank); ++ return SAI_STATUS_UNALIGNED; ++} ++ ++/* Read the odin multi clocked bank align status. ++ * Returns 1 for aligned, 0 for unaligned ++ */ ++static int read_odin_mca_status(struct tc_device *tc) ++{ ++ void __iomem *bank_addr = tc->tcf.registers ++ + ODN_REG_BANK_MULTI_CLK_ALIGN; ++ void __iomem *reg_addr = bank_addr + ODN_MCA_DEBUG_MCA_STATUS; ++ u32 mca_status; ++ ++ mca_status = ioread32(reg_addr); ++ ++#if 0 /* Enable this if there are alignment issues */ ++ dev_info(&tc->pdev->dev, ++ "Odin MCA_STATUS = %08x\n", mca_status); ++#endif ++ return mca_status & ODN_ALIGNMENT_FOUND_MASK; ++} ++ ++/* Read the DUT multi clocked bank align status. 
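++ * The DUT's MCA registers are not memory-mapped, so the status is
++ * fetched over the SPI master interface.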
++ * Returns 1 for aligned, 0 for unaligned ++ */ ++static int read_dut_mca_status(struct tc_device *tc) ++{ ++ u32 mca_status; ++ const int mca_status_register_offset = 1; /* not in bonnie_tcf.h */ ++ int spi_address = DWORD_OFFSET(BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN); ++ ++ spi_address = DWORD_OFFSET(BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN) ++ + mca_status_register_offset; ++ ++ spi_read(tc, spi_address, &mca_status); ++ ++#if 0 /* Enable this if there are alignment issues */ ++ dev_info(&tc->pdev->dev, ++ "DUT MCA_STATUS = %08x\n", mca_status); ++#endif ++ return mca_status & 1; /* 'alignment found' status is in bit 1 */ ++} ++ ++/* Returns 1 for aligned, 0 for unaligned */ ++static int get_dut_sai_status(struct tc_device *tc, int bank) ++{ ++ u32 eyes; ++ u32 clk_taps; ++ u32 train_ack; ++ const u32 bank_base = DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_RX_1 ++ + (BONNIE_TCF_OFFSET_SAI_RX_DELTA * bank)); ++ int spi_timeout; ++ ++ spi_timeout = spi_read(tc, bank_base ++ + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_EYES), &eyes); ++ if (spi_timeout) ++ return SAI_STATUS_ERROR; ++ ++ spi_read(tc, bank_base ++ + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_CLK_TAPS), &clk_taps); ++ spi_read(tc, bank_base ++ + DWORD_OFFSET(BONNIE_TCF_OFFSET_SAI_TRAIN_ACK), &train_ack); ++ ++#if 0 /* enable this to get debug info if the board is not aligning */ ++ dev_info(&tc->pdev->dev, ++ "dut bank %d align: eyes=%08x clk_taps=%08x train_ack=%08x\n", ++ bank, eyes, clk_taps, train_ack); ++#endif ++ ++ if (tc_is_interface_aligned(eyes, clk_taps, train_ack)) ++ return SAI_STATUS_ALIGNED; ++ ++ dev_warn(&tc->pdev->dev, "dut bank %d is unaligned\n", bank); ++ return SAI_STATUS_UNALIGNED; ++} ++ ++/* ++ * Returns the divider group register fields for the specified counter value. ++ * See Xilinx Application Note xapp888. ++ */ ++static void odin_mmcm_reg_param_calc(u32 value, u32 *low, u32 *high, ++ u32 *edge, u32 *no_count) ++{ ++ if (value == 1U) { ++ *no_count = 1U; ++ *edge = 0; ++ *high = 0; ++ *low = 0; ++ } else { ++ *no_count = 0; ++ *edge = value % 2U; ++ *high = value >> 1; ++ *low = (value + *edge) >> 1U; ++ } ++} ++ ++/* ++ * Returns the MMCM Input Divider, FB Multiplier and Output Divider values for ++ * the specified input frequency and target output frequency. ++ * Function doesn't support fractional values for multiplier and output divider ++ * As per Xilinx 7 series FPGAs clocking resources user guide, aims for highest ++ * VCO and smallest D and M. ++ * Configured for Xilinx Virtex7 speed grade 2. 
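++ * The first pass below walks the input divider D upwards and, for each
++ * D, the multiplier M downwards from the highest legal VCO, returning
++ * the first exact integer solution; if none exists, a brute-force scan
++ * picks the closest achievable frequency not above the target.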
++ */
++static int odin_mmcm_counter_calc(struct device *dev,
++ u32 freq_input, u32 freq_output,
++ u32 *d, u32 *m, u32 *o)
++{
++ u32 tcver = tc_odin_subvers(dev);
++ u32 best_diff, d_best, m_best, o_best;
++ u32 m_min, m_max, m_ideal;
++ u32 d_cur, m_cur, o_cur;
++ u32 d_min, d_max;
++
++ /*
++ * Check specified input frequency is within range
++ */
++ if (freq_input < INPUT_CLOCK_SPEED_MIN[tcver]) {
++ dev_err(dev, "Input frequency (%u hz) below minimum supported value (%u hz)\n",
++ freq_input, INPUT_CLOCK_SPEED_MIN[tcver]);
++ return -EINVAL;
++ }
++ if (freq_input > INPUT_CLOCK_SPEED_MAX[tcver]) {
++ dev_err(dev, "Input frequency (%u hz) above maximum supported value (%u hz)\n",
++ freq_input, INPUT_CLOCK_SPEED_MAX[tcver]);
++ return -EINVAL;
++ }
++
++ /*
++ * Check specified target frequency is within range
++ */
++ if (freq_output < OUTPUT_CLOCK_SPEED_MIN[tcver]) {
++ dev_err(dev, "Output frequency (%u hz) below minimum supported value (%u hz)\n",
++ freq_output, OUTPUT_CLOCK_SPEED_MIN[tcver]);
++ return -EINVAL;
++ }
++ if (freq_output > OUTPUT_CLOCK_SPEED_MAX[tcver]) {
++ dev_err(dev, "Output frequency (%u hz) above maximum supported value (%u hz)\n",
++ freq_output, OUTPUT_CLOCK_SPEED_MAX[tcver]);
++ return -EINVAL;
++ }
++
++ /*
++ * Calculate min and max for Input Divider.
++ * Refer Xilinx 7 series FPGAs clocking resources user guide
++ * equation 3-6 and 3-7
++ */
++ d_min = DIV_ROUND_UP(freq_input, PFD_MAX[tcver]);
++ d_max = min(freq_input/PFD_MIN[tcver], (u32)ODN_DREG_VALUE_MAX);
++
++ /*
++ * Calculate min and max for Feedback Multiplier.
++ * Refer Xilinx 7 series FPGAs clocking resources user guide.
++ * equation 3-8 and 3-9
++ */
++ m_min = DIV_ROUND_UP((VCO_MIN[tcver] * d_min), freq_input);
++ m_max = min(((VCO_MAX[tcver] * d_max) / freq_input),
++ (u32)ODN_MREG_VALUE_MAX);
++
++ for (d_cur = d_min; d_cur <= d_max; d_cur++) {
++ /*
++ * Refer Xilinx 7 series FPGAs clocking resources user guide.
++ * equation 3-10
++ */
++ m_ideal = min(((d_cur * VCO_MAX[tcver])/freq_input), m_max);
++
++ for (m_cur = m_ideal; m_cur >= m_min; m_cur -= 1) {
++ /*
++ * Skip if VCO for given 'm' and 'd' value is not an
++ * integer since fractional component is not supported
++ */
++ if (((freq_input * m_cur) % d_cur) != 0)
++ continue;
++
++ /*
++ * Skip if divider for given 'm' and 'd' value is not
++ * an integer since fractional component is not
++ * supported
++ */
++ if ((freq_input * m_cur) % (d_cur * freq_output) != 0)
++ continue;
++
++ /*
++ * Calculate output divider value.
++ */
++ o_cur = (freq_input * m_cur)/(d_cur * freq_output);
++
++ *d = d_cur;
++ *m = m_cur;
++ *o = o_cur;
++ return 0;
++ }
++ }
++
++ /*
++ * Failed to find exact optimal solution with high VCO. Brute-force find
++ * a suitable config, again prioritising high VCO, to get lowest jitter
++ */
++ d_min = 1; d_max = (u32)ODN_DREG_VALUE_MAX;
++ m_min = 1; m_max = (u32)ODN_MREG_VALUE_MAX;
++ best_diff = 0xFFFFFFFF;
++
++ for (d_cur = d_min; d_cur <= d_max; d_cur++) {
++ for (m_cur = m_max; m_cur >= m_min; m_cur -= 1) {
++ u32 pfd, vco, o_avg, o_min, o_max;
++
++ pfd = freq_input / d_cur;
++ vco = pfd * m_cur;
++
++ if (pfd < PFD_MIN[tcver])
++ continue;
++
++ if (pfd > PFD_MAX[tcver])
++ continue;
++
++ if (vco < VCO_MIN[tcver])
++ continue;
++
++ if (vco > VCO_MAX[tcver])
++ continue;
++
++ /*
++ * A range of -1/+3 around o_avg gives us 100kHz granularity.
++ * It can be extended further.
++ */
++ o_avg = vco / freq_output;
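++ /* e.g. a 1200 MHz VCO and a 165 MHz target give o_avg = 7, so
++ * output dividers 6..10 (candidate frequencies from 200 MHz
++ * down to 120 MHz) are considered; candidates above the target
++ * are skipped below. (Illustrative numbers only.)
++ */
++ o_min = (o_avg >= 2) ?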
(o_avg - 1) : 1; ++ o_max = o_avg + 3; ++ if (o_max > (u32)ODN_OREG_VALUE_MAX) ++ o_max = (u32)ODN_OREG_VALUE_MAX; ++ ++ for (o_cur = o_min; o_cur <= o_max; o_cur++) { ++ u32 freq_cur, diff_cur; ++ ++ freq_cur = vco / o_cur; ++ ++ if (freq_cur > freq_output) ++ continue; ++ ++ diff_cur = freq_output - freq_cur; ++ ++ if (diff_cur == 0) { ++ /* Found an exact match */ ++ *d = d_cur; ++ *m = m_cur; ++ *o = o_cur; ++ return 0; ++ } ++ ++ if (diff_cur < best_diff) { ++ best_diff = diff_cur; ++ d_best = d_cur; ++ m_best = m_cur; ++ o_best = o_cur; ++ } ++ } ++ } ++ } ++ ++ if (best_diff != 0xFFFFFFFF) { ++ dev_warn(dev, "Odin: Found similar freq of %u Hz\n", freq_output - best_diff); ++ *d = d_best; ++ *m = m_best; ++ *o = o_best; ++ return 0; ++ } ++ ++ dev_err(dev, "Odin: Unable to find integer values for d, m and o for requested frequency (%u)\n", ++ freq_output); ++ ++ return -ERANGE; ++} ++ ++static int odin_fpga_set_dut_core_clk(struct tc_device *tc, ++ u32 input_clk, u32 output_clk) ++{ ++ int err = 0; ++ u32 in_div, mul, out_div; ++ u32 high_time, low_time, edge, no_count; ++ u32 value; ++ void __iomem *base = tc->tcf.registers; ++ void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK; ++ struct device *dev = &tc->pdev->dev; ++ ++ err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div, ++ &mul, &out_div); ++ if (err != 0) ++ return err; ++ ++ /* Put DUT into reset */ ++ iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK, ++ base + ODN_CORE_EXTERNAL_RESETN); ++ msleep(20); ++ ++ /* Put DUT Core MMCM into reset */ ++ iowrite32(ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK, ++ base + ODN_CORE_CLK_GEN_RESET); ++ msleep(20); ++ ++ /* Calculate the register fields for output divider */ ++ odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* Read-modify-write the required fields to output divider register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME); ++ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER1); ++ ++ /* Read-modify-write the required fields to output divider register 2 */ ++ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2); ++ REG_FIELD_SET(value, edge, ++ ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE); ++ REG_FIELD_SET(value, no_count, ++ ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT); ++ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_OUT_DIVIDER2); ++ ++ /* Calculate the register fields for multiplier */ ++ odin_mmcm_reg_param_calc(mul, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* Read-modify-write the required fields to multiplier register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME); ++ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); ++ ++ /* Read-modify-write the required fields to multiplier register 2 */ ++ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); ++ REG_FIELD_SET(value, edge, ++ ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE); ++ REG_FIELD_SET(value, no_count, ++ ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT); ++ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); ++ ++ /* Calculate the register fields for input divider */ ++ odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* Read-modify-write the required 
fields to input divider register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME); ++ REG_FIELD_SET(value, edge, ++ ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE); ++ REG_FIELD_SET(value, no_count, ++ ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT); ++ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); ++ ++ /* Bring DUT clock MMCM out of reset */ ++ iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET); ++ ++ err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_CORE, ++ base + ODN_CORE_MMCM_LOCK_STATUS); ++ if (err != 0) { ++ dev_err(dev, "MMCM failed to lock for DUT core\n"); ++ return err; ++ } ++ ++ /* Bring DUT out of reset */ ++ iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK | ++ ODN_EXTERNAL_RESETN_DUT_MASK, ++ tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN); ++ msleep(20); ++ ++ dev_info(dev, "DUT core clock set-up successful\n"); ++ ++ return err; ++} ++ ++static int odin_fpga_set_dut_if_clk(struct tc_device *tc, ++ u32 input_clk, u32 output_clk) ++{ ++ int err = 0; ++ u32 in_div, mul, out_div; ++ u32 high_time, low_time, edge, no_count; ++ u32 value; ++ void __iomem *base = tc->tcf.registers; ++ void __iomem *clk_blk_base = base + ODN_REG_BANK_ODN_CLK_BLK; ++ struct device *dev = &tc->pdev->dev; ++ ++ err = odin_mmcm_counter_calc(dev, input_clk, output_clk, ++ &in_div, &mul, &out_div); ++ if (err != 0) ++ return err; ++ ++ /* Put DUT into reset */ ++ iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK, ++ base + ODN_CORE_EXTERNAL_RESETN); ++ msleep(20); ++ ++ /* Put DUT Core MMCM into reset */ ++ iowrite32(ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK, ++ base + ODN_CORE_CLK_GEN_RESET); ++ msleep(20); ++ ++ /* Calculate the register fields for output divider */ ++ odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* Read-modify-write the required fields to output divider register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME); ++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1); ++ ++ /* Read-modify-write the required fields to output divider register 2 */ ++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); ++ REG_FIELD_SET(value, edge, ++ ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE); ++ REG_FIELD_SET(value, no_count, ++ ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT); ++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2); ++ ++ /* Calculate the register fields for multiplier */ ++ odin_mmcm_reg_param_calc(mul, &high_time, &low_time, &edge, &no_count); ++ ++ /* Read-modify-write the required fields to multiplier register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME); ++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); ++ ++ /* Read-modify-write the required fields to multiplier register 2 */ ++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); ++ REG_FIELD_SET(value, edge, ++ ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE); ++ REG_FIELD_SET(value, no_count, ++ ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT); ++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); ++ ++ /* Calculate the register fields for 
input divider */ ++ odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* Read-modify-write the required fields to input divider register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME); ++ REG_FIELD_SET(value, edge, ++ ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE); ++ REG_FIELD_SET(value, no_count, ++ ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT); ++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); ++ ++ /* Bring DUT interface clock MMCM out of reset */ ++ iowrite32(0, tc->tcf.registers + ODN_CORE_CLK_GEN_RESET); ++ ++ err = tc_iopol32_nonzero(ODN_MMCM_LOCK_STATUS_DUT_IF, ++ base + ODN_CORE_MMCM_LOCK_STATUS); ++ if (err != 0) { ++ dev_err(dev, "MMCM failed to lock for DUT IF\n"); ++ return err; ++ } ++ ++ /* Bring DUT out of reset */ ++ iowrite32(ODN_EXTERNAL_RESETN_DUT_SPI_MASK | ++ ODN_EXTERNAL_RESETN_DUT_MASK, ++ tc->tcf.registers + ODN_CORE_EXTERNAL_RESETN); ++ msleep(20); ++ ++ dev_info(dev, "DUT IF clock set-up successful\n"); ++ ++ return err; ++} ++ ++static void odin_fpga_update_dut_clk_freq(struct tc_device *tc, ++ int *core_clock, int *mem_clock, int *clock_multiplex) ++{ ++ struct device *dev = &tc->pdev->dev; ++ int dut_clk_info = 0; ++ int dut_clk_multiplex = 0; ++ ++#if defined(SUPPORT_FPGA_DUT_CLK_INFO) ++ dut_clk_info = ioread32(tc->tcf.registers + ODN_CORE_DUT_CLK_INFO); ++#endif ++ ++ if ((dut_clk_info != 0) && (dut_clk_info != 0xbaadface) ++ && (dut_clk_info != 0xffffffff)) { ++ dev_info(dev, "ODN_DUT_CLK_INFO = %08x\n", dut_clk_info); ++ ++ if (*core_clock == 0) { ++ *core_clock = ((dut_clk_info & ODN_DUT_CLK_INFO_CORE_MASK) ++ >> ODN_DUT_CLK_INFO_CORE_SHIFT) * 1000000; ++ dev_info(dev, "Using register DUT core clock value: %i\n", ++ *core_clock); ++ } else { ++ dev_info(dev, "Using module param DUT core clock value: %i\n", ++ *core_clock); ++ } ++ ++ if (*mem_clock == 0) { ++ *mem_clock = ((dut_clk_info & ODN_DUT_CLK_INFO_MEM_MASK) ++ >> ODN_DUT_CLK_INFO_MEM_SHIFT) * 1000000; ++ dev_info(dev, "Using register DUT mem clock value: %i\n", ++ *mem_clock); ++ } else { ++ dev_info(dev, "Using module param DUT mem clock value: %i\n", ++ *mem_clock); ++ } ++ } else { ++ if (*core_clock == 0) { ++ *core_clock = RGX_TC_CORE_CLOCK_SPEED; ++ dev_info(dev, "Using default DUT core clock value: %i\n", ++ *core_clock); ++ } else { ++ dev_info(dev, "Using module param DUT core clock value: %i\n", ++ *core_clock); ++ } ++ ++ if (*mem_clock == 0) { ++ *mem_clock = RGX_TC_MEM_CLOCK_SPEED; ++ dev_info(dev, "Using default DUT mem clock value: %i\n", ++ *mem_clock); ++ } else { ++ dev_info(dev, "Using module param DUT mem clock value: %i\n", ++ *mem_clock); ++ } ++ } ++ ++#if defined(SUPPORT_FPGA_DUT_MULTIPLEX_INFO) ++ dut_clk_multiplex = ioread32(tc->tcf.registers + ODN_CORE_DUT_MULTIPLX_INFO); ++#endif ++ ++ if ((dut_clk_multiplex != 0) && (dut_clk_multiplex != 0xbaadface) ++ && (dut_clk_multiplex != 0xffffffff)) { ++ dev_info(dev, "ODN_DUT_MULTIPLX_INFO = %08x\n", dut_clk_multiplex); ++ if (*clock_multiplex == 0) { ++ *clock_multiplex = ((dut_clk_multiplex & ODN_DUT_MULTIPLX_INFO_MEM_MASK) ++ >> ODN_DUT_MULTIPLX_INFO_MEM_SHIFT); ++ dev_info(dev, "Using register DUT clock multiplex: %i\n", ++ *clock_multiplex); ++ } else { ++ dev_info(dev, "Using module param DUT clock multiplex: %i\n", ++ *clock_multiplex); ++ } ++ } else { ++ if (*clock_multiplex == 0) { ++ *clock_multiplex = 
RGX_TC_CLOCK_MULTIPLEX;
++ dev_info(dev, "Using default DUT clock multiplex: %i\n",
++ *clock_multiplex);
++ } else {
++ dev_info(dev, "Using module param DUT clock multiplex: %i\n",
++ *clock_multiplex);
++ }
++ }
++}
++
++static int odin_hard_reset_fpga(struct tc_device *tc,
++ int *core_clock, int *mem_clock, int *clock_multiplex)
++{
++ int err = 0;
++
++ odin_fpga_update_dut_clk_freq(tc, core_clock, mem_clock, clock_multiplex);
++
++ err = odin_fpga_set_dut_core_clk(tc, ODN_INPUT_CLOCK_SPEED, *core_clock);
++ if (err != 0)
++ goto err_out;
++
++ err = odin_fpga_set_dut_if_clk(tc, ODN_INPUT_CLOCK_SPEED, *mem_clock);
++
++err_out:
++ return err;
++}
++
++static int odin_hard_reset_bonnie(struct tc_device *tc)
++{
++ int reset_cnt = 0;
++ bool aligned = false;
++ int alignment_found;
++
++ msleep(100);
++
++ /* It is essential to do an SPI reset once on power-up before
++ * doing any DUT reads via the SPI interface.
++ */
++ iowrite32(1, tc->tcf.registers /* set bit 1 low */
++ + ODN_CORE_EXTERNAL_RESETN);
++ msleep(20);
++
++ iowrite32(3, tc->tcf.registers /* set bit 1 high */
++ + ODN_CORE_EXTERNAL_RESETN);
++ msleep(20);
++
++ while (!aligned && (reset_cnt < 20)) {
++ int bank;
++
++ /* Reset the DUT to allow the SAI to retrain */
++ iowrite32(2, /* set bit 0 low */
++ tc->tcf.registers
++ + ODN_CORE_EXTERNAL_RESETN);
++
++ /* Hold the DUT in reset for 50ms */
++ msleep(50);
++
++ /* Take the DUT out of reset */
++ iowrite32(3, /* set bit 0 hi */
++ tc->tcf.registers
++ + ODN_CORE_EXTERNAL_RESETN);
++ reset_cnt++;
++
++ /* Wait 200ms for the DUT to stabilise */
++ msleep(200);
++
++ /* Check the odin Multi Clocked bank Align status */
++ alignment_found = read_odin_mca_status(tc);
++ dev_info(&tc->pdev->dev,
++ "Odin mca_status indicates %s\n",
++ (alignment_found)?"aligned":"UNALIGNED");
++
++ /* Check the DUT MCA status */
++ alignment_found = read_dut_mca_status(tc);
++ dev_info(&tc->pdev->dev,
++ "DUT mca_status indicates %s\n",
++ (alignment_found)?"aligned":"UNALIGNED");
++
++ /* If all banks have aligned then the reset was successful */
++ for (bank = 0; bank < 10; bank++) {
++ int dut_aligned = 0;
++ int odin_aligned = 0;
++
++ odin_aligned = get_odin_sai_status(tc, bank);
++ dut_aligned = get_dut_sai_status(tc, bank);
++
++ if (dut_aligned == SAI_STATUS_ERROR)
++ return SAI_STATUS_ERROR;
++
++ if (!dut_aligned || !odin_aligned) {
++ aligned = false;
++ break;
++ }
++ aligned = true;
++ }
++
++ if (aligned) {
++ dev_info(&tc->pdev->dev,
++ "all banks have aligned\n");
++ break;
++ }
++
++ dev_warn(&tc->pdev->dev,
++ "Warning- not all banks have aligned. Trying again.\n");
++ }
++
++ if (!aligned)
++ dev_warn(&tc->pdev->dev, "odin_hard_reset failed\n");
++
++ return (aligned) ? 0 : 1; /* return 0 for success */
++}
++
++static void odin_set_mem_latency(struct tc_device *tc,
++ int mem_latency, int mem_wresp_latency)
++{
++ u32 regval = 0;
++
++ if (mem_latency <= 4) {
++ /* The total memory read latency cannot be lower than the
++ * amount of cycles consumed by the hardware to do a read.
++ * Set the memory read latency to 0 cycles.
++ */
++ mem_latency = 0;
++ } else {
++ mem_latency -= 4;
++
++ dev_info(&tc->pdev->dev,
++ "Setting memory read latency to %i cycles\n",
++ mem_latency);
++ }
++
++ if (mem_wresp_latency <= 2) {
++ /* The total memory write latency cannot be lower than the
++ * amount of cycles consumed by the hardware to do a write.
++ * Set the memory write latency to 0 cycles.
++ */ ++ mem_wresp_latency = 0; ++ } else { ++ mem_wresp_latency -= 2; ++ ++ dev_info(&tc->pdev->dev, ++ "Setting memory write response latency to %i cycles\n", ++ mem_wresp_latency); ++ } ++ ++ mem_latency |= mem_wresp_latency << 16; ++ ++ spi_write(tc, 0x1009, mem_latency); ++ ++ if (spi_read(tc, 0x1009, ®val) != 0) { ++ dev_err(&tc->pdev->dev, ++ "Failed to read back memory latency register"); ++ return; ++ } ++ ++ if (mem_latency != regval) { ++ dev_err(&tc->pdev->dev, ++ "Memory latency register doesn't match requested value (actual: %#08x, expected: %#08x)\n", ++ regval, mem_latency); ++ } ++} ++ ++static int orion_set_dut_core_clk(struct tc_device *tc, ++ u32 input_clk, ++ u32 output_clk) ++{ ++ void __iomem *base = tc->tcf.registers; ++ void __iomem *clk_blk_base = base + SRS_REG_BANK_ODN_CLK_BLK; ++ struct device *dev = &tc->pdev->dev; ++ u32 high_time, low_time, edge, no_count; ++ u32 in_div, mul, out_div; ++ u32 value; ++ int err; ++ ++ err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div, ++ &mul, &out_div); ++ if (err != 0) ++ return err; ++ ++ /* Put DUT into reset */ ++ iowrite32(0, base + SRS_CORE_DUT_SOFT_RESETN); ++ msleep(20); ++ ++ /* Put DUT Core MMCM into reset */ ++ iowrite32(SRS_CLK_GEN_RESET_DUT_CORE_MMCM_MASK, ++ base + SRS_CORE_CLK_GEN_RESET); ++ msleep(20); ++ ++ /* Calculate the register fields for input divider */ ++ odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* Read-modify-write the required fields to input divider register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME); ++ REG_FIELD_SET(value, edge, ++ ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE); ++ REG_FIELD_SET(value, no_count, ++ ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT); ++ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_IN_DIVIDER1); ++ ++ /* Calculate the register fields for multiplier */ ++ odin_mmcm_reg_param_calc(mul, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* Read-modify-write the required fields to multiplier register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME); ++ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER1); ++ ++ /* Read-modify-write the required fields to multiplier register 2 */ ++ value = ioread32(clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); ++ REG_FIELD_SET(value, edge, ++ ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE); ++ REG_FIELD_SET(value, no_count, ++ ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT); ++ iowrite32(value, clk_blk_base + ODN_DUT_CORE_CLK_MULTIPLIER2); ++ ++ /* Calculate the register fields for output divider */ ++ odin_mmcm_reg_param_calc(out_div, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* ++ * Read-modify-write the required fields to ++ * core output divider register 1 ++ */ ++ value = ioread32(clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER1); ++ REG_FIELD_SET(value, high_time, ++ SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME); ++ iowrite32(value, clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER1); ++ ++ /* ++ * Read-modify-write the required fields to core output ++ * divider register 2 ++ */ ++ value = ioread32(clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER2); ++ REG_FIELD_SET(value, edge, ++ 
SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE); ++ REG_FIELD_SET(value, no_count, ++ SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT); ++ iowrite32(value, clk_blk_base + SRS_DUT_CORE_CLK_OUT_DIVIDER2); ++ ++ /* ++ * Read-modify-write the required fields to ++ * reference output divider register 1 ++ */ ++ value = ioread32(clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER1); ++ REG_FIELD_SET(value, high_time, ++ SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME); ++ iowrite32(value, clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER1); ++ ++ /* ++ * Read-modify-write the required fields to ++ * reference output divider register 2 ++ */ ++ value = ioread32(clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER2); ++ REG_FIELD_SET(value, edge, ++ SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE); ++ REG_FIELD_SET(value, no_count, ++ SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT); ++ iowrite32(value, clk_blk_base + SRS_DUT_REF_CLK_OUT_DIVIDER2); ++ ++ /* Bring DUT IF clock MMCM out of reset */ ++ iowrite32(0, tc->tcf.registers + SRS_CORE_CLK_GEN_RESET); ++ ++ err = tc_iopol32_nonzero(SRS_MMCM_LOCK_STATUS_DUT_CORE_MASK, ++ base + SRS_CORE_MMCM_LOCK_STATUS); ++ if (err != 0) { ++ dev_err(dev, "MMCM failed to lock for DUT core\n"); ++ return err; ++ } ++ ++ /* Bring DUT out of reset */ ++ iowrite32(SRS_DUT_SOFT_RESETN_EXTERNAL_MASK, ++ tc->tcf.registers + SRS_CORE_DUT_SOFT_RESETN); ++ msleep(20); ++ ++ dev_info(dev, "DUT core clock set-up successful\n"); ++ ++ return err; ++} ++ ++static int orion_set_dut_sys_mem_clk(struct tc_device *tc, ++ u32 input_clk, ++ u32 output_clk) ++{ ++ void __iomem *base = tc->tcf.registers; ++ void __iomem *clk_blk_base = base + SRS_REG_BANK_ODN_CLK_BLK; ++ struct device *dev = &tc->pdev->dev; ++ u32 high_time, low_time, edge, no_count; ++ u32 in_div, mul, out_div; ++ u32 value; ++ int err; ++ ++ err = odin_mmcm_counter_calc(dev, input_clk, output_clk, &in_div, ++ &mul, &out_div); ++ if (err != 0) ++ return err; ++ ++ /* Put DUT into reset */ ++ iowrite32(0, base + SRS_CORE_DUT_SOFT_RESETN); ++ msleep(20); ++ ++ /* Put DUT Core MMCM into reset */ ++ iowrite32(SRS_CLK_GEN_RESET_DUT_IF_MMCM_MASK, ++ base + SRS_CORE_CLK_GEN_RESET); ++ msleep(20); ++ ++ /* Calculate the register fields for input divider */ ++ odin_mmcm_reg_param_calc(in_div, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* Read-modify-write the required fields to input divider register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME); ++ REG_FIELD_SET(value, edge, ++ ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE); ++ REG_FIELD_SET(value, no_count, ++ ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT); ++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_IN_DIVIDER1); ++ ++ /* Calculate the register fields for multiplier */ ++ odin_mmcm_reg_param_calc(mul, &high_time, &low_time, ++ &edge, &no_count); ++ ++ /* Read-modify-write the required fields to multiplier register 1 */ ++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); ++ REG_FIELD_SET(value, high_time, ++ ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME); ++ REG_FIELD_SET(value, low_time, ++ ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME); ++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER1); ++ ++ /* Read-modify-write the required fields to multiplier register 2 */ ++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2); ++ REG_FIELD_SET(value, edge, ++ 
ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE);
++ REG_FIELD_SET(value, no_count,
++ ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT);
++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_MULTIPLIER2);
++
++ /* Calculate the register fields for output divider */
++ odin_mmcm_reg_param_calc(out_div, &high_time, &low_time,
++ &edge, &no_count);
++
++ /* Read-modify-write the required fields to output divider register 1 */
++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1);
++ REG_FIELD_SET(value, high_time,
++ ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME);
++ REG_FIELD_SET(value, low_time,
++ ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME);
++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER1);
++
++ /* Read-modify-write the required fields to output divider register 2 */
++ value = ioread32(clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2);
++ REG_FIELD_SET(value, edge,
++ ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE);
++ REG_FIELD_SET(value, no_count,
++ ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT);
++ iowrite32(value, clk_blk_base + ODN_DUT_IFACE_CLK_OUT_DIVIDER2);
++
++ /*
++ * New to Orion, these registers are undocumented in the TRM; it is
++ * assumed that high_time, low_time, edge and no_count sit in the same
++ * bit fields as in the previous two registers. Even though these
++ * registers seem to be undocumented, setting them is essential for the
++ * DUT not to show abnormal behaviour, like the firmware jumping to
++ * random addresses.
++ */
++
++ /*
++ * Read-modify-write the required fields to memory clock output divider
++ * register 1
++ */
++ value = ioread32(clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER1);
++ REG_FIELD_SET(value, high_time,
++ SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME);
++ REG_FIELD_SET(value, low_time,
++ SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME);
++ iowrite32(value, clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER1);
++
++ /*
++ * Read-modify-write the required fields to memory clock output divider
++ * register 2
++ */
++ value = ioread32(clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER2);
++ REG_FIELD_SET(value, edge,
++ SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE);
++ REG_FIELD_SET(value, no_count,
++ SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT);
++ iowrite32(value, clk_blk_base + SRS_DUT_MEM_CLK_OUT_DIVIDER2);
++
++ /* Bring DUT clock MMCM out of reset */
++ iowrite32(0, tc->tcf.registers + SRS_CORE_CLK_GEN_RESET);
++
++ err = tc_iopol32_nonzero(SRS_MMCM_LOCK_STATUS_DUT_IF_MASK,
++ base + SRS_CORE_MMCM_LOCK_STATUS);
++ if (err != 0) {
++ dev_err(dev, "MMCM failed to lock for DUT IF\n");
++ return err;
++ }
++
++ /* Bring DUT out of reset */
++ iowrite32(SRS_DUT_SOFT_RESETN_EXTERNAL_MASK,
++ tc->tcf.registers + SRS_CORE_DUT_SOFT_RESETN);
++ msleep(20);
++
++ dev_info(dev, "DUT IF clock set-up successful\n");
++
++ return err;
++}
++
++
++static int orion_hard_reset(struct tc_device *tc, int *core_clock, int *mem_clock)
++{
++ int err;
++ struct device *dev = &tc->pdev->dev;
++
++ if (*core_clock == 0) {
++ *core_clock = RGX_TC_CORE_CLOCK_SPEED;
++ dev_info(dev, "Using default DUT core clock value: %i\n",
++ *core_clock);
++ } else {
++ dev_info(dev, "Using module param DUT core clock value: %i\n",
++ *core_clock);
++ }
++
++ if (*mem_clock == 0) {
++ *mem_clock = RGX_TC_MEM_CLOCK_SPEED;
++ dev_info(dev, "Using default DUT mem clock value: %i\n",
++ *mem_clock);
++ } else {
++ dev_info(dev, "Using module param DUT mem clock value: %i\n",
++ *mem_clock);
++ }
++
++ err = orion_set_dut_core_clk(tc, SRS_INPUT_CLOCK_SPEED, *core_clock);
++ if (err != 0)
++ goto err_out;
++
++ err = orion_set_dut_sys_mem_clk(tc, SRS_INPUT_CLOCK_SPEED, *mem_clock);
++ 
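++ /*
++ * For both calls above, odin_mmcm_counter_calc() is assumed to choose
++ * (in_div, mul, out_div) to satisfy the usual MMCM relation
++ *
++ * output_clk = input_clk * mul / (in_div * out_div)
++ *
++ * so, for example, a 100 MHz input and a 50 MHz target could be met
++ * with in_div = 1, mul = 10 and out_div = 20 (100 * 10 / 20 = 50).
++ * These divider values are illustrative only; the helper may pick a
++ * different valid triple.
++ */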
++err_out:
++ return err;
++}
++
++#endif /* defined(SUPPORT_RGX) */
++
++/* Do a hard reset on the DUT */
++static int odin_hard_reset(struct tc_device *tc, int *core_clock, int *mem_clock,
++ int *clock_multiplex)
++{
++#if defined(SUPPORT_RGX)
++ if (tc->version == ODIN_VERSION_TCF_BONNIE)
++ return odin_hard_reset_bonnie(tc);
++ if (tc->version == ODIN_VERSION_FPGA)
++ return odin_hard_reset_fpga(tc, core_clock, mem_clock, clock_multiplex);
++ if (tc->version == ODIN_VERSION_ORION)
++ return orion_hard_reset(tc, core_clock, mem_clock);
++
++ dev_err(&tc->pdev->dev, "Invalid Odin version\n");
++ return 1;
++#else /* defined(SUPPORT_RGX) */
++ return 0;
++#endif /* defined(SUPPORT_RGX) */
++}
++
++static void odin_set_mem_mode_lma(struct tc_device *tc)
++{
++ u32 val;
++
++ if (tc->version != ODIN_VERSION_FPGA)
++ return;
++
++ /* Enable memory offset to be applied to DUT and PDPs */
++ iowrite32(0x80000A10, tc->tcf.registers + ODN_CORE_DUT_CTRL1);
++
++ /* Apply memory offset to GPU and PDPs to point to DDR memory.
++ * Enable HDMI.
++ */
++ val = (0x4 << ODN_CORE_CONTROL_DUT_OFFSET_SHIFT) |
++ (0x4 << ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT) |
++ (0x4 << ODN_CORE_CONTROL_PDP2_OFFSET_SHIFT) |
++ (0x2 << ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT) |
++ (0x1 << ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT);
++ iowrite32(val, tc->tcf.registers + ODN_CORE_CORE_CONTROL);
++}
++
++static int odin_set_mem_mode(struct tc_device *tc, int mem_mode)
++{
++ switch (mem_mode) {
++ case TC_MEMORY_LOCAL:
++ odin_set_mem_mode_lma(tc);
++ dev_info(&tc->pdev->dev, "Memory mode: TC_MEMORY_LOCAL\n");
++ break;
++ default:
++ dev_err(&tc->pdev->dev, "unsupported memory mode = %d\n",
++ mem_mode);
++ return -EINVAL;
++ }
++
++ tc->mem_mode = mem_mode;
++
++ return 0;
++}
++
++static u64 odin_get_pdp_dma_mask(struct tc_device *tc)
++{
++ /* Does not access system memory, so there is no DMA limitation */
++ if ((tc->mem_mode == TC_MEMORY_LOCAL) ||
++ (tc->mem_mode == TC_MEMORY_HYBRID))
++ return DMA_BIT_MASK(64);
++
++ return DMA_BIT_MASK(32);
++}
++
++#if defined(SUPPORT_RGX)
++static u64 odin_get_rogue_dma_mask(struct tc_device *tc)
++{
++ /* Does not access system memory, so there is no DMA limitation */
++ if (tc->mem_mode == TC_MEMORY_LOCAL)
++ return DMA_BIT_MASK(64);
++
++ return DMA_BIT_MASK(32);
++}
++#endif /* defined(SUPPORT_RGX) */
++
++static void odin_set_fbc_bypass(struct tc_device *tc, bool fbc_bypass)
++{
++ u32 val;
++
++ /* Register field is present whether TC has PFIM support or not */
++ val = ioread32(tc->tcf.registers + ODN_CORE_DUT_CTRL1);
++ REG_FIELD_SET(val, fbc_bypass ?
0x1 : 0x0,
++ ODN_DUT_CTRL1_FBDC_BYPASS);
++ iowrite32(val, tc->tcf.registers + ODN_CORE_DUT_CTRL1);
++
++ tc->fbc_bypass = fbc_bypass;
++}
++
++static int odin_hw_init(struct tc_device *tc, int *core_clock,
++ int *mem_clock, int *clock_multiplex, int mem_latency,
++ int mem_wresp_latency, int mem_mode,
++ bool fbc_bypass)
++{
++ int err;
++
++ err = odin_hard_reset(tc, core_clock, mem_clock, clock_multiplex);
++ if (err) {
++ dev_err(&tc->pdev->dev, "Failed to initialise Odin\n");
++ goto err_out;
++ }
++
++ err = odin_set_mem_mode(tc, mem_mode);
++ if (err)
++ goto err_out;
++
++ odin_set_fbc_bypass(tc, fbc_bypass);
++
++#if defined(SUPPORT_RGX)
++ if (tc->version == ODIN_VERSION_FPGA)
++ odin_set_mem_latency(tc, mem_latency, mem_wresp_latency);
++#endif /* defined(SUPPORT_RGX) */
++
++err_out:
++ return err;
++}
++
++static int odin_enable_irq(struct tc_device *tc)
++{
++ int err = 0;
++
++#if defined(TC_FAKE_INTERRUPTS)
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
++ timer_setup(&tc->timer, tc_irq_fake_wrapper, 0);
++#else
++ setup_timer(&tc->timer, tc_irq_fake_wrapper, (unsigned long)tc);
++#endif
++ mod_timer(&tc->timer,
++ jiffies + msecs_to_jiffies(FAKE_INTERRUPT_TIME_MS));
++#else
++ iowrite32(0, tc->tcf.registers +
++ common_reg_offset(tc, CORE_INTERRUPT_ENABLE));
++ iowrite32(0xffffffff, tc->tcf.registers +
++ common_reg_offset(tc, CORE_INTERRUPT_CLR));
++
++ dev_info(&tc->pdev->dev,
++ "Registering IRQ %d for use by %s\n",
++ tc->pdev->irq,
++ odin_tc_name(tc));
++
++ err = request_irq(tc->pdev->irq, odin_irq_handler,
++ IRQF_SHARED, DRV_NAME, tc);
++
++ if (err) {
++ dev_err(&tc->pdev->dev,
++ "Error - IRQ %d failed to register\n",
++ tc->pdev->irq);
++ } else {
++ dev_info(&tc->pdev->dev,
++ "IRQ %d was successfully registered for use by %s\n",
++ tc->pdev->irq,
++ odin_tc_name(tc));
++ }
++#endif
++ return err;
++}
++
++static void odin_disable_irq(struct tc_device *tc)
++{
++#if defined(TC_FAKE_INTERRUPTS)
++ del_timer_sync(&tc->timer);
++#else
++ iowrite32(0, tc->tcf.registers +
++ common_reg_offset(tc, CORE_INTERRUPT_ENABLE));
++ iowrite32(0xffffffff, tc->tcf.registers +
++ common_reg_offset(tc, CORE_INTERRUPT_CLR));
++
++ free_irq(tc->pdev->irq, tc);
++#endif
++}
++
++static enum tc_version_t
++odin_detect_daughterboard_version(struct tc_device *tc)
++{
++ u32 reg = ioread32(tc->tcf.registers + ODN_REG_BANK_DB_TYPE_ID);
++ u32 val = reg;
++
++ if (tc->orion)
++ return ODIN_VERSION_ORION;
++
++ val = (val & ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK) >>
++ ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT;
++
++ switch (val) {
++ default:
++ dev_err(&tc->pdev->dev,
++ "Unknown odin version ID type %#x (DB_TYPE_ID: %#08x)\n",
++ val, reg);
++ return TC_INVALID_VERSION;
++ case 1:
++ dev_info(&tc->pdev->dev, "DUT: Bonnie TC\n");
++ return ODIN_VERSION_TCF_BONNIE;
++ case 2:
++ case 3:
++ dev_info(&tc->pdev->dev, "DUT: FPGA\n");
++ return ODIN_VERSION_FPGA;
++ }
++}
++
++static int odin_dev_init(struct tc_device *tc, struct pci_dev *pdev,
++ int pdp_mem_size, int secure_mem_size)
++{
++ int err;
++ u32 val;
++
++ /* Reserve and map the tcf system registers */
++ err = setup_io_region(pdev, &tc->tcf,
++ ODN_SYS_BAR, ODN_SYS_REGS_OFFSET, ODN_SYS_REGS_SIZE);
++ if (err)
++ goto err_out;
++
++ tc->version = odin_detect_daughterboard_version(tc);
++ if (tc->version == TC_INVALID_VERSION) {
++ err = -EIO;
++ goto err_odin_unmap_sys_registers;
++ }
++
++ /* Setup card memory */
++ tc->tc_mem.base = pci_resource_start(pdev, ODN_DDR_BAR);
++ tc->tc_mem.size = pci_resource_len(pdev, ODN_DDR_BAR);
++ 
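++ /*
++ * The size checks and heap carve-up below partition the card memory
++ * as follows (the secure heap exists only when
++ * SUPPORT_FAKE_SECURE_ION_HEAP is defined):
++ *
++ * tc_mem.base
++ * v
++ * +------------------+----------+------------------------+
++ * | ext (rogue) heap | PDP heap | secure heap (optional) |
++ * +------------------+----------+------------------------+
++ * ext_heap_mem_size pdp_mem_size secure_mem_size
++ */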
++ if (tc->tc_mem.size < pdp_mem_size) { ++ dev_err(&pdev->dev, ++ "%s MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu", ++ odin_tc_name(tc), ++ ODN_DDR_BAR, ++ (unsigned long)tc->tc_mem.size, ++ (unsigned long)pdp_mem_size); ++ ++ err = -EIO; ++ goto err_odin_unmap_sys_registers; ++ } ++ ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++ if (tc->tc_mem.size < ++ (pdp_mem_size + secure_mem_size)) { ++ dev_err(&pdev->dev, ++ "Odin MEM region (bar %d) has size of %lu which is smaller than the requested PDP heap of %lu" ++ " plus the requested secure heap size %lu", ++ ODN_DDR_BAR, ++ (unsigned long)tc->tc_mem.size, ++ (unsigned long)pdp_mem_size, ++ (unsigned long)secure_mem_size); ++ err = -EIO; ++ goto err_odin_unmap_sys_registers; ++ } ++#endif ++ ++ err = tc_mtrr_setup(tc); ++ if (err) ++ goto err_odin_unmap_sys_registers; ++ ++ /* Setup ranges for the device heaps */ ++ tc->pdp_heap_mem_size = pdp_mem_size; ++ ++ /* We know ext_heap_mem_size won't underflow as we've compared ++ * tc_mem.size against the pdp_mem_size value earlier ++ */ ++ tc->ext_heap_mem_size = ++ tc->tc_mem.size - tc->pdp_heap_mem_size; ++ ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++ tc->ext_heap_mem_size -= secure_mem_size; ++#endif ++ ++ if (tc->ext_heap_mem_size < TC_EXT_MINIMUM_MEM_SIZE) { ++ dev_warn(&pdev->dev, ++ "%s MEM region (bar 4) has size of %lu, with %lu pdp_mem_size only %lu bytes are left for " ++ "ext device, which looks too small", ++ odin_tc_name(tc), ++ (unsigned long)tc->tc_mem.size, ++ (unsigned long)pdp_mem_size, ++ (unsigned long)tc->ext_heap_mem_size); ++ /* Continue as this is only a 'helpful warning' not a hard ++ * requirement ++ */ ++ } ++ tc->ext_heap_mem_base = tc->tc_mem.base; ++ tc->pdp_heap_mem_base = ++ tc->tc_mem.base + tc->ext_heap_mem_size; ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++ tc->secure_heap_mem_base = tc->pdp_heap_mem_base + ++ tc->pdp_heap_mem_size; ++ tc->secure_heap_mem_size = secure_mem_size; ++#endif ++ ++#if defined(SUPPORT_DMA_HEAP) ++ err = tc_dmabuf_heap_init(tc, ODN_DDR_BAR); ++ if (err) { ++ dev_err(&pdev->dev, "Failed to initialise ION\n"); ++ goto err_odin_unmap_sys_registers; ++ } ++#elif defined(SUPPORT_ION) ++ err = tc_ion_init(tc, ODN_DDR_BAR); ++ if (err) { ++ dev_err(&pdev->dev, "Failed to initialise ION\n"); ++ goto err_odin_unmap_sys_registers; ++ } ++#endif /* defined(SUPPORT_ION) */ ++ ++ /* CDMA initialisation */ ++ val = ioread32(tc->tcf.registers + ODN_CORE_SUPPORTED_FEATURES); ++ tc->dma_nchan = REG_FIELD_GET(val, ++ ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS); ++ tc->dma_nchan++; ++ dev_info(&tc->pdev->dev, "Odin RTL has %u DMA(s)\n", tc->dma_nchan); ++ mutex_init(&tc->dma_mutex); ++ ++ if (tc->odin) { ++ val = ioread32(tc->tcf.registers + ++ ODN_CORE_REL); ++ dev_info(&pdev->dev, "%s = 0x%08x\n", ++ "ODN_CORE_REL", val); ++ } else { ++ val = ioread32(tc->tcf.registers + ++ SRS_CORE_REVISION); ++ dev_info(&pdev->dev, "%s = 0x%08x\n", ++ "SRS_CORE_REVISION", val); ++ } ++ ++ val = ioread32(tc->tcf.registers + ++ common_reg_offset(tc, CORE_CHANGE_SET)); ++ dev_info(&pdev->dev, "%s = 0x%08x\n", ++ common_reg_name(tc, CORE_CHANGE_SET), val); ++ ++ val = ioread32(tc->tcf.registers + ++ common_reg_offset(tc, CORE_USER_ID)); ++ dev_info(&pdev->dev, "%s = 0x%08x\n", ++ common_reg_name(tc, CORE_USER_ID), val); ++ ++ val = ioread32(tc->tcf.registers + ++ common_reg_offset(tc, CORE_USER_BUILD)); ++ dev_info(&pdev->dev, "%s = 0x%08x\n", ++ common_reg_name(tc, CORE_USER_BUILD), val); ++ ++err_out: ++ return err; ++ 
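++ /*
++ * Note: common_reg_offset()/common_reg_name(), used for the register
++ * reads above, dispatch on tc->odin: e.g. common_reg_offset(tc,
++ * CORE_USER_ID) resolves to ODN_CORE_USER_ID on Odin and to
++ * SRS_CORE_USER_ID on Orion (see the COMMON_REG_ENTRY table in
++ * tc_odin_common_regs.h).
++ */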
++err_odin_unmap_sys_registers:
++ dev_info(&pdev->dev,
++ "%s: failed - unmapping the io regions.\n", __func__);
++
++ iounmap(tc->tcf.registers);
++ release_pci_io_addr(pdev, ODN_SYS_BAR,
++ tc->tcf.region.base, tc->tcf.region.size);
++ goto err_out;
++}
++
++static void odin_dev_cleanup(struct tc_device *tc)
++{
++#if defined(SUPPORT_DMA_HEAP)
++ tc_dmabuf_heap_deinit(tc, ODN_DDR_BAR);
++#elif defined(SUPPORT_ION)
++ tc_ion_deinit(tc, ODN_DDR_BAR);
++#endif
++
++ tc_mtrr_cleanup(tc);
++
++ iounmap(tc->tcf.registers);
++
++ release_pci_io_addr(tc->pdev,
++ ODN_SYS_BAR,
++ tc->tcf.region.base,
++ tc->tcf.region.size);
++}
++
++static u32 odin_interrupt_id_to_flag(int interrupt_id)
++{
++ switch (interrupt_id) {
++ case TC_INTERRUPT_PDP:
++ return ODN_INTERRUPT_ENABLE_PDP1;
++ case TC_INTERRUPT_EXT:
++ return ODN_INTERRUPT_ENABLE_DUT;
++ case TC_INTERRUPT_PDP2:
++ return ODN_INTERRUPT_ENABLE_PDP2;
++ case TC_INTERRUPT_CDMA:
++ return ODN_INTERRUPT_ENABLE_CDMA;
++ case TC_INTERRUPT_CDMA2:
++ return ODN_INTERRUPT_ENABLE_CDMA2;
++ default:
++ BUG();
++ }
++}
++
++int odin_init(struct tc_device *tc, struct pci_dev *pdev,
++ int *core_clock, int *mem_clock, int *clock_multiplex,
++ int pdp_mem_size, int secure_mem_size,
++ int mem_latency, int mem_wresp_latency, int mem_mode,
++ bool fbc_bypass)
++{
++ int err = 0;
++
++ err = odin_dev_init(tc, pdev, pdp_mem_size, secure_mem_size);
++ if (err) {
++ dev_err(&pdev->dev, "odin_dev_init failed\n");
++ goto err_out;
++ }
++
++ err = odin_hw_init(tc, core_clock, mem_clock, clock_multiplex,
++ mem_latency, mem_wresp_latency, mem_mode,
++ fbc_bypass);
++ if (err) {
++ dev_err(&pdev->dev, "odin_hw_init failed\n");
++ goto err_dev_cleanup;
++ }
++
++ err = odin_enable_irq(tc);
++ if (err) {
++ dev_err(&pdev->dev,
++ "Failed to initialise IRQ\n");
++ goto err_dev_cleanup;
++ }
++
++err_out:
++ return err;
++
++err_dev_cleanup:
++ odin_dev_cleanup(tc);
++ goto err_out;
++}
++
++int odin_cleanup(struct tc_device *tc)
++{
++ /*
++ * Make sure we don't attempt to clean-up after an invalid device.
++ * We'll have already unmapped the PCI i/o space so cannot access
++ * anything now.
++ */ ++ if (tc->version != TC_INVALID_VERSION) { ++ odin_disable_irq(tc); ++ odin_dev_cleanup(tc); ++ } ++ ++ return 0; ++} ++ ++int odin_register_pdp_device(struct tc_device *tc) ++{ ++ int err = 0; ++ resource_size_t reg_start = pci_resource_start(tc->pdev, ODN_SYS_BAR); ++ struct resource pdp_resources_odin[] = { ++ DEFINE_RES_MEM_NAMED(reg_start + ++ ODN_PDP_REGS_OFFSET, /* start */ ++ ODN_PDP_REGS_SIZE, /* size */ ++ "pdp-regs"), ++ DEFINE_RES_MEM_NAMED(reg_start + ++ ODN_PDP2_REGS_OFFSET, /* start */ ++ ODN_PDP2_REGS_SIZE, /* size */ ++ "pdp2-regs"), ++ DEFINE_RES_MEM_NAMED(reg_start + ++ ODN_SYS_REGS_OFFSET + ++ common_reg_offset(tc, REG_BANK_ODN_CLK_BLK) + ++ ODN_PDP_P_CLK_OUT_DIVIDER_REG1, /* start */ ++ ODN_PDP_P_CLK_IN_DIVIDER_REG - ++ ODN_PDP_P_CLK_OUT_DIVIDER_REG1 + 4, /* size */ ++ "pll-regs"), ++ DEFINE_RES_MEM_NAMED(reg_start + ++ ODN_PDP2_PFIM_OFFSET, /* start */ ++ ODN_PDP2_PFIM_SIZE, /* size */ ++ "pfim-regs"), ++ DEFINE_RES_MEM_NAMED(reg_start + ++ ODN_SYS_REGS_OFFSET + ++ ODN_REG_BANK_CORE, /* start */ ++ ODN_CORE_MMCM_LOCK_STATUS + 4, /* size */ ++ "odn-core"), ++ }; ++ ++ struct tc_pdp_platform_data pdata = { ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ .ion_device = tc->ion_device, ++ .ion_heap_id = ION_HEAP_TC_PDP, ++#endif ++ .memory_base = tc->tc_mem.base, ++ .pdp_heap_memory_base = tc->pdp_heap_mem_base, ++ .pdp_heap_memory_size = tc->pdp_heap_mem_size, ++ }; ++ struct platform_device_info pdp_device_info = { ++ .parent = &tc->pdev->dev, ++ .name = ODN_DEVICE_NAME_PDP, ++ .id = -2, ++ .data = &pdata, ++ .size_data = sizeof(pdata), ++ .dma_mask = odin_get_pdp_dma_mask(tc), ++ }; ++ ++ pdp_device_info.res = pdp_resources_odin; ++ pdp_device_info.num_res = ARRAY_SIZE(pdp_resources_odin); ++ ++ tc->pdp_dev = platform_device_register_full(&pdp_device_info); ++ if (IS_ERR(tc->pdp_dev)) { ++ err = PTR_ERR(tc->pdp_dev); ++ dev_err(&tc->pdev->dev, ++ "Failed to register PDP device (%d)\n", err); ++ tc->pdp_dev = NULL; ++ goto err_out; ++ } ++ ++err_out: ++ return err; ++} ++ ++int odin_register_ext_device(struct tc_device *tc) ++{ ++#if defined(SUPPORT_RGX) ++ int err = 0; ++ struct resource odin_rogue_resources[] = { ++ DEFINE_RES_MEM_NAMED(pci_resource_start(tc->pdev, ++ ODN_DUT_SOCIF_BAR), ++ ODN_DUT_SOCIF_SIZE, "rogue-regs"), ++ }; ++ struct tc_rogue_platform_data pdata = { ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ .ion_device = tc->ion_device, ++ .ion_heap_id = ION_HEAP_TC_ROGUE, ++#endif ++ .mem_mode = tc->mem_mode, ++ .tc_memory_base = tc->tc_mem.base, ++ .pdp_heap_memory_base = tc->pdp_heap_mem_base, ++ .pdp_heap_memory_size = tc->pdp_heap_mem_size, ++ .rogue_heap_memory_base = tc->ext_heap_mem_base, ++ .rogue_heap_memory_size = tc->ext_heap_mem_size, ++#if defined(SUPPORT_FAKE_SECURE_ION_HEAP) ++ .secure_heap_memory_base = tc->secure_heap_mem_base, ++ .secure_heap_memory_size = tc->secure_heap_mem_size, ++#endif ++ .tc_dma_tx_chan_name = ODIN_DMA_TX_CHAN_NAME, ++ .tc_dma_rx_chan_name = ODIN_DMA_RX_CHAN_NAME, ++ }; ++ struct platform_device_info odin_rogue_dev_info = { ++ .parent = &tc->pdev->dev, ++ .name = TC_DEVICE_NAME_ROGUE, ++ .id = -2, ++ .res = odin_rogue_resources, ++ .num_res = ARRAY_SIZE(odin_rogue_resources), ++ .data = &pdata, ++ .size_data = sizeof(pdata), ++ .dma_mask = odin_get_rogue_dma_mask(tc), ++ }; ++ ++ tc->ext_dev ++ = platform_device_register_full(&odin_rogue_dev_info); ++ ++ if (IS_ERR(tc->ext_dev)) { ++ err = PTR_ERR(tc->ext_dev); ++ dev_err(&tc->pdev->dev, ++ 
"Failed to register rogue device (%d)\n", err); ++ tc->ext_dev = NULL; ++ } ++ return err; ++#else /* defined(SUPPORT_RGX) */ ++ return 0; ++#endif /* defined(SUPPORT_RGX) */ ++} ++ ++int odin_register_dma_device(struct tc_device *tc) ++{ ++ resource_size_t reg_start = pci_resource_start(tc->pdev, ODN_SYS_BAR); ++ int err = 0; ++ ++ struct resource odin_cdma_resources[] = { ++ DEFINE_RES_MEM_NAMED(reg_start + ++ ODIN_DMA_REGS_OFFSET, /* start */ ++ ODIN_DMA_REGS_SIZE, /* size */ ++ "cdma-regs"), ++ DEFINE_RES_IRQ_NAMED(TC_INTERRUPT_CDMA, ++ "cdma-irq"), ++ DEFINE_RES_IRQ_NAMED(TC_INTERRUPT_CDMA2, ++ "cdma-irq2"), ++ }; ++ ++ struct tc_dma_platform_data pdata = { ++ .addr_width = ODN_CDMA_ADDR_WIDTH, ++ .num_dmas = tc->dma_nchan, ++ .has_dre = true, ++ .has_sg = true, ++ }; ++ ++ struct platform_device_info odin_cdma_dev_info = { ++ .parent = &tc->pdev->dev, ++ .name = ODN_DEVICE_NAME_CDMA, ++ .id = -1, ++ .res = odin_cdma_resources, ++ .num_res = ARRAY_SIZE(odin_cdma_resources), ++ .dma_mask = DMA_BIT_MASK(ODN_CDMA_ADDR_WIDTH), ++ .data = &pdata, ++ .size_data = sizeof(pdata), ++ }; ++ ++ tc->dma_dev ++ = platform_device_register_full(&odin_cdma_dev_info); ++ ++ if (IS_ERR(tc->dma_dev)) { ++ err = PTR_ERR(tc->dma_dev); ++ dev_err(&tc->pdev->dev, ++ "Failed to register CDMA device (%d)\n", err); ++ tc->dma_dev = NULL; ++ } ++ ++ return err; ++} ++ ++void odin_enable_interrupt_register(struct tc_device *tc, ++ int interrupt_id) ++{ ++ u32 val; ++ u32 flag; ++ ++ switch (interrupt_id) { ++ case TC_INTERRUPT_PDP: ++ dev_info(&tc->pdev->dev, ++ "Enabling Odin PDP interrupts\n"); ++ break; ++ case TC_INTERRUPT_EXT: ++ dev_info(&tc->pdev->dev, ++ "Enabling Odin DUT interrupts\n"); ++ break; ++ case TC_INTERRUPT_PDP2: ++ dev_info(&tc->pdev->dev, ++ "Enabling Odin PDP2 interrupts\n"); ++ break; ++ case TC_INTERRUPT_CDMA: ++ dev_info(&tc->pdev->dev, ++ "Enabling Odin CDMA interrupts\n"); ++ break; ++ case TC_INTERRUPT_CDMA2: ++ dev_info(&tc->pdev->dev, ++ "Enabling Odin CDMA2 interrupts\n"); ++ break; ++ default: ++ dev_err(&tc->pdev->dev, ++ "Error - illegal interrupt id\n"); ++ return; ++ } ++ ++ val = ioread32(tc->tcf.registers + ++ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); ++ flag = odin_interrupt_id_to_flag(interrupt_id); ++ val |= flag; ++ iowrite32(val, tc->tcf.registers + ++ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); ++} ++ ++void odin_disable_interrupt_register(struct tc_device *tc, ++ int interrupt_id) ++{ ++ u32 val; ++ ++ switch (interrupt_id) { ++ case TC_INTERRUPT_PDP: ++ dev_info(&tc->pdev->dev, ++ "Disabling Odin PDP interrupts\n"); ++ break; ++ case TC_INTERRUPT_EXT: ++ dev_info(&tc->pdev->dev, ++ "Disabling Odin DUT interrupts\n"); ++ break; ++ case TC_INTERRUPT_PDP2: ++ dev_info(&tc->pdev->dev, ++ "Disabling Odin PDP2 interrupts\n"); ++ break; ++ case TC_INTERRUPT_CDMA: ++ dev_info(&tc->pdev->dev, ++ "Disabling Odin CDMA interrupts\n"); ++ break; ++ case TC_INTERRUPT_CDMA2: ++ dev_info(&tc->pdev->dev, ++ "Disabling Odin CDMA2 interrupts\n"); ++ break; ++ default: ++ dev_err(&tc->pdev->dev, ++ "Error - illegal interrupt id\n"); ++ return; ++ } ++ val = ioread32(tc->tcf.registers + ++ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); ++ val &= ~(odin_interrupt_id_to_flag(interrupt_id)); ++ iowrite32(val, tc->tcf.registers + ++ common_reg_offset(tc, CORE_INTERRUPT_ENABLE)); ++} ++ ++irqreturn_t odin_irq_handler(int irq, void *data) ++{ ++ u32 interrupt_status; ++ u32 interrupt_clear = 0; ++ unsigned long flags; ++ irqreturn_t ret = IRQ_NONE; ++ struct tc_device *tc = (struct 
tc_device *)data; ++ ++ spin_lock_irqsave(&tc->interrupt_handler_lock, flags); ++ ++#if defined(TC_FAKE_INTERRUPTS) ++ /* If we're faking interrupts pretend we got both ext and PDP ints */ ++ interrupt_status = ODN_INTERRUPT_STATUS_DUT ++ | ODN_INTERRUPT_STATUS_PDP1; ++#else ++ interrupt_status = ioread32(tc->tcf.registers + ++ common_reg_offset(tc, ++ CORE_INTERRUPT_STATUS)); ++#endif ++ ++ if (interrupt_status & ODN_INTERRUPT_STATUS_DUT) { ++ struct tc_interrupt_handler *ext_int = ++ &tc->interrupt_handlers[TC_INTERRUPT_EXT]; ++ ++ if (ext_int->enabled && ext_int->handler_function) { ++ ext_int->handler_function(ext_int->handler_data); ++ interrupt_clear |= ODN_INTERRUPT_CLEAR_DUT; ++ } ++ ret = IRQ_HANDLED; ++ } ++ if (interrupt_status & ODN_INTERRUPT_STATUS_PDP1) { ++ struct tc_interrupt_handler *pdp_int = ++ &tc->interrupt_handlers[TC_INTERRUPT_PDP]; ++ ++ if (pdp_int->enabled && pdp_int->handler_function) { ++ pdp_int->handler_function(pdp_int->handler_data); ++ interrupt_clear |= ODN_INTERRUPT_CLEAR_PDP1; ++ } ++ ret = IRQ_HANDLED; ++ } ++ if (interrupt_status & ODN_INTERRUPT_STATUS_PDP2) { ++ struct tc_interrupt_handler *pdp_int = ++ &tc->interrupt_handlers[TC_INTERRUPT_PDP2]; ++ ++ if (pdp_int->enabled && pdp_int->handler_function) { ++ pdp_int->handler_function(pdp_int->handler_data); ++ interrupt_clear |= ODN_INTERRUPT_CLEAR_PDP2; ++ } ++ ret = IRQ_HANDLED; ++ } ++ ++ if (interrupt_status & ODN_INTERRUPT_STATUS_CDMA) { ++ struct tc_interrupt_handler *cdma_int = ++ &tc->interrupt_handlers[TC_INTERRUPT_CDMA]; ++ if (cdma_int->enabled && cdma_int->handler_function) { ++ cdma_int->handler_function(cdma_int->handler_data); ++ interrupt_clear |= ODN_INTERRUPT_CLEAR_CDMA; ++ } ++ ret = IRQ_HANDLED; ++ } ++ ++ if (interrupt_status & ODN_INTERRUPT_STATUS_CDMA2) { ++ struct tc_interrupt_handler *cdma_int = ++ &tc->interrupt_handlers[TC_INTERRUPT_CDMA2]; ++ if (cdma_int->enabled && cdma_int->handler_function) { ++ cdma_int->handler_function(cdma_int->handler_data); ++ interrupt_clear |= ODN_INTERRUPT_CLEAR_CDMA2; ++ } ++ ret = IRQ_HANDLED; ++ } ++ ++ ++ if (interrupt_clear) ++ iowrite32(interrupt_clear, ++ tc->tcf.registers + ++ common_reg_offset(tc, CORE_INTERRUPT_CLR)); ++ ++ /* ++ * Orion PDP interrupts are occasionally masked because, for unknown ++ * reasons, a vblank goes without being asserted for about 1000 ms. This ++ * feature is not present on Odin, and setting the ++ * INTERRUPT_TIMEOUT_THRESHOLD register to 0 does not seem to disable it ++ * either. This is probably caused by a bug in some versions of Sirius ++ * RTL. Also this bug seems to only affect PDP interrupts, but not the ++ * DUT. This might sometimes lead to a sudden jitter effect in the ++ * render. Further investigation is pending before this code can ++ * be safely removed. 
++ */ ++ ++ if (tc->orion) { ++ if (REG_FIELD_GET(ioread32(tc->tcf.registers + ++ SRS_CORE_INTERRUPT_TIMEOUT_CLR), ++ SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT)) { ++ dev_warn(&tc->pdev->dev, ++ "Orion PDP interrupts were masked, clearing now\n"); ++ iowrite32(SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK, ++ tc->tcf.registers + SRS_CORE_INTERRUPT_TIMEOUT_CLR); ++ } ++ } ++ ++ spin_unlock_irqrestore(&tc->interrupt_handler_lock, flags); ++ ++ return ret; ++} ++ ++int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll) ++{ ++ *tmp = 0; ++ *pll = 0; ++ return 0; ++} ++ ++int odin_sys_strings(struct tc_device *tc, ++ char *str_fpga_rev, size_t size_fpga_rev, ++ char *str_tcf_core_rev, size_t size_tcf_core_rev, ++ char *str_tcf_core_target_build_id, ++ size_t size_tcf_core_target_build_id, ++ char *str_pci_ver, size_t size_pci_ver, ++ char *str_macro_ver, size_t size_macro_ver) ++{ ++ u32 tcver = tc_odin_subvers(&tc->pdev->dev); ++ char temp_str[12]; ++ u32 val; ++ ++ if (tc->odin) { ++ /* Read the Odin major and minor revision ID register Rx-xx */ ++ val = ioread32(tc->tcf.registers + ++ ODN_CORE_REL); ++ ++ snprintf(str_tcf_core_rev, ++ size_tcf_core_rev, ++ "%d.%d", ++ HEX2DEC((val & ODN_REL_MAJOR_MASK) ++ >> ODN_REL_MAJOR_SHIFT), ++ HEX2DEC((val & ODN_REL_MINOR_MASK) ++ >> ODN_REL_MINOR_SHIFT)); ++ } else { ++ /* Read the Orion major and minor revision ID register Rx-xx */ ++ val = ioread32(tc->tcf.registers + ++ SRS_CORE_REVISION); ++ ++ snprintf(str_tcf_core_rev, ++ size_tcf_core_rev, ++ "%d.%d", ++ HEX2DEC((val & SRS_REVISION_MAJOR_MASK) ++ >> SRS_REVISION_MAJOR_SHIFT), ++ HEX2DEC((val & SRS_REVISION_MINOR_MASK) ++ >> SRS_REVISION_MINOR_SHIFT)); ++ } ++ ++ dev_info(&tc->pdev->dev, "%s core revision %s\n", ++ odin_tc_name(tc), str_tcf_core_rev); ++ ++ /* Read the Odin register containing the Perforce changelist ++ * value that the FPGA build was generated from ++ */ ++ val = ioread32(tc->tcf.registers + ++ common_reg_offset(tc, CORE_CHANGE_SET)); ++ ++ snprintf(str_tcf_core_target_build_id, ++ size_tcf_core_target_build_id, ++ "%d", ++ (val & CHANGE_SET_SET_MASK[tcver]) ++ >> CHANGE_SET_SET_SHIFT[tcver]); ++ ++ /* Read the Odin User_ID register containing the User ID for ++ * identification of a modified build ++ */ ++ val = ioread32(tc->tcf.registers + common_reg_offset(tc, CORE_USER_ID)); ++ ++ snprintf(temp_str, ++ sizeof(temp_str), ++ "%d", ++ HEX2DEC((val & USER_ID_ID_MASK[tcver]) ++ >> USER_ID_ID_SHIFT[tcver])); ++ ++ /* Read the Odin User_Build register containing the User build ++ * number for identification of modified builds ++ */ ++ val = ioread32(tc->tcf.registers + ++ common_reg_offset(tc, CORE_USER_BUILD)); ++ ++ snprintf(temp_str, ++ sizeof(temp_str), ++ "%d", ++ HEX2DEC((val & USER_BUILD_BUILD_MASK[tcver]) ++ >> USER_BUILD_BUILD_SHIFT[tcver])); ++ ++ return 0; ++} ++ ++const char *odin_tc_name(struct tc_device *tc) ++{ ++ if (tc->odin) ++ return "Odin"; ++ else if (tc->orion) ++ return "Orion"; ++ else ++ return "Unknown TC"; ++} ++ ++bool odin_pfim_compatible(struct tc_device *tc) ++{ ++ u32 val; ++ ++ val = ioread32(tc->tcf.registers + ++ ODN_CORE_REL); ++ ++ return ((REG_FIELD_GET(val, ODN_REL_MAJOR) ++ >= ODIN_PFIM_RELNUM)); ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && !defined(TC_XILINX_DMA) ++static bool odin_dma_chan_filter(struct dma_chan *chan, void *param) ++{ ++ return false; ++} ++#endif ++ ++struct dma_chan *odin_cdma_chan(struct tc_device *tc, char *name) ++{ ++ struct dma_chan *chan; ++ unsigned long chan_idx; ++ int 
err; ++ ++ if (!(strcmp("rx", name))) ++ chan_idx = ODN_DMA_CHAN_RX; ++ else if (!(strcmp("tx", name))) { ++ /* ++ * When Odin RTL has a single CDMA device, we simulate ++ * a second channel by always opening the first one. ++ * This is made possible because CDMA allows for ++ * transfers in both directions ++ */ ++ if (tc->dma_nchan == 1) { ++ name = "rx"; ++ chan_idx = ODN_DMA_CHAN_RX; ++ } else ++ chan_idx = ODN_DMA_CHAN_TX; ++ } else { ++ dev_err(&tc->pdev->dev, "Wrong CDMA channel name\n"); ++ return NULL; ++ } ++ ++ mutex_lock(&tc->dma_mutex); ++ ++ if (tc->dma_refcnt[chan_idx]) { ++ tc->dma_refcnt[chan_idx]++; ++ } else { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) ++ chan = dma_request_chan(&tc->dma_dev->dev, name); ++#else ++ dma_cap_mask_t mask; ++ ++ dma_cap_zero(mask); ++ dma_cap_set(DMA_SLAVE, mask); ++ chan = dma_request_channel(mask, ++ odin_dma_chan_filter, ++ (void *)chan_idx); ++#endif ++ if (IS_ERR(chan)) { ++ err = PTR_ERR(chan); ++ dev_err(&tc->pdev->dev, ++ "dma channel request failed (%d)\n", err); ++ mutex_unlock(&tc->dma_mutex); ++ return NULL; ++ } ++ tc->dma_chans[chan_idx] = chan; ++ tc->dma_refcnt[chan_idx] = 1; ++ } ++ ++ mutex_unlock(&tc->dma_mutex); ++ ++ return tc->dma_chans[chan_idx]; ++} ++ ++void odin_cdma_chan_free(struct tc_device *tc, ++ void *chan_priv) ++{ ++ struct dma_chan *dma_chan = (struct dma_chan *)chan_priv; ++ u32 chan_idx; ++ ++ BUG_ON(dma_chan == NULL); ++ ++ mutex_lock(&tc->dma_mutex); ++ ++ if (dma_chan == tc->dma_chans[ODN_DMA_CHAN_RX]) ++ chan_idx = ODN_DMA_CHAN_RX; ++ else if (dma_chan == tc->dma_chans[ODN_DMA_CHAN_TX]) ++ chan_idx = ODN_DMA_CHAN_TX; ++ else ++ goto cdma_chan_free_exit; ++ ++ tc->dma_refcnt[chan_idx]--; ++ if (!tc->dma_refcnt[chan_idx]) { ++ tc->dma_chans[chan_idx] = NULL; ++ dma_release_channel(dma_chan); ++ } ++ ++cdma_chan_free_exit: ++ mutex_unlock(&tc->dma_mutex); ++} +diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_odin.h b/drivers/gpu/drm/img-rogue/apollo/tc_odin.h +new file mode 100644 +index 000000000000..d2be3cde97a4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/tc_odin.h +@@ -0,0 +1,82 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. 
++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef _ODIN_DRV_H ++#define _ODIN_DRV_H ++ ++#include "tc_drv_internal.h" ++#include "odin_defs.h" ++#include "orion_defs.h" ++ ++int odin_init(struct tc_device *tc, struct pci_dev *pdev, ++ int *core_clock, int *mem_clock, int *clock_multiplex, ++ int pdp_mem_size, int secure_mem_size, ++ int mem_latency, int mem_wresp_latency, int mem_mode, ++ bool fbc_bypass); ++int odin_cleanup(struct tc_device *tc); ++ ++int odin_register_pdp_device(struct tc_device *tc); ++int odin_register_ext_device(struct tc_device *tc); ++int odin_register_dma_device(struct tc_device *tc); ++ ++struct dma_chan *odin_cdma_chan(struct tc_device *tc, char *name); ++void odin_cdma_chan_free(struct tc_device *tc, void *chan_priv); ++ ++void odin_enable_interrupt_register(struct tc_device *tc, ++ int interrupt_id); ++void odin_disable_interrupt_register(struct tc_device *tc, ++ int interrupt_id); ++ ++irqreturn_t odin_irq_handler(int irq, void *data); ++ ++int odin_sys_info(struct tc_device *tc, u32 *tmp, u32 *pll); ++int odin_sys_strings(struct tc_device *tc, ++ char *str_fpga_rev, size_t size_fpga_rev, ++ char *str_tcf_core_rev, size_t size_tcf_core_rev, ++ char *str_tcf_core_target_build_id, ++ size_t size_tcf_core_target_build_id, ++ char *str_pci_ver, size_t size_pci_ver, ++ char *str_macro_ver, size_t size_macro_ver); ++ ++const char *odin_tc_name(struct tc_device *tc); ++ ++bool odin_pfim_compatible(struct tc_device *tc); ++#endif /* _ODIN_DRV_H */ +diff --git a/drivers/gpu/drm/img-rogue/apollo/tc_odin_common_regs.h b/drivers/gpu/drm/img-rogue/apollo/tc_odin_common_regs.h +new file mode 100644 +index 000000000000..55a47efdb7b2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/tc_odin_common_regs.h +@@ -0,0 +1,105 @@ ++/* ++ * @File odin_common_regs.h ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#ifndef __TC_ODIN_COMMON_REGS_H__
++#define __TC_ODIN_COMMON_REGS_H__
++
++#include <linux/stringify.h>
++#include <linux/types.h>
++
++struct tc_device;
++
++enum odin_common_regs {
++ CORE_CHANGE_SET = 0,
++ CORE_USER_ID,
++ CORE_USER_BUILD,
++ CORE_INTERRUPT_ENABLE,
++ CORE_INTERRUPT_CLR,
++ CORE_INTERRUPT_STATUS,
++ REG_BANK_ODN_CLK_BLK,
++};
++
++#define ODIN_REGNAME(REG_NAME) "ODN_" __stringify(REG_NAME)
++#define ORION_REGNAME(REG_NAME) "SRS_" __stringify(REG_NAME)
++
++struct odin_orion_reg {
++ u32 odin_offset;
++ u32 orion_offset;
++ const char *odin_name;
++ const char *orion_name;
++};
++
++#define COMMON_REG_ENTRY(REG) \
++ [REG] = { \
++ .odin_offset = ODN_##REG, \
++ .orion_offset = SRS_##REG, \
++ .odin_name = ODIN_REGNAME(REG), \
++ .orion_name = ORION_REGNAME(REG), \
++ }
++
++static const struct odin_orion_reg common_regs[] = {
++ COMMON_REG_ENTRY(CORE_CHANGE_SET),
++ COMMON_REG_ENTRY(CORE_USER_ID),
++ COMMON_REG_ENTRY(CORE_USER_BUILD),
++ COMMON_REG_ENTRY(CORE_INTERRUPT_ENABLE),
++ COMMON_REG_ENTRY(CORE_INTERRUPT_CLR),
++ COMMON_REG_ENTRY(CORE_INTERRUPT_STATUS),
++ COMMON_REG_ENTRY(REG_BANK_ODN_CLK_BLK),
++};
++
++static inline const u32 common_reg_offset(struct tc_device *tc, u32 reg)
++{
++ if (tc->odin)
++ return common_regs[reg].odin_offset;
++ else
++ return common_regs[reg].orion_offset;
++}
++
++static inline const char *common_reg_name(struct tc_device *tc, u32 reg)
++{
++ if (tc->odin)
++ return common_regs[reg].odin_name;
++ else
++ return common_regs[reg].orion_name;
++}
++
++#endif /* __TC_ODIN_COMMON_REGS_H__ */
+diff --git a/drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h b/drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h
+new file mode 100644
+index 000000000000..cc7b10fd8116
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/apollo/tcf_clk_ctrl.h
+@@ -0,0 +1,1018 @@
++/*************************************************************************/ /*!
++@Title Test Chip Framework system control register definitions
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Autogenerated C -- do not edit
++ Generated from: tcf_clk_ctrl.def
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(_TCF_CLK_CTRL_H_) ++#define _TCF_CLK_CTRL_H_ ++ ++/* ++ * The following register definitions are valid if register 0x28 has value 0. ++ */ ++ ++/* ++ Register FPGA_ID_REG ++*/ ++#define TCF_CLK_CTRL_FPGA_ID_REG 0x0000 ++#define FPGA_ID_REG_CORE_CFG_MASK 0x0000FFFFU ++#define FPGA_ID_REG_CORE_CFG_SHIFT 0 ++#define FPGA_ID_REG_CORE_CFG_SIGNED 0 ++ ++#define FPGA_ID_REG_CORE_ID_MASK 0xFFFF0000U ++#define FPGA_ID_REG_CORE_ID_SHIFT 16 ++#define FPGA_ID_REG_CORE_ID_SIGNED 0 ++ ++/* ++ Register FPGA_REV_REG ++*/ ++#define TCF_CLK_CTRL_FPGA_REV_REG 0x0008 ++#define FPGA_REV_REG_MAINT_MASK 0x000000FFU ++#define FPGA_REV_REG_MAINT_SHIFT 0 ++#define FPGA_REV_REG_MAINT_SIGNED 0 ++ ++#define FPGA_REV_REG_MINOR_MASK 0x0000FF00U ++#define FPGA_REV_REG_MINOR_SHIFT 8 ++#define FPGA_REV_REG_MINOR_SIGNED 0 ++ ++#define FPGA_REV_REG_MAJOR_MASK 0x00FF0000U ++#define FPGA_REV_REG_MAJOR_SHIFT 16 ++#define FPGA_REV_REG_MAJOR_SIGNED 0 ++ ++#define FPGA_REV_REG_DESIGNER_MASK 0xFF000000U ++#define FPGA_REV_REG_DESIGNER_SHIFT 24 ++#define FPGA_REV_REG_DESIGNER_SIGNED 0 ++ ++/* ++ Register FPGA_DES_REV_1 ++*/ ++#define TCF_CLK_CTRL_FPGA_DES_REV_1 0x0010 ++#define FPGA_DES_REV_1_MASK 0xFFFFFFFFU ++#define FPGA_DES_REV_1_SHIFT 0 ++#define FPGA_DES_REV_1_SIGNED 0 ++ ++/* ++ Register FPGA_DES_REV_2 ++*/ ++#define TCF_CLK_CTRL_FPGA_DES_REV_2 0x0018 ++#define FPGA_DES_REV_2_MASK 0xFFFFFFFFU ++#define FPGA_DES_REV_2_SHIFT 0 ++#define FPGA_DES_REV_2_SIGNED 0 ++ ++/* ++ Register TCF_CORE_ID_REG ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_ID_REG 0x0020 ++#define TCF_CORE_ID_REG_CORE_CFG_MASK 0x0000FFFFU ++#define TCF_CORE_ID_REG_CORE_CFG_SHIFT 0 ++#define TCF_CORE_ID_REG_CORE_CFG_SIGNED 0 ++ ++#define TCF_CORE_ID_REG_CORE_ID_MASK 0xFFFF0000U ++#define TCF_CORE_ID_REG_CORE_ID_SHIFT 16 ++#define TCF_CORE_ID_REG_CORE_ID_SIGNED 0 ++ ++/* ++ Register TCF_CORE_REV_REG ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_REV_REG 0x0028 ++#define TCF_CORE_REV_REG_MAINT_MASK 0x000000FFU ++#define TCF_CORE_REV_REG_MAINT_SHIFT 0 ++#define TCF_CORE_REV_REG_MAINT_SIGNED 0 ++ ++#define TCF_CORE_REV_REG_MINOR_MASK 0x0000FF00U ++#define TCF_CORE_REV_REG_MINOR_SHIFT 8 ++#define TCF_CORE_REV_REG_MINOR_SIGNED 0 ++ ++#define TCF_CORE_REV_REG_MAJOR_MASK 0x00FF0000U ++#define TCF_CORE_REV_REG_MAJOR_SHIFT 16 ++#define TCF_CORE_REV_REG_MAJOR_SIGNED 0 ++ ++#define TCF_CORE_REV_REG_DESIGNER_MASK 0xFF000000U ++#define TCF_CORE_REV_REG_DESIGNER_SHIFT 24 ++#define TCF_CORE_REV_REG_DESIGNER_SIGNED 0 ++ ++/* ++ Register TCF_CORE_DES_REV_1 ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_DES_REV_1 0x0030 ++#define TCF_CORE_DES_REV_1_MASK 0xFFFFFFFFU ++#define TCF_CORE_DES_REV_1_SHIFT 0 ++#define TCF_CORE_DES_REV_1_SIGNED 0 ++ ++/* ++ Register TCF_CORE_DES_REV_2 ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_DES_REV_2 0x0038 ++#define TCF_CORE_DES_REV_2_MASK 0xFFFFFFFFU ++#define 
TCF_CORE_DES_REV_2_SHIFT 0 ++#define TCF_CORE_DES_REV_2_SIGNED 0 ++ ++ ++/* ++ * The following register definitions are valid if register 0x28 has value 1. ++ */ ++ ++/* ++ Register ID ++*/ ++#define TCF_CLK_CTRL_ID 0x0000 ++#define VARIANT_MASK 0x0000FFFFU ++#define VARIANT_SHIFT 0 ++#define VARIANT_SIGNED 0 ++ ++#define ID_MASK 0xFFFF0000U ++#define ID_SHIFT 16 ++#define ID_SIGNED 0 ++ ++/* ++ Register REL ++*/ ++#define TCF_CLK_CTRL_REL 0x0008 ++#define MINOR_MASK 0x0000FFFFU ++#define MINOR_SHIFT 0 ++#define MINOR_SIGNED 0 ++ ++#define MAJOR_MASK 0xFFFF0000U ++#define MAJOR_SHIFT 16 ++#define MAJOR_SIGNED 0 ++ ++/* ++ Register CHANGE_SET ++*/ ++#define TCF_CLK_CTRL_CHANGE_SET 0x0010 ++#define SET_MASK 0xFFFFFFFFU ++#define SET_SHIFT 0 ++#define SET_SIGNED 0 ++ ++/* ++ Register USER_ID ++*/ ++#define TCF_CLK_CTRL_USER_ID 0x0018 ++#define USER_ID_MASK 0x0000000FU ++#define USER_ID_SHIFT 0 ++#define USER_ID_SIGNED 0 ++ ++/* ++ Register USER_BUILD ++*/ ++#define TCF_CLK_CTRL_USER_BUILD 0x0020 ++#define BUILD_MASK 0xFFFFFFFFU ++#define BUILD_SHIFT 0 ++#define BUILD_SIGNED 0 ++ ++/* ++ Register SW_IF_VERSION ++*/ ++#define TCF_CLK_CTRL_SW_IF_VERSION 0x0028 ++#define VERSION_MASK 0x0000FFFFU ++#define VERSION_SHIFT 0 ++#define VERSION_SIGNED 0 ++ ++/* ++ * The following register definitions are valid for all Apollo builds, ++ * even if some of the registers are not available for certain cores. ++ */ ++ ++/* ++ Register SCB_GENERAL_CONTROL ++*/ ++#define TCF_CLK_CTRL_SCB_GENERAL_CONTROL 0x0040 ++#define SCB_GC_TRANS_HALT_MASK 0x00000200U ++#define SCB_GC_TRANS_HALT_SHIFT 9 ++#define SCB_GC_TRANS_HALT_SIGNED 0 ++ ++#define SCB_GC_CKD_REGS_MASK 0x00000100U ++#define SCB_GC_CKD_REGS_SHIFT 8 ++#define SCB_GC_CKD_REGS_SIGNED 0 ++ ++#define SCB_GC_CKD_SLAVE_MASK 0x00000080U ++#define SCB_GC_CKD_SLAVE_SHIFT 7 ++#define SCB_GC_CKD_SLAVE_SIGNED 0 ++ ++#define SCB_GC_CKD_MASTER_MASK 0x00000040U ++#define SCB_GC_CKD_MASTER_SHIFT 6 ++#define SCB_GC_CKD_MASTER_SIGNED 0 ++ ++#define SCB_GC_CKD_XDATA_MASK 0x00000020U ++#define SCB_GC_CKD_XDATA_SHIFT 5 ++#define SCB_GC_CKD_XDATA_SIGNED 0 ++ ++#define SCB_GC_SFR_REG_MASK 0x00000010U ++#define SCB_GC_SFR_REG_SHIFT 4 ++#define SCB_GC_SFR_REG_SIGNED 0 ++ ++#define SCB_GC_SFR_SLAVE_MASK 0x00000008U ++#define SCB_GC_SFR_SLAVE_SHIFT 3 ++#define SCB_GC_SFR_SLAVE_SIGNED 0 ++ ++#define SCB_GC_SFR_MASTER_MASK 0x00000004U ++#define SCB_GC_SFR_MASTER_SHIFT 2 ++#define SCB_GC_SFR_MASTER_SIGNED 0 ++ ++#define SCB_GC_SFR_DET_DATA_MASK 0x00000002U ++#define SCB_GC_SFR_DET_DATA_SHIFT 1 ++#define SCB_GC_SFR_DET_DATA_SIGNED 0 ++ ++#define SCB_GC_SFR_GEN_DATA_MASK 0x00000001U ++#define SCB_GC_SFR_GEN_DATA_SHIFT 0 ++#define SCB_GC_SFR_GEN_DATA_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_READ_COUNT ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_READ_COUNT 0x0048 ++#define MASTER_READ_COUNT_MASK 0x0000FFFFU ++#define MASTER_READ_COUNT_SHIFT 0 ++#define MASTER_READ_COUNT_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_READ_DATA ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_READ_DATA 0x0050 ++#define MASTER_READ_DATA_MASK 0x000000FFU ++#define MASTER_READ_DATA_SHIFT 0 ++#define MASTER_READ_DATA_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_ADDRESS ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_ADDRESS 0x0058 ++#define SCB_MASTER_ADDRESS_MASK 0x000003FFU ++#define SCB_MASTER_ADDRESS_SHIFT 0 ++#define SCB_MASTER_ADDRESS_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_WRITE_DATA ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_WRITE_DATA 0x0060 ++#define MASTER_WRITE_DATA_MASK 0x000000FFU ++#define MASTER_WRITE_DATA_SHIFT 0 ++#define 
MASTER_WRITE_DATA_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_WRITE_COUNT ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_WRITE_COUNT 0x0068 ++#define MASTER_WRITE_COUNT_MASK 0x0000FFFFU ++#define MASTER_WRITE_COUNT_SHIFT 0 ++#define MASTER_WRITE_COUNT_SIGNED 0 ++ ++/* ++ Register SCB_BUS_SELECT ++*/ ++#define TCF_CLK_CTRL_SCB_BUS_SELECT 0x0070 ++#define BUS_SELECT_MASK 0x00000003U ++#define BUS_SELECT_SHIFT 0 ++#define BUS_SELECT_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_FILL_STATUS ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_FILL_STATUS 0x0078 ++#define MASTER_WRITE_FIFO_EMPTY_MASK 0x00000008U ++#define MASTER_WRITE_FIFO_EMPTY_SHIFT 3 ++#define MASTER_WRITE_FIFO_EMPTY_SIGNED 0 ++ ++#define MASTER_WRITE_FIFO_FULL_MASK 0x00000004U ++#define MASTER_WRITE_FIFO_FULL_SHIFT 2 ++#define MASTER_WRITE_FIFO_FULL_SIGNED 0 ++ ++#define MASTER_READ_FIFO_EMPTY_MASK 0x00000002U ++#define MASTER_READ_FIFO_EMPTY_SHIFT 1 ++#define MASTER_READ_FIFO_EMPTY_SIGNED 0 ++ ++#define MASTER_READ_FIFO_FULL_MASK 0x00000001U ++#define MASTER_READ_FIFO_FULL_SHIFT 0 ++#define MASTER_READ_FIFO_FULL_SIGNED 0 ++ ++/* ++ Register CLK_AND_RST_CTRL ++*/ ++#define TCF_CLK_CTRL_CLK_AND_RST_CTRL 0x0080 ++#define GLB_CLKG_EN_MASK 0x00020000U ++#define GLB_CLKG_EN_SHIFT 17 ++#define GLB_CLKG_EN_SIGNED 0 ++ ++#define CLK_GATE_CNTL_MASK 0x00010000U ++#define CLK_GATE_CNTL_SHIFT 16 ++#define CLK_GATE_CNTL_SIGNED 0 ++ ++#define DUT_DCM_RESETN_MASK 0x00000400U ++#define DUT_DCM_RESETN_SHIFT 10 ++#define DUT_DCM_RESETN_SIGNED 0 ++ ++#define MEM_RESYNC_BYPASS_MASK 0x00000200U ++#define MEM_RESYNC_BYPASS_SHIFT 9 ++#define MEM_RESYNC_BYPASS_SIGNED 0 ++ ++#define SYS_RESYNC_BYPASS_MASK 0x00000100U ++#define SYS_RESYNC_BYPASS_SHIFT 8 ++#define SYS_RESYNC_BYPASS_SIGNED 0 ++ ++#define SCB_RESETN_MASK 0x00000010U ++#define SCB_RESETN_SHIFT 4 ++#define SCB_RESETN_SIGNED 0 ++ ++#define PDP2_RESETN_MASK 0x00000008U ++#define PDP2_RESETN_SHIFT 3 ++#define PDP2_RESETN_SIGNED 0 ++ ++#define PDP1_RESETN_MASK 0x00000004U ++#define PDP1_RESETN_SHIFT 2 ++#define PDP1_RESETN_SIGNED 0 ++ ++#define DDR_RESETN_MASK 0x00000002U ++#define DDR_RESETN_SHIFT 1 ++#define DDR_RESETN_SIGNED 0 ++ ++#define DUT_RESETN_MASK 0x00000001U ++#define DUT_RESETN_SHIFT 0 ++#define DUT_RESETN_SIGNED 0 ++ ++/* ++ Register TEST_REG_OUT ++*/ ++#define TCF_CLK_CTRL_TEST_REG_OUT 0x0088 ++#define TEST_REG_OUT_MASK 0xFFFFFFFFU ++#define TEST_REG_OUT_SHIFT 0 ++#define TEST_REG_OUT_SIGNED 0 ++ ++/* ++ Register TEST_REG_IN ++*/ ++#define TCF_CLK_CTRL_TEST_REG_IN 0x0090 ++#define TEST_REG_IN_MASK 0xFFFFFFFFU ++#define TEST_REG_IN_SHIFT 0 ++#define TEST_REG_IN_SIGNED 0 ++ ++/* ++ Register TEST_CTRL ++*/ ++#define TCF_CLK_CTRL_TEST_CTRL 0x0098 ++#define PCI_TEST_OFFSET_MASK 0xF8000000U ++#define PCI_TEST_OFFSET_SHIFT 27 ++#define PCI_TEST_OFFSET_SIGNED 0 ++ ++#define PDP1_HOST_MEM_SELECT_MASK 0x00000200U ++#define PDP1_HOST_MEM_SELECT_SHIFT 9 ++#define PDP1_HOST_MEM_SELECT_SIGNED 0 ++ ++#define HOST_PHY_MODE_MASK 0x00000100U ++#define HOST_PHY_MODE_SHIFT 8 ++#define HOST_PHY_MODE_SIGNED 0 ++ ++#define HOST_ONLY_MODE_MASK 0x00000080U ++#define HOST_ONLY_MODE_SHIFT 7 ++#define HOST_ONLY_MODE_SIGNED 0 ++ ++#define PCI_TEST_MODE_MASK 0x00000040U ++#define PCI_TEST_MODE_SHIFT 6 ++#define PCI_TEST_MODE_SIGNED 0 ++ ++#define TURN_OFF_DDR_MASK 0x00000020U ++#define TURN_OFF_DDR_SHIFT 5 ++#define TURN_OFF_DDR_SIGNED 0 ++ ++#define SYS_RD_CLK_INV_MASK 0x00000010U ++#define SYS_RD_CLK_INV_SHIFT 4 ++#define SYS_RD_CLK_INV_SIGNED 0 ++ ++#define MEM_REQ_CLK_INV_MASK 0x00000008U ++#define MEM_REQ_CLK_INV_SHIFT 3 ++#define 
MEM_REQ_CLK_INV_SIGNED 0 ++ ++#define BURST_SPLIT_MASK 0x00000004U ++#define BURST_SPLIT_SHIFT 2 ++#define BURST_SPLIT_SIGNED 0 ++ ++#define CLK_INVERSION_MASK 0x00000002U ++#define CLK_INVERSION_SHIFT 1 ++#define CLK_INVERSION_SIGNED 0 ++ ++#define ADDRESS_FORCE_MASK 0x00000001U ++#define ADDRESS_FORCE_SHIFT 0 ++#define ADDRESS_FORCE_SIGNED 0 ++ ++/* ++ Register CLEAR_HOST_MEM_SIG ++*/ ++#define TCF_CLK_CTRL_CLEAR_HOST_MEM_SIG 0x00A0 ++#define SIGNATURE_TAG_ID_MASK 0x00000F00U ++#define SIGNATURE_TAG_ID_SHIFT 8 ++#define SIGNATURE_TAG_ID_SIGNED 0 ++ ++#define CLEAR_HOST_MEM_SIGNATURE_MASK 0x00000001U ++#define CLEAR_HOST_MEM_SIGNATURE_SHIFT 0 ++#define CLEAR_HOST_MEM_SIGNATURE_SIGNED 0 ++ ++/* ++ Register HOST_MEM_SIGNATURE ++*/ ++#define TCF_CLK_CTRL_HOST_MEM_SIGNATURE 0x00A8 ++#define HOST_MEM_SIGNATURE_MASK 0xFFFFFFFFU ++#define HOST_MEM_SIGNATURE_SHIFT 0 ++#define HOST_MEM_SIGNATURE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_STATUS ++*/ ++#define TCF_CLK_CTRL_INTERRUPT_STATUS 0x00C8 ++#define INTERRUPT_MASTER_STATUS_MASK 0x80000000U ++#define INTERRUPT_MASTER_STATUS_SHIFT 31 ++#define INTERRUPT_MASTER_STATUS_SIGNED 0 ++ ++#define OTHER_INTS_MASK 0x7FFE0000U ++#define OTHER_INTS_SHIFT 17 ++#define OTHER_INTS_SIGNED 0 ++ ++#define HOST_MST_NORESPONSE_MASK 0x00010000U ++#define HOST_MST_NORESPONSE_SHIFT 16 ++#define HOST_MST_NORESPONSE_SIGNED 0 ++ ++#define PDP2_INT_MASK 0x00008000U ++#define PDP2_INT_SHIFT 15 ++#define PDP2_INT_SIGNED 0 ++ ++#define PDP1_INT_MASK 0x00004000U ++#define PDP1_INT_SHIFT 14 ++#define PDP1_INT_SIGNED 0 ++ ++#define EXT_INT_MASK 0x00002000U ++#define EXT_INT_SHIFT 13 ++#define EXT_INT_SIGNED 0 ++ ++#define SCB_MST_HLT_BIT_MASK 0x00001000U ++#define SCB_MST_HLT_BIT_SHIFT 12 ++#define SCB_MST_HLT_BIT_SIGNED 0 ++ ++#define SCB_SLV_EVENT_MASK 0x00000800U ++#define SCB_SLV_EVENT_SHIFT 11 ++#define SCB_SLV_EVENT_SIGNED 0 ++ ++#define SCB_TDONE_RX_MASK 0x00000400U ++#define SCB_TDONE_RX_SHIFT 10 ++#define SCB_TDONE_RX_SIGNED 0 ++ ++#define SCB_SLV_WT_RD_DAT_MASK 0x00000200U ++#define SCB_SLV_WT_RD_DAT_SHIFT 9 ++#define SCB_SLV_WT_RD_DAT_SIGNED 0 ++ ++#define SCB_SLV_WT_PRV_RD_MASK 0x00000100U ++#define SCB_SLV_WT_PRV_RD_SHIFT 8 ++#define SCB_SLV_WT_PRV_RD_SIGNED 0 ++ ++#define SCB_SLV_WT_WR_DAT_MASK 0x00000080U ++#define SCB_SLV_WT_WR_DAT_SHIFT 7 ++#define SCB_SLV_WT_WR_DAT_SIGNED 0 ++ ++#define SCB_MST_WT_RD_DAT_MASK 0x00000040U ++#define SCB_MST_WT_RD_DAT_SHIFT 6 ++#define SCB_MST_WT_RD_DAT_SIGNED 0 ++ ++#define SCB_ADD_ACK_ERR_MASK 0x00000020U ++#define SCB_ADD_ACK_ERR_SHIFT 5 ++#define SCB_ADD_ACK_ERR_SIGNED 0 ++ ++#define SCB_WR_ACK_ERR_MASK 0x00000010U ++#define SCB_WR_ACK_ERR_SHIFT 4 ++#define SCB_WR_ACK_ERR_SIGNED 0 ++ ++#define SCB_SDAT_LO_TIM_MASK 0x00000008U ++#define SCB_SDAT_LO_TIM_SHIFT 3 ++#define SCB_SDAT_LO_TIM_SIGNED 0 ++ ++#define SCB_SCLK_LO_TIM_MASK 0x00000004U ++#define SCB_SCLK_LO_TIM_SHIFT 2 ++#define SCB_SCLK_LO_TIM_SIGNED 0 ++ ++#define SCB_UNEX_START_BIT_MASK 0x00000002U ++#define SCB_UNEX_START_BIT_SHIFT 1 ++#define SCB_UNEX_START_BIT_SIGNED 0 ++ ++#define SCB_BUS_INACTIVE_MASK 0x00000001U ++#define SCB_BUS_INACTIVE_SHIFT 0 ++#define SCB_BUS_INACTIVE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_OP_CFG ++*/ ++#define TCF_CLK_CTRL_INTERRUPT_OP_CFG 0x00D0 ++#define PULSE_NLEVEL_MASK 0x80000000U ++#define PULSE_NLEVEL_SHIFT 31 ++#define PULSE_NLEVEL_SIGNED 0 ++ ++#define INT_SENSE_MASK 0x40000000U ++#define INT_SENSE_SHIFT 30 ++#define INT_SENSE_SIGNED 0 ++ ++#define INTERRUPT_DEST_MASK 0x0000000FU ++#define INTERRUPT_DEST_SHIFT 0 ++#define 
INTERRUPT_DEST_SIGNED 0 ++ ++/* ++ Register INTERRUPT_ENABLE ++*/ ++#define TCF_CLK_CTRL_INTERRUPT_ENABLE 0x00D8 ++#define INTERRUPT_MASTER_ENABLE_MASK 0x80000000U ++#define INTERRUPT_MASTER_ENABLE_SHIFT 31 ++#define INTERRUPT_MASTER_ENABLE_SIGNED 0 ++ ++#define INTERRUPT_ENABLE_MASK 0x7FFFFFFFU ++#define INTERRUPT_ENABLE_SHIFT 0 ++#define INTERRUPT_ENABLE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_CLEAR ++*/ ++#define TCF_CLK_CTRL_INTERRUPT_CLEAR 0x00E0 ++#define INTERRUPT_MASTER_CLEAR_MASK 0x80000000U ++#define INTERRUPT_MASTER_CLEAR_SHIFT 31 ++#define INTERRUPT_MASTER_CLEAR_SIGNED 0 ++ ++#define INTERRUPT_CLEAR_MASK 0x7FFFFFFFU ++#define INTERRUPT_CLEAR_SHIFT 0 ++#define INTERRUPT_CLEAR_SIGNED 0 ++ ++/* ++ Register YCC_RGB_CTRL ++*/ ++#define TCF_CLK_CTRL_YCC_RGB_CTRL 0x00E8 ++#define RGB_CTRL1_MASK 0x000001FFU ++#define RGB_CTRL1_SHIFT 0 ++#define RGB_CTRL1_SIGNED 0 ++ ++#define RGB_CTRL2_MASK 0x01FF0000U ++#define RGB_CTRL2_SHIFT 16 ++#define RGB_CTRL2_SIGNED 0 ++ ++/* ++ Register EXP_BRD_CTRL ++*/ ++#define TCF_CLK_CTRL_EXP_BRD_CTRL 0x00F8 ++#define PDP1_DATA_EN_MASK 0x00000003U ++#define PDP1_DATA_EN_SHIFT 0 ++#define PDP1_DATA_EN_SIGNED 0 ++ ++#define PDP2_DATA_EN_MASK 0x00000030U ++#define PDP2_DATA_EN_SHIFT 4 ++#define PDP2_DATA_EN_SIGNED 0 ++ ++#define EXP_BRD_OUTPUT_MASK 0xFFFFFF00U ++#define EXP_BRD_OUTPUT_SHIFT 8 ++#define EXP_BRD_OUTPUT_SIGNED 0 ++ ++/* ++ Register HOSTIF_CONTROL ++*/ ++#define TCF_CLK_CTRL_HOSTIF_CONTROL 0x0100 ++#define HOSTIF_CTRL_MASK 0x000000FFU ++#define HOSTIF_CTRL_SHIFT 0 ++#define HOSTIF_CTRL_SIGNED 0 ++ ++/* ++ Register DUT_CONTROL_1 ++*/ ++#define TCF_CLK_CTRL_DUT_CONTROL_1 0x0108 ++#define DUT_CTRL_1_MASK 0xFFFFFFFFU ++#define DUT_CTRL_1_SHIFT 0 ++#define DUT_CTRL_1_SIGNED 0 ++ ++/* TC ES2 additional needs those: */ ++#define DUT_CTRL_TEST_MODE_SHIFT 0 ++#define DUT_CTRL_TEST_MODE_MASK 0x3 ++ ++#define DUT_CTRL_VCC_0V9EN (1<<12) ++#define DUT_CTRL_VCC_1V8EN (1<<13) ++#define DUT_CTRL_VCC_IO_INH (1<<14) ++#define DUT_CTRL_VCC_CORE_INH (1<<15) ++ ++/* ++ Register DUT_STATUS_1 ++*/ ++#define TCF_CLK_CTRL_DUT_STATUS_1 0x0110 ++#define DUT_STATUS_1_MASK 0xFFFFFFFFU ++#define DUT_STATUS_1_SHIFT 0 ++#define DUT_STATUS_1_SIGNED 0 ++ ++/* ++ Register DUT_CTRL_NOT_STAT_1 ++*/ ++#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_1 0x0118 ++#define DUT_STAT_NOT_CTRL_1_MASK 0xFFFFFFFFU ++#define DUT_STAT_NOT_CTRL_1_SHIFT 0 ++#define DUT_STAT_NOT_CTRL_1_SIGNED 0 ++ ++/* ++ Register DUT_CONTROL_2 ++*/ ++#define TCF_CLK_CTRL_DUT_CONTROL_2 0x0120 ++#define DUT_CTRL_2_MASK 0xFFFFFFFFU ++#define DUT_CTRL_2_SHIFT 0 ++#define DUT_CTRL_2_SIGNED 0 ++ ++/* ++ Register DUT_STATUS_2 ++*/ ++#define TCF_CLK_CTRL_DUT_STATUS_2 0x0128 ++#define DUT_STATUS_2_MASK 0xFFFFFFFFU ++#define DUT_STATUS_2_SHIFT 0 ++#define DUT_STATUS_2_SIGNED 0 ++ ++/* ++ Register DUT_CTRL_NOT_STAT_2 ++*/ ++#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_2 0x0130 ++#define DUT_CTRL_NOT_STAT_2_MASK 0xFFFFFFFFU ++#define DUT_CTRL_NOT_STAT_2_SHIFT 0 ++#define DUT_CTRL_NOT_STAT_2_SIGNED 0 ++ ++/* ++ Register BUS_CAP_BASE_ADDR ++*/ ++#define TCF_CLK_CTRL_BUS_CAP_BASE_ADDR 0x0138 ++#define BUS_CAP_BASE_ADDR_MASK 0xFFFFFFFFU ++#define BUS_CAP_BASE_ADDR_SHIFT 0 ++#define BUS_CAP_BASE_ADDR_SIGNED 0 ++ ++/* ++ Register BUS_CAP_ENABLE ++*/ ++#define TCF_CLK_CTRL_BUS_CAP_ENABLE 0x0140 ++#define BUS_CAP_ENABLE_MASK 0x00000001U ++#define BUS_CAP_ENABLE_SHIFT 0 ++#define BUS_CAP_ENABLE_SIGNED 0 ++ ++/* ++ Register BUS_CAP_COUNT ++*/ ++#define TCF_CLK_CTRL_BUS_CAP_COUNT 0x0148 ++#define BUS_CAP_COUNT_MASK 0xFFFFFFFFU ++#define 
BUS_CAP_COUNT_SHIFT 0 ++#define BUS_CAP_COUNT_SIGNED 0 ++ ++/* ++ Register DCM_LOCK_STATUS ++*/ ++#define TCF_CLK_CTRL_DCM_LOCK_STATUS 0x0150 ++#define DCM_LOCK_STATUS_MASK 0x00000007U ++#define DCM_LOCK_STATUS_SHIFT 0 ++#define DCM_LOCK_STATUS_SIGNED 0 ++ ++/* ++ Register AUX_DUT_RESETNS ++*/ ++#define TCF_CLK_CTRL_AUX_DUT_RESETNS 0x0158 ++#define AUX_DUT_RESETNS_MASK 0x0000000FU ++#define AUX_DUT_RESETNS_SHIFT 0 ++#define AUX_DUT_RESETNS_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_ADDR_RDNWR ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR 0x0160 ++#define TCF_SPI_MST_ADDR_MASK 0x0003FFFFU ++#define TCF_SPI_MST_ADDR_SHIFT 0 ++#define TCF_SPI_MST_ADDR_SIGNED 0 ++ ++#define TCF_SPI_MST_RDNWR_MASK 0x00040000U ++#define TCF_SPI_MST_RDNWR_SHIFT 18 ++#define TCF_SPI_MST_RDNWR_SIGNED 0 ++ ++#define TCF_SPI_MST_SLAVE_ID_MASK 0x00080000U ++#define TCF_SPI_MST_SLAVE_ID_SHIFT 19 ++#define TCF_SPI_MST_SLAVE_ID_SIGNED 0 ++ ++#define TCF_SPI_MST_MASTER_ID_MASK 0x00300000U ++#define TCF_SPI_MST_MASTER_ID_SHIFT 20 ++#define TCF_SPI_MST_MASTER_ID_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_WDATA ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_WDATA 0x0168 ++#define TCF_SPI_MST_WDATA_MASK 0xFFFFFFFFU ++#define TCF_SPI_MST_WDATA_SHIFT 0 ++#define TCF_SPI_MST_WDATA_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_RDATA ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_RDATA 0x0170 ++#define TCF_SPI_MST_RDATA_MASK 0xFFFFFFFFU ++#define TCF_SPI_MST_RDATA_SHIFT 0 ++#define TCF_SPI_MST_RDATA_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_STATUS ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_STATUS 0x0178 ++#define TCF_SPI_MST_STATUS_MASK 0x0000000FU ++#define TCF_SPI_MST_STATUS_SHIFT 0 ++#define TCF_SPI_MST_STATUS_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_GO ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_GO 0x0180 ++#define TCF_SPI_MST_GO_MASK 0x00000001U ++#define TCF_SPI_MST_GO_SHIFT 0 ++#define TCF_SPI_MST_GO_SIGNED 0 ++ ++/* ++ Register EXT_SIG_CTRL ++*/ ++#define TCF_CLK_CTRL_EXT_SIG_CTRL 0x0188 ++#define EXT_SYS_REQ_SIG_START_MASK 0x00000001U ++#define EXT_SYS_REQ_SIG_START_SHIFT 0 ++#define EXT_SYS_REQ_SIG_START_SIGNED 0 ++ ++#define EXT_SYS_RD_SIG_START_MASK 0x00000002U ++#define EXT_SYS_RD_SIG_START_SHIFT 1 ++#define EXT_SYS_RD_SIG_START_SIGNED 0 ++ ++#define EXT_MEM_REQ_SIG_START_MASK 0x00000004U ++#define EXT_MEM_REQ_SIG_START_SHIFT 2 ++#define EXT_MEM_REQ_SIG_START_SIGNED 0 ++ ++#define EXT_MEM_RD_SIG_START_MASK 0x00000008U ++#define EXT_MEM_RD_SIG_START_SHIFT 3 ++#define EXT_MEM_RD_SIG_START_SIGNED 0 ++ ++/* ++ Register EXT_SYS_REQ_SIG ++*/ ++#define TCF_CLK_CTRL_EXT_SYS_REQ_SIG 0x0190 ++#define EXT_SYS_REQ_SIG_MASK 0xFFFFFFFFU ++#define EXT_SYS_REQ_SIG_SHIFT 0 ++#define EXT_SYS_REQ_SIG_SIGNED 0 ++ ++/* ++ Register EXT_SYS_RD_SIG ++*/ ++#define TCF_CLK_CTRL_EXT_SYS_RD_SIG 0x0198 ++#define EXT_SYS_RD_SIG_MASK 0xFFFFFFFFU ++#define EXT_SYS_RD_SIG_SHIFT 0 ++#define EXT_SYS_RD_SIG_SIGNED 0 ++ ++/* ++ Register EXT_MEM_REQ_SIG ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_REQ_SIG 0x01A0 ++#define EXT_MEM_REQ_SIG_MASK 0xFFFFFFFFU ++#define EXT_MEM_REQ_SIG_SHIFT 0 ++#define EXT_MEM_REQ_SIG_SIGNED 0 ++ ++/* ++ Register EXT_MEM_RD_SIG ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_RD_SIG 0x01A8 ++#define EXT_MEM_RD_SIG_MASK 0xFFFFFFFFU ++#define EXT_MEM_RD_SIG_SHIFT 0 ++#define EXT_MEM_RD_SIG_SIGNED 0 ++ ++/* ++ Register EXT_SYS_REQ_WR_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_SYS_REQ_WR_CNT 0x01B0 ++#define EXT_SYS_REQ_WR_CNT_MASK 0xFFFFFFFFU ++#define EXT_SYS_REQ_WR_CNT_SHIFT 0 ++#define EXT_SYS_REQ_WR_CNT_SIGNED 0 ++ ++/* ++ Register EXT_SYS_REQ_RD_CNT ++*/ ++#define 
TCF_CLK_CTRL_EXT_SYS_REQ_RD_CNT 0x01B8 ++#define EXT_SYS_REQ_RD_CNT_MASK 0xFFFFFFFFU ++#define EXT_SYS_REQ_RD_CNT_SHIFT 0 ++#define EXT_SYS_REQ_RD_CNT_SIGNED 0 ++ ++/* ++ Register EXT_SYS_RD_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_SYS_RD_CNT 0x01C0 ++#define EXT_SYS_RD_CNT_MASK 0xFFFFFFFFU ++#define EXT_SYS_RD_CNT_SHIFT 0 ++#define EXT_SYS_RD_CNT_SIGNED 0 ++ ++/* ++ Register EXT_MEM_REQ_WR_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_REQ_WR_CNT 0x01C8 ++#define EXT_MEM_REQ_WR_CNT_MASK 0xFFFFFFFFU ++#define EXT_MEM_REQ_WR_CNT_SHIFT 0 ++#define EXT_MEM_REQ_WR_CNT_SIGNED 0 ++ ++/* ++ Register EXT_MEM_REQ_RD_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_REQ_RD_CNT 0x01D0 ++#define EXT_MEM_REQ_RD_CNT_MASK 0xFFFFFFFFU ++#define EXT_MEM_REQ_RD_CNT_SHIFT 0 ++#define EXT_MEM_REQ_RD_CNT_SIGNED 0 ++ ++/* ++ Register EXT_MEM_RD_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_RD_CNT 0x01D8 ++#define EXT_MEM_RD_CNT_MASK 0xFFFFFFFFU ++#define EXT_MEM_RD_CNT_SHIFT 0 ++#define EXT_MEM_RD_CNT_SIGNED 0 ++ ++/* ++ Register TCF_CORE_TARGET_BUILD_CFG ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG 0x01E0 ++#define TCF_CORE_TARGET_BUILD_ID_MASK 0x000000FFU ++#define TCF_CORE_TARGET_BUILD_ID_SHIFT 0 ++#define TCF_CORE_TARGET_BUILD_ID_SIGNED 0 ++ ++/* ++ Register MEM_THROUGH_SYS ++*/ ++#define TCF_CLK_CTRL_MEM_THROUGH_SYS 0x01E8 ++#define MEM_THROUGH_SYS_MASK 0x00000001U ++#define MEM_THROUGH_SYS_SHIFT 0 ++#define MEM_THROUGH_SYS_SIGNED 0 ++ ++/* ++ Register HOST_PHY_OFFSET ++*/ ++#define TCF_CLK_CTRL_HOST_PHY_OFFSET 0x01F0 ++#define HOST_PHY_OFFSET_MASK 0xFFFFFFFFU ++#define HOST_PHY_OFFSET_SHIFT 0 ++#define HOST_PHY_OFFSET_SIGNED 0 ++ ++/* ++ Register DEBUG_REG_SEL ++*/ ++#define TCF_CLK_CTRL_DEBUG_REG_SEL 0x01F8 ++#define DEBUG_REG_SELECT_MASK 0xFFFFFFFFU ++#define DEBUG_REG_SELECT_SHIFT 0 ++#define DEBUG_REG_SELECT_SIGNED 0 ++ ++/* ++ Register DEBUG_REG ++*/ ++#define TCF_CLK_CTRL_DEBUG_REG 0x0200 ++#define DEBUG_REG_VALUE_MASK 0xFFFFFFFFU ++#define DEBUG_REG_VALUE_SHIFT 0 ++#define DEBUG_REG_VALUE_SIGNED 0 ++ ++/* ++ Register JTAG_CTRL ++*/ ++#define TCF_CLK_CTRL_JTAG_CTRL 0x0208 ++#define JTAG_TRST_MASK 0x00000001U ++#define JTAG_TRST_SHIFT 0 ++#define JTAG_TRST_SIGNED 0 ++ ++#define JTAG_TMS_MASK 0x00000002U ++#define JTAG_TMS_SHIFT 1 ++#define JTAG_TMS_SIGNED 0 ++ ++#define JTAG_TCK_MASK 0x00000004U ++#define JTAG_TCK_SHIFT 2 ++#define JTAG_TCK_SIGNED 0 ++ ++#define JTAG_TDO_MASK 0x00000008U ++#define JTAG_TDO_SHIFT 3 ++#define JTAG_TDO_SIGNED 0 ++ ++#define JTAG_TDI_MASK 0x00000010U ++#define JTAG_TDI_SHIFT 4 ++#define JTAG_TDI_SIGNED 0 ++ ++#define JTAG_DASH_N_REG_MASK 0x40000000U ++#define JTAG_DASH_N_REG_SHIFT 30 ++#define JTAG_DASH_N_REG_SIGNED 0 ++ ++#define JTAG_DISABLE_MASK 0x80000000U ++#define JTAG_DISABLE_SHIFT 31 ++#define JTAG_DISABLE_SIGNED 0 ++ ++/* ++ Register SAI_DEBUG_RDNWR ++*/ ++#define TCF_CLK_CTRL_SAI_DEBUG_RDNWR 0x0300 ++#define SAI_DEBUG_REG_ADDR_MASK 0x000001FFU ++#define SAI_DEBUG_REG_ADDR_SHIFT 0 ++#define SAI_DEBUG_REG_ADDR_SIGNED 0 ++ ++#define SAI_DEBUG_REG_RDNWR_MASK 0x00000200U ++#define SAI_DEBUG_REG_RDNWR_SHIFT 9 ++#define SAI_DEBUG_REG_RDNWR_SIGNED 0 ++ ++/* ++ Register SAI_DEBUG_WDATA ++*/ ++#define TCF_CLK_CTRL_SAI_DEBUG_WDATA 0x0308 ++#define SAI_DEBUG_REG_WDATA_MASK 0xFFFFFFFFU ++#define SAI_DEBUG_REG_WDATA_SHIFT 0 ++#define SAI_DEBUG_REG_WDATA_SIGNED 0 ++ ++/* ++ Register SAI_DEBUG_RDATA ++*/ ++#define TCF_CLK_CTRL_SAI_DEBUG_RDATA 0x0310 ++#define SAI_DEBUG_REG_RDATA_MASK 0xFFFFFFFFU ++#define SAI_DEBUG_REG_RDATA_SHIFT 0 ++#define SAI_DEBUG_REG_RDATA_SIGNED 0 ++ ++/* ++ Register 
SAI_DEBUG_GO
++*/
++#define TCF_CLK_CTRL_SAI_DEBUG_GO 0x0318
++#define SAI_DEBUG_REG_GO_MASK 0x00000001U
++#define SAI_DEBUG_REG_GO_SHIFT 0
++#define SAI_DEBUG_REG_GO_SIGNED 0
++
++/*
++ Register AUX_DUT_RESETS
++*/
++#define TCF_CLK_CTRL_AUX_DUT_RESETS 0x0320
++#define AUX_DUT_RESETS_MASK 0x0000000FU
++#define AUX_DUT_RESETS_SHIFT 0
++#define AUX_DUT_RESETS_SIGNED 0
++
++/*
++ Register DUT_CLK_CTRL
++*/
++#define TCF_CLK_CTRL_DUT_CLK_CTRL 0x0328
++#define MEM_REQ_PHSE_MASK 0x0000FFFFU
++#define MEM_REQ_PHSE_SHIFT 0
++#define MEM_REQ_PHSE_SIGNED 0
++
++/*
++ Register DUT_CLK_STATUS
++*/
++#define TCF_CLK_CTRL_DUT_CLK_STATUS 0x0330
++#define MEM_REQ_PHSE_SET_MASK 0x00000003U
++#define MEM_REQ_PHSE_SET_SHIFT 0
++#define MEM_REQ_PHSE_SET_SIGNED 0
++
++/*
++ Register DUT_CLK_INFO
++*/
++#define TCF_CLK_CTRL_DUT_CLK_INFO 0x0340
++#define CORE_MASK 0x0000FFFFU
++#define CORE_SHIFT 0
++#define CORE_SIGNED 0
++
++#define MEM_MASK 0xFFFF0000U
++#define MEM_SHIFT 16
++#define MEM_SIGNED 0
++
++/*
++ Register DUT_CLK_PHSE
++*/
++#define TCF_CLK_CTRL_DUT_CLK_PHSE 0x0348
++#define MEM_REQ_MASK 0x0000FFFFU
++#define MEM_REQ_SHIFT 0
++#define MEM_REQ_SIGNED 0
++
++#define MEM_RD_MASK 0xFFFF0000U
++#define MEM_RD_SHIFT 16
++#define MEM_RD_SIGNED 0
++
++#endif /* !defined(_TCF_CLK_CTRL_H_) */
++
++/*****************************************************************************
++ End of file (tcf_clk_ctrl.h)
++*****************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/apollo/tcf_pll.h b/drivers/gpu/drm/img-rogue/apollo/tcf_pll.h
+new file mode 100644
+index 000000000000..71eaf924bbd6
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/apollo/tcf_pll.h
+@@ -0,0 +1,311 @@
++/*************************************************************************/ /*!
++@Title Test Chip Framework PLL register definitions
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Autogenerated C -- do not edit
++ Generated from tcf_pll.def
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(_TCF_PLL_H_) ++#define _TCF_PLL_H_ ++ ++/* ++ Register PLL_DDR2_CLK0 ++*/ ++#define TCF_PLL_PLL_DDR2_CLK0 0x0000 ++#define DDR2_PLL_CLK0_PHS_MASK 0x00300000U ++#define DDR2_PLL_CLK0_PHS_SHIFT 20 ++#define DDR2_PLL_CLK0_PHS_SIGNED 0 ++ ++#define DDR2_PLL_CLK0_MS_MASK 0x00030000U ++#define DDR2_PLL_CLK0_MS_SHIFT 16 ++#define DDR2_PLL_CLK0_MS_SIGNED 0 ++ ++#define DDR2_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define DDR2_PLL_CLK0_FREQ_SHIFT 0 ++#define DDR2_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_DDR2_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_DDR2_CLK1TO5 0x0008 ++#define DDR2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define DDR2_PLL_CLK1TO5_PHS_SHIFT 20 ++#define DDR2_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define DDR2_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define DDR2_PLL_CLK1TO5_MS_SHIFT 10 ++#define DDR2_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define DDR2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define DDR2_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define DDR2_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_DDR2_DRP_GO ++*/ ++#define TCF_PLL_PLL_DDR2_DRP_GO 0x0010 ++#define PLL_DDR2_DRP_GO_MASK 0x00000001U ++#define PLL_DDR2_DRP_GO_SHIFT 0 ++#define PLL_DDR2_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_PDP_CLK0 ++*/ ++#define TCF_PLL_PLL_PDP_CLK0 0x0018 ++#define PDP_PLL_CLK0_PHS_MASK 0x00300000U ++#define PDP_PLL_CLK0_PHS_SHIFT 20 ++#define PDP_PLL_CLK0_PHS_SIGNED 0 ++ ++#define PDP_PLL_CLK0_MS_MASK 0x00030000U ++#define PDP_PLL_CLK0_MS_SHIFT 16 ++#define PDP_PLL_CLK0_MS_SIGNED 0 ++ ++#define PDP_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define PDP_PLL_CLK0_FREQ_SHIFT 0 ++#define PDP_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_PDP_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_PDP_CLK1TO5 0x0020 ++#define PDP_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define PDP_PLL_CLK1TO5_PHS_SHIFT 20 ++#define PDP_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define PDP_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define PDP_PLL_CLK1TO5_MS_SHIFT 10 ++#define PDP_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define PDP_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define PDP_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define PDP_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_PDP_DRP_GO ++*/ ++#define TCF_PLL_PLL_PDP_DRP_GO 0x0028 ++#define PLL_PDP_DRP_GO_MASK 0x00000001U ++#define PLL_PDP_DRP_GO_SHIFT 0 ++#define PLL_PDP_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_PDP2_CLK0 ++*/ ++#define TCF_PLL_PLL_PDP2_CLK0 0x0030 ++#define PDP2_PLL_CLK0_PHS_MASK 0x00300000U ++#define PDP2_PLL_CLK0_PHS_SHIFT 20 ++#define PDP2_PLL_CLK0_PHS_SIGNED 0 ++ ++#define PDP2_PLL_CLK0_MS_MASK 0x00030000U ++#define PDP2_PLL_CLK0_MS_SHIFT 16 ++#define PDP2_PLL_CLK0_MS_SIGNED 0 ++ ++#define PDP2_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define PDP2_PLL_CLK0_FREQ_SHIFT 0 ++#define PDP2_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_PDP2_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_PDP2_CLK1TO5 0x0038 ++#define PDP2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define PDP2_PLL_CLK1TO5_PHS_SHIFT 20 ++#define PDP2_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define 
PDP2_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define PDP2_PLL_CLK1TO5_MS_SHIFT 10 ++#define PDP2_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define PDP2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define PDP2_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define PDP2_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_PDP2_DRP_GO ++*/ ++#define TCF_PLL_PLL_PDP2_DRP_GO 0x0040 ++#define PLL_PDP2_DRP_GO_MASK 0x00000001U ++#define PLL_PDP2_DRP_GO_SHIFT 0 ++#define PLL_PDP2_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_CORE_CLK0 ++*/ ++#define TCF_PLL_PLL_CORE_CLK0 0x0048 ++#define CORE_PLL_CLK0_PHS_MASK 0x00300000U ++#define CORE_PLL_CLK0_PHS_SHIFT 20 ++#define CORE_PLL_CLK0_PHS_SIGNED 0 ++ ++#define CORE_PLL_CLK0_MS_MASK 0x00030000U ++#define CORE_PLL_CLK0_MS_SHIFT 16 ++#define CORE_PLL_CLK0_MS_SIGNED 0 ++ ++#define CORE_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define CORE_PLL_CLK0_FREQ_SHIFT 0 ++#define CORE_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_CORE_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_CORE_CLK1TO5 0x0050 ++#define CORE_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define CORE_PLL_CLK1TO5_PHS_SHIFT 20 ++#define CORE_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define CORE_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define CORE_PLL_CLK1TO5_MS_SHIFT 10 ++#define CORE_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define CORE_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define CORE_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define CORE_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_CORE_DRP_GO ++*/ ++#define TCF_PLL_PLL_CORE_DRP_GO 0x0058 ++#define PLL_CORE_DRP_GO_MASK 0x00000001U ++#define PLL_CORE_DRP_GO_SHIFT 0 ++#define PLL_CORE_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_SYSIF_CLK0 ++*/ ++#define TCF_PLL_PLL_SYSIF_CLK0 0x0060 ++#define SYSIF_PLL_CLK0_PHS_MASK 0x00300000U ++#define SYSIF_PLL_CLK0_PHS_SHIFT 20 ++#define SYSIF_PLL_CLK0_PHS_SIGNED 0 ++ ++#define SYSIF_PLL_CLK0_MS_MASK 0x00030000U ++#define SYSIF_PLL_CLK0_MS_SHIFT 16 ++#define SYSIF_PLL_CLK0_MS_SIGNED 0 ++ ++#define SYSIF_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define SYSIF_PLL_CLK0_FREQ_SHIFT 0 ++#define SYSIF_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_SYSIF_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_SYSIF_CLK1TO5 0x0068 ++#define SYSIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define SYSIF_PLL_CLK1TO5_PHS_SHIFT 20 ++#define SYSIF_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define SYSIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define SYSIF_PLL_CLK1TO5_MS_SHIFT 10 ++#define SYSIF_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define SYSIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define SYSIF_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define SYSIF_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_SYS_DRP_GO ++*/ ++#define TCF_PLL_PLL_SYS_DRP_GO 0x0070 ++#define PLL_SYS_DRP_GO_MASK 0x00000001U ++#define PLL_SYS_DRP_GO_SHIFT 0 ++#define PLL_SYS_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_MEMIF_CLK0 ++*/ ++#define TCF_PLL_PLL_MEMIF_CLK0 0x0078 ++#define MEMIF_PLL_CLK0_PHS_MASK 0x00300000U ++#define MEMIF_PLL_CLK0_PHS_SHIFT 20 ++#define MEMIF_PLL_CLK0_PHS_SIGNED 0 ++ ++#define MEMIF_PLL_CLK0_MS_MASK 0x00030000U ++#define MEMIF_PLL_CLK0_MS_SHIFT 16 ++#define MEMIF_PLL_CLK0_MS_SIGNED 0 ++ ++#define MEMIF_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define MEMIF_PLL_CLK0_FREQ_SHIFT 0 ++#define MEMIF_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_MEMIF_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_MEMIF_CLK1TO5 0x0080 ++#define MEMIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define MEMIF_PLL_CLK1TO5_PHS_SHIFT 20 ++#define MEMIF_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define MEMIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define MEMIF_PLL_CLK1TO5_MS_SHIFT 10 ++#define MEMIF_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define MEMIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define 
MEMIF_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define MEMIF_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_MEM_DRP_GO ++*/ ++#define TCF_PLL_PLL_MEM_DRP_GO 0x0088 ++#define PLL_MEM_DRP_GO_MASK 0x00000001U ++#define PLL_MEM_DRP_GO_SHIFT 0 ++#define PLL_MEM_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_ALL_DRP_GO ++*/ ++#define TCF_PLL_PLL_ALL_DRP_GO 0x0090 ++#define PLL_ALL_DRP_GO_MASK 0x00000001U ++#define PLL_ALL_DRP_GO_SHIFT 0 ++#define PLL_ALL_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_DRP_STATUS ++*/ ++#define TCF_PLL_PLL_DRP_STATUS 0x0098 ++#define PLL_LOCKS_MASK 0x00003F00U ++#define PLL_LOCKS_SHIFT 8 ++#define PLL_LOCKS_SIGNED 0 ++ ++#define PLL_DRP_GOOD_MASK 0x0000003FU ++#define PLL_DRP_GOOD_SHIFT 0 ++#define PLL_DRP_GOOD_SIGNED 0 ++ ++#endif /* !defined(_TCF_PLL_H_) */ ++ ++/***************************************************************************** ++ End of file (tcf_pll.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h b/drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h +new file mode 100644 +index 000000000000..e87ba6152411 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/apollo/tcf_rgbpdp_regs.h +@@ -0,0 +1,559 @@ ++/*************************************************************************/ /*! ++@Title Test Chip Framework PDP register definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Autogenerated C -- do not edit ++ Generated from: tcf_rgbpdp_regs.def ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(_TCF_RGBPDP_REGS_H_) ++#define _TCF_RGBPDP_REGS_H_ ++ ++/* ++ Register PVR_TCF_RGBPDP_STR1SURF ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF 0x0000 ++#define STR1HEIGHT_MASK 0x000007FFU ++#define STR1HEIGHT_SHIFT 0 ++#define STR1HEIGHT_SIGNED 0 ++ ++#define STR1WIDTH_MASK 0x003FF800U ++#define STR1WIDTH_SHIFT 11 ++#define STR1WIDTH_SIGNED 0 ++ ++#define STR1PIXFMT_MASK 0x0F000000U ++#define STR1PIXFMT_SHIFT 24 ++#define STR1PIXFMT_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_STR1ADDRCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL 0x0004 ++#define STR1BASE_MASK 0x03FFFFFFU ++#define STR1BASE_SHIFT 0 ++#define STR1BASE_SIGNED 0 ++ ++#define STR1INTFIELD_MASK 0x40000000U ++#define STR1INTFIELD_SHIFT 30 ++#define STR1INTFIELD_SIGNED 0 ++ ++#define STR1STREN_MASK 0x80000000U ++#define STR1STREN_SHIFT 31 ++#define STR1STREN_SIGNED 0 ++ ++/* ++ Register PVR_PDP_STR1POSN ++*/ ++#define TCF_RGBPDP_PVR_PDP_STR1POSN 0x0008 ++#define STR1STRIDE_MASK 0x000003FFU ++#define STR1STRIDE_SHIFT 0 ++#define STR1STRIDE_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_MEMCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_MEMCTRL 0x000C ++#define MEMREFRESH_MASK 0xC0000000U ++#define MEMREFRESH_SHIFT 30 ++#define MEMREFRESH_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_STRCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL 0x0010 ++#define BURSTLEN_GFX_MASK 0x000000FFU ++#define BURSTLEN_GFX_SHIFT 0 ++#define BURSTLEN_GFX_SIGNED 0 ++ ++#define THRESHOLD_GFX_MASK 0x0000FF00U ++#define THRESHOLD_GFX_SHIFT 8 ++#define THRESHOLD_GFX_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_SYNCCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL 0x0014 ++#define HSDIS_MASK 0x00000001U ++#define HSDIS_SHIFT 0 ++#define HSDIS_SIGNED 0 ++ ++#define HSPOL_MASK 0x00000002U ++#define HSPOL_SHIFT 1 ++#define HSPOL_SIGNED 0 ++ ++#define VSDIS_MASK 0x00000004U ++#define VSDIS_SHIFT 2 ++#define VSDIS_SIGNED 0 ++ ++#define VSPOL_MASK 0x00000008U ++#define VSPOL_SHIFT 3 ++#define VSPOL_SIGNED 0 ++ ++#define BLNKDIS_MASK 0x00000010U ++#define BLNKDIS_SHIFT 4 ++#define BLNKDIS_SIGNED 0 ++ ++#define BLNKPOL_MASK 0x00000020U ++#define BLNKPOL_SHIFT 5 ++#define BLNKPOL_SIGNED 0 ++ ++#define HS_SLAVE_MASK 0x00000040U ++#define HS_SLAVE_SHIFT 6 ++#define HS_SLAVE_SIGNED 0 ++ ++#define VS_SLAVE_MASK 0x00000080U ++#define VS_SLAVE_SHIFT 7 ++#define VS_SLAVE_SIGNED 0 ++ ++#define INTERLACE_MASK 0x00000100U ++#define INTERLACE_SHIFT 8 ++#define INTERLACE_SIGNED 0 ++ ++#define FIELDPOL_MASK 0x00000200U ++#define FIELDPOL_SHIFT 9 ++#define FIELDPOL_SIGNED 0 ++ ++#define CLKPOL_MASK 0x00000800U ++#define CLKPOL_SHIFT 11 ++#define CLKPOL_SIGNED 0 ++ ++#define CSYNC_EN_MASK 0x00001000U ++#define CSYNC_EN_SHIFT 12 ++#define CSYNC_EN_SIGNED 0 ++ ++#define FIELD_EN_MASK 0x00002000U ++#define FIELD_EN_SHIFT 13 ++#define FIELD_EN_SIGNED 0 ++ ++#define UPDWAIT_MASK 0x000F0000U ++#define UPDWAIT_SHIFT 16 ++#define 
UPDWAIT_SIGNED 0 ++ ++#define UPDCTRL_MASK 0x01000000U ++#define UPDCTRL_SHIFT 24 ++#define UPDCTRL_SIGNED 0 ++ ++#define UPDINTCTRL_MASK 0x02000000U ++#define UPDINTCTRL_SHIFT 25 ++#define UPDINTCTRL_SIGNED 0 ++ ++#define UPDSYNCTRL_MASK 0x04000000U ++#define UPDSYNCTRL_SHIFT 26 ++#define UPDSYNCTRL_SIGNED 0 ++ ++#define POWERDN_MASK 0x10000000U ++#define POWERDN_SHIFT 28 ++#define POWERDN_SIGNED 0 ++ ++#define DISP_RST_MASK 0x20000000U ++#define DISP_RST_SHIFT 29 ++#define DISP_RST_SIGNED 0 ++ ++#define SYNCACTIVE_MASK 0x80000000U ++#define SYNCACTIVE_SHIFT 31 ++#define SYNCACTIVE_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_BORDCOL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL 0x0018 ++#define BORDCOL_MASK 0x00FFFFFFU ++#define BORDCOL_SHIFT 0 ++#define BORDCOL_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_UPDCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL 0x001C ++#define UPDFIELD_MASK 0x00000001U ++#define UPDFIELD_SHIFT 0 ++#define UPDFIELD_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_HSYNC1 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1 0x0020 ++#define HT_MASK 0x00000FFFU ++#define HT_SHIFT 0 ++#define HT_SIGNED 0 ++ ++#define HBPS_MASK 0x0FFF0000U ++#define HBPS_SHIFT 16 ++#define HBPS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_HSYNC2 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2 0x0024 ++#define HLBS_MASK 0x00000FFFU ++#define HLBS_SHIFT 0 ++#define HLBS_SIGNED 0 ++ ++#define HAS_MASK 0x0FFF0000U ++#define HAS_SHIFT 16 ++#define HAS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_HSYNC3 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3 0x0028 ++#define HRBS_MASK 0x00000FFFU ++#define HRBS_SHIFT 0 ++#define HRBS_SIGNED 0 ++ ++#define HFPS_MASK 0x0FFF0000U ++#define HFPS_SHIFT 16 ++#define HFPS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VSYNC1 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1 0x002C ++#define VT_MASK 0x00000FFFU ++#define VT_SHIFT 0 ++#define VT_SIGNED 0 ++ ++#define VBPS_MASK 0x0FFF0000U ++#define VBPS_SHIFT 16 ++#define VBPS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VSYNC2 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2 0x0030 ++#define VTBS_MASK 0x00000FFFU ++#define VTBS_SHIFT 0 ++#define VTBS_SIGNED 0 ++ ++#define VAS_MASK 0x0FFF0000U ++#define VAS_SHIFT 16 ++#define VAS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VSYNC3 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3 0x0034 ++#define VBBS_MASK 0x00000FFFU ++#define VBBS_SHIFT 0 ++#define VBBS_SIGNED 0 ++ ++#define VFPS_MASK 0x0FFF0000U ++#define VFPS_SHIFT 16 ++#define VFPS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_HDECTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL 0x0038 ++#define HDEF_MASK 0x00000FFFU ++#define HDEF_SHIFT 0 ++#define HDEF_SIGNED 0 ++ ++#define HDES_MASK 0x0FFF0000U ++#define HDES_SHIFT 16 ++#define HDES_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VDECTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL 0x003C ++#define VDEF_MASK 0x00000FFFU ++#define VDEF_SHIFT 0 ++#define VDEF_SIGNED 0 ++ ++#define VDES_MASK 0x0FFF0000U ++#define VDES_SHIFT 16 ++#define VDES_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VEVENT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT 0x0040 ++#define VFETCH_MASK 0x00000FFFU ++#define VFETCH_SHIFT 0 ++#define VFETCH_SIGNED 0 ++ ++#define VEVENT_MASK 0x0FFF0000U ++#define VEVENT_SHIFT 16 ++#define VEVENT_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_OPMASK ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_OPMASK 0x0044 ++#define MASKR_MASK 0x000000FFU ++#define MASKR_SHIFT 0 ++#define MASKR_SIGNED 0 ++ ++#define MASKG_MASK 0x0000FF00U ++#define MASKG_SHIFT 8 ++#define 
MASKG_SIGNED 0 ++ ++#define MASKB_MASK 0x00FF0000U ++#define MASKB_SHIFT 16 ++#define MASKB_SIGNED 0 ++ ++#define BLANKLEVEL_MASK 0x40000000U ++#define BLANKLEVEL_SHIFT 30 ++#define BLANKLEVEL_SIGNED 0 ++ ++#define MASKLEVEL_MASK 0x80000000U ++#define MASKLEVEL_SHIFT 31 ++#define MASKLEVEL_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_INTSTAT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT 0x0048 ++#define INTS_HBLNK0_MASK 0x00000001U ++#define INTS_HBLNK0_SHIFT 0 ++#define INTS_HBLNK0_SIGNED 0 ++ ++#define INTS_HBLNK1_MASK 0x00000002U ++#define INTS_HBLNK1_SHIFT 1 ++#define INTS_HBLNK1_SIGNED 0 ++ ++#define INTS_VBLNK0_MASK 0x00000004U ++#define INTS_VBLNK0_SHIFT 2 ++#define INTS_VBLNK0_SIGNED 0 ++ ++#define INTS_VBLNK1_MASK 0x00000008U ++#define INTS_VBLNK1_SHIFT 3 ++#define INTS_VBLNK1_SIGNED 0 ++ ++#define INTS_STR1URUN_MASK 0x00000010U ++#define INTS_STR1URUN_SHIFT 4 ++#define INTS_STR1URUN_SIGNED 0 ++ ++#define INTS_STR1ORUN_MASK 0x00000020U ++#define INTS_STR1ORUN_SHIFT 5 ++#define INTS_STR1ORUN_SIGNED 0 ++ ++#define INTS_DISPURUN_MASK 0x00000040U ++#define INTS_DISPURUN_SHIFT 6 ++#define INTS_DISPURUN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_INTENAB ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB 0x004C ++#define INTEN_HBLNK0_MASK 0x00000001U ++#define INTEN_HBLNK0_SHIFT 0 ++#define INTEN_HBLNK0_SIGNED 0 ++ ++#define INTEN_HBLNK1_MASK 0x00000002U ++#define INTEN_HBLNK1_SHIFT 1 ++#define INTEN_HBLNK1_SIGNED 0 ++ ++#define INTEN_VBLNK0_MASK 0x00000004U ++#define INTEN_VBLNK0_SHIFT 2 ++#define INTEN_VBLNK0_SIGNED 0 ++ ++#define INTEN_VBLNK1_MASK 0x00000008U ++#define INTEN_VBLNK1_SHIFT 3 ++#define INTEN_VBLNK1_SIGNED 0 ++ ++#define INTEN_STR1URUN_MASK 0x00000010U ++#define INTEN_STR1URUN_SHIFT 4 ++#define INTEN_STR1URUN_SIGNED 0 ++ ++#define INTEN_STR1ORUN_MASK 0x00000020U ++#define INTEN_STR1ORUN_SHIFT 5 ++#define INTEN_STR1ORUN_SIGNED 0 ++ ++#define INTEN_DISPURUN_MASK 0x00000040U ++#define INTEN_DISPURUN_SHIFT 6 ++#define INTEN_DISPURUN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_INTCLEAR ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR 0x0050 ++#define INTCLR_HBLNK0_MASK 0x00000001U ++#define INTCLR_HBLNK0_SHIFT 0 ++#define INTCLR_HBLNK0_SIGNED 0 ++ ++#define INTCLR_HBLNK1_MASK 0x00000002U ++#define INTCLR_HBLNK1_SHIFT 1 ++#define INTCLR_HBLNK1_SIGNED 0 ++ ++#define INTCLR_VBLNK0_MASK 0x00000004U ++#define INTCLR_VBLNK0_SHIFT 2 ++#define INTCLR_VBLNK0_SIGNED 0 ++ ++#define INTCLR_VBLNK1_MASK 0x00000008U ++#define INTCLR_VBLNK1_SHIFT 3 ++#define INTCLR_VBLNK1_SIGNED 0 ++ ++#define INTCLR_STR1URUN_MASK 0x00000010U ++#define INTCLR_STR1URUN_SHIFT 4 ++#define INTCLR_STR1URUN_SIGNED 0 ++ ++#define INTCLR_STR1ORUN_MASK 0x00000020U ++#define INTCLR_STR1ORUN_SHIFT 5 ++#define INTCLR_STR1ORUN_SIGNED 0 ++ ++#define INTCLR_DISPURUN_MASK 0x00000040U ++#define INTCLR_DISPURUN_SHIFT 6 ++#define INTCLR_DISPURUN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_INTCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL 0x0054 ++#define HBLNK_LINENO_MASK 0x00000FFFU ++#define HBLNK_LINENO_SHIFT 0 ++#define HBLNK_LINENO_SIGNED 0 ++ ++#define HBLNK_LINE_MASK 0x00010000U ++#define HBLNK_LINE_SHIFT 16 ++#define HBLNK_LINE_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_SIGNAT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_SIGNAT 0x0058 ++#define SIGNATURE_MASK 0xFFFFFFFFU ++#define SIGNATURE_SHIFT 0 ++#define SIGNATURE_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_LINESTAT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_LINESTAT 0x005C ++#define LINENO_MASK 0x00000FFFU ++#define LINENO_SHIFT 0 ++#define LINENO_SIGNED 0 ++ 
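++/*
++ Usage note: every field above is described by a MASK/SHIFT/SIGNED triplet --
++ MASK selects the field's bits within its 32-bit register, SHIFT is the bit
++ position of the field's least significant bit, and SIGNED flags a
++ two's-complement field (SIGNED is 0, i.e. unsigned, for every field here).
++ A minimal sketch of reading and updating a field with these triplets; the
++ ReadPdpReg()/WritePdpReg() MMIO accessors and ui32NewLineNo value are
++ hypothetical, not part of this header:
++
++     uint32_t ui32Val = ReadPdpReg(TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL);
++     uint32_t ui32LineNo = (ui32Val & HBLNK_LINENO_MASK) >> HBLNK_LINENO_SHIFT;
++     ui32Val = (ui32Val & ~HBLNK_LINENO_MASK) |
++               ((ui32NewLineNo << HBLNK_LINENO_SHIFT) & HBLNK_LINENO_MASK);
++     WritePdpReg(TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL, ui32Val);
++*/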
++/* ++ Register PVR_TCF_RGBPDP_DBGCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGCTRL 0x0060 ++#define DBG_ENAB_MASK 0x00000001U ++#define DBG_ENAB_SHIFT 0 ++#define DBG_ENAB_SIGNED 0 ++ ++#define DBG_READ_MASK 0x00000002U ++#define DBG_READ_SHIFT 1 ++#define DBG_READ_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_DBGDATA ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGDATA 0x0064 ++#define DBG_DATA_MASK 0x00FFFFFFU ++#define DBG_DATA_SHIFT 0 ++#define DBG_DATA_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_DBGSIDE ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGSIDE 0x0068 ++#define DBG_SIDE_MASK 0x00000007U ++#define DBG_SIDE_SHIFT 0 ++#define DBG_SIDE_SIGNED 0 ++ ++#define DBG_VAL_MASK 0x00000008U ++#define DBG_VAL_SHIFT 3 ++#define DBG_VAL_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_REGLD_STAT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_STAT 0x0070 ++#define REGLD_ADDROUT_MASK 0x00FFFFFFU ++#define REGLD_ADDROUT_SHIFT 0 ++#define REGLD_ADDROUT_SIGNED 0 ++ ++#define REGLD_ADDREN_MASK 0x80000000U ++#define REGLD_ADDREN_SHIFT 31 ++#define REGLD_ADDREN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_REGLD_CTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_CTRL 0x0074 ++#define REGLD_ADDRIN_MASK 0x00FFFFFFU ++#define REGLD_ADDRIN_SHIFT 0 ++#define REGLD_ADDRIN_SIGNED 0 ++ ++#define REGLD_VAL_MASK 0x01000000U ++#define REGLD_VAL_SHIFT 24 ++#define REGLD_VAL_SIGNED 0 ++ ++#define REGLD_ADDRLEN_MASK 0xFE000000U ++#define REGLD_ADDRLEN_SHIFT 25 ++#define REGLD_ADDRLEN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_CORE_ID ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_ID 0x0078 ++#define CONFIG_ID_MASK 0x0000FFFFU ++#define CONFIG_ID_SHIFT 0 ++#define CONFIG_ID_SIGNED 0 ++ ++#define CORE_ID_MASK 0x00FF0000U ++#define CORE_ID_SHIFT 16 ++#define CORE_ID_SIGNED 0 ++ ++#define GROUP_ID_MASK 0xFF000000U ++#define GROUP_ID_SHIFT 24 ++#define GROUP_ID_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_CORE_REV ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_REV 0x007C ++#define MAINT_REV_MASK 0x000000FFU ++#define MAINT_REV_SHIFT 0 ++#define MAINT_REV_SIGNED 0 ++ ++#define MINOR_REV_MASK 0x0000FF00U ++#define MINOR_REV_SHIFT 8 ++#define MINOR_REV_SIGNED 0 ++ ++#define MAJOR_REV_MASK 0x00FF0000U ++#define MAJOR_REV_SHIFT 16 ++#define MAJOR_REV_SIGNED 0 ++ ++#endif /* !defined(_TCF_RGBPDP_REGS_H_) */ ++ ++/***************************************************************************** ++ End of file (tcf_rgbpdp_regs.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/cache_km.c b/drivers/gpu/drm/img-rogue/cache_km.c +new file mode 100644 +index 000000000000..71ed28342d3d +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/cache_km.c +@@ -0,0 +1,1630 @@ ++/*************************************************************************/ /*! ++@File cache_km.c ++@Title CPU d-cache maintenance operations framework ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements server side code for CPU d-cache maintenance taking ++ into account the idiosyncrasies of the various types of CPU ++ d-cache instruction-set architecture (ISA) maintenance ++ mechanisms. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#if defined(__linux__) ++#include ++#include ++#include ++#include ++#include ++#include ++#endif ++ ++#include "pmr.h" ++#include "log2.h" ++#include "device.h" ++#include "pvrsrv.h" ++#include "osfunc.h" ++#include "cache_km.h" ++#include "pvr_debug.h" ++#include "lock_types.h" ++#include "allocmem.h" ++#include "process_stats.h" ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++#include "ri_server.h" ++#endif ++#include "devicemem.h" ++#include "pvrsrv_apphint.h" ++#include "pvrsrv_sync_server.h" ++#include "km_apphint_defs.h" ++#include "km_apphint_defs_common.h" ++#include "oskm_apphint.h" ++#include "di_server.h" ++ ++/* This header must always be included last */ ++#if defined(__linux__) ++#include "kernel_compatibility.h" ++#endif ++ ++/* Top-level file-local build definitions */ ++#if defined(PVRSRV_ENABLE_CACHEOP_STATS) && defined(__linux__) ++#define CACHEOP_DEBUG ++#define CACHEOP_STATS_ITEMS_MAX 32 ++#define INCR_WRAP(x) ((x+1) >= CACHEOP_STATS_ITEMS_MAX ? 0 : (x+1)) ++#define DECR_WRAP(x) ((x-1) < 0 ? 
(CACHEOP_STATS_ITEMS_MAX-1) : (x-1)) ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++/* Refer to CacheOpStatsExecLogHeader() for header item names */ ++#define CACHEOP_RI_PRINTF_HEADER "%-8s %-8s %-10s %-10s %-5s %-16s %-16s %-10s %-10s %-18s" ++#define CACHEOP_RI_PRINTF "%-8d %-8d %-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-18llu\n" ++#else ++#define CACHEOP_PRINTF_HEADER "%-8s %-8s %-10s %-10s %-5s %-10s %-10s %-18s" ++#define CACHEOP_PRINTF "%-8d %-8d %-10s %-10s %-5s 0x%-8llx 0x%-8llx %-18llu\n" ++#endif ++#endif ++ ++//#define CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING /* Force OS page (not cache line) flush granularity */ ++#define CACHEOP_PVR_ASSERT(x) /* Define as PVR_ASSERT(x), enable for swdev & testing */ ++#define CACHEOP_DEVMEM_OOR_ERROR_STRING "cacheop device memory request is out of range" ++#define CACHEOP_MAX_DEBUG_MESSAGE_LEN 160 ++ ++typedef struct _CACHEOP_WORK_ITEM_ ++{ ++ PMR *psPMR; ++ IMG_DEVMEM_SIZE_T uiSize; ++ PVRSRV_CACHE_OP uiCacheOp; ++ IMG_DEVMEM_OFFSET_T uiOffset; ++ PVRSRV_TIMELINE iTimeline; ++ SYNC_TIMELINE_OBJ sSWTimelineObj; ++ PVRSRV_DEVICE_NODE *psDevNode; ++#if defined(CACHEOP_DEBUG) ++ IMG_UINT64 ui64StartTime; ++ IMG_UINT64 ui64EndTime; ++ IMG_BOOL bKMReq; ++ IMG_PID pid; ++#endif ++} CACHEOP_WORK_ITEM; ++ ++typedef struct _CACHEOP_STATS_EXEC_ITEM_ ++{ ++ IMG_UINT32 ui32DeviceID; ++ IMG_PID pid; ++ PVRSRV_CACHE_OP uiCacheOp; ++ IMG_DEVMEM_SIZE_T uiOffset; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_UINT64 ui64StartTime; ++ IMG_UINT64 ui64EndTime; ++ IMG_BOOL bKMReq; ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ IMG_DEV_VIRTADDR sDevVAddr; ++ IMG_DEV_PHYADDR sDevPAddr; ++#endif ++} CACHEOP_STATS_EXEC_ITEM; ++ ++typedef enum _CACHEOP_CONFIG_ ++{ ++ CACHEOP_CONFIG_DEFAULT = 0, ++ /* cache flush mechanism types */ ++ CACHEOP_CONFIG_URBF = 4, ++ /* sw-emulated deferred flush mechanism */ ++ CACHEOP_CONFIG_KDF = 8, ++ /* pseudo configuration items */ ++ CACHEOP_CONFIG_LAST = 16, ++ CACHEOP_CONFIG_KLOG = 16, ++ CACHEOP_CONFIG_ALL = 31 ++} CACHEOP_CONFIG; ++ ++typedef struct _CACHEOP_WORK_QUEUE_ ++{ ++/* ++ * Init. state & primary device node framework ++ * is anchored on. ++ */ ++ IMG_BOOL bInit; ++/* ++ MMU page size/shift & d-cache line size ++ */ ++ size_t uiPageSize; ++ IMG_UINT32 uiLineSize; ++ IMG_UINT32 uiLineShift; ++ IMG_UINT32 uiPageShift; ++ OS_CACHE_OP_ADDR_TYPE uiCacheOpAddrType; ++ PMR *psInfoPagePMR; ++ IMG_UINT32 *pui32InfoPage; ++ ++#if defined(CACHEOP_DEBUG) ++/* ++ CacheOp statistics ++ */ ++ DI_ENTRY *psDIEntry; ++ IMG_HANDLE hStatsExecLock; ++ ++ IMG_UINT32 ui32ServerOps; ++ IMG_UINT32 ui32ClientOps; ++ IMG_UINT32 ui32TotalOps; ++ IMG_UINT32 ui32ServerOpUsedUMVA; ++ IMG_UINT32 ui32AvgExecTime; ++ IMG_UINT32 ui32AvgExecTimeRemainder; ++ ++ IMG_INT32 i32StatsExecWriteIdx; ++ CACHEOP_STATS_EXEC_ITEM asStatsExecuted[CACHEOP_STATS_ITEMS_MAX]; ++#endif ++ ++ DI_ENTRY *psConfigTune; ++ IMG_HANDLE hConfigLock; ++ CACHEOP_CONFIG eConfig; ++ IMG_UINT32 ui32Config; ++ IMG_BOOL bSupportsUMFlush; ++} CACHEOP_WORK_QUEUE; ++ ++/* Top-level CacheOp framework object */ ++static CACHEOP_WORK_QUEUE gsCwq; ++ ++#define CacheOpConfigSupports(e) ((gsCwq.eConfig & (e)) ? 
IMG_TRUE : IMG_FALSE)
++
++#if defined(CACHEOP_DEBUG)
++static INLINE void CacheOpStatsExecLogHeader(IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN])
++{
++    OSSNPrintf(szBuffer, CACHEOP_MAX_DEBUG_MESSAGE_LEN,
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
++               CACHEOP_RI_PRINTF_HEADER,
++#else
++               CACHEOP_PRINTF_HEADER,
++#endif
++               "DevID",
++               "Pid",
++               "CacheOp",
++               "Type",
++               "Origin",
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
++               "DevVAddr",
++               "DevPAddr",
++#endif
++               "Offset",
++               "Size",
++               "xTime (us)");
++}
++
++static void CacheOpStatsExecLogWrite(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
++{
++    IMG_INT32 i32WriteOffset;
++    IMG_UINT32 ui32ExecTime;
++
++    if (!psCacheOpWorkItem->uiCacheOp)
++    {
++        return;
++    }
++    else if (psCacheOpWorkItem->bKMReq && !CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
++    {
++        /* KM requests would spam the history due to their frequency, so drop them completely */
++        return;
++    }
++
++    OSLockAcquire(gsCwq.hStatsExecLock);
++
++    i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
++    gsCwq.i32StatsExecWriteIdx = INCR_WRAP(gsCwq.i32StatsExecWriteIdx);
++    gsCwq.asStatsExecuted[i32WriteOffset].ui32DeviceID = psCacheOpWorkItem->psDevNode ? psCacheOpWorkItem->psDevNode->sDevId.ui32InternalID : -1;
++    gsCwq.asStatsExecuted[i32WriteOffset].pid = psCacheOpWorkItem->pid;
++    gsCwq.asStatsExecuted[i32WriteOffset].uiSize = psCacheOpWorkItem->uiSize;
++    gsCwq.asStatsExecuted[i32WriteOffset].bKMReq = psCacheOpWorkItem->bKMReq;
++    gsCwq.asStatsExecuted[i32WriteOffset].uiOffset = psCacheOpWorkItem->uiOffset;
++    gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp = psCacheOpWorkItem->uiCacheOp;
++    gsCwq.asStatsExecuted[i32WriteOffset].ui64StartTime = psCacheOpWorkItem->ui64StartTime;
++    gsCwq.asStatsExecuted[i32WriteOffset].ui64EndTime = psCacheOpWorkItem->ui64EndTime;
++
++    CACHEOP_PVR_ASSERT(gsCwq.asStatsExecuted[i32WriteOffset].pid);
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
++    if (gsCwq.bInit && psCacheOpWorkItem->psPMR)
++    {
++        IMG_CPU_PHYADDR sDevPAddr;
++        PVRSRV_ERROR eError, eLockError;
++        IMG_BOOL bValid;
++
++        /* Get more detailed information regarding the sub-allocations that
++           the PMR has from the RI manager for the process that requested the CacheOp */
++        eError = RIDumpProcessListKM(psCacheOpWorkItem->psPMR,
++                                     gsCwq.asStatsExecuted[i32WriteOffset].pid,
++                                     gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
++                                     &gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr);
++        PVR_GOTO_IF_ERROR(eError, e0);
++
++        /* (Re)lock here as some PMRs might not have been locked */
++        eLockError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
++        PVR_GOTO_IF_ERROR(eLockError, e0);
++
++        eError = PMR_CpuPhysAddr(psCacheOpWorkItem->psPMR,
++                                 gsCwq.uiPageShift,
++                                 1,
++                                 gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
++                                 &sDevPAddr,
++                                 &bValid);
++
++        eLockError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
++        PVR_LOG_IF_ERROR(eLockError, "PMRUnlockSysPhysAddresses");
++
++        PVR_GOTO_IF_ERROR(eError, e0);
++
++        gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr.uiAddr = sDevPAddr.uiAddr;
++    }
++#endif
++
++    /* Calculate the approximate cumulative moving average execution time.
++     * This calculation is based on the standard equation:
++     *
++     *    CMAnext = (new + count * CMAprev) / (count + 1)
++     *
++     * but in simplified form:
++     *
++     *    CMAnext = CMAprev + (new - CMAprev) / (count + 1)
++     *
++     * this gets rid of multiplication and prevents overflow.
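++     *
++     * As a worked illustration (values assumed, not measured): with
++     * CMAprev = 100us, count = 4, CMRprev = 0 and a new sample of 163us,
++     * the update gives CMAnext = 100 + (163 - 100 + 0) / 5 = 112 with a
++     * remainder of 3, against an exact mean of 112.6us; carrying the
++     * remainder of 3 into the next update stops the rounding error from
++     * accumulating.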
++     *
++     * Also, to recover the accuracy lost to integer division,
++     * we hold the moving remainder of the division and add it back in:
++     *
++     *    CMAnext = CMAprev + (new - CMAprev + CMRprev) / (count + 1)
++     *
++     * Multiple tests proved this to be the best solution for approximating
++     * CMA using integers.
++     */
++
++    ui32ExecTime =
++        gsCwq.asStatsExecuted[i32WriteOffset].ui64EndTime -
++        gsCwq.asStatsExecuted[i32WriteOffset].ui64StartTime;
++
++    {
++        IMG_INT32 i32Div =
++            (IMG_INT32) ui32ExecTime -
++            (IMG_INT32) gsCwq.ui32AvgExecTime +
++            (IMG_INT32) gsCwq.ui32AvgExecTimeRemainder;
++
++        gsCwq.ui32AvgExecTime += i32Div / (IMG_INT32)(gsCwq.ui32TotalOps + 1);
++        gsCwq.ui32AvgExecTimeRemainder = i32Div % (IMG_INT32)(gsCwq.ui32TotalOps + 1);
++
++        gsCwq.ui32TotalOps++;
++    }
++
++    if (!gsCwq.asStatsExecuted[i32WriteOffset].bKMReq)
++    {
++        /* Only UM CacheOps are queued in the per-PID process statistics database */
++        PVRSRVStatsUpdateCacheOpStats(
++                gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp,
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
++                gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr,
++                gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr,
++#endif
++                gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
++                gsCwq.asStatsExecuted[i32WriteOffset].uiSize,
++                ui32ExecTime,
++                !gsCwq.asStatsExecuted[i32WriteOffset].bKMReq,
++                psCacheOpWorkItem->pid);
++    }
++
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG)
++e0:
++#endif
++    OSLockRelease(gsCwq.hStatsExecLock);
++}
++
++static int CacheOpStatsExecLogRead(OSDI_IMPL_ENTRY *psEntry, void *pvData)
++{
++    IMG_CHAR *pszFlushType;
++    IMG_CHAR *pszCacheOpType;
++    IMG_CHAR *pszFlushSource;
++    IMG_INT32 i32ReadOffset;
++    IMG_INT32 i32WriteOffset;
++
++    IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN] = {0};
++    PVR_UNREFERENCED_PARAMETER(pvData);
++
++    OSLockAcquire(gsCwq.hStatsExecLock);
++
++    DIPrintf(psEntry,
++             "Primary CPU d-cache architecture: LSZ: 0x%x, URBF: %s\n",
++             gsCwq.uiLineSize,
++             gsCwq.bSupportsUMFlush ? "Yes" : "No");
++
++    DIPrintf(psEntry,
++             "Configuration: UKT: %d, URBF: %s\n",
++             gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD],
++             gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No");
++
++    DIPrintf(psEntry,
++             "Summary: Total Ops [%d] - Server(using UMVA)/Client [%d(%d)/%d]. Avg execution time [%d]\n",
++             gsCwq.ui32TotalOps, gsCwq.ui32ServerOps, gsCwq.ui32ServerOpUsedUMVA, gsCwq.ui32ClientOps, gsCwq.ui32AvgExecTime);
++
++    CacheOpStatsExecLogHeader(szBuffer);
++    DIPrintf(psEntry, "%s\n", szBuffer);
++
++    i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
++    for (i32ReadOffset = DECR_WRAP(i32WriteOffset);
++         i32ReadOffset != i32WriteOffset;
++         i32ReadOffset = DECR_WRAP(i32ReadOffset))
++    {
++        IMG_UINT64 ui64ExecTime =
++            gsCwq.asStatsExecuted[i32ReadOffset].ui64EndTime -
++            gsCwq.asStatsExecuted[i32ReadOffset].ui64StartTime;
++
++        IMG_DEVMEM_SIZE_T ui64NumOfPages =
++            gsCwq.asStatsExecuted[i32ReadOffset].uiSize >> gsCwq.uiPageShift;
++
++        if (!gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp)
++        {
++            break;
++        }
++        if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
++        {
++            pszFlushType = "RBF.Fast";
++        }
++        else
++        {
++            pszFlushType = "RBF.Slow";
++        }
++
++        pszFlushSource = gsCwq.asStatsExecuted[i32ReadOffset].bKMReq ?
" KM" : " UM"; ++ ++ switch (gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp) ++ { ++ case PVRSRV_CACHE_OP_NONE: ++ pszCacheOpType = "None"; ++ break; ++ case PVRSRV_CACHE_OP_CLEAN: ++ pszCacheOpType = "Clean"; ++ break; ++ case PVRSRV_CACHE_OP_INVALIDATE: ++ pszCacheOpType = "Invalidate"; ++ break; ++ case PVRSRV_CACHE_OP_FLUSH: ++ pszCacheOpType = "Flush"; ++ break; ++ case PVRSRV_CACHE_OP_TIMELINE: ++ pszCacheOpType = "Timeline"; ++ pszFlushType = " "; ++ break; ++ default: ++ pszCacheOpType = "Unknown"; ++ break; ++ } ++ ++ DIPrintf(psEntry, ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ CACHEOP_RI_PRINTF, ++#else ++ CACHEOP_PRINTF, ++#endif ++ gsCwq.asStatsExecuted[i32ReadOffset].ui32DeviceID, ++ gsCwq.asStatsExecuted[i32ReadOffset].pid, ++ pszCacheOpType, ++ pszFlushType, ++ pszFlushSource, ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ gsCwq.asStatsExecuted[i32ReadOffset].sDevVAddr.uiAddr, ++ gsCwq.asStatsExecuted[i32ReadOffset].sDevPAddr.uiAddr, ++#endif ++ gsCwq.asStatsExecuted[i32ReadOffset].uiOffset, ++ gsCwq.asStatsExecuted[i32ReadOffset].uiSize, ++ ui64ExecTime); ++ ++ } ++ ++ OSLockRelease(gsCwq.hStatsExecLock); ++ ++ return 0; ++} ++#endif /* defined(CACHEOP_DEBUG) */ ++ ++static INLINE void CacheOpStatsReset(void) ++{ ++#if defined(CACHEOP_DEBUG) ++ gsCwq.ui32ServerOps = 0; ++ gsCwq.ui32ClientOps = 0; ++ gsCwq.ui32TotalOps = 0; ++ gsCwq.ui32ServerOpUsedUMVA = 0; ++ gsCwq.ui32AvgExecTime = 0; ++ gsCwq.ui32AvgExecTimeRemainder = 0; ++ ++ gsCwq.i32StatsExecWriteIdx = 0; ++ ++ OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted)); ++#endif ++} ++ ++static void CacheOpConfigUpdate(IMG_UINT32 ui32Config) ++{ ++ OSLockAcquire(gsCwq.hConfigLock); ++ ++ /* Step 0, set the gsCwq.eConfig bits */ ++ if (!(ui32Config & (CACHEOP_CONFIG_LAST - 1))) ++ { ++ gsCwq.eConfig = CACHEOP_CONFIG_KDF; ++ if (gsCwq.bSupportsUMFlush) ++ { ++ gsCwq.eConfig |= CACHEOP_CONFIG_URBF; ++ } ++ } ++ else ++ { ++ if (ui32Config & CACHEOP_CONFIG_KDF) ++ { ++ gsCwq.eConfig |= CACHEOP_CONFIG_KDF; ++ } ++ else ++ { ++ gsCwq.eConfig &= ~CACHEOP_CONFIG_KDF; ++ } ++ ++ if (gsCwq.bSupportsUMFlush && (ui32Config & CACHEOP_CONFIG_URBF)) ++ { ++ gsCwq.eConfig |= CACHEOP_CONFIG_URBF; ++ } ++ else ++ { ++ gsCwq.eConfig &= ~CACHEOP_CONFIG_URBF; ++ } ++ } ++ ++ if (ui32Config & CACHEOP_CONFIG_KLOG) ++ { ++ /* Suppress logs from KM caller */ ++ gsCwq.eConfig |= CACHEOP_CONFIG_KLOG; ++ } ++ else ++ { ++ gsCwq.eConfig &= ~CACHEOP_CONFIG_KLOG; ++ } ++ ++ /* Step 1, set gsCwq.ui32Config based on gsCwq.eConfig */ ++ ui32Config = 0; ++ ++ if (gsCwq.eConfig & CACHEOP_CONFIG_KDF) ++ { ++ ui32Config |= CACHEOP_CONFIG_KDF; ++ } ++ if (gsCwq.eConfig & CACHEOP_CONFIG_URBF) ++ { ++ ui32Config |= CACHEOP_CONFIG_URBF; ++ } ++ if (gsCwq.eConfig & CACHEOP_CONFIG_KLOG) ++ { ++ ui32Config |= CACHEOP_CONFIG_KLOG; ++ } ++ gsCwq.ui32Config = ui32Config; ++ ++ ++ /* Step 3, in certain cases where a CacheOp/VA is provided, this threshold determines at what point ++ the optimisation due to the presence of said VA (i.e. 
us not having to remap the PMR pages in KM)
++       is clawed back because of the overhead of maintaining such a large request, which might
++       stall the user thread; so to hide this latency, these CacheOps are executed on the
++       deferred CacheOp thread */
++    gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = (IMG_UINT32)(PVR_DIRTY_BYTES_FLUSH_THRESHOLD >> 2);
++
++    /* Step 4, if there is no UM support, all requests are done in KM, so zero this, forcing all
++       client requests to come down into the KM for maintenance */
++    gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = 0;
++
++    if (gsCwq.bSupportsUMFlush)
++    {
++        /* With URBF enabled we never go to the kernel */
++        if (gsCwq.eConfig & CACHEOP_CONFIG_URBF)
++        {
++            gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = (IMG_UINT32)~0;
++        }
++    }
++
++    /* Step 5, reset stats. */
++    CacheOpStatsReset();
++
++    OSLockRelease(gsCwq.hConfigLock);
++}
++
++static int CacheOpConfigRead(OSDI_IMPL_ENTRY *psEntry, void *pvData)
++{
++    PVR_UNREFERENCED_PARAMETER(pvData);
++
++    DIPrintf(psEntry, "URBF: %s\n",
++             gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No");
++
++    return 0;
++}
++
++static INLINE PVRSRV_ERROR CacheOpConfigQuery(const PVRSRV_DEVICE_NODE *psDevNode,
++                                              const void *psPrivate,
++                                              IMG_UINT32 *pui32Value)
++{
++    IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate;
++    PVR_UNREFERENCED_PARAMETER(psDevNode);
++
++    switch (ui32ID)
++    {
++        case APPHINT_ID_CacheOpConfig:
++            *pui32Value = gsCwq.ui32Config;
++            break;
++
++        case APPHINT_ID_CacheOpUMKMThresholdSize:
++            *pui32Value = gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD];
++            break;
++
++        default:
++            break;
++    }
++
++    return PVRSRV_OK;
++}
++
++static INLINE PVRSRV_ERROR CacheOpConfigSet(const PVRSRV_DEVICE_NODE *psDevNode,
++                                            const void *psPrivate,
++                                            IMG_UINT32 ui32Value)
++{
++    IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate;
++    PVR_UNREFERENCED_PARAMETER(psDevNode);
++
++    switch (ui32ID)
++    {
++        case APPHINT_ID_CacheOpConfig:
++            CacheOpConfigUpdate(ui32Value & CACHEOP_CONFIG_ALL);
++            break;
++
++        case APPHINT_ID_CacheOpUMKMThresholdSize:
++        {
++            if (!ui32Value || !gsCwq.bSupportsUMFlush)
++            {
++                /* The CPU ISA does not support UM flush, therefore every request goes down into
++                   the KM; silently ignore the request to adjust the threshold */
++                PVR_ASSERT(!gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]);
++                break;
++            }
++            else if (ui32Value < gsCwq.uiPageSize)
++            {
++                /* Silently round up to the OS page size */
++                ui32Value = gsCwq.uiPageSize;
++            }
++
++            /* Align to OS page size */
++            ui32Value &= ~(gsCwq.uiPageSize - 1);
++
++            gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = ui32Value;
++
++            break;
++        }
++
++        default:
++            break;
++    }
++
++    return PVRSRV_OK;
++}
++
++static INLINE PVRSRV_ERROR CacheOpTimelineBind(PVRSRV_DEVICE_NODE *psDevNode,
++                                               CACHEOP_WORK_ITEM *psCacheOpWorkItem,
++                                               PVRSRV_TIMELINE iTimeline)
++{
++    PVRSRV_ERROR eError;
++
++    /* Always default the incoming CacheOp work-item to safe values */
++    SyncClearTimelineObj(&psCacheOpWorkItem->sSWTimelineObj);
++    psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE;
++    psCacheOpWorkItem->psDevNode = psDevNode;
++    if (iTimeline == PVRSRV_NO_TIMELINE)
++    {
++        return PVRSRV_OK;
++    }
++
++    psCacheOpWorkItem->iTimeline = iTimeline;
++    eError = SyncSWGetTimelineObj(iTimeline, &psCacheOpWorkItem->sSWTimelineObj);
++    PVR_LOG_IF_ERROR(eError, "SyncSWGetTimelineObj");
++
++    return eError;
++}
++
++static INLINE PVRSRV_ERROR CacheOpTimelineExec(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
++{
++    PVRSRV_ERROR eError;
++
++    if (psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE)
++    {
++        return PVRSRV_OK;
++    }
++    CACHEOP_PVR_ASSERT(psCacheOpWorkItem->sSWTimelineObj.pvTlObj);
++
++    eError = SyncSWTimelineAdvanceKM(psCacheOpWorkItem->psDevNode,
++                                     &psCacheOpWorkItem->sSWTimelineObj);
++    (void) SyncSWTimelineReleaseKM(&psCacheOpWorkItem->sSWTimelineObj);
++
++    return eError;
++}
++
++static INLINE void CacheOpExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode,
++                                         PVRSRV_CACHE_OP uiCacheOp,
++                                         IMG_BYTE *pbCpuVirtAddr,
++                                         IMG_CPU_PHYADDR sCpuPhyAddr,
++                                         IMG_DEVMEM_OFFSET_T uiPgAlignedOffset,
++                                         IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset,
++                                         IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset)
++{
++    IMG_BYTE *pbCpuVirtAddrEnd;
++    IMG_BYTE *pbCpuVirtAddrStart;
++    IMG_CPU_PHYADDR sCpuPhyAddrEnd;
++    IMG_CPU_PHYADDR sCpuPhyAddrStart;
++    IMG_DEVMEM_SIZE_T uiRelFlushSize;
++    IMG_DEVMEM_OFFSET_T uiRelFlushOffset;
++    IMG_DEVMEM_SIZE_T uiNextPgAlignedOffset;
++
++    /* These quantities allow us to perform cache operations
++       at cache-line granularity, thereby ensuring we do not
++       perform more than is necessary */
++    CACHEOP_PVR_ASSERT(uiPgAlignedOffset < uiCLAlignedEndOffset);
++    uiRelFlushSize = (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
++    uiRelFlushOffset = 0;
++
++    if (uiCLAlignedStartOffset > uiPgAlignedOffset)
++    {
++        /* Zero unless initially starting at an in-page offset */
++        uiRelFlushOffset = uiCLAlignedStartOffset - uiPgAlignedOffset;
++        uiRelFlushSize -= uiRelFlushOffset;
++    }
++
++    /* uiRelFlushSize is gsCwq.uiPageSize unless the current outstanding CacheOp
++       size is smaller. The 1st case handles an in-page CacheOp range and
++       the 2nd case handles a multiple-page CacheOp range with a last
++       CacheOp size that is less than gsCwq.uiPageSize */
++    uiNextPgAlignedOffset = uiPgAlignedOffset + (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
++    if (uiNextPgAlignedOffset < uiPgAlignedOffset)
++    {
++        /* uiNextPgAlignedOffset is greater than uiCLAlignedEndOffset
++           by implication of this wrap-round; this only happens when
++           uiPgAlignedOffset is the last page aligned offset */
++        uiRelFlushSize = uiRelFlushOffset ?
++                uiCLAlignedEndOffset - uiCLAlignedStartOffset :
++                uiCLAlignedEndOffset - uiPgAlignedOffset;
++    }
++    else
++    {
++        if (uiNextPgAlignedOffset > uiCLAlignedEndOffset)
++        {
++            uiRelFlushSize = uiRelFlushOffset ?
++                    uiCLAlignedEndOffset - uiCLAlignedStartOffset :
++                    uiCLAlignedEndOffset - uiPgAlignedOffset;
++        }
++    }
++
++    /* More efficient to request a cache maintenance operation for the full
++       relative range as opposed to multiple cache-aligned ranges */
++    sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + uiRelFlushOffset;
++    sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + uiRelFlushSize;
++    if (pbCpuVirtAddr)
++    {
++        pbCpuVirtAddrStart = pbCpuVirtAddr + uiRelFlushOffset;
++        pbCpuVirtAddrEnd = pbCpuVirtAddrStart + uiRelFlushSize;
++    }
++    else
++    {
++        /* Some OS/Env layer support functions expect NULL(s) */
++        pbCpuVirtAddrStart = NULL;
++        pbCpuVirtAddrEnd = NULL;
++    }
++
++    /* Perform the requested CacheOp on the CPU data cache for successive
++       cache lines' worth of bytes up to a page or in-page cache-line boundary */
++    switch (uiCacheOp)
++    {
++        case PVRSRV_CACHE_OP_CLEAN:
++            OSCPUCacheCleanRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
++                                   sCpuPhyAddrStart, sCpuPhyAddrEnd);
++            break;
++        case PVRSRV_CACHE_OP_INVALIDATE:
++            OSCPUCacheInvalidateRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
++                                        sCpuPhyAddrStart, sCpuPhyAddrEnd);
++            break;
++        case PVRSRV_CACHE_OP_FLUSH:
++            OSCPUCacheFlushRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
++                                   sCpuPhyAddrStart, sCpuPhyAddrEnd);
++            break;
++        default:
++            PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d",
++                    __func__, uiCacheOp));
++            break;
++    }
++}
++
++static INLINE void CacheOpExecRangeBasedVA(PVRSRV_DEVICE_NODE *psDevNode,
++                                           IMG_CPU_VIRTADDR pvAddress,
++                                           IMG_DEVMEM_SIZE_T uiSize,
++                                           PVRSRV_CACHE_OP uiCacheOp)
++{
++    IMG_CPU_PHYADDR sCpuPhyAddrUnused =
++        { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) };
++    IMG_BYTE *pbEnd = (IMG_BYTE*)((uintptr_t)pvAddress + (uintptr_t)uiSize);
++    IMG_BYTE *pbStart = (IMG_BYTE*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiLineSize-1));
++
++    /*
++       If the start/end addresses aren't aligned to the cache line size, round
++       the start down and the end up to the nearest cache-line boundary; this
++       ensures that we flush all the cache lines affected by unaligned
++       start/end addresses.
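++
++       As a worked illustration (addresses assumed): with a 64-byte cache line,
++       a request covering [0x1010, 0x1078) is widened to [0x1000, 0x1080), so
++       the partially covered lines at both ends are maintained in full.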
++ */ ++ pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)gsCwq.uiLineSize); ++ switch (uiCacheOp) ++ { ++ case PVRSRV_CACHE_OP_CLEAN: ++ OSCPUCacheCleanRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); ++ break; ++ case PVRSRV_CACHE_OP_INVALIDATE: ++ OSCPUCacheInvalidateRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); ++ break; ++ case PVRSRV_CACHE_OP_FLUSH: ++ OSCPUCacheFlushRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused); ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", ++ __func__, uiCacheOp)); ++ break; ++ } ++ ++} ++ ++static INLINE PVRSRV_ERROR CacheOpValidateUMVA(PMR *psPMR, ++ IMG_CPU_VIRTADDR pvAddress, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PVRSRV_CACHE_OP uiCacheOp, ++ void **ppvOutAddress) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++#if defined(__linux__) && !defined(CACHEFLUSH_NO_KMRBF_USING_UMVA) ++ struct mm_struct *mm = current->mm; ++ struct vm_area_struct *vma; ++#endif ++ void __user *pvAddr; ++ ++ IMG_BOOL bReadOnlyInvalidate = ++ (uiCacheOp == PVRSRV_CACHE_OP_INVALIDATE) && ++ !PVRSRV_CHECK_CPU_WRITEABLE(PMR_Flags(psPMR)); ++ ++ if (!pvAddress || bReadOnlyInvalidate) ++ { ++ /* As pvAddress is optional, NULL is expected from UM/KM requests */ ++ /* Also don't allow invalidates for UMVA of read-only memory */ ++ pvAddr = NULL; ++ goto e0; ++ } ++ ++ ++ ++#if !defined(__linux__) || defined(CACHEFLUSH_NO_KMRBF_USING_UMVA) ++ pvAddr = NULL; ++#else ++ /* Validate VA, assume most basic address limit access_ok() check */ ++ pvAddr = (void __user *)(uintptr_t)((uintptr_t)pvAddress + uiOffset); ++ if (!access_ok(pvAddr, uiSize)) ++ { ++ pvAddr = NULL; ++ if (! mm) ++ { ++ /* Bad KM request, don't silently ignore */ ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0); ++ } ++ } ++ else if (mm) ++ { ++ mmap_read_lock(mm); ++ vma = find_vma(mm, (unsigned long)(uintptr_t)pvAddr); ++ ++ if (!vma || ++ vma->vm_start > (unsigned long)(uintptr_t)pvAddr || ++ vma->vm_end < (unsigned long)(uintptr_t)pvAddr + uiSize || ++ vma->vm_private_data != psPMR) ++ { ++ /* ++ * Request range is not fully mapped or is not matching the PMR ++ * Ignore request's VA. ++ */ ++ pvAddr = NULL; ++ } ++ mmap_read_unlock(mm); ++ } ++#endif ++ ++e0: ++ *ppvOutAddress = (IMG_CPU_VIRTADDR __force) pvAddr; ++ return eError; ++} ++ ++static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR, ++ IMG_CPU_VIRTADDR pvAddress, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PVRSRV_CACHE_OP uiCacheOp, ++ IMG_BOOL bIsRequestValidated) ++ ++{ ++ IMG_HANDLE hPrivOut = NULL; ++ IMG_BOOL bPMRIsSparse; ++ IMG_UINT32 ui32PageIndex; ++ IMG_UINT32 ui32NumOfPages; ++ size_t uiOutSize; /* Effectively unused */ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ IMG_DEVMEM_SIZE_T uiPgAlignedSize; ++ IMG_DEVMEM_OFFSET_T uiPgAlignedOffset; ++ IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset; ++ IMG_DEVMEM_OFFSET_T uiPgAlignedEndOffset; ++ IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset; ++ IMG_DEVMEM_OFFSET_T uiPgAlignedStartOffset; ++ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_CPU_PHYADDR asCpuPhyAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_CPU_PHYADDR *psCpuPhyAddr = asCpuPhyAddr; ++ IMG_BOOL bIsPMRInfoValid = IMG_FALSE; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_BYTE *pbCpuVirtAddr = NULL; ++ IMG_BOOL *pbValid = abValid; ++ ++ if (uiCacheOp == PVRSRV_CACHE_OP_NONE || uiCacheOp == PVRSRV_CACHE_OP_TIMELINE) ++ { ++ return PVRSRV_OK; ++ } ++ ++ if (! 
bIsRequestValidated)
++	{
++		IMG_DEVMEM_SIZE_T uiLPhysicalSize;
++
++		/* Need to validate parameters before proceeding */
++		eError = PMR_PhysicalSize(psPMR, &uiLPhysicalSize);
++		PVR_LOG_RETURN_IF_ERROR(eError, "uiLPhysicalSize");
++
++		PVR_LOG_RETURN_IF_FALSE(((uiOffset+uiSize) <= uiLPhysicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE);
++
++		eError = PMRLockSysPhysAddresses(psPMR);
++		PVR_LOG_RETURN_IF_ERROR(eError, "PMRLockSysPhysAddresses");
++	}
++
++	/* Fast track the request if a CPU VA is provided and the CPU ISA supports VA-only maintenance */
++	eError = CacheOpValidateUMVA(psPMR, pvAddress, uiOffset, uiSize, uiCacheOp, (void**)&pbCpuVirtAddr);
++	if (eError == PVRSRV_OK)
++	{
++		pvAddress = pbCpuVirtAddr;
++
++		if (pvAddress && gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
++		{
++			CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pvAddress, uiSize, uiCacheOp);
++
++			if (!bIsRequestValidated)
++			{
++				eError = PMRUnlockSysPhysAddresses(psPMR);
++				PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
++			}
++#if defined(CACHEOP_DEBUG)
++			gsCwq.ui32ServerOpUsedUMVA += 1;
++#endif
++			return PVRSRV_OK;
++		}
++		else if (pvAddress)
++		{
++			/* Round the incoming VA (if any) down to the nearest page aligned VA */
++			pvAddress = (void*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1));
++#if defined(CACHEOP_DEBUG)
++			gsCwq.ui32ServerOpUsedUMVA += 1;
++#endif
++		}
++	}
++	else
++	{
++		/*
++		 * This validation pathway has been added to accommodate any/all requests that might
++		 * otherwise cause the kernel to Oops; essentially, KM requests should prevalidate
++		 * cache maint. parameters, but if this fails we would rather fail gracefully than
++		 * Oops the kernel, so we log the fact that an invalid KM virtual address was
++		 * supplied together with whatever action, if any, was taken to mitigate it.
++		 */
++		CACHEOP_PVR_ASSERT(pbCpuVirtAddr == NULL);
++
++		if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_PHYSICAL)
++		{
++			PVR_DPF((PVR_DBG_WARNING,
++					"%s: Invalid vaddress 0x%p in CPU d-cache maint. op, using paddress",
++					__func__,
++					pvAddress));
++
++			/* We can still proceed as the kernel/CPU uses the CPU PA for d-cache maintenance */
++			pvAddress = NULL;
++		}
++		else
++		{
++			/*
++			 * The approach here is to attempt a reacquisition of the PMR kernel VA and see if
++			 * said VA corresponds to the parameter VA; if so, fail the requested cache maint.
++			 * op. because this indicates some kind of internal memory and/or meta-data
++			 * corruption, else reissue the request using this (re)acquired alias PMR kernel VA.
++			 */
++			if (PMR_IsSparse(psPMR))
++			{
++				eError = PMRAcquireSparseKernelMappingData(psPMR,
++				                                           0,
++				                                           gsCwq.uiPageSize,
++				                                           (void **)&pbCpuVirtAddr,
++				                                           &uiOutSize,
++				                                           &hPrivOut);
++				PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0);
++			}
++			else
++			{
++				eError = PMRAcquireKernelMappingData(psPMR,
++				                                     0,
++				                                     gsCwq.uiPageSize,
++				                                     (void **)&pbCpuVirtAddr,
++				                                     &uiOutSize,
++				                                     &hPrivOut);
++				PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0);
++			}
++
++			/* Here, we only compare these CPU virtual addresses at the granularity of the OS page size */
++			if ((uintptr_t)pbCpuVirtAddr == ((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1)))
++			{
++				PVR_DPF((PVR_DBG_ERROR,
++						"%s: Invalid vaddress 0x%p in CPU d-cache maint. op, no alt. so failing request",
++						__func__,
++						pvAddress));
++
++				eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
++				PVR_LOG_GOTO_WITH_ERROR("PMRReleaseKernelMappingData", eError, PVRSRV_ERROR_INVALID_CPU_ADDR, e0);
++			}
++			else if (gsCwq.uiCacheOpAddrType == OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
++			{
++				PVR_DPF((PVR_DBG_WARNING,
++						"%s: Bad vaddress 0x%p in CPU d-cache maint. op, using reacquired vaddress 0x%p",
++						__func__,
++						pvAddress,
++						pbCpuVirtAddr));
++
++				/* Note that this might still fail if there is kernel memory/meta-data corruption;
++				   there is not much we can do here, but at the least we will be informed of this
++				   before the kernel Oopses */
++				CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pbCpuVirtAddr, uiSize, uiCacheOp);
++
++				eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
++				PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
++
++				eError = PVRSRV_OK;
++				goto e0;
++			}
++			else
++			{
++				/* At this juncture, we have exhausted every possible work-around, but we do
++				   know that VA reacquisition returned another/alias page-aligned VA; so,
++				   expecting PMRAcquireKernelMappingData() to behave the same down-stream,
++				   we proceed */
++				PVR_DPF((PVR_DBG_WARNING,
++						"%s: Bad vaddress %p in CPU d-cache maint. op, will use reacquired vaddress",
++						__func__,
++						pvAddress));
++
++				eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
++				PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
++
++				/* NULL this to force per-page reacquisition down-stream */
++				pvAddress = NULL;
++			}
++		}
++	}
++
++	/* NULL clobbered var., OK to proceed */
++	pbCpuVirtAddr = NULL;
++	eError = PVRSRV_OK;
++
++	/* Need this for kernel mapping */
++	bPMRIsSparse = PMR_IsSparse(psPMR);
++	psDevNode = PMR_DeviceNode(psPMR);
++
++	/* Round the incoming offset down to the nearest cache-line / page aligned address */
++	uiCLAlignedEndOffset = uiOffset + uiSize;
++	uiCLAlignedEndOffset = PVR_ALIGN(uiCLAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiLineSize);
++	uiCLAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiLineSize-1));
++
++	uiPgAlignedEndOffset = uiCLAlignedEndOffset;
++	uiPgAlignedEndOffset = PVR_ALIGN(uiPgAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize);
++	uiPgAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiPageSize-1));
++	uiPgAlignedSize = uiPgAlignedEndOffset - uiPgAlignedStartOffset;
++
++#if defined(CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING)
++	/* For internal debug if cache-line optimised
++	   flushing is suspected of causing data corruption */
++	uiCLAlignedStartOffset = uiPgAlignedStartOffset;
++	uiCLAlignedEndOffset = uiPgAlignedEndOffset;
++#endif
++
++	/* Type of allocation backing the PMR data */
++	ui32NumOfPages = uiPgAlignedSize >> gsCwq.uiPageShift;
++	if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
++	{
++		/* The pbValid array is allocated first as it is needed in
++		   both physical/virtual cache maintenance methods */
++		pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL));
++		if (! pbValid)
++		{
++			pbValid = abValid;
++		}
++		else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL)
++		{
++			psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR));
++			if (!
psCpuPhyAddr) ++ { ++ psCpuPhyAddr = asCpuPhyAddr; ++ OSFreeMem(pbValid); ++ pbValid = abValid; ++ } ++ } ++ } ++ ++ /* We always retrieve PMR data in bulk, up-front if number of pages is within ++ PMR_MAX_TRANSLATION_STACK_ALLOC limits else we check to ensure that a ++ dynamic buffer has been allocated to satisfy requests outside limits */ ++ if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid) ++ { ++ if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ++ { ++ /* Look-up PMR CpuPhyAddr once, if possible */ ++ eError = PMR_CpuPhysAddr(psPMR, ++ gsCwq.uiPageShift, ++ ui32NumOfPages, ++ uiPgAlignedStartOffset, ++ psCpuPhyAddr, ++ pbValid); ++ if (eError == PVRSRV_OK) ++ { ++ bIsPMRInfoValid = IMG_TRUE; ++ } ++ } ++ else ++ { ++ /* Look-up PMR per-page validity once, if possible */ ++ eError = PMR_IsOffsetValid(psPMR, ++ gsCwq.uiPageShift, ++ ui32NumOfPages, ++ uiPgAlignedStartOffset, ++ pbValid); ++ bIsPMRInfoValid = (eError == PVRSRV_OK) ? IMG_TRUE : IMG_FALSE; ++ } ++ } ++ ++ /* For each (possibly non-contiguous) PMR page(s), carry out the requested cache maint. op. */ ++ for (uiPgAlignedOffset = uiPgAlignedStartOffset, ui32PageIndex = 0; ++ uiPgAlignedOffset < uiPgAlignedEndOffset; ++ uiPgAlignedOffset += (IMG_DEVMEM_OFFSET_T) gsCwq.uiPageSize, ui32PageIndex += 1) ++ { ++ ++ if (! bIsPMRInfoValid) ++ { ++ /* Never cross page boundary without looking up corresponding PMR page physical ++ address and/or page validity if these were not looked-up, in bulk, up-front */ ++ ui32PageIndex = 0; ++ if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ++ { ++ eError = PMR_CpuPhysAddr(psPMR, ++ gsCwq.uiPageShift, ++ 1, ++ uiPgAlignedOffset, ++ psCpuPhyAddr, ++ pbValid); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_CpuPhysAddr", e0); ++ } ++ else ++ { ++ eError = PMR_IsOffsetValid(psPMR, ++ gsCwq.uiPageShift, ++ 1, ++ uiPgAlignedOffset, ++ pbValid); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", e0); ++ } ++ } ++ ++ /* Skip invalid PMR pages (i.e. sparse) */ ++ if (pbValid[ui32PageIndex] == IMG_FALSE) ++ { ++ CACHEOP_PVR_ASSERT(bPMRIsSparse); ++ continue; ++ } ++ ++ if (pvAddress) ++ { ++ /* The caller has supplied either a KM/UM CpuVA, so use it unconditionally */ ++ pbCpuVirtAddr = ++ (void *)(uintptr_t)((uintptr_t)pvAddress + (uintptr_t)(uiPgAlignedOffset-uiPgAlignedStartOffset)); ++ } ++ /* Skip CpuVA acquire if CacheOp can be maintained entirely using CpuPA */ ++ else if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) ++ { ++ if (bPMRIsSparse) ++ { ++ eError = ++ PMRAcquireSparseKernelMappingData(psPMR, ++ uiPgAlignedOffset, ++ gsCwq.uiPageSize, ++ (void **)&pbCpuVirtAddr, ++ &uiOutSize, ++ &hPrivOut); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0); ++ } ++ else ++ { ++ eError = ++ PMRAcquireKernelMappingData(psPMR, ++ uiPgAlignedOffset, ++ gsCwq.uiPageSize, ++ (void **)&pbCpuVirtAddr, ++ &uiOutSize, ++ &hPrivOut); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0); ++ } ++ } ++ ++ /* Issue actual cache maintenance for PMR */ ++ CacheOpExecRangeBased(psDevNode, ++ uiCacheOp, ++ pbCpuVirtAddr, ++ (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ? ++ psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0], ++ uiPgAlignedOffset, ++ uiCLAlignedStartOffset, ++ uiCLAlignedEndOffset); ++ ++ if (! 
pvAddress) ++ { ++ /* The caller has not supplied either a KM/UM CpuVA, release mapping */ ++ if (gsCwq.uiCacheOpAddrType != OS_CACHE_OP_ADDR_TYPE_PHYSICAL) ++ { ++ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut); ++ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); ++ } ++ } ++ } ++ ++e0: ++ if (psCpuPhyAddr != asCpuPhyAddr) ++ { ++ OSFreeMem(psCpuPhyAddr); ++ } ++ ++ if (pbValid != abValid) ++ { ++ OSFreeMem(pbValid); ++ } ++ ++ if (! bIsRequestValidated) ++ { ++ eError = PMRUnlockSysPhysAddresses(psPMR); ++ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses"); ++ } ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR CacheOpBatchExecTimeline(PVRSRV_DEVICE_NODE *psDevNode, ++ PVRSRV_TIMELINE iTimeline) ++{ ++ PVRSRV_ERROR eError; ++ CACHEOP_WORK_ITEM sCacheOpWorkItem = {NULL}; ++ ++ eError = CacheOpTimelineBind(psDevNode, &sCacheOpWorkItem, iTimeline); ++ PVR_LOG_RETURN_IF_ERROR(eError, "CacheOpTimelineBind"); ++ ++ eError = CacheOpTimelineExec(&sCacheOpWorkItem); ++ PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec"); ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR CacheOpBatchExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode, ++ PMR **ppsPMR, ++ IMG_CPU_VIRTADDR *pvAddress, ++ IMG_DEVMEM_OFFSET_T *puiOffset, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ PVRSRV_CACHE_OP *puiCacheOp, ++ IMG_UINT32 ui32NumCacheOps, ++ PVRSRV_TIMELINE uiTimeline) ++{ ++ IMG_UINT32 ui32Idx; ++ IMG_BOOL bBatchHasTimeline; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++#if defined(CACHEOP_DEBUG) ++ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; ++ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); ++#endif ++ ++ /* Check if batch has an associated timeline update */ ++ bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE; ++ puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_TIMELINE); ++ ++ for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++) ++ { ++ /* Fail UM request, don't silently ignore */ ++ PVR_GOTO_IF_INVALID_PARAM(puiSize[ui32Idx], eError, e0); ++ ++#if defined(CACHEOP_DEBUG) ++ sCacheOpWorkItem.ui64StartTime = OSClockus64(); ++#endif ++ ++ eError = CacheOpPMRExec(ppsPMR[ui32Idx], ++ pvAddress[ui32Idx], ++ puiOffset[ui32Idx], ++ puiSize[ui32Idx], ++ puiCacheOp[ui32Idx], ++ IMG_FALSE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpExecPMR", e0); ++ ++#if defined(CACHEOP_DEBUG) ++ sCacheOpWorkItem.ui64EndTime = OSClockus64(); ++ ++ sCacheOpWorkItem.psDevNode = psDevNode; ++ sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx]; ++ sCacheOpWorkItem.uiSize = puiSize[ui32Idx]; ++ sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx]; ++ sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx]; ++ CacheOpStatsExecLogWrite(&sCacheOpWorkItem); ++ ++ gsCwq.ui32ServerOps += 1; ++#endif ++ } ++ ++e0: ++ if (bBatchHasTimeline) ++ { ++ eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline); ++ } ++ ++ return eError; ++} ++ ++ ++PVRSRV_ERROR CacheOpExec (PPVRSRV_DEVICE_NODE psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd, ++ PVRSRV_CACHE_OP uiCacheOp) ++{ ++#if defined(CACHEOP_DEBUG) ++ IMG_UINT64 ui64StartTime = OSClockus64(); ++#endif ++ ++ switch (uiCacheOp) ++ { ++ case PVRSRV_CACHE_OP_CLEAN: ++ OSCPUCacheCleanRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd); ++ break; ++ case PVRSRV_CACHE_OP_INVALIDATE: ++ OSCPUCacheInvalidateRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd); ++ break; ++ case PVRSRV_CACHE_OP_FLUSH: ++ OSCPUCacheFlushRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd); ++ break; 
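++		/* Per cache_ops.h: CLEAN is a write-back without invalidate,
++		   INVALIDATE discards lines without write-back, and FLUSH is a
++		   write-back plus invalidate; anything else is not a maintenance
++		   operation and is only reported below. */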
++ default: ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", ++ __func__, uiCacheOp)); ++ break; ++ } ++ ++#if defined(CACHEOP_DEBUG) ++ if (CacheOpConfigSupports(CACHEOP_CONFIG_KLOG)) ++ { ++ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; ++ ++ gsCwq.ui32ServerOps += 1; ++ ++ sCacheOpWorkItem.uiOffset = 0; ++ sCacheOpWorkItem.bKMReq = IMG_TRUE; ++ sCacheOpWorkItem.uiCacheOp = uiCacheOp; ++ /* Use information page PMR for logging KM request */ ++ sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR; ++ sCacheOpWorkItem.psDevNode = psDevNode; ++ sCacheOpWorkItem.ui64StartTime = ui64StartTime; ++ sCacheOpWorkItem.ui64EndTime = OSClockus64(); ++ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); ++ sCacheOpWorkItem.uiSize = (sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr); ++ ++ CacheOpStatsExecLogWrite(&sCacheOpWorkItem); ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR CacheOpValExec(PMR *psPMR, ++ IMG_UINT64 uiAddress, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PVRSRV_CACHE_OP uiCacheOp) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CPU_VIRTADDR pvAddress = (IMG_CPU_VIRTADDR)(uintptr_t)uiAddress; ++#if defined(CACHEOP_DEBUG) ++ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; ++ ++ sCacheOpWorkItem.ui64StartTime = OSClockus64(); ++#endif ++ ++ eError = CacheOpPMRExec(psPMR, ++ pvAddress, ++ uiOffset, ++ uiSize, ++ uiCacheOp, ++ IMG_FALSE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpPMRExec", e0); ++ ++#if defined(CACHEOP_DEBUG) ++ sCacheOpWorkItem.ui64EndTime = OSClockus64(); ++ ++ sCacheOpWorkItem.psDevNode = PMR_DeviceNode(psPMR); ++ sCacheOpWorkItem.psPMR = psPMR; ++ sCacheOpWorkItem.uiSize = uiSize; ++ sCacheOpWorkItem.uiOffset = uiOffset; ++ sCacheOpWorkItem.uiCacheOp = uiCacheOp; ++ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); ++ CacheOpStatsExecLogWrite(&sCacheOpWorkItem); ++ ++ gsCwq.ui32ServerOps += 1; ++#endif ++ ++e0: ++ return eError; ++} ++ ++PVRSRV_ERROR CacheOpQueue (CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32NumCacheOps, ++ PMR **ppsPMR, ++ IMG_UINT64 *puiAddress, ++ IMG_DEVMEM_OFFSET_T *puiOffset, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ PVRSRV_CACHE_OP *puiCacheOp, ++ IMG_UINT32 ui32OpTimeline) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_TIMELINE uiTimeline = (PVRSRV_TIMELINE)ui32OpTimeline; ++ IMG_CPU_VIRTADDR *pvAddress = (IMG_CPU_VIRTADDR*)(uintptr_t)puiAddress; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if (!gsCwq.bInit) ++ { ++ PVR_LOG(("CacheOp framework not initialised, failing request")); ++ return PVRSRV_ERROR_NOT_INITIALISED; ++ } ++ else if (! 
ui32NumCacheOps) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ /* Ensure any single timeline CacheOp request is processed immediately */ ++ else if (ui32NumCacheOps == 1 && puiCacheOp[0] == PVRSRV_CACHE_OP_TIMELINE) ++ { ++ eError = CacheOpBatchExecTimeline(psDevNode, uiTimeline); ++ } ++ /* This is the default entry for all client requests */ ++ else ++ { ++ if (!(gsCwq.eConfig & (CACHEOP_CONFIG_LAST-1))) ++ { ++ /* default the configuration before execution */ ++ CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT); ++ } ++ ++ eError = ++ CacheOpBatchExecRangeBased(psDevNode, ++ ppsPMR, ++ pvAddress, ++ puiOffset, ++ puiSize, ++ puiCacheOp, ++ ui32NumCacheOps, ++ uiTimeline); ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR CacheOpLog (PMR *psPMR, ++ IMG_UINT64 puiAddress, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT64 ui64StartTime, ++ IMG_UINT64 ui64EndTime, ++ PVRSRV_CACHE_OP uiCacheOp) ++{ ++#if defined(CACHEOP_DEBUG) ++ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0}; ++ PVR_UNREFERENCED_PARAMETER(puiAddress); ++ ++ sCacheOpWorkItem.psDevNode = PMR_DeviceNode(psPMR); ++ sCacheOpWorkItem.psPMR = psPMR; ++ sCacheOpWorkItem.uiSize = uiSize; ++ sCacheOpWorkItem.uiOffset = uiOffset; ++ sCacheOpWorkItem.uiCacheOp = uiCacheOp; ++ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM(); ++ ++ sCacheOpWorkItem.ui64StartTime = ui64StartTime; ++ sCacheOpWorkItem.ui64EndTime = ui64EndTime; ++ ++ gsCwq.ui32ClientOps += 1; ++ ++ CacheOpStatsExecLogWrite(&sCacheOpWorkItem); ++#else ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(uiOffset); ++ PVR_UNREFERENCED_PARAMETER(uiCacheOp); ++ PVR_UNREFERENCED_PARAMETER(puiAddress); ++ PVR_UNREFERENCED_PARAMETER(ui64StartTime); ++ PVR_UNREFERENCED_PARAMETER(ui64EndTime); ++#endif ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR CacheOpInit2 (void) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ /* Apphint read/write is not concurrent, so lock protects against this */ ++ eError = OSLockCreate((POS_LOCK*)&gsCwq.hConfigLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); ++ ++ ++#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH) ++ gsCwq.bSupportsUMFlush = IMG_TRUE; ++#else ++ gsCwq.bSupportsUMFlush = IMG_FALSE; ++#endif ++ ++ gsCwq.pui32InfoPage = psPVRSRVData->pui32InfoPage; ++ gsCwq.psInfoPagePMR = psPVRSRVData->psInfoPagePMR; ++ ++ /* Normally, platforms should use their default configurations, put exceptions here */ ++#if defined(__i386__) || defined(__x86_64__) ++#if !defined(TC_MEMORY_CONFIG) ++ CacheOpConfigUpdate(CACHEOP_CONFIG_URBF | CACHEOP_CONFIG_KDF); ++#else ++ CacheOpConfigUpdate(CACHEOP_CONFIG_KDF); ++#endif ++#else /* defined(__x86__) */ ++ CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT); ++#endif ++ ++ /* Initialise the remaining occupants of the CacheOp information page */ ++ gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE] = (IMG_UINT32)gsCwq.uiPageSize; ++ gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE] = (IMG_UINT32)gsCwq.uiLineSize; ++ ++ /* Set before spawning thread */ ++ gsCwq.bInit = IMG_TRUE; ++ ++ { ++ DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpConfigRead}; ++ /* Writing the unsigned integer binary encoding of CACHEOP_CONFIG ++ into this file cycles through avail. 
configuration(s) */ ++ eError = DICreateEntry("cacheop_config", NULL, &sIterator, NULL, ++ DI_ENTRY_TYPE_GENERIC, &gsCwq.psConfigTune); ++ PVR_LOG_GOTO_IF_FALSE(gsCwq.psConfigTune, "DICreateEntry", e0); ++ } ++ ++ /* Register the CacheOp framework (re)configuration handlers */ ++ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpConfig, ++ CacheOpConfigQuery, ++ CacheOpConfigSet, ++ APPHINT_OF_DRIVER_NO_DEVICE, ++ (void *) APPHINT_ID_CacheOpConfig); ++ ++ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpUMKMThresholdSize, ++ CacheOpConfigQuery, ++ CacheOpConfigSet, ++ APPHINT_OF_DRIVER_NO_DEVICE, ++ (void *) APPHINT_ID_CacheOpUMKMThresholdSize); ++ ++ return PVRSRV_OK; ++e0: ++ CacheOpDeInit2(); ++ return eError; ++} ++ ++void CacheOpDeInit2 (void) ++{ ++ gsCwq.bInit = IMG_FALSE; ++ ++ if (gsCwq.hConfigLock) ++ { ++ OSLockDestroy(gsCwq.hConfigLock); ++ gsCwq.hConfigLock = NULL; ++ } ++ ++ if (gsCwq.psConfigTune) ++ { ++ DIDestroyEntry(gsCwq.psConfigTune); ++ gsCwq.psConfigTune = NULL; ++ } ++ ++ gsCwq.pui32InfoPage = NULL; ++ gsCwq.psInfoPagePMR = NULL; ++} ++ ++PVRSRV_ERROR CacheOpInit (void) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ gsCwq.uiPageSize = OSGetPageSize(); ++ gsCwq.uiPageShift = OSGetPageShift(); ++ gsCwq.uiLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); ++ gsCwq.uiLineShift = ExactLog2(gsCwq.uiLineSize); ++ PVR_LOG_RETURN_IF_FALSE((gsCwq.uiLineSize && gsCwq.uiPageSize && gsCwq.uiPageShift), "", PVRSRV_ERROR_INIT_FAILURE); ++ gsCwq.uiCacheOpAddrType = OSCPUCacheOpAddressType(); ++ ++#if defined(CACHEOP_DEBUG) ++ /* debugfs file read-out is not concurrent, so lock protects against this */ ++ eError = OSLockCreate((POS_LOCK*)&gsCwq.hStatsExecLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); ++ ++ gsCwq.i32StatsExecWriteIdx = 0; ++ OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted)); ++ ++ { ++ DI_ITERATOR_CB sIterator = {.pfnShow = CacheOpStatsExecLogRead}; ++ /* File captures the most recent subset of CacheOp(s) executed */ ++ eError = DICreateEntry("cacheop_history", NULL, &sIterator, NULL, ++ DI_ENTRY_TYPE_GENERIC, &gsCwq.psDIEntry); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", e0); ++ } ++e0: ++#endif ++ return eError; ++} ++ ++void CacheOpDeInit (void) ++{ ++#if defined(CACHEOP_DEBUG) ++ if (gsCwq.hStatsExecLock) ++ { ++ OSLockDestroy(gsCwq.hStatsExecLock); ++ gsCwq.hStatsExecLock = NULL; ++ } ++ ++ if (gsCwq.psDIEntry) ++ { ++ DIDestroyEntry(gsCwq.psDIEntry); ++ gsCwq.psDIEntry = NULL; ++ } ++#endif ++} +diff --git a/drivers/gpu/drm/img-rogue/cache_km.h b/drivers/gpu/drm/img-rogue/cache_km.h +new file mode 100644 +index 000000000000..282ff5bc5f0f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/cache_km.h +@@ -0,0 +1,151 @@ ++/*************************************************************************/ /*! ++@File cache_km.h ++@Title CPU cache management header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef CACHE_KM_H
++#define CACHE_KM_H
++
++#if defined(__linux__)
++#include <linux/version.h>
++#else
++#define KERNEL_VERSION
++#endif
++
++#include "pvrsrv_error.h"
++#include "os_cpu_cache.h"
++#include "img_types.h"
++#include "cache_ops.h"
++#include "device.h"
++#include "pmr.h"
++
++typedef IMG_UINT32 PVRSRV_CACHE_OP_ADDR_TYPE;  /*!< Represents CPU address type required for CPU d-cache maintenance */
++#define PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL  0x1 /*!< Operation requires CPU virtual address only */
++#define PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL 0x2 /*!< Operation requires CPU physical address only */
++#define PVRSRV_CACHE_OP_ADDR_TYPE_BOTH     0x3 /*!< Operation requires both CPU virtual & physical addresses */
++
++#include "connection_server.h"
++
++/*
++ * CacheOpInit() & CacheOpDeInit()
++ *
++ * This must be called to initialise the KM cache maintenance framework.
++ * This is called early during the driver/module (un)loading phase.
++ */
++PVRSRV_ERROR CacheOpInit(void);
++void CacheOpDeInit(void);
++
++/*
++ * CacheOpInit2() & CacheOpDeInit2()
++ *
++ * This must be called to initialise the UM cache maintenance framework.
++ * This is called when the driver is loaded/unloaded from the kernel.
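++ * Note that CacheOpInit2() invokes CacheOpDeInit2() on its own error path,
++ * so the latter must tolerate a partially initialised state.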
++ */
++PVRSRV_ERROR CacheOpInit2(void);
++void CacheOpDeInit2(void);
++
++/*
++ * CacheOpExec()
++ *
++ * This is the primary CPU data-cache maintenance interface and it is
++ * always guaranteed to be synchronous; the arguments supplied must be
++ * pre-validated for performance reasons else the d-cache maintenance
++ * operation might cause the underlying OS kernel to fault.
++ */
++PVRSRV_ERROR CacheOpExec(PPVRSRV_DEVICE_NODE psDevNode,
++                         void *pvVirtStart,
++                         void *pvVirtEnd,
++                         IMG_CPU_PHYADDR sCPUPhysStart,
++                         IMG_CPU_PHYADDR sCPUPhysEnd,
++                         PVRSRV_CACHE_OP uiCacheOp);
++
++/*
++ * CacheOpValExec()
++ *
++ * Same as CacheOpExec(), except arguments are _Validated_ before being
++ * presented to the underlying OS kernel for CPU data-cache maintenance.
++ * The uiAddress is the start CPU virtual address of the to-be d-cache
++ * maintained PMR; it can be NULL, in which case a remap will be performed
++ * internally, if required for cache maintenance. This is primarily used
++ * as the services client bridge call handler for synchronous user-mode
++ * cache maintenance requests.
++ */
++PVRSRV_ERROR CacheOpValExec(PMR *psPMR,
++                            IMG_UINT64 uiAddress,
++                            IMG_DEVMEM_OFFSET_T uiOffset,
++                            IMG_DEVMEM_SIZE_T uiSize,
++                            PVRSRV_CACHE_OP uiCacheOp);
++
++/*
++ * CacheOpQueue()
++ *
++ * This is the secondary cache maintenance interface and it is not
++ * guaranteed to be synchronous in that requests could be deferred
++ * and executed asynchronously. This interface is primarily meant
++ * as the services client bridge call handler. Both uiInfoPgGFSeqNum
++ * and ui32[Current,Next]FenceSeqNum implement an internal client/server
++ * queueing protocol, so making use of this interface outside of the
++ * services client is not recommended and should not be done.
++ */
++PVRSRV_ERROR CacheOpQueue(CONNECTION_DATA *psConnection,
++                          PPVRSRV_DEVICE_NODE psDevNode,
++                          IMG_UINT32 ui32OpCount,
++                          PMR **ppsPMR,
++                          IMG_UINT64 *puiAddress,
++                          IMG_DEVMEM_OFFSET_T *puiOffset,
++                          IMG_DEVMEM_SIZE_T *puiSize,
++                          PVRSRV_CACHE_OP *puiCacheOp,
++                          IMG_UINT32 ui32OpTimeline);
++
++/*
++ * CacheOpLog()
++ *
++ * This is used for logging client cache maintenance operations that
++ * were executed in user-space.
++ */
++PVRSRV_ERROR CacheOpLog(PMR *psPMR,
++                        IMG_UINT64 uiAddress,
++                        IMG_DEVMEM_OFFSET_T uiOffset,
++                        IMG_DEVMEM_SIZE_T uiSize,
++                        IMG_UINT64 ui64StartTime,
++                        IMG_UINT64 ui64EndTime,
++                        PVRSRV_CACHE_OP uiCacheOp);
++
++#endif /* CACHE_KM_H */
+diff --git a/drivers/gpu/drm/img-rogue/cache_ops.h b/drivers/gpu/drm/img-rogue/cache_ops.h
+new file mode 100644
+index 000000000000..a1d714519d59
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/cache_ops.h
+@@ -0,0 +1,61 @@
++/*************************************************************************/ /*!
++@File
++@Title          Services cache management header
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Defines for cache management which are visible internally
++                and externally
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef CACHE_OPS_H ++#define CACHE_OPS_H ++#include "img_types.h" ++/*! ++* @Defgroup CPUCacheAPIs ++* @{ ++*/ ++#define CACHE_BATCH_MAX (8U) ++#define MAX_DMA_OPS (34) ++typedef IMG_UINT32 PVRSRV_CACHE_OP; /*!< Type represents cache maintenance operation */ ++#define PVRSRV_CACHE_OP_NONE 0x0U /*!< No operation */ ++#define PVRSRV_CACHE_OP_CLEAN 0x1U /*!< Flush w/o invalidate */ ++#define PVRSRV_CACHE_OP_INVALIDATE 0x2U /*!< Invalidate w/o flush */ ++#define PVRSRV_CACHE_OP_FLUSH 0x3U /*!< Flush w/ invalidate */ ++/*! @} End of Defgroup CPUCacheAPIs */ ++ ++#endif /* CACHE_OPS_H */ +diff --git a/drivers/gpu/drm/img-rogue/client_cache_bridge.h b/drivers/gpu/drm/img-rogue/client_cache_bridge.h +new file mode 100644 +index 000000000000..1dec13fccce7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_cache_bridge.h +@@ -0,0 +1,80 @@ ++/******************************************************************************* ++@File ++@Title Client bridge header for cache ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exports the client bridge functions for cache ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef CLIENT_CACHE_BRIDGE_H ++#define CLIENT_CACHE_BRIDGE_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) ++#include "pvr_bridge_client.h" ++#include "pvr_bridge.h" ++#endif ++ ++#include "common_cache_bridge.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32NumCacheOps, ++ IMG_HANDLE * phPMR, ++ IMG_UINT64 * pui64Address, ++ IMG_DEVMEM_OFFSET_T * puiOffset, ++ IMG_DEVMEM_SIZE_T * puiSize, ++ PVRSRV_CACHE_OP * piuCacheOp, ++ IMG_UINT32 ui32OpTimeline); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_UINT64 ui64Address, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_UINT64 ui64Address, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_INT64 i64StartTime, ++ IMG_INT64 i64EndTime, PVRSRV_CACHE_OP iuCacheOp); ++ ++#endif /* CLIENT_CACHE_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c +new file mode 100644 +index 000000000000..9691bae9352f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_cache_direct_bridge.c +@@ -0,0 +1,112 @@ ++/******************************************************************************* ++@File ++@Title Direct client bridge for cache ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the client side of the bridge for cache ++ which is used in calls from Server context. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#include "client_cache_bridge.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++ ++/* Module specific includes */ ++#include "cache_ops.h" ++ ++#include "cache_km.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpQueue(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32NumCacheOps, ++ IMG_HANDLE * phPMR, ++ IMG_UINT64 * pui64Address, ++ IMG_DEVMEM_OFFSET_T * puiOffset, ++ IMG_DEVMEM_SIZE_T * puiSize, ++ PVRSRV_CACHE_OP * piuCacheOp, ++ IMG_UINT32 ui32OpTimeline) ++{ ++ PVRSRV_ERROR eError; ++ PMR **psPMRInt; ++ ++ psPMRInt = (PMR **) phPMR; ++ ++ eError = ++ CacheOpQueue(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ ui32NumCacheOps, ++ psPMRInt, pui64Address, puiOffset, puiSize, piuCacheOp, ui32OpTimeline); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpExec(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_UINT64 ui64Address, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, PVRSRV_CACHE_OP iuCacheOp) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = CacheOpValExec(psPMRInt, ui64Address, uiOffset, uiSize, iuCacheOp); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeCacheOpLog(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_UINT64 ui64Address, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_INT64 i64StartTime, ++ IMG_INT64 i64EndTime, PVRSRV_CACHE_OP iuCacheOp) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = ++ CacheOpLog(psPMRInt, ++ ui64Address, uiOffset, uiSize, i64StartTime, i64EndTime, iuCacheOp); ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h b/drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h +new file mode 100644 +index 000000000000..bfa6bfb9c037 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_devicememhistory_bridge.h +@@ -0,0 +1,111 @@ ++/******************************************************************************* ++@File ++@Title Client bridge header for devicememhistory ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exports the client bridge functions for devicememhistory ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H ++#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) ++#include "pvr_bridge_client.h" ++#include "pvr_bridge.h" ++#endif ++ ++#include "common_devicememhistory_bridge.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_DEVMEM_SIZE_T uiOffset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_DEVMEM_SIZE_T uiOffset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, ++ IMG_DEV_VIRTADDR sBaseDevVAddr, ++ IMG_UINT32 ui32ui32StartPage, ++ IMG_UINT32 ui32NumPages, ++ IMG_DEVMEM_SIZE_T uiAllocSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, ++ IMG_DEV_VIRTADDR sBaseDevVAddr, ++ IMG_UINT32 ui32ui32StartPage, ++ IMG_UINT32 ui32NumPages, ++ IMG_DEVMEM_SIZE_T uiAllocSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_DEVMEM_SIZE_T uiOffset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 * pui32AllocPageIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 * pui32FreePageIndices, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut); ++ ++#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c +new file mode 100644 +index 000000000000..acbb46475b7c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_devicememhistory_direct_bridge.c +@@ -0,0 +1,194 @@ ++/******************************************************************************* ++@File ++@Title Direct client bridge for devicememhistory ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the client side of the bridge for devicememhistory ++ which is used in calls from Server context. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#include "client_devicememhistory_bridge.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++ ++/* Module specific includes */ ++#include "img_types.h" ++#include "img_defs.h" ++#include "devicemem_typedefs.h" ++ ++#include "devicemem_history_server.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMap(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_DEVMEM_SIZE_T uiOffset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = ++ DevicememHistoryMapKM(psPMRInt, ++ uiOffset, ++ sDevVAddr, ++ uiSize, ++ puiText, ++ ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_DEVMEM_SIZE_T uiOffset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = ++ DevicememHistoryUnmapKM(psPMRInt, ++ uiOffset, ++ sDevVAddr, ++ uiSize, ++ puiText, ++ ui32Log2PageSize, ui32AllocationIndex, pui32AllocationIndexOut); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge, ++ IMG_DEV_VIRTADDR sBaseDevVAddr, ++ IMG_UINT32 ui32ui32StartPage, ++ IMG_UINT32 ui32NumPages, ++ IMG_DEVMEM_SIZE_T uiAllocSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ DevicememHistoryMapVRangeKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ sBaseDevVAddr, ++ ui32ui32StartPage, ++ ui32NumPages, ++ uiAllocSize, ++ puiText, ++ ui32Log2PageSize, ++ ui32AllocationIndex, pui32AllocationIndexOut); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge, ++ IMG_DEV_VIRTADDR sBaseDevVAddr, ++ IMG_UINT32 ui32ui32StartPage, ++ IMG_UINT32 ui32NumPages, ++ IMG_DEVMEM_SIZE_T uiAllocSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ DevicememHistoryUnmapVRangeKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ sBaseDevVAddr, ++ ui32ui32StartPage, ++ ui32NumPages, ++ uiAllocSize, ++ puiText, ++ ui32Log2PageSize, ++ ui32AllocationIndex, pui32AllocationIndexOut); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_DEVMEM_SIZE_T uiOffset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR * puiText, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 * pui32AllocPageIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 * pui32FreePageIndices, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 * pui32AllocationIndexOut) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = ++ 
DevicememHistorySparseChangeKM(psPMRInt, ++ uiOffset, ++ sDevVAddr, ++ uiSize, ++ puiText, ++ ui32Log2PageSize, ++ ui32AllocPageCount, ++ pui32AllocPageIndices, ++ ui32FreePageCount, ++ pui32FreePageIndices, ++ ui32AllocationIndex, pui32AllocationIndexOut); ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h b/drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h +new file mode 100644 +index 000000000000..b3514eaba9b8 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_htbuffer_bridge.h +@@ -0,0 +1,71 @@ ++/******************************************************************************* ++@File ++@Title Client bridge header for htbuffer ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exports the client bridge functions for htbuffer ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef CLIENT_HTBUFFER_BRIDGE_H ++#define CLIENT_HTBUFFER_BRIDGE_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) ++#include "pvr_bridge_client.h" ++#include "pvr_bridge.h" ++#endif ++ ++#include "common_htbuffer_bridge.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32NumGroups, ++ IMG_UINT32 * pui32GroupEnable, ++ IMG_UINT32 ui32LogLevel, ++ IMG_UINT32 ui32EnablePID, ++ IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PID, ++ IMG_UINT32 ui32TID, ++ IMG_UINT64 ui64TimeStamp, ++ IMG_UINT32 ui32SF, ++ IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args); ++ ++#endif /* CLIENT_HTBUFFER_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c +new file mode 100644 +index 000000000000..9c5833116075 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_htbuffer_direct_bridge.c +@@ -0,0 +1,85 @@ ++/******************************************************************************* ++@File ++@Title Direct client bridge for htbuffer ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the client side of the bridge for htbuffer ++ which is used in calls from Server context. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#include "client_htbuffer_bridge.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++ ++/* Module specific includes */ ++#include "devicemem_typedefs.h" ++#include "htbuffer_types.h" ++ ++#include "htbserver.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHTBControl(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32NumGroups, ++ IMG_UINT32 * pui32GroupEnable, ++ IMG_UINT32 ui32LogLevel, ++ IMG_UINT32 ui32EnablePID, ++ IMG_UINT32 ui32LogMode, IMG_UINT32 ui32OpMode) ++{ ++ PVRSRV_ERROR eError; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ eError = ++ HTBControlKM(ui32NumGroups, ++ pui32GroupEnable, ui32LogLevel, ui32EnablePID, ui32LogMode, ui32OpMode); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHTBLog(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PID, ++ IMG_UINT32 ui32TID, ++ IMG_UINT64 ui64TimeStamp, ++ IMG_UINT32 ui32SF, ++ IMG_UINT32 ui32NumArgs, IMG_UINT32 * pui32Args) ++{ ++ PVRSRV_ERROR eError; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ eError = HTBLogKM(ui32PID, ui32TID, ui64TimeStamp, ui32SF, ui32NumArgs, pui32Args); ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/client_mm_bridge.h b/drivers/gpu/drm/img-rogue/client_mm_bridge.h +new file mode 100644 +index 000000000000..ce172eaf2590 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_mm_bridge.h +@@ -0,0 +1,265 @@ ++/******************************************************************************* ++@File ++@Title Client bridge header for mm ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exports the client bridge functions for mm ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#ifndef CLIENT_MM_BRIDGE_H ++#define CLIENT_MM_BRIDGE_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) ++#include "pvr_bridge_client.h" ++#include "pvr_bridge.h" ++#endif ++ ++#include "common_mm_bridge.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_HANDLE * phPMRExport, ++ IMG_UINT64 * pui64Size, ++ IMG_UINT32 * pui32Log2Contig, ++ IMG_UINT64 * pui64Password); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, IMG_UINT64 * pui64UID); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge, ++ IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMRExport, ++ IMG_UINT64 ui64uiPassword, ++ IMG_UINT64 ui64uiSize, ++ IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRLocalImportPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hExtHandle, ++ IMG_HANDLE * phPMR, ++ IMG_DEVMEM_SIZE_T * puiSize, ++ IMG_DEVMEM_ALIGN_T * puiAlign); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 * pui32MappingTable, ++ IMG_UINT32 ui32Log2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 ui32AnnotationLength, ++ const IMG_CHAR * puiAnnotation, ++ IMG_PID ui32PID, ++ IMG_HANDLE * phPMRPtr, ++ IMG_UINT32 ui32PDumpFlags, ++ PVRSRV_MEMALLOCFLAGS_T * puiOutFlags); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 * pui32MappingTable, ++ IMG_UINT32 ui32Log2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 ui32AnnotationLength, ++ const IMG_CHAR * puiAnnotation, ++ IMG_PID ui32PID, ++ IMG_HANDLE * phPMRPtr, ++ IMG_UINT32 ui32PDumpFlags, ++ PVRSRV_MEMALLOCFLAGS_T * puiOutFlags); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPin(IMG_HANDLE hBridge, IMG_HANDLE hPMR); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpin(IMG_HANDLE hBridge, IMG_HANDLE hPMR); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPinValidate(IMG_HANDLE hBridge, ++ IMG_HANDLE hMapping, IMG_HANDLE hPMR); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge, ++ IMG_HANDLE hMapping, IMG_HANDLE hPMR); ++ 
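++/* Device memory context, heap, reservation and mapping management.
++ * In the direct bridge these calls cast their opaque handles back to
++ * server-side pointers and forward to the DevmemInt* implementations.
++ */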
++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, ++ IMG_BOOL bbKernelMemoryCtx, ++ IMG_HANDLE * phDevMemServerContext, ++ IMG_HANDLE * phPrivData, ++ IMG_UINT32 * pui32CPUCacheLineSize); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemServerContext); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_DEV_VIRTADDR sHeapBaseAddr, ++ IMG_DEVMEM_SIZE_T uiHeapLength, ++ IMG_UINT32 ui32Log2DataPageSize, ++ IMG_HANDLE * phDevmemHeapPtr); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemServerHeap, ++ IMG_HANDLE hReservation, ++ IMG_HANDLE hPMR, ++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, ++ IMG_HANDLE * phMapping); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemServerHeap, ++ IMG_DEV_VIRTADDR sAddress, ++ IMG_DEVMEM_SIZE_T uiLength, ++ IMG_HANDLE * phReservation); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, ++ IMG_HANDLE hReservation); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, ++ IMG_HANDLE hSrvDevMemHeap, ++ IMG_HANDLE hPMR, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 * pui32AllocPageIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 * pui32FreePageIndices, ++ IMG_UINT32 ui32SparseFlags, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_UINT64 ui64CPUVAddr); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge, ++ IMG_HANDLE hReservation, ++ IMG_HANDLE hPMR, ++ IMG_UINT32 ui32PageCount, ++ IMG_UINT32 ui32PhysicalPgOffset, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_DEV_VIRTADDR sDevVAddr); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, ++ IMG_HANDLE hReservation, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_UINT32 ui32PageCount); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_DEV_VIRTADDR sAddress); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_DEV_VIRTADDR sAddress, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_BOOL bInvalidate); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_UINT64 ui64FBSCEntries); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge, ++ IMG_UINT32 * pui32NumHeapConfigs); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32HeapConfigIndex, ++ IMG_UINT32 * pui32NumHeaps); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32HeapConfigIndex, ++ IMG_UINT32 ui32HeapConfigNameBufSz, ++ IMG_CHAR * puiHeapConfigName); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32HeapConfigIndex, ++ IMG_UINT32 ui32HeapIndex, ++ IMG_UINT32 ui32HeapNameBufSz, ++ IMG_CHAR * puiHeapNameOut, ++ IMG_DEV_VIRTADDR * psDevVAddrBase, ++ IMG_DEVMEM_SIZE_T * puiHeapLength, ++ IMG_DEVMEM_SIZE_T * puiReservedRegionLength, ++ IMG_UINT32 * pui32Log2DataPageSizeOut, ++ IMG_UINT32 * pui32Log2ImportAlignmentOut); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevm, ++ 
IMG_UINT32 ui32PID, IMG_BOOL bRegister); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge, ++ IMG_UINT32 * pui32PhysHeapCount); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP * peaPhysHeapID, ++ PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge, ++ PVRSRV_PHYS_HEAP * peHeap); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PhysHeapCount, ++ PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_DEV_VIRTADDR * psFaultAddress); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfoPkd(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP * peaPhysHeapID, ++ PHYS_HEAP_MEM_STATS_PKD * ++ psapPhysHeapMemStats); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsagePkd(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PhysHeapCount, ++ PHYS_HEAP_MEM_STATS_PKD * ++ psapPhysHeapMemStats); ++ ++#endif /* CLIENT_MM_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c +new file mode 100644 +index 000000000000..958706b63798 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_mm_direct_bridge.c +@@ -0,0 +1,804 @@ ++/******************************************************************************* ++@File ++@Title Direct client bridge for mm ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the client side of the bridge for mm ++ which is used in calls from Server context. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#include "client_mm_bridge.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++ ++/* Module specific includes */ ++#include "pvrsrv_memallocflags.h" ++#include "pvrsrv_memalloc_physheap.h" ++#include "devicemem_typedefs.h" ++ ++#include "pvrsrv_memalloc_physheap.h" ++#include "devicemem.h" ++#include "devicemem_server.h" ++#include "pmr.h" ++#include "devicemem_heapcfg.h" ++#include "physmem.h" ++#include "devicemem_utils.h" ++#include "process_stats.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRExportPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_HANDLE * phPMRExport, ++ IMG_UINT64 * pui64Size, ++ IMG_UINT32 * pui32Log2Contig, ++ IMG_UINT64 * pui64Password) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PMR_EXPORT *psPMRExportInt = NULL; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = PMRExportPMR(psPMRInt, &psPMRExportInt, pui64Size, pui32Log2Contig, pui64Password); ++ ++ *phPMRExport = psPMRExportInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRUnexportPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMRExport) ++{ ++ PVRSRV_ERROR eError; ++ PMR_EXPORT *psPMRExportInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRExportInt = (PMR_EXPORT *) hPMRExport; ++ ++ eError = PMRUnexportPMR(psPMRExportInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRGetUID(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, IMG_UINT64 * pui64UID) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = PMRGetUID(psPMRInt, pui64UID); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge, ++ IMG_HANDLE hBuffer, IMG_HANDLE * phExtMem) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psBufferInt; ++ PMR *psExtMemInt = NULL; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psBufferInt = (PMR *) hBuffer; ++ ++ eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); ++ ++ *phExtMem = psExtMemInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge, IMG_HANDLE hExtMem) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psExtMemInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psExtMemInt = (PMR *) hExtMem; ++ ++ eError = PMRUnmakeLocalImportHandle(psExtMemInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRImportPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMRExport, ++ IMG_UINT64 ui64uiPassword, ++ IMG_UINT64 ui64uiSize, ++ IMG_UINT32 ui32uiLog2Contig, IMG_HANDLE * phPMR) ++{ ++ PVRSRV_ERROR eError; ++ PMR_EXPORT *psPMRExportInt; ++ PMR *psPMRInt = NULL; ++ ++ psPMRExportInt = (PMR_EXPORT *) hPMRExport; ++ ++ eError = ++ PhysmemImportPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ psPMRExportInt, ++ ui64uiPassword, ui64uiSize, ui32uiLog2Contig, &psPMRInt); ++ ++ *phPMR = psPMRInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR 
BridgePMRLocalImportPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hExtHandle, ++ IMG_HANDLE * phPMR, ++ IMG_DEVMEM_SIZE_T * puiSize, ++ IMG_DEVMEM_ALIGN_T * puiAlign) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psExtHandleInt; ++ PMR *psPMRInt = NULL; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psExtHandleInt = (PMR *) hExtHandle; ++ ++ eError = PMRLocalImportPMR(psExtHandleInt, &psPMRInt, puiSize, puiAlign); ++ ++ *phPMR = psPMRInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = PMRUnrefPMR(psPMRInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge, IMG_HANDLE hPMR) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = PMRUnrefUnlockPMR(psPMRInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 * pui32MappingTable, ++ IMG_UINT32 ui32Log2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 ui32AnnotationLength, ++ const IMG_CHAR * puiAnnotation, ++ IMG_PID ui32PID, ++ IMG_HANDLE * phPMRPtr, ++ IMG_UINT32 ui32PDumpFlags, ++ PVRSRV_MEMALLOCFLAGS_T * puiOutFlags) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRPtrInt = NULL; ++ ++ eError = ++ PhysmemNewRamBackedPMR_direct(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ ui32Log2PageSize, ++ uiFlags, ++ ui32AnnotationLength, ++ puiAnnotation, ++ ui32PID, &psPMRPtrInt, ui32PDumpFlags, puiOutFlags); ++ ++ *phPMRPtr = psPMRPtrInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 * pui32MappingTable, ++ IMG_UINT32 ui32Log2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 ui32AnnotationLength, ++ const IMG_CHAR * puiAnnotation, ++ IMG_PID ui32PID, ++ IMG_HANDLE * phPMRPtr, ++ IMG_UINT32 ui32PDumpFlags, ++ PVRSRV_MEMALLOCFLAGS_T * puiOutFlags) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRPtrInt = NULL; ++ ++ eError = ++ PhysmemNewRamBackedLockedPMR(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ ui32Log2PageSize, ++ uiFlags, ++ ui32AnnotationLength, ++ puiAnnotation, ++ ui32PID, &psPMRPtrInt, ui32PDumpFlags, puiOutFlags); ++ ++ *phPMRPtr = psPMRPtrInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPin(IMG_HANDLE hBridge, IMG_HANDLE hPMR) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = DevmemIntPin(psPMRInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpin(IMG_HANDLE hBridge, IMG_HANDLE hPMR) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = DevmemIntUnpin(psPMRInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntPinValidate(IMG_HANDLE hBridge, ++ IMG_HANDLE hMapping, IMG_HANDLE hPMR) ++{ ++ PVRSRV_ERROR eError; ++ 
DEVMEMINT_MAPPING *psMappingInt; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psMappingInt = (DEVMEMINT_MAPPING *) hMapping; ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = DevmemIntPinValidate(psMappingInt, psPMRInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge, ++ IMG_HANDLE hMapping, IMG_HANDLE hPMR) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_MAPPING *psMappingInt; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psMappingInt = (DEVMEMINT_MAPPING *) hMapping; ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge, ++ IMG_BOOL bbKernelMemoryCtx, ++ IMG_HANDLE * phDevMemServerContext, ++ IMG_HANDLE * phPrivData, ++ IMG_UINT32 * pui32CPUCacheLineSize) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevMemServerContextInt = NULL; ++ IMG_HANDLE hPrivDataInt = NULL; ++ ++ eError = ++ DevmemIntCtxCreate(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ bbKernelMemoryCtx, ++ &psDevMemServerContextInt, &hPrivDataInt, pui32CPUCacheLineSize); ++ ++ *phDevMemServerContext = psDevMemServerContextInt; ++ *phPrivData = hPrivDataInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemServerContext) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevmemServerContextInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext; ++ ++ eError = DevmemIntCtxDestroy(psDevmemServerContextInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_DEV_VIRTADDR sHeapBaseAddr, ++ IMG_DEVMEM_SIZE_T uiHeapLength, ++ IMG_UINT32 ui32Log2DataPageSize, ++ IMG_HANDLE * phDevmemHeapPtr) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevmemCtxInt; ++ DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; ++ ++ eError = ++ DevmemIntHeapCreate(psDevmemCtxInt, ++ sHeapBaseAddr, ++ uiHeapLength, ui32Log2DataPageSize, &psDevmemHeapPtrInt); ++ ++ *phDevmemHeapPtr = psDevmemHeapPtrInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge, IMG_HANDLE hDevmemHeap) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_HEAP *psDevmemHeapInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap; ++ ++ eError = DevmemIntHeapDestroy(psDevmemHeapInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemServerHeap, ++ IMG_HANDLE hReservation, ++ IMG_HANDLE hPMR, ++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, ++ IMG_HANDLE * phMapping) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_HEAP *psDevmemServerHeapInt; ++ DEVMEMINT_RESERVATION *psReservationInt; ++ PMR *psPMRInt; ++ DEVMEMINT_MAPPING *psMappingInt = NULL; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; ++ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = ++ DevmemIntMapPMR(psDevmemServerHeapInt, ++ psReservationInt, psPMRInt, uiMapFlags, &psMappingInt); ++ ++ *phMapping = psMappingInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge, IMG_HANDLE hMapping) ++{ ++ PVRSRV_ERROR eError; ++ 
DEVMEMINT_MAPPING *psMappingInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psMappingInt = (DEVMEMINT_MAPPING *) hMapping; ++ ++ eError = DevmemIntUnmapPMR(psMappingInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntReserveRange(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemServerHeap, ++ IMG_DEV_VIRTADDR sAddress, ++ IMG_DEVMEM_SIZE_T uiLength, ++ IMG_HANDLE * phReservation) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_HEAP *psDevmemServerHeapInt; ++ DEVMEMINT_RESERVATION *psReservationInt = NULL; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap; ++ ++ eError = ++ DevmemIntReserveRange(psDevmemServerHeapInt, sAddress, uiLength, &psReservationInt); ++ ++ *phReservation = psReservationInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge, IMG_HANDLE hReservation) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_RESERVATION *psReservationInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; ++ ++ eError = DevmemIntUnreserveRange(psReservationInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeChangeSparseMem(IMG_HANDLE hBridge, ++ IMG_HANDLE hSrvDevMemHeap, ++ IMG_HANDLE hPMR, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 * pui32AllocPageIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 * pui32FreePageIndices, ++ IMG_UINT32 ui32SparseFlags, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_DEV_VIRTADDR sDevVAddr, IMG_UINT64 ui64CPUVAddr) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_HEAP *psSrvDevMemHeapInt; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap; ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = ++ DevmemIntChangeSparse(psSrvDevMemHeapInt, ++ psPMRInt, ++ ui32AllocPageCount, ++ pui32AllocPageIndices, ++ ui32FreePageCount, ++ pui32FreePageIndices, ++ ui32SparseFlags, uiFlags, sDevVAddr, ui64CPUVAddr); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntMapPages(IMG_HANDLE hBridge, ++ IMG_HANDLE hReservation, ++ IMG_HANDLE hPMR, ++ IMG_UINT32 ui32PageCount, ++ IMG_UINT32 ui32PhysicalPgOffset, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_DEV_VIRTADDR sDevVAddr) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_RESERVATION *psReservationInt; ++ PMR *psPMRInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; ++ psPMRInt = (PMR *) hPMR; ++ ++ eError = ++ DevmemIntMapPages(psReservationInt, ++ psPMRInt, ui32PageCount, ui32PhysicalPgOffset, uiFlags, sDevVAddr); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge, ++ IMG_HANDLE hReservation, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_UINT32 ui32PageCount) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_RESERVATION *psReservationInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation; ++ ++ eError = DevmemIntUnmapPages(psReservationInt, sDevVAddr, ui32PageCount); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_DEV_VIRTADDR sAddress) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevmemCtxInt; ++ ++ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; ++ ++ eError = ++ DevmemIntIsVDevAddrValid(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ psDevmemCtxInt, sAddress); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR 
BridgeDevmemFlushDevSLCRange(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_DEV_VIRTADDR sAddress, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_BOOL bInvalidate) ++{ ++#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevmemCtxInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; ++ ++ eError = DevmemIntFlushDevSLCRange(psDevmemCtxInt, sAddress, uiSize, bInvalidate); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(hDevmemCtx); ++ PVR_UNREFERENCED_PARAMETER(sAddress); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(bInvalidate); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++#endif ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemInvalidateFBSCTable(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_UINT64 ui64FBSCEntries) ++{ ++#if defined(RGX_FEATURE_FBCDC) ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevmemCtxInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; ++ ++ eError = DevmemIntInvalidateFBSCTable(psDevmemCtxInt, ui64FBSCEntries); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(hDevmemCtx); ++ PVR_UNREFERENCED_PARAMETER(ui64FBSCEntries); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++#endif ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge, ++ IMG_UINT32 * pui32NumHeapConfigs) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ HeapCfgHeapConfigCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ pui32NumHeapConfigs); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapCount(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32HeapConfigIndex, ++ IMG_UINT32 * pui32NumHeaps) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ HeapCfgHeapCount(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ ui32HeapConfigIndex, pui32NumHeaps); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32HeapConfigIndex, ++ IMG_UINT32 ui32HeapConfigNameBufSz, ++ IMG_CHAR * puiHeapConfigName) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ HeapCfgHeapConfigName(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ ui32HeapConfigIndex, ui32HeapConfigNameBufSz, puiHeapConfigName); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32HeapConfigIndex, ++ IMG_UINT32 ui32HeapIndex, ++ IMG_UINT32 ui32HeapNameBufSz, ++ IMG_CHAR * puiHeapNameOut, ++ IMG_DEV_VIRTADDR * psDevVAddrBase, ++ IMG_DEVMEM_SIZE_T * puiHeapLength, ++ IMG_DEVMEM_SIZE_T * puiReservedRegionLength, ++ IMG_UINT32 * pui32Log2DataPageSizeOut, ++ IMG_UINT32 * pui32Log2ImportAlignmentOut) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ HeapCfgHeapDetails(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ ui32HeapConfigIndex, ++ ui32HeapIndex, ++ ui32HeapNameBufSz, ++ puiHeapNameOut, ++ psDevVAddrBase, ++ puiHeapLength, ++ puiReservedRegionLength, ++ pui32Log2DataPageSizeOut, pui32Log2ImportAlignmentOut); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevm, ++ IMG_UINT32 ui32PID, IMG_BOOL bRegister) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevmInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psDevmInt = (DEVMEMINT_CTX *) hDevm; ++ ++ eError = DevmemIntRegisterPFNotifyKM(psDevmInt, ui32PID, bRegister); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR 
BridgeGetMaxPhysHeapCount(IMG_HANDLE hBridge, ++ IMG_UINT32 * pui32PhysHeapCount) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ PVRSRVGetMaxPhysHeapCountKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ pui32PhysHeapCount); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfo(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP * peaPhysHeapID, ++ PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ PVRSRVPhysHeapGetMemInfoKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ ui32PhysHeapCount, peaPhysHeapID, pasapPhysHeapMemStats); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeGetDefaultPhysicalHeap(IMG_HANDLE hBridge, ++ PVRSRV_PHYS_HEAP * peHeap) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ PVRSRVGetDefaultPhysicalHeapKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), peHeap); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsage(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PhysHeapCount, ++ PHYS_HEAP_MEM_STATS * pasapPhysHeapMemStats) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ PVRSRVGetHeapPhysMemUsageKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ ui32PhysHeapCount, pasapPhysHeapMemStats); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge, ++ IMG_HANDLE hDevmemCtx, ++ IMG_DEV_VIRTADDR * psFaultAddress) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevmemCtxInt; ++ ++ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx; ++ ++ eError = ++ DevmemIntGetFaultAddress(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ psDevmemCtxInt, psFaultAddress); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePVRSRVUpdateOOMStats(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32ui32StatType, IMG_PID ui32pid) ++{ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ PVRSRV_ERROR eError; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ eError = PVRSRVServerUpdateOOMStats(ui32ui32StatType, ui32pid); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(ui32ui32StatType); ++ PVR_UNREFERENCED_PARAMETER(ui32pid); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++#endif ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgePhysHeapGetMemInfoPkd(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP * peaPhysHeapID, ++ PHYS_HEAP_MEM_STATS_PKD * ++ psapPhysHeapMemStats) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ PVRSRVPhysHeapGetMemInfoPkdKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ ui32PhysHeapCount, peaPhysHeapID, psapPhysHeapMemStats); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeGetHeapPhysMemUsagePkd(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32PhysHeapCount, ++ PHYS_HEAP_MEM_STATS_PKD * ++ psapPhysHeapMemStats) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ PVRSRVGetHeapPhysMemUsagePkdKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ ui32PhysHeapCount, psapPhysHeapMemStats); ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h b/drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h +new file mode 100644 +index 000000000000..2cfafd5a83e1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_pvrtl_bridge.h +@@ -0,0 +1,93 @@ ++/******************************************************************************* ++@File ++@Title Client bridge header for pvrtl ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Exports the client bridge functions for pvrtl ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef CLIENT_PVRTL_BRIDGE_H ++#define CLIENT_PVRTL_BRIDGE_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) ++#include "pvr_bridge_client.h" ++#include "pvr_bridge.h" ++#endif ++ ++#include "common_pvrtl_bridge.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge, ++ const IMG_CHAR * puiName, ++ IMG_UINT32 ui32Mode, ++ IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, ++ IMG_UINT32 * pui32ReadOffset, ++ IMG_UINT32 * pui32ReadLen); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, ++ IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge, ++ const IMG_CHAR * puiNamePattern, ++ IMG_UINT32 ui32Size, ++ IMG_CHAR * puiStreams, ++ IMG_UINT32 * pui32NumFound); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, ++ IMG_UINT32 * pui32BufferOffset, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32SizeMin, ++ IMG_UINT32 * pui32Available); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, IMG_UINT32 ui32ReqSize); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, ++ IMG_UINT32 ui32Size, IMG_BYTE * pui8Data); ++ ++#endif /* CLIENT_PVRTL_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c +new file mode 100644 +index 000000000000..fa2fbed9595b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_pvrtl_direct_bridge.c +@@ -0,0 +1,175 @@ ++/******************************************************************************* ++@File ++@Title Direct client bridge for pvrtl ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the client side of the bridge for pvrtl ++ which is used in calls from Server context. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#include "client_pvrtl_bridge.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++ ++/* Module specific includes */ ++#include "devicemem_typedefs.h" ++#include "pvrsrv_tlcommon.h" ++ ++#include "tlserver.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLOpenStream(IMG_HANDLE hBridge, ++ const IMG_CHAR * puiName, ++ IMG_UINT32 ui32Mode, ++ IMG_HANDLE * phSD, IMG_HANDLE * phTLPMR) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC *psSDInt = NULL; ++ PMR *psTLPMRInt = NULL; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ eError = TLServerOpenStreamKM(puiName, ui32Mode, &psSDInt, &psTLPMRInt); ++ ++ *phSD = psSDInt; ++ *phTLPMR = psTLPMRInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLCloseStream(IMG_HANDLE hBridge, IMG_HANDLE hSD) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC *psSDInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSDInt = (TL_STREAM_DESC *) hSD; ++ ++ eError = TLServerCloseStreamKM(psSDInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLAcquireData(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, ++ IMG_UINT32 * pui32ReadOffset, ++ IMG_UINT32 * pui32ReadLen) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC *psSDInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSDInt = (TL_STREAM_DESC *) hSD; ++ ++ eError = TLServerAcquireDataKM(psSDInt, pui32ReadOffset, pui32ReadLen); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLReleaseData(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, ++ IMG_UINT32 ui32ReadOffset, IMG_UINT32 ui32ReadLen) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC *psSDInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSDInt = (TL_STREAM_DESC *) hSD; ++ ++ eError = TLServerReleaseDataKM(psSDInt, ui32ReadOffset, ui32ReadLen); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLDiscoverStreams(IMG_HANDLE hBridge, ++ const IMG_CHAR * puiNamePattern, ++ IMG_UINT32 ui32Size, ++ IMG_CHAR * puiStreams, IMG_UINT32 * pui32NumFound) ++{ ++ PVRSRV_ERROR eError; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ eError = TLServerDiscoverStreamsKM(puiNamePattern, ui32Size, puiStreams, pui32NumFound); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLReserveStream(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, ++ IMG_UINT32 * pui32BufferOffset, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32SizeMin, IMG_UINT32 * pui32Available) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC *psSDInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSDInt = (TL_STREAM_DESC *) hSD; ++ ++ eError = ++ TLServerReserveStreamKM(psSDInt, ++ pui32BufferOffset, ui32Size, ui32SizeMin, pui32Available); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLCommitStream(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, IMG_UINT32 
ui32ReqSize) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC *psSDInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSDInt = (TL_STREAM_DESC *) hSD; ++ ++ eError = TLServerCommitStreamKM(psSDInt, ui32ReqSize); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeTLWriteData(IMG_HANDLE hBridge, ++ IMG_HANDLE hSD, ++ IMG_UINT32 ui32Size, IMG_BYTE * pui8Data) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC *psSDInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSDInt = (TL_STREAM_DESC *) hSD; ++ ++ eError = TLServerWriteDataKM(psSDInt, ui32Size, pui8Data); ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/client_ri_bridge.h b/drivers/gpu/drm/img-rogue/client_ri_bridge.h +new file mode 100644 +index 000000000000..b3c42e6f496e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_ri_bridge.h +@@ -0,0 +1,89 @@ ++/******************************************************************************* ++@File ++@Title Client bridge header for ri ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exports the client bridge functions for ri ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef CLIENT_RI_BRIDGE_H ++#define CLIENT_RI_BRIDGE_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) ++#include "pvr_bridge_client.h" ++#include "pvr_bridge.h" ++#endif ++ ++#include "common_ri_bridge.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMRHandle, ++ IMG_UINT32 ui32TextBSize, ++ const IMG_CHAR * puiTextB, ++ IMG_UINT64 ui64Offset, ++ IMG_UINT64 ui64Size, ++ IMG_BOOL bIsImport, ++ IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32TextBSize, ++ const IMG_CHAR * puiTextB, ++ IMG_UINT64 ui64Size, ++ IMG_UINT64 ui64DevVAddr, ++ IMG_HANDLE * phRIHandle); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge, ++ IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMRHandle, IMG_PID ui32Owner); ++ ++#endif /* CLIENT_RI_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c +new file mode 100644 +index 000000000000..2a9934cc4316 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_ri_direct_bridge.c +@@ -0,0 +1,182 @@ ++/******************************************************************************* ++@File ++@Title Direct client bridge for ri ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the client side of the bridge for ri ++ which is used in calls from Server context. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#include "client_ri_bridge.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++ ++/* Module specific includes */ ++#include "ri_typedefs.h" ++ ++#include "ri_server.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntry(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRHandleInt = (PMR *) hPMRHandle; ++ ++ eError = RIWritePMREntryKM(psPMRHandleInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMRHandle, ++ IMG_UINT32 ui32TextBSize, ++ const IMG_CHAR * puiTextB, ++ IMG_UINT64 ui64Offset, ++ IMG_UINT64 ui64Size, ++ IMG_BOOL bIsImport, ++ IMG_BOOL bIsSuballoc, IMG_HANDLE * phRIHandle) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRHandleInt; ++ RI_HANDLE psRIHandleInt = NULL; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRHandleInt = (PMR *) hPMRHandle; ++ ++ eError = ++ RIWriteMEMDESCEntryKM(psPMRHandleInt, ++ ui32TextBSize, ++ puiTextB, ++ ui64Offset, ui64Size, bIsImport, bIsSuballoc, &psRIHandleInt); ++ ++ *phRIHandle = psRIHandleInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIWriteProcListEntry(IMG_HANDLE hBridge, ++ IMG_UINT32 ui32TextBSize, ++ const IMG_CHAR * puiTextB, ++ IMG_UINT64 ui64Size, ++ IMG_UINT64 ui64DevVAddr, ++ IMG_HANDLE * phRIHandle) ++{ ++ PVRSRV_ERROR eError; ++ RI_HANDLE psRIHandleInt = NULL; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ eError = ++ RIWriteProcListEntryKM(ui32TextBSize, puiTextB, ui64Size, ui64DevVAddr, &psRIHandleInt); ++ ++ *phRIHandle = psRIHandleInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge, ++ IMG_HANDLE hRIHandle, IMG_DEV_VIRTADDR sAddr) ++{ ++ PVRSRV_ERROR eError; ++ RI_HANDLE psRIHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psRIHandleInt = (RI_HANDLE) hRIHandle; ++ ++ eError = RIUpdateMEMDESCAddrKM(psRIHandleInt, sAddr); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge, IMG_HANDLE hRIHandle) ++{ ++ PVRSRV_ERROR eError; ++ RI_HANDLE psRIHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psRIHandleInt = (RI_HANDLE) hRIHandle; ++ ++ eError = RIDeleteMEMDESCEntryKM(psRIHandleInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpList(IMG_HANDLE hBridge, IMG_HANDLE hPMRHandle) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRHandleInt = (PMR *) hPMRHandle; ++ ++ eError = RIDumpListKM(psPMRHandleInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpAll(IMG_HANDLE hBridge) ++{ ++ PVRSRV_ERROR eError; 
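++	/* RIDumpAllKM takes no arguments; the bridge handle is deliberately unused. */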
++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ eError = RIDumpAllKM(); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIDumpProcess(IMG_HANDLE hBridge, IMG_PID ui32Pid) ++{ ++ PVRSRV_ERROR eError; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ eError = RIDumpProcessKM(ui32Pid); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMRHandle, IMG_PID ui32Owner) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMRHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psPMRHandleInt = (PMR *) hPMRHandle; ++ ++ eError = RIWritePMREntryWithOwnerKM(psPMRHandleInt, ui32Owner); ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/client_sync_bridge.h b/drivers/gpu/drm/img-rogue/client_sync_bridge.h +new file mode 100644 +index 000000000000..19f1b0ece5b9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_sync_bridge.h +@@ -0,0 +1,102 @@ ++/******************************************************************************* ++@File ++@Title Client bridge header for sync ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exports the client bridge functions for sync ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef CLIENT_SYNC_BRIDGE_H ++#define CLIENT_SYNC_BRIDGE_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) ++#include "pvr_bridge_client.h" ++#include "pvr_bridge.h" ++#endif ++ ++#include "common_sync_bridge.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge, ++ IMG_HANDLE * phSyncHandle, ++ IMG_UINT32 * pui32SyncPrimVAddr, ++ IMG_UINT32 * pui32SyncPrimBlockSize, ++ IMG_HANDLE * phhSyncPMR); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, ++ IMG_UINT32 ui32Index, IMG_UINT32 ui32Value); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, ++ IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, ++ IMG_UINT32 ui32Offset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiPDumpFlags); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, ++ IMG_UINT32 ui32Offset, ++ IMG_DEVMEM_OFFSET_T uiWriteOffset, ++ IMG_DEVMEM_SIZE_T uiPacketSize, ++ IMG_DEVMEM_SIZE_T uiBufferSize); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge, ++ IMG_BOOL bServerSync, ++ IMG_UINT32 ui32FWAddr, ++ IMG_UINT32 ui32ClassNameSize, ++ const IMG_CHAR * puiClassName); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, ++ PVRSRV_FENCE hFence); ++ ++#endif /* CLIENT_SYNC_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c +new file mode 100644 +index 000000000000..d631aea421ad +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_sync_direct_bridge.c +@@ -0,0 +1,262 @@ ++/******************************************************************************* ++@File ++@Title Direct client bridge for sync ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the client side of the bridge for sync ++ which is used in calls from Server context. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#include "client_sync_bridge.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++ ++/* Module specific includes */ ++#include "pdump.h" ++#include "pdumpdefs.h" ++#include "devicemem_typedefs.h" ++#include "pvrsrv_sync_km.h" ++#include ++ ++#include "sync.h" ++#include "sync_server.h" ++#include "pdump.h" ++#include "pvrsrv_sync_km.h" ++#include "sync_fallback_server.h" ++#include "sync_checkpoint.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge, ++ IMG_HANDLE * phSyncHandle, ++ IMG_UINT32 * pui32SyncPrimVAddr, ++ IMG_UINT32 * pui32SyncPrimBlockSize, ++ IMG_HANDLE * phhSyncPMR) ++{ ++ PVRSRV_ERROR eError; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; ++ PMR *pshSyncPMRInt = NULL; ++ ++ eError = ++ PVRSRVAllocSyncPrimitiveBlockKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ &psSyncHandleInt, ++ pui32SyncPrimVAddr, ++ pui32SyncPrimBlockSize, &pshSyncPMRInt); ++ ++ *phSyncHandle = psSyncHandleInt; ++ *phhSyncPMR = pshSyncPMRInt; ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge, IMG_HANDLE hSyncHandle) ++{ ++ PVRSRV_ERROR eError; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; ++ ++ eError = PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimSet(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, ++ IMG_UINT32 ui32Index, IMG_UINT32 ui32Value) ++{ ++ PVRSRV_ERROR eError; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; ++ ++ eError = PVRSRVSyncPrimSetKM(psSyncHandleInt, ui32Index, ui32Value); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDump(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, IMG_UINT32 ui32Offset) ++{ ++#if defined(PDUMP) ++ PVRSRV_ERROR eError; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; ++ ++ eError = PVRSRVSyncPrimPDumpKM(psSyncHandleInt, ui32Offset); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(hSyncHandle); ++ 
PVR_UNREFERENCED_PARAMETER(ui32Offset); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++#endif ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, ++ IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value) ++{ ++#if defined(PDUMP) ++ PVRSRV_ERROR eError; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; ++ ++ eError = PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, ui32Offset, ui32Value); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(hSyncHandle); ++ PVR_UNREFERENCED_PARAMETER(ui32Offset); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++#endif ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, ++ IMG_UINT32 ui32Offset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++#if defined(PDUMP) ++ PVRSRV_ERROR eError; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; ++ ++ eError = ++ PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, ++ ui32Offset, ui32Value, ui32Mask, eOperator, uiPDumpFlags); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(hSyncHandle); ++ PVR_UNREFERENCED_PARAMETER(ui32Offset); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ PVR_UNREFERENCED_PARAMETER(eOperator); ++ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++#endif ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge, ++ IMG_HANDLE hSyncHandle, ++ IMG_UINT32 ui32Offset, ++ IMG_DEVMEM_OFFSET_T uiWriteOffset, ++ IMG_DEVMEM_SIZE_T uiPacketSize, ++ IMG_DEVMEM_SIZE_T uiBufferSize) ++{ ++#if defined(PDUMP) ++ PVRSRV_ERROR eError; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle; ++ ++ eError = ++ PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, ++ ui32Offset, uiWriteOffset, uiPacketSize, uiBufferSize); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(hSyncHandle); ++ PVR_UNREFERENCED_PARAMETER(ui32Offset); ++ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); ++ PVR_UNREFERENCED_PARAMETER(uiPacketSize); ++ PVR_UNREFERENCED_PARAMETER(uiBufferSize); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++#endif ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncAllocEvent(IMG_HANDLE hBridge, ++ IMG_BOOL bServerSync, ++ IMG_UINT32 ui32FWAddr, ++ IMG_UINT32 ui32ClassNameSize, ++ const IMG_CHAR * puiClassName) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = ++ PVRSRVSyncAllocEventKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ bServerSync, ui32FWAddr, ui32ClassNameSize, puiClassName); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncFreeEvent(IMG_HANDLE hBridge, IMG_UINT32 ui32FWAddr) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = PVRSRVSyncFreeEventKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ui32FWAddr); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncCheckpointSignalledPDumpPol(IMG_HANDLE hBridge, ++ PVRSRV_FENCE hFence) ++{ ++#if defined(PDUMP) ++ PVRSRV_ERROR eError; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ eError = PVRSRVSyncCheckpointSignalledPDumpPolKM(hFence); ++ ++ return eError; ++#else ++ 
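++ /* Built without PDUMP support: consume the parameters and report the
++ * poll request as unimplemented, as the other PDump stubs above do. */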
PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(hFence); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++#endif ++} +diff --git a/drivers/gpu/drm/img-rogue/client_synctracking_bridge.h b/drivers/gpu/drm/img-rogue/client_synctracking_bridge.h +new file mode 100644 +index 000000000000..544efd962834 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_synctracking_bridge.h +@@ -0,0 +1,68 @@ ++/******************************************************************************* ++@File ++@Title Client bridge header for synctracking ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exports the client bridge functions for synctracking ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef CLIENT_SYNCTRACKING_BRIDGE_H ++#define CLIENT_SYNCTRACKING_BRIDGE_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#if defined(PVR_INDIRECT_BRIDGE_CLIENTS) ++#include "pvr_bridge_client.h" ++#include "pvr_bridge.h" ++#endif ++ ++#include "common_synctracking_bridge.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord); ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge, ++ IMG_HANDLE * phhRecord, ++ IMG_HANDLE hhServerSyncPrimBlock, ++ IMG_UINT32 ui32ui32FwBlockAddr, ++ IMG_UINT32 ui32ui32SyncOffset, ++ IMG_BOOL bbServerSync, ++ IMG_UINT32 ui32ClassNameSize, ++ const IMG_CHAR * puiClassName); ++ ++#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/client_synctracking_direct_bridge.c b/drivers/gpu/drm/img-rogue/client_synctracking_direct_bridge.c +new file mode 100644 +index 000000000000..baeb89a6a0ae +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/client_synctracking_direct_bridge.c +@@ -0,0 +1,92 @@ ++/******************************************************************************* ++@File ++@Title Direct client bridge for synctracking ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the client side of the bridge for synctracking ++ which is used in calls from Server context. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#include "client_synctracking_bridge.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++ ++/* Module specific includes */ ++ ++#include "sync.h" ++#include "sync_server.h" ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge, IMG_HANDLE hhRecord) ++{ ++ PVRSRV_ERROR eError; ++ SYNC_RECORD_HANDLE pshRecordInt; ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ ++ pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord; ++ ++ eError = PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR BridgeSyncRecordAdd(IMG_HANDLE hBridge, ++ IMG_HANDLE * phhRecord, ++ IMG_HANDLE hhServerSyncPrimBlock, ++ IMG_UINT32 ui32ui32FwBlockAddr, ++ IMG_UINT32 ui32ui32SyncOffset, ++ IMG_BOOL bbServerSync, ++ IMG_UINT32 ui32ClassNameSize, ++ const IMG_CHAR * puiClassName) ++{ ++ PVRSRV_ERROR eError; ++ SYNC_RECORD_HANDLE pshRecordInt = NULL; ++ SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt; ++ ++ pshServerSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock; ++ ++ eError = ++ PVRSRVSyncRecordAddKM(NULL, (PVRSRV_DEVICE_NODE *) ((void *)hBridge), ++ &pshRecordInt, ++ pshServerSyncPrimBlockInt, ++ ui32ui32FwBlockAddr, ++ ui32ui32SyncOffset, ++ bbServerSync, ui32ClassNameSize, puiClassName); ++ ++ *phhRecord = pshRecordInt; ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/common_cache_bridge.h b/drivers/gpu/drm/img-rogue/common_cache_bridge.h +new file mode 100644 +index 000000000000..cc848753f7fa +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_cache_bridge.h +@@ -0,0 +1,126 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for cache ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for cache ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#ifndef COMMON_CACHE_BRIDGE_H ++#define COMMON_CACHE_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "cache_ops.h" ++ ++#define PVRSRV_BRIDGE_CACHE_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE PVRSRV_BRIDGE_CACHE_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC PVRSRV_BRIDGE_CACHE_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG PVRSRV_BRIDGE_CACHE_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_CACHE_CMD_LAST (PVRSRV_BRIDGE_CACHE_CMD_FIRST+2) ++ ++/******************************************* ++ CacheOpQueue ++ *******************************************/ ++ ++/* Bridge in structure for CacheOpQueue */ ++typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG ++{ ++ PVRSRV_CACHE_OP *piuCacheOp; ++ IMG_UINT64 *pui64Address; ++ IMG_DEVMEM_OFFSET_T *puiOffset; ++ IMG_DEVMEM_SIZE_T *puiSize; ++ IMG_HANDLE *phPMR; ++ IMG_UINT32 ui32NumCacheOps; ++ IMG_UINT32 ui32OpTimeline; ++} __packed PVRSRV_BRIDGE_IN_CACHEOPQUEUE; ++ ++/* Bridge out structure for CacheOpQueue */ ++typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_CACHEOPQUEUE; ++ ++/******************************************* ++ CacheOpExec ++ *******************************************/ ++ ++/* Bridge in structure for CacheOpExec */ ++typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG ++{ ++ IMG_UINT64 ui64Address; ++ IMG_DEVMEM_OFFSET_T uiOffset; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hPMR; ++ PVRSRV_CACHE_OP iuCacheOp; ++} __packed PVRSRV_BRIDGE_IN_CACHEOPEXEC; ++ ++/* Bridge out structure for CacheOpExec */ ++typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_CACHEOPEXEC; ++ ++/******************************************* ++ CacheOpLog ++ *******************************************/ ++ ++/* Bridge in structure for CacheOpLog */ ++typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG ++{ ++ IMG_INT64 i64EndTime; ++ IMG_INT64 i64StartTime; ++ IMG_UINT64 ui64Address; ++ IMG_DEVMEM_OFFSET_T uiOffset; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hPMR; ++ PVRSRV_CACHE_OP iuCacheOp; ++} __packed PVRSRV_BRIDGE_IN_CACHEOPLOG; ++ ++/* Bridge out structure for CacheOpLog */ ++typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_CACHEOPLOG; ++ ++#endif /* COMMON_CACHE_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_cmm_bridge.h b/drivers/gpu/drm/img-rogue/common_cmm_bridge.h +new file mode 100644 +index 000000000000..da48de3598be +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_cmm_bridge.h +@@ -0,0 +1,114 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for cmm ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for cmm ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef COMMON_CMM_BRIDGE_H ++#define COMMON_CMM_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "devicemem_typedefs.h" ++ ++#define PVRSRV_BRIDGE_CMM_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX PVRSRV_BRIDGE_CMM_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_CMM_CMD_LAST (PVRSRV_BRIDGE_CMM_CMD_FIRST+2) ++ ++/******************************************* ++ DevmemIntExportCtx ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntExportCtx */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG ++{ ++ IMG_HANDLE hContext; ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX; ++ ++/* Bridge out structure for DevmemIntExportCtx */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG ++{ ++ IMG_HANDLE hContextExport; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX; ++ ++/******************************************* ++ DevmemIntUnexportCtx ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntUnexportCtx */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG ++{ ++ IMG_HANDLE hContextExport; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX; ++ ++/* Bridge out structure for DevmemIntUnexportCtx */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX; ++ ++/******************************************* ++ DevmemIntAcquireRemoteCtx ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntAcquireRemoteCtx */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG ++{ ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX; ++ ++/* Bridge out structure for DevmemIntAcquireRemoteCtx */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG ++{ ++ IMG_HANDLE hContext; ++ IMG_HANDLE hPrivData; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX; ++ ++#endif /* COMMON_CMM_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h b/drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h +new file mode 100644 +index 000000000000..800f98dd2836 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_devicememhistory_bridge.h +@@ -0,0 +1,185 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for devicememhistory ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for devicememhistory ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H ++#define COMMON_DEVICEMEMHISTORY_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "devicemem_typedefs.h" ++ ++#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3 ++#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4 ++#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4) ++ ++/******************************************* ++ DevicememHistoryMap ++ *******************************************/ ++ ++/* Bridge in structure for DevicememHistoryMap */ ++typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG ++{ ++ IMG_DEV_VIRTADDR sDevVAddr; ++ IMG_DEVMEM_SIZE_T uiOffset; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hPMR; ++ const IMG_CHAR *puiText; ++ IMG_UINT32 ui32AllocationIndex; ++ IMG_UINT32 ui32Log2PageSize; ++} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP; ++ ++/* Bridge out structure for DevicememHistoryMap */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32AllocationIndexOut; ++} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP; ++ ++/******************************************* ++ DevicememHistoryUnmap ++ *******************************************/ ++ ++/* Bridge in structure for DevicememHistoryUnmap */ ++typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG ++{ ++ IMG_DEV_VIRTADDR sDevVAddr; ++ IMG_DEVMEM_SIZE_T uiOffset; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hPMR; ++ const IMG_CHAR *puiText; ++ IMG_UINT32 ui32AllocationIndex; ++ IMG_UINT32 ui32Log2PageSize; ++} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP; ++ ++/* Bridge out structure for DevicememHistoryUnmap */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32AllocationIndexOut; ++} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP; ++ ++/******************************************* ++ DevicememHistoryMapVRange ++ *******************************************/ ++ ++/* Bridge in structure for DevicememHistoryMapVRange */ ++typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG ++{ ++ IMG_DEV_VIRTADDR sBaseDevVAddr; ++ IMG_DEVMEM_SIZE_T uiAllocSize; ++ const IMG_CHAR *puiText; ++ IMG_UINT32 ui32AllocationIndex; ++ IMG_UINT32 ui32Log2PageSize; ++ IMG_UINT32 ui32NumPages; ++ IMG_UINT32 ui32ui32StartPage; ++} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE; ++ ++/* Bridge out structure for DevicememHistoryMapVRange */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32AllocationIndexOut; ++} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE; ++ ++/******************************************* ++ DevicememHistoryUnmapVRange ++ *******************************************/ ++ ++/* Bridge in structure for DevicememHistoryUnmapVRange */ ++typedef struct 
PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG ++{ ++ IMG_DEV_VIRTADDR sBaseDevVAddr; ++ IMG_DEVMEM_SIZE_T uiAllocSize; ++ const IMG_CHAR *puiText; ++ IMG_UINT32 ui32AllocationIndex; ++ IMG_UINT32 ui32Log2PageSize; ++ IMG_UINT32 ui32NumPages; ++ IMG_UINT32 ui32ui32StartPage; ++} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE; ++ ++/* Bridge out structure for DevicememHistoryUnmapVRange */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32AllocationIndexOut; ++} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE; ++ ++/******************************************* ++ DevicememHistorySparseChange ++ *******************************************/ ++ ++/* Bridge in structure for DevicememHistorySparseChange */ ++typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG ++{ ++ IMG_DEV_VIRTADDR sDevVAddr; ++ IMG_DEVMEM_SIZE_T uiOffset; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hPMR; ++ IMG_UINT32 *pui32AllocPageIndices; ++ IMG_UINT32 *pui32FreePageIndices; ++ const IMG_CHAR *puiText; ++ IMG_UINT32 ui32AllocPageCount; ++ IMG_UINT32 ui32AllocationIndex; ++ IMG_UINT32 ui32FreePageCount; ++ IMG_UINT32 ui32Log2PageSize; ++} __packed PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE; ++ ++/* Bridge out structure for DevicememHistorySparseChange */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32AllocationIndexOut; ++} __packed PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE; ++ ++#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_di_bridge.h b/drivers/gpu/drm/img-rogue/common_di_bridge.h +new file mode 100644 +index 000000000000..8591006140b6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_di_bridge.h +@@ -0,0 +1,153 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for di ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for di ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#ifndef COMMON_DI_BRIDGE_H ++#define COMMON_DI_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "pvrsrv_tlcommon.h" ++#include "pvr_dicommon.h" ++ ++#define PVRSRV_BRIDGE_DI_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_DI_DICREATECONTEXT PVRSRV_BRIDGE_DI_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT PVRSRV_BRIDGE_DI_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_DI_DIREADENTRY PVRSRV_BRIDGE_DI_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_DI_DIWRITEENTRY PVRSRV_BRIDGE_DI_CMD_FIRST+3 ++#define PVRSRV_BRIDGE_DI_DILISTALLENTRIES PVRSRV_BRIDGE_DI_CMD_FIRST+4 ++#define PVRSRV_BRIDGE_DI_CMD_LAST (PVRSRV_BRIDGE_DI_CMD_FIRST+4) ++ ++/******************************************* ++ DICreateContext ++ *******************************************/ ++ ++/* Bridge in structure for DICreateContext */ ++typedef struct PVRSRV_BRIDGE_IN_DICREATECONTEXT_TAG ++{ ++ IMG_CHAR *puiStreamName; ++} __packed PVRSRV_BRIDGE_IN_DICREATECONTEXT; ++ ++/* Bridge out structure for DICreateContext */ ++typedef struct PVRSRV_BRIDGE_OUT_DICREATECONTEXT_TAG ++{ ++ IMG_HANDLE hContext; ++ IMG_CHAR *puiStreamName; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DICREATECONTEXT; ++ ++/******************************************* ++ DIDestroyContext ++ *******************************************/ ++ ++/* Bridge in structure for DIDestroyContext */ ++typedef struct PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT_TAG ++{ ++ IMG_HANDLE hContext; ++} __packed PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT; ++ ++/* Bridge out structure for DIDestroyContext */ ++typedef struct PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT; ++ ++/******************************************* ++ DIReadEntry ++ *******************************************/ ++ ++/* Bridge in structure for DIReadEntry */ ++typedef struct PVRSRV_BRIDGE_IN_DIREADENTRY_TAG ++{ ++ IMG_UINT64 ui64Offset; ++ IMG_UINT64 ui64Size; ++ IMG_HANDLE hContext; ++ const IMG_CHAR *puiEntryPath; ++} __packed PVRSRV_BRIDGE_IN_DIREADENTRY; ++ ++/* Bridge out structure for DIReadEntry */ ++typedef struct PVRSRV_BRIDGE_OUT_DIREADENTRY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DIREADENTRY; ++ ++/******************************************* ++ DIWriteEntry ++ *******************************************/ ++ ++/* Bridge in structure for DIWriteEntry */ ++typedef struct PVRSRV_BRIDGE_IN_DIWRITEENTRY_TAG ++{ ++ IMG_HANDLE hContext; ++ const IMG_CHAR *puiEntryPath; ++ const IMG_CHAR *puiValue; ++ IMG_UINT32 ui32ValueSize; ++} __packed PVRSRV_BRIDGE_IN_DIWRITEENTRY; ++ ++/* Bridge out structure for DIWriteEntry */ ++typedef struct 
PVRSRV_BRIDGE_OUT_DIWRITEENTRY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DIWRITEENTRY; ++ ++/******************************************* ++ DIListAllEntries ++ *******************************************/ ++ ++/* Bridge in structure for DIListAllEntries */ ++typedef struct PVRSRV_BRIDGE_IN_DILISTALLENTRIES_TAG ++{ ++ IMG_HANDLE hContext; ++} __packed PVRSRV_BRIDGE_IN_DILISTALLENTRIES; ++ ++/* Bridge out structure for DIListAllEntries */ ++typedef struct PVRSRV_BRIDGE_OUT_DILISTALLENTRIES_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DILISTALLENTRIES; ++ ++#endif /* COMMON_DI_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h b/drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h +new file mode 100644 +index 000000000000..7547d9f76297 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_dmabuf_bridge.h +@@ -0,0 +1,150 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for dmabuf ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for dmabuf ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef COMMON_DMABUF_BRIDGE_H ++#define COMMON_DMABUF_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "pvrsrv_memallocflags.h" ++ ++#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3 ++#define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+3) ++ ++/******************************************* ++ PhysmemImportDmaBuf ++ *******************************************/ ++ ++/* Bridge in structure for PhysmemImportDmaBuf */ ++typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG ++{ ++ const IMG_CHAR *puiName; ++ IMG_INT ifd; ++ IMG_UINT32 ui32NameSize; ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; ++} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF; ++ ++/* Bridge out structure for PhysmemImportDmaBuf */ ++typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG ++{ ++ IMG_DEVMEM_ALIGN_T uiAlign; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hPMRPtr; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF; ++ ++/******************************************* ++ PhysmemImportDmaBufLocked ++ *******************************************/ ++ ++/* Bridge in structure for PhysmemImportDmaBufLocked */ ++typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED_TAG ++{ ++ const IMG_CHAR *puiName; ++ IMG_INT ifd; ++ IMG_UINT32 ui32NameSize; ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; ++} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED; ++ ++/* Bridge out structure for PhysmemImportDmaBufLocked */ ++typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED_TAG ++{ ++ IMG_DEVMEM_ALIGN_T uiAlign; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hPMRPtr; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED; ++ ++/******************************************* ++ PhysmemExportDmaBuf ++ *******************************************/ ++ ++/* Bridge in structure for PhysmemExportDmaBuf */ ++typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG ++{ ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF; ++ ++/* Bridge out structure for PhysmemExportDmaBuf */ ++typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_INT iFd; ++} __packed PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF; ++ ++/******************************************* ++ PhysmemImportSparseDmaBuf ++ *******************************************/ ++ ++/* Bridge in structure for PhysmemImportSparseDmaBuf */ ++typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG ++{ ++ IMG_DEVMEM_SIZE_T uiChunkSize; ++ IMG_UINT32 *pui32MappingTable; ++ const IMG_CHAR *puiName; ++ IMG_INT ifd; ++ IMG_UINT32 ui32NameSize; ++ IMG_UINT32 ui32NumPhysChunks; ++ IMG_UINT32 ui32NumVirtChunks; ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; ++} __packed PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF; ++ ++/* Bridge out structure for PhysmemImportSparseDmaBuf */ ++typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG ++{ ++ IMG_DEVMEM_ALIGN_T uiAlign; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hPMRPtr; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF; ++ ++#endif /* COMMON_DMABUF_BRIDGE_H */ 
+diff --git a/drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h b/drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h +new file mode 100644 +index 000000000000..69a406b253d9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_htbuffer_bridge.h +@@ -0,0 +1,104 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for htbuffer ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for htbuffer ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef COMMON_HTBUFFER_BRIDGE_H ++#define COMMON_HTBUFFER_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "devicemem_typedefs.h" ++#include "htbuffer_types.h" ++ ++#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1) ++ ++/******************************************* ++ HTBControl ++ *******************************************/ ++ ++/* Bridge in structure for HTBControl */ ++typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG ++{ ++ IMG_UINT32 *pui32GroupEnable; ++ IMG_UINT32 ui32EnablePID; ++ IMG_UINT32 ui32LogLevel; ++ IMG_UINT32 ui32LogMode; ++ IMG_UINT32 ui32NumGroups; ++ IMG_UINT32 ui32OpMode; ++} __packed PVRSRV_BRIDGE_IN_HTBCONTROL; ++ ++/* Bridge out structure for HTBControl */ ++typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_HTBCONTROL; ++ ++/******************************************* ++ HTBLog ++ *******************************************/ ++ ++/* Bridge in structure for HTBLog */ ++typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG ++{ ++ IMG_UINT64 ui64TimeStamp; ++ IMG_UINT32 *pui32Args; ++ IMG_UINT32 ui32NumArgs; ++ IMG_UINT32 ui32PID; ++ IMG_UINT32 ui32SF; ++ IMG_UINT32 ui32TID; ++} __packed PVRSRV_BRIDGE_IN_HTBLOG; ++ ++/* Bridge out structure for HTBLog */ ++typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_HTBLOG; ++ ++#endif /* COMMON_HTBUFFER_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_mm_bridge.h b/drivers/gpu/drm/img-rogue/common_mm_bridge.h +new file mode 100644 +index 000000000000..bbb419b3f570 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_mm_bridge.h +@@ -0,0 +1,879 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for mm ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for mm ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#ifndef COMMON_MM_BRIDGE_H
++#define COMMON_MM_BRIDGE_H
++
++#include <powervr/mem_types.h>
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#include "pvrsrv_memallocflags.h"
++#include "pvrsrv_memalloc_physheap.h"
++#include "devicemem_typedefs.h"
++
++#define PVRSRV_BRIDGE_MM_CMD_FIRST 0
++#define PVRSRV_BRIDGE_MM_PMREXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+0
++#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+1
++#define PVRSRV_BRIDGE_MM_PMRGETUID PVRSRV_BRIDGE_MM_CMD_FIRST+2
++#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+3
++#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+4
++#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5
++#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6
++#define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7
++#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8
++#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9
++#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+10
++#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN PVRSRV_BRIDGE_MM_CMD_FIRST+11
++#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN PVRSRV_BRIDGE_MM_CMD_FIRST+12
++#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+13
++#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+14
++#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+15
++#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+16
++#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+17
++#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+18
++#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+19
++#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+20
++#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+21
++#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+22
++#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+23
++#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+24
++#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+25
++#define
PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+26 ++#define PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE PVRSRV_BRIDGE_MM_CMD_FIRST+27 ++#define PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE PVRSRV_BRIDGE_MM_CMD_FIRST+28 ++#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+29 ++#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+30 ++#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+31 ++#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+32 ++#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+33 ++#define PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+34 ++#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO PVRSRV_BRIDGE_MM_CMD_FIRST+35 ++#define PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP PVRSRV_BRIDGE_MM_CMD_FIRST+36 ++#define PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE PVRSRV_BRIDGE_MM_CMD_FIRST+37 ++#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+38 ++#define PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS PVRSRV_BRIDGE_MM_CMD_FIRST+39 ++#define PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFOPKD PVRSRV_BRIDGE_MM_CMD_FIRST+40 ++#define PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGEPKD PVRSRV_BRIDGE_MM_CMD_FIRST+41 ++#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+41) ++ ++/******************************************* ++ PMRExportPMR ++ *******************************************/ ++ ++/* Bridge in structure for PMRExportPMR */ ++typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG ++{ ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_PMREXPORTPMR; ++ ++/* Bridge out structure for PMRExportPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG ++{ ++ IMG_UINT64 ui64Password; ++ IMG_UINT64 ui64Size; ++ IMG_HANDLE hPMRExport; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32Log2Contig; ++} __packed PVRSRV_BRIDGE_OUT_PMREXPORTPMR; ++ ++/******************************************* ++ PMRUnexportPMR ++ *******************************************/ ++ ++/* Bridge in structure for PMRUnexportPMR */ ++typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG ++{ ++ IMG_HANDLE hPMRExport; ++} __packed PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR; ++ ++/* Bridge out structure for PMRUnexportPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR; ++ ++/******************************************* ++ PMRGetUID ++ *******************************************/ ++ ++/* Bridge in structure for PMRGetUID */ ++typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG ++{ ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_PMRGETUID; ++ ++/* Bridge out structure for PMRGetUID */ ++typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG ++{ ++ IMG_UINT64 ui64UID; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PMRGETUID; ++ ++/******************************************* ++ PMRMakeLocalImportHandle ++ *******************************************/ ++ ++/* Bridge in structure for PMRMakeLocalImportHandle */ ++typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG ++{ ++ IMG_HANDLE hBuffer; ++} __packed PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE; ++ ++/* Bridge out structure for PMRMakeLocalImportHandle */ ++typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG ++{ ++ IMG_HANDLE hExtMem; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE; ++ ++/******************************************* ++ PMRUnmakeLocalImportHandle ++ *******************************************/ ++ ++/* Bridge in structure for 
PMRUnmakeLocalImportHandle */ ++typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG ++{ ++ IMG_HANDLE hExtMem; ++} __packed PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE; ++ ++/* Bridge out structure for PMRUnmakeLocalImportHandle */ ++typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE; ++ ++/******************************************* ++ PMRImportPMR ++ *******************************************/ ++ ++/* Bridge in structure for PMRImportPMR */ ++typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG ++{ ++ IMG_UINT64 ui64uiPassword; ++ IMG_UINT64 ui64uiSize; ++ IMG_HANDLE hPMRExport; ++ IMG_UINT32 ui32uiLog2Contig; ++} __packed PVRSRV_BRIDGE_IN_PMRIMPORTPMR; ++ ++/* Bridge out structure for PMRImportPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG ++{ ++ IMG_HANDLE hPMR; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PMRIMPORTPMR; ++ ++/******************************************* ++ PMRLocalImportPMR ++ *******************************************/ ++ ++/* Bridge in structure for PMRLocalImportPMR */ ++typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG ++{ ++ IMG_HANDLE hExtHandle; ++} __packed PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR; ++ ++/* Bridge out structure for PMRLocalImportPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG ++{ ++ IMG_DEVMEM_ALIGN_T uiAlign; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hPMR; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR; ++ ++/******************************************* ++ PMRUnrefPMR ++ *******************************************/ ++ ++/* Bridge in structure for PMRUnrefPMR */ ++typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG ++{ ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_PMRUNREFPMR; ++ ++/* Bridge out structure for PMRUnrefPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PMRUNREFPMR; ++ ++/******************************************* ++ PMRUnrefUnlockPMR ++ *******************************************/ ++ ++/* Bridge in structure for PMRUnrefUnlockPMR */ ++typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG ++{ ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR; ++ ++/* Bridge out structure for PMRUnrefUnlockPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR; ++ ++/******************************************* ++ PhysmemNewRamBackedPMR ++ *******************************************/ ++ ++/* Bridge in structure for PhysmemNewRamBackedPMR */ ++typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG ++{ ++ IMG_DEVMEM_SIZE_T uiChunkSize; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_UINT32 *pui32MappingTable; ++ const IMG_CHAR *puiAnnotation; ++ IMG_UINT32 ui32AnnotationLength; ++ IMG_UINT32 ui32Log2PageSize; ++ IMG_UINT32 ui32NumPhysChunks; ++ IMG_UINT32 ui32NumVirtChunks; ++ IMG_UINT32 ui32PDumpFlags; ++ IMG_PID ui32PID; ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; ++} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR; ++ ++/* Bridge out structure for PhysmemNewRamBackedPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG ++{ ++ IMG_HANDLE hPMRPtr; ++ PVRSRV_ERROR eError; ++ PVRSRV_MEMALLOCFLAGS_T uiOutFlags; ++} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR; ++ ++/******************************************* ++ PhysmemNewRamBackedLockedPMR ++ *******************************************/ ++ ++/* Bridge in structure 
for PhysmemNewRamBackedLockedPMR */ ++typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG ++{ ++ IMG_DEVMEM_SIZE_T uiChunkSize; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_UINT32 *pui32MappingTable; ++ const IMG_CHAR *puiAnnotation; ++ IMG_UINT32 ui32AnnotationLength; ++ IMG_UINT32 ui32Log2PageSize; ++ IMG_UINT32 ui32NumPhysChunks; ++ IMG_UINT32 ui32NumVirtChunks; ++ IMG_UINT32 ui32PDumpFlags; ++ IMG_PID ui32PID; ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; ++} __packed PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR; ++ ++/* Bridge out structure for PhysmemNewRamBackedLockedPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG ++{ ++ IMG_HANDLE hPMRPtr; ++ PVRSRV_ERROR eError; ++ PVRSRV_MEMALLOCFLAGS_T uiOutFlags; ++} __packed PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR; ++ ++/******************************************* ++ DevmemIntPin ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntPin */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG ++{ ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPIN; ++ ++/* Bridge out structure for DevmemIntPin */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPIN; ++ ++/******************************************* ++ DevmemIntUnpin ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntUnpin */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG ++{ ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN; ++ ++/* Bridge out structure for DevmemIntUnpin */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN; ++ ++/******************************************* ++ DevmemIntPinValidate ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntPinValidate */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG ++{ ++ IMG_HANDLE hMapping; ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE; ++ ++/* Bridge out structure for DevmemIntPinValidate */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE; ++ ++/******************************************* ++ DevmemIntUnpinInvalidate ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntUnpinInvalidate */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG ++{ ++ IMG_HANDLE hMapping; ++ IMG_HANDLE hPMR; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE; ++ ++/* Bridge out structure for DevmemIntUnpinInvalidate */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE; ++ ++/******************************************* ++ DevmemIntCtxCreate ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntCtxCreate */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG ++{ ++ IMG_BOOL bbKernelMemoryCtx; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE; ++ ++/* Bridge out structure for DevmemIntCtxCreate */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG ++{ ++ IMG_HANDLE hDevMemServerContext; ++ IMG_HANDLE hPrivData; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32CPUCacheLineSize; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE; ++ ++/******************************************* ++ DevmemIntCtxDestroy ++ *******************************************/ ++ 
++/* Bridge in structure for DevmemIntCtxDestroy */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG ++{ ++ IMG_HANDLE hDevmemServerContext; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY; ++ ++/* Bridge out structure for DevmemIntCtxDestroy */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY; ++ ++/******************************************* ++ DevmemIntHeapCreate ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntHeapCreate */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG ++{ ++ IMG_DEV_VIRTADDR sHeapBaseAddr; ++ IMG_DEVMEM_SIZE_T uiHeapLength; ++ IMG_HANDLE hDevmemCtx; ++ IMG_UINT32 ui32Log2DataPageSize; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE; ++ ++/* Bridge out structure for DevmemIntHeapCreate */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG ++{ ++ IMG_HANDLE hDevmemHeapPtr; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE; ++ ++/******************************************* ++ DevmemIntHeapDestroy ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntHeapDestroy */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG ++{ ++ IMG_HANDLE hDevmemHeap; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY; ++ ++/* Bridge out structure for DevmemIntHeapDestroy */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY; ++ ++/******************************************* ++ DevmemIntMapPMR ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntMapPMR */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG ++{ ++ IMG_HANDLE hDevmemServerHeap; ++ IMG_HANDLE hPMR; ++ IMG_HANDLE hReservation; ++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR; ++ ++/* Bridge out structure for DevmemIntMapPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG ++{ ++ IMG_HANDLE hMapping; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR; ++ ++/******************************************* ++ DevmemIntUnmapPMR ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntUnmapPMR */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG ++{ ++ IMG_HANDLE hMapping; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR; ++ ++/* Bridge out structure for DevmemIntUnmapPMR */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR; ++ ++/******************************************* ++ DevmemIntReserveRange ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntReserveRange */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG ++{ ++ IMG_DEV_VIRTADDR sAddress; ++ IMG_DEVMEM_SIZE_T uiLength; ++ IMG_HANDLE hDevmemServerHeap; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE; ++ ++/* Bridge out structure for DevmemIntReserveRange */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG ++{ ++ IMG_HANDLE hReservation; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE; ++ ++/******************************************* ++ DevmemIntUnreserveRange ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntUnreserveRange */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG ++{ ++ IMG_HANDLE hReservation; ++} 
__packed PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE; ++ ++/* Bridge out structure for DevmemIntUnreserveRange */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE; ++ ++/******************************************* ++ ChangeSparseMem ++ *******************************************/ ++ ++/* Bridge in structure for ChangeSparseMem */ ++typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG ++{ ++ IMG_DEV_VIRTADDR sDevVAddr; ++ IMG_UINT64 ui64CPUVAddr; ++ IMG_HANDLE hPMR; ++ IMG_HANDLE hSrvDevMemHeap; ++ IMG_UINT32 *pui32AllocPageIndices; ++ IMG_UINT32 *pui32FreePageIndices; ++ IMG_UINT32 ui32AllocPageCount; ++ IMG_UINT32 ui32FreePageCount; ++ IMG_UINT32 ui32SparseFlags; ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; ++} __packed PVRSRV_BRIDGE_IN_CHANGESPARSEMEM; ++ ++/* Bridge out structure for ChangeSparseMem */ ++typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM; ++ ++/******************************************* ++ DevmemIntMapPages ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntMapPages */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG ++{ ++ IMG_DEV_VIRTADDR sDevVAddr; ++ IMG_HANDLE hPMR; ++ IMG_HANDLE hReservation; ++ IMG_UINT32 ui32PageCount; ++ IMG_UINT32 ui32PhysicalPgOffset; ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES; ++ ++/* Bridge out structure for DevmemIntMapPages */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES; ++ ++/******************************************* ++ DevmemIntUnmapPages ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntUnmapPages */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG ++{ ++ IMG_DEV_VIRTADDR sDevVAddr; ++ IMG_HANDLE hReservation; ++ IMG_UINT32 ui32PageCount; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES; ++ ++/* Bridge out structure for DevmemIntUnmapPages */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES; ++ ++/******************************************* ++ DevmemIsVDevAddrValid ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIsVDevAddrValid */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG ++{ ++ IMG_DEV_VIRTADDR sAddress; ++ IMG_HANDLE hDevmemCtx; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID; ++ ++/* Bridge out structure for DevmemIsVDevAddrValid */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID; ++ ++/******************************************* ++ DevmemFlushDevSLCRange ++ *******************************************/ ++ ++/* Bridge in structure for DevmemFlushDevSLCRange */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE_TAG ++{ ++ IMG_DEV_VIRTADDR sAddress; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_HANDLE hDevmemCtx; ++ IMG_BOOL bInvalidate; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE; ++ ++/* Bridge out structure for DevmemFlushDevSLCRange */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE; ++ ++/******************************************* ++ DevmemInvalidateFBSCTable ++ *******************************************/ ++ 
++/* Bridge in structure for DevmemInvalidateFBSCTable */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE_TAG ++{ ++ IMG_UINT64 ui64FBSCEntries; ++ IMG_HANDLE hDevmemCtx; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE; ++ ++/* Bridge out structure for DevmemInvalidateFBSCTable */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE; ++ ++/******************************************* ++ HeapCfgHeapConfigCount ++ *******************************************/ ++ ++/* Bridge in structure for HeapCfgHeapConfigCount */ ++typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT; ++ ++/* Bridge out structure for HeapCfgHeapConfigCount */ ++typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32NumHeapConfigs; ++} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT; ++ ++/******************************************* ++ HeapCfgHeapCount ++ *******************************************/ ++ ++/* Bridge in structure for HeapCfgHeapCount */ ++typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG ++{ ++ IMG_UINT32 ui32HeapConfigIndex; ++} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT; ++ ++/* Bridge out structure for HeapCfgHeapCount */ ++typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32NumHeaps; ++} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT; ++ ++/******************************************* ++ HeapCfgHeapConfigName ++ *******************************************/ ++ ++/* Bridge in structure for HeapCfgHeapConfigName */ ++typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG ++{ ++ IMG_CHAR *puiHeapConfigName; ++ IMG_UINT32 ui32HeapConfigIndex; ++ IMG_UINT32 ui32HeapConfigNameBufSz; ++} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME; ++ ++/* Bridge out structure for HeapCfgHeapConfigName */ ++typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG ++{ ++ IMG_CHAR *puiHeapConfigName; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME; ++ ++/******************************************* ++ HeapCfgHeapDetails ++ *******************************************/ ++ ++/* Bridge in structure for HeapCfgHeapDetails */ ++typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG ++{ ++ IMG_CHAR *puiHeapNameOut; ++ IMG_UINT32 ui32HeapConfigIndex; ++ IMG_UINT32 ui32HeapIndex; ++ IMG_UINT32 ui32HeapNameBufSz; ++} __packed PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS; ++ ++/* Bridge out structure for HeapCfgHeapDetails */ ++typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG ++{ ++ IMG_DEV_VIRTADDR sDevVAddrBase; ++ IMG_DEVMEM_SIZE_T uiHeapLength; ++ IMG_DEVMEM_SIZE_T uiReservedRegionLength; ++ IMG_CHAR *puiHeapNameOut; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32Log2DataPageSizeOut; ++ IMG_UINT32 ui32Log2ImportAlignmentOut; ++} __packed PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS; ++ ++/******************************************* ++ DevmemIntRegisterPFNotifyKM ++ *******************************************/ ++ ++/* Bridge in structure for DevmemIntRegisterPFNotifyKM */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG ++{ ++ IMG_HANDLE hDevm; ++ IMG_BOOL bRegister; ++ IMG_UINT32 ui32PID; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM; ++ ++/* Bridge out structure for DevmemIntRegisterPFNotifyKM */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG ++{ ++ 
PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM; ++ ++/******************************************* ++ GetMaxPhysHeapCount ++ *******************************************/ ++ ++/* Bridge in structure for GetMaxPhysHeapCount */ ++typedef struct PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT; ++ ++/* Bridge out structure for GetMaxPhysHeapCount */ ++typedef struct PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32PhysHeapCount; ++} __packed PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT; ++ ++/******************************************* ++ PhysHeapGetMemInfo ++ *******************************************/ ++ ++/* Bridge in structure for PhysHeapGetMemInfo */ ++typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO_TAG ++{ ++ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; ++ PVRSRV_PHYS_HEAP *peaPhysHeapID; ++ IMG_UINT32 ui32PhysHeapCount; ++} __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO; ++ ++/* Bridge out structure for PhysHeapGetMemInfo */ ++typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO_TAG ++{ ++ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO; ++ ++/******************************************* ++ GetDefaultPhysicalHeap ++ *******************************************/ ++ ++/* Bridge in structure for GetDefaultPhysicalHeap */ ++typedef struct PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP; ++ ++/* Bridge out structure for GetDefaultPhysicalHeap */ ++typedef struct PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP_TAG ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_PHYS_HEAP eHeap; ++} __packed PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP; ++ ++/******************************************* ++ GetHeapPhysMemUsage ++ *******************************************/ ++ ++/* Bridge in structure for GetHeapPhysMemUsage */ ++typedef struct PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE_TAG ++{ ++ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; ++ IMG_UINT32 ui32PhysHeapCount; ++} __packed PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE; ++ ++/* Bridge out structure for GetHeapPhysMemUsage */ ++typedef struct PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE_TAG ++{ ++ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStats; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE; ++ ++/******************************************* ++ DevmemGetFaultAddress ++ *******************************************/ ++ ++/* Bridge in structure for DevmemGetFaultAddress */ ++typedef struct PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS_TAG ++{ ++ IMG_HANDLE hDevmemCtx; ++} __packed PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS; ++ ++/* Bridge out structure for DevmemGetFaultAddress */ ++typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG ++{ ++ IMG_DEV_VIRTADDR sFaultAddress; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS; ++ ++/******************************************* ++ PVRSRVUpdateOOMStats ++ *******************************************/ ++ ++/* Bridge in structure for PVRSRVUpdateOOMStats */ ++typedef struct PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS_TAG ++{ ++ IMG_PID ui32pid; ++ IMG_UINT32 ui32ui32StatType; ++} __packed PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS; ++ ++/* Bridge out structure for PVRSRVUpdateOOMStats */ ++typedef struct PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed 
PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS;
++
++/*******************************************
++ PhysHeapGetMemInfoPkd
++ *******************************************/
++
++/* Bridge in structure for PhysHeapGetMemInfoPkd */
++typedef struct PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD_TAG
++{
++ PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
++ PVRSRV_PHYS_HEAP *peaPhysHeapID;
++ IMG_UINT32 ui32PhysHeapCount;
++} __packed PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD;
++
++/* Bridge out structure for PhysHeapGetMemInfoPkd */
++typedef struct PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD_TAG
++{
++ PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD;
++
++/*******************************************
++ GetHeapPhysMemUsagePkd
++ *******************************************/
++
++/* Bridge in structure for GetHeapPhysMemUsagePkd */
++typedef struct PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD_TAG
++{
++ PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
++ IMG_UINT32 ui32PhysHeapCount;
++} __packed PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD;
++
++/* Bridge out structure for GetHeapPhysMemUsagePkd */
++typedef struct PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD_TAG
++{
++ PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStats;
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD;
++
++#endif /* COMMON_MM_BRIDGE_H */
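Several of the mm bridge requests above (PhysmemNewRamBackedPMR, ChangeSparseMem, PhysHeapGetMemInfo) carry a user pointer together with an element count inside the packed block, and the server side is expected to validate the count before copying the array across the boundary. A minimal stand-alone sketch of that marshalling pattern, with all DEMO_*/demo_* names invented and memcpy() standing in for the kernel's copy-from-user step:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Packed request: a user pointer and its element count travel together,
 * mirroring pui32MappingTable/ui32NumVirtChunks above. */
typedef struct {
	uint32_t *pui32MappingTable;
	uint32_t ui32NumVirtChunks;
} __attribute__((packed)) DEMO_IN_NEWPMR;

/* Server side: never trust the embedded pointer directly; bound-check the
 * count, then copy the array out of it (copy_from_user() in a real kernel). */
static int demo_server_newpmr(const DEMO_IN_NEWPMR *in, uint32_t max_chunks)
{
	uint32_t *table;

	if (in->ui32NumVirtChunks == 0 || in->ui32NumVirtChunks > max_chunks)
		return -1; /* reject oversized requests before allocating */

	table = malloc(in->ui32NumVirtChunks * sizeof(*table));
	if (!table)
		return -1;
	memcpy(table, in->pui32MappingTable,
	       in->ui32NumVirtChunks * sizeof(*table));
	printf("first chunk index: %u\n", table[0]);
	free(table);
	return 0;
}

int main(void)
{
	uint32_t map[3] = { 0, 1, 2 };
	DEMO_IN_NEWPMR sIn = { .pui32MappingTable = map, .ui32NumVirtChunks = 3 };

	return demo_server_newpmr(&sIn, 16) ? 1 : 0;
}

+diff --git a/drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h b/drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h
+new file mode 100644
+index 000000000000..edc822375222
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/common_pvrtl_bridge.h
+@@ -0,0 +1,214 @@
++/*******************************************************************************
++@File
++@Title Common bridge header for pvrtl
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Declares common defines and structures used by both the client
++ and server side of the bridge for pvrtl
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".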
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#ifndef COMMON_PVRTL_BRIDGE_H
++#define COMMON_PVRTL_BRIDGE_H
++
++#include <powervr/mem_types.h>
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#include "devicemem_typedefs.h"
++#include "pvrsrv_tlcommon.h"
++
++#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST 0
++#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0
++#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1
++#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2
++#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3
++#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4
++#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5
++#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6
++#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7
++#define PVRSRV_BRIDGE_PVRTL_CMD_LAST (PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7)
++
++/*******************************************
++ TLOpenStream
++ *******************************************/
++
++/* Bridge in structure for TLOpenStream */
++typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG
++{
++ const IMG_CHAR *puiName;
++ IMG_UINT32 ui32Mode;
++} __packed PVRSRV_BRIDGE_IN_TLOPENSTREAM;
++
++/* Bridge out structure for TLOpenStream */
++typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG
++{
++ IMG_HANDLE hSD;
++ IMG_HANDLE hTLPMR;
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_TLOPENSTREAM;
++
++/*******************************************
++ TLCloseStream
++ *******************************************/
++
++/* Bridge in structure for TLCloseStream */
++typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG
++{
++ IMG_HANDLE hSD;
++} __packed PVRSRV_BRIDGE_IN_TLCLOSESTREAM;
++
++/* Bridge out structure for TLCloseStream */
++typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_TLCLOSESTREAM;
++
++/*******************************************
++ TLAcquireData
++ *******************************************/
++
++/* Bridge in structure for TLAcquireData */
++typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG
++{
++ IMG_HANDLE hSD;
++} __packed PVRSRV_BRIDGE_IN_TLACQUIREDATA;
++
++/* Bridge out structure for TLAcquireData */
++typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32ReadLen;
++ IMG_UINT32 ui32ReadOffset;
++} __packed PVRSRV_BRIDGE_OUT_TLACQUIREDATA;
++
++/*******************************************
++ TLReleaseData
++ *******************************************/
++
++/* Bridge in structure for TLReleaseData */
++typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG
++{
++ IMG_HANDLE hSD;
++ IMG_UINT32 ui32ReadLen;
++ IMG_UINT32 ui32ReadOffset;
++} __packed PVRSRV_BRIDGE_IN_TLRELEASEDATA;
++
++/* Bridge out structure for TLReleaseData */
++typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG
++{
++
PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_TLRELEASEDATA; ++ ++/******************************************* ++ TLDiscoverStreams ++ *******************************************/ ++ ++/* Bridge in structure for TLDiscoverStreams */ ++typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG ++{ ++ const IMG_CHAR *puiNamePattern; ++ IMG_CHAR *puiStreams; ++ IMG_UINT32 ui32Size; ++} __packed PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS; ++ ++/* Bridge out structure for TLDiscoverStreams */ ++typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG ++{ ++ IMG_CHAR *puiStreams; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32NumFound; ++} __packed PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS; ++ ++/******************************************* ++ TLReserveStream ++ *******************************************/ ++ ++/* Bridge in structure for TLReserveStream */ ++typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG ++{ ++ IMG_HANDLE hSD; ++ IMG_UINT32 ui32Size; ++ IMG_UINT32 ui32SizeMin; ++} __packed PVRSRV_BRIDGE_IN_TLRESERVESTREAM; ++ ++/* Bridge out structure for TLReserveStream */ ++typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32Available; ++ IMG_UINT32 ui32BufferOffset; ++} __packed PVRSRV_BRIDGE_OUT_TLRESERVESTREAM; ++ ++/******************************************* ++ TLCommitStream ++ *******************************************/ ++ ++/* Bridge in structure for TLCommitStream */ ++typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG ++{ ++ IMG_HANDLE hSD; ++ IMG_UINT32 ui32ReqSize; ++} __packed PVRSRV_BRIDGE_IN_TLCOMMITSTREAM; ++ ++/* Bridge out structure for TLCommitStream */ ++typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM; ++ ++/******************************************* ++ TLWriteData ++ *******************************************/ ++ ++/* Bridge in structure for TLWriteData */ ++typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG ++{ ++ IMG_HANDLE hSD; ++ IMG_BYTE *pui8Data; ++ IMG_UINT32 ui32Size; ++} __packed PVRSRV_BRIDGE_IN_TLWRITEDATA; ++ ++/* Bridge out structure for TLWriteData */ ++typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_TLWRITEDATA; ++ ++#endif /* COMMON_PVRTL_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_rgxbreakpoint_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxbreakpoint_bridge.h +new file mode 100644 +index 000000000000..7b83d9abce6c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_rgxbreakpoint_bridge.h +@@ -0,0 +1,149 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for rgxbreakpoint ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxbreakpoint ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#ifndef COMMON_RGXBREAKPOINT_BRIDGE_H
++#define COMMON_RGXBREAKPOINT_BRIDGE_H
++
++#include <powervr/mem_types.h>
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#include "rgx_bridge.h"
++
++#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST 0
++#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+0
++#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+1
++#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+2
++#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+3
++#define PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4
++#define PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST (PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_FIRST+4)
++
++/*******************************************
++ RGXSetBreakpoint
++ *******************************************/
++
++/* Bridge in structure for RGXSetBreakpoint */
++typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG
++{
++ IMG_HANDLE hPrivData;
++ IMG_UINT32 eFWDataMaster;
++ IMG_UINT32 ui32BreakpointAddr;
++ IMG_UINT32 ui32DM;
++ IMG_UINT32 ui32HandlerAddr;
++} __packed PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT;
++
++/* Bridge out structure for RGXSetBreakpoint */
++typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT;
++
++/*******************************************
++ RGXClearBreakpoint
++ *******************************************/
++
++/* Bridge in structure for RGXClearBreakpoint */
++typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG
++{
++ IMG_HANDLE hPrivData;
++} __packed PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT;
++
++/* Bridge out structure for RGXClearBreakpoint */
++typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT;
++
++/*******************************************
++ RGXEnableBreakpoint
++ *******************************************/
++
++/* Bridge in structure for RGXEnableBreakpoint */
++typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG
++{
++ IMG_HANDLE hPrivData;
++} __packed PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT;
++
++/* Bridge out structure for RGXEnableBreakpoint */
++typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT;
++
++/*******************************************
++ RGXDisableBreakpoint
++ *******************************************/
++
++/* Bridge in structure for RGXDisableBreakpoint */
++typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG
++{
++ IMG_HANDLE hPrivData;
++} __packed PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT;
++
++/* Bridge out structure for RGXDisableBreakpoint */
++typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT;
++
++/*******************************************
++ RGXOverallocateBPRegisters
++ *******************************************/
++
++/* Bridge in structure for RGXOverallocateBPRegisters */
++typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG
++{
++ IMG_UINT32 ui32SharedRegs;
++ IMG_UINT32 ui32TempRegs;
++} __packed PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS;
++
++/* Bridge out structure for RGXOverallocateBPRegisters */
++typedef struct
PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS; ++ ++#endif /* COMMON_RGXBREAKPOINT_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h +new file mode 100644 +index 000000000000..396bd3f01585 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_rgxcmp_bridge.h +@@ -0,0 +1,229 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for rgxcmp ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxcmp ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#ifndef COMMON_RGXCMP_BRIDGE_H
++#define COMMON_RGXCMP_BRIDGE_H
++
++#include <powervr/mem_types.h>
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#include "rgx_bridge.h"
++#include "pvrsrv_sync_km.h"
++
++#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST 0
++#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
++#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
++#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
++#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
++#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
++#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2 PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
++#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
++#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7
++#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+7)
++
++/*******************************************
++ RGXCreateComputeContext
++ *******************************************/
++
++/* Bridge in structure for RGXCreateComputeContext */
++typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG
++{
++ IMG_UINT64 ui64RobustnessAddress;
++ IMG_HANDLE hPrivData;
++ IMG_BYTE *pui8FrameworkCmd;
++ IMG_BYTE *pui8StaticComputeContextState;
++ IMG_UINT32 ui32ContextFlags;
++ IMG_UINT32 ui32FrameworkCmdize;
++ IMG_UINT32 ui32MaxDeadlineMS;
++ IMG_UINT32 ui32PackedCCBSizeU88;
++ IMG_UINT32 ui32Priority;
++ IMG_UINT32 ui32StaticComputeContextStateSize;
++} __packed PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
++
++/* Bridge out structure for RGXCreateComputeContext */
++typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG
++{
++ IMG_HANDLE hComputeContext;
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT;
++
++/*******************************************
++ RGXDestroyComputeContext
++ *******************************************/
++
++/* Bridge in structure for RGXDestroyComputeContext */
++typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG
++{
++ IMG_HANDLE hComputeContext;
++} __packed PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT;
++
++/* Bridge out structure for RGXDestroyComputeContext */
++typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT;
++
++/*******************************************
++ RGXFlushComputeData
++ *******************************************/
++
++/* Bridge in structure for RGXFlushComputeData */
++typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG
++{
++ IMG_HANDLE hComputeContext;
++} __packed PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA;
++
++/* Bridge out structure for RGXFlushComputeData */
++typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;
++
++/*******************************************
++ RGXSetComputeContextPriority
++ *******************************************/
++
++/* Bridge in structure for RGXSetComputeContextPriority */
++typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
++{
++ IMG_HANDLE hComputeContext;
++ IMG_UINT32 ui32Priority;
++} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY;
++
++/* Bridge out structure for RGXSetComputeContextPriority */
++typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY;
++
++/*******************************************
++ RGXNotifyComputeWriteOffsetUpdate
++ *******************************************/
++
++/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */
++typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
++{
++ IMG_HANDLE hComputeContext;
++} __packed PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
++
++/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */
++typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
++
++/*******************************************
++ RGXKickCDM2
++ *******************************************/
++
++/* Bridge in structure for RGXKickCDM2 */
++typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM2_TAG
++{
++ IMG_UINT64 ui64DeadlineInus;
++ IMG_HANDLE hComputeContext;
++ IMG_UINT32 *pui32ClientUpdateOffset;
++ IMG_UINT32 *pui32ClientUpdateValue;
++ IMG_UINT32 *pui32SyncPMRFlags;
++ IMG_BYTE *pui8DMCmd;
++ IMG_CHAR *puiUpdateFenceName;
++ IMG_HANDLE *phClientUpdateUFOSyncPrimBlock;
++ IMG_HANDLE *phSyncPMRs;
++ PVRSRV_FENCE hCheckFenceFd;
++ PVRSRV_TIMELINE hUpdateTimeline;
++ IMG_UINT32 ui32ClientUpdateCount;
++ IMG_UINT32 ui32CmdSize;
++ IMG_UINT32 ui32ExtJobRef;
++ IMG_UINT32 ui32NumOfWorkgroups;
++ IMG_UINT32 ui32NumOfWorkitems;
++ IMG_UINT32 ui32PDumpFlags;
++ IMG_UINT32 ui32SyncPMRCount;
++} __packed PVRSRV_BRIDGE_IN_RGXKICKCDM2;
++
++/* Bridge out structure for RGXKickCDM2 */
++typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM2_TAG
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_FENCE hUpdateFence;
++} __packed PVRSRV_BRIDGE_OUT_RGXKICKCDM2;
++
++/*******************************************
++ RGXSetComputeContextProperty
++ *******************************************/
++
++/* Bridge in structure for RGXSetComputeContextProperty */
++typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY_TAG
++{
++ IMG_UINT64 ui64Input;
++ IMG_HANDLE hComputeContext;
++ IMG_UINT32 ui32Property;
++} __packed PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY;
++
++/* Bridge out structure for RGXSetComputeContextProperty */
++typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY_TAG
++{
++ IMG_UINT64 ui64Output;
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY;
++
++/*******************************************
++ RGXGetLastDeviceError
++ *******************************************/
++
++/* Bridge in structure for RGXGetLastDeviceError */
++typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR_TAG
++{
++ IMG_UINT32 ui32EmptyStructPlaceholder;
++} __packed PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR;
++
++/* Bridge out structure for RGXGetLastDeviceError */
++typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR_TAG
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32Error;
++} __packed PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR;
++
++#endif /* COMMON_RGXCMP_BRIDGE_H */
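RGXKickCDM2 above shows the bridge's fence handshake: the caller passes a check fence to wait on (hCheckFenceFd) and receives a fresh update fence (hUpdateFence) that signals when the kick completes. A stand-alone sketch of that in/out fence flow, with all DEMO_*/demo_* names invented and plain integers standing in for PVRSRV_FENCE file descriptors:

#include <stdint.h>
#include <stdio.h>

typedef int32_t DEMO_FENCE; /* stands in for PVRSRV_FENCE (an fd) */
#define DEMO_FENCE_INVALID (-1)

typedef struct {
	DEMO_FENCE hCheckFence; /* wait for this before running the kick */
} __attribute__((packed)) DEMO_IN_KICK;

typedef struct {
	int32_t eError;
	DEMO_FENCE hUpdateFence; /* signalled when the kick completes */
} __attribute__((packed)) DEMO_OUT_KICK;

static DEMO_FENCE demo_next_fence = 100;

static int demo_kick(const DEMO_IN_KICK *in, DEMO_OUT_KICK *out)
{
	if (in->hCheckFence != DEMO_FENCE_INVALID)
		printf("would wait on fence %d first\n", in->hCheckFence);
	out->hUpdateFence = demo_next_fence++; /* hand a new fence back */
	out->eError = 0;
	return 0;
}

int main(void)
{
	DEMO_IN_KICK sIn = { .hCheckFence = DEMO_FENCE_INVALID };
	DEMO_OUT_KICK sOut = { 0 };

	demo_kick(&sIn, &sOut);
	printf("completion fence: %d\n", sOut.hUpdateFence);
	return 0;
}

+diff --git a/drivers/gpu/drm/img-rogue/common_rgxfwdbg_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxfwdbg_bridge.h
+new file mode 100644
+index 000000000000..68c79e57344d
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/common_rgxfwdbg_bridge.h
+@@ -0,0 +1,200 @@
++/*******************************************************************************
++@File
++@Title Common bridge header for rgxfwdbg
++@Copyright Copyright (c) Imagination Technologies Ltd.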
All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxfwdbg ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#ifndef COMMON_RGXFWDBG_BRIDGE_H
++#define COMMON_RGXFWDBG_BRIDGE_H
++
++#include <powervr/mem_types.h>
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#include "devicemem_typedefs.h"
++#include "rgx_bridge.h"
++#include "pvrsrv_memallocflags.h"
++
++#define PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST 0
++#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+0
++#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+1
++#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+2
++#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+3
++#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+4
++#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+5
++#define PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+6
++#define PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7
++#define PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST (PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST+7)
++
++/*******************************************
++ RGXFWDebugSetFWLog
++ *******************************************/
++
++/* Bridge in structure for RGXFWDebugSetFWLog */
++typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG_TAG
++{
++ IMG_UINT32 ui32RGXFWLogType;
++} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG;
++
++/* Bridge out structure for RGXFWDebugSetFWLog */
++typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG;
++
++/*******************************************
++ RGXFWDebugDumpFreelistPageList
++ *******************************************/
++
++/* Bridge in structure for RGXFWDebugDumpFreelistPageList */
++typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG
++{
++ IMG_UINT32 ui32EmptyStructPlaceholder;
++} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST;
++
++/* Bridge out structure for RGXFWDebugDumpFreelistPageList */
++typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST;
++
++/*******************************************
++ RGXFWDebugSetHCSDeadline
++ *******************************************/
++
++/* Bridge in structure for RGXFWDebugSetHCSDeadline */
++typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE_TAG
++{
++ IMG_UINT32 ui32RGXHCSDeadline;
++} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE;
++
++/* Bridge out structure for RGXFWDebugSetHCSDeadline */
++typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE;
++
++/*******************************************
++ RGXFWDebugSetOSidPriority
++ *******************************************/
++
++/* Bridge in structure for RGXFWDebugSetOSidPriority */
++typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY_TAG
++{
++ IMG_UINT32 ui32OSid;
++ IMG_UINT32 ui32Priority;
++} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY;
++
++/* Bridge out structure for RGXFWDebugSetOSidPriority */
++typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY;
++
++/*******************************************
++ RGXFWDebugSetOSNewOnlineState
++
*******************************************/ ++ ++/* Bridge in structure for RGXFWDebugSetOSNewOnlineState */ ++typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE_TAG ++{ ++ IMG_UINT32 ui32OSNewState; ++ IMG_UINT32 ui32OSid; ++} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE; ++ ++/* Bridge out structure for RGXFWDebugSetOSNewOnlineState */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE; ++ ++/******************************************* ++ RGXFWDebugPHRConfigure ++ *******************************************/ ++ ++/* Bridge in structure for RGXFWDebugPHRConfigure */ ++typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE_TAG ++{ ++ IMG_UINT32 ui32ui32PHRMode; ++} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE; ++ ++/* Bridge out structure for RGXFWDebugPHRConfigure */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE; ++ ++/******************************************* ++ RGXFWDebugWdgConfigure ++ *******************************************/ ++ ++/* Bridge in structure for RGXFWDebugWdgConfigure */ ++typedef struct PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE_TAG ++{ ++ IMG_UINT32 ui32ui32WdgPeriodUs; ++} __packed PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE; ++ ++/* Bridge out structure for RGXFWDebugWdgConfigure */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE; ++ ++/******************************************* ++ RGXCurrentTime ++ *******************************************/ ++ ++/* Bridge in structure for RGXCurrentTime */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_RGXCURRENTTIME; ++ ++/* Bridge out structure for RGXCurrentTime */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG ++{ ++ IMG_UINT64 ui64Time; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCURRENTTIME; ++ ++#endif /* COMMON_RGXFWDBG_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h +new file mode 100644 +index 000000000000..08e80bb46279 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_rgxhwperf_bridge.h +@@ -0,0 +1,172 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for rgxhwperf ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxhwperf ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
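The rgxfwdbg header now complete above follows the pattern every bridge in this patch repeats: the caller fills a packed IN structure, issues the numbered command, and reads both the transport status and the per-call eError from the OUT structure. A minimal usage sketch, assuming the header's own includes resolve and using a hypothetical BridgeCall() dispatcher in place of the DDK's real bridge entry point (which is not part of this hunk):

#include "common_rgxfwdbg_bridge.h"

/* Hypothetical dispatcher standing in for the DDK bridge entry point. */
extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut);

static PVRSRV_ERROR SetFWLogType(IMG_UINT32 ui32LogType)
{
	PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG sIn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG sOut = { 0 };
	PVRSRV_ERROR eError;

	sIn.ui32RGXFWLogType = ui32LogType;

	/* Command IDs are plain offsets from PVRSRV_BRIDGE_RGXFWDBG_CMD_FIRST. */
	eError = BridgeCall(PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG,
			    &sIn, &sOut);

	/* Transport failure and per-command failure are reported separately. */
	return (eError != PVRSRV_OK) ? eError : sOut.eError;
}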
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#ifndef COMMON_RGXHWPERF_BRIDGE_H ++#define COMMON_RGXHWPERF_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "rgx_bridge.h" ++#include "rgx_hwperf.h" ++ ++#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3 ++#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4 ++#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5 ++#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+5) ++ ++/******************************************* ++ RGXCtrlHWPerf ++ *******************************************/ ++ ++/* Bridge in structure for RGXCtrlHWPerf */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG ++{ ++ IMG_UINT64 ui64Mask; ++ IMG_BOOL bToggle; ++ IMG_UINT32 ui32StreamId; ++} __packed PVRSRV_BRIDGE_IN_RGXCTRLHWPERF; ++ ++/* Bridge out structure for RGXCtrlHWPerf */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF; ++ ++/******************************************* ++ RGXGetHWPerfBvncFeatureFlags ++ *******************************************/ ++ ++/* Bridge in structure for RGXGetHWPerfBvncFeatureFlags */ ++typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS; ++ ++/* Bridge out structure for RGXGetHWPerfBvncFeatureFlags */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG ++{ ++ RGX_HWPERF_BVNC sBVNC; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS; ++ ++/******************************************* 
++ RGXConfigMuxHWPerfCounters ++ *******************************************/ ++ ++/* Bridge in structure for RGXConfigMuxHWPerfCounters */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS_TAG ++{ ++ RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigs; ++ IMG_UINT32 ui32ArrayLen; ++} __packed PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS; ++ ++/* Bridge out structure for RGXConfigMuxHWPerfCounters */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS; ++ ++/******************************************* ++ RGXControlHWPerfBlocks ++ *******************************************/ ++ ++/* Bridge in structure for RGXControlHWPerfBlocks */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS_TAG ++{ ++ IMG_UINT16 *pui16BlockIDs; ++ IMG_BOOL bEnable; ++ IMG_UINT32 ui32ArrayLen; ++} __packed PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS; ++ ++/* Bridge out structure for RGXControlHWPerfBlocks */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS; ++ ++/******************************************* ++ RGXConfigCustomCounters ++ *******************************************/ ++ ++/* Bridge in structure for RGXConfigCustomCounters */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS_TAG ++{ ++ IMG_UINT32 *pui32CustomCounterIDs; ++ IMG_UINT16 ui16CustomBlockID; ++ IMG_UINT16 ui16NumCustomCounters; ++} __packed PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS; ++ ++/* Bridge out structure for RGXConfigCustomCounters */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS; ++ ++/******************************************* ++ RGXConfigureHWPerfBlocks ++ *******************************************/ ++ ++/* Bridge in structure for RGXConfigureHWPerfBlocks */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS_TAG ++{ ++ RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigs; ++ IMG_UINT32 ui32ArrayLen; ++ IMG_UINT32 ui32CtrlWord; ++} __packed PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS; ++ ++/* Bridge out structure for RGXConfigureHWPerfBlocks */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS; ++ ++#endif /* COMMON_RGXHWPERF_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h +new file mode 100644 +index 000000000000..afd882ca7529 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_rgxkicksync_bridge.h +@@ -0,0 +1,143 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for rgxkicksync ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxkicksync ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
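The rgxhwperf bridge just closed above exposes HWPerf control through the same IN/OUT pairs; RGXCtrlHWPerf, for instance, carries a 64-bit event mask, a toggle flag, and a stream id. A sketch under the same assumptions (hypothetical BridgeCall() dispatcher; the exact toggle-versus-set semantics of bToggle are assumed, not stated by the header):

#include "common_rgxhwperf_bridge.h"

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* hypothetical */

static PVRSRV_ERROR EnableHWPerfEvents(IMG_UINT64 ui64EventMask,
				       IMG_UINT32 ui32StreamId)
{
	PVRSRV_BRIDGE_IN_RGXCTRLHWPERF sIn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF sOut = { 0 };

	sIn.ui64Mask = ui64EventMask;
	sIn.bToggle = IMG_FALSE;	/* assumed: IMG_FALSE applies the mask, IMG_TRUE toggles it */
	sIn.ui32StreamId = ui32StreamId;

	BridgeCall(PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, &sIn, &sOut);
	return sOut.eError;
}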
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef COMMON_RGXKICKSYNC_BRIDGE_H ++#define COMMON_RGXKICKSYNC_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "rgx_bridge.h" ++#include "pvrsrv_sync_km.h" ++ ++#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2 PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3 ++#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+3) ++ ++/******************************************* ++ RGXCreateKickSyncContext ++ *******************************************/ ++ ++/* Bridge in structure for RGXCreateKickSyncContext */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG ++{ ++ IMG_HANDLE hPrivData; ++ IMG_UINT32 ui32ContextFlags; ++ IMG_UINT32 ui32PackedCCBSizeU88; ++} __packed PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT; ++ ++/* Bridge out structure for RGXCreateKickSyncContext */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG ++{ ++ IMG_HANDLE hKickSyncContext; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT; ++ ++/******************************************* ++ RGXDestroyKickSyncContext ++ *******************************************/ ++ ++/* Bridge in structure for RGXDestroyKickSyncContext */ ++typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG ++{ ++ IMG_HANDLE hKickSyncContext; ++} __packed PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT; ++ ++/* Bridge out structure for RGXDestroyKickSyncContext */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT; ++ ++/******************************************* ++ RGXKickSync2 ++ *******************************************/ ++ ++/* Bridge in structure for RGXKickSync2 */ ++typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC2_TAG ++{ ++ IMG_HANDLE hKickSyncContext; ++ IMG_UINT32 *pui32UpdateDevVarOffset; ++ IMG_UINT32 *pui32UpdateValue; ++ IMG_CHAR *puiUpdateFenceName; ++ IMG_HANDLE *phUpdateUFODevVarBlock; ++ PVRSRV_FENCE hCheckFenceFD; ++ PVRSRV_TIMELINE hTimelineFenceFD; ++ IMG_UINT32 ui32ClientUpdateCount; ++ IMG_UINT32 ui32ExtJobRef; ++} __packed PVRSRV_BRIDGE_IN_RGXKICKSYNC2; ++ ++/* Bridge out structure for RGXKickSync2 */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC2_TAG ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_FENCE hUpdateFenceFD; ++} __packed PVRSRV_BRIDGE_OUT_RGXKICKSYNC2; ++ ++/******************************************* ++ RGXSetKickSyncContextProperty ++ *******************************************/ ++ ++/* Bridge in structure for RGXSetKickSyncContextProperty */ ++typedef struct PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY_TAG ++{ ++ IMG_UINT64 ui64Input; ++ IMG_HANDLE hKickSyncContext; ++ IMG_UINT32 ui32Property; ++} __packed PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY; ++ ++/* Bridge out structure for RGXSetKickSyncContextProperty */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY_TAG ++{ ++ IMG_UINT64 ui64Output; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY; ++ ++#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */ +diff --git 
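The rgxkicksync header above also illustrates the handle lifecycle these bridges share: the create call returns an IMG_HANDLE in its OUT structure, and that same handle is the input of every later call up to and including destroy. A sketch of the round trip, again with the hypothetical BridgeCall() dispatcher; the ui32PackedCCBSizeU88 encoding is assumed to accept 0 for driver defaults:

#include "common_rgxkicksync_bridge.h"

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* hypothetical */

static PVRSRV_ERROR KickSyncContextRoundTrip(IMG_HANDLE hPrivData)
{
	PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT sCreateIn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT sCreateOut = { 0 };
	PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT sDestroyIn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT sDestroyOut = { 0 };
	PVRSRV_ERROR eError;

	sCreateIn.hPrivData = hPrivData;	/* flags and CCB size left at 0 */

	eError = BridgeCall(PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT,
			    &sCreateIn, &sCreateOut);
	if (eError != PVRSRV_OK || sCreateOut.eError != PVRSRV_OK)
		return (eError != PVRSRV_OK) ? eError : sCreateOut.eError;

	/* The handle minted by create names the context in every later call. */
	sDestroyIn.hKickSyncContext = sCreateOut.hKickSyncContext;
	BridgeCall(PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT,
		   &sDestroyIn, &sDestroyOut);
	return sDestroyOut.eError;
}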
a/drivers/gpu/drm/img-rogue/common_rgxregconfig_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxregconfig_bridge.h +new file mode 100644 +index 000000000000..942b7e4b06d0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_rgxregconfig_bridge.h +@@ -0,0 +1,146 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for rgxregconfig ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxregconfig ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef COMMON_RGXREGCONFIG_BRIDGE_H ++#define COMMON_RGXREGCONFIG_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "rgx_bridge.h" ++ ++#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+3 ++#define PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4 ++#define PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_CMD_FIRST+4) ++ ++/******************************************* ++ RGXSetRegConfigType ++ *******************************************/ ++ ++/* Bridge in structure for RGXSetRegConfigType */ ++typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG ++{ ++ IMG_UINT8 ui8RegPowerIsland; ++} __packed PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE; ++ ++/* Bridge out structure for RGXSetRegConfigType */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE; ++ ++/******************************************* ++ RGXAddRegconfig ++ *******************************************/ ++ ++/* Bridge in structure for RGXAddRegconfig */ ++typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG ++{ ++ IMG_UINT64 ui64RegMask; ++ IMG_UINT64 ui64RegValue; ++ IMG_UINT32 ui32RegAddr; ++} __packed PVRSRV_BRIDGE_IN_RGXADDREGCONFIG; ++ ++/* Bridge out structure for RGXAddRegconfig */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG; ++ ++/******************************************* ++ RGXClearRegConfig ++ *******************************************/ ++ ++/* Bridge in structure for RGXClearRegConfig */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG; ++ ++/* Bridge out structure for RGXClearRegConfig */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG; ++ ++/******************************************* ++ RGXEnableRegConfig ++ *******************************************/ ++ ++/* Bridge in structure for RGXEnableRegConfig */ ++typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG; ++ ++/* Bridge out structure for RGXEnableRegConfig */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG; ++ ++/******************************************* ++ RGXDisableRegConfig ++ *******************************************/ ++ ++/* Bridge in structure for RGXDisableRegConfig */ ++typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG; ++ ++/* Bridge out structure for RGXDisableRegConfig */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG; ++ ++#endif /* COMMON_RGXREGCONFIG_BRIDGE_H 
*/ +diff --git a/drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h +new file mode 100644 +index 000000000000..b5fedd7fc5e3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_rgxta3d_bridge.h +@@ -0,0 +1,404 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for rgxta3d ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxta3d ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
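Returning briefly to the rgxregconfig bridge that closed just before this file: its five commands form a small state machine (set type, add entries, enable, disable, clear), each entry pairing a register address with a 64-bit value and mask. A sketch of adding one entry and enabling the configuration, same hypothetical BridgeCall() dispatcher:

#include "common_rgxregconfig_bridge.h"

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* hypothetical */

static PVRSRV_ERROR AddAndEnableRegConfig(IMG_UINT32 ui32RegAddr,
					  IMG_UINT64 ui64Value,
					  IMG_UINT64 ui64Mask)
{
	PVRSRV_BRIDGE_IN_RGXADDREGCONFIG sAddIn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG sAddOut = { 0 };
	PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG sEnIn = { 0 };	/* placeholder-only payload */
	PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG sEnOut = { 0 };

	sAddIn.ui32RegAddr = ui32RegAddr;
	sAddIn.ui64RegValue = ui64Value;
	sAddIn.ui64RegMask = ui64Mask;

	BridgeCall(PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG, &sAddIn, &sAddOut);
	if (sAddOut.eError != PVRSRV_OK)
		return sAddOut.eError;

	BridgeCall(PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG, &sEnIn, &sEnOut);
	return sEnOut.eError;
}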
++*******************************************************************************/ ++ ++#ifndef COMMON_RGXTA3D_BRIDGE_H ++#define COMMON_RGXTA3D_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "rgx_bridge.h" ++#include "rgx_fwif_shared.h" ++#include "devicemem_typedefs.h" ++#include "pvrsrv_sync_km.h" ++ ++#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2 PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12 ++#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13 ++#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13) ++ ++/******************************************* ++ RGXCreateHWRTDataSet ++ *******************************************/ ++ ++/* Bridge in structure for RGXCreateHWRTDataSet */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET_TAG ++{ ++ IMG_UINT64 ui64FlippedMultiSampleCtl; ++ IMG_UINT64 ui64MultiSampleCtl; ++ IMG_DEV_VIRTADDR *psMacrotileArrayDevVAddr; ++ IMG_DEV_VIRTADDR *psPMMlistDevVAddr; ++ IMG_DEV_VIRTADDR *psRTCDevVAddr; ++ IMG_DEV_VIRTADDR *psRgnHeaderDevVAddr; ++ IMG_DEV_VIRTADDR *psTailPtrsDevVAddr; ++ IMG_DEV_VIRTADDR *psVHeapTableDevVAddr; ++ IMG_HANDLE *phKmHwRTDataSet; ++ IMG_HANDLE *phapsFreeLists; ++ IMG_UINT32 ui32ISPMergeLowerX; ++ IMG_UINT32 ui32ISPMergeLowerY; ++ IMG_UINT32 ui32ISPMergeScaleX; ++ IMG_UINT32 ui32ISPMergeScaleY; ++ IMG_UINT32 ui32ISPMergeUpperX; ++ IMG_UINT32 ui32ISPMergeUpperY; ++ IMG_UINT32 ui32ISPMtileSize; ++ IMG_UINT32 ui32MTileStride; ++ IMG_UINT32 ui32PPPScreen; ++ IMG_UINT32 ui32RgnHeaderSize; ++ IMG_UINT32 ui32TEAA; ++ IMG_UINT32 ui32TEMTILE1; ++ IMG_UINT32 ui32TEMTILE2; ++ IMG_UINT32 ui32TEScreen; ++ IMG_UINT32 ui32TPCSize; ++ IMG_UINT32 ui32TPCStride; ++ IMG_UINT16 ui16MaxRTs; ++} __packed PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET; ++ ++/* Bridge out structure for RGXCreateHWRTDataSet */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET_TAG ++{ ++ IMG_HANDLE *phKmHwRTDataSet; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET; ++ ++/******************************************* ++ RGXDestroyHWRTDataSet ++ *******************************************/ ++ ++/* Bridge in structure for RGXDestroyHWRTDataSet */ ++typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET_TAG ++{ ++ IMG_HANDLE hKmHwRTDataSet; ++} __packed PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET; ++ ++/* Bridge out structure for RGXDestroyHWRTDataSet */ ++typedef struct 
PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET; ++ ++/******************************************* ++ RGXCreateZSBuffer ++ *******************************************/ ++ ++/* Bridge in structure for RGXCreateZSBuffer */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG ++{ ++ IMG_HANDLE hPMR; ++ IMG_HANDLE hReservation; ++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags; ++} __packed PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER; ++ ++/* Bridge out structure for RGXCreateZSBuffer */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG ++{ ++ IMG_HANDLE hsZSBufferKM; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER; ++ ++/******************************************* ++ RGXDestroyZSBuffer ++ *******************************************/ ++ ++/* Bridge in structure for RGXDestroyZSBuffer */ ++typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG ++{ ++ IMG_HANDLE hsZSBufferMemDesc; ++} __packed PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER; ++ ++/* Bridge out structure for RGXDestroyZSBuffer */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER; ++ ++/******************************************* ++ RGXPopulateZSBuffer ++ *******************************************/ ++ ++/* Bridge in structure for RGXPopulateZSBuffer */ ++typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG ++{ ++ IMG_HANDLE hsZSBufferKM; ++} __packed PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER; ++ ++/* Bridge out structure for RGXPopulateZSBuffer */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG ++{ ++ IMG_HANDLE hsPopulation; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER; ++ ++/******************************************* ++ RGXUnpopulateZSBuffer ++ *******************************************/ ++ ++/* Bridge in structure for RGXUnpopulateZSBuffer */ ++typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG ++{ ++ IMG_HANDLE hsPopulation; ++} __packed PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER; ++ ++/* Bridge out structure for RGXUnpopulateZSBuffer */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER; ++ ++/******************************************* ++ RGXCreateFreeList ++ *******************************************/ ++ ++/* Bridge in structure for RGXCreateFreeList */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG ++{ ++ IMG_DEV_VIRTADDR spsFreeListDevVAddr; ++ IMG_DEVMEM_OFFSET_T uiPMROffset; ++ IMG_HANDLE hMemCtxPrivData; ++ IMG_HANDLE hsFreeListPMR; ++ IMG_HANDLE hsGlobalFreeList; ++ IMG_BOOL bbFreeListCheck; ++ IMG_UINT32 ui32GrowFLPages; ++ IMG_UINT32 ui32GrowParamThreshold; ++ IMG_UINT32 ui32InitFLPages; ++ IMG_UINT32 ui32MaxFLPages; ++} __packed PVRSRV_BRIDGE_IN_RGXCREATEFREELIST; ++ ++/* Bridge out structure for RGXCreateFreeList */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG ++{ ++ IMG_HANDLE hCleanupCookie; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST; ++ ++/******************************************* ++ RGXDestroyFreeList ++ *******************************************/ ++ ++/* Bridge in structure for RGXDestroyFreeList */ ++typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG ++{ ++ IMG_HANDLE hCleanupCookie; ++} __packed PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST; ++ ++/* Bridge out structure for RGXDestroyFreeList */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG ++{ ++ 
PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST; ++ ++/******************************************* ++ RGXCreateRenderContext ++ *******************************************/ ++ ++/* Bridge in structure for RGXCreateRenderContext */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG ++{ ++ IMG_DEV_VIRTADDR sVDMCallStackAddr; ++ IMG_UINT64 ui64RobustnessAddress; ++ IMG_HANDLE hPrivData; ++ IMG_BYTE *pui8FrameworkCmd; ++ IMG_BYTE *pui8StaticRenderContextState; ++ IMG_UINT32 ui32ContextFlags; ++ IMG_UINT32 ui32FrameworkCmdSize; ++ IMG_UINT32 ui32Max3DDeadlineMS; ++ IMG_UINT32 ui32MaxTADeadlineMS; ++ IMG_UINT32 ui32PackedCCBSizeU8888; ++ IMG_UINT32 ui32Priority; ++ IMG_UINT32 ui32StaticRenderContextStateSize; ++ IMG_UINT32 ui32ui32CallStackDepth; ++} __packed PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT; ++ ++/* Bridge out structure for RGXCreateRenderContext */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG ++{ ++ IMG_HANDLE hRenderContext; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT; ++ ++/******************************************* ++ RGXDestroyRenderContext ++ *******************************************/ ++ ++/* Bridge in structure for RGXDestroyRenderContext */ ++typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG ++{ ++ IMG_HANDLE hCleanupCookie; ++} __packed PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT; ++ ++/* Bridge out structure for RGXDestroyRenderContext */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT; ++ ++/******************************************* ++ RGXSetRenderContextPriority ++ *******************************************/ ++ ++/* Bridge in structure for RGXSetRenderContextPriority */ ++typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG ++{ ++ IMG_HANDLE hRenderContext; ++ IMG_UINT32 ui32Priority; ++} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY; ++ ++/* Bridge out structure for RGXSetRenderContextPriority */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY; ++ ++/******************************************* ++ RGXRenderContextStalled ++ *******************************************/ ++ ++/* Bridge in structure for RGXRenderContextStalled */ ++typedef struct PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED_TAG ++{ ++ IMG_HANDLE hRenderContext; ++} __packed PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED; ++ ++/* Bridge out structure for RGXRenderContextStalled */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED; ++ ++/******************************************* ++ RGXKickTA3D2 ++ *******************************************/ ++ ++/* Bridge in structure for RGXKickTA3D2 */ ++typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D2_TAG ++{ ++ IMG_UINT64 ui64Deadline; ++ IMG_HANDLE hKMHWRTDataSet; ++ IMG_HANDLE hMSAAScratchBuffer; ++ IMG_HANDLE hPRFenceUFOSyncPrimBlock; ++ IMG_HANDLE hRenderContext; ++ IMG_HANDLE hZSBuffer; ++ IMG_UINT32 *pui32Client3DUpdateSyncOffset; ++ IMG_UINT32 *pui32Client3DUpdateValue; ++ IMG_UINT32 *pui32ClientTAFenceSyncOffset; ++ IMG_UINT32 *pui32ClientTAFenceValue; ++ IMG_UINT32 *pui32ClientTAUpdateSyncOffset; ++ IMG_UINT32 *pui32ClientTAUpdateValue; ++ IMG_UINT32 *pui32SyncPMRFlags; ++ IMG_BYTE *pui83DCmd; ++ IMG_BYTE *pui83DPRCmd; ++ IMG_BYTE *pui8TACmd; ++ IMG_CHAR 
*puiUpdateFenceName; ++ IMG_CHAR *puiUpdateFenceName3D; ++ IMG_HANDLE *phClient3DUpdateSyncPrimBlock; ++ IMG_HANDLE *phClientTAFenceSyncPrimBlock; ++ IMG_HANDLE *phClientTAUpdateSyncPrimBlock; ++ IMG_HANDLE *phSyncPMRs; ++ IMG_BOOL bbAbort; ++ IMG_BOOL bbKick3D; ++ IMG_BOOL bbKickPR; ++ IMG_BOOL bbKickTA; ++ PVRSRV_FENCE hCheckFence; ++ PVRSRV_FENCE hCheckFence3D; ++ PVRSRV_TIMELINE hUpdateTimeline; ++ PVRSRV_TIMELINE hUpdateTimeline3D; ++ IMG_UINT32 ui323DCmdSize; ++ IMG_UINT32 ui323DPRCmdSize; ++ IMG_UINT32 ui32Client3DUpdateCount; ++ IMG_UINT32 ui32ClientTAFenceCount; ++ IMG_UINT32 ui32ClientTAUpdateCount; ++ IMG_UINT32 ui32ExtJobRef; ++ IMG_UINT32 ui32NumberOfDrawCalls; ++ IMG_UINT32 ui32NumberOfIndices; ++ IMG_UINT32 ui32NumberOfMRTs; ++ IMG_UINT32 ui32PDumpFlags; ++ IMG_UINT32 ui32PRFenceUFOSyncOffset; ++ IMG_UINT32 ui32PRFenceValue; ++ IMG_UINT32 ui32RenderTargetSize; ++ IMG_UINT32 ui32SyncPMRCount; ++ IMG_UINT32 ui32TACmdSize; ++} __packed PVRSRV_BRIDGE_IN_RGXKICKTA3D2; ++ ++/* Bridge out structure for RGXKickTA3D2 */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D2_TAG ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_FENCE hUpdateFence; ++ PVRSRV_FENCE hUpdateFence3D; ++} __packed PVRSRV_BRIDGE_OUT_RGXKICKTA3D2; ++ ++/******************************************* ++ RGXSetRenderContextProperty ++ *******************************************/ ++ ++/* Bridge in structure for RGXSetRenderContextProperty */ ++typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY_TAG ++{ ++ IMG_UINT64 ui64Input; ++ IMG_HANDLE hRenderContext; ++ IMG_UINT32 ui32Property; ++} __packed PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY; ++ ++/* Bridge out structure for RGXSetRenderContextProperty */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY_TAG ++{ ++ IMG_UINT64 ui64Output; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY; ++ ++#endif /* COMMON_RGXTA3D_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_rgxtimerquery_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxtimerquery_bridge.h +new file mode 100644 +index 000000000000..34d7c2c56a2c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_rgxtimerquery_bridge.h +@@ -0,0 +1,112 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for rgxtimerquery ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxtimerquery ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
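The rgxta3d bridge above is the largest of the set, but its simpler commands keep the same shape; RGXSetRenderContextPriority, for example, is just a context handle plus a priority word. A sketch with the hypothetical BridgeCall() dispatcher (the priority encoding itself is scheduler policy and not defined by this header):

#include "common_rgxta3d_bridge.h"

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* hypothetical */

static PVRSRV_ERROR SetRenderContextPriority(IMG_HANDLE hRenderContext,
					     IMG_UINT32 ui32Priority)
{
	PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY sIn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY sOut = { 0 };

	sIn.hRenderContext = hRenderContext;
	sIn.ui32Priority = ui32Priority;

	BridgeCall(PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, &sIn, &sOut);
	return sOut.eError;
}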
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#ifndef COMMON_RGXTIMERQUERY_BRIDGE_H ++#define COMMON_RGXTIMERQUERY_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "rgx_bridge.h" ++ ++#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_FIRST+2) ++ ++/******************************************* ++ RGXBeginTimerQuery ++ *******************************************/ ++ ++/* Bridge in structure for RGXBeginTimerQuery */ ++typedef struct PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY_TAG ++{ ++ IMG_UINT32 ui32QueryId; ++} __packed PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY; ++ ++/* Bridge out structure for RGXBeginTimerQuery */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY; ++ ++/******************************************* ++ RGXEndTimerQuery ++ *******************************************/ ++ ++/* Bridge in structure for RGXEndTimerQuery */ ++typedef struct PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY; ++ ++/* Bridge out structure for RGXEndTimerQuery */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY; ++ ++/******************************************* ++ RGXQueryTimer ++ *******************************************/ ++ ++/* Bridge in structure for RGXQueryTimer */ ++typedef struct PVRSRV_BRIDGE_IN_RGXQUERYTIMER_TAG ++{ ++ IMG_UINT32 ui32QueryId; ++} __packed PVRSRV_BRIDGE_IN_RGXQUERYTIMER; ++ ++/* Bridge out structure for RGXQueryTimer */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXQUERYTIMER_TAG ++{ ++ IMG_UINT64 ui64EndTime; ++ IMG_UINT64 ui64StartTime; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXQUERYTIMER; ++ ++#endif /* COMMON_RGXTIMERQUERY_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h 
b/drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h +new file mode 100644 +index 000000000000..9489ddae2a05 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_rgxtq2_bridge.h +@@ -0,0 +1,228 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for rgxtq2 ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxtq2 ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
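Stepping back to the rgxtimerquery bridge completed just above: RGXQueryTimer returns raw start and end timestamps, so elapsed time is computed on the caller's side. A sketch, same hypothetical dispatcher; the timestamp unit is whichever clock the firmware uses and is not specified by the header:

#include "common_rgxtimerquery_bridge.h"

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* hypothetical */

static PVRSRV_ERROR QueryElapsed(IMG_UINT32 ui32QueryId, IMG_UINT64 *pui64Elapsed)
{
	PVRSRV_BRIDGE_IN_RGXQUERYTIMER sIn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXQUERYTIMER sOut = { 0 };
	PVRSRV_ERROR eError;

	sIn.ui32QueryId = ui32QueryId;	/* the id used with begin/end */

	eError = BridgeCall(PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER, &sIn, &sOut);
	if (eError != PVRSRV_OK || sOut.eError != PVRSRV_OK)
		return (eError != PVRSRV_OK) ? eError : sOut.eError;

	/* Elapsed time is derived, not returned directly. */
	*pui64Elapsed = sOut.ui64EndTime - sOut.ui64StartTime;
	return PVRSRV_OK;
}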
++*******************************************************************************/ ++ ++#ifndef COMMON_RGXTQ2_BRIDGE_H ++#define COMMON_RGXTQ2_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "rgx_bridge.h" ++#include "pvrsrv_sync_km.h" ++ ++#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3 ++#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4 ++#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+5 ++#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+6 ++#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7 ++#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+7) ++ ++/******************************************* ++ RGXTDMCreateTransferContext ++ *******************************************/ ++ ++/* Bridge in structure for RGXTDMCreateTransferContext */ ++typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG ++{ ++ IMG_UINT64 ui64RobustnessAddress; ++ IMG_HANDLE hPrivData; ++ IMG_BYTE *pui8FrameworkCmd; ++ IMG_UINT32 ui32ContextFlags; ++ IMG_UINT32 ui32FrameworkCmdSize; ++ IMG_UINT32 ui32PackedCCBSizeU88; ++ IMG_UINT32 ui32Priority; ++} __packed PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT; ++ ++/* Bridge out structure for RGXTDMCreateTransferContext */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG ++{ ++ IMG_HANDLE hTransferContext; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT; ++ ++/******************************************* ++ RGXTDMDestroyTransferContext ++ *******************************************/ ++ ++/* Bridge in structure for RGXTDMDestroyTransferContext */ ++typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG ++{ ++ IMG_HANDLE hTransferContext; ++} __packed PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT; ++ ++/* Bridge out structure for RGXTDMDestroyTransferContext */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT; ++ ++/******************************************* ++ RGXTDMSetTransferContextPriority ++ *******************************************/ ++ ++/* Bridge in structure for RGXTDMSetTransferContextPriority */ ++typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG ++{ ++ IMG_HANDLE hTransferContext; ++ IMG_UINT32 ui32Priority; ++} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY; ++ ++/* Bridge out structure for RGXTDMSetTransferContextPriority */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY; ++ ++/******************************************* ++ RGXTDMNotifyWriteOffsetUpdate ++ *******************************************/ ++ ++/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */ ++typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG ++{ ++ IMG_HANDLE hTransferContext; ++ IMG_UINT32 ui32PDumpFlags; ++} __packed 
PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE; ++ ++/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE; ++ ++/******************************************* ++ RGXTDMSubmitTransfer2 ++ *******************************************/ ++ ++/* Bridge in structure for RGXTDMSubmitTransfer2 */ ++typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2_TAG ++{ ++ IMG_UINT64 ui64DeadlineInus; ++ IMG_HANDLE hTransferContext; ++ IMG_UINT32 *pui32SyncPMRFlags; ++ IMG_UINT32 *pui32UpdateSyncOffset; ++ IMG_UINT32 *pui32UpdateValue; ++ IMG_UINT8 *pui8FWCommand; ++ IMG_CHAR *puiUpdateFenceName; ++ IMG_HANDLE *phSyncPMRs; ++ IMG_HANDLE *phUpdateUFOSyncPrimBlock; ++ PVRSRV_FENCE hCheckFenceFD; ++ PVRSRV_TIMELINE hUpdateTimeline; ++ IMG_UINT32 ui32Characteristic1; ++ IMG_UINT32 ui32Characteristic2; ++ IMG_UINT32 ui32ClientUpdateCount; ++ IMG_UINT32 ui32CommandSize; ++ IMG_UINT32 ui32ExternalJobReference; ++ IMG_UINT32 ui32PDumpFlags; ++ IMG_UINT32 ui32SyncPMRCount; ++} __packed PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2; ++ ++/* Bridge out structure for RGXTDMSubmitTransfer2 */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2_TAG ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_FENCE hUpdateFence; ++} __packed PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2; ++ ++/******************************************* ++ RGXTDMGetSharedMemory ++ *******************************************/ ++ ++/* Bridge in structure for RGXTDMGetSharedMemory */ ++typedef struct PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY; ++ ++/* Bridge out structure for RGXTDMGetSharedMemory */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY_TAG ++{ ++ IMG_HANDLE hCLIPMRMem; ++ IMG_HANDLE hUSCPMRMem; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY; ++ ++/******************************************* ++ RGXTDMReleaseSharedMemory ++ *******************************************/ ++ ++/* Bridge in structure for RGXTDMReleaseSharedMemory */ ++typedef struct PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY_TAG ++{ ++ IMG_HANDLE hPMRMem; ++} __packed PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY; ++ ++/* Bridge out structure for RGXTDMReleaseSharedMemory */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY; ++ ++/******************************************* ++ RGXTDMSetTransferContextProperty ++ *******************************************/ ++ ++/* Bridge in structure for RGXTDMSetTransferContextProperty */ ++typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG ++{ ++ IMG_UINT64 ui64Input; ++ IMG_HANDLE hTransferContext; ++ IMG_UINT32 ui32Property; ++} __packed PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY; ++ ++/* Bridge out structure for RGXTDMSetTransferContextProperty */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY_TAG ++{ ++ IMG_UINT64 ui64Output; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY; ++ ++#endif /* COMMON_RGXTQ2_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h b/drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h +new file mode 100644 +index 000000000000..b8642845ee7c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_rgxtq_bridge.h +@@ -0,0 +1,176 @@ 
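One rgxtq2 pairing worth illustrating before the rgxtq file body begins: RGXTDMGetSharedMemory returns two PMR handles (CLI and USC) while RGXTDMReleaseSharedMemory takes a single hPMRMem, so each handle is presumably released on its own. A sketch under that assumption, hypothetical BridgeCall() as before:

#include "common_rgxtq2_bridge.h"

extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *pvIn, void *pvOut); /* hypothetical */

static PVRSRV_ERROR TDMSharedMemRoundTrip(void)
{
	PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY sGetIn = { 0 };	/* placeholder-only payload */
	PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY sGetOut = { 0 };
	PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY sRelIn = { 0 };
	PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY sRelOut = { 0 };
	PVRSRV_ERROR eError;

	BridgeCall(PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY, &sGetIn, &sGetOut);
	if (sGetOut.eError != PVRSRV_OK)
		return sGetOut.eError;

	/* Assumed: the two handles are independent and released one per call. */
	sRelIn.hPMRMem = sGetOut.hCLIPMRMem;
	BridgeCall(PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY, &sRelIn, &sRelOut);
	eError = sRelOut.eError;

	sRelIn.hPMRMem = sGetOut.hUSCPMRMem;
	BridgeCall(PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY, &sRelIn, &sRelOut);
	return (eError != PVRSRV_OK) ? eError : sRelOut.eError;
}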
++/******************************************************************************* ++@File ++@Title Common bridge header for rgxtq ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for rgxtq ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#ifndef COMMON_RGXTQ_BRIDGE_H ++#define COMMON_RGXTQ_BRIDGE_H ++ ++#include ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "rgx_bridge.h" ++#include "pvrsrv_sync_km.h" ++ ++#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST 0 ++#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0 ++#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1 ++#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2 ++#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2 PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3 ++#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4 ++#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+4) ++ ++/******************************************* ++ RGXCreateTransferContext ++ *******************************************/ ++ ++/* Bridge in structure for RGXCreateTransferContext */ ++typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG ++{ ++ IMG_UINT64 ui64RobustnessAddress; ++ IMG_HANDLE hPrivData; ++ IMG_BYTE *pui8FrameworkCmd; ++ IMG_UINT32 ui32ContextFlags; ++ IMG_UINT32 ui32FrameworkCmdize; ++ IMG_UINT32 ui32PackedCCBSizeU8888; ++ IMG_UINT32 ui32Priority; ++} __packed PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT; ++ ++/* Bridge out structure for RGXCreateTransferContext */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG ++{ ++ IMG_HANDLE hCLIPMRMem; ++ IMG_HANDLE hTransferContext; ++ IMG_HANDLE hUSCPMRMem; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT; ++ ++/******************************************* ++ RGXDestroyTransferContext ++ *******************************************/ ++ ++/* Bridge in structure for RGXDestroyTransferContext */ ++typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG ++{ ++ IMG_HANDLE hTransferContext; ++} __packed PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT; ++ ++/* Bridge out structure for RGXDestroyTransferContext */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT; ++ ++/******************************************* ++ RGXSetTransferContextPriority ++ *******************************************/ ++ ++/* Bridge in structure for RGXSetTransferContextPriority */ ++typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG ++{ ++ IMG_HANDLE hTransferContext; ++ IMG_UINT32 ui32Priority; ++} __packed PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY; ++ ++/* Bridge out structure for RGXSetTransferContextPriority */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY; ++ ++/******************************************* ++ RGXSubmitTransfer2 ++ *******************************************/ ++ ++/* Bridge in structure for RGXSubmitTransfer2 */ ++typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2_TAG ++{ ++ IMG_HANDLE hTransferContext; ++ IMG_UINT32 *pui32ClientUpdateCount; ++ IMG_UINT32 *pui32CommandSize; ++ IMG_UINT32 *pui32SyncPMRFlags; ++ IMG_UINT32 *pui32TQPrepareFlags; ++ IMG_UINT32 **pui32UpdateSyncOffset; ++ IMG_UINT32 **pui32UpdateValue; ++ IMG_UINT8 **pui8FWCommand; ++ IMG_CHAR *puiUpdateFenceName; ++ IMG_HANDLE *phSyncPMRs; ++ IMG_HANDLE **phUpdateUFOSyncPrimBlock; ++ PVRSRV_TIMELINE h2DUpdateTimeline; ++ PVRSRV_TIMELINE h3DUpdateTimeline; ++ 
PVRSRV_FENCE hCheckFenceFD; ++ IMG_UINT32 ui32ExtJobRef; ++ IMG_UINT32 ui32PrepareCount; ++ IMG_UINT32 ui32SyncPMRCount; ++} __packed PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2; ++ ++/* Bridge out structure for RGXSubmitTransfer2 */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2_TAG ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_FENCE h2DUpdateFence; ++ PVRSRV_FENCE h3DUpdateFence; ++} __packed PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2; ++ ++/******************************************* ++ RGXSetTransferContextProperty ++ *******************************************/ ++ ++/* Bridge in structure for RGXSetTransferContextProperty */ ++typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY_TAG ++{ ++ IMG_UINT64 ui64Input; ++ IMG_HANDLE hTransferContext; ++ IMG_UINT32 ui32Property; ++} __packed PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY; ++ ++/* Bridge out structure for RGXSetTransferContextProperty */ ++typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY_TAG ++{ ++ IMG_UINT64 ui64Output; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY; ++ ++#endif /* COMMON_RGXTQ_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_ri_bridge.h b/drivers/gpu/drm/img-rogue/common_ri_bridge.h +new file mode 100644 +index 000000000000..ca9b68751b13 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_ri_bridge.h +@@ -0,0 +1,225 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for ri ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for ri ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#ifndef COMMON_RI_BRIDGE_H
++#define COMMON_RI_BRIDGE_H
++
++#include <powervr/mem_types.h>
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#include "ri_typedefs.h"
++
++#define PVRSRV_BRIDGE_RI_CMD_FIRST 0
++#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+0
++#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+1
++#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+2
++#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR PVRSRV_BRIDGE_RI_CMD_FIRST+3
++#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+4
++#define PVRSRV_BRIDGE_RI_RIDUMPLIST PVRSRV_BRIDGE_RI_CMD_FIRST+5
++#define PVRSRV_BRIDGE_RI_RIDUMPALL PVRSRV_BRIDGE_RI_CMD_FIRST+6
++#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS PVRSRV_BRIDGE_RI_CMD_FIRST+7
++#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER PVRSRV_BRIDGE_RI_CMD_FIRST+8
++#define PVRSRV_BRIDGE_RI_CMD_LAST (PVRSRV_BRIDGE_RI_CMD_FIRST+8)
++
++/*******************************************
++ RIWritePMREntry
++ *******************************************/
++
++/* Bridge in structure for RIWritePMREntry */
++typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG
++{
++ IMG_HANDLE hPMRHandle;
++} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY;
++
++/* Bridge out structure for RIWritePMREntry */
++typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY;
++
++/*******************************************
++ RIWriteMEMDESCEntry
++ *******************************************/
++
++/* Bridge in structure for RIWriteMEMDESCEntry */
++typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG
++{
++ IMG_UINT64 ui64Offset;
++ IMG_UINT64 ui64Size;
++ IMG_HANDLE hPMRHandle;
++ const IMG_CHAR *puiTextB;
++ IMG_BOOL bIsImport;
++ IMG_BOOL bIsSuballoc;
++ IMG_UINT32 ui32TextBSize;
++} __packed PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
++
++/* Bridge out structure for RIWriteMEMDESCEntry */
++typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG
++{
++ IMG_HANDLE hRIHandle;
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY;
++
++/*******************************************
++ RIWriteProcListEntry
++ *******************************************/
++
++/* Bridge in structure for RIWriteProcListEntry */
++typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG
++{
++ IMG_UINT64 ui64DevVAddr;
++ IMG_UINT64 ui64Size;
++ const IMG_CHAR *puiTextB;
++ IMG_UINT32 ui32TextBSize;
++} __packed PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY;
++
++/* Bridge out structure for RIWriteProcListEntry */
++typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG
++{
++ IMG_HANDLE hRIHandle;
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY;
++
++/*******************************************
++ RIUpdateMEMDESCAddr
++ *******************************************/
++
++/* Bridge in structure for RIUpdateMEMDESCAddr
*/ ++typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG ++{ ++ IMG_DEV_VIRTADDR sAddr; ++ IMG_HANDLE hRIHandle; ++} __packed PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR; ++ ++/* Bridge out structure for RIUpdateMEMDESCAddr */ ++typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR; ++ ++/******************************************* ++ RIDeleteMEMDESCEntry ++ *******************************************/ ++ ++/* Bridge in structure for RIDeleteMEMDESCEntry */ ++typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG ++{ ++ IMG_HANDLE hRIHandle; ++} __packed PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY; ++ ++/* Bridge out structure for RIDeleteMEMDESCEntry */ ++typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY; ++ ++/******************************************* ++ RIDumpList ++ *******************************************/ ++ ++/* Bridge in structure for RIDumpList */ ++typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG ++{ ++ IMG_HANDLE hPMRHandle; ++} __packed PVRSRV_BRIDGE_IN_RIDUMPLIST; ++ ++/* Bridge out structure for RIDumpList */ ++typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RIDUMPLIST; ++ ++/******************************************* ++ RIDumpAll ++ *******************************************/ ++ ++/* Bridge in structure for RIDumpAll */ ++typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_RIDUMPALL; ++ ++/* Bridge out structure for RIDumpAll */ ++typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RIDUMPALL; ++ ++/******************************************* ++ RIDumpProcess ++ *******************************************/ ++ ++/* Bridge in structure for RIDumpProcess */ ++typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG ++{ ++ IMG_PID ui32Pid; ++} __packed PVRSRV_BRIDGE_IN_RIDUMPPROCESS; ++ ++/* Bridge out structure for RIDumpProcess */ ++typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RIDUMPPROCESS; ++ ++/******************************************* ++ RIWritePMREntryWithOwner ++ *******************************************/ ++ ++/* Bridge in structure for RIWritePMREntryWithOwner */ ++typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER_TAG ++{ ++ IMG_HANDLE hPMRHandle; ++ IMG_PID ui32Owner; ++} __packed PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER; ++ ++/* Bridge out structure for RIWritePMREntryWithOwner */ ++typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER; ++ ++#endif /* COMMON_RI_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_srvcore_bridge.h b/drivers/gpu/drm/img-rogue/common_srvcore_bridge.h +new file mode 100644 +index 000000000000..7e9ac6eed8a1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_srvcore_bridge.h +@@ -0,0 +1,369 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for srvcore ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for srvcore ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#ifndef COMMON_SRVCORE_BRIDGE_H
++#define COMMON_SRVCORE_BRIDGE_H
++
++#include <powervr/mem_types.h>
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#include "pvrsrv_device_types.h"
++#include "cache_ops.h"
++
++#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST 0
++#define PVRSRV_BRIDGE_SRVCORE_CONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0
++#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1
++#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2
++#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3
++#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4
++#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5
++#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6
++#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7
++#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8
++#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9
++#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10
++#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11
++#define PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12
++#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13
++#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14
++#define PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15
++#define PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16
++#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST (PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+16)
++
++/*******************************************
++ Connect
++ *******************************************/
++
++/* Bridge in structure for Connect */
++typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG
++{
++ IMG_UINT32 ui32ClientBuildOptions;
++ IMG_UINT32 ui32ClientDDKBuild;
++ IMG_UINT32 ui32ClientDDKVersion;
++ IMG_UINT32 ui32Flags;
++} __packed PVRSRV_BRIDGE_IN_CONNECT;
++
++/* Bridge out structure for Connect */
++typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG
++{
++ IMG_UINT64 ui64PackedBvnc;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32CapabilityFlags;
++ IMG_UINT8 ui8KernelArch;
++} __packed PVRSRV_BRIDGE_OUT_CONNECT;
++
++/*******************************************
++ Disconnect
++ *******************************************/
++
++/* Bridge in structure for Disconnect */
++typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG
++{
++ IMG_UINT32 ui32EmptyStructPlaceholder;
++} __packed PVRSRV_BRIDGE_IN_DISCONNECT;
++
++/* Bridge out structure for Disconnect */
++typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_DISCONNECT;
++
++/*******************************************
++ AcquireGlobalEventObject
++ *******************************************/
++
++/* Bridge in structure for AcquireGlobalEventObject */
++typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG
++{
++ IMG_UINT32 ui32EmptyStructPlaceholder;
++} __packed PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT;
++
++/* Bridge out structure for AcquireGlobalEventObject */
++typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG
++{
++ IMG_HANDLE hGlobalEventObject;
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT;
++ 
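++/* Editorial note, illustrative only and not part of the generated DDK
++ * header: every srvcore bridge call in this file follows the same pattern,
++ * a command index defined as PVRSRV_BRIDGE_SRVCORE_CMD_FIRST plus a fixed
++ * offset, one __packed "IN" structure marshalled to the server, and one
++ * __packed "OUT" structure that always carries a PVRSRV_ERROR back to the
++ * client. A minimal sketch of the range check a dispatcher might perform,
++ * where IsValidSrvcoreCmd is a hypothetical helper rather than a DDK
++ * function (ui32Cmd is unsigned and CMD_FIRST is 0, so only the upper
++ * bound needs testing):
++ *
++ *   static IMG_BOOL IsValidSrvcoreCmd(IMG_UINT32 ui32Cmd)
++ *   {
++ *       return (ui32Cmd <= PVRSRV_BRIDGE_SRVCORE_CMD_LAST) ? IMG_TRUE : IMG_FALSE;
++ *   }
++ *
++ * The global event object acquired above is then used via the
++ * EventObjectOpen, EventObjectWait(Timeout) and EventObjectClose calls
++ * declared below, and released again with ReleaseGlobalEventObject.
++ */
++ 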
++/******************************************* ++ ReleaseGlobalEventObject ++ *******************************************/ ++ ++/* Bridge in structure for ReleaseGlobalEventObject */ ++typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG ++{ ++ IMG_HANDLE hGlobalEventObject; ++} __packed PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT; ++ ++/* Bridge out structure for ReleaseGlobalEventObject */ ++typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT; ++ ++/******************************************* ++ EventObjectOpen ++ *******************************************/ ++ ++/* Bridge in structure for EventObjectOpen */ ++typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG ++{ ++ IMG_HANDLE hEventObject; ++} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN; ++ ++/* Bridge out structure for EventObjectOpen */ ++typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG ++{ ++ IMG_HANDLE hOSEvent; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN; ++ ++/******************************************* ++ EventObjectWait ++ *******************************************/ ++ ++/* Bridge in structure for EventObjectWait */ ++typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG ++{ ++ IMG_HANDLE hOSEventKM; ++} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT; ++ ++/* Bridge out structure for EventObjectWait */ ++typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT; ++ ++/******************************************* ++ EventObjectClose ++ *******************************************/ ++ ++/* Bridge in structure for EventObjectClose */ ++typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG ++{ ++ IMG_HANDLE hOSEventKM; ++} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE; ++ ++/* Bridge out structure for EventObjectClose */ ++typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE; ++ ++/******************************************* ++ DumpDebugInfo ++ *******************************************/ ++ ++/* Bridge in structure for DumpDebugInfo */ ++typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG ++{ ++ IMG_UINT32 ui32VerbLevel; ++} __packed PVRSRV_BRIDGE_IN_DUMPDEBUGINFO; ++ ++/* Bridge out structure for DumpDebugInfo */ ++typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO; ++ ++/******************************************* ++ GetDevClockSpeed ++ *******************************************/ ++ ++/* Bridge in structure for GetDevClockSpeed */ ++typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED; ++ ++/* Bridge out structure for GetDevClockSpeed */ ++typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32ClockSpeed; ++} __packed PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED; ++ ++/******************************************* ++ HWOpTimeout ++ *******************************************/ ++ ++/* Bridge in structure for HWOpTimeout */ ++typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_HWOPTIMEOUT; ++ ++/* Bridge out structure for HWOpTimeout */ ++typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_HWOPTIMEOUT; ++ ++/******************************************* 
++ AlignmentCheck ++ *******************************************/ ++ ++/* Bridge in structure for AlignmentCheck */ ++typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG ++{ ++ IMG_UINT32 *pui32AlignChecks; ++ IMG_UINT32 ui32AlignChecksSize; ++} __packed PVRSRV_BRIDGE_IN_ALIGNMENTCHECK; ++ ++/* Bridge out structure for AlignmentCheck */ ++typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK; ++ ++/******************************************* ++ GetDeviceStatus ++ *******************************************/ ++ ++/* Bridge in structure for GetDeviceStatus */ ++typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_GETDEVICESTATUS; ++ ++/* Bridge out structure for GetDeviceStatus */ ++typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32DeviceSatus; ++} __packed PVRSRV_BRIDGE_OUT_GETDEVICESTATUS; ++ ++/******************************************* ++ GetMultiCoreInfo ++ *******************************************/ ++ ++/* Bridge in structure for GetMultiCoreInfo */ ++typedef struct PVRSRV_BRIDGE_IN_GETMULTICOREINFO_TAG ++{ ++ IMG_UINT64 *pui64Caps; ++ IMG_UINT32 ui32CapsSize; ++} __packed PVRSRV_BRIDGE_IN_GETMULTICOREINFO; ++ ++/* Bridge out structure for GetMultiCoreInfo */ ++typedef struct PVRSRV_BRIDGE_OUT_GETMULTICOREINFO_TAG ++{ ++ IMG_UINT64 *pui64Caps; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32NumCores; ++} __packed PVRSRV_BRIDGE_OUT_GETMULTICOREINFO; ++ ++/******************************************* ++ EventObjectWaitTimeout ++ *******************************************/ ++ ++/* Bridge in structure for EventObjectWaitTimeout */ ++typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG ++{ ++ IMG_UINT64 ui64uiTimeoutus; ++ IMG_HANDLE hOSEventKM; ++} __packed PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT; ++ ++/* Bridge out structure for EventObjectWaitTimeout */ ++typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT; ++ ++/******************************************* ++ FindProcessMemStats ++ *******************************************/ ++ ++/* Bridge in structure for FindProcessMemStats */ ++typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG ++{ ++ IMG_UINT32 *pui32MemStatsArray; ++ IMG_BOOL bbAllProcessStats; ++ IMG_UINT32 ui32ArrSize; ++ IMG_UINT32 ui32PID; ++} __packed PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS; ++ ++/* Bridge out structure for FindProcessMemStats */ ++typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG ++{ ++ IMG_UINT32 *pui32MemStatsArray; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS; ++ ++/******************************************* ++ AcquireInfoPage ++ *******************************************/ ++ ++/* Bridge in structure for AcquireInfoPage */ ++typedef struct PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE_TAG ++{ ++ IMG_UINT32 ui32EmptyStructPlaceholder; ++} __packed PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE; ++ ++/* Bridge out structure for AcquireInfoPage */ ++typedef struct PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE_TAG ++{ ++ IMG_HANDLE hPMR; ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE; ++ ++/******************************************* ++ ReleaseInfoPage ++ *******************************************/ ++ ++/* Bridge in structure for ReleaseInfoPage */ ++typedef struct PVRSRV_BRIDGE_IN_RELEASEINFOPAGE_TAG ++{ ++ IMG_HANDLE hPMR; ++} __packed 
PVRSRV_BRIDGE_IN_RELEASEINFOPAGE; ++ ++/* Bridge out structure for ReleaseInfoPage */ ++typedef struct PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE; ++ ++#endif /* COMMON_SRVCORE_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_sync_bridge.h b/drivers/gpu/drm/img-rogue/common_sync_bridge.h +new file mode 100644 +index 000000000000..db48d2e90baa +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_sync_bridge.h +@@ -0,0 +1,254 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for sync ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for sync ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#ifndef COMMON_SYNC_BRIDGE_H
++#define COMMON_SYNC_BRIDGE_H
++
++#include <powervr/mem_types.h>
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#include "pdump.h"
++#include "pdumpdefs.h"
++#include "devicemem_typedefs.h"
++#include "pvrsrv_sync_km.h"
++#include <powervr/pvrsrv_sync_ext.h>
++
++#define PVRSRV_BRIDGE_SYNC_CMD_FIRST 0
++#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+0
++#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+1
++#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+2
++#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP PVRSRV_BRIDGE_SYNC_CMD_FIRST+3
++#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE PVRSRV_BRIDGE_SYNC_CMD_FIRST+4
++#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+5
++#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP PVRSRV_BRIDGE_SYNC_CMD_FIRST+6
++#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+7
++#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+8
++#define PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+9
++#define PVRSRV_BRIDGE_SYNC_CMD_LAST (PVRSRV_BRIDGE_SYNC_CMD_FIRST+9)
++
++/*******************************************
++ AllocSyncPrimitiveBlock
++ *******************************************/
++
++/* Bridge in structure for AllocSyncPrimitiveBlock */
++typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG
++{
++ IMG_UINT32 ui32EmptyStructPlaceholder;
++} __packed PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK;
++
++/* Bridge out structure for AllocSyncPrimitiveBlock */
++typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG
++{
++ IMG_HANDLE hSyncHandle;
++ IMG_HANDLE hhSyncPMR;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32SyncPrimBlockSize;
++ IMG_UINT32 ui32SyncPrimVAddr;
++} __packed PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK;
++
++/*******************************************
++ FreeSyncPrimitiveBlock
++ *******************************************/
++
++/* Bridge in structure for FreeSyncPrimitiveBlock */
++typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG
++{
++ IMG_HANDLE hSyncHandle;
++} __packed PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK;
++
++/* Bridge out structure for FreeSyncPrimitiveBlock */
++typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK;
++
++/*******************************************
++ SyncPrimSet
++ *******************************************/
++
++/* Bridge in structure for SyncPrimSet */
++typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG
++{
++ IMG_HANDLE hSyncHandle;
++ IMG_UINT32 ui32Index;
++ IMG_UINT32 ui32Value;
++} __packed PVRSRV_BRIDGE_IN_SYNCPRIMSET;
++
++/* Bridge out structure for SyncPrimSet */
++typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMSET;
++
++/*******************************************
++ SyncPrimPDump
++ *******************************************/
++
++/* Bridge in structure for SyncPrimPDump */
++typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG
++{
++ IMG_HANDLE hSyncHandle;
++ IMG_UINT32 ui32Offset;
++} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP;
++
++/* Bridge out structure for SyncPrimPDump */
++typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP;
++
++/*******************************************
++ SyncPrimPDumpValue
++ 
*******************************************/ ++ ++/* Bridge in structure for SyncPrimPDumpValue */ ++typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG ++{ ++ IMG_HANDLE hSyncHandle; ++ IMG_UINT32 ui32Offset; ++ IMG_UINT32 ui32Value; ++} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE; ++ ++/* Bridge out structure for SyncPrimPDumpValue */ ++typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE; ++ ++/******************************************* ++ SyncPrimPDumpPol ++ *******************************************/ ++ ++/* Bridge in structure for SyncPrimPDumpPol */ ++typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG ++{ ++ IMG_HANDLE hSyncHandle; ++ PDUMP_POLL_OPERATOR eOperator; ++ IMG_UINT32 ui32Mask; ++ IMG_UINT32 ui32Offset; ++ IMG_UINT32 ui32Value; ++ PDUMP_FLAGS_T uiPDumpFlags; ++} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL; ++ ++/* Bridge out structure for SyncPrimPDumpPol */ ++typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL; ++ ++/******************************************* ++ SyncPrimPDumpCBP ++ *******************************************/ ++ ++/* Bridge in structure for SyncPrimPDumpCBP */ ++typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG ++{ ++ IMG_DEVMEM_SIZE_T uiBufferSize; ++ IMG_DEVMEM_SIZE_T uiPacketSize; ++ IMG_DEVMEM_OFFSET_T uiWriteOffset; ++ IMG_HANDLE hSyncHandle; ++ IMG_UINT32 ui32Offset; ++} __packed PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP; ++ ++/* Bridge out structure for SyncPrimPDumpCBP */ ++typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP; ++ ++/******************************************* ++ SyncAllocEvent ++ *******************************************/ ++ ++/* Bridge in structure for SyncAllocEvent */ ++typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG ++{ ++ const IMG_CHAR *puiClassName; ++ IMG_BOOL bServerSync; ++ IMG_UINT32 ui32ClassNameSize; ++ IMG_UINT32 ui32FWAddr; ++} __packed PVRSRV_BRIDGE_IN_SYNCALLOCEVENT; ++ ++/* Bridge out structure for SyncAllocEvent */ ++typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT; ++ ++/******************************************* ++ SyncFreeEvent ++ *******************************************/ ++ ++/* Bridge in structure for SyncFreeEvent */ ++typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG ++{ ++ IMG_UINT32 ui32FWAddr; ++} __packed PVRSRV_BRIDGE_IN_SYNCFREEEVENT; ++ ++/* Bridge out structure for SyncFreeEvent */ ++typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_SYNCFREEEVENT; ++ ++/******************************************* ++ SyncCheckpointSignalledPDumpPol ++ *******************************************/ ++ ++/* Bridge in structure for SyncCheckpointSignalledPDumpPol */ ++typedef struct PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG ++{ ++ PVRSRV_FENCE hFence; ++} __packed PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; ++ ++/* Bridge out structure for SyncCheckpointSignalledPDumpPol */ ++typedef struct PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL_TAG ++{ ++ PVRSRV_ERROR eError; ++} __packed PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL; ++ ++#endif /* COMMON_SYNC_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/common_synctracking_bridge.h b/drivers/gpu/drm/img-rogue/common_synctracking_bridge.h +new file mode 100644 +index 
000000000000..036c7dce629b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/common_synctracking_bridge.h +@@ -0,0 +1,97 @@ ++/******************************************************************************* ++@File ++@Title Common bridge header for synctracking ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Declares common defines and structures used by both the client ++ and server side of the bridge for synctracking ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#ifndef COMMON_SYNCTRACKING_BRIDGE_H
++#define COMMON_SYNCTRACKING_BRIDGE_H
++
++#include <powervr/mem_types.h>
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST 0
++#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0
++#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1
++#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST (PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1)
++
++/*******************************************
++ SyncRecordRemoveByHandle
++ *******************************************/
++
++/* Bridge in structure for SyncRecordRemoveByHandle */
++typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG
++{
++ IMG_HANDLE hhRecord;
++} __packed PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE;
++
++/* Bridge out structure for SyncRecordRemoveByHandle */
++typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG
++{
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE;
++
++/*******************************************
++ SyncRecordAdd
++ *******************************************/
++
++/* Bridge in structure for SyncRecordAdd */
++typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG
++{
++ IMG_HANDLE hhServerSyncPrimBlock;
++ const IMG_CHAR *puiClassName;
++ IMG_BOOL bbServerSync;
++ IMG_UINT32 ui32ClassNameSize;
++ IMG_UINT32 ui32ui32FwBlockAddr;
++ IMG_UINT32 ui32ui32SyncOffset;
++} __packed PVRSRV_BRIDGE_IN_SYNCRECORDADD;
++
++/* Bridge out structure for SyncRecordAdd */
++typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG
++{
++ IMG_HANDLE hhRecord;
++ PVRSRV_ERROR eError;
++} __packed PVRSRV_BRIDGE_OUT_SYNCRECORDADD;
++
++#endif /* COMMON_SYNCTRACKING_BRIDGE_H */
+diff --git a/drivers/gpu/drm/img-rogue/config_kernel.h b/drivers/gpu/drm/img-rogue/config_kernel.h
+new file mode 100644
+index 000000000000..f31772b60005
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/config_kernel.h
+@@ -0,0 +1,163 @@
++#define PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY 5
++#define PVRSRV_ENABLE_CCCB_GROW
++#define RGX_FW_FILENAME "rgx.fw"
++#define RGX_SH_FILENAME "rgx.sh"
++#define PVR_BUILD_DIR "xuantie_linux"
++#define PVRSRV_MODNAME "pvrsrvkm"
++#define PVRSYNC_MODNAME "pvr_sync"
++#define SUPPORT_RGX 1
++#define DISPLAY_CONTROLLER drm_nulldisp
++#define PVRSRV_HWPERF_COUNTERS_PERBLK 12
++#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_36.52.104.182.h"
++#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_36.V.104.182.h"
++#define PVRSRV_NEED_PVR_DPF
++#define SUPPORT_PHYSMEM_TEST
++#define SUPPORT_RGXTQ_BRIDGE
++#define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9
++#define PVRSRV_POISON_ON_FREE_VALUE 0x63
++#define RGX_NUM_OS_SUPPORTED 1
++#define RGX_OSID_0_DEFAULT_PRIORITY (1 - 0)
++#define RGX_OSID_1_DEFAULT_PRIORITY (1 - 1)
++#define RGX_OSID_2_DEFAULT_PRIORITY (1 - 2)
++#define RGX_OSID_3_DEFAULT_PRIORITY (1 - 3)
++#define RGX_OSID_4_DEFAULT_PRIORITY (1 - 4)
++#define RGX_OSID_5_DEFAULT_PRIORITY (1 - 5)
++#define RGX_OSID_6_DEFAULT_PRIORITY (1 - 6)
++#define RGX_OSID_7_DEFAULT_PRIORITY (1 - 7)
++#define RGX_HCS_DEFAULT_DEADLINE_MS 0xFFFFFFFFU
++#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF
++#define RGX_FW_HEAP_SHIFT 25
++#define SUPPORT_POWMON_COMPONENT
++#define PVR_POWER_ACTOR_MEASUREMENT_PERIOD_MS 10U
++#define PVR_POWER_MONITOR_HWPERF
++#define PVR_LDM_PLATFORM_PRE_REGISTERED
++#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm"
++#define 
PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256 ++#define ION_DEFAULT_HEAP_NAME "ion_system_heap" ++#define ION_DEFAULT_HEAP_ID_MASK (1 << ION_HEAP_TYPE_SYSTEM) ++#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT ++#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE ++#define PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE 0x4000 ++#define PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE 786432 ++#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS ++#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN ++#define PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING IMG_FALSE ++#define PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG ++#define PVRSRV_APPHINT_VALIDATEIRQ 0 ++#define PVRSRV_APPHINT_DISABLECLOCKGATING 0 ++#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0 ++#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0 ++#define PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH 0 ++#define PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH 0 ++#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL ++#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT ++#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE ++#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN ++#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0 ++#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048 ++#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048 ++#define PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS 50 ++#define PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP 0 ++#define PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME 0 ++#define PVRSRV_APPHINT_JONESDISABLEMASK 0 ++#define PVRSRV_APPHINT_NEWFILTERINGMODE 1 ++#define PVRSRV_APPHINT_TRUNCATEMODE 0 ++#define PVRSRV_APPHINT_EMUMAXFREQ 0 ++#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0 ++#define PVRSRV_APPHINT_RGXBVNC "" ++#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 5 ++#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0 ++#define PVRSRV_APPHINT_CACHEOPTHREADPRIORITY 1 ++#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE ++#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE ++#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG ++#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE ++#define PVRSRV_APPHINT_KCCB_SIZE_LOG2 7 ++#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT ++#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0 ++#define PVRSRV_APPHINT_ENABLELOGGROUP RGXFWIF_LOG_TYPE_NONE ++#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0 ++#define PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS ++#define PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE 0 ++#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPOLDEST ++#define PVRSRV_APPHINT_HTBUFFERSIZE 64 ++#define PVRSRV_APPHINT_ENABLEFTRACEGPU IMG_FALSE ++#define PVRSRV_APPHINT_HWPERFFWFILTER 0 ++#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0 ++#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0 ++#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0 ++#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0 ++#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0 ++#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN 0 ++#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL 0 ++#define PVRSRV_APPHINT_TIMECORRCLOCK 0 ++#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE ++#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD ++#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE ++#define PVRSRV_APPHINT_GPUUNITSPOWERCHANGE IMG_FALSE ++#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE ++#define PVRSRV_APPHINT_CACHEOPCONFIG 0 ++#define 
PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE 0 ++#define PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC IMG_FALSE ++#define PVRSRV_APPHINT_PHYSMEMTESTPASSES APPHNT_PHYSMEMTEST_ENABLE ++#define PVRSRV_APPHINT_TESTSLRINTERVAL 0 ++#define PVRSRV_APPHINT_RISCVDMITEST 0 ++#define PVRSRV_APPHINT_VALIDATESOCUSCTIMERS 0 ++#define SOC_TIMER_FREQ 20 ++#define PDVFS_COM_HOST 1 ++#define PDVFS_COM_AP 2 ++#define PDVFS_COM_PMC 3 ++#define PDVFS_COM_IMG_CLKDIV 4 ++#define PDVFS_COM PDVFS_COM_HOST ++#define PVR_GPIO_MODE_GENERAL 1 ++#define PVR_GPIO_MODE_POWMON_PIN 2 ++#define PVR_GPIO_MODE PVR_GPIO_MODE_GENERAL ++#define PVRSRV_ENABLE_PROCESS_STATS ++#define SUPPORT_USC_BREAKPOINT ++#define SUPPORT_AGP ++#define RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US 1000000 ++#define PVRSRV_DEVICE_INIT_MODE PVRSRV_LINUX_DEV_INIT_ON_CONNECT ++#define SUPPORT_DI_BRG_IMPL ++#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240 ++#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480 ++#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288 ++#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256 ++#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2 ++#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384 ++#define SUPPORT_NATIVE_FENCE_SYNC ++#define PVRSRV_STALLED_CCB_ACTION ++#define UPDATE_FENCE_CHECKPOINT_COUNT 1 ++#define PVR_DRM_NAME "pvr" ++#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 16 ++#define DISABLE_GPU_FREQUENCY_CALIBRATION ++#define RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS 0 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM 14 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM 13 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D 17 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D 17 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM 15 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA 16 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D 17 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC 13 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM 17 ++#define PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_RDM 15 ++#define SUPPORT_BUFFER_SYNC 1 ++#ifdef CONFIG_DRM_POWERVR_ROGUE_DEBUG ++#define DEBUG ++#define DEBUG_BRIDGE_KM ++#define PVRSRV_ENABLE_GPU_MEMORY_INFO ++#define PVRSRV_ENABLE_SYNC_POISONING ++#define PVR_ANNOTATION_MAX_LEN 96 ++#define PVR_BUILD_TYPE "debug" ++#define TRACK_FW_BOOT ++#else ++#define PVR_ANNOTATION_MAX_LEN 63 ++#define PVR_BUILD_TYPE "release" ++#define RELEASE ++#endif +diff --git a/drivers/gpu/drm/img-rogue/config_kernel.mk b/drivers/gpu/drm/img-rogue/config_kernel.mk +new file mode 100644 +index 000000000000..c4b8919057b1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/config_kernel.mk +@@ -0,0 +1,53 @@ ++override PVRSRV_DIR := services ++override HOST_PRIMARY_ARCH := host_x86_64 ++override HOST_32BIT_ARCH := host_i386 ++override HOST_FORCE_32BIT := -m32 ++override HOST_ALL_ARCH := host_x86_64 host_i386 ++override TARGET_PRIMARY_ARCH := target_riscv64 ++override TARGET_SECONDARY_ARCH := ++override TARGET_ALL_ARCH := target_riscv64 ++override TARGET_FORCE_32BIT := ++override PVR_ARCH := rogue ++override METAG_VERSION_NEEDED := 2.8.1.0.3 ++override MIPS_VERSION_NEEDED := 2014.07-1 ++override RISCV_VERSION_NEEDED := 1.0.1 ++override KERNEL_COMPONENTS := srvkm drm_nulldisp ++override WINDOW_SYSTEM := lws-generic ++override PVRSRV_MODNAME := pvrsrvkm ++override 
PVR_BUILD_DIR := xuantie_linux ++override SUPPORT_RGX := 1 ++override DISPLAY_CONTROLLER := drm_nulldisp ++override PVR_SYSTEM := rgx_xuantie ++override PVR_LOADER := ++override SORT_BRIDGE_STRUCTS := 1 ++override DEBUGLINK := 1 ++override RGX_BNC := 36.V.104.182 ++override SUPPORT_PHYSMEM_TEST := 1 ++override SUPPORT_MIPS_64K_PAGE_SIZE := ++override RGX_NUM_OS_SUPPORTED := 1 ++override VMM_TYPE := stub ++override SUPPORT_POWMON_COMPONENT := 1 ++override RGX_TIMECORR_CLOCK := mono ++override PDVFS_COM_HOST := 1 ++override PDVFS_COM_AP := 2 ++override PDVFS_COM_PMC := 3 ++override PDVFS_COM_IMG_CLKDIV := 4 ++override PDVFS_COM := PDVFS_COM_HOST ++override PVR_GPIO_MODE_GENERAL := 1 ++override PVR_GPIO_MODE_POWMON_PIN := 2 ++override PVR_GPIO_MODE := PVR_GPIO_MODE_GENERAL ++override PVR_HANDLE_BACKEND := idr ++override SUPPORT_DMABUF_BRIDGE := 1 ++override SUPPORT_USC_BREAKPOINT := 1 ++override SUPPORT_DI_BRG_IMPL := 1 ++override SUPPORT_NATIVE_FENCE_SYNC := 1 ++override SUPPORT_DMA_FENCE := 1 ++override SUPPORT_BUFFER_SYNC := 1 ++ifeq ($(CONFIG_DRM_POWERVR_ROGUE_DEBUG),y) ++override BUILD := debug ++override PVRSRV_ENABLE_GPU_MEMORY_INFO := 1 ++override PVR_BUILD_TYPE := debug ++else ++override BUILD := release ++override PVR_BUILD_TYPE := release ++endif +diff --git a/drivers/gpu/drm/img-rogue/configs/rgxconfig_km_1.V.4.5.h b/drivers/gpu/drm/img-rogue/configs/rgxconfig_km_1.V.4.5.h +new file mode 100644 +index 000000000000..b29afcf0afb0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/configs/rgxconfig_km_1.V.4.5.h +@@ -0,0 +1,80 @@ ++/*************************************************************************/ /*! ++@Title RGX Configuration for BVNC 1.V.4.5 (kernel defines) ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXCONFIG_KM_1_V_4_5_H ++#define RGXCONFIG_KM_1_V_4_5_H ++ ++/***** Automatically generated file. Do not edit manually ********************/ ++ ++/****************************************************************************** ++ * B.V.N.C Validation defines ++ *****************************************************************************/ ++#define RGX_BNC_KM_B 1 ++#define RGX_BNC_KM_N 4 ++#define RGX_BNC_KM_C 5 ++ ++/****************************************************************************** ++ * DDK Defines ++ *****************************************************************************/ ++#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) ++#define RGX_FEATURE_COMPUTE ++#define RGX_FEATURE_COMPUTE_OVERLAP ++#define RGX_FEATURE_FBCDC_ALGORITHM (1U) ++#define RGX_FEATURE_FBCDC_ARCHITECTURE (1U) ++#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) ++#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U) ++#define RGX_FEATURE_GS_RTA_SUPPORT ++#define RGX_FEATURE_LAYOUT_MARS (0U) ++#define RGX_FEATURE_META (MTP218) ++#define RGX_FEATURE_META_COREMEM_SIZE (0U) ++#define RGX_FEATURE_NUM_CLUSTERS (4U) ++#define RGX_FEATURE_NUM_ISP_IPP_PIPES (3U) ++#define RGX_FEATURE_NUM_RASTER_PIPES (1U) ++#define RGX_FEATURE_PERFBUS ++#define RGX_FEATURE_PHYS_BUS_WIDTH (40U) ++#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) ++#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (128U) ++#define RGX_FEATURE_TILE_SIZE_X (32U) ++#define RGX_FEATURE_TILE_SIZE_Y (32U) ++#define RGX_FEATURE_TLA ++#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) ++ ++#endif /* RGXCONFIG_KM_1_V_4_5_H */ +diff --git a/drivers/gpu/drm/img-rogue/configs/rgxconfig_km_36.V.104.182.h b/drivers/gpu/drm/img-rogue/configs/rgxconfig_km_36.V.104.182.h +new file mode 100644 +index 000000000000..0a6cdb0cda3f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/configs/rgxconfig_km_36.V.104.182.h +@@ -0,0 +1,105 @@ ++/*************************************************************************/ /*! ++@Title RGX Configuration for BVNC 36.V.104.182 (kernel defines) ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXCONFIG_KM_36_V_104_182_H ++#define RGXCONFIG_KM_36_V_104_182_H ++ ++/***** Automatically generated file. Do not edit manually ********************/ ++ ++/****************************************************************************** ++ * B.V.N.C Validation defines ++ *****************************************************************************/ ++#define RGX_BNC_KM_B 36 ++#define RGX_BNC_KM_N 104 ++#define RGX_BNC_KM_C 182 ++ ++/****************************************************************************** ++ * DDK Defines ++ *****************************************************************************/ ++#define RGX_FEATURE_AXI_ACELITE ++#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1U) ++#define RGX_FEATURE_COMPUTE ++#define RGX_FEATURE_COMPUTE_OVERLAP ++#define RGX_FEATURE_COREID_PER_OS ++#define RGX_FEATURE_FBCDC (50U) ++#define RGX_FEATURE_FBCDC_ALGORITHM (50U) ++#define RGX_FEATURE_FBCDC_ARCHITECTURE (7U) ++#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS (0U) ++#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS (0U) ++#define RGX_FEATURE_GPU_MULTICORE_SUPPORT ++#define RGX_FEATURE_GPU_VIRTUALISATION ++#define RGX_FEATURE_GS_RTA_SUPPORT ++#define RGX_FEATURE_IRQ_PER_OS ++#define RGX_FEATURE_LAYOUT_MARS (1U) ++#define RGX_FEATURE_MIPS ++#define RGX_FEATURE_NUM_CLUSTERS (1U) ++#define RGX_FEATURE_NUM_ISP_IPP_PIPES (6U) ++#define RGX_FEATURE_NUM_OSIDS (8U) ++#define RGX_FEATURE_NUM_RASTER_PIPES (1U) ++#define RGX_FEATURE_PBE2_IN_XE ++#define RGX_FEATURE_PBVNC_COREID_REG ++#define RGX_FEATURE_PERFBUS ++#define RGX_FEATURE_PHYS_BUS_WIDTH (36U) ++#define RGX_FEATURE_ROGUEXE ++#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT ++#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2 ++#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION (2U) ++#define RGX_FEATURE_SLC_BANKS (1U) ++#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512U) ++#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE /* Specifies the SLC is */ ++ /* customer-configurable. True SLC */ ++ /* size must be sourced from */ ++ /* register. 
*/ ++#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES (16U) ++#define RGX_FEATURE_SOC_TIMER ++#define RGX_FEATURE_SYS_BUS_SECURE_RESET ++#define RGX_FEATURE_TILE_SIZE_X (16U) ++#define RGX_FEATURE_TILE_SIZE_Y (16U) ++#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS ++#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS ++#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40U) ++#define RGX_FEATURE_XE_ARCHITECTURE (1U) ++#define RGX_FEATURE_XE_MEMORY_HIERARCHY ++#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH (19U) ++#define RGX_FEATURE_XPU_MAX_SLAVES (3U) ++#define RGX_FEATURE_XPU_REGISTER_BROADCAST (1U) ++ ++#endif /* RGXCONFIG_KM_36_V_104_182_H */ +diff --git a/drivers/gpu/drm/img-rogue/connection_server.c b/drivers/gpu/drm/img-rogue/connection_server.c +new file mode 100644 +index 000000000000..92e0551e7d76 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/connection_server.c +@@ -0,0 +1,491 @@ ++/*************************************************************************/ /*! ++@File ++@Title Server side connection management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Handles connections coming from the client and the management ++ connection based information ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "handle.h" ++#include "pvrsrv.h" ++#include "connection_server.h" ++#include "osconnection_server.h" ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "sync_server.h" ++#include "process_stats.h" ++#include "pdump_km.h" ++#include "osfunc.h" ++#include "tlstream.h" ++#include "rgxhwperf_common.h" ++ ++/* PID associated with Connection currently being purged by Cleanup thread */ ++static IMG_PID gCurrentPurgeConnectionPid; ++ ++static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection) ++{ ++ PVRSRV_ERROR eError; ++ PROCESS_HANDLE_BASE *psProcessHandleBase; ++ IMG_UINT64 ui64MaxBridgeTime; ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ if (psPVRSRVData->bUnload) ++ { ++ /* driver is unloading so do not allow the bridge lock to be released */ ++ ui64MaxBridgeTime = 0; ++ } ++ else ++ { ++ ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS; ++ } ++ ++ PVR_ASSERT(psConnection != NULL); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psConnection, "psConnection"); ++ ++ /* Close HWPerfClient stream here even though we created it in ++ * PVRSRVConnectKM(). */ ++ if (psConnection->hClientTLStream) ++ { ++ TLStreamClose(psConnection->hClientTLStream); ++ psConnection->hClientTLStream = NULL; ++ PVR_DPF((PVR_DBG_MESSAGE, "Destroyed private stream.")); ++ } ++ ++ /* Get process handle base to decrement the refcount */ ++ psProcessHandleBase = psConnection->psProcessHandleBase; ++ ++ if (psProcessHandleBase != NULL) ++ { ++ eError = PVRSRVReleaseProcessHandleBase(psProcessHandleBase, psConnection->pid, ++ ui64MaxBridgeTime); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVReleaseProcessHandleBase"); ++ ++ psConnection->psProcessHandleBase = NULL; ++ } ++ ++ /* Free handle base for this connection */ ++ if (psConnection->psHandleBase != NULL) ++ { ++ eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime); ++ /* ++ * If we get PVRSRV_ERROR_RETRY we need to pass this back to the caller ++ * who will schedule a retry. ++ * Do not log this as it is an expected exception. ++ * This can occur if the Firmware is still processing a workload from ++ * the client when a tear-down request is received. ++ * Retrying will allow the in-flight work to be completed and the ++ * tear-down request can be completed when the FW is no longer busy. 
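++	 * The retry is driven by the cleanup thread, which re-invokes
++	 * ConnectionDataDestroy via _CleanupThreadPurgeConnectionData until the
++	 * call stops returning PVRSRV_ERROR_RETRY or the retry count is used up.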
++ */ ++ if (PVRSRV_ERROR_RETRY == eError) ++ { ++ return eError; ++ } ++ else ++ { ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase:2"); ++ } ++ ++ psConnection->psHandleBase = NULL; ++ } ++ ++ if (psConnection->psSyncConnectionData != NULL) ++ { ++ SyncUnregisterConnection(psConnection->psSyncConnectionData); ++ psConnection->psSyncConnectionData = NULL; ++ } ++ ++ if (psConnection->psPDumpConnectionData != NULL) ++ { ++ PDumpUnregisterConnection(psConnection->psPDumpConnectionData); ++ psConnection->psPDumpConnectionData = NULL; ++ } ++ ++ /* Call environment specific connection data deinit function */ ++ if (psConnection->hOsPrivateData != NULL) ++ { ++ eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData); ++ PVR_LOG_RETURN_IF_ERROR(eError, "OSConnectionPrivateDataDeInit"); ++ ++ psConnection->hOsPrivateData = NULL; ++ } ++ ++ /* Close the PID stats entry as late as possible to catch all frees */ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ if (psConnection->hProcessStats != NULL) ++ { ++ PVRSRVStatsDeregisterProcess(psConnection->hProcessStats); ++ psConnection->hProcessStats = NULL; ++ } ++#endif ++ ++ OSFreeMemNoStats(psConnection); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData) ++{ ++ CONNECTION_DATA *psConnection; ++ PVRSRV_ERROR eError; ++ PROCESS_HANDLE_BASE *psProcessHandleBase; ++ ++ /* Allocate connection data area, no stats since process not registered yet */ ++ psConnection = OSAllocZMemNoStats(sizeof(*psConnection)); ++ PVR_LOG_RETURN_IF_NOMEM(psConnection, "psConnection"); ++ ++ /* Allocate process statistics as early as possible to catch all allocs */ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", failure); ++#endif ++ ++ /* Call environment specific connection data init function */ ++ eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSConnectionPrivateDataInit", failure); ++ ++ psConnection->pid = OSGetCurrentClientProcessIDKM(); ++ psConnection->vpid = OSGetCurrentVirtualProcessID(); ++ psConnection->tid = (IMG_UINT32)OSGetCurrentClientThreadIDKM(); ++ OSStringLCopy(psConnection->pszProcName, OSGetCurrentClientProcessNameKM(), PVRSRV_CONNECTION_PROCESS_NAME_LEN); ++ ++#if defined(SUPPORT_DMA_TRANSFER) ++ OSLockCreate(&psConnection->hDmaReqLock); ++ ++ eError = OSEventObjectCreate("Dma transfer cleanup event object", ++ &psConnection->hDmaEventObject); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", failure); ++ ++ OSAtomicWrite(&psConnection->ui32NumDmaTransfersInFlight, 0); ++ psConnection->bAcceptDmaRequests = IMG_TRUE; ++#endif ++ ++ /* Register this connection with the sync core */ ++ eError = SyncRegisterConnection(&psConnection->psSyncConnectionData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "SyncRegisterConnection", failure); ++ ++ /* ++ * Register this connection and Sync PDump callback with ++ * the pdump core. Pass in the Sync connection data. 
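++	 * The SyncConnectionPDumpSyncBlocks callback gives the PDump core a way
++	 * to re-emit this connection's sync blocks when a capture starts.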
++ */ ++ eError = PDumpRegisterConnection(psConnection->psSyncConnectionData, ++ SyncConnectionPDumpSyncBlocks, ++ &psConnection->psPDumpConnectionData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PDumpRegisterConnection", failure); ++ ++ /* Allocate handle base for this connection */ ++ eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase, ++ PVRSRV_HANDLE_BASE_TYPE_CONNECTION); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", failure); ++ ++ /* get process handle base (if it doesn't exist it will be allocated) */ ++ eError = PVRSRVAcquireProcessHandleBase(psConnection->pid, &psProcessHandleBase); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAcquireProcessHandleBase", failure); ++ ++ /* hConnectionsLock now resides in PVRSRV_DEVICE_NODE */ ++ { ++ IMG_BOOL bHostStreamIsNull; ++ PVRSRV_RGXDEV_INFO *psRgxDevInfo; ++ PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnection); ++ ++ OSLockAcquire(psDevNode->hConnectionsLock); ++ dllist_add_to_tail(&psDevNode->sConnections, &psConnection->sConnectionListNode); ++#if defined(DEBUG) || defined(PDUMP) ++ PVR_LOG(("%s connected - (devID = %u)", psConnection->pszProcName, ++ psDevNode->sDevId.ui32InternalID)); ++#endif ++ OSLockRelease(psDevNode->hConnectionsLock); ++ ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ psRgxDevInfo = _RGX_DEVICE_INFO_FROM_NODE(psDevNode); ++ ++ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); ++ bHostStreamIsNull = (IMG_BOOL)(psRgxDevInfo->hHWPerfHostStream == NULL); ++ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); ++ ++ if (!bHostStreamIsNull) ++ { ++ if (TLStreamIsOpenForReading(psRgxDevInfo->hHWPerfHostStream)) ++ { ++ /* Announce this client connection in the host stream, if event mask is set */ ++ RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(psDevNode, psConnection->pid, psConnection->pszProcName); ++ } ++ } ++ } ++ } ++ ++ psConnection->psProcessHandleBase = psProcessHandleBase; ++ ++ *ppvPrivData = psConnection; ++ ++ return PVRSRV_OK; ++ ++failure: ++ ConnectionDataDestroy(psConnection); ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData) ++{ ++ PVRSRV_ERROR eErrorConnection, eErrorKernel; ++ CONNECTION_DATA *psConnectionData = pvConnectionData; ++ ++ gCurrentPurgeConnectionPid = psConnectionData->pid; ++ ++ eErrorConnection = ConnectionDataDestroy(psConnectionData); ++ if (eErrorConnection != PVRSRV_OK) ++ { ++ if (eErrorConnection == PVRSRV_ERROR_RETRY) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: Failed to purge connection data %p " ++ "(deferring destruction)", ++ __func__, ++ psConnectionData)); ++ } ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: Connection data %p deferred destruction finished", ++ __func__, ++ psConnectionData)); ++ } ++ ++ /* Check if possible resize the global handle base */ ++ eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE); ++ PVR_LOG_IF_ERROR(eErrorKernel, "PVRSRVPurgeHandles"); ++ ++ gCurrentPurgeConnectionPid = 0; ++ ++ return eErrorConnection; ++} ++ ++#if defined(SUPPORT_DMA_TRANSFER) ++static void WaitForOutstandingDma(CONNECTION_DATA *psConnectionData) ++{ ++ ++ PVRSRV_ERROR eError; ++ IMG_HANDLE hEvent; ++ IMG_UINT32 ui32Tries = 100; ++ ++#if defined(DMA_VERBOSE) ++ PVR_DPF((PVR_DBG_ERROR, ++ "Waiting on %d DMA transfers in flight...", OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight))); ++#endif ++ ++ eError = OSEventObjectOpen(psConnectionData->hDmaEventObject, &hEvent); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); ++ return; ++ } ++ 
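++	/* Poll in 50 ms steps: with ui32Tries starting at 100 this bounds the
++	 * wait to roughly 5 seconds, the same budget as the commented-out
++	 * DMA_TRANSFER_TIMEOUT_US below. */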
++ while (OSAtomicRead(&psConnectionData->ui32NumDmaTransfersInFlight) != 0) ++ { ++ /* ++ #define DMA_TRANSFER_TIMEOUT_US (5000000ULL) ++ ++ This currently doesn't work properly. Wait time is not as requested. ++ Using OSSleepms instead ++ ++ OSEventObjectWaitKernel(hEvent, DMA_TRANSFER_TIMEOUT_US); ++ */ ++ OSSleepms(50); ++ if (!ui32Tries) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Timeout while waiting on outstanding DMA transfers!", __func__)); ++ break; ++ } ++ ++ ui32Tries--; ++ } ++ ++ OSEventObjectClose(hEvent); ++} ++#endif ++ ++void PVRSRVCommonConnectionDisconnect(void *pvDataPtr) ++{ ++ CONNECTION_DATA *psConnectionData = pvDataPtr; ++ PVRSRV_DEVICE_NODE *psDevNode = OSGetDevNode(psConnectionData); ++ ++ OSLockAcquire(psDevNode->hConnectionsLock); ++ dllist_remove_node(&psConnectionData->sConnectionListNode); ++ OSLockRelease(psDevNode->hConnectionsLock); ++ ++ /* Notify the PDump core if the pdump control client is disconnecting */ ++ if (psConnectionData->ui32ClientFlags & SRV_FLAGS_PDUMPCTRL) ++ { ++ PDumpDisconnectionNotify(psDevNode); ++ } ++#if defined(SUPPORT_DMA_TRANSFER) ++ OSLockAcquire(psConnectionData->hDmaReqLock); ++ ++ psConnectionData->bAcceptDmaRequests = IMG_FALSE; ++ ++ OSLockRelease(psConnectionData->hDmaReqLock); ++ ++ WaitForOutstandingDma(psConnectionData); ++ ++ OSEventObjectDestroy(psConnectionData->hDmaEventObject); ++ OSLockDestroy(psConnectionData->hDmaReqLock); ++#endif ++ ++#if defined(DEBUG) || defined(PDUMP) ++ PVR_LOG(("%s disconnected - (devID = %u)", psConnectionData->pszProcName, ++ psDevNode->sDevId.ui32InternalID)); ++#endif ++ ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) ++#endif ++ { ++ /* Defer the release of the connection data */ ++ psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData; ++ psConnectionData->sCleanupThreadFn.pvData = psConnectionData; ++ psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE; ++ CLEANUP_THREAD_SET_RETRY_COUNT(&psConnectionData->sCleanupThreadFn, ++ CLEANUP_THREAD_RETRY_COUNT_DEFAULT); ++ PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn); ++ } ++} ++ ++IMG_PID PVRSRVGetPurgeConnectionPid(void) ++{ ++ return gCurrentPurgeConnectionPid; ++} ++ ++/* Prefix for debug messages about Active Connections */ ++#define DEBUG_DUMP_CONNECTION_FORMAT_STR " P%d-V%d-T%d-%s," ++#define CONNECTIONS_PREFIX "Connections Device ID:%u(%d)" ++#define MAX_CONNECTIONS_PREFIX (29) ++#define MAX_DEBUG_DUMP_CONNECTION_STR_LEN (1+10+10+10+7+PVRSRV_CONNECTION_PROCESS_NAME_LEN) ++#define MAX_DEBUG_DUMP_STRING_LEN (1+MAX_CONNECTIONS_PREFIX+(3*MAX_DEBUG_DUMP_CONNECTION_STR_LEN)) ++ ++void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PDLLIST_NODE pNext, pNode; ++ ++ /* We must check for an initialised device before accessing its mutex. ++ * The mutex is initialised as part of DeviceInitialize() which occurs ++ * on first access to the device node. 
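++	 * Until that has happened, psDevNode->hConnectionsLock must not be
++	 * acquired.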
++ */ ++ if (psDevNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) ++ { ++ PVR_DUMPDEBUG_LOG("Connections: No Devices: No active connections"); ++ return; ++ } ++ ++ OSLockAcquire(psDevNode->hConnectionsLock); ++ if (dllist_is_empty(&psDevNode->sConnections)) ++ { ++ PVR_DUMPDEBUG_LOG(CONNECTIONS_PREFIX " No active connections", ++ (unsigned char)psDevNode->sDevId.ui32InternalID, ++ (unsigned char)psDevNode->sDevId.i32OsDeviceID); ++ } ++ else ++ { ++ IMG_CHAR sActiveConnections[MAX_DEBUG_DUMP_STRING_LEN]; ++ IMG_UINT16 i, uiPos = 0; ++ IMG_BOOL bPrinted = IMG_FALSE; ++ size_t uiSize = sizeof(sActiveConnections); ++ ++ IMG_CHAR szTmpConBuff[MAX_CONNECTIONS_PREFIX + 1]; ++ i = OSSNPrintf(szTmpConBuff, ++ MAX_CONNECTIONS_PREFIX, ++ CONNECTIONS_PREFIX, ++ (unsigned char)psDevNode->sDevId.ui32InternalID, ++ (unsigned char)psDevNode->sDevId.i32OsDeviceID); ++ OSStringLCopy(sActiveConnections+uiPos, szTmpConBuff, uiSize); ++ ++ /* Move the write offset to the end of the current string */ ++ uiPos += i; ++ /* Update the amount of remaining space available to copy into */ ++ uiSize -= i; ++ ++ dllist_foreach_node(&psDevNode->sConnections, pNode, pNext) ++ { ++ CONNECTION_DATA *sData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode); ++ ++ IMG_CHAR sTmpBuff[MAX_DEBUG_DUMP_CONNECTION_STR_LEN]; ++ i = OSSNPrintf(sTmpBuff, MAX_DEBUG_DUMP_CONNECTION_STR_LEN, ++ DEBUG_DUMP_CONNECTION_FORMAT_STR, sData->pid, sData->vpid, sData->tid, sData->pszProcName); ++ i = MIN(MAX_DEBUG_DUMP_CONNECTION_STR_LEN, i); ++ bPrinted = IMG_FALSE; ++ ++ OSStringLCopy(sActiveConnections+uiPos, sTmpBuff, uiSize); ++ ++ /* Move the write offset to the end of the current string */ ++ uiPos += i; ++ /* Update the amount of remaining space available to copy into */ ++ uiSize -= i; ++ ++ /* If there is not enough space to add another connection to this line, output the line */ ++ if (uiSize <= MAX_DEBUG_DUMP_CONNECTION_STR_LEN) ++ { ++ PVR_DUMPDEBUG_LOG("%s", sActiveConnections); ++ ++ /* ++ * Remove the "Connections:" prefix from the buffer. ++ * Leave the subsequent buffer contents indented by the same ++ * amount to aid in interpreting the debug output. ++ */ ++ uiPos = sizeof(CONNECTIONS_PREFIX) - 1; ++ /* Reset the amount of space available to copy into */ ++ uiSize = MAX_DEBUG_DUMP_STRING_LEN - uiPos; ++ bPrinted = IMG_TRUE; ++ } ++ } ++ ++ /* Only print the current line if it hasn't already been printed */ ++ if (!bPrinted) ++ { ++ /* Strip off the final comma */ ++ sActiveConnections[OSStringNLength(sActiveConnections, MAX_DEBUG_DUMP_STRING_LEN) - 1] = '\0'; ++ PVR_DUMPDEBUG_LOG("%s", sActiveConnections); ++ } ++#undef MAX_DEBUG_DUMP_STRING_LEN ++#undef MAX_DEBUG_DUMP_CONNECTIONS_PER_LINE ++ } ++ OSLockRelease(psDevNode->hConnectionsLock); ++} +diff --git a/drivers/gpu/drm/img-rogue/connection_server.h b/drivers/gpu/drm/img-rogue/connection_server.h +new file mode 100644 +index 000000000000..d11a6eae8bd9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/connection_server.h +@@ -0,0 +1,144 @@ ++/*************************************************************************/ /*! ++@File ++@Title Server side connection management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description API for server side connection management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(CONNECTION_SERVER_H)
++#define CONNECTION_SERVER_H
++
++
++#include "img_types.h"
++#include "img_defs.h"
++#include "handle.h"
++#include "pvrsrv_cleanup.h"
++
++/* Holds the deadline for the current cleanup time slice */
++extern IMG_UINT64 gui64TimesliceLimit;
++/* Counts the handle data entries freed during the current time slice */
++extern IMG_UINT32 gui32HandleDataFreeCounter;
++/* Maximum time that resource freeing may hold the lock */
++#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS (3000 * 1000) /* 3 ms */
++
++typedef struct _CONNECTION_DATA_
++{
++ PVRSRV_HANDLE_BASE *psHandleBase;
++ PROCESS_HANDLE_BASE *psProcessHandleBase;
++ struct _SYNC_CONNECTION_DATA_ *psSyncConnectionData;
++ struct _PDUMP_CONNECTION_DATA_ *psPDumpConnectionData;
++
++ /* Holds the client flags supplied at connection time */
++ IMG_UINT32 ui32ClientFlags;
++
++ /*
++ * OS specific data can be stored via this handle.
++ * See osconnection_server.h for a generic mechanism
++ * for initialising this field. 
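++ * It is set up by OSConnectionPrivateDataInit() at connect time and
++ * released by OSConnectionPrivateDataDeInit() when the connection data
++ * is destroyed.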
++ */ ++ IMG_HANDLE hOsPrivateData; ++ ++#define PVRSRV_CONNECTION_PROCESS_NAME_LEN (16) ++ IMG_PID pid; ++ IMG_PID vpid; ++ IMG_UINT32 tid; ++ IMG_CHAR pszProcName[PVRSRV_CONNECTION_PROCESS_NAME_LEN]; ++ ++ IMG_HANDLE hProcessStats; ++ ++ IMG_HANDLE hClientTLStream; ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ /* ++ * Connection-based values per application which can be modified by the ++ * AppHint settings 'OSid, OSidReg, bOSidAxiProtReg' for each application. ++ * These control where the connection's memory allocation is sourced from. ++ * ui32OSid, ui32OSidReg range from 0..(GPUVIRT_VALIDATION_NUM_OS - 1). ++ */ ++ IMG_UINT32 ui32OSid; ++ IMG_UINT32 ui32OSidReg; ++ IMG_BOOL bOSidAxiProtReg; ++#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ ++ ++#if defined(SUPPORT_DMA_TRANSFER) ++ IMG_BOOL bAcceptDmaRequests; ++ ATOMIC_T ui32NumDmaTransfersInFlight; ++ POS_LOCK hDmaReqLock; ++ IMG_HANDLE hDmaEventObject; ++#endif ++ /* Structure which is hooked into the cleanup thread work list */ ++ PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; ++ ++ DLLIST_NODE sConnectionListNode; ++ ++ /* List navigation for deferred freeing of connection data */ ++ struct _CONNECTION_DATA_ **ppsThis; ++ struct _CONNECTION_DATA_ *psNext; ++} CONNECTION_DATA; ++ ++#include "osconnection_server.h" ++ ++PVRSRV_ERROR PVRSRVCommonConnectionConnect(void **ppvPrivData, void *pvOSData); ++void PVRSRVCommonConnectionDisconnect(void *pvPrivData); ++ ++/**************************************************************************/ /*! ++@Function PVRSRVGetPurgeConnectionPid ++ ++@Description Returns PID associated with Connection currently being purged by ++ Cleanup Thread. If no Connection is purged 0 is returned. ++ ++@Return PID associated with currently purged connection or 0 if no ++ connection is being purged ++*/ /***************************************************************************/ ++IMG_PID PVRSRVGetPurgeConnectionPid(void); ++ ++void PVRSRVConnectionDebugNotify(PVRSRV_DEVICE_NODE *psDevNode, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVConnectionPrivateData) ++#endif ++static INLINE ++IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection) ++{ ++ return (psConnection != NULL) ? psConnection->hOsPrivateData : NULL; ++} ++ ++#endif /* !defined(CONNECTION_SERVER_H) */ +diff --git a/drivers/gpu/drm/img-rogue/cores/rgxcore_km_1.82.4.5.h b/drivers/gpu/drm/img-rogue/cores/rgxcore_km_1.82.4.5.h +new file mode 100644 +index 000000000000..7629672b1510 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/cores/rgxcore_km_1.82.4.5.h +@@ -0,0 +1,69 @@ ++/*************************************************************************/ /*! ++@Title RGX Core BVNC 1.82.4.5 ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXCORE_KM_1_82_4_5_H ++#define RGXCORE_KM_1_82_4_5_H ++ ++/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */ ++/* CS: @2503111 */ ++ ++/****************************************************************************** ++ * BVNC = 1.82.4.5 ++ *****************************************************************************/ ++#define RGX_BVNC_KM_B 1 ++#define RGX_BVNC_KM_V 82 ++#define RGX_BVNC_KM_N 4 ++#define RGX_BVNC_KM_C 5 ++ ++/****************************************************************************** ++ * Errata ++ *****************************************************************************/ ++ ++ ++ ++ ++/****************************************************************************** ++ * Enhancements ++ *****************************************************************************/ ++ ++ ++ ++#endif /* RGXCORE_KM_1_82_4_5_H */ +diff --git a/drivers/gpu/drm/img-rogue/cores/rgxcore_km_36.52.104.182.h b/drivers/gpu/drm/img-rogue/cores/rgxcore_km_36.52.104.182.h +new file mode 100644 +index 000000000000..a3f09195c78e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/cores/rgxcore_km_36.52.104.182.h +@@ -0,0 +1,74 @@ ++/*************************************************************************/ /*! ++@Title RGX Core BVNC 36.52.104.182 ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXCORE_KM_36_52_104_182_H ++#define RGXCORE_KM_36_52_104_182_H ++ ++/* Automatically generated file (04/10/2021 09:01:50): Do not edit manually */ ++/* CS: @5849605 */ ++ ++/****************************************************************************** ++ * BVNC = 36.52.104.182 ++ *****************************************************************************/ ++#define RGX_BVNC_KM_B 36 ++#define RGX_BVNC_KM_V 52 ++#define RGX_BVNC_KM_N 104 ++#define RGX_BVNC_KM_C 182 ++ ++/****************************************************************************** ++ * Errata ++ *****************************************************************************/ ++ ++#define FIX_HW_BRN_63553 ++ ++ ++ ++/****************************************************************************** ++ * Enhancements ++ *****************************************************************************/ ++#define HW_ERN_42290 ++#define HW_ERN_42606 ++#define HW_ERN_47025 ++#define HW_ERN_57596 ++ ++ ++ ++#endif /* RGXCORE_KM_36_52_104_182_H */ +diff --git a/drivers/gpu/drm/img-rogue/debug_common.c b/drivers/gpu/drm/img-rogue/debug_common.c +new file mode 100644 +index 000000000000..ee17281cb3a9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/debug_common.c +@@ -0,0 +1,1646 @@ ++/*************************************************************************/ /*! ++@File ++@Title Debug Functionality ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Creates common debug info entries. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(__linux__) ++#include ++#endif /* #if !defined(__linux__) */ ++ ++#include "debug_common.h" ++#include "pvrsrv.h" ++#include "di_server.h" ++#include "lists.h" ++#include "pvrversion.h" ++#include "rgx_options.h" ++#include "allocmem.h" ++#include "rgxfwutils.h" ++ ++#ifdef SUPPORT_RGX ++#include "rgxdevice.h" ++#include "rgxdebug.h" ++#include "rgxinit.h" ++#include "rgxmmudefs_km.h" ++static IMG_HANDLE ghGpuUtilUserDebugFS; ++#endif ++ ++static DI_ENTRY *gpsVersionDIEntry; ++static DI_ENTRY *gpsStatusDIEntry; ++ ++#ifdef SUPPORT_VALIDATION ++static DI_ENTRY *gpsTestMemLeakDIEntry; ++#endif /* SUPPORT_VALIDATION */ ++#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) ++static DI_ENTRY *gpsDebugLevelDIEntry; ++#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ ++ ++static void _DumpDebugDIPrintfWrapper(void *pvDumpDebugFile, const IMG_CHAR *pszFormat, ...) ++{ ++ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; ++ va_list ArgList; ++ ++ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, "%s\n", pszFormat); ++ ++ va_start(ArgList, pszFormat); ++ DIVPrintf(pvDumpDebugFile, szBuffer, ArgList); ++ va_end(ArgList); ++} ++ ++/*************************************************************************/ /*! 
++ Version DebugFS entry ++*/ /**************************************************************************/ ++ ++static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, ++ va_list va) ++{ ++ IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); ++ IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); ++ IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; ++ ++ (*pui64CurrentPosition)++; ++ ++ return (ui64CurrentPosition == ui64Position) ? psDevNode : NULL; ++} ++ ++static void *_VersionDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) ++{ ++ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); ++ IMG_UINT64 uiCurrentPosition = 1; ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ ++ PVR_UNREFERENCED_PARAMETER(psEntry); ++ ++ if (psPVRSRVData == NULL) { ++ PVR_DPF((PVR_DBG_ERROR, "psPVRSRVData = NULL")); ++ return NULL; ++ } ++ ++ if (*pui64Pos == 0) ++ { ++ return DI_START_TOKEN; ++ } ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, ++ _DebugVersionCompare_AnyVaCb, ++ &uiCurrentPosition, ++ *pui64Pos); ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ ++ return psDeviceNode; ++} ++ ++static void _VersionDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) ++{ ++ PVR_UNREFERENCED_PARAMETER(psEntry); ++ PVR_UNREFERENCED_PARAMETER(pvPriv); ++} ++ ++static void *_VersionDINext(OSDI_IMPL_ENTRY *psEntry,void *pvPriv, ++ IMG_UINT64 *pui64Pos) ++{ ++ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); ++ IMG_UINT64 uiCurrentPosition = 1; ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ ++ (*pui64Pos)++; ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, ++ _DebugVersionCompare_AnyVaCb, ++ &uiCurrentPosition, ++ *pui64Pos); ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ ++ return psDeviceNode; ++} ++ ++#define DI_PRINT_VERSION_FMTSPEC \ ++ "%s Version: %u.%u @ %u (%s) build options: 0x%08x %s\n" ++#define STR_DEBUG "debug" ++#define STR_RELEASE "release" ++ ++#if defined(DEBUG) || defined(SUPPORT_VALIDATION) ++#define BUILD_OPT_LEN 80 ++ ++static inline void _AppendOptionStr(IMG_CHAR pszBuildOptions[], const IMG_CHAR* str, OSDI_IMPL_ENTRY *psEntry, IMG_UINT32* pui32BuildOptionLen) ++{ ++ IMG_UINT32 ui32BuildOptionLen = *pui32BuildOptionLen; ++ const IMG_UINT32 strLen = OSStringLength(str); ++ const IMG_UINT32 optStrLen = sizeof(IMG_CHAR) * (BUILD_OPT_LEN-1); ++ ++ if ((ui32BuildOptionLen + strLen) > optStrLen) ++ { ++ pszBuildOptions[ui32BuildOptionLen] = '\0'; ++ DIPrintf(psEntry, "%s\n", pszBuildOptions); ++ ui32BuildOptionLen = 0; ++ } ++ if (strLen < optStrLen) ++ { ++ OSStringLCopy(pszBuildOptions+ui32BuildOptionLen, str, strLen); ++ ui32BuildOptionLen += strLen - 1; ++ } ++ *pui32BuildOptionLen = ui32BuildOptionLen; ++} ++#endif /* DEBUG || SUPPORT_VALIDATION */ ++ ++static int _VersionDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvPriv) ++{ ++ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); ++ ++ if (pvPriv == DI_START_TOKEN) ++ { ++ if (psPVRSRVData->sDriverInfo.bIsNoMatch) ++ { ++ const BUILD_INFO *psBuildInfo; ++ ++ psBuildInfo = &psPVRSRVData->sDriverInfo.sUMBuildInfo; ++ DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, ++ "UM Driver", ++ PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion), ++ PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion), ++ psBuildInfo->ui32BuildRevision, ++ (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? 
++ STR_DEBUG : STR_RELEASE, ++ psBuildInfo->ui32BuildOptions, ++ PVR_BUILD_DIR); ++ ++ psBuildInfo = &psPVRSRVData->sDriverInfo.sKMBuildInfo; ++ DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, ++ "KM Driver (" PVR_ARCH_NAME ")", ++ PVRVERSION_UNPACK_MAJ(psBuildInfo->ui32BuildVersion), ++ PVRVERSION_UNPACK_MIN(psBuildInfo->ui32BuildVersion), ++ psBuildInfo->ui32BuildRevision, ++ (psBuildInfo->ui32BuildType == BUILD_TYPE_DEBUG) ? ++ STR_DEBUG : STR_RELEASE, ++ psBuildInfo->ui32BuildOptions, ++ PVR_BUILD_DIR); ++ } ++ else ++ { ++ /* bIsNoMatch is `false` in one of the following cases: ++ * - UM & KM version parameters actually match. ++ * - A comparison between UM & KM has not been made yet, because no ++ * client ever connected. ++ * ++ * In both cases, available (KM) version info is the best output we ++ * can provide. ++ */ ++ DIPrintf(psEntry, "Driver Version: %s (%s) (%s) build options: " ++ "0x%08lx %s\n", PVRVERSION_STRING, PVR_ARCH_NAME, ++ PVR_BUILD_TYPE, RGX_BUILD_OPTIONS_KM, PVR_BUILD_DIR); ++ } ++ } ++ else if (pvPriv != NULL) ++ { ++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) pvPriv; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; ++#ifdef SUPPORT_RGX ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; ++#if defined(DEBUG) || defined(SUPPORT_VALIDATION) ++ IMG_CHAR pszBuildOptions[BUILD_OPT_LEN]; ++ IMG_UINT32 ui32BuildOptionLen = 0; ++ static const char* aszOptions[] = RGX_BUILD_OPTIONS_LIST; ++ int i = 0; ++#endif ++#endif /* SUPPORT_RGX */ ++ IMG_BOOL bFwVersionInfoPrinted = IMG_FALSE; ++ ++ DIPrintf(psEntry, "\nDevice Name: %s\n", psDevConfig->pszName); ++ DIPrintf(psEntry, "Device ID: %u:%d\n", psDevNode->sDevId.ui32InternalID, ++ psDevNode->sDevId.i32OsDeviceID); ++ ++ if (psDevConfig->pszVersion) ++ { ++ DIPrintf(psEntry, "Device Version: %s\n", ++ psDevConfig->pszVersion); ++ } ++ ++ if (psDevNode->pfnDeviceVersionString) ++ { ++ IMG_CHAR *pszVerStr; ++ ++ if (psDevNode->pfnDeviceVersionString(psDevNode, ++ &pszVerStr) == PVRSRV_OK) ++ { ++ DIPrintf(psEntry, "%s\n", pszVerStr); ++ ++ OSFreeMem(pszVerStr); ++ } ++ } ++ ++#ifdef SUPPORT_RGX ++ /* print device's firmware version info */ ++ if (psDevInfo->psRGXFWIfOsInitMemDesc != NULL) ++ { ++ /* psDevInfo->psRGXFWIfOsInitMemDesc should be permanently mapped */ ++ if (psDevInfo->psRGXFWIfOsInit != NULL) ++ { ++ if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated) ++ { ++ const RGXFWIF_COMPCHECKS *psRGXCompChecks = ++ &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks; ++ IMG_UINT32 ui32DDKVer = psRGXCompChecks->ui32DDKVersion; ++ ++ DIPrintf(psEntry, DI_PRINT_VERSION_FMTSPEC, ++ "Firmware", ++ PVRVERSION_UNPACK_MAJ(ui32DDKVer), ++ PVRVERSION_UNPACK_MIN(ui32DDKVer), ++ psRGXCompChecks->ui32DDKBuild, ++ ((psRGXCompChecks->ui32BuildOptions & ++ OPTIONS_DEBUG_MASK) ? 
STR_DEBUG : STR_RELEASE),
++ psRGXCompChecks->ui32BuildOptions,
++ PVR_BUILD_DIR);
++ bFwVersionInfoPrinted = IMG_TRUE;
++
++#if defined(DEBUG) || defined(SUPPORT_VALIDATION)
++ DIPrintf(psEntry, "Firmware Build Options:\n");
++
++ for (i = 0; i < ARRAY_SIZE(aszOptions); i++)
++ {
++ if ((psRGXCompChecks->ui32BuildOptions & 1<<i))
++ {
++ _AppendOptionStr(pszBuildOptions, aszOptions[i], psEntry, &ui32BuildOptionLen);
++ }
++ }
++ if (ui32BuildOptionLen)
++ {
++ DIPrintf(psEntry, "%s\n", pszBuildOptions);
++ }
++#endif
++ }
++ }
++ }
++
++ if (!bFwVersionInfoPrinted)
++ {
++ DIPrintf(psEntry, "Firmware Version: Info unavailable\n");
++ }
++#endif /* SUPPORT_RGX */
++ }
++
++ return 0;
++}
++
++#if defined(SUPPORT_RGX) && defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
++
++static PVRSRV_ERROR SendPowerCounterCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
++ RGXFWIF_COUNTER_DUMP_REQUEST eRequestType,
++ IMG_UINT32 *pui32kCCBCommandSlot)
++{
++ PVRSRV_ERROR eError;
++ RGXFWIF_KCCB_CMD sCounterDumpCmd;
++
++ sCounterDumpCmd.eCmdType = RGXFWIF_KCCB_CMD_COUNTER_DUMP;
++ sCounterDumpCmd.uCmdData.sCounterDumpConfigData.eCounterDumpRequest = eRequestType;
++
++ eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
++ RGXFWIF_DM_GP,
++ &sCounterDumpCmd,
++ 0,
++ PDUMP_FLAGS_CONTINUOUS,
++ pui32kCCBCommandSlot);
++ PVR_LOG_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot");
++
++ return eError;
++}
++
++static int _DebugPowerDataDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry);
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ IMG_UINT32 ui32kCCBCommandSlot;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_UNREFERENCED_PARAMETER(pvData);
++
++ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Device not initialised when "
++ "power counter data was requested!"));
++ return -EIO;
++ }
++
++ OSLockAcquire(psDevInfo->hCounterDumpingLock);
++
++ eError = SendPowerCounterCommand(psDeviceNode,
++ RGXFWIF_PWR_COUNTER_DUMP_SAMPLE,
++ &ui32kCCBCommandSlot);
++
++ if (eError != PVRSRV_OK)
++ {
++ OSLockRelease(psDevInfo->hCounterDumpingLock);
++ return -EIO;
++ }
++
++ /* Wait for the FW to complete the command */
++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo,
++ ui32kCCBCommandSlot,
++ PDUMP_FLAGS_CONTINUOUS);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_LOG_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
++ OSLockRelease(psDevInfo->hCounterDumpingLock);
++ return -EIO;
++ }
++
++ /* Read back the buffer */
++ {
++ IMG_UINT32* pui32PowerBuffer;
++ IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod;
++ IMG_UINT32 i, j;
++
++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCounterBufferMemDesc,
++ (void**)&pui32PowerBuffer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_LOG_ERROR(eError, "DevmemAcquireCpuVirtAddr");
++ OSLockRelease(psDevInfo->hCounterDumpingLock);
++ return -EIO;
++ }
++
++ ui32NumOfRegs = *pui32PowerBuffer++;
++ ui32SamplePeriod = *pui32PowerBuffer++;
++
++ if (ui32NumOfRegs)
++ {
++ DIPrintf(psEntry, "Power counter data for device\n");
++ DIPrintf(psEntry, "Sample period: 0x%08x\n", ui32SamplePeriod);
++
++ for (i = 0; i < ui32NumOfRegs; i++)
++ {
++ IMG_UINT32 ui32High, ui32Low;
++ IMG_UINT32 ui32RegOffset = *pui32PowerBuffer++;
++ IMG_UINT32 ui32NumOfInstances = *pui32PowerBuffer++;
++
++ PVR_ASSERT(ui32NumOfInstances);
++
++ DIPrintf(psEntry, "0x%08x:", ui32RegOffset);
++
++ for (j = 0; j < ui32NumOfInstances; j++)
++ {
++ ui32Low = *pui32PowerBuffer++;
++ ui32High = *pui32PowerBuffer++;
++
++ DIPrintf(psEntry, " 0x%016llx",
++ (IMG_UINT64) ui32Low | (IMG_UINT64) ui32High << 32);
++ }
++
++ DIPrintf(psEntry, "\n");
++ }
++ }
++
++ DevmemReleaseCpuVirtAddr(psDevInfo->psCounterBufferMemDesc);
++ }
++
++ OSLockRelease(psDevInfo->hCounterDumpingLock);
++
++ return eError;
++}
++
++static IMG_INT64 PowerDataSet(const IMG_CHAR __user *pcBuffer,
++ IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos,
++ void *pvData)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData;
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ RGXFWIF_COUNTER_DUMP_REQUEST eRequest;
++ IMG_UINT32 ui32kCCBCommandSlot;
++
++ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO);
++ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO);
++ PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL);
++ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL);
++
++ if (psDeviceNode->eDevState != 
PVRSRV_DEVICE_STATE_ACTIVE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Device not initialised when " ++ "power counter data was requested!")); ++ return -EIO; ++ } ++ ++ if (pcBuffer[0] == '1') ++ { ++ eRequest = RGXFWIF_PWR_COUNTER_DUMP_START; ++ } ++ else if (pcBuffer[0] == '0') ++ { ++ eRequest = RGXFWIF_PWR_COUNTER_DUMP_STOP; ++ } ++ else ++ { ++ return -EINVAL; ++ } ++ ++ OSLockAcquire(psDevInfo->hCounterDumpingLock); ++ ++ SendPowerCounterCommand(psDeviceNode, ++ eRequest, ++ &ui32kCCBCommandSlot); ++ ++ OSLockRelease(psDevInfo->hCounterDumpingLock); ++ ++ *pui64Pos += ui64Count; ++ return ui64Count; ++} ++ ++#endif /* defined(SUPPORT_RGX) && defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */ ++ ++/*************************************************************************/ /*! ++ Status DebugFS entry ++*/ /**************************************************************************/ ++ ++static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, ++ va_list va) ++{ ++ IMG_UINT64 *pui64CurrentPosition = va_arg(va, IMG_UINT64 *); ++ IMG_UINT64 ui64Position = va_arg(va, IMG_UINT64); ++ IMG_UINT64 ui64CurrentPosition = *pui64CurrentPosition; ++ ++ (*pui64CurrentPosition)++; ++ ++ return (ui64CurrentPosition == ui64Position) ? psDevNode : NULL; ++} ++ ++static void *_DebugStatusDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) ++{ ++ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); ++ IMG_UINT64 uiCurrentPosition = 1; ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ ++ if (*pui64Pos == 0) ++ { ++ return DI_START_TOKEN; ++ } ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, ++ _DebugStatusCompare_AnyVaCb, ++ &uiCurrentPosition, ++ *pui64Pos); ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ ++ return psDeviceNode; ++} ++ ++static void _DebugStatusDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVR_UNREFERENCED_PARAMETER(psEntry); ++ PVR_UNREFERENCED_PARAMETER(pvData); ++} ++ ++static void *_DebugStatusDINext(OSDI_IMPL_ENTRY *psEntry, ++ void *pvData, ++ IMG_UINT64 *pui64Pos) ++{ ++ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); ++ IMG_UINT64 uiCurrentPosition = 1; ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ (*pui64Pos)++; ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, ++ _DebugStatusCompare_AnyVaCb, ++ &uiCurrentPosition, ++ *pui64Pos); ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ ++ return psDeviceNode; ++} ++ ++static int _DebugStatusDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ if (pvData == DI_START_TOKEN) ++ { ++ PVRSRV_DATA *psPVRSRVData = DIGetPrivData(psEntry); ++ ++ if (psPVRSRVData != NULL) ++ { ++ switch (psPVRSRVData->eServicesState) ++ { ++ case PVRSRV_SERVICES_STATE_OK: ++ DIPrintf(psEntry, "Driver Status: OK\n"); ++ break; ++ case PVRSRV_SERVICES_STATE_BAD: ++ DIPrintf(psEntry, "Driver Status: BAD\n"); ++ break; ++ case PVRSRV_SERVICES_STATE_UNDEFINED: ++ DIPrintf(psEntry, "Driver Status: UNDEFINED\n"); ++ break; ++ default: ++ DIPrintf(psEntry, "Driver Status: UNKNOWN (%d)\n", ++ psPVRSRVData->eServicesState); ++ break; ++ } ++ } ++ } ++ else if (pvData != NULL) ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; ++ IMG_CHAR *pszStatus = ""; ++ IMG_CHAR *pszReason = ""; ++ PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus; ++ PVRSRV_DEVICE_HEALTH_REASON eHealthReason; ++ ++ DIPrintf(psEntry, 
"\nDevice ID: %u:%d\n", psDeviceNode->sDevId.ui32InternalID, ++ psDeviceNode->sDevId.i32OsDeviceID); ++ ++ /* Update the health status now if possible... */ ++ if (psDeviceNode->pfnUpdateHealthStatus) ++ { ++ psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE); ++ } ++ eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); ++ eHealthReason = OSAtomicRead(&psDeviceNode->eHealthReason); ++ ++ switch (eHealthStatus) ++ { ++ case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszStatus = "OK"; break; ++ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszStatus = "NOT RESPONDING"; break; ++ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszStatus = "DEAD"; break; ++ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszStatus = "FAULT"; break; ++ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszStatus = "UNDEFINED"; break; ++ default: pszStatus = "UNKNOWN"; break; ++ } ++ ++ switch (eHealthReason) ++ { ++ case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break; ++ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " (Asserted)"; break; ++ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " (Poll failing)"; break; ++ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " (Global Event Object timeouts rising)"; break; ++ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " (KCCB offset invalid)"; break; ++ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " (KCCB stalled)"; break; ++ case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " (Idling)"; break; ++ case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " (Restarting)"; break; ++ case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " (Missing interrupts)"; break; ++ default: pszReason = " (Unknown reason)"; break; ++ } ++ ++ DIPrintf(psEntry, "Firmware Status: %s%s\n", pszStatus, pszReason); ++ if (PVRSRV_ERROR_LIMIT_REACHED) ++ { ++ DIPrintf(psEntry, "Server Errors: %d+\n", IMG_UINT32_MAX); ++ } ++ else ++ { ++ DIPrintf(psEntry, "Server Errors: %d\n", PVRSRV_KM_ERRORS); ++ } ++ ++ ++ /* Write other useful stats to aid the test cycle... */ ++ if (psDeviceNode->pvDevice != NULL) ++ { ++#ifdef SUPPORT_RGX ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ const RGXFWIF_HWRINFOBUF *psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; ++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; ++ ++#ifdef PVRSRV_DEBUG_LISR_EXECUTION ++ /* Show the detected #LISR, #MISR scheduled calls */ ++ DIPrintf(psEntry, "RGX #LISR: %llu\n", psDeviceNode->ui64nLISR); ++ DIPrintf(psEntry, "RGX #MISR: %llu\n", psDeviceNode->ui64nMISR); ++#endif /* PVRSRV_DEBUG_LISR_EXECUTION */ ++ ++ /* Calculate the number of HWR events in total across all the DMs... */ ++ if (psHWRInfoBuf != NULL) ++ { ++ IMG_UINT32 ui32HWREventCount = 0; ++ IMG_UINT32 ui32CRREventCount = 0; ++ IMG_UINT32 ui32DMIndex; ++ ++ for (ui32DMIndex = 0; ui32DMIndex < RGXFWIF_DM_MAX; ui32DMIndex++) ++ { ++ ui32HWREventCount += psHWRInfoBuf->aui32HwrDmLockedUpCount[ui32DMIndex]; ++ ui32CRREventCount += psHWRInfoBuf->aui32HwrDmOverranCount[ui32DMIndex]; ++ } ++ ++ DIPrintf(psEntry, "HWR Event Count: %d\n", ui32HWREventCount); ++ DIPrintf(psEntry, "CRR Event Count: %d\n", ui32CRREventCount); ++#ifdef PVRSRV_STALLED_CCB_ACTION ++ /* Write the number of Sync Lockup Recovery (SLR) events... 
*/ ++ DIPrintf(psEntry, "SLR Event Count: %d\n", psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested); ++#endif /* PVRSRV_STALLED_CCB_ACTION */ ++ } ++ ++ /* Show error counts */ ++ DIPrintf(psEntry, "WGP Error Count: %d\n", psDevInfo->sErrorCounts.ui32WGPErrorCount); ++ DIPrintf(psEntry, "TRP Error Count: %d\n", psDevInfo->sErrorCounts.ui32TRPErrorCount); ++ ++ /* ++ * Guest drivers do not support the following functionality: ++ * - Perform actual on-chip fw tracing. ++ * - Collect actual on-chip GPU utilization stats. ++ * - Perform actual on-chip GPU power/dvfs management. ++ * - As a result no more information can be provided. ++ */ ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ if (psFwSysData != NULL) ++ { ++ DIPrintf(psEntry, "FWF Event Count: %d\n", psFwSysData->ui32FWFaults); ++ } ++ ++ /* Write the number of APM events... */ ++ DIPrintf(psEntry, "APM Event Count: %d\n", psDevInfo->ui32ActivePMReqTotal); ++ ++ /* Write the current GPU Utilisation values... */ ++ if (psDevInfo->pfnGetGpuUtilStats && ++ eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK) ++ { ++ RGXFWIF_GPU_UTIL_STATS sGpuUtilStats; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode, ++ ghGpuUtilUserDebugFS, ++ &sGpuUtilStats); ++ ++ if ((eError == PVRSRV_OK) && ++ ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative)) ++ { ++ IMG_UINT64 util; ++ IMG_UINT32 rem; ++ ++ util = 100 * sGpuUtilStats.ui64GpuStatActive; ++ util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem); ++ ++ DIPrintf(psEntry, "GPU Utilisation: %u%%\n", (IMG_UINT32)util); ++ } ++ else ++ { ++ DIPrintf(psEntry, "GPU Utilisation: -\n"); ++ } ++ } ++ } ++#endif /* SUPPORT_RGX */ ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT64 DebugStatusSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, ++ IMG_UINT64 *pui64Pos, void *pvData) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); ++ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); ++ PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); ++ PVR_RETURN_IF_FALSE(pcBuffer[0] == 'k' || pcBuffer[0] == 'K', -EINVAL); ++ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); ++ ++ psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD; ++ ++ *pui64Pos += ui64Count; ++ return ui64Count; ++} ++ ++/*************************************************************************/ /*! ++ Dump Debug DebugFS entry ++*/ /**************************************************************************/ ++ ++static int _DebugDumpDebugDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); ++ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ if (psDeviceNode->pvDevice != NULL) ++ { ++ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, ++ _DumpDebugDIPrintfWrapper, psEntry); ++ } ++ ++ return 0; ++} ++ ++#ifdef SUPPORT_RGX ++ ++/*************************************************************************/ /*! ++ Firmware Trace DebugFS entry ++*/ /**************************************************************************/ ++ ++static int _DebugFWTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ if (psDevInfo != NULL) ++ { ++ RGXDumpFirmwareTrace(_DumpDebugDIPrintfWrapper, psEntry, psDevInfo); ++ } ++ ++ return 0; ++} ++ ++/*************************************************************************/ /*! 
++ Firmware Translated Page Tables DebugFS entry ++*/ /**************************************************************************/ ++ ++static void _DocumentFwMapping(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32FwVA, ++ IMG_CPU_PHYADDR sCpuPA, ++ IMG_DEV_PHYADDR sDevPA, ++ IMG_UINT64 ui64PTE) ++{ ++#if defined(RGX_FEATURE_MIPS_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ DIPrintf(psEntry, "| 0x%8X | " ++ "0x%16" IMG_UINT64_FMTSPECX " | " ++ "0x%16" IMG_UINT64_FMTSPECX " | " ++ "%s%s%s |\n", ++ ui32FwVA, ++ (IMG_UINT64) sCpuPA.uiAddr, ++ sDevPA.uiAddr, ++ gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(ui64PTE)], ++ gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(ui64PTE)], ++ gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(ui64PTE)]); ++ } ++ else ++#endif ++ { ++ /* META and RISCV use a subset of the GPU's virtual address space */ ++ DIPrintf(psEntry, "| 0x%8X | " ++ "0x%16" IMG_UINT64_FMTSPECX " | " ++ "0x%16" IMG_UINT64_FMTSPECX " | " ++ "%s%s%s%s%s%s |\n", ++ ui32FwVA, ++ (IMG_UINT64) sCpuPA.uiAddr, ++ sDevPA.uiAddr, ++ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN) ? "P" : " ", ++ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_PM_SRC_EN) ? "PM" : " ", ++#if defined(RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) ++ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN) ? "B" : " ", ++#else ++ " ", ++#endif ++ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_CC_EN) ? "C" : " ", ++ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_READ_ONLY_EN) ? "RO" : "RW", ++ BITMASK_HAS(ui64PTE, RGX_MMUCTRL_PT_DATA_VALID_EN) ? "V" : " "); ++ } ++} ++ ++static int _FirmwareMappingsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ IMG_UINT32 ui32FwVA; ++ IMG_UINT32 ui32FwPageSize; ++ IMG_UINT32 ui32OSID; ++ ++ psDeviceNode = DIGetPrivData(psEntry); ++ ++ if ((psDeviceNode == NULL) || ++ (psDeviceNode->pvDevice == NULL) || ++ (((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice)->psKernelMMUCtx == NULL)) ++ { ++ /* The Kernel MMU context containing the Firmware mappings is not initialised */ ++ return 0; ++ } ++ ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ DIPrintf(psEntry, "+-----------------+------------------------+------------------------+--------------+\n" ++ "| Firmware | CPU | Device | PTE |\n" ++ "| Virtual Address | Physical Address | Physical Address | Flags |\n" ++ "+-----------------+------------------------+------------------------+ +\n"); ++ ++#if defined(RGX_FEATURE_MIPS_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ DIPrintf(psEntry, "| RI/XI = Read / Execution Inhibit |\n" ++ "| C = Cache Coherent |\n" ++ "| D = Dirty Page Table Entry |\n" ++ "| V = Valid Page Table Entry |\n" ++ "| G = Global Page Table Entry |\n" ++ "+-----------------+------------------------+------------------------+--------------+\n"); ++ ++ /* MIPS uses the same page size as the OS */ ++ ui32FwPageSize = OSGetPageSize(); ++ } ++ else ++#endif ++ { ++ DIPrintf(psEntry, "| P = Pending Page Table Entry |\n" ++ "| PM = Parameter Manager Source |\n" ++ "| B = Bypass SLC |\n" ++ "| C = Cache Coherent |\n" ++ "| RW/RO = Device Access Rights |\n" ++ "| V = Valid Page Table Entry |\n" ++ "+-----------------+------------------------+------------------------+--------------+\n"); ++ ++ ui32FwPageSize = BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); ++ } ++ ++ for (ui32OSID = 0; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) ++ { ++ IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) 
((RGX_FIRMWARE_RAW_HEAP_BASE + ++ (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)) & UINT_MAX); ++ IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_SIZE & UINT_MAX); ++ ++ DIPrintf(psEntry, "| OS ID %u |\n" ++ "+-----------------+------------------------+------------------------+--------------+\n", ui32OSID); ++ ++ for (ui32FwVA = ui32FwHeapBase; ++ ui32FwVA < ui32FwHeapEnd; ++ ui32FwVA += ui32FwPageSize) ++ { ++ PVRSRV_ERROR eError; ++ IMG_UINT64 ui64PTE = 0U; ++ IMG_CPU_PHYADDR sCpuPA = {0U}; ++ IMG_DEV_PHYADDR sDevPA = {0U}; ++ ++ eError = RGXGetFwMapping(psDevInfo, ui32FwVA, &sCpuPA, &sDevPA, &ui64PTE); ++ ++ if (eError == PVRSRV_OK) ++ { ++ _DocumentFwMapping(psEntry, psDevInfo, ui32FwVA, sCpuPA, sDevPA, ui64PTE); ++ } ++ else if (eError != PVRSRV_ERROR_DEVICEMEM_NO_MAPPING) ++ { ++ PVR_LOG_ERROR(eError, "RGXGetFwMapping"); ++ return -EIO; ++ } ++ } ++ ++ DIPrintf(psEntry, "+-----------------+------------------------+------------------------+--------------+\n"); ++ ++ if (PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ break; ++ } ++ } ++ ++ return 0; ++} ++ ++#ifdef SUPPORT_FIRMWARE_GCOV ++ ++static void *_FirmwareGcovDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ if (psDevInfo != NULL) ++ { ++ if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL) ++ { ++ void *pvCpuVirtAddr; ++ DevmemAcquireCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc, &pvCpuVirtAddr); ++ return *pui64Pos ? NULL : pvCpuVirtAddr; ++ } ++ } ++ ++ return NULL; ++} ++ ++static void _FirmwareGcovDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ if (psDevInfo != NULL) ++ { ++ if (psDevInfo->psFirmwareGcovBufferMemDesc != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psFirmwareGcovBufferMemDesc); ++ } ++ } ++} ++ ++static void *_FirmwareGcovDINext(OSDI_IMPL_ENTRY *psEntry, ++ void *pvData, ++ IMG_UINT64 *pui64Pos) ++{ ++ PVR_UNREFERENCED_PARAMETER(psEntry); ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ PVR_UNREFERENCED_PARAMETER(pui64Pos); ++ return NULL; ++} ++ ++static int _FirmwareGcovDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ if (psDevInfo != NULL) ++ { ++ DIWrite(psEntry, pvData, psDevInfo->ui32FirmwareGcovSize); ++ } ++ return 0; ++} ++ ++#endif /* SUPPORT_FIRMWARE_GCOV */ ++ ++#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS ++ ++/*************************************************************************/ /*! 
++ Power monitoring DebugFS entry ++*/ /**************************************************************************/ ++ ++static int _PowMonTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = DIGetPrivData(psEntry); ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ if (psDevInfo != NULL) ++ { ++ RGXDumpPowerMonitoring(_DumpDebugDIPrintfWrapper, psEntry, psDevInfo); ++ } ++ ++ return 0; ++} ++ ++#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ ++ ++#ifdef SUPPORT_VALIDATION ++ ++#ifndef SYS_RGX_DEV_UNMAPPED_FW_REG ++#define SYS_RGX_DEV_UNMAPPED_FW_REG 0XFFFFFFFF ++#endif ++#define DI_RGXREGS_TIMEOUT_MS 1000 ++ ++/*************************************************************************/ /*! ++ RGX Registers Dump DebugFS entry ++*/ /**************************************************************************/ ++ ++static IMG_INT64 _RgxRegsSeek(IMG_UINT64 ui64Offset, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -1); ++ ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ PVR_LOG_RETURN_IF_FALSE(ui64Offset <= (psDevInfo->ui32RegSize - 4), ++ "register offset is too big", -1); ++ ++ return ui64Offset; ++} ++ ++static IMG_INT64 _RgxRegsRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, ++ IMG_UINT64 *pui64Pos, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT64 ui64RegVal = 0; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ IMG_UINT64 ui64CompRes; ++ ++ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -ENXIO); ++ PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, ++ "wrong RGX register size", -EIO); ++ PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)), ++ "register read offset isn't aligned", -EINVAL); ++ ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG) ++ { ++ if (!psDevInfo->bFirmwareInitialised) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but " ++ "Firmware isn't yet initialised\n")); ++ return -EIO; ++ } ++ ++ reinit_completion(&psDevInfo->sFwRegs.sRegComp); ++ ++ eError = RGXScheduleRgxRegCommand(psDevInfo, ++ 0x00, ++ ui64Count, ++ (IMG_UINT32) *pui64Pos, ++ IMG_FALSE); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand"); ++ return -EIO; ++ } ++ ++ ui64CompRes = wait_for_completion_timeout(&psDevInfo->sFwRegs.sRegComp, ++ msecs_to_jiffies(DI_RGXREGS_TIMEOUT_MS)); ++ if (!ui64CompRes) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "FW RGX Register access timeout %#x\n", ++ (IMG_UINT32) *pui64Pos)); ++ return -EIO; ++ } ++ ++ OSCachedMemCopy(pcBuffer, &psDevInfo->sFwRegs.ui64RegVal, ui64Count); ++ } ++ else ++ { ++ ui64RegVal = ui64Count == 4 ? 
++ OSReadHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos) : ++ OSReadHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos); ++ OSCachedMemCopy(pcBuffer, &ui64RegVal, ui64Count); ++ } ++ ++ return ui64Count; ++} ++ ++static IMG_INT64 _RgxRegsWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, ++ IMG_UINT64 *pui64Pos, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*)pvData; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT64 ui64RegVal = 0; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ /* ignore the '\0' character */ ++ ui64Count -= 1; ++ ++ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", -ENXIO); ++ PVR_LOG_RETURN_IF_FALSE(ui64Count == 4 || ui64Count == 8, ++ "wrong RGX register size", -EIO); ++ PVR_LOG_RETURN_IF_FALSE(!(*pui64Pos & (ui64Count - 1)), ++ "register write offset isn't aligned", -EINVAL); ++ ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ if (*pui64Pos >= SYS_RGX_DEV_UNMAPPED_FW_REG) ++ { ++ if (!psDevInfo->bFirmwareInitialised) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGX Register offset is above PCI mapped range but " ++ "Firmware isn't yet initialised\n")); ++ return -EIO; ++ } ++ ++ if (ui64Count == 4) ++ ui64RegVal = (IMG_UINT64) *((IMG_UINT32 *) pcBuffer); ++ else ++ ui64RegVal = *((IMG_UINT64 *) pcBuffer); ++ ++ eError = RGXScheduleRgxRegCommand(psDevInfo, ++ ui64RegVal, ++ ui64Count, ++ (IMG_UINT32) *pui64Pos, ++ IMG_TRUE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "RGXScheduleRgxRegCommand"); ++ return -EIO; ++ } ++ ++ } ++ else ++ { ++ if (ui64Count == 4) ++ { ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, *pui64Pos, ++ *((IMG_UINT32 *) (void *) pcBuffer)); ++ } ++ else ++ { ++ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, *pui64Pos, ++ *((IMG_UINT64 *) (void *) pcBuffer)); ++ } ++ } ++ ++ return ui64Count; ++} ++ ++#endif /* SUPPORT_VALIDATION */ ++ ++#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) ++#define RISCV_DMI_SIZE (8U) ++ ++static IMG_INT64 _RiscvDmiRead(IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, ++ IMG_UINT64 *pui64Pos, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; ++ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; ++ ++ ui64Count = MIN(RISCV_DMI_SIZE, ui64Count); ++ memcpy(pcBuffer, &psDebugInfo->ui64RiscvDmi, ui64Count); ++ ++ return ui64Count; ++} ++ ++static IMG_INT64 _RiscvDmiWrite(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, ++ IMG_UINT64 *pui64Pos, void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; ++ ++ if (psDevInfo == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: devinfo is NULL", __func__)); ++ return 0; ++ } ++ ++ ui64Count -= 1; /* Drop `\0` */ ++ ui64Count = MIN(RISCV_DMI_SIZE, ui64Count); ++ ++ memcpy(&psDebugInfo->ui64RiscvDmi, pcBuffer, ui64Count); ++ ++ RGXRiscvDmiOp(psDevInfo, &psDebugInfo->ui64RiscvDmi); ++ ++ return ui64Count; ++} ++#endif ++ ++#endif /* SUPPORT_RGX */ ++ ++#ifdef SUPPORT_VALIDATION ++ ++static int TestMemLeakDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ PVR_RETURN_IF_FALSE(pvData != NULL, -EINVAL); ++ ++ DIPrintf(psEntry, "os: %s, %u\ngpu: %s, %u\nmmu: %s, %u\n", ++ psPVRSRVData->sMemLeakIntervals.ui32OSAlloc ? "enabled" : "disabled", ++ psPVRSRVData->sMemLeakIntervals.ui32OSAlloc, ++ psPVRSRVData->sMemLeakIntervals.ui32GPU ?
"enabled" : "disabled", ++ psPVRSRVData->sMemLeakIntervals.ui32GPU, ++ psPVRSRVData->sMemLeakIntervals.ui32MMU ? "enabled" : "disabled", ++ psPVRSRVData->sMemLeakIntervals.ui32MMU); ++ ++ return 0; ++} ++ ++static IMG_INT64 TestMemLeakDISet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, ++ IMG_UINT64 *pui64Pos, void *pvData) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ IMG_CHAR *pcTemp; ++ unsigned long ui32MemLeakInterval; ++ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); ++ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); ++ PVR_RETURN_IF_FALSE(ui64Count <= 16, -EINVAL); ++ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); ++ ++ pcTemp = strchr(pcBuffer, ','); ++ ++ if (kstrtoul(pcTemp+1, 0, &ui32MemLeakInterval) != 0) ++ { ++ return -EINVAL; ++ } ++ ++ if (strncmp(pcBuffer, "os", pcTemp-pcBuffer) == 0) ++ { ++ psPVRSRVData->sMemLeakIntervals.ui32OSAlloc = ui32MemLeakInterval; ++ } ++ else if (strncmp(pcBuffer, "gpu", pcTemp-pcBuffer) == 0) ++ { ++ psPVRSRVData->sMemLeakIntervals.ui32GPU = ui32MemLeakInterval; ++ } ++ else if (strncmp(pcBuffer, "mmu", pcTemp-pcBuffer) == 0) ++ { ++ psPVRSRVData->sMemLeakIntervals.ui32MMU = ui32MemLeakInterval; ++ } ++ else ++ { ++ return -EINVAL; ++ } ++ ++ *pui64Pos += ui64Count; ++ return ui64Count; ++} ++ ++#endif /* SUPPORT_VALIDATION */ ++ ++#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) ++ ++/*************************************************************************/ /*! ++ Debug level DebugFS entry ++*/ /**************************************************************************/ ++ ++static int DebugLevelDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ DIPrintf(psEntry, "%u\n", OSDebugLevel()); ++ ++ return 0; ++} ++ ++#ifndef __GNUC__ ++static int __builtin_ffsl(long int x) ++{ ++ for (size_t i = 0; i < sizeof(x) * 8; i++) ++ { ++ if (x & (1 << i)) ++ { ++ return i + 1; ++ } ++ } ++ return 0; ++} ++#endif /* __GNUC__ */ ++ ++static IMG_INT64 DebugLevelSet(const IMG_CHAR *pcBuffer, IMG_UINT64 ui64Count, ++ IMG_UINT64 *pui64Pos, void *pvData) ++{ ++ const IMG_UINT uiMaxBufferSize = 6; ++ IMG_UINT32 ui32Level; ++ ++ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); ++ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); ++ PVR_RETURN_IF_FALSE(ui64Count > 0 && ui64Count < uiMaxBufferSize, -EINVAL); ++ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); ++ ++ if (sscanf(pcBuffer, "%u", &ui32Level) == 0) ++ { ++ return -EINVAL; ++ } ++ ++ OSSetDebugLevel(ui32Level & ((1 << __builtin_ffsl(DBGPRIV_LAST)) - 1)); ++ ++ *pui64Pos += ui64Count; ++ return ui64Count; ++} ++#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ ++ ++PVRSRV_ERROR DebugCommonInitDriver(void) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psPVRSRVData != NULL); ++ ++ /* ++ * The DebugFS entries are designed to work in a single device system but ++ * this function will be called multiple times in a multi-device system. ++ * Return an error in this case. 
++ */ ++ if (gpsVersionDIEntry) ++ { ++ return -EEXIST; ++ } ++ ++#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE) ++ if (SORgxGpuUtilStatsRegister(&ghGpuUtilUserDebugFS) != PVRSRV_OK) ++ { ++ return -ENOMEM; ++ } ++#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */ ++ ++ { ++ DI_ITERATOR_CB sIterator = { ++ .pfnStart = _VersionDIStart, ++ .pfnStop = _VersionDIStop, ++ .pfnNext = _VersionDINext, ++ .pfnShow = _VersionDIShow ++ }; ++ ++ eError = DICreateEntry("version", NULL, &sIterator, psPVRSRVData, ++ DI_ENTRY_TYPE_GENERIC, &gpsVersionDIEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++ ++ { ++ DI_ITERATOR_CB sIterator = { ++ .pfnStart = _DebugStatusDIStart, ++ .pfnStop = _DebugStatusDIStop, ++ .pfnNext = _DebugStatusDINext, ++ .pfnShow = _DebugStatusDIShow, ++ .pfnWrite = DebugStatusSet, ++ //'K' expected + Null terminator ++ .ui32WriteLenMax= ((1U)+1U) ++ }; ++ eError = DICreateEntry("status", NULL, &sIterator, psPVRSRVData, ++ DI_ENTRY_TYPE_GENERIC, &gpsStatusDIEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++ ++#ifdef SUPPORT_VALIDATION ++ { ++ DI_ITERATOR_CB sIterator = { ++ .pfnShow = TestMemLeakDIShow, ++ .pfnWrite = TestMemLeakDISet, ++ //Function only allows max 15 chars + Null terminator ++ .ui32WriteLenMax = ((15U)+1U) ++ }; ++ eError = DICreateEntry("test_memleak", NULL, &sIterator, psPVRSRVData, ++ DI_ENTRY_TYPE_GENERIC, &gpsTestMemLeakDIEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++#endif /* SUPPORT_VALIDATION */ ++ ++#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) ++ { ++ DI_ITERATOR_CB sIterator = { ++ .pfnShow = DebugLevelDIShow, ++ .pfnWrite = DebugLevelSet, ++ //Max value of 255(3 char) + Null terminator ++ .ui32WriteLenMax =((3U)+1U) ++ }; ++ eError = DICreateEntry("debug_level", NULL, &sIterator, NULL, ++ DI_ENTRY_TYPE_GENERIC, &gpsDebugLevelDIEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ ++ ++ return PVRSRV_OK; ++ ++return_error_: ++ DebugCommonDeInitDriver(); ++ ++ return eError; ++} ++ ++void DebugCommonDeInitDriver(void) ++{ ++#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) ++ if (gpsDebugLevelDIEntry != NULL) ++ { ++ DIDestroyEntry(gpsDebugLevelDIEntry); ++ } ++#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */ ++ ++#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE) ++ if (ghGpuUtilUserDebugFS != NULL) ++ { ++ SORgxGpuUtilStatsUnregister(ghGpuUtilUserDebugFS); ++ ghGpuUtilUserDebugFS = NULL; ++ } ++#endif /* defined(SUPPORT_RGX) && !defined(NO_HARDWARE) */ ++ ++#ifdef SUPPORT_VALIDATION ++ if (gpsTestMemLeakDIEntry != NULL) ++ { ++ DIDestroyEntry(gpsTestMemLeakDIEntry); ++ } ++#endif /* SUPPORT_VALIDATION */ ++ ++ if (gpsStatusDIEntry != NULL) ++ { ++ DIDestroyEntry(gpsStatusDIEntry); ++ } ++ ++ if (gpsVersionDIEntry != NULL) ++ { ++ DIDestroyEntry(gpsVersionDIEntry); ++ } ++} ++ ++PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; ++ PVRSRV_ERROR eError; ++ ++ { ++ IMG_CHAR pszDeviceId[sizeof("gpu4294967296")]; ++ ++ OSSNPrintf(pszDeviceId, sizeof(pszDeviceId), "gpu%02d", ++ psDeviceNode->sDevId.ui32InternalID); ++ ++ eError = DICreateGroup(pszDeviceId, NULL, &psDebugInfo->psGroup); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++ ++ { ++ DI_ITERATOR_CB sIterator = {.pfnShow = _DebugDumpDebugDIShow}; ++ eError = DICreateEntry("debug_dump", psDebugInfo->psGroup, &sIterator, ++ psDeviceNode, DI_ENTRY_TYPE_GENERIC, ++ 
&psDebugInfo->psDumpDebugEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++ ++#ifdef SUPPORT_RGX ++ if (! PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ { ++ DI_ITERATOR_CB sIterator = {.pfnShow = _DebugFWTraceDIShow}; ++ eError = DICreateEntry("firmware_trace", psDebugInfo->psGroup, &sIterator, ++ psDeviceNode, DI_ENTRY_TYPE_GENERIC, ++ &psDebugInfo->psFWTraceEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++ ++#ifdef SUPPORT_FIRMWARE_GCOV ++ { ++ DI_ITERATOR_CB sIterator = { ++ .pfnStart = _FirmwareGcovDIStart, ++ .pfnStop = _FirmwareGcovDIStop, ++ .pfnNext = _FirmwareGcovDINext, ++ .pfnShow = _FirmwareGcovDIShow ++ }; ++ ++ eError = DICreateEntry("firmware_gcov", psDebugInfo->psGroup, &sIterator, ++ psDeviceNode, DI_ENTRY_TYPE_GENERIC, ++ &psDebugInfo->psFWGCOVEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++#endif /* SUPPORT_FIRMWARE_GCOV */ ++ ++ { ++ DI_ITERATOR_CB sIterator = {.pfnShow = _FirmwareMappingsDIShow}; ++ eError = DICreateEntry("firmware_mappings", psDebugInfo->psGroup, &sIterator, ++ psDeviceNode, DI_ENTRY_TYPE_GENERIC, ++ &psDebugInfo->psFWMappingsEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++ ++#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) ++ { ++ DI_ITERATOR_CB sIterator = { ++ .pfnRead = _RiscvDmiRead, ++ .pfnWrite = _RiscvDmiWrite, ++ .ui32WriteLenMax = ((RISCV_DMI_SIZE)+1U) ++ }; ++ eError = DICreateEntry("riscv_dmi", psDebugInfo->psGroup, &sIterator, psDeviceNode, ++ DI_ENTRY_TYPE_RANDOM_ACCESS, &psDebugInfo->psRiscvDmiDIEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ psDebugInfo->ui64RiscvDmi = 0ULL; ++ } ++#endif /* SUPPORT_VALIDATION || SUPPORT_RISCV_GDB */ ++ } ++#ifdef SUPPORT_VALIDATION ++ { ++ DI_ITERATOR_CB sIterator = { ++ .pfnSeek = _RgxRegsSeek, ++ .pfnRead = _RgxRegsRead, ++ .pfnWrite = _RgxRegsWrite, ++ //Max size of input binary data is 4 bytes (UINT32) or 8 bytes (UINT64) ++ .ui32WriteLenMax = ((8U)+1U) ++ }; ++ eError = DICreateEntry("rgxregs", psDebugInfo->psGroup, &sIterator, psDeviceNode, ++ DI_ENTRY_TYPE_RANDOM_ACCESS, &psDebugInfo->psRGXRegsEntry); ++ ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++#endif /* SUPPORT_VALIDATION */ ++ ++#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS ++ if (! 
PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ DI_ITERATOR_CB sIterator = { ++ .pfnShow = _PowMonTraceDIShow ++ }; ++ eError = DICreateEntry("power_mon", psDebugInfo->psGroup, &sIterator, psDeviceNode, ++ DI_ENTRY_TYPE_GENERIC, &psDebugInfo->psPowMonEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ ++#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS ++ { ++ DI_ITERATOR_CB sIterator = { ++ .pfnShow = _DebugPowerDataDIShow, ++ .pfnWrite = PowerDataSet, ++ //Expects '0' or '1' plus Null terminator ++ .ui32WriteLenMax = ((1U)+1U) ++ }; ++ eError = DICreateEntry("power_data", psDebugInfo->psGroup, &sIterator, psDeviceNode, ++ DI_ENTRY_TYPE_GENERIC, &psDebugInfo->psPowerDataEntry); ++ PVR_GOTO_IF_ERROR(eError, return_error_); ++ } ++#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */ ++#endif /* SUPPORT_RGX */ ++ ++ return PVRSRV_OK; ++ ++return_error_: ++ DebugCommonDeInitDevice(psDeviceNode); ++ ++ return eError; ++} ++ ++void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_DEVICE_DEBUG_INFO *psDebugInfo = &psDeviceNode->sDebugInfo; ++ ++#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS ++ if (psDebugInfo->psPowerDataEntry != NULL) ++ { ++ DIDestroyEntry(psDebugInfo->psPowerDataEntry); ++ psDebugInfo->psPowerDataEntry = NULL; ++ } ++#endif /* SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */ ++ ++#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS ++ if (psDebugInfo->psPowMonEntry != NULL) ++ { ++ DIDestroyEntry(psDebugInfo->psPowMonEntry); ++ psDebugInfo->psPowMonEntry = NULL; ++ } ++#endif /* SUPPORT_POWER_VALIDATION_VIA_DEBUGFS */ ++ ++#ifdef SUPPORT_VALIDATION ++ if (psDebugInfo->psRGXRegsEntry != NULL) ++ { ++ DIDestroyEntry(psDebugInfo->psRGXRegsEntry); ++ psDebugInfo->psRGXRegsEntry = NULL; ++ } ++#endif /* SUPPORT_VALIDATION */ ++ ++#ifdef SUPPORT_RGX ++ if (psDebugInfo->psFWTraceEntry != NULL) ++ { ++ DIDestroyEntry(psDebugInfo->psFWTraceEntry); ++ psDebugInfo->psFWTraceEntry = NULL; ++ } ++ ++#ifdef SUPPORT_FIRMWARE_GCOV ++ if (psDebugInfo->psFWGCOVEntry != NULL) ++ { ++ DIDestroyEntry(psDebugInfo->psFWGCOVEntry); ++ psDebugInfo->psFWGCOVEntry = NULL; ++ } ++#endif ++ ++ if (psDebugInfo->psFWMappingsEntry != NULL) ++ { ++ DIDestroyEntry(psDebugInfo->psFWMappingsEntry); ++ psDebugInfo->psFWMappingsEntry = NULL; ++ } ++ ++#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) ++ if (psDebugInfo->psRiscvDmiDIEntry != NULL) ++ { ++ DIDestroyEntry(psDebugInfo->psRiscvDmiDIEntry); ++ psDebugInfo->psRiscvDmiDIEntry = NULL; ++ } ++#endif ++#endif /* SUPPORT_RGX */ ++ ++ if (psDebugInfo->psDumpDebugEntry != NULL) ++ { ++ DIDestroyEntry(psDebugInfo->psDumpDebugEntry); ++ psDebugInfo->psDumpDebugEntry = NULL; ++ } ++ ++ if (psDebugInfo->psGroup != NULL) ++ { ++ DIDestroyGroup(psDebugInfo->psGroup); ++ psDebugInfo->psGroup = NULL; ++ } ++} +diff --git a/drivers/gpu/drm/img-rogue/debug_common.h b/drivers/gpu/drm/img-rogue/debug_common.h +new file mode 100644 +index 000000000000..e8b902f47114 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/debug_common.h +@@ -0,0 +1,55 @@ ++/*************************************************************************/ /*! ++@File ++@Title Common debug definitions and functions. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef DEBUG_COMMON_H ++#define DEBUG_COMMON_H ++ ++#include "pvrsrv_error.h" ++#include "device.h" ++ ++PVRSRV_ERROR DebugCommonInitDriver(void); ++void DebugCommonDeInitDriver(void); ++ ++PVRSRV_ERROR DebugCommonInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); ++void DebugCommonDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++#endif /* DEBUG_COMMON_H */ +diff --git a/drivers/gpu/drm/img-rogue/device.h b/drivers/gpu/drm/img-rogue/device.h +new file mode 100644 +index 000000000000..f5948d773cc1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/device.h +@@ -0,0 +1,540 @@ ++/**************************************************************************/ /*! ++@File ++@Title Common Device header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device related function templates and defines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef DEVICE_H ++#define DEVICE_H ++ ++#include "devicemem_heapcfg.h" ++#include "mmu_common.h" ++#include "ra.h" /* RA_ARENA */ ++#include "pvrsrv_device.h" ++#include "sync_checkpoint.h" ++#include "srvkm.h" ++#include "physheap.h" ++#include "sync_internal.h" ++#include "sysinfo.h" ++#include "dllist.h" ++ ++#include "rgx_bvnc_defs_km.h" ++ ++#include "lock.h" ++ ++#include "power.h" ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#include "virt_validation_defs.h" ++#endif ++ ++typedef struct _PVRSRV_POWER_DEV_TAG_ *PPVRSRV_POWER_DEV; ++ ++struct SYNC_RECORD; ++ ++struct _CONNECTION_DATA_; ++ ++/*************************************************************************/ /*! ++ @Function AllocUFOBlockCallback ++ @Description Device specific callback for allocation of a UFO block ++ ++ @Input psDeviceNode Pointer to device node to allocate ++ the UFO for. ++ @Output ppsMemDesc Pointer to pointer for the memdesc of ++ the allocation ++ @Output pui32SyncAddr FW Base address of the UFO block ++ @Output puiSyncPrimBlockSize Size of the UFO block ++ ++ @Return PVRSRV_OK if allocation was successful ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ DEVMEM_MEMDESC **ppsMemDesc, ++ IMG_UINT32 *pui32SyncAddr, ++ IMG_UINT32 *puiSyncPrimBlockSize); ++ ++/*************************************************************************/ /*! 
++ @Function FreeUFOBlockCallback ++ @Description Device specific callback for freeing of a UFO ++ ++ @Input psDeviceNode Pointer to device node that the UFO block was ++ allocated from. ++ @Input psMemDesc Pointer to the memdesc of the UFO ++ block to free. ++*/ /**************************************************************************/ ++typedef void (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ DEVMEM_MEMDESC *psMemDesc); ++ ++typedef struct _PVRSRV_DEVICE_IDENTIFIER_ ++{ ++ /* Pdump memory and register bank names */ ++ IMG_CHAR *pszPDumpDevName; ++ IMG_CHAR *pszPDumpRegName; ++ ++ /* Under Linux, this is the minor number of RenderNode corresponding to this Device */ ++ IMG_INT32 i32OsDeviceID; ++ /* Services layer enumeration of the device used in pvrdebug */ ++ IMG_UINT32 ui32InternalID; ++} PVRSRV_DEVICE_IDENTIFIER; ++ ++typedef struct _DEVICE_MEMORY_INFO_ ++{ ++ /* Heap count. Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */ ++ IMG_UINT32 ui32HeapCount; ++ ++ /* Blueprints for creating new device memory contexts */ ++ IMG_UINT32 uiNumHeapConfigs; ++ DEVMEM_HEAP_CONFIG *psDeviceMemoryHeapConfigArray; ++ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeap; ++} DEVICE_MEMORY_INFO; ++ ++#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL) ++#define DUMMY_PAGE ("DUMMY_PAGE") ++#define DEV_ZERO_PAGE ("DEV_ZERO_PAGE") ++#define PVR_DUMMY_PAGE_INIT_VALUE (0x0) ++#define PVR_ZERO_PAGE_INIT_VALUE (0x0) ++ ++typedef struct __DEFAULT_PAGE__ ++{ ++ /*Page handle for the page allocated (UMA/LMA)*/ ++ PG_HANDLE sPageHandle; ++ POS_LOCK psPgLock; ++ ATOMIC_T atRefCounter; ++ /*Default page size in terms of log2 */ ++ IMG_UINT32 ui32Log2PgSize; ++ IMG_UINT64 ui64PgPhysAddr; ++#if defined(PDUMP) ++ IMG_HANDLE hPdumpPg; ++#endif ++} PVRSRV_DEF_PAGE; ++ ++typedef enum _PVRSRV_DEVICE_STATE_ ++{ ++ PVRSRV_DEVICE_STATE_UNDEFINED = 0, ++ PVRSRV_DEVICE_STATE_INIT, ++ PVRSRV_DEVICE_STATE_ACTIVE, ++ PVRSRV_DEVICE_STATE_DEINIT, ++ PVRSRV_DEVICE_STATE_BAD, ++} PVRSRV_DEVICE_STATE; ++ ++typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_ ++{ ++ PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED = 0, ++ PVRSRV_DEVICE_HEALTH_STATUS_OK, ++ PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING, ++ PVRSRV_DEVICE_HEALTH_STATUS_DEAD, ++ PVRSRV_DEVICE_HEALTH_STATUS_FAULT ++} PVRSRV_DEVICE_HEALTH_STATUS; ++ ++typedef enum _PVRSRV_DEVICE_HEALTH_REASON_ ++{ ++ PVRSRV_DEVICE_HEALTH_REASON_NONE = 0, ++ PVRSRV_DEVICE_HEALTH_REASON_ASSERTED, ++ PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING, ++ PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS, ++ PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, ++ PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED, ++ PVRSRV_DEVICE_HEALTH_REASON_IDLING, ++ PVRSRV_DEVICE_HEALTH_REASON_RESTARTING, ++ PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS ++} PVRSRV_DEVICE_HEALTH_REASON; ++ ++typedef enum _PVRSRV_DEVICE_DEBUG_DUMP_STATUS_ ++{ ++ PVRSRV_DEVICE_DEBUG_DUMP_NONE = 0, ++ PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE ++} PVRSRV_DEVICE_DEBUG_DUMP_STATUS; ++ ++#ifndef DI_GROUP_DEFINED ++#define DI_GROUP_DEFINED ++typedef struct DI_GROUP DI_GROUP; ++#endif ++#ifndef DI_ENTRY_DEFINED ++#define DI_ENTRY_DEFINED ++typedef struct DI_ENTRY DI_ENTRY; ++#endif ++ ++typedef struct _PVRSRV_DEVICE_DEBUG_INFO_ ++{ ++ DI_GROUP *psGroup; ++ DI_ENTRY *psDumpDebugEntry; ++#ifdef SUPPORT_RGX ++ DI_ENTRY *psFWTraceEntry; ++#ifdef SUPPORT_FIRMWARE_GCOV ++ DI_ENTRY *psFWGCOVEntry; ++#endif ++ DI_ENTRY *psFWMappingsEntry; ++#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) ++ DI_ENTRY *psRiscvDmiDIEntry; ++ IMG_UINT64 ui64RiscvDmi; ++#endif
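++ /* ui64RiscvDmi above is the 8-byte scratch word exchanged with the firmware's RISC-V debug module interface through the "riscv_dmi" DI entry (see _RiscvDmiRead/_RiscvDmiWrite in debug_common.c). */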
++#endif /* SUPPORT_RGX */ ++#ifdef SUPPORT_VALIDATION ++ DI_ENTRY *psRGXRegsEntry; ++#endif /* SUPPORT_VALIDATION */ ++#ifdef SUPPORT_POWER_VALIDATION_VIA_DEBUGFS ++ DI_ENTRY *psPowMonEntry; ++#endif ++#ifdef SUPPORT_POWER_SAMPLING_VIA_DEBUGFS ++ DI_ENTRY *psPowerDataEntry; ++#endif ++} PVRSRV_DEVICE_DEBUG_INFO; ++ ++#if defined(PVRSRV_DEBUG_LISR_EXECUTION) ++#define RGX_LISR_INIT (0U) ++#define RGX_LISR_DEVICE_NOT_POWERED (1U) ++#define RGX_LISR_NOT_TRIGGERED_BY_HW (2U) ++#define RGX_LISR_FW_IRQ_COUNTER_NOT_UPDATED (3U) ++#define RGX_LISR_PROCESSED (4U) ++ ++typedef IMG_UINT32 LISR_STATUS; ++ ++typedef struct _LISR_EXECUTION_INFO_ ++{ ++ /* status of last LISR invocation */ ++ LISR_STATUS ui32Status; ++ ++ /* snapshot from the last LISR invocation */ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++ IMG_UINT32 aui32InterruptCountSnapshot[RGX_NUM_OS_SUPPORTED]; ++#else ++ IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM]; ++#endif ++ ++ /* time of the last LISR invocation */ ++ IMG_UINT64 ui64Clockns; ++} LISR_EXECUTION_INFO; ++ ++#define UPDATE_LISR_DBG_STATUS(status) psDeviceNode->sLISRExecutionInfo.ui32Status = (status) ++#define UPDATE_LISR_DBG_SNAPSHOT(idx, val) psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[idx] = (val) ++#define UPDATE_LISR_DBG_TIMESTAMP() psDeviceNode->sLISRExecutionInfo.ui64Clockns = OSClockns64() ++#define UPDATE_LISR_DBG_COUNTER() psDeviceNode->ui64nLISR++ ++#define UPDATE_MISR_DBG_COUNTER() psDeviceNode->ui64nMISR++ ++#else ++#define UPDATE_LISR_DBG_STATUS(status) ++#define UPDATE_LISR_DBG_SNAPSHOT(idx, val) ++#define UPDATE_LISR_DBG_TIMESTAMP() ++#define UPDATE_LISR_DBG_COUNTER() ++#define UPDATE_MISR_DBG_COUNTER() ++#endif /* defined(PVRSRV_DEBUG_LISR_EXECUTION) */ ++ ++typedef struct _PVRSRV_DEVICE_NODE_ ++{ ++ PVRSRV_DEVICE_IDENTIFIER sDevId; ++ ++ PVRSRV_DEVICE_STATE eDevState; ++ PVRSRV_DEVICE_FABRIC_TYPE eDevFabricType; ++ ++ ATOMIC_T eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */ ++ ATOMIC_T eHealthReason; /* Holds values from PVRSRV_DEVICE_HEALTH_REASON */ ++ ATOMIC_T eDebugDumpRequested; /* Holds values from PVRSRV_DEVICE_DEBUG_DUMP_STATUS */ ++ ++ IMG_HANDLE *hDebugTable; ++ ++ /* device specific MMU attributes */ ++ MMU_DEVICEATTRIBS *psMMUDevAttrs; ++ /* Device specific MMU firmware attributes, used only in some devices */ ++ MMU_DEVICEATTRIBS *psFirmwareMMUDevAttrs; ++ ++ PHYS_HEAP *psMMUPhysHeap; ++ ++ /* lock for power state transitions */ ++ POS_LOCK hPowerLock; ++ IMG_PID uiPwrLockOwnerPID; /* Only valid between lock and corresponding unlock ++ operations of hPowerLock */ ++ ++ /* current system device power state */ ++ PVRSRV_SYS_POWER_STATE eCurrentSysPowerState; ++ PPVRSRV_POWER_DEV psPowerDev; ++ ++ /* multicore configuration information */ ++ IMG_UINT32 ui32MultiCoreNumCores; /* total cores primary + secondaries. 
0 for non-multi core */ ++ IMG_UINT32 ui32MultiCorePrimaryId; /* primary core id for this device */ ++ IMG_UINT64 *pui64MultiCoreCapabilities; /* capabilities for each core */ ++ ++ /* ++ callbacks the device must support: ++ */ ++ ++ PVRSRV_ERROR (*pfnDevSLCFlushRange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ MMU_CONTEXT *psMMUContext, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_BOOL bInvalidate); ++ ++ PVRSRV_ERROR (*pfnInvalFBSCTable)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ MMU_CONTEXT *psMMUContext, ++ IMG_UINT64 ui64FBSCEntries); ++ ++ PVRSRV_ERROR (*pfnValidateOrTweakPhysAddrs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ MMU_DEVICEATTRIBS *psDevAttrs, ++ IMG_UINT64 *pui64Addr); ++ ++ void (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ MMU_CONTEXT *psMMUContext, ++ MMU_LEVEL eLevel, ++ IMG_BOOL bUnmap); ++ ++ PVRSRV_ERROR (*pfnMMUCacheInvalidateKick)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ IMG_UINT32 *pui32NextMMUInvalidateUpdate); ++ ++ IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); ++ ++ ++ void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); ++ ++ PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ IMG_BOOL bIsTimerPoll); ++ ++#if defined(SUPPORT_AUTOVZ) ++ void (*pfnUpdateAutoVzWatchdog)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); ++#endif ++ ++ PVRSRV_ERROR (*pfnValidationGPUUnitsPowerChange)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32NewState); ++ ++ PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); ++ ++ PVRSRV_ERROR (*pfnVerifyBVNC)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask); ++ ++ /* Method to drain device HWPerf packets from firmware buffer to host buffer */ ++ PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); ++ ++ PVRSRV_ERROR (*pfnDeviceVersionString)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString); ++ ++ PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed); ++ ++ PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2); ++ ++ PVRSRV_ERROR (*pfnAlignmentCheck)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32FWAlignChecksSize, IMG_UINT32 aui32FWAlignChecks[]); ++ IMG_BOOL (*pfnCheckDeviceFeature)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask); ++ ++ IMG_INT32 (*pfnGetDeviceFeatureValue)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, enum _RGX_FEATURE_WITH_VALUE_INDEX_ eFeatureIndex); ++ ++ PVRSRV_ERROR (*pfnGetMultiCoreInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32CapsSize, ++ IMG_UINT32 *pui32NumCores, IMG_UINT64 *pui64Caps); ++ ++ IMG_BOOL (*pfnHasFBCDCVersion31)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); ++ ++ MMU_DEVICEATTRIBS* (*pfnGetMMUDeviceAttributes)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_BOOL bKernelMemoryCtx); ++ ++ PVRSRV_DEVICE_CONFIG *psDevConfig; ++ ++ /* device post-finalise compatibility check */ ++ PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*); ++ ++ /* initialise device-specific physheaps */ ++ PVRSRV_ERROR (*pfnPhysMemDeviceHeapsInit) (struct _PVRSRV_DEVICE_NODE_ *); ++ ++ /* initialise fw mmu, if FW not using GPU mmu, NULL otherwise. 
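(For example, a MIPS firmware processor has its own MMU, while META and RISC-V firmware use a subset of the GPU's virtual address space.)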
*/ ++ PVRSRV_ERROR (*pfnFwMMUInit) (struct _PVRSRV_DEVICE_NODE_ *); ++ ++ /* information about the device's address space and heaps */ ++ DEVICE_MEMORY_INFO sDevMemoryInfo; ++ ++ /* device's shared-virtual-memory heap max virtual address */ ++ IMG_UINT64 ui64GeneralSVMHeapTopVA; ++ ++ ATOMIC_T iNumClockSpeedChanges; ++ ++ /* private device information */ ++ void *pvDevice; ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ RA_ARENA *psOSSharedArena; ++ RA_ARENA *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS]; ++#endif ++ ++ /* FW_MAIN, FW_CONFIG and FW_GUEST heaps. Should be part of registered heaps? */ ++ PHYS_HEAP *psFWMainPhysHeap; ++ PHYS_HEAP *psFWCfgPhysHeap; ++ PHYS_HEAP *apsFWPremapPhysHeap[RGX_NUM_OS_SUPPORTED]; ++ ++ IMG_UINT32 ui32RegisteredPhysHeaps; ++ PHYS_HEAP **papsRegisteredPhysHeaps; ++ ++ /* PHYS_HEAP Mapping table to the platform's physical memory heap(s) ++ * used by this device. The physical heaps are created based on ++ * the PHYS_HEAP_CONFIG data from the platform's system layer at device ++ * creation time. ++ * ++ * Contains PVRSRV_PHYS_HEAP_LAST entries for all the possible physical heaps allowed in the design. ++ * It allows the system layer PhysHeaps for the device to be identified for use in creating new PMRs. ++ * See PhysHeapCreatePMR() ++ */ ++ PHYS_HEAP *apsPhysHeap[PVRSRV_PHYS_HEAP_LAST]; ++ IMG_UINT32 ui32UserAllocHeapCount; ++ ++#if defined(SUPPORT_AUTOVZ) ++ /* Phys Heap reserved for storing the MMU mappings of firmware. ++ * The memory backing up this Phys Heap must persist between driver or OS reboots */ ++ PHYS_HEAP *psFwMMUReservedPhysHeap; ++#endif ++ ++ /* Flag indicating if the firmware has been initialised during the ++ * 1st boot of the Host driver according to the AutoVz life-cycle. */ ++ IMG_BOOL bAutoVzFwIsUp; ++ ++ struct _PVRSRV_DEVICE_NODE_ *psNext; ++ struct _PVRSRV_DEVICE_NODE_ **ppsThis; ++ ++ /* Functions for notification about memory contexts */ ++ PVRSRV_ERROR (*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ MMU_CONTEXT *psMMUContext, ++ IMG_HANDLE *hPrivData); ++ void (*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData); ++ ++ /* Functions for allocation/freeing of UFOs */ ++ AllocUFOBlockCallback pfnAllocUFOBlock; /*!< Callback for allocation of a block of UFO memory */ ++ FreeUFOBlockCallback pfnFreeUFOBlock; /*!< Callback for freeing of a block of UFO memory */ ++ ++ IMG_HANDLE hSyncServerRecordNotify; ++ POS_LOCK hSyncServerRecordLock; ++ IMG_UINT32 ui32SyncServerRecordCount; ++ IMG_UINT32 ui32SyncServerRecordCountHighWatermark; ++ DLLIST_NODE sSyncServerRecordList; ++ struct SYNC_RECORD *apsSyncServerRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN]; ++ IMG_UINT32 uiSyncServerRecordFreeIdx; ++ ++ IMG_HANDLE hSyncCheckpointRecordNotify; ++ POS_LOCK hSyncCheckpointRecordLock; ++ IMG_UINT32 ui32SyncCheckpointRecordCount; ++ IMG_UINT32 ui32SyncCheckpointRecordCountHighWatermark; ++ DLLIST_NODE sSyncCheckpointRecordList; ++ struct SYNC_CHECKPOINT_RECORD *apsSyncCheckpointRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN]; ++ IMG_UINT32 uiSyncCheckpointRecordFreeIdx; ++ ++ IMG_HANDLE hSyncCheckpointNotify; ++ POS_SPINLOCK hSyncCheckpointListLock; /*!< Protects sSyncCheckpointSyncsList */ ++ DLLIST_NODE sSyncCheckpointSyncsList; ++ ++ PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext; ++ PSYNC_PRIM_CONTEXT hSyncPrimContext; ++ ++ /* With this sync-prim we make sure the MMU cache is flushed ++ * before we free the page table memory */ ++ PVRSRV_CLIENT_SYNC_PRIM *psMMUCacheSyncPrim; ++ IMG_UINT32 
ui32NextMMUInvalidateUpdate; ++ ++ IMG_HANDLE hCmdCompNotify; ++ IMG_HANDLE hDbgReqNotify; ++ IMG_HANDLE hAppHintDbgReqNotify; ++ IMG_HANDLE hPhysHeapDbgReqNotify; ++ ++ PVRSRV_DEF_PAGE sDummyPage; ++ PVRSRV_DEF_PAGE sDevZeroPage; ++ ++ POSWR_LOCK hMemoryContextPageFaultNotifyListLock; ++ DLLIST_NODE sMemoryContextPageFaultNotifyListHead; ++ ++ /* System DMA capability */ ++ IMG_BOOL bHasSystemDMA; ++ IMG_HANDLE hDmaTxChan; ++ IMG_HANDLE hDmaRxChan; ++ ++#if defined(PDUMP) ++ /* ++ * FBC clear color register default value to use. ++ */ ++ IMG_UINT64 ui64FBCClearColour; ++ ++ /* Device-level callback which is called when pdump.exe starts. ++ * Should be implemented in device-specific init code, e.g. rgxinit.c ++ */ ++ PVRSRV_ERROR (*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++ /* device-level callback to return the pdump ID associated with a memory context */ ++ IMG_UINT32 (*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext); ++ ++ IMG_UINT8 *pui8DeferredSyncCPSignal; /*! Deferred fence events buffer */ ++ ++ IMG_UINT16 ui16SyncCPReadIdx; /*! Read index in the above deferred fence events buffer */ ++ ++ IMG_UINT16 ui16SyncCPWriteIdx; /*! Write index in the above deferred fence events buffer */ ++ ++ POS_LOCK hSyncCheckpointSignalLock; /*! Guards data shared between sleepable contexts */ ++ ++ void *pvSyncCPMISR; /*! MISR to emit pending/deferred fence signals */ ++ ++ void *hTransition; /*!< SyncCheckpoint PdumpTransition Cookie */ ++ ++ DLLIST_NODE sSyncCheckpointContextListHead; /*!< List head for the sync chkpt contexts */ ++ ++ POS_LOCK hSyncCheckpointContextListLock; /*! lock for accessing sync chkpt contexts list */ ++ ++#endif ++ ++#if defined(SUPPORT_VALIDATION) ++ POS_LOCK hValidationLock; ++#endif ++ ++ /* Members for linking which connections are open on this device */ ++ POS_LOCK hConnectionsLock; /*!< Lock protecting sConnections */ ++ DLLIST_NODE sConnections; /*!< The list of currently active connection objects for this device node */ ++ ++#if defined(PVRSRV_DEBUG_LISR_EXECUTION) ++ LISR_EXECUTION_INFO sLISRExecutionInfo; /*!< Information about the last execution of the LISR */ ++ IMG_UINT64 ui64nLISR; /*!< Number of LISR calls seen */ ++ IMG_UINT64 ui64nMISR; /*!< Number of MISR calls made */ ++#endif ++ ++ PVRSRV_DEVICE_DEBUG_INFO sDebugInfo; ++} PVRSRV_DEVICE_NODE; ++ ++/* ++ * Macros to be used instead of calling the pfns directly, since these macros ++ * will expand the feature passed as argument into the bitmask/index to work ++ * with the macros defined in rgx_bvnc_defs_km.h ++ */ ++#define PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, Feature) \ ++ psDevNode->pfnCheckDeviceFeature(psDevNode, RGX_FEATURE_##Feature##_BIT_MASK) ++#define PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, Feature) \ ++ psDevNode->pfnGetDeviceFeatureValue(psDevNode, RGX_FEATURE_##Feature##_IDX) ++ ++PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bInitSuccessful); ++ ++PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions); ++ ++ ++#endif /* DEVICE_H */ ++ ++/****************************************************************************** ++ End of file (device.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/device_connection.h b/drivers/gpu/drm/img-rogue/device_connection.h +new file mode 100644 +index 000000000000..24917745fb56 +--- 
/dev/null ++++ b/drivers/gpu/drm/img-rogue/device_connection.h +@@ -0,0 +1,123 @@ ++/*************************************************************************/ /*! ++@File device_connection.h ++@Title ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(DEVICE_CONNECTION_H) ++#define DEVICE_CONNECTION_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++#if defined(__KERNEL__) ++typedef struct _PVRSRV_DEVICE_NODE_ *SHARED_DEV_CONNECTION; ++#else ++#include "connection.h" ++typedef const struct PVRSRV_DEV_CONNECTION_TAG *SHARED_DEV_CONNECTION; ++#endif ++ ++/****************************************************************************** ++ * Device capability flags and masks ++ * ++ * Following bitmask shows allocated ranges and values for our device ++ * capability settings: ++ * ++ * 31 27 23 19 15 11 7 3 0 ++ * |...|...|...|...|...|...|...|... ++ * ** CACHE_COHERENT [0x1..0x2] ++ * x PVRSRV_CACHE_COHERENT_DEVICE_FLAG ++ * x. PVRSRV_CACHE_COHERENT_CPU_FLAG ++ * *... NONMAPPABLE_MEMORY [0x8] ++ * x... PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG ++ * *.... PDUMP_IS_RECORDING [0x10] ++ * x.... PVRSRV_PDUMP_IS_RECORDING ++ * ***........ DEVMEM_SVM_ALLOC [0x100..0x400] ++ * x........ PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED ++ * x......... PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED ++ * x.......... PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL ++ * *........... 
FBCDC_V3_1 [0x800] ++ * x........... FBCDC_V3_1_USED ++ * *............ PVRSRV_SYSTEM_DMA ++ * x............ PVRSRV_SYSTEM_DMA_USED ++ * |...|...|...|...|...|...|...|... ++ *****************************************************************************/ ++ ++/* Flag to be passed over the bridge during connection stating whether CPU cache coherent is available*/ ++#define PVRSRV_CACHE_COHERENT_SHIFT (0) ++#define PVRSRV_CACHE_COHERENT_DEVICE_FLAG (1U << PVRSRV_CACHE_COHERENT_SHIFT) ++#define PVRSRV_CACHE_COHERENT_CPU_FLAG (2U << PVRSRV_CACHE_COHERENT_SHIFT) ++#define PVRSRV_CACHE_COHERENT_EMULATE_FLAG (4U << PVRSRV_CACHE_COHERENT_SHIFT) ++#define PVRSRV_CACHE_COHERENT_MASK (7U << PVRSRV_CACHE_COHERENT_SHIFT) ++ ++/* Flag to be passed over the bridge during connection stating whether CPU non-mappable memory is present */ ++#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT (7) ++#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG (1U << PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT) ++ ++/* Flag to be passed over the bridge to indicate PDump activity */ ++#define PVRSRV_PDUMP_IS_RECORDING_SHIFT (4) ++#define PVRSRV_PDUMP_IS_RECORDING (1U << PVRSRV_PDUMP_IS_RECORDING_SHIFT) ++ ++/* Flag to be passed over the bridge during connection stating SVM allocation availability */ ++#define PVRSRV_DEVMEM_SVM_ALLOC_SHIFT (8) ++#define PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED (1U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) ++#define PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED (2U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) ++#define PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL (4U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT) ++ ++/* Flag to be passed over the bridge during connection stating whether GPU uses FBCDC v3.1 */ ++#define PVRSRV_FBCDC_V3_1_USED_SHIFT (11) ++#define PVRSRV_FBCDC_V3_1_USED (1U << PVRSRV_FBCDC_V3_1_USED_SHIFT) ++ ++/* Flag to be passed over the bridge during connection stating whether System has ++ DMA transfer capability to and from device memory */ ++#define PVRSRV_SYSTEM_DMA_SHIFT (12) ++#define PVRSRV_SYSTEM_DMA_USED (1U << PVRSRV_SYSTEM_DMA_SHIFT) ++ ++static INLINE IMG_HANDLE GetBridgeHandle(SHARED_DEV_CONNECTION hDevConnection) ++{ ++#if defined(__KERNEL__) ++ return hDevConnection; ++#else ++ return hDevConnection->hServices; ++#endif ++} ++ ++ ++#endif /* !defined(DEVICE_CONNECTION_H) */ +diff --git a/drivers/gpu/drm/img-rogue/devicemem.c b/drivers/gpu/drm/img-rogue/devicemem.c +new file mode 100644 +index 000000000000..1516b9416e0f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem.c +@@ -0,0 +1,2962 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Front End (nominally Client side part, but now invokable ++ from server too) of device memory management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ /**************************************************************************/ ++ ++#include "devicemem.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "pvrsrv_error.h" ++#include "allocmem.h" ++#include "ra.h" ++#include "osfunc.h" ++#include "osmmap.h" ++#include "devicemem_utils.h" ++#include "client_mm_bridge.h" ++#include "client_cache_bridge.h" ++#include "services_km.h" ++#include "pvrsrv_memallocflags_internal.h" ++ ++#if defined(PDUMP) ++#if defined(__KERNEL__) ++#include "pdump_km.h" ++#else ++#include "pdump_um.h" ++#endif ++#include "devicemem_pdump.h" ++#endif ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++#include "client_ri_bridge.h" ++#endif ++#include "client_devicememhistory_bridge.h" ++#include "info_page_client.h" ++ ++#include "rgx_heaps.h" ++#if defined(__KERNEL__) ++#include "pvrsrv.h" ++#include "rgxdefs_km.h" ++#include "rgx_bvnc_defs_km.h" ++#include "device.h" ++#include "rgxdevice.h" ++#include "pvr_ricommon.h" ++#include "pvrsrv_apphint.h" ++#include "oskm_apphint.h" ++#include "srvcore.h" ++#if defined(__linux__) ++#include "linux/kernel.h" ++#endif ++#else ++#include "srvcore_intern.h" ++#include "rgxdefs.h" ++#endif ++ ++#if defined(__KERNEL__) && defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++extern PVRSRV_ERROR RIDumpAllKM(void); ++#endif ++ ++#if defined(__KERNEL__) ++#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError) ++#else ++#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError) ++#endif ++ ++#if defined(__KERNEL__) ++/* Derive the virtual from the hPMR */ ++static ++IMG_UINT64 _GetPremappedVA(PMR *psPMR, PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; ++ ++ IMG_DEV_PHYADDR sDevAddr; ++ IMG_BOOL bValid; ++ PHYS_HEAP *psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; ++ IMG_DEV_PHYADDR sHeapAddr; ++ ++ eError = PhysHeapGetDevPAddr(psPhysHeap, &sHeapAddr); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", fail); ++ ++#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) ++{ ++ if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_UMA || ++ PhysHeapGetType(psPhysHeap) == 
PHYS_HEAP_TYPE_DMA) ++ { ++ IMG_DEV_PHYADDR sDevPAddrCorrected; ++ ++ PhysHeapCpuPAddrToDevPAddr(psPhysHeap, 1, &sDevPAddrCorrected, (IMG_CPU_PHYADDR *)&sHeapAddr); ++ sHeapAddr.uiAddr = sDevPAddrCorrected.uiAddr; ++ } ++} ++#endif ++ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddr", fail); ++ ++ eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sDevAddr, &bValid); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_IF_ERROR(eError, "PMR_DevPhysAddr"); ++ eError = PMRUnlockSysPhysAddresses(psPMR); ++ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr"); ++ goto fail; ++ } ++ ++ eError = PMRUnlockSysPhysAddresses(psPMR); ++ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddr"); ++ ++ ui64OptionalMapAddress = RGX_FIRMWARE_RAW_HEAP_BASE | (sDevAddr.uiAddr - sHeapAddr.uiAddr); ++ ++ PVR_DPF((PVR_DBG_ALLOC, "%s: sDevAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" sHeapAddr.uiAddr = 0x%"IMG_UINT64_FMTSPECx" => ui64OptionalMapAddress = 0x%"IMG_UINT64_FMTSPECx, ++ __func__, sDevAddr.uiAddr, sHeapAddr.uiAddr, ui64OptionalMapAddress)); ++fail: ++ return ui64OptionalMapAddress; ++} ++#endif ++ ++/***************************************************************************** ++ * Sub allocation internals * ++ *****************************************************************************/ ++static INLINE PVRSRV_MEMALLOCFLAGS_T ++DevmemOverrideFlagsOrPassThrough(SHARED_DEV_CONNECTION hDevConnection, PVRSRV_MEMALLOCFLAGS_T uiFlags) ++{ ++#if defined(__KERNEL__) && defined(RGX_FEATURE_GPU_CPU_COHERENCY) ++ /* ++ * Override the requested memory flags of FW allocations only, ++ * non-FW allocations pass-through unmodified. ++ * ++ * On fully coherent platforms: ++ * - We upgrade uncached, CPU-only cached or GPU-only cached to ++ * full coherency. This gives caching improvements for free. ++ * ++ * On ace-lite platforms: ++ * - If the allocation is not CPU cached, then there is nothing ++ * for the GPU to snoop regardless of the GPU cache setting. ++ * - If the allocation is not GPU cached, then the SLC will not ++ * be used and will not snoop the CPU even if it is CPU cached. ++ * - Therefore only the GPU setting can be upgraded to coherent ++ * if it is already GPU cached incoherent and the CPU is cached. ++ * ++ * All other platforms: ++ * - Do not modify the allocation flags. ++ */ ++ if (PVRSRV_CHECK_FW_MAIN(uiFlags)) ++ { ++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDevConnection; ++ ++ if (PVRSRVSystemSnoopingOfDeviceCache(psDevNode->psDevConfig) && ++ PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig)) ++ { ++ /* Clear existing flags, mark the allocation as fully coherent. */ ++ uiFlags &= ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK); ++ uiFlags |= PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; ++ } ++ else if ((PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)) && ++ (PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)) && ++ PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig) && ++ psDevNode->eDevFabricType == PVRSRV_DEVICE_FABRIC_ACELITE) ++ { ++ /* Upgrade the allocation from GPU cached incoherent to GPU cached coherent. 
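For example, a request that is CPU cached (coherent or incoherent) but GPU cached incoherent keeps its CPU mode and has only its GPU mode raised to coherent, so the SLC snoops the CPU caches.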
*/ ++ uiFlags &= ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK; ++ uiFlags |= PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT; ++ } ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(hDevConnection); ++#endif ++ ++ return uiFlags; ++} ++ ++static INLINE void ++CheckAnnotationLength(const IMG_CHAR *pszAnnotation) ++{ ++ IMG_UINT32 length = OSStringLength(pszAnnotation); ++ ++ if (length >= DEVMEM_ANNOTATION_MAX_LEN) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Annotation \"%s\" has been truncated to %d characters from %d characters", ++ __func__, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1, length)); ++ } ++} ++ ++static PVRSRV_ERROR ++AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_UINT32 uiLog2Quantum, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_BOOL bExportable, ++ const IMG_CHAR *pszAnnotation, ++ DEVMEM_IMPORT **ppsImport) ++{ ++ DEVMEM_IMPORT *psImport; ++ PVRSRV_MEMALLOCFLAGS_T uiOutFlags; ++ IMG_HANDLE hPMR; ++ PVRSRV_ERROR eError; ++ ++ eError = DevmemImportStructAlloc(hDevConnection, ++ &psImport); ++ PVR_GOTO_IF_ERROR(eError, failAlloc); ++ ++ /* check if shift value is not too big (sizeof(1ULL)) */ ++ PVR_ASSERT(uiLog2Quantum < sizeof(unsigned long long) * 8); ++ /* Check the size is a multiple of the quantum */ ++ PVR_ASSERT((uiSize & ((1ULL << uiLog2Quantum)-1)) == 0); [...] ++ DEVMEM_IMPORT *psImport = psMemDesc->psImport; ++ SHARED_DEV_CONNECTION hDevConnection; ++ IMG_HANDLE hPMR; ++ IMG_HANDLE hSrvDevMemHeap; ++ POS_LOCK hLock; ++ IMG_DEV_VIRTADDR sDevVAddr; ++ IMG_CPU_VIRTADDR pvCpuVAddr; ++ DEVMEM_PROPERTIES_T uiProperties; ++ ++ if (NULL == psImport) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Sparse memory import", __func__)); ++ goto e0; ++ } ++ ++ hDevConnection = psImport->hDevConnection; ++ hPMR = psImport->hPMR; ++ hLock = psImport->hLock; ++ sDevVAddr = psImport->sDeviceImport.sDevVAddr; ++ pvCpuVAddr = psImport->sCPUImport.pvCPUVAddr; ++ ++ if (NULL == hDevConnection) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Bridge handle", __func__)); ++ goto e0; ++ } ++ ++ if (NULL == hPMR) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PMR handle", __func__)); ++ goto e0; ++ } ++ ++ if ((uiSparseFlags & SPARSE_RESIZE_BOTH) && (0 == sDevVAddr.uiAddr)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Device Virtual Map", __func__)); ++ goto e0; ++ } ++ ++ if ((uiSparseFlags & SPARSE_MAP_CPU_ADDR) && (NULL == pvCpuVAddr)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CPU Virtual Map", __func__)); ++ goto e0; ++ } ++ ++ uiProperties = GetImportProperties(psMemDesc->psImport); ++ ++ if (uiProperties & DEVMEM_PROPERTIES_SECURE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Secure buffers currently do not support sparse changes", ++ __func__)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto e0; ++ } ++ ++ if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: This memory descriptor doesn't support sparse changes", ++ __func__)); ++ eError = PVRSRV_ERROR_INVALID_REQUEST; ++ goto e0; ++ } ++ ++#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE ++ if (psMemDesc->sCPUMemDesc.ui32RefCount > 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: This memory descriptor is mapped more than once (refcnt: %u) into " ++ "CPU Address space.\nRelease all CPU maps of this object and retry...", ++ __func__, psMemDesc->sCPUMemDesc.ui32RefCount)); ++ eError = PVRSRV_ERROR_OBJECT_STILL_REFERENCED; ++ goto e0; ++ } ++#endif ++ ++ hSrvDevMemHeap = 
psImport->sDeviceImport.psHeap->hDevMemServerHeap; ++ ++ OSLockAcquire(hLock); ++ ++ eError = BridgeChangeSparseMem(GetBridgeHandle(hDevConnection), ++ hSrvDevMemHeap, ++ hPMR, ++ ui32AllocPageCount, ++ paui32AllocPageIndices, ++ ui32FreePageCount, ++ pauiFreePageIndices, ++ uiSparseFlags, ++ psImport->uiFlags, ++ sDevVAddr, ++ (IMG_UINT64)((uintptr_t)pvCpuVAddr)); ++ ++ OSLockRelease(hLock); ++ ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) ++ { ++ BridgeDevicememHistorySparseChange(GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ psMemDesc->uiOffset, ++ psMemDesc->sDeviceMemDesc.sDevVAddr, ++ psMemDesc->uiAllocSize, ++ psMemDesc->szText, ++ DevmemGetHeapLog2PageSize(psImport->sDeviceImport.psHeap), ++ ui32AllocPageCount, ++ paui32AllocPageIndices, ++ ui32FreePageCount, ++ pauiFreePageIndices, ++ psMemDesc->ui32AllocationIndex, ++ &psMemDesc->ui32AllocationIndex); ++ } ++ ++e0: ++ return eError; ++} ++ ++static void ++FreeDeviceMemory(DEVMEM_IMPORT *psImport) ++{ ++ DevmemImportStructRelease(psImport); ++} ++ ++static PVRSRV_ERROR ++SubAllocImportAlloc(RA_PERARENA_HANDLE hArena, ++ RA_LENGTH_T uiSize, ++ RA_FLAGS_T _flags, ++ const IMG_CHAR *pszAnnotation, ++ /* returned data */ ++ RA_BASE_T *puiBase, ++ RA_LENGTH_T *puiActualSize, ++ RA_PERISPAN_HANDLE *phImport) ++{ ++ /* When suballocations need a new lump of memory, the RA calls ++ back here. Later, in the kernel, we must construct a new PMR ++ and a pairing between the new lump of virtual memory and the ++ PMR (whether or not such PMR is backed by physical memory) */ ++ DEVMEM_HEAP *psHeap; ++ DEVMEM_IMPORT *psImport; ++ IMG_DEVMEM_ALIGN_T uiAlign; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32MappingTable = 0; ++ PVRSRV_MEMALLOCFLAGS_T uiFlags = (PVRSRV_MEMALLOCFLAGS_T) _flags; ++ IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; ++ ++ /* Per-arena private handle is, for us, the heap */ ++ psHeap = hArena; ++ ++ /* align to the l.s.b. of the size... e.g. 96kiB aligned to ++ 32kiB. NB: There is an argument to say that the RA should never ++ ask us for Non-power-of-2 size anyway, but I don't want to make ++ that restriction arbitrarily now */ ++ uiAlign = uiSize & ~(uiSize-1); ++ ++ /* Technically this is only required for guest drivers due to ++ fw heaps being pre-allocated and pre-mapped resulting in ++ a 1:1 (i.e. virtual : physical) offset correlation but we ++ force this behaviour for all drivers to maintain consistency ++ (i.e. 
heap->VA uiAlign <= heap->PA uiLog2Quantum) */ ++ if (uiAlign > (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum)) ++ { ++ uiAlign = (IMG_DEVMEM_ALIGN_T)(1ULL << psHeap->uiLog2Quantum); ++ } ++ ++ /* The RA should not have invoked us with a size that is not a ++ multiple of the quantum anyway */ ++ PVR_ASSERT((uiSize & ((1ULL << psHeap->uiLog2Quantum) - 1)) == 0); ++ ++ eError = AllocateDeviceMemory(psHeap->psCtx->hDevConnection, ++ psHeap->uiLog2Quantum, ++ uiSize, ++ uiSize, ++ 1, ++ 1, ++ &ui32MappingTable, ++ uiAlign, ++ uiFlags, ++ IMG_FALSE, ++ "PMR sub-allocated", ++ &psImport); ++ PVR_GOTO_IF_ERROR(eError, failAlloc); ++ ++#if defined(PDUMP) && defined(DEBUG) ++#if defined(__KERNEL__) ++ PDUMPCOMMENTWITHFLAGS(PMR_DeviceNode((PMR*)psImport->hPMR), PDUMP_CONT, ++ "Created PMR for sub-allocations with handle ID: 0x%p Annotation: \"%s\" (PID %u)", ++ psImport->hPMR, pszAnnotation, OSGetCurrentProcessID()); ++#else ++ PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS, ++ "Created PMR for sub-allocations with handle ID: %p Annotation: \"%s\" (PID %u)", ++ psImport->hPMR, pszAnnotation, OSGetCurrentProcessID()); ++#endif ++#else ++ PVR_UNREFERENCED_PARAMETER(pszAnnotation); ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) ++ { ++#if defined(__KERNEL__) ++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection; ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice; ++ ++ PVR_ASSERT(PVRSRV_CHECK_FW_MAIN(uiFlags)); ++ ++ /* If allocation is made by the Kernel from the firmware heap, account for it ++ * under the PVR_SYS_ALLOC_PID. ++ */ ++ if ((psHeap == psDevInfo->psFirmwareMainHeap) || (psHeap == psDevInfo->psFirmwareConfigHeap)) ++ { ++ eError = BridgeRIWritePMREntryWithOwner (GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR, ++ PVR_SYS_ALLOC_PID); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntryWithOwner"); ++ } ++ else ++#endif ++ { ++ eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry"); ++ } ++ } ++#endif ++ ++#if defined(__KERNEL__) ++ if (psHeap->bPremapped) ++ { ++ ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, psHeap->psCtx->hDevConnection); ++ } ++#endif ++ ++ /* ++ Suballocations always get mapped into the device as we need to ++ key the RA off something, and as we can't export suballocations ++ there is no valid reason to request an allocation and not map it ++ */ ++ eError = DevmemImportStructDevMap(psHeap, ++ IMG_TRUE, ++ psImport, ++ ui64OptionalMapAddress); ++ PVR_GOTO_IF_ERROR(eError, failMap); ++ ++ OSLockAcquire(psImport->hLock); ++ /* Mark this import struct as zeroed so we can save some PDump LDBs ++ * and do not have to CPU map + mem set()*/ ++ if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) ++ { ++ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_ZEROED; ++ } ++ else if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) ++ { ++ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_POISONED; ++ } ++ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN; ++ OSLockRelease(psImport->hLock); ++ ++ *puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr; ++ *puiActualSize = uiSize; ++ *phImport = psImport; ++ ++ return PVRSRV_OK; ++ ++ /* error exit paths follow */ ++ ++failMap: ++ FreeDeviceMemory(psImport); ++failAlloc: ++ ++ return eError; ++} ++ ++static void
++SubAllocImportFree(RA_PERARENA_HANDLE hArena, ++ RA_BASE_T uiBase, ++ RA_PERISPAN_HANDLE hImport) ++{ ++ DEVMEM_IMPORT *psImport = hImport; ++#if !defined(PVRSRV_NEED_PVR_ASSERT) ++ PVR_UNREFERENCED_PARAMETER(hArena); ++ PVR_UNREFERENCED_PARAMETER(uiBase); ++#endif ++ ++ PVR_ASSERT(psImport != NULL); ++ PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap); ++ PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr); ++ ++ (void) DevmemImportStructDevUnmap(psImport); ++ (void) DevmemImportStructRelease(psImport); ++} ++ ++/***************************************************************************** ++ * Devmem context internals * ++ *****************************************************************************/ ++ ++static PVRSRV_ERROR ++PopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx, ++ DEVMEM_HEAPCFGID uiHeapBlueprintID) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_ERROR eError2; ++ struct DEVMEM_HEAP_TAG **ppsHeapArray; ++ IMG_UINT32 uiNumHeaps; ++ IMG_UINT32 uiHeapsToUnwindOnError; ++ IMG_UINT32 uiHeapIndex; ++ IMG_DEV_VIRTADDR sDevVAddrBase; ++ IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH]; ++ IMG_DEVMEM_SIZE_T uiHeapLength; ++ IMG_DEVMEM_SIZE_T uiReservedRegionLength; ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize; ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment; ++ ++ eError = DevmemHeapCount(psCtx->hDevConnection, ++ uiHeapBlueprintID, ++ &uiNumHeaps); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ if (uiNumHeaps == 0) ++ { ++ ppsHeapArray = NULL; ++ } ++ else ++ { ++ ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps); ++ PVR_GOTO_IF_NOMEM(ppsHeapArray, eError, e0); ++ } ++ ++ uiHeapsToUnwindOnError = 0; ++ ++ for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++) ++ { ++ eError = DevmemHeapDetails(psCtx->hDevConnection, ++ uiHeapBlueprintID, ++ uiHeapIndex, ++ &aszHeapName[0], ++ sizeof(aszHeapName), ++ &sDevVAddrBase, ++ &uiHeapLength, ++ &uiReservedRegionLength, ++ &uiLog2DataPageSize, ++ &uiLog2ImportAlignment); ++ PVR_GOTO_IF_ERROR(eError, e1); ++ ++ eError = DevmemCreateHeap(psCtx, ++ sDevVAddrBase, ++ uiHeapLength, ++ uiReservedRegionLength, ++ uiLog2DataPageSize, ++ uiLog2ImportAlignment, ++ aszHeapName, ++ uiHeapBlueprintID, ++ &ppsHeapArray[uiHeapIndex]); ++ PVR_GOTO_IF_ERROR(eError, e1); ++ ++ uiHeapsToUnwindOnError = uiHeapIndex + 1; ++ } ++ ++ psCtx->uiAutoHeapCount = uiNumHeaps; ++ psCtx->ppsAutoHeapArray = ppsHeapArray; ++ ++ PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount); ++ PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps); ++ ++ return PVRSRV_OK; ++ ++ /* error exit paths */ ++e1: ++ for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++) ++ { ++ eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]); ++ PVR_ASSERT(eError2 == PVRSRV_OK); ++ } ++ ++ if (uiNumHeaps != 0) ++ { ++ OSFreeMem(ppsHeapArray); ++ } ++ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static PVRSRV_ERROR ++UnpopulateContextFromBlueprint(struct DEVMEM_CONTEXT_TAG *psCtx) ++{ ++ PVRSRV_ERROR eReturn = PVRSRV_OK; ++ PVRSRV_ERROR eError2; ++ IMG_UINT32 uiHeapIndex; ++ IMG_BOOL bDoCheck = IMG_TRUE; ++#if defined(__KERNEL__) ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ bDoCheck = IMG_FALSE; ++ } ++#endif ++ ++ for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++) ++ { ++ if (!psCtx->ppsAutoHeapArray[uiHeapIndex]) ++ { ++ continue; ++ } ++ ++ eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]); ++ if (eError2 
!= PVRSRV_OK) ++ { ++ eReturn = eError2; ++ } ++ else ++ { ++ psCtx->ppsAutoHeapArray[uiHeapIndex] = NULL; ++ } ++ } ++ ++ if ((!bDoCheck || (eReturn == PVRSRV_OK)) && psCtx->ppsAutoHeapArray) ++ { ++ OSFreeMem(psCtx->ppsAutoHeapArray); ++ psCtx->ppsAutoHeapArray = NULL; ++ psCtx->uiAutoHeapCount = 0; ++ } ++ ++ return eReturn; ++} ++ ++/***************************************************************************** ++ * Devmem context functions * ++ *****************************************************************************/ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection, ++ DEVMEM_HEAPCFGID uiHeapBlueprintID, ++ DEVMEM_CONTEXT **ppsCtxPtr) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEM_CONTEXT *psCtx; ++ /* handle to the server-side counterpart of the device memory ++ context (specifically, for handling mapping to device MMU) */ ++ IMG_HANDLE hDevMemServerContext; ++ IMG_HANDLE hPrivData; ++ IMG_BOOL bHeapCfgMetaId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_META); ++ ++ PVR_GOTO_IF_NOMEM(ppsCtxPtr, eError, e0); ++ ++ psCtx = OSAllocMem(sizeof(*psCtx)); ++ PVR_GOTO_IF_NOMEM(psCtx, eError, e0); ++ ++ psCtx->uiNumHeaps = 0; ++ ++ psCtx->hDevConnection = hDevConnection; ++ ++ /* Create (server-side) Device Memory context */ ++ eError = BridgeDevmemIntCtxCreate(GetBridgeHandle(psCtx->hDevConnection), ++ bHeapCfgMetaId, ++ &hDevMemServerContext, ++ &hPrivData, ++ &psCtx->ui32CPUCacheLineSize); ++ PVR_GOTO_IF_ERROR(eError, e1); ++ ++ psCtx->hDevMemServerContext = hDevMemServerContext; ++ psCtx->hPrivData = hPrivData; ++ ++ /* automagic heap creation */ ++ psCtx->uiAutoHeapCount = 0; ++ ++ eError = PopulateContextFromBlueprint(psCtx, uiHeapBlueprintID); ++ PVR_GOTO_IF_ERROR(eError, e2); ++ ++ *ppsCtxPtr = psCtx; ++ ++ PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount); ++ return PVRSRV_OK; ++ ++ /* error exit paths follow */ ++ ++e2: ++ PVR_ASSERT(psCtx->uiAutoHeapCount == 0); ++ PVR_ASSERT(psCtx->uiNumHeaps == 0); ++ BridgeDevmemIntCtxDestroy(GetBridgeHandle(psCtx->hDevConnection), hDevMemServerContext); ++ ++e1: ++ OSFreeMem(psCtx); ++ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx, ++ IMG_HANDLE *hPrivData) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0); ++ PVR_GOTO_IF_INVALID_PARAM(hPrivData, eError, e0); ++ ++ *hPrivData = psCtx->hPrivData; ++ return PVRSRV_OK; ++ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_GOTO_IF_INVALID_PARAM(psCtx, eError, e0); ++ return PVRSRV_OK; ++ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemFindHeapByName(const struct DEVMEM_CONTEXT_TAG *psCtx, ++ const IMG_CHAR *pszHeapName, ++ struct DEVMEM_HEAP_TAG **ppsHeapRet) ++{ ++ IMG_UINT32 uiHeapIndex; ++ ++ /* N.B. 
This func is only useful for finding "automagic" heaps by name */ ++ for (uiHeapIndex = 0; ++ uiHeapIndex < psCtx->uiAutoHeapCount; ++ uiHeapIndex++) ++ { ++ if (!OSStringNCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName, OSStringLength(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName) + 1)) ++ { ++ *ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex]; ++ return PVRSRV_OK; ++ } ++ } ++ ++ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemDestroyContext(DEVMEM_CONTEXT *psCtx) ++{ ++ PVRSRV_ERROR eError; ++ IMG_BOOL bDoCheck = IMG_TRUE; ++ ++#if defined(__KERNEL__) ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ bDoCheck = IMG_FALSE; ++ } ++#endif ++ ++ PVR_RETURN_IF_INVALID_PARAM(psCtx); ++ ++ eError = UnpopulateContextFromBlueprint(psCtx); ++ if (bDoCheck && eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: UnpopulateContextFromBlueprint failed (%d) leaving %d heaps", ++ __func__, eError, psCtx->uiNumHeaps)); ++ goto e1; ++ } ++ ++ eError = DestroyServerResource(psCtx->hDevConnection, ++ NULL, ++ BridgeDevmemIntCtxDestroy, ++ psCtx->hDevMemServerContext); ++ if (bDoCheck) ++ { ++ PVR_LOG_GOTO_IF_ERROR(eError, "BridgeDevMemIntCtxDestroy", e1); ++ ++ /* should be no more heaps left */ ++ if (psCtx->uiNumHeaps) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Additional heaps remain in DEVMEM_CONTEXT", ++ __func__)); ++ eError = PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT; ++ goto e1; ++ } ++ } ++ ++ OSCachedMemSet(psCtx, 0, sizeof(*psCtx)); ++ OSFreeMem(psCtx); ++ ++e1: ++ return eError; ++} ++ ++/***************************************************************************** ++ * Devmem heap query functions * ++ *****************************************************************************/ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_UINT32 *puiNumHeapConfigsOut) ++{ ++ PVRSRV_ERROR eError; ++ eError = BridgeHeapCfgHeapConfigCount(GetBridgeHandle(hDevConnection), ++ puiNumHeapConfigsOut); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_UINT32 uiHeapConfigIndex, ++ IMG_UINT32 *puiNumHeapsOut) ++{ ++ PVRSRV_ERROR eError; ++ eError = BridgeHeapCfgHeapCount(GetBridgeHandle(hDevConnection), ++ uiHeapConfigIndex, ++ puiNumHeapsOut); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_UINT32 uiHeapConfigIndex, ++ IMG_CHAR *pszConfigNameOut, ++ IMG_UINT32 uiConfigNameBufSz) ++{ ++ PVRSRV_ERROR eError; ++ eError = BridgeHeapCfgHeapConfigName(GetBridgeHandle(hDevConnection), ++ uiHeapConfigIndex, ++ uiConfigNameBufSz, ++ pszConfigNameOut); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_UINT32 uiHeapConfigIndex, ++ IMG_UINT32 uiHeapIndex, ++ IMG_CHAR *pszHeapNameOut, ++ IMG_UINT32 uiHeapNameBufSz, ++ IMG_DEV_VIRTADDR *psDevVAddrBaseOut, ++ IMG_DEVMEM_SIZE_T *puiHeapLengthOut, ++ IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, ++ IMG_UINT32 *puiLog2DataPageSizeOut, ++ IMG_UINT32 *puiLog2ImportAlignmentOut) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = BridgeHeapCfgHeapDetails(GetBridgeHandle(hDevConnection), ++ uiHeapConfigIndex, ++ uiHeapIndex, ++ uiHeapNameBufSz, ++ pszHeapNameOut, ++ psDevVAddrBaseOut, ++ puiHeapLengthOut, ++ puiReservedRegionLengthOut, ++ puiLog2DataPageSizeOut, ++ 
puiLog2ImportAlignmentOut); ++ ++ VG_MARK_INITIALIZED(pszHeapNameOut, uiHeapNameBufSz); ++ ++ return eError; ++} ++ ++/***************************************************************************** ++ * Devmem heap functions * ++ *****************************************************************************/ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemGetHeapInt(DEVMEM_HEAP *psHeap, ++ IMG_HANDLE *phDevmemHeap) ++{ ++ PVR_RETURN_IF_INVALID_PARAM(psHeap); ++ *phDevmemHeap = psHeap->hDevMemServerHeap; ++ return PVRSRV_OK; ++} ++ ++/* See devicemem.h for important notes regarding the arguments ++ to this function */ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemCreateHeap(DEVMEM_CONTEXT *psCtx, ++ IMG_DEV_VIRTADDR sBaseAddress, ++ IMG_DEVMEM_SIZE_T uiLength, ++ IMG_DEVMEM_SIZE_T uiReservedRegionLength, ++ IMG_UINT32 ui32Log2Quantum, ++ IMG_UINT32 ui32Log2ImportAlignment, ++ const IMG_CHAR *pszName, ++ DEVMEM_HEAPCFGID uiHeapBlueprintID, ++ DEVMEM_HEAP **ppsHeapPtr) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_ERROR eError2; ++ DEVMEM_HEAP *psHeap; ++ /* handle to the server-side counterpart of the device memory heap ++ (specifically, for handling mapping to device MMU) */ ++ IMG_HANDLE hDevMemServerHeap; ++ IMG_UINT32 ui32Policy = RA_POLICY_DEFAULT, ui32PolicyVMRA; ++ ++ IMG_CHAR aszBuf[100]; ++ IMG_CHAR *pszStr; ++ IMG_UINT32 ui32pszStrSize; ++ ++ if (ppsHeapPtr == NULL || ++ uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0); ++ } ++ ++ ui32PolicyVMRA = RA_POLICY_DEFAULT; ++ ++ PVR_ASSERT(uiReservedRegionLength + DEVMEM_HEAP_MINIMUM_SIZE <= uiLength); ++ ++ psHeap = OSAllocMem(sizeof(*psHeap)); ++ PVR_GOTO_IF_NOMEM(psHeap, eError, e0); ++ ++ /* Need to keep local copy of heap name, so caller may free theirs */ ++ ui32pszStrSize = OSStringLength(pszName) + 1; ++ pszStr = OSAllocMem(ui32pszStrSize); ++ PVR_GOTO_IF_NOMEM(pszStr, eError, e1); ++ OSStringLCopy(pszStr, pszName, ui32pszStrSize); ++ psHeap->pszName = pszStr; ++ ++ psHeap->uiSize = uiLength; ++ psHeap->uiReservedRegionSize = uiReservedRegionLength; ++ psHeap->sBaseAddress = sBaseAddress; ++ psHeap->bPremapped = IMG_FALSE; ++ OSAtomicWrite(&psHeap->hImportCount, 0); ++ ++ OSSNPrintf(aszBuf, sizeof(aszBuf), ++ "NDM heap '%s' (suballocs) ctx:%p", ++ pszName, psCtx); ++ ui32pszStrSize = OSStringLength(aszBuf) + 1; ++ pszStr = OSAllocMem(ui32pszStrSize); ++ PVR_GOTO_IF_NOMEM(pszStr, eError, e2); ++ OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); ++ psHeap->pszSubAllocRAName = pszStr; ++ ++#if defined(__KERNEL__) ++ if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META) ++ { ++ void *pvAppHintState = NULL; ++ IMG_UINT32 ui32FirmwarePolicydefault = 0, ui32FirmwarePolicy=0; ++ OSCreateKMAppHintState(&pvAppHintState); ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DevMemFWHeapPolicy, ++ &ui32FirmwarePolicydefault, &ui32FirmwarePolicy); ++ ui32PolicyVMRA = ui32Policy = ui32FirmwarePolicy; ++ OSFreeKMAppHintState(pvAppHintState); ++ } ++#endif ++ ++#if defined(PDUMP) ++ /* The META heap is shared globally so a single physical memory import ++ * may be used to satisfy allocations of different processes. ++ * This is problematic when PDumping because the physical memory ++ * import used to satisfy a new allocation may actually have been ++ * imported (and thus the PDump MALLOC generated) before the PDump ++ * client was started, leading to the MALLOC being missing. 
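++ * (That is, the capture would reference a PMR whose MALLOC was never ++ * recorded, so it could not be played back correctly.)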
++ * ++ * This is solved by disabling splitting of imports for the META physmem ++ * RA, meaning that every firmware allocation gets its own import, thus ++ * ensuring the MALLOC is present for every allocation made within the ++ * pdump capture range ++ */ ++ if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META) ++ { ++ ui32Policy |= RA_POLICY_NO_SPLIT; ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(uiHeapBlueprintID); ++#endif ++ ++ psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName, ++ /* Subsequent imports: */ ++ ui32Log2Quantum, ++ RA_LOCKCLASS_2, ++ SubAllocImportAlloc, ++ SubAllocImportFree, ++ (RA_PERARENA_HANDLE) psHeap, ++ ui32Policy); ++ if (psHeap->psSubAllocRA == NULL) ++ { ++ eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA; ++ goto e3; ++ } ++ ++ psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment; ++ psHeap->uiLog2Quantum = ui32Log2Quantum; ++ ++ if (!OSStringNCompare(pszName, RGX_GENERAL_SVM_HEAP_IDENT, sizeof(RGX_GENERAL_SVM_HEAP_IDENT))) ++ { ++ /* The SVM heap normally starts out as this type though ++ it may transition to DEVMEM_HEAP_MANAGER_USER ++ on platforms with more processor virtual address ++ bits than device virtual address bits */ ++ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL; ++ } ++ else if (uiReservedRegionLength != 0) ++ { ++ /* Heaps which specify reserved VA space range are dual managed: ++ * - sBaseAddress to (sBaseAddress+uiReservedRegionLength-1): User managed ++ * - (sBaseAddress+uiReservedRegionLength) to uiLength: RA managed ++ */ ++ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_DUAL_USER_RA; ++ } ++ else ++ { ++ /* Otherwise, heap manager is decided (USER or RA) at first map */ ++ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_UNKNOWN; ++ } ++ ++ /* Mark the heap to be managed by RA */ ++ if (!OSStringNCompare(pszName, RGX_VK_CAPT_REPLAY_HEAP_IDENT, ++ sizeof(RGX_VK_CAPT_REPLAY_HEAP_IDENT))) ++ { ++ psHeap->ui32HeapManagerFlags |= DEVMEM_HEAP_MANAGER_RA; ++ } ++ ++ OSSNPrintf(aszBuf, sizeof(aszBuf), ++ "NDM heap '%s' (QVM) ctx:%p", ++ pszName, psCtx); ++ ui32pszStrSize = OSStringLength(aszBuf) + 1; ++ pszStr = OSAllocMem(ui32pszStrSize); ++ if (pszStr == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e4; ++ } ++ OSStringLCopy(pszStr, aszBuf, ui32pszStrSize); ++ psHeap->pszQuantizedVMRAName = pszStr; ++ ++ psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName, ++ /* Subsequent import: */ ++ 0, RA_LOCKCLASS_1, NULL, NULL, ++ (RA_PERARENA_HANDLE) psHeap, ++ ui32PolicyVMRA); ++ if (psHeap->psQuantizedVMRA == NULL) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); ++ } ++ ++ if (!RA_Add(psHeap->psQuantizedVMRA, ++ /* Make sure the VMRA doesn't allocate from reserved VAs */ ++ (RA_BASE_T)sBaseAddress.uiAddr + uiReservedRegionLength, ++ (RA_LENGTH_T)uiLength, ++ (RA_FLAGS_T)0, /* This RA doesn't use or need flags */ ++ NULL /* per ispan handle */)) ++ { ++ RA_Delete(psHeap->psQuantizedVMRA); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA, e5); ++ } ++ ++ psHeap->psCtx = psCtx; ++ ++ ++ /* Create server-side counterpart of Device Memory heap */ ++ eError = BridgeDevmemIntHeapCreate(GetBridgeHandle(psCtx->hDevConnection), ++ psCtx->hDevMemServerContext, ++ sBaseAddress, ++ uiLength, ++ ui32Log2Quantum, ++ &hDevMemServerHeap); ++ PVR_GOTO_IF_ERROR(eError, e6); ++ ++ psHeap->hDevMemServerHeap = hDevMemServerHeap; ++ ++ eError = OSLockCreate(&psHeap->hLock); ++ PVR_GOTO_IF_ERROR(eError, e7); ++ ++ psHeap->psCtx->uiNumHeaps++; ++ 
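++ /* At this point the heap is fully constructed: psSubAllocRA services ++ * suballocations, importing fresh PMRs on demand via SubAllocImportAlloc, ++ * psQuantizedVMRA manages the device-virtual range above any reserved ++ * region, and the server-side heap handle and heap lock exist. */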
*ppsHeapPtr = psHeap; ++ ++#if defined(PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING) ++ psHeap->psMemDescList = NULL; ++#endif /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */ ++ ++ return PVRSRV_OK; ++ ++ /* error exit paths */ ++e7: ++ eError2 = BridgeDevmemIntHeapDestroy(GetBridgeHandle(psCtx->hDevConnection), ++ psHeap->hDevMemServerHeap); ++ PVR_ASSERT (eError2 == PVRSRV_OK); ++e6: ++ if (psHeap->psQuantizedVMRA) ++ RA_Delete(psHeap->psQuantizedVMRA); ++e5: ++ if (psHeap->pszQuantizedVMRAName) ++ OSFreeMem(psHeap->pszQuantizedVMRAName); ++e4: ++ RA_Delete(psHeap->psSubAllocRA); ++e3: ++ OSFreeMem(psHeap->pszSubAllocRAName); ++e2: ++ OSFreeMem(psHeap->pszName); ++e1: ++ OSFreeMem(psHeap); ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemGetHeapBaseDevVAddr(struct DEVMEM_HEAP_TAG *psHeap, ++ IMG_DEV_VIRTADDR *pDevVAddr) ++{ ++ PVR_RETURN_IF_INVALID_PARAM(psHeap); ++ ++ *pDevVAddr = psHeap->sBaseAddress; ++ ++ return PVRSRV_OK; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ IMG_DEVMEM_ALIGN_T *puiAlign) ++{ ++ IMG_DEVMEM_SIZE_T uiSize = *puiSize; ++ IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign; ++ ++ /* Just in case someone changes definition of IMG_DEVMEM_ALIGN_T. */ ++ static_assert(sizeof(unsigned long long) == sizeof(uiAlign), ++ "invalid uiAlign size"); ++ /* This value is used for shifting so it cannot be greater than number ++ * of bits in unsigned long long (sizeof(1ULL)). Using greater value is ++ * undefined behaviour. */ ++ if (uiLog2Quantum >= sizeof(unsigned long long) * 8) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if ((1ULL << uiLog2Quantum) > uiAlign) ++ { ++ uiAlign = 1ULL << uiLog2Quantum; ++ } ++ uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1); ++ ++ *puiSize = uiSize; ++ *puiAlign = uiAlign; ++ ++ return PVRSRV_OK; ++} ++ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemDestroyHeap(DEVMEM_HEAP *psHeap) ++{ ++ PVRSRV_ERROR eError; ++ IMG_INT uiImportCount; ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ IMG_BOOL bDoCheck = IMG_TRUE; ++#if defined(__KERNEL__) ++ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ bDoCheck = IMG_FALSE; ++ } ++#endif ++#endif ++ ++ PVR_RETURN_IF_INVALID_PARAM(psHeap); ++ ++ uiImportCount = OSAtomicRead(&psHeap->hImportCount); ++ if (uiImportCount > 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%d(%s) leaks remain", uiImportCount, psHeap->pszName)); ++#if defined(__KERNEL__) ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ PVR_DPF((PVR_DBG_ERROR, "Details of remaining allocated device memory (for all processes):")); ++ RIDumpAllKM(); ++#else ++ PVR_DPF((PVR_DBG_ERROR, "Compile with PVRSRV_ENABLE_GPU_MEMORY_INFO=1 to get a full " ++ "list of all driver allocations.")); ++#endif ++#endif ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ if (bDoCheck) ++#endif ++ { ++ return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP; ++ } ++ } ++ ++ eError = DestroyServerResource(psHeap->psCtx->hDevConnection, ++ NULL, ++ BridgeDevmemIntHeapDestroy, ++ psHeap->hDevMemServerHeap); ++ ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ if (bDoCheck) ++#endif ++ { ++ PVR_LOG_RETURN_IF_ERROR(eError, "BridgeDevmemIntHeapDestroy"); ++ } ++ ++ PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0); ++ psHeap->psCtx->uiNumHeaps--; ++ ++ OSLockDestroy(psHeap->hLock); ++ ++ if (psHeap->psQuantizedVMRA) ++ { ++ RA_Delete(psHeap->psQuantizedVMRA); ++ } ++ if (psHeap->pszQuantizedVMRAName) ++ { ++ 
OSFreeMem(psHeap->pszQuantizedVMRAName); ++ } ++ ++ RA_Delete(psHeap->psSubAllocRA); ++ OSFreeMem(psHeap->pszSubAllocRAName); ++ ++ OSFreeMem(psHeap->pszName); ++ ++ OSCachedMemSet(psHeap, 0, sizeof(*psHeap)); ++ OSFreeMem(psHeap); ++ ++ return PVRSRV_OK; ++} ++ ++/***************************************************************************** ++ * Devmem allocation/free functions * ++ *****************************************************************************/ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier, ++ DEVMEM_HEAP *psHeap, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr, ++ IMG_DEV_VIRTADDR *psDevVirtAddr) ++{ ++ PVRSRV_ERROR eError; ++ eError = DevmemSubAllocate(uiPreAllocMultiplier, ++ psHeap, ++ uiSize, ++ uiAlign, ++ uiFlags, ++ pszText, ++ ppsMemDescPtr); ++ PVR_GOTO_IF_ERROR(eError, fail_alloc); ++ ++ eError = DevmemMapToDevice(*ppsMemDescPtr, ++ psHeap, ++ psDevVirtAddr); ++ PVR_GOTO_IF_ERROR(eError, fail_map); ++ ++ return PVRSRV_OK; ++ ++fail_map: ++ DevmemFree(*ppsMemDescPtr); ++fail_alloc: ++ *ppsMemDescPtr = NULL; ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++ ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, ++ DEVMEM_HEAP *psHeap, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr) ++{ ++ RA_BASE_T uiAllocatedAddr = 0; ++ RA_LENGTH_T uiAllocatedSize; ++ RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */ ++ PVRSRV_ERROR eError; ++ DEVMEM_MEMDESC *psMemDesc = NULL; ++ IMG_DEVMEM_OFFSET_T uiOffset = 0; ++ DEVMEM_IMPORT *psImport; ++ IMG_UINT32 ui32CPUCacheLineSize; ++ void *pvAddr = NULL; ++ ++ IMG_BOOL bImportClean; ++ IMG_BOOL bCPUCleanFlag = PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags); ++ IMG_BOOL bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags); ++ IMG_BOOL bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags); ++ IMG_BOOL bCPUCached = (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) || ++ PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags)); ++ IMG_BOOL bGPUCached = (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) || ++ PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags)); ++ IMG_BOOL bAlign = ! (PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags)); ++ PVRSRV_CACHE_OP eOp = PVRSRV_CACHE_OP_INVALIDATE; ++ IMG_UINT32 ui32CacheLineSize = 0; ++ DEVMEM_PROPERTIES_T uiProperties; ++ ++ if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) ++ { ++ /* Deferred Allocation not supported on SubAllocs*/ ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failParams); ++ } ++ ++ PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); ++ PVR_GOTO_IF_INVALID_PARAM(psHeap->psCtx, eError, failParams); ++ PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); ++ ++ uiFlags = DevmemOverrideFlagsOrPassThrough(psHeap->psCtx->hDevConnection, uiFlags); ++ ++#if defined(__KERNEL__) ++ { ++ /* The hDevConnection holds two different types of pointers depending on the ++ * address space in which it is used. 
++ * In this instance the variable points to the device node in server */ ++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection; ++ ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, SLC_CACHE_LINE_SIZE_BITS)); ++ } ++#else ++ ui32CacheLineSize = ROGUE_CACHE_LINE_SIZE; ++#endif ++ ++ /* The following logic makes sure that any cached memory is aligned to both the CPU and GPU. ++ * To be aligned on both you have to take the Lowest Common Multiple (LCM) of the cache line sizes of each. ++ * As the possibilities are all powers of 2, the largest of them is simply the LCM. ++ * Therefore this algorithm just picks the highest of the CPU, GPU and given alignments. ++ */ ++ ui32CPUCacheLineSize = psHeap->psCtx->ui32CPUCacheLineSize; ++ /* If the CPU cache line size is larger than the given alignment then it is the lowest common multiple. ++ * We also check that the allocation is going to be cached on the CPU. ++ * Currently there is no check for the validity of the cache coherent option; ++ * in that case the alignment is still applied but the mode could fall back to uncached. ++ */ ++ if (bAlign && ui32CPUCacheLineSize > uiAlign && bCPUCached) ++ { ++ uiAlign = ui32CPUCacheLineSize; ++ } ++ ++ /* If the GPU cache line size is larger than the given alignment then it is the lowest common multiple. ++ * We also check that the allocation is going to be cached on the GPU by checking for any of the cached options. ++ * Currently there is no check for the validity of the cache coherent option; ++ * in that case the alignment is still applied but the mode could fall back to uncached. ++ */ ++ if (bAlign && ui32CacheLineSize > uiAlign && bGPUCached) ++ { ++ uiAlign = ui32CacheLineSize; ++ } ++ ++ eError = DevmemValidateParams(uiSize, ++ uiAlign, ++ &uiFlags); ++ PVR_GOTO_IF_ERROR(eError, failParams); ++ ++ eError = DevmemMemDescAlloc(&psMemDesc); ++ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); ++ ++ /* No request for exportable memory so use the RA */ ++ eError = RA_Alloc(psHeap->psSubAllocRA, ++ uiSize, ++ uiPreAllocMultiplier, ++ uiFlags, ++ uiAlign, ++ pszText, ++ &uiAllocatedAddr, ++ &uiAllocatedSize, ++ &hImport); ++ PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); ++ ++ psImport = hImport; ++ ++ /* This assignment assumes the RA returns an hImport from which further ++ * suballocations can be made if uiSize is NOT a page multiple of the passed heap. ++ * ++ * So we check whether uiSize is a page multiple and mark the import as ++ * exportable only if it is.
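++ * ++ * (Worked example, for a hypothetical heap with a 4 KiB quantum: a ++ * 16 KiB request is a page multiple and may be marked exportable, ++ * whereas a 5 KiB request is assumed to share its import with other ++ * suballocations and stays non-exportable.)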
++ * */ ++ OSLockAcquire(psImport->hLock); ++ if (!(uiSize & ((1ULL << psHeap->uiLog2Quantum) - 1)) && ++ (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER)) ++ { ++ psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE; ++ } ++ psImport->uiProperties |= DEVMEM_PROPERTIES_SUBALLOCATABLE; ++ uiProperties = psImport->uiProperties; ++ OSLockRelease(psImport->hLock); ++ ++ uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr; ++ ++#if defined(PDUMP) && defined(DEBUG) ++#if defined(__KERNEL__) ++ PDUMPCOMMENTWITHFLAGS(PMR_DeviceNode((PMR*)psImport->hPMR), PDUMP_CONT, ++ "Suballocated %u Byte for \"%s\" from PMR with handle ID: 0x%p (PID %u)", ++ (IMG_UINT32) uiSize, pszText, psImport->hPMR, OSGetCurrentProcessID()); ++#else ++ PDUMPCOMMENTF(psHeap->psCtx->hDevConnection, PDUMP_FLAGS_CONTINUOUS, ++ "Suballocated %u Byte for \"%s\" from PMR with handle ID: %p (PID %u)", ++ (IMG_UINT32) uiSize, ++ pszText, ++ psImport->hPMR, ++ OSGetCurrentProcessID()); ++#endif ++#endif ++ ++ DevmemMemDescInit(psMemDesc, ++ uiOffset, ++ psImport, ++ uiSize); ++ ++#if defined(DEBUG) ++ DevmemMemDescSetPoF(psMemDesc, uiFlags); ++#endif ++ ++ bImportClean = ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_CLEAN) != 0); ++ ++ /* Zero the memory */ ++ if (bZero) ++ { ++ /* Has the import been zeroed on allocation and were no suballocations returned to it so far? */ ++ bImportClean = bImportClean && ((uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_ZEROED) != 0); ++ ++ if (!bImportClean) ++ { ++ eOp = PVRSRV_CACHE_OP_FLUSH; ++ ++ eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr); ++ PVR_GOTO_IF_ERROR(eError, failMaintenance); ++ ++ /* uiSize is a 64-bit quantity whereas the 3rd argument ++ * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems ++ * hence a compiler warning of implicit cast and loss of data. ++ * Added explicit cast and assert to remove warning. ++ */ ++ PVR_ASSERT(uiSize < IMG_UINT32_MAX); ++ ++ DevmemCPUMemSet(pvAddr, 0, uiSize, uiFlags); ++ ++#if defined(PDUMP) ++ DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS); ++#endif ++ } ++ } ++ else if (bPoisonOnAlloc) ++ { ++ /* Has the import been poisoned on allocation and were no suballocations returned to it so far? 
*/ ++ bPoisonOnAlloc = (uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_POISONED) != 0; ++ ++ if (!bPoisonOnAlloc) ++ { ++ eOp = PVRSRV_CACHE_OP_FLUSH; ++ ++ eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr); ++ PVR_GOTO_IF_ERROR(eError, failMaintenance); ++ ++ DevmemCPUMemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE, uiSize, uiFlags); ++ ++ bPoisonOnAlloc = IMG_TRUE; ++ } ++ } ++ ++ /* Flush or invalidate */ ++ if (bCPUCached && !bImportClean && (bZero || bCPUCleanFlag || bPoisonOnAlloc)) ++ { ++ eError = BridgeCacheOpExec (GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ (IMG_UINT64)(uintptr_t) ++ pvAddr - psMemDesc->uiOffset, ++ psMemDesc->uiOffset, ++ psMemDesc->uiAllocSize, ++ eOp); ++ PVR_GOTO_IF_ERROR(eError, failMaintenance); ++ } ++ ++ if (pvAddr) ++ { ++ DevmemReleaseCpuVirtAddr(psMemDesc); ++ pvAddr = NULL; ++ } ++ ++ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when ++ * the allocation gets mapped/unmapped ++ */ ++ CheckAnnotationLength(pszText); ++ OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) ++ { ++ /* Attach RI information */ ++ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ OSStringNLength(psMemDesc->szText, DEVMEM_ANNOTATION_MAX_LEN), ++ psMemDesc->szText, ++ psMemDesc->uiOffset, ++ uiAllocatedSize, ++ IMG_FALSE, ++ IMG_TRUE, ++ &(psMemDesc->hRIHandle)); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); ++ } ++#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ ++ PVR_UNREFERENCED_PARAMETER (pszText); ++#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ ++ ++ *ppsMemDescPtr = psMemDesc; ++ ++ return PVRSRV_OK; ++ ++ /* error exit paths follow */ ++ ++failMaintenance: ++ if (pvAddr) ++ { ++ DevmemReleaseCpuVirtAddr(psMemDesc); ++ pvAddr = NULL; ++ } ++ DevmemMemDescRelease(psMemDesc); ++ psMemDesc = NULL; /* Make sure we don't do a discard after the release */ ++failDeviceMemAlloc: ++ if (psMemDesc) ++ { ++ DevmemMemDescDiscard(psMemDesc); ++ } ++failMemDescAlloc: ++failParams: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed! Error is %s. 
Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, ++ __func__, ++ PVRSRVGETERRORSTRING(eError), ++ uiSize)); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ IMG_UINT32 uiLog2HeapPageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEM_MEMDESC *psMemDesc = NULL; ++ DEVMEM_IMPORT *psImport; ++ IMG_UINT32 ui32MappingTable = 0; ++ ++ eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize, ++ &uiSize, ++ &uiAlign); ++ PVR_GOTO_IF_ERROR(eError, failParams); ++ ++ uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags); ++ ++ eError = DevmemValidateParams(uiSize, ++ uiAlign, ++ &uiFlags); ++ PVR_GOTO_IF_ERROR(eError, failParams); ++ ++ eError = DevmemMemDescAlloc(&psMemDesc); ++ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); ++ ++ eError = AllocateDeviceMemory(hDevConnection, ++ uiLog2HeapPageSize, ++ uiSize, ++ uiSize, ++ 1, ++ 1, ++ &ui32MappingTable, ++ uiAlign, ++ uiFlags, ++ IMG_TRUE, ++ pszText, ++ &psImport); ++ PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); ++ ++ DevmemMemDescInit(psMemDesc, ++ 0, ++ psImport, ++ uiSize); ++ ++#if defined(DEBUG) ++ DevmemMemDescSetPoF(psMemDesc, uiFlags); ++#endif ++ ++ *ppsMemDescPtr = psMemDesc; ++ ++ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when ++ * the allocation gets mapped/unmapped ++ */ ++ CheckAnnotationLength(pszText); ++ OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) ++ { ++ eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry"); ++ ++ /* Attach RI information */ ++ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR, ++ sizeof("^"), ++ "^", ++ psMemDesc->uiOffset, ++ uiSize, ++ IMG_FALSE, ++ IMG_FALSE, ++ &psMemDesc->hRIHandle); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); ++ } ++#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ ++ PVR_UNREFERENCED_PARAMETER (pszText); ++#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ ++ ++ return PVRSRV_OK; ++ ++ /* error exit paths follow */ ++ ++failDeviceMemAlloc: ++ DevmemMemDescDiscard(psMemDesc); ++ ++failMemDescAlloc: ++failParams: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed! Error is %s. 
Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, ++ __func__, ++ PVRSRVGETERRORSTRING(eError), ++ uiSize)); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ IMG_UINT32 uiLog2HeapPageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEM_MEMDESC *psMemDesc = NULL; ++ DEVMEM_IMPORT *psImport; ++ ++ eError = DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize, ++ &uiSize, ++ &uiAlign); ++ PVR_GOTO_IF_ERROR(eError, failParams); ++ ++ uiFlags = DevmemOverrideFlagsOrPassThrough(hDevConnection, uiFlags); ++ ++ eError = DevmemValidateParams(uiSize, ++ uiAlign, ++ &uiFlags); ++ PVR_GOTO_IF_ERROR(eError, failParams); ++ ++ eError = DevmemMemDescAlloc(&psMemDesc); ++ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); ++ ++ eError = AllocateDeviceMemory(hDevConnection, ++ uiLog2HeapPageSize, ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ uiAlign, ++ uiFlags, ++ IMG_TRUE, ++ pszText, ++ &psImport); ++ PVR_GOTO_IF_ERROR(eError, failDeviceMemAlloc); ++ ++ DevmemMemDescInit(psMemDesc, ++ 0, ++ psImport, ++ uiSize); ++ ++#if defined(DEBUG) ++ DevmemMemDescSetPoF(psMemDesc, uiFlags); ++#endif ++ ++ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when ++ * the allocation gets mapped/unmapped ++ */ ++ CheckAnnotationLength(pszText); ++ OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN); ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) ++ { ++ eError = BridgeRIWritePMREntry (GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIWritePMREntry"); ++ ++ /* Attach RI information */ ++ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ sizeof("^"), ++ "^", ++ psMemDesc->uiOffset, ++ uiSize, ++ IMG_FALSE, ++ IMG_FALSE, ++ &psMemDesc->hRIHandle); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); ++ } ++#else /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ ++ PVR_UNREFERENCED_PARAMETER (pszText); ++#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ ++ ++ *ppsMemDescPtr = psMemDesc; ++ ++ return PVRSRV_OK; ++ ++ /* error exit paths follow */ ++ ++failDeviceMemAlloc: ++ DevmemMemDescDiscard(psMemDesc); ++ ++failMemDescAlloc: ++failParams: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed! Error is %s. 
Allocation size: " IMG_DEVMEM_SIZE_FMTSPEC, ++ __func__, ++ PVRSRVGETERRORSTRING(eError), ++ uiSize)); ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_HANDLE hServerHandle, ++ IMG_HANDLE *hLocalImportHandle) ++{ ++ return BridgePMRMakeLocalImportHandle(GetBridgeHandle(hDevConnection), ++ hServerHandle, ++ hLocalImportHandle); ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_HANDLE hLocalImportHandle) ++{ ++ return DestroyServerResource(hDevConnection, ++ NULL, ++ BridgePMRUnmakeLocalImportHandle, ++ hLocalImportHandle); ++} ++ ++/***************************************************************************** ++ * Devmem unsecure export functions * ++ *****************************************************************************/ ++ ++#if defined(SUPPORT_INSECURE_EXPORT) ++ ++static PVRSRV_ERROR ++_Mapping_Export(DEVMEM_IMPORT *psImport, ++ DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr, ++ DEVMEM_EXPORTKEY *puiExportKeyPtr, ++ DEVMEM_SIZE_T *puiSize, ++ DEVMEM_LOG2ALIGN_T *puiLog2Contig) ++{ ++ /* Gets an export handle and key for the PMR used for this mapping */ ++ /* Can only be done if there are no suballocations for this mapping */ ++ ++ PVRSRV_ERROR eError; ++ DEVMEM_EXPORTHANDLE hPMRExportHandle; ++ DEVMEM_EXPORTKEY uiExportKey; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig; ++ ++ PVR_GOTO_IF_INVALID_PARAM(psImport, eError, failParams); ++ ++ if ((GetImportProperties(psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, failParams); ++ } ++ ++ eError = BridgePMRExportPMR(GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR, ++ &hPMRExportHandle, ++ &uiSize, ++ &uiLog2Contig, ++ &uiExportKey); ++ PVR_GOTO_IF_ERROR(eError, failExport); ++ ++ PVR_ASSERT(uiSize == psImport->uiSize); ++ ++ *phPMRExportHandlePtr = hPMRExportHandle; ++ *puiExportKeyPtr = uiExportKey; ++ *puiSize = uiSize; ++ *puiLog2Contig = uiLog2Contig; ++ ++ return PVRSRV_OK; ++ ++ /* error exit paths follow */ ++ ++failExport: ++failParams: ++ ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++ ++} ++ ++static void ++_Mapping_Unexport(DEVMEM_IMPORT *psImport, ++ DEVMEM_EXPORTHANDLE hPMRExportHandle) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT (psImport != NULL); ++ ++ eError = DestroyServerResource(psImport->hDevConnection, ++ NULL, ++ BridgePMRUnexportPMR, ++ hPMRExportHandle); ++ PVR_ASSERT(eError == PVRSRV_OK); ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemExport(DEVMEM_MEMDESC *psMemDesc, ++ DEVMEM_EXPORTCOOKIE *psExportCookie) ++{ ++ /* Caller to provide storage for export cookie struct */ ++ PVRSRV_ERROR eError; ++ IMG_HANDLE hPMRExportHandle = 0; ++ IMG_UINT64 uiPMRExportPassword = 0; ++ IMG_DEVMEM_SIZE_T uiSize = 0; ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0; ++ ++ PVR_GOTO_IF_INVALID_PARAM(psMemDesc, eError, e0); ++ PVR_GOTO_IF_INVALID_PARAM(psExportCookie, eError, e0); ++ ++ if (DEVMEM_PROPERTIES_EXPORTABLE != ++ (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: This Memory (0x%p) cannot be exported!...", ++ __func__, psMemDesc)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e0); ++ } ++ ++ eError = _Mapping_Export(psMemDesc->psImport, ++ &hPMRExportHandle, ++ &uiPMRExportPassword, ++ &uiSize, ++ &uiLog2Contig); ++ if (eError != PVRSRV_OK) ++ { ++ psExportCookie->uiSize = 0; ++ goto e0; 
++ } ++ ++ psExportCookie->hPMRExportHandle = hPMRExportHandle; ++ psExportCookie->uiPMRExportPassword = uiPMRExportPassword; ++ psExportCookie->uiSize = uiSize; ++ psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig; ++ ++ return PVRSRV_OK; ++ ++ /* error exit paths follow */ ++ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++IMG_INTERNAL void ++DevmemUnexport(DEVMEM_MEMDESC *psMemDesc, ++ DEVMEM_EXPORTCOOKIE *psExportCookie) ++{ ++ _Mapping_Unexport(psMemDesc->psImport, ++ psExportCookie->hPMRExportHandle); ++ ++ psExportCookie->uiSize = 0; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemImport(SHARED_DEV_CONNECTION hDevConnection, ++ DEVMEM_EXPORTCOOKIE *psCookie, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ DEVMEM_MEMDESC **ppsMemDescPtr) ++{ ++ DEVMEM_MEMDESC *psMemDesc = NULL; ++ DEVMEM_IMPORT *psImport; ++ IMG_HANDLE hPMR; ++ PVRSRV_ERROR eError; ++ ++ PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); ++ ++ eError = DevmemMemDescAlloc(&psMemDesc); ++ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); ++ ++ eError = DevmemImportStructAlloc(hDevConnection, ++ &psImport); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc); ++ } ++ ++ /* Get a handle to the PMR (inc refcount) */ ++ eError = BridgePMRImportPMR(GetBridgeHandle(hDevConnection), ++ psCookie->hPMRExportHandle, ++ psCookie->uiPMRExportPassword, ++ psCookie->uiSize, /* not trusted - just for validation */ ++ psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for validation */ ++ &hPMR); ++ PVR_GOTO_IF_ERROR(eError, failImport); ++ ++ DevmemImportStructInit(psImport, ++ psCookie->uiSize, ++ 1ULL << psCookie->uiLog2ContiguityGuarantee, ++ uiFlags, ++ hPMR, ++ DEVMEM_PROPERTIES_IMPORTED | ++ DEVMEM_PROPERTIES_EXPORTABLE); ++ ++ DevmemMemDescInit(psMemDesc, ++ 0, ++ psImport, ++ psImport->uiSize); ++ ++ *ppsMemDescPtr = psMemDesc; ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) ++ { ++ /* Attach RI information */ ++ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ sizeof("^"), ++ "^", ++ psMemDesc->uiOffset, ++ psMemDesc->psImport->uiSize, ++ IMG_TRUE, ++ IMG_TRUE, ++ &psMemDesc->hRIHandle); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); ++ } ++#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ ++ ++ return PVRSRV_OK; ++ ++ /* error exit paths follow */ ++ ++failImport: ++ DevmemImportDiscard(psImport); ++failImportAlloc: ++ DevmemMemDescDiscard(psMemDesc); ++failMemDescAlloc: ++failParams: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ ++ return eError; ++} ++ ++#endif /* SUPPORT_INSECURE_EXPORT */ ++ ++/***************************************************************************** ++ * Common MemDesc functions * ++ *****************************************************************************/ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemUnpin(DEVMEM_MEMDESC *psMemDesc) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ DEVMEM_IMPORT *psImport = psMemDesc->psImport; ++ DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport); ++ ++ if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE) ++ { ++ eError = PVRSRV_ERROR_INVALID_REQUEST; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: The passed allocation is not valid to unpin", ++ __func__)); ++ ++ goto e_exit; ++ } ++ ++ /* Stop if the allocation might have suballocations. 
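++ * (Rationale: only imports marked EXPORTABLE are known not to be carved ++ * up by the suballocator; any other import may carry further live ++ * suballocations, and unpinning the shared import would pull pages out ++ * from under them.)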
*/ ++ if (!(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE)) ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: The passed allocation is not valid to unpin because " ++ "there might be suballocations on it. Make sure you allocate a page multiple " ++ "of the heap when using PVRSRVAllocDeviceMem()", ++ __func__)); ++ ++ goto e_exit; ++ } ++ ++ /* Stop if the Import is still mapped to CPU */ ++ if (psImport->sCPUImport.ui32RefCount) ++ { ++ eError = PVRSRV_ERROR_STILL_MAPPED; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: There are still %u references on the CPU mapping. " ++ "Please remove all CPU mappings before unpinning.", ++ __func__, ++ psImport->sCPUImport.ui32RefCount)); ++ ++ goto e_exit; ++ } ++ ++ /* Only unpin if it is not already unpinned ++ * Return PVRSRV_OK */ ++ if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) ++ { ++ goto e_exit; ++ } ++ ++ /* Unpin it and invalidate mapping */ ++ if (psImport->sDeviceImport.bMapped) ++ { ++ eError = BridgeDevmemIntUnpinInvalidate(GetBridgeHandle(psImport->hDevConnection), ++ psImport->sDeviceImport.hMapping, ++ psImport->hPMR); ++ } ++ else ++ { ++ /* Or just unpin it */ ++ eError = BridgeDevmemIntUnpin(GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR); ++ } ++ ++ /* Update flags and RI when call was successful */ ++ if (eError == PVRSRV_OK) ++ { ++ OSLockAcquire(psImport->hLock); ++ psImport->uiProperties |= DEVMEM_PROPERTIES_UNPINNED; ++ OSLockRelease(psImport->hLock); ++ } ++ else ++ { ++ /* Or just show what went wrong */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Unpin aborted because of error %d", ++ __func__, ++ eError)); ++ } ++ ++e_exit: ++ return eError; ++} ++ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemPin(DEVMEM_MEMDESC *psMemDesc) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ DEVMEM_IMPORT *psImport = psMemDesc->psImport; ++ DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psImport); ++ ++ if (uiProperties & DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_REQUEST, e_exit); ++ } ++ ++ /* Only pin if it is unpinned */ ++ if ((uiProperties & DEVMEM_PROPERTIES_UNPINNED) == 0) ++ { ++ goto e_exit; ++ } ++ ++ /* Pin it and make mapping valid */ ++ if (psImport->sDeviceImport.bMapped) ++ { ++ eError = BridgeDevmemIntPinValidate(GetBridgeHandle(psImport->hDevConnection), ++ psImport->sDeviceImport.hMapping, ++ psImport->hPMR); ++ } ++ else ++ { ++ /* Or just pin it */ ++ eError = BridgeDevmemIntPin(GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR); ++ } ++ ++ if ((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)) ++ { ++ OSLockAcquire(psImport->hLock); ++ psImport->uiProperties &= ~DEVMEM_PROPERTIES_UNPINNED; ++ OSLockRelease(psImport->hLock); ++ } ++ else ++ { ++ /* Or just show what went wrong */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Pin aborted because of error %d", ++ __func__, ++ eError)); ++ } ++ ++e_exit: ++ return eError; ++} ++ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T* puiSize) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ *puiSize = psMemDesc->uiAllocSize; ++ ++ return eError; ++} ++ ++IMG_INTERNAL void ++DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, IMG_CHAR **pszAnnotation) ++{ ++ /* ++ * It is expected that psMemDesc->szText is a valid NUL-terminated string, ++ * since DevmemMemDescAlloc uses OSAllocZMem to create the memdesc. 
++ */ ++ *pszAnnotation = psMemDesc->szText; ++} ++ ++/* ++ This function is called for freeing any class of memory ++ */ ++IMG_INTERNAL IMG_BOOL ++DevmemFree(DEVMEM_MEMDESC *psMemDesc) ++{ ++ if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_SECURE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Please use methods dedicated to secure buffers.", ++ __func__)); ++ return IMG_FALSE; ++ } ++ ++ return DevmemMemDescRelease(psMemDesc); ++} ++ ++IMG_INTERNAL IMG_BOOL ++DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc) ++{ ++ DevmemReleaseDevVirtAddr(psMemDesc); ++ return DevmemFree(psMemDesc); ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc, ++ DEVMEM_HEAP *psHeap, ++ IMG_DEV_VIRTADDR *psDevVirtAddr) ++{ ++ DEVMEM_IMPORT *psImport; ++ IMG_DEV_VIRTADDR sDevVAddr; ++ PVRSRV_ERROR eError; ++ IMG_BOOL bMap = IMG_TRUE; ++ IMG_BOOL bDestroyed = IMG_FALSE; ++ IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS; ++ DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); ++ ++ /* Do not try to map unpinned memory */ ++ if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags); ++ } ++ ++ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); ++ PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); ++ ++ if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck); ++ } ++ ++ /* Don't map memory for deferred allocations */ ++ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) ++ { ++ PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); ++ bMap = IMG_FALSE; ++ } ++ ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ psMemDesc->sDeviceMemDesc.ui32RefCount, ++ psMemDesc->sDeviceMemDesc.ui32RefCount+1); ++ ++ psImport = psMemDesc->psImport; ++ DevmemMemDescAcquire(psMemDesc); ++ ++#if defined(__KERNEL__) ++ if (psHeap->bPremapped) ++ { ++ ui64OptionalMapAddress = _GetPremappedVA(psImport->hPMR, psHeap->psCtx->hDevConnection); ++ } ++#endif ++ ++ eError = DevmemImportStructDevMap(psHeap, ++ bMap, ++ psImport, ++ ui64OptionalMapAddress); ++ PVR_GOTO_IF_ERROR(eError, failMap); ++ ++ sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; ++ sDevVAddr.uiAddr += psMemDesc->uiOffset; ++ psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr; ++ psMemDesc->sDeviceMemDesc.ui32RefCount++; ++ ++ *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr; ++ ++ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); ++ ++ if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) ++ { ++ BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ psMemDesc->uiOffset, ++ psMemDesc->sDeviceMemDesc.sDevVAddr, ++ psMemDesc->uiAllocSize, ++ psMemDesc->szText, ++ DevmemGetHeapLog2PageSize(psHeap), ++ psMemDesc->ui32AllocationIndex, ++ &psMemDesc->ui32AllocationIndex); ++ } ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) ++ { ++ if (psMemDesc->hRIHandle) ++ { ++ eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection), ++ psMemDesc->hRIHandle, ++ psImport->sDeviceImport.sDevVAddr); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIUpdateMEMDESCAddr"); ++ } ++ } ++#endif ++ ++ return PVRSRV_OK; ++ ++failMap: ++ bDestroyed = DevmemMemDescRelease(psMemDesc); ++failCheck: ++failParams: ++ if 
(!bDestroyed) ++ { ++ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); ++ } ++ PVR_ASSERT(eError != PVRSRV_OK); ++failFlags: ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc, ++ DEVMEM_HEAP *psHeap, ++ IMG_DEV_VIRTADDR sDevVirtAddr) ++{ ++ DEVMEM_IMPORT *psImport; ++ IMG_DEV_VIRTADDR sDevVAddr; ++ PVRSRV_ERROR eError; ++ IMG_BOOL bMap = IMG_TRUE; ++ IMG_BOOL bDestroyed = IMG_FALSE; ++ DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); ++ ++ /* Do not try to map unpinned memory */ ++ if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failFlags); ++ } ++ ++ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); ++ PVR_GOTO_IF_INVALID_PARAM(psHeap, eError, failParams); ++ ++ if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED, failCheck); ++ } ++ ++ /* Don't map memory for deferred allocations */ ++ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) ++ { ++ PVR_ASSERT(uiProperties & DEVMEM_PROPERTIES_EXPORTABLE); ++ bMap = IMG_FALSE; ++ } ++ ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ psMemDesc->sDeviceMemDesc.ui32RefCount, ++ psMemDesc->sDeviceMemDesc.ui32RefCount+1); ++ ++ psImport = psMemDesc->psImport; ++ DevmemMemDescAcquire(psMemDesc); ++ ++ eError = DevmemImportStructDevMap(psHeap, ++ bMap, ++ psImport, ++ sDevVirtAddr.uiAddr); ++ PVR_GOTO_IF_ERROR(eError, failMap); ++ ++ sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr; ++ sDevVAddr.uiAddr += psMemDesc->uiOffset; ++ psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr; ++ psMemDesc->sDeviceMemDesc.ui32RefCount++; ++ ++ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); ++ ++ if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) ++ { ++ BridgeDevicememHistoryMap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ psMemDesc->uiOffset, ++ psMemDesc->sDeviceMemDesc.sDevVAddr, ++ psMemDesc->uiAllocSize, ++ psMemDesc->szText, ++ DevmemGetHeapLog2PageSize(psHeap), ++ psMemDesc->ui32AllocationIndex, ++ &psMemDesc->ui32AllocationIndex); ++ } ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psImport->hDevConnection), PVRSRV_BRIDGE_RI)) ++ { ++ if (psMemDesc->hRIHandle) ++ { ++ eError = BridgeRIUpdateMEMDESCAddr(GetBridgeHandle(psImport->hDevConnection), ++ psMemDesc->hRIHandle, ++ psImport->sDeviceImport.sDevVAddr); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIUpdateMEMDESCAddr"); ++ } ++ } ++#endif ++ ++ return PVRSRV_OK; ++ ++failMap: ++ bDestroyed = DevmemMemDescRelease(psMemDesc); ++failCheck: ++failParams: ++ if (!bDestroyed) ++ { ++ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); ++ } ++ PVR_ASSERT(eError != PVRSRV_OK); ++failFlags: ++ return eError; ++} ++ ++IMG_INTERNAL IMG_DEV_VIRTADDR ++DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc) ++{ ++ if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0) ++ { ++ PVR_LOG_ERROR(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, "DevmemGetDevVirtAddr"); ++ } ++ ++ PVR_ASSERT(psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr !=0 ); ++ ++ return psMemDesc->sDeviceMemDesc.sDevVAddr; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEV_VIRTADDR *psDevVirtAddr) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Do not try to map unpinned memory */ ++ if (GetImportProperties(psMemDesc->psImport) & 
DEVMEM_PROPERTIES_UNPINNED) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_MAP_REQUEST, failCheck); ++ } ++ ++ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ psMemDesc->sDeviceMemDesc.ui32RefCount, ++ psMemDesc->sDeviceMemDesc.ui32RefCount+1); ++ ++ if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_NO_MAPPING, failRelease); ++ } ++ psMemDesc->sDeviceMemDesc.ui32RefCount++; ++ ++ *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr; ++ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); ++ ++ return PVRSRV_OK; ++ ++failRelease: ++ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); ++ PVR_ASSERT(eError != PVRSRV_OK); ++failCheck: ++ return eError; ++} ++ ++IMG_INTERNAL void ++DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc) ++{ ++ PVR_ASSERT(psMemDesc != NULL); ++ ++ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ psMemDesc->sDeviceMemDesc.ui32RefCount, ++ psMemDesc->sDeviceMemDesc.ui32RefCount-1); ++ ++ PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0); ++ ++ if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0) ++ { ++ if (GetInfoPageDebugFlags(psMemDesc->psImport->hDevConnection) & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) ++ { ++ BridgeDevicememHistoryUnmap(GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ psMemDesc->uiOffset, ++ psMemDesc->sDeviceMemDesc.sDevVAddr, ++ psMemDesc->uiAllocSize, ++ psMemDesc->szText, ++ DevmemGetHeapLog2PageSize(psMemDesc->psImport->sDeviceImport.psHeap), ++ psMemDesc->ui32AllocationIndex, ++ &psMemDesc->ui32AllocationIndex); ++ } ++ ++ /* When device mapping destroyed, zero Dev VA so DevmemGetDevVirtAddr() ++ * returns 0 */ ++ if (DevmemImportStructDevUnmap(psMemDesc->psImport) == IMG_TRUE) ++ { ++ psMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr = 0; ++ } ++ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); ++ ++ DevmemMemDescRelease(psMemDesc); ++ } ++ else ++ { ++ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock); ++ } ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, ++ void **ppvCpuVirtAddr) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psMemDesc != NULL); ++ PVR_ASSERT(ppvCpuVirtAddr != NULL); ++ ++ eError = DevmemCPUMapCheckImportProperties(psMemDesc); ++ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemCPUMapCheckImportProperties"); ++ ++ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ psMemDesc->sCPUMemDesc.ui32RefCount, ++ psMemDesc->sCPUMemDesc.ui32RefCount+1); ++ ++ if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0) ++ { ++ DEVMEM_IMPORT *psImport = psMemDesc->psImport; ++ IMG_UINT8 *pui8CPUVAddr; ++ ++ DevmemMemDescAcquire(psMemDesc); ++ eError = DevmemImportStructCPUMap(psImport); ++ PVR_GOTO_IF_ERROR(eError, failMap); ++ ++ pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr; ++ pui8CPUVAddr += psMemDesc->uiOffset; ++ psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr; ++ } ++ *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr; ++ ++ VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize); ++ ++ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); ++ ++ return PVRSRV_OK; ++ ++failMap: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ psMemDesc->sCPUMemDesc.ui32RefCount--; ++ ++ if (!DevmemMemDescRelease(psMemDesc)) ++ { ++ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); ++ } ++ return eError; ++} ++ ++IMG_INTERNAL void 
++DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc, ++ void **ppvCpuVirtAddr) ++{ ++ PVR_ASSERT(psMemDesc != NULL); ++ PVR_ASSERT(ppvCpuVirtAddr != NULL); ++ ++ if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: CPU UnMapping is not possible on this allocation!", ++ __func__)); ++ return; ++ } ++ ++ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ psMemDesc->sCPUMemDesc.ui32RefCount, ++ psMemDesc->sCPUMemDesc.ui32RefCount+1); ++ ++ *ppvCpuVirtAddr = NULL; ++ if (psMemDesc->sCPUMemDesc.ui32RefCount) ++ { ++ *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr; ++ psMemDesc->sCPUMemDesc.ui32RefCount += 1; ++ } ++ ++ VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize); ++ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); ++} ++ ++IMG_INTERNAL void ++DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc) ++{ ++ PVR_ASSERT(psMemDesc != NULL); ++ ++ if (GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_NO_CPU_MAPPING) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: CPU UnMapping is not possible on this allocation!", ++ __func__)); ++ return; ++ } ++ ++ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ psMemDesc->sCPUMemDesc.ui32RefCount, ++ psMemDesc->sCPUMemDesc.ui32RefCount-1); ++ ++ PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0); ++ ++ if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0) ++ { ++ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); ++ DevmemImportStructCPUUnmap(psMemDesc->psImport); ++ DevmemMemDescRelease(psMemDesc); ++ } ++ else ++ { ++ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); ++ } ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc, ++ IMG_HANDLE *phImport) ++{ ++ if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0) ++ { ++ return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION; ++ } ++ ++ *phImport = psMemDesc->psImport->hPMR; ++ ++ return PVRSRV_OK; ++} ++ ++#if !defined(__KERNEL__) ++IMG_INTERNAL PVRSRV_ERROR ++DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc, ++ IMG_UINT64 *pui64UID) ++{ ++ DEVMEM_IMPORT *psImport = psMemDesc->psImport; ++ PVRSRV_ERROR eError; ++ ++ if (!(GetImportProperties(psImport) & (DEVMEM_PROPERTIES_IMPORTED | ++ DEVMEM_PROPERTIES_EXPORTABLE))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: This Memory (0x%p) doesn't support the functionality requested...", ++ __func__, psMemDesc)); ++ return PVRSRV_ERROR_INVALID_REQUEST; ++ } ++ ++ eError = BridgePMRGetUID(GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR, ++ pui64UID); ++ ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc, ++ IMG_HANDLE *hReservation) ++{ ++ DEVMEM_IMPORT *psImport; ++ ++ PVR_ASSERT(psMemDesc); ++ psImport = psMemDesc->psImport; ++ ++ PVR_ASSERT(psImport); ++ *hReservation = psImport->sDeviceImport.hReservation; ++ ++ return PVRSRV_OK; ++} ++ ++#endif /* !__KERNEL__ */ ++ ++/* Kernel usage of this function will only work with ++ * memdescs of buffers allocated in the FW memory context ++ * that is created in the Server ++ */ ++void ++DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc, ++ IMG_HANDLE *phPMR, ++ IMG_DEVMEM_OFFSET_T *puiPMROffset) ++{ ++ DEVMEM_IMPORT *psImport; ++ ++ PVR_ASSERT(psMemDesc); ++ *puiPMROffset = psMemDesc->uiOffset; ++ psImport = psMemDesc->psImport; ++ ++ PVR_ASSERT(psImport); ++ *phPMR = psImport->hPMR; ++} ++ ++#if 
defined(__KERNEL__) ++IMG_INTERNAL void ++DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc, ++ PVRSRV_MEMALLOCFLAGS_T *puiFlags) ++{ ++ DEVMEM_IMPORT *psImport; ++ ++ PVR_ASSERT(psMemDesc); ++ psImport = psMemDesc->psImport; ++ ++ PVR_ASSERT(psImport); ++ *puiFlags = psImport->uiFlags; ++} ++ ++IMG_INTERNAL SHARED_DEV_CONNECTION ++DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc) ++{ ++ return psMemDesc->psImport->hDevConnection; ++} ++#endif /* __KERNEL__ */ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_HANDLE hExtHandle, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ DEVMEM_MEMDESC **ppsMemDescPtr, ++ IMG_DEVMEM_SIZE_T *puiSizePtr, ++ const IMG_CHAR *pszAnnotation) ++{ ++ DEVMEM_MEMDESC *psMemDesc = NULL; ++ DEVMEM_IMPORT *psImport; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_DEVMEM_ALIGN_T uiAlign; ++ IMG_HANDLE hPMR; ++ PVRSRV_ERROR eError; ++ ++ PVR_GOTO_IF_INVALID_PARAM(ppsMemDescPtr, eError, failParams); ++ ++ eError = DevmemMemDescAlloc(&psMemDesc); ++ PVR_GOTO_IF_ERROR(eError, failMemDescAlloc); ++ ++ eError = DevmemImportStructAlloc(hDevConnection, ++ &psImport); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, failImportAlloc); ++ } ++ ++ /* Get the PMR handle and its size from the server */ ++ eError = BridgePMRLocalImportPMR(GetBridgeHandle(hDevConnection), ++ hExtHandle, ++ &hPMR, ++ &uiSize, ++ &uiAlign); ++ PVR_GOTO_IF_ERROR(eError, failImport); ++ ++ DevmemImportStructInit(psImport, ++ uiSize, ++ uiAlign, ++ uiFlags, ++ hPMR, ++ DEVMEM_PROPERTIES_IMPORTED | ++ DEVMEM_PROPERTIES_EXPORTABLE); ++ ++ DevmemMemDescInit(psMemDesc, ++ 0, ++ psImport, ++ uiSize); ++ ++ *ppsMemDescPtr = psMemDesc; ++ if (puiSizePtr) ++ *puiSizePtr = uiSize; ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI)) ++ { ++ /* Attach RI information. ++ * Set backed size to 0 since this allocation has been allocated ++ * by the same process and has been accounted for. 
*/ ++ eError = BridgeRIWriteMEMDESCEntry (GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ sizeof("^"), ++ "^", ++ psMemDesc->uiOffset, ++ psMemDesc->psImport->uiSize, ++ IMG_TRUE, ++ IMG_FALSE, ++ &(psMemDesc->hRIHandle)); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIWriteMEMDESCEntry"); ++ } ++#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ ++ ++ ++ /* Copy the allocation descriptive name and size so it can be passed ++ * to DevicememHistory when the allocation gets mapped/unmapped ++ */ ++ CheckAnnotationLength(pszAnnotation); ++ OSStringLCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN); ++ ++ return PVRSRV_OK; ++ ++failImport: ++ DevmemImportDiscard(psImport); ++failImportAlloc: ++ DevmemMemDescDiscard(psMemDesc); ++failMemDescAlloc: ++failParams: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ ++ return eError; ++} ++ ++#if !defined(__KERNEL__) ++IMG_INTERNAL PVRSRV_ERROR ++DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext, ++ IMG_DEV_VIRTADDR sDevVAddr) ++{ ++ return BridgeDevmemIsVDevAddrValid(GetBridgeHandle(psContext->hDevConnection), ++ psContext->hDevMemServerContext, ++ sDevVAddr); ++} ++ ++ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext, ++ IMG_DEV_VIRTADDR *psFaultAddress) ++{ ++ return BridgeDevmemGetFaultAddress(GetBridgeHandle(psContext->hDevConnection), ++ psContext->hDevMemServerContext, ++ psFaultAddress); ++} ++IMG_INTERNAL PVRSRV_ERROR ++DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_BOOL bInvalidate) ++{ ++ DEVMEM_IMPORT *psImport = psMemDesc->psImport; ++ return BridgeDevmemFlushDevSLCRange(GetBridgeHandle(psImport->hDevConnection), ++ psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext, ++ sDevVAddr, ++ uiSize, ++ bInvalidate); ++} ++ ++#if defined(RGX_FEATURE_FBCDC) ++IMG_INTERNAL PVRSRV_ERROR ++DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext, ++ IMG_UINT64 ui64FBSCEntries) ++{ ++ return BridgeDevmemInvalidateFBSCTable(GetBridgeHandle(psContext->hDevConnection), ++ psContext->hDevMemServerContext, ++ ui64FBSCEntries); ++} ++#endif ++ ++#endif /* !__KERNEL__ */ ++ ++IMG_INTERNAL IMG_UINT32 ++DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap) ++{ ++ return psHeap->uiLog2Quantum; ++} ++ ++IMG_INTERNAL PVRSRV_MEMALLOCFLAGS_T ++DevmemGetMemAllocFlags(DEVMEM_MEMDESC *psMemDesc) ++{ ++ return psMemDesc->psImport->uiFlags; ++} ++ ++IMG_INTERNAL IMG_DEVMEM_SIZE_T ++DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap) ++{ ++ return psHeap->uiReservedRegionSize; ++} ++ ++#if !defined(__KERNEL__) ++/**************************************************************************/ /*! ++@Function RegisterDevMemPFNotify ++@Description Registers that the application wants to be signaled when a page ++ fault occurs. ++ ++@Input psContext Memory context the process that would like to ++ be notified about. ++@Input ui32PID The PID of the calling process. ++@Input bRegister If true, register. If false, de-register. ++@Return PVRSRV_ERROR: PVRSRV_OK on success. 
Otherwise, a PVRSRV_ ++ error code ++ */ /***************************************************************************/ ++IMG_INTERNAL PVRSRV_ERROR ++RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext, ++ IMG_UINT32 ui32PID, ++ IMG_BOOL bRegister) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = BridgeDevmemIntRegisterPFNotifyKM(GetBridgeHandle(psContext->hDevConnection), ++ psContext->hDevMemServerContext, ++ ui32PID, ++ bRegister); ++ if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED) ++ { ++ PVR_LOG_ERROR(eError, "BridgeDevmemIntRegisterPFNotifyKM"); ++ } ++ ++ return eError; ++} ++#endif /* !__KERNEL__ */ ++ ++IMG_INTERNAL void ++DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped) ++{ ++ psHeap->bPremapped = IsPremapped; ++} +diff --git a/drivers/gpu/drm/img-rogue/devicemem.h b/drivers/gpu/drm/img-rogue/devicemem.h +new file mode 100644 +index 000000000000..1466eb387baf +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem.h +@@ -0,0 +1,730 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management core internal ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Services internal interface to core device memory management ++ functions that are shared between client and server code. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef SRVCLIENT_DEVICEMEM_H ++#define SRVCLIENT_DEVICEMEM_H ++ ++/****************************************************************************** ++ * * ++ * +------------+ +------------+ +--------------+ +--------------+ * ++ * | a sub- | | a sub- | | an | | allocation | * ++ * | allocation | | allocation | | allocation | | also mapped | * ++ * | | | | | in proc 1 | | into proc 2 | * ++ * +------------+ +------------+ +--------------+ +--------------+ * ++ * | | | | * ++ * +--------------+ +--------------+ +--------------+ * ++ * | page gran- | | page gran- | | page gran- | * ++ * | ular mapping | | ular mapping | | ular mapping | * ++ * +--------------+ +--------------+ +--------------+ * ++ * | | | * ++ * | | | * ++ * | | | * ++ * +--------------+ +--------------+ * ++ * | | | | * ++ * | A "P.M.R." | | A "P.M.R." | * ++ * | | | | * ++ * +--------------+ +--------------+ * ++ * * ++ ******************************************************************************/ ++ ++/* ++ All device memory allocations are ultimately a view upon (not ++ necessarily the whole of) a "PMR". ++ ++ A PMR is a "Physical Memory Resource", which may be a ++ "pre-faulted" lump of physical memory, or it may be a ++ representation of some physical memory that will be instantiated ++ at some future time. ++ ++ PMRs always represent multiple of some power-of-2 "contiguity" ++ promised by the PMR, which will allow them to be mapped in whole ++ pages into the device MMU. As memory allocations may be smaller ++ than a page, these mappings may be suballocated and thus shared ++ between multiple allocations in one process. A PMR may also be ++ mapped simultaneously into multiple device memory contexts ++ (cross-process scenario), however, for security reasons, it is not ++ legal to share a PMR "both ways" at once, that is, mapped into ++ multiple processes and divided up amongst several suballocations. ++ ++ This PMR terminology is introduced here for background ++ information, but is generally of little concern to the caller of ++ this API. This API handles suballocations and mappings, and the ++ caller thus deals primarily with MEMORY DESCRIPTORS representing ++ an allocation or suballocation, HEAPS representing ranges of ++ virtual addresses in a CONTEXT. ++*/ ++ ++/* ++ |<---------------------------context------------------------------>| ++ |<-------heap------->| |<-------heap------->|<-------heap------->| ++ |<-alloc->| | |<-alloc->|<-alloc->|| |<-alloc->| | ++*/ ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "devicemem_typedefs.h" ++#include "pdumpdefs.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_memallocflags.h" ++ ++#include "pdump.h" ++ ++#include "device_connection.h" ++ ++ ++typedef IMG_UINT32 DEVMEM_HEAPCFGID; ++#define DEVMEM_HEAPCFG_FORCLIENTS 0 ++#define DEVMEM_HEAPCFG_META 1 ++ ++ ++/* ++ In order to call the server side functions, we need a bridge handle. ++ We abstract that here, as we may wish to change its form. ++ */ ++ ++typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE; ++ ++/*************************************************************************/ /*! ++@Function DevmemUnpin ++@Description This is the counterpart to DevmemPin(). It is meant to be ++ called before repinning an allocation. ++ ++ For a detailed description see client API documentation. ++ ++@Input phMemDesc The MemDesc that is going to be unpinned. 
++
++@Return        PVRSRV_ERROR:   PVRSRV_OK on success and the memory is
++                               registered to be reclaimed. Error otherwise.
++*/ /**************************************************************************/
++IMG_INTERNAL PVRSRV_ERROR
++DevmemUnpin(DEVMEM_MEMDESC *psMemDesc);
++
++/*************************************************************************/ /*!
++@Function       DevmemPin
++@Description    This is the counterpart to DevmemUnpin(). It is meant to be
++                called after unpinning an allocation.
++
++                For a detailed description see client API documentation.
++
++@Input          psMemDesc       The MemDesc that is going to be pinned.
++
++@Return         PVRSRV_ERROR:   PVRSRV_OK on success and the allocation content
++                                was successfully restored.
++
++                                PVRSRV_ERROR_PMR_NEW_MEMORY when the content
++                                could not be restored and new physical memory
++                                was allocated.
++
++                                A different error otherwise.
++*/ /**************************************************************************/
++IMG_INTERNAL PVRSRV_ERROR
++DevmemPin(DEVMEM_MEMDESC *psMemDesc);
++
++IMG_INTERNAL PVRSRV_ERROR
++DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
++                 IMG_HANDLE *phDevmemHeap);
++
++IMG_INTERNAL PVRSRV_ERROR
++DevmemGetSize(DEVMEM_MEMDESC *psMemDesc,
++              IMG_DEVMEM_SIZE_T* puiSize);
++
++IMG_INTERNAL void
++DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc,
++                    IMG_CHAR **pszAnnotation);
++
++/*
++ * DevmemCreateContext()
++ *
++ * Create a device memory context.
++ *
++ * This must be called before any heap is created in this context.
++ *
++ * Caller to provide bridge handle which will be recorded internally and used
++ * for all future operations on items from this memory context. Caller also
++ * to provide devicenode handle, as this is used for MMU configuration and
++ * also to determine the heap configuration for the auto-instantiated heaps.
++ *
++ * Note that when compiled in services/server, the hBridge is not used and
++ * is thrown away by the "fake" direct bridge. (This may change. It is
++ * recommended that NULL be passed for the handle for now.)
++ *
++ * hDeviceNode and uiHeapBlueprintID shall together dictate which heap-config
++ * to use.
++ *
++ * This will cause the server side counterpart to be created also.
++ *
++ * If you call DevmemCreateContext() (and the call succeeds) you are promising
++ * that you will later call DevmemDestroyContext(), except for abnormal
++ * process termination in which case it is expected it will be destroyed as
++ * part of handle clean up.
++ *
++ * Caller to provide storage for the pointer to the newly created
++ * DEVMEM_CONTEXT object.
++ */
++PVRSRV_ERROR
++DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
++                    DEVMEM_HEAPCFGID uiHeapBlueprintID,
++                    DEVMEM_CONTEXT **ppsCtxPtr);
++
++/*
++ * DevmemAcquireDevPrivData()
++ *
++ * Acquire the device private data for this memory context
++ */
++PVRSRV_ERROR
++DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
++                         IMG_HANDLE *hPrivData);
++
++/*
++ * DevmemReleaseDevPrivData()
++ *
++ * Release the device private data for this memory context
++ */
++PVRSRV_ERROR
++DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx);
++
++/*
++ * DevmemDestroyContext()
++ *
++ * Undoes what was done by DevmemCreateContext()
++ */
++PVRSRV_ERROR
++DevmemDestroyContext(DEVMEM_CONTEXT *psCtx);
++
++/*
++ * DevmemCreateHeap()
++ *
++ * Create a heap in the given context.
++ *
++ * N.B. Not intended to be called directly, though it can be.
++ * Normally, heaps are instantiated at context creation time according
++ * to the specified blueprint. See DevmemCreateContext() for details.
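++ *
++ * Purely as an informal illustration (the connection handle, base
++ * address and length variables are hypothetical; error handling is
++ * elided), a context with an explicitly created heap is typically
++ * set up and torn down like this:
++ *
++ *     DEVMEM_CONTEXT *psCtx;
++ *     DEVMEM_HEAP *psHeap;
++ *
++ *     DevmemCreateContext(hDevConnection, DEVMEM_HEAPCFG_FORCLIENTS, &psCtx);
++ *     DevmemCreateHeap(psCtx, sHeapBaseAddr, uiHeapLength,
++ *                      0,             /* no reserved region */
++ *                      12,            /* log2 quantum, i.e. 4kB pages */
++ *                      0,             /* no extra import alignment */
++ *                      "ExampleHeap",
++ *                      DEVMEM_HEAPCFG_FORCLIENTS, &psHeap);
++ *     ...
++ *     DevmemDestroyHeap(psHeap);
++ *     DevmemDestroyContext(psCtx);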
++ * ++ * This will cause MMU code to set up data structures for the heap, ++ * but may not cause page tables to be modified until allocations are ++ * made from the heap. ++ * ++ * uiReservedRegionLength Reserved address space for static VAs shared ++ * between clients and firmware ++ * ++ * The "Quantum" is both the device MMU page size to be configured for ++ * this heap, and the unit multiples of which "quantized" allocations ++ * are made (allocations smaller than this, known as "suballocations" ++ * will be made from a "sub alloc RA" and will "import" chunks ++ * according to this quantum) ++ * ++ * Where imported PMRs (or, for example, PMRs created by device class ++ * buffers) are mapped into this heap, it is important that the ++ * physical contiguity guarantee offered by the PMR is greater than or ++ * equal to the quantum size specified here, otherwise the attempt to ++ * map it will fail. "Normal" allocations via Devmem_Allocate ++ * shall automatically meet this requirement, as each "import" will ++ * trigger the creation of a PMR with the desired contiguity. The ++ * supported quantum sizes in that case shall be dictated by the OS ++ * specific implementation of PhysmemNewOSRamBackedPMR() (see) ++ */ ++PVRSRV_ERROR ++DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr, ++ /* base and length of heap */ ++ IMG_DEV_VIRTADDR sBaseAddress, ++ IMG_DEVMEM_SIZE_T uiLength, ++ IMG_DEVMEM_SIZE_T uiReservedRegionLength, ++ /* log2 of allocation quantum, i.e. "page" size. ++ All allocations (that go to server side) are ++ multiples of this. We use a client-side RA to ++ make sub-allocations from this */ ++ IMG_UINT32 ui32Log2Quantum, ++ /* The minimum import alignment for this heap */ ++ IMG_UINT32 ui32Log2ImportAlignment, ++ /* Name of heap for debug */ ++ /* N.B. Okay to exist on caller's stack - this ++ func takes a copy if it needs it. */ ++ const IMG_CHAR *pszName, ++ DEVMEM_HEAPCFGID uiHeapBlueprintID, ++ DEVMEM_HEAP **ppsHeapPtr); ++/* ++ * DevmemDestroyHeap() ++ * ++ * Reverses DevmemCreateHeap() ++ * ++ * N.B. All allocations must have been freed and all mappings must ++ * have been unmapped before invoking this call ++ */ ++PVRSRV_ERROR ++DevmemDestroyHeap(DEVMEM_HEAP *psHeap); ++ ++/* ++ * DevmemExportalignAdjustSizeAndAlign() ++ * Compute the Size and Align passed to avoid suballocations ++ * (used when allocation with PVRSRV_MEMALLOCFLAG_EXPORTALIGN). ++ * ++ * Returns PVRSRV_ERROR_INVALID_PARAMS if uiLog2Quantum has invalid value. ++ */ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ IMG_DEVMEM_ALIGN_T *puiAlign); ++ ++/* ++ * DevmemSubAllocate() ++ * ++ * Makes an allocation (possibly a "suballocation", as described ++ * below) of device virtual memory from this heap. ++ * ++ * The size and alignment of the allocation will be honoured by the RA ++ * that allocates the "suballocation". The resulting allocation will ++ * be mapped into GPU virtual memory and the physical memory to back ++ * it will exist, by the time this call successfully completes. ++ * ++ * The size must be a positive integer multiple of the alignment. ++ * (i.e. the alignment specifies the alignment of both the start and ++ * the end of the resulting allocation.) ++ * ++ * Allocations made via this API are routed through a "suballocation ++ * RA" which is responsible for ensuring that small allocations can be ++ * made without wasting physical memory in the server. 
Furthermore, ++ * such suballocations can be made entirely client side without ++ * needing to go to the server unless the allocation spills into a new ++ * page. ++ * ++ * Such suballocations cause many allocations to share the same "PMR". ++ * This happens only when the flags match exactly. ++ * ++ */ ++ ++PVRSRV_ERROR ++DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier, ++ DEVMEM_HEAP *psHeap, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr); ++ ++#define DevmemAllocate(...) \ ++ DevmemSubAllocate(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__) ++ ++PVRSRV_ERROR ++DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ IMG_UINT32 uiLog2HeapPageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr); ++ ++PVRSRV_ERROR ++DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *paui32AllocPageIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pauiFreePageIndices, ++ SPARSE_MEM_RESIZE_FLAGS uiFlags); ++ ++PVRSRV_ERROR ++DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ IMG_UINT32 uiLog2HeapPageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr); ++ ++PVRSRV_ERROR ++DevmemSubAllocateAndMap(IMG_UINT8 uiPreAllocMultiplier, ++ DEVMEM_HEAP *psHeap, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr, ++ IMG_DEV_VIRTADDR *psDevVirtAddr); ++ ++#define DevmemAllocateAndMap(...) \ ++ DevmemSubAllocateAndMap(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__) ++ ++/* ++ * DevmemFree() ++ * ++ * Reverses that done by DevmemSubAllocate() N.B. The underlying ++ * mapping and server side allocation _may_ not be torn down, for ++ * example, if the allocation has been exported, or if multiple ++ * allocations were suballocated from the same mapping, but this is ++ * properly refcounted, so the caller does not have to care. ++ */ ++ ++IMG_BOOL ++DevmemFree(DEVMEM_MEMDESC *psMemDesc); ++ ++IMG_BOOL ++DevmemReleaseDevAddrAndFree(DEVMEM_MEMDESC *psMemDesc); ++ ++/* ++ DevmemMapToDevice: ++ ++ Map an allocation to the device it was allocated from. ++ This function _must_ be called before any call to ++ DevmemAcquireDevVirtAddr is made as it binds the allocation ++ to the heap. ++ DevmemReleaseDevVirtAddr is used to release the reference ++ to the device mapping this function created, but it doesn't ++ mean that the memory will actually be unmapped from the ++ device as other references to the mapping obtained via ++ DevmemAcquireDevVirtAddr could still be active. ++*/ ++PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc, ++ DEVMEM_HEAP *psHeap, ++ IMG_DEV_VIRTADDR *psDevVirtAddr); ++ ++/* ++ DevmemMapToDeviceAddress: ++ ++ Same as DevmemMapToDevice but the caller chooses the address ++ to map to. ++*/ ++IMG_INTERNAL PVRSRV_ERROR ++DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc, ++ DEVMEM_HEAP *psHeap, ++ IMG_DEV_VIRTADDR sDevVirtAddr); ++ ++/* ++ DevmemGetDevVirtAddr ++ ++ Obtain the MemDesc's device virtual address. 
++ This function _must_ be called after DevmemMapToDevice(Address)
++ and is expected to be used by functions which didn't allocate
++ the MemDesc but need to know its address.
++ If no device mapping exists it will PVR_ASSERT, and 0 will be returned.
++ */
++IMG_DEV_VIRTADDR
++DevmemGetDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
++
++/*
++ DevmemAcquireDevVirtAddr
++
++ Acquire the MemDesc's device virtual address.
++ This function _must_ be called after DevmemMapToDevice
++ and is expected to be used by functions which didn't allocate
++ the MemDesc but need to know its address.
++ */
++PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
++                                      IMG_DEV_VIRTADDR *psDevVirtAddrRet);
++
++/*
++ * DevmemReleaseDevVirtAddr()
++ *
++ * give up the licence to use the device virtual address that was
++ * acquired by "Acquire" or "MapToDevice"
++ */
++void
++DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
++
++/*
++ * DevmemAcquireCpuVirtAddr()
++ *
++ * Acquires a licence to use the cpu virtual address of this mapping.
++ * Note that the memory may not have been mapped into cpu virtual
++ * memory prior to this call. On first "acquire" the memory will be
++ * mapped in (if it wasn't statically mapped in) and on last put it
++ * _may_ become unmapped. Later calling "Acquire" again _may_ cause
++ * the memory to be mapped at a different address.
++ */
++PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
++                                      void **ppvCpuVirtAddr);
++
++/*
++ * DevmemReacquireCpuVirtAddr()
++ *
++ * (Re)acquires a licence to use the cpu virtual address of this mapping
++ * if (and only if) there is already a pre-existing licence to use the
++ * cpu virtual address for the mapping; returns NULL otherwise.
++ */
++void DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
++                                void **ppvCpuVirtAddr);
++
++/*
++ * DevmemReleaseCpuVirtAddr()
++ *
++ * give up the licence to use the cpu virtual address that was granted
++ * with the "Acquire" call.
++ */
++void
++DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc);
++
++#if defined(SUPPORT_INSECURE_EXPORT)
++/*
++ * DevmemExport()
++ *
++ * Given a memory allocation allocated with DevmemAllocateExportable(),
++ * create a "cookie" that can be passed intact by the caller's own choice
++ * of secure IPC to another process and used as the argument to "map"
++ * to map this memory into a heap in the target process. N.B. This can
++ * also be used to map into multiple heaps in one process, though that's not
++ * the intention.
++ *
++ * Note that the caller must later call Unexport before freeing the
++ * memory.
++ */
++PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc,
++                          DEVMEM_EXPORTCOOKIE *psExportCookie);
++
++
++void DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
++                    DEVMEM_EXPORTCOOKIE *psExportCookie);
++
++PVRSRV_ERROR
++DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
++             DEVMEM_EXPORTCOOKIE *psCookie,
++             PVRSRV_MEMALLOCFLAGS_T uiFlags,
++             DEVMEM_MEMDESC **ppsMemDescPtr);
++#endif /* SUPPORT_INSECURE_EXPORT */
++
++/*
++ * DevmemMakeLocalImportHandle()
++ *
++ * This is a "special case" function for making a server export cookie
++ * which went through the direct bridge into an export cookie that can
++ * be passed through the client bridge.
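++ *
++ * As an informal sketch only (the handles are hypothetical and error
++ * handling is elided), the expected pairing with DevmemLocalImport()
++ * and DevmemUnmakeLocalImportHandle() is:
++ *
++ *     IMG_HANDLE hClientExport;
++ *     DEVMEM_MEMDESC *psMemDesc;
++ *     IMG_DEVMEM_SIZE_T uiSize;
++ *
++ *     DevmemMakeLocalImportHandle(hDevConnection, hServerExport,
++ *                                 &hClientExport);
++ *     DevmemLocalImport(hDevConnection, hClientExport, uiFlags,
++ *                       &psMemDesc, &uiSize, "ExampleImport");
++ *     ...
++ *     DevmemFree(psMemDesc);
++ *     DevmemUnmakeLocalImportHandle(hDevConnection, hClientExport);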
++ */
++PVRSRV_ERROR
++DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
++                            IMG_HANDLE hServerExport,
++                            IMG_HANDLE *hClientExport);
++
++/*
++ * DevmemUnmakeLocalImportHandle()
++ *
++ * Free any resource associated with the Make operation
++ */
++PVRSRV_ERROR
++DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
++                              IMG_HANDLE hClientExport);
++
++/*
++ *
++ * The following set of functions relates to the heap "blueprint"
++ * mechanism, for automatic creation of heaps when a context is created
++ *
++ */
++
++
++/* DevmemHeapConfigCount: returns the number of heap configs that
++   this device has. Note that there are no acquire/release semantics
++   required, as this data is guaranteed to be constant for the
++   lifetime of the device node */
++PVRSRV_ERROR
++DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
++                      IMG_UINT32 *puiNumHeapConfigsOut);
++
++/* DevmemHeapCount: returns the number of heaps that a given heap
++   config on this device has. Note that there are no acquire/release
++   semantics required, as this data is guaranteed to be constant for
++   the lifetime of the device node */
++PVRSRV_ERROR
++DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
++                IMG_UINT32 uiHeapConfigIndex,
++                IMG_UINT32 *puiNumHeapsOut);
++/* DevmemHeapConfigName: return the name of the given heap config.
++   The caller must provide the storage for the returned string and
++   indicate its size in bytes (including the null terminator) in the
++   BufSz arg. Note that there are no acquire/release semantics
++   required, as this data is guaranteed to be constant for the
++   lifetime of the device node.
++ */
++PVRSRV_ERROR
++DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
++                     IMG_UINT32 uiHeapConfigIndex,
++                     IMG_CHAR *pszConfigNameOut,
++                     IMG_UINT32 uiConfigNameBufSz);
++
++/* DevmemHeapDetails: fetches all the metadata that is recorded in
++   this heap "blueprint". Namely: heap name (caller to provide
++   storage, and indicate buffer size (including null terminator) in
++   BufSz arg), device virtual address and length, log2 of data page
++   size (will be one of 12, 14, 16, 18, 20, 21, at time of writing).
++   Note that there are no acquire/release semantics required, as this
++   data is guaranteed to be constant for the lifetime of the device
++   node. */
++PVRSRV_ERROR
++DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
++                  IMG_UINT32 uiHeapConfigIndex,
++                  IMG_UINT32 uiHeapIndex,
++                  IMG_CHAR *pszHeapNameOut,
++                  IMG_UINT32 uiHeapNameBufSz,
++                  IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
++                  IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
++                  IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut,
++                  IMG_UINT32 *puiLog2DataPageSize,
++                  IMG_UINT32 *puiLog2ImportAlignmentOut);
++
++/*
++ * DevmemFindHeapByName()
++ *
++ * returns the heap handle for the named _automagic_ heap in this
++ * context. "automagic" heaps are those that are born with the
++ * context from a blueprint
++ */
++PVRSRV_ERROR
++DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx,
++                     const IMG_CHAR *pszHeapName,
++                     DEVMEM_HEAP **ppsHeapRet);
++
++/*
++ * DevmemGetHeapBaseDevVAddr()
++ *
++ * returns the device virtual address of the base of the heap.
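++ *
++ * e.g. (hypothetical heap name and context, error handling elided):
++ *
++ *     DEVMEM_HEAP *psHeap;
++ *     IMG_DEV_VIRTADDR sHeapBase;
++ *
++ *     DevmemFindHeapByName(psCtx, "ExampleHeap", &psHeap);
++ *     DevmemGetHeapBaseDevVAddr(psHeap, &sHeapBase);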
++ */
++
++PVRSRV_ERROR
++DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap,
++                          IMG_DEV_VIRTADDR *pDevVAddr);
++
++PVRSRV_ERROR
++DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
++                           IMG_HANDLE *phImport);
++
++PVRSRV_ERROR
++DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
++                   IMG_UINT64 *pui64UID);
++
++PVRSRV_ERROR
++DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
++                     IMG_HANDLE *hReservation);
++
++IMG_INTERNAL void
++DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
++                 IMG_HANDLE *hPMR,
++                 IMG_DEVMEM_OFFSET_T *puiPMROffset);
++
++IMG_INTERNAL void
++DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
++               PVRSRV_MEMALLOCFLAGS_T *puiFlags);
++
++IMG_INTERNAL SHARED_DEV_CONNECTION
++DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc);
++
++PVRSRV_ERROR
++DevmemLocalImport(SHARED_DEV_CONNECTION hDevConnection,
++                  IMG_HANDLE hExtHandle,
++                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                  DEVMEM_MEMDESC **ppsMemDescPtr,
++                  IMG_DEVMEM_SIZE_T *puiSizePtr,
++                  const IMG_CHAR *pszAnnotation);
++
++IMG_INTERNAL PVRSRV_ERROR
++DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
++                         IMG_DEV_VIRTADDR sDevVAddr);
++
++IMG_INTERNAL PVRSRV_ERROR
++DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext,
++                      IMG_DEV_VIRTADDR *psFaultAddress);
++
++IMG_INTERNAL PVRSRV_ERROR
++DevmemFlushDeviceSLCRange(DEVMEM_MEMDESC *psMemDesc,
++                          IMG_DEV_VIRTADDR sDevVAddr,
++                          IMG_DEVMEM_SIZE_T uiSize,
++                          IMG_BOOL bInvalidate);
++
++IMG_INTERNAL PVRSRV_ERROR
++DevmemInvalidateFBSCTable(DEVMEM_CONTEXT *psContext,
++                          IMG_UINT64 ui64FBSCEntries);
++
++/* DevmemGetHeapLog2PageSize()
++ *
++ * Get the log2 page size used for a given heap.
++ */
++IMG_UINT32
++DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap);
++
++/* DevmemGetMemAllocFlags()
++ *
++ * Get the memalloc flags for a given memdesc.
++ */
++PVRSRV_MEMALLOCFLAGS_T
++DevmemGetMemAllocFlags(DEVMEM_MEMDESC *psMemDesc);
++
++/* DevmemGetHeapReservedSize()
++ *
++ * Get the reserved size used for a given heap.
++ */
++IMG_DEVMEM_SIZE_T
++DevmemGetHeapReservedSize(DEVMEM_HEAP *psHeap);
++
++/*************************************************************************/ /*!
++@Function       RegisterDevmemPFNotify
++@Description    Registers that the application wants to be signaled when a page
++                fault occurs.
++
++@Input          psContext      Memory context of the process that would like
++                               to be notified about page faults.
++@Input          ui32PID        The PID of the calling process.
++@Input          bRegister      If true, register. If false, de-register.
++@Return         PVRSRV_ERROR:  PVRSRV_OK on success. Otherwise, a PVRSRV_
++                               error code.
++*/ /**************************************************************************/
++IMG_INTERNAL PVRSRV_ERROR
++RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
++                       IMG_UINT32 ui32PID,
++                       IMG_BOOL bRegister);
++
++/*************************************************************************/ /*!
++@Function       DevmemHeapSetPremapStatus
++@Description    In some special cases like virtualisation, a device memory heap
++                must be entirely backed by physical memory and mapped into the
++                device's virtual address space. This is done at context
++                creation. When objects are allocated from such a heap, the
++                mapping part must be skipped. The 'bPremapped' flag dictates
++                whether allocations are to be mapped or not.
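++
++                For illustration only (psFwHeap being a hypothetical handle
++                to such a fully pre-mapped heap), a virtualisation set-up
++                path might call
++
++                    DevmemHeapSetPremapStatus(psFwHeap, IMG_TRUE);
++
++                once the heap has been backed and mapped at context creation.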
++ ++@Input psHeap Device memory heap to be updated ++@Input IsPremapped The premapping status to be set ++*/ /**************************************************************************/ ++IMG_INTERNAL void ++DevmemHeapSetPremapStatus(DEVMEM_HEAP *psHeap, IMG_BOOL IsPremapped); ++ ++#endif /* #ifndef SRVCLIENT_DEVICEMEM_H */ +diff --git a/drivers/gpu/drm/img-rogue/devicemem_heapcfg.c b/drivers/gpu/drm/img-rogue/devicemem_heapcfg.c +new file mode 100644 +index 000000000000..f38a612cd14d +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_heapcfg.c +@@ -0,0 +1,184 @@ ++/*************************************************************************/ /*! ++@File devicemem_heapcfg.c ++@Title Device Heap Configuration Helper Functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device memory management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /***************************************************************************/
++
++/* our exported API */
++#include "devicemem_heapcfg.h"
++#include "devicemem_utils.h"
++
++#include "device.h"
++#include "img_types.h"
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "pvrsrv_error.h"
++#include "osfunc.h"
++
++#include "connection_server.h"
++
++static INLINE void _CheckBlueprintHeapAlignment(DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint)
++{
++    /* Note: OSGetPageSize() (not OSGetPageShift()) is required here, since
++     * the value is used both as a byte count and as an alignment mask below. */
++    IMG_UINT32 ui32OSPageSize = OSGetPageSize();
++
++    /* Any heap length should at least match OS page size at the minimum or
++     * a multiple of OS page size */
++    if ((psHeapBlueprint->uiHeapLength < DEVMEM_HEAP_MINIMUM_SIZE) ||
++        (psHeapBlueprint->uiHeapLength & (ui32OSPageSize - 1)))
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                "%s: Invalid Heap \"%s\" Size: "
++                "%"IMG_UINT64_FMTSPEC
++                "("IMG_DEVMEM_SIZE_FMTSPEC")",
++                __func__,
++                psHeapBlueprint->pszName,
++                psHeapBlueprint->uiHeapLength,
++                psHeapBlueprint->uiHeapLength));
++        PVR_DPF((PVR_DBG_ERROR,
++                "Heap Size should always be a non-zero value and a "
++                "multiple of OS Page Size:%u(0x%x)",
++                ui32OSPageSize, ui32OSPageSize));
++        PVR_ASSERT(psHeapBlueprint->uiHeapLength >= ui32OSPageSize);
++    }
++
++    PVR_ASSERT(psHeapBlueprint->uiReservedRegionLength % DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY == 0);
++}
++
++void HeapCfgBlueprintInit(const IMG_CHAR *pszName,
++                          IMG_UINT64 ui64HeapBaseAddr,
++                          IMG_DEVMEM_SIZE_T uiHeapLength,
++                          IMG_DEVMEM_SIZE_T uiReservedRegionLength,
++                          IMG_UINT32 ui32Log2DataPageSize,
++                          IMG_UINT32 uiLog2ImportAlignment,
++                          DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint)
++{
++    psHeapBlueprint->pszName                = pszName;
++    psHeapBlueprint->sHeapBaseAddr.uiAddr   = ui64HeapBaseAddr;
++    psHeapBlueprint->uiHeapLength           = uiHeapLength;
++    psHeapBlueprint->uiReservedRegionLength = uiReservedRegionLength;
++    psHeapBlueprint->uiLog2DataPageSize     = ui32Log2DataPageSize;
++    psHeapBlueprint->uiLog2ImportAlignment  = uiLog2ImportAlignment;
++
++    _CheckBlueprintHeapAlignment(psHeapBlueprint);
++}
++
++PVRSRV_ERROR
++HeapCfgHeapConfigCount(CONNECTION_DATA *psConnection,
++                       const PVRSRV_DEVICE_NODE *psDeviceNode,
++                       IMG_UINT32 *puiNumHeapConfigsOut)
++{
++    PVR_UNREFERENCED_PARAMETER(psConnection);
++
++    *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs;
++
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++HeapCfgHeapCount(CONNECTION_DATA *psConnection,
++                 const PVRSRV_DEVICE_NODE *psDeviceNode,
++                 IMG_UINT32 uiHeapConfigIndex,
++                 IMG_UINT32 *puiNumHeapsOut)
++{
++    if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
++    {
++        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
++    }
++
++    *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps;
++
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++HeapCfgHeapConfigName(CONNECTION_DATA *psConnection,
++                      const PVRSRV_DEVICE_NODE *psDeviceNode,
++                      IMG_UINT32 uiHeapConfigIndex,
++                      IMG_UINT32 uiHeapConfigNameBufSz,
++                      IMG_CHAR *pszHeapConfigNameOut)
++{
++    if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
++    {
++        return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
++    }
++
++    OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName);
++
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++HeapCfgHeapDetails(CONNECTION_DATA *psConnection,
++                   const PVRSRV_DEVICE_NODE *psDeviceNode,
++                   IMG_UINT32 uiHeapConfigIndex,
++                   IMG_UINT32 uiHeapIndex,
++                   IMG_UINT32
uiHeapNameBufSz, ++ IMG_CHAR *pszHeapNameOut, ++ IMG_DEV_VIRTADDR *psDevVAddrBaseOut, ++ IMG_DEVMEM_SIZE_T *puiHeapLengthOut, ++ IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, ++ IMG_UINT32 *puiLog2DataPageSizeOut, ++ IMG_UINT32 *puiLog2ImportAlignmentOut) ++{ ++ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint; ++ ++ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs) ++ { ++ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX; ++ } ++ ++ if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps) ++ { ++ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX; ++ } ++ ++ psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex]; ++ ++ OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName); ++ *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr; ++ *puiHeapLengthOut = psHeapBlueprint->uiHeapLength; ++ *puiReservedRegionLengthOut = psHeapBlueprint->uiReservedRegionLength; ++ *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize; ++ *puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment; ++ ++ return PVRSRV_OK; ++} +diff --git a/drivers/gpu/drm/img-rogue/devicemem_heapcfg.h b/drivers/gpu/drm/img-rogue/devicemem_heapcfg.h +new file mode 100644 +index 000000000000..3b032ae6072d +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_heapcfg.h +@@ -0,0 +1,184 @@ ++/**************************************************************************/ /*! ++@File ++@Title Device Heap Configuration Helper Functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device memory management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /***************************************************************************/
++
++#ifndef DEVICEMEMHEAPCFG_H
++#define DEVICEMEMHEAPCFG_H
++
++#include
++
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++/*
++ * Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID
++ */
++#define RGX_HEAP_4KB_PAGE_SHIFT (12U)
++#define RGX_HEAP_16KB_PAGE_SHIFT (14U)
++#define RGX_HEAP_64KB_PAGE_SHIFT (16U)
++#define RGX_HEAP_256KB_PAGE_SHIFT (18U)
++#define RGX_HEAP_1MB_PAGE_SHIFT (20U)
++#define RGX_HEAP_2MB_PAGE_SHIFT (21U)
++
++struct _PVRSRV_DEVICE_NODE_;
++struct _CONNECTION_DATA_;
++
++
++/*
++ A "heap config" is a blueprint to be used for initial setting up of heaps
++ when a device memory context is created.
++
++ We define a data structure to define this, but it's really down to the
++ caller to populate it. This is all expected to be in-kernel. We provide an
++ API that client code can use to enquire about the blueprint, such that it may
++ do the heap set-up during the context creation call on behalf of the user.
++*/
++
++/* Blueprint for a single heap */
++typedef struct _DEVMEM_HEAP_BLUEPRINT_
++{
++    /* Name of this heap - for debug purposes, and perhaps for lookup
++       by name */
++    const IMG_CHAR *pszName;
++
++    /* Virtual address of the beginning of the heap. This _must_ be a
++       multiple of the data page size for the heap. It is
++       _recommended_ that it be coarser than that - especially, it
++       should begin on a boundary appropriate to the MMU for the
++       device. For Rogue, this is a Page Directory boundary, or 1GB
++       (virtual address a multiple of 0x0040000000). */
++    IMG_DEV_VIRTADDR sHeapBaseAddr;
++
++    /* Length of the heap. The END address of the heap has a similar
++       restriction to that of the _beginning_ of the heap: the heap
++       length _must_ be a whole number of data pages. Again, the
++       recommendation is that it ends on a 1GB boundary. Again, this
++       is not essential, but we do know that (at the time of writing)
++       the current implementation of mmu_common.c is such that no two
++       heaps may share a page directory, thus the remaining virtual
++       space would be wasted if the length were not a multiple of 1GB */
++    IMG_DEVMEM_SIZE_T uiHeapLength;
++
++    /* The VA range from sHeapBaseAddr up to (but not including)
++       sHeapBaseAddr + uiReservedRegionLength is reserved for statically
++       defined addresses (shared/known between clients and FW).
++       Services never maps allocations into this reserved address space _unless_
++       explicitly requested via PVRSRVMapToDeviceAddress by passing sDevVirtAddr
++       which falls within this reserved range. Since this range is completely for
++       clients to manage (where allocations are page granular), it _must_ again be
++       a whole number of data pages. Additionally, another constraint enforces this
++       to be a multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY (which evaluates to
++       the max page size supported) to support varied page sizes */
++    IMG_DEVMEM_SIZE_T uiReservedRegionLength;
++
++    /* Data page size.
This is the page size that is going to get ++ programmed into the MMU, so it needs to be a valid one for the ++ device. Importantly, the start address and length _must_ be ++ multiples of this page size. Note that the page size is ++ specified as the log 2 relative to 1 byte (e.g. 12 indicates ++ 4kB) */ ++ IMG_UINT32 uiLog2DataPageSize; ++ ++ /* Import alignment. Force imports to this heap to be ++ aligned to at least this value */ ++ IMG_UINT32 uiLog2ImportAlignment; ++ ++} DEVMEM_HEAP_BLUEPRINT; ++ ++void HeapCfgBlueprintInit(const IMG_CHAR *pszName, ++ IMG_UINT64 ui64HeapBaseAddr, ++ IMG_DEVMEM_SIZE_T uiHeapLength, ++ IMG_DEVMEM_SIZE_T uiReservedRegionLength, ++ IMG_UINT32 ui32Log2DataPageSize, ++ IMG_UINT32 uiLog2ImportAlignment, ++ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint); ++ ++/* Entire named heap config */ ++typedef struct _DEVMEM_HEAP_CONFIG_ ++{ ++ /* Name of this heap config - for debug and maybe lookup */ ++ const IMG_CHAR *pszName; ++ ++ /* Number of heaps in this config */ ++ IMG_UINT32 uiNumHeaps; ++ ++ /* Array of individual heap blueprints as defined above */ ++ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray; ++} DEVMEM_HEAP_CONFIG; ++ ++ ++PVRSRV_ERROR ++HeapCfgHeapConfigCount(struct _CONNECTION_DATA_ *psConnection, ++ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ IMG_UINT32 *puiNumHeapConfigsOut ++); ++ ++PVRSRV_ERROR ++HeapCfgHeapCount(struct _CONNECTION_DATA_ *psConnection, ++ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ IMG_UINT32 uiHeapConfigIndex, ++ IMG_UINT32 *puiNumHeapsOut ++); ++ ++PVRSRV_ERROR ++HeapCfgHeapConfigName(struct _CONNECTION_DATA_ *psConnection, ++ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ IMG_UINT32 uiHeapConfigIndex, ++ IMG_UINT32 uiHeapConfigNameBufSz, ++ IMG_CHAR *pszHeapConfigNameOut ++); ++ ++PVRSRV_ERROR ++HeapCfgHeapDetails(struct _CONNECTION_DATA_ *psConnection, ++ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ IMG_UINT32 uiHeapConfigIndex, ++ IMG_UINT32 uiHeapIndex, ++ IMG_UINT32 uiHeapNameBufSz, ++ IMG_CHAR *pszHeapNameOut, ++ IMG_DEV_VIRTADDR *psDevVAddrBaseOut, ++ IMG_DEVMEM_SIZE_T *puiHeapLengthOut, ++ IMG_DEVMEM_SIZE_T *puiReservedRegionLengthOut, ++ IMG_UINT32 *puiLog2DataPageSizeOut, ++ IMG_UINT32 *puiLog2ImportAlignmentOut ++); ++ ++#endif +diff --git a/drivers/gpu/drm/img-rogue/devicemem_history_server.c b/drivers/gpu/drm/img-rogue/devicemem_history_server.c +new file mode 100644 +index 000000000000..412a51b32319 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_history_server.c +@@ -0,0 +1,1962 @@ ++/*************************************************************************/ /*! ++@File ++@Title Devicemem history functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Devicemem history functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "allocmem.h" ++#include "img_defs.h" ++#include "pmr.h" ++#include "pvrsrv.h" ++#include "pvrsrv_device.h" ++#include "pvr_debug.h" ++#include "devicemem_server.h" ++#include "lock.h" ++#include "devicemem_history_server.h" ++#include "pdump_km.h" ++#include "di_server.h" ++ ++#define ALLOCATION_LIST_NUM_ENTRIES 10000 ++ ++/* data type to hold an allocation index. ++ * we make it 16 bits wide if possible ++ */ ++#if ALLOCATION_LIST_NUM_ENTRIES <= 0xFFFF ++typedef uint16_t ALLOC_INDEX_T; ++#else ++typedef uint32_t ALLOC_INDEX_T; ++#endif ++ ++/* a record describing a single allocation known to DeviceMemHistory. ++ * this is an element in a doubly linked list of allocations ++ */ ++typedef struct _RECORD_ALLOCATION_ ++{ ++ /* time when this RECORD_ALLOCATION was created/initialised */ ++ IMG_UINT64 ui64CreationTime; ++ /* serial number of the PMR relating to this allocation */ ++ IMG_UINT64 ui64Serial; ++ /* base DevVAddr of this allocation */ ++ IMG_DEV_VIRTADDR sDevVAddr; ++ /* size in bytes of this allocation */ ++ IMG_DEVMEM_SIZE_T uiSize; ++ /* Log2 page size of this allocation's GPU pages */ ++ IMG_UINT32 ui32Log2PageSize; ++ /* Process ID (PID) this allocation belongs to */ ++ IMG_PID uiPID; ++ /* index of previous allocation in the list */ ++ ALLOC_INDEX_T ui32Prev; ++ /* index of next allocation in the list */ ++ ALLOC_INDEX_T ui32Next; ++ /* annotation/name of this allocation */ ++ IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN]; ++} RECORD_ALLOCATION; ++ ++/* each command in the circular buffer is prefixed with an 8-bit value ++ * denoting the command type ++ */ ++typedef enum _COMMAND_TYPE_ ++{ ++ COMMAND_TYPE_NONE, ++ COMMAND_TYPE_TIMESTAMP, ++ COMMAND_TYPE_MAP_ALL, ++ COMMAND_TYPE_UNMAP_ALL, ++ COMMAND_TYPE_MAP_RANGE, ++ COMMAND_TYPE_UNMAP_RANGE, ++ /* sentinel value */ ++ COMMAND_TYPE_COUNT, ++} COMMAND_TYPE; ++ ++/* Timestamp command: ++ * This command is inserted into the circular buffer to provide an updated ++ * timestamp. 
++ * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order
++ * for the whole command to fit into 8 bytes.
++ */
++typedef struct _COMMAND_TIMESTAMP_
++{
++ IMG_UINT8 aui8TimeNs[7];
++} COMMAND_TIMESTAMP;
++
++/* MAP_ALL command:
++ * This command denotes the allocation at the given index was wholly mapped
++ * in to the GPU MMU
++ */
++typedef struct _COMMAND_MAP_ALL_
++{
++ ALLOC_INDEX_T uiAllocIndex;
++} COMMAND_MAP_ALL;
++
++/* UNMAP_ALL command:
++ * This command denotes the allocation at the given index was wholly unmapped
++ * from the GPU MMU
++ * Note: COMMAND_MAP_ALL and COMMAND_UNMAP_ALL commands have the same layout.
++ */
++typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL;
++
++/* packing attributes for the MAP_RANGE command */
++#define MAP_RANGE_MAX_START ((1 << 18) - 1)
++#define MAP_RANGE_MAX_RANGE ((1 << 12) - 1)
++
++/* MAP_RANGE command:
++ * Denotes a range of pages within the given allocation being mapped.
++ * The range is expressed as [Page Index] + [Page Count]
++ * This information is packed into a 40-bit integer, in order to make
++ * the command size 8 bytes.
++ */
++
++typedef struct _COMMAND_MAP_RANGE_
++{
++ IMG_UINT8 aui8Data[5];
++ ALLOC_INDEX_T uiAllocIndex;
++} COMMAND_MAP_RANGE;
++
++/* UNMAP_RANGE command:
++ * Denotes a range of pages within the given allocation being unmapped.
++ * The range is expressed as [Page Index] + [Page Count]
++ * This information is packed into a 40-bit integer, in order to make
++ * the command size 8 bytes.
++ * Note: COMMAND_MAP_RANGE and COMMAND_UNMAP_RANGE commands have the same layout.
++ */
++typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE;
++
++/* wrapper structure for a command */
++typedef struct _COMMAND_WRAPPER_
++{
++ IMG_UINT8 ui8Type;
++ union {
++ COMMAND_TIMESTAMP sTimeStamp;
++ COMMAND_MAP_ALL sMapAll;
++ COMMAND_UNMAP_ALL sUnmapAll;
++ COMMAND_MAP_RANGE sMapRange;
++ COMMAND_UNMAP_RANGE sUnmapRange;
++ } u;
++} COMMAND_WRAPPER;
++
++/* target size for the circular buffer of commands */
++#define CIRCULAR_BUFFER_SIZE_KB 2048
++/* turn the circular buffer target size into a number of commands */
++#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER))
++
++/* index value denoting the end of a list */
++#define END_OF_LIST 0xFFFFFFFF
++#define ALLOC_INDEX_TO_PTR(idx) (&(gsDevicememHistoryData.sRecords.pasAllocations[idx]))
++#define CHECK_ALLOC_INDEX(idx) (idx < ALLOCATION_LIST_NUM_ENTRIES)
++
++/* wrapper structure for the allocation records and the commands circular buffer */
++typedef struct _RECORDS_
++{
++ RECORD_ALLOCATION *pasAllocations;
++ IMG_UINT32 ui32AllocationsListHead;
++
++ IMG_UINT32 ui32Head;
++ IMG_UINT32 ui32Tail;
++ COMMAND_WRAPPER *pasCircularBuffer;
++} RECORDS;
++
++typedef struct _DEVICEMEM_HISTORY_DATA_
++{
++ /* DI entry */
++ DI_ENTRY *psDIEntry;
++
++ RECORDS sRecords;
++ POS_LOCK hLock;
++} DEVICEMEM_HISTORY_DATA;
++
++static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData;
++
++/* gsDevicememHistoryData is static, hLock is NULL unless
++ * EnablePageFaultDebug is set and DevicememHistoryInitKM()
++ * was called.
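++ * All public entry points below therefore bracket their work with the
++ * DevicememHistoryLock()/DevicememHistoryUnlock() wrappers, which degrade
++ * to no-ops when the feature is disabled. Illustrative call pattern only,
++ * not itself part of the interface:
++ *
++ *   DevicememHistoryLock();
++ *   ... record commands / update the MRU allocation list ...
++ *   DevicememHistoryUnlock();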
++ */ ++static void DevicememHistoryLock(void) ++{ ++ if (gsDevicememHistoryData.hLock) ++ { ++ OSLockAcquire(gsDevicememHistoryData.hLock); ++ } ++} ++ ++static void DevicememHistoryUnlock(void) ++{ ++ if (gsDevicememHistoryData.hLock) ++ { ++ OSLockRelease(gsDevicememHistoryData.hLock); ++ } ++} ++ ++/* given a time stamp, calculate the age in nanoseconds */ ++static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Now, ++ IMG_UINT64 ui64Then, ++ IMG_UINT64 ui64Max) ++{ ++ if (ui64Now >= ui64Then) ++ { ++ /* no clock wrap */ ++ return ui64Now - ui64Then; ++ } ++ else ++ { ++ /* clock has wrapped */ ++ return (ui64Max - ui64Then) + ui64Now + 1; ++ } ++} ++ ++/* AcquireCBSlot: ++ * Acquire the next slot in the circular buffer and ++ * move the circular buffer head along by one ++ * Returns a pointer to the acquired slot. ++ */ ++static COMMAND_WRAPPER *AcquireCBSlot(void) ++{ ++ COMMAND_WRAPPER *psSlot; ++ ++ psSlot = &gsDevicememHistoryData.sRecords.pasCircularBuffer[gsDevicememHistoryData.sRecords.ui32Head]; ++ ++ gsDevicememHistoryData.sRecords.ui32Head = ++ (gsDevicememHistoryData.sRecords.ui32Head + 1) ++ % CIRCULAR_BUFFER_NUM_COMMANDS; ++ ++ return psSlot; ++} ++ ++/* TimeStampPack: ++ * Packs the given timestamp value into the COMMAND_TIMESTAMP structure. ++ * This takes a 64-bit nanosecond timestamp and packs it in to a 56-bit ++ * integer in the COMMAND_TIMESTAMP command. ++ */ ++static void TimeStampPack(COMMAND_TIMESTAMP *psTimeStamp, IMG_UINT64 ui64Now) ++{ ++ IMG_UINT32 i; ++ ++ for (i = 0; i < ARRAY_SIZE(psTimeStamp->aui8TimeNs); i++) ++ { ++ psTimeStamp->aui8TimeNs[i] = ui64Now & 0xFF; ++ ui64Now >>= 8; ++ } ++} ++ ++/* packing a 64-bit nanosecond into a 7-byte integer loses the ++ * top 8 bits of data. This must be taken into account when ++ * comparing a full timestamp against an unpacked timestamp ++ */ ++#define TIME_STAMP_MASK ((1LLU << 56) - 1) ++#define DO_TIME_STAMP_MASK(ns64) (ns64 & TIME_STAMP_MASK) ++ ++/* TimeStampUnpack: ++ * Unpack the timestamp value from the given COMMAND_TIMESTAMP command ++ */ ++static IMG_UINT64 TimeStampUnpack(COMMAND_TIMESTAMP *psTimeStamp) ++{ ++ IMG_UINT64 ui64TimeNs = 0; ++ IMG_UINT32 i; ++ ++ for (i = ARRAY_SIZE(psTimeStamp->aui8TimeNs); i > 0; i--) ++ { ++ ui64TimeNs <<= 8; ++ ui64TimeNs |= (IMG_UINT64) psTimeStamp->aui8TimeNs[i - 1]; ++ } ++ ++ return ui64TimeNs; ++} ++ ++#if defined(PDUMP) ++ ++static void EmitPDumpAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32AllocationIndex, ++ RECORD_ALLOCATION *psAlloc) ++{ ++ PDUMPCOMMENT(psDeviceNode, ++ "[SrvPFD] Allocation: %u" ++ " Addr: " IMG_DEV_VIRTADDR_FMTSPEC ++ " Size: " IMG_DEVMEM_SIZE_FMTSPEC ++ " Page size: %u" ++ " PID: %u" ++ " Process: %s" ++ " Name: %s", ++ ui32AllocationIndex, ++ psAlloc->sDevVAddr.uiAddr, ++ psAlloc->uiSize, ++ 1U << psAlloc->ui32Log2PageSize, ++ psAlloc->uiPID, ++ OSGetCurrentClientProcessNameKM(), ++ psAlloc->szName); ++} ++ ++static void EmitPDumpMapUnmapAll(PVRSRV_DEVICE_NODE *psDeviceNode, ++ COMMAND_TYPE eType, ++ IMG_UINT32 ui32AllocationIndex) ++{ ++ const IMG_CHAR *pszOpName; ++ ++ switch (eType) ++ { ++ case COMMAND_TYPE_MAP_ALL: ++ pszOpName = "MAP_ALL"; ++ break; ++ case COMMAND_TYPE_UNMAP_ALL: ++ pszOpName = "UNMAP_ALL"; ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapAll: Invalid type: %u", ++ eType)); ++ return; ++ ++ } ++ ++ PDUMPCOMMENT(psDeviceNode, ++ "[SrvPFD] Op: %s Allocation: %u", ++ pszOpName, ++ ui32AllocationIndex); ++} ++ ++static void EmitPDumpMapUnmapRange(PVRSRV_DEVICE_NODE *psDeviceNode, ++ 
COMMAND_TYPE eType,
++ IMG_UINT32 ui32AllocationIndex,
++ IMG_UINT32 ui32StartPage,
++ IMG_UINT32 ui32Count)
++{
++ const IMG_CHAR *pszOpName;
++
++ switch (eType)
++ {
++ case COMMAND_TYPE_MAP_RANGE:
++ pszOpName = "MAP_RANGE";
++ break;
++ case COMMAND_TYPE_UNMAP_RANGE:
++ pszOpName = "UNMAP_RANGE";
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapRange: Invalid type: %u",
++ eType));
++ return;
++ }
++
++ PDUMPCOMMENT(psDeviceNode,
++ "[SrvPFD] Op: %s Allocation: %u Start Page: %u Count: %u",
++ pszOpName,
++ ui32AllocationIndex,
++ ui32StartPage,
++ ui32Count);
++}
++
++#endif
++
++/* InsertTimeStampCommand:
++ * Insert a timestamp command into the circular buffer.
++ */
++static void InsertTimeStampCommand(IMG_UINT64 ui64Now)
++{
++ COMMAND_WRAPPER *psCommand;
++
++ psCommand = AcquireCBSlot();
++
++ psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP;
++
++ TimeStampPack(&psCommand->u.sTimeStamp, ui64Now);
++}
++
++/* InsertMapAllCommand:
++ * Insert a "MAP_ALL" command for the given allocation into the circular buffer
++ */
++static void InsertMapAllCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32AllocIndex)
++{
++ COMMAND_WRAPPER *psCommand;
++
++ psCommand = AcquireCBSlot();
++
++ psCommand->ui8Type = COMMAND_TYPE_MAP_ALL;
++ psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex;
++
++#if defined(PDUMP)
++ EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_MAP_ALL, ui32AllocIndex);
++#else
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++#endif
++}
++
++/* InsertUnmapAllCommand:
++ * Insert a "UNMAP_ALL" command for the given allocation into the circular buffer
++ */
++static void InsertUnmapAllCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32AllocIndex)
++{
++ COMMAND_WRAPPER *psCommand;
++
++ psCommand = AcquireCBSlot();
++
++ psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL;
++ psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex;
++
++#if defined(PDUMP)
++ EmitPDumpMapUnmapAll(psDeviceNode, COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex);
++#else
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++#endif
++}
++
++/* MapRangePack:
++ * Pack the given StartPage and Count values into the 40-bit representation
++ * in the MAP_RANGE command.
++ */
++static void MapRangePack(COMMAND_MAP_RANGE *psMapRange,
++ IMG_UINT32 ui32StartPage,
++ IMG_UINT32 ui32Count)
++{
++ IMG_UINT64 ui64Data;
++ IMG_UINT32 i;
++
++ /* we must encode the data into 40 bits:
++ * 18 bits for the start page index
++ * 12 bits for the range
++ */
++ PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START);
++ PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE);
++
++ ui64Data = (((IMG_UINT64) ui32StartPage) << 12) | ui32Count;
++
++ for (i = 0; i < ARRAY_SIZE(psMapRange->aui8Data); i++)
++ {
++ psMapRange->aui8Data[i] = ui64Data & 0xFF;
++ ui64Data >>= 8;
++ }
++}
++
++/* MapRangeUnpack:
++ * Unpack the StartPage and Count values from the 40-bit representation
++ * in the MAP_RANGE command.
++ */
++static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange,
++ IMG_UINT32 *pui32StartPage,
++ IMG_UINT32 *pui32Count)
++{
++ IMG_UINT64 ui64Data = 0;
++ IMG_UINT32 i;
++
++ for (i = ARRAY_SIZE(psMapRange->aui8Data); i > 0; i--)
++ {
++ ui64Data <<= 8;
++ ui64Data |= (IMG_UINT64) psMapRange->aui8Data[i - 1];
++ }
++
++ *pui32StartPage = (ui64Data >> 12);
++ *pui32Count = ui64Data & ((1 << 12) - 1);
++}
++
++/* InsertMapRangeCommand:
++ * Insert a MAP_RANGE command into the circular buffer with the given
++ * StartPage and Count values.
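++ * A worked example of the 40-bit packing used by these commands
++ * (illustration only): ui32StartPage = 0x155 and ui32Count = 10 pack as
++ * ui64Data = (0x155 << 12) | 10 = 0x15500A, stored little-endian in
++ * aui8Data[] as { 0x0A, 0x50, 0x15, 0x00, 0x00 }; MapRangeUnpack()
++ * reverses the shifts to recover 0x155 and 10.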
++ */ ++static void InsertMapRangeCommand(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32AllocIndex, ++ IMG_UINT32 ui32StartPage, ++ IMG_UINT32 ui32Count) ++{ ++ COMMAND_WRAPPER *psCommand; ++ ++ psCommand = AcquireCBSlot(); ++ ++ psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE; ++ psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex; ++ ++ MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count); ++ ++#if defined(PDUMP) ++ EmitPDumpMapUnmapRange(psDeviceNode, ++ COMMAND_TYPE_MAP_RANGE, ++ ui32AllocIndex, ++ ui32StartPage, ++ ui32Count); ++#else ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++#endif ++} ++ ++/* InsertUnmapRangeCommand: ++ * Insert a UNMAP_RANGE command into the circular buffer with the given ++ * StartPage and Count values. ++ */ ++static void InsertUnmapRangeCommand(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32AllocIndex, ++ IMG_UINT32 ui32StartPage, ++ IMG_UINT32 ui32Count) ++{ ++ COMMAND_WRAPPER *psCommand; ++ ++ psCommand = AcquireCBSlot(); ++ ++ psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE; ++ psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex; ++ ++ MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count); ++ ++#if defined(PDUMP) ++ EmitPDumpMapUnmapRange(psDeviceNode, ++ COMMAND_TYPE_UNMAP_RANGE, ++ ui32AllocIndex, ++ ui32StartPage, ++ ui32Count); ++#else ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++#endif ++} ++ ++/* InsertAllocationToList: ++ * Helper function for the allocation list. ++ * Inserts the given allocation at the head of the list, whose current head is ++ * pointed to by pui32ListHead ++ */ ++static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc) ++{ ++ RECORD_ALLOCATION *psAlloc; ++ ++ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc); ++ ++ if (*pui32ListHead == END_OF_LIST) ++ { ++ /* list is currently empty, so just replace it */ ++ *pui32ListHead = ui32Alloc; ++ psAlloc->ui32Next = psAlloc->ui32Prev = *pui32ListHead; ++ } ++ else ++ { ++ RECORD_ALLOCATION *psHeadAlloc; ++ RECORD_ALLOCATION *psTailAlloc; ++ ++ psHeadAlloc = ALLOC_INDEX_TO_PTR(*pui32ListHead); ++ psTailAlloc = ALLOC_INDEX_TO_PTR(psHeadAlloc->ui32Prev); ++ ++ /* make the new alloc point forwards to the previous head */ ++ psAlloc->ui32Next = *pui32ListHead; ++ /* make the new alloc point backwards to the previous tail */ ++ psAlloc->ui32Prev = psHeadAlloc->ui32Prev; ++ ++ /* the head is now our new alloc */ ++ *pui32ListHead = ui32Alloc; ++ ++ /* the old head now points back to the new head */ ++ psHeadAlloc->ui32Prev = *pui32ListHead; ++ ++ /* the tail now points forward to the new head */ ++ psTailAlloc->ui32Next = ui32Alloc; ++ } ++} ++ ++static void InsertAllocationToBusyList(IMG_UINT32 ui32Alloc) ++{ ++ InsertAllocationToList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc); ++} ++ ++/* RemoveAllocationFromList: ++ * Helper function for the allocation list. 
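++ * The list is circular and doubly linked through the ui32Prev/ui32Next
++ * array indices in RECORD_ALLOCATION rather than through pointers, so
++ * removal is O(1) and needs no traversal.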
++ * Removes the given allocation from the list, whose head is ++ * pointed to by pui32ListHead ++ */ ++static void RemoveAllocationFromList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc) ++{ ++ RECORD_ALLOCATION *psAlloc; ++ ++ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc); ++ ++ /* if this is the only element in the list then just make the list empty */ ++ if ((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc)) ++ { ++ *pui32ListHead = END_OF_LIST; ++ } ++ else ++ { ++ RECORD_ALLOCATION *psPrev, *psNext; ++ ++ psPrev = ALLOC_INDEX_TO_PTR(psAlloc->ui32Prev); ++ psNext = ALLOC_INDEX_TO_PTR(psAlloc->ui32Next); ++ ++ /* remove the allocation from the list */ ++ psPrev->ui32Next = psAlloc->ui32Next; ++ psNext->ui32Prev = psAlloc->ui32Prev; ++ ++ /* if this allocation is the head then update the head */ ++ if (*pui32ListHead == ui32Alloc) ++ { ++ *pui32ListHead = psAlloc->ui32Prev; ++ } ++ } ++} ++ ++static void RemoveAllocationFromBusyList(IMG_UINT32 ui32Alloc) ++{ ++ RemoveAllocationFromList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc); ++} ++ ++/* TouchBusyAllocation: ++ * Move the given allocation to the head of the list ++ */ ++static void TouchBusyAllocation(IMG_UINT32 ui32Alloc) ++{ ++ RemoveAllocationFromBusyList(ui32Alloc); ++ InsertAllocationToBusyList(ui32Alloc); ++} ++ ++/* GetOldestBusyAllocation: ++ * Returns the index of the oldest allocation in the MRU list ++ */ ++static IMG_UINT32 GetOldestBusyAllocation(void) ++{ ++ IMG_UINT32 ui32Alloc; ++ RECORD_ALLOCATION *psAlloc; ++ ++ ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead; ++ ++ if (ui32Alloc == END_OF_LIST) ++ { ++ return END_OF_LIST; ++ } ++ ++ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc); ++ ++ return psAlloc->ui32Prev; ++} ++ ++static IMG_UINT32 GetFreeAllocation(void) ++{ ++ IMG_UINT32 ui32Alloc; ++ ++ ui32Alloc = GetOldestBusyAllocation(); ++ ++ return ui32Alloc; ++} ++ ++ ++/* InitialiseAllocation: ++ * Initialise the given allocation structure with the given properties ++ */ ++static void InitialiseAllocation(RECORD_ALLOCATION *psAlloc, ++ const IMG_CHAR *pszName, ++ IMG_UINT64 ui64Serial, ++ IMG_PID uiPID, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 ui32Log2PageSize) ++{ ++ OSStringLCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName)); ++ psAlloc->ui64Serial = ui64Serial; ++ psAlloc->uiPID = uiPID; ++ psAlloc->sDevVAddr = sDevVAddr; ++ psAlloc->uiSize = uiSize; ++ psAlloc->ui32Log2PageSize = ui32Log2PageSize; ++ psAlloc->ui64CreationTime = OSClockns64(); ++} ++ ++/* CreateAllocation: ++ * Creates a new allocation with the given properties then outputs the ++ * index of the allocation ++ */ ++static PVRSRV_ERROR CreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, ++ const IMG_CHAR *pszName, ++ IMG_UINT64 ui64Serial, ++ IMG_PID uiPID, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_BOOL bAutoPurge, ++ IMG_UINT32 *puiAllocationIndex) ++{ ++ IMG_UINT32 ui32Alloc; ++ RECORD_ALLOCATION *psAlloc; ++ ++ ui32Alloc = GetFreeAllocation(); ++ ++ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc); ++ ++ InitialiseAllocation(ALLOC_INDEX_TO_PTR(ui32Alloc), ++ pszName, ++ ui64Serial, ++ uiPID, ++ sDevVAddr, ++ uiSize, ++ ui32Log2PageSize); ++ ++ /* put the newly initialised allocation at the front of the MRU list */ ++ TouchBusyAllocation(ui32Alloc); ++ ++ *puiAllocationIndex = ui32Alloc; ++ ++#if defined(PDUMP) ++ EmitPDumpAllocation(psDeviceNode, ui32Alloc, psAlloc); ++#else ++ 
PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/* MatchAllocation: ++ * Tests if the allocation at the given index matches the supplied properties. ++ * Returns IMG_TRUE if it is a match, otherwise IMG_FALSE. ++ */ ++static IMG_BOOL MatchAllocation(IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT64 ui64Serial, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR *pszName, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_PID uiPID) ++{ ++ RECORD_ALLOCATION *psAlloc; ++ ++ psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocationIndex); ++ ++ return (psAlloc->ui64Serial == ui64Serial) && ++ (psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) && ++ (psAlloc->uiSize == uiSize) && ++ (psAlloc->ui32Log2PageSize == ui32Log2PageSize) && ++ (OSStringNCompare(psAlloc->szName, pszName, DEVMEM_ANNOTATION_MAX_LEN) == 0); ++} ++ ++/* FindOrCreateAllocation: ++ * Convenience function. ++ * Given a set of allocation properties (serial, DevVAddr, size, name, etc), ++ * this function will look for an existing record of this allocation and ++ * create the allocation if there is no existing record ++ */ ++static PVRSRV_ERROR FindOrCreateAllocation(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32AllocationIndexHint, ++ IMG_UINT64 ui64Serial, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const char *pszName, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_PID uiPID, ++ IMG_BOOL bSparse, ++ IMG_UINT32 *pui32AllocationIndexOut, ++ IMG_BOOL *pbCreated) ++{ ++ IMG_UINT32 ui32AllocationIndex; ++ PVRSRV_ERROR eError; ++ ++ if (ui32AllocationIndexHint != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) ++ { ++ IMG_BOOL bHaveAllocation; ++ ++ /* first, try to match against the index given by the client. ++ * if the caller provided a hint but the allocation record is no longer ++ * there, it must have been purged, so go ahead and create a new allocation ++ */ ++ bHaveAllocation = MatchAllocation(ui32AllocationIndexHint, ++ ui64Serial, ++ sDevVAddr, ++ uiSize, ++ pszName, ++ ui32Log2PageSize, ++ uiPID); ++ if (bHaveAllocation) ++ { ++ *pbCreated = IMG_FALSE; ++ *pui32AllocationIndexOut = ui32AllocationIndexHint; ++ return PVRSRV_OK; ++ } ++ } ++ ++ /* if there is no record of the allocation then we ++ * create it now ++ */ ++ eError = CreateAllocation(psDeviceNode, ++ pszName, ++ ui64Serial, ++ uiPID, ++ sDevVAddr, ++ uiSize, ++ ui32Log2PageSize, ++ IMG_TRUE, ++ &ui32AllocationIndex); ++ ++ if (eError == PVRSRV_OK) ++ { ++ *pui32AllocationIndexOut = ui32AllocationIndex; ++ *pbCreated = IMG_TRUE; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to create record for allocation %s", ++ __func__, ++ pszName)); ++ } ++ ++ return eError; ++} ++ ++/* GenerateMapUnmapCommandsForSparsePMR: ++ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the PMR's ++ * current mapping table ++ * ++ * PMR: The PMR whose mapping table to read. ++ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to. ++ * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping ++ * ++ * This function goes through every page in the PMR's mapping table and looks for ++ * virtually contiguous ranges to record as being mapped or unmapped. 
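++ * For example (illustration only): a mapping table of
++ * { valid, valid, invalid, valid } yields two commands, one for the run
++ * starting at page 0 with count 2 and one for the run starting at
++ * page 3 with count 1.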
++ */
++static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR,
++ IMG_UINT32 ui32AllocIndex,
++ IMG_BOOL bMap)
++{
++ PMR_MAPPING_TABLE *psMappingTable;
++ IMG_UINT32 ui32DonePages = 0;
++ IMG_UINT32 ui32NumPages;
++ IMG_UINT32 i;
++ IMG_BOOL bInARun = IMG_FALSE;
++ IMG_UINT32 ui32CurrentStart = 0;
++ IMG_UINT32 ui32RunCount = 0;
++
++ psMappingTable = PMR_GetMappingTable(psPMR);
++ ui32NumPages = psMappingTable->ui32NumPhysChunks;
++
++ if (ui32NumPages == 0)
++ {
++ /* nothing to do */
++ return;
++ }
++
++ for (i = 0; i < psMappingTable->ui32NumVirtChunks; i++)
++ {
++ if (psMappingTable->aui32Translation[i] != TRANSLATION_INVALID)
++ {
++ if (!bInARun)
++ {
++ bInARun = IMG_TRUE;
++ ui32CurrentStart = i;
++ ui32RunCount = 1;
++ }
++ else
++ {
++ ui32RunCount++;
++ }
++ }
++
++ if (bInARun)
++ {
++ /* test if we need to end this current run and generate the command,
++ * either because the next page is not virtually contiguous
++ * to the current page, we have reached the maximum range,
++ * or this is the last page in the mapping table
++ */
++ if ((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) ||
++ (ui32RunCount == MAP_RANGE_MAX_RANGE) ||
++ (i == (psMappingTable->ui32NumVirtChunks - 1)))
++ {
++ if (bMap)
++ {
++ InsertMapRangeCommand(PMR_DeviceNode(psPMR),
++ ui32AllocIndex,
++ ui32CurrentStart,
++ ui32RunCount);
++ }
++ else
++ {
++ InsertUnmapRangeCommand(PMR_DeviceNode(psPMR),
++ ui32AllocIndex,
++ ui32CurrentStart,
++ ui32RunCount);
++ }
++
++ ui32DonePages += ui32RunCount;
++
++ if (ui32DonePages == ui32NumPages)
++ {
++ break;
++ }
++
++ bInARun = IMG_FALSE;
++ }
++ }
++ }
++
++}
++
++/* GenerateMapUnmapCommandsForChangeList:
++ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the
++ * list of page change (page map or page unmap) indices given.
++ *
++ * ui32NumPages: Number of pages which have changed.
++ * pui32PageList: List of indices of the pages which have changed.
++ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP range commands to.
++ * bMap: Set to TRUE for mapping or IMG_FALSE for unmapping
++ *
++ * This function goes through every page in the list and looks for
++ * virtually contiguous ranges to record as being mapped or unmapped.
++ */
++static void GenerateMapUnmapCommandsForChangeList(PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32NumPages,
++ IMG_UINT32 *pui32PageList,
++ IMG_UINT32 ui32AllocIndex,
++ IMG_BOOL bMap)
++{
++ IMG_UINT32 i;
++ IMG_BOOL bInARun = IMG_FALSE;
++ IMG_UINT32 ui32CurrentStart = 0;
++ IMG_UINT32 ui32RunCount = 0;
++
++ for (i = 0; i < ui32NumPages; i++)
++ {
++ if (!bInARun)
++ {
++ bInARun = IMG_TRUE;
++ ui32CurrentStart = pui32PageList[i];
++ }
++
++ ui32RunCount++;
++
++ /* we flush if:
++ * - the next page in the list is not one greater than the current page
++ * - this is the last page in the list
++ * - we have reached the maximum range size
++ */
++ if ((i == (ui32NumPages - 1)) ||
++ ((pui32PageList[i] + 1) != pui32PageList[i + 1]) ||
++ (ui32RunCount == MAP_RANGE_MAX_RANGE))
++ {
++ if (bMap)
++ {
++ InsertMapRangeCommand(psDeviceNode,
++ ui32AllocIndex,
++ ui32CurrentStart,
++ ui32RunCount);
++ }
++ else
++ {
++ InsertUnmapRangeCommand(psDeviceNode,
++ ui32AllocIndex,
++ ui32CurrentStart,
++ ui32RunCount);
++ }
++
++ bInARun = IMG_FALSE;
++ ui32RunCount = 0;
++ }
++ }
++}
++
++/* DevicememHistoryMapKM:
++ * Entry point for when an allocation is mapped into the GPU MMU
++ *
++ * psPMR: The PMR to which the allocation belongs.
++ * ui32Offset: The offset within the PMR at which the allocation begins. ++ * sDevVAddr: The DevVAddr at which the allocation begins. ++ * szName: Annotation/name for the allocation. ++ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form. ++ * ui32AllocationIndex: Allocation index as provided by the client. ++ * We will use this as a short-cut to find the allocation ++ * in our records. ++ * pui32AllocationIndexOut: An updated allocation index for the client. ++ * This may be a new value if we just created the ++ * allocation record. ++ */ ++PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, ++ IMG_UINT32 ui32Offset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const char szName[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 *pui32AllocationIndexOut) ++{ ++ IMG_BOOL bSparse = PMR_IsSparse(psPMR); ++ IMG_UINT64 ui64Serial; ++ IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); ++ PVRSRV_ERROR eError; ++ IMG_BOOL bCreated; ++ ++ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && ++ !CHECK_ALLOC_INDEX(ui32AllocationIndex)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", ++ __func__, ++ ui32AllocationIndex)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ PMRGetUID(psPMR, &ui64Serial); ++ ++ DevicememHistoryLock(); ++ ++ eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR), ++ ui32AllocationIndex, ++ ui64Serial, ++ sDevVAddr, ++ uiSize, ++ szName, ++ ui32Log2PageSize, ++ uiPID, ++ bSparse, ++ &ui32AllocationIndex, ++ &bCreated); ++ ++ if ((eError == PVRSRV_OK) && !bCreated) ++ { ++ /* touch the allocation so it goes to the head of our MRU list */ ++ TouchBusyAllocation(ui32AllocationIndex); ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", ++ __func__, ++ szName, ++ PVRSRVGETERRORSTRING(eError))); ++ goto out_unlock; ++ } ++ ++ if (!bSparse) ++ { ++ InsertMapAllCommand(PMR_DeviceNode(psPMR), ui32AllocationIndex); ++ } ++ else ++ { ++ GenerateMapUnmapCommandsForSparsePMR(psPMR, ++ ui32AllocationIndex, ++ IMG_TRUE); ++ } ++ ++ InsertTimeStampCommand(OSClockns64()); ++ ++ *pui32AllocationIndexOut = ui32AllocationIndex; ++ ++out_unlock: ++ DevicememHistoryUnlock(); ++ ++ return eError; ++} ++ ++static void VRangeInsertMapUnmapCommands(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bMap, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_DEV_VIRTADDR sBaseDevVAddr, ++ IMG_UINT32 ui32StartPage, ++ IMG_UINT32 ui32NumPages, ++ const IMG_CHAR *pszName) ++{ ++ while (ui32NumPages > 0) ++ { ++ IMG_UINT32 ui32PagesToAdd; ++ ++ ui32PagesToAdd = MIN(ui32NumPages, MAP_RANGE_MAX_RANGE); ++ ++ if (ui32StartPage > MAP_RANGE_MAX_START) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "Cannot record %s range beginning at page " ++ "%u on allocation %s", ++ bMap ? 
"map" : "unmap", ++ ui32StartPage, ++ pszName)); ++ return; ++ } ++ ++ if (bMap) ++ { ++ InsertMapRangeCommand(psDeviceNode, ++ ui32AllocationIndex, ++ ui32StartPage, ++ ui32PagesToAdd); ++ } ++ else ++ { ++ InsertUnmapRangeCommand(psDeviceNode, ++ ui32AllocationIndex, ++ ui32StartPage, ++ ui32PagesToAdd); ++ } ++ ++ ui32StartPage += ui32PagesToAdd; ++ ui32NumPages -= ui32PagesToAdd; ++ } ++} ++ ++PVRSRV_ERROR DevicememHistoryMapVRangeKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEV_VIRTADDR sBaseDevVAddr, ++ IMG_UINT32 ui32StartPage, ++ IMG_UINT32 ui32NumPages, ++ IMG_DEVMEM_SIZE_T uiAllocSize, ++ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 *pui32AllocationIndexOut) ++{ ++ IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); ++ PVRSRV_ERROR eError; ++ IMG_BOOL bCreated; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && ++ !CHECK_ALLOC_INDEX(ui32AllocationIndex)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", ++ __func__, ++ ui32AllocationIndex)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ DevicememHistoryLock(); ++ ++ eError = FindOrCreateAllocation(psDeviceNode, ++ ui32AllocationIndex, ++ 0, ++ sBaseDevVAddr, ++ uiAllocSize, ++ szName, ++ ui32Log2PageSize, ++ uiPID, ++ IMG_FALSE, ++ &ui32AllocationIndex, ++ &bCreated); ++ ++ if ((eError == PVRSRV_OK) && !bCreated) ++ { ++ /* touch the allocation so it goes to the head of our MRU list */ ++ TouchBusyAllocation(ui32AllocationIndex); ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", ++ __func__, ++ szName, ++ PVRSRVGETERRORSTRING(eError))); ++ goto out_unlock; ++ } ++ ++ VRangeInsertMapUnmapCommands(psDeviceNode, ++ IMG_TRUE, ++ ui32AllocationIndex, ++ sBaseDevVAddr, ++ ui32StartPage, ++ ui32NumPages, ++ szName); ++ ++ *pui32AllocationIndexOut = ui32AllocationIndex; ++ ++out_unlock: ++ DevicememHistoryUnlock(); ++ ++ return eError; ++ ++} ++ ++PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEV_VIRTADDR sBaseDevVAddr, ++ IMG_UINT32 ui32StartPage, ++ IMG_UINT32 ui32NumPages, ++ IMG_DEVMEM_SIZE_T uiAllocSize, ++ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 *pui32AllocationIndexOut) ++{ ++ IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); ++ PVRSRV_ERROR eError; ++ IMG_BOOL bCreated; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && ++ !CHECK_ALLOC_INDEX(ui32AllocationIndex)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", ++ __func__, ++ ui32AllocationIndex)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ DevicememHistoryLock(); ++ ++ eError = FindOrCreateAllocation(psDeviceNode, ++ ui32AllocationIndex, ++ 0, ++ sBaseDevVAddr, ++ uiAllocSize, ++ szName, ++ ui32Log2PageSize, ++ uiPID, ++ IMG_FALSE, ++ &ui32AllocationIndex, ++ &bCreated); ++ ++ if ((eError == PVRSRV_OK) && !bCreated) ++ { ++ /* touch the allocation so it goes to the head of our MRU list */ ++ TouchBusyAllocation(ui32AllocationIndex); ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", ++ __func__, ++ szName, ++ PVRSRVGETERRORSTRING(eError))); ++ goto out_unlock; ++ } ++ ++ 
VRangeInsertMapUnmapCommands(psDeviceNode,
++ IMG_FALSE,
++ ui32AllocationIndex,
++ sBaseDevVAddr,
++ ui32StartPage,
++ ui32NumPages,
++ szName);
++
++ *pui32AllocationIndexOut = ui32AllocationIndex;
++
++out_unlock:
++ DevicememHistoryUnlock();
++
++ return eError;
++}
++
++
++
++/* DevicememHistoryUnmapKM:
++ * Entry point for when an allocation is unmapped from the GPU MMU
++ *
++ * psPMR: The PMR to which the allocation belongs.
++ * ui32Offset: The offset within the PMR at which the allocation begins.
++ * sDevVAddr: The DevVAddr at which the allocation begins.
++ * szName: Annotation/name for the allocation.
++ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
++ * ui32AllocationIndex: Allocation index as provided by the client.
++ * We will use this as a short-cut to find the allocation
++ * in our records.
++ * pui32AllocationIndexOut: An updated allocation index for the client.
++ * This may be a new value if we just created the
++ * allocation record.
++ */
++PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
++ IMG_UINT32 ui32Offset,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEVMEM_SIZE_T uiSize,
++ const char szName[DEVMEM_ANNOTATION_MAX_LEN],
++ IMG_UINT32 ui32Log2PageSize,
++ IMG_UINT32 ui32AllocationIndex,
++ IMG_UINT32 *pui32AllocationIndexOut)
++{
++ IMG_BOOL bSparse = PMR_IsSparse(psPMR);
++ IMG_UINT64 ui64Serial;
++ IMG_PID uiPID = OSGetCurrentClientProcessIDKM();
++ PVRSRV_ERROR eError;
++ IMG_BOOL bCreated;
++
++ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
++ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
++ __func__,
++ ui32AllocationIndex));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ PMRGetUID(psPMR, &ui64Serial);
++
++ DevicememHistoryLock();
++
++ eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR),
++ ui32AllocationIndex,
++ ui64Serial,
++ sDevVAddr,
++ uiSize,
++ szName,
++ ui32Log2PageSize,
++ uiPID,
++ bSparse,
++ &ui32AllocationIndex,
++ &bCreated);
++
++ if ((eError == PVRSRV_OK) && !bCreated)
++ {
++ /* touch the allocation so it goes to the head of our MRU list */
++ TouchBusyAllocation(ui32AllocationIndex);
++ }
++ else if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
++ __func__,
++ szName,
++ PVRSRVGETERRORSTRING(eError)));
++ goto out_unlock;
++ }
++
++ if (!bSparse)
++ {
++ InsertUnmapAllCommand(PMR_DeviceNode(psPMR), ui32AllocationIndex);
++ }
++ else
++ {
++ GenerateMapUnmapCommandsForSparsePMR(psPMR,
++ ui32AllocationIndex,
++ IMG_FALSE);
++ }
++
++ InsertTimeStampCommand(OSClockns64());
++
++ *pui32AllocationIndexOut = ui32AllocationIndex;
++
++out_unlock:
++ DevicememHistoryUnlock();
++
++ return eError;
++}
++
++/* DevicememHistorySparseChangeKM:
++ * Entry point for when a sparse allocation is changed, such that some of the
++ * pages within the sparse allocation are mapped or unmapped.
++ *
++ * psPMR: The PMR to which the allocation belongs.
++ * ui32Offset: The offset within the PMR at which the allocation begins.
++ * sDevVAddr: The DevVAddr at which the allocation begins.
++ * szName: Annotation/name for the allocation.
++ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
++ * ui32AllocPageCount: Number of pages which have been mapped.
++ * paui32AllocPageIndices: Indices of pages which have been mapped.
++ * ui32FreePageCount: Number of pages which have been unmapped.
++ * paui32FreePageIndices: Indices of pages which have been unmapped.
++ * ui32AllocationIndex: Allocation index as provided by the client. ++ * We will use this as a short-cut to find the allocation ++ * in our records. ++ * pui32AllocationIndexOut: An updated allocation index for the client. ++ * This may be a new value if we just created the ++ * allocation record. ++ */ ++PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, ++ IMG_UINT32 ui32Offset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const char szName[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *paui32AllocPageIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *paui32FreePageIndices, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 *pui32AllocationIndexOut) ++{ ++ IMG_UINT64 ui64Serial; ++ IMG_PID uiPID = OSGetCurrentClientProcessIDKM(); ++ PVRSRV_ERROR eError; ++ IMG_BOOL bCreated; ++ ++ if ((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) && ++ !CHECK_ALLOC_INDEX(ui32AllocationIndex)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u", ++ __func__, ++ ui32AllocationIndex)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ PMRGetUID(psPMR, &ui64Serial); ++ ++ DevicememHistoryLock(); ++ ++ eError = FindOrCreateAllocation(PMR_DeviceNode(psPMR), ++ ui32AllocationIndex, ++ ui64Serial, ++ sDevVAddr, ++ uiSize, ++ szName, ++ ui32Log2PageSize, ++ uiPID, ++ IMG_TRUE /* bSparse */, ++ &ui32AllocationIndex, ++ &bCreated); ++ ++ if ((eError == PVRSRV_OK) && !bCreated) ++ { ++ /* touch the allocation so it goes to the head of our MRU list */ ++ TouchBusyAllocation(ui32AllocationIndex); ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)", ++ __func__, ++ szName, ++ PVRSRVGETERRORSTRING(eError))); ++ goto out_unlock; ++ } ++ ++ GenerateMapUnmapCommandsForChangeList(PMR_DeviceNode(psPMR), ++ ui32AllocPageCount, ++ paui32AllocPageIndices, ++ ui32AllocationIndex, ++ IMG_TRUE); ++ ++ GenerateMapUnmapCommandsForChangeList(PMR_DeviceNode(psPMR), ++ ui32FreePageCount, ++ paui32FreePageIndices, ++ ui32AllocationIndex, ++ IMG_FALSE); ++ ++ InsertTimeStampCommand(OSClockns64()); ++ ++ *pui32AllocationIndexOut = ui32AllocationIndex; ++ ++out_unlock: ++ DevicememHistoryUnlock(); ++ ++ return eError; ++ ++} ++ ++/* CircularBufferIterateStart: ++ * Initialise local state for iterating over the circular buffer ++ */ ++static void CircularBufferIterateStart(IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter) ++{ ++ *pui32Head = gsDevicememHistoryData.sRecords.ui32Head; ++ ++ if (*pui32Head != 0) ++ { ++ *pui32Iter = *pui32Head - 1; ++ } ++ else ++ { ++ *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1; ++ } ++} ++ ++/* CircularBufferIteratePrevious: ++ * Iterate to the previous item in the circular buffer. ++ * This is called repeatedly to iterate over the whole circular buffer. ++ */ ++static COMMAND_WRAPPER *CircularBufferIteratePrevious(IMG_UINT32 ui32Head, ++ IMG_UINT32 *pui32Iter, ++ COMMAND_TYPE *peType, ++ IMG_BOOL *pbLast) ++{ ++ IMG_UINT8 *pui8Header; ++ COMMAND_WRAPPER *psOut = NULL; ++ ++ psOut = gsDevicememHistoryData.sRecords.pasCircularBuffer + *pui32Iter; ++ ++ pui8Header = (void *) psOut; ++ ++ /* Check the command looks valid. ++ * this condition should never happen, but check for it anyway ++ * and try to handle it ++ */ ++ if (*pui8Header >= COMMAND_TYPE_COUNT) ++ { ++ /* invalid header detected. Circular buffer corrupted? 
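++ * Whatever the cause, the recovery below treats this entry as the end
++ * of the data: flagging it as the last command stops the iteration
++ * rather than letting it walk further through a corrupt buffer.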
*/
++ PVR_DPF((PVR_DBG_ERROR, "CircularBufferIteratePrevious: "
++ "Invalid header: %u",
++ *pui8Header));
++ *pbLast = IMG_TRUE;
++ return NULL;
++ }
++
++ *peType = *pui8Header;
++
++ if (*pui32Iter != 0)
++ {
++ (*pui32Iter)--;
++ }
++ else
++ {
++ *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
++ }
++
++
++ /* inform the caller this is the last command if either we have reached
++ * the head (where we started) or if we have reached an empty command,
++ * which means we have covered all populated entries
++ */
++ if ((*pui32Iter == ui32Head) || (*peType == COMMAND_TYPE_NONE))
++ {
++ /* this is the final iteration */
++ *pbLast = IMG_TRUE;
++ }
++
++ return psOut;
++}
++
++/* MapUnmapCommandGetInfo:
++ * Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL,
++ * MAP_RANGE or UNMAP_RANGE command
++ */
++static void MapUnmapCommandGetInfo(COMMAND_WRAPPER *psCommand,
++ COMMAND_TYPE eType,
++ IMG_DEV_VIRTADDR *psDevVAddrStart,
++ IMG_DEV_VIRTADDR *psDevVAddrEnd,
++ IMG_BOOL *pbMap,
++ IMG_UINT32 *pui32AllocIndex)
++{
++ if ((eType == COMMAND_TYPE_MAP_ALL) || ((eType == COMMAND_TYPE_UNMAP_ALL)))
++ {
++ COMMAND_MAP_ALL *psMapAll = &psCommand->u.sMapAll;
++ RECORD_ALLOCATION *psAlloc;
++
++ *pbMap = (eType == COMMAND_TYPE_MAP_ALL);
++ *pui32AllocIndex = psMapAll->uiAllocIndex;
++
++ psAlloc = ALLOC_INDEX_TO_PTR(psMapAll->uiAllocIndex);
++
++ *psDevVAddrStart = psAlloc->sDevVAddr;
++ psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1;
++ }
++ else if ((eType == COMMAND_TYPE_MAP_RANGE) || ((eType == COMMAND_TYPE_UNMAP_RANGE)))
++ {
++ COMMAND_MAP_RANGE *psMapRange = &psCommand->u.sMapRange;
++ RECORD_ALLOCATION *psAlloc;
++ IMG_UINT32 ui32StartPage, ui32Count;
++
++ *pbMap = (eType == COMMAND_TYPE_MAP_RANGE);
++ *pui32AllocIndex = psMapRange->uiAllocIndex;
++
++ psAlloc = ALLOC_INDEX_TO_PTR(psMapRange->uiAllocIndex);
++
++ MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count);
++
++ psDevVAddrStart->uiAddr = psAlloc->sDevVAddr.uiAddr +
++ ((1ULL << psAlloc->ui32Log2PageSize) * ui32StartPage);
++
++ psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr +
++ ((1ULL << psAlloc->ui32Log2PageSize) * ui32Count) - 1;
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid command type: %u",
++ __func__,
++ eType));
++ }
++}
++
++/* DevicememHistoryQuery:
++ * Entry point for rgxdebug to look up addresses relating to a page fault
++ */
++IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
++ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
++ IMG_UINT32 ui32PageSizeBytes,
++ IMG_BOOL bMatchAnyAllocInPage)
++{
++ IMG_UINT32 ui32Head, ui32Iter;
++ COMMAND_TYPE eType = COMMAND_TYPE_NONE;
++ COMMAND_WRAPPER *psCommand = NULL;
++ IMG_BOOL bLast = IMG_FALSE;
++ IMG_UINT64 ui64StartTime = OSClockns64();
++ IMG_UINT64 ui64TimeNs = 0;
++
++ /* initialise the results count for the caller */
++ psQueryOut->ui32NumResults = 0;
++
++ DevicememHistoryLock();
++
++ /* if the search is constrained to a particular PID then we
++ * first search the list of allocations to see if this
++ * PID is known to us
++ */
++ if (psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY)
++ {
++ IMG_UINT32 ui32Alloc;
++ ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
++
++ while (ui32Alloc != END_OF_LIST)
++ {
++ RECORD_ALLOCATION *psAlloc;
++
++ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
++
++ if (psAlloc->uiPID == psQueryIn->uiPID)
++ {
++ goto found_pid;
++ }
++
++ /* move on to the next allocation in the list */
++ ui32Alloc = psAlloc->ui32Next;
++
++ if (ui32Alloc == gsDevicememHistoryData.sRecords.ui32AllocationsListHead)
++ {
++ /* gone through
whole list */ ++ break; ++ } ++ } ++ ++ /* PID not found, so we do not have any suitable data for this ++ * page fault ++ */ ++ goto out_unlock; ++ } ++ ++found_pid: ++ ++ CircularBufferIterateStart(&ui32Head, &ui32Iter); ++ ++ while (!bLast) ++ { ++ psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast); ++ ++ if (eType == COMMAND_TYPE_TIMESTAMP) ++ { ++ ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp); ++ continue; ++ } ++ ++ if ((eType == COMMAND_TYPE_MAP_ALL) || ++ (eType == COMMAND_TYPE_UNMAP_ALL) || ++ (eType == COMMAND_TYPE_MAP_RANGE) || ++ (eType == COMMAND_TYPE_UNMAP_RANGE)) ++ { ++ RECORD_ALLOCATION *psAlloc; ++ IMG_DEV_VIRTADDR sAllocStartAddrOrig, sAllocEndAddrOrig; ++ IMG_DEV_VIRTADDR sAllocStartAddr, sAllocEndAddr; ++ IMG_BOOL bMap; ++ IMG_UINT32 ui32AllocIndex; ++ ++ MapUnmapCommandGetInfo(psCommand, ++ eType, ++ &sAllocStartAddrOrig, ++ &sAllocEndAddrOrig, ++ &bMap, ++ &ui32AllocIndex); ++ ++ sAllocStartAddr = sAllocStartAddrOrig; ++ sAllocEndAddr = sAllocEndAddrOrig; ++ ++ psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex); ++ ++ /* skip this command if we need to search within ++ * a particular PID, and this allocation is not from ++ * that PID ++ */ ++ if ((psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) && ++ (psAlloc->uiPID != psQueryIn->uiPID)) ++ { ++ continue; ++ } ++ ++ /* if the allocation was created after this event, then this ++ * event must be for an old/removed allocation, so skip it ++ */ ++ if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs) ++ { ++ continue; ++ } ++ ++ /* if the caller wants us to match any allocation in the ++ * same page as the allocation then tweak the real start/end ++ * addresses of the allocation here ++ */ ++ if (bMatchAnyAllocInPage) ++ { ++ sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1); ++ sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1); ++ } ++ ++ if ((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) && ++ (psQueryIn->sDevVAddr.uiAddr < sAllocEndAddr.uiAddr)) ++ { ++ DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults]; ++ ++ OSStringLCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString)); ++ psResult->sBaseDevVAddr = psAlloc->sDevVAddr; ++ psResult->uiSize = psAlloc->uiSize; ++ psResult->bMap = bMap; ++ psResult->ui64Age = _CalculateAge(ui64StartTime, ui64TimeNs, TIME_STAMP_MASK); ++ psResult->ui64When = ui64TimeNs; ++ /* write the responsible PID in the placeholder */ ++ psResult->sProcessInfo.uiPID = psAlloc->uiPID; ++ ++ if ((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL)) ++ { ++ psResult->bRange = IMG_FALSE; ++ psResult->bAll = IMG_TRUE; ++ } ++ else ++ { ++ psResult->bRange = IMG_TRUE; ++ MapRangeUnpack(&psCommand->u.sMapRange, ++ &psResult->ui32StartPage, ++ &psResult->ui32PageCount); ++ psResult->bAll = (psResult->ui32PageCount * (1U << psAlloc->ui32Log2PageSize)) ++ == psAlloc->uiSize; ++ psResult->sMapStartAddr = sAllocStartAddrOrig; ++ psResult->sMapEndAddr = sAllocEndAddrOrig; ++ } ++ ++ psQueryOut->ui32NumResults++; ++ ++ if (psQueryOut->ui32NumResults == DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS) ++ { ++ break; ++ } ++ } ++ } ++ } ++ ++out_unlock: ++ DevicememHistoryUnlock(); ++ ++ return psQueryOut->ui32NumResults > 0; ++} ++ ++static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN], ++ IMG_PID uiPID, ++ const IMG_CHAR *pszName, ++ const IMG_CHAR *pszAction, ++ 
IMG_DEV_VIRTADDR sDevVAddrStart, ++ IMG_DEV_VIRTADDR sDevVAddrEnd, ++ IMG_UINT64 ui64TimeNs) ++{ ++ ++ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, ++ /* PID NAME MAP/UNMAP MIN-MAX SIZE AbsUS AgeUS*/ ++ "%04u %-40s %-10s " ++ IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC " " ++ "0x%08" IMG_UINT64_FMTSPECX " " ++ "%013" IMG_UINT64_FMTSPEC, /* 13 digits is over 2 hours of ns */ ++ uiPID, ++ pszName, ++ pszAction, ++ sDevVAddrStart.uiAddr, ++ sDevVAddrEnd.uiAddr, ++ sDevVAddrEnd.uiAddr - sDevVAddrStart.uiAddr + 1, ++ ui64TimeNs); ++} ++ ++static void DeviceMemHistoryFmtHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]) ++{ ++ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, ++ "%-4s %-40s %-6s %10s %10s %8s %13s", ++ "PID", ++ "NAME", ++ "ACTION", ++ "ADDR MIN", ++ "ADDR MAX", ++ "SIZE", ++ "ABS NS"); ++} ++ ++static const char *CommandTypeToString(COMMAND_TYPE eType) ++{ ++ switch (eType) ++ { ++ case COMMAND_TYPE_MAP_ALL: ++ return "MapAll"; ++ case COMMAND_TYPE_UNMAP_ALL: ++ return "UnmapAll"; ++ case COMMAND_TYPE_MAP_RANGE: ++ return "MapRange"; ++ case COMMAND_TYPE_UNMAP_RANGE: ++ return "UnmapRange"; ++ case COMMAND_TYPE_TIMESTAMP: ++ return "TimeStamp"; ++ default: ++ return "???"; ++ } ++} ++ ++static void DevicememHistoryPrintAll(OSDI_IMPL_ENTRY *psEntry) ++{ ++ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; ++ IMG_UINT32 ui32Iter; ++ IMG_UINT32 ui32Head; ++ IMG_BOOL bLast = IMG_FALSE; ++ IMG_UINT64 ui64TimeNs = 0; ++ IMG_UINT64 ui64StartTime = OSClockns64(); ++ ++ DeviceMemHistoryFmtHeader(szBuffer); ++ DIPrintf(psEntry, "%s\n", szBuffer); ++ ++ CircularBufferIterateStart(&ui32Head, &ui32Iter); ++ ++ while (!bLast) ++ { ++ COMMAND_WRAPPER *psCommand; ++ COMMAND_TYPE eType = COMMAND_TYPE_NONE; ++ ++ psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, ++ &bLast); ++ ++ if (eType == COMMAND_TYPE_TIMESTAMP) ++ { ++ ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp); ++ continue; ++ } ++ ++ ++ if ((eType == COMMAND_TYPE_MAP_ALL) || ++ (eType == COMMAND_TYPE_UNMAP_ALL) || ++ (eType == COMMAND_TYPE_MAP_RANGE) || ++ (eType == COMMAND_TYPE_UNMAP_RANGE)) ++ { ++ RECORD_ALLOCATION *psAlloc; ++ IMG_DEV_VIRTADDR sDevVAddrStart, sDevVAddrEnd; ++ IMG_BOOL bMap; ++ IMG_UINT32 ui32AllocIndex; ++ ++ MapUnmapCommandGetInfo(psCommand, ++ eType, ++ &sDevVAddrStart, ++ &sDevVAddrEnd, ++ &bMap, ++ &ui32AllocIndex); ++ ++ psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex); ++ ++ if (DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs) ++ { ++ /* if this event relates to an allocation we ++ * are no longer tracking then do not print it ++ */ ++ continue; ++ } ++ ++ DeviceMemHistoryFmt(szBuffer, ++ psAlloc->uiPID, ++ psAlloc->szName, ++ CommandTypeToString(eType), ++ sDevVAddrStart, ++ sDevVAddrEnd, ++ ui64TimeNs); ++ ++ DIPrintf(psEntry, "%s\n", szBuffer); ++ } ++ } ++ ++ DIPrintf(psEntry, "\nTimestamp reference: %013" IMG_UINT64_FMTSPEC "\n", ++ ui64StartTime); ++} ++ ++static int DevicememHistoryPrintAllWrapper(OSDI_IMPL_ENTRY *psEntry, ++ void *pvData) ++{ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ DevicememHistoryLock(); ++ DevicememHistoryPrintAll(psEntry); ++ DevicememHistoryUnlock(); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR CreateRecords(void) ++{ ++ gsDevicememHistoryData.sRecords.pasAllocations = ++ OSAllocMem(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES); ++ ++ PVR_RETURN_IF_NOMEM(gsDevicememHistoryData.sRecords.pasAllocations); ++ ++ /* Allocated and initialise the circular buffer with zeros so every ++ * command is initialised as a command of 
type COMMAND_TYPE_NONE. */ ++ gsDevicememHistoryData.sRecords.pasCircularBuffer = ++ OSAllocZMem(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS); ++ ++ if (gsDevicememHistoryData.sRecords.pasCircularBuffer == NULL) ++ { ++ OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static void DestroyRecords(void) ++{ ++ OSFreeMem(gsDevicememHistoryData.sRecords.pasCircularBuffer); ++ OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations); ++} ++ ++static void InitialiseRecords(void) ++{ ++ IMG_UINT32 i; ++ ++ /* initialise the allocations list */ ++ ++ gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1; ++ gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Next = 1; ++ ++ for (i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++) ++ { ++ gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Prev = i - 1; ++ gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Next = i + 1; ++ } ++ ++ gsDevicememHistoryData.sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0; ++ ++ gsDevicememHistoryData.sRecords.ui32AllocationsListHead = 0; ++} ++ ++PVRSRV_ERROR DevicememHistoryInitKM(void) ++{ ++ PVRSRV_ERROR eError; ++ DI_ITERATOR_CB sIterator = {.pfnShow = DevicememHistoryPrintAllWrapper}; ++ ++ eError = OSLockCreate(&gsDevicememHistoryData.hLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", err_lock); ++ ++ eError = CreateRecords(); ++ PVR_LOG_GOTO_IF_ERROR(eError, "CreateRecords", err_allocations); ++ ++ InitialiseRecords(); ++ ++ eError = DICreateEntry("devicemem_history", NULL, &sIterator, NULL, ++ DI_ENTRY_TYPE_GENERIC, ++ &gsDevicememHistoryData.psDIEntry); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", err_di_creation); ++ ++ return PVRSRV_OK; ++ ++err_di_creation: ++ DestroyRecords(); ++err_allocations: ++ OSLockDestroy(gsDevicememHistoryData.hLock); ++ gsDevicememHistoryData.hLock = NULL; ++err_lock: ++ return eError; ++} ++ ++void DevicememHistoryDeInitKM(void) ++{ ++ if (gsDevicememHistoryData.psDIEntry != NULL) ++ { ++ DIDestroyEntry(gsDevicememHistoryData.psDIEntry); ++ } ++ ++ DestroyRecords(); ++ ++ if (gsDevicememHistoryData.hLock != NULL) ++ { ++ OSLockDestroy(gsDevicememHistoryData.hLock); ++ gsDevicememHistoryData.hLock = NULL; ++ } ++} +diff --git a/drivers/gpu/drm/img-rogue/devicemem_history_server.h b/drivers/gpu/drm/img-rogue/devicemem_history_server.h +new file mode 100644 +index 000000000000..8e7ca59a6905 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_history_server.h +@@ -0,0 +1,157 @@ ++/*************************************************************************/ /*! ++@File devicemem_history_server.h ++@Title Resource Information abstraction ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Devicemem History functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef DEVICEMEM_HISTORY_SERVER_H ++#define DEVICEMEM_HISTORY_SERVER_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "rgxmem.h" ++#include "devicemem_utils.h" ++#include "connection_server.h" ++ ++PVRSRV_ERROR DevicememHistoryInitKM(void); ++ ++void DevicememHistoryDeInitKM(void); ++ ++PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR, ++ IMG_UINT32 ui32Offset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const char szName[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT32 ui32PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 *pui32AllocationIndexOut); ++ ++PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR, ++ IMG_UINT32 ui32Offset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const char szName[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT32 ui32PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 *pui32AllocationIndexOut); ++ ++PVRSRV_ERROR DevicememHistoryMapVRangeKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEV_VIRTADDR sBaseDevVAddr, ++ IMG_UINT32 ui32StartPage, ++ IMG_UINT32 ui32NumPages, ++ IMG_DEVMEM_SIZE_T uiAllocSize, ++ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 *ui32AllocationIndexOut); ++ ++PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEV_VIRTADDR sBaseDevVAddr, ++ IMG_UINT32 ui32StartPage, ++ IMG_UINT32 ui32NumPages, ++ IMG_DEVMEM_SIZE_T uiAllocSize, ++ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32AllocationIndex, ++ IMG_UINT32 *ui32AllocationIndexOut); ++ ++PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR, ++ IMG_UINT32 ui32Offset, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const char szName[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT32 ui32PageSize, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *paui32AllocPageIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pauiFreePageIndices, ++ IMG_UINT32 AllocationIndex, ++ IMG_UINT32 *pui32AllocationIndexOut); ++ ++/* used when the PID does not matter */ ++#define DEVICEMEM_HISTORY_PID_ANY 0xFFFFFFFE ++ ++typedef struct _DEVICEMEM_HISTORY_QUERY_IN_ ++{ ++ IMG_PID uiPID; ++ IMG_DEV_VIRTADDR sDevVAddr; ++} DEVICEMEM_HISTORY_QUERY_IN; ++ ++/* Store up to 4 results for a lookup. In the case of the faulting page being ++ * re-mapped between the page fault occurring on HW and the page fault analysis ++ * being done, the second result entry will show the allocation being unmapped. ++ * A further 2 entries are added to cater for multiple buffers in the same page. ++ */ ++#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 4 ++ ++typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_ ++{ ++ IMG_CHAR szString[DEVMEM_ANNOTATION_MAX_LEN]; ++ IMG_DEV_VIRTADDR sBaseDevVAddr; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_BOOL bMap; ++ IMG_BOOL bRange; ++ IMG_BOOL bAll; ++ IMG_UINT64 ui64When; ++ IMG_UINT64 ui64Age; ++ /* info for sparse map/unmap operations (i.e. 
bRange=IMG_TRUE) */ ++ IMG_UINT32 ui32StartPage; ++ IMG_UINT32 ui32PageCount; ++ IMG_DEV_VIRTADDR sMapStartAddr; ++ IMG_DEV_VIRTADDR sMapEndAddr; ++ RGXMEM_PROCESS_INFO sProcessInfo; ++} DEVICEMEM_HISTORY_QUERY_OUT_RESULT; ++ ++typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_ ++{ ++ IMG_UINT32 ui32NumResults; ++ /* result 0 is the newest */ ++ DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS]; ++} DEVICEMEM_HISTORY_QUERY_OUT; ++ ++IMG_BOOL ++DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn, ++ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, ++ IMG_UINT32 ui32PageSizeBytes, ++ IMG_BOOL bMatchAnyAllocInPage); ++ ++#endif +diff --git a/drivers/gpu/drm/img-rogue/devicemem_pdump.h b/drivers/gpu/drm/img-rogue/devicemem_pdump.h +new file mode 100644 +index 000000000000..09b28afe7e51 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_pdump.h +@@ -0,0 +1,363 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management PDump internal ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Services internal interface to PDump device memory management ++ functions that are shared between client and server code. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#ifndef DEVICEMEM_PDUMP_H
++#define DEVICEMEM_PDUMP_H
++
++#include "devicemem.h"
++#include "pdumpdefs.h"
++#include "pdump.h"
++
++#if defined(PDUMP)
++/*
++ * DevmemPDumpLoadMem()
++ *
++ * Takes a memory descriptor, offset and size, writes the current contents
++ * of the memory at that location to the PRM pdump file, and emits a pdump
++ * LDB to load the data from that file. The intention is that the contents
++ * of the simulated buffer upon pdump playback will be made the same as
++ * they are when this command is run, enabling pdump of cases where the
++ * memory has been modified externally, e.g. by the host CPU or by a
++ * third party.
++ */
++void
++DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
++                   IMG_DEVMEM_OFFSET_T uiOffset,
++                   IMG_DEVMEM_SIZE_T uiSize,
++                   PDUMP_FLAGS_T uiPDumpFlags);
++
++/*
++ * DevmemPDumpLoadZeroMem()
++ *
++ * As DevmemPDumpLoadMem() but the PDump allocation will be populated with
++ * zeros from the zero page in the parameter stream.
++ */
++void
++DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
++                       IMG_DEVMEM_OFFSET_T uiOffset,
++                       IMG_DEVMEM_SIZE_T uiSize,
++                       PDUMP_FLAGS_T uiPDumpFlags);
++
++/*
++ * DevmemPDumpLoadMemValue32()
++ *
++ * As above but dumps the value at a dword-aligned address in plain text to
++ * the pdump script2 file. Useful for patching a buffer at pdump playback by
++ * simply editing the script output file.
++ *
++ * (The same functionality can be achieved by the above function but the
++ * binary PARAM file must be patched in that case.)
++ */
++IMG_INTERNAL void
++DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
++                          IMG_DEVMEM_OFFSET_T uiOffset,
++                          IMG_UINT32 ui32Value,
++                          PDUMP_FLAGS_T uiPDumpFlags);
++
++/*
++ * DevmemPDumpLoadMemValue64()
++ *
++ * As above but dumps the 64-bit value at a dword-aligned address in plain
++ * text to the pdump script2 file. Useful for patching a buffer at pdump
++ * playback by simply editing the script output file.
++ *
++ * (The same functionality can be achieved by the above function but the
++ * binary PARAM file must be patched in that case.)
++ */
++IMG_INTERNAL void
++DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
++                          IMG_DEVMEM_OFFSET_T uiOffset,
++                          IMG_UINT64 ui64Value,
++                          PDUMP_FLAGS_T uiPDumpFlags);
++
++/*
++ * DevmemPDumpPageCatBaseToSAddr()
++ *
++ * Returns the symbolic address of a piece of memory represented by an offset
++ * into the mem descriptor.
++ */
++PVRSRV_ERROR
++DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc,
++                              IMG_DEVMEM_OFFSET_T *puiMemOffset,
++                              IMG_CHAR *pszName,
++                              IMG_UINT32 ui32Size);
++
++/*
++ * DevmemPDumpSaveToFile()
++ *
++ * Emits a pdump SAB to cause the current contents of the memory to be written
++ * to the given file during playback.
++ */
++void
++DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
++                      IMG_DEVMEM_OFFSET_T uiOffset,
++                      IMG_DEVMEM_SIZE_T uiSize,
++                      const IMG_CHAR *pszFilename,
++                      IMG_UINT32 uiFileOffset);
++
++/*
++ * DevmemPDumpSaveToFileVirtual()
++ *
++ * Emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the virtual
++ * address and device MMU context to cause the pdump player to traverse the
++ * MMU page tables itself.
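++ *
++ * Illustrative call (the memdesc, size and filename values here are
++ * hypothetical; a sketch only, not taken from this driver's callers):
++ *
++ *     DevmemPDumpSaveToFileVirtual(psMemDesc, 0, uiSize, "mem.bin",
++ *                                  0, PDUMP_FLAGS_CONTINUOUS);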
++ */ ++void ++DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 ui32FileOffset, ++ IMG_UINT32 ui32PdumpFlags); ++ ++/* ++ * DevmemPDumpDataDescriptor() ++ * ++ * Emits a pdump CMD:OutputData, using the virtual address and device MMU ++ * context. Provides more flexibility than a pdump SAB because metadata can ++ * be passed to an external pdump player library via the command header. ++ */ ++void ++DevmemPDumpDataDescriptor(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 ui32HeaderType, ++ IMG_UINT32 ui32ElementType, ++ IMG_UINT32 ui32ElementCount, ++ IMG_UINT32 ui32PdumpFlags); ++ ++ ++/* ++ * ++ * DevmemPDumpDevmemPol32() ++ * ++ * Writes a PDump 'POL' command to wait for a masked 32-bit memory location to ++ * become the specified value. ++ */ ++PVRSRV_ERROR ++DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T ui32PDumpFlags); ++ ++#if defined(__KERNEL__) ++/* ++ * ++ * DevmemPDumpDevmemCheck32() ++ * ++ * Writes a PDump 'POL' command to run a single-shot check for a masked ++ * 32-bit memory location to match the specified value. ++ */ ++PVRSRV_ERROR ++DevmemPDumpDevmemCheck32(const DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T ui32PDumpFlags); ++#endif ++ ++/* ++ * DevmemPDumpCBP() ++ * ++ * Polls for space in circular buffer. Reads the read offset from memory and ++ * waits until there is enough space to write the packet. 
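++ *
++ * Conceptually (a sketch of the poll condition only, not the exact pdump
++ * player algorithm), playback blocks until:
++ *
++ *     free(uiReadOffset, uiWriteOffset, uiBufferSize) >= uiPacketSize
++ *
++ * where free() is the space remaining in the circular buffer.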
++ * ++ * psMemDesc - MemDesc which contains the read offset ++ * uiReadOffset - Offset into MemDesc to the read offset ++ * uiWriteOffset - Current write offset ++ * uiPacketSize - Size of packet to write ++ * uiBufferSize - Size of circular buffer ++ */ ++PVRSRV_ERROR ++DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiReadOffset, ++ IMG_DEVMEM_OFFSET_T uiWriteOffset, ++ IMG_DEVMEM_SIZE_T uiPacketSize, ++ IMG_DEVMEM_SIZE_T uiBufferSize); ++ ++#else /* PDUMP */ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemPDumpLoadMem) ++#endif ++static INLINE void ++DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMemDesc); ++ PVR_UNREFERENCED_PARAMETER(uiOffset); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemPDumpLoadMemValue32) ++#endif ++static INLINE void ++DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT32 ui32Value, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMemDesc); ++ PVR_UNREFERENCED_PARAMETER(uiOffset); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemPDumpLoadMemValue64) ++#endif ++static INLINE void ++DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT64 ui64Value, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMemDesc); ++ PVR_UNREFERENCED_PARAMETER(uiOffset); ++ PVR_UNREFERENCED_PARAMETER(ui64Value); ++ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemPDumpPageCatBaseToSAddr) ++#endif ++static INLINE PVRSRV_ERROR ++DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T *puiMemOffset, ++ IMG_CHAR *pszName, ++ IMG_UINT32 ui32Size) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMemDesc); ++ PVR_UNREFERENCED_PARAMETER(puiMemOffset); ++ PVR_UNREFERENCED_PARAMETER(pszName); ++ PVR_UNREFERENCED_PARAMETER(ui32Size); ++ ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemPDumpSaveToFile) ++#endif ++static INLINE void ++DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 uiFileOffset) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMemDesc); ++ PVR_UNREFERENCED_PARAMETER(uiOffset); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(pszFilename); ++ PVR_UNREFERENCED_PARAMETER(uiFileOffset); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemPDumpSaveToFileVirtual) ++#endif ++static INLINE void ++DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 ui32FileOffset, ++ IMG_UINT32 ui32PdumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMemDesc); ++ PVR_UNREFERENCED_PARAMETER(uiOffset); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(pszFilename); ++ PVR_UNREFERENCED_PARAMETER(ui32FileOffset); ++ PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemPDumpDevmemPol32) ++#endif ++static INLINE PVRSRV_ERROR ++DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ 
PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T ui32PDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMemDesc); ++ PVR_UNREFERENCED_PARAMETER(uiOffset); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ PVR_UNREFERENCED_PARAMETER(eOperator); ++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); ++ ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemPDumpCBP) ++#endif ++static INLINE PVRSRV_ERROR ++DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiReadOffset, ++ IMG_DEVMEM_OFFSET_T uiWriteOffset, ++ IMG_DEVMEM_SIZE_T uiPacketSize, ++ IMG_DEVMEM_SIZE_T uiBufferSize) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMemDesc); ++ PVR_UNREFERENCED_PARAMETER(uiReadOffset); ++ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); ++ PVR_UNREFERENCED_PARAMETER(uiPacketSize); ++ PVR_UNREFERENCED_PARAMETER(uiBufferSize); ++ ++ return PVRSRV_OK; ++} ++#endif /* PDUMP */ ++#endif /* DEVICEMEM_PDUMP_H */ +diff --git a/drivers/gpu/drm/img-rogue/devicemem_server.c b/drivers/gpu/drm/img-rogue/devicemem_server.c +new file mode 100644 +index 000000000000..089fa9cd1076 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_server.c +@@ -0,0 +1,1813 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Server-side component of the Device Memory Management. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++/* our exported API */ ++#include "devicemem_server.h" ++#include "devicemem_utils.h" ++#include "devicemem.h" ++ ++#include "device.h" /* For device node */ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "pvrsrv_error.h" ++ ++#include "mmu_common.h" ++#include "pdump_km.h" ++#include "pmr.h" ++#include "physmem.h" ++#include "pdumpdesc.h" ++ ++#include "allocmem.h" ++#include "osfunc.h" ++#include "lock.h" ++ ++#include "pvrsrv.h" /* for PVRSRVGetPVRSRVData() */ ++ ++#define DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE (1 << 0) ++#define DEVMEMHEAP_REFCOUNT_MIN 1 ++#define DEVMEMHEAP_REFCOUNT_MAX IMG_INT32_MAX ++ ++struct _DEVMEMINT_CTX_ ++{ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++ /* MMU common code needs to have a context. There's a one-to-one ++ correspondence between device memory context and MMU context, ++ but we have the abstraction here so that we don't need to care ++ what the MMU does with its context, and the MMU code need not ++ know about us at all. */ ++ MMU_CONTEXT *psMMUContext; ++ ++ ATOMIC_T hRefCount; ++ ++ /* This handle is for devices that require notification when a new ++ memory context is created and they need to store private data that ++ is associated with the context. */ ++ IMG_HANDLE hPrivData; ++ ++ /* Protects access to sProcessNotifyListHead */ ++ POSWR_LOCK hListLock; ++ ++ /* The following tracks UM applications that need to be notified of a ++ * page fault */ ++ DLLIST_NODE sProcessNotifyListHead; ++ /* The following is a node for the list of registered devmem contexts */ ++ DLLIST_NODE sPageFaultNotifyListElem; ++ ++ /* Device virtual address of a page fault on this context */ ++ IMG_DEV_VIRTADDR sFaultAddress; ++ ++ /* General purpose flags */ ++ IMG_UINT32 ui32Flags; ++}; ++ ++struct _DEVMEMINT_CTX_EXPORT_ ++{ ++ DEVMEMINT_CTX *psDevmemCtx; ++ PMR *psPMR; ++ ATOMIC_T hRefCount; ++ DLLIST_NODE sNode; ++}; ++ ++struct _DEVMEMINT_HEAP_ ++{ ++ struct _DEVMEMINT_CTX_ *psDevmemCtx; ++ IMG_UINT32 uiLog2PageSize; ++ ATOMIC_T uiRefCount; ++}; ++ ++struct _DEVMEMINT_RESERVATION_ ++{ ++ struct _DEVMEMINT_HEAP_ *psDevmemHeap; ++ IMG_DEV_VIRTADDR sBase; ++ IMG_DEVMEM_SIZE_T uiLength; ++}; ++ ++struct _DEVMEMINT_MAPPING_ ++{ ++ struct _DEVMEMINT_RESERVATION_ *psReservation; ++ PMR *psPMR; ++ IMG_UINT32 uiNumPages; ++}; ++ ++struct _DEVMEMINT_PF_NOTIFY_ ++{ ++ IMG_UINT32 ui32PID; ++ DLLIST_NODE sProcessNotifyListElem; ++}; ++ ++/*************************************************************************/ /*! ++@Function DevmemIntCtxAcquire ++@Description Acquire a reference to the provided device memory context. ++@Return None ++*/ /**************************************************************************/ ++static INLINE void DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx) ++{ ++ OSAtomicIncrement(&psDevmemCtx->hRefCount); ++} ++ ++/*************************************************************************/ /*! ++@Function DevmemIntCtxRelease ++@Description Release the reference to the provided device memory context. ++ If this is the last reference which was taken then the ++ memory context will be freed. 
++@Return         None
++*/ /**************************************************************************/
++static INLINE void DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx)
++{
++	if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0)
++	{
++		/* The last reference has gone, destroy the context */
++		PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode;
++		DLLIST_NODE *psNode, *psNodeNext;
++
++		/* If there are any PIDs registered for page fault notification,
++		 * loop through them and free each entry */
++		dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
++		{
++			DEVMEMINT_PF_NOTIFY *psNotifyNode =
++				IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
++			dllist_remove_node(psNode);
++			OSFreeMem(psNotifyNode);
++		}
++
++		/* If this context is in the list registered for a debugger, remove
++		 * it from that list */
++		if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem))
++		{
++			dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
++		}
++
++		if (psDevNode->pfnUnregisterMemoryContext)
++		{
++			psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData);
++		}
++		MMU_ContextDestroy(psDevmemCtx->psMMUContext);
++
++		OSWRLockDestroy(psDevmemCtx->hListLock);
++
++		PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p",
++		         __func__, psDevmemCtx));
++		OSFreeMem(psDevmemCtx);
++	}
++}
++
++/*************************************************************************/ /*!
++@Function       DevmemIntHeapAcquire
++@Description    Acquire a reference to the provided device memory heap.
++@Return         IMG_TRUE if referenced and IMG_FALSE in case of error
++*/ /**************************************************************************/
++static INLINE IMG_BOOL DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap)
++{
++	IMG_BOOL bSuccess = OSAtomicAddUnless(&psDevmemHeap->uiRefCount, 1,
++	                                      DEVMEMHEAP_REFCOUNT_MAX);
++
++	if (!bSuccess)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to acquire the device memory "
++		        "heap, reference count has overflowed.", __func__));
++		return IMG_FALSE;
++	}
++
++	return IMG_TRUE;
++}
++
++/*************************************************************************/ /*!
++@Function       DevmemIntHeapRelease
++@Description    Release a reference to the provided device memory heap.
++                The reference count is not allowed to drop below
++                DEVMEMHEAP_REFCOUNT_MIN; the heap itself is freed by
++                DevmemIntHeapDestroy().
++@Return         None
++*/ /**************************************************************************/
++static INLINE void DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap)
++{
++	IMG_BOOL bSuccess = OSAtomicSubtractUnless(&psDevmemHeap->uiRefCount, 1,
++	                                           DEVMEMHEAP_REFCOUNT_MIN);
++
++	if (!bSuccess)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s(): Failed to release the device memory "
++		        "heap, reference count has underflowed.", __func__));
++	}
++}
++
++PVRSRV_ERROR
++DevmemIntUnpin(PMR *psPMR)
++{
++	PVRSRV_ERROR eError;
++
++	/* Unpin */
++	eError = PMRUnpinPMR(psPMR, IMG_FALSE);
++
++	return eError;
++}
++
++PVRSRV_ERROR
++DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
++{
++	PVRSRV_ERROR eError;
++
++	eError = PMRUnpinPMR(psPMR, IMG_TRUE);
++	PVR_GOTO_IF_ERROR(eError, e_exit);
++
++	/* Invalidate mapping */
++	eError = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
++	                            psDevmemMapping->psReservation->sBase,
++	                            psDevmemMapping->uiNumPages,
++	                            psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize,
++	                            IMG_FALSE, /* !< Choose to invalidate PT entries */
++	                            psPMR);
++
++e_exit:
++	return eError;
++}
++
++PVRSRV_ERROR
++DevmemIntPin(PMR *psPMR)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	/* Start the pinning */
++	eError = PMRPinPMR(psPMR);
++
++	return eError;
++}
++
++PVRSRV_ERROR
++DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_ERROR eErrorMMU = PVRSRV_OK;
++	IMG_UINT32 uiLog2PageSize = psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize;
++
++	/* Start the pinning */
++	eError = PMRPinPMR(psPMR);
++
++	if (eError == PVRSRV_OK)
++	{
++		/* Make mapping valid again */
++		eErrorMMU = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
++		                               psDevmemMapping->psReservation->sBase,
++		                               psDevmemMapping->uiNumPages,
++		                               uiLog2PageSize,
++		                               IMG_TRUE, /* !< Choose to make PT entries valid again */
++		                               psPMR);
++	}
++	else if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
++	{
++		/* If we lost the physical backing we have to map it again because
++		 * the old physical addresses are not valid anymore. */
++		PMR_FLAGS_T uiFlags;
++		uiFlags = PMR_Flags(psPMR);
++
++		eErrorMMU = MMU_MapPages(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
++		                         uiFlags,
++		                         psDevmemMapping->psReservation->sBase,
++		                         psPMR,
++		                         0,
++		                         psDevmemMapping->uiNumPages,
++		                         NULL,
++		                         uiLog2PageSize);
++	}
++
++	/* Just overwrite eError if the mappings failed.
++	 * PMR_NEW_MEMORY has to be propagated to the user. */
++	if (eErrorMMU != PVRSRV_OK)
++	{
++		eError = eErrorMMU;
++	}
++
++	return eError;
++}
++
++/*************************************************************************/ /*!
++@Function       DevmemServerGetImportHandle
++@Description    For given exportable memory descriptor returns PMR handle.
++@Return         Memory is exportable - Success
++                PVRSRV_ERROR failure code
++*/ /**************************************************************************/
++PVRSRV_ERROR
++DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
++                            IMG_HANDLE *phImport)
++{
++	PVRSRV_ERROR eError;
++
++	if ((GetImportProperties(psMemDesc->psImport) & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
++	{
++		PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION, e0);
++	}
++
++	/* A new handle means a new import tracking the PMR.
++ * Hence the source PMR memory layout should be marked fixed ++ * to make sure the importer view of the memory is the same as ++ * the exporter throughout its lifetime */ ++ PMR_SetLayoutFixed((PMR *)psMemDesc->psImport->hPMR, IMG_TRUE); ++ ++ *phImport = psMemDesc->psImport->hPMR; ++ return PVRSRV_OK; ++ ++e0: ++ return eError; ++} ++ ++/*************************************************************************/ /*! ++@Function DevmemServerGetHeapHandle ++@Description For given reservation returns the Heap handle. ++@Return PVRSRV_ERROR failure code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation, ++ IMG_HANDLE *phHeap) ++{ ++ if (psReservation == NULL || phHeap == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ *phHeap = psReservation->psDevmemHeap; ++ ++ return PVRSRV_OK; ++} ++ ++/*************************************************************************/ /*! ++@Function DevmemServerGetContext ++@Description For given heap returns the context. ++@Return PVRSRV_ERROR failure code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap, ++ DEVMEMINT_CTX **ppsDevmemCtxPtr) ++{ ++ if (psDevmemHeap == NULL || ppsDevmemCtxPtr == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ *ppsDevmemCtxPtr = psDevmemHeap->psDevmemCtx; ++ ++ return PVRSRV_OK; ++} ++ ++/*************************************************************************/ /*! ++@Function DevmemServerGetPrivData ++@Description For given context returns the private data handle. ++@Return PVRSRV_ERROR failure code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx, ++ IMG_HANDLE *phPrivData) ++{ ++ if (psDevmemCtx == NULL || phPrivData == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ *phPrivData = psDevmemCtx->hPrivData; ++ ++ return PVRSRV_OK; ++} ++ ++/*************************************************************************/ /*! ++@Function DevmemIntCtxCreate ++@Description Creates and initialises a device memory context. ++@Return valid Device Memory context handle - Success ++ PVRSRV_ERROR failure code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++DevmemIntCtxCreate(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bKernelMemoryCtx, ++ DEVMEMINT_CTX **ppsDevmemCtxPtr, ++ IMG_HANDLE *hPrivData, ++ IMG_UINT32 *pui32CPUCacheLineSize) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevmemCtx; ++ IMG_HANDLE hPrivDataInt = NULL; ++ MMU_DEVICEATTRIBS *psMMUDevAttrs = psDeviceNode->pfnGetMMUDeviceAttributes(psDeviceNode, ++ bKernelMemoryCtx); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__)); ++ ++ /* ++ * Ensure that we are safe to perform unaligned accesses on memory ++ * we mark write-combine, as the compiler might generate ++ * instructions operating on this memory which require this ++ * assumption to be true. 
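++	 *
++	 * (For example, a compiler may turn a structure copy into unaligned
++	 * vector stores when writing into a write-combined buffer on some
++	 * CPUs; an illustration, not an exhaustive list.)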
++ */ ++ PVR_ASSERT(OSIsWriteCombineUnalignedSafe()); ++ ++ /* allocate a Devmem context */ ++ psDevmemCtx = OSAllocMem(sizeof(*psDevmemCtx)); ++ PVR_LOG_GOTO_IF_NOMEM(psDevmemCtx, eError, fail_alloc); ++ ++ OSAtomicWrite(&psDevmemCtx->hRefCount, 1); ++ psDevmemCtx->psDevNode = psDeviceNode; ++ ++ /* Call down to MMU context creation */ ++ ++ eError = MMU_ContextCreate(psConnection, ++ psDeviceNode, ++ &psDevmemCtx->psMMUContext, ++ psMMUDevAttrs); ++ PVR_LOG_GOTO_IF_ERROR(eError, "MMU_ContextCreate", fail_mmucontext); ++ ++ if (psDeviceNode->pfnRegisterMemoryContext) ++ { ++ eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt); ++ PVR_LOG_GOTO_IF_ERROR(eError, "pfnRegisterMemoryContext", fail_register); ++ } ++ ++ /* Store the private data as it is required to unregister the memory context */ ++ psDevmemCtx->hPrivData = hPrivDataInt; ++ *hPrivData = hPrivDataInt; ++ *ppsDevmemCtxPtr = psDevmemCtx; ++ ++ /* Pass the CPU cache line size through the bridge to the user mode as it can't be queried in user mode.*/ ++ *pui32CPUCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); ++ ++ /* Initialise the PID notify list */ ++ OSWRLockCreate(&psDevmemCtx->hListLock); ++ dllist_init(&(psDevmemCtx->sProcessNotifyListHead)); ++ psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL; ++ psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL; ++ ++ /* Initialise page fault address */ ++ psDevmemCtx->sFaultAddress.uiAddr = 0ULL; ++ ++ /* Initialise flags */ ++ psDevmemCtx->ui32Flags = 0; ++ ++ return PVRSRV_OK; ++ ++fail_register: ++ MMU_ContextDestroy(psDevmemCtx->psMMUContext); ++fail_mmucontext: ++ OSFreeMem(psDevmemCtx); ++fail_alloc: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++/*************************************************************************/ /*! ++@Function DevmemIntHeapCreate ++@Description Creates and initialises a device memory heap. 
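++                The new heap holds a reference on its parent context (taken
++                via DevmemIntCtxAcquire below) for its entire lifetime; the
++                reference is dropped again in DevmemIntHeapDestroy.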
++@Return         valid Device Memory heap handle - Success
++                PVRSRV_ERROR failure code
++*/ /**************************************************************************/
++PVRSRV_ERROR
++DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
++                    IMG_DEV_VIRTADDR sHeapBaseAddr,
++                    IMG_DEVMEM_SIZE_T uiHeapLength,
++                    IMG_UINT32 uiLog2DataPageSize,
++                    DEVMEMINT_HEAP **ppsDevmemHeapPtr)
++{
++	DEVMEMINT_HEAP *psDevmemHeap;
++
++	PVR_DPF((PVR_DBG_MESSAGE, "%s", __func__));
++
++	/* allocate a Devmem heap */
++	psDevmemHeap = OSAllocMem(sizeof(*psDevmemHeap));
++	PVR_LOG_RETURN_IF_NOMEM(psDevmemHeap, "psDevmemHeap");
++
++	psDevmemHeap->psDevmemCtx = psDevmemCtx;
++
++	DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx);
++
++	OSAtomicWrite(&psDevmemHeap->uiRefCount, 1);
++
++	psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize;
++
++	*ppsDevmemHeapPtr = psDevmemHeap;
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
++                                          PVRSRV_DEF_PAGE *psDefPage,
++                                          IMG_INT uiInitValue,
++                                          IMG_CHAR *pcDefPageName,
++                                          IMG_BOOL bInitPage)
++{
++	IMG_UINT32 ui32RefCnt;
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	OSLockAcquire(psDefPage->psPgLock);
++
++	/* We know there will never be 4G sparse PMRs, so the refcount
++	 * cannot overflow */
++	ui32RefCnt = OSAtomicIncrement(&psDefPage->atRefCounter);
++
++	if (1 == ui32RefCnt)
++	{
++		IMG_DEV_PHYADDR sDevPhysAddr = {0};
++
++#if defined(PDUMP)
++		PDUMPCOMMENT(psDevNode, "Alloc %s page object", pcDefPageName);
++#endif
++
++		/* Allocate the dummy page required for sparse backing */
++		eError = DevPhysMemAlloc(psDevNode,
++		                         (1 << psDefPage->ui32Log2PgSize),
++		                         0,
++		                         uiInitValue,
++		                         bInitPage,
++#if defined(PDUMP)
++		                         psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
++		                         pcDefPageName,
++		                         &psDefPage->hPdumpPg,
++#endif
++		                         &psDefPage->sPageHandle,
++		                         &sDevPhysAddr);
++		if (PVRSRV_OK != eError)
++		{
++			OSAtomicDecrement(&psDefPage->atRefCounter);
++		}
++		else
++		{
++			psDefPage->ui64PgPhysAddr = sDevPhysAddr.uiAddr;
++		}
++	}
++
++	OSLockRelease(psDefPage->psPgLock);
++
++	return eError;
++}
++
++void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode,
++                                 PVRSRV_DEF_PAGE *psDefPage,
++                                 IMG_CHAR *pcDefPageName)
++{
++	IMG_UINT32 ui32RefCnt;
++
++	ui32RefCnt = OSAtomicRead(&psDefPage->atRefCounter);
++
++	/* For the cases where the dummy page allocation fails due to lack of
++	 * memory, the refcount can still be 0 even for a sparse allocation */
++	if (0 != ui32RefCnt)
++	{
++		OSLockAcquire(psDefPage->psPgLock);
++
++		/* We know there will never be 4G sparse PMRs */
++		ui32RefCnt = OSAtomicDecrement(&psDefPage->atRefCounter);
++
++		if (0 == ui32RefCnt)
++		{
++			PDUMPCOMMENT(psDevNode, "Free %s page object", pcDefPageName);
++
++			/* Free the dummy page when refcount reaches zero */
++			DevPhysMemFree(psDevNode,
++#if defined(PDUMP)
++			               psDefPage->hPdumpPg,
++#endif
++			               &psDefPage->sPageHandle);
++
++#if defined(PDUMP)
++			psDefPage->hPdumpPg = NULL;
++#endif
++			psDefPage->ui64PgPhysAddr = MMU_BAD_PHYS_ADDR;
++		}
++
++		OSLockRelease(psDefPage->psPgLock);
++	}
++}
++
++PVRSRV_ERROR
++DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
++                  PMR *psPMR,
++                  IMG_UINT32 ui32PageCount,
++                  IMG_UINT32 ui32PhysicalPgOffset,
++                  PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                  IMG_DEV_VIRTADDR sDevVAddrBase)
++{
++	PVRSRV_ERROR eError;
++
++	PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PageCount < PMR_MAX_SUPPORTED_PAGE_COUNT), "ui32PageCount");
++	PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PhysicalPgOffset < PMR_MAX_SUPPORTED_PAGE_COUNT), "ui32PhysicalPgOffset");
++
++	if (psReservation->psDevmemHeap->uiLog2PageSize > PMR_GetLog2Contiguity(psPMR))
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		        "%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). "
++		        "PMR contiguity must be a multiple of the heap contiguity!",
++		        __func__,
++		        psReservation->psDevmemHeap->uiLog2PageSize,
++		        PMR_GetLog2Contiguity(psPMR)));
++		PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
++	}
++
++	eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
++	                      uiFlags,
++	                      sDevVAddrBase,
++	                      psPMR,
++	                      ui32PhysicalPgOffset,
++	                      ui32PageCount,
++	                      NULL,
++	                      psReservation->psDevmemHeap->uiLog2PageSize);
++
++e0:
++	return eError;
++}
++
++PVRSRV_ERROR
++DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
++                    IMG_DEV_VIRTADDR sDevVAddrBase,
++                    IMG_UINT32 ui32PageCount)
++{
++	PVR_LOG_RETURN_IF_INVALID_PARAM((ui32PageCount < PMR_MAX_SUPPORTED_PAGE_COUNT), "ui32PageCount");
++
++	/* Unmap the pages and mark them invalid in the MMU PTE */
++	MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
++	               0,
++	               sDevVAddrBase,
++	               ui32PageCount,
++	               NULL,
++	               psReservation->psDevmemHeap->uiLog2PageSize,
++	               0);
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
++                DEVMEMINT_RESERVATION *psReservation,
++                PMR *psPMR,
++                PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
++                DEVMEMINT_MAPPING **ppsMappingPtr)
++{
++	PVRSRV_ERROR eError;
++	DEVMEMINT_MAPPING *psMapping;
++	/* number of pages (device pages) that allocation spans */
++	IMG_UINT32 ui32NumDevPages;
++	/* device virtual address of start of allocation */
++	IMG_DEV_VIRTADDR sAllocationDevVAddr;
++	/* and its length */
++	IMG_DEVMEM_SIZE_T uiAllocationSize;
++	IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
++	IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
++	PVRSRV_DEVICE_NODE *psDevNode;
++	PMR_FLAGS_T uiPMRFlags;
++	PVRSRV_DEF_PAGE *psDefPage;
++	IMG_CHAR *pszPageName;
++
++	if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR))
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		        "%s: Device heap and PMR have incompatible contiguity (%u - %u). "
++		        "PMR contiguity must be a multiple of the heap contiguity!",
++		        __func__,
++		        uiLog2HeapContiguity,
++		        PMR_GetLog2Contiguity(psPMR) ));
++		PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, ErrorReturnError);
++	}
++	psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
++
++	/* Don't bother with refcount on reservation, as a reservation
++	   only ever holds one mapping, so we directly increment the
++	   refcount on the heap instead */
++	if (!DevmemIntHeapAcquire(psDevmemHeap))
++	{
++		PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ErrorReturnError);
++	}
++
++	/* allocate memory to record the mapping info */
++	psMapping = OSAllocMem(sizeof(*psMapping));
++	PVR_LOG_GOTO_IF_NOMEM(psMapping, eError, ErrorUnreference);
++
++	uiAllocationSize = psReservation->uiLength;
++
++	ui32NumDevPages = 0xffffffffU & ( ( (uiAllocationSize - 1) >> uiLog2HeapContiguity) + 1);
++	PVR_ASSERT((IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity == uiAllocationSize);
++
++	eError = PMRLockSysPhysAddresses(psPMR);
++	PVR_GOTO_IF_ERROR(eError, ErrorFreeMapping);
++
++	sAllocationDevVAddr = psReservation->sBase;
++
++	/* Check if the PMR that needs to be mapped is sparse */
++	bIsSparse = PMR_IsSparse(psPMR);
++	if (bIsSparse)
++	{
++		/* Get the flags */
++		uiPMRFlags = PMR_Flags(psPMR);
++		bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
++
++		if (bNeedBacking)
++		{
++			IMG_INT uiInitValue;
++
++			if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags))
++			{
++				psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage;
++				uiInitValue = PVR_ZERO_PAGE_INIT_VALUE;
++				pszPageName = DEV_ZERO_PAGE;
++			}
++			else
++			{
++				psDefPage = &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage;
++				uiInitValue = PVR_DUMMY_PAGE_INIT_VALUE;
++				pszPageName = DUMMY_PAGE;
++			}
++
++			/* Errors are logged within the function if anything fails; if
++			 * the allocation fails we must fail the map request and return
++			 * an appropriate error.
++			 *
++			 * Allocation of the dummy/zero page is done after physically
++			 * locking down the PMR's pages, as that is the most likely
++			 * best case path. If the dummy/zero page allocation fails we
++			 * unlock the physical addresses again; the impact of this is
++			 * somewhat higher in the on-demand mode of operation */
++			eError = DevmemIntAllocDefBackingPage(psDevNode,
++			                                      psDefPage,
++			                                      uiInitValue,
++			                                      pszPageName,
++			                                      IMG_TRUE);
++			PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr);
++		}
++
++		/* N.B. We pass mapping permission flags to MMU_MapPages and let
++		 * it reject the mapping if the permissions on the PMR are not compatible.
*/ ++ eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext, ++ uiMapFlags, ++ sAllocationDevVAddr, ++ psPMR, ++ 0, ++ ui32NumDevPages, ++ NULL, ++ uiLog2HeapContiguity); ++ PVR_GOTO_IF_ERROR(eError, ErrorFreeDefBackingPage); ++ } ++ else ++ { ++ eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, ++ sAllocationDevVAddr, ++ psPMR, ++ (IMG_DEVMEM_SIZE_T) ui32NumDevPages << uiLog2HeapContiguity, ++ uiMapFlags, ++ uiLog2HeapContiguity); ++ PVR_GOTO_IF_ERROR(eError, ErrorUnlockPhysAddr); ++ } ++ ++ psMapping->psReservation = psReservation; ++ psMapping->uiNumPages = ui32NumDevPages; ++ psMapping->psPMR = psPMR; ++ ++ *ppsMappingPtr = psMapping; ++ ++ return PVRSRV_OK; ++ ++ErrorFreeDefBackingPage: ++ if (bNeedBacking) ++ { ++ /*if the mapping failed, the allocated dummy ref count need ++ * to be handled accordingly */ ++ DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, ++ psDefPage, ++ pszPageName); ++ } ++ ++ErrorUnlockPhysAddr: ++ { ++ PVRSRV_ERROR eError1 = PVRSRV_OK; ++ eError1 = PMRUnlockSysPhysAddresses(psPMR); ++ PVR_LOG_IF_ERROR(eError1, "PMRUnlockSysPhysAddresses"); ++ ++ *ppsMappingPtr = NULL; ++ } ++ ++ErrorFreeMapping: ++ OSFreeMem(psMapping); ++ ++ErrorUnreference: ++ /* if fails there's not much to do (the function will print an error) */ ++ DevmemIntHeapRelease(psDevmemHeap); ++ ++ErrorReturnError: ++ PVR_ASSERT (eError != PVRSRV_OK); ++ return eError; ++} ++ ++ ++PVRSRV_ERROR ++DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap; ++ /* device virtual address of start of allocation */ ++ IMG_DEV_VIRTADDR sAllocationDevVAddr; ++ /* number of pages (device pages) that allocation spans */ ++ IMG_UINT32 ui32NumDevPages; ++ IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE; ++ PMR_FLAGS_T uiPMRFlags; ++ ++ ui32NumDevPages = psMapping->uiNumPages; ++ sAllocationDevVAddr = psMapping->psReservation->sBase; ++ ++ /*Check if the PMR that needs to be mapped is sparse */ ++ bIsSparse = PMR_IsSparse(psMapping->psPMR); ++ ++ if (bIsSparse) ++ { ++ /*Get the flags*/ ++ uiPMRFlags = PMR_Flags(psMapping->psPMR); ++ bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags); ++ ++ if (bNeedBacking) ++ { ++ if (PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiPMRFlags)) ++ { ++ DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, ++ &psDevmemHeap->psDevmemCtx->psDevNode->sDevZeroPage, ++ DEV_ZERO_PAGE); ++ } ++ else ++ { ++ DevmemIntFreeDefBackingPage(psDevmemHeap->psDevmemCtx->psDevNode, ++ &psDevmemHeap->psDevmemCtx->psDevNode->sDummyPage, ++ DUMMY_PAGE); ++ } ++ } ++ ++ MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext, ++ 0, ++ sAllocationDevVAddr, ++ ui32NumDevPages, ++ NULL, ++ psMapping->psReservation->psDevmemHeap->uiLog2PageSize, ++ 0); ++ } ++ else ++ { ++ MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext, ++ sAllocationDevVAddr, ++ ui32NumDevPages, ++ psMapping->psReservation->psDevmemHeap->uiLog2PageSize); ++ } ++ ++ eError = PMRUnlockSysPhysAddresses(psMapping->psPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Don't bother with refcount on reservation, as a reservation only ever ++ * holds one mapping, so we directly decrement the refcount on the heap ++ * instead. ++ * Function will print an error if the heap could not be unreferenced. 
*/ ++ DevmemIntHeapRelease(psDevmemHeap); ++ ++ OSFreeMem(psMapping); ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR ++DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap, ++ IMG_DEV_VIRTADDR sAllocationDevVAddr, ++ IMG_DEVMEM_SIZE_T uiAllocationSize, ++ DEVMEMINT_RESERVATION **ppsReservationPtr) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEMINT_RESERVATION *psReservation; ++ ++ if (!DevmemIntHeapAcquire(psDevmemHeap)) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_REFCOUNT_OVERFLOW, ++ ErrorReturnError); ++ } ++ ++ /* allocate memory to record the reservation info */ ++ psReservation = OSAllocMem(sizeof(*psReservation)); ++ PVR_LOG_GOTO_IF_NOMEM(psReservation, eError, ErrorUnreference); ++ ++ psReservation->sBase = sAllocationDevVAddr; ++ psReservation->uiLength = uiAllocationSize; ++ ++ eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext, ++ uiAllocationSize, ++ &uiAllocationSize, ++ 0, /* IMG_UINT32 uiProtFlags */ ++ 0, /* alignment is n/a since we supply devvaddr */ ++ &sAllocationDevVAddr, ++ psDevmemHeap->uiLog2PageSize); ++ PVR_GOTO_IF_ERROR(eError, ErrorFreeReservation); ++ ++ /* since we supplied the virt addr, MMU_Alloc shouldn't have ++ chosen a new one for us */ ++ PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr); ++ ++ psReservation->psDevmemHeap = psDevmemHeap; ++ *ppsReservationPtr = psReservation; ++ ++ return PVRSRV_OK; ++ ++ /* ++ * error exit paths follow ++ */ ++ ++ErrorFreeReservation: ++ OSFreeMem(psReservation); ++ ++ErrorUnreference: ++ /* if fails there's not much to do (the function will print an error) */ ++ DevmemIntHeapRelease(psDevmemHeap); ++ ++ErrorReturnError: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR ++DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation) ++{ ++ IMG_DEV_VIRTADDR sBase = psReservation->sBase; ++ IMG_UINT32 uiLength = psReservation->uiLength; ++ IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize; ++ ++ MMU_Free(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext, ++ sBase, ++ uiLength, ++ uiLog2DataPageSize); ++ ++ /* Don't bother with refcount on reservation, as a reservation only ever ++ * holds one mapping, so we directly decrement the refcount on the heap ++ * instead. ++ * Function will print an error if the heap could not be unreferenced. */ ++ DevmemIntHeapRelease(psReservation->psDevmemHeap); ++ ++ OSFreeMem(psReservation); ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR ++DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap) ++{ ++ if (OSAtomicRead(&psDevmemHeap->uiRefCount) != DEVMEMHEAP_REFCOUNT_MIN) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "BUG! %s called but has too many references (%d) " ++ "which probably means reservations & mappings have been made from " ++ "the heap and not freed", __func__, ++ OSAtomicRead(&psDevmemHeap->uiRefCount))); ++ ++ /* ++ * Try again later when you've freed all the memory ++ * ++ * Note: ++ * We don't expect the application to retry (after all this call would ++ * succeed if the client had freed all the memory which it should have ++ * done before calling this function). However, given there should be ++ * an associated handle, when the handle base is destroyed it will free ++ * any allocations leaked by the client and then it will retry this call, ++ * which should then succeed. 
++ */ ++ return PVRSRV_ERROR_RETRY; ++ } ++ ++ PVR_ASSERT(OSAtomicRead(&psDevmemHeap->uiRefCount) == DEVMEMHEAP_REFCOUNT_MIN); ++ ++ DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __func__, psDevmemHeap)); ++ OSFreeMem(psDevmemHeap); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap, ++ PMR *psPMR, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices, ++ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_DEV_VIRTADDR sDevVAddrBase, ++ IMG_UINT64 sCpuVAddrBase) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ IMG_UINT32 uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR); ++ IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize; ++ IMG_UINT32 uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity; ++ IMG_UINT32 uiPagesPerOrder = 1 << uiOrderDiff; ++ ++ IMG_UINT32 *pai32MapIndices = pai32AllocIndices; ++ IMG_UINT32 *pai32UnmapIndices = pai32FreeIndices; ++ IMG_UINT32 uiMapPageCount = ui32AllocPageCount; ++ IMG_UINT32 uiUnmapPageCount = ui32FreePageCount; ++ ++ /* Special case: ++ * Adjust indices if we map into a heap that uses smaller page sizes ++ * than the physical allocation itself. ++ * The incoming parameters are all based on the page size of the PMR ++ * but the mapping functions expects parameters to be in terms of heap page sizes. */ ++ if (uiOrderDiff != 0) ++ { ++ IMG_UINT32 uiPgIdx, uiPgOffset; ++ ++ uiMapPageCount = (uiMapPageCount << uiOrderDiff); ++ uiUnmapPageCount = (uiUnmapPageCount << uiOrderDiff); ++ ++ pai32MapIndices = OSAllocMem(uiMapPageCount * sizeof(*pai32MapIndices)); ++ PVR_GOTO_IF_NOMEM(pai32MapIndices, eError, e0); ++ ++ pai32UnmapIndices = OSAllocMem(uiUnmapPageCount * sizeof(*pai32UnmapIndices)); ++ if (!pai32UnmapIndices) ++ { ++ OSFreeMem(pai32MapIndices); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); ++ } ++ ++ /* Every chunk index needs to be translated from physical indices ++ * into heap based indices. */ ++ for (uiPgIdx = 0; uiPgIdx < ui32AllocPageCount; uiPgIdx++) ++ { ++ for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) ++ { ++ pai32MapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = ++ pai32AllocIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; ++ } ++ } ++ ++ for (uiPgIdx = 0; uiPgIdx < ui32FreePageCount; uiPgIdx++) ++ { ++ for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++) ++ { ++ pai32UnmapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] = ++ pai32FreeIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset; ++ } ++ } ++ } ++ ++ /* ++ * The order of steps in which this request is done is given below. The order of ++ * operations is very important in this case: ++ * ++ * 1. The parameters are validated in function PMR_ChangeSparseMem below. ++ * A successful response indicates all the parameters are correct. ++ * In failure case we bail out from here without processing further. ++ * 2. On success, get the PMR specific operations done. this includes page alloc, page free ++ * and the corresponding PMR status changes. ++ * when this call fails, it is ensured that the state of the PMR before is ++ * not disturbed. If it succeeds, then we can go ahead with the subsequent steps. ++ * 3. Invalidate the GPU page table entries for the pages to be freed. ++ * 4. Write the GPU page table entries for the pages that got allocated. ++ * 5. Change the corresponding CPU space map. 
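++	 *
++	 * For example, a call with only SPARSE_RESIZE_ALLOC set performs
++	 * steps 1, 2 and 4, while additionally setting SPARSE_MAP_CPU_ADDR
++	 * also triggers step 5 (an illustration of the flag checks below,
++	 * not a normative list).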
++	 *
++	 * The above steps can be selectively controlled using flags.
++	 */
++	if (uiSparseFlags & (SPARSE_REMAP_MEM | SPARSE_RESIZE_BOTH))
++	{
++		/* Do the PMR specific changes first */
++		eError = PMR_ChangeSparseMem(psPMR,
++		                             ui32AllocPageCount,
++		                             pai32AllocIndices,
++		                             ui32FreePageCount,
++		                             pai32FreeIndices,
++		                             uiSparseFlags);
++		if (PVRSRV_OK != eError)
++		{
++			PVR_DPF((PVR_DBG_MESSAGE,
++			         "%s: Failed to do PMR specific changes.",
++			         __func__));
++			goto e1;
++		}
++
++		/* Invalidate the page table entries for the freed pages.
++		 * A later optimisation would be not to touch the ones that get re-mapped */
++		if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE))
++		{
++			PMR_FLAGS_T uiPMRFlags;
++
++			/* Get the flags */
++			uiPMRFlags = PMR_Flags(psPMR);
++
++			if (SPARSE_REMAP_MEM != (uiSparseFlags & SPARSE_REMAP_MEM))
++			{
++				/* Unmap the pages and mark them invalid in the MMU PTE */
++				MMU_UnmapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
++				               uiFlags,
++				               sDevVAddrBase,
++				               uiUnmapPageCount,
++				               pai32UnmapIndices,
++				               uiLog2HeapContiguity,
++				               uiPMRFlags);
++			}
++		}
++
++		/* Wire up the page tables for the pages that got allocated */
++		if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC))
++		{
++			/* Map the pages and mark them valid in the MMU PTE */
++			eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
++			                      uiFlags,
++			                      sDevVAddrBase,
++			                      psPMR,
++			                      0,
++			                      uiMapPageCount,
++			                      pai32MapIndices,
++			                      uiLog2HeapContiguity);
++			if (PVRSRV_OK != eError)
++			{
++				PVR_DPF((PVR_DBG_MESSAGE,
++				         "%s: Failed to map alloc indices.",
++				         __func__));
++				goto e1;
++			}
++		}
++
++		/* Currently only used for debug */
++		if (SPARSE_REMAP_MEM == (uiSparseFlags & SPARSE_REMAP_MEM))
++		{
++			eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
++			                      uiFlags,
++			                      sDevVAddrBase,
++			                      psPMR,
++			                      0,
++			                      uiMapPageCount,
++			                      pai32UnmapIndices,
++			                      uiLog2HeapContiguity);
++			if (PVRSRV_OK != eError)
++			{
++				PVR_DPF((PVR_DBG_MESSAGE,
++				         "%s: Failed to map free indices.",
++				         __func__));
++				goto e1;
++			}
++		}
++	}
++
++#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
++	/* Apply the sparse changes to the CPU virtual map accordingly */
++	if (uiSparseFlags & SPARSE_MAP_CPU_ADDR)
++	{
++		if (sCpuVAddrBase != 0)
++		{
++			eError = PMR_ChangeSparseMemCPUMap(psPMR,
++			                                   sCpuVAddrBase,
++			                                   ui32AllocPageCount,
++			                                   pai32AllocIndices,
++			                                   ui32FreePageCount,
++			                                   pai32FreeIndices);
++			if (PVRSRV_OK != eError)
++			{
++				PVR_DPF((PVR_DBG_MESSAGE,
++				         "%s: Failed to map to CPU addr space.",
++				         __func__));
++				/* Go via e1 so that any translated index arrays
++				 * allocated above are freed, not leaked */
++				goto e1;
++			}
++		}
++	}
++#endif
++
++e1:
++	if (pai32MapIndices != pai32AllocIndices)
++	{
++		OSFreeMem(pai32MapIndices);
++	}
++	if (pai32UnmapIndices != pai32FreeIndices)
++	{
++		OSFreeMem(pai32UnmapIndices);
++	}
++e0:
++	return eError;
++}
++
++/*************************************************************************/ /*!
++@Function       DevmemIntCtxDestroy
++@Description    Destroys a context created by DevmemIntCtxCreate
++@Input          psDevmemCtx   Device Memory context
++@Return         cannot fail.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx)
++{
++	/*
++	   We can't determine if we should be freeing the context here
++	   as a refcount!=1 could be due to either the fact that heap(s)
++	   remain with allocations on them, or that this memory context
++	   has been exported.
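++	   (For example, DevmemIntExportCtx below takes a context reference
++	   that is only dropped again by DevmemIntUnexportCtx.)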
++ As the client couldn't do anything useful with this information ++ anyway and the fact that the refcount will ensure we only ++ free the context when _all_ references have been released ++ don't bother checking and just return OK regardless. ++ */ ++ DevmemIntCtxRelease(psDevmemCtx); ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ DEVMEMINT_CTX *psDevMemContext, ++ IMG_DEV_VIRTADDR sDevAddr) ++{ ++ IMG_UINT32 i, j, uiLog2HeapPageSize = 0; ++ DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo; ++ DEVMEM_HEAP_CONFIG *psConfig = psDinfo->psDeviceMemoryHeapConfigArray; ++ ++ IMG_BOOL bFound = IMG_FALSE; ++ ++ for (i = 0; ++ i < psDinfo->uiNumHeapConfigs && !bFound; ++ i++) ++ { ++ for (j = 0; ++ j < psConfig[i].uiNumHeaps && !bFound; ++ j++) ++ { ++ IMG_DEV_VIRTADDR uiBase = ++ psConfig[i].psHeapBlueprintArray[j].sHeapBaseAddr; ++ IMG_DEVMEM_SIZE_T uiSize = ++ psConfig[i].psHeapBlueprintArray[j].uiHeapLength; ++ ++ if ((sDevAddr.uiAddr >= uiBase.uiAddr) && ++ (sDevAddr.uiAddr < (uiBase.uiAddr + uiSize))) ++ { ++ uiLog2HeapPageSize = ++ psConfig[i].psHeapBlueprintArray[j].uiLog2DataPageSize; ++ bFound = IMG_TRUE; ++ } ++ } ++ } ++ ++ if (uiLog2HeapPageSize == 0) ++ { ++ return PVRSRV_ERROR_INVALID_GPU_ADDR; ++ } ++ ++ return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext, ++ uiLog2HeapPageSize, ++ sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR; ++} ++ ++PVRSRV_ERROR ++DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevMemContext, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_BOOL bInvalidate) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode; ++ MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext; ++ ++ if (psDevNode->pfnDevSLCFlushRange) ++ { ++ return psDevNode->pfnDevSLCFlushRange(psDevNode, ++ psMMUContext, ++ sDevVAddr, ++ uiSize, ++ bInvalidate); ++ } ++ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++} ++ ++PVRSRV_ERROR ++DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevMemContext, ++ IMG_UINT64 ui64FBSCEntryMask) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = psDevMemContext->psDevNode; ++ MMU_CONTEXT *psMMUContext = psDevMemContext->psMMUContext; ++ ++ if (psDevNode->pfnInvalFBSCTable) ++ { ++ return psDevNode->pfnInvalFBSCTable(psDevNode, ++ psMMUContext, ++ ui64FBSCEntryMask); ++ } ++ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++} ++ ++PVRSRV_ERROR DevmemIntGetFaultAddress(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ DEVMEMINT_CTX *psDevMemContext, ++ IMG_DEV_VIRTADDR *psFaultAddress) ++{ ++ if ((psDevMemContext->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0) ++ { ++ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; ++ } ++ ++ *psFaultAddress = psDevMemContext->sFaultAddress; ++ psDevMemContext->ui32Flags &= ~DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE; ++ ++ return PVRSRV_OK; ++} ++ ++static POSWR_LOCK g_hExportCtxListLock; ++static DLLIST_NODE g_sExportCtxList; ++ ++PVRSRV_ERROR ++DevmemIntInit(void) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ dllist_init(&g_sExportCtxList); ++ ++ eError = OSWRLockCreate(&g_hExportCtxListLock); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++DevmemIntDeInit(void) ++{ ++ PVR_ASSERT(dllist_is_empty(&g_sExportCtxList)); ++ ++ OSWRLockDestroy(g_hExportCtxListLock); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++DevmemIntExportCtx(DEVMEMINT_CTX *psContext, ++ PMR *psPMR, ++ DEVMEMINT_CTX_EXPORT **ppsContextExport) ++{ ++ DEVMEMINT_CTX_EXPORT *psCtxExport; ++ ++ psCtxExport = 
OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT));
++	PVR_LOG_RETURN_IF_NOMEM(psCtxExport, "psCtxExport");
++
++	DevmemIntCtxAcquire(psContext);
++	PMRRefPMR(psPMR);
++	/* Now that the source PMR is exported, the layout can't change, as
++	 * there could be outstanding importers. This makes sure the exporter's
++	 * and the importers' views of the memory are the same */
++	PMR_SetLayoutFixed(psPMR, IMG_TRUE);
++	psCtxExport->psDevmemCtx = psContext;
++	psCtxExport->psPMR = psPMR;
++	OSWRLockAcquireWrite(g_hExportCtxListLock);
++	dllist_add_to_tail(&g_sExportCtxList, &psCtxExport->sNode);
++	OSWRLockReleaseWrite(g_hExportCtxListLock);
++
++	*ppsContextExport = psCtxExport;
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport)
++{
++	PMRUnrefPMR(psContextExport->psPMR);
++	DevmemIntCtxRelease(psContextExport->psDevmemCtx);
++	OSWRLockAcquireWrite(g_hExportCtxListLock);
++	dllist_remove_node(&psContextExport->sNode);
++	OSWRLockReleaseWrite(g_hExportCtxListLock);
++	OSFreeMem(psContextExport);
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++DevmemIntAcquireRemoteCtx(PMR *psPMR,
++                          DEVMEMINT_CTX **ppsContext,
++                          IMG_HANDLE *phPrivData)
++{
++	PDLLIST_NODE psListNode, psListNodeNext;
++	DEVMEMINT_CTX_EXPORT *psCtxExport;
++
++	OSWRLockAcquireRead(g_hExportCtxListLock);
++	/* Find context from list using PMR as key */
++	dllist_foreach_node(&g_sExportCtxList, psListNode, psListNodeNext)
++	{
++		psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode);
++		if (psCtxExport->psPMR == psPMR)
++		{
++			OSWRLockReleaseRead(g_hExportCtxListLock);
++			DevmemIntCtxAcquire(psCtxExport->psDevmemCtx);
++			*ppsContext = psCtxExport->psDevmemCtx;
++			*phPrivData = psCtxExport->psDevmemCtx->hPrivData;
++
++			/* The PMR must already have been exported for it to be
++			 * imported. An exported PMR is immutable, which is what is
++			 * checked here */
++			PVR_ASSERT(IMG_TRUE == PMR_IsMemLayoutFixed(psPMR));
++
++			return PVRSRV_OK;
++		}
++	}
++	OSWRLockReleaseRead(g_hExportCtxListLock);
++
++	/* Unable to find exported context, return error */
++	PVR_DPF((PVR_DBG_ERROR,
++	         "%s: Failed to acquire remote context. Could not retrieve context with given PMR",
++	         __func__));
++	return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++/*************************************************************************/ /*!
++@Function       DevmemIntRegisterPFNotifyKM
++@Description    Registers a PID to be notified when a page fault occurs on a
++                specific device memory context.
++@Input          psDevmemCtx    The context to be notified about.
++@Input          ui32PID        The PID of the process that would like to be
++                               notified.
++@Input          bRegister      If true, register. If false, de-register.
++@Return         PVRSRV_ERROR.
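++
++                Illustrative registration (the PID value is hypothetical;
++                a sketch only):
++
++                    eError = DevmemIntRegisterPFNotifyKM(psDevmemCtx,
++                                                         1234, IMG_TRUE);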
++*/ /**************************************************************************/ ++PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx, ++ IMG_INT32 ui32PID, ++ IMG_BOOL bRegister) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ DLLIST_NODE *psNode, *psNodeNext; ++ DEVMEMINT_PF_NOTIFY *psNotifyNode; ++ IMG_BOOL bPresent = IMG_FALSE; ++ PVRSRV_ERROR eError; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevmemCtx, "psDevmemCtx"); ++ ++ /* Acquire write lock for the duration, to avoid resource free ++ * while trying to read (no need to then also acquire the read lock ++ * as we have exclusive access while holding the write lock) ++ */ ++ OSWRLockAcquireWrite(psDevmemCtx->hListLock); ++ ++ psDevNode = psDevmemCtx->psDevNode; ++ ++ if (bRegister) ++ { ++ /* If this is the first PID in the list, the device memory context ++ * needs to be registered for notification */ ++ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) ++ { ++ OSWRLockAcquireWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); ++ dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead, ++ &psDevmemCtx->sPageFaultNotifyListElem); ++ OSWRLockReleaseWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); ++ } ++ } ++ ++ /* Loop through the registered PIDs and check whether this one is ++ * present */ ++ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) ++ { ++ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); ++ ++ if (psNotifyNode->ui32PID == ui32PID) ++ { ++ bPresent = IMG_TRUE; ++ break; ++ } ++ } ++ ++ if (bRegister) ++ { ++ if (bPresent) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Trying to register a PID that is already registered", ++ __func__)); ++ eError = PVRSRV_ERROR_PID_ALREADY_REGISTERED; ++ goto err_already_registered; ++ } ++ ++ psNotifyNode = OSAllocMem(sizeof(*psNotifyNode)); ++ if (psNotifyNode == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unable to allocate memory for the notify list", ++ __func__)); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_out_of_mem; ++ } ++ psNotifyNode->ui32PID = ui32PID; ++ /* Write lock is already held */ ++ dllist_add_to_tail(&(psDevmemCtx->sProcessNotifyListHead), &(psNotifyNode->sProcessNotifyListElem)); ++ } ++ else ++ { ++ if (!bPresent) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Trying to unregister a PID that is not registered", ++ __func__)); ++ eError = PVRSRV_ERROR_PID_NOT_REGISTERED; ++ goto err_not_registered; ++ } ++ /* Write lock is already held */ ++ dllist_remove_node(psNode); ++ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); ++ OSFreeMem(psNotifyNode); ++ ++ /* If the last process in the list is being unregistered, then also ++ * unregister the device memory context from the notify list. */ ++ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead)) ++ { ++ OSWRLockAcquireWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); ++ dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem); ++ OSWRLockReleaseWrite(psDevNode->hMemoryContextPageFaultNotifyListLock); ++ } ++ } ++ eError = PVRSRV_OK; ++ ++err_already_registered: ++err_out_of_mem: ++err_not_registered: ++ ++ OSWRLockReleaseWrite(psDevmemCtx->hListLock); ++ return eError; ++} ++ ++/*************************************************************************/ /*! ++@Function DevmemIntPFNotify ++@Description Notifies any processes that have registered themselves to be ++ notified when a page fault happens on a specific device memory ++ context. 
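++                The faulting context is identified by comparing the faulted
++                page catalogue base address against each registered
++                context's MMU base address (see the lookup loop below).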
++@Input *psDevNode The device node. ++@Input ui64FaultedPCAddress The page catalogue address that faulted. ++@Input sFaultAddress The address that triggered the fault. ++@Return PVRSRV_ERROR ++*/ /**************************************************************************/ ++PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT64 ui64FaultedPCAddress, ++ IMG_DEV_VIRTADDR sFaultAddress) ++{ ++ DLLIST_NODE *psNode, *psNodeNext; ++ DEVMEMINT_PF_NOTIFY *psNotifyNode; ++ PVRSRV_ERROR eError; ++ DEVMEMINT_CTX *psDevmemCtx = NULL; ++ IMG_BOOL bFailed = IMG_FALSE; ++ ++ OSWRLockAcquireRead(psDevNode->hMemoryContextPageFaultNotifyListLock); ++ if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead))) ++ { ++ OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); ++ return PVRSRV_OK; ++ } ++ ++ dllist_foreach_node(&(psDevNode->sMemoryContextPageFaultNotifyListHead), psNode, psNodeNext) ++ { ++ DEVMEMINT_CTX *psThisContext = ++ IMG_CONTAINER_OF(psNode, DEVMEMINT_CTX, sPageFaultNotifyListElem); ++ IMG_DEV_PHYADDR sPCDevPAddr; ++ ++ eError = MMU_AcquireBaseAddr(psThisContext->psMMUContext, &sPCDevPAddr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "MMU_AcquireBaseAddr"); ++ OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); ++ return eError; ++ } ++ ++ if (sPCDevPAddr.uiAddr == ui64FaultedPCAddress) ++ { ++ psDevmemCtx = psThisContext; ++ break; ++ } ++ } ++ OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock); ++ ++ if (psDevmemCtx == NULL) ++ { ++ /* Not found, just return */ ++ return PVRSRV_OK; ++ } ++ OSWRLockAcquireRead(psDevmemCtx->hListLock); ++ ++ /* ++ * Store the first occurrence of a page fault address, ++ * until that address is consumed by a client. ++ */ ++ if ((psDevmemCtx->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0) ++ { ++ psDevmemCtx->sFaultAddress = sFaultAddress; ++ psDevmemCtx->ui32Flags |= DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE; ++ } ++ ++ /* Loop through each registered PID and send a signal to the process */ ++ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext) ++ { ++ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem); ++ ++ eError = OSDebugSignalPID(psNotifyNode->ui32PID); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unable to signal process for PID: %u", ++ __func__, ++ psNotifyNode->ui32PID)); ++ ++ PVR_ASSERT(!"Unable to signal process"); ++ ++ bFailed = IMG_TRUE; ++ } ++ } ++ OSWRLockReleaseRead(psDevmemCtx->hListLock); ++ ++ if (bFailed) ++ { ++ return PVRSRV_ERROR_SIGNAL_FAILED; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++#if defined(PDUMP) ++IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext) ++{ ++ IMG_UINT32 ui32MMUContextID; ++ MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID, PDUMP_FLAGS_CONTINUOUS); ++ return ui32MMUContextID; ++} ++ ++PVRSRV_ERROR ++DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, ++ IMG_DEV_VIRTADDR sDevAddrStart, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 ui32ArraySize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 ui32FileOffset, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 uiPDumpMMUCtx; ++ ++ PVR_UNREFERENCED_PARAMETER(ui32ArraySize); ++ ++ /* Confirm that the device node's ui32InternalID matches the bound ++ * PDump device stored in PVRSRV_DATA. 
++ */ ++ if (psDevmemCtx->psDevNode->sDevId.ui32InternalID != ++ (PVRSRVGetPVRSRVData())->ui32PDumpBoundDevice) ++ { ++ return PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE; ++ } ++ ++ eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext, ++ &uiPDumpMMUCtx, ++ ui32PDumpFlags); ++ ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* ++ The following SYSMEM refers to the 'MMU Context', hence it ++ should be the MMU context, not the PMR, that says what the PDump ++ MemSpace tag is? ++ From a PDump P.O.V. it doesn't matter which name space we use as long ++ as that MemSpace is used on the 'MMU Context' we're dumping from ++ */ ++ eError = PDumpMMUSAB(psDevmemCtx->psDevNode, ++ psDevmemCtx->psDevNode->sDevId.pszPDumpDevName, ++ uiPDumpMMUCtx, ++ sDevAddrStart, ++ uiSize, ++ pszFilename, ++ ui32FileOffset, ++ ui32PDumpFlags); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext, ui32PDumpFlags); ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEMINT_CTX *psDevMemContext, ++ IMG_UINT32 ui32Size, ++ const IMG_CHAR *pszFileName, ++ IMG_DEV_VIRTADDR sData, ++ IMG_UINT32 ui32DataSize, ++ IMG_UINT32 ui32LogicalWidth, ++ IMG_UINT32 ui32LogicalHeight, ++ IMG_UINT32 ui32PhysicalWidth, ++ IMG_UINT32 ui32PhysicalHeight, ++ PDUMP_PIXEL_FORMAT ePixFmt, ++ IMG_MEMLAYOUT eMemLayout, ++ IMG_FB_COMPRESSION eFBCompression, ++ const IMG_UINT32 *paui32FBCClearColour, ++ PDUMP_FBC_SWIZZLE eFBCSwizzle, ++ IMG_DEV_VIRTADDR sHeader, ++ IMG_UINT32 ui32HeaderSize, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ IMG_UINT32 ui32ContextID; ++ PVRSRV_ERROR eError; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(ui32Size); ++ ++ eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); ++ PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext"); ++ ++ eError = PDumpImageDescriptor(psDeviceNode, ++ ui32ContextID, ++ (IMG_CHAR *)pszFileName, ++ sData, ++ ui32DataSize, ++ ui32LogicalWidth, ++ ui32LogicalHeight, ++ ui32PhysicalWidth, ++ ui32PhysicalHeight, ++ ePixFmt, ++ eMemLayout, ++ eFBCompression, ++ paui32FBCClearColour, ++ eFBCSwizzle, ++ sHeader, ++ ui32HeaderSize, ++ ui32PDumpFlags); ++ PVR_LOG_IF_ERROR(eError, "PDumpImageDescriptor"); ++ ++ /* Don't care about return value */ ++ (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEMINT_CTX *psDevMemContext, ++ IMG_UINT32 ui32Size, ++ const IMG_CHAR *pszFileName, ++ IMG_DEV_VIRTADDR sData, ++ IMG_UINT32 ui32DataSize, ++ IMG_UINT32 ui32HeaderType, ++ IMG_UINT32 ui32ElementType, ++ IMG_UINT32 ui32ElementCount, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ IMG_UINT32 ui32ContextID; ++ PVRSRV_ERROR eError; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(ui32Size); ++ ++ if ((ui32HeaderType != IBIN_HEADER_TYPE) && ++ (ui32HeaderType != DATA_HEADER_TYPE)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid header type (%u)", ++ __func__, ++ ui32HeaderType)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID, ui32PDumpFlags); ++ PVR_LOG_RETURN_IF_ERROR(eError, "MMU_AcquirePDumpMMUContext"); ++ ++ eError = PDumpDataDescriptor(psDeviceNode, ++ ui32ContextID, ++ (IMG_CHAR *)pszFileName, ++ sData, 
++ ui32DataSize, ++ ui32HeaderType, ++ ui32ElementType, ++ ui32ElementCount, ++ ui32PDumpFlags); ++ PVR_LOG_IF_ERROR(eError, "PDumpDataDescriptor"); ++ ++ /* Don't care about return value */ ++ (void) MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext, ui32PDumpFlags); ++ ++ return eError; ++} ++ ++#endif +diff --git a/drivers/gpu/drm/img-rogue/devicemem_server.h b/drivers/gpu/drm/img-rogue/devicemem_server.h +new file mode 100644 +index 000000000000..30a2b2e5245e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_server.h +@@ -0,0 +1,633 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Server side component for device memory management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#ifndef DEVICEMEM_SERVER_H
++#define DEVICEMEM_SERVER_H
++
++#include "device.h" /* For device node */
++#include "img_types.h"
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "pvrsrv_error.h"
++
++#include "connection_server.h"
++#include "pmr.h"
++
++typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX;
++typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT;
++typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP;
++
++typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION;
++typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING;
++typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY;
++
++
++/*************************************************************************/ /*!
++@Function DevmemIntUnpin
++@Description This is the counterpart to DevmemIntPin(). It is meant to be
++ called when the allocation is NOT mapped in the device virtual
++ space.
++
++@Input psPMR The physical memory to unpin.
++
++@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
++ registered to be reclaimed. Error otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR DevmemIntUnpin(PMR *psPMR);
++
++/*************************************************************************/ /*!
++@Function DevmemIntUnpinInvalidate
++@Description This is the counterpart to DevmemIntPinValidate(). It is meant
++ to be called for allocations that ARE mapped in the device
++ virtual space and we have to invalidate the mapping.
++
++@Input psPMR The physical memory to unpin.
++
++@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
++ registered to be reclaimed. Error otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
++
++/*************************************************************************/ /*!
++@Function DevmemIntPin
++@Description This is the counterpart to DevmemIntUnpin().
++ It is meant to be called if there is NO device mapping present.
++
++@Input psPMR The physical memory to pin.
++
++@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
++ was successfully restored.
++
++ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
++ could not be restored and new physical memory
++ was allocated.
++
++ A different error otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR DevmemIntPin(PMR *psPMR);
++
++/*************************************************************************/ /*!
++@Function DevmemIntPinValidate
++@Description This is the counterpart to DevmemIntUnpinInvalidate().
++ It is meant to be called if there IS a device mapping present
++ that needs to be taken care of.
++
++@Input psDevmemMapping The mapping structure used for the passed PMR.
++
++@Input psPMR The physical memory to pin.
++
++@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
++ was successfully restored.
++
++ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
++ could not be restored and new physical memory
++ was allocated.
++
++ A different error otherwise.
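++
++             A sketch of the intended unpin/pin pairing (illustrative only;
++             psDevmemMapping and psPMR are assumed to already exist, error
++             paths are elided):
++
++                 DevmemIntUnpinInvalidate(psDevmemMapping, psPMR);
++                 (the backing memory may now be reclaimed by the system)
++                 eError = DevmemIntPinValidate(psDevmemMapping, psPMR);
++                 if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
++                 {
++                     (content was lost; the caller must re-initialise it)
++                 }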
++*/ /**************************************************************************/ ++PVRSRV_ERROR ++DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR); ++/* ++ * DevmemServerGetImportHandle() ++ * ++ * For given exportable memory descriptor returns PMR handle ++ * ++ */ ++PVRSRV_ERROR ++DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc, ++ IMG_HANDLE *phImport); ++ ++/* ++ * DevmemServerGetHeapHandle() ++ * ++ * For given reservation returns the Heap handle ++ * ++ */ ++PVRSRV_ERROR ++DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation, ++ IMG_HANDLE *phHeap); ++ ++/* ++ * DevmemServerGetContext() ++ * ++ * For given heap returns the context. ++ * ++ */ ++PVRSRV_ERROR ++DevmemServerGetContext(DEVMEMINT_HEAP *psDevmemHeap, ++ DEVMEMINT_CTX **ppsDevmemCtxPtr); ++ ++/* ++ * DevmemServerGetPrivData() ++ * ++ * For given context returns the private data handle. ++ * ++ */ ++PVRSRV_ERROR ++DevmemServerGetPrivData(DEVMEMINT_CTX *psDevmemCtx, ++ IMG_HANDLE *phPrivData); ++ ++/* ++ * DevmemIntAllocDefBackingPage ++ * ++ * This function allocates default backing page and initializes it ++ * with a given default value ++ * ++ */ ++PVRSRV_ERROR DevmemIntAllocDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, ++ PVRSRV_DEF_PAGE *psDefPage, ++ IMG_INT uiInitValue, ++ IMG_CHAR *pcDefPageName, ++ IMG_BOOL bInitPage); ++/* ++ * DevmemIntFreeDefBackingPage ++ * ++ * Frees a given page ++ */ ++void DevmemIntFreeDefBackingPage(PVRSRV_DEVICE_NODE *psDevNode, ++ PVRSRV_DEF_PAGE *psDefPage, ++ IMG_CHAR *pcDefPageName); ++ ++ ++/* ++ * DevmemIntCtxCreate() ++ * ++ * Create a Server-side Device Memory Context. This is usually the counterpart ++ * of the client side memory context, and indeed is usually created at the ++ * same time. ++ * ++ * You must have one of these before creating any heaps. ++ * ++ * All heaps must have been destroyed before calling ++ * DevmemIntCtxDestroy() ++ * ++ * If you call DevmemIntCtxCreate() (and it succeeds) you are promising to ++ * later call DevmemIntCtxDestroy() ++ * ++ * Note that this call will cause the device MMU code to do some work for ++ * creating the device memory context, but it does not guarantee that a page ++ * catalogue will have been created, as this may be deferred until the first ++ * allocation. ++ * ++ * Caller to provide storage for a pointer to the DEVMEM_CTX object that will ++ * be created by this call. ++ */ ++PVRSRV_ERROR ++DevmemIntCtxCreate(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ /* devnode / perproc etc */ ++ IMG_BOOL bKernelMemoryCtx, ++ DEVMEMINT_CTX **ppsDevmemCtxPtr, ++ IMG_HANDLE *hPrivData, ++ IMG_UINT32 *pui32CPUCacheLineSize); ++/* ++ * DevmemIntCtxDestroy() ++ * ++ * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport. ++ */ ++PVRSRV_ERROR ++DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx); ++ ++/* ++ * DevmemIntHeapCreate() ++ * ++ * Creates a new heap in this device memory context. This will cause a call ++ * into the MMU code to allocate various data structures for managing this ++ * heap. It will not necessarily cause any page tables to be set up, as this ++ * can be deferred until first allocation. (i.e. we shouldn't care - it's up ++ * to the MMU code) ++ * ++ * Note that the data page size must be specified (as log 2). The data page ++ * size as specified here will be communicated to the mmu module, and thus may ++ * determine the page size configured in page directory entries for subsequent ++ * allocations from this heap. 
It is essential that the page size here is less
++ * than or equal to the "minimum contiguity guarantee" of any PMR that you
++ * subsequently attempt to map to this heap.
++ *
++ * If you call DevmemIntHeapCreate() (and the call succeeds) you are promising
++ * that you shall subsequently call DevmemIntHeapDestroy()
++ *
++ * Caller to provide storage for a pointer to the DEVMEM_HEAP object that will
++ * be created by this call.
++ */
++PVRSRV_ERROR
++DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
++ IMG_DEV_VIRTADDR sHeapBaseAddr,
++ IMG_DEVMEM_SIZE_T uiHeapLength,
++ IMG_UINT32 uiLog2DataPageSize,
++ DEVMEMINT_HEAP **ppsDevmemHeapPtr);
++/*
++ * DevmemIntHeapDestroy()
++ *
++ * Destroys a heap previously created with DevmemIntHeapCreate()
++ *
++ * All allocations from this heap must have been freed before this
++ * call.
++ */
++PVRSRV_ERROR
++DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap);
++
++/*
++ * DevmemIntMapPMR()
++ *
++ * Maps the given PMR to the virtual range previously allocated with
++ * DevmemIntReserveRange()
++ *
++ * If appropriate, the PMR must have had its physical backing committed, as
++ * this call will call into the MMU code to set up the page tables for this
++ * allocation, which shall in turn request the physical addresses from the
++ * PMR. Alternatively, the PMR implementation can choose to do so off the
++ * back of the "lock" callback, which it will receive as a result
++ * (indirectly) of this call.
++ *
++ * This function makes no promise w.r.t. the circumstances in which it can be
++ * called, and these would be "inherited" from the implementation of the PMR.
++ * For example, if the PMR "lock" callback causes pages to be pinned at that
++ * time (which may cause scheduling or disk I/O etc.) then it would not be
++ * legal to "Map" the PMR in a context where scheduling events are disallowed.
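++ *
++ * A typical (illustrative) reserve/map/unmap/unreserve flow, assuming the
++ * heap and PMR already exist and with error handling elided:
++ *
++ *     DEVMEMINT_RESERVATION *psResv;
++ *     DEVMEMINT_MAPPING *psMapping;
++ *     DevmemIntReserveRange(psDevmemHeap, sDevVAddr, uiSize, &psResv);
++ *     DevmemIntMapPMR(psDevmemHeap, psResv, psPMR, uiMapFlags, &psMapping);
++ *     ...
++ *     DevmemIntUnmapPMR(psMapping);
++ *     DevmemIntUnreserveRange(psResv);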
++ *
++ * If you call DevmemIntMapPMR() (and the call succeeds) then you are promising
++ * that you shall later call DevmemIntUnmapPMR()
++ */
++PVRSRV_ERROR
++DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
++ DEVMEMINT_RESERVATION *psReservation,
++ PMR *psPMR,
++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
++ DEVMEMINT_MAPPING **ppsMappingPtr);
++/*
++ * DevmemIntUnmapPMR()
++ *
++ * Reverses the mapping caused by DevmemIntMapPMR()
++ */
++PVRSRV_ERROR
++DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping);
++
++/* DevmemIntMapPages()
++ *
++ * Maps an arbitrary number of pages from a PMR to a reserved range
++ *
++ * @input psReservation Reservation handle for the range
++ * @input psPMR PMR that is mapped
++ * @input ui32PageCount Number of consecutive pages that are
++ * mapped
++ * @input ui32PhysicalPgOffset Logical offset in the PMR
++ * @input uiFlags Mapping flags
++ * @input sDevVAddrBase Virtual address base to start the
++ * mapping from
++ */
++PVRSRV_ERROR
++DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
++ PMR *psPMR,
++ IMG_UINT32 ui32PageCount,
++ IMG_UINT32 ui32PhysicalPgOffset,
++ PVRSRV_MEMALLOCFLAGS_T uiFlags,
++ IMG_DEV_VIRTADDR sDevVAddrBase);
++
++/* DevmemIntUnmapPages()
++ *
++ * Unmaps an arbitrary number of pages from a reserved range
++ *
++ * @input psReservation Reservation handle for the range
++ * @input sDevVAddrBase Virtual address base to start from
++ * @input ui32PageCount Number of consecutive pages that are
++ * unmapped
++ */
++PVRSRV_ERROR
++DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
++ IMG_DEV_VIRTADDR sDevVAddrBase,
++ IMG_UINT32 ui32PageCount);
++
++/*
++ * DevmemIntReserveRange()
++ *
++ * Indicates that the specified range should be reserved from the given heap.
++ *
++ * In turn causes the page tables to be allocated to cover the specified range.
++ *
++ * If you call DevmemIntReserveRange() (and the call succeeds) then you are
++ * promising that you shall later call DevmemIntUnreserveRange()
++ */
++PVRSRV_ERROR
++DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
++ IMG_DEV_VIRTADDR sAllocationDevVAddr,
++ IMG_DEVMEM_SIZE_T uiAllocationSize,
++ DEVMEMINT_RESERVATION **ppsReservationPtr);
++/*
++ * DevmemIntUnreserveRange()
++ *
++ * Undoes the state change caused by DevmemIntReserveRange()
++ */
++PVRSRV_ERROR
++DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
++
++/*************************************************************************/ /*!
++@Function DevmemIntChangeSparse
++@Description Changes the sparse allocations of a PMR by allocating and freeing
++ pages and changing their corresponding CPU and GPU mappings.
++
++@input psDevmemHeap Pointer to the heap we map on
++@input psPMR The PMR we want to map
++@input ui32AllocPageCount Number of pages to allocate
++@input pai32AllocIndices The logical PMR indices where pages will
++ be allocated. May be NULL.
++@input ui32FreePageCount Number of pages to free
++@input pai32FreeIndices The logical PMR indices where pages will
++ be freed. May be NULL.
++@input uiSparseFlags Flags passed in to determine which kind
++ of sparse change the user wanted.
++ See devicemem_typedefs.h for details.
++@input uiFlags Memalloc flags for this virtual range.
++@input sDevVAddrBase The base address of the virtual range of
++ this sparse allocation.
++@input sCpuVAddrBase The CPU base address of this allocation.
++ May be 0 if it does not exist.
++@Return PVRSRV_ERROR failure code
++*/ /**************************************************************************/
++PVRSRV_ERROR
++DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
++ PMR *psPMR,
++ IMG_UINT32 ui32AllocPageCount,
++ IMG_UINT32 *pai32AllocIndices,
++ IMG_UINT32 ui32FreePageCount,
++ IMG_UINT32 *pai32FreeIndices,
++ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
++ PVRSRV_MEMALLOCFLAGS_T uiFlags,
++ IMG_DEV_VIRTADDR sDevVAddrBase,
++ IMG_UINT64 sCpuVAddrBase);
++
++/*
++ * DevmemIntFlushDevSLCRange()
++ *
++ * Flush the specified device context's virtual address range from the SLC.
++ */
++PVRSRV_ERROR
++DevmemIntFlushDevSLCRange(DEVMEMINT_CTX *psDevmemCtx,
++ IMG_DEV_VIRTADDR sDevVAddr,
++ IMG_DEVMEM_SIZE_T uiSize,
++ IMG_BOOL bInvalidate);
++
++/*
++ * DevmemIntInvalidateFBSCTable()
++ *
++ * Invalidate selected FBSC table indices.
++ *
++ */
++PVRSRV_ERROR
++DevmemIntInvalidateFBSCTable(DEVMEMINT_CTX *psDevmemCtx,
++ IMG_UINT64 ui64FBSCEntryMask);
++
++PVRSRV_ERROR
++DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
++ PVRSRV_DEVICE_NODE *psDevNode,
++ DEVMEMINT_CTX *psDevMemContext,
++ IMG_DEV_VIRTADDR sDevAddr);
++
++PVRSRV_ERROR
++DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection,
++ PVRSRV_DEVICE_NODE *psDevNode,
++ DEVMEMINT_CTX *psDevMemContext,
++ IMG_DEV_VIRTADDR *psFaultAddress);
++
++/*************************************************************************/ /*!
++@Function DevmemIntRegisterPFNotifyKM
++@Description Registers a PID to be notified when a page fault occurs on a
++ specific device memory context.
++@Input psDevmemCtx The context to be notified about.
++@Input ui32PID The PID of the process that would like to be
++ notified.
++@Input bRegister If true, register. If false, de-register.
++@Return PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR
++DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
++ IMG_INT32 ui32PID,
++ IMG_BOOL bRegister);
++
++/*************************************************************************/ /*!
++@Function DevmemIntPFNotify
++@Description Notifies any processes that have registered themselves to be
++ notified when a page fault happens on a specific device memory
++ context.
++@Input *psDevNode The device node.
++@Input ui64FaultedPCAddress The page catalogue address that faulted.
++@Input sFaultAddress The address that triggered the fault.
++@Return PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
++ IMG_UINT64 ui64FaultedPCAddress,
++ IMG_DEV_VIRTADDR sFaultAddress);
++
++#if defined(PDUMP)
++/*
++ * DevmemIntPDumpSaveToFileVirtual()
++ *
++ * Writes out PDump "SAB" commands with the data found in memory at
++ * the given virtual address.
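++ *
++ * A hypothetical capture call (names illustrative), saving uiSize bytes
++ * starting at sDevAddrStart into "dump.bin" at file offset 0:
++ *
++ *     eError = DevmemIntPDumpSaveToFileVirtual(psDevmemCtx, sDevAddrStart,
++ *                                              uiSize, 1, "dump.bin", 0,
++ *                                              PDUMP_FLAGS_CONTINUOUS);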
++ */ ++PVRSRV_ERROR ++DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, ++ IMG_DEV_VIRTADDR sDevAddrStart, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 uiArraySize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 ui32FileOffset, ++ IMG_UINT32 ui32PDumpFlags); ++ ++IMG_UINT32 ++DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext); ++ ++PVRSRV_ERROR ++DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEMINT_CTX *psDevMemContext, ++ IMG_UINT32 ui32Size, ++ const IMG_CHAR *pszFileName, ++ IMG_DEV_VIRTADDR sData, ++ IMG_UINT32 ui32DataSize, ++ IMG_UINT32 ui32LogicalWidth, ++ IMG_UINT32 ui32LogicalHeight, ++ IMG_UINT32 ui32PhysicalWidth, ++ IMG_UINT32 ui32PhysicalHeight, ++ PDUMP_PIXEL_FORMAT ePixFmt, ++ IMG_MEMLAYOUT eMemLayout, ++ IMG_FB_COMPRESSION eFBCompression, ++ const IMG_UINT32 *paui32FBCClearColour, ++ PDUMP_FBC_SWIZZLE eFBCSwizzle, ++ IMG_DEV_VIRTADDR sHeader, ++ IMG_UINT32 ui32HeaderSize, ++ IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR ++DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEMINT_CTX *psDevMemContext, ++ IMG_UINT32 ui32Size, ++ const IMG_CHAR *pszFileName, ++ IMG_DEV_VIRTADDR sData, ++ IMG_UINT32 ui32DataSize, ++ IMG_UINT32 ui32HeaderType, ++ IMG_UINT32 ui32ElementType, ++ IMG_UINT32 ui32ElementCount, ++ IMG_UINT32 ui32PDumpFlags); ++#else /* PDUMP */ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemIntPDumpSaveToFileVirtual) ++#endif ++static INLINE PVRSRV_ERROR ++DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx, ++ IMG_DEV_VIRTADDR sDevAddrStart, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 uiArraySize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 ui32FileOffset, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevmemCtx); ++ PVR_UNREFERENCED_PARAMETER(sDevAddrStart); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(uiArraySize); ++ PVR_UNREFERENCED_PARAMETER(pszFilename); ++ PVR_UNREFERENCED_PARAMETER(ui32FileOffset); ++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(DevmemIntPDumpImageDescriptor) ++#endif ++static INLINE PVRSRV_ERROR ++DevmemIntPDumpImageDescriptor(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEMINT_CTX *psDevMemContext, ++ IMG_UINT32 ui32Size, ++ const IMG_CHAR *pszFileName, ++ IMG_DEV_VIRTADDR sData, ++ IMG_UINT32 ui32DataSize, ++ IMG_UINT32 ui32LogicalWidth, ++ IMG_UINT32 ui32LogicalHeight, ++ IMG_UINT32 ui32PhysicalWidth, ++ IMG_UINT32 ui32PhysicalHeight, ++ PDUMP_PIXEL_FORMAT ePixFmt, ++ IMG_MEMLAYOUT eMemLayout, ++ IMG_FB_COMPRESSION eFBCompression, ++ const IMG_UINT32 *paui32FBCClearColour, ++ PDUMP_FBC_SWIZZLE eFBCSwizzle, ++ IMG_DEV_VIRTADDR sHeader, ++ IMG_UINT32 ui32HeaderSize, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(psDevMemContext); ++ PVR_UNREFERENCED_PARAMETER(ui32Size); ++ PVR_UNREFERENCED_PARAMETER(pszFileName); ++ PVR_UNREFERENCED_PARAMETER(sData); ++ PVR_UNREFERENCED_PARAMETER(ui32DataSize); ++ PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth); ++ PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight); ++ PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth); ++ PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight); ++ PVR_UNREFERENCED_PARAMETER(ePixFmt); ++ PVR_UNREFERENCED_PARAMETER(eMemLayout); ++ PVR_UNREFERENCED_PARAMETER(eFBCompression); ++ 
PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
++ PVR_UNREFERENCED_PARAMETER(eFBCSwizzle);
++ PVR_UNREFERENCED_PARAMETER(sHeader);
++ PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(DevmemIntPDumpDataDescriptor)
++#endif
++/* Note: the stub must mirror the PDUMP prototype above, including the
++ * ui32HeaderType parameter, so callers compile in both configurations. */
++static INLINE PVRSRV_ERROR
++DevmemIntPDumpDataDescriptor(CONNECTION_DATA * psConnection,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ DEVMEMINT_CTX *psDevMemContext,
++ IMG_UINT32 ui32Size,
++ const IMG_CHAR *pszFileName,
++ IMG_DEV_VIRTADDR sData,
++ IMG_UINT32 ui32DataSize,
++ IMG_UINT32 ui32HeaderType,
++ IMG_UINT32 ui32ElementType,
++ IMG_UINT32 ui32ElementCount,
++ IMG_UINT32 ui32PDumpFlags)
++{
++ PVR_UNREFERENCED_PARAMETER(psConnection);
++ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++ PVR_UNREFERENCED_PARAMETER(psDevMemContext);
++ PVR_UNREFERENCED_PARAMETER(ui32Size);
++ PVR_UNREFERENCED_PARAMETER(pszFileName);
++ PVR_UNREFERENCED_PARAMETER(sData);
++ PVR_UNREFERENCED_PARAMETER(ui32DataSize);
++ PVR_UNREFERENCED_PARAMETER(ui32HeaderType);
++ PVR_UNREFERENCED_PARAMETER(ui32ElementType);
++ PVR_UNREFERENCED_PARAMETER(ui32ElementCount);
++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
++ return PVRSRV_OK;
++}
++
++#endif /* PDUMP */
++
++PVRSRV_ERROR
++DevmemIntInit(void);
++
++PVRSRV_ERROR
++DevmemIntDeInit(void);
++
++PVRSRV_ERROR
++DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
++ PMR *psPMR,
++ DEVMEMINT_CTX_EXPORT **ppsContextExport);
++
++PVRSRV_ERROR
++DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport);
++
++PVRSRV_ERROR
++DevmemIntAcquireRemoteCtx(PMR *psPMR,
++ DEVMEMINT_CTX **ppsContext,
++ IMG_HANDLE *phPrivData);
++
++#endif /* DEVICEMEM_SERVER_H */
+diff --git a/drivers/gpu/drm/img-rogue/devicemem_server_utils.h b/drivers/gpu/drm/img-rogue/devicemem_server_utils.h
+new file mode 100644
+index 000000000000..ad85c07cdcf6
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/devicemem_server_utils.h
+@@ -0,0 +1,198 @@
++/**************************************************************************/ /*!
++@File
++@Title Device Memory Management
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Header file utilities that are specific to device memory functions
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution.
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "device.h" ++#include "pvrsrv_memallocflags.h" ++#include "pvrsrv.h" ++ ++static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_MEMALLOCFLAGS_T ulFlags, ++ IMG_UINT32 *pui32Ret) ++{ ++ IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); ++ IMG_UINT32 ui32Ret; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags)); ++ ++ switch (ui32CPUCacheMode) ++ { ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: ++ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: ++ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT: ++ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED; ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT: ++ ++ /* ++ * If system has no coherency but coherency has been requested for CPU ++ * and GPU we currently fall back to write-combine. ++ * This avoids errors on arm64 when uncached is turned into ordered device memory ++ * and suffers from problems with unaligned access. ++ */ ++ if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) && ++ !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) ++ { ++ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; ++ } ++ else ++ { ++ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED; ++ } ++ break; ++ ++ default: ++ PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode)); ++ PVR_ASSERT(0); ++ /* ++ We should never get here, but if we do then setting the mode ++ to uncached is the safest thing to do. 
++ */ ++ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED; ++ eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; ++ break; ++ } ++ ++ *pui32Ret = ui32Ret; ++ ++ return eError; ++} ++ ++static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_MEMALLOCFLAGS_T ulFlags, ++ IMG_UINT32 *pui32Ret) ++{ ++ IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags); ++ IMG_UINT32 ui32Ret; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags)); ++ ++ switch (ui32DeviceCacheMode) ++ { ++ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED: ++ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED; ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC: ++ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC; ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT: ++ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED; ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT: ++ ++ /* ++ * If system has no coherency but coherency has been requested for CPU ++ * and GPU we currently fall back to write-combine. ++ * This avoids errors on arm64 when uncached is turned into ordered device memory ++ * and suffers from problems with unaligned access. ++ */ ++ if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) && ++ !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ) ++ { ++ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC; ++ } ++ else ++ { ++ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED; ++ } ++ break; ++ ++ default: ++ PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode)); ++ PVR_ASSERT(0); ++ /* ++ We should never get here, but if we do then setting the mode ++ to uncached is the safest thing to do. ++ */ ++ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED; ++ eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE; ++ break; ++ } ++ ++ *pui32Ret = ui32Ret; ++ ++ return eError; ++} ++ ++static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_MEMALLOCFLAGS_T ulFlags) ++{ ++ IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags); ++ IMG_BOOL bRet = IMG_FALSE; ++ ++ PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags)); ++ ++ if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) ++ { ++ bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig); ++ } ++ return bRet; ++} ++ ++static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_MEMALLOCFLAGS_T ulFlags) ++{ ++ IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags); ++ IMG_BOOL bRet = IMG_FALSE; ++ ++ PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags)); ++ ++ if (ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) ++ { ++ bRet = PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig); ++ } ++ return bRet; ++} +diff --git a/drivers/gpu/drm/img-rogue/devicemem_typedefs.h b/drivers/gpu/drm/img-rogue/devicemem_typedefs.h +new file mode 100644 +index 000000000000..dd66fccf3322 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_typedefs.h +@@ -0,0 +1,142 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Client side part of device memory management -- this file ++ is forked from new_devmem_allocation.h as this one has to ++ reside in the top level include so that client code is able ++ to make use of the typedefs. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef DEVICEMEM_TYPEDEFS_H ++#define DEVICEMEM_TYPEDEFS_H ++ ++#include ++#include "img_types.h" ++#include "pvrsrv_memallocflags.h" ++ ++typedef struct DEVMEM_CONTEXT_TAG DEVMEM_CONTEXT; /*!< Convenience typedef for struct DEVMEM_CONTEXT_TAG */ ++typedef struct DEVMEM_HEAP_TAG DEVMEM_HEAP; /*!< Convenience typedef for struct DEVMEM_HEAP_TAG */ ++typedef struct DEVMEM_MEMDESC_TAG DEVMEM_MEMDESC; /*!< Convenience typedef for struct DEVMEM_MEMDESC_TAG */ ++typedef struct DEVMEM_PAGELIST_TAG DEVMEM_PAGELIST; /*!< Convenience typedef for struct DEVMEM_PAGELIST_TAG */ ++ ++typedef IMG_HANDLE DEVMEM_EXPORTHANDLE; /*!< Typedef for DeviceMem Export Handle */ ++typedef IMG_UINT64 DEVMEM_EXPORTKEY; /*!< Typedef for DeviceMem Export Key */ ++typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T; /*!< Typedef for DeviceMem SIZE_T */ ++typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T; /*!< Typedef for DeviceMem LOG2 Alignment */ ++ ++typedef struct DEVMEMX_PHYS_MEMDESC_TAG DEVMEMX_PHYSDESC; /*!< Convenience typedef for DevmemX physical */ ++typedef struct DEVMEMX_VIRT_MEMDESC_TAG DEVMEMX_VIRTDESC; /*!< Convenience typedef for DevmemX virtual */ ++ ++/*! 
Calling code needs all the info in this struct to be able to pass it around */
++typedef struct
++{
++ /*! A handle to the PMR. */
++ IMG_HANDLE hPMRExportHandle;
++ /*! The "key" to prove we have authorisation to use this PMR */
++ IMG_UINT64 uiPMRExportPassword;
++ /*! Size and alignment properties for this PMR. Note, these
++ numbers are not trusted in the kernel, but we need to cache them
++ client-side in order to allocate from the VM arena. The kernel
++ will know the actual alignment and size of the PMR and thus
++ would prevent client code from breaching security here. Ditto
++ for physmem granularity (aka page size) if this is different
++ from alignment */
++ IMG_DEVMEM_SIZE_T uiSize;
++ /*! We call this "contiguity guarantee" to be more precise than
++ calling it "alignment" or "page size", terms which may seem
++ similar but have different emphasis. The number reported here
++ is the minimum contiguity guarantee from the creator of the
++ PMR. Now, there is no requirement to allocate that coarsely
++ from the RA. The alignment given to the RA simply needs to be
++ at least as coarse as the device page size for the heap we
++ ultimately intend to map into. What is important is that the
++ device MMU data page size is not greater than the minimum
++ contiguity guarantee from the PMR. This value is reported to
++ the client in order that it can choose to make early checks and
++ perhaps decide which heap (in a variable page size scenario) it
++ would be safe to map this PMR into. For convenience, the
++ client may choose to use this argument as the alignment of the
++ virtual range it chooses to allocate, but this is _not_
++ necessary and in many cases it could get away with a
++ finer alignment, should the heap into which this PMR will be
++ mapped support it. */
++ IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee;
++} DEVMEM_EXPORTCOOKIE;
++
++/* Enum that describes the operation associated with changing sparse memory */
++typedef IMG_UINT32 SPARSE_MEM_RESIZE_FLAGS;
++#define SPARSE_RESIZE_NONE 0U
++
++ /* This should be set to indicate the change needs an allocation */
++#define SPARSE_RESIZE_ALLOC 1U
++
++ /* This should be set to indicate the change needs a free */
++#define SPARSE_RESIZE_FREE 2U
++
++#define SPARSE_RESIZE_BOTH (SPARSE_RESIZE_ALLOC | SPARSE_RESIZE_FREE)
++
++ /* This should be set to silently swap underlying physical memory
++ * without disturbing its device or cpu virtual maps.
++ * This flag is not supported in the case of PDUMP and could lead to
++ * PDUMP panic when used.
++ */
++#define SPARSE_REMAP_MEM 4U
++
++ /* Should be set to make the sparse changes appear in the CPU virtual map */
++#define SPARSE_MAP_CPU_ADDR 8U
++
++
++/* To be used with all the sparse allocations that get mapped to CPU virtual
++ * space. The sparse allocation CPU mapping is torn down and re-mapped every
++ * time the sparse allocation layout changes.
++ */
++#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1
++
++/* To use with DevmemSubAllocate() as the default factor if no over-allocation
++ * is desired.
++ */
++#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER (1U)
++
++/* Defines the max length for PMR, MemDesc, Device memory History and RI debug
++ * annotations stored in memory, including the null terminator.
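++ *
++ * For example (illustrative), a client-side buffer sized to hold any
++ * annotation, including its null terminator:
++ *
++ *     IMG_CHAR aszAnnotation[DEVMEM_ANNOTATION_MAX_LEN];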
++ */ ++#define DEVMEM_ANNOTATION_MAX_LEN ((IMG_UINT32)PVR_ANNOTATION_MAX_LEN + 1U) ++ ++#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/devicemem_utils.c b/drivers/gpu/drm/img-rogue/devicemem_utils.c +new file mode 100644 +index 000000000000..d4416ae57a74 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_utils.c +@@ -0,0 +1,1259 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management internal utility functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Utility functions used internally by device memory management ++ code. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ /**************************************************************************/ ++ ++#include "allocmem.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "ra.h" ++#include "devicemem_utils.h" ++#include "client_mm_bridge.h" ++#include "client_cache_bridge.h" ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++#include "client_ri_bridge.h" ++#if defined(__KERNEL__) ++#include "pvrsrv.h" ++#else ++#include "pvr_bridge_client.h" ++#endif ++#endif ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#include "proc_stats.h" ++#endif ++ ++#if defined(__KERNEL__) ++#include "srvcore.h" ++#else ++#include "srvcore_intern.h" ++#endif ++ ++/* ++ SVM heap management support functions for CPU (un)mapping ++ */ ++#define DEVMEM_MAP_SVM_USER_MANAGED_RETRY 2 ++ ++static inline PVRSRV_ERROR ++DevmemCPUMapSVMKernelManaged(DEVMEM_HEAP *psHeap, ++ DEVMEM_IMPORT *psImport, ++ IMG_UINT64 *ui64MapAddress) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT64 ui64SvmMapAddr; ++ IMG_UINT64 ui64SvmMapAddrEnd; ++ IMG_UINT64 ui64SvmHeapAddrEnd; ++ ++ /* SVM heap management always has XXX_MANAGER_KERNEL unless we ++ have triggered the fall back code-path in which case we ++ should not be calling into this code-path */ ++ PVR_ASSERT(psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_KERNEL); ++ ++ /* By acquiring the CPU virtual address here, it essentially ++ means we lock-down the virtual address for the duration ++ of the life-cycle of the allocation until a de-allocation ++ request comes in. Thus the allocation is guaranteed not to ++ change its virtual address on the CPU during its life-time. ++ NOTE: Import might have already been CPU Mapped before now, ++ normally this is not a problem, see fall back */ ++ eError = DevmemImportStructCPUMap(psImport); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "DevmemImportStructCPUMap"); ++ eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED; ++ goto failSVM; ++ } ++ ++ /* Supplied kernel mmap virtual address is also device virtual address; ++ calculate the heap & kernel supplied mmap virtual address limits */ ++ ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr; ++ ui64SvmHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize; ++ ui64SvmMapAddrEnd = ui64SvmMapAddr + psImport->uiSize; ++ PVR_ASSERT(ui64SvmMapAddr != (IMG_UINT64)0); ++ ++ /* SVM limit test may fail if processor has more virtual address bits than device */ ++ if ((ui64SvmMapAddr >= ui64SvmHeapAddrEnd || ui64SvmMapAddrEnd > ui64SvmHeapAddrEnd) || ++ (ui64SvmMapAddr & ~(ui64SvmHeapAddrEnd - 1))) ++ { ++ /* Unmap incompatible SVM virtual address, this ++ may not release address if it was elsewhere ++ CPU Mapped before call into this function */ ++ DevmemImportStructCPUUnmap(psImport); ++ ++ /* Flag incompatible SVM mapping */ ++ eError = PVRSRV_ERROR_BAD_MAPPING; ++ goto failSVM; ++ } ++ ++ *ui64MapAddress = ui64SvmMapAddr; ++failSVM: ++ /* either OK, MAP_FAILED or BAD_MAPPING */ ++ return eError; ++} ++ ++static inline void ++DevmemCPUUnmapSVMKernelManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport) ++{ ++ PVR_UNREFERENCED_PARAMETER(psHeap); ++ DevmemImportStructCPUUnmap(psImport); ++} ++ ++static inline PVRSRV_ERROR ++DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap, ++ DEVMEM_IMPORT *psImport, ++ IMG_UINT uiAlign, ++ IMG_UINT64 *ui64MapAddress) ++{ ++ RA_LENGTH_T uiAllocatedSize; ++ RA_BASE_T uiAllocatedAddr; ++ IMG_UINT64 ui64SvmMapAddr; ++ IMG_UINT uiRetry = 0; ++ PVRSRV_ERROR eError; ++ ++ /* If SVM heap management has transitioned to XXX_MANAGER_USER, ++ 
this is essentially a fall back approach that ensures we ++ continue to satisfy SVM alloc. This approach is not without ++ hazards in that we may specify a virtual address that is ++ already in use by the user process */ ++ PVR_ASSERT(psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER); ++ ++ /* Normally, for SVM heap allocations, CPUMap _must_ be done ++ before DevMap; ideally the initial CPUMap should be done by ++ SVM functions though this is not a hard requirement as long ++ as the prior elsewhere obtained CPUMap virtual address meets ++ SVM address requirements. This is a fall-back code-pathway ++ so we have to test that this assumption holds before we ++ progress any further */ ++ OSLockAcquire(psImport->sCPUImport.hLock); ++ ++ if (psImport->sCPUImport.ui32RefCount) ++ { ++ /* Already CPU Mapped SVM heap allocation, this prior elsewhere ++ obtained virtual address is responsible for the above ++ XXX_MANAGER_KERNEL failure. As we are not responsible for ++ this, we cannot progress any further so need to fail */ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Previously obtained CPU map address not SVM compatible" ++ , __func__)); ++ ++ /* Revert SVM heap to DEVMEM_HEAP_MANAGER_KERNEL */ ++ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_KERNEL; ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: Reverting SVM heap back to kernel managed", ++ __func__)); ++ ++ OSLockRelease(psImport->sCPUImport.hLock); ++ ++ /* Do we need a more specific error code here */ ++ eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED; ++ goto failSVM; ++ } ++ ++ OSLockRelease(psImport->sCPUImport.hLock); ++ ++ do ++ { ++ /* Next we proceed to instruct the kernel to use the RA_Alloc supplied ++ virtual address to map-in this SVM import suballocation; there is no ++ guarantee that this RA_Alloc virtual address may not collide with an ++ already in-use VMA range in the process */ ++ eError = RA_Alloc(psHeap->psQuantizedVMRA, ++ psImport->uiSize, ++ RA_NO_IMPORT_MULTIPLIER, ++ 0, /* flags: this RA doesn't use flags*/ ++ uiAlign, ++ "SVM_Virtual_Alloc", ++ &uiAllocatedAddr, ++ &uiAllocatedSize, ++ NULL /* don't care about per-import priv data */); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "RA_Alloc"); ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) ++ { ++ PVRSRV_ERROR eErr; ++ eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), ++ PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, ++ OSGetCurrentProcessID()); ++ PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); ++ } ++#endif ++ goto failSVM; ++ } ++ ++ /* No reason for allocated virtual size to be different from ++ the PMR's size */ ++ psImport->sCPUImport.pvCPUVAddr = (void*)(uintptr_t)uiAllocatedAddr; ++ PVR_ASSERT(uiAllocatedSize == psImport->uiSize); ++ ++ /* Map the import or allocation using the RA_Alloc virtual address; ++ the kernel may fail the request if the supplied virtual address ++ is already in-use in which case we re-try using another virtual ++ address obtained from the RA_Alloc */ ++ eError = DevmemImportStructCPUMap(psImport); ++ if (eError != PVRSRV_OK) ++ { ++ /* For now we simply discard failed RA_Alloc() obtained virtual ++ address (i.e. 
plenty of virtual space); this prevents us from
++ re-using them and essentially blacklists these
++ addresses from future SVM consideration. We exit the fall-back
++ attempt if the retry count exceeds the fall-back retry limit */
++ if (uiRetry++ > DEVMEM_MAP_SVM_USER_MANAGED_RETRY)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Cannot find SVM compatible address, bad mapping",
++ __func__));
++ eError = PVRSRV_ERROR_BAD_MAPPING;
++ goto failSVM;
++ }
++ }
++ else
++ {
++ /* Found compatible SVM virtual address, set as device virtual address */
++ ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
++ }
++ } while (eError != PVRSRV_OK);
++
++ *ui64MapAddress = ui64SvmMapAddr;
++failSVM:
++ return eError;
++}
++
++static inline void
++DevmemCPUUnmapSVMUserManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
++{
++ RA_BASE_T uiAllocatedAddr;
++
++ /* We only free SVM compatible addresses; all addresses in
++ the blacklist are essentially excluded from future RA_Alloc */
++ uiAllocatedAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
++ RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr);
++
++ DevmemImportStructCPUUnmap(psImport);
++}
++
++static inline PVRSRV_ERROR
++DevmemImportStructDevMapSVM(DEVMEM_HEAP *psHeap,
++ DEVMEM_IMPORT *psImport,
++ IMG_UINT uiAlign,
++ IMG_UINT64 *ui64MapAddress)
++{
++ PVRSRV_ERROR eError;
++
++ switch (psHeap->ui32HeapManagerFlags)
++ {
++ case DEVMEM_HEAP_MANAGER_KERNEL:
++ eError = DevmemCPUMapSVMKernelManaged(psHeap,
++ psImport,
++ ui64MapAddress);
++ if (eError == PVRSRV_ERROR_BAD_MAPPING)
++ {
++ /* If the SVM map address is outside of SVM heap limits,
++ change heap type to DEVMEM_HEAP_MANAGER_USER */
++ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER;
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: Kernel managed SVM heap is now user managed",
++ __func__));
++
++ /* Retry using user managed fall-back approach */
++ eError = DevmemCPUMapSVMUserManaged(psHeap,
++ psImport,
++ uiAlign,
++ ui64MapAddress);
++ }
++ break;
++
++ case DEVMEM_HEAP_MANAGER_USER:
++ eError = DevmemCPUMapSVMUserManaged(psHeap,
++ psImport,
++ uiAlign,
++ ui64MapAddress);
++ break;
++
++ default:
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ break;
++ }
++
++ return eError;
++}
++
++static inline void
++DevmemImportStructDevUnmapSVM(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
++{
++ switch (psHeap->ui32HeapManagerFlags)
++ {
++ case DEVMEM_HEAP_MANAGER_KERNEL:
++ DevmemCPUUnmapSVMKernelManaged(psHeap, psImport);
++ break;
++
++ case DEVMEM_HEAP_MANAGER_USER:
++ DevmemCPUUnmapSVMUserManaged(psHeap, psImport);
++ break;
++
++ default:
++ break;
++ }
++}
++
++/*
++ The Devmem import structure is the structure we use
++ to manage memory that is "imported" (which is page
++ granular) from the server into our process; this
++ includes allocations.
++
++ This allows memory to be imported without requiring
++ any CPU or device mapping. Memory can then be mapped
++ into the device or CPU on demand, but neither is
++ required.
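++
++ An illustrative sketch of the refcount lifetime (obtaining psImport is
++ elided):
++
++     DevmemImportStructAcquire(psImport);
++     ...
++     bFreed = DevmemImportStructRelease(psImport);
++
++ DevmemImportStructRelease() returns IMG_TRUE only once the final
++ reference has been dropped, at which point the import and its locks
++ have been freed.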
++ */ ++ ++IMG_INTERNAL ++void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport) ++{ ++ IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount); ++ PVR_UNREFERENCED_PARAMETER(iRefCount); ++ PVR_ASSERT(iRefCount != 1); ++ ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psImport, ++ iRefCount-1, ++ iRefCount); ++} ++ ++IMG_INTERNAL ++IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport) ++{ ++ IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount); ++ PVR_ASSERT(iRefCount >= 0); ++ ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psImport, ++ iRefCount+1, ++ iRefCount); ++ ++ if (iRefCount == 0) ++ { ++ PVRSRV_ERROR eError = DestroyServerResource(psImport->hDevConnection, ++ NULL, ++ BridgePMRUnrefPMR, ++ psImport->hPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ OSLockDestroy(psImport->sCPUImport.hLock); ++ OSLockDestroy(psImport->sDeviceImport.hLock); ++ OSLockDestroy(psImport->hLock); ++ OSFreeMem(psImport); ++ ++ return IMG_TRUE; ++ } ++ ++ return IMG_FALSE; ++} ++ ++IMG_INTERNAL ++void DevmemImportDiscard(DEVMEM_IMPORT *psImport) ++{ ++ PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0); ++ OSLockDestroy(psImport->sCPUImport.hLock); ++ OSLockDestroy(psImport->sDeviceImport.hLock); ++ OSLockDestroy(psImport->hLock); ++ OSFreeMem(psImport); ++} ++ ++IMG_INTERNAL ++PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc) ++{ ++ DEVMEM_MEMDESC *psMemDesc; ++ PVRSRV_ERROR eError; ++ ++ /* Must be zeroed in case it needs to be freed before it is initialised */ ++ psMemDesc = OSAllocZMem(sizeof(DEVMEM_MEMDESC)); ++ PVR_GOTO_IF_NOMEM(psMemDesc, eError, failAlloc); ++ ++ eError = OSLockCreate(&psMemDesc->hLock); ++ PVR_GOTO_IF_ERROR(eError, failMDLock); ++ ++ eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock); ++ PVR_GOTO_IF_ERROR(eError, failDMDLock); ++ ++ eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock); ++ PVR_GOTO_IF_ERROR(eError, failCMDLock); ++ ++ OSAtomicWrite(&psMemDesc->hRefCount, 0); ++ ++ *ppsMemDesc = psMemDesc; ++ ++ return PVRSRV_OK; ++ ++failCMDLock: ++ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); ++failDMDLock: ++ OSLockDestroy(psMemDesc->hLock); ++failMDLock: ++ OSFreeMem(psMemDesc); ++failAlloc: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ ++ return eError; ++} ++ ++/* ++ Init the MemDesc structure ++ */ ++IMG_INTERNAL ++void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ DEVMEM_IMPORT *psImport, ++ IMG_DEVMEM_SIZE_T uiSize) ++{ ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ 0, ++ 1); ++ ++ psMemDesc->psImport = psImport; ++ psMemDesc->uiOffset = uiOffset; ++ ++ psMemDesc->sDeviceMemDesc.ui32RefCount = 0; ++ psMemDesc->sCPUMemDesc.ui32RefCount = 0; ++ psMemDesc->uiAllocSize = uiSize; ++ psMemDesc->hPrivData = NULL; ++ psMemDesc->ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE; ++ ++#if defined(DEBUG) ++ psMemDesc->bPoisonOnFree = IMG_FALSE; ++#endif ++ ++ OSAtomicWrite(&psMemDesc->hRefCount, 1); ++} ++ ++#if defined(DEBUG) ++IMG_INTERNAL ++void DevmemMemDescSetPoF(DEVMEM_MEMDESC *psMemDesc, PVRSRV_MEMALLOCFLAGS_T uiFlags) ++{ ++ if (PVRSRV_CHECK_POISON_ON_FREE(uiFlags)) ++ { ++ psMemDesc->bPoisonOnFree = IMG_TRUE; ++ } ++} ++#endif ++ ++IMG_INTERNAL ++void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc) ++{ ++ IMG_INT iRefCount = 0; ++ ++ iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ iRefCount-1, ++ iRefCount); ++ ++ PVR_UNREFERENCED_PARAMETER(iRefCount); ++} ++ ++#if 
defined(DEBUG) ++static void _DevmemPoisonOnFree(DEVMEM_MEMDESC *psMemDesc) ++{ ++ void *pvAddr = NULL; ++ IMG_UINT8 *pui8CPUVAddr; ++ PVRSRV_ERROR eError; ++ ++ eError = DevmemCPUMapCheckImportProperties(psMemDesc); ++ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "DevmemCPUMapCheckImportProperties"); ++ ++ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock); ++ eError = DevmemImportStructCPUMap(psMemDesc->psImport); ++ OSLockRelease(psMemDesc->sCPUMemDesc.hLock); ++ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "DevmemImportStructCPUMap"); ++ ++ pui8CPUVAddr = psMemDesc->psImport->sCPUImport.pvCPUVAddr; ++ pui8CPUVAddr += psMemDesc->uiOffset; ++ pvAddr = pui8CPUVAddr; ++ ++ DevmemCPUMemSet(pvAddr, ++ PVRSRV_POISON_ON_FREE_VALUE, ++ psMemDesc->uiAllocSize, ++ psMemDesc->psImport->uiFlags); ++ ++ if (PVRSRV_CHECK_CPU_CACHE_COHERENT(psMemDesc->psImport->uiFlags) || ++ PVRSRV_CHECK_CPU_CACHE_INCOHERENT(psMemDesc->psImport->uiFlags)) ++ { ++ eError = BridgeCacheOpExec(GetBridgeHandle(psMemDesc->psImport->hDevConnection), ++ psMemDesc->psImport->hPMR, ++ (IMG_UINT64) (uintptr_t) ++ pvAddr - psMemDesc->uiOffset, ++ psMemDesc->uiOffset, ++ psMemDesc->uiAllocSize, ++ PVRSRV_CACHE_OP_FLUSH); ++ PVR_LOG_IF_ERROR(eError, "BridgeCacheOpExec"); ++ } ++ ++ DevmemImportStructCPUUnmap(psMemDesc->psImport); ++ pvAddr = NULL; ++} ++#endif ++ ++IMG_INTERNAL ++IMG_BOOL DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc) ++{ ++ IMG_INT iRefCount; ++ PVR_ASSERT(psMemDesc != NULL); ++ ++ iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount); ++ PVR_ASSERT(iRefCount >= 0); ++ ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psMemDesc, ++ iRefCount+1, ++ iRefCount); ++ ++ if (iRefCount == 0) ++ { ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if (PVRSRVIsBridgeEnabled(GetBridgeHandle(psMemDesc->psImport->hDevConnection), PVRSRV_BRIDGE_RI) && ++ (psMemDesc->hRIHandle)) ++ { ++ PVRSRV_ERROR eError; ++ ++ eError = DestroyServerResource(psMemDesc->psImport->hDevConnection, ++ NULL, ++ BridgeRIDeleteMEMDESCEntry, ++ psMemDesc->hRIHandle); ++ PVR_LOG_IF_ERROR(eError, "BridgeRIDeleteMEMDESCEntry"); ++ } ++#endif ++ ++ OSLockAcquire(psMemDesc->psImport->hLock); ++ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE) ++ { ++ /* As soon as the first sub-allocation on the psImport is freed ++ * we might get dirty memory when reusing it. 
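_DevmemPoisonOnFree above CPU-maps the allocation, fills it with the poison byte, and flushes the CPU cache so the pattern is guaranteed to land in memory. A generic sketch of the same debug technique follows; poison_free and the 0xDE pattern are hypothetical stand-ins for the driver's PVRSRV_POISON_ON_FREE_VALUE machinery.

    #include <stdlib.h>
    #include <string.h>

    #define POISON_ON_FREE_VALUE 0xDE  /* stand-in for the driver's pattern */

    /* Overwrite a buffer with a recognisable pattern before freeing it, so a
     * use-after-free shows up as 0xDEDEDE... in the debugger rather than as
     * plausible stale data. Debug builds only; it costs a full memset. */
    static void poison_free(void *ptr, size_t size)
    {
    #if defined(DEBUG)
        if (ptr != NULL)
            memset(ptr, POISON_ON_FREE_VALUE, size);
    #else
        (void)size;
    #endif
        free(ptr);
    }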
++ * We have to delete the ZEROED, CLEAN & POISONED flag */ ++ psMemDesc->psImport->uiProperties &= ++ ~(DEVMEM_PROPERTIES_IMPORT_IS_ZEROED | ++ DEVMEM_PROPERTIES_IMPORT_IS_CLEAN | ++ DEVMEM_PROPERTIES_IMPORT_IS_POISONED); ++ ++ OSLockRelease(psMemDesc->psImport->hLock); ++ ++#if defined(DEBUG) ++ if (psMemDesc->bPoisonOnFree) ++ { ++ _DevmemPoisonOnFree(psMemDesc); ++ } ++#endif ++ ++ RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA, ++ psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr + ++ psMemDesc->uiOffset); ++ } ++ else ++ { ++ OSLockRelease(psMemDesc->psImport->hLock); ++ DevmemImportStructRelease(psMemDesc->psImport); ++ } ++ ++ OSLockDestroy(psMemDesc->sCPUMemDesc.hLock); ++ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); ++ OSLockDestroy(psMemDesc->hLock); ++ OSFreeMem(psMemDesc); ++ ++ return IMG_TRUE; ++ } ++ ++ return IMG_FALSE; ++} ++ ++IMG_INTERNAL ++void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc) ++{ ++ PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0); ++ ++ OSLockDestroy(psMemDesc->sCPUMemDesc.hLock); ++ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock); ++ OSLockDestroy(psMemDesc->hLock); ++ OSFreeMem(psMemDesc); ++} ++ ++ ++IMG_INTERNAL ++PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T *puiFlags) ++{ ++ if ((*puiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) && ++ (*puiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.", ++ __func__)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if (uiAlign & (uiAlign-1)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: The requested alignment is not a power of two.", ++ __func__)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if (uiSize == 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Please request a non-zero size value.", ++ __func__)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* If zero or poison flags are set we have to have write access to the page. 
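DevmemMemDescAlloc earlier, and DevmemImportStructAlloc further below, both use the kernel-style goto error ladder: each acquired resource gets a label, and a failure jumps to the label that releases exactly what has been acquired so far, in reverse order. A minimal sketch of the idiom with made-up resource names:

    #include <stdlib.h>

    typedef struct { void *a, *b, *c; } obj_t;

    /* Acquire three resources; on any failure, release only the ones
     * already held, unwinding in reverse order of acquisition. */
    static obj_t *obj_create(void)
    {
        obj_t *o = calloc(1, sizeof(*o));
        if (o == NULL)
            goto fail_alloc;

        o->a = malloc(16);
        if (o->a == NULL)
            goto fail_a;

        o->b = malloc(16);
        if (o->b == NULL)
            goto fail_b;

        o->c = malloc(16);
        if (o->c == NULL)
            goto fail_c;

        return o;

    fail_c:
        free(o->b);
    fail_b:
        free(o->a);
    fail_a:
        free(o);
    fail_alloc:
        return NULL;
    }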
*/ ++ if (PVRSRV_CHECK_ZERO_ON_ALLOC(*puiFlags) || ++ PVRSRV_CHECK_POISON_ON_ALLOC(*puiFlags) || ++#if defined(DEBUG) ++ PVRSRV_CHECK_POISON_ON_FREE(*puiFlags) || ++#endif ++ PVRSRV_CHECK_CPU_WRITEABLE(*puiFlags)) ++ { ++ (*puiFlags) |= PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ Allocate and init an import structure ++ */ ++IMG_INTERNAL ++PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection, ++ DEVMEM_IMPORT **ppsImport) ++{ ++ DEVMEM_IMPORT *psImport; ++ PVRSRV_ERROR eError; ++ ++ psImport = OSAllocMem(sizeof(*psImport)); ++ PVR_RETURN_IF_FALSE(psImport != NULL, PVRSRV_ERROR_OUT_OF_MEMORY); ++ ++ /* Setup some known bad values for things we don't have yet */ ++ psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON; ++ psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON; ++ psImport->sDeviceImport.psHeap = NULL; ++ psImport->sDeviceImport.bMapped = IMG_FALSE; ++ ++ eError = OSLockCreate(&psImport->sDeviceImport.hLock); ++ PVR_GOTO_IF_ERROR(eError, failDIOSLockCreate); ++ ++ psImport->sCPUImport.hOSMMapData = NULL; ++ psImport->sCPUImport.pvCPUVAddr = NULL; ++ ++ eError = OSLockCreate(&psImport->sCPUImport.hLock); ++ PVR_GOTO_IF_ERROR(eError, failCIOSLockCreate); ++ ++ /* Set up common elements */ ++ psImport->hDevConnection = hDevConnection; ++ ++ /* Setup properties */ ++ psImport->uiProperties = 0; ++ ++ /* Setup refcounts */ ++ psImport->sDeviceImport.ui32RefCount = 0; ++ psImport->sCPUImport.ui32RefCount = 0; ++ OSAtomicWrite(&psImport->hRefCount, 0); ++ ++ /* Create the lock */ ++ eError = OSLockCreate(&psImport->hLock); ++ PVR_GOTO_IF_ERROR(eError, failILockAlloc); ++ ++ *ppsImport = psImport; ++ ++ return PVRSRV_OK; ++ ++failILockAlloc: ++ OSLockDestroy(psImport->sCPUImport.hLock); ++failCIOSLockCreate: ++ OSLockDestroy(psImport->sDeviceImport.hLock); ++failDIOSLockCreate: ++ OSFreeMem(psImport); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ ++ return eError; ++} ++ ++/* ++ Initialise the import structure ++ */ ++IMG_INTERNAL ++void DevmemImportStructInit(DEVMEM_IMPORT *psImport, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_HANDLE hPMR, ++ DEVMEM_PROPERTIES_T uiProperties) ++{ ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psImport, ++ 0, ++ 1); ++ ++ psImport->uiSize = uiSize; ++ psImport->uiAlign = uiAlign; ++ psImport->uiFlags = uiFlags; ++ psImport->hPMR = hPMR; ++ psImport->uiProperties = uiProperties; ++ OSAtomicWrite(&psImport->hRefCount, 1); ++} ++ ++/* Allocate the requested device virtual address region ++ * from the heap */ ++static PVRSRV_ERROR DevmemReserveVARange(DEVMEM_HEAP *psHeap, ++ DEVMEM_SIZE_T uiSize, ++ IMG_UINT uiAlign, ++ RA_LENGTH_T *puiAllocatedSize, ++ IMG_UINT64 ui64OptionalMapAddress) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Allocate space in the VM */ ++ eError = RA_Alloc_Range(psHeap->psQuantizedVMRA, ++ uiSize, ++ 0, ++ uiAlign, ++ ui64OptionalMapAddress, ++ puiAllocatedSize); ++ ++ if (PVRSRV_OK != eError) ++ { ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ if ((eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) || ++ (eError == PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL)) ++ { ++ PVRSRV_ERROR eErr; ++ eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), ++ PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM, ++ OSGetCurrentProcessID()); ++ PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); ++ } ++#endif ++ return eError; ++ } ++ ++ /* No reason for the allocated virtual 
size to be different from ++ the PMR's size */ ++ PVR_ASSERT(*puiAllocatedSize == uiSize); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ Map an import to the device ++ */ ++IMG_INTERNAL ++PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, ++ IMG_BOOL bMap, ++ DEVMEM_IMPORT *psImport, ++ IMG_UINT64 ui64OptionalMapAddress) ++{ ++ DEVMEM_DEVICE_IMPORT *psDeviceImport; ++ RA_BASE_T uiAllocatedAddr; ++ RA_LENGTH_T uiAllocatedSize; ++ IMG_DEV_VIRTADDR sBase; ++ IMG_HANDLE hReservation; ++ PVRSRV_ERROR eError; ++ IMG_UINT uiAlign; ++ IMG_BOOL bDestroyed = IMG_FALSE; ++ ++ /* Round the provided import alignment to the configured heap alignment */ ++ uiAlign = 1ULL << psHeap->uiLog2ImportAlignment; ++ uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1); ++ ++ psDeviceImport = &psImport->sDeviceImport; ++ ++ OSLockAcquire(psDeviceImport->hLock); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psImport, ++ psDeviceImport->ui32RefCount, ++ psDeviceImport->ui32RefCount+1); ++ ++ if (psDeviceImport->ui32RefCount++ == 0) ++ { ++ DevmemImportStructAcquire(psImport); ++ ++ OSAtomicIncrement(&psHeap->hImportCount); ++ ++ if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) ++ { ++ /* SVM (shared virtual memory) imports or allocations always ++ need to acquire CPU virtual address first as address is ++ used to map the allocation into the device virtual address ++ space; i.e. the virtual address of the allocation for both ++ the CPU/GPU must be identical. */ ++ eError = DevmemImportStructDevMapSVM(psHeap, ++ psImport, ++ uiAlign, ++ &ui64OptionalMapAddress); ++ PVR_GOTO_IF_ERROR(eError, failVMRAAlloc); ++ } ++ ++ if (ui64OptionalMapAddress == 0) ++ { ++ /* If heap is _completely_ managed by USER or KERNEL, we shouldn't ++ * be here, as this is RA manager code-path */ ++ if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER || ++ psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_KERNEL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ? ++ "%s: Heap is user managed, please use PVRSRVMapToDeviceAddress().": ++ "%s: Heap is kernel managed, use right allocation flags (e.g. SVM).", ++ __func__)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); ++ } ++ ++ if (psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_UNKNOWN) ++ { ++ /* Only set the heap manager (to RA) at first map when heap manager ++ * is unknown. 
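The alignment fix-up at the top of DevmemImportStructDevMap, "(psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1)", is the standard add-then-mask round-up, valid whenever the rounding target is a power of two. A small self-contained illustration; round_up_pow2 is an invented helper name:

    #include <assert.h>
    #include <stdint.h>

    /* Round 'value' up to the next multiple of 'align'; 'align' must be a
     * power of two for the mask trick to be valid. */
    static inline uint64_t round_up_pow2(uint64_t value, uint64_t align)
    {
        assert(align != 0 && (align & (align - 1)) == 0);
        return (value + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        /* Import asks for 4KB alignment, heap requires 64KB: result is 64KB. */
        assert(round_up_pow2(4096, 65536) == 65536);
        /* Import alignment already a multiple of the heap's: unchanged. */
        assert(round_up_pow2(131072, 65536) == 131072);
        return 0;
    }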
It might be a dual heap (both, user and RA managed), ++ * in which case heap manager is set at creation time */ ++ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_RA; ++ } ++ ++ /* Allocate space in the VM */ ++ eError = RA_Alloc(psHeap->psQuantizedVMRA, ++ psImport->uiSize, ++ RA_NO_IMPORT_MULTIPLIER, ++ 0, /* flags: this RA doesn't use flags*/ ++ uiAlign, ++ "Virtual_Alloc", ++ &uiAllocatedAddr, ++ &uiAllocatedSize, ++ NULL /* don't care about per-import priv data */ ++ ); ++ if (PVRSRV_OK != eError) ++ { ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ if (eError == PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) ++ { ++ PVRSRV_ERROR eErr; ++ eErr = BridgePVRSRVUpdateOOMStats(GetBridgeHandle(psHeap->psCtx->hDevConnection), ++ PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, ++ OSGetCurrentProcessID()); ++ PVR_LOG_IF_ERROR(eErr, "BridgePVRSRVUpdateOOMStats"); ++ } ++#endif ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM, failVMRAAlloc); ++ } ++ ++ /* No reason for the allocated virtual size to be different from ++ the PMR's size */ ++ PVR_ASSERT(uiAllocatedSize == psImport->uiSize); ++ ++ sBase.uiAddr = uiAllocatedAddr; ++ ++ } ++ else ++ { ++ IMG_UINT64 ui64ValidEndAddr; ++ ++ /* Ensure supplied ui64OptionalMapAddress is within heap range */ ++ ui64ValidEndAddr = psHeap->sBaseAddress.uiAddr + psHeap->uiSize; ++ if ((ui64OptionalMapAddress + psImport->uiSize > ui64ValidEndAddr) || ++ (ui64OptionalMapAddress < psHeap->sBaseAddress.uiAddr)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: ui64OptionalMapAddress %p is outside of heap limits <%p:%p>." ++ , __func__ ++ , (void*)(uintptr_t)ui64OptionalMapAddress ++ , (void*)(uintptr_t)psHeap->sBaseAddress.uiAddr ++ , (void*)(uintptr_t)ui64ValidEndAddr)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); ++ } ++ ++ switch (psHeap->ui32HeapManagerFlags) ++ { ++ case DEVMEM_HEAP_MANAGER_UNKNOWN: ++ /* DEVMEM_HEAP_MANAGER_USER can apply to _any_ heap and can only ++ * be determined here. This heap type transitions from ++ * DEVMEM_HEAP_MANAGER_UNKNOWN to DEVMEM_HEAP_MANAGER_USER on ++ * 1st alloc. */ ++ psHeap->ui32HeapManagerFlags = DEVMEM_HEAP_MANAGER_USER; ++ break; ++ ++ case DEVMEM_HEAP_MANAGER_USER: ++ case DEVMEM_HEAP_MANAGER_KERNEL: ++ if (! psHeap->uiSize) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ psHeap->ui32HeapManagerFlags == DEVMEM_HEAP_MANAGER_USER ? ++ "%s: Heap DEVMEM_HEAP_MANAGER_USER is disabled.": ++ "%s: Heap DEVMEM_HEAP_MANAGER_KERNEL is disabled." 
++ , __func__)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failVMRAAlloc); ++ } ++ break; ++ ++ case DEVMEM_HEAP_MANAGER_DUAL_USER_RA: ++ /* When the heap is dual managed, ensure supplied ui64OptionalMapAddress ++ * and import size are within heap address space range */ ++ if (ui64OptionalMapAddress + psImport->uiSize <= ++ psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize) ++ { ++ break; ++ } ++ else ++ { ++ /* Allocate requested VM range */ ++ eError = DevmemReserveVARange(psHeap, ++ psImport->uiSize, ++ uiAlign, ++ &uiAllocatedSize, ++ ui64OptionalMapAddress); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc); ++ } ++ ++ } ++ break; ++ case DEVMEM_HEAP_MANAGER_RA: ++ /* Allocate requested VM range */ ++ eError = DevmemReserveVARange(psHeap, ++ psImport->uiSize, ++ uiAlign, ++ &uiAllocatedSize, ++ ui64OptionalMapAddress); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED, failVMRAAlloc); ++ } ++ break; ++ ++ default: ++ break; ++ } ++ ++ if (ui64OptionalMapAddress & ((1 << psHeap->uiLog2Quantum) - 1)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid address to map to. Please provide an " ++ "address aligned to a page multiple of the heap." ++ , __func__)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); ++ } ++ ++ if (psImport->uiSize & ((1 << psHeap->uiLog2Quantum) - 1)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid heap to map to. " ++ "Please choose a heap that can handle smaller page sizes." ++ , __func__)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, failVMRAAlloc); ++ } ++ ++ uiAllocatedAddr = ui64OptionalMapAddress; ++ uiAllocatedSize = psImport->uiSize; ++ sBase.uiAddr = uiAllocatedAddr; ++ } ++ ++ if (psHeap->bPremapped) ++ { ++ /* no virtual address reservation and mapping are required for memory that's already mapped */ ++ psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON; ++ psDeviceImport->hMapping = LACK_OF_MAPPING_POISON; ++ } ++ else ++ { ++ /* Setup page tables for the allocated VM space */ ++ eError = BridgeDevmemIntReserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), ++ psHeap->hDevMemServerHeap, ++ sBase, ++ uiAllocatedSize, ++ &hReservation); ++ PVR_GOTO_IF_ERROR(eError, failReserve); ++ ++ if (bMap) ++ { ++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags; ++ ++ uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK; ++ ++ /* Actually map the PMR to allocated VM space */ ++ eError = BridgeDevmemIntMapPMR(GetBridgeHandle(psHeap->psCtx->hDevConnection), ++ psHeap->hDevMemServerHeap, ++ hReservation, ++ psImport->hPMR, ++ uiMapFlags, ++ &psDeviceImport->hMapping); ++ PVR_GOTO_IF_ERROR(eError, failMap); ++ ++ psDeviceImport->bMapped = IMG_TRUE; ++ } ++ ++ psDeviceImport->hReservation = hReservation; ++ } ++ ++ /* Setup device mapping specific parts of the mapping info */ ++ psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr; ++ psDeviceImport->psHeap = psHeap; ++ } ++ else ++ { ++ /* ++ Check that we've been asked to map it into the ++ same heap 2nd time around ++ */ ++ if (psHeap != psDeviceImport->psHeap) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_HEAP, failParams); ++ } ++ } ++ OSLockRelease(psDeviceImport->hLock); ++ ++ return PVRSRV_OK; ++ ++failMap: ++ if (!psHeap->bPremapped) ++ { ++ BridgeDevmemIntUnreserveRange(GetBridgeHandle(psHeap->psCtx->hDevConnection), ++ hReservation); ++ } ++failReserve: ++ if (ui64OptionalMapAddress == 0) ++ { ++ 
RA_Free(psHeap->psQuantizedVMRA, ++ uiAllocatedAddr); ++ } ++failVMRAAlloc: ++ if ((ui64OptionalMapAddress) && PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) ++ { ++ DevmemImportStructDevUnmapSVM(psHeap, psImport); ++ } ++ bDestroyed = DevmemImportStructRelease(psImport); ++ OSAtomicDecrement(&psHeap->hImportCount); ++failParams: ++ if (!bDestroyed) ++ { ++ psDeviceImport->ui32RefCount--; ++ OSLockRelease(psDeviceImport->hLock); ++ } ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++/* ++ Unmap an import from the Device ++ */ ++IMG_INTERNAL ++IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEM_DEVICE_IMPORT *psDeviceImport; ++ ++ psDeviceImport = &psImport->sDeviceImport; ++ ++ OSLockAcquire(psDeviceImport->hLock); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psImport, ++ psDeviceImport->ui32RefCount, ++ psDeviceImport->ui32RefCount-1); ++ ++ if (--psDeviceImport->ui32RefCount == 0) ++ { ++ DEVMEM_HEAP *psHeap = psDeviceImport->psHeap; ++ ++ if (!psHeap->bPremapped) ++ { ++ if (psDeviceImport->bMapped) ++ { ++ eError = DestroyServerResource(psImport->hDevConnection, ++ NULL, ++ BridgeDevmemIntUnmapPMR, ++ psDeviceImport->hMapping); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ } ++ ++ eError = DestroyServerResource(psImport->hDevConnection, ++ NULL, ++ BridgeDevmemIntUnreserveRange, ++ psDeviceImport->hReservation); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ } ++ ++ psDeviceImport->bMapped = IMG_FALSE; ++ psDeviceImport->hMapping = LACK_OF_MAPPING_POISON; ++ psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON; ++ ++ /* DEVMEM_HEAP_MANAGER_RA can also come from a dual managed heap in which case, ++ we need to check if the allocated VA falls within RA managed range */ ++ if ((psHeap->ui32HeapManagerFlags & DEVMEM_HEAP_MANAGER_RA) && ++ psDeviceImport->sDevVAddr.uiAddr >= (psHeap->sBaseAddress.uiAddr + psHeap->uiReservedRegionSize) && ++ psDeviceImport->sDevVAddr.uiAddr < (psHeap->sBaseAddress.uiAddr + psHeap->uiSize)) ++ { ++ RA_Free(psHeap->psQuantizedVMRA, psDeviceImport->sDevVAddr.uiAddr); ++ } ++ ++ if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags)) ++ { ++ DevmemImportStructDevUnmapSVM(psHeap, psImport); ++ } ++ ++ OSLockRelease(psDeviceImport->hLock); ++ ++ DevmemImportStructRelease(psImport); ++ ++ OSAtomicDecrement(&psHeap->hImportCount); ++ ++ return IMG_TRUE; ++ } ++ else ++ { ++ OSLockRelease(psDeviceImport->hLock); ++ return IMG_FALSE; ++ } ++} ++ ++/* ++ Map an import into the CPU ++ */ ++IMG_INTERNAL ++PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEM_CPU_IMPORT *psCPUImport; ++ size_t uiMappingLength; ++ ++ psCPUImport = &psImport->sCPUImport; ++ ++ OSLockAcquire(psCPUImport->hLock); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psImport, ++ psCPUImport->ui32RefCount, ++ psCPUImport->ui32RefCount+1); ++ ++ if (psCPUImport->ui32RefCount++ == 0) ++ { ++ DevmemImportStructAcquire(psImport); ++ ++ eError = OSMMapPMR(GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR, ++ psImport->uiSize, ++ psImport->uiFlags, ++ &psCPUImport->hOSMMapData, ++ &psCPUImport->pvCPUVAddr, ++ &uiMappingLength); ++ PVR_GOTO_IF_ERROR(eError, failMap); ++ ++ /* MappingLength might be rounded up to page size */ ++ PVR_ASSERT(uiMappingLength >= psImport->uiSize); ++ } ++ OSLockRelease(psCPUImport->hLock); ++ ++ return PVRSRV_OK; ++ ++failMap: ++ psCPUImport->ui32RefCount--; ++ if (!DevmemImportStructRelease(psImport)) ++ { ++ OSLockRelease(psCPUImport->hLock); ++ } ++ 
PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++/* ++ Unmap an import from the CPU ++ */ ++IMG_INTERNAL ++void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport) ++{ ++ DEVMEM_CPU_IMPORT *psCPUImport; ++ ++ psCPUImport = &psImport->sCPUImport; ++ ++ OSLockAcquire(psCPUImport->hLock); ++ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d", ++ __func__, ++ psImport, ++ psCPUImport->ui32RefCount, ++ psCPUImport->ui32RefCount-1); ++ ++ if (--psCPUImport->ui32RefCount == 0) ++ { ++ /* psImport->uiSize is a 64-bit quantity whereas the 5th ++ * argument to OSUnmapPMR is a 32-bit quantity on 32-bit systems ++ * hence a compiler warning of implicit cast and loss of data. ++ * Added explicit cast and assert to remove warning. ++ */ ++#if defined(__linux__) && defined(__i386__) ++ PVR_ASSERT(psImport->uiSize < IMG_UINT32_MAX); ++#endif ++ ++ OSMUnmapPMR(GetBridgeHandle(psImport->hDevConnection), ++ psImport->hPMR, ++ psCPUImport->hOSMMapData, ++ psCPUImport->pvCPUVAddr, ++ (size_t)psImport->uiSize); ++ ++ psCPUImport->hOSMMapData = NULL; ++ psCPUImport->pvCPUVAddr = NULL; ++ ++ OSLockRelease(psCPUImport->hLock); ++ ++ DevmemImportStructRelease(psImport); ++ } ++ else ++ { ++ OSLockRelease(psCPUImport->hLock); ++ } ++} +diff --git a/drivers/gpu/drm/img-rogue/devicemem_utils.h b/drivers/gpu/drm/img-rogue/devicemem_utils.h +new file mode 100644 +index 000000000000..3dcef24fa33c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/devicemem_utils.h +@@ -0,0 +1,605 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management internal utility functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Utility functions used internally by device memory management ++ code. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef DEVICEMEM_UTILS_H ++#define DEVICEMEM_UTILS_H ++ ++#include "devicemem.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "pvr_debug.h" ++#include "allocmem.h" ++#include "ra.h" ++#include "osfunc.h" ++#include "lock.h" ++#include "osmmap.h" ++#include "pvrsrv_memallocflags_internal.h" ++ ++#define DEVMEM_HEAPNAME_MAXLENGTH 160 ++ ++/* ++ * Reserved VA space of a heap must always be multiple of DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY, ++ * this check is validated in the DDK. Note this is only reserving "Virtual Address" space and ++ * physical allocations (and mappings thereon) should only be done as much as required (to avoid ++ * wastage). ++ * Granularity has been chosen to support the max possible practically used OS page size. ++ */ ++#define DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY 0x10000 /* 64KB is MAX anticipated OS page size */ ++ ++/* ++ * VA heap size should be at least OS page size. This check is validated in the DDK. ++ */ ++#define DEVMEM_HEAP_MINIMUM_SIZE 0x10000 /* 64KB is MAX anticipated OS page size */ ++ ++#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG) ++#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__) ++#else ++#define DEVMEM_REFCOUNT_PRINT(fmt, ...) ++#endif ++ ++/* If we need a "hMapping" but we don't have a server-side mapping, we poison ++ * the entry with this value so that it's easily recognised in the debugger. ++ * Note that this is potentially a valid handle, but then so is NULL, which is ++ * no better, indeed worse, as it's not obvious in the debugger. The value ++ * doesn't matter. We _never_ use it (and because it's valid, we never assert ++ * it isn't this) but it's nice to have a value in the source code that we can ++ * grep for if things go wrong. ++ */ ++#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead) ++#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead) ++ ++#define DEVICEMEM_HISTORY_ALLOC_INDEX_NONE 0xFFFFFFFF ++ ++struct DEVMEM_CONTEXT_TAG ++{ ++ ++ SHARED_DEV_CONNECTION hDevConnection; ++ ++ /* Number of heaps that have been created in this context ++ * (regardless of whether they have allocations) ++ */ ++ IMG_UINT32 uiNumHeaps; ++ ++ /* Each "DEVMEM_CONTEXT" has a counterpart in the server, which ++ * is responsible for handling the mapping into device MMU. ++ * We have a handle to that here. ++ */ ++ IMG_HANDLE hDevMemServerContext; ++ ++ /* Number of automagically created heaps in this context, ++ * i.e. 
those that are born at context creation time from the ++ * chosen "heap config" or "blueprint" ++ */ ++ IMG_UINT32 uiAutoHeapCount; ++ ++ /* Pointer to array of such heaps */ ++ struct DEVMEM_HEAP_TAG **ppsAutoHeapArray; ++ ++ /* The cache line size for use when allocating memory, ++ * as it is not queryable on the client side ++ */ ++ IMG_UINT32 ui32CPUCacheLineSize; ++ ++ /* Private data handle for device specific data */ ++ IMG_HANDLE hPrivData; ++}; ++ ++/* Flags that record how a heaps virtual address space is managed. */ ++#define DEVMEM_HEAP_MANAGER_UNKNOWN 0 ++/* Heap VAs assigned by the client of Services APIs, heap's RA not used at all. */ ++#define DEVMEM_HEAP_MANAGER_USER (1U << 0) ++/* Heap VAs managed by the OSs kernel, VA from CPU mapping call used */ ++#define DEVMEM_HEAP_MANAGER_KERNEL (1U << 1) ++/* Heap VAs managed by the heap's own RA */ ++#define DEVMEM_HEAP_MANAGER_RA (1U << 2) ++/* Heap VAs managed jointly by Services and the client of Services. ++ * The reserved region of the heap is managed explicitly by the client of Services ++ * The non-reserved region of the heap is managed by the heap's own RA */ ++#define DEVMEM_HEAP_MANAGER_DUAL_USER_RA (DEVMEM_HEAP_MANAGER_USER | DEVMEM_HEAP_MANAGER_RA) ++ ++struct DEVMEM_HEAP_TAG ++{ ++ /* Name of heap - for debug and lookup purposes. */ ++ IMG_CHAR *pszName; ++ ++ /* Number of live imports in the heap */ ++ ATOMIC_T hImportCount; ++ ++ /* Base address and size of heap, required by clients due to some ++ * requesters not being full range ++ */ ++ IMG_DEV_VIRTADDR sBaseAddress; ++ DEVMEM_SIZE_T uiSize; ++ ++ DEVMEM_SIZE_T uiReservedRegionSize; /* uiReservedRegionLength in DEVMEM_HEAP_BLUEPRINT */ ++ ++ /* The heap manager, describing if the space is managed by the user, an RA, ++ * kernel or combination */ ++ IMG_UINT32 ui32HeapManagerFlags; ++ ++ /* This RA is for managing sub-allocations within the imports (PMRs) ++ * within the heap's virtual space. RA only used in DevmemSubAllocate() ++ * to track sub-allocated buffers. ++ * ++ * Resource Span - a PMR import added when the RA calls the ++ * imp_alloc CB (SubAllocImportAlloc) which returns the ++ * PMR import and size (span length). ++ * Resource - an allocation/buffer i.e. a MemDesc. Resource size represents ++ * the size of the sub-allocation. ++ */ ++ RA_ARENA *psSubAllocRA; ++ IMG_CHAR *pszSubAllocRAName; ++ ++ /* The psQuantizedVMRA is for the coarse allocation (PMRs) of virtual ++ * space from the heap. ++ * ++ * Resource Span - the heap's VM space from base to base+length, ++ * only one is added at heap creation. ++ * Resource - a PMR import associated with the heap. Dynamic number ++ * as memory is allocated/freed from or mapped/unmapped to ++ * the heap. Resource size follows PMR logical size. ++ */ ++ RA_ARENA *psQuantizedVMRA; ++ IMG_CHAR *pszQuantizedVMRAName; ++ ++ /* We also need to store a copy of the quantum size in order to feed ++ * this down to the server. ++ */ ++ IMG_UINT32 uiLog2Quantum; ++ ++ /* Store a copy of the minimum import alignment */ ++ IMG_UINT32 uiLog2ImportAlignment; ++ ++ /* The parent memory context for this heap */ ++ struct DEVMEM_CONTEXT_TAG *psCtx; ++ ++ /* Lock to protect this structure */ ++ POS_LOCK hLock; ++ ++ /* Each "DEVMEM_HEAP" has a counterpart in the server, which is ++ * responsible for handling the mapping into device MMU. ++ * We have a handle to that here. ++ */ ++ IMG_HANDLE hDevMemServerHeap; ++ ++ /* This heap is fully allocated and premapped into the device address space. 
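Because DEVMEM_HEAP_MANAGER_DUAL_USER_RA is defined as the OR of the USER and RA bits, a bitwise test such as the one in DevmemImportStructDevUnmap ("flags & DEVMEM_HEAP_MANAGER_RA") matches both the pure-RA and the dual heap, whereas an equality compare matches only the exact mode. A compact demonstration, with the defines renamed to mark this as an illustration:

    #include <stdio.h>

    #define HEAP_MANAGER_UNKNOWN      0
    #define HEAP_MANAGER_USER         (1U << 0)
    #define HEAP_MANAGER_KERNEL       (1U << 1)
    #define HEAP_MANAGER_RA           (1U << 2)
    #define HEAP_MANAGER_DUAL_USER_RA (HEAP_MANAGER_USER | HEAP_MANAGER_RA)

    int main(void)
    {
        unsigned flags = HEAP_MANAGER_DUAL_USER_RA;

        /* Bit test: true for both pure-RA and dual heaps. */
        printf("has RA bit: %d\n", (flags & HEAP_MANAGER_RA) != 0);  /* 1 */

        /* Exact compare: false for the dual heap. */
        printf("is pure RA: %d\n", flags == HEAP_MANAGER_RA);        /* 0 */
        return 0;
    }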
++ * Used in virtualisation for firmware heaps of Guest and optionally Host drivers. */ ++ IMG_BOOL bPremapped; ++}; ++ ++typedef IMG_UINT32 DEVMEM_PROPERTIES_T; /*!< Typedef for Devicemem properties */ ++#define DEVMEM_PROPERTIES_EXPORTABLE (1UL<<0) /*!< Is it exportable? */ ++#define DEVMEM_PROPERTIES_IMPORTED (1UL<<1) /*!< Is it imported from another process? */ ++#define DEVMEM_PROPERTIES_SUBALLOCATABLE (1UL<<2) /*!< Is it suballocatable? */ ++#define DEVMEM_PROPERTIES_UNPINNED (1UL<<3) /*!< Is it currently pinned? */ ++#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED (1UL<<4) /*!< Is the memory fully zeroed? */ ++#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN (1UL<<5) /*!< Is the memory clean, i.e. not been used before? */ ++#define DEVMEM_PROPERTIES_SECURE (1UL<<6) /*!< Is it a special secure buffer? No CPU maps allowed! */ ++#define DEVMEM_PROPERTIES_IMPORT_IS_POISONED (1UL<<7) /*!< Is the memory fully poisoned? */ ++#define DEVMEM_PROPERTIES_NO_CPU_MAPPING (1UL<<8) /* No CPU Mapping is allowed, RW attributes ++ are further derived from allocation memory flags */ ++#define DEVMEM_PROPERTIES_NO_LAYOUT_CHANGE (1UL<<9) /* No sparse resizing allowed, once a memory ++ layout is chosen, no change allowed later, ++ This includes pinning and unpinning */ ++ ++ ++typedef struct DEVMEM_DEVICE_IMPORT_TAG ++{ ++ DEVMEM_HEAP *psHeap; /*!< Heap this import is bound to */ ++ IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the import */ ++ IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */ ++ IMG_HANDLE hReservation; /*!< Device memory reservation handle */ ++ IMG_HANDLE hMapping; /*!< Device mapping handle */ ++ IMG_BOOL bMapped; /*!< This is import mapped? */ ++ POS_LOCK hLock; /*!< Lock to protect the device import */ ++} DEVMEM_DEVICE_IMPORT; ++ ++typedef struct DEVMEM_CPU_IMPORT_TAG ++{ ++ void *pvCPUVAddr; /*!< CPU virtual address of the import */ ++ IMG_UINT32 ui32RefCount; /*!< Refcount of the CPU virtual address */ ++ IMG_HANDLE hOSMMapData; /*!< CPU mapping handle */ ++ POS_LOCK hLock; /*!< Lock to protect the CPU import */ ++} DEVMEM_CPU_IMPORT; ++ ++typedef struct DEVMEM_IMPORT_TAG ++{ ++ SHARED_DEV_CONNECTION hDevConnection; ++ IMG_DEVMEM_ALIGN_T uiAlign; /*!< Alignment of the PMR */ ++ DEVMEM_SIZE_T uiSize; /*!< Size of import */ ++ ATOMIC_T hRefCount; /*!< Refcount for this import */ ++ DEVMEM_PROPERTIES_T uiProperties; /*!< Stores properties of an import like if ++ it is exportable, pinned or suballocatable */ ++ IMG_HANDLE hPMR; /*!< Handle to the PMR */ ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */ ++ POS_LOCK hLock; /*!< Lock to protect the import */ ++ ++ DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the import */ ++ DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the import */ ++} DEVMEM_IMPORT; ++ ++typedef struct DEVMEM_DEVICE_MEMDESC_TAG ++{ ++ IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the allocation */ ++ IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */ ++ POS_LOCK hLock; /*!< Lock to protect device memdesc */ ++} DEVMEM_DEVICE_MEMDESC; ++ ++typedef struct DEVMEM_CPU_MEMDESC_TAG ++{ ++ void *pvCPUVAddr; /*!< CPU virtual address of the import */ ++ IMG_UINT32 ui32RefCount; /*!< Refcount of the device CPU address */ ++ POS_LOCK hLock; /*!< Lock to protect CPU memdesc */ ++} DEVMEM_CPU_MEMDESC; ++ ++struct DEVMEM_MEMDESC_TAG ++{ ++ DEVMEM_IMPORT *psImport; /*!< Import this memdesc is on */ ++ IMG_DEVMEM_OFFSET_T uiOffset; /*!< Offset into import where our allocation starts */ ++ 
IMG_DEVMEM_SIZE_T uiAllocSize; /*!< Size of the allocation */ ++ ATOMIC_T hRefCount; /*!< Refcount of the memdesc */ ++ POS_LOCK hLock; /*!< Lock to protect memdesc */ ++ IMG_HANDLE hPrivData; ++ ++ DEVMEM_DEVICE_MEMDESC sDeviceMemDesc; /*!< Device specifics of the memdesc */ ++ DEVMEM_CPU_MEMDESC sCPUMemDesc; /*!< CPU specifics of the memdesc */ ++ ++ IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this memdesc */ ++ ++ IMG_UINT32 ui32AllocationIndex; ++ ++#if defined(DEBUG) ++ IMG_BOOL bPoisonOnFree; ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ IMG_HANDLE hRIHandle; /*!< Handle to RI information */ ++#endif ++}; ++ ++/* The physical descriptor used to store handles and information of device ++ * physical allocations. ++ */ ++struct DEVMEMX_PHYS_MEMDESC_TAG ++{ ++ IMG_UINT32 uiNumPages; /*!< Number of pages that the import has*/ ++ IMG_UINT32 uiLog2PageSize; /*!< Page size */ ++ ATOMIC_T hRefCount; /*!< Refcount of the memdesc */ ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */ ++ IMG_HANDLE hPMR; /*!< Handle to the PMR */ ++ DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the memdesc */ ++ DEVMEM_BRIDGE_HANDLE hBridge; /*!< Bridge connection for the server */ ++ void *pvUserData; /*!< User data */ ++}; ++ ++/* The virtual descriptor used to store handles and information of a device ++ * virtual range and the mappings to it. ++ */ ++struct DEVMEMX_VIRT_MEMDESC_TAG ++{ ++ IMG_UINT32 uiNumPages; /*!< Number of pages that the import has*/ ++ PVRSRV_MEMALLOCFLAGS_T uiFlags; /*!< Flags for this import */ ++ DEVMEMX_PHYSDESC **apsPhysDescTable; /*!< Table to store links to physical descs */ ++ DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the memdesc */ ++ ++ IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this virt memdesc */ ++ IMG_UINT32 ui32AllocationIndex; /*!< To track mappings in this range */ ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ IMG_HANDLE hRIHandle; /*!< Handle to RI information */ ++#endif ++}; ++ ++#define DEVICEMEM_UTILS_NO_ADDRESS 0 ++ ++/****************************************************************************** ++@Function DevmemValidateParams ++@Description Check if flags are conflicting and if align is a size multiple. ++ ++@Input uiSize Size of the import. ++@Input uiAlign Alignment of the import. ++@Input puiFlags Pointer to the flags for the import. ++@return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T *puiFlags); ++ ++/****************************************************************************** ++@Function DevmemImportStructAlloc ++@Description Allocates memory for an import struct. Does not allocate a PMR! ++ Create locks for CPU and Devmem mappings. ++ ++@Input hDevConnection Connection to use for calls from the import. ++@Input ppsImport The import to allocate. ++@return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection, ++ DEVMEM_IMPORT **ppsImport); ++ ++/****************************************************************************** ++@Function DevmemImportStructInit ++@Description Initialises the import struct with the given parameters. ++ Set it's refcount to 1! ++ ++@Input psImport The import to initialise. ++@Input uiSize Size of the import. 
++@Input uiAlign Alignment of allocations in the import. ++@Input uiMapFlags ++@Input hPMR Reference to the PMR of this import struct. ++@Input uiProperties Properties of the import. Is it exportable, ++ imported, suballocatable, unpinned? ++******************************************************************************/ ++void DevmemImportStructInit(DEVMEM_IMPORT *psImport, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, ++ IMG_HANDLE hPMR, ++ DEVMEM_PROPERTIES_T uiProperties); ++ ++/****************************************************************************** ++@Function DevmemImportStructDevMap ++@Description NEVER call after the last DevmemMemDescRelease() ++ Maps the PMR referenced by the import struct to the device's ++ virtual address space. ++ Does nothing but increase the cpu mapping refcount if the ++ import struct was already mapped. ++ ++@Input psHeap The heap to map to. ++@Input bMap Caller can choose if the import should be really ++ mapped in the page tables or if just a virtual range ++ should be reserved and the refcounts increased. ++@Input psImport The import we want to map. ++@Input uiOptionalMapAddress An optional address to map to. ++ Pass DEVICEMEM_UTILS_NOADDRESS if not used. ++@return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR DevmemImportStructDevMap(DEVMEM_HEAP *psHeap, ++ IMG_BOOL bMap, ++ DEVMEM_IMPORT *psImport, ++ IMG_UINT64 uiOptionalMapAddress); ++ ++/****************************************************************************** ++@Function DevmemImportStructDevUnmap ++@Description Unmaps the PMR referenced by the import struct from the ++ device's virtual address space. ++ If this was not the last remaining CPU mapping on the import ++ struct only the cpu mapping refcount is decreased. ++@return A boolean to signify if the import was unmapped. ++******************************************************************************/ ++IMG_BOOL DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport); ++ ++/****************************************************************************** ++@Function DevmemImportStructCPUMap ++@Description NEVER call after the last DevmemMemDescRelease() ++ Maps the PMR referenced by the import struct to the CPU's ++ virtual address space. ++ Does nothing but increase the cpu mapping refcount if the ++ import struct was already mapped. ++@return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport); ++ ++/****************************************************************************** ++@Function DevmemImportStructCPUUnmap ++@Description Unmaps the PMR referenced by the import struct from the CPU's ++ virtual address space. ++ If this was not the last remaining CPU mapping on the import ++ struct only the cpu mapping refcount is decreased. ++******************************************************************************/ ++void DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport); ++ ++ ++/****************************************************************************** ++@Function DevmemImportStructAcquire ++@Description Acquire an import struct by increasing it's refcount. 
++******************************************************************************/ ++void DevmemImportStructAcquire(DEVMEM_IMPORT *psImport); ++ ++/****************************************************************************** ++@Function DevmemImportStructRelease ++@Description Reduces the refcount of the import struct. ++ Destroys the import in the case it was the last reference. ++ Destroys underlying PMR if this import was the last reference ++ to it. ++@return A boolean to signal if the import was destroyed. True = yes. ++******************************************************************************/ ++IMG_BOOL DevmemImportStructRelease(DEVMEM_IMPORT *psImport); ++ ++/****************************************************************************** ++@Function DevmemImportDiscard ++@Description Discard a created, but uninitialised import structure. ++ This must only be called before DevmemImportStructInit ++ after which DevmemImportStructRelease must be used to ++ "free" the import structure. ++******************************************************************************/ ++void DevmemImportDiscard(DEVMEM_IMPORT *psImport); ++ ++/****************************************************************************** ++@Function DevmemMemDescAlloc ++@Description Allocates a MemDesc and create it's various locks. ++ Zero the allocated memory. ++@return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc); ++ ++#if defined(DEBUG) ++/****************************************************************************** ++@Function DevmemMemDescSetPoF ++@Description Sets the Poison on Free flag to true for this MemDesc if the ++ given MemAllocFlags have the Poison on Free bit set. ++ Poison on Free is a debug only feature. ++******************************************************************************/ ++void DevmemMemDescSetPoF(DEVMEM_MEMDESC *psMemDesc, PVRSRV_MEMALLOCFLAGS_T uiFlags); ++#endif ++ ++/****************************************************************************** ++@Function DevmemMemDescInit ++@Description Sets the given offset and import struct fields in the MemDesc. ++ Initialises refcount to 1 and other values to 0. ++ ++@Input psMemDesc MemDesc to initialise. ++@Input uiOffset Offset in the import structure. ++@Input psImport Import the MemDesc is on. ++@Input uiAllocSize Size of the allocation ++******************************************************************************/ ++void DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ DEVMEM_IMPORT *psImport, ++ IMG_DEVMEM_SIZE_T uiAllocSize); ++ ++/****************************************************************************** ++@Function DevmemMemDescAcquire ++@Description Acquires the MemDesc by increasing it's refcount. ++******************************************************************************/ ++void DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc); ++ ++/****************************************************************************** ++@Function DevmemMemDescRelease ++@Description Releases the MemDesc by reducing it's refcount. ++ Destroy the MemDesc if it's recount is 0. ++ Destroy the import struct the MemDesc is on if that was the ++ last MemDesc on the import, probably following the destruction ++ of the underlying PMR. ++@return A boolean to signal if the MemDesc was destroyed. True = yes. 
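The Acquire/Release contract documented in these headers (increment on acquire, decrement on release, destroy and report IMG_TRUE when the last reference drops) is a plain atomic refcount. A condensed sketch using C11 atomics rather than the driver's OSAtomic wrappers; refobj_t is invented for the example:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct refobj
    {
        atomic_int refcount;
        /* ... payload ... */
    } refobj_t;

    static refobj_t *refobj_create(void)
    {
        refobj_t *obj = malloc(sizeof(*obj));
        if (obj != NULL)
            atomic_init(&obj->refcount, 1);  /* born with one reference */
        return obj;
    }

    static void refobj_acquire(refobj_t *obj)
    {
        atomic_fetch_add(&obj->refcount, 1);
    }

    /* Returns true if this call dropped the last reference and the object
     * was destroyed, matching the Release contract documented above. */
    static bool refobj_release(refobj_t *obj)
    {
        if (atomic_fetch_sub(&obj->refcount, 1) == 1)  /* old value 1 => now 0 */
        {
            free(obj);
            return true;
        }
        return false;
    }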
++******************************************************************************/ ++IMG_BOOL DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc); ++ ++/****************************************************************************** ++@Function DevmemMemDescDiscard ++@Description Discard a created, but uninitialised MemDesc structure. ++ This must only be called before DevmemMemDescInit after ++ which DevmemMemDescRelease must be used to "free" the ++ MemDesc structure. ++******************************************************************************/ ++void DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc); ++ ++ ++/****************************************************************************** ++@Function GetImportProperties ++@Description Atomically read psImport->uiProperties ++ It's possible that another thread modifies uiProperties ++ immediately after this function returns, making its result ++ stale. So, it's recommended to use this function only to ++ check if certain non-volatile flags were set. ++******************************************************************************/ ++static INLINE DEVMEM_PROPERTIES_T GetImportProperties(DEVMEM_IMPORT *psImport) ++{ ++ DEVMEM_PROPERTIES_T uiProperties; ++ ++ OSLockAcquire(psImport->hLock); ++ uiProperties = psImport->uiProperties; ++ OSLockRelease(psImport->hLock); ++ return uiProperties; ++} ++ ++/****************************************************************************** ++@Function DevmemCPUMemSet ++@Description Given a CPU Mapped Devmem address, set the memory at that ++ range (address, address + size) to the uiPattern provided. ++ Flags determine the OS abstracted MemSet method to use. ++******************************************************************************/ ++static INLINE void DevmemCPUMemSet(void *pvMem, ++ IMG_UINT8 uiPattern, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags) ++{ ++ if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags)) ++ { ++ OSDeviceMemSet(pvMem, uiPattern, uiSize); ++ } ++ else ++ { ++ /* it's safe to use OSCachedMemSet() for cached and wc memory */ ++ OSCachedMemSet(pvMem, uiPattern, uiSize); ++ } ++} ++ ++/****************************************************************************** ++@Function DevmemCPUMapCheckImportProperties ++@Description Given a MemDesc check that the import properties are correct ++ to allow for mapping the MemDesc to the CPU. ++ Returns PVRSRV_OK on success. ++******************************************************************************/ ++static INLINE PVRSRV_ERROR DevmemCPUMapCheckImportProperties(DEVMEM_MEMDESC *psMemDesc) ++{ ++ DEVMEM_PROPERTIES_T uiProperties = GetImportProperties(psMemDesc->psImport); ++ ++ if (uiProperties & ++ (DEVMEM_PROPERTIES_UNPINNED | DEVMEM_PROPERTIES_SECURE)) ++ { ++#if defined(SUPPORT_SECURITY_VALIDATION) ++ if (uiProperties & DEVMEM_PROPERTIES_SECURE) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Allocation is a secure buffer. " ++ "It should not be possible to map to CPU, but for security " ++ "validation this will be allowed for testing purposes, " ++ "as long as the buffer is pinned.", ++ __func__)); ++ } ++ ++ if (uiProperties & DEVMEM_PROPERTIES_UNPINNED) ++#endif ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Allocation is currently unpinned or a secure buffer. 
" ++ "Not possible to map to CPU!", ++ __func__)); ++ return PVRSRV_ERROR_INVALID_MAP_REQUEST; ++ } ++ } ++ ++ if (uiProperties & DEVMEM_PROPERTIES_NO_CPU_MAPPING) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: CPU Mapping is not possible on this allocation!", ++ __func__)); ++ return PVRSRV_ERROR_INVALID_MAP_REQUEST; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++#endif /* DEVICEMEM_UTILS_H */ +diff --git a/drivers/gpu/drm/img-rogue/di_common.h b/drivers/gpu/drm/img-rogue/di_common.h +new file mode 100644 +index 000000000000..a10178708a75 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/di_common.h +@@ -0,0 +1,236 @@ ++/*************************************************************************/ /*! ++@File ++@Title Common types for Debug Info framework. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef DI_COMMON_H ++#define DI_COMMON_H ++ ++#include "img_types.h" ++ ++/* Token that signals that a header should be printed. */ ++#define DI_START_TOKEN ((void *) 1) ++ ++/* This is a public handle to an entry. */ ++#ifndef DI_GROUP_DEFINED ++#define DI_GROUP_DEFINED ++typedef struct DI_GROUP DI_GROUP; ++#endif ++#ifndef DI_ENTRY_DEFINED ++#define DI_ENTRY_DEFINED ++typedef struct DI_ENTRY DI_ENTRY; ++#endif ++typedef struct OSDI_IMPL_ENTRY OSDI_IMPL_ENTRY; ++ ++/*! Debug Info entries types. 
*/ ++typedef enum DI_ENTRY_TYPE ++{ ++ DI_ENTRY_TYPE_GENERIC, /*!< generic entry type, implements ++ start/stop/next/show iterator ++ interface */ ++ DI_ENTRY_TYPE_RANDOM_ACCESS, /*!< random access entry, implements ++ seek/read iterator interface */ ++} DI_ENTRY_TYPE; ++ ++/*! @Function DI_PFN_START ++ * ++ * @Description ++ * Start operation returns first entry and passes it to Show operation. ++ * ++ * @Input psEntry pointer to the implementation entry ++ * @InOut pui64Pos current data position in the entry ++ * ++ * @Return pointer to data that will be passed to the other iterator ++ * functions in pvData argument ++ */ ++typedef void *(*DI_PFN_START)(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos); ++ ++/*! @Function DI_PFN_STOP ++ * ++ * @Description ++ * Stop operations is called after iterator reaches end of data. ++ * ++ * If pvData was allocated in pfnStart it should be freed here. ++ * ++ * @Input psEntry pointer to the implementation entry ++ * @Input pvData pointer to data returned from pfnStart/pfnNext ++ */ ++typedef void (*DI_PFN_STOP)(OSDI_IMPL_ENTRY *psEntry, void *pvData); ++ ++/*! @Function DI_PFN_NEXT ++ * ++ * @Description ++ * Next returns next data entry and passes it to Show operation. ++ * ++ * @Input psEntry pointer to the implementation entry ++ * @Input pvData pointer to data returned from pfnStart/pfnNext ++ * @InOut pui64Pos current data position in the entry ++ */ ++typedef void *(*DI_PFN_NEXT)(OSDI_IMPL_ENTRY *psEntry, void *pvData, ++ IMG_UINT64 *pui64Pos); ++ ++/*! @Function DI_PFN_SHOW ++ * ++ * @Description ++ * Outputs the data element. ++ * ++ * @Input psEntry pointer to the implementation entry ++ * @Input pvData pointer to data returned from pfnStart/pfnNext ++ */ ++typedef int (*DI_PFN_SHOW)(OSDI_IMPL_ENTRY *psEntry, void *pvData); ++ ++/*! @Function DI_PFN_SEEK ++ * ++ * @Description ++ * Changes position of the entry data pointer ++ * ++ * @Input uiOffset new entry offset (absolute) ++ * @Input pvData private data provided during entry creation ++ */ ++typedef IMG_INT64 (*DI_PFN_SEEK)(IMG_UINT64 ui64Offset, void *pvData); ++ ++/*! @Function DI_PFN_READ ++ * ++ * @Description ++ * Retrieves data from the entry from position previously set by Seek. ++ * ++ * @Input pszBuffer output buffer ++ * @Input ui64Count length of the output buffer ++ * @InOut pui64Pos pointer to the current position in the entry ++ * @Input pvData private data provided during entry creation ++ */ ++typedef IMG_INT64 (*DI_PFN_READ)(IMG_CHAR *pszBuffer, IMG_UINT64 ui64Count, ++ IMG_UINT64 *pui64Pos, void *pvData); ++ ++/*! @Function DI_PFN_WRITE ++ * ++ * @Description ++ * Handle writes operation to the entry. ++ * ++ * @Input pszBuffer NUL-terminated buffer containing written data ++ * @Input ui64Count length of the data in pszBuffer (length of the buffer) ++ * @InOut pui64Pos pointer to the current position in the entry ++ * @Input pvData private data provided during entry creation ++ */ ++typedef IMG_INT64 (*DI_PFN_WRITE)(const IMG_CHAR *pszBuffer, ++ IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, ++ void *pvData); ++ ++/*! Debug info entry iterator. ++ * ++ * This covers all entry types: GENERIC and RANDOM_ACCESS. ++ * ++ * The GENERIC entry type ++ * ++ * The GENERIC type should implement either a full set of following callbacks: ++ * pfnStart, pfnStop, pfnNext and pfnShow, or pfnShow only. If only pfnShow ++ * callback is given the framework will use default handlers in place of the ++ * other ones. ++ * ++ * e.g. 
++ *
++ *     struct sIter = {
++ *         .pfnStart = StartCb, .pfnStop = StopCb, .pfnNext = NextCb,
++ *         .pfnShow = ShowCb
++ *     };
++ *
++ * The use case for implementing pfnShow only is when the data for the given
++ * entry is short and can be printed in one go, because the pfnShow callback
++ * will be called only once.
++ *
++ * e.g. for a one-shot print generic entry:
++ *
++ *     struct sIter = {
++ *         .pfnShow = SingleShowCb
++ *     };
++ *
++ * The DICreateEntry() function will return an error if the
++ * DI_ENTRY_TYPE_GENERIC type is used and an invalid combination of callbacks
++ * is given.
++ *
++ * The RANDOM_ACCESS entry
++ *
++ * The RANDOM_ACCESS type should implement either both pfnSeek and pfnRead,
++ * or the pfnRead callback only.
++ *
++ * e.g. for a seekable and readable random access entry:
++ *
++ *     struct sIter = {
++ *         .pfnSeek = SeekCb, .pfnRead = ReadCb
++ *     };
++ *
++ * The DICreateEntry() function will return an error if the
++ * DI_ENTRY_TYPE_RANDOM_ACCESS type is used and an invalid combination of
++ * callbacks is given.
++ *
++ * Writing to the entry (optional)
++ *
++ * The iterator also allows passing a pfnWrite callback that implements the
++ * write operation on the entry. The write operation is entry-type agnostic,
++ * which means that it can be defined for both GENERIC and RANDOM_ACCESS
++ * entries.
++ *
++ * e.g. for a writable one-shot print generic entry:
++ *
++ *     struct sIter = {
++ *         .pfnShow = SingleShowCb, .pfnWrite = WriteCb
++ *     };
++ */
++typedef struct DI_ITERATOR_CB
++{
++    /* Generic entry interface. */
++
++    DI_PFN_START pfnStart; /*!< Starts iteration and returns the first
++                                element of the entry's data. */
++    DI_PFN_STOP pfnStop;   /*!< Stops iteration. */
++    DI_PFN_NEXT pfnNext;   /*!< Returns the next element of the entry's data. */
++    DI_PFN_SHOW pfnShow;   /*!< Shows the current data element of an entry. */
++
++    /* Optional random access entry interface. */
++
++    DI_PFN_SEEK pfnSeek;   /*!< Sets the data pointer in an entry. */
++    DI_PFN_READ pfnRead;   /*!< Reads data from an entry. */
++
++    /* Optional write interface. The buffer passed to pfnWrite is
++     * NUL-terminated. */
++
++    DI_PFN_WRITE pfnWrite;      /*!< Performs a write operation on an entry. */
++    IMG_UINT32 ui32WriteLenMax; /*!< Maximum char length of entry
++                                     accepted for write. Includes \0 */
++} DI_ITERATOR_CB;
++
++#endif /* DI_COMMON_H */
+diff --git a/drivers/gpu/drm/img-rogue/di_impl_brg.c b/drivers/gpu/drm/img-rogue/di_impl_brg.c
+new file mode 100644
+index 000000000000..5670af07f6d4
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/di_impl_brg.c
+@@ -0,0 +1,889 @@
++/*************************************************************************/ /*!
++@File
++@Title          OS agnostic implementation of Debug Info interface.
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Implements osdi_impl.h API to provide access to driver's
++                debug data via pvrdebug.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include "allocmem.h"
++#include "hash.h"
++#include "img_defs.h"
++#include "img_types.h"
++#include "lock.h"
++#include "osfunc_common.h"
++#include "osfunc.h" /* for thread */
++#include "tlstream.h"
++#include "dllist.h"
++
++#include "osdi_impl.h"
++#include "di_impl_brg.h"
++#include "di_impl_brg_intern.h"
++#include "pvr_dicommon.h"
++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
++#include "pvrsrv.h"
++#endif
++
++#define ENTRIES_TABLE_INIT_SIZE 64
++#define STREAM_BUFFER_SIZE 0x4000 /* 16KB */
++#define STREAM_LINE_LENGTH 512
++
++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
++#define WRITER_THREAD_SLEEP_TIMEOUT 0ull
++#else
++#define WRITER_THREAD_SLEEP_TIMEOUT 28800000000ull
++#endif
++#define WRITER_THREAD_DESTROY_TIMEOUT 100000ull
++#define WRITER_THREAD_DESTROY_RETRIES 10u
++
++#define WRITE_RETRY_COUNT 10 /* retry a write to a TL buffer 10 times */
++#define WRITE_RETRY_WAIT_TIME 100 /* wait 100ms between write retries */
++
++typedef enum THREAD_STATE
++{
++    THREAD_STATE_NULL,
++    THREAD_STATE_ALIVE,
++    THREAD_STATE_TERMINATED,
++} THREAD_STATE;
++
++static struct DIIB_IMPL
++{
++    HASH_TABLE *psEntriesTable; /*!< Table of entries. */
++    POS_LOCK psEntriesLock;     /*!< Protects psEntriesTable. */
++    IMG_HANDLE hWriterThread;
++    IMG_HANDLE hWriterEventObject;
++    ATOMIC_T eThreadState;
++
++    DLLIST_NODE sWriterQueue;
++    POS_LOCK psWriterLock;      /*!< Protects sWriterQueue. */
++} *_g_psImpl;
++
++struct DIIB_GROUP
++{
++    const IMG_CHAR *pszName;
++    struct DIIB_GROUP *psParentGroup;
++};
++
++struct DIIB_ENTRY
++{
++    struct DIIB_GROUP *psParentGroup;
++    OSDI_IMPL_ENTRY sImplEntry;
++    DI_ITERATOR_CB sIterCb;
++    DI_ENTRY_TYPE eType;
++    IMG_CHAR pszFullPath[DI_IMPL_BRG_PATH_LEN];
++    void *pvPrivData;
++
++    POS_LOCK hLock; /*!< Protects access to entry's iterator. */
++};
++
++struct DI_CONTEXT_TAG
++{
++    IMG_HANDLE hStream;
++    ATOMIC_T iRefCnt;
++    IMG_BOOL bClientConnected; /*!< Indicates whether the client is still
++                                    connected to the DI.
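++                                    Set to IMG_FALSE by DIDestroyContextKM()
++                                    so that the writer thread can drop work
++                                    items still queued for this context.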
++                                    */
++};
++
++struct DIIB_WORK_ITEM
++{
++    DI_CONTEXT *psContext;
++    DIIB_ENTRY *psEntry;
++    IMG_UINT64 ui64Size;
++    IMG_UINT64 ui64Offset;
++
++    DLLIST_NODE sQueueElement;
++};
++
++/* Declaring the function here to avoid the dependencies that would be
++ * introduced by including osfunc.h. */
++IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
++                           size_t uiSize);
++
++/* djb2 hash function is public domain */
++static IMG_UINT32 _Hash(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
++{
++    IMG_CHAR *pszStr = pKey;
++    IMG_UINT32 ui32Hash = 5381, ui32Char;
++
++    PVR_UNREFERENCED_PARAMETER(uKeySize);
++    PVR_UNREFERENCED_PARAMETER(uHashTabLen);
++
++    while ((ui32Char = *pszStr++) != '\0')
++    {
++        ui32Hash = ((ui32Hash << 5) + ui32Hash) + ui32Char; /* hash * 33 + c */
++    }
++
++    return ui32Hash;
++}
++
++static IMG_BOOL _Compare(size_t uKeySize, void *pKey1, void *pKey2)
++{
++    IMG_CHAR *pszKey1 = pKey1, *pszKey2 = pKey2;
++
++    return OSStringNCompare(pszKey1, pszKey2, uKeySize) == 0;
++}
++
++/* ----- native callbacks interface ----------------------------------------- */
++
++static void _WriteWithRetries(void *pvNativeHandle, const IMG_CHAR *pszStr,
++                              IMG_UINT uiLen)
++{
++    PVRSRV_ERROR eError;
++    IMG_INT iRetry = 0;
++    IMG_UINT32 ui32Flags = TL_FLAG_NO_WRITE_FAILED;
++
++    do
++    {
++        /* Try to write to the buffer but don't inject a
++         * MOST_RECENT_WRITE_FAILED packet in case of failure because we're
++         * going to retry. */
++        eError = TLStreamWriteRetFlags(pvNativeHandle, (IMG_UINT8 *) pszStr,
++                                       uiLen, &ui32Flags);
++        if (eError == PVRSRV_ERROR_STREAM_FULL)
++        {
++            /* wait to give the client a chance to read */
++            OSSleepms(WRITE_RETRY_WAIT_TIME);
++        }
++    }
++    while (eError == PVRSRV_ERROR_STREAM_FULL && iRetry++ < WRITE_RETRY_COUNT);
++
++    /* One last try to write to the buffer. In this case upon failure
++     * a MOST_RECENT_WRITE_FAILED packet will be injected into the buffer to
++     * indicate data loss.
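++     * The stream is created with TL_OPMODE_DROP_NEWER (see _CreateStream()),
++     * so when the buffer is full the incoming packet is dropped rather than
++     * blocking the writer thread indefinitely.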
++     */
++    if (eError == PVRSRV_ERROR_STREAM_FULL)
++    {
++        eError = TLStreamWrite(pvNativeHandle, (IMG_UINT8 *) pszStr, uiLen);
++    }
++
++    PVR_LOG_IF_ERROR(eError, "TLStreamWrite");
++}
++
++static void _WriteData(void *pvNativeHandle, const void *pvData,
++                       IMG_UINT32 uiSize)
++{
++    _WriteWithRetries(pvNativeHandle, pvData, uiSize);
++}
++
++__printf(2, 0)
++static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt,
++                     va_list pArgs)
++{
++    IMG_CHAR pcBuffer[STREAM_LINE_LENGTH];
++    IMG_UINT uiLen = OSVSNPrintf(pcBuffer, sizeof(pcBuffer) - 1, pszFmt,
++                                 pArgs);
++    pcBuffer[uiLen] = '\0';
++
++    _WriteWithRetries(pvNativeHandle, pcBuffer, uiLen + 1);
++}
++
++static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr)
++{
++    _WriteWithRetries(pvNativeHandle, pszStr, OSStringLength(pszStr) + 1);
++}
++
++static IMG_BOOL _HasOverflowed(void *pvNativeHandle)
++{
++    PVR_UNREFERENCED_PARAMETER(pvNativeHandle);
++    return IMG_FALSE;
++}
++
++static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = {
++    .pfnWrite = _WriteData,
++    .pfnVPrintf = _VPrintf,
++    .pfnPuts = _Puts,
++    .pfnHasOverflowed = _HasOverflowed,
++};
++
++/* ----- entry operations --------------------------------------------------- */
++
++static PVRSRV_ERROR _ContextUnrefAndMaybeDestroy(DI_CONTEXT *psContext)
++{
++    if (OSAtomicDecrement(&psContext->iRefCnt) == 0)
++    {
++        TLStreamClose(psContext->hStream);
++        OSFreeMem(psContext);
++    }
++
++    return PVRSRV_OK;
++}
++
++static IMG_INT64 _ReadGeneric(const DI_CONTEXT *psContext, DIIB_ENTRY *psEntry)
++{
++    IMG_INT64 iRet = 0;
++    IMG_UINT64 ui64Pos = 0;
++    DI_ITERATOR_CB *psIter = &psEntry->sIterCb;
++    OSDI_IMPL_ENTRY *psImplEntry = &psEntry->sImplEntry;
++    PVRSRV_ERROR eError;
++
++    if (psIter->pfnStart != NULL)
++    {
++        /* this is the full sequence of the operation */
++        void *pvData = psIter->pfnStart(psImplEntry, &ui64Pos);
++
++        while (pvData != NULL && psContext->bClientConnected)
++        {
++            iRet = psIter->pfnShow(psImplEntry, pvData);
++            if (iRet < 0)
++            {
++                break;
++            }
++
++            pvData = psIter->pfnNext(psImplEntry, pvData, &ui64Pos);
++        }
++
++        psIter->pfnStop(psImplEntry, pvData);
++    }
++    else if (psIter->pfnShow != NULL)
++    {
++        /* this is the simplified sequence of the operation */
++        iRet = psIter->pfnShow(psImplEntry, NULL);
++    }
++
++    eError = TLStreamMarkEOS(psImplEntry->pvNative, IMG_FALSE);
++    PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamMarkEOS", return_error_);
++
++    return iRet;
++
++return_error_:
++    return -1;
++}
++
++static IMG_INT64 _ReadRndAccess(DIIB_ENTRY *psEntry, IMG_UINT64 ui64Count,
++                                IMG_UINT64 *pui64Pos, void *pvData)
++{
++    PVRSRV_ERROR eError;
++    IMG_UINT8 *pui8Buffer;
++    IMG_HANDLE hStream = psEntry->sImplEntry.pvNative;
++
++    if (psEntry->sIterCb.pfnRead == NULL)
++    {
++        return -1;
++    }
++
++    eError = TLStreamReserve(hStream, &pui8Buffer, ui64Count);
++    PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamReserve", return_error_);
++
++    psEntry->sIterCb.pfnRead((IMG_CHAR *) pui8Buffer, ui64Count, pui64Pos,
++                             pvData);
++
++    eError = TLStreamCommit(hStream, ui64Count);
++    PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCommit", return_error_);
++
++    eError = TLStreamMarkEOS(psEntry->sImplEntry.pvNative, IMG_FALSE);
++    PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamMarkEOS", return_error_);
++
++    return 0;
++
++return_error_:
++    return -1;
++}
++
++static void _WriterThread(void *pvArg)
++{
++    PVRSRV_ERROR eError;
++    IMG_HANDLE hEvent;
++    DLLIST_NODE *psNode;
++
++    eError = OSEventObjectOpen(_g_psImpl->hWriterEventObject, &hEvent);
++    PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen");
++
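++    /* Consumer loop: drain sWriterQueue, oldest item first, then sleep on
++     * the event object until DIReadEntryKM() queues more work or the
++     * thread is asked to terminate. */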
++#ifdef PVRSRV_FORCE_UNLOAD_IF_BAD_STATE ++ while (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK && ++ OSAtomicRead(&_g_psImpl->eThreadState) == THREAD_STATE_ALIVE) ++#else ++ while (OSAtomicRead(&_g_psImpl->eThreadState) == THREAD_STATE_ALIVE) ++#endif ++ { ++ struct DIIB_WORK_ITEM *psItem = NULL; ++ ++ OSLockAcquire(_g_psImpl->psWriterLock); ++ /* Get element from list tail so that we always get the oldest element ++ * (elements are added to head). */ ++ while ((psNode = dllist_get_prev_node(&_g_psImpl->sWriterQueue)) != NULL) ++ { ++ IMG_INT64 i64Ret; ++ DIIB_ENTRY *psEntry; ++ OSDI_IMPL_ENTRY *psImplEntry; ++ ++ dllist_remove_node(psNode); ++ OSLockRelease(_g_psImpl->psWriterLock); ++ ++ psItem = IMG_CONTAINER_OF(psNode, struct DIIB_WORK_ITEM, ++ sQueueElement); ++ ++ psEntry = psItem->psEntry; ++ psImplEntry = &psItem->psEntry->sImplEntry; ++ ++ /* if client has already disconnected we can just drop this item */ ++ if (psItem->psContext->bClientConnected) ++ { ++ ++ PVR_ASSERT(psItem->psContext->hStream != NULL); ++ ++ psImplEntry->pvNative = psItem->psContext->hStream; ++ ++ if (psEntry->eType == DI_ENTRY_TYPE_GENERIC) ++ { ++ i64Ret = _ReadGeneric(psItem->psContext, psEntry); ++ PVR_LOG_IF_FALSE(i64Ret >= 0, "generic access read operation " ++ "failed"); ++ } ++ else if (psEntry->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) ++ { ++ IMG_UINT64 ui64Pos = psItem->ui64Offset; ++ ++ i64Ret = _ReadRndAccess(psEntry, psItem->ui64Size, &ui64Pos, ++ psEntry->pvPrivData); ++ PVR_LOG_IF_FALSE(i64Ret >= 0, "random access read operation " ++ "failed"); ++ } ++ else ++ { ++ PVR_ASSERT(psEntry->eType == DI_ENTRY_TYPE_GENERIC || ++ psEntry->eType == DI_ENTRY_TYPE_RANDOM_ACCESS); ++ } ++ ++ psImplEntry->pvNative = NULL; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "client reading entry \"%s\" has " ++ "disconnected", psEntry->pszFullPath)); ++ } ++ ++ _ContextUnrefAndMaybeDestroy(psItem->psContext); ++ OSFreeMemNoStats(psItem); ++ ++ OSLockAcquire(_g_psImpl->psWriterLock); ++ } ++ OSLockRelease(_g_psImpl->psWriterLock); ++ ++ eError = OSEventObjectWaitKernel(hEvent, WRITER_THREAD_SLEEP_TIMEOUT); ++ if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_TIMEOUT) ++ { ++ PVR_LOG_ERROR(eError, "OSEventObjectWaitKernel"); ++ } ++ } ++ ++ OSLockAcquire(_g_psImpl->psWriterLock); ++ /* clear the queue if there are any items pending */ ++ while ((psNode = dllist_get_prev_node(&_g_psImpl->sWriterQueue)) != NULL) ++ { ++ struct DIIB_WORK_ITEM *psItem = IMG_CONTAINER_OF(psNode, ++ struct DIIB_WORK_ITEM, ++ sQueueElement); ++ ++ dllist_remove_node(psNode); ++ _ContextUnrefAndMaybeDestroy(psItem->psContext); ++ OSFreeMem(psItem); ++ } ++ OSLockRelease(_g_psImpl->psWriterLock); ++ ++ eError = OSEventObjectClose(hEvent); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); ++ ++ OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_TERMINATED); ++} ++ ++/* ----- DI internal API ---------------------------------------------------- */ ++ ++DIIB_ENTRY *DIImplBrgFind(const IMG_CHAR *pszPath) ++{ ++ DIIB_ENTRY *psEntry; ++ ++ OSLockAcquire(_g_psImpl->psEntriesLock); ++ psEntry = (void *) HASH_Retrieve_Extended(_g_psImpl->psEntriesTable, ++ (IMG_CHAR *) pszPath); ++ OSLockRelease(_g_psImpl->psEntriesLock); ++ ++ return psEntry; ++} ++ ++/* ----- DI bridge interface ------------------------------------------------ */ ++ ++static PVRSRV_ERROR _CreateStream(IMG_CHAR *pszStreamName, IMG_HANDLE *phStream) ++{ ++ IMG_UINT32 iRet; ++ IMG_HANDLE hStream; ++ PVRSRV_ERROR eError; ++ ++ /* for now only one stream can be 
created. Should we be able to create ++ * per context stream? */ ++ iRet = OSSNPrintf(pszStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE, ++ "di_stream_%x", OSGetCurrentClientProcessIDKM()); ++ if (iRet >= PRVSRVTL_MAX_STREAM_NAME_SIZE) ++ { ++ /* this check is superfluous because it can never happen but in case ++ * someone changes the definition of PRVSRVTL_MAX_STREAM_NAME_SIZE ++ * handle this case */ ++ pszStreamName[0] = '\0'; ++ return PVRSRV_ERROR_INTERNAL_ERROR; ++ } ++ ++ eError = TLStreamCreate(&hStream, pszStreamName, STREAM_BUFFER_SIZE, ++ TL_OPMODE_DROP_NEWER, NULL, NULL, NULL, NULL); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ *phStream = hStream; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR DICreateContextKM(IMG_CHAR *pszStreamName, DI_CONTEXT **ppsContext) ++{ ++ PVRSRV_ERROR eError; ++ DI_CONTEXT *psContext; ++ IMG_HANDLE hStream = NULL; ++ THREAD_STATE eTState; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsContext != NULL, "ppsContext"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszStreamName != NULL, "pszStreamName"); ++ ++ psContext = OSAllocMem(sizeof(*psContext)); ++ PVR_LOG_GOTO_IF_NOMEM(psContext, eError, return_); ++ ++ eError = _CreateStream(pszStreamName, &hStream); ++ PVR_LOG_GOTO_IF_ERROR(eError, "_CreateStream", free_desc_); ++ ++ psContext->hStream = hStream; ++ /* indicated to the write thread if the client is still connected and ++ * waiting for the data */ ++ psContext->bClientConnected = IMG_TRUE; ++ OSAtomicWrite(&psContext->iRefCnt, 1); ++ ++ eTState = OSAtomicCompareExchange(&_g_psImpl->eThreadState, ++ THREAD_STATE_NULL, ++ THREAD_STATE_ALIVE); ++ ++ /* if the thread has not been started yet do it */ ++ if (eTState == THREAD_STATE_NULL) ++ { ++ PVR_ASSERT(_g_psImpl->hWriterThread == NULL); ++ ++ eError = OSThreadCreate(&_g_psImpl->hWriterThread, "di_writer", ++ _WriterThread, NULL, IMG_FALSE, NULL); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreate", free_close_stream_); ++ } ++ ++ *ppsContext = psContext; ++ ++ return PVRSRV_OK; ++ ++free_close_stream_: ++ TLStreamClose(psContext->hStream); ++ OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_TERMINATED); ++free_desc_: ++ OSFreeMem(psContext); ++return_: ++ return eError; ++} ++ ++PVRSRV_ERROR DIDestroyContextKM(DI_CONTEXT *psContext) ++{ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); ++ ++ /* pass the information to the write thread that the client has ++ * disconnected */ ++ psContext->bClientConnected = IMG_FALSE; ++ ++ return _ContextUnrefAndMaybeDestroy(psContext); ++} ++ ++PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, ++ IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size) ++{ ++ PVRSRV_ERROR eError; ++ struct DIIB_WORK_ITEM *psItem; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszEntryPath != NULL, "pszEntryPath"); ++ ++ /* 'no stats' to avoid acquiring the process stats locks */ ++ psItem = OSAllocMemNoStats(sizeof(*psItem)); ++ PVR_LOG_GOTO_IF_NOMEM(psItem, eError, return_); ++ ++ psItem->psContext = psContext; ++ psItem->psEntry = DIImplBrgFind(pszEntryPath); ++ PVR_LOG_GOTO_IF_FALSE_VA(psItem->psEntry != NULL, free_item_, ++ "entry %s does not exist", pszEntryPath); ++ psItem->ui64Size = ui64Size; ++ psItem->ui64Offset = ui64Offset; ++ ++ /* increment ref count on the context so that it doesn't get freed ++ * before it gets processed by the writer thread. 
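++ * The matching decrement happens in _WriterThread(), via
++ * _ContextUnrefAndMaybeDestroy(), either after the item has been processed
++ * or when the queue is flushed on shutdown.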
*/ ++ OSAtomicIncrement(&psContext->iRefCnt); ++ ++ OSLockAcquire(_g_psImpl->psWriterLock); ++ dllist_add_to_head(&_g_psImpl->sWriterQueue, &psItem->sQueueElement); ++ OSLockRelease(_g_psImpl->psWriterLock); ++ ++ eError = OSEventObjectSignal(_g_psImpl->hWriterEventObject); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ ++ return PVRSRV_OK; ++ ++free_item_: ++ eError = PVRSRV_ERROR_NOT_FOUND; ++ OSFreeMemNoStats(psItem); ++return_: ++ return eError; ++} ++ ++PVRSRV_ERROR DIWriteEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, ++ IMG_UINT32 ui32ValueSize, const IMG_CHAR *pszValue) ++{ ++ DIIB_ENTRY *psEntry; ++ DI_PFN_WRITE pfnEntryPuts; ++ IMG_INT64 i64Length = 0; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszEntryPath != NULL, "pszEntryPath"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszValue != NULL, "pszValue"); ++ ++ psEntry = DIImplBrgFind(pszEntryPath); ++ PVR_LOG_RETURN_IF_FALSE_VA(psEntry != NULL, PVRSRV_ERROR_NOT_FOUND, ++ "entry %s does not exist", pszEntryPath); ++ ++ pfnEntryPuts = psEntry->sIterCb.pfnWrite; ++ if (pfnEntryPuts != NULL) ++ { ++ i64Length = pfnEntryPuts(pszValue, ui32ValueSize, (IMG_UINT64*)&i64Length, psEntry->pvPrivData); ++ ++ /* To deal with -EINVAL being returned */ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(i64Length >= 0, pszValue); ++ } ++ else ++ { ++ PVR_LOG_MSG(PVR_DBG_WARNING, "Unable to write to Entry. Write callback not enabled"); ++ return PVRSRV_ERROR_INVALID_REQUEST; ++ } ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR _listName(uintptr_t k, ++ uintptr_t v, ++ void* hStream) ++{ ++ PVRSRV_ERROR eError; ++ DIIB_ENTRY *psEntry; ++ IMG_UINT32 ui32Size; ++ IMG_CHAR aszName[DI_IMPL_BRG_PATH_LEN]; ++ ++ psEntry = (DIIB_ENTRY*) v; ++ PVR_ASSERT(psEntry != NULL); ++ PVR_UNREFERENCED_PARAMETER(k); ++ ++ ui32Size = OSSNPrintf(aszName, DI_IMPL_BRG_PATH_LEN, "%s\n", psEntry->pszFullPath); ++ PVR_LOG_IF_FALSE(ui32Size > 5, "ui32Size too small, Error suspected!"); ++ eError = TLStreamWrite(hStream, (IMG_UINT8 *)aszName, ui32Size+1); ++ ++ return eError; ++} ++ ++ ++PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psContext != NULL, "psContext"); ++ ++ eError = HASH_Iterate(_g_psImpl->psEntriesTable, _listName, psContext->hStream); ++ PVR_LOG_IF_ERROR(eError, "HASH_Iterate_Extended"); ++ ++ eError = TLStreamMarkEOS(psContext->hStream, IMG_FALSE); ++ return eError; ++} ++ ++/* ----- DI implementation interface ---------------------------------------- */ ++ ++static PVRSRV_ERROR _Init(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ _g_psImpl = OSAllocMem(sizeof(*_g_psImpl)); ++ PVR_LOG_GOTO_IF_NOMEM(_g_psImpl, eError, return_); ++ ++ _g_psImpl->psEntriesTable = HASH_Create_Extended(ENTRIES_TABLE_INIT_SIZE, ++ DI_IMPL_BRG_PATH_LEN, ++ _Hash, _Compare); ++ PVR_LOG_GOTO_IF_NOMEM(_g_psImpl->psEntriesTable, eError, free_impl_); ++ ++ eError = OSLockCreate(&_g_psImpl->psEntriesLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSCreateLock", free_table_); ++ ++ eError = OSLockCreate(&_g_psImpl->psWriterLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSCreateLock", free_entries_lock_); ++ ++ eError = OSEventObjectCreate("DI_WRITER_EO", ++ &_g_psImpl->hWriterEventObject); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", free_writer_lock_); ++ ++ _g_psImpl->hWriterThread = NULL; ++ OSAtomicWrite(&_g_psImpl->eThreadState, THREAD_STATE_NULL); ++ ++ dllist_init(&_g_psImpl->sWriterQueue); ++ ++ return PVRSRV_OK; ++ ++free_writer_lock_: ++ 
OSLockDestroy(_g_psImpl->psWriterLock); ++free_entries_lock_: ++ OSLockDestroy(_g_psImpl->psEntriesLock); ++free_table_: ++ HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE); ++free_impl_: ++ OSFreeMem(_g_psImpl); ++ _g_psImpl = NULL; ++return_: ++ return eError; ++} ++ ++static void _DeInit(void) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ THREAD_STATE eTState; ++ ++ eTState = OSAtomicCompareExchange(&_g_psImpl->eThreadState, ++ THREAD_STATE_ALIVE, ++ THREAD_STATE_TERMINATED); ++ ++ if (eTState == THREAD_STATE_ALIVE) ++ { ++ if (_g_psImpl->hWriterEventObject != NULL) ++ { ++ eError = OSEventObjectSignal(_g_psImpl->hWriterEventObject); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ } ++ ++ LOOP_UNTIL_TIMEOUT(WRITER_THREAD_DESTROY_TIMEOUT) ++ { ++ eError = OSThreadDestroy(_g_psImpl->hWriterThread); ++ if (eError == PVRSRV_OK) ++ { ++ break; ++ } ++ OSWaitus(WRITER_THREAD_DESTROY_TIMEOUT/WRITER_THREAD_DESTROY_RETRIES); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); ++ } ++ ++ if (_g_psImpl->hWriterEventObject != NULL) ++ { ++ eError = OSEventObjectDestroy(_g_psImpl->hWriterEventObject); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); ++ } ++ ++ HASH_Delete_Extended(_g_psImpl->psEntriesTable, IMG_FALSE); ++ OSLockDestroy(_g_psImpl->psWriterLock); ++ OSLockDestroy(_g_psImpl->psEntriesLock); ++ OSFreeMem(_g_psImpl); ++ _g_psImpl = NULL; ++} ++ ++/* Recursively traverses the ancestors list up to the root group and ++ * appends their names preceded by "/" to the path in reverse order ++ * (root group's name first and psGroup group's name last). ++ * Returns current offset in the path (the current path length without the ++ * NUL character). If there is no more space in the path returns -1 ++ * to indicate an error (the path is too long to fit into the buffer). 
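++ *
++ * e.g. assuming a root group named "pvr" and a child group "gpu00"
++ * (illustrative names only), _BuildGroupPath() for "gpu00" writes
++ * "/pvr/gpu00" into pszPath and returns offset 10.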
*/ ++static IMG_INT _BuildGroupPath(IMG_CHAR *pszPath, const DIIB_GROUP *psGroup) ++{ ++ IMG_INT iOff; ++ ++ if (psGroup == NULL) ++ { ++ return 0; ++ } ++ ++ PVR_ASSERT(pszPath != NULL); ++ ++ iOff = _BuildGroupPath(pszPath, psGroup->psParentGroup); ++ PVR_RETURN_IF_FALSE(iOff != -1, -1); ++ ++ iOff += OSStringLCopy(pszPath + iOff, "/", ++ DI_IMPL_BRG_PATH_LEN - iOff); ++ PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1); ++ ++ iOff += OSStringLCopy(pszPath + iOff, psGroup->pszName, ++ DI_IMPL_BRG_PATH_LEN - iOff); ++ PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, -1); ++ ++ return iOff; ++} ++ ++static PVRSRV_ERROR _BuildEntryPath(IMG_CHAR *pszPath, const IMG_CHAR *pszName, ++ const DIIB_GROUP *psGroup) ++{ ++ IMG_INT iOff = _BuildGroupPath(pszPath, psGroup); ++ PVR_RETURN_IF_FALSE(iOff != -1, PVRSRV_ERROR_INVALID_OFFSET); ++ ++ iOff += OSStringLCopy(pszPath + iOff, "/", DI_IMPL_BRG_PATH_LEN - iOff); ++ PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, ++ PVRSRV_ERROR_INVALID_OFFSET); ++ ++ iOff += OSStringLCopy(pszPath + iOff, pszName, DI_IMPL_BRG_PATH_LEN - iOff); ++ PVR_RETURN_IF_FALSE(iOff < DI_IMPL_BRG_PATH_LEN, ++ PVRSRV_ERROR_INVALID_OFFSET); ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR _CreateEntry(const IMG_CHAR *pszName, ++ DI_ENTRY_TYPE eType, ++ const DI_ITERATOR_CB *psIterCb, ++ void *pvPrivData, ++ void *pvParentGroup, ++ void **pvEntry) ++{ ++ DIIB_GROUP *psParentGroup = pvParentGroup; ++ DIIB_ENTRY *psEntry; ++ PVRSRV_ERROR eError; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pvEntry != NULL, "pvEntry"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentGroup != NULL, "pvParentGroup"); ++ ++ switch (eType) ++ { ++ case DI_ENTRY_TYPE_GENERIC: ++ break; ++ case DI_ENTRY_TYPE_RANDOM_ACCESS: ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "eType invalid in %s()", __func__)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, return_); ++ } ++ ++ psEntry = OSAllocMem(sizeof(*psEntry)); ++ PVR_LOG_GOTO_IF_NOMEM(psEntry, eError, return_); ++ ++ eError = OSLockCreate(&psEntry->hLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", free_entry_); ++ ++ psEntry->eType = eType; ++ psEntry->sIterCb = *psIterCb; ++ psEntry->pvPrivData = pvPrivData; ++ psEntry->psParentGroup = psParentGroup; ++ psEntry->pszFullPath[0] = '\0'; ++ ++ psEntry->sImplEntry.pvPrivData = pvPrivData; ++ psEntry->sImplEntry.pvNative = NULL; ++ psEntry->sImplEntry.psCb = &_g_sEntryCallbacks; ++ ++ eError = _BuildEntryPath(psEntry->pszFullPath, pszName, ++ psEntry->psParentGroup); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed in _BuildEntryPath() for \"%s\" " ++ "entry", __func__, pszName)); ++ goto destroy_lock_; ++ } ++ ++ OSLockAcquire(_g_psImpl->psEntriesLock); ++ eError = HASH_Insert_Extended(_g_psImpl->psEntriesTable, ++ psEntry->pszFullPath, ++ (uintptr_t) psEntry) ? 
++ PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; ++ OSLockRelease(_g_psImpl->psEntriesLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "HASH_Insert_Extended failed", destroy_lock_); ++ ++ *pvEntry = psEntry; ++ ++ return PVRSRV_OK; ++ ++destroy_lock_: ++ OSLockDestroy(psEntry->hLock); ++free_entry_: ++ OSFreeMem(psEntry); ++return_: ++ return eError; ++} ++ ++static void _DestroyEntry(void *pvEntry) ++{ ++ DIIB_ENTRY *psEntry = pvEntry; ++ PVR_ASSERT(psEntry != NULL); ++ ++ OSLockAcquire(_g_psImpl->psEntriesLock); ++ HASH_Remove_Extended(_g_psImpl->psEntriesTable, psEntry->pszFullPath); ++ OSLockRelease(_g_psImpl->psEntriesLock); ++ ++ OSLockDestroy(psEntry->hLock); ++ OSFreeMem(psEntry); ++} ++ ++static PVRSRV_ERROR _CreateGroup(const IMG_CHAR *pszName, ++ void *pvParentGroup, ++ void **ppvGroup) ++{ ++ DIIB_GROUP *psNewGroup; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(ppvGroup != NULL, "ppvGroup"); ++ ++ psNewGroup = OSAllocMem(sizeof(*psNewGroup)); ++ PVR_LOG_RETURN_IF_NOMEM(psNewGroup, "OSAllocMem"); ++ ++ psNewGroup->pszName = pszName; ++ psNewGroup->psParentGroup = pvParentGroup; ++ ++ *ppvGroup = psNewGroup; ++ ++ return PVRSRV_OK; ++} ++ ++static void _DestroyGroup(void *pvGroup) ++{ ++ DIIB_GROUP *psGroup = pvGroup; ++ PVR_ASSERT(psGroup != NULL); ++ ++ OSFreeMem(psGroup); ++} ++ ++PVRSRV_ERROR PVRDIImplBrgRegister(void) ++{ ++ OSDI_IMPL_CB sImplCb = { ++ .pfnInit = _Init, ++ .pfnDeInit = _DeInit, ++ .pfnCreateEntry = _CreateEntry, ++ .pfnDestroyEntry = _DestroyEntry, ++ .pfnCreateGroup = _CreateGroup, ++ .pfnDestroyGroup = _DestroyGroup ++ }; ++ ++ return DIRegisterImplementation("impl_brg", &sImplCb); ++} +diff --git a/drivers/gpu/drm/img-rogue/di_impl_brg.h b/drivers/gpu/drm/img-rogue/di_impl_brg.h +new file mode 100644 +index 000000000000..7d5a6ca757e8 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/di_impl_brg.h +@@ -0,0 +1,92 @@ ++/*************************************************************************/ /*! ++@File ++@Title OS agnostic implementation of Debug Info interface. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_IMPL_BRG_H ++#define PVR_IMPL_BRG_H ++ ++#include "pvrsrv_error.h" ++ ++typedef struct DI_CONTEXT_TAG DI_CONTEXT; ++typedef struct DI_ENTRY_DESC DI_ENTRY_DESC; ++ ++PVRSRV_ERROR PVRDIImplBrgRegister(void); ++ ++/*! @Function DICreateContextKM ++ * ++ * @Description ++ * Creates DI context which among others also creates a TL stream for reading ++ * entries. ++ * ++ * @Output pszStreamName: name of the TL stream created in this context ++ * @Output ppsContext: pointer to the new context ++ * ++ * @Return PVRSRV_ERROR error code ++ * PVRSRV_OK in case of a success ++ * PVRSRV_ERROR_INVALID_PARAMS if any of the parameters is invalid ++ * PVRSRV_ERROR_OUT_OF_MEMORY if any of the memory allocations failed ++ * error codes returned by TLStreamCreate() ++ */ ++PVRSRV_ERROR DICreateContextKM(IMG_CHAR *pszStreamName, ++ DI_CONTEXT **ppsContext); ++ ++/*! @Function DIDestroyContextKM ++ * ++ * @Description ++ * Destroy the DI context and all underlying dependencies. ++ * ++ * @Input psContext: pointer to the context ++ * ++ * @Return PVRSRV_ERROR error code ++ * PVRSRV_OK in case of a success ++ * PVRSRV_ERROR_INVALID_PARAMS if invalid context pointer given ++ */ ++PVRSRV_ERROR DIDestroyContextKM(DI_CONTEXT *psContext); ++ ++PVRSRV_ERROR DIReadEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, ++ IMG_UINT64 ui64Offset, IMG_UINT64 ui64Size); ++ ++PVRSRV_ERROR DIWriteEntryKM(DI_CONTEXT *psContext, const IMG_CHAR *pszEntryPath, ++ IMG_UINT32 ui32ValueSize, const IMG_CHAR *pszValue); ++ ++PVRSRV_ERROR DIListAllEntriesKM(DI_CONTEXT *psContext); ++ ++#endif /* PVR_IMPL_BRG_H */ +diff --git a/drivers/gpu/drm/img-rogue/di_impl_brg_intern.h b/drivers/gpu/drm/img-rogue/di_impl_brg_intern.h +new file mode 100644 +index 000000000000..5e11cac59854 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/di_impl_brg_intern.h +@@ -0,0 +1,61 @@ ++/*************************************************************************/ /*! ++@File ++@Title OS agnostic implementation of Debug Info internal interface. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_IMPL_BRG_INTERN_H ++#define PVR_IMPL_BRG_INTERN_H ++ ++typedef struct DIIB_GROUP DIIB_GROUP; ++typedef struct DIIB_ENTRY DIIB_ENTRY; ++ ++/*! @Function DIImplBrgFind ++ * ++ * @Description ++ * Retrieves an entry based on a given path. ++ * ++ * @Input pszPath: Full entry path in form of ++ * /rootGroup/.../parentGroup/entryName. ++ * ++ * @Return Returns entry object if exists or NULL otherwise. ++ */ ++DIIB_ENTRY *DIImplBrgFind(const IMG_CHAR *pszPath); ++ ++#endif /* PVR_IMPL_BRG_INTERN_H */ +diff --git a/drivers/gpu/drm/img-rogue/di_server.c b/drivers/gpu/drm/img-rogue/di_server.c +new file mode 100644 +index 000000000000..391f3aa134a7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/di_server.c +@@ -0,0 +1,780 @@ ++/*************************************************************************/ /*! ++@File ++@Title Debug Info framework functions and types. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "di_server.h" ++#include "osdi_impl.h" ++#include "pvrsrv_error.h" ++#include "dllist.h" ++#include "lock.h" ++#include "allocmem.h" ++#include "osfunc.h" ++ ++#define ROOT_GROUP_NAME PVR_DRM_NAME ++ ++/*! Implementation object. */ ++typedef struct DI_IMPL ++{ ++ const IMG_CHAR *pszName; /*pszName = OSAllocMem(sizeof(ROOT_GROUP_NAME)); ++ PVR_LOG_GOTO_IF_NOMEM(_g_psRootGroup->pszName, eError, cleanup_name_); ++ OSStringLCopy(_g_psRootGroup->pszName, ROOT_GROUP_NAME, ++ sizeof(ROOT_GROUP_NAME)); ++ ++ dllist_init(&_g_psRootGroup->sListNode); ++ dllist_init(&_g_psRootGroup->sGroupList); ++ dllist_init(&_g_psRootGroup->sEntryList); ++ dllist_init(&_g_psRootGroup->sNativeHandleList); ++ ++ return PVRSRV_OK; ++ ++cleanup_name_: ++ OSFreeMem(_g_psRootGroup); ++destroy_lock_: ++ OSLockDestroy(_g_hLock); ++return_: ++ return eError; ++} ++ ++/* Destroys the whole tree of group and entries for a given group as a root. 
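++ * Entries are destroyed first, then child groups are recursed into and,
++ * finally, the group itself is destroyed.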
*/ ++static void _DeInitGroupRecursively(DI_GROUP *psGroup) ++{ ++ DLLIST_NODE *psThis, *psNext; ++ ++ dllist_foreach_node(&psGroup->sEntryList, psThis, psNext) ++ { ++ DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode); ++ DIDestroyEntry(psThisEntry); ++ } ++ ++ dllist_foreach_node(&psGroup->sGroupList, psThis, psNext) ++ { ++ DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode); ++ ++ _DeInitGroupRecursively(psThisGroup); ++ } ++ ++ DIDestroyGroup(psGroup); ++} ++ ++void DIDeInit(void) ++{ ++ DLLIST_NODE *psThis, *psNext; ++ ++ OSLockAcquire(_g_hLock); ++ ++ if (!dllist_is_empty(&_g_psRootGroup->sGroupList) || ++ !dllist_is_empty(&_g_psRootGroup->sEntryList)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: entries or groups still exist during " ++ "de-initialisation process, destroying all", __func__)); ++ } ++ ++ _DeInitGroupRecursively(_g_psRootGroup); ++ _g_psRootGroup = NULL; ++ ++ /* Remove all of the implementations. */ ++ dllist_foreach_node(&_g_sImpls, psThis, psNext) ++ { ++ DI_IMPL *psDiImpl = IMG_CONTAINER_OF(psThis, DI_IMPL, sListNode); ++ ++ if (psDiImpl->bInitialised) ++ { ++ psDiImpl->sCb.pfnDeInit(); ++ psDiImpl->bInitialised = IMG_FALSE; ++ } ++ ++ dllist_remove_node(&psDiImpl->sListNode); ++ OSFreeMem(psDiImpl); ++ } ++ ++ OSLockRelease(_g_hLock); ++ ++ /* all resources freed so free the lock itself too */ ++ ++ OSLockDestroy(_g_hLock); ++} ++ ++static IMG_BOOL _ValidateIteratorCb(const DI_ITERATOR_CB *psIterCb, ++ DI_ENTRY_TYPE eType) ++{ ++ IMG_UINT32 uiFlags = 0; ++ ++ if (psIterCb == NULL) ++ { ++ return IMG_FALSE; ++ } ++ ++ if (eType == DI_ENTRY_TYPE_GENERIC) ++ { ++ uiFlags |= psIterCb->pfnShow != NULL ? BIT(0) : 0; ++ uiFlags |= psIterCb->pfnStart != NULL ? BIT(1) : 0; ++ uiFlags |= psIterCb->pfnStop != NULL ? BIT(2) : 0; ++ uiFlags |= psIterCb->pfnNext != NULL ? BIT(3) : 0; ++ ++ /* either only pfnShow or all callbacks need to be set */ ++ if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x0f)) ++ { ++ return IMG_FALSE; ++ } ++ } ++ else if (eType == DI_ENTRY_TYPE_RANDOM_ACCESS) ++ { ++ uiFlags |= psIterCb->pfnRead != NULL ? BIT(0) : 0; ++ uiFlags |= psIterCb->pfnSeek != NULL ? 
BIT(1) : 0; ++ ++ /* either only pfnRead or all callbacks need to be set */ ++ if (uiFlags != BIT(0) && !BITMASK_HAS(uiFlags, 0x03)) ++ { ++ return IMG_FALSE; ++ } ++ } ++ else ++ { ++ return IMG_FALSE; ++ } ++ ++ return IMG_TRUE; ++} ++ ++static PVRSRV_ERROR _CreateNativeEntry(DI_ENTRY *psEntry, ++ const DI_NATIVE_HANDLE *psNativeParent) ++{ ++ PVRSRV_ERROR eError; ++ DI_IMPL *psImpl = psNativeParent->psDiImpl; ++ ++ DI_NATIVE_HANDLE *psNativeEntry = OSAllocMem(sizeof(*psNativeEntry)); ++ PVR_LOG_GOTO_IF_NOMEM(psNativeEntry, eError, return_); ++ ++ eError = psImpl->sCb.pfnCreateEntry(psEntry->pszName, ++ psEntry->eType, ++ &psEntry->sIterCb, ++ psEntry->pvPrivData, ++ psNativeParent->pvHandle, ++ &psNativeEntry->pvHandle); ++ PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateEntry", free_memory_); ++ ++ psNativeEntry->psDiImpl = psImpl; ++ ++ dllist_add_to_head(&psEntry->sNativeHandleList, &psNativeEntry->sListNode); ++ ++ return PVRSRV_OK; ++ ++free_memory_: ++ OSFreeMem(psNativeEntry); ++return_: ++ return eError; ++} ++ ++static void _DestroyNativeEntry(DI_NATIVE_HANDLE *psNativeEntry) ++{ ++ dllist_remove_node(&psNativeEntry->sListNode); ++ OSFreeMem(psNativeEntry); ++} ++ ++PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName, ++ DI_GROUP *psGroup, ++ const DI_ITERATOR_CB *psIterCb, ++ void *pvPriv, ++ DI_ENTRY_TYPE eType, ++ DI_ENTRY **ppsEntry) ++{ ++ PVRSRV_ERROR eError; ++ DLLIST_NODE *psThis, *psNext; ++ DI_ENTRY *psEntry; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateIteratorCb(psIterCb, eType), ++ "psIterCb"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsEntry != NULL, "psEntry"); ++ ++ psEntry = OSAllocMem(sizeof(*psEntry)); ++ PVR_LOG_RETURN_IF_NOMEM(psEntry, "OSAllocMem"); ++ ++ if (psGroup == NULL) ++ { ++ psGroup = _g_psRootGroup; ++ } ++ ++ psEntry->pszName = pszName; ++ psEntry->pvPrivData = pvPriv; ++ psEntry->eType = eType; ++ psEntry->sIterCb = *psIterCb; ++ dllist_init(&psEntry->sNativeHandleList); ++ ++ OSLockAcquire(_g_hLock); ++ ++ dllist_add_to_tail(&psGroup->sEntryList, &psEntry->sListNode); ++ ++ /* Iterate over all of the native handles of parent group to create ++ * the entry for every registered implementation. */ ++ dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) ++ { ++ DI_NATIVE_HANDLE *psNativeGroup = ++ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); ++ ++ eError = _CreateNativeEntry(psEntry, psNativeGroup); ++ PVR_GOTO_IF_ERROR(eError, cleanup_); ++ } ++ ++ OSLockRelease(_g_hLock); ++ ++ *ppsEntry = psEntry; ++ ++ return PVRSRV_OK; ++ ++cleanup_: ++ OSLockRelease(_g_hLock); ++ ++ /* Something went wrong so if there were any native entries created remove ++ * them from the list, free them and free the DI entry itself. */ ++ dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext) ++ { ++ DI_NATIVE_HANDLE *psNativeEntry = ++ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); ++ ++ _DestroyNativeEntry(psNativeEntry); ++ } ++ ++ OSFreeMem(psEntry); ++ ++ return eError; ++} ++ ++void DIDestroyEntry(DI_ENTRY *psEntry) ++{ ++ DLLIST_NODE *psThis, *psNext; ++ ++ PVR_LOG_RETURN_VOID_IF_FALSE(psEntry != NULL, ++ "psEntry invalid in DIDestroyEntry()"); ++ ++ /* Iterate through all of the native entries of the DI entry, remove ++ * them from the list and then destroy them. After that, destroy the ++ * DI entry itself. 
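++ * (dllist_foreach_node() keeps a pointer to the next node, so removing the
++ * current node inside the loop is safe.)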
*/ ++ dllist_foreach_node(&psEntry->sNativeHandleList, psThis, psNext) ++ { ++ DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, ++ sListNode); ++ ++ /* The implementation must ensure that entry is not removed if any ++ * operations are being executed on the entry. If this is the case ++ * the implementation should block until all of them are finished ++ * and prevent any further operations. ++ * This will guarantee proper synchronisation between the DI framework ++ * and underlying implementations and prevent destruction/access ++ * races. */ ++ psNative->psDiImpl->sCb.pfnDestroyEntry(psNative->pvHandle); ++ dllist_remove_node(&psNative->sListNode); ++ OSFreeMem(psNative); ++ } ++ ++ dllist_remove_node(&psEntry->sListNode); ++ ++ OSFreeMem(psEntry); ++} ++ ++static PVRSRV_ERROR _CreateNativeGroup(DI_GROUP *psGroup, ++ const DI_NATIVE_HANDLE *psNativeParent, ++ DI_NATIVE_HANDLE **ppsNativeGroup) ++{ ++ PVRSRV_ERROR eError; ++ DI_IMPL *psImpl = psNativeParent->psDiImpl; ++ ++ DI_NATIVE_HANDLE *psNativeGroup = OSAllocMem(sizeof(*psNativeGroup)); ++ PVR_LOG_GOTO_IF_NOMEM(psNativeGroup, eError, return_); ++ ++ eError = psImpl->sCb.pfnCreateGroup(psGroup->pszName, ++ psNativeParent->pvHandle, ++ &psNativeGroup->pvHandle); ++ PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_); ++ ++ psNativeGroup->psDiImpl = psImpl; ++ ++ dllist_add_to_head(&psGroup->sNativeHandleList, &psNativeGroup->sListNode); ++ ++ *ppsNativeGroup = psNativeGroup; ++ ++ return PVRSRV_OK; ++ ++free_memory_: ++ OSFreeMem(psNativeGroup); ++return_: ++ return eError; ++} ++ ++static void _DestroyNativeGroup(DI_NATIVE_HANDLE *psNativeEntry) ++{ ++ dllist_remove_node(&psNativeEntry->sListNode); ++ OSFreeMem(psNativeEntry); ++} ++ ++PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName, ++ DI_GROUP *psParent, ++ DI_GROUP **ppsGroup) ++{ ++ PVRSRV_ERROR eError; ++ DLLIST_NODE *psThis, *psNext; ++ DI_GROUP *psGroup; ++ size_t uSize; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsGroup != NULL, "ppsDiGroup"); ++ ++ psGroup = OSAllocMem(sizeof(*psGroup)); ++ PVR_LOG_RETURN_IF_NOMEM(psGroup, "OSAllocMem"); ++ ++ if (psParent == NULL) ++ { ++ psParent = _g_psRootGroup; ++ } ++ ++ uSize = OSStringLength(pszName) + 1; ++ psGroup->pszName = OSAllocMem(uSize * sizeof(*psGroup->pszName)); ++ PVR_LOG_GOTO_IF_NOMEM(psGroup->pszName, eError, cleanup_name_); ++ OSStringLCopy(psGroup->pszName, pszName, uSize); ++ ++ psGroup->psParent = psParent; ++ dllist_init(&psGroup->sGroupList); ++ dllist_init(&psGroup->sEntryList); ++ dllist_init(&psGroup->sNativeHandleList); ++ ++ OSLockAcquire(_g_hLock); ++ ++ dllist_add_to_tail(&psParent->sGroupList, &psGroup->sListNode); ++ ++ /* Iterate over all of the native handles of parent group to create ++ * the group for every registered implementation. */ ++ dllist_foreach_node(&psParent->sNativeHandleList, psThis, psNext) ++ { ++ DI_NATIVE_HANDLE *psNativeGroup = NULL, *psNativeParent = ++ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); ++ ++ eError = _CreateNativeGroup(psGroup, psNativeParent, &psNativeGroup); ++ PVR_GOTO_IF_ERROR(eError, cleanup_); ++ } ++ ++ OSLockRelease(_g_hLock); ++ ++ *ppsGroup = psGroup; ++ ++ return PVRSRV_OK; ++ ++cleanup_: ++ OSLockRelease(_g_hLock); ++ ++ /* Something went wrong so if there were any native groups created remove ++ * them from the list, free them and free the DI group itself. 
*/ ++ dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) ++ { ++ DI_NATIVE_HANDLE *psNativeGroup = ++ IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode); ++ ++ dllist_remove_node(&psNativeGroup->sListNode); ++ OSFreeMem(psNativeGroup); ++ } ++ ++ OSFreeMem(psGroup->pszName); ++cleanup_name_: ++ OSFreeMem(psGroup); ++ ++ return eError; ++} ++ ++void DIDestroyGroup(DI_GROUP *psGroup) ++{ ++ DLLIST_NODE *psThis, *psNext; ++ ++ PVR_LOG_RETURN_VOID_IF_FALSE(psGroup != NULL, ++ "psGroup invalid in DIDestroyGroup()"); ++ ++ /* Iterate through all of the native groups of the DI group, remove ++ * them from the list and then destroy them. After that destroy the ++ * DI group itself. */ ++ dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext) ++ { ++ DI_NATIVE_HANDLE *psNative = IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, ++ sListNode); ++ ++ psNative->psDiImpl->sCb.pfnDestroyGroup(psNative->pvHandle); ++ dllist_remove_node(&psNative->sListNode); ++ OSFreeMem(psNative); ++ } ++ ++ dllist_remove_node(&psGroup->sListNode); ++ ++ OSFreeMem(psGroup->pszName); ++ OSFreeMem(psGroup); ++} ++ ++void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry) ++{ ++ PVR_ASSERT(psEntry != NULL); ++ ++ return psEntry->pvPrivData; ++} ++ ++void DIWrite(const OSDI_IMPL_ENTRY *psEntry, const void *pvData, ++ IMG_UINT32 uiSize) ++{ ++ PVR_ASSERT(psEntry != NULL); ++ PVR_ASSERT(psEntry->psCb != NULL); ++ PVR_ASSERT(psEntry->psCb->pfnWrite != NULL); ++ PVR_ASSERT(psEntry->pvNative != NULL); ++ ++ psEntry->psCb->pfnWrite(psEntry->pvNative, pvData, uiSize); ++} ++ ++void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...) ++{ ++ va_list args; ++ ++ PVR_ASSERT(psEntry != NULL); ++ PVR_ASSERT(psEntry->psCb != NULL); ++ PVR_ASSERT(psEntry->psCb->pfnVPrintf != NULL); ++ PVR_ASSERT(psEntry->pvNative != NULL); ++ ++ va_start(args, pszFmt); ++ psEntry->psCb->pfnVPrintf(psEntry->pvNative, pszFmt, args); ++ va_end(args); ++} ++ ++void DIVPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ++ va_list pArgs) ++{ ++ PVR_ASSERT(psEntry != NULL); ++ PVR_ASSERT(psEntry->psCb != NULL); ++ PVR_ASSERT(psEntry->psCb->pfnVPrintf != NULL); ++ PVR_ASSERT(psEntry->pvNative != NULL); ++ ++ psEntry->psCb->pfnVPrintf(psEntry->pvNative, pszFmt, pArgs); ++} ++ ++void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr) ++{ ++ PVR_ASSERT(psEntry != NULL); ++ PVR_ASSERT(psEntry->psCb != NULL); ++ PVR_ASSERT(psEntry->psCb->pfnPuts != NULL); ++ PVR_ASSERT(psEntry->pvNative != NULL); ++ ++ psEntry->psCb->pfnPuts(psEntry->pvNative, pszStr); ++} ++ ++IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry) ++{ ++ PVR_ASSERT(psEntry != NULL); ++ PVR_ASSERT(psEntry->psCb != NULL); ++ PVR_ASSERT(psEntry->psCb->pfnHasOverflowed != NULL); ++ PVR_ASSERT(psEntry->pvNative != NULL); ++ ++ return psEntry->psCb->pfnHasOverflowed(psEntry->pvNative); ++} ++ ++/* ---- OS implementation API ---------------------------------------------- */ ++ ++static IMG_BOOL _ValidateImplCb(const OSDI_IMPL_CB *psImplCb) ++{ ++ PVR_GOTO_IF_FALSE(psImplCb->pfnInit != NULL, failed_); ++ PVR_GOTO_IF_FALSE(psImplCb->pfnDeInit != NULL, failed_); ++ PVR_GOTO_IF_FALSE(psImplCb->pfnCreateGroup != NULL, failed_); ++ PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyGroup != NULL, failed_); ++ PVR_GOTO_IF_FALSE(psImplCb->pfnCreateEntry != NULL, failed_); ++ PVR_GOTO_IF_FALSE(psImplCb->pfnDestroyEntry != NULL, failed_); ++ ++ return IMG_TRUE; ++ ++failed_: ++ return IMG_FALSE; ++} ++ ++/* Walks the tree of groups and entries and create all of the 
native handles
++ * for the given implementation for all of the already existing groups and
++ * entries. */
++static PVRSRV_ERROR _InitNativeHandlesRecursively(DI_IMPL *psImpl,
++                                                  DI_GROUP *psGroup,
++                                                  DI_NATIVE_HANDLE *psNativeParent)
++{
++	PVRSRV_ERROR eError;
++	DLLIST_NODE *psThis, *psNext;
++	DI_NATIVE_HANDLE *psNativeGroup;
++
++	psNativeGroup = OSAllocMem(sizeof(*psNativeGroup));
++	PVR_LOG_RETURN_IF_NOMEM(psNativeGroup, "OSAllocMem");
++
++	eError = psImpl->sCb.pfnCreateGroup(psGroup->pszName,
++	                                    psNativeParent ? psNativeParent->pvHandle : NULL,
++	                                    &psNativeGroup->pvHandle);
++	PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnCreateGroup", free_memory_);
++
++	psNativeGroup->psDiImpl = psImpl;
++
++	dllist_add_to_head(&psGroup->sNativeHandleList,
++	                   &psNativeGroup->sListNode);
++
++	dllist_foreach_node(&psGroup->sGroupList, psThis, psNext)
++	{
++		DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode);
++
++		/* recurse into the nested group */
++		eError = _InitNativeHandlesRecursively(psImpl, psThisGroup,
++		                                       psNativeGroup);
++		PVR_LOG_RETURN_IF_ERROR(eError, "_InitNativeHandlesRecursively");
++	}
++
++	dllist_foreach_node(&psGroup->sEntryList, psThis, psNext)
++	{
++		DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode);
++
++		eError = _CreateNativeEntry(psThisEntry, psNativeGroup);
++		PVR_LOG_RETURN_IF_ERROR(eError, "_CreateNativeEntry");
++	}
++
++	return PVRSRV_OK;
++
++free_memory_:
++	OSFreeMem(psNativeGroup);
++
++	return eError;
++}
++
++/* Walks the tree of groups and entries and destroys all of the native handles
++ * for the given implementation. */
++static void _DeInitNativeHandlesRecursively(DI_IMPL *psImpl, DI_GROUP *psGroup)
++{
++	DLLIST_NODE *psThis, *psNext;
++	DLLIST_NODE *psThisNative, *psNextNative;
++
++	dllist_foreach_node(&psGroup->sEntryList, psThis, psNext)
++	{
++		DI_ENTRY *psThisEntry = IMG_CONTAINER_OF(psThis, DI_ENTRY, sListNode);
++
++		/* free the native entry that belongs to this implementation; use
++		 * dedicated inner iterators so the outer walk is not disturbed */
++		dllist_foreach_node(&psThisEntry->sNativeHandleList, psThisNative,
++		                    psNextNative)
++		{
++			DI_NATIVE_HANDLE *psNativeEntry =
++			        IMG_CONTAINER_OF(psThisNative, DI_NATIVE_HANDLE, sListNode);
++
++			if (psNativeEntry->psDiImpl == psImpl)
++			{
++				_DestroyNativeEntry(psNativeEntry);
++				/* there can be only one entry on the list for a given
++				 * implementation */
++				break;
++			}
++		}
++	}
++
++	dllist_foreach_node(&psGroup->sGroupList, psThis, psNext)
++	{
++		DI_GROUP *psThisGroup = IMG_CONTAINER_OF(psThis, DI_GROUP, sListNode);
++
++		/* recurse into the nested group */
++		_DeInitNativeHandlesRecursively(psImpl, psThisGroup);
++	}
++
++	/* free the native group handle that belongs to this implementation */
++	dllist_foreach_node(&psGroup->sNativeHandleList, psThis, psNext)
++	{
++		DI_NATIVE_HANDLE *psNativeGroup =
++		        IMG_CONTAINER_OF(psThis, DI_NATIVE_HANDLE, sListNode);
++
++		if (psNativeGroup->psDiImpl == psImpl)
++		{
++			_DestroyNativeGroup(psNativeGroup);
++			/* there can be only one entry on the list for a given
++			 * implementation */
++			break;
++		}
++	}
++}
++
++static PVRSRV_ERROR _InitImpl(DI_IMPL *psImpl)
++{
++	PVRSRV_ERROR eError;
++
++	eError = psImpl->sCb.pfnInit();
++	PVR_LOG_GOTO_IF_ERROR(eError, "psImpl->sCb.pfnInit()", return_);
++
++	/* if the implementation is being registered after any groups or entries
++	 * have been created we need to walk the current tree and create
++	 * native groups and entries for all of the existing ones */
++	eError = _InitNativeHandlesRecursively(psImpl, _g_psRootGroup, NULL);
++	PVR_LOG_GOTO_IF_ERROR(eError, "_InitNativeHandlesRecursively",
++	                      free_native_handles_and_deinit_);
++
++	psImpl->bInitialised = IMG_TRUE;
++
++	return PVRSRV_OK;
++
++free_native_handles_and_deinit_:
++	/* something went wrong so we need to walk the tree and remove all of the
++	 * native entries and groups that we've created before we can destroy
++	 * the implementation */
++	_DeInitNativeHandlesRecursively(psImpl, _g_psRootGroup);
++	psImpl->sCb.pfnDeInit();
++return_:
++	return eError;
++}
++
++PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName,
++                                      const OSDI_IMPL_CB *psImplCb)
++{
++	DI_IMPL *psImpl;
++	PVRSRV_ERROR eError;
++
++	PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName");
++	PVR_LOG_RETURN_IF_INVALID_PARAM(_ValidateImplCb(psImplCb), "psImplCb");
++	/* if the root group does not exist it can mean one of 2 things:
++	 * - DIInit() was not called, so the initialisation order is incorrect
++	 *   and needs to be fixed
++	 * - DIInit() failed, but if that happens we should never make it here */
++	PVR_ASSERT(_g_psRootGroup != NULL);
++
++	psImpl = OSAllocMem(sizeof(*psImpl));
++	PVR_LOG_RETURN_IF_NOMEM(psImpl, "OSAllocMem");
++
++	psImpl->pszName = pszName;
++	psImpl->sCb = *psImplCb;
++
++	OSLockAcquire(_g_hLock);
++
++	eError = _InitImpl(psImpl);
++	if (eError != PVRSRV_OK)
++	{
++		/* the implementation could not be initialised so free the memory
++		 * and forget about it (it was never added to the global list) */
++
++		PVR_DPF((PVR_DBG_ERROR, "%s: could not initialise \"%s\" debug "
++		        "info implementation, discarding", __func__,
++		        psImpl->pszName));
++
++		goto free_impl_;
++	}
++
++	dllist_add_to_tail(&_g_sImpls, &psImpl->sListNode);
++
++	OSLockRelease(_g_hLock);
++
++	return PVRSRV_OK;
++
++free_impl_:
++	OSLockRelease(_g_hLock);
++
++	OSFreeMem(psImpl);
++
++	return eError;
++}
+diff --git a/drivers/gpu/drm/img-rogue/di_server.h b/drivers/gpu/drm/img-rogue/di_server.h
+new file mode 100644
+index 000000000000..a68894b1b430
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/di_server.h
+@@ -0,0 +1,219 @@
++/*************************************************************************/ /*!
++@File
++@Title Functions for creating Debug Info groups and entries.
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef DI_SERVER_H
++#define DI_SERVER_H
++
++#if defined(__linux__)
++ #include <linux/version.h>
++
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
++ #include <linux/stdarg.h>
++ #else
++ #include <stdarg.h>
++ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
++#else
++ #include <stdarg.h>
++#endif /* __linux__ */
++
++#include "di_common.h"
++#include "pvrsrv_error.h"
++#include "img_defs.h"
++
++/*! @Function DIInit
++ *
++ * @Description
++ * Initialises the Debug Info framework. This function will create common
++ * resources for the framework.
++ *
++ * Note: This function must be called before the first call to
++ * DIRegisterImplementation() for any of the implementations.
++ */
++PVRSRV_ERROR DIInit(void);
++
++/*! @Function DIDeInit
++ *
++ * @Description
++ * De-initialises the Debug Info framework. This function will call pfnDeInit()
++ * on each implementation and clean up common resources.
++ *
++ * In case some of the entries and groups have not been cleaned up, this
++ * function will also perform a recursive sweep and remove all entries and
++ * groups for all implementations.
++ */
++void DIDeInit(void);
++
++/*! @Function DICreateEntry
++ *
++ * @Description
++ * Creates a debug info entry. Depending on the implementation the entry
++ * might be, for example, a DebugFS file or something totally different.
++ *
++ * The entry will belong to the parent group if one is provided or to the
++ * root group if not.
++ *
++ * @Input pszName: name of the new entry
++ * @Input psGroup: parent group, if NULL the entry will belong to the root group
++ * @Input psIterCb: implementation of the iterator for the entry
++ * @Input psPriv: private data that will be passed to the iterator operations
++ * @Input eType: type of the entry
++ *
++ * @Output ppsEntry: handle to the newly created entry
++ *
++ * @Return PVRSRV_ERROR error code
++ */
++PVRSRV_ERROR DICreateEntry(const IMG_CHAR *pszName,
++                           DI_GROUP *psGroup,
++                           const DI_ITERATOR_CB *psIterCb,
++                           void *psPriv,
++                           DI_ENTRY_TYPE eType,
++                           DI_ENTRY **ppsEntry);
++
++/*! @Function DIDestroyEntry
++ *
++ * @Description
++ * Destroys a debug info entry.
++ *
++ * @Input psEntry: handle to the entry
++ */
++void DIDestroyEntry(DI_ENTRY *psEntry);
++
++/*! @Function DICreateGroup
++ *
++ * @Description
++ * Creates a debug info group. Depending on the implementation the group
++ * might be, for example, a DebugFS directory or something totally different.
++ *
++ * The group will belong to the parent group if one is provided or to the
++ * root group if not.
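++ *
++ * A minimal usage sketch (hedged; the "stats" group name and the error
++ * handling are illustrative only):
++ *
++ *     DI_GROUP *psStatsGroup;
++ *     PVRSRV_ERROR eError = DICreateGroup("stats", NULL, &psStatsGroup);
++ *     if (eError == PVRSRV_OK)
++ *     {
++ *         // entries created with psStatsGroup as their parent now live
++ *         // under "stats" (e.g. a "stats" DebugFS directory)
++ *     }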
++ *
++ * @Input pszName: name of the new group
++ * @Input psParent: parent group, if NULL the group will belong to the root
++ *                  group
++ *
++ * @Output ppsGroup: handle to the newly created group
++ *
++ * @Return PVRSRV_ERROR error code
++ */
++PVRSRV_ERROR DICreateGroup(const IMG_CHAR *pszName,
++                           DI_GROUP *psParent,
++                           DI_GROUP **ppsGroup);
++
++/*! @Function DIDestroyGroup
++ *
++ * @Description
++ * Destroys a debug info group.
++ *
++ * @Input psGroup: handle to the group
++ */
++void DIDestroyGroup(DI_GROUP *psGroup);
++
++/*! @Function DIGetPrivData
++ *
++ * @Description
++ * Retrieves private data from psEntry. The data is the pointer that was
++ * passed during entry creation via the psPriv parameter of DICreateEntry().
++ *
++ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
++ *
++ * @Returns pointer to the private data (can be NULL if private data
++ *          has not been specified)
++ */
++void *DIGetPrivData(const OSDI_IMPL_ENTRY *psEntry);
++
++/*! @Function DIWrite
++ *
++ * @Description
++ * Writes the binary data of the DI entry to the output sink, whatever that
++ * may be for the DI implementation.
++ *
++ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
++ * @Input pvData data
++ * @Input uiSize pvData length
++ */
++void DIWrite(const OSDI_IMPL_ENTRY *psEntry, const void *pvData,
++             IMG_UINT32 uiSize);
++
++/*! @Function DIPrintf
++ *
++ * @Description
++ * Prints a formatted string to the DI entry.
++ *
++ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
++ * @Input pszFmt NUL-terminated format string
++ */
++void DIPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt, ...)
++	__printf(2, 3);
++
++/*! @Function DIVPrintf
++ *
++ * @Description
++ * Prints a formatted string to the DI entry. Equivalent to DIPrintf but takes
++ * a va_list instead of a variable number of arguments.
++ *
++ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
++ * @Input pszFmt NUL-terminated format string
++ * @Input pArgs va_list object
++ */
++void DIVPrintf(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszFmt,
++               va_list pArgs);
++
++/*! @Function DIPuts
++ *
++ * @Description
++ * Prints a string to the DI entry.
++ *
++ * @Input psEntry pointer to OSDI_IMPL_ENTRY object
++ * @Input pszStr NUL-terminated string
++ */
++void DIPuts(const OSDI_IMPL_ENTRY *psEntry, const IMG_CHAR *pszStr);
++
++/*! @Function DIHasOverflowed
++ *
++ * @Description
++ * Checks if the DI buffer has overflowed.
++ *
++ * @Return IMG_TRUE if the buffer overflowed
++ */
++IMG_BOOL DIHasOverflowed(const OSDI_IMPL_ENTRY *psEntry);
++
++#endif /* DI_SERVER_H */
+diff --git a/drivers/gpu/drm/img-rogue/dllist.h b/drivers/gpu/drm/img-rogue/dllist.h
+new file mode 100644
+index 000000000000..fa73dff59c44
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/dllist.h
+@@ -0,0 +1,408 @@
++/*************************************************************************/ /*!
++@File
++@Title Double linked list header
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Double linked list interface
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef DLLIST_H ++#define DLLIST_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++/*! ++ Pointer to a linked list node ++*/ ++typedef struct DLLIST_NODE_ *PDLLIST_NODE; ++ ++ ++/*! ++ Node in a linked list ++*/ ++/* ++ * Note: the following structure's size is architecture-dependent and clients ++ * may need to create a mirror of the structure definition if it needs to be ++ * used in a structure shared between host and device. ++ * Consider such clients if any changes are made to this structure. ++ */ ++typedef struct DLLIST_NODE_ ++{ ++ struct DLLIST_NODE_ *psPrevNode; ++ struct DLLIST_NODE_ *psNextNode; ++} DLLIST_NODE; ++ ++ ++/*! ++ Static initialiser ++*/ ++#define DECLARE_DLLIST(n) \ ++DLLIST_NODE (n) = {&(n), &(n)} ++ ++/*************************************************************************/ /*! ++@Function dllist_foreach_node ++ ++@Description Walk through all the nodes on the list. ++ Safe against removal of (node). 
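++
++             A minimal sketch of a typical call site (MY_ITEM and sItemList
++             are hypothetical caller-side names; MY_ITEM is assumed to embed
++             a DLLIST_NODE member called sListNode):
++
++                 DLLIST_NODE *psThis, *psNext;
++
++                 dllist_foreach_node(&sItemList, psThis, psNext)
++                 {
++                     MY_ITEM *psItem =
++                         IMG_CONTAINER_OF(psThis, MY_ITEM, sListNode);
++                     // psThis may be removed here; psNext was saved up front
++                 }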
++ ++@Input list_head List node to start the operation ++@Input node Current list node ++@Input next Node after the current one ++ ++*/ ++/*****************************************************************************/ ++#define dllist_foreach_node(list_head, node, next) \ ++ for ((node) = (list_head)->psNextNode, (next) = (node)->psNextNode; \ ++ (node) != (list_head); \ ++ (node) = (next), (next) = (node)->psNextNode) ++ ++#define dllist_foreach_node_backwards(list_head, node, prev) \ ++ for ((node) = (list_head)->psPrevNode, (prev) = (node)->psPrevNode; \ ++ (node) != (list_head); \ ++ (node) = (prev), (prev) = (node)->psPrevNode) ++ ++ ++/*************************************************************************/ /*! ++@Function dllist_foreach ++ ++@Description Simplification of dllist_foreach_node. ++ Walk through all the nodes on the list. ++ Safe against removal of currently-iterated node. ++ ++ Adds utility-macro dllist_cur() to typecast the current node. ++ ++@Input list_head List node to start the operation ++ ++*/ ++/*****************************************************************************/ ++#define dllist_foreach(list_head) \ ++ for (DLLIST_NODE *_DllNode = (list_head).psNextNode, *_DllNext = _DllNode->psNextNode; \ ++ _DllNode != &(list_head); \ ++ _DllNode = _DllNext, _DllNext = _DllNode->psNextNode) ++ ++#define dllist_foreach_backwards(list_head) \ ++ for (DLLIST_NODE *_DllNode = (list_head).psPrevNode, *_DllPrev = _DllNode->psPrevNode; \ ++ _DllNode != &(list_head); \ ++ _DllNode = _DllPrev, _DllPrev = _DllNode->psPrevNode) ++ ++#define dllist_cur(type, member) IMG_CONTAINER_OF(_DllNode, type, member) ++ ++/*************************************************************************/ /*! ++@Function dllist_init ++ ++@Description Initialize a new double linked list ++ ++@Input psListHead List head Node ++ ++*/ ++/*****************************************************************************/ ++static INLINE ++void dllist_init(PDLLIST_NODE psListHead) ++{ ++ psListHead->psPrevNode = psListHead; ++ psListHead->psNextNode = psListHead; ++} ++ ++/*************************************************************************/ /*! ++@Function dllist_is_empty ++ ++@Description Returns whether the list is empty ++ ++@Input psListHead List head Node ++ ++*/ ++/*****************************************************************************/ ++static INLINE ++bool dllist_is_empty(PDLLIST_NODE psListHead) ++{ ++ return ((psListHead->psPrevNode == psListHead) ++ && (psListHead->psNextNode == psListHead)); ++} ++ ++/*************************************************************************/ /*! ++@Function dllist_add_to_head ++ ++@Description Add psNewNode to head of list psListHead ++ ++@Input psListHead Head Node ++@Input psNewNode New Node ++ ++*/ ++/*****************************************************************************/ ++static INLINE ++void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode) ++{ ++ PDLLIST_NODE psTmp; ++ ++ psTmp = psListHead->psNextNode; ++ ++ psListHead->psNextNode = psNewNode; ++ psNewNode->psNextNode = psTmp; ++ ++ psTmp->psPrevNode = psNewNode; ++ psNewNode->psPrevNode = psListHead; ++} ++ ++ ++/*************************************************************************/ /*! 
++@Function dllist_add_to_tail
++
++@Description Add psNewNode to tail of list psListHead
++
++@Input psListHead Head Node
++@Input psNewNode New Node
++
++*/
++/*****************************************************************************/
++static INLINE
++void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
++{
++	PDLLIST_NODE psTmp;
++
++	psTmp = psListHead->psPrevNode;
++
++	psListHead->psPrevNode = psNewNode;
++	psNewNode->psPrevNode = psTmp;
++
++	psTmp->psNextNode = psNewNode;
++	psNewNode->psNextNode = psListHead;
++}
++
++/*************************************************************************/ /*!
++@Function dllist_node_is_in_list
++
++@Description Returns true if psNode is in a list
++
++@Input psNode List node
++
++*/
++/*****************************************************************************/
++static INLINE
++bool dllist_node_is_in_list(PDLLIST_NODE psNode)
++{
++	return (psNode->psNextNode != NULL);
++}
++
++/*************************************************************************/ /*!
++@Function dllist_get_next_node
++
++@Description Returns the list node after psListHead or NULL if psListHead is
++             the only element in the list.
++
++@Input psListHead List node to start the operation
++
++*/
++/*****************************************************************************/
++static INLINE
++PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead)
++{
++	if (psListHead->psNextNode == psListHead)
++	{
++		return NULL;
++	}
++	else
++	{
++		return psListHead->psNextNode;
++	}
++}
++
++/*************************************************************************/ /*!
++@Function dllist_get_prev_node
++
++@Description Returns the list node preceding psListHead or NULL if
++             psListHead is the only element in the list.
++
++@Input psListHead List node to start the operation
++
++*/
++/*****************************************************************************/
++static INLINE
++PDLLIST_NODE dllist_get_prev_node(PDLLIST_NODE psListHead)
++{
++	if (psListHead->psPrevNode == psListHead)
++	{
++		return NULL;
++	}
++	else
++	{
++		return psListHead->psPrevNode;
++	}
++}
++
++/*************************************************************************/ /*!
++@Function dllist_remove_node
++
++@Description Removes psListNode from the list where it currently belongs
++
++@Input psListNode List node to be removed
++
++*/
++/*****************************************************************************/
++static INLINE
++void dllist_remove_node(PDLLIST_NODE psListNode)
++{
++	psListNode->psNextNode->psPrevNode = psListNode->psPrevNode;
++	psListNode->psPrevNode->psNextNode = psListNode->psNextNode;
++
++	/* Clear the node to show it's not in a list */
++	psListNode->psPrevNode = NULL;
++	psListNode->psNextNode = NULL;
++}
++
++/*************************************************************************/ /*!
++@Function dllist_replace_head
++
++@Description Moves the list from psOldHead to psNewHead
++
++@Input psOldHead List node to be replaced. Will become a
++                 head node of an empty list.
++@Input psNewHead List node to be inserted. Must be an
++                 empty list head.
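++
++             Usage sketch (sOldHead and sNewHead are hypothetical local list
++             heads; sNewHead must start out as an empty head):
++
++                 DECLARE_DLLIST(sNewHead);
++
++                 dllist_replace_head(&sOldHead, &sNewHead);
++                 // sNewHead now heads the old nodes; sOldHead is empty again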
++
++*/
++/*****************************************************************************/
++static INLINE
++void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead)
++{
++	if (dllist_is_empty(psOldHead))
++	{
++		psNewHead->psNextNode = psNewHead;
++		psNewHead->psPrevNode = psNewHead;
++	}
++	else
++	{
++		/* Change the neighbouring nodes */
++		psOldHead->psNextNode->psPrevNode = psNewHead;
++		psOldHead->psPrevNode->psNextNode = psNewHead;
++
++		/* Copy the old data to the new node */
++		psNewHead->psNextNode = psOldHead->psNextNode;
++		psNewHead->psPrevNode = psOldHead->psPrevNode;
++
++		/* Remove links to the previous list */
++		psOldHead->psNextNode = psOldHead;
++		psOldHead->psPrevNode = psOldHead;
++	}
++}
++
++/**************************************************************************/ /*!
++@Function dllist_insert_list_at_head
++
++@Description Inserts the psInHead list into the head of the psOutHead list.
++             After this operation psOutHead will contain psInHead at the
++             head of the list, and the elements that were already in
++             psOutHead will be placed after psInHead (so at the tail of the
++             original list).
++
++@Input psOutHead List node psInHead will be inserted to.
++@Input psInHead List node to be inserted to psOutHead.
++                After this operation this becomes an empty list.
++*/ /***************************************************************************/
++static INLINE
++void dllist_insert_list_at_head(PDLLIST_NODE psOutHead, PDLLIST_NODE psInHead)
++{
++	PDLLIST_NODE psInHeadNextNode = psInHead->psNextNode;
++	PDLLIST_NODE psOutHeadNextNode = psOutHead->psNextNode;
++
++	if (!dllist_is_empty(psInHead))
++	{
++		psOutHead->psNextNode = psInHeadNextNode;
++		psInHeadNextNode->psPrevNode = psOutHead;
++
++		psInHead->psPrevNode->psNextNode = psOutHeadNextNode;
++		psOutHeadNextNode->psPrevNode = psInHead->psPrevNode;
++
++		dllist_init(psInHead);
++	}
++}
++
++/*************************************************************************/ /*!
++@Description Pointer to a dllist comparison callback function.
++@Input psNode Pointer to a node in a dllist.
++@Input psNext Pointer to psNode's next neighbour.
++*/ /**************************************************************************/
++typedef bool (*DLLIST_CMP_CB)(const DLLIST_NODE *psNode, const DLLIST_NODE *psNext);
++
++/*************************************************************************/ /*!
++@Function dllist_sort
++
++@Description Insert-sorts the list in place. The cmpr callback is passed the
++             current node and its next neighbour; the user-supplied function
++             decides whether the order of the two should be swapped and
++             returns true if a swap is required.
++
++@Input psListHead List Head to be sorted.
++ ++@Input cmpr Function pointer to use for sorting ++ ++*/ ++/*****************************************************************************/ ++static INLINE void dllist_sort(PDLLIST_NODE psListHead, ++ DLLIST_CMP_CB cmpr) ++{ ++ DLLIST_NODE *node, *next; ++ DLLIST_NODE sTempHead; ++ ++ dllist_init(&sTempHead); ++ ++ dllist_foreach_node(psListHead, node, next) ++ { ++ dllist_remove_node(node); ++ dllist_add_to_head(&sTempHead, node); ++ } ++ ++ while (!dllist_is_empty(&sTempHead)) ++ { ++ DLLIST_NODE *psSmallestNode = NULL; ++ ++ dllist_foreach_node(&sTempHead, node, next) ++ { ++ if (!psSmallestNode || cmpr(psSmallestNode, node)) ++ { ++ psSmallestNode = node; ++ } ++ } ++ ++ dllist_remove_node(psSmallestNode); ++ dllist_add_to_tail(psListHead, psSmallestNode); ++ } ++} ++ ++#endif /* DLLIST_H */ +diff --git a/drivers/gpu/drm/img-rogue/dma_km.h b/drivers/gpu/drm/img-rogue/dma_km.h +new file mode 100644 +index 000000000000..185d4ff29194 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/dma_km.h +@@ -0,0 +1,83 @@ ++/*************************************************************************/ /*! ++@File dma_km.h ++@Title DMA transfer module header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef DMA_KM_H ++#define DMA_KM_H ++ ++#if defined(__linux__) ++#include ++#else ++#define KERNEL_VERSION ++#endif ++ ++#include "pvrsrv_error.h" ++#include "img_types.h" ++#include "cache_ops.h" ++#include "device.h" ++#include "pmr.h" ++#include "pvrsrv_sync_km.h" ++#include "connection_server.h" ++ ++PVRSRV_ERROR DmaDeviceParams(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 *ui32DmaBuffAlign, ++ IMG_UINT32 *ui32DmaTransferMult); ++ ++PVRSRV_ERROR DmaSparseMappingTable(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT32 ui32SizeInPages, ++ IMG_BOOL *pbTable); ++ ++PVRSRV_ERROR DmaTransfer(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 uiNumDMAs, ++ PMR** ppsPMR, ++ IMG_UINT64 *puiAddress, ++ IMG_DEVMEM_OFFSET_T *puiOffset, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ IMG_BOOL bMemToDev, ++ PVRSRV_TIMELINE iUpdateTimeline); ++ ++PVRSRV_ERROR PVRSRVInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode); ++void PVRSRVDeInitialiseDMA(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++#endif /* DMA_KM_H */ +diff --git a/drivers/gpu/drm/img-rogue/dma_support.c b/drivers/gpu/drm/img-rogue/dma_support.c +new file mode 100644 +index 000000000000..f7a4f68bccc4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/dma_support.c +@@ -0,0 +1,523 @@ ++/*************************************************************************/ /*! ++@File dma_support.c ++@Title System DMA support ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Provides a contiguous memory allocator (i.e. DMA allocator); ++ APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA) ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++/* Kernel headers for the device model, DMA mapping, page allocation, kmap
++ * and pgprot_* (exact original header list assumed from the symbols used
++ * in this file) */
++#include <linux/version.h>
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/gfp.h>
++#include <linux/highmem.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++
++#include "allocmem.h"
++#include "dma_support.h"
++#include "pvr_vmap.h"
++#include "kernel_compatibility.h"
++
++#define DMA_MAX_IOREMAP_ENTRIES 2
++static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE;
++static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}};
++
++static void*
++SysDmaAcquireKernelAddress(struct page *psPage, IMG_UINT64 ui64Size, DMA_ALLOC *psDmaAlloc)
++{
++	IMG_BOOL bPageByPage = IMG_TRUE;
++	IMG_UINT32 uiIdx;
++	void *pvVirtAddr = NULL;
++	IMG_UINT32 ui32PgCount = (IMG_UINT32)(ui64Size >> OSGetPageShift());
++	PVRSRV_DEVICE_NODE *psDevNode = OSAllocZMemNoStats(sizeof(*psDevNode));
++	PVRSRV_DEVICE_CONFIG *psDevConfig = OSAllocZMemNoStats(sizeof(*psDevConfig));
++	struct page **pagearray = OSAllocZMemNoStats(ui32PgCount * sizeof(struct page *));
++	void *pvOSDevice = psDmaAlloc->pvOSDevice;
++#if defined(CONFIG_ARM64)
++	pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
++#else
++	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
++#endif
++
++	/* Validate all required dynamic tmp buffer allocations */
++	if (psDevNode == NULL || psDevConfig == NULL || pagearray == NULL)
++	{
++		if (psDevNode)
++		{
++			OSFreeMem(psDevNode);
++		}
++
++		if (psDevConfig)
++		{
++			OSFreeMem(psDevConfig);
++		}
++
++		if (pagearray)
++		{
++			OSFreeMem(pagearray);
++		}
++
++		goto e0;
++	}
++
++	/* Fake psDevNode->psDevConfig->pvOSDevice */
++	psDevConfig->pvOSDevice = pvOSDevice;
++	psDevNode->psDevConfig = psDevConfig;
++
++	/* Evict any page data contents from d-cache */
++	for (uiIdx = 0; uiIdx < ui32PgCount; uiIdx++)
++	{
++		void *pvVirtStart, *pvVirtEnd;
++		IMG_CPU_PHYADDR sCPUPhysStart, sCPUPhysEnd;
++
++		/* Prepare array required for vmap */
++		pagearray[uiIdx] = &psPage[uiIdx];
++
++		if (bPageByPage)
++		{
++#if defined(CONFIG_64BIT)
++			bPageByPage = IMG_FALSE;
++
++			pvVirtStart = kmap(&psPage[uiIdx]);
++			pvVirtEnd = pvVirtStart + ui64Size;
++
++			sCPUPhysStart.uiAddr = page_to_phys(&psPage[uiIdx]);
++			sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + ui64Size;
++			/* all pages have a kernel linear address, flush entire range */
++#else
++			pvVirtStart = kmap(&psPage[uiIdx]);
++			pvVirtEnd = pvVirtStart + PAGE_SIZE;
++
++			sCPUPhysStart.uiAddr = page_to_phys(&psPage[uiIdx]);
++			sCPUPhysEnd.uiAddr = sCPUPhysStart.uiAddr + PAGE_SIZE;
++			/* pages might be from HIGHMEM, need to kmap/flush per page */
++#endif
++
++			/* Fallback to range-based d-cache flush */
++			OSCPUCacheInvalidateRangeKM(psDevNode,
++			                            pvVirtStart, pvVirtEnd,
++			                            sCPUPhysStart, sCPUPhysEnd);
++
++			kunmap(&psPage[uiIdx]);
++		}
++	}
++
++	/* Remap pages into VMALLOC space */
++	pvVirtAddr = pvr_vmap(pagearray, ui32PgCount, VM_READ | VM_WRITE, prot);
++	psDmaAlloc->PageProps = prot;
++
++	/* Clean-up tmp buffers */
++	OSFreeMem(psDevConfig);
++	OSFreeMem(psDevNode);
++	OSFreeMem(pagearray);
++
++e0:
++	return pvVirtAddr;
++}
++
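++/* A hedged usage sketch of the allocator below (psDev and ui64Len are
++ * hypothetical caller-side values, not names used elsewhere in this file):
++ *
++ *     DMA_ALLOC sAlloc = {0};
++ *
++ *     sAlloc.ui64Size = ui64Len;
++ *     sAlloc.pvOSDevice = psDev;       // a valid struct device pointer
++ *
++ *     if (SysDmaAllocMem(&sAlloc) == PVRSRV_OK)
++ *     {
++ *         // sAlloc.pvVirtAddr and sAlloc.sBusAddr are now valid
++ *         SysDmaFreeMem(&sAlloc);
++ *     }
++ */
++static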
void SysDmaReleaseKernelAddress(void *pvVirtAddr, IMG_UINT64 ui64Size, pgprot_t pgprot) ++{ ++ pvr_vunmap(pvVirtAddr, ui64Size >> OSGetPageShift(), pgprot); ++} ++ ++/*! ++****************************************************************************** ++ @Function SysDmaAllocMem ++ ++ @Description Allocates physically contiguous memory ++ ++ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ ++ error code ++ ******************************************************************************/ ++PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ struct device *psDev; ++ struct page *psPage; ++ size_t uiSize; ++ ++ if (psDmaAlloc == NULL || ++ psDmaAlloc->hHandle || ++ psDmaAlloc->pvVirtAddr || ++ psDmaAlloc->ui64Size == 0 || ++ psDmaAlloc->sBusAddr.uiAddr || ++ psDmaAlloc->pvOSDevice == NULL) ++ { ++ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); ++ psDev = (struct device *)psDmaAlloc->pvOSDevice; ++ ++ psDmaAlloc->hHandle = dma_alloc_coherent(psDev, uiSize, (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr, GFP_KERNEL); ++ ++ if (psDmaAlloc->hHandle) ++ { ++ psDmaAlloc->pvVirtAddr = psDmaAlloc->hHandle; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "Allocated DMA buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX, ++ psDmaAlloc->pvVirtAddr, ++ psDmaAlloc->sBusAddr.uiAddr, ++ uiSize)); ++ } ++ else if ((psPage = alloc_pages(GFP_KERNEL, get_order(uiSize)))) ++ { ++ psDmaAlloc->sBusAddr.uiAddr = dma_map_page(psDev, psPage, 0, uiSize, DMA_BIDIRECTIONAL); ++ if (dma_mapping_error(psDev, psDmaAlloc->sBusAddr.uiAddr)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "dma_map_page() failed, page 0x%p order %d", ++ psPage, ++ get_order(uiSize))); ++ __free_pages(psPage, get_order(uiSize)); ++ goto e0; ++ } ++ psDmaAlloc->psPage = psPage; ++ ++ psDmaAlloc->pvVirtAddr = SysDmaAcquireKernelAddress(psPage, uiSize, psDmaAlloc); ++ if (! psDmaAlloc->pvVirtAddr) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "SysDmaAcquireKernelAddress() failed, page 0x%p order %d", ++ psPage, ++ get_order(uiSize))); ++ dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL); ++ __free_pages(psPage, get_order(uiSize)); ++ goto e0; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "Allocated contiguous buffer V:0x%p P:0x%llx S:0x"IMG_SIZE_FMTSPECX, ++ psDmaAlloc->pvVirtAddr, ++ psDmaAlloc->sBusAddr.uiAddr, ++ uiSize)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Unable to allocate contiguous buffer, size: 0x"IMG_SIZE_FMTSPECX, uiSize)); ++ eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES; ++ } ++ ++e0: ++ PVR_LOG_RETURN_IF_FALSE((psDmaAlloc->pvVirtAddr), "DMA/CMA allocation failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES); ++ return eError; ++} ++ ++/*! 
++****************************************************************************** ++ @Function SysDmaFreeMem ++ ++ @Description Free physically contiguous memory ++ ++ @Return void ++ ******************************************************************************/ ++void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc) ++{ ++ size_t uiSize; ++ struct device *psDev; ++ ++ if (psDmaAlloc == NULL || ++ psDmaAlloc->ui64Size == 0 || ++ psDmaAlloc->pvOSDevice == NULL || ++ psDmaAlloc->pvVirtAddr == NULL || ++ psDmaAlloc->sBusAddr.uiAddr == 0) ++ { ++ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); ++ return; ++ } ++ ++ uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); ++ psDev = (struct device *)psDmaAlloc->pvOSDevice; ++ ++ if (psDmaAlloc->pvVirtAddr != psDmaAlloc->hHandle) ++ { ++ SysDmaReleaseKernelAddress(psDmaAlloc->pvVirtAddr, uiSize, psDmaAlloc->PageProps); ++ } ++ ++ if (! psDmaAlloc->hHandle) ++ { ++ struct page *psPage; ++ dma_unmap_page(psDev, psDmaAlloc->sBusAddr.uiAddr, uiSize, DMA_BIDIRECTIONAL); ++ psPage = psDmaAlloc->psPage; ++ __free_pages(psPage, get_order(uiSize)); ++ return; ++ } ++ ++ dma_free_coherent(psDev, uiSize, psDmaAlloc->hHandle, (dma_addr_t )psDmaAlloc->sBusAddr.uiAddr); ++} ++ ++/*! ++****************************************************************************** ++ @Function SysDmaRegisterForIoRemapping ++ ++ @Description Registers DMA_ALLOC for manual I/O remapping ++ ++ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ ++ error code ++ ******************************************************************************/ ++PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc) ++{ ++ size_t uiSize; ++ IMG_UINT32 ui32Idx; ++ IMG_BOOL bTabEntryFound = IMG_TRUE; ++ PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS; ++ ++ if (psDmaAlloc == NULL || ++ psDmaAlloc->ui64Size == 0 || ++ psDmaAlloc->pvOSDevice == NULL || ++ psDmaAlloc->pvVirtAddr == NULL || ++ psDmaAlloc->sBusAddr.uiAddr == 0) ++ { ++ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); ++ ++ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) ++ { ++ /* Check if an I/O remap entry exists for remapping */ ++ if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL) ++ { ++ PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0); ++ PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0); ++ break; ++ } ++ } ++ ++ if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES) ++ { ++ bTabEntryFound = IMG_FALSE; ++ } ++ ++ if (bTabEntryFound) ++ { ++ IMG_BOOL bSameVAddr, bSamePAddr, bSameSize; ++ ++ bSamePAddr = gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == psDmaAlloc->sBusAddr.uiAddr; ++ bSameVAddr = gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr; ++ bSameSize = gsDmaIoRemapArray[ui32Idx].ui64Size == uiSize; ++ ++ if (bSameVAddr) ++ { ++ if (bSamePAddr && bSameSize) ++ { ++ eError = PVRSRV_OK; ++ } ++ else ++ { ++ eError = PVRSRV_ERROR_ALREADY_EXISTS; ++ } ++ } ++ else ++ { ++ PVR_ASSERT(bSamePAddr == IMG_FALSE); ++ ++ gsDmaIoRemapArray[ui32Idx].ui64Size = uiSize; ++ gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr; ++ gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "DMA: register I/O remap: " ++ "VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX, ++ psDmaAlloc->pvVirtAddr, ++ psDmaAlloc->sBusAddr.uiAddr, ++ uiSize)); ++ ++ gbEnableDmaIoRemapping = IMG_TRUE; ++ eError = PVRSRV_OK; ++ } ++ } ++ ++ return eError; ++} ++ ++/*! 
++****************************************************************************** ++ @Function SysDmaDeregisterForIoRemapping ++ ++ @Description Deregisters DMA_ALLOC from manual I/O remapping ++ ++ @Return void ++ ******************************************************************************/ ++void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc) ++{ ++ size_t uiSize; ++ IMG_UINT32 ui32Idx; ++ ++ if (psDmaAlloc == NULL || ++ psDmaAlloc->ui64Size == 0 || ++ psDmaAlloc->pvOSDevice == NULL || ++ psDmaAlloc->pvVirtAddr == NULL || ++ psDmaAlloc->sBusAddr.uiAddr == 0) ++ { ++ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter"); ++ return; ++ } ++ ++ uiSize = PVR_ALIGN(psDmaAlloc->ui64Size, PAGE_SIZE); ++ ++ /* Remove specified entries from list of I/O remap entries */ ++ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) ++ { ++ if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr) ++ { ++ gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0; ++ gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL; ++ gsDmaIoRemapArray[ui32Idx].ui64Size = 0; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "DMA: deregister I/O remap: " ++ "VA: 0x%p, PA: 0x%llx, Size: 0x"IMG_SIZE_FMTSPECX, ++ psDmaAlloc->pvVirtAddr, ++ psDmaAlloc->sBusAddr.uiAddr, ++ uiSize)); ++ ++ break; ++ } ++ } ++ ++ /* Check if no other I/O remap entries exists for remapping */ ++ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) ++ { ++ if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL) ++ { ++ break; ++ } ++ } ++ ++ if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES) ++ { ++ /* No entries found so disable remapping */ ++ gbEnableDmaIoRemapping = IMG_FALSE; ++ } ++} ++ ++/*! ++****************************************************************************** ++ @Function SysDmaDevPAddrToCpuVAddr ++ ++ @Description Maps a DMA_ALLOC physical address to CPU virtual address ++ ++ @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL ++ ******************************************************************************/ ++IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size) ++{ ++ IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL; ++ DMA_ALLOC *psHeapDmaAlloc; ++ IMG_UINT32 ui32Idx; ++ ++ if (gbEnableDmaIoRemapping == IMG_FALSE) ++ { ++ return pvDMAVirtAddr; ++ } ++ ++ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) ++ { ++ psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; ++ if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr) ++ { ++ IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; ++ IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr; ++ ++ if (uiOffset < uiSpan) ++ { ++ PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan); ++ pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "DMA: remap: PA: 0x%llx => VA: 0x%p", ++ uiAddr, pvDMAVirtAddr)); ++ ++ break; ++ } ++ } ++ } ++ ++ return pvDMAVirtAddr; ++} ++ ++/*! ++****************************************************************************** ++ @Function SysDmaCpuVAddrToDevPAddr ++ ++ @Description Maps a DMA_ALLOC CPU virtual address to physical address ++ ++ @Return Non-zero value on success. 
Otherwise, a 0 ++ ******************************************************************************/ ++IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr) ++{ ++ IMG_UINT64 uiAddr = 0; ++ DMA_ALLOC *psHeapDmaAlloc; ++ IMG_UINT32 ui32Idx; ++ ++ if (gbEnableDmaIoRemapping == IMG_FALSE) ++ { ++ return uiAddr; ++ } ++ ++ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx) ++ { ++ psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx]; ++ if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr) ++ { ++ IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size; ++ IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr; ++ ++ if (uiOffset < uiSpan) ++ { ++ uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "DMA: remap: VA: 0x%p => PA: 0x%llx", ++ pvDMAVirtAddr, uiAddr)); ++ ++ break; ++ } ++ } ++ } ++ ++ return uiAddr; ++} ++ ++/****************************************************************************** ++ End of file (dma_support.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/dma_support.h b/drivers/gpu/drm/img-rogue/dma_support.h +new file mode 100644 +index 000000000000..c1d22bddf523 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/dma_support.h +@@ -0,0 +1,117 @@ ++/*************************************************************************/ /*! ++@File dma_support.h ++@Title Device contiguous memory allocator and I/O re-mapper ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This header provides a contiguous memory allocator API; mainly ++ used for allocating / ioremapping (DMA/PA <-> CPU/VA) ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef DMA_SUPPORT_H ++#define DMA_SUPPORT_H ++ ++#include "osfunc.h" ++#include "pvrsrv.h" ++ ++typedef struct _DMA_ALLOC_ ++{ ++ IMG_UINT64 ui64Size; ++ IMG_CPU_VIRTADDR pvVirtAddr; ++ IMG_DEV_PHYADDR sBusAddr; ++ IMG_HANDLE hHandle; ++#if defined(__linux__) ++ struct page *psPage; ++ pgprot_t PageProps; ++#endif ++ void *pvOSDevice; ++} DMA_ALLOC; ++ ++/*! ++******************************************************************************* ++ @Function SysDmaAllocMem ++ @Description Allocates physically contiguous memory ++ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code ++******************************************************************************/ ++PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc); ++ ++/*! ++******************************************************************************* ++ @Function SysDmaFreeMem ++ @Description Free physically contiguous memory ++ @Return void ++******************************************************************************/ ++void SysDmaFreeMem(DMA_ALLOC *psCmaAlloc); ++ ++/*! ++******************************************************************************* ++ @Function SysDmaRegisterForIoRemapping ++ @Description Registers DMA_ALLOC for manual I/O remapping ++ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code ++******************************************************************************/ ++PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); ++ ++/*! ++******************************************************************************* ++ @Function SysDmaDeregisterForIoRemapping ++ @Description Deregisters DMA_ALLOC from manual I/O remapping ++ @Return void ++******************************************************************************/ ++void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc); ++ ++/*! ++******************************************************************************* ++ @Function SysDmaDevPAddrToCpuVAddr ++ @Description Maps a DMA_ALLOC physical address to CPU virtual address ++ @Return IMG_CPU_VIRTADDR on success. Otherwise, a NULL ++******************************************************************************/ ++IMG_CPU_VIRTADDR ++SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size); ++ ++/*! ++******************************************************************************* ++ @Function SysDmaCpuVAddrToDevPAddr ++ @Description Maps a DMA_ALLOC CPU virtual address to physical address ++ @Return Non-zero value on success. 
Otherwise, a 0 ++******************************************************************************/ ++IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr); ++ ++#endif /* DMA_SUPPORT_H */ ++ ++/****************************************************************************** ++ End of file (dma_support.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/drm_netlink_gem.c b/drivers/gpu/drm/img-rogue/drm_netlink_gem.c +new file mode 100644 +index 000000000000..467e1f4edb6b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/drm_netlink_gem.c +@@ -0,0 +1,143 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#endif ++ ++#include "drm_netlink_gem.h" ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++#include ++#endif ++ ++#include ++ ++#include "kernel_compatibility.h" ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) ++static int netlink_gem_mmap_capsys(struct file *file, ++ struct vm_area_struct *vma) ++{ ++ struct drm_file *file_priv = file->private_data; ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_vma_offset_node *node; ++ struct drm_gem_object *obj = NULL; ++ int err; ++ ++ drm_vma_offset_lock_lookup(dev->vma_offset_manager); ++ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager, ++ vma->vm_pgoff, ++ vma_pages(vma)); ++ if (node) { ++ obj = container_of(node, struct drm_gem_object, vma_node); ++ ++ /* Don't mmap an object that is being destroyed */ ++ if (!kref_get_unless_zero(&obj->refcount)) ++ obj = NULL; ++ } ++ drm_vma_offset_unlock_lookup(dev->vma_offset_manager); ++ ++ if (!obj) ++ return -EINVAL; ++ ++ err = drm_vma_node_allow(node, file_priv); ++ if (!err) { ++ err = drm_gem_mmap(file, vma); ++ ++ drm_vma_node_revoke(node, file_priv); ++ } ++ ++ drm_gem_object_put(obj); ++ ++ return err; ++} ++ ++int netlink_gem_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ int err; ++ ++ err = drm_gem_mmap(file, vma); ++ if (!!err && capable(CAP_SYS_RAWIO)) ++ err = netlink_gem_mmap_capsys(file, vma); ++ ++ return err; ++} ++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */ ++int netlink_gem_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ struct drm_file *file_priv = file->private_data; ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_vma_offset_node *node; ++ struct drm_gem_object *obj; ++ int err; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager, ++ vma->vm_pgoff, ++ vma_pages(vma)); ++ if (!node) { ++ err = -EINVAL; ++ goto exit_unlock; ++ } ++ ++ /* Allow Netlink clients to mmap any object for reading */ ++ if (!capable(CAP_SYS_RAWIO) || (vma->vm_flags & VM_WRITE)) { ++ if (!drm_vma_node_is_allowed(node, file_priv)) { ++ err = -EACCES; ++ goto exit_unlock; ++ } ++ } ++ ++ obj = container_of(node, struct drm_gem_object, vma_node); ++ ++ err = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma); ++ ++exit_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return err; ++} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) */ +diff --git a/drivers/gpu/drm/img-rogue/drm_netlink_gem.h b/drivers/gpu/drm/img-rogue/drm_netlink_gem.h +new file mode 100644 +index 000000000000..3a3fcd4901e5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/drm_netlink_gem.h +@@ -0,0 +1,61 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#if !defined(__DRM_NETLINK_GEM_H__) ++#define __DRM_NETLINK_GEM_H__ ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++struct file; ++struct vm_area_struct; ++#else ++#include ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++#include ++#endif ++ ++int netlink_gem_mmap(struct file *file, struct vm_area_struct *vma); ++ ++#endif /* !defined(__DRM_NETLINK_GEM_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/drm_nulldisp_drv.c b/drivers/gpu/drm/img-rogue/drm_nulldisp_drv.c +new file mode 100644 +index 000000000000..b1273fcafd53 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/drm_nulldisp_drv.c +@@ -0,0 +1,2731 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include "pvr_linux_fence.h" ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#include ++#include ++#include ++#else ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) ++#include ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 1, 0)) ++#include ++#endif ++ ++#include "pvr_dma_resv.h" ++ ++#include "img_drm_fourcc_internal.h" ++#include ++ ++#include ++ ++#include "drm_nulldisp_drv.h" ++#if defined(LMA) ++#include "tc_drv.h" ++#include "drm_pdp_gem.h" ++#include "pdp_drm.h" ++#else ++#include "drm_nulldisp_gem.h" ++#endif ++#include "nulldisp_drm.h" ++#include "drm_netlink_gem.h" ++#include "drm_nulldisp_netlink.h" ++ ++#if defined(NULLDISP_USE_ATOMIC) ++#include ++#include ++#include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)) ++#include ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0) */ ++#endif ++ ++#include "kernel_compatibility.h" ++ ++#define DRIVER_NAME "nulldisp" ++#define DRIVER_DESC "Imagination Technologies Null DRM Display Driver" ++#define DRIVER_DATE "20150612" ++ ++#if defined(NULLDISP_USE_ATOMIC) ++#define NULLDISP_DRIVER_ATOMIC DRIVER_ATOMIC ++#else ++#define NULLDISP_DRIVER_ATOMIC 0 ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++#define NULLDISP_DRIVER_PRIME 0 ++#else ++#define NULLDISP_DRIVER_PRIME DRIVER_PRIME ++#endif ++ ++#define NULLDISP_FB_WIDTH_MIN 0 ++#define NULLDISP_FB_WIDTH_MAX 8192 ++#define NULLDISP_FB_HEIGHT_MIN 0 ++#define NULLDISP_FB_HEIGHT_MAX 8192 ++ ++#define NULLDISP_DEFAULT_WIDTH 640 ++#define NULLDISP_DEFAULT_HEIGHT 480 ++#define NULLDISP_DEFAULT_REFRESH_RATE 60 ++ ++#define NULLDISP_MAX_PLANES 3 ++ ++#if defined(NULLDISP_USE_ATOMIC) ++#define NULLDISP_NETLINK_TIMEOUT 5 ++#else ++#define NULLDISP_NETLINK_TIMEOUT 30 ++#endif ++#define NULLDISP_NETLINK_TIMEOUT_MAX 300 ++#define NULLDISP_NETLINK_TIMEOUT_MIN 1 ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++MODULE_IMPORT_NS(DMA_BUF); ++#endif ++ ++enum nulldisp_crtc_flip_status { ++ NULLDISP_CRTC_FLIP_STATUS_NONE = 0, ++#if !defined(NULLDISP_USE_ATOMIC) ++ NULLDISP_CRTC_FLIP_STATUS_PENDING, ++#endif ++ NULLDISP_CRTC_FLIP_STATUS_DONE, ++}; ++ ++struct nulldisp_flip_data { ++ struct dma_fence_cb base; ++ struct drm_crtc *crtc; ++ struct dma_fence *wait_fence; ++}; ++ ++struct nulldisp_crtc { ++ struct drm_crtc base; ++ struct delayed_work vb_work; ++#if defined(NULLDISP_USE_ATOMIC) ++ struct drm_framebuffer *fb; ++ struct completion flip_done; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) ++ struct completion copy_done; ++#endif ++#else ++ struct work_struct flip_work; ++ struct delayed_work flip_to_work; ++ struct delayed_work copy_to_work; ++ ++ struct completion flip_scheduled; ++ struct completion copy_done; ++#endif ++ ++ /* Reuse the drm_device event_lock to protect these */ ++ atomic_t flip_status; ++ struct drm_pending_vblank_event *flip_event; ++#if !defined(NULLDISP_USE_ATOMIC) ++ struct drm_framebuffer *old_fb; ++ struct nulldisp_flip_data *flip_data; ++#endif ++ bool flip_async; ++}; ++ ++struct nulldisp_display_device { ++ struct drm_device *dev; ++ ++ struct workqueue_struct *workqueue; ++ struct nulldisp_crtc *nulldisp_crtc; ++ struct nlpvrdpy *nlpvrdpy; ++#if defined(LMA) ++ struct pdp_gem_private *pdp_gem_priv; ++#endif ++#if (LINUX_VERSION_CODE < 
KERNEL_VERSION(4, 8, 0)) || \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) ++ struct drm_connector *connector; ++#endif ++}; ++ ++#if !defined(NULLDISP_USE_ATOMIC) ++struct nulldisp_framebuffer { ++ struct drm_framebuffer base; ++ struct drm_gem_object *obj[NULLDISP_MAX_PLANES]; ++}; ++ ++#define to_nulldisp_framebuffer(framebuffer) \ ++ container_of(framebuffer, struct nulldisp_framebuffer, base) ++#endif ++ ++struct nulldisp_module_params { ++ unsigned int hdisplay; ++ unsigned int vdisplay; ++ unsigned int vrefresh; ++ unsigned int updateto; ++}; ++ ++#define to_nulldisp_crtc(crtc) \ ++ container_of(crtc, struct nulldisp_crtc, base) ++ ++#if defined(LMA) ++#define obj_to_resv(obj) pdp_gem_get_resv(obj) ++#else ++#define obj_to_resv(obj) nulldisp_gem_get_resv(obj) ++#endif ++ ++/* ++ * The order of this array helps determine the order in which EGL configs are ++ * returned to an application using eglGetConfigs. As such, RGB 8888 formats ++ * should appear first, followed by RGB 565 configs. YUV configs should appear ++ * last. ++ */ ++static const uint32_t nulldisp_modeset_formats[] = { ++ DRM_FORMAT_XRGB8888, ++ DRM_FORMAT_ARGB8888, ++ DRM_FORMAT_RGB565, ++ DRM_FORMAT_ABGR2101010, ++#ifdef DRM_FORMAT_ABGR16161616F ++ DRM_FORMAT_ABGR16161616F, ++#endif ++ DRM_FORMAT_NV12, ++ DRM_FORMAT_NV21, ++ DRM_FORMAT_YUYV, ++ DRM_FORMAT_YUV444, ++ DRM_FORMAT_YUV420, ++ DRM_FORMAT_YVU420, ++}; ++ ++/* ++ * Note that nulldisp, being a no-hardware display controller driver, ++ * "supports" a number of different decompression hardware ++ * versions (V0, V1, V2 ...). Real hardware display controllers are ++ * likely to support only a single version. ++ */ ++static const uint64_t nulldisp_primary_plane_modifiers[] = { ++ DRM_FORMAT_MOD_LINEAR, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0_FIX, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V1, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V2, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V3, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V8, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12, ++ DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13, ++ DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_8x8_V13, ++ DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_8x8_V13, ++ DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_8x8_V13, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0_FIX, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V2, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V3, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V8, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12, ++ DRM_FORMAT_MOD_PVR_FBCDC_16x4_V13, ++ DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_16x4_V13, ++ DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V13, ++ DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V13, ++ DRM_FORMAT_MOD_PVR_FBCDC_32x2_V1, ++ DRM_FORMAT_MOD_PVR_FBCDC_32x2_V3, ++ DRM_FORMAT_MOD_PVR_FBCDC_32x2_V8, ++ DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10, ++ DRM_FORMAT_MOD_PVR_FBCDC_32x2_V12, ++ DRM_FORMAT_MOD_INVALID ++}; ++ ++static struct nulldisp_module_params module_params = { ++ .hdisplay = NULLDISP_DEFAULT_WIDTH, ++ .vdisplay = NULLDISP_DEFAULT_HEIGHT, ++ .vrefresh = NULLDISP_DEFAULT_REFRESH_RATE, ++ .updateto = NULLDISP_NETLINK_TIMEOUT, ++}; ++ ++static int updateto_param_set(const char *val, const struct kernel_param *kp); ++ ++static const struct kernel_param_ops updateto_ops = { ++ .set = updateto_param_set, ++ .get = param_get_uint, ++}; ++ ++module_param_named(width, module_params.hdisplay, uint, 0444); ++module_param_named(height, module_params.vdisplay,
uint, 0444); ++module_param_named(refreshrate, module_params.vrefresh, uint, 0444); ++module_param_cb(updateto, &updateto_ops, &module_params.updateto, 0644); ++ ++MODULE_PARM_DESC(width, "Preferred display width in pixels"); ++MODULE_PARM_DESC(height, "Preferred display height in pixels"); ++MODULE_PARM_DESC(refreshrate, "Preferred display refresh rate"); ++MODULE_PARM_DESC(updateto, "Preferred remote update timeout (in seconds)"); ++ ++/* ++ * Please use this function to obtain the module parameters instead of ++ * accessing the global "module_params" structure directly. ++ */ ++static inline const struct nulldisp_module_params * ++nulldisp_get_module_params(void) ++{ ++ return &module_params; ++} ++ ++static int updateto_param_set(const char *val, const struct kernel_param *kp) ++{ ++ unsigned int updateto; ++ int err; ++ ++ err = kstrtouint(val, 10, &updateto); ++ if (err) ++ return err; ++ ++ if (updateto < NULLDISP_NETLINK_TIMEOUT_MIN || ++ updateto > NULLDISP_NETLINK_TIMEOUT_MAX) ++ return -EINVAL; ++ ++ return param_set_uint(val, kp); ++} ++ ++static unsigned long nulldisp_netlink_timeout(void) ++{ ++ const struct nulldisp_module_params *module_params = ++ nulldisp_get_module_params(); ++ unsigned int updateto; ++ ++#if !defined(CHROMIUMOS_KERNEL) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) ++ kparam_block_sysfs_write(updateto); ++#else ++ kernel_param_lock(THIS_MODULE); ++#endif ++ ++ updateto = module_params->updateto; ++ ++#if !defined(CHROMIUMOS_KERNEL) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)) ++ kparam_unblock_sysfs_write(updateto); ++#else ++ kernel_param_unlock(THIS_MODULE); ++#endif ++ ++ return msecs_to_jiffies(updateto * 1000); ++} ++ ++/****************************************************************************** ++ * Linux compatibility functions ++ ******************************************************************************/ ++static inline void ++nulldisp_drm_fb_set_format(struct drm_framebuffer *fb, ++ u32 pixel_format) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++ fb->format = drm_format_info(pixel_format); ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++ const struct drm_format_info *format = drm_format_info(pixel_format); ++ ++ fb->pixel_format = pixel_format; ++ ++ fb->depth = format->depth; ++ fb->bits_per_pixel = format->depth ? 
(format->cpp[0] * 8) : 0; ++#else ++ fb->pixel_format = pixel_format; ++ ++ switch (pixel_format) { ++ case DRM_FORMAT_NV12: ++ case DRM_FORMAT_NV21: ++ case DRM_FORMAT_YUYV: ++ case DRM_FORMAT_YUV444: ++ case DRM_FORMAT_YUV420: ++ case DRM_FORMAT_YVU420: ++ /* Unused for YUV formats */ ++ fb->depth = 0; ++ fb->bits_per_pixel = 0; ++ break; ++ ++ default: /* RGB */ ++ drm_fb_get_bpp_depth(pixel_format, ++ &fb->depth, ++ &fb->bits_per_pixel); ++ } ++#endif ++} ++ ++static inline void nulldisp_drm_fb_set_modifier(struct drm_framebuffer *fb, ++ uint64_t value) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++ fb->modifier = value; ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ /* FB modifier values must be the same for all planes */ ++ fb->modifier[0] = value; ++ fb->modifier[1] = value; ++ fb->modifier[2] = value; ++ fb->modifier[3] = value; ++#else ++ /* Modifiers are not supported */ ++#endif ++} ++ ++/****************************************************************************** ++ * Plane functions ++ ******************************************************************************/ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) ++static bool nulldisp_primary_format_mod_supported(struct drm_plane *plane, ++ uint32_t format, ++ uint64_t modifier) ++{ ++ /* ++ * All 'nulldisp_modeset_formats' are supported for every modifier ++ * in the 'nulldisp_primary_plane_modifiers' array. ++ */ ++ return true; ++} ++#endif ++ ++#if defined(NULLDISP_USE_ATOMIC) ++static int nulldisp_plane_helper_atomic_check(struct drm_plane *plane, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)) ++ struct drm_plane_state *state) ++{ ++#else ++ struct drm_atomic_state *astate) ++{ ++ struct drm_plane_state *state = ++ drm_atomic_get_new_plane_state(astate, plane); ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) */ ++ struct drm_crtc_state *crtc_new_state; ++ ++ if (!state->crtc) ++ return 0; ++ ++ crtc_new_state = drm_atomic_get_new_crtc_state(state->state, ++ state->crtc); ++ ++ return drm_atomic_helper_check_plane_state(state, crtc_new_state, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ DRM_PLANE_NO_SCALING, ++ DRM_PLANE_NO_SCALING, ++#else ++ DRM_PLANE_HELPER_NO_SCALING, ++ DRM_PLANE_HELPER_NO_SCALING, ++#endif ++ false, true); ++} ++ ++static void ++nulldisp_plane_helper_atomic_update(struct drm_plane *plane, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)) ++ struct drm_plane_state *old_state) ++#else ++ struct drm_atomic_state *astate) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) */ ++{ ++ struct drm_plane_state *state = plane->state; ++ ++ if (state->crtc) { ++ struct nulldisp_crtc *nulldisp_crtc = ++ to_nulldisp_crtc(state->crtc); ++ ++ nulldisp_crtc->fb = state->fb; ++ } ++} ++ ++static const struct drm_plane_helper_funcs nulldisp_plane_helper_funcs = { ++ .prepare_fb = drm_gem_plane_helper_prepare_fb, ++ .atomic_check = nulldisp_plane_helper_atomic_check, ++ .atomic_update = nulldisp_plane_helper_atomic_update, ++}; ++ ++static const struct drm_plane_funcs nulldisp_plane_funcs = { ++ .update_plane = drm_atomic_helper_update_plane, ++ .disable_plane = drm_atomic_helper_disable_plane, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ .destroy = drm_plane_helper_destroy, ++#else ++ .destroy = drm_primary_helper_destroy, ++#endif ++ .reset = drm_atomic_helper_plane_reset, ++ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, ++ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, ++ .format_mod_supported = 
nulldisp_primary_format_mod_supported, ++}; ++#else /* defined(NULLDISP_USE_ATOMIC) */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) ++static int nulldisp_primary_helper_update(struct drm_plane *plane, ++ struct drm_crtc *crtc, ++ struct drm_framebuffer *fb, ++ int crtc_x, int crtc_y, ++ unsigned int crtc_w, ++ unsigned int crtc_h, ++ uint32_t src_x, uint32_t src_y, ++ uint32_t src_w, uint32_t src_h, ++ struct drm_modeset_acquire_ctx *ctx) ++{ ++ struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private; ++ struct drm_plane_state plane_state = { ++ .plane = plane, ++ .crtc = crtc, ++ .fb = fb, ++ .crtc_x = crtc_x, ++ .crtc_y = crtc_y, ++ .crtc_w = crtc_w, ++ .crtc_h = crtc_h, ++ .src_x = src_x, ++ .src_y = src_y, ++ .src_w = src_w, ++ .src_h = src_h, ++ .alpha = DRM_BLEND_ALPHA_OPAQUE, ++ .rotation = DRM_MODE_ROTATE_0, ++ }; ++ struct drm_crtc_state crtc_state = { ++ .crtc = crtc, ++ .enable = crtc->enabled, ++ .adjusted_mode = crtc->mode, ++ .mode = crtc->mode, ++ }; ++ struct drm_mode_set set = { ++ .fb = fb, ++ .crtc = crtc, ++ .mode = &crtc->mode, ++ .x = src_x >> 16, /* convert from fixed point */ ++ .y = src_y >> 16, /* convert from fixed point */ ++ .connectors = &nulldisp_dev->connector, ++ .num_connectors = 1, ++ }; ++ int err; ++ ++ BUG_ON(nulldisp_dev->connector->encoder == NULL); ++ BUG_ON(nulldisp_dev->connector->encoder->crtc != crtc); ++ ++ err = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state, ++ DRM_PLANE_HELPER_NO_SCALING, ++ DRM_PLANE_HELPER_NO_SCALING, ++ false, false); ++ if (err) ++ return err; ++ ++ if (!plane_state.visible) ++ return -EINVAL; ++ ++ return crtc->funcs->set_config(&set, ctx); ++} ++ ++static int nulldisp_primary_helper_disable(struct drm_plane *plane, ++ struct drm_modeset_acquire_ctx *ctx) ++{ ++ return -EINVAL; ++} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) */ ++ ++static const struct drm_plane_funcs nulldisp_plane_funcs = { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) ++ .update_plane = nulldisp_primary_helper_update, ++ .disable_plane = nulldisp_primary_helper_disable, ++#else ++ .update_plane = drm_primary_helper_update, ++ .disable_plane = drm_primary_helper_disable, ++#endif ++ .destroy = drm_primary_helper_destroy, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) ++ .format_mod_supported = nulldisp_primary_format_mod_supported, ++#endif ++}; ++#endif /* defined(NULLDISP_USE_ATOMIC) */ ++ ++/****************************************************************************** ++ * CRTC functions ++ ******************************************************************************/ ++ ++static bool ++nulldisp_crtc_helper_mode_fixup(struct drm_crtc *crtc, ++ const struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ /* ++ * Fix up mode so that it's compatible with the hardware. The results ++ * should be stored in adjusted_mode (i.e. mode should be untouched). ++ */ ++ return true; ++} ++ ++static void nulldisp_crtc_helper_disable(struct drm_crtc *crtc) ++{ ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ ++#if !defined(NULLDISP_USE_ATOMIC) ++ if (atomic_read(&nulldisp_crtc->flip_status) == ++ NULLDISP_CRTC_FLIP_STATUS_PENDING) ++ wait_for_completion(&nulldisp_crtc->flip_scheduled); ++ ++ /* ++ * Flush any outstanding page flip related work. The order this ++ * is done is important, to ensure there are no outstanding ++ * page flips. 
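++ * flip_work is flushed first because it is what queues the
++ * flip_to_work timeout; flushing in the opposite order could
++ * leave a freshly queued timeout behind.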
++ */ ++ flush_work(&nulldisp_crtc->flip_work); ++ flush_delayed_work(&nulldisp_crtc->flip_to_work); ++#endif ++ flush_delayed_work(&nulldisp_crtc->vb_work); ++ ++ drm_crtc_vblank_off(crtc); ++ flush_delayed_work(&nulldisp_crtc->vb_work); ++ ++ /* ++ * Vblank has been disabled, so the vblank handler shouldn't be ++ * able to reschedule itself. ++ */ ++ BUG_ON(cancel_delayed_work(&nulldisp_crtc->vb_work)); ++ ++ BUG_ON(atomic_read(&nulldisp_crtc->flip_status) != ++ NULLDISP_CRTC_FLIP_STATUS_NONE); ++ ++#if !defined(NULLDISP_USE_ATOMIC) ++ /* Flush any remaining dirty FB work */ ++ flush_delayed_work(&nulldisp_crtc->copy_to_work); ++#endif ++} ++ ++static void nulldisp_crtc_flip_complete(struct drm_crtc *crtc) ++{ ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ ++ /* The flipping process has been completed so reset the flip state */ ++ atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_NONE); ++ nulldisp_crtc->flip_async = false; ++ ++#if !defined(NULLDISP_USE_ATOMIC) ++ if (nulldisp_crtc->flip_data) { ++ dma_fence_put(nulldisp_crtc->flip_data->wait_fence); ++ kfree(nulldisp_crtc->flip_data); ++ nulldisp_crtc->flip_data = NULL; ++ } ++#endif ++ if (nulldisp_crtc->flip_event) { ++ drm_crtc_send_vblank_event(crtc, nulldisp_crtc->flip_event); ++ nulldisp_crtc->flip_event = NULL; ++ } ++ ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++} ++ ++#if defined(NULLDISP_USE_ATOMIC) ++static void nulldisp_crtc_helper_mode_set_nofb(struct drm_crtc *crtc) ++{ ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) ++static void nulldisp_crtc_helper_atomic_flush(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_state) ++{ ++#else ++static void nulldisp_crtc_helper_atomic_flush(struct drm_crtc *crtc, ++ struct drm_atomic_state *state) ++{ ++ struct drm_crtc_state *old_state = drm_atomic_get_new_crtc_state(state, crtc); ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ ++ if (!crtc->state->active || !old_state->active) ++ return; ++ ++ if (nulldisp_crtc->fb) { ++ struct nulldisp_display_device *nulldisp_dev = ++ crtc->dev->dev_private; ++ ++ reinit_completion(&nulldisp_crtc->flip_done); ++ ++ if (!nlpvrdpy_send_flip(nulldisp_dev->nlpvrdpy, ++ nulldisp_crtc->fb, ++ &nulldisp_crtc->fb->obj[0])) { ++ unsigned long res; ++ ++ res = wait_for_completion_timeout( ++ &nulldisp_crtc->flip_done, ++ nulldisp_netlink_timeout()); ++ ++ if (!res) ++ DRM_ERROR( ++ "timed out waiting for remote update\n"); ++ } ++ ++ nulldisp_crtc->fb = NULL; ++ } ++ ++ if (crtc->state->event) { ++ unsigned long flags; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) ++ nulldisp_crtc->flip_async = crtc->state->async_flip; ++#else ++ nulldisp_crtc->flip_async = !!(crtc->state->pageflip_flags ++ & DRM_MODE_PAGE_FLIP_ASYNC); ++#endif ++ if (nulldisp_crtc->flip_async) ++ WARN_ON(drm_crtc_vblank_get(crtc) != 0); ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ nulldisp_crtc->flip_event = crtc->state->event; ++ crtc->state->event = NULL; ++ ++ atomic_set(&nulldisp_crtc->flip_status, ++ NULLDISP_CRTC_FLIP_STATUS_DONE); ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++ ++ if (nulldisp_crtc->flip_async) ++ nulldisp_crtc_flip_complete(crtc); ++ } ++} ++ ++static void nulldisp_crtc_set_enabled(struct drm_crtc *crtc, bool enable) ++{ ++ if (enable) ++ drm_crtc_vblank_on(crtc); ++ else ++ 
nulldisp_crtc_helper_disable(crtc); ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) ++static void ++nulldisp_crtc_helper_atomic_enable(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_crtc_state) ++#else ++static void ++nulldisp_crtc_helper_atomic_enable(struct drm_crtc *crtc, ++ struct drm_atomic_state *state) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ ++{ ++ nulldisp_crtc_set_enabled(crtc, true); ++ ++ if (crtc->state->event) { ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ unsigned long flags; ++ ++ WARN_ON(drm_crtc_vblank_get(crtc) != 0); ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ nulldisp_crtc->flip_event = crtc->state->event; ++ crtc->state->event = NULL; ++ ++ atomic_set(&nulldisp_crtc->flip_status, ++ NULLDISP_CRTC_FLIP_STATUS_DONE); ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++ } ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) ++static void ++nulldisp_crtc_helper_atomic_disable(struct drm_crtc *crtc, ++ struct drm_crtc_state *old_crtc_state) ++#else ++static void ++nulldisp_crtc_helper_atomic_disable(struct drm_crtc *crtc, ++ struct drm_atomic_state *state) ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */ ++{ ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ ++ nulldisp_crtc_set_enabled(crtc, false); ++ ++ nulldisp_crtc->fb = NULL; ++ ++ if (crtc->state->event) { ++ unsigned long flags; ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ drm_crtc_send_vblank_event(crtc, crtc->state->event); ++ crtc->state->event = NULL; ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++ } ++} ++#else /* defined(NULLDISP_USE_ATOMIC) */ ++static void nulldisp_crtc_helper_dpms(struct drm_crtc *crtc, ++ int mode) ++{ ++ /* ++ * Change the power state of the display/pipe/port/etc. If the mode ++ * passed in is unsupported, the provider must use the next lowest ++ * power level. ++ */ ++} ++ ++static void nulldisp_crtc_helper_prepare(struct drm_crtc *crtc) ++{ ++ drm_crtc_vblank_off(crtc); ++ ++ /* ++ * Prepare the display/pipe/port/etc for a mode change e.g. 
put them ++ * in a low power state/turn them off ++ */ ++} ++ ++static void nulldisp_crtc_helper_commit(struct drm_crtc *crtc) ++{ ++ /* Turn the display/pipe/port/etc back on */ ++ ++ drm_crtc_vblank_on(crtc); ++} ++ ++static int ++nulldisp_crtc_helper_mode_set_base_atomic(struct drm_crtc *crtc, ++ struct drm_framebuffer *fb, ++ int x, int y, ++ enum mode_set_atomic atomic) ++{ ++ /* Set the display base address or offset from the base address */ ++ return 0; ++} ++ ++static int nulldisp_crtc_helper_mode_set_base(struct drm_crtc *crtc, ++ int x, int y, ++ struct drm_framebuffer *old_fb) ++{ ++ return nulldisp_crtc_helper_mode_set_base_atomic(crtc, ++ crtc->primary->fb, ++ x, ++ y, ++ 0); ++} ++ ++static int ++nulldisp_crtc_helper_mode_set(struct drm_crtc *crtc, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode, ++ int x, int y, ++ struct drm_framebuffer *old_fb) ++{ ++ /* Setup the new mode and/or framebuffer */ ++ return nulldisp_crtc_helper_mode_set_base(crtc, x, y, old_fb); ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++static void nulldisp_crtc_helper_load_lut(struct drm_crtc *crtc) ++{ ++} ++#endif ++#endif /* defined(NULLDISP_USE_ATOMIC) */ ++ ++static void nulldisp_crtc_destroy(struct drm_crtc *crtc) ++{ ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ ++ DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id); ++ ++ drm_crtc_cleanup(crtc); ++ ++ BUG_ON(atomic_read(&nulldisp_crtc->flip_status) != ++ NULLDISP_CRTC_FLIP_STATUS_NONE); ++ ++ kfree(nulldisp_crtc); ++} ++ ++#if !defined(NULLDISP_USE_ATOMIC) ++static void nulldisp_crtc_flip_done(struct nulldisp_crtc *nulldisp_crtc) ++{ ++ struct drm_crtc *crtc = &nulldisp_crtc->base; ++ ++ struct drm_framebuffer *old_fb; ++ ++ WARN_ON(atomic_read(&nulldisp_crtc->flip_status) != ++ NULLDISP_CRTC_FLIP_STATUS_PENDING); ++ ++ old_fb = nulldisp_crtc->old_fb; ++ nulldisp_crtc->old_fb = NULL; ++ ++ (void) nulldisp_crtc_helper_mode_set_base(crtc, crtc->x, crtc->y, ++ old_fb); ++ ++ atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_DONE); ++ ++ if (nulldisp_crtc->flip_async) ++ nulldisp_crtc_flip_complete(crtc); ++} ++ ++static bool nulldisp_set_flip_to(struct nulldisp_crtc *nulldisp_crtc) ++{ ++ struct drm_crtc *crtc = &nulldisp_crtc->base; ++ struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private; ++ ++ /* Returns false if work already queued, else true */ ++ return queue_delayed_work(nulldisp_dev->workqueue, ++ &nulldisp_crtc->flip_to_work, ++ nulldisp_netlink_timeout()); ++} ++ ++static bool nulldisp_set_copy_to(struct nulldisp_crtc *nulldisp_crtc) ++{ ++ struct drm_crtc *crtc = &nulldisp_crtc->base; ++ struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private; ++ ++ /* Returns false if work already queued, else true */ ++ return queue_delayed_work(nulldisp_dev->workqueue, ++ &nulldisp_crtc->copy_to_work, ++ nulldisp_netlink_timeout()); ++} ++ ++static void nulldisp_flip_to_work(struct work_struct *w) ++{ ++ struct delayed_work *dw = ++ container_of(w, struct delayed_work, work); ++ struct nulldisp_crtc *nulldisp_crtc = ++ container_of(dw, struct nulldisp_crtc, flip_to_work); ++ ++ if (atomic_read(&nulldisp_crtc->flip_status) == ++ NULLDISP_CRTC_FLIP_STATUS_PENDING) ++ nulldisp_crtc_flip_done(nulldisp_crtc); ++} ++ ++static void nulldisp_copy_to_work(struct work_struct *w) ++{ ++ struct delayed_work *dw = ++ container_of(w, struct delayed_work, work); ++ struct nulldisp_crtc *nulldisp_crtc = ++ container_of(dw, struct nulldisp_crtc, copy_to_work); ++ 
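++ /*
++ * This runs when the netlink timeout expires, or earlier if the
++ * remote "copied" callback flushes it; either way it unblocks
++ * the waiter in nulldisp_framebuffer_dirty().
++ */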
++ complete(&nulldisp_crtc->copy_done); ++} ++ ++static void nulldisp_flip_work(struct work_struct *w) ++{ ++ struct nulldisp_crtc *nulldisp_crtc = ++ container_of(w, struct nulldisp_crtc, flip_work); ++ struct drm_crtc *crtc = &nulldisp_crtc->base; ++ struct drm_device *dev = crtc->dev; ++ struct nulldisp_display_device *nulldisp_dev = dev->dev_private; ++ struct nulldisp_framebuffer *nulldisp_fb = ++ to_nulldisp_framebuffer(crtc->primary->fb); ++ ++ /* ++ * To prevent races with disconnect requests from user space, ++ * set the timeout before sending the flip request. ++ */ ++ nulldisp_set_flip_to(nulldisp_crtc); ++ ++ if (nlpvrdpy_send_flip(nulldisp_dev->nlpvrdpy, ++ &nulldisp_fb->base, ++ &nulldisp_fb->obj[0])) ++ goto fail_cancel; ++ ++ return; ++ ++fail_cancel: ++ /* ++ * We can't flush the work, as we are running on the same ++ * single threaded workqueue as the work to be flushed. ++ */ ++ cancel_delayed_work(&nulldisp_crtc->flip_to_work); ++ ++ nulldisp_crtc_flip_done(nulldisp_crtc); ++} ++ ++static void nulldisp_crtc_flip_cb(struct dma_fence *fence, ++ struct dma_fence_cb *cb) ++{ ++ struct nulldisp_flip_data *flip_data = ++ container_of(cb, struct nulldisp_flip_data, base); ++ struct drm_crtc *crtc = flip_data->crtc; ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ struct drm_device *dev = crtc->dev; ++ struct nulldisp_display_device *nulldisp_dev = dev->dev_private; ++ ++ (void) queue_work(nulldisp_dev->workqueue, ++ &nulldisp_crtc->flip_work); ++ ++ complete_all(&nulldisp_crtc->flip_scheduled); ++} ++ ++static void nulldisp_crtc_flip_schedule_cb(struct dma_fence *fence, ++ struct dma_fence_cb *cb) ++{ ++ struct nulldisp_flip_data *flip_data = ++ container_of(cb, struct nulldisp_flip_data, base); ++ int err = 0; ++ ++ if (flip_data->wait_fence) ++ err = dma_fence_add_callback(flip_data->wait_fence, ++ &flip_data->base, ++ nulldisp_crtc_flip_cb); ++ ++ if (!flip_data->wait_fence || err) { ++ if (err && err != -ENOENT) ++ DRM_ERROR("flip failed to wait on old buffer\n"); ++ nulldisp_crtc_flip_cb(flip_data->wait_fence, &flip_data->base); ++ } ++} ++ ++static int nulldisp_crtc_flip_schedule(struct drm_crtc *crtc, ++ struct drm_gem_object *obj, ++ struct drm_gem_object *old_obj) ++{ ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ struct dma_resv *resv = obj_to_resv(obj); ++ struct dma_resv *old_resv = obj_to_resv(old_obj); ++ struct nulldisp_flip_data *flip_data; ++ struct dma_fence *fence; ++ int err; ++ ++ flip_data = kmalloc(sizeof(*flip_data), GFP_KERNEL); ++ if (!flip_data) ++ return -ENOMEM; ++ ++ flip_data->crtc = crtc; ++ ++ ww_mutex_lock(&old_resv->lock, NULL); ++ flip_data->wait_fence = ++ dma_fence_get(dma_resv_get_excl(old_resv)); ++ ++ if (old_resv != resv) { ++ ww_mutex_unlock(&old_resv->lock); ++ ww_mutex_lock(&resv->lock, NULL); ++ } ++ ++ fence = dma_fence_get(dma_resv_get_excl(resv)); ++ ww_mutex_unlock(&resv->lock); ++ ++ nulldisp_crtc->flip_data = flip_data; ++ reinit_completion(&nulldisp_crtc->flip_scheduled); ++ atomic_set(&nulldisp_crtc->flip_status, ++ NULLDISP_CRTC_FLIP_STATUS_PENDING); ++ ++ if (fence) { ++ err = dma_fence_add_callback(fence, &flip_data->base, ++ nulldisp_crtc_flip_schedule_cb); ++ dma_fence_put(fence); ++ if (err && err != -ENOENT) ++ goto err_set_flip_status_none; ++ } ++ ++ if (!fence || err == -ENOENT) { ++ nulldisp_crtc_flip_schedule_cb(fence, &flip_data->base); ++ err = 0; ++ } ++ ++ return err; ++ ++err_set_flip_status_none: ++ atomic_set(&nulldisp_crtc->flip_status, 
NULLDISP_CRTC_FLIP_STATUS_NONE); ++ dma_fence_put(flip_data->wait_fence); ++ kfree(flip_data); ++ return err; ++} ++ ++static int nulldisp_crtc_page_flip(struct drm_crtc *crtc, ++ struct drm_framebuffer *fb, ++ struct drm_pending_vblank_event *event, ++ uint32_t page_flip_flags ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) ++ , struct drm_modeset_acquire_ctx *ctx ++#endif ++ ) ++{ ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ struct nulldisp_framebuffer *nulldisp_fb = to_nulldisp_framebuffer(fb); ++ struct nulldisp_framebuffer *nulldisp_old_fb = ++ to_nulldisp_framebuffer(crtc->primary->fb); ++ enum nulldisp_crtc_flip_status status; ++ unsigned long flags; ++ int err; ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ status = atomic_read(&nulldisp_crtc->flip_status); ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++ ++ if (status != NULLDISP_CRTC_FLIP_STATUS_NONE) ++ return -EBUSY; ++ ++ if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) { ++ err = drm_crtc_vblank_get(crtc); ++ if (err) ++ return err; ++ } ++ ++ nulldisp_crtc->old_fb = crtc->primary->fb; ++ nulldisp_crtc->flip_event = event; ++ nulldisp_crtc->flip_async = !!(page_flip_flags & ++ DRM_MODE_PAGE_FLIP_ASYNC); ++ ++ /* Set the crtc to point to the new framebuffer */ ++ crtc->primary->fb = fb; ++ ++ err = nulldisp_crtc_flip_schedule(crtc, nulldisp_fb->obj[0], ++ nulldisp_old_fb->obj[0]); ++ if (err) { ++ crtc->primary->fb = nulldisp_crtc->old_fb; ++ nulldisp_crtc->old_fb = NULL; ++ nulldisp_crtc->flip_event = NULL; ++ nulldisp_crtc->flip_async = false; ++ ++ DRM_ERROR("failed to schedule flip (err=%d)\n", err); ++ goto err_vblank_put; ++ } ++ ++ return 0; ++ ++err_vblank_put: ++ if (!(page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC)) ++ drm_crtc_vblank_put(crtc); ++ return err; ++} ++#endif /* !defined(NULLDISP_USE_ATOMIC) */ ++ ++static bool nulldisp_queue_vblank_work(struct nulldisp_crtc *nulldisp_crtc) ++{ ++ struct drm_crtc *crtc = &nulldisp_crtc->base; ++ struct nulldisp_display_device *nulldisp_dev = crtc->dev->dev_private; ++ int vrefresh; ++ const int vrefresh_default = 60; ++ ++ vrefresh = drm_mode_vrefresh(&crtc->hwmode); ++ if (!vrefresh) { ++ vrefresh = vrefresh_default; ++ DRM_INFO_ONCE( ++ "vertical refresh rate is zero, defaulting to %d\n", ++ vrefresh); ++ } ++ ++ /* Returns false if work already queued, else true */ ++ return queue_delayed_work(nulldisp_dev->workqueue, ++ &nulldisp_crtc->vb_work, ++ usecs_to_jiffies(1000000/vrefresh)); ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++static int nulldisp_enable_vblank(struct drm_crtc *crtc) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \ ++ (defined(CHROMIUMOS_KERNEL) && \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) ++static int nulldisp_enable_vblank(struct drm_device *dev, unsigned int pipe) ++#else ++static int nulldisp_enable_vblank(struct drm_device *dev, int pipe) ++#endif ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++ struct drm_device *dev = crtc->dev; ++ unsigned int pipe = drm_crtc_index(crtc); ++#endif ++ ++ struct nulldisp_display_device *nulldisp_dev = dev->dev_private; ++ ++ switch (pipe) { ++ case 0: ++ break; ++ default: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) ++ DRM_ERROR("invalid crtc %u\n", pipe); ++#else ++ DRM_ERROR("invalid crtc %d\n", pipe); ++#endif ++ return -EINVAL; ++ } ++ ++ if (!nulldisp_queue_vblank_work(nulldisp_dev->nulldisp_crtc)) { ++ DRM_ERROR("work already queued\n"); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++static void nulldisp_disable_vblank(struct drm_crtc *crtc) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) || \ ++ (defined(CHROMIUMOS_KERNEL) && \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) ++static void nulldisp_disable_vblank(struct drm_device *dev, unsigned int pipe) ++#else ++static void nulldisp_disable_vblank(struct drm_device *dev, int pipe) ++#endif ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++ struct drm_device *dev = crtc->dev; ++ unsigned int pipe = drm_crtc_index(crtc); ++#endif ++ ++ struct nulldisp_display_device *nulldisp_dev = dev->dev_private; ++ ++ switch (pipe) { ++ case 0: ++ break; ++ default: ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) ++ DRM_ERROR("invalid crtc %u\n", pipe); ++#else ++ DRM_ERROR("invalid crtc %d\n", pipe); ++#endif ++ return; ++ } ++ ++ /* ++ * Vblank events may be disabled from within the vblank handler, ++ * so don't wait for the work to complete. ++ */ ++ (void) cancel_delayed_work(&nulldisp_dev->nulldisp_crtc->vb_work); ++} ++ ++static const struct drm_crtc_helper_funcs nulldisp_crtc_helper_funcs = { ++ .mode_fixup = nulldisp_crtc_helper_mode_fixup, ++#if defined(NULLDISP_USE_ATOMIC) ++ .mode_set_nofb = nulldisp_crtc_helper_mode_set_nofb, ++ .atomic_flush = nulldisp_crtc_helper_atomic_flush, ++ .atomic_enable = nulldisp_crtc_helper_atomic_enable, ++ .atomic_disable = nulldisp_crtc_helper_atomic_disable, ++#else ++ .dpms = nulldisp_crtc_helper_dpms, ++ .prepare = nulldisp_crtc_helper_prepare, ++ .commit = nulldisp_crtc_helper_commit, ++ .mode_set = nulldisp_crtc_helper_mode_set, ++ .mode_set_base = nulldisp_crtc_helper_mode_set_base, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ .load_lut = nulldisp_crtc_helper_load_lut, ++#endif ++ .mode_set_base_atomic = nulldisp_crtc_helper_mode_set_base_atomic, ++ .disable = nulldisp_crtc_helper_disable, ++#endif /* defined(NULLDISP_USE_ATOMIC) */ ++}; ++ ++static const struct drm_crtc_funcs nulldisp_crtc_funcs = { ++ .destroy = nulldisp_crtc_destroy, ++#if defined(NULLDISP_USE_ATOMIC) ++ .reset = drm_atomic_helper_crtc_reset, ++ .set_config = drm_atomic_helper_set_config, ++ .page_flip = drm_atomic_helper_page_flip, ++ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, ++ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, ++#else ++ .reset = NULL, ++ .cursor_set = NULL, ++ .cursor_move = NULL, ++ .gamma_set = NULL, ++ .set_config = drm_crtc_helper_set_config, ++ .page_flip = nulldisp_crtc_page_flip, ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) ++ .enable_vblank = nulldisp_enable_vblank, ++ .disable_vblank = nulldisp_disable_vblank, ++#endif ++}; ++ ++static void nulldisp_handle_vblank(struct work_struct *w) ++{ ++ struct delayed_work *dw = ++ container_of(w, struct delayed_work, work); ++ struct nulldisp_crtc *nulldisp_crtc = ++ container_of(dw, struct nulldisp_crtc, vb_work); ++ struct drm_crtc *crtc = &nulldisp_crtc->base; ++ struct drm_device *dev = crtc->dev; ++ enum nulldisp_crtc_flip_status status; ++ ++ /* ++ * Reschedule the handler, if necessary. This is done before ++ * calling drm_crtc_vblank_put, so that the work can be cancelled ++ * if vblank events are disabled. 
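++ * drm_handle_vblank() only returns true while vblank events are
++ * enabled on the CRTC, so the next "interrupt" is emulated by
++ * re-queueing this delayed work one frame period later.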
++ */ ++ if (drm_handle_vblank(dev, 0)) ++ (void) nulldisp_queue_vblank_work(nulldisp_crtc); ++ ++ status = atomic_read(&nulldisp_crtc->flip_status); ++ if (status == NULLDISP_CRTC_FLIP_STATUS_DONE) { ++ if (!nulldisp_crtc->flip_async) ++ nulldisp_crtc_flip_complete(crtc); ++#if !defined(NULLDISP_USE_ATOMIC) ++ drm_crtc_vblank_put(crtc); ++#endif ++ } ++ ++} ++ ++static struct nulldisp_crtc * ++nulldisp_crtc_create(struct nulldisp_display_device *nulldisp_dev) ++{ ++ struct nulldisp_crtc *nulldisp_crtc; ++ struct drm_crtc *crtc; ++ struct drm_plane *primary; ++ ++ nulldisp_crtc = kzalloc(sizeof(*nulldisp_crtc), GFP_KERNEL); ++ if (!nulldisp_crtc) ++ goto err_return; ++ ++ primary = kzalloc(sizeof(*primary), GFP_KERNEL); ++ if (!primary) ++ goto err_free_crtc; ++ ++ crtc = &nulldisp_crtc->base; ++ ++ atomic_set(&nulldisp_crtc->flip_status, NULLDISP_CRTC_FLIP_STATUS_NONE); ++#if defined(NULLDISP_USE_ATOMIC) ++ init_completion(&nulldisp_crtc->flip_done); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) ++ init_completion(&nulldisp_crtc->copy_done); ++#endif ++#else ++ init_completion(&nulldisp_crtc->flip_scheduled); ++ init_completion(&nulldisp_crtc->copy_done); ++#endif ++ ++ if (drm_universal_plane_init(nulldisp_dev->dev, primary, 0, ++ &nulldisp_plane_funcs, ++ nulldisp_modeset_formats, ++ ARRAY_SIZE(nulldisp_modeset_formats), ++ nulldisp_primary_plane_modifiers, ++ DRM_PLANE_TYPE_PRIMARY, NULL)) { ++ goto err_free_primary; ++ } ++ ++#if defined(NULLDISP_USE_ATOMIC) ++ drm_plane_helper_add(primary, &nulldisp_plane_helper_funcs); ++#endif ++ ++ if (drm_crtc_init_with_planes(nulldisp_dev->dev, crtc, primary, ++ NULL, &nulldisp_crtc_funcs, NULL)) { ++ goto err_cleanup_plane; ++ } ++ ++ drm_crtc_helper_add(crtc, &nulldisp_crtc_helper_funcs); ++ ++ INIT_DELAYED_WORK(&nulldisp_crtc->vb_work, nulldisp_handle_vblank); ++#if !defined(NULLDISP_USE_ATOMIC) ++ INIT_WORK(&nulldisp_crtc->flip_work, nulldisp_flip_work); ++ INIT_DELAYED_WORK(&nulldisp_crtc->copy_to_work, nulldisp_copy_to_work); ++ INIT_DELAYED_WORK(&nulldisp_crtc->flip_to_work, nulldisp_flip_to_work); ++#endif ++ ++ DRM_DEBUG_DRIVER("[CRTC:%d]\n", crtc->base.id); ++ ++ return nulldisp_crtc; ++ ++err_cleanup_plane: ++ drm_plane_cleanup(primary); ++err_free_primary: ++ kfree(primary); ++err_free_crtc: ++ kfree(nulldisp_crtc); ++err_return: ++ return NULL; ++} ++ ++ ++/****************************************************************************** ++ * Connector functions ++ ******************************************************************************/ ++ ++static int ++nulldisp_validate_module_parameters(void) ++{ ++ const struct nulldisp_module_params *module_params = ++ nulldisp_get_module_params(); ++ ++ if (!module_params->hdisplay || ++ !module_params->vdisplay || ++ !module_params->vrefresh || ++ (module_params->hdisplay > NULLDISP_FB_WIDTH_MAX) || ++ (module_params->vdisplay > NULLDISP_FB_HEIGHT_MAX)) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static bool ++nulldisp_set_preferred_mode(struct drm_connector *connector, ++ uint32_t hdisplay, ++ uint32_t vdisplay, ++ uint32_t vrefresh) ++{ ++ struct drm_display_mode *mode; ++ ++ /* ++ * Mark the first mode, matching the hdisplay, vdisplay and ++ * vrefresh, preferred. 
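++ * e.g. with width=1920 height=1080 refreshrate=60, the first
++ * probed 1920x1080@60 mode is marked preferred.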
++ */ ++ list_for_each_entry(mode, &connector->probed_modes, head) ++ if (mode->hdisplay == hdisplay && ++ mode->vdisplay == vdisplay && ++ drm_mode_vrefresh(mode) == vrefresh) { ++ mode->type |= DRM_MODE_TYPE_PREFERRED; ++ return true; ++ } ++ ++ return false; ++} ++ ++static bool ++nulldisp_connector_add_preferred_mode(struct drm_connector *connector, ++ uint32_t hdisplay, ++ uint32_t vdisplay, ++ uint32_t vrefresh) ++{ ++ struct drm_display_mode *preferred_mode; ++ ++ preferred_mode = drm_cvt_mode(connector->dev, ++ hdisplay, vdisplay, vrefresh, ++ false, false, false); ++ if (!preferred_mode) { ++ DRM_DEBUG_DRIVER("[CONNECTOR:%s]:create mode %dx%d@%d failed\n", ++ connector->name, ++ hdisplay, ++ vdisplay, ++ vrefresh); ++ ++ return false; ++ } ++ ++ preferred_mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; ++ ++ drm_mode_probed_add(connector, preferred_mode); ++ ++ return true; ++} ++ ++/* ++ * Gather modes. Here we can get the EDID data from the monitor and ++ * turn it into drm_display_mode structures. ++ */ ++static int ++nulldisp_connector_helper_get_modes(struct drm_connector *connector) ++{ ++ int modes_count; ++ struct drm_device *dev = connector->dev; ++ const struct nulldisp_module_params *module_params = ++ nulldisp_get_module_params(); ++ uint32_t hdisplay = module_params->hdisplay; ++ uint32_t vdisplay = module_params->vdisplay; ++ uint32_t vrefresh = module_params->vrefresh; ++ ++ /* Add common modes */ ++ modes_count = drm_add_modes_noedid(connector, ++ dev->mode_config.max_width, ++ dev->mode_config.max_height); ++ ++ /* ++ * Check if any of the connector modes match the preferred mode ++ * criteria specified by the module parameters. If the mode is ++ * found - flag it as preferred. Otherwise create the preferred ++ * mode based on the module parameters criteria, and flag it as ++ * preferred. ++ */ ++ if (!nulldisp_set_preferred_mode(connector, ++ hdisplay, ++ vdisplay, ++ vrefresh)) ++ if (nulldisp_connector_add_preferred_mode(connector, ++ hdisplay, ++ vdisplay, ++ vrefresh)) ++ modes_count++; ++ ++ /* Sort the connector modes by relevance */ ++ drm_mode_sort(&connector->probed_modes); ++ ++ return modes_count; ++} ++ ++static int ++nulldisp_connector_helper_mode_valid(struct drm_connector *connector, ++ struct drm_display_mode *mode) ++{ ++ /* ++ * This function is called on each gathered mode (e.g. via EDID) ++ * and gives the driver a chance to reject it if the hardware ++ * cannot support it. 
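++ * With no real hardware behind this driver, every gathered
++ * mode is reported as valid.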
++ */ ++ return MODE_OK; ++} ++ ++#if !defined(NULLDISP_USE_ATOMIC) ++static struct drm_encoder * ++nulldisp_connector_helper_best_encoder(struct drm_connector *connector) ++{ ++ /* Pick the first encoder we find */ ++ if (connector->encoder_ids[0] != 0) { ++ struct drm_encoder *encoder; ++ ++ encoder = drm_encoder_find(connector->dev, ++ NULL, ++ connector->encoder_ids[0]); ++ if (encoder) { ++ DRM_DEBUG_DRIVER( ++ "[ENCODER:%d:%s] best for [CONNECTOR:%d:%s]\n", ++ encoder->base.id, ++ encoder->name, ++ connector->base.id, ++ connector->name); ++ return encoder; ++ } ++ } ++ ++ return NULL; ++} ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) ++static enum drm_connector_status ++nulldisp_connector_detect(struct drm_connector *connector, ++ bool force) ++{ ++ /* Return whether or not a monitor is attached to the connector */ ++ return connector_status_connected; ++} ++#endif ++ ++static void nulldisp_connector_destroy(struct drm_connector *connector) ++{ ++ DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", ++ connector->base.id, ++ connector->name); ++ ++ drm_connector_update_edid_property(connector, NULL); ++ drm_connector_cleanup(connector); ++ ++ kfree(connector); ++} ++ ++static void nulldisp_connector_force(struct drm_connector *connector) ++{ ++} ++ ++static const struct drm_connector_helper_funcs ++nulldisp_connector_helper_funcs = { ++ .get_modes = nulldisp_connector_helper_get_modes, ++ .mode_valid = nulldisp_connector_helper_mode_valid, ++ /* ++ * For atomic, don't set atomic_best_encoder or best_encoder. This will ++ * cause the DRM core to fallback to drm_atomic_helper_best_encoder(). ++ * This is fine as we only have a single connector and encoder. ++ */ ++#if !defined(NULLDISP_USE_ATOMIC) ++ .best_encoder = nulldisp_connector_helper_best_encoder, ++#endif ++}; ++ ++static const struct drm_connector_funcs nulldisp_connector_funcs = { ++#if defined(NULLDISP_USE_ATOMIC) ++ .reset = drm_atomic_helper_connector_reset, ++ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, ++ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, ++#else ++ .dpms = drm_helper_connector_dpms, ++ .reset = NULL, ++#endif ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) ++ .detect = nulldisp_connector_detect, ++#endif ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .destroy = nulldisp_connector_destroy, ++ .force = nulldisp_connector_force, ++}; ++ ++static struct drm_connector * ++nulldisp_connector_create(struct nulldisp_display_device *nulldisp_dev, ++ int type) ++{ ++ struct drm_connector *connector; ++ ++ connector = kzalloc(sizeof(*connector), GFP_KERNEL); ++ if (!connector) ++ return NULL; ++ ++ drm_connector_init(nulldisp_dev->dev, ++ connector, ++ &nulldisp_connector_funcs, ++ type); ++ drm_connector_helper_add(connector, &nulldisp_connector_helper_funcs); ++ ++ connector->dpms = DRM_MODE_DPMS_OFF; ++ connector->interlace_allowed = false; ++ connector->doublescan_allowed = false; ++ connector->display_info.subpixel_order = SubPixelUnknown; ++ ++ DRM_DEBUG_DRIVER("[CONNECTOR:%d:%s]\n", ++ connector->base.id, ++ connector->name); ++ ++ return connector; ++} ++ ++ ++/****************************************************************************** ++ * Encoder functions ++ ******************************************************************************/ ++ ++static void nulldisp_encoder_helper_dpms(struct drm_encoder *encoder, ++ int mode) ++{ ++ /* ++ * Set the display power state or active encoder based on the mode. 
If ++ * the mode passed in is unsupported, the provider must use the next ++ * lowest power level. ++ */ ++} ++ ++static bool ++nulldisp_encoder_helper_mode_fixup(struct drm_encoder *encoder, ++ const struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ /* ++ * Fix up mode so that it's compatible with the hardware. The results ++ * should be stored in adjusted_mode (i.e. mode should be untouched). ++ */ ++ return true; ++} ++ ++static void nulldisp_encoder_helper_prepare(struct drm_encoder *encoder) ++{ ++ /* ++ * Prepare the encoder for a mode change e.g. set the active encoder ++ * accordingly/turn the encoder off ++ */ ++} ++ ++static void nulldisp_encoder_helper_commit(struct drm_encoder *encoder) ++{ ++ /* Turn the encoder back on/set the active encoder */ ++} ++ ++static void ++nulldisp_encoder_helper_mode_set(struct drm_encoder *encoder, ++ struct drm_display_mode *mode, ++ struct drm_display_mode *adjusted_mode) ++{ ++ /* Setup the encoder for the new mode */ ++} ++ ++static void nulldisp_encoder_destroy(struct drm_encoder *encoder) ++{ ++ DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", encoder->base.id, encoder->name); ++ ++ drm_encoder_cleanup(encoder); ++ kfree(encoder); ++} ++ ++static const struct drm_encoder_helper_funcs nulldisp_encoder_helper_funcs = { ++ .dpms = nulldisp_encoder_helper_dpms, ++ .mode_fixup = nulldisp_encoder_helper_mode_fixup, ++ .prepare = nulldisp_encoder_helper_prepare, ++ .commit = nulldisp_encoder_helper_commit, ++ .mode_set = nulldisp_encoder_helper_mode_set, ++ .detect = NULL, ++ .disable = NULL, ++}; ++ ++static const struct drm_encoder_funcs nulldisp_encoder_funcs = { ++ .reset = NULL, ++ .destroy = nulldisp_encoder_destroy, ++}; ++ ++static struct drm_encoder * ++nulldisp_encoder_create(struct nulldisp_display_device *nulldisp_dev, ++ int type) ++{ ++ struct drm_encoder *encoder; ++ int err; ++ ++ encoder = kzalloc(sizeof(*encoder), GFP_KERNEL); ++ if (!encoder) ++ return ERR_PTR(-ENOMEM); ++ ++ err = drm_encoder_init(nulldisp_dev->dev, ++ encoder, ++ &nulldisp_encoder_funcs, ++ type, ++ NULL); ++ if (err) { ++ DRM_ERROR("Failed to initialise encoder\n"); ++ return ERR_PTR(err); ++ } ++ drm_encoder_helper_add(encoder, &nulldisp_encoder_helper_funcs); ++ ++ /* ++ * This is a bit field that's used to determine which ++ * CRTCs can drive this encoder. 
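++ * Bit N stands for the CRTC with index N, so 0x1 ties this
++ * encoder to the single CRTC registered by this driver.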
++ */ ++ encoder->possible_crtcs = 0x1; ++ ++ DRM_DEBUG_DRIVER("[ENCODER:%d:%s]\n", encoder->base.id, encoder->name); ++ ++ return encoder; ++} ++ ++ ++/****************************************************************************** ++ * Framebuffer functions ++ ******************************************************************************/ ++ ++#if defined(NULLDISP_USE_ATOMIC) ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) ++static int ++nulldisp_framebuffer_dirty(struct drm_framebuffer *framebuffer, ++ struct drm_file *file_priv, ++ unsigned int flags, ++ unsigned int color, ++ struct drm_clip_rect *clips, ++ unsigned int num_clips) ++{ ++ struct nulldisp_display_device *nulldisp_dev = ++ framebuffer->dev->dev_private; ++ struct nulldisp_crtc *nulldisp_crtc = nulldisp_dev->nulldisp_crtc; ++ ++ reinit_completion(&nulldisp_crtc->copy_done); ++ ++ if (!nlpvrdpy_send_copy(nulldisp_dev->nlpvrdpy, ++ framebuffer, ++ &framebuffer->obj[0])) { ++ unsigned long res; ++ ++ res = wait_for_completion_timeout(&nulldisp_crtc->copy_done, ++ nulldisp_netlink_timeout()); ++ ++ if (!res) ++ DRM_ERROR("timed out waiting for remote update\n"); ++ } ++ ++ return 0; ++} ++ ++static const struct drm_framebuffer_funcs nulldisp_framebuffer_funcs = { ++ .destroy = drm_gem_fb_destroy, ++ .create_handle = drm_gem_fb_create_handle, ++ .dirty = nulldisp_framebuffer_dirty, ++}; ++ ++static struct drm_framebuffer * ++nulldisp_fb_create(struct drm_device *dev, struct drm_file *file, ++ const struct drm_mode_fb_cmd2 *mode_cmd) ++{ ++ return drm_gem_fb_create_with_funcs(dev, file, mode_cmd, ++ &nulldisp_framebuffer_funcs); ++} ++#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) */ ++#define nulldisp_fb_create drm_gem_fb_create_with_dirty ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) */ ++#else /* defined(NULLDISP_USE_ATOMIC) */ ++static void nulldisp_framebuffer_destroy(struct drm_framebuffer *framebuffer) ++{ ++ struct nulldisp_framebuffer *nulldisp_framebuffer = ++ to_nulldisp_framebuffer(framebuffer); ++ int i; ++ ++ DRM_DEBUG_DRIVER("[FB:%d]\n", framebuffer->base.id); ++ ++ drm_framebuffer_cleanup(framebuffer); ++ ++ for (i = 0; i < nulldisp_drm_fb_num_planes(framebuffer); i++) ++ drm_gem_object_put(nulldisp_framebuffer->obj[i]); ++ ++ kfree(nulldisp_framebuffer); ++} ++ ++static int ++nulldisp_framebuffer_create_handle(struct drm_framebuffer *framebuffer, ++ struct drm_file *file_priv, ++ unsigned int *handle) ++{ ++ struct nulldisp_framebuffer *nulldisp_framebuffer = ++ to_nulldisp_framebuffer(framebuffer); ++ ++ DRM_DEBUG_DRIVER("[FB:%d]\n", framebuffer->base.id); ++ ++ return drm_gem_handle_create(file_priv, ++ nulldisp_framebuffer->obj[0], ++ handle); ++} ++ ++static int ++nulldisp_framebuffer_dirty(struct drm_framebuffer *framebuffer, ++ struct drm_file *file_priv, ++ unsigned int flags, ++ unsigned int color, ++ struct drm_clip_rect *clips, ++ unsigned int num_clips) ++{ ++ struct nulldisp_framebuffer *nulldisp_fb = ++ to_nulldisp_framebuffer(framebuffer); ++ struct nulldisp_display_device *nulldisp_dev = ++ framebuffer->dev->dev_private; ++ struct nulldisp_crtc *nulldisp_crtc = nulldisp_dev->nulldisp_crtc; ++ ++ /* ++ * To prevent races with disconnect requests from user space, ++ * set the timeout before sending the copy request. 
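++ * (If the request were sent first, the remote could acknowledge
++ * and flush copy_to_work before it was queued, forcing the wait
++ * below to last the full timeout.)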
++ */ ++ nulldisp_set_copy_to(nulldisp_crtc); ++ ++ if (nlpvrdpy_send_copy(nulldisp_dev->nlpvrdpy, ++ &nulldisp_fb->base, ++ &nulldisp_fb->obj[0])) ++ goto fail_flush; ++ ++ wait_for_completion(&nulldisp_crtc->copy_done); ++ ++ return 0; ++ ++fail_flush: ++ flush_delayed_work(&nulldisp_crtc->copy_to_work); ++ ++ wait_for_completion(&nulldisp_crtc->copy_done); ++ ++ return 0; ++ ++} ++ ++static const struct drm_framebuffer_funcs nulldisp_framebuffer_funcs = { ++ .destroy = nulldisp_framebuffer_destroy, ++ .create_handle = nulldisp_framebuffer_create_handle, ++ .dirty = nulldisp_framebuffer_dirty, ++}; ++ ++static int ++nulldisp_framebuffer_init(struct drm_device *dev, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ ++ (defined(CHROMIUMOS_KERNEL) && \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) ++ const ++#endif ++ struct drm_mode_fb_cmd2 *mode_cmd, ++ struct nulldisp_framebuffer *nulldisp_framebuffer, ++ struct drm_gem_object **obj) ++{ ++ struct drm_framebuffer *fb = &nulldisp_framebuffer->base; ++ int err; ++ int i; ++ ++ fb->dev = dev; ++ ++ nulldisp_drm_fb_set_format(fb, mode_cmd->pixel_format); ++ ++ fb->width = mode_cmd->width; ++ fb->height = mode_cmd->height; ++ fb->flags = mode_cmd->flags; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ nulldisp_drm_fb_set_modifier(fb, mode_cmd->modifier[0]); ++#endif ++ ++ for (i = 0; i < nulldisp_drm_fb_num_planes(fb); i++) { ++ fb->pitches[i] = mode_cmd->pitches[i]; ++ fb->offsets[i] = mode_cmd->offsets[i]; ++ ++ nulldisp_framebuffer->obj[i] = obj[i]; ++ } ++ ++ err = drm_framebuffer_init(dev, fb, &nulldisp_framebuffer_funcs); ++ if (err) { ++ DRM_ERROR("failed to initialise framebuffer structure (%d)\n", ++ err); ++ return err; ++ } ++ ++ DRM_DEBUG_DRIVER("[FB:%d]\n", fb->base.id); ++ ++ return 0; ++} ++ ++static struct drm_framebuffer * ++nulldisp_fb_create(struct drm_device *dev, ++ struct drm_file *file_priv, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || \ ++ (defined(CHROMIUMOS_KERNEL) && \ ++ (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))) ++ const ++#endif ++ struct drm_mode_fb_cmd2 *mode_cmd) ++{ ++ struct drm_gem_object *obj[NULLDISP_MAX_PLANES]; ++ struct nulldisp_framebuffer *nulldisp_framebuffer; ++ int err; ++ int i; ++ ++ nulldisp_framebuffer = kzalloc(sizeof(*nulldisp_framebuffer), ++ GFP_KERNEL); ++ if (!nulldisp_framebuffer) { ++ err = -ENOMEM; ++ goto fail_exit; ++ } ++ ++ for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { ++ obj[i] = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); ++ if (!obj[i]) { ++ DRM_ERROR("failed to find buffer with handle %u\n", ++ mode_cmd->handles[i]); ++ err = -ENOENT; ++ goto fail_unreference; ++ } ++ } ++ ++ err = nulldisp_framebuffer_init(dev, ++ mode_cmd, ++ nulldisp_framebuffer, ++ obj); ++ if (err) ++ goto fail_unreference; ++ ++ DRM_DEBUG_DRIVER("[FB:%d]\n", nulldisp_framebuffer->base.base.id); ++ ++ return &nulldisp_framebuffer->base; ++ ++fail_unreference: ++ kfree(nulldisp_framebuffer); ++ ++ while (i--) ++ drm_gem_object_put(obj[i]); ++ ++fail_exit: ++ return ERR_PTR(err); ++} ++#endif /* defined(NULLDISP_USE_ATOMIC) */ ++ ++static const struct drm_mode_config_funcs nulldisp_mode_config_funcs = { ++ .fb_create = nulldisp_fb_create, ++ .output_poll_changed = NULL, ++#if defined(NULLDISP_USE_ATOMIC) ++ .atomic_check = drm_atomic_helper_check, ++ .atomic_commit = drm_atomic_helper_commit, ++#endif ++}; ++ ++static int nulldisp_nl_flipped_cb(void *data) ++{ ++ struct nulldisp_crtc *nulldisp_crtc = data; ++ ++#if 
defined(NULLDISP_USE_ATOMIC) ++ complete(&nulldisp_crtc->flip_done); ++#else ++ flush_delayed_work(&nulldisp_crtc->flip_to_work); ++#endif ++ flush_delayed_work(&nulldisp_crtc->vb_work); ++ ++ return 0; ++} ++ ++static int nulldisp_nl_copied_cb(void *data) ++{ ++#if defined(NULLDISP_USE_ATOMIC) ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) ++ struct nulldisp_crtc *nulldisp_crtc = data; ++ ++ complete(&nulldisp_crtc->copy_done); ++#endif ++#else ++ struct nulldisp_crtc *nulldisp_crtc = data; ++ ++ flush_delayed_work(&nulldisp_crtc->copy_to_work); ++#endif ++ return 0; ++} ++ ++static void nulldisp_nl_disconnect_cb(void *data) ++{ ++ struct nulldisp_crtc *nulldisp_crtc = data; ++ ++#if defined(NULLDISP_USE_ATOMIC) ++ complete(&nulldisp_crtc->flip_done); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)) ++ complete(&nulldisp_crtc->copy_done); ++#endif ++#else ++ flush_delayed_work(&nulldisp_crtc->flip_to_work); ++ flush_delayed_work(&nulldisp_crtc->copy_to_work); ++#endif ++} ++ ++static int nulldisp_early_load(struct drm_device *dev) ++{ ++ struct nulldisp_display_device *nulldisp_dev; ++ struct drm_connector *connector; ++ struct drm_encoder *encoder; ++ int err; ++ ++ platform_set_drvdata(to_platform_device(dev->dev), dev); ++ ++ nulldisp_dev = kzalloc(sizeof(*nulldisp_dev), GFP_KERNEL); ++ if (!nulldisp_dev) ++ return -ENOMEM; ++ ++ dev->dev_private = nulldisp_dev; ++ nulldisp_dev->dev = dev; ++ ++ drm_mode_config_init(dev); ++ ++ dev->mode_config.funcs = (void *)&nulldisp_mode_config_funcs; ++ dev->mode_config.min_width = NULLDISP_FB_WIDTH_MIN; ++ dev->mode_config.max_width = NULLDISP_FB_WIDTH_MAX; ++ dev->mode_config.min_height = NULLDISP_FB_HEIGHT_MIN; ++ dev->mode_config.max_height = NULLDISP_FB_HEIGHT_MAX; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) ++ dev->mode_config.fb_base = 0; ++#endif ++ dev->mode_config.async_page_flip = true; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ dev->mode_config.fb_modifiers_not_supported = false; ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ dev->mode_config.allow_fb_modifiers = true; ++#endif ++ ++ nulldisp_dev->nulldisp_crtc = nulldisp_crtc_create(nulldisp_dev); ++ if (!nulldisp_dev->nulldisp_crtc) { ++ DRM_ERROR("failed to create a CRTC.\n"); ++ ++ err = -ENOMEM; ++ goto err_config_cleanup; ++ } ++ ++ connector = nulldisp_connector_create(nulldisp_dev, ++ DRM_MODE_CONNECTOR_Unknown); ++ if (!connector) { ++ DRM_ERROR("failed to create a connector.\n"); ++ ++ err = -ENOMEM; ++ goto err_config_cleanup; ++ } ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) ++ nulldisp_dev->connector = connector; ++#endif ++ encoder = nulldisp_encoder_create(nulldisp_dev, ++ DRM_MODE_ENCODER_NONE); ++ if (IS_ERR(encoder)) { ++ DRM_ERROR("failed to create an encoder.\n"); ++ ++ err = PTR_ERR(encoder); ++ goto err_config_cleanup; ++ } ++ ++ err = drm_connector_attach_encoder(connector, encoder); ++ if (err) { ++ DRM_ERROR("failed to attach [ENCODER:%d:%s] to [CONNECTOR:%d:%s] (err=%d)\n", ++ encoder->base.id, ++ encoder->name, ++ connector->base.id, ++ connector->name, ++ err); ++ goto err_config_cleanup; ++ } ++ ++#if defined(LMA) ++ nulldisp_dev->pdp_gem_priv = pdp_gem_init(dev); ++ if (!nulldisp_dev->pdp_gem_priv) { ++ err = -ENOMEM; ++ goto err_config_cleanup; ++ } ++#endif ++ nulldisp_dev->workqueue = ++ create_singlethread_workqueue(DRIVER_NAME); ++ if (!nulldisp_dev->workqueue) { ++ DRM_ERROR("failed to create work queue\n"); ++ err = -ENOMEM; ++ goto err_gem_cleanup; ++ } ++ ++ err = drm_vblank_init(nulldisp_dev->dev, 1); ++ if
(err) { ++ DRM_ERROR("failed to complete vblank init (err=%d)\n", err); ++ goto err_workqueue_cleanup; ++ } ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)) ++ dev->irq_enabled = true; ++#endif ++ ++ nulldisp_dev->nlpvrdpy = nlpvrdpy_create(dev, ++ nulldisp_nl_disconnect_cb, ++ nulldisp_dev->nulldisp_crtc, ++ nulldisp_nl_flipped_cb, ++ nulldisp_dev->nulldisp_crtc, ++ nulldisp_nl_copied_cb, ++ nulldisp_dev->nulldisp_crtc); ++ if (!nulldisp_dev->nlpvrdpy) { ++ err = -ENOMEM; ++ DRM_ERROR("Netlink initialisation failed (err=%d)\n", err); ++ goto err_vblank_cleanup; ++ } ++ ++ return 0; ++ ++err_vblank_cleanup: ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ /* Called by drm_dev_fini in Linux 4.11.0 and later */ ++ drm_vblank_cleanup(dev); ++#endif ++err_workqueue_cleanup: ++ destroy_workqueue(nulldisp_dev->workqueue); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)) ++ dev->irq_enabled = false; ++#endif ++err_gem_cleanup: ++#if defined(LMA) ++ pdp_gem_cleanup(nulldisp_dev->pdp_gem_priv); ++#endif ++err_config_cleanup: ++ drm_mode_config_cleanup(dev); ++ kfree(nulldisp_dev); ++ return err; ++} ++ ++static int nulldisp_late_load(struct drm_device *dev) ++{ ++ drm_mode_config_reset(dev); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) ++ { ++ struct nulldisp_display_device *nulldisp_dev = dev->dev_private; ++ int err; ++ ++ err = drm_connector_register(nulldisp_dev->connector); ++ if (err) { ++ DRM_ERROR( ++ "[CONNECTOR:%d:%s] failed to register (err=%d)\n", ++ nulldisp_dev->connector->base.id, ++ nulldisp_dev->connector->name, ++ err); ++ return err; ++ } ++ } ++#endif ++ return 0; ++} ++ ++static void nulldisp_early_unload(struct drm_device *dev) ++{ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) ++ struct nulldisp_display_device *nulldisp_dev = dev->dev_private; ++ ++ drm_connector_unregister(nulldisp_dev->connector); ++#endif ++} ++ ++static void nulldisp_late_unload(struct drm_device *dev) ++{ ++ struct nulldisp_display_device *nulldisp_dev = dev->dev_private; ++ ++ nlpvrdpy_send_disconnect(nulldisp_dev->nlpvrdpy); ++ nlpvrdpy_destroy(nulldisp_dev->nlpvrdpy); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ /* Called by drm_dev_fini in Linux 4.11.0 and later */ ++ drm_vblank_cleanup(dev); ++#endif ++ destroy_workqueue(nulldisp_dev->workqueue); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)) ++ dev->irq_enabled = false; ++#endif ++ ++#if defined(LMA) ++ pdp_gem_cleanup(nulldisp_dev->pdp_gem_priv); ++#endif ++ drm_mode_config_cleanup(dev); ++ ++ kfree(nulldisp_dev); ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) ++static int nulldisp_load(struct drm_device *dev, unsigned long flags) ++{ ++ int err; ++ ++ err = nulldisp_early_load(dev); ++ if (err) ++ return err; ++ ++ err = nulldisp_late_load(dev); ++ if (err) { ++ nulldisp_late_unload(dev); ++ return err; ++ } ++ ++ return 0; ++} ++ ++static int nulldisp_unload(struct drm_device *dev) ++{ ++ nulldisp_early_unload(dev); ++ nulldisp_late_unload(dev); ++ ++ return 0; ++} ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) ++static void ++nulldisp_crtc_flip_event_cancel(struct drm_crtc *crtc, struct drm_file *file) ++{ ++ struct nulldisp_crtc *nulldisp_crtc = to_nulldisp_crtc(crtc); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&crtc->dev->event_lock, flags); ++ ++ if (nulldisp_crtc->flip_event && ++ nulldisp_crtc->flip_event->base.file_priv == file) { ++ struct drm_pending_event *pending_event = ++ &nulldisp_crtc->flip_event->base; ++ ++ pending_event->destroy(pending_event); ++
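/* destroy() has freed the event; clear the stale pointer. */ ++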
nulldisp_crtc->flip_event = NULL; ++ } ++ ++ spin_unlock_irqrestore(&crtc->dev->event_lock, flags); ++} ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) ++static void nulldisp_preclose(struct drm_device *dev, struct drm_file *file) ++{ ++ struct drm_crtc *crtc; ++ ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) ++ nulldisp_crtc_flip_event_cancel(crtc, file); ++} ++#endif ++ ++static void nulldisp_lastclose(struct drm_device *dev) ++{ ++#if defined(NULLDISP_USE_ATOMIC) ++ drm_atomic_helper_shutdown(dev); ++#else ++ struct drm_crtc *crtc; ++ ++ drm_modeset_lock_all(dev); ++ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { ++ if (crtc->primary->fb) { ++ struct drm_mode_set mode_set = { .crtc = crtc }; ++ int err; ++ ++ err = drm_mode_set_config_internal(&mode_set); ++ if (err) ++ DRM_ERROR( ++ "failed to disable crtc %p (err=%d)\n", ++ crtc, err); ++ } ++ } ++ drm_modeset_unlock_all(dev); ++#endif ++} ++ ++static const struct vm_operations_struct nulldisp_gem_vm_ops = { ++#if defined(LMA) ++ .fault = pdp_gem_object_vm_fault, ++ .open = drm_gem_vm_open, ++ .close = drm_gem_vm_close, ++#else ++ .fault = nulldisp_gem_object_vm_fault, ++ .open = nulldisp_gem_vm_open, ++ .close = nulldisp_gem_vm_close, ++#endif ++}; ++ ++#if defined(LMA) ++static int pdp_gem_dumb_create(struct drm_file *file, ++ struct drm_device *dev, ++ struct drm_mode_create_dumb *args) ++{ ++ struct nulldisp_display_device *nulldisp_dev = dev->dev_private; ++ ++ return pdp_gem_dumb_create_priv(file, ++ dev, ++ nulldisp_dev->pdp_gem_priv, ++ args); ++} ++ ++static int nulldisp_gem_object_create_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file) ++{ ++ struct drm_nulldisp_gem_create *args = data; ++ struct nulldisp_display_device *nulldisp_dev = dev->dev_private; ++ struct drm_pdp_gem_create pdp_args; ++ int err; ++ ++ if (args->flags) { ++ DRM_ERROR("invalid flags: %#08x\n", args->flags); ++ return -EINVAL; ++ } ++ ++ if (args->handle) { ++ DRM_ERROR("invalid handle (this should always be 0)\n"); ++ return -EINVAL; ++ } ++ ++ /* ++ * Remapping of nulldisp create args to pdp create args. ++ * ++ * Note: even though the nulldisp and pdp args are identical ++ * in this case, they may potentially change in future. 
++ */ ++ pdp_args.size = args->size; ++ pdp_args.flags = args->flags; ++ pdp_args.handle = args->handle; ++ ++ err = pdp_gem_object_create_ioctl_priv(dev, ++ nulldisp_dev->pdp_gem_priv, ++ &pdp_args, ++ file); ++ if (!err) ++ args->handle = pdp_args.handle; ++ ++ return err; ++} ++ ++static int nulldisp_gem_object_mmap_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file) ++{ ++ struct drm_nulldisp_gem_mmap *args = data; ++ struct drm_pdp_gem_mmap pdp_args; ++ int err; ++ ++ pdp_args.handle = args->handle; ++ pdp_args.pad = args->pad; ++ pdp_args.offset = args->offset; ++ ++ err = pdp_gem_object_mmap_ioctl(dev, &pdp_args, file); ++ ++ if (!err) ++ args->offset = pdp_args.offset; ++ ++ return err; ++} ++ ++static int nulldisp_gem_object_cpu_prep_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file) ++{ ++ struct drm_nulldisp_gem_cpu_prep *args = ++ (struct drm_nulldisp_gem_cpu_prep *)data; ++ struct drm_pdp_gem_cpu_prep pdp_args; ++ ++ pdp_args.handle = args->handle; ++ pdp_args.flags = args->flags; ++ ++ return pdp_gem_object_cpu_prep_ioctl(dev, &pdp_args, file); ++} ++ ++static int nulldisp_gem_object_cpu_fini_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file) ++{ ++ struct drm_nulldisp_gem_cpu_fini *args = ++ (struct drm_nulldisp_gem_cpu_fini *)data; ++ struct drm_pdp_gem_cpu_fini pdp_args; ++ ++ pdp_args.handle = args->handle; ++ pdp_args.pad = args->pad; ++ ++ return pdp_gem_object_cpu_fini_ioctl(dev, &pdp_args, file); ++} ++ ++static void nulldisp_pdp_gem_object_free(struct drm_gem_object *obj) ++{ ++ struct nulldisp_display_device *nulldisp_dev = obj->dev->dev_private; ++ ++ pdp_gem_object_free_priv(nulldisp_dev->pdp_gem_priv, obj); ++} ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) ++const struct drm_gem_object_funcs nulldisp_gem_funcs = { ++#if defined(LMA) ++ .free = nulldisp_pdp_gem_object_free, ++ .export = pdp_gem_prime_export, ++#else ++ .export = drm_gem_prime_export, ++ .pin = nulldisp_gem_prime_pin, ++ .unpin = nulldisp_gem_prime_unpin, ++ .get_sg_table = nulldisp_gem_prime_get_sg_table, ++ .vmap = nulldisp_gem_prime_vmap, ++ .vunmap = nulldisp_gem_prime_vunmap, ++ .free = nulldisp_gem_object_free, ++#endif /* defined(LMA) */ ++ .vm_ops = &nulldisp_gem_vm_ops, ++}; ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) */ ++ ++static const struct drm_ioctl_desc nulldisp_ioctls[] = { ++ DRM_IOCTL_DEF_DRV(NULLDISP_GEM_CREATE, ++ nulldisp_gem_object_create_ioctl, ++ DRM_AUTH | DRM_UNLOCKED), ++ DRM_IOCTL_DEF_DRV(NULLDISP_GEM_MMAP, ++ nulldisp_gem_object_mmap_ioctl, ++ DRM_AUTH | DRM_UNLOCKED), ++ DRM_IOCTL_DEF_DRV(NULLDISP_GEM_CPU_PREP, ++ nulldisp_gem_object_cpu_prep_ioctl, ++ DRM_AUTH | DRM_UNLOCKED), ++ DRM_IOCTL_DEF_DRV(NULLDISP_GEM_CPU_FINI, ++ nulldisp_gem_object_cpu_fini_ioctl, ++ DRM_AUTH | DRM_UNLOCKED), ++}; ++ ++static int nulldisp_gem_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ int err; ++ ++ err = netlink_gem_mmap(file, vma); ++#if !defined(LMA) ++ if (!err) { ++ struct drm_file *file_priv = file->private_data; ++ struct drm_device *dev = file_priv->minor->dev; ++ struct drm_gem_object *obj; ++ ++ mutex_lock(&dev->struct_mutex); ++ obj = vma->vm_private_data; ++ ++ if (obj->import_attach) ++ err = dma_buf_mmap(obj->dma_buf, vma, 0); ++ else ++ err = nulldisp_gem_object_get_pages(obj); ++ ++ mutex_unlock(&dev->struct_mutex); ++ } ++#endif ++ return err; ++} ++ ++static const struct file_operations nulldisp_driver_fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ 
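	/*
	 * Apart from .mmap (wrapped below so the netlink/GEM page setup can
	 * run first), these are the stock DRM file operations.  A minimal
	 * userspace sketch of driving this node, assuming the conventional
	 * ioctl macro name from nulldisp_drm.h and a typical card node path:
	 *
	 *   int fd = open("/dev/dri/card0", O_RDWR);
	 *   struct drm_nulldisp_gem_create c = { .size = 4096 };
	 *   int ret = ioctl(fd, DRM_IOCTL_NULLDISP_GEM_CREATE, &c);
	 *   // on success, c.handle names the new GEM object
	 */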
.release = drm_release, ++ .unlocked_ioctl = drm_ioctl, ++ .mmap = nulldisp_gem_mmap, ++ .poll = drm_poll, ++ .read = drm_read, ++ .llseek = noop_llseek, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = drm_compat_ioctl, ++#endif ++}; ++ ++static struct drm_driver nulldisp_drm_driver = { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ .load = NULL, ++ .unload = NULL, ++#else ++ .load = nulldisp_load, ++ .unload = nulldisp_unload, ++#endif ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) ++ .preclose = nulldisp_preclose, ++#endif ++ .lastclose = nulldisp_lastclose, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) ++ .set_busid = drm_platform_set_busid, ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) ++ .get_vblank_counter = drm_vblank_count, ++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ .get_vblank_counter = drm_vblank_no_hw_counter, ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)) ++ .enable_vblank = nulldisp_enable_vblank, ++ .disable_vblank = nulldisp_disable_vblank, ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) ++ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, ++ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, ++#endif ++ ++#if defined(LMA) ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) ++ .gem_free_object = nulldisp_pdp_gem_object_free, ++ .gem_prime_export = pdp_gem_prime_export, ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) */ ++ .gem_prime_import = pdp_gem_prime_import, ++ .gem_prime_import_sg_table = pdp_gem_prime_import_sg_table, ++ ++ .dumb_create = pdp_gem_dumb_create, ++ .dumb_map_offset = pdp_gem_dumb_map_offset, ++#else ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) ++ .gem_prime_pin = nulldisp_gem_prime_pin, ++ .gem_prime_unpin = nulldisp_gem_prime_unpin, ++ .gem_prime_get_sg_table = nulldisp_gem_prime_get_sg_table, ++ .gem_prime_vmap = nulldisp_gem_prime_vmap, ++ .gem_prime_vunmap = nulldisp_gem_prime_vunmap, ++ .gem_free_object = nulldisp_gem_object_free, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++ .gem_prime_export = nulldisp_gem_prime_export, ++#else ++ .gem_prime_export = drm_gem_prime_export, ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) */ ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) */ ++ .gem_prime_import_sg_table = nulldisp_gem_prime_import_sg_table, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) ++ .gem_prime_mmap = nulldisp_gem_prime_mmap, ++#endif ++ .gem_prime_import = drm_gem_prime_import, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++ .gem_prime_res_obj = nulldisp_gem_prime_res_obj, ++#endif ++ .dumb_create = nulldisp_gem_dumb_create, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ .dumb_map_offset = nulldisp_gem_dumb_map_offset, ++#endif ++#endif /* defined(LMA) */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) ++ .dumb_destroy = drm_gem_dumb_destroy, ++#endif ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)) ++ .gem_vm_ops = &nulldisp_gem_vm_ops, ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0) */ ++ .name = DRIVER_NAME, ++ .desc = DRIVER_DESC, ++ .date = DRIVER_DATE, ++ .major = PVRVERSION_MAJ, ++ .minor = PVRVERSION_MIN, ++ .patchlevel = PVRVERSION_BUILD, ++ ++ .driver_features = DRIVER_GEM | ++ DRIVER_MODESET | ++ NULLDISP_DRIVER_PRIME | ++ NULLDISP_DRIVER_ATOMIC, ++ .ioctls = nulldisp_ioctls, ++ .num_ioctls = ARRAY_SIZE(nulldisp_ioctls), ++ .fops = 
&nulldisp_driver_fops, ++}; ++ ++static int nulldisp_probe(struct platform_device *pdev) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ struct drm_device *ddev; ++ int ret; ++ ++ ddev = drm_dev_alloc(&nulldisp_drm_driver, &pdev->dev); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) ++ if (IS_ERR(ddev)) ++ return PTR_ERR(ddev); ++#else ++ if (!ddev) ++ return -ENOMEM; ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) ++ /* Needed by drm_platform_set_busid */ ++ ddev->platformdev = pdev; ++#endif ++ /* ++ * The load callback, called from drm_dev_register, is deprecated, ++ * because of potential race conditions. ++ */ ++ BUG_ON(nulldisp_drm_driver.load != NULL); ++ ++ ret = nulldisp_early_load(ddev); ++ if (ret) ++ goto err_drm_dev_put; ++ ++ ret = drm_dev_register(ddev, 0); ++ if (ret) ++ goto err_drm_dev_late_unload; ++ ++ ret = nulldisp_late_load(ddev); ++ if (ret) ++ goto err_drm_dev_unregister; ++ ++ drm_mode_config_reset(ddev); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", ++ nulldisp_drm_driver.name, ++ nulldisp_drm_driver.major, ++ nulldisp_drm_driver.minor, ++ nulldisp_drm_driver.patchlevel, ++ nulldisp_drm_driver.date, ++ ddev->primary->index); ++#endif ++ return 0; ++ ++err_drm_dev_unregister: ++ drm_dev_unregister(ddev); ++err_drm_dev_late_unload: ++ nulldisp_late_unload(ddev); ++err_drm_dev_put: ++ drm_dev_put(ddev); ++ return ret; ++#else ++ return drm_platform_init(&nulldisp_drm_driver, pdev); ++#endif ++} ++ ++static int nulldisp_remove(struct platform_device *pdev) ++{ ++ struct drm_device *ddev = platform_get_drvdata(pdev); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ /* ++ * The unload callback, called from drm_dev_unregister, is ++ * deprecated. ++ */ ++ BUG_ON(nulldisp_drm_driver.unload != NULL); ++ ++ nulldisp_early_unload(ddev); ++ ++ drm_dev_unregister(ddev); ++ ++ nulldisp_late_unload(ddev); ++ ++ drm_dev_put(ddev); ++#else ++ drm_put_dev(ddev); ++#endif ++ return 0; ++} ++ ++static void nulldisp_shutdown(struct platform_device *pdev) ++{ ++} ++ ++static struct platform_device_id nulldisp_platform_device_id_table[] = { ++#if defined(LMA) ++ { .name = APOLLO_DEVICE_NAME_PDP, .driver_data = 0 }, ++ { .name = ODN_DEVICE_NAME_PDP, .driver_data = 0 }, ++#else ++ { .name = "nulldisp", .driver_data = 0 }, ++#endif ++ { }, ++}; ++ ++static struct platform_driver nulldisp_platform_driver = { ++ .probe = nulldisp_probe, ++ .remove = nulldisp_remove, ++ .shutdown = nulldisp_shutdown, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = DRIVER_NAME, ++ }, ++ .id_table = nulldisp_platform_device_id_table, ++}; ++ ++ ++#if !defined(LMA) ++static struct platform_device_info nulldisp_device_info = { ++ .name = "nulldisp", ++ .id = -1, ++#if defined(NO_HARDWARE) ++ /* ++ * Not all cores have 40 bit physical support, but this ++ * will work unless > 32 bit address is returned on those cores. ++ * In the future this will be fixed properly. 
++ */ ++ .dma_mask = DMA_BIT_MASK(40), ++#else ++ .dma_mask = DMA_BIT_MASK(32), ++#endif ++}; ++ ++static struct platform_device *nulldisp_dev; ++#endif ++ ++static int __init nulldisp_init(void) ++{ ++ int err; ++ ++ err = nulldisp_validate_module_parameters(); ++ if (err) { ++ DRM_ERROR("invalid module parameters (err=%d)\n", err); ++ return err; ++ } ++ ++ err = nlpvrdpy_register(); ++ if (err) { ++ DRM_ERROR("failed to register with netlink (err=%d)\n", err); ++ return err; ++ } ++ ++#if !defined(LMA) ++ nulldisp_dev = platform_device_register_full(&nulldisp_device_info); ++ if (IS_ERR(nulldisp_dev)) { ++ err = PTR_ERR(nulldisp_dev); ++ nulldisp_dev = NULL; ++ goto err_unregister_family; ++ } ++#endif ++ err = platform_driver_register(&nulldisp_platform_driver); ++ if (err) ++ goto err_unregister_family; ++ ++ return 0; ++ ++err_unregister_family: ++ (void) nlpvrdpy_unregister(); ++ return err; ++} ++ ++static void __exit nulldisp_exit(void) ++{ ++ int err; ++ ++ err = nlpvrdpy_unregister(); ++ BUG_ON(err); ++ ++#if !defined(LMA) ++ if (nulldisp_dev) ++ platform_device_unregister(nulldisp_dev); ++#endif ++ platform_driver_unregister(&nulldisp_platform_driver); ++} ++ ++module_init(nulldisp_init); ++module_exit(nulldisp_exit); ++ ++MODULE_AUTHOR("Imagination Technologies Ltd. "); ++MODULE_DESCRIPTION(DRIVER_DESC); ++MODULE_LICENSE("Dual MIT/GPL"); +diff --git a/drivers/gpu/drm/img-rogue/drm_nulldisp_drv.h b/drivers/gpu/drm/img-rogue/drm_nulldisp_drv.h +new file mode 100644 +index 000000000000..a5e37c551a0c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/drm_nulldisp_drv.h +@@ -0,0 +1,97 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __DRM_NULLDISP_DRV_H__ ++#define __DRM_NULLDISP_DRV_H__ ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++#include ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++#include ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) ++#define NULLDISP_USE_ATOMIC ++#endif ++ ++struct drm_framebuffer; ++ ++/****************************************************************************** ++ * Linux compatibility functions ++ ******************************************************************************/ ++static inline u32 nulldisp_drm_fb_format(struct drm_framebuffer *fb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++ return fb->format->format; ++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */ ++ return fb->pixel_format; ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */ ++} ++ ++static inline u64 nulldisp_drm_fb_modifier(struct drm_framebuffer *fb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++ return fb->modifier; ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ return fb->modifier[0]; ++#else ++ /* 0 represents DRM_FORMAT_MOD_NONE, doesn't exist before 4.1 */ ++ return 0; ++#endif ++} ++ ++/****************************************************************************** ++ * DRM framebuffer support functions ++ ******************************************************************************/ ++static inline int nulldisp_drm_fb_num_planes(struct drm_framebuffer *fb) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++ return fb->format->num_planes; ++#else ++ return drm_format_num_planes(nulldisp_drm_fb_format(fb)); ++#endif ++} ++#endif /* __DRM_NULLDISP_DRV_H__ */ +diff --git a/drivers/gpu/drm/img-rogue/drm_nulldisp_gem.c b/drivers/gpu/drm/img-rogue/drm_nulldisp_gem.c +new file mode 100644 +index 000000000000..e859d6139b98 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/drm_nulldisp_gem.c +@@ -0,0 +1,678 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. 
++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pvr_dma_resv.h" ++#include "drm_nulldisp_gem.h" ++#include "nulldisp_drm.h" ++#include "kernel_compatibility.h" ++ ++struct nulldisp_gem_object { ++ struct drm_gem_object base; ++ ++ atomic_t pg_refcnt; ++ struct page **pages; ++ dma_addr_t *addrs; /* Will be NULL for imported buffers. 
*/ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ struct dma_resv _resv; ++#endif ++ struct dma_resv *resv; ++ ++ bool cpu_prep; ++ struct sg_table *import_sgt; ++}; ++ ++#define to_nulldisp_obj(obj) \ ++ container_of(obj, struct nulldisp_gem_object, base) ++ ++int nulldisp_gem_object_get_pages(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); ++ struct page **pages; ++ int err; ++ ++ if (WARN_ON(obj->import_attach)) ++ return -EEXIST; ++ ++ WARN_ON(!mutex_is_locked(&dev->struct_mutex)); ++ ++ if (atomic_inc_return(&nulldisp_obj->pg_refcnt) == 1) { ++ unsigned int npages = obj->size >> PAGE_SHIFT; ++ dma_addr_t *addrs; ++ unsigned int i; ++ ++ pages = drm_gem_get_pages(obj); ++ if (IS_ERR(pages)) { ++ err = PTR_ERR(pages); ++ goto dec_refcnt; ++ } ++ ++ addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL); ++ if (!addrs) { ++ err = -ENOMEM; ++ goto free_pages; ++ } ++ ++ for (i = 0; i < npages; i++) { ++ addrs[i] = dma_map_page(dev->dev, pages[i], ++ 0, PAGE_SIZE, ++ DMA_BIDIRECTIONAL); ++ } ++ ++ nulldisp_obj->pages = pages; ++ nulldisp_obj->addrs = addrs; ++ } ++ ++ return 0; ++ ++free_pages: ++ drm_gem_put_pages(obj, pages, false, false); ++dec_refcnt: ++ atomic_dec(&nulldisp_obj->pg_refcnt); ++ return err; ++} ++ ++static void nulldisp_gem_object_put_pages(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); ++ ++ WARN_ON(!mutex_is_locked(&dev->struct_mutex)); ++ ++ if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0)) ++ return; ++ ++ if (atomic_dec_and_test(&nulldisp_obj->pg_refcnt)) { ++ unsigned int npages = obj->size >> PAGE_SHIFT; ++ unsigned int i; ++ ++ for (i = 0; i < npages; i++) { ++ dma_unmap_page(dev->dev, nulldisp_obj->addrs[i], ++ PAGE_SIZE, DMA_BIDIRECTIONAL); ++ } ++ ++ kfree(nulldisp_obj->addrs); ++ nulldisp_obj->addrs = NULL; ++ ++ drm_gem_put_pages(obj, nulldisp_obj->pages, true, true); ++ nulldisp_obj->pages = NULL; ++ } ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++vm_fault_t nulldisp_gem_object_vm_fault(struct vm_fault *vmf) ++#else ++int nulldisp_gem_object_vm_fault(struct vm_area_struct *vma, ++ struct vm_fault *vmf) ++#endif ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++ struct vm_area_struct *vma = vmf->vma; ++#endif ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++ unsigned long addr = vmf->address; ++#else ++ unsigned long addr = (unsigned long)vmf->virtual_address; ++#endif ++ struct drm_gem_object *obj = vma->vm_private_data; ++ struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); ++ unsigned long pg_off; ++ struct page *page; ++ ++ /* ++ * nulldisp_gem_object_get_pages should have been called in ++ * nulldisp_gem_mmap so there's no need to do it here. 
++ */ ++ if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0)) ++ return VM_FAULT_SIGBUS; ++ ++ pg_off = (addr - vma->vm_start) >> PAGE_SHIFT; ++ page = nulldisp_obj->pages[pg_off]; ++ ++ get_page(page); ++ vmf->page = page; ++ ++ return 0; ++} ++ ++void nulldisp_gem_vm_open(struct vm_area_struct *vma) ++{ ++ struct drm_gem_object *obj = vma->vm_private_data; ++ ++ drm_gem_vm_open(vma); ++ ++ if (!obj->import_attach) { ++ struct drm_device *dev = obj->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ (void) nulldisp_gem_object_get_pages(obj); ++ mutex_unlock(&dev->struct_mutex); ++ } ++} ++ ++void nulldisp_gem_vm_close(struct vm_area_struct *vma) ++{ ++ struct drm_gem_object *obj = vma->vm_private_data; ++ ++ if (!obj->import_attach) { ++ struct drm_device *dev = obj->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ (void) nulldisp_gem_object_put_pages(obj); ++ mutex_unlock(&dev->struct_mutex); ++ } ++ ++ drm_gem_vm_close(vma); ++} ++ ++void nulldisp_gem_object_free(struct drm_gem_object *obj) ++{ ++ struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); ++ ++ WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) != 0); ++ ++ if (obj->import_attach) { ++ kfree(nulldisp_obj->pages); ++ drm_gem_free_mmap_offset(obj); ++ drm_prime_gem_destroy(obj, nulldisp_obj->import_sgt); ++ } else { ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ dma_resv_fini(&nulldisp_obj->_resv); ++#endif ++ drm_gem_object_release(obj); ++ } ++ ++ kfree(nulldisp_obj); ++} ++ ++int nulldisp_gem_prime_pin(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ int err; ++ ++ mutex_lock(&dev->struct_mutex); ++ err = nulldisp_gem_object_get_pages(obj); ++ mutex_unlock(&dev->struct_mutex); ++ ++ return err; ++} ++ ++void nulldisp_gem_prime_unpin(struct drm_gem_object *obj) ++{ ++ struct drm_device *dev = obj->dev; ++ ++ mutex_lock(&dev->struct_mutex); ++ nulldisp_gem_object_put_pages(obj); ++ mutex_unlock(&dev->struct_mutex); ++} ++ ++struct sg_table * ++nulldisp_gem_prime_get_sg_table(struct drm_gem_object *obj) ++{ ++ struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); ++ int nr_pages = obj->size >> PAGE_SHIFT; ++ ++ /* ++ * nulldisp_gem_prime_pin should have been called in which case we don't ++ * need to call nulldisp_gem_object_get_pages. 
++ */ ++ if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0)) ++ return NULL; ++ ++ return drm_prime_pages_to_sg(obj->dev, nulldisp_obj->pages, nr_pages); ++} ++ ++struct drm_gem_object * ++nulldisp_gem_prime_import_sg_table(struct drm_device *dev, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ struct dma_buf_attachment *attach, ++#else ++ size_t size, ++#endif ++ struct sg_table *sgt) ++{ ++ struct nulldisp_gem_object *nulldisp_obj; ++ struct drm_gem_object *obj; ++ struct page **pages; ++ unsigned int npages; ++ ++ nulldisp_obj = kzalloc(sizeof(*nulldisp_obj), GFP_KERNEL); ++ if (!nulldisp_obj) ++ return NULL; ++ ++ nulldisp_obj->resv = attach->dmabuf->resv; ++ ++ obj = &nulldisp_obj->base; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) ++ obj->resv = nulldisp_obj->resv; ++#endif ++ ++ drm_gem_private_object_init(dev, obj, attach->dmabuf->size); ++ ++ npages = obj->size >> PAGE_SHIFT; ++ ++ pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL); ++ if (!pages) ++ goto exit_free_arrays; ++ ++ if (drm_prime_sg_to_page_array(sgt, pages, npages)) ++ goto exit_free_arrays; ++ ++ nulldisp_obj->import_sgt = sgt; ++ nulldisp_obj->pages = pages; ++ nulldisp_obj->addrs = NULL; ++ ++ return obj; ++ ++exit_free_arrays: ++ kfree(pages); ++ drm_prime_gem_destroy(obj, sgt); ++ kfree(nulldisp_obj); ++ return NULL; ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++struct dma_buf *nulldisp_gem_prime_export( ++ struct drm_device *dev, ++ struct drm_gem_object *obj, ++ int flags) ++{ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) ++ /* Read/write access required */ ++ flags |= O_RDWR; ++#endif ++ return drm_gem_prime_export(dev, obj, flags); ++} ++#endif ++ ++static void *nulldisp_gem_vmap(struct drm_gem_object *obj) ++{ ++ struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); ++ int nr_pages = obj->size >> PAGE_SHIFT; ++ ++ /* ++ * nulldisp_gem_prime_pin should have been called in which case we don't ++ * need to call nulldisp_gem_object_get_pages. ++ */ ++ if (WARN_ON(atomic_read(&nulldisp_obj->pg_refcnt) == 0)) ++ return NULL; ++ ++ ++ return vmap(nulldisp_obj->pages, nr_pages, 0, PAGE_KERNEL); ++} ++ ++static void nulldisp_gem_vunmap(struct drm_gem_object *obj, void *vaddr) ++{ ++ vunmap(vaddr); ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++int nulldisp_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) ++{ ++ void *vaddr = nulldisp_gem_vmap(obj); ++ ++ iosys_map_set_vaddr(map, vaddr); ++ return (vaddr == NULL) ? -ENOMEM : 0; ++} ++ ++void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) ++{ ++ nulldisp_gem_vunmap(obj, map->vaddr); ++ iosys_map_clear(map); ++} ++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) ++void *nulldisp_gem_prime_vmap(struct drm_gem_object *obj) ++{ ++ return nulldisp_gem_vmap(obj); ++} ++ ++void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) ++{ ++ nulldisp_gem_vunmap(obj, vaddr); ++} ++#else ++int nulldisp_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) ++{ ++ void *vaddr = nulldisp_gem_vmap(obj); ++ ++ dma_buf_map_set_vaddr(map, vaddr); ++ return (vaddr == NULL) ? 
-ENOMEM : 0; ++} ++ ++void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) ++{ ++ nulldisp_gem_vunmap(obj, map->vaddr); ++ dma_buf_map_clear(map); ++} ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) */ ++ ++int nulldisp_gem_prime_mmap(struct drm_gem_object *obj, ++ struct vm_area_struct *vma) ++{ ++ int err; ++ ++ mutex_lock(&obj->dev->struct_mutex); ++ err = nulldisp_gem_object_get_pages(obj); ++ if (!err) ++ err = drm_gem_mmap_obj(obj, obj->size, vma); ++ mutex_unlock(&obj->dev->struct_mutex); ++ ++ return err; ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++struct dma_resv * ++nulldisp_gem_prime_res_obj(struct drm_gem_object *obj) ++{ ++ struct nulldisp_gem_object *nulldisp_obj = to_nulldisp_obj(obj); ++ ++ return nulldisp_obj->resv; ++} ++#endif ++ ++int nulldisp_gem_object_mmap_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file) ++{ ++ struct drm_nulldisp_gem_mmap *args = ++ (struct drm_nulldisp_gem_mmap *)data; ++ ++ if (args->pad) { ++ DRM_ERROR("invalid pad (this should always be 0)\n"); ++ return -EINVAL; ++ } ++ ++ if (args->offset) { ++ DRM_ERROR("invalid offset (this should always be 0)\n"); ++ return -EINVAL; ++ } ++ ++ return nulldisp_gem_dumb_map_offset(file, dev, args->handle, ++ &args->offset); ++} ++ ++int nulldisp_gem_object_cpu_prep_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file) ++{ ++ struct drm_nulldisp_gem_cpu_prep *args = ++ (struct drm_nulldisp_gem_cpu_prep *)data; ++ ++ struct drm_gem_object *obj; ++ struct nulldisp_gem_object *nulldisp_obj; ++ bool write = !!(args->flags & NULLDISP_GEM_CPU_PREP_WRITE); ++ bool wait = !(args->flags & NULLDISP_GEM_CPU_PREP_NOWAIT); ++ int err; ++ ++ if (args->flags & ~(NULLDISP_GEM_CPU_PREP_READ | ++ NULLDISP_GEM_CPU_PREP_WRITE | ++ NULLDISP_GEM_CPU_PREP_NOWAIT)) { ++ DRM_ERROR("invalid flags: %#08x\n", args->flags); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ obj = drm_gem_object_lookup(file, args->handle); ++ if (!obj) { ++ err = -ENOENT; ++ goto exit_unlock; ++ } ++ ++ nulldisp_obj = to_nulldisp_obj(obj); ++ ++ if (nulldisp_obj->cpu_prep) { ++ err = -EBUSY; ++ goto exit_unref; ++ } ++ ++ if (wait) { ++ long lerr; ++ ++ lerr = dma_resv_wait_timeout(nulldisp_obj->resv, ++ write, ++ true, ++ 30 * HZ); ++ ++ /* Remap return value (0 indicates busy state, > 0 success) */ ++ if (lerr > 0) ++ err = 0; ++ else if (!lerr) ++ err = -EBUSY; ++ else ++ err = lerr; ++ } else { ++ /* ++ * Remap return value (false indicates busy state, ++ * true success). 
++ */ ++ if (!dma_resv_test_signaled(nulldisp_obj->resv, ++ write)) ++ err = -EBUSY; ++ else ++ err = 0; ++ } ++ ++ if (!err) ++ nulldisp_obj->cpu_prep = true; ++exit_unref: ++ drm_gem_object_put(obj); ++exit_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return err; ++} ++ ++int nulldisp_gem_object_cpu_fini_ioctl(struct drm_device *dev, void *data, ++ struct drm_file *file) ++{ ++ struct drm_nulldisp_gem_cpu_fini *args = ++ (struct drm_nulldisp_gem_cpu_fini *)data; ++ ++ struct drm_gem_object *obj; ++ struct nulldisp_gem_object *nulldisp_obj; ++ int err; ++ ++ if (args->pad) { ++ DRM_ERROR("invalid pad (this should always be 0)\n"); ++ return -EINVAL; ++ } ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ obj = drm_gem_object_lookup(file, args->handle); ++ if (!obj) { ++ err = -ENOENT; ++ goto exit_unlock; ++ } ++ ++ nulldisp_obj = to_nulldisp_obj(obj); ++ ++ if (!nulldisp_obj->cpu_prep) { ++ err = -EINVAL; ++ goto exit_unref; ++ } ++ ++ nulldisp_obj->cpu_prep = false; ++ err = 0; ++exit_unref: ++ drm_gem_object_put(obj); ++exit_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return err; ++} ++ ++static int nulldisp_gem_object_create_priv(struct drm_file *file, ++ struct drm_device *dev, ++ u64 size, ++ u32 *handle) ++{ ++ struct nulldisp_gem_object *nulldisp_obj; ++ struct drm_gem_object *obj; ++ struct address_space *mapping; ++ int err; ++ ++ nulldisp_obj = kzalloc(sizeof(*nulldisp_obj), GFP_KERNEL); ++ if (!nulldisp_obj) ++ return -ENOMEM; ++ ++ obj = &nulldisp_obj->base; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) ++ obj->funcs = &nulldisp_gem_funcs; ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) */ ++ ++ err = drm_gem_object_init(dev, obj, size); ++ if (err) { ++ kfree(nulldisp_obj); ++ return err; ++ } ++ ++ mapping = file_inode(obj->filp)->i_mapping; ++ mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32 | __GFP_NORETRY); ++ ++ err = drm_gem_handle_create(file, obj, handle); ++ if (err) ++ goto exit; ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ dma_resv_init(&nulldisp_obj->_resv); ++ nulldisp_obj->resv = &nulldisp_obj->_resv; ++#else ++ nulldisp_obj->resv = nulldisp_obj->base.resv; ++#endif ++ ++exit: ++ drm_gem_object_put(obj); ++ return err; ++} ++ ++int nulldisp_gem_object_create_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file) ++{ ++ struct drm_nulldisp_gem_create *args = data; ++ u32 handle; ++ int err; ++ u64 aligned_size; ++ ++ if (args->flags) { ++ DRM_ERROR("invalid flags: %#08x\n", args->flags); ++ return -EINVAL; ++ } ++ ++ if (args->handle) { ++ DRM_ERROR("invalid handle (this should always be 0)\n"); ++ return -EINVAL; ++ } ++ ++ aligned_size = PAGE_ALIGN(args->size); ++ ++ err = nulldisp_gem_object_create_priv(file, dev, aligned_size, &handle); ++ if (!err) ++ args->handle = handle; ++ ++ return err; ++} ++ ++int nulldisp_gem_dumb_create(struct drm_file *file, ++ struct drm_device *dev, ++ struct drm_mode_create_dumb *args) ++{ ++ u32 handle; ++ u32 pitch; ++ size_t size; ++ int err; ++ ++ pitch = args->width * (ALIGN(args->bpp, 8) >> 3); ++ size = PAGE_ALIGN(pitch * args->height); ++ ++ err = nulldisp_gem_object_create_priv(file, dev, size, &handle); ++ if (!err) { ++ args->handle = handle; ++ args->pitch = pitch; ++ args->size = size; ++ } ++ ++ return err; ++} ++ ++int nulldisp_gem_dumb_map_offset(struct drm_file *file, ++ struct drm_device *dev, ++ uint32_t handle, ++ uint64_t *offset) ++{ ++ struct drm_gem_object *obj; ++ int err; ++ ++ mutex_lock(&dev->struct_mutex); ++ ++ obj = drm_gem_object_lookup(file, 
handle); ++ if (!obj) { ++ err = -ENOENT; ++ goto exit_unlock; ++ } ++ ++ err = drm_gem_create_mmap_offset(obj); ++ if (err) ++ goto exit_obj_unref; ++ ++ *offset = drm_vma_node_offset_addr(&obj->vma_node); ++ ++exit_obj_unref: ++ drm_gem_object_put(obj); ++exit_unlock: ++ mutex_unlock(&dev->struct_mutex); ++ return err; ++} ++ ++struct dma_resv *nulldisp_gem_get_resv(struct drm_gem_object *obj) ++{ ++ return (to_nulldisp_obj(obj)->resv); ++} +diff --git a/drivers/gpu/drm/img-rogue/drm_nulldisp_gem.h b/drivers/gpu/drm/img-rogue/drm_nulldisp_gem.h +new file mode 100644 +index 000000000000..6a991c6e1fc8 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/drm_nulldisp_gem.h +@@ -0,0 +1,160 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#if !defined(__DRM_NULLDISP_H__) ++#define __DRM_NULLDISP_H__ ++ ++#include <linux/version.h> ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#include ++#include ++ ++struct dma_buf_attachment; ++struct vm_area_struct; ++struct vm_fault; ++#else ++#include <drm/drmP.h> ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) ++extern const struct drm_gem_object_funcs nulldisp_gem_funcs; ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0) */ ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++#include <drm/drm_gem.h> ++#endif ++ ++int nulldisp_gem_object_get_pages(struct drm_gem_object *obj); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) ++typedef int vm_fault_t; ++#endif ++vm_fault_t nulldisp_gem_object_vm_fault(struct vm_fault *vmf); ++#else ++int nulldisp_gem_object_vm_fault(struct vm_area_struct *vma, ++ struct vm_fault *vmf); ++#endif ++ ++void nulldisp_gem_vm_open(struct vm_area_struct *vma); ++ ++void nulldisp_gem_vm_close(struct vm_area_struct *vma); ++ ++void nulldisp_gem_object_free(struct drm_gem_object *obj); ++ ++int nulldisp_gem_prime_pin(struct drm_gem_object *obj); ++ ++void nulldisp_gem_prime_unpin(struct drm_gem_object *obj); ++ ++struct sg_table *nulldisp_gem_prime_get_sg_table(struct drm_gem_object *obj); ++ ++struct drm_gem_object * ++nulldisp_gem_prime_import_sg_table(struct drm_device *dev, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ struct dma_buf_attachment *attach, ++#else ++ size_t size, ++#endif ++ struct sg_table *sgt); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++struct dma_buf *nulldisp_gem_prime_export(struct drm_device *dev, ++ struct drm_gem_object *obj, ++ int flags); ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++int nulldisp_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); ++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) ++void *nulldisp_gem_prime_vmap(struct drm_gem_object *obj); ++#else ++int nulldisp_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) */ ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); ++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) ++void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); ++#else ++void nulldisp_gem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0) */ ++ ++int nulldisp_gem_prime_mmap(struct drm_gem_object *obj, ++ struct vm_area_struct *vma); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++struct dma_resv * ++nulldisp_gem_prime_res_obj(struct drm_gem_object *obj); ++#endif ++ ++int nulldisp_gem_dumb_create(struct drm_file *file, ++ struct drm_device *dev, ++ struct drm_mode_create_dumb *args); ++ ++int nulldisp_gem_dumb_map_offset(struct drm_file *file, ++ struct drm_device *dev, ++ uint32_t handle, ++ uint64_t *offset); ++ ++/* internal interfaces */ ++struct dma_resv *nulldisp_gem_get_resv(struct drm_gem_object *obj); ++ ++int nulldisp_gem_object_mmap_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file); ++ ++int nulldisp_gem_object_cpu_prep_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file); ++ ++int nulldisp_gem_object_cpu_fini_ioctl(struct drm_device *dev, ++ void *data, ++ struct drm_file *file); ++ ++int nulldisp_gem_object_create_ioctl(struct 
drm_device *dev, ++ void *data, ++ struct drm_file *file); ++ ++#endif /* !defined(__DRM_NULLDISP_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/drm_nulldisp_netlink.c b/drivers/gpu/drm/img-rogue/drm_nulldisp_netlink.c +new file mode 100644 +index 000000000000..8c95ca04dab4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/drm_nulldisp_netlink.c +@@ -0,0 +1,710 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#include ++#include ++#include ++#else ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "drm_netlink_gem.h" ++#include "drm_nulldisp_drv.h" ++#include "drm_nulldisp_netlink.h" ++#include "kernel_compatibility.h" ++ ++#include "netlink.h" ++ ++struct nlpvrdpy { ++ atomic_t connected; ++ struct net *net; ++ u32 dst_portid; ++ struct drm_device *dev; ++ nlpvrdpy_disconnect_cb disconnect_cb; ++ void *disconnect_cb_data; ++ nlpvrdpy_flipped_cb flipped_cb; ++ void *flipped_cb_data; ++ nlpvrdpy_copied_cb copied_cb; ++ void *copied_cb_data; ++ struct mutex mutex; ++ struct list_head nl_list; ++ bool gem_names_required; ++}; ++#define NLPVRDPY_MINOR(nlpvrdpy) \ ++ ((unsigned int)((nlpvrdpy)->dev->primary->index)) ++ ++/* Command internal flags */ ++#define NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED 0x00000001 ++#define NLPVRDPY_CIF_NLPVRDPY 0x00000002 ++ ++static LIST_HEAD(nlpvrdpy_list); ++static DEFINE_MUTEX(nlpvrdpy_list_mutex); ++ ++static inline void nlpvrdpy_lock(struct nlpvrdpy *nlpvrdpy) ++{ ++ mutex_lock(&nlpvrdpy->mutex); ++} ++ ++static inline void nlpvrdpy_unlock(struct nlpvrdpy *nlpvrdpy) ++{ ++ mutex_unlock(&nlpvrdpy->mutex); ++} ++ ++struct nlpvrdpy *nlpvrdpy_create(struct drm_device *dev, ++ nlpvrdpy_disconnect_cb disconnect_cb, ++ void *disconnect_cb_data, ++ nlpvrdpy_flipped_cb flipped_cb, ++ void *flipped_cb_data, ++ nlpvrdpy_copied_cb copied_cb, ++ void *copied_cb_data) ++{ ++ struct nlpvrdpy *nlpvrdpy = kzalloc(sizeof(*nlpvrdpy), GFP_KERNEL); ++ ++ if (!nlpvrdpy) ++ return NULL; ++ ++ mutex_init(&nlpvrdpy->mutex); ++ INIT_LIST_HEAD(&nlpvrdpy->nl_list); ++ ++ atomic_set(&nlpvrdpy->connected, 0); ++ ++ nlpvrdpy->dev = dev; ++ nlpvrdpy->disconnect_cb = disconnect_cb; ++ nlpvrdpy->disconnect_cb_data = disconnect_cb_data; ++ nlpvrdpy->flipped_cb = flipped_cb; ++ nlpvrdpy->flipped_cb_data = flipped_cb_data; ++ nlpvrdpy->copied_cb = copied_cb; ++ nlpvrdpy->copied_cb_data = copied_cb_data; ++ ++ mutex_lock(&nlpvrdpy_list_mutex); ++ list_add_tail(&nlpvrdpy->nl_list, &nlpvrdpy_list); ++ mutex_unlock(&nlpvrdpy_list_mutex); ++ ++ return nlpvrdpy; ++} ++ ++void nlpvrdpy_destroy(struct nlpvrdpy *nlpvrdpy) ++{ ++ if (!nlpvrdpy) ++ return; ++ ++ mutex_lock(&nlpvrdpy_list_mutex); ++ nlpvrdpy_lock(nlpvrdpy); ++ list_del(&nlpvrdpy->nl_list); ++ nlpvrdpy_unlock(nlpvrdpy); ++ mutex_unlock(&nlpvrdpy_list_mutex); ++ ++ mutex_destroy(&nlpvrdpy->mutex); ++ ++ kfree(nlpvrdpy); ++} ++ ++static struct nlpvrdpy *nlpvrdpy_lookup(u32 minor) ++{ ++ struct nlpvrdpy *nlpvrdpy = NULL; ++ struct nlpvrdpy *iter; ++ ++ mutex_lock(&nlpvrdpy_list_mutex); ++ list_for_each_entry(iter, &nlpvrdpy_list, nl_list) { ++ if (NLPVRDPY_MINOR(iter) == minor) { ++ nlpvrdpy = iter; ++ nlpvrdpy_lock(nlpvrdpy); ++ break; ++ } ++ } ++ mutex_unlock(&nlpvrdpy_list_mutex); ++ ++ return nlpvrdpy; ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++static int nlpvrdpy_pre_cmd(const struct genl_split_ops *ops, ++ struct sk_buff *skb, ++ struct genl_info *info) ++#else ++static int nlpvrdpy_pre_cmd(const struct genl_ops *ops, ++ struct sk_buff *skb, ++ struct genl_info *info) ++#endif ++{ ++ struct nlattr **attrs = info->attrs; ++ struct nlpvrdpy *nlpvrdpy = NULL; ++ int ret; ++ ++ if (ops->internal_flags & NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED) { ++ if (!(ops->flags & GENL_ADMIN_PERM)) ++ return -EINVAL; ++ } ++ ++ if (ops->internal_flags & 
(NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED | ++ NLPVRDPY_CIF_NLPVRDPY)) { ++ u32 minor; ++ ++ if (!attrs[NLPVRDPY_ATTR_MINOR]) ++ return -EINVAL; ++ ++ minor = nla_get_u32(attrs[NLPVRDPY_ATTR_MINOR]); ++ ++ nlpvrdpy = nlpvrdpy_lookup(minor); ++ if (!nlpvrdpy) ++ return -ENODEV; ++ ++ if (ops->internal_flags & NLPVRDPY_CIF_NLPVRDPY) { ++ if (!atomic_read(&nlpvrdpy->connected)) { ++ ret = -ENOTCONN; ++ goto err_unlock; ++ } ++ if ((nlpvrdpy->net != genl_info_net(info)) || ++ (nlpvrdpy->dst_portid != info->snd_portid)) { ++ ret = -EPROTO; ++ goto err_unlock; ++ } ++ } ++ ++ info->user_ptr[0] = nlpvrdpy; ++ } ++ ++ ret = 0; ++ ++err_unlock: ++ nlpvrdpy_unlock(nlpvrdpy); ++ return ret; ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++static void nlpvrdpy_post_cmd(const struct genl_split_ops *ops, ++ struct sk_buff *skb, ++ struct genl_info *info) ++#else ++static void nlpvrdpy_post_cmd(const struct genl_ops *ops, ++ struct sk_buff *skb, ++ struct genl_info *info) ++#endif ++{ ++} ++ ++static struct genl_family nlpvrdpy_family = { ++ .name = "nlpvrdpy", ++ .version = 1, ++ .maxattr = NLPVRDPY_ATTR_MAX, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0)) ++ .policy = nlpvrdpy_policy, ++#endif ++ .pre_doit = &nlpvrdpy_pre_cmd, ++ .post_doit = &nlpvrdpy_post_cmd ++}; ++ ++/* Must be called with the struct nlpvrdpy mutex held */ ++static int nlpvrdpy_send_msg_locked(struct nlpvrdpy *nlpvrdpy, ++ struct sk_buff *msg) ++{ ++ int err; ++ ++ if (atomic_read(&nlpvrdpy->connected)) { ++ err = genlmsg_unicast(nlpvrdpy->net, msg, nlpvrdpy->dst_portid); ++ if (err == -ECONNREFUSED) ++ atomic_set(&nlpvrdpy->connected, 0); ++ } else { ++ err = -ENOTCONN; ++ nlmsg_free(msg); ++ } ++ ++ return err; ++} ++ ++static int nlpvrdpy_send_msg(struct nlpvrdpy *nlpvrdpy, struct sk_buff *msg) ++{ ++ int err; ++ ++ nlpvrdpy_lock(nlpvrdpy); ++ err = nlpvrdpy_send_msg_locked(nlpvrdpy, msg); ++ nlpvrdpy_unlock(nlpvrdpy); ++ ++ return err; ++} ++ ++void nlpvrdpy_send_disconnect(struct nlpvrdpy *nlpvrdpy) ++{ ++ struct sk_buff *msg; ++ void *hdr; ++ int err; ++ ++ if (!atomic_read(&nlpvrdpy->connected)) ++ return; ++ ++ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); ++ if (!msg) ++ return; ++ ++ hdr = genlmsg_put(msg, nlpvrdpy->dst_portid, 0, ++ &nlpvrdpy_family, 0, NLPVRDPY_CMD_DISCONNECT); ++ if (!hdr) ++ goto err_msg_free; ++ ++ err = nla_put_u32(msg, NLPVRDPY_ATTR_MINOR, NLPVRDPY_MINOR(nlpvrdpy)); ++ if (err) ++ goto err_msg_free; ++ ++ genlmsg_end(msg, hdr); ++ ++ nlpvrdpy_lock(nlpvrdpy); ++ ++ (void) nlpvrdpy_send_msg_locked(nlpvrdpy, msg); ++ ++ atomic_set(&nlpvrdpy->connected, 0); ++ nlpvrdpy->net = NULL; ++ nlpvrdpy->dst_portid = 0; ++ ++ nlpvrdpy_unlock(nlpvrdpy); ++ ++ return; ++ ++err_msg_free: ++ nlmsg_free(msg); ++} ++ ++static int nlpvrdpy_get_offsets_and_sizes(struct drm_framebuffer *fb, ++ struct drm_gem_object **objs, ++ u64 *addr, u64 *size) ++{ ++ int i; ++ ++ for (i = 0; i < nulldisp_drm_fb_num_planes(fb); i++) { ++ int err; ++ struct drm_gem_object *obj = objs[i]; ++ ++ err = drm_gem_create_mmap_offset(obj); ++ if (err) { ++ DRM_ERROR( ++ "Failed to get mmap offset for buffer[%d] = %p\n", ++ i, obj); ++ return err; ++ } ++ ++ addr[i] = drm_vma_node_offset_addr(&obj->vma_node); ++ size[i] = obj->size; ++ } ++ ++ return 0; ++} ++ ++static int nlpvrdpy_put_fb_attributes(struct sk_buff *msg, ++ struct drm_framebuffer *fb, ++ struct nlpvrdpy *nlpvrdpy, ++ struct drm_gem_object **objs) ++{ ++ int i, err; ++ const int num_planes = nulldisp_drm_fb_num_planes(fb); ++ u64 
plane_addr[NLPVRDPY_MAX_NUM_PLANES], ++ plane_size[NLPVRDPY_MAX_NUM_PLANES]; ++ ++ err = nlpvrdpy_get_offsets_and_sizes(fb, objs, &plane_addr[0], &plane_size[0]); ++ if (err) { ++ pr_err("%s: nlpvrdpy_get_offsets_and_sizes failed", __func__); ++ return err; ++ } ++ ++ err = nla_put_u32(msg, NLPVRDPY_ATTR_MINOR, NLPVRDPY_MINOR(nlpvrdpy)); ++ if (err) { ++ pr_err("%s: nla_put_u32 NLPVRDPY_ATTR_MINOR failed", __func__); ++ return err; ++ } ++ ++ err = nla_put_u8(msg, NLPVRDPY_ATTR_NUM_PLANES, num_planes); ++ if (err) { ++ pr_err("%s: nla_put_u8 NLPVRDPY_ATTR_NUM_PLANES failed", __func__); ++ return err; ++ } ++ ++ err = nla_put_u32(msg, NLPVRDPY_ATTR_WIDTH, fb->width); ++ if (err) { ++ pr_err("%s: nla_put_u32 NLPVRDPY_ATTR_WIDTH failed", __func__); ++ return err; ++ } ++ ++ err = nla_put_u32(msg, NLPVRDPY_ATTR_HEIGHT, fb->height); ++ if (err) { ++ pr_err("%s: nla_put_u32 NLPVRDPY_ATTR_HEIGHT failed", __func__); ++ return err; ++ } ++ ++ err = nla_put_u32(msg, NLPVRDPY_ATTR_PIXFMT, nulldisp_drm_fb_format(fb)); ++ if (err) { ++ pr_err("%s: nla_put_u32 NLPVRDPY_ATTR_PIXFMT failed", ++ __func__); ++ return err; ++ } ++ ++ err = nla_put_u64_64bit(msg, NLPVRDPY_ATTR_FB_MODIFIER, ++ nulldisp_drm_fb_modifier(fb), NLPVRDPY_ATTR_PAD); ++ if (err) { ++ pr_err("%s: nla_put_u64_64bit NLPVRDPY_ATTR_FB_MODIFIER " ++ "NLPVRDPY_ATTR_PAD failed", __func__); ++ return err; ++ } ++ ++ /* IMG_COLORSPACE_BT601_CONFORMANT_RANGE */ ++ err = nla_put_u8(msg, NLPVRDPY_ATTR_YUV_CSC, 1); ++ if (err) { ++ pr_err("%s: nla_put_u8 NLPVRDPY_ATTR_YUV_CSC 1 failed", __func__); ++ return err; ++ } ++ ++ /* 8-bit per sample */ ++ err = nla_put_u8(msg, NLPVRDPY_ATTR_YUV_BPP, 8); ++ if (err) { ++ pr_err("%s: nla_put_u8 NLPVRDPY_ATTR_YUV_BPP 8 failed", __func__); ++ return err; ++ } ++ ++ for (i = 0; i < num_planes; i++) { ++ err = nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE(i, ADDR), ++ plane_addr[i], NLPVRDPY_ATTR_PAD); ++ if (err) { ++ pr_err("%s: nla_put_u64_64bit NLPVRDPY_ATTR_PLANE(%d, ADDR)" ++ " NLPVRDPY_ATTR_PAD failed", __func__, i); ++ return err; ++ } ++ ++ err = nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE(i, SIZE), ++ plane_size[i], NLPVRDPY_ATTR_PAD); ++ if (err) { ++ pr_err("%s: nla_put_u64_64bit NLPVRDPY_ATTR_PLANE(%d, SIZE)" ++ " NLPVRDPY_ATTR_PAD failed", __func__, i); ++ return err; ++ } ++ ++ err = nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE(i, OFFSET), ++ fb->offsets[i], NLPVRDPY_ATTR_PAD); ++ if (err) { ++ pr_err("%s: nla_put_u64_64bit NLPVRDPY_ATTR_PLANE(%d, OFFSET)" ++ " NLPVRDPY_ATTR_PAD failed", __func__, i); ++ return err; ++ } ++ ++ err = nla_put_u64_64bit(msg, NLPVRDPY_ATTR_PLANE(i, PITCH), ++ fb->pitches[i], NLPVRDPY_ATTR_PAD); ++ if (err) { ++ pr_err("%s: nla_put_u64_64bit NLPVRDPY_ATTR_PLANE(%d, PITCH)" ++ " NLPVRDPY_ATTR_PAD failed", __func__, i); ++ return err; ++ } ++ ++ err = nla_put_u32(msg, NLPVRDPY_ATTR_PLANE(i, GEM_OBJ_NAME), (u32)objs[i]->name); ++ if (err) { ++ pr_err("%s: nla_put_u32 NLPVRDPY_ATTR_PLANE(%d, GEM_OBJ_NAME)" ++ " failed", __func__, i); ++ return err; ++ } ++ } ++ ++ WARN_ONCE(num_planes > NLPVRDPY_MAX_NUM_PLANES, ++ "NLPVRDPY_MAX_NUM_PLANES = [%d], num_planes = [%d]\n", ++ NLPVRDPY_MAX_NUM_PLANES, num_planes); ++ ++ return 0; ++} ++ ++static int nlpvrdpy_name_gem_obj(struct drm_device *dev, ++ struct drm_gem_object *obj) ++{ ++ int ret; ++ ++ mutex_lock(&dev->object_name_lock); ++ if (!obj->name) { ++ ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL); ++ if (ret < 0) ++ goto exit_unlock; ++ ++ obj->name = ret; ++ } ++ ++ ret = 0; ++ ++exit_unlock: ++ 
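	/*
	 * Only object_name_lock is dropped here; the name persists on the
	 * object.  It is a global GEM ("flink") name, so the peer that
	 * receives it over netlink can import the buffer on its own fd.
	 * A sketch of the consumer side, assuming fd is an open handle on
	 * the same DRM node:
	 *
	 *   struct drm_gem_open args = { .name = name };
	 *   if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &args) == 0) {
	 *       // args.handle is a local handle to the shared buffer,
	 *       // args.size its size in bytes
	 *   }
	 */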
mutex_unlock(&dev->object_name_lock); ++ return ret; ++} ++ ++static int nlpvrdpy_name_gem_objs(struct drm_framebuffer *fb, ++ struct drm_gem_object **objs) ++{ ++ int i; ++ struct drm_device *dev = fb->dev; ++ ++ for (i = 0; i < nulldisp_drm_fb_num_planes(fb); i++) { ++ int err = nlpvrdpy_name_gem_obj(dev, objs[i]); ++ ++ if (err < 0) ++ return err; ++ } ++ ++ return 0; ++} ++ ++int nlpvrdpy_send_flip(struct nlpvrdpy *nlpvrdpy, ++ struct drm_framebuffer *fb, ++ struct drm_gem_object **objs) ++{ ++ struct sk_buff *msg; ++ void *hdr; ++ int err; ++ ++ if (!atomic_read(&nlpvrdpy->connected)) ++ return -ENOTCONN; ++ ++ if (nlpvrdpy->gem_names_required) { ++ err = nlpvrdpy_name_gem_objs(fb, objs); ++ if (err) ++ return err; ++ } ++ ++ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); ++ if (!msg) ++ return -ENOMEM; ++ ++ hdr = genlmsg_put(msg, nlpvrdpy->dst_portid, 0, ++ &nlpvrdpy_family, 0, NLPVRDPY_CMD_FLIP); ++ if (!hdr) { ++ err = -ENOMEM; ++ goto err_msg_free; ++ } ++ ++ err = nlpvrdpy_put_fb_attributes(msg, fb, nlpvrdpy, objs); ++ if (err) ++ goto err_msg_free; ++ ++ genlmsg_end(msg, hdr); ++ ++ return nlpvrdpy_send_msg(nlpvrdpy, msg); ++ ++err_msg_free: ++ nlmsg_free(msg); ++ return err; ++} ++ ++int nlpvrdpy_send_copy(struct nlpvrdpy *nlpvrdpy, ++ struct drm_framebuffer *fb, ++ struct drm_gem_object **objs) ++{ ++ struct sk_buff *msg; ++ void *hdr; ++ int err; ++ ++ if (!atomic_read(&nlpvrdpy->connected)) ++ return -ENOTCONN; ++ ++ if (nlpvrdpy->gem_names_required) { ++ err = nlpvrdpy_name_gem_objs(fb, objs); ++ if (err) ++ return err; ++ } ++ ++ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); ++ if (!msg) ++ return -ENOMEM; ++ ++ hdr = genlmsg_put(msg, nlpvrdpy->dst_portid, 0, ++ &nlpvrdpy_family, 0, NLPVRDPY_CMD_COPY); ++ if (!hdr) { ++ err = -ENOMEM; ++ goto err_msg_free; ++ } ++ ++ err = nlpvrdpy_put_fb_attributes(msg, fb, nlpvrdpy, objs); ++ if (err) ++ goto err_msg_free; ++ ++ genlmsg_end(msg, hdr); ++ ++ return nlpvrdpy_send_msg(nlpvrdpy, msg); ++ ++err_msg_free: ++ nlmsg_free(msg); ++ return err; ++} ++ ++static int nlpvrdpy_cmd_connect(struct sk_buff *skb, struct genl_info *info) ++{ ++ struct nlpvrdpy *nlpvrdpy = info->user_ptr[0]; ++ struct sk_buff *msg; ++ void *hdr; ++ int err; ++ ++ if (info->attrs[NLPVRDPY_ATTR_NAMING_REQUIRED]) ++ nlpvrdpy->gem_names_required = true; ++ ++ msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); ++ if (!msg) ++ return -ENOMEM; ++ ++ hdr = genlmsg_put_reply(msg, info, &nlpvrdpy_family, ++ 0, NLPVRDPY_CMD_CONNECTED); ++ if (!hdr) { ++ err = -ENOMEM; ++ goto err_msg_free; ++ } ++ ++ err = nla_put_string(msg, NLPVRDPY_ATTR_NAME, ++ nlpvrdpy->dev->driver->name); ++ if (err) ++ goto err_msg_free; ++ ++ genlmsg_end(msg, hdr); ++ ++ err = genlmsg_reply(msg, info); ++ ++ if (!err) { ++ nlpvrdpy_lock(nlpvrdpy); ++ ++ nlpvrdpy->net = genl_info_net(info); ++ nlpvrdpy->dst_portid = info->snd_portid; ++ atomic_set(&nlpvrdpy->connected, 1); ++ ++ nlpvrdpy_unlock(nlpvrdpy); ++ } ++ ++ return err; ++ ++err_msg_free: ++ nlmsg_free(msg); ++ return err; ++} ++ ++static int nlpvrdpy_cmd_disconnect(struct sk_buff *skb, struct genl_info *info) ++{ ++ struct nlpvrdpy *nlpvrdpy = info->user_ptr[0]; ++ ++ atomic_set(&nlpvrdpy->connected, 0); ++ ++ if (nlpvrdpy->disconnect_cb) ++ nlpvrdpy->disconnect_cb(nlpvrdpy->disconnect_cb_data); ++ ++ return 0; ++} ++ ++static int nlpvrdpy_cmd_flipped(struct sk_buff *skb, struct genl_info *info) ++{ ++ struct nlpvrdpy *nlpvrdpy = info->user_ptr[0]; ++ ++ return (nlpvrdpy->flipped_cb) ? 
++ nlpvrdpy->flipped_cb(nlpvrdpy->flipped_cb_data) : ++ 0; ++} ++ ++static int nlpvrdpy_cmd_copied(struct sk_buff *skb, struct genl_info *info) ++{ ++ struct nlpvrdpy *nlpvrdpy = info->user_ptr[0]; ++ ++ return (nlpvrdpy->copied_cb) ? ++ nlpvrdpy->copied_cb(nlpvrdpy->copied_cb_data) : ++ 0; ++} ++ ++static struct genl_ops nlpvrdpy_ops[] = { ++ { ++ .cmd = NLPVRDPY_CMD_CONNECT, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ .policy = nlpvrdpy_policy, ++#endif ++ .doit = nlpvrdpy_cmd_connect, ++ .flags = GENL_ADMIN_PERM, ++ .internal_flags = NLPVRDPY_CIF_NLPVRDPY_NOT_CONNECTED ++ }, ++ { ++ .cmd = NLPVRDPY_CMD_DISCONNECT, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ .policy = nlpvrdpy_policy, ++#endif ++ .doit = nlpvrdpy_cmd_disconnect, ++ .flags = 0, ++ .internal_flags = NLPVRDPY_CIF_NLPVRDPY ++ }, ++ { ++ .cmd = NLPVRDPY_CMD_FLIPPED, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ .policy = nlpvrdpy_policy, ++#endif ++ .doit = nlpvrdpy_cmd_flipped, ++ .flags = 0, ++ .internal_flags = NLPVRDPY_CIF_NLPVRDPY ++ }, ++ { ++ .cmd = NLPVRDPY_CMD_COPIED, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ++ .policy = nlpvrdpy_policy, ++#endif ++ .doit = nlpvrdpy_cmd_copied, ++ .flags = 0, ++ .internal_flags = NLPVRDPY_CIF_NLPVRDPY ++ } ++}; ++ ++int nlpvrdpy_register(void) ++{ ++ nlpvrdpy_family.module = THIS_MODULE; ++ nlpvrdpy_family.ops = nlpvrdpy_ops; ++ nlpvrdpy_family.n_ops = ARRAY_SIZE(nlpvrdpy_ops); ++ ++ return genl_register_family(&nlpvrdpy_family); ++} ++ ++int nlpvrdpy_unregister(void) ++{ ++ return genl_unregister_family(&nlpvrdpy_family); ++} +diff --git a/drivers/gpu/drm/img-rogue/drm_nulldisp_netlink.h b/drivers/gpu/drm/img-rogue/drm_nulldisp_netlink.h +new file mode 100644 +index 000000000000..1cea24085bff +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/drm_nulldisp_netlink.h +@@ -0,0 +1,77 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. 
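For reference, the generic netlink exchange implemented above is a simple handshake: user space sends NLPVRDPY_CMD_CONNECT (registered with GENL_ADMIN_PERM, so CAP_NET_ADMIN is required), the kernel answers with NLPVRDPY_CMD_CONNECTED carrying the driver name, and thereafter the kernel pushes NLPVRDPY_CMD_FLIP/NLPVRDPY_CMD_COPY messages that the client acknowledges with FLIPPED/COPIED. The user-space sketch below uses libnl-genl-3; the family name "nlpvrdpy" is an assumption, since the uapi header that defines the NLPVRDPY_CMD_*/NLPVRDPY_ATTR_* constants is not part of this hunk.

#include <errno.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

/* Sketch only: 'sock' must already have been genl_connect()ed, and
 * NLPVRDPY_CMD_CONNECT must come from the (not shown) uapi header. */
static int nulldisp_connect(struct nl_sock *sock)
{
	struct nl_msg *msg;
	int family, err;

	family = genl_ctrl_resolve(sock, "nlpvrdpy");	/* assumed family name */
	if (family < 0)
		return family;

	msg = nlmsg_alloc();
	if (!msg)
		return -ENOMEM;

	if (!genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
			 NLPVRDPY_CMD_CONNECT, 1)) {
		nlmsg_free(msg);
		return -ENOMEM;
	}

	/* The kernel handler replies with NLPVRDPY_CMD_CONNECTED and, from
	 * then on, records our port id as the flip/copy destination. */
	err = nl_send_auto(sock, msg);
	nlmsg_free(msg);
	return err < 0 ? err : 0;
}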
++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef __DRM_NULLDISP_NETLINK_H__ ++#define __DRM_NULLDISP_NETLINK_H__ ++ ++#include ++#include ++ ++typedef void (*nlpvrdpy_disconnect_cb)(void *data); ++typedef int (*nlpvrdpy_flipped_cb)(void *data); ++typedef int (*nlpvrdpy_copied_cb)(void *data); ++ ++struct nlpvrdpy *nlpvrdpy_create(struct drm_device *dev, ++ nlpvrdpy_disconnect_cb disconnect_cb, ++ void *disconnect_cb_data, ++ nlpvrdpy_flipped_cb flipped_cb, ++ void *flipped_cb_data, ++ nlpvrdpy_copied_cb copied_cb, ++ void *copied_cb_data); ++ ++void nlpvrdpy_destroy(struct nlpvrdpy *nlpvrdpy); ++ ++int nlpvrdpy_send_flip(struct nlpvrdpy *nlpvrdpy, ++ struct drm_framebuffer *fb, ++ struct drm_gem_object **objs); ++ ++int nlpvrdpy_send_copy(struct nlpvrdpy *nlpvrdpy, ++ struct drm_framebuffer *fb, ++ struct drm_gem_object **objs); ++ ++void nlpvrdpy_send_disconnect(struct nlpvrdpy *nlpvrdpy); ++ ++int nlpvrdpy_register(void); ++ ++int nlpvrdpy_unregister(void); ++ ++#endif /* __DRM_NULLDISP_NETLINK_H__ */ +diff --git a/drivers/gpu/drm/img-rogue/env_connection.h b/drivers/gpu/drm/img-rogue/env_connection.h +new file mode 100644 +index 000000000000..2a6c7d05c412 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/env_connection.h +@@ -0,0 +1,92 @@ ++/*************************************************************************/ /*! ++@File ++@Title Server side connection management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Linux specific server side connection management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
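The callback typedefs and nlpvrdpy_create() declared above are the whole contract a display driver has to satisfy. The sketch below is illustrative only: struct my_crtc and the handler bodies are invented, and it assumes nlpvrdpy_create() returns NULL on failure, which the declaration alone does not guarantee.

#include <linux/errno.h>
#include <drm/drm_device.h>
#include "drm_nulldisp_netlink.h"

struct my_crtc {			/* hypothetical driver state */
	struct nlpvrdpy *nlpvrdpy;
};

static void my_disconnect(void *data)
{
	/* The remote client sent NLPVRDPY_CMD_DISCONNECT: stop sending flips */
}

static int my_flipped(void *data)
{
	/* Remote side consumed the last NLPVRDPY_CMD_FLIP: a real driver
	 * would complete the pending page flip for this CRTC here */
	return 0;
}

static int my_copied(void *data)
{
	return 0;
}

static int my_bind_netlink(struct drm_device *ddev, struct my_crtc *crtc)
{
	crtc->nlpvrdpy = nlpvrdpy_create(ddev,
					 my_disconnect, crtc,
					 my_flipped, crtc,
					 my_copied, crtc);
	return crtc->nlpvrdpy ? 0 : -ENOMEM;	/* NULL-on-failure assumed */
}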
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(ENV_CONNECTION_H) ++#define ENV_CONNECTION_H ++ ++#include ++#include ++#include ++ ++#include "handle.h" ++#include "pvr_debug.h" ++#include "device.h" ++ ++#if defined(SUPPORT_ION) ++#include PVR_ANDROID_ION_HEADER ++#include "ion_sys.h" ++#include "allocmem.h" ++#endif ++ ++typedef struct _ENV_CONNECTION_PRIVATE_DATA_ ++{ ++ PVRSRV_DEVICE_NODE *psDevNode; ++} ENV_CONNECTION_PRIVATE_DATA; ++ ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++#define ION_CLIENT_NAME_SIZE 50 ++ ++typedef struct _ENV_ION_CONNECTION_DATA_ ++{ ++ IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE]; ++ struct ion_device *psIonDev; ++ struct ion_client *psIonClient; ++} ENV_ION_CONNECTION_DATA; ++#endif ++ ++typedef struct _ENV_CONNECTION_DATA_ ++{ ++ pid_t owner; ++ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) ++ void *pvPvrSyncPrivateData; ++#endif ++ ++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) ++ ENV_ION_CONNECTION_DATA *psIonData; ++#endif ++} ENV_CONNECTION_DATA; ++ ++#endif /* !defined(ENV_CONNECTION_H) */ +diff --git a/drivers/gpu/drm/img-rogue/event.c b/drivers/gpu/drm/img-rogue/event.c +new file mode 100644 +index 000000000000..aec0fc8a02dd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/event.c +@@ -0,0 +1,514 @@ ++/*************************************************************************/ /*! ++@File ++@Title Event Object ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include
++#include
++#include
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)
++#include <linux/sched/signal.h>
++#endif
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "img_types.h"
++#include "img_defs.h"
++#include "pvrsrv_error.h"
++#include "allocmem.h"
++#include "event.h"
++#include "pvr_debug.h"
++#include "pvrsrv.h"
++#include "pvr_bridge_k.h"
++
++#include "osfunc.h"
++
++/* Uncomment to enable event object stats that are useful for debugging.
++ * The stats can be retrieved at any time (during the lifetime of the event
++ * object) using the OSEventObjectDumpDebugInfo API */
++// #define LINUX_EVENT_OBJECT_STATS
++
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
++{
++	rwlock_t sLock;
++	/* Counts how many times the event object was signalled, i.e. how many
++	 * times LinuxEventObjectSignal() was called on a given event object.
++	 * Used for detecting pending signals.
++	 * Note that this is in no way related to OS signals. */
++	atomic_t sEventSignalCount;
++	struct list_head sList;
++} PVRSRV_LINUX_EVENT_OBJECT_LIST;
++
++
++typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
++{
++	IMG_UINT32 ui32EventSignalCountPrevious;
++#if defined(DEBUG)
++	IMG_UINT ui32Stats;
++#endif
++
++#ifdef LINUX_EVENT_OBJECT_STATS
++	POS_LOCK hLock;
++	IMG_UINT32 ui32ScheduleAvoided;
++	IMG_UINT32 ui32ScheduleCalled;
++	IMG_UINT32 ui32ScheduleSleptFully;
++	IMG_UINT32 ui32ScheduleSleptPartially;
++	IMG_UINT32 ui32ScheduleReturnedImmediately;
++#endif
++	wait_queue_head_t sWait;
++	struct list_head sList;
++	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
++} PVRSRV_LINUX_EVENT_OBJECT;
++
++/*!
++****************************************************************************** ++ ++ @Function LinuxEventObjectListCreate ++ ++ @Description ++ ++ Linux wait object list creation ++ ++ @Output hOSEventKM : Pointer to the event object list handle ++ ++ @Return PVRSRV_ERROR : Error code ++ ++******************************************************************************/ ++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList) ++{ ++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList; ++ ++ psEvenObjectList = OSAllocMem(sizeof(*psEvenObjectList)); ++ if (psEvenObjectList == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list")); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ INIT_LIST_HEAD(&psEvenObjectList->sList); ++ ++ rwlock_init(&psEvenObjectList->sLock); ++ atomic_set(&psEvenObjectList->sEventSignalCount, 0); ++ ++ *phEventObjectList = (IMG_HANDLE *) psEvenObjectList; ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function LinuxEventObjectListDestroy ++ ++ @Description ++ ++ Linux wait object list destruction ++ ++ @Input hOSEventKM : Event object list handle ++ ++ @Return PVRSRV_ERROR : Error code ++ ++******************************************************************************/ ++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList) ++{ ++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList; ++ ++ if (psEvenObjectList) ++ { ++ if (!list_empty(&psEvenObjectList->sList)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty")); ++ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; ++ } ++ OSFreeMem(psEvenObjectList); ++ /*not nulling pointer, copy on stack*/ ++ } ++ return PVRSRV_OK; ++} ++ ++ ++/*! ++****************************************************************************** ++ ++ @Function LinuxEventObjectDelete ++ ++ @Description ++ ++ Linux wait object removal ++ ++ @Input hOSEventObject : Event object handle ++ ++ @Return PVRSRV_ERROR : Error code ++ ++******************************************************************************/ ++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject) ++{ ++ if (hOSEventObject) ++ { ++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject; ++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList; ++ ++ write_lock_bh(&psLinuxEventObjectList->sLock); ++ list_del(&psLinuxEventObject->sList); ++ write_unlock_bh(&psLinuxEventObjectList->sLock); ++ ++#ifdef LINUX_EVENT_OBJECT_STATS ++ OSLockDestroy(psLinuxEventObject->hLock); ++#endif ++ ++#if defined(DEBUG) ++// PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats)); ++#endif ++ ++ OSFreeMem(psLinuxEventObject); ++ /*not nulling pointer, copy on stack*/ ++ ++ return PVRSRV_OK; ++ } ++ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; ++} ++ ++/*! 
++****************************************************************************** ++ ++ @Function LinuxEventObjectAdd ++ ++ @Description ++ ++ Linux wait object addition ++ ++ @Input hOSEventObjectList : Event object list handle ++ @Output phOSEventObject : Pointer to the event object handle ++ ++ @Return PVRSRV_ERROR : Error code ++ ++******************************************************************************/ ++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject) ++ { ++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; ++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; ++ ++ /* allocate completion variable */ ++ psLinuxEventObject = OSAllocMem(sizeof(*psLinuxEventObject)); ++ if (psLinuxEventObject == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory")); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ INIT_LIST_HEAD(&psLinuxEventObject->sList); ++ ++ /* Start with the timestamp at which event object was added to the list */ ++ psLinuxEventObject->ui32EventSignalCountPrevious = atomic_read(&psLinuxEventObjectList->sEventSignalCount); ++ ++#ifdef LINUX_EVENT_OBJECT_STATS ++ PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&psLinuxEventObject->hLock), "OSLockCreate"); ++ psLinuxEventObject->ui32ScheduleAvoided = 0; ++ psLinuxEventObject->ui32ScheduleCalled = 0; ++ psLinuxEventObject->ui32ScheduleSleptFully = 0; ++ psLinuxEventObject->ui32ScheduleSleptPartially = 0; ++ psLinuxEventObject->ui32ScheduleReturnedImmediately = 0; ++#endif ++ ++#if defined(DEBUG) ++ psLinuxEventObject->ui32Stats = 0; ++#endif ++ init_waitqueue_head(&psLinuxEventObject->sWait); ++ ++ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList; ++ ++ write_lock_bh(&psLinuxEventObjectList->sLock); ++ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList); ++ write_unlock_bh(&psLinuxEventObjectList->sLock); ++ ++ *phOSEventObject = psLinuxEventObject; ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function LinuxEventObjectSignal ++ ++ @Description ++ ++ Linux wait object signaling function ++ ++ @Input hOSEventObjectList : Event object list handle ++ ++ @Return PVRSRV_ERROR : Error code ++ ++******************************************************************************/ ++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList) ++{ ++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject; ++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList; ++ struct list_head *psListEntry, *psListEntryTemp, *psList; ++ psList = &psLinuxEventObjectList->sList; ++ ++ /* Move the timestamp ahead for this call, so a potential "Wait" from any ++ * EventObject/s doesn't wait for the signal to occur before returning. 
Early
++	 * setting/incrementing of the timestamp reduces the window where a concurrent
++	 * "Wait" call might block while "this" Signal call is being processed */
++	atomic_inc(&psLinuxEventObjectList->sEventSignalCount);
++
++	read_lock_bh(&psLinuxEventObjectList->sLock);
++	list_for_each_safe(psListEntry, psListEntryTemp, psList)
++	{
++		psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
++		wake_up_interruptible(&psLinuxEventObject->sWait);
++	}
++	read_unlock_bh(&psLinuxEventObjectList->sLock);
++
++	return PVRSRV_OK;
++}
++
++static void _TryToFreeze(void)
++{
++	/* If we reach zero it means that all of the threads called try_to_freeze */
++	LinuxBridgeNumActiveKernelThreadsDecrement();
++
++	/* Returns true if the thread was frozen. Should we do anything with this
++	 * information? What do we return? Which one is the error case? */
++	try_to_freeze();
++
++	LinuxBridgeNumActiveKernelThreadsIncrement();
++}
++
++void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject)
++{
++#ifdef LINUX_EVENT_OBJECT_STATS
++	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
++
++	OSLockAcquire(psLinuxEventObject->hLock);
++	PVR_LOG(("%s: EvObj(%p) schedule: Avoided(%u) Called(%u) ReturnedImmediately(%u) SleptFully(%u) SleptPartially(%u)",
++	         __func__, psLinuxEventObject, psLinuxEventObject->ui32ScheduleAvoided,
++	         psLinuxEventObject->ui32ScheduleCalled, psLinuxEventObject->ui32ScheduleReturnedImmediately,
++	         psLinuxEventObject->ui32ScheduleSleptFully, psLinuxEventObject->ui32ScheduleSleptPartially));
++	OSLockRelease(psLinuxEventObject->hLock);
++#else
++	PVR_LOG(("%s: LINUX_EVENT_OBJECT_STATS disabled!", __func__));
++#endif
++}
++
++/*!
++******************************************************************************
++
++ @Function	LinuxEventObjectWait
++
++ @Description
++
++ Linux wait object routine
++
++ @Input	hOSEventObject : Event object handle
++
++ @Input	ui64Timeoutus : Time out value in usec
++
++ @Return	PVRSRV_ERROR : Error code
++
++******************************************************************************/
++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject,
++                                  IMG_UINT64 ui64Timeoutus,
++                                  IMG_BOOL bFreezable)
++{
++	IMG_UINT32 ui32EventSignalCount;
++	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++	IMG_UINT32 ui32Remainder;
++	long timeOutJiffies;
++#ifdef LINUX_EVENT_OBJECT_STATS
++	long totalTimeoutJiffies;
++	IMG_BOOL bScheduleCalled = IMG_FALSE;
++#endif
++
++	DEFINE_WAIT(sWait);
++
++	PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
++	PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
++
++	/* Check if the driver is in good shape */
++	if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
++	{
++		return PVRSRV_ERROR_TIMEOUT;
++	}
++
++	/* usecs_to_jiffies only takes a uint. So if our timeout is bigger than a
++	 * uint use the msec version. With such a long timeout we really don't need
++	 * the high resolution of usecs.
*/ ++ if (ui64Timeoutus > 0xffffffffULL) ++ timeOutJiffies = msecs_to_jiffies(OSDivide64(ui64Timeoutus, 1000, &ui32Remainder)); ++ else ++ timeOutJiffies = usecs_to_jiffies(ui64Timeoutus); ++ ++#ifdef LINUX_EVENT_OBJECT_STATS ++ totalTimeoutJiffies = timeOutJiffies; ++#endif ++ ++ do ++ { ++ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE); ++ ui32EventSignalCount = (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount); ++ ++ if (psLinuxEventObject->ui32EventSignalCountPrevious != ui32EventSignalCount) ++ { ++ /* There is a pending event signal i.e. LinuxEventObjectSignal() ++ * was called on the event object since the last time we checked. ++ * Return without waiting. */ ++ break; ++ } ++ ++ if (signal_pending(current)) ++ { ++ /* There is an OS signal pending so return. ++ * This allows to kill/interrupt user space processes which ++ * are waiting on this event object. */ ++ break; ++ } ++ ++#ifdef LINUX_EVENT_OBJECT_STATS ++ bScheduleCalled = IMG_TRUE; ++#endif ++ timeOutJiffies = schedule_timeout(timeOutJiffies); ++ ++ if (bFreezable) ++ { ++ _TryToFreeze(); ++ } ++ ++#if defined(DEBUG) ++ psLinuxEventObject->ui32Stats++; ++#endif ++ ++ ++ } while (timeOutJiffies); ++ ++ finish_wait(&psLinuxEventObject->sWait, &sWait); ++ ++ psLinuxEventObject->ui32EventSignalCountPrevious = ui32EventSignalCount; ++ ++#ifdef LINUX_EVENT_OBJECT_STATS ++ OSLockAcquire(psLinuxEventObject->hLock); ++ if (bScheduleCalled) ++ { ++ psLinuxEventObject->ui32ScheduleCalled++; ++ if (totalTimeoutJiffies == timeOutJiffies) ++ { ++ psLinuxEventObject->ui32ScheduleReturnedImmediately++; ++ } ++ else if (timeOutJiffies == 0) ++ { ++ psLinuxEventObject->ui32ScheduleSleptFully++; ++ } ++ else ++ { ++ psLinuxEventObject->ui32ScheduleSleptPartially++; ++ } ++ } ++ else ++ { ++ psLinuxEventObject->ui32ScheduleAvoided++; ++ } ++ OSLockRelease(psLinuxEventObject->hLock); ++#endif ++ ++ if (signal_pending(current)) ++ { ++ return PVRSRV_ERROR_INTERRUPTED; ++ } ++ else ++ { ++ return timeOutJiffies ? 
PVRSRV_OK : PVRSRV_ERROR_TIMEOUT; ++ } ++} ++ ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++ ++PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ DEFINE_WAIT(sWait); ++ ++ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = ++ (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject; ++ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = ++ psLinuxEventObject->psLinuxEventObjectList; ++ ++ /* Check if the driver is in good shape */ ++ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE); ++ ++ if (psLinuxEventObject->ui32EventSignalCountPrevious != ++ (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount)) ++ { ++ /* There is a pending signal, so return without waiting */ ++ goto finish; ++ } ++ ++ schedule(); ++ ++ _TryToFreeze(); ++ ++finish: ++ finish_wait(&psLinuxEventObject->sWait, &sWait); ++ ++ psLinuxEventObject->ui32EventSignalCountPrevious = ++ (IMG_UINT32) atomic_read(&psLinuxEventObjectList->sEventSignalCount); ++ ++ return PVRSRV_OK; ++} ++ ++#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ +diff --git a/drivers/gpu/drm/img-rogue/event.h b/drivers/gpu/drm/img-rogue/event.h +new file mode 100644 +index 000000000000..bb378cb9220f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/event.h +@@ -0,0 +1,54 @@ ++/*************************************************************************/ /*! ++@File ++@Title Event Object ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
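Taken together, LinuxEventObjectListCreate/Add/Wait/Signal above form a small condition-variable-style API. The sketch below shows the intended calling pattern pieced together from that implementation; the wrapper function, the global list handle, and the 100 ms timeout are invented for the example, and error handling is trimmed.

#include "img_types.h"
#include "pvrsrv_error.h"
#include "event.h"

static IMG_HANDLE ghEventList;	/* set up once via LinuxEventObjectListCreate(&ghEventList) */

static PVRSRV_ERROR wait_for_event_once(void)
{
	IMG_HANDLE hOSEvent;
	PVRSRV_ERROR eError;

	/* Add snapshots sEventSignalCount, so a Signal that lands between
	 * Add and Wait is still observed and cannot be lost. */
	eError = LinuxEventObjectAdd(ghEventList, &hOSEvent);
	if (eError != PVRSRV_OK)
		return eError;

	/* Sleeps until LinuxEventObjectSignal(ghEventList) is called from
	 * another context, an OS signal is pending, or 100 ms elapse. */
	eError = LinuxEventObjectWait(hOSEvent, 100000ULL /* us */, IMG_FALSE);

	LinuxEventObjectDelete(hOSEvent);
	return eError;
}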
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList); ++PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList); ++PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject); ++PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject); ++PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList); ++PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject, ++ IMG_UINT64 ui64Timeoutus, ++ IMG_BOOL bFreezable); ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject); ++#endif ++void LinuxEventObjectDumpDebugInfo(IMG_HANDLE hOSEventObject); +diff --git a/drivers/gpu/drm/img-rogue/fwload.c b/drivers/gpu/drm/img-rogue/fwload.c +new file mode 100644 +index 000000000000..35e52af56a2e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/fwload.c +@@ -0,0 +1,255 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services firmware load and access routines for Linux ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include
++#include
++#include
++#include
++
++#include "device.h"
++#include "module_common.h"
++#include "fwload.h"
++#include "pvr_debug.h"
++#include "srvkm.h"
++
++#if defined(RGX_FW_SIGNED)
++
++#include
++#include
++#include
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
++#include <linux/module_signature.h>
++#else
++#define PKEY_ID_PKCS7 2
++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0) */
++
++#include "signfw.h"
++#endif /* RGX_FW_SIGNED */
++
++struct OS_FW_IMAGE_t
++{
++	const struct firmware *psFW;
++	size_t uSignatureSize;
++};
++
++#if defined(RGX_FW_SIGNED)
++
++static int OSCheckSignature(const struct FirmwareSignatureHeader *psHeader, size_t uSize)
++{
++	if (be32_to_cpu(psHeader->ui32SignatureLen) >= uSize - sizeof(*psHeader))
++	{
++		return -EBADMSG;
++	}
++
++	if (psHeader->ui8IDType != PKEY_ID_PKCS7)
++	{
++		return -ENOPKG;
++	}
++
++	if (psHeader->ui8Algo != 0 || psHeader->ui8HashAlgo != 0 ||
++	    psHeader->ui8SignerLen != 0 || psHeader->ui8KeyIDLen != 0 ||
++	    psHeader->__ui8Padding[0] != 0 || psHeader->__ui8Padding[1] != 0 ||
++	    psHeader->__ui8Padding[2] != 0)
++	{
++		return -EBADMSG;
++	}
++
++	return 0;
++}
++
++bool OSVerifyFirmware(OS_FW_IMAGE *psFWImage)
++{
++	const struct firmware *psFW = psFWImage->psFW;
++	const u8 *pui8FWData = psFW->data;
++	size_t uFWSize = psFW->size;
++	uint32_t ui32MagicLen = sizeof(MODULE_SIG_STRING) - 1;
++	struct FirmwareSignatureHeader sHeader;
++	int err;
++
++	if (uFWSize <= ui32MagicLen)
++	{
++		return false;
++	}
++
++	/*
++	 * The Linux kernel's sign-file utility is primarily intended for signing
++	 * modules, and so appends the MODULE_SIG_STRING magic at the end of
++	 * the signature. Only proceed with verification if this magic is found.
++	 */
++	if (memcmp(pui8FWData + uFWSize - ui32MagicLen, MODULE_SIG_STRING, ui32MagicLen) != 0)
++	{
++		return false;
++	}
++
++	uFWSize -= ui32MagicLen;
++	if (uFWSize <= sizeof(sHeader))
++	{
++		return false;
++	}
++
++	/*
++	 * After the magic, a header is placed which describes the digest /
++	 * crypto algorithm etc. Copy that header and ensure that it has valid
++	 * contents (we only support RSA crypto, SHA hash, X509 certificate and
++	 * PKCS#7 signature).
++	 */
++	memcpy(&sHeader, pui8FWData + (uFWSize - sizeof(sHeader)), sizeof(sHeader));
++	if (OSCheckSignature(&sHeader, uFWSize) != 0)
++	{
++		return false;
++	}
++
++	/*
++	 * As all information is now extracted, we can go ahead and ask the PKCS#7
++	 * machinery to verify the signature.
++ */ ++ uFWSize -= be32_to_cpu(sHeader.ui32SignatureLen) + sizeof(sHeader); ++ err = verify_pkcs7_signature(pui8FWData, uFWSize, pui8FWData + uFWSize, ++ be32_to_cpu(sHeader.ui32SignatureLen), NULL, ++ VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL); ++ if (err == 0) ++ { ++ psFWImage->uSignatureSize = psFW->size - uFWSize; ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware Successfully Verified", ++ __func__)); ++ return true; ++ } ++ ++ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Verification Failed (%d)", ++ __func__, err)); ++ return false; ++} ++ ++#else /* defined(RGX_FW_SIGNED) */ ++ ++inline bool OSVerifyFirmware(OS_FW_IMAGE *psFWImage) ++{ ++ return true; ++} ++ ++#endif /* defined(RGX_FW_SIGNED) */ ++ ++PVRSRV_ERROR ++OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString, ++ bool (*pfnVerifyFirmware)(OS_FW_IMAGE*), OS_FW_IMAGE **ppsFWImage) ++{ ++ const struct firmware *psFW = NULL; ++ OS_FW_IMAGE *psFWImage; ++ IMG_INT32 res; ++ PVRSRV_ERROR eError; ++ ++ res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice); ++ if (res != 0) ++ { ++ release_firmware(psFW); ++ if (res == -ENOENT) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') not found (%d)", ++ __func__, pszBVNCString, res)); ++ eError = PVRSRV_ERROR_NOT_FOUND; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: request_firmware('%s') not ready (%d)", ++ __func__, pszBVNCString, res)); ++ eError = PVRSRV_ERROR_NOT_READY; ++ } ++ goto err_exit; ++ } ++ ++ psFWImage = OSAllocZMem(sizeof(*psFWImage)); ++ if (psFWImage == NULL) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: OSAllocZMem('%s') failed.", ++ __func__, pszBVNCString)); ++ ++ release_firmware(psFW); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_exit; ++ } ++ ++ psFWImage->psFW = psFW; ++ if (pfnVerifyFirmware != NULL && !pfnVerifyFirmware(psFWImage)) ++ { ++ release_firmware(psFW); ++ OSFreeMem(psFWImage); ++ eError = PVRSRV_ERROR_NOT_AUTHENTICATED; ++ goto err_exit; ++ } ++ ++ *ppsFWImage = psFWImage; ++ return PVRSRV_OK; ++ ++err_exit: ++ *ppsFWImage = NULL; ++ return eError; ++} ++ ++void ++OSUnloadFirmware(OS_FW_IMAGE *psFWImage) ++{ ++ const struct firmware *psFW = psFWImage->psFW; ++ ++ release_firmware(psFW); ++ OSFreeMem(psFWImage); ++} ++ ++size_t ++OSFirmwareSize(OS_FW_IMAGE *psFWImage) ++{ ++ const struct firmware *psFW = psFWImage->psFW; ++ return psFW->size - psFWImage->uSignatureSize; ++} ++ ++const void * ++OSFirmwareData(OS_FW_IMAGE *psFWImage) ++{ ++ const struct firmware *psFW = psFWImage->psFW; ++ ++ return psFW->data; ++} ++ ++/****************************************************************************** ++ End of file (fwload.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/fwload.h b/drivers/gpu/drm/img-rogue/fwload.h +new file mode 100644 +index 000000000000..08e7f533b666 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/fwload.h +@@ -0,0 +1,158 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services RGX OS Interface for loading the firmware ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This file defines the OS interface through which the RGX ++ device initialisation code in the kernel/server will obtain ++ the RGX firmware binary image. The API is used during the ++ initialisation of an RGX device via the ++ PVRSRVCommonDeviceInitialise() ++ call sequence. 
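For reference, OSVerifyFirmware() above walks the blob back to front, expecting the layout appended by the kernel's scripts/sign-file utility (for example: scripts/sign-file sha512 key.pem cert.x509 rgx.fw). The following sketch of the size arithmetic uses the same FirmwareSignatureHeader fields; the helper name is invented.

#include <linux/module.h>	/* MODULE_SIG_STRING ("~Module signature appended~\n") */
#include "signfw.h"		/* struct FirmwareSignatureHeader */

/*
 * Signed image layout, back to front:
 *
 *   [ firmware payload | PKCS#7 blob | FirmwareSignatureHeader | magic string ]
 *
 * OSFirmwareSize() must report only the payload part.
 */
static size_t fw_payload_size(size_t uTotalSize,
			      const struct FirmwareSignatureHeader *psHdr)
{
	size_t uMagicLen = sizeof(MODULE_SIG_STRING) - 1;

	return uTotalSize - uMagicLen - sizeof(*psHdr) -
	       be32_to_cpu(psHdr->ui32SignatureLen);
}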
++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef FWLOAD_H ++#define FWLOAD_H ++ ++#include "img_defs.h" ++#include "device_connection.h" ++#include "device.h" ++ ++/*! Opaque type handle defined and known to the OS layer implementation of this ++ * fwload.h OS API. This private data is allocated in the implementation of ++ * OSLoadFirmware() and contains whatever data and information needed to be ++ * able to acquire and return the firmware binary image to the Services ++ * kernel/server during initialisation. ++ * It is no longer required and may be freed when OSUnloadFirmware() is called. ++ */ ++typedef struct OS_FW_IMAGE_t OS_FW_IMAGE; ++ ++#if defined(__linux__) ++ ++bool OSVerifyFirmware(OS_FW_IMAGE* psFWImage); ++ ++#endif ++ ++/*************************************************************************/ /*! ++@Function OSLoadFirmware ++@Description The OS implementation must load or acquire the firmware (FW) ++ image binary needed by the driver stack. ++ A handle to the common layer device node is given to identify ++ which device instance in the system is being initialised. The ++ BVNC string is also supplied so that the implementation knows ++ which FW image to retrieve since each FW image only supports one ++ GPU type/revision. ++ The calling server code supports multiple GPU types and revisions ++ and will detect the specific GPU type and revision before calling ++ this API. 
It will also have runtime configuration of the VZ mode,
++                 hence this API must be able to retrieve different FW binary
++                 images based on the pszBVNCString given. The purpose of the end
++                 platform/system is key to understanding which FW images must be
++                 available to the kernel server.
++                 On exit the implementation must return a pointer to some private
++                 data it uses to hold the FW image information and data. It will
++                 be passed onto later API calls by the kernel server code.
++                 NULL should be returned if the FW image could not be retrieved.
++                 The format of the BVNC string is as follows ([x] denotes an
++                 optional field):
++                 "rgx.fw[.signed].B.V[p].N.C[.vz]"
++                 The implementation must first try to load the FW image named
++                 with the provisional [p] field. If this is not available then
++                 it should drop back to retrieving the FW image named by the
++                 plain pszBVNCString form. The fields in the string are:
++                 B, V, N, C are all unsigned integers identifying the GPU
++                 type/revision.
++                 [.signed] is present when RGX_FW_SIGNED=1 is defined in the
++                 server build.
++                 [p] denotes a provisional (pre-silicon) GPU configuration.
++                 [.vz] is present when the kernel server is loaded on the HOST
++                 of a virtualised platform. See the DriverMode server
++                 AppHint for details.
++
++@Input           psDeviceNode      Device instance identifier.
++@Input           pszBVNCString     Identifier string of the FW image to
++                                   be loaded/acquired in production driver.
++@Input           pfnVerifyFirmware Callback which checks validity of FW image.
++@Output          ppsFWImage        Ptr to private data on success,
++                                   NULL otherwise.
++@Return          PVRSRV_ERROR      PVRSRV_OK on success,
++                                   PVRSRV_ERROR_NOT_READY if filesystem is not
++                                   ready/initialised,
++                                   PVRSRV_ERROR_NOT_FOUND if no suitable FW
++                                   image could be found,
++                                   PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc
++                                   memory for FW image,
++                                   PVRSRV_ERROR_NOT_AUTHENTICATED if FW image
++                                   cannot be verified.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
++                            const IMG_CHAR *pszBVNCString,
++                            bool (*pfnVerifyFirmware)(OS_FW_IMAGE*),
++                            OS_FW_IMAGE **ppsFWImage);
++
++/*************************************************************************/ /*!
++@Function        OSFirmwareData
++@Description     This function returns a pointer to the start of the FW image
++                 binary data held in memory. It must remain valid until
++                 OSUnloadFirmware() is called.
++@Input           psFWImage         Private data opaque handle
++@Return          const void*       Ptr to the FW binary image to start on the GPU.
++*/ /**************************************************************************/
++const void* OSFirmwareData(OS_FW_IMAGE *psFWImage);
++
++/*************************************************************************/ /*!
++@Function        OSFirmwareSize
++@Description     This function returns the size of the FW image binary data.
++@Input           psFWImage         Private data opaque handle
++@Return          size_t            Size in bytes of the firmware binary image
++*/ /**************************************************************************/
++size_t OSFirmwareSize(OS_FW_IMAGE *psFWImage);
++
++/*************************************************************************/ /*!
++@Function        OSUnloadFirmware
++@Description     This is called when the server has completed firmware
++                 initialisation and no longer needs the private data, possibly
++                 allocated by OSLoadFirmware().
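To make the naming scheme described above concrete, here is a small illustrative helper; the function name and the example BVNC values are invented, and a real caller would try the provisional form first and then fall back to the plain one.

#include <stdio.h>	/* snprintf; in-kernel code would get it from <linux/kernel.h> */
#include <stdbool.h>

/* Hypothetical helper composing "rgx.fw[.signed].B.V[p].N.C[.vz]". */
static void rgx_fw_name(char *buf, size_t len,
			unsigned int b, unsigned int v,
			unsigned int n, unsigned int c,
			bool provisional, bool signed_fw, bool vz_host)
{
	snprintf(buf, len, "rgx.fw%s.%u.%u%s.%u.%u%s",
		 signed_fw ? ".signed" : "",
		 b, v, provisional ? "p" : "",
		 n, c, vz_host ? ".vz" : "");
}

/* e.g. b=4, v=31, n=4, c=55, signed_fw=true, vz_host=false
 *   -> "rgx.fw.signed.4.31.4.55"
 *   (provisional form: "rgx.fw.signed.4.31p.4.55") */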
++@Input psFWImage Private data opaque handle ++*/ /**************************************************************************/ ++void OSUnloadFirmware(OS_FW_IMAGE *psFWImage); ++ ++#endif /* FWLOAD_H */ ++ ++/****************************************************************************** ++ End of file (fwload.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/fwtrace_string.h b/drivers/gpu/drm/img-rogue/fwtrace_string.h +new file mode 100644 +index 000000000000..a2ab95c3a815 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/fwtrace_string.h +@@ -0,0 +1,52 @@ ++/*************************************************************************/ /*! ++@File fwtrace_string.h ++@Title RGX Firmware trace strings for KM ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Platform Generic ++@Description This file defines SFs tuple. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++#ifndef KM_TRACE_STRING_H
++#define KM_TRACE_STRING_H
++
++#include "rgx_fwif_sf.h"
++
++extern const RGXKM_STID_FMT SFs[];
++extern const IMG_UINT32 g_ui32SFsCount;
++
++#endif /* KM_TRACE_STRING_H */
+diff --git a/drivers/gpu/drm/img-rogue/gpu_trace_point.h b/drivers/gpu/drm/img-rogue/gpu_trace_point.h
+new file mode 100644
+index 000000000000..b7352befedb4
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/gpu_trace_point.h
+@@ -0,0 +1,39 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM gpu_trace_point
++
++#if !defined(_TRACE_GPU_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_GPU_H
++
++#include
++#include
++#include
++
++TRACE_EVENT(gpu_interrupt,
++
++	TP_PROTO(unsigned int IRQStatusReg, unsigned int IRQStatus),
++
++	TP_ARGS(IRQStatusReg, IRQStatus),
++
++	TP_STRUCT__entry(
++		__field(	unsigned int,	IRQStatusReg)
++		__field(	unsigned int,	IRQStatus)
++	),
++
++	TP_fast_assign(
++		__entry->IRQStatusReg = IRQStatusReg;
++		__entry->IRQStatus = IRQStatus;
++	),
++
++	TP_printk("IRQStatusReg=0x%08x IRQStatus=0x%08x", __entry->IRQStatusReg, __entry->IRQStatus)
++);
++
++#endif /* _TRACE_GPU_H */
++
++/* We don't want to use include/trace/events */
++#undef TRACE_INCLUDE_PATH
++#define TRACE_INCLUDE_PATH .
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_FILE gpu_trace_point
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/drivers/gpu/drm/img-rogue/handle.c b/drivers/gpu/drm/img-rogue/handle.c
+new file mode 100644
+index 000000000000..c5dd5d77b33c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/handle.c
+@@ -0,0 +1,2484 @@
++/*************************************************************************/ /*!
++@File
++@Title          Resource Handle Manager
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Provide resource handle management
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
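The TRACE_EVENT(gpu_interrupt, ...) definition in gpu_trace_point.h above generates a trace_gpu_interrupt() helper. A hypothetical call site is sketched below; the LISR name, register constant, and read helper are invented, but the CREATE_TRACE_POINTS mechanism (defined in exactly one translation unit before including the header) is the standard one.

#define CREATE_TRACE_POINTS	/* in exactly one .c file */
#include "gpu_trace_point.h"

#include <linux/interrupt.h>

#define MY_IRQ_STATUS_REG_OFFSET 0x0138u	/* placeholder value */

static unsigned int my_read_irq_status(void *data)	/* stand-in for a real MMIO read */
{
	(void)data;
	return 0;
}

static irqreturn_t my_gpu_lisr(int irq, void *data)
{
	unsigned int status = my_read_irq_status(data);

	/* Compiles down to a static-branch no-op unless the event is enabled */
	trace_gpu_interrupt(MY_IRQ_STATUS_REG_OFFSET, status);

	return status ? IRQ_HANDLED : IRQ_NONE;
}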
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /***************************************************************************/
++
++/* See handle.h for a description of the handle API. */
++
++/*
++ * The implementation supports movable handle structures, allowing the address
++ * of a handle structure to change without having to fix up pointers in
++ * any of the handle structures. For example, the linked list mechanism
++ * used to link subhandles together uses handle array indices rather than
++ * pointers to the structures themselves.
++ */
++
++#if defined(__linux__)
++#include
++#else
++#include
++#endif
++
++#include "img_defs.h"
++#include "handle.h"
++#include "handle_impl.h"
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "osfunc.h"
++#include "lock.h"
++#include "connection_server.h"
++
++#define HANDLE_HASH_TAB_INIT_SIZE 32
++#define HANDLE_PROC_HANDLE_HASH_INIT_SIZE 10
++
++#define TEST_FLAG(v, f) BITMASK_HAS(v, f)
++#define TEST_ALLOC_FLAG(psHandleData, f) BITMASK_HAS((psHandleData)->eFlag, f)
++
++
++/* Linked list structure. Used for both the list head and list items */
++typedef struct _HANDLE_LIST_
++{
++	IMG_HANDLE hPrev;
++	IMG_HANDLE hNext;
++	IMG_HANDLE hParent;
++} HANDLE_LIST;
++
++typedef struct _HANDLE_DATA_
++{
++	/* The handle that represents this structure */
++	IMG_HANDLE hHandle;
++
++	/* Handle type */
++	PVRSRV_HANDLE_TYPE eType;
++
++	/* Flags specified when the handle was allocated */
++	PVRSRV_HANDLE_ALLOC_FLAG eFlag;
++
++	/* Pointer to the data that the handle represents */
++	void *pvData;
++
++	/*
++	 * Callback specified at handle allocation time to
++	 * release/destroy/free the data represented by the
++	 * handle when its reference count reaches 0. This
++	 * should always be NULL for subhandles.
++	 */
++	PFN_HANDLE_RELEASE pfnReleaseData;
++
++	/* List head for subhandles of this handle */
++	HANDLE_LIST sChildren;
++
++	/* List entry for sibling subhandles */
++	HANDLE_LIST sSiblings;
++
++	/* Reference count of lookups made. It helps track which resources are in
++	 * use in concurrent bridge calls. */
++	IMG_INT32 iLookupCount;
++	/* State of a handle. If the handle was already destroyed this is false.
++	 * If this is false and iLookupCount is 0 the pfnReleaseData callback is
++	 * called on the handle. */
++	IMG_BOOL bCanLookup;
++
++#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
++	/* Store the handle base used for this handle, so we
++	 * can later access the handle base lock (or check
++	 * whether it has already been acquired)
++	 */
++	PVRSRV_HANDLE_BASE *psBase;
++#endif
++
++} HANDLE_DATA;
++
++struct _HANDLE_BASE_
++{
++	/* Pointer to a handle implementation's base structure */
++	HANDLE_IMPL_BASE *psImplBase;
++
++	/*
++	 * Pointer to the handle hash table.
++	 * The hash table is used to do reverse lookups, converting data
++	 * pointers to handles.
++ */ ++ HASH_TABLE *psHashTab; ++ ++ /* Type specific (connection/global/process) Lock handle */ ++ POS_LOCK hLock; ++ ++ /* Can be connection, process, global */ ++ PVRSRV_HANDLE_BASE_TYPE eType; ++}; ++ ++/* ++ * The key for the handle hash table is an array of three elements, the ++ * pointer to the resource, the resource type and the parent handle (or ++ * NULL if there is no parent). The eHandKey enumeration gives the ++ * array indices of the elements making up the key. ++ */ ++enum eHandKey ++{ ++ HAND_KEY_DATA = 0, ++ HAND_KEY_TYPE, ++ HAND_KEY_PARENT, ++ HAND_KEY_LEN /* Must be last item in list */ ++}; ++ ++/* HAND_KEY is the type of the hash table key */ ++typedef uintptr_t HAND_KEY[HAND_KEY_LEN]; ++ ++typedef struct FREE_HANDLE_DATA_TAG ++{ ++ PVRSRV_HANDLE_BASE *psBase; ++ PVRSRV_HANDLE_TYPE eHandleFreeType; ++ /* timing data (ns) to release bridge lock upon the deadline */ ++ IMG_UINT64 ui64TimeStart; ++ IMG_UINT64 ui64MaxBridgeTime; ++} FREE_HANDLE_DATA; ++ ++typedef struct FREE_KERNEL_HANDLE_DATA_TAG ++{ ++ PVRSRV_HANDLE_BASE *psBase; ++ HANDLE_DATA *psProcessHandleData; ++ IMG_HANDLE hKernelHandle; ++} FREE_KERNEL_HANDLE_DATA; ++ ++/* Stores a pointer to the function table of the handle back-end in use */ ++static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs; ++ ++static POS_LOCK gKernelHandleLock; ++static IMG_BOOL gbLockInitialised = IMG_FALSE; ++/* Pointer to process handle base currently being freed */ ++static PVRSRV_HANDLE_BASE *g_psProcessHandleBaseBeingFreed; ++/* Lock for the process handle base table */ ++static POS_LOCK g_hProcessHandleBaseLock; ++/* Hash table with process handle bases */ ++static HASH_TABLE *g_psProcessHandleBaseTable; ++ ++void LockHandle(PVRSRV_HANDLE_BASE *psBase) ++{ ++ OSLockAcquire(psBase->hLock); ++} ++ ++void UnlockHandle(PVRSRV_HANDLE_BASE *psBase) ++{ ++ OSLockRelease(psBase->hLock); ++} ++ ++/* ++ * Kernel handle base structure. This is used for handles that are not ++ * allocated on behalf of a particular process. ++ */ ++PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL; ++ ++/* Increase the lookup reference count on the given handle. ++ * The handle lock must already be acquired. ++ * Returns: the reference count after the increment ++ */ ++static inline IMG_UINT32 HandleGet(HANDLE_DATA *psHandleData) ++{ ++#if defined(PVRSRV_DEBUG_HANDLE_LOCK) ++ if (!OSLockIsLocked(psHandleData->psBase->hLock)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__)); ++ OSDumpStack(); ++ } ++#endif ++ ++#ifdef DEBUG_REFCNT ++ PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = %u, iLookupCount %d -> %d", ++ __func__, psHandleData->bCanLookup, psHandleData->iLookupCount, ++ psHandleData->iLookupCount + 1)); ++#endif /* DEBUG_REFCNT */ ++ ++ PVR_ASSERT(psHandleData->bCanLookup); ++ ++ return ++psHandleData->iLookupCount; ++} ++ ++/* Decrease the lookup reference count on the given handle. ++ * The handle lock must already be acquired. 
++ * Returns: the reference count after the decrement ++ */ ++static inline IMG_UINT32 HandlePut(HANDLE_DATA *psHandleData) ++{ ++#if defined(PVRSRV_DEBUG_HANDLE_LOCK) ++ if (!OSLockIsLocked(psHandleData->psBase->hLock)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__)); ++ OSDumpStack(); ++ } ++#endif ++ ++#ifdef DEBUG_REFCNT ++ PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = %u, iLookupCount %d -> %d", ++ __func__, psHandleData->bCanLookup, psHandleData->iLookupCount, ++ psHandleData->iLookupCount - 1)); ++#endif /* DEBUG_REFCNT */ ++ ++ /* psHandleData->bCanLookup can be false at this point */ ++ PVR_ASSERT(psHandleData->iLookupCount > 0); ++ ++ return --psHandleData->iLookupCount; ++} ++ ++static inline IMG_BOOL IsRetryError(PVRSRV_ERROR eError) ++{ ++ return eError == PVRSRV_ERROR_RETRY || eError == PVRSRV_ERROR_KERNEL_CCB_FULL; ++} ++ ++#if defined(PVRSRV_NEED_PVR_DPF) ++static const IMG_CHAR *HandleTypeToString(PVRSRV_HANDLE_TYPE eType) ++{ ++ #define HANDLETYPE(x) \ ++ case PVRSRV_HANDLE_TYPE_##x: \ ++ return #x; ++ switch (eType) ++ { ++ #include "handle_types.h" ++ #undef HANDLETYPE ++ ++ default: ++ return "INVALID"; ++ } ++} ++ ++static const IMG_CHAR *HandleBaseTypeToString(PVRSRV_HANDLE_BASE_TYPE eType) ++{ ++ #define HANDLEBASETYPE(x) \ ++ case PVRSRV_HANDLE_BASE_TYPE_##x: \ ++ return #x; ++ switch (eType) ++ { ++ HANDLEBASETYPE(CONNECTION); ++ HANDLEBASETYPE(PROCESS); ++ HANDLEBASETYPE(GLOBAL); ++ #undef HANDLEBASETYPE ++ ++ default: ++ return "INVALID"; ++ } ++} ++#endif ++ ++static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase, ++ HANDLE_DATA *psHandleData, ++ IMG_HANDLE hHandle, ++ PVRSRV_HANDLE_TYPE eType); ++ ++static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase, ++ HANDLE_DATA *psHandleData, ++ IMG_HANDLE hHandle, ++ PVRSRV_HANDLE_TYPE eType); ++ ++static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase, ++ HANDLE_DATA *psHandleData, ++ IMG_HANDLE hHandle, ++ PVRSRV_HANDLE_TYPE eType); ++ ++/*! ++******************************************************************************* ++ @Function GetHandleData ++ @Description Get the handle data structure for a given handle ++ @Input psBase - pointer to handle base structure ++ hHandle - handle from client ++ eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the handle ++ type is not to be checked. ++ @Output ppsHandleData - pointer to a pointer to the handle data struct ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(GetHandleData) ++#endif ++static INLINE ++PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase, ++ HANDLE_DATA **ppsHandleData, ++ IMG_HANDLE hHandle, ++ PVRSRV_HANDLE_TYPE eType) ++{ ++ HANDLE_DATA *psHandleData; ++ PVRSRV_ERROR eError; ++ ++ eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase, ++ hHandle, ++ (void **)&psHandleData); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ /* ++ * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function, ++ * check handle is of the correct type. ++ */ ++ if (unlikely(eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "GetHandleData: Type mismatch. 
Lookup request: Handle %p, type: %s (%u) but stored handle is type %s (%u)", ++ hHandle, ++ HandleTypeToString(eType), ++ eType, ++ HandleTypeToString(psHandleData->eType), ++ psHandleData->eType)); ++ return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH; ++ } ++ ++ /* Return the handle structure */ ++ *ppsHandleData = psHandleData; ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ @Function HandleListInit ++ @Description Initialise a linked list structure embedded in a handle ++ structure. ++ @Input hHandle - handle containing the linked list structure ++ psList - pointer to linked list structure ++ hParent - parent handle or NULL ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(HandleListInit) ++#endif ++static INLINE ++void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent) ++{ ++ psList->hPrev = hHandle; ++ psList->hNext = hHandle; ++ psList->hParent = hParent; ++} ++ ++/*! ++******************************************************************************* ++ @Function InitParentList ++ @Description Initialise the children list head in a handle structure. ++ The children are the subhandles of this handle. ++ @Input psHandleData - pointer to handle data structure ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(InitParentList) ++#endif ++static INLINE ++void InitParentList(HANDLE_DATA *psHandleData) ++{ ++ IMG_HANDLE hParent = psHandleData->hHandle; ++ ++ HandleListInit(hParent, &psHandleData->sChildren, hParent); ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function InitChildEntry ++ @Description Initialise the child list entry in a handle structure. The list ++ entry is used to link together subhandles of a given handle. ++ @Input psHandleData - pointer to handle data structure ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(InitChildEntry) ++#endif ++static INLINE ++void InitChildEntry(HANDLE_DATA *psHandleData) ++{ ++ HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, NULL); ++} ++ ++/*! ++******************************************************************************* ++ @Function HandleListIsEmpty ++ @Description Determine whether a given linked list is empty. ++ @Input hHandle - handle containing the list head ++ psList - pointer to the list head ++ @Return IMG_TRUE if the list is empty, IMG_FALSE if it isn't. ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(HandleListIsEmpty) ++#endif ++static INLINE ++IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList) /* Instead of passing in the handle can we not just do (psList->hPrev == psList->hNext) ? IMG_TRUE : IMG_FALSE ??? */ ++{ ++ IMG_BOOL bIsEmpty; ++ ++ bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle); ++ ++#ifdef DEBUG ++ { ++ IMG_BOOL bIsEmpty2; ++ ++ bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle); ++ PVR_ASSERT(bIsEmpty == bIsEmpty2); ++ } ++#endif ++ ++ return bIsEmpty; ++} ++ ++#ifdef DEBUG ++/*! 
++******************************************************************************* ++ @Function NoChildren ++ @Description Determine whether a handle has any subhandles ++ @Input psHandleData - pointer to handle data structure ++ @Return IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does. ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(NoChildren) ++#endif ++static INLINE ++IMG_BOOL NoChildren(HANDLE_DATA *psHandleData) ++{ ++ PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle); ++ ++ return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren); ++} ++ ++/*! ++******************************************************************************* ++ @Function NoParent ++ @Description Determine whether a handle is a subhandle ++ @Input psHandleData - pointer to handle data structure ++ @Return IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is. ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(NoParent) ++#endif ++static INLINE ++IMG_BOOL NoParent(HANDLE_DATA *psHandleData) ++{ ++ if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings)) ++ { ++ PVR_ASSERT(psHandleData->sSiblings.hParent == NULL); ++ ++ return IMG_TRUE; ++ } ++ ++ PVR_ASSERT(psHandleData->sSiblings.hParent != NULL); ++ return IMG_FALSE; ++} ++#endif /*DEBUG*/ ++ ++/*! ++******************************************************************************* ++ @Function ParentHandle ++ @Description Determine the parent of a handle ++ @Input psHandleData - pointer to handle data structure ++ @Return Parent handle, or NULL if the handle is not a subhandle. ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(ParentHandle) ++#endif ++static INLINE ++IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData) ++{ ++ return psHandleData->sSiblings.hParent; ++} ++ ++/* ++ * GetHandleListFromHandleAndOffset is used to generate either a ++ * pointer to the subhandle list head, or a pointer to the linked list ++ * structure of an item on a subhandle list. ++ * The list head is itself on the list, but is at a different offset ++ * in the handle structure to the linked list structure for items on ++ * the list. The two linked list structures are differentiated by ++ * the third parameter, containing the parent handle. The parent field ++ * in the list head structure references the handle structure that contains ++ * it. For items on the list, the parent field in the linked list structure ++ * references the parent handle, which will be different from the handle ++ * containing the linked list structure. ++ */ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(GetHandleListFromHandleAndOffset) ++#endif ++static INLINE ++HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase, ++ IMG_HANDLE hEntry, ++ IMG_HANDLE hParent, ++ size_t uiParentOffset, ++ size_t uiEntryOffset) ++{ ++ HANDLE_DATA *psHandleData = NULL; ++ ++ PVR_ASSERT(psBase != NULL); ++ ++ if (GetHandleData(psBase, &psHandleData, hEntry, ++ PVRSRV_HANDLE_TYPE_NONE) != PVRSRV_OK) ++ { ++ return NULL; ++ } ++ ++ if (hEntry == hParent) ++ { ++ return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiParentOffset); ++ } ++ else ++ { ++ return (HANDLE_LIST *)IMG_OFFSET_ADDR(psHandleData, uiEntryOffset); ++ } ++} ++ ++/*! 
++******************************************************************************* ++ @Function HandleListInsertBefore ++ @Description Insert a handle before a handle currently on the list. ++ @Input hEntry - handle to be inserted after ++ psEntry - pointer to handle structure to be inserted after ++ uiParentOffset - offset to list head struct in handle structure ++ hNewEntry - handle to be inserted ++ psNewEntry - pointer to handle structure of item to be inserted ++ uiEntryOffset - offset of list item struct in handle structure ++ hParent - parent handle of hNewEntry ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(HandleListInsertBefore) ++#endif ++static INLINE ++PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase, ++ IMG_HANDLE hEntry, ++ HANDLE_LIST *psEntry, ++ size_t uiParentOffset, ++ IMG_HANDLE hNewEntry, ++ HANDLE_LIST *psNewEntry, ++ size_t uiEntryOffset, ++ IMG_HANDLE hParent) ++{ ++ HANDLE_LIST *psPrevEntry; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psNewEntry != NULL, "psNewEntry"); ++ ++ psPrevEntry = GetHandleListFromHandleAndOffset(psBase, ++ psEntry->hPrev, ++ hParent, ++ uiParentOffset, ++ uiEntryOffset); ++ if (psPrevEntry == NULL) ++ { ++ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; ++ } ++ ++ PVR_ASSERT(psNewEntry->hParent == NULL); ++ PVR_ASSERT(hEntry == psPrevEntry->hNext); ++ ++#if defined(DEBUG) ++ { ++ HANDLE_LIST *psParentList; ++ ++ psParentList = GetHandleListFromHandleAndOffset(psBase, ++ hParent, ++ hParent, ++ uiParentOffset, ++ uiParentOffset); ++ PVR_ASSERT(psParentList && psParentList->hParent == hParent); ++ } ++#endif /* defined(DEBUG) */ ++ ++ psNewEntry->hPrev = psEntry->hPrev; ++ psEntry->hPrev = hNewEntry; ++ ++ psNewEntry->hNext = hEntry; ++ psPrevEntry->hNext = hNewEntry; ++ ++ psNewEntry->hParent = hParent; ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ @Function AdoptChild ++ @Description Assign a subhandle to a handle ++ @Input psParentData - pointer to handle structure of parent handle ++ psChildData - pointer to handle structure of child subhandle ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(AdoptChild) ++#endif ++static INLINE ++PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase, ++ HANDLE_DATA *psParentData, ++ HANDLE_DATA *psChildData) ++{ ++ IMG_HANDLE hParent = psParentData->sChildren.hParent; ++ ++ PVR_ASSERT(hParent == psParentData->hHandle); ++ ++ return HandleListInsertBefore(psBase, ++ hParent, ++ &psParentData->sChildren, ++ offsetof(HANDLE_DATA, sChildren), ++ psChildData->hHandle, ++ &psChildData->sSiblings, ++ offsetof(HANDLE_DATA, sSiblings), ++ hParent); ++} ++ ++/*! 
++*******************************************************************************
++ @Function HandleListRemove
++ @Description Remove a handle from a list
++ @Input hEntry - handle to be removed
++ psEntry - pointer to handle structure of item to be removed
++ uiEntryOffset - offset of list item struct in handle structure
++ uiParentOffset - offset to list head struct in handle structure
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListRemove)
++#endif
++static INLINE
++PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE hEntry,
++ HANDLE_LIST *psEntry,
++ size_t uiEntryOffset,
++ size_t uiParentOffset)
++{
++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
++ PVR_LOG_RETURN_IF_INVALID_PARAM(psEntry != NULL, "psEntry");
++
++ if (!HandleListIsEmpty(hEntry, psEntry))
++ {
++ HANDLE_LIST *psPrev;
++ HANDLE_LIST *psNext;
++
++ psPrev = GetHandleListFromHandleAndOffset(psBase,
++ psEntry->hPrev,
++ psEntry->hParent,
++ uiParentOffset,
++ uiEntryOffset);
++ if (psPrev == NULL)
++ {
++ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
++ }
++
++ psNext = GetHandleListFromHandleAndOffset(psBase,
++ psEntry->hNext,
++ psEntry->hParent,
++ uiParentOffset,
++ uiEntryOffset);
++ if (psNext == NULL)
++ {
++ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
++ }
++
++ /*
++ * The list head is on the list, and we don't want to
++ * remove it.
++ */
++ PVR_ASSERT(psEntry->hParent != NULL);
++
++ psPrev->hNext = psEntry->hNext;
++ psNext->hPrev = psEntry->hPrev;
++
++ HandleListInit(hEntry, psEntry, NULL);
++ }
++
++ return PVRSRV_OK;
++}
++
++/*!
++*******************************************************************************
++ @Function UnlinkFromParent
++ @Description Remove a subhandle from its parent's list
++ @Input psHandleData - pointer to handle data structure of child
++ subhandle.
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(UnlinkFromParent)
++#endif
++static INLINE
++PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase,
++ HANDLE_DATA *psHandleData)
++{
++ return HandleListRemove(psBase,
++ psHandleData->hHandle,
++ &psHandleData->sSiblings,
++ offsetof(HANDLE_DATA, sSiblings),
++ offsetof(HANDLE_DATA, sChildren));
++}
++
++/*!
++*******************************************************************************
++ @Function HandleListIterate
++ @Description Iterate over the items in a list
++ @Input psHead - pointer to list head
++ uiParentOffset - offset to list head struct in handle structure
++ uiEntryOffset - offset of list item struct in handle structure
++ pfnIterFunc - function to be called for each handle in the list
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(HandleListIterate)
++#endif
++static INLINE
++PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase,
++ HANDLE_LIST *psHead,
++ size_t uiParentOffset,
++ size_t uiEntryOffset,
++ PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
++{
++ IMG_HANDLE hHandle = psHead->hNext;
++ IMG_HANDLE hParent = psHead->hParent;
++ IMG_HANDLE hNext;
++
++ PVR_ASSERT(psHead->hParent != NULL);
++
++ /*
++ * Follow the next chain from the list head until we reach
++ * the list head again, which signifies the end of the list.
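++ * For example, for a parent handle P with subhandles A and B the
++ * circular chain is P.sChildren -> A.sSiblings -> B.sSiblings -> P,
++ * so iteration starts at psHead->hNext and stops once the parent
++ * handle comes around again.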
++ */ ++ while (hHandle != hParent) ++ { ++ HANDLE_LIST *psEntry; ++ PVRSRV_ERROR eError; ++ ++ psEntry = GetHandleListFromHandleAndOffset(psBase, ++ hHandle, ++ hParent, ++ uiParentOffset, ++ uiEntryOffset); ++ if (psEntry == NULL) ++ { ++ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; ++ } ++ ++ PVR_ASSERT(psEntry->hParent == psHead->hParent); ++ ++ /* ++ * Get the next index now, in case the list item is ++ * modified by the iteration function. ++ */ ++ hNext = psEntry->hNext; ++ ++ eError = (*pfnIterFunc)(psBase, hHandle); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ hHandle = hNext; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ @Function IterateOverChildren ++ @Description Iterate over the subhandles of a parent handle ++ @Input psParentData - pointer to parent handle structure ++ pfnIterFunc - function to be called for each subhandle ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(IterateOverChildren) ++#endif ++static INLINE ++PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase, ++ HANDLE_DATA *psParentData, ++ PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE)) ++{ ++ return HandleListIterate(psBase, ++ &psParentData->sChildren, ++ offsetof(HANDLE_DATA, sChildren), ++ offsetof(HANDLE_DATA, sSiblings), ++ pfnIterFunc); ++} ++ ++/*! ++******************************************************************************* ++ @Function ParentIfPrivate ++ @Description Return the parent handle if the handle was allocated with ++ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return NULL. ++ @Input psHandleData - pointer to handle data structure ++ @Return Parent handle or NULL ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(ParentIfPrivate) ++#endif ++static INLINE ++IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData) ++{ ++ return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? ++ ParentHandle(psHandleData) : NULL; ++} ++ ++/*! ++******************************************************************************* ++ @Function InitKey ++ @Description Initialise a hash table key for the current process ++ @Input aKey - pointer to key ++ psBase - pointer to handle base structure ++ pvData - pointer to the resource the handle represents ++ eType - type of resource ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(InitKey) ++#endif ++static INLINE ++void InitKey(HAND_KEY aKey, ++ PVRSRV_HANDLE_BASE *psBase, ++ void *pvData, ++ PVRSRV_HANDLE_TYPE eType, ++ IMG_HANDLE hParent) ++{ ++ PVR_UNREFERENCED_PARAMETER(psBase); ++ ++ aKey[HAND_KEY_DATA] = (uintptr_t)pvData; ++ aKey[HAND_KEY_TYPE] = (uintptr_t)eType; ++ aKey[HAND_KEY_PARENT] = (uintptr_t)hParent; ++} ++ ++/*! 
++******************************************************************************* ++ @Function FindHandle ++ @Description Find handle corresponding to a resource pointer ++ @Input psBase - pointer to handle base structure ++ pvData - pointer to resource to be associated with the handle ++ eType - the type of resource ++ @Return The handle, or NULL if not found ++******************************************************************************/ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(FindHandle) ++#endif ++static INLINE ++IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase, ++ void *pvData, ++ PVRSRV_HANDLE_TYPE eType, ++ IMG_HANDLE hParent) ++{ ++ HAND_KEY aKey; ++ ++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); ++ ++ InitKey(aKey, psBase, pvData, eType, hParent); ++ ++ return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey); ++} ++ ++/*! ++******************************************************************************* ++ @Function AllocHandle ++ @Description Allocate a new handle ++ @Input phHandle - location for new handle ++ pvData - pointer to resource to be associated with the handle ++ eType - the type of resource ++ hParent - parent handle or NULL ++ pfnReleaseData - Function to release resource at handle release ++ time ++ @Output phHandle - points to new handle ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase, ++ IMG_HANDLE *phHandle, ++ void *pvData, ++ PVRSRV_HANDLE_TYPE eType, ++ PVRSRV_HANDLE_ALLOC_FLAG eFlag, ++ IMG_HANDLE hParent, ++ PFN_HANDLE_RELEASE pfnReleaseData) ++{ ++ HANDLE_DATA *psNewHandleData; ++ IMG_HANDLE hHandle; ++ PVRSRV_ERROR eError; ++ ++ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ ++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); ++ PVR_ASSERT(psBase != NULL && psBase->psHashTab != NULL); ++ PVR_ASSERT(gpsHandleFuncs); ++ ++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) ++ { ++ /* Handle must not already exist */ ++ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL); ++ } ++ ++ psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData)); ++ PVR_LOG_RETURN_IF_NOMEM(psNewHandleData, "OSAllocZMem"); ++ ++ eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle, ++ psNewHandleData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "pfnAcquireHandle", ++ ErrorFreeHandleData); ++ ++ /* ++ * If a data pointer can be associated with multiple handles, we ++ * don't put the handle in the hash table, as the data pointer ++ * may not map to a unique handle ++ */ ++ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) ++ { ++ HAND_KEY aKey; ++ ++ /* Initialise hash key */ ++ InitKey(aKey, psBase, pvData, eType, hParent); ++ ++ /* Put the new handle in the hash table */ ++ eError = HASH_Insert_Extended(psBase->psHashTab, aKey, (uintptr_t)hHandle) ? 
++ PVRSRV_OK : PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; ++ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "couldn't add handle to hash table", ++ ErrorReleaseHandle); ++ } ++ ++ psNewHandleData->hHandle = hHandle; ++ psNewHandleData->eType = eType; ++ psNewHandleData->eFlag = eFlag; ++ psNewHandleData->pvData = pvData; ++ psNewHandleData->pfnReleaseData = pfnReleaseData; ++ psNewHandleData->iLookupCount = 0; ++ psNewHandleData->bCanLookup = IMG_TRUE; ++ ++#ifdef DEBUG_REFCNT ++ PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = true", __func__)); ++#endif /* DEBUG_REFCNT */ ++ ++ InitParentList(psNewHandleData); ++#if defined(DEBUG) ++ PVR_ASSERT(NoChildren(psNewHandleData)); ++#endif ++ ++ InitChildEntry(psNewHandleData); ++#if defined(DEBUG) ++ PVR_ASSERT(NoParent(psNewHandleData)); ++#endif ++ ++#if defined(PVRSRV_DEBUG_HANDLE_LOCK) ++ psNewHandleData->psBase = psBase; ++#endif ++ ++ /* Return the new handle to the client */ ++ *phHandle = psNewHandleData->hHandle; ++ ++ return PVRSRV_OK; ++ ++ErrorReleaseHandle: ++ (void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, NULL); ++ ++ErrorFreeHandleData: ++ OSFreeMem(psNewHandleData); ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVAllocHandle ++ @Description Allocate a handle ++ @Input psBase - pointer to handle base structure ++ pvData - pointer to resource to be associated with the handle ++ eType - the type of resource ++ pfnReleaseData - Function to release resource at handle release ++ time ++ @Output phHandle - points to new handle ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, ++ IMG_HANDLE *phHandle, ++ void *pvData, ++ PVRSRV_HANDLE_TYPE eType, ++ PVRSRV_HANDLE_ALLOC_FLAG eFlag, ++ PFN_HANDLE_RELEASE pfnReleaseData) ++{ ++ PVRSRV_ERROR eError; ++ ++ LockHandle(psBase); ++ eError = PVRSRVAllocHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, pfnReleaseData); ++ UnlockHandle(psBase); ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVAllocHandleUnlocked ++ @Description Allocate a handle without acquiring/releasing the handle lock. ++ The function assumes you hold the lock when called. ++ @Input phHandle - location for new handle ++ pvData - pointer to resource to be associated with the handle ++ eType - the type of resource ++ pfnReleaseData - Function to release resource at handle release ++ time ++ @Output phHandle - points to new handle ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, ++ IMG_HANDLE *phHandle, ++ void *pvData, ++ PVRSRV_HANDLE_TYPE eType, ++ PVRSRV_HANDLE_ALLOC_FLAG eFlag, ++ PFN_HANDLE_RELEASE pfnReleaseData) ++{ ++ *phHandle = NULL; ++ ++ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ ++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); ++ PVR_ASSERT(gpsHandleFuncs); ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pfnReleaseData != NULL, "pfnReleaseData"); ++ ++ return AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL, pfnReleaseData); ++} ++ ++/*! 
++*******************************************************************************
++ @Function PVRSRVAllocSubHandle
++ @Description Allocate a subhandle
++ @Input pvData - pointer to resource to be associated with the subhandle
++ eType - the type of resource
++ hParent - parent handle
++ @Output phHandle - points to new subhandle
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE *phHandle,
++ void *pvData,
++ PVRSRV_HANDLE_TYPE eType,
++ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
++ IMG_HANDLE hParent)
++{
++ PVRSRV_ERROR eError;
++
++ LockHandle(psBase);
++ eError = PVRSRVAllocSubHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, hParent);
++ UnlockHandle(psBase);
++
++ return eError;
++}
++
++/*!
++*******************************************************************************
++ @Function PVRSRVAllocSubHandleUnlocked
++ @Description Allocate a subhandle without acquiring/releasing the handle
++ lock. The function assumes you hold the lock when called.
++ @Input pvData - pointer to resource to be associated with the subhandle
++ eType - the type of resource
++ hParent - parent handle
++ @Output phHandle - points to new subhandle
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE *phHandle,
++ void *pvData,
++ PVRSRV_HANDLE_TYPE eType,
++ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
++ IMG_HANDLE hParent)
++{
++ HANDLE_DATA *psPHandleData = NULL;
++ HANDLE_DATA *psCHandleData = NULL;
++ IMG_HANDLE hParentKey;
++ IMG_HANDLE hHandle;
++ PVRSRV_ERROR eError;
++
++ *phHandle = NULL;
++
++ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++ PVR_ASSERT(gpsHandleFuncs);
++
++ PVR_LOG_GOTO_IF_INVALID_PARAM(psBase, eError, Exit);
++
++ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? hParent : NULL;
++
++ /* Lookup the parent handle */
++ eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
++ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get parent handle structure",
++ Exit);
++
++ eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey, NULL);
++ PVR_GOTO_IF_ERROR(eError, Exit);
++
++ eError = GetHandleData(psBase, &psCHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
++ /* If we were able to allocate the handle then there should be no reason why we
++ * can't also get its handle structure. Otherwise something has gone badly wrong.
++ */
++ PVR_ASSERT(eError == PVRSRV_OK);
++ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get subhandle structure",
++ ExitFreeHandle);
++
++ /*
++ * Get the parent handle structure again, in case the handle
++ * structure has moved (depending on the implementation
++ * of AllocHandle).
++ */
++ eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
++ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to get parent handle structure",
++ ExitFreeHandle);
++
++ eError = AdoptChild(psBase, psPHandleData, psCHandleData);
++ PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "parent handle failed to adopt subhandle",
++ ExitFreeHandle);
++
++ *phHandle = hHandle;
++
++ return PVRSRV_OK;
++
++ExitFreeHandle:
++ PVRSRVDestroyHandleUnlocked(psBase, hHandle, eType);
++Exit:
++ return eError;
++}
++
++/*!
++******************************************************************************* ++ @Function PVRSRVFindHandle ++ @Description Find handle corresponding to a resource pointer ++ @Input pvData - pointer to resource to be associated with the handle ++ eType - the type of resource ++ @Output phHandle - points to returned handle ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, ++ IMG_HANDLE *phHandle, ++ void *pvData, ++ PVRSRV_HANDLE_TYPE eType) ++{ ++ PVRSRV_ERROR eError; ++ ++ LockHandle(psBase); ++ eError = PVRSRVFindHandleUnlocked(psBase, phHandle, pvData, eType); ++ UnlockHandle(psBase); ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVFindHandleUnlocked ++ @Description Find handle corresponding to a resource pointer without ++ acquiring/releasing the handle lock. The function assumes you ++ hold the lock when called. ++ @Input pvData - pointer to resource to be associated with the handle ++ eType - the type of resource ++ @Output phHandle - points to the returned handle ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, ++ IMG_HANDLE *phHandle, ++ void *pvData, ++ PVRSRV_HANDLE_TYPE eType) ++{ ++ IMG_HANDLE hHandle; ++ ++ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ ++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); ++ PVR_ASSERT(gpsHandleFuncs); ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); ++ ++ /* See if there is a handle for this data pointer */ ++ hHandle = FindHandle(psBase, pvData, eType, NULL); ++ if (hHandle == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error finding handle. Type %u", ++ __func__, ++ eType)); ++ ++ return PVRSRV_ERROR_HANDLE_NOT_FOUND; ++ } ++ ++ *phHandle = hHandle; ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVLookupHandle ++ @Description Lookup the data pointer corresponding to a handle ++ @Input hHandle - handle from client ++ eType - handle type ++ bRef - If TRUE, a reference will be added on the handle if the ++ lookup is successful. ++ @Output ppvData - points to the return data pointer ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, ++ void **ppvData, ++ IMG_HANDLE hHandle, ++ PVRSRV_HANDLE_TYPE eType, ++ IMG_BOOL bRef) ++{ ++ PVRSRV_ERROR eError; ++ ++ LockHandle(psBase); ++ eError = PVRSRVLookupHandleUnlocked(psBase, ppvData, hHandle, eType, bRef); ++ UnlockHandle(psBase); ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVLookupHandleUnlocked ++ @Description Lookup the data pointer corresponding to a handle without ++ acquiring/releasing the handle lock. The function assumes you ++ hold the lock when called. ++ @Input hHandle - handle from client ++ eType - handle type ++ bRef - If TRUE, a reference will be added on the handle if the ++ lookup is successful. 
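++              A referenced lookup must later be balanced by a call to
++              PVRSRVReleaseHandleUnlocked(); while the reference is held,
++              destroying the handle fails with
++              PVRSRV_ERROR_OBJECT_STILL_REFERENCED.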
++ @Output ppvData - points to the returned data pointer ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, ++ void **ppvData, ++ IMG_HANDLE hHandle, ++ PVRSRV_HANDLE_TYPE eType, ++ IMG_BOOL bRef) ++{ ++ HANDLE_DATA *psHandleData = NULL; ++ PVRSRV_ERROR eError; ++ ++ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ ++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); ++ PVR_ASSERT(gpsHandleFuncs); ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); ++ ++ eError = GetHandleData(psBase, &psHandleData, hHandle, eType); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error looking up handle (%s) for base %p of type %s. Handle %p, type %s", ++ __func__, ++ PVRSRVGetErrorString(eError), ++ psBase, ++ HandleBaseTypeToString(psBase->eType), ++ (void*) hHandle, ++ HandleTypeToString(eType))); ++#if defined(DEBUG) || defined(PVRSRV_NEED_PVR_DPF) ++ OSDumpStack(); ++#endif ++ return eError; ++ } ++ ++ /* If bCanLookup is false it means that a destroy operation was already ++ * called on this handle; therefore it can no longer be looked up. */ ++ if (!psHandleData->bCanLookup) ++ { ++ return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; ++ } ++ ++ if (bRef) ++ { ++ HandleGet(psHandleData); ++ } ++ ++ *ppvData = psHandleData->pvData; ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVLookupSubHandle ++ @Description Lookup the data pointer corresponding to a subhandle ++ @Input hHandle - handle from client ++ eType - handle type ++ hAncestor - ancestor handle ++ @Output ppvData - points to the returned data pointer ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, ++ void **ppvData, ++ IMG_HANDLE hHandle, ++ PVRSRV_HANDLE_TYPE eType, ++ IMG_HANDLE hAncestor) ++{ ++ HANDLE_DATA *psPHandleData = NULL; ++ HANDLE_DATA *psCHandleData = NULL; ++ PVRSRV_ERROR eError; ++ ++ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */ ++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE); ++ PVR_ASSERT(gpsHandleFuncs); ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); ++ ++ LockHandle(psBase); ++ ++ eError = GetHandleData(psBase, &psCHandleData, hHandle, eType); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error looking up subhandle (%s). Handle %p, type %u", ++ __func__, ++ PVRSRVGetErrorString(eError), ++ (void*) hHandle, ++ eType)); ++ OSDumpStack(); ++ goto ExitUnlock; ++ } ++ ++ /* Look for hAncestor among the handle's ancestors */ ++ for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; ) ++ { ++ eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "GetHandleData"); ++ eError = PVRSRV_ERROR_INVALID_SUBHANDLE; ++ goto ExitUnlock; ++ } ++ } ++ ++ *ppvData = psCHandleData->pvData; ++ ++ eError = PVRSRV_OK; ++ ++ExitUnlock: ++ UnlockHandle(psBase); ++ ++ return eError; ++} ++ ++ ++/*! 
++*******************************************************************************
++ @Function PVRSRVReleaseHandle
++ @Description Release a handle that is no longer needed
++ @Input hHandle - handle from client
++ eType - handle type
++******************************************************************************/
++void PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE hHandle,
++ PVRSRV_HANDLE_TYPE eType)
++{
++ LockHandle(psBase);
++ PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType);
++ UnlockHandle(psBase);
++}
++
++
++/*!
++*******************************************************************************
++ @Function PVRSRVReleaseHandleUnlocked
++ @Description Release a handle that is no longer needed without
++ acquiring/releasing the handle lock. The function assumes you
++ hold the lock when called.
++ @Input hHandle - handle from client
++ eType - handle type
++******************************************************************************/
++void PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE hHandle,
++ PVRSRV_HANDLE_TYPE eType)
++{
++ HANDLE_DATA *psHandleData = NULL;
++ PVRSRV_ERROR eError;
++
++ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
++ PVR_ASSERT(psBase != NULL);
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++ PVR_ASSERT(gpsHandleFuncs);
++
++ PVR_LOG_RETURN_VOID_IF_FALSE(psBase != NULL, "invalid psBase");
++
++ eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
++ if (unlikely(eError != PVRSRV_OK))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Error (%s) looking up handle %p of type %s "
++ "for base %p of type %s.", __func__, PVRSRVGetErrorString(eError),
++ (void*) hHandle, HandleTypeToString(eType), psBase,
++ HandleBaseTypeToString(psBase->eType)));
++
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ return;
++ }
++
++ PVR_ASSERT(psHandleData->bCanLookup);
++ PVR_ASSERT(psHandleData->iLookupCount > 0);
++
++ /* Drop one lookup reference on the handle; the handle itself is
++ * freed via the destroy path, not here */
++ HandlePut(psHandleData);
++}
++
++/*!
++*******************************************************************************
++ @Function PVRSRVPurgeHandles
++ @Description Purge handles for a given handle base
++ @Input psBase - pointer to handle base structure
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(gpsHandleFuncs);
++
++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
++
++ LockHandle(psBase);
++ eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase);
++ UnlockHandle(psBase);
++
++ return eError;
++}
++
++static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFreeWrapper(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE hHandle)
++{
++ HANDLE_DATA *psHandleData;
++ PVRSRV_ERROR eError = GetHandleData(psBase, &psHandleData, hHandle,
++ PVRSRV_HANDLE_TYPE_NONE);
++ PVR_RETURN_IF_ERROR(eError);
++
++ return HandleUnrefAndMaybeMarkForFree(psBase, psHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
++}
++
++static PVRSRV_ERROR HandleUnrefAndMaybeMarkForFree(PVRSRV_HANDLE_BASE *psBase,
++ HANDLE_DATA *psHandleData,
++ IMG_HANDLE hHandle,
++ PVRSRV_HANDLE_TYPE eType)
++{
++ PVRSRV_ERROR eError;
++
++ /* If bCanLookup is false it means that the destructor was called more than
++ * once on this handle.
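++ * In that case the function fails with PVRSRV_ERROR_HANDLE_NOT_FOUND
++ * rather than attempting a double free.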
*/ ++ if (!psHandleData->bCanLookup) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Handle %p of type %s already freed.", ++ __func__, psHandleData->hHandle, ++ HandleTypeToString(psHandleData->eType))); ++ return PVRSRV_ERROR_HANDLE_NOT_FOUND; ++ } ++ ++ if (psHandleData->iLookupCount > 0) ++ { ++ return PVRSRV_ERROR_OBJECT_STILL_REFERENCED; ++ } ++ ++ /* Mark this handle as freed only if it's no longer referenced by any ++ * lookup. The user space should retry freeing this handle once there are ++ * no outstanding lookups. */ ++ psHandleData->bCanLookup = IMG_FALSE; ++ ++#ifdef DEBUG_REFCNT ++ PVR_DPF((PVR_DBG_ERROR, "%s: bCanLookup = false, iLookupCount = %d", __func__, ++ psHandleData->iLookupCount)); ++#endif /* DEBUG_REFCNT */ ++ ++ /* Prepare children for destruction */ ++ eError = IterateOverChildren(psBase, psHandleData, ++ HandleUnrefAndMaybeMarkForFreeWrapper); ++ PVR_LOG_RETURN_IF_ERROR(eError, "HandleUnrefAndMaybeMarkForFreeWrapper"); ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR HandleFreePrivDataWrapper(PVRSRV_HANDLE_BASE *psBase, ++ IMG_HANDLE hHandle) ++{ ++ HANDLE_DATA *psHandleData; ++ PVRSRV_ERROR eError = GetHandleData(psBase, &psHandleData, hHandle, ++ PVRSRV_HANDLE_TYPE_NONE); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ return HandleFreePrivData(psBase, psHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE); ++} ++ ++static PVRSRV_ERROR HandleFreePrivData(PVRSRV_HANDLE_BASE *psBase, ++ HANDLE_DATA *psHandleData, ++ IMG_HANDLE hHandle, ++ PVRSRV_HANDLE_TYPE eType) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Call the release data callback for each reference on the handle */ ++ if (psHandleData->pfnReleaseData != NULL) ++ { ++ eError = psHandleData->pfnReleaseData(psHandleData->pvData); ++ if (eError != PVRSRV_OK) ++ { ++ if (IsRetryError(eError)) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Got retry while calling release " ++ "data callback for handle %p of type = %s", __func__, ++ hHandle, HandleTypeToString(psHandleData->eType))); ++ } ++ else ++ { ++ PVR_LOG_ERROR(eError, "pfnReleaseData"); ++ } ++ ++ return eError; ++ } ++ ++ /* we don't need this so make sure it's not called on ++ * the pvData for the second time ++ */ ++ psHandleData->pfnReleaseData = NULL; ++ } ++ ++ /* Free children's data */ ++ eError = IterateOverChildren(psBase, psHandleData, ++ HandleFreePrivDataWrapper); ++ PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreePrivData"); ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR HandleFreeDestroyWrapper(PVRSRV_HANDLE_BASE *psBase, ++ IMG_HANDLE hHandle) ++{ ++ HANDLE_DATA *psHandleData; ++ PVRSRV_ERROR eError = GetHandleData(psBase, &psHandleData, hHandle, ++ PVRSRV_HANDLE_TYPE_NONE); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ return HandleFreeDestroy(psBase, psHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE); ++} ++ ++static PVRSRV_ERROR HandleFreeDestroy(PVRSRV_HANDLE_BASE *psBase, ++ HANDLE_DATA *psHandleData, ++ IMG_HANDLE hHandle, ++ PVRSRV_HANDLE_TYPE eType) ++{ ++ HANDLE_DATA *psReleasedHandleData; ++ PVRSRV_ERROR eError; ++ ++ eError = UnlinkFromParent(psBase, psHandleData); ++ PVR_LOG_RETURN_IF_ERROR(eError, "UnlinkFromParent"); ++ ++ if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI)) ++ { ++ HAND_KEY aKey; ++ IMG_HANDLE hRemovedHandle; ++ ++ InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, ++ ParentIfPrivate(psHandleData)); ++ ++ hRemovedHandle = (IMG_HANDLE) HASH_Remove_Extended(psBase->psHashTab, ++ aKey); ++ ++ PVR_ASSERT(hRemovedHandle != NULL); ++ PVR_ASSERT(hRemovedHandle == psHandleData->hHandle); ++ 
PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
++ }
++
++ /* Free children */
++ eError = IterateOverChildren(psBase, psHandleData, HandleFreeDestroyWrapper);
++ PVR_LOG_RETURN_IF_ERROR(eError, "IterateOverChildren->HandleFreeDestroy");
++
++ eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase,
++ psHandleData->hHandle,
++ (void **)&psReleasedHandleData);
++ OSFreeMem(psHandleData);
++ PVR_LOG_RETURN_IF_ERROR(eError, "pfnReleaseHandle");
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR DestroyHandle(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE hHandle,
++ PVRSRV_HANDLE_TYPE eType,
++ IMG_BOOL bReleaseLock)
++{
++ PVRSRV_ERROR eError;
++ HANDLE_DATA *psHandleData = NULL;
++
++ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
++ PVR_ASSERT(gpsHandleFuncs);
++
++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase");
++
++ eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
++ PVR_RETURN_IF_ERROR(eError);
++
++ eError = HandleUnrefAndMaybeMarkForFree(psBase, psHandleData, hHandle, eType);
++ PVR_RETURN_IF_ERROR(eError);
++
++ if (bReleaseLock)
++ {
++ UnlockHandle(psBase);
++ }
++
++ eError = HandleFreePrivData(psBase, psHandleData, hHandle, eType);
++ if (eError != PVRSRV_OK)
++ {
++ if (bReleaseLock)
++ {
++ LockHandle(psBase);
++ }
++
++ /* If the data could not be freed due to a temporary condition the
++ * handle must be kept alive so that the next destroy call can try again */
++ if (IsRetryError(eError))
++ {
++ psHandleData->bCanLookup = IMG_TRUE;
++ }
++
++ return eError;
++ }
++
++ if (bReleaseLock)
++ {
++ LockHandle(psBase);
++ }
++
++ return HandleFreeDestroy(psBase, psHandleData, hHandle, eType);
++}
++
++/*!
++*******************************************************************************
++ @Function PVRSRVDestroyHandle
++ @Description Destroys a handle that is no longer needed. Acquires the
++ handle lock for the duration of the call.
++ Can return RETRY or KERNEL_CCB_FULL if the resource could not
++ be destroyed; the caller should retry later.
++ @Input psBase - pointer to handle base structure
++ hHandle - handle from client
++ eType - handle type
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++PVRSRV_ERROR PVRSRVDestroyHandle(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE hHandle,
++ PVRSRV_HANDLE_TYPE eType)
++{
++ PVRSRV_ERROR eError;
++
++ LockHandle(psBase);
++ eError = DestroyHandle(psBase, hHandle, eType, IMG_FALSE);
++ UnlockHandle(psBase);
++
++ return eError;
++}
++
++/*!
++*******************************************************************************
++ @Function PVRSRVDestroyHandleUnlocked
++ @Description Destroys a handle that is no longer needed without
++ acquiring/releasing the handle lock. The function assumes you
++ hold the lock when called.
++ Can return RETRY or KERNEL_CCB_FULL if the resource could not
++ be destroyed; the caller should retry later.
++ @Input psBase - pointer to handle base structure
++ hHandle - handle from client
++ eType - handle type
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++PVRSRV_ERROR PVRSRVDestroyHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE hHandle,
++ PVRSRV_HANDLE_TYPE eType)
++{
++ return DestroyHandle(psBase, hHandle, eType, IMG_FALSE);
++}
++
++/*!
++*******************************************************************************
++ @Function PVRSRVDestroyHandleStagedUnlocked
++ @Description Destroys a handle that is no longer needed without
++ acquiring/releasing the handle lock. The function assumes you
++ hold the lock when called. This function, unlike
++ PVRSRVDestroyHandleUnlocked(), releases the handle lock while
++ destroying handle private data. This is done to open the
++ bridge for other bridge calls.
++ Can return RETRY or KERNEL_CCB_FULL if the resource could not
++ be destroyed; the caller should retry later.
++ @Input psBase - pointer to handle base structure
++ hHandle - handle from client
++ eType - handle type
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++PVRSRV_ERROR PVRSRVDestroyHandleStagedUnlocked(PVRSRV_HANDLE_BASE *psBase,
++ IMG_HANDLE hHandle,
++ PVRSRV_HANDLE_TYPE eType)
++{
++ return DestroyHandle(psBase, hHandle, eType, IMG_TRUE);
++}
++
++/*!
++*******************************************************************************
++ @Function PVRSRVAllocHandleBase
++ @Description Allocate a handle base structure for a process
++ @Input eType - handle type
++ @Output ppsBase - points to handle base structure pointer
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
++ PVRSRV_HANDLE_BASE_TYPE eType)
++{
++ PVRSRV_HANDLE_BASE *psBase;
++ PVRSRV_ERROR eError;
++
++ PVR_LOG_RETURN_IF_FALSE(gpsHandleFuncs != NULL, "handle management not initialised",
++ PVRSRV_ERROR_NOT_READY);
++ PVR_LOG_RETURN_IF_INVALID_PARAM(ppsBase != NULL, "ppsBase");
++
++ psBase = OSAllocZMem(sizeof(*psBase));
++ PVR_LOG_RETURN_IF_NOMEM(psBase, "psBase");
++
++ eError = OSLockCreate(&psBase->hLock);
++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", ErrorFreeHandleBase);
++
++ psBase->eType = eType;
++
++ LockHandle(psBase);
++
++ eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase);
++ PVR_GOTO_IF_ERROR(eError, ErrorUnlock);
++
++ psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE,
++ sizeof(HAND_KEY),
++ HASH_Func_Default,
++ HASH_Key_Comp_Default);
++ PVR_LOG_GOTO_IF_FALSE(psBase->psHashTab != NULL, "couldn't create data pointer"
++ " hash table", ErrorDestroyHandleBase);
++
++ *ppsBase = psBase;
++
++ UnlockHandle(psBase);
++
++ return PVRSRV_OK;
++
++ErrorDestroyHandleBase:
++ (void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
++
++ErrorUnlock:
++ UnlockHandle(psBase);
++ OSLockDestroy(psBase->hLock);
++
++ErrorFreeHandleBase:
++ OSFreeMem(psBase);
++
++ return eError;
++}
++
++#if defined(DEBUG)
++typedef struct _COUNT_HANDLE_DATA_
++{
++ PVRSRV_HANDLE_BASE *psBase;
++ IMG_UINT32 uiHandleDataCount;
++} COUNT_HANDLE_DATA;
++
++/* Used to count the number of handles that have data associated with them */
++static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
++{
++ COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData;
++ HANDLE_DATA *psHandleData = NULL;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(gpsHandleFuncs);
++
++ PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData");
++ PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase");
++
++ eError = GetHandleData(psData->psBase,
++ &psHandleData,
++ hHandle,
++ PVRSRV_HANDLE_TYPE_NONE);
++ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");
++
++ if (psHandleData != NULL)
++ {
++
psData->uiHandleDataCount++; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/* Print a handle in the handle base. Used with the iterator callback. */ ++static PVRSRV_ERROR ListHandlesInBase(IMG_HANDLE hHandle, void *pvData) ++{ ++ PVRSRV_HANDLE_BASE *psBase = (PVRSRV_HANDLE_BASE*) pvData; ++ HANDLE_DATA *psHandleData = NULL; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(gpsHandleFuncs); ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBase != NULL, "psBase"); ++ ++ eError = GetHandleData(psBase, ++ &psHandleData, ++ hHandle, ++ PVRSRV_HANDLE_TYPE_NONE); ++ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); ++ ++ if (psHandleData != NULL) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ " Handle: %6u, CanLookup: %u, LookupCount: %3u, Type: %s (%u), pvData<%p>", ++ (IMG_UINT32) (uintptr_t) psHandleData->hHandle, psHandleData->bCanLookup, ++ psHandleData->iLookupCount, HandleTypeToString(psHandleData->eType), ++ psHandleData->eType, psHandleData->pvData)); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++#endif /* defined(DEBUG) */ ++ ++static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime) ++{ ++ /* unsigned arithmetic is well defined so this will wrap around correctly */ ++ return (IMG_BOOL)((OSClockns64() - ui64TimeStart) >= ui64MaxBridgeTime); ++} ++ ++static PVRSRV_ERROR FreeKernelHandlesWrapperIterKernel(IMG_HANDLE hHandle, void *pvData) ++{ ++ FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData; ++ HANDLE_DATA *psKernelHandleData = NULL; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(gpsHandleFuncs); ++ ++ /* Get kernel handle data. */ ++ eError = GetHandleData(KERNEL_HANDLE_BASE, ++ &psKernelHandleData, ++ hHandle, ++ PVRSRV_HANDLE_TYPE_NONE); ++ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); ++ ++ if (psKernelHandleData->pvData == psData->psProcessHandleData->pvData) ++ { ++ /* This kernel handle belongs to our process handle. */ ++ psData->hKernelHandle = hHandle; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR FreeKernelHandlesWrapperIterProcess(IMG_HANDLE hHandle, void *pvData) ++{ ++ FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(gpsHandleFuncs); ++ ++ /* Get process handle data. */ ++ eError = GetHandleData(psData->psBase, ++ &psData->psProcessHandleData, ++ hHandle, ++ PVRSRV_HANDLE_TYPE_NONE); ++ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData"); ++ ++ if (psData->psProcessHandleData->eFlag == PVRSRV_HANDLE_ALLOC_FLAG_MULTI ++#if defined(SUPPORT_INSECURE_EXPORT) ++ || psData->psProcessHandleData->eType == PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT ++#endif ++ ) ++ { ++ /* Only multi alloc process handles might be in kernel handle base. */ ++ psData->hKernelHandle = NULL; ++ /* Iterate over kernel handles. */ ++ eError = gpsHandleFuncs->pfnIterateOverHandles(KERNEL_HANDLE_BASE->psImplBase, ++ &FreeKernelHandlesWrapperIterKernel, ++ (void *)psData); ++ PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "failed to iterate over kernel handles", ++ eError); ++ ++ if (psData->hKernelHandle) ++ { ++ /* Release kernel handle which belongs to our process handle. 
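++ * The kernel handle merely aliases data owned by the process handle
++ * (matched by pvData above), so it is released directly through the
++ * back-end without invoking a release callback.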
*/
++ eError = gpsHandleFuncs->pfnReleaseHandle(KERNEL_HANDLE_BASE->psImplBase,
++ psData->hKernelHandle,
++ NULL);
++ PVR_LOG_RETURN_IF_FALSE(eError == PVRSRV_OK, "couldn't release kernel handle",
++ eError);
++ }
++ }
++
++ return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
++{
++ FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData;
++ HANDLE_DATA *psHandleData = NULL;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(gpsHandleFuncs);
++
++ PVR_LOG_RETURN_IF_INVALID_PARAM(psData != NULL, "psData");
++ PVR_LOG_RETURN_IF_INVALID_PARAM(psData->psBase != NULL, "psData->psBase");
++ PVR_LOG_RETURN_IF_INVALID_PARAM(psData->eHandleFreeType != PVRSRV_HANDLE_TYPE_NONE,
++ "psData->eHandleFreeType");
++
++ eError = GetHandleData(psData->psBase,
++ &psHandleData,
++ hHandle,
++ PVRSRV_HANDLE_TYPE_NONE);
++ PVR_LOG_RETURN_IF_ERROR(eError, "GetHandleData");
++
++ if (psHandleData == NULL || psHandleData->eType != psData->eHandleFreeType)
++ {
++ return PVRSRV_OK;
++ }
++
++ PVR_ASSERT(psHandleData->bCanLookup && psHandleData->iLookupCount == 0);
++
++ if (psHandleData->bCanLookup)
++ {
++ if (psHandleData->pfnReleaseData != NULL)
++ {
++ eError = psHandleData->pfnReleaseData(psHandleData->pvData);
++ if (eError == PVRSRV_ERROR_RETRY)
++ {
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Got retry while calling release "
++ "data callback for handle %p of type = %s", __func__,
++ hHandle, HandleTypeToString(psHandleData->eType)));
++
++ return eError;
++ }
++ else if (eError != PVRSRV_OK)
++ {
++ return eError;
++ }
++ }
++
++ psHandleData->bCanLookup = IMG_FALSE;
++ }
++
++ if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
++ {
++ HAND_KEY aKey;
++ IMG_HANDLE hRemovedHandle;
++
++ InitKey(aKey,
++ psData->psBase,
++ psHandleData->pvData,
++ psHandleData->eType,
++ ParentIfPrivate(psHandleData));
++
++ hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey);
++
++ PVR_ASSERT(hRemovedHandle != NULL);
++ PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
++ PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
++ }
++
++ eError = gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, NULL);
++ PVR_RETURN_IF_ERROR(eError);
++
++ OSFreeMem(psHandleData);
++
++ /* If we reach the end of the time slice we can release the global
++ * lock, invoke the scheduler and reacquire the lock */
++ if ((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime))
++ {
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "%s: Lock timeout (timeout: %" IMG_UINT64_FMTSPEC")",
++ __func__,
++ psData->ui64MaxBridgeTime));
++ UnlockHandle(psData->psBase);
++ /* Invoke the scheduler to check if other processes are waiting for the lock */
++ OSReleaseThreadQuanta();
++ LockHandle(psData->psBase);
++ /* Restart the lock timeout counter */
++ psData->ui64TimeStart = OSClockns64();
++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Lock acquired again", __func__));
++ }
++
++ return PVRSRV_OK;
++}
++
++/* The Ordered Array of PVRSRV_HANDLE_TYPE Enum Entries.
++ *
++ * Some handles must be destroyed before others; these relationships
++ * are established with respect to handle types. Therefore the
++ * elements of this array have to maintain a specific order,
++ * e.g. the PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET must be placed
++ * before PVRSRV_HANDLE_TYPE_RGX_FREELIST.
++ *
++ * If the ordering is incorrect, the driver may fail during its cleanup
++ * routines.
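++ * The cleanup path is expected to walk this array in order, freeing all
++ * handles of each type in turn; roughly (an illustrative sketch, see
++ * PVRSRVFreeHandleBase):
++ *
++ *     for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++)
++ *     {
++ *         sHandleData.eHandleFreeType = g_aeOrderedFreeList[i];
++ *         gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
++ *                                               &FreeHandleDataWrapper,
++ *                                               (void *)&sHandleData);
++ *     }
++ *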
++ * Unfortunately, we can mainly rely on the actual definition of the
++ * array; there is no explicit information about all the relationships
++ * between handle types. These relationships do not necessarily come from
++ * bridge-specified handle attributes such as 'sub handle' and 'parent
++ * handle'. They may come from internal/private ref-counters contained by
++ * objects referenced by our kernel handles.
++ *
++ * For example, at the bridge level, PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET
++ * and PVRSRV_HANDLE_TYPE_RGX_FREELIST have no explicit relationship, meaning
++ * neither of them is a sub-handle of the other.
++ * However, the freelist contains an internal ref-count that is decremented by
++ * the destroy routine for KM_HW_RT_DATASET.
++ *
++ * BE CAREFUL when adding/deleting/moving handle types.
++ */
++static const PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] =
++{
++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
++ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
++ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET,
++ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
++ PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
++ PVRSRV_HANDLE_TYPE_RGX_POPULATION,
++ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
++ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
++#if defined(PVR_TESTING_UTILS) && defined(SUPPORT_VALIDATION)
++ PVRSRV_HANDLE_TYPE_RGX_SERVER_GPUMAP_CONTEXT,
++#endif
++ PVRSRV_HANDLE_TYPE_RI_HANDLE,
++ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
++ PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
++ PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT,
++ PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
++ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
++ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
++ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
++ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
++ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
++ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
++ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
++ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
++ PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
++ PVRSRV_HANDLE_TYPE_DC_BUFFER,
++ PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
++ PVRSRV_HANDLE_TYPE_DC_DEVICE,
++ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
++ PVRSRV_HANDLE_TYPE_DI_CONTEXT,
++ PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP
++};
++
++/*!
++*******************************************************************************
++ @Function PVRSRVFreeKernelHandles
++ @Description Free kernel handles which belong to process handles
++ @Input psBase - pointer to handle base structure
++ @Return Error code or PVRSRV_OK
++******************************************************************************/
++PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase)
++{
++ FREE_KERNEL_HANDLE_DATA sHandleData = {NULL};
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(gpsHandleFuncs);
++
++ LockHandle(psBase);
++
++ sHandleData.psBase = psBase;
++ /* Iterate over process handles.
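++ * For each process handle, FreeKernelHandlesWrapperIterProcess() scans
++ * the kernel handle base for entries sharing the same pvData and
++ * releases them.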
*/ ++ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, ++ &FreeKernelHandlesWrapperIterProcess, ++ (void *)&sHandleData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock); ++ ++ eError = PVRSRV_OK; ++ ++ExitUnlock: ++ UnlockHandle(psBase); ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVRetrieveProcessHandleBase ++ @Description Returns a pointer to the process handle base for the current ++ process. If the current process is the cleanup thread, then the ++ process handle base for the process currently being cleaned up ++ is returned ++ @Return Pointer to the process handle base, or NULL if not found. ++******************************************************************************/ ++PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void) ++{ ++ PVRSRV_HANDLE_BASE *psHandleBase = NULL; ++ PROCESS_HANDLE_BASE *psProcHandleBase = NULL; ++ IMG_PID ui32PurgePid = PVRSRVGetPurgeConnectionPid(); ++ IMG_PID uiCleanupPid = PVRSRVCleanupThreadGetPid(); ++ uintptr_t uiCleanupTid = PVRSRVCleanupThreadGetTid(); ++ ++ OSLockAcquire(g_hProcessHandleBaseLock); ++ ++ /* Check to see if we're being called from the cleanup thread... */ ++ if ((OSGetCurrentProcessID() == uiCleanupPid) && ++ (OSGetCurrentThreadID() == uiCleanupTid) && ++ (ui32PurgePid > 0)) ++ { ++ /* Check to see if the cleanup thread has already removed the ++ * process handle base from the HASH table. ++ */ ++ psHandleBase = g_psProcessHandleBaseBeingFreed; ++ /* psHandleBase shouldn't be null, as cleanup thread ++ * should be removing this from the HASH table before ++ * we get here, so assert if not. ++ */ ++ PVR_ASSERT(psHandleBase); ++ } ++ else ++ { ++ /* Not being called from the cleanup thread, so return the process ++ * handle base for the current process. ++ */ ++ psProcHandleBase = (PROCESS_HANDLE_BASE *) ++ HASH_Retrieve(g_psProcessHandleBaseTable, OSGetCurrentClientProcessIDKM()); ++ } ++ ++ OSLockRelease(g_hProcessHandleBaseLock); ++ ++ if (psHandleBase == NULL && psProcHandleBase != NULL) ++ { ++ psHandleBase = psProcHandleBase->psHandleBase; ++ } ++ return psHandleBase; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVAcquireProcessHandleBase ++ @Description Increments reference count on a process handle base identified ++ by uiPid and returns pointer to the base. If the handle base ++ does not exist it will be allocated. 
++ @Input uiPid - PID of a process ++ @Output ppsBase - pointer to a handle base for the process identified by ++ uiPid ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase) ++{ ++ PROCESS_HANDLE_BASE *psBase; ++ PVRSRV_ERROR eError; ++ ++ OSLockAcquire(g_hProcessHandleBaseLock); ++ ++ psBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(g_psProcessHandleBaseTable, uiPid); ++ ++ /* In case there is none, we are going to allocate one */ ++ if (psBase == NULL) ++ { ++ IMG_BOOL bSuccess; ++ ++ psBase = OSAllocZMem(sizeof(*psBase)); ++ PVR_LOG_GOTO_IF_NOMEM(psBase, eError, ErrorUnlock); ++ ++ /* Allocate handle base for this process */ ++ eError = PVRSRVAllocHandleBase(&psBase->psHandleBase, PVRSRV_HANDLE_BASE_TYPE_PROCESS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorFreeProcessHandleBase); ++ ++ /* Insert the handle base into the global hash table */ ++ bSuccess = HASH_Insert(g_psProcessHandleBaseTable, uiPid, (uintptr_t) psBase); ++ PVR_LOG_GOTO_IF_FALSE(bSuccess, "HASH_Insert failed", ErrorFreeHandleBase); ++ } ++ ++ OSAtomicIncrement(&psBase->iRefCount); ++ ++ OSLockRelease(g_hProcessHandleBaseLock); ++ ++ *ppsBase = psBase; ++ ++ return PVRSRV_OK; ++ ++ErrorFreeHandleBase: ++ PVRSRVFreeHandleBase(psBase->psHandleBase, 0); ++ErrorFreeProcessHandleBase: ++ OSFreeMem(psBase); ++ErrorUnlock: ++ OSLockRelease(g_hProcessHandleBaseLock); ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVReleaseProcessHandleBase ++ @Description Decrements reference count on a process handle base psBase ++ for a process identified by uiPid. If the reference count ++ reaches 0 the handle base will be freed. ++ @Input psBase - pointer to a process handle base ++ @Input uiPid - PID of a process ++ @Input ui64MaxBridgeTime - maximum time a handle destroy operation ++ can hold the handle base lock (after that ++ time the lock will be released and reacquired ++ for another time slice) ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid, ++ IMG_UINT64 ui64MaxBridgeTime) ++{ ++ PVRSRV_ERROR eError; ++ IMG_INT iRefCount; ++ uintptr_t uiHashValue; ++ ++ OSLockAcquire(g_hProcessHandleBaseLock); ++ ++ iRefCount = OSAtomicDecrement(&psBase->iRefCount); ++ ++ if (iRefCount != 0) ++ { ++ OSLockRelease(g_hProcessHandleBaseLock); ++ return PVRSRV_OK; ++ } ++ ++ /* Once the refcount reaches 0 we can remove the process handle base ++ * and all related objects */ ++ ++ uiHashValue = HASH_Remove(g_psProcessHandleBaseTable, uiPid); ++ OSLockRelease(g_hProcessHandleBaseLock); ++ ++ PVR_LOG_RETURN_IF_FALSE(uiHashValue != 0, "HASH_Remove failed", ++ PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE); ++ ++ eError = PVRSRVFreeKernelHandles(psBase->psHandleBase); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeKernelHandles"); ++ ++ eError = PVRSRVFreeHandleBase(psBase->psHandleBase, ui64MaxBridgeTime); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVFreeHandleBase"); ++ ++ OSFreeMem(psBase); ++ ++ return PVRSRV_OK; ++}
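++ ++/* Illustrative sketch only (hypothetical bridge-level caller; the variable ++ * ui64MaxBridgeTime stands for a caller-supplied time budget and is not ++ * defined in this file). A typical caller brackets its use of the ++ * per-process handle base with the acquire/release pair: ++ * ++ * PROCESS_HANDLE_BASE *psProcBase; ++ * IMG_PID uiPid = OSGetCurrentClientProcessIDKM(); ++ * ++ * eError = PVRSRVAcquireProcessHandleBase(uiPid, &psProcBase); ++ * if (eError == PVRSRV_OK) ++ * { ++ * // ... use psProcBase->psHandleBase ... ++ * eError = PVRSRVReleaseProcessHandleBase(psProcBase, uiPid, ++ * ui64MaxBridgeTime); ++ * } ++ */ ++ ++/*!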
++******************************************************************************* ++ @Function PVRSRVFreeHandleBase ++ @Description Free a handle base structure ++ @Input psBase - pointer to handle base structure ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime) ++{ ++#if defined(DEBUG) ++ COUNT_HANDLE_DATA sCountData = {NULL}; ++#endif ++ FREE_HANDLE_DATA sHandleData = {NULL}; ++ IMG_UINT32 i; ++ PVRSRV_ERROR eError; ++ IMG_PID uiCleanupPid = PVRSRVCleanupThreadGetPid(); ++ uintptr_t uiCleanupTid = PVRSRVCleanupThreadGetTid(); ++ ++ PVR_ASSERT(gpsHandleFuncs); ++ ++ LockHandle(psBase); ++ ++ /* If this is a process handle base being freed by the cleanup ++ * thread, store this in g_psProcessHandleBaseBeingFreed ++ */ ++ if ((OSGetCurrentProcessID() == uiCleanupPid) && ++ (OSGetCurrentThreadID() == uiCleanupTid) && ++ (psBase->eType == PVRSRV_HANDLE_BASE_TYPE_PROCESS)) ++ { ++ g_psProcessHandleBaseBeingFreed = psBase; ++ } ++ ++ sHandleData.psBase = psBase; ++ sHandleData.ui64TimeStart = OSClockns64(); ++ sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime; ++ ++ ++#if defined(DEBUG) ++ ++ sCountData.psBase = psBase; ++ ++ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, ++ &CountHandleDataWrapper, ++ (void *)&sCountData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "pfnIterateOverHandles", ExitUnlock); ++ ++ if (sCountData.uiHandleDataCount != 0) ++ { ++ IMG_BOOL bList = (IMG_BOOL)(sCountData.uiHandleDataCount < HANDLE_DEBUG_LISTING_MAX_NUM); ++ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: %u remaining handles in handle base 0x%p " ++ "(PVRSRV_HANDLE_BASE_TYPE %u).%s", ++ __func__, ++ sCountData.uiHandleDataCount, ++ psBase, ++ psBase->eType, ++ bList ? "": " Skipping details, too many items...")); ++ ++ if (bList) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "-------- Listing Handles --------")); ++ (void) gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, ++ &ListHandlesInBase, ++ psBase); ++ PVR_DPF((PVR_DBG_WARNING, "-------- Done Listing --------")); ++ } ++ } ++ ++#endif /* defined(DEBUG) */ ++ ++ /* ++ * As we're freeing handles based on type, make sure all ++ * handles have actually had their data freed to avoid ++ * resources being leaked ++ */ ++ for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++) ++ { ++ sHandleData.eHandleFreeType = g_aeOrderedFreeList[i]; ++ ++ /* Make sure all handles have been freed before destroying the handle base */ ++ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase, ++ &FreeHandleDataWrapper, ++ (void *)&sHandleData); ++ PVR_GOTO_IF_ERROR(eError, ExitUnlock); ++ } ++ ++ ++ if (psBase->psHashTab != NULL) ++ { ++ HASH_Delete(psBase->psHashTab); ++ } ++ ++ eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase); ++ PVR_GOTO_IF_ERROR(eError, ExitUnlock); ++ ++ UnlockHandle(psBase); ++ OSLockDestroy(psBase->hLock); ++ OSFreeMem(psBase); ++ ++ return eError; ++ ++ExitUnlock: ++ if ((OSGetCurrentProcessID() == uiCleanupPid) && ++ (OSGetCurrentThreadID() == uiCleanupTid)) ++ { ++ g_psProcessHandleBaseBeingFreed = NULL; ++ } ++ UnlockHandle(psBase); ++ ++ return eError; ++} ++ ++/*! 
++******************************************************************************* ++ @Function PVRSRVHandleInit ++ @Description Initialise handle management ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVHandleInit(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(gpsKernelHandleBase == NULL); ++ PVR_ASSERT(gpsHandleFuncs == NULL); ++ PVR_ASSERT(g_hProcessHandleBaseLock == NULL); ++ PVR_ASSERT(g_psProcessHandleBaseTable == NULL); ++ PVR_ASSERT(!gbLockInitialised); ++ ++ eError = OSLockCreate(&gKernelHandleLock); ++ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate:1"); ++ ++ eError = OSLockCreate(&g_hProcessHandleBaseLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:2", ErrorHandleDeinit); ++ ++ gbLockInitialised = IMG_TRUE; ++ ++ eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVHandleGetFuncTable", ErrorHandleDeinit); ++ ++ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase, ++ PVRSRV_HANDLE_BASE_TYPE_GLOBAL); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVAllocHandleBase", ErrorHandleDeinit); ++ ++ g_psProcessHandleBaseTable = HASH_Create(HANDLE_PROC_HANDLE_HASH_INIT_SIZE); ++ PVR_LOG_GOTO_IF_NOMEM(g_psProcessHandleBaseTable, eError, ErrorHandleDeinit); ++ ++ eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase); ++ PVR_LOG_GOTO_IF_ERROR(eError, "pfnEnableHandlePurging", ErrorHandleDeinit); ++ ++ return PVRSRV_OK; ++ ++ErrorHandleDeinit: ++ (void) PVRSRVHandleDeInit(); ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVHandleDeInit ++ @Description De-initialise handle management ++ @Return Error code or PVRSRV_OK ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVHandleDeInit(void) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (gpsHandleFuncs != NULL) ++ { ++ if (gpsKernelHandleBase != NULL) ++ { ++ eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */); ++ if (eError == PVRSRV_OK) ++ { ++ gpsKernelHandleBase = NULL; ++ } ++ else ++ { ++ PVR_LOG_ERROR(eError, "PVRSRVFreeHandleBase"); ++ } ++ } ++ ++ if (eError == PVRSRV_OK) ++ { ++ gpsHandleFuncs = NULL; ++ } ++ } ++ else ++ { ++ /* If we don't have a handle function table we shouldn't have a handle base either */ ++ PVR_ASSERT(gpsKernelHandleBase == NULL); ++ } ++ ++ if (g_psProcessHandleBaseTable != NULL) ++ { ++ HASH_Delete(g_psProcessHandleBaseTable); ++ g_psProcessHandleBaseTable = NULL; ++ } ++ ++ if (g_hProcessHandleBaseLock != NULL) ++ { ++ OSLockDestroy(g_hProcessHandleBaseLock); ++ g_hProcessHandleBaseLock = NULL; ++ } ++ ++ if (gKernelHandleLock != NULL) ++ { ++ OSLockDestroy(gKernelHandleLock); ++ gbLockInitialised = IMG_FALSE; ++ } ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/handle.h b/drivers/gpu/drm/img-rogue/handle.h +new file mode 100644 +index 000000000000..92946b6fbb36 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/handle.h +@@ -0,0 +1,206 @@ ++/**************************************************************************/ /*! ++@File ++@Title Handle Manager API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Provide handle management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#if !defined(HANDLE_API_H) ++#define HANDLE_API_H ++ ++#include "lock_types.h" ++ ++/* ++ * Handle API ++ * ---------- ++ * The handle API is intended to provide handles for kernel resources, which ++ * can then be passed back to user space processes. ++ * ++ * The following functions comprise the API. Each function takes a pointer to ++ * a PVRSRV_HANDLE_BASE structure, one of which is allocated for each process, ++ * and stored in the per-process data area. Use KERNEL_HANDLE_BASE for handles ++ * not allocated for a particular process, or for handles that need to be ++ * allocated before the PVRSRV_HANDLE_BASE structure for the process is ++ * available. ++ * ++ * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, ++ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, ++ * PVRSRV_HANDLE_ALLOC_FLAG eFlag); ++ * ++ * Allocate a handle phHandle, for the resource of type eType pointed to by ++ * pvData. ++ * ++ * For handles that have a definite lifetime, where the corresponding resource ++ * is explicitly created and destroyed, eFlag should be zero. ++ * ++ * If a particular resource may be referenced multiple times by a given ++ * process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI will allow multiple ++ * handles to be allocated for the resource. Such handles cannot be found with ++ * PVRSRVFindHandle. 
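++ * ++ * As an illustrative sketch only (hypothetical caller; psPMR and the ++ * PMRRelease callback are assumed, and error handling is elided; note that ++ * the actual prototypes below also take a PFN_HANDLE_RELEASE argument for ++ * allocation and a bRef flag for lookup): ++ * ++ * IMG_HANDLE hPMR; ++ * void *pvLookedUp; ++ * ++ * eError = PVRSRVAllocHandle(psBase, &hPMR, psPMR, ++ * PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, ++ * PVRSRV_HANDLE_ALLOC_FLAG_NONE, &PMRRelease); ++ * eError = PVRSRVLookupHandle(psBase, &pvLookedUp, hPMR, ++ * PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ * eError = PVRSRVDestroyHandle(psBase, hPMR, ++ * PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);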
++ * ++ * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, ++ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, ++ * PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); ++ * ++ * This function is similar to PVRSRVAllocHandle, except that the allocated ++ * handles are associated with a parent handle, hParent, that has been ++ * allocated previously. Subhandles are automatically deallocated when their ++ * parent handle is deallocated. ++ * Subhandles can be treated as ordinary handles. For example, they may have ++ * subhandles of their own, and may be explicitly deallocated using ++ * PVRSRVReleaseHandle (see below). ++ * ++ * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, ++ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); ++ * ++ * Find the handle previously allocated for the resource pointed to by pvData, ++ * of type eType. Handles allocated with the flag ++ * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this function. ++ * ++ * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, ++ * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); ++ * ++ * Given a handle for a resource of type eType, return the pointer to the ++ * resource. ++ * ++ * PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, ++ * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, ++ * IMG_HANDLE hAncestor); ++ * ++ * Similar to PVRSRVLookupHandle, but checks the handle is a descendant ++ * of hAncestor. ++ * ++ * void PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, ++ * IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); ++ * ++ * Deallocate a handle of the given type. ++ * ++ * Return the parent of a handle in *phParent, or NULL if the handle has ++ * no parent. ++ */ ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "hash.h" ++ ++typedef enum ++{ ++ #define HANDLETYPE(x) PVRSRV_HANDLE_TYPE_##x, ++ #include "handle_types.h" ++ #undef HANDLETYPE ++} PVRSRV_HANDLE_TYPE; ++ ++static_assert(PVRSRV_HANDLE_TYPE_NONE == 0, "PVRSRV_HANDLE_TYPE_NONE must be zero"); ++ ++typedef enum ++{ ++ PVRSRV_HANDLE_BASE_TYPE_CONNECTION, ++ PVRSRV_HANDLE_BASE_TYPE_PROCESS, ++ PVRSRV_HANDLE_BASE_TYPE_GLOBAL ++} PVRSRV_HANDLE_BASE_TYPE; ++ ++ ++typedef enum ++{ ++ /* No flags */ ++ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0, ++ /* Multiple handles can point at the given data pointer */ ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x01, ++ /* Subhandles are allocated in a private handle space */ ++ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x02 ++} PVRSRV_HANDLE_ALLOC_FLAG; ++ ++typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE; ++ ++typedef struct _PROCESS_HANDLE_BASE_ ++{ ++ PVRSRV_HANDLE_BASE *psHandleBase; ++ ATOMIC_T iRefCount; ++} PROCESS_HANDLE_BASE; ++ ++extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase; ++#define KERNEL_HANDLE_BASE (gpsKernelHandleBase) ++ ++#define HANDLE_DEBUG_LISTING_MAX_NUM 20 ++ ++typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData); ++ ++PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData); ++PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData); ++ ++PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); ++PVRSRV_ERROR
PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent); ++ ++PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); ++PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType); ++ ++PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef); ++PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef); ++ ++PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor); ++ ++void PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); ++void PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); ++ ++PVRSRV_ERROR PVRSRVDestroyHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); ++PVRSRV_ERROR PVRSRVDestroyHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); ++PVRSRV_ERROR PVRSRVDestroyHandleStagedUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType); ++ ++PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase); ++ ++PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase, ++ PVRSRV_HANDLE_BASE_TYPE eType); ++ ++PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime); ++ ++PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase); ++ ++PVRSRV_ERROR PVRSRVHandleInit(void); ++ ++PVRSRV_ERROR PVRSRVHandleDeInit(void); ++ ++PVRSRV_HANDLE_BASE *PVRSRVRetrieveProcessHandleBase(void); ++ ++PVRSRV_ERROR PVRSRVAcquireProcessHandleBase(IMG_PID uiPid, PROCESS_HANDLE_BASE **ppsBase); ++PVRSRV_ERROR PVRSRVReleaseProcessHandleBase(PROCESS_HANDLE_BASE *psBase, IMG_PID uiPid, IMG_UINT64 ui64MaxBridgeTime); ++ ++void LockHandle(PVRSRV_HANDLE_BASE *psBase); ++void UnlockHandle(PVRSRV_HANDLE_BASE *psBase); ++ ++#endif /* !defined(HANDLE_API_H) */ +diff --git a/drivers/gpu/drm/img-rogue/handle_idr.c b/drivers/gpu/drm/img-rogue/handle_idr.c +new file mode 100644 +index 000000000000..c40e096bfaa5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/handle_idr.c +@@ -0,0 +1,440 @@ ++/*************************************************************************/ /*! ++@File ++@Title Resource Handle Manager - IDR Back-end ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Provide IDR based resource handle management back-end ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#include <linux/version.h> ++#include <linux/kernel.h> ++#include <linux/idr.h> ++#include <linux/gfp.h> ++#include <linux/err.h> ++ ++#include "handle_impl.h" ++#include "allocmem.h" ++#include "osfunc.h" ++#include "pvr_debug.h" ++ ++#define ID_VALUE_MIN 1 ++#define ID_VALUE_MAX INT_MAX ++ ++#define ID_TO_HANDLE(i) ((IMG_HANDLE)(uintptr_t)(i)) ++#define HANDLE_TO_ID(h) ((IMG_INT)(uintptr_t)(h)) ++ ++struct _HANDLE_IMPL_BASE_ ++{ ++ struct idr sIdr; ++ ++ IMG_UINT32 ui32MaxHandleValue; ++ ++ IMG_UINT32 ui32TotalHandCount; ++}; ++ ++typedef struct _HANDLE_ITER_DATA_WRAPPER_ ++{ ++ PFN_HANDLE_ITER pfnHandleIter; ++ void *pvHandleIterData; ++} HANDLE_ITER_DATA_WRAPPER; ++ ++ ++static int HandleIterFuncWrapper(int id, void *data, void *iter_data) ++{ ++ HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data; ++ ++ PVR_UNREFERENCED_PARAMETER(data); ++ ++ return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData); ++}
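++ ++/* The static functions below implement the HANDLE_IMPL_FUNCTAB callbacks. ++ * They are not called directly: the generic handle manager obtains them via ++ * PVRSRVHandleGetFuncTable() and drives them through the returned table, ++ * roughly as sketched here (illustrative only; pvData is a placeholder and ++ * error handling is elided): ++ * ++ * HANDLE_IMPL_FUNCTAB const *psFuncs; ++ * HANDLE_IMPL_BASE *psImplBase; ++ * IMG_HANDLE hHandle; ++ * ++ * eError = PVRSRVHandleGetFuncTable(&psFuncs); ++ * eError = psFuncs->pfnCreateHandleBase(&psImplBase); ++ * eError = psFuncs->pfnAcquireHandle(psImplBase, &hHandle, pvData); ++ * eError = psFuncs->pfnDestroyHandleBase(psImplBase); ++ */ ++ ++/*!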
++****************************************************************************** ++ ++ @Function AcquireHandle ++ ++ @Description Acquire a new handle ++ ++ @Input psBase - Pointer to handle base structure ++ phHandle - Points to a handle pointer ++ pvData - Pointer to resource to be associated with the handle ++ ++ @Output phHandle - Points to a handle pointer ++ ++ @Return Error code or PVRSRV_OK ++ ++******************************************************************************/ ++static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase, ++ IMG_HANDLE *phHandle, ++ void *pvData) ++{ ++ int id; ++ int result; ++ ++ PVR_ASSERT(psBase != NULL); ++ PVR_ASSERT(phHandle != NULL); ++ PVR_ASSERT(pvData != NULL); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) ++ idr_preload(GFP_KERNEL); ++ id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0); ++ idr_preload_end(); ++ ++ result = id; ++#else ++ do ++ { ++ if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id); ++ } while (result == -EAGAIN); ++ ++ if ((IMG_UINT32)id > psBase->ui32MaxHandleValue) ++ { ++ idr_remove(&psBase->sIdr, id); ++ result = -ENOSPC; ++ } ++#endif ++ ++ if (result < 0) ++ { ++ if (result == -ENOSPC) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached", ++ __func__, psBase->ui32MaxHandleValue)); ++ ++ return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; ++ } ++ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ psBase->ui32TotalHandCount++; ++ ++ *phHandle = ID_TO_HANDLE(id); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function ReleaseHandle ++ ++ @Description Release a handle that is no longer needed. ++ ++ @Input psBase - Pointer to handle base structure ++ hHandle - Handle to release ++ ppvData - Points to a void data pointer ++ ++ @Output ppvData - Points to a void data pointer ++ ++ @Return PVRSRV_OK or PVRSRV_ERROR ++ ++******************************************************************************/ ++static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase, ++ IMG_HANDLE hHandle, ++ void **ppvData) ++{ ++ int id = HANDLE_TO_ID(hHandle); ++ void *pvData; ++ ++ PVR_ASSERT(psBase); ++ ++ /* Get the data associated with the handle. If we get back NULL then ++ it's an invalid handle */ ++ ++ pvData = idr_find(&psBase->sIdr, id); ++ if (likely(pvData)) ++ { ++ idr_remove(&psBase->sIdr, id); ++ psBase->ui32TotalHandCount--; ++ } ++ ++ if (unlikely(pvData == NULL)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Handle out of range (%u > %u)", ++ __func__, id, psBase->ui32TotalHandCount)); ++ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; ++ } ++ ++ if (ppvData) ++ { ++ *ppvData = pvData; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/*! 
++****************************************************************************** ++ ++ @Function GetHandleData ++ ++ @Description Get the data associated with the given handle ++ ++ @Input psBase - Pointer to handle base structure ++ hHandle - Handle from which data should be retrieved ++ ppvData - Points to a void data pointer ++ ++ @Output ppvData - Points to a void data pointer ++ ++ @Return Error code or PVRSRV_OK ++ ++******************************************************************************/ ++static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase, ++ IMG_HANDLE hHandle, ++ void **ppvData) ++{ ++ int id = HANDLE_TO_ID(hHandle); ++ void *pvData; ++ ++ PVR_ASSERT(psBase); ++ PVR_ASSERT(ppvData); ++ ++ pvData = idr_find(&psBase->sIdr, id); ++ if (likely(pvData)) ++ { ++ *ppvData = pvData; ++ ++ return PVRSRV_OK; ++ } ++ else ++ { ++ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; ++ } ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function SetHandleData ++ ++ @Description Set the data associated with the given handle ++ ++ @Input psBase - Pointer to handle base structure ++ hHandle - Handle for which data should be changed ++ pvData - Pointer to new data to be associated with the handle ++ ++ @Return Error code or PVRSRV_OK ++ ++******************************************************************************/ ++static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase, ++ IMG_HANDLE hHandle, ++ void *pvData) ++{ ++ int id = HANDLE_TO_ID(hHandle); ++ void *pvOldData; ++ ++ PVR_ASSERT(psBase); ++ ++ pvOldData = idr_replace(&psBase->sIdr, pvData, id); ++ if (IS_ERR(pvOldData)) ++ { ++ if (PTR_ERR(pvOldData) == -ENOENT) ++ { ++ return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED; ++ } ++ else ++ { ++ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE; ++ } ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData) ++{ ++ HANDLE_ITER_DATA_WRAPPER sIterData; ++ ++ PVR_ASSERT(psBase); ++ PVR_ASSERT(pfnHandleIter); ++ ++ sIterData.pfnHandleIter = pfnHandleIter; ++ sIterData.pvHandleIterData = pvHandleIterData; ++ ++ return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData); ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function EnableHandlePurging ++ ++ @Description Enable purging for a given handle base ++ ++ @Input psBase - pointer to handle base structure ++ ++ @Return Error code or PVRSRV_OK ++ ++******************************************************************************/ ++static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase) ++{ ++ PVR_UNREFERENCED_PARAMETER(psBase); ++ PVR_ASSERT(psBase); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function PurgeHandles ++ ++ @Description Purge handles for a given handle base ++ ++ @Input psBase - Pointer to handle base structure ++ ++ @Return Error code or PVRSRV_OK ++ ++******************************************************************************/ ++static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase) ++{ ++ PVR_UNREFERENCED_PARAMETER(psBase); ++ PVR_ASSERT(psBase); ++ ++ return PVRSRV_OK; ++} ++ ++/*! 
++****************************************************************************** ++ ++ @Function CreateHandleBase ++ ++ @Description Create a handle base structure ++ ++ @Input ppsBase - pointer to handle base structure pointer ++ ++ @Output ppsBase - points to handle base structure pointer ++ ++ @Return Error code or PVRSRV_OK ++ ++******************************************************************************/ ++static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase) ++{ ++ HANDLE_IMPL_BASE *psBase; ++ ++ PVR_ASSERT(ppsBase); ++ ++ psBase = OSAllocZMem(sizeof(*psBase)); ++ if (psBase == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base", ++ __func__)); ++ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ idr_init(&psBase->sIdr); ++ ++ psBase->ui32MaxHandleValue = ID_VALUE_MAX; ++ psBase->ui32TotalHandCount = 0; ++ ++ *ppsBase = psBase; ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function DestroyHandleBase ++ ++ @Description Destroy a handle base structure ++ ++ @Input psBase - pointer to handle base structure ++ ++ @Return Error code or PVRSRV_OK ++ ++******************************************************************************/ ++static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase) ++{ ++ PVR_ASSERT(psBase); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)) ++ idr_remove_all(&psBase->sIdr); ++#endif ++ ++ /* Finally destroy the idr */ ++ idr_destroy(&psBase->sIdr); ++ ++ OSFreeMem(psBase); ++ ++ return PVRSRV_OK; ++} ++ ++ ++static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab = ++{ ++ .pfnAcquireHandle = AcquireHandle, ++ .pfnReleaseHandle = ReleaseHandle, ++ .pfnGetHandleData = GetHandleData, ++ .pfnSetHandleData = SetHandleData, ++ .pfnIterateOverHandles = IterateOverHandles, ++ .pfnEnableHandlePurging = EnableHandlePurging, ++ .pfnPurgeHandles = PurgeHandles, ++ .pfnCreateHandleBase = CreateHandleBase, ++ .pfnDestroyHandleBase = DestroyHandleBase ++}; ++ ++PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs) ++{ ++ static IMG_BOOL bAcquired = IMG_FALSE; ++ ++ if (bAcquired) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired", ++ __func__)); ++ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; ++ } ++ ++ if (ppsFuncs == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ *ppsFuncs = &g_sHandleFuncTab; ++ ++ bAcquired = IMG_TRUE; ++ ++ return PVRSRV_OK; ++} +diff --git a/drivers/gpu/drm/img-rogue/handle_impl.h b/drivers/gpu/drm/img-rogue/handle_impl.h +new file mode 100644 +index 000000000000..94305979d130 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/handle_impl.h +@@ -0,0 +1,89 @@ ++/**************************************************************************/ /*! ++@File ++@Title Implementation Callbacks for Handle Manager API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of the handle manager API. This file is for declarations ++ and definitions that are private/internal to the handle manager ++ API but need to be shared between the generic handle manager ++ code and the various handle manager backends, i.e. the code that ++ implements the various callbacks. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /***************************************************************************/ ++ ++#if !defined(HANDLE_IMPL_H) ++#define HANDLE_IMPL_H ++ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE; ++ ++typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData); ++ ++typedef struct _HANDLE_IMPL_FUNCTAB_ ++{ ++ /* Acquire a new handle which is associated with the given data */ ++ PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData); ++ ++ /* Release the given handle (optionally returning the data associated with it) */ ++ PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData); ++ ++ /* Get the data associated with the given handle */ ++ PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData); ++ ++ /* Set the data associated with the given handle */ ++ PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData); ++ ++ PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData); ++ ++ /* Enable handle purging on the given handle base */ ++ PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase); ++ ++ /* Purge handles on the given handle base */ ++ PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase); ++ ++ /* Create handle base */ ++ PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase); ++ ++ /* Destroy handle base */ ++ PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase); ++} HANDLE_IMPL_FUNCTAB; ++ ++PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs); ++ ++#endif /* !defined(HANDLE_IMPL_H) */ +diff --git a/drivers/gpu/drm/img-rogue/handle_types.h b/drivers/gpu/drm/img-rogue/handle_types.h +new file mode 100644 +index 000000000000..795e2061809f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/handle_types.h +@@ -0,0 +1,88 @@ ++/**************************************************************************/ /*! ++@File ++@Title Handle Manager handle types ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Provide handle management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++/* NOTE: Do not add include guards to this file */ ++ ++HANDLETYPE(NONE) ++HANDLETYPE(SHARED_EVENT_OBJECT) ++HANDLETYPE(EVENT_OBJECT_CONNECT) ++HANDLETYPE(PMR_LOCAL_EXPORT_HANDLE) ++HANDLETYPE(PHYSMEM_PMR) ++HANDLETYPE(PHYSMEM_PMR_EXPORT) ++HANDLETYPE(PHYSMEM_PMR_SECURE_EXPORT) ++HANDLETYPE(DEVMEMINT_CTX) ++HANDLETYPE(DEVMEMINT_CTX_EXPORT) ++HANDLETYPE(DEVMEMINT_HEAP) ++HANDLETYPE(DEVMEMINT_RESERVATION) ++HANDLETYPE(DEVMEMINT_MAPPING) ++HANDLETYPE(RGX_FW_MEMDESC) ++HANDLETYPE(RGX_FREELIST) ++HANDLETYPE(RGX_MEMORY_BLOCK) ++HANDLETYPE(RGX_SERVER_RENDER_CONTEXT) ++HANDLETYPE(RGX_SERVER_TQ_CONTEXT) ++HANDLETYPE(RGX_SERVER_TQ_TDM_CONTEXT) ++HANDLETYPE(RGX_SERVER_COMPUTE_CONTEXT) ++HANDLETYPE(RGX_SERVER_RAY_CONTEXT) ++HANDLETYPE(RGX_SERVER_KICKSYNC_CONTEXT) ++#if defined(PVR_TESTING_UTILS) && defined(SUPPORT_VALIDATION) ++HANDLETYPE(RGX_SERVER_GPUMAP_CONTEXT) ++#endif ++HANDLETYPE(SYNC_PRIMITIVE_BLOCK) ++HANDLETYPE(SYNC_RECORD_HANDLE) ++HANDLETYPE(PVRSRV_TIMELINE_SERVER) ++HANDLETYPE(PVRSRV_FENCE_SERVER) ++HANDLETYPE(PVRSRV_FENCE_EXPORT) ++HANDLETYPE(RGX_KM_HW_RT_DATASET) ++HANDLETYPE(RGX_FWIF_ZSBUFFER) ++HANDLETYPE(RGX_POPULATION) ++HANDLETYPE(DC_DEVICE) ++HANDLETYPE(DC_DISPLAY_CONTEXT) ++HANDLETYPE(DC_BUFFER) ++HANDLETYPE(DC_PIN_HANDLE) ++HANDLETYPE(DEVMEM_MEM_IMPORT) ++HANDLETYPE(PHYSMEM_PMR_PAGELIST) ++HANDLETYPE(PVR_TL_SD) ++HANDLETYPE(RI_HANDLE) ++HANDLETYPE(DEV_PRIV_DATA) ++HANDLETYPE(MM_PLAT_CLEANUP) ++HANDLETYPE(WORKEST_RETURN_DATA) ++HANDLETYPE(DI_CONTEXT) +diff --git a/drivers/gpu/drm/img-rogue/hash.c b/drivers/gpu/drm/img-rogue/hash.c +new file mode 100644 +index 000000000000..994ae5871f5f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/hash.c +@@ -0,0 +1,734 @@ ++/*************************************************************************/ /*! ++@File ++@Title Self scaling hash tables. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description ++ Implements simple self scaling hash tables. Hash collisions are handled by ++ chaining entries together. Hash tables are increased in size when they ++ become more than 75% full and decreased in size when less than 25% ++ full. Hash tables are never decreased below their initial size.
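++ ++ As a worked example of the thresholds set up in HASH_Create_Extended and ++ _Resize below: a table created with an initial length of 8 has a grow ++ threshold of 6 (three quarters of 8), so inserting a 7th entry doubles the ++ table to 16; the shrink threshold then becomes 4 (a quarter of 16), so the ++ table is halved back to 8 only once fewer than 4 entries remain.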
++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++/* include/ */ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvr_debug.h" ++#include "pvrsrv_error.h" ++ ++/* services/shared/include/ */ ++#include "hash.h" ++ ++/* services/client/include/ or services/server/include/ */ ++#include "osfunc_common.h" ++#include "allocmem.h" ++ ++//#define PERF_DBG_RESIZE ++#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) ++#include <sys/time.h> ++#endif ++ ++#if defined(__KERNEL__) ++#include "pvrsrv.h" ++#endif ++ ++#define KEY_TO_INDEX(pHash, key, uSize) \ ++ ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize)) ++ ++#define KEY_COMPARE(pHash, pKey1, pKey2) \ ++ ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2))) ++ ++#if defined(__linux__) && defined(__KERNEL__) ++#define _AllocMem OSAllocMemNoStats ++#define _AllocZMem OSAllocZMemNoStats ++#define _FreeMem OSFreeMemNoStats ++#else ++#define _AllocMem OSAllocMem ++#define _AllocZMem OSAllocZMem ++#define _FreeMem OSFreeMem ++#endif ++ ++#define NO_SHRINK 0 ++ ++/* Each entry in a hash table is placed into a bucket */ ++typedef struct _BUCKET_ ++{ ++ struct _BUCKET_ *pNext; /*!< the next bucket on the same chain */ ++ uintptr_t v; /*!< entry value */ ++ uintptr_t k[]; /* PRQA S 0642 */ ++ /* override dynamic array declaration warning */ ++} BUCKET; ++ ++struct _HASH_TABLE_ ++{ ++ IMG_UINT32 uSize; /*!< current size of the hash table */ ++ IMG_UINT32 uCount; /*!< number of entries currently in the hash table */ ++ IMG_UINT32 uMinimumSize; /*!< the minimum size that the hash table should be re-sized to */ ++ IMG_UINT32 uKeySize; /*!< size of key in bytes */ ++ IMG_UINT32 uShrinkThreshold; /*!< The threshold at which to trigger a shrink */ ++ IMG_UINT32 uGrowThreshold; /*!< The threshold at which to trigger a grow */ ++ HASH_FUNC* pfnHashFunc; /*!< hash function */ ++ HASH_KEY_COMP* pfnKeyComp; /*!< key comparison function */ ++ BUCKET** ppBucketTable; /*!< the hash table array */ ++#if defined(DEBUG) ++ const char* pszFile; ++ unsigned int ui32LineNum; ++#endif ++}; ++ ++/*************************************************************************/ /*! ++@Function HASH_Func_Default ++@Description Hash function intended for hashing keys composed of uintptr_t ++ arrays. ++@Input uKeySize The size of the hash key, in bytes. ++@Input pKey A pointer to the key to hash. ++@Input uHashTabLen The length of the hash table. ++@Return The hash value. ++*/ /**************************************************************************/ ++IMG_INTERNAL IMG_UINT32 ++HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) ++{ ++ uintptr_t *p = (uintptr_t *)pKey; ++ IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t); ++ IMG_UINT32 ui; ++ IMG_UINT32 uHashKey = 0; ++ ++ PVR_UNREFERENCED_PARAMETER(uHashTabLen); ++ ++ PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0); ++ ++ for (ui = 0; ui < uKeyLen; ui++) ++ { ++ IMG_UINT32 uHashPart = (IMG_UINT32)*p++; ++ ++ uHashPart += (uHashPart << 12); ++ uHashPart ^= (uHashPart >> 22); ++ uHashPart += (uHashPart << 4); ++ uHashPart ^= (uHashPart >> 9); ++ uHashPart += (uHashPart << 10); ++ uHashPart ^= (uHashPart >> 2); ++ uHashPart += (uHashPart << 7); ++ uHashPart ^= (uHashPart >> 12); ++ ++ uHashKey += uHashPart; ++ } ++ ++ return uHashKey; ++} ++ ++/*************************************************************************/ /*! ++@Function HASH_Key_Comp_Default ++@Description Compares keys composed of uintptr_t arrays.
++@Input uKeySize The size of the hash key, in bytes. ++@Input pKey1 Pointer to first hash key to compare. ++@Input pKey2 Pointer to second hash key to compare. ++@Return IMG_TRUE - The keys match. ++ IMG_FALSE - The keys don't match. ++*/ /**************************************************************************/ ++IMG_INTERNAL IMG_BOOL ++HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2) ++{ ++ uintptr_t *p1 = (uintptr_t *)pKey1; ++ uintptr_t *p2 = (uintptr_t *)pKey2; ++ IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t); ++ IMG_UINT32 ui; ++ ++ PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0); ++ ++ for (ui = 0; ui < uKeyLen; ui++) ++ { ++ if (*p1++ != *p2++) ++ return IMG_FALSE; ++ } ++ ++ return IMG_TRUE; ++} ++ ++/*************************************************************************/ /*! ++@Function _ChainInsert ++@Description Insert a bucket into the appropriate hash table chain. ++@Input pHash The hash table ++@Input pBucket The bucket ++@Input ppBucketTable The hash table array ++@Input uSize The size of the hash table ++@Return None ++*/ /**************************************************************************/ ++static void ++_ChainInsert(HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize) ++{ ++ IMG_UINT32 uIndex; ++ ++ /* We assume that all parameters passed by the caller are valid. */ ++ PVR_ASSERT(pBucket != NULL); ++ PVR_ASSERT(ppBucketTable != NULL); ++ PVR_ASSERT(uSize != 0); ++ ++ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize); /* PRQA S 0432,0541 */ /* ignore dynamic array warning */ ++ pBucket->pNext = ppBucketTable[uIndex]; ++ ppBucketTable[uIndex] = pBucket; ++} ++ ++/*************************************************************************/ /*! ++@Function _Rehash ++@Description Iterate over every entry in an old hash table and rehash into ++ the new table. ++@Input pHash The hash table ++@Input ppOldTable The old hash table ++@Input uOldSize The size of the old hash table ++@Input ppNewTable The new hash table ++@Input uNewSize The size of the new hash table ++@Return None ++*/ /**************************************************************************/ ++static void ++_Rehash(HASH_TABLE *pHash, ++ BUCKET **ppOldTable, IMG_UINT32 uOldSize, ++ BUCKET **ppNewTable, IMG_UINT32 uNewSize) ++{ ++ IMG_UINT32 uIndex; ++ for (uIndex = 0; uIndex < uOldSize; uIndex++) ++ { ++ BUCKET *pBucket; ++ pBucket = ppOldTable[uIndex]; ++ while (pBucket != NULL) ++ { ++ BUCKET *pNextBucket = pBucket->pNext; ++ _ChainInsert(pHash, pBucket, ppNewTable, uNewSize); ++ pBucket = pNextBucket; ++ } ++ } ++} ++ ++/*************************************************************************/ /*! ++@Function _Resize ++@Description Attempt to resize a hash table; failure to allocate a new, ++ larger hash table is not considered a hard failure. We simply ++ continue and allow the table to fill up; the effect is to ++ allow hash chains to become longer. ++@Input pHash Hash table to resize. ++@Input uNewSize Required table size.
++@Return IMG_TRUE Success ++ IMG_FALSE Failed ++*/ /**************************************************************************/ ++static IMG_BOOL ++_Resize(HASH_TABLE *pHash, IMG_UINT32 uNewSize) ++{ ++ BUCKET **ppNewTable; ++ IMG_UINT32 uiThreshold = uNewSize >> 2; ++#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) ++ struct timeval start, end; ++#endif ++ ++ if (uNewSize == pHash->uSize) ++ { ++ return IMG_TRUE; ++ } ++ ++#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) ++ gettimeofday(&start, NULL); ++#endif ++ ++ ppNewTable = _AllocZMem(sizeof(BUCKET *) * uNewSize); ++ if (ppNewTable == NULL) ++ { ++ return IMG_FALSE; ++ } ++ ++ _Rehash(pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize); ++ ++ _FreeMem(pHash->ppBucketTable); ++ ++#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE) ++ gettimeofday(&end, NULL); ++ if (start.tv_usec > end.tv_usec) ++ { ++ end.tv_usec = 1000000 - start.tv_usec + end.tv_usec; ++ } ++ else ++ { ++ end.tv_usec -= start.tv_usec; ++ } ++ ++ PVR_DPF((PVR_DBG_ERROR, "%s: H:%p O:%d N:%d C:%d G:%d S:%d T:%06luus", __func__, pHash, pHash->uSize, uNewSize, pHash->uCount, pHash->uGrowThreshold, pHash->uShrinkThreshold, end.tv_usec)); ++#endif ++ ++ /*not nulling pointer, being reassigned just below*/ ++ pHash->ppBucketTable = ppNewTable; ++ pHash->uSize = uNewSize; ++ ++ pHash->uGrowThreshold = uiThreshold * 3; ++ pHash->uShrinkThreshold = (uNewSize <= pHash->uMinimumSize) ? NO_SHRINK : uiThreshold; ++ ++ return IMG_TRUE; ++} ++ ++ ++/*************************************************************************/ /*! ++@Function HASH_Create_Extended ++@Description Create a self scaling hash table, using the supplied key size, ++ and the supplied hash and key comparison functions. ++@Input uInitialLen Initial and minimum length of the hash table, ++ where the length refers to the number of entries ++ in the hash table, not its size in bytes. ++@Input uKeySize The size of the key, in bytes. ++@Input pfnHashFunc Pointer to hash function. ++@Input pfnKeyComp Pointer to key comparison function. ++@Return NULL or hash table handle. 
++*/ /**************************************************************************/ ++IMG_INTERNAL ++HASH_TABLE * HASH_Create_Extended_Int (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp) ++{ ++ HASH_TABLE *pHash; ++ ++ if (uInitialLen == 0 || uKeySize == 0 || pfnHashFunc == NULL || pfnKeyComp == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input parameters", __func__)); ++ return NULL; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: InitialSize=0x%x", __func__, uInitialLen)); ++ ++ pHash = _AllocMem(sizeof(HASH_TABLE)); ++ if (pHash == NULL) ++ { ++ return NULL; ++ } ++ ++ pHash->uCount = 0; ++ pHash->uSize = uInitialLen; ++ pHash->uMinimumSize = uInitialLen; ++ pHash->uKeySize = uKeySize; ++ pHash->uGrowThreshold = (uInitialLen >> 2) * 3; ++ pHash->uShrinkThreshold = NO_SHRINK; ++ pHash->pfnHashFunc = pfnHashFunc; ++ pHash->pfnKeyComp = pfnKeyComp; ++ ++ pHash->ppBucketTable = _AllocZMem(sizeof(BUCKET *) * pHash->uSize); ++ if (pHash->ppBucketTable == NULL) ++ { ++ _FreeMem(pHash); ++ /*not nulling pointer, out of scope*/ ++ return NULL; ++ } ++ ++ return pHash; ++} ++ ++#if defined(DEBUG) ++IMG_INTERNAL ++HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp, ++ const char *file, const unsigned int line) ++{ ++ HASH_TABLE *hash; ++ hash = HASH_Create_Extended_Int(uInitialLen, uKeySize, ++ pfnHashFunc, pfnKeyComp); ++ if (hash) ++ { ++ hash->pszFile = file; ++ hash->ui32LineNum = line; ++ } ++ return hash; ++} ++#endif ++ ++/*************************************************************************/ /*! ++@Function HASH_Create ++@Description Create a self scaling hash table with a key consisting of a ++ single uintptr_t, and using the default hash and key ++ comparison functions. ++@Input uInitialLen Initial and minimum length of the hash table, ++ where the length refers to the number of entries ++ in the hash table, not its size in bytes. ++@Return NULL or hash table handle. ++*/ /**************************************************************************/ ++IMG_INTERNAL ++HASH_TABLE * HASH_Create_Int (IMG_UINT32 uInitialLen) ++{ ++ return HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t), ++ &HASH_Func_Default, &HASH_Key_Comp_Default); ++} ++ ++#if defined(DEBUG) ++IMG_INTERNAL ++HASH_TABLE * HASH_Create_Debug(IMG_UINT32 uInitialLen, const char *file, const unsigned int line) ++{ ++ HASH_TABLE *hash; ++ hash = HASH_Create_Extended_Int(uInitialLen, sizeof(uintptr_t), ++ &HASH_Func_Default, &HASH_Key_Comp_Default); ++ if (hash) ++ { ++ hash->pszFile = file; ++ hash->ui32LineNum = line; ++ } ++ return hash; ++} ++#endif ++ ++/*************************************************************************/ /*! ++@Function HASH_Delete_Extended ++@Description Delete a hash table created by HASH_Create_Extended or ++ HASH_Create. All entries in the table should have been removed ++ before calling this function. ++@Input pHash Hash table ++@Input bWarn Set false to suppress warnings in the case of ++ deletion with active entries. 
++*/ /**************************************************************************/ ++IMG_INTERNAL void ++HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn) ++{ ++ IMG_BOOL bDoCheck = IMG_TRUE; ++#if defined(__KERNEL__) && !defined(__QNXNTO__) ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ if (psPVRSRVData != NULL) ++ { ++ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ bDoCheck = IMG_FALSE; ++ } ++ } ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ else ++ { ++ bDoCheck = IMG_FALSE; ++ } ++#endif ++#endif ++ if (pHash != NULL) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "HASH_Delete")); ++ ++ if (bDoCheck) ++ { ++ PVR_ASSERT(pHash->uCount==0); ++ } ++ if (pHash->uCount != 0) ++ { ++ IMG_UINT32 i; ++ if (bWarn) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Leak detected in hash table!", __func__)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: Removing remaining %u hash entries.", __func__, pHash->uCount)); ++#if defined(DEBUG) ++ PVR_DPF ((PVR_DBG_ERROR, "%s: Hash %p created at %s:%u.", __func__, (uintptr_t*)pHash, pHash->pszFile, pHash->ui32LineNum)); ++#endif ++ } ++ ++ for (i = 0; i < pHash->uSize; i++) ++ { ++ BUCKET *pBucket = pHash->ppBucketTable[i]; ++ while (pBucket != NULL) ++ { ++ BUCKET *pNextBucket = pBucket->pNext; ++ _FreeMem(pBucket); ++ pBucket = pNextBucket; ++ } ++ } ++ ++ } ++ _FreeMem(pHash->ppBucketTable); ++ pHash->ppBucketTable = NULL; ++ _FreeMem(pHash); ++ /*not nulling pointer, copy on stack*/ ++ } ++} ++ ++/*************************************************************************/ /*! ++@Function HASH_Delete ++@Description Delete a hash table created by HASH_Create_Extended or ++ HASH_Create. All entries in the table must have been removed ++ before calling this function. ++@Input pHash Hash table ++*/ /**************************************************************************/ ++IMG_INTERNAL void ++HASH_Delete(HASH_TABLE *pHash) ++{ ++ HASH_Delete_Extended(pHash, IMG_TRUE); ++} ++ ++/*************************************************************************/ /*! ++@Function HASH_Insert_Extended ++@Description Insert a key value pair into a hash table created with ++ HASH_Create_Extended. ++@Input pHash The hash table. ++@Input pKey Pointer to the key. ++@Input v The value associated with the key. ++@Return IMG_TRUE - success. ++ IMG_FALSE - failure. 
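++
++ Illustrative round trip (a sketch, not part of the original source;
++ sKey is a hypothetical caller-defined key of exactly uKeySize bytes
++ and uiValue is a uintptr_t):
++
++ if (HASH_Insert_Extended(psTable, &sKey, uiValue))
++ {
++ uiValue = HASH_Retrieve_Extended(psTable, &sKey);
++ }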
++*/ /**************************************************************************/
++IMG_INTERNAL IMG_BOOL
++HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v)
++{
++ BUCKET *pBucket;
++
++ PVR_ASSERT(pHash != NULL);
++
++ if (pHash == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter", __func__));
++ return IMG_FALSE;
++ }
++
++ pBucket = _AllocMem(sizeof(BUCKET) + pHash->uKeySize);
++ if (pBucket == NULL)
++ {
++ return IMG_FALSE;
++ }
++
++ pBucket->v = v;
++ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/
++ OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize);
++
++ _ChainInsert(pHash, pBucket, pHash->ppBucketTable, pHash->uSize);
++
++ pHash->uCount++;
++
++ /* check if we need to think about re-balancing */
++ if (pHash->uCount > pHash->uGrowThreshold)
++ {
++ /* Ignore the return code from _Resize because the hash table is
++ still in a valid state and although not ideally sized, it is still
++ functional */
++ _Resize(pHash, pHash->uSize << 1);
++ }
++
++ return IMG_TRUE;
++}
++
++/*************************************************************************/ /*!
++@Function HASH_Insert
++@Description Insert a key value pair into a hash table created with
++ HASH_Create.
++@Input pHash The hash table.
++@Input k The key value.
++@Input v The value associated with the key.
++@Return IMG_TRUE - success.
++ IMG_FALSE - failure.
++*/ /**************************************************************************/
++IMG_INTERNAL IMG_BOOL
++HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v)
++{
++ return HASH_Insert_Extended(pHash, &k, v);
++}
++
++/*************************************************************************/ /*!
++@Function HASH_Remove_Extended
++@Description Remove a key from a hash table created with
++ HASH_Create_Extended.
++@Input pHash The hash table.
++@Input pKey Pointer to key.
++@Return 0 if the key is missing, or the value associated with the key.
++*/ /**************************************************************************/
++IMG_INTERNAL uintptr_t
++HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey)
++{
++ BUCKET **ppBucket;
++ IMG_UINT32 uIndex;
++
++ PVR_ASSERT(pHash != NULL);
++
++ if (pHash == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
++ {
++ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++ {
++ BUCKET *pBucket = *ppBucket;
++ uintptr_t v = pBucket->v;
++ (*ppBucket) = pBucket->pNext;
++
++ _FreeMem(pBucket);
++ /*not nulling original pointer, already overwritten*/
++
++ pHash->uCount--;
++
++ /* check if we need to think about re-balancing, when the shrink
++ * threshold is 0 we are at the minimum size, no further shrink */
++ if (pHash->uCount < pHash->uShrinkThreshold)
++ {
++ /* Ignore the return code from _Resize because the
++ hash table is still in a valid state and although
++ not ideally sized, it is still functional */
++ _Resize(pHash, MAX(pHash->uSize >> 1, pHash->uMinimumSize));
++ }
++
++ return v;
++ }
++ }
++ return 0;
++}
++
++/*************************************************************************/ /*!
++@Function HASH_Remove
++@Description Remove a key value pair from a hash table created with
++ HASH_Create.
++@Input pHash The hash table.
++@Input k The key value.
++@Return 0 if the key is missing, or the value associated with the key.
++*/ /**************************************************************************/
++IMG_INTERNAL uintptr_t
++HASH_Remove(HASH_TABLE *pHash, uintptr_t k)
++{
++ return HASH_Remove_Extended(pHash, &k);
++}
++
++/*************************************************************************/ /*!
++@Function HASH_Retrieve_Extended
++@Description Retrieve a value from a hash table created with
++ HASH_Create_Extended.
++@Input pHash The hash table.
++@Input pKey Pointer to key.
++@Return 0 if the key is missing, or the value associated with the key.
++*/ /**************************************************************************/
++IMG_INTERNAL uintptr_t
++HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey)
++{
++ BUCKET **ppBucket;
++ IMG_UINT32 uIndex;
++
++ PVR_ASSERT(pHash != NULL);
++
++ if (pHash == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Null hash table", __func__));
++ return 0;
++ }
++
++ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
++
++ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
++ {
++ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
++ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
++ {
++ BUCKET *pBucket = *ppBucket;
++ uintptr_t v = pBucket->v;
++
++ return v;
++ }
++ }
++ return 0;
++}
++
++/*************************************************************************/ /*!
++@Function HASH_Retrieve
++@Description Retrieve a value from a hash table created with HASH_Create.
++@Input pHash The hash table.
++@Input k The key value.
++@Return 0 if the key is missing, or the value associated with the key.
++*/ /**************************************************************************/
++IMG_INTERNAL uintptr_t
++HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k)
++{
++ return HASH_Retrieve_Extended(pHash, &k);
++}
++
++/*************************************************************************/ /*!
++@Function HASH_Iterate
++@Description Iterate over every entry in the hash table.
++@Input pHash Hash table to iterate.
++@Input pfnCallback Callback to call with the key and data for each
++ entry in the hash table
++@Return Callback error if any, otherwise PVRSRV_OK
++*/ /**************************************************************************/
++IMG_INTERNAL PVRSRV_ERROR
++HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args)
++{
++ IMG_UINT32 uIndex;
++ for (uIndex=0; uIndex < pHash->uSize; uIndex++)
++ {
++ BUCKET *pBucket;
++ pBucket = pHash->ppBucketTable[uIndex];
++ while (pBucket != NULL)
++ {
++ PVRSRV_ERROR eError;
++ BUCKET *pNextBucket = pBucket->pNext;
++
++ eError = pfnCallback((uintptr_t) ((void *) *(pBucket->k)), pBucket->v, args);
++
++ /* The callback might want us to break out early */
++ if (eError != PVRSRV_OK)
++ return eError;
++
++ pBucket = pNextBucket;
++ }
++ }
++ return PVRSRV_OK;
++}
++
++#ifdef HASH_TRACE
++/*************************************************************************/ /*!
++@Function HASH_Dump
++@Description Dump out some information about a hash table.
++@Input pHash The hash table.
++*/ /**************************************************************************/
++void
++HASH_Dump(HASH_TABLE *pHash)
++{
++ IMG_UINT32 uIndex;
++ IMG_UINT32 uMaxLength=0;
++ IMG_UINT32 uEmptyCount=0;
++
++ PVR_ASSERT(pHash != NULL);
++ for (uIndex=0; uIndex < pHash->uSize; uIndex++)
++ {
++ BUCKET *pBucket;
++ IMG_UINT32 uLength = 0;
++ if (pHash->ppBucketTable[uIndex] == NULL)
++ {
++ uEmptyCount++;
++ }
++ for (pBucket=pHash->ppBucketTable[uIndex];
++ pBucket != NULL;
++ pBucket = pBucket->pNext)
++ {
++ uLength++;
++ }
++ uMaxLength = MAX(uMaxLength, uLength);
++ }
++
++ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
++ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
++ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
++}
++#endif
+diff --git a/drivers/gpu/drm/img-rogue/hash.h b/drivers/gpu/drm/img-rogue/hash.h
+new file mode 100644
+index 000000000000..92d48990af82
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/hash.h
+@@ -0,0 +1,247 @@
++/*************************************************************************/ /*!
++@File
++@Title Self scaling hash tables
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Implements simple self scaling hash tables.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++#ifndef HASH_H ++#define HASH_H ++ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* ++ * Keys passed to the comparison function are only guaranteed to be aligned on ++ * an uintptr_t boundary. ++ */ ++typedef IMG_UINT32 HASH_FUNC(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); ++typedef IMG_BOOL HASH_KEY_COMP(size_t uKeySize, void *pKey1, void *pKey2); ++ ++typedef struct _HASH_TABLE_ HASH_TABLE; ++ ++typedef PVRSRV_ERROR (*HASH_pfnCallback) ( ++ uintptr_t k, ++ uintptr_t v, ++ void* pvPriv ++); ++ ++#if defined(DEBUG) ++#else ++#define HASH_CREATE(LEN) HASH_Create(LEN) ++#endif ++ ++/*************************************************************************/ /*! ++@Function HASH_Func_Default ++@Description Hash function intended for hashing keys composed of uintptr_t ++ arrays. ++@Input uKeySize The size of the hash key, in bytes. ++@Input pKey A pointer to the key to hash. ++@Input uHashTabLen The length of the hash table. ++@Return The hash value. ++*/ /**************************************************************************/ ++IMG_UINT32 HASH_Func_Default(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); ++ ++/*************************************************************************/ /*! ++@Function HASH_Key_Comp_Default ++@Description Compares keys composed of uintptr_t arrays. ++@Input uKeySize The size of the hash key, in bytes. ++@Input pKey1 Pointer to first hash key to compare. ++@Input pKey2 Pointer to second hash key to compare. ++@Return IMG_TRUE - The keys match. ++ IMG_FALSE - The keys don't match. ++*/ /**************************************************************************/ ++IMG_BOOL HASH_Key_Comp_Default(size_t uKeySize, void *pKey1, void *pKey2); ++ ++/*************************************************************************/ /*! ++@Function HASH_Create_Extended ++@Description Create a self scaling hash table, using the supplied key size, ++ and the supplied hash and key comparison functions. ++@Input uInitialLen Initial and minimum length of the hash table, ++ where the length refers to the number of entries ++ in the hash table, not its size in bytes. ++@Input uKeySize The size of the key, in bytes. ++@Input pfnHashFunc Pointer to hash function. ++@Input pfnKeyComp Pointer to key comparison function. ++@Return NULL or hash table handle. ++*/ /**************************************************************************/ ++HASH_TABLE * HASH_Create_Extended_Int(IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp); ++#if defined(DEBUG) ++#define HASH_Create_Extended(LEN, KS, FUN, CMP) HASH_Create_Extended_Debug(LEN, KS, FUN, CMP, __FILE__, __LINE__) ++HASH_TABLE * HASH_Create_Extended_Debug (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp, ++ const char *file, const unsigned int line); ++#else ++#define HASH_Create_Extended HASH_Create_Extended_Int ++#endif ++ ++/*************************************************************************/ /*! ++@Function HASH_Create ++@Description Create a self scaling hash table with a key consisting of a ++ single uintptr_t, and using the default hash and key ++ comparison functions. ++@Input uInitialLen Initial and minimum length of the hash table, ++ where the length refers to the number of entries ++ in the hash table, not its size in bytes. ++@Return NULL or hash table handle. 
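++
++ Minimal usage sketch (illustrative only, not part of the original
++ header; error handling elided):
++
++ HASH_TABLE *psTable = HASH_Create(32);
++
++ HASH_Insert(psTable, uiKey, uiValue);
++ uiValue = HASH_Retrieve(psTable, uiKey);
++ (void) HASH_Remove(psTable, uiKey);
++ HASH_Delete(psTable);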
++*/ /**************************************************************************/ ++HASH_TABLE * HASH_Create_Int(IMG_UINT32 uInitialLen); ++#if defined(DEBUG) ++#define HASH_Create(LEN) HASH_Create_Debug(LEN, __FILE__, __LINE__) ++HASH_TABLE * HASH_Create_Debug (IMG_UINT32 uInitialLen, const char *file, const unsigned int line); ++#else ++#define HASH_Create HASH_Create_Int ++#endif ++ ++/*************************************************************************/ /*! ++@Function HASH_Delete_Extended ++@Description Delete a hash table created by HASH_Create_Extended or ++ HASH_Create. All entries in the table should have been removed ++ before calling this function. ++@Input pHash Hash table ++@Input bWarn Set false to suppress warnings in the case of ++ deletion with active entries. ++@Return None ++*/ /**************************************************************************/ ++void HASH_Delete_Extended(HASH_TABLE *pHash, IMG_BOOL bWarn); ++ ++/*************************************************************************/ /*! ++@Function HASH_Delete ++@Description Delete a hash table created by HASH_Create_Extended or ++ HASH_Create. All entries in the table must have been removed ++ before calling this function. ++@Input pHash Hash table ++@Return None ++*/ /**************************************************************************/ ++void HASH_Delete(HASH_TABLE *pHash); ++ ++/*************************************************************************/ /*! ++@Function HASH_Insert_Extended ++@Description Insert a key value pair into a hash table created with ++ HASH_Create_Extended. ++@Input pHash The hash table. ++@Input pKey Pointer to the key. ++@Input v The value associated with the key. ++@Return IMG_TRUE - success. ++ IMG_FALSE - failure. ++*/ /**************************************************************************/ ++IMG_BOOL HASH_Insert_Extended(HASH_TABLE *pHash, void *pKey, uintptr_t v); ++ ++/*************************************************************************/ /*! ++@Function HASH_Insert ++@Description Insert a key value pair into a hash table created with ++ HASH_Create. ++@Input pHash The hash table. ++@Input k The key value. ++@Input v The value associated with the key. ++@Return IMG_TRUE - success. ++ IMG_FALSE - failure. ++*/ /**************************************************************************/ ++IMG_BOOL HASH_Insert(HASH_TABLE *pHash, uintptr_t k, uintptr_t v); ++ ++/*************************************************************************/ /*! ++@Function HASH_Remove_Extended ++@Description Remove a key from a hash table created with ++ HASH_Create_Extended. ++@Input pHash The hash table. ++@Input pKey Pointer to key. ++@Return 0 if the key is missing, or the value associated with the key. ++*/ /**************************************************************************/ ++uintptr_t HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey); ++ ++/*************************************************************************/ /*! ++@Function HASH_Remove ++@Description Remove a key value pair from a hash table created with ++ HASH_Create. ++@Input pHash The hash table. ++@Input k The key value. ++@Return 0 if the key is missing, or the value associated with the key. ++*/ /**************************************************************************/ ++uintptr_t HASH_Remove(HASH_TABLE *pHash, uintptr_t k); ++ ++/*************************************************************************/ /*! 
++@Function HASH_Retrieve_Extended
++@Description Retrieve a value from a hash table created with
++ HASH_Create_Extended.
++@Input pHash The hash table.
++@Input pKey Pointer to key.
++@Return 0 if the key is missing, or the value associated with the key.
++*/ /**************************************************************************/
++uintptr_t HASH_Retrieve_Extended(HASH_TABLE *pHash, void *pKey);
++
++/*************************************************************************/ /*!
++@Function HASH_Retrieve
++@Description Retrieve a value from a hash table created with HASH_Create.
++@Input pHash The hash table.
++@Input k The key value.
++@Return 0 if the key is missing, or the value associated with the key.
++*/ /**************************************************************************/
++uintptr_t HASH_Retrieve(HASH_TABLE *pHash, uintptr_t k);
++
++/*************************************************************************/ /*!
++@Function HASH_Iterate
++@Description Iterate over every entry in the hash table.
++@Input pHash Hash table to iterate.
++@Input pfnCallback Callback to call with the key and data for each
++ entry in the hash table
++@Return Callback error if any, otherwise PVRSRV_OK
++*/ /**************************************************************************/
++PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback, void* args);
++
++#ifdef HASH_TRACE
++/*************************************************************************/ /*!
++@Function HASH_Dump
++@Description Dump out some information about a hash table.
++@Input pHash The hash table.
++*/ /**************************************************************************/
++void HASH_Dump(HASH_TABLE *pHash);
++#endif
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif /* HASH_H */
++
++/******************************************************************************
++ End of file (hash.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/htb_debug.c b/drivers/gpu/drm/img-rogue/htb_debug.c
+new file mode 100644
+index 000000000000..f3614146531d
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/htb_debug.c
+@@ -0,0 +1,1190 @@
++/*************************************************************************/ /*!
++@File htb_debug.c
++@Title Debug Functionality
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Provides kernel side debugFS Functionality.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#include "rgxdevice.h" ++#include "htbserver.h" ++#include "htbuffer.h" ++#include "htbuffer_types.h" ++#include "tlstream.h" ++#include "tlclient.h" ++#include "pvrsrv_tlcommon.h" ++#include "di_server.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "osfunc.h" ++#include "allocmem.h" ++#include "pvr_notifier.h" ++#include "pvrsrv.h" ++#include "htb_debug.h" ++ ++// Global data handles for buffer manipulation and processing ++ ++typedef struct { ++ IMG_PBYTE pBuf; /* Raw data buffer from TL stream */ ++ IMG_UINT32 uiBufLen; /* Amount of data to process from 'pBuf' */ ++ IMG_UINT32 uiTotal; /* Total bytes processed */ ++ IMG_UINT32 uiMsgLen; /* Length of HTB message to be processed */ ++ IMG_PBYTE pCurr; /* pointer to current message to be decoded */ ++ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; /* Output string */ ++} HTB_Sentinel_t; ++ ++typedef struct ++{ ++ DI_ENTRY *psDumpHostDiEntry; /* debug info entry */ ++ HTB_Sentinel_t sSentinel; /* private control structure for HTB DI ++ operations */ ++ IMG_HANDLE hStream; /* stream handle for debugFS use */ ++} HTB_DBG_INFO; ++ ++static HTB_DBG_INFO g_sHTBData; ++ ++// Comment out for extra debug level ++// #define HTB_CHATTY_PRINT(x) PVR_DPF(x) ++#define HTB_CHATTY_PRINT(x) ++ ++typedef void (DI_PRINTF)(const OSDI_IMPL_ENTRY *, const IMG_CHAR *, ...); ++ ++/****************************************************************************** ++ * debugFS display routines ++ *****************************************************************************/ ++static int HTBDumpBuffer(DI_PRINTF, OSDI_IMPL_ENTRY *, void *); ++ ++static int _DebugHBTraceDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ int retVal; ++ ++ PVR_ASSERT(psEntry != NULL); ++ ++ /* psEntry should never be NULL */ ++ if (psEntry == NULL) ++ { ++ return -1; ++ } ++ ++ /* Ensure that we have a valid address to use to dump info from. If NULL we ++ * return a failure code to terminate the DI read call. pvData is either ++ * DI_START_TOKEN (for the initial call) or an HTB buffer address for ++ * subsequent calls [returned from the NEXT function]. 
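++ *
++ * For orientation, the call sequence driven by the DI framework is
++ * roughly the following (a sketch, not code from this file):
++ *
++ * pvData = Start(psEntry, &pos); // DI_START_TOKEN or message address
++ * while (pvData != NULL)
++ * {
++ * Show(psEntry, pvData); // this routine
++ * pvData = Next(psEntry, pvData, &pos);
++ * }
++ * Stop(psEntry, pvData);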
*/ ++ if (pvData == NULL) ++ { ++ return -1; ++ } ++ ++ retVal = HTBDumpBuffer(DIPrintf, psEntry, pvData); ++ ++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %d", __func__, retVal)); ++ ++ return retVal; ++} ++ ++static IMG_UINT32 idToLogIdx(IMG_UINT32); /* Forward declaration */ ++ ++/* ++ * HTB_GetNextMessage ++ * ++ * Get next non-empty message block from the buffer held in pSentinel->pBuf ++ * If we exhaust the data buffer we refill it (after releasing the previous ++ * message(s) [only one non-NULL message, but PAD messages will get released ++ * as we traverse them]. ++ * ++ * Input: ++ * pSentinel references the already acquired data buffer ++ * ++ * Output: ++ * pSentinel ++ * -> uiMsglen updated to the size of the non-NULL message ++ * ++ * Returns: ++ * Address of first non-NULL message in the buffer (if any) ++ * NULL if there is no further data available from the stream and the buffer ++ * contents have been drained. ++ */ ++static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *pSentinel) ++{ ++ void *pNext, *pLast, *pStart, *pData = NULL; ++ void *pCurrent; /* Current processing point within buffer */ ++ PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */ ++ IMG_UINT32 uiHdrType; /* Packet header type */ ++ IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */ ++ IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE; ++ IMG_UINT32 ui32Data; ++ IMG_UINT32 ui32LogIdx; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(pSentinel != NULL); ++ ++ pLast = pSentinel->pBuf + pSentinel->uiBufLen; ++ ++ pStart = pSentinel->pBuf; ++ ++ pNext = pStart; ++ pSentinel->uiMsgLen = 0; // Reset count for this message ++ uiMsgSize = 0; // nothing processed so far ++ ui32LogIdx = HTB_SF_LAST; // Loop terminator condition ++ ++ do ++ { ++ /* ++ * If we've drained the buffer we must RELEASE and ACQUIRE some more. ++ */ ++ if (pNext >= pLast) ++ { ++ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, ++ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen); ++ ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'", __func__, ++ "TLClientAcquireData", PVRSRVGETERRORSTRING(eError))); ++ return NULL; ++ } ++ ++ // Reset our limits - if we've returned an empty buffer we're done. ++ pLast = pSentinel->pBuf + pSentinel->uiBufLen; ++ pStart = pSentinel->pBuf; ++ pNext = pStart; ++ ++ if (pStart == NULL || pLast == NULL) ++ { ++ return NULL; ++ } ++ } ++ ++ /* ++ * We should have a header followed by data block(s) in the stream. ++ */ ++ ++ pCurrent = pNext; ++ ppHdr = GET_PACKET_HDR(pCurrent); ++ ++ if (ppHdr == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unexpected NULL packet in Host Trace buffer", ++ __func__)); ++ pSentinel->uiMsgLen += uiMsgSize; ++ return NULL; // This should never happen ++ } ++ ++ /* ++ * This should *NEVER* fire. If it does it means we have got some ++ * dubious packet header back from the HTB stream. In this case ++ * the sensible thing is to abort processing and return to ++ * the caller ++ */ ++ uiHdrType = GET_PACKET_TYPE(ppHdr); ++ ++ PVR_ASSERT(uiHdrType < PVRSRVTL_PACKETTYPE_LAST && ++ uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF); ++ ++ if (uiHdrType < PVRSRVTL_PACKETTYPE_LAST && ++ uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF) ++ { ++ /* ++ * We have a (potentially) valid data header. We should see if ++ * the associated packet header matches one of our expected ++ * types. 
++ */
++ pNext = GET_NEXT_PACKET_ADDR(ppHdr);
++
++ PVR_ASSERT(pNext != NULL);
++
++ uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr);
++
++ pSentinel->uiMsgLen += uiMsgSize;
++
++ pData = GET_PACKET_DATA_PTR(ppHdr);
++
++ /*
++ * Handle non-DATA packet types. These include PAD fields which
++ * may have data associated and other types. We simply discard
++ * these as they have no decodable information within them.
++ */
++ if (uiHdrType != PVRSRVTL_PACKETTYPE_DATA)
++ {
++ /*
++ * Now release the current non-data packet and proceed to the
++ * next entry (if any).
++ */
++ eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE,
++ g_sHTBData.hStream, uiMsgSize);
++
++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Packet Type %x "
++ "Length %u", __func__, uiHdrType, uiMsgSize));
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - '%s' message"
++ " size %u", __func__, "TLClientReleaseDataLess",
++ PVRSRVGETERRORSTRING(eError), uiMsgSize));
++ }
++
++ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
++ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
++
++ if (PVRSRV_OK != eError)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s Giving up",
++ __func__, "TLClientAcquireData",
++ PVRSRVGETERRORSTRING(eError)));
++
++ return NULL;
++ }
++ pSentinel->uiMsgLen = 0;
++ // Reset our limits - if we've returned an empty buffer we're done.
++ pLast = pSentinel->pBuf + pSentinel->uiBufLen;
++ pStart = pSentinel->pBuf;
++ pNext = pStart;
++
++ if (pStart == NULL || pLast == NULL)
++ {
++ return NULL;
++ }
++ continue;
++ }
++ if (pData == NULL || pData >= pLast)
++ {
++ continue;
++ }
++ ui32Data = *(IMG_UINT32 *)pData;
++ ui32LogIdx = idToLogIdx(ui32Data);
++ }
++ else
++ {
++ PVR_DPF((PVR_DBG_WARNING, "Unexpected Header @%p value %x",
++ ppHdr, uiHdrType));
++
++ return NULL;
++ }
++
++ /*
++ * Check if the unrecognized ID is valid and therefore, tracebuf
++ * needs updating.
++ */
++ if (HTB_SF_LAST == ui32LogIdx && HTB_LOG_VALIDID(ui32Data)
++ && IMG_FALSE == bUnrecognizedErrorPrinted)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'",
++ __func__, ui32Data, HTB_SF_GID(ui32Data),
++ HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData));
++ bUnrecognizedErrorPrinted = IMG_TRUE;
++ }
++
++ } while (HTB_SF_LAST == ui32LogIdx);
++
++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning data @ %p Log value '%x'",
++ __func__, pCurrent, ui32Data));
++
++ return pCurrent;
++}
++
++/*
++ * HTB_GetFirstMessage
++ *
++ * Called from START to obtain the buffer address of the first message within
++ * pSentinel->pBuf. Will ACQUIRE data if the buffer is empty.
++ *
++ * Input:
++ * pSentinel
++ * pui64Pos Offset within the debugFS file
++ *
++ * Output:
++ * pSentinel->pCurr Set to reference the first valid non-NULL message within
++ * the buffer. If no valid message is found set to NULL.
++ * pSentinel
++ * ->pBuf if unset on entry
++ * ->uiBufLen if pBuf unset on entry
++ *
++ * Side-effects:
++ * HTB TL stream will be updated to bypass any zero-length PAD messages before
++ * the first non-NULL message (if any).
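++ *
++ * The underlying acquire/decode/release cycle, as a sketch (using the
++ * same TLClient calls that appear elsewhere in this file):
++ *
++ * TLClientAcquireData(DIRECT_BRIDGE_HANDLE, hStream, &pBuf, &uiLen);
++ * pMsg = HTB_GetNextMessage(pSentinel); // first DATA packet or NULL
++ * // ... decode pMsg ...
++ * TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, hStream, uiMsgLen);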
++ */
++static void HTB_GetFirstMessage(HTB_Sentinel_t *pSentinel, IMG_UINT64 *pui64Pos)
++{
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(pui64Pos);
++
++ if (pSentinel == NULL)
++ return;
++
++ if (pSentinel->pBuf == NULL)
++ {
++ /* Acquire data */
++ pSentinel->uiMsgLen = 0;
++
++ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
++ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
++
++ if (PVRSRV_OK != eError)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'",
++ __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError)));
++
++ pSentinel->pBuf = NULL;
++ pSentinel->pCurr = NULL;
++ }
++ else
++ {
++ /*
++ * If there is no data available we set pSentinel->pCurr to NULL
++ * and return. This is expected behaviour if we've drained the
++ * data and nothing else has yet been produced.
++ */
++ if (pSentinel->uiBufLen == 0 || pSentinel->pBuf == NULL)
++ {
++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Empty Buffer @ %p",
++ __func__, pSentinel->pBuf));
++
++ pSentinel->pCurr = NULL;
++ return;
++ }
++ }
++ }
++
++ /* Locate next message within buffer. NULL => no more data to process */
++ pSentinel->pCurr = HTB_GetNextMessage(pSentinel);
++}
++
++/*
++ * _DebugHBTraceDIStart:
++ *
++ * Returns the address to use for subsequent 'Show', 'Next', 'Stop' file ops.
++ * Return DI_START_TOKEN for the very first call and allocate a sentinel for
++ * use by the 'Show' routine and its helpers.
++ * This is stored in the psEntry's private hook field.
++ *
++ * We obtain access to the TLstream associated with the HTB. If this doesn't
++ * exist (because no pvrdebug capture trace has been set) we simply return with
++ * a NULL value which will stop the DI traversal.
++ */
++static void *_DebugHBTraceDIStart(OSDI_IMPL_ENTRY *psEntry,
++ IMG_UINT64 *pui64Pos)
++{
++ HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry);
++ PVRSRV_ERROR eError;
++ IMG_UINT32 uiTLMode;
++ void *retVal;
++ IMG_HANDLE hStream;
++
++ /* The sentinel object should have been allocated during the creation
++ * of the DI entry. If it's not there it means that something went
++ * wrong. Return NULL in such case. */
++ if (pSentinel == NULL)
++ {
++ return NULL;
++ }
++
++ /* Check to see if the HTB stream has been configured yet. If not, there is
++ * nothing to display so we just return NULL to stop the stream access.
++ */
++ if (!HTBIsConfigured())
++ {
++ return NULL;
++ }
++
++ /* Open the stream in non-blocking mode so that we can determine if there
++ * is no data to consume. Also disable the producer callback (if any) and
++ * the open callback so that we do not generate spurious trace data when
++ * accessing the stream.
++ */
++ uiTLMode = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING|
++ PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK|
++ PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK;
++
++ /* If two or more processes try to read from this file at the same time
++ * the TLClientOpenStream() function will handle this by allowing only
++ * one of them to actually open the stream. The other process will get
++ * an error stating that the stream is already open. The open function
++ * is thread safe. */
++ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, HTB_STREAM_NAME, uiTLMode,
++ &hStream);
++
++ if (eError == PVRSRV_ERROR_ALREADY_OPEN)
++ {
++ /* Stream allows only one reader so return error if it's already
++ * opened.
*/
++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Stream handle %p already "
++ "exists for %s", __func__, g_sHTBData.hStream,
++ HTB_STREAM_NAME));
++ return NULL;
++ }
++ else if (eError != PVRSRV_OK)
++ {
++ /*
++ * No stream available so nothing to report
++ */
++ return NULL;
++ }
++
++ /* There is a window where hStream can be NULL but the stream is already
++ * opened. This shouldn't matter since the TLClientOpenStream() will make
++ * sure that only one stream can be opened and only one process can reach
++ * this place at a time. Also the .stop function will be always called
++ * after this function returns so there should be no risk of stream
++ * not being closed. */
++ PVR_ASSERT(g_sHTBData.hStream == NULL);
++ g_sHTBData.hStream = hStream;
++
++ /* We're starting the read operation so ensure we properly zero the
++ * sentinel object. */
++ memset(pSentinel, 0, sizeof(*pSentinel));
++
++ /*
++ * Find the first message location within pSentinel->pBuf
++ * => for DI_START_TOKEN we must issue our first ACQUIRE, also for the
++ * subsequent re-START calls (if any).
++ */
++
++ HTB_GetFirstMessage(pSentinel, pui64Pos);
++
++ retVal = *pui64Pos == 0 ? DI_START_TOKEN : pSentinel->pCurr;
++
++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %p, Stream %s @ %p",
++ __func__, retVal, HTB_STREAM_NAME, g_sHTBData.hStream));
++
++ return retVal;
++}
++
++/*
++ * _DebugHBTraceDIStop:
++ *
++ * Stop processing data collection and release any previously allocated private
++ * data structure if we have exhausted the previously filled data buffers.
++ */
++static void _DebugHBTraceDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData)
++{
++ HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry);
++ IMG_UINT32 uiMsgLen;
++ PVRSRV_ERROR eError;
++
++ if (pSentinel == NULL)
++ {
++ return;
++ }
++
++ uiMsgLen = pSentinel->uiMsgLen;
++
++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: MsgLen = %d", __func__, uiMsgLen));
++
++ /* If we get here the handle should never be NULL because
++ * _DebugHBTraceDIStart() shouldn't allow that. */
++ if (g_sHTBData.hStream == NULL)
++ {
++ return;
++ }
++
++ if (uiMsgLen != 0)
++ {
++ eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE,
++ g_sHTBData.hStream, uiMsgLen);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s, nBytes %u",
++ __func__, "TLClientReleaseDataLess",
++ PVRSRVGETERRORSTRING(eError), uiMsgLen));
++ }
++ }
++
++ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()",
++ "TLClientCloseStream", PVRSRVGETERRORSTRING(eError),
++ __func__));
++ }
++
++ g_sHTBData.hStream = NULL;
++}
++
++
++/*
++ * _DebugHBTraceDINext:
++ *
++ * This is where we release any acquired data which has been processed by the
++ * DIShow routine. If we have encountered a DI entry overflow we stop
++ * processing and return NULL. Otherwise we release the message that we
++ * previously processed and simply update our position pointer to the next
++ * valid HTB message (if any)
++ */
++static void *_DebugHBTraceDINext(OSDI_IMPL_ENTRY *psEntry, void *pvPriv,
++ IMG_UINT64 *pui64Pos)
++{
++ HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry);
++ IMG_UINT64 ui64CurPos;
++ PVRSRV_ERROR eError;
++
++ PVR_UNREFERENCED_PARAMETER(pvPriv);
++
++ if (pui64Pos)
++ {
++ ui64CurPos = *pui64Pos;
++ *pui64Pos = ui64CurPos + 1;
++ }
++
++ /* Determine if we've had an overflow on the previous 'Show' call.
If so ++ * we leave the previously acquired data in the queue (by releasing 0 bytes) ++ * and return NULL to end this DI entry iteration. ++ * If we have not overflowed we simply get the next HTB message and use that ++ * for our display purposes. */ ++ ++ if (DIHasOverflowed(psEntry)) ++ { ++ (void) TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream, ++ 0); ++ ++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: OVERFLOW - returning NULL", ++ __func__)); ++ ++ return NULL; ++ } ++ else ++ { ++ eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, ++ g_sHTBData.hStream, ++ pSentinel->uiMsgLen); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s' @ %p Length %d", ++ __func__, "TLClientReleaseDataLess", ++ PVRSRVGETERRORSTRING(eError), pSentinel->pCurr, ++ pSentinel->uiMsgLen)); ++ PVR_DPF((PVR_DBG_WARNING, "%s: Buffer @ %p..%p", __func__, ++ pSentinel->pBuf, ++ (IMG_PBYTE) (pSentinel->pBuf + pSentinel->uiBufLen))); ++ ++ } ++ ++ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, ++ g_sHTBData.hStream, &pSentinel->pBuf, ++ &pSentinel->uiBufLen); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'\nPrev message len %d", ++ __func__, "TLClientAcquireData", ++ PVRSRVGETERRORSTRING(eError), pSentinel->uiMsgLen)); ++ pSentinel->pBuf = NULL; ++ } ++ ++ pSentinel->uiMsgLen = 0; /* We don't (yet) know the message size */ ++ } ++ ++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Returning %p Msglen %d", __func__, ++ pSentinel->pBuf, pSentinel->uiMsgLen)); ++ ++ if (pSentinel->pBuf == NULL || pSentinel->uiBufLen == 0) ++ { ++ return NULL; ++ } ++ ++ pSentinel->pCurr = HTB_GetNextMessage(pSentinel); ++ ++ return pSentinel->pCurr; ++} ++ ++/****************************************************************************** ++ * HTB Dumping routines and definitions ++ *****************************************************************************/ ++#define IS_VALID_FMT_STRING(FMT) (strchr(FMT, '%') != NULL) ++#define MAX_STRING_SIZE (128) ++ ++typedef enum ++{ ++ TRACEBUF_ARG_TYPE_INT, ++ TRACEBUF_ARG_TYPE_ERR, ++ TRACEBUF_ARG_TYPE_NONE ++} TRACEBUF_ARG_TYPE; ++ ++/* ++ * Array of all Host Trace log IDs used to convert the tracebuf data ++ */ ++typedef struct _HTB_TRACEBUF_LOG_ { ++ HTB_LOG_SFids eSFId; ++ IMG_CHAR *pszName; ++ IMG_CHAR *pszFmt; ++ IMG_UINT32 ui32ArgNum; ++} HTB_TRACEBUF_LOG; ++ ++static const HTB_TRACEBUF_LOG aLogs[] = { ++#define X(a, b, c, d, e) {HTB_LOG_CREATESFID(a,b,e), #c, d, e}, ++ HTB_LOG_SFIDLIST ++#undef X ++}; ++ ++static const IMG_CHAR *aGroups[] = { ++#define X(A,B) #B, ++ HTB_LOG_SFGROUPLIST ++#undef X ++}; ++static const IMG_UINT32 uiMax_aGroups = ARRAY_SIZE(aGroups) - 1; ++ ++static TRACEBUF_ARG_TYPE ExtractOneArgFmt(IMG_CHAR **, IMG_CHAR *); ++/* ++ * ExtractOneArgFmt ++ * ++ * Scan the input 'printf-like' string *ppszFmt and return the next ++ * value string to be displayed. If there is no '%' format field in the ++ * string we return 'TRACEBUF_ARG_TYPE_NONE' and leave the input string ++ * untouched. 
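++ *
++ * Worked example (illustrative): with *ppszFmt = "count=%u at 0x%x", the
++ * first call copies "count=%u" into pszOneArgFmt, returns
++ * TRACEBUF_ARG_TYPE_INT, and leaves *ppszFmt pointing at " at 0x%x"
++ * ready for the next call.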
++ * ++ * Input ++ * ppszFmt reference to format string to be decoded ++ * pszOneArgFmt single field format from *ppszFmt ++ * ++ * Returns ++ * TRACEBUF_ARG_TYPE_ERR unrecognised argument ++ * TRACEBUF_ARG_TYPE_INT variable is of numeric type ++ * TRACEBUF_ARG_TYPE_NONE no variable reference in *ppszFmt ++ * ++ * Side-effect ++ * *ppszFmt is updated to reference the next part of the format string ++ * to be scanned ++ */ ++static TRACEBUF_ARG_TYPE ExtractOneArgFmt( ++ IMG_CHAR **ppszFmt, ++ IMG_CHAR *pszOneArgFmt) ++{ ++ IMG_CHAR *pszFmt; ++ IMG_CHAR *psT; ++ IMG_UINT32 ui32Count = MAX_STRING_SIZE; ++ IMG_UINT32 ui32OneArgSize; ++ TRACEBUF_ARG_TYPE eRet = TRACEBUF_ARG_TYPE_ERR; ++ ++ if (NULL == ppszFmt) ++ return TRACEBUF_ARG_TYPE_ERR; ++ ++ pszFmt = *ppszFmt; ++ if (NULL == pszFmt) ++ return TRACEBUF_ARG_TYPE_ERR; ++ ++ /* ++ * Find the first '%' ++ * NOTE: we can be passed a simple string to display which will have no ++ * parameters embedded within it. In this case we simply return ++ * TRACEBUF_ARG_TYPE_NONE and the string contents will be the full pszFmt ++ */ ++ psT = strchr(pszFmt, '%'); ++ if (psT == NULL) ++ { ++ return TRACEBUF_ARG_TYPE_NONE; ++ } ++ ++ /* Find next conversion identifier after the initial '%' */ ++ while ((*psT++) && (ui32Count-- > 0)) ++ { ++ switch (*psT) ++ { ++ case 'd': ++ case 'i': ++ case 'o': ++ case 'u': ++ case 'x': ++ case 'X': ++ { ++ eRet = TRACEBUF_ARG_TYPE_INT; ++ goto _found_arg; ++ } ++ case 's': ++ { ++ eRet = TRACEBUF_ARG_TYPE_ERR; ++ goto _found_arg; ++ } ++ } ++ } ++ ++ if ((psT == NULL) || (ui32Count == 0)) return TRACEBUF_ARG_TYPE_ERR; ++ ++_found_arg: ++ ui32OneArgSize = psT - pszFmt + 1; ++ OSCachedMemCopy(pszOneArgFmt, pszFmt, ui32OneArgSize); ++ pszOneArgFmt[ui32OneArgSize] = '\0'; ++ ++ *ppszFmt = psT + 1; ++ ++ return eRet; ++} ++ ++static IMG_UINT32 idToLogIdx(IMG_UINT32 ui32CheckData) ++{ ++ IMG_UINT32 i = 0; ++ for (i = 0; aLogs[i].eSFId != HTB_SF_LAST; i++) ++ { ++ if ( ui32CheckData == aLogs[i].eSFId ) ++ return i; ++ } ++ /* Nothing found, return max value */ ++ return HTB_SF_LAST; ++} ++ ++/* ++ * DecodeHTB ++ * ++ * Decode the data buffer message located at pBuf. This should be a valid ++ * HTB message as we are provided with the start of the buffer. If empty there ++ * is no message to process. We update the uiMsgLen field with the size of the ++ * HTB message that we have processed so that it can be returned to the system ++ * on successful logging of the message to the output file. 
++ * ++ * Input ++ * pSentinel reference to newly read data and pending completion data ++ * from a previous invocation [handle DI entry buffer overflow] ++ * -> pBuf reference to raw data that we are to parse ++ * -> uiBufLen total number of bytes of data available ++ * -> pCurr start of message to decode ++ * ++ * pvDumpDebugFile output file ++ * pfnDumpDebugPrintf output generating routine ++ * ++ * Output ++ * pSentinel ++ * -> uiMsgLen length of the decoded message which will be freed to ++ * the system on successful completion of the DI entry ++ * update via _DebugHBTraceDINext(), ++ * Return Value ++ * 0 successful decode ++ * -1 unsuccessful decode ++ */ ++static int ++DecodeHTB(HTB_Sentinel_t *pSentinel, OSDI_IMPL_ENTRY *pvDumpDebugFile, ++ DI_PRINTF pfnDumpDebugPrintf) ++{ ++ IMG_UINT32 ui32Data, ui32LogIdx, ui32ArgsCur; ++ IMG_CHAR *pszFmt = NULL; ++ IMG_CHAR aszOneArgFmt[MAX_STRING_SIZE]; ++ IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE; ++ ++ size_t nPrinted; ++ ++ void *pNext, *pLast, *pStart, *pData = NULL; ++ PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */ ++ IMG_UINT32 uiHdrType; /* Packet header type */ ++ IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */ ++ IMG_BOOL bPacketsDropped; ++ ++ pLast = pSentinel->pBuf + pSentinel->uiBufLen; ++ pStart = pSentinel->pCurr; ++ ++ pSentinel->uiMsgLen = 0; // Reset count for this message ++ ++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: Buf @ %p..%p, Length = %d", ++ __func__, pStart, pLast, pSentinel->uiBufLen)); ++ ++ /* ++ * We should have a DATA header with the necessary information following ++ */ ++ ppHdr = GET_PACKET_HDR(pStart); ++ ++ if (ppHdr == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unexpected NULL packet in Host Trace buffer", __func__)); ++ return -1; ++ } ++ ++ uiHdrType = GET_PACKET_TYPE(ppHdr); ++ PVR_ASSERT(uiHdrType == PVRSRVTL_PACKETTYPE_DATA); ++ ++ pNext = GET_NEXT_PACKET_ADDR(ppHdr); ++ ++ PVR_ASSERT(pNext != NULL); ++ ++ uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr); ++ ++ pSentinel->uiMsgLen += uiMsgSize; ++ ++ pData = GET_PACKET_DATA_PTR(ppHdr); ++ ++ if (pData == NULL || pData >= pLast) ++ { ++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: pData = %p, pLast = %p " ++ "Returning 0", __func__, pData, pLast)); ++ return 0; ++ } ++ ++ ui32Data = *(IMG_UINT32 *)pData; ++ ui32LogIdx = idToLogIdx(ui32Data); ++ ++ /* ++ * Check if the unrecognised ID is valid and therefore, tracebuf ++ * needs updating. ++ */ ++ if (ui32LogIdx == HTB_SF_LAST) ++ { ++ if (HTB_LOG_VALIDID(ui32Data)) ++ { ++ if (!bUnrecognizedErrorPrinted) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'", ++ __func__, ui32Data, HTB_SF_GID(ui32Data), ++ HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData)); ++ bUnrecognizedErrorPrinted = IMG_TRUE; ++ } ++ ++ return 0; ++ } ++ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unrecognised and invalid LOG value detected '%x'", ++ __func__, ui32Data)); ++ ++ return -1; ++ } ++ ++ /* The string format we are going to display */ ++ /* ++ * The display will show the header (log-ID, group-ID, number of params) ++ * The maximum parameter list length = 15 (only 4bits used to encode) ++ * so we need HEADER + 15 * sizeof(UINT32) and the displayed string ++ * describing the event. We use a buffer in the per-process pSentinel ++ * structure to hold the data. 
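++ *
++ * For orientation, the payload words read by the pui32Data walk below
++ * are laid out as consecutive IMG_UINT32 values:
++ *
++ * word 0 log ID (group, parameter count, event ID)
++ * word 1 PID
++ * word 2 TID
++ * word 3 timestamp, high 32 bits
++ * word 4 timestamp, low 32 bits
++ * word 5.. up to 15 parameters for the format string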
++ */ ++ pszFmt = aLogs[ui32LogIdx].pszFmt; ++ ++ /* add the message payload size to the running count */ ++ ui32ArgsCur = HTB_SF_PARAMNUM(ui32Data); ++ ++ /* Determine if we've over-filled the buffer and had to drop packets */ ++ bPacketsDropped = CHECK_PACKETS_DROPPED(ppHdr); ++ if (bPacketsDropped || ++ (uiHdrType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)) ++ { ++ /* Flag this as it is useful to know ... */ ++ ++ PVR_DUMPDEBUG_LOG("\n<========================== *** PACKETS DROPPED *** ======================>\n"); ++ } ++ ++ { ++ IMG_UINT32 ui32Timestampns, ui32PID, ui32TID; ++ IMG_UINT64 ui64Timestamp, ui64TimestampSec; ++ IMG_CHAR *szBuffer = pSentinel->szBuffer; // Buffer start ++ IMG_CHAR *pszBuffer = pSentinel->szBuffer; // Current place in buf ++ size_t uBufBytesAvailable = sizeof(pSentinel->szBuffer); ++ IMG_UINT32 *pui32Data = (IMG_UINT32 *)pData; ++ IMG_UINT32 ui_aGroupIdx; ++ ++ // Get PID field from data stream ++ pui32Data++; ++ ui32PID = *pui32Data; ++ // Get TID field from data stream ++ pui32Data++; ++ ui32TID = *pui32Data; ++ // Get Timestamp part 1 from data stream ++ pui32Data++; ++ ui64Timestamp = (IMG_UINT64) *pui32Data << 32; ++ // Get Timestamp part 2 from data stream ++ pui32Data++; ++ ui64Timestamp |= (IMG_UINT64) *pui32Data; ++ // Move to start of message contents data ++ pui32Data++; ++ ++ /* ++ * We need to snprintf the data to a local in-kernel buffer ++ * and then PVR_DUMPDEBUG_LOG() that in one shot ++ */ ++ ui_aGroupIdx = MIN(HTB_SF_GID(ui32Data), uiMax_aGroups); ++ ++ /* Divide by 1B to get seconds & mod using output var (nanosecond resolution)*/ ++ ui64TimestampSec = OSDivide64r64(ui64Timestamp, 1000000000, &ui32Timestampns); ++ ++ nPrinted = OSSNPrintf(szBuffer, uBufBytesAvailable, "%010"IMG_UINT64_FMTSPEC".%09u:%-5u-%-5u-%s> ", ++ ui64TimestampSec, ui32Timestampns, ui32PID, ui32TID, aGroups[ui_aGroupIdx]); ++ if (nPrinted >= uBufBytesAvailable) ++ { ++ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," ++ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, ++ uBufBytesAvailable); ++ ++ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ ++ } ++ ++ PVR_DUMPDEBUG_LOG("%s", pszBuffer); ++ /* Update where our next 'output' point in the buffer is */ ++ pszBuffer += nPrinted; ++ uBufBytesAvailable -= nPrinted; ++ ++ /* ++ * Print one argument at a time as this simplifies handling variable ++ * number of arguments. Special case handling for no arguments. ++ * This is the case for simple format strings such as ++ * HTB_SF_MAIN_KICK_UNCOUNTED. ++ */ ++ if (ui32ArgsCur == 0) ++ { ++ if (pszFmt) ++ { ++ nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); ++ if (nPrinted >= uBufBytesAvailable) ++ { ++ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," ++ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, ++ uBufBytesAvailable); ++ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ ++ } ++ PVR_DUMPDEBUG_LOG("%s", pszBuffer); ++ pszBuffer += nPrinted; ++ /* Don't update the uBufBytesAvailable as we have finished this ++ * message decode. pszBuffer - szBuffer is the total amount of ++ * data we have decoded. 
++ */ ++ } ++ } ++ else ++ { ++ if (HTB_SF_GID(ui32Data) == HTB_GID_CTRL && HTB_SF_ID(ui32Data) == HTB_ID_MARK_SCALE) ++ { ++ IMG_UINT32 i; ++ IMG_UINT32 ui32ArgArray[HTB_MARK_SCALE_ARG_ARRAY_SIZE]; ++ IMG_UINT64 ui64OSTS = 0; ++ IMG_UINT32 ui32OSTSRem = 0; ++ IMG_UINT64 ui64CRTS = 0; ++ ++ /* Retrieve 6 args to an array */ ++ for (i = 0; i < ARRAY_SIZE(ui32ArgArray); i++) ++ { ++ ui32ArgArray[i] = *pui32Data; ++ pui32Data++; ++ --ui32ArgsCur; ++ } ++ ++ ui64OSTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_OSTS_PT1] << 32 | ui32ArgArray[HTB_ARG_OSTS_PT2]; ++ ui64CRTS = (IMG_UINT64) ui32ArgArray[HTB_ARG_CRTS_PT1] << 32 | ui32ArgArray[HTB_ARG_CRTS_PT2]; ++ ++ /* Divide by 1B to get seconds, remainder in nano seconds*/ ++ ui64OSTS = OSDivide64r64(ui64OSTS, 1000000000, &ui32OSTSRem); ++ ++ nPrinted = OSSNPrintf(pszBuffer, ++ uBufBytesAvailable, ++ "HTBFWMkSync Mark=%u OSTS=%010" IMG_UINT64_FMTSPEC ".%09u CRTS=%" IMG_UINT64_FMTSPEC " CalcClkSpd=%u\n", ++ ui32ArgArray[HTB_ARG_SYNCMARK], ++ ui64OSTS, ++ ui32OSTSRem, ++ ui64CRTS, ++ ui32ArgArray[HTB_ARG_CLKSPD]); ++ ++ if (nPrinted >= uBufBytesAvailable) ++ { ++ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," ++ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, ++ uBufBytesAvailable); ++ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ ++ } ++ ++ PVR_DUMPDEBUG_LOG("%s", pszBuffer); ++ pszBuffer += nPrinted; ++ uBufBytesAvailable -= nPrinted; ++ } ++ else ++ { ++ while (IS_VALID_FMT_STRING(pszFmt) && (uBufBytesAvailable > 0)) ++ { ++ IMG_UINT32 ui32TmpArg = *pui32Data; ++ TRACEBUF_ARG_TYPE eArgType; ++ ++ eArgType = ExtractOneArgFmt(&pszFmt, aszOneArgFmt); ++ ++ pui32Data++; ++ ui32ArgsCur--; ++ ++ switch (eArgType) ++ { ++ case TRACEBUF_ARG_TYPE_INT: ++ nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, ++ aszOneArgFmt, ui32TmpArg); ++ break; ++ ++ case TRACEBUF_ARG_TYPE_NONE: ++ nPrinted = OSStringLCopy(pszBuffer, pszFmt, ++ uBufBytesAvailable); ++ break; ++ ++ default: ++ nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, ++ "Error processing arguments, type not " ++ "recognized (fmt: %s)", aszOneArgFmt); ++ break; ++ } ++ if (nPrinted >= uBufBytesAvailable) ++ { ++ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," ++ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, ++ uBufBytesAvailable); ++ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ ++ } ++ PVR_DUMPDEBUG_LOG("%s", pszBuffer); ++ pszBuffer += nPrinted; ++ uBufBytesAvailable -= nPrinted; ++ } ++ /* Display any remaining text in pszFmt string */ ++ if (pszFmt) ++ { ++ nPrinted = OSStringLCopy(pszBuffer, pszFmt, uBufBytesAvailable); ++ if (nPrinted >= uBufBytesAvailable) ++ { ++ PVR_DUMPDEBUG_LOG("Buffer overrun - "IMG_SIZE_FMTSPEC" printed," ++ " max space "IMG_SIZE_FMTSPEC"\n", nPrinted, ++ uBufBytesAvailable); ++ nPrinted = uBufBytesAvailable; /* Ensure we don't overflow buffer */ ++ } ++ PVR_DUMPDEBUG_LOG("%s", pszBuffer); ++ pszBuffer += nPrinted; ++ /* Don't update the uBufBytesAvailable as we have finished this ++ * message decode. pszBuffer - szBuffer is the total amount of ++ * data we have decoded. ++ */ ++ } ++ } ++ } ++ ++ /* Update total bytes processed */ ++ pSentinel->uiTotal += (pszBuffer - szBuffer); ++ } ++ return 0; ++} ++ ++/* ++ * HTBDumpBuffer: Dump the Host Trace Buffer using the TLClient API ++ * ++ * This routine just parses *one* message from the buffer. 
++ * The stream will be opened by the Start() routine, closed by the Stop() and ++ * updated for data consumed by this routine once we have DebugPrintf'd it. ++ * We use the new TLReleaseDataLess() routine which enables us to update the ++ * HTB contents with just the amount of data we have successfully processed. ++ * If we need to leave the data available we can call this with a 0 count. ++ * This will happen in the case of a buffer overflow so that we can reprocess ++ * any data which wasn't handled before. ++ * ++ * In case of overflow or an error we return -1 otherwise 0 ++ * ++ * Input: ++ * pfnPrintf output routine to display data ++ * psEntry handle to debug frontend ++ * pvData data address to start dumping from ++ * (set by Start() / Next()) ++ */ ++static int HTBDumpBuffer(DI_PRINTF pfnPrintf, OSDI_IMPL_ENTRY *psEntry, ++ void *pvData) ++{ ++ HTB_Sentinel_t *pSentinel = DIGetPrivData(psEntry); ++ ++ PVR_ASSERT(pvData != NULL); ++ ++ if (pvData == DI_START_TOKEN) ++ { ++ if (pSentinel->pCurr == NULL) ++ { ++ HTB_CHATTY_PRINT((PVR_DBG_WARNING, "%s: DI_START_TOKEN, " ++ "Empty buffer", __func__)); ++ return 0; ++ } ++ PVR_ASSERT(pSentinel->pCurr != NULL); ++ ++ /* Display a Header as we have data to process */ ++ pfnPrintf(psEntry, "%-20s:%-5s-%-5s-%s %s\n", "Timestamp", "PID", "TID", "Group>", ++ "Log Entry"); ++ } ++ else ++ { ++ if (pvData != NULL) ++ { ++ PVR_ASSERT(pSentinel->pCurr == pvData); ++ } ++ } ++ ++ return DecodeHTB(pSentinel, psEntry, pfnPrintf); ++} ++ ++ ++/****************************************************************************** ++ * External Entry Point routines ... ++ *****************************************************************************/ ++/*************************************************************************/ /*! ++ @Function HTB_CreateDIEntry ++ ++ @Description Create the debugFS entry-point for the host-trace-buffer ++ ++ @Returns eError internal error code, PVRSRV_OK on success ++ ++ */ /*************************************************************************/ ++PVRSRV_ERROR HTB_CreateDIEntry(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ DI_ITERATOR_CB sIterator = { ++ .pfnStart = _DebugHBTraceDIStart, ++ .pfnStop = _DebugHBTraceDIStop, ++ .pfnNext = _DebugHBTraceDINext, ++ .pfnShow = _DebugHBTraceDIShow, ++ }; ++ ++ eError = DICreateEntry("host_trace", NULL, &sIterator, ++ &g_sHTBData.sSentinel, ++ DI_ENTRY_TYPE_GENERIC, ++ &g_sHTBData.psDumpHostDiEntry); ++ PVR_LOG_RETURN_IF_ERROR(eError, "DICreateEntry"); ++ ++ return PVRSRV_OK; ++} ++ ++ ++/*************************************************************************/ /*! ++ @Function HTB_DestroyDIEntry ++ ++ @Description Destroy the debugFS entry-point created by earlier ++ HTB_CreateDIEntry() call. ++*/ /**************************************************************************/ ++void HTB_DestroyDIEntry(void) ++{ ++ if (g_sHTBData.psDumpHostDiEntry != NULL) ++ { ++ DIDestroyEntry(g_sHTBData.psDumpHostDiEntry); ++ g_sHTBData.psDumpHostDiEntry = NULL; ++ } ++} ++ ++/* EOF */ +diff --git a/drivers/gpu/drm/img-rogue/htb_debug.h b/drivers/gpu/drm/img-rogue/htb_debug.h +new file mode 100644 +index 000000000000..04132e13c302 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/htb_debug.h +@@ -0,0 +1,64 @@ ++/*************************************************************************/ /*! ++@File htb_debug.h ++@Title Linux debugFS routine setup header ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef HTB_DEBUG_H
++#define HTB_DEBUG_H
++
++/**************************************************************************/ /*!
++ @Function HTB_CreateDIEntry
++
++ @Description Create the debugFS entry-point for the host-trace-buffer
++
++ @Returns eError internal error code, PVRSRV_OK on success
++
++ */ /**************************************************************************/
++PVRSRV_ERROR HTB_CreateDIEntry(void);
++
++/**************************************************************************/ /*!
++ @Function HTB_DestroyDIEntry
++
++ @Description Destroy the debugFS entry-point created by earlier
++ HTB_CreateDIEntry() call.
++*/ /**************************************************************************/
++void HTB_DestroyDIEntry(void);
++
++#endif /* HTB_DEBUG_H */
+diff --git a/drivers/gpu/drm/img-rogue/htbserver.c b/drivers/gpu/drm/img-rogue/htbserver.c
+new file mode 100644
+index 000000000000..2ada5ab98f10
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/htbserver.c
+@@ -0,0 +1,857 @@
++/*************************************************************************/ /*!
++@File htbserver.c
++@Title Host Trace Buffer server implementation.
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Host Trace Buffer provides a mechanism to log Host events to a
++ buffer in a similar way to the Firmware Trace mechanism.
++ Host Trace Buffer logs data using a Transport Layer buffer. ++ The Transport Layer and pvrtld tool provides the mechanism to ++ retrieve the trace data. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "htbserver.h" ++#include "htbuffer.h" ++#include "htbuffer_types.h" ++#include "tlstream.h" ++#include "pvrsrv_tlcommon.h" ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "osfunc.h" ++#include "allocmem.h" ++#include "pvr_notifier.h" ++#include "pvrsrv.h" ++#include "pvrsrv_apphint.h" ++#include "oskm_apphint.h" ++ ++/* size of circular buffer controlling the maximum number of concurrent PIDs logged */ ++#define HTB_MAX_NUM_PID 8 ++ ++/* number of times to try rewriting a log entry */ ++#define HTB_LOG_RETRY_COUNT 5 ++ ++/*************************************************************************/ /*! ++ Host Trace Buffer control information structure ++*/ /**************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32BufferSize; /*!< Requested buffer size in bytes ++ Once set this may not be changed */ ++ ++ HTB_OPMODE_CTRL eOpMode; /*!< Control what trace data is dropped if ++ the buffer is full. 
++ Once set this may not be changed */
++
++/* IMG_UINT32 ui32GroupEnable; */ /*!< Flags word controlling groups to be
++ logged */
++
++ IMG_UINT32 ui32LogLevel; /*!< Log level to control messages logged */
++
++ IMG_UINT32 aui32EnablePID[HTB_MAX_NUM_PID]; /*!< PIDs to enable logging for
++ a specific set of processes */
++
++ IMG_UINT32 ui32PIDCount; /*!< Current number of PIDs being logged */
++
++ IMG_UINT32 ui32PIDHead; /*!< Head of the PID circular buffer */
++
++ HTB_LOGMODE_CTRL eLogMode; /*!< Logging mode control */
++
++ IMG_BOOL bLogDropSignalled; /*!< Flag indicating if a log message has
++ been signalled as dropped */
++
++ /* synchronisation parameters */
++ IMG_UINT64 ui64SyncOSTS;
++ IMG_UINT64 ui64SyncCRTS;
++ IMG_UINT32 ui32SyncCalcClkSpd;
++ IMG_UINT32 ui32SyncMarker;
++
++ IMG_BOOL bInitDone; /* Set by HTBInit, reset by HTBDeInit */
++
++ POS_SPINLOCK hRepeatMarkerLock; /*!< Spinlock used in HTBLogKM to protect global variables
++ (ByteCount, OSTS, CRTS, ClkSpeed)
++ from becoming inconsistent due to calls from
++ both KM and UM */
++
++ IMG_UINT32 ui32ByteCount; /* Byte count used for triggering repeat sync point */
++ /* static variables containing details of previous sync point */
++ IMG_UINT64 ui64OSTS;
++ IMG_UINT64 ui64CRTS;
++ IMG_UINT32 ui32ClkSpeed;
++
++} HTB_CTRL_INFO;
++
++
++/*************************************************************************/ /*!
++*/ /**************************************************************************/
++static const IMG_UINT32 MapFlags[] =
++{
++ 0, /* HTB_OPMODE_UNDEF = 0 */
++ TL_OPMODE_DROP_NEWER, /* HTB_OPMODE_DROPLATEST */
++ TL_OPMODE_DROP_OLDEST,/* HTB_OPMODE_DROPOLDEST */
++ TL_OPMODE_BLOCK /* HTB_OPMODE_BLOCK */
++};
++
++static_assert(0 == HTB_OPMODE_UNDEF, "Unexpected value for HTB_OPMODE_UNDEF");
++static_assert(1 == HTB_OPMODE_DROPLATEST, "Unexpected value for HTB_OPMODE_DROPLATEST");
++static_assert(2 == HTB_OPMODE_DROPOLDEST, "Unexpected value for HTB_OPMODE_DROPOLDEST");
++static_assert(3 == HTB_OPMODE_BLOCK, "Unexpected value for HTB_OPMODE_BLOCK");
++
++static_assert(1 == TL_OPMODE_DROP_NEWER, "Unexpected value for TL_OPMODE_DROP_NEWER");
++static_assert(2 == TL_OPMODE_DROP_OLDEST, "Unexpected value for TL_OPMODE_DROP_OLDEST");
++static_assert(3 == TL_OPMODE_BLOCK, "Unexpected value for TL_OPMODE_BLOCK");
++
++static const IMG_UINT32 g_ui32TLBaseFlags; //TL_FLAG_NO_SIGNAL_ON_COMMIT
++
++/* Minimum TL buffer size.
++ * Large enough for around 60 worst case messages or 200 average messages
++ */
++#define HTB_TL_BUFFER_SIZE_MIN (0x10000)
++
++/* The minimum concentration of HTB packets in a TL stream is 60%.
++ * If we put just the HTB header in the TL stream (12 bytes), the TL overhead
++ * is 8 bytes for its own header, so for the smallest possible (and most
++ * inefficient) packet 3/5 of the buffer holds actual HTB data.
++ * The shift below gives a conservative estimate of when to produce a repeat
++ * packet: halving the buffer size keeps the prediction threshold safely under
++ * the 60% boundary, so even if the previous marker has been overwritten we
++ * are guaranteed to re-emit one while a marker is still in the stream */
++#define HTB_MARKER_PREDICTION_THRESHOLD(val) (val >> 1)
++
++static HTB_CTRL_INFO g_sCtrl;
++static IMG_BOOL g_bConfigured = IMG_FALSE;
++static IMG_HANDLE g_hTLStream;
++
++static IMG_HANDLE hHtbDbgReqNotify;
++
++
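The three HTB operation modes translate one-for-one into Transport Layer policies through the MapFlags table above. As a toy illustration of what those policies mean once a fixed-size buffer is full — a standalone sketch, not driver code; the real decision is made inside the TL stream implementation:

    #include <stdbool.h>
    #include <stdint.h>

    enum opmode { DROP_NEWER, DROP_OLDEST, BLOCK_WRITER };

    struct ring {
        uint32_t buf[4];
        unsigned head, count;   /* head indexes the oldest element */
    };

    /* Returns false only in BLOCK_WRITER mode, telling the caller to retry. */
    static bool ring_put(struct ring *r, uint32_t v, enum opmode mode)
    {
        if (r->count == 4) {
            switch (mode) {
            case DROP_NEWER:                       /* discard the new entry */
                return true;
            case DROP_OLDEST:                      /* overwrite oldest entry */
                r->head = (r->head + 1) % 4;
                r->count--;
                break;
            case BLOCK_WRITER:                     /* caller must wait/retry */
                return false;
            }
        }
        r->buf[(r->head + r->count++) % 4] = v;
        return true;
    }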
++/************************************************************************/ /*!
++ @Function _LookupFlags
++ @Description Convert HTBuffer Operation mode to TLStream flags
++
++ @Input eMode Operation Mode
++
++ @Return IMG_UINT32 TLStream flags
++*/ /**************************************************************************/
++static IMG_UINT32
++_LookupFlags( HTB_OPMODE_CTRL eMode )
++{
++ return (eMode < ARRAY_SIZE(MapFlags)) ? MapFlags[eMode] : 0;
++}
++
++
++/************************************************************************/ /*!
++ @Function _HTBLogDebugInfo
++ @Description Debug dump handler used to dump the state of the HTB module.
++ Called for each verbosity level during a debug dump. Function
++ only prints state when called for High verbosity.
++
++ @Input hDebugRequestHandle See PFN_DBGREQ_NOTIFY
++
++ @Input ui32VerbLevel See PFN_DBGREQ_NOTIFY
++
++ @Input pfnDumpDebugPrintf See PFN_DBGREQ_NOTIFY
++
++ @Input pvDumpDebugFile See PFN_DBGREQ_NOTIFY
++
++*/ /**************************************************************************/
++static void _HTBLogDebugInfo(
++ PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
++ IMG_UINT32 ui32VerbLevel,
++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
++ void *pvDumpDebugFile
++)
++{
++ PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
++
++ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH))
++ {
++
++ if (g_bConfigured)
++ {
++ IMG_INT i;
++
++ PVR_DUMPDEBUG_LOG("------[ HTB Log state: On ]------");
++
++ PVR_DUMPDEBUG_LOG("HTB Log mode: %d", g_sCtrl.eLogMode);
++ PVR_DUMPDEBUG_LOG("HTB Log level: %d", g_sCtrl.ui32LogLevel);
++ PVR_DUMPDEBUG_LOG("HTB Buffer Opmode: %d", g_sCtrl.eOpMode);
++
++ for (i=0; i < HTB_FLAG_NUM_EL; i++)
++ {
++ PVR_DUMPDEBUG_LOG("HTB Log group %d: %x", i, g_auiHTBGroupEnable[i]);
++ }
++ }
++ else
++ {
++ PVR_DUMPDEBUG_LOG("------[ HTB Log state: Off ]------");
++ }
++ }
++}
++
++static IMG_UINT32 g_ui32HTBufferSize = HTB_TL_BUFFER_SIZE_MIN;
++
++/*
++ * AppHint access routine forward definitions
++ */
++static PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
++ IMG_UINT32);
++static PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
++ IMG_UINT32 *);
++
++static PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *, const void *,
++ IMG_UINT32);
++static PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *, const void *,
++ IMG_UINT32 *);
++
++static void _OnTLReaderOpenCallback(void *);
++
++/************************************************************************/ /*!
++ @Function HTBInit
++ @Description Allocate and initialise the Host Trace Buffer
++ The buffer size may be changed by specifying
++ HTBufferSizeInKB=xxxx
++
++ @Return eError Internal services call returned eError error
++ number
++*/ /**************************************************************************/
++PVRSRV_ERROR
++HTBInit(void)
++{
++ void *pvAppHintState = NULL;
++ IMG_UINT32 ui32AppHintDefault;
++ IMG_UINT32 ui32BufBytes;
++ PVRSRV_ERROR eError;
++
++ if (g_sCtrl.bInitDone)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "HTBInit: Driver already initialised"));
++ return PVRSRV_ERROR_ALREADY_EXISTS;
++ }
++
++ /*
++ * Buffer Size can be configured by specifying a value in the AppHint
++ * This will only take effect at module load time so there is no query
++ * or setting mechanism available.
++ */ ++ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBufferSizeInKB, ++ NULL, ++ NULL, ++ APPHINT_OF_DRIVER_NO_DEVICE, ++ NULL); ++ ++ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableHTBLogGroup, ++ _HTBReadLogGroup, ++ _HTBSetLogGroup, ++ APPHINT_OF_DRIVER_NO_DEVICE, ++ NULL); ++ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBOperationMode, ++ _HTBReadOpMode, ++ _HTBSetOpMode, ++ APPHINT_OF_DRIVER_NO_DEVICE, ++ NULL); ++ ++ /* ++ * Now get whatever values have been configured for our AppHints ++ */ ++ OSCreateKMAppHintState(&pvAppHintState); ++ ui32AppHintDefault = HTB_TL_BUFFER_SIZE_MIN / 1024; ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HTBufferSizeInKB, ++ &ui32AppHintDefault, &g_ui32HTBufferSize); ++ OSFreeKMAppHintState(pvAppHintState); ++ ++ ui32BufBytes = g_ui32HTBufferSize * 1024; ++ ++ /* initialise rest of state */ ++ g_sCtrl.ui32BufferSize = ++ (ui32BufBytes < HTB_TL_BUFFER_SIZE_MIN) ++ ? HTB_TL_BUFFER_SIZE_MIN ++ : ui32BufBytes; ++ g_sCtrl.eOpMode = HTB_OPMODE_DROPOLDEST; ++ g_sCtrl.ui32LogLevel = 0; ++ g_sCtrl.ui32PIDCount = 0; ++ g_sCtrl.ui32PIDHead = 0; ++ g_sCtrl.eLogMode = HTB_LOGMODE_ALLPID; ++ g_sCtrl.bLogDropSignalled = IMG_FALSE; ++ ++ eError = OSSpinLockCreate(&g_sCtrl.hRepeatMarkerLock); ++ PVR_LOG_RETURN_IF_ERROR(eError, "OSSpinLockCreate"); ++ ++ eError = PVRSRVRegisterDriverDbgRequestNotify(&hHtbDbgReqNotify, ++ _HTBLogDebugInfo, DEBUG_REQUEST_HTB, NULL); ++ PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify"); ++ ++ g_sCtrl.bInitDone = IMG_TRUE; ++ ++ /* Log the current driver parameter setting for the HTBufferSizeInKB. ++ * We do this here as there is no other infrastructure for obtaining ++ * the value. ++ */ ++ if (g_ui32HTBufferSize != ui32AppHintDefault) ++ { ++ PVR_LOG(("Increasing HTBufferSize to %uKB", g_ui32HTBufferSize)); ++ } ++ ++ ++ return PVRSRV_OK; ++} ++ ++/************************************************************************/ /*! ++ @Function HTBDeInit ++ @Description Close the Host Trace Buffer and free all resources. Must ++ perform a no-op if already de-initialised. ++ ++ @Return eError Internal services call returned eError error ++ number ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++HTBDeInit( void ) ++{ ++ if (!g_sCtrl.bInitDone) ++ return PVRSRV_OK; ++ ++ if (hHtbDbgReqNotify) ++ { ++ /* Not much we can do if it fails, driver unloading */ ++ (void)PVRSRVUnregisterDriverDbgRequestNotify(hHtbDbgReqNotify); ++ hHtbDbgReqNotify = NULL; ++ } ++ ++ if (g_hTLStream) ++ { ++ TLStreamClose( g_hTLStream ); ++ g_hTLStream = NULL; ++ } ++ ++ if (g_sCtrl.hRepeatMarkerLock != NULL) ++ { ++ OSSpinLockDestroy(g_sCtrl.hRepeatMarkerLock); ++ g_sCtrl.hRepeatMarkerLock = NULL; ++ } ++ ++ g_sCtrl.bInitDone = IMG_FALSE; ++ return PVRSRV_OK; ++} ++ ++ ++/*************************************************************************/ /*! 
++ AppHint interface functions ++*/ /**************************************************************************/ ++static ++PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_UINT32 ui32Value) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(psPrivate); ++ ++ return HTBControlKM(1, &ui32Value, 0, 0, ++ HTB_LOGMODE_UNDEF, HTB_OPMODE_UNDEF); ++} ++ ++static ++PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_UINT32 *pui32Value) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(psPrivate); ++ ++ *pui32Value = g_auiHTBGroupEnable[0]; ++ return PVRSRV_OK; ++} ++ ++static ++PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_UINT32 ui32Value) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(psPrivate); ++ ++ return HTBControlKM(0, NULL, 0, 0, HTB_LOGMODE_UNDEF, ui32Value); ++} ++ ++static ++PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_UINT32 *pui32Value) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(psPrivate); ++ ++ *pui32Value = (IMG_UINT32)g_sCtrl.eOpMode; ++ return PVRSRV_OK; ++} ++ ++ ++static void ++_OnTLReaderOpenCallback( void *pvArg ) ++{ ++ if ( g_hTLStream ) ++ { ++ IMG_UINT64 ui64Time; ++ OSClockMonotonicns64(&ui64Time); ++ (void) HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE, ++ g_sCtrl.ui32SyncMarker, ++ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ++ ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)), ++ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ++ ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)), ++ g_sCtrl.ui32SyncCalcClkSpd); ++ } ++ ++ PVR_UNREFERENCED_PARAMETER(pvArg); ++} ++ ++ ++/*************************************************************************/ /*! 
++ @Function HTBControlKM
++ @Description Update the configuration of the Host Trace Buffer
++
++ @Input ui32NumFlagGroups Number of group enable flags words
++
++ @Input aui32GroupEnable Flags words controlling groups to be logged
++
++ @Input ui32LogLevel Log level to record
++
++ @Input ui32EnablePID PID to enable logging for a specific process
++
++ @Input eLogMode Enable logging for all or specific processes
++
++ @Input eOpMode Control the behaviour of the data buffer
++
++ @Return eError Internal services call returned eError error
++ number
++*/ /**************************************************************************/
++PVRSRV_ERROR
++HTBControlKM(
++ const IMG_UINT32 ui32NumFlagGroups,
++ const IMG_UINT32 * aui32GroupEnable,
++ const IMG_UINT32 ui32LogLevel,
++ const IMG_UINT32 ui32EnablePID,
++ const HTB_LOGMODE_CTRL eLogMode,
++ const HTB_OPMODE_CTRL eOpMode
++)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
++ IMG_UINT32 i;
++ IMG_UINT64 ui64Time;
++ OSClockMonotonicns64(&ui64Time);
++
++ if ( !g_bConfigured && ui32NumFlagGroups )
++ {
++ eError = TLStreamCreate(
++ &g_hTLStream,
++ HTB_STREAM_NAME,
++ g_sCtrl.ui32BufferSize,
++ _LookupFlags(HTB_OPMODE_DROPOLDEST) | g_ui32TLBaseFlags,
++ _OnTLReaderOpenCallback, NULL, NULL, NULL);
++ PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate");
++ g_bConfigured = IMG_TRUE;
++ }
++
++ if (HTB_OPMODE_UNDEF != eOpMode && g_sCtrl.eOpMode != eOpMode)
++ {
++ g_sCtrl.eOpMode = eOpMode;
++ eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
++ while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
++ {
++ OSReleaseThreadQuanta();
++ eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
++ }
++ PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamReconfigure");
++ }
++
++ if ( ui32EnablePID )
++ {
++ g_sCtrl.aui32EnablePID[g_sCtrl.ui32PIDHead] = ui32EnablePID;
++ g_sCtrl.ui32PIDHead++;
++ g_sCtrl.ui32PIDHead %= HTB_MAX_NUM_PID;
++ g_sCtrl.ui32PIDCount++;
++ if ( g_sCtrl.ui32PIDCount > HTB_MAX_NUM_PID )
++ {
++ g_sCtrl.ui32PIDCount = HTB_MAX_NUM_PID;
++ }
++ }
++
++ /* HTB_LOGMODE_ALLPID overrides ui32EnablePID */
++ if ( HTB_LOGMODE_ALLPID == eLogMode )
++ {
++ OSCachedMemSet(g_sCtrl.aui32EnablePID, 0, sizeof(g_sCtrl.aui32EnablePID));
++ g_sCtrl.ui32PIDCount = 0;
++ g_sCtrl.ui32PIDHead = 0;
++ }
++ if ( HTB_LOGMODE_UNDEF != eLogMode )
++ {
++ g_sCtrl.eLogMode = eLogMode;
++ }
++
++ if ( ui32NumFlagGroups )
++ {
++ for (i = 0; i < HTB_FLAG_NUM_EL && i < ui32NumFlagGroups; i++)
++ {
++ g_auiHTBGroupEnable[i] = aui32GroupEnable[i];
++ }
++ for (; i < HTB_FLAG_NUM_EL; i++)
++ {
++ g_auiHTBGroupEnable[i] = 0;
++ }
++ }
++
++ if ( ui32LogLevel )
++ {
++ g_sCtrl.ui32LogLevel = ui32LogLevel;
++ }
++
++ /* Dump the current configuration state */
++ eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode);
++ PVR_LOG_IF_ERROR(eError, "HTBLog");
++ eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]);
++ PVR_LOG_IF_ERROR(eError, "HTBLog");
++ eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel);
++ PVR_LOG_IF_ERROR(eError, "HTBLog");
++ eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode);
++ PVR_LOG_IF_ERROR(eError, "HTBLog");
++ for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
++ {
++ eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]);
++ PVR_LOG_IF_ERROR(eError, "HTBLog");
++ }
++ /* The guard should never fail, as we set the clock speed whenever the power state is updated */
++ if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd)
++ {
++ eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
++ g_sCtrl.ui32SyncMarker,
++ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
++ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
++ g_sCtrl.ui32SyncCalcClkSpd);
++ PVR_LOG_IF_ERROR(eError, "HTBLog");
++ }
++
++ return eError;
++}
++
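HTBControlKM above remembers the last HTB_MAX_NUM_PID enabled PIDs in a small circular buffer, and _ValidPID below scans it linearly. The "remember the last N ids" pattern in isolation — a standalone sketch with stand-in names, not driver code:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_NUM_PID 8

    static uint32_t aEnabled[MAX_NUM_PID];
    static unsigned uHead, uCount;

    /* Remember a PID, overwriting the oldest entry once the ring is full. */
    static void pid_enable(uint32_t pid)
    {
        aEnabled[uHead] = pid;
        uHead = (uHead + 1) % MAX_NUM_PID;
        if (uCount < MAX_NUM_PID)
            uCount++;
    }

    /* Linear scan; with at most 8 live entries this beats a fancier lookup. */
    static bool pid_is_enabled(uint32_t pid)
    {
        unsigned i;
        for (i = 0; i < uCount; i++)
            if (aEnabled[i] == pid)
                return true;
        return false;
    }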
++/*************************************************************************/ /*!
++*/ /**************************************************************************/
++static IMG_BOOL
++_ValidPID( IMG_UINT32 PID )
++{
++ IMG_UINT32 i;
++
++ for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
++ {
++ if ( g_sCtrl.aui32EnablePID[i] == PID )
++ {
++ return IMG_TRUE;
++ }
++ }
++ return IMG_FALSE;
++}
++
++
++/*************************************************************************/ /*!
++ @Function HTBSyncPartitionMarker
++ @Description Write an HTB sync partition marker to the HTB log
++
++ @Input ui32Marker Marker value
++
++*/ /**************************************************************************/
++void
++HTBSyncPartitionMarker(
++ const IMG_UINT32 ui32Marker
++)
++{
++ g_sCtrl.ui32SyncMarker = ui32Marker;
++ if ( g_hTLStream )
++ {
++ PVRSRV_ERROR eError;
++ IMG_UINT64 ui64Time;
++ OSClockMonotonicns64(&ui64Time);
++
++ /* The guard should never fail, as we set the clock speed whenever the power state is updated */
++ if (0 != g_sCtrl.ui32SyncCalcClkSpd)
++ {
++ eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
++ ui32Marker,
++ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
++ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
++ g_sCtrl.ui32SyncCalcClkSpd);
++ PVR_WARN_IF_ERROR(eError, "HTBLog");
++ }
++ }
++}
++
++/*************************************************************************/ /*!
++ @Function HTBSyncPartitionMarkerRepeat
++ @Description Write an HTB sync partition marker to the HTB log, given
++ the previous values to repeat.
++
++ @Input ui32Marker Marker value
++ @Input ui64SyncOSTS previous OSTS
++ @Input ui64SyncCRTS previous CRTS
++ @Input ui32ClkSpeed previous Clock speed
++
++*/ /**************************************************************************/
++void
++HTBSyncPartitionMarkerRepeat(
++ const IMG_UINT32 ui32Marker,
++ const IMG_UINT64 ui64SyncOSTS,
++ const IMG_UINT64 ui64SyncCRTS,
++ const IMG_UINT32 ui32ClkSpeed
++)
++{
++ if ( g_hTLStream )
++ {
++ PVRSRV_ERROR eError;
++ IMG_UINT64 ui64Time;
++ OSClockMonotonicns64(&ui64Time);
++
++ /* The guard should never fail, as we set the clock speed whenever the power state is updated */
++ if (0 != ui32ClkSpeed)
++ {
++ eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
++ ui32Marker,
++ ((IMG_UINT32)((ui64SyncOSTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncOSTS&0xffffffffU)),
++ ((IMG_UINT32)((ui64SyncCRTS>>32)&0xffffffffU)), ((IMG_UINT32)(ui64SyncCRTS&0xffffffffU)),
++ ui32ClkSpeed);
++ PVR_WARN_IF_ERROR(eError, "HTBLog");
++ }
++ }
++}
++
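The marker/scale pairs exist so that a post-processor can place firmware (CR) timestamps on the OS timeline when merging the two traces. One plausible way a consumer could apply an (OSTS, CRTS, clock speed) sample — a hypothetical sketch only; the actual merge tool's arithmetic may differ:

    #include <stdint.h>

    /* Map a raw CR timer value onto the OS nanosecond timeline using the
     * most recent sync-scale sample (hypothetical post-processing helper).
     * A production version would use 128-bit or scaled arithmetic to avoid
     * overflow in the multiply for large tick deltas.
     */
    static uint64_t cr_to_os_ns(uint64_t cr, uint64_t sync_crts,
                                uint64_t sync_osts_ns, uint32_t clk_hz)
    {
        uint64_t dticks = cr - sync_crts;   /* ticks elapsed since the sample */
        return sync_osts_ns + (dticks * 1000000000u) / clk_hz;
    }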
++/*************************************************************************/ /*!
++ @Function HTBSyncScale
++ @Description Write FW-Host synchronisation data to the HTB log when clocks
++ change or are re-calibrated
++
++ @Input bLogValues IMG_TRUE if value should be immediately written
++ out to the log
++
++ @Input ui64OSTS OS Timestamp
++
++ @Input ui64CRTS Rogue timestamp
++
++ @Input ui32CalcClkSpd Calculated clock speed
++
++*/ /**************************************************************************/
++void
++HTBSyncScale(
++ const IMG_BOOL bLogValues,
++ const IMG_UINT64 ui64OSTS,
++ const IMG_UINT64 ui64CRTS,
++ const IMG_UINT32 ui32CalcClkSpd
++)
++{
++ g_sCtrl.ui64SyncOSTS = ui64OSTS;
++ g_sCtrl.ui64SyncCRTS = ui64CRTS;
++ g_sCtrl.ui32SyncCalcClkSpd = ui32CalcClkSpd;
++ if (g_hTLStream && bLogValues)
++ {
++ PVRSRV_ERROR eError;
++ IMG_UINT64 ui64Time;
++ OSClockMonotonicns64(&ui64Time);
++ eError = HTBLog((IMG_HANDLE) NULL, 0, 0, ui64Time, HTB_SF_CTRL_FWSYNC_MARK_SCALE,
++ g_sCtrl.ui32SyncMarker,
++ ((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)),
++ ((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)),
++ ui32CalcClkSpd);
++ /*
++ * Don't spam the log with non-failure cases
++ */
++ PVR_WARN_IF_ERROR(eError, "HTBLog");
++ }
++}
++
++
++/*************************************************************************/ /*!
++ @Function HTBLogKM
++ @Description Record a Host Trace Buffer log event
++
++ @Input PID The PID of the process the event is associated
++ with. This is provided as an argument rather
++ than querying internally so that events associated
++ with a particular process, but performed by
++ another can be logged correctly.
++
++ @Input TID The TID of the thread the event is associated with.
++
++ @Input ui64TimeStamp The timestamp to be associated with this log event
++
++ @Input SF The log event ID
++
++ @Input ... Log parameters
++
++ @Return PVRSRV_OK Success.
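Each HTB record is assembled on the stack as 32-bit words in the order SF:PID:TID:TIME_HI:TIME_LO followed by the parameters, as the comment inside HTBLogKM below spells out. A freestanding sketch of that packing, using stand-in constants for HTB_LOG_HEADER_SIZE and HTB_LOG_MAX_PARAMS:

    #include <stdint.h>
    #include <string.h>

    #define LOG_HEADER_WORDS 5   /* SF, PID, TID, time hi, time lo */
    #define LOG_MAX_PARAMS  15

    /* Pack one trace record into 'out' (which must hold at least
     * LOG_HEADER_WORDS + nargs words); returns its size in bytes,
     * the equivalent of ui32MessageSize below.
     */
    static size_t pack_record(uint32_t *out, uint32_t sf, uint32_t pid,
                              uint32_t tid, uint64_t ts_ns,
                              const uint32_t *args, uint32_t nargs)
    {
        uint32_t *p = out;

        *p++ = sf;
        *p++ = pid;
        *p++ = tid;
        *p++ = (uint32_t)((ts_ns >> 32) & 0xffffffff);
        *p++ = (uint32_t)(ts_ns & 0xffffffff);
        memcpy(p, args, nargs * sizeof(*args));
        return (LOG_HEADER_WORDS + nargs) * sizeof(uint32_t);
    }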
++ ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++HTBLogKM( ++ IMG_UINT32 PID, ++ IMG_UINT32 TID, ++ IMG_UINT64 ui64TimeStamp, ++ HTB_LOG_SFids SF, ++ IMG_UINT32 ui32NumArgs, ++ IMG_UINT32 * aui32Args ++) ++{ ++ OS_SPINLOCK_FLAGS uiSpinLockFlags; ++ IMG_UINT32 ui32ReturnFlags = 0; ++ ++ /* Local snapshot variables of global counters */ ++ IMG_UINT64 ui64OSTSSnap; ++ IMG_UINT64 ui64CRTSSnap; ++ IMG_UINT32 ui32ClkSpeedSnap; ++ ++ /* format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]* ++ * Buffer is on the stack so we don't need a semaphore to guard it ++ */ ++ IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS]; ++ ++ /* Min HTB size is HTB_TL_BUFFER_SIZE_MIN : 10000 bytes and Max message/ ++ * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 80 bytes, ++ * hence with these constraints this design is unlikely to get ++ * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error ++ */ ++ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED; ++ IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT; ++ IMG_UINT32 * pui32Message = aui32MessageBuffer; ++ IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs); ++ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(aui32Args != NULL, eError, ReturnError); ++ PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs == HTB_SF_PARAMNUM(SF), eError, ReturnError); ++ PVR_LOG_GOTO_IF_INVALID_PARAM(ui32NumArgs <= HTB_LOG_MAX_PARAMS, eError, ReturnError); ++ ++ if ( g_hTLStream ++ && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) ) ++/* && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */ ++/* && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */ ++ ) ++ { ++ *pui32Message++ = SF; ++ *pui32Message++ = PID; ++ *pui32Message++ = TID; ++ *pui32Message++ = ((IMG_UINT32)((ui64TimeStamp>>32)&0xffffffff)); ++ *pui32Message++ = ((IMG_UINT32)(ui64TimeStamp&0xffffffff)); ++ while ( ui32NumArgs ) ++ { ++ ui32NumArgs--; ++ pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs]; ++ } ++ ++ eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); ++ while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- ) ++ { ++ OSReleaseThreadQuanta(); ++ eError = TLStreamWriteRetFlags( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize, &ui32ReturnFlags ); ++ } ++ ++ if ( PVRSRV_OK == eError ) ++ { ++ g_sCtrl.bLogDropSignalled = IMG_FALSE; ++ } ++ else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled ) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__)); ++ } ++ if ( PVRSRV_ERROR_STREAM_FULL == eError ) ++ { ++ g_sCtrl.bLogDropSignalled = IMG_TRUE; ++ } ++ ++ } ++ ++ if (SF == HTB_SF_CTRL_FWSYNC_MARK_SCALE) ++ { ++ OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); ++ ++ /* If a marker is being placed reset byte count from last marker */ ++ g_sCtrl.ui32ByteCount = 0; ++ g_sCtrl.ui64OSTS = (IMG_UINT64)aui32Args[HTB_ARG_OSTS_PT1] << 32 | aui32Args[HTB_ARG_OSTS_PT2]; ++ g_sCtrl.ui64CRTS = (IMG_UINT64)aui32Args[HTB_ARG_CRTS_PT1] << 32 | aui32Args[HTB_ARG_CRTS_PT2]; ++ g_sCtrl.ui32ClkSpeed = aui32Args[HTB_ARG_CLKSPD]; ++ ++ OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); ++ } ++ else ++ { ++ OSSpinLockAcquire(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); ++ /* Increase global count */ ++ g_sCtrl.ui32ByteCount += ui32MessageSize; ++ ++ /* Check if packet has overwritten last marker/rpt && ++ If the packet count is over half the 
size of the buffer */ ++ if (ui32ReturnFlags & TL_FLAG_OVERWRITE_DETECTED && ++ g_sCtrl.ui32ByteCount > HTB_MARKER_PREDICTION_THRESHOLD(g_sCtrl.ui32BufferSize)) ++ { ++ /* Take snapshot of global variables */ ++ ui64OSTSSnap = g_sCtrl.ui64OSTS; ++ ui64CRTSSnap = g_sCtrl.ui64CRTS; ++ ui32ClkSpeedSnap = g_sCtrl.ui32ClkSpeed; ++ /* Reset global variable counter */ ++ g_sCtrl.ui32ByteCount = 0; ++ OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); ++ ++ /* Produce a repeat marker */ ++ HTBSyncPartitionMarkerRepeat(g_sCtrl.ui32SyncMarker, ui64OSTSSnap, ui64CRTSSnap, ui32ClkSpeedSnap); ++ } ++ else ++ { ++ OSSpinLockRelease(g_sCtrl.hRepeatMarkerLock, uiSpinLockFlags); ++ } ++ } ++ ++ReturnError: ++ return eError; ++} ++ ++/*************************************************************************/ /*! ++ @Function HTBIsConfigured ++ @Description Determine if HTB stream has been configured ++ ++ @Input none ++ ++ @Return IMG_FALSE Stream has not been configured ++ IMG_TRUE Stream has been configured ++ ++*/ /**************************************************************************/ ++IMG_BOOL ++HTBIsConfigured(void) ++{ ++ return g_bConfigured; ++} ++/* EOF */ +diff --git a/drivers/gpu/drm/img-rogue/htbserver.h b/drivers/gpu/drm/img-rogue/htbserver.h +new file mode 100644 +index 000000000000..c30556c3501a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/htbserver.h +@@ -0,0 +1,228 @@ ++/*************************************************************************/ /*! ++@File htbserver.h ++@Title Host Trace Buffer server implementation. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ ++@Description Host Trace Buffer provides a mechanism to log Host events to a ++ buffer in a similar way to the Firmware Trace mechanism. ++ Host Trace Buffer logs data using a Transport Layer buffer. ++ The Transport Layer and pvrtld tool provides the mechanism to ++ retrieve the trace data. ++ ++ A Host Trace can be merged with a corresponding Firmware Trace. ++ This is achieved by inserting synchronisation data into both ++ traces and post processing to merge them. ++ ++ The FW Trace will contain a "Sync Partition Marker". This is ++ updated every time the RGX is brought out of reset (RGX clock ++ timestamps reset at this point) and is repeated when the FW ++ Trace buffer wraps to ensure there is always at least 1 ++ partition marker in the Firmware Trace buffer whenever it is ++ read. ++ ++ The Host Trace will contain corresponding "Sync Partition ++ Markers" - #HTBSyncPartitionMarker(). Each partition is then ++ subdivided into "Sync Scale" sections - #HTBSyncScale(). The ++ "Sync Scale" data allows the timestamps from the two traces to ++ be correlated. The "Sync Scale" data is updated as part of the ++ standard RGX time correlation code (rgxtimecorr.c) and is ++ updated periodically including on power and clock changes. ++ ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef HTBSERVER_H ++#define HTBSERVER_H ++ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv.h" ++#include "htbuffer.h" ++ ++/************************************************************************/ /*! ++ @Function HTBInit ++ @Description Initialise the Host Trace Buffer and allocate all resources ++ ++ @Return eError Internal services call returned eError error ++ number ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++HTBInit(void); ++ ++/************************************************************************/ /*! ++ @Function HTBDeInit ++ @Description Close the Host Trace Buffer and free all resources ++ ++ @Return eError Internal services call returned eError error ++ number ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++HTBDeInit(void); ++ ++/*************************************************************************/ /*! 
++ @Function HTBConfigureKM
++ @Description Configure or update the configuration of the Host Trace Buffer
++
++ @Input ui32NameSize Size of the pszName string
++
++ @Input pszName Name to use for the underlying data buffer
++
++ @Input ui32BufferSize Size of the underlying data buffer
++
++ @Return eError Internal services call returned eError error
++ number
++*/ /**************************************************************************/
++PVRSRV_ERROR
++HTBConfigureKM(IMG_UINT32 ui32NameSize, const IMG_CHAR * pszName,
++ const IMG_UINT32 ui32BufferSize);
++
++
++/*************************************************************************/ /*!
++ @Function HTBControlKM
++ @Description Update the configuration of the Host Trace Buffer
++
++ @Input ui32NumFlagGroups Number of group enable flags words
++
++ @Input aui32GroupEnable Flags words controlling groups to be logged
++
++ @Input ui32LogLevel Log level to record
++
++ @Input ui32EnablePID PID to enable logging for a specific process
++
++ @Input eLogMode Enable logging for all or specific processes
++
++ @Input eOpMode Control the behaviour of the data buffer
++
++ @Return eError Internal services call returned eError error
++ number
++*/ /**************************************************************************/
++PVRSRV_ERROR
++HTBControlKM(const IMG_UINT32 ui32NumFlagGroups,
++ const IMG_UINT32 *aui32GroupEnable,
++ const IMG_UINT32 ui32LogLevel,
++ const IMG_UINT32 ui32EnablePID,
++ const HTB_LOGMODE_CTRL eLogMode,
++ const HTB_OPMODE_CTRL eOpMode);
++
++
++/*************************************************************************/ /*!
++ @Function HTBSyncPartitionMarker
++ @Description Write an HTB sync partition marker to the HTB log
++
++ @Input ui32Marker Marker value
++
++*/ /**************************************************************************/
++void
++HTBSyncPartitionMarker(const IMG_UINT32 ui32Marker);
++
++/*************************************************************************/ /*!
++ @Function HTBSyncPartitionMarkerRepeat
++ @Description Write an HTB sync partition marker to the HTB log, given
++ the previous values to repeat.
++
++ @Input ui32Marker Marker value
++ @Input ui64SyncOSTS previous OSTS
++ @Input ui64SyncCRTS previous CRTS
++ @Input ui32ClkSpeed previous Clock speed
++
++*/ /**************************************************************************/
++void
++HTBSyncPartitionMarkerRepeat(const IMG_UINT32 ui32Marker,
++ const IMG_UINT64 ui64SyncOSTS,
++ const IMG_UINT64 ui64SyncCRTS,
++ const IMG_UINT32 ui32ClkSpeed);
++
++/*************************************************************************/ /*!
++ @Function HTBSyncScale
++ @Description Write FW-Host synchronisation data to the HTB log when clocks
++ change or are re-calibrated
++
++ @Input bLogValues IMG_TRUE if value should be immediately written
++ out to the log
++
++ @Input ui64OSTS OS Timestamp
++
++ @Input ui64CRTS Rogue timestamp
++
++ @Input ui32CalcClkSpd Calculated clock speed
++
++*/ /**************************************************************************/
++void
++HTBSyncScale(const IMG_BOOL bLogValues, const IMG_UINT64 ui64OSTS,
++ const IMG_UINT64 ui64CRTS, const IMG_UINT32 ui32CalcClkSpd);
++
++/*************************************************************************/ /*!
++ @Function HTBLogKM
++ @Description Record a Host Trace Buffer log event
++
++ @Input PID The PID of the process the event is associated
++ with.
This is provided as an argument rather ++ than querying internally so that events associated ++ with a particular process, but performed by ++ another can be logged correctly. ++ ++ @Input TID The TID of the process the event is associated with. ++ ++ @Input ui64TimeStamp The timestamp to be associated with this log event ++ ++ @Input SF The log event ID ++ ++ @Input ... Log parameters ++ ++ @Return PVRSRV_OK Success. ++ ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++HTBLogKM(IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStamp, HTB_LOG_SFids SF, ++ IMG_UINT32 ui32NumArgs, IMG_UINT32 *aui32Args); ++ ++/*************************************************************************/ /*! ++ @Function HTBIsConfigured ++ @Description Determine if HTB stream has been configured ++ ++ @Input none ++ ++ @Return IMG_FALSE Stream has not been configured ++ IMG_TRUE Stream has been configured ++ ++*/ /**************************************************************************/ ++IMG_BOOL ++HTBIsConfigured(void); ++#endif /* HTBSERVER_H */ ++ ++/* EOF */ +diff --git a/drivers/gpu/drm/img-rogue/htbuffer.c b/drivers/gpu/drm/img-rogue/htbuffer.c +new file mode 100644 +index 000000000000..c326ae261b7e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/htbuffer.c +@@ -0,0 +1,197 @@ ++/*************************************************************************/ /*! ++@File htbuffer.c ++@Title Host Trace Buffer shared API. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Host Trace Buffer provides a mechanism to log Host events to a ++ buffer in a similar way to the Firmware Trace mechanism. ++ Host Trace Buffer logs data using a Transport Layer buffer. ++ The Transport Layer and pvrtld tool provides the mechanism to ++ retrieve the trace data. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if defined(__linux__)
++ #include <linux/version.h>
++
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
++ #include <linux/stdarg.h>
++ #else
++ #include <stdarg.h>
++ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
++#else
++ #include <stdarg.h>
++#endif /* __linux__ */
++
++#include "htbuffer.h"
++#include "osfunc.h"
++#include "client_htbuffer_bridge.h"
++
++/* The group flags array of ints large enough to store all the group flags
++ * NB: This will only work while all logging is in the kernel
++ */
++IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL] = {0};
++
++
++/*************************************************************************/ /*!
++ @Function HTBControl
++ @Description Update the configuration of the Host Trace Buffer
++ @Input hSrvHandle Server Handle
++ @Input ui32NumFlagGroups Number of group enable flags words
++ @Input aui32GroupEnable Flags words controlling groups to be logged
++ @Input ui32LogLevel Log level to record
++ @Input ui32EnablePID PID to enable logging for a specific process
++ @Input eLogPidMode Enable logging for all or specific processes
++ @Input eOpMode Control what trace data is dropped if the TL
++ buffer is full
++ @Return eError Internal services call returned eError error
++ number
++*/ /**************************************************************************/
++IMG_INTERNAL PVRSRV_ERROR
++HTBControl(
++ IMG_HANDLE hSrvHandle,
++ IMG_UINT32 ui32NumFlagGroups,
++ IMG_UINT32 * aui32GroupEnable,
++ IMG_UINT32 ui32LogLevel,
++ IMG_UINT32 ui32EnablePID,
++ HTB_LOGMODE_CTRL eLogPidMode,
++ HTB_OPMODE_CTRL eOpMode
++)
++{
++ return BridgeHTBControl(
++ hSrvHandle,
++ ui32NumFlagGroups,
++ aui32GroupEnable,
++ ui32LogLevel,
++ ui32EnablePID,
++ eLogPidMode,
++ eOpMode
++ );
++}
++
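Below, _HTBLog unpacks a va_list into a fixed array of 32-bit words before handing it over the bridge. The pattern in isolation — a standalone sketch with stand-in names — also shows why callers elsewhere in this patch split 64-bit values into two 32-bit halves: va_arg here reads exactly one 32-bit word per parameter.

    #include <stdarg.h>
    #include <stdint.h>

    #define MAX_PARAMS 15

    /* Clamp to MAX_PARAMS and copy the varargs into an array, as _HTBLog does. */
    static unsigned unpack(uint32_t out[MAX_PARAMS], unsigned n, ...)
    {
        va_list ap;
        unsigned i;

        if (n > MAX_PARAMS)
            n = MAX_PARAMS;
        va_start(ap, n);
        for (i = 0; i < n; i++)
            out[i] = va_arg(ap, uint32_t);  /* one 32-bit word per argument */
        va_end(ap);
        return n;
    }

    /* A 64-bit timestamp must therefore be passed as two explicit halves:
     *   unpack(out, 2, (uint32_t)(ts >> 32), (uint32_t)ts);
     */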
++
++/*************************************************************************/ /*!
++*/ /**************************************************************************/
++static PVRSRV_ERROR
++_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampus,
++ HTB_LOG_SFids SF, va_list args)
++{
++#if defined(__KERNEL__)
++ IMG_UINT32 i;
++ IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF);
++#if defined(__KLOCWORK__)
++ IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS + 1]; // Prevent KW False-positive
++#else
++ IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS];
++#endif
++
++ PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS);
++ ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS) ? HTB_LOG_MAX_PARAMS : ui32NumArgs;
++
++ /* unpack var args before sending over bridge */
++ for (i=0; i<ui32NumArgs; i++)
++ {
++ aui32Args[i] = va_arg(args, IMG_UINT32);
++ }
++
++ return BridgeHTBLog(hSrvHandle, PID, TID, ui64TimeStampus, SF,
++ ui32NumArgs, aui32Args);
++#endif /* __KERNEL__ */
++}
++
++/* macros to cast pointers into 32-bit integer components for Host Trace */
++#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff))
++#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff))
++
++/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */
++#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff))
++#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff))
++
++/*************************************************************************/ /*!
++ @Function HTBLog
++ @Description Record a Host Trace Buffer log event
++
++ @Input PID The PID of the process the event is associated
++ with. This is provided as an argument rather
++ than querying internally so that events associated
++ with a particular process, but performed by
++ another can be logged correctly.
++
++ @Input TID The TID (Thread ID) of the thread the event is
++ associated with.
++
++ @Input TimeStampns The timestamp in ns for this event
++
++ @Input SF The log event ID
++
++ @Input ... Log parameters
++
++ @Return PVRSRV_OK Success.
++
++*/ /**************************************************************************/
++IMG_INTERNAL PVRSRV_ERROR
++HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 TID, IMG_UINT64 ui64TimeStampns, IMG_UINT32 SF, ...);
++
++
++/*************************************************************************/ /*!
++ @Function HTBLogSimple
++ @Description Record a Host Trace Buffer log event with implicit PID and Timestamp
++
++ @Input SF The log event ID
++
++ @Input ... Log parameters
++
++ @Return PVRSRV_OK Success.
++
++*/ /**************************************************************************/
++IMG_INTERNAL PVRSRV_ERROR
++HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...);
++
++
++
++/* DEBUG log group enable */
++#if !defined(HTB_DEBUG_LOG_GROUP)
++#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */
++#define HTB_LOG_TYPE_DBG __BUILDERROR__
++#endif
++
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif /* HTBUFFER_H */
++/*****************************************************************************
++ End of file (htbuffer.h)
++*****************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/htbuffer_init.h b/drivers/gpu/drm/img-rogue/htbuffer_init.h
+new file mode 100644
+index 000000000000..d114579964b4
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/htbuffer_init.h
+@@ -0,0 +1,114 @@
++/*************************************************************************/ /*!
++@File htbuffer_init.h
++@Title Host Trace Buffer functions needed for Services initialisation
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef HTBUFFER_INIT_H ++#define HTBUFFER_INIT_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++/*************************************************************************/ /*! ++ @Function HTBConfigure ++ @Description Configure the Host Trace Buffer. ++ Once these parameters are set they may not be changed ++ ++ @Input hSrvHandle Server Handle ++ ++ @Input pszBufferName Name to use for the TL buffer, this will be ++ required to request trace data from the TL ++ ++ @Input ui32BufferSize Requested TL buffer size in bytes ++ ++ @Return eError Internal services call returned eError error ++ number ++*/ /**************************************************************************/ ++IMG_INTERNAL PVRSRV_ERROR ++HTBConfigure( ++ IMG_HANDLE hSrvHandle, ++ IMG_CHAR * pszBufferName, ++ IMG_UINT32 ui32BufferSize ++); ++ ++/*************************************************************************/ /*! 
++ @Function HTBControl ++ @Description Update the configuration of the Host Trace Buffer ++ ++ @Input hSrvHandle Server Handle ++ ++ @Input ui32NumFlagGroups Number of group enable flags words ++ ++ @Input aui32GroupEnable Flags words controlling groups to be logged ++ ++ @Input ui32LogLevel Log level to record ++ ++ @Input ui32EnablePID PID to enable logging for a specific process ++ ++ @Input eLogMode Enable logging for all or specific processes, ++ ++ @Input eOpMode Control what trace data is dropped if the TL ++ buffer is full ++ ++ @Return eError Internal services call returned eError error ++ number ++*/ /**************************************************************************/ ++IMG_INTERNAL PVRSRV_ERROR ++HTBControl( ++ IMG_HANDLE hSrvHandle, ++ IMG_UINT32 ui32NumFlagGroups, ++ IMG_UINT32 * aui32GroupEnable, ++ IMG_UINT32 ui32LogLevel, ++ IMG_UINT32 ui32EnablePID, ++ HTB_LOGMODE_CTRL eLogMode, ++ HTB_OPMODE_CTRL eOpMode ++); ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* HTBUFFER_INIT_H */ ++/***************************************************************************** ++ End of file (htbuffer_init.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/htbuffer_sf.h b/drivers/gpu/drm/img-rogue/htbuffer_sf.h +new file mode 100644 +index 000000000000..9042de20cd8f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/htbuffer_sf.h +@@ -0,0 +1,241 @@ ++/*************************************************************************/ /*! ++@File htbuffer_sf.h ++@Title Host Trace Buffer interface string format specifiers ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the Host Trace Buffer logging messages. The following ++ list are the messages the host driver prints. Changing anything ++ but the first column or spelling mistakes in the strings will ++ break compatibility with log files created with older/newer ++ driver versions. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef HTBUFFER_SF_H ++#define HTBUFFER_SF_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++ ++/****************************************************************************** ++ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you ++ * WILL BREAK host tracing message compatibility with previous ++ * driver versions. Only add new ones, if so required. ++ *****************************************************************************/ ++ ++ ++/* String used in pvrdebug -h output */ ++#define HTB_LOG_GROUPS_STRING_LIST "ctrl,mmu,sync,main,brg" ++ ++/* Used in print statements to display log group state, one %s per group defined */ ++#define HTB_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s" ++ ++/* Available log groups - Master template ++ * ++ * Group usage is as follows: ++ * CTRL - Internal Host Trace information and synchronisation data ++ * MMU - MMU page mapping information ++ * SYNC - Synchronisation debug ++ * MAIN - Data master kicks, etc. tying in with the MAIN group in FWTrace ++ * DBG - Temporary debugging group, logs not to be left in the driver ++ * ++ */ ++#define HTB_LOG_SFGROUPLIST \ ++ X( HTB_GROUP_NONE, NONE ) \ ++/* gid, group flag / apphint name */ \ ++ X( HTB_GROUP_CTRL, CTRL ) \ ++ X( HTB_GROUP_MMU, MMU ) \ ++ X( HTB_GROUP_SYNC, SYNC ) \ ++ X( HTB_GROUP_MAIN, MAIN ) \ ++ X( HTB_GROUP_BRG, BRG ) \ ++/* Debug group HTB_GROUP_DBG must always be last */ \ ++ X( HTB_GROUP_DBG, DBG ) ++ ++ ++/* Table of String Format specifiers, the group they belong and the number of ++ * arguments each expects. Xmacro styled macros are used to generate what is ++ * needed without requiring hand editing. 
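The X-macro idiom used by HTB_LOG_SFGROUPLIST and HTB_LOG_SFIDLIST boils down to the following — a minimal standalone example, not the driver's actual lists:

    #include <stdio.h>

    /* Single source of truth: each X(...) row names one entry. */
    #define COLOUR_LIST \
        X(COLOUR_RED,   "red")   \
        X(COLOUR_GREEN, "green") \
        X(COLOUR_BLUE,  "blue")

    /* Expansion 1: an enum of symbolic names. */
    typedef enum {
    #define X(sym, str) sym,
        COLOUR_LIST
    #undef X
        COLOUR_COUNT
    } Colour;

    /* Expansion 2: a parallel string table, guaranteed to stay in sync. */
    static const char *const apszColours[] = {
    #define X(sym, str) str,
        COLOUR_LIST
    #undef X
    };

    int main(void)
    {
        printf("%d -> %s\n", COLOUR_GREEN, apszColours[COLOUR_GREEN]);
        return 0;
    }

Because both expansions are generated from the same list, adding a row keeps the enum and the string table in lockstep without hand editing — which is exactly why the header warns against reordering or deleting rows: the positions are baked into existing log files.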
++ * ++ * id : unique id within a group ++ * gid : group id as defined above ++ * sym name : symbolic name of enumerations used to identify message strings ++ * string : Actual string ++ * #args : number of arguments the string format requires ++ */ ++#define HTB_LOG_SFIDLIST \ ++/*id, gid, sym name, string, # arguments */ \ ++X( 0, HTB_GROUP_NONE, HTB_SF_FIRST, "You should not use this string", 0) \ ++\ ++X( 1, HTB_GROUP_CTRL, HTB_SF_CTRL_LOGMODE, "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1) \ ++X( 2, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_PID, "HTB enable logging for PID %d\n", 1) \ ++X( 3, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_GROUP, "HTB enable logging groups 0x%08x\n", 1) \ ++X( 4, HTB_GROUP_CTRL, HTB_SF_CTRL_LOG_LEVEL, "HTB log level set to %d\n", 1) \ ++X( 5, HTB_GROUP_CTRL, HTB_SF_CTRL_OPMODE, "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1) \ ++X( 6, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE, "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ ++X( 7, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE_RPT, "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \ ++X( 8, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK, "FW Sync Partition marker: %d\n", 1) \ ++X( 9, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_RPT, "FW Sync Partition repeat: %d\n", 1) \ ++X( 10, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_SCALE, "Text not used", 6)\ ++\ ++X( 1, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_TABLE, "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \ ++X( 2, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_ALLOC, "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \ ++X( 3, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_FREE, "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \ ++X( 4, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_MAP, "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ ++X( 5, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_PMRMAP, "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \ ++X( 6, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_UNMAP, "MMU unmapping DevVAddr %08x%08x\n", 2) \ ++\ ++X( 1, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_ALLOC, "Server sync allocation [%08X]\n", 1) \ ++X( 2, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_UNREF, "Server sync unreferenced [%08X]\n", 1) \ ++X( 3, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_CREATE, "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \ ++X( 4, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_TAKE, "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \ ++X( 5, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_COMPLETE, "Sync OP complete 0x%08x\n", 1) \ ++X( 6, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_DESTROY, "Sync OP destroy 0x%08x\n", 1) \ ++\ ++X( 1, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx %08X @ %d\n", 2) \ ++X( 2, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx %08X @ %d\n", 2) \ ++X( 3, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM_DEPRECATED,"Kick CDM: FWCtx %08X @ %d\n", 2) \ ++X( 4, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx %08X @ %d\n", 2) \ ++X( 5, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx %08X @ %d\n", 2) \ ++X( 6, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D_DEPRECATED, "Kick 2D: FWCtx %08X @ %d\n", 2) \ ++X( 7, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_UNCOUNTED, "Kick (uncounted) for all DMs\n", 0) \ ++X( 8, HTB_GROUP_MAIN, HTB_SF_MAIN_FWCCB_CMD, "FW CCB Cmd: %d\n", 1) \ ++X( 9, HTB_GROUP_MAIN, HTB_SF_MAIN_PRE_POWER, "Pre-power duration @ phase [%d] (0-shutdown,1-startup) RGX: %llu ns SYS: %llu ns\n", 3) \ ++X(10, HTB_GROUP_MAIN, 
HTB_SF_MAIN_POST_POWER, "Post-power duration @ phase [%d] (0-shutdown,1-startup) SYS: %llu ns RGX: %llu ns\n", 3) \ ++X(11, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA, "Kick TA: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ ++X(12, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D, "Kick 3D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ ++X(13, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM, "Kick CDM: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ ++X(14, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D, "Kick 2D: FWCtx %08x @ %d (frame:%d, ext:0x%08x, int:0x%08x)\n", 5) \ ++\ ++X( 1, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL, "Bridge call: start: %010u: bid %03d fid %d\n", 3) \ ++X( 2, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL_ERR, "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \ ++\ ++X( 1, HTB_GROUP_DBG, HTB_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2) \ ++\ ++X( 65535, HTB_GROUP_NONE, HTB_SF_LAST, "You should not use this string\n", 15) ++ ++ ++ ++/* gid - Group numbers */ ++typedef enum _HTB_LOG_SFGROUPS { ++#define X(A,B) A, ++ HTB_LOG_SFGROUPLIST ++#undef X ++} HTB_LOG_SFGROUPS; ++ ++ ++/* Group flags are stored in an array of elements. ++ * Each of which have a certain number of bits. ++ */ ++#define HTB_FLAG_EL_T IMG_UINT32 ++#define HTB_FLAG_NUM_BITS_IN_EL (sizeof(HTB_FLAG_EL_T) * 8) ++ ++#define HTB_LOG_GROUP_FLAG_GROUP(gid) ((gid-1) / HTB_FLAG_NUM_BITS_IN_EL) ++#define HTB_LOG_GROUP_FLAG(gid) (gid ? (0x1 << ((gid-1)%HTB_FLAG_NUM_BITS_IN_EL)) : 0) ++#define HTB_LOG_GROUP_FLAG_NAME(gid) HTB_LOG_TYPE_ ## gid ++ ++/* Group enable flags */ ++typedef enum _HTB_LOG_TYPE { ++#define X(a, b) HTB_LOG_GROUP_FLAG_NAME(b) = HTB_LOG_GROUP_FLAG(a), ++ HTB_LOG_SFGROUPLIST ++#undef X ++} HTB_LOG_TYPE; ++ ++ ++ ++/* The symbolic names found in the table above are assigned an ui32 value of ++ * the following format: ++ * 31 30 28 27 20 19 16 15 12 11 0 bits ++ * - --- ---- ---- ---- ---- ---- ---- ---- ++ * 0-11: id number ++ * 12-15: group id number ++ * 16-19: number of parameters ++ * 20-27: unused ++ * 28-30: active: identify SF packet, otherwise regular int32 ++ * 31: reserved for signed/unsigned compatibility ++ * ++ * The following macro assigns those values to the enum generated SF ids list. ++ */ ++#define HTB_LOG_IDMARKER (0x70000000) ++#define HTB_LOG_CREATESFID(a,b,e) (((a) | (b << 12) | (e << 16)) | HTB_LOG_IDMARKER) ++ ++#define HTB_LOG_IDMASK (0xFFF00000) ++#define HTB_LOG_VALIDID(I) ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER ) ++ ++typedef enum HTB_LOG_SFids { ++#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e), ++ HTB_LOG_SFIDLIST ++#undef X ++} HTB_LOG_SFids; ++ ++/* Return the group id that the given (enum generated) id belongs to */ ++#define HTB_SF_GID(x) (((x)>>12) & 0xf) ++/* Future improvement to support log levels */ ++#define HTB_SF_LVL(x) (0) ++/* Returns how many arguments the SF(string format) for the given ++ * (enum generated) id requires. 
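++ *
++ * Worked example (for illustration only, derived from the macros above):
++ *   HTB_SF_CTRL_LOG_LEVEL = HTB_LOG_CREATESFID(4, HTB_GROUP_CTRL, 1)
++ *                         = 0x4 | (0x1 << 12) | (0x1 << 16) | 0x70000000
++ *                         = 0x70011004
++ * for which HTB_SF_ID() yields 4, HTB_SF_GID() yields 1 (CTRL) and
++ * HTB_SF_PARAMNUM() yields 1.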
++ */
++#define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf)
++/* Returns the id of the given enum */
++#define HTB_SF_ID(x) (x & 0xfff)
++
++/* Format of messages is: SF:PID:TID:TIMEPT1:TIMEPT2:[PARn]*
++ */
++#define HTB_LOG_HEADER_SIZE 5
++#define HTB_LOG_MAX_PARAMS 15
++
++#if defined(__cplusplus)
++}
++#endif
++
++/* Defines for handling MARK_SCALE special case */
++#define HTB_GID_CTRL 1
++#define HTB_ID_MARK_SCALE 10
++#define HTB_MARK_SCALE_ARG_ARRAY_SIZE 6
++
++/* Defines for extracting args from array for special case MARK_SCALE */
++#define HTB_ARG_SYNCMARK 0
++#define HTB_ARG_OSTS_PT1 1
++#define HTB_ARG_OSTS_PT2 2
++#define HTB_ARG_CRTS_PT1 3
++#define HTB_ARG_CRTS_PT2 4
++#define HTB_ARG_CLKSPD 5
++
++#endif /* HTBUFFER_SF_H */
++/*****************************************************************************
++ End of file (htbuffer_sf.h)
++*****************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/htbuffer_types.h b/drivers/gpu/drm/img-rogue/htbuffer_types.h
+new file mode 100644
+index 000000000000..a404bf8b7b10
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/htbuffer_types.h
+@@ -0,0 +1,118 @@
++/*************************************************************************/ /*!
++@File htbuffer_types.h
++@Title Host Trace Buffer types.
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Host Trace Buffer provides a mechanism to log Host events to a
++ buffer in a similar way to the Firmware Trace mechanism.
++ Host Trace Buffer logs data using a Transport Layer buffer.
++ The Transport Layer and the pvrtld tool provide the mechanism
++ to retrieve the trace data.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef HTBUFFER_TYPES_H ++#define HTBUFFER_TYPES_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include "img_defs.h" ++#include "htbuffer_sf.h" ++ ++/* The group flags array of ints large enough to store all the group flags */ ++#define HTB_FLAG_NUM_EL (((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1) ++extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL]; ++ ++#define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF))) ++ ++/*************************************************************************/ /*! ++ Host Trace Buffer operation mode ++ Care must be taken if changing this enum to ensure the MapFlags[] array ++ in htbserver.c is kept in-step. ++*/ /**************************************************************************/ ++typedef enum ++{ ++ /*! Undefined operation mode */ ++ HTB_OPMODE_UNDEF = 0, ++ ++ /*! Drop latest, intended for continuous logging to a UM daemon. ++ * If the daemon does not keep up, the most recent log data ++ * will be dropped ++ */ ++ HTB_OPMODE_DROPLATEST, ++ ++ /*! Drop oldest, intended for crash logging. ++ * Data will be continuously written to a circular buffer. ++ * After a crash the buffer will contain events leading up to the crash ++ */ ++ HTB_OPMODE_DROPOLDEST, ++ ++ /*! Block write if buffer is full */ ++ HTB_OPMODE_BLOCK, ++ ++ HTB_OPMODE_LAST = HTB_OPMODE_BLOCK ++} HTB_OPMODE_CTRL; ++ ++ ++/*************************************************************************/ /*! ++ Host Trace Buffer log mode control ++*/ /**************************************************************************/ ++typedef enum ++{ ++ /*! Undefined log mode, used if update is not applied */ ++ HTB_LOGMODE_UNDEF = 0, ++ ++ /*! Log trace messages for all PIDs. */ ++ HTB_LOGMODE_ALLPID, ++ ++ /*! Log trace messages for specific PIDs only. */ ++ HTB_LOGMODE_RESTRICTEDPID, ++ ++ HTB_LOGMODE_LAST = HTB_LOGMODE_RESTRICTEDPID ++} HTB_LOGMODE_CTRL; ++ ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* HTBUFFER_TYPES_H */ ++ ++/****************************************************************************** ++ End of file (htbuffer_types.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/img_3dtypes.h b/drivers/gpu/drm/img-rogue/img_3dtypes.h +new file mode 100644 +index 000000000000..916e3a1eedc4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/img_3dtypes.h +@@ -0,0 +1,248 @@ ++/*************************************************************************/ /*! ++@File ++@Title Global 3D types for use by IMG APIs ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines 3D types for use by IMG APIs ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef IMG_3DTYPES_H ++#define IMG_3DTYPES_H ++ ++#include ++#include "img_types.h" ++#include "img_defs.h" ++ ++/** ++ * Comparison functions ++ * This comparison function is defined as: ++ * A {CmpFunc} B ++ * A is a reference value, e.g., incoming depth etc. ++ * B is the sample value, e.g., value in depth buffer. 
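++ *
++ * For example (illustrative only): with IMG_COMPFUNC_LESS_EQUAL the test
++ * passes when A <= B, i.e. when the incoming depth is less than or equal
++ * to the value already held in the depth buffer.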
++ */
++typedef enum _IMG_COMPFUNC_
++{
++ IMG_COMPFUNC_NEVER, /**< The comparison never succeeds */
++ IMG_COMPFUNC_LESS, /**< The comparison is a less-than operation */
++ IMG_COMPFUNC_EQUAL, /**< The comparison is an equal-to operation */
++ IMG_COMPFUNC_LESS_EQUAL, /**< The comparison is a less-than or equal-to
++ operation */
++ IMG_COMPFUNC_GREATER, /**< The comparison is a greater-than operation
++ */
++ IMG_COMPFUNC_NOT_EQUAL, /**< The comparison is a not-equal-to operation
++ */
++ IMG_COMPFUNC_GREATER_EQUAL, /**< The comparison is a greater-than or
++ equal-to operation */
++ IMG_COMPFUNC_ALWAYS, /**< The comparison always succeeds */
++} IMG_COMPFUNC;
++
++/**
++ * Stencil op functions
++ */
++typedef enum _IMG_STENCILOP_
++{
++ IMG_STENCILOP_KEEP, /**< Keep original value */
++ IMG_STENCILOP_ZERO, /**< Set stencil to 0 */
++ IMG_STENCILOP_REPLACE, /**< Replace stencil entry */
++ IMG_STENCILOP_INCR_SAT, /**< Increment stencil entry, clamping to max */
++ IMG_STENCILOP_DECR_SAT, /**< Decrement stencil entry, clamping to zero */
++ IMG_STENCILOP_INVERT, /**< Invert bits in stencil entry */
++ IMG_STENCILOP_INCR, /**< Increment stencil entry,
++ wrapping if necessary */
++ IMG_STENCILOP_DECR, /**< Decrement stencil entry,
++ wrapping if necessary */
++} IMG_STENCILOP;
++
++/**
++ * Alpha blending allows colours and textures on one surface
++ * to be blended with transparency onto another surface.
++ * These definitions apply to both source and destination blending
++ * states.
++ */
++typedef enum _IMG_BLEND_
++{
++ IMG_BLEND_ZERO = 0, /**< Blend factor is (0,0,0,0) */
++ IMG_BLEND_ONE, /**< Blend factor is (1,1,1,1) */
++ IMG_BLEND_SRC_COLOUR, /**< Blend factor is the source colour */
++ IMG_BLEND_INV_SRC_COLOUR, /**< Blend factor is the inverted source colour
++ (i.e. 1-src_col) */
++ IMG_BLEND_SRC_ALPHA, /**< Blend factor is the source alpha */
++ IMG_BLEND_INV_SRC_ALPHA, /**< Blend factor is the inverted source alpha
++ (i.e.
1-src_alpha) */ ++ IMG_BLEND_DEST_ALPHA, /**< Blend factor is the destination alpha */ ++ IMG_BLEND_INV_DEST_ALPHA, /**< Blend factor is the inverted destination ++ alpha */ ++ IMG_BLEND_DEST_COLOUR, /**< Blend factor is the destination colour */ ++ IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination ++ colour */ ++ IMG_BLEND_SRC_ALPHASAT, /**< Blend factor is the alpha saturation (the ++ minimum of (Src alpha, ++ 1 - destination alpha)) */ ++ IMG_BLEND_BLEND_FACTOR, /**< Blend factor is a constant */ ++ IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/ ++ IMG_BLEND_SRC1_COLOUR, /**< Blend factor is the colour outputted from ++ the pixel shader */ ++ IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour ++ outputted from the pixel shader */ ++ IMG_BLEND_SRC1_ALPHA, /**< Blend factor is the alpha outputted from ++ the pixel shader */ ++ IMG_BLEND_INV_SRC1_ALPHA /**< Blend factor is the inverted alpha ++ outputted from the pixel shader */ ++} IMG_BLEND; ++ ++/** ++ * The arithmetic operation to perform when blending ++ */ ++typedef enum _IMG_BLENDOP_ ++{ ++ IMG_BLENDOP_ADD = 0, /**< Result = (Source + Destination) */ ++ IMG_BLENDOP_SUBTRACT, /**< Result = (Source - Destination) */ ++ IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */ ++ IMG_BLENDOP_MIN, /**< Result = min (Source, Destination) */ ++ IMG_BLENDOP_MAX /**< Result = max (Source, Destination) */ ++} IMG_BLENDOP; ++ ++/** ++ * Logical operation to perform when logic ops are enabled ++ */ ++typedef enum _IMG_LOGICOP_ ++{ ++ IMG_LOGICOP_CLEAR = 0, /**< Result = 0 */ ++ IMG_LOGICOP_SET, /**< Result = -1 */ ++ IMG_LOGICOP_COPY, /**< Result = Source */ ++ IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */ ++ IMG_LOGICOP_NOOP, /**< Result = Destination */ ++ IMG_LOGICOP_INVERT, /**< Result = ~Destination */ ++ IMG_LOGICOP_AND, /**< Result = Source & Destination */ ++ IMG_LOGICOP_NAND, /**< Result = ~(Source & Destination) */ ++ IMG_LOGICOP_OR, /**< Result = Source | Destination */ ++ IMG_LOGICOP_NOR, /**< Result = ~(Source | Destination) */ ++ IMG_LOGICOP_XOR, /**< Result = Source ^ Destination */ ++ IMG_LOGICOP_EQUIV, /**< Result = ~(Source ^ Destination) */ ++ IMG_LOGICOP_AND_REVERSE, /**< Result = Source & ~Destination */ ++ IMG_LOGICOP_AND_INVERTED, /**< Result = ~Source & Destination */ ++ IMG_LOGICOP_OR_REVERSE, /**< Result = Source | ~Destination */ ++ IMG_LOGICOP_OR_INVERTED /**< Result = ~Source | Destination */ ++} IMG_LOGICOP; ++ ++/** ++ * Type of fog blending supported ++ */ ++typedef enum _IMG_FOGMODE_ ++{ ++ IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are ++ * based on the value output from the vertex phase */ ++ IMG_FOGMODE_LINEAR, /**< Linear interpolation */ ++ IMG_FOGMODE_EXP, /**< Exponential */ ++ IMG_FOGMODE_EXP2, /**< Exponential squaring */ ++} IMG_FOGMODE; ++ ++/** ++ * Types of filtering ++ */ ++typedef enum _IMG_FILTER_ ++{ ++ IMG_FILTER_DONTCARE, /**< Any filtering mode is acceptable */ ++ IMG_FILTER_POINT, /**< Point filtering */ ++ IMG_FILTER_LINEAR, /**< Bi-linear filtering */ ++ IMG_FILTER_BICUBIC, /**< Bi-cubic filtering */ ++} IMG_FILTER; ++ ++/** ++ * Addressing modes for textures ++ */ ++typedef enum _IMG_ADDRESSMODE_ ++{ ++ IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */ ++ IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */ ++ IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */ ++ IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */ ++ IMG_ADDRESSMODE_CLAMPBORDER, ++ 
IMG_ADDRESSMODE_OGL_CLAMP, ++ IMG_ADDRESSMODE_OVG_TILEFILL, ++ IMG_ADDRESSMODE_DONTCARE, ++} IMG_ADDRESSMODE; ++ ++/** ++ * Culling based on winding order of triangle. ++ */ ++typedef enum _IMG_CULLMODE_ ++{ ++ IMG_CULLMODE_NONE, /**< Don't cull */ ++ IMG_CULLMODE_FRONTFACING, /**< Front facing triangles */ ++ IMG_CULLMODE_BACKFACING, /**< Back facing triangles */ ++} IMG_CULLMODE; ++ ++/** ++ * Colour for clearing surfaces. ++ * The four elements of the 4 x 32 bit array will map to colour ++ * R,G,B,A components, in order. ++ * For YUV colour space the order is Y,U,V. ++ * For Depth and Stencil formats D maps to R and S maps to G. ++ */ ++typedef union IMG_CLEAR_COLOUR_TAG { ++ IMG_UINT32 aui32[4]; ++ IMG_INT32 ai32[4]; ++ IMG_FLOAT af32[4]; ++} IMG_CLEAR_COLOUR; ++ ++static_assert(sizeof(IMG_FLOAT) == sizeof(IMG_INT32), "Size of IMG_FLOAT is not 32 bits."); ++ ++/*! ************************************************************************//** ++@brief Specifies the MSAA resolve operation. ++*/ /**************************************************************************/ ++typedef enum _IMG_RESOLVE_OP_ ++{ ++ IMG_RESOLVE_BLEND = 0, /*!< box filter on the samples */ ++ IMG_RESOLVE_MIN = 1, /*!< minimum of the samples */ ++ IMG_RESOLVE_MAX = 2, /*!< maximum of the samples */ ++ IMG_RESOLVE_SAMPLE0 = 3, /*!< choose sample 0 */ ++ IMG_RESOLVE_SAMPLE1 = 4, /*!< choose sample 1 */ ++ IMG_RESOLVE_SAMPLE2 = 5, /*!< choose sample 2 */ ++ IMG_RESOLVE_SAMPLE3 = 6, /*!< choose sample 3 */ ++ IMG_RESOLVE_SAMPLE4 = 7, /*!< choose sample 4 */ ++ IMG_RESOLVE_SAMPLE5 = 8, /*!< choose sample 5 */ ++ IMG_RESOLVE_SAMPLE6 = 9, /*!< choose sample 6 */ ++ IMG_RESOLVE_SAMPLE7 = 10, /*!< choose sample 7 */ ++} IMG_RESOLVE_OP; ++ ++ ++#endif /* IMG_3DTYPES_H */ ++/****************************************************************************** ++ End of file (img_3dtypes.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/img_defs.h b/drivers/gpu/drm/img-rogue/img_defs.h +new file mode 100644 +index 000000000000..a79e8a65d3cf +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/img_defs.h +@@ -0,0 +1,567 @@ ++/*************************************************************************/ /*! ++@File ++@Title Common header containing type definitions for portability ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Contains variable and structure definitions. Any platform ++ specific types should be defined in this file. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef IMG_DEFS_H
++#define IMG_DEFS_H
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include <linux/types.h>
++#else
++#include <stddef.h>
++#endif
++#if !(defined(__linux__) && defined(__KERNEL__))
++#include <assert.h>
++#endif
++
++#include "img_types.h"
++
++#if defined(NO_INLINE_FUNCS)
++ #define INLINE
++ #define FORCE_INLINE
++#else
++#if defined(__cplusplus) || defined(INTEGRITY_OS)
++ #if !defined(INLINE)
++ #define INLINE inline
++ #endif
++ #define FORCE_INLINE static inline
++#else
++#if !defined(INLINE)
++ #define INLINE __inline
++#endif
++#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_)
++ #define FORCE_INLINE __forceinline
++#else
++ #define FORCE_INLINE static __inline
++#endif
++#endif
++#endif
++
++/* True if the GCC version is at least the given version. False for older
++ * versions of GCC, or other compilers.
++ */
++#if defined(__GNUC__)
++#define GCC_VERSION_AT_LEAST(major, minor) \
++ (__GNUC__ > (major) || \
++ (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
++#else
++#define GCC_VERSION_AT_LEAST(major, minor) 0
++#endif
++
++/* Use Clang's __has_extension and __has_builtin macros if available. */
++#if defined(__has_extension)
++#define has_clang_extension(e) __has_extension(e)
++#else
++#define has_clang_extension(e) 0
++#endif
++
++#if defined(__has_builtin)
++#define has_clang_builtin(e) __has_builtin(e)
++#else
++#define has_clang_builtin(e) 0
++#endif
++
++/* Use this in any file, or use attributes under GCC - see below */
++#ifndef PVR_UNREFERENCED_PARAMETER
++#define PVR_UNREFERENCED_PARAMETER(param) ((void)(param))
++#endif
++
++/* static_assert(condition, "message to print if it fails");
++ *
++ * Assert something at compile time. If the assertion fails, try to print
++ * the message, otherwise do nothing. static_assert is available if:
++ *
++ * - It's already defined as a macro (e.g. by <assert.h> in C11)
++ * - We're using MSVC which exposes static_assert unconditionally
++ * - We're using a C++ compiler that supports C++11
++ * - We're using GCC 4.6 and up in C mode (in which case it's available as
++ * _Static_assert)
++ *
++ * In all other cases, fall back to an equivalent that makes an invalid
++ * declaration.
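++ *
++ * As an illustration (using the fallback defined below),
++ *   static_assert(sizeof(IMG_UINT32) == 4, "IMG_UINT32 must be 32-bit");
++ * declares an unused extern array whose size is 1 when the condition holds
++ * and the invalid size -1 when it does not, forcing a compile error on
++ * failure.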
++ */ ++#if !defined(static_assert) && !defined(_MSC_VER) && \ ++ (!defined(__cplusplus) || __cplusplus < 201103L) || defined(__KLOCWORK__) ++ /* static_assert isn't already available */ ++ #if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \ ++ (defined(__clang__) && has_clang_extension(c_static_assert))) ++ #define static_assert _Static_assert ++ #else ++ #define static_assert(expr, message) \ ++ extern int static_assert_failed[(expr) ? 1 : -1] __attribute__((unused)) ++ #endif ++#endif ++ ++/* ++ * unreachable("explanation") can be used to indicate to the compiler that ++ * some parts of the code can never be reached, like the default branch ++ * of a switch that covers all real-world possibilities, even though there ++ * are other ints that exist for instance. ++ * ++ * The message will be printed as an assert() when debugging. ++ * ++ * Note: there is no need to add a 'return' or any error handling after ++ * calling unreachable(), as this call will never return. ++ */ ++#if defined(__linux__) && defined(__KERNEL__) ++/* Kernel has its own unreachable(), which is a simple infinite loop */ ++#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) ++ #define unreachable(msg) \ ++ do { \ ++ assert(!(msg)); \ ++ __builtin_unreachable(); \ ++ } while (false) ++#elif defined(_MSC_VER) ++ #define unreachable(msg) \ ++ do { \ ++ assert(!(msg)); \ ++ __assume(0); \ ++ } while (false) ++#else ++ #define unreachable(msg) \ ++ do { \ ++ assert(!(msg)); \ ++ while (1); \ ++ } while (false) ++#endif ++ ++/* ++ * assume(x > 2 && x <= 7) works like an assert(), except it hints to the ++ * compiler what it can assume to optimise the code, like a limited range ++ * of parameter values. ++ */ ++#if has_clang_builtin(__builtin_assume) ++ #define assume(expr) \ ++ do { \ ++ assert(expr); \ ++ __builtin_assume(expr); \ ++ } while (false) ++#elif defined(_MSC_VER) ++ #define assume(expr) \ ++ do { \ ++ assert(expr); \ ++ __assume(expr); \ ++ } while (false) ++#elif defined(__linux__) && defined(__KERNEL__) ++ #define assume(expr) ((void)(expr)) ++#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) ++ #define assume(expr) \ ++ do { \ ++ if (unlikely(!(expr))) \ ++ unreachable("Assumption isn't true: " # expr); \ ++ } while (false) ++#else ++ #define assume(expr) assert(expr) ++#endif ++ ++/*! Macro to calculate the n-byte aligned value from that supplied rounding up. ++ * n must be a power of two. ++ * ++ * Both arguments should be of a type with the same size otherwise the macro may ++ * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n. ++ */ ++#define PVR_ALIGN(_x, _n) (((_x)+((_n)-1U)) & ~((_n)-1U)) ++ ++#if defined(_WIN32) ++ ++#if defined(WINDOWS_WDF) ++ ++ /* ++ * For WINDOWS_WDF drivers we don't want these defines to overwrite calling conventions propagated through the build system. ++ * This 'empty' choice helps to resolve all the calling conv issues. 
++ *
++ */
++ #define IMG_CALLCONV
++ #define C_CALLCONV
++
++ #define IMG_INTERNAL
++ #define IMG_RESTRICT __restrict
++
++ /*
++ * The proper way of dll linking under MS compilers is made of two things:
++ * - decorate implementation with __declspec(dllexport)
++ * this decoration helps compiler with making the so called
++ * 'export library'
++ * - decorate forward-declaration (in a source dependent on a dll) with
++ * __declspec(dllimport), this decoration helps the compiler to make
++ * faster and smaller code in terms of calling dll-imported functions
++ *
++ * Usually these decorations are performed by having a single macro
++ * definition that expands to a proper __declspec() depending on the
++ * translation unit, dllexport inside the dll source and dllimport outside
++ * the dll source. Having IMG_EXPORT and IMG_IMPORT resolving to the same
++ * __declspec() makes no sense, but at least works.
++ */
++ #define IMG_IMPORT __declspec(dllexport)
++ #define IMG_EXPORT __declspec(dllexport)
++
++#else
++
++ #define IMG_CALLCONV __stdcall
++ #define IMG_INTERNAL
++ #define IMG_EXPORT __declspec(dllexport)
++ #define IMG_RESTRICT __restrict
++ #define C_CALLCONV __cdecl
++
++ /*
++ * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations
++ * match. Some compilers require the header to be declared IMPORT, while
++ * the implementation is declared EXPORT.
++ */
++ #define IMG_IMPORT IMG_EXPORT
++
++#endif
++
++#if defined(UNDER_WDDM)
++ #ifndef _INC_STDLIB
++ #if defined(__mips)
++ /* do nothing */
++ #elif defined(UNDER_MSBUILD)
++ /* do nothing */
++ #else
++ _CRTIMP void __cdecl abort(void);
++ #endif
++ #endif
++#endif /* UNDER_WDDM */
++#else
++ #if (defined(__linux__) || defined(__QNXNTO__)) && defined(__KERNEL__)
++ #define IMG_INTERNAL
++ #define IMG_EXPORT
++ #define IMG_CALLCONV
++ #elif defined(__linux__) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv)
++ #define IMG_CALLCONV
++ #define C_CALLCONV
++
++ #if defined(__METAG)
++ #define IMG_INTERNAL
++ #else
++ #define IMG_INTERNAL __attribute__((visibility("hidden")))
++ #endif
++
++ #define IMG_EXPORT __attribute__((visibility("default")))
++ #define IMG_RESTRICT __restrict__
++ #elif defined(INTEGRITY_OS)
++ #define IMG_CALLCONV
++ #define IMG_INTERNAL
++ #define IMG_EXPORT
++ #define IMG_RESTRICT
++ #define C_CALLCONV
++ #define __cdecl
++
++ #ifndef USE_CODE
++ #define IMG_ABORT() printf("IMG_ABORT was called.\n")
++ #endif
++ #else
++ #error("define an OS")
++ #endif
++
++#endif
++
++/* Use default definition if not overridden */
++#ifndef IMG_ABORT
++ #if defined(EXIT_ON_ABORT)
++ #define IMG_ABORT() exit(1)
++ #else
++ #define IMG_ABORT() abort()
++ #endif
++#endif
++
++/* The best way to suppress unused parameter warnings using GCC is to use a
++ * variable attribute. Place the __maybe_unused between the type and name of an
++ * unused parameter in a function parameter list e.g. 'int __maybe_unused var'.
++ * This should only be used in GCC build environments, for example, in files
++ * that compile only on Linux.
++ * Other files should use PVR_UNREFERENCED_PARAMETER
++ */
++
++/* Kernel macros for compiler attributes */
++/* Note: param positions start at 1 */
++#if defined(__linux__) && defined(__KERNEL__)
++ #include <linux/compiler.h>
++
++ #if !defined(__fallthrough)
++ #if GCC_VERSION_AT_LEAST(7, 0)
++ #define __fallthrough __attribute__((__fallthrough__))
++ #else
++ #define __fallthrough
++ #endif
++ #endif
++#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
++ #define __must_check __attribute__((warn_unused_result))
++ #define __maybe_unused __attribute__((unused))
++ #define __malloc __attribute__((malloc))
++
++ /* Bionic's <sys/cdefs.h> might have defined these already */
++ /* See https://android.googlesource.com/platform/bionic.git/+/master/libc/include/sys/cdefs.h */
++ #if !defined(__packed)
++ #define __packed __attribute__((packed))
++ #endif
++ #if !defined(__aligned)
++ #define __aligned(n) __attribute__((aligned(n)))
++ #endif
++ #if !defined(__noreturn)
++ #define __noreturn __attribute__((noreturn))
++ #endif
++
++ /* That one compiler that supports attributes but doesn't support
++ * the printf attribute... */
++ #if defined(__GNUC__)
++ #define __printf(fmt, va) __attribute__((format(printf, (fmt), (va))))
++ #else
++ #define __printf(fmt, va)
++ #endif /* defined(__GNUC__) */
++
++ #if defined(__cplusplus) && (__cplusplus >= 201703L)
++ #define __fallthrough [[fallthrough]]
++ #elif GCC_VERSION_AT_LEAST(7, 0)
++ #define __fallthrough __attribute__((__fallthrough__))
++ #else
++ #define __fallthrough
++ #endif
++
++ #define __user
++ #define __force
++ #define __iomem
++#else
++ /* Silently ignore those attributes */
++ #define __printf(fmt, va)
++ #define __packed
++ #define __aligned(n)
++ #define __must_check
++ #define __maybe_unused
++ #define __malloc
++
++ #if defined(_MSC_VER) || defined(CC_ARM)
++ #define __noreturn __declspec(noreturn)
++ #else
++ #define __noreturn
++ #endif
++
++ /* This may already have been defined, e.g. by SAL (Source Annotation Language) */
++ #if !defined(__fallthrough)
++ #define __fallthrough
++ #endif
++
++ #define __user
++ #define __force
++ #define __iomem
++#endif
++
++
++/* Other attributes, following the same style */
++#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
++ #define __const_function __attribute__((const))
++#else
++ #define __const_function
++#endif
++
++
++/* GCC builtins */
++#if defined(__linux__) && defined(__KERNEL__)
++ #include <linux/compiler.h>
++#elif defined(__GNUC__) || defined(INTEGRITY_OS)
++
++/* Klocwork does not support __builtin_expect, which makes the actual condition
++ * expressions hidden during analysis, affecting it negatively. */
++#if !defined(__KLOCWORK__) && !defined(INTEGRITY_OS) && !defined(DEBUG)
++ #define likely(x) __builtin_expect(!!(x), 1)
++ #define unlikely(x) __builtin_expect(!!(x), 0)
++#endif
++
++ /* Compiler memory barrier to prevent reordering */
++ #define barrier() __asm__ __volatile__("": : :"memory")
++#else
++ #define barrier() static_assert(0, "barrier() isn't supported by your compiler");
++#endif
++
++/* That one OS that defines one but not the other... */
++#ifndef likely
++ #define likely(x) (x)
++#endif
++#ifndef unlikely
++ #define unlikely(x) (x)
++#endif
++
++/* These two macros are also provided by the kernel */
++#ifndef BIT
++#define BIT(b) (1UL << (b))
++#endif
++
++#ifndef BIT_ULL
++#define BIT_ULL(b) (1ULL << (b))
++#endif
++
++#define BIT_SET(f, b) BITMASK_SET((f), BIT(b))
++#define BIT_UNSET(f, b) BITMASK_UNSET((f), BIT(b))
++#define BIT_TOGGLE(f, b) BITMASK_TOGGLE((f), BIT(b))
++#define BIT_ISSET(f, b) BITMASK_HAS((f), BIT(b))
++
++#define BITMASK_SET(f, m) do { ((f) |= (m)); } while (false)
++#define BITMASK_UNSET(f, m) do { ((f) &= ~(m)); } while (false)
++#define BITMASK_TOGGLE(f, m) do { ((f) ^= (m)); } while (false)
++#define BITMASK_HAS(f, m) (((f) & (m)) == (m)) /* the bits from the mask are all set */
++#define BITMASK_ANY(f, m) (((f) & (m)) != 0U) /* any bit from the mask is set */
++
++#ifndef MAX
++#define MAX(a, b) (((a) > (b)) ? (a) : (b))
++#endif
++
++#ifndef MIN
++#define MIN(a, b) (((a) < (b)) ? (a) : (b))
++#endif
++
++#ifndef CLAMP
++#define CLAMP(min, max, n) ((n) < (min) ? (min) : ((n) > (max) ? (max) : (n)))
++#endif
++
++#define SWAP(X, Y) (X) ^= (Y); (Y) ^= (X); (X) ^= (Y);
++
++
++#if defined(__linux__) && defined(__KERNEL__)
++ #include <linux/types.h>
++ #include <linux/stddef.h>
++#endif
++
++/* Get a structure's address from the address of a member */
++#define IMG_CONTAINER_OF(ptr, type, member) \
++ (type *) ((uintptr_t) (ptr) - offsetof(type, member))
++
++/* Get a new pointer with an offset (in bytes) from a base address, useful
++ * when traversing byte buffers and accessing data in buffers through struct
++ * pointers.
++ * Note, this macro is not equivalent to or replacing offsetof() */
++#define IMG_OFFSET_ADDR(addr, offset_in_bytes) \
++ (void*)&(((IMG_UINT8*)(void*)(addr))[offset_in_bytes])
++
++/* Get a new pointer with an offset (in dwords) from a base address, useful
++ * when traversing byte buffers and accessing data in buffers through struct
++ * pointers.
++ * Note, this macro is not equivalent to or replacing offsetof() */
++#define IMG_OFFSET_ADDR_DW(addr, offset_in_dwords) \
++ (void*)(((IMG_UINT32*)(void*)(addr)) + (offset_in_dwords))
++
++/* The number of elements in a fixed-sized array */
++#ifndef ARRAY_SIZE
++#define ARRAY_SIZE(ARR) (sizeof(ARR) / sizeof((ARR)[0]))
++#endif
++
++/* To guarantee that __func__ can be used, define it as a macro here if it
++ isn't already provided by the compiler. */
++#if defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus < 201103L)
++#define __func__ __FUNCTION__
++#endif
++
++#if defined(__cplusplus)
++/* C++ Specific:
++ * Disallow use of copy and assignment operator within a class.
++ * Should be placed under private.
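++ *
++ * Example usage (hypothetical class, for illustration only):
++ *   class CFoo
++ *   {
++ *   private:
++ *       IMG_DISALLOW_COPY_AND_ASSIGN(CFoo);
++ *   };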
*/ ++#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \ ++ C(const C&); \ ++ void operator=(const C&) ++#endif ++ ++#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG) && !defined(__mips) && !defined(__riscv) ++ #include "/usr/include/valgrind/memcheck.h" ++ ++ #define VG_MARK_INITIALIZED(pvData,ui32Size) VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size) ++ #define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size) ++ #define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size) ++ #define VG_ASSERT_DEFINED(pvData,ui32Size) VALGRIND_CHECK_MEM_IS_DEFINED(pvData,ui32Size) ++#else ++ #if defined(_MSC_VER) ++ # define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) ++ #else ++ # define PVR_MSC_SUPPRESS_4127 ++ #endif ++ ++ #define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) ++ #define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) ++ #define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) ++ #define VG_ASSERT_DEFINED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) ++#endif ++ ++#define IMG_STRINGIFY_IMPL(x) # x ++#define IMG_STRINGIFY(x) IMG_STRINGIFY_IMPL(x) ++ ++#if defined(INTEGRITY_OS) ++ /* Definitions not present in INTEGRITY. */ ++ #define PATH_MAX 200 ++#endif ++ ++#if defined(__clang__) || defined(__GNUC__) ++ /* __SIZEOF_POINTER__ is defined already by these compilers */ ++#elif defined(INTEGRITY_OS) ++ #if defined(__Ptr_Is_64) ++ #define __SIZEOF_POINTER__ 8 ++ #else ++ #define __SIZEOF_POINTER__ 4 ++ #endif ++#elif defined(_WIN32) ++ #define __SIZEOF_POINTER__ sizeof(char *) ++#else ++ #warning Unknown OS - using default method to determine whether CPU arch is 64-bit. ++ #define __SIZEOF_POINTER__ sizeof(char *) ++#endif ++ ++/* RDI8567: gcc/clang/llvm load/store optimisations may cause issues with ++ * uncached device memory allocations. Some pointers are made 'volatile' ++ * to prevent those optimisations being applied to writes through those ++ * pointers. ++ */ ++#if (GCC_VERSION_AT_LEAST(7, 0) || defined(__clang__)) && (defined(__arm64__) || defined(__aarch64__)) ++#define NOLDSTOPT volatile ++/* after applying 'volatile' to a pointer, we may need to cast it to 'void *' ++ * to keep it compatible with its existing uses. ++ */ ++#define NOLDSTOPT_VOID (void *) ++ ++#define NOLDSTOPT_REQUIRED 1 ++#else ++#define NOLDSTOPT ++#define NOLDSTOPT_VOID ++#endif ++ ++#endif /* IMG_DEFS_H */ ++/***************************************************************************** ++ End of file (img_defs.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/img_elf.h b/drivers/gpu/drm/img-rogue/img_elf.h +new file mode 100644 +index 000000000000..8837d9592599 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/img_elf.h +@@ -0,0 +1,111 @@ ++/*************************************************************************/ /*! ++@File img_elf.h ++@Title IMG ELF file definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Platform RGX ++@Description Definitions for ELF file structures used in the DDK. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(IMG_ELF_H) ++#define IMG_ELF_H ++ ++#include "img_types.h" ++ ++/* ELF format defines */ ++#define ELF_PT_LOAD (0x1U) /* Program header identifier as Load */ ++#define ELF_SHT_SYMTAB (0x2U) /* Section identifier as Symbol Table */ ++#define ELF_SHT_STRTAB (0x3U) /* Section identifier as String Table */ ++#define MAX_STRTAB_NUM (0x8U) /* Maximum number of string table in the ELF file */ ++ ++/* Redefined structs of ELF format */ ++typedef struct ++{ ++ IMG_UINT8 ui32Eident[16]; ++ IMG_UINT16 ui32Etype; ++ IMG_UINT16 ui32Emachine; ++ IMG_UINT32 ui32Eversion; ++ IMG_UINT32 ui32Eentry; ++ IMG_UINT32 ui32Ephoff; ++ IMG_UINT32 ui32Eshoff; ++ IMG_UINT32 ui32Eflags; ++ IMG_UINT16 ui32Eehsize; ++ IMG_UINT16 ui32Ephentsize; ++ IMG_UINT16 ui32Ephnum; ++ IMG_UINT16 ui32Eshentsize; ++ IMG_UINT16 ui32Eshnum; ++ IMG_UINT16 ui32Eshtrndx; ++} IMG_ELF_HDR; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32Stname; ++ IMG_UINT32 ui32Stvalue; ++ IMG_UINT32 ui32Stsize; ++ IMG_UINT8 ui32Stinfo; ++ IMG_UINT8 ui32Stother; ++ IMG_UINT16 ui32Stshndx; ++} IMG_ELF_SYM; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32Shname; ++ IMG_UINT32 ui32Shtype; ++ IMG_UINT32 ui32Shflags; ++ IMG_UINT32 ui32Shaddr; ++ IMG_UINT32 ui32Shoffset; ++ IMG_UINT32 ui32Shsize; ++ IMG_UINT32 ui32Shlink; ++ IMG_UINT32 ui32Shinfo; ++ IMG_UINT32 ui32Shaddralign; ++ IMG_UINT32 ui32Shentsize; ++} IMG_ELF_SHDR; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32Ptype; ++ IMG_UINT32 ui32Poffset; ++ IMG_UINT32 ui32Pvaddr; ++ IMG_UINT32 ui32Ppaddr; ++ IMG_UINT32 ui32Pfilesz; ++ IMG_UINT32 ui32Pmemsz; ++ IMG_UINT32 ui32Pflags; ++ IMG_UINT32 ui32Palign; ++} IMG_ELF_PROGRAM_HDR; ++ ++#endif /* IMG_ELF_H */ +diff --git a/drivers/gpu/drm/img-rogue/img_types.h b/drivers/gpu/drm/img-rogue/img_types.h +new file mode 100644 +index 000000000000..c2654d21edb3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/img_types.h +@@ -0,0 +1,324 @@ ++/*************************************************************************/ /*! ++@File ++@Title Global types for use by IMG APIs ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines type aliases for use by IMG APIs. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef IMG_TYPES_H
++#define IMG_TYPES_H
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++/* To use C99 types and definitions, there are two special cases we need to
++ * cater for:
++ *
++ * - Visual Studio: in VS2010 or later, some standard headers are available,
++ * and MSVC has its own built-in sized types. We can define the C99 types
++ * in terms of these.
++ *
++ * - Linux kernel code: C99 sized types are defined in <linux/types.h>, but
++ * some other features (like macros for constants or printf format
++ * strings) are missing, so we need to fill in the gaps ourselves.
++ *
++ * For other cases (userspace code under Linux, Android or Neutrino, or
++ * firmware code), we can include the standard headers.
++ */
++#if defined(_MSC_VER)
++ #include <stdbool.h> /* bool */
++ #include "msvc_types.h"
++#elif defined(__linux__) && defined(__KERNEL__)
++ #include <linux/types.h>
++ #include <linux/kernel.h>
++ #include "kernel_types.h"
++#elif defined(__linux__) || defined(__METAG) || defined(__MINGW32__) || \
++ defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv)
++ #include <stddef.h> /* NULL */
++ #include <stdint.h>
++ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */
++ #include <limits.h> /* INT_MIN, etc */
++ #include <stdbool.h> /* bool */
++#elif defined(__mips)
++ #include <stddef.h> /* NULL */
++ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */
++ #include <stdbool.h> /* bool */
++#else
++ #error C99 support not set up for this build
++#endif
++
++/*
++ * Due to a Klocwork bug, 'true'/'false' constants are not recognized to be of
++ * boolean type. This results in a large number of false-positives being
++ * reported (MISRA.ETYPE.ASSIGN.2012: "An expression value of essential type
++ * 'signed char' is assigned to an object of essential type 'bool'"). Work
++ * around this by redefining those constants with cast to bool added.
++ */ ++#if defined(__KLOCWORK__) && !defined(__cplusplus) ++#undef true ++#undef false ++#define true ((bool) 1) ++#define false ((bool) 0) ++#endif ++ ++typedef unsigned int IMG_UINT; ++typedef int IMG_INT; ++ ++typedef uint8_t IMG_UINT8, *IMG_PUINT8; ++typedef uint8_t IMG_BYTE, *IMG_PBYTE; ++typedef int8_t IMG_INT8; ++typedef char IMG_CHAR, *IMG_PCHAR; ++ ++typedef uint16_t IMG_UINT16, *IMG_PUINT16; ++typedef int16_t IMG_INT16; ++typedef uint32_t IMG_UINT32, *IMG_PUINT32; ++typedef int32_t IMG_INT32, *IMG_PINT32; ++#if defined(INTEGRITY_OS) ++#if __INT_BIT >= 32U ++#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## U)) ++#elif __LONG_BIT >= 32U ++#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## UL)) ++#elif defined(__LLONG_BIT) && __LLONG_BIT >= 32U ++#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## ULL)) ++#endif ++#else /* defined(INTEGRITY_OS) */ ++#define IMG_UINT32_C(c) ((IMG_UINT32)UINT32_C(c)) ++#endif /* defined(INTEGRITY_OS) */ ++ ++typedef uint64_t IMG_UINT64, *IMG_PUINT64; ++typedef int64_t IMG_INT64; ++#define IMG_INT64_C(c) INT64_C(c) ++#if defined(INTEGRITY_OS) ++#if __INT_BIT >= 64U ++#define IMG_UINT64_C(n) (n ## U) ++#elif defined(__LONG_BIT) && __LONG_BIT >= 64U ++#define IMG_UINT64_C(n) (n ## UL) ++#elif defined(__LLONG_BIT) && __LLONG_BIT >= 64U ++#define IMG_UINT64_C(n) (n ## ULL) ++#endif ++#else /* defined(INTEGRITY_OS) */ ++#define IMG_UINT64_C(c) UINT64_C(c) ++#endif /* defined(INTEGRITY_OS) */ ++#define IMG_UINT16_C(c) UINT16_C(c) ++#define IMG_UINT64_FMTSPEC PRIu64 ++#define IMG_UINT64_FMTSPECX PRIX64 ++#define IMG_UINT64_FMTSPECx PRIx64 ++#define IMG_UINT64_FMTSPECo PRIo64 ++#define IMG_INT64_FMTSPECd PRId64 ++ ++#define IMG_UINT16_MAX UINT16_MAX ++#define IMG_UINT32_MAX UINT32_MAX ++#define IMG_UINT64_MAX UINT64_MAX ++ ++#define IMG_INT16_MAX INT16_MAX ++#define IMG_INT32_MAX INT32_MAX ++#define IMG_INT64_MAX INT64_MAX ++ ++/* Linux kernel mode does not use floating point */ ++typedef float IMG_FLOAT, *IMG_PFLOAT; ++typedef double IMG_DOUBLE; ++ ++typedef union ++{ ++ IMG_UINT32 ui32; ++ IMG_FLOAT f; ++} IMG_UINT32_FLOAT; ++ ++typedef int IMG_SECURE_TYPE; ++ ++typedef enum tag_img_bool ++{ ++ IMG_FALSE = 0, ++ IMG_TRUE = 1, ++ IMG_FORCE_ALIGN = 0x7FFFFFFF ++} IMG_BOOL, *IMG_PBOOL; ++ ++#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) ++typedef IMG_CHAR const* IMG_PCCHAR; ++#endif ++ ++/* Format specifiers for 'size_t' type */ ++#if defined(_MSC_VER) || defined(__MINGW32__) ++#define IMG_SIZE_FMTSPEC "%Iu" ++#define IMG_SIZE_FMTSPECX "%Ix" ++#else ++#define IMG_SIZE_FMTSPEC "%zu" ++#define IMG_SIZE_FMTSPECX "%zx" ++#endif ++ ++#if defined(__linux__) && defined(__KERNEL__) ++/* prints the function name when used with printk */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#define IMG_PFN_FMTSPEC "%ps" ++#else ++#define IMG_PFN_FMTSPEC "%pf" ++#endif ++#else ++#define IMG_PFN_FMTSPEC "%p" ++#endif ++ ++typedef void *IMG_HANDLE; ++ ++/* Process IDs */ ++typedef IMG_UINT32 IMG_PID; ++ ++/* OS connection type */ ++typedef int IMG_OS_CONNECTION; ++ ++ ++/* ++ * Address types. ++ * All types used to refer to a block of memory are wrapped in structures ++ * to enforce some degree of type safety, i.e. a IMG_DEV_VIRTADDR cannot ++ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the ++ * same thing. ++ * ++ * There is an assumption that the system contains at most one non-cpu mmu, ++ * and a memory block is only mapped by the MMU once. ++ * ++ * Different devices could have offset views of the physical address space. 
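++ *
++ * For example (illustrative only), the struct wrappers turn an accidental
++ * mix-up into a compile error:
++ *   IMG_DEV_PHYADDR sDevPAddr = { IMG_UINT64_C(0x80000000) };
++ *   IMG_DEV_VIRTADDR sDevVAddr = sDevPAddr;  // rejected: incompatible types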
++ * ++ */ ++ ++ ++/* ++ * ++ * +------------+ +------------+ +------------+ +------------+ ++ * | CPU | | DEV | | DEV | | DEV | ++ * +------------+ +------------+ +------------+ +------------+ ++ * | | | | ++ * | void * |IMG_DEV_VIRTADDR |IMG_DEV_VIRTADDR | ++ * | \-------------------/ | ++ * | | | ++ * +------------+ +------------+ | ++ * | MMU | | MMU | | ++ * +------------+ +------------+ | ++ * | | | ++ * | | | ++ * | | | ++ * +--------+ +---------+ +--------+ ++ * | Offset | | (Offset)| | Offset | ++ * +--------+ +---------+ +--------+ ++ * | | IMG_DEV_PHYADDR | ++ * | | | ++ * | | IMG_DEV_PHYADDR | ++ * +---------------------------------------------------------------------+ ++ * | System Address bus | ++ * +---------------------------------------------------------------------+ ++ * ++ */ ++ ++#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX ++#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX ++#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX ++#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX ++ ++/* cpu physical address */ ++typedef struct ++{ ++#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) ++ uintptr_t uiAddr; ++#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (uintptr_t)(var) ++#define CPUPHYADDR_FMTARG(var) (IMG_UINT64)(var) ++#define CPUPHYADDR_UINT_FMTSPEC "0x%016" IMG_UINT64_FMTSPECx ++#elif defined(__linux__) && defined(__KERNEL__) ++ phys_addr_t uiAddr; ++#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (phys_addr_t)(var) ++#define CPUPHYADDR_FMTARG(var) (&var) ++#define CPUPHYADDR_UINT_FMTSPEC "%pa" ++#else ++ IMG_UINT64 uiAddr; ++#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (IMG_UINT64)(var) ++#define CPUPHYADDR_FMTARG(var) (var) ++#define CPUPHYADDR_UINT_FMTSPEC "0x%016" IMG_UINT64_FMTSPECx ++#endif ++} IMG_CPU_PHYADDR; ++ ++/* device physical address */ ++typedef struct ++{ ++ IMG_UINT64 uiAddr; ++} IMG_DEV_PHYADDR; ++ ++/* dma address */ ++typedef struct ++{ ++ IMG_UINT64 uiAddr; ++} IMG_DMA_ADDR; ++ ++/* ++ rectangle structure ++*/ ++typedef struct ++{ ++ IMG_INT32 x0; ++ IMG_INT32 y0; ++ IMG_INT32 x1; ++ IMG_INT32 y1; ++} IMG_RECT; ++ ++typedef struct ++{ ++ IMG_INT16 x0; ++ IMG_INT16 y0; ++ IMG_INT16 x1; ++ IMG_INT16 y1; ++} IMG_RECT_16; ++ ++/* ++ * box structure ++ */ ++typedef struct ++{ ++ IMG_INT32 x0; ++ IMG_INT32 y0; ++ IMG_INT32 z0; ++ IMG_INT32 x1; ++ IMG_INT32 y1; ++ IMG_INT32 z1; ++} IMG_BOX; ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* IMG_TYPES_H */ ++/****************************************************************************** ++ End of file (img_types.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/img_types_check.h b/drivers/gpu/drm/img-rogue/img_types_check.h +new file mode 100644 +index 000000000000..4708583b6c23 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/img_types_check.h +@@ -0,0 +1,58 @@ ++/*************************************************************************/ /*! ++@File ++@Title Global types for use by IMG APIs ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Performs size checks on some of the IMG types. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef IMG_TYPES_CHECK_H
++#define IMG_TYPES_CHECK_H
++
++#ifndef __KERNEL__
++#include <assert.h>
++#endif /* __KERNEL__ */
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++static_assert(sizeof(IMG_BOOL) == 4, "invalid size of IMG_BOOL");
++static_assert(sizeof(IMG_INT) == 4, "invalid size of IMG_INT");
++static_assert(sizeof(IMG_UINT) == 4, "invalid size of IMG_UINT");
++static_assert(sizeof(PVRSRV_ERROR) == 4, "invalid size of PVRSRV_ERROR");
++
++#endif /* IMG_TYPES_CHECK_H */
+diff --git a/drivers/gpu/drm/img-rogue/include/cache_ops.h b/drivers/gpu/drm/img-rogue/include/cache_ops.h
+new file mode 100644
+index 000000000000..a1d714519d59
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/cache_ops.h
+@@ -0,0 +1,61 @@
++/*************************************************************************/ /*!
++@File
++@Title Services cache management header
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Defines for cache management which are visible internally
++ and externally
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef CACHE_OPS_H ++#define CACHE_OPS_H ++#include "img_types.h" ++/*! ++* @Defgroup CPUCacheAPIs ++* @{ ++*/ ++#define CACHE_BATCH_MAX (8U) ++#define MAX_DMA_OPS (34) ++typedef IMG_UINT32 PVRSRV_CACHE_OP; /*!< Type represents cache maintenance operation */ ++#define PVRSRV_CACHE_OP_NONE 0x0U /*!< No operation */ ++#define PVRSRV_CACHE_OP_CLEAN 0x1U /*!< Flush w/o invalidate */ ++#define PVRSRV_CACHE_OP_INVALIDATE 0x2U /*!< Invalidate w/o flush */ ++#define PVRSRV_CACHE_OP_FLUSH 0x3U /*!< Flush w/ invalidate */ ++/*! @} End of Defgroup CPUCacheAPIs */ ++ ++#endif /* CACHE_OPS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/devicemem_typedefs.h b/drivers/gpu/drm/img-rogue/include/devicemem_typedefs.h +new file mode 100644 +index 000000000000..dd66fccf3322 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/devicemem_typedefs.h +@@ -0,0 +1,142 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Client side part of device memory management -- this file ++ is forked from new_devmem_allocation.h as this one has to ++ reside in the top level include so that client code is able ++ to make use of the typedefs. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
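For orientation, a sketch of how the PVRSRV_CACHE_OP_* values above might be chosen for a DMA transfer (illustrative only, not part of the patch; CacheOpForDma is an invented name): clean when CPU writes must reach memory before the device reads, invalidate when stale lines must be discarded before the CPU reads back, flush when both apply.

    /* Illustrative sketch, not part of the patch. */
    static PVRSRV_CACHE_OP CacheOpForDma(IMG_BOOL bCpuWrote, IMG_BOOL bCpuWillRead)
    {
        if (bCpuWrote && bCpuWillRead)
            return PVRSRV_CACHE_OP_FLUSH;      /* clean + invalidate */
        if (bCpuWrote)
            return PVRSRV_CACHE_OP_CLEAN;      /* write-back, keep lines */
        if (bCpuWillRead)
            return PVRSRV_CACHE_OP_INVALIDATE; /* discard stale lines */
        return PVRSRV_CACHE_OP_NONE;
    }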
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef DEVICEMEM_TYPEDEFS_H
++#define DEVICEMEM_TYPEDEFS_H
++
++#include <powervr/mem_types.h>
++#include "img_types.h"
++#include "pvrsrv_memallocflags.h"
++
++typedef struct DEVMEM_CONTEXT_TAG DEVMEM_CONTEXT; /*!< Convenience typedef for struct DEVMEM_CONTEXT_TAG */
++typedef struct DEVMEM_HEAP_TAG DEVMEM_HEAP; /*!< Convenience typedef for struct DEVMEM_HEAP_TAG */
++typedef struct DEVMEM_MEMDESC_TAG DEVMEM_MEMDESC; /*!< Convenience typedef for struct DEVMEM_MEMDESC_TAG */
++typedef struct DEVMEM_PAGELIST_TAG DEVMEM_PAGELIST; /*!< Convenience typedef for struct DEVMEM_PAGELIST_TAG */
++
++typedef IMG_HANDLE DEVMEM_EXPORTHANDLE; /*!< Typedef for DeviceMem Export Handle */
++typedef IMG_UINT64 DEVMEM_EXPORTKEY; /*!< Typedef for DeviceMem Export Key */
++typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T; /*!< Typedef for DeviceMem SIZE_T */
++typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T; /*!< Typedef for DeviceMem LOG2 Alignment */
++
++typedef struct DEVMEMX_PHYS_MEMDESC_TAG DEVMEMX_PHYSDESC; /*!< Convenience typedef for DevmemX physical */
++typedef struct DEVMEMX_VIRT_MEMDESC_TAG DEVMEMX_VIRTDESC; /*!< Convenience typedef for DevmemX virtual */
++
++/*! calling code needs all the info in this struct, to be able to pass it around */
++typedef struct
++{
++    /*! A handle to the PMR. */
++    IMG_HANDLE hPMRExportHandle;
++    /*! The "key" to prove we have authorisation to use this PMR */
++    IMG_UINT64 uiPMRExportPassword;
++    /*! Size and alignment properties for this PMR.
Note, these ++ numbers are not trusted in kernel, but we need to cache them ++ client-side in order to allocate from the VM arena. The kernel ++ will know the actual alignment and size of the PMR and thus ++ would prevent client code from breaching security here. Ditto ++ for physmem granularity (aka page size) if this is different ++ from alignment */ ++ IMG_DEVMEM_SIZE_T uiSize; ++ /*! We call this "contiguity guarantee" to be more precise than ++ calling it "alignment" or "page size", terms which may seem ++ similar but have different emphasis. The number reported here ++ is the minimum contiguity guarantee from the creator of the ++ PMR. Now, there is no requirement to allocate that coarsely ++ from the RA. The alignment given to the RA simply needs to be ++ at least as coarse as the device page size for the heap we ++ ultimately intend to map into. What is important is that the ++ device MMU data page size is not greater than the minimum ++ contiguity guarantee from the PMR. This value is reported to ++ the client in order that it can choose to make early checks and ++ perhaps decide which heap (in a variable page size scenario) it ++ would be safe to map this PMR into. For convenience, the ++ client may choose to use this argument as the alignment of the ++ virtual range he chooses to allocate, but this is _not_ ++ necessary and in many cases would be able to get away with a ++ finer alignment, should the heap into which this PMR will be ++ mapped support it. */ ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee; ++} DEVMEM_EXPORTCOOKIE; ++ ++/* Enum that describes the operation associated with changing sparse memory */ ++typedef IMG_UINT32 SPARSE_MEM_RESIZE_FLAGS; ++#define SPARSE_RESIZE_NONE 0U ++ ++ /* This should be set to indicate the change needs allocation */ ++#define SPARSE_RESIZE_ALLOC 1U ++ ++ /* This should be set to indicate the change needs free */ ++#define SPARSE_RESIZE_FREE 2U ++ ++#define SPARSE_RESIZE_BOTH (SPARSE_RESIZE_ALLOC | SPARSE_RESIZE_FREE) ++ ++ /* This should be set to silently swap underlying physical memory ++ * without disturbing its device or cpu virtual maps. ++ * This flag is not supported in the case of PDUMP and could lead to ++ * PDUMP panic when used. ++ */ ++#define SPARSE_REMAP_MEM 4U ++ ++ /* Should be set to get the sparse changes appear in cpu virtual map */ ++#define SPARSE_MAP_CPU_ADDR 8U ++ ++ ++/* To be used with all the sparse allocations that gets mapped to CPU Virtual ++ * space. The sparse allocation CPU mapping is torn down and re-mapped every ++ * time the sparse allocation layout changes. ++ */ ++#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1 ++ ++/* To use with DevmemSubAllocate() as the default factor if no over-allocation ++ * is desired. ++ */ ++#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER (1U) ++ ++/* Defines the max length for PMR, MemDesc, Device memory History and RI debug ++ * annotations stored in memory, including the null terminator. ++ */ ++#define DEVMEM_ANNOTATION_MAX_LEN ((IMG_UINT32)PVR_ANNOTATION_MAX_LEN + 1U) ++ ++#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/dllist.h b/drivers/gpu/drm/img-rogue/include/dllist.h +new file mode 100644 +index 000000000000..fa73dff59c44 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/dllist.h +@@ -0,0 +1,408 @@ ++/*************************************************************************/ /*! ++@File ++@Title Double linked list header ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Double linked list interface ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef DLLIST_H ++#define DLLIST_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++/*! ++ Pointer to a linked list node ++*/ ++typedef struct DLLIST_NODE_ *PDLLIST_NODE; ++ ++ ++/*! ++ Node in a linked list ++*/ ++/* ++ * Note: the following structure's size is architecture-dependent and clients ++ * may need to create a mirror of the structure definition if it needs to be ++ * used in a structure shared between host and device. ++ * Consider such clients if any changes are made to this structure. ++ */ ++typedef struct DLLIST_NODE_ ++{ ++ struct DLLIST_NODE_ *psPrevNode; ++ struct DLLIST_NODE_ *psNextNode; ++} DLLIST_NODE; ++ ++ ++/*! ++ Static initialiser ++*/ ++#define DECLARE_DLLIST(n) \ ++DLLIST_NODE (n) = {&(n), &(n)} ++ ++/*************************************************************************/ /*! ++@Function dllist_foreach_node ++ ++@Description Walk through all the nodes on the list. ++ Safe against removal of (node). 
++ ++@Input list_head List node to start the operation ++@Input node Current list node ++@Input next Node after the current one ++ ++*/ ++/*****************************************************************************/ ++#define dllist_foreach_node(list_head, node, next) \ ++ for ((node) = (list_head)->psNextNode, (next) = (node)->psNextNode; \ ++ (node) != (list_head); \ ++ (node) = (next), (next) = (node)->psNextNode) ++ ++#define dllist_foreach_node_backwards(list_head, node, prev) \ ++ for ((node) = (list_head)->psPrevNode, (prev) = (node)->psPrevNode; \ ++ (node) != (list_head); \ ++ (node) = (prev), (prev) = (node)->psPrevNode) ++ ++ ++/*************************************************************************/ /*! ++@Function dllist_foreach ++ ++@Description Simplification of dllist_foreach_node. ++ Walk through all the nodes on the list. ++ Safe against removal of currently-iterated node. ++ ++ Adds utility-macro dllist_cur() to typecast the current node. ++ ++@Input list_head List node to start the operation ++ ++*/ ++/*****************************************************************************/ ++#define dllist_foreach(list_head) \ ++ for (DLLIST_NODE *_DllNode = (list_head).psNextNode, *_DllNext = _DllNode->psNextNode; \ ++ _DllNode != &(list_head); \ ++ _DllNode = _DllNext, _DllNext = _DllNode->psNextNode) ++ ++#define dllist_foreach_backwards(list_head) \ ++ for (DLLIST_NODE *_DllNode = (list_head).psPrevNode, *_DllPrev = _DllNode->psPrevNode; \ ++ _DllNode != &(list_head); \ ++ _DllNode = _DllPrev, _DllPrev = _DllNode->psPrevNode) ++ ++#define dllist_cur(type, member) IMG_CONTAINER_OF(_DllNode, type, member) ++ ++/*************************************************************************/ /*! ++@Function dllist_init ++ ++@Description Initialize a new double linked list ++ ++@Input psListHead List head Node ++ ++*/ ++/*****************************************************************************/ ++static INLINE ++void dllist_init(PDLLIST_NODE psListHead) ++{ ++ psListHead->psPrevNode = psListHead; ++ psListHead->psNextNode = psListHead; ++} ++ ++/*************************************************************************/ /*! ++@Function dllist_is_empty ++ ++@Description Returns whether the list is empty ++ ++@Input psListHead List head Node ++ ++*/ ++/*****************************************************************************/ ++static INLINE ++bool dllist_is_empty(PDLLIST_NODE psListHead) ++{ ++ return ((psListHead->psPrevNode == psListHead) ++ && (psListHead->psNextNode == psListHead)); ++} ++ ++/*************************************************************************/ /*! ++@Function dllist_add_to_head ++ ++@Description Add psNewNode to head of list psListHead ++ ++@Input psListHead Head Node ++@Input psNewNode New Node ++ ++*/ ++/*****************************************************************************/ ++static INLINE ++void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode) ++{ ++ PDLLIST_NODE psTmp; ++ ++ psTmp = psListHead->psNextNode; ++ ++ psListHead->psNextNode = psNewNode; ++ psNewNode->psNextNode = psTmp; ++ ++ psTmp->psPrevNode = psNewNode; ++ psNewNode->psPrevNode = psListHead; ++} ++ ++ ++/*************************************************************************/ /*! 
++@Function dllist_add_to_tail
++
++@Description Add psNewNode to tail of list psListHead
++
++@Input psListHead Head Node
++@Input psNewNode New Node
++
++*/
++/*****************************************************************************/
++static INLINE
++void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
++{
++    PDLLIST_NODE psTmp;
++
++    psTmp = psListHead->psPrevNode;
++
++    psListHead->psPrevNode = psNewNode;
++    psNewNode->psPrevNode = psTmp;
++
++    psTmp->psNextNode = psNewNode;
++    psNewNode->psNextNode = psListHead;
++}
++
++/*************************************************************************/ /*!
++@Function dllist_node_is_in_list
++
++@Description Returns true if psNode is in a list
++
++@Input psNode List node
++
++*/
++/*****************************************************************************/
++static INLINE
++bool dllist_node_is_in_list(PDLLIST_NODE psNode)
++{
++    return (psNode->psNextNode != NULL);
++}
++
++/*************************************************************************/ /*!
++@Function dllist_get_next_node
++
++@Description Returns the list node after psListHead or NULL if psListHead is
++             the only element in the list.
++
++@Input psListHead List node to start the operation
++
++*/
++/*****************************************************************************/
++static INLINE
++PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead)
++{
++    if (psListHead->psNextNode == psListHead)
++    {
++        return NULL;
++    }
++    else
++    {
++        return psListHead->psNextNode;
++    }
++}
++
++/*************************************************************************/ /*!
++@Function dllist_get_prev_node
++
++@Description Returns the list node preceding psListHead or NULL if
++             psListHead is the only element in the list.
++
++@Input psListHead List node to start the operation
++
++*/
++/*****************************************************************************/
++static INLINE
++PDLLIST_NODE dllist_get_prev_node(PDLLIST_NODE psListHead)
++{
++    if (psListHead->psPrevNode == psListHead)
++    {
++        return NULL;
++    }
++    else
++    {
++        return psListHead->psPrevNode;
++    }
++}
++
++/*************************************************************************/ /*!
++@Function dllist_remove_node
++
++@Description Removes psListNode from the list where it currently belongs
++
++@Input psListNode List node to be removed
++
++*/
++/*****************************************************************************/
++static INLINE
++void dllist_remove_node(PDLLIST_NODE psListNode)
++{
++    psListNode->psNextNode->psPrevNode = psListNode->psPrevNode;
++    psListNode->psPrevNode->psNextNode = psListNode->psNextNode;
++
++    /* Clear the node to show it's not in a list */
++    psListNode->psPrevNode = NULL;
++    psListNode->psNextNode = NULL;
++}
++
++/*************************************************************************/ /*!
++@Function dllist_replace_head
++
++@Description Moves the list from psOldHead to psNewHead
++
++@Input psOldHead List node to be replaced. Will become a
++                 head node of an empty list.
++@Input psNewHead List node to be inserted. Must be an
++                 empty list head.
++
++*/
++/*****************************************************************************/
++static INLINE
++void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead)
++{
++    if (dllist_is_empty(psOldHead))
++    {
++        psNewHead->psNextNode = psNewHead;
++        psNewHead->psPrevNode = psNewHead;
++    }
++    else
++    {
++        /* Change the neighbouring nodes */
++        psOldHead->psNextNode->psPrevNode = psNewHead;
++        psOldHead->psPrevNode->psNextNode = psNewHead;
++
++        /* Copy the old data to the new node */
++        psNewHead->psNextNode = psOldHead->psNextNode;
++        psNewHead->psPrevNode = psOldHead->psPrevNode;
++
++        /* Remove links to the previous list */
++        psOldHead->psNextNode = psOldHead;
++        psOldHead->psPrevNode = psOldHead;
++    }
++}
++
++/**************************************************************************/ /*!
++@Function dllist_insert_list_at_head
++
++@Description Inserts psInHead list into the head of the psOutHead list.
++             After this operation psOutHead will contain psInHead at the
++             head of the list and the remaining elements that were
++             already in psOutHead will be placed after the psInList (so
++             at the tail of the original list).
++
++@Input psOutHead List node psInHead will be inserted to.
++@Input psInHead List node to be inserted to psOutHead.
++                After this operation this becomes an empty list.
++*/ /***************************************************************************/
++static INLINE
++void dllist_insert_list_at_head(PDLLIST_NODE psOutHead, PDLLIST_NODE psInHead)
++{
++    PDLLIST_NODE psInHeadNextNode = psInHead->psNextNode;
++    PDLLIST_NODE psOutHeadNextNode = psOutHead->psNextNode;
++
++    if (!dllist_is_empty(psInHead))
++    {
++        psOutHead->psNextNode = psInHeadNextNode;
++        psInHeadNextNode->psPrevNode = psOutHead;
++
++        psInHead->psPrevNode->psNextNode = psOutHeadNextNode;
++        psOutHeadNextNode->psPrevNode = psInHead->psPrevNode;
++
++        dllist_init(psInHead);
++    }
++}
++
++/*************************************************************************/ /*!
++@Description Pointer to a dllist comparison callback function.
++@Input psNode Pointer to a node in a dllist.
++@Input psNext Pointer to psNode's next neighbour.
++*/ /**************************************************************************/
++typedef bool (*DLLIST_CMP_CB)(const DLLIST_NODE *psNode, const DLLIST_NODE *psNext);
++
++/*************************************************************************/ /*!
++@Function dllist_sort
++
++@Description Insert-sorts the List in place.
++             The cmpr function is passed the current and next node,
++             from which the user writes the function responsible
++             for choosing to swap order or not.
++             The function returns true if a swap is required.
++
++@Input psListHead List Head to be sorted.
++ ++@Input cmpr Function pointer to use for sorting ++ ++*/ ++/*****************************************************************************/ ++static INLINE void dllist_sort(PDLLIST_NODE psListHead, ++ DLLIST_CMP_CB cmpr) ++{ ++ DLLIST_NODE *node, *next; ++ DLLIST_NODE sTempHead; ++ ++ dllist_init(&sTempHead); ++ ++ dllist_foreach_node(psListHead, node, next) ++ { ++ dllist_remove_node(node); ++ dllist_add_to_head(&sTempHead, node); ++ } ++ ++ while (!dllist_is_empty(&sTempHead)) ++ { ++ DLLIST_NODE *psSmallestNode = NULL; ++ ++ dllist_foreach_node(&sTempHead, node, next) ++ { ++ if (!psSmallestNode || cmpr(psSmallestNode, node)) ++ { ++ psSmallestNode = node; ++ } ++ } ++ ++ dllist_remove_node(psSmallestNode); ++ dllist_add_to_tail(psListHead, psSmallestNode); ++ } ++} ++ ++#endif /* DLLIST_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/drm/netlink.h b/drivers/gpu/drm/img-rogue/include/drm/netlink.h +new file mode 100644 +index 000000000000..7b0a71fbaab3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/drm/netlink.h +@@ -0,0 +1,147 @@ ++/* ++ * @File ++ * @Title Nulldisp/Netlink interface definition ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
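For orientation, a usage sketch of the dllist interface defined above (illustrative only, not part of the patch): EXAMPLE_ITEM and the function names are invented, and IMG_CONTAINER_OF from img_defs.h is assumed to behave like the kernel's container_of(). The comparator returns true when the first node should be ordered after the second, which yields an ascending sort here.

    /* Illustrative sketch, not part of the patch. */
    typedef struct
    {
        DLLIST_NODE sNode;   /* embedded list linkage */
        IMG_UINT32 ui32Key;  /* sort key */
    } EXAMPLE_ITEM;

    /* Return true when psNode should come after psNext (swap required). */
    static bool ExampleCmp(const DLLIST_NODE *psNode, const DLLIST_NODE *psNext)
    {
        return IMG_CONTAINER_OF(psNode, EXAMPLE_ITEM, sNode)->ui32Key >
               IMG_CONTAINER_OF(psNext, EXAMPLE_ITEM, sNode)->ui32Key;
    }

    static void ExampleSortAscending(EXAMPLE_ITEM *pasItems, IMG_UINT32 ui32Count)
    {
        DECLARE_DLLIST(sHead); /* head initialised to point at itself */
        IMG_UINT32 i;

        for (i = 0; i < ui32Count; i++)
        {
            dllist_add_to_tail(&sHead, &pasItems[i].sNode);
        }

        dllist_sort(&sHead, ExampleCmp);
    }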
++ */ ++ ++ ++#ifndef __NETLINK_H__ ++#define __NETLINK_H__ ++ ++/* For multi-plane pixel formats */ ++#define NLPVRDPY_MAX_NUM_PLANES 3 ++ ++enum nlpvrdpy_cmd { ++ __NLPVRDPY_CMD_INVALID, ++ NLPVRDPY_CMD_CONNECT, ++ NLPVRDPY_CMD_CONNECTED, ++ NLPVRDPY_CMD_DISCONNECT, ++ NLPVRDPY_CMD_FLIP, ++ NLPVRDPY_CMD_FLIPPED, ++ NLPVRDPY_CMD_COPY, ++ NLPVRDPY_CMD_COPIED, ++ __NLPVRDPY_CMD_MAX ++}; ++#define NLPVRDPY_CMD_MAX (__NLPVRDPY_CMD_MAX - 1) ++ ++enum nlpvrdpy_attr { ++ __NLPVRDPY_ATTR_INVALID, ++ NLPVRDPY_ATTR_NAME, ++ NLPVRDPY_ATTR_MINOR, ++ NLPVRDPY_ATTR_NUM_PLANES, ++ NLPVRDPY_ATTR_WIDTH, ++ NLPVRDPY_ATTR_HEIGHT, ++ NLPVRDPY_ATTR_PIXFMT, ++ NLPVRDPY_ATTR_YUV_CSC, ++ NLPVRDPY_ATTR_YUV_BPP, ++ NLPVRDPY_ATTR_PLANE0_ADDR, ++ NLPVRDPY_ATTR_PLANE0_SIZE, ++ NLPVRDPY_ATTR_PLANE0_OFFSET, ++ NLPVRDPY_ATTR_PLANE0_PITCH, ++ NLPVRDPY_ATTR_PLANE0_GEM_OBJ_NAME, ++ NLPVRDPY_ATTR_PLANE1_ADDR, ++ NLPVRDPY_ATTR_PLANE1_SIZE, ++ NLPVRDPY_ATTR_PLANE1_OFFSET, ++ NLPVRDPY_ATTR_PLANE1_PITCH, ++ NLPVRDPY_ATTR_PLANE1_GEM_OBJ_NAME, ++ NLPVRDPY_ATTR_PLANE2_ADDR, ++ NLPVRDPY_ATTR_PLANE2_SIZE, ++ NLPVRDPY_ATTR_PLANE2_OFFSET, ++ NLPVRDPY_ATTR_PLANE2_PITCH, ++ NLPVRDPY_ATTR_PLANE2_GEM_OBJ_NAME, ++ NLPVRDPY_ATTR_FB_MODIFIER, ++ NLPVRDPY_ATTR_NAMING_REQUIRED, ++ NLPVRDPY_ATTR_PAD, ++ __NLPVRDPY_ATTR_MAX ++}; ++#define NLPVRDPY_ATTR_MAX (__NLPVRDPY_ATTR_MAX - 1) ++ ++static struct nla_policy __attribute__((unused)) ++nlpvrdpy_policy[NLPVRDPY_ATTR_MAX + 1] = { ++ [NLPVRDPY_ATTR_NAME] = { .type = NLA_STRING }, ++ [NLPVRDPY_ATTR_MINOR] = { .type = NLA_U32 }, ++ [NLPVRDPY_ATTR_NUM_PLANES] = { .type = NLA_U8 }, ++ [NLPVRDPY_ATTR_WIDTH] = { .type = NLA_U32 }, ++ [NLPVRDPY_ATTR_HEIGHT] = { .type = NLA_U32 }, ++ [NLPVRDPY_ATTR_PIXFMT] = { .type = NLA_U32 }, ++ [NLPVRDPY_ATTR_YUV_CSC] = { .type = NLA_U8 }, ++ [NLPVRDPY_ATTR_YUV_BPP] = { .type = NLA_U8 }, ++ [NLPVRDPY_ATTR_PLANE0_ADDR] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE0_SIZE] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE0_OFFSET] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE0_PITCH] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE0_GEM_OBJ_NAME] = { .type = NLA_U32 }, ++ [NLPVRDPY_ATTR_PLANE1_ADDR] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE1_SIZE] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE1_OFFSET] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE1_PITCH] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE1_GEM_OBJ_NAME] = { .type = NLA_U32 }, ++ [NLPVRDPY_ATTR_PLANE2_ADDR] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE2_SIZE] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE2_OFFSET] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE2_PITCH] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_PLANE2_GEM_OBJ_NAME] = { .type = NLA_U32 }, ++ [NLPVRDPY_ATTR_FB_MODIFIER] = { .type = NLA_U64 }, ++ [NLPVRDPY_ATTR_NAMING_REQUIRED] = { .type = NLA_FLAG }, ++}; ++ ++#define NLPVRDPY_ATTR_PLANE(index, type) \ ++ ({ \ ++ enum nlpvrdpy_attr __retval; \ ++ \ ++ switch (index) { \ ++ case 0: \ ++ __retval = NLPVRDPY_ATTR_PLANE0_ ## type; \ ++ break; \ ++ case 1: \ ++ __retval = NLPVRDPY_ATTR_PLANE1_ ## type; \ ++ break; \ ++ case 2: \ ++ __retval = NLPVRDPY_ATTR_PLANE2_ ## type; \ ++ break; \ ++ default: \ ++ __retval = __NLPVRDPY_ATTR_INVALID; \ ++ break; \ ++ }; \ ++ \ ++ __retval; \ ++ }) ++ ++#endif /* __NETLINK_H__ */ +diff --git a/drivers/gpu/drm/img-rogue/include/drm/nulldisp_drm.h b/drivers/gpu/drm/img-rogue/include/drm/nulldisp_drm.h +new file mode 100644 +index 000000000000..8403fb56f1a5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/drm/nulldisp_drm.h +@@ -0,0 +1,105 @@ ++/* ++ * 
@File
++ * @Title Nulldisp DRM definitions shared between kernel and user space.
++ * @Codingstyle LinuxKernel
++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++ * @License Dual MIT/GPLv2
++ *
++ * The contents of this file are subject to the MIT license as set out below.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to deal
++ * in the Software without restriction, including without limitation the rights
++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++ * copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * Alternatively, the contents of this file may be used under the terms of
++ * the GNU General Public License Version 2 ("GPL") in which case the provisions
++ * of GPL are applicable instead of those above.
++ *
++ * If you wish to allow use of your version of this file only under the terms of
++ * GPL, and not to allow others to use your version of this file under the terms
++ * of the MIT license, indicate your decision by deleting the provisions above
++ * and replace them with the notice and other provisions required by GPL as set
++ * out in the file called "GPL-COPYING" included in this distribution. If you do
++ * not delete the provisions above, a recipient may use your version of this file
++ * under the terms of either the MIT license or GPL.
++ *
++ * This License is also included in this distribution in the file called
++ * "MIT-COPYING".
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#if !defined(__NULLDISP_DRM_H__)
++#define __NULLDISP_DRM_H__
++
++#if defined(__KERNEL__)
++#include <drm/drm.h>
++#else
++#include <drm.h>
++#endif
++
++struct drm_nulldisp_gem_create {
++    __u64 size; /* in */
++    __u32 flags; /* in */
++    __u32 handle; /* out */
++};
++
++struct drm_nulldisp_gem_mmap {
++    __u32 handle; /* in */
++    __u32 pad;
++    __u64 offset; /* out */
++};
++
++#define NULLDISP_GEM_CPU_PREP_READ (1 << 0)
++#define NULLDISP_GEM_CPU_PREP_WRITE (1 << 1)
++#define NULLDISP_GEM_CPU_PREP_NOWAIT (1 << 2)
++
++struct drm_nulldisp_gem_cpu_prep {
++    __u32 handle; /* in */
++    __u32 flags; /* in */
++};
++
++struct drm_nulldisp_gem_cpu_fini {
++    __u32 handle; /* in */
++    __u32 pad;
++};
++
++/*
++ * DRM command numbers, relative to DRM_COMMAND_BASE.
++ * These defines must be prefixed with "DRM_".
++ */
++#define DRM_NULLDISP_GEM_CREATE 0x00
++#define DRM_NULLDISP_GEM_MMAP 0x01
++#define DRM_NULLDISP_GEM_CPU_PREP 0x02
++#define DRM_NULLDISP_GEM_CPU_FINI 0x03
++
++/* These defines must be prefixed with "DRM_IOCTL_".
*/ ++#define DRM_IOCTL_NULLDISP_GEM_CREATE \ ++ DRM_IOWR(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CREATE, \ ++ struct drm_nulldisp_gem_create) ++ ++#define DRM_IOCTL_NULLDISP_GEM_MMAP \ ++ DRM_IOWR(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_MMAP, \ ++ struct drm_nulldisp_gem_mmap) ++ ++#define DRM_IOCTL_NULLDISP_GEM_CPU_PREP \ ++ DRM_IOW(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CPU_PREP, \ ++ struct drm_nulldisp_gem_cpu_prep) ++ ++#define DRM_IOCTL_NULLDISP_GEM_CPU_FINI \ ++ DRM_IOW(DRM_COMMAND_BASE + DRM_NULLDISP_GEM_CPU_FINI, \ ++ struct drm_nulldisp_gem_cpu_fini) ++ ++#endif /* defined(__NULLDISP_DRM_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/include/drm/pdp_drm.h b/drivers/gpu/drm/img-rogue/include/drm/pdp_drm.h +new file mode 100644 +index 000000000000..f5d747d3ad10 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/drm/pdp_drm.h +@@ -0,0 +1,105 @@ ++/* ++ * @File ++ * @Title PDP DRM definitions shared between kernel and user space. ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
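For orientation, a user-space sketch of driving the nulldisp GEM ioctls defined above (illustrative only, not part of the patch): it assumes a libdrm environment where drmIoctl() is available from <xf86drm.h>, and the function name is invented.

    /* Illustrative user-space sketch, not part of the patch. */
    #include <xf86drm.h>

    static int example_create_and_map(int fd, __u64 size,
                                      __u32 *handle, __u64 *offset)
    {
        struct drm_nulldisp_gem_create create = { .size = size, .flags = 0 };
        struct drm_nulldisp_gem_mmap map = { 0 };
        int err;

        /* Allocate a GEM object of the requested size. */
        err = drmIoctl(fd, DRM_IOCTL_NULLDISP_GEM_CREATE, &create);
        if (err)
            return err;

        /* Look up the fake mmap offset for the new handle. */
        map.handle = create.handle;
        err = drmIoctl(fd, DRM_IOCTL_NULLDISP_GEM_MMAP, &map);
        if (err)
            return err;

        *handle = create.handle;
        *offset = map.offset; /* pass to mmap() on the DRM fd */
        return 0;
    }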
++ */
++
++#if !defined(__PDP_DRM_H__)
++#define __PDP_DRM_H__
++
++#if defined(__KERNEL__)
++#include <drm/drm.h>
++#else
++#include <drm.h>
++#endif
++
++struct drm_pdp_gem_create {
++    __u64 size; /* in */
++    __u32 flags; /* in */
++    __u32 handle; /* out */
++};
++
++struct drm_pdp_gem_mmap {
++    __u32 handle; /* in */
++    __u32 pad;
++    __u64 offset; /* out */
++};
++
++#define PDP_GEM_CPU_PREP_READ (1 << 0)
++#define PDP_GEM_CPU_PREP_WRITE (1 << 1)
++#define PDP_GEM_CPU_PREP_NOWAIT (1 << 2)
++
++struct drm_pdp_gem_cpu_prep {
++    __u32 handle; /* in */
++    __u32 flags; /* in */
++};
++
++struct drm_pdp_gem_cpu_fini {
++    __u32 handle; /* in */
++    __u32 pad;
++};
++
++/*
++ * DRM command numbers, relative to DRM_COMMAND_BASE.
++ * These defines must be prefixed with "DRM_".
++ */
++#define DRM_PDP_GEM_CREATE 0x00
++#define DRM_PDP_GEM_MMAP 0x01
++#define DRM_PDP_GEM_CPU_PREP 0x02
++#define DRM_PDP_GEM_CPU_FINI 0x03
++
++/* These defines must be prefixed with "DRM_IOCTL_". */
++#define DRM_IOCTL_PDP_GEM_CREATE \
++    DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_CREATE, \
++             struct drm_pdp_gem_create)
++
++#define DRM_IOCTL_PDP_GEM_MMAP\
++    DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_MMAP, \
++             struct drm_pdp_gem_mmap)
++
++#define DRM_IOCTL_PDP_GEM_CPU_PREP \
++    DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_PREP, \
++            struct drm_pdp_gem_cpu_prep)
++
++#define DRM_IOCTL_PDP_GEM_CPU_FINI \
++    DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_FINI, \
++            struct drm_pdp_gem_cpu_fini)
++
++#endif /* defined(__PDP_DRM_H__) */
+diff --git a/drivers/gpu/drm/img-rogue/include/drm/pvr_drm.h b/drivers/gpu/drm/img-rogue/include/drm/pvr_drm.h
+new file mode 100644
+index 000000000000..c0d00c98d8c0
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/drm/pvr_drm.h
+@@ -0,0 +1,146 @@
++/*
++ * @File
++ * @Title PVR DRM definitions shared between kernel and user space.
++ * @Codingstyle LinuxKernel
++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++ * @License Dual MIT/GPLv2
++ *
++ * The contents of this file are subject to the MIT license as set out below.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to deal
++ * in the Software without restriction, including without limitation the rights
++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++ * copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * Alternatively, the contents of this file may be used under the terms of
++ * the GNU General Public License Version 2 ("GPL") in which case the provisions
++ * of GPL are applicable instead of those above.
++ *
++ * If you wish to allow use of your version of this file only under the terms of
++ * GPL, and not to allow others to use your version of this file under the terms
++ * of the MIT license, indicate your decision by deleting the provisions above
++ * and replace them with the notice and other provisions required by GPL as set
++ * out in the file called "GPL-COPYING" included in this distribution. If you do
++ * not delete the provisions above, a recipient may use your version of this file
++ * under the terms of either the MIT license or GPL.
++ *
++ * This License is also included in this distribution in the file called
++ * "MIT-COPYING".
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#if !defined(__PVR_DRM_H__)
++#define __PVR_DRM_H__
++
++#include <linux/types.h>
++
++#if defined(__KERNEL__)
++#include <drm/drm.h>
++#else
++#include <drm.h>
++#endif
++
++/*
++ * IMPORTANT:
++ * All structures below are designed to be the same size when compiled for 32
++ * and/or 64 bit architectures, i.e. there should be no compiler inserted
++ * padding. This is achieved by sticking to the following rules:
++ * 1) only use fixed width types
++ * 2) always naturally align fields by arranging them appropriately and by using
++ *    padding fields when necessary
++ *
++ * These rules should _always_ be followed when modifying or adding new
++ * structures to this file.
++ */
++
++struct drm_pvr_srvkm_cmd {
++    __u32 bridge_id;
++    __u32 bridge_func_id;
++    __u64 in_data_ptr;
++    __u64 out_data_ptr;
++    __u32 in_data_size;
++    __u32 out_data_size;
++};
++
++struct pvr_sync_rename_ioctl_data {
++    char szName[32];
++};
++
++struct pvr_sw_sync_create_fence_data {
++    char name[32];
++    __s32 fence;
++    __u32 pad;
++    __u64 sync_pt_idx;
++};
++
++struct pvr_sw_timeline_advance_data {
++    __u64 sync_pt_idx;
++};
++
++#define PVR_SRVKM_SERVICES_INIT 1
++#define PVR_SRVKM_SYNC_INIT 2
++struct drm_pvr_srvkm_init_data {
++    __u32 init_module;
++};
++
++/* Values used to configure the PVRSRV_DEVICE_INIT_MODE tunable (Linux-only) */
++#define PVRSRV_LINUX_DEV_INIT_ON_PROBE 1
++#define PVRSRV_LINUX_DEV_INIT_ON_OPEN 2
++#define PVRSRV_LINUX_DEV_INIT_ON_CONNECT 3
++
++/*
++ * DRM command numbers, relative to DRM_COMMAND_BASE.
++ * These defines must be prefixed with "DRM_".
++ */
++
++/* PVR Services command */
++#define DRM_PVR_SRVKM_CMD 0
++
++/* PVR Sync commands */
++#define DRM_PVR_SYNC_RENAME_CMD 1
++#define DRM_PVR_SYNC_FORCE_SW_ONLY_CMD 2
++
++/* PVR Software Sync commands */
++#define DRM_PVR_SW_SYNC_CREATE_FENCE_CMD 3
++#define DRM_PVR_SW_SYNC_INC_CMD 4
++
++/* PVR Services Render Device Init command */
++#define DRM_PVR_SRVKM_INIT 5
++
++/* These defines must be prefixed with "DRM_IOCTL_".
*/ ++#define DRM_IOCTL_PVR_SRVKM_CMD \ ++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, \ ++ struct drm_pvr_srvkm_cmd) ++ ++#define DRM_IOCTL_PVR_SYNC_RENAME_CMD \ ++ DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_SYNC_RENAME_CMD, \ ++ struct pvr_sync_rename_ioctl_data) ++ ++#define DRM_IOCTL_PVR_SYNC_FORCE_SW_ONLY_CMD \ ++ DRM_IO(DRM_COMMAND_BASE + DRM_PVR_SYNC_FORCE_SW_ONLY_CMD) ++ ++#define DRM_IOCTL_PVR_SW_SYNC_CREATE_FENCE_CMD \ ++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SW_SYNC_CREATE_FENCE_CMD, \ ++ struct pvr_sw_sync_create_fence_data) ++ ++#define DRM_IOCTL_PVR_SW_SYNC_INC_CMD \ ++ DRM_IOR(DRM_COMMAND_BASE + DRM_PVR_SW_SYNC_INC_CMD, \ ++ struct pvr_sw_timeline_advance_data) ++ ++#define DRM_IOCTL_PVR_SRVKM_INIT \ ++ DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_SRVKM_INIT, \ ++ struct drm_pvr_srvkm_init_data) ++ ++#endif /* defined(__PVR_DRM_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/include/img_3dtypes.h b/drivers/gpu/drm/img-rogue/include/img_3dtypes.h +new file mode 100644 +index 000000000000..916e3a1eedc4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/img_3dtypes.h +@@ -0,0 +1,248 @@ ++/*************************************************************************/ /*! ++@File ++@Title Global 3D types for use by IMG APIs ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines 3D types for use by IMG APIs ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
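The 32/64-bit layout rules in the pvr_drm.h comment above can be verified mechanically; a hypothetical compile-time check, not part of the patch: every field of struct drm_pvr_srvkm_cmd is fixed-width and naturally aligned, so the struct is exactly 32 bytes on both 32- and 64-bit builds.

    /* Illustrative check, not part of the patch: 4+4+8+8+4+4 = 32 bytes,
     * with no compiler-inserted padding on any supported ABI. */
    static_assert(sizeof(struct drm_pvr_srvkm_cmd) == 32,
                  "drm_pvr_srvkm_cmd must have no implicit padding");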
++*/ /**************************************************************************/
++
++#ifndef IMG_3DTYPES_H
++#define IMG_3DTYPES_H
++
++#include <powervr/buffer_attribs.h>
++#include "img_types.h"
++#include "img_defs.h"
++
++/**
++ * Comparison functions
++ * This comparison function is defined as:
++ * A {CmpFunc} B
++ * A is a reference value, e.g., incoming depth etc.
++ * B is the sample value, e.g., value in depth buffer.
++ */
++typedef enum _IMG_COMPFUNC_
++{
++    IMG_COMPFUNC_NEVER, /**< The comparison never succeeds */
++    IMG_COMPFUNC_LESS, /**< The comparison is a less-than operation */
++    IMG_COMPFUNC_EQUAL, /**< The comparison is an equal-to operation */
++    IMG_COMPFUNC_LESS_EQUAL, /**< The comparison is a less-than or equal-to
++                              operation */
++    IMG_COMPFUNC_GREATER, /**< The comparison is a greater-than operation
++                           */
++    IMG_COMPFUNC_NOT_EQUAL, /**< The comparison is a not-equal-to operation
++                             */
++    IMG_COMPFUNC_GREATER_EQUAL, /**< The comparison is a greater-than or
++                                 equal-to operation */
++    IMG_COMPFUNC_ALWAYS, /**< The comparison always succeeds */
++} IMG_COMPFUNC;
++
++/**
++ * Stencil op functions
++ */
++typedef enum _IMG_STENCILOP_
++{
++    IMG_STENCILOP_KEEP, /**< Keep original value */
++    IMG_STENCILOP_ZERO, /**< Set stencil to 0 */
++    IMG_STENCILOP_REPLACE, /**< Replace stencil entry */
++    IMG_STENCILOP_INCR_SAT, /**< Increment stencil entry, clamping to max */
++    IMG_STENCILOP_DECR_SAT, /**< Decrement stencil entry, clamping to zero */
++    IMG_STENCILOP_INVERT, /**< Invert bits in stencil entry */
++    IMG_STENCILOP_INCR, /**< Increment stencil entry,
++                         wrapping if necessary */
++    IMG_STENCILOP_DECR, /**< Decrement stencil entry,
++                         wrapping if necessary */
++} IMG_STENCILOP;
++
++/**
++ * Alpha blending allows colours and textures on one surface
++ * to be blended with transparency onto another surface.
++ * These definitions apply to both source and destination blending
++ * states
++ */
++typedef enum _IMG_BLEND_
++{
++    IMG_BLEND_ZERO = 0, /**< Blend factor is (0,0,0,0) */
++    IMG_BLEND_ONE, /**< Blend factor is (1,1,1,1) */
++    IMG_BLEND_SRC_COLOUR, /**< Blend factor is the source colour */
++    IMG_BLEND_INV_SRC_COLOUR, /**< Blend factor is the inverted source colour
++                               (i.e. 1-src_col) */
++    IMG_BLEND_SRC_ALPHA, /**< Blend factor is the source alpha */
++    IMG_BLEND_INV_SRC_ALPHA, /**< Blend factor is the inverted source alpha
++                              (i.e.
1-src_alpha) */ ++ IMG_BLEND_DEST_ALPHA, /**< Blend factor is the destination alpha */ ++ IMG_BLEND_INV_DEST_ALPHA, /**< Blend factor is the inverted destination ++ alpha */ ++ IMG_BLEND_DEST_COLOUR, /**< Blend factor is the destination colour */ ++ IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination ++ colour */ ++ IMG_BLEND_SRC_ALPHASAT, /**< Blend factor is the alpha saturation (the ++ minimum of (Src alpha, ++ 1 - destination alpha)) */ ++ IMG_BLEND_BLEND_FACTOR, /**< Blend factor is a constant */ ++ IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/ ++ IMG_BLEND_SRC1_COLOUR, /**< Blend factor is the colour outputted from ++ the pixel shader */ ++ IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour ++ outputted from the pixel shader */ ++ IMG_BLEND_SRC1_ALPHA, /**< Blend factor is the alpha outputted from ++ the pixel shader */ ++ IMG_BLEND_INV_SRC1_ALPHA /**< Blend factor is the inverted alpha ++ outputted from the pixel shader */ ++} IMG_BLEND; ++ ++/** ++ * The arithmetic operation to perform when blending ++ */ ++typedef enum _IMG_BLENDOP_ ++{ ++ IMG_BLENDOP_ADD = 0, /**< Result = (Source + Destination) */ ++ IMG_BLENDOP_SUBTRACT, /**< Result = (Source - Destination) */ ++ IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */ ++ IMG_BLENDOP_MIN, /**< Result = min (Source, Destination) */ ++ IMG_BLENDOP_MAX /**< Result = max (Source, Destination) */ ++} IMG_BLENDOP; ++ ++/** ++ * Logical operation to perform when logic ops are enabled ++ */ ++typedef enum _IMG_LOGICOP_ ++{ ++ IMG_LOGICOP_CLEAR = 0, /**< Result = 0 */ ++ IMG_LOGICOP_SET, /**< Result = -1 */ ++ IMG_LOGICOP_COPY, /**< Result = Source */ ++ IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */ ++ IMG_LOGICOP_NOOP, /**< Result = Destination */ ++ IMG_LOGICOP_INVERT, /**< Result = ~Destination */ ++ IMG_LOGICOP_AND, /**< Result = Source & Destination */ ++ IMG_LOGICOP_NAND, /**< Result = ~(Source & Destination) */ ++ IMG_LOGICOP_OR, /**< Result = Source | Destination */ ++ IMG_LOGICOP_NOR, /**< Result = ~(Source | Destination) */ ++ IMG_LOGICOP_XOR, /**< Result = Source ^ Destination */ ++ IMG_LOGICOP_EQUIV, /**< Result = ~(Source ^ Destination) */ ++ IMG_LOGICOP_AND_REVERSE, /**< Result = Source & ~Destination */ ++ IMG_LOGICOP_AND_INVERTED, /**< Result = ~Source & Destination */ ++ IMG_LOGICOP_OR_REVERSE, /**< Result = Source | ~Destination */ ++ IMG_LOGICOP_OR_INVERTED /**< Result = ~Source | Destination */ ++} IMG_LOGICOP; ++ ++/** ++ * Type of fog blending supported ++ */ ++typedef enum _IMG_FOGMODE_ ++{ ++ IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are ++ * based on the value output from the vertex phase */ ++ IMG_FOGMODE_LINEAR, /**< Linear interpolation */ ++ IMG_FOGMODE_EXP, /**< Exponential */ ++ IMG_FOGMODE_EXP2, /**< Exponential squaring */ ++} IMG_FOGMODE; ++ ++/** ++ * Types of filtering ++ */ ++typedef enum _IMG_FILTER_ ++{ ++ IMG_FILTER_DONTCARE, /**< Any filtering mode is acceptable */ ++ IMG_FILTER_POINT, /**< Point filtering */ ++ IMG_FILTER_LINEAR, /**< Bi-linear filtering */ ++ IMG_FILTER_BICUBIC, /**< Bi-cubic filtering */ ++} IMG_FILTER; ++ ++/** ++ * Addressing modes for textures ++ */ ++typedef enum _IMG_ADDRESSMODE_ ++{ ++ IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */ ++ IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */ ++ IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */ ++ IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */ ++ IMG_ADDRESSMODE_CLAMPBORDER, ++ 
IMG_ADDRESSMODE_OGL_CLAMP, ++ IMG_ADDRESSMODE_OVG_TILEFILL, ++ IMG_ADDRESSMODE_DONTCARE, ++} IMG_ADDRESSMODE; ++ ++/** ++ * Culling based on winding order of triangle. ++ */ ++typedef enum _IMG_CULLMODE_ ++{ ++ IMG_CULLMODE_NONE, /**< Don't cull */ ++ IMG_CULLMODE_FRONTFACING, /**< Front facing triangles */ ++ IMG_CULLMODE_BACKFACING, /**< Back facing triangles */ ++} IMG_CULLMODE; ++ ++/** ++ * Colour for clearing surfaces. ++ * The four elements of the 4 x 32 bit array will map to colour ++ * R,G,B,A components, in order. ++ * For YUV colour space the order is Y,U,V. ++ * For Depth and Stencil formats D maps to R and S maps to G. ++ */ ++typedef union IMG_CLEAR_COLOUR_TAG { ++ IMG_UINT32 aui32[4]; ++ IMG_INT32 ai32[4]; ++ IMG_FLOAT af32[4]; ++} IMG_CLEAR_COLOUR; ++ ++static_assert(sizeof(IMG_FLOAT) == sizeof(IMG_INT32), "Size of IMG_FLOAT is not 32 bits."); ++ ++/*! ************************************************************************//** ++@brief Specifies the MSAA resolve operation. ++*/ /**************************************************************************/ ++typedef enum _IMG_RESOLVE_OP_ ++{ ++ IMG_RESOLVE_BLEND = 0, /*!< box filter on the samples */ ++ IMG_RESOLVE_MIN = 1, /*!< minimum of the samples */ ++ IMG_RESOLVE_MAX = 2, /*!< maximum of the samples */ ++ IMG_RESOLVE_SAMPLE0 = 3, /*!< choose sample 0 */ ++ IMG_RESOLVE_SAMPLE1 = 4, /*!< choose sample 1 */ ++ IMG_RESOLVE_SAMPLE2 = 5, /*!< choose sample 2 */ ++ IMG_RESOLVE_SAMPLE3 = 6, /*!< choose sample 3 */ ++ IMG_RESOLVE_SAMPLE4 = 7, /*!< choose sample 4 */ ++ IMG_RESOLVE_SAMPLE5 = 8, /*!< choose sample 5 */ ++ IMG_RESOLVE_SAMPLE6 = 9, /*!< choose sample 6 */ ++ IMG_RESOLVE_SAMPLE7 = 10, /*!< choose sample 7 */ ++} IMG_RESOLVE_OP; ++ ++ ++#endif /* IMG_3DTYPES_H */ ++/****************************************************************************** ++ End of file (img_3dtypes.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/img_defs.h b/drivers/gpu/drm/img-rogue/include/img_defs.h +new file mode 100644 +index 000000000000..a79e8a65d3cf +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/img_defs.h +@@ -0,0 +1,567 @@ ++/*************************************************************************/ /*! ++@File ++@Title Common header containing type definitions for portability ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Contains variable and structure definitions. Any platform ++ specific types should be defined in this file. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
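For orientation, a sketch of filling the IMG_CLEAR_COLOUR union defined above (illustrative only, not part of the patch; ExampleOpaqueRed is an invented name): the element order is R,G,B,A for colour formats, Y,U,V for YUV, and D maps to element 0 with S in element 1 for depth/stencil.

    /* Illustrative sketch, not part of the patch. */
    static IMG_CLEAR_COLOUR ExampleOpaqueRed(void)
    {
        IMG_CLEAR_COLOUR sClear;

        sClear.af32[0] = 1.0f; /* R */
        sClear.af32[1] = 0.0f; /* G */
        sClear.af32[2] = 0.0f; /* B */
        sClear.af32[3] = 1.0f; /* A */

        return sClear; /* aui32[]/ai32[] alias the same 128 bits */
    }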
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef IMG_DEFS_H ++#define IMG_DEFS_H ++ ++#if defined(__linux__) && defined(__KERNEL__) ++#include <linux/types.h> ++#else ++#include <stddef.h> ++#endif ++#if !(defined(__linux__) && defined(__KERNEL__)) ++#include <assert.h> ++#endif ++ ++#include "img_types.h" ++ ++#if defined(NO_INLINE_FUNCS) ++ #define INLINE ++ #define FORCE_INLINE ++#else ++#if defined(__cplusplus) || defined(INTEGRITY_OS) ++ #if !defined(INLINE) ++ #define INLINE inline ++ #endif ++ #define FORCE_INLINE static inline ++#else ++#if !defined(INLINE) ++ #define INLINE __inline ++#endif ++#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_) ++ #define FORCE_INLINE __forceinline ++#else ++ #define FORCE_INLINE static __inline ++#endif ++#endif ++#endif ++ ++/* True if the GCC version is at least the given version. False for older ++ * versions of GCC, or other compilers. ++ */ ++#if defined(__GNUC__) ++#define GCC_VERSION_AT_LEAST(major, minor) \ ++ (__GNUC__ > (major) || \ ++ (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor))) ++#else ++#define GCC_VERSION_AT_LEAST(major, minor) 0 ++#endif ++ ++/* Use Clang's __has_extension and __has_builtin macros if available. */ ++#if defined(__has_extension) ++#define has_clang_extension(e) __has_extension(e) ++#else ++#define has_clang_extension(e) 0 ++#endif ++ ++#if defined(__has_builtin) ++#define has_clang_builtin(e) __has_builtin(e) ++#else ++#define has_clang_builtin(e) 0 ++#endif ++ ++/* Use this in any file, or use attributes under GCC - see below */ ++#ifndef PVR_UNREFERENCED_PARAMETER ++#define PVR_UNREFERENCED_PARAMETER(param) ((void)(param)) ++#endif ++ ++/* static_assert(condition, "message to print if it fails"); ++ * ++ * Assert something at compile time. If the assertion fails, try to print ++ * the message, otherwise do nothing. static_assert is available if: ++ * ++ * - It's already defined as a macro (e.g. by <assert.h> in C11) ++ * - We're using MSVC which exposes static_assert unconditionally ++ * - We're using a C++ compiler that supports C++11 ++ * - We're using GCC 4.6 and up in C mode (in which case it's available as ++ * _Static_assert) ++ * ++ * In all other cases, fall back to an equivalent that makes an invalid ++ * declaration.
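++ * (The fallback below declares an extern array whose size is negative ++ * exactly when the assertion fails, forcing a compile-time error.)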
++ */ ++#if !defined(static_assert) && !defined(_MSC_VER) && \ ++ (!defined(__cplusplus) || __cplusplus < 201103L) || defined(__KLOCWORK__) ++ /* static_assert isn't already available */ ++ #if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \ ++ (defined(__clang__) && has_clang_extension(c_static_assert))) ++ #define static_assert _Static_assert ++ #else ++ #define static_assert(expr, message) \ ++ extern int static_assert_failed[(expr) ? 1 : -1] __attribute__((unused)) ++ #endif ++#endif ++ ++/* ++ * unreachable("explanation") can be used to indicate to the compiler that ++ * some parts of the code can never be reached, like the default branch ++ * of a switch that covers all real-world possibilities, even though other ++ * integer values exist, for instance. ++ * ++ * The message will be printed as an assert() when debugging. ++ * ++ * Note: there is no need to add a 'return' or any error handling after ++ * calling unreachable(), as this call will never return. ++ */ ++#if defined(__linux__) && defined(__KERNEL__) ++/* Kernel has its own unreachable(), which is a simple infinite loop */ ++#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) ++ #define unreachable(msg) \ ++ do { \ ++ assert(!(msg)); \ ++ __builtin_unreachable(); \ ++ } while (false) ++#elif defined(_MSC_VER) ++ #define unreachable(msg) \ ++ do { \ ++ assert(!(msg)); \ ++ __assume(0); \ ++ } while (false) ++#else ++ #define unreachable(msg) \ ++ do { \ ++ assert(!(msg)); \ ++ while (1); \ ++ } while (false) ++#endif ++ ++/* ++ * assume(x > 2 && x <= 7) works like an assert(), except it hints to the ++ * compiler what it can assume to optimise the code, like a limited range ++ * of parameter values. ++ */ ++#if has_clang_builtin(__builtin_assume) ++ #define assume(expr) \ ++ do { \ ++ assert(expr); \ ++ __builtin_assume(expr); \ ++ } while (false) ++#elif defined(_MSC_VER) ++ #define assume(expr) \ ++ do { \ ++ assert(expr); \ ++ __assume(expr); \ ++ } while (false) ++#elif defined(__linux__) && defined(__KERNEL__) ++ #define assume(expr) ((void)(expr)) ++#elif GCC_VERSION_AT_LEAST(4, 5) || has_clang_builtin(__builtin_unreachable) ++ #define assume(expr) \ ++ do { \ ++ if (unlikely(!(expr))) \ ++ unreachable("Assumption isn't true: " # expr); \ ++ } while (false) ++#else ++ #define assume(expr) assert(expr) ++#endif ++ ++/*! Macro to calculate the n-byte aligned value from that supplied rounding up. ++ * n must be a power of two. ++ * ++ * Both arguments should be of a type with the same size, otherwise the macro may ++ * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n. ++ */ ++#define PVR_ALIGN(_x, _n) (((_x)+((_n)-1U)) & ~((_n)-1U)) ++ ++#if defined(_WIN32) ++ ++#if defined(WINDOWS_WDF) ++ ++ /* ++ * For WINDOWS_WDF drivers we don't want these defines to overwrite calling conventions propagated through the build system. ++ * This 'empty' choice helps to resolve all the calling conv issues.
++ * ++ */ ++ #define IMG_CALLCONV ++ #define C_CALLCONV ++ ++ #define IMG_INTERNAL ++ #define IMG_RESTRICT __restrict ++ ++ /* ++ * The proper way of dll linking under MS compilers is made of two things: ++ * - decorate implementation with __declspec(dllexport) ++ * this decoration helps compiler with making the so called ++ * 'export library' ++ * - decorate forward-declaration (in a source dependent on a dll) with ++ * __declspec(dllimport), this decoration helps the compiler to make ++ * faster and smaller code in terms of calling dll-imported functions ++ * ++ * Usually these decorations are performed by having a single macro ++ * definition that expands to the proper __declspec() depending on the ++ * translation unit, dllexport inside the dll source and dllimport outside ++ * the dll source. Having IMG_EXPORT and IMG_IMPORT resolving to the same ++ * __declspec() makes no sense, but at least works. ++ */ ++ #define IMG_IMPORT __declspec(dllexport) ++ #define IMG_EXPORT __declspec(dllexport) ++ ++#else ++ ++ #define IMG_CALLCONV __stdcall ++ #define IMG_INTERNAL ++ #define IMG_EXPORT __declspec(dllexport) ++ #define IMG_RESTRICT __restrict ++ #define C_CALLCONV __cdecl ++ ++ /* ++ * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations ++ * match. Some compilers require the header to be declared IMPORT, while ++ * the implementation is declared EXPORT. ++ */ ++ #define IMG_IMPORT IMG_EXPORT ++ ++#endif ++ ++#if defined(UNDER_WDDM) ++ #ifndef _INC_STDLIB ++ #if defined(__mips) ++ /* do nothing */ ++ #elif defined(UNDER_MSBUILD) ++ /* do nothing */ ++ #else ++ _CRTIMP void __cdecl abort(void); ++ #endif ++ #endif ++#endif /* UNDER_WDDM */ ++#else ++ #if (defined(__linux__) || defined(__QNXNTO__)) && defined(__KERNEL__) ++ #define IMG_INTERNAL ++ #define IMG_EXPORT ++ #define IMG_CALLCONV ++ #elif defined(__linux__) || defined(__METAG) || defined(__mips) || defined(__QNXNTO__) || defined(__riscv) ++ #define IMG_CALLCONV ++ #define C_CALLCONV ++ ++ #if defined(__METAG) ++ #define IMG_INTERNAL ++ #else ++ #define IMG_INTERNAL __attribute__((visibility("hidden"))) ++ #endif ++ ++ #define IMG_EXPORT __attribute__((visibility("default"))) ++ #define IMG_RESTRICT __restrict__ ++ #elif defined(INTEGRITY_OS) ++ #define IMG_CALLCONV ++ #define IMG_INTERNAL ++ #define IMG_EXPORT ++ #define IMG_RESTRICT ++ #define C_CALLCONV ++ #define __cdecl ++ ++ #ifndef USE_CODE ++ #define IMG_ABORT() printf("IMG_ABORT was called.\n") ++ #endif ++ #else ++ #error("define an OS") ++ #endif ++ ++#endif ++ ++/* Use default definition if not overridden */ ++#ifndef IMG_ABORT ++ #if defined(EXIT_ON_ABORT) ++ #define IMG_ABORT() exit(1) ++ #else ++ #define IMG_ABORT() abort() ++ #endif ++#endif ++ ++/* The best way to suppress unused parameter warnings using GCC is to use a ++ * variable attribute. Place the __maybe_unused between the type and name of an ++ * unused parameter in a function parameter list e.g. 'int __maybe_unused var'. ++ * This should only be used in GCC build environments, for example, in files ++ * that compile only on Linux.
++ * Other files should use PVR_UNREFERENCED_PARAMETER ++ */ ++ ++/* Kernel macros for compiler attributes */ ++/* Note: param positions start at 1 */ ++#if defined(__linux__) && defined(__KERNEL__) ++ #include <linux/compiler.h> ++ ++ #if !defined(__fallthrough) ++ #if GCC_VERSION_AT_LEAST(7, 0) ++ #define __fallthrough __attribute__((__fallthrough__)) ++ #else ++ #define __fallthrough ++ #endif ++ #endif ++#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) ++ #define __must_check __attribute__((warn_unused_result)) ++ #define __maybe_unused __attribute__((unused)) ++ #define __malloc __attribute__((malloc)) ++ ++ /* Bionic's <sys/cdefs.h> might have defined these already */ ++ /* See https://android.googlesource.com/platform/bionic.git/+/master/libc/include/sys/cdefs.h */ ++ #if !defined(__packed) ++ #define __packed __attribute__((packed)) ++ #endif ++ #if !defined(__aligned) ++ #define __aligned(n) __attribute__((aligned(n))) ++ #endif ++ #if !defined(__noreturn) ++ #define __noreturn __attribute__((noreturn)) ++ #endif ++ ++ /* That one compiler that supports attributes but doesn't support ++ * the printf attribute... */ ++ #if defined(__GNUC__) ++ #define __printf(fmt, va) __attribute__((format(printf, (fmt), (va)))) ++ #else ++ #define __printf(fmt, va) ++ #endif /* defined(__GNUC__) */ ++ ++ #if defined(__cplusplus) && (__cplusplus >= 201703L) ++ #define __fallthrough [[fallthrough]] ++ #elif GCC_VERSION_AT_LEAST(7, 0) ++ #define __fallthrough __attribute__((__fallthrough__)) ++ #else ++ #define __fallthrough ++ #endif ++ ++ #define __user ++ #define __force ++ #define __iomem ++#else ++ /* Silently ignore those attributes */ ++ #define __printf(fmt, va) ++ #define __packed ++ #define __aligned(n) ++ #define __must_check ++ #define __maybe_unused ++ #define __malloc ++ ++ #if defined(_MSC_VER) || defined(CC_ARM) ++ #define __noreturn __declspec(noreturn) ++ #else ++ #define __noreturn ++ #endif ++ ++ /* This may already have been defined, e.g. by SAL (Source Annotation Language) */ ++ #if !defined(__fallthrough) ++ #define __fallthrough ++ #endif ++ ++ #define __user ++ #define __force ++ #define __iomem ++#endif ++ ++ ++/* Other attributes, following the same style */ ++#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) ++ #define __const_function __attribute__((const)) ++#else ++ #define __const_function ++#endif ++ ++ ++/* GCC builtins */ ++#if defined(__linux__) && defined(__KERNEL__) ++ #include <linux/compiler.h> ++#elif defined(__GNUC__) || defined(INTEGRITY_OS) ++ ++/* Klocwork does not support __builtin_expect, which makes the actual condition ++ * expressions hidden during analysis, affecting it negatively. */ ++#if !defined(__KLOCWORK__) && !defined(INTEGRITY_OS) && !defined(DEBUG) ++ #define likely(x) __builtin_expect(!!(x), 1) ++ #define unlikely(x) __builtin_expect(!!(x), 0) ++#endif ++ ++ /* Compiler memory barrier to prevent reordering */ ++ #define barrier() __asm__ __volatile__("": : :"memory") ++#else ++ #define barrier() static_assert(0, "barrier() isn't supported by your compiler"); ++#endif ++ ++/* That one OS that defines one but not the other...
*/ ++#ifndef likely ++ #define likely(x) (x) ++#endif ++#ifndef unlikely ++ #define unlikely(x) (x) ++#endif ++ ++/* These two macros are also provided by the kernel */ ++#ifndef BIT ++#define BIT(b) (1UL << (b)) ++#endif ++ ++#ifndef BIT_ULL ++#define BIT_ULL(b) (1ULL << (b)) ++#endif ++ ++#define BIT_SET(f, b) BITMASK_SET((f), BIT(b)) ++#define BIT_UNSET(f, b) BITMASK_UNSET((f), BIT(b)) ++#define BIT_TOGGLE(f, b) BITMASK_TOGGLE((f), BIT(b)) ++#define BIT_ISSET(f, b) BITMASK_HAS((f), BIT(b)) ++ ++#define BITMASK_SET(f, m) do { ((f) |= (m)); } while (false) ++#define BITMASK_UNSET(f, m) do { ((f) &= ~(m)); } while (false) ++#define BITMASK_TOGGLE(f, m) do { ((f) ^= (m)); } while (false) ++#define BITMASK_HAS(f, m) (((f) & (m)) == (m)) /* the bits from the mask are all set */ ++#define BITMASK_ANY(f, m) (((f) & (m)) != 0U) /* any bit from the mask is set */ ++ ++#ifndef MAX ++#define MAX(a, b) (((a) > (b)) ? (a) : (b)) ++#endif ++ ++#ifndef MIN ++#define MIN(a, b) (((a) < (b)) ? (a) : (b)) ++#endif ++ ++#ifndef CLAMP ++#define CLAMP(min, max, n) ((n) < (min) ? (min) : ((n) > (max) ? (max) : (n))) ++#endif ++ ++#define SWAP(X, Y) (X) ^= (Y); (Y) ^= (X); (X) ^= (Y); ++ ++ ++#if defined(__linux__) && defined(__KERNEL__) ++ #include <linux/types.h> ++ #include <linux/stddef.h> ++#endif ++ ++/* Get a structure's address from the address of a member */ ++#define IMG_CONTAINER_OF(ptr, type, member) \ ++ (type *) ((uintptr_t) (ptr) - offsetof(type, member)) ++ ++/* Get a new pointer with an offset (in bytes) from a base address, useful ++ * when traversing byte buffers and accessing data in buffers through struct ++ * pointers. ++ * Note, this macro is not equivalent to or replacing offsetof() */ ++#define IMG_OFFSET_ADDR(addr, offset_in_bytes) \ ++ (void*)&(((IMG_UINT8*)(void*)(addr))[offset_in_bytes]) ++ ++/* Get a new pointer with an offset (in dwords) from a base address, useful ++ * when traversing byte buffers and accessing data in buffers through struct ++ * pointers. ++ * Note, this macro is not equivalent to or replacing offsetof() */ ++#define IMG_OFFSET_ADDR_DW(addr, offset_in_dwords) \ ++ (void*)(((IMG_UINT32*)(void*)(addr)) + (offset_in_dwords)) ++ ++/* The number of elements in a fixed-sized array */ ++#ifndef ARRAY_SIZE ++#define ARRAY_SIZE(ARR) (sizeof(ARR) / sizeof((ARR)[0])) ++#endif ++ ++/* To guarantee that __func__ can be used, define it as a macro here if it ++ isn't already provided by the compiler. */ ++#if defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus < 201103L) ++#define __func__ __FUNCTION__ ++#endif ++ ++#if defined(__cplusplus) ++/* C++ Specific: ++ * Disallow use of copy and assignment operator within a class. ++ * Should be placed under private.
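++ * Illustrative use: class CNoCopy { private: IMG_DISALLOW_COPY_AND_ASSIGN(CNoCopy); };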
*/ ++#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \ ++ C(const C&); \ ++ void operator=(const C&) ++#endif ++ ++#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG) && !defined(__mips) && !defined(__riscv) ++ #include "/usr/include/valgrind/memcheck.h" ++ ++ #define VG_MARK_INITIALIZED(pvData,ui32Size) VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size) ++ #define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size) ++ #define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size) ++ #define VG_ASSERT_DEFINED(pvData,ui32Size) VALGRIND_CHECK_MEM_IS_DEFINED(pvData,ui32Size) ++#else ++ #if defined(_MSC_VER) ++ # define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) ++ #else ++ # define PVR_MSC_SUPPRESS_4127 ++ #endif ++ ++ #define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) ++ #define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) ++ #define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) ++ #define VG_ASSERT_DEFINED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while (false) ++#endif ++ ++#define IMG_STRINGIFY_IMPL(x) # x ++#define IMG_STRINGIFY(x) IMG_STRINGIFY_IMPL(x) ++ ++#if defined(INTEGRITY_OS) ++ /* Definitions not present in INTEGRITY. */ ++ #define PATH_MAX 200 ++#endif ++ ++#if defined(__clang__) || defined(__GNUC__) ++ /* __SIZEOF_POINTER__ is defined already by these compilers */ ++#elif defined(INTEGRITY_OS) ++ #if defined(__Ptr_Is_64) ++ #define __SIZEOF_POINTER__ 8 ++ #else ++ #define __SIZEOF_POINTER__ 4 ++ #endif ++#elif defined(_WIN32) ++ #define __SIZEOF_POINTER__ sizeof(char *) ++#else ++ #warning Unknown OS - using default method to determine whether CPU arch is 64-bit. ++ #define __SIZEOF_POINTER__ sizeof(char *) ++#endif ++ ++/* RDI8567: gcc/clang/llvm load/store optimisations may cause issues with ++ * uncached device memory allocations. Some pointers are made 'volatile' ++ * to prevent those optimisations being applied to writes through those ++ * pointers. ++ */ ++#if (GCC_VERSION_AT_LEAST(7, 0) || defined(__clang__)) && (defined(__arm64__) || defined(__aarch64__)) ++#define NOLDSTOPT volatile ++/* after applying 'volatile' to a pointer, we may need to cast it to 'void *' ++ * to keep it compatible with its existing uses. ++ */ ++#define NOLDSTOPT_VOID (void *) ++ ++#define NOLDSTOPT_REQUIRED 1 ++#else ++#define NOLDSTOPT ++#define NOLDSTOPT_VOID ++#endif ++ ++#endif /* IMG_DEFS_H */ ++/***************************************************************************** ++ End of file (img_defs.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/img_drm_fourcc_internal.h b/drivers/gpu/drm/img-rogue/include/img_drm_fourcc_internal.h +new file mode 100644 +index 000000000000..ee88e90cde1e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/img_drm_fourcc_internal.h +@@ -0,0 +1,94 @@ ++/*************************************************************************/ /*! ++@File ++@Title Wrapper around drm_fourcc.h ++@Description FourCCs and the DRM framebuffer modifiers should be added here ++ unless they are used by kernel code or a known user outside of ++ the DDK. If FourCCs or DRM framebuffer modifiers are required ++ outside of the DDK, they shall be moved to the corresponding ++ public header. ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef IMG_DRM_FOURCC_INTERNAL_H ++#define IMG_DRM_FOURCC_INTERNAL_H ++ ++#include <powervr/img_drm_fourcc.h> ++ ++/* ++ * Modifier names are structured using the following convention, ++ * with underscores (_) between items: ++ * - prefix: DRM_FORMAT_MOD ++ * - identifier for our driver: PVR ++ * - category: FBCDC ++ * - compression tile dimension: 8x8, 16x4, 32x2 ++ * - FBCDC version: V0, V1, V2, V3, V7, V8, V10, V12 ++ */ ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0 fourcc_mod_code(PVR, 1) ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V0_FIX fourcc_mod_code(PVR, 2) /* Fix for HW_BRN_37464 */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V1 - moved to the public header */ ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V2 fourcc_mod_code(PVR, 4) ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V3 fourcc_mod_code(PVR, 5) ++/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7 - moved to the public header */ ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V8 fourcc_mod_code(PVR, 18) ++/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_LOSSY25_V13 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_LOSSY50_V13 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_8x8_LOSSY75_V13 - moved to the public header */ ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0 fourcc_mod_code(PVR, 7) ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V0_FIX fourcc_mod_code(PVR, 8) /* Fix for HW_BRN_37464 */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1 - moved to the public header */ ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V2 fourcc_mod_code(PVR, 10) ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V3 fourcc_mod_code(PVR, 11) ++/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7 - moved to the public header */ ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V8 fourcc_mod_code(PVR, 19) ++/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_V13 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_LOSSY25_V13 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_LOSSY50_V13 - moved to the public header */ ++/* DRM_FORMAT_MOD_PVR_FBCDC_16x4_LOSSY75_V13 - moved to the public header */ ++#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V1 fourcc_mod_code(PVR, 13) ++#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V3 fourcc_mod_code(PVR, 14) ++#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V8 fourcc_mod_code(PVR, 20) ++/* DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10 - moved to the public header */ ++#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V12 fourcc_mod_code(PVR, 17) ++ ++#endif /* IMG_DRM_FOURCC_INTERNAL_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/img_elf.h b/drivers/gpu/drm/img-rogue/include/img_elf.h +new file mode 100644 +index 000000000000..8837d9592599 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/img_elf.h +@@ -0,0 +1,111 @@ ++/*************************************************************************/ /*! ++@File img_elf.h ++@Title IMG ELF file definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Platform RGX ++@Description Definitions for ELF file structures used in the DDK. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(IMG_ELF_H) ++#define IMG_ELF_H ++ ++#include "img_types.h" ++ ++/* ELF format defines */ ++#define ELF_PT_LOAD (0x1U) /* Program header identifier as Load */ ++#define ELF_SHT_SYMTAB (0x2U) /* Section identifier as Symbol Table */ ++#define ELF_SHT_STRTAB (0x3U) /* Section identifier as String Table */ ++#define MAX_STRTAB_NUM (0x8U) /* Maximum number of string table in the ELF file */ ++ ++/* Redefined structs of ELF format */ ++typedef struct ++{ ++ IMG_UINT8 ui32Eident[16]; ++ IMG_UINT16 ui32Etype; ++ IMG_UINT16 ui32Emachine; ++ IMG_UINT32 ui32Eversion; ++ IMG_UINT32 ui32Eentry; ++ IMG_UINT32 ui32Ephoff; ++ IMG_UINT32 ui32Eshoff; ++ IMG_UINT32 ui32Eflags; ++ IMG_UINT16 ui32Eehsize; ++ IMG_UINT16 ui32Ephentsize; ++ IMG_UINT16 ui32Ephnum; ++ IMG_UINT16 ui32Eshentsize; ++ IMG_UINT16 ui32Eshnum; ++ IMG_UINT16 ui32Eshtrndx; ++} IMG_ELF_HDR; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32Stname; ++ IMG_UINT32 ui32Stvalue; ++ IMG_UINT32 ui32Stsize; ++ IMG_UINT8 ui32Stinfo; ++ IMG_UINT8 ui32Stother; ++ IMG_UINT16 ui32Stshndx; ++} IMG_ELF_SYM; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32Shname; ++ IMG_UINT32 ui32Shtype; ++ IMG_UINT32 ui32Shflags; ++ IMG_UINT32 ui32Shaddr; ++ IMG_UINT32 ui32Shoffset; ++ IMG_UINT32 ui32Shsize; ++ IMG_UINT32 ui32Shlink; ++ IMG_UINT32 ui32Shinfo; ++ IMG_UINT32 ui32Shaddralign; ++ IMG_UINT32 ui32Shentsize; ++} IMG_ELF_SHDR; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32Ptype; ++ IMG_UINT32 ui32Poffset; ++ IMG_UINT32 ui32Pvaddr; ++ IMG_UINT32 ui32Ppaddr; ++ IMG_UINT32 ui32Pfilesz; ++ IMG_UINT32 ui32Pmemsz; ++ IMG_UINT32 ui32Pflags; ++ IMG_UINT32 ui32Palign; ++} IMG_ELF_PROGRAM_HDR; ++ ++#endif /* IMG_ELF_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/img_types.h b/drivers/gpu/drm/img-rogue/include/img_types.h +new file mode 100644 +index 000000000000..c2654d21edb3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/img_types.h +@@ -0,0 +1,324 @@ ++/*************************************************************************/ /*! ++@File ++@Title Global types for use by IMG APIs ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines type aliases for use by IMG APIs. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef IMG_TYPES_H ++#define IMG_TYPES_H ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* To use C99 types and definitions, there are two special cases we need to ++ * cater for: ++ * ++ * - Visual Studio: in VS2010 or later, some standard headers are available, ++ * and MSVC has its own built-in sized types. We can define the C99 types ++ * in terms of these. ++ * ++ * - Linux kernel code: C99 sized types are defined in <linux/types.h>, but ++ * some other features (like macros for constants or printf format ++ * strings) are missing, so we need to fill in the gaps ourselves. ++ * ++ * For other cases (userspace code under Linux, Android or Neutrino, or ++ * firmware code), we can include the standard headers. ++ */ ++#if defined(_MSC_VER) ++ #include <stdbool.h> /* bool */ ++ #include "msvc_types.h" ++#elif defined(__linux__) && defined(__KERNEL__) ++ #include <linux/types.h> ++ #include <linux/version.h> ++ #include "kernel_types.h" ++#elif defined(__linux__) || defined(__METAG) || defined(__MINGW32__) || \ ++ defined(__QNXNTO__) || defined(INTEGRITY_OS) || defined(__riscv) ++ #include <stddef.h> /* NULL */ ++ #include <stdint.h> ++ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */ ++ #include <limits.h> /* INT_MIN, etc */ ++ #include <stdbool.h> /* bool */ ++#elif defined(__mips) ++ #include <stddef.h> /* NULL */ ++ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */ ++ #include <stdbool.h> /* bool */ ++#else ++ #error C99 support not set up for this build ++#endif ++ ++/* ++ * Due to a Klocwork bug, 'true'/'false' constants are not recognized to be of ++ * boolean type. This results in a large number of false-positives being reported ++ * (MISRA.ETYPE.ASSIGN.2012: "An expression value of essential type 'signed char' ++ * is assigned to an object of essential type 'bool'"). Work around this by ++ * redefining those constants with cast to bool added.
++ */ ++#if defined(__KLOCWORK__) && !defined(__cplusplus) ++#undef true ++#undef false ++#define true ((bool) 1) ++#define false ((bool) 0) ++#endif ++ ++typedef unsigned int IMG_UINT; ++typedef int IMG_INT; ++ ++typedef uint8_t IMG_UINT8, *IMG_PUINT8; ++typedef uint8_t IMG_BYTE, *IMG_PBYTE; ++typedef int8_t IMG_INT8; ++typedef char IMG_CHAR, *IMG_PCHAR; ++ ++typedef uint16_t IMG_UINT16, *IMG_PUINT16; ++typedef int16_t IMG_INT16; ++typedef uint32_t IMG_UINT32, *IMG_PUINT32; ++typedef int32_t IMG_INT32, *IMG_PINT32; ++#if defined(INTEGRITY_OS) ++#if __INT_BIT >= 32U ++#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## U)) ++#elif __LONG_BIT >= 32U ++#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## UL)) ++#elif defined(__LLONG_BIT) && __LLONG_BIT >= 32U ++#define IMG_UINT32_C(n) ((IMG_UINT32)(n ## ULL)) ++#endif ++#else /* defined(INTEGRITY_OS) */ ++#define IMG_UINT32_C(c) ((IMG_UINT32)UINT32_C(c)) ++#endif /* defined(INTEGRITY_OS) */ ++ ++typedef uint64_t IMG_UINT64, *IMG_PUINT64; ++typedef int64_t IMG_INT64; ++#define IMG_INT64_C(c) INT64_C(c) ++#if defined(INTEGRITY_OS) ++#if __INT_BIT >= 64U ++#define IMG_UINT64_C(n) (n ## U) ++#elif defined(__LONG_BIT) && __LONG_BIT >= 64U ++#define IMG_UINT64_C(n) (n ## UL) ++#elif defined(__LLONG_BIT) && __LLONG_BIT >= 64U ++#define IMG_UINT64_C(n) (n ## ULL) ++#endif ++#else /* defined(INTEGRITY_OS) */ ++#define IMG_UINT64_C(c) UINT64_C(c) ++#endif /* defined(INTEGRITY_OS) */ ++#define IMG_UINT16_C(c) UINT16_C(c) ++#define IMG_UINT64_FMTSPEC PRIu64 ++#define IMG_UINT64_FMTSPECX PRIX64 ++#define IMG_UINT64_FMTSPECx PRIx64 ++#define IMG_UINT64_FMTSPECo PRIo64 ++#define IMG_INT64_FMTSPECd PRId64 ++ ++#define IMG_UINT16_MAX UINT16_MAX ++#define IMG_UINT32_MAX UINT32_MAX ++#define IMG_UINT64_MAX UINT64_MAX ++ ++#define IMG_INT16_MAX INT16_MAX ++#define IMG_INT32_MAX INT32_MAX ++#define IMG_INT64_MAX INT64_MAX ++ ++/* Linux kernel mode does not use floating point */ ++typedef float IMG_FLOAT, *IMG_PFLOAT; ++typedef double IMG_DOUBLE; ++ ++typedef union ++{ ++ IMG_UINT32 ui32; ++ IMG_FLOAT f; ++} IMG_UINT32_FLOAT; ++ ++typedef int IMG_SECURE_TYPE; ++ ++typedef enum tag_img_bool ++{ ++ IMG_FALSE = 0, ++ IMG_TRUE = 1, ++ IMG_FORCE_ALIGN = 0x7FFFFFFF ++} IMG_BOOL, *IMG_PBOOL; ++ ++#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) ++typedef IMG_CHAR const* IMG_PCCHAR; ++#endif ++ ++/* Format specifiers for 'size_t' type */ ++#if defined(_MSC_VER) || defined(__MINGW32__) ++#define IMG_SIZE_FMTSPEC "%Iu" ++#define IMG_SIZE_FMTSPECX "%Ix" ++#else ++#define IMG_SIZE_FMTSPEC "%zu" ++#define IMG_SIZE_FMTSPECX "%zx" ++#endif ++ ++#if defined(__linux__) && defined(__KERNEL__) ++/* prints the function name when used with printk */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#define IMG_PFN_FMTSPEC "%ps" ++#else ++#define IMG_PFN_FMTSPEC "%pf" ++#endif ++#else ++#define IMG_PFN_FMTSPEC "%p" ++#endif ++ ++typedef void *IMG_HANDLE; ++ ++/* Process IDs */ ++typedef IMG_UINT32 IMG_PID; ++ ++/* OS connection type */ ++typedef int IMG_OS_CONNECTION; ++ ++ ++/* ++ * Address types. ++ * All types used to refer to a block of memory are wrapped in structures ++ * to enforce some degree of type safety, i.e. a IMG_DEV_VIRTADDR cannot ++ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the ++ * same thing. ++ * ++ * There is an assumption that the system contains at most one non-cpu mmu, ++ * and a memory block is only mapped by the MMU once. ++ * ++ * Different devices could have offset views of the physical address space. 
++ * ++ */ ++ ++ ++/* ++ * ++ * +------------+ +------------+ +------------+ +------------+ ++ * | CPU | | DEV | | DEV | | DEV | ++ * +------------+ +------------+ +------------+ +------------+ ++ * | | | | ++ * | void * |IMG_DEV_VIRTADDR |IMG_DEV_VIRTADDR | ++ * | \-------------------/ | ++ * | | | ++ * +------------+ +------------+ | ++ * | MMU | | MMU | | ++ * +------------+ +------------+ | ++ * | | | ++ * | | | ++ * | | | ++ * +--------+ +---------+ +--------+ ++ * | Offset | | (Offset)| | Offset | ++ * +--------+ +---------+ +--------+ ++ * | | IMG_DEV_PHYADDR | ++ * | | | ++ * | | IMG_DEV_PHYADDR | ++ * +---------------------------------------------------------------------+ ++ * | System Address bus | ++ * +---------------------------------------------------------------------+ ++ * ++ */ ++ ++#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX ++#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX ++#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX ++#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX ++ ++/* cpu physical address */ ++typedef struct ++{ ++#if defined(UNDER_WDDM) || defined(WINDOWS_WDF) ++ uintptr_t uiAddr; ++#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (uintptr_t)(var) ++#define CPUPHYADDR_FMTARG(var) (IMG_UINT64)(var) ++#define CPUPHYADDR_UINT_FMTSPEC "0x%016" IMG_UINT64_FMTSPECx ++#elif defined(__linux__) && defined(__KERNEL__) ++ phys_addr_t uiAddr; ++#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (phys_addr_t)(var) ++#define CPUPHYADDR_FMTARG(var) (&var) ++#define CPUPHYADDR_UINT_FMTSPEC "%pa" ++#else ++ IMG_UINT64 uiAddr; ++#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (IMG_UINT64)(var) ++#define CPUPHYADDR_FMTARG(var) (var) ++#define CPUPHYADDR_UINT_FMTSPEC "0x%016" IMG_UINT64_FMTSPECx ++#endif ++} IMG_CPU_PHYADDR; ++ ++/* device physical address */ ++typedef struct ++{ ++ IMG_UINT64 uiAddr; ++} IMG_DEV_PHYADDR; ++ ++/* dma address */ ++typedef struct ++{ ++ IMG_UINT64 uiAddr; ++} IMG_DMA_ADDR; ++ ++/* ++ rectangle structure ++*/ ++typedef struct ++{ ++ IMG_INT32 x0; ++ IMG_INT32 y0; ++ IMG_INT32 x1; ++ IMG_INT32 y1; ++} IMG_RECT; ++ ++typedef struct ++{ ++ IMG_INT16 x0; ++ IMG_INT16 y0; ++ IMG_INT16 x1; ++ IMG_INT16 y1; ++} IMG_RECT_16; ++ ++/* ++ * box structure ++ */ ++typedef struct ++{ ++ IMG_INT32 x0; ++ IMG_INT32 y0; ++ IMG_INT32 z0; ++ IMG_INT32 x1; ++ IMG_INT32 y1; ++ IMG_INT32 z1; ++} IMG_BOX; ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* IMG_TYPES_H */ ++/****************************************************************************** ++ End of file (img_types.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/kernel_types.h b/drivers/gpu/drm/img-rogue/include/kernel_types.h +new file mode 100644 +index 000000000000..c3305102fc20 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/kernel_types.h +@@ -0,0 +1,137 @@ ++/*************************************************************************/ /*! ++@Title C99-compatible types and definitions for Linux kernel code ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include <linux/kernel.h> ++ ++/* Limits of specified-width integer types */ ++ ++/* S8_MIN, etc were added in kernel version 3.14. The other versions are for ++ * earlier kernels. They can be removed once older kernels don't need to be ++ * supported.
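++ * (These supply the C99 <stdint.h> limit macros, which kernel headers do ++ * not define.)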
++ */ ++#ifdef S8_MIN ++ #define INT8_MIN S8_MIN ++#else ++ #define INT8_MIN (-128) ++#endif ++ ++#ifdef S8_MAX ++ #define INT8_MAX S8_MAX ++#else ++ #define INT8_MAX 127 ++#endif ++ ++#ifdef U8_MAX ++ #define UINT8_MAX U8_MAX ++#else ++ #define UINT8_MAX 0xFF ++#endif ++ ++#ifdef S16_MIN ++ #define INT16_MIN S16_MIN ++#else ++ #define INT16_MIN (-32768) ++#endif ++ ++#ifdef S16_MAX ++ #define INT16_MAX S16_MAX ++#else ++ #define INT16_MAX 32767 ++#endif ++ ++#ifdef U16_MAX ++ #define UINT16_MAX U16_MAX ++#else ++ #define UINT16_MAX 0xFFFF ++#endif ++ ++#ifdef S32_MIN ++ #define INT32_MIN S32_MIN ++#else ++ #define INT32_MIN (-2147483647 - 1) ++#endif ++ ++#ifdef S32_MAX ++ #define INT32_MAX S32_MAX ++#else ++ #define INT32_MAX 2147483647 ++#endif ++ ++#ifdef U32_MAX ++ #define UINT32_MAX U32_MAX ++#else ++ #define UINT32_MAX 0xFFFFFFFF ++#endif ++ ++#ifdef S64_MIN ++ #define INT64_MIN S64_MIN ++#else ++ #define INT64_MIN (-9223372036854775807LL) ++#endif ++ ++#ifdef S64_MAX ++ #define INT64_MAX S64_MAX ++#else ++ #define INT64_MAX 9223372036854775807LL ++#endif ++ ++#ifdef U64_MAX ++ #define UINT64_MAX U64_MAX ++#else ++ #define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL ++#endif ++ ++/* Macros for integer constants */ ++#define INT8_C S8_C ++#define UINT8_C U8_C ++#define INT16_C S16_C ++#define UINT16_C U16_C ++#define INT32_C S32_C ++#define UINT32_C U32_C ++#define INT64_C S64_C ++#define UINT64_C U64_C ++ ++/* Format conversion of integer types */ ++ ++#define PRIX64 "llX" ++#define PRIx64 "llx" ++#define PRIu64 "llu" ++#define PRId64 "lld" +diff --git a/drivers/gpu/drm/img-rogue/include/linux_sw_sync.h b/drivers/gpu/drm/img-rogue/include/linux_sw_sync.h +new file mode 100644 +index 000000000000..c12c650294a2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/linux_sw_sync.h +@@ -0,0 +1,52 @@ ++/*************************************************************************/ /*! ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef _UAPI_LINUX_PVR_SW_SYNC_H ++#define _UAPI_LINUX_PVR_SW_SYNC_H ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) ++ ++#include <linux/types.h> ++ ++#include "pvrsrv_sync_km.h" ++#include "pvr_drm.h" ++ ++#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */ ++#endif +diff --git a/drivers/gpu/drm/img-rogue/include/lock_types.h b/drivers/gpu/drm/img-rogue/include/lock_types.h +new file mode 100644 +index 000000000000..370ffc025d05 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/lock_types.h +@@ -0,0 +1,92 @@ ++/*************************************************************************/ /*! ++@File lock_types.h ++@Title Locking types ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Locking specific enums, defines and structures ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++#ifndef LOCK_TYPES_H ++#define LOCK_TYPES_H ++ ++/* In Linux kernel mode we are using the kernel mutex implementation directly ++ * with macros. This allows us to use the kernel lockdep feature for lock ++ * debugging. */ ++#if defined(__linux__) && defined(__KERNEL__) ++ ++#include <linux/mutex.h> ++#include <linux/atomic.h> ++/* The mutex is defined as a pointer to be compatible with the other code. This ++ * isn't ideal and usually you wouldn't do that in kernel code. */ ++typedef struct mutex *POS_LOCK; ++typedef struct rw_semaphore *POSWR_LOCK; ++typedef spinlock_t *POS_SPINLOCK; ++typedef atomic_t ATOMIC_T; ++ ++#else /* defined(__linux__) && defined(__KERNEL__) */ ++#include "img_types.h" /* needed for IMG_INT */ ++typedef struct OS_LOCK_TAG *POS_LOCK; ++ ++#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) ++typedef struct OSWR_LOCK_TAG *POSWR_LOCK; ++#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ ++typedef struct OSWR_LOCK_TAG { ++ IMG_UINT32 ui32Dummy; ++} *POSWR_LOCK; ++#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ ++ ++#if defined(__linux__) ++ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; ++#elif defined(__QNXNTO__) ++ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; ++#elif defined(_WIN32) ++ /* ++ * Dummy definition. WDDM doesn't use Services, but some headers ++ * still have to be shared. This is one such case. ++ */ ++ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; ++#elif defined(INTEGRITY_OS) ++ /* Only lower 32bits are used in OS ATOMIC APIs to have consistent behaviour across all OS */ ++ typedef struct OS_ATOMIC_TAG {IMG_INT64 counter;} ATOMIC_T; ++#else ++ #error "Please type-define an atomic lock for this environment" ++#endif ++ ++#endif /* defined(__linux__) && defined(__KERNEL__) */ ++ ++#endif /* LOCK_TYPES_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/log2.h b/drivers/gpu/drm/img-rogue/include/log2.h +new file mode 100644 +index 000000000000..2182a0223ca6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/log2.h +@@ -0,0 +1,417 @@ ++/*************************************************************************/ /*! ++@Title Integer log2 and related functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above.
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef LOG2_H ++#define LOG2_H ++ ++#include "img_defs.h" ++ ++/*************************************************************************/ /*! ++@Description Determine if a number is a power of two. ++@Input n ++@Return True if n is a power of 2, false otherwise. True if n == 0. ++*/ /**************************************************************************/ ++static INLINE IMG_BOOL __const_function IsPower2(uint32_t n) ++{ ++ /* C++ needs this cast. */ ++ return (IMG_BOOL)((n & (n - 1U)) == 0U); ++} ++ ++/*************************************************************************/ /*! ++@Description Determine if a number is a power of two. ++@Input n ++@Return True if n is a power of 2, false otherwise. True if n == 0. ++*/ /**************************************************************************/ ++static INLINE IMG_BOOL __const_function IsPower2_64(uint64_t n) ++{ ++ /* C++ needs this cast. */ ++ return (IMG_BOOL)((n & (n - 1U)) == 0U); ++} ++ ++/* Code using GNU GCC intrinsics */ ++#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) ++ ++/* CHAR_BIT is typically found in <limits.h>. For all the platforms where ++ * CHAR_BIT is not available, define it here with the assumption that there ++ * are 8 bits in a byte */ ++#ifndef CHAR_BIT ++#define CHAR_BIT 8U ++#endif ++ ++/*************************************************************************/ /*! ++@Description Compute floor(log2(n)) ++@Input n ++@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function FloorLog2(uint32_t n) ++{ ++ if (unlikely(n == 0U)) ++ { ++ return 0; ++ } ++ else ++ { ++ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); ++ return uNumBits - (uint32_t)__builtin_clz(n) - 1U; ++ } ++} ++ ++/*************************************************************************/ /*! ++@Description Compute floor(log2(n)) ++@Input n ++@Return log2(n) rounded down to the nearest integer.
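For example, FloorLog2_64(48) == 5.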
Returns 0 if n == 0 ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function FloorLog2_64(uint64_t n) ++{ ++ if (unlikely(n == 0U)) ++ { ++ return 0; ++ } ++ else ++ { ++ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); ++ return uNumBits - (uint32_t)__builtin_clzll(n) - 1U; ++ } ++} ++ ++/*************************************************************************/ /*! ++@Description Compute ceil(log2(n)) ++@Input n ++@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function CeilLog2(uint32_t n) ++{ ++ if (unlikely(n == 0U || n == 1U)) ++ { ++ return 0; ++ } ++ else ++ { ++ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); ++ ++ n--; /* Handle powers of 2 */ ++ return uNumBits - (uint32_t)__builtin_clz(n); ++ } ++} ++ ++/*************************************************************************/ /*! ++@Description Compute ceil(log2(n)) ++@Input n ++@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function CeilLog2_64(uint64_t n) ++{ ++ if (unlikely(n == 0U || n == 1U)) ++ { ++ return 0; ++ } ++ else ++ { ++ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n); ++ ++ n--; /* Handle powers of 2 */ ++ return uNumBits - (uint32_t)__builtin_clzll(n); ++ } ++} ++ ++/*************************************************************************/ /*! ++@Description Compute log2(n) for exact powers of two only ++@Input n Must be a power of two ++@Return log2(n) ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function ExactLog2(uint32_t n) ++{ ++ return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clz(n) - 1U; ++} ++ ++/*************************************************************************/ /*! ++@Description Compute log2(n) for exact powers of two only ++@Input n Must be a power of two ++@Return log2(n) ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function ExactLog2_64(uint64_t n) ++{ ++ return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clzll(n) - 1U; ++} ++ ++/*************************************************************************/ /*! ++@Description Round a non-power-of-two number up to the next power of two. ++@Input n ++@Return n rounded up to the next power of two. If n is zero or ++ already a power of two, return n unmodified. ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n) ++{ ++ /* Cases with n greater than 2^31 need separate handling ++ * because the result of (1<<32) is undefined. */ ++ if (unlikely( n == 0U || n > (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U))) ++ { ++ return 0; ++ } ++ ++ /* Return n if it is already a power of 2 */ ++ if ((IMG_BOOL)((n & (n - 1U)) == 0U)) ++ { ++ return n; ++ } ++ ++ return (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - (uint32_t)__builtin_clz(n)); ++} ++ ++/*************************************************************************/ /*! ++@Description Round a non-power-of-two number up to the next power of two. ++@Input n ++@Return n rounded up to the next power of two.
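For example, RoundUpToNextPowerOfTwo_64(33) == 64.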
If n is zero or
++ already a power of two, return n unmodified.
++*/ /**************************************************************************/
++static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n)
++{
++    /* Cases with n greater than 2^63 need separate handling,
++     * as the result of (1<<64) is undefined. */
++    if (unlikely( n == 0U || n > (uint64_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U)))
++    {
++        return 0;
++    }
++
++    /* Return n if it is already a power of 2 */
++    if ((IMG_BOOL)((n & (n - 1U)) == 0U))
++    {
++        return n;
++    }
++
++    return (uint64_t)1 << ((uint64_t)CHAR_BIT * sizeof(n) - (uint64_t)__builtin_clzll(n));
++}
++
++#else /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */
++
++/*************************************************************************/ /*!
++@Description Round a non-power-of-two number up to the next power of two.
++@Input n
++@Return n rounded up to the next power of two. If n is zero or
++ already a power of two, return n unmodified.
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n)
++{
++    n--;
++    n |= n >> 1;  /* handle 2 bit numbers */
++    n |= n >> 2;  /* handle 4 bit numbers */
++    n |= n >> 4;  /* handle 8 bit numbers */
++    n |= n >> 8;  /* handle 16 bit numbers */
++    n |= n >> 16; /* handle 32 bit numbers */
++    n++;
++
++    return n;
++}
++
++/*************************************************************************/ /*!
++@Description Round a non-power-of-two number up to the next power of two.
++@Input n
++@Return n rounded up to the next power of two. If n is zero or
++ already a power of two, return n unmodified.
++*/ /**************************************************************************/
++static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n)
++{
++    n--;
++    n |= n >> 1;  /* handle 2 bit numbers */
++    n |= n >> 2;  /* handle 4 bit numbers */
++    n |= n >> 4;  /* handle 8 bit numbers */
++    n |= n >> 8;  /* handle 16 bit numbers */
++    n |= n >> 16; /* handle 32 bit numbers */
++    n |= n >> 32; /* handle 64 bit numbers */
++    n++;
++
++    return n;
++}
++
++/*************************************************************************/ /*!
++@Description Compute floor(log2(n))
++@Input n
++@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function FloorLog2(uint32_t n)
++{
++    uint32_t ui32log2 = 0;
++
++    while ((n >>= 1) != 0U)
++    {
++        ui32log2++;
++    }
++
++    return ui32log2;
++}
++
++/*************************************************************************/ /*!
++@Description Compute floor(log2(n))
++@Input n
++@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function FloorLog2_64(uint64_t n)
++{
++    uint32_t ui32log2 = 0;
++
++    while ((n >>= 1) != 0U)
++    {
++        ui32log2++;
++    }
++
++    return ui32log2;
++}
++
++/*************************************************************************/ /*!
++@Description Compute ceil(log2(n))
++@Input n
++@Return log2(n) rounded up to the nearest integer.
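For example, CeilLog2(5) = 3, since 2^2 < 5 <= 2^3.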
Returns 0 if n == 0
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function CeilLog2(uint32_t n)
++{
++    uint32_t ui32log2 = 0;
++
++    if (n == 0U)
++    {
++        return 0;
++    }
++
++    n--; /* Handle powers of 2 */
++
++    while (n != 0U)
++    {
++        ui32log2++;
++        n >>= 1;
++    }
++
++    return ui32log2;
++}
++
++/*************************************************************************/ /*!
++@Description Compute ceil(log2(n))
++@Input n
++@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function CeilLog2_64(uint64_t n)
++{
++    uint32_t ui32log2 = 0;
++
++    if (n == 0U)
++    {
++        return 0;
++    }
++
++    n--; /* Handle powers of 2 */
++
++    while (n != 0U)
++    {
++        ui32log2++;
++        n >>= 1;
++    }
++
++    return ui32log2;
++}
++
++/*************************************************************************/ /*!
++@Description Compute log2(n) for exact powers of two only
++@Input n Must be a power of two
++@Return log2(n)
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function ExactLog2(uint32_t n)
++{
++    static const uint32_t b[] =
++        {0xAAAAAAAAU, 0xCCCCCCCCU, 0xF0F0F0F0U, 0xFF00FF00U, 0xFFFF0000U};
++    uint32_t r = (n & b[0]) != 0U;
++
++    r |= (uint32_t) ((n & b[4]) != 0U) << 4;
++    r |= (uint32_t) ((n & b[3]) != 0U) << 3;
++    r |= (uint32_t) ((n & b[2]) != 0U) << 2;
++    r |= (uint32_t) ((n & b[1]) != 0U) << 1;
++
++    return r;
++}
++
++/*************************************************************************/ /*!
++@Description Compute log2(n) for exact powers of two only
++@Input n Must be a power of two
++@Return log2(n)
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function ExactLog2_64(uint64_t n)
++{
++    static const uint64_t b[] =
++        {0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL,
++         0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL,
++         0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL};
++    uint32_t r = (n & b[0]) != 0U;
++
++    r |= (uint32_t) ((n & b[5]) != 0U) << 5;
++    r |= (uint32_t) ((n & b[4]) != 0U) << 4;
++    r |= (uint32_t) ((n & b[3]) != 0U) << 3;
++    r |= (uint32_t) ((n & b[2]) != 0U) << 2;
++    r |= (uint32_t) ((n & b[1]) != 0U) << 1;
++
++    return r;
++}
++
++#endif /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */
++
++/*************************************************************************/ /*!
++@Description Compute floor(log2(size)), where size is the max of 3 sizes
++ This is almost always the ONLY EVER valid use of FloorLog2.
++ Usually CeilLog2() should be used instead.
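++
++ As a worked example, NumMipLevels(5, 5, 1) = FloorLog2(5) + 1 = 2 + 1 = 3,
++ whereas CeilLog2(5) + 1 would give 4, one level too many.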
++ For a 5x5x1 texture, the 3 miplevels are: ++ 0: 5x5x1 ++ 1: 2x2x1 ++ 2: 1x1x1 ++ ++ For an 8x8x1 texture, the 4 miplevels are: ++ 0: 8x8x1 ++ 1: 4x4x1 ++ 2: 2x2x1 ++ 3: 1x1x1 ++ ++ ++@Input sizeX, sizeY, sizeZ ++@Return Count of mipmap levels for given dimensions ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function NumMipLevels(uint32_t sizeX, uint32_t sizeY, uint32_t sizeZ) ++{ ++ ++ uint32_t maxSize = MAX(MAX(sizeX, sizeY), sizeZ); ++ return FloorLog2(maxSize) + 1U; ++} ++ ++#endif /* LOG2_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/multicore_defs.h b/drivers/gpu/drm/img-rogue/include/multicore_defs.h +new file mode 100644 +index 000000000000..2ca4e064d886 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/multicore_defs.h +@@ -0,0 +1,53 @@ ++/**************************************************************************/ /*! ++@File ++@Title RGX Multicore Information flags ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#ifndef RGX_MULTICORE_DEFS_H
++#define RGX_MULTICORE_DEFS_H
++
++/* Capability bits returned to client in RGXGetMultiCoreInfo */
++#define RGX_MULTICORE_CAPABILITY_FRAGMENT_EN (0x00000040U)
++#define RGX_MULTICORE_CAPABILITY_GEOMETRY_EN (0x00000020U)
++#define RGX_MULTICORE_CAPABILITY_COMPUTE_EN (0x00000010U)
++#define RGX_MULTICORE_CAPABILITY_PRIMARY_EN (0x00000008U)
++#define RGX_MULTICORE_ID_CLRMSK (0xFFFFFFF8U)
++
++#endif /* RGX_MULTICORE_DEFS_H */
+diff --git a/drivers/gpu/drm/img-rogue/include/osfunc_common.h b/drivers/gpu/drm/img-rogue/include/osfunc_common.h
+new file mode 100644
+index 000000000000..539ef2c042d1
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/osfunc_common.h
+@@ -0,0 +1,300 @@
++/*************************************************************************/ /*!
++@File
++@Title OS functions header
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description OS specific API definitions
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef OSFUNC_COMMON_H
++/*! @cond Doxygen_Suppress */
++#define OSFUNC_COMMON_H
++/*! @endcond */
++
++#if defined(__KERNEL__) && defined(__linux__)
++#include <linux/string.h>
++#else
++#include <string.h>
++#endif
++
++#include "img_types.h"
++
++#ifdef __cplusplus
++extern "C"
++{
++#endif
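++
++/* Usage sketch (illustrative only; the buffer names below are hypothetical):
++ * writes to a mapping that may be uncached should go through the
++ * OSDeviceMem* wrappers rather than raw memset()/memcpy(), e.g.
++ *
++ *   OSDeviceMemSet(pvFWBuffer, 0, uiBufferSize);
++ *   OSDeviceMemCopy(pvFWBuffer, pvSource, uiSourceSize);
++ *
++ * so that the safe arm64/uncached fallbacks defined below are picked up
++ * where needed.
++ */
++
++/**************************************************************************/ /*!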
++@Function DeviceMemSet
++@Description Set memory, whose mapping may be uncached, to a given value.
++ Safe implementation for all architectures for uncached mapping,
++ optimised for speed where supported by tool chains.
++ In such cases, OSDeviceMemSet() is defined as a call to this
++ function.
++@Input pvDest void pointer to the memory to be set
++@Input ui8Value byte containing the value to be set
++@Input ui32Size the number of bytes to be set to the given value
++@Return None
++ */ /**************************************************************************/
++void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size);
++
++/**************************************************************************/ /*!
++@Function DeviceMemCopy
++@Description Copy values from one area of memory to another. Safe
++ implementation for all architectures for an uncached mapping of
++ either the source or the destination, optimised for speed where
++ supported by tool chains. In such cases, OSDeviceMemCopy() is
++ defined as a call to this function.
++@Input pvDst void pointer to the destination memory
++@Input pvSrc void pointer to the source memory
++@Input ui32Size the number of bytes to be copied
++@Return None
++ */ /**************************************************************************/
++void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size);
++
++/**************************************************************************/ /*!
++@Function DeviceMemSetBytes
++@Description Potentially very slow (but safe) memset fallback for non-GNU C
++ compilers for arm64/aarch64
++@Input pvDest void pointer to the memory to be set
++@Input ui8Value byte containing the value to be set
++@Input ui32Size the number of bytes to be set to the given value
++@Return None
++ */ /**************************************************************************/
++void DeviceMemSetBytes(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size);
++
++/**************************************************************************/ /*!
++@Function DeviceMemCopyBytes
++@Description Potentially very slow (but safe) memcpy fallback for non-GNU C
++ compilers for arm64/aarch64
++@Input pvDst void pointer to the destination memory
++@Input pvSrc void pointer to the source memory
++@Input ui32Size the number of bytes to be copied
++@Return None
++ */ /**************************************************************************/
++void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t ui32Size);
++
++/**************************************************************************/ /*!
++@Function StringLCopy
++@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest.
++ If no null byte ('\0') is contained within the first uDataSize-1
++ characters of the source string, the destination string will be
++ truncated. If the length of the source string is less than uDataSize,
++ an additional NUL byte will be copied to the destination string
++ to ensure that the string is NUL-terminated.
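++ For example (hypothetical 16-byte destination buffer),
++ StringLCopy(acName, "rogue", 16) copies six bytes ("rogue" plus the
++ terminator) and returns 5, the length of the source string.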
++@Input pszDest char pointer to the destination string ++@Input pszSrc const char pointer to the source string ++@Input uDataSize the maximum number of bytes to be copied ++@Return Size of the source string ++ */ /**************************************************************************/ ++size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); ++ ++#if defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) ++#if defined(__GNUC__) ++/* Workarounds for assumptions made that memory will not be mapped uncached ++ * in kernel or user address spaces on arm64 platforms (or other testing). ++ */ ++ ++#define OSDeviceMemSet(a,b,c) DeviceMemSet((a), (b), (c)) ++#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c)) ++ ++#else /* defined __GNUC__ */ ++ ++#define OSDeviceMemSet(a,b,c) DeviceMemSetBytes((a), (b), (c)) ++#define OSDeviceMemCopy(a,b,c) DeviceMemCopyBytes((a), (b), (c)) ++ ++#endif /* defined __GNUC__ */ ++ ++#else /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ ++ ++/* Everything else */ ++ ++/**************************************************************************/ /*! ++@Function OSDeviceMemSet ++@Description Set memory, whose mapping may be uncached, to a given value. ++ On some architectures, additional processing may be needed ++ if the mapping is uncached. ++@Input a void pointer to the memory to be set ++@Input b byte containing the value to be set ++@Input c the number of bytes to be set to the given value ++@Return Pointer to the destination memory. ++ */ /**************************************************************************/ ++#define OSDeviceMemSet(a,b,c) \ ++ do { \ ++ if ((c) != 0) \ ++ { \ ++ (void) memset((a), (b), (c)); \ ++ (void) *(volatile IMG_UINT32*)((void*)(a)); \ ++ } \ ++ } while (false) ++ ++/**************************************************************************/ /*! ++@Function OSDeviceMemCopy ++@Description Copy values from one area of memory, to another, when one ++ or both mappings may be uncached. ++ On some architectures, additional processing may be needed ++ if mappings are uncached. ++@Input a void pointer to the destination memory ++@Input b void pointer to the source memory ++@Input c the number of bytes to be copied ++@Return Pointer to the destination memory. ++ */ /**************************************************************************/ ++#define OSDeviceMemCopy(a,b,c) \ ++ do { \ ++ if ((c) != 0) \ ++ { \ ++ memcpy((a), (b), (c)); \ ++ (void) *(volatile IMG_UINT32*)((void*)(a)); \ ++ } \ ++ } while (false) ++ ++#endif /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ ++ ++/**************************************************************************/ /*! ++@Function OSCachedMemSet ++@Description Set memory, where the mapping is known to be cached, to a ++ given value. This function exists to allow an optimal memset ++ to be performed when memory is known to be cached. ++@Input a void pointer to the memory to be set ++@Input b byte containing the value to be set ++@Input c the number of bytes to be set to the given value ++@Return Pointer to the destination memory. ++ */ /**************************************************************************/ ++#define OSCachedMemSet(a,b,c) (void) memset((a), (b), (c)) ++ ++/**************************************************************************/ /*! 
++@Function OSCachedMemCopy
++@Description Copy values from one area of memory, to another, when both
++ mappings are known to be cached.
++ This function exists to allow an optimal memcpy to be
++ performed when memory is known to be cached.
++@Input a void pointer to the destination memory
++@Input b void pointer to the source memory
++@Input c the number of bytes to be copied
++@Return Pointer to the destination memory.
++ */ /**************************************************************************/
++#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
++
++#if defined(__KERNEL__)
++
++/**************************************************************************/ /*!
++@Function OSCachedMemSetWMB
++@Description Set memory, where the mapping is known to be cached or
++ write-combine, to a given value and issue a write memory
++ barrier after. This function exists to allow an optimal memset
++ to be performed when memory is known to be cached or
++ write-combine.
++@Input a void pointer to the memory to be set
++@Input b byte containing the value to be set
++@Input c the number of bytes to be set to the given value
++@Return Pointer to the destination memory.
++ */ /**************************************************************************/
++#if !defined(SERVICES_SC)
++#define OSCachedMemSetWMB(a,b,c) \
++    do { \
++        if ((c) != 0) \
++        { \
++            (void) memset((a), (b), (c)); \
++            OSWriteMemoryBarrier(a); \
++        } \
++    } while (false)
++#else
++#define OSCachedMemSetWMB(a,b,c) \
++    do { \
++        (void) memset((a), (b), (c)); \
++        OSWriteMemoryBarrier(); \
++    } while (false)
++#endif /* !defined(SERVICES_SC) */
++/**************************************************************************/ /*!
++@Function OSCachedMemCopyWMB
++@Description Copy values from one area of memory, to another, when both
++ mappings are known to be cached or write-combine and issue
++ a write memory barrier after.
++ This function exists to allow an optimal memcpy to be
++ performed when memory is known to be cached or write-combine.
++@Input a void pointer to the destination memory
++@Input b void pointer to the source memory
++@Input c the number of bytes to be copied
++@Return Pointer to the destination memory.
++ */ /**************************************************************************/
++#if !defined(SERVICES_SC)
++#define OSCachedMemCopyWMB(a,b,c) \
++    do { \
++        if ((c) != 0) \
++        { \
++            (void) memcpy((a), (b), (c)); \
++            OSWriteMemoryBarrier(a); \
++        } \
++    } while (false)
++#else
++#define OSCachedMemCopyWMB(a,b,c) \
++    do { \
++        (void) memcpy((a), (b), (c)); \
++        OSWriteMemoryBarrier(); \
++    } while (false)
++#endif /* !defined(SERVICES_SC) */
++#endif /* defined(__KERNEL__) */
++
++/**************************************************************************/ /*!
++@Function OSStringLCopy
++@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest.
++ If no null byte ('\0') is contained within the first uDataSize-1
++ characters of the source string, the destination string will be
++ truncated. If the length of the source string is less than uDataSize,
++ an additional NUL byte will be copied to the destination string
++ to ensure that the string is NUL-terminated.
++@Input a char pointer to the destination string ++@Input b const char pointer to the source string ++@Input c the maximum number of bytes to be copied ++@Return Size of the source string ++ */ /**************************************************************************/ ++#if defined(__QNXNTO__) || (defined(__linux__) && defined(__KERNEL__) && !defined(DEBUG)) ++#define OSStringLCopy(a,b,c) strlcpy((a), (b), (c)) ++#else /* defined(__QNXNTO__) ... */ ++#define OSStringLCopy(a,b,c) StringLCopy((a), (b), (c)) ++#endif /* defined(__QNXNTO__) ... */ ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* OSFUNC_COMMON_H */ ++ ++/****************************************************************************** ++ End of file (osfunc_common.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/pdumpdefs.h b/drivers/gpu/drm/img-rogue/include/pdumpdefs.h +new file mode 100644 +index 000000000000..3f8cccabc824 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pdumpdefs.h +@@ -0,0 +1,249 @@ ++/*************************************************************************/ /*! ++@File ++@Title PDUMP definitions header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description PDUMP definitions header ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PDUMPDEFS_H ++#define PDUMPDEFS_H ++ ++/*! 
PDump Pixel Format Enumeration */ ++typedef enum _PDUMP_PIXEL_FORMAT_ ++{ ++ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2, ++ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9, ++/* PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10, */ ++ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11, ++ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12, ++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13, ++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15, ++ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16, ++ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17, ++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18, ++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20, ++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25, ++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26, ++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27, ++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28, ++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29, ++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31, ++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36, ++ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37, ++ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41, ++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44, ++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45, ++ PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46, ++ PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49, ++ PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66, ++ ++ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff ++ ++} PDUMP_PIXEL_FORMAT; ++ ++typedef enum _PDUMP_FBC_SWIZZLE_ ++{ ++ PVRSRV_PDUMP_FBC_SWIZZLE_ARGB = 0x0, ++ PVRSRV_PDUMP_FBC_SWIZZLE_ARBG = 0x1, ++ PVRSRV_PDUMP_FBC_SWIZZLE_AGRB = 0x2, ++ PVRSRV_PDUMP_FBC_SWIZZLE_AGBR = 0x3, ++ PVRSRV_PDUMP_FBC_SWIZZLE_ABGR = 0x4, ++ PVRSRV_PDUMP_FBC_SWIZZLE_ABRG = 0x5, ++ PVRSRV_PDUMP_FBC_SWIZZLE_RGBA = 0x8, ++ PVRSRV_PDUMP_FBC_SWIZZLE_RBGA = 0x9, ++ PVRSRV_PDUMP_FBC_SWIZZLE_GRBA = 0xA, ++ PVRSRV_PDUMP_FBC_SWIZZLE_GBRA = 0xB, ++ PVRSRV_PDUMP_FBC_SWIZZLE_BGRA = 0xC, ++ PVRSRV_PDUMP_FBC_SWIZZLE_BRGA = 0xD, 
++} PDUMP_FBC_SWIZZLE; ++ ++/*! PDump addrmode */ ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT 0 ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK 0x000000FF ++ ++#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT 8 ++#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE (1U << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT) ++ ++#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT 12 ++#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK 0x000FF000 ++ ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT 20 ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK 0x00F00000 ++ ++#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT 24 ++#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT 25 ++ ++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT 28 ++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK 0xF0000000 ++ ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE (0U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1 (1U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2 (2U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3 (3U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4 (4U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5 (5U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6 (6U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7 (7U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED (9U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED (11U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_ZTWIDDLED (12U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT) ++ ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE (0U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT (1U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT (2U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT (3U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT (4U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT (5U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE (6U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE (7U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT) ++ ++#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR (1U << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT) ++ ++#define PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT) ++ ++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE (1U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED (2U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2 (3U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE (4U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE (5U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_SURFACE (6U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) ++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_RESOURCE (7U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT) ++#define 
PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4 (8U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4PLUS (9U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_TFBCDC (10U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++
++/*! PDump Poll Operator */
++typedef enum _PDUMP_POLL_OPERATOR
++{
++    PDUMP_POLL_OPERATOR_EQUAL = 0,
++    PDUMP_POLL_OPERATOR_LESS = 1,
++    PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
++    PDUMP_POLL_OPERATOR_GREATER = 3,
++    PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
++    PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
++} PDUMP_POLL_OPERATOR;
++
++
++#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 75 /*!< Max length of a pdump log file name */
++#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 350 /*!< Max length of a pdump comment */
++
++/*!
++ PDump MMU type
++ (Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
++*/
++typedef enum
++{
++    PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE = 1,
++    PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE = 2,
++    PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE = 3,
++    PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE = 4,
++    PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE = 5,
++    PDUMP_MMU_TYPE_VARPAGE_40BIT = 6,
++    PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE = 7,
++    PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE = 8,
++    PDUMP_MMU_TYPE_MIPS_MICROAPTIV = 9,
++    PDUMP_MMU_TYPE_LAST
++} PDUMP_MMU_TYPE;
++
++/*!
++ PDump states
++ These values are used by the bridge call PVRSRVPDumpGetState
++*/
++#define PDUMP_STATE_CAPTURE_FRAME (1U) /*!< Flag represents the PDump being in capture range or not */
++#define PDUMP_STATE_CONNECTED (2U) /*!< Flag represents the PDump Client App being connected or not */
++#define PDUMP_STATE_SUSPENDED (4U) /*!< Flag represents the PDump being suspended or not */
++#define PDUMP_STATE_CAPTURE_IN_INTERVAL (8U) /*!< Flag represents the PDump being in a capture range interval */
++
++/*!
++ PDump Capture modes
++ Values used with calls to PVRSRVPDumpSetDefaultCaptureParams
++*/
++#define PDUMP_CAPMODE_UNSET 0x00000000UL
++#define PDUMP_CAPMODE_FRAMED 0x00000001UL
++#define PDUMP_CAPMODE_CONTINUOUS 0x00000002UL
++#define PDUMP_CAPMODE_BLOCKED 0x00000003UL
++
++#define PDUMP_CAPMODE_MAX PDUMP_CAPMODE_BLOCKED
++
++#endif /* PDUMPDEFS_H */
++
++/*****************************************************************************
++ End of file (pdumpdefs.h)
++*****************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/include/pdumpdesc.h b/drivers/gpu/drm/img-rogue/include/pdumpdesc.h
+new file mode 100644
+index 000000000000..d159bf4ee334
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/pdumpdesc.h
+@@ -0,0 +1,226 @@
++/*************************************************************************/ /*!
++@File pdumpdesc.h
++@Title PDump Descriptor format
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Describes PDump descriptors that may be passed to the
++ extraction routines (SAB).
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(PDUMPDESC_H) ++#define PDUMPDESC_H ++ ++#include "pdumpdefs.h" ++ ++/* ++ * Common fields ++ */ ++#define HEADER_WORD0_TYPE_SHIFT (0) ++#define HEADER_WORD0_TYPE_CLRMSK (0xFFFFFFFFU) ++ ++#define HEADER_WORD1_SIZE_SHIFT (0) ++#define HEADER_WORD1_SIZE_CLRMSK (0x0000FFFFU) ++#define HEADER_WORD1_VERSION_SHIFT (16) ++#define HEADER_WORD1_VERSION_CLRMSK (0xFFFF0000U) ++ ++#define HEADER_WORD2_DATA_SIZE_SHIFT (0) ++#define HEADER_WORD2_DATA_SIZE_CLRMSK (0xFFFFFFFFU) ++ ++ ++/* ++ * The image type descriptor ++ */ ++ ++/* ++ * Header type (IMGBv2) - 'IMGB' in hex + VERSION 2 ++ * Header size - 64 bytes ++ */ ++#define IMAGE_HEADER_TYPE (0x42474D49) ++#define IMAGE_HEADER_SIZE (64) ++#define IMAGE_HEADER_VERSION (2) ++ ++/* ++ * Image type-specific fields ++ */ ++#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT (0) ++#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT (0) ++#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD5_FORMAT_SHIFT (0) ++#define IMAGE_HEADER_WORD5_FORMAT_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT (0) ++#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT (0) ++#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD8_TWIDDLING_SHIFT (0) ++#define IMAGE_HEADER_WORD8_TWIDDLING_CLRMSK (0x000000FFU) ++#define IMAGE_HEADER_WORD8_TWIDDLING_STRIDED (0 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) ++#define IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE (9 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) ++#define IMAGE_HEADER_WORD8_TWIDDLING_ZTWIDDLE (12 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) ++ ++ ++#define IMAGE_HEADER_WORD8_STRIDE_SHIFT (8) ++#define IMAGE_HEADER_WORD8_STRIDE_CLRMSK (0x0000FF00U) ++#define IMAGE_HEADER_WORD8_STRIDE_POSITIVE (0 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) ++#define IMAGE_HEADER_WORD8_STRIDE_NEGATIVE (1 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) ++ ++#define IMAGE_HEADER_WORD8_BIFTYPE_SHIFT (16) ++#define IMAGE_HEADER_WORD8_BIFTYPE_CLRMSK (0x00FF0000U) ++#define IMAGE_HEADER_WORD8_BIFTYPE_NONE (0 << IMAGE_HEADER_WORD8_BIFTYPE_SHIFT) ++ ++#define IMAGE_HEADER_WORD8_FBCTYPE_SHIFT (24) ++#define IMAGE_HEADER_WORD8_FBCTYPE_CLRMSK (0xFF000000U) ++#define IMAGE_HEADER_WORD8_FBCTYPE_8X8 (1 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) ++#define IMAGE_HEADER_WORD8_FBCTYPE_16x4 (2 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) ++#define IMAGE_HEADER_WORD8_FBCTYPE_32x2 (3 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) ++ ++#define IMAGE_HEADER_WORD9_FBCDECOR_SHIFT (0) ++#define IMAGE_HEADER_WORD9_FBCDECOR_CLRMSK (0x000000FFU) ++#define IMAGE_HEADER_WORD9_FBCDECOR_ENABLE (1 << IMAGE_HEADER_WORD9_FBCDECOR_SHIFT) ++ ++/* Align with fbcomp_export_c.h in pdump_tools branch */ ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT (8) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_CLRMSK (0x0000FF00U) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_SAME_AS_GPU (0 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_BASE (1 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_TWIDDLED_EN (2 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* TWIDDLED_ENHANCED */ ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V2 (3 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT1 (4 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2 
(5 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V30_WITH_HEADER_REMAP */ ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT1 (6 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT2 (7 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V31_WITH_HEADER_REMAP */ ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4 (8 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4_PLUS (9 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_TFBC (10 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++ ++#define IMAGE_HEADER_WORD9_LOSSY_SHIFT (16) ++#define IMAGE_HEADER_WORD9_LOSSY_CLRMSK (0x00FF0000U) ++/* Non-TFBC */ ++#define IMAGE_HEADER_WORD9_LOSSY_ON (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++ ++/* TFBC */ ++#define IMAGE_HEADER_WORD9_LOSSY_75 (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++#define IMAGE_HEADER_WORD9_LOSSY_37 (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++#define IMAGE_HEADER_WORD9_LOSSY_50 (2 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++#define IMAGE_HEADER_WORD9_LOSSY_25 (3 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++#define IMAGE_HEADER_WORD9_LOSSY_OFF (0 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++ ++#define IMAGE_HEADER_WORD9_SWIZZLE_SHIFT (24) ++#define IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK (0xFF000000U) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARGB (0x0 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARBG (0x1 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGRB (0x2 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGBR (0x3 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABGR (0x4 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABRG (0x5 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RGBA (0x8 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RBGA (0x9 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GRBA (0xA << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GBRA (0xB << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BGRA (0xC << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BRGA (0xD << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++ ++#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_SHIFT (0) ++#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_SHIFT (0) ++#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_SHIFT (0) ++#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_SHIFT (0) ++#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT (0) ++#define IMAGE_HEADER_WORD14_TFBC_GROUP_CLRMSK (0x000000FFU) ++#define IMAGE_HEADER_WORD14_TFBC_GROUP_25_50_75 (0 << IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT) ++#define IMAGE_HEADER_WORD14_TFBC_GROUP_25_37_50 (1 << IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT) ++ ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT (8) ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_CLRMSK (0x0000FF00U) ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_ALL (0 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_CORR (1 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_ONLY (2 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) ++#define 
IMAGE_HEADER_WORD14_COMP_SCHEME_PTC_ONLY (3 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) ++ ++#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_SHIFT (16) ++#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_CLRMSK (0x00FF0000U) ++#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_EN (1 << IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_SHIFT) /* Treat YUV10 optimal formats as 8 bits */ ++ ++/* IMAGE_HEADER_WORD15_RESERVED2 */ ++ ++/* ++ * The data type descriptor ++ */ ++ ++/* ++ * Header type (IMGCv1) - 'IMGC' in hex + VERSION 0 ++ * Header size - 20 bytes (5 x 32 bit WORDS) ++ */ ++#define DATA_HEADER_TYPE (0x43474D49) ++#define DATA_HEADER_SIZE (20) ++#define DATA_HEADER_VERSION (0) ++ ++/* ++ * The IBIN type descriptor ++ */ ++ ++/* ++ * Header type (IBIN) - 'IBIN' in hex + VERSION 0 ++ * Header size - 12 bytes (3 x 32 bit WORDS) ++ */ ++#define IBIN_HEADER_TYPE (0x4e494249) ++#define IBIN_HEADER_SIZE (12) ++#define IBIN_HEADER_VERSION (0) ++ ++/* ++ * Data type-specific fields ++ */ ++#define DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT (0) ++#define DATA_HEADER_WORD3_ELEMENT_TYPE_CLRMSK (0xFFFFFFFFU) ++ ++#define DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT (0) ++#define DATA_HEADER_WORD4_ELEMENT_COUNT_CLRMSK (0xFFFFFFFFU) ++ ++#endif /* PDUMPDESC_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/public/powervr/buffer_attribs.h b/drivers/gpu/drm/img-rogue/include/public/powervr/buffer_attribs.h +new file mode 100644 +index 000000000000..41eaaaecd19e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/public/powervr/buffer_attribs.h +@@ -0,0 +1,193 @@ ++/*************************************************************************/ /*! ++@File ++@Title 3D types for use by IMG APIs ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License MIT ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef POWERVR_BUFFER_ATTRIBS_H ++#define POWERVR_BUFFER_ATTRIBS_H ++ ++/*! ++ * Memory layouts ++ * Defines how pixels are laid out within a surface. ++ */ ++typedef enum ++{ ++ IMG_MEMLAYOUT_STRIDED, /**< Resource is strided, one row at a time */ ++ IMG_MEMLAYOUT_TWIDDLED, /**< Resource is 2D twiddled to match HW */ ++ IMG_MEMLAYOUT_3DTWIDDLED, /**< Resource is 3D twiddled, classic style */ ++ IMG_MEMLAYOUT_TILED, /**< Resource is tiled, tiling config specified elsewhere. 
*/
++    IMG_MEMLAYOUT_PAGETILED, /**< Resource is pagetiled */
++    IMG_MEMLAYOUT_INVNTWIDDLED, /**< Resource is 2D twiddled !N style */
++} IMG_MEMLAYOUT;
++
++/*!
++ * Rotation types
++ */
++typedef enum
++{
++    IMG_ROTATION_0DEG = 0,
++    IMG_ROTATION_90DEG = 1,
++    IMG_ROTATION_180DEG = 2,
++    IMG_ROTATION_270DEG = 3,
++    IMG_ROTATION_FLIP_Y = 4,
++
++    IMG_ROTATION_BAD = 255,
++} IMG_ROTATION;
++
++/*!
++ * Colour space formats.
++ */
++typedef enum
++{
++    IMG_COLOURSPACE_FORMAT_UNKNOWN = 0x0UL << 16,
++    IMG_COLOURSPACE_FORMAT_LINEAR = 0x1UL << 16,
++    IMG_COLOURSPACE_FORMAT_SRGB = 0x2UL << 16,
++    IMG_COLOURSPACE_FORMAT_SCRGB = 0x3UL << 16,
++    IMG_COLOURSPACE_FORMAT_SCRGB_LINEAR = 0x4UL << 16,
++    IMG_COLOURSPACE_FORMAT_DISPLAY_P3_LINEAR = 0x5UL << 16,
++    IMG_COLOURSPACE_FORMAT_DISPLAY_P3 = 0x6UL << 16,
++    IMG_COLOURSPACE_FORMAT_BT2020_PQ = 0x7UL << 16,
++    IMG_COLOURSPACE_FORMAT_BT2020_LINEAR = 0x8UL << 16,
++    IMG_COLOURSPACE_FORMAT_DISPLAY_P3_PASSTHROUGH = 0x9UL << 16,
++    IMG_COLOURSPACE_FORMAT_MASK = 0xFUL << 16,
++} IMG_COLOURSPACE_FORMAT;
++
++/*!
++ * Determines if FB Compression is Lossy
++ */
++#define IS_FBCDC_LOSSY(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_TRUE : IMG_FALSE)
++
++/*!
++ * Determines if FB Compression is Packed
++ */
++#define IS_FBCDC_PACKED(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_TRUE : IMG_FALSE)
++
++/*!
++ * Returns type of FB Compression
++ */
++#define GET_FBCDC_BLOCK_TYPE(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : mode)
++
++/*!
++ * Adds Packing compression setting to mode if viable
++ */
++#define FBCDC_MODE_ADD_PACKING(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_PACKED_8x8 : mode)
++
++/*!
++ * Removes Packing compression setting from mode ++ */ ++#define FBCDC_MODE_REMOVE_PACKING(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : mode) ++ ++/*! ++ * Adds Lossy25 compression setting to mode if viable ++ */ ++#define FBCDC_MODE_ADD_LOSSY25(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2 : mode) ++ ++/*! ++ * Adds Lossy37 compression setting to mode if viable ++ */ ++#define FBCDC_MODE_ADD_LOSSY37(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2 : mode) ++ ++/*! ++ * Adds Lossy50 compression setting to mode if viable ++ */ ++#define FBCDC_MODE_ADD_LOSSY50(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2 : mode) ++ ++/*! ++ * Adds Lossy75 compression setting to mode if viable ++ */ ++#define FBCDC_MODE_ADD_LOSSY75(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2 : mode) ++ ++/*! ++ * Removes Lossy compression setting from mode ++ */ ++#define FBCDC_MODE_REMOVE_LOSSY(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \ ++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : mode) ++ ++/*! 
++ * Types of framebuffer compression
++ */
++typedef enum
++{
++    IMG_FB_COMPRESSION_NONE,
++    IMG_FB_COMPRESSION_DIRECT_8x8,
++    IMG_FB_COMPRESSION_DIRECT_16x4,
++    IMG_FB_COMPRESSION_DIRECT_32x2,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2,
++    IMG_FB_COMPRESSION_DIRECT_PACKED_8x8,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4,
++    IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2,
++} IMG_FB_COMPRESSION;
++
++
++#endif /* POWERVR_BUFFER_ATTRIBS_H */
+diff --git a/drivers/gpu/drm/img-rogue/include/public/powervr/img_drm_fourcc.h b/drivers/gpu/drm/img-rogue/include/public/powervr/img_drm_fourcc.h
+new file mode 100644
+index 000000000000..5fd79a6c413e
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/public/powervr/img_drm_fourcc.h
+@@ -0,0 +1,140 @@
++/*************************************************************************/ /*!
++@File
++@Title Wrapper around drm_fourcc.h
++@Description FourCCs and DRM framebuffer modifiers that are not in the
++ Kernel's and libdrm's drm_fourcc.h can be added here.
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License MIT
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef IMG_DRM_FOURCC_H
++#define IMG_DRM_FOURCC_H
++
++#if defined(__KERNEL__)
++#include <drm/drm_fourcc.h>
++#else
++/*
++ * Include types.h to workaround versions of libdrm older than 2.4.68
++ * not including the correct headers.
++ */
++#include <linux/types.h>
++
++#include <drm_fourcc.h>
++#endif
++
++/*
++ * Don't get too inspired by this example :)
++ * ADF doesn't support DRM modifiers, so the memory layout had to be
++ * included in the fourcc name, but the proper way to specify information
++ * additional to pixel formats is to use DRM modifiers.
++ *
++ * See upstream drm_fourcc.h for the proper naming convention.
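++ *
++ * For instance (an illustrative pairing, not a requirement of this header),
++ * a buffer using the upstream fourcc DRM_FORMAT_ARGB8888 would advertise an
++ * FBCDC-compressed layout by carrying one of the DRM_FORMAT_MOD_PVR_FBCDC_*
++ * modifiers defined below:
++ *
++ *   uint32_t ui32Format = DRM_FORMAT_ARGB8888;
++ *   uint64_t ui64Modifier = DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12;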
++ */ ++#ifndef DRM_FORMAT_BGRA8888_DIRECT_16x4 ++#define DRM_FORMAT_BGRA8888_DIRECT_16x4 fourcc_code('I', 'M', 'G', '0') ++#endif ++ ++#if !defined(__KERNEL__) ++/* ++ * A definition for the same format was added in Linux kernel 5.2 in commit ++ * 88ab9c76d191ad8645b483f31e2b394b0f3e280e. As such, this definition has been ++ * deprecated and the DRM_FORMAT_ABGR16161616F kernel define should be used ++ * instead of this one. ++ */ ++#define DRM_FORMAT_ABGR16_IMG_DEPRECATED fourcc_code('I', 'M', 'G', '1') ++#endif ++ ++/* ++ * Upstream does not have a packed 10 Bits Per Channel YVU format yet, ++ * so let`s make one up. ++ * Note: at the moment this format is not intended to be used with ++ * a framebuffer, so the kernels core DRM doesn`t need to know ++ * about this format. This means that the kernel doesn`t need ++ * to be patched. ++ */ ++#if !defined(__KERNEL__) ++#define DRM_FORMAT_YVU444_PACK10_IMG fourcc_code('I', 'M', 'G', '2') ++#define DRM_FORMAT_YUV422_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '3') ++#define DRM_FORMAT_YUV420_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '4') ++#endif ++ ++/* ++ * Value chosen in the middle of 255 pool to minimise the chance of hitting ++ * the same value potentially defined by other vendors in the drm_fourcc.h ++ */ ++#define DRM_FORMAT_MOD_VENDOR_PVR 0x92 ++ ++#ifndef DRM_FORMAT_MOD_VENDOR_NONE ++#define DRM_FORMAT_MOD_VENDOR_NONE 0 ++#endif ++ ++#ifndef DRM_FORMAT_RESERVED ++#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1) ++#endif ++ ++#define img_fourcc_mod_combine(uiModHi, uiModLo) \ ++ ((__u64) ((__u32) (uiModHi)) << 32 | (__u64) ((__u32) (uiModLo))) ++ ++#define img_fourcc_mod_hi(ui64Mod) \ ++ ((__u32) ((__u64) (ui64Mod) >> 32)) ++ ++#define img_fourcc_mod_lo(ui64Mod) \ ++ ((__u32) ((__u64) (ui64Mod)) & 0xffffffff) ++ ++#ifndef fourcc_mod_code ++#define fourcc_mod_code(vendor, val) \ ++ ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL)) ++#endif ++ ++#ifndef DRM_FORMAT_MOD_INVALID ++#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED) ++#endif ++ ++#ifndef DRM_FORMAT_MOD_LINEAR ++#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0) ++#endif ++ ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V1 fourcc_mod_code(PVR, 3) ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1 fourcc_mod_code(PVR, 9) ++ ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7 fourcc_mod_code(PVR, 6) ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7 fourcc_mod_code(PVR, 12) ++ ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10 fourcc_mod_code(PVR, 21) ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10 fourcc_mod_code(PVR, 22) ++#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10 fourcc_mod_code(PVR, 23) ++ ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12 fourcc_mod_code(PVR, 15) ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12 fourcc_mod_code(PVR, 16) ++ ++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13 fourcc_mod_code(PVR, 24) ++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_8x8_V13 fourcc_mod_code(PVR, 25) ++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_8x8_V13 fourcc_mod_code(PVR, 26) ++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_8x8_V13 fourcc_mod_code(PVR, 27) ++ ++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V13 fourcc_mod_code(PVR, 28) ++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_16x4_V13 fourcc_mod_code(PVR, 29) ++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V13 fourcc_mod_code(PVR, 30) ++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V13 fourcc_mod_code(PVR, 31) ++ ++#endif /* IMG_DRM_FOURCC_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/public/powervr/mem_types.h 
b/drivers/gpu/drm/img-rogue/include/public/powervr/mem_types.h
+new file mode 100644
+index 000000000000..a6dce8fe9889
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/public/powervr/mem_types.h
+@@ -0,0 +1,64 @@
++/*************************************************************************/ /*!
++@File
++@Title Public types
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License MIT
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef POWERVR_TYPES_H
++#define POWERVR_TYPES_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#if defined(_MSC_VER)
++ #include "msvc_types.h"
++#elif defined(__linux__) && defined(__KERNEL__)
++ #include <linux/types.h>
++ #include <linux/compiler.h>
++#else
++ #include <stdint.h>
++ #define __iomem
++#endif
++
++typedef void *IMG_CPU_VIRTADDR;
++
++/* device virtual address */
++typedef struct
++{
++ uint64_t uiAddr;
++#define IMG_CAST_TO_DEVVADDR_UINT(var) (uint64_t)(var)
++
++} IMG_DEV_VIRTADDR;
++
++typedef uint64_t IMG_DEVMEM_SIZE_T;
++typedef uint64_t IMG_DEVMEM_ALIGN_T;
++typedef uint64_t IMG_DEVMEM_OFFSET_T;
++typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T;
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/img-rogue/include/public/powervr/pvrsrv_sync_ext.h b/drivers/gpu/drm/img-rogue/include/public/powervr/pvrsrv_sync_ext.h
+new file mode 100644
+index 000000000000..30f7972444cd
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/public/powervr/pvrsrv_sync_ext.h
+@@ -0,0 +1,72 @@
++/*************************************************************************/ /*!
++@File
++@Title Services external synchronisation interface header
++@Description Defines synchronisation structures that are visible internally
++ and externally
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License MIT
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef POWERVR_SYNC_EXT_H ++#define POWERVR_SYNC_EXT_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/*! ++ * Number of sync prims still used internally in operations ++ */ ++#define PVRSRV_MAX_SYNC_PRIMS 4U ++ ++/*! ++ * Maximum number of dev var updates passed in a kick call ++ */ ++#define PVRSRV_MAX_DEV_VARS 13U ++ ++/*! ++ * Number of UFOs in operations ++ */ ++#define PVRSRV_MAX_SYNCS (PVRSRV_MAX_SYNC_PRIMS + PVRSRV_MAX_DEV_VARS) ++ ++/*! Implementation independent types for passing fence/timeline to Services. ++ */ ++typedef int32_t PVRSRV_FENCE; ++typedef int32_t PVRSRV_TIMELINE; ++ ++/*! Maximum length for an annotation name string for fence sync model objects. ++ */ ++#define PVRSRV_SYNC_NAME_LENGTH 32U ++ ++/* Macros for API callers using the fence sync model ++ */ ++#define PVRSRV_NO_TIMELINE ((PVRSRV_TIMELINE) -1) ++#define PVRSRV_NO_FENCE ((PVRSRV_FENCE) -1) ++#define PVRSRV_NO_FENCE_PTR NULL ++#define PVRSRV_NO_TIMELINE_PTR NULL ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/gpu/drm/img-rogue/include/pvr_buffer_sync_shared.h b/drivers/gpu/drm/img-rogue/include/pvr_buffer_sync_shared.h +new file mode 100644 +index 000000000000..7a110910dbd6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvr_buffer_sync_shared.h +@@ -0,0 +1,57 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR buffer sync shared ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Shared definitions between client and server ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_BUFFER_SYNC_SHARED_H ++#define PVR_BUFFER_SYNC_SHARED_H ++ ++#define PVR_BUFFER_FLAG_READ (1U << 0) ++#define PVR_BUFFER_FLAG_WRITE (1U << 1) ++#define PVR_BUFFER_FLAG_MASK (PVR_BUFFER_FLAG_READ | \ ++ PVR_BUFFER_FLAG_WRITE) ++ ++/* Maximum number of PMRs passed ++ * in a kick when using buffer sync ++ */ ++#define PVRSRV_MAX_BUFFERSYNC_PMRS 32 ++ ++#endif /* PVR_BUFFER_SYNC_SHARED_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvr_debug.h b/drivers/gpu/drm/img-rogue/include/pvr_debug.h +new file mode 100644 +index 000000000000..56bbb13f1c16 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvr_debug.h +@@ -0,0 +1,898 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR Debug Declarations ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Provides debug functionality ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_DEBUG_H ++#define PVR_DEBUG_H ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++/*! @cond Doxygen_Suppress */ ++#if defined(_MSC_VER) ++# define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) ++#else ++# define MSC_SUPPRESS_4127 ++#endif ++/*! @endcond */ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define PVR_MAX_DEBUG_MESSAGE_LEN (512) /*!< Max length of a Debug Message */ ++ ++/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */ ++#define DBGPRIV_FATAL 0x001UL /*!< Debug-Fatal. Privately used by pvr_debug. */ ++#define DBGPRIV_ERROR 0x002UL /*!< Debug-Error. Privately used by pvr_debug. */ ++#define DBGPRIV_WARNING 0x004UL /*!< Debug-Warning. Privately used by pvr_debug. */ ++#define DBGPRIV_MESSAGE 0x008UL /*!< Debug-Message. Privately used by pvr_debug. */ ++#define DBGPRIV_VERBOSE 0x010UL /*!< Debug-Verbose. Privately used by pvr_debug. */ ++#define DBGPRIV_CALLTRACE 0x020UL /*!< Debug-CallTrace. Privately used by pvr_debug. */ ++#define DBGPRIV_ALLOC 0x040UL /*!< Debug-Alloc. Privately used by pvr_debug. */ ++#define DBGPRIV_BUFFERED 0x080UL /*!< Debug-Buffered. Privately used by pvr_debug. */ ++#define DBGPRIV_DEBUG 0x100UL /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */ ++#define DBGPRIV_LAST 0x100UL /*!< Always set to highest mask value. Privately used by pvr_debug. 
*/
++
++/* Enable DPF logging locally for some make targets */
++#if defined(PVRSRV_NEED_PVR_DPF_LOCAL)
++#undef PVRSRV_NEED_PVR_DPF
++#define PVRSRV_NEED_PVR_DPF
++#endif
++
++#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
++#define PVRSRV_NEED_PVR_ASSERT
++#endif
++
++#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
++#define PVRSRV_NEED_PVR_DPF
++#endif
++
++#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
++#define PVRSRV_NEED_PVR_TRACE
++#endif
++
++#if !defined(DOXYGEN)
++/*************************************************************************/ /*
++PVRSRVGetErrorString
++Returns a string describing the provided PVRSRV_ERROR code
++NB No doxygen comments provided as this function does not require porting
++ for other operating systems
++*/ /**************************************************************************/
++const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError);
++#define PVRSRVGETERRORSTRING PVRSRVGetErrorString
++#endif
++
++/* PVR_ASSERT() and PVR_DBG_BREAK handling */
++
++#if defined(__KLOCWORK__)
++/* A dummy no-return function to be used under Klocwork to mark unreachable
++ paths instead of abort() in order to avoid MISRA.STDLIB.ABORT issues. */
++__noreturn void klocwork_abort(void);
++#endif
++
++#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN)
++
++/* Unfortunately the Klocwork static analysis checker doesn't understand our
++ * ASSERT macros. Thus it reports lots of false positives. Defining our Assert
++ * macros in a special way when the code is analysed by Klocwork avoids
++ * them.
++ */
++#if defined(__KLOCWORK__)
++#define PVR_ASSERT(x) do { if (!(x)) {klocwork_abort();} } while (false)
++#else /* ! __KLOCWORK__ */
++
++#if defined(_WIN32)
++#define PVR_ASSERT(expr) do \
++ { \
++ MSC_SUPPRESS_4127 \
++ if (unlikely(!(expr))) \
++ { \
++ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\
++ "*** Debug assertion failed!"); \
++ __debugbreak(); \
++ } \
++ MSC_SUPPRESS_4127 \
++ } while (false)
++
++#else
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include <linux/compiler.h>
++#include <linux/bug.h>
++
++/* In Linux kernel mode, use WARN_ON() directly. This produces the
++ * correct filename and line number in the warning message.
++ */
++#define PVR_ASSERT(EXPR) do \
++ { \
++ if (unlikely(!(EXPR))) \
++ { \
++ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__, \
++ "Debug assertion failed!"); \
++ WARN_ON(1); \
++ } \
++ } while (false)
++
++#else /* defined(__linux__) && defined(__KERNEL__) */
++
++/*************************************************************************/ /*!
++@Function PVRSRVDebugAssertFail
++@Description Indicate to the user that a debug assertion has failed and
++ prevent the program from continuing.
++ Invoked from the macro PVR_ASSERT().
++@Input pszFile The name of the source file where the assertion failed
++@Input ui32Line The line number of the failed assertion
++@Input pszAssertion String describing the assertion
++@Return NEVER!
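++
++ (Illustrative: a call site such as PVR_ASSERT(psDevNode != NULL) reaches
++ this function with pszAssertion set to the stringified expression.)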
++*/ /**************************************************************************/ ++IMG_EXPORT void IMG_CALLCONV __noreturn ++PVRSRVDebugAssertFail(const IMG_CHAR *pszFile, ++ IMG_UINT32 ui32Line, ++ const IMG_CHAR *pszAssertion); ++ ++#define PVR_ASSERT(EXPR) do \ ++ { \ ++ if (unlikely(!(EXPR))) \ ++ { \ ++ PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR); \ ++ } \ ++ } while (false) ++ ++#endif /* defined(__linux__) && defined(__KERNEL__) */ ++#endif /* defined(_WIN32) */ ++#endif /* defined(__KLOCWORK__) */ ++ ++#if defined(__KLOCWORK__) ++ #define PVR_DBG_BREAK do { klocwork_abort(); } while (false) ++#else ++ #if defined(WIN32) ++ #define PVR_DBG_BREAK __debugbreak() /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */ ++ #else ++ #if defined(PVR_DBG_BREAK_ASSERT_FAIL) ++ /*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */ ++ #if defined(_WIN32) ++ #define PVR_DBG_BREAK DBG_BREAK ++ #else ++ #if defined(__linux__) && defined(__KERNEL__) ++ #define PVR_DBG_BREAK BUG() ++ #else ++ #define PVR_DBG_BREAK PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK") ++ #endif ++ #endif ++ #else ++ /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ ++ #define PVR_DBG_BREAK ++ #endif ++ #endif ++#endif ++ ++ ++#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ ++ /* Unfortunately the Klocwork static analysis checker doesn't understand our ++ * ASSERT macros. Thus it reports lots of false positive. Defining our Assert ++ * macros in a special way when the code is analysed by Klocwork avoids ++ * them. ++ */ ++ #if defined(__KLOCWORK__) && !defined(SERVICES_SC) ++ #define PVR_ASSERT(EXPR) do { if (!(EXPR)) {klocwork_abort();} } while (false) ++ #else ++ #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */ ++ #endif ++ ++ #define PVR_DBG_BREAK /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ ++ ++#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ ++ ++ ++/* PVR_DPF() handling */ ++ ++#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN) ++ ++ /* New logging mechanism */ ++ #define PVR_DBG_FATAL DBGPRIV_FATAL /*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */ ++ #define PVR_DBG_ERROR DBGPRIV_ERROR /*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */ ++ #define PVR_DBG_WARNING DBGPRIV_WARNING /*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */ ++ #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE /*!< Debug level passed to PVRSRVDebugPrintf() for information only. */ ++ #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE /*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */ ++ #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE /*!< Debug level passed to PVRSRVDebugPrintf() for function tracing purposes. */ ++ #define PVR_DBG_ALLOC DBGPRIV_ALLOC /*!< Debug level passed to PVRSRVDebugPrintf() for tracking some of drivers memory operations. */ ++ #define PVR_DBG_BUFFERED DBGPRIV_BUFFERED /*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */ ++ #define PVR_DBG_DEBUG DBGPRIV_DEBUG /*!< Debug level passed to PVRSRVDebugPrintf() for debug messages. */ ++ ++ /* These levels are always on with PVRSRV_NEED_PVR_DPF */ ++ /*! @cond Doxygen_Suppress */ ++ #define PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__) ++ #define PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__) ++ #define PVR_DPF_0x080UL(...) 
PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__) ++ ++ /* ++ * The AdHoc-Debug level is only supported when enabled in the local ++ * build environment and may need to be used in both debug and release ++ * builds. An error is generated in the formal build if it is checked in. ++ */ ++#if defined(PVR_DPF_ADHOC_DEBUG_ON) ++ #define PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__) ++#else ++ /* Use an undefined token here to stop compilation dead in the offending module */ ++ #define PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing ++#endif ++ ++ /* Some are compiled out completely in release builds */ ++#if defined(DEBUG) || defined(DOXYGEN) ++ #define PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__) ++ #define PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__) ++ #define PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__) ++ #define PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__) ++ #define PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__) ++#else ++ #define PVR_DPF_0x004UL(...) ++ #define PVR_DPF_0x008UL(...) ++ #define PVR_DPF_0x010UL(...) ++ #define PVR_DPF_0x020UL(...) ++ #define PVR_DPF_0x040UL(...) ++#endif ++ ++ /* Translate the different log levels to separate macros ++ * so they can each be compiled out. ++ */ ++#if defined(DEBUG) ++ #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__) ++#else ++ #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl ("", __LINE__, __VA_ARGS__) ++#endif ++ /*! @endcond */ ++ ++ /* Get rid of the double bracketing */ ++ #define PVR_DPF(x) PVR_DPF_EX x ++ ++ #define PVR_LOG_ERROR(_rc, _call) \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)) ++ ++ #define PVR_LOG_IF_ERROR(_rc, _call) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_WARN_IF_ERROR(_rc, _call) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do \ ++ { if (unlikely(_expr == NULL)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do \ ++ { if (unlikely(_expr == NULL)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", #_expr, __func__)); \ ++ _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ return _rc; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ return; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do \ ++ { if (unlikely(_rc 
!= PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do \ ++ { PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ _err = _rc; \ ++ goto _go; \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_IF_FALSE(_expr, _msg) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ ++ return _rc; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ ++ return; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", _param, __func__)); \ ++ return PVRSRV_ERROR_INVALID_PARAMS; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", #_expr, __func__)); \ ++ _err = PVRSRV_ERROR_INVALID_PARAMS; \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_MSG(_lvl, _msg) \ ++ PVR_DPF((_lvl, ("In %s() "_msg), __func__)) ++ ++ #define PVR_LOG_VA(_lvl, _msg, ...) \ ++ PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)) ++ ++ #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ return _rc; \ ++ } MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ goto _go; \ ++ } MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, ("At %s: "_msg), __func__, __VA_ARGS__)); \ ++ return _rc; \ ++ } MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ goto _go; \ ++ } MSC_SUPPRESS_4127\ ++ } while (false) ++ ++#else /* defined(PVRSRV_NEED_PVR_DPF) */ ++ ++ #define PVR_DPF(X) /*!< Null Implementation of PowerVR Debug Printf (does nothing) */ ++ ++ #define PVR_LOG_MSG(_lvl, _msg) ++ #define PVR_LOG_VA(_lvl, _msg, ...) 
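++
++ /* Illustrative call sites (names assumed). In this configuration they
++ * compile to nothing; with PVRSRV_NEED_PVR_DPF defined they log with the
++ * calling function's name prepended:
++ * PVR_LOG_MSG(PVR_DBG_WARNING, "device idle");
++ * PVR_LOG_VA(PVR_DBG_MESSAGE, "fence %d signalled", iFenceId);
++ */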
++ #define PVR_LOG_ERROR(_rc, _call) (void)(_rc) ++ #define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc) ++ #define PVR_WARN_IF_ERROR(_rc, _call) (void)(_rc) ++ ++ #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) (void)(_rc) ++ #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) (void)(_expr) ++ ++ #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return PVRSRV_ERROR_OUT_OF_MEMORY; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do { _err = _rc; goto _go; MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr) ++ #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do { if (unlikely(!(_expr))) { return; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do { if (unlikely(!(_expr))) { return PVRSRV_ERROR_INVALID_PARAMS; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do { if (unlikely(!(_expr))) { _err = PVRSRV_ERROR_INVALID_PARAMS; goto _go; } MSC_SUPPRESS_4127 } while (false) ++ ++ #undef PVR_DPF_FUNCTION_TRACE_ON ++ ++#endif /* defined(PVRSRV_NEED_PVR_DPF) */ ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDebugPrintf ++@Description Output a debug message to the user, using an OS-specific ++ method, to a log or console which can be read by developers ++ Invoked from the macro PVR_DPF(). ++@Input ui32DebugLevel The debug level of the message. This can ++ be used to restrict the output of debug ++ messages based on their severity. ++ If this is PVR_DBG_BUFFERED, the message ++ should be written into a debug circular ++ buffer instead of being output immediately ++ (useful when performance would otherwise ++ be adversely affected). ++ The debug circular buffer shall only be ++ output when PVRSRVDebugPrintfDumpCCB() is ++ called. ++@Input pszFileName The source file containing the code that is ++ generating the message ++@Input ui32Line The line number in the source file ++@Input pszFormat The formatted message string ++@Input ... 
Zero or more arguments for use by the ++ formatted string ++@Return None ++*/ /**************************************************************************/ ++IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, ++ const IMG_CHAR *pszFileName, ++ IMG_UINT32 ui32Line, ++ const IMG_CHAR *pszFormat, ++ ...) __printf(4, 5); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDebugPrintfDumpCCB ++@Description When PVRSRVDebugPrintf() is called with the ui32DebugLevel ++ specified as DBGPRIV_BUFFERED, the debug shall be written to ++ the debug circular buffer instead of being output immediately. ++ (This could be used to obtain debug without incurring a ++ performance hit by printing it at that moment). ++ This function shall dump the contents of that debug circular ++ buffer to be output in an OS-specific method to a log or ++ console which can be read by developers. ++@Return None ++*/ /**************************************************************************/ ++IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void); ++ ++#if !defined(DOXYGEN) ++#define PVR_DPF_FUNC__(lvl, message, ...) PVR_DPF((lvl, "%s: " message, __func__, ##__VA_ARGS__)) ++#define PVR_DPF_FUNC(x) PVR_DPF_FUNC__ x ++#endif /*!defined(DOXYGEN) */ ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_RETURN_IF_ERROR macro. ++ */ ++#define PVR_RETURN_IF_ERROR(_rc) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ return _rc; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_RETURN_IF_FALSE macro. ++ */ ++#define PVR_RETURN_IF_FALSE(_expr, _rc) do \ ++ { if (unlikely(!(_expr))) { \ ++ return _rc; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_RETURN_IF_INVALID_PARAM macro. ++ */ ++#define PVR_RETURN_IF_INVALID_PARAM(_expr) do \ ++ { if (unlikely(!(_expr))) { \ ++ return PVRSRV_ERROR_INVALID_PARAMS; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_RETURN_IF_NOMEM macro. ++ */ ++#define PVR_RETURN_IF_NOMEM(_expr) do \ ++ { if (unlikely(!(_expr))) { \ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_IF_NOMEM macro. ++ */ ++#define PVR_GOTO_IF_NOMEM(_expr, _err, _go) do \ ++ { if (unlikely(_expr == NULL)) { \ ++ _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_IF_INVALID_PARAM macro. ++ */ ++#define PVR_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ ++ { if (unlikely(!(_expr))) { \ ++ _err = PVRSRV_ERROR_INVALID_PARAMS; \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_IF_FALSE macro. 
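++ * e.g. (illustrative): PVR_GOTO_IF_FALSE(psBuffer != NULL, ErrUnlock);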
++ */ ++#define PVR_GOTO_IF_FALSE(_expr, _go) do \ ++ { if (unlikely(!(_expr))) { \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_IF_ERROR macro. ++ */ ++#define PVR_GOTO_IF_ERROR(_rc, _go) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_WITH_ERROR macro. ++ */ ++#define PVR_GOTO_WITH_ERROR(_err, _rc, _go) do \ ++ { _err = _rc; goto _go; \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/*! @cond Doxygen_Suppress */ ++#if defined(PVR_DPF_FUNCTION_TRACE_ON) ++ ++ #define PVR_DPF_ENTERED \ ++ PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered", __func__, __LINE__)) ++ ++ #define PVR_DPF_ENTERED1(p1) \ ++ PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1))) ++ ++ #define PVR_DPF_RETURN_RC(a) \ ++ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_DPF_RETURN_RC1(a,p1) \ ++ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_DPF_RETURN_VAL(a) \ ++ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned with value", __func__, __LINE__)); return (a); MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_DPF_RETURN_OK \ ++ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_DPF_RETURN \ ++ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (false) ++ ++ #if !defined(DEBUG) ++ #error PVR DPF Function trace enabled in release build, rectify ++ #endif ++ ++#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ ++ ++ #define PVR_DPF_ENTERED ++ #define PVR_DPF_ENTERED1(p1) ++ #define PVR_DPF_RETURN_RC(a) return (a) ++ #define PVR_DPF_RETURN_RC1(a,p1) return (a) ++ #define PVR_DPF_RETURN_VAL(a) return (a) ++ #define PVR_DPF_RETURN_OK return PVRSRV_OK ++ #define PVR_DPF_RETURN return ++ ++#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ ++/*! @endcond */ ++ ++#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__) ++/*Use PVR_DPF() unless message is necessary in release build */ ++#define PVR_LOG(X) PVRSRVReleasePrintf X ++ ++/*************************************************************************/ /*! ++@Function PVRSRVReleasePrintf ++@Description Output an important message, using an OS-specific method, ++ to the Server log or console which will always be output in ++ both release and debug builds. ++ Invoked from the macro PVR_LOG(). Used in Services Server only. ++@Input pszFormat The message format string ++@Input ... Zero or more arguments for use by the format string ++@Return None ++*/ /**************************************************************************/ ++void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) 
__printf(1, 2); ++#endif ++ ++/* PVR_TRACE() handling */ ++ ++#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN) ++ ++ #define PVR_TRACE(X) PVRSRVTrace X /*!< PowerVR Debug Trace Macro */ ++ /* Empty string implementation that is -O0 build friendly */ ++ #define PVR_TRACE_EMPTY_LINE() PVR_TRACE(("%s", "")) ++ ++/*************************************************************************/ /*! ++@Function PVRTrace ++@Description Output a debug message to the user ++ Invoked from the macro PVR_TRACE(). ++@Input pszFormat The message format string ++@Input ... Zero or more arguments for use by the format string ++*/ /**************************************************************************/ ++IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... ) ++ __printf(1, 2); ++ ++#else /* defined(PVRSRV_NEED_PVR_TRACE) */ ++ /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */ ++ #define PVR_TRACE(X) ++ ++#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ ++ ++ ++#if defined(PVRSRV_NEED_PVR_ASSERT) ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(TRUNCATE_64BITS_TO_32BITS) ++#endif ++ INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput) ++ { ++ IMG_UINT32 uiTruncated; ++ ++ uiTruncated = (IMG_UINT32)uiInput; ++ PVR_ASSERT(uiInput == uiTruncated); ++ return uiTruncated; ++ } ++ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(TRUNCATE_64BITS_TO_SIZE_T) ++#endif ++ INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput) ++ { ++ size_t uiTruncated; ++ ++ uiTruncated = (size_t)uiInput; ++ PVR_ASSERT(uiInput == uiTruncated); ++ return uiTruncated; ++ } ++ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(TRUNCATE_SIZE_T_TO_32BITS) ++#endif ++ INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput) ++ { ++ IMG_UINT32 uiTruncated; ++ ++ uiTruncated = (IMG_UINT32)uiInput; ++ PVR_ASSERT(uiInput == uiTruncated); ++ return uiTruncated; ++ } ++ ++ ++#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ ++ #define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr)) ++ #define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr)) ++ #define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr)) ++#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ ++ ++/*! @cond Doxygen_Suppress */ ++/* Macros used to trace calls */ ++#if defined(DEBUG) ++ #define PVR_DBG_FILELINE , (__FILE__), (__LINE__) ++ #define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line ++ #define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line ++ #define PVR_DBG_FILELINE_FMT " %s:%u" ++ #define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \ ++ PVR_UNREFERENCED_PARAMETER(ui32Line); } while (false) ++#else ++ #define PVR_DBG_FILELINE ++ #define PVR_DBG_FILELINE_PARAM ++ #define PVR_DBG_FILELINE_ARG ++ #define PVR_DBG_FILELINE_FMT ++ #define PVR_DBG_FILELINE_UNREF() ++#endif ++/*! @endcond */ ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++/*! ++ @def PVR_ASSERT ++ @brief Aborts the program if assertion fails. ++ ++ The macro will be defined only when PVRSRV_NEED_PVR_ASSERT macro is ++ enabled. It's ignored otherwise. ++ ++ @def PVR_DPF ++ @brief PowerVR Debug Printf logging macro used throughout the driver. ++ ++ The macro allows to print logging messages to appropriate log. The ++ destination log is based on the component (user space / kernel space) and ++ operating system (Linux, Android, etc.). ++ ++ The macro also supports severity levels that allow to turn on/off messages ++ based on their importance. 
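++
++ Example of a typical call (illustrative, not from the original header):
++ PVR_DPF((PVR_DBG_WARNING, "%s: retrying, attempt %u", __func__, uiTry));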
++
++ This macro will print messages with severity level higher than error only
++ if PVRSRV_NEED_PVR_DPF macro is defined.
++
++ @def PVR_LOG_ERROR
++ @brief Logs error.
++
++ @def PVR_LOG_IF_ERROR
++ @brief Logs error if not PVRSRV_OK.
++
++ @def PVR_WARN_IF_ERROR
++ @brief Logs warning if not PVRSRV_OK.
++
++ @def PVR_LOG_RETURN_IF_NOMEM
++ @brief Logs error if expression is NULL and returns PVRSRV_ERROR_OUT_OF_MEMORY.
++
++ @def PVR_LOG_GOTO_IF_NOMEM
++ @brief Logs error if expression is NULL and jumps to given label.
++
++ @def PVR_LOG_RETURN_IF_ERROR
++ @brief Logs error if not PVRSRV_OK and returns the error.
++
++ @def PVR_LOG_RETURN_VOID_IF_ERROR
++ @brief Logs error if not PVRSRV_OK and returns (used in functions that return void).
++
++ @def PVR_LOG_GOTO_IF_ERROR
++ @brief Logs error if not PVRSRV_OK and jumps to label.
++
++ @def PVR_LOG_GOTO_WITH_ERROR
++ @brief Logs error, goes to a label and sets the error code.
++
++ @def PVR_LOG_IF_FALSE
++ @brief Prints error message if expression is false.
++
++ @def PVR_LOG_RETURN_IF_FALSE
++ @brief Prints error message if expression is false and returns given error.
++
++ @def PVR_LOG_RETURN_VOID_IF_FALSE
++ @brief Prints error message if expression is false and returns (used in functions that return void).
++
++ @def PVR_LOG_GOTO_IF_FALSE
++ @brief Prints error message if expression is false and jumps to label.
++
++ @def PVR_LOG_RETURN_IF_INVALID_PARAM
++ @brief Prints error message if expression is false and returns PVRSRV_ERROR_INVALID_PARAMS.
++
++ @def PVR_LOG_GOTO_IF_INVALID_PARAM
++ @brief Prints error message if expression is false and jumps to label.
++
++ @def PVR_RETURN_IF_ERROR
++ @brief Returns passed error code if it's different than PVRSRV_OK.
++
++ @def PVR_RETURN_IF_FALSE
++ @brief Returns passed error code if expression is false.
++
++ @def PVR_RETURN_IF_INVALID_PARAM
++ @brief Returns PVRSRV_ERROR_INVALID_PARAMS if expression is false.
++
++ @def PVR_RETURN_IF_NOMEM
++ @brief Returns PVRSRV_ERROR_OUT_OF_MEMORY if expression is NULL.
++
++ @def PVR_GOTO_IF_NOMEM
++ @brief Goes to a label if expression is NULL.
++
++ @def PVR_GOTO_IF_INVALID_PARAM
++ @brief Goes to a label if expression is false.
++
++ @def PVR_GOTO_IF_FALSE
++ @brief Goes to a label if expression is false.
++
++ @def PVR_GOTO_IF_ERROR
++ @brief Goes to a label if the error code is different than PVRSRV_OK.
++
++ @def PVR_GOTO_WITH_ERROR
++ @brief Goes to a label and sets the error code.
++
++ @def PVR_LOG
++ @brief Prints message to a log unconditionally.
++
++ This macro will print messages only if PVRSRV_NEED_PVR_LOG macro is defined.
++
++ @def PVR_LOG_MSG
++ @brief Prints message to a log with the given log-level.
++
++ @def PVR_LOG_VA
++ @brief Prints message with var-args to a log with the given log-level.
++
++ @def PVR_LOG_IF_ERROR_VA
++ @brief Prints message with var-args to a log if the error code is different than PVRSRV_OK.
++
++ @def PVR_LOG_IF_FALSE_VA
++ @brief Prints message with var-args if expression is false.
++
++ @def PVR_LOG_RETURN_IF_ERROR_VA
++ @brief Prints message with var-args to a log and returns the error code.
++
++ @def PVR_LOG_GOTO_IF_ERROR_VA
++ @brief Prints message with var-args to a log and goes to a label if the error code is different than PVRSRV_OK.
++
++ @def PVR_LOG_RETURN_IF_FALSE_VA
++ @brief Logs the error message with var-args if the expression is false and returns the error code.
++
++ @def PVR_LOG_GOTO_IF_FALSE_VA
++ @brief Logs the error message with var-args and goes to a label if the expression is false.
++ ++ @def PVR_TRACE_EMPTY_LINE ++ @brief Prints empty line to a log (PVRSRV_NEED_PVR_LOG must be defined). ++ ++ @def TRUNCATE_64BITS_TO_32BITS ++ @brief Truncates 64 bit value to 32 bit value (with possible precision loss). ++ ++ @def TRUNCATE_64BITS_TO_SIZE_T ++ @brief Truncates 64 bit value to size_t value (with possible precision loss). ++ ++ @def TRUNCATE_SIZE_T_TO_32BITS ++ @brief Truncates size_t value to 32 bit value (with possible precision loss). ++ */ ++ ++#endif /* PVR_DEBUG_H */ ++ ++/****************************************************************************** ++ End of file (pvr_debug.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/pvr_fd_sync_kernel.h b/drivers/gpu/drm/img-rogue/include/pvr_fd_sync_kernel.h +new file mode 100644 +index 000000000000..3645e29079b1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvr_fd_sync_kernel.h +@@ -0,0 +1,64 @@ ++/*************************************************************************/ /*! ++@File pvr_fd_sync_kernel.h ++@Title Kernel/userspace interface definitions to use the kernel sync ++ driver ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++
++#ifndef _PVR_FD_SYNC_KERNEL_H_
++#define _PVR_FD_SYNC_KERNEL_H_
++
++#include <linux/types.h>
++#include <linux/ioctl.h>
++
++#include "pvr_drm.h"
++
++#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14
++
++struct pvr_sync_pt_info {
++ /* Output */
++ __u32 id;
++ __u32 ui32FWAddr;
++ __u32 ui32CurrOp;
++ __u32 ui32NextOp;
++ __u32 ui32TlTaken;
++} __attribute__((packed, aligned(8)));
++
++#endif /* _PVR_FD_SYNC_KERNEL_H_ */
+diff --git a/drivers/gpu/drm/img-rogue/include/pvr_intrinsics.h b/drivers/gpu/drm/img-rogue/include/pvr_intrinsics.h
+new file mode 100644
+index 000000000000..410a2f5a50b5
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/pvr_intrinsics.h
+@@ -0,0 +1,70 @@
++/*************************************************************************/ /*!
++@File
++@Title Intrinsics definitions
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++#ifndef PVR_INTRINSICS_H ++#define PVR_INTRINSICS_H ++ ++/* PVR_CTZLL: ++ * Count the number of trailing zeroes in a long long integer ++ */ ++ ++#if defined(__GNUC__) ++#if defined(__x86_64__) ++ ++ #define PVR_CTZLL __builtin_ctzll ++#endif ++#endif ++ ++/* PVR_CLZLL: ++ * Count the number of leading zeroes in a long long integer ++ */ ++ ++#if defined(__GNUC__) ++#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \ ++ defined(__arm__) || defined(__mips) ++ ++#define PVR_CLZLL __builtin_clzll ++ ++#endif ++#endif ++ ++#endif /* PVR_INTRINSICS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrmodule.h b/drivers/gpu/drm/img-rogue/include/pvrmodule.h +new file mode 100644 +index 000000000000..267c7b687487 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrmodule.h +@@ -0,0 +1,48 @@ ++/*************************************************************************/ /*! ++@Title Module Author and License. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef _PVRMODULE_H_ ++#define _PVRMODULE_H_ ++ ++MODULE_AUTHOR("Imagination Technologies Ltd. 
"); ++MODULE_LICENSE("Dual MIT/GPL"); ++ ++#endif /* _PVRMODULE_H_ */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_device_types.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_device_types.h +new file mode 100644 +index 000000000000..662e3bc17163 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_device_types.h +@@ -0,0 +1,55 @@ ++/*************************************************************************/ /*! ++@File ++@Title PowerVR device type definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(PVRSRV_DEVICE_TYPES_H) ++#define PVRSRV_DEVICE_TYPES_H ++ ++#include "img_types.h" ++ ++#define PVRSRV_MAX_DEVICES 16U /*!< Largest supported number of devices on the system */ ++ ++#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) ++#define __pvrsrv_defined_struct_enum__ ++#include ++#endif ++ ++#endif /* PVRSRV_DEVICE_TYPES_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_devvar.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_devvar.h +new file mode 100644 +index 000000000000..a8c64e309fda +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_devvar.h +@@ -0,0 +1,291 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services Device Variable interface header ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Defines the client side interface for device variables ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVRSRV_DEVVAR_H ++#define PVRSRV_DEVVAR_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define DEVVAR_MAX_NAME_LEN 32 ++ ++typedef struct SYNC_PRIM_CONTEXT_TAG *PDEVVARCTX; ++typedef struct PVRSRV_CLIENT_SYNC_PRIM_TAG *PDEVVAR; ++ ++typedef struct PVRSRV_DEV_VAR_UPDATE_TAG ++{ ++ PDEVVAR psDevVar; /*!< Pointer to the dev var */ ++ IMG_UINT32 ui32UpdateValue; /*!< the update value */ ++} PVRSRV_DEV_VAR_UPDATE; ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarContextCreate ++ ++@Description Create a new device variable context ++ ++@Input psDevConnection Device to create the device ++ variable context on ++ ++@Output phDevVarContext Handle to the created device ++ variable context ++ ++@Return PVRSRV_OK if the device variable context was successfully ++ created ++*/ ++/*****************************************************************************/ ++IMG_EXPORT PVRSRV_ERROR ++PVRSRVDevVarContextCreate(const PVRSRV_DEV_CONNECTION *psDevConnection, ++ PDEVVARCTX *phDevVarContext); ++ ++/*************************************************************************/ /*! 
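++@Note A minimal usage sketch of this interface (illustrative only,
++ error checking omitted), assuming an already-established
++ const PVRSRV_DEV_CONNECTION *psDevConnection:
++
++     PDEVVARCTX hCtx;
++     PDEVVAR psVar;
++     PVRSRVDevVarContextCreate(psDevConnection, &hCtx);
++     PVRSRVDevVarAlloc(hCtx, &psVar, 0, "ExampleVar");
++     PVRSRVDevVarSet(psVar, 1);
++     PVRSRVDevVarFree(psVar);
++     PVRSRVDevVarContextDestroy(hCtx);
++*/
++/*****************************************************************************/
++
++/*************************************************************************/ /*!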
++@Function PVRSRVDevVarContextDestroy ++ ++@Description Destroy a device variable context ++ ++@Input hDevVarContext Handle to the device variable ++ context to destroy ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarContextDestroy(PDEVVARCTX hDevVarContext); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarAlloc ++ ++@Description Allocate a new device variable on the specified device ++ variable context. The device variable's value is initialised ++ with the value passed in ui32InitialValue. ++ ++@Input hDevVarContext Handle to the device variable ++ context ++@Input ui32InitialValue Value to initially assign to the ++ new variable ++@Input pszDevVarName Name assigned to the device variable ++ (for debug purposes) ++ ++@Output ppsDevVar Created device variable ++ ++@Return PVRSRV_OK if the device variable was successfully created ++*/ ++/*****************************************************************************/ ++IMG_EXPORT PVRSRV_ERROR ++PVRSRVDevVarAllocI(PDEVVARCTX hDevVarContext, ++ PDEVVAR *ppsDevVar, ++ IMG_UINT32 ui32InitialValue, ++ const IMG_CHAR *pszDevVarName ++ PVR_DBG_FILELINE_PARAM); ++#define PVRSRVDevVarAlloc(hDevVarContext, ppsDevVar, ui32InitialValue, pszDevVarName) \ ++ PVRSRVDevVarAllocI( (hDevVarContext), (ppsDevVar), (ui32InitialValue), (pszDevVarName) \ ++ PVR_DBG_FILELINE ) ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarFree ++ ++@Description Free a device variable ++ ++@Input psDevVar The device variable to free ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarFree(PDEVVAR psDevVar); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarSet ++ ++@Description Set the device variable to a value ++ ++@Input psDevVar The device variable to set ++ ++@Input ui32Value Value to set it to ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarSet(PDEVVAR psDevVar, ++ IMG_UINT32 ui32Value); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarGet ++ ++@Description Get the current value of the device variable ++ ++@Input psDevVar The device variable to get the ++ value of ++ ++@Return Value of the variable ++*/ ++/*****************************************************************************/ ++IMG_EXPORT IMG_UINT32 ++PVRSRVDevVarGet(PDEVVAR psDevVar); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarGetFirmwareAddr ++ ++@Description Returns the address of the associated firmware value for a ++ specified device integer (not exposed to client) ++ ++@Input psDevVar The device variable to resolve ++ ++@Return The firmware address of the device variable ++*/ ++/*****************************************************************************/ ++IMG_EXPORT IMG_UINT32 ++PVRSRVDevVarGetFirmwareAddr(PDEVVAR psDevVar); ++ ++#if defined(PDUMP) ++/*************************************************************************/ /*! 
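++@Note When PDUMP is not defined, the three PDump helpers declared
++ below are replaced further down in this header by static INLINE
++ no-op stubs, so callers may invoke them unconditionally without
++ #if defined(PDUMP) guards of their own.
++*/
++/*****************************************************************************/
++
++/*************************************************************************/ /*!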
++@Function PVRSRVDevVarPDump ++ ++@Description PDump the current value of the device variable ++ ++@Input psDevVar The device variable to PDump ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarPDump(PDEVVAR psDevVar); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarPDumpPol ++ ++@Description Do a PDump poll of the device variable ++ ++@Input psDevVar The device variable to PDump ++ ++@Input ui32Value Value to Poll for ++ ++@Input ui32Mask PDump mask operator ++ ++@Input ui32PDumpFlags PDump flags ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ IMG_UINT32 ui32PDumpFlags); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarPDumpCBP ++ ++@Description Do a PDump CB poll using the device variable ++ ++@Input psDevVar The device variable to PDump ++ ++@Input uiWriteOffset Current write offset of buffer ++ ++@Input uiPacketSize Size of the packet to write into CB ++ ++@Input uiBufferSize Size of the CB ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, ++ IMG_UINT64 uiWriteOffset, ++ IMG_UINT64 uiPacketSize, ++ IMG_UINT64 uiBufferSize); ++#else /* PDUMP */ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVDevVarPDump) ++#endif ++static INLINE void ++PVRSRVDevVarPDump(PDEVVAR psDevVar) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevVar); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVDevVarPDumpPol) ++#endif ++static INLINE void ++PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevVar); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ PVR_UNREFERENCED_PARAMETER(eOperator); ++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVDevVarPDumpCBP) ++#endif ++static INLINE void ++PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, ++ IMG_UINT64 uiWriteOffset, ++ IMG_UINT64 uiPacketSize, ++ IMG_UINT64 uiBufferSize) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevVar); ++ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); ++ PVR_UNREFERENCED_PARAMETER(uiPacketSize); ++ PVR_UNREFERENCED_PARAMETER(uiBufferSize); ++} ++#endif /* PDUMP */ ++ ++#if defined(__cplusplus) ++} ++#endif ++#endif /* PVRSRV_DEVVAR_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_error.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_error.h +new file mode 100644 +index 000000000000..0bbf8431bedc +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_error.h +@@ -0,0 +1,61 @@ ++/*************************************************************************/ /*! ++@File pvrsrv_error.h ++@Title services error enumerant ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines error codes used by any/all services modules ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(PVRSRV_ERROR_H) ++#define PVRSRV_ERROR_H ++ ++/*! ++ ***************************************************************************** ++ * Error values ++ *****************************************************************************/ ++typedef enum PVRSRV_ERROR_TAG ++{ ++ PVRSRV_OK, ++#define PVRE(x) x, ++#include "pvrsrv_errors.h" ++#undef PVRE ++ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff ++ ++} PVRSRV_ERROR; ++ ++#endif /* !defined(PVRSRV_ERROR_H) */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_errors.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_errors.h +new file mode 100644 +index 000000000000..59b9cfe84cea +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_errors.h +@@ -0,0 +1,410 @@ ++/*************************************************************************/ /*! ++@File pvrsrv_errors.h ++@Title services error codes ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines error codes used by any/all services modules ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* Don't add include guards to this file! 
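++   It is included more than once on purpose: each includer first defines
++   PVRE() to shape every entry, then #undef's it. For example,
++   pvrsrv_error.h does
++       #define PVRE(x) x,
++   so each PVRE(...) line below expands to an enumerator of PVRSRV_ERROR;
++   a hypothetical error-to-string table could instead use
++       #define PVRE(x) #x,
++   to build an array of names from the same list (the "X-macro" idiom).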
*/ ++ ++PVRE(PVRSRV_ERROR_OUT_OF_MEMORY) ++PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS) ++PVRE(PVRSRV_ERROR_INVALID_PARAMS) ++PVRE(PVRSRV_ERROR_INIT_FAILURE) ++PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK) ++PVRE(PVRSRV_ERROR_INVALID_DEVICE) ++PVRE(PVRSRV_ERROR_NOT_OWNER) ++PVRE(PVRSRV_ERROR_BAD_MAPPING) ++PVRE(PVRSRV_ERROR_TIMEOUT) ++PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED) ++PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS) ++PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL) ++PVRE(PVRSRV_ERROR_SCENE_INVALID) ++PVRE(PVRSRV_ERROR_STREAM_ERROR) ++PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES) ++PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED) ++PVRE(PVRSRV_ERROR_CMD_TOO_BIG) ++PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED) ++PVRE(PVRSRV_ERROR_TOOMANYBUFFERS) ++PVRE(PVRSRV_ERROR_NOT_SUPPORTED) ++PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED) ++PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE) ++PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) ++PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS) ++PVRE(PVRSRV_ERROR_RETRY) ++PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH) ++PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH) ++PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH) ++PVRE(PVRSRV_ERROR_BVNC_MISMATCH) ++PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH) ++PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG) ++PVRE(PVRSRV_ERROR_INVALID_FLAGS) ++PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS) ++PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY) ++PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR) ++PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED) ++PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED) ++PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED) ++PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR) ++PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG) ++PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND) ++PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES) ++PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP) ++PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE) ++PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE) ++PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE) ++PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP) ++PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP) ++PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY) ++PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED) ++PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED) ++PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) ++PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY) ++PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES) ++PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE) ++PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED) ++PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED) ++PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR) ++PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR) ++PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE) ++PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS) ++PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE) ++PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE) ++PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH) ++PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK) ++PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING) ++PVRE(PVRSRV_ERROR_PMR_EMPTY) ++PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND) ++PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED) ++PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED) ++PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED) ++PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY) ++PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP) ++PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE) ++PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION) ++PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX) ++PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX) ++PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED) ++PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT) ++PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE) ++PVRE(PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED) ++PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA) 
++PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM) ++PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED) ++PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING) ++PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS) ++PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP) ++PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE) ++PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND) ++PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT) ++PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND) ++PVRE(PVRSRV_ERROR_PCI_CALL_FAILED) ++PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL) ++PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE) ++PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH) ++PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY) ++PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC) ++PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL) ++PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY) ++PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES) ++PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES) ++PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES) ++PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES) ++PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES) ++PVRE(PVRSRV_ERROR_STILL_MAPPED) ++PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND) ++PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT) ++PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE) ++PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK) ++PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA) ++PVRE(PVRSRV_ERROR_INVALID_DEVINFO) ++PVRE(PVRSRV_ERROR_INVALID_MEMINFO) ++PVRE(PVRSRV_ERROR_INVALID_MISCINFO) ++PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL) ++PVRE(PVRSRV_ERROR_INVALID_CONTEXT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT) ++PVRE(PVRSRV_ERROR_INVALID_HEAP) ++PVRE(PVRSRV_ERROR_INVALID_KERNELINFO) ++PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE) ++PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE) ++PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE) ++PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR) ++PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR) ++PVRE(PVRSRV_ERROR_INVALID_HEAPINFO) ++PVRE(PVRSRV_ERROR_INVALID_PERPROC) ++PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO) ++PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST) ++PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST) ++PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP) ++PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE) ++PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS) ++PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD) ++PVRE(PVRSRV_ERROR_THREAD_READ_ERROR) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER) ++PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR) ++PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR) ++PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED) ++PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED) ++PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE) ++PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND) ++PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL) ++PVRE(PVRSRV_ERROR_FLIP_FAILED) ++PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED) ++PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE) ++PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB) ++PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED) ++PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG) 
++PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG) ++PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG) ++PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID) ++PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED) ++PVRE(PVRSRV_ERROR_SUBMIT_NEEDED) ++PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE) ++PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL) ++PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW) ++PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE) ++PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES) ++PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED) ++PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR) ++PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND) ++PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED) ++PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND) ++PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED) ++PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED) ++PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED) ++PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED) ++PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE) ++PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND) ++PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE) ++PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH) ++PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE) ++PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND) ++PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND) ++PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND) ++PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER) ++PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT) ++PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE) ++PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED) ++PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE) ++PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE) ++PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND) ++PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE) ++PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE) ++PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE) ++PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP) ++PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE) ++PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE) ++PVRE(PVRSRV_ERROR_INVALID_DEVICEID) ++PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND) ++PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED) ++PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED) ++PVRE(PVRSRV_ERROR_COPY_TEST_FAILED) ++PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED) ++PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK) ++PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED) ++PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK) ++PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR) ++PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE) ++PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE) ++PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) ++PVRE(PVRSRV_ERROR_BAD_SYNC_STATE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID) ++PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION) ++PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION) ++PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE) 
++PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID) ++PVRE(PVRSRV_ERROR_PHYSHEAP_CONFIG) ++PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG) ++PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM) ++PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP) ++PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT) ++PVRE(PVRSRV_ERROR_BP_NOT_SET) ++PVRE(PVRSRV_ERROR_BP_ALREADY_SET) ++PVRE(PVRSRV_ERROR_FEATURE_DISABLED) ++PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED) ++PVRE(PVRSRV_ERROR_REG_CONFIG_FULL) ++PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE) ++PVRE(PVRSRV_ERROR_MEMORY_ACCESS) ++PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER) ++PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG) ++PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT) ++PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT) ++PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS) ++PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM) ++PVRE(PVRSRV_ERROR_DC_INVALID_SCALE) ++PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM) ++PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES) ++PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA) ++PVRE(PVRSRV_ERROR_NOT_READY) ++PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER) ++PVRE(PVRSRV_ERROR_NOT_FOUND) ++PVRE(PVRSRV_ERROR_ALREADY_OPEN) ++PVRE(PVRSRV_ERROR_STREAM_MISUSE) ++PVRE(PVRSRV_ERROR_STREAM_FULL) ++PVRE(PVRSRV_ERROR_STREAM_READLIMIT_REACHED) ++PVRE(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE) ++PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED) ++PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX) ++PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN) ++PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG) ++PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED) ++PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED) ++PVRE(PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL) ++PVRE(PVRSRV_ERROR_INIT_TDFWMEM_PAGES_FAIL) ++PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL) ++PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL) ++PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED) ++PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED) ++PVRE(PVRSRV_ERROR_ALREADY_EXISTS) ++PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE) ++PVRE(PVRSRV_ERROR_TASK_FAILED) ++PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) ++PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR) ++PVRE(PVRSRV_ERROR_INVALID_OFFSET) ++PVRE(PVRSRV_ERROR_CCCB_STALLED) ++PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE) ++PVRE(PVRSRV_ERROR_NOT_ENABLED) ++PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL) ++PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH) ++PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED) ++PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL) ++PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX) ++PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT) ++PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED) ++PVRE(PVRSRV_ERROR_UNABLE_TO_COMPILE_PDS) ++PVRE(PVRSRV_ERROR_INTERNAL_ERROR) ++PVRE(PVRSRV_ERROR_BRIDGE_EFAULT) ++PVRE(PVRSRV_ERROR_BRIDGE_EINVAL) ++PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM) ++PVRE(PVRSRV_ERROR_BRIDGE_ERANGE) ++PVRE(PVRSRV_ERROR_BRIDGE_EPERM) ++PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY) ++PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) ++PVRE(PVRSRV_ERROR_PROBE_DEFER) ++PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT) ++PVRE(PVRSRV_ERROR_CLOSE_FAILED) ++PVRE(PVRSRV_ERROR_NOT_INITIALISED) ++PVRE(PVRSRV_ERROR_CONVERSION_FAILED) ++PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) ++PVRE(PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL) ++PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED) ++PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED) ++PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED) ++PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED) ++PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS) ++PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE) 
++PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT) ++PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED) ++PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED) ++PVRE(PVRSRV_ERROR_SIGNAL_FAILED) ++PVRE(PVRSRV_ERROR_INVALID_NOTIF_STREAM) ++PVRE(PVRSRV_ERROR_INVALID_SPU_MASK) ++PVRE(PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED) ++PVRE(PVRSRV_ERROR_INVALID_PVZ_CONFIG) ++PVRE(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED) ++PVRE(PVRSRV_ERROR_NOT_SW_TIMELINE) ++PVRE(PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT) ++PVRE(PVRSRV_ERROR_INVALID_PVZ_OSID) ++PVRE(PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE) ++PVRE(PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG) ++PVRE(PVRSRV_ERROR_INTERRUPTED) ++PVRE(PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) ++PVRE(PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN) ++PVRE(PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF) ++PVRE(PVRSRV_ERROR_MULTIPLE_SECURITY_PDUMPS) ++PVRE(PVRSRV_ERROR_BAD_PARAM_SIZE) ++PVRE(PVRSRV_ERROR_INVALID_REQUEST) ++PVRE(PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES) ++PVRE(PVRSRV_ERROR_TEST_FAILED) ++PVRE(PVRSRV_ERROR_SYNC_PRIM_OP_NOT_SUPPORTED) ++PVRE(PVRSRV_ERROR_FAILED_TO_GET_VIRT_ADDR) ++PVRE(PVRSRV_ERROR_UNABLE_TO_FREE_RESOURCE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_SEMAPHORE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_SEMAPHORE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_SEMAPHORE) ++PVRE(PVRSRV_ERROR_TOO_MANY_SYNCS) ++PVRE(PVRSRV_ERROR_ION_NO_CLIENT) ++PVRE(PVRSRV_ERROR_ION_FAILED_TO_ALLOC) ++PVRE(PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) ++PVRE(PVRSRV_ERROR_REFCOUNT_OVERFLOW) ++PVRE(PVRSRV_ERROR_OUT_OF_RANGE) +diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_memalloc_physheap.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_memalloc_physheap.h +new file mode 100644 +index 000000000000..1072ba857c9a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_memalloc_physheap.h +@@ -0,0 +1,170 @@ ++/*************************************************************************/ /*! ++@File pvrsrv_memalloc_physheap.h ++@Title Services Phys Heap types ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Used in creating and allocating from Physical Heaps. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. 
++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef PVRSRV_MEMALLOC_PHYSHEAP_H ++#define PVRSRV_MEMALLOC_PHYSHEAP_H ++ ++#include "img_defs.h" ++ ++/* ++ * These IDs are replicated in the Device Memory allocation flags to allow ++ * allocations to be made in terms of their locality/use to ensure the correct ++ * physical heap is accessed for the given system/platform configuration. ++ * A system Phys Heap Config is linked to one or more Phys Heaps. When a heap ++ * is not present in the system configuration the allocation will fallback to ++ * the default GPU_LOCAL physical heap which all systems must define. ++ * See PVRSRV_MEMALLOCFLAGS_*_MAPPABLE_MASK. ++ * ++ * NOTE: Enum order important, table in physheap.c must change if order changed. ++ */ ++typedef IMG_UINT32 PVRSRV_PHYS_HEAP; ++/* Services client accessible heaps */ ++#define PVRSRV_PHYS_HEAP_DEFAULT 0U /* default phys heap for device memory allocations */ ++#define PVRSRV_PHYS_HEAP_GPU_LOCAL 1U /* used for buffers with more GPU access than CPU */ ++#define PVRSRV_PHYS_HEAP_CPU_LOCAL 2U /* used for buffers with more CPU access than GPU */ ++#define PVRSRV_PHYS_HEAP_GPU_PRIVATE 3U /* used for buffers that only required GPU read/write access, not visible to the CPU. */ ++ ++#define HEAPSTR(x) #x ++static inline const IMG_CHAR *PVRSRVGetClientPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeapID) ++{ ++ switch (ePhysHeapID) ++ { ++ case PVRSRV_PHYS_HEAP_DEFAULT: ++ return HEAPSTR(PVRSRV_PHYS_HEAP_DEFAULT); ++ case PVRSRV_PHYS_HEAP_GPU_LOCAL: ++ return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_LOCAL); ++ case PVRSRV_PHYS_HEAP_CPU_LOCAL: ++ return HEAPSTR(PVRSRV_PHYS_HEAP_CPU_LOCAL); ++ case PVRSRV_PHYS_HEAP_GPU_PRIVATE: ++ return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_PRIVATE); ++ default: ++ return "Unknown Heap"; ++ } ++} ++ ++/* Services internal heaps */ ++#define PVRSRV_PHYS_HEAP_FW_MAIN 4U /* runtime data, e.g. CCBs, sync objects */ ++#define PVRSRV_PHYS_HEAP_EXTERNAL 5U /* used by some PMR import/export factories where the physical memory heap is not managed by the pvrsrv driver */ ++#define PVRSRV_PHYS_HEAP_GPU_COHERENT 6U /* used for a cache coherent region */ ++#define PVRSRV_PHYS_HEAP_GPU_SECURE 7U /* used by security validation */ ++#define PVRSRV_PHYS_HEAP_FW_CONFIG 8U /* subheap of FW_MAIN, configuration data for FW init */ ++#define PVRSRV_PHYS_HEAP_FW_CODE 9U /* used by security validation or dedicated fw */ ++#define PVRSRV_PHYS_HEAP_FW_PRIV_DATA 10U /* internal FW data (like the stack, FW control data structures, etc.) 
*/ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP0 11U /* Host OS premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP1 12U /* Guest OS 1 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP2 13U /* Guest OS 2 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP3 14U /* Guest OS 3 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP4 15U /* Guest OS 4 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP5 16U /* Guest OS 5 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP6 17U /* Guest OS 6 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP7 18U /* Guest OS 7 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_LAST 19U ++ ++ ++static_assert(PVRSRV_PHYS_HEAP_LAST <= (0x1FU + 1U), "Ensure enum fits in memalloc flags bitfield."); ++ ++/*! Type conveys the class of physical heap to instantiate within Services ++ * for the physical pool of memory. */ ++typedef enum _PHYS_HEAP_TYPE_ ++{ ++ PHYS_HEAP_TYPE_UNKNOWN = 0, /*!< Not a valid value for any config */ ++ PHYS_HEAP_TYPE_UMA, /*!< Heap represents OS managed physical memory heap ++ i.e. system RAM. Unified Memory Architecture ++ physmem_osmem PMR factory */ ++ PHYS_HEAP_TYPE_LMA, /*!< Heap represents physical memory pool managed by ++ Services i.e. carve out from system RAM or local ++ card memory. Local Memory Architecture ++ physmem_lma PMR factory */ ++#if defined(__KERNEL__) ++ PHYS_HEAP_TYPE_DMA, /*!< Heap represents a physical memory pool managed by ++ Services, alias of LMA and is only used on ++ VZ non-native system configurations for ++ a heap used for PHYS_HEAP_USAGE_FW_MAIN tagged ++ buffers */ ++#if defined(SUPPORT_WRAP_EXTMEMOBJECT) ++ PHYS_HEAP_TYPE_WRAP, /*!< Heap used to group UM buffers given ++ to Services. Integrity OS port only. */ ++#endif ++#endif ++} PHYS_HEAP_TYPE; ++ ++/* Defines used when interpreting the ui32PhysHeapFlags in PHYS_HEAP_MEM_STATS ++ 0x000000000000dttt ++ d = is this the default heap? (1=yes, 0=no) ++ ttt = heap type (000 = PHYS_HEAP_TYPE_UNKNOWN, ++ 001 = PHYS_HEAP_TYPE_UMA, ++ 010 = PHYS_HEAP_TYPE_LMA, ++ 011 = PHYS_HEAP_TYPE_DMA) ++*/ ++#define PVRSRV_PHYS_HEAP_FLAGS_TYPE_MASK (0x7U << 0) ++#define PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT (0x1U << 7) ++ ++typedef struct PHYS_HEAP_MEM_STATS_TAG ++{ ++ IMG_UINT64 ui64TotalSize; ++ IMG_UINT64 ui64FreeSize; ++ IMG_UINT32 ui32PhysHeapFlags; ++}PHYS_HEAP_MEM_STATS, *PHYS_HEAP_MEM_STATS_PTR; ++ ++typedef struct PHYS_HEAP_MEM_STATS_PKD_TAG ++{ ++ IMG_UINT64 ui64TotalSize; ++ IMG_UINT64 ui64FreeSize; ++ IMG_UINT32 ui32PhysHeapFlags; ++ IMG_UINT32 ui32Dummy; ++}PHYS_HEAP_MEM_STATS_PKD, *PHYS_HEAP_MEM_STATS_PKD_PTR; ++ ++static inline const IMG_CHAR *PVRSRVGetClientPhysHeapTypeName(PHYS_HEAP_TYPE ePhysHeapType) ++{ ++ switch (ePhysHeapType) ++ { ++ case PHYS_HEAP_TYPE_UMA: ++ return HEAPSTR(PHYS_HEAP_TYPE_UMA); ++ case PHYS_HEAP_TYPE_LMA: ++ return HEAPSTR(PHYS_HEAP_TYPE_LMA); ++ default: ++ return "Unknown Heap Type"; ++ } ++} ++#undef HEAPSTR ++ ++#endif /* PVRSRV_MEMALLOC_PHYSHEAP_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_memallocflags.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_memallocflags.h +new file mode 100644 +index 000000000000..3b87dbf498d5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_memallocflags.h +@@ -0,0 +1,969 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description This file defines flags used on memory allocations and mappings ++ These flags are relevant throughout the memory management ++ software stack and are specified by users of services and ++ understood by all levels of the memory management in both ++ client and server. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVRSRV_MEMALLOCFLAGS_H ++#define PVRSRV_MEMALLOCFLAGS_H ++ ++#include "img_types.h" ++#include "pvrsrv_memalloc_physheap.h" ++ ++/*! ++ Type for specifying memory allocation flags. 
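++ The type is 64 bits wide; PVRSRV_MEMALLOCFLAGS_FMTSPEC below supplies a
++ matching printf-style format fragment, so (illustrative sketch, assuming
++ the usual "%" prefix convention for IMG_UINT64_FMTSPECx) flags can be
++ logged with:
++     PVR_DPF((PVR_DBG_MESSAGE,
++              "flags=0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC, uiFlags));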
++ */ ++ ++typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; ++#define PVRSRV_MEMALLOCFLAGS_FMTSPEC IMG_UINT64_FMTSPECx ++ ++#if defined(__KERNEL__) ++#include "pvrsrv_memallocflags_internal.h" ++#endif /* __KERNEL__ */ ++ ++/* ++ * --- MAPPING FLAGS 0..14 (15-bits) --- ++ * | 0-3 | 4-7 | 8-10 | 11-13 | 14 | ++ * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable | ++ * ++ * --- MISC FLAGS 15..23 (9-bits) --- ++ * | 15 | 17 | 18 | 19 | 20 | ++ * | Defer | SVM | Sparse-Dummy-Page | CPU-Cache-Clean | Sparse-Zero-Page | ++ * ++ * --- DEV CONTROL FLAGS 26..27 (2-bits) --- ++ * | 26-27 | ++ * | Device-Flags | ++ * ++ * --- MISC FLAGS 28..31 (4-bits) --- ++ * | 28 | 29 | 30 | 31 | ++ * | No-Cache-Align | Poison-On-Free | P.-On-Alloc | Zero-On-Alloc | ++ * ++ * --- VALIDATION FLAGS --- ++ * | 35 | ++ * | Shared-buffer | ++ * ++ * --- PHYS HEAP HINTS --- ++ * | 59-63 | ++ * | PhysHeap Hints | ++ * ++ */ ++ ++/* ++ * ********************************************************** ++ * * * ++ * * MAPPING FLAGS * ++ * * * ++ * ********************************************************** ++ */ ++ ++/*! ++ * This flag affects the device MMU protection flags, and specifies ++ * that the memory may be read by the GPU. ++ * ++ * Typically all device memory allocations would specify this flag. ++ * ++ * At the moment, memory allocations without this flag are not supported ++ * ++ * This flag will live with the PMR, thus subsequent mappings would ++ * honour this flag. ++ * ++ * This is a dual purpose flag. It specifies that memory is permitted ++ * to be read by the GPU, and also requests that the allocation is ++ * mapped into the GPU as a readable mapping ++ * ++ * To be clear: ++ * - When used as an argument on PMR creation; it specifies ++ * that GPU readable mappings will be _permitted_ ++ * - When used as an argument to a "map" function: it specifies ++ * that a GPU readable mapping is _desired_ ++ * - When used as an argument to "AllocDeviceMem": it specifies ++ * that the PMR will be created with permission to be mapped ++ * with a GPU readable mapping, _and_ that this PMR will be ++ * mapped with a GPU readable mapping. ++ * This distinction becomes important when (a) we export allocations; ++ * and (b) when we separate the creation of the PMR from the mapping. ++ */ ++#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (1ULL<<0) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0U) ++ ++/*! ++ * This flag affects the device MMU protection flags, and specifies ++ * that the memory may be written by the GPU ++ * ++ * Using this flag on an allocation signifies that the allocation is ++ * intended to be written by the GPU. ++ * ++ * Omitting this flag causes a read-only mapping. ++ * ++ * This flag will live with the PMR, thus subsequent mappings would ++ * honour this flag. ++ * ++ * This is a dual purpose flag. It specifies that memory is permitted ++ * to be written by the GPU, and also requests that the allocation is ++ * mapped into the GPU as a writable mapping (see note above about ++ * permission vs. mapping mode, and why this flag causes permissions ++ * to be inferred from mapping mode on first allocation) ++ * ++ * N.B. This flag has no relevance to the CPU's MMU mapping, if any, ++ * and would therefore not enforce read-only mapping on CPU. 
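++ *
++ * For example (illustrative): an allocation created with only
++ * PVRSRV_MEMALLOCFLAG_GPU_READABLE set is mapped read-only for the GPU,
++ * while any CPU view of it is governed solely by the
++ * PVRSRV_MEMALLOCFLAG_CPU_* flags defined below.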
++ */ ++#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (1ULL<<1) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0U) ++ ++/*! ++ The flag indicates whether an allocation can be mapped as GPU readable in another GPU memory context. ++ */ ++#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (1ULL<<2) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0U) ++ ++/*! ++ The flag indicates whether an allocation can be mapped as GPU writable in another GPU memory context. ++ */ ++#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1ULL<<3) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0U) ++ ++/*! ++ The flag indicates that an allocation is mapped as readable to the CPU. ++ */ ++#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (1ULL<<4) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0U) ++ ++/*! ++ The flag indicates that an allocation is mapped as writable to the CPU. ++ */ ++#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (1ULL<<5) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0U) ++ ++/*! ++ The flag indicates whether an allocation can be mapped as CPU readable in another CPU memory context. ++ */ ++#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (1ULL<<6) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0U) ++ ++/*! ++ The flag indicates whether an allocation can be mapped as CPU writable in another CPU memory context. ++ */ ++#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1ULL<<7) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0U) ++ ++ ++/* ++ * ********************************************************** ++ * * * ++ * * CACHE CONTROL FLAGS * ++ * * * ++ * ********************************************************** ++ */ ++ ++/* ++ GPU domain ++ ========== ++ ++ The following defines are used to control the GPU cache bit field. ++ The defines are mutually exclusive. 
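++
++ They are encoded values of a single 3-bit field (bits 8-10), not
++ independent bits: OR-ing two modes, e.g. (1ULL<<8) | (2ULL<<8), simply
++ yields a third encoding (3ULL<<8, the incoherent mode) rather than a
++ combination, so exactly one mode must be chosen per allocation.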
++ ++ A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU ++ cache bit field from the flags. This should be used whenever the GPU cache ++ mode needs to be determined. ++*/ ++ ++/*! ++ GPU domain. Flag indicating uncached memory. This means that any writes to memory ++ allocated with this flag are written straight to memory and thus are ++ coherent for any device in the system. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (1ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED) ++ ++/*! ++ GPU domain. Use write combiner (if supported) to combine sequential writes ++ together to reduce memory access by doing burst writes. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC (0ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC) ++ ++/*! ++ GPU domain. This flag affects the GPU MMU protection flags. ++ The allocation will be cached. ++ Services will try to set the coherent bit in the GPU MMU tables so the ++ GPU cache is snooping the CPU cache. If coherency is not supported the ++ caller is responsible to ensure the caches are up to date. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (2ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) ++ ++/*! ++ GPU domain. Request cached memory, but not coherent (i.e. no cache ++ snooping). Services will flush the GPU internal caches after every GPU ++ task so no cache maintenance requests from the users are necessary. ++ ++ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future ++ expansion. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (3ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT) ++ ++/*! ++ GPU domain. This flag is for internal use only and is used to indicate ++ that the underlying allocation should be cached on the GPU after all ++ the snooping and coherent checks have been done ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (7ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_CACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED) ++ ++/*! ++ GPU domain. GPU cache mode mask. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (7ULL<<8) ++ ++/*! ++ @Description A helper macro to obtain just the GPU cache bit field from the flags. ++ This should be used whenever the GPU cache mode needs to be determined. 
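++ For instance (illustrative), the PVRSRV_CHECK_GPU_* macros above
++ compare its result against a complete mode value, e.g.
++ PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED,
++ rather than testing individual bits.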
++ @Input uiFlags Allocation flags. ++ @Return Value of the GPU cache bit field. ++ */ ++#define PVRSRV_GPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) ++ ++ ++/* ++ CPU domain ++ ========== ++ ++ The following defines are used to control the CPU cache bit field. ++ The defines are mutually exclusive. ++ ++ A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU ++ cache bit field from the flags. This should be used whenever the CPU cache ++ mode needs to be determined. ++*/ ++ ++/*! ++ CPU domain. Use write combiner (if supported) to combine sequential writes ++ together to reduce memory access by doing burst writes. ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC (0ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC) ++ ++/*! ++ CPU domain. This flag affects the CPU MMU protection flags. ++ The allocation will be cached. ++ Services will try to set the coherent bit in the CPU MMU tables so the ++ CPU cache is snooping the GPU cache. If coherency is not supported the ++ caller is responsible to ensure the caches are up to date. ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (2ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) ++ ++/*! ++ CPU domain. Request cached memory, but not coherent (i.e. no cache ++ snooping). This means that if the allocation needs to transition from ++ one device to another services has to be informed so it can ++ flush/invalidate the appropriate caches. ++ ++ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future ++ expansion. ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (3ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) ++ ++/*! ++ CPU domain. This flag is for internal use only and is used to indicate ++ that the underlying allocation should be cached on the CPU ++ after all the snooping and coherent checks have been done ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (7ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_CACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED) ++ ++/*! ++ CPU domain. CPU cache mode mask ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (7ULL<<11) ++ ++/*! ++ @Description A helper macro to obtain just the CPU cache bit field from the flags. ++ This should be used whenever the CPU cache mode needs to be determined. ++ @Input uiFlags Allocation flags. ++ @Return Value of the CPU cache bit field. 
++ */ ++#define PVRSRV_CPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) ++ ++/* Helper flags for usual cases */ ++ ++/*! ++ * Memory will be write-combined on CPU and GPU ++ */ ++#define PVRSRV_MEMALLOCFLAG_UNCACHED_WC (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED_WC mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED_WC) ++ ++/*! ++ * Memory will be cached. ++ * Services will try to set the correct flags in the MMU tables. ++ * In case there is no coherency support the caller has to ensure caches are up to date */ ++#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_COHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_COHERENT) ++ ++/*! ++ * Memory will be cache-incoherent on CPU and GPU ++ */ ++#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT) ++ ++/*! ++ Cache mode mask ++*/ ++#define PVRSRV_CACHE_MODE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags)) ++ ++ ++/*! ++ CPU MMU Flags mask -- intended for use internal to services only ++ */ ++#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) ++ ++/*! ++ MMU Flags mask -- intended for use internal to services only - used for ++ partitioning the flags bits and determining which flags to pass down to ++ mmu_common.c ++ */ ++#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) ++ ++/*! ++ Indicates that the PMR created due to this allocation will support ++ in-kernel CPU mappings. Only privileged processes may use this flag as ++ it may cause wastage of precious kernel virtual memory on some platforms. ++ */ ++#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (1ULL<<14) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0U) ++ ++ ++ ++/* ++ * ++ * ********************************************************** ++ * * * ++ * * ALLOC MEMORY FLAGS * ++ * * * ++ * ********************************************************** ++ * ++ * (Bits 15) ++ * ++ */ ++#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC (1ULL<<15) ++#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0U) ++ ++/*! 
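++ Note (illustrative): PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC above is the
++ "Defer" bit (15) in the flag layout diagram near the top of this file:
++ no OS pages are taken at allocation time and physical backing is provided
++ later, hence the PVRSRV_CHECK_ON_DEMAND() name used to test it.
++ */
++
++/*!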
++
++/*!
++ Indicates that the allocation will be accessed by the CPU and GPU using
++ the same virtual address, i.e. for all SVM allocs,
++ IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR
++ */
++#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC (1ULL<<17)
++
++/*!
++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SVM_ALLOC flag is set.
++ @Input uiFlags Allocation flags.
++ @Return True if the flag is set, false otherwise
++ */
++#define PVRSRV_CHECK_SVM_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0U)
++
++/*!
++ Indicates that the particular memory being allocated is sparse and that
++ the sparse regions should not be backed by a dummy page.
++ */
++#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING (1ULL << 18)
++
++/*!
++ @Description Macro checking whether dummy-page backing is required, i.e.
++              whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING flag
++              is NOT set.
++ @Input uiFlags Allocation flags.
++ @Return True if dummy backing is required (flag clear), false otherwise
++ */
++#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0U)
++
++/*!
++ Used to force Services to carry out at least one CPU cache invalidate on a
++ CPU cached buffer during allocation of the memory. Applicable to incoherent
++ systems, it must be used for buffers which are CPU cached and which will not
++ be 100% written to by the CPU before the GPU accesses it. For performance
++ reasons, avoid usage if the whole buffer that is allocated is written to by
++ the CPU anyway before the next GPU kick, or if the system is coherent.
++ */
++#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (1ULL<<19)
++
++/*!
++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN flag is set.
++ @Input uiFlags Allocation flags.
++ @Return True if the flag is set, false otherwise
++ */
++#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0U)
++
++/*! PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING
++
++ Indicates that the particular memory being allocated is sparse and that
++ the sparse regions should be backed by the zero page. This differs from
++ the zero-on-alloc flag in that only physically unbacked pages are backed
++ by the zero page at the time of mapping.
++ The zero-backed page always has a read-only attribute, irrespective of
++ the attributes requested for the allocation.
++ */
++#define PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING (1ULL << 20)
++#define PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiFlags) (((uiFlags) & \
++		PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING)
++
++/*!
++ @Description Macro extracting the OS id from a variable containing memalloc flags
++ @Input uiFlags Allocation flags
++ @Return returns the value of the FW_ALLOC_OSID bitfield
++ */
++#define PVRSRV_FW_RAW_ALLOC_OSID(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \
++                                           >> PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT)
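PVRSRV_FW_RAW_ALLOC_OSID and its setter below form a mask-and-shift round trip over the FW_ALLOC_OSID bit field. The shift and mask are defined elsewhere in this header; the values in this stand-alone sketch are placeholders chosen only to demonstrate the round trip:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field position; the real PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_*
 * values are defined elsewhere in the header. */
#define OSID_SHIFT 24
#define OSID_MASK  (7ULL << OSID_SHIFT)

#define SET_OSID(osid) (((uint64_t)(osid) << OSID_SHIFT) & OSID_MASK)
#define GET_OSID(f)    (((f) & OSID_MASK) >> OSID_SHIFT)

int main(void)
{
    uint64_t flags = SET_OSID(5); /* other allocation flags may be ORed in */
    /* Masking on both sides of the shift keeps out-of-range input from
     * leaking into neighbouring flag bits. */
    printf("osid = %llu\n", (unsigned long long)GET_OSID(flags)); /* 5 */
    return 0;
}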
++
++/*!
++ @Description Macro converting an OS id value into a memalloc bitfield
++ @Input osid OS id
++ @Return returns a shifted bitfield with the OS id value
++ */
++#define PVRSRV_MEMALLOCFLAG_FW_RAW_ALLOC_OSID(osid) (((osid) << PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) \
++                                                     & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK)
++
++/*
++ *
++ * **********************************************************
++ * *                                                        *
++ * *          MEMORY ZEROING AND POISONING FLAGS            *
++ * *                                                        *
++ * **********************************************************
++ *
++ * Zero / Poison, on alloc/free
++ *
++ * We think the following use cases are required:
++ *
++ *  don't poison or zero on alloc or free
++ *     (normal operation, also most efficient)
++ *  poison on alloc
++ *     (for helping to highlight bugs)
++ *  poison on alloc and free
++ *     (for helping to highlight bugs)
++ *  zero on alloc
++ *     (avoid highlighting security issues in other uses of memory)
++ *  zero on alloc and poison on free
++ *     (avoid highlighting security issues in other uses of memory, while
++ *      helping to highlight a subset of bugs e.g. memory freed prematurely)
++ *
++ * Since there are more than 4 use cases, we can't encode this in just two
++ * bits, so we might as well have a separate flag for each of the three
++ * actions.
++ */
++
++/*!
++ Ensures that the memory allocated is initialised with zeroes.
++ */
++#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (1ULL<<31)
++
++/*!
++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC flag is set.
++ @Input uiFlags Allocation flags.
++ @Return True if the flag is set, false otherwise
++ */
++#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0U)
++
++/*!
++ Scribbles over the allocated memory with a poison value.
++
++ Not compatible with ZERO_ON_ALLOC.
++
++ Poisoning is very deliberately _not_ reflected in PDump as we want
++ a simulation to cry loudly if the initialised data propagates to a
++ result.
++ */
++#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (1ULL<<30)
++
++/*!
++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC flag is set.
++ @Input uiFlags Allocation flags.
++ @Return True if the flag is set, false otherwise
++ */
++#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0U)
++
++#if defined(DEBUG) || defined(SERVICES_SC)
++/*!
++ Causes memory to be trashed when freed. Used for debugging only; not to be
++ used as a security measure.
++ */
++#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1ULL<<29)
++
++/*!
++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_FREE flag is set.
++ @Input uiFlags Allocation flags.
++ @Return True if the flag is set, false otherwise
++ */
++#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0U)
++#endif /* DEBUG || SERVICES_SC */
++
++/*!
++ Avoid address alignment to a CPU or GPU cache line size.
++ */
++#define PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN (1ULL<<28)
++
++/*!
++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN flag is set.
++ @Input uiFlags Allocation flags.
++ @Return True if the flag is set, false otherwise
++ */
++#define PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN) != 0U)
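Zeroing and poisoning on alloc are mutually exclusive by definition (a buffer cannot be initialised to both patterns), so a caller or validation layer can reject flag words that request both. A stand-alone sketch of such a check, restating the bit positions above (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define ZERO_ON_ALLOC   (1ULL << 31)
#define POISON_ON_ALLOC (1ULL << 30)
#define POISON_ON_FREE  (1ULL << 29) /* only exists on DEBUG / SERVICES_SC builds */

/* Reject flag words that ask for both zeroing and poisoning on alloc. */
static int alloc_flags_valid(uint64_t f)
{
    return !((f & ZERO_ON_ALLOC) && (f & POISON_ON_ALLOC));
}

int main(void)
{
    printf("%d\n", alloc_flags_valid(ZERO_ON_ALLOC | POISON_ON_FREE));  /* 1: allowed */
    printf("%d\n", alloc_flags_valid(ZERO_ON_ALLOC | POISON_ON_ALLOC)); /* 0: rejected */
    return 0;
}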
++
++
++/*
++ *
++ * **********************************************************
++ * *                                                        *
++ * *              Device specific MMU flags                 *
++ * *                                                        *
++ * **********************************************************
++ *
++ * (Bits 26 to 27)
++ *
++ * Some services-controlled devices have device-specific control bits in
++ * their page table entries; we need to allow these flags to be passed down
++ * the memory management layers so the user can control these bits.
++ * For example, the RGX device has the file rgx_memallocflags.h.
++ */
++
++/*!
++ * Offset of device specific MMU flags.
++ */
++#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET 26
++
++/*!
++ * Mask for retrieving device specific MMU flags.
++ */
++#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK (0x3ULL << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
++
++/*!
++ @Description Helper macro for setting device specific MMU flags.
++ @Input n Flag index.
++ @Return Flag vector with the specified bit set.
++ */
++#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n) \
++			(((PVRSRV_MEMALLOCFLAGS_T)(n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \
++			PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
++
++/*
++ *
++ * **********************************************************
++ * *                                                        *
++ * *               Secure validation flags                  *
++ * *                                                        *
++ * **********************************************************
++ *
++ * (Bit 35)
++ *
++ */
++
++/*!
++ PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER
++ */
++
++#define PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER (1ULL<<35)
++#define PVRSRV_CHECK_SHARED_BUFFER(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER) != 0U)
++
++/*
++ *
++ * **********************************************************
++ * *                                                        *
++ * *                   Phys Heap Hints                      *
++ * *                                                        *
++ * **********************************************************
++ *
++ * (Bits 59 to 63)
++ *
++ */
++
++/*!
++ * Value of enum PVRSRV_PHYS_HEAP stored in memalloc flags. If not set,
++ * i.e. PVRSRV_PHYS_HEAP_DEFAULT (value 0), the default physical heap
++ * defined by the system layer is used.
++ */
++#define PVRSRV_PHYS_HEAP_HINT_SHIFT (59)
++#define PVRSRV_PHYS_HEAP_HINT_MASK (0x1FULL << PVRSRV_PHYS_HEAP_HINT_SHIFT)
++
++
++/*!
++ @Description Macro extracting the Phys Heap hint from a memalloc flag value.
++ @Input uiFlags Allocation flags
++ @Return returns the value of the PHYS_HEAP_HINT bitfield
++ */
++#define PVRSRV_GET_PHYS_HEAP_HINT(uiFlags) (((uiFlags) & PVRSRV_PHYS_HEAP_HINT_MASK) \
++                                            >> PVRSRV_PHYS_HEAP_HINT_SHIFT)
++
++/*!
++ @Description Macro converting a Phys Heap value into a memalloc bitfield
++ @Input PhysHeap Device Phys Heap
++ @Return returns a shifted bitfield with the Device Phys Heap value
++ */
++#define PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap) ((((PVRSRV_MEMALLOCFLAGS_T)PVRSRV_PHYS_HEAP_ ## PhysHeap) << \
++                                                      PVRSRV_PHYS_HEAP_HINT_SHIFT) \
++                                                      & PVRSRV_PHYS_HEAP_HINT_MASK)
++/*!
++ @Description Macro to replace an existing phys heap hint value in flags.
++ @Input PhysHeap Phys Heap Macro
++ @Input uiFlags Allocation flags
++ @Return N/A
++ */
++#define PVRSRV_SET_PHYS_HEAP_HINT(PhysHeap, uiFlags) (uiFlags) = ((uiFlags) & ~PVRSRV_PHYS_HEAP_HINT_MASK) | \
++                                                     PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap)
++
++/*!
++ @Description Macro to replace an existing phys heap hint using a Phys Heap value.
++ @Input Physheap Phys Heap value
++ @Input uiFlags Allocation flags
++ @Return N/A
++ */
++#define PVRSRV_CHANGE_PHYS_HEAP_HINT(Physheap, uiFlags) (uiFlags) = ((uiFlags) & ~PVRSRV_PHYS_HEAP_HINT_MASK) | \
++                                                        (((PVRSRV_MEMALLOCFLAGS_T)(Physheap) << \
++                                                        PVRSRV_PHYS_HEAP_HINT_SHIFT) \
++                                                        & PVRSRV_PHYS_HEAP_HINT_MASK)
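The hint setters are read-modify-write: they clear the 5-bit field at bits 59..63 and then OR in the new heap value, so a later hint cleanly replaces an earlier one. A stand-alone sketch of the PVRSRV_CHANGE_PHYS_HEAP_HINT / PVRSRV_GET_PHYS_HEAP_HINT pattern using plain stdint types and arbitrary heap values (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define HINT_SHIFT 59
#define HINT_MASK  (0x1FULL << HINT_SHIFT)

/* Clear the field, then write the new (masked) heap value. */
#define CHANGE_HINT(heap, f) ((f) = ((f) & ~HINT_MASK) | \
                                    (((uint64_t)(heap) << HINT_SHIFT) & HINT_MASK))
#define GET_HINT(f)          (((f) & HINT_MASK) >> HINT_SHIFT)

int main(void)
{
    uint64_t flags = 0; /* field value 0 selects the system default heap */

    CHANGE_HINT(3, flags); /* arbitrary non-default heap value */
    printf("hint = %llu\n", (unsigned long long)GET_HINT(flags)); /* 3 */

    CHANGE_HINT(1, flags); /* the old field is cleared, not ORed over */
    printf("hint = %llu\n", (unsigned long long)GET_HINT(flags)); /* 1 */
    return 0;
}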
++
++/*!
++ @Description Macro checking whether a given Phys Heap hint is set.
++ @Input PhysHeap Phys Heap name
++ @Input uiFlags Allocation flags.
++ @Return True if the hint is set, false otherwise
++ */
++#define PVRSRV_CHECK_PHYS_HEAP(PhysHeap, uiFlags) (PVRSRV_PHYS_HEAP_ ## PhysHeap == PVRSRV_GET_PHYS_HEAP_HINT(uiFlags))
++
++#define PVRSRV_CHECK_FW_MAIN(uiFlags) (PVRSRV_CHECK_PHYS_HEAP(FW_MAIN, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_CONFIG, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_CODE, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_PRIV_DATA, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP0, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP1, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP2, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP3, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP4, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP5, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP6, uiFlags) || \
++                                       PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP7, uiFlags))
++
++/*!
++ * Secure buffer mask -- Flags in the mask are allowed for secure buffers
++ * because they are not related to CPU mappings.
++ */
++#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
++                                          PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
++                                          PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
++                                          PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                          PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
++                                          PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
++                                          PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
++                                          PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED)
++
++/*!
++ * Trusted device mask -- Flags in the mask are allowed for a trusted device
++ * because the driver cannot access the memory.
++ */
++#if defined(DEBUG) || defined(SERVICES_SC)
++#define PVRSRV_MEMALLOCFLAGS_TDFWMASK ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                        PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                        PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
++                                        PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
++                                        PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
++#else
++#define PVRSRV_MEMALLOCFLAGS_TDFWMASK ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                        PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                        PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
++                                        PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
++#endif
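Because SECBUFMASK and TDFWMASK are complements (everything except the disallowed bits), a single AND scrubs a requested flag word down to what the configuration permits. A stand-alone sketch with a representative subset of bits (the real masks fold in many more; GPU_READABLE's position is assumed here):

#include <stdint.h>
#include <stdio.h>

/* Representative subset; positions as defined above, except GPU_READABLE,
 * whose bit position is assumed for the sketch. */
#define GPU_READABLE        (1ULL << 0)
#define KERNEL_CPU_MAPPABLE (1ULL << 14)
#define ZERO_ON_ALLOC       (1ULL << 31)

/* Complement-style mask: anything NOT listed survives the AND. */
#define TDFWMASK (~(KERNEL_CPU_MAPPABLE | ZERO_ON_ALLOC))

int main(void)
{
    uint64_t requested = GPU_READABLE | KERNEL_CPU_MAPPABLE | ZERO_ON_ALLOC;
    uint64_t granted   = requested & TDFWMASK;

    printf("GPU_READABLE kept:        %d\n", (granted & GPU_READABLE) != 0);        /* 1 */
    printf("KERNEL_CPU_MAPPABLE kept: %d\n", (granted & KERNEL_CPU_MAPPABLE) != 0); /* 0 */
    return 0;
}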
++
++/*!
++ PMR flags mask -- for internal services use only. This is the set of flags
++ that will be passed down and stored with the PMR; this also includes the
++ MMU flags which the PMR has to pass down to mmu_common.c at PMRMap time.
++*/
++#if defined(DEBUG) || defined(SERVICES_SC)
++#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
++                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
++                                           PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
++                                           PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
++                                           PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
++                                           PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
++                                           PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \
++                                           PVRSRV_PHYS_HEAP_HINT_MASK)
++#else
++#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
++                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
++                                           PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
++                                           PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
++                                           PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
++                                           PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \
++                                           PVRSRV_PHYS_HEAP_HINT_MASK)
++#endif
++
++/*!
++ * CPU mappable mask -- Any flag set in the mask requires memory to be CPU mappable
++ */
++#define PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
++                                                PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
++                                                PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                                PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                                PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE)
++/*!
++ RA differentiation mask
++
++ For use internal to services.
++
++ This is the set of flag bits that determine whether a pair of allocations
++ are permitted to live in the same page table. Allocations whose flags
++ differ in any of these places would be allocated from separate RA Imports
++ and therefore would never coexist in the same page.
++ Zeroing and poisoning of memory are special cases: the caller is
++ responsible for setting the sub-allocations to the required values, and
++ differentiating between zeroed and poisoned RA Imports makes no sense
++ because the memory might be reused.
++
++*/
++#if defined(DEBUG) || defined(SERVICES_SC)
++#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
++                                                      & \
++                                                      ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_FREE))
++#else
++#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
++                                                      & \
++                                                      ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
++#endif
++/*!
++ Flags that affect _allocation_
++*/
++#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU)
++
++/*!
++ Flags that affect _mapping_
++*/
++#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
++                                                  PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
++                                                  PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
++                                                  PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
++                                                  PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
++                                                  PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
++                                                  PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
++
++#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0U)
++#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
++#endif
++
++
++/*!
++ Flags that affect _physical allocations_ in the DevMemX API ++ */ ++#if defined(DEBUG) || defined(SERVICES_SC) ++#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ ++ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ ++ PVRSRV_PHYS_HEAP_HINT_MASK) ++#else ++#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ ++ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ ++ PVRSRV_PHYS_HEAP_HINT_MASK) ++#endif ++ ++/*! ++ Flags that affect _virtual allocations_ in the DevMemX API ++ */ ++#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ ++ PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) ++ ++#endif /* PVRSRV_MEMALLOCFLAGS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_memallocflags_internal.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_memallocflags_internal.h +new file mode 100644 +index 000000000000..4fee3d49b927 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_memallocflags_internal.h +@@ -0,0 +1,78 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management allocation flags for internal Services ++ use only ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This file defines flags used on memory allocations and mappings ++ These flags are relevant throughout the memory management ++ software stack and are specified by users of services and ++ understood by all levels of the memory management in the server ++ and in special cases in the client. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVRSRV_MEMALLOCFLAGS_INTERNAL_H ++#define PVRSRV_MEMALLOCFLAGS_INTERNAL_H ++ ++/*! ++ CPU domain. Request uncached memory. This means that any writes to memory ++ allocated with this flag are written straight to memory and thus are ++ coherent for any device in the system. ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED (1ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) ++ ++/*! ++ * Memory will be uncached on CPU and GPU ++ */ ++#define PVRSRV_MEMALLOCFLAG_UNCACHED (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_UNCACHED(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED) ++ ++#endif /* PVRSRV_MEMALLOCFLAGS_INTERNAL_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_sync_km.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_sync_km.h +new file mode 100644 +index 000000000000..04611f9f7cee +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_sync_km.h +@@ -0,0 +1,65 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR synchronisation interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Types for server side code ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef PVRSRV_SYNC_KM_H ++#define PVRSRV_SYNC_KM_H ++ ++#include ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define SYNC_FB_FILE_STRING_MAX 256 ++#define SYNC_FB_MODULE_STRING_LEN_MAX (32) ++#define SYNC_FB_DESC_STRING_LEN_MAX (32) ++ ++/* By default, fence-sync module emits into HWPerf (of course, if enabled) and ++ * considers a process (sleepable) context */ ++#define PVRSRV_FENCE_FLAG_NONE (0U) ++#define PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT (1U << 0) ++#define PVRSRV_FENCE_FLAG_CTX_ATOMIC (1U << 1) ++ ++#if defined(__cplusplus) ++} ++#endif ++#endif /* PVRSRV_SYNC_KM_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_tlcommon.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_tlcommon.h +new file mode 100644 +index 000000000000..28999e5d21b7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_tlcommon.h +@@ -0,0 +1,260 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services Transport Layer common types and definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Transport layer common types and definitions included into ++ both user mode and kernel mode source. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++#ifndef PVR_TLCOMMON_H
++#define PVR_TLCOMMON_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#include "img_defs.h"
++
++
++/*! Handle type for stream descriptor objects as created by this API */
++typedef IMG_HANDLE PVRSRVTL_SD;
++
++/*! Maximum stream name length including the null byte */
++#define PRVSRVTL_MAX_STREAM_NAME_SIZE 40U
++
++/*! Maximum number of streams expected to exist */
++#define PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER (32*PRVSRVTL_MAX_STREAM_NAME_SIZE)
++
++/*! Packet lengths are always rounded up to a multiple of 8 bytes */
++#define PVRSRVTL_PACKET_ALIGNMENT 8U
++#define PVRSRVTL_ALIGN(x) (((x)+PVRSRVTL_PACKET_ALIGNMENT-1U) & ~(PVRSRVTL_PACKET_ALIGNMENT-1U))
++
++
++/*! A packet is made up of a header structure followed by the data bytes.
++ * There are 3 types of packet: normal (has data), data lost and padding,
++ * see packet flags. Header kept small to reduce data overhead.
++ *
++ * If the ORDER of the structure members is changed, please UPDATE the
++ * PVRSRVTL_PACKET_FLAG_OFFSET macro.
++ *
++ * Layout of uiTypeSize member is :
++ *
++ * |<---------------------------32-bits------------------------------>|
++ * |<----8---->|<-----1----->|<----7--->|<------------16------------->|
++ * |   Type    | Drop-Oldest |  UNUSED  |            Size             |
++ *
++ */
++typedef struct
++{
++	IMG_UINT32 uiTypeSize; /*!< Type, Drop-Oldest flag & number of bytes following header */
++	IMG_UINT32 uiReserved; /*!< Reserved; packets and data must be 8-byte aligned */
++
++	/* First bytes of TL packet data follow header ... */
++} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR;
++
++/* Structure must always be a size multiple of 8 as the stream buffer is
++ * still an array of IMG_UINT32s.
++ */
++static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
++	      "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8");
++
++/*! Packet header reserved word fingerprint "TLP1" */
++#define PVRSRVTL_PACKETHDR_RESERVED 0x31504C54U
++
++/*! Packet header mask used to extract the size from the uiTypeSize member.
++ * Do not use directly, see GET macros.
++ */
++#define PVRSRVTL_PACKETHDR_SIZE_MASK 0x0000FFFFU
++#define PVRSRVTL_MAX_PACKET_SIZE (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU)
++
++
++/*! Packet header mask used to extract the type from the uiTypeSize member.
++ * Do not use directly, see GET macros.
++ */
++#define PVRSRVTL_PACKETHDR_TYPE_MASK 0xFF000000U
++#define PVRSRVTL_PACKETHDR_TYPE_OFFSET 24U
++
++/*! Packet header mask used to check if packets before this one were dropped
++ * or not. Do not use directly, see GET macros.
++ */
++#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK 0x00800000U
++#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET 23U
++
++/*! Packet type enumeration.
++ */
++typedef IMG_UINT32 PVRSRVTL_PACKETTYPE;
++
++/*! Undefined packet */
++#define PVRSRVTL_PACKETTYPE_UNDEF 0U
++
++/*! Normal packet type. Indicates data follows the header.
++ */
++#define PVRSRVTL_PACKETTYPE_DATA 1U
++
++/*! When seen this packet type indicates that at this moment in the stream
++ * packet(s) were not able to be accepted due to space constraints and
++ * that recent data may be lost - depends on how the producer handles the
++ * error. Such packets have no data, data length is 0.
++ */
++#define PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED 2U
++
++/*! Packets with this type set are padding packets that contain undefined
++ * data and must be ignored/skipped by the client. They are used when the
++ * circular stream buffer wraps around and there is not enough space for
++ * the data at the end of the buffer. Such packets have a length of 0 or
++ * more.
++ */
++#define PVRSRVTL_PACKETTYPE_PADDING 3U
++
++/*! This packet type conveys to the stream consumer that the stream
++ * producer has reached the end of data for that data sequence. The
++ * TLDaemon has several options for processing these packets that can
++ * be selected on a per stream basis.
++ */
++#define PVRSRVTL_PACKETTYPE_MARKER_EOS 4U
++
++/*! This is the same as PVRSRVTL_PACKETTYPE_MARKER_EOS but additionally
++ * removes the old data record output file before opening the new/next one.
++ */
++#define PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD 5U
++
++/*! Packet emitted on first stream opened by writer. Packet carries the name
++ * of the opened stream in the form of a null-terminated string.
++ */
++#define PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE 6U
++
++/*! Packet emitted on last stream closed by writer. Packet carries the name
++ * of the closed stream in the form of a null-terminated string.
++ */
++#define PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE 7U
++
++#define PVRSRVTL_PACKETTYPE_LAST 8U
++
++/* The SET_PACKET_* macros rely on the layout of the uiTypeSize member
++ * described above: the packet type lives in the top 8 bits and the data
++ * length in the bottom 16 bits.
++ */
++#define PVRSRVTL_SET_PACKET_DATA(len) (len) | (PVRSRVTL_PACKETTYPE_DATA << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++#define PVRSRVTL_SET_PACKET_PADDING(len) (len) | (PVRSRVTL_PACKETTYPE_PADDING << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++#define PVRSRVTL_SET_PACKET_WRITE_FAILED (0U) | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++#define PVRSRVTL_SET_PACKET_HDR(len, type) (len) | ((type) << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++
++/*! Returns the number of bytes of data in the packet.
++ * p may be any address type.
++ */
++#define GET_PACKET_DATA_LEN(p) \
++	((IMG_UINT32) ((PVRSRVTL_PPACKETHDR) (void *) (p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK)
++
++
++/*! Returns an IMG_BYTE* pointer to the first byte of data in the packet */
++#define GET_PACKET_DATA_PTR(p) \
++	(((IMG_UINT8 *) (void *) (p)) + sizeof(PVRSRVTL_PACKETHDR))
++
++/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type.
++ */
++#define GET_PACKET_HDR(p) ((PVRSRVTL_PPACKETHDR) ((void *) (p)))
++
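The GET macros make stream parsing a pointer walk: read the length from the low 16 bits of uiTypeSize, skip the 8-byte header, round the length up to the 8-byte alignment, and land on the next header. A stand-alone sketch of that walk over a local buffer, restating the constants above with plain stdint types (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Plain-C restatement of the TL packet constants above. */
#define ALIGNMENT   8u
#define ALIGN(x)    (((x) + ALIGNMENT - 1u) & ~(ALIGNMENT - 1u))
#define TYPE_OFFSET 24u
#define SIZE_MASK   0x0000FFFFu
#define TYPE_DATA   1u

typedef struct { uint32_t uiTypeSize; uint32_t uiReserved; } HDR;

/* Mirrors GET_NEXT_PACKET_ADDR: header + aligned data length. */
static HDR *next_packet(HDR *p)
{
    uint32_t len = p->uiTypeSize & SIZE_MASK;
    return (HDR *)((uint8_t *)(p + 1) + ALIGN(len));
}

int main(void)
{
    uint8_t buf[64] = {0};
    HDR *p = (HDR *)buf;
    p->uiTypeSize = 5u | (TYPE_DATA << TYPE_OFFSET); /* 5 data bytes */
    memcpy(p + 1, "hello", 5);

    HDR *q = next_packet(p); /* 5 rounds up to 8: next header at offset 16 */
    printf("offset of next packet: %td\n", (uint8_t *)q - buf); /* 16 */
    return 0;
}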
++/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next packet.
++ * It is up to the caller to determine if the new address is within the
++ * packet buffer.
++ */
++#define GET_NEXT_PACKET_ADDR(p) \
++	GET_PACKET_HDR( \
++		GET_PACKET_DATA_PTR(p) + \
++		( \
++			(GET_PACKET_DATA_LEN(p) + (PVRSRVTL_PACKET_ALIGNMENT-1U)) & \
++			(~(PVRSRVTL_PACKET_ALIGNMENT-1U)) \
++		) \
++	)
++
++/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR.
++ */
++#define GET_PACKET_TYPE(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++
++/*! Set PACKETS_DROPPED flag in packet header as a part of uiTypeSize.
++ * p is of type PVRSRVTL_PPACKETHDR.
++ */
++#define SET_PACKETS_DROPPED(p) (((p)->uiTypeSize) | (1UL << PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET))
++
++/*! Check if packets were dropped before this packet.
++ * p is of type PVRSRVTL_PPACKETHDR.
++ */
++#define CHECK_PACKETS_DROPPED(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)
++
++/*! Flags for use with PVRSRVTLOpenStream
++ * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available
++ * 0x02 - When the stream does not exist wait for a bit (2s) in
++ *        PVRSRVTLOpenStream() and then exit with a timeout error if it still
++ *        does not exist.
++ * 0x04 - Open stream for write only operations.
++ *        If the flag is not used the stream is opened as read-only. This
++ *        flag is required if one wants to call the reserve/commit/write
++ *        functions on the stream descriptor. Reads from a stream descriptor
++ *        opened with this flag will fail.
++ * 0x08 - Disable Producer Callback.
++ *        If this flag is set and the stream becomes empty, do not call any
++ *        associated producer callback to generate more data from the reader
++ *        context.
++ * 0x10 - Reset stream on open.
++ *        When this flag is used the stream will drop all of the stored data.
++ * 0x20 - Limit read position to the write position at the time the stream
++ *        was opened. Hence this flag will freeze the content read to that
++ *        produced before the stream was opened for reading.
++ * 0x40 - Ignore Open Callback.
++ *        When this flag is set ignore any OnReaderOpenCallback setting for
++ *        the stream. This allows access to the stream to be made without
++ *        generating any extra packets into the stream.
++ */
++
++#define PVRSRV_STREAM_FLAG_NONE                      (0U)
++#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING       (1U<<0)
++#define PVRSRV_STREAM_FLAG_OPEN_WAIT                 (1U<<1)
++#define PVRSRV_STREAM_FLAG_OPEN_WO                   (1U<<2)
++#define PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK (1U<<3)
++#define PVRSRV_STREAM_FLAG_RESET_ON_OPEN             (1U<<4)
++#define PVRSRV_STREAM_FLAG_READ_LIMIT                (1U<<5)
++#define PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK      (1U<<6)
++
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif /* PVR_TLCOMMON_H */
++/******************************************************************************
++ End of file (pvrsrv_tlcommon.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/include/pvrsrv_tlstreams.h b/drivers/gpu/drm/img-rogue/include/pvrsrv_tlstreams.h
+new file mode 100644
+index 000000000000..9064075ad5c0
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/pvrsrv_tlstreams.h
+@@ -0,0 +1,61 @@
++/*************************************************************************/ /*!
++@File
++@Title          Services Transport Layer stream names
++@Copyright      Copyright (c) Imagination Technologies Ltd.
All Rights Reserved ++@Description Transport layer common types and definitions included into ++ both user mode and kernel mode source. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVRSRV_TLSTREAMS_H ++#define PVRSRV_TLSTREAMS_H ++ ++#define PVRSRV_TL_CTLR_STREAM "tlctrl" ++ ++#define PVRSRV_TL_HWPERF_RGX_FW_STREAM "hwperf_fw_" ++#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_" ++ ++/* Host HWPerf client stream names are of the form 'hwperf_client_' */ ++#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM "hwperf_client_" ++#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u" ++ ++#endif /* PVRSRV_TLSTREAMS_H */ ++ ++/****************************************************************************** ++ End of file (pvrsrv_tlstreams.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/pvrversion.h b/drivers/gpu/drm/img-rogue/include/pvrversion.h +new file mode 100644 +index 000000000000..c62b3f752b1b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/pvrversion.h +@@ -0,0 +1,68 @@ ++/*************************************************************************/ /*! ++@File pvrversion.h ++@Title PowerVR version numbers and strings. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Version numbers and strings for PowerVR components. 
++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVRVERSION_H ++#define PVRVERSION_H ++ ++#define PVRVERSION_MAJ 1U ++#define PVRVERSION_MIN 17U ++ ++#define PVRVERSION_FAMILY "rogueddk" ++#define PVRVERSION_BRANCHNAME "1.17" ++#define PVRVERSION_BUILD 6210866 ++#define PVRVERSION_BSCONTROL "Rogue_DDK_Linux_WS" ++ ++#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.17@6210866" ++#define PVRVERSION_STRING_SHORT "1.17@6210866" ++ ++#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved." ++ ++#define PVRVERSION_BUILD_HI 621 ++#define PVRVERSION_BUILD_LO 866 ++#define PVRVERSION_STRING_NUMERIC "1.17.621.866" ++ ++#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U)) ++#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU) ++#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0U) & 0xFFFFU) ++ ++#endif /* PVRVERSION_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/rgx_common.h b/drivers/gpu/drm/img-rogue/include/rgx_common.h +new file mode 100644 +index 000000000000..b6ae1500acc3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgx_common.h +@@ -0,0 +1,235 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Common Types and Defines Header ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Common types and definitions for RGX software ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_COMMON_H ++#define RGX_COMMON_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include "img_defs.h" ++ ++/* Included to get the BVNC_KM_N defined and other feature defs */ ++#include "km/rgxdefs_km.h" ++ ++#include "rgx_common_asserts.h" ++ ++ ++/* Virtualisation validation builds are meant to test the VZ-related hardware without a fully virtualised platform. ++ * As such a driver can support either the vz-validation code or real virtualisation. ++ * Note: PVRSRV_VZ_NUM_OSID is the external build option, while RGX_NUM_OS_SUPPORTED is the internal symbol used in the DDK */ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) && (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)) ++#error "Invalid build configuration: Virtualisation support (PVRSRV_VZ_NUM_OSID > 1) and virtualisation validation code (SUPPORT_GPUVIRT_VALIDATION) are mutually exclusive." ++#endif ++ ++/* The RGXFWIF_DM defines assume only one of RGX_FEATURE_TLA or ++ * RGX_FEATURE_FASTRENDER_DM is present. Ensure this with a compile-time check. ++ */ ++#if defined(RGX_FEATURE_TLA) && defined(RGX_FEATURE_FASTRENDER_DM) ++#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!" ++#endif ++ ++/*! The master definition for data masters known to the firmware of RGX. 
++ * When a new DM is added to this list, a relevant entry should be added to
++ * the RGX_HWPERF_DM enum list.
++ * The DM in a V1 HWPerf packet uses this definition. */
++
++typedef IMG_UINT32 RGXFWIF_DM;
++
++#define RGXFWIF_DM_GP		IMG_UINT32_C(0)
++/* Either TDM or 2D DM is present. The above build-time error is present to verify this */
++#define RGXFWIF_DM_2D		IMG_UINT32_C(1) /* when RGX_FEATURE_TLA defined */
++#define RGXFWIF_DM_TDM		IMG_UINT32_C(1) /* when RGX_FEATURE_FASTRENDER_DM defined */
++
++#define RGXFWIF_DM_GEOM		IMG_UINT32_C(2)
++#define RGXFWIF_DM_3D		IMG_UINT32_C(3)
++#define RGXFWIF_DM_CDM		IMG_UINT32_C(4)
++#define RGXFWIF_DM_RAY		IMG_UINT32_C(5)
++#define RGXFWIF_DM_GEOM2	IMG_UINT32_C(6)
++#define RGXFWIF_DM_GEOM3	IMG_UINT32_C(7)
++#define RGXFWIF_DM_GEOM4	IMG_UINT32_C(8)
++
++#define RGXFWIF_DM_LAST		RGXFWIF_DM_GEOM4
++
++typedef IMG_UINT32 RGX_KICK_TYPE_DM;
++#define RGX_KICK_TYPE_DM_GP		IMG_UINT32_C(0x001)
++#define RGX_KICK_TYPE_DM_TDM_2D		IMG_UINT32_C(0x002)
++#define RGX_KICK_TYPE_DM_TA		IMG_UINT32_C(0x004)
++#define RGX_KICK_TYPE_DM_3D		IMG_UINT32_C(0x008)
++#define RGX_KICK_TYPE_DM_CDM		IMG_UINT32_C(0x010)
++#define RGX_KICK_TYPE_DM_RTU		IMG_UINT32_C(0x020)
++#define RGX_KICK_TYPE_DM_SHG		IMG_UINT32_C(0x040)
++#define RGX_KICK_TYPE_DM_TQ2D		IMG_UINT32_C(0x080)
++#define RGX_KICK_TYPE_DM_TQ3D		IMG_UINT32_C(0x100)
++#define RGX_KICK_TYPE_DM_RAY		IMG_UINT32_C(0x200)
++#define RGX_KICK_TYPE_DM_LAST		IMG_UINT32_C(0x400)
++
++/* Maximum number of DMs in use: GP, 2D/TDM, GEOM, 3D, CDM, RDM, GEOM2, GEOM3, GEOM4 */
++#define RGXFWIF_DM_MAX (RGXFWIF_DM_LAST + 1U)
++
++/*
++ * Data Master Tags to be appended to resources created on behalf of each RGX
++ * Context.
++ */
++#define RGX_RI_DM_TAG_KS   'K'
++#define RGX_RI_DM_TAG_CDM  'C'
++#define RGX_RI_DM_TAG_RC   'R' /* To be removed once TA/3D Timelines are split */
++#define RGX_RI_DM_TAG_TA   'V'
++#define RGX_RI_DM_TAG_GEOM 'V'
++#define RGX_RI_DM_TAG_3D   'P'
++#define RGX_RI_DM_TAG_TDM  'T'
++#define RGX_RI_DM_TAG_TQ2D '2'
++#define RGX_RI_DM_TAG_TQ3D 'Q'
++#define RGX_RI_DM_TAG_RAY  'r'
++
++/*
++ * Client API Tags to be appended to resources created on behalf of each
++ * Client API.
++ */
++#define RGX_RI_CLIENT_API_GLES1    '1'
++#define RGX_RI_CLIENT_API_GLES3    '3'
++#define RGX_RI_CLIENT_API_VULKAN   'V'
++#define RGX_RI_CLIENT_API_EGL      'E'
++#define RGX_RI_CLIENT_API_OPENCL   'C'
++#define RGX_RI_CLIENT_API_OPENGL   'G'
++#define RGX_RI_CLIENT_API_SERVICES 'S'
++#define RGX_RI_CLIENT_API_WSEGL    'W'
++#define RGX_RI_CLIENT_API_ANDROID  'A'
++#define RGX_RI_CLIENT_API_LWS      'L'
++
++/*
++ * Format an RI annotation for a given RGX Data Master context
++ */
++#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do \
++	{ \
++		(annotation)[0] = (dmTag); \
++		(annotation)[1] = (clientAPI); \
++		(annotation)[2] = '\0'; \
++	} while (false)
++
++/*!
++ ******************************************************************************
++ * RGXFW Compiler alignment definitions
++ *****************************************************************************/
++#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) || defined(INTEGRITY_OS)
++#define RGXFW_ALIGN __attribute__ ((aligned (8)))
++#define RGXFW_ALIGN_DCACHEL __attribute__((aligned (64)))
++#elif defined(_MSC_VER)
++#define RGXFW_ALIGN __declspec(align(8))
++#define RGXFW_ALIGN_DCACHEL __declspec(align(64))
++#pragma warning (disable : 4324)
++#else
++#error "Align MACROS need to be defined for this compiler"
++#endif
++
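The annotation helper above simply writes a two-character tag plus a NUL terminator, so a three-byte buffer suffices. A short stand-alone sketch of it in use, with tag values mirroring the tables above (illustrative only):

#include <stdio.h>

/* Tag values copied from the tables above. */
#define TAG_3D     'P'
#define API_VULKAN 'V'

/* Same shape as RGX_RI_FORMAT_DM_ANNOTATION: "<dm><api>\0". */
#define FORMAT_DM_ANNOTATION(a, dm, api) do { \
        (a)[0] = (dm); (a)[1] = (api); (a)[2] = '\0'; \
    } while (0)

int main(void)
{
    char annot[3];
    FORMAT_DM_ANNOTATION(annot, TAG_3D, API_VULKAN);
    printf("%s\n", annot); /* "PV": a Vulkan resource on the 3D Data Master */
    return 0;
}

++/*!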
++ ****************************************************************************** ++ * Force 8-byte alignment for structures allocated uncached. ++ *****************************************************************************/ ++#define UNCACHED_ALIGN RGXFW_ALIGN ++ ++ ++/*! ++ ****************************************************************************** ++ * GPU Utilisation states ++ *****************************************************************************/ ++#define RGXFWIF_GPU_UTIL_STATE_IDLE (0U) ++#define RGXFWIF_GPU_UTIL_STATE_ACTIVE (1U) ++#define RGXFWIF_GPU_UTIL_STATE_BLOCKED (2U) ++#define RGXFWIF_GPU_UTIL_STATE_NUM (3U) ++#define RGXFWIF_GPU_UTIL_STATE_MASK IMG_UINT64_C(0x0000000000000003) ++ ++ ++/* ++ * Maximum amount of register writes that can be done by the register ++ * programmer (FW or META DMA). This is not a HW limitation, it is only ++ * a protection against malformed inputs to the register programmer. ++ */ ++#define RGX_MAX_NUM_REGISTER_PROGRAMMER_WRITES (128U) ++ ++/* FW common context priority. */ ++/*! ++ * @AddToGroup WorkloadContexts ++ * @{ ++ */ ++#define RGX_CTX_PRIORITY_REALTIME (INT32_MAX) ++#define RGX_CTX_PRIORITY_HIGH (2U) /*!< HIGH priority */ ++#define RGX_CTX_PRIORITY_MEDIUM (1U) /*!< MEDIUM priority */ ++#define RGX_CTX_PRIORITY_LOW (0) /*!< LOW priority */ ++/*! ++ * @} End of AddToGroup WorkloadContexts ++ */ ++ ++ ++/* ++ * Use of the 32-bit context property flags mask ++ * ( X = taken/in use, - = available/unused ) ++ * ++ * 0 ++ * | ++ * -------------------------------x ++ */ ++/* ++ * Context creation flags ++ * (specify a context's properties at creation time) ++ */ ++#define RGX_CONTEXT_FLAG_DISABLESLR (1UL << 0) /*!< Disable SLR */ ++ ++/* Bitmask of context flags allowed to be modified after context create. */ ++#define RGX_CONTEXT_FLAGS_WRITEABLE_MASK (RGX_CONTEXT_FLAG_DISABLESLR) ++ ++/* List of attributes that may be set for a context */ ++typedef enum _RGX_CONTEXT_PROPERTY_ ++{ ++ RGX_CONTEXT_PROPERTY_FLAGS = 0, /*!< Context flags */ ++} RGX_CONTEXT_PROPERTY; ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_COMMON_H */ ++ ++/****************************************************************************** ++ End of file ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/rgx_common_asserts.h b/drivers/gpu/drm/img-rogue/include/rgx_common_asserts.h +new file mode 100644 +index 000000000000..c571cc6f008e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgx_common_asserts.h +@@ -0,0 +1,73 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Common Types and Defines Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Common types and definitions for RGX software ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_COMMON_ASSERTS_H ++#define RGX_COMMON_ASSERTS_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/*! This macro represents a mask of LSBs that must be zero on data structure ++ * sizes and offsets to ensure they are 8-byte granular on types shared between ++ * the FW and host driver */ ++#define RGX_FW_ALIGNMENT_LSB (7U) ++ ++/*! Macro to test structure size alignment */ ++#define RGX_FW_STRUCT_SIZE_ASSERT(_a) \ ++ static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0U, \ ++ "Size of " #_a " is not properly aligned") ++ ++/*! Macro to test structure member alignment */ ++#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b) \ ++ static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0U, \ ++ "Offset of " #_a "." #_b " is not properly aligned") ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_COMMON_ASSERTS_H */ ++ ++/****************************************************************************** ++ End of file ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/rgx_compat_bvnc.h b/drivers/gpu/drm/img-rogue/include/rgx_compat_bvnc.h +new file mode 100644 +index 000000000000..c3e1333cdb0f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgx_compat_bvnc.h +@@ -0,0 +1,140 @@ ++/*************************************************************************/ /*! ++@File rgx_compat_bvnc.h ++@Title BVNC compatibility check utilities ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Utility functions used for packing BNC and V. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#if !defined(RGX_COMPAT_BVNC_H)
++#define RGX_COMPAT_BVNC_H
++
++#include "img_types.h"
++
++#if defined(RGX_FIRMWARE) /* Services firmware */
++# include "rgxfw_utils.h"
++# define PVR_COMPAT_ASSERT RGXFW_ASSERT
++#elif !defined(RGX_BUILD_BINARY) /* Services host driver code */
++# include "pvr_debug.h"
++# define PVR_COMPAT_ASSERT PVR_ASSERT
++#else /* FW user-mode tools */
++# include <assert.h>
++# define PVR_COMPAT_ASSERT assert
++#endif
++
++/* 64bit endian conversion macros */
++#if defined(__BIG_ENDIAN__)
++#define RGX_INT64_TO_BE(N) (N)
++#define RGX_INT64_FROM_BE(N) (N)
++#define RGX_INT32_TO_BE(N) (N)
++#define RGX_INT32_FROM_BE(N) (N)
++#else
++#define RGX_INT64_TO_BE(N) \
++ ((((N) >> 56) & 0xff) \
++ | (((N) >> 40) & 0xff00) \
++ | (((N) >> 24) & 0xff0000) \
++ | (((N) >> 8) & 0xff000000U) \
++ | ((N) << 56) \
++ | (((N) & 0xff00) << 40) \
++ | (((N) & 0xff0000) << 24) \
++ | (((N) & 0xff000000U) << 8))
++#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N)
++
++#define RGX_INT32_TO_BE(N) \
++ ((((N) >> 24) & 0xff) \
++ | (((N) >> 8) & 0xff00) \
++ | ((N) << 24) \
++ | ((((N) & 0xff00) << 8)))
++#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N)
++#endif
++
++/******************************************************************************
++ * RGX Version packed into 64-bit (BVNC) to be used by Compatibility Check
++ *****************************************************************************/
++
++#define RGX_BVNC_PACK_SHIFT_B 48
++#define RGX_BVNC_PACK_SHIFT_V 32
++#define RGX_BVNC_PACK_SHIFT_N 16
++#define RGX_BVNC_PACK_SHIFT_C 0
++
++#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0xFFFF000000000000))
++#define RGX_BVNC_PACK_MASK_V (IMG_UINT64_C(0x0000FFFF00000000))
++#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000))
++#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF))
++
++#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B))
++#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_V) >> RGX_BVNC_PACK_SHIFT_V))
++#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N))
++#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C))
++
++#define RGX_BVNC_EQUAL(L,R,all,version,bvnc) do { \
++ (bvnc) = IMG_FALSE; \
++ (version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion); \
++ if (version) \
++ { \
++ (bvnc) = ((L).ui64BVNC == (R).ui64BVNC); \
++ } \
++ (all) = (version) && (bvnc); \
++ } while (false)
++
++
++/**************************************************************************//**
++ * Utility function for packing BVNC
++ *****************************************************************************/
++static inline IMG_UINT64 rgx_bvnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
++{
++ /*
++ * Test for input B, V, N and C exceeding max bit width.
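++ * Each field must fit in 16 bits. For illustration (arbitrary values):
++ * rgx_bvnc_pack(22, 86, 104, 218) yields 0x00160056006800DA, from which
++ * RGX_BVNC_PACKED_EXTR_B() recovers 22; any argument above 0xFFFF would
++ * trip the corresponding assert below.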
++ */ ++ PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0U); ++ PVR_COMPAT_ASSERT((ui32V & (~(RGX_BVNC_PACK_MASK_V >> RGX_BVNC_PACK_SHIFT_V))) == 0U); ++ PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0U); ++ PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0U); ++ ++ return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) | ++ ((IMG_UINT64)ui32V << RGX_BVNC_PACK_SHIFT_V) | ++ ((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) | ++ ((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C)); ++} ++ ++ ++#endif /* RGX_COMPAT_BVNC_H */ ++ ++/****************************************************************************** ++ End of file (rgx_compat_bvnc.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/rgx_fwif_resetframework.h b/drivers/gpu/drm/img-rogue/include/rgx_fwif_resetframework.h +new file mode 100644 +index 000000000000..e60bafd84536 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgx_fwif_resetframework.h +@@ -0,0 +1,70 @@ ++/*************************************************************************/ /*! ++@File rgx_fwif_resetframework.h ++@Title Post-reset work-around framework FW interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#if !defined(RGX_FWIF_RESETFRAMEWORK_H)
++#define RGX_FWIF_RESETFRAMEWORK_H
++
++#include "img_types.h"
++#include "rgx_fwif_shared.h"
++
++typedef struct
++{
++ union
++ {
++ IMG_UINT64 uCDMReg_CDM_CB_BASE; // defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
++ IMG_UINT64 uCDMReg_CDM_CTRL_STREAM_BASE; // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
++ };
++ IMG_UINT64 uCDMReg_CDM_CB_QUEUE; // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
++ IMG_UINT64 uCDMReg_CDM_CB; // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
++} RGXFWIF_RF_REGISTERS;
++
++typedef struct
++{
++ /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
++ RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters;
++
++} RGXFWIF_RF_CMD;
++
++/* to opaquely allocate and copy in the kernel */
++#define RGXFWIF_RF_CMD_SIZE sizeof(RGXFWIF_RF_CMD)
++
++#endif /* RGX_FWIF_RESETFRAMEWORK_H */
+diff --git a/drivers/gpu/drm/img-rogue/include/rgx_fwif_sf.h b/drivers/gpu/drm/img-rogue/include/rgx_fwif_sf.h
+new file mode 100644
+index 000000000000..9238cf8ca589
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/rgx_fwif_sf.h
+@@ -0,0 +1,931 @@
++/*************************************************************************/ /*!
++@File rgx_fwif_sf.h
++@Title RGX firmware interface string format specifiers
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Header for the rgx firmware logging messages. The following
++ list contains the messages the firmware prints. Changing anything
++ but the first column or spelling mistakes in the strings will
++ break compatibility with log files created with older/newer
++ firmware versions.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_FWIF_SF_H ++#define RGX_FWIF_SF_H ++ ++/****************************************************************************** ++ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you ++ * WILL BREAK fw tracing message compatibility with previous ++ * fw versions. Only add new ones, if so required. ++ *****************************************************************************/ ++/* Available log groups */ ++#define RGXFW_LOG_SFGROUPLIST \ ++ X(RGXFW_GROUP_NULL,NULL) \ ++ X(RGXFW_GROUP_MAIN,MAIN) \ ++ X(RGXFW_GROUP_CLEANUP,CLEANUP) \ ++ X(RGXFW_GROUP_CSW,CSW) \ ++ X(RGXFW_GROUP_PM, PM) \ ++ X(RGXFW_GROUP_RTD,RTD) \ ++ X(RGXFW_GROUP_SPM,SPM) \ ++ X(RGXFW_GROUP_MTS,MTS) \ ++ X(RGXFW_GROUP_BIF,BIF) \ ++ X(RGXFW_GROUP_MISC,MISC) \ ++ X(RGXFW_GROUP_POW,POW) \ ++ X(RGXFW_GROUP_HWR,HWR) \ ++ X(RGXFW_GROUP_HWP,HWP) \ ++ X(RGXFW_GROUP_RPM,RPM) \ ++ X(RGXFW_GROUP_DMA,DMA) \ ++ X(RGXFW_GROUP_DBG,DBG) ++ ++/*! ++ * @InGroup SRVAndFWTracing ++ * @Brief FW Trace log groups(GID) list ++ */ ++enum RGXFW_LOG_SFGROUPS { ++#define X(A,B) A, ++ RGXFW_LOG_SFGROUPLIST ++#undef X ++}; ++ ++#define IMG_SF_STRING_MAX_SIZE 256U ++ ++typedef struct { ++ IMG_UINT32 ui32Id; ++ IMG_CHAR sName[IMG_SF_STRING_MAX_SIZE]; ++} RGXFW_STID_FMT; /* pair of string format id and string formats */ ++ ++typedef struct { ++ IMG_UINT32 ui32Id; ++ const IMG_CHAR *psName; ++} RGXKM_STID_FMT; /* pair of string format id and string formats */ ++ ++/* Table of String Format specifiers, the group they belong and the number of ++ * arguments each expects. Xmacro styled macros are used to generate what is ++ * needed without requiring hand editing. ++ * ++ * id : id within a group ++ * gid : group id ++ * Sym name : name of enumerations used to identify message strings ++ * String : Actual string ++ * #args : number of arguments the string format requires ++ */ ++#define RGXFW_LOG_SFIDLIST \ ++/*id, gid, id name, string, # arguments */ \ ++X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string", 0) \ ++\ ++X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d", 6) \ ++X( 2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%x, HWRTData1State=%x", 2) \ ++X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d", 4) \ ++X( 4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished", 0) \ ++X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ ++X( 6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished", 0) \ ++X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. 
First kick:%d, Last kick:%d, CSW resume:%d, prio:%d", 7) \ ++X( 8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished", 0) \ ++X( 9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render", 0) \ ++X( 10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render", 0) \ ++X( 11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x", 2) \ ++X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ ++X( 13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished", 0) \ ++X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x", 3) \ ++X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %d", 2) \ ++X( 17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ ++X( 18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded", 0) \ ++X( 19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ ++X( 20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx 0x%08.8x", 1) \ ++X( 21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? requires 0x%08.8x", 4) \ ++X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %d", 2) \ ++X( 23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ ++X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of:", 1) \ ++X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ ++X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ ++X( 27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW", 0) \ ++X( 28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.", 0) \ ++X( 29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u", 1) \ ++X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = 0x%x, fw = 0x%x", 3) \ ++X( 31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered", 0) \ ++X( 32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler", 2) \ ++X( 33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8x", 1) \ ++X( 34, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state", 0) \ ++X( 35, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers", 0) \ ++X( 36, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u", 1) \ ++X( 37, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets", 1) \ ++X( 38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER_DEPRECATED, "Estimated Power 0x%x", 1) \ ++X( 39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u", 1) \ ++X( 40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u", 2) \ ++X( 41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %d failed: addresses = %d, sizes = 
%d", 3) \ ++X( 42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%x", 1) \ ++X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ ++X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d", 2) \ ++X( 45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down", 0) \ ++X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)", 2) \ ++X( 47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)", 0) \ ++X( 48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)", 0) \ ++X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: 0x%08.8x", 2) \ ++X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d", 7) \ ++X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x", 3) \ ++X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK", 1) \ ++X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty", 1) \ ++X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d", 8) \ ++X( 55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick", 0) \ ++X( 56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device", 1) \ ++X( 57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8x DM%u", 2) \ ++X( 58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ ++X( 59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED_DEPRECATED, "RDM finished on context %u", 1) \ ++X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ ++X( 61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED_DEPRECATED, "SHG finished", 0) \ ++X( 62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED_DEPRECATED, "FBA finished on context %u", 1) \ ++X( 63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed", 0) \ ++X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start", 1) \ ++X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete", 1) \ ++X( 66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE_DEPRECATED, "FC%u cCCB Woff update = %u", 2) \ ++X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ ++X( 68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_INIT, "GPU init", 0) \ ++X( 69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_INIT, "GPU Units init (# mask: 0x%x)", 1) \ ++X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d", 3) \ ++X( 71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x", 3) \ ++X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. 
(0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \ ++X( 73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.", 0) \ ++X( 74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DEPRECATED, "GPU has locked up (see HWR logs for more info)", 0) \ ++X( 75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)", 0) \ ++X( 76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)", 0) \ ++X( 77, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM_DEPRECATED, "Doppler out of memory event for FC %u", 1) \ ++X( 78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ ++X( 79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ ++X( 80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [0x%08.8x]", 1) \ ++X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %d", 2) \ ++X( 82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ ++X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %d", 2) \ ++X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! Context 0x%08x, SH requestor %d", 2) \ ++X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ ++X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ ++X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \ ++X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ ++X( 89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ ++X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! 
(Roff = %u, Woff = %u)", 3) \ ++X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d", 1) \ ++X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d", 1) \ ++X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz", 1) \ ++X( 94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ ++X( 95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DEPRECATED, "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x", 3) \ ++X( 96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_DEPRECATED, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x", 5) \ ++X( 97, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ ++X( 98, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled", 0) \ ++X( 99, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)", 3) \ ++X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)", 3) \ ++X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x", 4) \ ++X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM_DEPRECATED, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x", 4) \ ++X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM_DEPRECATED, "DM: %u signal check failed", 1) \ ++X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ ++X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished", 0) \ ++X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08x 0x%08x)", 4) \ ++X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT", 0) \ ++X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked", 0) \ ++X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA", 0) \ ++X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT_DEPRECATED, "BRN 54141 double hit after applying WA", 0) \ ++X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE_DEPRECATED, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x", 2) \ ++X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x", 4) \ ++X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL_DEPRECATED, "TDM stalled (Roff = %u, Woff = %u)", 2) \ ++X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx 0x%08.8x", 1) \ ++X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %d's priority from %u to %u", 3) \ ++X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed", 0) \ ++X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ ++X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %d. 
(PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ ++X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \ ++X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \ ++X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ ++X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 6) \ ++X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ ++X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ ++X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.", 1) \ ++X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.", 1) \ ++X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.", 1) \ ++X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed", 1) \ ++X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \ ++X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ ++X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. 
Triggered HCS (see HWR logs).", 1) \ ++X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %d ms", 1) \ ++X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \ ++X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %d: USCTiles=%d", 2) \ ++X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF_DEPRECATED, "Isolation grouping is disabled", 0) \ ++X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %d", 1) \ ++X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %d has come online", 1) \ ++X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %d has gone offline", 1) \ ++X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ ++X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS_DEPRECATED, "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ ++X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)", 6) \ ++X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 5) \ ++X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_DEINIT, "GPU deinit", 0) \ ++X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_DEINIT, "GPU units deinit", 0) \ ++X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x", 2) \ ++X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %d/%d", 2) \ ++X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store", 0) \ ++X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x", 3) \ ++X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND_DEPRECATED, "Unknown Command (eCmdType=0x%08x)", 1) \ ++X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x", 4) \ ++X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d", 5) \ ++X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u", 3) \ ++X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \ ++X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u", 2) \ ++X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 3) \ ++X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_DEPRECATED, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x", 3) \ ++X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x", 3) \ ++X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x", 4) \ ++X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 4) \ ++X(161, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID_DEPRECATED, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x", 3) \ ++X(162, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE_DEPRECATED, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x", 4) \ ++X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD_DEPRECATED, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u", 4) \ ++X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \ ++X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \ ++X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)", 4) \ ++X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "OSid %u has %u stale commands in its KCCB", 2) \ ++X(168, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_VCE_PAUSE, "Applying VCE pause", 0) \ ++X(169, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KCCB_UPDATE_RTN_SLOT_DEPRECATED, "OSid %u KCCB slot %u value updated to %u", 3) \ ++X(170, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x", 7) \ ++X(171, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ ++X(172, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ ++X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u", 2) \ ++X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x", 2) \ ++X(175, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \ ++X(176, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_INIT_CONFIG, "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x", 2) \ ++X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %d", 1) \ ++X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x", 3) \ ++X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %d triggered a reset", 1) \ ++X(181, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, Signal Id: %u", 2) \ ++X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEV_SERIES8_DEPRECATED, "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8.", 1) \ ++X(183, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INCONSISTENT_MMU_FLAGS, "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u", 4) \ ++X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d", 5) \ ++X(185, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x.", 3) \ 
++X(186, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66284_UPDATE, "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u", 3) \ ++X(187, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SPFILTER_UPDATES, "Signal updates: FIFO: %u, Signals: 0x%08x", 2) \ ++X(188, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_FBSC_CMD, "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x", 2) \ ++X(189, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN68497_BLIT, "Insert BRN68497 WA blit after TDM Context store.", 0) \ ++X(190, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PENDING_UFO_UPDATE_START, "UFO Updates for previously finished FWCtx 0x%08.8x", 1) \ ++X(191, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_RTA_PRESENT, "RTC with RTA present, %u active render targets", 1) \ ++X(192, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULL_RTAS, "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!", 0) \ ++X(193, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_COUNTER, "Block 0x%x / Counter 0x%x INVALID and ignored", 2) \ ++X(194, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_DEPRECATED, "ECC fault GPU=0x%08x FW=0x%08x", 2) \ ++X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %d", 1) \ ++X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER, "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \ ++X(197, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU-%u has locked up (see HWR logs for more info)", 1) \ ++X(198, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \ ++X(199, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DM, "GPU has locked up (see HWR logs for more info)", 0) \ ++X(200, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REPROCESS_XPU_EVENTS, "Reprocessing outstanding XPU events from cores 0x%02x", 1) \ ++X(201, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x", 3) \ ++X(202, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ ++X(203, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled Core %u (Roff = %u, Woff = %u)", 3) \ ++X(204, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_OFFSETS, "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ ++X(205, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_STALLED, "Compute stalled core %u (Roff = %u, Woff = %u, Size = %u)", 4) \ ++X(206, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_CORE_READ_OFFSET, "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 6) \ ++X(207, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_RESUMED_FROM_STALL, "TDM resumed core %u (Roff = %u, Woff = %u)", 3) \ ++X(208, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_RESUMED_FROM_STALL, "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)", 4) \ ++X(209, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_MTS_PERMISSION_CHANGED, " Updated permission for OSid %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 2) \ ++X(210, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST1, "Mask = 0x%X, mask2 = 0x%X", 2) \ ++X(211, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST2, " core %u, reg = %u, mask = 0x%X)", 3) \ ++X(212, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_SAFETY_BUS, "ECC fault received from safety bus: 0x%08x", 1) \ ++X(213, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_CONFIG, "Safety Watchdog threshold period set 
to 0x%x clock cycles", 1) \ ++X(214, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_TRIGGER, "MTS Safety Event trigged by the safety watchdog.", 0) \ ++X(215, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_USC_TASKS_RANGE, "DM%d USC tasks range limit 0 - %d, stride %d", 3) \ ++X(216, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_ECC_FAULT, "ECC fault GPU=0x%08x", 1) \ ++X(217, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_SAFETY_RESET, "GPU Hardware units reset to prevent transient faults.", 0) \ ++X(218, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %d", 2) \ ++X(219, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_DEPRECATED, "Kick Ray: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7)\ ++X(220, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_DEPRECATED, "Ray finished", 0) \ ++X(221, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \ ++X(222, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%d increasing to %d)", 2) \ ++X(223, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%d increasing to %d)", 2) \ ++X(224, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GEOM_OOM_DISALLOWED, "Geom OOM event not allowed", 0) \ ++X(225, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \ ++X(226, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SKIP_ALREADY_RUN_GEOM, "Skipping already executed TA FWCtx 0x%08.8x @ %d", 2) \ ++X(227, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ATTEMPT_TO_RUN_AHEAD_GEOM, "Attempt to execute TA FWCtx 0x%08.8x @ %d ahead of time on other GEOM", 2) \ ++X(228, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED2, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ ++X(229, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_PIPELINE, "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 12) \ ++X(230, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_PIPELINE, "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \ ++X(231, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 7) \ ++X(232, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED_PIPELINE, "TDM finished: Kick ID %u ", 1) \ ++X(233, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED_PIPELINE, "TA finished: Kick ID %u ", 1) \ ++X(234, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED_PIPELINE, "3D finished: Kick ID %u , HWRTData0State=%x, HWRTData1State=%x", 3) \ ++X(235, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED_PIPELINE, "Compute finished: Kick ID %u ", 1) \ ++X(236, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d, Base 0x%08x%08x. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \ ++X(237, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %d. 
(PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8)\ ++X(238, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_PIPELINE, "Ray finished: Kick ID %u ", 1) \ ++\ ++X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d", 2) \ ++X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u", 1) \ ++X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK_DEPRECATED, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x", 3) \ ++X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u", 1) \ ++X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL_DEPRECATED, "Kick MTS Bg task DM=All", 0) \ ++X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d", 1) \ ++X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d", 2) \ ++X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x", 2) \ ++X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x", 3) \ ++X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x", 3) \ ++X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %d, OSid = %u", 3) \ ++X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %d, OSid = %u", 3) \ ++X( 13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u", 1) \ ++X( 14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.", 0) \ ++X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d.", 7) \ ++X( 16, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC_DEPRECATED, "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u", 4) \ ++X( 17, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_RTN_VALUE, "KCCB Slot %u: Return value %u", 2) \ ++X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task OSid = %u", 1) \ ++X( 19, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, OSid=%u", 3) \ ++X( 20, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task (EVENT_STATUS=0x%08x)", 1) \ ++X( 21, RGXFW_GROUP_MTS, RGXFW_SF_MTS_VZ_SIDEBAND, "VZ sideband test, kicked with OSid=%u from MTS, OSid for test=%u", 2) \ ++\ ++X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned", 1) \ ++X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d", 3) \ ++X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%d, received cleanup request", 2) \ ++X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d", 3) \ ++X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy", 2) \ ++X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_DEPRECATED, "HWRTData [0x%08x] HW Context %u cleaned", 2) \ ++X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned", 1) \ ++X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned", 1) \ ++X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d", 3) \ ++X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData 
[0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d", 4) \ ++X( 11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request", 2) \ ++X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d", 3) \ ++X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d", 4) \ ++X( 14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_DEPRECATED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned", 2) \ ++X( 15, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x", 1) \ ++X( 16, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "Received cleanup request for HWRTData [0x%08x]", 1) \ ++X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d", 3) \ ++X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d", 3) \ ++\ ++X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8x needs resume", 1) \ ++X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_DEPRECATED, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ ++X( 3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%x", 1) \ ++X( 4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete", 0) \ ++X( 5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start", 0) \ ++X( 6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset", 0) \ ++X( 7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8x needs resume", 1) \ ++X( 8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8x resume", 1) \ ++X( 9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete", 0) \ ++X( 10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x", 3) \ ++X( 11, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start", 0) \ ++X( 12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8x resume", 1) \ ++X( 13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8x needs resume", 1) \ ++X( 14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ ++X( 15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%x, load 0x%x", 2) \ ++X( 16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete", 0) \ ++X( 17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start", 0) \ ++X( 18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d", 3) \ ++X( 19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u", 2) \ ++X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8x", 2) \ ++X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8x", 2) \ ++X( 22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME_DEPRECATED, "SHG FWCtx 0x%08.8x needs resume", 1) \ ++X( 23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME_DEPRECATED, "*** SHG FWCtx 0x%08.8x resume from 
snapshot buffer 0x%08x%08x", 3) \ ++X( 24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED_DEPRECATED, "SHG context shared alloc size store 0x%x, load 0x%x", 2) \ ++X( 25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE_DEPRECATED, "*** SHG context store complete", 0) \ ++X( 26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START_DEPRECATED, "*** SHG context store start", 0) \ ++X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d", 1) \ ++X( 28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.", 0) \ ++X( 29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u", 4) \ ++X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)", 2) \ ++X( 31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT_DEPRECATED, "TA context store hit BRN 52563: vertex store tasks outstanding", 0) \ ++X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)", 1) \ ++X( 33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \ ++X( 34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED2, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \ ++X( 35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start", 0) \ ++X( 36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete", 0) \ ++X( 37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME_DEPRECATED, "TDM context needs resume, header [0x%08.8x, 0x%08.8x]", 2) \ ++X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u", 8) \ ++X( 39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8x", 3) \ ++X( 40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8x", 3) \ ++X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)", 1) \ ++X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%d state: 0x%08.8x%08x", 3) \ ++X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%d state: 0x%08.8x%08x", 3) \ ++X( 44, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_IPP_STATE, "3D context resume IPP state: 0x%08.8x%08x", 2) \ ++X( 45, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_PIPES_EMPTY, "All 3D pipes empty after ISP tile mode store! 
IPP_status: 0x%08x", 1) \ ++X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%d state: 0x%08.8x%08x", 3) \ ++X( 47, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_LEVEL4_STORE_START, "*** 3D context store start version 4", 0) \ ++X( 48, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%d active core mask 0x%04.4x", 2) \ ++X( 49, RGXFW_GROUP_CSW, RGXFW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%d active core mask 0x%04.4x", 2) \ ++X( 50, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x", 5) \ ++X( 51, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_COMPLETE, "*** RDM FWCtx store complete", 0) \ ++X( 52, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_START, "*** RDM FWCtx store start", 0) \ ++X( 53, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_NEEDS_RESUME, "RDM FWCtx 0x%08.8x needs resume", 1) \ ++X( 54, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_RESUME, "RDM FWCtx 0x%08.8x resume", 1) \ ++\ ++X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%d secure=%d", 3) \ ++X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x", 1) \ ++X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC_DEPRECATED, "Alloc PC reg %d", 1) \ ++X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_GRAB, "Grab reg set %d refcount now %d", 2) \ ++X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_UNGRAB, "Ungrab reg set %d refcount now %d", 2) \ ++X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ ++X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%d, for BIFreq=%d", 2) \ ++X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x", 9) \ ++X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x", 4) \ ++X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context %d, Register's contents are now 0x%04x", 3) \ ++X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \ ++X( 12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_BIFREQ_DEPRECATED, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u", 5) \ ++X( 13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)", 1) \ ++X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%d secure=%d", 3) \ ++X( 15, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_DEPRECATED, "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ ++X( 16, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u", 4) \ ++X( 17, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DM, "Trust enabled:%d, for DM=%d", 2) \ ++X( 18, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_DM, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u", 5) \ ++X( 19, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup register set=%d DM=%d, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%d", 6) \ ++X( 20, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_ALLOC, "Alloc PC set %d as register range [%u - %u]", 3) \ ++\ ++X( 1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x", 1) \ ++X( 2, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x", 1) \ ++X( 3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, 
"GPIO enabled", 0) \ ++X( 4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled", 0) \ ++X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)", 1) \ ++X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))", 2) \ ++X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))", 2) \ ++X( 8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!", 0) \ ++X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)", 1) \ ++X( 10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x", 1) \ ++X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d", 2) \ ++X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d", 1) \ ++X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)", 2) \ ++X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)", 2) \ ++X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame", 1) \ ++X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame", 1) \ ++X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)", 1) \ ++X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d", 1) \ ++X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)", 1) \ ++X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)", 1) \ ++X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %d)", 1) \ ++X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE, "TRP state: %d", 1) \ ++X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE, "TRP failure: %d", 1) \ ++X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE, "SW TRP State: %d", 1) \ ++X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE, "SW TRP failure: %d", 1) \ ++X( 26, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HW_KICK, "HW kick event (%u)", 1) \ ++X( 27, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_CHECKSUMS, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \ ++X( 28, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_UNIT_CHECKSUMS, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 0x%08x", 6) \ ++X( 29, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ ++X( 30, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \ ++X( 31, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ ++\ ++X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \ ++X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%d, local:%d, mmu:%d", 8) \ ++X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ ++X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ ++X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE_DEPRECATED, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ ++X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED_DEPRECATED, "Grow for freelist ID=0x%08x denied by host", 1) \ ++X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ ++X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed", 1) \ ++X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d", 2) \ ++X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d", 2)\ ++X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ ++X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ ++X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ ++X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ ++X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x", 1) \ ++X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%d, local:%d", 7) \ ++X( 17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ ++X( 18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ ++X( 19, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE_VOLCANIC, "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ ++X( 20, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_FAILED, "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ ++X( 21, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_VOLCANIC, "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ ++X( 22, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_VOLCANIC, "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ ++X( 23, RGXFW_GROUP_PM, RGXFW_SF_PM_CHECK_FL_BASEADDR, "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)", 5) \ ++X( 24, RGXFW_GROUP_PM, RGXFW_SF_PM_ANALYSE_FL_GROW, "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x", 5) \ ++X( 25, RGXFW_GROUP_PM, RGXFW_SF_PM_ATTEMPT_FL_GROW, "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ ++X( 26, RGXFW_GROUP_PM, RGXFW_SF_PM_DEFER_FL_GROW, "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ ++X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, MemCtx = 0x%08x)", 4) \ ++X( 28, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT, "3D Timeout Now for FWCtx 0x%08.8x", 1) \ ++X( 29, RGXFW_GROUP_PM, RGXFW_SF_PM_RECYCLE, "GEOM PM Recycle for FWCtx 0x%08.8x", 1) \ ++X( 30, RGXFW_GROUP_PM, RGXFW_SF_PM_PRIMARY_CONFIG, "PM running primary config (Core %d)", 1) \ ++X( 31, RGXFW_GROUP_PM, RGXFW_SF_PM_SECONDARY_CONFIG, "PM running secondary config (Core %d)", 1) \ ++X( 32, RGXFW_GROUP_PM, RGXFW_SF_PM_TERTIARY_CONFIG, "PM running tertiary config (Core %d)", 1) \ ++X( 33, RGXFW_GROUP_PM, RGXFW_SF_PM_QUATERNARY_CONFIG, "PM running quaternary config (Core %d)", 1) \ ++\ ++X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS_DEPRECATED, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ ++X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS_DEPRECATED, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ ++X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW_DEPRECATED, "RPM request failed. Waiting for freelist grow.", 0) \ ++X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT_DEPRECATED, "RPM request failed. 
Aborting the current frame.", 0) \ ++X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW_DEPRECATED, "RPM waiting for pending grow on freelist 0x%08x", 1) \ ++X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %d, grow size %d", 3) \ ++X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD_DEPRECATED, "Freelist load: SHF = 0x%08x, SHG = 0x%08x", 2) \ ++X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08x.0x%08x", 2) \ ++X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08x.0x%08x", 2) \ ++X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)", 5) \ ++X( 11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART_DEPRECATED, "Restarting SHG", 0) \ ++X( 12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED_DEPRECATED, "Grow failed, aborting the current frame.", 0) \ ++X( 13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE_DEPRECATED, "RPM abort complete on HWFrameData [0x%08x].", 1) \ ++X( 14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT_DEPRECATED, "RPM freelist cleanup [0x%08x] requires abort to proceed.", 1) \ ++X( 15, RGXFW_GROUP_RPM, RGXFW_SF_RPM_RPM_PT_DEPRECATED, "RPM page table base register: 0x%08x.0x%08x", 2) \ ++X( 16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT_DEPRECATED, "Issuing RPM abort.", 0) \ ++X( 17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL_DEPRECATED, "RPM OOM received but toggle bits indicate free pages available", 0) \ ++X( 18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT_DEPRECATED, "RPM hardware timeout. Unable to process OOM event.", 0) \ ++X( 19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD_DEPRECATED_DEPRECATED, "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ ++X( 20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD_DEPRECATED, "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ ++X( 21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE_DEPRECATED, "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ ++X( 22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE_DEPRECATED, "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ ++\ ++X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u", 2) \ ++X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u", 2) \ ++X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d", 4) \ ++X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D_DEPRECATED, "Loading VFP table 0x%08x%08x for 3D", 2) \ ++X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA_DEPRECATED, "Loading VFP table 0x%08x%08x for TA", 2) \ ++X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ ++X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store", 0) \ ++X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No", 2) \ ++X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No", 2) \ ++X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: 
Load=Yes, Store=Yes", 3) \ ++X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \ ++X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \ ++X( 13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ ++X( 14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u", 2) \ ++X( 15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u", 2) \ ++X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ ++X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ ++X( 18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG_DEPRECATED, "Freelist 0x%x RESET!!!!!!!!", 1) \ ++X( 19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2_DEPRECATED, "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 5) \ ++X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u", 3) \ ++X( 21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)", 1) \ ++X( 22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed", 0) \ ++X( 23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d", 3) \ ++X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)", 3) \ ++X( 25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS_DEPRECATED, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 8) \ ++X( 26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u", 2) \ ++X( 27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)", 4) \ ++X( 28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x", 2) \ ++X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d", 3) \ ++X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ ++X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ ++X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D 
Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 5) \ ++X( 33, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 7) \ ++X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 4) \ ++X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)", 6) \ ++X( 36, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_TA, "TA RTData 0x%08x marked as killed.", 1) \ ++X( 37, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_3D, "3D RTData 0x%08x marked as killed.", 1) \ ++X( 38, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILL_TA_AFTER_RESTART, "RTData 0x%08x will be killed after TA restart.", 1) \ ++X( 39, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RENDERSTATE_RESET, "RTData 0x%08x Render State Buffer 0x%02x%08x will be reset.", 3) \ ++X( 40, RGXFW_GROUP_RTD, RGXFW_SF_RTD_GEOM_RENDERSTATE, "GEOM RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \ ++X( 41, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FRAG_RENDERSTATE, "FRAG RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \ ++\ ++X( 1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render", 0) \ ++X( 2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render", 0) \ ++X( 3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL_DEPRECATED, "3D MemFree: Local FL 0x%08x", 1) \ ++X( 4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU_DEPRECATED, "3D MemFree: MMU FL 0x%08x", 1) \ ++X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL_DEPRECATED, "3D MemFree: Global FL 0x%08x", 1) \ ++X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_DEPRECATED, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 6) \ ++X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x", 3) \ ++X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \ ++X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED_DEPRECATED, "Partial render avoided", 0) \ ++X( 10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED_DEPRECATED, "Partial render discarded", 0) \ ++X( 11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished", 0) \ ++X( 12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG_DEPRECATED, "SPM Owner = 3D-BG", 0) \ ++X( 13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ_DEPRECATED, "SPM Owner = 3D-IRQ", 0) \ ++X( 14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE_DEPRECATED, "SPM Owner = NONE", 0) \ ++X( 15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG_DEPRECATED, "SPM Owner = TA-BG", 0) \ ++X( 16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ_DEPRECATED, "SPM Owner = TA-IRQ", 0) \ ++X( 17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x", 2) \ ++X( 18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x", 2) \ ++X( 19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x", 2) \ ++X( 20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x", 2) \ ++X( 21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER_DEPRECATED, "No deferred ZS Buffer provided", 0) \ ++X( 22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)", 1) \ ++X( 23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED_DEPRECATED, "No need 
to populate ZS Buffer (ID=0x%08x)", 1) \ ++X( 24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)", 1) \ ++X( 25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED_DEPRECATED, "No need to unpopulate ZS Buffer (ID=0x%08x)", 1) \ ++X( 26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_DEPRECATED, "Send ZS-Buffer backing request to host (ID=0x%08x)", 1) \ ++X( 27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_DEPRECATED, "Send ZS-Buffer unbacking request to host (ID=0x%08x)", 1) \ ++X( 28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \ ++X( 29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \ ++X( 30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)", 1) \ ++X( 31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)", 1) \ ++X( 32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none", 0) \ ++X( 33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked", 0) \ ++X( 34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow", 0) \ ++X( 35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW", 0) \ ++X( 36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running", 0) \ ++X( 37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided", 0) \ ++X( 38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed", 0) \ ++X( 39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)", 2) \ ++X( 40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDedected flag", 0) \ ++X( 41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x", 1) \ ++X( 42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)", 1) \ ++X( 43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u", 5) \ ++X( 44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA_WITH_SP, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u", 4) \ ++X( 45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u", 5) \ ++X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided", 1) \ ++X( 47, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)", 1) \ ++X( 48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)", 1) \ ++X( 49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)", 1) \ ++X( 50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)", 1) \ ++X( 51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. 
Previous request still pending (ID=0x%08x)", 1) \ ++X( 52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \ ++X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)", 2) \ ++X( 54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u", 4) \ ++X( 66, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ ++X( 67, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u", 3) \ ++X( 68, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x", 3) \ ++X( 69, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_FORCEFREE, "SPM State = PR force free", 0) \ ++\ ++X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \ ++X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_IDLE, "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x", 3) \ ++X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d", 4) \ ++X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \ ++X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_OFF, "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ ++X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)", 2) \ ++X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d", 2) \ ++X( 11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init", 0) \ ++X( 12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: 0x%x)", 1) \ ++X( 13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.", 0) \ ++X( 14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.", 0) \ ++X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \ ++X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d", 2) \ ++X( 17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37270_DEPRECATED, "Request power up due to BRN37270. Pow stat int: 0x%x", 1) \ ++X( 18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x", 3) \ ++X( 19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%x", 1) \ ++X( 20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%x", 1) \ ++X( 21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. 
Core clock: %d Hz", 2) \ ++X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %d dusts powered.", 2) \ ++X( 25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.", 0) \ ++X( 26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u", 1) \ ++X( 27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED2, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x", 3) \ ++X( 28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x", 2) \ ++X( 29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x", 2) \ ++X( 30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = %u", 1) \ ++X( 31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = %u percent", 1) \ ++X( 32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = %u.%u", 2) \ ++X( 33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x", 1) \ ++X( 34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x", 2) \ ++X( 35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x", 2) \ ++X( 36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x", 1) \ ++X( 37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.", 0) \ ++X( 38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE_DEPRECATED, "Proactive DVFS: Invalid node passed to function.", 0) \ ++X( 39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS_DEPRECATED, "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u", 1) \ ++X( 40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u", 1) \ ++X( 41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u", 1) \ ++X( 42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.", 0) \ ++X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d", 2) \ ++X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %d due to BRN59042.", 1) \ ++X( 45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u", 5) \ ++X( 47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \ ++X( 48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle", 0) \ ++X( 49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active", 0) \ ++X( 50, RGXFW_GROUP_POW, RGXFW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. 
Buffer too small.", 1) \ ++X( 51, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT, "Power controller returned ABORT for last request so retrying.", 0) \ ++X( 52, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST_DEPRECATED, "Discarding invalid power request: type 0x%x, DM %u", 2) \ ++X( 53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ ++X( 54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ ++X( 55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)", 1) \ ++X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \ ++X( 57, RGXFW_GROUP_POW, RGXFW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u", 2) \ ++X( 58, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x", 1) \ ++X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), pow flags: 0x%x", 2) \ ++X( 60, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x", 1) \ ++X( 61, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x", 3) \ ++X( 62, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x", 2) \ ++X( 63, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_CHANGE_NOT_IDLE, "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)", 1) \ ++X( 64, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_SPU_POWER_MASK, "Invalid SPU power mask 0x%x! Changing to 1", 1) \ ++X( 65, RGXFW_GROUP_POW, RGXFW_SF_POW_CLKDIV_UPDATE, "Proactive DVFS: Send OPP %u with clock divider value %u", 2) \ ++X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "PPA block started in perf validation mode.", 0) \ ++X( 67, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESET, "Reset PPA block state %u (1=reset, 0=recalculate).", 1) \ ++X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%d last request so retrying.", 1) \ ++X( 69, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x", 3) \ ++X( 70, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x", 5) \ ++X( 71, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x", 4) \ ++X( 72, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RAC, "RAC pending? %d, RAC Active? 
%d", 2) \ ++X( 73, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RAC, "Initiate powoff query for RAC.", 0) \ ++\ ++X( 1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ ++X( 2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ ++X( 3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW", 0) \ ++X( 4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.", 0) \ ++X( 5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: 0x%08.8x", 2) \ ++X( 6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ ++X( 7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)", 3) \ ++X( 8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08x->0x%08x)", 3) \ ++X( 9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ ++X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ ++X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)", 3) \ ++X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)", 4) \ ++X( 13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)", 3) \ ++X( 14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08x", 2) \ ++X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d", 6) \ ++X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d", 2) \ ++X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d", 5) \ ++X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d", 8) \ ++X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%d", 2) \ ++X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x.", 3) \ ++X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)", 2) \ ++X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)", 4) \ ++X( 23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED2, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x", 2) \ ++X( 24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x", 5) \ ++X( 25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered", 1) \ ++X( 26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x", 2) \ ++X( 27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction", 0) \ ++X( 28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: 0x%08.8x. 
Need to skip to next command", 2) \ ++X( 29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. Need to skip to next command", 2) \ ++X( 30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup", 2) \ ++X( 31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED2, "GPU has locked up", 0) \ ++X( 32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR", 1) \ ++X( 33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x", 2) \ ++X( 34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED2, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)", 1) \ ++X( 35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out", 1) \ ++X( 36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x", 1) \ ++X( 37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08x", 2) \ ++X( 38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline", 0) \ ++X( 39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll", 0) \ ++X( 40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x", 2) \ ++X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%d, poll failures: 0x%08x)", 2) \ ++X( 42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08x", 1) \ ++X( 43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)", 1) \ ++X( 44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).", 1) \ ++X( 45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)", 2) \ ++X( 46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_DEPRECATED, "Fast CRC Check result for DM%u is HWRNeeded=%u", 2) \ ++X( 47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK_DEPRECATED, "Full Signature Check result for DM%u is HWRNeeded=%u", 2) \ ++X( 48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u", 3) \ ++X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 3) \ ++X( 50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK_DEPRECATED, "Deadline counter for DM%u is HWRDeadline=%u", 2) \ ++X( 51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction", 1) \ ++X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%d)", 2) \ ++X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete", 1) \ ++X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u", 4) \ ++X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed", 1) \ ++X( 56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ ++X( 57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ ++X( 58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u 
used by DM%u", 2) \ ++X( 59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty", 1) \ ++X( 60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \ ++X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)", 1) \ ++X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)", 1) \ ++X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction", 2) \ ++X( 64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)", 5) \ ++X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u", 4) \ ++X( 66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)", 3) \ ++X( 67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance", 1) \ ++X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%d)", 2) \ ++X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)", 4) \ ++X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 5) \ ++X( 71, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_LONG_HW_POLL, "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)", 3) \ ++X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%d)", 1) \ ++X( 73, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %d ticks and deadline is %d ticks", 3) \ ++X( 74, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK_DEPRECATED, "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 5) \ ++X( 75, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 6) \ ++X( 76, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU-%u has locked up", 1) \ ++X( 77, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DM, "DM%u has locked up", 1) \ ++X( 78, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %d RGX_CR_EVENT_STATUS=0x%08x", 2) \ ++X( 79, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MULTICORE_EVENT_STATUS_REG, "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x", 2) \ ++X( 80, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \ ++X( 81, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)", 3) \ ++X( 82, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)", 4) \ ++X( 83, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)", 4) \ ++X( 84, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ ++X( 85, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ ++X( 86, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, 
DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 4) \ ++X( 87, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK, "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 6) \ ++X( 88, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_RISCV_FAULT, "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)", 3) \ ++X( 89, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "TEXAS1_PFS poll failed on core %d with value 0x%08x", 2) \ ++X( 90, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "BIF_PFS poll failed on core %d with value 0x%08x", 2) \ ++X( 91, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x", 2) \ ++X( 92, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x", 2) \ ++X( 93, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x", 2) \ ++X( 94, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x", 2) \ ++X( 95, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "TEXAS%d_PFS poll failed on core %d with value 0x%08x", 3) \ ++X( 96, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EXTRA_CHECK, "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ ++X( 97, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WRITE_TO_GPU_READONLY_ADDR, "FW attempted to write to read-only GPU address 0x%08x", 1) \ ++\ ++X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u", 2) \ ++X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW", 1) \ ++X( 3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW", 1) \ ++X( 4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x", 2) \ ++X( 5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x", 1) \ ++X( 6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x", 1) \ ++X( 7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping", 1) \ ++X( 8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x", 1) \ ++X( 9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x", 1) \ ++X( 10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x", 1) \ ++X( 11, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x", 2) \ ++X( 12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x", 1) \ ++X( 13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x", 2) \ ++X( 14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver", 1) \ ++X( 15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver", 0) \ ++X( 16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: 0x%x value: 0x%x", 2) \ ++X( 17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u ID:0x%x", 2) \ ++X( 18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded", 3) \ ++X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %d", 1) \ ++X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. 
Use only blocks lower than %d. The package will be discarded", 2) \ ++X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d", 2) \ ++X( 22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter 0x%x is 0x%x ?", 2) \ ++X( 23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset", 1) \ ++X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%d)", 1) \ ++X( 25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \ ++X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ ++X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d", 1) \ ++X( 28, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKENA, "Block 0x%x ENABLED", 1) \ ++X( 29, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKDIS, "Block 0x%x DISABLED", 1) \ ++X( 30, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK_INSTANCE, "Accessing Indirect block 0x%x, instance %u", 2) \ ++X( 31, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTRVAL, "Counter register 0x%x, Value 0x%x", 2) \ ++X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %d", 1) \ ++X( 33, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTLBLK, "Block 0x%x mapped to Ctl Idx %u", 2) \ ++X( 34, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_WORKEST_EN, "Block(s) in use for workload estimation.", 0) \ ++X( 35, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCCTR, "GPU %u Cycle counter 0x%x, Value 0x%x", 3) \ ++X( 36, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCMAX, "GPU Mask 0x%x Cycle counter 0x%x, Value 0x%x", 3) \ ++X( 37, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_IGNORE_BLOCKS, "Blocks IGNORED for GPU %u", 1) \ ++\ ++X( 1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST_DEPRECATED, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u", 5) \ ++X( 2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u", 4) \ ++X( 3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x", 1) \ ++X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer of type 0x%02x completion...", 1) \ ++X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed", 3) \ ++X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)", 3) \ ++X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure", 1) \ ++X( 8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead", 2) \ ++X( 9, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request on ch. 
%u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u", 7) \
++\
++X( 1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%08x 0x%08x", 2) \
++X( 2, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1HEX, "0x%08x", 1) \
++X( 3, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2HEX, "0x%08x 0x%08x", 2) \
++X( 4, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3HEX, "0x%08x 0x%08x 0x%08x", 3) \
++X( 5, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4HEX, "0x%08x 0x%08x 0x%08x 0x%08x", 4) \
++X( 6, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5) \
++X( 7, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 6) \
++X( 8, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 7) \
++X( 9, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 8) \
++X( 10, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1SIGNED, "%d", 1) \
++X( 11, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2SIGNED, "%d %d", 2) \
++X( 12, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3SIGNED, "%d %d %d", 3) \
++X( 13, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4SIGNED, "%d %d %d %d", 4) \
++X( 14, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5SIGNED, "%d %d %d %d %d", 5) \
++X( 15, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6SIGNED, "%d %d %d %d %d %d", 6) \
++X( 16, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7SIGNED, "%d %d %d %d %d %d %d", 7) \
++X( 17, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8SIGNED, "%d %d %d %d %d %d %d %d", 8) \
++X( 18, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1UNSIGNED, "%u", 1) \
++X( 19, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2UNSIGNED, "%u %u", 2) \
++X( 20, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3UNSIGNED, "%u %u %u", 3) \
++X( 21, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4UNSIGNED, "%u %u %u %u", 4) \
++X( 22, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5UNSIGNED, "%u %u %u %u %u", 5) \
++X( 23, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6UNSIGNED, "%u %u %u %u %u %u", 6) \
++X( 24, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u", 7) \
++X( 25, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u", 8) \
++\
++X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string", 15)
++
++
++/* The symbolic names found in the table above are assigned a ui32 value of
++ * the following format:
++ * 31 30 28 27 20 19 16 15 12 11 0 bits
++ * - --- ---- ---- ---- ---- ---- ---- ----
++ * 0-11: id number
++ * 12-15: group id number
++ * 16-19: number of parameters
++ * 20-27: unused
++ * 28-30: active: identify SF packet, otherwise regular int32
++ * 31: reserved for signed/unsigned compatibility
++ *
++ * The following macro assigns those values to the enum generated SF ids list.
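++ *
++ * Worked example (with a hypothetical id 9, group value 3 and 4 parameters):
++ * RGXFW_LOG_CREATESFID(9, 3, 4) == 0x70000000 | 9 | (3 << 12) | (4 << 16)
++ * == 0x70043009; RGXFW_SF_GID() then yields 3, RGXFW_SF_PARAMNUM() yields 4,
++ * and RGXFW_LOG_VALIDID() holds because the top bits equal RGXFW_LOG_IDMARKER.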
++ */ ++#define RGXFW_LOG_IDMARKER (0x70000000U) ++#define RGXFW_LOG_CREATESFID(a,b,e) ((IMG_UINT32)(a) | ((IMG_UINT32)(b)<<12U) | ((IMG_UINT32)(e)<<16U)) | RGXFW_LOG_IDMARKER ++ ++#define RGXFW_LOG_IDMASK (0xFFF00000U) ++#define RGXFW_LOG_VALIDID(I) (((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER) ++ ++typedef enum { ++#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e), ++ RGXFW_LOG_SFIDLIST ++#undef X ++} RGXFW_LOG_SFids; ++ ++/* Return the group id that the given (enum generated) id belongs to */ ++#define RGXFW_SF_GID(x) (((IMG_UINT32)(x)>>12) & 0xfU) ++/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */ ++#define RGXFW_SF_PARAMNUM(x) (((IMG_UINT32)(x)>>16) & 0xfU) ++ ++#endif /* RGX_FWIF_SF_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/rgx_heap_firmware.h b/drivers/gpu/drm/img-rogue/include/rgx_heap_firmware.h +new file mode 100644 +index 000000000000..db2b90b9f2a7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgx_heap_firmware.h +@@ -0,0 +1,120 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX FW heap definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_HEAP_FIRMWARE_H) ++#define RGX_HEAP_FIRMWARE_H ++ ++/* Start at 903GiB. 
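++ * (For reference: 903GiB == 903 << 30 == 0xE1C0000000, the value of
++ * RGX_FIRMWARE_RAW_HEAP_BASE below, and 32MB corresponds to
++ * 1 << RGX_FW_HEAP_SHIFT with the default shift of 25.)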
++ * Size of 32MB per OSID (see rgxheapconfig.h)
++ * NOTE:
++ * The firmware heaps bases and sizes are defined here to
++ * simplify #include dependencies, see rgxheapconfig.h
++ * for the full RGX virtual address space layout.
++ */
++
++/*
++ * The Config heap holds initialisation data shared between
++ * the driver and firmware (e.g. pointers to the KCCB and FWCCB).
++ * The Main Firmware heap size is adjusted accordingly but most
++ * of the map / unmap functions must take into consideration
++ * the entire range (i.e. main and config heap).
++ */
++#define RGX_FIRMWARE_NUMBER_OF_FW_HEAPS (IMG_UINT32_C(2))
++#define RGX_FIRMWARE_HEAP_SHIFT RGX_FW_HEAP_SHIFT
++#define RGX_FIRMWARE_RAW_HEAP_BASE (0xE1C0000000ULL)
++#define RGX_FIRMWARE_RAW_HEAP_SIZE (IMG_UINT32_C(1) << RGX_FIRMWARE_HEAP_SHIFT)
++
++/* To enable the firmware to compute the exact address of structures allocated by the KM
++ * in the Fw Config subheap, regardless of the KM's page size (and PMR granularity),
++ * objects allocated consecutively but from different PMRs (due to differing memalloc flags)
++ * are allocated with a 64KB offset. This way, all structures will be located at the same base
++ * addresses when the KM is running with a page size of 4k, 16k or 64k. */
++#define RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY (IMG_UINT32_C(0x10000))
++
++/* Ensure the heap can hold 3 PMRs of maximum supported granularity (192KB):
++ * 1st PMR: RGXFWIF_CONNECTION_CTL
++ * 2nd PMR: RGXFWIF_OSINIT
++ * 3rd PMR: RGXFWIF_SYSINIT */
++#define RGX_FIRMWARE_CONFIG_HEAP_SIZE (IMG_UINT32_C(3)*RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY)
++
++#define RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE)
++/*
++ * MIPS FW needs space in the Main heap to map GPU memory.
++ * This space is taken from the MAIN heap, to avoid creating a new heap.
++ */
++#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL (IMG_UINT32_C(0x100000)) /* 1MB */
++#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101 (IMG_UINT32_C(0x400000)) /* 4MB */
++
++#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \
++ RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL)
++
++#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101 (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \
++ RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101)
++
++#if !defined(__KERNEL__)
++#if defined(FIX_HW_BRN_65101)
++#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101
++#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101
++
++#include "img_defs.h"
++static_assert((RGX_FIRMWARE_RAW_HEAP_SIZE) >= IMG_UINT32_C(0x800000), "MIPS GPU map size cannot be increased due to BRN65101 with a small FW heap");
++
++#else
++#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL
++#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL
++#endif
++#endif /* !defined(__KERNEL__) */
++
++#define RGX_FIRMWARE_MAIN_HEAP_BASE RGX_FIRMWARE_RAW_HEAP_BASE
++#define RGX_FIRMWARE_CONFIG_HEAP_BASE (RGX_FIRMWARE_MAIN_HEAP_BASE + \
++ RGX_FIRMWARE_RAW_HEAP_SIZE - \
++ RGX_FIRMWARE_CONFIG_HEAP_SIZE)
++
++/*
++ * The maximum configurable size via RGX_FW_HEAP_SHIFT is 32MiB (1<<25) and
++ * the minimum is 4MiB (1<<22); the default firmware heap size is set to
++ * maximum 32MiB.
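++ *
++ * For illustration, with RGX_FW_HEAP_SHIFT at its maximum of 25:
++ * RGX_FIRMWARE_RAW_HEAP_SIZE = 1 << 25 = 0x2000000 (32MiB),
++ * RGX_FIRMWARE_CONFIG_HEAP_SIZE = 3 * 0x10000 = 0x30000 (192KB), and
++ * RGX_FIRMWARE_CONFIG_HEAP_BASE = 0xE1C0000000 + 0x2000000 - 0x30000
++ * = 0xE1C1FD0000, leaving 0x1FD0000 bytes for the default Main heap.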
++ */ ++#if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25) ++#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]" ++#endif ++ ++#endif /* RGX_HEAP_FIRMWARE_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/rgx_hwperf_common.h b/drivers/gpu/drm/img-rogue/include/rgx_hwperf_common.h +new file mode 100644 +index 000000000000..0635a51578a8 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgx_hwperf_common.h +@@ -0,0 +1,482 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX HWPerf and Debug Types and Defines Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Common data types definitions for hardware performance API ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_HWPERF_COMMON_H_ ++#define RGX_HWPERF_COMMON_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* These structures are used on both GPU and CPU and must be a size that is a ++ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at ++ * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. 
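++ * For example, the RGX_HWPERF_V2_PACKET_HDR defined below is 24 bytes
++ * (four 32-bit words plus one 64-bit timestamp), a multiple of 8.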
++ */
++
++/******************************************************************************
++ * Includes and Defines
++ *****************************************************************************/
++
++#include "img_types.h"
++#include "img_defs.h"
++
++#include "rgx_common_asserts.h"
++#include "pvrsrv_tlcommon.h"
++
++
++/******************************************************************************
++ * Packet Event Type Enumerations
++ *****************************************************************************/
++
++/*! Type used to encode the event that generated the packet.
++ * NOTE: When this type is updated the corresponding hwperfbin2json tool
++ * source needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will
++ * also need updating when adding new types.
++ *
++ * @par
++ * The event type values are incrementing integers for use as a shift ordinal
++ * in the event filtering process at the point events are generated.
++ * This scheme thus implies a limit of 63 event types.
++ */
++
++typedef IMG_UINT32 RGX_HWPERF_EVENT_TYPE;
++
++#define RGX_HWPERF_INVALID 0x00U /*!< Invalid. Reserved value. */
++
++/*! FW types 0x01..0x06 */
++#define RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE 0x01U
++
++#define RGX_HWPERF_FW_BGSTART 0x01U /*!< Background task processing start */
++#define RGX_HWPERF_FW_BGEND 0x02U /*!< Background task end */
++#define RGX_HWPERF_FW_IRQSTART 0x03U /*!< IRQ task processing start */
++
++#define RGX_HWPERF_FW_IRQEND 0x04U /*!< IRQ task end */
++#define RGX_HWPERF_FW_DBGSTART 0x05U /*!< Debug event start */
++#define RGX_HWPERF_FW_DBGEND 0x06U /*!< Debug event end */
++
++#define RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE 0x06U
++
++/*! HW types 0x07..0x19 */
++#define RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE 0x07U
++
++#define RGX_HWPERF_HW_PMOOM_TAPAUSE 0x07U /*!< TA Pause at PM Out of Memory */
++
++#define RGX_HWPERF_HW_TAKICK 0x08U /*!< TA task started */
++#define RGX_HWPERF_HW_TAFINISHED 0x09U /*!< TA task finished */
++#define RGX_HWPERF_HW_3DTQKICK 0x0AU /*!< 3D TQ started */
++#define RGX_HWPERF_HW_3DKICK 0x0BU /*!< 3D task started */
++#define RGX_HWPERF_HW_3DFINISHED 0x0CU /*!< 3D task finished */
++#define RGX_HWPERF_HW_CDMKICK 0x0DU /*!< CDM task started */
++#define RGX_HWPERF_HW_CDMFINISHED 0x0EU /*!< CDM task finished */
++#define RGX_HWPERF_HW_TLAKICK 0x0FU /*!< TLA task started */
++#define RGX_HWPERF_HW_TLAFINISHED 0x10U /*!< TLA task finished */
++#define RGX_HWPERF_HW_3DSPMKICK 0x11U /*!< 3D SPM task started */
++#define RGX_HWPERF_HW_PERIODIC 0x12U /*!< Periodic event with updated HW counters */
++#define RGX_HWPERF_HW_RTUKICK 0x13U /*!< Reserved, future use */
++#define RGX_HWPERF_HW_RTUFINISHED 0x14U /*!< Reserved, future use */
++#define RGX_HWPERF_HW_SHGKICK 0x15U /*!< Reserved, future use */
++#define RGX_HWPERF_HW_SHGFINISHED 0x16U /*!< Reserved, future use */
++#define RGX_HWPERF_HW_3DTQFINISHED 0x17U /*!< 3D TQ finished */
++#define RGX_HWPERF_HW_3DSPMFINISHED 0x18U /*!< 3D SPM task finished */
++
++#define RGX_HWPERF_HW_PMOOM_TARESUME 0x19U /*!< TA Resume after PM Out of Memory */
++
++/*! HW_EVENT_RANGE0 used up. Use next empty range below to add new hardware events */
++#define RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE 0x19U
++
++/*! other types 0x1A..0x1F */
++#define RGX_HWPERF_CLKS_CHG 0x1AU /*!< Clock speed change in GPU */
++#define RGX_HWPERF_GPU_STATE_CHG 0x1BU /*!< GPU work state change */
++
++/*! 
power types 0x20..0x27 */ ++#define RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE 0x20U ++#define RGX_HWPERF_PWR_EST_REQUEST 0x20U /*!< Power estimate requested (via GPIO) */ ++#define RGX_HWPERF_PWR_EST_READY 0x21U /*!< Power estimate inputs ready */ ++#define RGX_HWPERF_PWR_EST_RESULT 0x22U /*!< Power estimate result calculated */ ++#define RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE 0x22U ++ ++#define RGX_HWPERF_PWR_CHG 0x23U /*!< Power state change */ ++ ++/*! HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */ ++#define RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE 0x28U ++ ++#define RGX_HWPERF_HW_TDMKICK 0x28U /*!< TDM task started */ ++#define RGX_HWPERF_HW_TDMFINISHED 0x29U /*!< TDM task finished */ ++#define RGX_HWPERF_HW_NULLKICK 0x2AU /*!< NULL event */ ++ ++#define RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE 0x2AU ++ ++/*! context switch types 0x30..0x31 */ ++#define RGX_HWPERF_CSW_START 0x30U /*!< HW context store started */ ++#define RGX_HWPERF_CSW_FINISHED 0x31U /*!< HW context store finished */ ++ ++/*! DVFS events */ ++#define RGX_HWPERF_DVFS 0x32U /*!< Dynamic voltage/frequency scaling events */ ++ ++/*! firmware misc 0x38..0x39 */ ++#define RGX_HWPERF_UFO 0x38U /*!< FW UFO Check / Update */ ++#define RGX_HWPERF_FWACT 0x39U /*!< FW Activity notification */ ++ ++/*! last */ ++#define RGX_HWPERF_LAST_TYPE 0x3BU ++ ++/*! This enumeration must have a value that is a power of two as it is ++ * used in masks and a filter bit field (currently 64 bits long). ++ */ ++#define RGX_HWPERF_MAX_TYPE 0x40U ++ ++static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types"); ++ ++/*! Macro used to check if an event type ID is present in the known set of hardware type events */ ++#define HWPERF_PACKET_IS_HW_TYPE(_etype) (((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \ ++ ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE)) ++ ++/*! Macro used to check if an event type ID is present in the known set of firmware type events */ ++#define HWPERF_PACKET_IS_FW_TYPE(_etype) \ ++ ((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE && \ ++ (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE) ++ ++ ++typedef enum { ++ RGX_HWPERF_HOST_INVALID = 0x00, /*!< Invalid, do not use. */ ++ RGX_HWPERF_HOST_ENQ = 0x01, /*!< ``0x01`` Kernel driver has queued GPU work. ++ See RGX_HWPERF_HOST_ENQ_DATA */ ++ RGX_HWPERF_HOST_UFO = 0x02, /*!< ``0x02`` UFO updated by the driver. ++ See RGX_HWPERF_HOST_UFO_DATA */ ++ RGX_HWPERF_HOST_ALLOC = 0x03, /*!< ``0x03`` Resource allocated. ++ See RGX_HWPERF_HOST_ALLOC_DATA */ ++ RGX_HWPERF_HOST_CLK_SYNC = 0x04, /*!< ``0x04`` GPU / Host clocks correlation data. ++ See RGX_HWPERF_HOST_CLK_SYNC_DATA */ ++ RGX_HWPERF_HOST_FREE = 0x05, /*!< ``0x05`` Resource freed, ++ See RGX_HWPERF_HOST_FREE_DATA */ ++ RGX_HWPERF_HOST_MODIFY = 0x06, /*!< ``0x06`` Resource modified / updated. ++ See RGX_HWPERF_HOST_MODIFY_DATA */ ++ RGX_HWPERF_HOST_DEV_INFO = 0x07, /*!< ``0x07`` Device Health status. ++ See RGX_HWPERF_HOST_DEV_INFO_DATA */ ++ RGX_HWPERF_HOST_INFO = 0x08, /*!< ``0x08`` Device memory usage information. ++ See RGX_HWPERF_HOST_INFO_DATA */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT = 0x09, /*!< ``0x09`` Wait for sync event. ++ See RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA */ ++ RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE = 0x0A, /*!< ``0x0A`` Software timeline advanced. ++ See RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA */ ++ RGX_HWPERF_HOST_CLIENT_INFO = 0x0B, /*!< ``0x0B`` Additional client info. 
++ See RGX_HWPERF_HOST_CLIENT_INFO_DATA */ ++ ++ /*! last */ ++ RGX_HWPERF_HOST_LAST_TYPE, ++ ++ /*! This enumeration must have a value that is a power of two as it is ++ * used in masks and a filter bit field (currently 32 bits long). ++ */ ++ RGX_HWPERF_HOST_MAX_TYPE = 0x20 ++} RGX_HWPERF_HOST_EVENT_TYPE; ++ ++/*!< The event type values are incrementing integers for use as a shift ordinal ++ * in the event filtering process at the point events are generated. ++ * This scheme thus implies a limit of 31 event types. ++ */ ++static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types"); ++ ++ ++/****************************************************************************** ++ * Packet Header Format Version 2 Types ++ *****************************************************************************/ ++ ++/*! Major version number of the protocol in operation ++ */ ++#define RGX_HWPERF_V2_FORMAT 2 ++ ++/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet ++ */ ++#define HWPERF_PACKET_V2_SIG 0x48575032 ++ ++/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet ++ */ ++#define HWPERF_PACKET_V2A_SIG 0x48575041 ++ ++/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet ++ */ ++#define HWPERF_PACKET_V2B_SIG 0x48575042 ++ ++/*! Signature ASCII pattern 'HWPC' found in the first word of a HWPerfV2c packet ++ */ ++#define HWPERF_PACKET_V2C_SIG 0x48575043 ++ ++#define HWPERF_PACKET_ISVALID(_val) (((_val) == HWPERF_PACKET_V2_SIG) || ((_val) == HWPERF_PACKET_V2A_SIG) || ((_val) == HWPERF_PACKET_V2B_SIG) || ((_val) == HWPERF_PACKET_V2C_SIG)) ++/*!< Checks that the packet signature is one of the supported versions */ ++ ++/*! Type defines the HWPerf packet header common to all events. */ ++typedef struct ++{ ++ IMG_UINT32 ui32Sig; /*!< Always the value HWPERF_PACKET_SIG */ ++ IMG_UINT32 ui32Size; /*!< Overall packet size in bytes */ ++ IMG_UINT32 eTypeId; /*!< Event type information field */ ++ IMG_UINT32 ui32Ordinal; /*!< Sequential number of the packet */ ++ IMG_UINT64 ui64Timestamp; /*!< Event timestamp */ ++} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR; ++ ++RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp); ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR); ++ ++ ++/*! Mask for use with the IMG_UINT32 ui32Size header field */ ++#define RGX_HWPERF_SIZE_MASK 0xFFFFU ++ ++/*! This macro defines an upper limit to which the size of the largest variable ++ * length HWPerf packet must fall within, currently 3KB. This constant may be ++ * used to allocate a buffer to hold one packet. ++ * This upper limit is policed by packet producing code. ++ */ ++#define RGX_HWPERF_MAX_PACKET_SIZE 0xC00U ++ ++/*! Defines an upper limit to the size of a variable length packet payload. ++ */ ++#define RGX_HWPERF_MAX_PAYLOAD_SIZE ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\ ++ sizeof(RGX_HWPERF_V2_PACKET_HDR))) ++ ++/*! Macro which takes a structure name and provides the packet size for ++ * a fixed size payload packet, rounded up to 8 bytes to align packets ++ * for 64 bit architectures. */ ++#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT)))) ++ ++/*! Macro which takes the number of bytes written in the data payload of a ++ * packet for a variable size payload packet, rounded up to 8 bytes to ++ * align packets for 64 bit architectures. 
*/ ++#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&((IMG_UINT32)sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN((_size), PVRSRVTL_PACKET_ALIGNMENT)))) ++ ++/*! Macro to obtain the size of the packet */ ++#define RGX_HWPERF_GET_SIZE(_packet_addr) ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK)) ++ ++/*! Macro to obtain the size of the packet data */ ++#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr) (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR)) ++ ++/*! Masks for use with the IMG_UINT32 eTypeId header field */ ++#define RGX_HWPERF_TYPEID_MASK 0x0007FFFFU ++#define RGX_HWPERF_TYPEID_EVENT_MASK 0x00007FFFU ++#define RGX_HWPERF_TYPEID_THREAD_MASK 0x00008000U ++#define RGX_HWPERF_TYPEID_STREAM_MASK 0x00070000U ++#define RGX_HWPERF_TYPEID_META_DMA_MASK 0x00080000U ++#define RGX_HWPERF_TYPEID_M_CORE_MASK 0x00100000U ++#define RGX_HWPERF_TYPEID_OSID_MASK 0x07000000U ++ ++/*! Meta thread macros for encoding the ID into the type field of a packet */ ++#define RGX_HWPERF_META_THREAD_SHIFT 15U ++#define RGX_HWPERF_META_THREAD_ID0 0x0U /*!< Meta Thread 0 ID */ ++#define RGX_HWPERF_META_THREAD_ID1 0x1U /*!< Meta Thread 1 ID */ ++/*! Obsolete, kept for source compatibility */ ++#define RGX_HWPERF_META_THREAD_MASK 0x1U ++/*! Stream ID macros for encoding the ID into the type field of a packet */ ++#define RGX_HWPERF_STREAM_SHIFT 16U ++/*! Meta DMA macro for encoding how the packet was generated into the type field of a packet */ ++#define RGX_HWPERF_META_DMA_SHIFT 19U ++/*! Bit-shift macro used for encoding multi-core data into the type field of a packet */ ++#define RGX_HWPERF_M_CORE_SHIFT 20U ++/*! OSID bit-shift macro used for encoding OSID into type field of a packet */ ++#define RGX_HWPERF_OSID_SHIFT 24U ++typedef enum { ++ RGX_HWPERF_STREAM_ID0_FW, /*!< Events from the Firmware/GPU */ ++ RGX_HWPERF_STREAM_ID1_HOST, /*!< Events from the Server host driver component */ ++ RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */ ++ RGX_HWPERF_STREAM_ID_LAST, ++} RGX_HWPERF_STREAM_ID; ++ ++/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */ ++static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT), ++ "Too many HWPerf stream IDs."); ++ ++/*! Compile-time value used to seed the Multi-Core (MC) bit in the typeID field. ++ * Only set by RGX_FIRMWARE builds. ++ */ ++#if defined(RGX_FIRMWARE) ++# if defined(RGX_FEATURE_GPU_MULTICORE_SUPPORT) ++#define RGX_HWPERF_M_CORE_VALUE 1U /*!< 1 => Multi-core supported */ ++# else ++#define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ ++# endif ++#else ++#define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ ++#endif ++ ++/*! 
Macros used to set the packet type and encode meta thread ID (0|1), ++ * HWPerf stream ID, multi-core capability and OSID within the typeID */ ++#define RGX_HWPERF_MAKE_TYPEID(_stream, _type, _thread, _metadma, _osid)\ ++ ((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((IMG_UINT32)(_stream) << RGX_HWPERF_STREAM_SHIFT)) | \ ++ (RGX_HWPERF_TYPEID_THREAD_MASK & ((IMG_UINT32)(_thread) << RGX_HWPERF_META_THREAD_SHIFT)) | \ ++ (RGX_HWPERF_TYPEID_EVENT_MASK & (IMG_UINT32)(_type)) | \ ++ (RGX_HWPERF_TYPEID_META_DMA_MASK & ((IMG_UINT32)(_metadma) << RGX_HWPERF_META_DMA_SHIFT)) | \ ++ (RGX_HWPERF_TYPEID_OSID_MASK & ((IMG_UINT32)(_osid) << RGX_HWPERF_OSID_SHIFT)) | \ ++ (RGX_HWPERF_TYPEID_M_CORE_MASK & ((IMG_UINT32)(RGX_HWPERF_M_CORE_VALUE) << RGX_HWPERF_M_CORE_SHIFT)))) ++ ++/*! Obtains the event type that generated the packet */ ++#define RGX_HWPERF_GET_TYPE(_packet_addr) (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK) ++ ++/*! Obtains the META Thread number that generated the packet */ ++#define RGX_HWPERF_GET_THREAD_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT)) ++ ++/*! Determines if the packet generated contains multi-core data */ ++#define RGX_HWPERF_GET_M_CORE(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_M_CORE_MASK) >> RGX_HWPERF_M_CORE_SHIFT) ++ ++/*! Obtains the guest OSID which resulted in packet generation */ ++#define RGX_HWPERF_GET_OSID(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT) ++ ++/*! Obtain stream id */ ++#define RGX_HWPERF_GET_STREAM_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT)) ++ ++/*! Obtain information about how the packet was generated, which might affect payload total size */ ++#define RGX_HWPERF_GET_META_DMA_INFO(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_META_DMA_MASK) >> RGX_HWPERF_META_DMA_SHIFT)) ++ ++/*! Obtains a typed pointer to a packet given a buffer address */ ++#define RGX_HWPERF_GET_PACKET(_buffer_addr) ((RGX_HWPERF_V2_PACKET_HDR *)(void *) (_buffer_addr)) ++/*! Obtains a typed pointer to a data structure given a packet address */ ++#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR))) ++/*! Obtains a typed pointer to the next packet given a packet address */ ++#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), RGX_HWPERF_SIZE_MASK&((_packet_addr)->ui32Size)))) ++ ++/*! Obtains a typed pointer to a packet header given the packet data address */ ++#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), -(IMG_INT32)sizeof(RGX_HWPERF_V2_PACKET_HDR)))) ++ ++ ++/****************************************************************************** ++ * Other Common Defines ++ *****************************************************************************/ ++ ++/*! This macro is not a real array size, but indicates the array has a variable ++ * length only known at run-time but always contains at least 1 element. The ++ * final size of the array is deduced from the size field of a packet header. ++ */ ++#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS 1U ++ ++/*! This macro is not a real array size, but indicates the array is optional ++ * and if present has a variable length only known at run-time. 
The final ++ * size of the array is deduced from the size field of a packet header. */ ++#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U ++ ++ ++/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */ ++#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK 0xFFFF0000U ++#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK 0x0000FFFFU ++ ++/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */ ++#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT 16U ++#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U ++ ++/*! Macro used to set the block info word as a combination of two 16-bit integers */ ++#define RGX_HWPERF_MAKE_BLKINFO(_numblks, _blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)))) ++ ++/*! Macro used to obtain the number of counter blocks present in the packet */ ++#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT) ++ ++/*! Obtains the offset of the counter block stream in the packet */ ++#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT) ++ ++/*! This macro gets the number of blocks depending on the packet version */ ++#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks) \ ++ do { \ ++ if (HWPERF_PACKET_V2B_SIG == (_sig) || HWPERF_PACKET_V2C_SIG == (_sig)) \ ++ { \ ++ (_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo);\ ++ } \ ++ else \ ++ { \ ++ IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 1 : 3);\ ++ (_numblocks) = *(IMG_UINT16 *)(IMG_OFFSET_ADDR(&(_packet_data)->ui32WorkTarget, ui32VersionOffset)); \ ++ } \ ++ } while (0) ++ ++/*! This macro gets the counter stream pointer depending on the packet version */ ++#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr) \ ++{ \ ++ if (HWPERF_PACKET_V2B_SIG == (_sig) || HWPERF_PACKET_V2C_SIG == (_sig)) \ ++ { \ ++ (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo))); \ ++ } \ ++ else \ ++ { \ ++ IMG_UINT32 ui32BlkStreamOffsetInWords = (((_sig) == HWPERF_PACKET_V2_SIG) ? 6 : 8); \ ++ (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR_DW((_hw_packet_data), ui32BlkStreamOffsetInWords)); \ ++ } \ ++} ++ ++/*! Masks for use with the IMG_UINT32 ui32KickInfo field */ ++#define RGX_HWPERF_KICKINFO_KICKID_MASK 0x000000FFU ++ ++/*! Shift for the Kick ID field in ui32KickInfo */ ++#define RGX_HWPERF_KICKINFO_KICKID_SHIFT 0U ++ ++/*! Macro used to set the kick info field. */ ++#define RGX_HWPERF_MAKE_KICKINFO(_kickid) ((IMG_UINT32) (RGX_HWPERF_KICKINFO_KICKID_MASK&((_kickid) << RGX_HWPERF_KICKINFO_KICKID_SHIFT))) ++ ++/*! Macro used to obtain the Kick ID if present in the packet */ ++#define RGX_HWPERF_GET_KICKID(_kickinfo) (((_kickinfo) & RGX_HWPERF_KICKINFO_KICKID_MASK) >> RGX_HWPERF_KICKINFO_KICKID_SHIFT) ++ ++/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */ ++#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U ++#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU ++ ++/*! Shift for the UFO count and data stream fields */ ++#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U ++#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U ++ ++/*! 
Macro used to set UFO stream info word as a combination of two 16-bit integers */ ++#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize, _soff) \ ++ ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) | \ ++ (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)))) ++ ++/*! Macro used to obtain UFO count*/ ++#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo) \ ++ (((_streaminfo) & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT) ++ ++/*! Obtains the offset of the UFO stream in the packet */ ++#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo) \ ++ (((_streaminfo) & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT) ++ ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_HWPERF_COMMON_H_ */ ++ ++/****************************************************************************** ++ End of file ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/rgx_meta.h b/drivers/gpu/drm/img-rogue/include/rgx_meta.h +new file mode 100644 +index 000000000000..bdff11ffbdc1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgx_meta.h +@@ -0,0 +1,385 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX META definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX META helper definitions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#if !defined(RGX_META_H)
++#define RGX_META_H
++
++
++/***** The META HW register definitions in the file are updated manually *****/
++
++
++#include "img_defs.h"
++#include "km/rgxdefs_km.h"
++
++
++/******************************************************************************
++* META registers and MACROS
++******************************************************************************/
++#define META_CR_CTRLREG_BASE(T) (0x04800000U + (0x1000U*(T)))
++
++#define META_CR_TXPRIVEXT (0x048000E8)
++#define META_CR_TXPRIVEXT_MINIM_EN (IMG_UINT32_C(0x1) << 7)
++
++#define META_CR_SYSC_JTAG_THREAD (0x04830030)
++#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004)
++
++#define META_CR_PERF_COUNT0 (0x0480FFE0)
++#define META_CR_PERF_COUNT1 (0x0480FFE8)
++#define META_CR_PERF_COUNT_CTRL_SHIFT (28)
++#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000)
++#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (IMG_UINT32_C(0x8) << META_CR_PERF_COUNT_CTRL_SHIFT)
++#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (IMG_UINT32_C(0x9) << META_CR_PERF_COUNT_CTRL_SHIFT)
++#define META_CR_PERF_COUNT_CTRL_ICACHEMISS (IMG_UINT32_C(0xA) << META_CR_PERF_COUNT_CTRL_SHIFT)
++#define META_CR_PERF_COUNT_CTRL_ICORE (IMG_UINT32_C(0xD) << META_CR_PERF_COUNT_CTRL_SHIFT)
++#define META_CR_PERF_COUNT_THR_SHIFT (24)
++#define META_CR_PERF_COUNT_THR_MASK (0x0F000000)
++#define META_CR_PERF_COUNT_THR_0 (IMG_UINT32_C(0x1) << META_CR_PERF_COUNT_THR_SHIFT)
++#define META_CR_PERF_COUNT_THR_1 (IMG_UINT32_C(0x2) << META_CR_PERF_COUNT_THR_SHIFT)
++
++#define META_CR_TxVECINT_BHALT (0x04820500)
++#define META_CR_PERF_ICORE0 (0x0480FFD0)
++#define META_CR_PERF_ICORE1 (0x0480FFD8)
++#define META_CR_PERF_ICORE_DCACHEMISS (0x8)
++
++/* The META_CR_PERF_COUNT_CTRL_* values above are already shifted into
++ * place, so only the thread field needs shifting here. */
++#define META_CR_PERF_COUNT(CTRL, THR) (META_CR_PERF_COUNT_CTRL_##CTRL | \
++ ((THR) << META_CR_PERF_COUNT_THR_SHIFT))
++
++#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U)
++#define META_CR_TXUXXRXRQ_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U)
++
++#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000U) /* Poll for done */
++#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000U) /* Set for read */
++#define META_CR_TXUXXRXRQ_TX_S (12)
++#define META_CR_TXUXXRXRQ_RX_S (4)
++#define META_CR_TXUXXRXRQ_UXX_S (0)
++
++#define META_CR_TXUIN_ID (0x0) /* Internal ctrl regs */
++#define META_CR_TXUD0_ID (0x1) /* Data unit regs */
++#define META_CR_TXUD1_ID (0x2) /* Data unit regs */
++#define META_CR_TXUA0_ID (0x3) /* Address unit regs */
++#define META_CR_TXUA1_ID (0x4) /* Address unit regs */
++#define META_CR_TXUPC_ID (0x5) /* PC registers */
++
++/* Macros to calculate register access values,
++ * e.g. META_CR_THR1_PCX = (1 << 12) | (1 << 4) | 0x5 = 0x1015 */
++#define META_CR_CORE_REG(Thr, RegNum, Unit) (((IMG_UINT32)(Thr) << META_CR_TXUXXRXRQ_TX_S) | \
++ ((IMG_UINT32)(RegNum) << META_CR_TXUXXRXRQ_RX_S) | \
++ ((IMG_UINT32)(Unit) << META_CR_TXUXXRXRQ_UXX_S))
++
++#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
++#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
++#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
++
++#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
++#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
++#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
++
++#define SP_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID)
++#define PC_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID)
++
++#define META_CR_COREREG_ENABLE (0x0000000U) 
++#define META_CR_COREREG_STATUS (0x0000010U) ++#define META_CR_COREREG_DEFR (0x00000A0U) ++#define META_CR_COREREG_PRIVEXT (0x00000E8U) ++ ++#define META_CR_T0ENABLE_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE) ++#define META_CR_T0STATUS_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS) ++#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR) ++#define META_CR_T0PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT) ++ ++#define META_CR_T1ENABLE_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE) ++#define META_CR_T1STATUS_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS) ++#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR) ++#define META_CR_T1PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT) ++ ++#define META_CR_TXENABLE_ENABLE_BIT (0x00000001U) /* Set if running */ ++#define META_CR_TXSTATUS_PRIV (0x00020000U) ++#define META_CR_TXPRIVEXT_MINIM (0x00000080U) ++ ++#define META_MEM_GLOBAL_RANGE_BIT (0x80000000U) ++ ++#define META_CR_TXCLKCTRL (0x048000B0) ++#define META_CR_TXCLKCTRL_ALL_ON (0x55111111) ++#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222) ++ ++ ++/****************************************************************************** ++* META LDR Format ++******************************************************************************/ ++/* Block header structure */ ++typedef struct ++{ ++ IMG_UINT32 ui32DevID; ++ IMG_UINT32 ui32SLCode; ++ IMG_UINT32 ui32SLData; ++ IMG_UINT16 ui16PLCtrl; ++ IMG_UINT16 ui16CRC; ++ ++} RGX_META_LDR_BLOCK_HDR; ++ ++/* High level data stream block structure */ ++typedef struct ++{ ++ IMG_UINT16 ui16Cmd; ++ IMG_UINT16 ui16Length; ++ IMG_UINT32 ui32Next; ++ IMG_UINT32 aui32CmdData[4]; ++ ++} RGX_META_LDR_L1_DATA_BLK; ++ ++/* High level data stream block structure */ ++typedef struct ++{ ++ IMG_UINT16 ui16Tag; ++ IMG_UINT16 ui16Length; ++ IMG_UINT32 aui32BlockData[4]; ++ ++} RGX_META_LDR_L2_DATA_BLK; ++ ++/* Config command structure */ ++typedef struct ++{ ++ IMG_UINT32 ui32Type; ++ IMG_UINT32 aui32BlockData[4]; ++ ++} RGX_META_LDR_CFG_BLK; ++ ++/* Block type definitions */ ++#define RGX_META_LDR_COMMENT_TYPE_MASK (0x0010U) ++#define RGX_META_LDR_BLK_IS_COMMENT(X) ((X & RGX_META_LDR_COMMENT_TYPE_MASK) != 0U) ++ ++/* Command definitions ++ * Value Name Description ++ * 0 LoadMem Load memory with binary data. ++ * 1 LoadCore Load a set of core registers. ++ * 2 LoadMMReg Load a set of memory mapped registers. ++ * 3 StartThreads Set each thread PC and SP, then enable threads. ++ * 4 ZeroMem Zeros a memory region. ++ * 5 Config Perform a configuration command. ++ */ ++#define RGX_META_LDR_CMD_MASK (0x000FU) ++ ++#define RGX_META_LDR_CMD_LOADMEM (0x0000U) ++#define RGX_META_LDR_CMD_LOADCORE (0x0001U) ++#define RGX_META_LDR_CMD_LOADMMREG (0x0002U) ++#define RGX_META_LDR_CMD_START_THREADS (0x0003U) ++#define RGX_META_LDR_CMD_ZEROMEM (0x0004U) ++#define RGX_META_LDR_CMD_CONFIG (0x0005U) ++ ++/* Config Command definitions ++ * Value Name Description ++ * 0 Pause Pause for x times 100 instructions ++ * 1 Read Read a value from register - No value return needed. ++ * Utilises effects of issuing reads to certain registers ++ * 2 Write Write to mem location ++ * 3 MemSet Set mem to value ++ * 4 MemCheck check mem for specific value. 
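++ * The operands for each config command (e.g. address and value for a Write)
++ * are carried in the aui32BlockData words of RGX_META_LDR_CFG_BLK above.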
++ */ ++#define RGX_META_LDR_CFG_PAUSE (0x0000) ++#define RGX_META_LDR_CFG_READ (0x0001) ++#define RGX_META_LDR_CFG_WRITE (0x0002) ++#define RGX_META_LDR_CFG_MEMSET (0x0003) ++#define RGX_META_LDR_CFG_MEMCHECK (0x0004) ++ ++ ++/****************************************************************************** ++* RGX FW segmented MMU definitions ++******************************************************************************/ ++/* All threads can access the segment */ ++#define RGXFW_SEGMMU_ALLTHRS (IMG_UINT32_C(0xf) << 8U) ++/* Writable */ ++#define RGXFW_SEGMMU_WRITEABLE (0x1U << 1U) ++/* All threads can access and writable */ ++#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE (RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE) ++ ++/* Direct map region 10 used for mapping GPU memory - max 8MB */ ++#define RGXFW_SEGMMU_DMAP_GPU_ID (10U) ++#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U) ++#define RGXFW_SEGMMU_DMAP_GPU_MAX_SIZE (0x00800000U) ++ ++/* Segment IDs */ ++#define RGXFW_SEGMMU_DATA_ID (1U) ++#define RGXFW_SEGMMU_BOOTLDR_ID (2U) ++#define RGXFW_SEGMMU_TEXT_ID (RGXFW_SEGMMU_BOOTLDR_ID) ++ ++/* ++ * SLC caching strategy in S7 and volcanic is emitted through the segment MMU. ++ * All the segments configured through the macro RGXFW_SEGMMU_OUTADDR_TOP are ++ * CACHED in the SLC. ++ * The interface has been kept the same to simplify the code changes. ++ * The bifdm argument is ignored (no longer relevant) in S7 and volcanic. ++ */ ++#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx) ((((IMG_UINT64) ((pers) & 0x3U)) << 52) | \ ++ (((IMG_UINT64) ((mmu_ctx) & 0xFFU)) << 44) | \ ++ (((IMG_UINT64) ((slc_policy) & 0x1U)) << 40)) ++#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3U, 0x0U, mmu_ctx) ++#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0U, 0x1U, mmu_ctx) ++ ++/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten ++ * accesses through this segment ++ */ ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm) (((IMG_UINT64)((IMG_UINT64)(pc) & 0xFU) << 44U) | \ ++ ((IMG_UINT64)((IMG_UINT64)(bifdm) & 0xFU) << 40U)) ++ ++#define RGXFW_SEGMMU_META_BIFDM_ID (0x7U) ++#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) ++#if defined(RGX_FEATURE_SLC_VIVT) ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED ++#define RGXFW_SEGMMU_OUTADDR_TOP_META RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED ++#else ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC ++#define RGXFW_SEGMMU_OUTADDR_TOP_META(pc) RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, RGXFW_SEGMMU_META_BIFDM_ID) ++#endif ++#endif ++ ++/* META segments have 4kB minimum size */ ++#define RGXFW_SEGMMU_ALIGN (0x1000U) ++ ++/* Segmented MMU registers (n = segment id) */ ++#define META_CR_MMCU_SEGMENTn_BASE(n) (0x04850000U + ((n)*0x10U)) ++#define META_CR_MMCU_SEGMENTn_LIMIT(n) (0x04850004U + ((n)*0x10U)) ++#define META_CR_MMCU_SEGMENTn_OUTA0(n) (0x04850008U + ((n)*0x10U)) ++#define META_CR_MMCU_SEGMENTn_OUTA1(n) (0x0485000CU + ((n)*0x10U)) ++ ++/* The following defines must be recalculated if the Meta MMU segments used ++ * to access Host-FW data are changed ++ * Current combinations are: ++ * - SLC uncached, META cached, FW base address 0x70000000 ++ * - SLC uncached, META uncached, FW base address 0xF0000000 ++ * - SLC cached, 
META cached, FW base address 0x10000000 ++ * - SLC cached, META uncached, FW base address 0x90000000 ++ */ ++#define RGXFW_SEGMMU_DATA_BASE_ADDRESS (0x10000000U) ++#define RGXFW_SEGMMU_DATA_META_CACHED (0x0U) ++#define RGXFW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT) // 0x80000000 ++#define RGXFW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT) ++/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in ++ * the PTEs for the FW data, not in the Meta Segment MMU, which means these ++ * defines have no real effect in those cases ++ */ ++#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0U) ++#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U) ++#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U) ++ ++ ++#if defined(SECURE_FW_CODE_OSID) && defined(RGX_FEATURE_META) ++#error "SECURE_FW_CODE_OSID is not supported on META cores" ++#endif ++ ++ ++/****************************************************************************** ++* RGX FW Bootloader defaults ++******************************************************************************/ ++#define RGXFW_BOOTLDR_META_ADDR (0x40000000U) ++#define RGXFW_BOOTLDR_DEVV_ADDR_0 (0xC0000000U) ++#define RGXFW_BOOTLDR_DEVV_ADDR_1 (0x000000E1) ++#define RGXFW_BOOTLDR_DEVV_ADDR ((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0) ++#define RGXFW_BOOTLDR_LIMIT (0x1FFFF000) ++#define RGXFW_MAX_BOOTLDR_OFFSET (0x1000) ++ ++/* Bootloader configuration offset is in dwords (512 bytes) */ ++#define RGXFW_BOOTLDR_CONF_OFFSET (0x80) ++ ++ ++/****************************************************************************** ++* RGX META Stack ++******************************************************************************/ ++#define RGX_META_STACK_SIZE (0x1000U) ++ ++/****************************************************************************** ++ RGX META Core memory ++******************************************************************************/ ++/* code and data both map to the same physical memory */ ++#define RGX_META_COREMEM_CODE_ADDR (0x80000000U) ++#define RGX_META_COREMEM_DATA_ADDR (0x82000000U) ++#define RGX_META_COREMEM_OFFSET_MASK (0x01ffffffU) ++ ++#if defined(__KERNEL__) ++#define RGX_META_IS_COREMEM_CODE(A, B) (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B)))) ++#define RGX_META_IS_COREMEM_DATA(A, B) (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B)))) ++#endif ++ ++/****************************************************************************** ++* 2nd thread ++******************************************************************************/ ++#define RGXFW_THR1_PC (0x18930000) ++#define RGXFW_THR1_SP (0x78890000) ++ ++/****************************************************************************** ++* META compatibility ++******************************************************************************/ ++ ++#define META_CR_CORE_ID (0x04831000) ++#define META_CR_CORE_ID_VER_SHIFT (16U) ++#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU) ++ ++#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) ++ ++ #if (RGX_FEATURE_META == MTP218) ++ #define RGX_CR_META_CORE_ID_VALUE 0x19 ++ #elif (RGX_FEATURE_META == MTP219) ++ #define RGX_CR_META_CORE_ID_VALUE 0x1E ++ #elif (RGX_FEATURE_META == LTP218) ++ #define RGX_CR_META_CORE_ID_VALUE 0x1C ++ #elif (RGX_FEATURE_META == LTP217) ++ #define RGX_CR_META_CORE_ID_VALUE 0x1F ++ #else ++ #error "Unknown META ID" ++ #endif ++#else ++ ++ #define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19 ++ #define 
RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E ++ #define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C ++ #define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F ++ ++#endif ++#define RGXFW_PROCESSOR_META "META" ++ ++ ++#endif /* RGX_META_H */ ++ ++/****************************************************************************** ++ End of file (rgx_meta.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/rgx_mips.h b/drivers/gpu/drm/img-rogue/include/rgx_mips.h +new file mode 100644 +index 000000000000..c2f381882f74 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgx_mips.h +@@ -0,0 +1,374 @@ ++/*************************************************************************/ /*! ++@File rgx_mips.h ++@Title ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Platform RGX ++@Description RGX MIPS definitions, kernel/user space ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(RGX_MIPS_H) ++#define RGX_MIPS_H ++ ++/* ++ * Utility defines for memory management ++ */ ++#define RGXMIPSFW_LOG2_PAGE_SIZE_4K (12) ++#define RGXMIPSFW_PAGE_SIZE_4K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4K) ++#define RGXMIPSFW_PAGE_MASK_4K (RGXMIPSFW_PAGE_SIZE_4K - 1) ++#define RGXMIPSFW_LOG2_PAGE_SIZE_64K (16) ++#define RGXMIPSFW_PAGE_SIZE_64K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_64K) ++#define RGXMIPSFW_PAGE_MASK_64K (RGXMIPSFW_PAGE_SIZE_64K - 1) ++#define RGXMIPSFW_LOG2_PAGE_SIZE_256K (18) ++#define RGXMIPSFW_PAGE_SIZE_256K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_256K) ++#define RGXMIPSFW_PAGE_MASK_256K (RGXMIPSFW_PAGE_SIZE_256K - 1) ++#define RGXMIPSFW_LOG2_PAGE_SIZE_1MB (20) ++#define RGXMIPSFW_PAGE_SIZE_1MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_1MB) ++#define RGXMIPSFW_PAGE_MASK_1MB (RGXMIPSFW_PAGE_SIZE_1MB - 1) ++#define RGXMIPSFW_LOG2_PAGE_SIZE_4MB (22) ++#define RGXMIPSFW_PAGE_SIZE_4MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4MB) ++#define RGXMIPSFW_PAGE_MASK_4MB (RGXMIPSFW_PAGE_SIZE_4MB - 1) ++#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE (2) ++/* log2 page table sizes dependent on FW heap size and page size (for each OS) */ ++#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_4K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) ++#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_64K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) ++/* Maximum number of page table pages (both Host and MIPS pages) */ ++#define RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES (4) ++/* Total number of TLB entries */ ++#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES (16) ++/* "Uncached" caching policy */ ++#define RGXMIPSFW_UNCACHED_CACHE_POLICY (0X00000002U) ++/* "Write-back write-allocate" caching policy */ ++#define RGXMIPSFW_WRITEBACK_CACHE_POLICY (0X00000003) ++/* "Write-through no write-allocate" caching policy */ ++#define RGXMIPSFW_WRITETHROUGH_CACHE_POLICY (0X00000001) ++/* Cached policy used by MIPS in case of physical bus on 32 bit */ ++#define RGXMIPSFW_CACHED_POLICY (RGXMIPSFW_WRITEBACK_CACHE_POLICY) ++/* Cached policy used by MIPS in case of physical bus on more than 32 bit */ ++#define RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT (RGXMIPSFW_WRITETHROUGH_CACHE_POLICY) ++/* Total number of Remap entries */ ++#define RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * RGXMIPSFW_NUMBER_OF_TLB_ENTRIES) ++ ++ ++/* ++ * MIPS EntryLo/PTE format ++ */ ++ ++#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U) ++#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0X7FFFFFFF) ++#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN (0X80000000U) ++ ++#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U) ++#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0XBFFFFFFF) ++#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0X40000000U) ++ ++/* Page Frame Number */ ++#define RGXMIPSFW_ENTRYLO_PFN_SHIFT (6) ++#define RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12) ++/* Mask used for the MIPS Page Table in case of physical bus on 32 bit */ ++#define RGXMIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0) ++#define RGXMIPSFW_ENTRYLO_PFN_SIZE (20) ++/* Mask used for the MIPS Page Table in case of physical bus on more than 32 bit */ ++#define RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0U) ++#define RGXMIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24) ++#define RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \ ++ RGXMIPSFW_ENTRYLO_PFN_SHIFT) ++ ++#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U) ++#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0XFFFFFFC7U) ++ ++#define 
RGXMIPSFW_ENTRYLO_DIRTY_SHIFT (2U) ++#define RGXMIPSFW_ENTRYLO_DIRTY_CLRMSK (0XFFFFFFFB) ++#define RGXMIPSFW_ENTRYLO_DIRTY_EN (0X00000004U) ++ ++#define RGXMIPSFW_ENTRYLO_VALID_SHIFT (1U) ++#define RGXMIPSFW_ENTRYLO_VALID_CLRMSK (0XFFFFFFFD) ++#define RGXMIPSFW_ENTRYLO_VALID_EN (0X00000002U) ++ ++#define RGXMIPSFW_ENTRYLO_GLOBAL_SHIFT (0U) ++#define RGXMIPSFW_ENTRYLO_GLOBAL_CLRMSK (0XFFFFFFFE) ++#define RGXMIPSFW_ENTRYLO_GLOBAL_EN (0X00000001U) ++ ++#define RGXMIPSFW_ENTRYLO_DVG (RGXMIPSFW_ENTRYLO_DIRTY_EN | \ ++ RGXMIPSFW_ENTRYLO_VALID_EN | \ ++ RGXMIPSFW_ENTRYLO_GLOBAL_EN) ++#define RGXMIPSFW_ENTRYLO_UNCACHED (RGXMIPSFW_UNCACHED_CACHE_POLICY << \ ++ RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT) ++#define RGXMIPSFW_ENTRYLO_DVG_UNCACHED (RGXMIPSFW_ENTRYLO_DVG | RGXMIPSFW_ENTRYLO_UNCACHED) ++ ++ ++/* Remap Range Config Addr Out */ ++/* These defines refer to the upper half of the Remap Range Config register */ ++#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0) ++#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* wrt upper half of the register */ ++#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12) ++#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \ ++ RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT) ++ ++#if defined(SECURE_FW_CODE_OSID) && (SECURE_FW_CODE_OSID + 1 > 2) ++#define MIPS_FW_CODE_OSID (SECURE_FW_CODE_OSID) ++#elif defined(SECURE_FW_CODE_OSID) ++#define MIPS_FW_CODE_OSID (1U) ++#endif ++ ++ ++/* ++ * Pages to trampoline problematic physical addresses: ++ * - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000 ++ * - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000 ++ * - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000 ++ * - (benign trampoline) : 0x1FC0_3000 ++ * that would otherwise be erroneously remapped by the MIPS wrapper ++ * (see "Firmware virtual layout and remap configuration" section below) ++ */ ++ ++#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2) ++#define RGXMIPSFW_TRAMPOLINE_NUMPAGES (1U << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES) ++#define RGXMIPSFW_TRAMPOLINE_SIZE (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE_4K) ++#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE_4K) ++ ++#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) ++#define RGXMIPSFW_TRAMPOLINE_OFFSET(a) (a - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) ++ ++#define RGXMIPSFW_SENSITIVE_ADDR(a) (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1UL << RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1U) & a)) ++ ++/* ++ * Firmware virtual layout and remap configuration ++ */ ++/* ++ * For each remap region we define: ++ * - the virtual base used by the Firmware to access code/data through that region ++ * - the microAptivAP physical address correspondent to the virtual base address, ++ * used as input address and remapped to the actual physical address ++ * - log2 of size of the region remapped by the MIPS wrapper, i.e. 
number of bits from
++ * the bottom of the base input address that survive onto the output address
++ * (this defines both the alignment and the maximum size of the remapped region)
++ * - one or more code/data segments within the remapped region
++ */
++
++/* Boot remap setup */
++#define RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE (0xBFC00000)
++#define RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN (0x1FC00000U)
++#define RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE (12)
++#define RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE (RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE)
++
++/* Data remap setup */
++#define RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE (0xBFC01000)
++#define RGXMIPSFW_DATA_CACHED_REMAP_VIRTUAL_BASE (0x9FC01000)
++#define RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN (0x1FC01000U)
++#define RGXMIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE (12)
++#define RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE (RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE)
++
++/* Code remap setup */
++#define RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE (0x9FC02000)
++#define RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN (0x1FC02000U)
++#define RGXMIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE (12)
++#define RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE (RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE)
++
++/* Permanent mappings setup */
++#define RGXMIPSFW_PT_VIRTUAL_BASE (0xCF000000)
++#define RGXMIPSFW_REGISTERS_VIRTUAL_BASE (0xCF800000)
++#define RGXMIPSFW_STACK_VIRTUAL_BASE (0xCF600000)
++
++
++/*
++ * Bootloader configuration data
++ */
++/* Bootloader configuration offset (where RGXMIPSFW_BOOT_DATA lives)
++ * within the bootloader/NMI data page */
++#define RGXMIPSFW_BOOTLDR_CONF_OFFSET (0x0U)
++
++
++/*
++ * NMI shared data
++ */
++/* Base address of the shared data within the bootloader/NMI data page */
++#define RGXMIPSFW_NMI_SHARED_DATA_BASE (0x100)
++/* Size used by Debug dump data */
++#define RGXMIPSFW_NMI_SHARED_SIZE (0x2B0)
++/* Offsets in the NMI shared area in 32-bit words */
++#define RGXMIPSFW_NMI_SYNC_FLAG_OFFSET (0x0)
++#define RGXMIPSFW_NMI_STATE_OFFSET (0x1)
++#define RGXMIPSFW_NMI_ERROR_STATE_SET (0x1)
++
++/*
++ * MIPS boot stage
++ */
++#define RGXMIPSFW_BOOT_STAGE_OFFSET (0x400)
++
++/*
++ * MIPS private data in the bootloader data page.
++ * Memory above this offset is used by the FW only, no interface data allowed.
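++ * (The interface structures defined above, i.e. the bootloader configuration
++ * at offset 0x0, the NMI shared data at 0x100 and the boot stage word at
++ * 0x400, all sit below it.)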
++ */ ++#define RGXMIPSFW_PRIVATE_DATA_OFFSET (0x800) ++ ++ ++/* The things that follow are excluded when compiling assembly sources */ ++#if !defined(RGXMIPSFW_ASSEMBLY_CODE) ++#include "img_types.h" ++#include "km/rgxdefs_km.h" ++ ++typedef struct ++{ ++ IMG_UINT64 ui64StackPhyAddr; ++ IMG_UINT64 ui64RegBase; ++ IMG_UINT64 aui64PTPhyAddr[RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES]; ++ IMG_UINT32 ui32PTLog2PageSize; ++ IMG_UINT32 ui32PTNumPages; ++ IMG_UINT32 ui32Reserved1; ++ IMG_UINT32 ui32Reserved2; ++} RGXMIPSFW_BOOT_DATA; ++ ++#define RGXMIPSFW_GET_OFFSET_IN_DWORDS(offset) (offset / sizeof(IMG_UINT32)) ++#define RGXMIPSFW_GET_OFFSET_IN_QWORDS(offset) (offset / sizeof(IMG_UINT64)) ++ ++/* Used for compatibility checks */ ++#define RGXMIPSFW_ARCHTYPE_VER_CLRMSK (0xFFFFE3FFU) ++#define RGXMIPSFW_ARCHTYPE_VER_SHIFT (10U) ++#define RGXMIPSFW_CORE_ID_VALUE (0x001U) ++#define RGXFW_PROCESSOR_MIPS "MIPS" ++ ++/* microAptivAP cache line size */ ++#define RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE (16U) ++ ++/* The SOCIF transactions are identified with the top 16 bits of the physical address emitted by the MIPS */ ++#define RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN (16U) ++ ++/* Values to put in the MIPS selectors for performance counters */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0 (9U) /* Icache accesses in COUNTER0 */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1 (9U) /* Icache misses in COUNTER1 */ ++ ++#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0 (10U) /* Dcache accesses in COUNTER0 */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1 (11U) /* Dcache misses in COUNTER1 */ ++ ++#define RGXMIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0 (5U) /* ITLB instruction accesses in COUNTER0 */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1 (7U) /* JTLB instruction accesses misses in COUNTER1 */ ++ ++#define RGXMIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0 (1U) /* Instructions completed in COUNTER0 */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1 (8U) /* JTLB data misses in COUNTER1 */ ++ ++#define RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT (5U) /* Shift for the Event field in the MIPS perf ctrl registers */ ++/* Additional flags for performance counters. 
See MIPS manual for further reference */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE (8U) ++#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE (2U) ++#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_EXL (1U) ++ ++ ++#define RGXMIPSFW_C0_NBHWIRQ 8 ++ ++/* Macros to decode C0_Cause register */ ++#define RGXMIPSFW_C0_CAUSE_EXCCODE(CAUSE) (((CAUSE) & 0x7cU) >> 2U) ++#define RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR 9 ++/* Use only when Coprocessor Unusable exception */ ++#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28U) & 0x3U) ++#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10) ++#define RGXMIPSFW_C0_CAUSE_FDCIPENDING (1UL << 21) ++#define RGXMIPSFW_C0_CAUSE_IV (1UL << 23) ++#define RGXMIPSFW_C0_CAUSE_IC (1UL << 25) ++#define RGXMIPSFW_C0_CAUSE_PCIPENDING (1UL << 26) ++#define RGXMIPSFW_C0_CAUSE_TIPENDING (1UL << 30) ++#define RGXMIPSFW_C0_CAUSE_BRANCH_DELAY (1UL << 31) ++ ++/* Macros to decode C0_Debug register */ ++#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10U) & 0x1fU) ++#define RGXMIPSFW_C0_DEBUG_DSS (1UL << 0) ++#define RGXMIPSFW_C0_DEBUG_DBP (1UL << 1) ++#define RGXMIPSFW_C0_DEBUG_DDBL (1UL << 2) ++#define RGXMIPSFW_C0_DEBUG_DDBS (1UL << 3) ++#define RGXMIPSFW_C0_DEBUG_DIB (1UL << 4) ++#define RGXMIPSFW_C0_DEBUG_DINT (1UL << 5) ++#define RGXMIPSFW_C0_DEBUG_DIBIMPR (1UL << 6) ++#define RGXMIPSFW_C0_DEBUG_DDBLIMPR (1UL << 18) ++#define RGXMIPSFW_C0_DEBUG_DDBSIMPR (1UL << 19) ++#define RGXMIPSFW_C0_DEBUG_IEXI (1UL << 20) ++#define RGXMIPSFW_C0_DEBUG_DBUSEP (1UL << 21) ++#define RGXMIPSFW_C0_DEBUG_CACHEEP (1UL << 22) ++#define RGXMIPSFW_C0_DEBUG_MCHECKP (1UL << 23) ++#define RGXMIPSFW_C0_DEBUG_IBUSEP (1UL << 24) ++#define RGXMIPSFW_C0_DEBUG_DM (1UL << 30) ++#define RGXMIPSFW_C0_DEBUG_DBD (1UL << 31) ++ ++/* Macros to decode TLB entries */ ++#define RGXMIPSFW_TLB_GET_MASK(PAGE_MASK) (((PAGE_MASK) >> 13) & 0XFFFFU) ++#define RGXMIPSFW_TLB_GET_PAGE_SIZE(PAGE_MASK) ((((PAGE_MASK) | 0x1FFFU) + 1U) >> 11U) /* page size in KB */ ++#define RGXMIPSFW_TLB_GET_PAGE_MASK(PAGE_SIZE) ((((PAGE_SIZE) << 11) - 1) & ~0x7FF) /* page size in KB */ ++#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI) ((ENTRY_HI) >> 13) ++#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO) (((ENTRY_LO) >> 3) & 0x7U) ++#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO) (((ENTRY_LO) >> 6) & 0XFFFFFU) ++/* GET_PA uses a non-standard PFN mask for 36 bit addresses */ ++#define RGXMIPSFW_TLB_GET_PA(ENTRY_LO) (((IMG_UINT64)(ENTRY_LO) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6) ++#define RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO) (((ENTRY_LO) >> 30) & 0x3U) ++#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO) ((ENTRY_LO) & 0x7U) ++#define RGXMIPSFW_TLB_GLOBAL (1U) ++#define RGXMIPSFW_TLB_VALID (1U << 1) ++#define RGXMIPSFW_TLB_DIRTY (1U << 2) ++#define RGXMIPSFW_TLB_XI (1U << 30) ++#define RGXMIPSFW_TLB_RI (1U << 31) ++ ++typedef struct { ++ IMG_UINT32 ui32TLBPageMask; ++ IMG_UINT32 ui32TLBHi; ++ IMG_UINT32 ui32TLBLo0; ++ IMG_UINT32 ui32TLBLo1; ++} RGX_MIPS_TLB_ENTRY; ++ ++typedef struct { ++ IMG_UINT32 ui32RemapAddrIn; /* always 4k aligned */ ++ IMG_UINT32 ui32RemapAddrOut; /* always 4k aligned */ ++ IMG_UINT32 ui32RemapRegionSize; ++} RGX_MIPS_REMAP_ENTRY; ++ ++typedef struct { ++ IMG_UINT32 ui32ErrorState; /* This must come first in the structure */ ++ IMG_UINT32 ui32ErrorEPC; ++ IMG_UINT32 ui32StatusRegister; ++ IMG_UINT32 ui32CauseRegister; ++ IMG_UINT32 ui32BadRegister; ++ IMG_UINT32 ui32EPC; ++ IMG_UINT32 ui32SP; ++ IMG_UINT32 ui32Debug; ++ IMG_UINT32 ui32DEPC; ++ IMG_UINT32 ui32BadInstr; ++ IMG_UINT32 ui32UnmappedAddress; 
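++ /* Dump of the MIPS TLB and remap entries; array sizes are given by
++ * RGXMIPSFW_NUMBER_OF_TLB_ENTRIES and RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES above */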
++ RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]; ++ RGX_MIPS_REMAP_ENTRY asRemap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES]; ++} RGX_MIPS_STATE; ++ ++#endif /* RGXMIPSFW_ASSEMBLY_CODE */ ++ ++#endif /* RGX_MIPS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/rgx_riscv.h b/drivers/gpu/drm/img-rogue/include/rgx_riscv.h +new file mode 100644 +index 000000000000..e5be2a562f34 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgx_riscv.h +@@ -0,0 +1,250 @@ ++/*************************************************************************/ /*! ++@File rgx_riscv.h ++@Title ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Platform RGX ++@Description RGX RISCV definitions, kernel/user space ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(RGX_RISCV_H) ++#define RGX_RISCV_H ++ ++#include "km/rgxdefs_km.h" ++ ++ ++/* Utility defines to convert regions to virtual addresses and remaps */ ++#define RGXRISCVFW_GET_REGION_BASE(r) IMG_UINT32_C((r) << 28) ++#define RGXRISCVFW_GET_REGION(a) IMG_UINT32_C((a) >> 28) ++#define RGXRISCVFW_MAX_REGION_SIZE IMG_UINT32_C(1 << 28) ++#define RGXRISCVFW_GET_REMAP(r) (RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 + ((r) * 8U)) ++ ++/* RISCV remap output is aligned to 4K */ ++#define RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN (0x1000U) ++ ++/* ++ * FW bootloader defines ++ */ ++#define RGXRISCVFW_BOOTLDR_CODE_REGION IMG_UINT32_C(0xC) ++#define RGXRISCVFW_BOOTLDR_DATA_REGION IMG_UINT32_C(0x5) ++#define RGXRISCVFW_BOOTLDR_CODE_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_CODE_REGION)) ++#define RGXRISCVFW_BOOTLDR_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_DATA_REGION)) ++#define RGXRISCVFW_BOOTLDR_CODE_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_CODE_REGION)) ++#define RGXRISCVFW_BOOTLDR_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_DATA_REGION)) ++ ++/* Bootloader data offset in dwords from the beginning of the FW data allocation */ ++#define RGXRISCVFW_BOOTLDR_CONF_OFFSET (0x0) ++ ++/* ++ * FW coremem region defines ++ */ ++#define RGXRISCVFW_COREMEM_REGION IMG_UINT32_C(0x8) ++#define RGXRISCVFW_COREMEM_MAX_SIZE IMG_UINT32_C(0x10000000) /* 256 MB */ ++#define RGXRISCVFW_COREMEM_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_COREMEM_REGION)) ++#define RGXRISCVFW_COREMEM_END (RGXRISCVFW_COREMEM_BASE + RGXRISCVFW_COREMEM_MAX_SIZE - 1U) ++ ++ ++/* ++ * Host-FW shared data defines ++ */ ++#define RGXRISCVFW_SHARED_CACHED_DATA_REGION (0x6UL) ++#define RGXRISCVFW_SHARED_UNCACHED_DATA_REGION (0xDUL) ++#define RGXRISCVFW_SHARED_CACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) ++#define RGXRISCVFW_SHARED_UNCACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) ++#define RGXRISCVFW_SHARED_CACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) ++#define RGXRISCVFW_SHARED_UNCACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) ++ ++ ++/* ++ * GPU SOCIF access defines ++ */ ++#define RGXRISCVFW_SOCIF_REGION (0x2U) ++#define RGXRISCVFW_SOCIF_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SOCIF_REGION)) ++ ++ ++/* The things that follow are excluded when compiling assembly sources */ ++#if !defined(RGXRISCVFW_ASSEMBLY_CODE) ++#include "img_types.h" ++ ++#define RGXFW_PROCESSOR_RISCV "RISCV" ++#define RGXRISCVFW_CORE_ID_VALUE (0x00450B02U) ++#define RGXRISCVFW_MISA_ADDR (0x301U) ++#define RGXRISCVFW_MISA_VALUE (0x40001104U) ++#define RGXRISCVFW_MSCRATCH_ADDR (0x340U) ++ ++typedef struct ++{ ++ IMG_UINT64 ui64CorememCodeDevVAddr; ++ IMG_UINT64 ui64CorememDataDevVAddr; ++ IMG_UINT32 ui32CorememCodeFWAddr; ++ IMG_UINT32 ui32CorememDataFWAddr; ++ IMG_UINT32 ui32CorememCodeSize; ++ IMG_UINT32 ui32CorememDataSize; ++ IMG_UINT32 ui32Flags; ++ IMG_UINT32 ui32Reserved; ++} RGXRISCVFW_BOOT_DATA; ++ ++/* ++ * List of registers to be printed in debug dump. 
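++ * Each X() entry below expands to one IMG_UINT32 field of the RGXRISCVFW_STATE structure.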
++ * First column: register names (general purpose or control/status registers) ++ * Second column: register number to be used in abstract access register command ++ * (see RISC-V debug spec v0.13) ++ */ ++#define RGXRISCVFW_DEBUG_DUMP_REGISTERS \ ++ X(pc, 0x7b1) /* dpc */ \ ++ X(ra, 0x1001) \ ++ X(sp, 0x1002) \ ++ X(mepc, 0x341) \ ++ X(mcause, 0x342) \ ++ X(mdseac, 0xfc0) \ ++ X(mstatus, 0x300) \ ++ X(mie, 0x304) \ ++ X(mip, 0x344) \ ++ X(mscratch, 0x340) \ ++ X(mbvnc0, 0xffe) \ ++ X(mbvnc1, 0xfff) \ ++ X(micect, 0x7f0) \ ++ X(mdcect, 0x7f3) \ ++ X(mdcrfct, 0x7f4) \ ++ ++typedef struct ++{ ++#define X(name, address) \ ++ IMG_UINT32 name; ++ ++ RGXRISCVFW_DEBUG_DUMP_REGISTERS ++#undef X ++} RGXRISCVFW_STATE; ++ ++ ++#define RGXRISCVFW_MCAUSE_INTERRUPT (1U << 31) ++ ++#define RGXRISCVFW_MCAUSE_TABLE \ ++ X(0x00000000U, IMG_FALSE, "NMI pin assertion") /* Also reset value */ \ ++ X(0x00000001U, IMG_TRUE, "Instruction access fault") \ ++ X(0x00000002U, IMG_TRUE, "Illegal instruction") \ ++ X(0x00000003U, IMG_TRUE, "Breakpoint") \ ++ X(0x00000004U, IMG_TRUE, "Load address misaligned") \ ++ X(0x00000005U, IMG_TRUE, "Load access fault") \ ++ X(0x00000006U, IMG_TRUE, "Store/AMO address misaligned") \ ++ X(0x00000007U, IMG_TRUE, "Store/AMO access fault") \ ++ X(0x0000000BU, IMG_TRUE, "Environment call from M-mode (FW assert)") \ ++ X(0x80000007U, IMG_FALSE, "Machine timer interrupt") \ ++ X(0x8000000BU, IMG_FALSE, "Machine external interrupt") \ ++ X(0x8000001EU, IMG_FALSE, "Machine correctable error local interrupt") \ ++ X(0xF0000000U, IMG_TRUE, "Machine D-bus store error NMI") \ ++ X(0xF0000001U, IMG_TRUE, "Machine D-bus non-blocking load error NMI") \ ++ X(0xF0000002U, IMG_TRUE, "dCache unrecoverable NMI") ++ ++ ++/* Debug module HW defines */ ++#define RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER (0U) ++#define RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY (2U) ++#define RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT (2UL << 20) ++#define RGXRISCVFW_DMI_COMMAND_WRITE (1UL << 16) ++#define RGXRISCVFW_DMI_COMMAND_READ (0UL << 16) ++#define RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT (2U) ++ ++/* Abstract command error codes (descriptions from RISC-V debug spec v0.13) */ ++typedef enum ++{ ++ /* No error. */ ++ RISCV_ABSTRACT_CMD_NO_ERROR = 0, ++ ++ /* ++ * An abstract command was executing while command, abstractcs, or abstractauto ++ * was written, or when one of the data or progbuf registers was read or ++ * written. This status is only written if cmderr contains 0. ++ */ ++ RISCV_ABSTRACT_CMD_BUSY = 1, ++ ++ /* ++ * The requested command is not supported, regardless of whether ++ * the hart is running or not. ++ */ ++ RISCV_ABSTRACT_CMD_NOT_SUPPORTED = 2, ++ ++ /* ++ * An exception occurred while executing the command ++ * (e.g. while executing the Program Buffer). ++ */ ++ RISCV_ABSTRACT_CMD_EXCEPTION = 3, ++ ++ /* ++ * The abstract command couldn't execute because the hart wasn't in the required ++ * state (running/halted), or unavailable. ++ */ ++ RISCV_ABSTRACT_CMD_HALT_RESUME = 4, ++ ++ /* ++ * The abstract command failed due to a bus error ++ * (e.g. alignment, access size, or timeout). ++ */ ++ RISCV_ABSTRACT_CMD_BUS_ERROR = 5, ++ ++ /* The command failed for another reason. */ ++ RISCV_ABSTRACT_CMD_OTHER_ERROR = 7 ++ ++} RGXRISCVFW_ABSTRACT_CMD_ERR; ++ ++/* System Bus error codes (descriptions from RISC-V debug spec v0.13) */ ++typedef enum ++{ ++ /* There was no bus error. */ ++ RISCV_SYSBUS_NO_ERROR = 0, ++ ++ /* There was a timeout. */ ++ RISCV_SYSBUS_TIMEOUT = 1, ++ ++ /* A bad address was accessed. 
*/ ++ RISCV_SYSBUS_BAD_ADDRESS = 2, ++ ++ /* There was an alignment error. */ ++ RISCV_SYSBUS_BAD_ALIGNMENT = 3, ++ ++ /* An access of unsupported size was requested. */ ++ RISCV_SYSBUS_UNSUPPORTED_SIZE = 4, ++ ++ /* Other. */ ++ RISCV_SYSBUS_OTHER_ERROR = 7 ++ ++} RGXRISCVFW_SYSBUS_ERR; ++ ++#endif /* RGXRISCVFW_ASSEMBLY_CODE */ ++ ++#endif /* RGX_RISCV_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/rgxfw_log_helper.h b/drivers/gpu/drm/img-rogue/include/rgxfw_log_helper.h +new file mode 100644 +index 000000000000..275b63aca46b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rgxfw_log_helper.h +@@ -0,0 +1,79 @@ ++/*************************************************************************/ /*! ++@File rgxfw_log_helper.h ++@Title Firmware TBI logging helper function ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Platform Generic ++@Description This file contains some helper code to make TBI logging possible ++ Specifically, it uses the SFIDLIST xmacro to trace ids back to ++ the original strings. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGXFW_LOG_HELPER_H ++#define RGXFW_LOG_HELPER_H ++ ++#include "rgx_fwif_sf.h" ++ ++static const IMG_CHAR *const groups[]= { ++#define X(A,B) #B, ++ RGXFW_LOG_SFGROUPLIST ++#undef X ++}; ++ ++/* idToStringID : Search SFs tuples {id,string} for a matching id. ++ * return index to array if found or RGXFW_SF_LAST if none found. 
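++ * The table is terminated by an entry whose ui32Id equals RGXFW_SF_LAST; the linear scan below uses that entry as its end sentinel.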
++ * bsearch could be used as ids are in increasing order. */ ++#if defined(RGX_FIRMWARE) ++static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXFW_STID_FMT *const psSFs) ++#else ++static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXKM_STID_FMT *const psSFs) ++#endif ++{ ++ IMG_UINT32 i = 0, ui32Id = (IMG_UINT32)RGXFW_SF_LAST; ++ ++ for ( i = 0 ; psSFs[i].ui32Id != (IMG_UINT32)RGXFW_SF_LAST ; i++) ++ { ++ if ( ui32CheckData == psSFs[i].ui32Id ) ++ { ++ ui32Id = i; ++ break; ++ } ++ } ++ return ui32Id; ++} ++ ++#endif /* RGXFW_LOG_HELPER_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/ri_typedefs.h b/drivers/gpu/drm/img-rogue/include/ri_typedefs.h +new file mode 100644 +index 000000000000..77be10e2ab03 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/ri_typedefs.h +@@ -0,0 +1,52 @@ ++/*************************************************************************/ /*! ++@File ++@Title Resource Information (RI) Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Client side part of RI management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef RI_TYPEDEFS_H ++#define RI_TYPEDEFS_H ++ ++#include "img_types.h" ++ ++typedef struct RI_SUBLIST_ENTRY RI_ENTRY; ++typedef RI_ENTRY* RI_HANDLE; ++ ++#endif /* #ifndef RI_TYPEDEFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_alignchecks.h b/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_alignchecks.h +new file mode 100644 +index 000000000000..4f82b23743be +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_alignchecks.h +@@ -0,0 +1,192 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX fw interface alignment checks ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Checks to avoid misalignment in RGX fw data structures ++ shared with the host ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_FWIF_ALIGNCHECKS_H) ++#define RGX_FWIF_ALIGNCHECKS_H ++ ++/* for the offsetof macro */ ++#if defined(__KERNEL__) && defined(__linux__) ++#include <linux/stddef.h> ++#else ++#include <stddef.h> ++#endif ++ ++/*!
++ ****************************************************************************** ++ * Alignment UM/FW checks array ++ *****************************************************************************/ ++ ++#define RGXFW_ALIGN_CHECKS_UM_MAX 128U ++ ++#define RGXFW_ALIGN_CHECKS_INIT0 \ ++ sizeof(RGXFWIF_TRACEBUF), \ ++ offsetof(RGXFWIF_TRACEBUF, ui32LogType), \ ++ offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \ ++ offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), \ ++ offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags), \ ++ \ ++ sizeof(RGXFWIF_SYSDATA), \ ++ offsetof(RGXFWIF_SYSDATA, ePowState), \ ++ offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount), \ ++ offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal), \ ++ offsetof(RGXFWIF_SYSDATA, ui32FWFaults), \ ++ offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags), \ ++ \ ++ sizeof(RGXFWIF_OSDATA), \ ++ offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark), \ ++ offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted), \ ++ \ ++ sizeof(RGXFWIF_HWRINFOBUF), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount), \ ++ \ ++ /* RGXFWIF_CMDTA checks */ \ ++ sizeof(RGXFWIF_CMDTA), \ ++ offsetof(RGXFWIF_CMDTA, sGeomRegs), \ ++ \ ++ /* RGXFWIF_CMD3D checks */ \ ++ sizeof(RGXFWIF_CMD3D), \ ++ offsetof(RGXFWIF_CMD3D, s3DRegs), \ ++ \ ++ /* RGXFWIF_CMDTRANSFER checks */ \ ++ sizeof(RGXFWIF_CMDTRANSFER), \ ++ offsetof(RGXFWIF_CMDTRANSFER, sTransRegs), \ ++ \ ++ \ ++ /* RGXFWIF_CMD_COMPUTE checks */ \ ++ sizeof(RGXFWIF_CMD_COMPUTE), \ ++ offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \ ++ \ ++ /* RGXFWIF_FREELIST checks */ \ ++ sizeof(RGXFWIF_FREELIST), \ ++ offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr), \ ++ offsetof(RGXFWIF_FREELIST, ui32MaxPages), \ ++ offsetof(RGXFWIF_FREELIST, ui32CurrentPages), \ ++ \ ++ /* RGXFWIF_HWRTDATA checks */ \ ++ sizeof(RGXFWIF_HWRTDATA), \ ++ offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr), \ ++ offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr), \ ++ offsetof(RGXFWIF_HWRTDATA, apsFreeLists), \ ++ offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase), \ ++ offsetof(RGXFWIF_HWRTDATA, eState), \ ++ \ ++ /* RGXFWIF_HWRTDATA_COMMON checks */ \ ++ sizeof(RGXFWIF_HWRTDATA_COMMON), \ ++ offsetof(RGXFWIF_HWRTDATA_COMMON, bTACachesNeedZeroing),\ ++ \ ++ /* RGXFWIF_HWPERF_CTL_BLK checks */ \ ++ sizeof(RGXFWIF_HWPERF_CTL_BLK), \ ++ offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg), \ ++ \ ++ /* RGXFWIF_HWPERF_CTL checks */ \ ++ sizeof(RGXFWIF_HWPERF_CTL), \ ++ offsetof(RGXFWIF_HWPERF_CTL, SelCntr) ++ ++#if defined(RGX_FEATURE_TLA) ++#define RGXFW_ALIGN_CHECKS_INIT1 \ ++ RGXFW_ALIGN_CHECKS_INIT0, \ ++ /* RGXFWIF_CMD2D checks */ \ ++ sizeof(RGXFWIF_CMD2D), \ ++ offsetof(RGXFWIF_CMD2D, s2DRegs) ++#else ++#define RGXFW_ALIGN_CHECKS_INIT1 RGXFW_ALIGN_CHECKS_INIT0 ++#endif /* RGX_FEATURE_TLA */ ++ ++ ++#if defined(RGX_FEATURE_FASTRENDER_DM) ++#define RGXFW_ALIGN_CHECKS_INIT \ ++ RGXFW_ALIGN_CHECKS_INIT1, \ ++ /* RGXFWIF_CMDTDM checks */ \ ++ sizeof(RGXFWIF_CMDTDM), \ ++ offsetof(RGXFWIF_CMDTDM, sTDMRegs) ++#else ++#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT1 ++#endif /* ! RGX_FEATURE_FASTRENDER_DM */ ++ ++ ++ ++/*! 
++ ****************************************************************************** ++ * Alignment KM checks array ++ *****************************************************************************/ ++ ++#define RGXFW_ALIGN_CHECKS_INIT_KM \ ++ sizeof(RGXFWIF_SYSINIT), \ ++ offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \ ++ offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \ ++ offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \ ++ offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \ ++ offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \ ++ offsetof(RGXFWIF_SYSINIT, sFwSysData), \ ++ sizeof(RGXFWIF_OSINIT), \ ++ offsetof(RGXFWIF_OSINIT, psKernelCCBCtl), \ ++ offsetof(RGXFWIF_OSINIT, psKernelCCB), \ ++ offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl), \ ++ offsetof(RGXFWIF_OSINIT, psFirmwareCCB), \ ++ offsetof(RGXFWIF_OSINIT, sFwOsData), \ ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks), \ ++ \ ++ /* RGXFWIF_FWRENDERCONTEXT checks */ \ ++ sizeof(RGXFWIF_FWRENDERCONTEXT), \ ++ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \ ++ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \ ++ \ ++ sizeof(RGXFWIF_FWCOMMONCONTEXT), \ ++ offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \ ++ offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \ ++ offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \ ++ \ ++ sizeof(RGXFWIF_MMUCACHEDATA), \ ++ offsetof(RGXFWIF_MMUCACHEDATA, ui32CacheFlags), \ ++ offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync), \ ++ offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue) ++ ++#endif /* RGX_FWIF_ALIGNCHECKS_H */ ++ ++/****************************************************************************** ++ End of file (rgx_fwif_alignchecks.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_hwperf.h b/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_hwperf.h +new file mode 100644 +index 000000000000..7001092c7221 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_hwperf.h +@@ -0,0 +1,252 @@ ++/*************************************************************************/ /*! ++@File rgx_fwif_hwperf.h ++@Title RGX HWPerf support ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Shared header between RGX firmware and Init process ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_FWIF_HWPERF_H ++#define RGX_FWIF_HWPERF_H ++ ++#include "rgx_fwif_shared.h" ++#include "rgx_hwperf.h" ++#include "rgxdefs_km.h" ++ ++ ++/*****************************************************************************/ ++ ++/* Structure to hold a block's parameters for passing between the BG context ++ * and the IRQ context when applying a configuration request. */ ++typedef struct ++{ ++ IMG_BOOL bValid; ++ IMG_BOOL bEnabled; ++ IMG_UINT32 eBlockID; ++ IMG_UINT32 uiCounterMask; ++ IMG_UINT64 RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_MUX_COUNTERS_MAX]; ++} RGXFWIF_HWPERF_CTL_BLK; ++ ++/* Structure used to hold the configuration of the non-mux counter blocks */ ++typedef struct ++{ ++ IMG_UINT32 ui32NumSelectedCounters; ++ IMG_UINT32 aui32SelectedCountersIDs[RGX_HWPERF_MAX_CUSTOM_CNTRS]; ++} RGXFW_HWPERF_SELECT; ++ ++/* Structure used to hold a Direct-Addressable block's parameters for passing ++ * between the BG context and the IRQ context when applying a configuration ++ * request. RGX_FEATURE_HWPERF_OCEANIC use only. ++ */ ++typedef struct ++{ ++ IMG_UINT32 uiEnabled; ++ IMG_UINT32 uiNumCounters; ++ IMG_UINT32 eBlockID; ++ RGXFWIF_DEV_VIRTADDR psModel; ++ IMG_UINT32 aui32Counters[RGX_CNTBLK_COUNTERS_MAX]; ++} RGXFWIF_HWPERF_DA_BLK; ++ ++ ++/* Structure to hold the whole configuration request details for all blocks. ++ * The block masks and counts are used to optimise reading of this data. */ ++typedef struct ++{ ++ IMG_UINT32 ui32HWPerfCtlFlags; ++ ++ IMG_UINT32 ui32SelectedCountersBlockMask; ++ RGXFW_HWPERF_SELECT RGXFW_ALIGN SelCntr[RGX_HWPERF_MAX_CUSTOM_BLKS]; ++ ++ IMG_UINT32 ui32EnabledMUXBlksCount; ++ RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[RGX_HWPERF_MAX_MUX_BLKS]; ++} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL; ++ ++/* NOTE: The switch statement in this function must be kept in alignment with ++ * the enumeration RGX_HWPERF_CNTBLK_ID defined in rgx_hwperf.h. ASSERTs may ++ * result if not. ++ * The function provides a hash lookup to get a handle on the global store for ++ * a block's configuration from its block ID.
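++ * Direct blocks (TA, RASTER, HUB, TORNADO, JONES) use their block ID as the array index; indirect blocks are packed after RGX_CNTBLK_ID_DIRECT_LAST group by group (TPU_MCU, USC, TEXAS, RASTER, BLACKPEARL, PBE), offset by the unit number in (eBlockID & RGX_CNTBLK_ID_UNIT_MASK). For example, RGX_CNTBLK_ID_USC1 maps to index RGX_CNTBLK_ID_DIRECT_LAST + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + 1.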
++ */ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(rgxfw_hwperf_get_block_ctl) ++#endif ++static INLINE RGXFWIF_HWPERF_CTL_BLK *rgxfw_hwperf_get_block_ctl( ++ RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData) ++{ ++ IMG_UINT32 ui32Idx; ++ ++ /* Hash the block ID into a control configuration array index */ ++ switch (eBlockID) ++ { ++ case RGX_CNTBLK_ID_TA: ++ case RGX_CNTBLK_ID_RASTER: ++ case RGX_CNTBLK_ID_HUB: ++ case RGX_CNTBLK_ID_TORNADO: ++ case RGX_CNTBLK_ID_JONES: ++ { ++ ui32Idx = eBlockID; ++ break; ++ } ++ case RGX_CNTBLK_ID_TPU_MCU0: ++ case RGX_CNTBLK_ID_TPU_MCU1: ++ case RGX_CNTBLK_ID_TPU_MCU2: ++ case RGX_CNTBLK_ID_TPU_MCU3: ++ case RGX_CNTBLK_ID_TPU_MCU4: ++ case RGX_CNTBLK_ID_TPU_MCU5: ++ case RGX_CNTBLK_ID_TPU_MCU6: ++ case RGX_CNTBLK_ID_TPU_MCU7: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_USC0: ++ case RGX_CNTBLK_ID_USC1: ++ case RGX_CNTBLK_ID_USC2: ++ case RGX_CNTBLK_ID_USC3: ++ case RGX_CNTBLK_ID_USC4: ++ case RGX_CNTBLK_ID_USC5: ++ case RGX_CNTBLK_ID_USC6: ++ case RGX_CNTBLK_ID_USC7: ++ case RGX_CNTBLK_ID_USC8: ++ case RGX_CNTBLK_ID_USC9: ++ case RGX_CNTBLK_ID_USC10: ++ case RGX_CNTBLK_ID_USC11: ++ case RGX_CNTBLK_ID_USC12: ++ case RGX_CNTBLK_ID_USC13: ++ case RGX_CNTBLK_ID_USC14: ++ case RGX_CNTBLK_ID_USC15: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_TEXAS0: ++ case RGX_CNTBLK_ID_TEXAS1: ++ case RGX_CNTBLK_ID_TEXAS2: ++ case RGX_CNTBLK_ID_TEXAS3: ++ case RGX_CNTBLK_ID_TEXAS4: ++ case RGX_CNTBLK_ID_TEXAS5: ++ case RGX_CNTBLK_ID_TEXAS6: ++ case RGX_CNTBLK_ID_TEXAS7: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_RASTER0: ++ case RGX_CNTBLK_ID_RASTER1: ++ case RGX_CNTBLK_ID_RASTER2: ++ case RGX_CNTBLK_ID_RASTER3: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + ++ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_BLACKPEARL0: ++ case RGX_CNTBLK_ID_BLACKPEARL1: ++ case RGX_CNTBLK_ID_BLACKPEARL2: ++ case RGX_CNTBLK_ID_BLACKPEARL3: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + ++ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_PBE0: ++ case RGX_CNTBLK_ID_PBE1: ++ case RGX_CNTBLK_ID_PBE2: ++ case RGX_CNTBLK_ID_PBE3: ++ case RGX_CNTBLK_ID_PBE4: ++ case RGX_CNTBLK_ID_PBE5: ++ case RGX_CNTBLK_ID_PBE6: ++ case RGX_CNTBLK_ID_PBE7: ++ case RGX_CNTBLK_ID_PBE8: ++ case RGX_CNTBLK_ID_PBE9: ++ case RGX_CNTBLK_ID_PBE10: ++ case RGX_CNTBLK_ID_PBE11: ++ case RGX_CNTBLK_ID_PBE12: ++ case RGX_CNTBLK_ID_PBE13: ++ case RGX_CNTBLK_ID_PBE14: ++ case RGX_CNTBLK_ID_PBE15: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + ++ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) + ++ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ default: ++ { ++ ui32Idx = RGX_HWPERF_MAX_DEFINED_BLKS; ++ break; ++ } ++ } ++ if (ui32Idx >= 
RGX_HWPERF_MAX_DEFINED_BLKS) ++ { ++ return NULL; ++ } ++ return &psHWPerfInitData->sBlkCfg[ui32Idx]; ++} ++ ++/* Stub routine for rgxfw_hwperf_get_da_block_ctl() for non ++ * RGX_FEATURE_HWPERF_OCEANIC systems. Just return a NULL. ++ */ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(rgxfw_hwperf_get_da_block_ctl) ++#endif ++static INLINE RGXFWIF_HWPERF_DA_BLK* rgxfw_hwperf_get_da_block_ctl( ++ RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData) ++{ ++ PVR_UNREFERENCED_PARAMETER(eBlockID); ++ PVR_UNREFERENCED_PARAMETER(psHWPerfInitData); ++ ++ return NULL; ++} ++#endif +diff --git a/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_km.h b/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_km.h +new file mode 100644 +index 000000000000..724f6eecd6fd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_km.h +@@ -0,0 +1,2341 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX firmware interface structures used by pvrsrvkm ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX firmware interface structures used by pvrsrvkm ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(RGX_FWIF_KM_H) ++#define RGX_FWIF_KM_H ++ ++#include "img_types.h" ++#include "rgx_fwif_shared.h" ++#include "rgxdefs_km.h" ++#include "dllist.h" ++#include "rgx_hwperf.h" ++ ++ ++/*************************************************************************/ /*! ++ Logging type ++*/ /**************************************************************************/ ++#define RGXFWIF_LOG_TYPE_NONE 0x00000000U ++#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U ++#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U ++#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U ++#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U ++#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U ++#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U ++#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U ++#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U ++#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U ++#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U ++#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U ++#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U ++#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U ++#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U ++#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U ++#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U ++#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU ++#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU ++ ++/* String used in pvrdebug -h output */ ++#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug" ++ ++/* Table entry to map log group strings to log type value */ ++typedef struct { ++ const IMG_CHAR* pszLogGroupName; ++ IMG_UINT32 ui32LogGroupType; ++} RGXFWIF_LOG_GROUP_MAP_ENTRY; ++ ++/* ++ Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup ++ table where needed. Keep log group names short, no more than 20 chars. 
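++ The group names listed here are expected to mirror RGXFWIF_LOG_GROUPS_STRING_LIST above, plus the "none" entry.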
++*/ ++#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ ++ { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ ++ { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ ++ { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ ++ { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ ++ { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ ++ { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ ++ { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ ++ { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ ++ { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ ++ { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ ++ { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ ++ { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ ++ { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ ++ { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ ++ { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } ++ ++ ++/* Used in print statements to display log group state, one %s per group defined */ ++#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" ++ ++/* Used in a print statement to display log group state, one per group */ ++#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) ((((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) != 0U) ?("main ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) != 0U) ?("mts ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) != 0U) ?("cleanup ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) != 0U) ?("csw ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) != 0U) ?("bif ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_PM) != 0U) ?("pm ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) != 0U) ?("rtd ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) != 0U) ?("spm ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_POW) != 0U) ?("pow ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) != 0U) ?("hwr ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) != 0U) ?("hwp ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) != 0U) ?("rpm ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) != 0U) ?("dma ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) != 0U) ?("misc ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) != 0U) ?("debug ") :("")) ++ ++ ++/************************************************************************ ++* RGX FW signature checks ++************************************************************************/ ++#define RGXFW_SIG_BUFFER_SIZE_MIN (8192) ++ ++#define RGXFWIF_TIMEDIFF_ID ((0x1UL << 28) | RGX_CR_TIMER) ++ ++/*! ++ ****************************************************************************** ++ * Trace Buffer ++ *****************************************************************************/ ++ ++/*! Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ ++#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U ++#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U ++#if defined(RGXFW_META_SUPPORT_2ND_THREAD) ++#define RGXFW_THREAD_NUM 2U ++#else ++#define RGXFW_THREAD_NUM 1U ++#endif ++ ++#define RGXFW_POLL_TYPE_SET 0x80000000U ++ ++typedef struct ++{ ++ IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; ++ IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; ++ IMG_UINT32 ui32LineNum; ++} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; ++ ++/*! ++ * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface ++ * @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing ++ * @{ ++ */ ++ ++/*! 
++ * @Brief Firmware trace buffer details ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer)*/ ++ ++#if defined(RGX_FIRMWARE) ++ IMG_UINT32 *pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */ ++#else ++ RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address)*/ ++#endif ++ IMG_PUINT32 pui32TraceBuffer; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */ ++ ++ RGXFWIF_FILE_INFO_BUF sAssertBuf; ++} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; ++ ++/*! @} End of Defgroup SRVAndFWTracing */ ++ ++#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; ++ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; ++ IMG_UINT32 RGXFW_ALIGN ui32Data; ++ IMG_UINT32 ui32Reserved; ++ RGXFWIF_FILE_INFO_BUF sFaultBuf; ++} UNCACHED_ALIGN RGX_FWFAULTINFO; ++ ++ ++#define RGXFWIF_POW_STATES \ ++ X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ ++ X(RGXFWIF_POW_ON) /* running HW commands */ \ ++ X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ ++ X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ ++ ++typedef enum ++{ ++#define X(NAME) NAME, ++ RGXFWIF_POW_STATES ++#undef X ++} RGXFWIF_POW_STATE; ++ ++/* Firmware HWR states */ ++#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ ++#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */ ++#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ ++#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ ++#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ ++#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ ++#define RGXFWIF_HWR_RESTART_REQUESTED (IMG_UINT32_C(0x1) << 7U) /*!< The FW has requested the host to restart it */ ++ ++#define RGXFWIF_PHR_STATE_SHIFT (8U) ++#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */ ++#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */ ++#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED) ++ ++#define RGXFWIF_PHR_MODE_OFF (0UL) ++#define RGXFWIF_PHR_MODE_RD_RESET (1UL) ++#define RGXFWIF_PHR_MODE_FULL_RESET (2UL) ++ ++typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; ++ ++/* Firmware per-DM HWR states */ ++#define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */ ++#define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */ ++#define RGXFWIF_DM_STATE_NEEDS_SKIP (IMG_UINT32_C(0x1) << 2) /*!< DM need to skip to next cmd before resuming processing */ ++#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (IMG_UINT32_C(0x1) << 3) /*!< DM need partial render cleanup before resuming processing */ ++#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR (IMG_UINT32_C(0x1) << 4) /*!< DM need to increment Recovery Count once fully recovered */ ++#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (IMG_UINT32_C(0x1) << 5) /*!< DM was identified as locking up and 
causing HWR */ ++#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (IMG_UINT32_C(0x1) << 6) /*!< DM was innocently affected by another lockup which caused HWR */ ++#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (IMG_UINT32_C(0x1) << 7) /*!< DM was identified as over-running and causing HWR */ ++#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */ ++#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */ ++#define RGXFWIF_DM_STATE_GPU_ECC_HWR (IMG_UINT32_C(0x1) << 10) /*!< DM was forced into HWR due to an uncorrected GPU ECC error */ ++ ++/* Firmware's connection state */ ++typedef IMG_UINT32 RGXFWIF_CONNECTION_FW_STATE; ++#define RGXFW_CONNECTION_FW_OFFLINE 0U /*!< Firmware is offline */ ++#define RGXFW_CONNECTION_FW_READY 1U /*!< Firmware is initialised */ ++#define RGXFW_CONNECTION_FW_ACTIVE 2U /*!< Firmware connection is fully established */ ++#define RGXFW_CONNECTION_FW_OFFLOADING 3U /*!< Firmware is clearing up connection data */ ++#define RGXFW_CONNECTION_FW_STATE_COUNT 4U ++ ++/* OS' connection state */ ++typedef enum ++{ ++ RGXFW_CONNECTION_OS_OFFLINE = 0, /*!< OS is offline */ ++ RGXFW_CONNECTION_OS_READY, /*!< OS's KM driver is setup and waiting */ ++ RGXFW_CONNECTION_OS_ACTIVE, /*!< OS connection is fully established */ ++ RGXFW_CONNECTION_OS_STATE_COUNT ++} RGXFWIF_CONNECTION_OS_STATE; ++ ++typedef struct ++{ ++ IMG_UINT bfOsState : 3; ++ IMG_UINT bfFLOk : 1; ++ IMG_UINT bfFLGrowPending : 1; ++ IMG_UINT bfIsolatedOS : 1; ++ IMG_UINT bfReserved : 26; ++} RGXFWIF_OS_RUNTIME_FLAGS; ++ ++typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; ++ ++#if defined(PVRSRV_STALLED_CCB_ACTION) ++#define PVR_SLR_LOG_ENTRIES 10U ++#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64Timestamp; ++ IMG_UINT32 ui32FWCtxAddr; ++ IMG_UINT32 ui32NumUFOs; ++ IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; ++} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; ++#endif ++ ++/*! ++ * @InGroup SRVAndFWTracing ++ * @Brief Firmware trace control data ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32LogType; /*!< FW trace log group configuration */ ++ RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer */ ++ IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated ++ (in RGXTraceBufferInitOnDemandResources) */ ++ IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_TRACEBUF; ++ ++/*! 
@Brief Firmware system data shared with the Host driver */ ++typedef struct ++{ ++ IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ ++ IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ ++ volatile RGXFWIF_POW_STATE ePowState; ++ volatile IMG_UINT32 ui32HWPerfRIdx; ++ volatile IMG_UINT32 ui32HWPerfWIdx; ++ volatile IMG_UINT32 ui32HWPerfWrapCount; ++ IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ ++ IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ ++ ++ /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with ++ * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ ++ IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ ++ IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ ++ IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ ++ RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */ ++ RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */ ++ IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */ ++ IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */ ++ IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */ ++ IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */ ++ IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; ++#if defined(SUPPORT_POWMON_COMPONENT) ++#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) ++ RGXFWIF_TRACEBUF_SPACE sPowerMonBuf; ++ IMG_UINT32 ui32PowerMonBufSizeInDWords; ++#endif ++#endif ++ ++#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) ++#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) ++#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) ++ IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; ++#endif ++ RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */ ++ RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */ ++ IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ ++ IMG_UINT32 ui32McConfig; /*!< Identify whether MC config is P-P or P-S */ ++} UNCACHED_ALIGN RGXFWIF_SYSDATA; ++ ++/*! 
++ * @InGroup ContextSwitching ++ * @Brief Firmware per-os data and configuration ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ ++ IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ ++ IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */ ++#if defined(PVRSRV_STALLED_CCB_ACTION) ++ IMG_UINT32 ui32ForcedUpdatesRequested; ++ IMG_UINT8 ui8SLRLogWp; ++ RGXFWIF_SLR_ENTRY sSLRLogFirst; ++ RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; ++ IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; ++#endif ++ volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ ++ IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */ ++ RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */ ++ IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_OSDATA; ++ ++/* Firmware trace time-stamp field breakup */ ++ ++/* RGX_CR_TIMER register read (48 bits) value*/ ++#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) ++#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) ++ ++/* Extra debug-info (16 bits) */ ++#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) ++#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK ++ ++ ++/* Debug-info sub-fields */ ++/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ ++#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) ++#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) ++ ++/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ ++#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) ++#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) ++ ++/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */ ++#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) ++#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) ++ ++/* Bit 3-15: Unused bits */ ++ ++#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 ++#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " ++#define RGXFWT_DEBUG_INFO_STR_APPEND ")" ++ ++/* Table of debug info sub-field's masks and corresponding message strings ++ * to be appended to firmware trace ++ * ++ * Mask : 16 bit mask to be applied to debug-info field ++ * String : debug info message string ++ */ ++ ++#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ ++/*Mask, String*/ \ ++X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ ++X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \ ++X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events") ++ ++/*! ++ ****************************************************************************** ++ * HWR Data ++ *****************************************************************************/ ++/*! ++ * @Defgroup HWRInfo FW HWR shared data interface ++ * @Brief Types grouping data structures and defines used in realising the HWR record. ++ * @{ ++ */ ++/*! 
@Brief HWR Lockup types */ ++typedef enum ++{ ++ RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */ ++ RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */ ++ RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */ ++ RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */ ++ RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */ ++ RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */ ++ RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */ ++ RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */ ++ RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */ ++ RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */ ++ RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */ ++} RGX_HWRTYPE; ++ ++#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1) ++ ++#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false) ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */ ++ IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */ ++ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; ++} RGX_BIFINFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */ ++} RGX_ECCINFO; ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */ ++ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; ++} RGX_MMUINFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */ ++ IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */ ++ IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */ ++ IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; ++} UNCACHED_ALIGN RGX_POLLINFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32BadVAddr; /*!< VA address */ ++ IMG_UINT32 ui32EntryLo; ++} RGX_TLBINFO; ++ ++/*! @Brief Structure to keep information specific to a lockup e.g. DM, timer, lockup type etc. 
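++ * The valid member of the uHWRData union is indicated by the eHWRType field.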
*/ ++typedef struct ++{ ++ union ++ { ++ RGX_BIFINFO sBIFInfo; /*!< BIF failure details */ ++ RGX_MMUINFO sMMUInfo; /*!< MMU failure details */ ++ RGX_POLLINFO sPollInfo; /*!< Poll failure details */ ++ RGX_TLBINFO sTLBInfo; /*!< TLB failure details */ ++ RGX_ECCINFO sECCInfo; /*!< ECC failure details */ ++ } uHWRData; ++ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */ ++ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */ ++ IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */ ++ IMG_UINT32 ui32PID; /*!< PID belonging to the workload */ ++ IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */ ++ IMG_UINT32 ui32HWRNumber; /*!< HWR number */ ++ IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */ ++ IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */ ++ RGX_HWRTYPE eHWRType; /*!< Type of lockup */ ++ RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */ ++ IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved[2]; ++} UNCACHED_ALIGN RGX_HWRINFO; ++ ++#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ ++#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ ++#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ ++#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ ++ ++/*! @Brief Firmware HWR information structure allocated by the Services and used by the Firmware to update recovery information. */ ++typedef struct ++{ ++ RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */ ++ IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */ ++ IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */ ++ IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */ ++ IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ ++ IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */ ++ IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */ ++ IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */ ++ IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */ ++} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; ++ ++/*! @} End of HWRInfo */ ++ ++#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1)) ++#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (IMG_UINT32_C(0x2)) ++#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (IMG_UINT32_C(0x3)) ++#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (IMG_UINT32_C(0x4)) ++ ++#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1)) ++#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2)) ++ ++#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1)) ++#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2)) ++/*! 
++ ****************************************************************************** ++ * RGX firmware Init Config Data ++ *****************************************************************************/ ++ ++/* Flag definitions affecting the firmware globally */ ++#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) ++#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1) ++#define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2) ++#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) ++#define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4) ++/* 5 unused */ ++#define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6) ++#define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7) ++#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8) ++/* 9 unused */ ++/* 10 unused */ ++/* 11 unused */ ++#define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12) ++#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13) ++#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (IMG_UINT32_C(0x1) << 14) ++/* 15 unused */ ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19) ++#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20) ++#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED (IMG_UINT32_C(0x1) << 21) ++#define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22) ++#define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23) ++#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24) ++#define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25) ++#define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\ ++ RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP) ++#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER (IMG_UINT32_C(0x1) << 31) ++ ++#define RGXFWIF_INICFG_ALL (0xFFFFFFFFU) ++ ++/* Extended Flag definitions affecting the firmware globally */ ++#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT (0) ++/* [7] YUV10 override ++ * [6:4] Quality ++ * [3] Quality enable ++ * [2:1] Compression scheme ++ * [0] Lossy group 
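++ *
++ * e.g. a control value of 0x3A encodes: YUV10 override off (bit 7),
++ * quality 3 (bits 6:4), quality enable set (bit 3), compression
++ * scheme 1 (bits 2:1) and lossy group 0 (bit 0).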
*/ ++#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK (IMG_UINT32_C(0xFF)) /* RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL */ ++#define RGXFWIF_INICFG_EXT_ALL (RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) ++ ++#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ ++ RGXFWIF_INICFG_CTXSWITCH_SRESET_EN) ++ ++/* Flag definitions affecting only workloads submitted by a particular OS */ ++ ++/*! ++ * @AddToGroup ContextSwitching ++ * @{ ++ * @Name Per-OS DM context switch configuration flags ++ * @{ ++ */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN (IMG_UINT32_C(0x1) << 0) /*!< Enables TDM context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN (IMG_UINT32_C(0x1) << 1) /*!< Enables GEOM DM context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (IMG_UINT32_C(0x1) << 2) /*!< Enables FRAG DM context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (IMG_UINT32_C(0x1) << 3) /*!< Enables CDM context switch */ ++ ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (IMG_UINT32_C(0x1) << 4) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM (IMG_UINT32_C(0x1) << 5) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (IMG_UINT32_C(0x1) << 6) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (IMG_UINT32_C(0x1) << 7) ++ ++#define RGXFWIF_INICFG_OS_ALL (0xFFU) ++ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN) ++ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL) ++ ++/*! ++ * @} End of Per-OS Context switch configuration flags ++ * @} End of AddToGroup ContextSwitching ++ */ ++ ++#define RGXFWIF_FILTCFG_TRUNCATE_HALF (IMG_UINT32_C(0x1) << 3) ++#define RGXFWIF_FILTCFG_TRUNCATE_INT (IMG_UINT32_C(0x1) << 2) ++#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (IMG_UINT32_C(0x1) << 1) ++ ++typedef IMG_UINT32 RGX_ACTIVEPM_CONF; ++#define RGX_ACTIVEPM_FORCE_OFF 0U ++#define RGX_ACTIVEPM_FORCE_ON 1U ++#define RGX_ACTIVEPM_DEFAULT 2U ++ ++typedef enum ++{ ++ RGX_RD_POWER_ISLAND_FORCE_OFF = 0, ++ RGX_RD_POWER_ISLAND_FORCE_ON = 1, ++ RGX_RD_POWER_ISLAND_DEFAULT = 2 ++} RGX_RD_POWER_ISLAND_CONF; ++ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++/* Unused registers re-purposed for storing counters of the Firmware's ++ * interrupts for each OS ++ */ ++#define IRQ_COUNTER_STORAGE_REGS \ ++ 0x2028U, /* RGX_CR_PM_TA_MMU_FSTACK */ \ ++ 0x2050U, /* RGX_CR_PM_3D_MMU_FSTACK */ \ ++ 0x2030U, /* RGX_CR_PM_START_OF_MMU_TACONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ ++#endif ++ ++typedef struct ++{ ++ IMG_UINT16 ui16RegNum; /*!< Register number */ ++ IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */ ++ IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */ ++ IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */ ++} RGXFW_REGISTER_LIST; ++ ++#if defined(RGX_FIRMWARE) ++typedef DLLIST_NODE RGXFWIF_DLLIST_NODE; ++#else ++typedef struct {RGXFWIF_DEV_VIRTADDR p; ++ RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE; ++#endif ++ ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SYSDATA; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_OSDATA; ++#if defined(SUPPORT_TBI_INTERFACE) ++typedef 
RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF;
++#endif
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL;
++typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_MUX_CNTBLK;
++typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK;
++typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_SELECT_CUSTOM_CNTRS;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COUNTERBUFFER;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD;
++
++/*!
++ * This number is used to represent an invalid page catalogue physical address
++ */
++#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
++
++/*!
++ * This number is used to represent an unallocated page catalog base register
++ */
++#define RGXFW_BIF_INVALID_PCSET 0xFFFFFFFFU
++
++/*!
++	Firmware memory context.
++*/
++typedef struct
++{
++	IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */
++	IMG_UINT32 uiPageCatBaseRegSet; /*!< associated page catalog base register (RGXFW_BIF_INVALID_PCSET == unallocated) */
++	IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */
++	IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */
++	IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */
++	IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++	IMG_UINT32 ui32OSid;
++	IMG_BOOL bOSidAxiProt;
++#endif
++
++} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
++
++/*!
++ * FW context state flags
++ */
++#define RGXFWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U)
++#define RGXFWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL (0x000000FFU)
++#define RGXFWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000100U)
++#define RGXFWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U)
++
++/*!
++ * @InGroup ContextSwitching
++ * @Brief Firmware GEOM/TA context suspend state
++ */
++typedef struct
++{
++	/* FW-accessible TA state which must be written out to memory on context store */
++	IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER; /*!< VDM control stream stack pointer, to store in mid-TA */
++	IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init; /*!< Initial value of the VDM control stream stack pointer (in case it is 'lost' due to a lock-up) */
++	IMG_UINT32 uTAReg_VBS_SO_PRIM[4];
++	IMG_UINT16 ui16TACurrentIdx;
++} UNCACHED_ALIGN RGXFWIF_TACTX_STATE_PER_GEOM;
++
++typedef struct
++{
++	/* FW-accessible TA state which must be written out to memory on context store */
++	RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[RGX_NUM_GEOM_CORES];
++} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
++
++/*!
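++ * Example (illustrative sketch, not part of the original interface): how a
++ * host-side allocator might initialise a firmware memory context before a
++ * page catalogue is attached; it assumes IMG_DEV_PHYADDR exposes its raw
++ * value as uiAddr.
++ */
++static inline void ExampleInitFWMemContext(RGXFWIF_FWMEMCONTEXT *psFwMemCtx)
++{
++	psFwMemCtx->sPCDevPAddr.uiAddr = RGXFWIF_INVALID_PC_PHYADDR; /* no page catalogue yet */
++	psFwMemCtx->uiPageCatBaseRegSet = RGXFW_BIF_INVALID_PCSET;   /* no PC base register allocated */
++	psFwMemCtx->ui32FwMemCtxFlags = 0U;
++}
++
++/*!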
++ * @InGroup ContextSwitching ++ * @Brief Firmware FRAG/3D context suspend state ++ */ ++typedef struct ++{ ++ /* FW-accessible ISP state which must be written out to memory on context store */ ++ IMG_UINT32 u3DReg_PM_DEALLOCATED_MASK_STATUS; /*!< PM deallocation status */ ++ IMG_UINT32 u3DReg_PM_PDS_MTILEFREE_STATUS; /*!< Macro-tiles (MTs) finished status */ ++ IMG_UINT32 ui32CtxStateFlags; /*!< Compatibility and other flags */ ++ /* au3DReg_ISP_STORE should be the last element of the structure ++ * as this is an array whose size is determined at runtime ++ * after detecting the RGX core */ ++ IMG_UINT32 au3DReg_ISP_STORE[]; /*!< ISP state (per-pipe) */ ++} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE; ++ ++static_assert(sizeof(RGXFWIF_3DCTX_STATE) <= 16U, ++ "Size of structure RGXFWIF_3DCTX_STATE exceeds maximum expected size."); ++ ++#define RGXFWIF_CTX_USING_BUFFER_A (0) ++#define RGXFWIF_CTX_USING_BUFFER_B (1U) ++ ++typedef struct ++{ ++ IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */ ++} RGXFWIF_COMPUTECTX_STATE; ++ ++/*! ++ * @InGroup WorkloadContexts ++ * @Brief Firmware Common Context (or FWCC) ++ */ ++typedef struct RGXFWIF_FWCOMMONCONTEXT_ ++{ ++ /* CCB details for this firmware context */ ++ PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ ++ PRGXFWIF_CCCB psCCB; /*!< CCB base */ ++ RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; ++ ++ /* Context suspend state */ ++ PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ ++ ++ /* Flags e.g. for context switching */ ++ IMG_UINT32 ui32FWComCtxFlags; ++ IMG_INT32 i32Priority; /*!< Priority level */ ++ IMG_UINT32 ui32PrioritySeqNum; ++ ++ /* Framework state */ ++ PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ ++ ++ /* Statistic updates waiting to be passed back to the host... 
*/ ++ IMG_BOOL bStatsPending; /*!< True when some stats are pending */ ++ IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ ++ IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ ++ IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ ++ RGXFWIF_DM eDM; /*!< Data Master type */ ++ IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ ++ IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ ++ ++ IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; ++ IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ ++ bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ ++ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ ++ RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ ++ ++ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ ++ ++ /* References to the host side originators */ ++ IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ ++ IMG_UINT32 ui32PID; /*!< associated process ID */ ++ ++ IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */ ++ ++} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; ++ ++static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256U, ++ "Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size."); ++ ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_TQ[RGX_TRP_MAX_NUM_CORES][1]; ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2]; ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4]; ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES][2]; ++ ++/*! ++ * @InGroup WorkloadContexts ++ * @Brief Firmware render context. ++ */ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */ ++ RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */ ++ ++ RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState; ++ ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++ ++ IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */ ++ ++#if defined(SUPPORT_TRP) ++ RGXFWIF_TRP_CHECKSUM_3D aui64TRPChecksums3D; ++ RGXFWIF_TRP_CHECKSUM_GEOM aui64TRPChecksumsGeom; ++#endif ++} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT; ++ ++/*! ++ Firmware compute context. ++*/ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sCDMContext; /*!< Firmware context for the CDM */ ++ ++ RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputeContextState; ++ ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++ ++ IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */ ++ ++ IMG_UINT32 ui32WGPState; ++ IMG_UINT32 ui32WGPChecksum; ++ IMG_UINT32 ui32CoreMaskA; ++ IMG_UINT32 ui32CoreMaskB; ++} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT; ++ ++/*! ++ Firmware TDM context. 
++*/ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sTDMContext; /*!< Firmware context for the TDM */ ++ ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++ ++} UNCACHED_ALIGN RGXFWIF_FWTDMCONTEXT; ++ ++/*! ++ * @InGroup WorkloadContexts ++ * @Brief Firmware transfer context. ++ */ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sTQContext; /*!< Firmware context for TQ3D */ ++ ++#if defined(SUPPORT_TRP) ++ IMG_UINT32 ui32TRPState; ++ RGXFWIF_TRP_CHECKSUM_TQ aui64TRPChecksumsTQ; ++#endif ++} UNCACHED_ALIGN RGXFWIF_FWTRANSFERCONTEXT; ++ ++/*! ++ ****************************************************************************** ++ * Defines for CMD_TYPE corruption detection and forward compatibility check ++ *****************************************************************************/ ++ ++/* CMD_TYPE 32bit contains: ++ * 31:16 Reserved for magic value to detect corruption (16 bits) ++ * 15 Reserved for RGX_CCB_TYPE_TASK (1 bit) ++ * 14:0 Bits available for CMD_TYPEs (15 bits) */ ++ ++ ++/* Magic value to detect corruption */ ++#define RGX_CMD_MAGIC_DWORD IMG_UINT32_C(0x2ABC) ++#define RGX_CMD_MAGIC_DWORD_MASK (0xFFFF0000U) ++#define RGX_CMD_MAGIC_DWORD_SHIFT (16U) ++#define RGX_CMD_MAGIC_DWORD_SHIFTED (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT) ++ ++/*! ++ * @InGroup KCCBTypes ClientCCBTypes ++ * @Brief Generic CCB control structure ++ */ ++typedef struct ++{ ++ volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */ ++ volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ ++ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */ ++ IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */ ++} UNCACHED_ALIGN RGXFWIF_CCB_CTL; ++ ++/*! ++ * @Defgroup KCCBTypes Kernel CCB data interface ++ * @Brief Types grouping data structures and defines used in realising the KCCB functionality ++ * @{ ++ */ ++ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */ ++ ++#if !defined(__KERNEL) ++ ++#if !defined(RGX_FEATURE_SLC_VIVT) ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE < 2) ++#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */ ++#else ++#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB) ++#endif ++#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0U) /* not used */ ++ ++#else /* RGX_FEATURE_SLC_VIVT */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x0) /* not used */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ ++#endif ++ ++#else ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ ++#endif ++ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */ ++ ++/*! 
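++ * Example (illustrative only): composing ui32CacheFlags for a full MMU
++ * cache invalidation that also raises a host interrupt on completion,
++ * using the flag definitions above; the helper itself is hypothetical.
++ */
++static inline IMG_UINT32 ExampleFullMMUCacheFlags(void)
++{
++	return RGXFWIF_MMUCACHEDATA_FLAGS_PT |
++	       RGXFWIF_MMUCACHEDATA_FLAGS_PD |
++	       RGXFWIF_MMUCACHEDATA_FLAGS_PC |
++	       RGXFWIF_MMUCACHEDATA_FLAGS_TLB |
++	       RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT;
++}
++
++/*!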
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_MMUCACHE type command ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32CacheFlags; ++ RGXFWIF_DEV_VIRTADDR sMMUCacheSync; ++ IMG_UINT32 ui32MMUCacheSyncUpdateValue; ++} RGXFWIF_MMUCACHEDATA; ++ ++#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0) ++#define RGXFWIF_BPDATA_FLAGS_WRITE (1U << 1) ++#define RGXFWIF_BPDATA_FLAGS_CTL (1U << 2) ++#define RGXFWIF_BPDATA_FLAGS_REGS (1U << 3) ++ ++typedef struct ++{ ++ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ ++ IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ ++ IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ ++ IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ ++ IMG_UINT32 ui32BPDataFlags; ++ IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ ++ IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ ++ RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ ++} RGXFWIF_BPDATA; ++ ++#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_KICK type command ++ */ ++typedef struct ++{ ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ ++ IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */ ++ IMG_UINT32 ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */ ++ IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */ ++ PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */ ++ IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */ ++} RGXFWIF_KCCB_CMD_KICK_DATA; ++ ++/*! ++ * @Brief Command data for @Ref RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK type command ++ */ ++typedef struct ++{ ++ RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; /*!< GEOM DM kick command data */ ++ RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; /*!< FRAG DM kick command data */ ++} RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FORCE_UPDATE type command ++ */ ++typedef struct ++{ ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ ++ IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */ ++} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA; ++ ++/*! ++ * @Brief Resource types supported by \ref RGXFWIF_KCCB_CMD_CLEANUP type command ++ */ ++typedef enum ++{ ++ RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */ ++ RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */ ++ RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */ ++ RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */ ++} RGXFWIF_CLEANUP_TYPE; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CLEANUP type command ++ */ ++typedef struct ++{ ++ RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type */ ++ union { ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */ ++ PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */ ++ PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */ ++ PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */ ++ } uCleanupData; ++} RGXFWIF_CLEANUP_REQUEST; ++ ++/*! 
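++ * Example (illustrative only, hypothetical helper): populating a cleanup
++ * request for a firmware common context; psFWContext is assumed to be a
++ * valid firmware address.
++ */
++static inline void ExampleMakeCleanupRequest(RGXFWIF_CLEANUP_REQUEST *psReq,
++                                             PRGXFWIF_FWCOMMONCONTEXT psFWContext)
++{
++	psReq->eCleanupType = RGXFWIF_CLEANUP_FWCOMMONCONTEXT;
++	psReq->uCleanupData.psContext = psFWContext;
++}
++
++/*!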
++ * @Brief Type of power requests supported in \ref RGXFWIF_KCCB_CMD_POW type command ++ */ ++typedef enum ++{ ++ RGXFWIF_POW_OFF_REQ = 1, /*!< GPU power-off request */ ++ RGXFWIF_POW_FORCED_IDLE_REQ, /*!< Force-idle related request */ ++ RGXFWIF_POW_NUM_UNITS_CHANGE, /*!< Request to change default powered scalable units */ ++ RGXFWIF_POW_APM_LATENCY_CHANGE /*!< Request to change the APM latency period */ ++} RGXFWIF_POWER_TYPE; ++ ++/*! ++ * @Brief Supported force-idle related requests with \ref RGXFWIF_POW_FORCED_IDLE_REQ type request ++ */ ++typedef enum ++{ ++ RGXFWIF_POWER_FORCE_IDLE = 1, /*!< Request to force-idle GPU */ ++ RGXFWIF_POWER_CANCEL_FORCED_IDLE, /*!< Request to cancel a previously successful force-idle transition */ ++ RGXFWIF_POWER_HOST_TIMEOUT, /*!< Notification that host timed-out waiting for force-idle state */ ++} RGXFWIF_POWER_FORCE_IDLE_TYPE; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_POW type command ++ */ ++typedef struct ++{ ++ RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */ ++ union ++ { ++ IMG_UINT32 ui32NumOfDusts; /*!< Number of active Dusts */ ++ IMG_BOOL bForced; /*!< If the operation is mandatory */ ++ RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */ ++ } uPowerReqData; ++} RGXFWIF_POWER_REQUEST; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_SLCFLUSHINVAL type command ++ */ ++typedef struct ++{ ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */ ++ IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing */ ++ IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */ ++ IMG_UINT64 RGXFW_ALIGN ui64Address; /*!< Optional address of range (only useful when bDMContext == FALSE) */ ++ IMG_UINT64 RGXFW_ALIGN ui64Size; /*!< Optional size of range (only useful when bDMContext == FALSE) */ ++} RGXFWIF_SLCFLUSHINVALDATA; ++ ++typedef enum ++{ ++ RGXFWIF_HWPERF_CTRL_TOGGLE = 0, ++ RGXFWIF_HWPERF_CTRL_SET = 1, ++ RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 ++} RGXFWIF_HWPERF_UPDATE_CONFIG; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG type command ++ */ ++typedef struct ++{ ++ RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */ ++ IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */ ++} RGXFWIF_HWPERF_CTRL; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_MUX_CNTBLK in the array */ ++ PRGX_HWPERF_CONFIG_MUX_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_MUX_CNTBLK array */ ++} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */ ++ PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */ ++} RGXFWIF_HWPERF_CONFIG_DA_BLKS; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE type command ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */ ++} RGXFWIF_CORECLKSPEEDCHANGE_DATA; ++ ++#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16U ++ ++/*! 
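++ * Example (illustrative only, hypothetical helper): building a forced
++ * power-off request; bForced selects the mandatory variant, and IMG_TRUE is
++ * assumed to be the usual services boolean TRUE value.
++ */
++static inline void ExampleMakeForcedPowerOff(RGXFWIF_POWER_REQUEST *psPowReq)
++{
++	psPowReq->ePowType = RGXFWIF_POW_OFF_REQ;
++	psPowReq->uPowerReqData.bForced = IMG_TRUE;
++}
++
++/*!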
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS type command
++ */
++typedef struct
++{
++	bool bEnable;
++	IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */
++	IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */
++} RGXFWIF_HWPERF_CTRL_BLKS;
++
++
++typedef struct
++{
++	IMG_UINT16 ui16CustomBlock;
++	IMG_UINT16 ui16NumCounters;
++	PRGX_HWPERF_SELECT_CUSTOM_CNTRS sCustomCounterIDs;
++} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS;
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE & \ref RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE type commands
++ */
++typedef struct
++{
++	RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */
++	IMG_BOOL bDone; /*!< action backing/unbacking succeeded */
++} RGXFWIF_ZSBUFFER_BACKING_DATA;
++
++#if defined(SUPPORT_VALIDATION)
++typedef struct
++{
++	IMG_UINT32 ui32RegWidth;
++	IMG_BOOL bWriteOp;
++	IMG_UINT32 ui32RegAddr;
++	IMG_UINT64 RGXFW_ALIGN ui64RegVal;
++} RGXFWIF_RGXREG_DATA;
++
++typedef struct
++{
++	IMG_UINT64 ui64BaseAddress;
++	PRGXFWIF_FWCOMMONCONTEXT psContext;
++	IMG_UINT32 ui32Size;
++} RGXFWIF_GPUMAP_DATA;
++#endif
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE type command
++ */
++typedef struct
++{
++	RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */
++	IMG_UINT32 ui32DeltaPages; /*!< Number of pages by which the freelist changed */
++	IMG_UINT32 ui32NewPages; /*!< New number of pages on the freelist (including ready pages) */
++	IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */
++} RGXFWIF_FREELIST_GS_DATA;
++
++#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U)
++#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE type command
++ */
++typedef struct
++{
++	IMG_UINT32 ui32FreelistsCount;
++	IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT];
++} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE type command
++ */
++typedef struct
++{
++	PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context that may need to be resumed following the write offset update */
++} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
++
++/*!
++ ******************************************************************************
++ * Proactive DVFS Structures
++ *****************************************************************************/
++#define NUM_OPP_VALUES 16
++
++typedef struct
++{
++	IMG_UINT32 ui32Volt; /* V */
++	IMG_UINT32 ui32Freq; /* Hz */
++} UNCACHED_ALIGN PDVFS_OPP;
++
++typedef struct
++{
++	PDVFS_OPP asOPPValues[NUM_OPP_VALUES];
++#if defined(DEBUG)
++	IMG_UINT32 ui32MinOPPPoint;
++#endif
++	IMG_UINT32 ui32MaxOPPPoint;
++} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP;
++
++typedef struct
++{
++	IMG_UINT32 ui32MaxOPPPoint;
++} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA;
++
++typedef struct
++{
++	IMG_UINT32 ui32MinOPPPoint;
++} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA;
++
++/*!
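++ * Example (illustrative only, hypothetical helper): looking up the
++ * frequency, in Hz, of an OPP point in the table above, clamped to the
++ * configured maximum point.
++ */
++static inline IMG_UINT32 ExampleGetOPPFreq(const RGXFWIF_PDVFS_OPP *psOPPInfo,
++                                           IMG_UINT32 ui32OPPPoint)
++{
++	if (ui32OPPPoint > psOPPInfo->ui32MaxOPPPoint)
++	{
++		ui32OPPPoint = psOPPInfo->ui32MaxOPPPoint;
++	}
++	return psOPPInfo->asOPPValues[ui32OPPPoint].ui32Freq;
++}
++
++/*!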
++ ****************************************************************************** ++ * Register configuration structures ++ *****************************************************************************/ ++ ++#define RGXFWIF_REG_CFG_MAX_SIZE 512 ++ ++typedef enum ++{ ++ RGXFWIF_REGCFG_CMD_ADD = 101, ++ RGXFWIF_REGCFG_CMD_CLEAR = 102, ++ RGXFWIF_REGCFG_CMD_ENABLE = 103, ++ RGXFWIF_REGCFG_CMD_DISABLE = 104 ++} RGXFWIF_REGDATA_CMD_TYPE; ++ ++typedef enum ++{ ++ RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */ ++ RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */ ++ RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */ ++ RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */ ++ RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */ ++ RGXFWIF_REG_CFG_TYPE_TLA, /* TLA kick */ ++ RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */ ++ RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. Keep as last element */ ++} RGXFWIF_REG_CFG_TYPE; ++ ++typedef struct ++{ ++ IMG_UINT64 ui64Addr; ++ IMG_UINT64 ui64Mask; ++ IMG_UINT64 ui64Value; ++} RGXFWIF_REG_CFG_REC; ++ ++typedef struct ++{ ++ RGXFWIF_REGDATA_CMD_TYPE eCmdType; ++ RGXFWIF_REG_CFG_TYPE eRegConfigType; ++ RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig; ++ ++} RGXFWIF_REGCONFIG_DATA; ++ ++typedef struct ++{ ++ /** ++ * PDump WRW command write granularity is 32 bits. ++ * Add padding to ensure array size is 32 bit granular. ++ */ ++ IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN((IMG_UINT32)RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))]; ++ RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE]; ++} UNCACHED_ALIGN RGXFWIF_REG_CFG; ++ ++typedef enum ++{ ++ RGXFWIF_OS_ONLINE = 1, ++ RGXFWIF_OS_OFFLINE ++} RGXFWIF_OS_STATE_CHANGE; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE type command ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32OSid; ++ RGXFWIF_OS_STATE_CHANGE eNewOSState; ++} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA; ++ ++typedef enum ++{ ++ RGXFWIF_PWR_COUNTER_DUMP_START = 1, ++ RGXFWIF_PWR_COUNTER_DUMP_STOP, ++ RGXFWIF_PWR_COUNTER_DUMP_SAMPLE, ++} RGXFWIF_COUNTER_DUMP_REQUEST; ++ ++typedef struct ++{ ++ RGXFWIF_COUNTER_DUMP_REQUEST eCounterDumpRequest; ++} RGXFW_ALIGN RGXFWIF_COUNTER_DUMP_DATA; ++ ++/*! 
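++ * Example (illustrative only, hypothetical helper): an ADD
++ * register-configuration record applied on every TA kick; the address and
++ * value are caller-supplied placeholders.
++ */
++static inline void ExampleMakeRegConfigAdd(RGXFWIF_REGCONFIG_DATA *psData,
++                                           IMG_UINT64 ui64Addr,
++                                           IMG_UINT64 ui64Value)
++{
++	psData->eCmdType = RGXFWIF_REGCFG_CMD_ADD;
++	psData->eRegConfigType = RGXFWIF_REG_CFG_TYPE_TA;
++	psData->sRegConfig.ui64Addr = ui64Addr;
++	psData->sRegConfig.ui64Mask = ~(IMG_UINT64)0; /* write all bits */
++	psData->sRegConfig.ui64Value = ui64Value;
++}
++
++/*!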
++ * @Brief List of command types supported by the Kernel CCB ++ */ ++typedef enum ++{ ++ /* Common commands */ ++ RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */ ++ RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */ ++ RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++ RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ ++ RGXFWIF_KCCB_CMD_CLEANUP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ ++ RGXFWIF_KCCB_CMD_POW = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */ ++ RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ ++ RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ ++ RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ ++ RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ ++ /* RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE */ ++ RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ ++ RGXFWIF_KCCB_CMD_HEALTH_CHECK = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ ++ RGXFWIF_KCCB_CMD_FORCE_UPDATE = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ ++ ++ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ ++ RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */ ++ ++ /* Commands only permitted to the native or host OS */ ++ RGXFWIF_KCCB_CMD_REGCONFIG = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++ RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ ++ /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS */ ++ RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ ++ RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ ++ /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT*/ ++ RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ ++ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */ ++ RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSid. 
It can only be serviced for the Host DDK */ ++ RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ ++ /* RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE */ ++ /*RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE */ ++ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ ++ RGXFWIF_KCCB_CMD_PHR_CFG = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_KCCB_CMD_RGXREG = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */ ++#endif ++ RGXFWIF_KCCB_CMD_WDG_CFG = 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */ ++ RGXFWIF_KCCB_CMD_COUNTER_DUMP = 216U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */ ++ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 217U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ ++ RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 218U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_KCCB_CMD_GPUMAP = 219U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */ ++#endif ++ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 220U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure directly addressable counters for HWPerf */ ++} RGXFWIF_KCCB_CMD_TYPE; ++ ++#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1) ++ ++/*! @Brief Kernel CCB command packet */ ++typedef struct ++{ ++ RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type */ ++ IMG_UINT32 ui32KCCBFlags; /*!< Compatibility and other flags */ ++ ++ /* NOTE: Make sure that uCmdData is the last member of this struct ++ * This is to calculate actual command size for device mem copy. 
++ * (Refer RGXGetCmdMemCopySize()) ++ * */ ++ union ++ { ++ RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */ ++ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA sCombinedTA3DCmdKickData; /*!< Data for combined TA/3D Kick command */ ++ RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMU cache command */ ++ RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */ ++ RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */ ++ RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */ ++ RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */ ++ RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */ ++ RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */ ++ RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */ ++ RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */ ++ RGXFWIF_HWPERF_CONFIG_DA_BLKS sHWPerfCfgDABlks; /*!< Data for HWPerf configure Directly Addressable blocks */ ++ RGXFWIF_CORECLKSPEEDCHANGE_DATA sCoreClkSpeedChangeData;/*!< Data for core clock speed change */ ++ RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */ ++ RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */ ++ RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */ ++ RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */ ++ RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */ ++ RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData; /*!< Data for setting the max frequency/OPP */ ++ RGXFWIF_PDVFS_MIN_FREQ_DATA sPDVFSMinFreqData; /*!< Data for setting the min frequency/OPP */ ++ RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */ ++ RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */ ++ RGXFWIF_COUNTER_DUMP_DATA sCounterDumpConfigData; /*!< Data for dumping of register ranges */ ++ RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_RGXREG_DATA sFwRgxData; /*!< Data for reading off an RGX register */ ++ RGXFWIF_GPUMAP_DATA sGPUMapData; /*!< Data for requesting a FW GPU mapping which is written into by the FW with a pattern */ ++#endif ++ } UNCACHED_ALIGN uCmdData; ++} UNCACHED_ALIGN RGXFWIF_KCCB_CMD; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD); ++ ++/*! @} End of KCCBTypes */ ++ ++/*! ++ * @Defgroup FWCCBTypes Firmware CCB data interface ++ * @Brief Types grouping data structures and defines used in realising the Firmware CCB functionality ++ * @{ ++ */ ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING and the ++ * \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING Firmware CCB commands ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32ZSBufferID; /*!< ZS buffer ID */ ++} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA; ++ ++/*! 
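++ * Example (illustrative sketch of the size calculation noted in the
++ * RGXFWIF_KCCB_CMD definition above; RGXGetCmdMemCopySize() is the driver's
++ * real implementation): only the header plus the payload of the given
++ * command type needs copying, not the whole uCmdData union. Assumes
++ * offsetof() is available, e.g. via the kernel's stddef definitions.
++ */
++static inline IMG_UINT32 ExampleKCCBCmdCopySize(IMG_UINT32 ui32PayloadSize)
++{
++	return (IMG_UINT32)offsetof(RGXFWIF_KCCB_CMD, uCmdData) + ui32PayloadSize;
++}
++
++/*!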
++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELIST_GROW Firmware CCB ++ * command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32FreelistID; /*!< Freelist ID */ ++} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA; ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION ++ * Firmware CCB command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32FreelistsCount; /*!< Freelists count */ ++ IMG_UINT32 ui32HwrCounter; /*!< HWR counter */ ++ IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; /*!< Array of freelist IDs to reconstruct */ ++} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA; ++ ++#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF (1U<<0) /*!< 1 if a page fault happened */ ++#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS (1U<<1) /*!< 1 if applicable to all contexts */ ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION ++ * Firmware CCB command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */ ++ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */ ++ RGXFWIF_DM eDM; /*!< Data Master affected by the reset */ ++ IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */ ++ IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ ++ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */ ++} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA; ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION ++ * Firmware CCB command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_DEV_VIRTADDR sFWFaultAddr; /*!< Page fault address */ ++} RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA; ++ ++/*! 
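++ * Example (illustrative only, hypothetical helper): testing whether a
++ * context reset notification was caused by a page fault, per the flag bits
++ * defined above.
++ */
++static inline IMG_BOOL ExampleResetWasPageFault(const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psData)
++{
++	return ((psData->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF) != 0U) ?
++	       IMG_TRUE : IMG_FALSE;
++}
++
++/*!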
++ ****************************************************************************** ++ * List of command types supported by the Firmware CCB ++ *****************************************************************************/ ++typedef enum ++{ ++ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be backed with physical pages ++ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ ++ RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be unbacked ++ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ ++ RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand freelist grow ++ \n Command data: RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA */ ++ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests freelists reconstruction ++ \n Command data: RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA */ ++ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a HWR event on a context ++ \n Command data: RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA */ ++ RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand debug dump ++ \n Command data: None */ ++ RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand update on process stats ++ \n Command data: RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA */ ++ ++ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++ RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests GPU restart ++ \n Command data: None */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_FWCCB_CMD_REG_READ = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++#if defined(SUPPORT_SOC_TIMER) ++ RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++#endif ++#endif ++ RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a FW pagefault ++ \n Command data: RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA */ ++} RGXFWIF_FWCCB_CMD_TYPE; ++ ++/*! ++ ****************************************************************************** ++ * List of the various stats of the process to update/increment ++ *****************************************************************************/ ++typedef enum ++{ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */ ++} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE; ++ ++/*! 
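++ * Example (illustrative debug aid only, not part of the interface):
++ * printable names for the stats-update selectors above.
++ */
++static inline const char *ExampleStatsUpdateName(RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eType)
++{
++	switch (eType)
++	{
++		case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS: return "partial renders";
++		case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:   return "out of memory";
++		case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:       return "TA stores";
++		case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:       return "3D stores";
++		case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:      return "CDM stores";
++		case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES:      return "TDM stores";
++		default:                                           return "unknown";
++	}
++}
++
++/*!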
++ ******************************************************************************
++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_UPDATE_STATS Firmware CCB
++ * command
++ *****************************************************************************/
++typedef struct
++{
++	RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */
++	IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */
++	IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */
++} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
++
++typedef struct
++{
++	IMG_UINT32 ui32CoreClkRate;
++} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
++
++#if defined(SUPPORT_VALIDATION)
++typedef struct
++{
++	IMG_UINT64 ui64RegValue;
++} RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA;
++
++#if defined(SUPPORT_SOC_TIMER)
++typedef struct
++{
++	IMG_UINT64 ui64timerGray;
++	IMG_UINT64 ui64timerBinary;
++	IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS];
++} RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA;
++#endif
++#endif
++
++/*!
++ ******************************************************************************
++ * @Brief Firmware CCB command structure
++ *****************************************************************************/
++typedef struct
++{
++	RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */
++	IMG_UINT32 ui32FWCCBFlags; /*!< Compatibility and other flags */
++
++	union
++	{
++		RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing */
++		RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */
++		RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */
++		RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */
++		RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */
++		RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange; /*!< Data for core clock rate change */
++		RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA sCmdFWPagefault; /*!< Data for firmware page fault notification */
++#if defined(SUPPORT_VALIDATION)
++		RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA sCmdRgxRegReadData;
++#if defined(SUPPORT_SOC_TIMER)
++		RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers;
++#endif
++#endif
++	} RGXFW_ALIGN uCmdData;
++} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
++
++/*! @} End of FWCCBTypes */
++
++/*!
++ ******************************************************************************
++ * Workload estimation Firmware CCB command structure for RGX
++ *****************************************************************************/
++typedef struct
++{
++	IMG_UINT16 ui16ReturnDataIndex; /*!< Index for return data array */
++	IMG_UINT32 ui32CyclesTaken; /*!< The cycles the workload took on the hardware */
++} RGXFWIF_WORKEST_FWCCB_CMD;
++
++/*!
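++ * Example (illustrative only, hypothetical helper): filling the
++ * update-stats payload so the host adds one partial render to the stats of
++ * process uiPID.
++ */
++static inline void ExampleMakeUpdateStats(RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA *psData,
++                                          IMG_PID uiPID)
++{
++	psData->eElementToUpdate = RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS;
++	psData->pidOwner = uiPID;
++	psData->i32AdjustmentValue = 1;
++}
++
++/*!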
++ * @Defgroup ClientCCBTypes Client CCB data interface ++ * @Brief Types grouping data structures and defines used in realising Client CCB commands/functionality ++ * @{ ++ */ ++ ++/* Required memory alignment for 64-bit variables accessible by Meta ++ (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared ++ between the host and meta that contains 64-bit variables has to maintain ++ this alignment) */ ++#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64) ++ ++#define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15) ++#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1U)) & ~(RGXFWIF_FWALLOC_ALIGN - 1U)) ++ ++typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; ++ ++/*! ++ * @Name Client CCB command types ++ * @{ ++ */ ++#define RGXFWIF_CCB_CMD_TYPE_GEOM (201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TA DM command */ ++#define RGXFWIF_CCB_CMD_TYPE_TQ_3D (202U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for TQ operation */ ++#define RGXFWIF_CCB_CMD_TYPE_3D (203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command */ ++#define RGXFWIF_CCB_CMD_TYPE_3D_PR (204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for Partial render */ ++#define RGXFWIF_CCB_CMD_TYPE_CDM (205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Compute DM command */ ++#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM (206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TDM command */ ++#define RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE (207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++#define RGXFWIF_CCB_CMD_TYPE_TQ_2D (208U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 2D DM command for TQ operation */ ++#define RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP (209U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++#define RGXFWIF_CCB_CMD_TYPE_NULL (210U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++#define RGXFWIF_CCB_CMD_TYPE_ABORT (211U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++ ++/* Leave a gap between CCB specific commands and generic commands */ ++#define RGXFWIF_CCB_CMD_TYPE_FENCE (212U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a command */ ++#define RGXFWIF_CCB_CMD_TYPE_UPDATE (213U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates of a command */ ++#define RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE (214U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates related to workload resources */ ++#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR (215U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a PR command */ ++#define RGXFWIF_CCB_CMD_TYPE_PRIORITY (216U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Context priority update command */ ++/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The ++ padding code with the CCB wrap upsets the FW if we don't have the task type ++ bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. ++*/ ++#define RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) ++#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (218U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates of a command */ ++#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates related to workload resources */ ++ ++#if defined(SUPPORT_VALIDATION) ++#define RGXFWIF_CCB_CMD_TYPE_REG_READ (220U | RGX_CMD_MAGIC_DWORD_SHIFTED) ++#endif ++ ++#define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */ ++/*! 
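++ * Example (illustrative only, hypothetical helpers): the layout checks
++ * implied by the CMD_TYPE encoding described above; a well-formed client
++ * CCB command type carries the magic value in bits 31:16, and task
++ * commands set RGX_CCB_TYPE_TASK.
++ */
++static inline IMG_BOOL ExampleCCBCmdMagicValid(RGXFWIF_CCB_CMD_TYPE eType)
++{
++	return ((eType & RGX_CMD_MAGIC_DWORD_MASK) == RGX_CMD_MAGIC_DWORD_SHIFTED) ?
++	       IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_BOOL ExampleCCBCmdIsTask(RGXFWIF_CCB_CMD_TYPE eType)
++{
++	return ((eType & RGX_CCB_TYPE_TASK) != 0U) ? IMG_TRUE : IMG_FALSE;
++}
++
++/*!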
@} End of Client CCB command types */ ++ ++typedef struct ++{ ++ /* Index for the KM Workload estimation return data array */ ++ IMG_UINT16 RGXFW_ALIGN ui16ReturnDataIndex; ++ /* Predicted time taken to do the work in cycles */ ++ IMG_UINT32 RGXFW_ALIGN ui32CyclesPrediction; ++ /* Deadline for the workload (in usecs) */ ++ IMG_UINT64 RGXFW_ALIGN ui64Deadline; ++} RGXFWIF_WORKEST_KICK_DATA; ++ ++/*! @Brief Command header of a command in the client CCB buffer. ++ * ++ * Followed by this header is the command-data specific to the ++ * command-type as specified in the header. ++ */ ++typedef struct ++{ ++ RGXFWIF_CCB_CMD_TYPE eCmdType; /*!< Command data type following this command header */ ++ IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */ ++ IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ ++ IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */ ++ RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ ++} RGXFWIF_CCB_CMD_HEADER; ++ ++/* ++ ****************************************************************************** ++ * Client CCB commands which are only required by the kernel ++ *****************************************************************************/ ++ ++/*! @Brief Command data for \ref RGXFWIF_CCB_CMD_TYPE_PRIORITY type client CCB command */ ++typedef struct ++{ ++ IMG_INT32 i32Priority; /*!< Priority level */ ++} RGXFWIF_CMD_PRIORITY; ++ ++/*! @} End of ClientCCBTypes */ ++ ++/*! ++ ****************************************************************************** ++ * Signature and Checksums Buffer ++ *****************************************************************************/ ++typedef struct ++{ ++ PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */ ++ IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */ ++} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL; ++ ++typedef struct ++{ ++ PRGXFWIF_COUNTERBUFFER sBuffer; /*!< Ptr to counter dump buffer */ ++ IMG_UINT32 ui32SizeInDwords; /*!< Amount of space for storing in the buffer */ ++} UNCACHED_ALIGN RGXFWIF_COUNTER_DUMP_CTL; ++ ++typedef struct ++{ ++ PRGXFWIF_FIRMWAREGCOVBUFFER sBuffer; /*!< Ptr to firmware gcov buffer */ ++ IMG_UINT32 ui32Size; /*!< Amount of space for storing in the buffer */ ++} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL; ++ ++/*! ++ ***************************************************************************** ++ * RGX Compatibility checks ++ *****************************************************************************/ ++ ++/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the ++ following define should be increased by 1 to indicate to the ++ compatibility logic that layout has changed. 
*/ ++#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3 ++ ++typedef struct ++{ ++ IMG_UINT32 ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */ ++ IMG_UINT64 RGXFW_ALIGN ui64BVNC; ++} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC; ++ ++typedef struct ++{ ++ IMG_UINT8 ui8OsCountSupport; ++} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS; ++ ++#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \ ++ RGXFWIF_COMPCHECKS_BVNC (name) = { \ ++ RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \ ++ 0, \ ++ } ++#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \ ++ do { \ ++ (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \ ++ (name).ui64BVNC = 0; \ ++ } while (false) ++ ++typedef struct ++{ ++ RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */ ++ RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */ ++ IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the FW processor version */ ++ IMG_UINT32 ui32DDKVersion; /*!< software DDK version */ ++ IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */ ++ IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */ ++ RGXFWIF_INIT_OPTIONS sInitOptions; /*!< initialisation options bit-field */ ++ IMG_BOOL bUpdated; /*!< Information is valid */ ++} UNCACHED_ALIGN RGXFWIF_COMPCHECKS; ++ ++/*! ++ ****************************************************************************** ++ * Updated configuration post FW data init. ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32ActivePMLatencyms; /* APM latency in ms before signalling IDLE to the host */ ++ IMG_UINT32 ui32RuntimeCfgFlags; /* Compatibility and other flags */ ++ IMG_BOOL bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default each GPU power transition */ ++ IMG_UINT32 ui32CoreClockSpeed; /* Core clock speed, currently only used to calculate timer ticks */ ++ IMG_UINT32 ui32DefaultDustsNumInit; /* Last number of dusts change requested by the host */ ++ IMG_UINT32 ui32PHRMode; /* Periodic Hardware Reset configuration values */ ++ IMG_UINT32 ui32HCSDeadlineMS; /* New number of milliseconds C/S is allowed to last */ ++ IMG_UINT32 ui32WdgPeriodUs; /* The watchdog period in microseconds */ ++ IMG_UINT32 aui32OSidPriority[RGXFW_MAX_NUM_OS]; /*!< Array of priorities per OS */ ++ PRGXFWIF_HWPERFBUF sHWPerfBuf; /* On-demand allocated HWPerf buffer address, to be passed to the FW */ ++} RGXFWIF_RUNTIME_CFG; ++ ++/*! ++ ***************************************************************************** ++ * Control data for RGX ++ *****************************************************************************/ ++ ++#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U) ++ ++#if defined(PDUMP) ++ ++#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U ++ ++typedef enum ++{ ++ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, ++ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT ++} RGXFWIF_PID_FILTER_MODE; ++ ++typedef struct ++{ ++ IMG_PID uiPID; ++ IMG_UINT32 ui32OSID; ++} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM; ++ ++typedef struct ++{ ++ RGXFWIF_PID_FILTER_MODE eMode; ++ /* each process in the filter list is specified by a PID and OS ID pair. ++ * each PID and OS pair is an item in the items array (asItems). ++ * if the array contains less than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries ++ * then it must be terminated by an item with pid of zero. 
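++ * e.g. a filter naming two processes fills asItems[0] and asItems[1]
++ * and terminates the list with asItems[2].uiPID == 0.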
++ */ ++ RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS]; ++} RGXFW_ALIGN RGXFWIF_PID_FILTER; ++#endif ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA (0x1U << 0) ++#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE (0x1U << 1) ++#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE (0x1U << 2) ++#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE (0x1U << 3) ++#endif ++ ++typedef enum ++{ ++ RGXFWIF_TPU_DM_PDM = 0, ++ RGXFWIF_TPU_DM_VDM = 1, ++ RGXFWIF_TPU_DM_CDM = 2, ++ RGXFWIF_TPU_DM_TDM = 3, ++ RGXFWIF_TPU_DM_LAST ++} RGXFWIF_TPU_DM; ++ ++typedef enum ++{ ++ RGXFWIF_GPIO_VAL_OFF = 0, /*!< No GPIO validation */ ++ RGXFWIF_GPIO_VAL_GENERAL = 1, /*!< Simple test case that ++ initiates by sending data via the ++ GPIO and then sends back any data ++ received over the GPIO */ ++ RGXFWIF_GPIO_VAL_AP = 2, /*!< More complex test case that writes ++ and reads data across the entire ++ GPIO AP address range.*/ ++#if defined(SUPPORT_STRIP_RENDERING) ++ RGXFWIF_GPIO_VAL_SR_BASIC = 3, /*!< Strip Rendering AP based basic test.*/ ++ RGXFWIF_GPIO_VAL_SR_COMPLEX = 4, /*!< Strip Rendering AP based complex test.*/ ++#endif ++ RGXFWIF_GPIO_VAL_TESTBENCH = 5, /*!< Validates the GPIO Testbench. */ ++ RGXFWIF_GPIO_VAL_LOOPBACK = 6, /*!< Send and then receive each byte ++ in the range 0-255. */ ++ RGXFWIF_GPIO_VAL_LOOPBACK_LITE = 7, /*!< Send and then receive each power-of-2 ++ byte in the range 0-255. */ ++ RGXFWIF_GPIO_VAL_LAST ++} RGXFWIF_GPIO_VAL_MODE; ++ ++typedef enum ++{ ++ FW_PERF_CONF_NONE = 0, ++ FW_PERF_CONF_ICACHE = 1, ++ FW_PERF_CONF_DCACHE = 2, ++ FW_PERF_CONF_JTLB_INSTR = 5, ++ FW_PERF_CONF_INSTRUCTIONS = 6 ++} FW_PERF_CONF; ++ ++typedef enum ++{ ++ FW_BOOT_STAGE_TLB_INIT_FAILURE = -2, ++ FW_BOOT_STAGE_NOT_AVAILABLE = -1, ++ FW_BOOT_NOT_STARTED = 0, ++ FW_BOOT_BLDR_STARTED = 1, ++ FW_BOOT_CACHE_DONE, ++ FW_BOOT_TLB_DONE, ++ FW_BOOT_MAIN_STARTED, ++ FW_BOOT_ALIGNCHECKS_DONE, ++ FW_BOOT_INIT_DONE, ++} FW_BOOT_STAGE; ++ ++/*! ++ * @AddToGroup KCCBTypes ++ * @{ ++ * @Name Kernel CCB return slot responses ++ * @{ ++ * Usage of bit-fields instead of bare integers ++ * allows FW to possibly pack-in several responses for each single kCCB command. ++ */ ++ ++#define RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED (1U << 0) /*!< Command executed (return status from FW) */ ++#define RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY (1U << 1) /*!< A cleanup was requested but resource busy */ ++#define RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE (1U << 2) /*!< Poll failed in FW for a HW operation to complete */ ++ ++#define RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U /*!< Reset value of a kCCB return slot (set by host) */ ++/*! ++ * @} End of Name Kernel CCB return slot responses ++ * @} End of AddToGroup KCCBTypes ++ */ ++ ++typedef struct ++{ ++ /* Fw-Os connection states */ ++ volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState; ++ volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState; ++ volatile IMG_UINT32 ui32AliveFwToken; ++ volatile IMG_UINT32 ui32AliveOsToken; ++} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL; ++ ++/*! 
@Brief Firmware OS Initialization data \ref RGXFWIF_OSINIT ++ * allocated by services and used by the Firmware on boot ++ **/ ++typedef struct ++{ ++ /* Kernel CCB */ ++ PRGXFWIF_CCB_CTL psKernelCCBCtl; /*!< Kernel CCB Control */ ++ PRGXFWIF_CCB psKernelCCB; /*!< Kernel CCB */ ++ PRGXFWIF_CCB_RTN_SLOTS psKernelCCBRtnSlots; /*!< Kernel CCB return slots */ ++ ++ /* Firmware CCB */ ++ PRGXFWIF_CCB_CTL psFirmwareCCBCtl; /*!< Firmware CCB control */ ++ PRGXFWIF_CCB psFirmwareCCB; /*!< Firmware CCB */ ++ ++ /* Workload Estimation Firmware CCB */ ++ PRGXFWIF_CCB_CTL psWorkEstFirmwareCCBCtl; /*!< Workload estimation control */ ++ PRGXFWIF_CCB psWorkEstFirmwareCCB; /*!< Workload estimation buffer */ ++ ++ PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl; /*!< HWRecoveryInfo control */ ++ ++ IMG_UINT32 ui32HWRDebugDumpLimit; /*!< Firmware debug dump maximum limit */ ++ ++ PRGXFWIF_OSDATA sFwOsData; /*!< Firmware per-os shared data */ ++ ++ RGXFWIF_COMPCHECKS sRGXCompChecks; /*!< Compatibility checks to be populated by the Firmware */ ++ ++} UNCACHED_ALIGN RGXFWIF_OSINIT; ++ ++/*! @Brief Firmware System Initialization data \ref RGXFWIF_SYSINIT ++ * allocated by services and used by the Firmware on boot ++ **/ ++typedef struct ++{ ++ IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; /*!< Fault read address */ ++ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; /*!< PDS execution base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase; /*!< USC execution base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCStateTableBase; /*!< FBCDC bindless texture state table base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCLargeStateTableBase; ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sTextureHeapBase; /*!< Texture state base */ ++ ++ IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter; /*! Event filter for Firmware events */ ++ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr; ++ ++ IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; ++ ++ RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_MAX]; /*!< Signature and Checksum Buffers for DMs */ ++ ++ RGXFWIF_PDVFS_OPP sPDVFSOPPInfo; ++ ++ RGXFWIF_DMA_ADDR sCorememDataStore; /*!< Firmware coremem data */ ++ ++ RGXFWIF_COUNTER_DUMP_CTL sCounterDumpCtl; ++ ++#if defined(SUPPORT_FIRMWARE_GCOV) ++ RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; /*!< Firmware gcov buffer control */ ++#endif ++ ++ IMG_UINT32 ui32FilterFlags; ++ ++ PRGXFWIF_RUNTIME_CFG sRuntimeCfg; /*!< Firmware Runtime configuration */ ++ ++ PRGXFWIF_TRACEBUF sTraceBufCtl; /*!< Firmware Trace buffer control */ ++ PRGXFWIF_SYSDATA sFwSysData; /*!< Firmware System shared data */ ++#if defined(SUPPORT_TBI_INTERFACE) ++ PRGXFWIF_TBIBUF sTBIBuf; /*!< Tbi log buffer */ ++#endif ++ ++ PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; /*!< GPU utilization buffer */ ++ PRGXFWIF_REG_CFG sRegCfg; /*!< Firmware register user configuration */ ++ PRGXFWIF_HWPERF_CTL sHWPerfCtl; /*!< HWPerf counter block configuration.*/ ++ ++ RGXFWIF_DEV_VIRTADDR sAlignChecks; /*!< Array holding Server structures alignment data */ ++ ++ IMG_UINT32 ui32InitialCoreClockSpeed; /*!< Core clock speed at FW boot time */ ++ ++ IMG_UINT32 ui32InitialActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ ++ ++ IMG_BOOL bFirmwareStarted; /*!< Flag to be set by the Firmware after successful start */ ++ ++ IMG_UINT32 ui32MarkerVal; /*!< Host/FW Trace synchronisation Partition Marker */ ++ ++ IMG_UINT32 ui32FirmwareStartedTimeStamp; /*!< Firmware initialization complete time */ ++ ++ IMG_UINT32 ui32JonesDisableMask; ++ ++ FW_PERF_CONF eFirmwarePerf; /*!< Firmware performance 
counter config */
++
++ /**
++ * FW Pointer to memory containing core clock rate in Hz.
++ * Firmware (PDVFS) updates the memory when running on a non-primary FW
++ * thread to communicate it to the host driver.
++ */
++ PRGXFWIF_CORE_CLK_RATE sCoreClockRate;
++
++#if defined(PDUMP)
++ RGXFWIF_PID_FILTER sPIDFilter;
++#endif
++
++ RGXFWIF_GPIO_VAL_MODE eGPIOValidationMode;
++
++ RGX_HWPERF_BVNC sBvncKmFeatureFlags; /*!< Used in HWPerf for decoding BVNC Features */
++
++#if defined(SUPPORT_SECURITY_VALIDATION)
++ IMG_UINT32 ui32SecurityTestFlags;
++ RGXFWIF_DEV_VIRTADDR pbSecureBuffer;
++ RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer;
++#endif
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++ /*
++ * Used when validation is enabled to allow the host to check
++ * that MTS sent the correct sideband in response to a kick
++ * from a given OS's schedule register.
++ * Testing is enabled if RGXFWIF_KICK_TEST_ENABLED_BIT is set.
++ *
++ * Set by the host to:
++ * (osid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT
++ * Reset to 0 by the FW when kicked by the given OSid.
++ */
++ IMG_UINT32 ui32OSKickTest;
++#endif
++
++ /* Value to write into RGX_CR_TFBC_COMPRESSION_CONTROL */
++ IMG_UINT32 ui32TFBCCompressionControl;
++
++#if defined(SUPPORT_AUTOVZ)
++ IMG_UINT32 ui32VzWdgPeriod;
++#endif
++
++} UNCACHED_ALIGN RGXFWIF_SYSINIT;
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++#define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1
++#define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1
++#endif
++
++/*!
++ *****************************************************************************
++ * Timer correlation shared data and defines
++ *****************************************************************************/
++
++typedef struct
++{
++ IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp;
++ IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp;
++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp;
++
++ /* Utility variable used to convert CR timer deltas to OS timer deltas (nS),
++ * where the deltas are relative to the timestamps above:
++ * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */
++ IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs;
++
++ IMG_UINT32 ui32CoreClockSpeed;
++ IMG_UINT32 ui32Reserved;
++} UNCACHED_ALIGN RGXFWIF_TIME_CORR;
++
++
++/* The following macros are used to help convert FW timestamps to the Host
++ * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of
++ * time; it increments by 1 every 256 GPU clock ticks, so the general
++ * formula to perform the conversion is:
++ *
++ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS,
++ *   otherwise if (scale == 10^6) then deltaOS is in uS ]
++ *
++ *             deltaCR * 256                                  256 * scale
++ *  deltaOS = --------------- * scale = deltaCR * K   [ K = --------------- ]
++ *             GPUclockspeed                                 GPUclockspeed
++ *
++ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
++ * to get better accuracy and to avoid returning 0 in the integer
++ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
++ * This is the same as keeping K as a decimal number.
++ *
++ * The maximum deltaOS is slightly more than 5 hours for all GPU frequencies
++ * (deltaCR * K is more or less a constant), and it's relative to the base
++ * OS timestamp sampled as a part of the timer correlation data.
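++ * (Worked example, with assumed numbers: for a 500 MHz GPU clock and
++ * scale = 10^9, K = (256 * 10^9 / 500000000) << 20 = 512 << 20. A deltaCR
++ * of 1000 timer ticks is 256000 GPU cycles, and the macro below yields
++ * deltaOS = (1000 * (512 << 20)) >> 20 = 512000 nS, i.e. 512 uS, which
++ * matches 256000 cycles at 500 MHz.)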
++ * This base is refreshed on GPU power-on, DVFS transition and periodic
++ * frequency calibration (executed every few seconds if the FW is doing
++ * some work), so as long as the GPU is doing something and one of these
++ * events is triggered then deltaCR * K will not overflow and deltaOS will be
++ * correct.
++ */
++
++#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20)
++
++#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \
++ (((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
++
++
++/*!
++ ******************************************************************************
++ * GPU Utilisation
++ *****************************************************************************/
++
++/* See rgx_common.h for a list of GPU states */
++#define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK)
++
++#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK)
++#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK)
++
++/* The OS timestamps computed by the FW are approximations of the real time,
++ * which means they could be slightly behind or ahead of the real timer on the
++ * Host. In some cases we can perform subtractions between FW approximated
++ * timestamps and real OS timestamps, so we need a form of protection against
++ * negative results if, for instance, the FW timestamp is a bit ahead of time.
++ */
++#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \
++ (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U)
++
++#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \
++ (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state))
++
++
++/* The timer correlation array must be big enough to ensure old entries won't be
++ * overwritten before all the HWPerf events linked to those entries are processed
++ * by the MISR. The update frequency of this array depends on how fast the system
++ * can change state (basically how small the APM latency is) and perform DVFS transitions.
++ *
++ * The minimum size is 2 (not 1) to avoid races where the FW reads an entry
++ * while the Host is updating it. With 2 entries, in the worst case the FW
++ * will read old data, which is still acceptable if the Host is updating the
++ * timer correlation at that time.
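++ * (For instance, with the array sized at 256 entries as below, a sequence
++ * count of 259 selects entry 259 % 256 = 3 via RGXFWIF_TIME_CORR_CURR_INDEX();
++ * because the size is a power of two, the modulo reduces to a cheap bitwise
++ * AND, which is what the static assert below guarantees.)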
++ */
++#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256U
++#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE)
++
++/* Make sure the timer correlation array size is a power of 2 */
++static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U,
++ "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
++
++typedef struct
++{
++ RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE];
++ IMG_UINT32 ui32TimeCorrSeqCount;
++
++ /* Compatibility and other flags */
++ IMG_UINT32 ui32GpuUtilFlags;
++
++ /* Last GPU state + OS time of the last state update */
++ IMG_UINT64 RGXFW_ALIGN ui64LastWord;
++
++ /* Counters for the amount of time the GPU was active/idle/blocked */
++ IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
++} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB;
++
++typedef struct
++{
++ IMG_UINT32 ui32RenderTargetIndex; //Render number
++ IMG_UINT32 ui32CurrentRenderTarget; //index in RTA
++ IMG_UINT32 ui32ActiveRenderTargets; //total active RTs
++ IMG_UINT32 ui32CumulActiveRenderTargets; //total active RTs from the first TA kick, for OOM
++ RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices
++ RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of the number of partial renders that occurred per render target
++ IMG_UINT32 ui32MaxRTs; //Number of render targets in the array
++ IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */
++} UNCACHED_ALIGN RGXFWIF_RTA_CTL;
++
++/*!
++ * @InGroup RenderTarget
++ * @Brief Firmware Freelist holding usage state of the Parameter Buffers
++ */
++typedef struct
++{
++ IMG_DEV_VIRTADDR RGXFW_ALIGN psFreeListDevVAddr; /*!< Freelist page table base */
++ IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page */
++ IMG_UINT32 ui32CurrentStackTop; /*!< Freelist current free page */
++ IMG_UINT32 ui32MaxPages; /*!< Max no. of pages that can be added to the freelist */
++ IMG_UINT32 ui32GrowPages; /*!< No. of pages to add in each freelist grow */
++ IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */
++ IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */
++ IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. of pages allocated for GPU MMU for PM*/
++#if defined(SUPPORT_SHADOW_FREELISTS)
++ IMG_UINT32 ui32HWRCounter;
++ PRGXFWIF_FWMEMCONTEXT psFWMemContext;
++#endif
++ IMG_UINT32 ui32FreeListID; /*!< Unique Freelist ID */
++ IMG_BOOL bGrowPending; /*!< Freelist grow is pending */
++ IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */
++ IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */
++#if defined(SUPPORT_AGP)
++ IMG_UINT32 ui32PmGlobalPb; /*!< PM Global PB on which Freelist is loaded */
++#endif
++} UNCACHED_ALIGN RGXFWIF_FREELIST;
++
++/*!
++ ****************************************************************************** ++ * HWRTData ++ *****************************************************************************/ ++ ++/* HWRTData flags */ ++/* Deprecated flags 1:0 */ ++#define HWRTDATA_HAS_LAST_TA (1UL << 2) ++#define HWRTDATA_PARTIAL_RENDERED (1UL << 3) ++#define HWRTDATA_DISABLE_TILE_REORDERING (1UL << 4) ++#define HWRTDATA_NEED_BRN65101_BLIT (1UL << 5) ++#define HWRTDATA_FIRST_BRN65101_STRIP (1UL << 6) ++#define HWRTDATA_NEED_BRN67182_2ND_RENDER (1UL << 7) ++#if defined(SUPPORT_AGP) ++#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (1UL << 8) ++#if defined(SUPPORT_AGP4) ++#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (1UL << 9) ++#endif ++#define HWRTDATA_GEOM_NEEDS_RESUME (1UL << 10) ++#endif ++ ++typedef enum ++{ ++ RGXFWIF_RTDATA_STATE_NONE = 0, ++ RGXFWIF_RTDATA_STATE_KICKTA, ++ RGXFWIF_RTDATA_STATE_KICKTAFIRST, ++ RGXFWIF_RTDATA_STATE_TAFINISHED, ++ RGXFWIF_RTDATA_STATE_KICK3D, ++ RGXFWIF_RTDATA_STATE_3DFINISHED, ++ RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED, ++ RGXFWIF_RTDATA_STATE_TAOUTOFMEM, ++ RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, ++ /* In case of HWR, we can't set the RTDATA state to NONE, ++ * as this will cause any TA to become a first TA. ++ * To ensure all related TA's are skipped, we use the HWR state */ ++ RGXFWIF_RTDATA_STATE_HWR, ++ RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU ++} RGXFWIF_RTDATA_STATE; ++ ++typedef struct ++{ ++ IMG_BOOL bTACachesNeedZeroing; ++ ++ IMG_UINT32 ui32ScreenPixelMax; ++ IMG_UINT64 RGXFW_ALIGN ui64MultiSampleCtl; ++ IMG_UINT64 ui64FlippedMultiSampleCtl; ++ IMG_UINT32 ui32TPCStride; ++ IMG_UINT32 ui32TPCSize; ++ IMG_UINT32 ui32TEScreen; ++ IMG_UINT32 ui32MTileStride; ++ IMG_UINT32 ui32TEAA; ++ IMG_UINT32 ui32TEMTILE1; ++ IMG_UINT32 ui32TEMTILE2; ++ IMG_UINT32 ui32ISPMergeLowerX; ++ IMG_UINT32 ui32ISPMergeLowerY; ++ IMG_UINT32 ui32ISPMergeUpperX; ++ IMG_UINT32 ui32ISPMergeUpperY; ++ IMG_UINT32 ui32ISPMergeScaleX; ++ IMG_UINT32 ui32ISPMergeScaleY; ++ IMG_UINT32 uiRgnHeaderSize; ++ IMG_UINT32 ui32ISPMtileSize; ++} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; ++ ++/*! ++ * @InGroup RenderTarget ++ * @Brief Firmware Render Target data i.e. 
HWRTDATA used to hold the PM context
++ */
++typedef struct
++{
++ IMG_DEV_VIRTADDR RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */
++
++ IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[4]; /*!< VCE Page Catalogue base */
++ IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[4];
++ IMG_UINT64 RGXFW_ALIGN ui64TECatBase[4]; /*!< TE Page Catalogue base */
++ IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[4];
++ IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */
++ IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase;
++
++ IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; /*!< Freelist page table entry for current Mlist page */
++ IMG_UINT32 ui32PMMListStackPointer; /*!< Current Mlist page */
++
++ RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; /*!< Render target dimension dependent data */
++
++ IMG_UINT32 ui32HWRTDataFlags;
++ RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */
++
++ PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */
++ IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS];
++
++ IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */
++
++ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Render target clean up state */
++
++ RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */
++
++ IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */
++ IMG_DEV_VIRTADDR RGXFW_ALIGN sMacrotileArrayDevVAddr; /*!< Macrotiling array base */
++ IMG_DEV_VIRTADDR RGXFW_ALIGN sRgnHeaderDevVAddr; /*!< Region headers base */
++ IMG_DEV_VIRTADDR RGXFW_ALIGN sRTCDevVAddr; /*!< Render target cache base */
++#if defined(RGX_FIRMWARE)
++ struct RGXFWIF_FWCOMMONCONTEXT_* RGXFW_ALIGN psOwnerGeom;
++#else
++ RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN pui32OwnerGeomNotUsedByHost;
++#endif
++#if defined(SUPPORT_TRP)
++ IMG_UINT32 ui32KickFlagsCopy;
++ IMG_UINT32 ui32TRPState;
++ IMG_UINT32 ui32TEPageCopy;
++ IMG_UINT32 ui32VCEPageCopy;
++#endif
++#if defined(SUPPORT_AGP)
++ IMG_BOOL bTACachesNeedZeroing;
++#endif
++} UNCACHED_ALIGN RGXFWIF_HWRTDATA;
++
++/* Sync_checkpoint firmware object.
++ * This is the FW-addressable structure used to hold the sync checkpoint's
++ * state and other information which needs to be accessed by the firmware.
++ */
++typedef struct
++{
++ IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */
++ IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */
++} SYNC_CHECKPOINT_FW_OBJ;
++
++/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */
++#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0)
++
++#endif /* RGX_FWIF_KM_H */
++
++/******************************************************************************
++ End of file (rgx_fwif_km.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_shared.h b/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_shared.h
+new file mode 100644
+index 000000000000..13844ad4e801
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/rogue/rgx_fwif_shared.h
+@@ -0,0 +1,335 @@
++/*************************************************************************/ /*!
++@File
++@Title RGX firmware interface structures
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description RGX firmware interface structures shared by both host client
++ and host server
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGX_FWIF_SHARED_H)
++#define RGX_FWIF_SHARED_H
++
++#include "img_types.h"
++#include "img_defs.h"
++#include "rgx_common.h"
++#include "powervr/mem_types.h"
++
++/* Indicates the number of RTDATAs per RTDATASET */
++#if defined(SUPPORT_AGP)
++#define RGXMKIF_NUM_RTDATAS 4U
++#define RGXMKIF_NUM_GEOMDATAS 4U
++#define RGXMKIF_NUM_RTDATA_FREELISTS 12U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */
++#define RGX_NUM_GEOM_CORES (2U)
++#else
++#define RGXMKIF_NUM_RTDATAS 2U
++#define RGXMKIF_NUM_GEOMDATAS 1U
++#define RGXMKIF_NUM_RTDATA_FREELISTS 2U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */
++#define RGX_NUM_GEOM_CORES (1U)
++#endif
++
++/* Maximum number of UFOs in a CCB command.
++ * The number is based on having 32 sync prims (as originally), plus 32 sync
++ * checkpoints.
++ * Once the use of sync prims is no longer supported, we will retain the same
++ * total (64), since the number of sync checkpoints which may be backing a
++ * fence is not visible to the client driver and has to allow for the number
++ * of different timelines involved in fence merges.
++ */
++#define RGXFWIF_CCB_CMD_MAX_UFOS (32U+32U)
++
++/*
++ * This is a generic limit imposed on any DM (TA,3D,CDM,TDM,2D,TRANSFER)
++ * command passed through the bridge.
++ * Just across the bridge in the server, any incoming kick command size is
++ * checked against this maximum limit.
++ * In case the incoming command size is larger than the specified limit,
++ * the bridge call is rejected with an error.
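++ * A server-side sketch of that check (illustrative only, not the actual
++ * bridge code; the local name is assumed):
++ *     if (ui32IncomingCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)
++ *         return PVRSRV_ERROR_INVALID_PARAMS;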
++ */
++#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U)
++
++typedef struct RGXFWIF_DEV_VIRTADDR_
++{
++ IMG_UINT32 ui32Addr;
++} RGXFWIF_DEV_VIRTADDR;
++
++typedef struct
++{
++ IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr;
++ RGXFWIF_DEV_VIRTADDR pbyFWAddr;
++} UNCACHED_ALIGN RGXFWIF_DMA_ADDR;
++
++typedef IMG_UINT8 RGXFWIF_CCCB;
++
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL;
++
++
++/*!
++ * @InGroup ClientCCBTypes
++ * @Brief Command data for fence & update types Client CCB commands.
++ */
++typedef struct
++{
++ PRGXFWIF_UFO_ADDR puiAddrUFO; /*!< Address to be checked/updated */
++ IMG_UINT32 ui32Value; /*!< Value to check-against/update-to */
++} RGXFWIF_UFO;
++
++typedef struct
++{
++ IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */
++ IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */
++} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL;
++
++#define RGXFWIF_PRBUFFER_START IMG_UINT32_C(0)
++#define RGXFWIF_PRBUFFER_ZSBUFFER IMG_UINT32_C(0)
++#define RGXFWIF_PRBUFFER_MSAABUFFER IMG_UINT32_C(1)
++#define RGXFWIF_PRBUFFER_MAXSUPPORTED IMG_UINT32_C(2)
++
++typedef IMG_UINT32 RGXFWIF_PRBUFFER_TYPE;
++
++typedef enum
++{
++ RGXFWIF_PRBUFFER_UNBACKED = 0,
++ RGXFWIF_PRBUFFER_BACKED,
++ RGXFWIF_PRBUFFER_BACKING_PENDING,
++ RGXFWIF_PRBUFFER_UNBACKING_PENDING,
++}RGXFWIF_PRBUFFER_STATE;
++
++/*!
++ * @InGroup RenderTarget
++ * @Brief OnDemand Z/S/MSAA Buffers
++ */
++typedef struct
++{
++ IMG_UINT32 ui32BufferID; /*!< Buffer ID*/
++ IMG_BOOL bOnDemand; /*!< Needs On-demand Z/S/MSAA Buffer allocation */
++ RGXFWIF_PRBUFFER_STATE eState; /*!< Z/S/MSAA -Buffer state */
++ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */
++ IMG_UINT32 ui32PRBufferFlags; /*!< Compatibility and other flags */
++} UNCACHED_ALIGN RGXFWIF_PRBUFFER;
++
++/*
++ * Used to share frame numbers across UM-KM-FW. The frame number is set in UM
++ * and is required both in the KM (for HTB) and in the FW (for FW trace).
++ *
++ * May be used to house Kick flags in the future.
++ */
++typedef struct
++{
++ IMG_UINT32 ui32FrameNum; /*!< associated frame number */
++} CMD_COMMON;
++
++/*
++ * TA and 3D commands require a set of firmware addresses that are stored in
++ * the Kernel. The Client has handle(s) to the Kernel containers storing these
++ * addresses, instead of the raw addresses. We have to patch/write these
++ * addresses in KM to prevent UM from controlling FW addresses directly.
++ * Typedefs for TA and 3D commands are shared between Client and Firmware (both
++ * single-BVNC). The Kernel is implemented in a multi-BVNC manner, so it can't
++ * use the TA|3D CMD type definitions directly. Therefore we have a SHARED block
++ * that is shared between UM-KM-FW across all BVNC configurations.
++ */
++typedef struct
++{
++ CMD_COMMON sCmn; /*!< Common command attributes */
++ RGXFWIF_DEV_VIRTADDR sHWRTData; /* RTData associated with this command;
++ used for context selection and for storing out the HW context
++ when the TA is switched out, so it can be resumed later */
++
++ RGXFWIF_DEV_VIRTADDR asPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /* Supported PR Buffers like Z/S/MSAA Scratch */
++
++} CMDTA3D_SHARED;
++
++/*!
++ * Client Circular Command Buffer (CCCB) control structure.
++ * This is shared between the Server and the Firmware and holds byte offsets
++ * into the CCCB as well as the wrapping mask to aid wrap around.
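++ * Because the CCCB capacity is a power of two, offsets wrap with a bitwise
++ * AND rather than a modulo; e.g. (illustrative, not actual server code):
++ *     ui32WriteOffset = (ui32WriteOffset + ui32CmdSize) & ui32WrapMask;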
++ * A given snapshot of this queue with Cmd 1 running on the GPU might be:
++ *
++ *          Roff                          Doff                  Woff
++ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
++ *            <       runnable commands      ><   !ready to run   >
++ *
++ * Cmd 1 : Currently executing on the GPU data master.
++ * Cmd 2,3,4: Fence dependencies met, commands runnable.
++ * Cmd 5... : Fence dependency not met yet.
++ */
++typedef struct
++{
++ IMG_UINT32 ui32WriteOffset; /*!< Host write offset into CCB. This
++ * must be aligned to 16 bytes. */
++ IMG_UINT32 ui32ReadOffset; /*!< Firmware read offset into CCB.
++ * Points to the command that is
++ * runnable on GPU, if R!=W */
++ IMG_UINT32 ui32DepOffset; /*!< Firmware fence dependency offset.
++ * Points to commands not ready, i.e.
++ * fence dependencies are not met. */
++ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask: the total
++ * capacity of the CCB in bytes,
++ * minus one */
++#if defined(SUPPORT_AGP)
++ IMG_UINT32 ui32ReadOffset2;
++#if defined(SUPPORT_AGP4)
++ IMG_UINT32 ui32ReadOffset3;
++ IMG_UINT32 ui32ReadOffset4;
++#endif
++#endif
++
++} UNCACHED_ALIGN RGXFWIF_CCCB_CTL;
++
++
++typedef IMG_UINT32 RGXFW_FREELIST_TYPE;
++
++#define RGXFW_LOCAL_FREELIST IMG_UINT32_C(0)
++#define RGXFW_GLOBAL_FREELIST IMG_UINT32_C(1)
++#if defined(SUPPORT_AGP)
++#define RGXFW_GLOBAL2_FREELIST IMG_UINT32_C(2)
++#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL2_FREELIST + 1U)
++#else
++#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL_FREELIST + 1U)
++#endif
++#define RGXFW_MAX_HWFREELISTS (2U)
++
++/*!
++ * @Defgroup ContextSwitching Context switching data interface
++ * @Brief Types grouping data structures and defines used in realising the Context Switching (CSW) functionality
++ * @{
++ */
++
++/*!
++ * @Brief GEOM DM or TA register controls for context switch
++ */
++typedef struct
++{
++ IMG_UINT64 uTAReg_VDM_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the VDM's context state buffer */
++ IMG_UINT64 uTAReg_VDM_CONTEXT_STATE_RESUME_ADDR;
++ IMG_UINT64 uTAReg_TA_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the TA's context state buffer */
++
++ struct
++ {
++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK0; /*!< VDM context store task 0 */
++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK1; /*!< VDM context store task 1 */
++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK2; /*!< VDM context store task 2 */
++
++ /* VDM resume state update controls */
++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK0; /*!< VDM context resume task 0 */
++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK1; /*!< VDM context resume task 1 */
++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK2; /*!< VDM context resume task 2 */
++
++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK3;
++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK4;
++
++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK3;
++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK4;
++ } asTAState[2];
++
++} RGXFWIF_TAREGISTERS_CSWITCH;
++/*! @} End of Defgroup ContextSwitching */
++
++#define RGXFWIF_TAREGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_TAREGISTERS_CSWITCH)
++
++typedef struct
++{
++ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0;
++ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS1;
++ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS;
++ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS1;
++
++ /* CDM resume controls */
++ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0;
++ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0_B;
++ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0_B;
++
++} RGXFWIF_CDM_REGISTERS_CSWITCH;
++
++/*!
++ * @InGroup ContextSwitching ++ * @Brief Render context static register controls for context switch ++ */ ++typedef struct ++{ ++ RGXFWIF_TAREGISTERS_CSWITCH RGXFW_ALIGN asCtxSwitch_GeomRegs[RGX_NUM_GEOM_CORES]; /*!< Geom registers for ctx switch */ ++} RGXFWIF_STATIC_RENDERCONTEXT_STATE; ++ ++#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE) ++ ++typedef struct ++{ ++ RGXFWIF_CDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< CDM registers for ctx switch */ ++} RGXFWIF_STATIC_COMPUTECONTEXT_STATE; ++ ++#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE) ++ ++/*! ++ @Brief Context reset reason. Last reset reason for a reset context. ++*/ ++typedef enum ++{ ++ RGX_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */ ++ RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */ ++ RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */ ++ RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */ ++ RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */ ++ RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, /*!< Forced reset to ensure scheduling requirements */ ++ RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM = 6, /*!< CDM Mission/safety checksum mismatch */ ++ RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM = 7, /*!< TRP checksum mismatch */ ++ RGX_CONTEXT_RESET_REASON_GPU_ECC_OK = 8, /*!< GPU ECC error (corrected, OK) */ ++ RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR = 9, /*!< GPU ECC error (uncorrected, HWR) */ ++ RGX_CONTEXT_RESET_REASON_FW_ECC_OK = 10, /*!< FW ECC error (corrected, OK) */ ++ RGX_CONTEXT_RESET_REASON_FW_ECC_ERR = 11, /*!< FW ECC error (uncorrected, ERR) */ ++ RGX_CONTEXT_RESET_REASON_FW_WATCHDOG = 12, /*!< FW Safety watchdog triggered */ ++ RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13, /*!< FW page fault (no HWR) */ ++ RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14, /*!< FW execution error (GPU reset requested) */ ++ RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15, /*!< Host watchdog detected FW error */ ++ RGX_CONTEXT_GEOM_OOM_DISABLED = 16, /*!< Geometry DM OOM event is not allowed */ ++} RGX_CONTEXT_RESET_REASON; ++ ++/*! ++ @Brief Context reset data shared with the host ++*/ ++typedef struct ++{ ++ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reset reason */ ++ IMG_UINT32 ui32ResetExtJobRef; /*!< External Job ID */ ++} RGX_CONTEXT_RESET_REASON_DATA; ++#endif /* RGX_FWIF_SHARED_H */ ++ ++/****************************************************************************** ++ End of file (rgx_fwif_shared.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/rogue/rgx_heaps.h b/drivers/gpu/drm/img-rogue/include/rogue/rgx_heaps.h +new file mode 100644 +index 000000000000..e41e4002b2c4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rogue/rgx_heaps.h +@@ -0,0 +1,68 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX heap definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(RGX_HEAPS_H) ++#define RGX_HEAPS_H ++ ++/* ++ Identify heaps by their names ++*/ ++#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< RGX General SVM (shared virtual memory) Heap Identifier */ ++#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */ ++#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */ ++#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */ ++#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */ ++#define RGX_VK_CAPT_REPLAY_HEAP_IDENT "Vulkan Capture Replay" /*!< RGX Vulkan capture replay buffer Heap Identifier */ ++#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Signals Heap Identifier */ ++#define RGX_FBCDC_HEAP_IDENT "FBCDC" /*!< RGX FBCDC State Table Heap Identifier */ ++#define RGX_FBCDC_LARGE_HEAP_IDENT "Large FBCDC" /*!< RGX Large FBCDC State Table Heap Identifier */ ++#define RGX_CMP_MISSION_RMW_HEAP_IDENT "Compute Mission RMW" /*!< Compute Mission RMW Heap Identifier */ ++#define RGX_CMP_SAFETY_RMW_HEAP_IDENT "Compute Safety RMW" /*!< Compute Safety RMW Heap Identifier */ ++#define RGX_TEXTURE_STATE_HEAP_IDENT "Texture State" /*!< Texture State Heap Identifier */ ++#define RGX_VISIBILITY_TEST_HEAP_IDENT "Visibility Test" /*!< Visibility Test Heap Identifier */ ++ ++/* Services client internal heap identification */ ++#define RGX_RGNHDR_BRN_63142_HEAP_IDENT "RgnHdr BRN63142" /*!< RGX RgnHdr BRN63142 Heap Identifier */ ++#define RGX_TQ3DPARAMETERS_HEAP_IDENT "TQ3DParameters" /*!< RGX TQ 3D Parameters Heap Identifier */ ++#define RGX_MMU_INIA_BRN_65273_HEAP_IDENT "MMU INIA BRN65273" /*!< MMU BRN65273 Heap A Identifier */ ++#define RGX_MMU_INIB_BRN_65273_HEAP_IDENT "MMU INIB BRN65273" /*!< MMU BRN65273 Heap B Identifier */ ++#endif /* RGX_HEAPS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/rogue/rgx_hwperf.h b/drivers/gpu/drm/img-rogue/include/rogue/rgx_hwperf.h +new file mode 100644 +index 000000000000..fa711b0b6df2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rogue/rgx_hwperf.h +@@ -0,0 +1,1607 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX HWPerf and Debug Types and Defines Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Common data types definitions for hardware performance API ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_HWPERF_H_ ++#define RGX_HWPERF_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* These structures are used on both GPU and CPU and must be a size that is a ++ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at ++ * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. ++ */ ++ ++/****************************************************************************** ++ * Includes and Defines ++ *****************************************************************************/ ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++#include "rgx_common.h" ++#include "rgx_hwperf_common.h" ++#include "pvrsrv_tlcommon.h" ++#include "pvrsrv_sync_km.h" ++ ++ ++#if !defined(__KERNEL__) ++/* User-mode and Firmware definitions only */ ++ ++#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) ++ ++/* HWPerf interface assumption checks */ ++static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, "Cluster count too large for HWPerf protocol definition"); ++ ++/*! The number of indirectly addressable TPU_MSC blocks in the GPU */ ++# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST MAX(((IMG_UINT32)RGX_FEATURE_NUM_CLUSTERS >> 1), 1U) ++ ++/*! The number of indirectly addressable USC blocks in the GPU */ ++# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER (RGX_FEATURE_NUM_CLUSTERS) ++ ++# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) ++ ++ /*! Defines the number of performance counter blocks that are directly ++ * addressable in the RGX register map for S. */ ++# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 1 /* JONES */ ++# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS) ++# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 1 /* BLACKPEARL */ ++# define RGX_HWPERF_PHANTOM_DUST_BLKS 2 /* TPU, TEXAS */ ++# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 2 /* USC, PBE */ ++ ++# elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) ++ ++ /*! Defines the number of performance counter blocks that are directly ++ * addressable in the RGX register map. 
*/
++# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 2 /* TORNADO, TA */
++
++# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS)
++# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 2 /* RASTER, TEXAS */
++# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */
++# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
++
++# else /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) i.e. S6 */
++
++ /*! Defines the number of performance counter blocks that are
++ * addressable in the RGX register map for Series 6. */
++# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 3 /* TA, RASTER, HUB */
++# define RGX_HWPERF_INDIRECT_BY_PHANTOM 0 /* There is no PHANTOM in Rogue 1. The define is kept so the naming matches later series (Rogue XT and Rogue XT+) */
++# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 0
++# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */
++# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
++
++# endif
++
++/*! The number of performance counters in each layout block defined for UM/FW code */
++#if defined(RGX_FEATURE_CLUSTER_GROUPING)
++ #define RGX_HWPERF_CNTRS_IN_BLK 6
++ #else
++ #define RGX_HWPERF_CNTRS_IN_BLK 4
++#endif
++
++#endif /* #if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) */
++#else /* defined(__KERNEL__) */
++/* Kernel/server definitions - not used, hence invalid definitions */
++
++# define RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC 0xFF
++
++# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
++# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
++
++# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
++# define RGX_HWPERF_INDIRECT_BY_PHANTOM RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
++# define RGX_HWPERF_PHANTOM_NONDUST_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
++# define RGX_HWPERF_PHANTOM_DUST_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
++# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC
++
++#endif
++
++/*! The number of custom non-mux counter blocks supported */
++#define RGX_HWPERF_MAX_CUSTOM_BLKS 5U
++
++/*! The number of counters supported in each non-mux counter block */
++#define RGX_HWPERF_MAX_CUSTOM_CNTRS 8U
++
++/*! The number of directly-addressable counters allowed in non-mux counter blocks */
++#define RGX_CNTBLK_COUNTERS_MAX ((IMG_UINT32)PVRSRV_HWPERF_COUNTERS_PERBLK + 0U)
++
++
++/******************************************************************************
++ * Data Stream Common Types
++ *****************************************************************************/
++
++/*! All the Data Masters HWPerf is aware of. When a new DM is added to this
++ * list, it should be appended at the end to maintain backward compatibility
++ * of HWPerf data.
++ */
++typedef enum {
++
++ RGX_HWPERF_DM_GP,
++ RGX_HWPERF_DM_2D,
++ RGX_HWPERF_DM_TA,
++ RGX_HWPERF_DM_3D,
++ RGX_HWPERF_DM_CDM,
++ RGX_HWPERF_DM_RTU,
++ RGX_HWPERF_DM_SHG,
++ RGX_HWPERF_DM_TDM,
++
++ RGX_HWPERF_DM_LAST,
++
++ RGX_HWPERF_DM_INVALID = 0x1FFFFFFF
++} RGX_HWPERF_DM;
++
++/*!
Define containing bit position for 32bit feature flags used in hwperf and api */ ++typedef IMG_UINT32 RGX_HWPERF_FEATURE_FLAGS; ++#define RGX_HWPERF_FEATURE_PERFBUS_FLAG 0x0001U ++#define RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG 0x0002U ++#define RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG 0x0004U ++#define RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG 0x0008U ++#define RGX_HWPERF_FEATURE_ROGUEXE_FLAG 0x0010U ++#define RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG 0x0020U ++#define RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG 0x0040U ++#define RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION 0x0080U ++#define RGX_HWPERF_FEATURE_MULTICORE_FLAG 0x0100U ++#define RGX_HWPERF_FEATURE_VOLCANIC_FLAG 0x0800U ++#define RGX_HWPERF_FEATURE_ROGUE_FLAG 0x1000U ++#define RGX_HWPERF_FEATURE_OCEANIC_FLAG 0x2000U ++ ++/*! This structure holds the data of a firmware packet. */ ++typedef struct ++{ ++ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ ++ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ ++ IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ ++ IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ ++ IMG_UINT32 ui32TimeCorrIndex; /*!< Internal field */ ++ IMG_UINT32 ui32Padding; /*!< Reserved */ ++} RGX_HWPERF_FW_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); ++ ++/*! This structure holds the data of a hardware packet, including counters. */ ++typedef struct ++{ ++ IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ ++ IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ ++ IMG_UINT32 ui32PID; /*!< Process identifier */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ ++ IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ ++ IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ ++ IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ ++ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ ++ IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ ++ IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ ++ IMG_UINT32 ui32CtxPriority; /*!< Context priority */ ++ IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */ ++ IMG_UINT32 ui32KickInfo; /*!< <31..8> Reserved <7..0> GPU Pipeline DM kick ID, 0 if not using Pipeline DMs */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. To ensure correct alignment */ ++ IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Optional variable length Counter data */ ++ IMG_UINT32 ui32Padding2; /*!< Reserved. 
To ensure correct alignment (not written in the packet) */
++} RGX_HWPERF_HW_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA);
++RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_HW_DATA, aui32CountBlksStream);
++
++typedef struct
++{
++ IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */
++ IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */
++ IMG_UINT32 ui32PID; /*!< Process identifier */
++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
++ IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */
++ IMG_UINT32 ui32ExtJobRef; /*!< V2A Block count / Client driver context job reference used for tracking/debugging */
++ IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */
++ IMG_UINT32 ui32TimeCorrIndex; /*!< V2 Block count / Index to the time correlation at the time the packet was generated */
++} RGX_HWPERF_HW_DATA_V2;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA_V2);
++
++/*! Mask for use with the aui32CountBlksStream field when decoding the
++ * counter block ID and mask word. */
++#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U
++#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U
++
++/*! Obtains the counter block ID word from an aui32CountBlksStream field.
++ * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit
++ * within group (3-0) */
++#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT))
++
++/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address
++ * and stream index. May be used in decoding the counter block stream words of
++ * a RGX_HWPERF_HW_DATA structure. */
++#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)])
++
++/*! Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */
++#define RGX_HWPERF_GET_CNTBLK_GPUW(_word) ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT))
++
++#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK)))
++
++/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address
++ * and stream index. May be used in decoding the counter block stream words
++ * of a RGX_HWPERF_HW_DATA structure. */
++#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)])
++
++/*! Context switch packet event */
++typedef struct
++{
++ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */
++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
++ IMG_UINT32 ui32FrameNum; /*!< Client Frame number (TA, 3D only) */
++ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
++ IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */
++ IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */
++ IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */
++} RGX_HWPERF_CSW_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA);
++
++/*! Enumeration of clocks supporting this event */
++typedef enum
++{
++ RGX_HWPERF_CLKS_CHG_INVALID = 0,
++
++ RGX_HWPERF_CLKS_CHG_NAME_CORE = 1,
++
++ RGX_HWPERF_CLKS_CHG_LAST,
++} RGX_HWPERF_CLKS_CHG_NAME;
++
++/*! This structure holds the data of a clocks change packet.
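++ * (For example, a hypothetical host-side consumer could report the
++ * transition as ui64NewClockSpeed / 1000000 MHz and pair it with
++ * ui64OSTimeStamp to place the DVFS event on the host timeline.)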
++ */
++typedef struct
++{
++ IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */
++ RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */
++ IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */
++ IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */
++ IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and
++ correlated to OSTimeStamp */
++} RGX_HWPERF_CLKS_CHG_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA);
++
++/*! Enumeration of GPU utilisation states supported by this event */
++typedef IMG_UINT32 RGX_HWPERF_GPU_STATE;
++
++/*! This structure holds the data of a GPU utilisation state change packet. */
++typedef struct
++{
++ RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */
++ IMG_UINT32 uiUnused1; /*!< Padding */
++ IMG_UINT32 uiUnused2; /*!< Padding */
++ IMG_UINT32 uiUnused3; /*!< Padding */
++} RGX_HWPERF_GPU_STATE_CHG_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA);
++
++
++/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */
++#define HWPERF_PWR_EST_V1_SIG 0x48504531
++
++/*! Macros to obtain a component field from a counter ID word */
++#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31)
++#define RGX_HWPERF_GET_PWR_EST_GPUID(_word) (((_word)&0x70000000)>>28)
++/*!< Obtains the GPU ID from a counter ID word */
++#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24)
++#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF)
++
++#define RGX_HWPERF_PWR_EST_HIGH_OFFSET (31)
++#define RGX_HWPERF_PWR_EST_GPUID_OFFSET (28)
++#define RGX_HWPERF_PWR_EST_GPUID_MASK (0x7U)
++#define RGX_HWPERF_PWR_EST_UNIT_OFFSET (24)
++#define RGX_HWPERF_PWR_EST_UNIT_MASK (0xFU)
++#define RGX_HWPERF_PWR_EST_VALUE_MASK (0xFFFFU)
++
++/*! This macro constructs a counter ID for a power estimate data stream from
++ * the component parts of: high word flag, unit id, GPU id, counter number */
++#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \
++ ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<<RGX_HWPERF_PWR_EST_HIGH_OFFSET) | \
++ ((IMG_UINT32)((IMG_UINT32)(_unit)&RGX_HWPERF_PWR_EST_UNIT_MASK)<<RGX_HWPERF_PWR_EST_UNIT_OFFSET) | \
++ ((IMG_UINT32)((IMG_UINT32)(_core)&RGX_HWPERF_PWR_EST_GPUID_MASK)<<RGX_HWPERF_PWR_EST_GPUID_OFFSET) | \
++ ((IMG_UINT32)((IMG_UINT32)(_number)&RGX_HWPERF_PWR_EST_VALUE_MASK)))
++
++/*! Maximum length of the BVNC string carried in the BVNC feature packet */
++#define RGX_HWPERF_MAX_BVNC_LEN (24)
++
++static_assert((RGX_HWPERF_MAX_BVNC_LEN >= RGX_BVNC_STR_SIZE_MAX),
++ "Space inside HWPerf packet data for BVNC string insufficient");
++
++#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (16U)
++
++/*! BVNC Features */
++typedef struct
++{
++ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
++ IMG_UINT16 ui16BlockID;
++
++ /*! Number of counters in this block type */
++ IMG_UINT16 ui16NumCounters;
++
++ /*! Number of blocks of this type */
++ IMG_UINT16 ui16NumBlocks;
++
++ /*! Reserved for future use */
++ IMG_UINT16 ui16Reserved;
++} RGX_HWPERF_BVNC_BLOCK;
++
++/*! BVNC Features */
++typedef struct
++{
++ IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */
++ IMG_UINT32 ui32BvncKmFeatureFlags; /*!< See RGX_HWPERF_FEATURE_FLAGS */
++ IMG_UINT16 ui16BvncBlocks; /*!< Number of blocks described in aBvncBlocks */
++ IMG_UINT16 ui16BvncGPUCores; /*!< Number of GPU cores present */
++ RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. See RGX_HWPERF_BVNC_BLOCK */
++} RGX_HWPERF_BVNC;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC);
++
++/*! Performance Counter Configuration data element. */
++typedef struct
++{
++ IMG_UINT32 ui32BlockID; /*!< Counter Block ID.
See RGX_HWPERF_CNTBLK_ID */ ++ IMG_UINT32 ui32NumCounters; /*!< Number of counters configured */ ++ IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; /*!< Counters configured (ui32NumCounters worth of entries) */ ++} RGX_HWPERF_COUNTER_CFG_DATA_EL; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL); ++ ++/*! Performance Counter Configuration data. */ ++typedef struct ++{ ++ IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */ ++ RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data. See RGX_HWPERF_COUNTER_CFG_DATA_EL */ ++ IMG_UINT32 ui32Padding; /*!< reserved */ ++} RGX_HWPERF_COUNTER_CFG; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG); ++ ++/*! Sub-event's data. */ ++typedef union ++{ ++ struct ++ { ++ RGX_HWPERF_DM eDM; /*!< Data Master ID. */ ++ RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ ++ IMG_UINT32 ui32DMContext; /*!< FW render context */ ++ } sHWR; /*!< HWR sub-event data. */ ++ ++ RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features. See RGX_HWPERF_BVNC */ ++ struct ++ { ++ IMG_UINT32 ui32EvMaskLo; /*!< Low order 32 bits of Filter Mask */ ++ IMG_UINT32 ui32EvMaskHi; /*!< High order 32 bits of Filter Mask */ ++ } sEvMsk; /*!< HW Filter Mask */ ++ RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */ ++} RGX_HWPERF_FWACT_DETAIL; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); ++ ++/*! This structure holds the data of a FW activity event packet */ ++typedef struct ++{ ++ RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ ++ RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. */ ++} RGX_HWPERF_FWACT_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); ++ ++ ++typedef enum { ++ RGX_HWPERF_UFO_EV_UPDATE, /*!< Update on the UFO objects. */ ++ RGX_HWPERF_UFO_EV_CHECK_SUCCESS, /*!< Successful check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, /*!< Successful partial render check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_CHECK_FAIL, /*!< Unsuccessful check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_PRCHECK_FAIL, /*!< Unsuccessful partial render check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_FORCE_UPDATE, /*!< Forced erroring of the UFO objects. */ ++ ++ RGX_HWPERF_UFO_EV_LAST /*!< Reserved. Do not use. */ ++} RGX_HWPERF_UFO_EV; ++ ++/*! Data stream tuple. */ ++typedef union ++{ ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ ++ IMG_UINT32 ui32Value; /*!< Value of the UFO object */ ++ } sCheckSuccess; ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ ++ IMG_UINT32 ui32Value; /*!< Value of the UFO object */ ++ IMG_UINT32 ui32Required; /*!< Value of the UFO object required by the fence */ ++ } sCheckFail; ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ ++ IMG_UINT32 ui32OldValue; /*!< Value of UFO object before update */ ++ IMG_UINT32 ui32NewValue; /*!< Value of UFO object after update */ ++ } sUpdate; ++} RGX_HWPERF_UFO_DATA_ELEMENT; ++ ++/*! This structure holds the packet payload data for UFO event. */ ++typedef struct ++{ ++ RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event. See RGX_HWPERF_UFO_EV */ ++ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the timer correlation data ++ at the time the packet was generated. ++ Used to approximate Host timestamps for ++ these events. 
*/ ++ IMG_UINT32 ui32PID; /*!< Client process identifier */ ++ IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX ++ API to track submitted work (for ++ debugging/trace purposes) */ ++ IMG_UINT32 ui32IntJobRef; /*!< Internal reference used to track ++ submitted work (for debugging / trace ++ purposes) */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context. ++ RenderContext for TA and 3D, Common ++ Context for other DMs */ ++ IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the ++ stream and stream data offset in the ++ payload */ ++ RGX_HWPERF_DM eDM; /*!< Data Master number, see RGX_HWPERF_DM */ ++ IMG_UINT32 ui32Padding; /*!< Unused, reserved */ ++ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Series of tuples holding UFO objects data */ ++} RGX_HWPERF_UFO_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA); ++ ++ ++/*! ++ * RGX_HWPERF_KICK_TYPE describes the type of kick for events received / sent ++ * between KICK_START / KICK_END inclusively for all event types. ++ */ ++typedef enum ++{ ++ RGX_HWPERF_KICK_TYPE_TA3D, /*!< Replaced by separate TA and 3D types */ ++ RGX_HWPERF_KICK_TYPE_TQ2D, /*!< 2D TQ Kick */ ++ RGX_HWPERF_KICK_TYPE_TQ3D, /*!< 3D TQ Kick */ ++ RGX_HWPERF_KICK_TYPE_CDM, /*!< Compute Kick */ ++ RGX_HWPERF_KICK_TYPE_RS, /*!< Ray Store Kick */ ++ RGX_HWPERF_KICK_TYPE_VRDM, /*!< Vertex Ray Data Master Kick */ ++ RGX_HWPERF_KICK_TYPE_TQTDM,/*!< 2D Data Master TQ Kick */ ++ RGX_HWPERF_KICK_TYPE_SYNC, /*!< Sync Kick */ ++ RGX_HWPERF_KICK_TYPE_TA, /*!< TA Kick */ ++ RGX_HWPERF_KICK_TYPE_3D, /*!< 3D Kick */ ++ RGX_HWPERF_KICK_TYPE_LAST, ++ ++ RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff ++} RGX_HWPERF_KICK_TYPE; ++ ++typedef struct ++{ ++ RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for ++ scheduling on GPU hardware. ++ See RGX_HWPERF_KICK_TYPE */ ++ IMG_UINT32 ui32PID; /*!< Client process identifier */ ++ IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX API ++ to track submitted work (for debugging / ++ trace purposes) */ ++ IMG_UINT32 ui32IntJobRef; /*!< internal reference used to track submitted ++ work (for debugging / trace purposes) */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ ++ IMG_UINT32 ui32Padding; /*!< Unused, reserved */ ++ IMG_UINT64 ui64CheckFence_UID; /*!< ID of fence gating work execution on GPU */ ++ IMG_UINT64 ui64UpdateFence_UID; /*!< ID of fence triggered after work completes on GPU */ ++ IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ ++ IMG_UINT32 ui32CycleEstimate; /*!< Estimated cycle time for the workload */ ++ PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ ++ PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ ++ PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ ++ ++ /* Align structure size to 8 bytes */ ++} RGX_HWPERF_HOST_ENQ_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. 
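++ * As a sketch, assuming PVRSRVTL_PACKET_ALIGNMENT is 8: the assert below
++ * computes sizeof(RGX_HWPERF_HOST_ENQ_DATA) & 7U, which is zero only for
++ * sizes 8, 16, 24, ...; a 30-byte payload would fail with (30 & 7) == 6,
++ * leaving the next packet header misaligned.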
*/ ++static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef struct ++{ ++ RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event */ ++ IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the stream and ++ stream data offset in the payload */ ++#ifdef __CHECKER__ ++ /* Since we're not conforming to the C99 standard by not using a flexible ++ * array member need to add a special case for Smatch static code analyser. */ ++ IMG_UINT32 aui32StreamData[]; ++#else ++ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; ++ /*!< Series of tuples holding UFO objects data */ ++ ++ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ ++#endif ++} RGX_HWPERF_HOST_UFO_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/*! ++ * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been ++ * Allocated, Freed or Modified. The values are used to determine which event ++ * data structure to use to decode the data from the event stream ++ */ ++typedef enum ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, /*!< Invalid */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /*!< SyncPrim */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, ++ /*!< Timeline resource packets are ++ now emitted in client hwperf buffer */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, /*!< Sync Checkpoint */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /*!< Fence created on SW timeline */ ++ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_LAST /*!< End of enumeration */ ++} RGX_HWPERF_HOST_RESOURCE_TYPE; ++ ++typedef union ++{ ++ /*! Data for TYPE_TIMELINE (*Deprecated*). This sub-event is no longer ++ * generated in the HOST stream. Timeline data is now provided in the ++ * CLIENT stream instead. ++ */ ++ struct ++ { ++ IMG_UINT32 uiPid; /*!< Identifier of owning process */ ++ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ } sTimelineAlloc; ++ ++ /*! Data for TYPE_FENCE_PVR */ ++ struct ++ { ++ IMG_PID uiPID; /*!< Identifier of owning process */ ++ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ ++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point ++ backing this fence on the GPU */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sFenceAlloc; ++ ++ /*! Data for TYPE_SYNC_CP */ ++ struct ++ { ++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ ++ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ ++ IMG_PID uiPID; /*!< Identifier of owning process */ ++ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sSyncCheckPointAlloc; ++ ++ /*! 
Data for TYPE_FENCE_SW */ ++ struct ++ { ++ IMG_PID uiPID; /*!< Identifier of owning process */ ++ PVRSRV_FENCE hSWFence; /*!< Unique identifier for the SWFence resource */ ++ PVRSRV_TIMELINE hSWTimeline; /*!< Unique identifier for the timeline resource */ ++ IMG_UINT64 ui64SyncPtIndex; /*!< Sync-pt index where this SW timeline has reached */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sSWFenceAlloc; ++ ++ /*! Data for TYPE_SYNC */ ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< Identifier of sync resource */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sSyncAlloc; ++} RGX_HWPERF_HOST_ALLOC_DETAIL; ++ ++typedef struct ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; ++ /*!< This describes the type of the resource ++ allocated in the driver. See ++ RGX_HWPERF_HOST_RESOURCE_TYPE */ ++ RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; ++ /*!< Union of structures providing further ++ data regarding the resource allocated. ++ Size of data varies with union member that ++ is present, check ``ui32AllocType`` value ++ to decode */ ++} RGX_HWPERF_HOST_ALLOC_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef union ++{ ++ /*! Data for TYPE_TIMELINE (*Deprecated*) */ ++ struct ++ { ++ IMG_UINT32 uiPid; /*!< Identifier of owning process */ ++ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ } sTimelineDestroy; ++ ++ /*! Data for TYPE_FENCE_PVR */ ++ struct ++ { ++ IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. */ ++ } sFenceDestroy; ++ ++ /*! Data for TYPE_SYNC_CP */ ++ struct ++ { ++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ ++ } sSyncCheckPointFree; ++ ++ /*! Data for TYPE_SYNC */ ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< Unique identifier for the sync resource */ ++ } sSyncFree; ++} RGX_HWPERF_HOST_FREE_DETAIL; ++ ++typedef struct ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType; ++ /*!< This describes the type of the resource ++ freed or released by the driver. See ++ RGX_HWPERF_HOST_RESOURCE_TYPE */ ++ RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail; ++ /*!< Union of structures providing further data ++ regarding the resource freed. Size of data ++ varies with union member that is present, ++ check ``ui32FreeType`` value to decode */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++} RGX_HWPERF_HOST_FREE_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ ++static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef struct ++{ ++ IMG_UINT64 ui64CRTimestamp; /*!< CR timer value from the latest entry of ++ the time domains correlation table */ ++ IMG_UINT64 ui64OSTimestamp; /*!< OS timestamp from the latest entry of the ++ time domains correlation table */ ++ IMG_UINT32 ui32ClockSpeed; /*!< GPU clock speed from the latest entry of ++ the time domains correlation table */ ++ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ ++} RGX_HWPERF_HOST_CLK_SYNC_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef union ++{ ++ /*! Data for TYPE_FENCE_PVR */ ++ struct ++ { ++ IMG_UINT64 ui64NewFence_UID; /*!< Unique identifier for the new merged fence ++ resource that has been created */ ++ IMG_UINT64 ui64InFence1_UID; /*!< Unique identifier for the fence resource */ ++ IMG_UINT64 ui64InFence2_UID; /*!< Unique identifier of the check point backing ++ the fence on the GPU */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ } sFenceMerge; ++} RGX_HWPERF_HOST_MODIFY_DETAIL; ++ ++typedef struct ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType; ++ /*!< Describes the type of the resource ++ modified by the driver. See ++ RGX_HWPERF_HOST_RESOURCE_TYPE */ ++ ++ RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail; ++ /*!< Union of structures providing further ++ data regarding the resource modified. ++ Size of data varies with union member that ++ is present. ++ Check ``uiModifyType`` value to decode */ ++} RGX_HWPERF_HOST_MODIFY_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef enum ++{ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING, /*!< Device responding to requests */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */ ++ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST ++} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS; ++ ++typedef enum ++{ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, /*!< Invalid */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, /*!< No underlying health reason. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, /*!< Device has asserted. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, /*!< Device poll has failed. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, /*!< Device timeout has fired. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, /*!< Queue has become corrupt. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, /*!< Queue has stalled. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, /*!< Device is idling. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, /*!< Device restarting. 
*/ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */ ++ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST ++} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON; ++ ++/*! RGX_HWPERF_DEV_INFO_EV values */ ++typedef enum ++{ ++ RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */ ++ ++ RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */ ++} RGX_HWPERF_DEV_INFO_EV; ++ ++/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing ++ * further data regarding the device's status ++ */ ++typedef union ++{ ++ /*! Data for device status event */ ++ struct ++ { ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus; ++ /*!< Device's health status */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason; ++ /*!< Reason for device's health status */ ++ } sDeviceStatus; ++} RGX_HWPERF_HOST_DEV_INFO_DETAIL; ++ ++/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */ ++typedef struct ++{ ++ IMG_UINT32 ui32Padding; ++ /*!< Reserved. Align structure size to 8 bytes */ ++ RGX_HWPERF_DEV_INFO_EV eEvType; ++ /*!< Type of the sub-event. See ++ RGX_HWPERF_DEV_INFO_EV */ ++ RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail; ++ /*!< Union of structures providing further data ++ regarding the device's status. Size of data ++ varies with union member that is present, ++ check ``eEvType`` value to decode */ ++} RGX_HWPERF_HOST_DEV_INFO_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/*! RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */ ++typedef enum ++{ ++ RGX_HWPERF_INFO_EV_MEM_USAGE, /*!< Memory usage event */ ++ RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */ ++} RGX_HWPERF_INFO_EV; ++ ++/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the ++ * RGX_HWPERF_HOST_INFO_DATA event. ++ */ ++typedef union ++{ ++ /*! Host Memory usage statistics */ ++ struct ++ { ++ IMG_UINT32 ui32TotalMemoryUsage; /*!< Total memory usage */ ++ /*! Detailed memory usage */ ++ struct ++ { ++ IMG_UINT32 ui32Pid; /*!< Process ID */ ++ IMG_UINT32 ui32KernelMemUsage; /*!< Kernel memory usage */ ++ IMG_UINT32 ui32GraphicsMemUsage; /*!< GPU memory usage */ ++ } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; ++ } sMemUsageStats; ++} RGX_HWPERF_HOST_INFO_DETAIL; ++ ++/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device ++ * memory usage information. ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ RGX_HWPERF_INFO_EV eEvType; /*!< Type of subevent. See RGX_HWPERF_INFO_EV */ ++ RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail; ++ /*!< Union of structures providing further data ++ regarding memory usage. Size varies with union ++ member that is present, check ``eEvType`` ++ value to decode */ ++} RGX_HWPERF_HOST_INFO_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/*! 
FENCE_WAIT_TYPE definitions */ ++typedef enum ++{ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, /*!< Begin */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, /*!< End */ ++ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, /*!< Do not use */ ++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE; ++ ++/*! FENCE_WAIT_RESULT definitions */ ++typedef enum ++{ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, /*!< Timed Out */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, /*!< Passed */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, /*!< Errored */ ++ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, /*!< Do not use */ ++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT; ++ ++/*! FENCE_WAIT_DETAIL Event Payload */ ++typedef union ++{ ++/*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */ ++ struct ++ { ++ IMG_UINT32 ui32TimeoutInMs; /*!< Wait timeout (ms) */ ++ } sBegin; ++ ++ /*! Data for SYNC_FENCE_WAIT_TYPE_END */ ++ struct ++ { ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */ ++ } sEnd; ++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL; ++ ++/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure ++ * is received whenever the host driver handles a wait for sync event request. ++ */ ++typedef struct ++{ ++ IMG_PID uiPID; /*!< Identifier of the owning process */ ++ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType; ++ /*!< Type of the subevent, see ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail; ++ /*!< Union of structures providing further data ++ regarding device's status. Size of data varies with ++ union member that is present, check ``eType`` value ++ to decode */ ++ ++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA; ++ ++static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA. ++ * Software Timeline Advanced Event Payload. This data structure is received ++ * whenever the host driver processes a Software Timeline Advanced event. 
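++ * As an illustrative (non-normative) sketch, a consumer that has already
++ * matched this packet type could read the payload directly:
++ *     pData = (const RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *) pvPayload;
++ *     record(pData->uiPID, pData->hTimeline, pData->ui64SyncPtIndex);
++ * where pvPayload points just past the packet header; pData and record()
++ * are hypothetical names, not part of this API.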
++ */ ++typedef struct ++{ ++ IMG_PID uiPID; /*!< Identifier of the owning process */ ++ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ ++ IMG_UINT64 ui64SyncPtIndex; /*!< Index of the sync point to which the ++ timeline has advanced */ ++ ++} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA; ++ ++static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef enum ++{ ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_INVALID = 0, /*!< Invalid */ ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME, /*!< Process Name */ ++ ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_LAST, /*!< Do not use */ ++} RGX_HWPERF_HOST_CLIENT_INFO_TYPE; ++ ++typedef struct ++{ ++ IMG_PID uiClientPID; /*!< Client process identifier */ ++ IMG_UINT32 ui32Length; /*!< Number of bytes present in ``acName`` */ ++ IMG_CHAR acName[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Process name string, null terminated */ ++} RGX_HWPERF_HOST_CLIENT_PROC_NAME; ++ ++#define RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen) \ ++ ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + (ui32NameLen))) ++ ++typedef union ++{ ++ struct ++ { ++ IMG_UINT32 ui32Count; /*!< Number of elements in ``asProcNames`` */ ++ RGX_HWPERF_HOST_CLIENT_PROC_NAME asProcNames[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; ++ } sProcName; ++} RGX_HWPERF_HOST_CLIENT_INFO_DETAIL; ++ ++typedef struct ++{ ++ IMG_UINT32 uiReserved1; /*!< Reserved. Align structure size to 8 bytes */ ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE eType; ++ /*!< Type of the subevent, see ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE */ ++ RGX_HWPERF_HOST_CLIENT_INFO_DETAIL uDetail; ++ /*!< Union of structures. Size of data ++ varies with union member that is present, ++ check ``eType`` value to decode */ ++ ++} RGX_HWPERF_HOST_CLIENT_INFO_DATA; ++ ++static_assert((sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef enum ++{ ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE, ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER, ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS, ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA, ++ ++ RGX_HWPERF_RESOURCE_TYPE_COUNT ++} RGX_HWPERF_RESOURCE_CAPTURE_TYPE; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32Height; ++ IMG_UINT32 ui32Width; ++ IMG_UINT32 ui32BPP; ++ IMG_UINT32 ui32PixFormat; ++} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO; ++ ++typedef struct ++{ ++ IMG_INT32 i32XOffset; /*!< render surface X shift */ ++ IMG_INT32 i32YOffset; /*!< render surface Y shift */ ++ IMG_UINT32 ui32WidthInTiles; /*!< number of TLT data points in X */ ++ IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */ ++} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO; ++ ++typedef union ++{ ++ struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES ++ { ++ IMG_UINT32 ui32RenderSurfaceCount; ++ RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; ++ } sRenderSurfaces; ++ ++ struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS ++ { ++ RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; ++ } sTLTBuffers; ++} RGX_RESOURCE_CAPTURE_DETAIL; ++ ++typedef struct ++{ ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType; ++ IMG_PID uPID; ++ IMG_UINT32 ui32ContextID; ++ IMG_UINT32 ui32FrameNum; ++ IMG_UINT32 ui32CapturedTaskJobRef; /* The job ref of 
the HW task that emitted the data */ ++ IMG_INT32 eClientModule; /* RGX_HWPERF_CLIENT_API - ID that the capture is originating from. */ ++ RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the value of the union */ ++} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO; ++ ++#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail) ++ ++/*! Tile Lifetime Tracking header size. Only available if ++ * RGX_FEATURE_ISP_TILE_LIFETIME_TRACKING is present and enabled via ++ * SUPPORT_TLT_PERF ++ */ ++#define RGX_TLT_HARDWARE_HDR_SIZE (16U) ++ ++/* PVRSRVGetHWPerfResourceCaptureResult */ ++typedef enum ++{ ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0, ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK, /* We got data ok, expect more packets for this request. */ ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY, /* Signals a timeout on the connection - no data available yet. */ ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS, /* The request completed successfully, signals the end of packets for the request. */ ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE /* The request failed, signals the end of packets for the request. */ ++} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS; ++ ++typedef struct ++{ ++ IMG_PID uPID; /* In case of a failed request pass the caller the PID and context ID. */ ++ IMG_UINT32 ui32CtxID; ++ RGX_RESOURCE_CAPTURE_INFO *psInfo; /* Various meta-data regarding the captured resource which aid the requester when, ++ unpacking the resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ ++ IMG_BYTE *pbData; /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ ++} RGX_RESOURCE_CAPTURE_RESULT; ++ ++/*! This type is a union of packet payload data structures associated with ++ * various FW and Host events */ ++typedef union ++{ ++ RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data, ++ events ``0x01-0x06`` */ ++ RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data, ++ events ``0x07-0x19``, ``0x28-0x29`` */ ++ RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet ++ data, events ``0x1A`` */ ++ RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state ++ change event packet data, ++ events ``0x1B`` */ ++ RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event ++ packet data, ++ events ``0x20-0x22`` */ ++ RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data, ++ events ``0x23`` */ ++ RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data, ++ events ``0x30-0x31`` */ ++ RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data, ++ events ``0x32`` */ ++ RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data, events ``0x38`` */ ++ RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event ++ packet data, ++ events ``0x39`` */ ++ /* */ ++ RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data, ++ events ``0x01`` (Host) */ ++ RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data, ++ events ``0x02`` (Host) */ ++ RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data, ++ events ``0x03`` (Host) */ ++ RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data, ++ events ``0x04`` (Host) */ ++ RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data, ++ events ``0x05`` (Host) */ ++ RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data, ++ events ``0x06`` (Host) */ ++ RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data, ++ events ``0x07`` (Host) */ ++ RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data, ++ events ``0x08`` (Host) */ ++ 
RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data, ++ events ``0x09`` (Host) */ ++ RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance ++ data, events ``0x0A`` (Host) */ ++ RGX_HWPERF_HOST_CLIENT_INFO_DATA sHClientInfo; /*!< Host client info, ++ events ``0x0B`` (Host) */ ++ ++} RGX_HWPERF_V2_PACKET_DATA, *RGX_PHWPERF_V2_PACKET_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA); ++ ++#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))) ++ ++#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \ ++ ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType)))) ++ ++/****************************************************************************** ++ * API Types ++ *****************************************************************************/ ++ ++/*! Counter block IDs for all the hardware blocks with counters. ++ * Directly addressable blocks must have a value between 0..15 [0..0xF]. ++ * Indirect groups have following encoding: ++ * First hex digit (LSB) represents a unit number within the group ++ * and the second hex digit represents the group number. ++ * Group 0 is the direct group, all others are indirect groups. ++ */ ++typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; ++ ++/*! Directly addressable counter blocks */ ++#if defined(DOXYGEN) ++/*! _RGX_HWPERF_CNTBLK_ID */ ++#endif ++#define RGX_CNTBLK_ID_TA 0x0000U ++#define RGX_CNTBLK_ID_RASTER 0x0001U /*!< Non-cluster grouping cores */ ++#define RGX_CNTBLK_ID_HUB 0x0002U /*!< Non-cluster grouping cores */ ++#define RGX_CNTBLK_ID_TORNADO 0x0003U /*!< XT cores */ ++#define RGX_CNTBLK_ID_JONES 0x0004U /*!< S7 cores */ ++#if defined(RGX_FEATURE_HWPERF_OCEANIC) ++#define RGX_CNTBLK_ID_DIRECT_LAST 0x0003U /*!< Indirect blocks start from here */ ++#else ++#define RGX_CNTBLK_ID_DIRECT_LAST 0x0005U /*!< Indirect blocks start from here */ ++#endif /* defined(RGX_FEATURE_HWPERF_OCEANIC) */ ++ ++#define RGX_CNTBLK_ID_BF_DEPRECATED 0x0005U /*!< Doppler unit (DEPRECATED) */ ++#define RGX_CNTBLK_ID_BT_DEPRECATED 0x0006U /*!< Doppler unit (DEPRECATED) */ ++#define RGX_CNTBLK_ID_RT_DEPRECATED 0x0007U /*!< Doppler unit (DEPRECATED) */ ++#define RGX_CNTBLK_ID_SH_DEPRECATED 0x0008U /*!< Ray tracing unit (DEPRECATED) */ ++ ++ ++/*! Indirectly addressable counter blocks. 
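++ * Worked example of the encoding: RGX_CNTBLK_ID_USC1 (0x0021) carries
++ * group digit 2 (USC) and unit digit 1; setting RGX_CNTBLK_ID_DA_MASK
++ * (0x8000) yields its directly-accessible form 0x8021, while the
++ * all-units form RGX_CNTBLK_ID_USC_ALL (0x4020) sets
++ * RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000) with a zero unit digit.
++ *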
DA blocks indicate counter blocks ++ * where the counter registers are directly accessible ++ */ ++#define RGX_CNTBLK_ID_TPU_MCU0 0x0010U /*!< Addressable by Dust */ ++#define RGX_CNTBLK_ID_TPU_MCU0_DA 0x8010U ++#define RGX_CNTBLK_ID_TPU_MCU1 0x0011U ++#define RGX_CNTBLK_ID_TPU_MCU1_DA 0x8011U ++#define RGX_CNTBLK_ID_TPU_MCU2 0x0012U ++#define RGX_CNTBLK_ID_TPU_MCU2_DA 0x8012U ++#define RGX_CNTBLK_ID_TPU_MCU3 0x0013U ++#define RGX_CNTBLK_ID_TPU_MCU3_DA 0x8013U ++#define RGX_CNTBLK_ID_TPU_MCU4 0x0014U ++#define RGX_CNTBLK_ID_TPU_MCU4_DA 0x8014U ++#define RGX_CNTBLK_ID_TPU_MCU5 0x0015U ++#define RGX_CNTBLK_ID_TPU_MCU5_DA 0x8015U ++#define RGX_CNTBLK_ID_TPU_MCU6 0x0016U ++#define RGX_CNTBLK_ID_TPU_MCU6_DA 0x8016U ++#define RGX_CNTBLK_ID_TPU_MCU7 0x0017U ++#define RGX_CNTBLK_ID_TPU_MCU7_DA 0x8017U ++#define RGX_CNTBLK_ID_TPU_MCU_ALL 0x4010U ++#define RGX_CNTBLK_ID_TPU_MCU_ALL_DA 0xC010U ++ ++#define RGX_CNTBLK_ID_USC0 0x0020U /*!< Addressable by Cluster */ ++#define RGX_CNTBLK_ID_USC0_DA 0x8020U ++#define RGX_CNTBLK_ID_USC1 0x0021U ++#define RGX_CNTBLK_ID_USC1_DA 0x8021U ++#define RGX_CNTBLK_ID_USC2 0x0022U ++#define RGX_CNTBLK_ID_USC2_DA 0x8022U ++#define RGX_CNTBLK_ID_USC3 0x0023U ++#define RGX_CNTBLK_ID_USC3_DA 0x8023U ++#define RGX_CNTBLK_ID_USC4 0x0024U ++#define RGX_CNTBLK_ID_USC4_DA 0x8024U ++#define RGX_CNTBLK_ID_USC5 0x0025U ++#define RGX_CNTBLK_ID_USC5_DA 0x8025U ++#define RGX_CNTBLK_ID_USC6 0x0026U ++#define RGX_CNTBLK_ID_USC6_DA 0x8026U ++#define RGX_CNTBLK_ID_USC7 0x0027U ++#define RGX_CNTBLK_ID_USC7_DA 0x8027U ++#define RGX_CNTBLK_ID_USC8 0x0028U ++#define RGX_CNTBLK_ID_USC8_DA 0x8028U ++#define RGX_CNTBLK_ID_USC9 0x0029U ++#define RGX_CNTBLK_ID_USC9_DA 0x8029U ++#define RGX_CNTBLK_ID_USC10 0x002AU ++#define RGX_CNTBLK_ID_USC10_DA 0x802AU ++#define RGX_CNTBLK_ID_USC11 0x002BU ++#define RGX_CNTBLK_ID_USC11_DA 0x802BU ++#define RGX_CNTBLK_ID_USC12 0x002CU ++#define RGX_CNTBLK_ID_USC12_DA 0x802CU ++#define RGX_CNTBLK_ID_USC13 0x002DU ++#define RGX_CNTBLK_ID_USC13_DA 0x802DU ++#define RGX_CNTBLK_ID_USC14 0x002EU ++#define RGX_CNTBLK_ID_USC14_DA 0x802EU ++#define RGX_CNTBLK_ID_USC15 0x002FU ++#define RGX_CNTBLK_ID_USC15_DA 0x802FU ++#define RGX_CNTBLK_ID_USC_ALL 0x4020U ++#define RGX_CNTBLK_ID_USC_ALL_DA 0xC020U ++ ++#define RGX_CNTBLK_ID_TEXAS0 0x0030U /*!< Addressable by Phantom in XT, Dust in S7 */ ++#define RGX_CNTBLK_ID_TEXAS1 0x0031U ++#define RGX_CNTBLK_ID_TEXAS2 0x0032U ++#define RGX_CNTBLK_ID_TEXAS3 0x0033U ++#define RGX_CNTBLK_ID_TEXAS4 0x0034U ++#define RGX_CNTBLK_ID_TEXAS5 0x0035U ++#define RGX_CNTBLK_ID_TEXAS6 0x0036U ++#define RGX_CNTBLK_ID_TEXAS7 0x0037U ++#define RGX_CNTBLK_ID_TEXAS_ALL 0x4030U ++ ++#define RGX_CNTBLK_ID_RASTER0 0x0040U /*!< Addressable by Phantom, XT only */ ++#define RGX_CNTBLK_ID_RASTER1 0x0041U ++#define RGX_CNTBLK_ID_RASTER2 0x0042U ++#define RGX_CNTBLK_ID_RASTER3 0x0043U ++#define RGX_CNTBLK_ID_RASTER_ALL 0x4040U ++ ++#define RGX_CNTBLK_ID_BLACKPEARL0 0x0050U /*!< Addressable by Phantom, S7, only */ ++#define RGX_CNTBLK_ID_BLACKPEARL1 0x0051U ++#define RGX_CNTBLK_ID_BLACKPEARL2 0x0052U ++#define RGX_CNTBLK_ID_BLACKPEARL3 0x0053U ++#define RGX_CNTBLK_ID_BLACKPEARL_ALL 0x4050U ++ ++#define RGX_CNTBLK_ID_PBE0 0x0060U /*!< Addressable by Cluster in S7 and PBE2_IN_XE */ ++#define RGX_CNTBLK_ID_PBE1 0x0061U ++#define RGX_CNTBLK_ID_PBE2 0x0062U ++#define RGX_CNTBLK_ID_PBE3 0x0063U ++#define RGX_CNTBLK_ID_PBE4 0x0064U ++#define RGX_CNTBLK_ID_PBE5 0x0065U ++#define RGX_CNTBLK_ID_PBE6 0x0066U ++#define RGX_CNTBLK_ID_PBE7 0x0067U ++#define 
RGX_CNTBLK_ID_PBE8 0x0068U ++#define RGX_CNTBLK_ID_PBE9 0x0069U ++#define RGX_CNTBLK_ID_PBE10 0x006AU ++#define RGX_CNTBLK_ID_PBE11 0x006BU ++#define RGX_CNTBLK_ID_PBE12 0x006CU ++#define RGX_CNTBLK_ID_PBE13 0x006DU ++#define RGX_CNTBLK_ID_PBE14 0x006EU ++#define RGX_CNTBLK_ID_PBE15 0x006FU ++#define RGX_CNTBLK_ID_PBE_ALL 0x4060U ++ ++#define RGX_CNTBLK_ID_LAST 0x0070U /*!< End of PBE block */ ++ ++#define RGX_CNTBLK_ID_BX_TU0_DEPRECATED 0x0070U /*!< Doppler unit, DEPRECATED */ ++#define RGX_CNTBLK_ID_BX_TU1_DEPRECATED 0x0071U ++#define RGX_CNTBLK_ID_BX_TU2_DEPRECATED 0x0072U ++#define RGX_CNTBLK_ID_BX_TU3_DEPRECATED 0x0073U ++#define RGX_CNTBLK_ID_BX_TU_ALL_DEPRECATED 0x4070U ++ ++#define RGX_CNTBLK_ID_CUSTOM0 0x70F0U ++#define RGX_CNTBLK_ID_CUSTOM1 0x70F1U ++#define RGX_CNTBLK_ID_CUSTOM2 0x70F2U ++#define RGX_CNTBLK_ID_CUSTOM3 0x70F3U ++#define RGX_CNTBLK_ID_CUSTOM4_FW 0x70F4U /*!< Custom block used for getting statistics held in the FW */ ++#define RGX_CNTBLK_ID_CUSTOM_MASK 0x70FFU ++ ++ ++/* Masks for the counter block ID*/ ++#define RGX_CNTBLK_ID_UNIT_MASK (0x000FU) ++#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U) ++#define RGX_CNTBLK_ID_GROUP_SHIFT (4U) ++#define RGX_CNTBLK_ID_MC_GPU_MASK (0x0F00U) ++#define RGX_CNTBLK_ID_MC_GPU_SHIFT (8U) ++#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U) ++#define RGX_CNTBLK_ID_DA_MASK (0x8000U) /*!< Block with directly accessible counter registers */ ++ ++#define RGX_CNTBLK_INDIRECT_COUNT(_class, _n) ((IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## _n) - (IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## 0) + 1u) ++ ++/*! The number of layout blocks defined with configurable multiplexed ++ * performance counters, hence excludes custom counter blocks. ++ */ ++#if defined(RGX_FEATURE_HWPERF_OCEANIC) ++#define RGX_HWPERF_MAX_MUX_BLKS (\ ++ (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ ++ RGX_CNTBLK_INDIRECT_COUNT(PBE, 0) ) ++ ++#define RGX_HWPERF_MAX_DA_BLKS (\ ++ (IMG_UINT32)RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 0)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 0) ) ++ ++#define RGX_HWPERF_MAX_DEFINED_BLKS (\ ++ (IMG_UINT32)RGX_HWPERF_MAX_MUX_BLKS +\ ++ RGX_HWPERF_MAX_DA_BLKS ) ++#else ++#define RGX_HWPERF_MAX_DEFINED_BLKS (\ ++ (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) ) ++#define RGX_HWPERF_MAX_MUX_BLKS (\ ++ RGX_HWPERF_MAX_DEFINED_BLKS ) ++#endif ++ ++static_assert( ++ ((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN), ++ "RGX_HWPERF_MAX_BVNC_BLOCK_LEN insufficient"); ++ ++#define RGX_HWPERF_EVENT_MASK_VALUE(e) (IMG_UINT64_C(1) << (IMG_UINT32)(e)) ++ ++#define RGX_CUSTOM_FW_CNTRS \ ++ X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ ++ \ ++ X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ ++ \ ++ X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ ++ 
RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ ++ \ ++ X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ ++ \ ++ X(ISP_TILES_IN_FLIGHT, 0x4, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DSPMKICK)) ++ ++/*! Counter IDs for the firmware held statistics */ ++typedef enum ++{ ++#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id, ++ RGX_CUSTOM_FW_CNTRS ++#undef X ++ ++ /* always the last entry in the list */ ++ RGX_CUSTOM_FW_CNTR_LAST ++} RGX_HWPERF_CUSTOM_FW_CNTR_ID; ++ ++/*! Identifier for each counter in a performance counting module */ ++typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID; ++ ++#define RGX_CNTBLK_COUNTER0_ID 0U ++#define RGX_CNTBLK_COUNTER1_ID 1U ++#define RGX_CNTBLK_COUNTER2_ID 2U ++#define RGX_CNTBLK_COUNTER3_ID 3U ++#define RGX_CNTBLK_COUNTER4_ID 4U ++#define RGX_CNTBLK_COUNTER5_ID 5U ++ /* MAX value used in server handling of counter config arrays */ ++#define RGX_CNTBLK_MUX_COUNTERS_MAX 6U ++ ++ ++/* sets all the bits from bit _b1 to _b2, in a IMG_UINT64 type */ ++#define MASK_RANGE_IMPL(b1, b2) ((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)(b1)) ++#define MASK_RANGE(R) MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE) ++#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e)) ++ ++/*! Mask macros for use with RGXCtrlHWPerf() API. ++ */ ++#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_HWPERF_EVENT_MASK_DEFAULT RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_FWACT) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) ++#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++ ++/*! HWPerf Firmware event masks ++ * @par ++ * All FW Start/End/Debug (SED) events. */ ++#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE)) ++ ++#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) ++#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED)) ++/*! All FW events. */ ++#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\ ++ RGX_HWPERF_EVENT_MASK_FW_UFO |\ ++ RGX_HWPERF_EVENT_MASK_FW_CSW) ++ ++/*! HW Periodic events (1ms interval). */ ++#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC)) ++/*! All HW Kick/Finish events. */ ++#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\ ++ MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\ ++ ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC)) ++ ++#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\ ++ RGX_HWPERF_EVENT_MASK_HW_PERIODIC) ++ ++#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE)) ++ ++#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG)) ++ ++/*! HWPerf Host event masks ++ */ ++#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ)) ++#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO)) ++#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC)) ++ ++ ++/*! 
Type used in the RGX API RGXConfigMuxHWPerfCounters() */ ++typedef struct ++{ ++ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ ++ IMG_UINT16 ui16BlockID; ++ ++ /*! 4 or 6 LSBs used to select counters to configure in this block. */ ++ IMG_UINT8 ui8CounterSelect; ++ ++ /*! 4 or 6 LSBs used as MODE bits for the counters in the group. */ ++ IMG_UINT8 ui8Mode; ++ ++ /*! 5 or 6 LSBs used as the GROUP_SELECT value for the counter. */ ++ IMG_UINT8 aui8GroupSelect[RGX_CNTBLK_MUX_COUNTERS_MAX]; ++ ++ /*! 16 LSBs used as the BIT_SELECT value for the counter. */ ++ IMG_UINT16 aui16BitSelect[RGX_CNTBLK_MUX_COUNTERS_MAX]; ++ ++ /*! 14 LSBs used as the BATCH_MAX value for the counter. */ ++ IMG_UINT32 aui32BatchMax[RGX_CNTBLK_MUX_COUNTERS_MAX]; ++ ++ /*! 14 LSBs used as the BATCH_MIN value for the counter. */ ++ IMG_UINT32 aui32BatchMin[RGX_CNTBLK_MUX_COUNTERS_MAX]; ++} UNCACHED_ALIGN RGX_HWPERF_CONFIG_MUX_CNTBLK; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_MUX_CNTBLK); ++ ++/*! Type used in the RGX API RGXConfigHWPerfCounters() */ ++typedef struct ++{ ++ /*! Reserved for future use */ ++ IMG_UINT32 ui32Reserved; ++ ++ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ ++ IMG_UINT16 ui16BlockID; ++ ++ /*! Number of configured counters within this block */ ++ IMG_UINT16 ui16NumCounters; ++ ++ /*! Counter register values */ ++ IMG_UINT16 ui16Counters[RGX_CNTBLK_COUNTERS_MAX]; ++} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK); ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_HWPERF_H_ */ ++ ++/****************************************************************************** ++ End of file ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/rogue/rgx_options.h b/drivers/gpu/drm/img-rogue/include/rogue/rgx_options.h +new file mode 100644 +index 000000000000..91fc6522d7ee +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rogue/rgx_options.h +@@ -0,0 +1,304 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX build options ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* Each build option listed here is packed into a dword which provides up to ++ * log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and ++ * (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM. ++ * The corresponding bit is set if the build option was enabled at compile ++ * time. ++ * ++ * In order to extract the enabled build flags the INTERNAL_TEST switch should ++ * be enabled in a client program which includes this header. Then the client ++ * can test specific build flags by reading the bit value at ++ * ##OPTIONNAME##_SET_OFFSET ++ * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS. ++ * ++ * IMPORTANT: add new options to unused bits or define a new dword ++ * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield ++ * remains backwards compatible. ++ */ ++ ++#ifndef RGX_OPTIONS_H ++#define RGX_OPTIONS_H ++ ++#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL ++ ++#define NO_HARDWARE_OPTION "NO_HARDWARE " ++#if defined(NO_HARDWARE) || defined(INTERNAL_TEST) ++ #define NO_HARDWARE_SET_OFFSET OPTIONS_BIT0 ++ #define OPTIONS_BIT0 (0x1UL << 0) ++ #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT0 0x0UL ++#endif /* NO_HARDWARE */ ++ ++#define PDUMP_OPTION "PDUMP " ++#if defined(PDUMP) || defined(INTERNAL_TEST) ++ #define PDUMP_SET_OFFSET OPTIONS_BIT1 ++ #define OPTIONS_BIT1 (0x1UL << 1) ++ #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT1 0x0UL ++#endif /* PDUMP */ ++ ++/* No longer used */ ++#define INTERNAL_TEST_OPTION "INTERNAL_TEST " ++#if defined(INTERNAL_TEST) ++ #define UNUSED_SET_OFFSET OPTIONS_BIT2 ++ #define OPTIONS_BIT2 (0x1UL << 2) ++ #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT2 0x0UL ++#endif ++ ++/* No longer used */ ++#define UNUSED_OPTION " " ++#if defined(INTERNAL_TEST) ++ #define OPTIONS_BIT3 (0x1UL << 3) ++ #define INTERNAL_TEST_OPTION "INTERNAL_TEST " ++ #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT3 0x0UL ++#endif ++ ++#define SUPPORT_RGX_OPTION " " ++#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST) ++ #define SUPPORT_RGX_SET_OFFSET OPTIONS_BIT4 ++ #define OPTIONS_BIT4 (0x1UL << 4) ++ #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT4 0x0UL ++#endif /* SUPPORT_RGX */ ++ ++#define SUPPORT_SECURE_EXPORT_OPTION "SECURE_EXPORTS " ++#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST) ++ #define 
SUPPORT_SECURE_EXPORT_SET_OFFSET OPTIONS_BIT5 ++ #define OPTIONS_BIT5 (0x1UL << 5) ++ #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT5 0x0UL ++#endif /* SUPPORT_SECURE_EXPORT */ ++ ++#define SUPPORT_INSECURE_EXPORT_OPTION "INSECURE_EXPORTS " ++#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST) ++ #define SUPPORT_INSECURE_EXPORT_SET_OFFSET OPTIONS_BIT6 ++ #define OPTIONS_BIT6 (0x1UL << 6) ++ #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT6 0x0UL ++#endif /* SUPPORT_INSECURE_EXPORT */ ++ ++#define SUPPORT_VFP_OPTION "VFP " ++#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST) ++ #define SUPPORT_VFP_SET_OFFSET OPTIONS_BIT7 ++ #define OPTIONS_BIT7 (0x1UL << 7) ++ #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT7 0x0UL ++#endif /* SUPPORT_VFP */ ++ ++#define SUPPORT_WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION " ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST) ++ #define SUPPORT_WORKLOAD_ESTIMATION_OFFSET OPTIONS_BIT8 ++ #define OPTIONS_BIT8 (0x1UL << 8) ++ #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT8 0x0UL ++#endif /* SUPPORT_WORKLOAD_ESTIMATION */ ++#define OPTIONS_WORKLOAD_ESTIMATION_MASK (0x1UL << 8) ++ ++#define SUPPORT_PDVFS_OPTION "PDVFS " ++#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST) ++ #define SUPPORT_PDVFS_OFFSET OPTIONS_BIT9 ++ #define OPTIONS_BIT9 (0x1UL << 9) ++ #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT9 0x0UL ++#endif /* SUPPORT_PDVFS */ ++#define OPTIONS_PDVFS_MASK (0x1UL << 9) ++ ++#define DEBUG_OPTION "DEBUG " ++#if defined(DEBUG) || defined(INTERNAL_TEST) ++ #define DEBUG_SET_OFFSET OPTIONS_BIT10 ++ #define OPTIONS_BIT10 (0x1UL << 10) ++ #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT10 0x0UL ++#endif /* DEBUG */ ++/* The bit position of this should be the same as DEBUG_SET_OFFSET option ++ * when defined. 
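++ * Keeping a fixed mask here (rather than reusing OPTIONS_BIT10, which
++ * collapses to zero in non-DEBUG builds) means the bit can always be
++ * tested; e.g., as an illustrative sketch with hypothetical variables,
++ *     ((ui32KMOptions ^ ui32FWOptions) & OPTIONS_DEBUG_MASK) != 0U
++ * flags a debug/release mismatch between components.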
++ */ ++#define OPTIONS_DEBUG_MASK (0x1UL << 10) ++ ++#define SUPPORT_BUFFER_SYNC_OPTION "BUFFER_SYNC " ++#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST) ++ #define SUPPORT_BUFFER_SYNC_SET_OFFSET OPTIONS_BIT11 ++ #define OPTIONS_BIT11 (0x1UL << 11) ++ #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT11 0x0UL ++#endif /* SUPPORT_BUFFER_SYNC */ ++ ++#define SUPPORT_AUTOVZ_OPTION "AUTOVZ " ++#if defined(SUPPORT_AUTOVZ) ++ #define SUPPORT_AUTOVZ_OFFSET OPTIONS_BIT12 ++ #define OPTIONS_BIT12 (0x1UL << 12) ++ #if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT12 0x0UL ++#endif /* SUPPORT_AUTOVZ */ ++ ++#define SUPPORT_AUTOVZ_HW_REGS_OPTION "AUTOVZ_HW_REGS " ++#if defined(SUPPORT_AUTOVZ_HW_REGS) ++ #define SUPPORT_AUTOVZ_HW_REGS_OFFSET OPTIONS_BIT13 ++ #define OPTIONS_BIT13 (0x1UL << 13) ++ #if OPTIONS_BIT13 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT13 0x0UL ++#endif /* SUPPORT_AUTOVZ_HW_REGS */ ++ ++#define RGX_FW_IRQ_OS_COUNTERS_OPTION "FW_IRQ_OS_COUNTERS " ++#if defined(RGX_FW_IRQ_OS_COUNTERS) || defined(INTERNAL_TEST) ++ #define SUPPORT_FW_IRQ_REG_COUNTERS OPTIONS_BIT14 ++ #define OPTIONS_BIT14 (0x1UL << 14) ++ #if OPTIONS_BIT14 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT14 0x0UL ++#endif /* RGX_FW_IRQ_OS_COUNTERS */ ++ ++#define VALIDATION_EN_MASK (0x1UL << 15) ++#define SUPPORT_VALIDATION_OPTION "VALIDATION " ++#if defined(SUPPORT_VALIDATION) ++ #define SUPPORT_VALIDATION_OFFSET OPTIONS_BIT15 ++ #define OPTIONS_BIT15 (0x1UL << 15) ++ #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT15 0x0UL ++#endif /* SUPPORT_VALIDATION */ ++ ++#define RGX_BUILD_OPTIONS_KM \ ++ (OPTIONS_BIT0 |\ ++ OPTIONS_BIT1 |\ ++ OPTIONS_BIT2 |\ ++ OPTIONS_BIT3 |\ ++ OPTIONS_BIT4 |\ ++ OPTIONS_BIT6 |\ ++ OPTIONS_BIT7 |\ ++ OPTIONS_BIT8 |\ ++ OPTIONS_BIT9 |\ ++ OPTIONS_BIT10 |\ ++ OPTIONS_BIT11 |\ ++ OPTIONS_BIT12 |\ ++ OPTIONS_BIT13 |\ ++ OPTIONS_BIT14 |\ ++ OPTIONS_BIT15) ++ ++#define RGX_BUILD_OPTIONS_LIST \ ++ { \ ++ NO_HARDWARE_OPTION, \ ++ PDUMP_OPTION, \ ++ INTERNAL_TEST_OPTION, \ ++ UNUSED_OPTION, \ ++ SUPPORT_RGX_OPTION, \ ++ SUPPORT_SECURE_EXPORT_OPTION, \ ++ SUPPORT_INSECURE_EXPORT_OPTION, \ ++ SUPPORT_VFP_OPTION, \ ++ SUPPORT_WORKLOAD_ESTIMATION_OPTION, \ ++ SUPPORT_PDVFS_OPTION, \ ++ DEBUG_OPTION, \ ++ SUPPORT_BUFFER_SYNC_OPTION, \ ++ SUPPORT_AUTOVZ_OPTION, \ ++ SUPPORT_AUTOVZ_HW_REGS_OPTION, \ ++ RGX_FW_IRQ_OS_COUNTERS_OPTION, \ ++ SUPPORT_VALIDATION_OPTION \ ++ } ++ ++#define RGX_BUILD_OPTIONS_MASK_FW \ ++ (RGX_BUILD_OPTIONS_MASK_KM & \ ++ ~OPTIONS_BIT11) ++ ++#define OPTIONS_BIT31 (0x1UL << 31) ++#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM ++#error "Bit exceeds reserved range" ++#endif ++#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET OPTIONS_BIT31 ++ ++#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31) ++ ++#define OPTIONS_STRICT (RGX_BUILD_OPTIONS & \ ++ ~(OPTIONS_DEBUG_MASK | \ ++ OPTIONS_WORKLOAD_ESTIMATION_MASK | \ ++ OPTIONS_PDVFS_MASK)) ++ ++#endif /* RGX_OPTIONS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/rogue/rgxheapconfig.h b/drivers/gpu/drm/img-rogue/include/rogue/rgxheapconfig.h +new file mode 100644 +index 000000000000..abb63084acef +--- /dev/null ++++ 
b/drivers/gpu/drm/img-rogue/include/rogue/rgxheapconfig.h +@@ -0,0 +1,290 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Device virtual memory map ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Memory heaps device specific configuration ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef RGXHEAPCONFIG_H ++#define RGXHEAPCONFIG_H ++ ++#include "rgxdefs_km.h" ++ ++ ++#define RGX_HEAP_SIZE_4KiB IMG_UINT64_C(0x0000001000) ++#define RGX_HEAP_SIZE_64KiB IMG_UINT64_C(0x0000010000) ++#define RGX_HEAP_SIZE_256KiB IMG_UINT64_C(0x0000040000) ++ ++#define RGX_HEAP_SIZE_1MiB IMG_UINT64_C(0x0000100000) ++#define RGX_HEAP_SIZE_2MiB IMG_UINT64_C(0x0000200000) ++#define RGX_HEAP_SIZE_4MiB IMG_UINT64_C(0x0000400000) ++#define RGX_HEAP_SIZE_16MiB IMG_UINT64_C(0x0001000000) ++#define RGX_HEAP_SIZE_256MiB IMG_UINT64_C(0x0010000000) ++ ++#define RGX_HEAP_SIZE_1GiB IMG_UINT64_C(0x0040000000) ++#define RGX_HEAP_SIZE_2GiB IMG_UINT64_C(0x0080000000) ++#define RGX_HEAP_SIZE_4GiB IMG_UINT64_C(0x0100000000) ++#define RGX_HEAP_SIZE_16GiB IMG_UINT64_C(0x0400000000) ++#define RGX_HEAP_SIZE_32GiB IMG_UINT64_C(0x0800000000) ++#define RGX_HEAP_SIZE_64GiB IMG_UINT64_C(0x1000000000) ++#define RGX_HEAP_SIZE_128GiB IMG_UINT64_C(0x2000000000) ++#define RGX_HEAP_SIZE_256GiB IMG_UINT64_C(0x4000000000) ++#define RGX_HEAP_SIZE_512GiB IMG_UINT64_C(0x8000000000) ++ ++/* ++ RGX Device Virtual Address Space Definitions ++ ++ This file defines the RGX virtual address heaps that are used in ++ application memory contexts. It also shows where the Firmware memory heap ++ fits into this, but the firmware heap is only ever created in the ++ Services KM/server component. ++ ++ RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed, ++ on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_* ++ respectively. Therefore if clients use multiple configs they must still ++ be consistent with their definitions for these heaps. ++ ++ Shared virtual memory (GENERAL_SVM) support requires half of the address ++ space (512 GiB) be reserved for SVM allocations to mirror application CPU ++ addresses. However, if BRN_65273 WA is active in which case the SVM heap ++ is disabled. This is reflected in the device connection capability bits ++ returned to user space. ++ ++ The GENERAL non-SVM region is 512 GiB to 768 GiB and is shared between the ++ general (4KiB) heap and the general non-4K heap. The first 128 GiB is used ++ for the GENERAL_HEAP (4KiB) and the last 32 GiB is used for the ++ GENERAL_NON4K_HEAP. This heap has a default page-size of 16K. ++ AppHint PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE can be used to forced it ++ to these values: 4K,64K,256K,1M,2M. ++ ++ The heaps defined for BRN_65273 _replace_ the non-BRN equivalents below ++ when this BRN WA is active on affected cores. This is different to most ++ other BRNs and hence has been given its own header file for clarity, ++ see below. This is a special case, other BRNs that need 1 or 2 additional ++ heaps should be added to this file, like BRN_63142 below. ++ NOTE: All regular heaps below greater than 1GB require a BRN_65273 WA heap. ++ ++ Base addresses have to be a multiple of 4MiB ++ Heaps must not start at 0x0000000000, as this is reserved for internal ++ use within device memory layer. ++ Range comments, those starting in column 0 below are a section heading of ++ sorts and are above the heaps in that range. Often this is the reserved ++ size of the heap within the range. ++*/ ++ ++/* This BRN requires a different virtual memory map from the standard one ++ * defined in this file below. Hence the alternative heap definitions for this ++ * BRN are provided in a separate file for clarity. 
*/
++#include "rgxheapconfig_65273.h"
++
++
++/* 0x00_0000_0000 ************************************************************/
++
++/* 0x00_0000_0000 - 0x00_0040_0000 **/
++	/* 0 MiB to 4 MiB, size of 4 MiB : RESERVED **/
++
++	/* BRN_65273 TQ3DPARAMETERS base 0x0000010000 */
++	/* BRN_65273 GENERAL base 0x65C0000000 */
++	/* BRN_65273 GENERAL_NON4K base 0x73C0000000 */
++
++/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/
++	/* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : GENERAL_SVM_HEAP **/
++	#define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000)
++	#define RGX_GENERAL_SVM_HEAP_SIZE (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_4MiB)
++
++
++/* 0x80_0000_0000 ************************************************************/
++
++/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/
++	/* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/
++	#define RGX_GENERAL_HEAP_BASE IMG_UINT64_C(0x8000000000)
++	#define RGX_GENERAL_HEAP_SIZE RGX_HEAP_SIZE_128GiB
++
++	/* BRN_65273 PDSCODEDATA base 0xA800000000 */
++
++/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF **/
++	/* 640 GiB to 704 GiB, size of 64 GiB : FREE **/
++
++/* 0xB0_0000_0000 - 0xB7_FFFF_FFFF **/
++	/* 704 GiB to 736 GiB, size of 32 GiB : FREE **/
++
++	/* BRN_65273 USCCODE base 0xBA00000000 */
++
++/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF **/
++	/* 736 GiB to 768 GiB, size of 32 GiB : GENERAL_NON4K_HEAP **/
++	#define RGX_GENERAL_NON4K_HEAP_BASE IMG_UINT64_C(0xB800000000)
++	#define RGX_GENERAL_NON4K_HEAP_SIZE RGX_HEAP_SIZE_32GiB
++
++
++/* 0xC0_0000_0000 ************************************************************/
++
++/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF **/
++	/* 768 GiB to 872 GiB, size of 104 GiB : FREE **/
++
++/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF **/
++	/* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP **/
++	#define RGX_PDSCODEDATA_HEAP_BASE IMG_UINT64_C(0xDA00000000)
++	#define RGX_PDSCODEDATA_HEAP_SIZE RGX_HEAP_SIZE_4GiB
++
++/* 0xDB_0000_0000 - 0xDB_FFFF_FFFF **/
++	/* 876 GiB to 880 GiB, size of 256 MiB (reserved 4GiB) : BRN **/
++	/* The HWBRN63142 workaround requires Region Header memory to be at the top
++	   of a 16GiB-aligned range, so that, when masked with 0x03FFFFFFFF, the
++	   address avoids aliasing PB addresses. Start at 879.75GiB. Size of 256MiB.
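++	   (As a quick check of that claim: 0xDBF0000000 & 0x03FFFFFFFF gives
++	   0x3F0000000, which equals 0x400000000 (16GiB) - 0x10000000 (256MiB),
++	   i.e. the heap does occupy the top 256MiB of a 16GiB-aligned window.)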
*/
++	#define RGX_RGNHDR_BRN_63142_HEAP_BASE IMG_UINT64_C(0xDBF0000000)
++	#define RGX_RGNHDR_BRN_63142_HEAP_SIZE RGX_HEAP_SIZE_256MiB
++
++/* 0xDC_0000_0000 - 0xDF_FFFF_FFFF **/
++	/* 880 GiB to 896 GiB, size of 16 GiB : FREE **/
++
++/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF **/
++	/* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP **/
++	#define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xE000000000)
++	#define RGX_USCCODE_HEAP_SIZE RGX_HEAP_SIZE_4GiB
++
++/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF **/
++	/* 900 GiB to 903 GiB, size of 3 GiB : RESERVED **/
++
++/* 0xE1_C000_0000 - 0xE1_FFFF_FFFF **/
++	/* 903 GiB to 904 GiB, reserved 1 GiB : FIRMWARE_HEAP **/
++
++	/* Firmware heaps are defined in rgx_heap_firmware.h, as they are not
++	   present in application memory contexts; see:
++	   RGX_FIRMWARE_RAW_HEAP_BASE
++	   RGX_FIRMWARE_RAW_HEAP_SIZE
++	   See that header for details of the other sub-heaps.
++	 */
++
++/* 0xE2_0000_0000 - 0xE3_FFFF_FFFF **/
++	/* 904 GiB to 912 GiB, size of 8 GiB : FREE **/
++
++	/* BRN_65273 VISIBILITY_TEST base 0xE400000000 */
++
++/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF **/
++	/* 912 GiB to 928 GiB, size 16 GiB : TQ3DPARAMETERS_HEAP **/
++	/* Aligned to match RGX_CR_ISP_PIXEL_BASE at 16 GiB */
++	#define RGX_TQ3DPARAMETERS_HEAP_BASE IMG_UINT64_C(0xE400000000)
++	#define RGX_TQ3DPARAMETERS_HEAP_SIZE RGX_HEAP_SIZE_16GiB
++
++/* 0xE8_0000_0000 - 0xE8_FFFF_FFFF **/
++	/* 928 GiB to 932 GiB, size of 4 GiB : FREE **/
++
++/* 0xE9_0000_0000 - 0xE9_3FFF_FFFF **/
++	/* 932 GiB to 933 GiB, size of 1 GiB : VK_CAPT_REPLAY_HEAP **/
++	#define RGX_VK_CAPT_REPLAY_HEAP_BASE IMG_UINT64_C(0xE900000000)
++	#define RGX_VK_CAPT_REPLAY_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++
++/* 0xE9_4000_0000 - 0xE9_FFFF_FFFF **/
++	/* 933 GiB to 936 GiB, size of 3 GiB : FREE **/
++
++/* 0xEA_0000_0000 - 0xEA_0000_0FFF **/
++	/* 936 GiB to 937 GiB, size of minimum heap size : SIGNALS_HEAP **/
++	/* CDM Signals heap (31 signals less one reserved for Services).
++ * Size 960B rounded up to minimum heap size */ ++ #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000) ++ #define RGX_SIGNALS_HEAP_SIZE DEVMEM_HEAP_MINIMUM_SIZE ++ ++/* 0xEA_4000_0000 - 0xEA_FFFF_FFFF **/ ++ /* 937 GiB to 940 GiB, size of 3 GiB : FREE **/ ++ ++/* 0xEB_0000_0000 - 0xEB_FFFF_FFFF **/ ++ /* 940 GiB to 944 GiB, size of 4 GiB : RESERVED VOLCANIC **/ ++ ++/* 0xEC_0000_0000 - 0xEC_001F_FFFF **/ ++ /* 944 GiB to 945 GiB, size 2 MiB : FBCDC_HEAP **/ ++ #define RGX_FBCDC_HEAP_BASE IMG_UINT64_C(0xEC00000000) ++ #define RGX_FBCDC_HEAP_SIZE RGX_HEAP_SIZE_2MiB ++ ++/* 0xEC_4000_0000 - 0xEC_401F_FFFF **/ ++ /* 945 GiB to 946 GiB, size 2 MiB : FBCDC_LARGE_HEAP **/ ++ #define RGX_FBCDC_LARGE_HEAP_BASE IMG_UINT64_C(0xEC40000000) ++ #define RGX_FBCDC_LARGE_HEAP_SIZE RGX_HEAP_SIZE_2MiB ++ ++/* 0xEC_8000_0000 - 0xED_FFFF_FFFF **/ ++ /* 946 GiB to 952 GiB, size of 6 GiB : RESERVED VOLCANIC **/ ++ ++/* 0xEE_0000_0000 - 0xEE_3FFF_FFFF **/ ++ /* 952 GiB to 953 GiB, size of 1 GiB : CMP_MISSION_RMW_HEAP **/ ++ #define RGX_CMP_MISSION_RMW_HEAP_BASE IMG_UINT64_C(0xEE00000000) ++ #define RGX_CMP_MISSION_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB ++ ++/* 0xEE_4000_0000 - 0xEE_FFFF_FFFF **/ ++ /* 953 GiB to 956 GiB, size of 3 GiB : RESERVED **/ ++ ++/* 0xEF_0000_0000 - 0xEF_3FFF_FFFF **/ ++ /* 956 GiB to 957 GiB, size of 1 GiB : CMP_SAFETY_RMW_HEAP **/ ++ #define RGX_CMP_SAFETY_RMW_HEAP_BASE IMG_UINT64_C(0xEF00000000) ++ #define RGX_CMP_SAFETY_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB ++ ++/* 0xEF_4000_0000 - 0xEF_FFFF_FFFF **/ ++ /* 957 GiB to 960 GiB, size of 3 GiB : RESERVED **/ ++ ++/* 0xF0_0000_0000 - 0xF0_FFFF_FFFF **/ ++ /* 960 GiB to 964 GiB, size of 4 GiB : TEXTURE_STATE_HEAP (36-bit aligned) */ ++ #define RGX_TEXTURE_STATE_HEAP_BASE IMG_UINT64_C(0xF000000000) ++ #define RGX_TEXTURE_STATE_HEAP_SIZE RGX_HEAP_SIZE_4GiB ++ ++/* 0xF1_0000_0000 - 0xF1_FFFF_FFFF **/ ++ /* 964 GiB to 968 GiB, size of 4 GiB : FREE **/ ++ ++/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/ ++ /* 968 GiB to 969 GiB, size of 2 MiB : VISIBILITY_TEST_HEAP **/ ++ #define RGX_VISIBILITY_TEST_HEAP_BASE IMG_UINT64_C(0xF200000000) ++ #define RGX_VISIBILITY_TEST_HEAP_SIZE RGX_HEAP_SIZE_2MiB ++ ++/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF **/ ++ /* 969 GiB to 972 GiB, size of 3 GiB : FREE **/ ++ ++ /* BRN_65273 MMU_INIA base 0xF800000000 */ ++ /* BRN_65273 MMU_INIB base 0xF900000000 */ ++ ++/* 0xF3_0000_0000 - 0xFF_FFFF_FFFF **/ ++ /* 972 GiB to 1024 GiB, size of 52 GiB : FREE **/ ++ ++ ++ ++/* 0xFF_FFFF_FFFF ************************************************************/ ++ ++/* End of RGX Device Virtual Address Space definitions */ ++ ++#endif /* RGXHEAPCONFIG_H */ ++ ++/****************************************************************************** ++ End of file (rgxheapconfig.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/rogue/rgxheapconfig_65273.h b/drivers/gpu/drm/img-rogue/include/rogue/rgxheapconfig_65273.h +new file mode 100644 +index 000000000000..31f90fee9d42 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/rogue/rgxheapconfig_65273.h +@@ -0,0 +1,124 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Device virtual memory map for BRN_65273. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Memory heaps device specific configuration ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef RGXHEAPCONFIG_65273_H
++#define RGXHEAPCONFIG_65273_H
++
++/*
++ RGX Device Virtual Address Space Definitions
++
++ This file defines the RGX virtual address replacement heaps that are used
++ in application memory contexts for BRN_65273.
++
++ The heaps defined for BRN_65273 _replace_ the non-BRN equivalents when this
++ BRN WA is active on affected cores. This is different to most other BRNs
++ and hence has been given its own header file for clarity. The SVM_HEAP is
++ also disabled and unavailable when the WA is active. This is reflected
++ in the device connection capability bits returned to user space.
++ NOTE: All regular heaps in rgxheapconfig.h greater than 1GiB require
++ a BRN_65273 WA heap.
++
++ Base addresses must be a multiple of 4MiB.
++ Heaps must not start at 0x0000000000, as this is reserved for internal
++ use within the device memory layer.
++ Range comments (those starting in column 0 below) act as section headings
++ and sit above the heaps in that range.
++*/
++
++
++/* 0x00_0000_0000 ************************************************************/
++
++/* 0x00_0001_0000 - 0x00_3FFF_FFFF **/
++	/* HWBRN65273 workaround requires TQ memory to start at 64 KiB and use a
++	 * unique single 0.99GiB PCE entry.
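++	 * (Hence the size below: RGX_HEAP_SIZE_1GiB - RGX_HEAP_SIZE_64KiB =
++	 * 0x0040000000 - 0x0000010000 = 0x003FFF0000, i.e. just under 1GiB,
++	 * which is where the 0.99GiB figure comes from.)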
*/
++	#define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE IMG_UINT64_C(0x0000010000)
++	#define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE (RGX_HEAP_SIZE_1GiB - RGX_HEAP_SIZE_64KiB)
++
++/* 0x65_C000_0000 - 0x66_3FFF_FFFF **/
++	/* HWBRN65273 workaround requires General Heap to use a unique PCE entry for each GiB in range */
++	#define RGX_GENERAL_BRN_65273_HEAP_BASE IMG_UINT64_C(0x65C0000000)
++	#define RGX_GENERAL_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2GiB
++
++/* 0x73_C000_0000 - 0x74_3FFF_FFFF **/
++	/* HWBRN65273 workaround requires Non4K memory to use a unique PCE entry for each GiB in range */
++	#define RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE IMG_UINT64_C(0x73C0000000)
++	#define RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2GiB
++
++
++/* 0x80_0000_0000 ************************************************************/
++
++/* 0xA8_0000_0000 - 0xA8_3FFF_FFFF **/
++	/* HWBRN65273 workaround requires PDS memory to use a unique single 1GiB PCE entry. */
++	#define RGX_PDSCODEDATA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xA800000000)
++	#define RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++
++/* 0xBA_0000_0000 - 0xBA_3FFF_FFFF **/
++	/* HWBRN65273 workaround requires USC memory to use a unique single 1GiB PCE entry. */
++	#define RGX_USCCODE_BRN_65273_HEAP_BASE IMG_UINT64_C(0xBA00000000)
++	#define RGX_USCCODE_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++
++
++/* 0xC0_0000_0000 ************************************************************/
++
++/* 0xE4_0000_0000 - 0xE4_001F_FFFF **/
++	/* HWBRN65273 workaround requires Visibility Test memory to use a unique single 1GiB PCE entry. */
++	#define RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE IMG_UINT64_C(0xE400000000)
++	#define RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2MiB
++
++/* 0xF8_0000_0000 - 0xF9_FFFF_FFFF **/
++	/* HWBRN65273 workaround requires two Region Header buffers 4GiB apart. */
++	#define RGX_MMU_INIA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF800000000)
++	#define RGX_MMU_INIA_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++	#define RGX_MMU_INIB_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF900000000)
++	#define RGX_MMU_INIB_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++
++
++/* 0xFF_FFFF_FFFF ************************************************************/
++
++/* End of RGX Device Virtual Address Space definitions */
++
++#endif /* RGXHEAPCONFIG_65273_H */
++
++/******************************************************************************
++ End of file (rgxheapconfig_65273.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/include/rogue/system/rgx_tc/tc_clocks.h b/drivers/gpu/drm/img-rogue/include/rogue/system/rgx_tc/tc_clocks.h
+new file mode 100644
+index 000000000000..431273de0827
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/rogue/system/rgx_tc/tc_clocks.h
+@@ -0,0 +1,158 @@
++/*************************************************************************/ /*!
++@File
++@Title System Description Header
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description This header provides system-specific declarations and macros
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(TC_CLOCKS_H)
++#define TC_CLOCKS_H
++
++/*
++ * The core clock speed is passed through a multiplier depending on the TC
++ * version.
++ *
++ * On TC_ES1: Multiplier = x3, final speed = 270MHz
++ * On TC_ES2: Multiplier = x6, final speed = 540MHz
++ * On TCF5:   Multiplier = x1, final speed = 45MHz
++ *
++ *
++ * The base (unmultiplied) speed can be adjusted using a module parameter
++ * called "sys_core_clk_speed", a number in Hz.
++ * As an example:
++ *
++ * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start
++ *
++ * would result in a core speed of 60MHz x Multiplier.
++ *
++ *
++ * The memory clock is unmultiplied and can be adjusted using a module
++ * parameter called "sys_mem_clk_speed"; this should be the number in Hz for
++ * the memory clock speed.
++ * As an example:
++ *
++ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start
++ *
++ * would attempt to start the driver with the memory clock speed set to 100MHz.
++ *
++ *
++ * The same applies to the system interface clock speed, "sys_sysif_clk_speed".
++ * It is needed for TCF5 but not for TC_ES2/ES1.
++ * As an example:
++ *
++ * PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start
++ *
++ * would attempt to start the driver with the system clock speed set to 45MHz.
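++ *
++ * (Cross-check of the figures above: the TC_ES2 default below,
++ * RGX_TC_CORE_CLOCK_SPEED of 90000000, is the 90MHz base that the x6
++ * multiplier turns into 540MHz; likewise 90MHz x3 = 270MHz on TC_ES1.)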
++ * ++ * ++ * All parameters can be specified at once, e.g., ++ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start ++ */ ++ ++#define RGX_TC_SYS_CLOCK_SPEED (25000000) /*< At the moment just used for TCF5 */ ++#define RGX_TC_CLOCK_MULTIPLEX (1) ++ ++#if defined(TC_APOLLO_TCF5_22_46_54_330) ++ #undef RGX_TC_SYS_CLOCK_SPEED ++ #define RGX_TC_CORE_CLOCK_SPEED (100000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (45000000) ++ #define RGX_TC_SYS_CLOCK_SPEED (45000000) ++#elif defined(TC_APOLLO_TCF5_22_49_21_16) || \ ++ defined(TC_APOLLO_TCF5_22_60_22_29) || \ ++ defined(TC_APOLLO_TCF5_22_75_22_25) ++ #define RGX_TC_CORE_CLOCK_SPEED (20000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (50000000) ++#elif defined(TC_APOLLO_TCF5_22_67_54_30) ++ #define RGX_TC_CORE_CLOCK_SPEED (100000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (45000000) ++#elif defined(TC_APOLLO_TCF5_22_89_204_18) ++ #define RGX_TC_CORE_CLOCK_SPEED (50000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (25000000) ++#elif defined(TC_APOLLO_TCF5_22_86_104_218) ++ #define RGX_TC_CORE_CLOCK_SPEED (30000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (40000000) ++#elif defined(TC_APOLLO_TCF5_22_88_104_318) ++ #define RGX_TC_CORE_CLOCK_SPEED (28000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (40000000) ++#elif defined(TC_APOLLO_TCF5_22_98_54_230) ++ #define RGX_TC_CORE_CLOCK_SPEED (100000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (40000000) ++#elif defined(TC_APOLLO_TCF5_22_102_54_38) ++ #define RGX_TC_CORE_CLOCK_SPEED (80000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (25000000) ++#elif defined(TC_APOLLO_TCF5_BVNC_NOT_SUPPORTED) ++ /* TC TCF5 (22.*) fallback frequencies */ ++ #undef RGX_TC_SYS_CLOCK_SPEED ++ #define RGX_TC_CORE_CLOCK_SPEED (20000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (50000000) ++ #define RGX_TC_SYS_CLOCK_SPEED (25000000) ++#elif defined(TC_APOLLO_TCF5_33_8_22_1) ++ #define RGX_TC_CORE_CLOCK_SPEED (25000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (45000000) ++#elif defined(TC_APOLLO_TCF5_REFERENCE) ++ /* TC TCF5 (Reference bitfile) */ ++ #undef RGX_TC_SYS_CLOCK_SPEED ++ #define RGX_TC_CORE_CLOCK_SPEED (50000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (50000000) ++ #define RGX_TC_SYS_CLOCK_SPEED (45000000) ++#elif defined(TC_APOLLO_BONNIE) ++ /* TC Bonnie */ ++ #define RGX_TC_CORE_CLOCK_SPEED (18000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (65000000) ++#elif defined(TC_APOLLO_ES2) ++ /* TC ES2 */ ++ #define RGX_TC_CORE_CLOCK_SPEED (90000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (104000000) ++#elif defined(TC_ORION) ++ #define RGX_TC_CORE_CLOCK_SPEED (40000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (100000000) ++ #define RGX_TC_SYS_CLOCK_SPEED (25000000) ++#elif defined(TC_APOLLO_TCF5_29_19_52_202) ++ #define RGX_TC_CORE_CLOCK_SPEED (25000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (40000000) ++#elif defined(TC_APOLLO_TCF5_29_18_204_508) ++ #define RGX_TC_CORE_CLOCK_SPEED (15000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (35000000) ++#else ++ /* TC ES1 */ ++ #define RGX_TC_CORE_CLOCK_SPEED (90000000) ++ #define RGX_TC_MEM_CLOCK_SPEED (65000000) ++#endif ++ ++#endif /* if !defined(TC_CLOCKS_H) */ +diff --git a/drivers/gpu/drm/img-rogue/include/services_km.h b/drivers/gpu/drm/img-rogue/include/services_km.h +new file mode 100644 +index 000000000000..91ee3b2f0976 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/services_km.h +@@ -0,0 +1,180 @@ ++/*************************************************************************/ /*! 
++@File ++@Title Services API Kernel mode Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exported services API details ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef SERVICES_KM_H ++#define SERVICES_KM_H ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#include "virt_validation_defs.h" ++#endif ++ ++/*! 4k page size definition */ ++#define PVRSRV_4K_PAGE_SIZE 4096UL /*!< Size of a 4K Page */ ++#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT 12 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++/*! 16k page size definition */ ++#define PVRSRV_16K_PAGE_SIZE 16384UL /*!< Size of a 16K Page */ ++#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT 14 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++/*! 64k page size definition */ ++#define PVRSRV_64K_PAGE_SIZE 65536UL /*!< Size of a 64K Page */ ++#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT 16 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++/*! 256k page size definition */ ++#define PVRSRV_256K_PAGE_SIZE 262144UL /*!< Size of a 256K Page */ ++#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT 18 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++/*! 
1MB page size definition */
++#define PVRSRV_1M_PAGE_SIZE 1048576UL /*!< Size of a 1M Page */
++#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT 20 /*!< Amount to shift an address by so that
++ it is always page-aligned */
++/*! 2MB page size definition */
++#define PVRSRV_2M_PAGE_SIZE 2097152UL /*!< Size of a 2M Page */
++#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT 21 /*!< Amount to shift an address by so that
++ it is always page-aligned */
++
++/*!
++ * @AddToGroup SRVConnectInterfaces
++ * @{
++ */
++
++#ifndef PVRSRV_DEV_CONNECTION_TYPEDEF
++#define PVRSRV_DEV_CONNECTION_TYPEDEF
++/*!
++ * Forward declaration (see connection.h)
++ */
++typedef struct PVRSRV_DEV_CONNECTION_TAG PVRSRV_DEV_CONNECTION;
++#endif
++
++/*!
++ * @Anchor SRV_FLAGS
++ * @Name SRV_FLAGS: Services connection flags
++ * Allows a per-client policy to be defined for Services.
++ * @{
++ */
++
++/*
++ * Use of the 32-bit connection flags mask
++ * ( X = taken/in use, - = available/unused )
++ *
++ * 31 27 20 6 4 0
++ * | | | | | |
++ * X---XXXXXXXX-------------XXX----
++ */
++
++#define SRV_NO_HWPERF_CLIENT_STREAM (1UL << 4) /*!< Don't create HWPerf for this connection */
++#define SRV_FLAGS_CLIENT_64BIT_COMPAT (1UL << 5) /*!< This flag gets set if the client is 64-bit compatible. */
++#define SRV_FLAGS_CLIENT_SLR_DISABLED (1UL << 6) /*!< This flag is set if the client does not want Sync Lockup Recovery (SLR) enabled. */
++#define SRV_FLAGS_PDUMPCTRL (1UL << 31) /*!< PDump Ctrl client flag */
++
++/*! @} SRV_FLAGS */
++
++/*! @} End of SRVConnectInterfaces */
++
++/*
++ * Bits 20 - 27 are used to pass information needed for validation
++ * of the GPU Virtualisation Validation mechanism. In particular:
++ *
++ * Bits:
++ * [20 - 22]: OSid of the memory region that will be used for allocations
++ * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses
++ * regarding that memory context.
++ * [26]: If the AXI Protection register will be set to secure for that OSid
++ * [27]: If the Emulator Wrapper Register checking for protection violation
++ * will be set to secure for that OSid
++ */
++
++#define VIRTVAL_FLAG_OSID_SHIFT (20)
++#define SRV_VIRTVAL_FLAG_OSID_MASK (7U << VIRTVAL_FLAG_OSID_SHIFT)
++
++#define VIRTVAL_FLAG_OSIDREG_SHIFT (23)
++#define SRV_VIRTVAL_FLAG_OSIDREG_MASK (7U << VIRTVAL_FLAG_OSIDREG_SHIFT)
++
++#define VIRTVAL_FLAG_AXIPREG_SHIFT (26)
++#define SRV_VIRTVAL_FLAG_AXIPREG_MASK (1U << VIRTVAL_FLAG_AXIPREG_SHIFT)
++
++#define VIRTVAL_FLAG_AXIPTD_SHIFT (27)
++#define SRV_VIRTVAL_FLAG_AXIPTD_MASK (1U << VIRTVAL_FLAG_AXIPTD_SHIFT)
++
++
++/* Size of pointer on a 64 bit machine */
++#define POINTER_SIZE_64BIT (8U)
++
++
++/*
++ Pdump flags which are accessible to Services clients
++*/
++#define PDUMP_NONE 0x00000000U /*puiAddrUFO.ui32Addr))
++
++/* Maximum number of sync checkpoints the firmware supports in one fence */
++#define MAX_SYNC_CHECKPOINTS_PER_FENCE 32U
++
++/*!
++ * Define to be used with SyncCheckpointAlloc() to indicate a checkpoint which
++ * represents a foreign sync point or collection of foreign sync points.
++ */
++#define SYNC_CHECKPOINT_FOREIGN_CHECKPOINT ((PVRSRV_TIMELINE) - 2U)
++
++#endif /* SYNC_CHECKPOINT_EXTERNAL_H */
+diff --git a/drivers/gpu/drm/img-rogue/include/sync_prim_internal.h b/drivers/gpu/drm/img-rogue/include/sync_prim_internal.h
+new file mode 100644
+index 000000000000..77164c2356cd
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/sync_prim_internal.h
+@@ -0,0 +1,84 @@
++/*************************************************************************/ /*!
++@File ++@Title Services internal synchronisation typedef header ++@Description Defines synchronisation types that are used internally ++ only ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef SYNC_INTERNAL_H ++#define SYNC_INTERNAL_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include ++ ++/* These are included here as the typedefs are required ++ * internally. ++ */ ++ ++typedef struct SYNC_PRIM_CONTEXT_TAG *PSYNC_PRIM_CONTEXT; ++typedef struct PVRSRV_CLIENT_SYNC_PRIM_TAG ++{ ++ volatile uint32_t __iomem *pui32LinAddr; /*!< User pointer to the primitive */ ++} PVRSRV_CLIENT_SYNC_PRIM; ++ ++/*! 
++ * Bundled information for a sync prim operation ++ * ++ * Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP ++ * Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP ++ */ ++typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP_TAG ++{ ++ #define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK (1U << 0) ++ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE (1U << 1) ++ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1U<<2)) ++ uint32_t ui32Flags; /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */ ++ PVRSRV_CLIENT_SYNC_PRIM *psSync; /*!< Pointer to the client sync primitive */ ++ uint32_t ui32FenceValue; /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */ ++ uint32_t ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */ ++} PVRSRV_CLIENT_SYNC_PRIM_OP; ++ ++#if defined(__cplusplus) ++} ++#endif ++#endif /* SYNC_INTERNAL_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/apollo_regs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/apollo_regs.h +new file mode 100644 +index 000000000000..4081e2123ac7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/apollo_regs.h +@@ -0,0 +1,108 @@ ++/*************************************************************************/ /*! ++@File ++@Title System Description Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This header provides system-specific declarations and macros ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(APOLLO_REGS_H) ++#define APOLLO_REGS_H ++ ++#include "tc_clocks.h" ++ ++/* TC TCF5 */ ++#define TC5_SYS_APOLLO_REG_PCI_BASENUM (1) ++#define TC5_SYS_APOLLO_REG_PDP2_OFFSET (0x800000) ++#define TC5_SYS_APOLLO_REG_PDP2_SIZE (0x7C4) ++ ++#define TC5_SYS_APOLLO_REG_PDP2_FBDC_OFFSET (0xA00000) ++#define TC5_SYS_APOLLO_REG_PDP2_FBDC_SIZE (0x14) ++ ++#define TC5_SYS_APOLLO_REG_HDMI_OFFSET (0xC00000) ++#define TC5_SYS_APOLLO_REG_HDMI_SIZE (0x1C) ++ ++/* TC ES2 */ ++#define TCF_TEMP_SENSOR_SPI_OFFSET 0xe ++#define TCF_TEMP_SENSOR_TO_C(raw) (((raw) * 248 / 4096) - 54) ++ ++/* Number of bytes that are broken */ ++#define SYS_DEV_MEM_BROKEN_BYTES (1024 * 1024) ++#define SYS_DEV_MEM_REGION_SIZE (0x40000000 - SYS_DEV_MEM_BROKEN_BYTES) ++ ++/* Apollo reg on base register 0 */ ++#define SYS_APOLLO_REG_PCI_BASENUM (0) ++#define SYS_APOLLO_REG_REGION_SIZE (0x00010000) ++ ++#define SYS_APOLLO_REG_SYS_OFFSET (0x0000) ++#define SYS_APOLLO_REG_SYS_SIZE (0x0400) ++ ++#define SYS_APOLLO_REG_PLL_OFFSET (0x1000) ++#define SYS_APOLLO_REG_PLL_SIZE (0x0400) ++ ++#define SYS_APOLLO_REG_HOST_OFFSET (0x4050) ++#define SYS_APOLLO_REG_HOST_SIZE (0x0014) ++ ++#define SYS_APOLLO_REG_PDP1_OFFSET (0xC000) ++#define SYS_APOLLO_REG_PDP1_SIZE (0x2000) ++ ++/* Offsets for flashing Apollo PROMs from base 0 */ ++#define APOLLO_FLASH_STAT_OFFSET (0x4058) ++#define APOLLO_FLASH_DATA_WRITE_OFFSET (0x4050) ++#define APOLLO_FLASH_RESET_OFFSET (0x4060) ++ ++#define APOLLO_FLASH_FIFO_STATUS_MASK (0xF) ++#define APOLLO_FLASH_FIFO_STATUS_SHIFT (0) ++#define APOLLO_FLASH_PROGRAM_STATUS_MASK (0xF) ++#define APOLLO_FLASH_PROGRAM_STATUS_SHIFT (16) ++ ++#define APOLLO_FLASH_PROG_COMPLETE_BIT (0x1) ++#define APOLLO_FLASH_PROG_PROGRESS_BIT (0x2) ++#define APOLLO_FLASH_PROG_FAILED_BIT (0x4) ++#define APOLLO_FLASH_INV_FILETYPE_BIT (0x8) ++ ++#define APOLLO_FLASH_FIFO_SIZE (8) ++ ++/* RGX reg on base register 1 */ ++#define SYS_RGX_REG_PCI_BASENUM (1) ++#define SYS_RGX_REG_REGION_SIZE (0x7FFFF) ++ ++/* Device memory (including HP mapping) on base register 2 */ ++#define SYS_DEV_MEM_PCI_BASENUM (2) ++ ++#endif /* APOLLO_REGS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/bonnie_tcf.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/bonnie_tcf.h +new file mode 100644 +index 000000000000..fc87ec790df9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/bonnie_tcf.h +@@ -0,0 +1,68 @@ ++/*************************************************************************/ /*! ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* bonnie_tcf.h - Bonnie TCF register definitions */ ++ ++/* tab size 4 */ ++ ++#ifndef BONNIE_TCF_DEFS_H ++#define BONNIE_TCF_DEFS_H ++ ++#define BONNIE_TCF_OFFSET_BONNIETC_REGBANK 0x00000000 ++#define BONNIE_TCF_OFFSET_TC_IFACE_COUNTERS 0x00004000 ++#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_IMGV4_RTM_TOP 0x00008000 ++#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_SECN 0x0000C000 ++#define BONNIE_TCF_OFFSET_TC_TEST_MODULE_TCF_SCRATCH_PAD_DBG 0x00010000 ++#define BONNIE_TCF_OFFSET_MULTI_CLK_ALIGN 0x00014000 ++#define BONNIE_TCF_OFFSET_ALIGN_DATA_TX 0x00018000 ++#define BONNIE_TCF_OFFSET_SAI_RX_1 0x0001C000 ++#define BONNIE_TCF_OFFSET_SAI_RX_SDR 0x00040000 ++#define BONNIE_TCF_OFFSET_SAI_TX_1 0x00044000 ++#define BONNIE_TCF_OFFSET_SAI_TX_SDR 0x00068000 ++ ++#define BONNIE_TCF_OFFSET_SAI_RX_DELTA 0x00004000 ++#define BONNIE_TCF_OFFSET_SAI_TX_DELTA 0x00004000 ++ ++#define BONNIE_TCF_OFFSET_SAI_CLK_TAPS 0x0000000C ++#define BONNIE_TCF_OFFSET_SAI_EYES 0x00000010 ++#define BONNIE_TCF_OFFSET_SAI_TRAIN_ACK 0x00000018 ++ ++ ++#endif /* BONNIE_TCF_DEFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_defs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_defs.h +new file mode 100644 +index 000000000000..6234887a1bfd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_defs.h +@@ -0,0 +1,326 @@ ++/**************************************************************************** ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Odin Memory Map - View from PCIe ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++****************************************************************************/ ++ ++#ifndef _ODIN_DEFS_H_ ++#define _ODIN_DEFS_H_ ++ ++/* These defines have not been autogenerated */ ++ ++#define PCI_VENDOR_ID_ODIN (0x1AEE) ++#define DEVICE_ID_ODIN (0x1010) ++#define DEVICE_ID_TBA (0x1CF2) ++ ++/* PCI BAR 0 contains the PDP regs and the Odin system regs */ ++#define ODN_SYS_BAR 0 ++#define ODN_SYS_REGION_SIZE 0x000800000 /* 8MB */ ++ ++#define ODN_SYS_REGS_OFFSET 0 ++#define ODN_SYS_REGS_SIZE 0x000400000 /* 4MB */ ++ ++#define ODN_PDP_REGS_OFFSET 0x000440000 ++#define ODN_PDP_REGS_SIZE 0x000040000 /* 256k */ ++ ++#define ODN_PDP2_REGS_OFFSET 0x000480000 ++#define ODN_PDP2_REGS_SIZE 0x000040000 /* 256k */ ++ ++#define ODN_PDP2_PFIM_OFFSET 0x000500000 ++#define ODN_PDP2_PFIM_SIZE 0x000040000 /* 256k */ ++ ++#define ODIN_DMA_REGS_OFFSET 0x0004C0000 ++#define ODIN_DMA_REGS_SIZE 0x000040000 /* 256k */ ++ ++#define ODIN_DMA_CHAN_REGS_SIZE 0x000001000 /* 4k */ ++ ++/* PCI BAR 2 contains the Device Under Test SOCIF 64MB region */ ++#define ODN_DUT_SOCIF_BAR 2 ++#define ODN_DUT_SOCIF_OFFSET 0x000000000 ++#define ODN_DUT_SOCIF_SIZE 0x004000000 /* 64MB */ ++ ++/* PCI BAR 4 contains the on-board 1GB DDR memory */ ++#define ODN_DDR_BAR 4 ++#define ODN_DDR_MEM_OFFSET 0x000000000 ++#define ODN_DDR_MEM_SIZE 0x040000000 /* 1GB */ ++ ++/* Odin system register banks */ ++#define ODN_REG_BANK_CORE 0x00000 ++#define ODN_REG_BANK_TCF_SPI_MASTER 0x02000 ++#define ODN_REG_BANK_ODN_CLK_BLK 0x0A000 ++#define ODN_REG_BANK_ODN_MCU_COMMUNICATOR 0x0C000 ++#define ODN_REG_BANK_DB_TYPE_ID 0x0C200 ++#define ODN_REG_BANK_DB_TYPE_ID_TYPE_TCFVUOCTA 0x000000C6U ++#define ODN_REG_BANK_DB_TYPE_ID_TYPE_MASK 0x000000C0U ++#define ODN_REG_BANK_DB_TYPE_ID_TYPE_SHIFT 0x6 ++#define ODN_REG_BANK_ODN_I2C 0x0E000 ++#define ODN_REG_BANK_MULTI_CLK_ALIGN 0x20000 ++#define ODN_REG_BANK_ALIGN_DATA_TX 0x22000 ++#define ODN_REG_BANK_SAI_RX_DDR_0 0x24000 ++#define ODN_REG_BANK_SAI_RX_DDR(n) (ODN_REG_BANK_SAI_RX_DDR_0 + (0x02000*n)) ++#define ODN_REG_BANK_SAI_TX_DDR_0 0x3A000 ++#define ODN_REG_BANK_SAI_TX_DDR(n) (ODN_REG_BANK_SAI_TX_DDR_0 + (0x02000*n)) ++#define ODN_REG_BANK_SAI_TX_SDR 0x4E000 ++ 
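++/* A usage sketch for the bank macros above (illustrative only, not from the
++ * original header; it assumes "base" is the kernel's iomapping of the
++ * ODN_SYS_BAR region and "reg" is a register offset within the bank):
++ *
++ *   u32 v = ioread32(base + ODN_REG_BANK_SAI_RX_DDR(3) + reg);
++ *
++ * where ODN_REG_BANK_SAI_RX_DDR(3) resolves to 0x24000 + 3*0x02000 = 0x2A000.
++ */
++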
++/* Odin SPI regs */ ++#define ODN_SPI_MST_ADDR_RDNWR 0x0000 ++#define ODN_SPI_MST_WDATA 0x0004 ++#define ODN_SPI_MST_RDATA 0x0008 ++#define ODN_SPI_MST_STATUS 0x000C ++#define ODN_SPI_MST_GO 0x0010 ++ ++ ++/* ++ Odin CLK regs - the odn_clk_blk module defs are not auto generated ++ */ ++#define ODN_PDP_P_CLK_OUT_DIVIDER_REG1 0x620 ++#define ODN_PDP_PCLK_ODIV1_LO_TIME_MASK 0x0000003FU ++#define ODN_PDP_PCLK_ODIV1_LO_TIME_SHIFT 0 ++#define ODN_PDP_PCLK_ODIV1_HI_TIME_MASK 0x00000FC0U ++#define ODN_PDP_PCLK_ODIV1_HI_TIME_SHIFT 6 ++ ++#define ODN_PDP_P_CLK_OUT_DIVIDER_REG2 0x624 ++#define ODN_PDP_PCLK_ODIV2_NOCOUNT_MASK 0x00000040U ++#define ODN_PDP_PCLK_ODIV2_NOCOUNT_SHIFT 6 ++#define ODN_PDP_PCLK_ODIV2_EDGE_MASK 0x00000080U ++#define ODN_PDP_PCLK_ODIV2_EDGE_SHIFT 7 ++ ++#define ODN_PDP_P_CLK_OUT_DIVIDER_REG3 0x61C ++ ++#define ODN_PDP_M_CLK_OUT_DIVIDER_REG1 0x628 ++#define ODN_PDP_MCLK_ODIV1_LO_TIME_MASK 0x0000003FU ++#define ODN_PDP_MCLK_ODIV1_LO_TIME_SHIFT 0 ++#define ODN_PDP_MCLK_ODIV1_HI_TIME_MASK 0x00000FC0U ++#define ODN_PDP_MCLK_ODIV1_HI_TIME_SHIFT 6 ++ ++#define ODN_PDP_M_CLK_OUT_DIVIDER_REG2 0x62C ++#define ODN_PDP_MCLK_ODIV2_NOCOUNT_MASK 0x00000040U ++#define ODN_PDP_MCLK_ODIV2_NOCOUNT_SHIFT 6 ++#define ODN_PDP_MCLK_ODIV2_EDGE_MASK 0x00000080U ++#define ODN_PDP_MCLK_ODIV2_EDGE_SHIFT 7 ++ ++#define ODN_PDP_P_CLK_MULTIPLIER_REG1 0x650 ++#define ODN_PDP_PCLK_MUL1_LO_TIME_MASK 0x0000003FU ++#define ODN_PDP_PCLK_MUL1_LO_TIME_SHIFT 0 ++#define ODN_PDP_PCLK_MUL1_HI_TIME_MASK 0x00000FC0U ++#define ODN_PDP_PCLK_MUL1_HI_TIME_SHIFT 6 ++ ++#define ODN_PDP_P_CLK_MULTIPLIER_REG2 0x654 ++#define ODN_PDP_PCLK_MUL2_NOCOUNT_MASK 0x00000040U ++#define ODN_PDP_PCLK_MUL2_NOCOUNT_SHIFT 6 ++#define ODN_PDP_PCLK_MUL2_EDGE_MASK 0x00000080U ++#define ODN_PDP_PCLK_MUL2_EDGE_SHIFT 7 ++ ++#define ODN_PDP_P_CLK_MULTIPLIER_REG3 0x64C ++ ++#define ODN_PDP_P_CLK_IN_DIVIDER_REG 0x658 ++#define ODN_PDP_PCLK_IDIV_LO_TIME_MASK 0x0000003FU ++#define ODN_PDP_PCLK_IDIV_LO_TIME_SHIFT 0 ++#define ODN_PDP_PCLK_IDIV_HI_TIME_MASK 0x00000FC0U ++#define ODN_PDP_PCLK_IDIV_HI_TIME_SHIFT 6 ++#define ODN_PDP_PCLK_IDIV_NOCOUNT_MASK 0x00001000U ++#define ODN_PDP_PCLK_IDIV_NOCOUNT_SHIFT 12 ++#define ODN_PDP_PCLK_IDIV_EDGE_MASK 0x00002000U ++#define ODN_PDP_PCLK_IDIV_EDGE_SHIFT 13 ++ ++/* ++ * DUT core clock input divider, multiplier and out divider. 
++ */ ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1 (0x0028) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0) ++ ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2 (0x002C) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK (0x00000080U) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT (7) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK (0x00000040U) ++#define ODN_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6) ++ ++#define ODN_DUT_CORE_CLK_MULTIPLIER1 (0x0050) ++#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_CORE_CLK_MULTIPLIER1_LO_TIME_SHIFT (0) ++ ++#define ODN_DUT_CORE_CLK_MULTIPLIER2 (0x0054) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_MASK (0x00007000U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_SHIFT (12) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_MASK (0x00000800U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_FRAC_EN_SHIFT (11) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_MASK (0x00000080U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_EDGE_SHIFT (7) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_MASK (0x00000040U) ++#define ODN_DUT_CORE_CLK_MULTIPLIER2_NOCOUNT_SHIFT (6) ++ ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1 (0x0058) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_MASK (0x00002000U) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_EDGE_SHIFT (13) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_MASK (0x00001000U) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT (12) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_CORE_CLK_IN_DIVIDER1_LO_TIME_SHIFT (0) ++ ++/* ++ * DUT interface clock input divider, multiplier and out divider. 
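++ *
++ * Each field below is programmed by a read-modify-write using its mask and
++ * shift, e.g. (an illustrative sketch, not from the original header; "val"
++ * is assumed to hold the current register value and "hi" the new HI_TIME):
++ *
++ *   val = (val & ~ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK) |
++ *         ((hi << ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_SHIFT) &
++ *          ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK);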
++ */ ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1 (0x0220) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT (0) ++ ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2 (0x0224) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_MASK (0x00000080U) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_EDGE_SHIFT (7) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_MASK (0x00000040U) ++#define ODN_DUT_IFACE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT (6) ++ ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1 (0x0250) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER1_LO_TIME_SHIFT (0) ++ ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2 (0x0254) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_MASK (0x00007000U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_SHIFT (12) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_MASK (0x00000800U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_FRAC_EN_SHIFT (11) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_MASK (0x00000080U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_EDGE_SHIFT (7) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_MASK (0x00000040U) ++#define ODN_DUT_IFACE_CLK_MULTIPLIER2_NOCOUNT_SHIFT (6) ++ ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1 (0x0258) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_MASK (0x00002000U) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_EDGE_SHIFT (13) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_MASK (0x00001000U) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_NOCOUNT_SHIFT (12) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_MASK (0x00000FC0U) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_HI_TIME_SHIFT (6) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_MASK (0x0000003FU) ++#define ODN_DUT_IFACE_CLK_IN_DIVIDER1_LO_TIME_SHIFT (0) ++ ++ ++/* ++ * Min max values from Xilinx Virtex7 data sheet DS183, for speed grade 2 ++ * All in Hz ++ */ ++#define ODN_INPUT_CLOCK_SPEED (100000000U) ++#define ODN_INPUT_CLOCK_SPEED_MIN (10000000U) ++#define ODN_INPUT_CLOCK_SPEED_MAX (933000000U) ++#define ODN_OUTPUT_CLOCK_SPEED_MIN (4690000U) ++#define ODN_OUTPUT_CLOCK_SPEED_MAX (933000000U) ++#define ODN_VCO_MIN (600000000U) ++#define ODN_VCO_MAX (1440000000U) ++#define ODN_PFD_MIN (10000000U) ++#define ODN_PFD_MAX (500000000U) ++ ++/* ++ * Max values that can be set in DRP registers ++ */ ++#define ODN_OREG_VALUE_MAX (126.875f) ++#define ODN_MREG_VALUE_MAX (126.875f) ++#define ODN_DREG_VALUE_MAX (126U) ++ ++ ++#define ODN_MMCM_LOCK_STATUS_DUT_CORE (0x00000001U) ++#define ODN_MMCM_LOCK_STATUS_DUT_IF (0x00000002U) ++#define ODN_MMCM_LOCK_STATUS_PDPP (0x00000008U) ++ ++/* ++ Odin interrupt flags ++*/ ++#define ODN_INTERRUPT_ENABLE_PDP1 (1 << ODN_INTERRUPT_ENABLE_PDP1_SHIFT) ++#define ODN_INTERRUPT_ENABLE_PDP2 (1 << ODN_INTERRUPT_ENABLE_PDP2_SHIFT) ++#define ODN_INTERRUPT_ENABLE_DUT (1 << ODN_INTERRUPT_ENABLE_DUT_SHIFT) ++#define ODN_INTERRUPT_STATUS_PDP1 (1 << ODN_INTERRUPT_STATUS_PDP1_SHIFT) ++#define ODN_INTERRUPT_STATUS_PDP2 (1 << ODN_INTERRUPT_STATUS_PDP2_SHIFT) ++#define ODN_INTERRUPT_STATUS_DUT (1 << ODN_INTERRUPT_STATUS_DUT_SHIFT) ++#define ODN_INTERRUPT_CLEAR_PDP1 (1 << ODN_INTERRUPT_CLR_PDP1_SHIFT) ++#define ODN_INTERRUPT_CLEAR_PDP2 (1 << ODN_INTERRUPT_CLR_PDP2_SHIFT) ++#define ODN_INTERRUPT_CLEAR_DUT (1 << ODN_INTERRUPT_CLR_DUT_SHIFT) ++ ++#define 
ODN_INTERRUPT_ENABLE_CDMA (1 << ODN_INTERRUPT_ENABLE_CDMA_SHIFT) ++#define ODN_INTERRUPT_STATUS_CDMA (1 << ODN_INTERRUPT_STATUS_CDMA_SHIFT) ++#define ODN_INTERRUPT_CLEAR_CDMA (1 << ODN_INTERRUPT_CLR_CDMA_SHIFT) ++ ++#define ODN_INTERRUPT_ENABLE_CDMA2 (1 << (ODN_INTERRUPT_ENABLE_CDMA_SHIFT + 1)) ++#define ODN_INTERRUPT_STATUS_CDMA2 (1 << (ODN_INTERRUPT_STATUS_CDMA_SHIFT + 1)) ++#define ODN_INTERRUPT_CLEAR_CDMA2 (1 << (ODN_INTERRUPT_CLR_CDMA_SHIFT + 1)) ++ ++/* ++ Other defines ++*/ ++#define ODN_STREAM_OFF 0 ++#define ODN_STREAM_ON 1 ++#define ODN_SYNC_GEN_DISABLE 0 ++#define ODN_SYNC_GEN_ENABLE 1 ++#define ODN_INTERLACE_DISABLE 0 ++#define ODN_INTERLACE_ENABLE 1 ++#define ODN_PIXEL_CLOCK_INVERTED 1 ++#define ODN_HSYNC_POLARITY_ACTIVE_HIGH 1 ++ ++#define ODN_PDP_INTCLR_ALL 0x000FFFFFU ++#define ODN_PDP_INTSTAT_ALL_OURUN_MASK 0x000FFFF0U ++ ++/* ++ DMA defs ++*/ ++#define ODN_CDMA_ADDR_WIDTH 35 ++#define ODN_DMA_HW_DESC_HEAP_SIZE 0x100000 ++#define ODN_DMA_CHAN_RX 0 ++#define ODN_DMA_CHAN_TX 1 ++ ++#define ODIN_DMA_TX_CHAN_NAME "tx" ++#define ODIN_DMA_RX_CHAN_NAME "rx" ++ ++/* ++ FBC defs ++*/ ++#define ODIN_PFIM_RELNUM (005U) ++ ++#endif /* _ODIN_DEFS_H_ */ ++ ++/***************************************************************************** ++ End of file (odn_defs.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_pdp_regs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_pdp_regs.h +new file mode 100644 +index 000000000000..da47a253db31 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_pdp_regs.h +@@ -0,0 +1,8540 @@ ++/*************************************************************************/ /*! ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* tab size 4 */ ++ ++#ifndef ODN_PDP_REGS_H ++#define ODN_PDP_REGS_H ++ ++/* Odin-PDP hardware register definitions */ ++ ++ ++#define ODN_PDP_GRPH1SURF_OFFSET (0x0000) ++ ++/* PDP, GRPH1SURF, GRPH1PIXFMT ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT (27) ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH (5) ++#define ODN_PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1USEGAMMA ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT (26) ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH (1) ++#define ODN_PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1USECSC ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_MASK (0x02000000) ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SHIFT (25) ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_LENGTH (1) ++#define ODN_PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK (0x01000000) ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT (24) ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH (1) ++#define ODN_PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1USELUT ++*/ ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_MASK (0x00800000) ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SHIFT (23) ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_LENGTH (1) ++#define ODN_PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2SURF_OFFSET (0x0004) ++ ++/* PDP, GRPH2SURF, GRPH2PIXFMT ++*/ ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT (27) ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH (5) ++#define ODN_PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2USEGAMMA ++*/ ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT (26) ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH (1) ++#define ODN_PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2USECSC ++*/ ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_MASK (0x02000000) ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SHIFT (25) ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_LENGTH (1) ++#define ODN_PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2LUTRWCHOICE ++*/ 
++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK (0x01000000) ++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT (24) ++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH (1) ++#define ODN_PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2USELUT ++*/ ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_MASK (0x00800000) ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SHIFT (23) ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_LENGTH (1) ++#define ODN_PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3SURF_OFFSET (0x0008) ++ ++/* PDP, GRPH3SURF, GRPH3PIXFMT ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT (27) ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH (5) ++#define ODN_PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3USEGAMMA ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT (26) ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH (1) ++#define ODN_PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3USECSC ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_MASK (0x02000000) ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SHIFT (25) ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_LENGTH (1) ++#define ODN_PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK (0x01000000) ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT (24) ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH (1) ++#define ODN_PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3USELUT ++*/ ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_MASK (0x00800000) ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SHIFT (23) ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_LENGTH (1) ++#define ODN_PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4SURF_OFFSET (0x000C) ++ ++/* PDP, GRPH4SURF, GRPH4PIXFMT ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT (27) ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH (5) ++#define ODN_PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4USEGAMMA ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT (26) ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH (1) ++#define ODN_PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4USECSC ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_MASK (0x02000000) ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SHIFT (25) ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_LENGTH (1) ++#define ODN_PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK (0x01000000) ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK 
(0x00000001) ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT (24) ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH (1) ++#define ODN_PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4USELUT ++*/ ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_MASK (0x00800000) ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SHIFT (23) ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_LENGTH (1) ++#define ODN_PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SURF_OFFSET (0x0010) ++ ++/* PDP, VID1SURF, VID1PIXFMT ++*/ ++#define ODN_PDP_VID1SURF_VID1PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_VID1SURF_VID1PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_VID1SURF_VID1PIXFMT_SHIFT (27) ++#define ODN_PDP_VID1SURF_VID1PIXFMT_LENGTH (5) ++#define ODN_PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEGAMMA ++*/ ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_SHIFT (26) ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USECSC ++*/ ++#define ODN_PDP_VID1SURF_VID1USECSC_MASK (0x02000000) ++#define ODN_PDP_VID1SURF_VID1USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USECSC_SHIFT (25) ++#define ODN_PDP_VID1SURF_VID1USECSC_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEI2P ++*/ ++#define ODN_PDP_VID1SURF_VID1USEI2P_MASK (0x01000000) ++#define ODN_PDP_VID1SURF_VID1USEI2P_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USEI2P_SHIFT (24) ++#define ODN_PDP_VID1SURF_VID1USEI2P_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1COSITED ++*/ ++#define ODN_PDP_VID1SURF_VID1COSITED_MASK (0x00800000) ++#define ODN_PDP_VID1SURF_VID1COSITED_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1COSITED_SHIFT (23) ++#define ODN_PDP_VID1SURF_VID1COSITED_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEHQCD ++*/ ++#define ODN_PDP_VID1SURF_VID1USEHQCD_MASK (0x00400000) ++#define ODN_PDP_VID1SURF_VID1USEHQCD_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USEHQCD_SHIFT (22) ++#define ODN_PDP_VID1SURF_VID1USEHQCD_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEINSTREAM ++*/ ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_MASK (0x00200000) ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SHIFT (21) ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_LENGTH (1) ++#define ODN_PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SURF_OFFSET (0x0014) ++ ++/* PDP, VID2SURF, VID2PIXFMT ++*/ ++#define ODN_PDP_VID2SURF_VID2PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_VID2SURF_VID2PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_VID2SURF_VID2PIXFMT_SHIFT (27) ++#define ODN_PDP_VID2SURF_VID2PIXFMT_LENGTH (5) ++#define ODN_PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SURF, VID2COSITED ++*/ ++#define ODN_PDP_VID2SURF_VID2COSITED_MASK (0x00800000) ++#define ODN_PDP_VID2SURF_VID2COSITED_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SURF_VID2COSITED_SHIFT (23) ++#define ODN_PDP_VID2SURF_VID2COSITED_LENGTH (1) ++#define ODN_PDP_VID2SURF_VID2COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SURF, VID2USEGAMMA ++*/ ++#define 
ODN_PDP_VID2SURF_VID2USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_VID2SURF_VID2USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SURF_VID2USEGAMMA_SHIFT (26) ++#define ODN_PDP_VID2SURF_VID2USEGAMMA_LENGTH (1) ++#define ODN_PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SURF, VID2USECSC ++*/ ++#define ODN_PDP_VID2SURF_VID2USECSC_MASK (0x02000000) ++#define ODN_PDP_VID2SURF_VID2USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SURF_VID2USECSC_SHIFT (25) ++#define ODN_PDP_VID2SURF_VID2USECSC_LENGTH (1) ++#define ODN_PDP_VID2SURF_VID2USECSC_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SURF_OFFSET (0x0018) ++ ++/* PDP, VID3SURF, VID3PIXFMT ++*/ ++#define ODN_PDP_VID3SURF_VID3PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_VID3SURF_VID3PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_VID3SURF_VID3PIXFMT_SHIFT (27) ++#define ODN_PDP_VID3SURF_VID3PIXFMT_LENGTH (5) ++#define ODN_PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SURF, VID3COSITED ++*/ ++#define ODN_PDP_VID3SURF_VID3COSITED_MASK (0x00800000) ++#define ODN_PDP_VID3SURF_VID3COSITED_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SURF_VID3COSITED_SHIFT (23) ++#define ODN_PDP_VID3SURF_VID3COSITED_LENGTH (1) ++#define ODN_PDP_VID3SURF_VID3COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SURF, VID3USEGAMMA ++*/ ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_SHIFT (26) ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_LENGTH (1) ++#define ODN_PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SURF, VID3USECSC ++*/ ++#define ODN_PDP_VID3SURF_VID3USECSC_MASK (0x02000000) ++#define ODN_PDP_VID3SURF_VID3USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SURF_VID3USECSC_SHIFT (25) ++#define ODN_PDP_VID3SURF_VID3USECSC_LENGTH (1) ++#define ODN_PDP_VID3SURF_VID3USECSC_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SURF_OFFSET (0x001C) ++ ++/* PDP, VID4SURF, VID4PIXFMT ++*/ ++#define ODN_PDP_VID4SURF_VID4PIXFMT_MASK (0xF8000000) ++#define ODN_PDP_VID4SURF_VID4PIXFMT_LSBMASK (0x0000001F) ++#define ODN_PDP_VID4SURF_VID4PIXFMT_SHIFT (27) ++#define ODN_PDP_VID4SURF_VID4PIXFMT_LENGTH (5) ++#define ODN_PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SURF, VID4COSITED ++*/ ++#define ODN_PDP_VID4SURF_VID4COSITED_MASK (0x00800000) ++#define ODN_PDP_VID4SURF_VID4COSITED_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SURF_VID4COSITED_SHIFT (23) ++#define ODN_PDP_VID4SURF_VID4COSITED_LENGTH (1) ++#define ODN_PDP_VID4SURF_VID4COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SURF, VID4USEGAMMA ++*/ ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_MASK (0x04000000) ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_SHIFT (26) ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_LENGTH (1) ++#define ODN_PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SURF, VID4USECSC ++*/ ++#define ODN_PDP_VID4SURF_VID4USECSC_MASK (0x02000000) ++#define ODN_PDP_VID4SURF_VID4USECSC_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SURF_VID4USECSC_SHIFT (25) ++#define ODN_PDP_VID4SURF_VID4USECSC_LENGTH (1) ++#define ODN_PDP_VID4SURF_VID4USECSC_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1CTRL_OFFSET (0x0020) ++ ++/* PDP, GRPH1CTRL, GRPH1STREN ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1STREN_MASK (0x80000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1CTRL_GRPH1STREN_SHIFT (31) ++#define ODN_PDP_GRPH1CTRL_GRPH1STREN_LENGTH (1) ++#define 
ODN_PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1CKEYEN ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_MASK (0x40000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT (30) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH (1) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1CKEYSRC ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT (29) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH (1) ++#define ODN_PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1BLEND ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_MASK (0x18000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SHIFT (27) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_LENGTH (2) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1BLENDPOS ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT (24) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH (3) ++#define ODN_PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1DITHEREN ++*/ ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_MASK (0x00800000) ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT (23) ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH (1) ++#define ODN_PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2CTRL_OFFSET (0x0024) ++ ++/* PDP, GRPH2CTRL, GRPH2STREN ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_MASK (0x80000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SHIFT (31) ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_LENGTH (1) ++#define ODN_PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2CKEYEN ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_MASK (0x40000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT (30) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH (1) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2CKEYSRC ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT (29) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH (1) ++#define ODN_PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2BLEND ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_MASK (0x18000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SHIFT (27) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_LENGTH (2) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2BLENDPOS ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT (24) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH (3) ++#define ODN_PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2DITHEREN ++*/ ++#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_MASK (0x00800000) ++#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK (0x00000001) ++#define 
ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT (23) ++#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH (1) ++#define ODN_PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3CTRL_OFFSET (0x0028) ++ ++/* PDP, GRPH3CTRL, GRPH3STREN ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_MASK (0x80000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SHIFT (31) ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_LENGTH (1) ++#define ODN_PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3CKEYEN ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_MASK (0x40000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT (30) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH (1) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3CKEYSRC ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT (29) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH (1) ++#define ODN_PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3BLEND ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_MASK (0x18000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SHIFT (27) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_LENGTH (2) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3BLENDPOS ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT (24) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH (3) ++#define ODN_PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3DITHEREN ++*/ ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_MASK (0x00800000) ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT (23) ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH (1) ++#define ODN_PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4CTRL_OFFSET (0x002C) ++ ++/* PDP, GRPH4CTRL, GRPH4STREN ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_MASK (0x80000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SHIFT (31) ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_LENGTH (1) ++#define ODN_PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4CKEYEN ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_MASK (0x40000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT (30) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH (1) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4CKEYSRC ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT (29) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH (1) ++#define ODN_PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4BLEND ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_MASK (0x18000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SHIFT (27) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_LENGTH (2) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4BLENDPOS 
++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT (24) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH (3) ++#define ODN_PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4DITHEREN ++*/ ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_MASK (0x00800000) ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT (23) ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH (1) ++#define ODN_PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1CTRL_OFFSET (0x0030) ++ ++/* PDP, VID1CTRL, VID1STREN ++*/ ++#define ODN_PDP_VID1CTRL_VID1STREN_MASK (0x80000000) ++#define ODN_PDP_VID1CTRL_VID1STREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1CTRL_VID1STREN_SHIFT (31) ++#define ODN_PDP_VID1CTRL_VID1STREN_LENGTH (1) ++#define ODN_PDP_VID1CTRL_VID1STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1CKEYEN ++*/ ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_MASK (0x40000000) ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_SHIFT (30) ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_LENGTH (1) ++#define ODN_PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1CKEYSRC ++*/ ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SHIFT (29) ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_LENGTH (1) ++#define ODN_PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1BLEND ++*/ ++#define ODN_PDP_VID1CTRL_VID1BLEND_MASK (0x18000000) ++#define ODN_PDP_VID1CTRL_VID1BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_VID1CTRL_VID1BLEND_SHIFT (27) ++#define ODN_PDP_VID1CTRL_VID1BLEND_LENGTH (2) ++#define ODN_PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1BLENDPOS ++*/ ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SHIFT (24) ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_LENGTH (3) ++#define ODN_PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1DITHEREN ++*/ ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_MASK (0x00800000) ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_SHIFT (23) ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_LENGTH (1) ++#define ODN_PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2CTRL_OFFSET (0x0034) ++ ++/* PDP, VID2CTRL, VID2STREN ++*/ ++#define ODN_PDP_VID2CTRL_VID2STREN_MASK (0x80000000) ++#define ODN_PDP_VID2CTRL_VID2STREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2CTRL_VID2STREN_SHIFT (31) ++#define ODN_PDP_VID2CTRL_VID2STREN_LENGTH (1) ++#define ODN_PDP_VID2CTRL_VID2STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2CKEYEN ++*/ ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_MASK (0x40000000) ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_SHIFT (30) ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_LENGTH (1) ++#define ODN_PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2CKEYSRC ++*/ ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SHIFT (29) ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_LENGTH (1) ++#define ODN_PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD 
IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2BLEND ++*/ ++#define ODN_PDP_VID2CTRL_VID2BLEND_MASK (0x18000000) ++#define ODN_PDP_VID2CTRL_VID2BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_VID2CTRL_VID2BLEND_SHIFT (27) ++#define ODN_PDP_VID2CTRL_VID2BLEND_LENGTH (2) ++#define ODN_PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2BLENDPOS ++*/ ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SHIFT (24) ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_LENGTH (3) ++#define ODN_PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2DITHEREN ++*/ ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_MASK (0x00800000) ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_SHIFT (23) ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_LENGTH (1) ++#define ODN_PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3CTRL_OFFSET (0x0038) ++ ++/* PDP, VID3CTRL, VID3STREN ++*/ ++#define ODN_PDP_VID3CTRL_VID3STREN_MASK (0x80000000) ++#define ODN_PDP_VID3CTRL_VID3STREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3CTRL_VID3STREN_SHIFT (31) ++#define ODN_PDP_VID3CTRL_VID3STREN_LENGTH (1) ++#define ODN_PDP_VID3CTRL_VID3STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3CKEYEN ++*/ ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_MASK (0x40000000) ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_SHIFT (30) ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_LENGTH (1) ++#define ODN_PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3CKEYSRC ++*/ ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SHIFT (29) ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_LENGTH (1) ++#define ODN_PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3BLEND ++*/ ++#define ODN_PDP_VID3CTRL_VID3BLEND_MASK (0x18000000) ++#define ODN_PDP_VID3CTRL_VID3BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_VID3CTRL_VID3BLEND_SHIFT (27) ++#define ODN_PDP_VID3CTRL_VID3BLEND_LENGTH (2) ++#define ODN_PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3BLENDPOS ++*/ ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SHIFT (24) ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_LENGTH (3) ++#define ODN_PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3DITHEREN ++*/ ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_MASK (0x00800000) ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_SHIFT (23) ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_LENGTH (1) ++#define ODN_PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4CTRL_OFFSET (0x003C) ++ ++/* PDP, VID4CTRL, VID4STREN ++*/ ++#define ODN_PDP_VID4CTRL_VID4STREN_MASK (0x80000000) ++#define ODN_PDP_VID4CTRL_VID4STREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4CTRL_VID4STREN_SHIFT (31) ++#define ODN_PDP_VID4CTRL_VID4STREN_LENGTH (1) ++#define ODN_PDP_VID4CTRL_VID4STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4CKEYEN ++*/ ++#define ODN_PDP_VID4CTRL_VID4CKEYEN_MASK (0x40000000) ++#define ODN_PDP_VID4CTRL_VID4CKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4CTRL_VID4CKEYEN_SHIFT (30) ++#define ODN_PDP_VID4CTRL_VID4CKEYEN_LENGTH (1) ++#define 
ODN_PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4CKEYSRC ++*/ ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_MASK (0x20000000) ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LSBMASK (0x00000001) ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SHIFT (29) ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_LENGTH (1) ++#define ODN_PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4BLEND ++*/ ++#define ODN_PDP_VID4CTRL_VID4BLEND_MASK (0x18000000) ++#define ODN_PDP_VID4CTRL_VID4BLEND_LSBMASK (0x00000003) ++#define ODN_PDP_VID4CTRL_VID4BLEND_SHIFT (27) ++#define ODN_PDP_VID4CTRL_VID4BLEND_LENGTH (2) ++#define ODN_PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4BLENDPOS ++*/ ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_MASK (0x07000000) ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LSBMASK (0x00000007) ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SHIFT (24) ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_LENGTH (3) ++#define ODN_PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4DITHEREN ++*/ ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_MASK (0x00800000) ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_SHIFT (23) ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_LENGTH (1) ++#define ODN_PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1UCTRL_OFFSET (0x0050) ++ ++/* PDP, VID1UCTRL, VID1UVHALFSTR ++*/ ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_MASK (0xC0000000) ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK (0x00000003) ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT (30) ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH (2) ++#define ODN_PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2UCTRL_OFFSET (0x0054) ++ ++/* PDP, VID2UCTRL, VID2UVHALFSTR ++*/ ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_MASK (0xC0000000) ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK (0x00000003) ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT (30) ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH (2) ++#define ODN_PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3UCTRL_OFFSET (0x0058) ++ ++/* PDP, VID3UCTRL, VID3UVHALFSTR ++*/ ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_MASK (0xC0000000) ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK (0x00000003) ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT (30) ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH (2) ++#define ODN_PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4UCTRL_OFFSET (0x005C) ++ ++/* PDP, VID4UCTRL, VID4UVHALFSTR ++*/ ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_MASK (0xC0000000) ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK (0x00000003) ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT (30) ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH (2) ++#define ODN_PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1STRIDE_OFFSET (0x0060) ++ ++/* PDP, GRPH1STRIDE, GRPH1STRIDE ++*/ ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT (22) ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH (10) ++#define ODN_PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2STRIDE_OFFSET (0x0064) ++ ++/* PDP, GRPH2STRIDE, GRPH2STRIDE ++*/ ++#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT (22) ++#define 
ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH (10) ++#define ODN_PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3STRIDE_OFFSET (0x0068) ++ ++/* PDP, GRPH3STRIDE, GRPH3STRIDE ++*/ ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT (22) ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH (10) ++#define ODN_PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4STRIDE_OFFSET (0x006C) ++ ++/* PDP, GRPH4STRIDE, GRPH4STRIDE ++*/ ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT (22) ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH (10) ++#define ODN_PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1STRIDE_OFFSET (0x0070) ++ ++/* PDP, VID1STRIDE, VID1STRIDE ++*/ ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_SHIFT (22) ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH (10) ++#define ODN_PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2STRIDE_OFFSET (0x0074) ++ ++/* PDP, VID2STRIDE, VID2STRIDE ++*/ ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_SHIFT (22) ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_LENGTH (10) ++#define ODN_PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3STRIDE_OFFSET (0x0078) ++ ++/* PDP, VID3STRIDE, VID3STRIDE ++*/ ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_SHIFT (22) ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_LENGTH (10) ++#define ODN_PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4STRIDE_OFFSET (0x007C) ++ ++/* PDP, VID4STRIDE, VID4STRIDE ++*/ ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_MASK (0xFFC00000) ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_SHIFT (22) ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_LENGTH (10) ++#define ODN_PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1SIZE_OFFSET (0x0080) ++ ++/* PDP, GRPH1SIZE, GRPH1WIDTH ++*/ ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT (16) ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH (12) ++#define ODN_PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SIZE, GRPH1HEIGHT ++*/ ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT (0) ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH (12) ++#define ODN_PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2SIZE_OFFSET (0x0084) ++ ++/* PDP, GRPH2SIZE, GRPH2WIDTH ++*/ ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT (16) ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH (12) ++#define ODN_PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SIZE, GRPH2HEIGHT ++*/ ++#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_MASK (0x00000FFF) ++#define 
ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT (0) ++#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH (12) ++#define ODN_PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3SIZE_OFFSET (0x0088) ++ ++/* PDP, GRPH3SIZE, GRPH3WIDTH ++*/ ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT (16) ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH (12) ++#define ODN_PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SIZE, GRPH3HEIGHT ++*/ ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT (0) ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH (12) ++#define ODN_PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4SIZE_OFFSET (0x008C) ++ ++/* PDP, GRPH4SIZE, GRPH4WIDTH ++*/ ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT (16) ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH (12) ++#define ODN_PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SIZE, GRPH4HEIGHT ++*/ ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT (0) ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH (12) ++#define ODN_PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SIZE_OFFSET (0x0090) ++ ++/* PDP, VID1SIZE, VID1WIDTH ++*/ ++#define ODN_PDP_VID1SIZE_VID1WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SIZE_VID1WIDTH_SHIFT (16) ++#define ODN_PDP_VID1SIZE_VID1WIDTH_LENGTH (12) ++#define ODN_PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SIZE, VID1HEIGHT ++*/ ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_SHIFT (0) ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_LENGTH (12) ++#define ODN_PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SIZE_OFFSET (0x0094) ++ ++/* PDP, VID2SIZE, VID2WIDTH ++*/ ++#define ODN_PDP_VID2SIZE_VID2WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID2SIZE_VID2WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SIZE_VID2WIDTH_SHIFT (16) ++#define ODN_PDP_VID2SIZE_VID2WIDTH_LENGTH (12) ++#define ODN_PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SIZE, VID2HEIGHT ++*/ ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_SHIFT (0) ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_LENGTH (12) ++#define ODN_PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SIZE_OFFSET (0x0098) ++ ++/* PDP, VID3SIZE, VID3WIDTH ++*/ ++#define ODN_PDP_VID3SIZE_VID3WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID3SIZE_VID3WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SIZE_VID3WIDTH_SHIFT (16) ++#define ODN_PDP_VID3SIZE_VID3WIDTH_LENGTH (12) ++#define ODN_PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SIZE, VID3HEIGHT ++*/ ++#define ODN_PDP_VID3SIZE_VID3HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID3SIZE_VID3HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SIZE_VID3HEIGHT_SHIFT (0) ++#define ODN_PDP_VID3SIZE_VID3HEIGHT_LENGTH (12) ++#define 
ODN_PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SIZE_OFFSET (0x009C) ++ ++/* PDP, VID4SIZE, VID4WIDTH ++*/ ++#define ODN_PDP_VID4SIZE_VID4WIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID4SIZE_VID4WIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SIZE_VID4WIDTH_SHIFT (16) ++#define ODN_PDP_VID4SIZE_VID4WIDTH_LENGTH (12) ++#define ODN_PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SIZE, VID4HEIGHT ++*/ ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_SHIFT (0) ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_LENGTH (12) ++#define ODN_PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1POSN_OFFSET (0x00A0) ++ ++/* PDP, GRPH1POSN, GRPH1XSTART ++*/ ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SHIFT (16) ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_LENGTH (12) ++#define ODN_PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1POSN, GRPH1YSTART ++*/ ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_MASK (0x00000FFF) ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SHIFT (0) ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_LENGTH (12) ++#define ODN_PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2POSN_OFFSET (0x00A4) ++ ++/* PDP, GRPH2POSN, GRPH2XSTART ++*/ ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SHIFT (16) ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_LENGTH (12) ++#define ODN_PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2POSN, GRPH2YSTART ++*/ ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_MASK (0x00000FFF) ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SHIFT (0) ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_LENGTH (12) ++#define ODN_PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3POSN_OFFSET (0x00A8) ++ ++/* PDP, GRPH3POSN, GRPH3XSTART ++*/ ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SHIFT (16) ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_LENGTH (12) ++#define ODN_PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3POSN, GRPH3YSTART ++*/ ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_MASK (0x00000FFF) ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SHIFT (0) ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_LENGTH (12) ++#define ODN_PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4POSN_OFFSET (0x00AC) ++ ++/* PDP, GRPH4POSN, GRPH4XSTART ++*/ ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SHIFT (16) ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_LENGTH (12) ++#define ODN_PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4POSN, GRPH4YSTART ++*/ ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_MASK (0x00000FFF) ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SHIFT (0) ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_LENGTH (12) ++#define ODN_PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1POSN_OFFSET 
(0x00B0) ++ ++/* PDP, VID1POSN, VID1XSTART ++*/ ++#define ODN_PDP_VID1POSN_VID1XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_VID1POSN_VID1XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1POSN_VID1XSTART_SHIFT (16) ++#define ODN_PDP_VID1POSN_VID1XSTART_LENGTH (12) ++#define ODN_PDP_VID1POSN_VID1XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1POSN, VID1YSTART ++*/ ++#define ODN_PDP_VID1POSN_VID1YSTART_MASK (0x00000FFF) ++#define ODN_PDP_VID1POSN_VID1YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1POSN_VID1YSTART_SHIFT (0) ++#define ODN_PDP_VID1POSN_VID1YSTART_LENGTH (12) ++#define ODN_PDP_VID1POSN_VID1YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2POSN_OFFSET (0x00B4) ++ ++/* PDP, VID2POSN, VID2XSTART ++*/ ++#define ODN_PDP_VID2POSN_VID2XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_VID2POSN_VID2XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2POSN_VID2XSTART_SHIFT (16) ++#define ODN_PDP_VID2POSN_VID2XSTART_LENGTH (12) ++#define ODN_PDP_VID2POSN_VID2XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2POSN, VID2YSTART ++*/ ++#define ODN_PDP_VID2POSN_VID2YSTART_MASK (0x00000FFF) ++#define ODN_PDP_VID2POSN_VID2YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2POSN_VID2YSTART_SHIFT (0) ++#define ODN_PDP_VID2POSN_VID2YSTART_LENGTH (12) ++#define ODN_PDP_VID2POSN_VID2YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3POSN_OFFSET (0x00B8) ++ ++/* PDP, VID3POSN, VID3XSTART ++*/ ++#define ODN_PDP_VID3POSN_VID3XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_VID3POSN_VID3XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3POSN_VID3XSTART_SHIFT (16) ++#define ODN_PDP_VID3POSN_VID3XSTART_LENGTH (12) ++#define ODN_PDP_VID3POSN_VID3XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3POSN, VID3YSTART ++*/ ++#define ODN_PDP_VID3POSN_VID3YSTART_MASK (0x00000FFF) ++#define ODN_PDP_VID3POSN_VID3YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3POSN_VID3YSTART_SHIFT (0) ++#define ODN_PDP_VID3POSN_VID3YSTART_LENGTH (12) ++#define ODN_PDP_VID3POSN_VID3YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4POSN_OFFSET (0x00BC) ++ ++/* PDP, VID4POSN, VID4XSTART ++*/ ++#define ODN_PDP_VID4POSN_VID4XSTART_MASK (0x0FFF0000) ++#define ODN_PDP_VID4POSN_VID4XSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4POSN_VID4XSTART_SHIFT (16) ++#define ODN_PDP_VID4POSN_VID4XSTART_LENGTH (12) ++#define ODN_PDP_VID4POSN_VID4XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4POSN, VID4YSTART ++*/ ++#define ODN_PDP_VID4POSN_VID4YSTART_MASK (0x00000FFF) ++#define ODN_PDP_VID4POSN_VID4YSTART_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4POSN_VID4YSTART_SHIFT (0) ++#define ODN_PDP_VID4POSN_VID4YSTART_LENGTH (12) ++#define ODN_PDP_VID4POSN_VID4YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1GALPHA_OFFSET (0x00C0) ++ ++/* PDP, GRPH1GALPHA, GRPH1GALPHA ++*/ ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_MASK (0x000003FF) ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT (0) ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH (10) ++#define ODN_PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2GALPHA_OFFSET (0x00C4) ++ ++/* PDP, GRPH2GALPHA, GRPH2GALPHA ++*/ ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_MASK (0x000003FF) ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT (0) ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH (10) ++#define ODN_PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3GALPHA_OFFSET (0x00C8) ++ ++/* PDP, GRPH3GALPHA, GRPH3GALPHA ++*/ ++#define 
ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_MASK (0x000003FF) ++#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT (0) ++#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH (10) ++#define ODN_PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4GALPHA_OFFSET (0x00CC) ++ ++/* PDP, GRPH4GALPHA, GRPH4GALPHA ++*/ ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_MASK (0x000003FF) ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT (0) ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH (10) ++#define ODN_PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1GALPHA_OFFSET (0x00D0) ++ ++/* PDP, VID1GALPHA, VID1GALPHA ++*/ ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_MASK (0x000003FF) ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_SHIFT (0) ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_LENGTH (10) ++#define ODN_PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2GALPHA_OFFSET (0x00D4) ++ ++/* PDP, VID2GALPHA, VID2GALPHA ++*/ ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_MASK (0x000003FF) ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_SHIFT (0) ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_LENGTH (10) ++#define ODN_PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3GALPHA_OFFSET (0x00D8) ++ ++/* PDP, VID3GALPHA, VID3GALPHA ++*/ ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_MASK (0x000003FF) ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_SHIFT (0) ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_LENGTH (10) ++#define ODN_PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4GALPHA_OFFSET (0x00DC) ++ ++/* PDP, VID4GALPHA, VID4GALPHA ++*/ ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_MASK (0x000003FF) ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_SHIFT (0) ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_LENGTH (10) ++#define ODN_PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1CKEY_R_OFFSET (0x00E0) ++ ++/* PDP, GRPH1CKEY_R, GRPH1CKEY_R ++*/ ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT (0) ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH (10) ++#define ODN_PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1CKEY_GB_OFFSET (0x00E4) ++ ++/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G ++*/ ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT (16) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH (10) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B ++*/ ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT (0) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH (10) ++#define ODN_PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2CKEY_R_OFFSET (0x00E8) ++ ++/* PDP, GRPH2CKEY_R, GRPH2CKEY_R ++*/ ++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT (0) 
++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH (10) ++#define ODN_PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2CKEY_GB_OFFSET (0x00EC) ++ ++/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G ++*/ ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT (16) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH (10) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B ++*/ ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT (0) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH (10) ++#define ODN_PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3CKEY_R_OFFSET (0x00F0) ++ ++/* PDP, GRPH3CKEY_R, GRPH3CKEY_R ++*/ ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT (0) ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH (10) ++#define ODN_PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3CKEY_GB_OFFSET (0x00F4) ++ ++/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G ++*/ ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT (16) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH (10) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B ++*/ ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT (0) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH (10) ++#define ODN_PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4CKEY_R_OFFSET (0x00F8) ++ ++/* PDP, GRPH4CKEY_R, GRPH4CKEY_R ++*/ ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT (0) ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH (10) ++#define ODN_PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4CKEY_GB_OFFSET (0x00FC) ++ ++/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G ++*/ ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT (16) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH (10) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B ++*/ ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT (0) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH (10) ++#define ODN_PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1CKEY_R_OFFSET (0x0100) ++ ++/* PDP, VID1CKEY_R, VID1CKEY_R ++*/ ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SHIFT (0) ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_LENGTH (10) ++#define ODN_PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1CKEY_GB_OFFSET (0x0104) ++ ++/* PDP, VID1CKEY_GB, 
VID1CKEY_G ++*/ ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT (16) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH (10) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CKEY_GB, VID1CKEY_B ++*/ ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT (0) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH (10) ++#define ODN_PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2CKEY_R_OFFSET (0x0108) ++ ++/* PDP, VID2CKEY_R, VID2CKEY_R ++*/ ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SHIFT (0) ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_LENGTH (10) ++#define ODN_PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2CKEY_GB_OFFSET (0x010C) ++ ++/* PDP, VID2CKEY_GB, VID2CKEY_G ++*/ ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT (16) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH (10) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CKEY_GB, VID2CKEY_B ++*/ ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT (0) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH (10) ++#define ODN_PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3CKEY_R_OFFSET (0x0110) ++ ++/* PDP, VID3CKEY_R, VID3CKEY_R ++*/ ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SHIFT (0) ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_LENGTH (10) ++#define ODN_PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3CKEY_GB_OFFSET (0x0114) ++ ++/* PDP, VID3CKEY_GB, VID3CKEY_G ++*/ ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT (16) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH (10) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CKEY_GB, VID3CKEY_B ++*/ ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT (0) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH (10) ++#define ODN_PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4CKEY_R_OFFSET (0x0118) ++ ++/* PDP, VID4CKEY_R, VID4CKEY_R ++*/ ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_MASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SHIFT (0) ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_LENGTH (10) ++#define ODN_PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4CKEY_GB_OFFSET (0x011C) ++ ++/* PDP, VID4CKEY_GB, VID4CKEY_G ++*/ ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_MASK (0x03FF0000) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT (16) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH (10) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD 
IMG_FALSE ++ ++/* PDP, VID4CKEY_GB, VID4CKEY_B ++*/ ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_MASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT (0) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH (10) ++#define ODN_PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1BLND2_R_OFFSET (0x0120) ++ ++/* PDP, GRPH1BLND2_R, GRPH1PIXDBL ++*/ ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK (0x80000000) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT (31) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH (1) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1BLND2_R, GRPH1LINDBL ++*/ ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK (0x20000000) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT (29) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH (1) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R ++*/ ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1BLND2_GB_OFFSET (0x0124) ++ ++/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G ++*/ ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B ++*/ ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2BLND2_R_OFFSET (0x0128) ++ ++/* PDP, GRPH2BLND2_R, GRPH2PIXDBL ++*/ ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK (0x80000000) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT (31) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH (1) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2BLND2_R, GRPH2LINDBL ++*/ ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK (0x20000000) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT (29) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH (1) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R ++*/ ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2BLND2_GB_OFFSET (0x012C) ++ ++/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G ++*/ ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK (0x03FF0000) ++#define 
ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B ++*/ ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3BLND2_R_OFFSET (0x0130) ++ ++/* PDP, GRPH3BLND2_R, GRPH3PIXDBL ++*/ ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK (0x80000000) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT (31) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH (1) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3BLND2_R, GRPH3LINDBL ++*/ ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK (0x20000000) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT (29) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH (1) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R ++*/ ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3BLND2_GB_OFFSET (0x0134) ++ ++/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G ++*/ ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B ++*/ ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4BLND2_R_OFFSET (0x0138) ++ ++/* PDP, GRPH4BLND2_R, GRPH4PIXDBL ++*/ ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK (0x80000000) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT (31) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH (1) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4BLND2_R, GRPH4LINDBL ++*/ ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK (0x20000000) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT (29) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH (1) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R ++*/ ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH 
(10) ++#define ODN_PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4BLND2_GB_OFFSET (0x013C) ++ ++/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G ++*/ ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B ++*/ ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1BLND2_R_OFFSET (0x0140) ++ ++/* PDP, VID1BLND2_R, VID1CKEYMASK_R ++*/ ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1BLND2_GB_OFFSET (0x0144) ++ ++/* PDP, VID1BLND2_GB, VID1CKEYMASK_G ++*/ ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1BLND2_GB, VID1CKEYMASK_B ++*/ ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2BLND2_R_OFFSET (0x0148) ++ ++/* PDP, VID2BLND2_R, VID2CKEYMASK_R ++*/ ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2BLND2_GB_OFFSET (0x014C) ++ ++/* PDP, VID2BLND2_GB, VID2CKEYMASK_G ++*/ ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2BLND2_GB, VID2CKEYMASK_B ++*/ ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3BLND2_R_OFFSET (0x0150) ++ ++/* PDP, VID3BLND2_R, VID3CKEYMASK_R ++*/ ++#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT (0) ++#define 
ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3BLND2_GB_OFFSET (0x0154) ++ ++/* PDP, VID3BLND2_GB, VID3CKEYMASK_G ++*/ ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3BLND2_GB, VID3CKEYMASK_B ++*/ ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4BLND2_R_OFFSET (0x0158) ++ ++/* PDP, VID4BLND2_R, VID4CKEYMASK_R ++*/ ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT (0) ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH (10) ++#define ODN_PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4BLND2_GB_OFFSET (0x015C) ++ ++/* PDP, VID4BLND2_GB, VID4CKEYMASK_G ++*/ ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK (0x03FF0000) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT (16) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH (10) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4BLND2_GB, VID4CKEYMASK_B ++*/ ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT (0) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH (10) ++#define ODN_PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_OFFSET (0x0160) ++ ++/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD ++*/ ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK (0x00000001) ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0) ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1) ++#define ODN_PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_OFFSET (0x0164) ++ ++/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD ++*/ ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001) ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0) ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1) ++#define ODN_PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_OFFSET (0x0168) ++ ++/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD ++*/ ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001) ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0) ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1) ++#define ODN_PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_OFFSET (0x016C) ++ ++/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD 
++*/ ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001) ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0) ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1) ++#define ODN_PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1INTERLEAVE_CTRL_OFFSET (0x0170) ++ ++/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD ++*/ ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001) ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0) ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1) ++#define ODN_PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2INTERLEAVE_CTRL_OFFSET (0x0174) ++ ++/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD ++*/ ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001) ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0) ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1) ++#define ODN_PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3INTERLEAVE_CTRL_OFFSET (0x0178) ++ ++/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD ++*/ ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001) ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0) ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1) ++#define ODN_PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4INTERLEAVE_CTRL_OFFSET (0x017C) ++ ++/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD ++*/ ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001) ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0) ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1) ++#define ODN_PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1BASEADDR_OFFSET (0x0180) ++ ++/* PDP, GRPH1BASEADDR, GRPH1BASEADDR ++*/ ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT (5) ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH (27) ++#define ODN_PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2BASEADDR_OFFSET (0x0184) ++ ++/* PDP, GRPH2BASEADDR, GRPH2BASEADDR ++*/ ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT (5) ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH (27) ++#define ODN_PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3BASEADDR_OFFSET (0x0188) ++ ++/* PDP, GRPH3BASEADDR, GRPH3BASEADDR ++*/ ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT (5) ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH (27) ++#define ODN_PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4BASEADDR_OFFSET (0x018C) ++ ++/* PDP, GRPH4BASEADDR, GRPH4BASEADDR ++*/ ++#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK (0xFFFFFFE0) ++#define 
ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT (5) ++#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH (27) ++#define ODN_PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1BASEADDR_OFFSET (0x0190) ++ ++/* PDP, VID1BASEADDR, VID1BASEADDR ++*/ ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SHIFT (5) ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH (27) ++#define ODN_PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2BASEADDR_OFFSET (0x0194) ++ ++/* PDP, VID2BASEADDR, VID2BASEADDR ++*/ ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SHIFT (5) ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_LENGTH (27) ++#define ODN_PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3BASEADDR_OFFSET (0x0198) ++ ++/* PDP, VID3BASEADDR, VID3BASEADDR ++*/ ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SHIFT (5) ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_LENGTH (27) ++#define ODN_PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4BASEADDR_OFFSET (0x019C) ++ ++/* PDP, VID4BASEADDR, VID4BASEADDR ++*/ ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SHIFT (5) ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_LENGTH (27) ++#define ODN_PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1UBASEADDR_OFFSET (0x01B0) ++ ++/* PDP, VID1UBASEADDR, VID1UBASEADDR ++*/ ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT (5) ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH (27) ++#define ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2UBASEADDR_OFFSET (0x01B4) ++ ++/* PDP, VID2UBASEADDR, VID2UBASEADDR ++*/ ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT (5) ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH (27) ++#define ODN_PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3UBASEADDR_OFFSET (0x01B8) ++ ++/* PDP, VID3UBASEADDR, VID3UBASEADDR ++*/ ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT (5) ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH (27) ++#define ODN_PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4UBASEADDR_OFFSET (0x01BC) ++ ++/* PDP, VID4UBASEADDR, VID4UBASEADDR ++*/ ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT (5) ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH (27) ++#define ODN_PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VBASEADDR_OFFSET (0x01D0) ++ ++/* PDP, 
VID1VBASEADDR, VID1VBASEADDR ++*/ ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT (5) ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH (27) ++#define ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VBASEADDR_OFFSET (0x01D4) ++ ++/* PDP, VID2VBASEADDR, VID2VBASEADDR ++*/ ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT (5) ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH (27) ++#define ODN_PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VBASEADDR_OFFSET (0x01D8) ++ ++/* PDP, VID3VBASEADDR, VID3VBASEADDR ++*/ ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT (5) ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH (27) ++#define ODN_PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VBASEADDR_OFFSET (0x01DC) ++ ++/* PDP, VID4VBASEADDR, VID4VBASEADDR ++*/ ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_MASK (0xFFFFFFE0) ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK (0x07FFFFFF) ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT (5) ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH (27) ++#define ODN_PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1POSTSKIPCTRL_OFFSET (0x0230) ++ ++/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP ++*/ ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK (0x007F0000) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK (0x0000007F) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT (16) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH (7) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP ++*/ ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK (0x0000003F) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK (0x0000003F) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT (0) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH (6) ++#define ODN_PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2POSTSKIPCTRL_OFFSET (0x0234) ++ ++/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP ++*/ ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK (0x007F0000) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK (0x0000007F) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT (16) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH (7) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP ++*/ ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK (0x0000003F) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK (0x0000003F) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT (0) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH (6) ++#define ODN_PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3POSTSKIPCTRL_OFFSET (0x0238) ++ ++/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP ++*/ ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK (0x007F0000) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK (0x0000007F) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT (16) ++#define 
ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH (7) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP ++*/ ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK (0x0000003F) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK (0x0000003F) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT (0) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH (6) ++#define ODN_PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4POSTSKIPCTRL_OFFSET (0x023C) ++ ++/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP ++*/ ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK (0x007F0000) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK (0x0000007F) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT (16) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH (7) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP ++*/ ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK (0x0000003F) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK (0x0000003F) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT (0) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH (6) ++#define ODN_PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_OFFSET (0x0240) ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN ++*/ ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_OFFSET (0x0244) ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define 
ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN ++*/ ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_OFFSET (0x0248) ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN ++*/ ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define 
ODN_PDP_GRPH4DECIMATE_CTRL_OFFSET (0x024C) ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_EN ++*/ ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1DECIMATE_CTRL_OFFSET (0x0250) ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN ++*/ ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK (0x00000001) ++#define 
ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2DECIMATE_CTRL_OFFSET (0x0254) ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN ++*/ ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3DECIMATE_CTRL_OFFSET (0x0258) ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define 
ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN ++*/ ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4DECIMATE_CTRL_OFFSET (0x025C) ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE ++*/ ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE ++*/ ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN ++*/ ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK (0x00000001) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT (0) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH (1) ++#define ODN_PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SKIPCTRL_OFFSET (0x0270) ++ ++/* PDP, VID1SKIPCTRL, VID1HSKIP ++*/ ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_MASK (0x0FFF0000) ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT (16) ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH (12) ++#define ODN_PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SKIPCTRL, VID1VSKIP ++*/ ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_MASK (0x00000FFF) ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT (0) ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH (12) ++#define ODN_PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SKIPCTRL_OFFSET (0x0274) ++ ++/* PDP, VID2SKIPCTRL, VID2HSKIP ++*/ ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_MASK (0x0FFF0000) ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT (16) ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH (12) ++#define ODN_PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SKIPCTRL, VID2VSKIP ++*/ ++#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_MASK (0x00000FFF) ++#define 
ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT (0) ++#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH (12) ++#define ODN_PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SKIPCTRL_OFFSET (0x0278) ++ ++/* PDP, VID3SKIPCTRL, VID3HSKIP ++*/ ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_MASK (0x0FFF0000) ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT (16) ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH (12) ++#define ODN_PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SKIPCTRL, VID3VSKIP ++*/ ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_MASK (0x00000FFF) ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT (0) ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH (12) ++#define ODN_PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SKIPCTRL_OFFSET (0x027C) ++ ++/* PDP, VID4SKIPCTRL, VID4HSKIP ++*/ ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_MASK (0x0FFF0000) ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT (16) ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH (12) ++#define ODN_PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SKIPCTRL, VID4VSKIP ++*/ ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_MASK (0x00000FFF) ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT (0) ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH (12) ++#define ODN_PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SCALECTRL_OFFSET (0x0460) ++ ++/* PDP, VID1SCALECTRL, VID1HSCALEBP ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_MASK (0x80000000) ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT (31) ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VSCALEBP ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_MASK (0x40000000) ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT (30) ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1HSBEFOREVS ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK (0x20000000) ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT (29) ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VSURUNCTRL ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK (0x08000000) ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT (27) ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1PAN_EN ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_MASK (0x00040000) ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT (18) ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH (1) ++#define ODN_PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VORDER ++*/ ++#define 
ODN_PDP_VID1SCALECTRL_VID1VORDER_MASK (0x00030000) ++#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LSBMASK (0x00000003) ++#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SHIFT (16) ++#define ODN_PDP_VID1SCALECTRL_VID1VORDER_LENGTH (2) ++#define ODN_PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VPITCH ++*/ ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SHIFT (0) ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_LENGTH (16) ++#define ODN_PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VSINIT_OFFSET (0x0464) ++ ++/* PDP, VID1VSINIT, VID1VINITIAL1 ++*/ ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_MASK (0xFFFF0000) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SHIFT (16) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_LENGTH (16) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1VSINIT, VID1VINITIAL0 ++*/ ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_MASK (0x0000FFFF) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SHIFT (0) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_LENGTH (16) ++#define ODN_PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF0_OFFSET (0x0468) ++ ++/* PDP, VID1VCOEFF0, VID1VCOEFF0 ++*/ ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF1_OFFSET (0x046C) ++ ++/* PDP, VID1VCOEFF1, VID1VCOEFF1 ++*/ ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF2_OFFSET (0x0470) ++ ++/* PDP, VID1VCOEFF2, VID1VCOEFF2 ++*/ ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF3_OFFSET (0x0474) ++ ++/* PDP, VID1VCOEFF3, VID1VCOEFF3 ++*/ ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF4_OFFSET (0x0478) ++ ++/* PDP, VID1VCOEFF4, VID1VCOEFF4 ++*/ ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF5_OFFSET (0x047C) ++ ++/* PDP, VID1VCOEFF5, VID1VCOEFF5 ++*/ ++#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define 
ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF6_OFFSET (0x0480) ++ ++/* PDP, VID1VCOEFF6, VID1VCOEFF6 ++*/ ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF7_OFFSET (0x0484) ++ ++/* PDP, VID1VCOEFF7, VID1VCOEFF7 ++*/ ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH (32) ++#define ODN_PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1VCOEFF8_OFFSET (0x0488) ++ ++/* PDP, VID1VCOEFF8, VID1VCOEFF8 ++*/ ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_MASK (0x000000FF) ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT (0) ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH (8) ++#define ODN_PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HSINIT_OFFSET (0x048C) ++ ++/* PDP, VID1HSINIT, VID1HINITIAL ++*/ ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_MASK (0xFFFF0000) ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SHIFT (16) ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_LENGTH (16) ++#define ODN_PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1HSINIT, VID1HPITCH ++*/ ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_SHIFT (0) ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_LENGTH (16) ++#define ODN_PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF0_OFFSET (0x0490) ++ ++/* PDP, VID1HCOEFF0, VID1HCOEFF0 ++*/ ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF1_OFFSET (0x0494) ++ ++/* PDP, VID1HCOEFF1, VID1HCOEFF1 ++*/ ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF2_OFFSET (0x0498) ++ ++/* PDP, VID1HCOEFF2, VID1HCOEFF2 ++*/ ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF3_OFFSET (0x049C) ++ ++/* PDP, VID1HCOEFF3, VID1HCOEFF3 ++*/ ++#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH (32) ++#define 
ODN_PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF4_OFFSET (0x04A0) ++ ++/* PDP, VID1HCOEFF4, VID1HCOEFF4 ++*/ ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF5_OFFSET (0x04A4) ++ ++/* PDP, VID1HCOEFF5, VID1HCOEFF5 ++*/ ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF6_OFFSET (0x04A8) ++ ++/* PDP, VID1HCOEFF6, VID1HCOEFF6 ++*/ ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF7_OFFSET (0x04AC) ++ ++/* PDP, VID1HCOEFF7, VID1HCOEFF7 ++*/ ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF8_OFFSET (0x04B0) ++ ++/* PDP, VID1HCOEFF8, VID1HCOEFF8 ++*/ ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF9_OFFSET (0x04B4) ++ ++/* PDP, VID1HCOEFF9, VID1HCOEFF9 ++*/ ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF10_OFFSET (0x04B8) ++ ++/* PDP, VID1HCOEFF10, VID1HCOEFF10 ++*/ ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF11_OFFSET (0x04BC) ++ ++/* PDP, VID1HCOEFF11, VID1HCOEFF11 ++*/ ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF12_OFFSET (0x04C0) ++ ++/* PDP, VID1HCOEFF12, VID1HCOEFF12 ++*/ ++#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH (32) ++#define 
ODN_PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF13_OFFSET (0x04C4) ++ ++/* PDP, VID1HCOEFF13, VID1HCOEFF13 ++*/ ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF14_OFFSET (0x04C8) ++ ++/* PDP, VID1HCOEFF14, VID1HCOEFF14 ++*/ ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF15_OFFSET (0x04CC) ++ ++/* PDP, VID1HCOEFF15, VID1HCOEFF15 ++*/ ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH (32) ++#define ODN_PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1HCOEFF16_OFFSET (0x04D0) ++ ++/* PDP, VID1HCOEFF16, VID1HCOEFF16 ++*/ ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_MASK (0x000000FF) ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT (0) ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH (8) ++#define ODN_PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1SCALESIZE_OFFSET (0x04D4) ++ ++/* PDP, VID1SCALESIZE, VID1SCALEWIDTH ++*/ ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT (16) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH (12) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT ++*/ ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT (0) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH (12) ++#define ODN_PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CORE_ID_OFFSET (0x04E0) ++ ++/* PDP, PVR_ODN_PDP_CORE_ID, GROUP_ID ++*/ ++#define ODN_PDP_CORE_ID_GROUP_ID_MASK (0xFF000000) ++#define ODN_PDP_CORE_ID_GROUP_ID_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_ID_GROUP_ID_SHIFT (24) ++#define ODN_PDP_CORE_ID_GROUP_ID_LENGTH (8) ++#define ODN_PDP_CORE_ID_GROUP_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_ODN_PDP_CORE_ID, CORE_ID ++*/ ++#define ODN_PDP_CORE_ID_CORE_ID_MASK (0x00FF0000) ++#define ODN_PDP_CORE_ID_CORE_ID_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_ID_CORE_ID_SHIFT (16) ++#define ODN_PDP_CORE_ID_CORE_ID_LENGTH (8) ++#define ODN_PDP_CORE_ID_CORE_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_ODN_PDP_CORE_ID, CONFIG_ID ++*/ ++#define ODN_PDP_CORE_ID_CONFIG_ID_MASK (0x0000FFFF) ++#define ODN_PDP_CORE_ID_CONFIG_ID_LSBMASK (0x0000FFFF) ++#define ODN_PDP_CORE_ID_CONFIG_ID_SHIFT (0) ++#define ODN_PDP_CORE_ID_CONFIG_ID_LENGTH (16) ++#define ODN_PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CORE_REV_OFFSET (0x04F0) ++ ++/* PDP, PVR_ODN_PDP_CORE_REV, MAJOR_REV ++*/ ++#define ODN_PDP_CORE_REV_MAJOR_REV_MASK 
(0x00FF0000) ++#define ODN_PDP_CORE_REV_MAJOR_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_REV_MAJOR_REV_SHIFT (16) ++#define ODN_PDP_CORE_REV_MAJOR_REV_LENGTH (8) ++#define ODN_PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_ODN_PDP_CORE_REV, MINOR_REV ++*/ ++#define ODN_PDP_CORE_REV_MINOR_REV_MASK (0x0000FF00) ++#define ODN_PDP_CORE_REV_MINOR_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_REV_MINOR_REV_SHIFT (8) ++#define ODN_PDP_CORE_REV_MINOR_REV_LENGTH (8) ++#define ODN_PDP_CORE_REV_MINOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_ODN_PDP_CORE_REV, MAINT_REV ++*/ ++#define ODN_PDP_CORE_REV_MAINT_REV_MASK (0x000000FF) ++#define ODN_PDP_CORE_REV_MAINT_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_CORE_REV_MAINT_REV_SHIFT (0) ++#define ODN_PDP_CORE_REV_MAINT_REV_LENGTH (8) ++#define ODN_PDP_CORE_REV_MAINT_REV_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SCALECTRL_OFFSET (0x0500) ++ ++/* PDP, VID2SCALECTRL, VID2HSCALEBP ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_MASK (0x80000000) ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT (31) ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VSCALEBP ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_MASK (0x40000000) ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT (30) ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2HSBEFOREVS ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK (0x20000000) ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT (29) ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VSURUNCTRL ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK (0x08000000) ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT (27) ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2PAN_EN ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_MASK (0x00040000) ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT (18) ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH (1) ++#define ODN_PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VORDER ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_MASK (0x00030000) ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LSBMASK (0x00000003) ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SHIFT (16) ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_LENGTH (2) ++#define ODN_PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VPITCH ++*/ ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SHIFT (0) ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_LENGTH (16) ++#define ODN_PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VSINIT_OFFSET (0x0504) ++ ++/* PDP, VID2VSINIT, VID2VINITIAL1 ++*/ ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_MASK (0xFFFF0000) ++#define 
ODN_PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SHIFT (16) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_LENGTH (16) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2VSINIT, VID2VINITIAL0 ++*/ ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_MASK (0x0000FFFF) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SHIFT (0) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_LENGTH (16) ++#define ODN_PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF0_OFFSET (0x0508) ++ ++/* PDP, VID2VCOEFF0, VID2VCOEFF0 ++*/ ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF1_OFFSET (0x050C) ++ ++/* PDP, VID2VCOEFF1, VID2VCOEFF1 ++*/ ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF2_OFFSET (0x0510) ++ ++/* PDP, VID2VCOEFF2, VID2VCOEFF2 ++*/ ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF3_OFFSET (0x0514) ++ ++/* PDP, VID2VCOEFF3, VID2VCOEFF3 ++*/ ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF4_OFFSET (0x0518) ++ ++/* PDP, VID2VCOEFF4, VID2VCOEFF4 ++*/ ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF5_OFFSET (0x051C) ++ ++/* PDP, VID2VCOEFF5, VID2VCOEFF5 ++*/ ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF6_OFFSET (0x0520) ++ ++/* PDP, VID2VCOEFF6, VID2VCOEFF6 ++*/ ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF7_OFFSET (0x0524) ++ ++/* PDP, VID2VCOEFF7, VID2VCOEFF7 ++*/ ++#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT (0) ++#define 
ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH (32) ++#define ODN_PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2VCOEFF8_OFFSET (0x0528) ++ ++/* PDP, VID2VCOEFF8, VID2VCOEFF8 ++*/ ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_MASK (0x000000FF) ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT (0) ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH (8) ++#define ODN_PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HSINIT_OFFSET (0x052C) ++ ++/* PDP, VID2HSINIT, VID2HINITIAL ++*/ ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_MASK (0xFFFF0000) ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SHIFT (16) ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_LENGTH (16) ++#define ODN_PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2HSINIT, VID2HPITCH ++*/ ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_SHIFT (0) ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_LENGTH (16) ++#define ODN_PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF0_OFFSET (0x0530) ++ ++/* PDP, VID2HCOEFF0, VID2HCOEFF0 ++*/ ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF1_OFFSET (0x0534) ++ ++/* PDP, VID2HCOEFF1, VID2HCOEFF1 ++*/ ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF2_OFFSET (0x0538) ++ ++/* PDP, VID2HCOEFF2, VID2HCOEFF2 ++*/ ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF3_OFFSET (0x053C) ++ ++/* PDP, VID2HCOEFF3, VID2HCOEFF3 ++*/ ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF4_OFFSET (0x0540) ++ ++/* PDP, VID2HCOEFF4, VID2HCOEFF4 ++*/ ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF5_OFFSET (0x0544) ++ ++/* PDP, VID2HCOEFF5, VID2HCOEFF5 ++*/ ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define 
ODN_PDP_VID2HCOEFF6_OFFSET (0x0548) ++ ++/* PDP, VID2HCOEFF6, VID2HCOEFF6 ++*/ ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF7_OFFSET (0x054C) ++ ++/* PDP, VID2HCOEFF7, VID2HCOEFF7 ++*/ ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF8_OFFSET (0x0550) ++ ++/* PDP, VID2HCOEFF8, VID2HCOEFF8 ++*/ ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF9_OFFSET (0x0554) ++ ++/* PDP, VID2HCOEFF9, VID2HCOEFF9 ++*/ ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF10_OFFSET (0x0558) ++ ++/* PDP, VID2HCOEFF10, VID2HCOEFF10 ++*/ ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF11_OFFSET (0x055C) ++ ++/* PDP, VID2HCOEFF11, VID2HCOEFF11 ++*/ ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF12_OFFSET (0x0560) ++ ++/* PDP, VID2HCOEFF12, VID2HCOEFF12 ++*/ ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF13_OFFSET (0x0564) ++ ++/* PDP, VID2HCOEFF13, VID2HCOEFF13 ++*/ ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF14_OFFSET (0x0568) ++ ++/* PDP, VID2HCOEFF14, VID2HCOEFF14 ++*/ ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define 
ODN_PDP_VID2HCOEFF15_OFFSET (0x056C) ++ ++/* PDP, VID2HCOEFF15, VID2HCOEFF15 ++*/ ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH (32) ++#define ODN_PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2HCOEFF16_OFFSET (0x0570) ++ ++/* PDP, VID2HCOEFF16, VID2HCOEFF16 ++*/ ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_MASK (0x000000FF) ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT (0) ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH (8) ++#define ODN_PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2SCALESIZE_OFFSET (0x0574) ++ ++/* PDP, VID2SCALESIZE, VID2SCALEWIDTH ++*/ ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT (16) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH (12) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT ++*/ ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT (0) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH (12) ++#define ODN_PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SCALECTRL_OFFSET (0x0578) ++ ++/* PDP, VID3SCALECTRL, VID3HSCALEBP ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_MASK (0x80000000) ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT (31) ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VSCALEBP ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_MASK (0x40000000) ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT (30) ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3HSBEFOREVS ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK (0x20000000) ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT (29) ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VSURUNCTRL ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK (0x08000000) ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT (27) ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3PAN_EN ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_MASK (0x00040000) ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT (18) ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH (1) ++#define ODN_PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VORDER ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3VORDER_MASK (0x00030000) ++#define 
ODN_PDP_VID3SCALECTRL_VID3VORDER_LSBMASK (0x00000003) ++#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SHIFT (16) ++#define ODN_PDP_VID3SCALECTRL_VID3VORDER_LENGTH (2) ++#define ODN_PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VPITCH ++*/ ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SHIFT (0) ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_LENGTH (16) ++#define ODN_PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VSINIT_OFFSET (0x057C) ++ ++/* PDP, VID3VSINIT, VID3VINITIAL1 ++*/ ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_MASK (0xFFFF0000) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SHIFT (16) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_LENGTH (16) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3VSINIT, VID3VINITIAL0 ++*/ ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_MASK (0x0000FFFF) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SHIFT (0) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_LENGTH (16) ++#define ODN_PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF0_OFFSET (0x0580) ++ ++/* PDP, VID3VCOEFF0, VID3VCOEFF0 ++*/ ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF1_OFFSET (0x0584) ++ ++/* PDP, VID3VCOEFF1, VID3VCOEFF1 ++*/ ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF2_OFFSET (0x0588) ++ ++/* PDP, VID3VCOEFF2, VID3VCOEFF2 ++*/ ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF3_OFFSET (0x058C) ++ ++/* PDP, VID3VCOEFF3, VID3VCOEFF3 ++*/ ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF4_OFFSET (0x0590) ++ ++/* PDP, VID3VCOEFF4, VID3VCOEFF4 ++*/ ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF5_OFFSET (0x0594) ++ ++/* PDP, VID3VCOEFF5, VID3VCOEFF5 ++*/ ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH 
(32) ++#define ODN_PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF6_OFFSET (0x0598) ++ ++/* PDP, VID3VCOEFF6, VID3VCOEFF6 ++*/ ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF7_OFFSET (0x059C) ++ ++/* PDP, VID3VCOEFF7, VID3VCOEFF7 ++*/ ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH (32) ++#define ODN_PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3VCOEFF8_OFFSET (0x05A0) ++ ++/* PDP, VID3VCOEFF8, VID3VCOEFF8 ++*/ ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_MASK (0x000000FF) ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT (0) ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH (8) ++#define ODN_PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HSINIT_OFFSET (0x05A4) ++ ++/* PDP, VID3HSINIT, VID3HINITIAL ++*/ ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_MASK (0xFFFF0000) ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SHIFT (16) ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_LENGTH (16) ++#define ODN_PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3HSINIT, VID3HPITCH ++*/ ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_SHIFT (0) ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_LENGTH (16) ++#define ODN_PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF0_OFFSET (0x05A8) ++ ++/* PDP, VID3HCOEFF0, VID3HCOEFF0 ++*/ ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF1_OFFSET (0x05AC) ++ ++/* PDP, VID3HCOEFF1, VID3HCOEFF1 ++*/ ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF2_OFFSET (0x05B0) ++ ++/* PDP, VID3HCOEFF2, VID3HCOEFF2 ++*/ ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF3_OFFSET (0x05B4) ++ ++/* PDP, VID3HCOEFF3, VID3HCOEFF3 ++*/ ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF4_OFFSET (0x05B8) ++ ++/* PDP, 
VID3HCOEFF4, VID3HCOEFF4 ++*/ ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF5_OFFSET (0x05BC) ++ ++/* PDP, VID3HCOEFF5, VID3HCOEFF5 ++*/ ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF6_OFFSET (0x05C0) ++ ++/* PDP, VID3HCOEFF6, VID3HCOEFF6 ++*/ ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF7_OFFSET (0x05C4) ++ ++/* PDP, VID3HCOEFF7, VID3HCOEFF7 ++*/ ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF8_OFFSET (0x05C8) ++ ++/* PDP, VID3HCOEFF8, VID3HCOEFF8 ++*/ ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF9_OFFSET (0x05CC) ++ ++/* PDP, VID3HCOEFF9, VID3HCOEFF9 ++*/ ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF10_OFFSET (0x05D0) ++ ++/* PDP, VID3HCOEFF10, VID3HCOEFF10 ++*/ ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF11_OFFSET (0x05D4) ++ ++/* PDP, VID3HCOEFF11, VID3HCOEFF11 ++*/ ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF12_OFFSET (0x05D8) ++ ++/* PDP, VID3HCOEFF12, VID3HCOEFF12 ++*/ ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF13_OFFSET (0x05DC) ++ ++/* PDP, VID3HCOEFF13, VID3HCOEFF13 ++*/ 
++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF14_OFFSET (0x05E0) ++ ++/* PDP, VID3HCOEFF14, VID3HCOEFF14 ++*/ ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF15_OFFSET (0x05E4) ++ ++/* PDP, VID3HCOEFF15, VID3HCOEFF15 ++*/ ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH (32) ++#define ODN_PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3HCOEFF16_OFFSET (0x05E8) ++ ++/* PDP, VID3HCOEFF16, VID3HCOEFF16 ++*/ ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_MASK (0x000000FF) ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT (0) ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH (8) ++#define ODN_PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3SCALESIZE_OFFSET (0x05EC) ++ ++/* PDP, VID3SCALESIZE, VID3SCALEWIDTH ++*/ ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT (16) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH (12) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT ++*/ ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT (0) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH (12) ++#define ODN_PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SCALECTRL_OFFSET (0x05F0) ++ ++/* PDP, VID4SCALECTRL, VID4HSCALEBP ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_MASK (0x80000000) ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT (31) ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VSCALEBP ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_MASK (0x40000000) ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT (30) ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4HSBEFOREVS ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK (0x20000000) ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT (29) ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VSURUNCTRL ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK (0x08000000) ++#define 
ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT (27) ++#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4PAN_EN ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_MASK (0x00040000) ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT (18) ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH (1) ++#define ODN_PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VORDER ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_MASK (0x00030000) ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LSBMASK (0x00000003) ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SHIFT (16) ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_LENGTH (2) ++#define ODN_PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VPITCH ++*/ ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SHIFT (0) ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_LENGTH (16) ++#define ODN_PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VSINIT_OFFSET (0x05F4) ++ ++/* PDP, VID4VSINIT, VID4VINITIAL1 ++*/ ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_MASK (0xFFFF0000) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SHIFT (16) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_LENGTH (16) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4VSINIT, VID4VINITIAL0 ++*/ ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_MASK (0x0000FFFF) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SHIFT (0) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_LENGTH (16) ++#define ODN_PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF0_OFFSET (0x05F8) ++ ++/* PDP, VID4VCOEFF0, VID4VCOEFF0 ++*/ ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF1_OFFSET (0x05FC) ++ ++/* PDP, VID4VCOEFF1, VID4VCOEFF1 ++*/ ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF2_OFFSET (0x0600) ++ ++/* PDP, VID4VCOEFF2, VID4VCOEFF2 ++*/ ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF3_OFFSET (0x0604) ++ ++/* PDP, VID4VCOEFF3, VID4VCOEFF3 ++*/ ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD IMG_FALSE ++ 
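++/*
++ * Illustrative sketch only (not part of the original header): every field
++ * in this file is described by the same _MASK/_LSBMASK/_SHIFT/_LENGTH
++ * quadruple, so drivers typically wrap the read-modify-write pattern in
++ * small helpers like the hypothetical ones below (u32 as in
++ * <linux/types.h>; the helper names are invented for this example).
++ */
++#if 0 /* example only, kept out of the build */
++static inline u32 odn_pdp_field_get(u32 reg, u32 mask, u32 shift)
++{
++	/* Isolate the field bits, then align them at bit 0. */
++	return (reg & mask) >> shift;
++}
++
++static inline u32 odn_pdp_field_set(u32 reg, u32 mask, u32 shift, u32 val)
++{
++	/* Clear the field, then merge in the new value, clamped by mask. */
++	return (reg & ~mask) | ((val << shift) & mask);
++}
++#endif
++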
++#define ODN_PDP_VID4VCOEFF4_OFFSET (0x0608) ++ ++/* PDP, VID4VCOEFF4, VID4VCOEFF4 ++*/ ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF5_OFFSET (0x060C) ++ ++/* PDP, VID4VCOEFF5, VID4VCOEFF5 ++*/ ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF6_OFFSET (0x0610) ++ ++/* PDP, VID4VCOEFF6, VID4VCOEFF6 ++*/ ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF7_OFFSET (0x0614) ++ ++/* PDP, VID4VCOEFF7, VID4VCOEFF7 ++*/ ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH (32) ++#define ODN_PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4VCOEFF8_OFFSET (0x0618) ++ ++/* PDP, VID4VCOEFF8, VID4VCOEFF8 ++*/ ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_MASK (0x000000FF) ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT (0) ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH (8) ++#define ODN_PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HSINIT_OFFSET (0x061C) ++ ++/* PDP, VID4HSINIT, VID4HINITIAL ++*/ ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_MASK (0xFFFF0000) ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SHIFT (16) ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_LENGTH (16) ++#define ODN_PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4HSINIT, VID4HPITCH ++*/ ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_MASK (0x0000FFFF) ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_LSBMASK (0x0000FFFF) ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_SHIFT (0) ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_LENGTH (16) ++#define ODN_PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF0_OFFSET (0x0620) ++ ++/* PDP, VID4HCOEFF0, VID4HCOEFF0 ++*/ ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF1_OFFSET (0x0624) ++ ++/* PDP, VID4HCOEFF1, VID4HCOEFF1 ++*/ ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF2_OFFSET (0x0628) ++ ++/* PDP, VID4HCOEFF2, VID4HCOEFF2 ++*/ ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_MASK 
(0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF3_OFFSET (0x062C) ++ ++/* PDP, VID4HCOEFF3, VID4HCOEFF3 ++*/ ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF4_OFFSET (0x0630) ++ ++/* PDP, VID4HCOEFF4, VID4HCOEFF4 ++*/ ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF5_OFFSET (0x0634) ++ ++/* PDP, VID4HCOEFF5, VID4HCOEFF5 ++*/ ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF6_OFFSET (0x0638) ++ ++/* PDP, VID4HCOEFF6, VID4HCOEFF6 ++*/ ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF7_OFFSET (0x063C) ++ ++/* PDP, VID4HCOEFF7, VID4HCOEFF7 ++*/ ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF8_OFFSET (0x0640) ++ ++/* PDP, VID4HCOEFF8, VID4HCOEFF8 ++*/ ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF9_OFFSET (0x0644) ++ ++/* PDP, VID4HCOEFF9, VID4HCOEFF9 ++*/ ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF10_OFFSET (0x0648) ++ ++/* PDP, VID4HCOEFF10, VID4HCOEFF10 ++*/ ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF11_OFFSET (0x064C) ++ ++/* PDP, VID4HCOEFF11, VID4HCOEFF11 ++*/ ++#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_MASK (0xFFFFFFFF) ++#define 
ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF12_OFFSET (0x0650) ++ ++/* PDP, VID4HCOEFF12, VID4HCOEFF12 ++*/ ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF13_OFFSET (0x0654) ++ ++/* PDP, VID4HCOEFF13, VID4HCOEFF13 ++*/ ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF14_OFFSET (0x0658) ++ ++/* PDP, VID4HCOEFF14, VID4HCOEFF14 ++*/ ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF15_OFFSET (0x065C) ++ ++/* PDP, VID4HCOEFF15, VID4HCOEFF15 ++*/ ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_MASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH (32) ++#define ODN_PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4HCOEFF16_OFFSET (0x0660) ++ ++/* PDP, VID4HCOEFF16, VID4HCOEFF16 ++*/ ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_MASK (0x000000FF) ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT (0) ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH (8) ++#define ODN_PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4SCALESIZE_OFFSET (0x0664) ++ ++/* PDP, VID4SCALESIZE, VID4SCALEWIDTH ++*/ ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK (0x0FFF0000) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT (16) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH (12) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT ++*/ ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK (0x00000FFF) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT (0) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH (12) ++#define ODN_PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND0_OFFSET (0x0668) ++ ++/* PDP, PORTER_BLND0, BLND0BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND0, BLND0PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_MASK (0x0000000F) ++#define 
ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND1_OFFSET (0x066C) ++ ++/* PDP, PORTER_BLND1, BLND1BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND1, BLND1PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND2_OFFSET (0x0670) ++ ++/* PDP, PORTER_BLND2, BLND2BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND2, BLND2PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND3_OFFSET (0x0674) ++ ++/* PDP, PORTER_BLND3, BLND3BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND3, BLND3PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND4_OFFSET (0x0678) ++ ++/* PDP, PORTER_BLND4, BLND4BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND4, BLND4PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND5_OFFSET (0x067C) ++ ++/* PDP, PORTER_BLND5, BLND5BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK (0x00000010) ++#define 
ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND5, BLND5PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND6_OFFSET (0x0680) ++ ++/* PDP, PORTER_BLND6, BLND6BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND6, BLND6PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PORTER_BLND7_OFFSET (0x0684) ++ ++/* PDP, PORTER_BLND7, BLND7BLENDTYPE ++*/ ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK (0x00000010) ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK (0x00000001) ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT (4) ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH (1) ++#define ODN_PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND7, BLND7PORTERMODE ++*/ ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_MASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK (0x0000000F) ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT (0) ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH (4) ++#define ODN_PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06C8) ++ ++/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS ++*/ ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT (16) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE ++*/ ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06CC) ++ ++/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX ++*/ ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK (0x03FF0000) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define 
ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT (16) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN ++*/ ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT (0) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1LUMAKEY_C_RG_OFFSET (0x06D0) ++ ++/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK (0x0FFF0000) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT (16) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH (12) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT (0) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH (12) ++#define ODN_PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1LUMAKEY_C_B_OFFSET (0x06D4) ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK (0x20000000) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT (29) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH (1) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK (0x10000000) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT (28) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH (1) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT (16) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH (10) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B ++*/ ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT (0) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH (12) ++#define ODN_PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06D8) ++ ++/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS ++*/ ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT (16) ++#define 
ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE ++*/ ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06DC) ++ ++/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX ++*/ ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK (0x03FF0000) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT (16) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN ++*/ ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT (0) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2LUMAKEY_C_RG_OFFSET (0x06E0) ++ ++/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK (0x0FFF0000) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT (16) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH (12) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT (0) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH (12) ++#define ODN_PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2LUMAKEY_C_B_OFFSET (0x06E4) ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK (0x20000000) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT (29) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH (1) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK (0x10000000) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT (28) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH (1) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT 
(16) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH (10) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B ++*/ ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT (0) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH (12) ++#define ODN_PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06E8) ++ ++/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS ++*/ ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT (16) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE ++*/ ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06EC) ++ ++/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX ++*/ ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK (0x03FF0000) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT (16) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN ++*/ ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT (0) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3LUMAKEY_C_RG_OFFSET (0x06F0) ++ ++/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK (0x0FFF0000) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT (16) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH (12) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT (0) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH (12) ++#define ODN_PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3LUMAKEY_C_B_OFFSET (0x06F4) ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT ++*/ ++#define 
ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK (0x20000000) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT (29) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH (1) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK (0x10000000) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT (28) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH (1) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT (16) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH (10) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B ++*/ ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT (0) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH (12) ++#define ODN_PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06F8) ++ ++/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS ++*/ ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT (16) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE ++*/ ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06FC) ++ ++/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX ++*/ ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK (0x03FF0000) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT (16) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN ++*/ ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT (0) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ 
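++/*
++ * Usage sketch only (not part of the original header): programming the
++ * VID4 luma-key window with the field macros defined just above, via the
++ * hypothetical odn_pdp_field_set() helper sketched earlier. The register
++ * image would be read from / written back at
++ * ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET; the 0x3FF/0x040 limits are
++ * arbitrary example values.
++ */
++#if 0 /* example only, kept out of the build */
++	u32 v = 0;
++
++	v = odn_pdp_field_set(v,
++		ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK,
++		ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT,
++		0x3FF);
++	v = odn_pdp_field_set(v,
++		ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK,
++		ODN_PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT,
++		0x040);
++#endif
++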
++#define ODN_PDP_VID4LUMAKEY_C_RG_OFFSET (0x0700) ++ ++/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK (0x0FFF0000) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT (16) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH (12) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT (0) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH (12) ++#define ODN_PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4LUMAKEY_C_B_OFFSET (0x0704) ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK (0x20000000) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT (29) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH (1) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK (0x10000000) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK (0x00000001) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT (28) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH (1) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT (16) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH (10) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B ++*/ ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT (0) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH (12) ++#define ODN_PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF0_OFFSET (0x0708) ++ ++/* PDP, CSCCOEFF0, CSCCOEFFRU ++*/ ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_MASK (0x003FF800) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT (11) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH (11) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF0, CSCCOEFFRY ++*/ ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT (0) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH (11) ++#define ODN_PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF1_OFFSET (0x070C) ++ ++/* PDP, CSCCOEFF1, CSCCOEFFGY ++*/ ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_MASK (0x003FF800) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT (11) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH (11) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF1, CSCCOEFFRV ++*/ ++#define 
ODN_PDP_CSCCOEFF1_CSCCOEFFRV_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT (0) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH (11) ++#define ODN_PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF2_OFFSET (0x0710) ++ ++/* PDP, CSCCOEFF2, CSCCOEFFGV ++*/ ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_MASK (0x003FF800) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT (11) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH (11) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF2, CSCCOEFFGU ++*/ ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT (0) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH (11) ++#define ODN_PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF3_OFFSET (0x0714) ++ ++/* PDP, CSCCOEFF3, CSCCOEFFBU ++*/ ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_MASK (0x003FF800) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT (11) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH (11) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF3, CSCCOEFFBY ++*/ ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT (0) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH (11) ++#define ODN_PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CSCCOEFF4_OFFSET (0x0718) ++ ++/* PDP, CSCCOEFF4, CSCCOEFFBV ++*/ ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_MASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK (0x000007FF) ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT (0) ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH (11) ++#define ODN_PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_BGNDCOL_AR_OFFSET (0x071C) ++ ++/* PDP, BGNDCOL_AR, BGNDCOL_A ++*/ ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_MASK (0x03FF0000) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT (16) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH (10) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, BGNDCOL_AR, BGNDCOL_R ++*/ ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_MASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT (0) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH (10) ++#define ODN_PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_BGNDCOL_GB_OFFSET (0x0720) ++ ++/* PDP, BGNDCOL_GB, BGNDCOL_G ++*/ ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_MASK (0x03FF0000) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT (16) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH (10) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, BGNDCOL_GB, BGNDCOL_B ++*/ ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_MASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK (0x000003FF) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT (0) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH (10) ++#define ODN_PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_BORDCOL_R_OFFSET (0x0724) ++ ++/* PDP, BORDCOL_R, BORDCOL_R ++*/ ++#define ODN_PDP_BORDCOL_R_BORDCOL_R_MASK (0x000003FF) ++#define 
ODN_PDP_BORDCOL_R_BORDCOL_R_LSBMASK (0x000003FF) ++#define ODN_PDP_BORDCOL_R_BORDCOL_R_SHIFT (0) ++#define ODN_PDP_BORDCOL_R_BORDCOL_R_LENGTH (10) ++#define ODN_PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_BORDCOL_GB_OFFSET (0x0728) ++ ++/* PDP, BORDCOL_GB, BORDCOL_G ++*/ ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_MASK (0x03FF0000) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LSBMASK (0x000003FF) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SHIFT (16) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_LENGTH (10) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, BORDCOL_GB, BORDCOL_B ++*/ ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_MASK (0x000003FF) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LSBMASK (0x000003FF) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SHIFT (0) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_LENGTH (10) ++#define ODN_PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_LINESTAT_OFFSET (0x0734) ++ ++/* PDP, LINESTAT, LINENO ++*/ ++#define ODN_PDP_LINESTAT_LINENO_MASK (0x00001FFF) ++#define ODN_PDP_LINESTAT_LINENO_LSBMASK (0x00001FFF) ++#define ODN_PDP_LINESTAT_LINENO_SHIFT (0) ++#define ODN_PDP_LINESTAT_LINENO_LENGTH (13) ++#define ODN_PDP_LINESTAT_LINENO_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_OFFSET (0x0738) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C12 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C11C12, CR_PROCAMP_C11 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_OFFSET (0x073C) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C21 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C13C21, CR_PROCAMP_C13 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_OFFSET (0x0740) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C23 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH (14) ++#define 
ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C22C23, CR_PROCAMP_C22 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_OFFSET (0x0744) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C32 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C31C32, CR_PROCAMP_C31 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_OFFSET (0x0748) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_C33 ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK (0x3FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK (0x00003FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH (14) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_RANGE ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK (0x00000030) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK (0x00000003) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT (4) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH (2) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_C33, CR_PROCAMP_EN ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK (0x00000001) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK (0x00000001) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH (1) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_OFFSET (0x074C) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK (0x0FFF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK (0x00000FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH (12) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK (0x00000FFF) ++#define 
ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK (0x00000FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH (12) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_OFFSET (0x0750) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK (0x00000FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK (0x00000FFF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH (12) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_OFFSET (0x0754) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK (0x03FF0000) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT (16) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH (10) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH (10) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_OFFSET (0x0758) ++ ++/* PDP, CR_ODN_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R ++*/ ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK (0x000003FF) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT (0) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH (10) ++#define ODN_PDP_CR_ODN_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_SIGNAT_R_OFFSET (0x075C) ++ ++/* PDP, SIGNAT_R, SIGNATURE_R ++*/ ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_MASK (0x000003FF) ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LSBMASK (0x000003FF) ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SHIFT (0) ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_LENGTH (10) ++#define ODN_PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_SIGNAT_GB_OFFSET (0x0760) ++ ++/* PDP, SIGNAT_GB, SIGNATURE_G ++*/ ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_MASK (0x03FF0000) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK (0x000003FF) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SHIFT (16) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_LENGTH (10) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SIGNAT_GB, SIGNATURE_B ++*/ ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_MASK (0x000003FF) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK (0x000003FF) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_SHIFT (0) ++#define ODN_PDP_SIGNAT_GB_SIGNATURE_B_LENGTH (10) ++#define 
ODN_PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGISTER_UPDATE_CTRL_OFFSET (0x0764) ++ ++/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING ++*/ ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK (0x00000004) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT (2) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH (1) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID ++*/ ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK (0x00000002) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT (1) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH (1) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK ++*/ ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT (0) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH (1) ++#define ODN_PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGISTER_UPDATE_STATUS_OFFSET (0x0768) ++ ++/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED ++*/ ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK (0x00000002) ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK (0x00000001) ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT (1) ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH (1) ++#define ODN_PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DBGCTRL_OFFSET (0x076C) ++ ++/* PDP, DBGCTRL, DBG_READ ++*/ ++#define ODN_PDP_DBGCTRL_DBG_READ_MASK (0x00000002) ++#define ODN_PDP_DBGCTRL_DBG_READ_LSBMASK (0x00000001) ++#define ODN_PDP_DBGCTRL_DBG_READ_SHIFT (1) ++#define ODN_PDP_DBGCTRL_DBG_READ_LENGTH (1) ++#define ODN_PDP_DBGCTRL_DBG_READ_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DBGCTRL, DBG_ENAB ++*/ ++#define ODN_PDP_DBGCTRL_DBG_ENAB_MASK (0x00000001) ++#define ODN_PDP_DBGCTRL_DBG_ENAB_LSBMASK (0x00000001) ++#define ODN_PDP_DBGCTRL_DBG_ENAB_SHIFT (0) ++#define ODN_PDP_DBGCTRL_DBG_ENAB_LENGTH (1) ++#define ODN_PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DBGDATA_R_OFFSET (0x0770) ++ ++/* PDP, DBGDATA_R, DBG_DATA_R ++*/ ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_MASK (0x000003FF) ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LSBMASK (0x000003FF) ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SHIFT (0) ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_LENGTH (10) ++#define ODN_PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DBGDATA_GB_OFFSET (0x0774) ++ ++/* PDP, DBGDATA_GB, DBG_DATA_G ++*/ ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_MASK (0x03FF0000) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK (0x000003FF) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SHIFT (16) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_LENGTH (10) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DBGDATA_GB, DBG_DATA_B ++*/ ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_MASK (0x000003FF) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK (0x000003FF) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_SHIFT (0) ++#define ODN_PDP_DBGDATA_GB_DBG_DATA_B_LENGTH (10) ++#define 
ODN_PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DBGSIDE_OFFSET (0x0778) ++ ++/* PDP, DBGSIDE, DBG_VAL ++*/ ++#define ODN_PDP_DBGSIDE_DBG_VAL_MASK (0x00000008) ++#define ODN_PDP_DBGSIDE_DBG_VAL_LSBMASK (0x00000001) ++#define ODN_PDP_DBGSIDE_DBG_VAL_SHIFT (3) ++#define ODN_PDP_DBGSIDE_DBG_VAL_LENGTH (1) ++#define ODN_PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DBGSIDE, DBG_SIDE ++*/ ++#define ODN_PDP_DBGSIDE_DBG_SIDE_MASK (0x00000007) ++#define ODN_PDP_DBGSIDE_DBG_SIDE_LSBMASK (0x00000007) ++#define ODN_PDP_DBGSIDE_DBG_SIDE_SHIFT (0) ++#define ODN_PDP_DBGSIDE_DBG_SIDE_LENGTH (3) ++#define ODN_PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_OUTPUT_OFFSET (0x077C) ++ ++/* PDP, OUTPUT, EIGHT_BIT_OUTPUT ++*/ ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK (0x00000002) ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK (0x00000001) ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT (1) ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH (1) ++#define ODN_PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OUTPUT, OUTPUT_CONFIG ++*/ ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_MASK (0x00000001) ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK (0x00000001) ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SHIFT (0) ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_LENGTH (1) ++#define ODN_PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_SYNCCTRL_OFFSET (0x0780) ++ ++/* PDP, SYNCCTRL, SYNCACTIVE ++*/ ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_MASK (0x80000000) ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SHIFT (31) ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, ODN_PDP_RST ++*/ ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_MASK (0x20000000) ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SHIFT (29) ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_ODN_PDP_RST_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, POWERDN ++*/ ++#define ODN_PDP_SYNCCTRL_POWERDN_MASK (0x10000000) ++#define ODN_PDP_SYNCCTRL_POWERDN_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_POWERDN_SHIFT (28) ++#define ODN_PDP_SYNCCTRL_POWERDN_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_POWERDN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, LOWPWRMODE ++*/ ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_MASK (0x08000000) ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SHIFT (27) ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDSYNCTRL ++*/ ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_MASK (0x04000000) ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SHIFT (26) ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDINTCTRL ++*/ ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_MASK (0x02000000) ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SHIFT (25) ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDCTRL ++*/ ++#define ODN_PDP_SYNCCTRL_UPDCTRL_MASK (0x01000000) ++#define ODN_PDP_SYNCCTRL_UPDCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_UPDCTRL_SHIFT (24) ++#define ODN_PDP_SYNCCTRL_UPDCTRL_LENGTH (1) ++#define 
ODN_PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDWAIT ++*/ ++#define ODN_PDP_SYNCCTRL_UPDWAIT_MASK (0x000F0000) ++#define ODN_PDP_SYNCCTRL_UPDWAIT_LSBMASK (0x0000000F) ++#define ODN_PDP_SYNCCTRL_UPDWAIT_SHIFT (16) ++#define ODN_PDP_SYNCCTRL_UPDWAIT_LENGTH (4) ++#define ODN_PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, FIELD_EN ++*/ ++#define ODN_PDP_SYNCCTRL_FIELD_EN_MASK (0x00002000) ++#define ODN_PDP_SYNCCTRL_FIELD_EN_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_FIELD_EN_SHIFT (13) ++#define ODN_PDP_SYNCCTRL_FIELD_EN_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, CSYNC_EN ++*/ ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_MASK (0x00001000) ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_SHIFT (12) ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, CLKPOL ++*/ ++#define ODN_PDP_SYNCCTRL_CLKPOL_MASK (0x00000800) ++#define ODN_PDP_SYNCCTRL_CLKPOL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_CLKPOL_SHIFT (11) ++#define ODN_PDP_SYNCCTRL_CLKPOL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, VS_SLAVE ++*/ ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_MASK (0x00000080) ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_SHIFT (7) ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, HS_SLAVE ++*/ ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_MASK (0x00000040) ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_SHIFT (6) ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, BLNKPOL ++*/ ++#define ODN_PDP_SYNCCTRL_BLNKPOL_MASK (0x00000020) ++#define ODN_PDP_SYNCCTRL_BLNKPOL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_BLNKPOL_SHIFT (5) ++#define ODN_PDP_SYNCCTRL_BLNKPOL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, BLNKDIS ++*/ ++#define ODN_PDP_SYNCCTRL_BLNKDIS_MASK (0x00000010) ++#define ODN_PDP_SYNCCTRL_BLNKDIS_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_BLNKDIS_SHIFT (4) ++#define ODN_PDP_SYNCCTRL_BLNKDIS_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, VSPOL ++*/ ++#define ODN_PDP_SYNCCTRL_VSPOL_MASK (0x00000008) ++#define ODN_PDP_SYNCCTRL_VSPOL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_VSPOL_SHIFT (3) ++#define ODN_PDP_SYNCCTRL_VSPOL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_VSPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, VSDIS ++*/ ++#define ODN_PDP_SYNCCTRL_VSDIS_MASK (0x00000004) ++#define ODN_PDP_SYNCCTRL_VSDIS_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_VSDIS_SHIFT (2) ++#define ODN_PDP_SYNCCTRL_VSDIS_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_VSDIS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, HSPOL ++*/ ++#define ODN_PDP_SYNCCTRL_HSPOL_MASK (0x00000002) ++#define ODN_PDP_SYNCCTRL_HSPOL_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_HSPOL_SHIFT (1) ++#define ODN_PDP_SYNCCTRL_HSPOL_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_HSPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, HSDIS ++*/ ++#define ODN_PDP_SYNCCTRL_HSDIS_MASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_HSDIS_LSBMASK (0x00000001) ++#define ODN_PDP_SYNCCTRL_HSDIS_SHIFT (0) ++#define ODN_PDP_SYNCCTRL_HSDIS_LENGTH (1) ++#define ODN_PDP_SYNCCTRL_HSDIS_SIGNED_FIELD IMG_FALSE ++ 
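++/*
++ * Editor's note -- illustrative sketch only, not part of the original
++ * header. The _MASK/_LSBMASK/_SHIFT/_LENGTH quadruples above follow the
++ * usual read-modify-write field convention; hypothetical helpers (the
++ * names odn_pdp_field_get/odn_pdp_field_set are assumptions, not taken
++ * from this patch) might look like:
++ *
++ *	static inline u32 odn_pdp_field_get(u32 reg, u32 mask, u32 shift)
++ *	{
++ *		return (reg & mask) >> shift;
++ *	}
++ *
++ *	static inline u32 odn_pdp_field_set(u32 reg, u32 mask, u32 shift,
++ *					    u32 val)
++ *	{
++ *		return (reg & ~mask) | ((val << shift) & mask);
++ *	}
++ *
++ * e.g. flipping vertical sync polarity without disturbing the other
++ * SYNCCTRL bits just defined:
++ *
++ *	reg = odn_pdp_field_set(reg, ODN_PDP_SYNCCTRL_VSPOL_MASK,
++ *				ODN_PDP_SYNCCTRL_VSPOL_SHIFT, 1);
++ */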
++#define ODN_PDP_HSYNC1_OFFSET (0x0784) ++ ++/* PDP, HSYNC1, HBPS ++*/ ++#define ODN_PDP_HSYNC1_HBPS_MASK (0x1FFF0000) ++#define ODN_PDP_HSYNC1_HBPS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC1_HBPS_SHIFT (16) ++#define ODN_PDP_HSYNC1_HBPS_LENGTH (13) ++#define ODN_PDP_HSYNC1_HBPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HSYNC1, HT ++*/ ++#define ODN_PDP_HSYNC1_HT_MASK (0x00001FFF) ++#define ODN_PDP_HSYNC1_HT_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC1_HT_SHIFT (0) ++#define ODN_PDP_HSYNC1_HT_LENGTH (13) ++#define ODN_PDP_HSYNC1_HT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_HSYNC2_OFFSET (0x0788) ++ ++/* PDP, HSYNC2, HAS ++*/ ++#define ODN_PDP_HSYNC2_HAS_MASK (0x1FFF0000) ++#define ODN_PDP_HSYNC2_HAS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC2_HAS_SHIFT (16) ++#define ODN_PDP_HSYNC2_HAS_LENGTH (13) ++#define ODN_PDP_HSYNC2_HAS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HSYNC2, HLBS ++*/ ++#define ODN_PDP_HSYNC2_HLBS_MASK (0x00001FFF) ++#define ODN_PDP_HSYNC2_HLBS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC2_HLBS_SHIFT (0) ++#define ODN_PDP_HSYNC2_HLBS_LENGTH (13) ++#define ODN_PDP_HSYNC2_HLBS_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_HSYNC3_OFFSET (0x078C) ++ ++/* PDP, HSYNC3, HFPS ++*/ ++#define ODN_PDP_HSYNC3_HFPS_MASK (0x1FFF0000) ++#define ODN_PDP_HSYNC3_HFPS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC3_HFPS_SHIFT (16) ++#define ODN_PDP_HSYNC3_HFPS_LENGTH (13) ++#define ODN_PDP_HSYNC3_HFPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HSYNC3, HRBS ++*/ ++#define ODN_PDP_HSYNC3_HRBS_MASK (0x00001FFF) ++#define ODN_PDP_HSYNC3_HRBS_LSBMASK (0x00001FFF) ++#define ODN_PDP_HSYNC3_HRBS_SHIFT (0) ++#define ODN_PDP_HSYNC3_HRBS_LENGTH (13) ++#define ODN_PDP_HSYNC3_HRBS_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VSYNC1_OFFSET (0x0790) ++ ++/* PDP, VSYNC1, VBPS ++*/ ++#define ODN_PDP_VSYNC1_VBPS_MASK (0x1FFF0000) ++#define ODN_PDP_VSYNC1_VBPS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC1_VBPS_SHIFT (16) ++#define ODN_PDP_VSYNC1_VBPS_LENGTH (13) ++#define ODN_PDP_VSYNC1_VBPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VSYNC1, VT ++*/ ++#define ODN_PDP_VSYNC1_VT_MASK (0x00001FFF) ++#define ODN_PDP_VSYNC1_VT_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC1_VT_SHIFT (0) ++#define ODN_PDP_VSYNC1_VT_LENGTH (13) ++#define ODN_PDP_VSYNC1_VT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VSYNC2_OFFSET (0x0794) ++ ++/* PDP, VSYNC2, VAS ++*/ ++#define ODN_PDP_VSYNC2_VAS_MASK (0x1FFF0000) ++#define ODN_PDP_VSYNC2_VAS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC2_VAS_SHIFT (16) ++#define ODN_PDP_VSYNC2_VAS_LENGTH (13) ++#define ODN_PDP_VSYNC2_VAS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VSYNC2, VTBS ++*/ ++#define ODN_PDP_VSYNC2_VTBS_MASK (0x00001FFF) ++#define ODN_PDP_VSYNC2_VTBS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC2_VTBS_SHIFT (0) ++#define ODN_PDP_VSYNC2_VTBS_LENGTH (13) ++#define ODN_PDP_VSYNC2_VTBS_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VSYNC3_OFFSET (0x0798) ++ ++/* PDP, VSYNC3, VFPS ++*/ ++#define ODN_PDP_VSYNC3_VFPS_MASK (0x1FFF0000) ++#define ODN_PDP_VSYNC3_VFPS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC3_VFPS_SHIFT (16) ++#define ODN_PDP_VSYNC3_VFPS_LENGTH (13) ++#define ODN_PDP_VSYNC3_VFPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VSYNC3, VBBS ++*/ ++#define ODN_PDP_VSYNC3_VBBS_MASK (0x00001FFF) ++#define ODN_PDP_VSYNC3_VBBS_LSBMASK (0x00001FFF) ++#define ODN_PDP_VSYNC3_VBBS_SHIFT (0) ++#define ODN_PDP_VSYNC3_VBBS_LENGTH (13) ++#define ODN_PDP_VSYNC3_VBBS_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_INTSTAT_OFFSET (0x079C) ++ ++/* PDP, INTSTAT, INTS_VID4ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_MASK (0x00080000) 
++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SHIFT (19) ++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID3ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_MASK (0x00040000) ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SHIFT (18) ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID2ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_MASK (0x00020000) ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SHIFT (17) ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID1ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_MASK (0x00010000) ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SHIFT (16) ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH4ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_MASK (0x00008000) ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT (15) ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH3ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_MASK (0x00004000) ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT (14) ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH2ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_MASK (0x00002000) ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT (13) ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH1ORUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_MASK (0x00001000) ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT (12) ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID4URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_MASK (0x00000800) ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_SHIFT (11) ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID3URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_MASK (0x00000400) ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_SHIFT (10) ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID2URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_MASK (0x00000200) ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_SHIFT (9) ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID1URUN ++*/ ++#define 
ODN_PDP_INTSTAT_INTS_VID1URUN_MASK (0x00000100) ++#define ODN_PDP_INTSTAT_INTS_VID1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VID1URUN_SHIFT (8) ++#define ODN_PDP_INTSTAT_INTS_VID1URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH4URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_MASK (0x00000080) ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SHIFT (7) ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH3URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_MASK (0x00000040) ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SHIFT (6) ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH2URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_MASK (0x00000020) ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SHIFT (5) ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH1URUN ++*/ ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_MASK (0x00000010) ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SHIFT (4) ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VBLNK1 ++*/ ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_MASK (0x00000008) ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_SHIFT (3) ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VBLNK0 ++*/ ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_MASK (0x00000004) ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_SHIFT (2) ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_HBLNK1 ++*/ ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_MASK (0x00000002) ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_SHIFT (1) ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_HBLNK0 ++*/ ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_MASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_SHIFT (0) ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_LENGTH (1) ++#define ODN_PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_INTENAB_OFFSET (0x07A0) ++ ++/* PDP, INTENAB, INTEN_VID4ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_MASK (0x00080000) ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SHIFT (19) ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID3ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_MASK (0x00040000) ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SHIFT (18) ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD IMG_FALSE ++ 
++/* PDP, INTENAB, INTEN_VID2ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_MASK (0x00020000) ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SHIFT (17) ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID1ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_MASK (0x00010000) ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SHIFT (16) ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH4ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_MASK (0x00008000) ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT (15) ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH3ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_MASK (0x00004000) ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT (14) ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH2ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_MASK (0x00002000) ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT (13) ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH1ORUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_MASK (0x00001000) ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT (12) ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID4URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_MASK (0x00000800) ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_SHIFT (11) ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID3URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_MASK (0x00000400) ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_SHIFT (10) ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID2URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_MASK (0x00000200) ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_SHIFT (9) ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID1URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_MASK (0x00000100) ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_SHIFT (8) ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH4URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_MASK (0x00000080) ++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SHIFT (7) 
++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH3URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_MASK (0x00000040) ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SHIFT (6) ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH2URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_MASK (0x00000020) ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SHIFT (5) ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH1URUN ++*/ ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_MASK (0x00000010) ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SHIFT (4) ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VBLNK1 ++*/ ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_MASK (0x00000008) ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_SHIFT (3) ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VBLNK0 ++*/ ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_MASK (0x00000004) ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_SHIFT (2) ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_HBLNK1 ++*/ ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_MASK (0x00000002) ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_SHIFT (1) ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_HBLNK0 ++*/ ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_MASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_SHIFT (0) ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_LENGTH (1) ++#define ODN_PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_INTCLR_OFFSET (0x07A4) ++ ++/* PDP, INTCLR, INTCLR_VID4ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_MASK (0x00080000) ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SHIFT (19) ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID3ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_MASK (0x00040000) ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SHIFT (18) ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID2ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_MASK (0x00020000) ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SHIFT (17) ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID1ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_MASK (0x00010000) ++#define 
ODN_PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SHIFT (16) ++#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH4ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_MASK (0x00008000) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT (15) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH3ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_MASK (0x00004000) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT (14) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH2ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_MASK (0x00002000) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT (13) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH1ORUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_MASK (0x00001000) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT (12) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID4URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_MASK (0x00000800) ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SHIFT (11) ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID3URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_MASK (0x00000400) ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SHIFT (10) ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID2URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_MASK (0x00000200) ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SHIFT (9) ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID1URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_MASK (0x00000100) ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SHIFT (8) ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH4URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_MASK (0x00000080) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT (7) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH3URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_MASK (0x00000040) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT (6) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD 
IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH2URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_MASK (0x00000020) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT (5) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH1URUN ++*/ ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_MASK (0x00000010) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT (4) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VBLNK1 ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_MASK (0x00000008) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SHIFT (3) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VBLNK0 ++*/ ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_MASK (0x00000004) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SHIFT (2) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_HBLNK1 ++*/ ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_MASK (0x00000002) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SHIFT (1) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_HBLNK0 ++*/ ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_MASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LSBMASK (0x00000001) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SHIFT (0) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_LENGTH (1) ++#define ODN_PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_MEMCTRL_OFFSET (0x07A8) ++ ++/* PDP, MEMCTRL, MEMREFRESH ++*/ ++#define ODN_PDP_MEMCTRL_MEMREFRESH_MASK (0xC0000000) ++#define ODN_PDP_MEMCTRL_MEMREFRESH_LSBMASK (0x00000003) ++#define ODN_PDP_MEMCTRL_MEMREFRESH_SHIFT (30) ++#define ODN_PDP_MEMCTRL_MEMREFRESH_LENGTH (2) ++#define ODN_PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, MEMCTRL, BURSTLEN ++*/ ++#define ODN_PDP_MEMCTRL_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_MEMCTRL_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_MEMCTRL_BURSTLEN_SHIFT (0) ++#define ODN_PDP_MEMCTRL_BURSTLEN_LENGTH (8) ++#define ODN_PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_MEM_THRESH_OFFSET (0x07AC) ++ ++/* PDP, MEM_THRESH, UVTHRESHOLD ++*/ ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, MEM_THRESH, YTHRESHOLD ++*/ ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, MEM_THRESH, THRESHOLD ++*/ ++#define ODN_PDP_MEM_THRESH_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_MEM_THRESH_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_MEM_THRESH_THRESHOLD_SHIFT (0) ++#define 
ODN_PDP_MEM_THRESH_THRESHOLD_LENGTH (9) ++#define ODN_PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_ALTERNATE_3D_CTRL_OFFSET (0x07B0) ++ ++/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON ++*/ ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK (0x00000010) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK (0x00000001) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT (4) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH (1) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL ++*/ ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK (0x00000007) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK (0x00000007) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT (0) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH (3) ++#define ODN_PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA0_R_OFFSET (0x07B4) ++ ++/* PDP, GAMMA0_R, GAMMA0_R ++*/ ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_SHIFT (0) ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_LENGTH (10) ++#define ODN_PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA0_GB_OFFSET (0x07B8) ++ ++/* PDP, GAMMA0_GB, GAMMA0_G ++*/ ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SHIFT (16) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_LENGTH (10) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA0_GB, GAMMA0_B ++*/ ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SHIFT (0) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_LENGTH (10) ++#define ODN_PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA1_R_OFFSET (0x07BC) ++ ++/* PDP, GAMMA1_R, GAMMA1_R ++*/ ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_SHIFT (0) ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_LENGTH (10) ++#define ODN_PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA1_GB_OFFSET (0x07C0) ++ ++/* PDP, GAMMA1_GB, GAMMA1_G ++*/ ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SHIFT (16) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_LENGTH (10) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA1_GB, GAMMA1_B ++*/ ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SHIFT (0) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_LENGTH (10) ++#define ODN_PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA2_R_OFFSET (0x07C4) ++ ++/* PDP, GAMMA2_R, GAMMA2_R ++*/ ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_SHIFT (0) ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_LENGTH (10) ++#define ODN_PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA2_GB_OFFSET (0x07C8) ++ ++/* PDP, GAMMA2_GB, GAMMA2_G ++*/ ++#define ODN_PDP_GAMMA2_GB_GAMMA2_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SHIFT (16) ++#define 
ODN_PDP_GAMMA2_GB_GAMMA2_G_LENGTH (10) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA2_GB, GAMMA2_B ++*/ ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SHIFT (0) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_LENGTH (10) ++#define ODN_PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA3_R_OFFSET (0x07CC) ++ ++/* PDP, GAMMA3_R, GAMMA3_R ++*/ ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_SHIFT (0) ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_LENGTH (10) ++#define ODN_PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA3_GB_OFFSET (0x07D0) ++ ++/* PDP, GAMMA3_GB, GAMMA3_G ++*/ ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SHIFT (16) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_LENGTH (10) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA3_GB, GAMMA3_B ++*/ ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SHIFT (0) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_LENGTH (10) ++#define ODN_PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA4_R_OFFSET (0x07D4) ++ ++/* PDP, GAMMA4_R, GAMMA4_R ++*/ ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_SHIFT (0) ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_LENGTH (10) ++#define ODN_PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA4_GB_OFFSET (0x07D8) ++ ++/* PDP, GAMMA4_GB, GAMMA4_G ++*/ ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SHIFT (16) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_LENGTH (10) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA4_GB, GAMMA4_B ++*/ ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SHIFT (0) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_LENGTH (10) ++#define ODN_PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA5_R_OFFSET (0x07DC) ++ ++/* PDP, GAMMA5_R, GAMMA5_R ++*/ ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_SHIFT (0) ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_LENGTH (10) ++#define ODN_PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA5_GB_OFFSET (0x07E0) ++ ++/* PDP, GAMMA5_GB, GAMMA5_G ++*/ ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SHIFT (16) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_LENGTH (10) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA5_GB, GAMMA5_B ++*/ ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SHIFT (0) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_LENGTH (10) ++#define ODN_PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA6_R_OFFSET (0x07E4) ++ ++/* PDP, GAMMA6_R, GAMMA6_R ++*/ ++#define 
ODN_PDP_GAMMA6_R_GAMMA6_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA6_R_GAMMA6_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA6_R_GAMMA6_R_SHIFT (0) ++#define ODN_PDP_GAMMA6_R_GAMMA6_R_LENGTH (10) ++#define ODN_PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA6_GB_OFFSET (0x07E8) ++ ++/* PDP, GAMMA6_GB, GAMMA6_G ++*/ ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SHIFT (16) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_LENGTH (10) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA6_GB, GAMMA6_B ++*/ ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SHIFT (0) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_LENGTH (10) ++#define ODN_PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA7_R_OFFSET (0x07EC) ++ ++/* PDP, GAMMA7_R, GAMMA7_R ++*/ ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_SHIFT (0) ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_LENGTH (10) ++#define ODN_PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA7_GB_OFFSET (0x07F0) ++ ++/* PDP, GAMMA7_GB, GAMMA7_G ++*/ ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SHIFT (16) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_LENGTH (10) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA7_GB, GAMMA7_B ++*/ ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SHIFT (0) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_LENGTH (10) ++#define ODN_PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA8_R_OFFSET (0x07F4) ++ ++/* PDP, GAMMA8_R, GAMMA8_R ++*/ ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_SHIFT (0) ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_LENGTH (10) ++#define ODN_PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA8_GB_OFFSET (0x07F8) ++ ++/* PDP, GAMMA8_GB, GAMMA8_G ++*/ ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SHIFT (16) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_LENGTH (10) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA8_GB, GAMMA8_B ++*/ ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SHIFT (0) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_LENGTH (10) ++#define ODN_PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA9_R_OFFSET (0x07FC) ++ ++/* PDP, GAMMA9_R, GAMMA9_R ++*/ ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_SHIFT (0) ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_LENGTH (10) ++#define ODN_PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA9_GB_OFFSET (0x0800) ++ ++/* PDP, GAMMA9_GB, GAMMA9_G ++*/ ++#define ODN_PDP_GAMMA9_GB_GAMMA9_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SHIFT (16) ++#define 
ODN_PDP_GAMMA9_GB_GAMMA9_G_LENGTH (10) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA9_GB, GAMMA9_B ++*/ ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SHIFT (0) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_LENGTH (10) ++#define ODN_PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA10_R_OFFSET (0x0804) ++ ++/* PDP, GAMMA10_R, GAMMA10_R ++*/ ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_SHIFT (0) ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_LENGTH (10) ++#define ODN_PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA10_GB_OFFSET (0x0808) ++ ++/* PDP, GAMMA10_GB, GAMMA10_G ++*/ ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SHIFT (16) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_LENGTH (10) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA10_GB, GAMMA10_B ++*/ ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SHIFT (0) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_LENGTH (10) ++#define ODN_PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA11_R_OFFSET (0x080C) ++ ++/* PDP, GAMMA11_R, GAMMA11_R ++*/ ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_SHIFT (0) ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_LENGTH (10) ++#define ODN_PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA11_GB_OFFSET (0x0810) ++ ++/* PDP, GAMMA11_GB, GAMMA11_G ++*/ ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SHIFT (16) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_LENGTH (10) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA11_GB, GAMMA11_B ++*/ ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SHIFT (0) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_LENGTH (10) ++#define ODN_PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA12_R_OFFSET (0x0814) ++ ++/* PDP, GAMMA12_R, GAMMA12_R ++*/ ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_SHIFT (0) ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_LENGTH (10) ++#define ODN_PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA12_GB_OFFSET (0x0818) ++ ++/* PDP, GAMMA12_GB, GAMMA12_G ++*/ ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SHIFT (16) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_LENGTH (10) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA12_GB, GAMMA12_B ++*/ ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SHIFT (0) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_LENGTH (10) ++#define ODN_PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD IMG_FALSE ++ 
++#define ODN_PDP_GAMMA13_R_OFFSET (0x081C) ++ ++/* PDP, GAMMA13_R, GAMMA13_R ++*/ ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_SHIFT (0) ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_LENGTH (10) ++#define ODN_PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA13_GB_OFFSET (0x0820) ++ ++/* PDP, GAMMA13_GB, GAMMA13_G ++*/ ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SHIFT (16) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_LENGTH (10) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA13_GB, GAMMA13_B ++*/ ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SHIFT (0) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_LENGTH (10) ++#define ODN_PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA14_R_OFFSET (0x0824) ++ ++/* PDP, GAMMA14_R, GAMMA14_R ++*/ ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_SHIFT (0) ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_LENGTH (10) ++#define ODN_PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA14_GB_OFFSET (0x0828) ++ ++/* PDP, GAMMA14_GB, GAMMA14_G ++*/ ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SHIFT (16) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_LENGTH (10) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA14_GB, GAMMA14_B ++*/ ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SHIFT (0) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_LENGTH (10) ++#define ODN_PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA15_R_OFFSET (0x082C) ++ ++/* PDP, GAMMA15_R, GAMMA15_R ++*/ ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_SHIFT (0) ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_LENGTH (10) ++#define ODN_PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA15_GB_OFFSET (0x0830) ++ ++/* PDP, GAMMA15_GB, GAMMA15_G ++*/ ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SHIFT (16) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_LENGTH (10) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA15_GB, GAMMA15_B ++*/ ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SHIFT (0) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_LENGTH (10) ++#define ODN_PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA16_R_OFFSET (0x0834) ++ ++/* PDP, GAMMA16_R, GAMMA16_R ++*/ ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_SHIFT (0) ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_LENGTH (10) ++#define ODN_PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA16_GB_OFFSET (0x0838) ++ ++/* 
PDP, GAMMA16_GB, GAMMA16_G ++*/ ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SHIFT (16) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_LENGTH (10) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA16_GB, GAMMA16_B ++*/ ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SHIFT (0) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_LENGTH (10) ++#define ODN_PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA17_R_OFFSET (0x083C) ++ ++/* PDP, GAMMA17_R, GAMMA17_R ++*/ ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_SHIFT (0) ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_LENGTH (10) ++#define ODN_PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA17_GB_OFFSET (0x0840) ++ ++/* PDP, GAMMA17_GB, GAMMA17_G ++*/ ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SHIFT (16) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_LENGTH (10) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA17_GB, GAMMA17_B ++*/ ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SHIFT (0) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_LENGTH (10) ++#define ODN_PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA18_R_OFFSET (0x0844) ++ ++/* PDP, GAMMA18_R, GAMMA18_R ++*/ ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_SHIFT (0) ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_LENGTH (10) ++#define ODN_PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA18_GB_OFFSET (0x0848) ++ ++/* PDP, GAMMA18_GB, GAMMA18_G ++*/ ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SHIFT (16) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_LENGTH (10) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA18_GB, GAMMA18_B ++*/ ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SHIFT (0) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_LENGTH (10) ++#define ODN_PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA19_R_OFFSET (0x084C) ++ ++/* PDP, GAMMA19_R, GAMMA19_R ++*/ ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_SHIFT (0) ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_LENGTH (10) ++#define ODN_PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA19_GB_OFFSET (0x0850) ++ ++/* PDP, GAMMA19_GB, GAMMA19_G ++*/ ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SHIFT (16) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_LENGTH (10) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA19_GB, GAMMA19_B ++*/ ++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_MASK (0x000003FF) 
++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SHIFT (0) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_LENGTH (10) ++#define ODN_PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA20_R_OFFSET (0x0854) ++ ++/* PDP, GAMMA20_R, GAMMA20_R ++*/ ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_SHIFT (0) ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_LENGTH (10) ++#define ODN_PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA20_GB_OFFSET (0x0858) ++ ++/* PDP, GAMMA20_GB, GAMMA20_G ++*/ ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SHIFT (16) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_LENGTH (10) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA20_GB, GAMMA20_B ++*/ ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SHIFT (0) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_LENGTH (10) ++#define ODN_PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA21_R_OFFSET (0x085C) ++ ++/* PDP, GAMMA21_R, GAMMA21_R ++*/ ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_SHIFT (0) ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_LENGTH (10) ++#define ODN_PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA21_GB_OFFSET (0x0860) ++ ++/* PDP, GAMMA21_GB, GAMMA21_G ++*/ ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SHIFT (16) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_LENGTH (10) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA21_GB, GAMMA21_B ++*/ ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SHIFT (0) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_LENGTH (10) ++#define ODN_PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA22_R_OFFSET (0x0864) ++ ++/* PDP, GAMMA22_R, GAMMA22_R ++*/ ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_SHIFT (0) ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_LENGTH (10) ++#define ODN_PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA22_GB_OFFSET (0x0868) ++ ++/* PDP, GAMMA22_GB, GAMMA22_G ++*/ ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SHIFT (16) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_LENGTH (10) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA22_GB, GAMMA22_B ++*/ ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SHIFT (0) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_LENGTH (10) ++#define ODN_PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA23_R_OFFSET (0x086C) ++ ++/* PDP, GAMMA23_R, GAMMA23_R ++*/ ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_LSBMASK 
(0x000003FF) ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_SHIFT (0) ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_LENGTH (10) ++#define ODN_PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA23_GB_OFFSET (0x0870) ++ ++/* PDP, GAMMA23_GB, GAMMA23_G ++*/ ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SHIFT (16) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_LENGTH (10) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA23_GB, GAMMA23_B ++*/ ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SHIFT (0) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_LENGTH (10) ++#define ODN_PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA24_R_OFFSET (0x0874) ++ ++/* PDP, GAMMA24_R, GAMMA24_R ++*/ ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_SHIFT (0) ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_LENGTH (10) ++#define ODN_PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA24_GB_OFFSET (0x0878) ++ ++/* PDP, GAMMA24_GB, GAMMA24_G ++*/ ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SHIFT (16) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_LENGTH (10) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA24_GB, GAMMA24_B ++*/ ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SHIFT (0) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_LENGTH (10) ++#define ODN_PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA25_R_OFFSET (0x087C) ++ ++/* PDP, GAMMA25_R, GAMMA25_R ++*/ ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_SHIFT (0) ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_LENGTH (10) ++#define ODN_PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA25_GB_OFFSET (0x0880) ++ ++/* PDP, GAMMA25_GB, GAMMA25_G ++*/ ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SHIFT (16) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_LENGTH (10) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA25_GB, GAMMA25_B ++*/ ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SHIFT (0) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_LENGTH (10) ++#define ODN_PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA26_R_OFFSET (0x0884) ++ ++/* PDP, GAMMA26_R, GAMMA26_R ++*/ ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_SHIFT (0) ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_LENGTH (10) ++#define ODN_PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA26_GB_OFFSET (0x0888) ++ ++/* PDP, GAMMA26_GB, GAMMA26_G ++*/ ++#define ODN_PDP_GAMMA26_GB_GAMMA26_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LSBMASK (0x000003FF) ++#define 
ODN_PDP_GAMMA26_GB_GAMMA26_G_SHIFT (16) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_G_LENGTH (10) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA26_GB, GAMMA26_B ++*/ ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SHIFT (0) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_LENGTH (10) ++#define ODN_PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA27_R_OFFSET (0x088C) ++ ++/* PDP, GAMMA27_R, GAMMA27_R ++*/ ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_SHIFT (0) ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_LENGTH (10) ++#define ODN_PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA27_GB_OFFSET (0x0890) ++ ++/* PDP, GAMMA27_GB, GAMMA27_G ++*/ ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SHIFT (16) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_LENGTH (10) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA27_GB, GAMMA27_B ++*/ ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SHIFT (0) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_LENGTH (10) ++#define ODN_PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA28_R_OFFSET (0x0894) ++ ++/* PDP, GAMMA28_R, GAMMA28_R ++*/ ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_SHIFT (0) ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_LENGTH (10) ++#define ODN_PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA28_GB_OFFSET (0x0898) ++ ++/* PDP, GAMMA28_GB, GAMMA28_G ++*/ ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SHIFT (16) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_LENGTH (10) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA28_GB, GAMMA28_B ++*/ ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SHIFT (0) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_LENGTH (10) ++#define ODN_PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA29_R_OFFSET (0x089C) ++ ++/* PDP, GAMMA29_R, GAMMA29_R ++*/ ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_SHIFT (0) ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_LENGTH (10) ++#define ODN_PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA29_GB_OFFSET (0x08A0) ++ ++/* PDP, GAMMA29_GB, GAMMA29_G ++*/ ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SHIFT (16) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_LENGTH (10) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA29_GB, GAMMA29_B ++*/ ++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SHIFT (0) ++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_LENGTH (10) 
++#define ODN_PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA30_R_OFFSET (0x08A4) ++ ++/* PDP, GAMMA30_R, GAMMA30_R ++*/ ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_SHIFT (0) ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_LENGTH (10) ++#define ODN_PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA30_GB_OFFSET (0x08A8) ++ ++/* PDP, GAMMA30_GB, GAMMA30_G ++*/ ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SHIFT (16) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_LENGTH (10) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA30_GB, GAMMA30_B ++*/ ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SHIFT (0) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_LENGTH (10) ++#define ODN_PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA31_R_OFFSET (0x08AC) ++ ++/* PDP, GAMMA31_R, GAMMA31_R ++*/ ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_SHIFT (0) ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_LENGTH (10) ++#define ODN_PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA31_GB_OFFSET (0x08B0) ++ ++/* PDP, GAMMA31_GB, GAMMA31_G ++*/ ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SHIFT (16) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_LENGTH (10) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA31_GB, GAMMA31_B ++*/ ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SHIFT (0) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_LENGTH (10) ++#define ODN_PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA32_R_OFFSET (0x08B4) ++ ++/* PDP, GAMMA32_R, GAMMA32_R ++*/ ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_MASK (0x000003FF) ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_SHIFT (0) ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_LENGTH (10) ++#define ODN_PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GAMMA32_GB_OFFSET (0x08B8) ++ ++/* PDP, GAMMA32_GB, GAMMA32_G ++*/ ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_MASK (0x03FF0000) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SHIFT (16) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_LENGTH (10) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA32_GB, GAMMA32_B ++*/ ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_MASK (0x000003FF) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LSBMASK (0x000003FF) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SHIFT (0) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_LENGTH (10) ++#define ODN_PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VEVENT_OFFSET (0x08BC) ++ ++/* PDP, VEVENT, VEVENT ++*/ ++#define ODN_PDP_VEVENT_VEVENT_MASK (0x1FFF0000) ++#define ODN_PDP_VEVENT_VEVENT_LSBMASK (0x00001FFF) ++#define ODN_PDP_VEVENT_VEVENT_SHIFT (16) ++#define ODN_PDP_VEVENT_VEVENT_LENGTH (13) ++#define ODN_PDP_VEVENT_VEVENT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VEVENT, VFETCH 
++*/ ++#define ODN_PDP_VEVENT_VFETCH_MASK (0x00001FFF) ++#define ODN_PDP_VEVENT_VFETCH_LSBMASK (0x00001FFF) ++#define ODN_PDP_VEVENT_VFETCH_SHIFT (0) ++#define ODN_PDP_VEVENT_VFETCH_LENGTH (13) ++#define ODN_PDP_VEVENT_VFETCH_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_HDECTRL_OFFSET (0x08C0) ++ ++/* PDP, HDECTRL, HDES ++*/ ++#define ODN_PDP_HDECTRL_HDES_MASK (0x1FFF0000) ++#define ODN_PDP_HDECTRL_HDES_LSBMASK (0x00001FFF) ++#define ODN_PDP_HDECTRL_HDES_SHIFT (16) ++#define ODN_PDP_HDECTRL_HDES_LENGTH (13) ++#define ODN_PDP_HDECTRL_HDES_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HDECTRL, HDEF ++*/ ++#define ODN_PDP_HDECTRL_HDEF_MASK (0x00001FFF) ++#define ODN_PDP_HDECTRL_HDEF_LSBMASK (0x00001FFF) ++#define ODN_PDP_HDECTRL_HDEF_SHIFT (0) ++#define ODN_PDP_HDECTRL_HDEF_LENGTH (13) ++#define ODN_PDP_HDECTRL_HDEF_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VDECTRL_OFFSET (0x08C4) ++ ++/* PDP, VDECTRL, VDES ++*/ ++#define ODN_PDP_VDECTRL_VDES_MASK (0x1FFF0000) ++#define ODN_PDP_VDECTRL_VDES_LSBMASK (0x00001FFF) ++#define ODN_PDP_VDECTRL_VDES_SHIFT (16) ++#define ODN_PDP_VDECTRL_VDES_LENGTH (13) ++#define ODN_PDP_VDECTRL_VDES_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VDECTRL, VDEF ++*/ ++#define ODN_PDP_VDECTRL_VDEF_MASK (0x00001FFF) ++#define ODN_PDP_VDECTRL_VDEF_LSBMASK (0x00001FFF) ++#define ODN_PDP_VDECTRL_VDEF_SHIFT (0) ++#define ODN_PDP_VDECTRL_VDEF_LENGTH (13) ++#define ODN_PDP_VDECTRL_VDEF_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_OPMASK_R_OFFSET (0x08C8) ++ ++/* PDP, OPMASK_R, MASKLEVEL ++*/ ++#define ODN_PDP_OPMASK_R_MASKLEVEL_MASK (0x80000000) ++#define ODN_PDP_OPMASK_R_MASKLEVEL_LSBMASK (0x00000001) ++#define ODN_PDP_OPMASK_R_MASKLEVEL_SHIFT (31) ++#define ODN_PDP_OPMASK_R_MASKLEVEL_LENGTH (1) ++#define ODN_PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OPMASK_R, BLANKLEVEL ++*/ ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_MASK (0x40000000) ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_LSBMASK (0x00000001) ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_SHIFT (30) ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_LENGTH (1) ++#define ODN_PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OPMASK_R, MASKR ++*/ ++#define ODN_PDP_OPMASK_R_MASKR_MASK (0x000003FF) ++#define ODN_PDP_OPMASK_R_MASKR_LSBMASK (0x000003FF) ++#define ODN_PDP_OPMASK_R_MASKR_SHIFT (0) ++#define ODN_PDP_OPMASK_R_MASKR_LENGTH (10) ++#define ODN_PDP_OPMASK_R_MASKR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_OPMASK_GB_OFFSET (0x08CC) ++ ++/* PDP, OPMASK_GB, MASKG ++*/ ++#define ODN_PDP_OPMASK_GB_MASKG_MASK (0x03FF0000) ++#define ODN_PDP_OPMASK_GB_MASKG_LSBMASK (0x000003FF) ++#define ODN_PDP_OPMASK_GB_MASKG_SHIFT (16) ++#define ODN_PDP_OPMASK_GB_MASKG_LENGTH (10) ++#define ODN_PDP_OPMASK_GB_MASKG_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OPMASK_GB, MASKB ++*/ ++#define ODN_PDP_OPMASK_GB_MASKB_MASK (0x000003FF) ++#define ODN_PDP_OPMASK_GB_MASKB_LSBMASK (0x000003FF) ++#define ODN_PDP_OPMASK_GB_MASKB_SHIFT (0) ++#define ODN_PDP_OPMASK_GB_MASKB_LENGTH (10) ++#define ODN_PDP_OPMASK_GB_MASKB_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGLD_ADDR_CTRL_OFFSET (0x08D0) ++ ++/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN ++*/ ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK (0xFFFFFFF0) ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK (0x0FFFFFFF) ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT (4) ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH (28) ++#define ODN_PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGLD_ADDR_STAT_OFFSET (0x08D4) ++ ++/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT ++*/ ++#define 
ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK (0xFFFFFFF0) ++#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK (0x0FFFFFFF) ++#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT (4) ++#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH (28) ++#define ODN_PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGLD_STAT_OFFSET (0x08D8) ++ ++/* PDP, REGLD_STAT, REGLD_ADDREN ++*/ ++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_MASK (0x00800000) ++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK (0x00000001) ++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SHIFT (23) ++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_LENGTH (1) ++#define ODN_PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_REGLD_CTRL_OFFSET (0x08DC) ++ ++/* PDP, REGLD_CTRL, REGLD_ADDRLEN ++*/ ++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK (0xFF000000) ++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT (24) ++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH (8) ++#define ODN_PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, REGLD_CTRL, REGLD_VAL ++*/ ++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_MASK (0x00800000) ++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LSBMASK (0x00000001) ++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SHIFT (23) ++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_LENGTH (1) ++#define ODN_PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_UPDCTRL_OFFSET (0x08E0) ++ ++/* PDP, UPDCTRL, UPDFIELD ++*/ ++#define ODN_PDP_UPDCTRL_UPDFIELD_MASK (0x00000001) ++#define ODN_PDP_UPDCTRL_UPDFIELD_LSBMASK (0x00000001) ++#define ODN_PDP_UPDCTRL_UPDFIELD_SHIFT (0) ++#define ODN_PDP_UPDCTRL_UPDFIELD_LENGTH (1) ++#define ODN_PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_INTCTRL_OFFSET (0x08E4) ++ ++/* PDP, INTCTRL, HBLNK_LINE ++*/ ++#define ODN_PDP_INTCTRL_HBLNK_LINE_MASK (0x00010000) ++#define ODN_PDP_INTCTRL_HBLNK_LINE_LSBMASK (0x00000001) ++#define ODN_PDP_INTCTRL_HBLNK_LINE_SHIFT (16) ++#define ODN_PDP_INTCTRL_HBLNK_LINE_LENGTH (1) ++#define ODN_PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCTRL, HBLNK_LINENO ++*/ ++#define ODN_PDP_INTCTRL_HBLNK_LINENO_MASK (0x00001FFF) ++#define ODN_PDP_INTCTRL_HBLNK_LINENO_LSBMASK (0x00001FFF) ++#define ODN_PDP_INTCTRL_HBLNK_LINENO_SHIFT (0) ++#define ODN_PDP_INTCTRL_HBLNK_LINENO_LENGTH (13) ++#define ODN_PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PDISETUP_OFFSET (0x0900) ++ ++/* PDP, PDISETUP, PDI_BLNKLVL ++*/ ++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_MASK (0x00000040) ++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LSBMASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SHIFT (6) ++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_BLNK ++*/ ++#define ODN_PDP_PDISETUP_PDI_BLNK_MASK (0x00000020) ++#define ODN_PDP_PDISETUP_PDI_BLNK_LSBMASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_BLNK_SHIFT (5) ++#define ODN_PDP_PDISETUP_PDI_BLNK_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_PWR ++*/ ++#define ODN_PDP_PDISETUP_PDI_PWR_MASK (0x00000010) ++#define ODN_PDP_PDISETUP_PDI_PWR_LSBMASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_PWR_SHIFT (4) ++#define ODN_PDP_PDISETUP_PDI_PWR_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_PWR_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_EN ++*/ ++#define ODN_PDP_PDISETUP_PDI_EN_MASK (0x00000008) ++#define ODN_PDP_PDISETUP_PDI_EN_LSBMASK (0x00000001) ++#define
ODN_PDP_PDISETUP_PDI_EN_SHIFT (3) ++#define ODN_PDP_PDISETUP_PDI_EN_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_GDEN ++*/ ++#define ODN_PDP_PDISETUP_PDI_GDEN_MASK (0x00000004) ++#define ODN_PDP_PDISETUP_PDI_GDEN_LSBMASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_GDEN_SHIFT (2) ++#define ODN_PDP_PDISETUP_PDI_GDEN_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_NFEN ++*/ ++#define ODN_PDP_PDISETUP_PDI_NFEN_MASK (0x00000002) ++#define ODN_PDP_PDISETUP_PDI_NFEN_LSBMASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_NFEN_SHIFT (1) ++#define ODN_PDP_PDISETUP_PDI_NFEN_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_CR ++*/ ++#define ODN_PDP_PDISETUP_PDI_CR_MASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_CR_LSBMASK (0x00000001) ++#define ODN_PDP_PDISETUP_PDI_CR_SHIFT (0) ++#define ODN_PDP_PDISETUP_PDI_CR_LENGTH (1) ++#define ODN_PDP_PDISETUP_PDI_CR_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PDITIMING0_OFFSET (0x0904) ++ ++/* PDP, PDITIMING0, PDI_PWRSVGD ++*/ ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_MASK (0x0F000000) ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK (0x0000000F) ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SHIFT (24) ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_LENGTH (4) ++#define ODN_PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDITIMING0, PDI_LSDEL ++*/ ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_MASK (0x007F0000) ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_LSBMASK (0x0000007F) ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_SHIFT (16) ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_LENGTH (7) ++#define ODN_PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDITIMING0, PDI_PWRSV2GD2 ++*/ ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_MASK (0x000003FF) ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK (0x000003FF) ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT (0) ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH (10) ++#define ODN_PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PDITIMING1_OFFSET (0x0908) ++ ++/* PDP, PDITIMING1, PDI_NLDEL ++*/ ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_MASK (0x000F0000) ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_LSBMASK (0x0000000F) ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_SHIFT (16) ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_LENGTH (4) ++#define ODN_PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDITIMING1, PDI_ACBDEL ++*/ ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_MASK (0x000003FF) ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LSBMASK (0x000003FF) ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SHIFT (0) ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_LENGTH (10) ++#define ODN_PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PDICOREID_OFFSET (0x090C) ++ ++/* PDP, PDICOREID, PDI_GROUP_ID ++*/ ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_MASK (0xFF000000) ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SHIFT (24) ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_LENGTH (8) ++#define ODN_PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREID, PDI_CORE_ID ++*/ ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_MASK (0x00FF0000) ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_SHIFT (16) ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_LENGTH (8) ++#define ODN_PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREID, PDI_CONFIG_ID ++*/ ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_MASK 
(0x0000FFFF) ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK (0x0000FFFF) ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SHIFT (0) ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_LENGTH (16) ++#define ODN_PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_PDICOREREV_OFFSET (0x0910) ++ ++/* PDP, PDICOREREV, PDI_MAJOR_REV ++*/ ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_MASK (0x00FF0000) ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT (16) ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH (8) ++#define ODN_PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREREV, PDI_MINOR_REV ++*/ ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_MASK (0x0000FF00) ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SHIFT (8) ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_LENGTH (8) ++#define ODN_PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREREV, PDI_MAINT_REV ++*/ ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_MASK (0x000000FF) ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK (0x000000FF) ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SHIFT (0) ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_LENGTH (8) ++#define ODN_PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX2_OFFSET (0x0920) ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK (0x000000C0) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK (0x00000030) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT (4) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK (0x0000000C) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK (0x00000003) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH (2) ++#define ODN_PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX4_0_OFFSET (0x0924) ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK (0xF0000000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT (28) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK (0x0F000000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK (0x0000000F) 
++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK (0x00F00000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT (20) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK (0x000F0000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT (16) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK (0x0000F000) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK (0x00000F00) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT (8) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK (0x000000F0) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX4_1_OFFSET (0x0928) ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK (0xF0000000) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT (28) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK (0x0F000000) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK (0x00F00000) 
++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT (20) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK (0x000F0000) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT (16) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK (0x0000F000) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK (0x00000F00) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT (8) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK (0x000000F0) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK (0x0000000F) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH (4) ++#define ODN_PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_0_OFFSET (0x092C) ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0 ++*/ 
++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_1_OFFSET (0x0930) ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0 ++*/ ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_2_OFFSET (0x0934) ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH (6) 
++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_3_OFFSET (0x0938) ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1 ++*/ ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_4_OFFSET (0x093C) ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK (0x0000003F) 
++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2 ++*/ ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_5_OFFSET (0x0940) ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK (0x0000003F) 
++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_6_OFFSET (0x0944) ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3 ++*/ ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_7_OFFSET (0x0948) ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD IMG_FALSE 
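
Every field in the tables above is described by the same generated quadruple: a _MASK giving the field in its register position, an _LSBMASK (the same mask shifted down to bit 0, so MASK equals LSBMASK << SHIFT), a _SHIFT, and a _LENGTH in bits. As a minimal sketch of how such definitions are typically consumed (the helper macros and function below are illustrative only and are not part of this patch), a 6-bit dither coefficient can be extracted and replaced like this:

#include <stdint.h>

/* Illustrative helpers, not defined in this header: generic field
 * extract/insert built on the generated _MASK/_SHIFT pairs via
 * token pasting.
 */
#define PDP_FIELD_GET(reg, F) (((reg) & F##_MASK) >> F##_SHIFT)
#define PDP_FIELD_SET(reg, F, v) \
	(((reg) & ~F##_MASK) | (((uint32_t)(v) << F##_SHIFT) & F##_MASK))

/* Replace the X5Y4 coefficient (bits [17:12]) in a DITHERMATRIX8_7
 * register value read from the device.
 */
static inline uint32_t dither8_7_set_x5y4(uint32_t reg, uint32_t coeff)
{
	return PDP_FIELD_SET(reg, ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4, coeff);
}
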
++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4 ++*/ ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_8_OFFSET (0x094C) ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_9_OFFSET (0x0950) ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT (18) ++#define 
ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5 ++*/ ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_10_OFFSET (0x0954) ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK (0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_11_OFFSET (0x0958) ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK 
(0x3F000000) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT (24) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6 ++*/ ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_DITHERMATRIX8_12_OFFSET (0x095C) ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK (0x00FC0000) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT (18) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK (0x0003F000) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT (12) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK (0x00000FC0) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7 ++*/ ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK (0x0000003F) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT (0) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH (6) ++#define ODN_PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD 
IMG_FALSE ++ ++#define ODN_PDP_GRPH1_MEMCTRL_OFFSET (0x0960) ++ ++/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN ++*/ ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT (0) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH (8) ++#define ODN_PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1_MEM_THRESH_OFFSET (0x0964) ++ ++/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD ++*/ ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD ++*/ ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD ++*/ ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT (0) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2_MEMCTRL_OFFSET (0x0968) ++ ++/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN ++*/ ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT (0) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH (8) ++#define ODN_PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2_MEM_THRESH_OFFSET (0x096C) ++ ++/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD ++*/ ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD ++*/ ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK (0x001FF000) 
++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD ++*/ ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT (0) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3_MEMCTRL_OFFSET (0x0970) ++ ++/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN ++*/ ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT (0) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH (8) ++#define ODN_PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3_MEM_THRESH_OFFSET (0x0974) ++ ++/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD ++*/ ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD ++*/ ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD ++*/ ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT (0) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4_MEMCTRL_OFFSET (0x0978) ++ ++/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN ++*/ ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT (0) ++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH (8) 
++#define ODN_PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4_MEM_THRESH_OFFSET (0x097C) ++ ++/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD ++*/ ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD ++*/ ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD ++*/ ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT (0) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH (9) ++#define ODN_PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1_MEMCTRL_OFFSET (0x0980) ++ ++/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_MEMCTRL, VID1_BURSTLEN ++*/ ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT (0) ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH (8) ++#define ODN_PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1_MEM_THRESH_OFFSET (0x0984) ++ ++/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD ++*/ ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD ++*/ ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD ++*/ ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT (0) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH (9) ++#define ODN_PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2_MEMCTRL_OFFSET (0x0988) ++ ++/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define 
ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_MEMCTRL, VID2_BURSTLEN ++*/ ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT (0) ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH (8) ++#define ODN_PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2_MEM_THRESH_OFFSET (0x098C) ++ ++/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD ++*/ ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD ++*/ ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD ++*/ ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT (0) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH (9) ++#define ODN_PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3_MEMCTRL_OFFSET (0x0990) ++ ++/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_MEMCTRL, VID3_BURSTLEN ++*/ ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT (0) ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH (8) ++#define ODN_PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3_MEM_THRESH_OFFSET (0x0994) ++ ++/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD ++*/ ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD ++*/ ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD ++*/ 
++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT (0) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH (9) ++#define ODN_PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4_MEMCTRL_OFFSET (0x0998) ++ ++/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define ODN_PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_MEMCTRL, VID4_BURSTLEN ++*/ ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK (0x000000FF) ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT (0) ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH (8) ++#define ODN_PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4_MEM_THRESH_OFFSET (0x099C) ++ ++/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD ++*/ ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK (0xFF000000) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT (24) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH (8) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD ++*/ ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK (0x001FF000) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT (12) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH (9) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD ++*/ ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK (0x000001FF) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK (0x000001FF) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT (0) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH (9) ++#define ODN_PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH1_PANIC_THRESH_OFFSET (0x09A0) ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define 
ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH2_PANIC_THRESH_OFFSET (0x09A4) ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ 
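
Each PANIC_THRESH register packs, per channel, an enable bit plus a min/max watermark pair. A hedged sketch of composing the GRPH2 UV alert window from the macros just defined follows; the function is illustrative only, and how the result reaches the hardware (a 32-bit write at ODN_PDP_GRPH2_PANIC_THRESH_OFFSET) is left to the surrounding driver:

#include <stdint.h>

/* Illustrative only: build the GRPH2 UV panic/alert window value.
 * max7/min7 are 7-bit watermarks; out-of-range bits are masked off
 * with the generated _LSBMASK before shifting into position.
 */
static inline uint32_t grph2_uv_alert(uint32_t max7, uint32_t min7)
{
	uint32_t v = 0;

	v |= 1u << ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT;
	v |= (max7 & ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK)
		<< ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT;
	v |= (min7 & ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK)
		<< ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT;
	return v;
}
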
++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH3_PANIC_THRESH_OFFSET (0x09A8) ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define 
ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_GRPH4_PANIC_THRESH_OFFSET (0x09AC) ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID1_PANIC_THRESH_OFFSET (0x09B0) ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ 
++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID2_PANIC_THRESH_OFFSET (0x09B4) ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define 
ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID3_PANIC_THRESH_OFFSET (0x09B8) ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define 
ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define ODN_PDP_VID4_PANIC_THRESH_OFFSET (0x09BC) ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK (0x80000000) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT (31) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH (1) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK (0x40000000) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT (30) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH (1) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN ++*/ ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define ODN_PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define 
ODN_PDP_BURST_BOUNDARY_OFFSET (0x09C0)
++
++/* PDP, BURST_BOUNDARY, BURST_BOUNDARY
++*/
++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK (0x0000003F)
++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK (0x0000003F)
++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT (0)
++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH (6)
++#define ODN_PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD IMG_FALSE
++
++
++/* ---------------------- End of register definitions ---------------------- */
++
++/* NUMREG defines the extent of register address space.
++*/
++
++#define ODN_PDP_NUMREG ((0x09C0 >> 2)+1)
++
++/* Info about video plane addresses */
++#define ODN_PDP_YADDR_BITS (ODN_PDP_VID1BASEADDR_VID1BASEADDR_LENGTH)
++#define ODN_PDP_YADDR_ALIGN 5
++#define ODN_PDP_UADDR_BITS (ODN_PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH)
++#define ODN_PDP_UADDR_ALIGN 5
++#define ODN_PDP_VADDR_BITS (ODN_PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH)
++#define ODN_PDP_VADDR_ALIGN 5
++
++#define ODN_PDP_YSTRIDE_BITS (ODN_PDP_VID1STRIDE_VID1STRIDE_LENGTH)
++#define ODN_PDP_YSTRIDE_ALIGN 5
++
++#define ODN_PDP_MAX_INPUT_WIDTH (ODN_PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1)
++#define ODN_PDP_MAX_INPUT_HEIGHT (ODN_PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1)
++
++/* Maximum 6 bytes per pixel for RGB161616 */
++#define ODN_PDP_MAX_IMAGE_BYTES (ODN_PDP_MAX_INPUT_WIDTH * ODN_PDP_MAX_INPUT_HEIGHT * 6)
++
++/* Round up */
++#define ODN_PDP_MAX_IMAGE_PAGES ((ODN_PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE)
++
++#define ODN_PDP_YADDR_MAX (((1 << ODN_PDP_YADDR_BITS) - 1) << ODN_PDP_YADDR_ALIGN)
++#define ODN_PDP_UADDR_MAX (((1 << ODN_PDP_UADDR_BITS) - 1) << ODN_PDP_UADDR_ALIGN)
++#define ODN_PDP_VADDR_MAX (((1 << ODN_PDP_VADDR_BITS) - 1) << ODN_PDP_VADDR_ALIGN)
++#define ODN_PDP_YSTRIDE_MAX ((1 << ODN_PDP_YSTRIDE_BITS) << ODN_PDP_YSTRIDE_ALIGN)
++#define ODN_PDP_YADDR_ALIGNMASK ((1 << ODN_PDP_YADDR_ALIGN) - 1)
++#define ODN_PDP_UADDR_ALIGNMASK ((1 << ODN_PDP_UADDR_ALIGN) - 1)
++#define ODN_PDP_VADDR_ALIGNMASK ((1 << ODN_PDP_VADDR_ALIGN) - 1)
++#define ODN_PDP_YSTRIDE_ALIGNMASK ((1 << ODN_PDP_YSTRIDE_ALIGN) - 1)
++
++/* Field Values (some are reserved for future use) */
++#define ODN_PDP_SURF_PIXFMT_RGB332 0x3
++#define ODN_PDP_SURF_PIXFMT_ARGB4444 0x4
++#define ODN_PDP_SURF_PIXFMT_ARGB1555 0x5
++#define ODN_PDP_SURF_PIXFMT_RGB888 0x6
++#define ODN_PDP_SURF_PIXFMT_RGB565 0x7
++#define ODN_PDP_SURF_PIXFMT_ARGB8888 0x8
++#define ODN_PDP_SURF_PIXFMT_420_PL8 0x9
++#define ODN_PDP_SURF_PIXFMT_420_PL8IVU 0xA
++#define ODN_PDP_SURF_PIXFMT_420_PL8IUV 0xB
++#define ODN_PDP_SURF_PIXFMT_422_UY0VY1_8888 0xC
++#define ODN_PDP_SURF_PIXFMT_422_VY0UY1_8888 0xD
++#define ODN_PDP_SURF_PIXFMT_422_Y0UY1V_8888 0xE
++#define ODN_PDP_SURF_PIXFMT_422_Y0VY1U_8888 0xF
++#define ODN_PDP_SURF_PIXFMT_AYUV8888 0x10
++#define ODN_PDP_SURF_PIXFMT_YUV101010 0x15
++#define ODN_PDP_SURF_PIXFMT_RGB101010 0x17
++#define ODN_PDP_SURF_PIXFMT_420_PL10IUV 0x18
++#define ODN_PDP_SURF_PIXFMT_420_PL10IVU 0x19
++#define ODN_PDP_SURF_PIXFMT_422_PL10IUV 0x1A
++#define ODN_PDP_SURF_PIXFMT_422_PL10IVU 0x1B
++#define ODN_PDP_SURF_PIXFMT_RGB121212 0x1E
++#define ODN_PDP_SURF_PIXFMT_RGB161616 0x1F
++
++#define ODN_PDP_CTRL_CKEYSRC_PREV 0x0
++#define ODN_PDP_CTRL_CKEYSRC_CUR 0x1
++
++#define ODN_PDP_MEMCTRL_MEMREFRESH_ALWAYS 0x0
++#define ODN_PDP_MEMCTRL_MEMREFRESH_HBLNK 0x1
++#define ODN_PDP_MEMCTRL_MEMREFRESH_VBLNK 0x2
++#define ODN_PDP_MEMCTRL_MEMREFRESH_BOTH 0x3
++
++#define ODN_PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0 0x0
++#define ODN_PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1 0x1
++#define ODN_PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2 0x2
++#define ODN_PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3 0x3
++#define ODN_PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4 0x4
++#define ODN_PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5 0x5
++#define ODN_PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6 0x6
++#define ODN_PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7 0x7
++
++#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE 0x0
++#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1
++#define ODN_PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2
++
++#define ODN_PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1
++#define ODN_PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS 10
++
++/*---------------------------------------------------------------------------*/
++
++#endif /* ODN_PDP_REGS_H */
+diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_regs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_regs.h
+new file mode 100644
+index 000000000000..5d5821623ac7
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/odin_regs.h
+@@ -0,0 +1,1026 @@
++/******************************************************************************
++@Title Odin system control register definitions
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Odin FPGA register defs for IMG 3rd generation TCF
++
++ Auto generated headers, eg. odn_core.h:
++ regconv -d . -a 8 odn_core.def
++
++ Source files :
++ odn_core.def
++ mca_debug.def
++ sai_rx_debug.def
++ sai_tx_debug.def
++ ad_tx.def
++
++ Changes:
++ Removed obsolete copyright dates
++ Changed lower case to upper case
++ (eg. odn_core changed to ODN_CORE)
++ Changed PVR5__ to ODN_
++ Merged multiple .def files into one header
++
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++******************************************************************************/
++
++/* tab size 4 */
++
++#ifndef _ODIN_REGS_H_
++#define _ODIN_REGS_H_
++
++/******************************
++ Generated from: odn_core.def
++*******************************/
++
++/*
++ Register ID
++*/
++#define ODN_CORE_ID 0x0000
++#define ODN_ID_VARIANT_MASK 0x0000FFFFU
++#define ODN_ID_VARIANT_SHIFT 0
++#define ODN_ID_VARIANT_SIGNED 0
++
++#define ODN_ID_ID_MASK 0xFFFF0000U
++#define ODN_ID_ID_SHIFT 16
++#define ODN_ID_ID_SIGNED 0
++
++/*
++ Register REL
++*/
++#define ODN_CORE_REL 0x0004
++#define ODN_REL_MINOR_MASK 0x0000FFFFU
++#define ODN_REL_MINOR_SHIFT 0
++#define ODN_REL_MINOR_SIGNED 0
++
++#define ODN_REL_MAJOR_MASK 0xFFFF0000U
++#define ODN_REL_MAJOR_SHIFT 16
++#define ODN_REL_MAJOR_SIGNED 0
++
++/*
++ Register CHANGE_SET
++*/
++#define ODN_CORE_CHANGE_SET 0x0008
++#define ODN_CHANGE_SET_SET_MASK 0xFFFFFFFFU
++#define ODN_CHANGE_SET_SET_SHIFT 0
++#define ODN_CHANGE_SET_SET_SIGNED 0
++
++/*
++ Register USER_ID
++*/
++#define ODN_CORE_USER_ID 0x000C
++#define ODN_USER_ID_ID_MASK 0x000000FFU
++#define ODN_USER_ID_ID_SHIFT 0
++#define ODN_USER_ID_ID_SIGNED 0
++
++/*
++ Register USER_BUILD
++*/
++#define ODN_CORE_USER_BUILD 0x0010
++#define ODN_USER_BUILD_BUILD_MASK 0xFFFFFFFFU
++#define ODN_USER_BUILD_BUILD_SHIFT 0
++#define ODN_USER_BUILD_BUILD_SIGNED 0
++
++/*
++ Register SW_IF_VERSION
++*/
++#define ODN_CORE_SW_IF_VERSION 0x0014
++#define ODN_SW_IF_VERSION_VERSION_MASK 0x0000FFFFU
++#define ODN_SW_IF_VERSION_VERSION_SHIFT 0
++#define ODN_SW_IF_VERSION_VERSION_SIGNED 0
++
++/*
++ Register INTERNAL_RESETN
++*/
++#define ODN_CORE_INTERNAL_RESETN 0x0080
++#define ODN_INTERNAL_RESETN_DDR_MASK 0x00000001U
++#define ODN_INTERNAL_RESETN_DDR_SHIFT 0
++#define ODN_INTERNAL_RESETN_DDR_SIGNED 0
++
++#define ODN_INTERNAL_RESETN_MIG0_MASK 0x00000002U
++#define ODN_INTERNAL_RESETN_MIG0_SHIFT 1
++#define ODN_INTERNAL_RESETN_MIG0_SIGNED 0
++
++#define ODN_INTERNAL_RESETN_MIG1_MASK 0x00000004U
++#define ODN_INTERNAL_RESETN_MIG1_SHIFT 2
++#define ODN_INTERNAL_RESETN_MIG1_SIGNED 0
++
++#define ODN_INTERNAL_RESETN_PDP1_MASK 0x00000008U
++#define ODN_INTERNAL_RESETN_PDP1_SHIFT 3
++#define ODN_INTERNAL_RESETN_PDP1_SIGNED 0
++
++#define ODN_INTERNAL_RESETN_PDP2_MASK 0x00000010U
++#define ODN_INTERNAL_RESETN_PDP2_SHIFT 4
++#define ODN_INTERNAL_RESETN_PDP2_SIGNED 0
++
++#define ODN_INTERNAL_RESETN_PERIP_MASK 0x00000020U
++#define ODN_INTERNAL_RESETN_PERIP_SHIFT 5
++#define ODN_INTERNAL_RESETN_PERIP_SIGNED 0
++
++#define ODN_INTERNAL_RESETN_GIST_MASK 0x00000040U
++#define ODN_INTERNAL_RESETN_GIST_SHIFT 6
++#define ODN_INTERNAL_RESETN_GIST_SIGNED 0
++
++#define ODN_INTERNAL_RESETN_PIKE_MASK 0x00000080U
++#define ODN_INTERNAL_RESETN_PIKE_SHIFT 7
++#define ODN_INTERNAL_RESETN_PIKE_SIGNED 0
++
++/*
++ Register EXTERNAL_RESETN
++*/
++#define ODN_CORE_EXTERNAL_RESETN 0x0084
++#define ODN_EXTERNAL_RESETN_DUT_MASK 0x00000001U
++#define ODN_EXTERNAL_RESETN_DUT_SHIFT 0
++#define ODN_EXTERNAL_RESETN_DUT_SIGNED 0
++
++#define ODN_EXTERNAL_RESETN_DUT_SPI_MASK 0x00000002U
++#define ODN_EXTERNAL_RESETN_DUT_SPI_SHIFT 1
++#define ODN_EXTERNAL_RESETN_DUT_SPI_SIGNED 0
++
++#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_MASK 0x00000004U
++#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_SHIFT 2
++#define ODN_EXTERNAL_RESETN_DUT_PEP_DDR_SIGNED 0
++
++#define ODN_EXTERNAL_RESETN_DUT_IF_MASK 0x00000008U
++#define ODN_EXTERNAL_RESETN_DUT_IF_SHIFT 3
++#define ODN_EXTERNAL_RESETN_DUT_IF_SIGNED 0
++
++#define ODN_EXTERNAL_RESETN_DUT1_MASK 0x00000010U
++#define ODN_EXTERNAL_RESETN_DUT1_SHIFT 4
++#define ODN_EXTERNAL_RESETN_DUT1_SIGNED 0
++
++#define ODN_EXTERNAL_RESETN_DUT2_MASK 0x00000020U
++#define ODN_EXTERNAL_RESETN_DUT2_SHIFT 5
++#define ODN_EXTERNAL_RESETN_DUT2_SIGNED 0
++
++/*
++ Register EXTERNAL_RESET
++*/
++#define ODN_CORE_EXTERNAL_RESET 0x0088
++#define ODN_EXTERNAL_RESET_PVT_CAL_MASK 0x00000001U
++#define ODN_EXTERNAL_RESET_PVT_CAL_SHIFT 0
++#define ODN_EXTERNAL_RESET_PVT_CAL_SIGNED 0
++
++#define ODN_EXTERNAL_RESET_PLL_MASK 0x00000002U
++#define ODN_EXTERNAL_RESET_PLL_SHIFT 1
++#define ODN_EXTERNAL_RESET_PLL_SIGNED 0
++
++/*
++ Register INTERNAL_AUTO_RESETN
++*/
++#define ODN_CORE_INTERNAL_AUTO_RESETN 0x008C
++#define ODN_INTERNAL_AUTO_RESETN_AUX_MASK 0x00000001U
++#define ODN_INTERNAL_AUTO_RESETN_AUX_SHIFT 0
++#define ODN_INTERNAL_AUTO_RESETN_AUX_SIGNED 0
++
++/*
++ Register CLK_GEN_RESET
++*/
++#define ODN_CORE_CLK_GEN_RESET 0x0090
++#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_MASK 0x00000001U
++#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT 0
++#define ODN_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED 0
++
++#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_MASK 0x00000002U
++#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT 1
++#define ODN_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED 0
++
++#define ODN_CLK_GEN_RESET_MULTI_MMCM_MASK 0x00000004U
++#define ODN_CLK_GEN_RESET_MULTI_MMCM_SHIFT 2
++#define ODN_CLK_GEN_RESET_MULTI_MMCM_SIGNED 0
++
++#define ODN_CLK_GEN_RESET_PDP_MMCM_MASK 0x00000008U
++#define ODN_CLK_GEN_RESET_PDP_MMCM_SHIFT 3
++#define ODN_CLK_GEN_RESET_PDP_MMCM_SIGNED 0
++
++/*
++ Register INTERRUPT_STATUS
++*/
++#define ODN_CORE_INTERRUPT_STATUS 0x0100
++#define ODN_INTERRUPT_STATUS_DUT_MASK 0x00000001U
++#define ODN_INTERRUPT_STATUS_DUT_SHIFT 0
++#define ODN_INTERRUPT_STATUS_DUT_SIGNED 0
++
++#define ODN_INTERRUPT_STATUS_PDP1_MASK 0x00000002U
++#define ODN_INTERRUPT_STATUS_PDP1_SHIFT 1
++#define ODN_INTERRUPT_STATUS_PDP1_SIGNED 0
++
++#define ODN_INTERRUPT_STATUS_PDP2_MASK 0x00000004U
++#define ODN_INTERRUPT_STATUS_PDP2_SHIFT 2
++#define ODN_INTERRUPT_STATUS_PDP2_SIGNED 0
++
++#define ODN_INTERRUPT_STATUS_PERIP_MASK 0x00000008U
++#define ODN_INTERRUPT_STATUS_PERIP_SHIFT 3
++#define ODN_INTERRUPT_STATUS_PERIP_SIGNED 0
++
++#define ODN_INTERRUPT_STATUS_UART_MASK 0x00000010U
++#define ODN_INTERRUPT_STATUS_UART_SHIFT 4
++#define ODN_INTERRUPT_STATUS_UART_SIGNED 0
++
++#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_MASK 0x00000020U
++#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SHIFT 5
++#define ODN_INTERRUPT_STATUS_GIST_IN_LNK_ERR_SIGNED 0
++
++#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_MASK 0x00000040U
++#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SHIFT 6
++#define ODN_INTERRUPT_STATUS_GIST_IN_MB_ERR_SIGNED 0
++
++#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_MASK 0x00000080U
++#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SHIFT 7
++#define ODN_INTERRUPT_STATUS_GIST_OUT_LNK_ERR_SIGNED 0
++
++#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_MASK 0x00000100U
++#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SHIFT 8
++#define ODN_INTERRUPT_STATUS_GIST_OUT_MB_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_DUT2_MASK 0x00000200U ++#define ODN_INTERRUPT_STATUS_DUT2_SHIFT 9 ++#define ODN_INTERRUPT_STATUS_DUT2_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_MASK 0x00000400U ++#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_SHIFT 10 ++#define ODN_INTERRUPT_STATUS_AXI_LOCKUP_PROTECTION_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_CDMA_MASK 0x00001800U ++#define ODN_INTERRUPT_STATUS_CDMA_SHIFT 11 ++#define ODN_INTERRUPT_STATUS_CDMA_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_OS_IRQ_MASK 0x001FE000U ++#define ODN_INTERRUPT_STATUS_OS_IRQ_SHIFT 13 ++#define ODN_INTERRUPT_STATUS_OS_IRQ_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_IRQ_TEST_MASK 0x40000000U ++#define ODN_INTERRUPT_STATUS_IRQ_TEST_SHIFT 30 ++#define ODN_INTERRUPT_STATUS_IRQ_TEST_SIGNED 0 ++ ++#define ODN_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U ++#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31 ++#define ODN_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0 ++ ++/* ++ Register INTERRUPT_ENABLE ++*/ ++#define ODN_CORE_INTERRUPT_ENABLE 0x0104 ++#define ODN_INTERRUPT_ENABLE_DUT_MASK 0x00000001U ++#define ODN_INTERRUPT_ENABLE_DUT_SHIFT 0 ++#define ODN_INTERRUPT_ENABLE_DUT_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_PDP1_MASK 0x00000002U ++#define ODN_INTERRUPT_ENABLE_PDP1_SHIFT 1 ++#define ODN_INTERRUPT_ENABLE_PDP1_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_PDP2_MASK 0x00000004U ++#define ODN_INTERRUPT_ENABLE_PDP2_SHIFT 2 ++#define ODN_INTERRUPT_ENABLE_PDP2_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_PERIP_MASK 0x00000008U ++#define ODN_INTERRUPT_ENABLE_PERIP_SHIFT 3 ++#define ODN_INTERRUPT_ENABLE_PERIP_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_UART_MASK 0x00000010U ++#define ODN_INTERRUPT_ENABLE_UART_SHIFT 4 ++#define ODN_INTERRUPT_ENABLE_UART_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_MASK 0x00000020U ++#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SHIFT 5 ++#define ODN_INTERRUPT_ENABLE_GIST_IN_LNK_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_MASK 0x00000040U ++#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SHIFT 6 ++#define ODN_INTERRUPT_ENABLE_GIST_IN_MB_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_MASK 0x00000080U ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SHIFT 7 ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_LNK_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_MASK 0x00000100U ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SHIFT 8 ++#define ODN_INTERRUPT_ENABLE_GIST_OUT_MB_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_DUT2_MASK 0x00000200U ++#define ODN_INTERRUPT_ENABLE_DUT2_SHIFT 9 ++#define ODN_INTERRUPT_ENABLE_DUT2_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_MASK 0x00000400U ++#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_SHIFT 10 ++#define ODN_INTERRUPT_ENABLE_AXI_LOCKUP_PROTECTION_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_CDMA_MASK 0x00001800U ++#define ODN_INTERRUPT_ENABLE_CDMA_SHIFT 11 ++#define ODN_INTERRUPT_ENABLE_CDMA_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_OS_IRQ_MASK 0x001FE000U ++#define ODN_INTERRUPT_ENABLE_OS_IRQ_SHIFT 13 ++#define ODN_INTERRUPT_ENABLE_OS_IRQ_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_IRQ_TEST_MASK 0x40000000U ++#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SHIFT 30 ++#define ODN_INTERRUPT_ENABLE_IRQ_TEST_SIGNED 0 ++ ++#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U ++#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31 ++#define ODN_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_CLR 
++*/ ++#define ODN_CORE_INTERRUPT_CLR 0x010C ++#define ODN_INTERRUPT_CLR_DUT_MASK 0x00000001U ++#define ODN_INTERRUPT_CLR_DUT_SHIFT 0 ++#define ODN_INTERRUPT_CLR_DUT_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_PDP1_MASK 0x00000002U ++#define ODN_INTERRUPT_CLR_PDP1_SHIFT 1 ++#define ODN_INTERRUPT_CLR_PDP1_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_PDP2_MASK 0x00000004U ++#define ODN_INTERRUPT_CLR_PDP2_SHIFT 2 ++#define ODN_INTERRUPT_CLR_PDP2_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_PERIP_MASK 0x00000008U ++#define ODN_INTERRUPT_CLR_PERIP_SHIFT 3 ++#define ODN_INTERRUPT_CLR_PERIP_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_UART_MASK 0x00000010U ++#define ODN_INTERRUPT_CLR_UART_SHIFT 4 ++#define ODN_INTERRUPT_CLR_UART_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_MASK 0x00000020U ++#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SHIFT 5 ++#define ODN_INTERRUPT_CLR_GIST_IN_LNK_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_MASK 0x00000040U ++#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SHIFT 6 ++#define ODN_INTERRUPT_CLR_GIST_IN_MB_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_MASK 0x00000080U ++#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SHIFT 7 ++#define ODN_INTERRUPT_CLR_GIST_OUT_LNK_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_MASK 0x00000100U ++#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SHIFT 8 ++#define ODN_INTERRUPT_CLR_GIST_OUT_MB_ERR_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_DUT2_MASK 0x00000200U ++#define ODN_INTERRUPT_CLR_DUT2_SHIFT 9 ++#define ODN_INTERRUPT_CLR_DUT2_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_MASK 0x00000400U ++#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_SHIFT 10 ++#define ODN_INTERRUPT_CLR_AXI_LOCKUP_PROTECTION_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_CDMA_MASK 0x00001800U ++#define ODN_INTERRUPT_CLR_CDMA_SHIFT 11 ++#define ODN_INTERRUPT_CLR_CDMA_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_OS_IRQ_MASK 0x001FE000U ++#define ODN_INTERRUPT_CLR_OS_IRQ_SHIFT 13 ++#define ODN_INTERRUPT_CLR_OS_IRQ_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_IRQ_TEST_MASK 0x40000000U ++#define ODN_INTERRUPT_CLR_IRQ_TEST_SHIFT 30 ++#define ODN_INTERRUPT_CLR_IRQ_TEST_SIGNED 0 ++ ++#define ODN_INTERRUPT_CLR_MASTER_CLEAR_MASK 0x80000000U ++#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SHIFT 31 ++#define ODN_INTERRUPT_CLR_MASTER_CLEAR_SIGNED 0 ++ ++/* ++ Register INTERRUPT_TEST ++*/ ++#define ODN_CORE_INTERRUPT_TEST 0x0110 ++#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_MASK 0x00000001U ++#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0 ++#define ODN_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0 ++ ++/* ++ Register INTERRUPT_TIMEOUT_CLR ++*/ ++#define ODN_CORE_INTERRUPT_TIMEOUT_CLR 0x0114 ++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK 0x00000002U ++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SHIFT 1 ++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SIGNED 0 ++ ++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_MASK 0x00000001U ++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SHIFT 0 ++#define ODN_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SIGNED 0 ++ ++/* ++ Register INTERRUPT_TIMEOUT ++*/ ++#define ODN_CORE_INTERRUPT_TIMEOUT 0x0118 ++#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_MASK 0xFFFFFFFFU ++#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SHIFT 0 ++#define ODN_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SIGNED 0 ++/* ++ Register SYSTEM_ID ++*/ ++#define ODN_CORE_SYSTEM_ID 0x011C ++#define ODN_SYSTEM_ID_ID_MASK 0x0000FFFFU ++#define 
ODN_SYSTEM_ID_ID_SHIFT 0 ++#define ODN_SYSTEM_ID_ID_SIGNED 0 ++ ++/* ++ Register SUPPORTED_FEATURES ++*/ ++#define ODN_CORE_SUPPORTED_FEATURES 0x0120 ++#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FREATURES_MASK 0xFFFFFFFEU ++#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FREATURES_SHIFT 1 ++#define ODN_SUPPORTED_FEATURES_UNIMPLEMENTED_FREATURES_SIGNED 0 ++ ++#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_MASK 0x00000001U ++#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_SHIFT 0 ++#define ODN_SUPPORTED_FEATURES_2X_CDMA_AND_IRQS_SIGNED 0 ++ ++/* ++ Register NUM_GPIO ++*/ ++#define ODN_CORE_NUM_GPIO 0x0180 ++#define ODN_NUM_GPIO_NUMBER_MASK 0x0000000FU ++#define ODN_NUM_GPIO_NUMBER_SHIFT 0 ++#define ODN_NUM_GPIO_NUMBER_SIGNED 0 ++ ++/* ++ Register GPIO_EN ++*/ ++#define ODN_CORE_GPIO_EN 0x0184 ++#define ODN_GPIO_EN_DIRECTION_MASK 0x000000FFU ++#define ODN_GPIO_EN_DIRECTION_SHIFT 0 ++#define ODN_GPIO_EN_DIRECTION_SIGNED 0 ++ ++/* ++ Register GPIO ++*/ ++#define ODN_CORE_GPIO 0x0188 ++#define ODN_GPIO_GPIO_MASK 0x000000FFU ++#define ODN_GPIO_GPIO_SHIFT 0 ++#define ODN_GPIO_GPIO_SIGNED 0 ++ ++/* ++ Register NUM_DUT_CTRL ++*/ ++#define ODN_CORE_NUM_DUT_CTRL 0x0190 ++#define ODN_NUM_DUT_CTRL_NUM_PINS_MASK 0xFFFFFFFFU ++#define ODN_NUM_DUT_CTRL_NUM_PINS_SHIFT 0 ++#define ODN_NUM_DUT_CTRL_NUM_PINS_SIGNED 0 ++ ++/* ++ Register DUT_CTRL1 ++*/ ++#define ODN_CORE_DUT_CTRL1 0x0194 ++#define ODN_DUT_CTRL1_CONTROL1_MASK 0x3FFFFFFFU ++#define ODN_DUT_CTRL1_CONTROL1_SHIFT 0 ++#define ODN_DUT_CTRL1_CONTROL1_SIGNED 0 ++ ++#define ODN_DUT_CTRL1_FBDC_BYPASS_MASK 0x40000000U ++#define ODN_DUT_CTRL1_FBDC_BYPASS_SHIFT 30 ++#define ODN_DUT_CTRL1_FBDC_BYPASS_SIGNED 0 ++ ++#define ODN_DUT_CTRL1_DUT_MST_OFFSET_MASK 0x80000000U ++#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SHIFT 31 ++#define ODN_DUT_CTRL1_DUT_MST_OFFSET_SIGNED 0 ++ ++/* ++ Register DUT_CTRL2 ++*/ ++#define ODN_CORE_DUT_CTRL2 0x0198 ++#define ODN_DUT_CTRL2_CONTROL2_MASK 0xFFFFFFFFU ++#define ODN_DUT_CTRL2_CONTROL2_SHIFT 0 ++#define ODN_DUT_CTRL2_CONTROL2_SIGNED 0 ++ ++/* ++ Register NUM_DUT_STAT ++*/ ++#define ODN_CORE_NUM_DUT_STAT 0x019C ++#define ODN_NUM_DUT_STAT_NUM_PINS_MASK 0xFFFFFFFFU ++#define ODN_NUM_DUT_STAT_NUM_PINS_SHIFT 0 ++#define ODN_NUM_DUT_STAT_NUM_PINS_SIGNED 0 ++ ++/* ++ Register DUT_STAT1 ++*/ ++#define ODN_CORE_DUT_STAT1 0x01A0 ++#define ODN_DUT_STAT1_STATUS1_MASK 0xFFFFFFFFU ++#define ODN_DUT_STAT1_STATUS1_SHIFT 0 ++#define ODN_DUT_STAT1_STATUS1_SIGNED 0 ++ ++/* ++ Register DUT_STAT2 ++*/ ++#define ODN_CORE_DUT_STAT2 0x01A4 ++#define ODN_DUT_STAT2_STATUS2_MASK 0xFFFFFFFFU ++#define ODN_DUT_STAT2_STATUS2_SHIFT 0 ++#define ODN_DUT_STAT2_STATUS2_SIGNED 0 ++ ++/* ++ Register DASH_LEDS ++*/ ++#define ODN_CORE_DASH_LEDS 0x01A8 ++#define ODN_DASH_LEDS_REPA_MASK 0xFFF00000U ++#define ODN_DASH_LEDS_REPA_SHIFT 20 ++#define ODN_DASH_LEDS_REPA_SIGNED 0 ++ ++#define ODN_DASH_LEDS_PIKE_MASK 0x00000FFFU ++#define ODN_DASH_LEDS_PIKE_SHIFT 0 ++#define ODN_DASH_LEDS_PIKE_SIGNED 0 ++ ++/* ++ Register DUT_CLK_INFO ++*/ ++#define ODN_CORE_DUT_CLK_INFO 0x01B0 ++#define ODN_DUT_CLK_INFO_CORE_MASK 0x0000FFFFU ++#define ODN_DUT_CLK_INFO_CORE_SHIFT 0 ++#define ODN_DUT_CLK_INFO_CORE_SIGNED 0 ++ ++#define ODN_DUT_CLK_INFO_MEM_MASK 0xFFFF0000U ++#define ODN_DUT_CLK_INFO_MEM_SHIFT 16 ++#define ODN_DUT_CLK_INFO_MEM_SIGNED 0 ++ ++/* ++ Register DUT_CLK_PHSE ++*/ ++#define ODN_CORE_DUT_CLK_PHSE 0x01B4 ++#define ODN_DUT_CLK_PHSE_MEM_REQ_MASK 0x0000FFFFU ++#define ODN_DUT_CLK_PHSE_MEM_REQ_SHIFT 0 ++#define ODN_DUT_CLK_PHSE_MEM_REQ_SIGNED 0 ++ ++#define 
ODN_DUT_CLK_PHSE_MEM_RD_MASK 0xFFFF0000U ++#define ODN_DUT_CLK_PHSE_MEM_RD_SHIFT 16 ++#define ODN_DUT_CLK_PHSE_MEM_RD_SIGNED 0 ++ ++/* ++ Register CORE_STATUS ++*/ ++#define ODN_CORE_CORE_STATUS 0x0200 ++#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_MASK 0x00000001U ++#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SHIFT 0 ++#define ODN_CORE_STATUS_PCIE_USER_LNK_UP_SIGNED 0 ++ ++#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_MASK 0x00000010U ++#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SHIFT 4 ++#define ODN_CORE_STATUS_MIG_C0_MMCM_LOCKED_SIGNED 0 ++ ++#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_MASK 0x00000020U ++#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SHIFT 5 ++#define ODN_CORE_STATUS_MIG_C0_INIT_CALIB_COMPLETE_SIGNED 0 ++ ++#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_MASK 0x00000040U ++#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SHIFT 6 ++#define ODN_CORE_STATUS_MIG_C1_MMCM_LOCKED_SIGNED 0 ++ ++#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_MASK 0x00000080U ++#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SHIFT 7 ++#define ODN_CORE_STATUS_MIG_C1_INIT_CALIB_COMPLETE_SIGNED 0 ++ ++#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_MASK 0x00000100U ++#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SHIFT 8 ++#define ODN_CORE_STATUS_PERIP_IMG2AXI_IDLE_SIGNED 0 ++ ++#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_MASK 0x00000200U ++#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SHIFT 9 ++#define ODN_CORE_STATUS_PERIP_AXI2IMG_IDLE_SIGNED 0 ++ ++#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00001000U ++#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 12 ++#define ODN_CORE_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0 ++ ++#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00002000U ++#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 13 ++#define ODN_CORE_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0 ++ ++/* ++ Register CORE_CONTROL ++*/ ++#define ODN_CORE_CORE_CONTROL 0x0204 ++#define ODN_CORE_CONTROL_BAR4_OFFSET_MASK 0x0000001FU ++#define ODN_CORE_CONTROL_BAR4_OFFSET_SHIFT 0 ++#define ODN_CORE_CONTROL_BAR4_OFFSET_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U ++#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8 ++#define ODN_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_HDMI_MODULE_EN_MASK 0x00001C00U ++#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SHIFT 10 ++#define ODN_CORE_CONTROL_HDMI_MODULE_EN_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_MASK 0x00002000U ++#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SHIFT 13 ++#define ODN_CORE_CONTROL_MCU_COMMUNICATOR_EN_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_PDP1_OFFSET_MASK 0x00070000U ++#define ODN_CORE_CONTROL_PDP1_OFFSET_SHIFT 16 ++#define ODN_CORE_CONTROL_PDP1_OFFSET_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_PDP2_OFFSET_MASK 0x00700000U ++#define ODN_CORE_CONTROL_PDP2_OFFSET_SHIFT 20 ++#define ODN_CORE_CONTROL_PDP2_OFFSET_SIGNED 0 ++ ++#define ODN_CORE_CONTROL_DUT_OFFSET_MASK 0x07000000U ++#define ODN_CORE_CONTROL_DUT_OFFSET_SHIFT 24 ++#define ODN_CORE_CONTROL_DUT_OFFSET_SIGNED 0 ++ ++/* ++ Register REG_BANK_STATUS ++*/ ++#define ODN_CORE_REG_BANK_STATUS 0x0208 ++#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU ++#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0 ++#define ODN_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0 ++ ++/* ++ Register MMCM_LOCK_STATUS ++*/ ++#define ODN_CORE_MMCM_LOCK_STATUS 0x020C ++#define ODN_MMCM_LOCK_STATUS_DUT_CORE_MASK 0x00000001U ++#define ODN_MMCM_LOCK_STATUS_DUT_CORE_SHIFT 0 ++#define 
ODN_MMCM_LOCK_STATUS_DUT_CORE_SIGNED 0 ++ ++#define ODN_MMCM_LOCK_STATUS_DUT_IF_MASK 0x00000002U ++#define ODN_MMCM_LOCK_STATUS_DUT_IF_SHIFT 1 ++#define ODN_MMCM_LOCK_STATUS_DUT_IF_SIGNED 0 ++ ++#define ODN_MMCM_LOCK_STATUS_MULTI_MASK 0x00000004U ++#define ODN_MMCM_LOCK_STATUS_MULTI_SHIFT 2 ++#define ODN_MMCM_LOCK_STATUS_MULTI_SIGNED 0 ++ ++#define ODN_MMCM_LOCK_STATUS_PDPP_MASK 0x00000008U ++#define ODN_MMCM_LOCK_STATUS_PDPP_SHIFT 3 ++#define ODN_MMCM_LOCK_STATUS_PDPP_SIGNED 0 ++ ++/* ++ Register GIST_STATUS ++*/ ++#define ODN_CORE_GIST_STATUS 0x0210 ++#define ODN_GIST_STATUS_MST_MASK 0x000001FFU ++#define ODN_GIST_STATUS_MST_SHIFT 0 ++#define ODN_GIST_STATUS_MST_SIGNED 0 ++ ++#define ODN_GIST_STATUS_SLV_MASK 0x001FF000U ++#define ODN_GIST_STATUS_SLV_SHIFT 12 ++#define ODN_GIST_STATUS_SLV_SIGNED 0 ++ ++#define ODN_GIST_STATUS_SLV_OUT_MASK 0x03000000U ++#define ODN_GIST_STATUS_SLV_OUT_SHIFT 24 ++#define ODN_GIST_STATUS_SLV_OUT_SIGNED 0 ++ ++#define ODN_GIST_STATUS_MST_OUT_MASK 0x70000000U ++#define ODN_GIST_STATUS_MST_OUT_SHIFT 28 ++#define ODN_GIST_STATUS_MST_OUT_SIGNED 0 ++ ++/* ++ Register DUT_MST_ADD ++*/ ++#define ODN_CORE_DUT_MST_ADD 0x0214 ++#define ODN_DUT_MST_ADD_SLV_OUT_MASK 0x0000003FU ++#define ODN_DUT_MST_ADD_SLV_OUT_SHIFT 0 ++#define ODN_DUT_MST_ADD_SLV_OUT_SIGNED 0 ++ ++/* ++ Register DUT_MULTIPLX_INFO ++*/ ++#define ODN_CORE_DUT_MULTIPLX_INFO 0x0218 ++#define ODN_DUT_MULTIPLX_INFO_MEM_MASK 0x000000FFU ++#define ODN_DUT_MULTIPLX_INFO_MEM_SHIFT 0 ++#define ODN_DUT_MULTIPLX_INFO_MEM_SIGNED 0 ++ ++/**************************** ++ Generated from: ad_tx.def ++*****************************/ ++ ++/* ++ Register ADT_CONTROL ++*/ ++#define ODN_AD_TX_DEBUG_ADT_CONTROL 0x0000 ++#define ODN_SET_ADTX_READY_MASK 0x00000004U ++#define ODN_SET_ADTX_READY_SHIFT 2 ++#define ODN_SET_ADTX_READY_SIGNED 0 ++ ++#define ODN_SEND_ALIGN_DATA_MASK 0x00000002U ++#define ODN_SEND_ALIGN_DATA_SHIFT 1 ++#define ODN_SEND_ALIGN_DATA_SIGNED 0 ++ ++#define ODN_ENABLE_FLUSHING_MASK 0x00000001U ++#define ODN_ENABLE_FLUSHING_SHIFT 0 ++#define ODN_ENABLE_FLUSHING_SIGNED 0 ++ ++/* ++ Register ADT_STATUS ++*/ ++#define ODN_AD_TX_DEBUG_ADT_STATUS 0x0004 ++#define ODN_REQUEST_COMPLETE_MASK 0x00000001U ++#define ODN_REQUEST_COMPLETE_SHIFT 0 ++#define ODN_REQUEST_COMPLETE_SIGNED 0 ++ ++ ++/****************************** ++ Generated from: mca_debug.def ++*******************************/ ++ ++/* ++ Register MCA_CONTROL ++*/ ++#define ODN_MCA_DEBUG_MCA_CONTROL 0x0000 ++#define ODN_ALIGN_START_MASK 0x00000001U ++#define ODN_ALIGN_START_SHIFT 0 ++#define ODN_ALIGN_START_SIGNED 0 ++ ++/* ++ Register MCA_STATUS ++*/ ++#define ODN_MCA_DEBUG_MCA_STATUS 0x0004 ++#define ODN_TCHECK_SDEBUG_MASK 0x40000000U ++#define ODN_TCHECK_SDEBUG_SHIFT 30 ++#define ODN_TCHECK_SDEBUG_SIGNED 0 ++ ++#define ODN_CHECK_SDEBUG_MASK 0x20000000U ++#define ODN_CHECK_SDEBUG_SHIFT 29 ++#define ODN_CHECK_SDEBUG_SIGNED 0 ++ ++#define ODN_ALIGN_SDEBUG_MASK 0x10000000U ++#define ODN_ALIGN_SDEBUG_SHIFT 28 ++#define ODN_ALIGN_SDEBUG_SIGNED 0 ++ ++#define ODN_FWAIT_SDEBUG_MASK 0x08000000U ++#define ODN_FWAIT_SDEBUG_SHIFT 27 ++#define ODN_FWAIT_SDEBUG_SIGNED 0 ++ ++#define ODN_IDLE_SDEBUG_MASK 0x04000000U ++#define ODN_IDLE_SDEBUG_SHIFT 26 ++#define ODN_IDLE_SDEBUG_SIGNED 0 ++ ++#define ODN_FIFO_FULL_MASK 0x03FF0000U ++#define ODN_FIFO_FULL_SHIFT 16 ++#define ODN_FIFO_FULL_SIGNED 0 ++ ++#define ODN_FIFO_EMPTY_MASK 0x0000FFC0U ++#define ODN_FIFO_EMPTY_SHIFT 6 ++#define ODN_FIFO_EMPTY_SIGNED 0 ++ ++#define ODN_TAG_CHECK_ERROR_MASK 0x00000020U ++#define 
ODN_TAG_CHECK_ERROR_SHIFT 5 ++#define ODN_TAG_CHECK_ERROR_SIGNED 0 ++ ++#define ODN_ALIGN_CHECK_ERROR_MASK 0x00000010U ++#define ODN_ALIGN_CHECK_ERROR_SHIFT 4 ++#define ODN_ALIGN_CHECK_ERROR_SIGNED 0 ++ ++#define ODN_ALIGN_ERROR_MASK 0x00000008U ++#define ODN_ALIGN_ERROR_SHIFT 3 ++#define ODN_ALIGN_ERROR_SIGNED 0 ++ ++#define ODN_TAG_CHECKING_OK_MASK 0x00000004U ++#define ODN_TAG_CHECKING_OK_SHIFT 2 ++#define ODN_TAG_CHECKING_OK_SIGNED 0 ++ ++#define ODN_ALIGN_CHECK_OK_MASK 0x00000002U ++#define ODN_ALIGN_CHECK_OK_SHIFT 1 ++#define ODN_ALIGN_CHECK_OK_SIGNED 0 ++ ++#define ODN_ALIGNMENT_FOUND_MASK 0x00000001U ++#define ODN_ALIGNMENT_FOUND_SHIFT 0 ++#define ODN_ALIGNMENT_FOUND_SIGNED 0 ++ ++ ++/********************************* ++ Generated from: sai_rx_debug.def ++**********************************/ ++ ++/* ++ Register SIG_RESULT ++*/ ++#define ODN_SAI_RX_DEBUG_SIG_RESULT 0x0000 ++#define ODN_SIG_RESULT_VALUE_MASK 0xFFFFFFFFU ++#define ODN_SIG_RESULT_VALUE_SHIFT 0 ++#define ODN_SIG_RESULT_VALUE_SIGNED 0 ++ ++/* ++ Register INIT_SIG ++*/ ++#define ODN_SAI_RX_DEBUG_INIT_SIG 0x0004 ++#define ODN_INIT_SIG_VALUE_MASK 0x00000001U ++#define ODN_INIT_SIG_VALUE_SHIFT 0 ++#define ODN_INIT_SIG_VALUE_SIGNED 0 ++ ++/* ++ Register SAI_BYPASS ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_BYPASS 0x0008 ++#define ODN_BYPASS_CLK_TAPS_VALUE_MASK 0x000003FFU ++#define ODN_BYPASS_CLK_TAPS_VALUE_SHIFT 0 ++#define ODN_BYPASS_CLK_TAPS_VALUE_SIGNED 0 ++ ++#define ODN_BYPASS_SET_MASK 0x00010000U ++#define ODN_BYPASS_SET_SHIFT 16 ++#define ODN_BYPASS_SET_SIGNED 0 ++ ++#define ODN_BYPASS_EN_MASK 0x00100000U ++#define ODN_BYPASS_EN_SHIFT 20 ++#define ODN_BYPASS_EN_SIGNED 0 ++ ++#define ODN_EN_STATUS_MASK 0x01000000U ++#define ODN_EN_STATUS_SHIFT 24 ++#define ODN_EN_STATUS_SIGNED 0 ++ ++/* ++ Register SAI_CLK_TAPS ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CLK_TAPS 0x000C ++#define ODN_CLK_TAPS_VALUE_MASK 0x000003FFU ++#define ODN_CLK_TAPS_VALUE_SHIFT 0 ++#define ODN_CLK_TAPS_VALUE_SIGNED 0 ++ ++#define ODN_TRAINING_COMPLETE_MASK 0x00010000U ++#define ODN_TRAINING_COMPLETE_SHIFT 16 ++#define ODN_TRAINING_COMPLETE_SIGNED 0 ++ ++/* ++ Register SAI_EYES ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_EYES 0x0010 ++#define ODN_MIN_EYE_END_MASK 0x0000FFFFU ++#define ODN_MIN_EYE_END_SHIFT 0 ++#define ODN_MIN_EYE_END_SIGNED 0 ++ ++#define ODN_MAX_EYE_START_MASK 0xFFFF0000U ++#define ODN_MAX_EYE_START_SHIFT 16 ++#define ODN_MAX_EYE_START_SIGNED 0 ++ ++/* ++ Register SAI_DDR_INVERT ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_DDR_INVERT 0x0014 ++#define ODN_DDR_INVERT_MASK 0x00000001U ++#define ODN_DDR_INVERT_SHIFT 0 ++#define ODN_DDR_INVERT_SIGNED 0 ++ ++#define ODN_OVERIDE_VALUE_MASK 0x00010000U ++#define ODN_OVERIDE_VALUE_SHIFT 16 ++#define ODN_OVERIDE_VALUE_SIGNED 0 ++ ++#define ODN_INVERT_OVERIDE_MASK 0x00100000U ++#define ODN_INVERT_OVERIDE_SHIFT 20 ++#define ODN_INVERT_OVERIDE_SIGNED 0 ++ ++/* ++ Register SAI_TRAIN_ACK ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK 0x0018 ++#define ODN_TRAIN_ACK_FAIL_MASK 0x00000001U ++#define ODN_TRAIN_ACK_FAIL_SHIFT 0 ++#define ODN_TRAIN_ACK_FAIL_SIGNED 0 ++ ++#define ODN_TRAIN_ACK_FAIL_COUNT_MASK 0x000000F0U ++#define ODN_TRAIN_ACK_FAIL_COUNT_SHIFT 4 ++#define ODN_TRAIN_ACK_FAIL_COUNT_SIGNED 0 ++ ++#define ODN_TRAIN_ACK_COMPLETE_MASK 0x00000100U ++#define ODN_TRAIN_ACK_COMPLETE_SHIFT 8 ++#define ODN_TRAIN_ACK_COMPLETE_SIGNED 0 ++ ++#define ODN_TRAIN_ACK_OVERIDE_MASK 0x00001000U ++#define ODN_TRAIN_ACK_OVERIDE_SHIFT 12 ++#define ODN_TRAIN_ACK_OVERIDE_SIGNED 0 ++ ++/* ++ Register SAI_TRAIN_ACK_COUNT ++*/ ++#define 
ODN_SAI_RX_DEBUG_SAI_TRAIN_ACK_COUNT 0x001C ++#define ODN_TRAIN_COUNT_MASK 0xFFFFFFFFU ++#define ODN_TRAIN_COUNT_SHIFT 0 ++#define ODN_TRAIN_COUNT_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_NUMBER ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_NUMBER 0x0020 ++#define ODN_CHANNEL_NUMBER_MASK 0x0000FFFFU ++#define ODN_CHANNEL_NUMBER_SHIFT 0 ++#define ODN_CHANNEL_NUMBER_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_EYE_START ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_START 0x0024 ++#define ODN_CHANNEL_EYE_START_MASK 0xFFFFFFFFU ++#define ODN_CHANNEL_EYE_START_SHIFT 0 ++#define ODN_CHANNEL_EYE_START_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_EYE_END ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_END 0x0028 ++#define ODN_CHANNEL_EYE_END_MASK 0xFFFFFFFFU ++#define ODN_CHANNEL_EYE_END_SHIFT 0 ++#define ODN_CHANNEL_EYE_END_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_EYE_PATTERN ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_PATTERN 0x002C ++#define ODN_CHANNEL_EYE_PATTERN_MASK 0xFFFFFFFFU ++#define ODN_CHANNEL_EYE_PATTERN_SHIFT 0 ++#define ODN_CHANNEL_EYE_PATTERN_SIGNED 0 ++ ++/* ++ Register SAI_CHANNEL_EYE_DEBUG ++*/ ++#define ODN_SAI_RX_DEBUG_SAI_CHANNEL_EYE_DEBUG 0x0030 ++#define ODN_CHANNEL_EYE_SENSE_MASK 0x00000001U ++#define ODN_CHANNEL_EYE_SENSE_SHIFT 0 ++#define ODN_CHANNEL_EYE_SENSE_SIGNED 0 ++ ++#define ODN_CHANNEL_EYE_COMPLETE_MASK 0x00000002U ++#define ODN_CHANNEL_EYE_COMPLETE_SHIFT 1 ++#define ODN_CHANNEL_EYE_COMPLETE_SIGNED 0 ++ ++ ++/********************************* ++ Generated from: sai_tx_debug.def ++**********************************/ ++ ++/* ++ Register SIG_RESULT ++*/ ++#define ODN_SAI_TX_DEBUG_SIG_RESULT 0x0000 ++#define ODN_TX_SIG_RESULT_VALUE_MASK 0xFFFFFFFFU ++#define ODN_TX_SIG_RESULT_VALUE_SHIFT 0 ++#define ODN_TX_SIG_RESULT_VALUE_SIGNED 0 ++ ++/* ++ Register INIT_SIG ++*/ ++#define ODN_SAI_TX_DEBUG_INIT_SIG 0x0004 ++#define ODN_TX_INIT_SIG_VALUE_MASK 0x00000001U ++#define ODN_TX_INIT_SIG_VALUE_SHIFT 0 ++#define ODN_TX_INIT_SIG_VALUE_SIGNED 0 ++ ++/* ++ Register SAI_BYPASS ++*/ ++#define ODN_SAI_TX_DEBUG_SAI_BYPASS 0x0008 ++#define ODN_TX_BYPASS_EN_MASK 0x00000001U ++#define ODN_TX_BYPASS_EN_SHIFT 0 ++#define ODN_TX_BYPASS_EN_SIGNED 0 ++ ++#define ODN_TX_ACK_RESEND_MASK 0x00000002U ++#define ODN_TX_ACK_RESEND_SHIFT 1 ++#define ODN_TX_ACK_RESEND_SIGNED 0 ++ ++#define ODN_TX_DISABLE_ACK_SEND_MASK 0x00000004U ++#define ODN_TX_DISABLE_ACK_SEND_SHIFT 2 ++#define ODN_TX_DISABLE_ACK_SEND_SIGNED 0 ++ ++/* ++ Register SAI_STATUS ++*/ ++#define ODN_SAI_TX_DEBUG_SAI_STATUS 0x000C ++#define ODN_TX_TRAINING_COMPLETE_MASK 0x00000001U ++#define ODN_TX_TRAINING_COMPLETE_SHIFT 0 ++#define ODN_TX_TRAINING_COMPLETE_SIGNED 0 ++ ++#define ODN_TX_TRAINING_ACK_COMPLETE_MASK 0x00000002U ++#define ODN_TX_TRAINING_ACK_COMPLETE_SHIFT 1 ++#define ODN_TX_TRAINING_ACK_COMPLETE_SIGNED 0 ++ ++ ++ ++#endif /* _ODIN_REGS_H_ */ ++ ++/****************************************************************************** ++ End of file (odin_regs.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/orion_defs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/orion_defs.h +new file mode 100644 +index 000000000000..1691151de58d +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/orion_defs.h +@@ -0,0 +1,183 @@ ++/**************************************************************************** ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Orion Memory Map - View from PCIe ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++****************************************************************************/ ++ ++#ifndef _ORION_DEFS_H_ ++#define _ORION_DEFS_H_ ++ ++/* ++ * These defines have not been autogenerated ++ * Only values different from Odin will be included here ++ */ ++ ++#define DEVICE_ID_ORION 0x1020 ++ ++/* Odin system register banks */ ++#define SRS_REG_BANK_ODN_CLK_BLK 0x02000 ++ ++/* ++ * Orion CLK regs - the srs_clk_blk module defs are not auto generated ++ */ ++#define SRS_PDP_P_CLK_OUT_DIVIDER_REG1 0x620 ++#define SRS_PDP_PCLK_ODIV1_LO_TIME_MASK 0x0000003FU ++#define SRS_PDP_PCLK_ODIV1_LO_TIME_SHIFT 0 ++#define SRS_PDP_PCLK_ODIV1_HI_TIME_MASK 0x00000FC0U ++#define SRS_PDP_PCLK_ODIV1_HI_TIME_SHIFT 6 ++ ++#define SRS_PDP_P_CLK_OUT_DIVIDER_REG2 0x624 ++#define SRS_PDP_PCLK_ODIV2_NOCOUNT_MASK 0x00000040U ++#define SRS_PDP_PCLK_ODIV2_NOCOUNT_SHIFT 6 ++#define SRS_PDP_PCLK_ODIV2_EDGE_MASK 0x00000080U ++#define SRS_PDP_PCLK_ODIV2_EDGE_SHIFT 7 ++#define SRS_PDP_PCLK_ODIV2_FRAC_MASK 0x00007C00U ++#define SRS_PDP_PCLK_ODIV2_FRAC_SHIFT 10 ++ ++#define SRS_PDP_P_CLK_OUT_DIVIDER_REG3 0x61C ++ ++#define SRS_PDP_M_CLK_OUT_DIVIDER_REG1 0x628 ++#define SRS_PDP_MCLK_ODIV1_LO_TIME_MASK 0x0000003FU ++#define SRS_PDP_MCLK_ODIV1_LO_TIME_SHIFT 0 ++#define SRS_PDP_MCLK_ODIV1_HI_TIME_MASK 0x00000FC0U ++#define SRS_PDP_MCLK_ODIV1_HI_TIME_SHIFT 6 ++ ++#define SRS_PDP_M_CLK_OUT_DIVIDER_REG2 0x62C ++#define SRS_PDP_MCLK_ODIV2_NOCOUNT_MASK 0x00000040U ++#define SRS_PDP_MCLK_ODIV2_NOCOUNT_SHIFT 6 ++#define SRS_PDP_MCLK_ODIV2_EDGE_MASK 0x00000080U ++#define SRS_PDP_MCLK_ODIV2_EDGE_SHIFT 7 ++ ++#define SRS_PDP_P_CLK_MULTIPLIER_REG1 0x650 ++#define SRS_PDP_PCLK_MUL1_LO_TIME_MASK 0x0000003FU ++#define SRS_PDP_PCLK_MUL1_LO_TIME_SHIFT 0 ++#define SRS_PDP_PCLK_MUL1_HI_TIME_MASK 0x00000FC0U ++#define SRS_PDP_PCLK_MUL1_HI_TIME_SHIFT 6 ++ ++#define SRS_PDP_P_CLK_MULTIPLIER_REG2 0x654 ++#define SRS_PDP_PCLK_MUL2_NOCOUNT_MASK 0x00000040U ++#define SRS_PDP_PCLK_MUL2_NOCOUNT_SHIFT 6 ++#define SRS_PDP_PCLK_MUL2_EDGE_MASK 0x00000080U ++#define SRS_PDP_PCLK_MUL2_EDGE_SHIFT 7 ++#define SRS_PDP_PCLK_MUL2_FRAC_MASK 0x00007C00U ++#define SRS_PDP_PCLK_MUL2_FRAC_SHIFT 10 ++ ++#define SRS_PDP_P_CLK_MULTIPLIER_REG3 0x64C ++ ++#define SRS_PDP_P_CLK_IN_DIVIDER_REG 0x658 ++#define SRS_PDP_PCLK_IDIV_LO_TIME_MASK 0x0000003FU ++#define SRS_PDP_PCLK_IDIV_LO_TIME_SHIFT 0 ++#define SRS_PDP_PCLK_IDIV_HI_TIME_MASK 0x00000FC0U ++#define SRS_PDP_PCLK_IDIV_HI_TIME_SHIFT 6 ++#define SRS_PDP_PCLK_IDIV_NOCOUNT_MASK 0x00001000U ++#define SRS_PDP_PCLK_IDIV_NOCOUNT_SHIFT 12 ++#define SRS_PDP_PCLK_IDIV_EDGE_MASK 0x00002000U ++#define SRS_PDP_PCLK_IDIV_EDGE_SHIFT 13 ++ ++/* ++ * DUT core clock input divider, DUT reference clock input divider ++ */ ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1 0x0020 ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 ++ ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2 0x0024 ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U ++#define SRS_DUT_CORE_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 ++ ++#define SRS_DUT_REF_CLK_OUT_DIVIDER1 0x0028 ++#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U ++#define SRS_DUT_REF_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 ++#define SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU ++#define 
SRS_DUT_REF_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 ++ ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2 0x002C ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U ++#define SRS_DUT_REF_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 ++ ++/* ++ * DUT interface reference clock input divider ++ */ ++ ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1 0x0228 ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_MASK 0x00000FC0U ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_HI_TIME_SHIFT 6 ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_MASK 0x0000003FU ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER1_LO_TIME_SHIFT 0 ++ ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2 0x022C ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_MASK 0x00000080U ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_EDGE_SHIFT 7 ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_MASK 0x00000040U ++#define SRS_DUT_MEM_CLK_OUT_DIVIDER2_NOCOUNT_SHIFT 6 ++ ++/* ++ * Min max values from Xilinx Virtex Ultrascale data sheet DS893, ++ * for speed grade 1. All in Hz. ++ */ ++#define SRS_INPUT_CLOCK_SPEED 100000000U ++#define SRS_INPUT_CLOCK_SPEED_MIN 10000000U ++#define SRS_INPUT_CLOCK_SPEED_MAX 800000000U ++#define SRS_OUTPUT_CLOCK_SPEED_MIN 4690000U ++#define SRS_OUTPUT_CLOCK_SPEED_MAX 630000000U ++#define SRS_VCO_MIN 600000000U ++#define SRS_VCO_MAX 1200000000U ++#define SRS_PFD_MIN 10000000U ++#define SRS_PFD_MAX 450000000U ++ ++/* ++ * Orion interrupt flags ++ */ ++#define SRS_INTERRUPT_ENABLE_PDP1 (1 << SRS_INTERRUPT_ENABLE_PDP_SHIFT) ++#define SRS_INTERRUPT_ENABLE_DUT (1 << SRS_INTERRUPT_ENABLE_DUT_SHIFT) ++#define SRS_INTERRUPT_STATUS_PDP1 (1 << SRS_INTERRUPT_STATUS_PDP_SHIFT) ++#define SRS_INTERRUPT_STATUS_DUT (1 << SRS_INTERRUPT_STATUS_DUT_SHIFT) ++#define SRS_INTERRUPT_CLEAR_PDP1 (1 << SRS_INTERRUPT_CLR_PDP_SHIFT) ++#define SRS_INTERRUPT_CLEAR_DUT (1 << SRS_INTERRUPT_CLR_DUT_SHIFT) ++ ++#endif /* _ORION_DEFS_H_ */ ++ ++/***************************************************************************** ++ End of file (orion_defs.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/orion_regs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/orion_regs.h +new file mode 100644 +index 000000000000..2a626bd2b70a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/orion_regs.h +@@ -0,0 +1,439 @@ ++/****************************************************************************** ++@Title Orion system control register definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Orion FPGA register defs for Sirius RTL ++@Author Autogenerated ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++******************************************************************************/ ++ ++#ifndef _OUT_DRV_H_ ++#define _OUT_DRV_H_ ++ ++/* ++ Register ID ++*/ ++#define SRS_CORE_ID 0x0000 ++#define SRS_ID_VARIANT_MASK 0x0000FFFFU ++#define SRS_ID_VARIANT_SHIFT 0 ++#define SRS_ID_VARIANT_SIGNED 0 ++ ++#define SRS_ID_ID_MASK 0xFFFF0000U ++#define SRS_ID_ID_SHIFT 16 ++#define SRS_ID_ID_SIGNED 0 ++ ++/* ++ Register REVISION ++*/ ++#define SRS_CORE_REVISION 0x0004 ++#define SRS_REVISION_MINOR_MASK 0x000000FFU ++#define SRS_REVISION_MINOR_SHIFT 0 ++#define SRS_REVISION_MINOR_SIGNED 0 ++ ++#define SRS_REVISION_MAJOR_MASK 0x00000F00U ++#define SRS_REVISION_MAJOR_SHIFT 8 ++#define SRS_REVISION_MAJOR_SIGNED 0 ++ ++/* ++ Register CHANGE_SET ++*/ ++#define SRS_CORE_CHANGE_SET 0x0008 ++#define SRS_CHANGE_SET_SET_MASK 0xFFFFFFFFU ++#define SRS_CHANGE_SET_SET_SHIFT 0 ++#define SRS_CHANGE_SET_SET_SIGNED 0 ++ ++/* ++ Register USER_ID ++*/ ++#define SRS_CORE_USER_ID 0x000C ++#define SRS_USER_ID_ID_MASK 0x0000000FU ++#define SRS_USER_ID_ID_SHIFT 0 ++#define SRS_USER_ID_ID_SIGNED 0 ++ ++/* ++ Register USER_BUILD ++*/ ++#define SRS_CORE_USER_BUILD 0x0010 ++#define SRS_USER_BUILD_BUILD_MASK 0xFFFFFFFFU ++#define SRS_USER_BUILD_BUILD_SHIFT 0 ++#define SRS_USER_BUILD_BUILD_SIGNED 0 ++ ++/* ++ Register SOFT_RESETN ++*/ ++#define SRS_CORE_SOFT_RESETN 0x0080 ++#define SRS_SOFT_RESETN_DDR_MASK 0x00000001U ++#define SRS_SOFT_RESETN_DDR_SHIFT 0 ++#define SRS_SOFT_RESETN_DDR_SIGNED 0 ++ ++#define SRS_SOFT_RESETN_USB_MASK 0x00000002U ++#define SRS_SOFT_RESETN_USB_SHIFT 1 ++#define SRS_SOFT_RESETN_USB_SIGNED 0 ++ ++#define SRS_SOFT_RESETN_PDP_MASK 0x00000004U ++#define SRS_SOFT_RESETN_PDP_SHIFT 2 ++#define SRS_SOFT_RESETN_PDP_SIGNED 0 ++ ++#define SRS_SOFT_RESETN_GIST_MASK 0x00000008U ++#define SRS_SOFT_RESETN_GIST_SHIFT 3 ++#define SRS_SOFT_RESETN_GIST_SIGNED 0 ++ ++/* ++ Register DUT_SOFT_RESETN ++*/ ++#define SRS_CORE_DUT_SOFT_RESETN 0x0084 ++#define SRS_DUT_SOFT_RESETN_EXTERNAL_MASK 0x00000001U ++#define SRS_DUT_SOFT_RESETN_EXTERNAL_SHIFT 0 ++#define SRS_DUT_SOFT_RESETN_EXTERNAL_SIGNED 0 ++ ++/* ++ Register SOFT_AUTO_RESETN ++*/ ++#define SRS_CORE_SOFT_AUTO_RESETN 0x0088 
++#define SRS_SOFT_AUTO_RESETN_CFG_MASK 0x00000001U ++#define SRS_SOFT_AUTO_RESETN_CFG_SHIFT 0 ++#define SRS_SOFT_AUTO_RESETN_CFG_SIGNED 0 ++ ++/* ++ Register CLK_GEN_RESET ++*/ ++#define SRS_CORE_CLK_GEN_RESET 0x0090 ++#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_MASK 0x00000001U ++#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SHIFT 0 ++#define SRS_CLK_GEN_RESET_DUT_CORE_MMCM_SIGNED 0 ++ ++#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_MASK 0x00000002U ++#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SHIFT 1 ++#define SRS_CLK_GEN_RESET_DUT_IF_MMCM_SIGNED 0 ++ ++#define SRS_CLK_GEN_RESET_MULTI_MMCM_MASK 0x00000004U ++#define SRS_CLK_GEN_RESET_MULTI_MMCM_SHIFT 2 ++#define SRS_CLK_GEN_RESET_MULTI_MMCM_SIGNED 0 ++ ++#define SRS_CLK_GEN_RESET_PDP_MMCM_MASK 0x00000008U ++#define SRS_CLK_GEN_RESET_PDP_MMCM_SHIFT 3 ++#define SRS_CLK_GEN_RESET_PDP_MMCM_SIGNED 0 ++ ++/* ++ Register DUT_MEM ++*/ ++#define SRS_CORE_DUT_MEM 0x0120 ++#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_MASK 0x0000FFFFU ++#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SHIFT 0 ++#define SRS_DUT_MEM_READ_RESPONSE_LATENCY_SIGNED 0 ++ ++#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_MASK 0xFFFF0000U ++#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SHIFT 16 ++#define SRS_DUT_MEM_WRITE_RESPONSE_LATENCY_SIGNED 0 ++ ++/* ++ Register APM ++*/ ++#define SRS_CORE_APM 0x0150 ++#define SRS_APM_RESET_EVENT_MASK 0x00000001U ++#define SRS_APM_RESET_EVENT_SHIFT 0 ++#define SRS_APM_RESET_EVENT_SIGNED 0 ++ ++#define SRS_APM_CAPTURE_EVENT_MASK 0x00000002U ++#define SRS_APM_CAPTURE_EVENT_SHIFT 1 ++#define SRS_APM_CAPTURE_EVENT_SIGNED 0 ++ ++/* ++ Register NUM_GPIO ++*/ ++#define SRS_CORE_NUM_GPIO 0x0180 ++#define SRS_NUM_GPIO_NUMBER_MASK 0x0000000FU ++#define SRS_NUM_GPIO_NUMBER_SHIFT 0 ++#define SRS_NUM_GPIO_NUMBER_SIGNED 0 ++ ++/* ++ Register GPIO_EN ++*/ ++#define SRS_CORE_GPIO_EN 0x0184 ++#define SRS_GPIO_EN_DIRECTION_MASK 0x000000FFU ++#define SRS_GPIO_EN_DIRECTION_SHIFT 0 ++#define SRS_GPIO_EN_DIRECTION_SIGNED 0 ++ ++/* ++ Register GPIO ++*/ ++#define SRS_CORE_GPIO 0x0188 ++#define SRS_GPIO_GPIO_MASK 0x000000FFU ++#define SRS_GPIO_GPIO_SHIFT 0 ++#define SRS_GPIO_GPIO_SIGNED 0 ++ ++/* ++ Register SPI_MASTER_IFACE ++*/ ++#define SRS_CORE_SPI_MASTER_IFACE 0x018C ++#define SRS_SPI_MASTER_IFACE_ENABLE_MASK 0x00000001U ++#define SRS_SPI_MASTER_IFACE_ENABLE_SHIFT 0 ++#define SRS_SPI_MASTER_IFACE_ENABLE_SIGNED 0 ++ ++/* ++ Register SRS_IP_STATUS ++*/ ++#define SRS_CORE_SRS_IP_STATUS 0x0200 ++#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_MASK 0x00000001U ++#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SHIFT 0 ++#define SRS_SRS_IP_STATUS_PCIE_USER_LNK_UP_SIGNED 0 ++ ++#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_MASK 0x00000002U ++#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SHIFT 1 ++#define SRS_SRS_IP_STATUS_MIG_INIT_CALIB_COMPLETE_SIGNED 0 ++ ++#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_MASK 0x00000004U ++#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SHIFT 2 ++#define SRS_SRS_IP_STATUS_GIST_SLV_C2C_CONFIG_ERROR_OUT_SIGNED 0 ++ ++#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_MASK 0x00000008U ++#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SHIFT 3 ++#define SRS_SRS_IP_STATUS_GIST_MST_C2C_CONFIG_ERROR_OUT_SIGNED 0 ++ ++/* ++ Register CORE_CONTROL ++*/ ++#define SRS_CORE_CORE_CONTROL 0x0204 ++#define SRS_CORE_CONTROL_BAR4_OFFSET_MASK 0x0000001FU ++#define SRS_CORE_CONTROL_BAR4_OFFSET_SHIFT 0 ++#define SRS_CORE_CONTROL_BAR4_OFFSET_SIGNED 0 ++ ++#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_MASK 0x00000300U ++#define 
SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SHIFT 8 ++#define SRS_CORE_CONTROL_HDMI_MONITOR_OVERRIDE_SIGNED 0 ++ ++#define SRS_CORE_CONTROL_HDMI_MODULE_EN_MASK 0x00001C00U ++#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SHIFT 10 ++#define SRS_CORE_CONTROL_HDMI_MODULE_EN_SIGNED 0 ++ ++/* ++ Register REG_BANK_STATUS ++*/ ++#define SRS_CORE_REG_BANK_STATUS 0x0208 ++#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_MASK 0xFFFFFFFFU ++#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SHIFT 0 ++#define SRS_REG_BANK_STATUS_ARB_SLV_RD_TIMEOUT_SIGNED 0 ++ ++/* ++ Register MMCM_LOCK_STATUS ++*/ ++#define SRS_CORE_MMCM_LOCK_STATUS 0x020C ++#define SRS_MMCM_LOCK_STATUS_DUT_CORE_MASK 0x00000001U ++#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SHIFT 0 ++#define SRS_MMCM_LOCK_STATUS_DUT_CORE_SIGNED 0 ++ ++#define SRS_MMCM_LOCK_STATUS_DUT_IF_MASK 0x00000002U ++#define SRS_MMCM_LOCK_STATUS_DUT_IF_SHIFT 1 ++#define SRS_MMCM_LOCK_STATUS_DUT_IF_SIGNED 0 ++ ++#define SRS_MMCM_LOCK_STATUS_MULTI_MASK 0x00000004U ++#define SRS_MMCM_LOCK_STATUS_MULTI_SHIFT 2 ++#define SRS_MMCM_LOCK_STATUS_MULTI_SIGNED 0 ++ ++#define SRS_MMCM_LOCK_STATUS_PDP_MASK 0x00000008U ++#define SRS_MMCM_LOCK_STATUS_PDP_SHIFT 3 ++#define SRS_MMCM_LOCK_STATUS_PDP_SIGNED 0 ++ ++/* ++ Register GIST_STATUS ++*/ ++#define SRS_CORE_GIST_STATUS 0x0210 ++#define SRS_GIST_STATUS_MST_MASK 0x000001FFU ++#define SRS_GIST_STATUS_MST_SHIFT 0 ++#define SRS_GIST_STATUS_MST_SIGNED 0 ++ ++#define SRS_GIST_STATUS_SLV_MASK 0x001FF000U ++#define SRS_GIST_STATUS_SLV_SHIFT 12 ++#define SRS_GIST_STATUS_SLV_SIGNED 0 ++ ++#define SRS_GIST_STATUS_SLV_OUT_MASK 0x03000000U ++#define SRS_GIST_STATUS_SLV_OUT_SHIFT 24 ++#define SRS_GIST_STATUS_SLV_OUT_SIGNED 0 ++ ++#define SRS_GIST_STATUS_MST_OUT_MASK 0x70000000U ++#define SRS_GIST_STATUS_MST_OUT_SHIFT 28 ++#define SRS_GIST_STATUS_MST_OUT_SIGNED 0 ++ ++/* ++ Register SENSOR_BOARD ++*/ ++#define SRS_CORE_SENSOR_BOARD 0x0214 ++#define SRS_SENSOR_BOARD_ID_MASK 0x00000003U ++#define SRS_SENSOR_BOARD_ID_SHIFT 0 ++#define SRS_SENSOR_BOARD_ID_SIGNED 0 ++ ++/* ++ Register INTERRUPT_STATUS ++*/ ++#define SRS_CORE_INTERRUPT_STATUS 0x0218 ++#define SRS_INTERRUPT_STATUS_DUT_MASK 0x00000001U ++#define SRS_INTERRUPT_STATUS_DUT_SHIFT 0 ++#define SRS_INTERRUPT_STATUS_DUT_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_PDP_MASK 0x00000002U ++#define SRS_INTERRUPT_STATUS_PDP_SHIFT 1 ++#define SRS_INTERRUPT_STATUS_PDP_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_I2C_MASK 0x00000004U ++#define SRS_INTERRUPT_STATUS_I2C_SHIFT 2 ++#define SRS_INTERRUPT_STATUS_I2C_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_SPI_MASK 0x00000008U ++#define SRS_INTERRUPT_STATUS_SPI_SHIFT 3 ++#define SRS_INTERRUPT_STATUS_SPI_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_APM_MASK 0x00000010U ++#define SRS_INTERRUPT_STATUS_APM_SHIFT 4 ++#define SRS_INTERRUPT_STATUS_APM_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_OS_IRQ_MASK 0x00001FE0U ++#define SRS_INTERRUPT_STATUS_OS_IRQ_SHIFT 5 ++#define SRS_INTERRUPT_STATUS_OS_IRQ_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_IRQ_TEST_MASK 0x40000000U ++#define SRS_INTERRUPT_STATUS_IRQ_TEST_SHIFT 30 ++#define SRS_INTERRUPT_STATUS_IRQ_TEST_SIGNED 0 ++ ++#define SRS_INTERRUPT_STATUS_MASTER_STATUS_MASK 0x80000000U ++#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SHIFT 31 ++#define SRS_INTERRUPT_STATUS_MASTER_STATUS_SIGNED 0 ++ ++/* ++ Register INTERRUPT_ENABLE ++*/ ++#define SRS_CORE_INTERRUPT_ENABLE 0x021C ++#define SRS_INTERRUPT_ENABLE_DUT_MASK 0x00000001U ++#define SRS_INTERRUPT_ENABLE_DUT_SHIFT 0 ++#define SRS_INTERRUPT_ENABLE_DUT_SIGNED 0 ++ ++#define 
SRS_INTERRUPT_ENABLE_PDP_MASK 0x00000002U ++#define SRS_INTERRUPT_ENABLE_PDP_SHIFT 1 ++#define SRS_INTERRUPT_ENABLE_PDP_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_I2C_MASK 0x00000004U ++#define SRS_INTERRUPT_ENABLE_I2C_SHIFT 2 ++#define SRS_INTERRUPT_ENABLE_I2C_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_SPI_MASK 0x00000008U ++#define SRS_INTERRUPT_ENABLE_SPI_SHIFT 3 ++#define SRS_INTERRUPT_ENABLE_SPI_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_APM_MASK 0x00000010U ++#define SRS_INTERRUPT_ENABLE_APM_SHIFT 4 ++#define SRS_INTERRUPT_ENABLE_APM_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_OS_IRQ_MASK 0x00001FE0U ++#define SRS_INTERRUPT_ENABLE_OS_IRQ_SHIFT 5 ++#define SRS_INTERRUPT_ENABLE_OS_IRQ_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_IRQ_TEST_MASK 0x40000000U ++#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SHIFT 30 ++#define SRS_INTERRUPT_ENABLE_IRQ_TEST_SIGNED 0 ++ ++#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_MASK 0x80000000U ++#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT 31 ++#define SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_CLR ++*/ ++#define SRS_CORE_INTERRUPT_CLR 0x0220 ++#define SRS_INTERRUPT_CLR_DUT_MASK 0x00000001U ++#define SRS_INTERRUPT_CLR_DUT_SHIFT 0 ++#define SRS_INTERRUPT_CLR_DUT_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_PDP_MASK 0x00000002U ++#define SRS_INTERRUPT_CLR_PDP_SHIFT 1 ++#define SRS_INTERRUPT_CLR_PDP_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_I2C_MASK 0x00000004U ++#define SRS_INTERRUPT_CLR_I2C_SHIFT 2 ++#define SRS_INTERRUPT_CLR_I2C_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_SPI_MASK 0x00000008U ++#define SRS_INTERRUPT_CLR_SPI_SHIFT 3 ++#define SRS_INTERRUPT_CLR_SPI_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_APM_MASK 0x00000010U ++#define SRS_INTERRUPT_CLR_APM_SHIFT 4 ++#define SRS_INTERRUPT_CLR_APM_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_OS_IRQ_MASK 0x00001FE0U ++#define SRS_INTERRUPT_CLR_OS_IRQ_SHIFT 5 ++#define SRS_INTERRUPT_CLR_OS_IRQ_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_IRQ_TEST_MASK 0x40000000U ++#define SRS_INTERRUPT_CLR_IRQ_TEST_SHIFT 30 ++#define SRS_INTERRUPT_CLR_IRQ_TEST_SIGNED 0 ++ ++#define SRS_INTERRUPT_CLR_MASTER_CLEAR_MASK 0x80000000U ++#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SHIFT 31 ++#define SRS_INTERRUPT_CLR_MASTER_CLEAR_SIGNED 0 ++ ++/* ++ Register INTERRUPT_TEST ++*/ ++#define SRS_CORE_INTERRUPT_TEST 0x0224 ++#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_MASK 0x00000001U ++#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SHIFT 0 ++#define SRS_INTERRUPT_TEST_INTERRUPT_TEST_SIGNED 0 ++ ++/* ++ Register INTERRUPT_TIMEOUT_CLR ++*/ ++#define SRS_CORE_INTERRUPT_TIMEOUT_CLR 0x0228 ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_MASK 0x00000002U ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SHIFT 1 ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_CLR_SIGNED 0 ++ ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_MASK 0x00000001U ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SHIFT 0 ++#define SRS_INTERRUPT_TIMEOUT_CLR_INTERRUPT_MST_TIMEOUT_SIGNED 0 ++ ++/* ++ Register INTERRUPT_TIMEOUT ++*/ ++#define SRS_CORE_INTERRUPT_TIMEOUT 0x022C ++#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_MASK 0xFFFFFFFFU ++#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SHIFT 0 ++#define SRS_INTERRUPT_TIMEOUT_INTERRUPT_TIMEOUT_THRESHOLD_COUNTER_SIGNED 0 ++ ++#endif /* _OUT_DRV_H_ */ ++ ++/****************************************************************************** ++ End of file (orion_regs.h) ++******************************************************************************/ 
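All of the generated headers above share one convention: each register field is described by a _MASK (the bits the field occupies in the 32-bit register), a _SHIFT (the position of its least significant bit), and a _SIGNED flag (0 for the unsigned fields here). The following is a minimal sketch, not part of this patch, of how a driver typically consumes these triples; the srs_field_get/srs_field_set helpers and the srs_enable_pdp_irq function are hypothetical, and "regs" is assumed to be an ioremap()'d base of the Orion (SRS) system control bank from orion_regs.h:

/*
 * Illustrative usage of the generated _MASK/_SHIFT pairs; these
 * helpers are hypothetical and not taken from the img-rogue driver.
 */
#include <linux/io.h>
#include <linux/types.h>

/* Extract a field: mask off its bits, then shift down to bit 0. */
static inline u32 srs_field_get(u32 reg, u32 mask, u32 shift)
{
	return (reg & mask) >> shift;
}

/* Insert a field: clear its bits, then OR in the shifted value. */
static inline u32 srs_field_set(u32 reg, u32 mask, u32 shift, u32 val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

static void srs_enable_pdp_irq(void __iomem *regs)
{
	u32 v = ioread32(regs + SRS_CORE_INTERRUPT_ENABLE);

	/* Unmask the PDP source and the top-level master enable bit. */
	v = srs_field_set(v, SRS_INTERRUPT_ENABLE_PDP_MASK,
			  SRS_INTERRUPT_ENABLE_PDP_SHIFT, 1);
	v = srs_field_set(v, SRS_INTERRUPT_ENABLE_MASTER_ENABLE_MASK,
			  SRS_INTERRUPT_ENABLE_MASTER_ENABLE_SHIFT, 1);
	iowrite32(v, regs + SRS_CORE_INTERRUPT_ENABLE);
}

The same read-modify-write pattern applies to the ODN_* Odin definitions earlier in this patch; only the prefix and register offsets differ.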
+diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/pdp_regs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/pdp_regs.h +new file mode 100644 +index 000000000000..bd26b0617f95 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/pdp_regs.h +@@ -0,0 +1,75 @@ ++/*************************************************************************/ /*! ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(__PDP_REGS_H__) ++#define __PDP_REGS_H__ ++ ++/*************************************************************************/ /*! ++ PCI Device Information ++*/ /**************************************************************************/ ++ ++#define DCPDP_VENDOR_ID_POWERVR (0x1010) ++ ++#define DCPDP_DEVICE_ID_PCI_APOLLO_FPGA (0x1CF1) ++#define DCPDP_DEVICE_ID_PCIE_APOLLO_FPGA (0x1CF2) ++ ++/*************************************************************************/ /*! 
++ PCI Device Base Address Information ++*/ /**************************************************************************/ ++ ++/* PLL and PDP registers on base address register 0 */ ++#define DCPDP_REG_PCI_BASENUM (0) ++ ++#define DCPDP_PCI_PLL_REG_OFFSET (0x1000) ++#define DCPDP_PCI_PLL_REG_SIZE (0x0400) ++ ++#define DCPDP_PCI_PDP_REG_OFFSET (0xC000) ++#define DCPDP_PCI_PDP_REG_SIZE (0x2000) ++ ++/*************************************************************************/ /*! ++ Misc register information ++*/ /**************************************************************************/ ++ ++/* This information isn't captured in tcf_rgbpdp_regs.h so define it here */ ++#define DCPDP_STR1SURF_FORMAT_ARGB8888 (0xE) ++#define DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT (4) ++#define DCPDP_STR1POSN_STRIDE_SHIFT (4) ++ ++#endif /* !defined(__PDP_REGS_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/pfim_defs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/pfim_defs.h +new file mode 100644 +index 000000000000..d39c06f703bd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/pfim_defs.h +@@ -0,0 +1,69 @@ ++/****************************************************************************** ++@Title Odin PFIM definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Odin register defs for PDP-FBDC Interface Module ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++******************************************************************************/ ++ ++#ifndef _PFIM_DEFS_H_ ++#define _PFIM_DEFS_H_ ++ ++/* Supported FBC modes */ ++#define ODIN_PFIM_MOD_LINEAR (0x00) ++#define ODIN_PFIM_FBCDC_8X8_V12 (0x01) ++#define ODIN_PFIM_FBCDC_16X4_V12 (0x02) ++#define ODIN_PFIM_FBCDC_MAX (0x03) ++ ++/* Supported pixel formats */ ++#define ODN_PFIM_PIXFMT_NONE (0x00) ++#define ODN_PFIM_PIXFMT_ARGB8888 (0x0C) ++#define ODN_PFIM_PIXFMT_RGB565 (0x05) ++ ++/* Tile types */ ++#define ODN_PFIM_TILETYPE_8X8 (0x01) ++#define ODN_PFIM_TILETYPE_16X4 (0x02) ++#define ODN_PFIM_TILETYPE_32x2 (0x03) ++ ++#define PFIM_ROUNDUP(X, Y) (((X) + ((Y) - 1U)) & ~((Y) - 1U)) ++#define PFIM_RND_TAG (0x10) ++ ++#endif /* _PFIM_DEFS_H_ */ ++ ++/****************************************************************************** ++ End of file (pfim_defs.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/pfim_regs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/pfim_regs.h +new file mode 100644 +index 000000000000..4b8ff82138b4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/pfim_regs.h +@@ -0,0 +1,265 @@ ++/****************************************************************************** ++@Title Odin PFIM control register definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Odin register defs for PDP-FBDC Interface Module ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++******************************************************************************/ ++#ifndef _PFIM_REGS_H_ ++#define _PFIM_REGS_H_ ++ ++/* ++ Register CR_PFIM_NUM_TILES ++*/ ++#define CR_PFIM_NUM_TILES 0x0000 ++#define CR_PFIM_NUM_TILES_MASK 0x007FFFFFU ++#define CR_PFIM_NUM_TILES_SHIFT 0 ++#define CR_PFIM_NUM_TILES_SIGNED 0 ++ ++/* ++ Register CR_PFIM_TILES_PER_LINE ++*/ ++#define CR_PFIM_TILES_PER_LINE 0x0004 ++#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_MASK 0x000000FFU ++#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_SHIFT 0 ++#define CR_PFIM_TILES_PER_LINE_PFIM_TILES_PER_LINE_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB ++*/ ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB 0x0008 ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_SHIFT 0 ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_LSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB ++*/ ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB 0x000C ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_MASK 0x00000003U ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_SHIFT 0 ++#define CR_PFIM_FBDC_YARGB_BASE_ADDR_MSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_UV_BASE_ADDR_LSB ++*/ ++#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB 0x0010 ++#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_SHIFT 0 ++#define CR_PFIM_FBDC_UV_BASE_ADDR_LSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_UV_BASE_ADDR_MSB ++*/ ++#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB 0x0014 ++#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_MASK 0x00000003U ++#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_SHIFT 0 ++#define CR_PFIM_FBDC_UV_BASE_ADDR_MSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_PDP_Y_BASE_ADDR ++*/ ++#define CR_PFIM_PDP_Y_BASE_ADDR 0x0018 ++#define CR_PFIM_PDP_Y_BASE_ADDR_MASK 0xFFFFFFFFU ++#define CR_PFIM_PDP_Y_BASE_ADDR_SHIFT 0 ++#define CR_PFIM_PDP_Y_BASE_ADDR_SIGNED 0 ++ ++/* ++ Register CR_PFIM_PDP_UV_BASE_ADDR ++*/ ++#define CR_PFIM_PDP_UV_BASE_ADDR 0x001C ++#define CR_PFIM_PDP_UV_BASE_ADDR_MASK 0xFFFFFFFFU ++#define CR_PFIM_PDP_UV_BASE_ADDR_SHIFT 0 ++#define CR_PFIM_PDP_UV_BASE_ADDR_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_REQ_CONTEXT ++*/ ++#define CR_PFIM_FBDC_REQ_CONTEXT 0x0020 ++#define CR_PFIM_FBDC_REQ_CONTEXT_MASK 0x00000007U ++#define CR_PFIM_FBDC_REQ_CONTEXT_SHIFT 0 ++#define CR_PFIM_FBDC_REQ_CONTEXT_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_REQ_TAG ++*/ ++#define CR_PFIM_FBDC_REQ_TAG 0x0024 ++#define CR_PFIM_FBDC_REQ_TAG_YARGB_MASK 0x00000003U ++#define CR_PFIM_FBDC_REQ_TAG_YARGB_SHIFT 0 ++#define CR_PFIM_FBDC_REQ_TAG_YARGB_SIGNED 0 ++ ++#define CR_PFIM_FBDC_REQ_TAG_UV_MASK 0x00000030U ++#define CR_PFIM_FBDC_REQ_TAG_UV_SHIFT 4 ++#define CR_PFIM_FBDC_REQ_TAG_UV_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_REQ_SB_TAG ++*/ ++#define CR_PFIM_FBDC_REQ_SB_TAG 0x0028 ++#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_MASK 0x00000003U ++#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_SHIFT 0 ++#define CR_PFIM_FBDC_REQ_SB_TAG_YARGB_SIGNED 0 ++ ++#define CR_PFIM_FBDC_REQ_SB_TAG_UV_MASK 
0x00000030U ++#define CR_PFIM_FBDC_REQ_SB_TAG_UV_SHIFT 4 ++#define CR_PFIM_FBDC_REQ_SB_TAG_UV_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_HDR_INVAL_REQ ++*/ ++#define CR_PFIM_FBDC_HDR_INVAL_REQ 0x002C ++#define CR_PFIM_FBDC_HDR_INVAL_REQ_MASK 0x00000001U ++#define CR_PFIM_FBDC_HDR_INVAL_REQ_SHIFT 0 ++#define CR_PFIM_FBDC_HDR_INVAL_REQ_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_PIX_FORMAT ++*/ ++#define CR_PFIM_FBDC_PIX_FORMAT 0x0030 ++#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_MASK 0x0000007FU ++#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_SHIFT 0 ++#define CR_PFIM_FBDC_PIX_FORMAT_FBDC_PIX_FMT_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_CH0123_VAL0 ++*/ ++#define CR_PFIM_FBDC_CR_CH0123_VAL0 0x0034 ++#define CR_PFIM_FBDC_CR_CH0123_VAL0_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_CR_CH0123_VAL0_SHIFT 0 ++#define CR_PFIM_FBDC_CR_CH0123_VAL0_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_CH0123_VAL1 ++*/ ++#define CR_PFIM_FBDC_CR_CH0123_VAL1 0x0038 ++#define CR_PFIM_FBDC_CR_CH0123_VAL1_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_CR_CH0123_VAL1_SHIFT 0 ++#define CR_PFIM_FBDC_CR_CH0123_VAL1_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_Y_VAL0 ++*/ ++#define CR_PFIM_FBDC_CR_Y_VAL0 0x003C ++#define CR_PFIM_FBDC_CR_Y_VAL0_MASK 0x000003FFU ++#define CR_PFIM_FBDC_CR_Y_VAL0_SHIFT 0 ++#define CR_PFIM_FBDC_CR_Y_VAL0_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_UV_VAL0 ++*/ ++#define CR_PFIM_FBDC_CR_UV_VAL0 0x0040 ++#define CR_PFIM_FBDC_CR_UV_VAL0_MASK 0x000003FFU ++#define CR_PFIM_FBDC_CR_UV_VAL0_SHIFT 0 ++#define CR_PFIM_FBDC_CR_UV_VAL0_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_Y_VAL1 ++*/ ++#define CR_PFIM_FBDC_CR_Y_VAL1 0x0044 ++#define CR_PFIM_FBDC_CR_Y_VAL1_MASK 0x000003FFU ++#define CR_PFIM_FBDC_CR_Y_VAL1_SHIFT 0 ++#define CR_PFIM_FBDC_CR_Y_VAL1_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CR_UV_VAL1 ++*/ ++#define CR_PFIM_FBDC_CR_UV_VAL1 0x0048 ++#define CR_PFIM_FBDC_CR_UV_VAL1_MASK 0x000003FFU ++#define CR_PFIM_FBDC_CR_UV_VAL1_SHIFT 0 ++#define CR_PFIM_FBDC_CR_UV_VAL1_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_FILTER_ENABLE ++*/ ++#define CR_PFIM_FBDC_FILTER_ENABLE 0x004C ++#define CR_PFIM_FBDC_FILTER_ENABLE_MASK 0x00000001U ++#define CR_PFIM_FBDC_FILTER_ENABLE_SHIFT 0 ++#define CR_PFIM_FBDC_FILTER_ENABLE_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_FILTER_STATUS ++*/ ++#define CR_PFIM_FBDC_FILTER_STATUS 0x0050 ++#define CR_PFIM_FBDC_FILTER_STATUS_MASK 0x0000000FU ++#define CR_PFIM_FBDC_FILTER_STATUS_SHIFT 0 ++#define CR_PFIM_FBDC_FILTER_STATUS_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_FILTER_CLEAR ++*/ ++#define CR_PFIM_FBDC_FILTER_CLEAR 0x0054 ++#define CR_PFIM_FBDC_FILTER_CLEAR_MASK 0x0000000FU ++#define CR_PFIM_FBDC_FILTER_CLEAR_SHIFT 0 ++#define CR_PFIM_FBDC_FILTER_CLEAR_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_TILE_TYPE ++*/ ++#define CR_PFIM_FBDC_TILE_TYPE 0x0058 ++#define CR_PFIM_FBDC_TILE_TYPE_MASK 0x00000003U ++#define CR_PFIM_FBDC_TILE_TYPE_SHIFT 0 ++#define CR_PFIM_FBDC_TILE_TYPE_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CLEAR_COLOUR_LSB ++*/ ++#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB 0x005C ++#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_SHIFT 0 ++#define CR_PFIM_FBDC_CLEAR_COLOUR_LSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_CLEAR_COLOUR_MSB ++*/ ++#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB 0x0060 ++#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_MASK 0xFFFFFFFFU ++#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_SHIFT 0 ++#define CR_PFIM_FBDC_CLEAR_COLOUR_MSB_SIGNED 0 ++ ++/* ++ Register CR_PFIM_FBDC_REQ_LOSSY ++*/ ++#define CR_PFIM_FBDC_REQ_LOSSY 0x0064 ++#define 
CR_PFIM_FBDC_REQ_LOSSY_MASK 0x00000001U ++#define CR_PFIM_FBDC_REQ_LOSSY_SHIFT 0 ++#define CR_PFIM_FBDC_REQ_LOSSY_SIGNED 0 ++ ++#endif /* _PFIM_REGS_H_ */ ++ ++/****************************************************************************** ++ End of file (pfim_regs.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_clk_ctrl.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_clk_ctrl.h +new file mode 100644 +index 000000000000..cc7b10fd8116 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_clk_ctrl.h +@@ -0,0 +1,1018 @@ ++/*************************************************************************/ /*! ++@Title Test Chip Framework system control register definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Autogenerated C -- do not edit ++ Generated from: tcf_clk_ctrl.def ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(_TCF_CLK_CTRL_H_) ++#define _TCF_CLK_CTRL_H_ ++ ++/* ++ * The following register definitions are valid if register 0x28 has value 0. 
++ */ ++ ++/* ++ Register FPGA_ID_REG ++*/ ++#define TCF_CLK_CTRL_FPGA_ID_REG 0x0000 ++#define FPGA_ID_REG_CORE_CFG_MASK 0x0000FFFFU ++#define FPGA_ID_REG_CORE_CFG_SHIFT 0 ++#define FPGA_ID_REG_CORE_CFG_SIGNED 0 ++ ++#define FPGA_ID_REG_CORE_ID_MASK 0xFFFF0000U ++#define FPGA_ID_REG_CORE_ID_SHIFT 16 ++#define FPGA_ID_REG_CORE_ID_SIGNED 0 ++ ++/* ++ Register FPGA_REV_REG ++*/ ++#define TCF_CLK_CTRL_FPGA_REV_REG 0x0008 ++#define FPGA_REV_REG_MAINT_MASK 0x000000FFU ++#define FPGA_REV_REG_MAINT_SHIFT 0 ++#define FPGA_REV_REG_MAINT_SIGNED 0 ++ ++#define FPGA_REV_REG_MINOR_MASK 0x0000FF00U ++#define FPGA_REV_REG_MINOR_SHIFT 8 ++#define FPGA_REV_REG_MINOR_SIGNED 0 ++ ++#define FPGA_REV_REG_MAJOR_MASK 0x00FF0000U ++#define FPGA_REV_REG_MAJOR_SHIFT 16 ++#define FPGA_REV_REG_MAJOR_SIGNED 0 ++ ++#define FPGA_REV_REG_DESIGNER_MASK 0xFF000000U ++#define FPGA_REV_REG_DESIGNER_SHIFT 24 ++#define FPGA_REV_REG_DESIGNER_SIGNED 0 ++ ++/* ++ Register FPGA_DES_REV_1 ++*/ ++#define TCF_CLK_CTRL_FPGA_DES_REV_1 0x0010 ++#define FPGA_DES_REV_1_MASK 0xFFFFFFFFU ++#define FPGA_DES_REV_1_SHIFT 0 ++#define FPGA_DES_REV_1_SIGNED 0 ++ ++/* ++ Register FPGA_DES_REV_2 ++*/ ++#define TCF_CLK_CTRL_FPGA_DES_REV_2 0x0018 ++#define FPGA_DES_REV_2_MASK 0xFFFFFFFFU ++#define FPGA_DES_REV_2_SHIFT 0 ++#define FPGA_DES_REV_2_SIGNED 0 ++ ++/* ++ Register TCF_CORE_ID_REG ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_ID_REG 0x0020 ++#define TCF_CORE_ID_REG_CORE_CFG_MASK 0x0000FFFFU ++#define TCF_CORE_ID_REG_CORE_CFG_SHIFT 0 ++#define TCF_CORE_ID_REG_CORE_CFG_SIGNED 0 ++ ++#define TCF_CORE_ID_REG_CORE_ID_MASK 0xFFFF0000U ++#define TCF_CORE_ID_REG_CORE_ID_SHIFT 16 ++#define TCF_CORE_ID_REG_CORE_ID_SIGNED 0 ++ ++/* ++ Register TCF_CORE_REV_REG ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_REV_REG 0x0028 ++#define TCF_CORE_REV_REG_MAINT_MASK 0x000000FFU ++#define TCF_CORE_REV_REG_MAINT_SHIFT 0 ++#define TCF_CORE_REV_REG_MAINT_SIGNED 0 ++ ++#define TCF_CORE_REV_REG_MINOR_MASK 0x0000FF00U ++#define TCF_CORE_REV_REG_MINOR_SHIFT 8 ++#define TCF_CORE_REV_REG_MINOR_SIGNED 0 ++ ++#define TCF_CORE_REV_REG_MAJOR_MASK 0x00FF0000U ++#define TCF_CORE_REV_REG_MAJOR_SHIFT 16 ++#define TCF_CORE_REV_REG_MAJOR_SIGNED 0 ++ ++#define TCF_CORE_REV_REG_DESIGNER_MASK 0xFF000000U ++#define TCF_CORE_REV_REG_DESIGNER_SHIFT 24 ++#define TCF_CORE_REV_REG_DESIGNER_SIGNED 0 ++ ++/* ++ Register TCF_CORE_DES_REV_1 ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_DES_REV_1 0x0030 ++#define TCF_CORE_DES_REV_1_MASK 0xFFFFFFFFU ++#define TCF_CORE_DES_REV_1_SHIFT 0 ++#define TCF_CORE_DES_REV_1_SIGNED 0 ++ ++/* ++ Register TCF_CORE_DES_REV_2 ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_DES_REV_2 0x0038 ++#define TCF_CORE_DES_REV_2_MASK 0xFFFFFFFFU ++#define TCF_CORE_DES_REV_2_SHIFT 0 ++#define TCF_CORE_DES_REV_2_SIGNED 0 ++ ++ ++/* ++ * The following register definitions are valid if register 0x28 has value 1. 
++ */ ++ ++/* ++ Register ID ++*/ ++#define TCF_CLK_CTRL_ID 0x0000 ++#define VARIANT_MASK 0x0000FFFFU ++#define VARIANT_SHIFT 0 ++#define VARIANT_SIGNED 0 ++ ++#define ID_MASK 0xFFFF0000U ++#define ID_SHIFT 16 ++#define ID_SIGNED 0 ++ ++/* ++ Register REL ++*/ ++#define TCF_CLK_CTRL_REL 0x0008 ++#define MINOR_MASK 0x0000FFFFU ++#define MINOR_SHIFT 0 ++#define MINOR_SIGNED 0 ++ ++#define MAJOR_MASK 0xFFFF0000U ++#define MAJOR_SHIFT 16 ++#define MAJOR_SIGNED 0 ++ ++/* ++ Register CHANGE_SET ++*/ ++#define TCF_CLK_CTRL_CHANGE_SET 0x0010 ++#define SET_MASK 0xFFFFFFFFU ++#define SET_SHIFT 0 ++#define SET_SIGNED 0 ++ ++/* ++ Register USER_ID ++*/ ++#define TCF_CLK_CTRL_USER_ID 0x0018 ++#define USER_ID_MASK 0x0000000FU ++#define USER_ID_SHIFT 0 ++#define USER_ID_SIGNED 0 ++ ++/* ++ Register USER_BUILD ++*/ ++#define TCF_CLK_CTRL_USER_BUILD 0x0020 ++#define BUILD_MASK 0xFFFFFFFFU ++#define BUILD_SHIFT 0 ++#define BUILD_SIGNED 0 ++ ++/* ++ Register SW_IF_VERSION ++*/ ++#define TCF_CLK_CTRL_SW_IF_VERSION 0x0028 ++#define VERSION_MASK 0x0000FFFFU ++#define VERSION_SHIFT 0 ++#define VERSION_SIGNED 0 ++ ++/* ++ * The following register definitions are valid for all Apollo builds, ++ * even if some of the registers are not available for certain cores. ++ */ ++ ++/* ++ Register SCB_GENERAL_CONTROL ++*/ ++#define TCF_CLK_CTRL_SCB_GENERAL_CONTROL 0x0040 ++#define SCB_GC_TRANS_HALT_MASK 0x00000200U ++#define SCB_GC_TRANS_HALT_SHIFT 9 ++#define SCB_GC_TRANS_HALT_SIGNED 0 ++ ++#define SCB_GC_CKD_REGS_MASK 0x00000100U ++#define SCB_GC_CKD_REGS_SHIFT 8 ++#define SCB_GC_CKD_REGS_SIGNED 0 ++ ++#define SCB_GC_CKD_SLAVE_MASK 0x00000080U ++#define SCB_GC_CKD_SLAVE_SHIFT 7 ++#define SCB_GC_CKD_SLAVE_SIGNED 0 ++ ++#define SCB_GC_CKD_MASTER_MASK 0x00000040U ++#define SCB_GC_CKD_MASTER_SHIFT 6 ++#define SCB_GC_CKD_MASTER_SIGNED 0 ++ ++#define SCB_GC_CKD_XDATA_MASK 0x00000020U ++#define SCB_GC_CKD_XDATA_SHIFT 5 ++#define SCB_GC_CKD_XDATA_SIGNED 0 ++ ++#define SCB_GC_SFR_REG_MASK 0x00000010U ++#define SCB_GC_SFR_REG_SHIFT 4 ++#define SCB_GC_SFR_REG_SIGNED 0 ++ ++#define SCB_GC_SFR_SLAVE_MASK 0x00000008U ++#define SCB_GC_SFR_SLAVE_SHIFT 3 ++#define SCB_GC_SFR_SLAVE_SIGNED 0 ++ ++#define SCB_GC_SFR_MASTER_MASK 0x00000004U ++#define SCB_GC_SFR_MASTER_SHIFT 2 ++#define SCB_GC_SFR_MASTER_SIGNED 0 ++ ++#define SCB_GC_SFR_DET_DATA_MASK 0x00000002U ++#define SCB_GC_SFR_DET_DATA_SHIFT 1 ++#define SCB_GC_SFR_DET_DATA_SIGNED 0 ++ ++#define SCB_GC_SFR_GEN_DATA_MASK 0x00000001U ++#define SCB_GC_SFR_GEN_DATA_SHIFT 0 ++#define SCB_GC_SFR_GEN_DATA_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_READ_COUNT ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_READ_COUNT 0x0048 ++#define MASTER_READ_COUNT_MASK 0x0000FFFFU ++#define MASTER_READ_COUNT_SHIFT 0 ++#define MASTER_READ_COUNT_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_READ_DATA ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_READ_DATA 0x0050 ++#define MASTER_READ_DATA_MASK 0x000000FFU ++#define MASTER_READ_DATA_SHIFT 0 ++#define MASTER_READ_DATA_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_ADDRESS ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_ADDRESS 0x0058 ++#define SCB_MASTER_ADDRESS_MASK 0x000003FFU ++#define SCB_MASTER_ADDRESS_SHIFT 0 ++#define SCB_MASTER_ADDRESS_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_WRITE_DATA ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_WRITE_DATA 0x0060 ++#define MASTER_WRITE_DATA_MASK 0x000000FFU ++#define MASTER_WRITE_DATA_SHIFT 0 ++#define MASTER_WRITE_DATA_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_WRITE_COUNT ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_WRITE_COUNT 0x0068 ++#define MASTER_WRITE_COUNT_MASK 
0x0000FFFFU ++#define MASTER_WRITE_COUNT_SHIFT 0 ++#define MASTER_WRITE_COUNT_SIGNED 0 ++ ++/* ++ Register SCB_BUS_SELECT ++*/ ++#define TCF_CLK_CTRL_SCB_BUS_SELECT 0x0070 ++#define BUS_SELECT_MASK 0x00000003U ++#define BUS_SELECT_SHIFT 0 ++#define BUS_SELECT_SIGNED 0 ++ ++/* ++ Register SCB_MASTER_FILL_STATUS ++*/ ++#define TCF_CLK_CTRL_SCB_MASTER_FILL_STATUS 0x0078 ++#define MASTER_WRITE_FIFO_EMPTY_MASK 0x00000008U ++#define MASTER_WRITE_FIFO_EMPTY_SHIFT 3 ++#define MASTER_WRITE_FIFO_EMPTY_SIGNED 0 ++ ++#define MASTER_WRITE_FIFO_FULL_MASK 0x00000004U ++#define MASTER_WRITE_FIFO_FULL_SHIFT 2 ++#define MASTER_WRITE_FIFO_FULL_SIGNED 0 ++ ++#define MASTER_READ_FIFO_EMPTY_MASK 0x00000002U ++#define MASTER_READ_FIFO_EMPTY_SHIFT 1 ++#define MASTER_READ_FIFO_EMPTY_SIGNED 0 ++ ++#define MASTER_READ_FIFO_FULL_MASK 0x00000001U ++#define MASTER_READ_FIFO_FULL_SHIFT 0 ++#define MASTER_READ_FIFO_FULL_SIGNED 0 ++ ++/* ++ Register CLK_AND_RST_CTRL ++*/ ++#define TCF_CLK_CTRL_CLK_AND_RST_CTRL 0x0080 ++#define GLB_CLKG_EN_MASK 0x00020000U ++#define GLB_CLKG_EN_SHIFT 17 ++#define GLB_CLKG_EN_SIGNED 0 ++ ++#define CLK_GATE_CNTL_MASK 0x00010000U ++#define CLK_GATE_CNTL_SHIFT 16 ++#define CLK_GATE_CNTL_SIGNED 0 ++ ++#define DUT_DCM_RESETN_MASK 0x00000400U ++#define DUT_DCM_RESETN_SHIFT 10 ++#define DUT_DCM_RESETN_SIGNED 0 ++ ++#define MEM_RESYNC_BYPASS_MASK 0x00000200U ++#define MEM_RESYNC_BYPASS_SHIFT 9 ++#define MEM_RESYNC_BYPASS_SIGNED 0 ++ ++#define SYS_RESYNC_BYPASS_MASK 0x00000100U ++#define SYS_RESYNC_BYPASS_SHIFT 8 ++#define SYS_RESYNC_BYPASS_SIGNED 0 ++ ++#define SCB_RESETN_MASK 0x00000010U ++#define SCB_RESETN_SHIFT 4 ++#define SCB_RESETN_SIGNED 0 ++ ++#define PDP2_RESETN_MASK 0x00000008U ++#define PDP2_RESETN_SHIFT 3 ++#define PDP2_RESETN_SIGNED 0 ++ ++#define PDP1_RESETN_MASK 0x00000004U ++#define PDP1_RESETN_SHIFT 2 ++#define PDP1_RESETN_SIGNED 0 ++ ++#define DDR_RESETN_MASK 0x00000002U ++#define DDR_RESETN_SHIFT 1 ++#define DDR_RESETN_SIGNED 0 ++ ++#define DUT_RESETN_MASK 0x00000001U ++#define DUT_RESETN_SHIFT 0 ++#define DUT_RESETN_SIGNED 0 ++ ++/* ++ Register TEST_REG_OUT ++*/ ++#define TCF_CLK_CTRL_TEST_REG_OUT 0x0088 ++#define TEST_REG_OUT_MASK 0xFFFFFFFFU ++#define TEST_REG_OUT_SHIFT 0 ++#define TEST_REG_OUT_SIGNED 0 ++ ++/* ++ Register TEST_REG_IN ++*/ ++#define TCF_CLK_CTRL_TEST_REG_IN 0x0090 ++#define TEST_REG_IN_MASK 0xFFFFFFFFU ++#define TEST_REG_IN_SHIFT 0 ++#define TEST_REG_IN_SIGNED 0 ++ ++/* ++ Register TEST_CTRL ++*/ ++#define TCF_CLK_CTRL_TEST_CTRL 0x0098 ++#define PCI_TEST_OFFSET_MASK 0xF8000000U ++#define PCI_TEST_OFFSET_SHIFT 27 ++#define PCI_TEST_OFFSET_SIGNED 0 ++ ++#define PDP1_HOST_MEM_SELECT_MASK 0x00000200U ++#define PDP1_HOST_MEM_SELECT_SHIFT 9 ++#define PDP1_HOST_MEM_SELECT_SIGNED 0 ++ ++#define HOST_PHY_MODE_MASK 0x00000100U ++#define HOST_PHY_MODE_SHIFT 8 ++#define HOST_PHY_MODE_SIGNED 0 ++ ++#define HOST_ONLY_MODE_MASK 0x00000080U ++#define HOST_ONLY_MODE_SHIFT 7 ++#define HOST_ONLY_MODE_SIGNED 0 ++ ++#define PCI_TEST_MODE_MASK 0x00000040U ++#define PCI_TEST_MODE_SHIFT 6 ++#define PCI_TEST_MODE_SIGNED 0 ++ ++#define TURN_OFF_DDR_MASK 0x00000020U ++#define TURN_OFF_DDR_SHIFT 5 ++#define TURN_OFF_DDR_SIGNED 0 ++ ++#define SYS_RD_CLK_INV_MASK 0x00000010U ++#define SYS_RD_CLK_INV_SHIFT 4 ++#define SYS_RD_CLK_INV_SIGNED 0 ++ ++#define MEM_REQ_CLK_INV_MASK 0x00000008U ++#define MEM_REQ_CLK_INV_SHIFT 3 ++#define MEM_REQ_CLK_INV_SIGNED 0 ++ ++#define BURST_SPLIT_MASK 0x00000004U ++#define BURST_SPLIT_SHIFT 2 ++#define BURST_SPLIT_SIGNED 0 ++ ++#define CLK_INVERSION_MASK 
0x00000002U ++#define CLK_INVERSION_SHIFT 1 ++#define CLK_INVERSION_SIGNED 0 ++ ++#define ADDRESS_FORCE_MASK 0x00000001U ++#define ADDRESS_FORCE_SHIFT 0 ++#define ADDRESS_FORCE_SIGNED 0 ++ ++/* ++ Register CLEAR_HOST_MEM_SIG ++*/ ++#define TCF_CLK_CTRL_CLEAR_HOST_MEM_SIG 0x00A0 ++#define SIGNATURE_TAG_ID_MASK 0x00000F00U ++#define SIGNATURE_TAG_ID_SHIFT 8 ++#define SIGNATURE_TAG_ID_SIGNED 0 ++ ++#define CLEAR_HOST_MEM_SIGNATURE_MASK 0x00000001U ++#define CLEAR_HOST_MEM_SIGNATURE_SHIFT 0 ++#define CLEAR_HOST_MEM_SIGNATURE_SIGNED 0 ++ ++/* ++ Register HOST_MEM_SIGNATURE ++*/ ++#define TCF_CLK_CTRL_HOST_MEM_SIGNATURE 0x00A8 ++#define HOST_MEM_SIGNATURE_MASK 0xFFFFFFFFU ++#define HOST_MEM_SIGNATURE_SHIFT 0 ++#define HOST_MEM_SIGNATURE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_STATUS ++*/ ++#define TCF_CLK_CTRL_INTERRUPT_STATUS 0x00C8 ++#define INTERRUPT_MASTER_STATUS_MASK 0x80000000U ++#define INTERRUPT_MASTER_STATUS_SHIFT 31 ++#define INTERRUPT_MASTER_STATUS_SIGNED 0 ++ ++#define OTHER_INTS_MASK 0x7FFE0000U ++#define OTHER_INTS_SHIFT 17 ++#define OTHER_INTS_SIGNED 0 ++ ++#define HOST_MST_NORESPONSE_MASK 0x00010000U ++#define HOST_MST_NORESPONSE_SHIFT 16 ++#define HOST_MST_NORESPONSE_SIGNED 0 ++ ++#define PDP2_INT_MASK 0x00008000U ++#define PDP2_INT_SHIFT 15 ++#define PDP2_INT_SIGNED 0 ++ ++#define PDP1_INT_MASK 0x00004000U ++#define PDP1_INT_SHIFT 14 ++#define PDP1_INT_SIGNED 0 ++ ++#define EXT_INT_MASK 0x00002000U ++#define EXT_INT_SHIFT 13 ++#define EXT_INT_SIGNED 0 ++ ++#define SCB_MST_HLT_BIT_MASK 0x00001000U ++#define SCB_MST_HLT_BIT_SHIFT 12 ++#define SCB_MST_HLT_BIT_SIGNED 0 ++ ++#define SCB_SLV_EVENT_MASK 0x00000800U ++#define SCB_SLV_EVENT_SHIFT 11 ++#define SCB_SLV_EVENT_SIGNED 0 ++ ++#define SCB_TDONE_RX_MASK 0x00000400U ++#define SCB_TDONE_RX_SHIFT 10 ++#define SCB_TDONE_RX_SIGNED 0 ++ ++#define SCB_SLV_WT_RD_DAT_MASK 0x00000200U ++#define SCB_SLV_WT_RD_DAT_SHIFT 9 ++#define SCB_SLV_WT_RD_DAT_SIGNED 0 ++ ++#define SCB_SLV_WT_PRV_RD_MASK 0x00000100U ++#define SCB_SLV_WT_PRV_RD_SHIFT 8 ++#define SCB_SLV_WT_PRV_RD_SIGNED 0 ++ ++#define SCB_SLV_WT_WR_DAT_MASK 0x00000080U ++#define SCB_SLV_WT_WR_DAT_SHIFT 7 ++#define SCB_SLV_WT_WR_DAT_SIGNED 0 ++ ++#define SCB_MST_WT_RD_DAT_MASK 0x00000040U ++#define SCB_MST_WT_RD_DAT_SHIFT 6 ++#define SCB_MST_WT_RD_DAT_SIGNED 0 ++ ++#define SCB_ADD_ACK_ERR_MASK 0x00000020U ++#define SCB_ADD_ACK_ERR_SHIFT 5 ++#define SCB_ADD_ACK_ERR_SIGNED 0 ++ ++#define SCB_WR_ACK_ERR_MASK 0x00000010U ++#define SCB_WR_ACK_ERR_SHIFT 4 ++#define SCB_WR_ACK_ERR_SIGNED 0 ++ ++#define SCB_SDAT_LO_TIM_MASK 0x00000008U ++#define SCB_SDAT_LO_TIM_SHIFT 3 ++#define SCB_SDAT_LO_TIM_SIGNED 0 ++ ++#define SCB_SCLK_LO_TIM_MASK 0x00000004U ++#define SCB_SCLK_LO_TIM_SHIFT 2 ++#define SCB_SCLK_LO_TIM_SIGNED 0 ++ ++#define SCB_UNEX_START_BIT_MASK 0x00000002U ++#define SCB_UNEX_START_BIT_SHIFT 1 ++#define SCB_UNEX_START_BIT_SIGNED 0 ++ ++#define SCB_BUS_INACTIVE_MASK 0x00000001U ++#define SCB_BUS_INACTIVE_SHIFT 0 ++#define SCB_BUS_INACTIVE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_OP_CFG ++*/ ++#define TCF_CLK_CTRL_INTERRUPT_OP_CFG 0x00D0 ++#define PULSE_NLEVEL_MASK 0x80000000U ++#define PULSE_NLEVEL_SHIFT 31 ++#define PULSE_NLEVEL_SIGNED 0 ++ ++#define INT_SENSE_MASK 0x40000000U ++#define INT_SENSE_SHIFT 30 ++#define INT_SENSE_SIGNED 0 ++ ++#define INTERRUPT_DEST_MASK 0x0000000FU ++#define INTERRUPT_DEST_SHIFT 0 ++#define INTERRUPT_DEST_SIGNED 0 ++ ++/* ++ Register INTERRUPT_ENABLE ++*/ ++#define TCF_CLK_CTRL_INTERRUPT_ENABLE 0x00D8 ++#define INTERRUPT_MASTER_ENABLE_MASK 0x80000000U 
++#define INTERRUPT_MASTER_ENABLE_SHIFT 31 ++#define INTERRUPT_MASTER_ENABLE_SIGNED 0 ++ ++#define INTERRUPT_ENABLE_MASK 0x7FFFFFFFU ++#define INTERRUPT_ENABLE_SHIFT 0 ++#define INTERRUPT_ENABLE_SIGNED 0 ++ ++/* ++ Register INTERRUPT_CLEAR ++*/ ++#define TCF_CLK_CTRL_INTERRUPT_CLEAR 0x00E0 ++#define INTERRUPT_MASTER_CLEAR_MASK 0x80000000U ++#define INTERRUPT_MASTER_CLEAR_SHIFT 31 ++#define INTERRUPT_MASTER_CLEAR_SIGNED 0 ++ ++#define INTERRUPT_CLEAR_MASK 0x7FFFFFFFU ++#define INTERRUPT_CLEAR_SHIFT 0 ++#define INTERRUPT_CLEAR_SIGNED 0 ++ ++/* ++ Register YCC_RGB_CTRL ++*/ ++#define TCF_CLK_CTRL_YCC_RGB_CTRL 0x00E8 ++#define RGB_CTRL1_MASK 0x000001FFU ++#define RGB_CTRL1_SHIFT 0 ++#define RGB_CTRL1_SIGNED 0 ++ ++#define RGB_CTRL2_MASK 0x01FF0000U ++#define RGB_CTRL2_SHIFT 16 ++#define RGB_CTRL2_SIGNED 0 ++ ++/* ++ Register EXP_BRD_CTRL ++*/ ++#define TCF_CLK_CTRL_EXP_BRD_CTRL 0x00F8 ++#define PDP1_DATA_EN_MASK 0x00000003U ++#define PDP1_DATA_EN_SHIFT 0 ++#define PDP1_DATA_EN_SIGNED 0 ++ ++#define PDP2_DATA_EN_MASK 0x00000030U ++#define PDP2_DATA_EN_SHIFT 4 ++#define PDP2_DATA_EN_SIGNED 0 ++ ++#define EXP_BRD_OUTPUT_MASK 0xFFFFFF00U ++#define EXP_BRD_OUTPUT_SHIFT 8 ++#define EXP_BRD_OUTPUT_SIGNED 0 ++ ++/* ++ Register HOSTIF_CONTROL ++*/ ++#define TCF_CLK_CTRL_HOSTIF_CONTROL 0x0100 ++#define HOSTIF_CTRL_MASK 0x000000FFU ++#define HOSTIF_CTRL_SHIFT 0 ++#define HOSTIF_CTRL_SIGNED 0 ++ ++/* ++ Register DUT_CONTROL_1 ++*/ ++#define TCF_CLK_CTRL_DUT_CONTROL_1 0x0108 ++#define DUT_CTRL_1_MASK 0xFFFFFFFFU ++#define DUT_CTRL_1_SHIFT 0 ++#define DUT_CTRL_1_SIGNED 0 ++ ++/* TC ES2 additional needs those: */ ++#define DUT_CTRL_TEST_MODE_SHIFT 0 ++#define DUT_CTRL_TEST_MODE_MASK 0x3 ++ ++#define DUT_CTRL_VCC_0V9EN (1<<12) ++#define DUT_CTRL_VCC_1V8EN (1<<13) ++#define DUT_CTRL_VCC_IO_INH (1<<14) ++#define DUT_CTRL_VCC_CORE_INH (1<<15) ++ ++/* ++ Register DUT_STATUS_1 ++*/ ++#define TCF_CLK_CTRL_DUT_STATUS_1 0x0110 ++#define DUT_STATUS_1_MASK 0xFFFFFFFFU ++#define DUT_STATUS_1_SHIFT 0 ++#define DUT_STATUS_1_SIGNED 0 ++ ++/* ++ Register DUT_CTRL_NOT_STAT_1 ++*/ ++#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_1 0x0118 ++#define DUT_STAT_NOT_CTRL_1_MASK 0xFFFFFFFFU ++#define DUT_STAT_NOT_CTRL_1_SHIFT 0 ++#define DUT_STAT_NOT_CTRL_1_SIGNED 0 ++ ++/* ++ Register DUT_CONTROL_2 ++*/ ++#define TCF_CLK_CTRL_DUT_CONTROL_2 0x0120 ++#define DUT_CTRL_2_MASK 0xFFFFFFFFU ++#define DUT_CTRL_2_SHIFT 0 ++#define DUT_CTRL_2_SIGNED 0 ++ ++/* ++ Register DUT_STATUS_2 ++*/ ++#define TCF_CLK_CTRL_DUT_STATUS_2 0x0128 ++#define DUT_STATUS_2_MASK 0xFFFFFFFFU ++#define DUT_STATUS_2_SHIFT 0 ++#define DUT_STATUS_2_SIGNED 0 ++ ++/* ++ Register DUT_CTRL_NOT_STAT_2 ++*/ ++#define TCF_CLK_CTRL_DUT_CTRL_NOT_STAT_2 0x0130 ++#define DUT_CTRL_NOT_STAT_2_MASK 0xFFFFFFFFU ++#define DUT_CTRL_NOT_STAT_2_SHIFT 0 ++#define DUT_CTRL_NOT_STAT_2_SIGNED 0 ++ ++/* ++ Register BUS_CAP_BASE_ADDR ++*/ ++#define TCF_CLK_CTRL_BUS_CAP_BASE_ADDR 0x0138 ++#define BUS_CAP_BASE_ADDR_MASK 0xFFFFFFFFU ++#define BUS_CAP_BASE_ADDR_SHIFT 0 ++#define BUS_CAP_BASE_ADDR_SIGNED 0 ++ ++/* ++ Register BUS_CAP_ENABLE ++*/ ++#define TCF_CLK_CTRL_BUS_CAP_ENABLE 0x0140 ++#define BUS_CAP_ENABLE_MASK 0x00000001U ++#define BUS_CAP_ENABLE_SHIFT 0 ++#define BUS_CAP_ENABLE_SIGNED 0 ++ ++/* ++ Register BUS_CAP_COUNT ++*/ ++#define TCF_CLK_CTRL_BUS_CAP_COUNT 0x0148 ++#define BUS_CAP_COUNT_MASK 0xFFFFFFFFU ++#define BUS_CAP_COUNT_SHIFT 0 ++#define BUS_CAP_COUNT_SIGNED 0 ++ ++/* ++ Register DCM_LOCK_STATUS ++*/ ++#define TCF_CLK_CTRL_DCM_LOCK_STATUS 0x0150 ++#define DCM_LOCK_STATUS_MASK 
0x00000007U ++#define DCM_LOCK_STATUS_SHIFT 0 ++#define DCM_LOCK_STATUS_SIGNED 0 ++ ++/* ++ Register AUX_DUT_RESETNS ++*/ ++#define TCF_CLK_CTRL_AUX_DUT_RESETNS 0x0158 ++#define AUX_DUT_RESETNS_MASK 0x0000000FU ++#define AUX_DUT_RESETNS_SHIFT 0 ++#define AUX_DUT_RESETNS_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_ADDR_RDNWR ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_ADDR_RDNWR 0x0160 ++#define TCF_SPI_MST_ADDR_MASK 0x0003FFFFU ++#define TCF_SPI_MST_ADDR_SHIFT 0 ++#define TCF_SPI_MST_ADDR_SIGNED 0 ++ ++#define TCF_SPI_MST_RDNWR_MASK 0x00040000U ++#define TCF_SPI_MST_RDNWR_SHIFT 18 ++#define TCF_SPI_MST_RDNWR_SIGNED 0 ++ ++#define TCF_SPI_MST_SLAVE_ID_MASK 0x00080000U ++#define TCF_SPI_MST_SLAVE_ID_SHIFT 19 ++#define TCF_SPI_MST_SLAVE_ID_SIGNED 0 ++ ++#define TCF_SPI_MST_MASTER_ID_MASK 0x00300000U ++#define TCF_SPI_MST_MASTER_ID_SHIFT 20 ++#define TCF_SPI_MST_MASTER_ID_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_WDATA ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_WDATA 0x0168 ++#define TCF_SPI_MST_WDATA_MASK 0xFFFFFFFFU ++#define TCF_SPI_MST_WDATA_SHIFT 0 ++#define TCF_SPI_MST_WDATA_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_RDATA ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_RDATA 0x0170 ++#define TCF_SPI_MST_RDATA_MASK 0xFFFFFFFFU ++#define TCF_SPI_MST_RDATA_SHIFT 0 ++#define TCF_SPI_MST_RDATA_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_STATUS ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_STATUS 0x0178 ++#define TCF_SPI_MST_STATUS_MASK 0x0000000FU ++#define TCF_SPI_MST_STATUS_SHIFT 0 ++#define TCF_SPI_MST_STATUS_SIGNED 0 ++ ++/* ++ Register TCF_SPI_MST_GO ++*/ ++#define TCF_CLK_CTRL_TCF_SPI_MST_GO 0x0180 ++#define TCF_SPI_MST_GO_MASK 0x00000001U ++#define TCF_SPI_MST_GO_SHIFT 0 ++#define TCF_SPI_MST_GO_SIGNED 0 ++ ++/* ++ Register EXT_SIG_CTRL ++*/ ++#define TCF_CLK_CTRL_EXT_SIG_CTRL 0x0188 ++#define EXT_SYS_REQ_SIG_START_MASK 0x00000001U ++#define EXT_SYS_REQ_SIG_START_SHIFT 0 ++#define EXT_SYS_REQ_SIG_START_SIGNED 0 ++ ++#define EXT_SYS_RD_SIG_START_MASK 0x00000002U ++#define EXT_SYS_RD_SIG_START_SHIFT 1 ++#define EXT_SYS_RD_SIG_START_SIGNED 0 ++ ++#define EXT_MEM_REQ_SIG_START_MASK 0x00000004U ++#define EXT_MEM_REQ_SIG_START_SHIFT 2 ++#define EXT_MEM_REQ_SIG_START_SIGNED 0 ++ ++#define EXT_MEM_RD_SIG_START_MASK 0x00000008U ++#define EXT_MEM_RD_SIG_START_SHIFT 3 ++#define EXT_MEM_RD_SIG_START_SIGNED 0 ++ ++/* ++ Register EXT_SYS_REQ_SIG ++*/ ++#define TCF_CLK_CTRL_EXT_SYS_REQ_SIG 0x0190 ++#define EXT_SYS_REQ_SIG_MASK 0xFFFFFFFFU ++#define EXT_SYS_REQ_SIG_SHIFT 0 ++#define EXT_SYS_REQ_SIG_SIGNED 0 ++ ++/* ++ Register EXT_SYS_RD_SIG ++*/ ++#define TCF_CLK_CTRL_EXT_SYS_RD_SIG 0x0198 ++#define EXT_SYS_RD_SIG_MASK 0xFFFFFFFFU ++#define EXT_SYS_RD_SIG_SHIFT 0 ++#define EXT_SYS_RD_SIG_SIGNED 0 ++ ++/* ++ Register EXT_MEM_REQ_SIG ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_REQ_SIG 0x01A0 ++#define EXT_MEM_REQ_SIG_MASK 0xFFFFFFFFU ++#define EXT_MEM_REQ_SIG_SHIFT 0 ++#define EXT_MEM_REQ_SIG_SIGNED 0 ++ ++/* ++ Register EXT_MEM_RD_SIG ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_RD_SIG 0x01A8 ++#define EXT_MEM_RD_SIG_MASK 0xFFFFFFFFU ++#define EXT_MEM_RD_SIG_SHIFT 0 ++#define EXT_MEM_RD_SIG_SIGNED 0 ++ ++/* ++ Register EXT_SYS_REQ_WR_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_SYS_REQ_WR_CNT 0x01B0 ++#define EXT_SYS_REQ_WR_CNT_MASK 0xFFFFFFFFU ++#define EXT_SYS_REQ_WR_CNT_SHIFT 0 ++#define EXT_SYS_REQ_WR_CNT_SIGNED 0 ++ ++/* ++ Register EXT_SYS_REQ_RD_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_SYS_REQ_RD_CNT 0x01B8 ++#define EXT_SYS_REQ_RD_CNT_MASK 0xFFFFFFFFU ++#define EXT_SYS_REQ_RD_CNT_SHIFT 0 ++#define EXT_SYS_REQ_RD_CNT_SIGNED 0 ++ ++/* ++ Register 
EXT_SYS_RD_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_SYS_RD_CNT 0x01C0 ++#define EXT_SYS_RD_CNT_MASK 0xFFFFFFFFU ++#define EXT_SYS_RD_CNT_SHIFT 0 ++#define EXT_SYS_RD_CNT_SIGNED 0 ++ ++/* ++ Register EXT_MEM_REQ_WR_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_REQ_WR_CNT 0x01C8 ++#define EXT_MEM_REQ_WR_CNT_MASK 0xFFFFFFFFU ++#define EXT_MEM_REQ_WR_CNT_SHIFT 0 ++#define EXT_MEM_REQ_WR_CNT_SIGNED 0 ++ ++/* ++ Register EXT_MEM_REQ_RD_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_REQ_RD_CNT 0x01D0 ++#define EXT_MEM_REQ_RD_CNT_MASK 0xFFFFFFFFU ++#define EXT_MEM_REQ_RD_CNT_SHIFT 0 ++#define EXT_MEM_REQ_RD_CNT_SIGNED 0 ++ ++/* ++ Register EXT_MEM_RD_CNT ++*/ ++#define TCF_CLK_CTRL_EXT_MEM_RD_CNT 0x01D8 ++#define EXT_MEM_RD_CNT_MASK 0xFFFFFFFFU ++#define EXT_MEM_RD_CNT_SHIFT 0 ++#define EXT_MEM_RD_CNT_SIGNED 0 ++ ++/* ++ Register TCF_CORE_TARGET_BUILD_CFG ++*/ ++#define TCF_CLK_CTRL_TCF_CORE_TARGET_BUILD_CFG 0x01E0 ++#define TCF_CORE_TARGET_BUILD_ID_MASK 0x000000FFU ++#define TCF_CORE_TARGET_BUILD_ID_SHIFT 0 ++#define TCF_CORE_TARGET_BUILD_ID_SIGNED 0 ++ ++/* ++ Register MEM_THROUGH_SYS ++*/ ++#define TCF_CLK_CTRL_MEM_THROUGH_SYS 0x01E8 ++#define MEM_THROUGH_SYS_MASK 0x00000001U ++#define MEM_THROUGH_SYS_SHIFT 0 ++#define MEM_THROUGH_SYS_SIGNED 0 ++ ++/* ++ Register HOST_PHY_OFFSET ++*/ ++#define TCF_CLK_CTRL_HOST_PHY_OFFSET 0x01F0 ++#define HOST_PHY_OFFSET_MASK 0xFFFFFFFFU ++#define HOST_PHY_OFFSET_SHIFT 0 ++#define HOST_PHY_OFFSET_SIGNED 0 ++ ++/* ++ Register DEBUG_REG_SEL ++*/ ++#define TCF_CLK_CTRL_DEBUG_REG_SEL 0x01F8 ++#define DEBUG_REG_SELECT_MASK 0xFFFFFFFFU ++#define DEBUG_REG_SELECT_SHIFT 0 ++#define DEBUG_REG_SELECT_SIGNED 0 ++ ++/* ++ Register DEBUG_REG ++*/ ++#define TCF_CLK_CTRL_DEBUG_REG 0x0200 ++#define DEBUG_REG_VALUE_MASK 0xFFFFFFFFU ++#define DEBUG_REG_VALUE_SHIFT 0 ++#define DEBUG_REG_VALUE_SIGNED 0 ++ ++/* ++ Register JTAG_CTRL ++*/ ++#define TCF_CLK_CTRL_JTAG_CTRL 0x0208 ++#define JTAG_TRST_MASK 0x00000001U ++#define JTAG_TRST_SHIFT 0 ++#define JTAG_TRST_SIGNED 0 ++ ++#define JTAG_TMS_MASK 0x00000002U ++#define JTAG_TMS_SHIFT 1 ++#define JTAG_TMS_SIGNED 0 ++ ++#define JTAG_TCK_MASK 0x00000004U ++#define JTAG_TCK_SHIFT 2 ++#define JTAG_TCK_SIGNED 0 ++ ++#define JTAG_TDO_MASK 0x00000008U ++#define JTAG_TDO_SHIFT 3 ++#define JTAG_TDO_SIGNED 0 ++ ++#define JTAG_TDI_MASK 0x00000010U ++#define JTAG_TDI_SHIFT 4 ++#define JTAG_TDI_SIGNED 0 ++ ++#define JTAG_DASH_N_REG_MASK 0x40000000U ++#define JTAG_DASH_N_REG_SHIFT 30 ++#define JTAG_DASH_N_REG_SIGNED 0 ++ ++#define JTAG_DISABLE_MASK 0x80000000U ++#define JTAG_DISABLE_SHIFT 31 ++#define JTAG_DISABLE_SIGNED 0 ++ ++/* ++ Register SAI_DEBUG_RDNWR ++*/ ++#define TCF_CLK_CTRL_SAI_DEBUG_RDNWR 0x0300 ++#define SAI_DEBUG_REG_ADDR_MASK 0x000001FFU ++#define SAI_DEBUG_REG_ADDR_SHIFT 0 ++#define SAI_DEBUG_REG_ADDR_SIGNED 0 ++ ++#define SAI_DEBUG_REG_RDNWR_MASK 0x00000200U ++#define SAI_DEBUG_REG_RDNWR_SHIFT 9 ++#define SAI_DEBUG_REG_RDNWR_SIGNED 0 ++ ++/* ++ Register SAI_DEBUG_WDATA ++*/ ++#define TCF_CLK_CTRL_SAI_DEBUG_WDATA 0x0308 ++#define SAI_DEBUG_REG_WDATA_MASK 0xFFFFFFFFU ++#define SAI_DEBUG_REG_WDATA_SHIFT 0 ++#define SAI_DEBUG_REG_WDATA_SIGNED 0 ++ ++/* ++ Register SAI_DEBUG_RDATA ++*/ ++#define TCF_CLK_CTRL_SAI_DEBUG_RDATA 0x0310 ++#define SAI_DEBUG_REG_RDATA_MASK 0xFFFFFFFFU ++#define SAI_DEBUG_REG_RDATA_SHIFT 0 ++#define SAI_DEBUG_REG_RDATA_SIGNED 0 ++ ++/* ++ Register SAI_DEBUG_GO ++*/ ++#define TCF_CLK_CTRL_SAI_DEBUG_GO 0x0318 ++#define SAI_DEBUG_REG_GO_MASK 0x00000001U ++#define SAI_DEBUG_REG_GO_SHIFT 0 ++#define SAI_DEBUG_REG_GO_SIGNED 0 ++ ++/* 
++ Register AUX_DUT_RESETS ++*/ ++#define TCF_CLK_CTRL_AUX_DUT_RESETS 0x0320 ++#define AUX_DUT_RESETS_MASK 0x0000000FU ++#define AUX_DUT_RESETS_SHIFT 0 ++#define AUX_DUT_RESETS_SIGNED 0 ++ ++/* ++ Register DUT_CLK_CTRL ++*/ ++#define TCF_CLK_CTRL_DUT_CLK_CTRL 0x0328 ++#define MEM_REQ_PHSE_MASK 0x0000FFFFU ++#define MEM_REQ_PHSE_SHIFT 0 ++#define MEM_REQ_PHSE_SIGNED 0 ++ ++/* ++ Register DUT_CLK_STATUS ++*/ ++#define TCF_CLK_CTRL_DUT_CLK_STATUS 0x0330 ++#define MEM_REQ_PHSE_SET_MASK 0x00000003U ++#define MEM_REQ_PHSE_SET_SHIFT 0 ++#define MEM_REQ_PHSE_SET_SIGNED 0 ++ ++/* ++ Register DUT_CLK_INFO ++*/ ++#define TCF_CLK_CTRL_DUT_CLK_INFO 0x0340 ++#define CORE_MASK 0x0000FFFFU ++#define CORE_SHIFT 0 ++#define CORE_SIGNED 0 ++ ++#define MEM_MASK 0xFFFF0000U ++#define MEM_SHIFT 16 ++#define MEM_SIGNED 0 ++ ++/* ++ Register DUT_CLK_PHSE ++*/ ++#define TCF_CLK_CTRL_DUT_CLK_PHSE 0x0348 ++#define MEM_REQ_MASK 0x0000FFFFU ++#define MEM_REQ_SHIFT 0 ++#define MEM_REQ_SIGNED 0 ++ ++#define MEM_RD_MASK 0xFFFF0000U ++#define MEM_RD_SHIFT 16 ++#define MEM_RD_SIGNED 0 ++ ++#endif /* !defined(_TCF_CLK_CTRL_H_) */ ++ ++/***************************************************************************** ++ End of file (tcf_clk_ctrl.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_pll.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_pll.h +new file mode 100644 +index 000000000000..71eaf924bbd6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_pll.h +@@ -0,0 +1,311 @@ ++/*************************************************************************/ /*! ++@Title Test Chip Framework PDP register definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Autogenerated C -- do not edit ++ Generated from tcf_pll.def ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(_TCF_PLL_H_) ++#define _TCF_PLL_H_ ++ ++/* ++ Register PLL_DDR2_CLK0 ++*/ ++#define TCF_PLL_PLL_DDR2_CLK0 0x0000 ++#define DDR2_PLL_CLK0_PHS_MASK 0x00300000U ++#define DDR2_PLL_CLK0_PHS_SHIFT 20 ++#define DDR2_PLL_CLK0_PHS_SIGNED 0 ++ ++#define DDR2_PLL_CLK0_MS_MASK 0x00030000U ++#define DDR2_PLL_CLK0_MS_SHIFT 16 ++#define DDR2_PLL_CLK0_MS_SIGNED 0 ++ ++#define DDR2_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define DDR2_PLL_CLK0_FREQ_SHIFT 0 ++#define DDR2_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_DDR2_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_DDR2_CLK1TO5 0x0008 ++#define DDR2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define DDR2_PLL_CLK1TO5_PHS_SHIFT 20 ++#define DDR2_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define DDR2_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define DDR2_PLL_CLK1TO5_MS_SHIFT 10 ++#define DDR2_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define DDR2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define DDR2_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define DDR2_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_DDR2_DRP_GO ++*/ ++#define TCF_PLL_PLL_DDR2_DRP_GO 0x0010 ++#define PLL_DDR2_DRP_GO_MASK 0x00000001U ++#define PLL_DDR2_DRP_GO_SHIFT 0 ++#define PLL_DDR2_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_PDP_CLK0 ++*/ ++#define TCF_PLL_PLL_PDP_CLK0 0x0018 ++#define PDP_PLL_CLK0_PHS_MASK 0x00300000U ++#define PDP_PLL_CLK0_PHS_SHIFT 20 ++#define PDP_PLL_CLK0_PHS_SIGNED 0 ++ ++#define PDP_PLL_CLK0_MS_MASK 0x00030000U ++#define PDP_PLL_CLK0_MS_SHIFT 16 ++#define PDP_PLL_CLK0_MS_SIGNED 0 ++ ++#define PDP_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define PDP_PLL_CLK0_FREQ_SHIFT 0 ++#define PDP_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_PDP_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_PDP_CLK1TO5 0x0020 ++#define PDP_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define PDP_PLL_CLK1TO5_PHS_SHIFT 20 ++#define PDP_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define PDP_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define PDP_PLL_CLK1TO5_MS_SHIFT 10 ++#define PDP_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define PDP_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define PDP_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define PDP_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_PDP_DRP_GO ++*/ ++#define TCF_PLL_PLL_PDP_DRP_GO 0x0028 ++#define PLL_PDP_DRP_GO_MASK 0x00000001U ++#define PLL_PDP_DRP_GO_SHIFT 0 ++#define PLL_PDP_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_PDP2_CLK0 ++*/ ++#define TCF_PLL_PLL_PDP2_CLK0 0x0030 ++#define PDP2_PLL_CLK0_PHS_MASK 0x00300000U ++#define PDP2_PLL_CLK0_PHS_SHIFT 20 ++#define PDP2_PLL_CLK0_PHS_SIGNED 0 ++ ++#define PDP2_PLL_CLK0_MS_MASK 0x00030000U ++#define PDP2_PLL_CLK0_MS_SHIFT 16 ++#define PDP2_PLL_CLK0_MS_SIGNED 0 ++ ++#define PDP2_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define PDP2_PLL_CLK0_FREQ_SHIFT 0 ++#define PDP2_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_PDP2_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_PDP2_CLK1TO5 0x0038 ++#define PDP2_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define PDP2_PLL_CLK1TO5_PHS_SHIFT 20 ++#define PDP2_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define 
PDP2_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define PDP2_PLL_CLK1TO5_MS_SHIFT 10 ++#define PDP2_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define PDP2_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define PDP2_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define PDP2_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_PDP2_DRP_GO ++*/ ++#define TCF_PLL_PLL_PDP2_DRP_GO 0x0040 ++#define PLL_PDP2_DRP_GO_MASK 0x00000001U ++#define PLL_PDP2_DRP_GO_SHIFT 0 ++#define PLL_PDP2_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_CORE_CLK0 ++*/ ++#define TCF_PLL_PLL_CORE_CLK0 0x0048 ++#define CORE_PLL_CLK0_PHS_MASK 0x00300000U ++#define CORE_PLL_CLK0_PHS_SHIFT 20 ++#define CORE_PLL_CLK0_PHS_SIGNED 0 ++ ++#define CORE_PLL_CLK0_MS_MASK 0x00030000U ++#define CORE_PLL_CLK0_MS_SHIFT 16 ++#define CORE_PLL_CLK0_MS_SIGNED 0 ++ ++#define CORE_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define CORE_PLL_CLK0_FREQ_SHIFT 0 ++#define CORE_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_CORE_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_CORE_CLK1TO5 0x0050 ++#define CORE_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define CORE_PLL_CLK1TO5_PHS_SHIFT 20 ++#define CORE_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define CORE_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define CORE_PLL_CLK1TO5_MS_SHIFT 10 ++#define CORE_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define CORE_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define CORE_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define CORE_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_CORE_DRP_GO ++*/ ++#define TCF_PLL_PLL_CORE_DRP_GO 0x0058 ++#define PLL_CORE_DRP_GO_MASK 0x00000001U ++#define PLL_CORE_DRP_GO_SHIFT 0 ++#define PLL_CORE_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_SYSIF_CLK0 ++*/ ++#define TCF_PLL_PLL_SYSIF_CLK0 0x0060 ++#define SYSIF_PLL_CLK0_PHS_MASK 0x00300000U ++#define SYSIF_PLL_CLK0_PHS_SHIFT 20 ++#define SYSIF_PLL_CLK0_PHS_SIGNED 0 ++ ++#define SYSIF_PLL_CLK0_MS_MASK 0x00030000U ++#define SYSIF_PLL_CLK0_MS_SHIFT 16 ++#define SYSIF_PLL_CLK0_MS_SIGNED 0 ++ ++#define SYSIF_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define SYSIF_PLL_CLK0_FREQ_SHIFT 0 ++#define SYSIF_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_SYSIF_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_SYSIF_CLK1TO5 0x0068 ++#define SYSIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define SYSIF_PLL_CLK1TO5_PHS_SHIFT 20 ++#define SYSIF_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define SYSIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define SYSIF_PLL_CLK1TO5_MS_SHIFT 10 ++#define SYSIF_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define SYSIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define SYSIF_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define SYSIF_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_SYS_DRP_GO ++*/ ++#define TCF_PLL_PLL_SYS_DRP_GO 0x0070 ++#define PLL_SYS_DRP_GO_MASK 0x00000001U ++#define PLL_SYS_DRP_GO_SHIFT 0 ++#define PLL_SYS_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_MEMIF_CLK0 ++*/ ++#define TCF_PLL_PLL_MEMIF_CLK0 0x0078 ++#define MEMIF_PLL_CLK0_PHS_MASK 0x00300000U ++#define MEMIF_PLL_CLK0_PHS_SHIFT 20 ++#define MEMIF_PLL_CLK0_PHS_SIGNED 0 ++ ++#define MEMIF_PLL_CLK0_MS_MASK 0x00030000U ++#define MEMIF_PLL_CLK0_MS_SHIFT 16 ++#define MEMIF_PLL_CLK0_MS_SIGNED 0 ++ ++#define MEMIF_PLL_CLK0_FREQ_MASK 0x000001FFU ++#define MEMIF_PLL_CLK0_FREQ_SHIFT 0 ++#define MEMIF_PLL_CLK0_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_MEMIF_CLK1TO5 ++*/ ++#define TCF_PLL_PLL_MEMIF_CLK1TO5 0x0080 ++#define MEMIF_PLL_CLK1TO5_PHS_MASK 0x3FF00000U ++#define MEMIF_PLL_CLK1TO5_PHS_SHIFT 20 ++#define MEMIF_PLL_CLK1TO5_PHS_SIGNED 0 ++ ++#define MEMIF_PLL_CLK1TO5_MS_MASK 0x000FFC00U ++#define MEMIF_PLL_CLK1TO5_MS_SHIFT 10 ++#define MEMIF_PLL_CLK1TO5_MS_SIGNED 0 ++ ++#define MEMIF_PLL_CLK1TO5_FREQ_MASK 0x000003FFU ++#define 
MEMIF_PLL_CLK1TO5_FREQ_SHIFT 0 ++#define MEMIF_PLL_CLK1TO5_FREQ_SIGNED 0 ++ ++/* ++ Register PLL_MEM_DRP_GO ++*/ ++#define TCF_PLL_PLL_MEM_DRP_GO 0x0088 ++#define PLL_MEM_DRP_GO_MASK 0x00000001U ++#define PLL_MEM_DRP_GO_SHIFT 0 ++#define PLL_MEM_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_ALL_DRP_GO ++*/ ++#define TCF_PLL_PLL_ALL_DRP_GO 0x0090 ++#define PLL_ALL_DRP_GO_MASK 0x00000001U ++#define PLL_ALL_DRP_GO_SHIFT 0 ++#define PLL_ALL_DRP_GO_SIGNED 0 ++ ++/* ++ Register PLL_DRP_STATUS ++*/ ++#define TCF_PLL_PLL_DRP_STATUS 0x0098 ++#define PLL_LOCKS_MASK 0x00003F00U ++#define PLL_LOCKS_SHIFT 8 ++#define PLL_LOCKS_SIGNED 0 ++ ++#define PLL_DRP_GOOD_MASK 0x0000003FU ++#define PLL_DRP_GOOD_SHIFT 0 ++#define PLL_DRP_GOOD_SIGNED 0 ++ ++#endif /* !defined(_TCF_PLL_H_) */ ++ ++/***************************************************************************** ++ End of file (tcf_pll.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_rgbpdp_regs.h b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_rgbpdp_regs.h +new file mode 100644 +index 000000000000..e87ba6152411 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/system/rgx_tc/tcf_rgbpdp_regs.h +@@ -0,0 +1,559 @@ ++/*************************************************************************/ /*! ++@Title Test Chip Framework PDP register definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Autogenerated C -- do not edit ++ Generated from: tcf_rgbpdp_regs.def ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(_TCF_RGBPDP_REGS_H_) ++#define _TCF_RGBPDP_REGS_H_ ++ ++/* ++ Register PVR_TCF_RGBPDP_STR1SURF ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF 0x0000 ++#define STR1HEIGHT_MASK 0x000007FFU ++#define STR1HEIGHT_SHIFT 0 ++#define STR1HEIGHT_SIGNED 0 ++ ++#define STR1WIDTH_MASK 0x003FF800U ++#define STR1WIDTH_SHIFT 11 ++#define STR1WIDTH_SIGNED 0 ++ ++#define STR1PIXFMT_MASK 0x0F000000U ++#define STR1PIXFMT_SHIFT 24 ++#define STR1PIXFMT_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_STR1ADDRCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL 0x0004 ++#define STR1BASE_MASK 0x03FFFFFFU ++#define STR1BASE_SHIFT 0 ++#define STR1BASE_SIGNED 0 ++ ++#define STR1INTFIELD_MASK 0x40000000U ++#define STR1INTFIELD_SHIFT 30 ++#define STR1INTFIELD_SIGNED 0 ++ ++#define STR1STREN_MASK 0x80000000U ++#define STR1STREN_SHIFT 31 ++#define STR1STREN_SIGNED 0 ++ ++/* ++ Register PVR_PDP_STR1POSN ++*/ ++#define TCF_RGBPDP_PVR_PDP_STR1POSN 0x0008 ++#define STR1STRIDE_MASK 0x000003FFU ++#define STR1STRIDE_SHIFT 0 ++#define STR1STRIDE_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_MEMCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_MEMCTRL 0x000C ++#define MEMREFRESH_MASK 0xC0000000U ++#define MEMREFRESH_SHIFT 30 ++#define MEMREFRESH_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_STRCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL 0x0010 ++#define BURSTLEN_GFX_MASK 0x000000FFU ++#define BURSTLEN_GFX_SHIFT 0 ++#define BURSTLEN_GFX_SIGNED 0 ++ ++#define THRESHOLD_GFX_MASK 0x0000FF00U ++#define THRESHOLD_GFX_SHIFT 8 ++#define THRESHOLD_GFX_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_SYNCCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL 0x0014 ++#define HSDIS_MASK 0x00000001U ++#define HSDIS_SHIFT 0 ++#define HSDIS_SIGNED 0 ++ ++#define HSPOL_MASK 0x00000002U ++#define HSPOL_SHIFT 1 ++#define HSPOL_SIGNED 0 ++ ++#define VSDIS_MASK 0x00000004U ++#define VSDIS_SHIFT 2 ++#define VSDIS_SIGNED 0 ++ ++#define VSPOL_MASK 0x00000008U ++#define VSPOL_SHIFT 3 ++#define VSPOL_SIGNED 0 ++ ++#define BLNKDIS_MASK 0x00000010U ++#define BLNKDIS_SHIFT 4 ++#define BLNKDIS_SIGNED 0 ++ ++#define BLNKPOL_MASK 0x00000020U ++#define BLNKPOL_SHIFT 5 ++#define BLNKPOL_SIGNED 0 ++ ++#define HS_SLAVE_MASK 0x00000040U ++#define HS_SLAVE_SHIFT 6 ++#define HS_SLAVE_SIGNED 0 ++ ++#define VS_SLAVE_MASK 0x00000080U ++#define VS_SLAVE_SHIFT 7 ++#define VS_SLAVE_SIGNED 0 ++ ++#define INTERLACE_MASK 0x00000100U ++#define INTERLACE_SHIFT 8 ++#define INTERLACE_SIGNED 0 ++ ++#define FIELDPOL_MASK 0x00000200U ++#define FIELDPOL_SHIFT 9 ++#define FIELDPOL_SIGNED 0 ++ ++#define CLKPOL_MASK 0x00000800U ++#define CLKPOL_SHIFT 11 ++#define CLKPOL_SIGNED 0 ++ ++#define CSYNC_EN_MASK 0x00001000U ++#define CSYNC_EN_SHIFT 12 ++#define CSYNC_EN_SIGNED 0 ++ ++#define FIELD_EN_MASK 0x00002000U ++#define FIELD_EN_SHIFT 13 ++#define FIELD_EN_SIGNED 0 ++ ++#define UPDWAIT_MASK 0x000F0000U ++#define UPDWAIT_SHIFT 16 ++#define 
UPDWAIT_SIGNED 0 ++ ++#define UPDCTRL_MASK 0x01000000U ++#define UPDCTRL_SHIFT 24 ++#define UPDCTRL_SIGNED 0 ++ ++#define UPDINTCTRL_MASK 0x02000000U ++#define UPDINTCTRL_SHIFT 25 ++#define UPDINTCTRL_SIGNED 0 ++ ++#define UPDSYNCTRL_MASK 0x04000000U ++#define UPDSYNCTRL_SHIFT 26 ++#define UPDSYNCTRL_SIGNED 0 ++ ++#define POWERDN_MASK 0x10000000U ++#define POWERDN_SHIFT 28 ++#define POWERDN_SIGNED 0 ++ ++#define DISP_RST_MASK 0x20000000U ++#define DISP_RST_SHIFT 29 ++#define DISP_RST_SIGNED 0 ++ ++#define SYNCACTIVE_MASK 0x80000000U ++#define SYNCACTIVE_SHIFT 31 ++#define SYNCACTIVE_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_BORDCOL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL 0x0018 ++#define BORDCOL_MASK 0x00FFFFFFU ++#define BORDCOL_SHIFT 0 ++#define BORDCOL_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_UPDCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL 0x001C ++#define UPDFIELD_MASK 0x00000001U ++#define UPDFIELD_SHIFT 0 ++#define UPDFIELD_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_HSYNC1 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1 0x0020 ++#define HT_MASK 0x00000FFFU ++#define HT_SHIFT 0 ++#define HT_SIGNED 0 ++ ++#define HBPS_MASK 0x0FFF0000U ++#define HBPS_SHIFT 16 ++#define HBPS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_HSYNC2 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2 0x0024 ++#define HLBS_MASK 0x00000FFFU ++#define HLBS_SHIFT 0 ++#define HLBS_SIGNED 0 ++ ++#define HAS_MASK 0x0FFF0000U ++#define HAS_SHIFT 16 ++#define HAS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_HSYNC3 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3 0x0028 ++#define HRBS_MASK 0x00000FFFU ++#define HRBS_SHIFT 0 ++#define HRBS_SIGNED 0 ++ ++#define HFPS_MASK 0x0FFF0000U ++#define HFPS_SHIFT 16 ++#define HFPS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VSYNC1 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1 0x002C ++#define VT_MASK 0x00000FFFU ++#define VT_SHIFT 0 ++#define VT_SIGNED 0 ++ ++#define VBPS_MASK 0x0FFF0000U ++#define VBPS_SHIFT 16 ++#define VBPS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VSYNC2 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2 0x0030 ++#define VTBS_MASK 0x00000FFFU ++#define VTBS_SHIFT 0 ++#define VTBS_SIGNED 0 ++ ++#define VAS_MASK 0x0FFF0000U ++#define VAS_SHIFT 16 ++#define VAS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VSYNC3 ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3 0x0034 ++#define VBBS_MASK 0x00000FFFU ++#define VBBS_SHIFT 0 ++#define VBBS_SIGNED 0 ++ ++#define VFPS_MASK 0x0FFF0000U ++#define VFPS_SHIFT 16 ++#define VFPS_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_HDECTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL 0x0038 ++#define HDEF_MASK 0x00000FFFU ++#define HDEF_SHIFT 0 ++#define HDEF_SIGNED 0 ++ ++#define HDES_MASK 0x0FFF0000U ++#define HDES_SHIFT 16 ++#define HDES_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VDECTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL 0x003C ++#define VDEF_MASK 0x00000FFFU ++#define VDEF_SHIFT 0 ++#define VDEF_SIGNED 0 ++ ++#define VDES_MASK 0x0FFF0000U ++#define VDES_SHIFT 16 ++#define VDES_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_VEVENT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT 0x0040 ++#define VFETCH_MASK 0x00000FFFU ++#define VFETCH_SHIFT 0 ++#define VFETCH_SIGNED 0 ++ ++#define VEVENT_MASK 0x0FFF0000U ++#define VEVENT_SHIFT 16 ++#define VEVENT_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_OPMASK ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_OPMASK 0x0044 ++#define MASKR_MASK 0x000000FFU ++#define MASKR_SHIFT 0 ++#define MASKR_SIGNED 0 ++ ++#define MASKG_MASK 0x0000FF00U ++#define MASKG_SHIFT 8 ++#define 
MASKG_SIGNED 0 ++ ++#define MASKB_MASK 0x00FF0000U ++#define MASKB_SHIFT 16 ++#define MASKB_SIGNED 0 ++ ++#define BLANKLEVEL_MASK 0x40000000U ++#define BLANKLEVEL_SHIFT 30 ++#define BLANKLEVEL_SIGNED 0 ++ ++#define MASKLEVEL_MASK 0x80000000U ++#define MASKLEVEL_SHIFT 31 ++#define MASKLEVEL_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_INTSTAT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT 0x0048 ++#define INTS_HBLNK0_MASK 0x00000001U ++#define INTS_HBLNK0_SHIFT 0 ++#define INTS_HBLNK0_SIGNED 0 ++ ++#define INTS_HBLNK1_MASK 0x00000002U ++#define INTS_HBLNK1_SHIFT 1 ++#define INTS_HBLNK1_SIGNED 0 ++ ++#define INTS_VBLNK0_MASK 0x00000004U ++#define INTS_VBLNK0_SHIFT 2 ++#define INTS_VBLNK0_SIGNED 0 ++ ++#define INTS_VBLNK1_MASK 0x00000008U ++#define INTS_VBLNK1_SHIFT 3 ++#define INTS_VBLNK1_SIGNED 0 ++ ++#define INTS_STR1URUN_MASK 0x00000010U ++#define INTS_STR1URUN_SHIFT 4 ++#define INTS_STR1URUN_SIGNED 0 ++ ++#define INTS_STR1ORUN_MASK 0x00000020U ++#define INTS_STR1ORUN_SHIFT 5 ++#define INTS_STR1ORUN_SIGNED 0 ++ ++#define INTS_DISPURUN_MASK 0x00000040U ++#define INTS_DISPURUN_SHIFT 6 ++#define INTS_DISPURUN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_INTENAB ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB 0x004C ++#define INTEN_HBLNK0_MASK 0x00000001U ++#define INTEN_HBLNK0_SHIFT 0 ++#define INTEN_HBLNK0_SIGNED 0 ++ ++#define INTEN_HBLNK1_MASK 0x00000002U ++#define INTEN_HBLNK1_SHIFT 1 ++#define INTEN_HBLNK1_SIGNED 0 ++ ++#define INTEN_VBLNK0_MASK 0x00000004U ++#define INTEN_VBLNK0_SHIFT 2 ++#define INTEN_VBLNK0_SIGNED 0 ++ ++#define INTEN_VBLNK1_MASK 0x00000008U ++#define INTEN_VBLNK1_SHIFT 3 ++#define INTEN_VBLNK1_SIGNED 0 ++ ++#define INTEN_STR1URUN_MASK 0x00000010U ++#define INTEN_STR1URUN_SHIFT 4 ++#define INTEN_STR1URUN_SIGNED 0 ++ ++#define INTEN_STR1ORUN_MASK 0x00000020U ++#define INTEN_STR1ORUN_SHIFT 5 ++#define INTEN_STR1ORUN_SIGNED 0 ++ ++#define INTEN_DISPURUN_MASK 0x00000040U ++#define INTEN_DISPURUN_SHIFT 6 ++#define INTEN_DISPURUN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_INTCLEAR ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR 0x0050 ++#define INTCLR_HBLNK0_MASK 0x00000001U ++#define INTCLR_HBLNK0_SHIFT 0 ++#define INTCLR_HBLNK0_SIGNED 0 ++ ++#define INTCLR_HBLNK1_MASK 0x00000002U ++#define INTCLR_HBLNK1_SHIFT 1 ++#define INTCLR_HBLNK1_SIGNED 0 ++ ++#define INTCLR_VBLNK0_MASK 0x00000004U ++#define INTCLR_VBLNK0_SHIFT 2 ++#define INTCLR_VBLNK0_SIGNED 0 ++ ++#define INTCLR_VBLNK1_MASK 0x00000008U ++#define INTCLR_VBLNK1_SHIFT 3 ++#define INTCLR_VBLNK1_SIGNED 0 ++ ++#define INTCLR_STR1URUN_MASK 0x00000010U ++#define INTCLR_STR1URUN_SHIFT 4 ++#define INTCLR_STR1URUN_SIGNED 0 ++ ++#define INTCLR_STR1ORUN_MASK 0x00000020U ++#define INTCLR_STR1ORUN_SHIFT 5 ++#define INTCLR_STR1ORUN_SIGNED 0 ++ ++#define INTCLR_DISPURUN_MASK 0x00000040U ++#define INTCLR_DISPURUN_SHIFT 6 ++#define INTCLR_DISPURUN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_INTCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_INTCTRL 0x0054 ++#define HBLNK_LINENO_MASK 0x00000FFFU ++#define HBLNK_LINENO_SHIFT 0 ++#define HBLNK_LINENO_SIGNED 0 ++ ++#define HBLNK_LINE_MASK 0x00010000U ++#define HBLNK_LINE_SHIFT 16 ++#define HBLNK_LINE_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_SIGNAT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_SIGNAT 0x0058 ++#define SIGNATURE_MASK 0xFFFFFFFFU ++#define SIGNATURE_SHIFT 0 ++#define SIGNATURE_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_LINESTAT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_LINESTAT 0x005C ++#define LINENO_MASK 0x00000FFFU ++#define LINENO_SHIFT 0 ++#define LINENO_SIGNED 0 ++ 
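++/*
++ Editorial illustration, not generated from tcf_rgbpdp_regs.def: every field
++ in this file is described by the same MASK/SHIFT/SIGNED triplet, so an
++ unsigned field can be read and written with hypothetical helpers of this
++ shape:
++
++   static inline IMG_UINT32 PDPReadField(IMG_UINT32 ui32RegVal,
++                                         IMG_UINT32 ui32Mask,
++                                         IMG_UINT32 ui32Shift)
++   {
++       return (ui32RegVal & ui32Mask) >> ui32Shift;
++   }
++
++   static inline IMG_UINT32 PDPWriteField(IMG_UINT32 ui32RegVal,
++                                          IMG_UINT32 ui32Mask,
++                                          IMG_UINT32 ui32Shift,
++                                          IMG_UINT32 ui32FieldVal)
++   {
++       return (ui32RegVal & ~ui32Mask) | ((ui32FieldVal << ui32Shift) & ui32Mask);
++   }
++
++ For example, the line currently being scanned out is recovered from the
++ LINESTAT register above as PDPReadField(ui32LineStat, LINENO_MASK,
++ LINENO_SHIFT).
++*/
++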
++/* ++ Register PVR_TCF_RGBPDP_DBGCTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGCTRL 0x0060 ++#define DBG_ENAB_MASK 0x00000001U ++#define DBG_ENAB_SHIFT 0 ++#define DBG_ENAB_SIGNED 0 ++ ++#define DBG_READ_MASK 0x00000002U ++#define DBG_READ_SHIFT 1 ++#define DBG_READ_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_DBGDATA ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGDATA 0x0064 ++#define DBG_DATA_MASK 0x00FFFFFFU ++#define DBG_DATA_SHIFT 0 ++#define DBG_DATA_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_DBGSIDE ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_DBGSIDE 0x0068 ++#define DBG_SIDE_MASK 0x00000007U ++#define DBG_SIDE_SHIFT 0 ++#define DBG_SIDE_SIGNED 0 ++ ++#define DBG_VAL_MASK 0x00000008U ++#define DBG_VAL_SHIFT 3 ++#define DBG_VAL_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_REGLD_STAT ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_STAT 0x0070 ++#define REGLD_ADDROUT_MASK 0x00FFFFFFU ++#define REGLD_ADDROUT_SHIFT 0 ++#define REGLD_ADDROUT_SIGNED 0 ++ ++#define REGLD_ADDREN_MASK 0x80000000U ++#define REGLD_ADDREN_SHIFT 31 ++#define REGLD_ADDREN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_REGLD_CTRL ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_REGLD_CTRL 0x0074 ++#define REGLD_ADDRIN_MASK 0x00FFFFFFU ++#define REGLD_ADDRIN_SHIFT 0 ++#define REGLD_ADDRIN_SIGNED 0 ++ ++#define REGLD_VAL_MASK 0x01000000U ++#define REGLD_VAL_SHIFT 24 ++#define REGLD_VAL_SIGNED 0 ++ ++#define REGLD_ADDRLEN_MASK 0xFE000000U ++#define REGLD_ADDRLEN_SHIFT 25 ++#define REGLD_ADDRLEN_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_CORE_ID ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_ID 0x0078 ++#define CONFIG_ID_MASK 0x0000FFFFU ++#define CONFIG_ID_SHIFT 0 ++#define CONFIG_ID_SIGNED 0 ++ ++#define CORE_ID_MASK 0x00FF0000U ++#define CORE_ID_SHIFT 16 ++#define CORE_ID_SIGNED 0 ++ ++#define GROUP_ID_MASK 0xFF000000U ++#define GROUP_ID_SHIFT 24 ++#define GROUP_ID_SIGNED 0 ++ ++/* ++ Register PVR_TCF_RGBPDP_CORE_REV ++*/ ++#define TCF_RGBPDP_PVR_TCF_RGBPDP_CORE_REV 0x007C ++#define MAINT_REV_MASK 0x000000FFU ++#define MAINT_REV_SHIFT 0 ++#define MAINT_REV_SIGNED 0 ++ ++#define MINOR_REV_MASK 0x0000FF00U ++#define MINOR_REV_SHIFT 8 ++#define MINOR_REV_SIGNED 0 ++ ++#define MAJOR_REV_MASK 0x00FF0000U ++#define MAJOR_REV_SHIFT 16 ++#define MAJOR_REV_SIGNED 0 ++ ++#endif /* !defined(_TCF_RGBPDP_REGS_H_) */ ++ ++/***************************************************************************** ++ End of file (tcf_rgbpdp_regs.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/virt_validation_defs.h b/drivers/gpu/drm/img-rogue/include/virt_validation_defs.h +new file mode 100644 +index 000000000000..5b8908f712c0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/virt_validation_defs.h +@@ -0,0 +1,63 @@ ++/*************************************************************************/ /*! ++@File ++@Title Definitions for virtualization ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Services shared header for virtualization definitions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef SRV_VIRT_DEFS_H
++#define SRV_VIRT_DEFS_H
++
++#if !defined(GPUVIRT_VALIDATION_MAX_STRING_LENGTH)
++ #define GPUVIRT_VALIDATION_MAX_STRING_LENGTH 512
++#endif
++
++#define GPUVIRT_VALIDATION_MAX_OS 8
++
++#define GPUVIRT_VALIDATION_NUM_REGIONS 2
++#define GPUVIRT_VAL_REGION_SECURE 0
++#define GPUVIRT_VAL_REGION_SHARED 1
++
++/* Shared region 1MB */
++#define GPUVIRT_SIZEOF_SHARED 0x100000
++
++/* Min region size 64MB */
++#define GPUVIRT_MIN_SIZE 0x4000000
++
++#endif /* SRV_VIRT_DEFS_H */
+diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_alignchecks.h b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_alignchecks.h
+new file mode 100644
+index 000000000000..c09b369f8d2c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_alignchecks.h
+@@ -0,0 +1,191 @@
++/*************************************************************************/ /*!
++@File
++@Title RGX fw interface alignment checks
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Checks to avoid misalignment in RGX fw data structures
++ shared with the host
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGX_FWIF_ALIGNCHECKS_H)
++#define RGX_FWIF_ALIGNCHECKS_H
++
++/* for the offsetof macro */
++#if defined(__KERNEL__) && defined(__linux__)
++#include <linux/stddef.h>
++#else
++#include <stddef.h>
++#endif
++
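++/*
++ Editorial sketch, illustrative only (not part of the original header): the
++ RGXFW_ALIGN_CHECKS_INIT* macros below expand to comma-separated lists of
++ sizeof()/offsetof() values. Each component (client driver, kernel-mode
++ server and firmware) compiles its own copy of the list and the copies are
++ compared entry by entry at initialisation, along these hypothetical lines
++ (paui32AlignChecksFW being the values reported by the firmware):
++
++   static const IMG_UINT32 aui32AlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
++   IMG_UINT32 i;
++
++   for (i = 0; i < sizeof(aui32AlignChecksKM)/sizeof(aui32AlignChecksKM[0]); i++)
++   {
++       if (aui32AlignChecksKM[i] != paui32AlignChecksFW[i])
++       {
++           bCompatible = IMG_FALSE; /* structure layout drift detected */
++       }
++   }
++
++ so any packing or alignment difference between independently built
++ components is caught before the shared structures are used.
++*/
++
++/*!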
++ ****************************************************************************** ++ * Alignment UM/FW checks array ++ *****************************************************************************/ ++ ++#define RGXFW_ALIGN_CHECKS_UM_MAX 128U ++ ++#if defined(PM_INTERACTIVE_MODE) ++#define HWRTDATA_PM_OFFSET offsetof(RGXFWIF_HWRTDATA, sPMMListDevVAddr), ++#define HWRTDATA_HEAPTABLE_OFFSET offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr), ++#else ++#define HWRTDATA_PM_OFFSET offsetof(RGXFWIF_HWRTDATA, sPMRenderStateDevVAddr), ++#define HWRTDATA_HEAPTABLE_OFFSET ++#endif ++ ++#define RGXFW_ALIGN_CHECKS_INIT0 \ ++ sizeof(RGXFWIF_TRACEBUF), \ ++ offsetof(RGXFWIF_TRACEBUF, ui32LogType), \ ++ offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \ ++ offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), \ ++ offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags), \ ++ \ ++ sizeof(RGXFWIF_SYSDATA), \ ++ offsetof(RGXFWIF_SYSDATA, ePowState), \ ++ offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount), \ ++ offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal), \ ++ offsetof(RGXFWIF_SYSDATA, ui32FWFaults), \ ++ offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags), \ ++ \ ++ sizeof(RGXFWIF_OSDATA), \ ++ offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark), \ ++ offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted), \ ++ \ ++ sizeof(RGXFWIF_HWRINFOBUF), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount), \ ++ \ ++ /* RGXFWIF_CMDTA checks */ \ ++ sizeof(RGXFWIF_CMDTA), \ ++ offsetof(RGXFWIF_CMDTA, sGeomRegs), \ ++ \ ++ /* RGXFWIF_CMD3D checks */ \ ++ sizeof(RGXFWIF_CMD3D), \ ++ offsetof(RGXFWIF_CMD3D, s3DRegs), \ ++ \ ++ /* RGXFWIF_CMD_COMPUTE checks */ \ ++ sizeof(RGXFWIF_CMD_COMPUTE), \ ++ offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \ ++ \ ++ /* RGXFWIF_FREELIST checks */ \ ++ sizeof(RGXFWIF_FREELIST), \ ++ offsetof(RGXFWIF_FREELIST, sFreeListBaseDevVAddr),\ ++ offsetof(RGXFWIF_FREELIST, sFreeListStateDevVAddr),\ ++ offsetof(RGXFWIF_FREELIST, sFreeListLastGrowDevVAddr),\ ++ offsetof(RGXFWIF_FREELIST, ui32MaxPages),\ ++ offsetof(RGXFWIF_FREELIST, ui32CurrentPages),\ ++ \ ++ /* RGXFWIF_HWRTDATA checks */ \ ++ sizeof(RGXFWIF_HWRTDATA), \ ++ HWRTDATA_PM_OFFSET \ ++ HWRTDATA_HEAPTABLE_OFFSET \ ++ offsetof(RGXFWIF_HWRTDATA, apsFreeLists),\ ++ /*offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase),*/ \ ++ offsetof(RGXFWIF_HWRTDATA, eState), \ ++ \ ++\ ++ sizeof(RGXFWIF_HWPERF_CTL), \ ++ offsetof(RGXFWIF_HWPERF_CTL, sBlkCfg), \ ++ sizeof(RGXFWIF_CMDTDM), \ ++ offsetof(RGXFWIF_CMDTDM, sTDMRegs) ++ ++#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT0 ++ ++ ++ ++/*! 
++ ****************************************************************************** ++ * Alignment KM checks array ++ *****************************************************************************/ ++ ++#define RGXFW_ALIGN_CHECKS_INIT_KM0 \ ++ sizeof(RGXFWIF_SYSINIT), \ ++ offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \ ++ offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \ ++ offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \ ++ offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \ ++ offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \ ++ offsetof(RGXFWIF_SYSINIT, sFwSysData), \ ++ \ ++ sizeof(RGXFWIF_OSINIT), \ ++ offsetof(RGXFWIF_OSINIT, psKernelCCBCtl), \ ++ offsetof(RGXFWIF_OSINIT, psKernelCCB), \ ++ offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl), \ ++ offsetof(RGXFWIF_OSINIT, psFirmwareCCB), \ ++ offsetof(RGXFWIF_OSINIT, sFwOsData), \ ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks), \ ++ \ ++ /* RGXFWIF_FWRENDERCONTEXT checks */ \ ++ sizeof(RGXFWIF_FWRENDERCONTEXT), \ ++ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \ ++ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \ ++ \ ++ sizeof(RGXFWIF_FWCOMPUTECONTEXT), \ ++ offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext), \ ++ offsetof(RGXFWIF_FWCOMPUTECONTEXT, sStaticComputeContextState),\ ++ offsetof(RGXFWIF_FWCOMPUTECONTEXT, ui32WorkEstCCBSubmitted),\ ++ \ ++ sizeof(RGXFWIF_FWTDMCONTEXT), \ ++ offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext), \ ++ offsetof(RGXFWIF_FWTDMCONTEXT, ui32WorkEstCCBSubmitted),\ ++ \ ++ sizeof(RGXFWIF_FWCOMMONCONTEXT), \ ++ offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \ ++ offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \ ++ offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \ ++ \ ++ sizeof(RGXFWIF_MMUCACHEDATA), \ ++ offsetof(RGXFWIF_MMUCACHEDATA, ui32CacheFlags), \ ++ offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync), \ ++ offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue) ++ ++#if defined(SUPPORT_TRP) ++#define RGXFW_ALIGN_CHECKS_INIT_KM \ ++ RGXFW_ALIGN_CHECKS_INIT_KM0, \ ++ offsetof(RGXFWIF_FWTDMCONTEXT, ui32TRPState), \ ++ offsetof(RGXFWIF_FWTDMCONTEXT, aui64TRPChecksums2D) ++#else ++#define RGXFW_ALIGN_CHECKS_INIT_KM RGXFW_ALIGN_CHECKS_INIT_KM0 ++#endif ++ ++#endif /* RGX_FWIF_ALIGNCHECKS_H */ ++ ++/****************************************************************************** ++ End of file (rgx_fwif_alignchecks.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_hwperf.h b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_hwperf.h +new file mode 100644 +index 000000000000..e430e86d62e6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_hwperf.h +@@ -0,0 +1,125 @@ ++/*************************************************************************/ /*! ++@File rgx_fwif_hwperf.h ++@Title RGX HWPerf support ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Shared header between RGX firmware and Init process ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_FWIF_HWPERF_H ++#define RGX_FWIF_HWPERF_H ++ ++#include "rgx_fwif_shared.h" ++#include "rgx_hwperf.h" ++#include "rgxdefs_km.h" ++ ++/* Server and Firmware definitions only */ ++ ++/*! The number of HWPerf blocks in the GPU */ ++ ++#if defined(RGX_FIRMWARE) ++#define RGX_HWPERF_NUM_SPU ((RGX_FEATURE_NUM_SPU)) ++#define RGX_HWPERF_NUM_USC ((RGX_FEATURE_NUM_CLUSTERS)) ++#define RGX_HWPERF_NUM_ISP_PER_SPU ((RGX_FEATURE_NUM_ISP_PER_SPU)) ++#define RGX_HWPERF_NUM_PBE ((RGX_FEATURE_PBE_PER_SPU) * (RGX_FEATURE_NUM_SPU)) ++#define RGX_HWPERF_NUM_MERCER ((RGX_FEATURE_NUM_CLUSTERS)) ++#define RGX_HWPERF_NUM_PBE_SHARED ((RGX_FEATURE_NUM_SPU)) ++#define RGX_HWPERF_NUM_SWIFT ((RGX_FEATURE_NUM_SPU * RGX_FEATURE_MAX_TPU_PER_SPU)) ++#define RGX_HWPERF_NUM_TEXAS ((RGX_FEATURE_NUM_SPU)) ++#if (RGX_FEATURE_RAY_TRACING_ARCH > 2) ++#define RGX_HWPERF_NUM_RAC ((RGX_FEATURE_NUM_SPU)) ++#else ++#define RGX_HWPERF_NUM_RAC ((0)) ++#endif ++#define RGX_HWPERF_NUM_TPU ((RGX_FEATURE_NUM_SPU * RGX_FEATURE_MAX_TPU_PER_SPU)) ++#define RGX_HWPERF_NUM_ISP ((RGX_FEATURE_NUM_CLUSTERS)) ++ ++#define RGX_CNTBLK_INDIRECT_COUNT(_class) ((RGX_HWPERF_NUM_ ## _class)) ++ ++/*! The number of layout blocks defined with configurable ++ * performance counters. Compile time constants. ++ * This is for the Series 8XT+ layout. 
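++ *
++ * Editorial worked example with assumed feature values: for a hypothetical
++ * configuration with RGX_FEATURE_NUM_SPU = 2, RGX_FEATURE_NUM_CLUSTERS = 4,
++ * RGX_FEATURE_MAX_TPU_PER_SPU = 2, RGX_FEATURE_PBE_PER_SPU = 1 and no RAC
++ * blocks, the indirect counts above give ISP 4, MERCER 4, PBE 2,
++ * PBE_SHARED 2, USC 4, TPU 4, SWIFT 4, TEXAS 2 and RAC 0, so the sum below
++ * evaluates to RGX_CNTBLK_ID_DIRECT_LAST + 26.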
++ */ ++#define RGX_HWPERF_MAX_DEFINED_BLKS (\ ++ (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ ++ RGX_CNTBLK_INDIRECT_COUNT(ISP) +\ ++ RGX_CNTBLK_INDIRECT_COUNT(MERCER) +\ ++ RGX_CNTBLK_INDIRECT_COUNT(PBE) +\ ++ RGX_CNTBLK_INDIRECT_COUNT(PBE_SHARED) +\ ++ RGX_CNTBLK_INDIRECT_COUNT(USC) +\ ++ RGX_CNTBLK_INDIRECT_COUNT(TPU) +\ ++ RGX_CNTBLK_INDIRECT_COUNT(SWIFT) +\ ++ RGX_CNTBLK_INDIRECT_COUNT(TEXAS) +\ ++ RGX_CNTBLK_INDIRECT_COUNT(RAC)) ++ ++#endif /* RGX_FIRMWARE */ ++ ++/*****************************************************************************/ ++ ++/* Structure used in the FW's global control data to hold the performance ++ * counters provisioned for a given block. */ ++typedef struct ++{ ++ IMG_UINT32 uiBlockID; ++ IMG_UINT32 uiNumCounters; // Number of counters held ++ // in aui32CounterCfg ++ // [0..RGX_CNTBLK_COUNTERS_MAX) ++ IMG_UINT32 uiEnabled; // 1 => enabled, 0=> disabled ++ RGXFWIF_DEV_VIRTADDR psModel; // link to model table for uiBlockID ++ IMG_UINT32 aui32CounterCfg[RGX_CNTBLK_COUNTERS_MAX]; ++} RGXFWIF_HWPERF_CTL_BLK; ++ ++ ++/*! ++ ***************************************************************************** ++ * Structure used in the FW's global RGXFW_CTL store, holding HWPerf counter ++ * block configuration. It is written to by the Server on FW initialisation ++ * (PDUMP=1) and by the FW BG kCCB command processing code. It is read by ++ * the FW IRQ register programming and HWPerf event generation routines. ++ * Size of the sBlkCfg[] array must be consistent between KM/UM and FW. ++ * FW will ASSERT if the sizes are different ++ * (ui32NumBlocks != RGX_HWPERF_MAX_DEFINED_BLKS) ++ ****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32Reserved; ++ IMG_UINT32 ui32CtrlWord; ++ IMG_UINT32 ui32EnabledBlksCount; ++ IMG_UINT32 ui32NumBlocks; ++ RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[1]; // First array entry ++} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL; ++#endif +diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_km.h b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_km.h +new file mode 100644 +index 000000000000..2d7ea7ce54e4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_km.h +@@ -0,0 +1,2331 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX firmware interface structures used by pvrsrvkm ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX firmware interface structures used by pvrsrvkm ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_FWIF_KM_H) ++#define RGX_FWIF_KM_H ++ ++#include "img_types.h" ++#include "rgx_fwif_shared.h" ++#include "rgxdefs_km.h" ++#include "dllist.h" ++#include "rgx_hwperf.h" ++ ++ ++/*************************************************************************/ /*! ++ Logging type ++*/ /**************************************************************************/ ++#define RGXFWIF_LOG_TYPE_NONE 0x00000000U ++#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U ++#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U ++#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U ++#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U ++#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U ++#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U ++#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U ++#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U ++#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U ++#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U ++#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U ++#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U ++#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U ++#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U ++#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U ++#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U ++#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU ++#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU ++ ++/* String used in pvrdebug -h output */ ++#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug" ++ ++/* Table entry to map log group strings to log type value */ ++typedef struct { ++ const IMG_CHAR* pszLogGroupName; ++ IMG_UINT32 ui32LogGroupType; ++} RGXFWIF_LOG_GROUP_MAP_ENTRY; ++ ++/* ++ Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup ++ table where needed. Keep log group names short, no more than 20 chars. 
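++
++ Editorial sketch of such a lookup table and its use (illustrative only):
++
++   static const RGXFWIF_LOG_GROUP_MAP_ENTRY asLogGroups[] = {
++       RGXFWIF_LOG_GROUP_NAME_VALUE_MAP
++   };
++
++ A parser walks a comma-separated list such as "main,hwr,pow", compares
++ each token against pszLogGroupName and ORs the matching ui32LogGroupType
++ values into the mask later stored in RGXFWIF_TRACEBUF::ui32LogType.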
++*/ ++#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ ++ { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ ++ { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ ++ { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ ++ { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ ++ { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ ++ { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ ++ { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ ++ { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ ++ { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ ++ { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ ++ { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ ++ { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ ++ { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ ++ { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ ++ { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } ++ ++ ++/* Used in print statements to display log group state, one %s per group defined */ ++#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" ++ ++/* Used in a print statement to display log group state, one per group */ ++#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) ?("main ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) ?("mts ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) ?("cleanup ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) ?("csw ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) ?("bif ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_PM) ?("pm ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) ?("rtd ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) ?("spm ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_POW) ?("pow ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) ?("hwr ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) ?("hwp ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) ?("rpm ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) ?("dma ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) ?("misc ") :("")), \ ++ (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) ?("debug ") :("")) ++ ++ ++/************************************************************************ ++* RGX FW signature checks ++************************************************************************/ ++#define RGXFW_SIG_BUFFER_SIZE_MIN (8192) ++ ++#define RGXFWIF_TIMEDIFF_ID ((0x1UL << 28) | RGX_CR_TIMER) ++ ++/*! ++ ****************************************************************************** ++ * Trace Buffer ++ *****************************************************************************/ ++ ++/*! Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ ++#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U ++#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U ++#if defined(RGXFW_META_SUPPORT_2ND_THREAD) ++#define RGXFW_THREAD_NUM 2U ++#else ++#define RGXFW_THREAD_NUM 1U ++#endif ++ ++#define RGXFW_POLL_TYPE_SET 0x80000000U ++ ++typedef struct ++{ ++ IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; ++ IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; ++ IMG_UINT32 ui32LineNum; ++} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; ++ ++/*! ++ * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface ++ * @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing ++ * @{ ++ */ ++ ++/*! 
++ * @Brief Firmware trace buffer details ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer)*/ ++ ++#if defined(RGX_FIRMWARE) ++ IMG_UINT32 *pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */ ++#else ++ RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address)*/ ++#endif ++ IMG_PUINT32 pui32TraceBuffer; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */ ++ ++ RGXFWIF_FILE_INFO_BUF sAssertBuf; ++} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; ++ ++/*! @} End of Defgroup SRVAndFWTracing */ ++ ++#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; ++ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; ++ IMG_UINT32 RGXFW_ALIGN ui32Data; ++ IMG_UINT32 ui32Reserved; ++ RGXFWIF_FILE_INFO_BUF sFaultBuf; ++} UNCACHED_ALIGN RGX_FWFAULTINFO; ++ ++ ++#define RGXFWIF_POW_STATES \ ++ X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ ++ X(RGXFWIF_POW_ON) /* running HW commands */ \ ++ X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ ++ X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ ++ ++typedef enum ++{ ++#define X(NAME) NAME, ++ RGXFWIF_POW_STATES ++#undef X ++} RGXFWIF_POW_STATE; ++ ++/* Firmware HWR states */ ++#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ ++#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */ ++#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ ++#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ ++#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ ++#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ ++#define RGXFWIF_HWR_RESTART_REQUESTED (0x1U << 7U) /*!< The FW has requested the host to restart it */ ++ ++#define RGXFWIF_PHR_STATE_SHIFT (8U) ++#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */ ++#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */ ++#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED) ++ ++#define RGXFWIF_PHR_MODE_OFF (0UL) ++#define RGXFWIF_PHR_MODE_RD_RESET (1UL) ++#define RGXFWIF_PHR_MODE_FULL_RESET (2UL) ++ ++typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; ++ ++/* Firmware per-DM HWR states */ ++#define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */ ++#define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */ ++#define RGXFWIF_DM_STATE_NEEDS_SKIP (IMG_UINT32_C(0x1) << 2) /*!< DM need to skip to next cmd before resuming processing */ ++#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (IMG_UINT32_C(0x1) << 3) /*!< DM need partial render cleanup before resuming processing */ ++#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR (IMG_UINT32_C(0x1) << 4) /*!< DM need to increment Recovery Count once fully recovered */ ++#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (IMG_UINT32_C(0x1) << 5) /*!< DM was identified as locking up and causing HWR */ 
++#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (IMG_UINT32_C(0x1) << 6) /*!< DM was innocently affected by another lockup which caused HWR */ ++#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (IMG_UINT32_C(0x1) << 7) /*!< DM was identified as over-running and causing HWR */ ++#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */ ++#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */ ++#define RGXFWIF_DM_STATE_GPU_ECC_HWR (IMG_UINT32_C(0x1) << 10) /*!< DM was forced into HWR due to an uncorrected GPU ECC error */ ++ ++/* Firmware's connection state */ ++typedef enum ++{ ++ RGXFW_CONNECTION_FW_OFFLINE = 0, /*!< Firmware is offline */ ++ RGXFW_CONNECTION_FW_READY, /*!< Firmware is initialised */ ++ RGXFW_CONNECTION_FW_ACTIVE, /*!< Firmware connection is fully established */ ++ RGXFW_CONNECTION_FW_OFFLOADING, /*!< Firmware is clearing up connection data */ ++ RGXFW_CONNECTION_FW_STATE_COUNT ++} RGXFWIF_CONNECTION_FW_STATE; ++ ++/* OS' connection state */ ++typedef enum ++{ ++ RGXFW_CONNECTION_OS_OFFLINE = 0, /*!< OS is offline */ ++ RGXFW_CONNECTION_OS_READY, /*!< OS's KM driver is setup and waiting */ ++ RGXFW_CONNECTION_OS_ACTIVE, /*!< OS connection is fully established */ ++ RGXFW_CONNECTION_OS_STATE_COUNT ++} RGXFWIF_CONNECTION_OS_STATE; ++ ++typedef struct ++{ ++ IMG_UINT bfOsState : 3; ++ IMG_UINT bfFLOk : 1; ++ IMG_UINT bfFLGrowPending : 1; ++ IMG_UINT bfIsolatedOS : 1; ++ IMG_UINT bfReserved : 26; ++} RGXFWIF_OS_RUNTIME_FLAGS; ++ ++typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; ++ ++#if defined(PVRSRV_STALLED_CCB_ACTION) ++#define PVR_SLR_LOG_ENTRIES 10 ++#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64Timestamp; ++ IMG_UINT32 ui32FWCtxAddr; ++ IMG_UINT32 ui32NumUFOs; ++ IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; ++} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; ++#endif ++ ++/*! ++ * @InGroup SRVAndFWTracing ++ * @Brief Firmware trace control data ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32LogType; /*!< FW trace log group configuration */ ++ RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer */ ++ IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated ++ (in RGXTraceBufferInitOnDemandResources) */ ++ IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_TRACEBUF; ++ ++/*! 
@Brief Firmware system data shared with the Host driver */ ++typedef struct ++{ ++ IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ ++ IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ ++ volatile RGXFWIF_POW_STATE ePowState; ++ volatile IMG_UINT32 ui32HWPerfRIdx; ++ volatile IMG_UINT32 ui32HWPerfWIdx; ++ volatile IMG_UINT32 ui32HWPerfWrapCount; ++ IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ ++ IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ ++ ++ /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with ++ * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ ++ IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ ++ IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ ++ IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ ++ RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */ ++ RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */ ++ IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */ ++ IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */ ++ IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */ ++ IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */ ++ IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; ++#if defined(SUPPORT_POWMON_COMPONENT) ++#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) ++ RGXFWIF_TRACEBUF_SPACE sPowerMonBuf; ++ IMG_UINT32 ui32PowerMonBufSizeInDWords; ++#endif ++#endif ++ ++#if defined(SUPPORT_VALIDATION) ++ IMG_UINT32 ui32RenderKillingCtl; /*!< Rasterisation DM Killing Configuration from host */ ++ IMG_UINT32 ui32CDMTDMKillingCtl; /*!< CDM/TDM Killing Configuration from host */ ++#endif ++#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) ++#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) ++#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) ++ IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; ++#endif ++ RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */ ++ RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */ ++ IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_SYSDATA; ++ ++/*! 
++ * @InGroup ContextSwitching ++ * @Brief Firmware per-os data and configuration ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ ++ IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ ++ IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */ ++#if defined(PVRSRV_STALLED_CCB_ACTION) ++ IMG_UINT32 ui32ForcedUpdatesRequested; ++ IMG_UINT8 ui8SLRLogWp; ++ RGXFWIF_SLR_ENTRY sSLRLogFirst; ++ RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; ++ IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; ++#endif ++ volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ ++ IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */ ++ RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */ ++ IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_OSDATA; ++ ++/* Firmware trace time-stamp field breakup */ ++ ++/* RGX_CR_TIMER register read (48 bits) value*/ ++#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) ++#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) ++ ++/* Extra debug-info (16 bits) */ ++#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) ++#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK ++ ++ ++/* Debug-info sub-fields */ ++/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ ++#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) ++#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) ++ ++/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ ++#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) ++#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) ++ ++/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */ ++#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) ++#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) ++ ++/* Bit 3-15: Unused bits */ ++ ++#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 ++#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " ++#define RGXFWT_DEBUG_INFO_STR_APPEND ")" ++ ++/* Table of debug info sub-field's masks and corresponding message strings ++ * to be appended to firmware trace ++ * ++ * Mask : 16 bit mask to be applied to debug-info field ++ * String : debug info message string ++ */ ++ ++#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ ++/*Mask, String*/ \ ++X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ ++X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \ ++X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events") ++ ++/*! ++ ****************************************************************************** ++ * HWR Data ++ *****************************************************************************/ ++/*! ++ * @Defgroup HWRInfo FW HWR shared data interface ++ * @Brief Types grouping data structures and defines used in realising the HWR record. ++ * @{ ++ */ ++/*! 
@Brief HWR Lockup types */ ++typedef enum ++{ ++ RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */ ++ RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */ ++ RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */ ++ RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */ ++ RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */ ++ RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */ ++ RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */ ++ RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */ ++ RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */ ++ RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */ ++ RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */ ++} RGX_HWRTYPE; ++ ++#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1) ++ ++#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false) ++ ++/************************ ++ * GPU HW error codes * ++ ************************/ ++typedef enum ++{ ++ RGX_HW_ERR_NA = 0x0, ++ RGX_HW_ERR_PRIMID_FAILURE_DURING_DMKILL = 0x101, ++} RGX_HW_ERR; ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */ ++ IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */ ++ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; ++} RGX_BIFINFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */ ++} RGX_ECCINFO; ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */ ++ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; ++} RGX_MMUINFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */ ++ IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */ ++ IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */ ++ IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; ++} UNCACHED_ALIGN RGX_POLLINFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32BadVAddr; /*!< VA address */ ++ IMG_UINT32 ui32EntryLo; ++} RGX_TLBINFO; ++ ++typedef struct ++{ ++ union ++ { ++ RGX_BIFINFO sBIFInfo; /*!< BIF failure details */ ++ RGX_MMUINFO sMMUInfo; /*!< MMU failure details */ ++ RGX_POLLINFO sPollInfo; /*!< Poll failure details */ ++ RGX_TLBINFO sTLBInfo; /*!< TLB failure details */ ++ RGX_ECCINFO sECCInfo; /*!< ECC failure details */ ++ } uHWRData; ++ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */ ++ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */ ++ IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */ ++ IMG_UINT32 ui32PID; /*!< PID belonging to the workload */ ++ IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */ ++ IMG_UINT32 ui32HWRNumber; /*!< HWR number */ ++ IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */ ++ IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */ ++ RGX_HWRTYPE eHWRType; /*!< Type of lockup */ ++ RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */ ++ IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */ ++ RGX_HW_ERR eHWErrorCode; /*!< Error code used to determine HW fault */ ++ IMG_UINT64 
RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; /*!< Pad to 16 64-bit words */ ++} UNCACHED_ALIGN RGX_HWRINFO; ++ ++#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ ++#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ ++#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ ++#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ ++ ++typedef struct ++{ ++ RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */ ++ IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */ ++ IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */ ++ IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */ ++ IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ ++ IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */ ++ IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */ ++ IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */ ++ IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */ ++} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; ++ ++/*! @} End of HWRInfo */ ++ ++#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1)) ++#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (IMG_UINT32_C(0x2)) ++#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (IMG_UINT32_C(0x3)) ++#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (IMG_UINT32_C(0x4)) ++ ++#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1)) ++#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2)) ++ ++#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1)) ++#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2)) ++/*! ++ ****************************************************************************** ++ * RGX firmware Init Config Data ++ * NOTE: Please be careful to keep backwards compatibility with DDKv1 for the ++ * CTXSWITCH controls. 
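++ *
++ * Editorial note (illustrative): the CTXSWITCH profile and CDM arbitration
++ * controls below are multi-bit sub-fields of the config word, e.g.
++ * RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM == (0x2 << 16) == 0x00020000,
++ * and a host-supplied value is decoded as
++ * (ui32ConfigFlags & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK) >>
++ * RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT.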
++ *****************************************************************************/ ++ ++/* Flag definitions affecting the firmware globally */ ++#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) /*!< Randomise context switch requests */ ++#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1) ++#define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2) ++#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) /*!< Randomise DM-killing requests */ ++#define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4) ++/* 5 unused */ ++#define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6) ++#define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7) ++#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8) ++/* 9 unused */ ++/* 10 unused */ ++/* 11 unused */ ++#define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12) ++#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13) ++#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (IMG_UINT32_C(0x1) << 14) ++/* 15 unused */ ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19) ++#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20) ++#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED (IMG_UINT32_C(0x1) << 21) ++#define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22) ++#define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23) ++#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24) ++#define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25) ++#define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\ ++ RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP) ++#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER (IMG_UINT32_C(0x1) << 31) ++#define RGXFWIF_INICFG_ALL (0xFFFFF3FFU) ++ ++/* Extended Flag definitions affecting the firmware globally */ ++#define RGXFWIF_INICFG_EXT_ALL (0x0U) ++ ++#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ ++ RGXFWIF_INICFG_CTXSWITCH_SRESET_EN) ++ ++/* Flag definitions affecting only 
workloads submitted by a particular OS */ ++ ++/*! ++ * @AddToGroup ContextSwitching ++ * @{ ++ * @Name Per-OS DM context switch configuration flags ++ * @{ ++ */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN (IMG_UINT32_C(0x1) << 0) /*!< Enables TDM context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN (IMG_UINT32_C(0x1) << 1) /*!< Enables GEOM-TA and GEOM-SHG context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (IMG_UINT32_C(0x1) << 2) /*!< Enables FRAG DM context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (IMG_UINT32_C(0x1) << 3) /*!< Enables CDM context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN (IMG_UINT32_C(0x1) << 4) ++ ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (IMG_UINT32_C(0x1) << 5) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM (IMG_UINT32_C(0x1) << 6) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (IMG_UINT32_C(0x1) << 7) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (IMG_UINT32_C(0x1) << 8) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_RDM (IMG_UINT32_C(0x1) << 9) ++ ++#define RGXFWIF_INICFG_OS_ALL (0x3FFU) ++ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_RDM_EN) ++ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL) ++ ++/*! ++ * @} End of Per-OS Context switch configuration flags ++ * @} End of AddToGroup ContextSwitching ++ */ ++ ++#define RGXFWIF_FILTCFG_TRUNCATE_HALF (IMG_UINT32_C(0x1) << 3) ++#define RGXFWIF_FILTCFG_TRUNCATE_INT (IMG_UINT32_C(0x1) << 2) ++#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (IMG_UINT32_C(0x1) << 1) ++ ++typedef enum ++{ ++ RGX_ACTIVEPM_FORCE_OFF = 0, ++ RGX_ACTIVEPM_FORCE_ON = 1, ++ RGX_ACTIVEPM_DEFAULT = 2 ++} RGX_ACTIVEPM_CONF; ++ ++typedef enum ++{ ++ RGX_RD_POWER_ISLAND_FORCE_OFF = 0, ++ RGX_RD_POWER_ISLAND_FORCE_ON = 1, ++ RGX_RD_POWER_ISLAND_DEFAULT = 2 ++} RGX_RD_POWER_ISLAND_CONF; ++ ++typedef struct ++{ ++ IMG_UINT16 ui16RegNum; /*!< Register number */ ++ IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */ ++ IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */ ++ IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */ ++} RGXFW_REGISTER_LIST; ++ ++#if defined(RGX_FIRMWARE) ++typedef DLLIST_NODE RGXFWIF_DLLIST_NODE; ++#else ++typedef struct {RGXFWIF_DEV_VIRTADDR p; ++ RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE; ++#endif ++ ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SYSDATA; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_OSDATA; ++#if defined(SUPPORT_TBI_INTERFACE) ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF; ++#endif ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL; ++typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER; ++typedef RGXFWIF_DEV_VIRTADDR 
PRGXFWIF_COMMONCTX_STATE; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD; ++ ++/*! ++ * This number is used to represent an invalid page catalogue physical address ++ */ ++#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU ++ ++/*! ++ * This number is used to represent an unallocated set of page catalog base registers ++ */ ++#define RGXFW_BIF_INVALID_PCSET 0xFFFFFFFFU ++ ++/*! ++ Firmware memory context. ++*/ ++typedef struct ++{ ++ IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */ ++ IMG_UINT32 uiPageCatBaseRegSet; /*!< index of the associated set of page catalog base registers (RGXFW_BIF_INVALID_PCSET == unallocated) */ ++ IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */ ++ IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */ ++ IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */ ++ IMG_UINT64 RGXFW_ALIGN ui64FBCStateIDMask; /*!< FBCDC state descriptor IDs (non-zero means defer on mem context activation) */ ++ IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */ ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ IMG_UINT32 ui32OSid; ++ IMG_BOOL bOSidAxiProt; ++#endif ++ ++} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT; ++ ++/*! ++ * FW context state flags ++ */ ++#define RGXFWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U) ++#define RGXFWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000002U) ++#define RGXFWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U) ++ ++/*! ++ * @InGroup ContextSwitching ++ * @Brief Firmware GEOM/TA context suspend state ++ */ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_CMD0; ++ IMG_UINT32 uTAReg_DCE_CMD1; ++ IMG_UINT32 uTAReg_DCE_WRITE; ++ IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW0; ++ IMG_UINT64 RGXFW_ALIGN uTAReg_DCE_DRAW1; ++ IMG_UINT32 uTAReg_GTA_SO_PRIM[4]; ++ IMG_UINT16 ui16TACurrentIdx; ++} UNCACHED_ALIGN RGXFWIF_TACTX_STATE_PER_GEOM; ++ ++typedef struct ++{ ++ /* FW-accessible TA state which must be written out to memory on context store */ ++ RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[RGX_NUM_GEOM_CORES]; ++} UNCACHED_ALIGN RGXFWIF_TACTX_STATE; ++ ++/* The following defines need to be auto generated using the HW defines ++ * rather than hard coding it */ ++#define RGXFWIF_ISP_PIPE_COUNT_MAX (20) ++#define RGXFWIF_PIPE_COUNT_PER_ISP (2) ++#define RGXFWIF_IPP_RESUME_REG_COUNT (1) ++ ++#if !defined(__KERNEL__) ++#define RGXFWIF_ISP_COUNT (RGX_FEATURE_NUM_SPU * RGX_FEATURE_NUM_ISP_PER_SPU) ++#define RGXFWIF_ISP_PIPE_COUNT (RGXFWIF_ISP_COUNT * RGXFWIF_PIPE_COUNT_PER_ISP) ++#if RGXFWIF_ISP_PIPE_COUNT > RGXFWIF_ISP_PIPE_COUNT_MAX ++#error RGXFWIF_ISP_PIPE_COUNT should not be greater than RGXFWIF_ISP_PIPE_COUNT_MAX ++#endif ++#endif /* !defined(__KERNEL__) */ ++ ++/*! 
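++ * Example (illustrative sketch; the RGXExample* helper is hypothetical and
++ * not part of this interface): the two sentinel values defined above can be
++ * tested before a firmware memory context is used. This assumes
++ * IMG_DEV_PHYADDR exposes its raw value as uiAddr, as in img_types.h.
++ */
++static inline IMG_BOOL RGXExampleMemContextIsReady(const RGXFWIF_FWMEMCONTEXT *psCtx)
++{
++	/* Usable once a PC register set is allocated and the page catalogue
++	 * base holds a real physical address. */
++	return (IMG_BOOL)((psCtx->uiPageCatBaseRegSet != RGXFW_BIF_INVALID_PCSET) &&
++	                  (psCtx->sPCDevPAddr.uiAddr != RGXFWIF_INVALID_PC_PHYADDR));
++}
++
++/*!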
++ * @InGroup ContextSwitching ++ * @Brief Firmware FRAG/3D context suspend state ++ */ ++typedef struct ++{ ++#if defined(PM_INTERACTIVE_MODE) ++ IMG_UINT32 RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS; /*!< Managed by PM HW in the non-interactive mode */ ++#endif ++ IMG_UINT32 ui32CtxStateFlags; /*!< Compatibility and other flags */ ++ ++ /* FW-accessible ISP state which must be written out to memory on context store */ ++ /* au3DReg_ISP_STORE should be the last element of the structure ++ * as this is an array whose size is determined at runtime ++ * after detecting the RGX core */ ++ IMG_UINT64 RGXFW_ALIGN au3DReg_ISP_STORE[]; /*!< ISP state (per-pipe) */ ++} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE; ++ ++#define RGXFWIF_CTX_USING_BUFFER_A (0) ++#define RGXFWIF_CTX_USING_BUFFER_B (1U) ++ ++typedef struct ++{ ++ IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */ ++} RGXFWIF_COMPUTECTX_STATE; ++ ++/*! ++ * @InGroup WorkloadContexts ++ * @Brief Firmware Common Context (or FWCC) ++ */ ++typedef struct RGXFWIF_FWCOMMONCONTEXT_ ++{ ++ /* CCB details for this firmware context */ ++ PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ ++ PRGXFWIF_CCCB psCCB; /*!< CCB base */ ++ RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; ++ ++ /* Context suspend state */ ++ PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ ++ ++ /* Flags e.g. for context switching */ ++ IMG_UINT32 ui32FWComCtxFlags; ++ IMG_INT32 i32Priority; /*!< Priority level */ ++ IMG_UINT32 ui32PrioritySeqNum; ++ ++ /* Framework state */ ++ PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ ++ ++ /* Statistic updates waiting to be passed back to the host... */ ++ IMG_BOOL bStatsPending; /*!< True when some stats are pending */ ++ IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ ++ IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ ++ IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ ++ RGXFWIF_DM eDM; /*!< Data Master type */ ++ IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ ++ IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ ++ ++ IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; ++ IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ ++ bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ ++ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ ++ RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ ++ ++ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ ++ ++ /* References to the host side originators */ ++ IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ ++ IMG_UINT32 ui32PID; /*!< associated process ID */ ++ ++ IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */ ++ ++ IMG_UINT32 ui32PipelinedKicks; /*!< Number of kick from this CCB currently submitted to the DM pipeline */ ++} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; ++ ++static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256, ++ "Size of structure 
RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size."); ++ ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2]; ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4]; ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES][2]; ++ ++/*! ++ * @InGroup WorkloadContexts ++ * @Brief Firmware render context. ++ */ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */ ++ RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */ ++ ++ RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState; ++ ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++ IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */ ++ ++#if defined(SUPPORT_TRP) ++ RGXFWIF_TRP_CHECKSUM_3D aui64TRPChecksums3D; ++ RGXFWIF_TRP_CHECKSUM_GEOM aui64TRPChecksumsGeom; ++#endif ++} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT; ++ ++/*! ++ Firmware compute context. ++*/ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sCDMContext; /*!< Firmware context for the CDM */ ++ ++ RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputeContextState; ++ ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++ ++ IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */ ++ ++ IMG_UINT32 ui32WGPState; ++ IMG_UINT32 aui32WGPChecksum[RGX_WGP_MAX_NUM_CORES]; ++} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT; ++ ++/*! ++ Firmware ray context. ++*/ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sRDMContext; /*!< Firmware context for the RDM */ ++ RGXFWIF_STATIC_RAYCONTEXT_STATE sStaticRayContextState; ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++ ++} UNCACHED_ALIGN RGXFWIF_FWRAYCONTEXT; ++ ++/*! ++ * @InGroup WorkloadContexts ++ * @Brief Firmware TDM context. ++ */ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sTDMContext; /*!< Firmware context for the TDM */ ++ ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++#if defined(SUPPORT_TRP) ++ IMG_UINT32 ui32TRPState; ++ RGXFWIF_TRP_CHECKSUM_2D RGXFW_ALIGN aui64TRPChecksums2D; ++#endif ++ ++} UNCACHED_ALIGN RGXFWIF_FWTDMCONTEXT; ++ ++/*! ++ ****************************************************************************** ++ * Defines for CMD_TYPE corruption detection and forward compatibility check ++ *****************************************************************************/ ++ ++/* CMD_TYPE 32bit contains: ++ * 31:16 Reserved for magic value to detect corruption (16 bits) ++ * 15 Reserved for RGX_CCB_TYPE_TASK (1 bit) ++ * 14:0 Bits available for CMD_TYPEs (15 bits) */ ++ ++ ++/* Magic value to detect corruption */ ++#define RGX_CMD_MAGIC_DWORD IMG_UINT32_C(0x2ABC) ++#define RGX_CMD_MAGIC_DWORD_MASK (0xFFFF0000U) ++#define RGX_CMD_MAGIC_DWORD_SHIFT (16U) ++#define RGX_CMD_MAGIC_DWORD_SHIFTED (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT) ++ ++/*! ++ * @InGroup KCCBTypes ClientCCBTypes ++ * @Brief Generic CCB control structure ++ */ ++typedef struct ++{ ++ volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */ ++ volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ ++ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */ ++ IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */ ++} UNCACHED_ALIGN RGXFWIF_CCB_CTL; ++ ++/*! 
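++ * Example (illustrative sketch; hypothetical helper, not part of this
++ * interface): per the CMD_TYPE layout above, every command type word carries
++ * the magic value in bits 31:16, so a corrupted word can be rejected with a
++ * mask-and-compare before the low bits are interpreted as a command type.
++ */
++static inline IMG_BOOL RGXExampleCmdWordIsValid(IMG_UINT32 ui32CmdWord)
++{
++	return (IMG_BOOL)((ui32CmdWord & RGX_CMD_MAGIC_DWORD_MASK) ==
++	                  RGX_CMD_MAGIC_DWORD_SHIFTED);
++}
++
++/*!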
++ * @Defgroup KCCBTypes Kernel CCB data interface ++ * @Brief Types grouping data structures and defines used in realising the KCCB functionality ++ * @{ ++ */ ++ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800U) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */ ++ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */ ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_MMUCACHE type command ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32CacheFlags; ++ RGXFWIF_DEV_VIRTADDR sMMUCacheSync; ++ IMG_UINT32 ui32MMUCacheSyncUpdateValue; ++} RGXFWIF_MMUCACHEDATA; ++ ++#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0) ++#define RGXFWIF_BPDATA_FLAGS_WRITE (1U << 1) ++#define RGXFWIF_BPDATA_FLAGS_CTL (1U << 2) ++#define RGXFWIF_BPDATA_FLAGS_REGS (1U << 3) ++ ++typedef struct ++{ ++ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ ++ IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ ++ IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ ++ IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ ++ IMG_UINT32 ui32BPDataFlags; ++ IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ ++ IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ ++ RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ ++} RGXFWIF_BPDATA; ++ ++#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_KICK type command ++ */ ++typedef struct ++{ ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ ++ IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */ ++ IMG_UINT32 ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */ ++ IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */ ++ PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */ ++ IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */ ++} RGXFWIF_KCCB_CMD_KICK_DATA; ++ ++/*! ++ * @Brief Command data for @Ref RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK type command ++ */ ++typedef struct ++{ ++ RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; /*!< GEOM DM kick command data */ ++ RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; /*!< FRAG DM kick command data */ ++} RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FORCE_UPDATE type command ++ */ ++typedef struct ++{ ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ ++ IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */ ++} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA; ++ ++/*! ++ * @Brief Resource types supported by \ref RGXFWIF_KCCB_CMD_CLEANUP type command ++ */ ++typedef enum ++{ ++ RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */ ++ RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */ ++ RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */ ++ RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */ ++} RGXFWIF_CLEANUP_TYPE; ++ ++/*! 
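++ * Example (illustrative sketch; hypothetical helper, not part of this
++ * interface): composing the command data for an MMU cache invalidation of
++ * the PT and PC levels, asking the FW to interrupt the host when done. The
++ * sync address/value pair is whatever sync primitive the caller owns.
++ */
++static inline void RGXExampleInitMMUCacheData(RGXFWIF_MMUCACHEDATA *psData,
++                                              RGXFWIF_DEV_VIRTADDR sSyncFWAddr,
++                                              IMG_UINT32 ui32UpdateValue)
++{
++	psData->ui32CacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PT |
++	                         RGXFWIF_MMUCACHEDATA_FLAGS_PC |
++	                         RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT;
++	psData->sMMUCacheSync = sSyncFWAddr;
++	psData->ui32MMUCacheSyncUpdateValue = ui32UpdateValue;
++}
++
++/*!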
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CLEANUP type command ++ */ ++typedef struct ++{ ++ RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type */ ++ union { ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */ ++ PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */ ++ PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */ ++ PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */ ++ } uCleanupData; ++} RGXFWIF_CLEANUP_REQUEST; ++ ++/*! ++ * @Brief Type of power requests supported in \ref RGXFWIF_KCCB_CMD_POW type command ++ */ ++typedef enum ++{ ++ RGXFWIF_POW_OFF_REQ = 1, /*!< GPU power-off request */ ++ RGXFWIF_POW_FORCED_IDLE_REQ, /*!< Force-idle related request */ ++ RGXFWIF_POW_NUM_UNITS_CHANGE, /*!< Request to change default powered scalable units */ ++ RGXFWIF_POW_APM_LATENCY_CHANGE /*!< Request to change the APM latency period */ ++} RGXFWIF_POWER_TYPE; ++ ++/*! ++ * @Brief Supported force-idle related requests with \ref RGXFWIF_POW_FORCED_IDLE_REQ type request ++ */ ++typedef enum ++{ ++ RGXFWIF_POWER_FORCE_IDLE = 1, /*!< Request to force-idle GPU */ ++ RGXFWIF_POWER_CANCEL_FORCED_IDLE, /*!< Request to cancel a previously successful force-idle transition */ ++ RGXFWIF_POWER_HOST_TIMEOUT, /*!< Notification that host timed-out waiting for force-idle state */ ++} RGXFWIF_POWER_FORCE_IDLE_TYPE; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_POW type command ++ */ ++typedef struct ++{ ++ RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */ ++ union ++ { ++ struct ++ { ++ IMG_UINT32 ui32PowUnitsStateMask; /*!< New power units state mask */ ++ IMG_UINT32 ui32RACStateMask; /*!< New RAC state mask */ ++ }; ++ IMG_BOOL bForced; /*!< If the operation is mandatory */ ++ RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */ ++ } uPowerReqData; ++} RGXFWIF_POWER_REQUEST; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_SLCFLUSHINVAL type command ++ */ ++typedef struct ++{ ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */ ++ IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing */ ++ IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */ ++ IMG_UINT64 RGXFW_ALIGN ui64Address; /*!< Optional address of range (only useful when bDMContext == FALSE) */ ++ IMG_UINT64 RGXFW_ALIGN ui64Size; /*!< Optional size of range (only useful when bDMContext == FALSE) */ ++} RGXFWIF_SLCFLUSHINVALDATA; ++ ++typedef enum ++{ ++ RGXFWIF_HWPERF_CTRL_TOGGLE = 0, ++ RGXFWIF_HWPERF_CTRL_SET = 1, ++ RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 ++} RGXFWIF_HWPERF_UPDATE_CONFIG; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG type command ++ */ ++typedef struct ++{ ++ RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */ ++ IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */ ++} RGXFWIF_HWPERF_CTRL; ++ ++typedef enum ++{ ++ RGXFWIF_HWPERF_CNTR_NOOP = 0, /* No-Op */ ++ RGXFWIF_HWPERF_CNTR_ENABLE = 1, /* Enable Counters */ ++ RGXFWIF_HWPERF_CNTR_DISABLE = 2 /* Disable Counters */ ++} RGXFWIF_HWPERF_CNTR_CONFIG; ++ ++/*! 
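++ * Example (illustrative sketch; hypothetical helper, not part of this
++ * interface): the power request union is discriminated by ePowType. A
++ * forced-idle request, for instance, only populates the ePowRequestType arm.
++ */
++static inline void RGXExampleInitForcedIdleReq(RGXFWIF_POWER_REQUEST *psReq,
++                                               IMG_BOOL bHostTimedOut)
++{
++	psReq->ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
++	psReq->uPowerReqData.ePowRequestType = bHostTimedOut ?
++	        RGXFWIF_POWER_HOST_TIMEOUT : RGXFWIF_POWER_FORCE_IDLE;
++}
++
++/*!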
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS type command
++ */
++typedef struct
++{
++	IMG_UINT32 ui32CtrlWord;
++	IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */
++	PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */
++} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS;
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE type command
++ */
++typedef struct
++{
++	IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */
++} RGXFWIF_CORECLKSPEEDCHANGE_DATA;
++
++#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS type command
++ */
++typedef struct
++{
++	bool bEnable;
++	IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */
++	IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */
++} RGXFWIF_HWPERF_CTRL_BLKS;
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE & \ref RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE type commands
++ */
++typedef struct
++{
++	RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */
++	IMG_BOOL bDone; /*!< action backing/unbacking succeeded */
++} RGXFWIF_ZSBUFFER_BACKING_DATA;
++
++#if defined(SUPPORT_VALIDATION)
++typedef struct
++{
++	IMG_UINT32 ui32RegWidth;
++	IMG_BOOL bWriteOp;
++	IMG_UINT32 ui32RegAddr;
++	IMG_UINT64 RGXFW_ALIGN ui64RegVal;
++} RGXFWIF_RGXREG_DATA;
++
++typedef struct
++{
++	IMG_UINT64 ui64BaseAddress;
++	PRGXFWIF_FWCOMMONCONTEXT psContext;
++	IMG_UINT32 ui32Size;
++} RGXFWIF_GPUMAP_DATA;
++#endif
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE type command
++ */
++typedef struct
++{
++	RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */
++	IMG_UINT32 ui32DeltaPages; /*!< Amount of the Freelist change */
++	IMG_UINT32 ui32NewPages; /*!< New amount of pages on the freelist (including ready pages) */
++	IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */
++} RGXFWIF_FREELIST_GS_DATA;
++
++#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U)
++#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE type command
++ */
++typedef struct
++{
++	IMG_UINT32 ui32FreelistsCount;
++	IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT];
++} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE type command
++ */
++typedef struct
++{
++	PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context that may need to be resumed following the write offset update */
++} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
++
++/*!
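++ * Example (illustrative sketch; hypothetical helper, not part of this
++ * interface): populating an RGXFWIF_HWPERF_CTRL_BLKS request. The block ID
++ * array is a fixed-size field, so the count must be validated against
++ * RGXFWIF_HWPERF_CTRL_BLKS_MAX before copying.
++ */
++static inline IMG_BOOL RGXExampleInitHWPerfCtrlBlks(RGXFWIF_HWPERF_CTRL_BLKS *psCtrl,
++                                                    bool bEnable,
++                                                    const IMG_UINT16 *pui16IDs,
++                                                    IMG_UINT32 ui32Count)
++{
++	IMG_UINT32 i;
++
++	if (ui32Count > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
++	{
++		return IMG_FALSE; /* too many blocks for a single command */
++	}
++
++	psCtrl->bEnable = bEnable;
++	psCtrl->ui32NumBlocks = ui32Count;
++	for (i = 0; i < ui32Count; i++)
++	{
++		psCtrl->aeBlockIDs[i] = pui16IDs[i];
++	}
++	return IMG_TRUE;
++}
++
++/*!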
++ ****************************************************************************** ++ * Proactive DVFS Structures ++ *****************************************************************************/ ++#define NUM_OPP_VALUES 16 ++ ++typedef struct ++{ ++ IMG_UINT32 ui32Volt; /* V */ ++ IMG_UINT32 ui32Freq; /* Hz */ ++} UNCACHED_ALIGN PDVFS_OPP; ++ ++typedef struct ++{ ++ PDVFS_OPP asOPPValues[NUM_OPP_VALUES]; ++#if defined(DEBUG) ++ IMG_UINT32 ui32MinOPPPoint; ++#endif ++ IMG_UINT32 ui32MaxOPPPoint; ++} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32MaxOPPPoint; ++} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32MinOPPPoint; ++} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA; ++ ++/*! ++ ****************************************************************************** ++ * Register configuration structures ++ *****************************************************************************/ ++ ++#define RGXFWIF_REG_CFG_MAX_SIZE 512 ++ ++typedef enum ++{ ++ RGXFWIF_REGCFG_CMD_ADD = 101, ++ RGXFWIF_REGCFG_CMD_CLEAR = 102, ++ RGXFWIF_REGCFG_CMD_ENABLE = 103, ++ RGXFWIF_REGCFG_CMD_DISABLE = 104 ++} RGXFWIF_REGDATA_CMD_TYPE; ++ ++typedef IMG_UINT32 RGXFWIF_REG_CFG_TYPE; ++#define RGXFWIF_REG_CFG_TYPE_PWR_ON 0U /* Sidekick power event */ ++#define RGXFWIF_REG_CFG_TYPE_DUST_CHANGE 1U /* Rascal / dust power event */ ++#define RGXFWIF_REG_CFG_TYPE_TA 2U /* TA kick */ ++#define RGXFWIF_REG_CFG_TYPE_3D 3U /* 3D kick */ ++#define RGXFWIF_REG_CFG_TYPE_CDM 4U /* Compute kick */ ++#define RGXFWIF_REG_CFG_TYPE_TDM 5U /* TDM kick */ ++#define RGXFWIF_REG_CFG_TYPE_ALL 6U /* Applies to all types. Keep as last element */ ++ ++typedef struct ++{ ++ IMG_UINT64 ui64Addr; ++ IMG_UINT64 ui64Mask; ++ IMG_UINT64 ui64Value; ++} RGXFWIF_REG_CFG_REC; ++ ++typedef struct ++{ ++ RGXFWIF_REGDATA_CMD_TYPE eCmdType; ++ RGXFWIF_REG_CFG_TYPE eRegConfigType; ++ RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig; ++ ++} RGXFWIF_REGCONFIG_DATA; ++ ++typedef struct ++{ ++ /** ++ * PDump WRW command write granularity is 32 bits. ++ * Add padding to ensure array size is 32 bit granular. ++ */ ++ IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN(RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))]; ++ RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE]; ++} UNCACHED_ALIGN RGXFWIF_REG_CFG; ++ ++typedef enum ++{ ++ RGXFWIF_OS_ONLINE = 1, ++ RGXFWIF_OS_OFFLINE ++} RGXFWIF_OS_STATE_CHANGE; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE type command ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32OSid; ++ RGXFWIF_OS_STATE_CHANGE eNewOSState; ++} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA; ++ ++/*! 
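++ * Example (illustrative sketch; hypothetical helper, not part of this
++ * interface): selecting an OPP index for a requested clock rate. This
++ * assumes asOPPValues[] is ordered by ascending frequency, as OPP tables
++ * conventionally are, and treats ui32MaxOPPPoint as the highest valid index
++ * (an assumption, not something this header specifies).
++ */
++static inline IMG_UINT32 RGXExampleFindOPPIndex(const RGXFWIF_PDVFS_OPP *psOPPInfo,
++                                                IMG_UINT32 ui32RequestedHz)
++{
++	IMG_UINT32 i, ui32Best = 0;
++
++	for (i = 0; i <= psOPPInfo->ui32MaxOPPPoint && i < NUM_OPP_VALUES; i++)
++	{
++		if (psOPPInfo->asOPPValues[i].ui32Freq <= ui32RequestedHz)
++		{
++			ui32Best = i; /* highest OPP not exceeding the request */
++		}
++	}
++	return ui32Best;
++}
++
++/*!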
++ * @Brief List of command types supported by the Kernel CCB ++ */ ++typedef enum ++{ ++ /* Common commands */ ++ RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */ ++ RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */ ++ RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++ RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ ++ RGXFWIF_KCCB_CMD_CLEANUP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ ++ RGXFWIF_KCCB_CMD_POW = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request (type specified in the command data) */ ++ RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ ++ RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ ++ RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ ++ RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ ++ /* RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE */ ++ RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ ++ RGXFWIF_KCCB_CMD_HEALTH_CHECK = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ ++ RGXFWIF_KCCB_CMD_FORCE_UPDATE = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ ++ ++ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ ++ RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */ ++ ++ /* Commands only permitted to the native or host OS */ ++ RGXFWIF_KCCB_CMD_REGCONFIG = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++ RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ ++ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 202U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ ++ RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ ++ RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ ++ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT = 205U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks during the init process */ ++ RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ ++ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++ RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSID. 
It can only be serviced for the Host DDK */ ++ RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ ++ /* RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE */ ++ /* RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE */ ++ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ ++ RGXFWIF_KCCB_CMD_PHR_CFG = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_KCCB_CMD_RGXREG = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */ ++#endif ++ RGXFWIF_KCCB_CMD_WDG_CFG = 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_KCCB_CMD_GPUMAP = 219U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */ ++#endif ++} RGXFWIF_KCCB_CMD_TYPE; ++ ++#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1) ++ ++/*! @Brief Kernel CCB command packet */ ++typedef struct ++{ ++ RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type */ ++ IMG_UINT32 ui32KCCBFlags; /*!< Compatibility and other flags */ ++ ++ /* NOTE: Make sure that uCmdData is the last member of this struct ++ * This is to calculate actual command size for device mem copy. ++ * (Refer RGXGetCmdMemCopySize()) ++ * */ ++ union ++ { ++ RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */ ++ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA sCombinedTA3DCmdKickData; /*!< Data for combined TA/3D Kick command */ ++ RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMU cache command */ ++ RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */ ++ RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */ ++ RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */ ++ RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */ ++ RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */ ++ RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */ ++ RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */ ++ RGXFWIF_CORECLKSPEEDCHANGE_DATA sCoreClkSpeedChangeData;/*!< Data for core clock speed change */ ++ RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */ ++ RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */ ++ RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */ ++ RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */ ++ RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */ ++#if defined(SUPPORT_PDVFS) ++ RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData; ++ RGXFWIF_PDVFS_MIN_FREQ_DATA sPDVFSMinFreqData; /*!< Data for setting the min frequency/OPP */ ++#endif ++ RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */ ++ RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */ ++ RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_RGXREG_DATA sFwRgxData; /*!< Data for reading 
off an RGX register */ ++ RGXFWIF_GPUMAP_DATA sGPUMapData; /*!< Data for requesting a FW GPU mapping which is written into by the FW with a pattern */ ++#endif ++ } UNCACHED_ALIGN uCmdData; ++} UNCACHED_ALIGN RGXFWIF_KCCB_CMD; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD); ++ ++/*! @} End of KCCBTypes */ ++ ++/*! ++ * @Defgroup FWCCBTypes Firmware CCB data interface ++ * @Brief Types grouping data structures and defines used in realising the Firmware CCB functionality ++ * @{ ++ */ ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING and the ++ * \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING Firmware CCB commands ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32ZSBufferID; /*!< ZS buffer ID */ ++} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA; ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELIST_GROW Firmware CCB ++ * command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32FreelistID; /*!< Freelist ID */ ++} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA; ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION ++ * Firmware CCB command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32FreelistsCount; /*!< Freelists count */ ++ IMG_UINT32 ui32HwrCounter; /*!< HWR counter */ ++ IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; /*!< Array of freelist IDs to reconstruct */ ++} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA; ++ ++#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF (1U<<0) /*!< 1 if a page fault happened */ ++#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS (1U<<1) /*!< 1 if applicable to all contexts */ ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION ++ * Firmware CCB command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */ ++ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */ ++ RGXFWIF_DM eDM; /*!< Data Master affected by the reset */ ++ IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */ ++ IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ ++ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */ ++} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA; ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION ++ * Firmware CCB command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_DEV_VIRTADDR sFWFaultAddr; /*!< Page fault address */ ++} RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA; ++ ++/*! 
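++ * Example (illustrative sketch; hypothetical helper, not part of this
++ * interface): because uCmdData is the last member of RGXFWIF_KCCB_CMD, the
++ * amount of data that actually needs copying for a given command is the
++ * header up to uCmdData plus the size of the relevant union arm, not
++ * sizeof(RGXFWIF_KCCB_CMD). This mirrors the intent of the
++ * RGXGetCmdMemCopySize() note above; offsetof comes from <stddef.h> (or
++ * linux/stddef.h in kernel builds).
++ */
++static inline size_t RGXExampleKCCBCmdCopySize(size_t uiCmdDataSize)
++{
++	return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + uiCmdDataSize;
++}
++/* e.g. RGXExampleKCCBCmdCopySize(sizeof(RGXFWIF_MMUCACHEDATA)) for an
++ * RGXFWIF_KCCB_CMD_MMUCACHE command. */
++
++/*!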
++ ****************************************************************************** ++ * List of command types supported by the Firmware CCB ++ *****************************************************************************/ ++typedef enum ++{ ++ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be backed with physical pages ++ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ ++ RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be unbacked ++ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ ++ RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand freelist grow ++ \n Command data: RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA */ ++ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests freelists reconstruction ++ \n Command data: RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA */ ++ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a HWR event on a context ++ \n Command data: RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA */ ++ RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand debug dump ++ \n Command data: None */ ++ RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand update on process stats ++ \n Command data: RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA */ ++#if defined(SUPPORT_PDVFS) ++ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++#endif ++ RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests GPU restart ++ \n Command data: None */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_FWCCB_CMD_REG_READ = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++#if defined(SUPPORT_SOC_TIMER) ++ RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++#endif ++#endif ++ RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a FW pagefault ++ \n Command data: RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA */ ++} RGXFWIF_FWCCB_CMD_TYPE; ++ ++/*! ++ ****************************************************************************** ++ * List of the various stats of the process to update/increment ++ *****************************************************************************/ ++typedef enum ++{ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */ ++} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE; ++ ++/*! 
++ ******************************************************************************
++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_UPDATE_STATS Firmware CCB
++ * command
++ *****************************************************************************/
++typedef struct
++{
++	RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */
++	IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */
++	IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */
++} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
++
++typedef struct
++{
++	IMG_UINT32 ui32CoreClkRate;
++} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
++
++#if defined(SUPPORT_VALIDATION)
++typedef struct
++{
++	IMG_UINT64 ui64RegValue;
++} RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA;
++
++#if defined(SUPPORT_SOC_TIMER)
++typedef struct
++{
++	IMG_UINT64 ui64timerGray;
++	IMG_UINT64 ui64timerBinary;
++	IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS];
++} RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA;
++#endif
++#endif
++
++/*!
++ ******************************************************************************
++ * @Brief Firmware CCB command structure
++ *****************************************************************************/
++typedef struct
++{
++	RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */
++	IMG_UINT32 ui32FWCCBFlags; /*!< Compatibility and other flags */
++
++	union
++	{
++		RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing */
++		RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */
++		RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */
++		RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */
++		RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */
++		RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange;
++		RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA sCmdFWPagefault; /*!< Data for firmware page fault notification */
++#if defined(SUPPORT_VALIDATION)
++		RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA sCmdRgxRegReadData;
++#if defined(SUPPORT_SOC_TIMER)
++		RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers;
++#endif
++#endif
++	} RGXFW_ALIGN uCmdData;
++} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
++
++/*! @} End of FWCCBTypes */
++
++/*!
++ ******************************************************************************
++ * Workload estimation Firmware CCB command structure for RGX
++ *****************************************************************************/
++typedef struct
++{
++	IMG_UINT16 ui16ReturnDataIndex; /*!< Index for return data array */
++	IMG_UINT32 ui32CyclesTaken; /*!< The cycles the workload took on the hardware */
++} RGXFWIF_WORKEST_FWCCB_CMD;
++
++/*!
++ * @Defgroup ClientCCBTypes Client CCB data interface ++ * @Brief Types grouping data structures and defines used in realising Client CCB commands/functionality ++ * @{ ++ */ ++ ++/* Required memory alignment for 64-bit variables accessible by Meta ++ (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared ++ between the host and meta that contains 64-bit variables has to maintain ++ this alignment) */ ++#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64) ++ ++#define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15) ++#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1)) ++ ++typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; ++ ++/*! ++ * @Name Client CCB command types ++ * @{ ++ */ ++#define RGXFWIF_CCB_CMD_TYPE_GEOM (201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TA DM command */ ++#define RGXFWIF_CCB_CMD_TYPE_TQ_3D (202U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for TQ operation */ ++#define RGXFWIF_CCB_CMD_TYPE_3D (203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command */ ++#define RGXFWIF_CCB_CMD_TYPE_3D_PR (204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for Partial render */ ++#define RGXFWIF_CCB_CMD_TYPE_CDM (205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Compute DM command */ ++#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM (206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TDM command */ ++#define RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE (207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++#define RGXFWIF_CCB_CMD_TYPE_TQ_2D (208U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 2D DM command for TQ operation */ ++#define RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP (209U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++#define RGXFWIF_CCB_CMD_TYPE_NULL (210U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++#define RGXFWIF_CCB_CMD_TYPE_ABORT (211U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++ ++/* Leave a gap between CCB specific commands and generic commands */ ++#define RGXFWIF_CCB_CMD_TYPE_FENCE (212U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a command */ ++#define RGXFWIF_CCB_CMD_TYPE_UPDATE (213U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates of a command */ ++#define RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE (214U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates related to workload resources */ ++#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR (215U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a PR command */ ++#define RGXFWIF_CCB_CMD_TYPE_PRIORITY (216U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Context priority update command */ ++/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The ++ padding code with the CCB wrap upsets the FW if we don't have the task type ++ bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. 
++*/ ++#define RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) ++#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (218U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates of a command */ ++#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates related to workload resources */ ++ ++#if defined(SUPPORT_VALIDATION) ++#define RGXFWIF_CCB_CMD_TYPE_REG_READ (220U | RGX_CMD_MAGIC_DWORD_SHIFTED) ++#endif ++ ++#define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */ ++#define RGXFWIF_CCB_CMD_TYPE_RAY (222U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++/*! @} End of Client CCB command types */ ++ ++typedef struct ++{ ++ /* Index for the KM Workload estimation return data array */ ++ IMG_UINT16 RGXFW_ALIGN ui16ReturnDataIndex; ++ /* Predicted time taken to do the work in cycles */ ++ IMG_UINT32 RGXFW_ALIGN ui32CyclesPrediction; ++ /* Deadline for the workload (in usecs) */ ++ IMG_UINT64 RGXFW_ALIGN ui64Deadline; ++} RGXFWIF_WORKEST_KICK_DATA; ++ ++/*! @Brief Command header of a command in the client CCB buffer. ++ * ++ * Followed by this header is the command-data specific to the ++ * command-type as specified in the header. ++ */ ++typedef struct ++{ ++ RGXFWIF_CCB_CMD_TYPE eCmdType; /*!< Command data type following this command header */ ++ IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */ ++ IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ ++ IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */ ++ RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ ++} RGXFWIF_CCB_CMD_HEADER; ++ ++/* ++ ****************************************************************************** ++ * Client CCB commands which are only required by the kernel ++ *****************************************************************************/ ++ ++/*! @Brief Command data for \ref RGXFWIF_CCB_CMD_TYPE_PRIORITY type client CCB command */ ++typedef struct ++{ ++ IMG_INT32 i32Priority; /*!< Priority level */ ++} RGXFWIF_CMD_PRIORITY; ++ ++/*! @} End of ClientCCBTypes */ ++ ++/*! ++ ****************************************************************************** ++ * Signature and Checksums Buffer ++ *****************************************************************************/ ++typedef struct ++{ ++ PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */ ++ IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */ ++} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL; ++ ++typedef struct ++{ ++ PRGXFWIF_FIRMWAREGCOVBUFFER sBuffer; /*!< Ptr to firmware gcov buffer */ ++ IMG_UINT32 ui32Size; /*!< Amount of space for storing in the buffer */ ++} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL; ++ ++/*! 
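++ * Example (illustrative sketch; hypothetical helper, not part of this
++ * interface): the space a client CCB command occupies is its header plus the
++ * payload padded up to RGXFWIF_FWALLOC_ALIGN (8 bytes), so that 64-bit
++ * fields stay aligned for Meta as described above. E.g. a 12-byte payload is
++ * padded to 16 bytes.
++ */
++static inline IMG_UINT32 RGXExampleCCBCmdSpace(IMG_UINT32 ui32PayloadSize)
++{
++	return (IMG_UINT32)(sizeof(RGXFWIF_CCB_CMD_HEADER) +
++	                    RGX_CCB_FWALLOC_ALIGN(ui32PayloadSize));
++}
++
++/*!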
++ *****************************************************************************
++ * RGX Compatibility checks
++ *****************************************************************************/
++
++/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the
++   following define must be incremented by 1 to signal to the compatibility
++   logic that the layout has changed */
++#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3
++
++typedef struct
++{
++	IMG_UINT32 ui32LayoutVersion; /* WARNING: This field must be defined as the first one in this structure */
++	IMG_UINT64 RGXFW_ALIGN ui64BVNC;
++} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC;
++
++typedef struct
++{
++	IMG_UINT8 ui8OsCountSupport;
++} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS;
++
++#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
++	RGXFWIF_COMPCHECKS_BVNC (name) = { \
++		RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \
++		0, \
++	}
++#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \
++	do { \
++		(name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \
++		(name).ui64BVNC = 0; \
++	} while (0)
++
++typedef struct
++{
++	RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */
++	RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */
++	IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the FW processor version */
++	IMG_UINT32 ui32DDKVersion; /*!< software DDK version */
++	IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */
++	IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */
++	RGXFWIF_INIT_OPTIONS sInitOptions; /*!< initialisation options bit-field */
++	IMG_BOOL bUpdated; /*!< Information is valid */
++} UNCACHED_ALIGN RGXFWIF_COMPCHECKS;
++
++/*!
++ ******************************************************************************
++ * Updated configuration post FW data init.
++ *****************************************************************************/
++typedef struct
++{
++	IMG_UINT32 ui32ActivePMLatencyms; /* APM latency in ms before signalling IDLE to the host */
++	IMG_UINT32 ui32RuntimeCfgFlags; /* Compatibility and other flags */
++	IMG_BOOL bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default on each GPU power transition */
++	IMG_UINT32 ui32CoreClockSpeed; /* Core clock speed, currently only used to calculate timer ticks */
++	IMG_UINT32 ui32PowUnitsStateMask; /* Power Unit state mask set by the host */
++	IMG_UINT32 ui32RACStateMask; /* RAC state mask set by the host */
++	IMG_UINT32 ui32PHRMode; /* Periodic Hardware Reset configuration values */
++	IMG_UINT32 ui32HCSDeadlineMS; /* New number of milliseconds C/S is allowed to last */
++	IMG_UINT32 ui32WdgPeriodUs; /* The watchdog period in microseconds */
++	IMG_UINT32 aui32OSidPriority[RGXFW_MAX_NUM_OS]; /*!< Array of priorities per OS */
++	PRGXFWIF_HWPERFBUF sHWPerfBuf; /* On-demand allocated HWPerf buffer address, to be passed to the FW */
++	RGXFWIF_DMA_ADDR sHWPerfDMABuf;
++} RGXFWIF_RUNTIME_CFG;
++
++/*!
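++ * Example (illustrative sketch; hypothetical helper, not part of this
++ * interface): a minimal host-side compatibility test using the structures
++ * above. Both sides must agree on the layout version before the BVNC values
++ * themselves can be trusted and compared.
++ */
++static inline IMG_BOOL RGXExampleBVNCMatches(const RGXFWIF_COMPCHECKS *psChecks)
++{
++	if (!psChecks->bUpdated)
++	{
++		return IMG_FALSE; /* FW has not populated the block yet */
++	}
++	if (psChecks->sHWBVNC.ui32LayoutVersion != RGXFWIF_COMPCHECKS_LAYOUT_VERSION ||
++	    psChecks->sFWBVNC.ui32LayoutVersion != RGXFWIF_COMPCHECKS_LAYOUT_VERSION)
++	{
++		return IMG_FALSE; /* layout mismatch: fields cannot be interpreted */
++	}
++	return (IMG_BOOL)(psChecks->sHWBVNC.ui64BVNC == psChecks->sFWBVNC.ui64BVNC);
++}
++
++/*!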
++ ***************************************************************************** ++ * Control data for RGX ++ *****************************************************************************/ ++ ++#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U) ++ ++#if defined(PDUMP) ++ ++#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U ++ ++typedef enum ++{ ++ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, ++ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT ++} RGXFWIF_PID_FILTER_MODE; ++ ++typedef struct ++{ ++ IMG_PID uiPID; ++ IMG_UINT32 ui32OSID; ++} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM; ++ ++typedef struct ++{ ++ RGXFWIF_PID_FILTER_MODE eMode; ++ /* each process in the filter list is specified by a PID and OS ID pair. ++ * each PID and OS pair is an item in the items array (asItems). ++ * if the array contains less than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries ++ * then it must be terminated by an item with pid of zero. ++ */ ++ RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS]; ++} RGXFW_ALIGN RGXFWIF_PID_FILTER; ++#endif ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA (0x1U << 0) ++#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE (0x1U << 1) ++#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE (0x1U << 2) ++#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE (0x1U << 3) ++#endif ++ ++typedef enum ++{ ++ RGXFWIF_USRM_DM_VDM = 0, ++ RGXFWIF_USRM_DM_DDM = 1, ++ RGXFWIF_USRM_DM_CDM = 2, ++ RGXFWIF_USRM_DM_PDM = 3, ++ RGXFWIF_USRM_DM_TDM = 4, ++ RGXFWIF_USRM_DM_LAST ++} RGXFWIF_USRM_DM; ++ ++typedef enum ++{ ++ RGXFWIF_UVBRM_DM_VDM = 0, ++ RGXFWIF_UVBRM_DM_DDM = 1, ++ RGXFWIF_UVBRM_DM_LAST ++} RGXFWIF_UVBRM_DM; ++ ++typedef enum ++{ ++ RGXFWIF_TPU_DM_PDM = 0, ++ RGXFWIF_TPU_DM_VDM = 1, ++ RGXFWIF_TPU_DM_CDM = 2, ++ RGXFWIF_TPU_DM_TDM = 3, ++ RGXFWIF_TPU_DM_LAST ++} RGXFWIF_TPU_DM; ++ ++typedef enum ++{ ++ RGXFWIF_GPIO_VAL_OFF = 0, /*!< No GPIO validation */ ++ RGXFWIF_GPIO_VAL_GENERAL = 1, /*!< Simple test case that ++ initiates by sending data via the ++ GPIO and then sends back any data ++ received over the GPIO */ ++ RGXFWIF_GPIO_VAL_AP = 2, /*!< More complex test case that writes ++ and reads data across the entire ++ GPIO AP address range.*/ ++#if defined(SUPPORT_STRIP_RENDERING) ++ RGXFWIF_GPIO_VAL_SR_BASIC = 3, /*!< Strip Rendering AP based basic test.*/ ++ RGXFWIF_GPIO_VAL_SR_COMPLEX = 4, /*!< Strip Rendering AP based complex test.*/ ++#endif ++ RGXFWIF_GPIO_VAL_TESTBENCH = 5, /*!< Validates the GPIO Testbench. */ ++ RGXFWIF_GPIO_VAL_LOOPBACK = 6, /*!< Send and then receive each byte ++ in the range 0-255. */ ++ RGXFWIF_GPIO_VAL_LOOPBACK_LITE = 7, /*!< Send and then receive each power-of-2 ++ byte in the range 0-255. */ ++ RGXFWIF_GPIO_VAL_LAST ++} RGXFWIF_GPIO_VAL_MODE; ++ ++typedef enum ++{ ++ FW_PERF_CONF_NONE = 0, ++ FW_PERF_CONF_ICACHE = 1, ++ FW_PERF_CONF_DCACHE = 2, ++ FW_PERF_CONF_JTLB_INSTR = 5, ++ FW_PERF_CONF_INSTRUCTIONS = 6 ++} FW_PERF_CONF; ++ ++typedef enum ++{ ++ FW_BOOT_STAGE_TLB_INIT_FAILURE = -2, ++ FW_BOOT_STAGE_NOT_AVAILABLE = -1, ++ FW_BOOT_NOT_STARTED = 0, ++ FW_BOOT_BLDR_STARTED = 1, ++ FW_BOOT_CACHE_DONE, ++ FW_BOOT_TLB_DONE, ++ FW_BOOT_MAIN_STARTED, ++ FW_BOOT_ALIGNCHECKS_DONE, ++ FW_BOOT_INIT_DONE, ++} FW_BOOT_STAGE; ++ ++/*! ++ * @AddToGroup KCCBTypes ++ * @{ ++ * @Name Kernel CCB return slot responses ++ * @{ ++ * Usage of bit-fields instead of bare integers ++ * allows FW to possibly pack-in several responses for each single kCCB command. 
++ */ ++ ++#define RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED (1U << 0) /*!< Command executed (return status from FW) */ ++#define RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY (1U << 1) /*!< A cleanup was requested but resource busy */ ++#define RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE (1U << 2) /*!< Poll failed in FW for a HW operation to complete */ ++ ++#define RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U /*!< Reset value of a kCCB return slot (set by host) */ ++/*! ++ * @} End of Name Kernel CCB return slot responses ++ * @} End of AddToGroup KCCBTypes ++ */ ++ ++typedef struct ++{ ++ /* Fw-Os connection states */ ++ volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState; ++ volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState; ++ volatile IMG_UINT32 ui32AliveFwToken; ++ volatile IMG_UINT32 ui32AliveOsToken; ++} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL; ++ ++/*! @Brief Firmware OS Initialization data \ref RGXFWIF_OSINIT ++ * allocated by services and used by the Firmware on boot ++ **/ ++typedef struct ++{ ++ /* Kernel CCB */ ++ PRGXFWIF_CCB_CTL psKernelCCBCtl; /*!< Kernel CCB Control */ ++ PRGXFWIF_CCB psKernelCCB; /*!< Kernel CCB */ ++ PRGXFWIF_CCB_RTN_SLOTS psKernelCCBRtnSlots; /*!< Kernel CCB return slots */ ++ ++ /* Firmware CCB */ ++ PRGXFWIF_CCB_CTL psFirmwareCCBCtl; /*!< Firmware CCB control */ ++ PRGXFWIF_CCB psFirmwareCCB; /*!< Firmware CCB */ ++ ++ /* Workload Estimation Firmware CCB */ ++ PRGXFWIF_CCB_CTL psWorkEstFirmwareCCBCtl; /*!< Workload estimation control */ ++ PRGXFWIF_CCB psWorkEstFirmwareCCB; /*!< Workload estimation buffer */ ++ ++ PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl; /*!< HWRecoveryInfo control */ ++ ++ IMG_UINT32 ui32HWRDebugDumpLimit; /*!< Firmware debug dump maximum limit */ ++ ++ PRGXFWIF_OSDATA sFwOsData; /*!< Firmware per-os shared data */ ++ ++ RGXFWIF_COMPCHECKS sRGXCompChecks; /*!< Compatibility checks to be populated by the Firmware */ ++ ++} UNCACHED_ALIGN RGXFWIF_OSINIT; ++ ++/*! @Brief Firmware System Initialization data \ref RGXFWIF_SYSINIT ++ * allocated by services and used by the Firmware on boot ++ **/ ++typedef struct ++{ ++ IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; /*!< Fault read address */ ++ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; /*!< PDS execution base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase; /*!< USC execution base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCStateTableBase; /*!< FBCDC bindless texture state table base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCLargeStateTableBase; ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sTextureHeapBase; /*!< Texture state base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSIndirectHeapBase; /* Pixel Indirect State base */ ++ ++ IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter; /*! 
Event filter for Firmware events */ ++ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr; /*!< Address to use as a fence when issuing SLC3_CFI */ ++ ++ IMG_UINT64 RGXFW_ALIGN aui64UVBRMNumRegions[RGXFWIF_UVBRM_DM_LAST]; ++ IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; ++ IMG_UINT32 RGXFW_ALIGN aui32USRMNumRegions[RGXFWIF_USRM_DM_LAST]; ++ ++ IMG_UINT32 ui32FilterFlags; ++ ++ RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_MAX]; /*!< Signature and Checksum Buffers for DMs */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_SIGBUF_CTL asValidationSigBufCtl[RGXFWIF_DM_MAX]; ++ IMG_UINT64 RGXFW_ALIGN ui64RCEDisableMask; ++#endif ++ ++ PRGXFWIF_RUNTIME_CFG sRuntimeCfg; /*!< Firmware Runtime configuration */ ++ ++ PRGXFWIF_TRACEBUF sTraceBufCtl; /*!< Firmware Trace buffer control */ ++ PRGXFWIF_SYSDATA sFwSysData; /*!< Firmware System shared data */ ++#if defined(SUPPORT_TBI_INTERFACE) ++ PRGXFWIF_TBIBUF sTBIBuf; /*!< Tbi log buffer */ ++#endif ++ ++ PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; /*!< GPU utilization buffer */ ++ PRGXFWIF_REG_CFG sRegCfg; /*!< Firmware register user configuration */ ++ PRGXFWIF_HWPERF_CTL sHWPerfCtl; /*!< HWPerf counter block configuration.*/ ++ ++#if defined(SUPPORT_FIRMWARE_GCOV) ++ RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; /*!< Firmware gcov buffer control */ ++#endif ++ ++ RGXFWIF_DEV_VIRTADDR sAlignChecks; /*!< Array holding Server structures alignment data */ ++ ++ IMG_UINT32 ui32InitialCoreClockSpeed; /*!< Core clock speed at FW boot time */ ++ ++ IMG_UINT32 ui32InitialActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ ++ ++ IMG_BOOL bFirmwareStarted; /*!< Flag to be set by the Firmware after successful start */ ++ ++ IMG_UINT32 ui32MarkerVal; /*!< Host/FW Trace synchronisation Partition Marker */ ++ ++ IMG_UINT32 ui32FirmwareStartedTimeStamp; /*!< Firmware initialization complete time */ ++ ++ IMG_UINT32 ui32JonesDisableMask; ++ ++ RGXFWIF_DMA_ADDR sCorememDataStore; /*!< Firmware coremem data */ ++ ++ FW_PERF_CONF eFirmwarePerf; /*!< Firmware performance counter config */ ++ ++#if defined(SUPPORT_PDVFS) ++ RGXFWIF_PDVFS_OPP RGXFW_ALIGN sPDVFSOPPInfo; ++ ++ /** ++ * FW Pointer to memory containing core clock rate in Hz. ++ * Firmware (PDVFS) updates the memory when running on non primary FW thread ++ * to communicate to host driver. ++ */ ++ PRGXFWIF_CORE_CLK_RATE RGXFW_ALIGN sCoreClockRate; ++#endif ++ ++#if defined(PDUMP) ++ RGXFWIF_PID_FILTER sPIDFilter; ++#endif ++ ++ RGXFWIF_GPIO_VAL_MODE eGPIOValidationMode; ++ ++ RGX_HWPERF_BVNC sBvncKmFeatureFlags; /*!< Used in HWPerf for decoding BVNC Features*/ ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++ IMG_UINT32 ui32SecurityTestFlags; ++ RGXFWIF_DEV_VIRTADDR pbSecureBuffer; ++ RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer; ++#endif ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ /* ++ * Used when validation is enabled to allow the host to check ++ * that MTS sent the correct sideband in response to a kick ++ * from a given OSes schedule register. 
++ * Testing is enabled if RGXFWIF_KICK_TEST_ENABLED_BIT is set ++ * ++ * Set by the host to: ++ * (osid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT ++ * reset to 0 by FW when kicked by the given OSid ++ */ ++ IMG_UINT32 ui32OSKickTest; ++#endif ++ ++#if defined(SUPPORT_AUTOVZ) ++ IMG_UINT32 ui32VzWdgPeriod; ++#endif ++ ++} UNCACHED_ALIGN RGXFWIF_SYSINIT; ++ ++static_assert(sizeof(RGXFWIF_SYSINIT) <= 968, ++ "Size of structure RGXFWIF_SYSINIT exceeds maximum expected size."); ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1 ++#define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1 ++#endif ++ ++/*! ++ ***************************************************************************** ++ * Timer correlation shared data and defines ++ *****************************************************************************/ ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp; ++ IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp; ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp; ++ ++ /* Utility variable used to convert CR timer deltas to OS timer deltas (nS), ++ * where the deltas are relative to the timestamps above: ++ * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs; ++ ++ IMG_UINT32 ui32CoreClockSpeed; ++ IMG_UINT32 ui32Reserved; ++} UNCACHED_ALIGN RGXFWIF_TIME_CORR; ++ ++ ++/* The following macros are used to help converting FW timestamps to the Host ++ * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of ++ * time; it increments by 1 every 256 GPU clock ticks, so the general ++ * formula to perform the conversion is: ++ * ++ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS, ++ * otherwise if (scale == 10^6) then deltaOS is in uS ] ++ * ++ * deltaCR * 256 256 * scale ++ * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ] ++ * GPUclockspeed GPUclockspeed ++ * ++ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20) ++ * to get some better accuracy and to avoid returning 0 in the integer ++ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz. ++ * This is the same as keeping K as a decimal number. ++ * ++ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies ++ * (deltaCR * K is more or less a constant), and it's relative to the base ++ * OS timestamp sampled as a part of the timer correlation data. ++ * This base is refreshed on GPU power-on, DVFS transition and periodic ++ * frequency calibration (executed every few seconds if the FW is doing ++ * some work), so as long as the GPU is doing something and one of these ++ * events is triggered then deltaCR * K will not overflow and deltaOS will be ++ * correct. ++ */ ++ ++#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20) ++ ++#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \ ++ (((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT) ++ ++ ++/*! 
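++ * A worked example for the conversion above (illustrative numbers, not
++ * taken from this file): with GPUclockspeed = 800 MHz and scale = 10^9,
++ * K = (256 * 10^9 << 20) / 800000000 = 335544320, so a delta of 100 CR
++ * ticks gives RGXFWIF_GET_DELTA_OSTIME_NS(100, K) = (100 * K) >> 20 =
++ * 32000 nS, i.e. the expected 100 * 256 = 25600 GPU clocks at 800 MHz.
++ */
++
++/*!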
++ ****************************************************************************** ++ * GPU Utilisation ++ *****************************************************************************/ ++ ++/* See rgx_common.h for a list of GPU states */ ++#define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK) ++ ++#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) ++#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) ++ ++/* The OS timestamps computed by the FW are approximations of the real time, ++ * which means they could be slightly behind or ahead the real timer on the Host. ++ * In some cases we can perform subtractions between FW approximated ++ * timestamps and real OS timestamps, so we need a form of protection against ++ * negative results if for instance the FW one is a bit ahead of time. ++ */ ++#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \ ++ (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U) ++ ++#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \ ++ (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state)) ++ ++ ++/* The timer correlation array must be big enough to ensure old entries won't be ++ * overwritten before all the HWPerf events linked to those entries are processed ++ * by the MISR. The update frequency of this array depends on how fast the system ++ * can change state (basically how small the APM latency is) and perform DVFS transitions. ++ * ++ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading ++ * an entry while the Host is updating it. With 2 entries in the worst case the FW ++ * will read old data, which is still quite ok if the Host is updating the timer ++ * correlation at that time. ++ */ ++#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256U ++#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE) ++ ++/* Make sure the timer correlation array size is a power of 2 */ ++static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, ++ "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); ++ ++typedef struct ++{ ++ RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE]; ++ IMG_UINT32 ui32TimeCorrSeqCount; ++ ++ /* Compatibility and other flags */ ++ IMG_UINT32 ui32GpuUtilFlags; ++ ++ /* Last GPU state + OS time of the last state update */ ++ IMG_UINT64 RGXFW_ALIGN ui64LastWord; ++ ++ /* Counters for the amount of time the GPU was active/idle/blocked */ ++ IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM]; ++} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32RenderTargetIndex; //Render number ++ IMG_UINT32 ui32CurrentRenderTarget; //index in RTA ++ IMG_UINT32 ui32ActiveRenderTargets; //total active RTs ++ RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices ++ RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target ++ IMG_UINT32 ui32MaxRTs; //Number of render targets in the array ++ IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_RTA_CTL; ++ ++/*! 
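++ * A sketch of host-side use of the utilisation macros above (illustrative
++ * only; the variable names here are hypothetical, not part of this
++ * interface):
++ *
++ *   ui64Last   = sFWCB.ui64LastWord;
++ *   ui64State  = RGXFWIF_GPU_UTIL_GET_STATE(ui64Last);
++ *   ui64Period = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64NowOSTime,
++ *                    RGXFWIF_GPU_UTIL_GET_TIME(ui64Last));
++ */
++
++/*!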
++ * @InGroup RenderTarget ++ * @Brief Firmware Freelist holding usage state of the Parameter Buffers ++ */ ++typedef struct ++{ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListBaseDevVAddr; /*!< Freelist page table base address */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListStateDevVAddr; /*!< Freelist state buffer base address */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListLastGrowDevVAddr; /*!< Freelist base address at last grow */ ++ ++#if defined(PM_INTERACTIVE_MODE) ++ IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page */ ++ IMG_UINT32 ui32CurrentStackTop; /*!< Freelist current free page */ ++#endif ++ ++ IMG_UINT32 ui32MaxPages; /*!< Max no. of pages can be added to the freelist */ ++ IMG_UINT32 ui32GrowPages; /*!< No pages to add in each freelist grow */ ++ IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */ ++#if defined(PM_INTERACTIVE_MODE) ++ IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */ ++ IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. of pages allocated for GPU MMU for PM*/ ++#endif ++#if defined(SUPPORT_SHADOW_FREELISTS) ++ IMG_UINT32 ui32HWRCounter; ++ PRGXFWIF_FWMEMCONTEXT psFWMemContext; ++#endif ++ IMG_UINT32 ui32FreeListID; /*!< Unique Freelist ID */ ++ IMG_BOOL bGrowPending; /*!< Freelist grow is pending */ ++ IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */ ++ IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */ ++ ++ IMG_BOOL bUpdatePending; ++ IMG_UINT32 ui32UpdateNewPages; ++ IMG_UINT32 ui32UpdateNewReadyPages; ++} UNCACHED_ALIGN RGXFWIF_FREELIST; ++ ++/*! ++ ****************************************************************************** ++ * HWRTData ++ *****************************************************************************/ ++ ++/* HWRTData flags */ ++/* Deprecated flags 1:0 */ ++#define HWRTDATA_HAS_LAST_TA (1UL << 2) ++#define HWRTDATA_PARTIAL_RENDERED (1UL << 3) ++#define HWRTDATA_KILLED (1UL << 4) ++#define HWRTDATA_KILL_AFTER_TARESTART (1UL << 5) ++#if defined(SUPPORT_AGP) ++#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (1UL << 6) ++#if defined(SUPPORT_AGP4) ++#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (1UL << 7) ++#endif ++#define HWRTDATA_GEOM_NEEDS_RESUME (1UL << 8) ++#endif ++ ++typedef enum ++{ ++ RGXFWIF_RTDATA_STATE_NONE = 0, ++ RGXFWIF_RTDATA_STATE_KICKTA, ++ RGXFWIF_RTDATA_STATE_KICKTAFIRST, ++ RGXFWIF_RTDATA_STATE_TAFINISHED, ++ RGXFWIF_RTDATA_STATE_KICK3D, ++ RGXFWIF_RTDATA_STATE_3DFINISHED, ++ RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED, ++ RGXFWIF_RTDATA_STATE_TAOUTOFMEM, ++ RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, ++ /* In case of HWR, we can't set the RTDATA state to NONE, ++ * as this will cause any TA to become a first TA. ++ * To ensure all related TA's are skipped, we use the HWR state */ ++ RGXFWIF_RTDATA_STATE_HWR, ++ RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU ++} RGXFWIF_RTDATA_STATE; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32ScreenPixelMax; ++ IMG_UINT64 RGXFW_ALIGN ui64PPPMultiSampleCtl; ++ IMG_UINT32 ui32TEStride; ++ IMG_UINT32 ui32TPCSize; ++ IMG_UINT32 ui32TEScreen; ++ IMG_UINT32 ui32TEAA; ++ IMG_UINT32 ui32TEMTILE1; ++ IMG_UINT32 ui32TEMTILE2; ++ IMG_UINT32 ui32RgnStride; ++ IMG_UINT32 ui32ISPMergeLowerX; ++ IMG_UINT32 ui32ISPMergeLowerY; ++ IMG_UINT32 ui32ISPMergeUpperX; ++ IMG_UINT32 ui32ISPMergeUpperY; ++ IMG_UINT32 ui32ISPMergeScaleX; ++ IMG_UINT32 ui32ISPMergeScaleY; ++} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; ++ ++/*! ++ * @InGroup RenderTarget ++ * @Brief Firmware Render Target data i.e. 
HWRTDATA used to hold the PM context ++ */ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[4]; /*!< VCE Page Catalogue base */ ++ IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[4]; ++ IMG_UINT64 RGXFW_ALIGN ui64TECatBase[4]; /*!< TE Page Catalogue base */ ++ IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[4]; ++ IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */ ++ IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; ++ ++#if defined(PM_INTERACTIVE_MODE) ++ IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sPMMListDevVAddr; /*!< Mlist table base */ ++#else ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sPMRenderStateDevVAddr; /*!< Series8 PM State buffers */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sPMSecureRenderStateDevVAddr; ++#endif ++ ++ PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */ ++ IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; ++ IMG_BOOL bRenderStateNeedsReset; ++ ++ RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; /*!< Render target dimension dependent data */ ++ ++ IMG_UINT32 ui32HWRTDataFlags; ++ RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */ ++ ++ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Render target clean up state */ ++ ++ RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */ ++ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */ ++#if defined(RGX_FIRMWARE) ++ struct RGXFWIF_FWCOMMONCONTEXT_* psOwnerGeom; ++#else ++ RGXFWIF_DEV_VIRTADDR pui32OwnerGeomNotUsedByHost; ++#endif ++ ++#if defined(PM_INTERACTIVE_MODE) ++ IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; /*!< Freelist page table entry for current Mlist page */ ++ IMG_UINT32 ui32PMMListStackPointer; /*!< Current Mlist page */ ++#endif ++#if defined(SUPPORT_TRP) ++ IMG_UINT32 ui32KickFlagsCopy; ++ IMG_UINT32 ui32TRPState; ++#endif ++} UNCACHED_ALIGN RGXFWIF_HWRTDATA; ++ ++/* Sync_checkpoint firmware object. ++ * This is the FW-addressable structure use to hold the sync checkpoint's ++ * state and other information which needs to be accessed by the firmware. ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */ ++ IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */ ++} SYNC_CHECKPOINT_FW_OBJ; ++ ++/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */ ++#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0) ++ ++#endif /* RGX_FWIF_KM_H */ ++ ++/****************************************************************************** ++ End of file (rgx_fwif_km.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_shared.h b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_shared.h +new file mode 100644 +index 000000000000..ad7cb3213e4d +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_fwif_shared.h +@@ -0,0 +1,376 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX firmware interface structures ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX firmware interface structures shared by both host client ++ and host server ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_FWIF_SHARED_H) ++#define RGX_FWIF_SHARED_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "rgx_common.h" ++#include "powervr/mem_types.h" ++ ++/* Indicates the number of RTDATAs per RTDATASET */ ++#if defined(SUPPORT_AGP) ++#if defined(SUPPORT_AGP4) ++#define RGXMKIF_NUM_RTDATAS 4U ++#define RGXMKIF_NUM_GEOMDATAS 4U ++#define RGXMKIF_NUM_RTDATA_FREELISTS 20U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */ ++#define RGX_NUM_GEOM_CORES (4U) ++#else ++#define RGXMKIF_NUM_RTDATAS 4U ++#define RGXMKIF_NUM_GEOMDATAS 4U ++#define RGXMKIF_NUM_RTDATA_FREELISTS 12U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */ ++#define RGX_NUM_GEOM_CORES (2U) ++#endif ++#else ++#define RGXMKIF_NUM_RTDATAS 2U ++#define RGXMKIF_NUM_GEOMDATAS 1U ++#define RGXMKIF_NUM_RTDATA_FREELISTS 2U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */ ++#define RGX_NUM_GEOM_CORES (1U) ++#endif ++ ++/* Maximum number of UFOs in a CCB command. ++ * The number is based on having 32 sync prims (as originally), plus 32 sync ++ * checkpoints. ++ * Once the use of sync prims is no longer supported, we will retain ++ * the same total (64) as the number of sync checkpoints which may be ++ * supporting a fence is not visible to the client driver and has to ++ * allow for the number of different timelines involved in fence merges. ++ */ ++#define RGXFWIF_CCB_CMD_MAX_UFOS (32U+32U) ++ ++/* ++ * This is a generic limit imposed on any DM (TA,3D,CDM,TDM,2D,TRANSFER) ++ * command passed through the bridge. 
++ * Just across the bridge in the server, any incoming kick command size is ++ * checked against this maximum limit. ++ * In case the incoming command size is larger than the specified limit, ++ * the bridge call is retired with error. ++ */ ++#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U) ++ ++typedef struct RGXFWIF_DEV_VIRTADDR_ ++{ ++ IMG_UINT32 ui32Addr; ++} RGXFWIF_DEV_VIRTADDR; ++ ++typedef struct ++{ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr; ++ RGXFWIF_DEV_VIRTADDR pbyFWAddr; ++} UNCACHED_ALIGN RGXFWIF_DMA_ADDR; ++ ++typedef IMG_UINT8 RGXFWIF_CCCB; ++ ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL; ++ ++ ++/*! ++ * @InGroup ClientCCBTypes ++ * @Brief Command data for fence & update types Client CCB commands. ++ */ ++typedef struct ++{ ++ PRGXFWIF_UFO_ADDR puiAddrUFO; /*!< Address to be checked/updated */ ++ IMG_UINT32 ui32Value; /*!< Value to check-against/update-to */ ++} RGXFWIF_UFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */ ++ IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */ ++} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL; ++ ++#define RGXFWIF_PRBUFFER_START IMG_UINT32_C(0) ++#define RGXFWIF_PRBUFFER_ZSBUFFER IMG_UINT32_C(0) ++#define RGXFWIF_PRBUFFER_MSAABUFFER IMG_UINT32_C(1) ++#define RGXFWIF_PRBUFFER_MAXSUPPORTED IMG_UINT32_C(2) ++ ++typedef IMG_UINT32 RGXFWIF_PRBUFFER_TYPE; ++ ++typedef enum ++{ ++ RGXFWIF_PRBUFFER_UNBACKED = 0, ++ RGXFWIF_PRBUFFER_BACKED, ++ RGXFWIF_PRBUFFER_BACKING_PENDING, ++ RGXFWIF_PRBUFFER_UNBACKING_PENDING, ++}RGXFWIF_PRBUFFER_STATE; ++ ++/*! ++ * @InGroup RenderTarget ++ * @Brief OnDemand Z/S/MSAA Buffers ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32BufferID; /*!< Buffer ID*/ ++ IMG_BOOL bOnDemand; /*!< Needs On-demand Z/S/MSAA Buffer allocation */ ++ RGXFWIF_PRBUFFER_STATE eState; /*!< Z/S/MSAA -Buffer state */ ++ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */ ++ IMG_UINT32 ui32PRBufferFlags; /*!< Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_PRBUFFER; ++ ++/* ++ * Used to share frame numbers across UM-KM-FW, ++ * frame number is set in UM, ++ * frame number is required in both KM for HTB and FW for FW trace. ++ * ++ * May be used to house Kick flags in the future. ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32FrameNum; /*!< associated frame number */ ++} CMD_COMMON; ++ ++/* ++ * TA and 3D commands require set of firmware addresses that are stored in the ++ * Kernel. Client has handle(s) to Kernel containers storing these addresses, ++ * instead of raw addresses. We have to patch/write these addresses in KM to ++ * prevent UM from controlling FW addresses directly. ++ * Typedefs for TA and 3D commands are shared between Client and Firmware (both ++ * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use ++ * TA|3D CMD type definitions directly. Therefore we have a SHARED block that ++ * is shared between UM-KM-FW across all BVNC configurations. ++ */ ++typedef struct ++{ ++ CMD_COMMON sCmn; /*!< Common command attributes */ ++ RGXFWIF_DEV_VIRTADDR sHWRTData; /* RTData associated with this command, ++ this is used for context selection and for storing out HW-context, ++ when TA is switched out for continuing later */ ++ ++ RGXFWIF_DEV_VIRTADDR asPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /* Supported PR Buffers like Z/S/MSAA Scratch */ ++ ++} CMDTA3D_SHARED; ++ ++/*! ++ * Client Circular Command Buffer (CCCB) control structure. 
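++ * (For illustration: a 4 KiB CCCB has ui32WrapMask = 0xFFF, see below,
++ * and a byte offset advances as (offset + command size) & ui32WrapMask.)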
++ * This is shared between the Server and the Firmware and holds byte offsets ++ * into the CCCB as well as the wrapping mask to aid wrap around. A given ++ * snapshot of this queue with Cmd 1 running on the GPU might be: ++ * ++ * Roff Doff Woff ++ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........] ++ * < runnable commands >< !ready to run > ++ * ++ * Cmd 1 : Currently executing on the GPU data master. ++ * Cmd 2,3,4: Fence dependencies met, commands runnable. ++ * Cmd 5... : Fence dependency not met yet. ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32WriteOffset; /*!< Host write offset into CCB. This ++ * must be aligned to 16 bytes. */ ++ IMG_UINT32 ui32ReadOffset; /*!< Firmware read offset into CCB. ++ Points to the command that is ++ runnable on GPU, if R!=W */ ++ IMG_UINT32 ui32DepOffset; /*!< Firmware fence dependency offset. ++ Points to commands not ready, i.e. ++ fence dependencies are not met. */ ++ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity ++ in bytes of the CCB-1 */ ++#if defined(SUPPORT_AGP) ++ IMG_UINT32 ui32ReadOffset2; ++#if defined(SUPPORT_AGP4) ++ IMG_UINT32 ui32ReadOffset3; ++ IMG_UINT32 ui32ReadOffset4; ++#endif ++#endif ++} UNCACHED_ALIGN RGXFWIF_CCCB_CTL; ++ ++ ++typedef IMG_UINT32 RGXFW_FREELIST_TYPE; ++ ++#define RGXFW_LOCAL_FREELIST IMG_UINT32_C(0) ++#define RGXFW_GLOBAL_FREELIST IMG_UINT32_C(1) ++#if defined(SUPPORT_AGP4) ++#define RGXFW_GLOBAL2_FREELIST IMG_UINT32_C(2) ++#define RGXFW_GLOBAL3_FREELIST IMG_UINT32_C(3) ++#define RGXFW_GLOBAL4_FREELIST IMG_UINT32_C(4) ++#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL4_FREELIST + 1U) ++#elif defined(SUPPORT_AGP) ++#define RGXFW_GLOBAL2_FREELIST IMG_UINT32_C(2) ++#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL2_FREELIST + 1U) ++#else ++#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL_FREELIST + 1U) ++#endif ++#define RGXFW_MAX_HWFREELISTS (2U) ++ ++/*! ++ * @Defgroup ContextSwitching Context switching data interface ++ * @Brief Types grouping data structures and defines used in realising the Context Switching (CSW) functionality ++ * @{ ++ */ ++ ++/*! ++ * @Brief GEOM DM or TA register controls for context switch ++ */ ++typedef struct ++{ ++ IMG_UINT64 uTAReg_DCE_CONTEXT_STATE_BASE_ADDR; ++ IMG_UINT64 uTAReg_TA_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the TA's context state buffer */ ++ ++ struct ++ { ++ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM0; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM1; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_VDM2; ++ ++ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM0; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM1; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_DDM2; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_STORE_TASK_XFB; ++ ++ /* VDM resume state update controls */ ++ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM0; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM1; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_VDM2; ++ ++ ++ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM0; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM1; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_DDM2; ++ IMG_UINT64 uTAReg_DCE_CONTEXT_RESUME_TASK_XFB; ++ } asTAState[2]; ++ ++} RGXFWIF_TAREGISTERS_CSWITCH; ++/*! 
@} End of Defgroup ContextSwitching */ ++ ++typedef struct ++{ ++ IMG_UINT64 u3DReg_IPP_CONTEXT_ADDR; ++} RGXFWIF_3DREGISTERS_CSWITCH; ++ ++typedef struct ++{ ++ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0; ++ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS1; ++ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS; ++ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS1; ++ ++ /* CDM resume controls */ ++ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0; ++ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0_B; ++ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0_B; ++ ++} RGXFWIF_CDM_REGISTERS_CSWITCH; ++ ++static_assert((sizeof(RGXFWIF_CDM_REGISTERS_CSWITCH) % 8U) == 0U, ++ "the size of the structure must be multiple of 8"); ++ ++#define RGXFWIF_CDM_REGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_CDM_REGISTERS_CSWITCH) ++ ++/*! ++ * @InGroup ContextSwitching ++ * @Brief Render context static register controls for context switch ++ */ ++typedef struct ++{ ++ RGXFWIF_TAREGISTERS_CSWITCH RGXFW_ALIGN asCtxSwitch_GeomRegs[RGX_NUM_GEOM_CORES]; ++ RGXFWIF_3DREGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_3DRegs; /*!< 3D registers for ctx switch */ ++} RGXFWIF_STATIC_RENDERCONTEXT_STATE; ++ ++#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE) ++ ++typedef struct ++{ ++ RGXFWIF_CDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< CDM registers for ctx switch */ ++} RGXFWIF_STATIC_COMPUTECONTEXT_STATE; ++ ++#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE) ++ ++typedef struct ++{ ++ IMG_UINT64 uRDMReg_RDM_CONTEXT_STATE_BASE_ADDR; ++} RGXFWIF_RDM_REGISTERS_CSWITCH; ++ ++static_assert((sizeof(RGXFWIF_RDM_REGISTERS_CSWITCH) % 8U) == 0U, ++ "the size of the structure must be multiple of 8"); ++ ++#define RGXFWIF_RDM_REGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_RDM_REGISTERS_CSWITCH) ++ ++typedef struct ++{ ++ RGXFWIF_RDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< RDM registers for ctx switch */ ++} RGXFWIF_STATIC_RAYCONTEXT_STATE; ++ ++#define RGXFWIF_STATIC_RAYCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RAYCONTEXT_STATE) ++ ++/*! ++ @Brief Context reset reason. Last reset reason for a reset context. 
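++ For example, a context that caused a lockup is reported with
++ RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP, while contexts affected by another
++ context's lockup are reported with RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP.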
++*/ ++typedef enum ++{ ++ RGX_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */ ++ RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */ ++ RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */ ++ RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */ ++ RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */ ++ RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, /*!< Forced reset to ensure scheduling requirements */ ++ RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM = 6, /*!< CDM Mission/safety checksum mismatch */ ++ RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM = 7, /*!< TRP checksum mismatch */ ++ RGX_CONTEXT_RESET_REASON_GPU_ECC_OK = 8, /*!< GPU ECC error (corrected, OK) */ ++ RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR = 9, /*!< GPU ECC error (uncorrected, HWR) */ ++ RGX_CONTEXT_RESET_REASON_FW_ECC_OK = 10, /*!< FW ECC error (corrected, OK) */ ++ RGX_CONTEXT_RESET_REASON_FW_ECC_ERR = 11, /*!< FW ECC error (uncorrected, ERR) */ ++ RGX_CONTEXT_RESET_REASON_FW_WATCHDOG = 12, /*!< FW Safety watchdog triggered */ ++ RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13, /*!< FW page fault (no HWR) */ ++ RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14, /*!< FW execution error (GPU reset requested) */ ++ RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15, /*!< Host watchdog detected FW error */ ++ RGX_CONTEXT_GEOM_OOM_DISABLED = 16, /*!< Geometry DM OOM event is not allowed */ ++} RGX_CONTEXT_RESET_REASON; ++ ++/*! ++ @Brief Context reset data shared with the host ++*/ ++typedef struct ++{ ++ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reset reason */ ++ IMG_UINT32 ui32ResetExtJobRef; /*!< External Job ID */ ++} RGX_CONTEXT_RESET_REASON_DATA; ++#endif /* RGX_FWIF_SHARED_H */ ++ ++/****************************************************************************** ++ End of file (rgx_fwif_shared.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/rgx_heaps.h b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_heaps.h +new file mode 100644 +index 000000000000..b35b4f3929cf +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_heaps.h +@@ -0,0 +1,65 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX heap definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_HEAPS_H) ++#define RGX_HEAPS_H ++ ++/* ++ Identify heaps by their names ++*/ ++#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< SVM (shared virtual memory) Heap Identifier */ ++#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */ ++#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */ ++#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */ ++#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */ ++#define RGX_VK_CAPT_REPLAY_HEAP_IDENT "Vulkan Capture Replay" /*!< RGX vulkan capture replay buffer Heap Identifier */ ++#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Compute Signals Heap Identifier */ ++#define RGX_COMPONENT_CTRL_HEAP_IDENT "Component Control" /*!< RGX DCE Component Control Heap Identifier */ ++#define RGX_FBCDC_HEAP_IDENT "FBCDC" /*!< RGX FBCDC State Table Heap Identifier */ ++#define RGX_FBCDC_LARGE_HEAP_IDENT "Large FBCDC" /*!< RGX Large FBCDC State Table Heap Identifier */ ++#define RGX_PDS_INDIRECT_STATE_HEAP_IDENT "PDS Indirect State" /*!< PDS Indirect State Table Heap Identifier */ ++#define RGX_CMP_MISSION_RMW_HEAP_IDENT "Compute Mission RMW" /*!< Compute Mission RMW Heap Identifier */ ++#define RGX_CMP_SAFETY_RMW_HEAP_IDENT "Compute Safety RMW" /*!< Compute Safety RMW Heap Identifier */ ++#define RGX_TEXTURE_STATE_HEAP_IDENT "Texture State" /*!< Texture State Heap Identifier */ ++#define RGX_VISIBILITY_TEST_HEAP_IDENT "Visibility Test" /*!< Visibility Test Heap Identifier */ ++ ++#endif /* RGX_HEAPS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/rgx_hwperf.h b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_hwperf.h +new file mode 100644 +index 000000000000..408b76901ba6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_hwperf.h +@@ -0,0 +1,1424 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX HWPerf and Debug Types and Defines Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Common data types definitions for hardware performance API ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_HWPERF_H_ ++#define RGX_HWPERF_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* These structures are used on both GPU and CPU and must be a size that is a ++ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at ++ * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. ++ */ ++ ++/****************************************************************************** ++ * Includes and Defines ++ *****************************************************************************/ ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++#include "rgx_common.h" ++#include "rgx_hwperf_common.h" ++#include "pvrsrv_tlcommon.h" ++#include "pvrsrv_sync_km.h" ++ ++ ++#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) ++/* HWPerf interface assumption checks */ ++static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, ++ "Cluster count too large for HWPerf protocol definition"); ++#endif ++ ++/*! 
Perf counter control words */ ++#define RGX_HWPERF_CTRL_NOP (0) /*!< only update HW counters */ ++#define RGX_HWPERF_CTRL_STATE_UPDATE_EN (1U << 31) /*!< persistent state update; see other flags below */ ++#define RGX_HWPERF_CTRL_GEOM_FULLRANGE (1U) /*!< selectable geom and 3D counters are full range */ ++#define RGX_HWPERF_CTRL_COMP_FULLRANGE (2U) /*!< selectable compute counters are full range */ ++#define RGX_HWPERF_CTRL_TDM_FULLRANGE (4U) /*!< selectable TDM counters are full range */ ++ ++ ++/****************************************************************************** ++ * Data Stream Common Types ++ *****************************************************************************/ ++ ++/*! All the Data Masters HWPerf is aware of. When a new DM is added to this ++ * list, it should be appended at the end to maintain backward compatibility ++ * of HWPerf data. ++ */ ++typedef enum { ++ ++ RGX_HWPERF_DM_GP, ++ RGX_HWPERF_DM_TDM, ++ RGX_HWPERF_DM_GEOM, ++ RGX_HWPERF_DM_3D, ++ RGX_HWPERF_DM_CDM, ++ RGX_HWPERF_DM_RTU, ++ ++ RGX_HWPERF_DM_LAST, ++ ++ RGX_HWPERF_DM_INVALID = 0x1FFFFFFF ++} RGX_HWPERF_DM; ++ ++/*! Enum containing bit position for 32bit feature flags used in hwperf and api */ ++typedef enum { ++ RGX_HWPERF_FEATURE_PERFBUS_FLAG = 0x0001, ++ RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG = 0x0002, ++ RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG = 0x0004, ++ RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG = 0x0008, ++ RGX_HWPERF_FEATURE_ROGUEXE_FLAG = 0x0010, ++ RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG = 0x0020, ++ RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG = 0x0040, ++ RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION = 0x0080, ++ RGX_HWPERF_FEATURE_MULTICORE_FLAG = 0x0100, ++ RGX_HWPERF_FEATURE_RAYTRACING_FLAG = 0x0200, ++ RGX_HWPERF_FEATURE_CXT_TOP_INFRASTRUCTURE_FLAG = 0x0400, ++ RGX_HWPERF_FEATURE_VOLCANIC_FLAG = 0x0800, ++ RGX_HWPERF_FEATURE_ROGUE_FLAG = 0x1000, ++ RGX_HWPERF_FEATURE_OCEANIC_FLAG = 0x2000 ++} RGX_HWPERF_FEATURE_FLAGS; ++ ++/*! This structure holds the data of a firmware packet. */ ++typedef struct ++{ ++ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ ++ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ ++ IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ ++ IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ ++ IMG_UINT32 ui32TimeCorrIndex; /*!< Internal field */ ++ IMG_UINT32 ui32Padding; /*!< Reserved */ ++} RGX_HWPERF_FW_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); ++ ++/*! This structure holds the data of a hardware packet, including counters. 
*/ ++typedef struct ++{ ++ IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ ++ IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ ++ IMG_UINT32 ui32PID; /*!< Process identifier */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ ++ IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ ++ IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ ++ IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ ++ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ ++ IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ ++ IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ ++ IMG_UINT32 ui32CtxPriority; /*!< Context priority */ ++ IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */ ++ IMG_UINT32 ui32KickInfo; /*!< <31..8> Reserved <7..0> GPU Pipeline DM kick ID, 0 if not using Pipeline DMs */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. To ensure correct alignment */ ++ IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Optional variable length Counter data */ ++ IMG_UINT32 ui32Padding2; /*!< Reserved. To ensure correct alignment (not written in the packet) */ ++} RGX_HWPERF_HW_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA); ++RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_HW_DATA, aui32CountBlksStream); ++ ++/*! Mask for use with the aui32CountBlksStream field when decoding the ++ * counter block ID and mask word. */ ++#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U ++#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U ++ ++/*! MAX value used in server handling of counter config arrays */ ++#define RGX_CNTBLK_COUNTERS_MAX PVRSRV_HWPERF_COUNTERS_PERBLK ++ ++ ++/*! Obtains the counter block ID word from an aui32CountBlksStream field. ++ * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit ++ * within group (3-0) */ ++#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT)) ++ ++/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address ++ * and stream index. May be used in decoding the counter block stream words of ++ * a RGX_HWPERF_HW_DATA structure. */ ++#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)]) ++ ++/*! Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */ ++#define RGX_HWPERF_GET_CNTBLK_GPUW(_word) ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT)) ++ ++#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK))) ++ ++/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address ++ * and stream index. May be used in decoding the counter block stream words ++ * of a RGX_HWPERF_HW_DATA structure. */ ++#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)]) ++ ++/*! 
Context switch packet event */
++typedef struct
++{
++ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */
++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
++ IMG_UINT32 ui32FrameNum; /*!< Client Frame number (TA, 3D only) */
++ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
++ IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */
++ IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */
++ IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */
++} RGX_HWPERF_CSW_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA);
++
++/*! Enumeration of clocks supporting this event */
++typedef enum
++{
++ RGX_HWPERF_CLKS_CHG_INVALID = 0,
++
++ RGX_HWPERF_CLKS_CHG_NAME_CORE = 1,
++
++ RGX_HWPERF_CLKS_CHG_LAST,
++} RGX_HWPERF_CLKS_CHG_NAME;
++
++/*! This structure holds the data of a clocks change packet. */
++typedef struct
++{
++ IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */
++ RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */
++ IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */
++ IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */
++ IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and
++ correlated to OSTimeStamp */
++} RGX_HWPERF_CLKS_CHG_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA);
++
++/*! Enumeration of GPU utilisation states supported by this event */
++typedef IMG_UINT32 RGX_HWPERF_GPU_STATE;
++
++/*! This structure holds the data of a GPU utilisation state change packet. */
++typedef struct
++{
++ RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */
++ IMG_UINT32 uiUnused1; /*!< Padding */
++ IMG_UINT32 uiUnused2; /*!< Padding */
++ IMG_UINT32 uiUnused3; /*!< Padding */
++} RGX_HWPERF_GPU_STATE_CHG_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA);
++
++
++/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */
++#define HWPERF_PWR_EST_V1_SIG 0x48504531
++
++/*! Macros to obtain a component field from a counter ID word */
++#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31)
++#define RGX_HWPERF_GET_PWR_EST_GPUID(_word) (((_word)&0x70000000)>>28)
++/*!< Obtains the GPU ID from a counter ID word */
++#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24)
++#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF)
++
++#define RGX_HWPERF_PWR_EST_HIGH_OFFSET (31)
++#define RGX_HWPERF_PWR_EST_GPUID_OFFSET (28)
++#define RGX_HWPERF_PWR_EST_GPUID_MASK (0x7U)
++#define RGX_HWPERF_PWR_EST_UNIT_OFFSET (24)
++#define RGX_HWPERF_PWR_EST_UNIT_MASK (0xFU)
++#define RGX_HWPERF_PWR_EST_VALUE_MASK (0xFFFFU)
++
++/*! This macro constructs a counter ID for a power estimate data stream from
++ * the component parts of: high word flag, unit id, GPU id, counter number */
++#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \
++ ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<<RGX_HWPERF_PWR_EST_HIGH_OFFSET) | \
++ ((IMG_UINT32)((IMG_UINT32)(_core)&RGX_HWPERF_PWR_EST_GPUID_MASK)<<RGX_HWPERF_PWR_EST_GPUID_OFFSET) | \
++ ((IMG_UINT32)((IMG_UINT32)(_unit)&RGX_HWPERF_PWR_EST_UNIT_MASK)<<RGX_HWPERF_PWR_EST_UNIT_OFFSET) | \
++ ((IMG_UINT32)(_number)&RGX_HWPERF_PWR_EST_VALUE_MASK)))
++
++#define RGX_HWPERF_MAX_BVNC_LEN (24)
++
++static_assert((RGX_HWPERF_MAX_BVNC_LEN >= RGX_BVNC_STR_SIZE_MAX),
++ "Space inside HWPerf packet data for BVNC string insufficient");
++
++#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (20U)
++
++/*! BVNC Features */
++typedef struct
++{
++ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
++ IMG_UINT16 ui16BlockID;
++
++ /*! Number of counters in this block type */
++ IMG_UINT16 ui16NumCounters;
++
++ /*! Number of blocks of this type */
++ IMG_UINT16 ui16NumBlocks;
++
++ /*!
Reserved for future use */ ++ IMG_UINT16 ui16Reserved; ++} RGX_HWPERF_BVNC_BLOCK; ++ ++/*! BVNC Features */ ++typedef struct ++{ ++ IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */ ++ IMG_UINT32 ui32BvncKmFeatureFlags; /*!< See RGX_HWPERF_FEATURE_FLAGS */ ++ IMG_UINT16 ui16BvncBlocks; /*!< Number of blocks described in aBvncBlocks */ ++ IMG_UINT16 ui16BvncGPUCores; /*!< Number of GPU cores present */ ++ RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. See RGX_HWPERF_BVNC_BLOCK */ ++} RGX_HWPERF_BVNC; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC); ++ ++/*! Performance Counter Configuration data element. */ ++typedef struct ++{ ++ IMG_UINT32 ui32BlockID; /*!< Counter Block ID. See RGX_HWPERF_CNTBLK_ID */ ++ IMG_UINT32 ui32NumCounters; /*!< Number of counters configured */ ++ IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; /*!< Counters configured (ui32NumCounters worth of entries) */ ++} RGX_HWPERF_COUNTER_CFG_DATA_EL; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL); ++ ++/*! Performance Counter Configuration data. */ ++typedef struct ++{ ++ IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */ ++ RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data. See RGX_HWPERF_COUNTER_CFG_DATA_EL */ ++ IMG_UINT32 ui32Padding; /*!< reserved */ ++} RGX_HWPERF_COUNTER_CFG; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG); ++ ++/*! Sub-event's data. */ ++typedef union ++{ ++ struct ++ { ++ RGX_HWPERF_DM eDM; /*!< Data Master ID. */ ++ RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ ++ IMG_UINT32 ui32DMContext; /*!< FW render context */ ++ } sHWR; /*!< HWR sub-event data. */ ++ ++ RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features. See RGX_HWPERF_BVNC */ ++ struct ++ { ++ IMG_UINT32 ui32EvMaskLo; /*!< Low order 32 bits of Filter Mask */ ++ IMG_UINT32 ui32EvMaskHi; /*!< High order 32 bits of Filter Mask */ ++ } sEvMsk; /*!< HW Filter Mask */ ++ RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */ ++} RGX_HWPERF_FWACT_DETAIL; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); ++ ++/*! This structure holds the data of a FW activity event packet */ ++typedef struct ++{ ++ RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ ++ RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. */ ++} RGX_HWPERF_FWACT_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); ++ ++ ++typedef enum { ++ RGX_HWPERF_UFO_EV_UPDATE, /*!< Update on the UFO objects. */ ++ RGX_HWPERF_UFO_EV_CHECK_SUCCESS, /*!< Successful check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, /*!< Successful partial render check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_CHECK_FAIL, /*!< Unsuccessful check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_PRCHECK_FAIL, /*!< Unsuccessful partial render check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_FORCE_UPDATE, /*!< Forced erroring of the UFO objects. */ ++ ++ RGX_HWPERF_UFO_EV_LAST /*!< Reserved. Do not use. */ ++} RGX_HWPERF_UFO_EV; ++ ++/*! Data stream tuple. 
*/ ++typedef union ++{ ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ ++ IMG_UINT32 ui32Value; /*!< Value of the UFO object */ ++ } sCheckSuccess; ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ ++ IMG_UINT32 ui32Value; /*!< Value of the UFO object */ ++ IMG_UINT32 ui32Required; /*!< Value of the UFO object required by the fence */ ++ } sCheckFail; ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ ++ IMG_UINT32 ui32OldValue; /*!< Value of UFO object before update */ ++ IMG_UINT32 ui32NewValue; /*!< Value of UFO object after update */ ++ } sUpdate; ++} RGX_HWPERF_UFO_DATA_ELEMENT; ++ ++/*! This structure holds the packet payload data for UFO event. */ ++typedef struct ++{ ++ RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event. See RGX_HWPERF_UFO_EV */ ++ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the timer correlation data ++ at the time the packet was generated. ++ Used to approximate Host timestamps for ++ these events. */ ++ IMG_UINT32 ui32PID; /*!< Client process identifier */ ++ IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX ++ API to track submitted work (for ++ debugging/trace purposes) */ ++ IMG_UINT32 ui32IntJobRef; /*!< Internal reference used to track ++ submitted work (for debugging / trace ++ purposes) */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ ++ IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the ++ stream and stream data offset in the ++ payload */ ++ RGX_HWPERF_DM eDM; /*!< Data Master number, see RGX_HWPERF_DM */ ++ IMG_UINT32 ui32Padding; /*!< Unused, reserved */ ++ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Series of tuples holding UFO objects data */ ++} RGX_HWPERF_UFO_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA); ++ ++ ++/*! ++ * RGX_HWPERF_KICK_TYPE describes the type of kick for events received / sent ++ * between KICK_START / KICK_END inclusively for all event types. ++ */ ++typedef enum ++{ ++ RGX_HWPERF_KICK_TYPE_TA3D, /*!< Replaced by separate TA and 3D types */ ++ RGX_HWPERF_KICK_TYPE_CDM, /*!< Compute Data Master Kick */ ++ RGX_HWPERF_KICK_TYPE_RS, /*!< Ray Store Kick */ ++ RGX_HWPERF_KICK_TYPE_SHG, /*!< Scene Hierarchy Generator Kick */ ++ RGX_HWPERF_KICK_TYPE_TQTDM, /*!< TQ 2D Data Master Kick */ ++ RGX_HWPERF_KICK_TYPE_SYNC, /*!< Sync Kick */ ++ RGX_HWPERF_KICK_TYPE_TA, /*!< TA Kick */ ++ RGX_HWPERF_KICK_TYPE_3D, /*!< 3D Kick */ ++ RGX_HWPERF_KICK_TYPE_LAST, ++ ++ RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff ++} RGX_HWPERF_KICK_TYPE; ++ ++typedef struct ++{ ++ RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for ++ scheduling on GPU hardware. 
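++ (e.g. RGX_HWPERF_KICK_TYPE_CDM for a compute kick).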
++ See RGX_HWPERF_KICK_TYPE */ ++ IMG_UINT32 ui32PID; /*!< Client process identifier */ ++ IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX API ++ to track submitted work (for debugging / ++ trace purposes) */ ++ IMG_UINT32 ui32IntJobRef; /*!< internal reference used to track submitted ++ work (for debugging / trace purposes) */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ ++ IMG_UINT32 ui32Padding; /*!< Unused, reserved */ ++ IMG_UINT64 ui64CheckFence_UID; /*!< ID of fence gating work execution on GPU */ ++ IMG_UINT64 ui64UpdateFence_UID; /*!< ID of fence triggered after work completes on GPU */ ++ IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ ++ IMG_UINT32 ui32CycleEstimate; /*!< Estimated cycle time for the workload */ ++ PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ ++ PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ ++ PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ ++ ++ /* Align structure size to 8 bytes */ ++} RGX_HWPERF_HOST_ENQ_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef struct ++{ ++ RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event */ ++ IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the stream and ++ stream data offset in the payload */ ++#ifdef __CHECKER__ ++ /* Since we're not conforming to the C99 standard by not using a flexible ++ * array member need to add a special case for Smatch static code analyser. */ ++ IMG_UINT32 aui32StreamData[]; ++#else ++ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; ++ /*!< Series of tuples holding UFO objects data */ ++ ++ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ ++#endif ++} RGX_HWPERF_HOST_UFO_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/*! ++ * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been ++ * Allocated, Freed or Modified. The values are used to determine which event ++ * data structure to use to decode the data from the event stream ++ */ ++typedef enum ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, /*!< Invalid */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /*!< SyncPrim */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, ++ /*!< Timeline resource packets are ++ now emitted in client hwperf buffer */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, /*!< Sync Checkpoint */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /*!< Fence created on SW timeline */ ++ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_LAST /*!< End of enumeration */ ++} RGX_HWPERF_HOST_RESOURCE_TYPE; ++ ++typedef union ++{ ++ /*! Data for TYPE_TIMELINE (*Deprecated*). This sub-event is no longer ++ * generated in the HOST stream. Timeline data is now provided in the ++ * CLIENT stream instead. 
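++ * Consumers can determine which union member is valid from the enclosing
++ * packet's ui32AllocType field (see RGX_HWPERF_HOST_ALLOC_DATA below).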
++ */ ++ struct ++ { ++ IMG_UINT32 uiPid; /*!< Identifier of owning process */ ++ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ } sTimelineAlloc; ++ ++ /*! Data for TYPE_FENCE_PVR */ ++ struct ++ { ++ IMG_PID uiPID; /*!< Identifier of owning process */ ++ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ ++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point ++ backing this fence on the GPU */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sFenceAlloc; ++ ++ /*! Data for TYPE_SYNC_CP */ ++ struct ++ { ++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ ++ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ ++ IMG_PID uiPID; /*!< Identifier of owning process */ ++ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sSyncCheckPointAlloc; ++ ++ /*! Data for TYPE_FENCE_SW */ ++ struct ++ { ++ IMG_PID uiPID; /*!< Identifier of owning process */ ++ PVRSRV_FENCE hSWFence; /*!< Unique identifier for the SWFence resource */ ++ PVRSRV_TIMELINE hSWTimeline; /*!< Unique identifier for the timeline resource */ ++ IMG_UINT64 ui64SyncPtIndex; /*!< Sync-pt index where this SW timeline has reached */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sSWFenceAlloc; ++ ++ /*! Data for TYPE_SYNC */ ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< Identifier of sync resource */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sSyncAlloc; ++} RGX_HWPERF_HOST_ALLOC_DETAIL; ++ ++typedef struct ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; ++ /*!< This describes the type of the resource ++ allocated in the driver. See ++ RGX_HWPERF_HOST_RESOURCE_TYPE */ ++ RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; ++ /*!< Union of structures providing further ++ data regarding the resource allocated. ++ Size of data varies with union member that ++ is present, check ``ui32AllocType`` value ++ to decode */ ++} RGX_HWPERF_HOST_ALLOC_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef union ++{ ++ /*! Data for TYPE_TIMELINE (*Deprecated*) */ ++ struct ++ { ++ IMG_UINT32 uiPid; /*!< Identifier of owning process */ ++ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ } sTimelineDestroy; ++ ++ /*! Data for TYPE_FENCE_PVR */ ++ struct ++ { ++ IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. */ ++ } sFenceDestroy; ++ ++ /*! Data for TYPE_SYNC_CP */ ++ struct ++ { ++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ ++ } sSyncCheckPointFree; ++ ++ /*! 
++typedef union
++{
++ /*! Data for TYPE_TIMELINE (*Deprecated*) */
++ struct
++ {
++ IMG_UINT32 uiPid; /*!< Identifier of owning process */
++ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */
++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */
++ } sTimelineDestroy;
++
++ /*! Data for TYPE_FENCE_PVR */
++ struct
++ {
++ IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */
++ IMG_UINT32 ui32Padding; /*!< Reserved. */
++ } sFenceDestroy;
++
++ /*! Data for TYPE_SYNC_CP */
++ struct
++ {
++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */
++ } sSyncCheckPointFree;
++
++ /*! Data for TYPE_SYNC */
++ struct
++ {
++ IMG_UINT32 ui32FWAddr; /*!< Unique identifier for the sync resource */
++ } sSyncFree;
++} RGX_HWPERF_HOST_FREE_DETAIL;
++
++typedef struct
++{
++ RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType;
++ /*!< This describes the type of the resource
++ freed or released by the driver. See
++ RGX_HWPERF_HOST_RESOURCE_TYPE */
++ RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail;
++ /*!< Union of structures providing further data
++ regarding the resource freed. Size of data
++ varies with union member that is present,
++ check ``ui32FreeType`` value to decode */
++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */
++} RGX_HWPERF_HOST_FREE_DATA;
++
++/* Payload size must be multiple of 8 bytes to align start of next packet. */
++static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
++ "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
++
++typedef struct
++{
++ IMG_UINT64 ui64CRTimestamp; /*!< CR timer value from the latest entry of
++ the time domains correlation table */
++ IMG_UINT64 ui64OSTimestamp; /*!< OS timestamp from the latest entry of the
++ time domains correlation table */
++ IMG_UINT32 ui32ClockSpeed; /*!< GPU clock speed from the latest entry of
++ the time domains correlation table */
++ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */
++} RGX_HWPERF_HOST_CLK_SYNC_DATA;
++
++/* Payload size must be multiple of 8 bytes to align start of next packet. */
++static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
++ "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
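++
++/* Illustrative sketch, not part of the original header: projecting a raw CR
++ * timer value onto the OS time domain using the latest correlation entry.
++ * The CR tick period is device specific (commonly a fixed divider of the GPU
++ * clock), so ui64NsPerCRTick is a caller-supplied assumption here.
++ */
++#if 0
++static IMG_UINT64 CRTimerToOSNs(const RGX_HWPERF_HOST_CLK_SYNC_DATA *psSync,
++                                IMG_UINT64 ui64CRValue,
++                                IMG_UINT64 ui64NsPerCRTick)
++{
++	/* Extrapolate forward from the correlation point. */
++	return psSync->ui64OSTimestamp +
++	       ((ui64CRValue - psSync->ui64CRTimestamp) * ui64NsPerCRTick);
++}
++#endif
++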
++typedef union
++{
++ /*! Data for TYPE_FENCE_PVR */
++ struct
++ {
++ IMG_UINT64 ui64NewFence_UID; /*!< Unique identifier for the new merged fence
++ resource that has been created */
++ IMG_UINT64 ui64InFence1_UID; /*!< Unique identifier for the first input fence resource */
++ IMG_UINT64 ui64InFence2_UID; /*!< Unique identifier for the second input fence resource */
++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
++ /*!< Label or name given to the sync resource */
++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */
++ } sFenceMerge;
++} RGX_HWPERF_HOST_MODIFY_DETAIL;
++
++typedef struct
++{
++ RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType;
++ /*!< Describes the type of the resource
++ modified by the driver. See
++ RGX_HWPERF_HOST_RESOURCE_TYPE */
++
++ RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail;
++ /*!< Union of structures providing further
++ data regarding the resource modified.
++ Size of data varies with union member that
++ is present.
++ Check ``ui32ModifyType`` value to decode */
++} RGX_HWPERF_HOST_MODIFY_DATA;
++
++/* Payload size must be multiple of 8 bytes to align start of next packet. */
++static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
++ "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
++
++typedef enum
++{
++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING, /*!< Device responding to requests */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */
++
++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST
++} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS;
++
++typedef enum
++{
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, /*!< Invalid */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, /*!< No underlying health reason. */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, /*!< Device has asserted. */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, /*!< Device poll has failed. */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, /*!< Device timeout has fired. */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, /*!< Queue has become corrupt. */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, /*!< Queue has stalled. */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, /*!< Device is idling. */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, /*!< Device restarting. */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */
++
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST
++} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON;
++
++/*! RGX_HWPERF_DEV_INFO_EV values */
++typedef enum
++{
++ RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */
++
++ RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */
++} RGX_HWPERF_DEV_INFO_EV;
++
++/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing
++ * further data regarding the device's status
++ */
++typedef union
++{
++ /*! Data for device status event */
++ struct
++ {
++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus;
++ /*!< Device's health status */
++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason;
++ /*!< Reason for device's health status */
++ } sDeviceStatus;
++} RGX_HWPERF_HOST_DEV_INFO_DETAIL;
++
++/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */
++typedef struct
++{
++ IMG_UINT32 ui32Padding;
++ /*!< Reserved. Align structure size to 8 bytes */
++ RGX_HWPERF_DEV_INFO_EV eEvType;
++ /*!< Type of the sub-event. See
++ RGX_HWPERF_DEV_INFO_EV */
++ RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail;
++ /*!< Union of structures providing further data
++ regarding the device's status. Size of data
++ varies with union member that is present,
++ check ``eEvType`` value to decode */
++} RGX_HWPERF_HOST_DEV_INFO_DATA;
++
++/* Payload size must be multiple of 8 bytes to align start of next packet. */
++static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
++ "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
++
++/*! RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */
++typedef enum
++{
++ RGX_HWPERF_INFO_EV_MEM_USAGE, /*!< Memory usage event */
++ RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */
++} RGX_HWPERF_INFO_EV;
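++
++/* Illustrative sketch, not part of the original header: reducing the health
++ * status of a DEV_INFO sub-event to a printable string. Only the enumerators
++ * defined above are used; unknown values fall back to "UNDEFINED".
++ */
++#if 0
++static const IMG_CHAR *HealthStatusString(RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eStatus)
++{
++	switch (eStatus)
++	{
++		case RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK:         return "OK";
++		case RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING: return "RESPONDING";
++		case RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD:       return "DEAD";
++		case RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT:      return "FAULT";
++		default:                                              return "UNDEFINED";
++	}
++}
++#endif
++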
++/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the
++ * RGX_HWPERF_HOST_INFO_DATA event.
++ */
++typedef union
++{
++ /*! Host Memory usage statistics */
++ struct
++ {
++ IMG_UINT32 ui32TotalMemoryUsage; /*!< Total memory usage */
++ /*! Detailed memory usage */
++ struct
++ {
++ IMG_UINT32 ui32Pid; /*!< Process ID */
++ IMG_UINT32 ui32KernelMemUsage; /*!< Kernel memory usage */
++ IMG_UINT32 ui32GraphicsMemUsage; /*!< GPU memory usage */
++ } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS];
++ } sMemUsageStats;
++} RGX_HWPERF_HOST_INFO_DETAIL;
++
++/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device
++ * memory usage information.
++ */
++typedef struct
++{
++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */
++ RGX_HWPERF_INFO_EV eEvType; /*!< Type of subevent. See RGX_HWPERF_INFO_EV */
++ RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail;
++ /*!< Union of structures providing further data
++ regarding memory usage. Size varies with union
++ member that is present, check ``eEvType``
++ value to decode */
++} RGX_HWPERF_HOST_INFO_DATA;
++
++/* Payload size must be multiple of 8 bytes to align start of next packet. */
++static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
++ "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
++
++/*! FENCE_WAIT_TYPE definitions */
++typedef enum
++{
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, /*!< Begin */
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, /*!< End */
++
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, /*!< Do not use */
++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE;
++
++/*! FENCE_WAIT_RESULT definitions */
++typedef enum
++{
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, /*!< Timed Out */
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, /*!< Passed */
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, /*!< Errored */
++
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, /*!< Do not use */
++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT;
++
++/*! FENCE_WAIT_DETAIL Event Payload */
++typedef union
++{
++ /*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */
++ struct
++ {
++ IMG_UINT32 ui32TimeoutInMs; /*!< Wait timeout (ms) */
++ } sBegin;
++
++ /*! Data for SYNC_FENCE_WAIT_TYPE_END */
++ struct
++ {
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */
++ } sEnd;
++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL;
++
++/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure
++ * is received whenever the host driver handles a wait for sync event request.
++ */
++typedef struct
++{
++ IMG_PID uiPID; /*!< Identifier of the owning process */
++ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType;
++ /*!< Type of the subevent, see
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail;
++ /*!< Union of structures providing further data
++ regarding device's status. Size of data varies with
++ union member that is present, check ``eType`` value
++ to decode */
++
++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA;
++
++static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
++ "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
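++
++/* Illustrative sketch, not part of the original header: pairing BEGIN and END
++ * sub-events to measure how long a process blocked on a fence. Packet
++ * timestamps live in the enclosing stream headers, so ui64PacketTimestamp is
++ * assumed to be supplied by the reader; RememberWaitStart() and
++ * RecordWaitDuration() are hypothetical bookkeeping helpers keyed on
++ * (PID, fence).
++ */
++#if 0
++static void AccountFenceWait(const RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psWait,
++                             IMG_UINT64 ui64PacketTimestamp)
++{
++	if (psWait->eType == RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN)
++	{
++		RememberWaitStart(psWait->uiPID, psWait->hFence, ui64PacketTimestamp);
++	}
++	else if (psWait->eType == RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END)
++	{
++		/* Duration = this END timestamp minus the remembered BEGIN one;
++		 * uDetail.sEnd.eResult tells whether the wait passed or timed out. */
++		RecordWaitDuration(psWait->uiPID, psWait->hFence, ui64PacketTimestamp,
++		                   psWait->uDetail.sEnd.eResult);
++	}
++}
++#endif
++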
++/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA.
++ * Software Timeline Advanced Event Payload. This data structure is received
++ * whenever the host driver processes a Software Timeline Advanced event.
++ */
++typedef struct
++{
++ IMG_PID uiPID; /*!< Identifier of the owning process */
++ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */
++ IMG_UINT64 ui64SyncPtIndex; /*!< Index of the sync point to which the
++ timeline has advanced */
++
++} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA;
++
++static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
++ "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
++
++typedef enum
++{
++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_INVALID = 0, /*!< Invalid */
++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME, /*!< Process Name */
++
++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_LAST, /*!< Do not use */
++} RGX_HWPERF_HOST_CLIENT_INFO_TYPE;
++
++typedef struct
++{
++ IMG_PID uiClientPID; /*!< Client process identifier */
++ IMG_UINT32 ui32Length; /*!< Number of bytes present in ``acName`` */
++ IMG_CHAR acName[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Process name string, null terminated */
++} RGX_HWPERF_HOST_CLIENT_PROC_NAME;
++
++#define RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen) \
++ ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + (ui32NameLen)))
++
++typedef union
++{
++ struct
++ {
++ IMG_UINT32 ui32Count; /*!< Number of elements in ``asProcNames`` */
++ RGX_HWPERF_HOST_CLIENT_PROC_NAME asProcNames[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
++ } sProcName;
++} RGX_HWPERF_HOST_CLIENT_INFO_DETAIL;
++
++typedef struct
++{
++ IMG_UINT32 uiReserved1; /*!< Reserved. Align structure size to 8 bytes */
++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE eType;
++ /*!< Type of the subevent, see
++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE */
++ RGX_HWPERF_HOST_CLIENT_INFO_DETAIL uDetail;
++ /*!< Union of structures. Size of data
++ varies with union member that is present,
++ check ``eType`` value to decode */
++
++} RGX_HWPERF_HOST_CLIENT_INFO_DATA;
++
++static_assert((sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U,
++ "sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
++
++typedef enum
++{
++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE,
++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER,
++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS,
++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA,
++
++ RGX_HWPERF_RESOURCE_TYPE_COUNT
++} RGX_HWPERF_RESOURCE_CAPTURE_TYPE;
++
++typedef struct
++{
++ IMG_UINT32 ui32Height;
++ IMG_UINT32 ui32Width;
++ IMG_UINT32 ui32BPP;
++ IMG_UINT32 ui32PixFormat;
++} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO;
++
++typedef struct
++{
++ IMG_INT32 i32XOffset; /*!< render surface X shift */
++ IMG_INT32 i32YOffset; /*!< render surface Y shift */
++ IMG_UINT32 ui32WidthInTiles; /*!< number of TLT data points in X */
++ IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */
++} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO;
++
++typedef union
++{
++ struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES
++ {
++ IMG_UINT32 ui32RenderSurfaceCount;
++ RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
++ } sRenderSurfaces;
++
++ struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS
++ {
++ RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
++ } sTLTBuffers;
++} RGX_RESOURCE_CAPTURE_DETAIL;
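++
++/* Illustrative sketch, not part of the original header: walking the
++ * variable-length process-name records of a CLIENT_INFO event with the
++ * RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE() macro defined above. Any
++ * inter-record alignment padding is ignored here and ConsumeProcName() is a
++ * hypothetical consumer callback.
++ */
++#if 0
++static void WalkProcNames(const RGX_HWPERF_HOST_CLIENT_INFO_DATA *psData)
++{
++	const RGX_HWPERF_HOST_CLIENT_PROC_NAME *psRec =
++	    psData->uDetail.sProcName.asProcNames;
++	IMG_UINT32 i;
++
++	if (psData->eType != RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME)
++	{
++		return;
++	}
++
++	for (i = 0; i < psData->uDetail.sProcName.ui32Count; i++)
++	{
++		ConsumeProcName(psRec->uiClientPID, psRec->acName);
++		/* Records are variable length: advance by the encoded size. */
++		psRec = (const RGX_HWPERF_HOST_CLIENT_PROC_NAME *)
++		    ((const IMG_BYTE *)psRec +
++		     RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(psRec->ui32Length));
++	}
++}
++#endif
++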
++typedef struct
++{
++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType;
++ IMG_PID uPID;
++ IMG_UINT32 ui32ContextID;
++ IMG_UINT32 ui32FrameNum;
++ IMG_UINT32 ui32CapturedTaskJobRef; /* The job ref of the HW task that emitted the data */
++ IMG_INT32 eClientModule; /* RGX_HWPERF_CLIENT_API - ID of the client API the capture originates from. */
++ RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the valid member of the union */
++} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO;
++
++#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail)
++
++/*! Tile Lifetime Tracking header size. Only available if
++ * RGX_FEATURE_ISP_TILE_LIFETIME_TRACKING is present and enabled via
++ * SUPPORT_TLT_PERF
++ */
++#define RGX_TLT_HARDWARE_HDR_SIZE (16U)
++
++/* PVRSRVGetHWPerfResourceCaptureResult */
++typedef enum
++{
++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0,
++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK, /* We got data ok, expect more packets for this request. */
++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY, /* Signals a timeout on the connection - no data available yet. */
++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS, /* The request completed successfully, signals the end of packets for the request. */
++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE /* The request failed, signals the end of packets for the request. */
++} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS;
++
++typedef struct
++{
++ IMG_PID uPID; /* In the case of a failed request, the PID and context ID are passed back to the caller. */
++ IMG_UINT32 ui32CtxID;
++ RGX_RESOURCE_CAPTURE_INFO *psInfo; /* Various meta-data regarding the captured resource which aids the requester
++ when unpacking the resource data; valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */
++ IMG_BYTE *pbData; /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */
++} RGX_RESOURCE_CAPTURE_RESULT;
++
++/*! This type is a union of packet payload data structures associated with
++ * various FW and Host events */
++typedef union
++{
++ RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data,
++ events ``0x01-0x06`` */
++ RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data,
++ events ``0x07-0x19``, ``0x28-0x29``
++ See RGX_HWPERF_HW_DATA */
++ RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet
++ data, events ``0x1A`` */
++ RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state
++ change event packet data,
++ events ``0x1B`` */
++ RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event
++ packet data,
++ events ``0x20-0x22`` */
++ RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data,
++ events ``0x23`` */
++ RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data,
++ events ``0x30-0x31`` */
++ RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data,
++ events ``0x32`` */
++ RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data, events ``0x38`` */
++ RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event
++ packet data,
++ events ``0x39`` */
++ /* */
++ RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data,
++ events ``0x01`` (Host) */
++ RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data,
++ events ``0x02`` (Host) */
++ RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data,
++ events ``0x03`` (Host) */
++ RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data,
++ events ``0x04`` (Host) */
++ RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data,
++ events ``0x05`` (Host) */
++ RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data,
++ events ``0x06`` (Host) */
++ RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data,
++ events ``0x07`` (Host) */
++ RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data,
++ events ``0x08`` (Host) */
++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data,
++ events ``0x09`` (Host) */
++ RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance
++ data, events ``0x0A`` (Host) */
++ RGX_HWPERF_HOST_CLIENT_INFO_DATA sHClientInfo; /*!< Host client info,
++ events ``0x0B`` (Host) */
++} RGX_HWPERF_V2_PACKET_DATA, *RGX_PHWPERF_V2_PACKET_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA);
++
++#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR))))
++
++#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \
++ ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType))))
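++
++/* Illustrative sketch, not part of the original header: iterating a buffer of
++ * V2 packets with RGX_HWPERF_GET_PACKET_DATA(). RGX_HWPERF_GET_TYPE() and
++ * RGX_HWPERF_GET_SIZE() are companion header accessors that do not appear in
++ * this hunk, so treat them (and HandleClkSync(), plus the Host event
++ * enumerator) as assumptions.
++ */
++#if 0
++static void WalkPackets(IMG_BYTE *pbStream, IMG_UINT32 ui32Bytes)
++{
++	IMG_BYTE *pbCursor = pbStream;
++
++	while (pbCursor < pbStream + ui32Bytes)
++	{
++		RGX_HWPERF_V2_PACKET_HDR *psHdr = (RGX_HWPERF_V2_PACKET_HDR *)pbCursor;
++		RGX_PHWPERF_V2_PACKET_DATA psData = RGX_HWPERF_GET_PACKET_DATA(psHdr);
++
++		switch (RGX_HWPERF_GET_TYPE(psHdr))
++		{
++			case RGX_HWPERF_HOST_CLK_SYNC: /* event 0x04 (Host) */
++				HandleClkSync(&psData->sHCLKSYNC);
++				break;
++			default:
++				break;
++		}
++		/* Payloads are 8-byte aligned, so the packet size covers padding. */
++		pbCursor += RGX_HWPERF_GET_SIZE(psHdr);
++	}
++}
++#endif
++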
++/******************************************************************************
++ * API Types
++ *****************************************************************************/
++
++/*! Counter block IDs for all the hardware blocks with counters.
++ * Directly addressable blocks must have a value between 0..15 [0..0xF].
++ * Indirect groups have the following encoding:
++ * The first hex digit (LSB) represents a unit number within the group
++ * and the second hex digit represents the group number.
++ * Group 0 is the direct group, all others are indirect groups.
++ */
++typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID;
++
++/*! Directly addressable non bank-switched counter blocks */
++#define RGX_CNTBLK_ID_JONES 0x0000U
++#define RGX_CNTBLK_ID_SLC 0x0001U /*!< SLC-specific counter control */
++#define RGX_CNTBLK_ID_FBCDC 0x0002U
++#define RGX_CNTBLK_ID_FW_CUSTOM 0x0003U /*!< Custom FW provided counters */
++
++/*! Directly addressable SLC counter blocks - presence depends on GPU. */
++#define RGX_CNTBLK_ID_SLCBANK0 0x0004U /*!< SLCBANK0 counter control */
++#define RGX_CNTBLK_ID_SLCBANK1 0x0005U /*!< SLCBANK1 counter control */
++#define RGX_CNTBLK_ID_SLCBANK2 0x0006U /*!< SLCBANK2 counter control */
++#define RGX_CNTBLK_ID_SLCBANK3 0x0007U /*!< SLCBANK3 counter control */
++#define RGX_CNTBLK_ID_SLCBANK_ALL 0x4004U /*!< SLC ALL block ID */
++
++#define RGX_CNTBLK_ID_DIRECT_LAST 0x0008U /*!< Indirect blocks start from here */
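++
++/* Illustrative sketch, not part of the original header: splitting a counter
++ * block ID into its group and unit fields. The literal masks match the
++ * RGX_CNTBLK_ID_*_MASK values defined further down this header; for example
++ * 0x0053U decodes as group 5 (USC), unit 3, and RGX_CNTBLK_ID_SLCBANK_ALL
++ * (0x4004U) is block 4 with the "all units" bit set. UseDecodedId() is a
++ * hypothetical consumer.
++ */
++#if 0
++static void DecodeBlockId(RGX_HWPERF_CNTBLK_ID eBlkId)
++{
++	IMG_UINT32 ui32Unit  = eBlkId & 0x000FU;         /* RGX_CNTBLK_ID_UNIT_MASK */
++	IMG_UINT32 ui32Group = (eBlkId & 0x00F0U) >> 4;  /* RGX_CNTBLK_ID_GROUP_MASK */
++	IMG_BOOL   bAllUnits = (eBlkId & 0x4000U) != 0U; /* RGX_CNTBLK_ID_UNIT_ALL_MASK */
++
++	UseDecodedId(ui32Group, ui32Unit, bAllUnits);
++}
++#endif
++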
++/*! Indirectly addressable counter blocks */
++#define RGX_CNTBLK_ID_ISP0 0x0010U /*!< ISP 1..N ISP */
++#define RGX_CNTBLK_ID_ISP1 0x0011U
++#define RGX_CNTBLK_ID_ISP2 0x0012U
++#define RGX_CNTBLK_ID_ISP3 0x0013U
++#define RGX_CNTBLK_ID_ISP4 0x0014U
++#define RGX_CNTBLK_ID_ISP5 0x0015U
++#define RGX_CNTBLK_ID_ISP6 0x0016U
++#define RGX_CNTBLK_ID_ISP7 0x0017U
++#define RGX_CNTBLK_ID_ISP_ALL 0x4010U
++
++#define RGX_CNTBLK_ID_MERCER0 0x0020U /*!< MERCER 1..N MERCER */
++#define RGX_CNTBLK_ID_MERCER1 0x0021U
++#define RGX_CNTBLK_ID_MERCER2 0x0022U
++#define RGX_CNTBLK_ID_MERCER3 0x0023U
++#define RGX_CNTBLK_ID_MERCER4 0x0024U
++#define RGX_CNTBLK_ID_MERCER5 0x0025U
++#define RGX_CNTBLK_ID_MERCER6 0x0026U
++#define RGX_CNTBLK_ID_MERCER7 0x0027U
++#define RGX_CNTBLK_ID_MERCER_ALL 0x4020U
++
++#define RGX_CNTBLK_ID_PBE0 0x0030U /*!< PBE 1..N PBE_PER_SPU x N SPU */
++#define RGX_CNTBLK_ID_PBE1 0x0031U
++#define RGX_CNTBLK_ID_PBE2 0x0032U
++#define RGX_CNTBLK_ID_PBE3 0x0033U
++#define RGX_CNTBLK_ID_PBE4 0x0034U
++#define RGX_CNTBLK_ID_PBE5 0x0035U
++#define RGX_CNTBLK_ID_PBE6 0x0036U
++#define RGX_CNTBLK_ID_PBE7 0x0037U
++#define RGX_CNTBLK_ID_PBE_ALL 0x4030U
++
++#define RGX_CNTBLK_ID_PBE_SHARED0 0x0040U /*!< PBE_SHARED 1..N SPU */
++#define RGX_CNTBLK_ID_PBE_SHARED1 0x0041U
++#define RGX_CNTBLK_ID_PBE_SHARED2 0x0042U
++#define RGX_CNTBLK_ID_PBE_SHARED3 0x0043U
++#define RGX_CNTBLK_ID_PBE_SHARED_ALL 0x4040U
++
++#define RGX_CNTBLK_ID_USC0 0x0050U /*!< USC 1..N USC */
++#define RGX_CNTBLK_ID_USC1 0x0051U
++#define RGX_CNTBLK_ID_USC2 0x0052U
++#define RGX_CNTBLK_ID_USC3 0x0053U
++#define RGX_CNTBLK_ID_USC4 0x0054U
++#define RGX_CNTBLK_ID_USC5 0x0055U
++#define RGX_CNTBLK_ID_USC6 0x0056U
++#define RGX_CNTBLK_ID_USC7 0x0057U
++#define RGX_CNTBLK_ID_USC_ALL 0x4050U
++
++#define RGX_CNTBLK_ID_TPU0 0x0060U /*!< TPU 1..N TPU */
++#define RGX_CNTBLK_ID_TPU1 0x0061U
++#define RGX_CNTBLK_ID_TPU2 0x0062U
++#define RGX_CNTBLK_ID_TPU3 0x0063U
++#define RGX_CNTBLK_ID_TPU4 0x0064U
++#define RGX_CNTBLK_ID_TPU5 0x0065U
++#define RGX_CNTBLK_ID_TPU6 0x0066U
++#define RGX_CNTBLK_ID_TPU7 0x0067U
++#define RGX_CNTBLK_ID_TPU_ALL 0x4060U
++
++#define RGX_CNTBLK_ID_SWIFT0 0x0070U /*!< SWIFT 1..N SWIFT */
++#define RGX_CNTBLK_ID_SWIFT1 0x0071U
++#define RGX_CNTBLK_ID_SWIFT2 0x0072U
++#define RGX_CNTBLK_ID_SWIFT3 0x0073U
++#define RGX_CNTBLK_ID_SWIFT4 0x0074U
++#define RGX_CNTBLK_ID_SWIFT5 0x0075U
++#define RGX_CNTBLK_ID_SWIFT6 0x0076U
++#define RGX_CNTBLK_ID_SWIFT7 0x0077U
++#define RGX_CNTBLK_ID_SWIFT_ALL 0x4070U
++
++#define RGX_CNTBLK_ID_TEXAS0 0x0080U /*!< TEXAS 1..N TEXAS */
++#define RGX_CNTBLK_ID_TEXAS1 0x0081U
++#define RGX_CNTBLK_ID_TEXAS2 0x0082U
++#define RGX_CNTBLK_ID_TEXAS3 0x0083U
++#define RGX_CNTBLK_ID_TEXAS_ALL 0x4080U
++
++#define RGX_CNTBLK_ID_RAC0 0x0090U /*!< RAC 1..N RAC */
++#define RGX_CNTBLK_ID_RAC1 0x0091U
++#define RGX_CNTBLK_ID_RAC2 0x0092U
++#define RGX_CNTBLK_ID_RAC3 0x0093U
++#define RGX_CNTBLK_ID_RAC_ALL 0x4090U
++
++#define RGX_CNTBLK_ID_LAST 0x0094U /*!< End of RAC block */
++
++/*! Masks for the counter block ID */
++#define RGX_CNTBLK_ID_UNIT_MASK (0x000FU) /*!< Unit within group */
++#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U) /*!< Group value */
++#define RGX_CNTBLK_ID_GROUP_SHIFT (4U)
++#define RGX_CNTBLK_ID_MC_GPU_MASK (0x0F00U) /*!< GPU ID for MC use */
++#define RGX_CNTBLK_ID_MC_GPU_SHIFT (8U)
++#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U) /*!< Program all units within a group */
++
++static_assert(
++ ((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN),
++ "RGX_HWPERF_MAX_BVNC_BLOCK_LEN insufficient");
++
++#define RGX_HWPERF_EVENT_MASK_VALUE(e) (IMG_UINT64_C(1) << (e))
++
++/* When adding new counters here, make sure changes are made to rgxfw_hwperf_fwblk_valid() as well */
++#define RGX_CUSTOM_FW_CNTRS \
++ X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \
++ \
++ X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \
++ \
++ X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \
++ \
++ X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \
++ \
++ X(ISP_TILES_IN_FLIGHT, 0x4, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DSPMKICK))
++
++/*! Counter IDs for the firmware held statistics */
++typedef enum
++{
++#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id,
++ RGX_CUSTOM_FW_CNTRS
++#undef X
++
++ /* always the last entry in the list */
++ RGX_CUSTOM_FW_CNTR_LAST
++} RGX_HWPERF_CUSTOM_FW_CNTR_ID;
++
++/*! Identifier for each counter in a performance counting module */
++typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID;
++
++/* Sets all the bits from bit _b1 to _b2, in an IMG_UINT64 type */
++#define MASK_RANGE_IMPL(b1, b2) ((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)b1)
++#define MASK_RANGE(R) MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE)
++#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e))
++
++/*! Mask macros for use with RGXCtrlHWPerf() API.
++ */
++#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000))
++#define RGX_HWPERF_EVENT_MASK_DEFAULT (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_FWACT) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG) | \
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG))
++#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
++
++/*! HWPerf Firmware event masks
++ * @par
++ * All FW Start/End/Debug (SED) events. */
++#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE))
++
++#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO))
++#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\
++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED))
++/*! All FW events.
*/ ++#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\ ++ RGX_HWPERF_EVENT_MASK_FW_UFO |\ ++ RGX_HWPERF_EVENT_MASK_FW_CSW) ++ ++/*! HW Periodic events (1ms interval). */ ++#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC)) ++/*! All HW Kick/Finish events. */ ++#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\ ++ MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\ ++ ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC)) ++ ++#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\ ++ RGX_HWPERF_EVENT_MASK_HW_PERIODIC) ++ ++#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE)) ++ ++#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG)) ++ ++/*! HWPerf Host event masks ++ */ ++#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ)) ++#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO)) ++#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC)) ++ ++ ++/*! Type used in the RGX API RGXConfigHWPerfCounters() */ ++typedef struct ++{ ++ /*! Reserved for future use */ ++ IMG_UINT32 ui32Reserved; ++ ++ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ ++ IMG_UINT16 ui16BlockID; ++ ++ /*! Number of configured counters within this block */ ++ IMG_UINT16 ui16NumCounters; ++ ++ /*! Counter register values */ ++ IMG_UINT16 ui16Counters[RGX_CNTBLK_COUNTERS_MAX]; ++ ++} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK); ++ ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_HWPERF_H_ */ ++ ++/****************************************************************************** ++ End of file ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/rgx_hwperf_table.h b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_hwperf_table.h +new file mode 100644 +index 000000000000..5c206e951ea0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_hwperf_table.h +@@ -0,0 +1,511 @@ ++/*************************************************************************/ /*! ++@File ++@Title HWPerf counter table header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Utility functions used internally for HWPerf data retrieval ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGX_HWPERF_TABLE_H ++#define RGX_HWPERF_TABLE_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "rgx_fwif_hwperf.h" ++ ++/*****************************************************************************/ ++ ++/* Forward declaration */ ++typedef struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL; ++ ++/* Function pointer type for functions to check dynamic power state of ++ * counter block instance. Used only in firmware. */ ++typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)( ++ RGX_HWPERF_CNTBLK_ID eBlkType, ++ IMG_UINT8 ui8UnitId); ++ ++/* Counter block run-time info */ ++typedef struct ++{ ++ IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core */ ++} RGX_HWPERF_CNTBLK_RT_INFO; ++ ++/* Function pointer type for functions to check block is valid and present ++ * on that RGX Device at runtime. It may have compile logic or run-time ++ * logic depending on where the code executes: server, srvinit or firmware. ++ * Values in the psRtInfo output parameter are only valid if true returned. ++ */ ++typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)( ++ const struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc, ++ const void *pvDev_km, ++ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo); ++ ++/* This structure encodes properties of a type of performance counter block. ++ * The structure is sometimes referred to as a block type descriptor. These ++ * properties contained in this structure represent the columns in the block ++ * type model table variable below. These values vary depending on the build ++ * BVNC and core type. ++ * Each direct block has a unique type descriptor and each indirect group has ++ * a type descriptor. ++ */ ++struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ ++{ ++ IMG_UINT32 uiCntBlkIdBase; /* The starting block id for this block type */ ++ IMG_UINT32 uiIndirectReg; /* 0 if direct type otherwise the indirect register value to select indirect unit */ ++ IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core (compile time use) */ ++ const IMG_CHAR *pszBlockNameComment; /* Name of the PERF register. 
++ Used while dumping the perf counters to pdumps */
++ PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */
++ PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */
++ IMG_UINT16 *pszBlkCfgValid; /* Array of supported counters per block type */
++};
++
++/*****************************************************************************/
++
++/* Shared compile-time context ASSERT macro */
++#if defined(RGX_FIRMWARE)
++/* firmware context */
++# define DBG_ASSERT(_c) RGXFW_ASSERT((_c))
++#else
++/* host client/server context */
++# define DBG_ASSERT(_c) PVR_ASSERT((_c))
++#endif
++
++/*****************************************************************************
++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered()
++
++ Referenced in gasCntBlkTypeModel[] table below and only called from
++ RGX_FIRMWARE run-time context. Therefore compile time configuration is used.
++ *****************************************************************************/
++
++#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS)
++# include "rgxfw_pow.h"
++# include "rgxfw_utils.h"
++
++static inline IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId);
++static inline IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
++{
++ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
++
++ switch (eBlkType)
++ {
++ case RGX_CNTBLK_ID_JONES:
++ case RGX_CNTBLK_ID_SLC:
++ case RGX_CNTBLK_ID_SLCBANK0:
++ case RGX_CNTBLK_ID_FBCDC:
++ case RGX_CNTBLK_ID_FW_CUSTOM:
++ return IMG_TRUE;
++
++#if !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE)
++ case RGX_CNTBLK_ID_SLCBANK1:
++ if (RGX_FEATURE_NUM_MEMBUS > 1U)
++ {
++ return IMG_TRUE;
++ }
++ else
++ {
++ return IMG_FALSE;
++ }
++
++ case RGX_CNTBLK_ID_SLCBANK2:
++ case RGX_CNTBLK_ID_SLCBANK3:
++ if (RGX_FEATURE_NUM_MEMBUS > 2U)
++ {
++ return IMG_TRUE;
++ }
++ else
++ {
++ return IMG_FALSE;
++ }
++#endif /* !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE) */
++
++ default:
++ return IMG_FALSE;
++ }
++}
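++
++/* Illustrative sketch, not part of the original header: how a firmware
++ * sampling loop might consult a block descriptor's pfnIsBlkPowered callback
++ * before touching counter registers. ReadBlockCounters() is hypothetical.
++ */
++#if 0
++static void SampleBlock(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psDesc,
++                        RGX_HWPERF_CNTBLK_ID eBlkId, IMG_UINT8 ui8Unit)
++{
++	/* Skip the read while the block's power island is off. */
++	if (psDesc->pfnIsBlkPowered(eBlkId, ui8Unit))
++	{
++		ReadBlockCounters(eBlkId, ui8Unit);
++	}
++}
++#endif
++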
++/* Only use conditional compilation when counter blocks appear in different
++ * islands for different Rogue families.
++ */
++static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId);
++static inline IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
++{
++ IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units();
++
++ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
++
++ // We don't have any Dusts Enabled until the first DC opens the GPU. This
++ // makes setting the PDump HWPerf trace buffers very difficult.
++ // To work around this we special-case some of the 'have to be there'
++ // indirect registers (e.g., TPU0)
++
++ switch (eBlkType)
++ {
++ case RGX_CNTBLK_ID_TPU0:
++ return IMG_TRUE;
++ /*NOTREACHED*/
++ break;
++ default:
++ if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) &&
++ (ui32NumDustsEnabled > 0U))
++ {
++ return IMG_TRUE;
++ }
++ else
++ {
++ return IMG_FALSE;
++ }
++ /*NOTREACHED*/
++ break;
++ }
++}
++
++#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
++
++# define rgxfw_hwperf_pow_st_direct ((void *)NULL)
++# define rgxfw_hwperf_pow_st_indirect ((void *)NULL)
++
++#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
++
++/*****************************************************************************
++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end
++ *****************************************************************************/
++
++/*****************************************************************************
++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start
++
++ Referenced in gasCntBlkTypeModel[] table below and called from all build
++ contexts:
++ RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server).
++
++ Therefore each function has two implementations, one for compile time and one
++ run time configuration depending on the context. The functions will inform the
++ caller whether this block is valid for this particular RGX device. Other
++ run-time dependent data is returned in psRtInfo for the caller to use.
++ *****************************************************************************/
++
++
++/* Used for all block types: Direct and Indirect */
++static inline IMG_BOOL rgx_hwperf_blk_present(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
++{
++#if defined(__KERNEL__) /* Server context -- Run-time Only */
++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
++ PVRSRV_DEVICE_NODE *psNode;
++ IMG_UINT32 ui32MaxTPUPerSPU;
++ IMG_UINT32 ui32NumMemBus;
++ IMG_UINT32 ui32RTArchVal;
++
++ DBG_ASSERT(psDevInfo != NULL);
++ DBG_ASSERT(psBlkTypeDesc != NULL);
++ DBG_ASSERT(psRtInfo != NULL);
++
++ if (((psDevInfo == NULL) || (psBlkTypeDesc == NULL)) || (psRtInfo == NULL))
++ {
++ return IMG_FALSE;
++ }
++
++ psNode = psDevInfo->psDeviceNode;
++ DBG_ASSERT(psNode != NULL);
++
++ if (psNode == NULL)
++ {
++ return IMG_FALSE;
++ }
++
++ ui32MaxTPUPerSPU =
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, MAX_TPU_PER_SPU);
++
++ if (PVRSRV_IS_FEATURE_SUPPORTED(psNode, CATURIX_TOP_INFRASTRUCTURE))
++ {
++ ui32NumMemBus = 1U;
++ }
++ else
++ {
++ ui32NumMemBus =
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_MEMBUS);
++ }
++
++ ui32RTArchVal =
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, RAY_TRACING_ARCH);
++
++ switch (psBlkTypeDesc->uiCntBlkIdBase)
++ {
++ case RGX_CNTBLK_ID_JONES:
++ case RGX_CNTBLK_ID_SLC:
++ case RGX_CNTBLK_ID_SLCBANK0:
++ case RGX_CNTBLK_ID_FBCDC:
++ case RGX_CNTBLK_ID_FW_CUSTOM:
++ psRtInfo->uiNumUnits = 1;
++ break;
++
++#if !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE)
++ case RGX_CNTBLK_ID_SLCBANK1:
++ if (ui32NumMemBus >= 2U)
++ {
++ psRtInfo->uiNumUnits = 1;
++ }
++ else
++ {
++ psRtInfo->uiNumUnits = 0;
++ }
++ break;
++
++ case RGX_CNTBLK_ID_SLCBANK2:
++ case RGX_CNTBLK_ID_SLCBANK3:
++ if (ui32NumMemBus > 2U)
++ {
++ psRtInfo->uiNumUnits = 1;
++ }
++ else
++ {
++ psRtInfo->uiNumUnits = 0;
++ }
++ break;
++#endif /* !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE) */
++
++ case RGX_CNTBLK_ID_TPU0:
++ case RGX_CNTBLK_ID_SWIFT0:
++ psRtInfo->uiNumUnits =
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU);
++ psRtInfo->uiNumUnits *= ui32MaxTPUPerSPU;
++ break;
++
++ case RGX_CNTBLK_ID_TEXAS0:
++ case RGX_CNTBLK_ID_PBE_SHARED0:
++ psRtInfo->uiNumUnits =
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU);
++ break;
++
++ case RGX_CNTBLK_ID_RAC0:
++ if (ui32RTArchVal > 2U)
++ {
++ psRtInfo->uiNumUnits =
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU);
++ }
++ else
++ {
++ psRtInfo->uiNumUnits = 0;
++ }
++ break;
++
++ case RGX_CNTBLK_ID_USC0:
++ case RGX_CNTBLK_ID_MERCER0:
++ psRtInfo->uiNumUnits =
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_CLUSTERS);
++ break;
++
++ case RGX_CNTBLK_ID_PBE0:
++ psRtInfo->uiNumUnits =
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, PBE_PER_SPU);
++ psRtInfo->uiNumUnits *=
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU);
++ break;
++
++ case RGX_CNTBLK_ID_ISP0:
++ psRtInfo->uiNumUnits =
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_ISP_PER_SPU);
++ /* Adjust by NUM_SPU */
++ psRtInfo->uiNumUnits *=
++ PVRSRV_GET_DEVICE_FEATURE_VALUE(psNode, NUM_SPU);
++ break;
++
++ default:
++ return IMG_FALSE;
++ }
++ /* Verify that we have at least one unit present */
++ if (psRtInfo->uiNumUnits > 0U)
++ {
++ return IMG_TRUE;
++ }
++ else
++ {
++ return IMG_FALSE;
++ }
++#else /* FW context -- Compile-time only */
++ IMG_UINT32 ui32NumMemBus;
++ PVR_UNREFERENCED_PARAMETER(pvDev_km);
++ DBG_ASSERT(psBlkTypeDesc != NULL);
++
++ if (unlikely(psBlkTypeDesc == NULL))
++ {
++ return IMG_FALSE;
++ }
++
++#if !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE)
++ ui32NumMemBus = RGX_FEATURE_NUM_MEMBUS;
++#else
++ ui32NumMemBus = 1U;
++#endif /* !defined(RGX_FEATURE_CATURIX_TOP_INFRASTRUCTURE) */
++
++ switch (psBlkTypeDesc->uiCntBlkIdBase)
++ {
++ /* Handle the dynamic-sized SLC blocks which are only present if
++ * RGX_FEATURE_NUM_MEMBUS is appropriately set.
++ */
++ case RGX_CNTBLK_ID_SLCBANK1:
++ if (ui32NumMemBus >= 2U)
++ {
++ psRtInfo->uiNumUnits = 1;
++ }
++ else
++ {
++ psRtInfo->uiNumUnits = 0;
++ }
++ break;
++
++ case RGX_CNTBLK_ID_SLCBANK2:
++ case RGX_CNTBLK_ID_SLCBANK3:
++ if (ui32NumMemBus > 2U)
++ {
++ psRtInfo->uiNumUnits = 1;
++ }
++ else
++ {
++ psRtInfo->uiNumUnits = 0;
++ }
++ break;
++
++ default:
++ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
++ break;
++ }
++ if (psRtInfo->uiNumUnits > 0U)
++ {
++ return IMG_TRUE;
++ }
++ else
++ {
++ return IMG_FALSE;
++ }
++#endif /* defined(__KERNEL__) */
++}
++
++#if !defined(__KERNEL__) /* Firmware or User-mode context */
++
++/* Used to instantiate a null row in the block type model table below where the
++ * block is not supported for a given build BVNC in firmware/user mode context.
++ * This is needed as the blockid to block type lookup uses the table as well
++ * and clients may try to access blocks not in the hardware. */
++#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) X(_blkid, 0, 0, #_blkid, NULL, NULL, NULL)
++
++#endif
++
++/*****************************************************************************
++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end
++ *****************************************************************************/
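++
++/* Illustrative sketch, not part of the original header: how an includer of
++ * this file might expand the X-macro lists documented and defined below into
++ * its own local table. The g_aui* counter-validity arrays must already be
++ * defined by the includer.
++ */
++#if 0
++#define X(A, B, C, D, E, F, G) {A, B, C, D, E, F, G}
++static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] = {
++	RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST
++	,
++	RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST
++};
++#undef X
++#endif
++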
++/*****************************************************************************
++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table
++
++ This table holds the entries for the performance counter block type model.
++ Where the block is not present on the RGX device in question,
++ pfnIsBlkPresent() returns false; if the block is valid and present, it
++ returns true.
++ Columns in the table with a ** indicate the value is a default and the value
++ returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent() should
++ be used at runtime by the caller. These columns are only valid for compile
++ time BVNC configured contexts.
++
++ Order of table rows must match order of counter block IDs in the enumeration
++ RGX_HWPERF_CNTBLK_ID.
++
++ Table contains Xmacro styled entries. Each includer of this file must define
++ a gasCntBlkTypeModel[] structure which is local to itself. Only the layout is
++ defined here.
++
++ uiCntBlkIdBase : Block-ID
++ uiIndirectReg : 0 => Direct, non-zero => INDIRECT register address
++ uiNumUnits : Number of units present on the GPU
++ pszBlockNameComment : Name of the Performance Block
++ pfnIsBlkPowered : Function to determine power state of block
++ pfnIsBlkPresent : Function to determine block presence on the core
++ pszBlkCfgValid : Array of counters valid within this block type
++ *****************************************************************************/
++
++ // Furian 8XT V2 layout:
++
++ /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */
++
++ /* RGX_CNTBLK_ID_JONES */
++#if defined(RGX_FIRMWARE) || defined(__KERNEL__)
++
++/* Furian 8XT Direct Performance counter blocks */
++
++#define RGX_CNT_BLK_TYPE_MODEL_DIRECT_LIST \
++ /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ \
++X(RGX_CNTBLK_ID_JONES, 0, 1, "PERF_BLK_JONES", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiJONES), \
++X(RGX_CNTBLK_ID_SLC, 0, 1, "PERF_BLK_SLC", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC), \
++X(RGX_CNTBLK_ID_FBCDC, 0, 1, "PERF_BLK_FBCDC", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiFBCDC), \
++X(RGX_CNTBLK_ID_FW_CUSTOM, 0, 1, "PERF_BLK_FW_CUSTOM", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiFWCUSTOM), \
++X(RGX_CNTBLK_ID_SLCBANK0, 0, 1, "PERF_BLK_SLC0", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC0), \
++X(RGX_CNTBLK_ID_SLCBANK1, 0, 1, "PERF_BLK_SLC1", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC1), \
++X(RGX_CNTBLK_ID_SLCBANK2, 0, 1, "PERF_BLK_SLC2", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC2), \
++X(RGX_CNTBLK_ID_SLCBANK3, 0, 1, "PERF_BLK_SLC3", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present, g_auiSLC3)
++
++/* Furian 8XT Indirect Performance counter blocks */
++
++#if !defined(RGX_CR_RAC_INDIRECT)
++#define RGX_CR_RAC_INDIRECT (0x8398U)
++#endif
++
++#define RGX_CNT_BLK_TYPE_MODEL_INDIRECT_LIST \
++ /* uiCntBlkIdBase, uiIndirectReg, uiNumUnits**, pszBlockNameComment, pfnIsBlkPowered, pfnIsBlkPresent */ \
++X(RGX_CNTBLK_ID_ISP0, RGX_CR_ISP_INDIRECT, RGX_HWPERF_NUM_SPU * RGX_HWPERF_NUM_ISP_PER_SPU, "PERF_BLK_ISP", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiISP), \
++X(RGX_CNTBLK_ID_MERCER0, RGX_CR_MERCER_INDIRECT, RGX_HWPERF_NUM_MERCER, "PERF_BLK_MERCER", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiMERCER), \
++X(RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_INDIRECT, RGX_HWPERF_NUM_PBE, "PERF_BLK_PBE", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiPBE), \
++X(RGX_CNTBLK_ID_PBE_SHARED0, RGX_CR_PBE_SHARED_INDIRECT, RGX_HWPERF_NUM_PBE_SHARED, "PERF_BLK_PBE_SHARED", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiPBE_SHARED), \
++X(RGX_CNTBLK_ID_USC0, RGX_CR_USC_INDIRECT, RGX_HWPERF_NUM_USC, "PERF_BLK_USC", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiUSC), \ ++X(RGX_CNTBLK_ID_TPU0, RGX_CR_TPU_INDIRECT, RGX_HWPERF_NUM_TPU, "PERF_BLK_TPU", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiTPU), \ ++X(RGX_CNTBLK_ID_SWIFT0, RGX_CR_SWIFT_INDIRECT, RGX_HWPERF_NUM_SWIFT, "PERF_BLK_SWIFT", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiSWIFT), \ ++X(RGX_CNTBLK_ID_TEXAS0, RGX_CR_TEXAS_INDIRECT, RGX_HWPERF_NUM_TEXAS, "PERF_BLK_TEXAS", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiTEXAS), \ ++X(RGX_CNTBLK_ID_RAC0, RGX_CR_RAC_INDIRECT, RGX_HWPERF_NUM_RAC, "PERF_BLK_RAC", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present, g_auiRAC) ++ ++#else /* !defined(RGX_FIRMWARE) && !defined(__KERNEL__) */ ++ ++#error "RGX_FIRMWARE or __KERNEL__ *MUST* be defined" ++ ++#endif /* defined(RGX_FIRMWARE) || defined(__KERNEL__) */ ++ ++#endif /* RGX_HWPERF_TABLE_H */ ++ ++/****************************************************************************** ++ End of file (rgx_hwperf_table.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/rgx_options.h b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_options.h +new file mode 100644 +index 000000000000..e7f650bee8de +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/volcanic/rgx_options.h +@@ -0,0 +1,294 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX build options ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* Each build option listed here is packed into a dword which provides up to ++ * log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and ++ * (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM. ++ * The corresponding bit is set if the build option was enabled at compile ++ * time. ++ * ++ * In order to extract the enabled build flags the INTERNAL_TEST switch should ++ * be enabled in a client program which includes this header. Then the client ++ * can test specific build flags by reading the bit value at ++ * ##OPTIONNAME##_SET_OFFSET ++ * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS. ++ * ++ * IMPORTANT: add new options to unused bits or define a new dword ++ * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield ++ * remains backwards compatible. ++ */ ++ ++#ifndef RGX_OPTIONS_H ++#define RGX_OPTIONS_H ++ ++#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL ++ ++#define NO_HARDWARE_OPTION "NO_HARDWARE " ++#if defined(NO_HARDWARE) || defined(INTERNAL_TEST) ++ #define NO_HARDWARE_SET_OFFSET OPTIONS_BIT0 ++ #define OPTIONS_BIT0 (0x1UL << 0) ++ #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT0 0x0UL ++#endif /* NO_HARDWARE */ ++ ++#define PDUMP_OPTION "PDUMP " ++#if defined(PDUMP) || defined(INTERNAL_TEST) ++ #define PDUMP_SET_OFFSET OPTIONS_BIT1 ++ #define OPTIONS_BIT1 (0x1UL << 1) ++ #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT1 0x0UL ++#endif /* PDUMP */ ++ ++/* No longer used */ ++#define INTERNAL_TEST_OPTION "INTERNAL_TEST " ++#if defined(INTERNAL_TEST) ++ #define UNUSED_SET_OFFSET OPTIONS_BIT2 ++ #define OPTIONS_BIT2 (0x1UL << 2) ++ #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT2 0x0UL ++#endif ++ ++/* No longer used */ ++#define UNUSED_OPTION " " ++#if defined(INTERNAL_TEST) ++ #define OPTIONS_BIT3 (0x1UL << 3) ++ #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT3 0x0UL ++#endif ++ ++#define SUPPORT_RGX_OPTION " " ++#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST) ++ #define SUPPORT_RGX_SET_OFFSET OPTIONS_BIT4 ++ #define OPTIONS_BIT4 (0x1UL << 4) ++ #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT4 0x0UL ++#endif /* SUPPORT_RGX */ ++ ++#define SUPPORT_SECURE_EXPORT_OPTION "SECURE_EXPORTS " ++#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST) ++ #define SUPPORT_SECURE_EXPORT_SET_OFFSET OPTIONS_BIT5 ++ #define OPTIONS_BIT5 (0x1UL << 5) ++ #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT5 0x0UL ++#endif /* SUPPORT_SECURE_EXPORT */ ++ ++#define SUPPORT_INSECURE_EXPORT_OPTION 
"INSECURE_EXPORTS " ++#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST) ++ #define SUPPORT_INSECURE_EXPORT_SET_OFFSET OPTIONS_BIT6 ++ #define OPTIONS_BIT6 (0x1UL << 6) ++ #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT6 0x0UL ++#endif /* SUPPORT_INSECURE_EXPORT */ ++ ++#define SUPPORT_VFP_OPTION "VFP " ++#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST) ++ #define SUPPORT_VFP_SET_OFFSET OPTIONS_BIT7 ++ #define OPTIONS_BIT7 (0x1UL << 7) ++ #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT7 0x0UL ++#endif /* SUPPORT_VFP */ ++ ++#define SUPPORT_WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION " ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST) ++ #define SUPPORT_WORKLOAD_ESTIMATION_OFFSET OPTIONS_BIT8 ++ #define OPTIONS_BIT8 (0x1UL << 8) ++ #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT8 0x0UL ++#endif /* SUPPORT_WORKLOAD_ESTIMATION */ ++#define OPTIONS_WORKLOAD_ESTIMATION_MASK (0x1UL << 8) ++ ++#define SUPPORT_PDVFS_OPTION "PDVFS " ++#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST) ++ #define SUPPORT_PDVFS_OFFSET OPTIONS_BIT9 ++ #define OPTIONS_BIT9 (0x1UL << 9) ++ #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT9 0x0UL ++#endif /* SUPPORT_PDVFS */ ++#define OPTIONS_PDVFS_MASK (0x1UL << 9) ++ ++#define DEBUG_OPTION "DEBUG " ++#if defined(DEBUG) || defined(INTERNAL_TEST) ++ #define DEBUG_SET_OFFSET OPTIONS_BIT10 ++ #define OPTIONS_BIT10 (0x1UL << 10) ++ #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT10 0x0UL ++#endif /* DEBUG */ ++/* The bit position of this should be the same as DEBUG_SET_OFFSET option ++ * when defined. 
++ */ ++#define OPTIONS_DEBUG_MASK (0x1UL << 10) ++ ++#define SUPPORT_BUFFER_SYNC_OPTION "BUFFER_SYNC " ++#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST) ++ #define SUPPORT_BUFFER_SYNC_SET_OFFSET OPTIONS_BIT11 ++ #define OPTIONS_BIT11 (0x1UL << 11) ++ #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT11 0x0UL ++#endif /* SUPPORT_BUFFER_SYNC */ ++ ++#define SUPPORT_AUTOVZ_OPTION "AUTOVZ " ++#if defined(SUPPORT_AUTOVZ) ++ #define SUPPORT_AUTOVZ_OFFSET OPTIONS_BIT12 ++ #define OPTIONS_BIT12 (0x1UL << 12) ++ #if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT12 0x0UL ++#endif /* SUPPORT_AUTOVZ */ ++ ++#define SUPPORT_AUTOVZ_HW_REGS_OPTION "AUTOVZ_HW_REGS " ++#if defined(SUPPORT_AUTOVZ_HW_REGS) ++ #define SUPPORT_AUTOVZ_HW_REGS_OFFSET OPTIONS_BIT13 ++ #define OPTIONS_BIT13 (0x1UL << 13) ++ #if OPTIONS_BIT13 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT13 0x0UL ++#endif /* SUPPORT_AUTOVZ_HW_REGS */ ++ ++/* Bit 14 reserved for compatibility with Rogue code base */ ++#define OPTIONS_BIT14 0x0UL ++ ++#define VALIDATION_EN_MASK (0x1UL << 15) ++#define SUPPORT_VALIDATION_OPTION "VALIDATION " ++#if defined(SUPPORT_VALIDATION) ++ #define SUPPORT_VALIDATION_OFFSET OPTIONS_BIT15 ++ #define OPTIONS_BIT15 (0x1UL << 15) ++ #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT15 0x0UL ++#endif /* SUPPORT_VALIDATION */ ++ ++#define RGX_BUILD_OPTIONS_KM \ ++ (OPTIONS_BIT0 |\ ++ OPTIONS_BIT1 |\ ++ OPTIONS_BIT2 |\ ++ OPTIONS_BIT3 |\ ++ OPTIONS_BIT4 |\ ++ OPTIONS_BIT6 |\ ++ OPTIONS_BIT7 |\ ++ OPTIONS_BIT8 |\ ++ OPTIONS_BIT9 |\ ++ OPTIONS_BIT10 |\ ++ OPTIONS_BIT11 |\ ++ OPTIONS_BIT12 |\ ++ OPTIONS_BIT13 |\ ++ OPTIONS_BIT14 |\ ++ OPTIONS_BIT15) ++ ++#define RGX_BUILD_OPTIONS_LIST \ ++ { \ ++ NO_HARDWARE_OPTION, \ ++ PDUMP_OPTION, \ ++ INTERNAL_TEST_OPTION, \ ++ UNUSED_OPTION, \ ++ SUPPORT_RGX_OPTION, \ ++ SUPPORT_SECURE_EXPORT_OPTION, \ ++ SUPPORT_INSECURE_EXPORT_OPTION, \ ++ SUPPORT_VFP_OPTION, \ ++ SUPPORT_WORKLOAD_ESTIMATION_OPTION, \ ++ SUPPORT_PDVFS_OPTION, \ ++ DEBUG_OPTION, \ ++ SUPPORT_BUFFER_SYNC_OPTION, \ ++ SUPPORT_AUTOVZ_OPTION, \ ++ SUPPORT_AUTOVZ_HW_REGS_OPTION, \ ++ SUPPORT_VALIDATION_OPTION \ ++ } ++ ++#define RGX_BUILD_OPTIONS_MASK_FW \ ++ (RGX_BUILD_OPTIONS_MASK_KM & \ ++ ~OPTIONS_BIT11) ++ ++#define OPTIONS_BIT31 (0x1UL << 31) ++#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM ++#error "Bit exceeds reserved range" ++#endif ++#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET OPTIONS_BIT31 ++ ++#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31) ++ ++#define OPTIONS_STRICT (RGX_BUILD_OPTIONS & \ ++ ~(OPTIONS_DEBUG_MASK | \ ++ OPTIONS_WORKLOAD_ESTIMATION_MASK | \ ++ OPTIONS_PDVFS_MASK)) ++ ++#endif /* RGX_OPTIONS_H */ +diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/rgxheapconfig.h b/drivers/gpu/drm/img-rogue/include/volcanic/rgxheapconfig.h +new file mode 100644 +index 000000000000..8b11ba3f8e17 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/volcanic/rgxheapconfig.h +@@ -0,0 +1,278 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Device virtual memory map ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Memory heaps device specific configuration ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXHEAPCONFIG_H ++#define RGXHEAPCONFIG_H ++ ++#include "rgxdefs_km.h" ++ ++ ++#define RGX_HEAP_SIZE_4KiB IMG_UINT64_C(0x0000001000) ++#define RGX_HEAP_SIZE_64KiB IMG_UINT64_C(0x0000010000) ++#define RGX_HEAP_SIZE_256KiB IMG_UINT64_C(0x0000040000) ++ ++#define RGX_HEAP_SIZE_1MiB IMG_UINT64_C(0x0000100000) ++#define RGX_HEAP_SIZE_2MiB IMG_UINT64_C(0x0000200000) ++#define RGX_HEAP_SIZE_4MiB IMG_UINT64_C(0x0000400000) ++#define RGX_HEAP_SIZE_16MiB IMG_UINT64_C(0x0001000000) ++#define RGX_HEAP_SIZE_256MiB IMG_UINT64_C(0x0010000000) ++ ++#define RGX_HEAP_SIZE_1GiB IMG_UINT64_C(0x0040000000) ++#define RGX_HEAP_SIZE_2GiB IMG_UINT64_C(0x0080000000) ++#define RGX_HEAP_SIZE_4GiB IMG_UINT64_C(0x0100000000) ++#define RGX_HEAP_SIZE_16GiB IMG_UINT64_C(0x0400000000) ++#define RGX_HEAP_SIZE_32GiB IMG_UINT64_C(0x0800000000) ++#define RGX_HEAP_SIZE_64GiB IMG_UINT64_C(0x1000000000) ++#define RGX_HEAP_SIZE_128GiB IMG_UINT64_C(0x2000000000) ++#define RGX_HEAP_SIZE_256GiB IMG_UINT64_C(0x4000000000) ++#define RGX_HEAP_SIZE_512GiB IMG_UINT64_C(0x8000000000) ++ ++/* ++ RGX Device Virtual Address Space Definitions ++ ++ NOTES: ++ Base addresses have to be a multiple of 4MiB ++ ++ This file defines the RGX virtual address heaps that are used in ++ application memory contexts. 
It also shows where the Firmware memory heap
++ fits into this, but the firmware heap is only ever created in the
++ Services KM/server component.
++
++ RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed,
++ on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
++ respectively. Therefore if clients use multiple configs they must still
++ be consistent with their definitions for these heaps.
++
++ Shared virtual memory (GENERAL_SVM) support requires that half of the
++ address space (512 GiB) be reserved for SVM allocations to mirror
++ application CPU addresses.
++
++ The GENERAL non-SVM region is 512 GiB to 768 GiB and is shared between the
++ general (4KiB) heap and the general non-4K heap. The first 128 GiB is used
++ for the GENERAL_HEAP (4KiB) and the last 32 GiB is used for the
++ GENERAL_NON4K_HEAP. This heap has a default page-size of 16K.
++ AppHint PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE can be used to force it
++ to one of these values: 4K, 64K, 256K, 1M, 2M.
++
++ Heaps must not start at 0x0000000000, as this is reserved for internal
++ use within the device memory layer.
++
++ Range comments (those starting in column 0 below) act as section headings
++ and sit above the heaps in that range. Often this is the reserved
++ size of the heap within the range.
++*/
++
++
++/* 0x00_0000_0000 ************************************************************/
++
++/* 0x00_0000_0000 - 0x00_0040_0000 **/
++	/* 0 MiB to 4 MiB, size of 4 MiB : RESERVED **/
++
++/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/
++	/* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : GENERAL_SVM_HEAP **/
++	#define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000)
++	#define RGX_GENERAL_SVM_HEAP_SIZE (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_4MiB)
++
++
++/* 0x80_0000_0000 ************************************************************/
++
++/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/
++	/* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/
++	#define RGX_GENERAL_HEAP_BASE IMG_UINT64_C(0x8000000000)
++	#define RGX_GENERAL_HEAP_SIZE RGX_HEAP_SIZE_128GiB
++
++/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF **/
++	/* 640 GiB to 704 GiB, size of 64 GiB : FREE **/
++
++/* 0xB0_0000_0000 - 0xB3_FFFF_FFFF **/
++	/* 704 GiB to 720 GiB, size of 16 GiB : RESERVED ROGUE **/
++
++/* 0xB4_0000_0000 - 0xB7_FFFF_FFFF **/
++	/* 720 GiB to 736 GiB, size of 16 GiB : FREE **/
++
++/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF **/
++	/* 736 GiB to 768 GiB, size of 32 GiB : GENERAL_NON4K_HEAP **/
++	#define RGX_GENERAL_NON4K_HEAP_BASE IMG_UINT64_C(0xB800000000)
++	#define RGX_GENERAL_NON4K_HEAP_SIZE RGX_HEAP_SIZE_32GiB
++
++
++/* 0xC0_0000_0000 ************************************************************/
++
++/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF **/
++	/* 768 GiB to 872 GiB, size of 104 GiB : FREE **/
++
++/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF **/
++	/* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP **/
++	#define RGX_PDSCODEDATA_HEAP_BASE IMG_UINT64_C(0xDA00000000)
++	#define RGX_PDSCODEDATA_HEAP_SIZE RGX_HEAP_SIZE_4GiB
++
++/* 0xDB_0000_0000 - 0xDC_FFFF_FFFF **/
++	/* 876 GiB to 884 GiB, size of 8 GiB : RESERVED ROGUE **/
++
++/* 0xDD_0000_0000 - 0xDF_FFFF_FFFF **/
++	/* 884 GiB to 896 GiB, size of 12 GiB : FREE **/
++
++/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF **/
++	/* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP **/
++	#define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xE000000000)
++	#define RGX_USCCODE_HEAP_SIZE RGX_HEAP_SIZE_4GiB
++
++/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF **/
++	/* 900 GiB to 903 GiB, size of 3 GiB : RESERVED **/
++
++/* 0xE1_C000_0000 -
0xE1_FFFF_FFFF **/ ++ /* 903 GiB to 904 GiB, reserved 1 GiB, : FIRMWARE_HEAP **/ ++ ++ /* Firmware heaps defined in rgx_heap_firmware.h as they are not present in ++ application memory contexts, see: ++ RGX_FIRMWARE_RAW_HEAP_BASE ++ RGX_FIRMWARE_RAW_HEAP_SIZE ++ See header for other sub-heaps details ++ */ ++ ++/* 0xE2_0000_0000 - 0xE2_FFFF_FFFF **/ ++ /* 904 GiB to 908 GiB, size of 4GiB : RESERVED ROGUE **/ ++ ++/* 0xE3_0000_0000 - 0xE3_FFFF_FFFF **/ ++ /* 908 GiB to 912 GiB, size of 4 GiB : FREE **/ ++ ++/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF **/ ++ /* 912 GiB to 928 GiB, size 16 GiB : RESERVED_ROGUE **/ ++ ++/* 0xE8_0000_0000 - 0xE8_FFFF_FFFF **/ ++ /* 928 GiB to 932 GiB, size of 4 GiB : FREE **/ ++ ++/* 0xE9_0000_0000 - 0xE9_3FFF_FFFF **/ ++ /* 932 GiB to 933 GiB, size of 1 GiB : VK_CAPT_REPLAY_HEAP **/ ++ #define RGX_VK_CAPT_REPLAY_HEAP_BASE IMG_UINT64_C(0xE900000000) ++ #define RGX_VK_CAPT_REPLAY_HEAP_SIZE RGX_HEAP_SIZE_1GiB ++ ++/* 0xE9_4000_0000 - 0xE9_FFFF_FFFF **/ ++ /* 933 GiB to 936 GiB, size of 3 GiB : FREE **/ ++ ++/* 0xEA_0000_0000 - 0xEA_0000_0FFF **/ ++ /* 936 GiB to 937 GiB, size of min heap size : SIGNALS_HEAP **/ ++ /* CDM Signals heap (31 signals less one reserved for Services). ++ * Size 960B rounded up to minimum heap size */ ++ #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000) ++ #define RGX_SIGNALS_HEAP_SIZE DEVMEM_HEAP_MINIMUM_SIZE ++ ++/* 0xEA_4000_0000 - 0xEA_FFFF_FFFF **/ ++ /* 937 GiB to 940 GiB, size of 3 GiB : FREE **/ ++ ++/* 0xEB_0000_0000 - 0xEB_FFFF_FFFF **/ ++ /* 940 GiB to 944 GiB, size 4 GiB : COMPONENT_CTRL_HEAP **/ ++ #define RGX_COMPONENT_CTRL_HEAP_BASE IMG_UINT64_C(0xEB00000000) ++ #define RGX_COMPONENT_CTRL_HEAP_SIZE RGX_HEAP_SIZE_4GiB ++ ++/* 0xEC_0000_0000 - 0xEC_001F_FFFF **/ ++ /* 944 GiB to 945 GiB, size 2 MiB : FBCDC_HEAP **/ ++ #define RGX_FBCDC_HEAP_BASE IMG_UINT64_C(0xEC00000000) ++ #define RGX_FBCDC_HEAP_SIZE RGX_HEAP_SIZE_2MiB ++ ++/* 0xEC_4000_0000 - 0xEC_401F_FFFF **/ ++ /* 945 GiB to 946 GiB, size 2 MiB : FBCDC_LARGE_HEAP **/ ++ #define RGX_FBCDC_LARGE_HEAP_BASE IMG_UINT64_C(0xEC40000000) ++ #define RGX_FBCDC_LARGE_HEAP_SIZE RGX_HEAP_SIZE_2MiB ++ ++/* 0xEC_8000_0000 - 0xEC_FFFF_FFFF **/ ++ /* 946 GiB to 948 GiB, size of 3 GiB : FREE **/ ++ ++/* 0xED_0000_0000 - 0xED_00FF_FFFF */ ++ /* 948 GiB to 949 GiB, size 16 MiB : PDS_INDIRECT_STATE_HEAP */ ++ #define RGX_PDS_INDIRECT_STATE_HEAP_BASE IMG_UINT64_C(0xED00000000) ++ #define RGX_PDS_INDIRECT_STATE_HEAP_SIZE RGX_HEAP_SIZE_16MiB ++ ++/* 0xED_4000_0000 - 0xED_FFFF_FFFF **/ ++ /* 949 GiB to 952 GiB, size of 3 GiB : FREE **/ ++ ++/* 0xEE_0000_0000 - 0xEE_3FFF_FFFF **/ ++ /* 952 GiB to 953 GiB, size of 1 GiB : CMP_MISSION_RMW_HEAP **/ ++ #define RGX_CMP_MISSION_RMW_HEAP_BASE IMG_UINT64_C(0xEE00000000) ++ #define RGX_CMP_MISSION_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB ++ ++/* 0xEE_4000_0000 - 0xEE_FFFF_FFFF **/ ++ /* 953 GiB to 956 GiB, size of 3 GiB : RESERVED **/ ++ ++/* 0xEF_0000_0000 - 0xEF_3FFF_FFFF **/ ++ /* 956 GiB to 957 GiB, size of 1 GiB : CMP_SAFETY_RMW_HEAP **/ ++ #define RGX_CMP_SAFETY_RMW_HEAP_BASE IMG_UINT64_C(0xEF00000000) ++ #define RGX_CMP_SAFETY_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB ++ ++/* 0xEF_4000_0000 - 0xEF_FFFF_FFFF **/ ++ /* 957 GiB to 960 GiB, size of 3 GiB : RESERVED **/ ++ ++/* 0xF0_0000_0000 - 0xF0_FFFF_FFFF **/ ++ /* 960 GiB to 964 GiB, size of 4 GiB : TEXTURE_STATE_HEAP (36-bit aligned) */ ++ #define RGX_TEXTURE_STATE_HEAP_BASE IMG_UINT64_C(0xF000000000) ++ #define RGX_TEXTURE_STATE_HEAP_SIZE RGX_HEAP_SIZE_4GiB ++ ++/* 0xF1_0000_0000 - 0xF1_FFFF_FFFF **/ ++ /* 964 
GiB to 968 GiB, size of 4 GiB : FREE **/ ++ ++/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/ ++ /* 968 GiB to 969 GiB, size of 2 MiB : VISIBILITY_TEST_HEAP **/ ++ #define RGX_VISIBILITY_TEST_HEAP_BASE IMG_UINT64_C(0xF200000000) ++ #define RGX_VISIBILITY_TEST_HEAP_SIZE RGX_HEAP_SIZE_2MiB ++ ++/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF **/ ++ /* 969 GiB to 972 GiB, size of 3 GiB : FREE **/ ++ ++/* 0xF3_0000_0000 - 0xF7_FFFF_FFFF **/ ++ /* 972 GiB to 992 GiB, size of 20 GiB : FREE **/ ++ ++/* 0xF8_0000_0000 - 0xF9_FFFF_FFFF **/ ++ /* 992 GiB to 1000 GiB, size 8 GiB : RESERVED ROGUE **/ ++ ++/* 0xFA_0000_0000 - 0xFF_FFFF_FFFF **/ ++ /* 1000 GiB to 1024 GiB, size of 24 GiB : FREE **/ ++ ++ ++/* 0xFF_FFFF_FFFF ************************************************************/ ++ ++/* End of RGX Device Virtual Address Space definitions */ ++ ++#endif /* RGXHEAPCONFIG_H */ ++ ++/****************************************************************************** ++ End of file (rgxheapconfig.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/include/volcanic/system/rgx_tc/tc_clocks.h b/drivers/gpu/drm/img-rogue/include/volcanic/system/rgx_tc/tc_clocks.h +new file mode 100644 +index 000000000000..9463830fa1c4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/include/volcanic/system/rgx_tc/tc_clocks.h +@@ -0,0 +1,101 @@ ++/*************************************************************************/ /*! ++@File ++@Title System Description Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This header provides system-specific declarations and macros ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(TC_CLOCKS_H)
++#define TC_CLOCKS_H
++
++/*
++ * The core clock speed is passed through a multiplier depending on the TC
++ * version.
++ *
++ * On TC_ES1: Multiplier = x3, final speed = 270MHz
++ * On TC_ES2: Multiplier = x6, final speed = 540MHz
++ * On TCF5:   Multiplier = x1, final speed = 45MHz
++ *
++ *
++ * The base (unmultiplied) speed can be adjusted using a module parameter
++ * called "sys_core_clk_speed", a number in Hz.
++ * As an example:
++ *
++ * PVR_SRVKM_PARAMS="sys_core_clk_speed=60000000" /etc/init.d/rc.pvr start
++ *
++ * would result in a core speed of 60MHz x Multiplier.
++ *
++ *
++ * The memory clock is unmultiplied and can be adjusted using a module
++ * parameter called "sys_mem_clk_speed", this should be the number in Hz for
++ * the memory clock speed.
++ * As an example:
++ *
++ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=100000000" /etc/init.d/rc.pvr start
++ *
++ * would attempt to start the driver with the memory clock speed set to 100MHz.
++ *
++ *
++ * The same applies to the system interface clock speed, "sys_sysif_clk_speed".
++ * It is needed for TCF5 but not for TC_ES2/ES1.
++ * As an example:
++ *
++ * PVR_SRVKM_PARAMS="sys_sysif_clk_speed=45000000" /etc/init.d/rc.pvr start
++ *
++ * would attempt to start the driver with the system clock speed set to 45MHz.
++ *
++ *
++ * All parameters can be specified at once, e.g.,
++ * PVR_SRVKM_PARAMS="sys_mem_clk_speed=MEMORY_SPEED sys_core_clk_speed=CORE_SPEED sys_sysif_clk_speed=SYSIF_SPEED" /etc/init.d/rc.pvr start
++ */
++
++#define RGX_TC_SYS_CLOCK_SPEED (45000000) /*< Unused */
++
++#if defined(TC_ODIN_27_5_254_2)
++ #define RGX_TC_CORE_CLOCK_SPEED (94000000)
++ #define RGX_TC_MEM_CLOCK_SPEED (40000000)
++ #define RGX_TC_CLOCK_MULTIPLEX (16)
++#else
++ /* FPGA tcfvuquad with Odin */
++ #define RGX_TC_CORE_CLOCK_SPEED (50000000) /* 3.125MHz */
++ #define RGX_TC_MEM_CLOCK_SPEED (40000000) /* 3.75MHz */
++ #define RGX_TC_CLOCK_MULTIPLEX (1)
++#endif
++
++#endif /* if !defined(TC_CLOCKS_H) */
+diff --git a/drivers/gpu/drm/img-rogue/info_page.h b/drivers/gpu/drm/img-rogue/info_page.h
+new file mode 100644
+index 000000000000..5816125d742f
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/info_page.h
+@@ -0,0 +1,99 @@
++/*************************************************************************/ /*!
++@File
++@Title          Kernel/User mode general purpose shared memory.
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    General purpose memory shared between kernel driver and user
++                mode.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef INFO_PAGE_KM_H
++#define INFO_PAGE_KM_H
++
++#include "pvrsrv_error.h"
++
++#include "pmr.h"
++#include "pvrsrv.h"
++#include "info_page_defs.h"
++
++/**
++ * @Function InfoPageCreate
++ * @Description Allocates resources for the global information page.
++ * @Input psData pointer to PVRSRV data
++ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
++ */
++PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData);
++
++/**
++ * @Function InfoPageDestroy
++ * @Description Frees all of the resources of the global information page.
++ * @Input psData pointer to PVRSRV data
++ * @Return void
++ */
++void InfoPageDestroy(PVRSRV_DATA *psData);
++
++/**
++ * @Function PVRSRVAcquireInfoPageKM()
++ * @Description This interface is used for obtaining the global information page
++ *              which acts as a general purpose shared memory between KM and UM.
++ *              The use of this information page outside of services is _not_
++ *              recommended.
++ * @Output ppsPMR handle to exported PMR
++ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
++ */
++PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR);
++
++/**
++ * @Function PVRSRVReleaseInfoPageKM()
++ * @Description This function matches PVRSRVAcquireInfoPageKM().
++ * @Input psPMR handle to exported PMR
++ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
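++ *
++ * A minimal sketch of the intended acquire/release pairing (the variable
++ * and the surrounding control flow are illustrative, not part of this
++ * header):
++ *
++ *     PMR *psPMR;
++ *     if (PVRSRVAcquireInfoPageKM(&psPMR) == PVRSRV_OK)
++ *     {
++ *         ... export psPMR to the client, then balance with ...
++ *         PVRSRVReleaseInfoPageKM(psPMR);
++ *     }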
++ */ ++PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *psPMR); ++ ++/** ++ * @Function GetInfoPageDebugFlagsKM() ++ * @Description Return info page debug flags ++ * @Return info page debug flags ++ */ ++static INLINE IMG_UINT32 GetInfoPageDebugFlagsKM(void) ++{ ++ return (PVRSRVGetPVRSRVData())->pui32InfoPage[DEBUG_FEATURE_FLAGS]; ++} ++ ++#endif /* INFO_PAGE_KM_H */ +diff --git a/drivers/gpu/drm/img-rogue/info_page_client.h b/drivers/gpu/drm/img-rogue/info_page_client.h +new file mode 100644 +index 000000000000..9df2461b55fb +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/info_page_client.h +@@ -0,0 +1,89 @@ ++/*************************************************************************/ /*! ++@File ++@Title Kernel/User mode general purpose shared memory. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description General purpose shared memory (i.e. information page) mapped by ++ kernel space driver and user space clients. All info page ++ entries are sizeof(IMG_UINT32) on both 32/64-bit environments. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef INFO_PAGE_CLIENT_H ++#define INFO_PAGE_CLIENT_H ++ ++#include "device_connection.h" ++#include "info_page_defs.h" ++#if defined(__KERNEL__) ++#include "pvrsrv.h" ++#endif ++ ++/*************************************************************************/ /*! 
++@Function GetInfoPage ++ ++@Description Return Info Page address ++ ++@Input hDevConnection - Services device connection ++ ++@Return Info Page address ++*/ ++/*****************************************************************************/ ++static INLINE IMG_PUINT32 GetInfoPage(SHARED_DEV_CONNECTION hDevConnection) ++{ ++#if defined(__KERNEL__) ++ return (PVRSRVGetPVRSRVData())->pui32InfoPage; ++#else ++ return hDevConnection->pui32InfoPage; ++#endif ++} ++ ++/*************************************************************************/ /*! ++@Function GetInfoPageDebugFlags ++ ++@Description Return Info Page debug flags ++ ++@Input hDevConnection - Services device connection ++ ++@Return Info Page debug flags ++*/ ++/*****************************************************************************/ ++static INLINE IMG_UINT32 GetInfoPageDebugFlags(SHARED_DEV_CONNECTION hDevConnection) ++{ ++ return GetInfoPage(hDevConnection)[DEBUG_FEATURE_FLAGS]; ++} ++ ++#endif /* INFO_PAGE_CLIENT_H */ +diff --git a/drivers/gpu/drm/img-rogue/info_page_defs.h b/drivers/gpu/drm/img-rogue/info_page_defs.h +new file mode 100644 +index 000000000000..d3bc1538a0c7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/info_page_defs.h +@@ -0,0 +1,91 @@ ++/*************************************************************************/ /*! ++@File ++@Title Kernel/User mode general purpose shared memory. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description General purpose shared memory (i.e. information page) mapped by ++ kernel space driver and user space clients. All information page ++ entries are sizeof(IMG_UINT32) on both 32/64-bit environments. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef INFO_PAGE_DEFS_H ++#define INFO_PAGE_DEFS_H ++ ++ ++/* CacheOp information page entries */ ++#define CACHEOP_INFO_IDX_START 0x00 ++#define CACHEOP_INFO_UMKMTHRESHLD (CACHEOP_INFO_IDX_START + 1) /*!< UM=>KM routing threshold in bytes */ ++#define CACHEOP_INFO_KMDFTHRESHLD (CACHEOP_INFO_IDX_START + 2) /*!< KM/DF threshold in bytes */ ++#define CACHEOP_INFO_LINESIZE (CACHEOP_INFO_IDX_START + 3) /*!< CPU data cache line size */ ++#define CACHEOP_INFO_PGSIZE (CACHEOP_INFO_IDX_START + 4) /*!< CPU MMU page size */ ++#define CACHEOP_INFO_IDX_END (CACHEOP_INFO_IDX_START + 5) ++ ++/* HWPerf information page entries */ ++#define HWPERF_INFO_IDX_START (CACHEOP_INFO_IDX_END) ++#define HWPERF_FILTER_SERVICES_IDX (HWPERF_INFO_IDX_START + 0) ++#define HWPERF_FILTER_EGL_IDX (HWPERF_INFO_IDX_START + 1) ++#define HWPERF_FILTER_OPENGLES_IDX (HWPERF_INFO_IDX_START + 2) ++#define HWPERF_FILTER_OPENCL_IDX (HWPERF_INFO_IDX_START + 3) ++#define HWPERF_FILTER_VULKAN_IDX (HWPERF_INFO_IDX_START + 4) ++#define HWPERF_FILTER_OPENGL_IDX (HWPERF_INFO_IDX_START + 5) ++#define HWPERF_INFO_IDX_END (HWPERF_INFO_IDX_START + 6) ++ ++/* timeout values */ ++#define TIMEOUT_INFO_IDX_START (HWPERF_INFO_IDX_END) ++#define TIMEOUT_INFO_VALUE_RETRIES (TIMEOUT_INFO_IDX_START + 0) ++#define TIMEOUT_INFO_VALUE_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 1) ++#define TIMEOUT_INFO_CONDITION_RETRIES (TIMEOUT_INFO_IDX_START + 2) ++#define TIMEOUT_INFO_CONDITION_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 3) ++#define TIMEOUT_INFO_TASK_QUEUE_RETRIES (TIMEOUT_INFO_IDX_START + 4) ++#define TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS (TIMEOUT_INFO_IDX_START + 5) ++#define TIMEOUT_INFO_IDX_END (TIMEOUT_INFO_IDX_START + 6) ++ ++/* Bridge Info */ ++#define BRIDGE_INFO_IDX_START (TIMEOUT_INFO_IDX_END) ++#define BRIDGE_INFO_RGX_BRIDGES (BRIDGE_INFO_IDX_START + 0) ++#define BRIDGE_INFO_PVR_BRIDGES (BRIDGE_INFO_IDX_START + 1) ++#define BRIDGE_INFO_IDX_END (BRIDGE_INFO_IDX_START + 2) ++ ++/* Debug features */ ++#define DEBUG_FEATURE_FLAGS (BRIDGE_INFO_IDX_END) ++#define DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED 0x1 ++#define DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED 0x2 ++#define DEBUG_FEATURE_FLAGS_IDX_END (DEBUG_FEATURE_FLAGS + 1) ++ ++ ++#endif /* INFO_PAGE_DEFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/info_page_km.c b/drivers/gpu/drm/img-rogue/info_page_km.c +new file mode 100644 +index 000000000000..9a0067102002 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/info_page_km.c +@@ -0,0 +1,138 @@ ++/*************************************************************************/ /*! ++@File info_page_km.c ++@Title Kernel/User space shared memory ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements general purpose shared memory between kernel driver ++ and user mode. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include "info_page_defs.h"
++#include "info_page.h"
++#include "pvrsrv.h"
++#include "devicemem.h"
++#include "pmr.h"
++
++PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData)
++{
++	const PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
++	                                          PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
++	                                          PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
++	                                          PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
++	                                          PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
++	                                          PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL);
++	PVRSRV_ERROR eError;
++
++	PVR_ASSERT(psData != NULL);
++
++	/* Allocate a single page of memory for the driver information page */
++	eError = DevmemAllocateExportable(psData->psHostMemDeviceNode,
++	                                  OSGetPageSize(),
++	                                  OSGetPageSize(),
++	                                  OSGetPageShift(),
++	                                  uiMemFlags,
++	                                  "PVRSRVInfoPage",
++	                                  &psData->psInfoPageMemDesc);
++	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0);
++
++	eError = DevmemAcquireCpuVirtAddr(psData->psInfoPageMemDesc,
++	                                  (void **) &psData->pui32InfoPage);
++	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e0);
++
++	/* Look-up the memory descriptor PMR handle */
++	eError = DevmemLocalGetImportHandle(psData->psInfoPageMemDesc,
++	                                    (void **) &psData->psInfoPagePMR);
++	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e0);
++
++	eError = OSLockCreate(&psData->hInfoPageLock);
++	PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);
++
++	return PVRSRV_OK;
++
++e0:
++	InfoPageDestroy(psData);
++	return eError;
++}
++
++void InfoPageDestroy(PVRSRV_DATA *psData)
++{
++	if (psData->psInfoPageMemDesc)
++	{
++		if (psData->pui32InfoPage != NULL)
++		{
++			DevmemReleaseCpuVirtAddr(psData->psInfoPageMemDesc);
++			psData->pui32InfoPage = NULL;
++		}
++
++		DevmemFree(psData->psInfoPageMemDesc);
++		psData->psInfoPageMemDesc = NULL;
++	}
++
++	if (psData->hInfoPageLock)
++	{
++		OSLockDestroy(psData->hInfoPageLock);
++		psData->hInfoPageLock = NULL;
++	}
++}
++
++PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR)
++{
++	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
++
++	PVR_LOG_RETURN_IF_FALSE(psData->psInfoPageMemDesc != NULL, "invalid MEMDESC"
++	                        " handle", PVRSRV_ERROR_INVALID_PARAMS);
++	PVR_LOG_RETURN_IF_FALSE(psData->psInfoPagePMR != NULL, "invalid PMR handle",
++	                        PVRSRV_ERROR_INVALID_PARAMS);
++
++	/* Copy the PMR import handle back */
++	*ppsPMR = psData->psInfoPagePMR;
++
++	/* Mark the PMR such that no layout changes can happen.
++	 * This is a fixed layout created during the early stages of
++	 * driver loading and shouldn't change later. */
++	PMR_SetLayoutFixed(psData->psInfoPagePMR, IMG_TRUE);
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *psPMR)
++{
++	/* Nothing to do here as the PMR is a singleton */
++	PVR_UNREFERENCED_PARAMETER(psPMR);
++	return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/img-rogue/interrupt_support.c b/drivers/gpu/drm/img-rogue/interrupt_support.c
+new file mode 100644
+index 000000000000..c67d45352050
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/interrupt_support.c
+@@ -0,0 +1,151 @@
++/*************************************************************************/ /*!
++@File
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include <linux/interrupt.h>
++
++#include "pvr_debug.h"
++#include "allocmem.h"
++#include "interrupt_support.h"
++
++typedef struct LISR_DATA_TAG
++{
++	IMG_UINT32 ui32IRQ;
++	PFN_SYS_LISR pfnLISR;
++	void *pvData;
++} LISR_DATA;
++
++static irqreturn_t SystemISRWrapper(int irq, void *dev_id)
++{
++	LISR_DATA *psLISRData = (LISR_DATA *)dev_id;
++
++	PVR_UNREFERENCED_PARAMETER(irq);
++
++	if (psLISRData)
++	{
++		if (psLISRData->pfnLISR(psLISRData->pvData))
++		{
++			return IRQ_HANDLED;
++		}
++	}
++	else
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Missing interrupt data", __func__));
++	}
++
++	return IRQ_NONE;
++}
++
++PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR,
++                                 IMG_UINT32 ui32IRQ,
++                                 const IMG_CHAR *pszDevName,
++                                 PFN_SYS_LISR pfnLISR,
++                                 void *pvData,
++                                 IMG_UINT32 ui32Flags)
++{
++	LISR_DATA *psLISRData;
++	unsigned long ulIRQFlags = 0;
++
++	if (pfnLISR == NULL || pvData == NULL)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	if (ui32Flags & ~SYS_IRQ_FLAG_MASK)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	switch (ui32Flags & SYS_IRQ_FLAG_TRIGGER_MASK)
++	{
++		case SYS_IRQ_FLAG_TRIGGER_DEFAULT:
++			break;
++		case SYS_IRQ_FLAG_TRIGGER_LOW:
++			ulIRQFlags |= IRQF_TRIGGER_LOW;
++			break;
++		case SYS_IRQ_FLAG_TRIGGER_HIGH:
++			ulIRQFlags |= IRQF_TRIGGER_HIGH;
++			break;
++		default:
++			return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	if (ui32Flags & SYS_IRQ_FLAG_SHARED)
++	{
++		ulIRQFlags |= IRQF_SHARED;
++	}
++
++	psLISRData = OSAllocMem(sizeof(*psLISRData));
++	if (psLISRData == NULL)
++	{
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++
++	psLISRData->ui32IRQ = ui32IRQ;
++	psLISRData->pfnLISR = pfnLISR;
++	psLISRData->pvData = pvData;
++
++	if (request_irq(ui32IRQ, SystemISRWrapper, ulIRQFlags, pszDevName, psLISRData))
++	{
++		OSFreeMem(psLISRData);
++
++		return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER;
++	}
++
++	*phLISR = (IMG_HANDLE)psLISRData;
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISR)
++{
++	LISR_DATA *psLISRData = (LISR_DATA *)hLISR;
++
++	if (psLISRData == NULL)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	free_irq(psLISRData->ui32IRQ, psLISRData);
++
++	OSFreeMem(psLISRData);
++
++	return PVRSRV_OK;
++}
+diff --git a/drivers/gpu/drm/img-rogue/interrupt_support.h b/drivers/gpu/drm/img-rogue/interrupt_support.h
+new file mode 100644
+index 000000000000..b87772d223cb
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/interrupt_support.h
+@@ -0,0 +1,103 @@
++/*************************************************************************/ /*!
++@File
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(INTERRUPT_SUPPORT_H) ++#define INTERRUPT_SUPPORT_H ++ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_device.h" ++ ++/*! Default trigger type for the interrupt line. */ ++#define SYS_IRQ_FLAG_TRIGGER_DEFAULT (0x0 << 0) ++/*! Interrupt triggered when interrupt line is low. */ ++#define SYS_IRQ_FLAG_TRIGGER_LOW (0x1 << 0) ++/*! Interrupt triggered when interrupt line is high. */ ++#define SYS_IRQ_FLAG_TRIGGER_HIGH (0x2 << 0) ++/*! Interrupt trigger mask. */ ++#define SYS_IRQ_FLAG_TRIGGER_MASK (SYS_IRQ_FLAG_TRIGGER_DEFAULT | \ ++ SYS_IRQ_FLAG_TRIGGER_LOW | \ ++ SYS_IRQ_FLAG_TRIGGER_HIGH) ++/*! The irq is allowed to be shared among several devices. */ ++#define SYS_IRQ_FLAG_SHARED (0x1 << 8) ++ ++/*! Interrupt flags mask. */ ++#define SYS_IRQ_FLAG_MASK (SYS_IRQ_FLAG_TRIGGER_MASK | \ ++ SYS_IRQ_FLAG_SHARED) ++ ++/*************************************************************************/ /*! ++@Description Pointer to a system Low-level Interrupt Service Routine (LISR). ++@Input pvData Private data provided to the LISR. ++@Return IMG_TRUE if interrupt handled, IMG_FALSE otherwise. ++*/ /**************************************************************************/ ++typedef IMG_BOOL (*PFN_SYS_LISR)(void *pvData); ++ ++/*************************************************************************/ /*! 
++@Function OSInstallSystemLISR ++@Description Installs a system low-level interrupt handler ++@Output phLISR On return, contains a handle to the ++ installed LISR ++@Input ui32IRQ The IRQ number for which the ++ interrupt handler should be installed ++@Input pszDevName Name of the device for which the handler ++ is being installed ++@Input pfnLISR A pointer to an interrupt handler ++ function ++@Input pvData A pointer to data that should be passed ++ to pfnLISR when it is called ++@Input ui32Flags Interrupt flags ++@Return PVRSRV_OK on success, a failure code otherwise ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSInstallSystemLISR(IMG_HANDLE *phLISR, ++ IMG_UINT32 ui32IRQ, ++ const IMG_CHAR *pszDevName, ++ PFN_SYS_LISR pfnLISR, ++ void *pvData, ++ IMG_UINT32 ui32Flags); ++ ++/*************************************************************************/ /*! ++@Function OSUninstallSystemLISR ++@Description Uninstalls a system low-level interrupt handler ++@Input hLISRData The handle to the LISR to uninstall ++@Return PVRSRV_OK on success, a failure code otherwise ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSUninstallSystemLISR(IMG_HANDLE hLISRData); ++#endif /* !defined(INTERRUPT_SUPPORT_H) */ +diff --git a/drivers/gpu/drm/img-rogue/kernel_compatibility.h b/drivers/gpu/drm/img-rogue/kernel_compatibility.h +new file mode 100644 +index 000000000000..6a94c8c12b29 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/kernel_compatibility.h +@@ -0,0 +1,521 @@ ++/*************************************************************************/ /*! ++@Title Kernel versions compatibility macros ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Per-version macros to allow code to seamlessly use older kernel ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef __KERNEL_COMPATIBILITY_H__
++#define __KERNEL_COMPATIBILITY_H__
++
++#include <linux/version.h>
++
++/*
++ * Stop supporting an old kernel? Remove the top block.
++ * New incompatible kernel? Append a new block at the bottom.
++ *
++ * Please write your version tests as `VERSION < X.Y`, and use the earliest
++ * possible version :)
++ */
++
++/* Linux 3.6 introduced seq_vprintf(). Earlier versions don't have this
++ * so we work around the limitation by vsnprintf() + seq_puts().
++ */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
++#define seq_vprintf(seq_file, fmt, args) \
++do { \
++	char aszBuffer[512]; /* maximum message buffer size */ \
++	vsnprintf(aszBuffer, sizeof(aszBuffer), fmt, args); \
++	seq_puts(seq_file, aszBuffer); \
++} while (0)
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
++
++/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */
++#define VM_DONTDUMP VM_RESERVED
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */
++
++/*
++ * Note: this fix had to be written backwards because get_unused_fd_flags
++ * was already defined but not exported on kernels < 3.7
++ *
++ * When removing support for kernels < 3.7, this block should be removed
++ * and all `get_unused_fd()` should be manually replaced with
++ * `get_unused_fd_flags(0)`
++ */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
++
++/* Linux 3.19 removed get_unused_fd() */
++/* get_unused_fd_flags was introduced in 3.7 */
++#define get_unused_fd() get_unused_fd_flags(0)
++
++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
++
++/*
++ * Headers shouldn't normally be included by this file but this is a special
++ * case as it's not obvious from the name that devfreq_add_device needs this
++ * include.
++ */
++#include <linux/devfreq.h>
++
++#define devfreq_add_device(dev, profile, name, data) \
++	({ \
++		struct devfreq *__devfreq; \
++		if (name && !strcmp(name, "simple_ondemand")) \
++			__devfreq = devfreq_add_device(dev, profile, \
++			                               &devfreq_simple_ondemand, data); \
++		else \
++			__devfreq = ERR_PTR(-EINVAL); \
++		__devfreq; \
++	})
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) */
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
++
++#define DRIVER_RENDER 0
++#define DRM_RENDER_ALLOW 0
++
++/* Linux 3.12 introduced a new shrinker API */
++#define SHRINK_STOP (~0UL)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
++
++#define dev_pm_opp_get_opp_count(dev) opp_get_opp_count(dev)
++#define dev_pm_opp_get_freq(opp) opp_get_freq(opp)
++#define dev_pm_opp_get_voltage(opp) opp_get_voltage(opp)
++#define dev_pm_opp_add(dev, freq, u_volt) opp_add(dev, freq, u_volt)
++#define dev_pm_opp_find_freq_ceil(dev, freq) opp_find_freq_ceil(dev, freq)
++
++#if defined(CONFIG_ARM)
++/* Linux 3.13 renamed ioremap_cached to ioremap_cache */
++#define ioremap_cache(cookie, size) ioremap_cached(cookie, size)
++#endif /* defined(CONFIG_ARM) */
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) */
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
++
++/* Linux 3.14 introduced a new set of sized min and max defines */
++#ifndef U32_MAX
++#define U32_MAX ((u32)UINT_MAX)
++#endif
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) */
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
++
++/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to
++ * `struct page **pages` */
++#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
++
++/*
++ * Linux 4.7 removed this function but its replacement was available since 3.19.
++ */
++#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e)
++
++/* seq_has_overflowed() was introduced in 3.19 but the structure elements
++ * have been available since 2.x
++ */
++#include <linux/seq_file.h>
++static inline bool seq_has_overflowed(struct seq_file *m)
++{
++	return m->count == m->size;
++}
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
++
++#define debugfs_create_file_size(name, mode, parent, data, fops, file_size) \
++	({ \
++		struct dentry *de; \
++		de = debugfs_create_file(name, mode, parent, data, fops); \
++		if (de) \
++			de->d_inode->i_size = file_size; \
++		de; \
++	})
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
++#define drm_fb_helper_unregister_fbi(fb_helper) \
++	({ \
++		if ((fb_helper) && (fb_helper)->fbdev) \
++			unregister_framebuffer((fb_helper)->fbdev); \
++	})
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
++
++/* Linux 4.4 renamed __GFP_WAIT to __GFP_RECLAIM */
++#define __GFP_RECLAIM __GFP_WAIT
++
++#if !defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
++#define dev_pm_opp_of_add_table(dev) of_init_opp_table(dev)
++#define dev_pm_opp_of_remove_table(dev) of_free_opp_table(dev)
++#else
++#define sync_fence_create(data_name, sync_pt) sync_fence_create(data_name, &(sync_pt)->base)
++#endif
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \
++	(!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
++
++/* Linux 4.5 added a new printf-style parameter for debug messages */
++
++#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \
++	drm_encoder_init(dev, encoder, funcs, encoder_type)
++
++#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \
++	({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type); })
++
++#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \
++	drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs)
++
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
++
++#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \
++	({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ##__VA_ARGS__); })
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
++
++/*
++ * Linux 4.6 removed the first two parameters; the "struct task_struct"
++ * pointer "current" is defined in asm/current.h, which makes it pointless
++ * to pass on every function call.
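++ *
++ * With this shim, every call site can use the single modern five-argument
++ * form on all supported kernels; a minimal sketch (variable names are
++ * illustrative only):
++ *
++ *     struct page *psPage;
++ *     long lPinned = get_user_pages(ulStart, 1, FOLL_WRITE, &psPage, NULL);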
++*/ ++#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ ++ get_user_pages(current, current->mm, start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) ++ ++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) ++ ++/* Linux 4.9 replaced the write/force parameters with "gup_flags" */ ++#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \ ++ get_user_pages(start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas) ++ ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) */ ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ ++ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) ++ ++/* ++ * Linux 4.6 removed the start and end arguments as it now always maps ++ * the entire DMA-BUF. ++ * Additionally, dma_buf_end_cpu_access() now returns an int error. ++ */ ++#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION) ++#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; }) ++ ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \ ++ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) ++ ++/* Linux 4.7 removed the first arguments as it's never been used */ ++#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle) ++ ++/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */ ++#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value) ++ ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) ++ ++/* Linux 4.9 changed the second argument to a drm_file pointer */ ++#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp) ++#define drm_vma_node_allow(node, file_priv) drm_vma_node_allow(node, (file_priv)->filp) ++#define drm_vma_node_revoke(node, file_priv) drm_vma_node_revoke(node, (file_priv)->filp) ++ ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++#define refcount_read(r) atomic_read(r) ++#define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT) ++ ++#define drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd) drm_helper_mode_fill_fb_struct(fb, mode_cmd) ++ ++/* ++ * In Linux Kernels >= 4.12 for x86 another level of page tables has been ++ * added. The added level (p4d) sits between pgd and pud, so when it ++ * doesn`t exist, pud_offset function takes pgd as a parameter instead ++ * of p4d. 
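++ *
++ * With the definitions below, a page-table walk can be written against the
++ * newer five-level API on all supported kernels; a minimal sketch (local
++ * names are illustrative only):
++ *
++ *     pgd_t *psPGD = pgd_offset(mm, ulAddr);
++ *     p4d_t *psP4D = p4d_offset(psPGD, ulAddr);
++ *     pud_t *psPUD = pud_offset(psP4D, ulAddr);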
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
++
++/* Linux 4.7 removed the first argument, as it was never used */
++#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle)
++
++/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */
++#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
++
++/* Linux 4.9 changed the second argument to a drm_file pointer */
++#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp)
++#define drm_vma_node_allow(node, file_priv) drm_vma_node_allow(node, (file_priv)->filp)
++#define drm_vma_node_revoke(node, file_priv) drm_vma_node_revoke(node, (file_priv)->filp)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
++#define refcount_read(r) atomic_read(r)
++#define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT)
++
++#define drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd) drm_helper_mode_fill_fb_struct(fb, mode_cmd)
++
++/*
++ * In Linux kernels >= 4.12 for x86, another level of page tables has been
++ * added. The added level (p4d) sits between pgd and pud, so when it
++ * doesn't exist, the pud_offset function takes pgd as a parameter instead
++ * of p4d.
++ */
++#define p4d_t pgd_t
++#define p4d_offset(pgd, address) (pgd)
++#define p4d_none(p4d) (0)
++#define p4d_bad(p4d) (0)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) */
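++
++/*
++ * Illustrative sketch (hypothetical helper, assuming the usual pgtable
++ * accessors are in scope): a walk written for the 4.12+ five-level layout.
++ * On older kernels the p4d shims above collapse the extra level, so the
++ * same code compiles either way.
++ */
++static inline pud_t *pvr_example_pud_lookup(struct mm_struct *mm,
++                                            unsigned long address)
++{
++    pgd_t *pgd = pgd_offset(mm, address);
++    p4d_t *p4d;
++
++    if (pgd_none(*pgd) || pgd_bad(*pgd))
++        return NULL;
++
++    p4d = p4d_offset(pgd, address);
++    if (p4d_none(*p4d) || p4d_bad(*p4d))
++        return NULL;
++
++    return pud_offset(p4d, address);
++}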
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++
++#define drm_mode_object_get(obj) drm_mode_object_reference(obj)
++#define drm_mode_object_put(obj) drm_mode_object_unreference(obj)
++#define drm_connector_get(obj) drm_connector_reference(obj)
++#define drm_connector_put(obj) drm_connector_unreference(obj)
++#define drm_framebuffer_get(obj) drm_framebuffer_reference(obj)
++#define drm_framebuffer_put(obj) drm_framebuffer_unreference(obj)
++#define drm_gem_object_get(obj) drm_gem_object_reference(obj)
++#define drm_gem_object_put_locked(obj) drm_gem_object_unreference(obj)
++#define __drm_gem_object_put(obj) __drm_gem_object_unreference(obj)
++#define drm_property_blob_get(obj) drm_property_reference_blob(obj)
++#define drm_property_blob_put(obj) drm_property_unreference_blob(obj)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
++
++#define drm_dev_put(dev) drm_dev_unref(dev)
++
++#define drm_mode_object_find(dev, file_priv, id, type) drm_mode_object_find(dev, id, type)
++#define drm_encoder_find(dev, file_priv, id) drm_encoder_find(dev, id)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
++
++#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \
++                                            min_scale, max_scale, \
++                                            can_position, can_update_disabled) \
++    ({ \
++        const struct drm_rect __clip = { \
++            .x2 = crtc_state->crtc->mode.hdisplay, \
++            .y2 = crtc_state->crtc->mode.vdisplay, \
++        }; \
++        int __ret = drm_plane_helper_check_state(plane_state, \
++                                                 &__clip, \
++                                                 min_scale, max_scale, \
++                                                 can_position, \
++                                                 can_update_disabled); \
++        __ret; \
++    })
++
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
++
++#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \
++                                            min_scale, max_scale, \
++                                            can_position, can_update_disabled) \
++    ({ \
++        const struct drm_rect __clip = { \
++            .x2 = crtc_state->crtc->mode.hdisplay, \
++            .y2 = crtc_state->crtc->mode.vdisplay, \
++        }; \
++        int __ret = drm_atomic_helper_check_plane_state(plane_state, \
++                                                        crtc_state, \
++                                                        &__clip, \
++                                                        min_scale, max_scale, \
++                                                        can_position, \
++                                                        can_update_disabled); \
++        __ret; \
++    })
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0))
++
++#define drm_connector_attach_encoder(connector, encoder) \
++    drm_mode_connector_attach_encoder(connector, encoder)
++
++#define drm_connector_update_edid_property(connector, edid) \
++    drm_mode_connector_update_edid_property(connector, edid)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) */
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
++
++/*
++ * Work around architectures, e.g. MIPS, that define copy_from_user and
++ * copy_to_user as macros that call access_ok, as this gets redefined below.
++ * As of kernel 4.12, these functions are no longer defined per-architecture,
++ * so this workaround isn't needed.
++ */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++#if defined(copy_from_user)
++    /*
++     * NOTE: This function should not be called directly as it exists simply to
++     * work around copy_from_user being defined as a macro that calls access_ok.
++     */
++static inline int
++__pvr_copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++    return copy_from_user(to, from, n);
++}
++
++#undef copy_from_user
++#define copy_from_user(to, from, n) __pvr_copy_from_user(to, from, n)
++#endif
++
++#if defined(copy_to_user)
++    /*
++     * NOTE: This function should not be called directly as it exists simply to
++     * work around copy_to_user being defined as a macro that calls access_ok.
++     */
++static inline int
++__pvr_copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++    return copy_to_user(to, from, n);
++}
++
++#undef copy_to_user
++#define copy_to_user(to, from, n) __pvr_copy_to_user(to, from, n)
++#endif
++#endif
++
++/*
++ * Linux 5.0 dropped the type argument.
++ *
++ * This is unused in at least Linux 3.4 and above for all architectures other
++ * than 'um' (User Mode Linux), which stopped using it in 4.2.
++ */
++#if defined(access_ok)
++    /*
++     * NOTE: This function should not be called directly as it exists simply to
++     * work around access_ok being defined as a macro.
++     */
++static inline int
++__pvr_access_ok_compat(int type, const void __user * addr, unsigned long size)
++{
++    return access_ok(type, addr, size);
++}
++
++#undef access_ok
++#define access_ok(addr, size) __pvr_access_ok_compat(0, addr, size)
++#else
++#define access_ok(addr, size) access_ok(0, addr, size)
++#endif
++
++#endif
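++
++/*
++ * Illustrative sketch (hypothetical helper): callers can use the
++ * two-argument 5.0+ form of access_ok() unconditionally; on older kernels
++ * the definitions above supply the legacy "type" argument.
++ */
++static inline int pvr_example_check_user_range(const void __user *addr,
++                                               unsigned long size)
++{
++    return access_ok(addr, size) ? 0 : -EFAULT;
++}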
++
++/*
++ * Before v5.8, "struct mm_struct" had a semaphore named "mmap_sem", which
++ * v5.8 renamed to "mmap_lock". New APIs for taking this lock are also
++ * provided from v5.8 onwards.
++ */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
++
++#define mmap_write_lock(mm) down_write(&mm->mmap_sem)
++#define mmap_write_unlock(mm) up_write(&mm->mmap_sem)
++
++#define mmap_read_lock(mm) down_read(&mm->mmap_sem)
++#define mmap_read_unlock(mm) up_read(&mm->mmap_sem)
++
++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) */
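++
++/*
++ * Illustrative sketch (hypothetical helper): code written against the v5.8+
++ * locking helpers works unchanged on older kernels, where the macros above
++ * fall back to taking mm->mmap_sem directly.
++ */
++static inline struct vm_area_struct *
++pvr_example_find_vma(struct mm_struct *mm, unsigned long address)
++{
++    struct vm_area_struct *vma;
++
++    mmap_read_lock(mm);
++    vma = find_vma(mm, address);
++    mmap_read_unlock(mm);
++
++    /* NOTE: the VMA may be stale once the lock is dropped; sketch only. */
++    return vma;
++}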
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++#define drm_gem_object_put(obj) drm_gem_object_unreference_unlocked(obj)
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0))
++#define drm_gem_object_put(obj) drm_gem_object_put_unlocked(obj)
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
++
++#define drm_prime_pages_to_sg(dev, pages, nr_pages) \
++    drm_prime_pages_to_sg(pages, nr_pages)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) */
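++
++/*
++ * Illustrative sketch (hypothetical helper): release paths can call the
++ * modern drm_gem_object_put() everywhere; the definitions above route it
++ * to the unlocked variants on pre-5.9 kernels.
++ */
++static inline void pvr_example_release_obj(struct drm_gem_object *obj)
++{
++    if (obj)
++        drm_gem_object_put(obj);
++}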
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))
++
++struct dma_buf_map {
++    void *vaddr;
++};
++
++#define dma_buf_vmap(dmabuf, map) \
++    ({ \
++        (map)->vaddr = dma_buf_vmap(dmabuf); \
++        (map)->vaddr ? 0 : ((dmabuf) && (dmabuf)->ops->vmap) ? -ENOMEM : -EINVAL; \
++    })
++
++#define dma_buf_vunmap(dmabuf, map) \
++    ({ \
++        dma_buf_vunmap(dmabuf, (map)->vaddr); \
++        (map)->vaddr = NULL; \
++    })
++
++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0))
++
++#define drm_prime_sg_to_page_array(sgt, pages, npages) \
++    drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, npages)
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)) */
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0))
++
++#define drm_gem_plane_helper_prepare_fb drm_gem_fb_prepare_fb
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)) */
++
++/*
++ * Linux 5.11 renamed the privileged uaccess routines for arm64, and the
++ * Android v5.10 kernel merged the change as well. These routines are only
++ * used for arm64, so CONFIG_ARM64 testing can be ignored.
++ */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) || \
++    ((LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)) && !defined(ANDROID))
++#define uaccess_enable_privileged() uaccess_enable()
++#define uaccess_disable_privileged() uaccess_disable()
++#endif
++
++#endif /* __KERNEL_COMPATIBILITY_H__ */
+diff --git a/drivers/gpu/drm/img-rogue/kernel_config_compatibility.h b/drivers/gpu/drm/img-rogue/kernel_config_compatibility.h
+new file mode 100644
+index 000000000000..63effd65ecf9
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/kernel_config_compatibility.h
+@@ -0,0 +1,54 @@
++/*************************************************************************/ /*!
++@Title          Kernel config compatibility define options
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    This file is exclusively for Linux config kernel options.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef __KERNEL_CONFIG_COMPATIBILITY_H__
++#define __KERNEL_CONFIG_COMPATIBILITY_H__
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
++#ifdef SUPPORT_DRM_FBDEV_EMULATION
++#define CONFIG_DRM_FBDEV_EMULATION
++#endif
++#endif
++
++#endif /* __KERNEL_CONFIG_COMPATIBILITY_H__ */
+diff --git a/drivers/gpu/drm/img-rogue/kernel_nospec.h b/drivers/gpu/drm/img-rogue/kernel_nospec.h
+new file mode 100644
+index 000000000000..e27a3ebc2ac6
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/kernel_nospec.h
+@@ -0,0 +1,71 @@
++/*************************************************************************/ /*!
++@Title          Macro to limit CPU speculative execution in kernel code
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Per-version macros to allow code to seamlessly use older kernel
++                versions
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef __KERNEL_NOSPEC_H__
++#define __KERNEL_NOSPEC_H__
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 2) || \
++     (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) && \
++      LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 18)) || \
++     (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && \
++      LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 81)) || \
++     (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && \
++      LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 118)))
++#include <linux/kernel.h>
++#include <linux/bug.h>
++#include <linux/nospec.h>
++#else
++#define array_index_nospec(index, size) (index)
++#endif
++
++/*
++ * For Ubuntu kernels, the features available for a given Linux version code
++ * may not match those in upstream kernels. This is the case for the
++ * availability of the array_index_nospec macro.
++ */
++#if !defined(array_index_nospec)
++#define array_index_nospec(index, size) (index)
++#endif
++
++#endif /* __KERNEL_NOSPEC_H__ */
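++
++/*
++ * Illustrative sketch (hypothetical helper): the canonical pattern is to
++ * bounds-check an untrusted index first, then clamp it with
++ * array_index_nospec() so it cannot be used speculatively out of range.
++ * On kernels without the helper, the fallback above makes the clamp a no-op.
++ */
++static inline int pvr_example_table_lookup(const int *table,
++                                           unsigned int nr_entries,
++                                           unsigned int untrusted_index)
++{
++    if (untrusted_index >= nr_entries)
++        return -EINVAL;
++
++    untrusted_index = array_index_nospec(untrusted_index, nr_entries);
++    return table[untrusted_index];
++}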
+diff --git a/drivers/gpu/drm/img-rogue/kernel_types.h b/drivers/gpu/drm/img-rogue/kernel_types.h
+new file mode 100644
+index 000000000000..c3305102fc20
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/kernel_types.h
+@@ -0,0 +1,137 @@
++/*************************************************************************/ /*!
++@Title          C99-compatible types and definitions for Linux kernel code
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include <linux/kernel.h>
++
++/* Limits of specified-width integer types */
++
++/* S8_MIN, etc. were added in kernel version 3.14. The other versions are for
++ * earlier kernels. They can be removed once older kernels don't need to be
++ * supported.
++ */
++#ifdef S8_MIN
++    #define INT8_MIN S8_MIN
++#else
++    #define INT8_MIN (-128)
++#endif
++
++#ifdef S8_MAX
++    #define INT8_MAX S8_MAX
++#else
++    #define INT8_MAX 127
++#endif
++
++#ifdef U8_MAX
++    #define UINT8_MAX U8_MAX
++#else
++    #define UINT8_MAX 0xFF
++#endif
++
++#ifdef S16_MIN
++    #define INT16_MIN S16_MIN
++#else
++    #define INT16_MIN (-32768)
++#endif
++
++#ifdef S16_MAX
++    #define INT16_MAX S16_MAX
++#else
++    #define INT16_MAX 32767
++#endif
++
++#ifdef U16_MAX
++    #define UINT16_MAX U16_MAX
++#else
++    #define UINT16_MAX 0xFFFF
++#endif
++
++#ifdef S32_MIN
++    #define INT32_MIN S32_MIN
++#else
++    #define INT32_MIN (-2147483647 - 1)
++#endif
++
++#ifdef S32_MAX
++    #define INT32_MAX S32_MAX
++#else
++    #define INT32_MAX 2147483647
++#endif
++
++#ifdef U32_MAX
++    #define UINT32_MAX U32_MAX
++#else
++    #define UINT32_MAX 0xFFFFFFFF
++#endif
++
++#ifdef S64_MIN
++    #define INT64_MIN S64_MIN
++#else
++    #define INT64_MIN (-9223372036854775807LL)
++#endif
++
++#ifdef S64_MAX
++    #define INT64_MAX S64_MAX
++#else
++    #define INT64_MAX 9223372036854775807LL
++#endif
++
++#ifdef U64_MAX
++    #define UINT64_MAX U64_MAX
++#else
++    #define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL
++#endif
++
++/* Macros for integer constants */
++#define INT8_C S8_C
++#define UINT8_C U8_C
++#define INT16_C S16_C
++#define UINT16_C U16_C
++#define INT32_C S32_C
++#define UINT32_C U32_C
++#define INT64_C S64_C
++#define UINT64_C U64_C
++
++/* Format conversion of integer types */
++
++#define PRIX64 "llX"
++#define PRIx64 "llx"
++#define PRIu64 "llu"
++#define PRId64 "lld"
+diff --git a/drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h b/drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h
+new file mode 100644
+index 000000000000..0aa00beb115c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/km/rgx_bvnc_defs_km.h
+@@ -0,0 +1,377 @@
++/*************************************************************************/ /*!
++@Title          Hardware definition file rgx_bvnc_defs_km.h
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/****************************************************************************** ++ * Auto generated file by rgxbvnc_tablegen.py * ++ * This file should not be edited manually * ++ *****************************************************************************/ ++ ++#ifndef RGX_BVNC_DEFS_KM_H ++#define RGX_BVNC_DEFS_KM_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++#if defined(RGX_BVNC_DEFS_UM_H) ++#error "This file should not be included in conjunction with rgx_bvnc_defs_um.h" ++#endif ++ ++#define BVNC_FIELD_WIDTH (16U) ++ ++#define PVR_ARCH_NAME "rogue" ++ ++ ++/****************************************************************************** ++ * Mask and bit-position macros for features without values ++ *****************************************************************************/ ++ ++#define RGX_FEATURE_AXI_ACELITE_POS (0U) ++#define RGX_FEATURE_AXI_ACELITE_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) ++ ++#define RGX_FEATURE_CLUSTER_GROUPING_POS (1U) ++#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) ++ ++#define RGX_FEATURE_COMPUTE_POS (2U) ++#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) ++ ++#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (3U) ++#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) ++ ++#define RGX_FEATURE_COMPUTE_ONLY_POS (4U) ++#define RGX_FEATURE_COMPUTE_ONLY_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) ++ ++#define RGX_FEATURE_COMPUTE_OVERLAP_POS (5U) ++#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) ++ ++#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (6U) ++#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) ++ ++#define RGX_FEATURE_COREID_PER_OS_POS (7U) ++#define RGX_FEATURE_COREID_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) ++ ++#define RGX_FEATURE_DUST_POWER_ISLAND_S7_POS (8U) ++#define RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) ++ ++#define RGX_FEATURE_DYNAMIC_DUST_POWER_POS (9U) ++#define RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) ++ ++#define RGX_FEATURE_FASTRENDER_DM_POS (10U) ++#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) ++ ++#define 
RGX_FEATURE_GPU_MULTICORE_SUPPORT_POS (11U) ++#define RGX_FEATURE_GPU_MULTICORE_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) ++ ++#define RGX_FEATURE_GPU_VIRTUALISATION_POS (12U) ++#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) ++ ++#define RGX_FEATURE_GS_RTA_SUPPORT_POS (13U) ++#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) ++ ++#define RGX_FEATURE_IRQ_PER_OS_POS (14U) ++#define RGX_FEATURE_IRQ_PER_OS_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) ++ ++#define RGX_FEATURE_META_DMA_POS (15U) ++#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) ++ ++#define RGX_FEATURE_MIPS_POS (16U) ++#define RGX_FEATURE_MIPS_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) ++ ++#define RGX_FEATURE_PBE2_IN_XE_POS (17U) ++#define RGX_FEATURE_PBE2_IN_XE_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) ++ ++#define RGX_FEATURE_PBE_CHECKSUM_2D_POS (18U) ++#define RGX_FEATURE_PBE_CHECKSUM_2D_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) ++ ++#define RGX_FEATURE_PBVNC_COREID_REG_POS (19U) ++#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000000080000)) ++ ++#define RGX_FEATURE_PDS_PER_DUST_POS (20U) ++#define RGX_FEATURE_PDS_PER_DUST_BIT_MASK (IMG_UINT64_C(0x0000000000100000)) ++ ++#define RGX_FEATURE_PDS_TEMPSIZE8_POS (21U) ++#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000000200000)) ++ ++#define RGX_FEATURE_PERFBUS_POS (22U) ++#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000000400000)) ++ ++#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (23U) ++#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000000000800000)) ++ ++#define RGX_FEATURE_PM_MMU_VFP_POS (24U) ++#define RGX_FEATURE_PM_MMU_VFP_BIT_MASK (IMG_UINT64_C(0x0000000001000000)) ++ ++#define RGX_FEATURE_RISCV_FW_PROCESSOR_POS (25U) ++#define RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK (IMG_UINT64_C(0x0000000002000000)) ++ ++#define RGX_FEATURE_ROGUEXE_POS (26U) ++#define RGX_FEATURE_ROGUEXE_BIT_MASK (IMG_UINT64_C(0x0000000004000000)) ++ ++#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (27U) ++#define RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000000008000000)) ++ ++#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (28U) ++#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000010000000)) ++ ++#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (29U) ++#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK (IMG_UINT64_C(0x0000000020000000)) ++ ++#define RGX_FEATURE_SIGNAL_SNOOPING_POS (30U) ++#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000000040000000)) ++ ++#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_POS (31U) ++#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK (IMG_UINT64_C(0x0000000080000000)) ++ ++#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_POS (32U) ++#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_BIT_MASK (IMG_UINT64_C(0x0000000100000000)) ++ ++#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_POS (33U) ++#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_BIT_MASK (IMG_UINT64_C(0x0000000200000000)) ++ ++#define RGX_FEATURE_SINGLE_BIF_POS (34U) ++#define RGX_FEATURE_SINGLE_BIF_BIT_MASK (IMG_UINT64_C(0x0000000400000000)) ++ ++#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_POS (35U) ++#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK (IMG_UINT64_C(0x0000000800000000)) ++ ++#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE_POS (36U) ++#define RGX_FEATURE_SLC_SIZE_CONFIGURABLE_BIT_MASK (IMG_UINT64_C(0x0000001000000000)) ++ ++#define 
RGX_FEATURE_SLC_VIVT_POS (37U) ++#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000002000000000)) ++ ++#define RGX_FEATURE_SOC_TIMER_POS (38U) ++#define RGX_FEATURE_SOC_TIMER_BIT_MASK (IMG_UINT64_C(0x0000004000000000)) ++ ++#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (39U) ++#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000008000000000)) ++ ++#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (40U) ++#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000010000000000)) ++ ++#define RGX_FEATURE_TESSELLATION_POS (41U) ++#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0000020000000000)) ++ ++#define RGX_FEATURE_TFBC_DELTA_CORRELATION_POS (42U) ++#define RGX_FEATURE_TFBC_DELTA_CORRELATION_BIT_MASK (IMG_UINT64_C(0x0000040000000000)) ++ ++#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT_POS (43U) ++#define RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK (IMG_UINT64_C(0x0000080000000000)) ++ ++#define RGX_FEATURE_TFBC_NATIVE_YUV10_POS (44U) ++#define RGX_FEATURE_TFBC_NATIVE_YUV10_BIT_MASK (IMG_UINT64_C(0x0000100000000000)) ++ ++#define RGX_FEATURE_TILE_REGION_PROTECTION_POS (45U) ++#define RGX_FEATURE_TILE_REGION_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0000200000000000)) ++ ++#define RGX_FEATURE_TLA_POS (46U) ++#define RGX_FEATURE_TLA_BIT_MASK (IMG_UINT64_C(0x0000400000000000)) ++ ++#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (47U) ++#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000800000000000)) ++ ++#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (48U) ++#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0001000000000000)) ++ ++#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS (49U) ++#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK (IMG_UINT64_C(0x0002000000000000)) ++ ++#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (50U) ++#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0004000000000000)) ++ ++#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (51U) ++#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0008000000000000)) ++ ++#define RGX_FEATURE_WATCHDOG_TIMER_POS (52U) ++#define RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK (IMG_UINT64_C(0x0010000000000000)) ++ ++#define RGX_FEATURE_WORKGROUP_PROTECTION_POS (53U) ++#define RGX_FEATURE_WORKGROUP_PROTECTION_BIT_MASK (IMG_UINT64_C(0x0020000000000000)) ++ ++#define RGX_FEATURE_XE_MEMORY_HIERARCHY_POS (54U) ++#define RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0040000000000000)) ++ ++#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS (55U) ++#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0080000000000000)) ++ ++ ++/****************************************************************************** ++ * Defines for each feature with values used ++ * for handling the corresponding values ++ *****************************************************************************/ ++ ++#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (2) ++#define RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX (2) ++#define RGX_FEATURE_FBCDC_MAX_VALUE_IDX (4) ++#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (6) ++#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (4) ++#define RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX (2) ++#define RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX (2) ++#define RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX (3) ++#define RGX_FEATURE_META_MAX_VALUE_IDX (4) ++#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (1) ++#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (3) ++#define 
RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (1) ++#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (5) ++#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (9) ++#define RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX (3) ++#define RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX (3) ++#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (4) ++#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (1) ++#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (1) ++#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX (3) ++#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (4) ++#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2) ++#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (6) ++#define RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX (3) ++#define RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX (3) ++#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2) ++#define RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX (2) ++#define RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX (2) ++#define RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX (2) ++#define RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX (2) ++ ++/****************************************************************************** ++ * Features with values indexes ++ *****************************************************************************/ ++ ++typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ { ++ RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX, ++ RGX_FEATURE_ECC_RAMS_IDX, ++ RGX_FEATURE_FBCDC_IDX, ++ RGX_FEATURE_FBCDC_ALGORITHM_IDX, ++ RGX_FEATURE_FBCDC_ARCHITECTURE_IDX, ++ RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_IDX, ++ RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_IDX, ++ RGX_FEATURE_LAYOUT_MARS_IDX, ++ RGX_FEATURE_META_IDX, ++ RGX_FEATURE_META_COREMEM_BANKS_IDX, ++ RGX_FEATURE_META_COREMEM_SIZE_IDX, ++ RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX, ++ RGX_FEATURE_NUM_CLUSTERS_IDX, ++ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX, ++ RGX_FEATURE_NUM_OSIDS_IDX, ++ RGX_FEATURE_NUM_RASTER_PIPES_IDX, ++ RGX_FEATURE_PHYS_BUS_WIDTH_IDX, ++ RGX_FEATURE_SCALABLE_TE_ARCH_IDX, ++ RGX_FEATURE_SCALABLE_VCE_IDX, ++ RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_IDX, ++ RGX_FEATURE_SLC_BANKS_IDX, ++ RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX, ++ RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX, ++ RGX_FEATURE_TILE_SIZE_X_IDX, ++ RGX_FEATURE_TILE_SIZE_Y_IDX, ++ RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX, ++ RGX_FEATURE_XE_ARCHITECTURE_IDX, ++ RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_IDX, ++ RGX_FEATURE_XPU_MAX_SLAVES_IDX, ++ RGX_FEATURE_XPU_REGISTER_BROADCAST_IDX, ++ RGX_FEATURE_WITH_VALUES_MAX_IDX, ++} RGX_FEATURE_WITH_VALUE_INDEX; ++ ++ ++/****************************************************************************** ++ * Mask and bit-position macros for ERNs and BRNs ++ *****************************************************************************/ ++ ++#define FIX_HW_BRN_38344_POS (0U) ++#define FIX_HW_BRN_38344_BIT_MASK (IMG_UINT64_C(0x0000000000000001)) ++ ++#define HW_ERN_42290_POS (1U) ++#define HW_ERN_42290_BIT_MASK (IMG_UINT64_C(0x0000000000000002)) ++ ++#define FIX_HW_BRN_42321_POS (2U) ++#define FIX_HW_BRN_42321_BIT_MASK (IMG_UINT64_C(0x0000000000000004)) ++ ++#define HW_ERN_42606_POS (3U) ++#define HW_ERN_42606_BIT_MASK (IMG_UINT64_C(0x0000000000000008)) ++ ++#define HW_ERN_46066_POS (4U) ++#define HW_ERN_46066_BIT_MASK (IMG_UINT64_C(0x0000000000000010)) ++ ++#define HW_ERN_47025_POS (5U) ++#define HW_ERN_47025_BIT_MASK (IMG_UINT64_C(0x0000000000000020)) ++ ++#define HW_ERN_50539_POS (6U) ++#define HW_ERN_50539_BIT_MASK (IMG_UINT64_C(0x0000000000000040)) ++ ++#define FIX_HW_BRN_50767_POS (7U) ++#define 
FIX_HW_BRN_50767_BIT_MASK (IMG_UINT64_C(0x0000000000000080)) ++ ++#define HW_ERN_57596_POS (8U) ++#define HW_ERN_57596_BIT_MASK (IMG_UINT64_C(0x0000000000000100)) ++ ++#define FIX_HW_BRN_60084_POS (9U) ++#define FIX_HW_BRN_60084_BIT_MASK (IMG_UINT64_C(0x0000000000000200)) ++ ++#define HW_ERN_61389_POS (10U) ++#define HW_ERN_61389_BIT_MASK (IMG_UINT64_C(0x0000000000000400)) ++ ++#define FIX_HW_BRN_61450_POS (11U) ++#define FIX_HW_BRN_61450_BIT_MASK (IMG_UINT64_C(0x0000000000000800)) ++ ++#define FIX_HW_BRN_63142_POS (12U) ++#define FIX_HW_BRN_63142_BIT_MASK (IMG_UINT64_C(0x0000000000001000)) ++ ++#define FIX_HW_BRN_63553_POS (13U) ++#define FIX_HW_BRN_63553_BIT_MASK (IMG_UINT64_C(0x0000000000002000)) ++ ++#define FIX_HW_BRN_64502_POS (14U) ++#define FIX_HW_BRN_64502_BIT_MASK (IMG_UINT64_C(0x0000000000004000)) ++ ++#define FIX_HW_BRN_65101_POS (15U) ++#define FIX_HW_BRN_65101_BIT_MASK (IMG_UINT64_C(0x0000000000008000)) ++ ++#define FIX_HW_BRN_65273_POS (16U) ++#define FIX_HW_BRN_65273_BIT_MASK (IMG_UINT64_C(0x0000000000010000)) ++ ++#define HW_ERN_66622_POS (17U) ++#define HW_ERN_66622_BIT_MASK (IMG_UINT64_C(0x0000000000020000)) ++ ++#define FIX_HW_BRN_68186_POS (18U) ++#define FIX_HW_BRN_68186_BIT_MASK (IMG_UINT64_C(0x0000000000040000)) ++ ++/* Macro used for padding the unavailable values for features with values */ ++#define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFEU) ++ ++/* Macro used for marking a feature with value as disabled for a specific bvnc */ ++#define RGX_FEATURE_VALUE_DISABLED (0xFFFFFFFFU) ++ ++#endif /* RGX_BVNC_DEFS_KM_H */ +diff --git a/drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h b/drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h +new file mode 100644 +index 000000000000..4044507ca3e7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/km/rgx_bvnc_table_km.h +@@ -0,0 +1,462 @@ ++/*************************************************************************/ /*! ++@Title Hardware definition file rgx_bvnc_table_km.h ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/****************************************************************************** ++ * Auto generated file by rgxbvnc_tablegen.py * ++ * This file should not be edited manually * ++ *****************************************************************************/ ++ ++#ifndef RGX_BVNC_TABLE_KM_H ++#define RGX_BVNC_TABLE_KM_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "rgxdefs_km.h" ++#include "rgx_bvnc_defs_km.h" ++ ++#ifndef RGXBVNC_C ++#error "This file should only be included from rgxbvnc.c" ++#endif ++ ++#if defined(RGX_BVNC_TABLE_UM_H) ++#error "This file should not be included in conjunction with rgx_bvnc_table_um.h" ++#endif ++ ++ ++/****************************************************************************** ++ * Arrays for each feature with values used ++ * for handling the corresponding values ++ *****************************************************************************/ ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_ECC_RAMS_values[RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_values[RGX_FEATURE_FBCDC_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, 4, 50, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 50, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 7, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values[RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_LAYOUT_MARS_values[RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 1, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, LTP217, LTP218, MTP218, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = 
{(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 7, 8, 12, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_NUM_OSIDS_values[RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, 36, 40, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values[RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 512, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, 8, 16, 64, 128, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_X_values[RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 32, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_TILE_SIZE_Y_values[RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 16, 32, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_XE_ARCHITECTURE_values[RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_values[RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 19, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_XPU_MAX_SLAVES_values[RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 3, }; ++ ++static const IMG_UINT16 aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values[RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, }; ++ ++ ++/****************************************************************************** ++ * Table contains pointers to each feature value array for features that have ++ * values. 
++ * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h ++ *****************************************************************************/ ++ ++static const IMG_UINT16 * const gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = { ++ aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values, ++ aui16_RGX_FEATURE_ECC_RAMS_values, ++ aui16_RGX_FEATURE_FBCDC_values, ++ aui16_RGX_FEATURE_FBCDC_ALGORITHM_values, ++ aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values, ++ aui16_RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_values, ++ aui16_RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_values, ++ aui16_RGX_FEATURE_LAYOUT_MARS_values, ++ aui16_RGX_FEATURE_META_values, ++ aui16_RGX_FEATURE_META_COREMEM_BANKS_values, ++ aui16_RGX_FEATURE_META_COREMEM_SIZE_values, ++ aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values, ++ aui16_RGX_FEATURE_NUM_CLUSTERS_values, ++ aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values, ++ aui16_RGX_FEATURE_NUM_OSIDS_values, ++ aui16_RGX_FEATURE_NUM_RASTER_PIPES_values, ++ aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values, ++ aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values, ++ aui16_RGX_FEATURE_SCALABLE_VCE_values, ++ aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values, ++ aui16_RGX_FEATURE_SLC_BANKS_values, ++ aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values, ++ aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values, ++ aui16_RGX_FEATURE_TILE_SIZE_X_values, ++ aui16_RGX_FEATURE_TILE_SIZE_Y_values, ++ aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values, ++ aui16_RGX_FEATURE_XE_ARCHITECTURE_values, ++ aui16_RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_values, ++ aui16_RGX_FEATURE_XPU_MAX_SLAVES_values, ++ aui16_RGX_FEATURE_XPU_REGISTER_BROADCAST_values, ++}; ++ ++ ++/****************************************************************************** ++ * Array containing the lengths of the arrays containing the values. 
++ * Used for indexing the aui16__values defined upwards ++ *****************************************************************************/ ++ ++ ++static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = { ++ RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX, ++ RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX, ++ RGX_FEATURE_FBCDC_MAX_VALUE_IDX, ++ RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX, ++ RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX, ++ RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_MAX_VALUE_IDX, ++ RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_MAX_VALUE_IDX, ++ RGX_FEATURE_LAYOUT_MARS_MAX_VALUE_IDX, ++ RGX_FEATURE_META_MAX_VALUE_IDX, ++ RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX, ++ RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX, ++ RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX, ++ RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX, ++ RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX, ++ RGX_FEATURE_NUM_OSIDS_MAX_VALUE_IDX, ++ RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX, ++ RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX, ++ RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX, ++ RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX, ++ RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX, ++ RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX, ++ RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX, ++ RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX, ++ RGX_FEATURE_TILE_SIZE_X_MAX_VALUE_IDX, ++ RGX_FEATURE_TILE_SIZE_Y_MAX_VALUE_IDX, ++ RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX, ++ RGX_FEATURE_XE_ARCHITECTURE_MAX_VALUE_IDX, ++ RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_MAX_VALUE_IDX, ++ RGX_FEATURE_XPU_MAX_SLAVES_MAX_VALUE_IDX, ++ RGX_FEATURE_XPU_REGISTER_BROADCAST_MAX_VALUE_IDX, ++}; ++ ++ ++/****************************************************************************** ++ * Bit-positions for features with values ++ *****************************************************************************/ ++ ++static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = { ++ (0U), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */ ++ (2U), /* RGX_FEATURE_ECC_RAMS_POS */ ++ (4U), /* RGX_FEATURE_FBCDC_POS */ ++ (7U), /* RGX_FEATURE_FBCDC_ALGORITHM_POS */ ++ (10U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */ ++ (13U), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_POS */ ++ (15U), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_POS */ ++ (17U), /* RGX_FEATURE_LAYOUT_MARS_POS */ ++ (19U), /* RGX_FEATURE_META_POS */ ++ (22U), /* RGX_FEATURE_META_COREMEM_BANKS_POS */ ++ (23U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */ ++ (25U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */ ++ (26U), /* RGX_FEATURE_NUM_CLUSTERS_POS */ ++ (29U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */ ++ (33U), /* RGX_FEATURE_NUM_OSIDS_POS */ ++ (35U), /* RGX_FEATURE_NUM_RASTER_PIPES_POS */ ++ (37U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */ ++ (40U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */ ++ (41U), /* RGX_FEATURE_SCALABLE_VCE_POS */ ++ (42U), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */ ++ (44U), /* RGX_FEATURE_SLC_BANKS_POS */ ++ (47U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */ ++ (49U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */ ++ (52U), /* RGX_FEATURE_TILE_SIZE_X_POS */ ++ (54U), /* RGX_FEATURE_TILE_SIZE_Y_POS */ ++ (56U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */ ++ (58U), /* RGX_FEATURE_XE_ARCHITECTURE_POS */ ++ (60U), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_POS */ ++ (62U), /* RGX_FEATURE_XPU_MAX_SLAVES_POS */ ++ (64U), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_POS */ ++}; ++ ++ ++/****************************************************************************** ++ * Bit-masks for features with values ++ 
*****************************************************************************/ ++ ++static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = { ++ (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */ ++ (IMG_UINT64_C(0x000000000000000C)), /* RGX_FEATURE_ECC_RAMS_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000000000070)), /* RGX_FEATURE_FBCDC_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000000000380)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000000001C00)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000000006000)), /* RGX_FEATURE_FBC_MAX_DEFAULT_DESCRIPTORS_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000000018000)), /* RGX_FEATURE_FBC_MAX_LARGE_DESCRIPTORS_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000000060000)), /* RGX_FEATURE_LAYOUT_MARS_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000000380000)), /* RGX_FEATURE_META_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000000400000)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000001800000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000002000000)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */ ++ (IMG_UINT64_C(0x000000001C000000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */ ++ (IMG_UINT64_C(0x00000001E0000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000600000000)), /* RGX_FEATURE_NUM_OSIDS_BIT_MASK */ ++ (IMG_UINT64_C(0x0000001800000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */ ++ (IMG_UINT64_C(0x000000E000000000)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */ ++ (IMG_UINT64_C(0x0000010000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */ ++ (IMG_UINT64_C(0x0000020000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */ ++ (IMG_UINT64_C(0x00000C0000000000)), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK */ ++ (IMG_UINT64_C(0x0000700000000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */ ++ (IMG_UINT64_C(0x0001800000000000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */ ++ (IMG_UINT64_C(0x000E000000000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */ ++ (IMG_UINT64_C(0x0030000000000000)), /* RGX_FEATURE_TILE_SIZE_X_BIT_MASK */ ++ (IMG_UINT64_C(0x00C0000000000000)), /* RGX_FEATURE_TILE_SIZE_Y_BIT_MASK */ ++ (IMG_UINT64_C(0x0300000000000000)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */ ++ (IMG_UINT64_C(0x0C00000000000000)), /* RGX_FEATURE_XE_ARCHITECTURE_BIT_MASK */ ++ (IMG_UINT64_C(0x3000000000000000)), /* RGX_FEATURE_XPU_MAX_REGBANKS_ADDR_WIDTH_BIT_MASK */ ++ (IMG_UINT64_C(0xC000000000000000)), /* RGX_FEATURE_XPU_MAX_SLAVES_BIT_MASK */ ++ (IMG_UINT64_C(0x0000000000000003)), /* RGX_FEATURE_XPU_REGISTER_BROADCAST_BIT_MASK */ ++}; ++ ++ ++/****************************************************************************** ++ * Table mapping bitmasks for features and features with values ++ *****************************************************************************/ ++ ++ ++static const IMG_UINT64 gaFeatures[][4]= ++{ ++ { IMG_UINT64_C(0x000100000002001e), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x01aa8068689aa481), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.2.30 */ ++ { IMG_UINT64_C(0x0001000000040005), IMG_UINT64_C(0x0000400000402024), IMG_UINT64_C(0x01aa80686c9aa481), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.4.5 */ ++ { IMG_UINT64_C(0x0001000000040013), IMG_UINT64_C(0x0000400000402025), IMG_UINT64_C(0x01aa80686c9aa481), IMG_UINT64_C(0x0000000000000000) }, /* 1.0.4.19 */ ++ { IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x0082c04000c0222f), IMG_UINT64_C(0x01aa8068e912a901), 
IMG_UINT64_C(0x0000000000000000) }, /* 4.0.2.51 */ ++ { IMG_UINT64_C(0x000400000002003a), IMG_UINT64_C(0x0082c04000c0322f), IMG_UINT64_C(0x01aa806ce912a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.2.58 */ ++ { IMG_UINT64_C(0x0004000000040037), IMG_UINT64_C(0x0082c04000c0222e), IMG_UINT64_C(0x01aa8068ed12a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.4.55 */ ++ { IMG_UINT64_C(0x000400000006003e), IMG_UINT64_C(0x0082c04000c0322f), IMG_UINT64_C(0x01aab074f112a901), IMG_UINT64_C(0x0000000000000000) }, /* 4.0.6.62 */ ++ { IMG_UINT64_C(0x000500000001002e), IMG_UINT64_C(0x0000004004402205), IMG_UINT64_C(0x05a69068248aa501), IMG_UINT64_C(0x0000000000000000) }, /* 5.0.1.46 */ ++ { IMG_UINT64_C(0x0006000000040023), IMG_UINT64_C(0x0082c04000c0222f), IMG_UINT64_C(0x01aa8068ed12a901), IMG_UINT64_C(0x0000000000000000) }, /* 6.0.4.35 */ ++ { IMG_UINT64_C(0x000f000000010040), IMG_UINT64_C(0x0000004004403205), IMG_UINT64_C(0x05a8906c448aa501), IMG_UINT64_C(0x0000000000000000) }, /* 15.0.1.64 */ ++ { IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0554942c44020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.21.16 */ ++ { IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c64020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.25 */ ++ { IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c84020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.30 */ ++ { IMG_UINT64_C(0x0016000000360026), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944c84020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.38 */ ++ { IMG_UINT64_C(0x001600000036014a), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558942c8402a591), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.54.330 */ ++ { IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944cc4020001), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.104.18 */ ++ { IMG_UINT64_C(0x00160000006800da), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558944cc402a591), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.104.218 */ ++ { IMG_UINT64_C(0x0016000000d0013e), IMG_UINT64_C(0x000000c5844b3025), IMG_UINT64_C(0x0558a4550802a591), IMG_UINT64_C(0x0000000000000000) }, /* 22.0.208.318 */ ++ { IMG_UINT64_C(0x00180000003600cc), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x0558984c8402a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.54.204 */ ++ { IMG_UINT64_C(0x00180000006801f8), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x0558984ca402a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.104.504 */ ++ { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x055aa8550802a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.208.504 */ ++ { IMG_UINT64_C(0x0018000000d001f9), IMG_UINT64_C(0x004001c2844f7425), IMG_UINT64_C(0x055aa8550802a591), IMG_UINT64_C(0x0000000000000000) }, /* 24.0.208.505 */ ++ { IMG_UINT64_C(0x001d0000003400ca), IMG_UINT64_C(0x004181c2844f74a5), IMG_UINT64_C(0x0556984c4402a621), IMG_UINT64_C(0x0000000000000000) }, /* 29.0.52.202 */ ++ { IMG_UINT64_C(0x001d0000006c00d0), IMG_UINT64_C(0x004181c2844f74a5), IMG_UINT64_C(0x055aa854e802a621), IMG_UINT64_C(0x0000000000000000) }, /* 29.0.108.208 */ ++ { IMG_UINT64_C(0x00210000000b0003), IMG_UINT64_C(0x00400092844b5085), IMG_UINT64_C(0x0552984a24020001), IMG_UINT64_C(0x0000000000000000) }, /* 33.0.11.3 */ ++ { IMG_UINT64_C(0x0021000000160001), IMG_UINT64_C(0x004180c2854b70a5), IMG_UINT64_C(0x0556984c44020001), IMG_UINT64_C(0x0000000000000000) 
}, /* 33.0.22.1 */ ++ { IMG_UINT64_C(0x0024000000360067), IMG_UINT64_C(0x004180d2844b38a5), IMG_UINT64_C(0x0556984c8402aeb1), IMG_UINT64_C(0x0000000000000000) }, /* 36.0.54.103 */ ++ { IMG_UINT64_C(0x00240000003600b6), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984c8404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.54.182 */ ++ { IMG_UINT64_C(0x00240000003600b7), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984c8404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.54.183 */ ++ { IMG_UINT64_C(0x00240000006800b6), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984ca404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.104.182 */ ++ { IMG_UINT64_C(0x00240000006800b7), IMG_UINT64_C(0x004180d2844b78a5), IMG_UINT64_C(0x5556984ca404aeb1), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.104.183 */ ++ { IMG_UINT64_C(0x002400000068031c), IMG_UINT64_C(0x0071a0d2864a78a5), IMG_UINT64_C(0x5556984ca404aeb5), IMG_UINT64_C(0x0000000000000001) }, /* 36.0.104.796 */ ++}; ++ ++/****************************************************************************** ++ * Table mapping bitmasks for ERNs/BRNs ++ *****************************************************************************/ ++ ++ ++static const IMG_UINT64 gaErnsBrns[][2]= ++{ ++ { IMG_UINT64_C(0x0001002700040013), IMG_UINT64_C(0x0000000000000005) }, /* 1.39.4.19 */ ++ { IMG_UINT64_C(0x0001004b0002001e), IMG_UINT64_C(0x0000000000000004) }, /* 1.75.2.30 */ ++ { IMG_UINT64_C(0x0001005200040005), IMG_UINT64_C(0x0000000000000000) }, /* 1.82.4.5 */ ++ { IMG_UINT64_C(0x0004001f00040037), IMG_UINT64_C(0x000000000000108a) }, /* 4.31.4.55 */ ++ { IMG_UINT64_C(0x0004002800020033), IMG_UINT64_C(0x000000000000108a) }, /* 4.40.2.51 */ ++ { IMG_UINT64_C(0x0004002b0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.43.6.62 */ ++ { IMG_UINT64_C(0x0004002d0002003a), IMG_UINT64_C(0x000000000000500a) }, /* 4.45.2.58 */ ++ { IMG_UINT64_C(0x0004002e0006003e), IMG_UINT64_C(0x000000000000508a) }, /* 4.46.6.62 */ ++ { IMG_UINT64_C(0x000500090001002e), IMG_UINT64_C(0x0000000000000001) }, /* 5.9.1.46 */ ++ { IMG_UINT64_C(0x0006002200040023), IMG_UINT64_C(0x000000000000100a) }, /* 6.34.4.35 */ ++ { IMG_UINT64_C(0x000f000500010040), IMG_UINT64_C(0x0000000000004008) }, /* 15.5.1.64 */ ++ { IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x0000000000016b08) }, /* 22.30.54.25 */ ++ { IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x0000000000016b08) }, /* 22.40.54.30 */ ++ { IMG_UINT64_C(0x0016002e0036014a), IMG_UINT64_C(0x000000000001ea0a) }, /* 22.46.54.330 */ ++ { IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x0000000000016b08) }, /* 22.49.21.16 */ ++ { IMG_UINT64_C(0x001600430036001e), IMG_UINT64_C(0x0000000000016708) }, /* 22.67.54.30 */ ++ { IMG_UINT64_C(0x001600440036001e), IMG_UINT64_C(0x0000000000016508) }, /* 22.68.54.30 */ ++ { IMG_UINT64_C(0x00160056006800da), IMG_UINT64_C(0x000000000000e408) }, /* 22.86.104.218 */ ++ { IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x0000000000006508) }, /* 22.87.104.18 */ ++ { IMG_UINT64_C(0x0016006600360026), IMG_UINT64_C(0x0000000000006508) }, /* 22.102.54.38 */ ++ { IMG_UINT64_C(0x0016006800d0013e), IMG_UINT64_C(0x000000000000e40a) }, /* 22.104.208.318 */ ++ { IMG_UINT64_C(0x0016006900d0013e), IMG_UINT64_C(0x000000000000e40a) }, /* 22.105.208.318 */ ++ { IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x000000000002210a) }, /* 24.50.208.504 */ ++ { IMG_UINT64_C(0x0018003800d001f9), IMG_UINT64_C(0x000000000002210a) }, /* 24.56.208.505 */ ++ { IMG_UINT64_C(0x00180042003600cc), IMG_UINT64_C(0x000000000002210a) }, 
/* 24.66.54.204 */
++	{ IMG_UINT64_C(0x00180043006801f8), IMG_UINT64_C(0x000000000002210a) },	/* 24.67.104.504 */
++	{ IMG_UINT64_C(0x001d000e006c00d0), IMG_UINT64_C(0x000000000006212a) },	/* 29.14.108.208 */
++	{ IMG_UINT64_C(0x001d0013003400ca), IMG_UINT64_C(0x000000000006212a) },	/* 29.19.52.202 */
++	{ IMG_UINT64_C(0x0021000800160001), IMG_UINT64_C(0x000000000000212a) },	/* 33.8.22.1 */
++	{ IMG_UINT64_C(0x0021000f000b0003), IMG_UINT64_C(0x000000000000212a) },	/* 33.15.11.3 */
++	{ IMG_UINT64_C(0x00240032003600b6), IMG_UINT64_C(0x000000000000212a) },	/* 36.50.54.182 */
++	{ IMG_UINT64_C(0x00240034006800b6), IMG_UINT64_C(0x000000000000212a) },	/* 36.52.104.182 */
++	{ IMG_UINT64_C(0x002400350068031c), IMG_UINT64_C(0x000000000000012a) },	/* 36.53.104.796 */
++	{ IMG_UINT64_C(0x00240036003600b7), IMG_UINT64_C(0x000000000000212a) },	/* 36.54.54.183 */
++	{ IMG_UINT64_C(0x0024003700360067), IMG_UINT64_C(0x000000000000212a) },	/* 36.55.54.103 */
++	{ IMG_UINT64_C(0x00240038006800b7), IMG_UINT64_C(0x000000000000212a) },	/* 36.56.104.183 */
++};
++
++#if defined(DEBUG)
++
++#define FEATURE_NO_VALUES_NAMES_MAX_IDX (56)
++
++static const IMG_CHAR * const gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] =
++{
++	"AXI_ACELITE",
++	"CLUSTER_GROUPING",
++	"COMPUTE",
++	"COMPUTE_MORTON_CAPABLE",
++	"COMPUTE_ONLY",
++	"COMPUTE_OVERLAP",
++	"COMPUTE_OVERLAP_WITH_BARRIERS",
++	"COREID_PER_OS",
++	"DUST_POWER_ISLAND_S7",
++	"DYNAMIC_DUST_POWER",
++	"FASTRENDER_DM",
++	"GPU_MULTICORE_SUPPORT",
++	"GPU_VIRTUALISATION",
++	"GS_RTA_SUPPORT",
++	"IRQ_PER_OS",
++	"META_DMA",
++	"MIPS",
++	"PBE2_IN_XE",
++	"PBE_CHECKSUM_2D",
++	"PBVNC_COREID_REG",
++	"PDS_PER_DUST",
++	"PDS_TEMPSIZE8",
++	"PERFBUS",
++	"PERF_COUNTER_BATCH",
++	"PM_MMU_VFP",
++	"RISCV_FW_PROCESSOR",
++	"ROGUEXE",
++	"S7_CACHE_HIERARCHY",
++	"S7_TOP_INFRASTRUCTURE",
++	"SCALABLE_VDM_GPP",
++	"SIGNAL_SNOOPING",
++	"SIMPLE_INTERNAL_PARAMETER_FORMAT",
++	"SIMPLE_INTERNAL_PARAMETER_FORMAT_V1",
++	"SIMPLE_INTERNAL_PARAMETER_FORMAT_V2",
++	"SINGLE_BIF",
++	"SLC_HYBRID_CACHELINE_64_128",
++	"SLC_SIZE_CONFIGURABLE",
++	"SLC_VIVT",
++	"SOC_TIMER",
++	"SYS_BUS_SECURE_RESET",
++	"TDM_PDS_CHECKSUM",
++	"TESSELLATION",
++	"TFBC_DELTA_CORRELATION",
++	"TFBC_LOSSY_37_PERCENT",
++	"TFBC_NATIVE_YUV10",
++	"TILE_REGION_PROTECTION",
++	"TLA",
++	"TPU_CEM_DATAMASTER_GLOBAL_REGISTERS",
++	"TPU_DM_GLOBAL_REGISTERS",
++	"TPU_FILTERING_MODE_CONTROL",
++	"VDM_DRAWINDIRECT",
++	"VDM_OBJECT_LEVEL_LLS",
++	"WATCHDOG_TIMER",
++	"WORKGROUP_PROTECTION",
++	"XE_MEMORY_HIERARCHY",
++	"XT_TOP_INFRASTRUCTURE",
++};
++
++#define ERNSBRNS_IDS_MAX_IDX (19)
++
++static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] =
++{
++	38344,
++	42290,
++	42321,
++	42606,
++	46066,
++	47025,
++	50539,
++	50767,
++	57596,
++	60084,
++	61389,
++	61450,
++	63142,
++	63553,
++	64502,
++	65101,
++	65273,
++	66622,
++	68186,
++};
++
++#endif /* defined(DEBUG) */
++#endif /* RGX_BVNC_TABLE_KM_H */
+diff --git a/drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h b/drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h
+new file mode 100644
+index 000000000000..2464d91a7bd3
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/km/rgx_cr_defs_km.h
+@@ -0,0 +1,8077 @@
++/*************************************************************************/ /*!
++@Title          Hardware definition file rgx_cr_defs_km.h
++@Brief          The file contains auto-generated hardware definitions without
++                BVNC-specific compile time conditionals.
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++/* **** Autogenerated C -- do not edit **** */
++
++/*
++ */
++
++
++#ifndef RGX_CR_DEFS_KM_H
++#define RGX_CR_DEFS_KM_H
++
++#if !defined(IMG_EXPLICIT_INCLUDE_HWDEFS)
++#error This file may only be included if explicitly defined
++#endif
++
++#include "img_types.h"
++#include "img_defs.h"
++
++
++#define RGX_CR_DEFS_KM_REVISION 1
++
++/*
++    Register RGX_CR_RASTERISATION_INDIRECT
++*/
++#define RGX_CR_RASTERISATION_INDIRECT                     (0x8238U)
++#define RGX_CR_RASTERISATION_INDIRECT_MASKFULL            (IMG_UINT64_C(0x000000000000000F))
++#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT       (0U)
++#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK      (0xFFFFFFF0U)
++
++
++/*
++    Register RGX_CR_USC_INDIRECT
++*/
++#define RGX_CR_USC_INDIRECT                               (0x8000U)
++#define RGX_CR_USC_INDIRECT_MASKFULL                      (IMG_UINT64_C(0x000000000000000F))
++#define RGX_CR_USC_INDIRECT_ADDRESS_SHIFT                 (0U)
++#define RGX_CR_USC_INDIRECT_ADDRESS_CLRMSK                (0xFFFFFFF0U)
++
++
++/*
++    Register RGX_CR_PBE_INDIRECT
++*/
++#define RGX_CR_PBE_INDIRECT                               (0x83E0U)
++#define RGX_CR_PBE_INDIRECT_MASKFULL                      (IMG_UINT64_C(0x000000000000000F))
++#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT                 (0U)
++#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK                (0xFFFFFFF0U)
++
++
++/*
++    Register RGX_CR_PBE_PERF_INDIRECT
++*/
++#define RGX_CR_PBE_PERF_INDIRECT                          (0x83D8U)
++#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x000000000000000F))
++#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
++#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK           (0xFFFFFFF0U)
++
++
++/*
++    Register RGX_CR_TPU_PERF_INDIRECT
++*/
++#define RGX_CR_TPU_PERF_INDIRECT                          (0x83F0U)
++#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x0000000000000007))
++#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
++#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK           (0xFFFFFFF8U)
++
++
++/*
++    Register RGX_CR_RASTERISATION_PERF_INDIRECT
++*/
++#define RGX_CR_RASTERISATION_PERF_INDIRECT                (0x8318U)
++#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL       (IMG_UINT64_C(0x000000000000000F))
++#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT  (0U)
++#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF0U)
++
++
++/*
++    Register RGX_CR_TPU_MCU_L0_PERF_INDIRECT
++*/
++#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT                   (0x8028U)
++#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL          (IMG_UINT64_C(0x0000000000000007))
++#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT     (0U)
++#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK    (0xFFFFFFF8U)
++
++
++/*
++    Register RGX_CR_USC_PERF_INDIRECT
++*/
++#define RGX_CR_USC_PERF_INDIRECT                          (0x8030U)
++#define RGX_CR_USC_PERF_INDIRECT_MASKFULL                 (IMG_UINT64_C(0x000000000000000F))
++#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT            (0U)
++#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK           (0xFFFFFFF0U)
++
++
++/*
++    Register RGX_CR_BLACKPEARL_INDIRECT
++*/
++#define RGX_CR_BLACKPEARL_INDIRECT                        (0x8388U)
++#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL               (IMG_UINT64_C(0x0000000000000003))
++#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT          (0U)
++#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK         (0xFFFFFFFCU)
++
++
++/*
++    Register RGX_CR_BLACKPEARL_PERF_INDIRECT
++*/
++#define RGX_CR_BLACKPEARL_PERF_INDIRECT                   (0x83F8U)
++#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL          (IMG_UINT64_C(0x0000000000000003))
++#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT     (0U)
++#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK    (0xFFFFFFFCU)
++
++
++/*
++    Register RGX_CR_TEXAS3_PERF_INDIRECT
++*/
++#define
RGX_CR_TEXAS3_PERF_INDIRECT (0x83D0U) ++#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007)) ++#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT (0U) ++#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFF8U) ++ ++ ++/* ++ Register RGX_CR_TEXAS_PERF_INDIRECT ++*/ ++#define RGX_CR_TEXAS_PERF_INDIRECT (0x8288U) ++#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) ++#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT (0U) ++#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) ++ ++ ++/* ++ Register RGX_CR_BX_TU_PERF_INDIRECT ++*/ ++#define RGX_CR_BX_TU_PERF_INDIRECT (0xC900U) ++#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003)) ++#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT (0U) ++#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK (0xFFFFFFFCU) ++ ++ ++/* ++ Register RGX_CR_CLK_CTRL ++*/ ++#define RGX_CR_CLK_CTRL (0x0000U) ++#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) ++#define RGX_CR_CLK_CTRL__S7_TOP__MASKFULL (IMG_UINT64_C(0xCFCF03000F3F3F0F)) ++#define RGX_CR_CLK_CTRL_MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF)) ++#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT (62U) ++#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0x3FFFFFFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000)) ++#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000)) ++#define RGX_CR_CLK_CTRL_IPP_SHIFT (60U) ++#define RGX_CR_CLK_CTRL_IPP_CLRMSK (IMG_UINT64_C(0xCFFFFFFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_IPP_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_IPP_ON (IMG_UINT64_C(0x1000000000000000)) ++#define RGX_CR_CLK_CTRL_IPP_AUTO (IMG_UINT64_C(0x2000000000000000)) ++#define RGX_CR_CLK_CTRL_FBC_SHIFT (58U) ++#define RGX_CR_CLK_CTRL_FBC_CLRMSK (IMG_UINT64_C(0xF3FFFFFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_FBC_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_FBC_ON (IMG_UINT64_C(0x0400000000000000)) ++#define RGX_CR_CLK_CTRL_FBC_AUTO (IMG_UINT64_C(0x0800000000000000)) ++#define RGX_CR_CLK_CTRL_FBDC_SHIFT (56U) ++#define RGX_CR_CLK_CTRL_FBDC_CLRMSK (IMG_UINT64_C(0xFCFFFFFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_FBDC_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_FBDC_ON (IMG_UINT64_C(0x0100000000000000)) ++#define RGX_CR_CLK_CTRL_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000)) ++#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT (54U) ++#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFF3FFFFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON (IMG_UINT64_C(0x0040000000000000)) ++#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO (IMG_UINT64_C(0x0080000000000000)) ++#define RGX_CR_CLK_CTRL_USCS_SHIFT (52U) ++#define RGX_CR_CLK_CTRL_USCS_CLRMSK (IMG_UINT64_C(0xFFCFFFFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_USCS_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_USCS_ON (IMG_UINT64_C(0x0010000000000000)) ++#define RGX_CR_CLK_CTRL_USCS_AUTO (IMG_UINT64_C(0x0020000000000000)) ++#define RGX_CR_CLK_CTRL_PBE_SHIFT (50U) ++#define RGX_CR_CLK_CTRL_PBE_CLRMSK (IMG_UINT64_C(0xFFF3FFFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_PBE_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_PBE_ON (IMG_UINT64_C(0x0004000000000000)) ++#define RGX_CR_CLK_CTRL_PBE_AUTO (IMG_UINT64_C(0x0008000000000000)) ++#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT (48U) ++#define 
RGX_CR_CLK_CTRL_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_MCU_L1_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000)) ++#define RGX_CR_CLK_CTRL_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000)) ++#define RGX_CR_CLK_CTRL_CDM_SHIFT (46U) ++#define RGX_CR_CLK_CTRL_CDM_CLRMSK (IMG_UINT64_C(0xFFFF3FFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_CDM_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_CDM_ON (IMG_UINT64_C(0x0000400000000000)) ++#define RGX_CR_CLK_CTRL_CDM_AUTO (IMG_UINT64_C(0x0000800000000000)) ++#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT (44U) ++#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFCFFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_SIDEKICK_ON (IMG_UINT64_C(0x0000100000000000)) ++#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO (IMG_UINT64_C(0x0000200000000000)) ++#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT (42U) ++#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFF3FFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON (IMG_UINT64_C(0x0000040000000000)) ++#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO (IMG_UINT64_C(0x0000080000000000)) ++#define RGX_CR_CLK_CTRL_BIF_SHIFT (40U) ++#define RGX_CR_CLK_CTRL_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF)) ++#define RGX_CR_CLK_CTRL_BIF_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_BIF_ON (IMG_UINT64_C(0x0000010000000000)) ++#define RGX_CR_CLK_CTRL_BIF_AUTO (IMG_UINT64_C(0x0000020000000000)) ++#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT (28U) ++#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) ++#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON (IMG_UINT64_C(0x0000000010000000)) ++#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO (IMG_UINT64_C(0x0000000020000000)) ++#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT (26U) ++#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) ++#define RGX_CR_CLK_CTRL_MCU_L0_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000)) ++#define RGX_CR_CLK_CTRL_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000)) ++#define RGX_CR_CLK_CTRL_TPU_SHIFT (24U) ++#define RGX_CR_CLK_CTRL_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) ++#define RGX_CR_CLK_CTRL_TPU_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_TPU_ON (IMG_UINT64_C(0x0000000001000000)) ++#define RGX_CR_CLK_CTRL_TPU_AUTO (IMG_UINT64_C(0x0000000002000000)) ++#define RGX_CR_CLK_CTRL_USC_SHIFT (20U) ++#define RGX_CR_CLK_CTRL_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) ++#define RGX_CR_CLK_CTRL_USC_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_USC_ON (IMG_UINT64_C(0x0000000000100000)) ++#define RGX_CR_CLK_CTRL_USC_AUTO (IMG_UINT64_C(0x0000000000200000)) ++#define RGX_CR_CLK_CTRL_TLA_SHIFT (18U) ++#define RGX_CR_CLK_CTRL_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) ++#define RGX_CR_CLK_CTRL_TLA_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_TLA_ON (IMG_UINT64_C(0x0000000000040000)) ++#define RGX_CR_CLK_CTRL_TLA_AUTO (IMG_UINT64_C(0x0000000000080000)) ++#define RGX_CR_CLK_CTRL_SLC_SHIFT (16U) ++#define RGX_CR_CLK_CTRL_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) ++#define RGX_CR_CLK_CTRL_SLC_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_SLC_ON 
(IMG_UINT64_C(0x0000000000010000)) ++#define RGX_CR_CLK_CTRL_SLC_AUTO (IMG_UINT64_C(0x0000000000020000)) ++#define RGX_CR_CLK_CTRL_UVS_SHIFT (14U) ++#define RGX_CR_CLK_CTRL_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) ++#define RGX_CR_CLK_CTRL_UVS_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_UVS_ON (IMG_UINT64_C(0x0000000000004000)) ++#define RGX_CR_CLK_CTRL_UVS_AUTO (IMG_UINT64_C(0x0000000000008000)) ++#define RGX_CR_CLK_CTRL_PDS_SHIFT (12U) ++#define RGX_CR_CLK_CTRL_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) ++#define RGX_CR_CLK_CTRL_PDS_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_PDS_ON (IMG_UINT64_C(0x0000000000001000)) ++#define RGX_CR_CLK_CTRL_PDS_AUTO (IMG_UINT64_C(0x0000000000002000)) ++#define RGX_CR_CLK_CTRL_VDM_SHIFT (10U) ++#define RGX_CR_CLK_CTRL_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) ++#define RGX_CR_CLK_CTRL_VDM_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_VDM_ON (IMG_UINT64_C(0x0000000000000400)) ++#define RGX_CR_CLK_CTRL_VDM_AUTO (IMG_UINT64_C(0x0000000000000800)) ++#define RGX_CR_CLK_CTRL_PM_SHIFT (8U) ++#define RGX_CR_CLK_CTRL_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) ++#define RGX_CR_CLK_CTRL_PM_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_PM_ON (IMG_UINT64_C(0x0000000000000100)) ++#define RGX_CR_CLK_CTRL_PM_AUTO (IMG_UINT64_C(0x0000000000000200)) ++#define RGX_CR_CLK_CTRL_GPP_SHIFT (6U) ++#define RGX_CR_CLK_CTRL_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF3F)) ++#define RGX_CR_CLK_CTRL_GPP_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_GPP_ON (IMG_UINT64_C(0x0000000000000040)) ++#define RGX_CR_CLK_CTRL_GPP_AUTO (IMG_UINT64_C(0x0000000000000080)) ++#define RGX_CR_CLK_CTRL_TE_SHIFT (4U) ++#define RGX_CR_CLK_CTRL_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) ++#define RGX_CR_CLK_CTRL_TE_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_TE_ON (IMG_UINT64_C(0x0000000000000010)) ++#define RGX_CR_CLK_CTRL_TE_AUTO (IMG_UINT64_C(0x0000000000000020)) ++#define RGX_CR_CLK_CTRL_TSP_SHIFT (2U) ++#define RGX_CR_CLK_CTRL_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF3)) ++#define RGX_CR_CLK_CTRL_TSP_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_TSP_ON (IMG_UINT64_C(0x0000000000000004)) ++#define RGX_CR_CLK_CTRL_TSP_AUTO (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_CR_CLK_CTRL_ISP_SHIFT (0U) ++#define RGX_CR_CLK_CTRL_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) ++#define RGX_CR_CLK_CTRL_ISP_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_CTRL_ISP_ON (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_CLK_CTRL_ISP_AUTO (IMG_UINT64_C(0x0000000000000002)) ++ ++ ++/* ++ Register RGX_CR_CLK_STATUS ++*/ ++#define RGX_CR_CLK_STATUS (0x0008U) ++#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) ++#define RGX_CR_CLK_STATUS__S7_TOP__MASKFULL (IMG_UINT64_C(0x00000001B3101773)) ++#define RGX_CR_CLK_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFF077FF)) ++#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT (32U) ++#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) ++#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING (IMG_UINT64_C(0x0000000100000000)) ++#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT (31U) ++#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) ++#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000)) 
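++
++/* Illustrative usage sketch, not part of the autogenerated definitions:
++ * each CLK_STATUS field is a single GATED/RUNNING bit, so a clock check
++ * is a mask-and-compare built from the field's _CLRMSK and _RUNNING
++ * constants. The helper name below is arbitrary.
++ */
++static inline IMG_BOOL RGXIsBifTexasClkRunning(IMG_UINT64 ui64ClkStatus)
++{
++	return (IMG_BOOL)((ui64ClkStatus & ~RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK) ==
++	                  RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING);
++}
++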
++#define RGX_CR_CLK_STATUS_IPP_SHIFT (30U) ++#define RGX_CR_CLK_STATUS_IPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) ++#define RGX_CR_CLK_STATUS_IPP_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_IPP_RUNNING (IMG_UINT64_C(0x0000000040000000)) ++#define RGX_CR_CLK_STATUS_FBC_SHIFT (29U) ++#define RGX_CR_CLK_STATUS_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) ++#define RGX_CR_CLK_STATUS_FBC_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_FBC_RUNNING (IMG_UINT64_C(0x0000000020000000)) ++#define RGX_CR_CLK_STATUS_FBDC_SHIFT (28U) ++#define RGX_CR_CLK_STATUS_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) ++#define RGX_CR_CLK_STATUS_FBDC_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_FBDC_RUNNING (IMG_UINT64_C(0x0000000010000000)) ++#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT (27U) ++#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) ++#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING (IMG_UINT64_C(0x0000000008000000)) ++#define RGX_CR_CLK_STATUS_USCS_SHIFT (26U) ++#define RGX_CR_CLK_STATUS_USCS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) ++#define RGX_CR_CLK_STATUS_USCS_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_USCS_RUNNING (IMG_UINT64_C(0x0000000004000000)) ++#define RGX_CR_CLK_STATUS_PBE_SHIFT (25U) ++#define RGX_CR_CLK_STATUS_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) ++#define RGX_CR_CLK_STATUS_PBE_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000)) ++#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT (24U) ++#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) ++#define RGX_CR_CLK_STATUS_MCU_L1_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000)) ++#define RGX_CR_CLK_STATUS_CDM_SHIFT (23U) ++#define RGX_CR_CLK_STATUS_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) ++#define RGX_CR_CLK_STATUS_CDM_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_CDM_RUNNING (IMG_UINT64_C(0x0000000000800000)) ++#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT (22U) ++#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) ++#define RGX_CR_CLK_STATUS_SIDEKICK_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000400000)) ++#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT (21U) ++#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) ++#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000200000)) ++#define RGX_CR_CLK_STATUS_BIF_SHIFT (20U) ++#define RGX_CR_CLK_STATUS_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) ++#define RGX_CR_CLK_STATUS_BIF_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000)) ++#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT (14U) ++#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) ++#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING (IMG_UINT64_C(0x0000000000004000)) ++#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT (13U) ++#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) ++#define RGX_CR_CLK_STATUS_MCU_L0_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define 
RGX_CR_CLK_STATUS_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000000002000)) ++#define RGX_CR_CLK_STATUS_TPU_SHIFT (12U) ++#define RGX_CR_CLK_STATUS_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) ++#define RGX_CR_CLK_STATUS_TPU_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000)) ++#define RGX_CR_CLK_STATUS_USC_SHIFT (10U) ++#define RGX_CR_CLK_STATUS_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) ++#define RGX_CR_CLK_STATUS_USC_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_USC_RUNNING (IMG_UINT64_C(0x0000000000000400)) ++#define RGX_CR_CLK_STATUS_TLA_SHIFT (9U) ++#define RGX_CR_CLK_STATUS_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) ++#define RGX_CR_CLK_STATUS_TLA_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_TLA_RUNNING (IMG_UINT64_C(0x0000000000000200)) ++#define RGX_CR_CLK_STATUS_SLC_SHIFT (8U) ++#define RGX_CR_CLK_STATUS_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) ++#define RGX_CR_CLK_STATUS_SLC_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100)) ++#define RGX_CR_CLK_STATUS_UVS_SHIFT (7U) ++#define RGX_CR_CLK_STATUS_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) ++#define RGX_CR_CLK_STATUS_UVS_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_UVS_RUNNING (IMG_UINT64_C(0x0000000000000080)) ++#define RGX_CR_CLK_STATUS_PDS_SHIFT (6U) ++#define RGX_CR_CLK_STATUS_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) ++#define RGX_CR_CLK_STATUS_PDS_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040)) ++#define RGX_CR_CLK_STATUS_VDM_SHIFT (5U) ++#define RGX_CR_CLK_STATUS_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) ++#define RGX_CR_CLK_STATUS_VDM_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_VDM_RUNNING (IMG_UINT64_C(0x0000000000000020)) ++#define RGX_CR_CLK_STATUS_PM_SHIFT (4U) ++#define RGX_CR_CLK_STATUS_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) ++#define RGX_CR_CLK_STATUS_PM_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_PM_RUNNING (IMG_UINT64_C(0x0000000000000010)) ++#define RGX_CR_CLK_STATUS_GPP_SHIFT (3U) ++#define RGX_CR_CLK_STATUS_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) ++#define RGX_CR_CLK_STATUS_GPP_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_GPP_RUNNING (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_CR_CLK_STATUS_TE_SHIFT (2U) ++#define RGX_CR_CLK_STATUS_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) ++#define RGX_CR_CLK_STATUS_TE_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_TE_RUNNING (IMG_UINT64_C(0x0000000000000004)) ++#define RGX_CR_CLK_STATUS_TSP_SHIFT (1U) ++#define RGX_CR_CLK_STATUS_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_CR_CLK_STATUS_TSP_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_TSP_RUNNING (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_CLK_STATUS_ISP_SHIFT (0U) ++#define RGX_CR_CLK_STATUS_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_CLK_STATUS_ISP_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_STATUS_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_CORE_ID ++*/ ++#define RGX_CR_CORE_ID__PBVNC (0x0020U) ++#define RGX_CR_CORE_ID__PBVNC__MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT (48U) ++#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) ++#define 
RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT (32U) ++#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) ++#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U) ++#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) ++#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT (0U) ++#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) ++ ++ ++/* ++ Register RGX_CR_CORE_ID ++*/ ++#define RGX_CR_CORE_ID (0x0018U) ++#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_CORE_ID_ID_SHIFT (16U) ++#define RGX_CR_CORE_ID_ID_CLRMSK (0x0000FFFFU) ++#define RGX_CR_CORE_ID_CONFIG_SHIFT (0U) ++#define RGX_CR_CORE_ID_CONFIG_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_CORE_REVISION ++*/ ++#define RGX_CR_CORE_REVISION (0x0020U) ++#define RGX_CR_CORE_REVISION_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT (24U) ++#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK (0x00FFFFFFU) ++#define RGX_CR_CORE_REVISION_MAJOR_SHIFT (16U) ++#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK (0xFF00FFFFU) ++#define RGX_CR_CORE_REVISION_MINOR_SHIFT (8U) ++#define RGX_CR_CORE_REVISION_MINOR_CLRMSK (0xFFFF00FFU) ++#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT (0U) ++#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_DESIGNER_REV_FIELD1 ++*/ ++#define RGX_CR_DESIGNER_REV_FIELD1 (0x0028U) ++#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U) ++#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_DESIGNER_REV_FIELD2 ++*/ ++#define RGX_CR_DESIGNER_REV_FIELD2 (0x0030U) ++#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U) ++#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_CHANGESET_NUMBER ++*/ ++#define RGX_CR_CHANGESET_NUMBER (0x0040U) ++#define RGX_CR_CHANGESET_NUMBER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT (0U) ++#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_SOC_TIMER_GRAY ++*/ ++#define RGX_CR_SOC_TIMER_GRAY (0x00E0U) ++#define RGX_CR_SOC_TIMER_GRAY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_SOC_TIMER_GRAY_VALUE_SHIFT (0U) ++#define RGX_CR_SOC_TIMER_GRAY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_SOC_TIMER_BINARY ++*/ ++#define RGX_CR_SOC_TIMER_BINARY (0x00E8U) ++#define RGX_CR_SOC_TIMER_BINARY_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_SOC_TIMER_BINARY_VALUE_SHIFT (0U) ++#define RGX_CR_SOC_TIMER_BINARY_VALUE_CLRMSK (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_CLK_XTPLUS_CTRL ++*/ ++#define RGX_CR_CLK_XTPLUS_CTRL (0x0080U) ++#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL (IMG_UINT64_C(0x0000003FFFFF0000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT (36U) ++#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFCFFFFFFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON (IMG_UINT64_C(0x0000001000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO (IMG_UINT64_C(0x0000002000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT (34U) 
++#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFF3FFFFFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON (IMG_UINT64_C(0x0000000400000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT (32U) ++#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFCFFFFFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON (IMG_UINT64_C(0x0000000100000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO (IMG_UINT64_C(0x0000000200000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT (30U) ++#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF3FFFFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT (28U) ++#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFCFFFFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON (IMG_UINT64_C(0x0000000010000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT (26U) ++#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF3FFFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON (IMG_UINT64_C(0x0000000004000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT (24U) ++#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON (IMG_UINT64_C(0x0000000001000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO (IMG_UINT64_C(0x0000000002000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT (22U) ++#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF3FFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON (IMG_UINT64_C(0x0000000000400000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO (IMG_UINT64_C(0x0000000000800000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT (20U) ++#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON (IMG_UINT64_C(0x0000000000100000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO (IMG_UINT64_C(0x0000000000200000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT (18U) ++#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON (IMG_UINT64_C(0x0000000000040000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT (16U) ++#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) ++#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF 
(IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON (IMG_UINT64_C(0x0000000000010000)) ++#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO (IMG_UINT64_C(0x0000000000020000)) ++ ++ ++/* ++ Register RGX_CR_CLK_XTPLUS_STATUS ++*/ ++#define RGX_CR_CLK_XTPLUS_STATUS (0x0088U) ++#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF)) ++#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT (10U) ++#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) ++#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400)) ++#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT (9U) ++#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) ++#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200)) ++#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT (8U) ++#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) ++#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100)) ++#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT (7U) ++#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) ++#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080)) ++#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT (6U) ++#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) ++#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040)) ++#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT (5U) ++#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) ++#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020)) ++#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT (4U) ++#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) ++#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING (IMG_UINT64_C(0x0000000000000010)) ++#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT (3U) ++#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) ++#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT (2U) ++#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) ++#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING (IMG_UINT64_C(0x0000000000000004)) ++#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT (1U) ++#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT (0U) ++#define 
RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_SOFT_RESET ++*/ ++#define RGX_CR_SOFT_RESET (0x0100U) ++#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFEFFFFFFFFFFC3D)) ++#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00E7FFFFFFFFFC3D)) ++#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_SHIFT (63U) ++#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_PHANTOM3_CORE_EN (IMG_UINT64_C(0x8000000000000000)) ++#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_SHIFT (62U) ++#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_PHANTOM2_CORE_EN (IMG_UINT64_C(0x4000000000000000)) ++#define RGX_CR_SOFT_RESET_BERNADO2_CORE_SHIFT (61U) ++#define RGX_CR_SOFT_RESET_BERNADO2_CORE_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_BERNADO2_CORE_EN (IMG_UINT64_C(0x2000000000000000)) ++#define RGX_CR_SOFT_RESET_JONES_CORE_SHIFT (60U) ++#define RGX_CR_SOFT_RESET_JONES_CORE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_JONES_CORE_EN (IMG_UINT64_C(0x1000000000000000)) ++#define RGX_CR_SOFT_RESET_TILING_CORE_SHIFT (59U) ++#define RGX_CR_SOFT_RESET_TILING_CORE_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_TILING_CORE_EN (IMG_UINT64_C(0x0800000000000000)) ++#define RGX_CR_SOFT_RESET_TE3_SHIFT (58U) ++#define RGX_CR_SOFT_RESET_TE3_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_TE3_EN (IMG_UINT64_C(0x0400000000000000)) ++#define RGX_CR_SOFT_RESET_VCE_SHIFT (57U) ++#define RGX_CR_SOFT_RESET_VCE_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_VCE_EN (IMG_UINT64_C(0x0200000000000000)) ++#define RGX_CR_SOFT_RESET_VBS_SHIFT (56U) ++#define RGX_CR_SOFT_RESET_VBS_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_VBS_EN (IMG_UINT64_C(0x0100000000000000)) ++#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT (55U) ++#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DPX1_CORE_EN (IMG_UINT64_C(0x0080000000000000)) ++#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT (54U) ++#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DPX0_CORE_EN (IMG_UINT64_C(0x0040000000000000)) ++#define RGX_CR_SOFT_RESET_FBA_SHIFT (53U) ++#define RGX_CR_SOFT_RESET_FBA_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_FBA_EN (IMG_UINT64_C(0x0020000000000000)) ++#define RGX_CR_SOFT_RESET_FB_CDC_SHIFT (51U) ++#define RGX_CR_SOFT_RESET_FB_CDC_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_FB_CDC_EN (IMG_UINT64_C(0x0008000000000000)) ++#define RGX_CR_SOFT_RESET_SH_SHIFT (50U) ++#define RGX_CR_SOFT_RESET_SH_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_SH_EN (IMG_UINT64_C(0x0004000000000000)) ++#define RGX_CR_SOFT_RESET_VRDM_SHIFT (49U) ++#define RGX_CR_SOFT_RESET_VRDM_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_VRDM_EN (IMG_UINT64_C(0x0002000000000000)) ++#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT (48U) ++#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_MCU_FBTC_EN (IMG_UINT64_C(0x0001000000000000)) ++#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT (47U) 
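++
++/* Illustrative usage sketch, not part of the autogenerated definitions:
++ * SOFT_RESET fields are single _EN bits, so a unit is typically reset by
++ * asserting its bit and then writing the bit back to zero. OSWriteHWReg64()
++ * is assumed here as the OS-layer register write helper, and any fencing or
++ * delay required between the two writes is omitted. The helper name below
++ * is arbitrary.
++ */
++static inline void RGXSoftResetPulse(void *pvRegsBase, IMG_UINT64 ui64UnitMask)
++{
++	OSWriteHWReg64(pvRegsBase, RGX_CR_SOFT_RESET, ui64UnitMask);
++	OSWriteHWReg64(pvRegsBase, RGX_CR_SOFT_RESET, IMG_UINT64_C(0));
++}
++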
++#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN (IMG_UINT64_C(0x0000800000000000)) ++#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT (46U) ++#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN (IMG_UINT64_C(0x0000400000000000)) ++#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT (45U) ++#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN (IMG_UINT64_C(0x0000200000000000)) ++#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT (44U) ++#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN (IMG_UINT64_C(0x0000100000000000)) ++#define RGX_CR_SOFT_RESET_IPP_SHIFT (43U) ++#define RGX_CR_SOFT_RESET_IPP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_IPP_EN (IMG_UINT64_C(0x0000080000000000)) ++#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (42U) ++#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0x0000040000000000)) ++#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT (41U) ++#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN (IMG_UINT64_C(0x0000020000000000)) ++#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT (40U) ++#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN (IMG_UINT64_C(0x0000010000000000)) ++#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT (39U) ++#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN (IMG_UINT64_C(0x0000008000000000)) ++#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT (38U) ++#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN (IMG_UINT64_C(0x0000004000000000)) ++#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT (37U) ++#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN (IMG_UINT64_C(0x0000002000000000)) ++#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT (36U) ++#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN (IMG_UINT64_C(0x0000001000000000)) ++#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT (35U) ++#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN (IMG_UINT64_C(0x0000000800000000)) ++#define RGX_CR_SOFT_RESET_MMU_SHIFT (34U) ++#define RGX_CR_SOFT_RESET_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_MMU_EN (IMG_UINT64_C(0x0000000400000000)) ++#define RGX_CR_SOFT_RESET_BIF1_SHIFT (33U) ++#define RGX_CR_SOFT_RESET_BIF1_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_BIF1_EN (IMG_UINT64_C(0x0000000200000000)) ++#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (32U) ++#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0x0000000100000000)) ++#define RGX_CR_SOFT_RESET_CPU_SHIFT (32U) ++#define RGX_CR_SOFT_RESET_CPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) ++#define RGX_CR_SOFT_RESET_CPU_EN (IMG_UINT64_C(0x0000000100000000)) ++#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT (31U) ++#define 
RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) ++#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN (IMG_UINT64_C(0x0000000080000000)) ++#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT (30U) ++#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN (IMG_UINT64_C(0x0000000040000000)) ++#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT (29U) ++#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) ++#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN (IMG_UINT64_C(0x0000000020000000)) ++#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT (28U) ++#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) ++#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN (IMG_UINT64_C(0x0000000010000000)) ++#define RGX_CR_SOFT_RESET_SLC_SHIFT (27U) ++#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) ++#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0x0000000008000000)) ++#define RGX_CR_SOFT_RESET_TLA_SHIFT (26U) ++#define RGX_CR_SOFT_RESET_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) ++#define RGX_CR_SOFT_RESET_TLA_EN (IMG_UINT64_C(0x0000000004000000)) ++#define RGX_CR_SOFT_RESET_UVS_SHIFT (25U) ++#define RGX_CR_SOFT_RESET_UVS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) ++#define RGX_CR_SOFT_RESET_UVS_EN (IMG_UINT64_C(0x0000000002000000)) ++#define RGX_CR_SOFT_RESET_TE_SHIFT (24U) ++#define RGX_CR_SOFT_RESET_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) ++#define RGX_CR_SOFT_RESET_TE_EN (IMG_UINT64_C(0x0000000001000000)) ++#define RGX_CR_SOFT_RESET_GPP_SHIFT (23U) ++#define RGX_CR_SOFT_RESET_GPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) ++#define RGX_CR_SOFT_RESET_GPP_EN (IMG_UINT64_C(0x0000000000800000)) ++#define RGX_CR_SOFT_RESET_FBDC_SHIFT (22U) ++#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) ++#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0x0000000000400000)) ++#define RGX_CR_SOFT_RESET_FBC_SHIFT (21U) ++#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) ++#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0x0000000000200000)) ++#define RGX_CR_SOFT_RESET_PM_SHIFT (20U) ++#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) ++#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0x0000000000100000)) ++#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U) ++#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) ++#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0x0000000000080000)) ++#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT (18U) ++#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) ++#define RGX_CR_SOFT_RESET_USC_SHARED_EN (IMG_UINT64_C(0x0000000000040000)) ++#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U) ++#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) ++#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0x0000000000020000)) ++#define RGX_CR_SOFT_RESET_BIF_SHIFT (16U) ++#define RGX_CR_SOFT_RESET_BIF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) ++#define RGX_CR_SOFT_RESET_BIF_EN (IMG_UINT64_C(0x0000000000010000)) ++#define RGX_CR_SOFT_RESET_CDM_SHIFT (15U) ++#define RGX_CR_SOFT_RESET_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) ++#define RGX_CR_SOFT_RESET_CDM_EN (IMG_UINT64_C(0x0000000000008000)) ++#define RGX_CR_SOFT_RESET_VDM_SHIFT (14U) ++#define RGX_CR_SOFT_RESET_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) ++#define RGX_CR_SOFT_RESET_VDM_EN (IMG_UINT64_C(0x0000000000004000)) ++#define RGX_CR_SOFT_RESET_TESS_SHIFT (13U) ++#define RGX_CR_SOFT_RESET_TESS_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) ++#define RGX_CR_SOFT_RESET_TESS_EN (IMG_UINT64_C(0x0000000000002000)) ++#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U) ++#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) ++#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0x0000000000001000)) ++#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U) ++#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) ++#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0x0000000000000800)) ++#define RGX_CR_SOFT_RESET_TSP_SHIFT (10U) ++#define RGX_CR_SOFT_RESET_TSP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) ++#define RGX_CR_SOFT_RESET_TSP_EN (IMG_UINT64_C(0x0000000000000400)) ++#define RGX_CR_SOFT_RESET_SYSARB_SHIFT (5U) ++#define RGX_CR_SOFT_RESET_SYSARB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) ++#define RGX_CR_SOFT_RESET_SYSARB_EN (IMG_UINT64_C(0x0000000000000020)) ++#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT (4U) ++#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) ++#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN (IMG_UINT64_C(0x0000000000000010)) ++#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U) ++#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) ++#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U) ++#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) ++#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0x0000000000000004)) ++#define RGX_CR_SOFT_RESET_USC_SHIFT (0U) ++#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_SOFT_RESET2 ++*/ ++#define RGX_CR_SOFT_RESET2 (0x0108U) ++#define RGX_CR_SOFT_RESET2_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF)) ++#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT (12U) ++#define RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK (0xFFE00FFFU) ++#define RGX_CR_SOFT_RESET2_TDM_SHIFT (11U) ++#define RGX_CR_SOFT_RESET2_TDM_CLRMSK (0xFFFFF7FFU) ++#define RGX_CR_SOFT_RESET2_TDM_EN (0x00000800U) ++#define RGX_CR_SOFT_RESET2_ASTC_SHIFT (10U) ++#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK (0xFFFFFBFFU) ++#define RGX_CR_SOFT_RESET2_ASTC_EN (0x00000400U) ++#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT (9U) ++#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK (0xFFFFFDFFU) ++#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN (0x00000200U) ++#define RGX_CR_SOFT_RESET2_USCPS_SHIFT (8U) ++#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_SOFT_RESET2_USCPS_EN (0x00000100U) ++#define RGX_CR_SOFT_RESET2_IPF_SHIFT (7U) ++#define RGX_CR_SOFT_RESET2_IPF_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_SOFT_RESET2_IPF_EN (0x00000080U) ++#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT (6U) ++#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_SOFT_RESET2_GEOMETRY_EN (0x00000040U) ++#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT (5U) ++#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_SOFT_RESET2_USC_SHARED_EN (0x00000020U) ++#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT (4U) ++#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN (0x00000010U) ++#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT (3U) ++#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN (0x00000008U) ++#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT (2U) ++#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_SOFT_RESET2_PIXEL_EN (0x00000004U) ++#define 
RGX_CR_SOFT_RESET2_CDM_SHIFT (1U)
++#define RGX_CR_SOFT_RESET2_CDM_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_SOFT_RESET2_CDM_EN (0x00000002U)
++#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT (0U)
++#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_SOFT_RESET2_VERTEX_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_EVENT_STATUS
++*/
++#define RGX_CR_EVENT_STATUS (0x0130U)
++#define RGX_CR_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF))
++#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
++#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U)
++#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU)
++#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0x80000000U)
++#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U)
++#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU)
++#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0x40000000U)
++#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U)
++#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU)
++#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U)
++#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT (28U)
++#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN (0x10000000U)
++#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT (27U)
++#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU)
++#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN (0x08000000U)
++#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT (26U)
++#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU)
++#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN (0x04000000U)
++#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT (25U)
++#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU)
++#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN (0x02000000U)
++#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT (24U)
++#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU)
++#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN (0x01000000U)
++#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT (23U)
++#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU)
++#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN (0x00800000U)
++#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT (22U)
++#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU)
++#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN (0x00400000U)
++#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT (21U)
++#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU)
++#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN (0x00200000U)
++#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT (20U)
++#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU)
++#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN (0x00100000U)
++#define RGX_CR_EVENT_STATUS_SAFETY_SHIFT (20U)
++#define RGX_CR_EVENT_STATUS_SAFETY_CLRMSK (0xFFEFFFFFU)
++#define RGX_CR_EVENT_STATUS_SAFETY_EN (0x00100000U)
++#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT (19U)
++#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU)
++#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN (0x00080000U)
++#define RGX_CR_EVENT_STATUS_SLAVE_REQ_SHIFT (19U)
++#define RGX_CR_EVENT_STATUS_SLAVE_REQ_CLRMSK (0xFFF7FFFFU)
++#define RGX_CR_EVENT_STATUS_SLAVE_REQ_EN (0x00080000U)
++#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT (18U)
++#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU)
++#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN (0x00040000U)
++#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
++#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU)
++#define RGX_CR_EVENT_STATUS_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U)
++#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT (17U)
++#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK (0xFFFDFFFFU)
++#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN (0x00020000U)
++#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
++#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU)
++#define RGX_CR_EVENT_STATUS_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U)
++#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U)
++#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU)
++#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0x00010000U)
++#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U)
++#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0xFFFF7FFFU)
++#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0x00008000U)
++#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT (14U)
++#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU)
++#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN (0x00004000U)
++#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U)
++#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0xFFFFDFFFU)
++#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0x00002000U)
++#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U)
++#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0xFFFFEFFFU)
++#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0x00001000U)
++#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U)
++#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0xFFFFF7FFU)
++#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0x00000800U)
++#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U)
++#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU)
++#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0x00000400U)
++#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U)
++#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU)
++#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0x00000200U)
++#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT (8U)
++#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU)
++#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN (0x00000100U)
++#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U)
++#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU)
++#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0x00000080U)
++#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U)
++#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0xFFFFFFBFU)
++#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0x00000040U)
++#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U)
++#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0xFFFFFFDFU)
++#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0x00000020U)
++#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U)
++#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0x00000010U)
++#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT (3U)
++#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN (0x00000008U)
++#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U)
++#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0x00000004U)
++#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U)
++#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0x00000002U)
++#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT (0U)
++#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_EVENT_CLEAR
++*/
++#define RGX_CR_EVENT_CLEAR (0x0138U)
++#define RGX_CR_EVENT_CLEAR__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E01DFFFF))
++#define RGX_CR_EVENT_CLEAR__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
++#define RGX_CR_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_SHIFT (31U)
++#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_CLRMSK (0x7FFFFFFFU)
++#define RGX_CR_EVENT_CLEAR_TDM_FENCE_FINISHED_EN (0x80000000U)
++#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_SHIFT (30U)
++#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_CLRMSK (0xBFFFFFFFU)
++#define RGX_CR_EVENT_CLEAR_TDM_BUFFER_STALL_EN (0x40000000U)
++#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_SHIFT (29U)
++#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_CLRMSK (0xDFFFFFFFU)
++#define RGX_CR_EVENT_CLEAR_COMPUTE_SIGNAL_FAILURE_EN (0x20000000U)
++#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_SHIFT (28U)
++#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_EVENT_CLEAR_DPX_OUT_OF_MEMORY_EN (0x10000000U)
++#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_SHIFT (27U)
++#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_CLRMSK (0xF7FFFFFFU)
++#define RGX_CR_EVENT_CLEAR_DPX_MMU_PAGE_FAULT_EN (0x08000000U)
++#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_SHIFT (26U)
++#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_CLRMSK (0xFBFFFFFFU)
++#define RGX_CR_EVENT_CLEAR_RPM_OUT_OF_MEMORY_EN (0x04000000U)
++#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_SHIFT (25U)
++#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_CLRMSK (0xFDFFFFFFU)
++#define RGX_CR_EVENT_CLEAR_FBA_FC3_FINISHED_EN (0x02000000U)
++#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_SHIFT (24U)
++#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_CLRMSK (0xFEFFFFFFU)
++#define RGX_CR_EVENT_CLEAR_FBA_FC2_FINISHED_EN (0x01000000U)
++#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_SHIFT (23U)
++#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_CLRMSK (0xFF7FFFFFU)
++#define RGX_CR_EVENT_CLEAR_FBA_FC1_FINISHED_EN (0x00800000U)
++#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_SHIFT (22U)
++#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_CLRMSK (0xFFBFFFFFU)
++#define RGX_CR_EVENT_CLEAR_FBA_FC0_FINISHED_EN (0x00400000U)
++#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_SHIFT (21U)
++#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_CLRMSK (0xFFDFFFFFU)
++#define RGX_CR_EVENT_CLEAR_RDM_FC3_FINISHED_EN (0x00200000U)
++#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_SHIFT (20U)
++#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_CLRMSK (0xFFEFFFFFU)
++#define RGX_CR_EVENT_CLEAR_RDM_FC2_FINISHED_EN (0x00100000U)
++#define RGX_CR_EVENT_CLEAR_SAFETY_SHIFT (20U)
++#define RGX_CR_EVENT_CLEAR_SAFETY_CLRMSK (0xFFEFFFFFU)
++#define RGX_CR_EVENT_CLEAR_SAFETY_EN (0x00100000U)
++#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_SHIFT (19U)
++#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_CLRMSK (0xFFF7FFFFU)
++#define RGX_CR_EVENT_CLEAR_RDM_FC1_FINISHED_EN (0x00080000U)
++#define RGX_CR_EVENT_CLEAR_SLAVE_REQ_SHIFT (19U)
++#define RGX_CR_EVENT_CLEAR_SLAVE_REQ_CLRMSK (0xFFF7FFFFU)
++#define RGX_CR_EVENT_CLEAR_SLAVE_REQ_EN (0x00080000U)
++#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_SHIFT (18U)
++#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_CLRMSK (0xFFFBFFFFU)
++#define RGX_CR_EVENT_CLEAR_RDM_FC0_FINISHED_EN (0x00040000U)
++#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
++#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_CLRMSK (0xFFFBFFFFU)
++#define RGX_CR_EVENT_CLEAR_TDM_CONTEXT_STORE_FINISHED_EN (0x00040000U)
++#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_SHIFT (17U)
++#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_CLRMSK (0xFFFDFFFFU)
++#define RGX_CR_EVENT_CLEAR_SHG_FINISHED_EN (0x00020000U)
++#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
++#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_CLRMSK (0xFFFDFFFFU)
++#define RGX_CR_EVENT_CLEAR_SPFILTER_SIGNAL_UPDATE_EN (0x00020000U)
++#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_SHIFT (16U)
++#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_CLRMSK (0xFFFEFFFFU)
++#define RGX_CR_EVENT_CLEAR_COMPUTE_BUFFER_STALL_EN (0x00010000U)
++#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_SHIFT (15U)
++#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_CLRMSK (0xFFFF7FFFU)
++#define RGX_CR_EVENT_CLEAR_USC_TRIGGER_EN (0x00008000U)
++#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_SHIFT (14U)
++#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_CLRMSK (0xFFFFBFFFU)
++#define RGX_CR_EVENT_CLEAR_ZLS_FINISHED_EN (0x00004000U)
++#define RGX_CR_EVENT_CLEAR_GPIO_ACK_SHIFT (13U)
++#define RGX_CR_EVENT_CLEAR_GPIO_ACK_CLRMSK (0xFFFFDFFFU)
++#define RGX_CR_EVENT_CLEAR_GPIO_ACK_EN (0x00002000U)
++#define RGX_CR_EVENT_CLEAR_GPIO_REQ_SHIFT (12U)
++#define RGX_CR_EVENT_CLEAR_GPIO_REQ_CLRMSK (0xFFFFEFFFU)
++#define RGX_CR_EVENT_CLEAR_GPIO_REQ_EN (0x00001000U)
++#define RGX_CR_EVENT_CLEAR_POWER_ABORT_SHIFT (11U)
++#define RGX_CR_EVENT_CLEAR_POWER_ABORT_CLRMSK (0xFFFFF7FFU)
++#define RGX_CR_EVENT_CLEAR_POWER_ABORT_EN (0x00000800U)
++#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_SHIFT (10U)
++#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_CLRMSK (0xFFFFFBFFU)
++#define RGX_CR_EVENT_CLEAR_POWER_COMPLETE_EN (0x00000400U)
++#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_SHIFT (9U)
++#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_CLRMSK (0xFFFFFDFFU)
++#define RGX_CR_EVENT_CLEAR_MMU_PAGE_FAULT_EN (0x00000200U)
++#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_SHIFT (8U)
++#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_CLRMSK (0xFFFFFEFFU)
++#define RGX_CR_EVENT_CLEAR_PM_3D_MEM_FREE_EN (0x00000100U)
++#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_SHIFT (7U)
++#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_CLRMSK (0xFFFFFF7FU)
++#define RGX_CR_EVENT_CLEAR_PM_OUT_OF_MEMORY_EN (0x00000080U)
++#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_SHIFT (6U)
++#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_CLRMSK (0xFFFFFFBFU)
++#define RGX_CR_EVENT_CLEAR_TA_TERMINATE_EN (0x00000040U)
++#define RGX_CR_EVENT_CLEAR_TA_FINISHED_SHIFT (5U)
++#define RGX_CR_EVENT_CLEAR_TA_FINISHED_CLRMSK (0xFFFFFFDFU)
++#define RGX_CR_EVENT_CLEAR_TA_FINISHED_EN (0x00000020U)
++#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_SHIFT (4U)
++#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_EVENT_CLEAR_ISP_END_MACROTILE_EN (0x00000010U)
++#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_SHIFT (3U)
++#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_EVENT_CLEAR_PIXELBE_END_RENDER_EN (0x00000008U)
++#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_SHIFT (2U)
++#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_EVENT_CLEAR_COMPUTE_FINISHED_EN (0x00000004U)
++#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_SHIFT (1U)
++#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_EVENT_CLEAR_KERNEL_FINISHED_EN (0x00000002U)
++#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_SHIFT (0U)
++#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_EVENT_CLEAR_TLA_COMPLETE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_TIMER
++*/
++#define RGX_CR_TIMER (0x0160U)
++#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF))
++#define RGX_CR_TIMER_BIT31_SHIFT (63U)
++#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF))
++#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0x8000000000000000))
++#define RGX_CR_TIMER_VALUE_SHIFT (0U)
++#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0xFFFF000000000000))
++
++
++/*
++ Register RGX_CR_TLA_STATUS
++*/
++#define RGX_CR_TLA_STATUS (0x0178U)
++#define RGX_CR_TLA_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
++#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT (39U)
++#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK (IMG_UINT64_C(0x0000007FFFFFFFFF))
++#define RGX_CR_TLA_STATUS_REQUEST_SHIFT (7U)
++#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK (IMG_UINT64_C(0xFFFFFF800000007F))
++#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT (1U)
++#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF81))
++#define RGX_CR_TLA_STATUS_BUSY_SHIFT (0U)
++#define RGX_CR_TLA_STATUS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_TLA_STATUS_BUSY_EN (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_PM_PARTIAL_RENDER_ENABLE
++*/
++#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U)
++#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U)
++#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_SIDEKICK_IDLE
++*/
++#define RGX_CR_SIDEKICK_IDLE (0x03C8U)
++#define RGX_CR_SIDEKICK_IDLE_MASKFULL (IMG_UINT64_C(0x000000000000007F))
++#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT (6U)
++#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK (0xFFFFFFBFU)
++#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN (0x00000040U)
++#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT (5U)
++#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK (0xFFFFFFDFU)
++#define RGX_CR_SIDEKICK_IDLE_MMU_EN (0x00000020U)
++#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT (4U)
++#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_SIDEKICK_IDLE_BIF128_EN (0x00000010U)
++#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT (3U)
++#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_SIDEKICK_IDLE_TLA_EN (0x00000008U)
++#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT (2U)
++#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN (0x00000004U)
++#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT (1U)
++#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN (0x00000002U)
++#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT (0U)
++#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MARS_IDLE
++*/
++#define RGX_CR_MARS_IDLE (0x08F8U)
++#define RGX_CR_MARS_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000000007))
++#define RGX_CR_MARS_IDLE_MH_SYSARB0_SHIFT (2U)
++#define RGX_CR_MARS_IDLE_MH_SYSARB0_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_MARS_IDLE_MH_SYSARB0_EN (0x00000004U)
++#define RGX_CR_MARS_IDLE_CPU_SHIFT (1U)
++#define RGX_CR_MARS_IDLE_CPU_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_MARS_IDLE_CPU_EN (0x00000002U)
++#define RGX_CR_MARS_IDLE_SOCIF_SHIFT (0U)
++#define RGX_CR_MARS_IDLE_SOCIF_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MARS_IDLE_SOCIF_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_VDM_CONTEXT_STORE_STATUS
++*/
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS (0x0430U)
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000F3))
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT (4U)
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK (0xFFFFFF0FU)
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U)
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U)
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_VDM_CONTEXT_STORE_TASK0
++*/
++#define RGX_CR_VDM_CONTEXT_STORE_TASK0 (0x0438U)
++#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT (32U)
++#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT (0U)
++#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
++
++
++/*
++ Register RGX_CR_VDM_CONTEXT_STORE_TASK1
++*/
++#define RGX_CR_VDM_CONTEXT_STORE_TASK1 (0x0440U)
++#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT (0U)
++#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_VDM_CONTEXT_STORE_TASK2
++*/
++#define RGX_CR_VDM_CONTEXT_STORE_TASK2 (0x0448U)
++#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT (32U)
++#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT (0U)
++#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
++
++
++/*
++ Register RGX_CR_VDM_CONTEXT_RESUME_TASK0
++*/
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK0 (0x0450U)
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT (32U)
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT (0U)
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
++
++
++/*
++ Register RGX_CR_VDM_CONTEXT_RESUME_TASK1
++*/
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK1 (0x0458U)
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT (0U)
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_VDM_CONTEXT_RESUME_TASK2
++*/
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK2 (0x0460U)
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U)
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U)
++#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
++
++
++/*
++ Register RGX_CR_CDM_CONTEXT_STORE_STATUS
++*/
++#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U)
++#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
++#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
++#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0x00000002U)
++#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U)
++#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_CDM_CONTEXT_PDS0
++*/
++#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U)
++#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
++#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U)
++#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
++#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
++#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U)
++#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U)
++#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
++#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
++#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U)
++
++
++/*
++ Register RGX_CR_CDM_CONTEXT_PDS1
++*/
++#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
++#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U)
++#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (29U)
++#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (0x20000000U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U)
++#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (28U)
++#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (0x10000000U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U)
++#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (27U)
++#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (0xF7FFFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (0x08000000U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (21U)
++#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U)
++#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT (20U)
++#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN (0x00100000U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU)
++#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT (11U)
++#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU)
++#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (7U)
++#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
++#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU)
++#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (1U)
++#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U)
++#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT (0U)
++#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_CDM_TERMINATE_PDS
++*/
++#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U)
++#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
++#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U)
++#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
++#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U)
++#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U)
++#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U)
++#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
++#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U)
++#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U)
++
++
++/*
++ Register RGX_CR_CDM_TERMINATE_PDS1
++*/
++#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
++#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U)
++#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (29U)
++#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (0x20000000U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U)
++#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (28U)
++#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (0x10000000U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U)
++#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT (27U)
++#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK (0xF7FFFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN (0x08000000U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (21U)
++#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U)
++#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT (20U)
++#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN (0x00100000U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU)
++#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT (11U)
++#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU)
++#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (7U)
++#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
++#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU)
++#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (1U)
++#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U)
++#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT (0U)
++#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_CDM_CONTEXT_LOAD_PDS0
++*/
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000000F))
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U)
++
++
++/*
++ Register RGX_CR_CDM_CONTEXT_LOAD_PDS1
++*/
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0xBFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0x40000000U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (29U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (0x20000000U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0xDFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0x20000000U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (28U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (0x10000000U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0x10000000U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT (27U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK (0xF7FFFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN (0x08000000U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0xF03FFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (21U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (0xF81FFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0xFFDFFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0x00200000U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT (20U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0xFFEFFFFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN (0x00100000U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0xFFE00FFFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT (11U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK (0xFFF007FFU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (7U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (0xFFFFF87FU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0xFFFFF07FU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (1U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (0xFFFFFF81U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT (0U)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_WRAPPER_CONFIG
++*/
++#define RGX_CR_MIPS_WRAPPER_CONFIG (0x0810U)
++#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x000001030F01FFFF))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_SHIFT (40U)
++#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_FW_IDLE_ENABLE_EN (IMG_UINT64_C(0x0000010000000000))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT (33U)
++#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN (IMG_UINT64_C(0x0000000200000000))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT (32U)
++#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN (IMG_UINT64_C(0x0000000100000000))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT (25U)
++#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF1FFFFFF))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT (24U)
++#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000001000000))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT (16U)
++#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000))
++#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U)
++#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG1
++*/
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 (0x0818U)
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG2
++*/
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 (0x0820U)
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT (6U)
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT (5U)
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020))
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG1
++*/
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 (0x0828U)
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG2
++*/
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 (0x0830U)
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT (6U)
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT (5U)
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020))
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG1
++*/
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 (0x0838U)
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG2
++*/
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 (0x0840U)
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT (6U)
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT (5U)
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020))
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG1
++*/
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 (0x0848U)
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG2
++*/
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 (0x0850U)
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT (6U)
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT (5U)
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020))
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG1
++*/
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 (0x0858U)
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG2
++*/
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 (0x0860U)
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT (6U)
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFE3F))
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT (5U)
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x0000000000000020))
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE0))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS
++*/
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS (0x0868U)
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF))
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT (32U)
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN (IMG_UINT64_C(0x0000000100000000))
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR
++*/
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR (0x0870U)
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG
++*/
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG (0x0878U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFFBF))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT (36U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT (32U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT (11U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT (7U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB (IMG_UINT64_C(0x0000000000000080))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB (IMG_UINT64_C(0x0000000000000100))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB (IMG_UINT64_C(0x0000000000000180))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB (IMG_UINT64_C(0x0000000000000200))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB (IMG_UINT64_C(0x0000000000000280))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB (IMG_UINT64_C(0x0000000000000300))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB (IMG_UINT64_C(0x0000000000000380))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB (IMG_UINT64_C(0x0000000000000400))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT (1U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFC1))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP_RANGE_READ
++*/
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ (0x0880U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL (IMG_UINT64_C(0x000000000000003F))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT (1U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK (0xFFFFFFC1U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA
++*/
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA (0x0888U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFF81))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT (36U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK (IMG_UINT64_C(0x0000000FFFFFFFFF))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT (32U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT (12U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000FFF))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT (11U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN (IMG_UINT64_C(0x0000000000000800))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT (7U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF87F))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT (0U)
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_MIPS_WRAPPER_IRQ_ENABLE
++*/
++#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE (0x08A0U)
++#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT (0U)
++#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_WRAPPER_IRQ_STATUS
++*/
++#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS (0x08A8U)
++#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT (0U)
++#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_WRAPPER_IRQ_CLEAR
++*/
++#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR (0x08B0U)
++#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT (0U)
++#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_WRAPPER_NMI_ENABLE
++*/
++#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE (0x08B8U)
++#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT (0U)
++#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_WRAPPER_NMI_EVENT
++*/
++#define RGX_CR_MIPS_WRAPPER_NMI_EVENT (0x08C0U)
++#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT (0U)
++#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_DEBUG_CONFIG
++*/
++#define RGX_CR_MIPS_DEBUG_CONFIG (0x08C8U)
++#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U)
++#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_EXCEPTION_STATUS
++*/
++#define RGX_CR_MIPS_EXCEPTION_STATUS (0x08D0U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000003F))
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT (5U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK (0xFFFFFFDFU)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN (0x00000020U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT (4U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN (0x00000010U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT (3U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN (0x00000008U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT (2U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN (0x00000004U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT (1U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN (0x00000002U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT (0U)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_MIPS_WRAPPER_STATUS
++*/
++#define RGX_CR_MIPS_WRAPPER_STATUS (0x08E8U)
++#define RGX_CR_MIPS_WRAPPER_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_SHIFT (0U)
++#define RGX_CR_MIPS_WRAPPER_STATUS_OUTSTANDING_REQUESTS_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_XPU_BROADCAST
++*/
++#define RGX_CR_XPU_BROADCAST (0x0890U)
++#define RGX_CR_XPU_BROADCAST_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
++#define RGX_CR_XPU_BROADCAST_MASK_SHIFT (0U)
++#define RGX_CR_XPU_BROADCAST_MASK_CLRMSK (0xFFFFFE00U)
++
++
++/*
++ Register RGX_CR_META_SP_MSLVDATAX
++*/
++#define RGX_CR_META_SP_MSLVDATAX (0x0A00U)
++#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U)
++#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_META_SP_MSLVDATAT
++*/
++#define RGX_CR_META_SP_MSLVDATAT (0x0A08U)
++#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U)
++#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_META_SP_MSLVCTRL0
++*/
++#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U)
++#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U)
++#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0x00000003U)
++#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U)
++#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0x00000002U)
++#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U)
++#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_META_SP_MSLVCTRL1
++*/
++#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U)
++#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F))
++#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U)
++#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0x3FFFFFFFU)
++#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U)
++#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0xDFFFFFFFU)
++#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0x20000000U)
++#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U)
++#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0x10000000U)
++#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U)
++#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0xFBFFFFFFU)
++#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0x04000000U)
++#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U)
++#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0xFDFFFFFFU)
++#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0x02000000U)
++#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U)
++#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0xFEFFFFFFU)
++#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0x01000000U)
++#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U)
++#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0xFF1FFFFFU)
++#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U)
++#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0xFFEFFFFFU)
++#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0x00100000U)
++#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U)
++#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0xFFFBFFFFU)
++#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0x00040000U)
++#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U)
++#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0xFFFFFFCFU)
++#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U)
++#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0xFFFFFFF3U)
++#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U)
++#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0xFFFFFFFCU)
++
++
++/*
++ Register RGX_CR_META_SP_MSLVHANDSHKE
++*/
++#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U)
++#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F))
++#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U)
++#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0xFFFFFFF3U)
++#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U)
++#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0xFFFFFFFCU)
++
++
++/*
++ Register RGX_CR_META_SP_MSLVT0KICK
++*/
++#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U)
++#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
++#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U)
++#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0xFFFF0000U)
++
++
++/*
++ Register RGX_CR_META_SP_MSLVT0KICKI
++*/
++#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U)
(IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U) ++#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_META_SP_MSLVT1KICK ++*/ ++#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U) ++#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U) ++#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_META_SP_MSLVT1KICKI ++*/ ++#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U) ++#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U) ++#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_META_SP_MSLVT2KICK ++*/ ++#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U) ++#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U) ++#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_META_SP_MSLVT2KICKI ++*/ ++#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U) ++#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U) ++#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_META_SP_MSLVT3KICK ++*/ ++#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U) ++#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U) ++#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_META_SP_MSLVT3KICKI ++*/ ++#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U) ++#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U) ++#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_META_SP_MSLVRST ++*/ ++#define RGX_CR_META_SP_MSLVRST (0x0AC0U) ++#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U) ++#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_META_SP_MSLVIRQSTATUS ++*/ ++#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U) ++#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C)) ++#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U) ++#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0x00000008U) ++#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U) ++#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0x00000004U) ++ ++ ++/* ++ Register RGX_CR_META_SP_MSLVIRQENABLE ++*/ ++#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U) ++#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C)) ++#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U) ++#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0x00000008U) ++#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U) ++#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0x00000004U) ++ ++ ++/* ++ Register 
RGX_CR_META_SP_MSLVIRQLEVEL ++*/ ++#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U) ++#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U) ++#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_MTS_SCHEDULE ++*/ ++#define RGX_CR_MTS_SCHEDULE (0x0B00U) ++#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) ++#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U) ++#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0x00000100U) ++#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U) ++#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0xFFFFFF3FU) ++#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0x00000040U) ++#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0x00000080U) ++#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0x000000C0U) ++#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U) ++#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0x00000020U) ++#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U) ++#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0x00000010U) ++#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U) ++#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0xFFFFFFF0U) ++#define RGX_CR_MTS_SCHEDULE_DM_DM0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0x00000001U) ++#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0x00000002U) ++#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0x00000003U) ++#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0x00000004U) ++#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0x00000005U) ++#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0x00000006U) ++#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0x00000007U) ++#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0x0000000FU) ++ ++ ++/* ++ Register RGX_CR_MTS_SCHEDULE1 ++*/ ++#define RGX_CR_MTS_SCHEDULE1 (0x10B00U) ++#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) ++#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U) ++#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0x00000100U) ++#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U) ++#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0xFFFFFF3FU) ++#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0x00000040U) ++#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0x00000080U) ++#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0x000000C0U) ++#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U) ++#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0x00000020U) ++#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U) ++#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0x00000010U) ++#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U) ++#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0xFFFFFFF0U) ++#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0x00000001U) ++#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0x00000002U) ++#define RGX_CR_MTS_SCHEDULE1_DM_DM3 
(0x00000003U) ++#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0x00000004U) ++#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0x00000005U) ++#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0x00000006U) ++#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0x00000007U) ++#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0x0000000FU) ++ ++ ++/* ++ Register RGX_CR_MTS_SCHEDULE2 ++*/ ++#define RGX_CR_MTS_SCHEDULE2 (0x20B00U) ++#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) ++#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U) ++#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0x00000100U) ++#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U) ++#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0xFFFFFF3FU) ++#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0x00000040U) ++#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0x00000080U) ++#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0x000000C0U) ++#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U) ++#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0x00000020U) ++#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U) ++#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0x00000010U) ++#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U) ++#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0xFFFFFFF0U) ++#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0x00000001U) ++#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0x00000002U) ++#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0x00000003U) ++#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0x00000004U) ++#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0x00000005U) ++#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0x00000006U) ++#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0x00000007U) ++#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0x0000000FU) ++ ++ ++/* ++ Register RGX_CR_MTS_SCHEDULE3 ++*/ ++#define RGX_CR_MTS_SCHEDULE3 (0x30B00U) ++#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) ++#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U) ++#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0x00000100U) ++#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U) ++#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0xFFFFFF3FU) ++#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0x00000040U) ++#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0x00000080U) ++#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0x000000C0U) ++#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U) ++#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0x00000020U) ++#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U) ++#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0x00000010U) ++#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U) ++#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0xFFFFFFF0U) ++#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0x00000001U) ++#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0x00000002U) ++#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0x00000003U) ++#define 
RGX_CR_MTS_SCHEDULE3_DM_DM4 (0x00000004U) ++#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0x00000005U) ++#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0x00000006U) ++#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0x00000007U) ++#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0x0000000FU) ++ ++ ++/* ++ Register RGX_CR_MTS_SCHEDULE4 ++*/ ++#define RGX_CR_MTS_SCHEDULE4 (0x40B00U) ++#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) ++#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U) ++#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0x00000100U) ++#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U) ++#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0xFFFFFF3FU) ++#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0x00000040U) ++#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0x00000080U) ++#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0x000000C0U) ++#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U) ++#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0x00000020U) ++#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U) ++#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0x00000010U) ++#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U) ++#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0xFFFFFFF0U) ++#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0x00000001U) ++#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0x00000002U) ++#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0x00000003U) ++#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0x00000004U) ++#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0x00000005U) ++#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0x00000006U) ++#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0x00000007U) ++#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0x0000000FU) ++ ++ ++/* ++ Register RGX_CR_MTS_SCHEDULE5 ++*/ ++#define RGX_CR_MTS_SCHEDULE5 (0x50B00U) ++#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) ++#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U) ++#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0x00000100U) ++#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U) ++#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0xFFFFFF3FU) ++#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0x00000040U) ++#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0x00000080U) ++#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0x000000C0U) ++#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U) ++#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0x00000020U) ++#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U) ++#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0x00000010U) ++#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U) ++#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0xFFFFFFF0U) ++#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0x00000001U) ++#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0x00000002U) ++#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0x00000003U) ++#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0x00000004U) 
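++
++/*
++ Illustrative note, not part of the original register list: every field in
++ this file pairs a <REG>_<FIELD>_SHIFT with a <REG>_<FIELD>_CLRMSK (the
++ complement of the field mask), and enumerated values such as the DM_DM*
++ and PRIORITY_PRT* constants above are already pre-shifted. A field is
++ therefore updated with a plain read-modify-write; a sketch, assuming the
++ register bank has been ioremap()ed at "base":
++
++   u32 v = readl(base + RGX_CR_MTS_SCHEDULE);
++   v = (v & RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK) | RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1;
++   v = (v & RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_DM_DM0;
++   writel(v, base + RGX_CR_MTS_SCHEDULE);
++*/
++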
++#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0x00000005U) ++#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0x00000006U) ++#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0x00000007U) ++#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0x0000000FU) ++ ++ ++/* ++ Register RGX_CR_MTS_SCHEDULE6 ++*/ ++#define RGX_CR_MTS_SCHEDULE6 (0x60B00U) ++#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) ++#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U) ++#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0x00000100U) ++#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U) ++#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0xFFFFFF3FU) ++#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0x00000040U) ++#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0x00000080U) ++#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0x000000C0U) ++#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U) ++#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0x00000020U) ++#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U) ++#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0x00000010U) ++#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U) ++#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0xFFFFFFF0U) ++#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0x00000001U) ++#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0x00000002U) ++#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0x00000003U) ++#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0x00000004U) ++#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0x00000005U) ++#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0x00000006U) ++#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0x00000007U) ++#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0x0000000FU) ++ ++ ++/* ++ Register RGX_CR_MTS_SCHEDULE7 ++*/ ++#define RGX_CR_MTS_SCHEDULE7 (0x70B00U) ++#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) ++#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U) ++#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0x00000100U) ++#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U) ++#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0xFFFFFF3FU) ++#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0x00000040U) ++#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0x00000080U) ++#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0x000000C0U) ++#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U) ++#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0x00000020U) ++#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U) ++#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0x00000010U) ++#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U) ++#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0xFFFFFFF0U) ++#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (0x00000000U) ++#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0x00000001U) ++#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0x00000002U) ++#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0x00000003U) ++#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0x00000004U) ++#define RGX_CR_MTS_SCHEDULE7_DM_DM5 
(0x00000005U) ++#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0x00000006U) ++#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0x00000007U) ++#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0x0000000FU) ++ ++ ++/* ++ Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC ++*/ ++#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U) ++#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) ++#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC ++*/ ++#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U) ++#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) ++#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC ++*/ ++#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U) ++#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U) ++#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC ++*/ ++#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U) ++#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U) ++#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG ++*/ ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFF001)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFFF0FFFFFFFFFFF)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT (44U) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0xFFF00FFFFFFFFFFF)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT (40U) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_SHIFT (9U) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF9FF)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_SHIFT (8U) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0x0000000000000100)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_MTS_DM0_INTERRUPT_ENABLE ++*/ ++#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE (0x0B58U) ++#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define 
RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) ++#define RGX_CR_MTS_DM0_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_MTS_DM1_INTERRUPT_ENABLE ++*/ ++#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE (0x0B60U) ++#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) ++#define RGX_CR_MTS_DM1_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_MTS_DM2_INTERRUPT_ENABLE ++*/ ++#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE (0x0B68U) ++#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) ++#define RGX_CR_MTS_DM2_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_MTS_DM3_INTERRUPT_ENABLE ++*/ ++#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE (0x0B70U) ++#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) ++#define RGX_CR_MTS_DM3_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_MTS_DM4_INTERRUPT_ENABLE ++*/ ++#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE (0x0B78U) ++#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) ++#define RGX_CR_MTS_DM4_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_MTS_DM5_INTERRUPT_ENABLE ++*/ ++#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE (0x0B80U) ++#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_SHIFT (0U) ++#define RGX_CR_MTS_DM5_INTERRUPT_ENABLE_INT_ENABLE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_MTS_INTCTX ++*/ ++#define RGX_CR_MTS_INTCTX (0x0B98U) ++#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF)) ++#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U) ++#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0xC03FFFFFU) ++#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT (18U) ++#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK (0xFFC3FFFFU) ++#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT (16U) ++#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK (0xFFFCFFFFU) ++#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U) ++#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0xFFFF00FFU) ++#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U) ++#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_MTS_BGCTX ++*/ ++#define RGX_CR_MTS_BGCTX (0x0BA0U) ++#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) ++#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT (10U) ++#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK (0xFFFFC3FFU) ++#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT (8U) ++#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK (0xFFFFFCFFU) ++#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U) ++#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE ++*/ ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0x00FFFFFFFFFFFFFF)) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK 
(IMG_UINT64_C(0xFF00FFFFFFFFFFFF)) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U) ++#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) ++ ++ ++/* ++ Register RGX_CR_MTS_GPU_INT_STATUS ++*/ ++#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U) ++#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U) ++#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_MTS_SCHEDULE_ENABLE ++*/ ++#define RGX_CR_MTS_SCHEDULE_ENABLE (0x0BC8U) ++#define RGX_CR_MTS_SCHEDULE_ENABLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) ++#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_SHIFT (0U) ++#define RGX_CR_MTS_SCHEDULE_ENABLE_MASK_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS0_EVENT_STATUS ++*/ ++#define RGX_CR_IRQ_OS0_EVENT_STATUS (0x0BD8U) ++#define RGX_CR_IRQ_OS0_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS0_EVENT_CLEAR ++*/ ++#define RGX_CR_IRQ_OS0_EVENT_CLEAR (0x0BE8U) ++#define RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS1_EVENT_STATUS ++*/ ++#define RGX_CR_IRQ_OS1_EVENT_STATUS (0x10BD8U) ++#define RGX_CR_IRQ_OS1_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS1_EVENT_STATUS_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS1_EVENT_CLEAR ++*/ ++#define RGX_CR_IRQ_OS1_EVENT_CLEAR (0x10BE8U) ++#define RGX_CR_IRQ_OS1_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS1_EVENT_CLEAR_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS2_EVENT_STATUS ++*/ ++#define RGX_CR_IRQ_OS2_EVENT_STATUS (0x20BD8U) ++#define RGX_CR_IRQ_OS2_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS2_EVENT_STATUS_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS2_EVENT_CLEAR ++*/ ++#define RGX_CR_IRQ_OS2_EVENT_CLEAR (0x20BE8U) ++#define RGX_CR_IRQ_OS2_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) 
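++
++/*
++ Illustrative sketch, not from the original header: each IRQ_OS<n> event
++ register comes as a STATUS/CLEAR pair with a single SOURCE bit, which
++ suggests the usual read-status/write-to-acknowledge pattern, e.g. for
++ OS0 (again assuming an ioremap()ed "base"):
++
++   if (readl(base + RGX_CR_IRQ_OS0_EVENT_STATUS) & RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN)
++           writel(RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN, base + RGX_CR_IRQ_OS0_EVENT_CLEAR);
++*/
++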
++#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS2_EVENT_CLEAR_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS3_EVENT_STATUS ++*/ ++#define RGX_CR_IRQ_OS3_EVENT_STATUS (0x30BD8U) ++#define RGX_CR_IRQ_OS3_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS3_EVENT_STATUS_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS3_EVENT_CLEAR ++*/ ++#define RGX_CR_IRQ_OS3_EVENT_CLEAR (0x30BE8U) ++#define RGX_CR_IRQ_OS3_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS3_EVENT_CLEAR_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS4_EVENT_STATUS ++*/ ++#define RGX_CR_IRQ_OS4_EVENT_STATUS (0x40BD8U) ++#define RGX_CR_IRQ_OS4_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS4_EVENT_STATUS_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS4_EVENT_CLEAR ++*/ ++#define RGX_CR_IRQ_OS4_EVENT_CLEAR (0x40BE8U) ++#define RGX_CR_IRQ_OS4_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS4_EVENT_CLEAR_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS5_EVENT_STATUS ++*/ ++#define RGX_CR_IRQ_OS5_EVENT_STATUS (0x50BD8U) ++#define RGX_CR_IRQ_OS5_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS5_EVENT_STATUS_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS5_EVENT_CLEAR ++*/ ++#define RGX_CR_IRQ_OS5_EVENT_CLEAR (0x50BE8U) ++#define RGX_CR_IRQ_OS5_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS5_EVENT_CLEAR_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS6_EVENT_STATUS ++*/ ++#define RGX_CR_IRQ_OS6_EVENT_STATUS (0x60BD8U) ++#define RGX_CR_IRQ_OS6_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS6_EVENT_STATUS_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS6_EVENT_CLEAR ++*/ ++#define RGX_CR_IRQ_OS6_EVENT_CLEAR (0x60BE8U) ++#define RGX_CR_IRQ_OS6_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS6_EVENT_CLEAR_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS7_EVENT_STATUS ++*/ ++#define RGX_CR_IRQ_OS7_EVENT_STATUS (0x70BD8U) ++#define RGX_CR_IRQ_OS7_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS7_EVENT_STATUS_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_IRQ_OS7_EVENT_CLEAR ++*/ 
++#define RGX_CR_IRQ_OS7_EVENT_CLEAR (0x70BE8U) ++#define RGX_CR_IRQ_OS7_EVENT_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_SHIFT (0U) ++#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_IRQ_OS7_EVENT_CLEAR_SOURCE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_META_BOOT ++*/ ++#define RGX_CR_META_BOOT (0x0BF8U) ++#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_META_BOOT_MODE_SHIFT (0U) ++#define RGX_CR_META_BOOT_MODE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_META_BOOT_MODE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_GARTEN_SLC ++*/ ++#define RGX_CR_GARTEN_SLC (0x0BB8U) ++#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U) ++#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_PPP ++*/ ++#define RGX_CR_PPP (0x0CD0U) ++#define RGX_CR_PPP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PPP_CHECKSUM_SHIFT (0U) ++#define RGX_CR_PPP_CHECKSUM_CLRMSK (0x00000000U) ++ ++ ++#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK (0x00000003U) ++/* ++Top-left to bottom-right */ ++#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR (0x00000000U) ++/* ++Top-right to bottom-left */ ++#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL (0x00000001U) ++/* ++Bottom-left to top-right */ ++#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR (0x00000002U) ++/* ++Bottom-right to top-left */ ++#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL (0x00000003U) ++ ++ ++#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK (0x00000003U) ++/* ++Normal render */ ++#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM (0x00000000U) ++/* ++Fast 2D render */ ++#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D (0x00000002U) ++/* ++Fast scale render */ ++#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE (0x00000003U) ++ ++ ++/* ++ Register RGX_CR_ISP_RENDER ++*/ ++#define RGX_CR_ISP_RENDER (0x0F08U) ++#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x00000000000001FF)) ++#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_SHIFT (8U) ++#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_ISP_RENDER_FAST_RENDER_FORCE_PROTECT_EN (0x00000100U) ++#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_SHIFT (7U) ++#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_ISP_RENDER_PROCESS_PROTECTED_TILES_EN (0x00000080U) ++#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_SHIFT (6U) ++#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_ISP_RENDER_PROCESS_UNPROTECTED_TILES_EN (0x00000040U) ++#define RGX_CR_ISP_RENDER_DISABLE_EOMT_SHIFT (5U) ++#define RGX_CR_ISP_RENDER_DISABLE_EOMT_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_ISP_RENDER_DISABLE_EOMT_EN (0x00000020U) ++#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U) ++#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_ISP_RENDER_RESUME_EN (0x00000010U) ++#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U) ++#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0xFFFFFFF3U) ++#define RGX_CR_ISP_RENDER_DIR_TL2BR (0x00000000U) ++#define RGX_CR_ISP_RENDER_DIR_TR2BL (0x00000004U) ++#define RGX_CR_ISP_RENDER_DIR_BL2TR (0x00000008U) ++#define RGX_CR_ISP_RENDER_DIR_BR2TL (0x0000000CU) ++#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U) ++#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0xFFFFFFFCU) ++#define RGX_CR_ISP_RENDER_MODE_NORM (0x00000000U) ++#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0x00000002U) ++#define 
RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0x00000003U) ++ ++ ++/* ++ Register RGX_CR_ISP_CTL ++*/ ++#define RGX_CR_ISP_CTL (0x0F38U) ++#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x00000000FFFFF3FF)) ++#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_SHIFT (31U) ++#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_CLRMSK (0x7FFFFFFFU) ++#define RGX_CR_ISP_CTL_SKIP_INIT_HDRS_EN (0x80000000U) ++#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (30U) ++#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0xBFFFFFFFU) ++#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0x40000000U) ++#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT (29U) ++#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK (0xDFFFFFFFU) ++#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_EN (0x20000000U) ++#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT (28U) ++#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK (0xEFFFFFFFU) ++#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_EN (0x10000000U) ++#define RGX_CR_ISP_CTL_PAIR_TILES_SHIFT (27U) ++#define RGX_CR_ISP_CTL_PAIR_TILES_CLRMSK (0xF7FFFFFFU) ++#define RGX_CR_ISP_CTL_PAIR_TILES_EN (0x08000000U) ++#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT (26U) ++#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK (0xFBFFFFFFU) ++#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN (0x04000000U) ++#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT (25U) ++#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK (0xFDFFFFFFU) ++#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN (0x02000000U) ++#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT (23U) ++#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK (0xFE7FFFFFU) ++#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 (0x00000000U) ++#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 (0x00800000U) ++#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL (0x01000000U) ++#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT (21U) ++#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK (0xFF9FFFFFU) ++#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (20U) ++#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0xFFEFFFFFU) ++#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0x00100000U) ++#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U) ++#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0xFFF7FFFFU) ++#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0x00080000U) ++#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT (18U) ++#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK (0xFFFBFFFFU) ++#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN (0x00040000U) ++#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT (17U) ++#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK (0xFFFDFFFFU) ++#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN (0x00020000U) ++#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT (16U) ++#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK (0xFFFEFFFFU) ++#define RGX_CR_ISP_CTL_SAMPLE_POS_EN (0x00010000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT (12U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK (0xFFFF0FFFU) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE (0x00000000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO (0x00001000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE (0x00002000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR (0x00003000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE (0x00004000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX (0x00005000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN (0x00006000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT (0x00007000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE (0x00008000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN (0x00009000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN (0x0000A000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE (0x0000B000U) ++#define 
RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN (0x0000C000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN (0x0000D000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN (0x0000E000U) ++#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN (0x0000F000U) ++#define RGX_CR_ISP_CTL_VALID_ID_SHIFT (4U) ++#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK (0xFFFFFC0FU) ++#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U) ++#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0xFFFFFFF0U) ++ ++ ++/* ++ Register RGX_CR_ISP_STATUS ++*/ ++#define RGX_CR_ISP_STATUS (0x1038U) ++#define RGX_CR_ISP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000007)) ++#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT (2U) ++#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN (0x00000004U) ++#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT (1U) ++#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_ISP_STATUS_ACTIVE_EN (0x00000002U) ++#define RGX_CR_ISP_STATUS_EOR_SHIFT (0U) ++#define RGX_CR_ISP_STATUS_EOR_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_ISP_STATUS_EOR_EN (0x00000001U) ++ ++ ++/* ++ Register group: RGX_CR_ISP_XTP_RESUME, with 64 repeats ++*/ ++#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT (64U) ++/* ++ Register RGX_CR_ISP_XTP_RESUME0 ++*/ ++#define RGX_CR_ISP_XTP_RESUME0 (0x3A00U) ++#define RGX_CR_ISP_XTP_RESUME0_MASKFULL (IMG_UINT64_C(0x00000000003FF3FF)) ++#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT (12U) ++#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK (0xFFC00FFFU) ++#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT (0U) ++#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK (0xFFFFFC00U) ++ ++ ++/* ++ Register group: RGX_CR_ISP_XTP_STORE, with 32 repeats ++*/ ++#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT (32U) ++/* ++ Register RGX_CR_ISP_XTP_STORE0 ++*/ ++#define RGX_CR_ISP_XTP_STORE0 (0x3C00U) ++#define RGX_CR_ISP_XTP_STORE0_MASKFULL (IMG_UINT64_C(0x000000007F3FF3FF)) ++#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT (30U) ++#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK (0xBFFFFFFFU) ++#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN (0x40000000U) ++#define RGX_CR_ISP_XTP_STORE0_EOR_SHIFT (29U) ++#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK (0xDFFFFFFFU) ++#define RGX_CR_ISP_XTP_STORE0_EOR_EN (0x20000000U) ++#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT (28U) ++#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK (0xEFFFFFFFU) ++#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN (0x10000000U) ++#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT (24U) ++#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK (0xF0FFFFFFU) ++#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT (12U) ++#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK (0xFFC00FFFU) ++#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT (0U) ++#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK (0xFFFFFC00U) ++ ++ ++/* ++ Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats ++*/ ++#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT (8U) ++/* ++ Register RGX_CR_BIF_CAT_BASE0 ++*/ ++#define RGX_CR_BIF_CAT_BASE0 (0x1200U) ++#define RGX_CR_BIF_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) ++#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_CAT_BASE1 ++*/ ++#define RGX_CR_BIF_CAT_BASE1 (0x1208U) ++#define RGX_CR_BIF_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) ++#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT (12U) 
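++
++/*
++ Illustrative note, not from the original header: for the BIF_CAT_BASE*
++ registers ADDR_ALIGNSHIFT equals ADDR_SHIFT (12), i.e. the 4 KiB-aligned
++ page-catalogue address is programmed in place rather than shifted down.
++ A sketch, assuming a 4 KiB-aligned physical address below 1 << 40:
++
++   writeq(cat_base_phys & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK,
++          base + RGX_CR_BIF_CAT_BASE0);
++*/
++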
++#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_CAT_BASE2 ++*/ ++#define RGX_CR_BIF_CAT_BASE2 (0x1210U) ++#define RGX_CR_BIF_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) ++#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_CAT_BASE3 ++*/ ++#define RGX_CR_BIF_CAT_BASE3 (0x1218U) ++#define RGX_CR_BIF_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) ++#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_CAT_BASE4 ++*/ ++#define RGX_CR_BIF_CAT_BASE4 (0x1220U) ++#define RGX_CR_BIF_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) ++#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_CAT_BASE5 ++*/ ++#define RGX_CR_BIF_CAT_BASE5 (0x1228U) ++#define RGX_CR_BIF_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) ++#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_CAT_BASE6 ++*/ ++#define RGX_CR_BIF_CAT_BASE6 (0x1230U) ++#define RGX_CR_BIF_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) ++#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_CAT_BASE7 ++*/ ++#define RGX_CR_BIF_CAT_BASE7 (0x1238U) ++#define RGX_CR_BIF_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000)) ++#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_CAT_BASE_INDEX ++*/ ++#define RGX_CR_BIF_CAT_BASE_INDEX (0x1240U) ++#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL (IMG_UINT64_C(0x00070707073F0707)) ++#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT (48U) ++#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK (IMG_UINT64_C(0xFFF8FFFFFFFFFFFF)) ++#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT (40U) ++#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF)) ++#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT (32U) ++#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFF8FFFFFFFF)) ++#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT (24U) ++#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF8FFFFFF)) ++#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT (19U) ++#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC7FFFF)) ++#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT (16U) ++#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF8FFFF)) ++#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT (8U) ++#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFFFF8FF)) ++#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT (0U) ++#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF8)) ++ ++ ++/* ++ Register RGX_CR_BIF_PM_CAT_BASE_VCE0 ++*/ ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0 (0x1248U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT (40U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT (1U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT (0U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_BIF_PM_CAT_BASE_TE0 ++*/ ++#define RGX_CR_BIF_PM_CAT_BASE_TE0 (0x1250U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT (40U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT (1U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT (0U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_BIF_PM_CAT_BASE_ALIST0 ++*/ ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0 (0x1260U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT (40U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT (1U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT (0U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_BIF_PM_CAT_BASE_VCE1 ++*/ ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1 (0x1268U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT (40U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT (1U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) 
++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT (0U) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_BIF_PM_CAT_BASE_TE1 ++*/ ++#define RGX_CR_BIF_PM_CAT_BASE_TE1 (0x1270U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT (40U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT (1U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT (0U) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_BIF_PM_CAT_BASE_ALIST1 ++*/ ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1 (0x1280U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT (40U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0xF00000FFFFFFFFFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT (12U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT (1U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT (0U) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_BIF_MMU_ENTRY_STATUS ++*/ ++#define RGX_CR_BIF_MMU_ENTRY_STATUS (0x1288U) ++#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF0F3)) ++#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT (12U) ++#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT (4U) ++#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) ++#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT (0U) ++#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)) ++ ++ ++/* ++ Register RGX_CR_BIF_MMU_ENTRY ++*/ ++#define RGX_CR_BIF_MMU_ENTRY (0x1290U) ++#define RGX_CR_BIF_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003)) ++#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT (1U) ++#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN (0x00000002U) ++#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT (0U) ++#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_BIF_CTRL_INVAL ++*/ ++#define RGX_CR_BIF_CTRL_INVAL (0x12A0U) ++#define RGX_CR_BIF_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F)) ++#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT (3U) ++#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK (0xFFFFFFF7U) 
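++
++/*
++ Illustrative sketch, not from the original header: the BIF_CTRL_INVAL _EN
++ constants defined below are independent single-bit masks (MASKFULL is 0xF),
++ so several invalidate requests can plausibly be issued in one write:
++
++   writel(RGX_CR_BIF_CTRL_INVAL_PC_EN |
++          RGX_CR_BIF_CTRL_INVAL_PD_EN |
++          RGX_CR_BIF_CTRL_INVAL_PT_EN,
++          base + RGX_CR_BIF_CTRL_INVAL);
++*/
++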
++#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN (0x00000008U) ++#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT (2U) ++#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_BIF_CTRL_INVAL_PC_EN (0x00000004U) ++#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT (1U) ++#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_BIF_CTRL_INVAL_PD_EN (0x00000002U) ++#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT (0U) ++#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_BIF_CTRL_INVAL_PT_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_BIF_CTRL ++*/ ++#define RGX_CR_BIF_CTRL (0x12A8U) ++#define RGX_CR_BIF_CTRL__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000000000033F)) ++#define RGX_CR_BIF_CTRL_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_CPU_SHIFT (9U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_CPU_CLRMSK (0xFFFFFDFFU) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_CPU_EN (0x00000200U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_SHIFT (8U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF4_EN (0x00000100U) ++#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT (7U) ++#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN (0x00000080U) ++#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT (6U) ++#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN (0x00000040U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT (5U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN (0x00000020U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT (4U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN (0x00000010U) ++#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT (3U) ++#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN (0x00000008U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT (2U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN (0x00000004U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT (1U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN (0x00000002U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT (0U) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS ++*/ ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS (0x12B0U) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN 
(0x00000004U) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS ++*/ ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS (0x12B8U) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_SHIFT (52U) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN (IMG_UINT64_C(0x0010000000000000)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT (46U) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT (40U) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) ++#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) ++ ++ ++/* ++ Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS ++*/ ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS (0x12C0U) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775)) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT (12U) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT (4U) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN (0x00000010U) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT (0U) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS ++*/ ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS (0x12C8U) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0)) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT (50U) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK 
(IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000)) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT (44U) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF)) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT (40U) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF)) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT (4U) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F)) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U) ++#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U) ++ ++ ++/* ++ Register RGX_CR_BIF_MMU_STATUS ++*/ ++#define RGX_CR_BIF_MMU_STATUS (0x12D0U) ++#define RGX_CR_BIF_MMU_STATUS__XE_MEM__MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) ++#define RGX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7)) ++#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT (28U) ++#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK (0xEFFFFFFFU) ++#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN (0x10000000U) ++#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U) ++#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU) ++#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U) ++#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU) ++#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U) ++#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU) ++#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U) ++#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U) ++#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U) ++#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U) ++#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U) ++#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U) ++ ++ ++/* ++ Register group: RGX_CR_BIF_TILING_CFG, with 8 repeats ++*/ ++#define RGX_CR_BIF_TILING_CFG_REPEATCOUNT (8U) ++/* ++ Register RGX_CR_BIF_TILING_CFG0 ++*/ ++#define RGX_CR_BIF_TILING_CFG0 (0x12D8U) ++#define RGX_CR_BIF_TILING_CFG0_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_SHIFT (61U) ++#define RGX_CR_BIF_TILING_CFG0_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG0_ENABLE_SHIFT (60U) ++#define RGX_CR_BIF_TILING_CFG0_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG0_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) ++#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_SHIFT (32U) ++#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_TILING_CFG0_MAX_ADDRESS_ALIGNSIZE (4096U) ++#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_SHIFT (0U) ++#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) ++#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_TILING_CFG0_MIN_ADDRESS_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_TILING_CFG1 ++*/ ++#define RGX_CR_BIF_TILING_CFG1 (0x12E0U) ++#define RGX_CR_BIF_TILING_CFG1_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_SHIFT (61U) ++#define RGX_CR_BIF_TILING_CFG1_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG1_ENABLE_SHIFT (60U) ++#define 
RGX_CR_BIF_TILING_CFG1_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG1_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) ++#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_SHIFT (32U) ++#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_TILING_CFG1_MAX_ADDRESS_ALIGNSIZE (4096U) ++#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_SHIFT (0U) ++#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) ++#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_TILING_CFG1_MIN_ADDRESS_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_TILING_CFG2 ++*/ ++#define RGX_CR_BIF_TILING_CFG2 (0x12E8U) ++#define RGX_CR_BIF_TILING_CFG2_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_SHIFT (61U) ++#define RGX_CR_BIF_TILING_CFG2_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG2_ENABLE_SHIFT (60U) ++#define RGX_CR_BIF_TILING_CFG2_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG2_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) ++#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_SHIFT (32U) ++#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_TILING_CFG2_MAX_ADDRESS_ALIGNSIZE (4096U) ++#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_SHIFT (0U) ++#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) ++#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_TILING_CFG2_MIN_ADDRESS_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_TILING_CFG3 ++*/ ++#define RGX_CR_BIF_TILING_CFG3 (0x12F0U) ++#define RGX_CR_BIF_TILING_CFG3_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_SHIFT (61U) ++#define RGX_CR_BIF_TILING_CFG3_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG3_ENABLE_SHIFT (60U) ++#define RGX_CR_BIF_TILING_CFG3_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG3_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) ++#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_SHIFT (32U) ++#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_TILING_CFG3_MAX_ADDRESS_ALIGNSIZE (4096U) ++#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_SHIFT (0U) ++#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000)) ++#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSHIFT (12U) ++#define RGX_CR_BIF_TILING_CFG3_MIN_ADDRESS_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_BIF_TILING_CFG4 ++*/ ++#define RGX_CR_BIF_TILING_CFG4 (0x12F8U) ++#define RGX_CR_BIF_TILING_CFG4_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_SHIFT (61U) ++#define RGX_CR_BIF_TILING_CFG4_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG4_ENABLE_SHIFT (60U) ++#define RGX_CR_BIF_TILING_CFG4_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG4_ENABLE_EN (IMG_UINT64_C(0x1000000000000000)) ++#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_SHIFT (32U) ++#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF)) ++#define RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSHIFT (12U) ++#define 
RGX_CR_BIF_TILING_CFG4_MAX_ADDRESS_ALIGNSIZE (4096U)
++#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_SHIFT (0U)
++#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
++#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSHIFT (12U)
++#define RGX_CR_BIF_TILING_CFG4_MIN_ADDRESS_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_BIF_TILING_CFG5
++*/
++#define RGX_CR_BIF_TILING_CFG5 (0x1300U)
++#define RGX_CR_BIF_TILING_CFG5_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
++#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_SHIFT (61U)
++#define RGX_CR_BIF_TILING_CFG5_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
++#define RGX_CR_BIF_TILING_CFG5_ENABLE_SHIFT (60U)
++#define RGX_CR_BIF_TILING_CFG5_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_BIF_TILING_CFG5_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_SHIFT (32U)
++#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
++#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSHIFT (12U)
++#define RGX_CR_BIF_TILING_CFG5_MAX_ADDRESS_ALIGNSIZE (4096U)
++#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_SHIFT (0U)
++#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
++#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSHIFT (12U)
++#define RGX_CR_BIF_TILING_CFG5_MIN_ADDRESS_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_BIF_TILING_CFG6
++*/
++#define RGX_CR_BIF_TILING_CFG6 (0x1308U)
++#define RGX_CR_BIF_TILING_CFG6_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
++#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_SHIFT (61U)
++#define RGX_CR_BIF_TILING_CFG6_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
++#define RGX_CR_BIF_TILING_CFG6_ENABLE_SHIFT (60U)
++#define RGX_CR_BIF_TILING_CFG6_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_BIF_TILING_CFG6_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_SHIFT (32U)
++#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
++#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSHIFT (12U)
++#define RGX_CR_BIF_TILING_CFG6_MAX_ADDRESS_ALIGNSIZE (4096U)
++#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_SHIFT (0U)
++#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
++#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSHIFT (12U)
++#define RGX_CR_BIF_TILING_CFG6_MIN_ADDRESS_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_BIF_TILING_CFG7
++*/
++#define RGX_CR_BIF_TILING_CFG7 (0x1310U)
++#define RGX_CR_BIF_TILING_CFG7_MASKFULL (IMG_UINT64_C(0xFFFFFFFF0FFFFFFF))
++#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_SHIFT (61U)
++#define RGX_CR_BIF_TILING_CFG7_XSTRIDE_CLRMSK (IMG_UINT64_C(0x1FFFFFFFFFFFFFFF))
++#define RGX_CR_BIF_TILING_CFG7_ENABLE_SHIFT (60U)
++#define RGX_CR_BIF_TILING_CFG7_ENABLE_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_BIF_TILING_CFG7_ENABLE_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_SHIFT (32U)
++#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_CLRMSK (IMG_UINT64_C(0xF0000000FFFFFFFF))
++#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSHIFT (12U)
++#define RGX_CR_BIF_TILING_CFG7_MAX_ADDRESS_ALIGNSIZE (4096U)
++#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_SHIFT (0U)
++#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF0000000))
++#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSHIFT (12U)
++#define RGX_CR_BIF_TILING_CFG7_MIN_ADDRESS_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_BIF_READS_EXT_STATUS
++*/
++#define RGX_CR_BIF_READS_EXT_STATUS (0x1320U)
++#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
++#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT (16U)
++#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK (0xF000FFFFU)
++#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT (0U)
++#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK (0xFFFF0000U)
++
++
++/*
++ Register RGX_CR_BIF_READS_INT_STATUS
++*/
++#define RGX_CR_BIF_READS_INT_STATUS (0x1328U)
++#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000007FFFFFF))
++#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT (16U)
++#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK (0xF800FFFFU)
++#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT (0U)
++#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK (0xFFFF0000U)
++
++
++/*
++ Register RGX_CR_BIFPM_READS_INT_STATUS
++*/
++#define RGX_CR_BIFPM_READS_INT_STATUS (0x1330U)
++#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
++#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT (0U)
++#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK (0xFFFF0000U)
++
++
++/*
++ Register RGX_CR_BIFPM_READS_EXT_STATUS
++*/
++#define RGX_CR_BIFPM_READS_EXT_STATUS (0x1338U)
++#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
++#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT (0U)
++#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK (0xFFFF0000U)
++
++
++/*
++ Register RGX_CR_BIFPM_STATUS_MMU
++*/
++#define RGX_CR_BIFPM_STATUS_MMU (0x1350U)
++#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT (0U)
++#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_BIF_STATUS_MMU
++*/
++#define RGX_CR_BIF_STATUS_MMU (0x1358U)
++#define RGX_CR_BIF_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT (0U)
++#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_BIF_FAULT_READ
++*/
++#define RGX_CR_BIF_FAULT_READ (0x13E0U)
++#define RGX_CR_BIF_FAULT_READ_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
++#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT (4U)
++#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
++#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT (4U)
++#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE (16U)
++
++
++/*
++ Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS
++*/
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS (0x1430U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0x00000010U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS
++*/
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS (0x1438U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0004000000000000))
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFC0FFFFFFFFFFF))
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
++#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
++
++
++/*
++ Register RGX_CR_TFBC_COMPRESSION_CONTROL
++*/
++#define RGX_CR_TFBC_COMPRESSION_CONTROL (0x14A0U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_SHIFT (7U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_CLRMSK (0xFFFFFF7FU)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN (0x00000080U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_SHIFT (4U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_SHIFT_CLRMSK (0xFFFFFF8FU)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_SHIFT (3U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_QUALITY_ENABLE_EN (0x00000008U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT (1U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK (0xFFFFFFF9U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_DEFAULT (0x00000000U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD_AND_CORRELATION (0x00000002U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_TFBC_DELTA_STANDARD (0x00000004U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_RESERVED (0x00000006U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT (0U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_0 (0x00000000U)
++#define RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_GROUP_1 (0x00000001U)
++
++
++/*
++ Register RGX_CR_MCU_FENCE
++*/
++#define RGX_CR_MCU_FENCE (0x1740U)
++#define RGX_CR_MCU_FENCE_MASKFULL (IMG_UINT64_C(0x000007FFFFFFFFE0))
++#define RGX_CR_MCU_FENCE_DM_SHIFT (40U)
++#define RGX_CR_MCU_FENCE_DM_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_MCU_FENCE_DM_VERTEX (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_MCU_FENCE_DM_PIXEL (IMG_UINT64_C(0x0000010000000000))
++#define RGX_CR_MCU_FENCE_DM_COMPUTE (IMG_UINT64_C(0x0000020000000000))
++#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX (IMG_UINT64_C(0x0000030000000000))
++#define RGX_CR_MCU_FENCE_DM_RAY (IMG_UINT64_C(0x0000040000000000))
++#define RGX_CR_MCU_FENCE_DM_FASTRENDER (IMG_UINT64_C(0x0000050000000000))
++#define RGX_CR_MCU_FENCE_ADDR_SHIFT (5U)
++#define RGX_CR_MCU_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F))
++#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT (5U)
++#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE (32U)
++
++
++/*
++ Register group: RGX_CR_SCRATCH, with 16 repeats
++*/
++#define RGX_CR_SCRATCH_REPEATCOUNT (16U)
++/*
++ Register RGX_CR_SCRATCH0
++*/
++#define RGX_CR_SCRATCH0 (0x1A00U)
++#define RGX_CR_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH0_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH0_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH1
++*/
++#define RGX_CR_SCRATCH1 (0x1A08U)
++#define RGX_CR_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH1_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH1_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH2
++*/
++#define RGX_CR_SCRATCH2 (0x1A10U)
++#define RGX_CR_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH2_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH2_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH3
++*/
++#define RGX_CR_SCRATCH3 (0x1A18U)
++#define RGX_CR_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH3_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH3_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH4
++*/
++#define RGX_CR_SCRATCH4 (0x1A20U)
++#define RGX_CR_SCRATCH4_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH4_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH4_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH5
++*/
++#define RGX_CR_SCRATCH5 (0x1A28U)
++#define RGX_CR_SCRATCH5_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH5_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH5_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH6
++*/
++#define RGX_CR_SCRATCH6 (0x1A30U)
++#define RGX_CR_SCRATCH6_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH6_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH6_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH7
++*/
++#define RGX_CR_SCRATCH7 (0x1A38U)
++#define RGX_CR_SCRATCH7_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH7_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH7_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH8
++*/
++#define RGX_CR_SCRATCH8 (0x1A40U)
++#define RGX_CR_SCRATCH8_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH8_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH8_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH9
++*/
++#define RGX_CR_SCRATCH9 (0x1A48U)
++#define RGX_CR_SCRATCH9_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH9_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH9_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH10
++*/
++#define RGX_CR_SCRATCH10 (0x1A50U)
++#define RGX_CR_SCRATCH10_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH10_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH10_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH11
++*/
++#define RGX_CR_SCRATCH11 (0x1A58U)
++#define RGX_CR_SCRATCH11_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH11_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH11_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH12
++*/
++#define RGX_CR_SCRATCH12 (0x1A60U)
++#define RGX_CR_SCRATCH12_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH12_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH12_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH13
++*/
++#define RGX_CR_SCRATCH13 (0x1A68U)
++#define RGX_CR_SCRATCH13_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH13_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH13_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH14
++*/
++#define RGX_CR_SCRATCH14 (0x1A70U)
++#define RGX_CR_SCRATCH14_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH14_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH14_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SCRATCH15
++*/
++#define RGX_CR_SCRATCH15 (0x1A78U)
++#define RGX_CR_SCRATCH15_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SCRATCH15_DATA_SHIFT (0U)
++#define RGX_CR_SCRATCH15_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register group: RGX_CR_OS0_SCRATCH, with 2 repeats
++*/
++#define RGX_CR_OS0_SCRATCH_REPEATCOUNT (2U)
++/*
++ Register RGX_CR_OS0_SCRATCH0
++*/
++#define RGX_CR_OS0_SCRATCH0 (0x1A80U)
++#define RGX_CR_OS0_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS0_SCRATCH0_DATA_SHIFT (0U)
++#define RGX_CR_OS0_SCRATCH0_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS0_SCRATCH1
++*/
++#define RGX_CR_OS0_SCRATCH1 (0x1A88U)
++#define RGX_CR_OS0_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS0_SCRATCH1_DATA_SHIFT (0U)
++#define RGX_CR_OS0_SCRATCH1_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS0_SCRATCH2
++*/
++#define RGX_CR_OS0_SCRATCH2 (0x1A90U)
++#define RGX_CR_OS0_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS0_SCRATCH2_DATA_SHIFT (0U)
++#define RGX_CR_OS0_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_OS0_SCRATCH3
++*/
++#define RGX_CR_OS0_SCRATCH3 (0x1A98U)
++#define RGX_CR_OS0_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS0_SCRATCH3_DATA_SHIFT (0U)
++#define RGX_CR_OS0_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register group: RGX_CR_OS1_SCRATCH, with 2 repeats
++*/
++#define RGX_CR_OS1_SCRATCH_REPEATCOUNT (2U)
++/*
++ Register RGX_CR_OS1_SCRATCH0
++*/
++#define RGX_CR_OS1_SCRATCH0 (0x11A80U)
++#define RGX_CR_OS1_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS1_SCRATCH0_DATA_SHIFT (0U)
++#define RGX_CR_OS1_SCRATCH0_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS1_SCRATCH1
++*/
++#define RGX_CR_OS1_SCRATCH1 (0x11A88U)
++#define RGX_CR_OS1_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS1_SCRATCH1_DATA_SHIFT (0U)
++#define RGX_CR_OS1_SCRATCH1_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS1_SCRATCH2
++*/
++#define RGX_CR_OS1_SCRATCH2 (0x11A90U)
++#define RGX_CR_OS1_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS1_SCRATCH2_DATA_SHIFT (0U)
++#define RGX_CR_OS1_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_OS1_SCRATCH3
++*/
++#define RGX_CR_OS1_SCRATCH3 (0x11A98U)
++#define RGX_CR_OS1_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS1_SCRATCH3_DATA_SHIFT (0U)
++#define RGX_CR_OS1_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register group: RGX_CR_OS2_SCRATCH, with 2 repeats
++*/
++#define RGX_CR_OS2_SCRATCH_REPEATCOUNT (2U)
++/*
++ Register RGX_CR_OS2_SCRATCH0
++*/
++#define RGX_CR_OS2_SCRATCH0 (0x21A80U)
++#define RGX_CR_OS2_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS2_SCRATCH0_DATA_SHIFT (0U)
++#define RGX_CR_OS2_SCRATCH0_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS2_SCRATCH1
++*/
++#define RGX_CR_OS2_SCRATCH1 (0x21A88U)
++#define RGX_CR_OS2_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS2_SCRATCH1_DATA_SHIFT (0U)
++#define RGX_CR_OS2_SCRATCH1_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS2_SCRATCH2
++*/
++#define RGX_CR_OS2_SCRATCH2 (0x21A90U)
++#define RGX_CR_OS2_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS2_SCRATCH2_DATA_SHIFT (0U)
++#define RGX_CR_OS2_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_OS2_SCRATCH3
++*/
++#define RGX_CR_OS2_SCRATCH3 (0x21A98U)
++#define RGX_CR_OS2_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS2_SCRATCH3_DATA_SHIFT (0U)
++#define RGX_CR_OS2_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register group: RGX_CR_OS3_SCRATCH, with 2 repeats
++*/
++#define RGX_CR_OS3_SCRATCH_REPEATCOUNT (2U)
++/*
++ Register RGX_CR_OS3_SCRATCH0
++*/
++#define RGX_CR_OS3_SCRATCH0 (0x31A80U)
++#define RGX_CR_OS3_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS3_SCRATCH0_DATA_SHIFT (0U)
++#define RGX_CR_OS3_SCRATCH0_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS3_SCRATCH1
++*/
++#define RGX_CR_OS3_SCRATCH1 (0x31A88U)
++#define RGX_CR_OS3_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS3_SCRATCH1_DATA_SHIFT (0U)
++#define RGX_CR_OS3_SCRATCH1_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS3_SCRATCH2
++*/
++#define RGX_CR_OS3_SCRATCH2 (0x31A90U)
++#define RGX_CR_OS3_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS3_SCRATCH2_DATA_SHIFT (0U)
++#define RGX_CR_OS3_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_OS3_SCRATCH3
++*/
++#define RGX_CR_OS3_SCRATCH3 (0x31A98U)
++#define RGX_CR_OS3_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS3_SCRATCH3_DATA_SHIFT (0U)
++#define RGX_CR_OS3_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register group: RGX_CR_OS4_SCRATCH, with 2 repeats
++*/
++#define RGX_CR_OS4_SCRATCH_REPEATCOUNT (2U)
++/*
++ Register RGX_CR_OS4_SCRATCH0
++*/
++#define RGX_CR_OS4_SCRATCH0 (0x41A80U)
++#define RGX_CR_OS4_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS4_SCRATCH0_DATA_SHIFT (0U)
++#define RGX_CR_OS4_SCRATCH0_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS4_SCRATCH1
++*/
++#define RGX_CR_OS4_SCRATCH1 (0x41A88U)
++#define RGX_CR_OS4_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS4_SCRATCH1_DATA_SHIFT (0U)
++#define RGX_CR_OS4_SCRATCH1_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS4_SCRATCH2
++*/
++#define RGX_CR_OS4_SCRATCH2 (0x41A90U)
++#define RGX_CR_OS4_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS4_SCRATCH2_DATA_SHIFT (0U)
++#define RGX_CR_OS4_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_OS4_SCRATCH3
++*/
++#define RGX_CR_OS4_SCRATCH3 (0x41A98U)
++#define RGX_CR_OS4_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS4_SCRATCH3_DATA_SHIFT (0U)
++#define RGX_CR_OS4_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register group: RGX_CR_OS5_SCRATCH, with 2 repeats
++*/
++#define RGX_CR_OS5_SCRATCH_REPEATCOUNT (2U)
++/*
++ Register RGX_CR_OS5_SCRATCH0
++*/
++#define RGX_CR_OS5_SCRATCH0 (0x51A80U)
++#define RGX_CR_OS5_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS5_SCRATCH0_DATA_SHIFT (0U)
++#define RGX_CR_OS5_SCRATCH0_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS5_SCRATCH1
++*/
++#define RGX_CR_OS5_SCRATCH1 (0x51A88U)
++#define RGX_CR_OS5_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS5_SCRATCH1_DATA_SHIFT (0U)
++#define RGX_CR_OS5_SCRATCH1_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS5_SCRATCH2
++*/
++#define RGX_CR_OS5_SCRATCH2 (0x51A90U)
++#define RGX_CR_OS5_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS5_SCRATCH2_DATA_SHIFT (0U)
++#define RGX_CR_OS5_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_OS5_SCRATCH3
++*/
++#define RGX_CR_OS5_SCRATCH3 (0x51A98U)
++#define RGX_CR_OS5_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS5_SCRATCH3_DATA_SHIFT (0U)
++#define RGX_CR_OS5_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register group: RGX_CR_OS6_SCRATCH, with 2 repeats
++*/
++#define RGX_CR_OS6_SCRATCH_REPEATCOUNT (2U)
++/*
++ Register RGX_CR_OS6_SCRATCH0
++*/
++#define RGX_CR_OS6_SCRATCH0 (0x61A80U)
++#define RGX_CR_OS6_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS6_SCRATCH0_DATA_SHIFT (0U)
++#define RGX_CR_OS6_SCRATCH0_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS6_SCRATCH1
++*/
++#define RGX_CR_OS6_SCRATCH1 (0x61A88U)
++#define RGX_CR_OS6_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS6_SCRATCH1_DATA_SHIFT (0U)
++#define RGX_CR_OS6_SCRATCH1_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS6_SCRATCH2
++*/
++#define RGX_CR_OS6_SCRATCH2 (0x61A90U)
++#define RGX_CR_OS6_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS6_SCRATCH2_DATA_SHIFT (0U)
++#define RGX_CR_OS6_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_OS6_SCRATCH3
++*/
++#define RGX_CR_OS6_SCRATCH3 (0x61A98U)
++#define RGX_CR_OS6_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS6_SCRATCH3_DATA_SHIFT (0U)
++#define RGX_CR_OS6_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register group: RGX_CR_OS7_SCRATCH, with 2 repeats
++*/
++#define RGX_CR_OS7_SCRATCH_REPEATCOUNT (2U)
++/*
++ Register RGX_CR_OS7_SCRATCH0
++*/
++#define RGX_CR_OS7_SCRATCH0 (0x71A80U)
++#define RGX_CR_OS7_SCRATCH0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS7_SCRATCH0_DATA_SHIFT (0U)
++#define RGX_CR_OS7_SCRATCH0_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS7_SCRATCH1
++*/
++#define RGX_CR_OS7_SCRATCH1 (0x71A88U)
++#define RGX_CR_OS7_SCRATCH1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_OS7_SCRATCH1_DATA_SHIFT (0U)
++#define RGX_CR_OS7_SCRATCH1_DATA_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OS7_SCRATCH2
++*/
++#define RGX_CR_OS7_SCRATCH2 (0x71A90U)
++#define RGX_CR_OS7_SCRATCH2_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS7_SCRATCH2_DATA_SHIFT (0U)
++#define RGX_CR_OS7_SCRATCH2_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_OS7_SCRATCH3
++*/
++#define RGX_CR_OS7_SCRATCH3 (0x71A98U)
++#define RGX_CR_OS7_SCRATCH3_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
++#define RGX_CR_OS7_SCRATCH3_DATA_SHIFT (0U)
++#define RGX_CR_OS7_SCRATCH3_DATA_CLRMSK (0xFFFFFF00U)
++
++
++/*
++ Register RGX_CR_SPFILTER_SIGNAL_DESCR
++*/
++#define RGX_CR_SPFILTER_SIGNAL_DESCR (0x2700U)
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT (0U)
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK (0xFFFF0000U)
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT (4U)
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE (16U)
++
++
++/*
++ Register RGX_CR_SPFILTER_SIGNAL_DESCR_MIN
++*/
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN (0x2708U)
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT (4U)
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT (4U)
++#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE (16U)
++
++
++/*
++ Register group: RGX_CR_FWCORE_ADDR_REMAP_CONFIG, with 16 repeats
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG_REPEATCOUNT (16U)
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG0
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 (0x3000U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG1
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1 (0x3008U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG1_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG2
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2 (0x3010U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG2_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG3
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3 (0x3018U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG3_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG4
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4 (0x3020U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG4_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG5
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5 (0x3028U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG5_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG6
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6 (0x3030U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG6_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG7
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7 (0x3038U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG7_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG8
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8 (0x3040U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG8_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG9
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9 (0x3048U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG9_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG10
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10 (0x3050U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG10_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG11
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11 (0x3058U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG11_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG12
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12 (0x3060U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG12_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG13
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13 (0x3068U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG13_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG14
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14 (0x3070U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG14_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_ADDR_REMAP_CONFIG15
++*/
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15 (0x3078U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_MASKFULL (IMG_UINT64_C(0x7FFFF7FFFFFFF000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_SHIFT (62U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_TRUSTED_EN (IMG_UINT64_C(0x4000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_SHIFT (61U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_CLRMSK (IMG_UINT64_C(0xDFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_LOAD_STORE_EN_EN (IMG_UINT64_C(0x2000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_SHIFT (60U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_CLRMSK (IMG_UINT64_C(0xEFFFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_FETCH_EN_EN (IMG_UINT64_C(0x1000000000000000))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_SHIFT (44U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_SIZE_CLRMSK (IMG_UINT64_C(0xF0000FFFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_SHIFT (40U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_CBASE_CLRMSK (IMG_UINT64_C(0xFFFFF8FFFFFFFFFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_ADDR_REMAP_CONFIG15_DEVVADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_BOOT
++*/
++#define RGX_CR_FWCORE_BOOT (0x3090U)
++#define RGX_CR_FWCORE_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_FWCORE_BOOT_ENABLE_SHIFT (0U)
++#define RGX_CR_FWCORE_BOOT_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_FWCORE_BOOT_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_FWCORE_RESET_ADDR
++*/
++#define RGX_CR_FWCORE_RESET_ADDR (0x3098U)
++#define RGX_CR_FWCORE_RESET_ADDR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE))
++#define RGX_CR_FWCORE_RESET_ADDR_ADDR_SHIFT (1U)
++#define RGX_CR_FWCORE_RESET_ADDR_ADDR_CLRMSK (0x00000001U)
++#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSHIFT (1U)
++#define RGX_CR_FWCORE_RESET_ADDR_ADDR_ALIGNSIZE (2U)
++
++
++/*
++ Register RGX_CR_FWCORE_WRAPPER_NMI_ADDR
++*/
++#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR (0x30A0U)
++#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFE))
++#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_SHIFT (1U)
++#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_CLRMSK (0x00000001U)
++#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSHIFT (1U)
++#define RGX_CR_FWCORE_WRAPPER_NMI_ADDR_ADDR_ALIGNSIZE (2U)
++
++
++/*
++ Register RGX_CR_FWCORE_WRAPPER_NMI_EVENT
++*/
++#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT (0x30A8U)
++#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_SHIFT (0U)
++#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_FWCORE_WRAPPER_NMI_EVENT_TRIGGER_EN_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS
++*/
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS (0x30B0U)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F771))
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_SHIFT (5U)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_SHIFT (4U)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_RO_EN (0x00000010U)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_SHIFT (0U)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_FAULT_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS
++*/
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS (0x30B8U)
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0))
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_SHIFT (52U)
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF))
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0010000000000000))
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT (46U)
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFF03FFFFFFFFFFF))
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT (40U)
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFC0FFFFFFFFFF))
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT (4U)
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
++#define RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_CTRL_INVAL
++*/
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL (0x30C0U)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F))
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_SHIFT (3U)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_TLB_EN (0x00000008U)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_SHIFT (2U)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PC_EN (0x00000004U)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_SHIFT (1U)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PD_EN (0x00000002U)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_SHIFT (0U)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_FWCORE_MEM_CTRL_INVAL_PT_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_MMU_STATUS
++*/
++#define RGX_CR_FWCORE_MEM_MMU_STATUS (0x30C8U)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7))
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_SHIFT (20U)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_SHIFT (4U)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_SHIFT (2U)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_STALLED_EN (0x00000004U)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_SHIFT (1U)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_PAUSED_EN (0x00000002U)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_SHIFT (0U)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_FWCORE_MEM_MMU_STATUS_BUSY_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_READS_EXT_STATUS
++*/
++#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS (0x30D8U)
++#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000FFF))
++#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_SHIFT (0U)
++#define RGX_CR_FWCORE_MEM_READS_EXT_STATUS_MMU_CLRMSK (0xFFFFF000U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_READS_INT_STATUS
++*/
++#define RGX_CR_FWCORE_MEM_READS_INT_STATUS (0x30E0U)
++#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF))
++#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_SHIFT (0U)
++#define RGX_CR_FWCORE_MEM_READS_INT_STATUS_MMU_CLRMSK (0xFFFFF800U)
++
++
++/*
++ Register RGX_CR_FWCORE_WRAPPER_FENCE
++*/
++#define RGX_CR_FWCORE_WRAPPER_FENCE (0x30E8U)
++#define RGX_CR_FWCORE_WRAPPER_FENCE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_SHIFT (0U)
++#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_FWCORE_WRAPPER_FENCE_ID_EN (0x00000001U)
++
++
++/*
++ Register group: RGX_CR_FWCORE_MEM_CAT_BASE, with 8 repeats
++*/
++#define RGX_CR_FWCORE_MEM_CAT_BASE_REPEATCOUNT (8U)
++/*
++ Register RGX_CR_FWCORE_MEM_CAT_BASE0
++*/
++#define RGX_CR_FWCORE_MEM_CAT_BASE0 (0x30F0U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
++#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_CAT_BASE1
++*/
++#define RGX_CR_FWCORE_MEM_CAT_BASE1 (0x30F8U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
++#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE1_ADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_CAT_BASE2
++*/
++#define RGX_CR_FWCORE_MEM_CAT_BASE2 (0x3100U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
++#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE2_ADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_CAT_BASE3
++*/
++#define RGX_CR_FWCORE_MEM_CAT_BASE3 (0x3108U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
++#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE3_ADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_CAT_BASE4
++*/
++#define RGX_CR_FWCORE_MEM_CAT_BASE4 (0x3110U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
++#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE4_ADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_CAT_BASE5
++*/
++#define RGX_CR_FWCORE_MEM_CAT_BASE5 (0x3118U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
++#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE5_ADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_CAT_BASE6
++*/
++#define RGX_CR_FWCORE_MEM_CAT_BASE6 (0x3120U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
++#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE6_ADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_MEM_CAT_BASE7
++*/
++#define RGX_CR_FWCORE_MEM_CAT_BASE7 (0x3128U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
++#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_SHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF))
++#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSHIFT (12U)
++#define RGX_CR_FWCORE_MEM_CAT_BASE7_ADDR_ALIGNSIZE (4096U)
++
++
++/*
++ Register RGX_CR_FWCORE_WDT_RESET
++*/
++#define RGX_CR_FWCORE_WDT_RESET (0x3130U)
++#define RGX_CR_FWCORE_WDT_RESET_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_FWCORE_WDT_RESET_EN_SHIFT (0U)
++#define RGX_CR_FWCORE_WDT_RESET_EN_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_FWCORE_WDT_RESET_EN_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_FWCORE_WDT_CTRL
++*/
++#define RGX_CR_FWCORE_WDT_CTRL (0x3138U)
++#define RGX_CR_FWCORE_WDT_CTRL_MASKFULL (IMG_UINT64_C(0x00000000FFFF1F01))
++#define RGX_CR_FWCORE_WDT_CTRL_PROT_SHIFT (16U)
++#define RGX_CR_FWCORE_WDT_CTRL_PROT_CLRMSK (0x0000FFFFU)
++#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_SHIFT (8U)
++#define RGX_CR_FWCORE_WDT_CTRL_THRESHOLD_CLRMSK (0xFFFFE0FFU)
++#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_FWCORE_WDT_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_FWCORE_WDT_COUNT
++*/
++#define RGX_CR_FWCORE_WDT_COUNT (0x3140U)
++#define RGX_CR_FWCORE_WDT_COUNT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_FWCORE_WDT_COUNT_VALUE_SHIFT (0U)
++#define RGX_CR_FWCORE_WDT_COUNT_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register group: RGX_CR_FWCORE_DMI_RESERVED0, with 4 repeats
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED0_REPEATCOUNT (4U)
++/*
++ Register RGX_CR_FWCORE_DMI_RESERVED00
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED00 (0x3400U)
++#define RGX_CR_FWCORE_DMI_RESERVED00_MASKFULL (IMG_UINT64_C(0x0000000000000000))
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_RESERVED01
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED01 (0x3408U)
++#define RGX_CR_FWCORE_DMI_RESERVED01_MASKFULL (IMG_UINT64_C(0x0000000000000000))
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_RESERVED02
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED02 (0x3410U)
++#define RGX_CR_FWCORE_DMI_RESERVED02_MASKFULL (IMG_UINT64_C(0x0000000000000000))
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_RESERVED03
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED03 (0x3418U)
++#define RGX_CR_FWCORE_DMI_RESERVED03_MASKFULL (IMG_UINT64_C(0x0000000000000000))
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_DATA0
++*/
++#define RGX_CR_FWCORE_DMI_DATA0 (0x3420U)
++#define RGX_CR_FWCORE_DMI_DATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_FWCORE_DMI_DATA0_VAL_SHIFT (0U)
++#define RGX_CR_FWCORE_DMI_DATA0_VAL_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_DATA1
++*/
++#define RGX_CR_FWCORE_DMI_DATA1 (0x3428U)
++#define RGX_CR_FWCORE_DMI_DATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_FWCORE_DMI_DATA1_VAL_SHIFT (0U)
++#define RGX_CR_FWCORE_DMI_DATA1_VAL_CLRMSK (0x00000000U)
++
++
++/*
++ Register group: RGX_CR_FWCORE_DMI_RESERVED1, with 10 repeats
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED1_REPEATCOUNT (10U)
++/*
++ Register RGX_CR_FWCORE_DMI_RESERVED10
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED10 (0x3430U)
++#define RGX_CR_FWCORE_DMI_RESERVED10_MASKFULL (IMG_UINT64_C(0x0000000000000000))
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_RESERVED11
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED11 (0x3438U)
++#define RGX_CR_FWCORE_DMI_RESERVED11_MASKFULL (IMG_UINT64_C(0x0000000000000000))
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_RESERVED12
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED12 (0x3440U)
++#define RGX_CR_FWCORE_DMI_RESERVED12_MASKFULL (IMG_UINT64_C(0x0000000000000000))
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_RESERVED13
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED13 (0x3448U)
++#define RGX_CR_FWCORE_DMI_RESERVED13_MASKFULL (IMG_UINT64_C(0x0000000000000000))
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_RESERVED14
++*/
++#define RGX_CR_FWCORE_DMI_RESERVED14 (0x3450U)
++#define RGX_CR_FWCORE_DMI_RESERVED14_MASKFULL (IMG_UINT64_C(0x0000000000000000))
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_DMCONTROL
++*/
++#define RGX_CR_FWCORE_DMI_DMCONTROL (0x3480U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_MASKFULL (IMG_UINT64_C(0x00000000D0000003))
++#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_SHIFT (31U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_CLRMSK (0x7FFFFFFFU)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN (0x80000000U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_SHIFT (30U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_CLRMSK (0xBFFFFFFFU)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN (0x40000000U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_SHIFT (28U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_CLRMSK (0xEFFFFFFFU)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_ACKHAVERESET_EN (0x10000000U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_SHIFT (1U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_NDMRESET_EN (0x00000002U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_SHIFT (0U)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_FWCORE_DMI_DMSTATUS
++*/
++#define RGX_CR_FWCORE_DMI_DMSTATUS (0x3488U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_MASKFULL (IMG_UINT64_C(0x00000000004FFFFF))
++#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_SHIFT (22U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_CLRMSK (0xFFBFFFFFU)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_IMPEBREAK_EN (0x00400000U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_SHIFT (19U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_CLRMSK (0xFFF7FFFFU)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHAVERESET_EN (0x00080000U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_SHIFT (18U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_CLRMSK (0xFFFBFFFFU)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHAVERESET_EN (0x00040000U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_SHIFT (17U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_CLRMSK (0xFFFDFFFFU)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN (0x00020000U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_SHIFT (16U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_CLRMSK (0xFFFEFFFFU)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRESUMEACK_EN (0x00010000U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_SHIFT (15U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_CLRMSK (0xFFFF7FFFU)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLNONEXISTENT_EN (0x00008000U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_SHIFT (14U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_CLRMSK (0xFFFFBFFFU)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYNONEXISTENT_EN (0x00004000U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_SHIFT (13U)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_CLRMSK (0xFFFFDFFFU)
++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLUNAVAIL_EN (0x00002000U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_SHIFT (12U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_CLRMSK (0xFFFFEFFFU) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYUNAVAIL_EN (0x00001000U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_SHIFT (11U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_CLRMSK (0xFFFFF7FFU) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLRUNNING_EN (0x00000800U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_SHIFT (10U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_CLRMSK (0xFFFFFBFFU) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYRUNNING_EN (0x00000400U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_SHIFT (9U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_CLRMSK (0xFFFFFDFFU) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN (0x00000200U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_SHIFT (8U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_ANYHALTED_EN (0x00000100U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_SHIFT (7U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHENTICATED_EN (0x00000080U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_SHIFT (6U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_AUTHBUSY_EN (0x00000040U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_SHIFT (5U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_HASRESETHALTREQ_EN (0x00000020U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_SHIFT (4U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_CONFSTRPTRVALID_EN (0x00000010U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_SHIFT (0U) ++#define RGX_CR_FWCORE_DMI_DMSTATUS_VERSION_CLRMSK (0xFFFFFFF0U) ++ ++ ++/* ++ Register group: RGX_CR_FWCORE_DMI_RESERVED2, with 4 repeats ++*/ ++#define RGX_CR_FWCORE_DMI_RESERVED2_REPEATCOUNT (4U) ++/* ++ Register RGX_CR_FWCORE_DMI_RESERVED20 ++*/ ++#define RGX_CR_FWCORE_DMI_RESERVED20 (0x3490U) ++#define RGX_CR_FWCORE_DMI_RESERVED20_MASKFULL (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_RESERVED21 ++*/ ++#define RGX_CR_FWCORE_DMI_RESERVED21 (0x3498U) ++#define RGX_CR_FWCORE_DMI_RESERVED21_MASKFULL (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_RESERVED22 ++*/ ++#define RGX_CR_FWCORE_DMI_RESERVED22 (0x34A0U) ++#define RGX_CR_FWCORE_DMI_RESERVED22_MASKFULL (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_RESERVED23 ++*/ ++#define RGX_CR_FWCORE_DMI_RESERVED23 (0x34A8U) ++#define RGX_CR_FWCORE_DMI_RESERVED23_MASKFULL (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_ABSTRACTCS ++*/ ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS (0x34B0U) ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS_MASKFULL (IMG_UINT64_C(0x000000001F00170F)) ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_SHIFT (24U) ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS_PROGBUFSIZE_CLRMSK (0xE0FFFFFFU) ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_SHIFT (12U) ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_CLRMSK (0xFFFFEFFFU) ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN (0x00001000U) ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT (8U) ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK (0xFFFFF8FFU) ++#define RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_SHIFT (0U) ++#define 
RGX_CR_FWCORE_DMI_ABSTRACTCS_DATACOUNT_CLRMSK (0xFFFFFFF0U) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_COMMAND ++*/ ++#define RGX_CR_FWCORE_DMI_COMMAND (0x34B8U) ++#define RGX_CR_FWCORE_DMI_COMMAND_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT (24U) ++#define RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_CLRMSK (0x00FFFFFFU) ++#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_SHIFT (0U) ++#define RGX_CR_FWCORE_DMI_COMMAND_CONTROL_CLRMSK (0xFF000000U) ++ ++ ++/* ++ Register group: RGX_CR_FWCORE_DMI_RESERVED3, with 32 repeats ++*/ ++#define RGX_CR_FWCORE_DMI_RESERVED3_REPEATCOUNT (32U) ++/* ++ Register RGX_CR_FWCORE_DMI_RESERVED30 ++*/ ++#define RGX_CR_FWCORE_DMI_RESERVED30 (0x34C0U) ++#define RGX_CR_FWCORE_DMI_RESERVED30_MASKFULL (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_RESERVED31 ++*/ ++#define RGX_CR_FWCORE_DMI_RESERVED31 (0x34C8U) ++#define RGX_CR_FWCORE_DMI_RESERVED31_MASKFULL (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_SBCS ++*/ ++#define RGX_CR_FWCORE_DMI_SBCS (0x35C0U) ++#define RGX_CR_FWCORE_DMI_SBCS_MASKFULL (IMG_UINT64_C(0x00000000E07FFFFF)) ++#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_SHIFT (29U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBVERSION_CLRMSK (0x1FFFFFFFU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_SHIFT (22U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_CLRMSK (0xFFBFFFFFU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBBUSYERROR_EN (0x00400000U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_SHIFT (21U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_CLRMSK (0xFFDFFFFFU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN (0x00200000U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_SHIFT (20U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_CLRMSK (0xFFEFFFFFU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN (0x00100000U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT (17U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS_CLRMSK (0xFFF1FFFFU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_SHIFT (16U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_CLRMSK (0xFFFEFFFFU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBAUTOINCREMENT_EN (0x00010000U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_SHIFT (15U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_CLRMSK (0xFFFF7FFFU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBREADONDATA_EN (0x00008000U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT (12U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK (0xFFFF8FFFU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_SHIFT (5U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBASIZE_CLRMSK (0xFFFFF01FU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_SHIFT (4U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS128_EN (0x00000010U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_SHIFT (3U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS64_EN (0x00000008U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_SHIFT (2U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS32_EN (0x00000004U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_SHIFT (1U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS16_EN (0x00000002U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_SHIFT (0U) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_FWCORE_DMI_SBCS_SBACCESS8_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_SBADDRESS0 ++*/ ++#define RGX_CR_FWCORE_DMI_SBADDRESS0 
(0x35C8U) ++#define RGX_CR_FWCORE_DMI_SBADDRESS0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_SHIFT (0U) ++#define RGX_CR_FWCORE_DMI_SBADDRESS0_ADDRESS_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register group: RGX_CR_FWCORE_DMI_SBDATA, with 4 repeats ++*/ ++#define RGX_CR_FWCORE_DMI_SBDATA_REPEATCOUNT (4U) ++/* ++ Register RGX_CR_FWCORE_DMI_SBDATA0 ++*/ ++#define RGX_CR_FWCORE_DMI_SBDATA0 (0x35E0U) ++#define RGX_CR_FWCORE_DMI_SBDATA0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_SHIFT (0U) ++#define RGX_CR_FWCORE_DMI_SBDATA0_DATA_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_SBDATA1 ++*/ ++#define RGX_CR_FWCORE_DMI_SBDATA1 (0x35E8U) ++#define RGX_CR_FWCORE_DMI_SBDATA1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_SHIFT (0U) ++#define RGX_CR_FWCORE_DMI_SBDATA1_DATA_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_SBDATA2 ++*/ ++#define RGX_CR_FWCORE_DMI_SBDATA2 (0x35F0U) ++#define RGX_CR_FWCORE_DMI_SBDATA2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_SHIFT (0U) ++#define RGX_CR_FWCORE_DMI_SBDATA2_DATA_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_SBDATA3 ++*/ ++#define RGX_CR_FWCORE_DMI_SBDATA3 (0x35F8U) ++#define RGX_CR_FWCORE_DMI_SBDATA3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_SHIFT (0U) ++#define RGX_CR_FWCORE_DMI_SBDATA3_DATA_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_FWCORE_DMI_HALTSUM0 ++*/ ++#define RGX_CR_FWCORE_DMI_HALTSUM0 (0x3600U) ++#define RGX_CR_FWCORE_DMI_HALTSUM0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_SHIFT (0U) ++#define RGX_CR_FWCORE_DMI_HALTSUM0_VAL_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_SLC_CTRL_MISC ++*/ ++#define RGX_CR_SLC_CTRL_MISC (0x3800U) ++#define RGX_CR_SLC_CTRL_MISC_MASKFULL (IMG_UINT64_C(0xFFFFFFFF03FF010F)) ++#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT (32U) ++#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_SHIFT (25U) ++#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) ++#define RGX_CR_SLC_CTRL_MISC_TAG_ID_LIMIT_CONTROL_EN (IMG_UINT64_C(0x0000000002000000)) ++#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_SHIFT (24U) ++#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) ++#define RGX_CR_SLC_CTRL_MISC_LAZYWB_OVERRIDE_EN (IMG_UINT64_C(0x0000000001000000)) ++#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (16U) ++#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) ++#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000)) ++#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000)) ++#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000)) ++#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 (IMG_UINT64_C(0x0000000000200000)) ++#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000)) ++#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT (8U) ++#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) ++#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN (IMG_UINT64_C(0x0000000000000100)) 
++#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT (3U) ++#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) ++#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT (2U) ++#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) ++#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN (IMG_UINT64_C(0x0000000000000004)) ++#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U) ++#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT (0U) ++#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_SLC_CTRL_FLUSH_INVAL ++*/ ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL (0x3818U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL (IMG_UINT64_C(0x0000000080000FFF)) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT (31U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK (0x7FFFFFFFU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN (0x80000000U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_SHIFT (11U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_CLRMSK (0xFFFFF7FFU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FASTRENDER_EN (0x00000800U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT (10U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK (0xFFFFFBFFU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN (0x00000400U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT (9U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK (0xFFFFFDFFU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN (0x00000200U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT (8U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN (0x00000100U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT (7U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN (0x00000080U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT (6U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN (0x00000040U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT (5U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN (0x00000020U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT (4U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN (0x00000010U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT (3U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN (0x00000008U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT (2U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN (0x00000004U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT (1U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN (0x00000002U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT (0U) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK 
(0xFFFFFFFEU) ++#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_SLC_STATUS0 ++*/ ++#define RGX_CR_SLC_STATUS0 (0x3820U) ++#define RGX_CR_SLC_STATUS0_MASKFULL (IMG_UINT64_C(0x0000000000000007)) ++#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT (2U) ++#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN (0x00000004U) ++#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT (1U) ++#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN (0x00000002U) ++#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT (0U) ++#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_SLC_CTRL_BYPASS ++*/ ++#define RGX_CR_SLC_CTRL_BYPASS (0x3828U) ++#define RGX_CR_SLC_CTRL_BYPASS__XE_MEM__MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFF7FFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_SHIFT (59U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_CLRMSK (IMG_UINT64_C(0xF7FFFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_ZLS_EN (IMG_UINT64_C(0x0800000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_SHIFT (58U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_CLRMSK (IMG_UINT64_C(0xFBFFFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_HEADER_EN (IMG_UINT64_C(0x0400000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_SHIFT (57U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_HEADER_EN (IMG_UINT64_C(0x0200000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_SHIFT (56U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_CLRMSK (IMG_UINT64_C(0xFEFFFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_ZLS_DATA_EN (IMG_UINT64_C(0x0100000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_SHIFT (55U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_CLRMSK (IMG_UINT64_C(0xFF7FFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_DECOMP_TCU_DATA_EN (IMG_UINT64_C(0x0080000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_SHIFT (54U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_CLRMSK (IMG_UINT64_C(0xFFBFFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TFBC_COMP_PBE_EN (IMG_UINT64_C(0x0040000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_SHIFT (53U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFDFFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_DM_COMPUTE_EN (IMG_UINT64_C(0x0020000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_SHIFT (52U) ++#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFEFFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_PDSRW_NOLINEFILL_EN (IMG_UINT64_C(0x0010000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_SHIFT (51U) ++#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_CLRMSK (IMG_UINT64_C(0xFFF7FFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_PBE_NOLINEFILL_EN (IMG_UINT64_C(0x0008000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_SHIFT (50U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBC_EN (IMG_UINT64_C(0x0004000000000000)) 
++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_SHIFT (49U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_CLRMSK (IMG_UINT64_C(0xFFFDFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_RREQ_EN (IMG_UINT64_C(0x0002000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_SHIFT (48U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_CLRMSK (IMG_UINT64_C(0xFFFEFFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CREQ_EN (IMG_UINT64_C(0x0001000000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_SHIFT (47U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_CLRMSK (IMG_UINT64_C(0xFFFF7FFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_PREQ_EN (IMG_UINT64_C(0x0000800000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_SHIFT (46U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_CLRMSK (IMG_UINT64_C(0xFFFFBFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_DBSC_EN (IMG_UINT64_C(0x0000400000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_SHIFT (45U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TCU_EN (IMG_UINT64_C(0x0000200000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_SHIFT (44U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_CLRMSK (IMG_UINT64_C(0xFFFFEFFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_PBE_EN (IMG_UINT64_C(0x0000100000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_SHIFT (43U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_CLRMSK (IMG_UINT64_C(0xFFFFF7FFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_ISP_EN (IMG_UINT64_C(0x0000080000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_SHIFT (42U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_PM_EN (IMG_UINT64_C(0x0000040000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_SHIFT (41U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_CLRMSK (IMG_UINT64_C(0xFFFFFDFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TDM_EN (IMG_UINT64_C(0x0000020000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_SHIFT (40U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_CDM_EN (IMG_UINT64_C(0x0000010000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_SHIFT (39U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_CLRMSK (IMG_UINT64_C(0xFFFFFF7FFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_PDS_STATE_EN (IMG_UINT64_C(0x0000008000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_SHIFT (38U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_DB_EN (IMG_UINT64_C(0x0000004000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_SHIFT (37U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TSPF_VTX_VAR_EN (IMG_UINT64_C(0x0000002000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_SHIFT (36U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_VDM_EN (IMG_UINT64_C(0x0000001000000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_SHIFT (35U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_STREAM_EN (IMG_UINT64_C(0x0000000800000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_SHIFT (34U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) ++#define 
RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PSG_REGION_EN (IMG_UINT64_C(0x0000000400000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_SHIFT (33U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_VCE_EN (IMG_UINT64_C(0x0000000200000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_SHIFT (32U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_PPP_EN (IMG_UINT64_C(0x0000000100000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_SHIFT (31U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_FASTRENDER_EN (IMG_UINT64_C(0x0000000080000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_SHIFT (30U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PM_ALIST_EN (IMG_UINT64_C(0x0000000040000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_SHIFT (29U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_TE_EN (IMG_UINT64_C(0x0000000020000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_SHIFT (28U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFEFFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PB_VCE_EN (IMG_UINT64_C(0x0000000010000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT (27U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF7FFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN (IMG_UINT64_C(0x0000000008000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT (26U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN (IMG_UINT64_C(0x0000000004000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT (25U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFDFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN (IMG_UINT64_C(0x0000000002000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT (24U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFEFFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN (IMG_UINT64_C(0x0000000001000000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT (23U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN (IMG_UINT64_C(0x0000000000800000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT (22U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN (IMG_UINT64_C(0x0000000000400000)) ++#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT (21U) ++#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN (IMG_UINT64_C(0x0000000000200000)) ++#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT (20U) ++#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFEFFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN (IMG_UINT64_C(0x0000000000100000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT (19U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN (IMG_UINT64_C(0x0000000000080000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT (18U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF)) ++#define 
RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN (IMG_UINT64_C(0x0000000000040000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT (17U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN (IMG_UINT64_C(0x0000000000020000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT (16U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN (IMG_UINT64_C(0x0000000000010000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT (15U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN (IMG_UINT64_C(0x0000000000008000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT (14U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN (IMG_UINT64_C(0x0000000000004000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT (13U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN (IMG_UINT64_C(0x0000000000002000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT (12U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN (IMG_UINT64_C(0x0000000000001000)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT (11U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN (IMG_UINT64_C(0x0000000000000800)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT (10U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN (IMG_UINT64_C(0x0000000000000400)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT (9U) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN (IMG_UINT64_C(0x0000000000000200)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT (8U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN (IMG_UINT64_C(0x0000000000000100)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT (7U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN (IMG_UINT64_C(0x0000000000000080)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT (6U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN (IMG_UINT64_C(0x0000000000000040)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT (5U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN (IMG_UINT64_C(0x0000000000000020)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT (4U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN (IMG_UINT64_C(0x0000000000000010)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT (3U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT (2U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN 
(IMG_UINT64_C(0x0000000000000004)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT (1U) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT (0U) ++#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_SLC_STATUS1 ++*/ ++#define RGX_CR_SLC_STATUS1 (0x3870U) ++#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0x800003FF03FFFFFF)) ++#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT (63U) ++#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK (IMG_UINT64_C(0x7FFFFFFFFFFFFFFF)) ++#define RGX_CR_SLC_STATUS1_PAUSED_EN (IMG_UINT64_C(0x8000000000000000)) ++#define RGX_CR_SLC_STATUS1_READS1_SHIFT (32U) ++#define RGX_CR_SLC_STATUS1_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) ++#define RGX_CR_SLC_STATUS1_READS0_SHIFT (16U) ++#define RGX_CR_SLC_STATUS1_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) ++#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT (8U) ++#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) ++#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT (0U) ++#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) ++ ++ ++/* ++ Register RGX_CR_SLC_IDLE ++*/ ++#define RGX_CR_SLC_IDLE (0x3898U) ++#define RGX_CR_SLC_IDLE__XE_MEM__MASKFULL (IMG_UINT64_C(0x00000000000003FF)) ++#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) ++#define RGX_CR_SLC_IDLE_MH_SYSARB1_SHIFT (9U) ++#define RGX_CR_SLC_IDLE_MH_SYSARB1_CLRMSK (0xFFFFFDFFU) ++#define RGX_CR_SLC_IDLE_MH_SYSARB1_EN (0x00000200U) ++#define RGX_CR_SLC_IDLE_MH_SYSARB0_SHIFT (8U) ++#define RGX_CR_SLC_IDLE_MH_SYSARB0_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_SLC_IDLE_MH_SYSARB0_EN (0x00000100U) ++#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT (7U) ++#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_SLC_IDLE_IMGBV4_EN (0x00000080U) ++#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (6U) ++#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN (0x00000040U) ++#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT (5U) ++#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_SLC_IDLE_RBOFIFO_EN (0x00000020U) ++#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT (4U) ++#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_SLC_IDLE_FRC_CONV_EN (0x00000010U) ++#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT (3U) ++#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_SLC_IDLE_VXE_CONV_EN (0x00000008U) ++#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT (2U) ++#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_SLC_IDLE_VXD_CONV_EN (0x00000004U) ++#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT (1U) ++#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_SLC_IDLE_BIF1_CONV_EN (0x00000002U) ++#define RGX_CR_SLC_IDLE_CBAR_SHIFT (0U) ++#define RGX_CR_SLC_IDLE_CBAR_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_SLC_IDLE_CBAR_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_SLC_STATUS2 ++*/ ++#define RGX_CR_SLC_STATUS2 (0x3908U) ++#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0x000003FF03FFFFFF)) ++#define RGX_CR_SLC_STATUS2_READS3_SHIFT (32U) ++#define RGX_CR_SLC_STATUS2_READS3_CLRMSK (IMG_UINT64_C(0xFFFFFC00FFFFFFFF)) ++#define RGX_CR_SLC_STATUS2_READS2_SHIFT (16U) ++#define RGX_CR_SLC_STATUS2_READS2_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC00FFFF)) ++#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT 
(8U) ++#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) ++#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT (0U) ++#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) ++ ++ ++/* ++ Register RGX_CR_SLC_CTRL_MISC2 ++*/ ++#define RGX_CR_SLC_CTRL_MISC2 (0x3930U) ++#define RGX_CR_SLC_CTRL_MISC2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT (0U) ++#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE ++*/ ++#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE (0x3938U) ++#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT (0U) ++#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_SLC_SIZE_IN_KB ++*/ ++#define RGX_CR_SLC_SIZE_IN_KB (0x3970U) ++#define RGX_CR_SLC_SIZE_IN_KB_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_SLC_SIZE_IN_KB_SIZE_SHIFT (0U) ++#define RGX_CR_SLC_SIZE_IN_KB_SIZE_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_USC_TIMER ++*/ ++#define RGX_CR_USC_TIMER (0x46C8U) ++#define RGX_CR_USC_TIMER_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_USC_TIMER_CNT_SHIFT (0U) ++#define RGX_CR_USC_TIMER_CNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_USC_TIMER_CNT ++*/ ++#define RGX_CR_USC_TIMER_CNT (0x46D0U) ++#define RGX_CR_USC_TIMER_CNT_MASKFULL (IMG_UINT64_C(0x0000000000000001)) ++#define RGX_CR_USC_TIMER_CNT_RESET_SHIFT (0U) ++#define RGX_CR_USC_TIMER_CNT_RESET_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_USC_TIMER_CNT_RESET_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_USC_UVS0_CHECKSUM ++*/ ++#define RGX_CR_USC_UVS0_CHECKSUM (0x5000U) ++#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_USC_UVS1_CHECKSUM ++*/ ++#define RGX_CR_USC_UVS1_CHECKSUM (0x5008U) ++#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_USC_UVS2_CHECKSUM ++*/ ++#define RGX_CR_USC_UVS2_CHECKSUM (0x5010U) ++#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_USC_UVS3_CHECKSUM ++*/ ++#define RGX_CR_USC_UVS3_CHECKSUM (0x5018U) ++#define RGX_CR_USC_UVS3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PPP_SIGNATURE ++*/ ++#define RGX_CR_PPP_SIGNATURE (0x5020U) ++#define RGX_CR_PPP_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT (0U) ++#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_TE_SIGNATURE ++*/ ++#define RGX_CR_TE_SIGNATURE (0x5028U) ++#define RGX_CR_TE_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT (0U) ++#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_TE_CHECKSUM ++*/ ++#define RGX_CR_TE_CHECKSUM (0x5110U) ++#define 
RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_USC_UVB_CHECKSUM ++*/ ++#define RGX_CR_USC_UVB_CHECKSUM (0x5118U) ++#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_VCE_CHECKSUM ++*/ ++#define RGX_CR_VCE_CHECKSUM (0x5030U) ++#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_ISP_PDS_CHECKSUM ++*/ ++#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U) ++#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_ISP_TPF_CHECKSUM ++*/ ++#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U) ++#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_TFPU_PLANE0_CHECKSUM ++*/ ++#define RGX_CR_TFPU_PLANE0_CHECKSUM (0x5048U) ++#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_TFPU_PLANE1_CHECKSUM ++*/ ++#define RGX_CR_TFPU_PLANE1_CHECKSUM (0x5050U) ++#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PBE_CHECKSUM ++*/ ++#define RGX_CR_PBE_CHECKSUM (0x5058U) ++#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PDS_DOUTM_STM_SIGNATURE ++*/ ++#define RGX_CR_PDS_DOUTM_STM_SIGNATURE (0x5060U) ++#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT (0U) ++#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_IFPU_ISP_CHECKSUM ++*/ ++#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U) ++#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_USC_UVS4_CHECKSUM ++*/ ++#define RGX_CR_USC_UVS4_CHECKSUM (0x5100U) ++#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_USC_UVS5_CHECKSUM ++*/ ++#define RGX_CR_USC_UVS5_CHECKSUM (0x5108U) ++#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PPP_CLIP_CHECKSUM ++*/ ++#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U) ++#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define 
RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U) ++#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_TA_PHASE ++*/ ++#define RGX_CR_PERF_TA_PHASE (0x6008U) ++#define RGX_CR_PERF_TA_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_3D_PHASE ++*/ ++#define RGX_CR_PERF_3D_PHASE (0x6010U) ++#define RGX_CR_PERF_3D_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_COMPUTE_PHASE ++*/ ++#define RGX_CR_PERF_COMPUTE_PHASE (0x6018U) ++#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_TA_CYCLE ++*/ ++#define RGX_CR_PERF_TA_CYCLE (0x6020U) ++#define RGX_CR_PERF_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_3D_CYCLE ++*/ ++#define RGX_CR_PERF_3D_CYCLE (0x6028U) ++#define RGX_CR_PERF_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_COMPUTE_CYCLE ++*/ ++#define RGX_CR_PERF_COMPUTE_CYCLE (0x6030U) ++#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_TA_OR_3D_CYCLE ++*/ ++#define RGX_CR_PERF_TA_OR_3D_CYCLE (0x6038U) ++#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_INITIAL_TA_CYCLE ++*/ ++#define RGX_CR_PERF_INITIAL_TA_CYCLE (0x6040U) ++#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_SLC0_READ_STALL ++*/ ++#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U) ++#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_SLC0_WRITE_STALL ++*/ ++#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U) ++#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_SLC1_READ_STALL ++*/ ++#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U) ++#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_SLC1_WRITE_STALL ++*/ ++#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U) ++#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT 
(0U) ++#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_SLC2_READ_STALL ++*/ ++#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U) ++#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_SLC2_WRITE_STALL ++*/ ++#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U) ++#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_SLC3_READ_STALL ++*/ ++#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U) ++#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_SLC3_WRITE_STALL ++*/ ++#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U) ++#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U) ++#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_PERF_3D_SPINUP ++*/ ++#define RGX_CR_PERF_3D_SPINUP (0x6220U) ++#define RGX_CR_PERF_3D_SPINUP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT (0U) ++#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_AXI_ACE_LITE_CONFIGURATION ++*/ ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION (0x38C0U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL (IMG_UINT64_C(0x00003FFFFFFFFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT (45U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_CLRMSK (IMG_UINT64_C(0xFFFFDFFFFFFFFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_EN (IMG_UINT64_C(0x0000200000000000)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0xFFFFE01FFFFFFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0x0000001000000000)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0xFFFFFFF7FFFFFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0x0000000800000000)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0x0000000400000000)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFC3FFFFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC3FFFFFF)) ++#define 
RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFC3FFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFCFFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF3FFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF3FFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFCFFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF0F)) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U) ++#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF0)) ++ ++ ++/* ++ Register RGX_CR_POWER_ESTIMATE_RESULT ++*/ ++#define RGX_CR_POWER_ESTIMATE_RESULT (0x6328U) ++#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT (0U) ++#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register RGX_CR_TA_PERF ++*/ ++#define RGX_CR_TA_PERF (0x7600U) ++#define RGX_CR_TA_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F)) ++#define RGX_CR_TA_PERF_CLR_3_SHIFT (4U) ++#define RGX_CR_TA_PERF_CLR_3_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_TA_PERF_CLR_3_EN (0x00000010U) ++#define RGX_CR_TA_PERF_CLR_2_SHIFT (3U) ++#define RGX_CR_TA_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_TA_PERF_CLR_2_EN (0x00000008U) ++#define RGX_CR_TA_PERF_CLR_1_SHIFT (2U) ++#define RGX_CR_TA_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_TA_PERF_CLR_1_EN (0x00000004U) ++#define RGX_CR_TA_PERF_CLR_0_SHIFT (1U) ++#define RGX_CR_TA_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_TA_PERF_CLR_0_EN (0x00000002U) ++#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT (0U) ++#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_TA_PERF_CTRL_ENABLE_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_TA_PERF_SELECT0 ++*/ ++#define RGX_CR_TA_PERF_SELECT0 (0x7608U) ++#define RGX_CR_TA_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) ++#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT (48U) ++#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) ++#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT (32U) ++#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) ++#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT (21U) ++#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK 
(IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_TA_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_TA_PERF_SELECT1
++*/
++#define RGX_CR_TA_PERF_SELECT1 (0x7610U)
++#define RGX_CR_TA_PERF_SELECT1_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT (21U)
++#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_TA_PERF_SELECT1_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_TA_PERF_SELECT2
++*/
++#define RGX_CR_TA_PERF_SELECT2 (0x7618U)
++#define RGX_CR_TA_PERF_SELECT2_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT (21U)
++#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_TA_PERF_SELECT2_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_TA_PERF_SELECT3
++*/
++#define RGX_CR_TA_PERF_SELECT3 (0x7620U)
++#define RGX_CR_TA_PERF_SELECT3_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT (21U)
++#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_TA_PERF_SELECT3_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_TA_PERF_SELECTED_BITS
++*/
++#define RGX_CR_TA_PERF_SELECTED_BITS (0x7648U)
++#define RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
++#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT (48U)
++#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
++#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT (32U)
++#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF))
++#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT (16U)
++#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF))
++#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT (0U)
++#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_TA_PERF_COUNTER_0
++*/
++#define RGX_CR_TA_PERF_COUNTER_0 (0x7650U)
++#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_TA_PERF_COUNTER_1
++*/
++#define RGX_CR_TA_PERF_COUNTER_1 (0x7658U)
++#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT (0U)
++#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_TA_PERF_COUNTER_2
++*/
++#define RGX_CR_TA_PERF_COUNTER_2 (0x7660U)
++#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT (0U)
++#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_TA_PERF_COUNTER_3
++*/
++#define RGX_CR_TA_PERF_COUNTER_3 (0x7668U)
++#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT (0U)
++#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_RASTERISATION_PERF
++*/
++#define RGX_CR_RASTERISATION_PERF (0x7700U)
++#define RGX_CR_RASTERISATION_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_RASTERISATION_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_RASTERISATION_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_RASTERISATION_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_RASTERISATION_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_RASTERISATION_PERF_SELECT0
++*/
++#define RGX_CR_RASTERISATION_PERF_SELECT0 (0x7708U)
++#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT (21U)
++#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_RASTERISATION_PERF_COUNTER_0
++*/
++#define RGX_CR_RASTERISATION_PERF_COUNTER_0 (0x7750U)
++#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_HUB_BIFPMCACHE_PERF
++*/
++#define RGX_CR_HUB_BIFPMCACHE_PERF (0x7800U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0
++*/
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 (0x7808U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT (21U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0
++*/
++#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 (0x7850U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_TPU_MCU_L0_PERF
++*/
++#define RGX_CR_TPU_MCU_L0_PERF (0x7900U)
++#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_TPU_MCU_L0_PERF_SELECT0
++*/
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0 (0x7908U)
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT (21U)
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_TPU_MCU_L0_PERF_COUNTER_0
++*/
++#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 (0x7950U)
++#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_USC_PERF
++*/
++#define RGX_CR_USC_PERF (0x8100U)
++#define RGX_CR_USC_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define RGX_CR_USC_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_USC_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_USC_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_USC_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_USC_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_USC_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_USC_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_USC_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_USC_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_USC_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_USC_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_USC_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_USC_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_USC_PERF_SELECT0
++*/
++#define RGX_CR_USC_PERF_SELECT0 (0x8108U)
++#define RGX_CR_USC_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT (21U)
++#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_USC_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_USC_PERF_COUNTER_0
++*/
++#define RGX_CR_USC_PERF_COUNTER_0 (0x8150U)
++#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_JONES_IDLE
++*/
++#define RGX_CR_JONES_IDLE (0x8328U)
++#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000007FFF))
++#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U)
++#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0xFFFFBFFFU)
++#define RGX_CR_JONES_IDLE_TDM_EN (0x00004000U)
++#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U)
++#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0xFFFFDFFFU)
++#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0x00002000U)
++#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U)
++#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0xFFFFEFFFU)
++#define RGX_CR_JONES_IDLE_FB_CDC_EN (0x00001000U)
++#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U)
++#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0xFFFFF7FFU)
++#define RGX_CR_JONES_IDLE_MMU_EN (0x00000800U)
++#define RGX_CR_JONES_IDLE_TLA_SHIFT (10U)
++#define RGX_CR_JONES_IDLE_TLA_CLRMSK (0xFFFFFBFFU)
++#define RGX_CR_JONES_IDLE_TLA_EN (0x00000400U)
++#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U)
++#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0xFFFFFDFFU)
++#define RGX_CR_JONES_IDLE_GARTEN_EN (0x00000200U)
++#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT (8U)
++#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK (0xFFFFFEFFU)
++#define RGX_CR_JONES_IDLE_HOSTIF_EN (0x00000100U)
++#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U)
++#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0xFFFFFF7FU)
++#define RGX_CR_JONES_IDLE_SOCIF_EN (0x00000080U)
++#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U)
++#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0xFFFFFFBFU)
++#define RGX_CR_JONES_IDLE_TILING_EN (0x00000040U)
++#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U)
++#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0xFFFFFFDFU)
++#define RGX_CR_JONES_IDLE_IPP_EN (0x00000020U)
++#define RGX_CR_JONES_IDLE_USCS_SHIFT (4U)
++#define RGX_CR_JONES_IDLE_USCS_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_JONES_IDLE_USCS_EN (0x00000010U)
++#define RGX_CR_JONES_IDLE_PM_SHIFT (3U)
++#define RGX_CR_JONES_IDLE_PM_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_JONES_IDLE_PM_EN (0x00000008U)
++#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U)
++#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_JONES_IDLE_CDM_EN (0x00000004U)
++#define RGX_CR_JONES_IDLE_VDM_SHIFT (1U)
++#define RGX_CR_JONES_IDLE_VDM_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_JONES_IDLE_VDM_EN (0x00000002U)
++#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U)
++#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_JONES_IDLE_BIF_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_TORNADO_PERF
++*/
++#define RGX_CR_TORNADO_PERF (0x8228U)
++#define RGX_CR_TORNADO_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_TORNADO_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_TORNADO_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_TORNADO_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_TORNADO_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_TORNADO_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_TORNADO_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_TORNADO_PERF_SELECT0
++*/
++#define RGX_CR_TORNADO_PERF_SELECT0 (0x8230U)
++#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT (21U)
++#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_TORNADO_PERF_COUNTER_0
++*/
++#define RGX_CR_TORNADO_PERF_COUNTER_0 (0x8268U)
++#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_TEXAS_PERF
++*/
++#define RGX_CR_TEXAS_PERF (0x8290U)
++#define RGX_CR_TEXAS_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F))
++#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT (6U)
++#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK (0xFFFFFFBFU)
++#define RGX_CR_TEXAS_PERF_CLR_5_EN (0x00000040U)
++#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT (5U)
++#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK (0xFFFFFFDFU)
++#define RGX_CR_TEXAS_PERF_CLR_4_EN (0x00000020U)
++#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_TEXAS_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_TEXAS_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_TEXAS_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_TEXAS_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_TEXAS_PERF_SELECT0
++*/
++#define RGX_CR_TEXAS_PERF_SELECT0 (0x8298U)
++#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
++#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT (31U)
++#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
++#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000))
++#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF))
++#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_TEXAS_PERF_COUNTER_0
++*/
++#define RGX_CR_TEXAS_PERF_COUNTER_0 (0x82D8U)
++#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_JONES_PERF
++*/
++#define RGX_CR_JONES_PERF (0x8330U)
++#define RGX_CR_JONES_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define RGX_CR_JONES_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_JONES_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_JONES_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_JONES_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_JONES_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_JONES_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_JONES_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_JONES_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_JONES_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_JONES_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_JONES_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_JONES_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_JONES_PERF_SELECT0
++*/
++#define RGX_CR_JONES_PERF_SELECT0 (0x8338U)
++#define RGX_CR_JONES_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT (21U)
++#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_JONES_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_JONES_PERF_COUNTER_0
++*/
++#define RGX_CR_JONES_PERF_COUNTER_0 (0x8368U)
++#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_BLACKPEARL_PERF
++*/
++#define RGX_CR_BLACKPEARL_PERF (0x8400U)
++#define RGX_CR_BLACKPEARL_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F))
++#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT (6U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK (0xFFFFFFBFU)
++#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN (0x00000040U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT (5U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK (0xFFFFFFDFU)
++#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN (0x00000020U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_BLACKPEARL_PERF_SELECT0
++*/
++#define RGX_CR_BLACKPEARL_PERF_SELECT0 (0x8408U)
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT (31U)
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000080000000))
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFC0FFFF))
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_BLACKPEARL_PERF_COUNTER_0
++*/
++#define RGX_CR_BLACKPEARL_PERF_COUNTER_0 (0x8448U)
++#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_PBE_PERF
++*/
++#define RGX_CR_PBE_PERF (0x8478U)
++#define RGX_CR_PBE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define RGX_CR_PBE_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_PBE_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_PBE_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_PBE_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_PBE_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_PBE_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_PBE_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_PBE_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_PBE_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_PBE_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_PBE_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_PBE_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_PBE_PERF_SELECT0
++*/
++#define RGX_CR_PBE_PERF_SELECT0 (0x8480U)
++#define RGX_CR_PBE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT (21U)
++#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_PBE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_PBE_PERF_COUNTER_0
++*/
++#define RGX_CR_PBE_PERF_COUNTER_0 (0x84B0U)
++#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_OCP_REVINFO
++*/
++#define RGX_CR_OCP_REVINFO (0x9000U)
++#define RGX_CR_OCP_REVINFO_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF))
++#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT (33U)
++#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFF9FFFFFFFF))
++#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT (32U)
++#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
++#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN (IMG_UINT64_C(0x0000000100000000))
++#define RGX_CR_OCP_REVINFO_REVISION_SHIFT (0U)
++#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
++
++
++/*
++ Register RGX_CR_OCP_SYSCONFIG
++*/
++#define RGX_CR_OCP_SYSCONFIG (0x9010U)
++#define RGX_CR_OCP_SYSCONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000FFF))
++#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT (10U)
++#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK (0xFFFFF3FFU)
++#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT (8U)
++#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK (0xFFFFFCFFU)
++#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT (6U)
++#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK (0xFFFFFF3FU)
++#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT (4U)
++#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK (0xFFFFFFCFU)
++#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT (2U)
++#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK (0xFFFFFFF3U)
++#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT (0U)
++#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK (0xFFFFFFFCU)
++
++
++/*
++ Register RGX_CR_OCP_IRQSTATUS_RAW_0
++*/
++#define RGX_CR_OCP_IRQSTATUS_RAW_0 (0x9020U)
++#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U)
++#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQSTATUS_RAW_1
++*/
++#define RGX_CR_OCP_IRQSTATUS_RAW_1 (0x9028U)
++#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U)
++#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQSTATUS_RAW_2
++*/
++#define RGX_CR_OCP_IRQSTATUS_RAW_2 (0x9030U)
++#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT (0U)
++#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN (0x00000001U)
++
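The `_SHIFT`/`_CLRMSK`/`_EN` triples throughout this header all encode one field-access idiom: `_CLRMSK` is the register value with only that field's bits zeroed, so updating a field is a read-modify-write that ANDs with `_CLRMSK` and then ORs in the shifted value, while single-bit flags are set or tested via `_EN`. Below is a minimal C sketch of that pattern, using the RGX_CR_TA_PERF_SELECT1 macros defined above; it assumes the header context (IMG_UINT64_C and the macros), and the rgx_read64()/rgx_write64() accessors are hypothetical stand-ins for the driver's real MMIO helpers, not part of this patch.

/* Sketch only: rgx_read64()/rgx_write64() are assumed MMIO helpers. */
#include <stdint.h>

extern uint64_t rgx_read64(uint32_t offset);
extern void rgx_write64(uint32_t offset, uint64_t value);

static void ta_perf_select1_set_group(uint64_t group)
{
	uint64_t v = rgx_read64(RGX_CR_TA_PERF_SELECT1);

	v &= RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK;          /* clear the field */
	v |= (group << RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT) &
	     ~RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK;         /* insert new value */
	v |= RGX_CR_TA_PERF_SELECT1_MODE_EN;                      /* set the MODE flag */
	rgx_write64(RGX_CR_TA_PERF_SELECT1, v);
}

Each register's `_MASKFULL` constant is the union of all implemented bits, so the same headers also support sanity-masking a value before it is written.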
++
++/*
++ Register RGX_CR_OCP_IRQSTATUS_0
++*/
++#define RGX_CR_OCP_IRQSTATUS_0 (0x9038U)
++#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U)
++#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQSTATUS_1
++*/
++#define RGX_CR_OCP_IRQSTATUS_1 (0x9040U)
++#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U)
++#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQSTATUS_2
++*/
++#define RGX_CR_OCP_IRQSTATUS_2 (0x9048U)
++#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT (0U)
++#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQENABLE_SET_0
++*/
++#define RGX_CR_OCP_IRQENABLE_SET_0 (0x9050U)
++#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U)
++#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQENABLE_SET_1
++*/
++#define RGX_CR_OCP_IRQENABLE_SET_1 (0x9058U)
++#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U)
++#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQENABLE_SET_2
++*/
++#define RGX_CR_OCP_IRQENABLE_SET_2 (0x9060U)
++#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT (0U)
++#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQENABLE_CLR_0
++*/
++#define RGX_CR_OCP_IRQENABLE_CLR_0 (0x9068U)
++#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U)
++#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQENABLE_CLR_1
++*/
++#define RGX_CR_OCP_IRQENABLE_CLR_1 (0x9070U)
++#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U)
++#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQENABLE_CLR_2
++*/
++#define RGX_CR_OCP_IRQENABLE_CLR_2 (0x9078U)
++#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT (0U)
++#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_IRQ_EVENT
++*/
++#define RGX_CR_OCP_IRQ_EVENT (0x9080U)
++#define RGX_CR_OCP_IRQ_EVENT_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF))
++#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U)
++#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF7FFFF))
++#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000080000))
++#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U)
++#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
++#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000040000))
++#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U)
++#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFDFFFF))
++#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0x0000000000020000))
++#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U)
++#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFEFFFF))
++#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0x0000000000010000))
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000008000))
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000004000))
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000002000))
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFEFFF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000001000))
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF7FF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000800))
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000400))
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFDFF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000200))
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFEFF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000100))
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000080))
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000040))
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000020))
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
++#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000010))
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7))
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0x0000000000000008))
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0x0000000000000004))
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD))
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0x0000000000000002))
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U)
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_OCP_DEBUG_CONFIG
++*/
++#define RGX_CR_OCP_DEBUG_CONFIG (0x9088U)
++#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT (0U)
++#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_OCP_DEBUG_STATUS
++*/
++#define RGX_CR_OCP_DEBUG_STATUS (0x9090U)
++#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL (IMG_UINT64_C(0x001F1F77FFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT (51U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFE7FFFFFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT (50U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFBFFFFFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN (IMG_UINT64_C(0x0004000000000000))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT (48U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFCFFFFFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT (43U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFE7FFFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT (42U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFBFFFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN (IMG_UINT64_C(0x0000040000000000))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT (40U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFCFFFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT (38U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFBFFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN (IMG_UINT64_C(0x0000004000000000))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFDFFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000002000000000))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFEFFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN (IMG_UINT64_C(0x0000001000000000))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT (34U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK (IMG_UINT64_C(0xFFFFFFFBFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN (IMG_UINT64_C(0x0000000400000000))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0xFFFFFFFDFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN (IMG_UINT64_C(0x0000000200000000))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U)
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0xFFFFFFFEFFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN (IMG_UINT64_C(0x0000000100000000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT (31U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFF7FFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN (IMG_UINT64_C(0x0000000080000000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT (30U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFBFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN (IMG_UINT64_C(0x0000000040000000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT (29U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFDFFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN (IMG_UINT64_C(0x0000000020000000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT (27U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFE7FFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT (26U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFBFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN (IMG_UINT64_C(0x0000000004000000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT (24U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFCFFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT (23U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF7FFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN (IMG_UINT64_C(0x0000000000800000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT (22U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFBFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN (IMG_UINT64_C(0x0000000000400000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT (21U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT (19U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE7FFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT (18U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFBFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN (IMG_UINT64_C(0x0000000000040000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT (16U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFCFFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT (15U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF7FFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN (IMG_UINT64_C(0x0000000000008000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT (14U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFBFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN (IMG_UINT64_C(0x0000000000004000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT (13U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFDFFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN (IMG_UINT64_C(0x0000000000002000))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT (11U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFE7FF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT (10U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFBFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN (IMG_UINT64_C(0x0000000000000400))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT (8U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT (7U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF7F))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN (IMG_UINT64_C(0x0000000000000080))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT (6U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFBF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN (IMG_UINT64_C(0x0000000000000040))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT (5U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN (IMG_UINT64_C(0x0000000000000020))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT (3U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFE7))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT (2U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN (IMG_UINT64_C(0x0000000000000004))
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT (0U)
++#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
++
++
++#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT (6U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK (0xFFFFFFBFU)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN (0x00000040U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT (5U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK (0xFFFFFFDFU)
++#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN (0x00000020U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT (4U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN (0x00000010U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT (3U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN (0x00000008U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT (2U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN (0x00000004U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT (1U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN (0x00000002U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT (0U)
++#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN (0x00000001U)
++
++
++#define RGX_CR_BIF_TRUST_DM_MASK (0x0000007FU)
++
++
++/*
++ Register RGX_CR_BIF_TRUST
++*/
++#define RGX_CR_BIF_TRUST (0xA000U)
++#define RGX_CR_BIF_TRUST_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF))
++#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U)
++#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFEFFFFFU)
++#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN (0x00100000U)
++#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT (19U)
++#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0xFFF7FFFFU)
++#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN (0x00080000U)
++#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT (18U)
++#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK (0xFFFBFFFFU)
++#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN (0x00040000U)
++#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT (17U)
++#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK (0xFFFDFFFFU)
++#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN (0x00020000U)
++#define RGX_CR_BIF_TRUST_ENABLE_SHIFT (16U)
++#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK (0xFFFEFFFFU)
++#define RGX_CR_BIF_TRUST_ENABLE_EN (0x00010000U)
++#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT (9U)
++#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK (0xFFFF01FFU)
++#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT (8U)
++#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFEFFU)
++#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN (0x00000100U)
++#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT (7U)
++#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFF7FU)
++#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN (0x00000080U)
++#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT (6U)
++#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK (0xFFFFFFBFU)
++#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN (0x00000040U)
++#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT (5U)
++#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFDFU)
++#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN (0x00000020U)
++#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT (4U)
++#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN (0x00000010U)
++#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT (3U)
++#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN (0x00000008U)
++#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT (2U)
++#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN (0x00000004U)
++#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT (1U)
++#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN (0x00000002U)
++#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT (0U)
++#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_SYS_BUS_SECURE
++*/
++#define RGX_CR_SYS_BUS_SECURE (0xA100U)
++#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U)
++#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_FBA_FC0_CHECKSUM
++*/
++#define RGX_CR_FBA_FC0_CHECKSUM (0xD170U)
++#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_FBA_FC1_CHECKSUM
++*/
++#define RGX_CR_FBA_FC1_CHECKSUM (0xD178U)
++#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_FBA_FC2_CHECKSUM
++*/
++#define RGX_CR_FBA_FC2_CHECKSUM (0xD180U)
++#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_FBA_FC3_CHECKSUM
++*/
++#define RGX_CR_FBA_FC3_CHECKSUM (0xD188U)
++#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_CLK_CTRL2
++*/
++#define RGX_CR_CLK_CTRL2 (0xD200U)
++#define RGX_CR_CLK_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000000F33))
++#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT (10U)
++#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF3FF))
++#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON (IMG_UINT64_C(0x0000000000000400))
++#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO (IMG_UINT64_C(0x0000000000000800))
++#define RGX_CR_CLK_CTRL2_VRDM_SHIFT (8U)
++#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFCFF))
++#define RGX_CR_CLK_CTRL2_VRDM_OFF (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_CLK_CTRL2_VRDM_ON (IMG_UINT64_C(0x0000000000000100))
++#define RGX_CR_CLK_CTRL2_VRDM_AUTO (IMG_UINT64_C(0x0000000000000200))
++#define RGX_CR_CLK_CTRL2_SH_SHIFT (4U)
++#define RGX_CR_CLK_CTRL2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF))
++#define RGX_CR_CLK_CTRL2_SH_OFF (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_CLK_CTRL2_SH_ON (IMG_UINT64_C(0x0000000000000010))
++#define RGX_CR_CLK_CTRL2_SH_AUTO (IMG_UINT64_C(0x0000000000000020))
++#define RGX_CR_CLK_CTRL2_FBA_SHIFT (0U)
++#define RGX_CR_CLK_CTRL2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFC))
++#define RGX_CR_CLK_CTRL2_FBA_OFF (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_CLK_CTRL2_FBA_ON (IMG_UINT64_C(0x0000000000000001))
++#define RGX_CR_CLK_CTRL2_FBA_AUTO (IMG_UINT64_C(0x0000000000000002))
++
++
++/*
++ Register RGX_CR_CLK_STATUS2
++*/
++#define RGX_CR_CLK_STATUS2 (0xD208U)
++#define RGX_CR_CLK_STATUS2_MASKFULL (IMG_UINT64_C(0x0000000000000015))
++#define RGX_CR_CLK_STATUS2_VRDM_SHIFT (4U)
++#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF))
++#define RGX_CR_CLK_STATUS2_VRDM_GATED (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_CLK_STATUS2_VRDM_RUNNING (IMG_UINT64_C(0x0000000000000010))
++#define RGX_CR_CLK_STATUS2_SH_SHIFT (2U)
++#define RGX_CR_CLK_STATUS2_SH_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB))
++#define RGX_CR_CLK_STATUS2_SH_GATED (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_CLK_STATUS2_SH_RUNNING (IMG_UINT64_C(0x0000000000000004))
++#define RGX_CR_CLK_STATUS2_FBA_SHIFT (0U)
++#define RGX_CR_CLK_STATUS2_FBA_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE))
++#define RGX_CR_CLK_STATUS2_FBA_GATED (IMG_UINT64_C(0x0000000000000000))
++#define RGX_CR_CLK_STATUS2_FBA_RUNNING (IMG_UINT64_C(0x0000000000000001))
++
++
++/*
++ Register RGX_CR_RPM_SHF_FPL
++*/
++#define RGX_CR_RPM_SHF_FPL (0xD520U)
++#define RGX_CR_RPM_SHF_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
++#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT (40U)
++#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF))
++#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT (2U)
++#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003))
++#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT (2U)
++#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE (4U)
++
++
++/*
++ Register RGX_CR_RPM_SHF_FPL_READ
++*/
++#define RGX_CR_RPM_SHF_FPL_READ (0xD528U)
++#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
++#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT (22U)
++#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU)
++#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN (0x00400000U)
++#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT (0U)
++#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK (0xFFC00000U)
++
++
++/*
++ Register RGX_CR_RPM_SHF_FPL_WRITE
++*/
++#define RGX_CR_RPM_SHF_FPL_WRITE (0xD530U)
++#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
++#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT (22U)
++#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU)
++#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN (0x00400000U)
++#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT (0U)
++#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK (0xFFC00000U)
++
++
++/*
++ Register RGX_CR_RPM_SHG_FPL
++*/
++#define RGX_CR_RPM_SHG_FPL (0xD538U)
++#define RGX_CR_RPM_SHG_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
++#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT (40U)
++#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF))
++#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT (2U)
++#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000003))
++#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT (2U)
++#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE (4U)
++
++
++/*
++ Register RGX_CR_RPM_SHG_FPL_READ
++*/
++#define RGX_CR_RPM_SHG_FPL_READ (0xD540U)
++#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
++#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT (22U)
++#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK (0xFFBFFFFFU)
++#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN (0x00400000U)
++#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT (0U)
++#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK (0xFFC00000U)
++
++
++/*
++ Register RGX_CR_RPM_SHG_FPL_WRITE
++*/
++#define RGX_CR_RPM_SHG_FPL_WRITE (0xD548U)
++#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
++#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT (22U)
++#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK (0xFFBFFFFFU)
++#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN (0x00400000U)
++#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT (0U)
++#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK (0xFFC00000U)
++
++
++/*
++ Register RGX_CR_SH_PERF
++*/
++#define RGX_CR_SH_PERF (0xD5F8U)
++#define RGX_CR_SH_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define RGX_CR_SH_PERF_CLR_3_SHIFT (4U)
++#define RGX_CR_SH_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define RGX_CR_SH_PERF_CLR_3_EN (0x00000010U)
++#define RGX_CR_SH_PERF_CLR_2_SHIFT (3U)
++#define RGX_CR_SH_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define RGX_CR_SH_PERF_CLR_2_EN (0x00000008U)
++#define RGX_CR_SH_PERF_CLR_1_SHIFT (2U)
++#define RGX_CR_SH_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define RGX_CR_SH_PERF_CLR_1_EN (0x00000004U)
++#define RGX_CR_SH_PERF_CLR_0_SHIFT (1U)
++#define RGX_CR_SH_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define RGX_CR_SH_PERF_CLR_0_EN (0x00000002U)
++#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT (0U)
++#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define RGX_CR_SH_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register RGX_CR_SH_PERF_SELECT0
++*/
++#define RGX_CR_SH_PERF_SELECT0 (0xD600U)
++#define RGX_CR_SH_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT (21U)
++#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define RGX_CR_SH_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register RGX_CR_SH_PERF_COUNTER_0
++*/
++#define RGX_CR_SH_PERF_COUNTER_0 (0xD628U)
++#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT (0U)
++#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SHF_SHG_CHECKSUM
++*/
++#define RGX_CR_SHF_SHG_CHECKSUM (0xD1C0U)
++#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM
++*/
++#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM (0xD1C8U)
++#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SHF_VARY_BIF_CHECKSUM
++*/
++#define RGX_CR_SHF_VARY_BIF_CHECKSUM (0xD1D0U)
++#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_RPM_BIF_CHECKSUM
++*/
++#define RGX_CR_RPM_BIF_CHECKSUM (0xD1D8U)
++#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SHG_BIF_CHECKSUM
++*/
++#define RGX_CR_SHG_BIF_CHECKSUM (0xD1E0U)
++#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register RGX_CR_SHG_FE_BE_CHECKSUM
++*/
++#define RGX_CR_SHG_FE_BE_CHECKSUM (0xD1E8U)
++#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT (0U)
++#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK (0x00000000U)
++
++
++/*
++ Register DPX_CR_BF_PERF
++*/
++#define DPX_CR_BF_PERF (0xC458U)
++#define DPX_CR_BF_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define DPX_CR_BF_PERF_CLR_3_SHIFT (4U)
++#define DPX_CR_BF_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define DPX_CR_BF_PERF_CLR_3_EN (0x00000010U)
++#define DPX_CR_BF_PERF_CLR_2_SHIFT (3U)
++#define DPX_CR_BF_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define DPX_CR_BF_PERF_CLR_2_EN (0x00000008U)
++#define DPX_CR_BF_PERF_CLR_1_SHIFT (2U)
++#define DPX_CR_BF_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define DPX_CR_BF_PERF_CLR_1_EN (0x00000004U)
++#define DPX_CR_BF_PERF_CLR_0_SHIFT (1U)
++#define DPX_CR_BF_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define DPX_CR_BF_PERF_CLR_0_EN (0x00000002U)
++#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT (0U)
++#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define DPX_CR_BF_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register DPX_CR_BF_PERF_SELECT0
++*/
++#define DPX_CR_BF_PERF_SELECT0 (0xC460U)
++#define DPX_CR_BF_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT (21U)
++#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define DPX_CR_BF_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register DPX_CR_BF_PERF_COUNTER_0
++*/
++#define DPX_CR_BF_PERF_COUNTER_0 (0xC488U)
++#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT (0U)
++#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register DPX_CR_BT_PERF
++*/
++#define DPX_CR_BT_PERF (0xC3D0U)
++#define DPX_CR_BT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define DPX_CR_BT_PERF_CLR_3_SHIFT (4U)
++#define DPX_CR_BT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define DPX_CR_BT_PERF_CLR_3_EN (0x00000010U)
++#define DPX_CR_BT_PERF_CLR_2_SHIFT (3U)
++#define DPX_CR_BT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define DPX_CR_BT_PERF_CLR_2_EN (0x00000008U)
++#define DPX_CR_BT_PERF_CLR_1_SHIFT (2U)
++#define DPX_CR_BT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define DPX_CR_BT_PERF_CLR_1_EN (0x00000004U)
++#define DPX_CR_BT_PERF_CLR_0_SHIFT (1U)
++#define DPX_CR_BT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define DPX_CR_BT_PERF_CLR_0_EN (0x00000002U)
++#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT (0U)
++#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define DPX_CR_BT_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register DPX_CR_BT_PERF_SELECT0
++*/
++#define DPX_CR_BT_PERF_SELECT0 (0xC3D8U)
++#define DPX_CR_BT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT (21U)
++#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define DPX_CR_BT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register DPX_CR_BT_PERF_COUNTER_0
++*/
++#define DPX_CR_BT_PERF_COUNTER_0 (0xC420U)
++#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT (0U)
++#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register DPX_CR_RQ_USC_DEBUG
++*/
++#define DPX_CR_RQ_USC_DEBUG (0xC110U)
++#define DPX_CR_RQ_USC_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT (0U)
++#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000))
++
++
++/*
++ Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS
++*/
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS (0xC5C8U)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT (12U)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK (0xFFFF0FFFU)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0xFFFFF8FFU)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT (5U)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0xFFFFFF9FU)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT (4U)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK (0xFFFFFFEFU)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN (0x00000010U)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0xFFFFFFFBU)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0x00000004U)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT (0U)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK (0xFFFFFFFEU)
++#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN (0x00000001U)
++
++
++/*
++ Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS
++*/
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS (0xC5D0U)
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x03FFFFFFFFFFFFF0))
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT (57U)
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFDFFFFFFFFFFFFFF))
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN (IMG_UINT64_C(0x0200000000000000))
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT (44U)
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFE000FFFFFFFFFFF))
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT (40U)
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0xFFFFF0FFFFFFFFFF))
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT (4U)
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0xFFFFFF000000000F))
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
++#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
++
++
++/*
++ Register DPX_CR_BIF_MMU_STATUS
++*/
++#define DPX_CR_BIF_MMU_STATUS (0xC5D8U)
++#define DPX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7))
++#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U)
++#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0xF00FFFFFU)
++#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U)
++#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0xFFF00FFFU)
++#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U)
++#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0xFFFFF00FU)
++#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U)
++#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0xFFFFFFFBU)
++#define DPX_CR_BIF_MMU_STATUS_STALLED_EN (0x00000004U)
++#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U)
++#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0xFFFFFFFDU)
++#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN (0x00000002U)
++#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U)
++#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0xFFFFFFFEU)
++#define DPX_CR_BIF_MMU_STATUS_BUSY_EN (0x00000001U)
++
++
++/*
++ Register DPX_CR_RT_PERF
++*/
++#define DPX_CR_RT_PERF (0xC700U)
++#define DPX_CR_RT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define DPX_CR_RT_PERF_CLR_3_SHIFT (4U)
++#define DPX_CR_RT_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define DPX_CR_RT_PERF_CLR_3_EN (0x00000010U)
++#define DPX_CR_RT_PERF_CLR_2_SHIFT (3U)
++#define DPX_CR_RT_PERF_CLR_2_CLRMSK (0xFFFFFFF7U)
++#define DPX_CR_RT_PERF_CLR_2_EN (0x00000008U)
++#define DPX_CR_RT_PERF_CLR_1_SHIFT (2U)
++#define DPX_CR_RT_PERF_CLR_1_CLRMSK (0xFFFFFFFBU)
++#define DPX_CR_RT_PERF_CLR_1_EN (0x00000004U)
++#define DPX_CR_RT_PERF_CLR_0_SHIFT (1U)
++#define DPX_CR_RT_PERF_CLR_0_CLRMSK (0xFFFFFFFDU)
++#define DPX_CR_RT_PERF_CLR_0_EN (0x00000002U)
++#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT (0U)
++#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU)
++#define DPX_CR_RT_PERF_CTRL_ENABLE_EN (0x00000001U)
++
++
++/*
++ Register DPX_CR_RT_PERF_SELECT0
++*/
++#define DPX_CR_RT_PERF_SELECT0 (0xC708U)
++#define DPX_CR_RT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
++#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
++#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF))
++#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
++#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF))
++#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT (21U)
++#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF))
++#define DPX_CR_RT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000))
++#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
++#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF))
++#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
++#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000))
++
++
++/*
++ Register DPX_CR_RT_PERF_COUNTER_0
++*/
++#define DPX_CR_RT_PERF_COUNTER_0 (0xC730U)
++#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
++#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT (0U)
++#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK (0x00000000U)
++
++
++/*
++ Register DPX_CR_BX_TU_PERF
++*/
++#define DPX_CR_BX_TU_PERF (0xC908U)
++#define DPX_CR_BX_TU_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
++#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT (4U)
++#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK (0xFFFFFFEFU)
++#define DPX_CR_BX_TU_PERF_CLR_3_EN (0x00000010U)
++#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT (3U)
++#define
DPX_CR_BX_TU_PERF_CLR_2_CLRMSK (0xFFFFFFF7U) ++#define DPX_CR_BX_TU_PERF_CLR_2_EN (0x00000008U) ++#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT (2U) ++#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK (0xFFFFFFFBU) ++#define DPX_CR_BX_TU_PERF_CLR_1_EN (0x00000004U) ++#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT (1U) ++#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK (0xFFFFFFFDU) ++#define DPX_CR_BX_TU_PERF_CLR_0_EN (0x00000002U) ++#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT (0U) ++#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK (0xFFFFFFFEU) ++#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN (0x00000001U) ++ ++ ++/* ++ Register DPX_CR_BX_TU_PERF_SELECT0 ++*/ ++#define DPX_CR_BX_TU_PERF_SELECT0 (0xC910U) ++#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF)) ++#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT (48U) ++#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0xC000FFFFFFFFFFFF)) ++#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT (32U) ++#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0xFFFFC000FFFFFFFF)) ++#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT (21U) ++#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFDFFFFF)) ++#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0x0000000000200000)) ++#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT (16U) ++#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE0FFFF)) ++#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT (0U) ++#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) ++ ++ ++/* ++ Register DPX_CR_BX_TU_PERF_COUNTER_0 ++*/ ++#define DPX_CR_BX_TU_PERF_COUNTER_0 (0xC938U) ++#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT (0U) ++#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK (0x00000000U) ++ ++ ++/* ++ Register DPX_CR_RS_PDS_RR_CHECKSUM ++*/ ++#define DPX_CR_RS_PDS_RR_CHECKSUM (0xC0F0U) ++#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT (0U) ++#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00000000)) ++ ++ ++/* ++ Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT ++*/ ++#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U) ++#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF)) ++#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U) ++#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_MMU_CBASE_MAPPING ++*/ ++#define RGX_CR_MMU_CBASE_MAPPING (0xE148U) ++#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF)) ++#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U) ++#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0xF0000000U) ++#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U) ++#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U) ++ ++ ++/* ++ Register RGX_CR_MMU_FAULT_STATUS ++*/ ++#define RGX_CR_MMU_FAULT_STATUS (0xE150U) ++#define RGX_CR_MMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT (28U) ++#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) ++#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT (20U) ++#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) ++#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT (12U) ++#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) ++#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT 
(6U) ++#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) ++#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT (4U) ++#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) ++#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT (3U) ++#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) ++#define RGX_CR_MMU_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT (1U) ++#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) ++#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT (0U) ++#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_MMU_FAULT_STATUS_META ++*/ ++#define RGX_CR_MMU_FAULT_STATUS_META (0xE158U) ++#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (28U) ++#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0x000000000FFFFFFF)) ++#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (20U) ++#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFF00FFFFF)) ++#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT (12U) ++#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFF00FFF)) ++#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (6U) ++#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) ++#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (4U) ++#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFCF)) ++#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U) ++#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) ++#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U) ++#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF9)) ++#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U) ++#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++/* ++ Register RGX_CR_SLC3_CTRL_MISC ++*/ ++#define RGX_CR_SLC3_CTRL_MISC (0xE200U) ++#define RGX_CR_SLC3_CTRL_MISC_MASKFULL (IMG_UINT64_C(0x0000000000000107)) ++#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT (8U) ++#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK (0xFFFFFEFFU) ++#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN (0x00000100U) ++#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (0U) ++#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (0xFFFFFFF8U) ++#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR (0x00000000U) ++#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0x00000001U) ++#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0x00000002U) ++#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0x00000003U) ++#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0x00000004U) ++ ++ ++/* ++ Register RGX_CR_SLC3_SCRAMBLE ++*/ ++#define RGX_CR_SLC3_SCRAMBLE (0xE208U) ++#define RGX_CR_SLC3_SCRAMBLE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT (0U) ++#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_SLC3_SCRAMBLE2 ++*/ ++#define RGX_CR_SLC3_SCRAMBLE2 (0xE210U) ++#define 
RGX_CR_SLC3_SCRAMBLE2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT (0U) ++#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_SLC3_SCRAMBLE3 ++*/ ++#define RGX_CR_SLC3_SCRAMBLE3 (0xE218U) ++#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT (0U) ++#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_SLC3_SCRAMBLE4 ++*/ ++#define RGX_CR_SLC3_SCRAMBLE4 (0xE260U) ++#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT (0U) ++#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000)) ++ ++ ++/* ++ Register RGX_CR_SLC3_STATUS ++*/ ++#define RGX_CR_SLC3_STATUS (0xE220U) ++#define RGX_CR_SLC3_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT (48U) ++#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) ++#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT (32U) ++#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK (IMG_UINT64_C(0xFFFF0000FFFFFFFF)) ++#define RGX_CR_SLC3_STATUS_READS1_SHIFT (16U) ++#define RGX_CR_SLC3_STATUS_READS1_CLRMSK (IMG_UINT64_C(0xFFFFFFFF0000FFFF)) ++#define RGX_CR_SLC3_STATUS_READS0_SHIFT (0U) ++#define RGX_CR_SLC3_STATUS_READS0_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF0000)) ++ ++ ++/* ++ Register RGX_CR_SLC3_IDLE ++*/ ++#define RGX_CR_SLC3_IDLE (0xE228U) ++#define RGX_CR_SLC3_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF)) ++#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT (18U) ++#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK (0xFFF3FFFFU) ++#define RGX_CR_SLC3_IDLE_MMU_SHIFT (17U) ++#define RGX_CR_SLC3_IDLE_MMU_CLRMSK (0xFFFDFFFFU) ++#define RGX_CR_SLC3_IDLE_MMU_EN (0x00020000U) ++#define RGX_CR_SLC3_IDLE_RDI_SHIFT (16U) ++#define RGX_CR_SLC3_IDLE_RDI_CLRMSK (0xFFFEFFFFU) ++#define RGX_CR_SLC3_IDLE_RDI_EN (0x00010000U) ++#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT (12U) ++#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK (0xFFFF0FFFU) ++#define RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT (4U) ++#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK (0xFFFFF00FU) ++#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT (2U) ++#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK (0xFFFFFFF3U) ++#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT (1U) ++#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN (0x00000002U) ++#define RGX_CR_SLC3_IDLE_XBAR_SHIFT (0U) ++#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_SLC3_IDLE_XBAR_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_SLC3_FAULT_STOP_STATUS ++*/ ++#define RGX_CR_SLC3_FAULT_STOP_STATUS (0xE248U) ++#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000001FFF)) ++#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT (0U) ++#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK (0xFFFFE000U) ++ ++ ++/* ++ Register RGX_CR_VDM_CONTEXT_STORE_MODE ++*/ ++#define RGX_CR_VDM_CONTEXT_STORE_MODE (0xF048U) ++#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003)) ++#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT (0U) ++#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK (0xFFFFFFFCU) ++#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX (0x00000000U) ++#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE (0x00000001U) ++#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST (0x00000002U) ++ ++ ++/* ++ Register RGX_CR_CONTEXT_MAPPING0 ++*/ ++#define RGX_CR_CONTEXT_MAPPING0 (0xF078U) 
++#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U) ++#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (0x00FFFFFFU) ++#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U) ++#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (0xFF00FFFFU) ++#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U) ++#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (0xFFFF00FFU) ++#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT (0U) ++#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_CONTEXT_MAPPING1 ++*/ ++#define RGX_CR_CONTEXT_MAPPING1 (0xF080U) ++#define RGX_CR_CONTEXT_MAPPING1_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT (8U) ++#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK (0xFFFF00FFU) ++#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT (0U) ++#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_CONTEXT_MAPPING2 ++*/ ++#define RGX_CR_CONTEXT_MAPPING2 (0xF088U) ++#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) ++#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U) ++#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0xFF00FFFFU) ++#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U) ++#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0xFFFF00FFU) ++#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U) ++#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_CONTEXT_MAPPING3 ++*/ ++#define RGX_CR_CONTEXT_MAPPING3 (0xF090U) ++#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF)) ++#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U) ++#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0xFF00FFFFU) ++#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U) ++#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0xFFFF00FFU) ++#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U) ++#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_BIF_JONES_OUTSTANDING_READ ++*/ ++#define RGX_CR_BIF_JONES_OUTSTANDING_READ (0xF098U) ++#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT (0U) ++#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ ++*/ ++#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ (0xF0A0U) ++#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U) ++#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_BIF_DUST_OUTSTANDING_READ ++*/ ++#define RGX_CR_BIF_DUST_OUTSTANDING_READ (0xF0A8U) ++#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT (0U) ++#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_JONES_FIX ++*/ ++#define RGX_CR_JONES_FIX (0xF0C0U) ++#define RGX_CR_JONES_FIX__ROGUE3__MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_JONES_FIX_MASKFULL (IMG_UINT64_C(0x000000000000FFFF)) ++#define RGX_CR_JONES_FIX_DISABLE_SHIFT (0U) ++#define RGX_CR_JONES_FIX_DISABLE_CLRMSK (0xFFFF0000U) ++ ++ ++/* ++ Register RGX_CR_CONTEXT_MAPPING4 ++*/ ++#define RGX_CR_CONTEXT_MAPPING4 (0xF210U) ++#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF)) ++#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U) ++#define 
RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFF00FFFFFFFFFF)) ++#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U) ++#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFF00FFFFFFFF)) ++#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U) ++#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFF00FFFFFF)) ++#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U) ++#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFF00FFFF)) ++#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U) ++#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFF00FF)) ++#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U) ++#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFF00)) ++ ++ ++/* ++ Register RGX_CR_MULTICORE_GPU ++*/ ++#define RGX_CR_MULTICORE_GPU (0xF300U) ++#define RGX_CR_MULTICORE_GPU_MASKFULL (IMG_UINT64_C(0x000000000000007F)) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_SHIFT (6U) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN (0x00000040U) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_SHIFT (5U) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN (0x00000020U) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_SHIFT (4U) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN (0x00000010U) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_SHIFT (3U) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN (0x00000008U) ++#define RGX_CR_MULTICORE_GPU_ID_SHIFT (0U) ++#define RGX_CR_MULTICORE_GPU_ID_CLRMSK (0xFFFFFFF8U) ++ ++ ++/* ++ Register RGX_CR_MULTICORE_SYSTEM ++*/ ++#define RGX_CR_MULTICORE_SYSTEM (0xF308U) ++#define RGX_CR_MULTICORE_SYSTEM_MASKFULL (IMG_UINT64_C(0x000000000000000F)) ++#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT (0U) ++#define RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_CLRMSK (0xFFFFFFF0U) ++ ++ ++/* ++ Register RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON ++*/ ++#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON (0xF310U) ++#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) ++#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) ++#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) ++#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) ++#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) ++#define RGX_CR_MULTICORE_FRAGMENT_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON ++*/ ++#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON (0xF320U) ++#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) ++#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) ++#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) ++#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) ++#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) ++#define RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) ++ 
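/*
 * Every field in these auto-generated register blocks follows the same
 * _SHIFT/_CLRMSK convention: _CLRMSK is an all-ones mask with only that
 * field's bits cleared, so a field is updated by AND-ing the current
 * register value with _CLRMSK and OR-ing in the new value shifted left
 * by _SHIFT. Below is a minimal sketch of that read-modify-write idiom,
 * using the RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON field macros above.
 * The reg_read32()/reg_write32() accessors are hypothetical stand-ins
 * for the driver's real MMIO helpers, not part of this patch.
 */
#include <stdint.h>

extern uint32_t reg_read32(uint32_t offset);              /* hypothetical */
extern void reg_write32(uint32_t offset, uint32_t value); /* hypothetical */

static void set_geometry_workload_execute_count(uint32_t count)
{
	uint32_t v = reg_read32(RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON);

	/* Clear the WORKLOAD_EXECUTE_COUNT field (bits 29..8)... */
	v &= RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK;
	/* ...then OR in the new value, masked so an oversized 'count'
	 * cannot spill into the neighbouring WORKLOAD_TYPE field. */
	v |= (count << RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT)
	     & ~RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK;

	reg_write32(RGX_CR_MULTICORE_GEOMETRY_CTRL_COMMON, v);
}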
++ ++/* ++ Register RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON ++*/ ++#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON (0xF330U) ++#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF)) ++#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_SHIFT (30U) ++#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_TYPE_CLRMSK (0x3FFFFFFFU) ++#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_SHIFT (8U) ++#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_WORKLOAD_EXECUTE_COUNT_CLRMSK (0xC00000FFU) ++#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_SHIFT (0U) ++#define RGX_CR_MULTICORE_COMPUTE_CTRL_COMMON_GPU_ENABLE_CLRMSK (0xFFFFFF00U) ++ ++ ++/* ++ Register RGX_CR_ECC_RAM_ERR_INJ ++*/ ++#define RGX_CR_ECC_RAM_ERR_INJ (0xF340U) ++#define RGX_CR_ECC_RAM_ERR_INJ_MASKFULL (IMG_UINT64_C(0x000000000000001F)) ++#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_SHIFT (4U) ++#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN (0x00000010U) ++#define RGX_CR_ECC_RAM_ERR_INJ_USC_SHIFT (3U) ++#define RGX_CR_ECC_RAM_ERR_INJ_USC_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_ECC_RAM_ERR_INJ_USC_EN (0x00000008U) ++#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_SHIFT (2U) ++#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN (0x00000004U) ++#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_SHIFT (1U) ++#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN (0x00000002U) ++#define RGX_CR_ECC_RAM_ERR_INJ_MARS_SHIFT (0U) ++#define RGX_CR_ECC_RAM_ERR_INJ_MARS_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_ECC_RAM_ERR_INJ_MARS_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_ECC_RAM_INIT_KICK ++*/ ++#define RGX_CR_ECC_RAM_INIT_KICK (0xF348U) ++#define RGX_CR_ECC_RAM_INIT_KICK_MASKFULL (IMG_UINT64_C(0x000000000000001F)) ++#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_SHIFT (4U) ++#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_ECC_RAM_INIT_KICK_SLC_SIDEKICK_EN (0x00000010U) ++#define RGX_CR_ECC_RAM_INIT_KICK_USC_SHIFT (3U) ++#define RGX_CR_ECC_RAM_INIT_KICK_USC_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_ECC_RAM_INIT_KICK_USC_EN (0x00000008U) ++#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_SHIFT (2U) ++#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_ECC_RAM_INIT_KICK_TPU_MCU_L0_EN (0x00000004U) ++#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_SHIFT (1U) ++#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_ECC_RAM_INIT_KICK_RASCAL_EN (0x00000002U) ++#define RGX_CR_ECC_RAM_INIT_KICK_MARS_SHIFT (0U) ++#define RGX_CR_ECC_RAM_INIT_KICK_MARS_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_ECC_RAM_INIT_KICK_MARS_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_ECC_RAM_INIT_DONE ++*/ ++#define RGX_CR_ECC_RAM_INIT_DONE (0xF350U) ++#define RGX_CR_ECC_RAM_INIT_DONE_MASKFULL (IMG_UINT64_C(0x000000000000001F)) ++#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_SHIFT (4U) ++#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_ECC_RAM_INIT_DONE_SLC_SIDEKICK_EN (0x00000010U) ++#define RGX_CR_ECC_RAM_INIT_DONE_USC_SHIFT (3U) ++#define RGX_CR_ECC_RAM_INIT_DONE_USC_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_ECC_RAM_INIT_DONE_USC_EN (0x00000008U) ++#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_SHIFT (2U) ++#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_ECC_RAM_INIT_DONE_TPU_MCU_L0_EN (0x00000004U) ++#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_SHIFT 
(1U) ++#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_ECC_RAM_INIT_DONE_RASCAL_EN (0x00000002U) ++#define RGX_CR_ECC_RAM_INIT_DONE_MARS_SHIFT (0U) ++#define RGX_CR_ECC_RAM_INIT_DONE_MARS_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_ECC_RAM_INIT_DONE_MARS_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_SAFETY_EVENT_ENABLE ++*/ ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE (0xF390U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN (0x00000008U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN (0x00000004U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_SAFETY_EVENT_STATUS ++*/ ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE (0xF398U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_SHIFT (7U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_SHIFT (3U) ++#define 
RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__TRP_FAIL_EN (0x00000008U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT (2U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN (0x00000004U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_SHIFT (1U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_GPU_EN (0x00000002U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_SAFETY_EVENT_CLEAR ++*/ ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE (0xF3A0U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_SHIFT (7U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_SHIFT (3U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__TRP_FAIL_EN (0x00000008U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_SHIFT (2U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_FW_EN (0x00000004U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_SHIFT (1U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__FAULT_GPU_EN (0x00000002U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_FAULT_FW_STATUS ++*/ ++#define RGX_CR_FAULT_FW_STATUS (0xF3B0U) ++#define RGX_CR_FAULT_FW_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000010001)) ++#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT (16U) ++#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_CLRMSK (0xFFFEFFFFU) ++#define RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_EN (0x00010000U) ++#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT (0U) ++#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_FAULT_FW_STATUS_CPU_DETECT_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_FAULT_FW_CLEAR ++*/ ++#define RGX_CR_FAULT_FW_CLEAR (0xF3B8U) ++#define RGX_CR_FAULT_FW_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000010001)) ++#define 
RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_SHIFT (16U) ++#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_CLRMSK (0xFFFEFFFFU) ++#define RGX_CR_FAULT_FW_CLEAR_CPU_CORRECT_EN (0x00010000U) ++#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_SHIFT (0U) ++#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_FAULT_FW_CLEAR_CPU_DETECT_EN (0x00000001U) ++ ++ ++/* ++ Register RGX_CR_MTS_SAFETY_EVENT_ENABLE ++*/ ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE (0xF3D8U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000000000FF)) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_SHIFT (7U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_CLRMSK (0xFFFFFF7FU) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_LOCKUP_EN (0x00000080U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_SHIFT (6U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_CLRMSK (0xFFFFFFBFU) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__CPU_PAGE_FAULT_EN (0x00000040U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_SHIFT (5U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_CLRMSK (0xFFFFFFDFU) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__SAFE_COMPUTE_FAIL_EN (0x00000020U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT (4U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_CLRMSK (0xFFFFFFEFU) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__WATCHDOG_TIMEOUT_EN (0x00000010U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_SHIFT (3U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_CLRMSK (0xFFFFFFF7U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__TRP_FAIL_EN (0x00000008U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_SHIFT (2U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_CLRMSK (0xFFFFFFFBU) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_FW_EN (0x00000004U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_SHIFT (1U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_CLRMSK (0xFFFFFFFDU) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__FAULT_GPU_EN (0x00000002U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_SHIFT (0U) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_CLRMSK (0xFFFFFFFEU) ++#define RGX_CR_MTS_SAFETY_EVENT_ENABLE__ROGUEXE__GPU_PAGE_FAULT_EN (0x00000001U) ++ ++ ++#endif /* RGX_CR_DEFS_KM_H */ ++/***************************************************************************** ++ End of file (rgx_cr_defs_km.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/km/rgxdefs_km.h b/drivers/gpu/drm/img-rogue/km/rgxdefs_km.h +new file mode 100644 +index 000000000000..64f4b366a59b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/km/rgxdefs_km.h +@@ -0,0 +1,338 @@ ++/*************************************************************************/ /*! ++@Title Rogue hw definitions (kernel mode) ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXDEFS_KM_H ++#define RGXDEFS_KM_H ++ ++#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) ++#include RGX_BVNC_CORE_KM_HEADER ++#include RGX_BNC_CONFIG_KM_HEADER ++#endif ++ ++#define IMG_EXPLICIT_INCLUDE_HWDEFS ++#if defined(__KERNEL__) ++#include "rgx_cr_defs_km.h" ++#endif ++#undef IMG_EXPLICIT_INCLUDE_HWDEFS ++ ++#include "rgx_heap_firmware.h" ++ ++/* The following Macros are picked up through BVNC headers for no hardware ++ * operations to be compatible with old build infrastructure. 
++ */ ++#if defined(NO_HARDWARE) ++/****************************************************************************** ++ * Check for valid B.X.N.C ++ *****************************************************************************/ ++#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C) ++#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_KM_C)" ++#endif ++ ++/* Check core/config compatibility */ ++#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C) ++#error "BVNC headers are mismatching (KM core/config)" ++#endif ++#endif ++ ++/****************************************************************************** ++ * RGX Version name ++ *****************************************************************************/ ++#define RGX_BVNC_KM_ST2(S) #S ++#define RGX_BVNC_KM_ST(S) RGX_BVNC_KM_ST2(S) ++#define RGX_BVNC_KM RGX_BVNC_KM_ST(RGX_BVNC_KM_B) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_V) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_N) "." RGX_BVNC_KM_ST(RGX_BVNC_KM_C) ++#define RGX_BVNC_KM_V_ST RGX_BVNC_KM_ST(RGX_BVNC_KM_V) ++ ++/* Maximum string size is [bb.vvvp.nnnn.cccc\0], includes null char */ ++#define RGX_BVNC_STR_SIZE_MAX (2U+1U+4U+1U+4U+1U+4U+1U) ++#define RGX_BVNC_STR_FMTSPEC "%u.%u.%u.%u" ++#define RGX_BVNC_STRP_FMTSPEC "%u.%up.%u.%u" ++ ++ ++/****************************************************************************** ++ * RGX Defines ++ *****************************************************************************/ ++ ++#define BVNC_FIELD_MASK ((1UL << BVNC_FIELD_WIDTH) - 1U) ++#define C_POSITION (0U) ++#define N_POSITION ((C_POSITION) + (BVNC_FIELD_WIDTH)) ++#define V_POSITION ((N_POSITION) + (BVNC_FIELD_WIDTH)) ++#define B_POSITION ((V_POSITION) + (BVNC_FIELD_WIDTH)) ++ ++#define B_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION))) ++#define V_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION))) ++#define N_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION))) ++#define C_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION))) ++ ++#define GET_B(x) (((x) & (B_POSTION_MASK)) >> (B_POSITION)) ++#define GET_V(x) (((x) & (V_POSTION_MASK)) >> (V_POSITION)) ++#define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION)) ++#define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION)) ++ ++#define BVNC_PACK(B,V,N,C) ((((IMG_UINT64)(B))) << (B_POSITION) | \ ++ (((IMG_UINT64)(V))) << (V_POSITION) | \ ++ (((IMG_UINT64)(N))) << (N_POSITION) | \ ++ (((IMG_UINT64)(C))) << (C_POSITION) \ ++ ) ++ ++#define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U) ++#define RGX_CR_CORE_ID_CONFIG_C_SHIFT (0U) ++ ++#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU) ++#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U) ++ ++#define RGXFW_MAX_NUM_OS (8U) ++#define RGXFW_HOST_OS (0U) ++#define RGXFW_GUEST_OSID_START (1U) ++ ++#define RGXFW_THREAD_0 (0U) ++#define RGXFW_THREAD_1 (1U) ++ ++/* META cores (required for the RGX_FEATURE_META) */ ++#define MTP218 (1U) ++#define MTP219 (2U) ++#define LTP218 (3U) ++#define LTP217 (4U) ++ ++/* META Core memory feature depending on META variants */ ++#define RGX_META_COREMEM_32K (32*1024) ++#define RGX_META_COREMEM_48K (48*1024) ++#define RGX_META_COREMEM_64K (64*1024) ++#define RGX_META_COREMEM_96K (96*1024) ++#define RGX_META_COREMEM_128K (128*1024) ++#define RGX_META_COREMEM_256K (256*1024) ++ ++#if !defined(__KERNEL__) ++#if (!defined(SUPPORT_TRUSTED_DEVICE) || defined(RGX_FEATURE_META_DMA)) && \ ++ 
(defined(RGX_FEATURE_META_COREMEM_SIZE) && RGX_FEATURE_META_COREMEM_SIZE != 0) ++#define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024U) ++#define RGX_META_COREMEM (1) ++#define RGX_META_COREMEM_CODE (1) ++#if !defined(FIX_HW_BRN_50767) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1) ++#define RGX_META_COREMEM_DATA (1) ++#endif ++#else ++#undef SUPPORT_META_COREMEM ++#undef RGX_FEATURE_META_COREMEM_SIZE ++#undef RGX_FEATURE_META_DMA ++#define RGX_FEATURE_META_COREMEM_SIZE (0) ++#define RGX_META_COREMEM_SIZE (0) ++#endif ++#endif ++ ++#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((IMG_UINT32)(x)) > 0U) ? ((IMG_UINT32)(x)/8U) : (0U)) ++ ++ ++#if defined(SUPPORT_AGP) ++#define MAX_HW_TA3DCONTEXTS 3U ++#else ++#define MAX_HW_TA3DCONTEXTS 2U ++#endif ++ ++#define RGX_CR_CLK_CTRL_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL_MASKFULL) ++#define RGX_CR_CLK_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL_MASKFULL) ++#define RGX_CR_CLK_CTRL2_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL2_MASKFULL) ++#define RGX_CR_CLK_CTRL2_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL2_MASKFULL) ++#define RGX_CR_CLK_XTPLUS_CTRL_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_XTPLUS_CTRL_MASKFULL) ++#define RGX_CR_CLK_XTPLUS_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_XTPLUS_CTRL_MASKFULL) ++#define DPX_CR_DPX_CLK_CTRL_ALL_ON (IMG_UINT64_C(0x5555555555555555)&DPX_CR_DPX_CLK_CTRL_MASKFULL) ++#define DPX_CR_DPX_CLK_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&DPX_CR_DPX_CLK_CTRL_MASKFULL) ++ ++#define RGX_CR_SOFT_RESET_DUST_n_CORE_EN (RGX_CR_SOFT_RESET_DUST_A_CORE_EN | \ ++ RGX_CR_SOFT_RESET_DUST_B_CORE_EN | \ ++ RGX_CR_SOFT_RESET_DUST_C_CORE_EN | \ ++ RGX_CR_SOFT_RESET_DUST_D_CORE_EN | \ ++ RGX_CR_SOFT_RESET_DUST_E_CORE_EN | \ ++ RGX_CR_SOFT_RESET_DUST_F_CORE_EN | \ ++ RGX_CR_SOFT_RESET_DUST_G_CORE_EN | \ ++ RGX_CR_SOFT_RESET_DUST_H_CORE_EN) ++ ++/* SOFT_RESET Rascal and DUSTs bits */ ++#define RGX_CR_SOFT_RESET_RASCALDUSTS_EN (RGX_CR_SOFT_RESET_RASCAL_CORE_EN | \ ++ RGX_CR_SOFT_RESET_DUST_n_CORE_EN) ++ ++ ++ ++ ++/* SOFT_RESET steps as defined in the TRM */ ++#define RGX_S7_SOFT_RESET_DUSTS (RGX_CR_SOFT_RESET_DUST_n_CORE_EN) ++ ++#define RGX_S7_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN | \ ++ RGX_CR_SOFT_RESET_VDM_EN | \ ++ RGX_CR_SOFT_RESET_ISP_EN) ++ ++#define RGX_S7_SOFT_RESET_JONES_ALL (RGX_S7_SOFT_RESET_JONES | \ ++ RGX_CR_SOFT_RESET_BIF_EN | \ ++ RGX_CR_SOFT_RESET_SLC_EN | \ ++ RGX_CR_SOFT_RESET_GARTEN_EN) ++ ++#define RGX_S7_SOFT_RESET2 (RGX_CR_SOFT_RESET2_BLACKPEARL_EN | \ ++ RGX_CR_SOFT_RESET2_PIXEL_EN | \ ++ RGX_CR_SOFT_RESET2_CDM_EN | \ ++ RGX_CR_SOFT_RESET2_VERTEX_EN) ++ ++ ++ ++#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12U) ++#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1UL << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) ++ ++#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14U) ++#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE (1UL << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT) ++ ++#define RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (16U) ++ ++/* To get the number of required Dusts, divide the number of ++ * clusters by 2 and round up ++ */ ++#define RGX_REQ_NUM_DUSTS(CLUSTERS) (((CLUSTERS) + 1U) / 2U) ++ ++/* To get the number of required Bernado/Phantom(s), divide ++ * the number of clusters by 4 and round up ++ */ ++#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) ++#define RGX_REQ_NUM_BERNADOS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) ++#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) (((CLUSTERS) + 3U) / 4U) ++ ++#if !defined(__KERNEL__) 
++# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS)) ++#endif ++ ++ ++/* META second thread feature depending on META variants and ++ * available CoreMem ++ */ ++#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && (RGX_FEATURE_META_COREMEM_SIZE == 256) ++#define RGXFW_META_SUPPORT_2ND_THREAD ++#endif ++ ++ ++/* ++ * FW MMU contexts ++ */ ++#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_META) ++#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) /* FW code/private data */ ++#define MMU_CONTEXT_MAPPING_FWIF (0x7U) /* Host/FW data */ ++#else ++#define MMU_CONTEXT_MAPPING_FWPRIV (0x0U) ++#define MMU_CONTEXT_MAPPING_FWIF (0x0U) ++#endif ++ ++ ++/* ++ * Utility macros to calculate CAT_BASE register addresses ++ */ ++#define BIF_CAT_BASEx(n) \ ++ (RGX_CR_BIF_CAT_BASE0 + ((n) * (RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0))) ++ ++#define FWCORE_MEM_CAT_BASEx(n) \ ++ (RGX_CR_FWCORE_MEM_CAT_BASE0 + ((n) * (RGX_CR_FWCORE_MEM_CAT_BASE1 - RGX_CR_FWCORE_MEM_CAT_BASE0))) ++ ++/* ++ * FWCORE wrapper register defines ++ */ ++#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_SHIFT ++#define FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_CLRMSK RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_CBASE_CLRMSK ++#define FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT (12U) ++ ++ ++/****************************************************************************** ++ * WA HWBRNs ++ *****************************************************************************/ ++ ++#if defined(RGX_CR_JONES_IDLE_MASKFULL) ++/* Workaround for HW BRN 57289 */ ++#if (RGX_CR_JONES_IDLE_MASKFULL != 0x0000000000007FFF) ++#error This WA must be updated if RGX_CR_JONES_IDLE is expanded!!! ++#endif ++#undef RGX_CR_JONES_IDLE_MASKFULL ++#undef RGX_CR_JONES_IDLE_TDM_SHIFT ++#undef RGX_CR_JONES_IDLE_TDM_CLRMSK ++#undef RGX_CR_JONES_IDLE_TDM_EN ++#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF)) ++#endif ++ ++#if !defined(__KERNEL__) ++ ++#if defined(RGX_FEATURE_ROGUEXE) ++#define RGX_NUM_RASTERISATION_MODULES RGX_FEATURE_NUM_CLUSTERS ++#else ++#define RGX_NUM_RASTERISATION_MODULES RGX_NUM_PHANTOMS ++#endif ++ ++#endif /* defined(__KERNEL__) */ ++ ++/* GPU CR timer tick in GPU cycles */ ++#define RGX_CRTIME_TICK_IN_CYCLES (256U) ++ ++/* for nohw multicore return max cores possible to client */ ++#define RGX_MULTICORE_MAX_NOHW_CORES (4U) ++ ++/* ++ If the size of the SLC is less than this value then the TPU bypasses the SLC. ++ */ ++#define RGX_TPU_CACHED_SLC_SIZE_THRESHOLD_KB (128U) ++ ++/* ++ * If the size of the SLC is bigger than this value then the TCU must not be bypassed in the SLC. ++ * In XE_MEMORY_HIERARCHY cores, the TCU is bypassed by default. 
++ */ ++#define RGX_TCU_CACHED_SLC_SIZE_THRESHOLD_KB (32U) ++ ++/* ++ * Register used by the FW to track the current boot stage (not used in MIPS) ++ */ ++#define RGX_FW_BOOT_STAGE_REGISTER (RGX_CR_POWER_ESTIMATE_RESULT) ++ ++/* ++ * Virtualisation definitions ++ */ ++#define RGX_VIRTUALISATION_REG_SIZE_PER_OS (RGX_CR_MTS_SCHEDULE1 - RGX_CR_MTS_SCHEDULE) ++ ++/* ++ * Macro used to indicate which version of HWPerf is active ++ */ ++#define RGX_FEATURE_HWPERF_ROGUE ++ ++/* ++ * Maximum number of cores supported by TRP ++ */ ++#define RGX_TRP_MAX_NUM_CORES (4U) ++ ++#endif /* RGXDEFS_KM_H */ +diff --git a/drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h b/drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h +new file mode 100644 +index 000000000000..fe8272b8584f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/km/rgxmhdefs_km.h +@@ -0,0 +1,286 @@ ++/*************************************************************************/ /*! ++@Title Hardware definition file rgxmhdefs_km.h ++@Brief The file contains auto-generated hardware definitions without ++ BVNC-specific compile time conditionals. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++/* **** Autogenerated C -- do not edit **** */ ++ ++/* ++ * rogue_mh.def ++ */ ++ ++ ++#ifndef RGXMHDEFS_KM_H ++#define RGXMHDEFS_KM_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++ ++#define RGXMHDEFS_KM_REVISION 0 ++ ++#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE (0x00000000U) ++#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT (0x00000001U) ++#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE (0x00000002U) ++ ++ ++#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM (0x00000000U) ++#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER (0x00000001U) ++#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL (0x00000002U) ++ ++ ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK (0x00000008U) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST (0x00000009U) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK (0x0000000aU) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST (0x0000000bU) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0 (0x0000000cU) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1 (0x0000002dU) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK (0x0000000fU) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK (0x00000012U) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK (0x00000013U) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK (0x00000016U) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK (0x00000017U) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP (0x00000019U) ++#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP (0x0000001aU) ++ ++ ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK (0x00000000U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST (0x00000001U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK (0x00000002U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST (0x00000003U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0 (0x00000004U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1 (0x00000025U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP (0x00000006U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK (0x00000007U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK (0x00000008U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK (0x00000009U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK (0x00000014U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK (0x00000015U) ++#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP (0x00000018U) ++ ++ ++#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PPP (0x00000008U) ++#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCERTC (0x00000007U) ++#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TEACRTC (0x00000006U) ++#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGRTC (0x00000005U) ++#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGR (0x00000004U) ++#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGS (0x00000003U) ++#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TPC (0x00000002U) ++#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCE (0x00000001U) ++ ++ ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ00 (0x00000000U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ01 (0x00000001U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ00 (0x00000002U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ01 (0x00000003U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_RREQ (0x00000004U) ++#define 
RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DBSC (0x00000005U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CPF (0x00000006U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DELTA (0x00000007U) ++ ++ ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ00 (0x00000000U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ01 (0x00000001U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ02 (0x00000002U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ03 (0x00000003U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ00 (0x00000004U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ01 (0x00000005U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ02 (0x00000006U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ03 (0x00000007U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_RREQ (0x00000008U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DBSC (0x00000009U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CPF (0x0000000aU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DELTA (0x0000000bU) ++ ++ ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ00 (0x00000000U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ01 (0x00000001U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ02 (0x00000002U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ03 (0x00000003U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ04 (0x00000004U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ05 (0x00000005U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ06 (0x00000006U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ00 (0x00000007U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ01 (0x00000008U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ02 (0x00000009U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ03 (0x0000000aU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ04 (0x0000000bU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ05 (0x0000000cU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ06 (0x0000000dU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_RREQ (0x0000000eU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DBSC (0x0000000fU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CPF (0x00000010U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DELTA (0x00000011U) ++ ++ ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ00 (0x00000000U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ01 (0x00000001U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ02 (0x00000002U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ03 (0x00000003U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ04 (0x00000004U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ05 (0x00000005U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ06 (0x00000006U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ07 (0x00000007U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ08 (0x00000008U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ09 (0x00000009U) ++#define 
RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ10 (0x0000000aU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ11 (0x0000000bU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ12 (0x0000000cU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ13 (0x0000000dU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ00 (0x0000000eU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ01 (0x0000000fU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ02 (0x00000010U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ03 (0x00000011U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ04 (0x00000012U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ05 (0x00000013U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ06 (0x00000014U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ07 (0x00000015U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ08 (0x00000016U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ09 (0x00000017U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ10 (0x00000018U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ11 (0x00000019U) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ12 (0x0000001aU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ13 (0x0000001bU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_RREQ (0x0000001cU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DBSC (0x0000001dU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CPF (0x0000001eU) ++#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DELTA (0x0000001fU) ++ ++ ++#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE (0x00000000U) ++#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS (0x00000001U) ++#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA (0x00000002U) ++#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA (0x00000003U) ++ ++ ++#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_ZLS (0x00000000U) ++#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_DS (0x00000001U) ++ ++ ++#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL (0x00000000U) ++#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE (0x00000001U) ++#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX (0x00000002U) ++#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK (0x00000004U) ++#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT (0x00000008U) ++ ++ ++#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM (0x00000000U) ++#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA (0x00000001U) ++#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA (0x00000002U) ++#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE (0x00000003U) ++ ++ ++#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH (0x00000002U) ++#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS (0x00000003U) ++ ++ ++#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST (0x00000000U) ++#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST (0x00000001U) ++#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST (0x00000002U) ++#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST (0x00000003U) ++#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST (0x00000004U) ++#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST (0x00000005U) ++#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST (0x00000006U) ++#define 
RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST (0x00000007U) ++ ++ ++#define RGX_MH_TAG_ENCODING_MH_TAG_MMU (0x00000000U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU (0x00000001U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU (0x00000002U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU (0x00000003U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MIPS (0x00000004U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0 (0x00000005U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1 (0x00000006U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2 (0x00000007U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3 (0x00000008U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0 (0x00000009U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1 (0x0000000aU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2 (0x0000000bU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3 (0x0000000cU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4 (0x0000000dU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_0 (0x0000000eU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_1 (0x0000000fU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA (0x00000010U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB (0x00000011U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC (0x00000012U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD (0x00000013U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA (0x00000014U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB (0x00000015U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC (0x00000016U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD (0x00000017U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW (0x00000018U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_0 (0x00000019U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_1 (0x0000001aU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0 (0x0000001bU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1 (0x0000001cU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2 (0x0000001dU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3 (0x0000001eU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_USC (0x0000001fU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS (0x00000020U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS (0x00000021U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TPF (0x00000022U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS (0x00000023U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF (0x00000024U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ (0x00000025U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS (0x00000026U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5 (0x00000027U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP (0x00000028U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC (0x00000029U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC (0x0000002aU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC (0x0000002bU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION (0x0000002cU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM (0x0000002dU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW (0x0000002eU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC (0x0000002fU) ++#define RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC (0x00000030U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC (0x00000031U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA (0x00000032U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL (0x00000033U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_PBE0 (0x00000034U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_PBE1 (0x00000035U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_PBE2 (0x00000036U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_PBE3 (0x00000037U) ++ ++ ++#endif /* RGXMHDEFS_KM_H */ ++/***************************************************************************** ++ End of file (rgxmhdefs_km.h) 
++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h b/drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h +new file mode 100644 +index 000000000000..65186643a20a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/km/rgxmmudefs_km.h +@@ -0,0 +1,216 @@ ++/*************************************************************************/ /*! ++@Title Hardware definition file rgxmmudefs_km.h ++@Brief The file contains auto-generated hardware definitions without ++ BVNC-specific compile time conditionals. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++/* **** Autogenerated C -- do not edit **** */ ++ ++/* ++ * rogue_bif.def ++ */ ++ ++ ++#ifndef RGXMMUDEFS_KM_H ++#define RGXMMUDEFS_KM_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++ ++#define RGXMMUDEFS_KM_REVISION 0 ++ ++#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U) ++#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U) ++#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U) ++#define RGX_BIF_DM_ENCODING_TLA (0x00000003U) ++#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U) ++#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U) ++#define RGX_BIF_DM_ENCODING_META (0x00000007U) ++#define RGX_BIF_DM_ENCODING_HOST (0x00000008U) ++#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U) ++ ++ ++#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U) ++#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFF003FFFFFFF)) ++#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U) ++#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFC01FFFFF)) ++#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) ++#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFE00FFF)) ++ ++ ++#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U) ++ ++ ++#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U) ++ ++ ++#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U) ++ ++ ++#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U) ++ ++ ++#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U) ++ ++ ++#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U) ++ ++ ++#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U) ++#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U) ++#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U) ++#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U) ++#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U) ++#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U) ++#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U) ++ ++ ++#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U) ++#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++ ++ ++#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U) ++#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000003FFF)) ++ ++ ++#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U) ++#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000FFFF)) ++ ++ ++#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U) ++#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000003FFFF)) ++ ++ ++#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U) ++#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000FFFFF)) ++ ++ ++#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U) ++#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00001FFFFF)) ++ ++ ++#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U) ++#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++ ++ ++#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U) ++#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000003FF)) ++ ++ ++#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U) ++#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF00000000FF)) ++ ++ ++#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U) ++#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000003F)) ++ ++ ++#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U) ++#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) ++ ++ ++#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U) ++#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) ++ ++ 
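(Reading aid, not part of the patch itself: the RGX_MMUCTRL_VADDR_*_INDEX shifts/masks and RGX_MMUCTRL_ENTRIES_* values above encode a three-level walk over the 40-bit GPU virtual address space: a 1024-entry Page Catalogue indexed by bits 39..30, a 512-entry Page Directory indexed by bits 29..21, and a 512-entry Page Table indexed by bits 20..12. Below is a minimal C sketch of the 4KB-page address decode these constants imply; the names mmu_va_fields and decode_gpu_va_4k are illustrative only and do not appear in the driver.)

    #include <stdint.h>

    /* Field layout for a 40-bit GPU virtual address, 4KB page case.
     * Each RGX_MMUCTRL_VADDR_*_CLRMSK clears its field, so ~CLRMSK
     * isolates the field before shifting it down.
     */
    struct mmu_va_fields {
    	uint32_t pc_index; /* Page Catalogue index, bits 39..30 (0x400 entries) */
    	uint32_t pd_index; /* Page Directory index, bits 29..21 (0x200 entries) */
    	uint32_t pt_index; /* Page Table index,     bits 20..12 (0x200 entries) */
    	uint32_t offset;   /* byte offset within the 4KB page, bits 11..0       */
    };

    static struct mmu_va_fields decode_gpu_va_4k(uint64_t va)
    {
    	struct mmu_va_fields f;

    	f.pc_index = (uint32_t)((va & ~UINT64_C(0xFFFFFF003FFFFFFF)) >> 30);
    	f.pd_index = (uint32_t)((va & ~UINT64_C(0xFFFFFFFFC01FFFFF)) >> 21);
    	f.pt_index = (uint32_t)((va & ~UINT64_C(0xFFFFFFFFFFE00FFF)) >> 12);
    	f.offset   = (uint32_t)(va & UINT64_C(0xFFF));

    	return f;
    }

(The 10 + 9 + 9 + 12 field widths account for the full 40-bit address. For larger page sizes, the PD_DATA_PAGE_SIZE field defined just below selects which of the PT_BASE_*_RANGE packings applies to the page-table base at the next level.)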
++#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U) ++#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0xBFFFFFFFFFFFFFFF)) ++#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0x4000000000000000)) ++#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U) ++#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0xC00000FFFFFFFFFF)) ++#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U) ++#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0xFFFFFF0000000FFF)) ++#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U) ++#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFF03F)) ++#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U) ++#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFDF)) ++#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000000000000020)) ++#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U) ++#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFEF)) ++#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0x0000000000000010)) ++#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U) ++#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF7)) ++#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U) ++#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFB)) ++#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0x0000000000000004)) ++#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U) ++#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFD)) ++#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U) ++#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U) ++#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0xFFFFFEFFFFFFFFFF)) ++#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0x0000010000000000)) ++#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U) ++#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0xFFFFFF000000001F)) ++#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U) ++#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFF1)) ++#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002)) ++#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004)) ++#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006)) ++#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008)) ++#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a)) ++#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U) ++#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFE)) ++#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0x0000000000000001)) ++ ++ ++#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U) ++#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0x0000000FU) ++#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U) ++#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U) ++#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U) ++#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0xFFFFFFFDU) ++#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0x00000002U) ++#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U) ++#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK 
(0xFFFFFFFEU) ++#define RGX_MMUCTRL_PC_DATA_VALID_EN (0x00000001U) ++ ++ ++#endif /* RGXMMUDEFS_KM_H */ ++/***************************************************************************** ++ End of file (rgxmmudefs_km.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/km_apphint.c b/drivers/gpu/drm/img-rogue/km_apphint.c +new file mode 100644 +index 000000000000..654d0850e4d5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/km_apphint.c +@@ -0,0 +1,1751 @@ ++/*************************************************************************/ /*! ++@File km_apphint.c ++@Title Apphint routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "di_server.h" ++#include "pvr_uaccess.h" ++#include ++#include ++#include ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) ++#include ++#endif ++ ++/* Common and SO layer */ ++#include "img_defs.h" ++#include "sofunc_pvr.h" ++ ++/* for action device access */ ++#include "pvrsrv.h" ++#include "device.h" ++#include "rgxdevice.h" ++#include "rgxfwutils.h" ++#include "rgxhwperf.h" ++#include "htbserver.h" ++#include "rgxutils.h" ++#include "rgxapi_km.h" ++ ++ ++/* defines for default values */ ++#include "rgx_fwif_km.h" ++#include "htbuffer_types.h" ++ ++#include "pvr_notifier.h" ++ ++#include "km_apphint_defs.h" ++#include "km_apphint.h" ++ ++#if defined(PDUMP) ++#include ++#include "pdump_km.h" ++#endif ++ ++/* Size of temporary buffers used to read and write AppHint data. ++ * Must be large enough to contain any strings read or written but no larger ++ * than 4096: which is the buffer size for the kernel_param_ops .get ++ * function. And less than 1024 to keep the stack frame size within bounds. ++ */ ++#define APPHINT_BUFFER_SIZE 512 ++ ++#define APPHINT_DEVICES_MAX 16 ++ ++/* Apphint Debug output level */ ++#define APPHINT_DPF_LEVEL PVR_DBG_VERBOSE ++ ++/* ++******************************************************************************* ++ * AppHint mnemonic data type helper tables ++******************************************************************************/ ++struct apphint_lookup { ++ const char *name; ++ int value; ++}; ++ ++static const struct apphint_lookup fwt_logtype_tbl[] = { ++ { "trace", 0}, ++ { "none", 0} ++#if defined(SUPPORT_TBI_INTERFACE) ++ , { "tbi", 1} ++#endif ++}; ++ ++static const struct apphint_lookup fwt_loggroup_tbl[] = { ++ RGXFWIF_LOG_GROUP_NAME_VALUE_MAP ++}; ++ ++static const struct apphint_lookup htb_loggroup_tbl[] = { ++#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, ++ HTB_LOG_SFGROUPLIST ++#undef X ++}; ++ ++static const struct apphint_lookup htb_opmode_tbl[] = { ++ { "droplatest", HTB_OPMODE_DROPLATEST}, ++ { "dropoldest", HTB_OPMODE_DROPOLDEST}, ++ { "block", HTB_OPMODE_BLOCK} ++}; ++ ++__maybe_unused ++static const struct apphint_lookup htb_logmode_tbl[] = { ++ { "all", HTB_LOGMODE_ALLPID}, ++ { "restricted", HTB_LOGMODE_RESTRICTEDPID} ++}; ++ ++__maybe_unused ++static const struct apphint_lookup timecorr_clk_tbl[] = { ++ { "mono", 0 }, ++ { "mono_raw", 1 }, ++ { "sched", 2 } ++}; ++ ++/* ++******************************************************************************* ++ Data types ++******************************************************************************/ ++union apphint_value { ++ IMG_UINT64 UINT64; ++ IMG_UINT32 UINT32; ++ IMG_BOOL BOOL; ++ IMG_CHAR *STRING; ++}; ++ ++union apphint_query_action { ++ PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, ++ const void *private_data, IMG_UINT64 *value); ++ PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, ++ const void *private_data, IMG_UINT32 *value); ++ PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, ++ const void *private_data, IMG_BOOL *value); ++ PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, ++ const void *private_data, IMG_CHAR **value); ++}; ++ ++union apphint_set_action { ++ PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, ++ const void *private_data, IMG_UINT64 value); ++ PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, ++ const void *private_data, IMG_UINT32 value); ++ PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, ++ const void 
*private_data, IMG_BOOL value); ++ PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, ++ const void *private_data, IMG_CHAR *value); ++}; ++ ++struct apphint_action { ++ union apphint_query_action query; /*!< Query callbacks. */ ++ union apphint_set_action set; /*!< Set callbacks. */ ++ const PVRSRV_DEVICE_NODE *device; /*!< Pointer to the device node.*/ ++ const void *private_data; /*!< Opaque data passed to `query` and ++ `set` callbacks. */ ++ union apphint_value stored; /*!< Value of the AppHint. */ ++ bool free; /*!< Flag indicating that memory has been ++ allocated for this AppHint and it ++ needs to be freed on deinit. */ ++ bool initialised; /*!< Flag indicating if the AppHint has ++ been already initialised. */ ++}; ++ ++struct apphint_param { ++ IMG_UINT32 id; ++ APPHINT_DATA_TYPE data_type; ++ const void *data_type_helper; ++ IMG_UINT32 helper_size; ++}; ++ ++struct apphint_init_data { ++ IMG_UINT32 id; /* index into AppHint Table */ ++ APPHINT_CLASS class; ++ const IMG_CHAR *name; ++ union apphint_value default_value; ++}; ++ ++struct apphint_init_data_mapping { ++ IMG_UINT32 device_apphint_id; ++ IMG_UINT32 modparam_apphint_id; ++}; ++ ++struct apphint_class_state { ++ APPHINT_CLASS class; ++ IMG_BOOL enabled; ++}; ++ ++struct apphint_work { ++ struct work_struct work; ++ union apphint_value new_value; ++ struct apphint_action *action; ++}; ++ ++/* ++******************************************************************************* ++ Initialization / configuration table data ++******************************************************************************/ ++#define UINT32Bitfield UINT32 ++#define UINT32List UINT32 ++ ++static const struct apphint_init_data init_data_buildvar[] = { ++#define X(a, b, c, d, e) \ ++ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, ++ APPHINT_LIST_BUILDVAR_COMMON ++ APPHINT_LIST_BUILDVAR ++#undef X ++}; ++ ++static const struct apphint_init_data init_data_modparam[] = { ++#define X(a, b, c, d, e) \ ++ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, ++ APPHINT_LIST_MODPARAM_COMMON ++ APPHINT_LIST_MODPARAM ++#undef X ++}; ++ ++static const struct apphint_init_data init_data_debuginfo[] = { ++#define X(a, b, c, d, e) \ ++ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, ++ APPHINT_LIST_DEBUGINFO_COMMON ++ APPHINT_LIST_DEBUGINFO ++#undef X ++}; ++ ++static const struct apphint_init_data init_data_debuginfo_device[] = { ++#define X(a, b, c, d, e) \ ++ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} }, ++ APPHINT_LIST_DEBUGINFO_DEVICE_COMMON ++ APPHINT_LIST_DEBUGINFO_DEVICE ++#undef X ++}; ++ ++static const struct apphint_init_data_mapping init_data_debuginfo_device_to_modparams[] = { ++#define X(a, b) \ ++ {APPHINT_ID_ ## a, APPHINT_ID_ ## b}, ++ APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT_COMMON ++ APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT ++#undef X ++}; ++ ++#undef UINT32Bitfield ++#undef UINT32List ++ ++__maybe_unused static const char NO_PARAM_TABLE[] = {}; ++ ++static const struct apphint_param param_lookup[] = { ++#define X(a, b, c, d, e) \ ++ {APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, ARRAY_SIZE(e) }, ++ APPHINT_LIST_ALL ++#undef X ++}; ++ ++static const struct apphint_class_state class_state[] = { ++#define X(a) {APPHINT_CLASS_ ## a, APPHINT_ENABLED_CLASS_ ## a}, ++ APPHINT_CLASS_LIST ++#undef X ++}; ++ ++/* ++******************************************************************************* ++ Global state ++******************************************************************************/ ++/* If the union 
apphint_value becomes such that it is not possible to read ++ * and write atomically, a mutex may be desirable to prevent a read returning ++ * a partially written state. ++ * This would require a statically initialized mutex outside of the ++ * struct apphint_state to prevent use of an uninitialized mutex when ++ * module_params are provided on the command line. ++ * static DEFINE_MUTEX(apphint_mutex); ++ */ ++static struct apphint_state ++{ ++ struct workqueue_struct *workqueue; ++ DI_GROUP *debuginfo_device_rootdir[APPHINT_DEVICES_MAX]; ++ DI_ENTRY *debuginfo_device_entry[APPHINT_DEVICES_MAX][APPHINT_DEBUGINFO_DEVICE_ID_MAX]; ++ DI_GROUP *debuginfo_rootdir; ++ DI_ENTRY *debuginfo_entry[APPHINT_DEBUGINFO_ID_MAX]; ++ DI_GROUP *buildvar_rootdir; ++ DI_ENTRY *buildvar_entry[APPHINT_BUILDVAR_ID_MAX]; ++ ++ unsigned int num_devices; ++ PVRSRV_DEVICE_NODE *devices[APPHINT_DEVICES_MAX]; ++ unsigned int initialized; ++ ++ /* Array contains value space for 1 copy of all apphint values defined ++ * (for device 1) and N copies of device specific apphint values for ++ * multi-device platforms. ++ */ ++ struct apphint_action val[APPHINT_ID_MAX + ((APPHINT_DEVICES_MAX-1)*APPHINT_DEBUGINFO_DEVICE_ID_MAX)]; ++ ++} apphint = { ++/* statically initialise default values to ensure that any module_params ++ * provided on the command line are not overwritten by defaults. ++ */ ++ .val = { ++#define UINT32Bitfield UINT32 ++#define UINT32List UINT32 ++#define X(a, b, c, d, e) \ ++ { {NULL}, {NULL}, NULL, NULL, {.b=d}, false }, ++ APPHINT_LIST_ALL ++#undef X ++#undef UINT32Bitfield ++#undef UINT32List ++ }, ++ .initialized = 0, ++ .num_devices = 0 ++}; ++ ++#define APPHINT_DEBUGINFO_DEVICE_ID_OFFSET (APPHINT_ID_MAX-APPHINT_DEBUGINFO_DEVICE_ID_MAX) ++ ++static inline void ++get_apphint_id_from_action_addr(const struct apphint_action * const addr, ++ APPHINT_ID * const id) ++{ ++ *id = (APPHINT_ID)(addr - apphint.val); ++ if (*id >= APPHINT_ID_MAX) { ++ *id -= APPHINT_DEBUGINFO_DEVICE_ID_OFFSET; ++ *id %= APPHINT_DEBUGINFO_DEVICE_ID_MAX; ++ *id += APPHINT_DEBUGINFO_DEVICE_ID_OFFSET; ++ } ++} ++ ++static inline void ++get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device, ++ int * const offset, ++ APPHINT_ID id) ++{ ++ int i; ++ IMG_BOOL bFound = IMG_FALSE; ++ ++ /* No device offset if not a device specific apphint */ ++ if (APPHINT_OF_DRIVER_NO_DEVICE == device) { ++ *offset = 0; ++ return; ++ } ++ ++ /* Check that the specified ID is a device-specific one. If not we ++ * set the offset to 0 for the global MODPARAM / BUILDVAR etc. 
AppHint ++ */ ++ for (i = 0; i < ARRAY_SIZE(init_data_debuginfo_device); i++) ++ { ++ const struct apphint_init_data *device_init = &init_data_debuginfo_device[i]; ++ ++ if ((IMG_UINT32)id == device_init->id) { ++ bFound = IMG_TRUE; ++ break; ++ } ++ } ++ ++ if (!bFound) { ++ *offset = 0; ++ return; ++ } ++ ++ for (i = 0; device && i < APPHINT_DEVICES_MAX; i++) { ++ if (apphint.devices[i] == device) ++ break; ++ } ++ if (APPHINT_DEVICES_MAX == i) { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__)); ++ i = 0; ++ } ++ *offset = i * APPHINT_DEBUGINFO_DEVICE_ID_MAX; ++} ++ ++/** ++ * apphint_action_worker - perform an action after an AppHint update has been ++ * requested by a UM process ++ * And update the record of the current active value ++ */ ++static void apphint_action_worker(struct work_struct *work) ++{ ++ struct apphint_work *work_pkt = container_of(work, ++ struct apphint_work, ++ work); ++ struct apphint_action *a = work_pkt->action; ++ union apphint_value value = work_pkt->new_value; ++ APPHINT_ID id; ++ PVRSRV_ERROR result = PVRSRV_OK; ++ ++ get_apphint_id_from_action_addr(a, &id); ++ ++ if (a->set.UINT64) { ++ switch (param_lookup[id].data_type) { ++ case APPHINT_DATA_TYPE_UINT64: ++ result = a->set.UINT64(a->device, ++ a->private_data, ++ value.UINT64); ++ break; ++ ++ case APPHINT_DATA_TYPE_UINT32: ++ case APPHINT_DATA_TYPE_UINT32Bitfield: ++ case APPHINT_DATA_TYPE_UINT32List: ++ result = a->set.UINT32(a->device, ++ a->private_data, ++ value.UINT32); ++ break; ++ ++ case APPHINT_DATA_TYPE_BOOL: ++ result = a->set.BOOL(a->device, ++ a->private_data, ++ value.BOOL); ++ break; ++ ++ case APPHINT_DATA_TYPE_STRING: ++ result = a->set.STRING(a->device, ++ a->private_data, ++ value.STRING); ++ kfree(value.STRING); ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: unrecognised data type (%d), index (%d)", ++ __func__, param_lookup[id].data_type, id)); ++ } ++ ++ /* Do not log errors if running in GUEST mode */ ++ if ((PVRSRV_OK != result) && !PVRSRV_VZ_MODE_IS(GUEST)) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed (%s)", ++ __func__, PVRSRVGetErrorString(result))); ++ } ++ } else { ++ if (a->free) { ++ kfree(a->stored.STRING); ++ } ++ a->stored = value; ++ if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { ++ a->free = true; ++ } ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: AppHint value updated before handler is registered, ID(%d)", ++ __func__, id)); ++ } ++ kfree((void *)work_pkt); ++} ++ ++static void apphint_action(union apphint_value new_value, ++ struct apphint_action *action) ++{ ++ struct apphint_work *work_pkt = kmalloc(sizeof(*work_pkt), GFP_KERNEL); ++ ++ /* queue apphint update on a serialized workqueue to avoid races */ ++ if (work_pkt) { ++ work_pkt->new_value = new_value; ++ work_pkt->action = action; ++ INIT_WORK(&work_pkt->work, apphint_action_worker); ++ if (0 == queue_work(apphint.workqueue, &work_pkt->work)) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to queue apphint change request", ++ __func__)); ++ goto err_exit; ++ } ++ } else { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to alloc memory for apphint change request", ++ __func__)); ++ goto err_exit; ++ } ++ return; ++err_exit: ++ kfree(new_value.STRING); ++} ++ ++/** ++ * apphint_read - read the different AppHint data types ++ * return -errno or the buffer size ++ */ ++static int apphint_read(char *buffer, size_t count, APPHINT_ID ue, ++ union apphint_value *value) ++{ ++ APPHINT_DATA_TYPE data_type = param_lookup[ue].data_type; ++ int result = 0; ++ ++ switch (data_type) { ++ case 
APPHINT_DATA_TYPE_UINT64: ++ if (kstrtou64(buffer, 0, &value->UINT64) < 0) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid UINT64 input data for id %d: %s", ++ __func__, ue, buffer)); ++ result = -EINVAL; ++ goto err_exit; ++ } ++ break; ++ case APPHINT_DATA_TYPE_UINT32: ++ if (kstrtou32(buffer, 0, &value->UINT32) < 0) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid UINT32 input data for id %d: %s", ++ __func__, ue, buffer)); ++ result = -EINVAL; ++ goto err_exit; ++ } ++ break; ++ case APPHINT_DATA_TYPE_BOOL: ++ switch (buffer[0]) { ++ case '0': ++ case 'n': ++ case 'N': ++ case 'f': ++ case 'F': ++ value->BOOL = IMG_FALSE; ++ break; ++ case '1': ++ case 'y': ++ case 'Y': ++ case 't': ++ case 'T': ++ value->BOOL = IMG_TRUE; ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid BOOL input data for id %d: %s", ++ __func__, ue, buffer)); ++ result = -EINVAL; ++ goto err_exit; ++ } ++ break; ++ case APPHINT_DATA_TYPE_UINT32List: ++ { ++ int i; ++ struct apphint_lookup *lookup = ++ (struct apphint_lookup *) ++ param_lookup[ue].data_type_helper; ++ int size = param_lookup[ue].helper_size; ++ /* buffer may include '\n', remove it */ ++ char *arg = strsep(&buffer, "\n"); ++ ++ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { ++ result = -EINVAL; ++ goto err_exit; ++ } ++ ++ for (i = 0; i < size; i++) { ++ if (strcasecmp(lookup[i].name, arg) == 0) { ++ value->UINT32 = lookup[i].value; ++ break; ++ } ++ } ++ if (i == size) { ++ if (OSStringLength(arg) == 0) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: No value set for AppHint", ++ __func__)); ++ } else { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unrecognised AppHint value (%s)", ++ __func__, arg)); ++ } ++ result = -EINVAL; ++ } ++ break; ++ } ++ case APPHINT_DATA_TYPE_UINT32Bitfield: ++ { ++ int i; ++ struct apphint_lookup *lookup = ++ (struct apphint_lookup *) ++ param_lookup[ue].data_type_helper; ++ int size = param_lookup[ue].helper_size; ++ /* buffer may include '\n', remove it */ ++ char *string = strsep(&buffer, "\n"); ++ char *token = strsep(&string, ","); ++ ++ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { ++ result = -EINVAL; ++ goto err_exit; ++ } ++ ++ value->UINT32 = 0; ++ /* empty string is valid to clear the bitfield */ ++ while (token && *token) { ++ for (i = 0; i < size; i++) { ++ if (strcasecmp(lookup[i].name, token) == 0) { ++ value->UINT32 |= lookup[i].value; ++ break; ++ } ++ } ++ if (i == size) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unrecognised AppHint value (%s)", ++ __func__, token)); ++ result = -EINVAL; ++ goto err_exit; ++ } ++ token = strsep(&string, ","); ++ } ++ break; ++ } ++ case APPHINT_DATA_TYPE_STRING: ++ { ++ /* buffer may include '\n', remove it */ ++ char *string = strsep(&buffer, "\n"); ++ size_t len = OSStringLength(string); ++ ++ if (!len) { ++ result = -EINVAL; ++ goto err_exit; ++ } ++ ++ ++len; ++ ++ value->STRING = kmalloc(len , GFP_KERNEL); ++ if (!value->STRING) { ++ result = -ENOMEM; ++ goto err_exit; ++ } ++ ++ OSStringLCopy(value->STRING, string, len); ++ break; ++ } ++ default: ++ result = -EINVAL; ++ goto err_exit; ++ } ++ ++err_exit: ++ return (result < 0) ? 
result : count; ++} ++ ++static PVRSRV_ERROR get_apphint_value_from_action(const struct apphint_action * const action, ++ union apphint_value * const value, ++ const PVRSRV_DEVICE_NODE * const psDevNode) ++{ ++ APPHINT_ID id; ++ APPHINT_DATA_TYPE data_type; ++ PVRSRV_ERROR result = PVRSRV_OK; ++ const PVRSRV_DEVICE_NODE *psDevice; ++ ++ get_apphint_id_from_action_addr(action, &id); ++ data_type = param_lookup[id].data_type; ++ ++ /* If we've got an entry that is APPHINT_OF_DRIVER_NO_DEVICE we should use ++ * the higher-level psDevNode value instead. This is the device-node that is ++ * associated with the original debug_dump request. ++ * Note: if we're called with psDevNode == APPHINT_OF_DRIVER_NO_DEVICE ++ * we attempt to use the first registered apphint.devices[0] (if any ++ * devices have been presented). If we have no devices hooked into the ++ * apphint mechanism we just return the default value for the AppHint. ++ */ ++ if (psDevNode == APPHINT_OF_DRIVER_NO_DEVICE) { ++ if (action->device == APPHINT_OF_DRIVER_NO_DEVICE) { ++ if (apphint.num_devices > 0) { ++ psDevice = apphint.devices[0]; ++ } else { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Uninitialised AppHint device for AppHint index (%d)", ++ id)); ++ return PVRSRV_ERROR_RETRY; ++ } ++ } else { ++ psDevice = action->device; ++ } ++ } else { ++ if (action->device == APPHINT_OF_DRIVER_NO_DEVICE) { ++ psDevice = psDevNode; ++ } else { ++ psDevice = action->device; ++ } ++ } ++ ++ if (action->query.UINT64) { ++ switch (data_type) { ++ case APPHINT_DATA_TYPE_UINT64: ++ result = action->query.UINT64(psDevice, ++ action->private_data, ++ &value->UINT64); ++ break; ++ ++ case APPHINT_DATA_TYPE_UINT32: ++ case APPHINT_DATA_TYPE_UINT32Bitfield: ++ case APPHINT_DATA_TYPE_UINT32List: ++ result = action->query.UINT32(psDevice, ++ action->private_data, ++ &value->UINT32); ++ break; ++ ++ case APPHINT_DATA_TYPE_BOOL: ++ result = action->query.BOOL(psDevice, ++ action->private_data, ++ &value->BOOL); ++ break; ++ ++ case APPHINT_DATA_TYPE_STRING: ++ result = action->query.STRING(psDevice, ++ action->private_data, ++ &value->STRING); ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: unrecognised data type (%d), index (%d)", ++ __func__, data_type, id)); ++ } ++ } else { ++ *value = action->stored; ++ } ++ ++ if (PVRSRV_OK != result) { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failed (%d), index (%d)", __func__, result, id)); ++ } ++ ++ return result; ++} ++ ++/** ++ * apphint_write - write the current AppHint data to a buffer ++ * ++ * Returns length written or -errno ++ */ ++static int apphint_write(char *buffer, const size_t size, ++ const struct apphint_action *a) ++{ ++ const struct apphint_param *hint; ++ int result = 0; ++ APPHINT_ID id; ++ union apphint_value value; ++ ++ get_apphint_id_from_action_addr(a, &id); ++ hint = ¶m_lookup[id]; ++ ++ result = get_apphint_value_from_action(a, &value, a->device); ++ ++ switch (hint->data_type) { ++ case APPHINT_DATA_TYPE_UINT64: ++ result += snprintf(buffer + result, size - result, ++ "0x%016llx", ++ value.UINT64); ++ break; ++ case APPHINT_DATA_TYPE_UINT32: ++ result += snprintf(buffer + result, size - result, ++ "0x%08x", ++ value.UINT32); ++ break; ++ case APPHINT_DATA_TYPE_BOOL: ++ result += snprintf(buffer + result, size - result, ++ "%s", ++ value.BOOL ? "Y" : "N"); ++ break; ++ case APPHINT_DATA_TYPE_STRING: ++ if (value.STRING) { ++ result += snprintf(buffer + result, size - result, ++ "%s", ++ *value.STRING ? 
value.STRING : "(none)"); ++ } else { ++ result += snprintf(buffer + result, size - result, ++ "(none)"); ++ } ++ break; ++ case APPHINT_DATA_TYPE_UINT32List: ++ { ++ struct apphint_lookup *lookup = ++ (struct apphint_lookup *) hint->data_type_helper; ++ IMG_UINT32 i; ++ ++ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { ++ result = -EINVAL; ++ goto err_exit; ++ } ++ ++ for (i = 0; i < hint->helper_size; i++) { ++ if (lookup[i].value == value.UINT32) { ++ result += snprintf(buffer + result, ++ size - result, ++ "%s", ++ lookup[i].name); ++ break; ++ } ++ } ++ break; ++ } ++ case APPHINT_DATA_TYPE_UINT32Bitfield: ++ { ++ struct apphint_lookup *lookup = ++ (struct apphint_lookup *) hint->data_type_helper; ++ IMG_UINT32 i; ++ ++ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) { ++ result = -EINVAL; ++ goto err_exit; ++ } ++ ++ for (i = 0; i < hint->helper_size; i++) { ++ if (lookup[i].value & value.UINT32) { ++ result += snprintf(buffer + result, ++ size - result, ++ "%s,", ++ lookup[i].name); ++ } ++ } ++ if (result) { ++ /* remove any trailing ',' */ ++ --result; ++ *(buffer + result) = '\0'; ++ } else { ++ result += snprintf(buffer + result, ++ size - result, "none"); ++ } ++ break; ++ } ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: unrecognised data type (%d), index (%d)", ++ __func__, hint->data_type, id)); ++ result = -EINVAL; ++ } ++ ++err_exit: ++ return result; ++} ++ ++/* ++******************************************************************************* ++ Module parameters initialization - different from debuginfo ++******************************************************************************/ ++/** ++ * apphint_kparam_set - Handle an update of a module parameter ++ * ++ * Returns 0, or -errno. arg is in kp->arg. ++ */ ++static int apphint_kparam_set(const char *val, const struct kernel_param *kp) ++{ ++ char val_copy[APPHINT_BUFFER_SIZE]; ++ APPHINT_ID id; ++ union apphint_value value; ++ int result; ++ ++ /* need to discard const in case of string comparison */ ++ result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE); ++ ++ get_apphint_id_from_action_addr(kp->arg, &id); ++ if (result < APPHINT_BUFFER_SIZE) { ++ result = apphint_read(val_copy, result, id, &value); ++ if (result >= 0) { ++ ((struct apphint_action *)kp->arg)->stored = value; ++ ((struct apphint_action *)kp->arg)->initialised = true; ++ if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) { ++ ((struct apphint_action *)kp->arg)->free = true; ++ } ++ } ++ } else { ++ PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__)); ++ } ++ return (result > 0) ? 0 : result; ++} ++ ++/** ++ * apphint_kparam_get - handle a read of a module parameter ++ * ++ * Returns length written or -errno. Buffer is 4k (ie. be short!) ++ */ ++static int apphint_kparam_get(char *buffer, const struct kernel_param *kp) ++{ ++ return apphint_write(buffer, PAGE_SIZE, kp->arg); ++} ++ ++__maybe_unused ++static const struct kernel_param_ops apphint_kparam_fops = { ++ .set = apphint_kparam_set, ++ .get = apphint_kparam_get, ++}; ++ ++/* ++ * call module_param_cb() for all AppHints listed in APPHINT_LIST_MODPARAM_COMMON + APPHINT_LIST_MODPARAM ++ * apphint_modparam_class_ ## resolves to apphint_modparam_enable() except for ++ * AppHint classes that have been disabled. 
++ */ ++ ++#define apphint_modparam_enable(name, number, perm) \ ++ module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm); ++ ++#define X(a, b, c, d, e) \ ++ apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, 0444) ++ APPHINT_LIST_MODPARAM_COMMON ++ APPHINT_LIST_MODPARAM ++#undef X ++ ++/* ++******************************************************************************* ++ Debug Info get (seq file) operations - supporting functions ++******************************************************************************/ ++static void *apphint_di_start(OSDI_IMPL_ENTRY *s, IMG_UINT64 *pos) ++{ ++ if (*pos == 0) { ++ /* We want only one entry in the sequence, one call to show() */ ++ return (void *) 1; ++ } ++ ++ PVR_UNREFERENCED_PARAMETER(s); ++ ++ return NULL; ++} ++ ++static void apphint_di_stop(OSDI_IMPL_ENTRY *s, void *v) ++{ ++ PVR_UNREFERENCED_PARAMETER(s); ++ PVR_UNREFERENCED_PARAMETER(v); ++} ++ ++static void *apphint_di_next(OSDI_IMPL_ENTRY *s, void *v, IMG_UINT64 *pos) ++{ ++ PVR_UNREFERENCED_PARAMETER(s); ++ PVR_UNREFERENCED_PARAMETER(v); ++ PVR_UNREFERENCED_PARAMETER(pos); ++ return NULL; ++} ++ ++static int apphint_di_show(OSDI_IMPL_ENTRY *s, void *v) ++{ ++ IMG_CHAR km_buffer[APPHINT_BUFFER_SIZE]; ++ int result; ++ void *private = DIGetPrivData(s); ++ ++ PVR_UNREFERENCED_PARAMETER(v); ++ ++ result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, private); ++ if (result < 0) { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failure", __func__)); ++ } else { ++ /* debuginfo requires a trailing \n, module_params don't */ ++ result += snprintf(km_buffer + result, ++ APPHINT_BUFFER_SIZE - result, ++ "\n"); ++ DIPuts(s, km_buffer); ++ } ++ ++ /* have to return 0 to see output */ ++ return (result < 0) ? result : 0; ++} ++ ++/* ++******************************************************************************* ++ Debug Info supporting functions ++******************************************************************************/ ++ ++/** ++ * apphint_set - Handle a DI value update ++ */ ++static IMG_INT64 apphint_set(const IMG_CHAR *buffer, IMG_UINT64 count, ++ IMG_UINT64 *ppos, void *data) ++{ ++ APPHINT_ID id; ++ union apphint_value value; ++ struct apphint_action *action = data; ++ char km_buffer[APPHINT_BUFFER_SIZE]; ++ int result = 0; ++ ++ if (ppos == NULL) ++ return -EIO; ++ ++ if (count >= APPHINT_BUFFER_SIZE) { ++ PVR_DPF((PVR_DBG_ERROR, "%s: String too long (%" IMG_INT64_FMTSPECd ")", ++ __func__, count)); ++ result = -EINVAL; ++ goto err_exit; ++ } ++ ++ /* apphint_read() modifies the buffer so we need to copy it */ ++ memcpy(km_buffer, buffer, count); ++ /* count is larger than real buffer by 1 because DI framework appends ++ * a '\0' character at the end, but here we're ignoring this */ ++ count -= 1; ++ km_buffer[count] = '\0'; ++ ++ get_apphint_id_from_action_addr(action, &id); ++ result = apphint_read(km_buffer, count, id, &value); ++ if (result >= 0) ++ apphint_action(value, action); ++ ++ *ppos += count; ++err_exit: ++ return result; ++} ++ ++/** ++ * apphint_debuginfo_init - Create the specified debuginfo entries ++ */ ++static int apphint_debuginfo_init(const char *sub_dir, ++ unsigned int device_num, ++ unsigned int init_data_size, ++ const struct apphint_init_data *init_data, ++ DI_GROUP *parentdir, ++ DI_GROUP **rootdir, ++ DI_ENTRY *entry[]) ++{ ++ PVRSRV_ERROR result; ++ unsigned int i; ++ unsigned int device_value_offset = device_num * APPHINT_DEBUGINFO_DEVICE_ID_MAX; ++ const DI_ITERATOR_CB iterator = { ++ .pfnStart = apphint_di_start, .pfnStop = apphint_di_stop, ++ 
.pfnNext = apphint_di_next, .pfnShow = apphint_di_show, ++ .pfnWrite = apphint_set, .ui32WriteLenMax = APPHINT_BUFFER_SIZE ++ }; ++ ++ if (*rootdir) { ++ PVR_DPF((PVR_DBG_WARNING, ++ "AppHint DebugFS already created, skipping")); ++ result = -EEXIST; ++ goto err_exit; ++ } ++ ++ result = DICreateGroup(sub_dir, parentdir, rootdir); ++ if (result != PVRSRV_OK) { ++ PVR_DPF((PVR_DBG_WARNING, ++ "Failed to create \"%s\" DebugFS directory.", sub_dir)); ++ goto err_exit; ++ } ++ ++ for (i = 0; i < init_data_size; i++) { ++ if (!class_state[init_data[i].class].enabled) ++ continue; ++ ++ result = DICreateEntry(init_data[i].name, ++ *rootdir, ++ &iterator, ++ (void *) &apphint.val[init_data[i].id + device_value_offset], ++ DI_ENTRY_TYPE_GENERIC, ++ &entry[i]); ++ if (result != PVRSRV_OK) { ++ PVR_DPF((PVR_DBG_WARNING, ++ "Failed to create \"%s/%s\" DebugFS entry.", ++ sub_dir, init_data[i].name)); ++ } ++ } ++ ++ return 0; ++ ++err_exit: ++ return result; ++} ++ ++/** ++ * apphint_debuginfo_deinit- destroy the debuginfo entries ++ */ ++static void apphint_debuginfo_deinit(unsigned int num_entries, ++ DI_GROUP **rootdir, ++ DI_ENTRY *entry[]) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < num_entries; i++) { ++ if (entry[i]) { ++ DIDestroyEntry(entry[i]); ++ } ++ } ++ ++ if (*rootdir) { ++ DIDestroyGroup(*rootdir); ++ *rootdir = NULL; ++ } ++} ++ ++/* ++******************************************************************************* ++ AppHint status dump implementation ++******************************************************************************/ ++#if defined(PDUMP) ++static void apphint_pdump_values(void *pvDeviceNode, ++ const IMG_CHAR *format, ...) ++{ ++ char km_buffer[APPHINT_BUFFER_SIZE]; ++ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS; ++ va_list ap; ++ ++ va_start(ap, format); ++ (void)vsnprintf(km_buffer, APPHINT_BUFFER_SIZE, format, ap); ++ va_end(ap); ++ ++ /* ui32CommentSize set to 0 here as function does not make use of the value. */ ++ PDumpCommentKM(NULL, (PVRSRV_DEVICE_NODE*)pvDeviceNode, 0, km_buffer, ui32Flags); ++} ++#endif ++ ++static IMG_BOOL is_apphint_value_equal(const APPHINT_DATA_TYPE data_type, ++ const union apphint_value * const left, ++ const union apphint_value * const right) ++{ ++ switch (data_type) { ++ case APPHINT_DATA_TYPE_UINT64: ++ return left->UINT64 == right->UINT64; ++ case APPHINT_DATA_TYPE_UINT32: ++ case APPHINT_DATA_TYPE_UINT32List: ++ case APPHINT_DATA_TYPE_UINT32Bitfield: ++ return left->UINT32 == right->UINT32; ++ case APPHINT_DATA_TYPE_BOOL: ++ return left->BOOL == right->BOOL; ++ case APPHINT_DATA_TYPE_STRING: ++ return (OSStringNCompare(left->STRING, right->STRING, OSStringLength(right->STRING) + 1) == 0 ? 
IMG_TRUE : IMG_FALSE); ++ default: ++ PVR_DPF((PVR_DBG_WARNING, "%s: unhandled data type (%d)", __func__, data_type)); ++ return IMG_FALSE; ++ } ++} ++ ++static void apphint_dump_values(const char *group_name, ++ int device_num, ++ const struct apphint_init_data *group_data, ++ int group_size, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ bool list_all, ++ PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ int i, result; ++ int device_value_offset = device_num * APPHINT_DEBUGINFO_DEVICE_ID_MAX; ++ char km_buffer[APPHINT_BUFFER_SIZE]; ++ char count = 0; ++ ++ PVR_DUMPDEBUG_LOG(" %s", group_name); ++ for (i = 0; i < group_size; i++) ++ { ++ IMG_UINT32 id = group_data[i].id; ++ APPHINT_DATA_TYPE data_type = param_lookup[id].data_type; ++ const struct apphint_action *action = &apphint.val[id + device_value_offset]; ++ union apphint_value value; ++ ++ result = get_apphint_value_from_action(action, &value, psDevNode); ++ ++ if (PVRSRV_OK != result) { ++ continue; ++ } ++ ++ /* List only apphints with non-default values */ ++ if (!list_all && ++ is_apphint_value_equal(data_type, &value, &group_data[i].default_value)) { ++ continue; ++ } ++ ++ result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, action); ++ count++; ++ ++ if (result <= 0) { ++ PVR_DUMPDEBUG_LOG(" %s: ", ++ group_data[i].name); ++ } else { ++ PVR_DUMPDEBUG_LOG(" %s: %s", ++ group_data[i].name, km_buffer); ++ } ++ } ++ ++ if (count == 0) { ++ PVR_DUMPDEBUG_LOG(" none"); ++ } ++} ++ ++/** ++ * Callback for debug dump ++ */ ++static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ int i, result; ++ char km_buffer[APPHINT_BUFFER_SIZE]; ++ PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; ++ ++ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) { ++ PVR_DUMPDEBUG_LOG("------[ AppHint Settings ]------"); ++ ++ apphint_dump_values("Build Vars", 0, ++ init_data_buildvar, ARRAY_SIZE(init_data_buildvar), ++ pfnDumpDebugPrintf, pvDumpDebugFile, true, device); ++ ++ apphint_dump_values("Module Params", 0, ++ init_data_modparam, ARRAY_SIZE(init_data_modparam), ++ pfnDumpDebugPrintf, pvDumpDebugFile, false, device); ++ ++ apphint_dump_values("Debug Info Params", 0, ++ init_data_debuginfo, ARRAY_SIZE(init_data_debuginfo), ++ pfnDumpDebugPrintf, pvDumpDebugFile, false, device); ++ ++ for (i = 0; i < APPHINT_DEVICES_MAX; i++) { ++ if (!apphint.devices[i] ++ || (device && device != apphint.devices[i])) ++ continue; ++ ++ result = snprintf(km_buffer, ++ APPHINT_BUFFER_SIZE, ++ "Debug Info Params Device ID: %d", ++ i); ++ if (0 > result) ++ continue; ++ ++ apphint_dump_values(km_buffer, i, ++ init_data_debuginfo_device, ++ ARRAY_SIZE(init_data_debuginfo_device), ++ pfnDumpDebugPrintf, ++ pvDumpDebugFile, ++ false, device); ++ } ++ } ++} ++ ++/* ++******************************************************************************* ++ Public interface ++******************************************************************************/ ++int pvr_apphint_init(void) ++{ ++ int result, i; ++ ++ if (apphint.initialized) { ++ result = -EEXIST; ++ goto err_out; ++ } ++ ++ for (i = 0; i < APPHINT_DEVICES_MAX; i++) ++ apphint.devices[i] = NULL; ++ ++ /* create workqueue with strict execution ordering to ensure no ++ * race conditions when setting/updating apphints from different ++ * contexts ++ */ ++ apphint.workqueue = alloc_workqueue("apphint_workqueue", ++ WQ_UNBOUND | WQ_FREEZABLE, 
1); ++ if (!apphint.workqueue) { ++ result = -ENOMEM; ++ goto err_out; ++ } ++ ++ result = apphint_debuginfo_init("apphint", 0, ++ ARRAY_SIZE(init_data_debuginfo), init_data_debuginfo, ++ NULL, ++ &apphint.debuginfo_rootdir, apphint.debuginfo_entry); ++ if (0 != result) ++ goto err_out; ++ ++ result = apphint_debuginfo_init("buildvar", 0, ++ ARRAY_SIZE(init_data_buildvar), init_data_buildvar, ++ NULL, ++ &apphint.buildvar_rootdir, apphint.buildvar_entry); ++ ++ apphint.initialized = 1; ++ ++err_out: ++ return result; ++} ++ ++int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device) ++{ ++ int result, i; ++ char device_num[APPHINT_BUFFER_SIZE]; ++ unsigned int device_value_offset; ++ ++ if (!apphint.initialized) { ++ result = -EAGAIN; ++ goto err_out; ++ } ++ ++ if (apphint.num_devices+1 > APPHINT_DEVICES_MAX) { ++ result = -EMFILE; ++ goto err_out; ++ } ++ ++ result = snprintf(device_num, APPHINT_BUFFER_SIZE, "%u", apphint.num_devices); ++ if (result < 0) { ++ PVR_DPF((PVR_DBG_WARNING, ++ "snprintf failed (%d)", result)); ++ result = -EINVAL; ++ goto err_out; ++ } ++ ++ /* Set the default values for the new device */ ++ device_value_offset = apphint.num_devices * APPHINT_DEBUGINFO_DEVICE_ID_MAX; ++ for (i = 0; i < APPHINT_DEBUGINFO_DEVICE_ID_MAX; i++) { ++ apphint.val[init_data_debuginfo_device[i].id + device_value_offset].stored ++ = init_data_debuginfo_device[i].default_value; ++ } ++ ++ /* Set value of an apphint if mapping to module param exists for it ++ * and this module parameter has been initialised */ ++ for (i = 0; i < ARRAY_SIZE(init_data_debuginfo_device_to_modparams); i++) { ++ const struct apphint_init_data_mapping *mapping = ++ &init_data_debuginfo_device_to_modparams[i]; ++ const struct apphint_action *modparam_action = ++ &apphint.val[mapping->modparam_apphint_id]; ++ struct apphint_action *device_action = ++ &apphint.val[mapping->device_apphint_id + device_value_offset]; ++ ++ /* Set only if the module parameter was explicitly set during the module ++ * load. 
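++	 * (apphint_kparam_set() sets the .initialised flag when it parses
++	 * the parameter, so only values given on the module command line
++	 * are copied here.)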
 */
++		if (modparam_action->initialised) {
++			device_action->stored = modparam_action->stored;
++		}
++	}
++
++	result = apphint_debuginfo_init(device_num, apphint.num_devices,
++	                                ARRAY_SIZE(init_data_debuginfo_device),
++	                                init_data_debuginfo_device,
++	                                apphint.debuginfo_rootdir,
++	                                &apphint.debuginfo_device_rootdir[apphint.num_devices],
++	                                apphint.debuginfo_device_entry[apphint.num_devices]);
++	if (0 != result)
++		goto err_out;
++
++	apphint.devices[apphint.num_devices] = device;
++	apphint.num_devices++;
++
++	(void)SOPvrDbgRequestNotifyRegister(
++			&device->hAppHintDbgReqNotify,
++			device,
++			apphint_dump_state,
++			DEBUG_REQUEST_APPHINT,
++			device);
++
++err_out:
++	return result;
++}
++
++void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device)
++{
++	int i;
++
++	if (!apphint.initialized)
++		return;
++
++	/* find the device */
++	for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
++		if (apphint.devices[i] == device)
++			break;
++	}
++
++	if (APPHINT_DEVICES_MAX == i)
++		return;
++
++	if (device->hAppHintDbgReqNotify) {
++		(void)SOPvrDbgRequestNotifyUnregister(
++				device->hAppHintDbgReqNotify);
++		device->hAppHintDbgReqNotify = NULL;
++	}
++
++	apphint_debuginfo_deinit(APPHINT_DEBUGINFO_DEVICE_ID_MAX,
++			&apphint.debuginfo_device_rootdir[i],
++			apphint.debuginfo_device_entry[i]);
++
++	apphint.devices[i] = NULL;
++
++	WARN_ON(apphint.num_devices == 0);
++	apphint.num_devices--;
++}
++
++void pvr_apphint_deinit(void)
++{
++	int i;
++
++	if (!apphint.initialized)
++		return;
++
++	/* remove any remaining device data */
++	for (i = 0; apphint.num_devices && i < APPHINT_DEVICES_MAX; i++) {
++		if (apphint.devices[i])
++			pvr_apphint_device_unregister(apphint.devices[i]);
++	}
++
++	/* free all alloc'd string apphints and set to NULL */
++	for (i = 0; i < ARRAY_SIZE(apphint.val); i++) {
++		if (apphint.val[i].free && apphint.val[i].stored.STRING) {
++			kfree(apphint.val[i].stored.STRING);
++			apphint.val[i].stored.STRING = NULL;
++			apphint.val[i].free = false;
++		}
++	}
++
++	apphint_debuginfo_deinit(APPHINT_DEBUGINFO_ID_MAX,
++			&apphint.debuginfo_rootdir, apphint.debuginfo_entry);
++	apphint_debuginfo_deinit(APPHINT_BUILDVAR_ID_MAX,
++			&apphint.buildvar_rootdir, apphint.buildvar_entry);
++
++	destroy_workqueue(apphint.workqueue);
++
++	apphint.initialized = 0;
++}
++
++void pvr_apphint_dump_state(PVRSRV_DEVICE_NODE *device)
++{
++#if defined(PDUMP)
++	/* NB. apphint_pdump_values() is the pfnDumpDebugPrintf
++	 * function used when PDUMP is defined.
++	 * apphint_pdump_values() calls PDumpCommentKM(), which
++	 * requires the device, but as it is only called as a
++	 * DUMPDEBUG_PRINTF_FUNC it is only passed pvDumpDebugFile
++	 * (which happens to be the 4th parameter in the call to
++	 * apphint_dump_state() below).
++	 * Hence, we also need to pass device as the 4th parameter.
++	 */
++	apphint_dump_state(device, DEBUG_REQUEST_VERBOSITY_HIGH,
++	                   apphint_pdump_values, device);
++#endif
++	apphint_dump_state(device, DEBUG_REQUEST_VERBOSITY_HIGH,
++	                   NULL, NULL);
++}
++
++
++int pvr_apphint_get_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 *pVal)
++{
++	int error = -ERANGE;
++	int device_offset = (device != NULL) ?
device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; ++ ++ if (ue < APPHINT_ID_MAX) { ++ if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints ++ { ++ *pVal = apphint.val[ue + device_offset].stored.UINT64; ++ error = 0; ++ } ++ else ++ { ++ *pVal = apphint.val[ue].stored.UINT64; ++ error = 0; ++ } ++ } ++ return error; ++} ++ ++int pvr_apphint_get_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 *pVal) ++{ ++ int error = -ERANGE; ++ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; ++ ++ if (ue < APPHINT_ID_MAX) { ++ if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints ++ { ++ *pVal = apphint.val[ue + device_offset].stored.UINT32; ++ error = 0; ++ } ++ else ++ { ++ *pVal = apphint.val[ue].stored.UINT32; ++ error = 0; ++ } ++ } ++ return error; ++} ++ ++int pvr_apphint_get_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL *pVal) ++{ ++ int error = -ERANGE; ++ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; ++ ++ if (ue < APPHINT_ID_MAX) { ++ if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints ++ { ++ *pVal = apphint.val[ue + device_offset].stored.BOOL; ++ error = 0; ++ } ++ else ++ { ++ *pVal = apphint.val[ue].stored.BOOL; ++ error = 0; ++ } ++ } ++ return error; ++} ++ ++int pvr_apphint_get_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size) ++{ ++ int error = -ERANGE; ++ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; ++ ++ if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) { ++ if ((int)ue > APPHINT_DEBUGINFO_DEVICE_ID_OFFSET) // From this point, we're in the device apphints ++ { ++ if (OSStringLCopy(pBuffer, apphint.val[ue + device_offset].stored.STRING, size) < size) { ++ error = 0; ++ } ++ } ++ else ++ { ++ if (OSStringLCopy(pBuffer, apphint.val[ue].stored.STRING, size) < size) { ++ error = 0; ++ } ++ } ++ } ++ return error; ++} ++ ++int pvr_apphint_set_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 Val) ++{ ++ int error = -ERANGE; ++ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; ++ ++ if ((ue < APPHINT_ID_MAX) && ++ (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT64)) { ++ ++ if (apphint.val[ue + device_offset].set.UINT64) { ++ apphint.val[ue + device_offset].set.UINT64(apphint.val[ue + device_offset].device, ++ apphint.val[ue + device_offset].private_data, ++ Val); ++ } else { ++ apphint.val[ue + device_offset].stored.UINT64 = Val; ++ } ++ apphint.val[ue].device = device; ++ error = 0; ++ } ++ ++ return error; ++} ++ ++int pvr_apphint_set_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 Val) ++{ ++ int error = -ERANGE; ++ int device_offset = (device != NULL) ? 
device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; ++ ++ if ((ue < APPHINT_ID_MAX) && ++ (param_lookup[ue].data_type == APPHINT_DATA_TYPE_UINT32)) { ++ ++ if (apphint.val[ue + device_offset].set.UINT32) { ++ apphint.val[ue + device_offset].set.UINT32(apphint.val[ue + device_offset].device, ++ apphint.val[ue + device_offset].private_data, ++ Val); ++ } else { ++ apphint.val[ue + device_offset].stored.UINT32 = Val; ++ } ++ apphint.val[ue].device = device; ++ error = 0; ++ } ++ ++ return error; ++} ++ ++int pvr_apphint_set_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL Val) ++{ ++ int error = -ERANGE; ++ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; ++ ++ if ((ue < APPHINT_ID_MAX) && ++ (param_lookup[ue].data_type == APPHINT_DATA_TYPE_BOOL)) { ++ ++ error = 0; ++ if (apphint.val[ue + device_offset].set.BOOL) { ++ apphint.val[ue + device_offset].set.BOOL(apphint.val[ue + device_offset].device, ++ apphint.val[ue + device_offset].private_data, ++ Val); ++ } else { ++ apphint.val[ue + device_offset].stored.BOOL = Val; ++ } ++ apphint.val[ue].device = device; ++ } ++ ++ return error; ++} ++ ++int pvr_apphint_set_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size) ++{ ++ int error = -ERANGE; ++ int device_offset = (device != NULL) ? device->sDevId.ui32InternalID * APPHINT_DEBUGINFO_DEVICE_ID_MAX : 0; ++ ++ if ((ue < APPHINT_ID_MAX) && ++ ((param_lookup[ue].data_type == APPHINT_DATA_TYPE_STRING) && ++ apphint.val[ue + device_offset].stored.STRING)) { ++ ++ if (apphint.val[ue + device_offset].set.STRING) { ++ error = apphint.val[ue + device_offset].set.STRING(apphint.val[ue + device_offset].device, ++ apphint.val[ue + device_offset].private_data, ++ pBuffer); ++ } else { ++ if (strlcpy(apphint.val[ue + device_offset].stored.STRING, pBuffer, size) < size) { ++ error = 0; ++ } ++ } ++ apphint.val[ue].device = device; ++ } ++ ++ return error; ++} ++ ++void pvr_apphint_register_handlers_uint64(APPHINT_ID id, ++ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value), ++ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value), ++ const PVRSRV_DEVICE_NODE *device, ++ const void *private_data) ++{ ++ int device_value_offset; ++ ++ PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)", ++ __func__, id, query, set, device, private_data)); ++ ++ if (id >= APPHINT_ID_MAX) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: AppHint ID (%d) is out of range, max (%d)", ++ __func__, id, APPHINT_ID_MAX-1)); ++ return; ++ } ++ ++ get_value_offset_from_device(device, &device_value_offset, id); ++ ++ switch (param_lookup[id].data_type) { ++ case APPHINT_DATA_TYPE_UINT64: ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Does not match AppHint data type for ID (%d)", ++ __func__, id)); ++ return; ++ } ++ ++ apphint.val[id + device_value_offset] = (struct apphint_action){ ++ .query.UINT64 = query, ++ .set.UINT64 = set, ++ .device = device, ++ .private_data = private_data, ++ .stored = apphint.val[id + device_value_offset].stored ++ }; ++} ++ ++void pvr_apphint_register_handlers_uint32(APPHINT_ID id, ++ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value), ++ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value), ++ const PVRSRV_DEVICE_NODE *device, ++ const void *private_data) ++{ ++ int device_value_offset; ++ ++ 
PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)", ++ __func__, id, query, set, device, private_data)); ++ ++ if (id >= APPHINT_ID_MAX) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: AppHint ID (%d) is out of range, max (%d)", ++ __func__, id, APPHINT_ID_MAX-1)); ++ return; ++ } ++ ++ get_value_offset_from_device(device, &device_value_offset, id); ++ ++ switch (param_lookup[id].data_type) { ++ case APPHINT_DATA_TYPE_UINT32: ++ case APPHINT_DATA_TYPE_UINT32Bitfield: ++ case APPHINT_DATA_TYPE_UINT32List: ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Does not match AppHint data type for ID (%d)", ++ __func__, id)); ++ return; ++ } ++ ++ apphint.val[id + device_value_offset] = (struct apphint_action){ ++ .query.UINT32 = query, ++ .set.UINT32 = set, ++ .device = device, ++ .private_data = private_data, ++ .stored = apphint.val[id + device_value_offset].stored ++ }; ++} ++ ++void pvr_apphint_register_handlers_bool(APPHINT_ID id, ++ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value), ++ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value), ++ const PVRSRV_DEVICE_NODE *device, ++ const void *private_data) ++{ ++ int device_value_offset; ++ ++ PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)", ++ __func__, id, query, set, device, private_data)); ++ ++ if (id >= APPHINT_ID_MAX) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: AppHint ID (%d) is out of range, max (%d)", ++ __func__, id, APPHINT_ID_MAX-1)); ++ return; ++ } ++ ++ get_value_offset_from_device(device, &device_value_offset, id); ++ ++ switch (param_lookup[id].data_type) { ++ case APPHINT_DATA_TYPE_BOOL: ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Does not match AppHint data type for ID (%d)", ++ __func__, id)); ++ return; ++ } ++ ++ apphint.val[id + device_value_offset] = (struct apphint_action){ ++ .query.BOOL = query, ++ .set.BOOL = set, ++ .device = device, ++ .private_data = private_data, ++ .stored = apphint.val[id + device_value_offset].stored ++ }; ++} ++ ++void pvr_apphint_register_handlers_string(APPHINT_ID id, ++ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value), ++ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value), ++ const PVRSRV_DEVICE_NODE *device, ++ const void *private_data) ++{ ++ int device_value_offset; ++ ++ PVR_DPF((APPHINT_DPF_LEVEL, "%s(%d, %p, %p, %p, %p)", ++ __func__, id, query, set, device, private_data)); ++ ++ if (id >= APPHINT_ID_MAX) { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: AppHint ID (%d) is out of range, max (%d)", ++ __func__, id, APPHINT_ID_MAX-1)); ++ return; ++ } ++ ++ get_value_offset_from_device(device, &device_value_offset, id); ++ ++ switch (param_lookup[id].data_type) { ++ case APPHINT_DATA_TYPE_STRING: ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Does not match AppHint data type for ID (%d)", ++ __func__, id)); ++ return; ++ } ++ ++ apphint.val[id + device_value_offset] = (struct apphint_action){ ++ .query.STRING = query, ++ .set.STRING = set, ++ .device = device, ++ .private_data = private_data, ++ .stored = apphint.val[id + device_value_offset].stored ++ }; ++} ++ ++/* EOF */ +diff --git a/drivers/gpu/drm/img-rogue/km_apphint.h b/drivers/gpu/drm/img-rogue/km_apphint.h +new file mode 100644 +index 000000000000..71e2ce94000a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/km_apphint.h +@@ -0,0 +1,99 @@ ++/*************************************************************************/ /*! 
++@File km_apphint.h ++@Title Apphint internal header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Linux kernel AppHint control ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef KM_APPHINT_H ++#define KM_APPHINT_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include "pvrsrv_apphint.h" ++#include "km_apphint_defs.h" ++#include "device.h" ++ ++int pvr_apphint_init(void); ++void pvr_apphint_deinit(void); ++int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device); ++void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device); ++void pvr_apphint_dump_state(PVRSRV_DEVICE_NODE *device); ++ ++int pvr_apphint_get_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 *pVal); ++int pvr_apphint_get_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 *pVal); ++int pvr_apphint_get_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL *pVal); ++int pvr_apphint_get_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size); ++ ++int pvr_apphint_set_uint64(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT64 Val); ++int pvr_apphint_set_uint32(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_UINT32 Val); ++int pvr_apphint_set_bool(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_BOOL Val); ++int pvr_apphint_set_string(PVRSRV_DEVICE_NODE *device, APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size); ++ ++void pvr_apphint_register_handlers_uint64(APPHINT_ID id, ++ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value), ++ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value), ++ const PVRSRV_DEVICE_NODE *device, ++ const void * private_data); ++void pvr_apphint_register_handlers_uint32(APPHINT_ID id, ++ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value), ++ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value), ++ const PVRSRV_DEVICE_NODE *device, ++ const void *private_data); ++void pvr_apphint_register_handlers_bool(APPHINT_ID id, ++ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value), ++ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value), ++ const PVRSRV_DEVICE_NODE *device, ++ const void *private_data); ++void pvr_apphint_register_handlers_string(APPHINT_ID id, ++ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value), ++ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value), ++ const PVRSRV_DEVICE_NODE *device, ++ const void *private_data); ++ ++#if defined(__cplusplus) ++} ++#endif ++#endif /* KM_APPHINT_H */ ++ ++/****************************************************************************** ++ End of file (km_apphint.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/km_apphint_defs.h b/drivers/gpu/drm/img-rogue/km_apphint_defs.h +new file mode 100644 +index 000000000000..16fc36b4e0a1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/km_apphint_defs.h +@@ -0,0 +1,160 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services AppHint definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++#include "km_apphint_defs_common.h" ++ ++#ifndef KM_APPHINT_DEFS_H ++#define KM_APPHINT_DEFS_H ++ ++/* NB: The 'DEVICE' AppHints must be last in this list as they will be ++ * duplicated in the case of a driver supporting multiple devices ++ */ ++#define APPHINT_LIST_ALL \ ++ APPHINT_LIST_BUILDVAR_COMMON \ ++ APPHINT_LIST_BUILDVAR \ ++ APPHINT_LIST_MODPARAM_COMMON \ ++ APPHINT_LIST_MODPARAM \ ++ APPHINT_LIST_DEBUGINFO_COMMON \ ++ APPHINT_LIST_DEBUGINFO \ ++ APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \ ++ APPHINT_LIST_DEBUGINFO_DEVICE ++ ++ ++/* ++******************************************************************************* ++ Build variables (rogue-specific) ++ All of these should be configurable only through the 'default' value ++******************************************************************************/ ++#define APPHINT_LIST_BUILDVAR ++ ++/* ++******************************************************************************* ++ Module parameters (rogue-specific) ++******************************************************************************/ ++#define APPHINT_LIST_MODPARAM \ ++/* name, type, class, default, helper, */ \ ++X(EnableCDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE, NO_PARAM_TABLE ) \ ++\ ++X(HWPerfDisableCustomCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NO_PARAM_TABLE ) \ ++X(ValidateSOCUSCTimer, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATESOCUSCTIMERS, NO_PARAM_TABLE ) \ ++X(ECCRAMErrInj, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ ++\ ++X(TFBCCompressionControlGroup, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLGROUP, NO_PARAM_TABLE ) \ ++X(TFBCCompressionControlScheme, UINT32, VALIDATION, PVRSRV_APPHINT_TFBCCOMPRESSIONCONTROLSCHEME, NO_PARAM_TABLE ) \ ++X(TFBCCompressionControlYUVFormat, BOOL, VALIDATION, 0, NO_PARAM_TABLE ) \ ++ ++/* ++******************************************************************************* ++ Debugfs parameters (rogue-specific) - driver configuration ++******************************************************************************/ ++#define APPHINT_LIST_DEBUGINFO \ ++/* name, type, class, default, helper, */ \ ++ ++/* ++******************************************************************************* ++ Debugfs parameters (rogue-specific) - device configuration ++******************************************************************************/ ++#define APPHINT_LIST_DEBUGINFO_DEVICE \ ++/* name, type, class, default, helper, */ \ ++ ++/* ++******************************************************************************* ++ Mapping between debugfs parameters and module parameters. ++ This mapping is used to initialise device specific apphints from module ++ parameters. Each entry in this table will provide a default value to all ++ devices (i.e. if there is more than one device each device's value will ++ be initialised). 
++******************************************************************************/ ++#define APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT \ ++/* debuginfo device apphint name modparam name */ ++ ++/* ++******************************************************************************* ++ ++ Table generated enums ++ ++******************************************************************************/ ++/* Unique ID for all AppHints */ ++typedef enum { ++#define X(a, b, c, d, e) APPHINT_ID_ ## a, ++ APPHINT_LIST_ALL ++#undef X ++ APPHINT_ID_MAX ++} APPHINT_ID; ++ ++/* ID for build variable Apphints - used for build variable only structures */ ++typedef enum { ++#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a, ++ APPHINT_LIST_BUILDVAR_COMMON ++ APPHINT_LIST_BUILDVAR ++#undef X ++ APPHINT_BUILDVAR_ID_MAX ++} APPHINT_BUILDVAR_ID; ++ ++/* ID for Modparam Apphints - used for modparam only structures */ ++typedef enum { ++#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a, ++ APPHINT_LIST_MODPARAM_COMMON ++ APPHINT_LIST_MODPARAM ++#undef X ++ APPHINT_MODPARAM_ID_MAX ++} APPHINT_MODPARAM_ID; ++ ++/* ID for Debugfs Apphints - used for debugfs only structures */ ++typedef enum { ++#define X(a, b, c, d, e) APPHINT_DEBUGINFO_ID_ ## a, ++ APPHINT_LIST_DEBUGINFO_COMMON ++ APPHINT_LIST_DEBUGINFO ++#undef X ++ APPHINT_DEBUGINFO_ID_MAX ++} APPHINT_DEBUGINFO_ID; ++ ++/* ID for Debugfs Device Apphints - used for debugfs device only structures */ ++typedef enum { ++#define X(a, b, c, d, e) APPHINT_DEBUGINFO_DEVICE_ID_ ## a, ++ APPHINT_LIST_DEBUGINFO_DEVICE_COMMON ++ APPHINT_LIST_DEBUGINFO_DEVICE ++#undef X ++ APPHINT_DEBUGINFO_DEVICE_ID_MAX ++} APPHINT_DEBUGINFO_DEVICE_ID; ++ ++#endif /* KM_APPHINT_DEFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/km_apphint_defs_common.h b/drivers/gpu/drm/img-rogue/km_apphint_defs_common.h +new file mode 100644 +index 000000000000..987d37c10c42 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/km_apphint_defs_common.h +@@ -0,0 +1,280 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services AppHint definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++ ++#ifndef KM_APPHINT_DEFS_COMMON_H ++#define KM_APPHINT_DEFS_COMMON_H ++ ++/* ++******************************************************************************* ++ Build variables ++ All of these should be configurable only through the 'default' value ++******************************************************************************/ ++#define APPHINT_LIST_BUILDVAR_COMMON \ ++/* name, type, class, default, helper, */ \ ++X(EnableTrustedDeviceAceConfig, BOOL, GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG, NO_PARAM_TABLE ) \ ++X(CleanupThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADPRIORITY, NO_PARAM_TABLE ) \ ++X(WatchdogThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY, NO_PARAM_TABLE ) \ ++X(HWPerfClientBufferSize, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE, NO_PARAM_TABLE ) \ ++ ++/* ++******************************************************************************* ++ Module parameters ++******************************************************************************/ ++#define APPHINT_LIST_MODPARAM_COMMON \ ++/* name, type, class, default, helper, */ \ ++X(GeneralNon4KHeapPageSize, UINT32, ALWAYS, PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE, NO_PARAM_TABLE ) \ ++\ ++X(EnableSignatureChecks, BOOL, PDUMP, PVRSRV_APPHINT_ENABLESIGNATURECHECKS, NO_PARAM_TABLE ) \ ++X(SignatureChecksBufSize, UINT32, PDUMP, PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE, NO_PARAM_TABLE ) \ ++\ ++X(DisableClockGating, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLECLOCKGATING, NO_PARAM_TABLE ) \ ++X(DisableDMOverlap, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEDMOVERLAP, NO_PARAM_TABLE ) \ ++\ ++X(EnableRandomContextSwitch, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLERANDOMCONTEXTSWITCH, NO_PARAM_TABLE ) \ ++X(EnableSoftResetContextSwitch, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLESOFTRESETCNTEXTSWITCH, NO_PARAM_TABLE ) \ ++X(EnableFWContextSwitch, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH, NO_PARAM_TABLE ) \ ++X(FWContextSwitchProfile, UINT32, VALIDATION, PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE, NO_PARAM_TABLE ) \ ++\ ++X(EnableRDPowerIsland, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLERDPOWERISLAND, NO_PARAM_TABLE ) \ ++\ ++X(DriverMode, UINT32, ALWAYS, PVRSRV_APPHINT_DRIVERMODE, NO_PARAM_TABLE ) \ ++\ ++X(FirmwarePerf, UINT32, VALIDATION, PVRSRV_APPHINT_FIRMWAREPERF, NO_PARAM_TABLE ) \ ++\ ++X(HWPerfFWBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB, NO_PARAM_TABLE ) \ ++X(HWPerfHostBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB, NO_PARAM_TABLE ) \ ++X(HWPerfHostThreadTimeoutInMS, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS, NO_PARAM_TABLE ) \ ++\ ++X(JonesDisableMask, UINT32, VALIDATION, 
PVRSRV_APPHINT_JONESDISABLEMASK, NO_PARAM_TABLE ) \ ++X(NewFilteringMode, BOOL, VALIDATION, PVRSRV_APPHINT_NEWFILTERINGMODE, NO_PARAM_TABLE ) \ ++X(TruncateMode, UINT32, VALIDATION, PVRSRV_APPHINT_TRUNCATEMODE, NO_PARAM_TABLE ) \ ++X(EmuMaxFreq, UINT32, ALWAYS, PVRSRV_APPHINT_EMUMAXFREQ, NO_PARAM_TABLE ) \ ++X(GPIOValidationMode, UINT32, VALIDATION, PVRSRV_APPHINT_GPIOVALIDATIONMODE, NO_PARAM_TABLE ) \ ++X(RGXBVNC, STRING, ALWAYS, PVRSRV_APPHINT_RGXBVNC, NO_PARAM_TABLE ) \ ++\ ++X(FWContextSwitchCrossDM, UINT32, ALWAYS, 0, NO_PARAM_TABLE ) \ ++X(ValidateIrq, BOOL, VALIDATION, PVRSRV_APPHINT_VALIDATEIRQ, NO_PARAM_TABLE ) \ ++\ ++X(TPUTrilinearFracMaskPDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ ++X(TPUTrilinearFracMaskVDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ ++X(TPUTrilinearFracMaskCDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ ++X(TPUTrilinearFracMaskTDM, UINT32, VALIDATION, 0xF, NO_PARAM_TABLE ) \ ++X(HTBufferSizeInKB, UINT32, ALWAYS, PVRSRV_APPHINT_HTBUFFERSIZE, NO_PARAM_TABLE ) \ ++X(FWTraceBufSizeInDWords, UINT32, ALWAYS, PVRSRV_APPHINT_FWTRACEBUFSIZEINDWORDS, NO_PARAM_TABLE ) \ ++\ ++X(EnablePageFaultDebug, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG, NO_PARAM_TABLE ) \ ++X(EnableFullSyncTracking, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING, NO_PARAM_TABLE ) \ ++X(IgnoreHWReportedBVNC, BOOL, ALWAYS, PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC, NO_PARAM_TABLE ) \ ++\ ++X(PhysMemTestPasses, UINT32, ALWAYS, PVRSRV_APPHINT_PHYSMEMTESTPASSES, NO_PARAM_TABLE ) \ ++\ ++X(FBCDCVersionOverride, UINT32, VALIDATION, PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE, NO_PARAM_TABLE ) \ ++X(TestSLRInterval, UINT32, VALIDATION, PVRSRV_APPHINT_TESTSLRINTERVAL, NO_PARAM_TABLE ) \ ++X(EnablePollOnChecksumErrorStatus, UINT32, VALIDATION, 0, NO_PARAM_TABLE ) \ ++X(RiscvDmiTest, BOOL, VALIDATION, PVRSRV_APPHINT_RISCVDMITEST, NO_PARAM_TABLE ) \ ++X(DevMemFWHeapPolicy, UINT32, ALWAYS, PVRSRV_APPHINT_FIRMWARE_HEAP_POLICY, NO_PARAM_TABLE ) \ ++\ ++X(EnableAPMAll, UINT32, VALIDATION, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE ) \ ++X(KernelCCBSizeLog2, UINT32, VALIDATION, PVRSRV_APPHINT_KCCB_SIZE_LOG2, NO_PARAM_TABLE ) ++ ++/* ++******************************************************************************* ++ Debugfs parameters - driver configuration ++******************************************************************************/ ++#define APPHINT_LIST_DEBUGINFO_COMMON \ ++/* name, type, class, default, helper, */ \ ++X(EnableHTBLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLEHTBLOGGROUP, htb_loggroup_tbl ) \ ++X(HTBOperationMode, UINT32List, ALWAYS, PVRSRV_APPHINT_HTBOPERATIONMODE, htb_opmode_tbl ) \ ++X(EnableFTraceGPU, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFTRACEGPU, NO_PARAM_TABLE ) \ ++X(HWPerfClientFilter_Services, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES, NO_PARAM_TABLE ) \ ++X(HWPerfClientFilter_EGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL, NO_PARAM_TABLE ) \ ++X(HWPerfClientFilter_OpenGLES, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES, NO_PARAM_TABLE ) \ ++X(HWPerfClientFilter_OpenCL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL, NO_PARAM_TABLE ) \ ++X(HWPerfClientFilter_Vulkan, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN, NO_PARAM_TABLE ) \ ++X(HWPerfClientFilter_OpenGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGL, NO_PARAM_TABLE ) \ ++X(CacheOpConfig, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPCONFIG, NO_PARAM_TABLE ) \ ++X(CacheOpUMKMThresholdSize, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE, 
NO_PARAM_TABLE ) \ ++ ++/* ++******************************************************************************* ++ Debugfs parameters - device configuration ++******************************************************************************/ ++#define APPHINT_LIST_DEBUGINFO_DEVICE_COMMON \ ++/* name, type, class, default, helper, */ \ ++/* Device Firmware config */\ ++X(AssertOnHWRTrigger, BOOL, ALWAYS, APPHNT_BLDVAR_ASSERTONHWRTRIGGER, NO_PARAM_TABLE ) \ ++X(AssertOutOfMemory, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTOUTOFMEMORY, NO_PARAM_TABLE ) \ ++X(CheckMList, BOOL, ALWAYS, PVRSRV_APPHINT_CHECKMLIST, NO_PARAM_TABLE ) \ ++X(EnableLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLELOGGROUP, fwt_loggroup_tbl ) \ ++X(FirmwareLogType, UINT32List, ALWAYS, PVRSRV_APPHINT_FIRMWARELOGTYPE, fwt_logtype_tbl ) \ ++X(HWRDebugDumpLimit, UINT32, ALWAYS, PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT, NO_PARAM_TABLE ) \ ++X(TimeCorrClock, UINT32List, ALWAYS, PVRSRV_APPHINT_TIMECORRCLOCK, timecorr_clk_tbl ) \ ++X(HWPerfFWFilter, UINT64, ALWAYS, PVRSRV_APPHINT_HWPERFFWFILTER, NO_PARAM_TABLE ) \ ++/* Device host config */ \ ++X(EnableAPM, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE ) \ ++X(DisableFEDLogging, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEFEDLOGGING, NO_PARAM_TABLE ) \ ++X(ZeroFreelist, BOOL, ALWAYS, PVRSRV_APPHINT_ZEROFREELIST, NO_PARAM_TABLE ) \ ++X(DisablePDumpPanic, BOOL, PDUMP, PVRSRV_APPHINT_DISABLEPDUMPPANIC, NO_PARAM_TABLE ) \ ++X(EnableFWPoisonOnFree, BOOL, DEBUG, PVRSRV_APPHINT_ENABLEFWPOISONONFREE, NO_PARAM_TABLE ) \ ++X(GPUUnitsPowerChange, BOOL, VALIDATION, PVRSRV_APPHINT_GPUUNITSPOWERCHANGE, NO_PARAM_TABLE ) \ ++X(HWPerfHostFilter, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFHOSTFILTER, NO_PARAM_TABLE ) ++ ++/* ++******************************************************************************* ++ Mapping between debugfs parameters and module parameters. ++ This mapping is used to initialise device specific apphints from module ++ parameters. ++******************************************************************************/ ++#define APPHINT_LIST_DEBUIGINFO_DEVICE_X_MODPARAM_INIT_COMMON \ ++/* debuginfo device apphint name modparam name */ \ ++X(EnableAPM, EnableAPMAll) ++ ++/* ++******************************************************************************* ++ * Types used in the APPHINT_LIST_ lists must be defined here. ++ * New types require specific handling code to be added ++******************************************************************************/ ++#define APPHINT_DATA_TYPE_LIST \ ++X(BOOL) \ ++X(UINT64) \ ++X(UINT32) \ ++X(UINT32Bitfield) \ ++X(UINT32List) \ ++X(STRING) ++ ++#define APPHINT_CLASS_LIST \ ++X(ALWAYS) \ ++X(NEVER) \ ++X(DEBUG) \ ++X(PDUMP) \ ++X(VALIDATION) \ ++X(GPUVIRT_VAL) ++ ++/* ++******************************************************************************* ++ Visibility control for module parameters ++ These bind build variables to AppHint Visibility Groups. 
++******************************************************************************/ ++#define APPHINT_ENABLED_CLASS_ALWAYS IMG_TRUE ++#define APPHINT_ENABLED_CLASS_NEVER IMG_FALSE ++#define apphint_modparam_class_ALWAYS(a, b, c) apphint_modparam_enable(a, b, c) ++#if defined(DEBUG) ++ #define APPHINT_ENABLED_CLASS_DEBUG IMG_TRUE ++ #define apphint_modparam_class_DEBUG(a, b, c) apphint_modparam_enable(a, b, c) ++#else ++ #define APPHINT_ENABLED_CLASS_DEBUG IMG_FALSE ++ #define apphint_modparam_class_DEBUG(a, b, c) ++#endif ++#if defined(PDUMP) ++ #define APPHINT_ENABLED_CLASS_PDUMP IMG_TRUE ++ #define apphint_modparam_class_PDUMP(a, b, c) apphint_modparam_enable(a, b, c) ++#else ++ #define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE ++ #define apphint_modparam_class_PDUMP(a, b, c) ++#endif ++#if defined(SUPPORT_VALIDATION) ++ #define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE ++ #define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c) ++#else ++ #define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE ++ #define apphint_modparam_class_VALIDATION(a, b, c) ++#endif ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE ++ #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c) ++#else ++ #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_FALSE ++ #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) ++#endif ++ ++/* ++******************************************************************************* ++ AppHint defaults based on other build parameters ++******************************************************************************/ ++#if defined(ASSERTONHWRTRIGGER_DEFAULT_ENABLED) ++ #define APPHNT_BLDVAR_ASSERTONHWRTRIGGER 1 ++#else ++ #define APPHNT_BLDVAR_ASSERTONHWRTRIGGER 0 ++#endif ++#if defined(DEBUG) ++ #define APPHNT_BLDVAR_DEBUG 1 ++ #define APPHNT_BLDVAR_DBGDUMPLIMIT RGXFWIF_HWR_DEBUG_DUMP_ALL ++#else ++ #define APPHNT_BLDVAR_DEBUG 0 ++ #define APPHNT_BLDVAR_DBGDUMPLIMIT 1 ++#endif ++#if defined(PDUMP) ++#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_TRUE ++#else ++#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_FALSE ++#endif ++#if defined(DEBUG) || defined(SUPPORT_VALIDATION) ++#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_TRUE ++#else ++#define APPHNT_BLDVAR_ENABLEPAGEFAULTDEBUG IMG_FALSE ++#endif ++ ++#if defined(DEBUG) ++ #define APPHNT_PHYSMEMTEST_ENABLE 1 ++#else ++ #define APPHNT_PHYSMEMTEST_ENABLE 0 ++#endif ++ ++/* Data types and actions */ ++typedef enum { ++ APPHINT_DATA_TYPE_INVALID = 0, ++#define X(a) APPHINT_DATA_TYPE_ ## a, ++ APPHINT_DATA_TYPE_LIST ++#undef X ++ APPHINT_DATA_TYPE_MAX ++} APPHINT_DATA_TYPE; ++ ++typedef enum { ++#define X(a) APPHINT_CLASS_ ## a, ++ APPHINT_CLASS_LIST ++#undef X ++ APPHINT_CLASS_MAX ++} APPHINT_CLASS; ++ ++#endif /* KM_APPHINT_DEFS_COMMON_H */ +diff --git a/drivers/gpu/drm/img-rogue/linkage.h b/drivers/gpu/drm/img-rogue/linkage.h +new file mode 100644 +index 000000000000..3f24dc68eff6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/linkage.h +@@ -0,0 +1,52 @@ ++/*************************************************************************/ /*! ++@File ++@Title Linux specific Services code internal interfaces ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Interfaces between various parts of the Linux specific ++ Services code, that don't have any other obvious ++ header file to go into. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(LINKAGE_H) ++#define LINKAGE_H ++ ++PVRSRV_ERROR PVROSFuncInit(void); ++void PVROSFuncDeInit(void); ++ ++#endif /* !defined(LINKAGE_H) */ +diff --git a/drivers/gpu/drm/img-rogue/linux_sw_sync.h b/drivers/gpu/drm/img-rogue/linux_sw_sync.h +new file mode 100644 +index 000000000000..c12c650294a2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/linux_sw_sync.h +@@ -0,0 +1,52 @@ ++/*************************************************************************/ /*! ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef _UAPI_LINUX_PVR_SW_SYNC_H
++#define _UAPI_LINUX_PVR_SW_SYNC_H
++
++#if defined(SUPPORT_NATIVE_FENCE_SYNC)
++
++#include <linux/types.h>
++
++#include "pvrsrv_sync_km.h"
++#include "pvr_drm.h"
++
++#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
++#endif
+diff --git a/drivers/gpu/drm/img-rogue/lists.c b/drivers/gpu/drm/img-rogue/lists.c
+new file mode 100644
+index 000000000000..e8e7088a3296
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/lists.c
+@@ -0,0 +1,60 @@
++/*************************************************************************/ /*!
++@File
++@Title          Linked list shared functions implementation.
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Implementation of the list iterators for types shared among
++                more than one file in the services code.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "lists.h" ++ ++/*=================================================================== ++ LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just ++ once are implemented locally). ++ ===================================================================*/ ++ ++IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE) ++IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE) ++IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) ++IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE) ++IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK) ++IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE) ++IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE) ++IMPLEMENT_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE) ++IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE) +diff --git a/drivers/gpu/drm/img-rogue/lists.h b/drivers/gpu/drm/img-rogue/lists.h +new file mode 100644 +index 000000000000..2e2c29a0b7b0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/lists.h +@@ -0,0 +1,367 @@ ++/*************************************************************************/ /*! ++@File ++@Title Linked list shared functions templates. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Definition of the linked list function templates. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef LISTS_UTILS_H
++#define LISTS_UTILS_H
++
++/* instruct QAC to ignore warnings about the following custom formatted macros */
++/* PRQA S 0881,3410 ++ */
++
++#if defined(__linux__)
++	#include <linux/version.h>
++
++	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
++	#include <linux/stdarg.h>
++	#else
++	#include <stdarg.h>
++	#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
++#else
++	#include <stdarg.h>
++#endif /* __linux__ */
++
++#include "img_types.h"
++#include "device.h"
++#include "power.h"
++
++/*
++ - USAGE -
++
++ The list functions work with any structure that provides the fields psNext and
++ ppsThis. In order to make a function available for a given type, it is required
++ to use the function template macro that creates the actual code.
++
++ There are 5 main types of functions:
++ - INSERT      : given a pointer to the head pointer of the list and a pointer
++                 to the node, inserts it as the new head.
++ - INSERT TAIL : given a pointer to the head pointer of the list and a pointer
++                 to the node, inserts the node at the tail of the list.
++ - REMOVE      : given a pointer to a node, removes it from its list.
++ - FOR EACH    : applies a function to all the elements of a list.
++ - ANY         : applies a function to the elements of a list until one of them
++                 returns a non-null value, which is then returned.
++
++ The last two functions also have a variable-argument form, which allows
++ additional parameters to be passed to the callback function. In order to do
++ this, the callback function must take two arguments: the first is the current
++ node and the second is a list of variable arguments (va_list).
++
++ The ANY functions also have another form which specifies the return type of
++ the callback function and the default value returned by the callback function.
++
++*/
++
++/*************************************************************************/ /*!
++@Function       List_##TYPE##_ForEach
++@Description    Apply a callback function to all the elements of a list.
++@Input          psHead       The head of the list to be processed.
++@Input          pfnCallBack  The function to be applied to each element of the list.
++*/ /**************************************************************************/
++#define DECLARE_LIST_FOR_EACH(TYPE) \
++void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
++void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
++{\
++	while (psHead)\
++	{\
++		pfnCallBack(psHead);\
++		psHead = psHead->psNext;\
++	}\
++}
++
++/*************************************************************************/ /*!
++@Function       List_##TYPE##_ForEachSafe
++@Description    Apply a callback function to all the elements of a list. Do it
++                in a safe way that handles the fact that a node might remove
++                itself from the list during the iteration.
++@Input          psHead       The head of the list to be processed.
++@Input          pfnCallBack  The function to be applied to each element of the list.
++*/ /**************************************************************************/
++#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \
++void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \
++void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
++{\
++	TYPE *psNext;\
++\
++	while (psHead)\
++	{\
++		psNext = psHead->psNext; \
++		pfnCallBack(psHead);\
++		psHead = psNext;\
++	}\
++}
++
++
++#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
++void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
++void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
++{\
++	va_list ap;\
++	while (psHead)\
++	{\
++		va_start(ap, pfnCallBack);\
++		pfnCallBack(psHead, ap);\
++		psHead = psHead->psNext;\
++		va_end(ap);\
++	}\
++}
++
++
++/*************************************************************************/ /*!
++@Function       List_##TYPE##_Any
++@Description    Applies a callback function to the elements of a list until
++                the function returns a non-null value, then returns it.
++@Input          psHead       The head of the list to be processed.
++@Input          pfnCallBack  The function to be applied to each element of the list.
++@Return         The first non-null value returned by the callback function.
++*/ /**************************************************************************/
++#define DECLARE_LIST_ANY(TYPE) \
++void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY(TYPE) \
++void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\
++{ \
++	void *pResult;\
++	TYPE *psNextNode;\
++	pResult = NULL;\
++	psNextNode = psHead;\
++	while (psHead && !pResult)\
++	{\
++		psNextNode = psNextNode->psNext;\
++		pResult = pfnCallBack(psHead);\
++		psHead = psNextNode;\
++	}\
++	return pResult;\
++}
++
++
++/* With variable arguments, which will be passed as a va_list to the callback
++ * function. */
++
++#define DECLARE_LIST_ANY_VA(TYPE) \
++void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
++
++#define IMPLEMENT_LIST_ANY_VA(TYPE) \
++void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++	va_list ap;\
++	TYPE *psNextNode;\
++	void* pResult = NULL;\
++	while (psHead && !pResult)\
++	{\
++		psNextNode = psHead->psNext;\
++		va_start(ap, pfnCallBack);\
++		pResult = pfnCallBack(psHead, ap);\
++		va_end(ap);\
++		psHead = psNextNode;\
++	}\
++	return pResult;\
++}
++
++/* These variants exist for extra type safety, so there is no need to cast the
++ * results. */
++
++#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
++
++#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
++{ \
++	RTYPE result;\
++	TYPE *psNextNode;\
++	result = CONTINUE;\
++	psNextNode = psHead;\
++	while (psHead && result == CONTINUE)\
++	{\
++		psNextNode = psNextNode->psNext;\
++		result = pfnCallBack(psHead);\
++		psHead = psNextNode;\
++	}\
++	return result;\
++}
++
++
++#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
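++
++/* Editor's note: a minimal illustrative sketch of how the type-safe ANY form
++ * is used; it is not part of the original file. This header already
++ * instantiates DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
++ * below, which generates List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any().
++ * Iteration stops at the first callback return value other than the CONTINUE
++ * value (PVRSRV_OK here), and that value is returned. The callback name and
++ * the per-node check are hypothetical:
++ *
++ *	static PVRSRV_ERROR ValidateNodeCB(PVRSRV_DEVICE_NODE *psNode)
++ *	{
++ *		// Hypothetical check; any non-PVRSRV_OK value stops the walk.
++ *		return (psNode->psDevConfig != NULL) ? PVRSRV_OK
++ *		                                     : PVRSRV_ERROR_INVALID_PARAMS;
++ *	}
++ *
++ *	PVRSRV_ERROR eError =
++ *		List_PVRSRV_DEVICE_NODE_PVRSRV_ERROR_Any(psDeviceList, ValidateNodeCB);
++ */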
++
++#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
++RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
++{\
++ va_list ap;\
++ TYPE *psNextNode;\
++ RTYPE result = CONTINUE;\
++ while (psHead && result == CONTINUE)\
++ {\
++ psNextNode = psHead->psNext;\
++ va_start(ap, pfnCallBack);\
++ result = pfnCallBack(psHead, ap);\
++ va_end(ap);\
++ psHead = psNextNode;\
++ }\
++ return result;\
++}
++
++
++/*************************************************************************/ /*!
++@Function List_##TYPE##_Remove
++@Description Removes a given node from the list.
++@Input psNode The pointer to the node to be removed.
++*/ /**************************************************************************/
++#define DECLARE_LIST_REMOVE(TYPE) \
++void List_##TYPE##_Remove(TYPE *psNode)
++
++#define IMPLEMENT_LIST_REMOVE(TYPE) \
++void List_##TYPE##_Remove(TYPE *psNode)\
++{\
++ (*psNode->ppsThis)=psNode->psNext;\
++ if (psNode->psNext)\
++ {\
++ psNode->psNext->ppsThis = psNode->ppsThis;\
++ }\
++}
++
++/*************************************************************************/ /*!
++@Function List_##TYPE##_Insert
++@Description Inserts a given node at the beginning of the list.
++@Input ppsHead The pointer to the pointer to the head node.
++@Input psNewNode The pointer to the node to be inserted.
++*/ /**************************************************************************/
++#define DECLARE_LIST_INSERT(TYPE) \
++void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
++
++#define IMPLEMENT_LIST_INSERT(TYPE) \
++void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
++{\
++ psNewNode->ppsThis = ppsHead;\
++ psNewNode->psNext = *ppsHead;\
++ *ppsHead = psNewNode;\
++ if (psNewNode->psNext)\
++ {\
++ psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
++ }\
++}
++
++/*************************************************************************/ /*!
++@Function List_##TYPE##_InsertTail
++@Description Inserts a given node at the end of the list.
++@Input ppsHead The pointer to the pointer to the head node.
++@Input psNewNode The pointer to the node to be inserted.
++*/ /**************************************************************************/
++#define DECLARE_LIST_INSERT_TAIL(TYPE) \
++void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)
++
++#define IMPLEMENT_LIST_INSERT_TAIL(TYPE) \
++void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)\
++{\
++ TYPE *psTempNode = *ppsHead;\
++ if (psTempNode != NULL)\
++ {\
++ while (psTempNode->psNext)\
++ psTempNode = psTempNode->psNext;\
++ ppsHead = &psTempNode->psNext;\
++ }\
++ psNewNode->ppsThis = ppsHead;\
++ psNewNode->psNext = NULL;\
++ *ppsHead = psNewNode;\
++}
++
++/*************************************************************************/ /*!
++@Function List_##TYPE##_Reverse
++@Description Reverses a list in place.
++@Input ppsHead The pointer to the pointer to the head node.
++*/ /**************************************************************************/ ++#define DECLARE_LIST_REVERSE(TYPE) \ ++void List_##TYPE##_Reverse(TYPE **ppsHead) ++ ++#define IMPLEMENT_LIST_REVERSE(TYPE) \ ++void List_##TYPE##_Reverse(TYPE **ppsHead)\ ++{\ ++ TYPE *psTmpNode1; \ ++ TYPE *psTmpNode2; \ ++ TYPE *psCurNode; \ ++ psTmpNode1 = NULL; \ ++ psCurNode = *ppsHead; \ ++ while (psCurNode) { \ ++ psTmpNode2 = psCurNode->psNext; \ ++ psCurNode->psNext = psTmpNode1; \ ++ psTmpNode1 = psCurNode; \ ++ psCurNode = psTmpNode2; \ ++ if (psCurNode) \ ++ { \ ++ psTmpNode1->ppsThis = &(psCurNode->psNext); \ ++ } \ ++ else \ ++ { \ ++ psTmpNode1->ppsThis = ppsHead; \ ++ } \ ++ } \ ++ *ppsHead = psTmpNode1; \ ++} ++ ++#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL) ++ ++ ++DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE); ++DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE); ++DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); ++DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE); ++DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK); ++DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE); ++DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE); ++DECLARE_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE); ++DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE); ++ ++#undef DECLARE_LIST_ANY_2 ++#undef DECLARE_LIST_ANY_VA ++#undef DECLARE_LIST_ANY_VA_2 ++#undef DECLARE_LIST_FOR_EACH ++#undef DECLARE_LIST_FOR_EACH_VA ++#undef DECLARE_LIST_INSERT ++#undef DECLARE_LIST_REMOVE ++ ++#endif ++ ++/* re-enable warnings */ ++/* PRQA S 0881,3410 -- */ +diff --git a/drivers/gpu/drm/img-rogue/lock.h b/drivers/gpu/drm/img-rogue/lock.h +new file mode 100644 +index 000000000000..3ef78215f624 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/lock.h +@@ -0,0 +1,431 @@ ++/*************************************************************************/ /*! ++@File lock.h ++@Title Locking interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Services internal locking interface ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef LOCK_H
++#define LOCK_H
++
++/* In Linux kernel mode we are using the kernel mutex implementation directly
++ * with macros. This allows us to use the kernel lockdep feature for lock
++ * debugging. */
++#include "lock_types.h"
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++#include "allocmem.h"
++#include <linux/mutex.h>
++
++#define OSLockCreateNoStats(phLock) ({ \
++ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
++ *(phLock) = OSAllocMemNoStats(sizeof(struct mutex)); \
++ if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
++ e;})
++#define OSLockCreate(phLock) ({ \
++ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
++ *(phLock) = OSAllocMem(sizeof(struct mutex)); \
++ if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
++ e;})
++#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock));})
++#define OSLockDestroyNoStats(hLock) ({mutex_destroy((hLock)); OSFreeMemNoStats((hLock));})
++
++#define OSLockAcquire(hLock) ({mutex_lock((hLock));})
++#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass));})
++#define OSLockRelease(hLock) ({mutex_unlock((hLock));})
++
++#define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE)
++#define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ?
IMG_TRUE : IMG_FALSE) ++ ++#define OSSpinLockCreate(_ppsLock) ({ \ ++ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ *(_ppsLock) = OSAllocMem(sizeof(spinlock_t)); \ ++ if (*(_ppsLock)) {spin_lock_init(*(_ppsLock)); e = PVRSRV_OK;} \ ++ e;}) ++#define OSSpinLockDestroy(_psLock) ({OSFreeMem(_psLock);}) ++ ++typedef unsigned long OS_SPINLOCK_FLAGS; ++#define OSSpinLockAcquire(_pLock, _flags) spin_lock_irqsave(_pLock, _flags) ++#define OSSpinLockRelease(_pLock, _flags) spin_unlock_irqrestore(_pLock, _flags) ++ ++/* These _may_ be reordered or optimized away entirely by the compiler/hw */ ++#define OSAtomicRead(pCounter) atomic_read(pCounter) ++#define OSAtomicWrite(pCounter, i) atomic_set(pCounter, i) ++ ++/* The following atomic operations, in addition to being SMP-safe, also ++ imply a memory barrier around the operation */ ++#define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter) ++#define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter) ++#define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv) ++#define OSAtomicExchange(pCounter, iNewVal) atomic_xchg(pCounter, iNewVal) ++ ++static inline IMG_INT OSAtomicOr(ATOMIC_T *pCounter, IMG_INT iVal) ++{ ++ IMG_INT iOldVal, iLastVal, iNewVal; ++ ++ iLastVal = OSAtomicRead(pCounter); ++ do ++ { ++ iOldVal = iLastVal; ++ iNewVal = iOldVal | iVal; ++ ++ iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal); ++ } ++ while (iOldVal != iLastVal); ++ ++ return iNewVal; ++} ++ ++#define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter) ++#define OSAtomicAddUnless(pCounter, incr, test) atomic_add_unless(pCounter, (incr), (test)) ++ ++#define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter) ++#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), (test)) ++ ++#else /* defined(__linux__) && defined(__KERNEL__) */ ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++/**************************************************************************/ /*! ++@Function OSLockCreate ++@Description Creates an operating system lock object. ++@Output phLock The created lock. ++@Return PVRSRV_OK on success. PVRSRV_ERROR_OUT_OF_MEMORY if the driver ++ cannot allocate CPU memory needed for the lock. ++ PVRSRV_ERROR_INIT_FAILURE if the Operating System fails to ++ allocate the lock. ++ */ /**************************************************************************/ ++IMG_INTERNAL ++PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock); ++#if defined(INTEGRITY_OS) ++#define OSLockCreateNoStats OSLockCreate ++#endif ++ ++/**************************************************************************/ /*! ++@Function OSLockDestroy ++@Description Destroys an operating system lock object. ++@Input hLock The lock to be destroyed. ++@Return None. ++ */ /**************************************************************************/ ++IMG_INTERNAL ++void OSLockDestroy(POS_LOCK hLock); ++ ++#if defined(INTEGRITY_OS) ++#define OSLockDestroyNoStats OSLockDestroy ++#endif ++/**************************************************************************/ /*! ++@Function OSLockAcquire ++@Description Acquires an operating system lock. ++ NB. This function must not return until the lock is acquired ++ (meaning the implementation should not timeout or return with ++ an error, as the caller will assume they have the lock). ++@Input hLock The lock to be acquired. ++@Return None. 
++ */ /**************************************************************************/
++IMG_INTERNAL
++void OSLockAcquire(POS_LOCK hLock);
++
++/**************************************************************************/ /*!
++@Function OSTryLockAcquire
++@Description Try to acquire an operating system lock.
++ NB. If the lock is acquired successfully on the first
++ attempt, the function returns IMG_TRUE; otherwise it
++ returns IMG_FALSE.
++@Input hLock The lock to be acquired.
++@Return IMG_TRUE if lock acquired successfully,
++ IMG_FALSE otherwise.
++ */ /**************************************************************************/
++IMG_INTERNAL
++IMG_BOOL OSTryLockAcquire(POS_LOCK hLock);
++
++/* Nested notation isn't used in UM or on other OSes */
++/**************************************************************************/ /*!
++@Function OSLockAcquireNested
++@Description For operating systems other than Linux, this equates to an
++ OSLockAcquire() call. On Linux, this function wraps a call
++ to mutex_lock_nested(). This recognises the scenario where
++ there may be multiple subclasses within a particular class
++ of lock. In such cases, the order in which the locks belonging
++ to these various subclasses are acquired is important and must
++ be validated.
++@Input hLock The lock to be acquired.
++@Input subclass The subclass of the lock.
++@Return None.
++ */ /**************************************************************************/
++#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock))
++
++/**************************************************************************/ /*!
++@Function OSLockRelease
++@Description Releases an operating system lock.
++@Input hLock The lock to be released.
++@Return None.
++ */ /**************************************************************************/
++IMG_INTERNAL
++void OSLockRelease(POS_LOCK hLock);
++
++/**************************************************************************/ /*!
++@Function OSLockIsLocked
++@Description Tests whether or not an operating system lock is currently
++ locked.
++@Input hLock The lock to be tested.
++@Return IMG_TRUE if locked, IMG_FALSE if not locked.
++ */ /**************************************************************************/
++IMG_INTERNAL
++IMG_BOOL OSLockIsLocked(POS_LOCK hLock);
++
++#if defined(__linux__)
++
++/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */
++#define OSAtomicRead(pCounter) (*(volatile IMG_INT32 *)&(pCounter)->counter)
++#define OSAtomicWrite(pCounter, i) ((pCounter)->counter = (IMG_INT32) i)
++#define OSAtomicIncrement(pCounter) __sync_add_and_fetch((&(pCounter)->counter), 1)
++#define OSAtomicDecrement(pCounter) __sync_sub_and_fetch((&(pCounter)->counter), 1)
++#define OSAtomicCompareExchange(pCounter, oldv, newv) \
++ __sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv)
++#define OSAtomicOr(pCounter, iVal) __sync_or_and_fetch((&(pCounter)->counter), iVal)
++
++static inline IMG_UINT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_UINT32 iNewVal)
++{
++ IMG_UINT32 iOldVal;
++ IMG_UINT32 iLastVal;
++
++ iLastVal = OSAtomicRead(pCounter);
++ do
++ {
++ iOldVal = iLastVal;
++ iLastVal = OSAtomicCompareExchange(pCounter, iOldVal, iNewVal);
++ }
++ while (iOldVal != iLastVal);
++
++ return iOldVal;
++}
++
++#define OSAtomicAdd(pCounter, incr) __sync_add_and_fetch((&(pCounter)->counter), incr)
++#define OSAtomicAddUnless(pCounter, incr, test) ({ \
++ IMG_INT32 c; IMG_INT32 old; \
++ c = OSAtomicRead(pCounter); \
++ while (1) { \
++ if (c == (test)) break; \
++ old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \
++ if (old == c) break; \
++ c = old; \
++ } c; })
++
++#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr))
++#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
++
++#else
++
++/*************************************************************************/ /*!
++@Function OSAtomicRead
++@Description Read the value of a variable atomically.
++ Atomic functions must be implemented in a manner that
++ is both symmetric multiprocessor (SMP) safe and has a memory
++ barrier around each operation.
++@Input pCounter The atomic variable to read
++@Return The value of the atomic variable
++*/ /**************************************************************************/
++IMG_INTERNAL
++IMG_INT32 OSAtomicRead(const ATOMIC_T *pCounter);
++
++/*************************************************************************/ /*!
++@Function OSAtomicWrite
++@Description Write the value of a variable atomically.
++ Atomic functions must be implemented in a manner that
++ is both symmetric multiprocessor (SMP) safe and has a memory
++ barrier around each operation.
++@Input pCounter The atomic variable to be written to
++@Input v The value to write
++@Return None
++*/ /**************************************************************************/
++IMG_INTERNAL
++void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT32 v);
++
++/* The following atomic operations, in addition to being SMP-safe,
++ should also have a memory barrier around each operation */
++/*************************************************************************/ /*!
++@Function OSAtomicIncrement
++@Description Increment the value of a variable atomically.
++ Atomic functions must be implemented in a manner that
++ is both symmetric multiprocessor (SMP) safe and has a memory
++ barrier around each operation.
++@Input pCounter The atomic variable to be incremented
++@Return The new value of *pCounter.
++*/ /**************************************************************************/
++IMG_INTERNAL
++IMG_INT32 OSAtomicIncrement(ATOMIC_T *pCounter);
++
++/*************************************************************************/ /*!
++@Function OSAtomicDecrement
++@Description Decrement the value of a variable atomically.
++ Atomic functions must be implemented in a manner that
++ is both symmetric multiprocessor (SMP) safe and has a memory
++ barrier around each operation.
++@Input pCounter The atomic variable to be decremented
++@Return The new value of *pCounter.
++*/ /**************************************************************************/
++IMG_INTERNAL
++IMG_INT32 OSAtomicDecrement(ATOMIC_T *pCounter);
++
++/*************************************************************************/ /*!
++@Function OSAtomicAdd
++@Description Add a specified value to a variable atomically.
++ Atomic functions must be implemented in a manner that
++ is both symmetric multiprocessor (SMP) safe and has a memory
++ barrier around each operation.
++@Input pCounter The atomic variable to add the value to
++@Input v The value to be added
++@Return The new value of *pCounter.
++*/ /**************************************************************************/
++IMG_INTERNAL
++IMG_INT32 OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT32 v);
++
++/*************************************************************************/ /*!
++@Function OSAtomicAddUnless
++@Description Add a specified value to a variable atomically unless it
++ already equals a particular value.
++ Atomic functions must be implemented in a manner that
++ is both symmetric multiprocessor (SMP) safe and has a memory
++ barrier around each operation.
++@Input pCounter The atomic variable to add the value to
++@Input v The value to be added to 'pCounter'
++@Input t The test value. If 'pCounter' equals this,
++ its value will not be adjusted
++@Return The old value of *pCounter.
++*/ /**************************************************************************/
++IMG_INTERNAL
++IMG_INT32 OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t);
++
++/*************************************************************************/ /*!
++@Function OSAtomicSubtract
++@Description Subtract a specified value from a variable atomically.
++ Atomic functions must be implemented in a manner that
++ is both symmetric multiprocessor (SMP) safe and has a memory
++ barrier around each operation.
++@Input pCounter The atomic variable to subtract the value from
++@Input v The value to be subtracted
++@Return The new value of *pCounter.
++*/ /**************************************************************************/
++IMG_INTERNAL
++IMG_INT32 OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT32 v);
++
++/*************************************************************************/ /*!
++@Function OSAtomicSubtractUnless
++@Description Subtract a specified value from a variable atomically unless
++ it already equals a particular value.
++ Atomic functions must be implemented in a manner that
++ is both symmetric multiprocessor (SMP) safe and has a memory
++ barrier around each operation.
++@Input pCounter The atomic variable to subtract the value from
++@Input v The value to be subtracted from 'pCounter'
++@Input t The test value. If 'pCounter' equals this,
++ its value will not be adjusted
++@Return The old value of *pCounter.
++*/ /**************************************************************************/ ++IMG_INTERNAL ++IMG_INT32 OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t); ++ ++/*************************************************************************/ /*! ++@Function OSAtomicCompareExchange ++@Description Set a variable to a given value only if it is currently ++ equal to a specified value. The whole operation must be atomic. ++ Atomic functions must be implemented in a manner that ++ is both symmetric multiprocessor (SMP) safe and has a memory ++ barrier around each operation. ++@Input pCounter The atomic variable to be checked and ++ possibly updated ++@Input oldv The value the atomic variable must have in ++ order to be modified ++@Input newv The value to write to the atomic variable if ++ it equals 'oldv' ++@Return The old value of *pCounter ++*/ /**************************************************************************/ ++IMG_INTERNAL ++IMG_INT32 OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT32 oldv, IMG_INT32 newv); ++ ++/*************************************************************************/ /*! ++@Function OSAtomicExchange ++@Description Set a variable to a given value and retrieve previous value. ++ The whole operation must be atomic. ++ Atomic functions must be implemented in a manner that ++ is both symmetric multiprocessor (SMP) safe and has a memory ++ barrier around each operation. ++@Input pCounter The atomic variable to be updated ++@Input iNewVal The value to write to the atomic variable ++@Return The previous value of *pCounter. ++*/ /**************************************************************************/ ++IMG_INTERNAL ++IMG_INT32 OSAtomicExchange(ATOMIC_T *pCounter, IMG_INT32 iNewVal); ++ ++/*************************************************************************/ /*! ++@Function OSAtomicOr ++@Description Set a variable to the bitwise or of its current value and the ++ specified value. Equivalent to *pCounter |= iVal. ++ The whole operation must be atomic. ++ Atomic functions must be implemented in a manner that ++ is both symmetric multiprocessor (SMP) safe and has a memory ++ barrier around each operation. ++@Input pCounter The atomic variable to be updated ++@Input iVal The value to bitwise or against ++@Return The new value of *pCounter. ++*/ /**************************************************************************/ ++IMG_INTERNAL ++IMG_INT32 OSAtomicOr(ATOMIC_T *pCounter, IMG_INT32 iVal); ++ ++/* For now, spin-locks are required on Linux only, so other platforms fake ++ * spinlocks with normal mutex locks */ ++/*! Type definitions for OS_SPINLOCK accessor and creation / deletion */ ++typedef unsigned long OS_SPINLOCK_FLAGS; ++/*! Pointer to an OS Spinlock */ ++#define POS_SPINLOCK POS_LOCK ++/*! Wrapper for OSLockCreate() */ ++#define OSSpinLockCreate(ppLock) OSLockCreate(ppLock) ++/*! Wrapper for OSLockDestroy() */ ++#define OSSpinLockDestroy(pLock) OSLockDestroy(pLock) ++/*! Wrapper for OSLockAcquire() */ ++#define OSSpinLockAcquire(pLock, flags) {flags = 0; OSLockAcquire(pLock);} ++/*! 
Wrapper for OSLockRelease() */
++#define OSSpinLockRelease(pLock, flags) {flags = 0; OSLockRelease(pLock);}
++
++#endif /* defined(__linux__) */
++#endif /* defined(__linux__) && defined(__KERNEL__) */
++
++#endif /* LOCK_H */
+diff --git a/drivers/gpu/drm/img-rogue/lock_types.h b/drivers/gpu/drm/img-rogue/lock_types.h
+new file mode 100644
+index 000000000000..370ffc025d05
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/lock_types.h
+@@ -0,0 +1,92 @@
++/*************************************************************************/ /*!
++@File lock_types.h
++@Title Locking types
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Locking specific enums, defines and structures
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef LOCK_TYPES_H
++#define LOCK_TYPES_H
++
++/* In Linux kernel mode we are using the kernel mutex implementation directly
++ * with macros. This allows us to use the kernel lockdep feature for lock
++ * debugging. */
++#if defined(__linux__) && defined(__KERNEL__)
++
++#include <linux/mutex.h>
++#include <linux/spinlock.h>
++/* The mutex is defined as a pointer to be compatible with the other code. This
++ * isn't ideal and usually you wouldn't do that in kernel code.
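++ * The upside is that POS_LOCK stays an opaque handle with the same shape
++ * on every platform, matching the user-mode case below where it points to
++ * an incomplete OS_LOCK_TAG structure.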
*/ ++typedef struct mutex *POS_LOCK; ++typedef struct rw_semaphore *POSWR_LOCK; ++typedef spinlock_t *POS_SPINLOCK; ++typedef atomic_t ATOMIC_T; ++ ++#else /* defined(__linux__) && defined(__KERNEL__) */ ++#include "img_types.h" /* needed for IMG_INT */ ++typedef struct OS_LOCK_TAG *POS_LOCK; ++ ++#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) ++typedef struct OSWR_LOCK_TAG *POSWR_LOCK; ++#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ ++typedef struct OSWR_LOCK_TAG { ++ IMG_UINT32 ui32Dummy; ++} *POSWR_LOCK; ++#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ ++ ++#if defined(__linux__) ++ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; ++#elif defined(__QNXNTO__) ++ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; ++#elif defined(_WIN32) ++ /* ++ * Dummy definition. WDDM doesn't use Services, but some headers ++ * still have to be shared. This is one such case. ++ */ ++ typedef struct OS_ATOMIC_TAG {IMG_INT32 counter;} ATOMIC_T; ++#elif defined(INTEGRITY_OS) ++ /* Only lower 32bits are used in OS ATOMIC APIs to have consistent behaviour across all OS */ ++ typedef struct OS_ATOMIC_TAG {IMG_INT64 counter;} ATOMIC_T; ++#else ++ #error "Please type-define an atomic lock for this environment" ++#endif ++ ++#endif /* defined(__linux__) && defined(__KERNEL__) */ ++ ++#endif /* LOCK_TYPES_H */ +diff --git a/drivers/gpu/drm/img-rogue/log2.h b/drivers/gpu/drm/img-rogue/log2.h +new file mode 100644 +index 000000000000..2182a0223ca6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/log2.h +@@ -0,0 +1,417 @@ ++/*************************************************************************/ /*! ++@Title Integer log2 and related functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef LOG2_H
++#define LOG2_H
++
++#include "img_defs.h"
++
++/*************************************************************************/ /*!
++@Description Determine if a number is a power of two.
++@Input n
++@Return True if n is a power of 2, false otherwise. True if n == 0.
++*/ /**************************************************************************/
++static INLINE IMG_BOOL __const_function IsPower2(uint32_t n)
++{
++ /* C++ needs this cast. */
++ return (IMG_BOOL)((n & (n - 1U)) == 0U);
++}
++
++/*************************************************************************/ /*!
++@Description Determine if a number is a power of two.
++@Input n
++@Return True if n is a power of 2, false otherwise. True if n == 0.
++*/ /**************************************************************************/
++static INLINE IMG_BOOL __const_function IsPower2_64(uint64_t n)
++{
++ /* C++ needs this cast. */
++ return (IMG_BOOL)((n & (n - 1U)) == 0U);
++}
++
++/* Code using GNU GCC intrinsics */
++#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER))
++
++/* CHAR_BIT is typically found in <limits.h>. For all the platforms where
++ * CHAR_BIT is not available, define it here with the assumption that there
++ * are 8 bits in a byte */
++#ifndef CHAR_BIT
++#define CHAR_BIT 8U
++#endif
++
++/*************************************************************************/ /*!
++@Description Compute floor(log2(n))
++@Input n
++@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function FloorLog2(uint32_t n)
++{
++ if (unlikely(n == 0U))
++ {
++ return 0;
++ }
++ else
++ {
++ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n);
++ return uNumBits - (uint32_t)__builtin_clz(n) - 1U;
++ }
++}
++
++/*************************************************************************/ /*!
++@Description Compute floor(log2(n))
++@Input n
++@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function FloorLog2_64(uint64_t n)
++{
++ if (unlikely(n == 0U))
++ {
++ return 0;
++ }
++ else
++ {
++ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n);
++ return uNumBits - (uint32_t)__builtin_clzll(n) - 1U;
++ }
++}
++
++/*************************************************************************/ /*!
++@Description Compute ceil(log2(n))
++@Input n
++@Return log2(n) rounded up to the nearest integer.
Returns 0 if n == 0
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function CeilLog2(uint32_t n)
++{
++ if (unlikely(n == 0U || n == 1U))
++ {
++ return 0;
++ }
++ else
++ {
++ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n);
++
++ n--; /* Handle powers of 2 */
++ return uNumBits - (uint32_t)__builtin_clz(n);
++ }
++}
++
++/*************************************************************************/ /*!
++@Description Compute ceil(log2(n))
++@Input n
++@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function CeilLog2_64(uint64_t n)
++{
++ if (unlikely(n == 0U || n == 1U))
++ {
++ return 0;
++ }
++ else
++ {
++ uint32_t uNumBits = (uint32_t)CHAR_BIT * (uint32_t)sizeof(n);
++
++ n--; /* Handle powers of 2 */
++ return uNumBits - (uint32_t)__builtin_clzll(n);
++ }
++}
++
++/*************************************************************************/ /*!
++@Description Compute log2(n) for exact powers of two only
++@Input n Must be a power of two
++@Return log2(n)
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function ExactLog2(uint32_t n)
++{
++ return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clz(n) - 1U;
++}
++
++/*************************************************************************/ /*!
++@Description Compute log2(n) for exact powers of two only
++@Input n Must be a power of two
++@Return log2(n)
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function ExactLog2_64(uint64_t n)
++{
++ return (uint32_t)CHAR_BIT * (uint32_t)sizeof(n) - (uint32_t)__builtin_clzll(n) - 1U;
++}
++
++/*************************************************************************/ /*!
++@Description Round a non-power-of-two number up to the next power of two.
++@Input n
++@Return n rounded up to the next power of two. If n is zero or
++ already a power of two, return n unmodified.
++*/ /**************************************************************************/
++static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n)
++{
++ /* Cases with n greater than 2^31 need separate handling,
++ * as the result of (1<<32) is undefined. */
++ if (unlikely( n == 0U || n > (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U)))
++ {
++ return 0;
++ }
++
++ /* Return n if it is already a power of 2 */
++ if ((IMG_BOOL)((n & (n - 1U)) == 0U))
++ {
++ return n;
++ }
++
++ return (uint32_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - (uint32_t)__builtin_clz(n));
++}
++
++/*************************************************************************/ /*!
++@Description Round a non-power-of-two number up to the next power of two.
++@Input n
++@Return n rounded up to the next power of two. If n is zero or
++ already a power of two, return n unmodified.
++*/ /**************************************************************************/
++static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n)
++{
++ /* Cases with n greater than 2^63 need separate handling,
++ * as the result of (1<<64) is undefined.
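++ * (For such n there is no representable next power of two at all, so the
++ * function returns 0 rather than shifting by the full word width.)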
*/ ++ if (unlikely( n == 0U || n > (uint64_t)1 << ((uint32_t)CHAR_BIT * sizeof(n) - 1U))) ++ { ++ return 0; ++ } ++ ++ /* Return n if it is already a power of 2 */ ++ if ((IMG_BOOL)((n & (n - 1U)) == 0U)) ++ { ++ return n; ++ } ++ ++ return (uint64_t)1 << ((uint64_t)CHAR_BIT * sizeof(n) - (uint64_t)__builtin_clzll(n)); ++} ++ ++#else /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */ ++ ++/*************************************************************************/ /*! ++@Description Round a non-power-of-two number up to the next power of two. ++@Input n ++@Return n rounded up to the next power of two. If n is zero or ++ already a power of two, return n unmodified. ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function RoundUpToNextPowerOfTwo(uint32_t n) ++{ ++ n--; ++ n |= n >> 1; /* handle 2 bit numbers */ ++ n |= n >> 2; /* handle 4 bit numbers */ ++ n |= n >> 4; /* handle 8 bit numbers */ ++ n |= n >> 8; /* handle 16 bit numbers */ ++ n |= n >> 16; /* handle 32 bit numbers */ ++ n++; ++ ++ return n; ++} ++ ++/*************************************************************************/ /*! ++@Description Round a non-power-of-two number up to the next power of two. ++@Input n ++@Return n rounded up to the next power of two. If n is zero or ++ already a power of two, return n unmodified. ++*/ /**************************************************************************/ ++static INLINE uint64_t __const_function RoundUpToNextPowerOfTwo_64(uint64_t n) ++{ ++ n--; ++ n |= n >> 1; /* handle 2 bit numbers */ ++ n |= n >> 2; /* handle 4 bit numbers */ ++ n |= n >> 4; /* handle 8 bit numbers */ ++ n |= n >> 8; /* handle 16 bit numbers */ ++ n |= n >> 16; /* handle 32 bit numbers */ ++ n |= n >> 32; /* handle 64 bit numbers */ ++ n++; ++ ++ return n; ++} ++ ++/*************************************************************************/ /*! ++@Description Compute floor(log2(n)) ++@Input n ++@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function FloorLog2(uint32_t n) ++{ ++ uint32_t ui32log2 = 0; ++ ++ while ((n >>= 1) != 0U) ++ { ++ ui32log2++; ++ } ++ ++ return ui32log2; ++} ++ ++/*************************************************************************/ /*! ++@Description Compute floor(log2(n)) ++@Input n ++@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0 ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function FloorLog2_64(uint64_t n) ++{ ++ uint32_t ui32log2 = 0; ++ ++ while ((n >>= 1) != 0U) ++ { ++ ui32log2++; ++ } ++ ++ return ui32log2; ++} ++ ++/*************************************************************************/ /*! ++@Description Compute ceil(log2(n)) ++@Input n ++@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function CeilLog2(uint32_t n) ++{ ++ uint32_t ui32log2 = 0; ++ ++ if (n == 0U) ++ { ++ return 0; ++ } ++ ++ n--; /* Handle powers of 2 */ ++ ++ while (n != 0U) ++ { ++ ui32log2++; ++ n >>= 1; ++ } ++ ++ return ui32log2; ++} ++ ++/*************************************************************************/ /*! 
++@Description Compute ceil(log2(n)) ++@Input n ++@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0 ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function CeilLog2_64(uint64_t n) ++{ ++ uint32_t ui32log2 = 0; ++ ++ if (n == 0U) ++ { ++ return 0; ++ } ++ ++ n--; /* Handle powers of 2 */ ++ ++ while (n != 0U) ++ { ++ ui32log2++; ++ n >>= 1; ++ } ++ ++ return ui32log2; ++} ++ ++/*************************************************************************/ /*! ++@Description Compute log2(n) for exact powers of two only ++@Input n Must be a power of two ++@Return log2(n) ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function ExactLog2(uint32_t n) ++{ ++ static const uint32_t b[] = ++ {0xAAAAAAAAU, 0xCCCCCCCCU, 0xF0F0F0F0U, 0xFF00FF00U, 0xFFFF0000U}; ++ uint32_t r = (n & b[0]) != 0U; ++ ++ r |= (uint32_t) ((n & b[4]) != 0U) << 4; ++ r |= (uint32_t) ((n & b[3]) != 0U) << 3; ++ r |= (uint32_t) ((n & b[2]) != 0U) << 2; ++ r |= (uint32_t) ((n & b[1]) != 0U) << 1; ++ ++ return r; ++} ++ ++/*************************************************************************/ /*! ++@Description Compute log2(n) for exact powers of two only ++@Input n Must be a power of two ++@Return log2(n) ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function ExactLog2_64(uint64_t n) ++{ ++ static const uint64_t b[] = ++ {0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL, ++ 0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL, ++ 0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL}; ++ uint32_t r = (n & b[0]) != 0U; ++ ++ r |= (uint32_t) ((n & b[5]) != 0U) << 5; ++ r |= (uint32_t) ((n & b[4]) != 0U) << 4; ++ r |= (uint32_t) ((n & b[3]) != 0U) << 3; ++ r |= (uint32_t) ((n & b[2]) != 0U) << 2; ++ r |= (uint32_t) ((n & b[1]) != 0U) << 1; ++ ++ return r; ++} ++ ++#endif /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */ ++ ++/*************************************************************************/ /*! ++@Description Compute floor(log2(size)) , where size is the max of 3 sizes ++ This is almost always the ONLY EVER valid use of FloorLog2. ++ Usually CeilLog2() should be used instead. ++ For a 5x5x1 texture, the 3 miplevels are: ++ 0: 5x5x1 ++ 1: 2x2x1 ++ 2: 1x1x1 ++ ++ For an 8x8x1 texture, the 4 miplevels are: ++ 0: 8x8x1 ++ 1: 4x4x1 ++ 2: 2x2x1 ++ 3: 1x1x1 ++ ++ ++@Input sizeX, sizeY, sizeZ ++@Return Count of mipmap levels for given dimensions ++*/ /**************************************************************************/ ++static INLINE uint32_t __const_function NumMipLevels(uint32_t sizeX, uint32_t sizeY, uint32_t sizeZ) ++{ ++ ++ uint32_t maxSize = MAX(MAX(sizeX, sizeY), sizeZ); ++ return FloorLog2(maxSize) + 1U; ++} ++ ++#endif /* LOG2_H */ +diff --git a/drivers/gpu/drm/img-rogue/mem_utils.c b/drivers/gpu/drm/img-rogue/mem_utils.c +new file mode 100644 +index 000000000000..1244e246afb9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/mem_utils.c +@@ -0,0 +1,449 @@ ++/*************************************************************************/ /*! ++@File ++@Title Memory manipulation functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Memory related functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include "osfunc_common.h"
++#include "img_defs.h"
++
++/* This workaround is only *required* on ARM64. Avoid building or including
++ * it by default on other architectures, unless the 'safe memcpy' test flag
++ * is enabled. (The code should work on other architectures.)
++ */
++
++
++
++/* NOTE: This C file is compiled with -ffreestanding to avoid pattern matching
++ * by the compiler to stdlib functions, and it must only use the below
++ * headers. Do not include any IMG or services headers in this file.
++ */
++#if defined(__KERNEL__) && defined(__linux__)
++#include <linux/types.h>
++#else
++#include <stddef.h>
++#endif
++
++/* The attribute "vector_size" will generate floating point instructions
++ * and use FPU registers. In kernel mode, the FPU registers might be
++ * corrupted when the CPU does a context switch, because FPU registers are
++ * not expected to be saved.
++ * GCC enables the compiler option -mgeneral-regs-only by default.
++ * This option restricts the generated code to general registers only,
++ * which avoids the problem.
++ */
++#if defined(__KERNEL__) && defined(__clang__)
++
++#define DEVICE_MEMSETCPY_NON_VECTOR_KM
++#if !defined(BITS_PER_BYTE)
++#define BITS_PER_BYTE (8)
++#endif /* BITS_PER_BYTE */
++
++/* Loading or storing 16 or 32 bytes is only supported on 64-bit machines.
*/ ++#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES > 8 ++typedef __uint128_t uint128_t; ++ ++typedef struct ++{ ++ uint128_t ui128DataFields[2]; ++} ++uint256_t; ++#endif ++ ++#endif ++ ++/* This file is only intended to be used on platforms which use GCC or Clang, ++ * due to its requirement on __attribute__((vector_size(n))), typeof() and ++ * __SIZEOF__ macros. ++ */ ++ ++#if defined(__GNUC__) ++ ++#ifndef MIN ++#define MIN(a, b) \ ++ ({__typeof(a) _a = (a); __typeof(b) _b = (b); _a > _b ? _b : _a;}) ++#endif ++ ++#if !defined(DEVICE_MEMSETCPY_ALIGN_IN_BYTES) ++#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES __SIZEOF_LONG__ ++#endif ++#if (DEVICE_MEMSETCPY_ALIGN_IN_BYTES & (DEVICE_MEMSETCPY_ALIGN_IN_BYTES - 1)) != 0 ++#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be a power of 2" ++#endif ++#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES < 4 ++#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be equal or greater than 4" ++#endif ++ ++#if __SIZEOF_POINTER__ != __SIZEOF_LONG__ ++#error No support for architectures where void* and long are sized differently ++#endif ++ ++#if __SIZEOF_LONG__ > DEVICE_MEMSETCPY_ALIGN_IN_BYTES ++/* Meaningless, and harder to do correctly */ ++# error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long) ++typedef unsigned long block_t; ++#elif __SIZEOF_LONG__ <= DEVICE_MEMSETCPY_ALIGN_IN_BYTES ++# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) ++# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 ++ typedef uint64_t block_t; ++# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 ++ typedef uint128_t block_t; ++# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 ++ typedef uint256_t block_t; ++# endif ++# else ++typedef unsigned int block_t ++ __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES))); ++# endif ++# if defined(__arm64__) || defined(__aarch64__) ++# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 ++# define DEVICE_MEMSETCPY_ARM64 ++# define REGSZ "w" ++# define REGCL "w" ++# define BVCLB "r" ++# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 ++# define DEVICE_MEMSETCPY_ARM64 ++# define REGSZ "x" ++# define REGCL "x" ++# define BVCLB "r" ++# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 ++# if defined(__ARM_NEON_FP) ++# define DEVICE_MEMSETCPY_ARM64 ++# define REGSZ "q" ++# define REGCL "v" ++# define BVCLB "w" ++# endif ++# endif ++# if defined(DEVICE_MEMSETCPY_ARM64) ++# if defined(DEVICE_MEMSETCPY_ARM64_NON_TEMPORAL) ++# define NSHLD() __asm__ ("dmb nshld") ++# define NSHST() __asm__ ("dmb nshst") ++# define LDP "ldnp" ++# define STP "stnp" ++# else ++# define NSHLD() ++# define NSHST() ++# define LDP "ldp" ++# define STP "stp" ++# endif ++# if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) ++# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8 ++typedef uint32_t block_half_t; ++# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16 ++typedef uint64_t block_half_t; ++# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32 ++typedef uint128_t block_half_t; ++# endif ++# else ++ typedef unsigned int block_half_t ++ __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES / 2))); ++# endif ++# endif ++# endif ++#endif ++ ++__attribute__((visibility("hidden"))) ++void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize) ++{ ++ volatile const char *pcSrc = pvSrc; ++ volatile char *pcDst = pvDst; ++ size_t uPreambleBytes; ++ int bBlockCopy = 0; ++ ++ size_t uSrcUnaligned = (size_t)pcSrc % sizeof(block_t); ++ size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t); ++ ++ if (!uSrcUnaligned && !uDstUnaligned) ++ { ++ /* Neither pointer is unaligned. Optimal case. 
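++ * The whole copy can then proceed one block_t at a time; any sub-block
++ * tail is picked up by the final byte loop.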
*/ ++ bBlockCopy = 1; ++ } ++ else ++ { ++ if (uSrcUnaligned == uDstUnaligned) ++ { ++ /* Neither pointer is usefully aligned, but they are misaligned in ++ * the same way, so we can copy a preamble in a slow way, then ++ * optimize the rest. ++ */ ++ uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize); ++ uSize -= uPreambleBytes; ++ while (uPreambleBytes) ++ { ++ *pcDst++ = *pcSrc++; ++ uPreambleBytes--; ++ } ++ ++ bBlockCopy = 1; ++ } ++ else if ((uSrcUnaligned | uDstUnaligned) % sizeof(int) == 0) ++ { ++ /* Both pointers are at least 32-bit aligned, and we assume that ++ * the processor must handle all kinds of 32-bit load-stores. ++ * NOTE: Could we optimize this with a non-temporal version? ++ */ ++ if (uSize >= sizeof(int)) ++ { ++ volatile int *piSrc = (int *)((void *)pcSrc); ++ volatile int *piDst = (int *)((void *)pcDst); ++ ++ while (uSize >= sizeof(int)) ++ { ++ *piDst++ = *piSrc++; ++ uSize -= sizeof(int); ++ } ++ ++ pcSrc = (char *)((void *)piSrc); ++ pcDst = (char *)((void *)piDst); ++ } ++ } ++ } ++ ++ if (bBlockCopy && uSize >= sizeof(block_t)) ++ { ++ volatile block_t *pSrc = (block_t *)((void *)pcSrc); ++ volatile block_t *pDst = (block_t *)((void *)pcDst); ++ ++#if defined(DEVICE_MEMSETCPY_ARM64) ++ NSHLD(); ++#endif ++ ++ while (uSize >= sizeof(block_t)) ++ { ++#if defined(DEVICE_MEMSETCPY_ARM64) ++ __asm__ (LDP " " REGSZ "0, " REGSZ "1, [%[pSrc]]\n\t" ++ STP " " REGSZ "0, " REGSZ "1, [%[pDst]]" ++ : ++ : [pSrc] "r" (pSrc), [pDst] "r" (pDst) ++ : "memory", REGCL "0", REGCL "1"); ++#else ++ *pDst = *pSrc; ++#endif ++ pDst++; ++ pSrc++; ++ uSize -= sizeof(block_t); ++ } ++ ++#if defined(DEVICE_MEMSETCPY_ARM64) ++ NSHST(); ++#endif ++ ++ pcSrc = (char *)((void *)pSrc); ++ pcDst = (char *)((void *)pDst); ++ } ++ ++ while (uSize) ++ { ++ *pcDst++ = *pcSrc++; ++ uSize--; ++ } ++} ++ ++__attribute__((visibility("hidden"))) ++void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize) ++{ ++ volatile char *pcDst = pvDst; ++ size_t uPreambleBytes; ++ ++ size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t); ++ ++ if (uDstUnaligned) ++ { ++ uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize); ++ uSize -= uPreambleBytes; ++ while (uPreambleBytes) ++ { ++ *pcDst++ = ui8Value; ++ uPreambleBytes--; ++ } ++ } ++ ++ if (uSize >= sizeof(block_t)) ++ { ++ volatile block_t *pDst = (block_t *)((void *)pcDst); ++ size_t i, uBlockSize; ++#if defined(DEVICE_MEMSETCPY_ARM64) ++ typedef block_half_t BLK_t; ++#else ++ typedef block_t BLK_t; ++#endif /* defined(DEVICE_MEMSETCPY_ARM64) */ ++ ++#if defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) ++ BLK_t bValue = 0; ++ ++ uBlockSize = sizeof(BLK_t) / sizeof(ui8Value); ++ ++ for (i = 0; i < uBlockSize; i++) ++ { ++ bValue |= (BLK_t)ui8Value << ((uBlockSize - i - 1) * BITS_PER_BYTE); ++ } ++#else ++ BLK_t bValue = {0}; ++ ++ uBlockSize = sizeof(bValue) / sizeof(unsigned int); ++ for (i = 0; i < uBlockSize; i++) ++ bValue[i] = ui8Value << 24U | ++ ui8Value << 16U | ++ ui8Value << 8U | ++ ui8Value; ++#endif /* defined(DEVICE_MEMSETCPY_NON_VECTOR_KM) */ ++ ++#if defined(DEVICE_MEMSETCPY_ARM64) ++ NSHLD(); ++#endif ++ ++ while (uSize >= sizeof(block_t)) ++ { ++#if defined(DEVICE_MEMSETCPY_ARM64) ++ __asm__ (STP " %" REGSZ "[bValue], %" REGSZ "[bValue], [%[pDst]]" ++ : ++ : [bValue] BVCLB (bValue), [pDst] "r" (pDst) ++ : "memory"); ++#else ++ *pDst = bValue; ++#endif ++ pDst++; ++ uSize -= sizeof(block_t); ++ } ++ ++#if defined(DEVICE_MEMSETCPY_ARM64) ++ NSHST(); ++#endif ++ ++ pcDst = (char *)((void *)pDst); ++ } ++ ++ while (uSize) 
++ { ++ *pcDst++ = ui8Value; ++ uSize--; ++ } ++} ++ ++#endif /* defined(__GNUC__) */ ++ ++/* Potentially very slow (but safe) fallbacks for non-GNU C compilers */ ++IMG_INTERNAL ++void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t uSize) ++{ ++ volatile const char *pcSrc = pvSrc; ++ volatile char *pcDst = pvDst; ++ ++ while (uSize) ++ { ++ *pcDst++ = *pcSrc++; ++ uSize--; ++ } ++} ++ ++IMG_INTERNAL ++void DeviceMemSetBytes(void *pvDst, unsigned char ui8Value, size_t uSize) ++{ ++ volatile char *pcDst = pvDst; ++ ++ while (uSize) ++ { ++ *pcDst++ = ui8Value; ++ uSize--; ++ } ++} ++ ++#if !defined(__QNXNTO__) /* Ignore Neutrino as it uses strlcpy */ ++ ++#if defined(__KERNEL__) && defined(__linux__) ++/* ++ * In case of Linux kernel-mode in a debug build, choose the variant ++ * of StringLCopy that uses strlcpy and logs truncation via a stack dump. ++ * For Linux kernel-mode in a release build, strlcpy alone is used. ++ */ ++#if defined(DEBUG) ++IMG_INTERNAL ++size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) ++{ ++ /* ++ * Let strlcpy handle any truncation cases correctly. ++ * We will definitely get a NUL-terminated string set in pszDest ++ */ ++ size_t uSrcSize = strlcpy(pszDest, pszSrc, uDataSize); ++ ++#if defined(PVR_DEBUG_STRLCPY) ++ /* Handle truncation by dumping calling stack if debug allows */ ++ if (uSrcSize >= uDataSize) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'", ++ __func__, pszSrc, (long)uDataSize, pszDest)); ++ OSDumpStack(); ++ } ++#endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */ ++ ++ return uSrcSize; ++} ++#endif /* defined(DEBUG) */ ++ ++#else /* defined(__KERNEL__) && defined(__linux__) */ ++/* ++ * For every other platform, make use of the strnlen and strncpy ++ * implementation of StringLCopy. ++ * NOTE: It is crucial to avoid memcpy as this has a hidden side-effect of ++ * dragging in whatever the build-environment flavour of GLIBC is which can ++ * cause unexpected failures for host-side command execution. ++ */ ++IMG_INTERNAL ++size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize) ++{ ++ size_t uSrcSize = strnlen(pszSrc, uDataSize); ++ ++ (void)strncpy(pszDest, pszSrc, uSrcSize); ++ if (uSrcSize == uDataSize) ++ { ++ pszDest[uSrcSize-1] = '\0'; ++ } ++ else ++ { ++ pszDest[uSrcSize] = '\0'; ++ } ++ ++ return uSrcSize; ++} ++ ++#endif /* defined(__KERNEL__) && defined(__linux__) */ ++ ++#endif /* !defined(__QNXNTO__) */ +diff --git a/drivers/gpu/drm/img-rogue/mmu_common.c b/drivers/gpu/drm/img-rogue/mmu_common.c +new file mode 100644 +index 000000000000..1d9bfd71d76e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/mmu_common.c +@@ -0,0 +1,4464 @@ ++/*************************************************************************/ /*! ++@File ++@Title Common MMU Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements basic low level control of MMU. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "pvr_debug.h" ++#include "dllist.h" ++#include "osfunc.h" ++#include "allocmem.h" ++ ++#if defined(SUPPORT_RGX) ++# include "rgx_memallocflags.h" ++# include "rgxmmudefs_km.h" ++#endif ++ ++#include "pvr_notifier.h" ++#include "pvrsrv.h" ++#include "htbuffer.h" ++#include "pvr_ricommon.h" ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++# include "process_stats.h" ++# include "proc_stats.h" ++#endif ++ ++#if defined(PDUMP) ++#include "pdump_km.h" ++#include "pdump_physmem.h" ++#endif ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#include "physmem_lma.h" ++#endif ++ ++/* ++Major Interfaces to other modules: ++ ++Let's keep this graph up-to-date: ++ ++ +-----------+ ++ | devicemem | ++ +-----------+ ++ | ++ +============+ ++ | mmu_common | ++ +============+ ++ | ++ +-----------------+ ++ | | ++ +---------+ +----------+ ++ | pmr | | device | ++ +---------+ +----------+ ++ */ ++ ++#include "mmu_common.h" ++#include "pmr.h" ++#include "devicemem_server_utils.h" ++ ++/* #define MMU_OBJECT_REFCOUNT_DEBUGING 1 */ ++#if defined(MMU_OBJECT_REFCOUNT_DEBUGING) ++#define MMU_OBJ_DBG(x) PVR_DPF(x) ++#else ++#define MMU_OBJ_DBG(x) ++#endif ++ ++/*! ++ * Refcounted structure that is shared between the context and ++ * the cleanup thread items. ++ * It is used to keep track of all cleanup items and whether the creating ++ * MMU context has been destroyed and therefore is not allowed to be ++ * accessed any more. 
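++ *
++ * The structure is reference counted: the creating MMU context holds
++ * one reference and every in-flight cleanup item holds another. Whoever
++ * drops the last reference destroys hCleanupLock and frees the
++ * structure (see _CleanupThread_FreeMMUMapping).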
++ * ++ * The cleanup thread is used to defer the freeing of the page tables ++ * because we have to make sure that the MMU cache has been invalidated. ++ * If we don't take care of this the MMU might partially access cached ++ * and uncached tables which might lead to inconsistencies and in the ++ * worst case to MMU pending faults on random memory. ++ */ ++typedef struct _MMU_CTX_CLEANUP_DATA_ ++{ ++ /*! Refcount to know when this structure can be destroyed */ ++ ATOMIC_T iRef; ++ /*! Protect items in this structure, especially the refcount */ ++ POS_LOCK hCleanupLock; ++ /*! List of all cleanup items currently in flight */ ++ DLLIST_NODE sMMUCtxCleanupItemsHead; ++ /*! Was the MMU context destroyed and should not be accessed any more? */ ++ IMG_BOOL bMMUContextExists; ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ /*! Associated OSid for this context */ ++ IMG_UINT32 ui32OSid; ++#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ ++} MMU_CTX_CLEANUP_DATA; ++ ++ ++/*! ++ * Structure holding one or more page tables that need to be ++ * freed after the MMU cache has been flushed which is signalled when ++ * the stored sync has a value that is <= the required value. ++ */ ++typedef struct _MMU_CLEANUP_ITEM_ ++{ ++ /*! Cleanup thread data */ ++ PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn; ++ /*! List to hold all the MMU_MEMORY_MAPPINGs, i.e. page tables */ ++ DLLIST_NODE sMMUMappingHead; ++ /*! Node of the cleanup item list for the context */ ++ DLLIST_NODE sMMUCtxCleanupItem; ++ /* Pointer to the cleanup meta data */ ++ MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData; ++ /* Sync to query if the MMU cache was flushed */ ++ PVRSRV_CLIENT_SYNC_PRIM *psSync; ++ /*! The update value of the sync to signal that the cache was flushed */ ++ IMG_UINT32 uiRequiredSyncVal; ++ /*! The device node needed to free the page tables */ ++ PVRSRV_DEVICE_NODE *psDevNode; ++} MMU_CLEANUP_ITEM; ++ ++/*! ++ All physical allocations and frees are relative to this context, so ++ we would get all the allocations of PCs, PDs, and PTs from the same ++ RA. ++ ++ We have one per MMU context in case we have mixed UMA/LMA devices ++ within the same system. ++ */ ++typedef struct _MMU_PHYSMEM_CONTEXT_ ++{ ++ /*! Associated MMU_CONTEXT */ ++ struct _MMU_CONTEXT_ *psMMUContext; ++ ++ /*! Parent device node */ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++ /*! Refcount so we know when to free up the arena */ ++ IMG_UINT32 uiNumAllocations; ++ ++ /*! Arena from which physical memory is derived */ ++ RA_ARENA *psPhysMemRA; ++ /*! Arena name */ ++ IMG_CHAR *pszPhysMemRAName; ++ /*! Size of arena name string */ ++ size_t uiPhysMemRANameAllocSize; ++ ++ /*! Meta data for deferred cleanup */ ++ MMU_CTX_CLEANUP_DATA *psCleanupData; ++ /*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */ ++ DLLIST_NODE sTmpMMUMappingHead; ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ IMG_UINT32 ui32OSid; ++ IMG_UINT32 ui32OSidReg; ++ IMG_BOOL bOSidAxiProt; ++#endif ++ ++} MMU_PHYSMEM_CONTEXT; ++ ++/*! ++ Mapping structure for MMU memory allocation ++ */ ++typedef struct _MMU_MEMORY_MAPPING_ ++{ ++ /*! Physmem context to allocate from */ ++ MMU_PHYSMEM_CONTEXT *psContext; ++ /*! OS/system Handle for this allocation */ ++ PG_HANDLE sMemHandle; ++ /*! CPU virtual address of this allocation */ ++ void *pvCpuVAddr; ++ /*! Device physical address of this allocation */ ++ IMG_DEV_PHYADDR sDevPAddr; ++ /*! Size of this allocation */ ++ size_t uiSize; ++ /*! Number of current mappings of this allocation */ ++ IMG_UINT32 uiCpuVAddrRefCount; ++ /*! 
Node for the defer free list */ ++ DLLIST_NODE sMMUMappingItem; ++} MMU_MEMORY_MAPPING; ++ ++/*! ++ Memory descriptor for MMU objects. There can be more than one memory ++ descriptor per MMU memory allocation. ++ */ ++typedef struct _MMU_MEMORY_DESC_ ++{ ++ /* NB: bValid is set if this descriptor describes physical ++ memory. This allows "empty" descriptors to exist, such that we ++ can allocate them in batches. */ ++ /*! Does this MMU object have physical backing */ ++ IMG_BOOL bValid; ++ /*! Device Physical address of physical backing */ ++ IMG_DEV_PHYADDR sDevPAddr; ++ /*! CPU virtual address of physical backing */ ++ void *pvCpuVAddr; ++ /*! Mapping data for this MMU object */ ++ MMU_MEMORY_MAPPING *psMapping; ++ /*! Memdesc offset into the psMapping */ ++ IMG_UINT32 uiOffset; ++ /*! Size of the Memdesc */ ++ IMG_UINT32 uiSize; ++} MMU_MEMORY_DESC; ++ ++/*! ++ MMU levelx structure. This is generic and is used ++ for all levels (PC, PD, PT). ++ */ ++typedef struct _MMU_Levelx_INFO_ ++{ ++ /*! The Number of entries in this level */ ++ IMG_UINT32 ui32NumOfEntries; ++ ++ /*! Number of times this level has been reference. Note: For Level1 (PTE) ++ we still take/drop the reference when setting up the page tables rather ++ then at map/unmap time as this simplifies things */ ++ IMG_UINT32 ui32RefCount; ++ ++ /*! MemDesc for this level */ ++ MMU_MEMORY_DESC sMemDesc; ++ ++ /*! Array of infos for the next level. Must be last member in structure */ ++ struct _MMU_Levelx_INFO_ *apsNextLevel[1]; ++} MMU_Levelx_INFO; ++ ++/*! ++ MMU context structure ++ */ ++struct _MMU_CONTEXT_ ++{ ++ /*! Originating Connection */ ++ CONNECTION_DATA *psConnection; ++ ++ MMU_DEVICEATTRIBS *psDevAttrs; ++ ++ /*! For allocation and deallocation of the physical memory where ++ the pagetables live */ ++ struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx; ++ ++#if defined(PDUMP) ++ /*! PDump context ID (required for PDump commands with virtual addresses) */ ++ IMG_UINT32 uiPDumpContextID; ++ ++ /*! The refcount of the PDump context ID */ ++ IMG_UINT32 ui32PDumpContextIDRefCount; ++#endif ++ ++ /*! MMU cache invalidation flags (only used on Volcanic driver) */ ++ ATOMIC_T sCacheFlags; ++ ++ /*! Lock to ensure exclusive access when manipulating the MMU context or ++ * reading and using its content ++ */ ++ POS_LOCK hLock; ++ ++ /*! Base level info structure. Must be last member in structure */ ++ MMU_Levelx_INFO sBaseLevelInfo; ++ /* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */ ++}; ++ ++static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR}; ++ ++#if defined(DEBUG) ++#include "log2.h" ++#endif ++ ++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) ++static IMG_UINT32 g_ui32MMULeakCounter = 0; ++static DEFINE_MUTEX(g_sMMULeakMutex); ++#endif ++ ++/***************************************************************************** ++ * Utility functions * ++ *****************************************************************************/ ++ ++/*************************************************************************/ /*! ++@Function _FreeMMUMapping ++ ++@Description Free a given dllist of MMU_MEMORY_MAPPINGs and the page tables ++ they represent. 
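++                Callers must ensure the MMU can no longer reference these
++                tables, either because the MMU cache has been invalidated
++                or because the owning MMU context has been destroyed.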
++ ++@Input psDevNode Device node ++ ++@Input psTmpMMUMappingHead List of MMU_MEMORY_MAPPINGs to free ++ */ ++/*****************************************************************************/ ++static void ++_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode, ++ PDLLIST_NODE psTmpMMUMappingHead) ++{ ++ PDLLIST_NODE psNode, psNextNode; ++ ++ /* Free the current list unconditionally */ ++ dllist_foreach_node(psTmpMMUMappingHead, ++ psNode, ++ psNextNode) ++ { ++ MMU_MEMORY_MAPPING *psMapping = IMG_CONTAINER_OF(psNode, ++ MMU_MEMORY_MAPPING, ++ sMMUMappingItem); ++ ++ PhysHeapPagesFree(psDevNode->psMMUPhysHeap, &psMapping->sMemHandle); ++ dllist_remove_node(psNode); ++ OSFreeMem(psMapping); ++ } ++} ++ ++/*************************************************************************/ /*! ++@Function _CleanupThread_FreeMMUMapping ++ ++@Description Function to be executed by the cleanup thread to free ++ MMU_MEMORY_MAPPINGs after the MMU cache has been invalidated. ++ ++ This function will request a MMU cache invalidate once and ++ retry to free the MMU_MEMORY_MAPPINGs until the invalidate ++ has been executed. ++ ++ If the memory context that created this cleanup item has been ++ destroyed in the meantime this function will directly free the ++ MMU_MEMORY_MAPPINGs without waiting for any MMU cache ++ invalidation. ++ ++@Input pvData Cleanup data in form of a MMU_CLEANUP_ITEM ++ ++@Return PVRSRV_OK if successful otherwise PVRSRV_ERROR_RETRY ++ */ ++/*****************************************************************************/ ++static PVRSRV_ERROR ++_CleanupThread_FreeMMUMapping(void* pvData) ++{ ++ PVRSRV_ERROR eError; ++ MMU_CLEANUP_ITEM *psCleanup = (MMU_CLEANUP_ITEM *)pvData; ++ MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData; ++ PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode; ++ IMG_BOOL bFreeNow; ++ IMG_UINT32 uiSyncCurrent; ++ IMG_UINT32 uiSyncReq; ++ ++ OSLockAcquire(psMMUCtxCleanupData->hCleanupLock); ++ ++ /* Don't attempt to free anything when the context has been destroyed. ++ * Especially don't access any device specific structures any more!*/ ++ if (!psMMUCtxCleanupData->bMMUContextExists) ++ { ++ OSFreeMem(psCleanup); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_OK, e0); ++ } ++ ++ if (psCleanup->psSync == NULL) ++ { ++ /* Kick to invalidate the MMU caches and get sync info */ ++ eError = psDevNode->pfnMMUCacheInvalidateKick(psDevNode, ++ &psCleanup->uiRequiredSyncVal); ++ if (eError != PVRSRV_OK) ++ { ++ OSLockRelease(psMMUCtxCleanupData->hCleanupLock); ++ return PVRSRV_ERROR_RETRY; ++ } ++ psCleanup->psSync = psDevNode->psMMUCacheSyncPrim; ++ } ++ ++ uiSyncCurrent = OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr); ++ uiSyncReq = psCleanup->uiRequiredSyncVal; ++ ++ /* Has the invalidate executed */ ++ bFreeNow = (uiSyncCurrent >= uiSyncReq) ? ++ /* ... with the counter wrapped around ... ++ * There can't be 3*1024*1024 transactions completed, so consider wrapped */ ++ (((uiSyncCurrent - uiSyncReq) > 0xF0000000UL)? IMG_FALSE : IMG_TRUE): ++ /* There can't be 3*1024*1024 transactions pending, so consider wrapped */ ++ (((uiSyncReq - uiSyncCurrent) > 0xF0000000UL)? 
IMG_TRUE : IMG_FALSE);
++
++#if defined(NO_HARDWARE)
++	/* In NOHW the syncs will never be updated so just free the tables */
++	bFreeNow = IMG_TRUE;
++#endif
++	/* If the invalidate operation has not completed, check whether it timed out */
++	if (!bFreeNow)
++	{
++		/* If we are within 500ms of the cleanup item's retry time-out,
++		 * consider the operation timed out */
++		if ((psCleanup->sCleanupThreadFn.ui32TimeEnd - psCleanup->sCleanupThreadFn.ui32TimeStart - 500) <=
++		    (OSClockms() - psCleanup->sCleanupThreadFn.ui32TimeStart))
++		{
++			/* Consider the operation timed out */
++			bFreeNow = IMG_TRUE;
++		}
++	}
++
++	/* Free if the invalidate operation completed or the operation itself timed out */
++	if (bFreeNow)
++	{
++		_FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
++
++		dllist_remove_node(&psCleanup->sMMUCtxCleanupItem);
++		OSFreeMem(psCleanup);
++
++		eError = PVRSRV_OK;
++	}
++	else
++	{
++		eError = PVRSRV_ERROR_RETRY;
++	}
++
++e0:
++
++	/* If this cleanup task has been successfully executed we can
++	 * decrease the context cleanup data refcount. "Successfully"
++	 * means here that the MMU_MEMORY_MAPPINGs have been freed by
++	 * either this cleanup task or when the MMU context was
++	 * destroyed. */
++	if (eError == PVRSRV_OK)
++	{
++		OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
++
++		if (OSAtomicDecrement(&psMMUCtxCleanupData->iRef) == 0)
++		{
++			OSLockDestroy(psMMUCtxCleanupData->hCleanupLock);
++			OSFreeMem(psMMUCtxCleanupData);
++		}
++	}
++	else
++	{
++		OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
++	}
++
++
++	return eError;
++}
++
++/*************************************************************************/ /*!
++@Function       _SetupCleanup_FreeMMUMapping
++
++@Description    Setup a cleanup item for the cleanup thread that will
++                kick off a MMU invalidate request and free the associated
++                MMU_MEMORY_MAPPINGs when the invalidate was successful.
++
++@Input          psPhysMemCtx    The current MMU physmem context
++ */
++/*****************************************************************************/
++static void
++_SetupCleanup_FreeMMUMapping(MMU_PHYSMEM_CONTEXT *psPhysMemCtx)
++{
++
++	MMU_CLEANUP_ITEM *psCleanupItem;
++	MMU_CTX_CLEANUP_DATA *psCleanupData = psPhysMemCtx->psCleanupData;
++	PVRSRV_DEVICE_NODE *psDevNode = psPhysMemCtx->psDevNode;
++
++	if (dllist_is_empty(&psPhysMemCtx->sTmpMMUMappingHead))
++	{
++		goto e0;
++	}
++
++#if defined(PDUMP)
++	/* Free the page tables immediately in case of pdump, which avoids
++	 * changing script files due to the additional invalidation kick */
++	goto e1;
++#endif
++
++	/* Don't defer the freeing if we are currently unloading the driver
++	 * or if the sync has been destroyed */
++	if (PVRSRVGetPVRSRVData()->bUnload ||
++	    psDevNode->psMMUCacheSyncPrim == NULL)
++	{
++		goto e1;
++	}
++
++	/* Allocate a cleanup item */
++	psCleanupItem = OSAllocMem(sizeof(*psCleanupItem));
++	if (!psCleanupItem)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "%s: Failed to get memory for deferred page table cleanup. "
++		         "Freeing tables immediately",
++		         __func__));
++		goto e1;
++	}
++
++	/* Set sync to NULL to indicate we did not interact with
++	 * the FW yet. Kicking off an MMU cache invalidate should
++	 * be done in the cleanup thread to not waste time here.
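++	 * The cleanup thread kicks the invalidate on its first run, stores
++	 * the returned value in uiRequiredSyncVal and only frees the page
++	 * tables once the MMU cache sync has caught up with it (or the
++	 * work item times out).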
*/ ++ psCleanupItem->psSync = NULL; ++ psCleanupItem->uiRequiredSyncVal = 0; ++ psCleanupItem->psDevNode = psDevNode; ++ psCleanupItem->psMMUCtxCleanupData = psCleanupData; ++ ++ OSAtomicIncrement(&psCleanupData->iRef); ++ ++ /* Move the page tables to free to the cleanup item */ ++ dllist_replace_head(&psPhysMemCtx->sTmpMMUMappingHead, ++ &psCleanupItem->sMMUMappingHead); ++ ++ /* Add the cleanup item itself to the context list */ ++ dllist_add_to_tail(&psCleanupData->sMMUCtxCleanupItemsHead, ++ &psCleanupItem->sMMUCtxCleanupItem); ++ ++ /* Setup the cleanup thread data and add the work item */ ++ psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping; ++ psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem; ++ psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE; ++ CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn, ++ CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); ++ ++ PVRSRVCleanupThreadAddWork(&psCleanupItem->sCleanupThreadFn); ++ ++ return; ++ ++e1: ++ /* Free the page tables now */ ++ _FreeMMUMapping(psDevNode, &psPhysMemCtx->sTmpMMUMappingHead); ++e0: ++ return; ++} ++ ++/*************************************************************************/ /*! ++@Function _CalcPCEIdx ++ ++@Description Calculate the page catalogue index ++ ++@Input sDevVAddr Device virtual address ++ ++@Input psDevVAddrConfig Configuration of the virtual address ++ ++@Input bRoundUp Round up the index ++ ++@Return The page catalogue index ++ */ ++/*****************************************************************************/ ++static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr, ++ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, ++ IMG_BOOL bRoundUp) ++{ ++ IMG_DEV_VIRTADDR sTmpDevVAddr; ++ IMG_UINT32 ui32RetVal; ++ ++ sTmpDevVAddr = sDevVAddr; ++ ++ if (bRoundUp) ++ { ++ sTmpDevVAddr.uiAddr--; ++ } ++ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask) ++ >> psDevVAddrConfig->uiPCIndexShift); ++ ++ if (bRoundUp) ++ { ++ ui32RetVal++; ++ } ++ ++ return ui32RetVal; ++} ++ ++ ++/*************************************************************************/ /*! ++@Function _CalcPDEIdx ++ ++@Description Calculate the page directory index ++ ++@Input sDevVAddr Device virtual address ++ ++@Input psDevVAddrConfig Configuration of the virtual address ++ ++@Input bRoundUp Round up the index ++ ++@Return The page directory index ++ */ ++/*****************************************************************************/ ++static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr, ++ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, ++ IMG_BOOL bRoundUp) ++{ ++ IMG_DEV_VIRTADDR sTmpDevVAddr; ++ IMG_UINT32 ui32RetVal; ++ ++ sTmpDevVAddr = sDevVAddr; ++ ++ if (bRoundUp) ++ { ++ sTmpDevVAddr.uiAddr--; ++ } ++ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask) ++ >> psDevVAddrConfig->uiPDIndexShift); ++ ++ if (bRoundUp) ++ { ++ ui32RetVal++; ++ } ++ ++ return ui32RetVal; ++} ++ ++ ++/*************************************************************************/ /*! 
++@Function _CalcPTEIdx ++ ++@Description Calculate the page entry index ++ ++@Input sDevVAddr Device virtual address ++ ++@Input psDevVAddrConfig Configuration of the virtual address ++ ++@Input bRoundUp Round up the index ++ ++@Return The page entry index ++ */ ++/*****************************************************************************/ ++static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr, ++ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig, ++ IMG_BOOL bRoundUp) ++{ ++ IMG_DEV_VIRTADDR sTmpDevVAddr; ++ IMG_UINT32 ui32RetVal; ++ ++ sTmpDevVAddr = sDevVAddr; ++ sTmpDevVAddr.uiAddr -= psDevVAddrConfig->uiOffsetInBytes; ++ if (bRoundUp) ++ { ++ sTmpDevVAddr.uiAddr--; ++ } ++ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask) ++ >> psDevVAddrConfig->uiPTIndexShift); ++ ++ if (bRoundUp) ++ { ++ ui32RetVal++; ++ } ++ ++ return ui32RetVal; ++} ++ ++#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) ++/* ++ * RGXMapBRN71422TargetPhysicalAddress ++ * ++ * Set-up a special MMU tree mapping with a single page that eventually points to ++ * RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR. ++ * ++ * PC entries are 32b, with the last 4 bits being 0 except for the LSB bit that should be 1 (Valid). Addr is 4KB aligned. ++ * PD entries are 64b, with addr in bits 39:5 and everything else 0 except for LSB bit that is Valid. Addr is byte aligned? ++ * PT entries are 64b, with phy addr in bits 39:12 and everything else 0 except for LSB bit that is Valid. Addr is 4KB aligned. ++ * So, we can construct the page tables in a single page like this: ++ * 0x00 : PCE (PCE index 0) ++ * 0x04 : 0x0 ++ * 0x08 : PDEa (PDE index 1) ++ * 0x0C : PDEb ++ * 0x10 : PTEa (PTE index 2) ++ * 0x14 : PTEb ++ * ++ * With the PCE and the PDE pointing to this same page. 
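++ * (These indices place the three entries at distinct offsets within
++ * the shared page, exactly as laid out above: the 32-bit PCE at 0x00,
++ * the 64-bit PDE at 0x08 and the 64-bit PTE at 0x10.)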
++ *
++ * The VA address that we are mapping is therefore:
++ *     VA = PCE_idx*PCE_size + PDE_idx*PDE_size + PTE_idx*PTE_size =
++ *        = 0 * 1GB + 1 * 2MB + 2 * 4KB =
++ *        = 0 + 0x20_0000 + 0x2000 =
++ *        = 0x00_0020_2000
++ */
++void RGXMapBRN71422TargetPhysicalAddress(MMU_CONTEXT *psMMUContext)
++{
++	MMU_MEMORY_DESC *psMemDesc = &psMMUContext->sBaseLevelInfo.sMemDesc;
++	IMG_DEV_PHYADDR sPhysAddrPC = psMemDesc->sDevPAddr;
++	IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr;
++	IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr;
++	IMG_UINT64 ui64Entry;
++
++	/* PCE points to PC */
++	ui64Entry = sPhysAddrPC.uiAddr;
++	ui64Entry = ui64Entry >> RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
++	ui64Entry = ui64Entry << RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT;
++	ui64Entry = ui64Entry & ~RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK;
++	ui64Entry = ui64Entry | RGX_MMUCTRL_PC_DATA_VALID_EN;
++	pui32Px[0] = (IMG_UINT32) ui64Entry;
++
++	/* PDE points to PC */
++	ui64Entry = sPhysAddrPC.uiAddr;
++	ui64Entry = ui64Entry & ~RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK;
++	ui64Entry = ui64Entry | RGX_MMUCTRL_PD_DATA_VALID_EN;
++	pui64Px[1] = ui64Entry;
++
++	/* PTE points to PAddr */
++	ui64Entry = RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR;
++	ui64Entry = ui64Entry & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK;
++	ui64Entry = ui64Entry | RGX_MMUCTRL_PT_DATA_VALID_EN;
++	pui64Px[2] = ui64Entry;
++
++	{
++		PVRSRV_ERROR eError;
++		PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psMMUContext->psPhysMemCtx->psDevNode;
++		eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
++		                            &psMemDesc->psMapping->sMemHandle,
++		                            psMemDesc->uiOffset,
++		                            psMemDesc->uiSize);
++		PVR_LOG_IF_ERROR(eError, "pfnDevPxClean");
++	}
++
++	PVR_DPF((PVR_DBG_MESSAGE, "%s: Mapping the BRN71422 workaround to target physical address 0x%" IMG_UINT64_FMTSPECx ".",
++	         __func__, RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR));
++}
++#endif
++
++/*****************************************************************************
++ *           MMU memory allocation/management functions (mem desc)          *
++ *****************************************************************************/
++
++/*************************************************************************/ /*!
++@Function       _MMU_PhysMem_RAImportAlloc
++
++@Description    Imports MMU Px memory into the RA. This is where the
++                actual allocation of physical memory happens.
++
++@Input          hArenaHandle    Handle that was passed in during the
++                                creation of the RA
++
++@Input          uiSize          Size of the memory to import
++
++@Input          uiFlags         Flags that were passed with the allocation.
++
++@Output         puiBase         The address of where to insert this import
++
++@Output         puiActualSize   The actual size of the import
++
++@Output         phPriv          Handle which will be passed back when
++                                this import is freed
++
++@Return         PVRSRV_OK if import alloc was successful
++ */
++/*****************************************************************************/
++static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle,
++                                               RA_LENGTH_T uiSize,
++                                               RA_FLAGS_T uiFlags,
++                                               const IMG_CHAR *pszAnnotation,
++                                               RA_BASE_T *puiBase,
++                                               RA_LENGTH_T *puiActualSize,
++                                               RA_PERISPAN_HANDLE *phPriv)
++{
++	MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle;
++	PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psPhysMemCtx->psDevNode;
++	MMU_MEMORY_MAPPING *psMapping;
++	PVRSRV_ERROR eError;
++	IMG_UINT32 uiPid = 0;
++
++	PVR_UNREFERENCED_PARAMETER(pszAnnotation);
++	PVR_UNREFERENCED_PARAMETER(uiFlags);
++
++	PVR_ASSERT(psDevNode != NULL);
++	PVR_GOTO_IF_INVALID_PARAM(psDevNode, eError, e0);
++
++	psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING));
++	PVR_GOTO_IF_NOMEM(psMapping, eError, e0);
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++	uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ?
++	        PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM();
++#endif
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++	/*
++	 * Store the OSid in the PG_HANDLE.uiOSid field for use by the
++	 * pfnDevPxFree() routine.
++	 */
++	psMapping->sMemHandle.uiOSid = psPhysMemCtx->ui32OSid;
++	eError = PhysHeapPagesAllocGPV(psDevNode->psMMUPhysHeap,
++	                               TRUNCATE_64BITS_TO_SIZE_T(uiSize),
++	                               &psMapping->sMemHandle,
++	                               &psMapping->sDevPAddr,
++	                               psPhysMemCtx->ui32OSid,
++	                               uiPid);
++#else
++	eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap,
++	                            TRUNCATE_64BITS_TO_SIZE_T(uiSize),
++	                            &psMapping->sMemHandle,
++	                            &psMapping->sDevPAddr,
++	                            uiPid);
++#endif
++	if (eError != PVRSRV_OK)
++	{
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++		PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT,
++		                          OSGetCurrentClientProcessIDKM());
++#endif
++		goto e1;
++	}
++
++	psMapping->psContext = psPhysMemCtx;
++	psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize);
++
++	psMapping->uiCpuVAddrRefCount = 0;
++
++	*phPriv = (RA_PERISPAN_HANDLE) psMapping;
++
++	/* Note: This assumes this memory never gets paged out */
++	*puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr;
++	*puiActualSize = uiSize;
++
++	return PVRSRV_OK;
++
++e1:
++	OSFreeMem(psMapping);
++e0:
++	return eError;
++}
++
++/*************************************************************************/ /*!
++@Function       _MMU_PhysMem_RAImportFree
++
++@Description    Frees an import of MMU Px memory from the RA. This is
++                where the freeing of the physical memory is initiated.
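++                The pages are not returned to the OS at this point: the
++                mapping is queued on the physmem context's deferred-free
++                list (sTmpMMUMappingHead) and freed once the MMU cache
++                has been invalidated.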
++
++@Input          hArenaHandle    Handle that was passed in during the
++                                creation of the RA
++
++@Input          uiBase          Base address of the import span being freed
++
++@Input          hPriv           Private data that the import alloc provided
++
++@Return         None
++ */
++/*****************************************************************************/
++static void _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle,
++                                      RA_BASE_T uiBase,
++                                      RA_PERISPAN_HANDLE hPriv)
++{
++	MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *)hPriv;
++	MMU_PHYSMEM_CONTEXT *psPhysMemCtx = (MMU_PHYSMEM_CONTEXT *)hArenaHandle;
++
++	PVR_UNREFERENCED_PARAMETER(uiBase);
++
++	/* Check we have dropped all CPU mappings */
++	PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0);
++
++	/* Add mapping to defer free list */
++	psMapping->psContext = NULL;
++	dllist_add_to_tail(&psPhysMemCtx->sTmpMMUMappingHead, &psMapping->sMMUMappingItem);
++}
++
++/*************************************************************************/ /*!
++@Function       _MMU_PhysMemAlloc
++
++@Description    Allocates physical memory for MMU objects
++
++@Input          psPhysMemCtx    Physmem context to do the allocation from
++
++@Output         psMemDesc       Allocation description
++
++@Input          uiBytes         Size of the allocation in bytes
++
++@Input          uiAlignment     Alignment requirement of this allocation
++
++@Return         PVRSRV_OK if allocation was successful
++ */
++/*****************************************************************************/
++
++static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psPhysMemCtx,
++                                      MMU_MEMORY_DESC *psMemDesc,
++                                      size_t uiBytes,
++                                      size_t uiAlignment)
++{
++	PVRSRV_ERROR eError;
++	RA_BASE_T uiPhysAddr;
++
++	PVR_RETURN_IF_INVALID_PARAM(psMemDesc);
++	PVR_RETURN_IF_INVALID_PARAM(!psMemDesc->bValid);
++
++	eError = RA_Alloc(psPhysMemCtx->psPhysMemRA,
++	                  uiBytes,
++	                  RA_NO_IMPORT_MULTIPLIER,
++	                  0, /* flags */
++	                  uiAlignment,
++	                  "",
++	                  &uiPhysAddr,
++	                  NULL,
++	                  (RA_PERISPAN_HANDLE *)&psMemDesc->psMapping);
++
++	PVR_LOG_RETURN_IF_ERROR(eError, "RA_Alloc");
++
++	psMemDesc->bValid = IMG_TRUE;
++	psMemDesc->pvCpuVAddr = NULL;
++	psMemDesc->sDevPAddr.uiAddr = (IMG_UINT64) uiPhysAddr;
++
++	if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
++	{
++		eError = PhysHeapPagesMap(psPhysMemCtx->psDevNode->psMMUPhysHeap,
++		                          &psMemDesc->psMapping->sMemHandle,
++		                          psMemDesc->psMapping->uiSize,
++		                          &psMemDesc->psMapping->sDevPAddr,
++		                          &psMemDesc->psMapping->pvCpuVAddr);
++		if (eError != PVRSRV_OK)
++		{
++			RA_Free(psPhysMemCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr);
++			return eError;
++		}
++	}
++
++	psMemDesc->psMapping->uiCpuVAddrRefCount++;
++	psMemDesc->uiOffset = (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr);
++	psMemDesc->pvCpuVAddr = (IMG_UINT8 *)psMemDesc->psMapping->pvCpuVAddr + psMemDesc->uiOffset;
++	psMemDesc->uiSize = uiBytes;
++	PVR_ASSERT(psMemDesc->pvCpuVAddr != NULL);
++
++	return PVRSRV_OK;
++}
++
++/*************************************************************************/ /*!
++@Function       _MMU_PhysMemFree
++
++@Description    Frees physical memory for MMU objects
++
++@Input          psPhysMemCtx    Physmem context to do the free on
++
++@Input          psMemDesc       Allocation description
++
++@Return         None
++ */
++/*****************************************************************************/
++static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psPhysMemCtx,
++                             MMU_MEMORY_DESC *psMemDesc)
++{
++	RA_BASE_T uiPhysAddr;
++
++	PVR_ASSERT(psMemDesc->bValid);
++
++	if (--psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
++	{
++		PhysHeapPagesUnMap(psPhysMemCtx->psDevNode->psMMUPhysHeap,
++		                   &psMemDesc->psMapping->sMemHandle,
++		                   psMemDesc->psMapping->pvCpuVAddr);
++	}
++
++	psMemDesc->pvCpuVAddr = NULL;
++
++	uiPhysAddr = psMemDesc->sDevPAddr.uiAddr;
++	RA_Free(psPhysMemCtx->psPhysMemRA, uiPhysAddr);
++
++	psMemDesc->bValid = IMG_FALSE;
++}
++
++
++/*****************************************************************************
++ *              MMU object allocation/management functions                  *
++ *****************************************************************************/
++
++static INLINE PVRSRV_ERROR _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate,
++                                                   PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
++                                                   MMU_PROTFLAGS_T *uiMMUProtFlags,
++                                                   MMU_CONTEXT *psMMUContext)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++	IMG_UINT32 uiGPUCacheMode;
++	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
++
++	/* Do flag conversion between devmem flags and MMU generic flags */
++	if (bInvalidate == IMG_FALSE)
++	{
++		*uiMMUProtFlags |= ((uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
++		                    >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
++		                    << MMU_PROTFLAGS_DEVICE_OFFSET;
++
++		if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags))
++		{
++			*uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
++		}
++		if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags))
++		{
++			*uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE;
++		}
++
++		eError = DevmemDeviceCacheMode(psDevNode, uiMappingFlags, &uiGPUCacheMode);
++		PVR_RETURN_IF_ERROR(eError);
++
++		switch (uiGPUCacheMode)
++		{
++			case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
++			case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC:
++				break;
++			case PVRSRV_MEMALLOCFLAG_GPU_CACHED:
++				*uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
++				break;
++			default:
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s: Unsupported GPU cache mode",
++				         __func__));
++				return PVRSRV_ERROR_INVALID_PARAMS;
++		}
++
++		if (DevmemDeviceCacheCoherency(psDevNode, uiMappingFlags))
++		{
++			*uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT;
++		}
++	/* Only compile if RGX_FEATURE_MIPS_BIT_MASK is defined to avoid compilation
++	 * errors on volcanic cores.
++	 */
++#if defined(SUPPORT_RGX) && defined(RGX_FEATURE_MIPS_BIT_MASK)
++		if ((psDevNode->pfnCheckDeviceFeature) &&
++		    PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS))
++		{
++			/* If we are allocating on the MMU of the firmware processor, the
++			 * cached/uncached attributes must depend on the FIRMWARE_CACHED
++			 * allocation flag.
++			 */
++			if (psMMUContext->psDevAttrs == psDevNode->psFirmwareMMUDevAttrs)
++			{
++				if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED))
++				{
++					*uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
++				}
++				else
++				{
++					*uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED;
++				}
++				*uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT;
++			}
++		}
++#endif
++	}
++	else
++	{
++		*uiMMUProtFlags |= MMU_PROTFLAGS_INVALID;
++	}
++
++	return PVRSRV_OK;
++}
++
++/*************************************************************************/ /*!
++@Function       _PxMemAlloc
++
++@Description    Allocates physical memory for MMU objects, initialises
++                and PDumps it.
++
++@Input          psMMUContext    MMU context
++
++@Input          uiNumEntries    Number of entries to allocate
++
++@Input          psConfig        MMU Px config
++
++@Input          eMMULevel       MMU level that the allocation is for
++
++@Output         psMemDesc       Description of allocation
++
++@Input          uiLog2Align     Log2 of the alignment required for the
++                                allocation
++
++@Return         PVRSRV_OK if allocation was successful
++ */
++/*****************************************************************************/
++static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext,
++                                IMG_UINT32 uiNumEntries,
++                                const MMU_PxE_CONFIG *psConfig,
++                                MMU_LEVEL eMMULevel,
++                                MMU_MEMORY_DESC *psMemDesc,
++                                IMG_UINT32 uiLog2Align)
++{
++	PVRSRV_ERROR eError;
++	size_t uiBytes;
++	size_t uiAlign;
++	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
++
++	PVR_ASSERT(psConfig->uiBytesPerEntry != 0);
++
++	uiBytes = uiNumEntries * psConfig->uiBytesPerEntry;
++	/* We need the alignment of the previous level here because that is the
++	 * alignment of the entry we generate */
++	uiAlign = 1 << uiLog2Align;
++
++	/*
++	 * If the hardware specifies an alignment requirement for a page table then
++	 * it also requires that all memory up to the next aligned address is
++	 * zeroed.
++	 *
++	 * Failing to do this can result in uninitialised data outside of the actual
++	 * page table range being read by the MMU and treated as valid, e.g. the
++	 * pending flag.
++	 *
++	 * Typically this will affect 1MiB, 2MiB PT pages which have a size of 16
++	 * and 8 bytes respectively but an alignment requirement of 64 bytes each.
++	 */
++	uiBytes = PVR_ALIGN(uiBytes, uiAlign);
++
++	/* allocate the object */
++	eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx,
++	                           psMemDesc, uiBytes, uiAlign);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_LOG_GOTO_WITH_ERROR("_MMU_PhysMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0);
++	}
++
++	/*
++	   Clear the object
++	   Note: if any MMUs are cleared with non-zero values then we will need a
++	   custom clear function
++	   Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
++	   unlikely
++	 */
++	OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes);
++
++	eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
++	                            &psMemDesc->psMapping->sMemHandle,
++	                            psMemDesc->uiOffset,
++	                            psMemDesc->uiSize);
++	PVR_GOTO_IF_ERROR(eError, e1);
++
++#if defined(PDUMP)
++	PDUMPCOMMENT(psDevNode, "Alloc MMU object");
++
++	PDumpMMUMalloc(psDevNode,
++	               psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
++	               eMMULevel,
++	               &psMemDesc->sDevPAddr,
++	               uiBytes,
++	               uiAlign,
++	               psMMUContext->psDevAttrs->eMMUType);
++
++	PDumpMMUDumpPxEntries(psDevNode,
++	                      eMMULevel,
++	                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
++	                      psMemDesc->pvCpuVAddr,
++	                      psMemDesc->sDevPAddr,
++	                      0,
++	                      uiNumEntries,
++	                      NULL, NULL, 0, /* pdump symbolic info is irrelevant here */
++	                      psConfig->uiBytesPerEntry,
++	                      uiLog2Align,
++	                      psConfig->uiAddrShift,
++	                      psConfig->uiAddrMask,
++	                      psConfig->uiProtMask,
++	                      psConfig->uiValidEnMask,
++	                      0,
++	                      psMMUContext->psDevAttrs->eMMUType);
++#endif
++
++	return PVRSRV_OK;
++e1:
++	_MMU_PhysMemFree(psMMUContext->psPhysMemCtx,
++	                 psMemDesc);
++e0:
++	PVR_ASSERT(eError != PVRSRV_OK);
++	return eError;
++}
++
++/*************************************************************************/ /*!
++@Function       _PxMemFree
++
++@Description    Frees physical memory for MMU objects, de-initialises
++                and PDumps it.
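++                The CPU mapping of the underlying page is only torn down
++                when the last MemDesc sharing the MMU_MEMORY_MAPPING drops
++                its reference (uiCpuVAddrRefCount, see _MMU_PhysMemFree).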
++
++@Input          psMMUContext    MMU context
++
++@Input          psMemDesc       Description of allocation
++
++@Input          eMMULevel       Level of the MMU object being freed
++
++@Return         None
++ */
++/*****************************************************************************/
++
++static void _PxMemFree(MMU_CONTEXT *psMMUContext,
++                       MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel)
++{
++#if defined(MMU_CLEARMEM_ON_FREE)
++	/*
++	   Clear the MMU object
++	   Note: if any MMUs are cleared with non-zero values then we will need a
++	   custom clear function
++	   Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
++	   unlikely
++	 */
++	OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->uiSize);
++
++#if defined(PDUMP)
++	PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, "Clear MMU object before freeing it");
++#endif
++#endif /* MMU_CLEARMEM_ON_FREE */
++
++#if defined(PDUMP)
++	PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, "Free MMU object");
++	PDumpMMUFree(psMMUContext->psPhysMemCtx->psDevNode,
++	             psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
++	             eMMULevel,
++	             &psMemDesc->sDevPAddr,
++	             psMMUContext->psDevAttrs->eMMUType);
++#else
++	PVR_UNREFERENCED_PARAMETER(eMMULevel);
++#endif
++	/* free the Px */
++	_MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc);
++}
++
++static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext,
++                                     MMU_Levelx_INFO *psLevel,
++                                     IMG_UINT32 uiIndex,
++                                     const MMU_PxE_CONFIG *psConfig,
++                                     const IMG_DEV_PHYADDR *psDevPAddr,
++                                     IMG_BOOL bUnmap,
++#if defined(PDUMP)
++                                     const IMG_CHAR *pszMemspaceName,
++                                     const IMG_CHAR *pszSymbolicAddr,
++                                     IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
++#endif
++                                     IMG_UINT64 uiProtFlags)
++{
++	MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
++	IMG_UINT64 ui64PxE64;
++	IMG_UINT64 uiAddr = psDevPAddr->uiAddr;
++	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
++
++	if (psDevNode->pfnValidateOrTweakPhysAddrs)
++	{
++		PVRSRV_ERROR eErr = psDevNode->pfnValidateOrTweakPhysAddrs(psDevNode,
++		                                                           psMMUContext->psDevAttrs,
++		                                                           &uiAddr);
++		/* return if error */
++		PVR_LOG_RETURN_IF_ERROR(eErr, "_SetupPTE");
++	}
++
++	/* Calculate Entry */
++	ui64PxE64 =    uiAddr                    /* Calculate the offset to that base */
++	            >> psConfig->uiAddrLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */
++	            << psConfig->uiAddrShift     /* Shift back to fit address in the Px entry */
++	            &  psConfig->uiAddrMask;     /* Delete unused bits */
++	ui64PxE64 |= uiProtFlags;
++
++	/* Set the entry */
++	if (psConfig->uiBytesPerEntry == 8)
++	{
++		IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
++
++		pui64Px[uiIndex] = ui64PxE64;
++	}
++	else if (psConfig->uiBytesPerEntry == 4)
++	{
++		IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
++
++		/* assert that the result fits into 32 bits before writing
++		   it into the 32-bit array with a cast */
++		PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
++
++		pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
++	}
++	else
++	{
++		return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
++	}
++
++
++	/* Log modification */
++	HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
++	        HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
++	        uiIndex, MMU_LEVEL_1,
++	        HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
++	        !bUnmap);
++
++#if defined(PDUMP)
++	PDumpMMUDumpPxEntries(psDevNode,
++	                      MMU_LEVEL_1,
++	                      psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
++	                      psMemDesc->pvCpuVAddr,
++	                      psMemDesc->sDevPAddr,
++	                      uiIndex,
++	                      1,
++	                      pszMemspaceName,
++	                      pszSymbolicAddr,
++	                      uiSymbolicAddrOffset,
++
psConfig->uiBytesPerEntry, ++ psConfig->uiAddrLog2Align, ++ psConfig->uiAddrShift, ++ psConfig->uiAddrMask, ++ psConfig->uiProtMask, ++ psConfig->uiValidEnMask, ++ 0, ++ psMMUContext->psDevAttrs->eMMUType); ++#endif /*PDUMP*/ ++ ++ return PVRSRV_OK; ++} ++ ++/*************************************************************************/ /*! ++@Function _SetupPxE ++ ++@Description Sets up an entry of an MMU object to point to the ++ provided address ++ ++@Input psMMUContext MMU context to operate on ++ ++@Input psLevel Level info for MMU object ++ ++@Input uiIndex Index into the MMU object to setup ++ ++@Input psConfig MMU Px config ++ ++@Input eMMULevel Level of MMU object ++ ++@Input psDevPAddr Address to setup the MMU object to point to ++ ++@Input pszMemspaceName Name of the PDump memory space that the entry ++ will point to ++ ++@Input pszSymbolicAddr PDump symbolic address that the entry will ++ point to ++ ++@Input uiProtFlags MMU protection flags ++ ++@Return PVRSRV_OK if the setup was successful ++ */ ++/*****************************************************************************/ ++static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext, ++ MMU_Levelx_INFO *psLevel, ++ IMG_UINT32 uiIndex, ++ const MMU_PxE_CONFIG *psConfig, ++ MMU_LEVEL eMMULevel, ++ const IMG_DEV_PHYADDR *psDevPAddr, ++#if defined(PDUMP) ++ const IMG_CHAR *pszMemspaceName, ++ const IMG_CHAR *pszSymbolicAddr, ++ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset, ++#endif ++ MMU_PROTFLAGS_T uiProtFlags, ++ IMG_UINT32 uiLog2DataPageSize) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; ++ MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc; ++ ++ IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32); ++ IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT32); ++ ++ if (!psDevPAddr) ++ { ++ /* Invalidate entry */ ++ if (~uiProtFlags & MMU_PROTFLAGS_INVALID) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry")); ++ uiProtFlags |= MMU_PROTFLAGS_INVALID; ++ } ++ psDevPAddr = &gsBadDevPhyAddr; ++ } ++ else ++ { ++ if (uiProtFlags & MMU_PROTFLAGS_INVALID) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry")); ++ uiProtFlags |= MMU_PROTFLAGS_INVALID; ++ } ++ } ++ ++ switch (eMMULevel) ++ { ++ case MMU_LEVEL_3: ++ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4; ++ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePCEProt8; ++ break; ++ ++ case MMU_LEVEL_2: ++ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4; ++ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8; ++ break; ++ ++ case MMU_LEVEL_1: ++ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4; ++ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8; ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* How big is a PxE in bytes? 
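++	   Only 4 and 8 byte entries are supported; anything else is
++	   rejected below with PVRSRV_ERROR_MMU_CONFIG_IS_WRONG.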
*/ ++ /* Filling the actual Px entry with an address */ ++ switch (psConfig->uiBytesPerEntry) ++ { ++ case 4: ++ { ++ IMG_UINT32 *pui32Px; ++ IMG_UINT64 ui64PxE64; ++ ++ pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ ++ ++ ui64PxE64 = psDevPAddr->uiAddr /* Calculate the offset to that base */ ++ >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */ ++ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ ++ & psConfig->uiAddrMask; /* Delete unused higher bits */ ++ ++ ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags); ++ /* assert that the result fits into 32 bits before writing ++ it into the 32-bit array with a cast */ ++ PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU)); ++ ++ /* We should never invalidate an invalid page */ ++ if (uiProtFlags & MMU_PROTFLAGS_INVALID) ++ { ++ PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64); ++ } ++ pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64; ++ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, ++ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), ++ uiIndex, eMMULevel, ++ HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64), ++ (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1); ++ break; ++ } ++ case 8: ++ { ++ IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */ ++ ++ pui64Px[uiIndex] = psDevPAddr->uiAddr /* Calculate the offset to that base */ ++ >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */ ++ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */ ++ & psConfig->uiAddrMask; /* Delete unused higher bits */ ++ pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize); ++ ++ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, ++ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), ++ uiIndex, eMMULevel, ++ HTBLOG_U64_BITS_HIGH(pui64Px[uiIndex]), HTBLOG_U64_BITS_LOW(pui64Px[uiIndex]), ++ (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1); ++ break; ++ } ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d", ++ __func__, psConfig->uiBytesPerEntry, eMMULevel)); ++ ++ return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG; ++ } ++ ++#if defined(PDUMP) ++ PDumpMMUDumpPxEntries(psDevNode, ++ eMMULevel, ++ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, ++ psMemDesc->pvCpuVAddr, ++ psMemDesc->sDevPAddr, ++ uiIndex, ++ 1, ++ pszMemspaceName, ++ pszSymbolicAddr, ++ uiSymbolicAddrOffset, ++ psConfig->uiBytesPerEntry, ++ psConfig->uiAddrLog2Align, ++ psConfig->uiAddrShift, ++ psConfig->uiAddrMask, ++ psConfig->uiProtMask, ++ psConfig->uiValidEnMask, ++ 0, ++ psMMUContext->psDevAttrs->eMMUType); ++#endif ++ ++ psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext, ++ eMMULevel, ++ uiProtFlags & MMU_PROTFLAGS_INVALID); ++ ++ return PVRSRV_OK; ++} ++ ++/***************************************************************************** ++ * MMU host control functions (Level Info) * ++ *****************************************************************************/ ++ ++ ++/*************************************************************************/ /*! ++@Function _MMU_FreeLevel ++ ++@Description Recursively frees the specified range of Px entries. If any ++ level has its last reference dropped then the MMU object ++ memory and the MMU_Levelx_Info will be freed. ++ ++ At each level we might be crossing a boundary from one Px to ++ another. 
The values in auiStartArray should only be used for
++                the first call into each level and the values in auiEndArray
++                should only be used in the last call for each level.
++                In order to determine if this is the first/last call we pass
++                in bFirst and bLast.
++                When one level recurses into the next, bFirst/bLast is only
++                passed down as set if it is set at this level and this is the
++                first/last iteration of the loop at this level.
++                This gives each iteration the knowledge of the level above,
++                which is required.
++
++@Input          psMMUContext            MMU context to operate on
++
++@Input          psLevel                 Level info on which to free the
++                                        specified range
++
++@Input          auiStartArray           Array of start indexes (one for each level)
++
++@Input          auiEndArray             Array of end indexes (one for each level)
++
++@Input          auiEntriesPerPxArray    Array of number of entries for the Px
++                                        (one for each level)
++
++@Input          apsConfig               Array of PxE configs (one for each level)
++
++@Input          aeMMULevel              Array of MMU levels (one for each level)
++
++@Input          pui32CurrentLevel       Pointer to a variable which is set to our
++                                        current level
++
++@Input          uiStartIndex            Start index of the range to free
++
++@Input          uiEndIndex              End index of the range to free
++
++@Input          bFirst                  This is the first call for this level
++
++@Input          bLast                   This is the last call for this level
++
++@Return         IMG_TRUE if the last reference to psLevel was dropped
++ */
++/*****************************************************************************/
++static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext,
++                               MMU_Levelx_INFO *psLevel,
++                               IMG_UINT32 auiStartArray[],
++                               IMG_UINT32 auiEndArray[],
++                               IMG_UINT32 auiEntriesPerPxArray[],
++                               const MMU_PxE_CONFIG *apsConfig[],
++                               MMU_LEVEL aeMMULevel[],
++                               IMG_UINT32 *pui32CurrentLevel,
++                               IMG_UINT32 uiStartIndex,
++                               IMG_UINT32 uiEndIndex,
++                               IMG_BOOL bFirst,
++                               IMG_BOOL bLast,
++                               IMG_UINT32 uiLog2DataPageSize)
++{
++	IMG_UINT32 uiThisLevel = *pui32CurrentLevel;
++	const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel];
++	IMG_UINT32 i;
++	IMG_BOOL bFreed = IMG_FALSE;
++	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
++
++	/* Parameter checks */
++	PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
++	PVR_ASSERT(psLevel != NULL);
++
++	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d",
++	             aeMMULevel[uiThisLevel], uiStartIndex,
++	             uiEndIndex, psLevel->ui32RefCount));
++
++	for (i = uiStartIndex; (i < uiEndIndex) && (psLevel != NULL); i++)
++	{
++		if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
++		{
++			MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i];
++			IMG_UINT32 uiNextStartIndex;
++			IMG_UINT32 uiNextEndIndex;
++			IMG_BOOL bNextFirst;
++			IMG_BOOL bNextLast;
++
++			/* If we're crossing a Px then the start index changes */
++			if (bFirst && (i == uiStartIndex))
++			{
++				uiNextStartIndex = auiStartArray[uiThisLevel + 1];
++				bNextFirst = IMG_TRUE;
++			}
++			else
++			{
++				uiNextStartIndex = 0;
++				bNextFirst = IMG_FALSE;
++			}
++
++			/* If we're crossing a Px then the end index changes */
++			if (bLast && (i == (uiEndIndex - 1)))
++			{
++				uiNextEndIndex = auiEndArray[uiThisLevel + 1];
++				bNextLast = IMG_TRUE;
++			}
++			else
++			{
++				uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
++				bNextLast = IMG_FALSE;
++			}
++
++			/* Recurse into the next level */
++			(*pui32CurrentLevel)++;
++			if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray,
++			                   auiEndArray, auiEntriesPerPxArray,
++			                   apsConfig, aeMMULevel, pui32CurrentLevel,
++			                   uiNextStartIndex, uiNextEndIndex,
++			                   bNextFirst, bNextLast, uiLog2DataPageSize))
++			{
++				PVRSRV_ERROR eError;
++
++				/* Un-wire the entry */
++				eError = _SetupPxE(psMMUContext,
++				                   psLevel,
++				                   i,
++				                   psConfig,
++				                   aeMMULevel[uiThisLevel],
++				                   NULL,
++#if defined(PDUMP)
++				                   NULL, /* Only required for data page */
++				                   NULL, /* Only required for data page */
++				                   0,    /* Only required for data page */
++#endif
++				                   MMU_PROTFLAGS_INVALID,
++				                   uiLog2DataPageSize);
++
++				PVR_ASSERT(eError == PVRSRV_OK);
++
++				/* Free table of the level below, pointed to by this table entry.
++				 * We don't destroy the table inside the above _MMU_FreeLevel call because we
++				 * first have to set the table entry of the level above to invalid. */
++				_PxMemFree(psMMUContext, &psNextLevel->sMemDesc, aeMMULevel[*pui32CurrentLevel]);
++				OSFreeMem(psNextLevel);
++
++				/* The level below us is empty, drop the refcount and clear the pointer */
++				psLevel->ui32RefCount--;
++				psLevel->apsNextLevel[i] = NULL;
++
++				/* Check we haven't wrapped around */
++				PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
++			}
++			(*pui32CurrentLevel)--;
++		}
++		else
++		{
++			psLevel->ui32RefCount--;
++		}
++
++		/*
++		   Free this level if it is no longer referenced, unless it's the base
++		   level in which case it's part of the MMU context and should be freed
++		   when the MMU context is freed
++		 */
++		if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo))
++		{
++			bFreed = IMG_TRUE;
++		}
++	}
++
++	/* Level one flushing is done when we actually write the table entries */
++	if ((aeMMULevel[uiThisLevel] != MMU_LEVEL_1) && (psLevel != NULL))
++	{
++		PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
++		                   &psLevel->sMemDesc.psMapping->sMemHandle,
++		                   uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
++		                   (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
++	}
++
++	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d",
++	             aeMMULevel[uiThisLevel], bFreed?0: (psLevel)?psLevel->ui32RefCount:-1));
++
++	return bFreed;
++}
++
++/*************************************************************************/ /*!
++@Function       _MMU_AllocLevel
++
++@Description    Recursively allocates the specified range of Px entries,
++                allocating the MMU object memory and an MMU_Levelx_INFO
++                structure for any level that does not yet exist.
++
++                At each level we might be crossing a boundary from one Px to
++                another. The values in auiStartArray should only be used for
++                the first call into each level and the values in auiEndArray
++                should only be used in the last call for each level.
++                In order to determine if this is the first/last call we pass
++                in bFirst and bLast.
++                When one level recurses into the next, bFirst/bLast is only
++                passed down as set if it is set at this level and this is the
++                first/last iteration of the loop at this level.
++                This gives each iteration the knowledge of the level above,
++                which is required.
++
++@Input          psMMUContext            MMU context to operate on
++
++@Input          psLevel                 Level info on which to allocate the
++                                        specified range
++
++@Input          auiStartArray           Array of start indexes (one for each level)
++
++@Input          auiEndArray             Array of end indexes (one for each level)
++
++@Input          auiEntriesPerPxArray    Array of number of entries for the Px
++                                        (one for each level)
++
++@Input          apsConfig               Array of PxE configs (one for each level)
++
++@Input          aeMMULevel              Array of MMU levels (one for each level)
++
++@Input          pui32CurrentLevel       Pointer to a variable which is set to our
++                                        current level
++
++@Input          uiStartIndex            Start index of the range to allocate
++
++@Input          uiEndIndex              End index of the range to allocate
++
++@Input          bFirst                  This is the first call for this level
++
++@Input          bLast                   This is the last call for this level
++
++@Return         PVRSRV_OK if the allocation was successful
++ */
++/*****************************************************************************/
++static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext,
++                                    MMU_Levelx_INFO *psLevel,
++                                    IMG_UINT32 auiStartArray[],
++                                    IMG_UINT32 auiEndArray[],
++                                    IMG_UINT32 auiEntriesPerPxArray[],
++                                    const MMU_PxE_CONFIG *apsConfig[],
++                                    MMU_LEVEL aeMMULevel[],
++                                    IMG_UINT32 *pui32CurrentLevel,
++                                    IMG_UINT32 uiStartIndex,
++                                    IMG_UINT32 uiEndIndex,
++                                    IMG_BOOL bFirst,
++                                    IMG_BOOL bLast,
++                                    IMG_UINT32 uiLog2DataPageSize)
++{
++	IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */
++	const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config for the current level */
++	PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++	IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */
++	IMG_UINT32 i;
++	PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
++
++	/* Parameter check */
++	PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
++
++	MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d",
++	             aeMMULevel[uiThisLevel], uiStartIndex,
++	             uiEndIndex, psLevel->ui32RefCount));
++
++	/* Go from uiStartIndex to uiEndIndex through the Px */
++	for (i = uiStartIndex; i < uiEndIndex; i++)
++	{
++		/* Only try an allocation if this is not the last level */
++		/* Because a PT allocation is already done while setting the entry in PD */
++		if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
++		{
++			IMG_UINT32 uiNextStartIndex;
++			IMG_UINT32 uiNextEndIndex;
++			IMG_BOOL bNextFirst;
++			IMG_BOOL bNextLast;
++
++			/* If there is already a next Px level existing, do not allocate it */
++			if (!psLevel->apsNextLevel[i])
++			{
++				MMU_Levelx_INFO *psNextLevel;
++				IMG_UINT32 ui32AllocSize;
++				IMG_UINT32 uiNextEntries;
++
++				/* Allocate and setup the next level */
++				uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1];
++				ui32AllocSize = sizeof(MMU_Levelx_INFO);
++				if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1)
++				{
++					ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1);
++				}
++				psNextLevel = OSAllocZMem(ui32AllocSize);
++				if (psNextLevel == NULL)
++				{
++					uiAllocState = 0;
++					goto e0;
++				}
++
++				/* Hook in this level for next time */
++				psLevel->apsNextLevel[i] = psNextLevel;
++
++				psNextLevel->ui32NumOfEntries = uiNextEntries;
++				psNextLevel->ui32RefCount = 0;
++				/* Allocate Px memory for a sub level */
++				eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1],
++				                     aeMMULevel[uiThisLevel + 1],
++				                     &psNextLevel->sMemDesc,
++				                     psConfig->uiAddrLog2Align);
++				if (eError != PVRSRV_OK)
++				{
++					uiAllocState = 1;
++					goto e0;
++				}
++
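++				/* Point entry i of this level at the newly allocated
++				 * table below: _SetupPxE writes the table's device
++				 * physical address, plus the protection bits derived
++				 * for this level, into this level's Px memory. */
++				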
/* Wire up the entry */ ++ eError = _SetupPxE(psMMUContext, ++ psLevel, ++ i, ++ psConfig, ++ aeMMULevel[uiThisLevel], ++ &psNextLevel->sMemDesc.sDevPAddr, ++#if defined(PDUMP) ++ NULL, /* Only required for data page */ ++ NULL, /* Only required for data page */ ++ 0, /* Only required for data page */ ++#endif ++ 0, ++ uiLog2DataPageSize); ++ ++ if (eError != PVRSRV_OK) ++ { ++ uiAllocState = 2; ++ goto e0; ++ } ++ ++ psLevel->ui32RefCount++; ++ } ++ ++ /* If we're crossing a Px then the start index changes */ ++ if (bFirst && (i == uiStartIndex)) ++ { ++ uiNextStartIndex = auiStartArray[uiThisLevel + 1]; ++ bNextFirst = IMG_TRUE; ++ } ++ else ++ { ++ uiNextStartIndex = 0; ++ bNextFirst = IMG_FALSE; ++ } ++ ++ /* If we're crossing a Px then the end index changes */ ++ if (bLast && (i == (uiEndIndex - 1))) ++ { ++ uiNextEndIndex = auiEndArray[uiThisLevel + 1]; ++ bNextLast = IMG_TRUE; ++ } ++ else ++ { ++ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1]; ++ bNextLast = IMG_FALSE; ++ } ++ ++ /* Recurse into the next level */ ++ (*pui32CurrentLevel)++; ++ eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i], ++ auiStartArray, ++ auiEndArray, ++ auiEntriesPerPxArray, ++ apsConfig, ++ aeMMULevel, ++ pui32CurrentLevel, ++ uiNextStartIndex, ++ uiNextEndIndex, ++ bNextFirst, ++ bNextLast, ++ uiLog2DataPageSize); ++ (*pui32CurrentLevel)--; ++ if (eError != PVRSRV_OK) ++ { ++ uiAllocState = 2; ++ goto e0; ++ } ++ } ++ else ++ { ++ /* All we need to do for level 1 is bump the refcount */ ++ psLevel->ui32RefCount++; ++ } ++ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); ++ } ++ ++ /* Level one flushing is done when we actually write the table entries */ ++ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1) ++ { ++ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, ++ &psLevel->sMemDesc.psMapping->sMemHandle, ++ uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, ++ (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ } ++ ++ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, refcount = %d", ++ aeMMULevel[uiThisLevel], psLevel->ui32RefCount)); ++ return PVRSRV_OK; ++ ++e0: ++ /* Confirm that we've not come down this route unexpectedly */ ++ PVR_ASSERT(uiAllocState!=99); ++ PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d" ++ ,eError, aeMMULevel[uiThisLevel], uiAllocState)); ++ ++ /* The start value of index variable i is not initialised on purpose. ++ * This clean-up loop deinitialises what was already initialised in ++ * reverse order, so the i index already has the correct value. 
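++	 * uiAllocState records how far entry i got before the failure:
++	 * 0 = the Levelx_INFO allocation failed, 1 = the Px memory
++	 * allocation failed, 2 = the entry setup or the recursion failed.
++	 * After the first (failing) entry has been unwound, uiAllocState
++	 * is set to 3 so that every earlier, fully initialised entry is
++	 * torn down completely; the switch cases below fall through so
++	 * each stage undoes its own work.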
++ */
++    for (/* i already set */; i >= uiStartIndex && i < uiEndIndex; i--)
++    {
++        switch (uiAllocState)
++        {
++            IMG_UINT32 uiNextStartIndex;
++            IMG_UINT32 uiNextEndIndex;
++            IMG_BOOL bNextFirst;
++            IMG_BOOL bNextLast;
++
++            case 3:
++                /* If we're crossing a Px then the start index changes */
++                if (bFirst && (i == uiStartIndex))
++                {
++                    uiNextStartIndex = auiStartArray[uiThisLevel + 1];
++                    bNextFirst = IMG_TRUE;
++                }
++                else
++                {
++                    uiNextStartIndex = 0;
++                    bNextFirst = IMG_FALSE;
++                }
++
++                /* If we're crossing a Px then the end index changes */
++                if (bLast && (i == (uiEndIndex - 1)))
++                {
++                    uiNextEndIndex = auiEndArray[uiThisLevel + 1];
++                    bNextLast = IMG_TRUE;
++                }
++                else
++                {
++                    uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
++                    bNextLast = IMG_FALSE;
++                }
++
++                if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
++                {
++                    (*pui32CurrentLevel)++;
++                    if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i],
++                                       auiStartArray, auiEndArray,
++                                       auiEntriesPerPxArray, apsConfig,
++                                       aeMMULevel, pui32CurrentLevel,
++                                       uiNextStartIndex, uiNextEndIndex,
++                                       bNextFirst, bNextLast, uiLog2DataPageSize))
++                    {
++                        psLevel->ui32RefCount--;
++                        psLevel->apsNextLevel[i] = NULL;
++
++                        /* Check we haven't wrapped around */
++                        PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
++                    }
++                    (*pui32CurrentLevel)--;
++                }
++                else
++                {
++                    /* We should never come down this path, but it's here
++                       for completeness */
++                    psLevel->ui32RefCount--;
++
++                    /* Check we haven't wrapped around */
++                    PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
++                }
++
++                __fallthrough;
++            case 2:
++                if (psLevel->apsNextLevel[i] != NULL &&
++                    psLevel->apsNextLevel[i]->ui32RefCount == 0)
++                {
++                    _PxMemFree(psMMUContext, &psLevel->sMemDesc,
++                               aeMMULevel[uiThisLevel]);
++                }
++
++                __fallthrough;
++            case 1:
++                if (psLevel->apsNextLevel[i] != NULL &&
++                    psLevel->apsNextLevel[i]->ui32RefCount == 0)
++                {
++                    OSFreeMem(psLevel->apsNextLevel[i]);
++                    psLevel->apsNextLevel[i] = NULL;
++                }
++
++                __fallthrough;
++            case 0:
++                uiAllocState = 3;
++                break;
++        }
++    }
++    return eError;
++}
++
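The index bookkeeping above is easier to see in miniature. The following standalone sketch assumes a toy three-level layout (9 index bits per level, 4 KiB pages) rather than any real RGX configuration: as in _MMU_AllocLevel, only the entry sitting at the very start of the range inherits the lower level's start index, only the entry at the very end inherits the lower level's end index, and every entry in between spans a complete lower-level table.

    /* Toy three-level walk mirroring _MMU_AllocLevel's range narrowing.
     * Assumed layout: 9 index bits per level, 4 KiB pages (illustration only). */
    #include <stdio.h>
    #include <stdint.h>

    #define ENTRIES 512u /* 2^9 entries per level (assumed) */

    static void walk(int level, const uint32_t start[], const uint32_t end[],
                     uint32_t from, uint32_t to, int first, int last)
    {
        for (uint32_t i = from; i < to; i++)
        {
            printf("L%d entry %u\n", level, i);
            if (level == 0)
                continue;
            /* The range only narrows at its two edges, exactly as in the driver. */
            walk(level - 1, start, end,
                 (first && i == from)   ? start[level - 1] : 0,
                 (last  && i == to - 1) ? end[level - 1]   : ENTRIES,
                 first && i == from, last && i == to - 1);
        }
    }

    int main(void)
    {
        uint64_t vstart = 0x40201000ull, vend = 0x40204000ull; /* three 4 KiB pages */
        uint32_t start[3], end[3];

        for (int l = 0; l < 3; l++)
        {
            start[l] = (uint32_t)((vstart >> (12 + 9 * l)) & (ENTRIES - 1));
            end[l]   = (uint32_t)(((vend - 1) >> (12 + 9 * l)) & (ENTRIES - 1)) + 1; /* exclusive */
        }
        walk(2, start, end, start[2], end[2], 1, 1); /* bFirst/bLast analogues */
        return 0;
    }

For this range the walk touches one entry at each upper level and three PT entries, which is the same shape of recursion the driver performs under the hood.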
++/*****************************************************************************
++ *                       MMU page table functions                            *
++ *****************************************************************************/
++
++/*************************************************************************/ /*!
++@Function _MMU_GetLevelData
++
++@Description Get all the level data and calculate the indexes for the
++             specified address range
++
++@Input psMMUContext          MMU context to operate on
++
++@Input sDevVAddrStart        Start device virtual address
++
++@Input sDevVAddrEnd          End device virtual address
++
++@Input uiLog2DataPageSize    Log2 of the page size to use
++
++@Input auiStartArray         Array of start indexes (one for each level)
++
++@Input auiEndArray           Array of end indexes (one for each level)
++
++@Input auiEntriesPerPx       Array of number of entries for the Px
++                             (one for each level)
++
++@Input apsConfig             Array of PxE configs (one for each level)
++
++@Input aeMMULevel            Array of MMU levels (one for each level)
++
++@Input ppsMMUDevVAddrConfig  Device virtual address config
++
++@Input phPriv                Private data of page size config
++
++@Return None
++ */
++/*****************************************************************************/
++static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext,
++                              IMG_DEV_VIRTADDR sDevVAddrStart,
++                              IMG_DEV_VIRTADDR sDevVAddrEnd,
++                              IMG_UINT32 uiLog2DataPageSize,
++                              IMG_UINT32 auiStartArray[],
++                              IMG_UINT32 auiEndArray[],
++                              IMG_UINT32 auiEntriesPerPx[],
++                              const MMU_PxE_CONFIG *apsConfig[],
++                              MMU_LEVEL aeMMULevel[],
++                              const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
++                              IMG_HANDLE *phPriv)
++{
++    const MMU_PxE_CONFIG *psMMUPDEConfig;
++    const MMU_PxE_CONFIG *psMMUPTEConfig;
++    const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
++    MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
++    PVRSRV_ERROR eError;
++    IMG_UINT32 i = 0;
++
++    eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
++                                                     &psMMUPDEConfig,
++                                                     &psMMUPTEConfig,
++                                                     ppsMMUDevVAddrConfig,
++                                                     phPriv);
++    PVR_ASSERT(eError == PVRSRV_OK);
++
++    psDevVAddrConfig = *ppsMMUDevVAddrConfig;
++
++    if (psDevVAddrConfig->uiPCIndexMask != 0)
++    {
++        auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
++        auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
++        auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPC;
++        apsConfig[i] = psDevAttrs->psBaseConfig;
++        aeMMULevel[i] = MMU_LEVEL_3;
++        i++;
++    }
++
++    if (psDevVAddrConfig->uiPDIndexMask != 0)
++    {
++        auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
++        auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
++        auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPD;
++        if (i == 0)
++        {
++            apsConfig[i] = psDevAttrs->psBaseConfig;
++        }
++        else
++        {
++            apsConfig[i] = psMMUPDEConfig;
++        }
++        aeMMULevel[i] = MMU_LEVEL_2;
++        i++;
++    }
++
++    /*
++       There is always a PTE entry, so the behaviour here differs slightly
++       from the levels above: e.g. for 2 MB RGX pages the uiPTIndexMask is
++       0x0000000000 but there is still a PT with one entry.
++     */
++    auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
++    if (psDevVAddrConfig->uiPTIndexMask != 0)
++    {
++        auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
++    }
++    else
++    {
++        /*
++           If the PTE mask is zero it means there is only 1 PTE and thus,
++           as an exclusive bound, the end array index is equal to the
++           start index + 1.
++         */
++        auiEndArray[i] = auiStartArray[i] + 1;
++    }
++
++    auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPT;
++
++    if (i == 0)
++    {
++        apsConfig[i] = psDevAttrs->psBaseConfig;
++    }
++    else
++    {
++        apsConfig[i] = psMMUPTEConfig;
++    }
++    aeMMULevel[i] = MMU_LEVEL_1;
++}
++
++static void _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv)
++{
++    MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
++
++    psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
++}
++
++/*************************************************************************/ /*!
++@Function _AllocPageTables
++
++@Description Allocate page tables and any higher level MMU objects required
++             for the specified virtual range
++
++@Input psMMUContext        MMU context to operate on
++
++@Input sDevVAddrStart      Start device virtual address
++
++@Input sDevVAddrEnd        End device virtual address
++
++@Input uiLog2DataPageSize  Page size of the data pages
++
++@Return PVRSRV_OK if the allocation was successful
++ */
++/*****************************************************************************/
++static PVRSRV_ERROR
++_AllocPageTables(MMU_CONTEXT *psMMUContext,
++                 IMG_DEV_VIRTADDR sDevVAddrStart,
++                 IMG_DEV_VIRTADDR sDevVAddrEnd,
++                 IMG_UINT32 uiLog2DataPageSize)
++{
++    PVRSRV_ERROR eError;
++    IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
++    IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
++    IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
++    MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
++    const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
++    const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
++    IMG_HANDLE hPriv;
++    IMG_UINT32 ui32CurrentLevel = 0;
++
++    PVR_DPF((PVR_DBG_ALLOC,
++             "_AllocPageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
++             sDevVAddrStart.uiAddr,
++             sDevVAddrEnd.uiAddr
++             ));
++
++#if defined(PDUMP)
++    PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode,
++                 "Allocating page tables for %"IMG_UINT64_FMTSPEC" bytes virtual range: "
++                 IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
++                 (IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr,
++                 (IMG_UINT64)sDevVAddrStart.uiAddr,
++                 (IMG_UINT64)sDevVAddrEnd.uiAddr);
++#endif
++
++    _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
++                      (IMG_UINT32) uiLog2DataPageSize, auiStartArray, auiEndArray,
++                      auiEntriesPerPx, apsConfig, aeMMULevel,
++                      &psDevVAddrConfig, &hPriv);
++
++    HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC,
++            HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
++            HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
++
++    eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
++                             auiStartArray, auiEndArray, auiEntriesPerPx,
++                             apsConfig, aeMMULevel, &ui32CurrentLevel,
++                             auiStartArray[0], auiEndArray[0],
++                             IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
++
++    _MMU_PutLevelData(psMMUContext, hPriv);
++
++    return eError;
++}
++
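A quick way to sanity-check what a call to _AllocPageTables will touch is to count the Px objects a range spans at each level. A minimal standalone sketch, again assuming 4 KiB pages and 9 index bits per level (the real split comes from pfnGetPageSizeConfiguration):

    #include <stdio.h>
    #include <stdint.h>

    /* Number of objects covering [vstart, vend) when one object maps 2^shift bytes. */
    static uint64_t objects_spanned(uint64_t vstart, uint64_t vend, unsigned shift)
    {
        return ((vend - 1) >> shift) - (vstart >> shift) + 1;
    }

    int main(void)
    {
        uint64_t vstart = 0x40000000ull;            /* assumed heap base */
        uint64_t vend   = vstart + (256ull << 20);  /* 256 MiB range     */

        /* 4 KiB data pages, 2 MiB per PT, 1 GiB per PD (toy values). */
        printf("data pages : %llu\n", (unsigned long long)objects_spanned(vstart, vend, 12));
        printf("page tables: %llu\n", (unsigned long long)objects_spanned(vstart, vend, 21));
        printf("page dirs  : %llu\n", (unsigned long long)objects_spanned(vstart, vend, 30));
        return 0;
    }

For this range the sketch reports 65536 data pages, 128 page tables and a single page directory, which is roughly the number of leaf allocations the recursion above would perform.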
++/*************************************************************************/ /*!
++@Function _FreePageTables
++
++@Description Free page tables and any higher level MMU objects that are no
++             longer referenced for the specified virtual range.
++             This will fill the temporary free list of the MMU context which
++             needs cleanup after the call.
++
++@Input psMMUContext        MMU context to operate on
++
++@Input sDevVAddrStart      Start device virtual address
++
++@Input sDevVAddrEnd        End device virtual address
++
++@Input uiLog2DataPageSize  Page size of the data pages
++
++@Return None
++ */
++/*****************************************************************************/
++static void _FreePageTables(MMU_CONTEXT *psMMUContext,
++                            IMG_DEV_VIRTADDR sDevVAddrStart,
++                            IMG_DEV_VIRTADDR sDevVAddrEnd,
++                            IMG_UINT32 uiLog2DataPageSize)
++{
++    IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
++    IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
++    IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
++    MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
++    const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
++    const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
++    IMG_UINT32 ui32CurrentLevel = 0;
++    IMG_HANDLE hPriv;
++
++    PVR_DPF((PVR_DBG_ALLOC,
++             "_FreePageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
++             sDevVAddrStart.uiAddr,
++             sDevVAddrEnd.uiAddr
++             ));
++
++    _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
++                      uiLog2DataPageSize, auiStartArray, auiEndArray,
++                      auiEntriesPerPx, apsConfig, aeMMULevel,
++                      &psDevVAddrConfig, &hPriv);
++
++    HTBLOGK(HTB_SF_MMU_PAGE_OP_FREE,
++            HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
++            HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
++
++    /* Ignore the return code: there should be no references to the level
++     * anymore at this point, so there is nothing to do with the status. */
++    (void) _MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
++                          auiStartArray, auiEndArray, auiEntriesPerPx,
++                          apsConfig, aeMMULevel, &ui32CurrentLevel,
++                          auiStartArray[0], auiEndArray[0],
++                          IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
++
++    _MMU_PutLevelData(psMMUContext, hPriv);
++}
++
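_MMU_GetPTConfig and _MMU_PutPTConfig below must be called strictly in pairs: the device attributes hand out the page-size configuration behind an opaque phPriv handle and may refcount it. The discipline in miniature (a standalone sketch with an assumed counter, not the driver's actual bookkeeping):

    #include <assert.h>
    #include <stdio.h>

    static int g_config_refs; /* assumed stand-in for the driver's internal refcount */

    static const int *config_get(void)
    {
        g_config_refs++;              /* _MMU_GetPTConfig analogue */
        return &g_config_refs;
    }

    static void config_put(const int *hPriv)
    {
        (void)hPriv;
        assert(g_config_refs > 0);    /* a put without a matching get is a bug */
        g_config_refs--;              /* _MMU_PutPTConfig analogue */
    }

    int main(void)
    {
        const int *hPriv = config_get();
        /* ... walk page tables while the config is pinned ... */
        config_put(hPriv);
        printf("refs now: %d\n", g_config_refs);
        return 0;
    }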
++/*************************************************************************/ /*!
++@Function _MMU_GetPTInfo
++
++@Description Get the PT level information and PT entry index for the specified
++             virtual address
++
++@Input psMMUContext      MMU context to operate on
++
++@Input sDevVAddr         Device virtual address to get the PTE info
++                         from.
++
++@Input psDevVAddrConfig  The current virtual address config obtained
++                         by another function call before.
++
++@Output ppsLevel         Level info of the PT
++
++@Output pui32PTEIndex    Index into the PT the address corresponds to
++
++@Return None
++ */
++/*****************************************************************************/
++static INLINE void _MMU_GetPTInfo(MMU_CONTEXT *psMMUContext,
++                                  IMG_DEV_VIRTADDR sDevVAddr,
++                                  const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
++                                  MMU_Levelx_INFO **ppsLevel,
++                                  IMG_UINT32 *pui32PTEIndex)
++{
++    MMU_Levelx_INFO *psLocalLevel = NULL;
++    MMU_LEVEL eMMULevel = psMMUContext->psDevAttrs->eTopLevel;
++    IMG_UINT32 uiPCEIndex;
++    IMG_UINT32 uiPDEIndex;
++
++    if ((eMMULevel <= MMU_LEVEL_0) || (eMMULevel >= MMU_LEVEL_LAST))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTInfo: Invalid MMU level"));
++        PVR_ASSERT(0);
++    }
++
++    for (; eMMULevel > MMU_LEVEL_0; eMMULevel--)
++    {
++        if (eMMULevel == MMU_LEVEL_3)
++        {
++            /* find the page directory containing the PCE */
++            uiPCEIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig,
++                                     IMG_FALSE);
++            psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex];
++        }
++
++        if (eMMULevel == MMU_LEVEL_2)
++        {
++            /* find the page table containing the PDE */
++            uiPDEIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig,
++                                     IMG_FALSE);
++            if (psLocalLevel != NULL)
++            {
++                psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex];
++            }
++            else
++            {
++                psLocalLevel =
++                    psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex];
++            }
++        }
++
++        if (eMMULevel == MMU_LEVEL_1)
++        {
++            /* find PTE index into page table */
++            *pui32PTEIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig,
++                                         IMG_FALSE);
++            if (psLocalLevel == NULL)
++            {
++                psLocalLevel = &psMMUContext->sBaseLevelInfo;
++            }
++        }
++    }
++    *ppsLevel = psLocalLevel;
++}
++
++/*************************************************************************/ /*!
++@Function _MMU_GetPTConfig
++
++@Description Get the level config. Call _MMU_PutPTConfig after use!
++
++@Input psMMUContext        MMU context to operate on
++
++@Input uiLog2DataPageSize  Log 2 of the page size
++
++@Output ppsConfig          Config of the PTE
++
++@Output phPriv             Private data handle to be passed back
++                           when the info is put
++
++@Output ppsDevVAddrConfig  Config of the device virtual addresses
++
++@Return None
++ */
++/*****************************************************************************/
++static INLINE void _MMU_GetPTConfig(MMU_CONTEXT *psMMUContext,
++                                    IMG_UINT32 uiLog2DataPageSize,
++                                    const MMU_PxE_CONFIG **ppsConfig,
++                                    IMG_HANDLE *phPriv,
++                                    const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig)
++{
++    MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
++    const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
++    const MMU_PxE_CONFIG *psPDEConfig;
++    const MMU_PxE_CONFIG *psPTEConfig;
++
++    if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
++                                                &psPDEConfig,
++                                                &psPTEConfig,
++                                                &psDevVAddrConfig,
++                                                phPriv) != PVRSRV_OK)
++    {
++        /*
++           There should be no way we got here unless uiLog2DataPageSize
++           has changed after the MMU_Alloc call (in which case it's a bug in
++           the MM code)
++         */
++        PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config"));
++        PVR_ASSERT(0);
++    }
++
++    *ppsConfig = psPTEConfig;
++    *ppsDevVAddrConfig = psDevVAddrConfig;
++}
++
++/*************************************************************************/ /*!
++@Function _MMU_PutPTConfig
++
++@Description Put the level info. Has to be called after _MMU_GetPTConfig to
++             ensure correct refcounting.
++
++@Input psMMUContext  MMU context to operate on
++
++@Input hPriv         Private data handle created by
++                     _MMU_GetPTConfig.
++ ++@Return None ++ */ ++/*****************************************************************************/ ++static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext, ++ IMG_HANDLE hPriv) ++{ ++ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; ++ ++ if (psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Could not put page size config", ++ __func__)); ++ PVR_ASSERT(0); ++ } ++} ++ ++ ++/***************************************************************************** ++ * Public interface functions * ++ *****************************************************************************/ ++ ++/* ++ MMU_ContextCreate ++ */ ++PVRSRV_ERROR ++MMU_ContextCreate(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ MMU_CONTEXT **ppsMMUContext, ++ MMU_DEVICEATTRIBS *psDevAttrs) ++{ ++ MMU_CONTEXT *psMMUContext; ++ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; ++ const MMU_PxE_CONFIG *psConfig; ++ MMU_PHYSMEM_CONTEXT *psPhysMemCtx; ++ IMG_UINT32 ui32BaseObjects; ++ IMG_UINT32 ui32Size; ++ IMG_CHAR sBuf[40]; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevNode, "MMU context create"); ++#endif ++ ++ psConfig = psDevAttrs->psBaseConfig; ++ psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig; ++ ++ switch (psDevAttrs->eTopLevel) ++ { ++ case MMU_LEVEL_3: ++ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC; ++ break; ++ ++ case MMU_LEVEL_2: ++ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPD; ++ break; ++ ++ case MMU_LEVEL_1: ++ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPT; ++ break; ++ ++ default: ++ PVR_LOG_GOTO_WITH_ERROR("psDevAttrs->eTopLevel", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); ++ } ++ ++ /* Allocate the MMU context with the Level 1 Px info's */ ++ ui32Size = sizeof(MMU_CONTEXT) + ++ ((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *)); ++ ++ psMMUContext = OSAllocZMem(ui32Size); ++ PVR_LOG_GOTO_IF_NOMEM(psMMUContext, eError, e0); ++ ++#if defined(PDUMP) ++ /* Clear the refcount */ ++ psMMUContext->ui32PDumpContextIDRefCount = 0; ++#endif ++ /* Record Device specific attributes in the context for subsequent use */ ++ psMMUContext->psDevAttrs = psDevAttrs; ++ ++ /* ++ Allocate physmem context and set it up ++ */ ++ psPhysMemCtx = OSAllocZMem(sizeof(MMU_PHYSMEM_CONTEXT)); ++ PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx, eError, e1); ++ ++ psMMUContext->psPhysMemCtx = psPhysMemCtx; ++ psMMUContext->psConnection = psConnection; ++ ++ psPhysMemCtx->psDevNode = psDevNode; /* Needed for Direct Bridge case */ ++ psPhysMemCtx->psMMUContext = psMMUContext; /* Back-link to self */ ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ /* Save the app-specific values for external reference via MMU_GetOSids. */ ++ if (psConnection != NULL) ++ { ++ psPhysMemCtx->ui32OSid = psConnection->ui32OSid; ++ psPhysMemCtx->ui32OSidReg = psConnection->ui32OSidReg; ++ psPhysMemCtx->bOSidAxiProt = psConnection->bOSidAxiProtReg; ++ } ++ else ++ { ++ /* Direct Bridge calling sequence e.g. 
Firmware */
++        psPhysMemCtx->ui32OSid = 0;
++        psPhysMemCtx->ui32OSidReg = 0;
++        psPhysMemCtx->bOSidAxiProt = IMG_FALSE;
++    }
++#endif
++
++    OSSNPrintf(sBuf, sizeof(sBuf), "pgtables %p", psPhysMemCtx);
++    psPhysMemCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1;
++    psPhysMemCtx->pszPhysMemRAName = OSAllocMem(psPhysMemCtx->uiPhysMemRANameAllocSize);
++    PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->pszPhysMemRAName, eError, e2);
++
++    OSStringLCopy(psPhysMemCtx->pszPhysMemRAName, sBuf, psPhysMemCtx->uiPhysMemRANameAllocSize);
++
++    psPhysMemCtx->psPhysMemRA = RA_Create(psPhysMemCtx->pszPhysMemRAName,
++                                          /* subsequent import */
++                                          PhysHeapGetPageShift(psDevNode->psMMUPhysHeap),
++                                          RA_LOCKCLASS_1,
++                                          _MMU_PhysMem_RAImportAlloc,
++                                          _MMU_PhysMem_RAImportFree,
++                                          psPhysMemCtx, /* priv */
++                                          RA_POLICY_DEFAULT);
++    if (psPhysMemCtx->psPhysMemRA == NULL)
++    {
++        OSFreeMem(psPhysMemCtx->pszPhysMemRAName);
++        psPhysMemCtx->pszPhysMemRAName = NULL;
++        PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, e3);
++    }
++
++    /* Setup cleanup metadata to check if an MMU context
++     * has been destroyed and should not be accessed anymore */
++    psPhysMemCtx->psCleanupData = OSAllocMem(sizeof(*(psPhysMemCtx->psCleanupData)));
++    PVR_LOG_GOTO_IF_NOMEM(psPhysMemCtx->psCleanupData, eError, e4);
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++    /* Record the originating OSid for all allocation / free for this context */
++    psPhysMemCtx->psCleanupData->ui32OSid = psPhysMemCtx->ui32OSid;
++#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
++    OSLockCreate(&psPhysMemCtx->psCleanupData->hCleanupLock);
++    psPhysMemCtx->psCleanupData->bMMUContextExists = IMG_TRUE;
++    dllist_init(&psPhysMemCtx->psCleanupData->sMMUCtxCleanupItemsHead);
++    OSAtomicWrite(&psPhysMemCtx->psCleanupData->iRef, 1);
++
++    /* allocate the base level object */
++    /*
++       Note: Although this is not required by this file until
++       the 1st allocation is made, a device specific callback
++       might request the base object address so we allocate
++       it up front.
++ */ ++ if (_PxMemAlloc(psMMUContext, ++ ui32BaseObjects, ++ psConfig, ++ psDevAttrs->eTopLevel, ++ &psMMUContext->sBaseLevelInfo.sMemDesc, ++ psDevAttrs->ui32BaseAlign)) ++ { ++ PVR_LOG_GOTO_WITH_ERROR("_PxMemAlloc", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e5); ++ } ++ ++ dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); ++ ++ psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects; ++ psMMUContext->sBaseLevelInfo.ui32RefCount = 0; ++ ++ eError = OSLockCreate(&psMMUContext->hLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e6); ++ ++ /* return context */ ++ *ppsMMUContext = psMMUContext; ++ ++ return PVRSRV_OK; ++ ++e6: ++ _PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->eTopLevel); ++e5: ++ OSFreeMem(psPhysMemCtx->psCleanupData); ++e4: ++ RA_Delete(psPhysMemCtx->psPhysMemRA); ++e3: ++ OSFreeMem(psPhysMemCtx->pszPhysMemRAName); ++e2: ++ OSFreeMem(psPhysMemCtx); ++e1: ++ OSFreeMem(psMMUContext); ++e0: ++ return eError; ++} ++ ++/* ++ MMU_ContextDestroy ++ */ ++void ++MMU_ContextDestroy (MMU_CONTEXT *psMMUContext) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PDLLIST_NODE psNode, psNextNode; ++ ++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psMMUContext->psPhysMemCtx->psDevNode; ++ MMU_CTX_CLEANUP_DATA *psCleanupData = psMMUContext->psPhysMemCtx->psCleanupData; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Enter", __func__)); ++ ++ if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) ++ { ++ /* There should be no way to get here with live pages unless ++ there is a bug in this module or the MM code */ ++ PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0); ++ } ++ ++ /* Cleanup lock must be acquired before MMUContext lock. Reverse order ++ * may lead to a deadlock and is reported by lockdep. */ ++ OSLockAcquire(psCleanupData->hCleanupLock); ++ OSLockAcquire(psMMUContext->hLock); ++ ++ /* Free the top level MMU object - will be put on defer free list. ++ * This has to be done before the step below that will empty the ++ * defer-free list. */ ++ _PxMemFree(psMMUContext, ++ &psMMUContext->sBaseLevelInfo.sMemDesc, ++ psMMUContext->psDevAttrs->eTopLevel); ++ ++ /* Empty the temporary defer-free list of Px */ ++ _FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead); ++ PVR_ASSERT(dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead)); ++ ++ /* Empty the defer free list so the cleanup thread will ++ * not have to access any MMU context related structures anymore */ ++ dllist_foreach_node(&psCleanupData->sMMUCtxCleanupItemsHead, ++ psNode, ++ psNextNode) ++ { ++ MMU_CLEANUP_ITEM *psCleanup = IMG_CONTAINER_OF(psNode, ++ MMU_CLEANUP_ITEM, ++ sMMUCtxCleanupItem); ++ ++ _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead); ++ ++ dllist_remove_node(psNode); ++ } ++ PVR_ASSERT(dllist_is_empty(&psCleanupData->sMMUCtxCleanupItemsHead)); ++ ++ psCleanupData->bMMUContextExists = IMG_FALSE; ++ ++ /* Free physmem context */ ++ RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA); ++ psMMUContext->psPhysMemCtx->psPhysMemRA = NULL; ++ OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName); ++ psMMUContext->psPhysMemCtx->pszPhysMemRAName = NULL; ++ ++ OSFreeMem(psMMUContext->psPhysMemCtx); ++ ++ OSLockRelease(psMMUContext->hLock); ++ ++ OSLockRelease(psCleanupData->hCleanupLock); ++ ++ if (OSAtomicDecrement(&psCleanupData->iRef) == 0) ++ { ++ OSLockDestroy(psCleanupData->hCleanupLock); ++ OSFreeMem(psCleanupData); ++ } ++ ++ OSLockDestroy(psMMUContext->hLock); ++ ++ /* free the context itself. 
*/ ++ OSFreeMem(psMMUContext); ++ /*not nulling pointer, copy on stack*/ ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Exit", __func__)); ++} ++ ++/* ++ MMU_Alloc ++ */ ++PVRSRV_ERROR ++MMU_Alloc (MMU_CONTEXT *psMMUContext, ++ IMG_DEVMEM_SIZE_T uSize, ++ IMG_DEVMEM_SIZE_T *puActualSize, ++ IMG_UINT32 uiProtFlags, ++ IMG_DEVMEM_SIZE_T uDevVAddrAlignment, ++ IMG_DEV_VIRTADDR *psDevVAddr, ++ IMG_UINT32 uiLog2PageSize) ++{ ++ PVRSRV_ERROR eError; ++ IMG_DEV_VIRTADDR sDevVAddrEnd; ++ ++ const MMU_PxE_CONFIG *psPDEConfig; ++ const MMU_PxE_CONFIG *psPTEConfig; ++ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; ++ ++ MMU_DEVICEATTRIBS *psDevAttrs; ++ IMG_HANDLE hPriv; ++ ++#if !defined(DEBUG) ++ PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment); ++#endif ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: uSize=" IMG_DEVMEM_SIZE_FMTSPEC ++ ", uiProtFlags=0x%x, align="IMG_DEVMEM_ALIGN_FMTSPEC, ++ __func__, uSize, uiProtFlags, uDevVAddrAlignment)); ++ ++ /* check params */ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psMMUContext, "psMMUContext"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevVAddr, "psDevVAddr"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(puActualSize, "puActualSize"); ++ ++ psDevAttrs = psMMUContext->psDevAttrs; ++ ++ eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize, ++ &psPDEConfig, ++ &psPTEConfig, ++ &psDevVAddrConfig, ++ &hPriv); ++ PVR_LOG_RETURN_IF_ERROR(eError, "pfnGetPageSizeConfiguration"); ++ ++ /* size and alignment must be datapage granular */ ++ if (((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0) ++ || ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: invalid address or size granularity", ++ __func__)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ sDevVAddrEnd = *psDevVAddr; ++ sDevVAddrEnd.uiAddr += uSize; ++ ++ OSLockAcquire(psMMUContext->hLock); ++ eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize); ++ OSLockRelease(psMMUContext->hLock); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "_AllocPageTables"); ++ return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES; ++ } ++ ++ psDevAttrs->pfnPutPageSizeConfiguration(hPriv); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ MMU_Free ++ */ ++void ++MMU_Free (MMU_CONTEXT *psMMUContext, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 uiLog2DataPageSize) ++{ ++ IMG_DEV_VIRTADDR sDevVAddrEnd; ++ ++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__) ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ IMG_UINT32 ui32MMULeakMax = psPVRSRVData->sMemLeakIntervals.ui32MMU; ++ ++ mutex_lock(&g_sMMULeakMutex); ++ ++ g_ui32MMULeakCounter++; ++ if (ui32MMULeakMax && g_ui32MMULeakCounter >= ui32MMULeakMax) ++ { ++ g_ui32MMULeakCounter = 0; ++ mutex_unlock(&g_sMMULeakMutex); ++ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Skipped MMU free for address 0x%016" IMG_UINT64_FMTSPECx " to trigger memory leak.", ++ __func__, ++ sDevVAddr.uiAddr)); ++ return; ++ } ++ ++ mutex_unlock(&g_sMMULeakMutex); ++#endif ++ ++ PVR_ASSERT(psMMUContext != NULL); ++ PVR_LOG_RETURN_VOID_IF_FALSE(psMMUContext != NULL, "psMMUContext"); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freeing DevVAddr " IMG_DEV_VIRTADDR_FMTSPEC, ++ __func__, sDevVAddr.uiAddr)); ++ ++ /* ensure the address range to free is inside the heap */ ++ sDevVAddrEnd = sDevVAddr; ++ sDevVAddrEnd.uiAddr += uiSize; ++ ++ /* The Cleanup lock has to be taken before the MMUContext hLock to ++ * prevent deadlock scenarios. 
It is necessary only for parts of ++ * _SetupCleanup_FreeMMUMapping though.*/ ++ OSLockAcquire(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock); ++ ++ OSLockAcquire(psMMUContext->hLock); ++ ++ _FreePageTables(psMMUContext, ++ sDevVAddr, ++ sDevVAddrEnd, ++ uiLog2DataPageSize); ++ ++ _SetupCleanup_FreeMMUMapping(psMMUContext->psPhysMemCtx); ++ ++ OSLockRelease(psMMUContext->hLock); ++ ++ OSLockRelease(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock); ++ ++ return; ++} ++ ++PVRSRV_ERROR ++MMU_MapPages(MMU_CONTEXT *psMMUContext, ++ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, ++ IMG_DEV_VIRTADDR sDevVAddrBase, ++ PMR *psPMR, ++ IMG_UINT32 ui32PhysPgOffset, ++ IMG_UINT32 ui32MapPageCount, ++ IMG_UINT32 *paui32MapIndices, ++ IMG_UINT32 uiLog2HeapPageSize) ++{ ++ PVRSRV_ERROR eError; ++ IMG_HANDLE hPriv; ++ ++ MMU_Levelx_INFO *psLevel = NULL; ++ ++ MMU_Levelx_INFO *psPrevLevel = NULL; ++ ++ IMG_UINT32 uiPTEIndex = 0; ++ IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize); ++ IMG_UINT32 uiLoop = 0; ++ IMG_UINT32 ui32MappedCount = 0; ++ IMG_DEVMEM_OFFSET_T uiPgOffset = 0; ++ IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; ++ ++ IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0, uiDefProtFlags=0; ++ IMG_UINT64 uiDummyProtFlags = 0; ++ MMU_PROTFLAGS_T uiMMUProtFlags = 0; ++ ++ const MMU_PxE_CONFIG *psConfig; ++ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; ++ ++ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; ++ ++ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_DEV_PHYADDR *psDevPAddr; ++ IMG_DEV_PHYADDR sDevPAddr; ++ IMG_BOOL *pbValid; ++ IMG_BOOL bValid; ++ IMG_BOOL bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; ++ IMG_BOOL bNeedBacking = IMG_FALSE; ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++#if defined(PDUMP) ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset; ++ ++ PDUMPCOMMENT(psMMUContext->psPhysMemCtx->psDevNode, "Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)", ++ (IMG_UINT64)(ui32MapPageCount * uiPageSize)); ++#endif /*PDUMP*/ ++ ++ /* Validate the most essential parameters */ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(psMMUContext, eError, e0); ++ PVR_LOG_GOTO_IF_INVALID_PARAM(psPMR, eError, e0); ++ ++ psDevNode = psMMUContext->psPhysMemCtx->psDevNode; ++ ++ /* Allocate memory for page-frame-numbers and validity states, ++ N.B. 
assert could be triggered by an illegal uiSizeBytes */ ++ if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC) ++ { ++ psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR)); ++ PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, e0); ++ ++ pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL)); ++ if (pbValid == NULL) ++ { ++ /* Should allocation fail, clean-up here before exit */ ++ OSFreeMem(psDevPAddr); ++ PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, e0); ++ } ++ } ++ else ++ { ++ psDevPAddr = asDevPAddr; ++ pbValid = abValid; ++ } ++ ++ /* Get the Device physical addresses of the pages we are trying to map ++ * In the case of non indexed mapping we can get all addresses at once */ ++ if (NULL == paui32MapIndices) ++ { ++ eError = PMR_DevPhysAddr(psPMR, ++ uiLog2HeapPageSize, ++ ui32MapPageCount, ++ ((IMG_DEVMEM_OFFSET_T) ui32PhysPgOffset << uiLog2HeapPageSize), ++ psDevPAddr, ++ pbValid); ++ PVR_GOTO_IF_ERROR(eError, e1); ++ } ++ ++ /*Get the Page table level configuration */ ++ _MMU_GetPTConfig(psMMUContext, ++ (IMG_UINT32) uiLog2HeapPageSize, ++ &psConfig, ++ &hPriv, ++ &psDevVAddrConfig); ++ ++ eError = _MMU_ConvertDevMemFlags(IMG_FALSE, ++ uiMappingFlags, ++ &uiMMUProtFlags, ++ psMMUContext); ++ PVR_GOTO_IF_ERROR(eError, e2); ++ ++ /* Callback to get device specific protection flags */ ++ if (psConfig->uiBytesPerEntry == 8) ++ { ++ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); ++ uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; ++ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE), ++ uiLog2HeapPageSize); ++ } ++ else if (psConfig->uiBytesPerEntry == 4) ++ { ++ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); ++ uiMMUProtFlags |= MMU_PROTFLAGS_READABLE; ++ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4((uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE)); ++ } ++ else ++ { ++ PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_INVALID_PARAMS, e2); ++ } ++ uiDummyProtFlags = uiProtFlags; ++ ++ if (PMR_IsSparse(psPMR)) ++ { ++ /* We know there will not be 4G number of PMR's */ ++ bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(PMR_Flags(psPMR)); ++ if (bDummyBacking) ++ { ++ bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(PMR_Flags(psPMR)); ++ } ++ ++ if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiMappingFlags)) ++ { ++ /* Obtain non-coherent protection flags as we cannot have multiple coherent ++ virtual pages pointing to the same physical page so all dummy page ++ mappings have to be non-coherent even in a coherent allocation */ ++ eError = _MMU_ConvertDevMemFlags(IMG_FALSE, ++ uiMappingFlags & ~PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT, ++ &uiMMUProtFlags, ++ psMMUContext); ++ PVR_GOTO_IF_ERROR(eError, e2); ++ ++ /* Callback to get device specific protection flags */ ++ if (psConfig->uiBytesPerEntry == 8) ++ { ++ uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); ++ } ++ else ++ { ++ /* We've already validated possible values of uiBytesPerEntry at the start of this function */ ++ PVR_ASSERT(psConfig->uiBytesPerEntry == 4); ++ uiDummyProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); ++ } ++ } ++ } ++ ++ OSLockAcquire(psMMUContext->hLock); ++ ++ for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++) ++ { ++ ++#if defined(PDUMP) ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++#endif /*PDUMP*/ ++ ++ if (NULL != 
paui32MapIndices)
++        {
++            uiPgOffset = paui32MapIndices[uiLoop];
++
++            /* Calculate the Device Virtual Address of the page */
++            sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (uiPgOffset * uiPageSize);
++
++            /* Get the physical address to map */
++            eError = PMR_DevPhysAddr(psPMR,
++                                     uiLog2HeapPageSize,
++                                     1,
++                                     uiPgOffset * uiPageSize,
++                                     &sDevPAddr,
++                                     &bValid);
++            PVR_GOTO_IF_ERROR(eError, e3);
++        }
++        else
++        {
++            uiPgOffset = uiLoop + ui32PhysPgOffset;
++            sDevPAddr = psDevPAddr[uiLoop];
++            bValid = pbValid[uiLoop];
++        }
++
++        uiDefProtFlags = uiProtFlags;
++        /*
++           The default value of the entry is invalid, so we don't need to mark
++           it as such if the page wasn't valid; we just advance past that address.
++         */
++        if (bValid || bDummyBacking)
++        {
++            if (!bValid)
++            {
++                if (bZeroBacking)
++                {
++                    sDevPAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr;
++                    /* Ensure the zero-backed page PTE is read only */
++                    uiDefProtFlags = uiProtFlagsReadOnly;
++                }
++                else
++                {
++                    sDevPAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr;
++                }
++            }
++            else
++            {
++                /* check the physical alignment of the memory to map */
++                PVR_ASSERT((sDevPAddr.uiAddr & (uiPageSize-1)) == 0);
++            }
++
++#if defined(DEBUG)
++            {
++                IMG_INT32 i32FeatureVal = 0;
++                IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr);
++
++                i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH);
++                do {
++                    /* i32FeatureVal can be negative for cases where this feature is
++                     * undefined. In that situation we need to bail out rather than
++                     * go ahead with the debug comparison. */
++                    if (0 > i32FeatureVal)
++                        break;
++
++                    if (ui32BitLength > i32FeatureVal)
++                    {
++                        PVR_DPF((PVR_DBG_ERROR,
++                                 "%s Failed. The physical address bitlength (%d)"
++                                 " is greater than the chip can handle (%d).",
++                                 __func__, ui32BitLength, i32FeatureVal));
++
++                        PVR_ASSERT(ui32BitLength <= i32FeatureVal);
++                        eError = PVRSRV_ERROR_INVALID_PARAMS;
++                        goto e3;
++                    }
++                } while (0);
++            }
++#endif /*DEBUG*/
++
++#if defined(PDUMP)
++            if (bValid)
++            {
++                eError = PMR_PDumpSymbolicAddr(psPMR, uiPgOffset * uiPageSize,
++                                               sizeof(aszMemspaceName), &aszMemspaceName[0],
++                                               sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
++                                               &uiSymbolicAddrOffset,
++                                               &uiNextSymName);
++                PVR_ASSERT(eError == PVRSRV_OK);
++            }
++#endif /*PDUMP*/
++
++            psPrevLevel = psLevel;
++            /* Calculate PT index and get new table descriptor */
++            _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
++                           &psLevel, &uiPTEIndex);
++
++            if (psPrevLevel == psLevel)
++            {
++                /*
++                 * Sparse allocations may have page offsets which
++                 * decrement as well as increment, so make sure we
++                 * update the range we will flush correctly.
++                 */
++                if (uiPTEIndex > uiFlushEnd)
++                    uiFlushEnd = uiPTEIndex;
++                else if (uiPTEIndex < uiFlushStart)
++                    uiFlushStart = uiPTEIndex;
++            }
++            else
++            {
++                /* Flush if we moved to another psLevel, i.e.
page table */ ++ if (psPrevLevel != NULL) ++ { ++ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, ++ &psPrevLevel->sMemDesc.psMapping->sMemHandle, ++ uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, ++ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); ++ PVR_GOTO_IF_ERROR(eError, e3); ++ } ++ ++ uiFlushStart = uiPTEIndex; ++ uiFlushEnd = uiFlushStart; ++ } ++ ++ HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP, ++ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), ++ HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr)); ++ ++ /* Set the PT entry with the specified address and protection flags */ ++ eError = _SetupPTE(psMMUContext, ++ psLevel, ++ uiPTEIndex, ++ psConfig, ++ &sDevPAddr, ++ IMG_FALSE, ++#if defined(PDUMP) ++ (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName), ++ ((bValid)?aszSymbolicAddress:((bZeroBacking)?DEV_ZERO_PAGE:DUMMY_PAGE)), ++ (bValid)?uiSymbolicAddrOffset:0, ++#endif /*PDUMP*/ ++ uiDefProtFlags); ++ PVR_LOG_GOTO_IF_ERROR(eError, "_SetupPTE", e3); ++ ++ if (bValid) ++ { ++ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); ++ PVR_DPF ((PVR_DBG_MESSAGE, ++ "%s: devVAddr=" IMG_DEV_VIRTADDR_FMTSPEC ", " ++ "size=" IMG_DEVMEM_OFFSET_FMTSPEC, ++ __func__, ++ sDevVAddr.uiAddr, ++ uiPgOffset * uiPageSize)); ++ ++ ui32MappedCount++; ++ } ++ } ++ ++ sDevVAddr.uiAddr += uiPageSize; ++ } ++ ++ /* Flush the last level we touched */ ++ if (psLevel != NULL) ++ { ++ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, ++ &psLevel->sMemDesc.psMapping->sMemHandle, ++ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, ++ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); ++ PVR_GOTO_IF_ERROR(eError, e3); ++ } ++ ++ OSLockRelease(psMMUContext->hLock); ++ ++ _MMU_PutPTConfig(psMMUContext, hPriv); ++ ++ if (psDevPAddr != asDevPAddr) ++ { ++ OSFreeMem(pbValid); ++ OSFreeMem(psDevPAddr); ++ } ++ ++ /* Flush TLB for PTs*/ ++ psDevNode->pfnMMUCacheInvalidate(psDevNode, ++ psMMUContext, ++ MMU_LEVEL_1, ++ IMG_FALSE); ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevNode, "Wired up %d Page Table entries (out of %d)", ui32MappedCount, ui32MapPageCount); ++#endif /*PDUMP*/ ++ ++ return PVRSRV_OK; ++ ++e3: ++ OSLockRelease(psMMUContext->hLock); ++ ++ if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags)) ++ { ++ bNeedBacking = IMG_TRUE; ++ } ++ ++ MMU_UnmapPages(psMMUContext, ++ (bNeedBacking) ? 
uiMappingFlags : 0, ++ sDevVAddrBase, ++ uiLoop, ++ paui32MapIndices, ++ uiLog2HeapPageSize, ++ PMR_IsSparse(psPMR)); ++e2: ++ _MMU_PutPTConfig(psMMUContext, hPriv); ++e1: ++ if (psDevPAddr != asDevPAddr) ++ { ++ OSFreeMem(pbValid); ++ OSFreeMem(psDevPAddr); ++ } ++e0: ++ return eError; ++} ++ ++/* ++ MMU_UnmapPages ++ */ ++void ++MMU_UnmapPages(MMU_CONTEXT *psMMUContext, ++ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, ++ IMG_DEV_VIRTADDR sDevVAddrBase, ++ IMG_UINT32 ui32PageCount, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags) ++{ ++ IMG_UINT32 uiPTEIndex = 0, ui32Loop=0; ++ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; ++ IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0; ++ MMU_Levelx_INFO *psLevel = NULL; ++ MMU_Levelx_INFO *psPrevLevel = NULL; ++ IMG_HANDLE hPriv; ++ const MMU_PxE_CONFIG *psConfig; ++ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; ++ IMG_UINT64 uiProtFlags = 0, uiProtFlagsReadOnly = 0; ++ MMU_PROTFLAGS_T uiMMUProtFlags = 0, uiMMUReadOnlyProtFlags = 0; ++ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; ++ IMG_DEV_PHYADDR sBackingPgDevPhysAddr; ++ IMG_BOOL bUnmap = IMG_TRUE, bDummyBacking = IMG_FALSE, bZeroBacking = IMG_FALSE; ++ IMG_CHAR *pcBackingPageName = NULL; ++ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevNode, ++ "Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX, ++ ui32PageCount, ++ (IMG_UINT64)sDevVAddr.uiAddr, ++ ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1); ++#endif ++ bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMemAllocFlags); ++ bZeroBacking = PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiMemAllocFlags); ++ ++ if (bZeroBacking) ++ { ++ sBackingPgDevPhysAddr.uiAddr = psDevNode->sDevZeroPage.ui64PgPhysAddr; ++ pcBackingPageName = DEV_ZERO_PAGE; ++ } ++ else ++ { ++ sBackingPgDevPhysAddr.uiAddr = psDevNode->sDummyPage.ui64PgPhysAddr; ++ pcBackingPageName = DUMMY_PAGE; ++ } ++ ++ bUnmap = (uiMappingFlags)? 
!bDummyBacking : IMG_TRUE; ++ /* Get PT and address configs */ ++ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize, ++ &psConfig, &hPriv, &psDevVAddrConfig); ++ ++ if (_MMU_ConvertDevMemFlags(bUnmap, ++ uiMappingFlags, ++ &uiMMUProtFlags, ++ psMMUContext) != PVRSRV_OK) ++ { ++ return; ++ } ++ ++ uiMMUReadOnlyProtFlags = (uiMMUProtFlags & ~MMU_PROTFLAGS_WRITEABLE) | MMU_PROTFLAGS_READABLE; ++ ++ /* Callback to get device specific protection flags */ ++ if (psConfig->uiBytesPerEntry == 4) ++ { ++ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); ++ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUReadOnlyProtFlags); ++ } ++ else if (psConfig->uiBytesPerEntry == 8) ++ { ++ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize); ++ uiProtFlagsReadOnly = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUReadOnlyProtFlags, uiLog2PageSize); ++ } ++ ++ ++ OSLockAcquire(psMMUContext->hLock); ++ ++ /* Unmap page by page */ ++ while (ui32Loop < ui32PageCount) ++ { ++ if (NULL != pai32FreeIndices) ++ { ++ /*Calculate the Device Virtual Address of the page */ ++ sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + ++ pai32FreeIndices[ui32Loop] * (IMG_UINT64) uiPageSize; ++ } ++ ++ psPrevLevel = psLevel; ++ /* Calculate PT index and get new table descriptor */ ++ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, ++ &psLevel, &uiPTEIndex); ++ ++ if (psPrevLevel == psLevel) ++ { ++ /* ++ * Sparse allocations may have page offsets which ++ * decrement as well as increment, so make sure we ++ * update the range we will flush correctly. ++ */ ++ if (uiPTEIndex > uiFlushEnd) ++ uiFlushEnd = uiPTEIndex; ++ else if (uiPTEIndex < uiFlushStart) ++ uiFlushStart = uiPTEIndex; ++ } ++ else ++ { ++ /* Flush if we moved to another psLevel, i.e. page table */ ++ if (psPrevLevel != NULL) ++ { ++ PhysHeapPagesClean(psDevNode->psMMUPhysHeap, ++ &psPrevLevel->sMemDesc.psMapping->sMemHandle, ++ uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset, ++ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); ++ } ++ ++ uiFlushStart = uiPTEIndex; ++ uiFlushEnd = uiFlushStart; ++ } ++ ++ HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP, ++ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr)); ++ ++ /* Set the PT entry to invalid and poison it with a bad address */ ++ if (_SetupPTE(psMMUContext, ++ psLevel, ++ uiPTEIndex, ++ psConfig, ++ (bDummyBacking)? &sBackingPgDevPhysAddr : &gsBadDevPhyAddr, ++ bUnmap, ++#if defined(PDUMP) ++ (bDummyBacking)? (psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName): NULL, ++ (bDummyBacking)? pcBackingPageName: NULL, ++ 0U, ++#endif ++ (bZeroBacking)? 
uiProtFlagsReadOnly: uiProtFlags) != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ /* Check we haven't wrapped around */ ++ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries); ++ ui32Loop++; ++ sDevVAddr.uiAddr += uiPageSize; ++ } ++ ++ /* Flush the last level we touched */ ++ if (psLevel != NULL) ++ { ++ PhysHeapPagesClean(psDevNode->psMMUPhysHeap, ++ &psLevel->sMemDesc.psMapping->sMemHandle, ++ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, ++ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry); ++ } ++ ++ OSLockRelease(psMMUContext->hLock); ++ ++ _MMU_PutPTConfig(psMMUContext, hPriv); ++ ++ /* Flush TLB for PTs*/ ++ psDevNode->pfnMMUCacheInvalidate(psDevNode, ++ psMMUContext, ++ MMU_LEVEL_1, ++ IMG_TRUE); ++ ++ return; ++ ++e0: ++ _MMU_PutPTConfig(psMMUContext, hPriv); ++ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table")); ++ PVR_ASSERT(0); ++ OSLockRelease(psMMUContext->hLock); ++ return; ++} ++ ++PVRSRV_ERROR ++MMU_MapPMRFast (MMU_CONTEXT *psMMUContext, ++ IMG_DEV_VIRTADDR sDevVAddrBase, ++ const PMR *psPMR, ++ IMG_DEVMEM_SIZE_T uiSizeBytes, ++ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, ++ IMG_UINT32 uiLog2HeapPageSize) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT32 uiCount, i; ++ IMG_UINT32 uiPageSize = 1 << uiLog2HeapPageSize; ++ IMG_UINT32 uiPTEIndex = 0; ++ IMG_UINT64 uiProtFlags; ++ MMU_PROTFLAGS_T uiMMUProtFlags = 0; ++ MMU_Levelx_INFO *psLevel = NULL; ++ IMG_HANDLE hPriv; ++ const MMU_PxE_CONFIG *psConfig; ++ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; ++ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; ++ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_DEV_PHYADDR *psDevPAddr; ++ IMG_BOOL *pbValid; ++ IMG_UINT32 uiFlushStart = 0; ++ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; ++ ++#if defined(PDUMP) ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset; ++ IMG_UINT32 ui32MappedCount = 0; ++ PDUMPCOMMENT(psDevNode, "Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)", uiSizeBytes); ++#endif /*PDUMP*/ ++ ++ /* We should verify the size and contiguity when supporting variable page size */ ++ ++ PVR_ASSERT (psMMUContext != NULL); ++ PVR_ASSERT (psPMR != NULL); ++ ++ /* Allocate memory for page-frame-numbers and validity states, ++ N.B. 
assert could be triggered by an illegal uiSizeBytes */ ++ uiCount = uiSizeBytes >> uiLog2HeapPageSize; ++ PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2HeapPageSize == uiSizeBytes); ++ if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC) ++ { ++ psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR)); ++ PVR_LOG_GOTO_IF_NOMEM(psDevPAddr, eError, return_error); ++ ++ pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL)); ++ if (pbValid == NULL) ++ { ++ /* Should allocation fail, clean-up here before exit */ ++ OSFreeMem(psDevPAddr); ++ PVR_LOG_GOTO_WITH_ERROR("pbValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_paddr_array); ++ } ++ } ++ else ++ { ++ psDevPAddr = asDevPAddr; ++ pbValid = abValid; ++ } ++ ++ /* Get general PT and address configs */ ++ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2HeapPageSize, ++ &psConfig, &hPriv, &psDevVAddrConfig); ++ ++ eError = _MMU_ConvertDevMemFlags(IMG_FALSE, ++ uiMappingFlags, ++ &uiMMUProtFlags, ++ psMMUContext); ++ PVR_GOTO_IF_ERROR(eError, put_mmu_context); ++ ++ /* Callback to get device specific protection flags */ ++ ++ if (psConfig->uiBytesPerEntry == 8) ++ { ++ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize); ++ } ++ else if (psConfig->uiBytesPerEntry == 4) ++ { ++ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); ++ } ++ else ++ { ++ PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, put_mmu_context); ++ } ++ ++ ++ /* "uiSize" is the amount of contiguity in the underlying ++ page. Normally this would be constant for the system, but, ++ that constant needs to be communicated, in case it's ever ++ different; caller guarantees that PMRLockSysPhysAddr() has ++ already been called */ ++ eError = PMR_DevPhysAddr(psPMR, ++ uiLog2HeapPageSize, ++ uiCount, ++ 0, ++ psDevPAddr, ++ pbValid); ++ PVR_GOTO_IF_ERROR(eError, put_mmu_context); ++ ++ OSLockAcquire(psMMUContext->hLock); ++ ++ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, ++ &psLevel, &uiPTEIndex); ++ uiFlushStart = uiPTEIndex; ++ ++ /* Map in all pages of that PMR page by page*/ ++ for (i=0, uiCount=0; uiCount < uiSizeBytes; i++) ++ { ++#if defined(DEBUG) ++ { ++ IMG_INT32 i32FeatureVal = 0; ++ IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr); ++ i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, PHYS_BUS_WIDTH); ++ do { ++ if (0 > i32FeatureVal) ++ break; ++ ++ if (ui32BitLength > i32FeatureVal) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s Failed. 
The physical address bitlength (%d)" ++ " is greater than the chip can handle (%d).", ++ __func__, ui32BitLength, i32FeatureVal)); ++ ++ PVR_ASSERT(ui32BitLength <= i32FeatureVal); ++ OSLockRelease(psMMUContext->hLock); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, put_mmu_context); ++ } ++ } while (0); ++ } ++#endif /*DEBUG*/ ++#if defined(PDUMP) ++ { ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ ++ eError = PMR_PDumpSymbolicAddr(psPMR, uiCount, ++ sizeof(aszMemspaceName), &aszMemspaceName[0], ++ sizeof(aszSymbolicAddress), &aszSymbolicAddress[0], ++ &uiSymbolicAddrOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ui32MappedCount++; ++ } ++#endif /*PDUMP*/ ++ ++ HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP, ++ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr), ++ HTBLOG_U64_BITS_HIGH(psDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(psDevPAddr[i].uiAddr)); ++ ++ /* Set the PT entry with the specified address and protection flags */ ++ eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex, ++ psConfig, &psDevPAddr[i], IMG_FALSE, ++#if defined(PDUMP) ++ aszMemspaceName, ++ aszSymbolicAddress, ++ uiSymbolicAddrOffset, ++#endif /*PDUMP*/ ++ uiProtFlags); ++ PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); ++ ++ sDevVAddr.uiAddr += uiPageSize; ++ uiCount += uiPageSize; ++ ++ /* Calculate PT index and get new table descriptor */ ++ if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (uiCount != uiSizeBytes)) ++ { ++ uiPTEIndex++; ++ } ++ else ++ { ++ eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap, ++ &psLevel->sMemDesc.psMapping->sMemHandle, ++ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, ++ (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); ++ PVR_GOTO_IF_ERROR(eError, unlock_mmu_context); ++ ++ ++ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, ++ &psLevel, &uiPTEIndex); ++ uiFlushStart = uiPTEIndex; ++ } ++ } ++ ++ OSLockRelease(psMMUContext->hLock); ++ ++ ++ _MMU_PutPTConfig(psMMUContext, hPriv); ++ ++ if (psDevPAddr != asDevPAddr) ++ { ++ OSFreeMem(pbValid); ++ OSFreeMem(psDevPAddr); ++ } ++ ++ /* Flush TLB for PTs*/ ++ psDevNode->pfnMMUCacheInvalidate(psDevNode, ++ psMMUContext, ++ MMU_LEVEL_1, ++ IMG_FALSE); ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevNode, "Wired up %d Page Table entries (out of %d)", ui32MappedCount, i); ++#endif /*PDUMP*/ ++ ++ return PVRSRV_OK; ++ ++unlock_mmu_context: ++ OSLockRelease(psMMUContext->hLock); ++ MMU_UnmapPMRFast(psMMUContext, ++ sDevVAddrBase, ++ uiSizeBytes >> uiLog2HeapPageSize, ++ uiLog2HeapPageSize); ++ ++put_mmu_context: ++ _MMU_PutPTConfig(psMMUContext, hPriv); ++ ++ if (pbValid != abValid) ++ { ++ OSFreeMem(pbValid); ++ } ++ ++free_paddr_array: ++ if (psDevPAddr != asDevPAddr) ++ { ++ OSFreeMem(psDevPAddr); ++ } ++ ++return_error: ++ PVR_ASSERT(eError == PVRSRV_OK); ++ return eError; ++} ++ ++/* ++ MMU_UnmapPages ++ */ ++void ++MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext, ++ IMG_DEV_VIRTADDR sDevVAddrBase, ++ IMG_UINT32 ui32PageCount, ++ IMG_UINT32 uiLog2PageSize) ++{ ++ IMG_UINT32 uiPTEIndex = 0, ui32Loop=0; ++ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize; ++ MMU_Levelx_INFO *psLevel = NULL; ++ IMG_HANDLE hPriv; ++ const MMU_PxE_CONFIG *psConfig; ++ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig; ++ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase; ++ IMG_UINT64 uiProtFlags = 0; ++ MMU_PROTFLAGS_T uiMMUProtFlags = 0; ++ IMG_UINT64 uiEntry = 0; ++ IMG_UINT32 uiFlushStart = 0; ++ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode; ++ ++#if 
defined(PDUMP) ++ PDUMPCOMMENT(psDevNode, ++ "Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX, ++ ui32PageCount, ++ (IMG_UINT64)sDevVAddr.uiAddr, ++ ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1); ++#endif ++ ++ /* Get PT and address configs */ ++ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize, ++ &psConfig, &hPriv, &psDevVAddrConfig); ++ ++ if (_MMU_ConvertDevMemFlags(IMG_TRUE, ++ 0, ++ &uiMMUProtFlags, ++ psMMUContext) != PVRSRV_OK) ++ { ++ return; ++ } ++ ++ /* Callback to get device specific protection flags */ ++ ++ if (psConfig->uiBytesPerEntry == 8) ++ { ++ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize); ++ ++ /* Fill the entry with a bad address but leave space for protection flags */ ++ uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags; ++ } ++ else if (psConfig->uiBytesPerEntry == 4) ++ { ++ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags); ++ ++ /* Fill the entry with a bad address but leave space for protection flags */ ++ uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: The page table entry byte length is not supported", ++ __func__)); ++ goto e0; ++ } ++ ++ OSLockAcquire(psMMUContext->hLock); ++ ++ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, ++ &psLevel, &uiPTEIndex); ++ uiFlushStart = uiPTEIndex; ++ ++ /* Unmap page by page and keep the loop as quick as possible. ++ * Only use parts of _SetupPTE that need to be executed. */ ++ while (ui32Loop < ui32PageCount) ++ { ++ ++ /* Set the PT entry to invalid and poison it with a bad address */ ++ if (psConfig->uiBytesPerEntry == 8) ++ { ++ ((IMG_UINT64*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = uiEntry; ++ } ++ else ++ { ++ PVR_ASSERT(psConfig->uiBytesPerEntry == 4); ++ ((IMG_UINT32*)psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = (IMG_UINT32) uiEntry; ++ } ++ ++ /* Log modifications */ ++ HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP, ++ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr)); ++ ++ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE, ++ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel), ++ uiPTEIndex, MMU_LEVEL_1, ++ HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry), ++ IMG_FALSE); ++ ++#if defined(PDUMP) ++ PDumpMMUDumpPxEntries(psDevNode, ++ MMU_LEVEL_1, ++ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName, ++ psLevel->sMemDesc.pvCpuVAddr, ++ psLevel->sMemDesc.sDevPAddr, ++ uiPTEIndex, ++ 1, ++ NULL, ++ NULL, ++ 0, ++ psConfig->uiBytesPerEntry, ++ psConfig->uiAddrLog2Align, ++ psConfig->uiAddrShift, ++ psConfig->uiAddrMask, ++ psConfig->uiProtMask, ++ psConfig->uiValidEnMask, ++ 0, ++ psMMUContext->psDevAttrs->eMMUType); ++#endif /*PDUMP*/ ++ ++ sDevVAddr.uiAddr += uiPageSize; ++ ui32Loop++; ++ ++ /* Calculate PT index and get new table descriptor */ ++ if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (ui32Loop != ui32PageCount)) ++ { ++ uiPTEIndex++; ++ } ++ else ++ { ++ PhysHeapPagesClean(psDevNode->psMMUPhysHeap, ++ &psLevel->sMemDesc.psMapping->sMemHandle, ++ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset, ++ (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry); ++ ++ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig, ++ &psLevel, &uiPTEIndex); ++ uiFlushStart = uiPTEIndex; ++ } ++ } ++ ++ OSLockRelease(psMMUContext->hLock); ++ ++ 
_MMU_PutPTConfig(psMMUContext, hPriv);
++
++    /* Flush TLB for PTs */
++    psDevNode->pfnMMUCacheInvalidate(psDevNode,
++                                     psMMUContext,
++                                     MMU_LEVEL_1,
++                                     IMG_TRUE);
++
++    return;
++
++e0:
++    PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map/unmap page table", __func__));
++    PVR_ASSERT(0);
++    return;
++}
++
++/*
++    MMU_ChangeValidity
++ */
++PVRSRV_ERROR
++MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
++                   IMG_DEV_VIRTADDR sDevVAddr,
++                   IMG_DEVMEM_SIZE_T uiNumPages,
++                   IMG_UINT32 uiLog2PageSize,
++                   IMG_BOOL bMakeValid,
++                   PMR *psPMR)
++{
++    PVRSRV_ERROR eError = PVRSRV_OK;
++
++    IMG_HANDLE hPriv;
++    const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
++    const MMU_PxE_CONFIG *psConfig;
++    MMU_Levelx_INFO *psLevel = NULL;
++    IMG_UINT32 uiFlushStart = 0;
++    IMG_UINT32 uiPTIndex = 0;
++    IMG_UINT32 i;
++    IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
++    IMG_BOOL bValid;
++
++    PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psPhysMemCtx->psDevNode;
++
++#if defined(PDUMP)
++    IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
++    IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
++    IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
++    IMG_DEVMEM_OFFSET_T uiNextSymName;
++
++    PDUMPCOMMENT(psDevNode,
++                 "Change valid bit of the data pages to %d (0x%"IMG_UINT64_FMTSPECX" - 0x%"IMG_UINT64_FMTSPECX")",
++                 bMakeValid,
++                 sDevVAddr.uiAddr,
++                 sDevVAddr.uiAddr + (uiNumPages << uiLog2PageSize) - 1);
++#endif /*PDUMP*/
++
++    /* We should verify the size and contiguity when supporting variable page size */
++    PVR_ASSERT(psMMUContext != NULL);
++    PVR_ASSERT(psPMR != NULL);
++
++    /* Get general PT and address configs */
++    _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
++                     &psConfig, &hPriv, &psDevVAddrConfig);
++
++    _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
++                   &psLevel, &uiPTIndex);
++    uiFlushStart = uiPTIndex;
++
++    /* Do a page table walk and change the valid bit for every page in range */
++    for (i = 0; i < uiNumPages;)
++    {
++        /* Set the entry */
++        if (bMakeValid)
++        {
++            /* Only set valid if a physical address exists (sparse
++             * allocations may have unbacked offsets) */
++            eError = PMR_IsOffsetValid(psPMR, uiLog2PageSize, 1,
++                                       (IMG_DEVMEM_OFFSET_T) i << uiLog2PageSize,
++                                       &bValid);
++            PVR_LOG_GOTO_IF_ERROR(eError, "PMR_IsOffsetValid", e_exit);
++
++            if (bValid)
++            {
++                if (psConfig->uiBytesPerEntry == 8)
++                {
++                    ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
++                }
++                else if (psConfig->uiBytesPerEntry == 4)
++                {
++                    ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
++                }
++                else
++                {
++                    PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit);
++                }
++            }
++        }
++        else
++        {
++            if (psConfig->uiBytesPerEntry == 8)
++            {
++                ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
++            }
++            else if (psConfig->uiBytesPerEntry == 4)
++            {
++                ((IMG_UINT32 *)psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
++            }
++            else
++            {
++                PVR_LOG_GOTO_WITH_ERROR("psConfig->uiBytesPerEntry", eError, PVRSRV_ERROR_MMU_CONFIG_IS_WRONG, e_exit);
++            }
++        }
++
++#if defined(PDUMP)
++        PMR_PDumpSymbolicAddr(psPMR, i << uiLog2PageSize,
++                              sizeof(aszMemspaceName), &aszMemspaceName[0],
++                              sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
++                              &uiSymbolicAddrOffset,
++                              &uiNextSymName);
++
++        PDumpMMUDumpPxEntries(psDevNode,
++                              MMU_LEVEL_1,
++                              psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
++                              psLevel->sMemDesc.pvCpuVAddr,
++                              psLevel->sMemDesc.sDevPAddr,
++                              uiPTIndex,
++                              1,
++                              aszMemspaceName,
++                              aszSymbolicAddress,
++                              uiSymbolicAddrOffset,
++                              psConfig->uiBytesPerEntry,
++                              psConfig->uiAddrLog2Align,
++                              psConfig->uiAddrShift,
++                              psConfig->uiAddrMask,
++                              psConfig->uiProtMask,
++                              psConfig->uiValidEnMask,
++                              0,
++                              psMMUContext->psDevAttrs->eMMUType);
++#endif /*PDUMP*/
++
++        sDevVAddr.uiAddr += uiPageSize;
++        i++;
++
++        /* Calculate PT index and get new table descriptor */
++        if (uiPTIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (i != uiNumPages))
++        {
++            uiPTIndex++;
++        }
++        else
++        {
++            eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
++                                        &psLevel->sMemDesc.psMapping->sMemHandle,
++                                        uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
++                                        (uiPTIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
++            PVR_GOTO_IF_ERROR(eError, e_exit);
++
++            _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
++                           &psLevel, &uiPTIndex);
++            uiFlushStart = uiPTIndex;
++        }
++    }
++
++e_exit:
++
++    _MMU_PutPTConfig(psMMUContext, hPriv);
++
++    /* Flush TLB for PTs */
++    psDevNode->pfnMMUCacheInvalidate(psDevNode,
++                                     psMMUContext,
++                                     MMU_LEVEL_1,
++                                     !bMakeValid);
++
++    PVR_ASSERT(eError == PVRSRV_OK);
++    return eError;
++}
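Stripped of locking, cache maintenance and PDump, the per-entry work in MMU_ChangeValidity reduces to a masked read-modify-write that leaves the address and protection fields of each entry untouched. A standalone sketch; the single-bit valid mask and the 64-bit entry layout are assumptions for illustration (RGX derives them from psConfig->uiValidEnMask and uiBytesPerEntry):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PTE_VALID_MASK 0x1ull /* assumed bit position, illustration only */

    /* Toggle the valid bit without disturbing address/protection fields. */
    static void change_validity(uint64_t *table, unsigned idx, bool make_valid)
    {
        if (make_valid)
            table[idx] |= PTE_VALID_MASK;
        else
            table[idx] &= ~PTE_VALID_MASK;
    }

    int main(void)
    {
        uint64_t pt[4] = { 0x12345000, 0x12346000, 0x12347000, 0x12348000 };

        change_validity(pt, 2, true);
        printf("entry 2 valid  : 0x%llx\n", (unsigned long long)pt[2]);
        change_validity(pt, 2, false);
        printf("entry 2 invalid: 0x%llx\n", (unsigned long long)pt[2]);
        return 0;
    }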
++} ++ ++ ++/* ++ MMU_AcquireBaseAddr ++ */ ++PVRSRV_ERROR ++MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr) ++{ ++ if (!psMMUContext) ++ { ++ psPhysAddr->uiAddr = 0; ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ *psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr; ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ MMU_AcquireCPUBaseAddr ++ */ ++PVRSRV_ERROR ++MMU_AcquireCPUBaseAddr(MMU_CONTEXT *psMMUContext, void **ppvCPUVAddr) ++{ ++ if (!psMMUContext) ++ { ++ *ppvCPUVAddr = NULL; ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ *ppvCPUVAddr = psMMUContext->sBaseLevelInfo.sMemDesc.pvCpuVAddr; ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ MMU_ReleaseBaseAddr ++ */ ++void ++MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMMUContext); ++} ++ ++/* ++ MMU_AppendCacheFlags, MMU_ExchangeCacheFlags ++*/ ++ ++void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32AppendFlags) ++{ ++ PVR_ASSERT(psMMUContext != NULL); ++ ++ if (psMMUContext == NULL) ++ { ++ return; ++ } ++ ++ OSAtomicOr(&psMMUContext->sCacheFlags, (IMG_INT)ui32AppendFlags); ++} ++ ++IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags) ++{ ++ PVR_ASSERT(psMMUContext != NULL); ++ ++ if (psMMUContext == NULL) ++ { ++ return 0; ++ } ++ ++ return (IMG_UINT32)OSAtomicExchange(&psMMUContext->sCacheFlags, (IMG_INT)ui32NewCacheFlags); ++} ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++/* ++ MMU_GetOSids ++ */ ++ ++void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt) ++{ ++ *pui32OSid = psMMUContext->psPhysMemCtx->ui32OSid; ++ *pui32OSidReg = psMMUContext->psPhysMemCtx->ui32OSidReg; ++ *pbOSidAxiProt = psMMUContext->psPhysMemCtx->bOSidAxiProt; ++ ++ return; ++} ++ ++#endif ++ ++/* ++ MMU_CheckFaultAddress ++ */ ++void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext, ++ IMG_DEV_VIRTADDR *psDevVAddr, ++ MMU_FAULT_DATA *psOutFaultData) ++{ ++ /* Ideally the RGX defs should be via callbacks, but the function is only called from RGX. */ ++#if defined(SUPPORT_RGX) ++# define MMU_MASK_VALID_FOR_32BITS(level) \ ++ ((RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN | \ ++ RGX_MMUCTRL_##level##_DATA_VALID_EN) <= 0xFFFFFFFF) ++# define MMU_VALID_STR(entry,level) \ ++ (apszMMUValidStr[((((entry)&(RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN))!=0) << 1)| \ ++ ((((entry)&(RGX_MMUCTRL_##level##_DATA_VALID_EN))!=0) << 0)]) ++ static const IMG_PCHAR apszMMUValidStr[1<<2] = {/*--*/ "not valid", ++ /*-V*/ "valid", ++ /*P-*/ "pending", ++ /*PV*/ "inconsistent (pending and valid)"}; ++#else ++# define MMU_MASK_VALID_FOR_32BITS(level) 0 ++# define MMU_VALID_STR(entry,level) ("??") ++#endif ++ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs; ++ MMU_LEVEL eMMULevel = psDevAttrs->eTopLevel; ++ const MMU_PxE_CONFIG *psConfig; ++ const MMU_PxE_CONFIG *psMMUPDEConfig; ++ const MMU_PxE_CONFIG *psMMUPTEConfig; ++ const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig; ++ IMG_HANDLE hPriv; ++ MMU_Levelx_INFO *psLevel = NULL; ++ PVRSRV_ERROR eError; ++ IMG_UINT64 uiIndex; ++ IMG_UINT32 ui32PCIndex = 0xFFFFFFFF; ++ IMG_UINT32 ui32PDIndex = 0xFFFFFFFF; ++ IMG_UINT32 ui32PTIndex = 0xFFFFFFFF; ++ IMG_UINT32 ui32Log2PageSize; ++ MMU_FAULT_DATA sMMUFaultData = {0}; ++ MMU_LEVEL_DATA *psMMULevelData; ++ ++ OSLockAcquire(psMMUContext->hLock); ++ ++ /* ++ At this point we don't know the page size so assume it's 4K. ++ When we get the PD level (MMU_LEVEL_2) we can check to see ++ if this assumption is correct. 
++	*/
++	eError = psDevAttrs->pfnGetPageSizeConfiguration(12,
++	                                                 &psMMUPDEConfig,
++	                                                 &psMMUPTEConfig,
++	                                                 &psMMUDevVAddrConfig,
++	                                                 &hPriv);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_LOG(("Failed to get the page size info for log2 page size of 12"));
++	}
++
++	psLevel = &psMMUContext->sBaseLevelInfo;
++	psConfig = psDevAttrs->psBaseConfig;
++
++	sMMUFaultData.eTopLevel = psDevAttrs->eTopLevel;
++	sMMUFaultData.eType = MMU_FAULT_TYPE_NON_PM;
++
++
++	for (; eMMULevel > MMU_LEVEL_0; eMMULevel--)
++	{
++		if (eMMULevel == MMU_LEVEL_3)
++		{
++			/* Determine the PC index */
++			uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask;
++			uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift;
++			ui32PCIndex = (IMG_UINT32) uiIndex;
++			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex));
++
++			psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_3];
++			psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
++			psMMULevelData->ui32Index = ui32PCIndex;
++
++			if (ui32PCIndex >= psLevel->ui32NumOfEntries)
++			{
++				psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
++				break;
++			}
++
++			if (psConfig->uiBytesPerEntry == 4)
++			{
++				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
++
++				psMMULevelData->ui64Address = pui32Ptr[ui32PCIndex];
++				if (MMU_MASK_VALID_FOR_32BITS(PC))
++				{
++					psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PCIndex] & psConfig->uiProtMask, PC);
++				}
++				else
++				{
++					psMMULevelData->psDebugStr = "";
++					PVR_LOG(("Invalid RGX_MMUCTRL_PC_DATA_ENTRY mask for 32-bit entry"));
++				}
++			}
++			else
++			{
++				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
++
++				psMMULevelData->ui64Address = pui64Ptr[ui32PCIndex];
++				psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PCIndex] & psConfig->uiProtMask, PC);
++
++			}
++
++			psLevel = psLevel->apsNextLevel[ui32PCIndex];
++			if (!psLevel)
++			{
++				break;
++			}
++			psConfig = psMMUPDEConfig;
++			continue; /* continue to the next level */
++		}
++
++
++		if (eMMULevel == MMU_LEVEL_2)
++		{
++			/* Determine the PD index */
++			uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask;
++			uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift;
++			ui32PDIndex = (IMG_UINT32) uiIndex;
++			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex));
++
++			psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_2];
++			psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
++			psMMULevelData->ui32Index = ui32PDIndex;
++
++			if (ui32PDIndex >= psLevel->ui32NumOfEntries)
++			{
++				psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
++				break;
++			}
++
++			if (psConfig->uiBytesPerEntry == 4)
++			{
++				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
++
++				psMMULevelData->ui64Address = pui32Ptr[ui32PDIndex];
++				if (MMU_MASK_VALID_FOR_32BITS(PD))
++				{
++					psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PDIndex] & psMMUPDEConfig->uiProtMask, PD);
++				}
++				else
++				{
++					psMMULevelData->psDebugStr = "";
++					PVR_LOG(("Invalid RGX_MMUCTRL_PD_DATA_ENTRY mask for 32-bit entry"));
++				}
++
++				if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
++				{
++					PVR_LOG(("Failed to get the page size from the PDE"));
++				}
++			}
++			else
++			{
++				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
++
++				psMMULevelData->ui64Address = pui64Ptr[ui32PDIndex];
++				psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PDIndex] & psMMUPDEConfig->uiProtMask, PD);
++
++				if (psDevAttrs->pfnGetPageSizeFromVirtAddr != NULL)
++				{
++					/* MMU_VERSION >= 4 */
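++					/* Editor's descriptive note: per the device-attribute
++					 * documentation, on MMU4 the data page size is not
++					 * encoded in the PDE, so it is derived from the
++					 * faulting virtual address instead. */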
++					if (psDevAttrs->pfnGetPageSizeFromVirtAddr(psMMUContext->psPhysMemCtx->psDevNode, *psDevVAddr, &ui32Log2PageSize) != PVRSRV_OK)
++					{
++						PVR_LOG(("Failed to get the page size from the virtual address"));
++					}
++				}
++				else if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
++				{
++					PVR_LOG(("Failed to get the page size from the PDE"));
++				}
++			}
++
++			/*
++				We assumed the page size was 4K. Now that we have the actual
++				size from the PDE, we can confirm whether that assumption was
++				correct. Until now it hasn't mattered, as the PC and PD are
++				the same regardless of the page size.
++			*/
++			if (ui32Log2PageSize != 12)
++			{
++				/* Put the 4K page size data */
++				psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
++
++				/* Get the correct size data */
++				eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize,
++				                                                 &psMMUPDEConfig,
++				                                                 &psMMUPTEConfig,
++				                                                 &psMMUDevVAddrConfig,
++				                                                 &hPriv);
++				if (eError != PVRSRV_OK)
++				{
++					PVR_LOG(("Failed to get the page size info for log2 page size of %d", ui32Log2PageSize));
++					break;
++				}
++			}
++			psLevel = psLevel->apsNextLevel[ui32PDIndex];
++			if (!psLevel)
++			{
++				break;
++			}
++			psConfig = psMMUPTEConfig;
++			continue; /* continue to the next level */
++		}
++
++
++		if (eMMULevel == MMU_LEVEL_1)
++		{
++			/* Determine the PT index */
++			uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask;
++			uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift;
++			ui32PTIndex = (IMG_UINT32) uiIndex;
++			PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex));
++
++			psMMULevelData = &sMMUFaultData.sLevelData[MMU_LEVEL_1];
++			psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
++			psMMULevelData->ui32Index = ui32PTIndex;
++
++			if (ui32PTIndex >= psLevel->ui32NumOfEntries)
++			{
++				psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
++				break;
++			}
++
++			if (psConfig->uiBytesPerEntry == 4)
++			{
++				IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
++
++				psMMULevelData->ui64Address = pui32Ptr[ui32PTIndex];
++				if (MMU_MASK_VALID_FOR_32BITS(PT))
++				{
++					psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT);
++				}
++				else
++				{
++					psMMULevelData->psDebugStr = "";
++					PVR_LOG(("Invalid RGX_MMUCTRL_PT_DATA_ENTRY mask for 32-bit entry"));
++				}
++			}
++			else
++			{
++				IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
++
++				psMMULevelData->ui64Address = pui64Ptr[ui32PTIndex];
++				psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PTIndex] & psMMUPTEConfig->uiProtMask, PT);
++
++			}
++			goto e1;
++		}
++
++		PVR_LOG(("Unsupported MMU setup: %d", eMMULevel));
++		break;
++	}
++
++e1:
++	/* Put the page size data back */
++	psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
++	OSLockRelease(psMMUContext->hLock);
++
++	*psOutFaultData = sMMUFaultData;
++}
++
++static IMG_UINT64 MMU_GetVDevAddrPTE(MMU_CONTEXT *psMMUContext,
++                                     const MMU_PxE_CONFIG *psConfig,
++                                     const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
++                                     IMG_UINT32 uiLog2PageSize,
++                                     IMG_DEV_VIRTADDR sDevVAddr,
++                                     IMG_BOOL *pbStatusOut)
++{
++	MMU_Levelx_INFO *psLevel = NULL;
++	IMG_UINT32 uiIndex = 0;
++	IMG_BOOL bStatus = IMG_FALSE;
++	IMG_UINT64 ui64Entry = 0;
++
++	OSLockAcquire(psMMUContext->hLock);
++
++	switch (psMMUContext->psDevAttrs->eTopLevel)
++	{
++	case MMU_LEVEL_3:
++		uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
++		psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
++		if (psLevel == NULL)
++			break;
++
++		__fallthrough;
++	case MMU_LEVEL_2:
++		uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
++
++		if (psLevel != NULL)
++			psLevel = psLevel->apsNextLevel[uiIndex];
++		else
++			psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
++
++		if (psLevel == NULL)
++			break;
++
++		__fallthrough;
++	case MMU_LEVEL_1:
++		uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
++
++		if (psLevel == NULL)
++			psLevel = &psMMUContext->sBaseLevelInfo;
++
++		ui64Entry = ((IMG_UINT64 *)psLevel->sMemDesc.pvCpuVAddr)[uiIndex];
++		bStatus = ui64Entry & psConfig->uiValidEnMask;
++
++		break;
++	default:
++		PVR_LOG(("MMU_IsVDevAddrValid: Unsupported MMU setup"));
++		break;
++	}
++
++	OSLockRelease(psMMUContext->hLock);
++
++	*pbStatusOut = bStatus;
++
++	return ui64Entry;
++}
++
++IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
++                             IMG_UINT32 uiLog2PageSize,
++                             IMG_DEV_VIRTADDR sDevVAddr)
++{
++	IMG_BOOL bStatus;
++	const MMU_PxE_CONFIG *psConfig;
++	IMG_HANDLE hPriv;
++	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
++
++	_MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig);
++
++	MMU_GetVDevAddrPTE(psMMUContext,
++	                   psConfig,
++	                   psDevVAddrConfig,
++	                   uiLog2PageSize,
++	                   sDevVAddr,
++	                   &bStatus);
++
++	_MMU_PutPTConfig(psMMUContext, hPriv);
++
++	return bStatus;
++}
++
++#if defined(PDUMP)
++/*
++	MMU_ContextDerivePCPDumpSymAddr
++*/
++PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
++                                             IMG_CHAR *pszPDumpSymbolicNameBuffer,
++                                             size_t uiPDumpSymbolicNameBufferSize)
++{
++	size_t uiCount;
++	IMG_UINT64 ui64PhysAddr;
++	PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId;
++
++	if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid)
++	{
++		/* We don't have any allocations. You're not allowed to ask
++		 * for the page catalogue base address until you've made at
++		 * least one allocation.
++		 */
++		return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR;
++	}
++
++	ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr;
++
++	PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName)));
++
++	/* The page catalogue symbolic name is formed from the page catalogue
++	   phys addr prefixed with MMUPC_. */
++	uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer,
++	                     uiPDumpSymbolicNameBufferSize,
++	                     ":%s:%s%016"IMG_UINT64_FMTSPECX,
++	                     psDevId->pszPDumpDevName,
++	                     psMMUContext->sBaseLevelInfo.sMemDesc.bValid?"MMUPC_":"XXX",
++	                     ui64PhysAddr);
++
++	if (uiCount + 1 > uiPDumpSymbolicNameBufferSize)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	return PVRSRV_OK;
++}
++
++/*
++	MMU_PDumpWritePageCatBase
++*/
++PVRSRV_ERROR
++MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
++                          const IMG_CHAR *pszSpaceName,
++                          IMG_DEVMEM_OFFSET_T uiOffset,
++                          IMG_UINT32 ui32WordSize,
++                          IMG_UINT32 ui32AlignShift,
++                          IMG_UINT32 ui32Shift,
++                          PDUMP_FLAGS_T uiPdumpFlags)
++{
++	PVRSRV_ERROR eError;
++	IMG_CHAR aszPageCatBaseSymbolicAddr[100];
++	const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName;
++
++	eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext,
++	                                         &aszPageCatBaseSymbolicAddr[0],
++	                                         sizeof(aszPageCatBaseSymbolicAddr));
++	if (eError == PVRSRV_OK)
++	{
++		eError = PDumpWriteSymbAddress(psMMUContext->psPhysMemCtx->psDevNode,
++		                               pszSpaceName,
++		                               uiOffset,
++		                               aszPageCatBaseSymbolicAddr,
++		                               0, /* offset -- Could be non-zero for var.
pgsz */ ++ pszPDumpDevName, ++ ui32WordSize, ++ ui32AlignShift, ++ ui32Shift, ++ uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS); ++ } ++ ++ return eError; ++} ++ ++/* ++ MMU_AcquirePDumpMMUContext ++ */ ++PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, ++ IMG_UINT32 *pui32PDumpMMUContextID, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; ++ ++ if (!psMMUContext->ui32PDumpContextIDRefCount) ++ { ++ PDUMP_MMU_ALLOC_MMUCONTEXT(psMMUContext->psPhysMemCtx->psDevNode, ++ psDevId->pszPDumpDevName, ++ psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr, ++ psMMUContext->psDevAttrs->eMMUType, ++ &psMMUContext->uiPDumpContextID, ++ ui32PDumpFlags); ++ } ++ ++ psMMUContext->ui32PDumpContextIDRefCount++; ++ *pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID; ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ MMU_ReleasePDumpMMUContext ++ */ ++PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psPhysMemCtx->psDevNode->sDevId; ++ ++ PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0); ++ psMMUContext->ui32PDumpContextIDRefCount--; ++ ++ if (psMMUContext->ui32PDumpContextIDRefCount == 0) ++ { ++ PDUMP_MMU_FREE_MMUCONTEXT(psMMUContext->psPhysMemCtx->psDevNode, ++ psDevId->pszPDumpDevName, ++ psMMUContext->uiPDumpContextID, ++ ui32PDumpFlags); ++ } ++ ++ return PVRSRV_OK; ++} ++#endif ++ ++/****************************************************************************** ++ End of file (mmu_common.c) ++ ******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/mmu_common.h b/drivers/gpu/drm/img-rogue/mmu_common.h +new file mode 100644 +index 000000000000..a84fa697f572 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/mmu_common.h +@@ -0,0 +1,792 @@ ++/*************************************************************************/ /*! ++@File ++@Title Common MMU Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements basic low level control of MMU. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef MMU_COMMON_H
++#define MMU_COMMON_H
++
++/*
++	The Memory Management Unit (MMU) performs device virtual to physical
++	translation.
++
++	Terminology:
++	- page catalogue, PC (optional, 3 tier MMU)
++	- page directory, PD
++	- page table, PT (can be variable sized)
++	- data page, DP (can be variable sized)
++	Note: PD and PC are fixed size and can't be larger than the native
++	physical (CPU) page size
++	Shifts and AlignShift variables:
++	- 'xxxShift' represents the number of bits a bitfield is shifted left from bit0
++	- 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units
++	  by applying a bit shift left by 'xxxAlignShift' bits
++*/
++
++/*
++	Device Virtual Address Config:
++
++	Incoming Device Virtual Address is deconstructed into up to 4
++	fields, where the virtual address is up to 64bits:
++	MSB-----------------------------------------------LSB
++	| PC Index:  | PD Index:  | PT Index: | DP offset: |
++	| d bits     | c bits     | b-v bits  | a+v bits   |
++	-----------------------------------------------------
++	where v is the variable page table modifier, e.g.
++			v == 0 -> 4KB DP
++			v == 2 -> 16KB DP
++			v == 4 -> 64KB DP
++			v == 6 -> 256KB DP
++			v == 8 -> 1MB DP
++			v == 10 -> 4MB DP
++	(See the illustrative sketch further below for how this
++	decomposition is applied.)
++*/
++
++/* services/server/include/ */
++#include "pmr.h"
++
++/* include/ */
++#include "img_types.h"
++#include "img_defs.h"
++#include "pvr_notifier.h"
++#include "pvrsrv_error.h"
++#include "servicesext.h"
++
++
++/*!
++	The level of the MMU
++*/
++typedef enum
++{
++	MMU_LEVEL_0 = 0, /* Level 0 = Page */
++
++	MMU_LEVEL_1,
++	MMU_LEVEL_2,
++	MMU_LEVEL_3,
++	MMU_LEVEL_LAST
++} MMU_LEVEL;
++
++/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */
++#include "pdump_mmu.h"
++
++#define MMU_MAX_LEVEL 3
++
++typedef struct _MMU_LEVEL_DATA_
++{
++	IMG_UINT32 ui32Index;
++	IMG_UINT32 ui32NumOfEntries;
++	IMG_CHAR const *psDebugStr;
++	IMG_UINT8 uiBytesPerEntry;
++	IMG_UINT64 ui64Address;
++} MMU_LEVEL_DATA;
++
++typedef enum _MMU_FAULT_TYPE_
++{
++	MMU_FAULT_TYPE_UNKNOWN = 0, /* If fault is not analysed by Host */
++	MMU_FAULT_TYPE_PM,
++	MMU_FAULT_TYPE_NON_PM,
++} MMU_FAULT_TYPE;
++
++typedef struct _MMU_FAULT_DATA_
++{
++	MMU_LEVEL eTopLevel;
++	MMU_FAULT_TYPE eType;
++	MMU_LEVEL_DATA sLevelData[MMU_LEVEL_LAST];
++} MMU_FAULT_DATA;
++
++struct _MMU_DEVVADDR_CONFIG_;
++
++/*!
++	MMU device attributes. This structure is the interface between the generic
++	MMU code and the device specific MMU code.
++*/
++typedef struct _MMU_DEVICEATTRIBS_
++{
++	PDUMP_MMU_TYPE eMMUType;
++
++	IMG_CHAR *pszMMUPxPDumpMemSpaceName;
++
++	/*! The type of the top level object */
++	MMU_LEVEL eTopLevel;
++
++	/*!
Alignment requirement of the base object */ ++ IMG_UINT32 ui32BaseAlign; ++ ++ /*! HW config of the base object */ ++ struct _MMU_PxE_CONFIG_ *psBaseConfig; ++ ++ /*! Address split for the base object */ ++ const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig; ++ ++ /*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */ ++ IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); ++ /*! Callback for creating protection bits for the page catalogue entry with 4 byte entry */ ++ IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32 uiProtFlags); ++ /*! Callback for creating protection bits for the page directory entry with 8 byte entry */ ++ IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); ++ /*! Callback for creating protection bits for the page directory entry with 4 byte entry */ ++ IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32 uiProtFlags); ++ /*! Callback for creating protection bits for the page table entry with 8 byte entry */ ++ IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); ++ /*! Callback for creating protection bits for the page table entry with 4 byte entry */ ++ IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32 uiProtFlags); ++ ++ /*! Callback for getting the MMU configuration based on the specified page size */ ++ PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize, ++ const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig, ++ const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig, ++ const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig, ++ IMG_HANDLE *phPriv2); ++ /*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */ ++ PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv); ++ ++ /*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */ ++ PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *); ++ /*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */ ++ PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *); ++ /*! Callback for getting the page size directly from the address. Supported on MMU4 */ ++ PVRSRV_ERROR (*pfnGetPageSizeFromVirtAddr)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_DEV_VIRTADDR, IMG_UINT32 *); ++ ++ /*! Private data handle */ ++ IMG_HANDLE hGetPageSizeFnPriv; ++} MMU_DEVICEATTRIBS; ++ ++/*! ++ MMU virtual address split ++*/ ++typedef struct _MMU_DEVVADDR_CONFIG_ ++{ ++ /*! Page catalogue index mask */ ++ IMG_UINT64 uiPCIndexMask; ++ /*! Page catalogue index shift */ ++ IMG_UINT8 uiPCIndexShift; ++ /*! Total number of PC entries */ ++ IMG_UINT32 uiNumEntriesPC; ++ /*! Page directory mask */ ++ IMG_UINT64 uiPDIndexMask; ++ /*! Page directory shift */ ++ IMG_UINT8 uiPDIndexShift; ++ /*! Total number of PD entries */ ++ IMG_UINT32 uiNumEntriesPD; ++ /*! Page table mask */ ++ IMG_UINT64 uiPTIndexMask; ++ /*! Page index shift */ ++ IMG_UINT8 uiPTIndexShift; ++ /*! Total number of PT entries */ ++ IMG_UINT32 uiNumEntriesPT; ++ /*! Page offset mask */ ++ IMG_UINT64 uiPageOffsetMask; ++ /*! Page offset shift */ ++ IMG_UINT8 uiPageOffsetShift; ++ /*! 
First virtual address mappable for this config */
++	IMG_UINT64 uiOffsetInBytes;
++
++} MMU_DEVVADDR_CONFIG;
++
++/*
++	P(C/D/T) Entry Config:
++
++	MSB-----------------------------------------------LSB
++	| PT Addr:   | variable PT ctrl | protection flags: |
++	| bits c+v   | b bits           | a bits            |
++	-----------------------------------------------------
++	where v is the variable page table modifier and is optional
++*/
++/*!
++	Generic MMU entry description. This is used to describe PC, PD and PT entries.
++*/
++typedef struct _MMU_PxE_CONFIG_
++{
++	IMG_UINT8 uiBytesPerEntry;  /*! Size of an entry in bytes */
++
++	IMG_UINT64 uiAddrMask;      /*! Physical address mask */
++	IMG_UINT8 uiAddrShift;      /*! Physical address shift */
++	IMG_UINT8 uiAddrLog2Align;  /*! Physical address Log 2 alignment */
++
++	IMG_UINT64 uiVarCtrlMask;   /*! Variable control mask */
++	IMG_UINT8 uiVarCtrlShift;   /*! Variable control shift */
++
++	IMG_UINT64 uiProtMask;      /*! Protection flags mask */
++	IMG_UINT8 uiProtShift;      /*! Protection flags shift */
++
++	IMG_UINT64 uiValidEnMask;   /*! Entry valid bit mask */
++	IMG_UINT8 uiValidEnShift;   /*! Entry valid bit shift */
++} MMU_PxE_CONFIG;
++
++/* MMU Protection flags */
++
++
++/* These are specified generically and in a h/w independent way, and
++   are interpreted at each level (PC/PD/PT) separately. */
++
++/* The following flags are for internal use only, and should not
++   traverse the API */
++#define MMU_PROTFLAGS_INVALID 0x80000000U
++
++typedef IMG_UINT32 MMU_PROTFLAGS_T;
++
++/* The following flags should be supplied by the caller: */
++#define MMU_PROTFLAGS_READABLE (1U<<0)
++#define MMU_PROTFLAGS_WRITEABLE (1U<<1)
++#define MMU_PROTFLAGS_CACHE_COHERENT (1U<<2)
++#define MMU_PROTFLAGS_CACHED (1U<<3)
++
++/* Device specific flags */
++#define MMU_PROTFLAGS_DEVICE_OFFSET 16
++#define MMU_PROTFLAGS_DEVICE_MASK 0x000f0000UL
++#define MMU_PROTFLAGS_DEVICE(n) \
++	(((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \
++	 MMU_PROTFLAGS_DEVICE_MASK)
++
++
++typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
++
++struct _PVRSRV_DEVICE_NODE_;
++
++struct _CONNECTION_DATA_;
++
++typedef struct _MMU_PAGESIZECONFIG_
++{
++	const MMU_PxE_CONFIG *psPDEConfig;
++	const MMU_PxE_CONFIG *psPTEConfig;
++	const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
++	IMG_UINT32 uiRefCount;
++	IMG_UINT32 uiMaxRefCount;
++} MMU_PAGESIZECONFIG;
++
++/*************************************************************************/ /*!
++@Function       MMU_ContextCreate
++
++@Description    Create a new MMU context
++
++@Input          psConnection            Connection requesting the MMU context
++                                        creation. Can be NULL for kernel/FW
++                                        memory context.
++@Input          psDevNode               Device node of the device to create the
++                                        MMU context for
++@Output         ppsMMUContext           The created MMU context
++
++@Return         PVRSRV_OK if the MMU context was successfully created
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++MMU_ContextCreate(struct _CONNECTION_DATA_ *psConnection,
++                  struct _PVRSRV_DEVICE_NODE_ *psDevNode,
++                  MMU_CONTEXT **ppsMMUContext,
++                  MMU_DEVICEATTRIBS *psDevAttrs);
++
++
++/*************************************************************************/ /*!
++@Function       MMU_ContextDestroy
++
++@Description    Destroy an MMU context
++
++@Input          psMMUContext            MMU context to destroy
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++MMU_ContextDestroy(MMU_CONTEXT *psMMUContext);
++
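++/*
++	Editor's illustrative sketch: the two helpers below are NOT part of the
++	original interface. They only demonstrate how the mask/shift pairs
++	published by MMU_DEVVADDR_CONFIG and MMU_PxE_CONFIG above are meant to
++	be combined: an index is recovered as (address & mask) >> shift, and an
++	8-byte entry is assembled by aligning the physical address into the
++	address field and OR-ing in protection and valid bits. The helper names
++	are hypothetical; the real derivations live behind the pfnDerive*
++	callbacks in MMU_DEVICEATTRIBS.
++*/
++static INLINE IMG_UINT32
++MMU_SketchCalcPTIdx(IMG_DEV_VIRTADDR sDevVAddr,
++                    const MMU_DEVVADDR_CONFIG *psCfg)
++{
++	/* Isolate the PT index bitfield and move it down to bit 0 */
++	return (IMG_UINT32)((sDevVAddr.uiAddr & psCfg->uiPTIndexMask)
++	                    >> psCfg->uiPTIndexShift);
++}
++
++static INLINE IMG_UINT64
++MMU_SketchMakePTE8(IMG_DEV_PHYADDR sDevPAddr,
++                   IMG_UINT64 ui64ProtBits,
++                   const MMU_PxE_CONFIG *psCfg)
++{
++	/* Place the (log2-aligned) physical address into the entry's address
++	 * field, then merge the protection bits and set the valid bit */
++	IMG_UINT64 ui64Addr = (sDevPAddr.uiAddr >> psCfg->uiAddrLog2Align)
++	                      << psCfg->uiAddrShift;
++
++	return (ui64Addr & psCfg->uiAddrMask)
++	       | (ui64ProtBits & psCfg->uiProtMask)
++	       | psCfg->uiValidEnMask;
++}
++
++/*************************************************************************/ /*!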
++@Function MMU_Alloc ++ ++@Description Allocate the page tables required for the specified virtual range ++ ++@Input psMMUContext MMU context to operate on ++ ++@Input uSize The size of the allocation ++ ++@Output puActualSize Actual size of allocation ++ ++@Input uiProtFlags Generic MMU protection flags ++ ++@Input uDevVAddrAlignment Alignment requirement of the virtual ++ allocation ++ ++@Input psDevVAddr Virtual address to start the allocation ++ from ++ ++@Return PVRSRV_OK if the allocation of the page tables was successful ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++MMU_Alloc(MMU_CONTEXT *psMMUContext, ++ IMG_DEVMEM_SIZE_T uSize, ++ IMG_DEVMEM_SIZE_T *puActualSize, ++ IMG_UINT32 uiProtFlags, ++ IMG_DEVMEM_SIZE_T uDevVAddrAlignment, ++ IMG_DEV_VIRTADDR *psDevVAddr, ++ IMG_UINT32 uiLog2PageSize); ++ ++ ++/*************************************************************************/ /*! ++@Function MMU_Free ++ ++@Description Free the page tables of the specified virtual range ++ ++@Input psMMUContext MMU context to operate on ++ ++@Input sDevVAddr Virtual address to start the free ++ from ++ ++@Input uiSize The size of the allocation ++ ++@Return None ++*/ ++/*****************************************************************************/ ++void ++MMU_Free(MMU_CONTEXT *psMMUContext, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 uiLog2DataPageSize); ++ ++ ++/*************************************************************************/ /*! ++@Function MMU_MapPages ++ ++@Description Map pages to the MMU. ++ Two modes of operation: One requires a list of physical page ++ indices that are going to be mapped, the other just takes ++ the PMR and a possible offset to map parts of it. ++ ++@Input psMMUContext MMU context to operate on ++ ++@Input uiMappingFlags Memalloc flags for the mapping ++ ++@Input sDevVAddrBase Device virtual address of the 1st page ++ ++@Input psPMR PMR to map ++ ++@Input ui32PhysPgOffset Physical offset into the PMR ++ ++@Input ui32MapPageCount Number of pages to map ++ ++@Input paui32MapIndices List of page indices to map, ++ can be NULL ++ ++@Input uiLog2PageSize Log2 page size of the pages to map ++ ++@Return PVRSRV_OK if the mapping was successful ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++MMU_MapPages(MMU_CONTEXT *psMMUContext, ++ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags, ++ IMG_DEV_VIRTADDR sDevVAddrBase, ++ PMR *psPMR, ++ IMG_UINT32 ui32PhysPgOffset, ++ IMG_UINT32 ui32MapPageCount, ++ IMG_UINT32 *paui32MapIndices, ++ IMG_UINT32 uiLog2PageSize); ++ ++/*************************************************************************/ /*! ++@Function MMU_UnmapPages ++ ++@Description Unmap pages from the MMU. 
++
++@Input          psMMUContext            MMU context to operate on
++
++@Input          uiMappingFlags          Memalloc flags for the mapping
++
++@Input          sDevVAddr               Device virtual address of the 1st page
++
++@Input          ui32PageCount           Number of pages to unmap
++
++@Input          pai32UnmapIndicies      Array of page indices to be unmapped
++
++@Input          uiLog2PageSize          log2 size of the page
++
++@Input          uiMemAllocFlags         Indicates if the unmapped regions need
++                                        to be backed by dummy or zero page
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++MMU_UnmapPages(MMU_CONTEXT *psMMUContext,
++               PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
++               IMG_DEV_VIRTADDR sDevVAddr,
++               IMG_UINT32 ui32PageCount,
++               IMG_UINT32 *pai32UnmapIndicies,
++               IMG_UINT32 uiLog2PageSize,
++               PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags);
++
++/*************************************************************************/ /*!
++@Function       MMU_MapPMRFast
++
++@Description    Map a PMR into the MMU. The PMR must not be sparse.
++                This is supposed to cover most mappings and, as the name
++                suggests, should be as fast as possible.
++
++@Input          psMMUContext            MMU context to operate on
++
++@Input          sDevVAddr               Device virtual address to map the PMR
++                                        into
++
++@Input          psPMR                   PMR to map
++
++@Input          uiSizeBytes             Size in bytes to map
++
++@Input          uiMappingFlags          Memalloc flags for the mapping
++
++@Return         PVRSRV_OK if the PMR was successfully mapped
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++MMU_MapPMRFast(MMU_CONTEXT *psMMUContext,
++               IMG_DEV_VIRTADDR sDevVAddr,
++               const PMR *psPMR,
++               IMG_DEVMEM_SIZE_T uiSizeBytes,
++               PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
++               IMG_UINT32 uiLog2PageSize);
++
++/*************************************************************************/ /*!
++@Function       MMU_UnmapPMRFast
++
++@Description    Unmap pages from the MMU as fast as possible.
++                The PMR must not be sparse.
++
++@Input          psMMUContext            MMU context to operate on
++
++@Input          sDevVAddrBase           Device virtual address of the 1st page
++
++@Input          ui32PageCount           Number of pages to unmap
++
++@Input          uiLog2PageSize          log2 size of the page
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
++                 IMG_DEV_VIRTADDR sDevVAddrBase,
++                 IMG_UINT32 ui32PageCount,
++                 IMG_UINT32 uiLog2PageSize);
++
++/*************************************************************************/ /*!
++@Function       MMU_ChangeValidity
++
++@Description    Sets or unsets the valid bit of page table entries for a given
++                address range.
++
++@Input          psMMUContext            MMU context to operate on
++
++@Input          sDevVAddr               The device virtual base address of
++                                        the range we want to modify
++
++@Input          uiSizeBytes             The size of the range in bytes
++
++@Input          uiLog2PageSize          Log2 of the used page size
++
++@Input          bMakeValid              Choose to set or unset the valid bit.
++                                        (bMakeValid == IMG_TRUE ) -> SET
++                                        (bMakeValid == IMG_FALSE) -> UNSET
++
++@Input          psPMR                   The PMR backing the allocation.
++                                        Needed in case we have sparse memory
++                                        where we have to check whether a physical
++                                        address actually backs the virtual.
++
++@Return         PVRSRV_OK if successful
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
++                   IMG_DEV_VIRTADDR sDevVAddr,
++                   IMG_DEVMEM_SIZE_T uiSizeBytes,
++                   IMG_UINT32 uiLog2PageSize,
++                   IMG_BOOL bMakeValid,
++                   PMR *psPMR);
++
++/*************************************************************************/ /*!
++@Function       MMU_AcquireBaseAddr
++
++@Description    Acquire the device physical address of the base level MMU object
++
++@Input          psMMUContext            MMU context to operate on
++
++@Output         psPhysAddr              Device physical address of the base level
++                                        MMU object
++
++@Return         PVRSRV_OK if successful
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr);
++
++/*************************************************************************/ /*!
++@Function       MMU_AcquireCPUBaseAddr
++
++@Description    Acquire the CPU Virtual Address of the base level MMU object
++
++@Input          psMMUContext            MMU context to operate on
++
++@Output         ppvCPUVAddr             CPU Virtual Address of the base level
++                                        MMU object
++
++@Return         PVRSRV_OK if successful
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++MMU_AcquireCPUBaseAddr(MMU_CONTEXT *psMMUContext, void **ppvCPUVAddr);
++
++/*************************************************************************/ /*!
++@Function       MMU_ReleaseBaseAddr
++
++@Description    Release the device physical address of the base level MMU object
++
++@Input          psMMUContext            MMU context to operate on
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext);
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++/***********************************************************************************/ /*!
++@Function       MMU_SetOSids
++
++@Description    Set the OSid associated with the application (and the MMU Context)
++
++@Input          psMMUContext            MMU context to store the OSid on
++
++@Input          ui32OSid                The OSid in question
++
++@Input          ui32OSidReg             The value that the firmware will assign to the
++                                        registers.
++
++@Input          bOSidAxiProt            Toggles whether the AXI prot bit will be set or
++                                        not.
++@Return         None
++*/
++/***********************************************************************************/
++
++void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid,
++                  IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
++
++/***********************************************************************************/ /*!
++@Function       MMU_GetOSids
++
++@Description    Retrieve the OSid associated with the MMU context.
++
++@Input          psMMUContext            MMU context in which the OSid is stored
++
++@Output         pui32OSid               The OSid in question
++
++@Output         pui32OSidReg            The OSid that the firmware will assign to the
++                                        registers.
++
++@Output         pbOSidAxiProt           Toggles whether the AXI prot bit will be set or
++                                        not.
++@Return         None
++*/
++/***********************************************************************************/
++
++void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid,
++                  IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
++#endif
++
++/*************************************************************************/ /*!
++@Function       MMU_AppendCacheFlags
++
++@Description    Set the cache flags to the bitwise or of themselves and the
++                specified input flags, i.e.
++                ui32CacheFlags |= ui32NewCacheFlags,
++                atomically.
++                (See the illustrative usage sketch further below.)
++
++@Input          psMMUContext            MMU context
++
++@Input          ui32NewCacheFlags       Cache flags to append.
++
++@Return         None
++*/
++/*****************************************************************************/
++void MMU_AppendCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags);
++
++/*************************************************************************/ /*!
++@Function       MMU_ExchangeCacheFlags
++
++@Description    Exchange MMU context flags with specified value, atomically.
++
++@Input          psMMUContext            MMU context
++
++@Input          ui32NewCacheFlags       Cache flags to set.
++
++@Return         Previous MMU context cache flags.
++*/
++/*****************************************************************************/
++IMG_UINT32 MMU_ExchangeCacheFlags(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32NewCacheFlags);
++
++/*************************************************************************/ /*!
++@Function       MMU_CheckFaultAddress
++
++@Description    Check the specified MMU context to see if the provided address
++                should be valid
++
++@Input          psMMUContext            MMU context to check
++
++@Input          psDevVAddr              Address to check
++
++@Output         psOutFaultData          To store fault details after checking
++
++@Return         None
++*/
++/*****************************************************************************/
++void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
++                           IMG_DEV_VIRTADDR *psDevVAddr,
++                           MMU_FAULT_DATA *psOutFaultData);
++
++/*************************************************************************/ /*!
++@Function       MMU_IsVDevAddrValid
++@Description    Checks if given address is valid.
++@Input          psMMUContext            MMU context to check
++@Input          uiLog2PageSize          log2 of the page size
++@Input          sDevVAddr               Address to check
++@Return         IMG_TRUE if the address is valid
++*/ /**************************************************************************/
++IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
++                             IMG_UINT32 uiLog2PageSize,
++                             IMG_DEV_VIRTADDR sDevVAddr);
++
++#if defined(PDUMP)
++
++/*************************************************************************/ /*!
++@Function       MMU_ContextDerivePCPDumpSymAddr
++
++@Description    Derives a PDump Symbolic address for the top level MMU object
++
++@Input          psMMUContext            MMU context to operate on
++
++@Input          pszPDumpSymbolicNameBuffer      Buffer to write the PDump symbolic
++                                                address to
++
++@Input          uiPDumpSymbolicNameBufferSize   Size of the buffer
++
++@Return         PVRSRV_OK if successful
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
++                                IMG_CHAR *pszPDumpSymbolicNameBuffer,
++                                size_t uiPDumpSymbolicNameBufferSize);
++
++/*************************************************************************/ /*!
++@Function       MMU_PDumpWritePageCatBase
++
++@Description    PDump write of the top level MMU object to a device register
++
++@Input          psMMUContext            MMU context to operate on
++
++@Input          pszSpaceName            PDump name of the mem/reg space
++
++@Input          uiOffset                Offset to write the address to
++
++@Return         PVRSRV_OK if successful
++*/
++/*****************************************************************************/
++PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
++                                       const IMG_CHAR *pszSpaceName,
++                                       IMG_DEVMEM_OFFSET_T uiOffset,
++                                       IMG_UINT32 ui32WordSize,
++                                       IMG_UINT32 ui32AlignShift,
++                                       IMG_UINT32 ui32Shift,
++                                       PDUMP_FLAGS_T uiPdumpFlags);
++
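++/*
++	Editor's illustrative usage sketch for the cache-flag helpers declared
++	earlier in this header (hypothetical caller, not part of the original
++	interface): writers accumulate flush requirements on the context, and
++	the flush path drains them atomically with a single exchange.
++
++	static void SketchRequestFlush(MMU_CONTEXT *psMMUContext,
++	                               IMG_UINT32 ui32RequiredFlushFlags)
++	{
++		// Merge this caller's requirements into the pending set
++		MMU_AppendCacheFlags(psMMUContext, ui32RequiredFlushFlags);
++	}
++
++	static void SketchDoFlush(MMU_CONTEXT *psMMUContext)
++	{
++		// Atomically take ownership of all pending flags and reset them
++		IMG_UINT32 ui32Pending = MMU_ExchangeCacheFlags(psMMUContext, 0);
++
++		if (ui32Pending != 0)
++		{
++			// ... issue the device-specific cache/TLB maintenance ...
++		}
++	}
++*/
++
++/*************************************************************************/ /*!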
++@Function MMU_AcquirePDumpMMUContext ++ ++@Description Acquire a reference to the PDump MMU context for this MMU ++ context ++ ++@Input psMMUContext MMU context to operate on ++ ++@Output pui32PDumpMMUContextID PDump MMU context ID ++ ++@Return PVRSRV_OK if successful ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, ++ IMG_UINT32 *pui32PDumpMMUContextID, ++ IMG_UINT32 ui32PDumpFlags); ++ ++/*************************************************************************/ /*! ++@Function MMU_ReleasePDumpMMUContext ++ ++@Description Release a reference to the PDump MMU context for this MMU context ++ ++@Input psMMUContext MMU context to operate on ++ ++@Return PVRSRV_OK if successful ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext, ++ IMG_UINT32 ui32PDumpFlags); ++#else /* PDUMP */ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(MMU_PDumpWritePageCatBase) ++#endif ++static INLINE void ++MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext, ++ const IMG_CHAR *pszSpaceName, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT32 ui32WordSize, ++ IMG_UINT32 ui32AlignShift, ++ IMG_UINT32 ui32Shift, ++ PDUMP_FLAGS_T uiPdumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psMMUContext); ++ PVR_UNREFERENCED_PARAMETER(pszSpaceName); ++ PVR_UNREFERENCED_PARAMETER(uiOffset); ++ PVR_UNREFERENCED_PARAMETER(ui32WordSize); ++ PVR_UNREFERENCED_PARAMETER(ui32AlignShift); ++ PVR_UNREFERENCED_PARAMETER(ui32Shift); ++ PVR_UNREFERENCED_PARAMETER(uiPdumpFlags); ++} ++#endif /* PDUMP */ ++ ++void RGXMapBRN71422TargetPhysicalAddress(MMU_CONTEXT *psMMUContext); ++ ++#endif /* #ifdef MMU_COMMON_H */ +diff --git a/drivers/gpu/drm/img-rogue/module_common.c b/drivers/gpu/drm/img-rogue/module_common.c +new file mode 100644 +index 000000000000..aa7ace0dabd1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/module_common.c +@@ -0,0 +1,730 @@ ++/*************************************************************************/ /*! ++@File ++@Title Common Linux module setup ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include <linux/module.h>
++
++#if defined(CONFIG_DEBUG_FS)
++#include "pvr_debugfs.h"
++#endif /* defined(CONFIG_DEBUG_FS) */
++#if defined(CONFIG_PROC_FS)
++#include "pvr_procfs.h"
++#endif /* defined(CONFIG_PROC_FS) */
++#include "di_server.h"
++#include "private_data.h"
++#include "linkage.h"
++#include "power.h"
++#include "env_connection.h"
++#include "process_stats.h"
++#include "module_common.h"
++#include "pvrsrv.h"
++#include "srvcore.h"
++#if defined(SUPPORT_RGX)
++#include "rgxdevice.h"
++#endif
++#include "pvrsrv_error.h"
++#include "pvr_drv.h"
++#include "pvr_bridge_k.h"
++
++#include "pvr_fence.h"
++
++#if defined(SUPPORT_NATIVE_FENCE_SYNC)
++#include "pvr_sync.h"
++#if !defined(USE_PVRSYNC_DEVNODE)
++#include "pvr_sync_ioctl_drm.h"
++#endif
++#endif
++
++#include "ospvr_gputrace.h"
++
++#include "km_apphint.h"
++#include "srvinit.h"
++
++#include "pvr_ion_stats.h"
++
++#if defined(SUPPORT_DISPLAY_CLASS)
++/* Display class interface */
++#include "kerneldisplay.h"
++EXPORT_SYMBOL(DCRegisterDevice);
++EXPORT_SYMBOL(DCUnregisterDevice);
++EXPORT_SYMBOL(DCDisplayConfigurationRetired);
++EXPORT_SYMBOL(DCDisplayHasPendingCommand);
++EXPORT_SYMBOL(DCImportBufferAcquire);
++EXPORT_SYMBOL(DCImportBufferRelease);
++
++/* Physmem interface (required by LMA DC drivers) */
++#include "physheap.h"
++EXPORT_SYMBOL(PhysHeapAcquireByUsage);
++EXPORT_SYMBOL(PhysHeapRelease);
++EXPORT_SYMBOL(PhysHeapGetType);
++EXPORT_SYMBOL(PhysHeapGetCpuPAddr);
++EXPORT_SYMBOL(PhysHeapGetSize);
++EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr);
++
++EXPORT_SYMBOL(PVRSRVGetDriverStatus);
++EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR);
++EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR);
++
++#include "pvr_notifier.h"
++EXPORT_SYMBOL(PVRSRVCheckStatus);
++
++#include "pvr_debug.h"
++EXPORT_SYMBOL(PVRSRVGetErrorString);
++EXPORT_SYMBOL(PVRSRVGetDeviceInstance);
++#endif /* defined(SUPPORT_DISPLAY_CLASS) */
++
++#if defined(SUPPORT_RGX)
++#include "rgxapi_km.h"
++#if defined(SUPPORT_SHARED_SLC)
++EXPORT_SYMBOL(RGXInitSLC);
++#endif
++EXPORT_SYMBOL(RGXHWPerfConnect);
++EXPORT_SYMBOL(RGXHWPerfDisconnect);
++EXPORT_SYMBOL(RGXHWPerfControl);
++#if defined(RGX_FEATURE_HWPERF_VOLCANIC)
++EXPORT_SYMBOL(RGXHWPerfConfigureCounters);
++#else
++EXPORT_SYMBOL(RGXHWPerfConfigMuxCounters);
++EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCustomCounters);
++#endif
++EXPORT_SYMBOL(RGXHWPerfDisableCounters);
++EXPORT_SYMBOL(RGXHWPerfAcquireEvents);
++EXPORT_SYMBOL(RGXHWPerfReleaseEvents);
++EXPORT_SYMBOL(RGXHWPerfConvertCRTimeStamp);
++#if defined(SUPPORT_KERNEL_HWPERF_TEST)
++EXPORT_SYMBOL(OSAddTimer);
++EXPORT_SYMBOL(OSEnableTimer);
++EXPORT_SYMBOL(OSDisableTimer);
++EXPORT_SYMBOL(OSRemoveTimer);
++#endif
++#endif ++ ++static int PVRSRVDeviceSyncOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ struct drm_file *psDRMFile); ++ ++CONNECTION_DATA *LinuxServicesConnectionFromFile(struct file *pFile) ++{ ++ if (pFile) ++ { ++ struct drm_file *psDRMFile = pFile->private_data; ++ PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; ++ ++ return (CONNECTION_DATA*)psConnectionPriv->pvConnectionData; ++ } ++ ++ return NULL; ++} ++ ++CONNECTION_DATA *LinuxSyncConnectionFromFile(struct file *pFile) ++{ ++ if (pFile) ++ { ++ struct drm_file *psDRMFile = pFile->private_data; ++ PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; ++ ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) ++ return (CONNECTION_DATA*)psConnectionPriv->pvConnectionData; ++#else ++ return (CONNECTION_DATA*)psConnectionPriv->pvSyncConnectionData; ++#endif ++ } ++ ++ return NULL; ++} ++ ++/**************************************************************************/ /*! ++@Function PVRSRVDriverInit ++@Description Common one time driver initialisation ++@Return int 0 on success and a Linux error code otherwise ++*/ /***************************************************************************/ ++int PVRSRVDriverInit(void) ++{ ++ PVRSRV_ERROR error; ++ int os_err; ++ ++ error = PVROSFuncInit(); ++ if (error != PVRSRV_OK) ++ { ++ return -ENOMEM; ++ } ++ ++ error = PVRSRVCommonDriverInit(); ++ if (error != PVRSRV_OK) ++ { ++ return -ENODEV; ++ } ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) ++ error = pvr_sync_register_functions(); ++ if (error != PVRSRV_OK) ++ { ++ return -EPERM; ++ } ++ ++ os_err = pvr_sync_init(); ++ if (os_err != 0) ++ { ++ return os_err; ++ } ++#endif ++ ++ os_err = pvr_apphint_init(); ++ if (os_err != 0) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: failed AppHint setup(%d)", __func__, ++ os_err)); ++ } ++ ++#if defined(SUPPORT_RGX) ++ error = PVRGpuTraceSupportInit(); ++ if (error != PVRSRV_OK) ++ { ++ return -ENOMEM; ++ } ++#endif ++ ++#if defined(ANDROID) ++#if defined(CONFIG_PROC_FS) ++ error = PVRProcFsRegister(); ++ if (error != PVRSRV_OK) ++ { ++ return -ENOMEM; ++ } ++#elif defined(CONFIG_DEBUG_FS) ++ error = PVRDebugFsRegister(); ++ if (error != PVRSRV_OK) ++ { ++ return -ENOMEM; ++ } ++#endif /* defined(CONFIG_PROC_FS) || defined(CONFIG_DEBUG_FS) */ ++#else ++#if defined(CONFIG_DEBUG_FS) ++ error = PVRDebugFsRegister(); ++ if (error != PVRSRV_OK) ++ { ++ return -ENOMEM; ++ } ++#elif defined(CONFIG_PROC_FS) ++ error = PVRProcFsRegister(); ++ if (error != PVRSRV_OK) ++ { ++ return -ENOMEM; ++ } ++#endif /* defined(CONFIG_DEBUG_FS) || defined(CONFIG_PROC_FS) */ ++#endif /* defined(ANDROID) */ ++ ++ error = PVRSRVIonStatsInitialise(); ++ if (error != PVRSRV_OK) ++ { ++ return -ENODEV; ++ } ++ ++#if defined(SUPPORT_RGX) ++ /* calling here because we need to handle input from the file even ++ * before the devices are initialised ++ * note: we're not passing a device node because apphint callbacks don't ++ * need it */ ++ PVRGpuTraceInitAppHintCallbacks(NULL); ++#endif ++ ++ return 0; ++} ++ ++/**************************************************************************/ /*! 
++@Function PVRSRVDriverDeinit ++@Description Common one time driver de-initialisation ++@Return void ++*/ /***************************************************************************/ ++void PVRSRVDriverDeinit(void) ++{ ++ pvr_apphint_deinit(); ++ ++ PVRSRVIonStatsDestroy(); ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) ++ pvr_sync_deinit(); ++#endif ++ ++ PVRSRVCommonDriverDeInit(); ++ ++#if defined(SUPPORT_RGX) ++ PVRGpuTraceSupportDeInit(); ++#endif ++ ++ PVROSFuncDeInit(); ++} ++ ++/**************************************************************************/ /*! ++@Function PVRSRVDeviceInit ++@Description Common device related initialisation. ++@Input psDeviceNode The device node for which initialisation should be ++ performed ++@Return int 0 on success and a Linux error code otherwise ++*/ /***************************************************************************/ ++int PVRSRVDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) ++ { ++ PVRSRV_ERROR eError = pvr_sync_device_init(psDeviceNode->psDevConfig->pvOSDevice); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create sync (%d)", ++ __func__, eError)); ++ return -EBUSY; ++ } ++ } ++#endif ++ ++#if defined(SUPPORT_RGX) ++ { ++ int error = PVRGpuTraceInitDevice(psDeviceNode); ++ if (error != 0) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: failed to initialise PVR GPU Tracing on device%d (%d)", ++ __func__, psDeviceNode->sDevId.i32OsDeviceID, error)); ++ } ++ } ++#endif ++ ++ return 0; ++} ++ ++/**************************************************************************/ /*! ++@Function PVRSRVDeviceDeinit ++@Description Common device related de-initialisation. ++@Input psDeviceNode The device node for which de-initialisation should ++ be performed ++@Return void ++*/ /***************************************************************************/ ++void PVRSRVDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++#if defined(SUPPORT_RGX) ++ PVRGpuTraceDeInitDevice(psDeviceNode); ++#endif ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) ++ pvr_sync_device_deinit(psDeviceNode->psDevConfig->pvOSDevice); ++#endif ++ ++#if defined(SUPPORT_DMA_TRANSFER) ++ PVRSRVDeInitialiseDMA(psDeviceNode); ++#endif ++ ++ pvr_fence_cleanup(); ++} ++ ++/**************************************************************************/ /*! ++@Function PVRSRVDeviceShutdown ++@Description Common device shutdown. ++@Input psDeviceNode The device node representing the device that should ++ be shutdown ++@Return void ++*/ /***************************************************************************/ ++ ++void PVRSRVDeviceShutdown(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* ++ * Disable the bridge to stop processes trying to use the driver ++ * after it has been shut down. ++ */ ++ eError = LinuxBridgeBlockClientsAccess(IMG_TRUE); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to suspend driver (%d)", ++ __func__, eError)); ++ return; ++ } ++ ++ (void) PVRSRVSetDeviceSystemPowerState(psDeviceNode, ++ PVRSRV_SYS_POWER_STATE_OFF, ++ PVRSRV_POWER_FLAGS_NONE); ++} ++ ++/**************************************************************************/ /*! ++@Function PVRSRVDeviceSuspend ++@Description Common device suspend. 
++@Input          psDeviceNode            The device node representing the device that should
++                                        be suspended
++@Return         int                     0 on success and a Linux error code otherwise
++*/ /***************************************************************************/
++int PVRSRVDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	/*
++	 * LinuxBridgeBlockClientsAccess prevents processes from using the driver
++	 * while it's suspended (this is needed for Android). Acquire the bridge
++	 * lock first to ensure the driver isn't currently in use.
++	 */
++	LinuxBridgeBlockClientsAccess(IMG_FALSE);
++
++#if defined(SUPPORT_AUTOVZ)
++	/* To allow the driver to power down the GPU under AutoVz, the firmware must
++	 * be declared as offline, otherwise all power requests will be ignored. */
++	psDeviceNode->bAutoVzFwIsUp = IMG_FALSE;
++#endif
++
++	if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
++	                                    PVRSRV_SYS_POWER_STATE_OFF,
++	                                    PVRSRV_POWER_FLAGS_SUSPEND) != PVRSRV_OK)
++	{
++		LinuxBridgeUnblockClientsAccess();
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++/**************************************************************************/ /*!
++@Function       PVRSRVDeviceResume
++@Description    Common device resume.
++@Input          psDeviceNode            The device node representing the device that should
++                                        be resumed
++@Return         int                     0 on success and a Linux error code otherwise
++*/ /***************************************************************************/
++int PVRSRVDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
++	                                    PVRSRV_SYS_POWER_STATE_ON,
++	                                    PVRSRV_POWER_FLAGS_SUSPEND) != PVRSRV_OK)
++	{
++		return -EINVAL;
++	}
++
++	LinuxBridgeUnblockClientsAccess();
++
++	/*
++	 * Reprocess the device queues in case commands were blocked during
++	 * suspend.
++	 */
++	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)
++	{
++		PVRSRVCheckStatus(NULL);
++	}
++
++	return 0;
++}
++
++/**************************************************************************/ /*!
++@Function       PVRSRVDeviceServicesOpen
++@Description    Services device open.
++@Input          psDeviceNode            The device node representing the device being
++                                        opened by a user mode process
++@Input          psDRMFile               The DRM file data that backs the file handle
++                                        returned to the user mode process
++@Return         int                     0 on success and a Linux error code otherwise
++*/ /***************************************************************************/
++int PVRSRVDeviceServicesOpen(PVRSRV_DEVICE_NODE *psDeviceNode,
++                             struct drm_file *psDRMFile)
++{
++	static DEFINE_MUTEX(sDeviceInitMutex);
++	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++	ENV_CONNECTION_PRIVATE_DATA sPrivData;
++	PVRSRV_CONNECTION_PRIV *psConnectionPriv;
++	PVRSRV_ERROR eError;
++	int iErr = 0;
++
++	if (!psPVRSRVData)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__));
++		iErr = -ENODEV;
++		goto out;
++	}
++
++	mutex_lock(&sDeviceInitMutex);
++	/*
++	 * If the first attempt already set the state to bad,
++	 * there is no point in trying a second time, so get out
++	 */
++	if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_BAD)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Driver already in bad state.
++ __func__));
++ iErr = -ENODEV;
++ mutex_unlock(&sDeviceInitMutex);
++ goto out;
++ }
++
++ if (psDRMFile->driver_priv == NULL)
++ {
++ /* Allocate psConnectionPriv (stores private data and release pfn under driver_priv) */
++ psConnectionPriv = kzalloc(sizeof(*psConnectionPriv), GFP_KERNEL);
++ if (!psConnectionPriv)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: No memory to allocate driver_priv data", __func__));
++ iErr = -ENOMEM;
++ mutex_unlock(&sDeviceInitMutex);
++ goto fail_alloc_connection_priv;
++ }
++ }
++ else
++ {
++ psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv;
++ }
++
++ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
++ {
++ eError = PVRSRVCommonDeviceInitialise(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise device (%s)",
++ __func__, PVRSRVGetErrorString(eError)));
++ iErr = -ENODEV;
++ mutex_unlock(&sDeviceInitMutex);
++ goto fail_device_init;
++ }
++
++#if defined(SUPPORT_RGX)
++ PVRGpuTraceInitIfEnabled(psDeviceNode);
++#endif
++ }
++ mutex_unlock(&sDeviceInitMutex);
++
++ sPrivData.psDevNode = psDeviceNode;
++
++ /*
++ * Here we pass the file pointer, which will be passed through to our
++ * OSConnectionPrivateDataInit function, where we save it so that we
++ * can back-reference the file structure from its connection.
++ */
++ eError = PVRSRVCommonConnectionConnect(&psConnectionPriv->pvConnectionData,
++ (void *)&sPrivData);
++ if (eError != PVRSRV_OK)
++ {
++ iErr = -ENOMEM;
++ goto fail_connect;
++ }
++
++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT)
++ psConnectionPriv->pfDeviceRelease = PVRSRVCommonConnectionDisconnect;
++#endif
++ psDRMFile->driver_priv = (void*)psConnectionPriv;
++ goto out;
++
++fail_connect:
++fail_device_init:
++ kfree(psConnectionPriv);
++fail_alloc_connection_priv:
++out:
++ return iErr;
++}
++
++static void wrap_pvr_sync_close(void *connection_data)
++{
++ CONNECTION_DATA *psConnection = (CONNECTION_DATA *)connection_data;
++ pvr_sync_close(connection_data);
++ OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData);
++ kfree(psConnection);
++}
++
++/**************************************************************************/ /*!
++@Function PVRSRVDeviceSyncOpen
++@Description Sync device open.
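++ Creates a lightweight connection used only by the native
++ fence sync (pvr_sync) code; unlike PVRSRVDeviceServicesOpen()
++ it does not perform full Services device initialisation.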
++@Input psDeviceNode The device node representing the device being ++ opened by a user mode process ++@Input psDRMFile The DRM file data that backs the file handle ++ returned to the user mode process ++@Return int 0 on success and a Linux error code otherwise ++*/ /***************************************************************************/ ++static int PVRSRVDeviceSyncOpen(PVRSRV_DEVICE_NODE *psDeviceNode, ++ struct drm_file *psDRMFile) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ CONNECTION_DATA *psConnection = NULL; ++ ENV_CONNECTION_PRIVATE_DATA sPrivData; ++ PVRSRV_CONNECTION_PRIV *psConnectionPriv; ++ PVRSRV_ERROR eError; ++ int iErr = 0; ++ ++ if (!psPVRSRVData) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__)); ++ iErr = -ENODEV; ++ goto out; ++ } ++ ++ if (psDRMFile->driver_priv == NULL) ++ { ++ /* Allocate psConnectionPriv (stores private data and release pfn under driver_priv) */ ++ psConnectionPriv = kzalloc(sizeof(*psConnectionPriv), GFP_KERNEL); ++ if (!psConnectionPriv) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: No memory to allocate driver_priv data", __func__)); ++ iErr = -ENOMEM; ++ goto out; ++ } ++ } ++ else ++ { ++ psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; ++ } ++ ++ /* Allocate connection data area, no stats since process not registered yet */ ++ psConnection = kzalloc(sizeof(*psConnection), GFP_KERNEL); ++ if (!psConnection) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: No memory to allocate connection data", __func__)); ++ iErr = -ENOMEM; ++ goto fail_alloc_connection; ++ } ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) ++ psConnectionPriv->pvConnectionData = (void*)psConnection; ++#else ++ psConnectionPriv->pvSyncConnectionData = (void*)psConnection; ++#endif ++ ++ sPrivData.psDevNode = psDeviceNode; ++ ++ /* Call environment specific connection data init function */ ++ eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, &sPrivData); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: OSConnectionPrivateDataInit() failed (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ goto fail_private_data_init; ++ } ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) ++ iErr = pvr_sync_open(psConnectionPriv->pvConnectionData, psDRMFile); ++#else ++ iErr = pvr_sync_open(psConnectionPriv->pvSyncConnectionData, psDRMFile); ++#endif ++ if (iErr) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_sync_open() failed(%d)", ++ __func__, iErr)); ++ goto fail_pvr_sync_open; ++ } ++#endif ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) ++ psConnectionPriv->pfDeviceRelease = wrap_pvr_sync_close; ++#endif ++#endif ++ psDRMFile->driver_priv = psConnectionPriv; ++ goto out; ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) ++fail_pvr_sync_open: ++ OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData); ++#endif ++fail_private_data_init: ++ kfree(psConnection); ++fail_alloc_connection: ++ kfree(psConnectionPriv); ++out: ++ return iErr; ++} ++ ++/**************************************************************************/ /*! ++@Function PVRSRVDeviceRelease ++@Description Common device release. 
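++ Tears down whichever connection (Services or Sync) was made
++ on psDRMFile: via the release callback stored in driver_priv
++ when PVRSRV_DEVICE_INIT_MODE is PVRSRV_LINUX_DEV_INIT_ON_CONNECT,
++ or by disconnecting the Services and Sync connections directly
++ otherwise.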
++@Input psDeviceNode The device node for the device that the given file ++ represents ++@Input psDRMFile The DRM file data that's being released ++@Return void ++*/ /***************************************************************************/ ++void PVRSRVDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode, ++ struct drm_file *psDRMFile) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ ++ if (psDRMFile->driver_priv) ++ { ++ PVRSRV_CONNECTION_PRIV *psConnectionPriv = (PVRSRV_CONNECTION_PRIV*)psDRMFile->driver_priv; ++ ++ if (psConnectionPriv->pvConnectionData) ++ { ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) ++ if (psConnectionPriv->pfDeviceRelease) ++ { ++ psConnectionPriv->pfDeviceRelease(psConnectionPriv->pvConnectionData); ++ } ++#else ++ if (psConnectionPriv->pvConnectionData) ++ PVRSRVCommonConnectionDisconnect(psConnectionPriv->pvConnectionData); ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) ++ if (psConnectionPriv->pvSyncConnectionData) ++ pvr_sync_close(psConnectionPriv->pvSyncConnectionData); ++#endif ++#endif ++ } ++ ++ kfree(psDRMFile->driver_priv); ++ psDRMFile->driver_priv = NULL; ++ } ++} ++ ++int ++drm_pvr_srvkm_init(struct drm_device *dev, void *arg, struct drm_file *psDRMFile) ++{ ++ struct drm_pvr_srvkm_init_data *data = arg; ++ struct pvr_drm_private *priv = dev->dev_private; ++ int iErr = 0; ++ ++ switch (data->init_module) ++ { ++ case PVR_SRVKM_SYNC_INIT: ++ { ++ iErr = PVRSRVDeviceSyncOpen(priv->dev_node, psDRMFile); ++ break; ++ } ++ case PVR_SRVKM_SERVICES_INIT: ++ { ++ iErr = PVRSRVDeviceServicesOpen(priv->dev_node, psDRMFile); ++ break; ++ } ++ default: ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: invalid init_module (%d)", ++ __func__, data->init_module)); ++ iErr = -EINVAL; ++ } ++ } ++ ++ return iErr; ++} +diff --git a/drivers/gpu/drm/img-rogue/module_common.h b/drivers/gpu/drm/img-rogue/module_common.h +new file mode 100644 +index 000000000000..7317a0a79eaa +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/module_common.h +@@ -0,0 +1,101 @@ ++/*************************************************************************/ /*! ++@File module_common.h ++@Title Common linux module setup header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef MODULE_COMMON_H
++#define MODULE_COMMON_H
++
++#include "pvr_drm.h"
++
++/* DRVNAME is the name we use to register our driver. */
++#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME
++
++struct _PVRSRV_DEVICE_NODE_;
++struct drm_file;
++struct drm_device;
++
++/* psDRMFile->driver_priv will point to a PVRSRV_CONNECTION_PRIV
++ * struct, which will contain a ptr to the CONNECTION_DATA and
++ * a pfn to the release function (which will differ depending
++ * on whether the connection is to Sync or Services).
++ */
++typedef void (*PFN_PVRSRV_DEV_RELEASE)(void *pvData);
++typedef struct
++{
++ /* pvConnectionData is used to hold Services connection data
++ * for all PVRSRV_DEVICE_INIT_MODE options.
++ */
++ void *pvConnectionData;
++
++ /* pfDeviceRelease is used to indicate the release function
++ * to be called when PVRSRV_DEVICE_INIT_MODE is PVRSRV_LINUX_DEV_INIT_ON_CONNECT,
++ * as we can then have one connection made (either for Services or Sync) per
++ * psDRMFile, and need to know which type of connection is being released
++ * (as the ioctl release call is common for both).
++ */
++ PFN_PVRSRV_DEV_RELEASE pfDeviceRelease;
++
++ /* pvSyncConnectionData is used to hold Sync connection data
++ * when PVRSRV_DEVICE_INIT_MODE is not PVRSRV_LINUX_DEV_INIT_ON_CONNECT,
++ * as we can then have two connections made (for Services and Sync) to
++ * the same psDRMFile.
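++ *
++ * Illustrative sketch only (psPriv is a hypothetical local,
++ * mirroring the release path in module_common.c): in this mode
++ * the two connections are torn down independently on release:
++ *
++ *   PVRSRVCommonConnectionDisconnect(psPriv->pvConnectionData);
++ *   pvr_sync_close(psPriv->pvSyncConnectionData);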
++ */ ++ void *pvSyncConnectionData; ++} PVRSRV_CONNECTION_PRIV; ++ ++int PVRSRVDriverInit(void); ++void PVRSRVDriverDeinit(void); ++ ++int PVRSRVDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++void PVRSRVDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++ ++void PVRSRVDeviceShutdown(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++int PVRSRVDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++int PVRSRVDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++ ++int PVRSRVDeviceServicesOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ struct drm_file *psDRMFile); ++void PVRSRVDeviceRelease(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode, ++ struct drm_file *psDRMFile); ++int drm_pvr_srvkm_init(struct drm_device *dev, ++ void *arg, struct drm_file *psDRMFile); ++ ++#endif /* MODULE_COMMON_H */ +diff --git a/drivers/gpu/drm/img-rogue/multicore_defs.h b/drivers/gpu/drm/img-rogue/multicore_defs.h +new file mode 100644 +index 000000000000..2ca4e064d886 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/multicore_defs.h +@@ -0,0 +1,53 @@ ++/**************************************************************************/ /*! ++@File ++@Title RGX Multicore Information flags ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef RGX_MULTICORE_DEFS_H ++#define RGX_MULTICORE_DEFS_H ++ ++/* Capability bits returned to client in RGXGetMultiCoreInfo */ ++#define RGX_MULTICORE_CAPABILITY_FRAGMENT_EN (0x00000040U) ++#define RGX_MULTICORE_CAPABILITY_GEOMETRY_EN (0x00000020U) ++#define RGX_MULTICORE_CAPABILITY_COMPUTE_EN (0x00000010U) ++#define RGX_MULTICORE_CAPABILITY_PRIMARY_EN (0x00000008U) ++#define RGX_MULTICORE_ID_CLRMSK (0xFFFFFFF8U) ++ ++#endif /* RGX_MULTICORE_DEFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/opaque_types.h b/drivers/gpu/drm/img-rogue/opaque_types.h +new file mode 100644 +index 000000000000..766bc22ea418 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/opaque_types.h +@@ -0,0 +1,56 @@ ++/*************************************************************************/ /*! ++@File ++@Title Opaque Types ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines opaque types for various services types ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++#ifndef SERVICES_OPAQUE_TYPES_H ++#define SERVICES_OPAQUE_TYPES_H ++ ++#include "img_defs.h" ++#include "img_types.h" ++ ++typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE; ++typedef const struct _PVRSRV_DEVICE_NODE_ *PCPVRSRV_DEVICE_NODE; ++ ++#endif /* SERVICES_OPAQUE_TYPES_H */ ++ ++/****************************************************************************** ++ End of file (opaque_types.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/os_cpu_cache.h b/drivers/gpu/drm/img-rogue/os_cpu_cache.h +new file mode 100644 +index 000000000000..56f92036ff49 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/os_cpu_cache.h +@@ -0,0 +1,69 @@ ++/*************************************************************************/ /*! ++@File ++@Title OS and CPU d-cache maintenance mechanisms ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines for cache management which are visible internally only ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#ifndef OS_CPU_CACHE_H
++#define OS_CPU_CACHE_H
++
++#include "info_page_defs.h"
++
++#define PVRSRV_CACHE_OP_TIMELINE 0x8 /*!< Request SW_SYNC timeline notification when executed */
++#define PVRSRV_CACHE_OP_FORCE_SYNCHRONOUS 0x10 /*!< Force all batch members to be executed synchronously */
++
++#define CACHEFLUSH_ISA_X86 0x1 /*!< x86/x64 specific UM range-based cache flush */
++#define CACHEFLUSH_ISA_ARM64 0x2 /*!< AArch64 specific UM range-based cache flush */
++#define CACHEFLUSH_ISA_GENERIC 0x3 /*!< Other ISAs without UM range-based cache flush */
++#ifndef CACHEFLUSH_ISA_TYPE
++ #if defined(__i386__) || defined(__x86_64__)
++ #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_X86
++ #elif defined(__arm64__) || defined(__aarch64__)
++ #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_ARM64
++ #else
++ #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_GENERIC
++ #endif
++#endif
++
++#if (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_X86) || (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_ARM64)
++#define CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH /*!< x86/x86_64/ARM64 supports user-mode d-cache flush */
++#endif
++
++#endif /* OS_CPU_CACHE_H */
+diff --git a/drivers/gpu/drm/img-rogue/os_srvinit_param.h b/drivers/gpu/drm/img-rogue/os_srvinit_param.h
+new file mode 100644
+index 000000000000..a4d77e381ff7
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/os_srvinit_param.h
+@@ -0,0 +1,328 @@
++/*************************************************************************/ /*!
++@File
++@Title Services initialisation parameters header
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Services initialisation parameter support for the Linux kernel.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef OS_SRVINIT_PARAM_H ++#define OS_SRVINIT_PARAM_H ++ ++#if defined(__linux__) && defined(__KERNEL__) ++#include "km_apphint.h" ++#include "km_apphint_defs.h" ++ ++/* Supplied to SrvInitParamGetXXX() functions when the param/AppHint is ++ * applicable to all devices and not a specific device. Typically used ++ * for server-wide build and module AppHints. ++ */ ++#define INITPARAM_NO_DEVICE (NULL) ++ ++#define SrvInitParamOpen() NULL ++#define SrvInitParamClose(pvState) ((void)(pvState)) ++ ++#define SrvInitParamGetBOOL(device, state, name, value) \ ++ ((void) pvr_apphint_get_bool(device, APPHINT_ID_ ## name, &value)) ++ ++#define SrvInitParamGetUINT32(device, state, name, value) \ ++ ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value)) ++ ++#define SrvInitParamGetUINT64(device, state, name, value) \ ++ ((void) pvr_apphint_get_uint64(device, APPHINT_ID_ ## name, &value)) ++ ++#define SrvInitParamGetSTRING(device, state, name, buffer, size) \ ++ ((void) pvr_apphint_get_string(device, APPHINT_ID_ ## name, buffer, size)) ++ ++#define SrvInitParamGetUINT32BitField(device, state, name, value) \ ++ ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value)) ++ ++#define SrvInitParamGetUINT32List(device, state, name, value) \ ++ ((void) pvr_apphint_get_uint32(device, APPHINT_ID_ ## name, &value)) ++ ++#else /* defined(__linux__) && defined(__KERNEL__) */ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include "img_defs.h" ++#include "img_types.h" ++ ++/*! Lookup item. */ ++typedef struct ++{ ++ const IMG_CHAR *pszValue; /*!< looked up name */ ++ IMG_UINT32 ui32Value; /*!< looked up value */ ++} SRV_INIT_PARAM_UINT32_LOOKUP; ++ ++/*************************************************************************/ /*! ++@Brief SrvInitParamOpen ++ ++@Description Establish a connection to the Parameter resource store which is ++ used to hold configuration information associated with the ++ server instance. ++ ++@Return (void *) Handle to Parameter resource store to be used for ++ subsequent parameter value queries ++ ++*/ /**************************************************************************/ ++void *SrvInitParamOpen(void); ++ ++/*************************************************************************/ /*! ++@Brief SrvInitParamClose ++ ++@Description Remove a pre-existing connection to the Parameter resource store ++ given by 'pvState' and release any temporary storage associated ++ with the 'pvState' mapping handle ++ ++@Input pvState Handle to Parameter resource store ++ ++*/ /**************************************************************************/ ++void SrvInitParamClose(void *pvState); ++ ++/*************************************************************************/ /*! 
++@Brief _SrvInitParamGetBOOL ++ ++@Description Get the current BOOL value for parameter 'pszName' from the ++ Parameter resource store attached to 'pvState' ++ ++@Input pvState Handle to Parameter resource store ++ ++@Input pszName Name of parameter to look-up ++ ++@Input pbDefault Value to return if parameter not found ++ ++@Output pbValue Value of parameter 'pszName' or 'pbDefault' ++ if not found ++ ++*/ /**************************************************************************/ ++void _SrvInitParamGetBOOL( ++ void *pvState, ++ const IMG_CHAR *pszName, ++ const IMG_BOOL *pbDefault, ++ IMG_BOOL *pbValue ++); ++ ++/*! Get the BOOL value for parameter 'name' from the parameter resource store ++ * attached to 'state'. */ ++#define SrvInitParamGetBOOL(device, state, name, value) \ ++ _SrvInitParamGetBOOL(state, # name, & __SrvInitParam_ ## name, &(value)) ++ ++/*! Initialise FLAG type parameter identified by 'name'. */ ++#define SrvInitParamInitFLAG(name, defval, unused) \ ++ static const IMG_BOOL __SrvInitParam_ ## name = defval; ++ ++/*! Initialise BOOL type parameter identified by 'name'. */ ++#define SrvInitParamInitBOOL(name, defval, unused) \ ++ static const IMG_BOOL __SrvInitParam_ ## name = defval; ++ ++/*************************************************************************/ /*! ++@Brief _SrvInitParamGetUINT32 ++ ++@Description Get the current IMG_UINT32 value for parameter 'pszName' ++ from the Parameter resource store attached to 'pvState' ++ ++@Input pvState Handle to Parameter resource store ++ ++@Input pszName Name of parameter to look-up ++ ++@Input pui32Default Value to return if parameter not found ++ ++@Output pui32Value Value of parameter 'pszName' or ++ 'pui32Default' if not found ++ ++*/ /**************************************************************************/ ++void _SrvInitParamGetUINT32( ++ void *pvState, ++ const IMG_CHAR *pszName, ++ const IMG_UINT32 *pui32Default, ++ IMG_UINT32 *pui32Value ++); ++ ++/*! Get the UINT32 value for parameter 'name' from the parameter resource store ++ * attached to 'state'. */ ++#define SrvInitParamGetUINT32(device, state, name, value) \ ++ _SrvInitParamGetUINT32(state, # name, & __SrvInitParam_ ## name, &(value)) ++ ++/*! Initialise UINT32 type parameter identified by 'name'. */ ++#define SrvInitParamInitUINT32(name, defval, unused) \ ++ static const IMG_UINT32 __SrvInitParam_ ## name = defval; ++ ++/*! Initialise UINT64 type parameter identified by 'name'. */ ++#define SrvInitParamInitUINT64(name, defval, unused) \ ++ static const IMG_UINT64 __SrvInitParam_ ## name = defval; ++ ++/*! @cond Doxygen_Suppress */ ++#define SrvInitParamUnreferenced(name) \ ++ PVR_UNREFERENCED_PARAMETER( __SrvInitParam_ ## name ) ++/*! @endcond */ ++ ++/*************************************************************************/ /*! 
++@Brief _SrvInitParamGetUINT32BitField
++
++@Description Get the current IMG_UINT32 bitfield value for parameter
++ 'pszBaseName' from the Parameter resource store
++ attached to 'pvState'
++
++@Input pvState Handle to Parameter resource store
++
++@Input pszBaseName Bitfield parameter name to search for
++
++@Input uiDefault Default return value if parameter not found
++
++@Input psLookup Bitfield array to traverse
++
++@Input uiSize Number of elements in 'psLookup'
++
++@Output puiValue Value of bitfield or 'uiDefault' if
++ parameter not found
++*/ /**************************************************************************/
++void _SrvInitParamGetUINT32BitField(
++ void *pvState,
++ const IMG_CHAR *pszBaseName,
++ IMG_UINT32 uiDefault,
++ const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup,
++ IMG_UINT32 uiSize,
++ IMG_UINT32 *puiValue
++);
++
++/*! Initialise UINT32 bitfield type parameter identified by 'name' with
++ * 'inival' value and 'lookup' look up array. */
++#define SrvInitParamInitUINT32Bitfield(name, inival, lookup) \
++ static IMG_UINT32 __SrvInitParam_ ## name = inival; \
++ static SRV_INIT_PARAM_UINT32_LOOKUP * \
++ __SrvInitParamLookup_ ## name = &lookup[0]; \
++ static const IMG_UINT32 __SrvInitParamSize_ ## name = \
++ ARRAY_SIZE(lookup);
++
++/*! Get the UINT32 bitfield value for parameter 'name' from the parameter
++ * resource store attached to 'state'. */
++#define SrvInitParamGetUINT32BitField(device, state, name, value) \
++ _SrvInitParamGetUINT32BitField(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value))
++
++/*************************************************************************/ /*!
++@Brief _SrvInitParamGetUINT32List
++
++@Description Get the current IMG_UINT32 list value for the specified
++ parameter 'pszName' from the Parameter resource store
++ attached to 'pvState'
++
++@Input pvState Handle to Parameter resource store
++
++@Input pszName Parameter list name to search for
++
++@Input uiDefault Default value to return if 'pszName' is
++ not set within 'pvState'
++
++@Input psLookup Parameter list to traverse
++
++@Input uiSize Number of elements in the 'psLookup' list
++
++@Output puiValue Value of the located list element or
++ 'uiDefault' if parameter not found
++
++*/ /**************************************************************************/
++void _SrvInitParamGetUINT32List(
++ void *pvState,
++ const IMG_CHAR *pszName,
++ IMG_UINT32 uiDefault,
++ const SRV_INIT_PARAM_UINT32_LOOKUP *psLookup,
++ IMG_UINT32 uiSize,
++ IMG_UINT32 *puiValue
++);
++
++/*! Get the UINT32 list value for parameter 'name' from the parameter
++ * resource store attached to 'state'. */
++#define SrvInitParamGetUINT32List(device, state, name, value) \
++ _SrvInitParamGetUINT32List(state, # name, __SrvInitParam_ ## name, __SrvInitParamLookup_ ## name, __SrvInitParamSize_ ## name, &(value))
++
++/*! Initialise UINT32 list type parameter identified by 'name' with
++ * 'defval' default value and 'lookup' look up list. */
++#define SrvInitParamInitUINT32List(name, defval, lookup) \
++ static IMG_UINT32 __SrvInitParam_ ## name = defval; \
++ static SRV_INIT_PARAM_UINT32_LOOKUP * \
++ __SrvInitParamLookup_ ## name = &lookup[0]; \
++ static const IMG_UINT32 __SrvInitParamSize_ ## name = \
++ ARRAY_SIZE(lookup);
++
++/*************************************************************************/ /*!
++@Brief _SrvInitParamGetSTRING ++ ++@Description Get the contents of the specified parameter string 'pszName' ++ from the Parameter resource store attached to 'pvState' ++ ++@Input pvState Handle to Parameter resource store ++ ++@Input pszName Parameter string name to search for ++ ++@Input psDefault Default string to return if 'pszName' is ++ not set within 'pvState' ++ ++@Input size Size of output 'pBuffer' ++ ++@Output pBuffer Output copy of 'pszName' contents or ++ copy of 'psDefault' if 'pszName' is not ++ set within 'pvState' ++ ++*/ /**************************************************************************/ ++void _SrvInitParamGetSTRING( ++ void *pvState, ++ const IMG_CHAR *pszName, ++ const IMG_CHAR *psDefault, ++ IMG_CHAR *pBuffer, ++ size_t size ++); ++ ++/*! Initialise STRING type parameter identified by 'name' with 'defval' default ++ * value. */ ++#define SrvInitParamInitSTRING(name, defval, unused) \ ++ static const IMG_CHAR *__SrvInitParam_ ## name = defval; ++ ++/*! Get the STRING value for parameter 'name' from the parameter resource store ++ * attached to 'state'. */ ++#define SrvInitParamGetSTRING(device, state, name, buffer, size) \ ++ _SrvInitParamGetSTRING(state, # name, __SrvInitParam_ ## name, buffer, size) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* defined(__linux__) && defined(__KERNEL__) */ ++ ++#endif /* OS_SRVINIT_PARAM_H */ +diff --git a/drivers/gpu/drm/img-rogue/osconnection_server.c b/drivers/gpu/drm/img-rogue/osconnection_server.c +new file mode 100644 +index 000000000000..0c3bc2d672ee +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/osconnection_server.c +@@ -0,0 +1,157 @@ ++/*************************************************************************/ /*! ++@File ++@Title Linux specific per process data functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include <linux/version.h>
++
++#include "connection_server.h"
++#include "osconnection_server.h"
++
++#include "env_connection.h"
++#include "allocmem.h"
++#include "pvr_debug.h"
++
++#include <linux/sched.h>
++
++#if defined(SUPPORT_ION)
++#include <linux/err.h>
++#include PVR_ANDROID_ION_HEADER
++
++/*
++	The ion device (the base object for all requests)
++	gets created by the system and we acquire it via
++	Linux specific functions provided by the system layer
++*/
++#include "ion_sys.h"
++#endif
++
++PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
++{
++ ENV_CONNECTION_PRIVATE_DATA *psPrivData = pvOSData;
++ ENV_CONNECTION_DATA *psEnvConnection;
++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++ ENV_ION_CONNECTION_DATA *psIonConnection;
++#endif
++
++ *phOsPrivateData = OSAllocZMem(sizeof(ENV_CONNECTION_DATA));
++
++ if (*phOsPrivateData == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocZMem failed", __func__));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData;
++
++ psEnvConnection->owner = current->tgid;
++
++ psEnvConnection->psDevNode = psPrivData->psDevNode;
++
++#if defined(SUPPORT_NATIVE_FENCE_SYNC)
++ psEnvConnection->pvPvrSyncPrivateData = NULL;
++#endif
++
++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++ psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocZMem(sizeof(ENV_ION_CONNECTION_DATA));
++ if (psIonConnection == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocZMem failed", __func__));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++
++ psEnvConnection->psIonData = psIonConnection;
++ /*
++	We can have more than one connection per process, so we need
++	more than the PID to have a unique name.
++ */
++ psEnvConnection->psIonData->psIonDev = IonDevAcquire();
++ OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentClientProcessIDKM());
++ psEnvConnection->psIonData->psIonClient =
++ ion_client_create(psEnvConnection->psIonData->psIonDev,
++ psEnvConnection->psIonData->azIonClientName);
++
++ if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create "
++ "ion client for per connection data"));
++ return PVRSRV_ERROR_OUT_OF_MEMORY;
++ }
++#endif /* SUPPORT_ION && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
++{
++ ENV_CONNECTION_DATA *psEnvConnection;
++
++ if (hOsPrivateData == NULL)
++ {
++ return PVRSRV_OK;
++ }
++
++ psEnvConnection = hOsPrivateData;
++
++#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
++ PVR_ASSERT(psEnvConnection->psIonData != NULL);
++
++ PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL);
++ ion_client_destroy(psEnvConnection->psIonData->psIonClient);
++
++ IonDevRelease(psEnvConnection->psIonData->psIonDev);
++ OSFreeMem(psEnvConnection->psIonData);
++#endif
++
++ OSFreeMem(hOsPrivateData);
++ /* Not NULLing the pointer: the handle is a copy on the caller's stack */
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_DEVICE_NODE *OSGetDevNode(CONNECTION_DATA *psConnection)
++{
++ ENV_CONNECTION_DATA *psEnvConnection;
++
++ psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
++ PVR_ASSERT(psEnvConnection);
++
++ return psEnvConnection->psDevNode;
++}
+diff --git a/drivers/gpu/drm/img-rogue/osconnection_server.h b/drivers/gpu/drm/img-rogue/osconnection_server.h
+new file mode 100644
+index 000000000000..28a6dd3825fb
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/osconnection_server.h
+@@ -0,0 +1,133 @@
++/**************************************************************************/ /*!
++@File
++@Title Server side connection management
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description API for OS specific callbacks from server side connection
++ management
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution.
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++#ifndef OSCONNECTION_SERVER_H ++#define OSCONNECTION_SERVER_H ++ ++#include "handle.h" ++#include "osfunc.h" ++ ++/*! Function not implemented definition. */ ++#define OSCONNECTION_SERVER_NOT_IMPLEMENTED 0 ++/*! Assert used for OSCONNECTION_SERVER_NOT_IMPLEMENTED. */ ++#define OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT() PVR_ASSERT(OSCONNECTION_SERVER_NOT_IMPLEMENTED) ++ ++#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) ++PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData); ++PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData); ++ ++PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase); ++ ++PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection); ++ ++#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(OSConnectionPrivateDataInit) ++#endif ++/*************************************************************************/ /*! ++@Function OSConnectionPrivateDataInit ++@Description Allocates and initialises any OS-specific private data ++ relating to a connection. ++ Called from PVRSRVCommonConnectionConnect(). ++@Input pvOSData pointer to any OS private data ++@Output phOsPrivateData handle to the created connection ++ private data ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData) ++{ ++ PVR_UNREFERENCED_PARAMETER(phOsPrivateData); ++ PVR_UNREFERENCED_PARAMETER(pvOSData); ++ ++ OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT(); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(OSConnectionPrivateDataDeInit) ++#endif ++/*************************************************************************/ /*! ++@Function OSConnectionPrivateDataDeInit ++@Description Frees previously allocated OS-specific private data ++ relating to a connection. ++@Input hOsPrivateData handle to the connection private data ++ to be freed ++@Return PVRSRV_OK on success, a failure code otherwise. 
++*/ /**************************************************************************/ ++static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData) ++{ ++ PVR_UNREFERENCED_PARAMETER(hOsPrivateData); ++ ++ OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT(); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(OSConnectionSetHandleOptions) ++#endif ++static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase) ++{ ++ PVR_UNREFERENCED_PARAMETER(psHandleBase); ++ ++ OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT(); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(OSGetDevNode) ++#endif ++static INLINE PVRSRV_DEVICE_NODE* OSGetDevNode(CONNECTION_DATA *psConnection) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ OSCONNECTION_SERVER_NOT_IMPLEMENTED_ASSERT(); ++ ++ return NULL; ++} ++#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */ ++ ++ ++#endif /* OSCONNECTION_SERVER_H */ +diff --git a/drivers/gpu/drm/img-rogue/osdi_impl.h b/drivers/gpu/drm/img-rogue/osdi_impl.h +new file mode 100644 +index 000000000000..5a68d09947c6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/osdi_impl.h +@@ -0,0 +1,205 @@ ++/*************************************************************************/ /*! ++@File ++@Title Functions and types for creating Debug Info implementations. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef OSDI_IMPL_H
++#define OSDI_IMPL_H
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
++#include <linux/stdarg.h>
++#else
++#include <stdarg.h>
++#endif
++
++#include "di_common.h"
++#include "pvrsrv_error.h"
++
++/*! Implementation callbacks. These operations are performed on native
++ * implementation handles. */
++typedef struct OSDI_IMPL_ENTRY_CB
++{
++ /*! @Function pfnWrite
++ *
++ * @Description
++ * Writes the binary data of the DI entry to the output sink, whatever that
++ * may be for the DI implementation.
++ *
++ * @Input pvNativeHandle native implementation handle
++ * @Input pvData data
++ * @Input uiSize pvData length
++ */
++ void (*pfnWrite)(void *pvNativeHandle, const void *pvData,
++ IMG_UINT32 uiSize);
++
++ /*! @Function pfnVPrintf
++ *
++ * @Description
++ * Implementation of the 'vprintf' operation.
++ *
++ * @Input pvNativeHandle native implementation handle
++ * @Input pszFmt NUL-terminated format string
++ * @Input pArgs variable length argument list
++ */
++ void (*pfnVPrintf)(void *pvNativeHandle, const IMG_CHAR *pszFmt, va_list pArgs);
++
++ /*! @Function pfnPuts
++ *
++ * @Description
++ * Implementation of the 'puts' operation.
++ *
++ * @Input pvNativeHandle native implementation handle
++ * @Input pszStr NUL-terminated string
++ */
++ void (*pfnPuts)(void *pvNativeHandle, const IMG_CHAR *pszStr);
++
++ /*! @Function pfnHasOverflowed
++ *
++ * @Description
++ * Checks if the native implementation's buffer has overflowed.
++ *
++ * @Input pvNativeHandle native implementation handle
++ */
++ IMG_BOOL (*pfnHasOverflowed)(void *pvNativeHandle);
++} OSDI_IMPL_ENTRY_CB;
++
++/*! Debug Info entry specialisation. */
++struct OSDI_IMPL_ENTRY
++{
++ /*! Pointer to the private data. The data originates from DICreateEntry()
++ * function. */
++ void *pvPrivData;
++ /*! Pointer to the implementation native handle. */
++ void *pvNative;
++ /*! Implementation entry callbacks. */
++ OSDI_IMPL_ENTRY_CB *psCb;
++}; /* OSDI_IMPL_ENTRY is already typedef-ed in di_common.h */
++
++/*! Debug Info implementation callbacks. */
++typedef struct OSDI_IMPL_CB
++{
++ /*! Initialise implementation callback.
++ */
++ PVRSRV_ERROR (*pfnInit)(void);
++
++ /*! De-initialise implementation callback.
++ */
++ void (*pfnDeInit)(void);
++
++ /*! @Function pfnCreateEntry
++ *
++ * @Description
++ * Creates an entry of eType type with pszName in the pvNativeGroup parent
++ * group. The entry is an abstract term which depends on the implementation,
++ * e.g.: a file in DebugFS.
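++ *
++ * Illustrative call from the framework side (sketch only, with
++ * hypothetical variable names):
++ *
++ *   void *pvNativeEntry;
++ *   eError = psImplCb->pfnCreateEntry(pszName, eType, psIterCb,
++ *                                     pvPrivData, pvNativeGroup,
++ *                                     &pvNativeEntry);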
++ *
++ * @Input pszName: name of the entry
++ * @Input eType: type of the entry
++ * @Input psIterCb: iterator implementation for the entry
++ * @Input pvPrivData: data that will be passed to the iterator callbacks
++ * in OSDI_IMPL_ENTRY - it can be retrieved by calling
++ * DIGetPrivData() function
++ * @Input pvNativeGroup: implementation specific handle to the parent group
++ *
++ * @Output pvNativeEntry: implementation specific handle to the entry
++ *
++ * @Return PVRSRV_ERROR error code
++ */
++ PVRSRV_ERROR (*pfnCreateEntry)(const IMG_CHAR *pszName,
++ DI_ENTRY_TYPE eType,
++ const DI_ITERATOR_CB *psIterCb,
++ void *pvPrivData,
++ void *pvNativeGroup,
++ void **pvNativeEntry);
++
++ /*! @Function pfnDestroyEntry
++ *
++ * @Description
++ * Destroys native entry.
++ *
++ * @Input psNativeEntry: handle to the entry
++ */
++ void (*pfnDestroyEntry)(void *psNativeEntry);
++
++ /*! @Function pfnCreateGroup
++ *
++ * @Description
++ * Creates a group with pszName in the psNativeParentGroup parent group.
++ * The group is an abstract term which depends on the implementation,
++ * e.g.: a directory in DebugFS.
++ *
++ * @Input pszName: name of the group
++ * @Input psNativeParentGroup: implementation specific handle to the parent
++ * group
++ *
++ * @Output psNativeGroup: implementation specific handle to the group
++ *
++ * @Return PVRSRV_ERROR error code
++ */
++ PVRSRV_ERROR (*pfnCreateGroup)(const IMG_CHAR *pszName,
++ void *psNativeParentGroup,
++ void **psNativeGroup);
++
++ /*! @Function pfnDestroyGroup
++ *
++ * @Description
++ * Destroys native group.
++ *
++ * @Input psNativeGroup: handle to the group
++ */
++ void (*pfnDestroyGroup)(void *psNativeGroup);
++} OSDI_IMPL_CB;
++
++/*! @Function DIRegisterImplementation
++ *
++ * @Description
++ * Registers Debug Info implementations with the framework. The framework takes
++ * ownership of the implementation and will clean up the resources when
++ * it's de-initialised.
++ *
++ * @Input pszName: name of the implementation
++ * @Input psImplCb: implementation callbacks
++ *
++ * @Return PVRSRV_ERROR error code
++ */
++PVRSRV_ERROR DIRegisterImplementation(const IMG_CHAR *pszName,
++ const OSDI_IMPL_CB *psImplCb);
++
++#endif /* OSDI_IMPL_H */
+diff --git a/drivers/gpu/drm/img-rogue/osfunc.c b/drivers/gpu/drm/img-rogue/osfunc.c
+new file mode 100644
+index 000000000000..1279b777c166
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/osfunc.c
+@@ -0,0 +1,2648 @@
++/*************************************************************************/ /*!
++@File
++@Title Environment related functions
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) ++#include ++#include ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) ++#include ++#include ++#else ++#include ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */ ++ ++#include "log2.h" ++#include "osfunc.h" ++#include "cache_km.h" ++#include "img_defs.h" ++#include "img_types.h" ++#include "allocmem.h" ++#include "devicemem_server_utils.h" ++#include "event.h" ++#include "linkage.h" ++#include "pvr_uaccess.h" ++#include "pvr_debug.h" ++#include "pvr_bridge_k.h" ++#include "pvrsrv_memallocflags.h" ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#include "process_stats.h" ++#endif ++#include "physmem_osmem_linux.h" ++#include "dma_support.h" ++#include "kernel_compatibility.h" ++ ++#include "pvrsrv_sync_server.h" ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++#include "pvr_fence.h" ++#endif ++ ++#if defined(VIRTUAL_PLATFORM) ++#define EVENT_OBJECT_TIMEOUT_US (120000000ULL) ++#else ++#if defined(EMULATOR) || defined(TC_APOLLO_TCF5) ++#define EVENT_OBJECT_TIMEOUT_US (2000000ULL) ++#else ++#define EVENT_OBJECT_TIMEOUT_US (100000ULL) ++#endif /* EMULATOR */ ++#endif ++ ++ ++typedef struct { ++ struct task_struct *kthread; ++ PFN_THREAD pfnThread; ++ void *hData; ++ IMG_CHAR *pszThreadName; ++ IMG_BOOL bIsThreadRunning; ++ IMG_BOOL bIsSupportingThread; ++ PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB; ++ DLLIST_NODE sNode; ++} OSThreadData; ++ ++void OSSuspendTaskInterruptible(void) ++{ ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule(); ++} ++ ++static DLLIST_NODE gsThreadListHead; ++ ++static void _ThreadListAddEntry(OSThreadData *psThreadListNode) ++{ ++ dllist_add_to_tail(&gsThreadListHead, &(psThreadListNode->sNode)); ++} ++ 
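++/* gsThreadListHead tracks every OSThreadData created through this module;
++ * OSThreadDumpInfo() walks it to report each thread's name and running
++ * state. */
++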
++static void _ThreadListRemoveEntry(OSThreadData *psThreadListNode) ++{ ++ dllist_remove_node(&(psThreadListNode->sNode)); ++} ++ ++static void _ThreadSetStopped(OSThreadData *psOSThreadData) ++{ ++ psOSThreadData->bIsThreadRunning = IMG_FALSE; ++} ++ ++static void _OSInitThreadList(void) ++{ ++ dllist_init(&gsThreadListHead); ++} ++ ++void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PDLLIST_NODE psNodeCurr, psNodeNext; ++ ++ dllist_foreach_node(&gsThreadListHead, psNodeCurr, psNodeNext) ++ { ++ OSThreadData *psThreadListNode; ++ psThreadListNode = IMG_CONTAINER_OF(psNodeCurr, OSThreadData, sNode); ++ ++ PVR_DUMPDEBUG_LOG(" %s : %s", ++ psThreadListNode->pszThreadName, ++ (psThreadListNode->bIsThreadRunning) ? "Running" : "Stopped"); ++ ++ if (psThreadListNode->pfnDebugDumpCB) ++ { ++ psThreadListNode->pfnDebugDumpCB(pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++ } ++} ++ ++PVRSRV_ERROR OSPhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize, ++ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_PID uiPid) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); ++ struct device *psDev = psDevNode->psDevConfig->pvOSDevice; ++ IMG_CPU_PHYADDR sCpuPAddr; ++ struct page *psPage; ++ IMG_UINT32 ui32Order=0; ++ gfp_t gfp_flags; ++ ++ PVR_ASSERT(uiSize != 0); ++ /*Align the size to the page granularity */ ++ uiSize = PAGE_ALIGN(uiSize); ++ ++ /*Get the order to be used with the allocation */ ++ ui32Order = get_order(uiSize); ++ ++ gfp_flags = GFP_KERNEL; ++ ++#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) ++ if (psDev) ++ { ++ if (*psDev->dma_mask == DMA_BIT_MASK(32)) ++ { ++ /* Limit to 32 bit. ++ * Achieved by setting __GFP_DMA32 for 64 bit systems */ ++ gfp_flags |= __GFP_DMA32; ++ } ++ else if (*psDev->dma_mask < DMA_BIT_MASK(32)) ++ { ++ /* Limit to whatever the size of DMA zone is. */ ++ gfp_flags |= __GFP_DMA; ++ } ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(psDev); ++#endif ++ ++ /*allocate the pages */ ++ psPage = alloc_pages(gfp_flags, ui32Order); ++ if (psPage == NULL) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ uiSize = (1 << ui32Order) * PAGE_SIZE; ++ ++ psMemHandle->u.pvHandle = psPage; ++ psMemHandle->uiOrder = ui32Order; ++ sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage)); ++ ++ /* ++ * Even when more pages are allocated as base MMU object we still need one single physical address because ++ * they are physically contiguous. 
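++	 * (For example, a 16KB request on a 4KB-page system becomes an order-2,
++	 * four-page allocation above, yet the caller still receives a single
++	 * handle and a single device-physical base address.)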
++ */ ++ PhysHeapCpuPAddrToDevPAddr(psPhysHeap, 1, psDevPAddr, &sCpuPAddr); ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, ++ uiSize, ++ (IMG_UINT64)(uintptr_t) psPage, ++ uiPid); ++#else ++ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, ++ psPage, ++ sCpuPAddr, ++ uiSize, ++ NULL, ++ uiPid ++ DEBUG_MEMSTATS_VALUES); ++#endif ++#else ++ PVR_UNREFERENCED_PARAMETER(uiPid); ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++void OSPhyContigPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle) ++{ ++ struct page *psPage = (struct page*) psMemHandle->u.pvHandle; ++ IMG_UINT32 uiSize, uiPageCount=0, ui32Order; ++ ++ PVR_UNREFERENCED_PARAMETER(psPhysHeap); ++ ++ ui32Order = psMemHandle->uiOrder; ++ uiPageCount = (1 << ui32Order); ++ uiSize = (uiPageCount * PAGE_SIZE); ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, ++ (IMG_UINT64)(uintptr_t) psPage); ++#else ++ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, ++ (IMG_UINT64)(uintptr_t) psPage, ++ OSGetCurrentClientProcessIDKM()); ++#endif ++#endif ++ ++ __free_pages(psPage, ui32Order); ++ psMemHandle->uiOrder = 0; ++} ++ ++PVRSRV_ERROR OSPhyContigPagesMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, ++ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, ++ void **pvPtr) ++{ ++ size_t actualSize = 1 << (PAGE_SHIFT + psMemHandle->uiOrder); ++ *pvPtr = kmap((struct page*)psMemHandle->u.pvHandle); ++ ++ PVR_UNREFERENCED_PARAMETER(psDevPAddr); ++ ++ PVR_UNREFERENCED_PARAMETER(actualSize); /* If we don't take an #ifdef path */ ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(psPhysHeap); ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, actualSize, OSGetCurrentClientProcessIDKM()); ++#else ++ { ++ IMG_CPU_PHYADDR sCpuPAddr; ++ sCpuPAddr.uiAddr = 0; ++ ++ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, ++ *pvPtr, ++ sCpuPAddr, ++ actualSize, ++ NULL, ++ OSGetCurrentClientProcessIDKM() ++ DEBUG_MEMSTATS_VALUES); ++ } ++#endif ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++void OSPhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, void *pvPtr) ++{ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ /* Mapping is done a page at a time */ ++ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, ++ (1 << (PAGE_SHIFT + psMemHandle->uiOrder)), ++ OSGetCurrentClientProcessIDKM()); ++#else ++ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, ++ (IMG_UINT64)(uintptr_t)pvPtr, ++ OSGetCurrentClientProcessIDKM()); ++#endif ++#endif ++ ++ PVR_UNREFERENCED_PARAMETER(psPhysHeap); ++ PVR_UNREFERENCED_PARAMETER(pvPtr); ++ ++ kunmap((struct page*) psMemHandle->u.pvHandle); ++} ++ ++PVRSRV_ERROR OSPhyContigPagesClean(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *psMemHandle, ++ IMG_UINT32 uiOffset, ++ IMG_UINT32 uiLength) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ struct page* psPage = (struct page*) psMemHandle->u.pvHandle; ++ ++ void* pvVirtAddrStart = kmap(psPage) + uiOffset; ++ IMG_CPU_PHYADDR sPhysStart, sPhysEnd; ++ ++ IMG_UINT32 ui32Order; ++ ++ if (uiLength == 0) ++ { ++ goto e0; ++ } ++ ++ ui32Order = 
psMemHandle->uiOrder; ++ if ((uiOffset + uiLength) > ((1 << ui32Order) * PAGE_SIZE)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid size params, uiOffset %u, uiLength %u", ++ __func__, ++ uiOffset, ++ uiLength)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto e0; ++ } ++ ++ sPhysStart.uiAddr = page_to_phys(psPage) + uiOffset; ++ sPhysEnd.uiAddr = sPhysStart.uiAddr + uiLength; ++ ++ CacheOpExec(psDevNode, ++ pvVirtAddrStart, ++ pvVirtAddrStart + uiLength, ++ sPhysStart, ++ sPhysEnd, ++ PVRSRV_CACHE_OP_CLEAN); ++ ++e0: ++ kunmap(psPage); ++ ++ return eError; ++} ++ ++#if defined(__GNUC__) ++#define PVRSRV_MEM_ALIGN __attribute__ ((aligned (0x8))) ++#define PVRSRV_MEM_ALIGN_MASK (0x7) ++#else ++#error "PVRSRV Alignment macros need to be defined for this compiler" ++#endif ++ ++IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute) ++{ ++ IMG_UINT32 uiSize = 0; ++ ++ switch (eCacheAttribute) ++ { ++ case OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE: ++ uiSize = cache_line_size(); ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache attribute type %d", ++ __func__, (IMG_UINT32)eCacheAttribute)); ++ PVR_ASSERT(0); ++ break; ++ } ++ ++ return uiSize; ++} ++ ++IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...) ++{ ++ va_list argList; ++ IMG_INT32 iCount = 0; ++ ++ va_start(argList, pszFormat); ++ iCount = vsscanf(pStr, pszFormat, argList); ++ va_end(argList); ++ ++ return iCount; ++} ++ ++IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen) ++{ ++ return (IMG_INT)memcmp(pvBufA, pvBufB, uiLen); ++} ++ ++size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize) ++{ ++ /* ++ * Let strlcat handle any truncation cases correctly. ++ * We will definitely get a NUL-terminated string set in pszDest ++ */ ++ size_t uSrcSize = strlcat(pszDest, pszSrc, uDstSize); ++ ++#if defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) ++ /* Handle truncation by dumping calling stack if debug allows */ ++ if (uSrcSize >= uDstSize) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'", ++ __func__, pszSrc, (long)uDstSize, pszDest)); ++ OSDumpStack(); ++ } ++#endif /* defined(PVR_DEBUG_STRLCPY) && defined(DEBUG) */ ++ ++ return uSrcSize; ++} ++ ++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) ++{ ++ va_list argList; ++ IMG_INT32 iCount; ++ ++ va_start(argList, pszFormat); ++ iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList); ++ va_end(argList); ++ ++ return iCount; ++} ++ ++IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) ++{ ++ return vsnprintf(pStr, ui32Size, pszFormat, vaArgs); ++} ++ ++size_t OSStringLength(const IMG_CHAR *pStr) ++{ ++ return strlen(pStr); ++} ++ ++size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount) ++{ ++ return strnlen(pStr, uiCount); ++} ++ ++IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2, ++ size_t uiSize) ++{ ++#if defined(DEBUG) ++ /* Double-check that we are not passing NULL parameters in. 
If we are we ++ * return -1 (for arg1 == NULL, arg2 != NULL) ++ * 0 (for arg1 == NULL, arg2 == NULL ++ * +1 (for arg1 != NULL, arg2 == NULL) ++ * strncmp(arg1, arg2, size) otherwise ++ */ ++ if (pStr1 == NULL) ++ { ++ if (pStr2 == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %p, %d): Both args NULL", ++ __func__, pStr1, pStr2, (int)uiSize)); ++ OSDumpStack(); ++ return 0; /* Both NULL */ ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %p, %d): arg1 NULL", ++ __func__, pStr1, pStr2, (int)uiSize)); ++ OSDumpStack(); ++ return -1; /* NULL < non-NULL */ ++ } ++ } ++ else ++ { ++ if (pStr2 == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s(%p, %p, %d): arg2 NULL", ++ __func__, pStr1, pStr2, (int)uiSize)); ++ OSDumpStack(); ++ return +1; /* non-NULL > NULL */ ++ } ++ else ++ { ++ return strncmp(pStr1, pStr2, uiSize); ++ } ++ } ++#else ++ return strncmp(pStr1, pStr2, uiSize); ++#endif ++} ++ ++PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base, ++ IMG_UINT32 *ui32Result) ++{ ++ if (kstrtou32(pStr, ui32Base, ui32Result) != 0) ++ return PVRSRV_ERROR_CONVERSION_FAILED; ++ ++ return PVRSRV_OK; ++} ++ ++IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, ++ IMG_UINT32 ui32Num) ++{ ++ IMG_UINT32 ui32i, ui32Len = 0, ui32NumCopy = ui32Num; ++ ++ /* calculate string length required to hold the number string */ ++ do ++ { ++ ui32Len++; ++ ui32NumCopy /= 10; ++ } while (ui32NumCopy != 0); ++ ++ if (unlikely(ui32Len >= uSize)) ++ { ++ /* insufficient buffer */ ++ return 0; ++ } ++ ++ for (ui32i = 0; ui32i < ui32Len; ui32i++) ++ { ++ pszBuf[ui32Len - (ui32i + 1)] = '0' + ui32Num % 10; ++ ui32Num = ui32Num / 10; ++ } ++ ++ pszBuf[ui32Len] = '\0'; ++ return ui32Len; ++} ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) ++static struct workqueue_struct *gpFenceStatusWq; ++ ++static PVRSRV_ERROR _NativeSyncInit(void) ++{ ++ gpFenceStatusWq = create_freezable_workqueue("pvr_fence_status"); ++ if (!gpFenceStatusWq) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create foreign fence status workqueue", ++ __func__)); ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static void _NativeSyncDeinit(void) ++{ ++ if (gpFenceStatusWq) { ++ destroy_workqueue(gpFenceStatusWq); ++ } ++} ++ ++struct workqueue_struct *NativeSyncGetFenceStatusWq(void) ++{ ++ if (!gpFenceStatusWq) ++ { ++#if defined(DEBUG) ++ PVR_ASSERT(gpFenceStatusWq); ++#endif ++ return NULL; ++ } ++ ++ return gpFenceStatusWq; ++} ++#endif ++ ++PVRSRV_ERROR OSInitEnvData(void) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ LinuxInitPhysmem(); ++ ++ _OSInitThreadList(); ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) ++ eError = _NativeSyncInit(); ++#endif ++ ++ return eError; ++} ++ ++void OSDeInitEnvData(void) ++{ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) ++ _NativeSyncDeinit(); ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ if (gpFenceDestroyWq) { ++ flush_workqueue(gpFenceDestroyWq); ++ destroy_workqueue(gpFenceDestroyWq); ++ } ++#endif ++ ++ LinuxDeinitPhysmem(); ++} ++ ++void OSReleaseThreadQuanta(void) ++{ ++ schedule(); ++} ++ ++void OSMemoryBarrier(volatile void *hReadback) ++{ ++ mb(); ++ ++ if (hReadback) ++ { ++ /* Force a read-back to memory to avoid posted writes on certain buses ++ * e.g. 
PCI(E) ++ */ ++ (void) OSReadDeviceMem32(hReadback); ++ } ++} ++ ++void OSWriteMemoryBarrier(volatile void *hReadback) ++{ ++ wmb(); ++ ++ if (hReadback) ++ { ++ /* Force a read-back to memory to avoid posted writes on certain buses ++ * e.g. PCI(E) ++ */ ++ (void) OSReadDeviceMem32(hReadback); ++ } ++} ++ ++/* Not matching/aligning this API to the Clockus() API above to avoid necessary ++ * multiplication/division operations in calling code. ++ */ ++static inline IMG_UINT64 Clockns64(void) ++{ ++ IMG_UINT64 timenow; ++ ++ /* Kernel thread preempt protection. Some architecture implementations ++ * (ARM) of sched_clock are not preempt safe when the kernel is configured ++ * as such e.g. CONFIG_PREEMPT and others. ++ */ ++ preempt_disable(); ++ ++ /* Using sched_clock instead of ktime_get since we need a time stamp that ++ * correlates with that shown in kernel logs and trace data not one that ++ * is a bit behind. */ ++ timenow = sched_clock(); ++ ++ preempt_enable(); ++ ++ return timenow; ++} ++ ++IMG_UINT64 OSClockns64(void) ++{ ++ return Clockns64(); ++} ++ ++IMG_UINT64 OSClockus64(void) ++{ ++ IMG_UINT64 timenow = Clockns64(); ++ IMG_UINT32 remainder; ++ ++ return OSDivide64r64(timenow, 1000, &remainder); ++} ++ ++IMG_UINT32 OSClockus(void) ++{ ++ return (IMG_UINT32) OSClockus64(); ++} ++ ++IMG_UINT32 OSClockms(void) ++{ ++ IMG_UINT64 timenow = Clockns64(); ++ IMG_UINT32 remainder; ++ ++ return OSDivide64(timenow, 1000000, &remainder); ++} ++ ++static inline IMG_UINT64 KClockns64(void) ++{ ++ ktime_t sTime = ktime_get(); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++ return sTime; ++#else ++ return sTime.tv64; ++#endif ++} ++ ++PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time) ++{ ++ *pui64Time = KClockns64(); ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time) ++{ ++ IMG_UINT64 timenow = KClockns64(); ++ IMG_UINT32 remainder; ++ ++ *pui64Time = OSDivide64r64(timenow, 1000, &remainder); ++ return PVRSRV_OK; ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++IMG_UINT64 OSClockMonotonicRawns64(void) ++{ ++ struct timespec64 ts; ++ ++ ktime_get_raw_ts64(&ts); ++ return ts.tv_sec * 1000000000 + ts.tv_nsec; ++} ++#else ++IMG_UINT64 OSClockMonotonicRawns64(void) ++{ ++ struct timespec ts; ++ ++ getrawmonotonic(&ts); ++ return (IMG_UINT64) ts.tv_sec * 1000000000 + ts.tv_nsec; ++} ++#endif ++ ++IMG_UINT64 OSClockMonotonicRawus64(void) ++{ ++ IMG_UINT32 rem; ++ return OSDivide64r64(OSClockMonotonicRawns64(), 1000, &rem); ++} ++ ++/* ++ OSWaitus ++*/ ++void OSWaitus(IMG_UINT32 ui32Timeus) ++{ ++ udelay(ui32Timeus); ++} ++ ++ ++/* ++ OSSleepms ++*/ ++void OSSleepms(IMG_UINT32 ui32Timems) ++{ ++ msleep(ui32Timems); ++} ++ ++ ++INLINE IMG_UINT64 OSGetCurrentProcessVASpaceSize(void) ++{ ++ return (IMG_UINT64)TASK_SIZE; ++} ++ ++INLINE IMG_PID OSGetCurrentProcessID(void) ++{ ++ if (in_interrupt()) ++ { ++ return KERNEL_ID; ++ } ++ ++ return (IMG_PID)task_tgid_nr(current); ++} ++ ++INLINE IMG_PID OSGetCurrentVirtualProcessID(void) ++{ ++ if (in_interrupt()) ++ { ++ return KERNEL_ID; ++ } ++ ++ return (IMG_PID)task_tgid_vnr(current); ++} ++ ++INLINE IMG_CHAR *OSGetCurrentProcessName(void) ++{ ++ return current->comm; ++} ++ ++INLINE uintptr_t OSGetCurrentThreadID(void) ++{ ++ if (in_interrupt()) ++ { ++ return KERNEL_ID; ++ } ++ ++ return current->pid; ++} ++ ++IMG_PID OSGetCurrentClientProcessIDKM(void) ++{ ++ return OSGetCurrentProcessID(); ++} ++ ++IMG_CHAR *OSGetCurrentClientProcessNameKM(void) ++{ ++ return 
OSGetCurrentProcessName(); ++} ++ ++uintptr_t OSGetCurrentClientThreadIDKM(void) ++{ ++ return OSGetCurrentThreadID(); ++} ++ ++size_t OSGetPageSize(void) ++{ ++ return PAGE_SIZE; ++} ++ ++size_t OSGetPageShift(void) ++{ ++ return PAGE_SHIFT; ++} ++ ++size_t OSGetPageMask(void) ++{ ++ return (OSGetPageSize()-1); ++} ++ ++size_t OSGetOrder(size_t uSize) ++{ ++ return get_order(PAGE_ALIGN(uSize)); ++} ++ ++IMG_UINT64 OSGetRAMSize(void) ++{ ++ struct sysinfo SI; ++ si_meminfo(&SI); ++ ++ return (PAGE_SIZE * SI.totalram); ++} ++ ++typedef struct ++{ ++ int os_error; ++ PVRSRV_ERROR pvr_error; ++} error_map_t; ++ ++/* return -ve versions of POSIX errors as they are used in this form */ ++static const error_map_t asErrorMap[] = ++{ ++ {-EFAULT, PVRSRV_ERROR_BRIDGE_EFAULT}, ++ {-EINVAL, PVRSRV_ERROR_BRIDGE_EINVAL}, ++ {-ENOMEM, PVRSRV_ERROR_BRIDGE_ENOMEM}, ++ {-ERANGE, PVRSRV_ERROR_BRIDGE_ERANGE}, ++ {-EPERM, PVRSRV_ERROR_BRIDGE_EPERM}, ++ {-ENOTTY, PVRSRV_ERROR_BRIDGE_ENOTTY}, ++ {-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED}, ++ {-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL}, ++ {-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY}, ++ {-EINVAL, PVRSRV_ERROR_INVALID_PARAMS}, ++ ++ {0, PVRSRV_OK} ++}; ++ ++int PVRSRVToNativeError(PVRSRV_ERROR e) ++{ ++ int os_error = -EFAULT; ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(asErrorMap); i++) ++ { ++ if (e == asErrorMap[i].pvr_error) ++ { ++ os_error = asErrorMap[i].os_error; ++ break; ++ } ++ } ++ return os_error; ++} ++ ++typedef struct _MISR_DATA_ { ++ struct workqueue_struct *psWorkQueue; ++ struct work_struct sMISRWork; ++ const IMG_CHAR* pszName; ++ PFN_MISR pfnMISR; ++ void *hData; ++} MISR_DATA; ++ ++/* ++ MISRWrapper ++*/ ++static void MISRWrapper(struct work_struct *data) ++{ ++ MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "Waking up '%s' MISR %p", psMISRData->pszName, psMISRData)); ++ ++ psMISRData->pfnMISR(psMISRData->hData); ++} ++ ++/* ++ OSInstallMISR ++*/ ++PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR, ++ void *hData, const IMG_CHAR *pszMisrName) ++{ ++ MISR_DATA *psMISRData; ++ ++ psMISRData = OSAllocMem(sizeof(*psMISRData)); ++ PVR_LOG_RETURN_IF_NOMEM(psMISRData, "psMISRData"); ++ ++ psMISRData->hData = hData; ++ psMISRData->pfnMISR = pfnMISR; ++ psMISRData->pszName = pszMisrName; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "Installing MISR with cookie %p", psMISRData)); ++ ++ psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_misr"); ++ ++ if (psMISRData->psWorkQueue == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethreaded_workqueue failed")); ++ OSFreeMem(psMISRData); ++ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; ++ } ++ ++ INIT_WORK(&psMISRData->sMISRWork, MISRWrapper); ++ ++ *hMISRData = (IMG_HANDLE) psMISRData; ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ OSUninstallMISR ++*/ ++PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData) ++{ ++ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "Uninstalling MISR with cookie %p", psMISRData)); ++ ++ destroy_workqueue(psMISRData->psWorkQueue); ++ OSFreeMem(psMISRData); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ OSScheduleMISR ++*/ ++PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData) ++{ ++ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData; ++ ++ /* ++ Note: ++ ++ In the case of NO_HARDWARE we want the driver to be synchronous so ++ that we don't have to worry about waiting for previous operations ++ to complete ++ */ ++#if defined(NO_HARDWARE) ++ psMISRData->pfnMISR(psMISRData->hData); ++ return 
PVRSRV_OK; ++#else ++ { ++ bool rc = queue_work(psMISRData->psWorkQueue, &psMISRData->sMISRWork); ++ return rc ? PVRSRV_OK : PVRSRV_ERROR_ALREADY_EXISTS; ++ } ++#endif ++} ++ ++/* OS specific values for thread priority */ ++static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] = ++{ ++ 0, /* OS_THREAD_NOSET_PRIORITY */ ++ -20, /* OS_THREAD_HIGHEST_PRIORITY */ ++ -10, /* OS_THREAD_HIGH_PRIORITY */ ++ 0, /* OS_THREAD_NORMAL_PRIORITY */ ++ 9, /* OS_THREAD_LOW_PRIORITY */ ++ 19, /* OS_THREAD_LOWEST_PRIORITY */ ++}; ++ ++static int OSThreadRun(void *data) ++{ ++ OSThreadData *psOSThreadData = data; ++ ++ /* count freezable threads */ ++ LinuxBridgeNumActiveKernelThreadsIncrement(); ++ ++ /* Returns true if the thread was frozen, should we do anything with this ++ * information? What do we return? Which one is the error case? */ ++ set_freezable(); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "Starting Thread '%s'...", psOSThreadData->pszThreadName)); ++ ++ /* Call the client's kernel thread with the client's data pointer */ ++ psOSThreadData->pfnThread(psOSThreadData->hData); ++ ++ if (psOSThreadData->bIsSupportingThread) ++ { ++ _ThreadSetStopped(psOSThreadData); ++ } ++ ++ /* Wait for OSThreadDestroy() to call kthread_stop() */ ++ while (!kthread_freezable_should_stop(NULL)) ++ { ++ schedule(); ++ } ++ ++ LinuxBridgeNumActiveKernelThreadsDecrement(); ++ ++ return 0; ++} ++ ++PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread, ++ IMG_CHAR *pszThreadName, ++ PFN_THREAD pfnThread, ++ PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, ++ IMG_BOOL bIsSupportingThread, ++ void *hData) ++{ ++ return OSThreadCreatePriority(phThread, pszThreadName, pfnThread, ++ pfnDebugDumpCB, bIsSupportingThread, hData, ++ OS_THREAD_NOSET_PRIORITY); ++} ++ ++PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread, ++ IMG_CHAR *pszThreadName, ++ PFN_THREAD pfnThread, ++ PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB, ++ IMG_BOOL bIsSupportingThread, ++ void *hData, ++ OS_THREAD_LEVEL eThreadPriority) ++{ ++ OSThreadData *psOSThreadData; ++ PVRSRV_ERROR eError; ++ ++ psOSThreadData = OSAllocZMem(sizeof(*psOSThreadData)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSThreadData, eError, fail_alloc); ++ ++ psOSThreadData->pfnThread = pfnThread; ++ psOSThreadData->hData = hData; ++ psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, "%s", pszThreadName); ++ ++ if (IS_ERR(psOSThreadData->kthread)) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto fail_kthread; ++ } ++ ++ if (bIsSupportingThread) ++ { ++ psOSThreadData->pszThreadName = pszThreadName; ++ psOSThreadData->pfnDebugDumpCB = pfnDebugDumpCB; ++ psOSThreadData->bIsThreadRunning = IMG_TRUE; ++ psOSThreadData->bIsSupportingThread = IMG_TRUE; ++ ++ _ThreadListAddEntry(psOSThreadData); ++ } ++ ++ if (eThreadPriority != OS_THREAD_NOSET_PRIORITY && ++ eThreadPriority < OS_THREAD_LAST_PRIORITY) ++ { ++ set_user_nice(psOSThreadData->kthread, ++ ai32OSPriorityValues[eThreadPriority]); ++ } ++ ++ *phThread = psOSThreadData; ++ ++ return PVRSRV_OK; ++ ++fail_kthread: ++ OSFreeMem(psOSThreadData); ++fail_alloc: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread) ++{ ++ OSThreadData *psOSThreadData = hThread; ++ int ret; ++ ++ /* Let the thread know we are ready for it to end and wait for it. 
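++	 * kthread_stop() blocks until OSThreadRun() has returned; OSThreadRun()
++	 * parks in its kthread_freezable_should_stop() loop once the client's
++	 * thread function finishes, so the stop request is what finally lets
++	 * the kthread exit.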
*/ ++ ret = kthread_stop(psOSThreadData->kthread); ++ if (0 != ret) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "kthread_stop failed(%d)", ret)); ++ return PVRSRV_ERROR_RETRY; ++ } ++ ++ if (psOSThreadData->bIsSupportingThread) ++ { ++ _ThreadListRemoveEntry(psOSThreadData); ++ } ++ ++ OSFreeMem(psOSThreadData); ++ ++ return PVRSRV_OK; ++} ++ ++void OSPanic(void) ++{ ++ BUG(); ++ ++#if defined(__KLOCWORK__) ++ /* Klocwork does not understand that BUG is terminal... */ ++ abort(); ++#endif ++} ++ ++void * ++OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, ++ size_t ui32Bytes, ++ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags) ++{ ++ void __iomem *pvLinAddr; ++ ++ if (uiMappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)) ++ { ++ PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu"); ++ return NULL; ++ } ++ ++ if (! PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ /* ++ This is required to support DMA physheaps for GPU virtualization. ++ Unfortunately, if a region of kernel managed memory is turned into ++ a DMA buffer, conflicting mappings can come about easily on Linux ++ as the original memory is mapped by the kernel as normal cached ++ memory whilst DMA buffers are mapped mostly as uncached device or ++ cache-coherent device memory. In both cases the system will have ++ two conflicting mappings for the same memory region and will have ++ "undefined behaviour" for most processors notably ARMv6 onwards ++ and some x86 micro-architectures. As a result, perform ioremapping ++ manually for DMA physheap allocations by translating from CPU/VA ++ to BUS/PA thereby preventing the creation of conflicting mappings. ++ */ ++ pvLinAddr = (void __iomem *) SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes); ++ if (pvLinAddr != NULL) ++ { ++ return (void __force *) pvLinAddr; ++ } ++ } ++ ++ switch (uiMappingFlags) ++ { ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: ++ pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes); ++ break; ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: ++#if defined(CONFIG_X86) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) ++ pvLinAddr = (void __iomem *)ioremap_wc(BasePAddr.uiAddr, ui32Bytes); ++#else ++ pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes); ++#endif ++ break; ++ case PVRSRV_MEMALLOCFLAG_CPU_CACHED: ++#if defined(CONFIG_X86) || defined(CONFIG_ARM) ++ pvLinAddr = (void __iomem *)ioremap_cache(BasePAddr.uiAddr, ui32Bytes); ++#else ++ pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes); ++#endif ++ break; ++ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT: ++ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT: ++ PVR_ASSERT(!"Unexpected cpu cache mode"); ++ pvLinAddr = NULL; ++ break; ++ default: ++ PVR_ASSERT(!"Unsupported cpu cache mode"); ++ pvLinAddr = NULL; ++ break; ++ } ++ ++ return (void __force *) pvLinAddr; ++} ++ ++ ++IMG_BOOL ++OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes) ++{ ++ PVR_UNREFERENCED_PARAMETER(ui32Bytes); ++ ++ if (!PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ if (SysDmaCpuVAddrToDevPAddr(pvLinAddr)) ++ { ++ return IMG_TRUE; ++ } ++ } ++ ++ iounmap((void __iomem *) pvLinAddr); ++ ++ return IMG_TRUE; ++} ++ ++#define OS_MAX_TIMERS 8 ++ ++/* Timer callback structure used by OSAddTimer */ ++typedef struct TIMER_CALLBACK_DATA_TAG ++{ ++ IMG_BOOL bInUse; ++ PFN_TIMER_FUNC pfnTimerFunc; ++ void *pvData; ++ struct timer_list sTimer; ++ IMG_UINT32 ui32Delay; ++ IMG_BOOL bActive; ++ struct work_struct sWork; ++}TIMER_CALLBACK_DATA; ++ ++static struct workqueue_struct *psTimerWorkQueue; ++ ++static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS]; ++ 
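++/*
++ * Illustrative sketch (not part of the original driver sources): mapping a
++ * device-register window with OSMapPhysToLin() above. The physical address
++ * and size are made-up values; exactly one CPU cache-mode flag from the
++ * switch in OSMapPhysToLin() must be passed.
++ */
++#if 0 /* example only */
++static void ExampleMapRegs(void)
++{
++	IMG_CPU_PHYADDR sRegPAddr;
++	void *pvRegs;
++
++	sRegPAddr.uiAddr = 0x40000000; /* hypothetical register bank */
++
++	pvRegs = OSMapPhysToLin(sRegPAddr, 0x1000,
++	                        PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
++	if (pvRegs != NULL)
++	{
++		/* ... access the registers through pvRegs ... */
++		(void) OSUnMapPhysToLin(pvRegs, 0x1000);
++	}
++}
++#endif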
++static DEFINE_MUTEX(sTimerStructLock); ++ ++static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData) ++{ ++ if (!psTimerCBData->bActive) ++ return; ++ ++ /* call timer callback */ ++ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData); ++ ++ /* reset timer */ ++ mod_timer(&psTimerCBData->sTimer, psTimerCBData->sTimer.expires + psTimerCBData->ui32Delay); ++} ++ ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) ++/*************************************************************************/ /*! ++@Function OSTimerCallbackWrapper ++@Description OS specific timer callback wrapper function ++@Input psTimer Timer list structure ++*/ /**************************************************************************/ ++static void OSTimerCallbackWrapper(struct timer_list *psTimer) ++{ ++ TIMER_CALLBACK_DATA *psTimerCBData = from_timer(psTimerCBData, psTimer, sTimer); ++#else ++/*************************************************************************/ /*! ++@Function OSTimerCallbackWrapper ++@Description OS specific timer callback wrapper function ++@Input uData Timer callback data ++*/ /**************************************************************************/ ++static void OSTimerCallbackWrapper(uintptr_t uData) ++{ ++ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)uData; ++#endif ++ int res; ++ ++ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork); ++ if (res == 0) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued")); ++ } ++} ++ ++ ++static void OSTimerWorkQueueCallBack(struct work_struct *psWork) ++{ ++ TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork); ++ ++ OSTimerCallbackBody(psTimerCBData); ++} ++ ++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout) ++{ ++ TIMER_CALLBACK_DATA *psTimerCBData; ++ IMG_UINT32 ui32i; ++ ++ /* check callback */ ++ if (!pfnTimerFunc) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback")); ++ return NULL; ++ } ++ ++ /* Allocate timer callback data structure */ ++ mutex_lock(&sTimerStructLock); ++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) ++ { ++ psTimerCBData = &sTimers[ui32i]; ++ if (!psTimerCBData->bInUse) ++ { ++ psTimerCBData->bInUse = IMG_TRUE; ++ break; ++ } ++ } ++ mutex_unlock(&sTimerStructLock); ++ if (ui32i >= OS_MAX_TIMERS) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use")); ++ return NULL; ++ } ++ ++ psTimerCBData->pfnTimerFunc = pfnTimerFunc; ++ psTimerCBData->pvData = pvData; ++ psTimerCBData->bActive = IMG_FALSE; ++ ++ /* ++ HZ = ticks per second ++ ui32MsTimeout = required ms delay ++ ticks = (Hz * ui32MsTimeout) / 1000 ++ */ ++ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000) ++ ? 
1 ++ : ((HZ * ui32MsTimeout) / 1000); ++ ++ /* initialise object */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) ++ timer_setup(&psTimerCBData->sTimer, OSTimerCallbackWrapper, 0); ++#else ++ init_timer(&psTimerCBData->sTimer); ++ ++ /* setup timer object */ ++ psTimerCBData->sTimer.function = (void *)OSTimerCallbackWrapper; ++ psTimerCBData->sTimer.data = (uintptr_t)psTimerCBData; ++#endif ++ ++ return (IMG_HANDLE)(uintptr_t)(ui32i + 1); ++} ++ ++ ++static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer) ++{ ++ IMG_UINT32 ui32i = (IMG_UINT32)((uintptr_t)hTimer) - 1; ++ ++ PVR_ASSERT(ui32i < OS_MAX_TIMERS); ++ ++ return &sTimers[ui32i]; ++} ++ ++PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer) ++{ ++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); ++ ++ PVR_ASSERT(psTimerCBData->bInUse); ++ PVR_ASSERT(!psTimerCBData->bActive); ++ ++ /* free timer callback data struct */ ++ psTimerCBData->bInUse = IMG_FALSE; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer) ++{ ++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); ++ ++ PVR_ASSERT(psTimerCBData->bInUse); ++ PVR_ASSERT(!psTimerCBData->bActive); ++ ++ /* Start timer arming */ ++ psTimerCBData->bActive = IMG_TRUE; ++ ++ /* set the expire time */ ++ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies; ++ ++ /* Add the timer to the list */ ++ add_timer(&psTimerCBData->sTimer); ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer) ++{ ++ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer); ++ ++ PVR_ASSERT(psTimerCBData->bInUse); ++ PVR_ASSERT(psTimerCBData->bActive); ++ ++ /* Stop timer from arming */ ++ psTimerCBData->bActive = IMG_FALSE; ++ smp_mb(); ++ ++ flush_workqueue(psTimerWorkQueue); ++ ++ /* remove timer */ ++ del_timer_sync(&psTimerCBData->sTimer); ++ ++ /* ++ * This second flush is to catch the case where the timer ran ++ * before we managed to delete it, in which case, it will have ++ * queued more work for the workqueue. Since the bActive flag ++ * has been cleared, this second flush won't result in the ++ * timer being rearmed. 
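++	 * (bActive is cleared before the flushes and the store is ordered by
++	 * the smp_mb() above, so a callback racing with this function sees
++	 * bActive == IMG_FALSE and returns before calling mod_timer().)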
++ */ ++ flush_workqueue(psTimerWorkQueue); ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject) ++{ ++ PVR_UNREFERENCED_PARAMETER(pszName); ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); ++ ++ return LinuxEventObjectListCreate(hEventObject); ++} ++ ++ ++PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject) ++{ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); ++ ++ return LinuxEventObjectListDestroy(hEventObject); ++} ++ ++#define _FREEZABLE IMG_TRUE ++#define _NON_FREEZABLE IMG_FALSE ++ ++/* ++ * EventObjectWaitTimeout() ++ */ ++static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM, ++ IMG_UINT64 uiTimeoutus) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (hOSEventKM && uiTimeoutus > 0) ++ { ++ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, _NON_FREEZABLE); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWait: invalid arguments %p, %lld", hOSEventKM, uiTimeoutus)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus) ++{ ++ return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus); ++} ++ ++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM) ++{ ++ return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US); ++} ++ ++PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, ++ IMG_UINT64 uiTimeoutus) ++{ ++ PVRSRV_ERROR eError; ++ ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++ if (hOSEventKM) ++ { ++ if (uiTimeoutus > 0) ++ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, ++ _FREEZABLE); ++ else ++ eError = LinuxEventObjectWaitUntilSignalled(hOSEventKM); ++ } ++#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ ++ if (hOSEventKM && uiTimeoutus > 0) ++ { ++ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, ++ _FREEZABLE); ++ } ++#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWaitKernel: invalid arguments %p", ++ hOSEventKM)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ return eError; ++} ++ ++void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM) ++{ ++ LinuxEventObjectDumpDebugInfo(hOSEventKM); ++} ++ ++PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject, IMG_HANDLE *phOSEvent) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(phOSEvent, "phOSEvent"); ++ PVR_LOG_GOTO_IF_INVALID_PARAM(hEventObject, eError, error); ++ ++ eError = LinuxEventObjectAdd(hEventObject, phOSEvent); ++ PVR_LOG_GOTO_IF_ERROR(eError, "LinuxEventObjectAdd", error); ++ ++ return PVRSRV_OK; ++ ++error: ++ *phOSEvent = NULL; ++ return eError; ++} ++ ++PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM) ++{ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(hOSEventKM, "hOSEventKM"); ++ ++ return LinuxEventObjectDelete(hOSEventKM); ++} ++ ++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject) ++{ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(hEventObject, "hEventObject"); ++ ++ return LinuxEventObjectSignal(hEventObject); ++} ++ ++PVRSRV_ERROR OSCopyToUser(void *pvProcess, ++ void __user *pvDest, ++ const void *pvSrc, ++ size_t ui32Bytes) ++{ ++ PVR_UNREFERENCED_PARAMETER(pvProcess); ++ ++ if (pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0) ++ return PVRSRV_OK; ++ else ++ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; ++} ++ ++PVRSRV_ERROR OSCopyFromUser(void *pvProcess, ++ void *pvDest, ++ const void __user *pvSrc, ++ size_t ui32Bytes) ++{ ++ 
PVR_UNREFERENCED_PARAMETER(pvProcess); ++ ++ if (likely(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0)) ++ return PVRSRV_OK; ++ else ++ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY; ++} ++ ++IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder) ++{ ++ *pui32Remainder = do_div(ui64Divident, ui32Divisor); ++ ++ return ui64Divident; ++} ++ ++IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder) ++{ ++ *pui32Remainder = do_div(ui64Divident, ui32Divisor); ++ ++ return (IMG_UINT32) ui64Divident; ++} ++ ++/* One time osfunc initialisation */ ++PVRSRV_ERROR PVROSFuncInit(void) ++{ ++ { ++ PVR_ASSERT(!psTimerWorkQueue); ++ ++ psTimerWorkQueue = create_freezable_workqueue("pvr_timer"); ++ if (psTimerWorkQueue == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", ++ __func__)); ++ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD; ++ } ++ } ++ ++ { ++ IMG_UINT32 ui32i; ++ ++ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++) ++ { ++ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i]; ++ ++ INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack); ++ } ++ } ++ return PVRSRV_OK; ++} ++ ++/* ++ * Osfunc deinitialisation. ++ * Note that PVROSFuncInit may not have been called ++ */ ++void PVROSFuncDeInit(void) ++{ ++ if (psTimerWorkQueue != NULL) ++ { ++ destroy_workqueue(psTimerWorkQueue); ++ psTimerWorkQueue = NULL; ++ } ++} ++ ++void OSDumpStack(void) ++{ ++ dump_stack(); ++} ++ ++PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray, ++ IMG_UINT64 sCpuVAddrBase, ++ IMG_CPU_PHYADDR sCpuPAHeapBase, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_BOOL bIsLMA) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) ++ pfn_t sPFN; ++#else ++ IMG_UINT64 uiPFN; ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ ++ ++ PVRSRV_ERROR eError; ++ ++ struct mm_struct *psMM = current->mm; ++ struct vm_area_struct *psVMA = NULL; ++ struct address_space *psMapping = NULL; ++ struct page *psPage = NULL; ++ ++ IMG_UINT64 uiCPUVirtAddr = 0; ++ IMG_UINT32 ui32Loop = 0; ++ IMG_UINT32 ui32PageSize = OSGetPageSize(); ++ IMG_BOOL bMixedMap = IMG_FALSE; ++ ++ /* ++ * Acquire the lock before manipulating the VMA ++ * In this case only mmap_sem lock would suffice as the pages associated with this VMA ++ * are never meant to be swapped out. ++ * ++ * In the future, in case the pages are marked as swapped, page_table_lock needs ++ * to be acquired in conjunction with this to disable page swapping. 
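++	 * (On current kernels mmap_sem is the mmap_lock; its write side is
++	 * taken below via mmap_write_lock().)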
++ */ ++ ++ /* Find the Virtual Memory Area associated with the user base address */ ++ psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase); ++ if (NULL == psVMA) ++ { ++ eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND; ++ return eError; ++ } ++ ++ /* Acquire the memory sem */ ++ mmap_write_lock(psMM); ++ ++ psMapping = psVMA->vm_file->f_mapping; ++ ++ /* Set the page offset to the correct value as this is disturbed in MMAP_PMR func */ ++ psVMA->vm_pgoff = (psVMA->vm_start >> PAGE_SHIFT); ++ ++ /* Delete the entries for the pages that got freed */ ++ if (ui32FreePageCount && (pai32FreeIndices != NULL)) ++ { ++ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++) ++ { ++ uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize)); ++ ++ unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); ++ ++#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE ++ /* ++ * Still need to map pages in case remap flag is set. ++ * That is not done until the remap case succeeds ++ */ ++#endif ++ } ++ eError = PVRSRV_OK; ++ } ++ ++ if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA) ++ { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ vm_flags_set(psVMA, VM_MIXEDMAP); ++#else ++ psVMA->vm_flags |= VM_MIXEDMAP; ++#endif ++ bMixedMap = IMG_TRUE; ++ } ++ else ++ { ++ if (ui32AllocPageCount && (NULL != pai32AllocIndices)) ++ { ++ for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) ++ { ++ ++ psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) ++ sPFN = page_to_pfn_t(psPage); ++ ++ if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0) ++#else ++ uiPFN = page_to_pfn(psPage); ++ ++ if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0)) ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ ++ { ++ bMixedMap = IMG_TRUE; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ vm_flags_set(psVMA, VM_MIXEDMAP); ++#else ++ psVMA->vm_flags |= VM_MIXEDMAP; ++#endif ++ break; ++ } ++ } ++ } ++ } ++ ++ /* Map the pages that got allocated */ ++ if (ui32AllocPageCount && (NULL != pai32AllocIndices)) ++ { ++ for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++) ++ { ++ int err; ++ ++ uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32AllocIndices[ui32Loop] * ui32PageSize)); ++ unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1); ++ ++ if (bIsLMA) ++ { ++ phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr + ++ ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) ++ sPFN = phys_to_pfn_t(uiAddr, 0); ++ psPage = pfn_t_to_page(sPFN); ++#else ++ uiPFN = uiAddr >> PAGE_SHIFT; ++ psPage = pfn_to_page(uiPFN); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ ++ } ++ else ++ { ++ psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]]; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) ++ sPFN = page_to_pfn_t(psPage); ++#else ++ uiPFN = page_to_pfn(psPage); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */ ++ } ++ ++ if (bMixedMap) ++ { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) ++ vm_fault_t vmf; ++ ++ vmf = vmf_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); ++ if (vmf & VM_FAULT_ERROR) ++ { ++ err = vm_fault_to_errno(vmf, 0); ++ } ++ else ++ { ++ err = 0; ++ } ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) ++ err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN); ++#else ++ err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 
20, 0)) */ ++ } ++ else ++ { ++ err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage); ++ } ++ ++ if (err) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err)); ++ eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED; ++ goto eFailed; ++ } ++ } ++ } ++ ++ eError = PVRSRV_OK; ++eFailed: ++ mmap_write_unlock(psMM); ++ ++ return eError; ++} ++ ++/*************************************************************************/ /*! ++@Function OSDebugSignalPID ++@Description Sends a SIGTRAP signal to a specific PID in user mode for ++ debugging purposes. The user mode process can register a handler ++ against this signal. ++ This is necessary to support the Rogue debugger. If the Rogue ++ debugger is not used then this function may be implemented as ++ a stub. ++@Input ui32PID The PID for the signal. ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID) ++{ ++ int err; ++ struct pid *psPID; ++ ++ psPID = find_vpid(ui32PID); ++ if (psPID == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PID struct.", __func__)); ++ return PVRSRV_ERROR_NOT_FOUND; ++ } ++ ++ err = kill_pid(psPID, SIGTRAP, 0); ++ if (err != 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Signal Failure %d", __func__, err)); ++ return PVRSRV_ERROR_SIGNAL_FAILED; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/*************************************************************************/ /*! ++@Function OSIsKernelThread ++@Description This API determines if the current running thread is a kernel ++ thread (i.e. one not associated with any userland process, ++ typically an MISR handler.) ++@Return IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE. ++*/ /**************************************************************************/ ++IMG_BOOL OSIsKernelThread(void) ++{ ++ /* ++ * Kernel threads have a NULL memory descriptor. 
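++	 * (Such a thread executes on a borrowed "active_mm" from the last
++	 * user task, which is why current->mm itself stays NULL.)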
++	 *
++	 * See https://www.kernel.org/doc/Documentation/vm/active_mm.txt
++	 */
++	return current->mm == NULL;
++}
++
++void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
++                       void *pvDumpDebugFile)
++{
++	PVR_DUMPDEBUG_LOG("OS kernel info: %s %s %s %s",
++	                  utsname()->sysname,
++	                  utsname()->release,
++	                  utsname()->version,
++	                  utsname()->machine);
++}
++
++void OSWriteHWRegl(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++	writel((IMG_UINT32)(ui32Value), (IMG_BYTE __iomem *)(pvLinRegBaseAddr) + (ui32Offset));
++}
++
++void OSWriteHWRegll(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value)
++{
++	volatile void *_addr = pvLinRegBaseAddr;
++	IMG_UINT32 _off = ui32Offset;
++	IMG_UINT64 _val = ui64Value;
++	writel((IMG_UINT32)((_val) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off));
++	writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off) + 4);
++}
++
++IMG_UINT32 OSReadHWRegl(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++	return ((IMG_UINT32)readl((IMG_BYTE __iomem *)(pvLinRegBaseAddr) + (ui32Offset)));
++}
++
++IMG_UINT64 OSReadHWRegll(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset)
++{
++	volatile void *_addr = pvLinRegBaseAddr;
++	IMG_UINT32 _off = ui32Offset;
++	return (IMG_UINT64)(( (IMG_UINT64)(readl((IMG_BYTE __iomem *)(_addr) + (_off) + 4)) << 32) \
++	       | readl((IMG_BYTE __iomem *)(_addr) + (_off)));
++}
++
++#if defined(SUPPORT_DMA_TRANSFER)
++
++typedef struct _OS_CLEANUP_DATA_
++{
++	IMG_BOOL bSucceed;
++	IMG_BOOL bAdvanceTimeline;
++	IMG_UINT uiRefCount;
++	IMG_UINT uiNumDMA;
++	IMG_UINT uiCount;
++
++	struct dma_async_tx_descriptor** ppsDescriptors;
++
++
++	PVRSRV_DEVICE_NODE *psDevNode;
++	PFN_SERVER_CLEANUP pfnServerCleanup;
++	void* pvServerCleanupData;
++
++	enum dma_transfer_direction eDirection;
++	struct sg_table **ppsSg;
++	struct page ***pages;
++	IMG_UINT32* puiNumPages;
++	spinlock_t spinlock;
++
++	struct completion start_cleanup;
++	struct completion *sync_completion;
++
++	/* Sparse PMR transfer information */
++	IMG_BOOL *pbIsSparse;
++	IMG_UINT *uiNumValidPages;
++	struct sg_table ***ppsSgSparse;
++	struct dma_async_tx_descriptor*** ppsDescriptorsSparse;
++
++} OS_CLEANUP_DATA;
++
++static int cleanup_thread(void *pvData)
++{
++	IMG_UINT32 i, j;
++	struct completion *sync_completion = NULL;
++	OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvData;
++	IMG_BOOL bSucceed = psOSCleanup->bSucceed;
++
++	sync_completion = psOSCleanup->sync_completion;
++
++#if defined(DMA_VERBOSE)
++	PVR_DPF((PVR_DBG_ERROR, "Cleanup thread waiting (%p) on completion", pvData));
++#endif
++
++	wait_for_completion(&psOSCleanup->start_cleanup);
++
++#if defined(DMA_VERBOSE)
++	PVR_DPF((PVR_DBG_ERROR, "Cleanup thread notified (%p)", pvData));
++#endif
++	/* Free resources */
++	for (i=0; i<psOSCleanup->uiCount; i++)
++	{
++		if (!psOSCleanup->pbIsSparse[i])
++		{
++			dma_sync_sg_for_cpu(psOSCleanup->psDevNode->psDevConfig->pvOSDevice,
++			                    psOSCleanup->ppsSg[i]->sgl,
++			                    psOSCleanup->ppsSg[i]->nents,
++			                    psOSCleanup->eDirection);
++
++			dma_unmap_sg(psOSCleanup->psDevNode->psDevConfig->pvOSDevice,
++			             psOSCleanup->ppsSg[i]->sgl,
++			             psOSCleanup->ppsSg[i]->nents,
++			             psOSCleanup->eDirection);
++
++			sg_free_table(psOSCleanup->ppsSg[i]);
++
++			OSFreeMem(psOSCleanup->ppsSg[i]);
++
++			/* Unpin pages */
++			for (j=0; j<psOSCleanup->puiNumPages[i]; j++)
++			{
++				if (psOSCleanup->eDirection == DMA_DEV_TO_MEM)
++				{
++					set_page_dirty_lock(psOSCleanup->pages[i][j]);
++				}
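++				/* Drop the reference taken by get_user_pages_fast() in
++				 * OSDmaPrepareTransfer(); pages read back from the device
++				 * were marked dirty above. */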
++				put_page(psOSCleanup->pages[i][j]);
++			}
++		}
++		else
++		{
++			for (j = 0; j < psOSCleanup->puiNumPages[i]; j++)
++			{
++				if (psOSCleanup->ppsSgSparse[i][j]) {
++					dma_sync_sg_for_cpu(psOSCleanup->psDevNode->psDevConfig->pvOSDevice,
++					                    psOSCleanup->ppsSgSparse[i][j]->sgl,
++					                    psOSCleanup->ppsSgSparse[i][j]->nents,
++					                    psOSCleanup->eDirection);
++
++
++					dma_unmap_sg(psOSCleanup->psDevNode->psDevConfig->pvOSDevice,
++					             psOSCleanup->ppsSgSparse[i][j]->sgl,
++					             psOSCleanup->ppsSgSparse[i][j]->nents,
++					             psOSCleanup->eDirection);
++
++					sg_free_table(psOSCleanup->ppsSgSparse[i][j]);
++
++					OSFreeMem(psOSCleanup->ppsSgSparse[i][j]);
++
++				}
++			}
++
++			OSFreeMem(psOSCleanup->ppsSgSparse[i]);
++			OSFreeMem(psOSCleanup->ppsDescriptorsSparse[i]);
++
++			/* Unpin pages */
++			for (j=0; j<psOSCleanup->puiNumPages[i]*2; j++)
++			{
++				/*
++				 * Some pages might've been pinned twice
++				 * Others may have not been pinned at all
++				 */
++				if (psOSCleanup->pages[i][j])
++				{
++					if (psOSCleanup->eDirection == DMA_DEV_TO_MEM)
++					{
++						set_page_dirty_lock(psOSCleanup->pages[i][j]);
++					}
++					put_page(psOSCleanup->pages[i][j]);
++				}
++			}
++		}
++
++		OSFreeMem(psOSCleanup->pages[i]);
++	}
++
++	psOSCleanup->pfnServerCleanup(psOSCleanup->pvServerCleanupData,
++	                              psOSCleanup->bAdvanceTimeline);
++
++	OSFreeMem(psOSCleanup->ppsSg);
++	OSFreeMem(psOSCleanup->pages);
++	OSFreeMem(psOSCleanup->puiNumPages);
++	OSFreeMem(psOSCleanup->ppsSgSparse);
++	OSFreeMem(psOSCleanup->ppsDescriptorsSparse);
++	OSFreeMem(psOSCleanup->ppsDescriptors);
++	OSFreeMem(psOSCleanup->pbIsSparse);
++	OSFreeMem(psOSCleanup->uiNumValidPages);
++	OSFreeMem(psOSCleanup);
++
++	if (sync_completion && bSucceed)
++	{
++		complete(sync_completion);
++	}
++
++	do_exit(0);
++	return 0;
++}
++
++static void dma_callback(void *pvOSCleanup)
++{
++	OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvOSCleanup;
++	unsigned long flags;
++
++#if defined(DMA_VERBOSE)
++	PVR_DPF((PVR_DBG_ERROR, "dma_callback (%p) refcount decreased to %d", psOSCleanup, psOSCleanup->uiRefCount - 1));
++#endif
++	spin_lock_irqsave(&psOSCleanup->spinlock, flags);
++
++	psOSCleanup->uiRefCount--;
++
++	if (psOSCleanup->uiRefCount==0)
++	{
++		/* Notify the cleanup thread */
++		spin_unlock_irqrestore(&psOSCleanup->spinlock, flags);
++		complete(&psOSCleanup->start_cleanup);
++		return;
++	}
++
++	spin_unlock_irqrestore(&psOSCleanup->spinlock, flags);
++}
++
++#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA)
++static void
++DMADumpPhysicalAddresses(struct page **ppsHostMemPages,
++                         IMG_UINT32 uiNumPages,
++                         IMG_DMA_ADDR *sDmaAddr,
++                         IMG_UINT64 ui64Offset)
++{
++	IMG_CPU_PHYADDR sPagePhysAddr;
++	IMG_UINT32 uiIdx;
++
++	PVR_DPF((PVR_DBG_MESSAGE, "DMA Transfer Address Dump"));
++	PVR_DPF((PVR_DBG_MESSAGE, "Hostmem phys addresses:"));
++
++	for (uiIdx = 0; uiIdx < uiNumPages; uiIdx++)
++	{
++		sPagePhysAddr.uiAddr = page_to_phys(ppsHostMemPages[uiIdx]);
++		if (uiIdx == 0)
++		{
++			sPagePhysAddr.uiAddr += ui64Offset;
++			PVR_DPF((PVR_DBG_MESSAGE, "\tHost mem start at 0x%llX", sPagePhysAddr.uiAddr));
++		}
++		else
++		{
++			PVR_DPF((PVR_DBG_MESSAGE, "\tHost Mem Page %d at 0x%llX", uiIdx,
++			         sPagePhysAddr.uiAddr));
++		}
++	}
++	PVR_DPF((PVR_DBG_MESSAGE, "Devmem CPU phys address: 0x%llX",
++	         sDmaAddr->uiAddr));
++}
++#endif
++
++PVRSRV_ERROR OSDmaSubmitTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *pvOSData,
++                                 void *pvChan, IMG_BOOL bSynchronous)
++{
++	OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA*)pvOSData;
++	struct completion* sync_completion = NULL;
++
++	psOSCleanup->bSucceed = IMG_TRUE;
++	psOSCleanup->bAdvanceTimeline = IMG_TRUE;
++
++	if (bSynchronous)
++	{
++		sync_completion = OSAllocZMem(sizeof(struct completion));
++		init_completion(sync_completion);
++	}
++
++	PVR_UNREFERENCED_PARAMETER(psDevNode);
++	/* Wait only on number of ops scheduled. This might be different to NumDMAs
++	   in certain error conditions */
++	psOSCleanup->uiRefCount = psOSCleanup->uiCount;
++	psOSCleanup->sync_completion = sync_completion;
++
++	{
++		IMG_UINT32 i,j;
++		for (i=0; i<psOSCleanup->uiCount; i++)
++		{
++			if (psOSCleanup->pbIsSparse[i])
++			{
++				for (j=0; j<psOSCleanup->puiNumPages[i]; j++)
++				{
++					if (psOSCleanup->ppsDescriptorsSparse[i][j])
++						dmaengine_submit(psOSCleanup->ppsDescriptorsSparse[i][j]);
++				}
++			}
++			else
++			{
++				dmaengine_submit(psOSCleanup->ppsDescriptors[i]);
++			}
++		}
++	}
++
++	dma_async_issue_pending(pvChan);
++
++	if (bSynchronous)
++	{
++		wait_for_completion(sync_completion);
++		OSFreeMem(sync_completion);
++	}
++
++	return PVRSRV_OK;
++}
++
++void OSDmaForceCleanup(PVRSRV_DEVICE_NODE *psDevNode, void *pvChan,
++                       void *pvOSData, void *pvServerCleanupParam,
++                       PFN_SERVER_CLEANUP pfnServerCleanup)
++{
++	OS_CLEANUP_DATA *psOSCleanup = (OS_CLEANUP_DATA *)pvOSData;
++	IMG_UINT ui32Retries;
++
++	PVR_UNREFERENCED_PARAMETER(psDevNode);
++
++	psOSCleanup->bSucceed = IMG_FALSE;
++	psOSCleanup->bAdvanceTimeline = IMG_TRUE;
++
++	/* Need to wait for outstanding DMA Engine ops before advancing the
++	   user-supplied timeline in case of error. dmaengine_terminate_sync
++	   cannot be called from within atomic context, so cannot invoke it
++	   from inside the cleanup kernel thread. */
++	for (ui32Retries = 0; ui32Retries < DMA_ERROR_SYNC_RETRIES; ui32Retries++)
++	{
++		if (dmaengine_terminate_sync(pvChan) == 0)
++		{
++			break;
++		}
++	}
++	if (ui32Retries == DMA_ERROR_SYNC_RETRIES)
++	{
++		/* We cannot guarantee all outstanding DMAs were terminated
++		 * so we let the UM fence time out as a fallback mechanism */
++		psOSCleanup->bAdvanceTimeline = IMG_FALSE;
++	}
++
++	if (psOSCleanup->uiCount > 0)
++	{
++		complete(&psOSCleanup->start_cleanup);
++	}
++	else
++	{
++		/* Cleanup kthread never run, need to manually wind down */
++		pfnServerCleanup(pvServerCleanupParam, psOSCleanup->bAdvanceTimeline);
++
++		OSFreeMem(psOSCleanup->ppsSg);
++		OSFreeMem(psOSCleanup->pages);
++		OSFreeMem(psOSCleanup->puiNumPages);
++		OSFreeMem(psOSCleanup->ppsSgSparse);
++		OSFreeMem(psOSCleanup->pbIsSparse);
++		OSFreeMem(psOSCleanup->uiNumValidPages);
++		OSFreeMem(psOSCleanup->ppsDescriptors);
++		OSFreeMem(psOSCleanup->ppsDescriptorsSparse);
++
++		OSFreeMem(psOSCleanup);
++	}
++}
++
++PVRSRV_ERROR OSDmaAllocData(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 uiNumDMA, void **pvOutData)
++{
++	PVRSRV_ERROR eError;
++	OS_CLEANUP_DATA *psOSCleanup = OSAllocZMem(sizeof(OS_CLEANUP_DATA));
++	PVR_LOG_GOTO_IF_NOMEM(psOSCleanup, eError, e0);
++
++	psOSCleanup->uiNumDMA = uiNumDMA;
++	psOSCleanup->psDevNode = psDevNode;
++
++	spin_lock_init(&psOSCleanup->spinlock);
++
++	init_completion(&psOSCleanup->start_cleanup);
++
++	psOSCleanup->ppsDescriptors = OSAllocZMem(uiNumDMA * sizeof(struct dma_async_tx_descriptor*));
++	PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsDescriptors, eError, e0);
++
++	psOSCleanup->ppsDescriptorsSparse = OSAllocZMem(uiNumDMA * sizeof(struct dma_async_tx_descriptor*));
++	PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsDescriptorsSparse, eError, e11);
++
++	psOSCleanup->ppsSg = OSAllocZMem(uiNumDMA * sizeof(struct sg_table*));
++	PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsSg, eError, e1);
++
++	psOSCleanup->ppsSgSparse = OSAllocZMem(uiNumDMA *
sizeof(struct sg_table*)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->ppsSgSparse, eError, e12); ++ ++ psOSCleanup->pbIsSparse = OSAllocZMem(uiNumDMA * sizeof(IMG_BOOL)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->pbIsSparse, eError, e13); ++ ++ psOSCleanup->uiNumValidPages = OSAllocZMem(uiNumDMA * sizeof(IMG_UINT)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->uiNumValidPages, eError, e14); ++ ++ psOSCleanup->pages = OSAllocZMem(uiNumDMA * sizeof(struct page **)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->pages, eError, e2); ++ ++ psOSCleanup->puiNumPages = OSAllocZMem(uiNumDMA * sizeof(IMG_UINT32)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSCleanup->puiNumPages, eError, e3); ++ ++ *pvOutData = psOSCleanup; ++ ++ return PVRSRV_OK; ++ ++e3: ++ OSFreeMem(psOSCleanup->pages); ++e2: ++ OSFreeMem(psOSCleanup->uiNumValidPages); ++e14: ++ OSFreeMem(psOSCleanup->pbIsSparse); ++e13: ++ OSFreeMem(psOSCleanup->ppsSgSparse); ++e12: ++ OSFreeMem(psOSCleanup->ppsSg); ++e1: ++ OSFreeMem(psOSCleanup->ppsDescriptorsSparse); ++e11: ++ OSFreeMem(psOSCleanup->ppsDescriptors); ++e0: ++ OSFreeMem(psOSCleanup); ++ return eError; ++} ++ ++/*************************************************************************/ /*! ++@Function OSDmaTransfer ++@Description This API is used to ask OS to perform a DMA transfer operation ++@Return ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, ++ void* pvChan, ++ IMG_DMA_ADDR* psDmaAddr, IMG_UINT64* puiAddress, ++ IMG_UINT64 uiSize, IMG_BOOL bMemToDev, ++ void* pvOSData, ++ void* pvServerCleanupParam, PFN_SERVER_CLEANUP pfnServerCleanup, IMG_BOOL bFirst) ++{ ++ ++ IMG_INT iRet; ++ PVRSRV_ERROR eError; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; ++ OS_CLEANUP_DATA* psOSCleanupData = pvOSData; ++ ++ struct dma_slave_config sConfig = {0}; ++ struct dma_async_tx_descriptor *psDesc; ++ ++ unsigned long offset = (unsigned long)puiAddress & ((1 << PAGE_SHIFT) - 1); ++ unsigned int num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ int num_pinned_pages = 0; ++ unsigned int gup_flags = 0; ++ ++ struct sg_table *psSg = OSAllocZMem(sizeof(struct sg_table)); ++ PVR_LOG_GOTO_IF_NOMEM(psSg, eError, e0); ++ ++ psOSCleanupData->pages[psOSCleanupData->uiCount] = OSAllocZMem(num_pages * sizeof(struct page *)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->pages[psOSCleanupData->uiCount], eError, e1); ++ ++ gup_flags |= bMemToDev ? 
0 : FOLL_WRITE;
++
++	num_pinned_pages = get_user_pages_fast(
++			(unsigned long)puiAddress,
++			(int)num_pages,
++			gup_flags,
++			psOSCleanupData->pages[psOSCleanupData->uiCount]);
++	if (num_pinned_pages != num_pages)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast failed: (%d - %u)", num_pinned_pages, num_pages));
++		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++		goto e2;
++	}
++
++#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA)
++	DMADumpPhysicalAddresses(psOSCleanupData->pages[psOSCleanupData->uiCount],
++	                         num_pages, psDmaAddr, offset);
++#endif
++
++	psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = num_pinned_pages;
++
++	if (sg_alloc_table_from_pages(psSg, psOSCleanupData->pages[psOSCleanupData->uiCount], num_pages, offset, uiSize, GFP_KERNEL) != 0)
++	{
++		eError = PVRSRV_ERROR_BAD_MAPPING;
++		PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table_from_pages failed"));
++		goto e3;
++	}
++
++	if (bMemToDev)
++	{
++		sConfig.direction = DMA_MEM_TO_DEV;
++		sConfig.src_addr = 0;
++		sConfig.dst_addr = psDmaAddr->uiAddr;
++	}
++	else
++	{
++		sConfig.direction = DMA_DEV_TO_MEM;
++		sConfig.src_addr = psDmaAddr->uiAddr;
++		sConfig.dst_addr = 0;
++	}
++	dmaengine_slave_config(pvChan, &sConfig);
++
++	iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
++	if (!iRet)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__));
++		eError = PVRSRV_ERROR_INVALID_PARAMS;
++		goto e4;
++	}
++
++	dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, sConfig.direction);
++
++	psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0);
++	if (!psDesc)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__));
++		eError = PVRSRV_ERROR_INVALID_PARAMS;
++		goto e5;
++	}
++
++	psOSCleanupData->eDirection = sConfig.direction;
++	psOSCleanupData->ppsSg[psOSCleanupData->uiCount] = psSg;
++	psOSCleanupData->pfnServerCleanup = pfnServerCleanup;
++	psOSCleanupData->pvServerCleanupData = pvServerCleanupParam;
++
++	psDesc->callback_param = psOSCleanupData;
++	psDesc->callback = dma_callback;
++
++	if (bFirst)
++	{
++		struct task_struct* t1;
++		t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread");
++	}
++	psOSCleanupData->ppsDescriptors[psOSCleanupData->uiCount] = psDesc;
++
++	psOSCleanupData->uiCount++;
++
++	return PVRSRV_OK;
++
++e5:
++	dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
++e4:
++	sg_free_table(psSg);
++e3:
++	{
++		IMG_UINT32 i;
++		/* Unpin pages */
++		for (i=0; i<psOSCleanupData->puiNumPages[psOSCleanupData->uiCount]; i++)
++		{
++			put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][i]);
++		}
++	}
++e2:
++	OSFreeMem(psOSCleanupData->pages[psOSCleanupData->uiCount]);
++e1:
++	OSFreeMem(psSg);
++e0:
++	return eError;
++}
++
++static IMG_UINT32
++CalculateValidPages(IMG_BOOL *pbValid,
++                    IMG_UINT32 ui32SizeInPages)
++{
++	IMG_UINT32 ui32nValid;
++	IMG_UINT32 ui32Idx;
++
++	for (ui32Idx = 0, ui32nValid = 0; ui32Idx < ui32SizeInPages; ui32Idx++)
++	{
++		ui32nValid += pbValid[ui32Idx] ?
1 : 0; ++ } ++ ++ return ui32nValid; ++} ++ ++PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, ++ void* pvChan, ++ IMG_DMA_ADDR* psDmaAddr, ++ IMG_BOOL *pbValid, ++ IMG_UINT64* puiAddress, ++ IMG_UINT64 uiSize, ++ IMG_UINT32 uiOffsetInFirstPMRPage, ++ IMG_UINT32 ui32SizeInPages, ++ IMG_BOOL bMemToDev, ++ void* pvOSData, ++ void* pvServerCleanupParam, ++ PFN_SERVER_CLEANUP pfnServerCleanup, ++ IMG_BOOL bFirst) ++{ ++ ++ IMG_INT iRet; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; ++ OS_CLEANUP_DATA* psOSCleanupData = pvOSData; ++ IMG_UINT32 ui32PageSize = OSGetPageSize(); ++ void *pvNextAddress = puiAddress; ++ IMG_UINT32 ui32Idx; ++ IMG_INT32 i32Rwd; ++ ++ struct dma_slave_config sConfig = {0}; ++ struct dma_async_tx_descriptor *psDesc; ++ ++ unsigned long offset = (unsigned long)puiAddress & ((1 << PAGE_SHIFT) - 1); ++ unsigned int num_pages = (uiSize + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ unsigned int num_valid_pages = CalculateValidPages(pbValid, ui32SizeInPages); ++ unsigned int num_pinned_pages = 0; ++ unsigned int gup_flags = 0; ++ unsigned int valid_idx; ++ size_t transfer_size; ++ struct page ** next_pages; ++ struct sg_table *psSg; ++ ++ psOSCleanupData->uiNumValidPages[psOSCleanupData->uiCount] = num_valid_pages; ++ psOSCleanupData->pbIsSparse[psOSCleanupData->uiCount] = IMG_TRUE; ++ ++ /* ++ * If an SG transfer from virtual memory to card memory goes over a page boundary in ++ * main memory, it'll span two different pages - therefore, total number of pages to ++ * keep track of should be twice as many as for a simple transfer. This twice-as-big ++ * allocation is also necessary because the same virtual memory page might be present ++ * in more than one SG DMA transfer, because of differences in first-page offset between ++ * the sparse device PMR and the virtual memory buffer. ++ */ ++ psOSCleanupData->pages[psOSCleanupData->uiCount] = OSAllocZMem(2*num_valid_pages * sizeof(struct page *)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->pages[psOSCleanupData->uiCount], eError, e0); ++ ++ psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount] = OSAllocZMem(num_valid_pages * sizeof(struct sg_table *)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount], eError, e1); ++ ++ psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount] = OSAllocZMem(num_valid_pages * sizeof(struct dma_async_tx_descriptor *)); ++ PVR_LOG_GOTO_IF_NOMEM(psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount], eError, e11); ++ ++ gup_flags |= bMemToDev ? 0 : FOLL_WRITE; ++ ++ for (ui32Idx = 0, valid_idx = 0; ui32Idx < ui32SizeInPages; ui32Idx++) ++ { ++ if (valid_idx == num_valid_pages) ++ { ++ break; ++ } ++ if (!pbValid[ui32Idx]) ++ { ++ pvNextAddress += (ui32Idx == 0) ? 
ui32PageSize - uiOffsetInFirstPMRPage : ui32PageSize; ++ continue; ++ } ++ ++ /* Pick transfer size */ ++ if (ui32Idx == 0) ++ { ++ if (uiOffsetInFirstPMRPage + uiSize <= ui32PageSize) ++ { ++ PVR_ASSERT(num_valid_pages == 1); ++ transfer_size = uiSize; ++ } ++ else ++ { ++ transfer_size = ui32PageSize - uiOffsetInFirstPMRPage; ++ } ++ } ++ else ++ { ++ /* Last valid LMA page */ ++ if (valid_idx == num_valid_pages - 1) ++ { ++ transfer_size = ((uiOffsetInFirstPMRPage + uiSize - 1) % ui32PageSize) + 1; ++ } ++ else ++ { ++ transfer_size = ui32PageSize; ++ } ++ } ++ ++ if (((unsigned long long)pvNextAddress & (ui32PageSize - 1)) + transfer_size > ui32PageSize) ++ { ++ num_pages = 2; ++ } ++ else ++ { ++ num_pages = 1; ++ } ++ ++ next_pages = psOSCleanupData->pages[psOSCleanupData->uiCount] + (valid_idx * 2); ++ ++ num_pinned_pages = get_user_pages_fast( ++ (unsigned long)pvNextAddress, ++ (int)num_pages, ++ gup_flags, ++ next_pages); ++ if (num_pinned_pages != num_pages) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "get_user_pages_fast for sparse failed: (%d - %u)", num_pinned_pages, num_pages)); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e2; ++ } ++ ++#if defined(SUPPORT_VALIDATION) && defined(PVRSRV_DEBUG_DMA) ++ DMADumpPhysicalAddresses(next_pages, num_pages, ++ &psDmaAddr[ui32Idx], ++ (unsigned long)pvNextAddress & (ui32PageSize - 1)); ++#endif ++ ++ psSg = OSAllocZMem(sizeof(struct sg_table)); ++ PVR_LOG_GOTO_IF_NOMEM(psSg, eError, e3); ++ ++ if (sg_alloc_table_from_pages(psSg, next_pages, num_pages, ++ (unsigned long)pvNextAddress & (ui32PageSize - 1), ++ transfer_size, ++ GFP_KERNEL) != 0) ++ { ++ eError = PVRSRV_ERROR_BAD_MAPPING; ++ PVR_DPF((PVR_DBG_ERROR, "sg_alloc_table_from_pages failed")); ++ goto e4; ++ } ++ ++ pvNextAddress += transfer_size; ++ ++ if (bMemToDev) ++ { ++ sConfig.direction = DMA_MEM_TO_DEV; ++ sConfig.src_addr = 0; ++ sConfig.dst_addr = psDmaAddr[ui32Idx].uiAddr; ++ } ++ else ++ { ++ sConfig.direction = DMA_DEV_TO_MEM; ++ sConfig.src_addr = psDmaAddr[ui32Idx].uiAddr; ++ sConfig.dst_addr = 0; ++ } ++ dmaengine_slave_config(pvChan, &sConfig); ++ ++ iRet = dma_map_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); ++ if (!iRet) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Error mapping SG list", __func__)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto e5; ++ } ++ dma_sync_sg_for_device(psDevConfig->pvOSDevice, psSg->sgl,(unsigned int)iRet, sConfig.direction); ++ ++ psDesc = dmaengine_prep_slave_sg(pvChan, psSg->sgl, (unsigned int)iRet, sConfig.direction, 0); ++ if (!psDesc) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: dmaengine_prep_slave_sg failed", __func__)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto e6; ++ } ++ ++ psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][valid_idx] = psSg; ++ psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount][valid_idx] = psDesc; ++ psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] = ++valid_idx; ++ ++ if (valid_idx == num_valid_pages) ++ { ++ psDesc->callback_param = psOSCleanupData; ++ psDesc->callback = dma_callback; ++ ++ if (bFirst) ++ { ++ struct task_struct* t1; ++ ++ psOSCleanupData->eDirection = sConfig.direction; ++ psOSCleanupData->pfnServerCleanup = pfnServerCleanup; ++ psOSCleanupData->pvServerCleanupData = pvServerCleanupParam; ++ ++ t1 = kthread_run(cleanup_thread, psOSCleanupData, "dma-cleanup-thread"); ++ } ++ ++ psOSCleanupData->uiCount++; ++ } ++ ++ } ++ ++ return PVRSRV_OK; ++ ++e6: ++ dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction); ++e5: ++ 
sg_free_table(psSg);
++e4:
++    OSFreeMem(psSg);
++e3:
++    /* Unpin the page(s) pinned for the current (failed) segment; they were
++     * stored at index valid_idx * 2 (see next_pages above) */
++    put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx * 2]);
++    if (psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx * 2 + 1])
++    {
++        put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][valid_idx * 2 + 1]);
++    }
++e2:
++    /* rewind: undo the DMA mappings and SG tables built so far... */
++    for (i32Rwd = valid_idx - 1; i32Rwd >= 0; i32Rwd--)
++    {
++        psSg = psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount][i32Rwd];
++        dma_unmap_sg(psDevConfig->pvOSDevice, psSg->sgl, psSg->nents, sConfig.direction);
++        sg_free_table(psSg);
++    }
++    /* ...then unpin every page pinned for the completed segments. This is
++     * done once, outside the loop above: unpinning per rewound segment
++     * would call put_page() repeatedly on the same pages */
++    {
++        IMG_UINT32 i;
++        for (i = 0; i < psOSCleanupData->puiNumPages[psOSCleanupData->uiCount] * 2; i++)
++        {
++            if (psOSCleanupData->pages[psOSCleanupData->uiCount][i])
++            {
++                put_page(psOSCleanupData->pages[psOSCleanupData->uiCount][i]);
++            }
++        }
++    }
++    OSFreeMem(psOSCleanupData->ppsDescriptorsSparse[psOSCleanupData->uiCount]);
++e11:
++    OSFreeMem(psOSCleanupData->ppsSgSparse[psOSCleanupData->uiCount]);
++e1:
++    OSFreeMem(psOSCleanupData->pages[psOSCleanupData->uiCount]);
++e0:
++    return eError;
++}
++
++#endif /* SUPPORT_DMA_TRANSFER */
+diff --git a/drivers/gpu/drm/img-rogue/osfunc.h b/drivers/gpu/drm/img-rogue/osfunc.h
+new file mode 100644
+index 000000000000..4e6f879b42f4
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/osfunc.h
+@@ -0,0 +1,1690 @@
++/*************************************************************************/ /*!
++@File
++@Title          OS functions header
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    OS specific API definitions
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifdef DEBUG_RELEASE_BUILD
++#pragma optimize( "", off )
++#define DEBUG 1
++#endif
++
++#ifndef OSFUNC_H
++/*! @cond Doxygen_Suppress */
++#define OSFUNC_H
++/*! @endcond */
++
++#if defined(__linux__) && defined(__KERNEL__)
++#include "kernel_nospec.h"
++#if !defined(NO_HARDWARE)
++#include <asm/io.h>
++
++#endif
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
++#include <linux/stdarg.h>
++#else
++#include <stdarg.h>
++#endif
++
++#if defined(__QNXNTO__)
++#include <stdarg.h>
++#include <stdio.h>
++#endif
++
++#if defined(INTEGRITY_OS)
++#include <stdarg.h>
++#include <stdio.h>
++#endif
++
++#include "img_types.h"
++#include "img_defs.h"
++#include "device.h"
++#include "pvrsrv_device.h"
++#include "cache_ops.h"
++#include "osfunc_common.h"
++#if defined(SUPPORT_DMA_TRANSFER)
++#include "dma_km.h"
++#include "pmr.h"
++#endif
++
++/******************************************************************************
++ * Static defines
++ *****************************************************************************/
++/*!
++ * Returned by OSGetCurrentProcessID() and OSGetCurrentThreadID() if the OS
++ * is currently operating in the interrupt context.
++ */
++#define KERNEL_ID 0xffffffffL
++
++#if defined(__linux__) && defined(__KERNEL__)
++#define OSConfineArrayIndexNoSpeculation(index, size) array_index_nospec((index), (size))
++#elif defined(__QNXNTO__)
++#define OSConfineArrayIndexNoSpeculation(index, size) (index)
++#define PVRSRV_MISSING_NO_SPEC_IMPL
++#elif defined(INTEGRITY_OS)
++#define OSConfineArrayIndexNoSpeculation(index, size) (index)
++#define PVRSRV_MISSING_NO_SPEC_IMPL
++#else
++/*************************************************************************/ /*!
++@Function       OSConfineArrayIndexNoSpeculation
++@Description    This macro aims to avoid code exposure to Cache Timing
++                Side-Channel Mechanisms which rely on speculative code
++                execution (Variant 1). It does so by ensuring a value to be
++                used as an array index will be set to zero if outside of the
++                bounds of the array, meaning any speculative execution of code
++                which uses this suitably adjusted index value will not then
++                attempt to load data from memory outside of the array bounds.
++                Code calling this macro must still first verify that the
++                original unmodified index value is within the bounds of the
++                array, and should then only use the modified value returned
++                by this function when accessing the array itself.
++                NB. If no OS-specific implementation of this macro is
++                defined, the original index is returned unmodified and no
++                protection against the potential exploit is provided.
++@Input          index    The original array index value that would be used to
++                         access the array.
++@Input          size     The number of elements in the array being accessed.
++@Return         The value to use for the array index, modified so that it
++                remains within array bounds.
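++
++                A minimal usage sketch (the table name, its size constant and
++                the index variable here are illustrative, not part of this
++                API):
++
++                    if (uiIdx < UI_TABLE_SIZE)
++                    {
++                        uiIdx = OSConfineArrayIndexNoSpeculation(uiIdx,
++                                                                 UI_TABLE_SIZE);
++                        uiValue = auiTable[uiIdx];
++                    }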
++*/ /**************************************************************************/ ++#define OSConfineArrayIndexNoSpeculation(index, size) (index) ++#if !defined(DOXYGEN) ++#define PVRSRV_MISSING_NO_SPEC_IMPL ++#endif ++#endif ++ ++/*************************************************************************/ /*! ++@Function OSClockns64 ++@Description This function returns the number of ticks since system boot ++ expressed in nanoseconds. Unlike OSClockns, OSClockns64 has ++ a near 64-bit range. ++@Return The 64-bit clock value, in nanoseconds. ++*/ /**************************************************************************/ ++IMG_UINT64 OSClockns64(void); ++ ++/*************************************************************************/ /*! ++@Function OSClockus64 ++@Description This function returns the number of ticks since system boot ++ expressed in microseconds. Unlike OSClockus, OSClockus64 has ++ a near 64-bit range. ++@Return The 64-bit clock value, in microseconds. ++*/ /**************************************************************************/ ++IMG_UINT64 OSClockus64(void); ++ ++/*************************************************************************/ /*! ++@Function OSClockus ++@Description This function returns the number of ticks since system boot ++ in microseconds. ++@Return The 32-bit clock value, in microseconds. ++*/ /**************************************************************************/ ++IMG_UINT32 OSClockus(void); ++ ++/*************************************************************************/ /*! ++@Function OSClockms ++@Description This function returns the number of ticks since system boot ++ in milliseconds. ++@Return The 32-bit clock value, in milliseconds. ++*/ /**************************************************************************/ ++IMG_UINT32 OSClockms(void); ++ ++/*************************************************************************/ /*! ++@Function OSClockMonotonicns64 ++@Description This function returns a clock value based on the system ++ monotonic clock. ++@Output pui64Time The 64-bit clock value, in nanoseconds. ++@Return Error Code. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time); ++ ++/*************************************************************************/ /*! ++@Function OSClockMonotonicus64 ++@Description This function returns a clock value based on the system ++ monotonic clock. ++@Output pui64Time The 64-bit clock value, in microseconds. ++@Return Error Code. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time); ++ ++/*************************************************************************/ /*! ++@Function OSClockMonotonicRawns64 ++@Description This function returns a clock value based on the system ++ monotonic raw clock. ++@Return 64bit ns timestamp ++*/ /**************************************************************************/ ++IMG_UINT64 OSClockMonotonicRawns64(void); ++ ++/*************************************************************************/ /*! ++@Function OSClockMonotonicRawus64 ++@Description This function returns a clock value based on the system ++ monotonic raw clock. ++@Return 64bit us timestamp ++*/ /**************************************************************************/ ++IMG_UINT64 OSClockMonotonicRawus64(void); ++ ++/*************************************************************************/ /*! 
++@Function       OSGetPageSize
++@Description    This function returns the page size.
++                If the OS is not using memory mappings it should return a
++                default value of 4096.
++@Return         The size of a page, in bytes.
++*/ /**************************************************************************/
++size_t OSGetPageSize(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetPageShift
++@Description    This function returns the page size expressed as a power of
++                two. A number of pages, left-shifted by this value, gives the
++                equivalent size in bytes.
++                If the OS is not using memory mappings it should return a
++                default value of 12.
++@Return         The page size expressed as a power of two.
++*/ /**************************************************************************/
++size_t OSGetPageShift(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetPageMask
++@Description    This function returns a bitmask that may be applied to an
++                address to mask off the least-significant bits so as to
++                leave the start address of the page containing that address.
++@Return         The page mask.
++*/ /**************************************************************************/
++size_t OSGetPageMask(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetOrder
++@Description    This function returns the order of power of two for a given
++                size. E.g. for a uSize of 4096 bytes the function would
++                return 12 (4096 = 2^12).
++@Input          uSize    The size in bytes.
++@Return         The order of power of two.
++*/ /**************************************************************************/
++size_t OSGetOrder(size_t uSize);
++
++/*************************************************************************/ /*!
++@Function       OSGetRAMSize
++@Description    This function returns the total amount of GPU-addressable
++                memory provided by the system. In other words, after loading
++                the driver this would be the largest allocation an
++                application would reasonably expect to be able to make.
++                Note that this function is not expected to return the
++                currently available memory but the amount which would be
++                available on startup.
++@Return         Total GPU-addressable memory size, in bytes.
++*/ /**************************************************************************/
++IMG_UINT64 OSGetRAMSize(void);
++
++/*************************************************************************/ /*!
++@Description    Pointer to a Mid-level Interrupt Service Routine (MISR).
++@Input          pvData    Pointer to MISR specific data.
++*/ /**************************************************************************/
++typedef void (*PFN_MISR)(void *pvData);
++
++/*************************************************************************/ /*!
++@Description    Pointer to a thread entry point function.
++@Input          pvData    Pointer to thread specific data.
++*/ /**************************************************************************/
++typedef void (*PFN_THREAD)(void *pvData);
++
++/*************************************************************************/ /*!
++@Function       OSChangeSparseMemCPUAddrMap
++@Description    This function changes the CPU mapping of the underlying
++                sparse allocation. It is used by a PMR 'factory'
++                implementation if that factory supports sparse
++                allocations.
++@Input          psPageArray         array representing the pages in the
++                                    sparse allocation
++@Input          sCpuVAddrBase       the virtual base address of the sparse
++                                    allocation ('first' page)
++@Input          sCpuPAHeapBase      the physical address of the virtual
++                                    base address 'sCpuVAddrBase'
++@Input          ui32AllocPageCount  the number of pages referenced in
++                                    'pai32AllocIndices'
++@Input          pai32AllocIndices   list of indices of pages within
++                                    'psPageArray' that we now want to
++                                    allocate and map
++@Input          ui32FreePageCount   the number of pages referenced in
++                                    'pai32FreeIndices'
++@Input          pai32FreeIndices    list of indices of pages within
++                                    'psPageArray' that we now want to
++                                    unmap and free
++@Input          bIsLMA              flag indicating if the sparse allocation
++                                    is from LMA or UMA memory
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
++                                         IMG_UINT64 sCpuVAddrBase,
++                                         IMG_CPU_PHYADDR sCpuPAHeapBase,
++                                         IMG_UINT32 ui32AllocPageCount,
++                                         IMG_UINT32 *pai32AllocIndices,
++                                         IMG_UINT32 ui32FreePageCount,
++                                         IMG_UINT32 *pai32FreeIndices,
++                                         IMG_BOOL bIsLMA);
++
++/*************************************************************************/ /*!
++@Function       OSInstallMISR
++@Description    Installs a Mid-level Interrupt Service Routine (MISR)
++                which handles higher-level processing of interrupts from
++                the device (GPU).
++                An MISR runs outside of interrupt context, and so may be
++                descheduled. This means it can contain code that would
++                not be permitted in the LISR.
++                An MISR is invoked when OSScheduleMISR() is called. This
++                call should be made by the installed LISR once it has
++                completed its interrupt processing.
++                Multiple MISRs may be installed by the driver to handle
++                different causes of interrupt.
++@Input          pfnMISR     pointer to the function to be installed
++                            as the MISR
++@Input          hData       private data provided to the MISR
++@Input          pszMisrName Name describing purpose of MISR worker thread
++                            (Must be a string literal).
++@Output         hMISRData   handle to the installed MISR (to be used
++                            for a subsequent uninstall)
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData,
++                           PFN_MISR pfnMISR,
++                           void *hData,
++                           const IMG_CHAR *pszMisrName);
++
++/*************************************************************************/ /*!
++@Function       OSUninstallMISR
++@Description    Uninstalls a Mid-level Interrupt Service Routine (MISR).
++@Input          hMISRData   handle to the installed MISR
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData);
++
++/*************************************************************************/ /*!
++@Function       OSScheduleMISR
++@Description    Schedules a Mid-level Interrupt Service Routine (MISR) to be
++                executed. An MISR should be executed outside of interrupt
++                context, for example in a work queue.
++@Input          hMISRData   handle to the installed MISR
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData);
++
++/*************************************************************************/ /*!
++@Description    Pointer to a function implementing debug dump of thread-specific
++                data.
++@Input          pfnDumpDebugPrintf  Used to specify the print function used
++                                    to dump any debug information. If this
++                                    argument is NULL then a default print
++                                    function will be used.
++@Input          pvDumpDebugFile     File identifier to be passed to the
++                                    print function if specified.
++*/ /**************************************************************************/
++
++typedef void (*PFN_THREAD_DEBUG_DUMP)(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
++                                      void *pvDumpDebugFile);
++
++/*************************************************************************/ /*!
++@Function       OSThreadCreate
++@Description    Creates a kernel thread and starts it running. The caller
++                is responsible for informing the thread that it must finish
++                and return from the pfnThread function. It is not possible
++                to kill or terminate it. The new thread runs with the default
++                priority provided by the Operating System.
++                Note: Kernel threads are freezable, which means that they
++                can be frozen by the kernel on, for example, driver suspend.
++                Because of that, only the OSEventObjectWaitKernel() function
++                should be used to put kernel threads in a waiting state.
++@Output         phThread            Returned handle to the thread.
++@Input          pszThreadName       Name to assign to the thread.
++@Input          pfnThread           Thread entry point function.
++@Input          pfnDebugDumpCB      Used to dump info of the created thread
++@Input          bIsSupportingThread Set if a summary of this thread needs to
++                                    be dumped in debug_dump
++@Input          hData               Thread specific data pointer for pfnThread().
++@Return         Standard PVRSRV_ERROR error code.
++*/ /**************************************************************************/
++
++PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
++                            IMG_CHAR *pszThreadName,
++                            PFN_THREAD pfnThread,
++                            PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
++                            IMG_BOOL bIsSupportingThread,
++                            void *hData);
++
++/*! Available priority levels for the creation of a new Kernel Thread. */
++typedef enum priority_levels
++{
++    OS_THREAD_NOSET_PRIORITY = 0,   /* With this option the priority level is the default for the given OS */
++    OS_THREAD_HIGHEST_PRIORITY,
++    OS_THREAD_HIGH_PRIORITY,
++    OS_THREAD_NORMAL_PRIORITY,
++    OS_THREAD_LOW_PRIORITY,
++    OS_THREAD_LOWEST_PRIORITY,
++    OS_THREAD_LAST_PRIORITY         /* This must be always the last entry */
++} OS_THREAD_LEVEL;
++
++/*************************************************************************/ /*!
++@Function       OSThreadCreatePriority
++@Description    Like OSThreadCreate, this function creates a kernel thread
++                and starts it running. The difference is that with this
++                function it is possible to specify the priority used to
++                schedule the new thread.
++
++@Output         phThread            Returned handle to the thread.
++@Input          pszThreadName       Name to assign to the thread.
++@Input          pfnThread           Thread entry point function.
++@Input          pfnDebugDumpCB      Used to dump info of the created thread
++@Input          bIsSupportingThread Set if a summary of this thread needs to
++                                    be dumped in debug_dump
++@Input          hData               Thread specific data pointer for pfnThread().
++@Input          eThreadPriority     Priority level to assign to the new thread.
++@Return         Standard PVRSRV_ERROR error code.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
++                                    IMG_CHAR *pszThreadName,
++                                    PFN_THREAD pfnThread,
++                                    PFN_THREAD_DEBUG_DUMP pfnDebugDumpCB,
++                                    IMG_BOOL bIsSupportingThread,
++                                    void *hData,
++                                    OS_THREAD_LEVEL eThreadPriority);
++
++/*************************************************************************/ /*!
++@Function OSThreadDestroy ++@Description Waits for the thread to end and then destroys the thread ++ handle memory. This function will block and wait for the ++ thread to finish successfully, thereby providing a sync point ++ for the thread completing its work. No attempt is made to kill ++ or otherwise terminate the thread. ++@Input hThread The thread handle returned by OSThreadCreate(). ++@Return Standard PVRSRV_ERROR error code. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread); ++ ++/*************************************************************************/ /*! ++@Function OSMapPhysToLin ++@Description Maps physical memory into a linear address range. ++@Input BasePAddr physical CPU address ++@Input ui32Bytes number of bytes to be mapped ++@Input uiFlags flags denoting the caching mode to be employed ++ for the mapping (uncached/write-combined, ++ cached coherent or cached incoherent). ++ See pvrsrv_memallocflags.h for full flag bit ++ definitions. ++@Return Pointer to the new mapping if successful, NULL otherwise. ++*/ /**************************************************************************/ ++void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, size_t ui32Bytes, PVRSRV_MEMALLOCFLAGS_T uiFlags); ++ ++/*************************************************************************/ /*! ++@Function OSUnMapPhysToLin ++@Description Unmaps physical memory previously mapped by OSMapPhysToLin(). ++@Input pvLinAddr the linear mapping to be unmapped ++@Input ui32Bytes number of bytes to be unmapped ++@Return IMG_TRUE if unmapping was successful, IMG_FALSE otherwise. ++*/ /**************************************************************************/ ++IMG_BOOL OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes); ++ ++/*************************************************************************/ /*! ++@Function OSCPUCacheFlushRangeKM ++@Description Clean and invalidate the CPU cache for the specified ++ address range. ++@Input psDevNode device on which the allocation was made ++@Input pvVirtStart virtual start address of the range to be ++ flushed ++@Input pvVirtEnd virtual end address of the range to be ++ flushed ++@Input sCPUPhysStart physical start address of the range to be ++ flushed ++@Input sCPUPhysEnd physical end address of the range to be ++ flushed ++@Return None ++*/ /**************************************************************************/ ++void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd); ++ ++/*************************************************************************/ /*! ++@Function OSCPUCacheCleanRangeKM ++@Description Clean the CPU cache for the specified address range. ++ This writes out the contents of the cache and clears the ++ 'dirty' bit (which indicates the physical memory is ++ consistent with the cache contents). 
++@Input psDevNode device on which the allocation was made ++@Input pvVirtStart virtual start address of the range to be ++ cleaned ++@Input pvVirtEnd virtual end address of the range to be ++ cleaned ++@Input sCPUPhysStart physical start address of the range to be ++ cleaned ++@Input sCPUPhysEnd physical end address of the range to be ++ cleaned ++@Return None ++*/ /**************************************************************************/ ++void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd); ++ ++/*************************************************************************/ /*! ++@Function OSCPUCacheInvalidateRangeKM ++@Description Invalidate the CPU cache for the specified address range. ++ The cache must reload data from those addresses if they ++ are accessed. ++@Input psDevNode device on which the allocation was made ++@Input pvVirtStart virtual start address of the range to be ++ invalidated ++@Input pvVirtEnd virtual end address of the range to be ++ invalidated ++@Input sCPUPhysStart physical start address of the range to be ++ invalidated ++@Input sCPUPhysEnd physical end address of the range to be ++ invalidated ++@Return None ++*/ /**************************************************************************/ ++void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd); ++ ++/*! CPU Cache operations address domain type */ ++typedef enum ++{ ++ OS_CACHE_OP_ADDR_TYPE_VIRTUAL, /*!< Operation requires CPU virtual address only */ ++ OS_CACHE_OP_ADDR_TYPE_PHYSICAL, /*!< Operation requires CPU physical address only */ ++ OS_CACHE_OP_ADDR_TYPE_BOTH /*!< Operation requires both CPU virtual & physical addresses */ ++} OS_CACHE_OP_ADDR_TYPE; ++ ++/*************************************************************************/ /*! ++@Function OSCPUCacheOpAddressType ++@Description Returns the address type (i.e. virtual/physical/both) the CPU ++ architecture performs cache maintenance operations under. ++ This is used to infer whether the virtual or physical address ++ supplied to the OSCPUCacheXXXRangeKM functions can be omitted ++ when called. ++@Return OS_CACHE_OP_ADDR_TYPE ++*/ /**************************************************************************/ ++OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void); ++ ++/*! CPU Cache attributes available for retrieval, DCache unless specified */ ++typedef enum _OS_CPU_CACHE_ATTRIBUTE_ ++{ ++ OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE, /*!< The cache line size */ ++ OS_CPU_CACHE_ATTRIBUTE_COUNT /*!< The number of attributes (must be last) */ ++} OS_CPU_CACHE_ATTRIBUTE; ++ ++/*************************************************************************/ /*! ++@Function OSCPUCacheAttributeSize ++@Description Returns the size of a given cache attribute. ++ Typically this function is used to return the cache line ++ size, but may be extended to return the size of other ++ cache attributes. ++@Input eCacheAttribute the cache attribute whose size should ++ be returned. ++@Return The size of the specified cache attribute, in bytes. ++*/ /**************************************************************************/ ++IMG_UINT32 OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE eCacheAttribute); ++ ++/*************************************************************************/ /*! 
++@Function       OSGetCurrentProcessID
++@Description    Returns ID of current process (thread group)
++@Return         ID of current process
++*****************************************************************************/
++IMG_PID OSGetCurrentProcessID(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetCurrentVirtualProcessID
++@Description    Returns ID of current process (thread group of current
++                PID namespace)
++@Return         ID of current process in PID namespace
++*****************************************************************************/
++IMG_PID OSGetCurrentVirtualProcessID(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetCurrentProcessName
++@Description    Gets the name of current process
++@Return         Process name
++*****************************************************************************/
++IMG_CHAR *OSGetCurrentProcessName(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetCurrentProcessVASpaceSize
++@Description    Returns the CPU virtual address space size of current process
++@Return         Process VA space size
++*/ /**************************************************************************/
++IMG_UINT64 OSGetCurrentProcessVASpaceSize(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetCurrentThreadID
++@Description    Returns ID for current thread
++@Return         ID of current thread
++*****************************************************************************/
++uintptr_t OSGetCurrentThreadID(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetCurrentClientProcessIDKM
++@Description    Returns ID of current client process (thread group) which
++                has made a bridge call into the server.
++                For some operating systems, this may simply be the current
++                process id. For others, it may be that a dedicated thread
++                is used to handle the processing of bridge calls and that
++                some additional processing is required to obtain the ID of
++                the client process making the bridge call.
++@Return         ID of current client process
++*****************************************************************************/
++IMG_PID OSGetCurrentClientProcessIDKM(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetCurrentClientProcessNameKM
++@Description    Gets the name of current client process
++@Return         Client process name
++*****************************************************************************/
++IMG_CHAR *OSGetCurrentClientProcessNameKM(void);
++
++/*************************************************************************/ /*!
++@Function       OSGetCurrentClientThreadIDKM
++@Description    Returns ID for current client thread
++                For some operating systems, this may simply be the current
++                thread id. For others, it may be that a dedicated thread
++                is used to handle the processing of bridge calls and that
++                some additional processing is required to obtain the ID of
++                the client thread making the bridge call.
++@Return         ID of current client thread
++*****************************************************************************/
++uintptr_t OSGetCurrentClientThreadIDKM(void);
++
++/*************************************************************************/ /*!
++@Function       OSMemCmp
++@Description    Compares two blocks of memory for equality.
++@Input pvBufA Pointer to the first block of memory ++@Input pvBufB Pointer to the second block of memory ++@Input uiLen The number of bytes to be compared ++@Return Value < 0 if pvBufA is less than pvBufB. ++ Value > 0 if pvBufB is less than pvBufA. ++ Value = 0 if pvBufA is equal to pvBufB. ++*****************************************************************************/ ++IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen); ++ ++/*************************************************************************/ /*! ++@Function OSPhyContigPagesAlloc ++@Description Allocates a number of contiguous physical pages. ++ If allocations made by this function are CPU cached then ++ OSPhyContigPagesClean has to be implemented to write the ++ cached data to memory. ++@Input psPhysHeap the heap from which to allocate ++@Input uiSize the size of the required allocation (in bytes) ++@Output psMemHandle a returned handle to be used to refer to this ++ allocation ++@Output psDevPAddr the physical address of the allocation ++@Input uiPid the process ID that this allocation should ++ be associated with ++@Return PVRSRV_OK on success, a failure code otherwise. ++*****************************************************************************/ ++PVRSRV_ERROR OSPhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize, ++ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_PID uiPid); ++ ++/*************************************************************************/ /*! ++@Function OSPhyContigPagesFree ++@Description Frees a previous allocation of contiguous physical pages ++@Input psPhysHeap the heap from which to allocate ++@Input psMemHandle the handle of the allocation to be freed ++@Return None. ++*****************************************************************************/ ++void OSPhyContigPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle); ++ ++/*************************************************************************/ /*! ++@Function OSPhyContigPagesMap ++@Description Maps the specified allocation of contiguous physical pages ++ to a kernel virtual address ++@Input psPhysHeap the heap from which to allocate ++@Input psMemHandle the handle of the allocation to be mapped ++@Input uiSize the size of the allocation (in bytes) ++@Input psDevPAddr the physical address of the allocation ++@Output pvPtr the virtual kernel address to which the ++ allocation is now mapped ++@Return PVRSRV_OK on success, a failure code otherwise. ++*****************************************************************************/ ++PVRSRV_ERROR OSPhyContigPagesMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, ++ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, ++ void **pvPtr); ++ ++/*************************************************************************/ /*! ++@Function OSPhyContigPagesUnmap ++@Description Unmaps the kernel mapping for the specified allocation of ++ contiguous physical pages ++@Input psPhysHeap the heap from which to allocate ++@Input psMemHandle the handle of the allocation to be unmapped ++@Input pvPtr the virtual kernel address to which the ++ allocation is currently mapped ++@Return None. ++*****************************************************************************/ ++void OSPhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, void *pvPtr); ++ ++/*************************************************************************/ /*! 
++@Function OSPhyContigPagesClean ++@Description Write the content of the specified allocation from CPU cache to ++ memory from (start + uiOffset) to (start + uiOffset + uiLength) ++ It is expected to be implemented as a cache clean operation but ++ it is allowed to fall back to a cache clean + invalidate ++ (i.e. flush). ++ If allocations returned by OSPhyContigPagesAlloc are always ++ uncached this can be implemented as nop. ++@Input psPhysHeap the heap from which to allocate ++@Input psMemHandle the handle of the allocation to be flushed ++@Input uiOffset the offset in bytes from the start of the ++ allocation from where to start flushing ++@Input uiLength the amount to flush from the offset in bytes ++@Return PVRSRV_OK on success, a failure code otherwise. ++*****************************************************************************/ ++PVRSRV_ERROR OSPhyContigPagesClean(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *psMemHandle, ++ IMG_UINT32 uiOffset, ++ IMG_UINT32 uiLength); ++ ++ ++/*************************************************************************/ /*! ++@Function OSInitEnvData ++@Description Called to initialise any environment-specific data. This ++ could include initialising the bridge calling infrastructure ++ or device memory management infrastructure. ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSInitEnvData(void); ++ ++/*************************************************************************/ /*! ++@Function OSDeInitEnvData ++@Description The counterpart to OSInitEnvData(). Called to free any ++ resources which may have been allocated by OSInitEnvData(). ++@Return None. ++*/ /**************************************************************************/ ++void OSDeInitEnvData(void); ++ ++/*************************************************************************/ /*! ++@Function OSVSScanf ++@Description OS function to support the standard C vsscanf() function. ++*/ /**************************************************************************/ ++IMG_UINT32 OSVSScanf(const IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...); ++ ++/*************************************************************************/ /*! ++@Function OSStringLCat ++@Description OS function to support the BSD C strlcat() function. ++*/ /**************************************************************************/ ++size_t OSStringLCat(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDstSize); ++ ++/*************************************************************************/ /*! ++@Function OSSNPrintf ++@Description OS function to support the standard C snprintf() function. ++@Output pStr char array to print into ++@Input ui32Size maximum size of data to write (chars) ++@Input pszFormat format string ++*/ /**************************************************************************/ ++IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) __printf(3, 4); ++ ++/*************************************************************************/ /*! ++@Function OSVSNPrintf ++@Description Printf to IMG string using variable args (see stdarg.h). ++ This is necessary because the '...' notation does not ++ support nested function calls. 
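++                As a sketch, a printf-style wrapper (hypothetical, shown for
++                illustration only) would forward its arguments like this,
++                leaving the formatted text in szBuf:
++
++                    static void ExampleLog(const IMG_CHAR *pszFormat, ...)
++                    {
++                        IMG_CHAR szBuf[128];
++                        va_list vaArgs;
++
++                        va_start(vaArgs, pszFormat);
++                        (void) OSVSNPrintf(szBuf, sizeof(szBuf), pszFormat, vaArgs);
++                        va_end(vaArgs);
++                    }
++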
++@Input          ui32Size   maximum size of data to write (chars)
++@Input          pszFormat  format string
++@Input          vaArgs     variable args structure (from stdarg.h)
++@Output         pStr       char array to print into
++@Return         Number of characters written to the buffer if successful,
++                otherwise -1 on error
++*/ /**************************************************************************/
++IMG_INT32 OSVSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR* pszFormat, va_list vaArgs) __printf(3, 0);
++
++/*************************************************************************/ /*!
++@Function       OSStringLength
++@Description    OS function to support the standard C strlen() function.
++*/ /**************************************************************************/
++size_t OSStringLength(const IMG_CHAR *pStr);
++
++/*************************************************************************/ /*!
++@Function       OSStringNLength
++@Description    Return the length of a string, excluding the terminating null
++                byte ('\0'), but return at most 'uiCount' bytes. Only the first
++                'uiCount' bytes of 'pStr' are interrogated.
++@Input          pStr     pointer to the string
++@Input          uiCount  the maximum length to return
++@Return         Length of the string if less than 'uiCount' bytes, otherwise
++                'uiCount'.
++*/ /**************************************************************************/
++size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount);
++
++/*************************************************************************/ /*!
++@Function       OSStringNCompare
++@Description    OS function to support the standard C strncmp() function.
++*/ /**************************************************************************/
++IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
++                           size_t uiSize);
++
++/*************************************************************************/ /*!
++@Function       OSStringToUINT32
++@Description    Converts a string to IMG_UINT32.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
++                              IMG_UINT32 *ui32Result);
++
++/*************************************************************************/ /*!
++@Function       OSStringUINT32ToStr
++@Description    Converts an IMG_UINT32 to a string
++@Input          pszBuf   Buffer to write output number string
++@Input          uSize    Size of buffer provided, i.e. size of pszBuf
++@Input          ui32Num  Number to convert to string
++@Return         Returns 0 if the buffer is not sufficient to hold the number
++                string, else returns the length of the number string
++*/ /**************************************************************************/
++IMG_UINT32 OSStringUINT32ToStr(IMG_CHAR *pszBuf, size_t uSize, IMG_UINT32 ui32Num);
++
++/*************************************************************************/ /*!
++@Function       OSEventObjectCreate
++@Description    Create an event object.
++@Input          pszName      name to assign to the new event object.
++@Output         EventObject  the created event object.
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
++                                 IMG_HANDLE *EventObject);
++
++/*************************************************************************/ /*!
++@Function       OSEventObjectDestroy
++@Description    Destroy an event object.
++@Input          hEventObject  the event object to destroy.
++@Return         PVRSRV_OK on success, a failure code otherwise.
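++
++                The event object calls in this header compose as follows
++                (sketch only; error handling is omitted and the handle names
++                are illustrative). The waiting thread opens its own OS handle
++                and waits; another thread wakes it with
++                OSEventObjectSignal(hEvent); finally both handles are
++                released:
++
++                    IMG_HANDLE hEvent, hOSEvent;
++                    OSEventObjectCreate("example-event", &hEvent);
++                    OSEventObjectOpen(hEvent, &hOSEvent);
++                    OSEventObjectWait(hOSEvent);
++                    OSEventObjectClose(hOSEvent);
++                    OSEventObjectDestroy(hEvent);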
++*/ /**************************************************************************/
++PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject);
++
++/*************************************************************************/ /*!
++@Function       OSEventObjectSignal
++@Description    Signal an event object. Any thread waiting on that event
++                object will be woken.
++@Input          hEventObject  the event object to signal.
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject);
++
++/*************************************************************************/ /*!
++@Function       OSEventObjectWait
++@Description    Wait for an event object to signal. The function is passed
++                an OS event object handle (which allows the OS to have the
++                calling thread wait on the associated event object).
++                The calling thread will be rescheduled when the associated
++                event object signals.
++                If the event object has not signalled after a default timeout
++                period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
++                will return with the result code PVRSRV_ERROR_TIMEOUT.
++
++@Input          hOSEventKM  the OS event object handle associated with
++                            the event object.
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
++
++/*************************************************************************/ /*!
++@Function       OSEventObjectWaitKernel
++@Description    Wait for an event object to signal. The function is passed
++                an OS event object handle (which allows the OS to have the
++                calling thread wait on the associated event object).
++                The calling thread will be rescheduled when the associated
++                event object signals.
++                If the event object has not signalled after a default timeout
++                period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
++                will return with the result code PVRSRV_ERROR_TIMEOUT.
++
++                Note: This function should be used only by kernel threads.
++                This is because all kernel threads are freezable and
++                this function allows the kernel to freeze the threads
++                when waiting.
++
++                See OSEventObjectWait() for more details.
++
++@Input          hOSEventKM  the OS event object handle associated with
++                            the event object.
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++#if defined(__linux__) && defined(__KERNEL__)
++PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
++#else
++#define OSEventObjectWaitKernel OSEventObjectWaitTimeout
++#endif
++
++/*************************************************************************/ /*!
++@Function       OSSuspendTaskInterruptible
++@Description    Suspend the current task into interruptible state.
++@Return         none.
++*/ /**************************************************************************/
++#if defined(__linux__) && defined(__KERNEL__)
++void OSSuspendTaskInterruptible(void);
++#endif
++
++/*************************************************************************/ /*!
++@Function       OSEventObjectWaitTimeout
++@Description    Wait for an event object to signal or timeout. The function
++                is passed an OS event object handle (which allows the OS to
++                have the calling thread wait on the associated event object).
++                The calling thread will be rescheduled when the associated
++                event object signals.
++ If the event object has not signalled after the specified ++ timeout period (passed in 'uiTimeoutus'), the function ++ will return with the result code PVRSRV_ERROR_TIMEOUT. ++@Input hOSEventKM the OS event object handle associated with ++ the event object. ++@Input uiTimeoutus the timeout period (in usecs) ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus); ++ ++/*************************************************************************/ /*! ++@Function OSEventObjectDumpDebugInfo ++@Description Emits debug counters/stats related to the event object passed ++@Input hOSEventKM the OS event object handle associated with ++ the event object. ++@Return None. ++*/ /**************************************************************************/ ++void OSEventObjectDumpDebugInfo(IMG_HANDLE hOSEventKM); ++ ++/*************************************************************************/ /*! ++@Function OSEventObjectOpen ++@Description Open an OS handle on the specified event object. ++ This OS handle may then be used to make a thread wait for ++ that event object to signal. ++@Input hEventObject Event object handle. ++@Output phOSEvent OS handle to the returned event object. ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject, ++ IMG_HANDLE *phOSEvent); ++ ++/*************************************************************************/ /*! ++@Function OSEventObjectClose ++@Description Close an OS handle previously opened for an event object. ++@Input hOSEventKM OS event object handle to close. ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM); ++ ++/*************************************************************************/ /*! ++@Function OSWaitus ++@Description Implements a busy wait of the specified number of microseconds. ++ This function does NOT release thread quanta. ++@Input ui32Timeus The duration of the wait period (in us) ++@Return None. ++*/ /**************************************************************************/ ++void OSWaitus(IMG_UINT32 ui32Timeus); ++ ++/*************************************************************************/ /*! ++@Function OSSleepms ++@Description Implements a sleep of the specified number of milliseconds. ++ This function may allow pre-emption, meaning the thread ++ may potentially not be rescheduled for a longer period. ++@Input ui32Timems The duration of the sleep (in ms) ++@Return None. ++*/ /**************************************************************************/ ++void OSSleepms(IMG_UINT32 ui32Timems); ++ ++/*************************************************************************/ /*! ++@Function OSReleaseThreadQuanta ++@Description Relinquishes the current thread's execution time-slice, ++ permitting the OS scheduler to schedule another thread. ++@Return None. ++*/ /**************************************************************************/ ++void OSReleaseThreadQuanta(void); ++ ++#if defined(__linux__) && defined(__KERNEL__) ++#define OSReadMemoryBarrier() rmb() ++#else ++/*************************************************************************/ /*! 
++@Function       OSReadMemoryBarrier
++@Description    Insert a read memory barrier.
++                The read memory barrier guarantees that all load (read)
++                operations specified before the barrier will appear to happen
++                before all of the load operations specified after the barrier.
++*/ /**************************************************************************/
++void OSReadMemoryBarrier(void);
++#endif
++/*************************************************************************/ /*!
++@Function       OSMemoryBarrier
++@Description    Insert a read/write memory barrier.
++                The read and write memory barrier guarantees that all load
++                (read) and all store (write) operations specified before the
++                barrier will appear to happen before all of the load/store
++                operations specified after the barrier.
++@Input          hReadback  Optional pointer to memory to read back; this can
++                           be useful for flushing queues in bus interconnects
++                           to RAM before the device (GPU) accesses the shared
++                           memory.
++@Return         None.
++*/ /**************************************************************************/
++void OSMemoryBarrier(volatile void *hReadback);
++/*************************************************************************/ /*!
++@Function       OSWriteMemoryBarrier
++@Description    Insert a write memory barrier.
++                The write memory barrier guarantees that all store operations
++                (writes) specified before the barrier will appear to happen
++                before all of the store operations specified after the barrier.
++@Input          hReadback  Optional pointer to memory to read back; this can
++                           be useful for flushing queues in bus interconnects
++                           to RAM before the device (GPU) accesses the shared
++                           memory.
++@Return         None.
++*/ /**************************************************************************/
++void OSWriteMemoryBarrier(volatile void *hReadback);
++
++/*************************************************************************/ /*!
++*/ /**************************************************************************/
++
++/* The access method is dependent on the location of the physical memory that
++ * makes up the PhyHeaps defined for the system and the CPU architecture. These
++ * macros may change in future to accommodate different access requirements.
++ */
++/*! Performs a 32 bit word read from the device memory. */
++#define OSReadDeviceMem32(addr) (*((volatile IMG_UINT32 __force *)((void*)addr)))
++/*! Performs a 32 bit word write to the device memory. */
++#define OSWriteDeviceMem32(addr, val) (*((volatile IMG_UINT32 __force *)((void*)addr)) = (IMG_UINT32)(val))
++/*!
Performs a 32 bit word write to the device memory and issues a write memory barrier */ ++#define OSWriteDeviceMem32WithWMB(addr, val) \ ++ do { \ ++ *((volatile IMG_UINT32 __force *)((void*)addr)) = (IMG_UINT32)(val); \ ++ OSWriteMemoryBarrier(addr); \ ++ } while (0) ++ ++#if defined(__linux__) && defined(__KERNEL__) && !defined(NO_HARDWARE) ++ ++ void OSWriteHWRegl(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value); ++ void OSWriteHWRegll(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value); ++ IMG_UINT32 OSReadHWRegl(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); ++ IMG_UINT64 OSReadHWRegll(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); ++ ++ #define OSReadHWReg8(addr, off) ((IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off))) ++ #define OSReadHWReg16(addr, off) ((IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off))) ++ #define OSReadHWReg32(addr, off) OSReadHWRegl(addr, off) ++ ++ /* Little endian support only */ ++ #define OSReadHWReg64(addr, off) OSReadHWRegll(addr, off) ++ ++ #define OSWriteHWReg8(addr, off, val) writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off)) ++ #define OSWriteHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off)) ++ #define OSWriteHWReg32(addr, off, val) OSWriteHWRegl(addr, off, val) ++ /* Little endian support only */ ++ #define OSWriteHWReg64(addr, off, val) OSWriteHWRegll(addr, off, val) ++ ++ ++#elif defined(NO_HARDWARE) ++ /* OSReadHWReg operations skipped in no hardware builds */ ++ #define OSReadHWReg8(addr, off) ((void)(addr), 0x4eU) ++ #define OSReadHWReg16(addr, off) ((void)(addr), 0x3a4eU) ++ #define OSReadHWReg32(addr, off) ((void)(addr), 0x30f73a4eU) ++#if defined(__QNXNTO__) && __SIZEOF_LONG__ == 8 ++ /* This is needed for 64-bit QNX builds where the size of a long is 64 bits */ ++ #define OSReadHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eUL) ++#else ++ #define OSReadHWReg64(addr, off) ((void)(addr), 0x5b376c9d30f73a4eULL) ++#endif ++ ++ #define OSWriteHWReg8(addr, off, val) ++ #define OSWriteHWReg16(addr, off, val) ++ #define OSWriteHWReg32(addr, off, val) ++ #define OSWriteHWReg64(addr, off, val) ++ ++#else ++/*************************************************************************/ /*! ++@Function OSReadHWReg8 ++@Description Read from an 8-bit memory-mapped device register. ++ The implementation should not permit the compiler to ++ reorder the I/O sequence. ++ The implementation should ensure that for a NO_HARDWARE ++ build the code does not attempt to read from a location ++ but instead returns a constant value. ++@Input pvLinRegBaseAddr The virtual base address of the register ++ block. ++@Input ui32Offset The byte offset from the base address of ++ the register to be read. ++@Return The byte read. ++*/ /**************************************************************************/ ++ IMG_UINT8 OSReadHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); ++ ++/*************************************************************************/ /*! ++@Function OSReadHWReg16 ++@Description Read from a 16-bit memory-mapped device register. ++ The implementation should not permit the compiler to ++ reorder the I/O sequence. ++ The implementation should ensure that for a NO_HARDWARE ++ build the code does not attempt to read from a location ++ but instead returns a constant value. ++@Input pvLinRegBaseAddr The virtual base address of the register ++ block. 
++@Input ui32Offset The byte offset from the base address of ++ the register to be read. ++@Return The word read. ++*/ /**************************************************************************/ ++ IMG_UINT16 OSReadHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); ++ ++/*************************************************************************/ /*! ++@Function OSReadHWReg32 ++@Description Read from a 32-bit memory-mapped device register. ++ The implementation should not permit the compiler to ++ reorder the I/O sequence. ++ The implementation should ensure that for a NO_HARDWARE ++ build the code does not attempt to read from a location ++ but instead returns a constant value. ++@Input pvLinRegBaseAddr The virtual base address of the register ++ block. ++@Input ui32Offset The byte offset from the base address of ++ the register to be read. ++@Return The long word read. ++*/ /**************************************************************************/ ++ IMG_UINT32 OSReadHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); ++ ++/*************************************************************************/ /*! ++@Function OSReadHWReg64 ++@Description Read from a 64-bit memory-mapped device register. ++ The implementation should not permit the compiler to ++ reorder the I/O sequence. ++ The implementation should ensure that for a NO_HARDWARE ++ build the code does not attempt to read from a location ++ but instead returns a constant value. ++@Input pvLinRegBaseAddr The virtual base address of the register ++ block. ++@Input ui32Offset The byte offset from the base address of ++ the register to be read. ++@Return The long long word read. ++*/ /**************************************************************************/ ++ IMG_UINT64 OSReadHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset); ++ ++/*************************************************************************/ /*! ++@Function OSWriteHWReg8 ++@Description Write to an 8-bit memory-mapped device register. ++ The implementation should not permit the compiler to ++ reorder the I/O sequence. ++ The implementation should ensure that for a NO_HARDWARE ++ build the code does not attempt to write to a location. ++@Input pvLinRegBaseAddr The virtual base address of the register ++ block. ++@Input ui32Offset The byte offset from the base address of ++ the register to be written to. ++@Input ui8Value The byte to be written to the register. ++@Return None. ++*/ /**************************************************************************/ ++ void OSWriteHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value); ++ ++/*************************************************************************/ /*! ++@Function OSWriteHWReg16 ++@Description Write to a 16-bit memory-mapped device register. ++ The implementation should not permit the compiler to ++ reorder the I/O sequence. ++ The implementation should ensure that for a NO_HARDWARE ++ build the code does not attempt to write to a location. ++@Input pvLinRegBaseAddr The virtual base address of the register ++ block. ++@Input ui32Offset The byte offset from the base address of ++ the register to be written to. ++@Input ui16Value The word to be written to the register. ++@Return None. ++*/ /**************************************************************************/ ++ void OSWriteHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value); ++ ++/*************************************************************************/ /*! 
++@Function       OSWriteHWReg32
++@Description    Write to a 32-bit memory-mapped device register.
++                The implementation should not permit the compiler to
++                reorder the I/O sequence.
++                The implementation should ensure that for a NO_HARDWARE
++                build the code does not attempt to write to a location.
++@Input          pvLinRegBaseAddr The virtual base address of the register
++                block.
++@Input          ui32Offset       The byte offset from the base address of
++                the register to be written to.
++@Input          ui32Value        The long word to be written to the register.
++@Return         None.
++*/ /**************************************************************************/
++ void OSWriteHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
++
++/*************************************************************************/ /*!
++@Function       OSWriteHWReg64
++@Description    Write to a 64-bit memory-mapped device register.
++                The implementation should not permit the compiler to
++                reorder the I/O sequence.
++                The implementation should ensure that for a NO_HARDWARE
++                build the code does not attempt to write to a location.
++@Input          pvLinRegBaseAddr The virtual base address of the register
++                block.
++@Input          ui32Offset       The byte offset from the base address of
++                the register to be written to.
++@Input          ui64Value        The long long word to be written to the
++                register.
++@Return         None.
++*/ /**************************************************************************/
++ void OSWriteHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value);
++#endif
++
++/*************************************************************************/ /*!
++@Description    Pointer to a timer callback function.
++@Input          pvData Pointer to timer specific data.
++*/ /**************************************************************************/
++typedef void (*PFN_TIMER_FUNC)(void* pvData);
++
++/*************************************************************************/ /*!
++@Function       OSAddTimer
++@Description    OS specific function to install a timer callback. The
++                timer will then need to be enabled, as it is disabled by
++                default.
++                When enabled, the callback will be invoked once the specified
++                timeout has elapsed.
++@Input          pfnTimerFunc    Timer callback
++@Input          *pvData         Callback data
++@Input          ui32MsTimeout   Callback period
++@Return         Valid handle on success, NULL on failure
++*/ /**************************************************************************/
++IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout);
++
++/*************************************************************************/ /*!
++@Function       OSRemoveTimer
++@Description    Removes the specified timer. The handle becomes invalid and
++                should no longer be used.
++@Input          hTimer          handle of the timer to be removed
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer);
++
++/*************************************************************************/ /*!
++@Function       OSEnableTimer
++@Description    Enable the specified timer. After enabling, the timer will
++                invoke the associated callback at an interval determined by
++                the configured timeout period until disabled.
++@Input          hTimer          handle of the timer to be enabled
++@Return         PVRSRV_OK on success, a failure code otherwise.
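++*/ /**************************************************************************/
++
++/* Illustrative usage sketch of the timer API above. DevWatchdogCb and
++ * StartWatchdog are hypothetical names, not part of this interface; real
++ * callers should also disable and remove the timer on their teardown path.
++ */
++#if 0
++static void DevWatchdogCb(void *pvCookie)
++{
++	/* Invoked every ui32MsTimeout milliseconds while the timer is enabled. */
++}
++
++static PVRSRV_ERROR StartWatchdog(void *pvCookie, IMG_HANDLE *phTimer)
++{
++	PVRSRV_ERROR eError;
++	IMG_HANDLE hTimer = OSAddTimer(DevWatchdogCb, pvCookie, 500);
++
++	if (hTimer == NULL)
++	{
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++
++	/* Timers are created disabled and must be enabled explicitly. */
++	eError = OSEnableTimer(hTimer);
++	if (eError != PVRSRV_OK)
++	{
++		(void) OSRemoveTimer(hTimer);
++		return eError;
++	}
++
++	*phTimer = hTimer;
++	return PVRSRV_OK;
++}
++#endif
++
++/*************************************************************************/ /*!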
++*/ /**************************************************************************/ ++PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer); ++ ++/*************************************************************************/ /*! ++@Function OSDisableTimer ++@Description Disable the specified timer ++@Input hTimer handle of the timer to be disabled ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer); ++ ++ ++/*************************************************************************/ /*! ++ @Function OSPanic ++ @Description Take action in response to an unrecoverable driver error ++ @Return None ++*/ /**************************************************************************/ ++void OSPanic(void); ++ ++/*************************************************************************/ /*! ++@Function OSCopyToUser ++@Description Copy data to user-addressable memory from kernel-addressable ++ memory. ++ Note that pvDest may be an invalid address or NULL and the ++ function should return an error in this case. ++ For operating systems that do not have a user/kernel space ++ distinction, this function should be implemented as a stub ++ which simply returns PVRSRV_ERROR_NOT_SUPPORTED. ++@Input pvProcess handle of the connection ++@Input pvDest pointer to the destination User memory ++@Input pvSrc pointer to the source Kernel memory ++@Input ui32Bytes size of the data to be copied ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSCopyToUser(void *pvProcess, void __user *pvDest, const void *pvSrc, size_t ui32Bytes); ++ ++/*************************************************************************/ /*! ++@Function OSCopyFromUser ++@Description Copy data from user-addressable memory to kernel-addressable ++ memory. ++ Note that pvSrc may be an invalid address or NULL and the ++ function should return an error in this case. ++ For operating systems that do not have a user/kernel space ++ distinction, this function should be implemented as a stub ++ which simply returns PVRSRV_ERROR_NOT_SUPPORTED. ++@Input pvProcess handle of the connection ++@Input pvDest pointer to the destination Kernel memory ++@Input pvSrc pointer to the source User memory ++@Input ui32Bytes size of the data to be copied ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest, const void __user *pvSrc, size_t ui32Bytes); ++ ++#if defined(__linux__) || defined(INTEGRITY_OS) ++#define OSBridgeCopyFromUser OSCopyFromUser ++#define OSBridgeCopyToUser OSCopyToUser ++#else ++/*************************************************************************/ /*! ++@Function OSBridgeCopyFromUser ++@Description Copy data from user-addressable memory into kernel-addressable ++ memory as part of a bridge call operation. ++ For operating systems that do not have a user/kernel space ++ distinction, this function will require whatever implementation ++ is needed to pass data for making the bridge function call. ++ For operating systems which do have a user/kernel space ++ distinction (such as Linux) this function may be defined so ++ as to equate to a call to OSCopyFromUser(). 
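++                For illustration only, such a single-address-space port
++                might reduce this to a plain copy, e.g.
++                  { OSCachedMemCopy(pvDest, pvSrc, ui32Bytes); return PVRSRV_OK; }
++                (a sketch, not a definitive implementation).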
++@Input pvProcess handle of the connection ++@Input pvDest pointer to the destination Kernel memory ++@Input pvSrc pointer to the source User memory ++@Input ui32Bytes size of the data to be copied ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSBridgeCopyFromUser (void *pvProcess, ++ void *pvDest, ++ const void *pvSrc, ++ size_t ui32Bytes); ++ ++/*************************************************************************/ /*! ++@Function OSBridgeCopyToUser ++@Description Copy data to user-addressable memory from kernel-addressable ++ memory as part of a bridge call operation. ++ For operating systems that do not have a user/kernel space ++ distinction, this function will require whatever implementation ++ is needed to pass data for making the bridge function call. ++ For operating systems which do have a user/kernel space ++ distinction (such as Linux) this function may be defined so ++ as to equate to a call to OSCopyToUser(). ++@Input pvProcess handle of the connection ++@Input pvDest pointer to the destination User memory ++@Input pvSrc pointer to the source Kernel memory ++@Input ui32Bytes size of the data to be copied ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSBridgeCopyToUser (void *pvProcess, ++ void *pvDest, ++ const void *pvSrc, ++ size_t ui32Bytes); ++#endif ++ ++/* To be increased if required in future */ ++#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x2000 /*!< Size of the memory block used to hold data passed in to a bridge call */ ++#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000 /*!< Size of the memory block used to hold data returned from a bridge call */ ++ ++/*************************************************************************/ /*! ++@Function OSPlatformBridgeInit ++@Description Called during device creation to allow the OS port to register ++ other bridge modules and related resources that it requires. ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSPlatformBridgeInit(void); ++ ++/*************************************************************************/ /*! ++@Function OSPlatformBridgeDeInit ++@Description Called during device destruction to allow the OS port to ++ deregister its OS specific bridges and clean up other ++ related resources. ++*/ /**************************************************************************/ ++void OSPlatformBridgeDeInit(void); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVToNativeError ++@Description Returns the OS-specific equivalent error number/code for ++ the specified PVRSRV_ERROR value. ++ If there is no equivalent, or the PVRSRV_ERROR value is ++ PVRSRV_OK (no error), 0 is returned. ++@Return The OS equivalent error code. ++*/ /**************************************************************************/ ++int PVRSRVToNativeError(PVRSRV_ERROR e); ++/** See PVRSRVToNativeError(). */ ++#define OSPVRSRVToNativeError(e) ( (PVRSRV_OK == e)? 
0: PVRSRVToNativeError(e) ) ++ ++ ++#if defined(__linux__) && defined(__KERNEL__) ++ ++/* Provide LockDep friendly definitions for Services RW locks */ ++#include ++#include ++#include "allocmem.h" ++ ++#define OSWRLockCreate(ppsLock) ({ \ ++ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ *(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \ ++ if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; }; \ ++ e;}) ++#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;}) ++ ++#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;}) ++#define OSWRLockAcquireReadNested(psLock, subclass) ({down_read_nested((psLock), (subclass)); PVRSRV_OK;}) ++#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;}) ++#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;}) ++#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;}) ++ ++#elif defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) ++/* User-mode unit tests use these definitions on Linux */ ++ ++PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock); ++void OSWRLockDestroy(POSWR_LOCK psLock); ++void OSWRLockAcquireRead(POSWR_LOCK psLock); ++#define OSWRLockAcquireReadNested(psLock, subclass) OSWRLockAcquireRead((psLock)) ++void OSWRLockReleaseRead(POSWR_LOCK psLock); ++void OSWRLockAcquireWrite(POSWR_LOCK psLock); ++void OSWRLockReleaseWrite(POSWR_LOCK psLock); ++ ++#else ++ ++/*! Function not implemented definition. */ ++#define OSFUNC_NOT_IMPLEMENTED 0 ++/*! Assert used for OSFUNC_NOT_IMPLEMENTED. */ ++#define OSFUNC_NOT_IMPLEMENTED_ASSERT() PVR_ASSERT(OSFUNC_NOT_IMPLEMENTED) ++ ++/*************************************************************************/ /*! ++@Function OSWRLockCreate ++@Description Create a writer/reader lock. ++ This type of lock allows multiple concurrent readers but ++ only a single writer, allowing for optimized performance. ++@Output ppsLock A handle to the created WR lock. ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock) ++{ ++ PVR_UNREFERENCED_PARAMETER(ppsLock); ++ ++ OSFUNC_NOT_IMPLEMENTED_ASSERT(); ++ ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++} ++ ++/*************************************************************************/ /*! ++@Function OSWRLockDestroy ++@Description Destroys a writer/reader lock. ++@Input psLock The handle of the WR lock to be destroyed. ++@Return None. ++*/ /**************************************************************************/ ++static INLINE void OSWRLockDestroy(POSWR_LOCK psLock) ++{ ++ PVR_UNREFERENCED_PARAMETER(psLock); ++ OSFUNC_NOT_IMPLEMENTED_ASSERT(); ++} ++ ++/*************************************************************************/ /*! ++@Function OSWRLockAcquireRead ++@Description Acquire a writer/reader read lock. ++ If the write lock is already acquired, the caller will ++ block until it is released. ++@Input psLock The handle of the WR lock to be acquired for ++ reading. ++@Return None. ++*/ /**************************************************************************/ ++static INLINE void OSWRLockAcquireRead(POSWR_LOCK psLock) ++{ ++ PVR_UNREFERENCED_PARAMETER(psLock); ++ OSFUNC_NOT_IMPLEMENTED_ASSERT(); ++} ++ ++/*************************************************************************/ /*! ++@Function OSWRLockAcquireReadNested ++@Description Acquire a nested writer/reader read lock. 
++                If the write lock is already acquired, the caller will
++                block until it is released.
++                For operating systems other than Linux, this equates to an
++                OSWRLockAcquireRead() call. On Linux, this function wraps a call
++                to down_read_nested(). This recognises the scenario where
++                there may be multiple subclasses within a particular class
++                of lock. In such cases, the order in which the locks belonging
++                to these various subclasses are acquired is important and must
++                be validated.
++@Input          psLock          The handle of the WR lock to be acquired for
++                reading.
++@Input          iSubclass       The subclass of the lock.
++@Return         None.
++*/ /**************************************************************************/
++static INLINE void OSWRLockAcquireReadNested(POSWR_LOCK psLock, IMG_INT iSubclass)
++{
++	PVR_UNREFERENCED_PARAMETER(psLock);
++	PVR_UNREFERENCED_PARAMETER(iSubclass);
++	OSFUNC_NOT_IMPLEMENTED_ASSERT();
++}
++
++/*************************************************************************/ /*!
++@Function       OSWRLockReleaseRead
++@Description    Release a writer/reader read lock.
++@Input          psLock          The handle of the WR lock whose read lock is to
++                be released.
++@Return         None.
++*/ /**************************************************************************/
++static INLINE void OSWRLockReleaseRead(POSWR_LOCK psLock)
++{
++	PVR_UNREFERENCED_PARAMETER(psLock);
++	OSFUNC_NOT_IMPLEMENTED_ASSERT();
++}
++
++/*************************************************************************/ /*!
++@Function       OSWRLockAcquireWrite
++@Description    Acquire a writer/reader write lock.
++                If the write lock or any read lock are already acquired,
++                the caller will block until all are released.
++@Input          psLock          The handle of the WR lock to be acquired for
++                writing.
++@Return         None.
++*/ /**************************************************************************/
++static INLINE void OSWRLockAcquireWrite(POSWR_LOCK psLock)
++{
++	PVR_UNREFERENCED_PARAMETER(psLock);
++	OSFUNC_NOT_IMPLEMENTED_ASSERT();
++}
++
++/*************************************************************************/ /*!
++@Function       OSWRLockReleaseWrite
++@Description    Release a writer/reader write lock.
++@Input          psLock          The handle of the WR lock whose write lock is to
++                be released.
++@Return         None.
++*/ /**************************************************************************/
++static INLINE void OSWRLockReleaseWrite(POSWR_LOCK psLock)
++{
++	PVR_UNREFERENCED_PARAMETER(psLock);
++	OSFUNC_NOT_IMPLEMENTED_ASSERT();
++}
++#endif
++
++/*************************************************************************/ /*!
++@Function       OSDivide64r64
++@Description    Divide a 64-bit value by a 32-bit value. Return the 64-bit
++                quotient.
++                The remainder is also returned in 'pui32Remainder'.
++@Input          ui64Divident    The number to be divided.
++@Input          ui32Divisor     The 32-bit value 'ui64Divident' is to
++                be divided by.
++@Output         pui32Remainder  The remainder of the division.
++@Return         The 64-bit quotient (result of the division).
++*/ /**************************************************************************/
++IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
++
++/*************************************************************************/ /*!
++@Function       OSDivide64
++@Description    Divide a 64-bit value by a 32-bit value. Return a 32-bit
++                quotient.
++                The remainder is also returned in 'pui32Remainder'.
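++                For example, splitting a byte count into whole MiB and a
++                remainder (names illustrative):
++                  ui32Mib = OSDivide64(ui64Bytes, 1U << 20, &ui32Rem);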
++                This function allows for a more optimal implementation
++                of a 64-bit division when the result is known to be
++                representable in 32-bits.
++@Input          ui64Divident    The number to be divided.
++@Input          ui32Divisor     The 32-bit value 'ui64Divident' is to
++                be divided by.
++@Output         pui32Remainder  The remainder of the division.
++@Return         The 32-bit quotient (result of the division).
++*/ /**************************************************************************/
++IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
++
++/*************************************************************************/ /*!
++@Function       OSDumpStack
++@Description    Dump the current task information and its stack trace.
++@Return         None
++*/ /**************************************************************************/
++void OSDumpStack(void);
++
++/*************************************************************************/ /*!
++@Function       OSUserModeAccessToPerfCountersEn
++@Description    Permit User-mode access to CPU performance counter
++                registers.
++                This function is called during device initialisation.
++                Certain CPU architectures may need to explicitly permit
++                User mode access to performance counters - if this is
++                required, the necessary code should be implemented inside
++                this function.
++@Return         None.
++*/ /**************************************************************************/
++void OSUserModeAccessToPerfCountersEn(void);
++
++/*************************************************************************/ /*!
++@Function       OSDebugSignalPID
++@Description    Sends a SIGTRAP signal to a specific PID in user mode for
++                debugging purposes. The user mode process can register a handler
++                against this signal.
++                This is necessary to support the Rogue debugger. If the Rogue
++                debugger is not used then this function may be implemented as
++                a stub.
++@Input          ui32PID         The PID for the signal.
++@Return         PVRSRV_OK on success, a failure code otherwise.
++*/ /**************************************************************************/
++PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID);
++
++#if defined(__linux__) && defined(__KERNEL__) && !defined(DOXYGEN)
++#define OSWarnOn(a) WARN_ON(a)
++#else
++/*************************************************************************/ /*!
++@Function       OSWarnOn
++@Description    This API allows the driver to emit a special token and stack
++                dump to the server log when an issue is detected that needs the
++                OS to be notified. The token or call may be used to trigger
++                log collection by the OS environment.
++                PVR_DPF log messages will have been emitted prior to this call.
++@Input          a    Expression to evaluate; if true, trigger the Warn signal
++@Return         None
++*/ /**************************************************************************/
++#define OSWarnOn(a) do { if ((a)) { OSDumpStack(); } } while (0)
++#endif
++
++/*************************************************************************/ /*!
++@Function       OSIsKernelThread
++@Description    This API determines if the current running thread is a kernel
++                thread (i.e. one not associated with any userland process,
++                typically an MISR handler.)
++@Return         IMG_TRUE if it is a kernel thread, otherwise IMG_FALSE.
++*/ /**************************************************************************/
++IMG_BOOL OSIsKernelThread(void);
++
++/*************************************************************************/ /*!
++@Function       OSThreadDumpInfo
++@Description    Traverse the thread list and call each of the stored
++                callbacks to dump the info in debug_dump.
++@Input          pfnDumpDebugPrintf  The 'printf' function to be called to
++                display the debug info
++@Input          pvDumpDebugFile     Optional file identifier to be passed to
++                the 'printf' function if required
++*/ /**************************************************************************/
++void OSThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
++                      void *pvDumpDebugFile);
++
++/*************************************************************************/ /*!
++@Function       OSDumpVersionInfo
++@Description    Store OS version information in debug dump.
++@Input          pfnDumpDebugPrintf  The 'printf' function to be called to
++                display the debug info
++@Input          pvDumpDebugFile     Optional file identifier to be passed to
++                the 'printf' function if required
++*/ /**************************************************************************/
++void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
++                       void *pvDumpDebugFile);
++
++/*************************************************************************/ /*!
++@Function       OSIsWriteCombineUnalignedSafe
++@Description    Determine if unaligned accesses to write-combine memory are
++                safe to perform, i.e. whether we are safe from a CPU fault
++                occurring. This test is specifically aimed at ARM64 platforms
++                which cannot provide this guarantee if the memory is 'device'
++                memory rather than 'normal' under the ARM memory architecture.
++@Return         IMG_TRUE if safe, IMG_FALSE otherwise.
++*/ /**************************************************************************/
++IMG_BOOL OSIsWriteCombineUnalignedSafe(void);
++
++/*************************************************************************/ /*!
++@Function       OSDebugLevel
++@Description    Returns current value of the debug level.
++@Return         Debug level.
++*/ /**************************************************************************/
++IMG_UINT32 OSDebugLevel(void);
++
++/*************************************************************************/ /*!
++@Function       OSSetDebugLevel
++@Description    Sets the current value of the debug level to ui32DebugLevel.
++@Input          ui32DebugLevel New debug level value.
++*/ /**************************************************************************/
++void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel);
++
++/*************************************************************************/ /*!
++@Function       OSIsDebugLevel
++@Description    Tests if a given debug level is enabled.
++@Input          ui32DebugLevel  Debug level to test.
++@Return         IMG_TRUE if the given debug level is enabled, IMG_FALSE
++                otherwise.
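++                For example, gating an expensive dump (DBGPRIV_VERBOSE is
++                assumed here to be one of the pvr_debug.h level masks):
++                  if (OSIsDebugLevel(DBGPRIV_VERBOSE)) { OSDumpStack(); }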
++*/ /**************************************************************************/ ++IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel); ++ ++#if defined(SUPPORT_DMA_TRANSFER) ++ ++typedef void (*PFN_SERVER_CLEANUP)(void *pvData, IMG_BOOL bAdvanceTimeline); ++ ++#define DMA_COMPLETION_TIMEOUT_MS 60000 ++#define DMA_ERROR_SYNC_RETRIES 100 ++ ++PVRSRV_ERROR OSDmaPrepareTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *psChan, ++ IMG_DMA_ADDR* psDmaAddr, IMG_UINT64* puiAddress, ++ IMG_UINT64 uiSize, IMG_BOOL bMemToDev, ++ IMG_HANDLE pvOSData, ++ IMG_HANDLE pvServerCleanupParam,PFN_SERVER_CLEANUP pfnServerCleanup, IMG_BOOL bFirst); ++ ++PVRSRV_ERROR OSDmaPrepareTransferSparse(PVRSRV_DEVICE_NODE *psDevNode, IMG_HANDLE pvChan, ++ IMG_DMA_ADDR* psDmaAddr, IMG_BOOL *pbValid, ++ IMG_UINT64* puiAddress, IMG_UINT64 uiSize, ++ IMG_UINT32 uiOffsetInPage, ++ IMG_UINT32 ui32SizeInPages, ++ IMG_BOOL bMemToDev, ++ IMG_HANDLE pvOSData, ++ IMG_HANDLE pvServerCleanupParam, PFN_SERVER_CLEANUP pfnServerCleanup, ++ IMG_BOOL bFirst); ++ ++PVRSRV_ERROR OSDmaAllocData(PVRSRV_DEVICE_NODE *psDevNode,IMG_UINT32 uiNumDMA, void **pvAllocedData); ++PVRSRV_ERROR OSDmaSubmitTransfer(PVRSRV_DEVICE_NODE *psDevNode, void *pvOSData, void *psChan, IMG_BOOL bSynchronous); ++void OSDmaForceCleanup(PVRSRV_DEVICE_NODE *psDevNode, void *pvChan, ++ void *pvOSData, IMG_HANDLE pvServerCleanupParam, ++ PFN_SERVER_CLEANUP pfnServerCleanup); ++#endif ++#endif /* OSFUNC_H */ ++ ++/****************************************************************************** ++ End of file (osfunc.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/osfunc_arm.c b/drivers/gpu/drm/img-rogue/osfunc_arm.c +new file mode 100644 +index 000000000000..7d52c1ef50a1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/osfunc_arm.c +@@ -0,0 +1,151 @@ ++/*************************************************************************/ /*! ++@File ++@Title arm specific OS functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Processor specific OS functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. 
++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#include ++#include ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) ++ #include ++#endif ++#include ++ ++#include "pvrsrv_error.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "osfunc.h" ++#include "pvr_debug.h" ++ ++ ++static inline size_t pvr_dmac_range_len(const void *pvStart, const void *pvEnd) ++{ ++ return (size_t)((char *)pvEnd - (char *)pvStart); ++} ++ ++void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) ++ arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); ++ arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); ++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ ++ /* Inner cache */ ++ dmac_flush_range(pvVirtStart, pvVirtEnd); ++ ++ /* Outer cache */ ++ outer_flush_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ ++} ++ ++void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) ++ arm_dma_ops.sync_single_for_device(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE); ++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ ++ /* Inner cache */ ++ dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_TO_DEVICE); ++ ++ /* Outer cache */ ++ outer_clean_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ ++} ++ ++void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) ++ arm_dma_ops.sync_single_for_cpu(psDevNode->psDevConfig->pvOSDevice, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE); ++#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ ++ /* Inner cache */ ++ dmac_map_area(pvVirtStart, pvr_dmac_range_len(pvVirtStart, pvVirtEnd), DMA_FROM_DEVICE); ++ ++ /* Outer cache */ ++ outer_inv_range(sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr); ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) */ ++} ++ ++OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) ++{ ++#if 
(LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)) ++ return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; ++#else ++ return OS_CACHE_OP_ADDR_TYPE_BOTH; ++#endif ++} ++ ++/* User Enable Register */ ++#define PMUSERENR_EN 0x00000001 /* enable user access to the counters */ ++ ++static void per_cpu_perf_counter_user_access_en(void *data) ++{ ++ PVR_UNREFERENCED_PARAMETER(data); ++ /* Enable user-mode access to counters. */ ++ asm volatile("mcr p15, 0, %0, c9, c14, 0" :: "r"(PMUSERENR_EN)); ++} ++ ++void OSUserModeAccessToPerfCountersEn(void) ++{ ++ on_each_cpu(per_cpu_perf_counter_user_access_en, NULL, 1); ++} ++ ++IMG_BOOL OSIsWriteCombineUnalignedSafe(void) ++{ ++ /* ++ * The kernel looks to have always used normal memory under ARM32. ++ * See osfunc_arm64.c implementation for more details. ++ */ ++ return IMG_TRUE; ++} +diff --git a/drivers/gpu/drm/img-rogue/osfunc_arm64.c b/drivers/gpu/drm/img-rogue/osfunc_arm64.c +new file mode 100644 +index 000000000000..68d1285b00be +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/osfunc_arm64.c +@@ -0,0 +1,290 @@ ++/*************************************************************************/ /*! ++@File ++@Title arm64 specific OS functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Processor specific OS functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++#include ++#include ++#include ++#include ++#include ++ ++#include "pvrsrv_error.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "osfunc.h" ++#include "pvr_debug.h" ++ ++#include "kernel_compatibility.h" ++ ++#if defined(CONFIG_OUTER_CACHE) ++ /* If you encounter a 64-bit ARM system with an outer cache, you'll need ++ * to add the necessary code to manage that cache. See osfunc_arm.c ++ * for an example of how to do so. ++ */ ++ #error "CONFIG_OUTER_CACHE not supported on arm64." ++#endif ++ ++static inline void begin_user_mode_access(void) ++{ ++#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) ++ uaccess_enable_privileged(); ++#endif ++} ++ ++static inline void end_user_mode_access(void) ++{ ++#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_SW_TTBR0_PAN) ++ uaccess_disable_privileged(); ++#endif ++} ++ ++static inline void FlushRange(void *pvRangeAddrStart, ++ void *pvRangeAddrEnd, ++ PVRSRV_CACHE_OP eCacheOp) ++{ ++ IMG_UINT32 ui32CacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); ++ IMG_BYTE *pbStart = pvRangeAddrStart; ++ IMG_BYTE *pbEnd = pvRangeAddrEnd; ++ IMG_BYTE *pbBase; ++ ++ /* ++ On arm64, the TRM states in D5.8.1 (data and unified caches) that if cache ++ maintenance is performed on a memory location using a VA, the effect of ++ that cache maintenance is visible to all VA aliases of the physical memory ++ location. So here it's quicker to issue the machine cache maintenance ++ instruction directly without going via the Linux kernel DMA framework as ++ this is sufficient to maintain the CPU d-caches on arm64. ++ */ ++ ++ begin_user_mode_access(); ++ ++ pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)ui32CacheLineSize); ++ for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize) ++ { ++ switch (eCacheOp) ++ { ++ case PVRSRV_CACHE_OP_CLEAN: ++ asm volatile ("dc cvac, %0" :: "r" (pbBase)); ++ break; ++ ++ case PVRSRV_CACHE_OP_INVALIDATE: ++ asm volatile ("dc ivac, %0" :: "r" (pbBase)); ++ break; ++ ++ case PVRSRV_CACHE_OP_FLUSH: ++ asm volatile ("dc civac, %0" :: "r" (pbBase)); ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Cache maintenance operation type %d is invalid", ++ __func__, eCacheOp)); ++ break; ++ } ++ } ++ ++ end_user_mode_access(); ++} ++ ++void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd) ++{ ++ struct device *dev; ++ ++ if (pvVirtStart) ++ { ++ FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH); ++ return; ++ } ++ ++ dev = psDevNode->psDevConfig->pvOSDevice; ++ ++ if (dev) ++ { ++ dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, ++ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, ++ DMA_TO_DEVICE); ++ dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, ++ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, ++ DMA_FROM_DEVICE); ++ } ++ else ++ { ++ /* ++ * Allocations done prior to obtaining device pointer may ++ * affect in cache operations being scheduled. ++ * ++ * Ignore operations with null device pointer. ++ * This prevents crashes on newer kernels that don't return dummy ops ++ * when null pointer is passed to get_dma_ops. 
++ * ++ */ ++ ++ /* Don't spam on nohw */ ++#if !defined(NO_HARDWARE) ++ PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); ++#endif ++ } ++ ++} ++ ++void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd) ++{ ++ struct device *dev; ++ ++ if (pvVirtStart) ++ { ++ FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_CLEAN); ++ return; ++ } ++ ++ dev = psDevNode->psDevConfig->pvOSDevice; ++ ++ if (dev) ++ { ++ dma_sync_single_for_device(dev, sCPUPhysStart.uiAddr, ++ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, ++ DMA_TO_DEVICE); ++ } ++ else ++ { ++ /* ++ * Allocations done prior to obtaining device pointer may ++ * affect in cache operations being scheduled. ++ * ++ * Ignore operations with null device pointer. ++ * This prevents crashes on newer kernels that don't return dummy ops ++ * when null pointer is passed to get_dma_ops. ++ * ++ */ ++ ++ ++ /* Don't spam on nohw */ ++#if !defined(NO_HARDWARE) ++ PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); ++#endif ++ } ++ ++} ++ ++void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd) ++{ ++ struct device *dev; ++ ++ if (pvVirtStart) ++ { ++ FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_INVALIDATE); ++ return; ++ } ++ ++ dev = psDevNode->psDevConfig->pvOSDevice; ++ ++ if (dev) ++ { ++ dma_sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, ++ sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, ++ DMA_FROM_DEVICE); ++ } ++ else ++ { ++ /* ++ * Allocations done prior to obtaining device pointer may ++ * affect in cache operations being scheduled. ++ * ++ * Ignore operations with null device pointer. ++ * This prevents crashes on newer kernels that don't return dummy ops ++ * when null pointer is passed to get_dma_ops. ++ * ++ */ ++ ++ /* Don't spam on nohw */ ++#if !defined(NO_HARDWARE) ++ PVR_DPF((PVR_DBG_WARNING, "Cache operation cannot be completed!")); ++#endif ++ } ++} ++ ++ ++OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) ++{ ++ return OS_CACHE_OP_ADDR_TYPE_PHYSICAL; ++} ++ ++void OSUserModeAccessToPerfCountersEn(void) ++{ ++} ++ ++IMG_BOOL OSIsWriteCombineUnalignedSafe(void) ++{ ++ /* ++ * Under ARM64 there is the concept of 'device' [0] and 'normal' [1] memory. ++ * Unaligned access on device memory is explicitly disallowed [2]: ++ * ++ * 'Further, unaligned accesses are only allowed to regions marked as Normal ++ * memory type. ++ * ... ++ * Attempts to perform unaligned accesses when not allowed will cause an ++ * alignment fault (data abort).' ++ * ++ * Write-combine on ARM64 can be implemented as either normal non-cached ++ * memory (NORMAL_NC) or as device memory with gathering enabled ++ * (DEVICE_GRE.) Kernel 3.13 changed this from the latter to the former. 
++ * ++ * [0]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/CHDBDIDF.html ++ * [1]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.den0024a/ch13s01s01.html ++ * [2]:http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html ++ */ ++ ++ pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); ++ ++ return (pgprot_val(pgprot) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC); ++} +diff --git a/drivers/gpu/drm/img-rogue/osfunc_common.h b/drivers/gpu/drm/img-rogue/osfunc_common.h +new file mode 100644 +index 000000000000..539ef2c042d1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/osfunc_common.h +@@ -0,0 +1,300 @@ ++/*************************************************************************/ /*! ++@File ++@Title OS functions header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description OS specific API definitions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef OSFUNC_COMMON_H ++/*! @cond Doxygen_Suppress */ ++#define OSFUNC_COMMON_H ++/*! @endcond */ ++ ++#if defined(__KERNEL__) && defined(__linux__) ++#include ++#else ++#include ++#endif ++ ++#include "img_types.h" ++ ++#ifdef __cplusplus ++extern "C" ++{ ++#endif ++ ++/**************************************************************************/ /*! ++@Function DeviceMemSet ++@Description Set memory, whose mapping may be uncached, to a given value. 
++ Safe implementation for all architectures for uncached mapping, ++ optimised for speed where supported by tool chains. ++ In such cases, OSDeviceMemSet() is defined as a call to this ++ function. ++@Input pvDest void pointer to the memory to be set ++@Input ui8Value byte containing the value to be set ++@Input ui32Size the number of bytes to be set to the given value ++@Return None ++ */ /**************************************************************************/ ++void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size); ++ ++/**************************************************************************/ /*! ++@Function DeviceMemCopy ++@Description Copy values from one area of memory. Safe implementation for ++ all architectures for uncached mapping, of either the source ++ or destination, optimised for speed where supported by tool ++ chains. In such cases, OSDeviceMemCopy() is defined as a call ++ to this function. ++@Input pvDst void pointer to the destination memory ++@Input pvSrc void pointer to the source memory ++@Input ui32Size the number of bytes to be copied ++@Return None ++ */ /**************************************************************************/ ++void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size); ++ ++/**************************************************************************/ /*! ++@Function DeviceMemSetBytes ++@Description Potentially very slow (but safe) memset fallback for non-GNU C ++ compilers for arm64/aarch64 ++@Input pvDest void pointer to the memory to be set ++@Input ui8Value byte containing the value to be set ++@Input ui32Size the number of bytes to be set to the given value ++@Return None ++ */ /**************************************************************************/ ++void DeviceMemSetBytes(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size); ++ ++/**************************************************************************/ /*! ++@Function DeviceMemCopyBytes ++@Description Potentially very slow (but safe) memcpy fallback for non-GNU C ++ compilers for arm64/aarch64 ++@Input pvDst void pointer to the destination memory ++@Input pvSrc void pointer to the source memory ++@Input ui32Size the number of bytes to be copied ++@Return None ++ */ /**************************************************************************/ ++void DeviceMemCopyBytes(void *pvDst, const void *pvSrc, size_t ui32Size); ++ ++/**************************************************************************/ /*! ++@Function StringLCopy ++@Description Copy at most uDataSize-1 bytes from pszSrc to pszDest. ++ If no null byte ('\0') is contained within the first uDataSize-1 ++ characters of the source string, the destination string will be ++ truncated. If the length of the source string is less than uDataSize ++ an additional NUL byte will be copied to the destination string ++ to ensure that the string is NUL-terminated. ++@Input pszDest char pointer to the destination string ++@Input pszSrc const char pointer to the source string ++@Input uDataSize the maximum number of bytes to be copied ++@Return Size of the source string ++ */ /**************************************************************************/ ++size_t StringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uDataSize); ++ ++#if defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) ++#if defined(__GNUC__) ++/* Workarounds for assumptions made that memory will not be mapped uncached ++ * in kernel or user address spaces on arm64 platforms (or other testing). 
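++ *
++ * Illustrative sketch (pvWCBuf and uiSize are hypothetical): on arm64 a
++ * library memset()/memcpy() may use unaligned stores or "DC ZVA", which
++ * fault on mappings that are not Normal memory, so uncached or
++ * write-combine buffers should go through these wrappers:
++ *
++ *   OSDeviceMemSet(pvWCBuf, 0, uiSize);    - safe for uncached mappings
++ *   memset(pvWCBuf, 0, uiSize);            - may fault on such mappings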
++ */ ++ ++#define OSDeviceMemSet(a,b,c) DeviceMemSet((a), (b), (c)) ++#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c)) ++ ++#else /* defined __GNUC__ */ ++ ++#define OSDeviceMemSet(a,b,c) DeviceMemSetBytes((a), (b), (c)) ++#define OSDeviceMemCopy(a,b,c) DeviceMemCopyBytes((a), (b), (c)) ++ ++#endif /* defined __GNUC__ */ ++ ++#else /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ ++ ++/* Everything else */ ++ ++/**************************************************************************/ /*! ++@Function OSDeviceMemSet ++@Description Set memory, whose mapping may be uncached, to a given value. ++ On some architectures, additional processing may be needed ++ if the mapping is uncached. ++@Input a void pointer to the memory to be set ++@Input b byte containing the value to be set ++@Input c the number of bytes to be set to the given value ++@Return Pointer to the destination memory. ++ */ /**************************************************************************/ ++#define OSDeviceMemSet(a,b,c) \ ++ do { \ ++ if ((c) != 0) \ ++ { \ ++ (void) memset((a), (b), (c)); \ ++ (void) *(volatile IMG_UINT32*)((void*)(a)); \ ++ } \ ++ } while (false) ++ ++/**************************************************************************/ /*! ++@Function OSDeviceMemCopy ++@Description Copy values from one area of memory, to another, when one ++ or both mappings may be uncached. ++ On some architectures, additional processing may be needed ++ if mappings are uncached. ++@Input a void pointer to the destination memory ++@Input b void pointer to the source memory ++@Input c the number of bytes to be copied ++@Return Pointer to the destination memory. ++ */ /**************************************************************************/ ++#define OSDeviceMemCopy(a,b,c) \ ++ do { \ ++ if ((c) != 0) \ ++ { \ ++ memcpy((a), (b), (c)); \ ++ (void) *(volatile IMG_UINT32*)((void*)(a)); \ ++ } \ ++ } while (false) ++ ++#endif /* (defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */ ++ ++/**************************************************************************/ /*! ++@Function OSCachedMemSet ++@Description Set memory, where the mapping is known to be cached, to a ++ given value. This function exists to allow an optimal memset ++ to be performed when memory is known to be cached. ++@Input a void pointer to the memory to be set ++@Input b byte containing the value to be set ++@Input c the number of bytes to be set to the given value ++@Return Pointer to the destination memory. ++ */ /**************************************************************************/ ++#define OSCachedMemSet(a,b,c) (void) memset((a), (b), (c)) ++ ++/**************************************************************************/ /*! ++@Function OSCachedMemCopy ++@Description Copy values from one area of memory, to another, when both ++ mappings are known to be cached. ++ This function exists to allow an optimal memcpy to be ++ performed when memory is known to be cached. ++@Input a void pointer to the destination memory ++@Input b void pointer to the source memory ++@Input c the number of bytes to be copied ++@Return Pointer to the destination memory. ++ */ /**************************************************************************/ ++#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c)) ++ ++#if defined(__KERNEL__) ++ ++/**************************************************************************/ /*! 
++@Function       OSCachedMemSetWMB
++@Description    Set memory, where the mapping is known to be cached or
++                write-combine, to a given value and issue a write memory
++                barrier after. This function exists to allow an optimal
++                memset to be performed when memory is known to be cached
++                or write-combine.
++@Input          a     void pointer to the memory to be set
++@Input          b     byte containing the value to be set
++@Input          c     the number of bytes to be set to the given value
++@Return         Pointer to the destination memory.
++ */ /**************************************************************************/
++#if !defined(SERVICES_SC)
++#define OSCachedMemSetWMB(a,b,c) \
++	do { \
++		if ((c) != 0) \
++		{ \
++			(void) memset((a), (b), (c)); \
++			OSWriteMemoryBarrier(a); \
++		} \
++	} while (false)
++#else
++#define OSCachedMemSetWMB(a,b,c) \
++	do { \
++		(void) memset((a), (b), (c)); \
++		OSWriteMemoryBarrier(); \
++	} while (false)
++#endif /* !defined(SERVICES_SC) */
++/**************************************************************************/ /*!
++@Function       OSCachedMemCopyWMB
++@Description    Copy values from one area of memory, to another, when both
++                mappings are known to be cached or write-combine and issue
++                a write memory barrier after.
++                This function exists to allow an optimal memcpy to be
++                performed when memory is known to be cached or write-combine.
++@Input          a     void pointer to the destination memory
++@Input          b     void pointer to the source memory
++@Input          c     the number of bytes to be copied
++@Return         Pointer to the destination memory.
++ */ /**************************************************************************/
++#if !defined(SERVICES_SC)
++#define OSCachedMemCopyWMB(a,b,c) \
++	do { \
++		if ((c) != 0) \
++		{ \
++			(void) memcpy((a), (b), (c)); \
++			OSWriteMemoryBarrier(a); \
++		} \
++	} while (false)
++#else
++#define OSCachedMemCopyWMB(a,b,c) \
++	do { \
++		(void) memcpy((a), (b), (c)); \
++		OSWriteMemoryBarrier(); \
++	} while (false)
++#endif /* !defined(SERVICES_SC) */
++#endif /* defined(__KERNEL__) */
++
++/**************************************************************************/ /*!
++@Function       OSStringLCopy
++@Description    Copy at most uDataSize-1 bytes from pszSrc to pszDest.
++                If no null byte ('\0') is contained within the first
++                uDataSize-1 characters of the source string, the destination
++                string will be truncated. If the length of the source string
++                is less than uDataSize an additional NUL byte will be copied
++                to the destination string to ensure that the string is
++                NUL-terminated.
++@Input          a     char pointer to the destination string
++@Input          b     const char pointer to the source string
++@Input          c     the maximum number of bytes to be copied
++@Return         Size of the source string
++ */ /**************************************************************************/
++#if defined(__QNXNTO__) || (defined(__linux__) && defined(__KERNEL__) && !defined(DEBUG))
++#define OSStringLCopy(a,b,c) strlcpy((a), (b), (c))
++#else /* defined(__QNXNTO__) ... */
++#define OSStringLCopy(a,b,c) StringLCopy((a), (b), (c))
++#endif /* defined(__QNXNTO__) ...
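++*/
++
++/* Usage sketch (acName and pszSrcName are illustrative): the return value
++ * is the full length of the source string, so truncation is detectable:
++ */
++#if 0
++IMG_CHAR acName[16];
++if (OSStringLCopy(acName, pszSrcName, sizeof(acName)) >= sizeof(acName))
++{
++	/* pszSrcName did not fit; acName holds a NUL-terminated prefix. */
++}
++#endif
++
++/*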
*/ ++ ++#ifdef __cplusplus ++} ++#endif ++ ++#endif /* OSFUNC_COMMON_H */ ++ ++/****************************************************************************** ++ End of file (osfunc_common.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/osfunc_riscv.c b/drivers/gpu/drm/img-rogue/osfunc_riscv.c +new file mode 100644 +index 000000000000..81facc6a0cda +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/osfunc_riscv.c +@@ -0,0 +1,428 @@ ++/*************************************************************************/ /*! ++@File ++@Title RISC-V specific OS functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Processor specific OS functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include
++#include
++#include
++#include
++#include "img_defs.h"
++#include "osfunc.h"
++#include "pvr_debug.h"
++#include "cache_ops.h"
++
++/*
++ * __enable_user_access()/__disable_user_access() (SR_SUM toggling) are
++ * provided by the kernel's RISC-V uaccess headers.
++ *
++ * RISC-V does not define cache maintenance operations in the base ISA,
++ * so this port uses the T-Head (XuanTie) vendor instructions, emitted
++ * as raw opcodes so the file builds with assemblers that do not know
++ * them.
++ */
++#define sync_is() asm volatile (".long 0x01b0000b") /* sync.is */
++
++/* Clean and invalidate the cache lines covering [start, end), then
++ * synchronise. */
++static void riscv_dma_wbinv_range(unsigned long start, unsigned long end)
++{
++	register unsigned long i asm("a0") = start & ~(L1_CACHE_BYTES - 1);
++
++	for (; i < end; i += L1_CACHE_BYTES)
++		asm volatile (".long 0x02b5000b"); /* dcache.civa a0 */
++
++	sync_is();
++}
++
++/* Clean (write back) the cache lines covering [start, end), then
++ * synchronise. */
++static void riscv_dma_wb_range(unsigned long start, unsigned long end)
++{
++	register unsigned long i asm("a0") = start & ~(L1_CACHE_BYTES - 1);
++
++	for (; i < end; i += L1_CACHE_BYTES)
++		asm volatile (".long 0x0295000b"); /* dcache.cva a0 */
++
++	sync_is();
++}
++
++static inline void FlushRange(void *pvRangeAddrStart,
++                              void *pvRangeAddrEnd,
++                              PVRSRV_CACHE_OP eCacheOp)
++{
++	IMG_BYTE *pbStart = pvRangeAddrStart;
++	IMG_BYTE *pbEnd = pvRangeAddrEnd;
++
++	__enable_user_access();
++	switch (eCacheOp)
++	{
++		case PVRSRV_CACHE_OP_CLEAN:
++			riscv_dma_wb_range((unsigned long)pbStart, (unsigned long)pbEnd);
++			break;
++
++		case PVRSRV_CACHE_OP_INVALIDATE:
++		case PVRSRV_CACHE_OP_FLUSH:
++			riscv_dma_wbinv_range((unsigned long)pbStart, (unsigned long)pbEnd);
++			break;
++
++		default:
++			PVR_DPF((PVR_DBG_ERROR,
++					"%s: Cache maintenance operation type %d is invalid",
++					__func__, eCacheOp));
++			break;
++	}
++	__disable_user_access();
++
++	/*
++	 * Cache maintenance by address affects all aliases of the underlying
++	 * physical line, so issuing the vendor cache instructions directly is
++	 * sufficient to maintain the CPU d-caches here, and quicker than
++	 * going through the Linux kernel DMA framework.
++	 */
++}
++
++void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
++                            void *pvVirtStart,
++                            void *pvVirtEnd,
++                            IMG_CPU_PHYADDR sCPUPhysStart,
++                            IMG_CPU_PHYADDR sCPUPhysEnd)
++{
++	PVR_UNREFERENCED_PARAMETER(psDevNode);
++	PVR_UNREFERENCED_PARAMETER(pvVirtStart);
++	PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
++
++	/* This port operates on the CPU physical range: see
++	 * OSCPUCacheOpAddressType() below, which reports PHYSICAL. */
++	FlushRange((void *)(sCPUPhysStart.uiAddr), (void *)(sCPUPhysEnd.uiAddr), PVRSRV_CACHE_OP_FLUSH);
++}
++
++void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
++                            void *pvVirtStart,
++                            void *pvVirtEnd,
++                            IMG_CPU_PHYADDR sCPUPhysStart,
++                            IMG_CPU_PHYADDR sCPUPhysEnd)
++{
++	PVR_UNREFERENCED_PARAMETER(psDevNode);
++	PVR_UNREFERENCED_PARAMETER(pvVirtStart);
++	PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
++
++	FlushRange((void *)(sCPUPhysStart.uiAddr), (void *)(sCPUPhysEnd.uiAddr), PVRSRV_CACHE_OP_CLEAN);
++}
++
++void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
++                                 void *pvVirtStart,
++                                 void *pvVirtEnd,
++                                 IMG_CPU_PHYADDR sCPUPhysStart,
++                                 IMG_CPU_PHYADDR sCPUPhysEnd)
++{
++	PVR_UNREFERENCED_PARAMETER(psDevNode);
++	PVR_UNREFERENCED_PARAMETER(pvVirtStart);
++	PVR_UNREFERENCED_PARAMETER(pvVirtEnd);
++
++	/* Invalidate maps onto clean+invalidate here (see FlushRange). */
++	FlushRange((void *)(sCPUPhysStart.uiAddr), (void *)(sCPUPhysEnd.uiAddr), PVRSRV_CACHE_OP_INVALIDATE);
++}
++
++OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
++{
++	/* FlushRange() above operates on the CPU physical address range
++	 * passed to the maintenance functions, so report PHYSICAL. */
++	return OS_CACHE_OP_ADDR_TYPE_PHYSICAL;
++}
++
++void OSUserModeAccessToPerfCountersEn(void)
++{
++#if !defined(NO_HARDWARE)
++	PVR_DPF((PVR_DBG_WARNING, "%s: Not implemented!", __func__));
++#endif
++}
++
++IMG_BOOL OSIsWriteCombineUnalignedSafe(void)
++{
++	return IMG_TRUE;
++}
+diff --git a/drivers/gpu/drm/img-rogue/osfunc_x86.c b/drivers/gpu/drm/img-rogue/osfunc_x86.c
+new file mode 100644
+index 000000000000..2c271d8f4c6f
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/osfunc_x86.c
+@@ -0,0 +1,134 @@
++/*************************************************************************/ /*!
++@File
++@Title          x86 specific OS functions
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Processor specific OS functions
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include ++#include ++ ++#include "pvrsrv_error.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "osfunc.h" ++#include "pvr_debug.h" ++ ++static void x86_flush_cache_range(const void *pvStart, const void *pvEnd) ++{ ++ IMG_BYTE *pbStart = (IMG_BYTE *)pvStart; ++ IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd; ++ IMG_BYTE *pbBase; ++ ++ pbEnd = (IMG_BYTE *)PVR_ALIGN((uintptr_t)pbEnd, ++ (uintptr_t)boot_cpu_data.x86_clflush_size); ++ ++ mb(); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) ++ __uaccess_begin(); ++#endif ++ ++ for (pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size) ++ { ++ clflush(pbBase); ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,168)) ++ __uaccess_end(); ++#endif ++ ++ mb(); ++} ++ ++void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); ++ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); ++ ++ x86_flush_cache_range(pvVirtStart, pvVirtEnd); ++} ++ ++void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); ++ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); ++ ++ /* No clean feature on x86 */ ++ x86_flush_cache_range(pvVirtStart, pvVirtEnd); ++} ++ ++void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart); ++ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd); ++ ++ /* No invalidate-only support */ ++ x86_flush_cache_range(pvVirtStart, pvVirtEnd); ++} ++ ++OS_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void) ++{ ++ return OS_CACHE_OP_ADDR_TYPE_VIRTUAL; ++} ++ ++void OSUserModeAccessToPerfCountersEn(void) ++{ ++ /* Not applicable to x86 architecture. */ ++} ++ ++IMG_BOOL OSIsWriteCombineUnalignedSafe(void) ++{ ++ return IMG_TRUE; ++} +diff --git a/drivers/gpu/drm/img-rogue/oskm_apphint.h b/drivers/gpu/drm/img-rogue/oskm_apphint.h +new file mode 100644 +index 000000000000..78d40407dcf4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/oskm_apphint.h +@@ -0,0 +1,186 @@ ++/*************************************************************************/ /*! ++@File oskm_apphint.h ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description OS-independent interface for retrieving KM apphints ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#include "img_defs.h" ++#if defined(__linux__) ++#include "km_apphint.h" ++#include "device.h" ++#else ++#include "services_client_porting.h" ++#endif ++#if !defined(OSKM_APPHINT_H) ++#define OSKM_APPHINT_H ++ ++/*! Supplied to os_get_km_apphint_XXX() functions when the param/AppHint is ++ * applicable to all devices and not a specific device. Typically used ++ * for server-wide build and module AppHints. 
++ */
++#define APPHINT_NO_DEVICE (NULL)
++
++#if defined(__linux__) && !defined(DOXYGEN)
++static INLINE IMG_UINT os_get_km_apphint_UINT32(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) {
++	return !pvr_apphint_get_uint32(device, id, pVal);
++}
++static INLINE IMG_UINT os_get_km_apphint_UINT64(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) {
++	return !pvr_apphint_get_uint64(device, id, pVal);
++}
++static INLINE IMG_UINT os_get_km_apphint_BOOL(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) {
++	return !pvr_apphint_get_bool(device, id, pVal);
++}
++static INLINE IMG_UINT os_get_km_apphint_STRING(PVRSRV_DEVICE_NODE *device, void *state, APPHINT_ID id, const IMG_CHAR *pAppHintDefault, IMG_CHAR *buffer, size_t size) {
++	return !pvr_apphint_get_string(device, id, buffer, size);
++}
++
++#define OSGetKMAppHintUINT32(device, state, name, appHintDefault, value) \
++	os_get_km_apphint_UINT32(device, state, APPHINT_ID_ ## name, appHintDefault, value)
++
++#define OSGetKMAppHintUINT64(device, state, name, appHintDefault, value) \
++	os_get_km_apphint_UINT64(device, state, APPHINT_ID_ ## name, appHintDefault, value)
++
++#define OSGetKMAppHintBOOL(device, state, name, appHintDefault, value) \
++	os_get_km_apphint_BOOL(device, state, APPHINT_ID_ ## name, appHintDefault, value)
++
++#define OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size) \
++	os_get_km_apphint_STRING(device, state, APPHINT_ID_ ## name, appHintDefault, buffer, size)
++
++
++#define OSCreateKMAppHintState(state) \
++	PVR_UNREFERENCED_PARAMETER(state)
++
++#define OSFreeKMAppHintState(state) \
++	PVR_UNREFERENCED_PARAMETER(state)
++
++#else /* defined(__linux__) && !defined(DOXYGEN) */
++
++/**************************************************************************/ /*!
++@def OSGetKMAppHintUINT32(device, state, name, appHintDefault, value)
++@Description    Interface for retrieval of uint32 km app hint.
++                For non-linux operating systems, this macro implements a call
++                from server code to PVRSRVGetAppHint() declared in
++                services_client_porting.h, effectively making it 'shared' code.
++@Input          device          Device node
++@Input          state           App hint state
++@Input          name            Name used to identify app hint
++@Input          appHintDefault  Default value to be returned if no
++                                app hint is found.
++@Output         value           Pointer to returned app hint value.
++ */ /**************************************************************************/
++#define OSGetKMAppHintUINT32(device, state, name, appHintDefault, value) \
++	PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
++
++/**************************************************************************/ /*!
++@def OSGetKMAppHintUINT64(device, state, name, appHintDefault, value)
++@Description    Interface for retrieval of uint64 km app hint.
++                For non-linux operating systems, this macro implements a call
++                from server code to PVRSRVGetAppHint() declared in
++                services_client_porting.h, effectively making it 'shared' code.
++@Input          device          Device node
++@Input          state           App hint state
++@Input          name            Name used to identify app hint
++@Input          appHintDefault  Default value to be returned if no
++                                app hint is found.
++@Output         value           Pointer to returned app hint value.
++ */ /**************************************************************************/
++#define OSGetKMAppHintUINT64(device, state, name, appHintDefault, value) \
++	PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
++
++/**************************************************************************/ /*!
++@def OSGetKMAppHintBOOL(device, state, name, appHintDefault, value)
++@Description    Interface for retrieval of IMG_BOOL km app hint.
++                For non-linux operating systems, this macro implements a call
++                from server code to PVRSRVGetAppHint() declared in
++                services_client_porting.h, effectively making it 'shared' code.
++@Input          device          Device node
++@Input          state           App hint state
++@Input          name            Name used to identify app hint
++@Input          appHintDefault  Default value to be returned if no
++                                app hint is found.
++@Output         value           Pointer to returned app hint value.
++ */ /**************************************************************************/
++#define OSGetKMAppHintBOOL(device, state, name, appHintDefault, value) \
++	PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
++
++/**************************************************************************/ /*!
++@def OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size)
++@Description    Interface for retrieval of string km app hint.
++                For non-linux operating systems, this macro implements a call
++                from server code to PVRSRVGetAppHint() declared in
++                services_client_porting.h, effectively making it 'shared' code.
++@Input          device          Device node
++@Input          state           App hint state
++@Input          name            Name used to identify app hint
++@Input          appHintDefault  Default value to be returned if no
++                                app hint is found.
++@Output         buffer          Buffer used to return app hint string.
++@Input          size            Size of the buffer.
++ */ /**************************************************************************/
++#define OSGetKMAppHintSTRING(device, state, name, appHintDefault, buffer, size) \
++	(PVR_UNREFERENCED_PARAMETER(size), PVRSRVGetAppHint(state, # name, IMG_STRING_TYPE, appHintDefault, buffer))
++
++/**************************************************************************/ /*!
++@def OSCreateKMAppHintState(state)
++@Description    Creates the app hint state.
++                For non-linux operating systems, this macro implements a call
++                from server code to PVRSRVCreateAppHintState() declared in
++                services_client_porting.h, effectively making it 'shared' code.
++@Output         state           App hint state
++ */ /**************************************************************************/
++#define OSCreateKMAppHintState(state) \
++	PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state)
++
++/**************************************************************************/ /*!
++@def OSFreeKMAppHintState
++@Description    Free the app hint state.
++                For non-linux operating systems, this macro implements a call
++                from server code to PVRSRVFreeAppHintState() declared in
++                services_client_porting.h, effectively making it 'shared' code.
++@Output state App hint state ++ */ /**************************************************************************/ ++#define OSFreeKMAppHintState(state) \ ++ PVRSRVFreeAppHintState(IMG_SRV_UM, state) ++ ++#endif /* defined(__linux__) */ ++ ++#endif /* OSKM_APPHINT_H */ ++ ++/****************************************************************************** ++ End of file (oskm_apphint.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/osmmap.h b/drivers/gpu/drm/img-rogue/osmmap.h +new file mode 100644 +index 000000000000..40a509d19440 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/osmmap.h +@@ -0,0 +1,115 @@ ++/*************************************************************************/ /*! ++@File ++@Title OS Interface for mapping PMRs into CPU space. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description OS abstraction for the mmap2 interface for mapping PMRs into ++ User Mode memory ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef OSMMAP_H ++#define OSMMAP_H ++ ++#include ++ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_memallocflags.h" ++ ++/*************************************************************************/ /*! ++@Function OSMMapPMR ++@Description Maps the specified PMR into CPU memory so that it may be ++ accessed by the user process. 
++ Whether the memory is mapped read only, read/write, or not at ++ all, is dependent on the PMR itself. ++ The PMR handle is opaque to the user, and lower levels of this ++ stack ensure that the handle is private to this process, such ++ that this API cannot be abused to gain access to other people's ++ PMRs. The OS implementation of this function should return the ++ virtual address and length for the User to use. The "PrivData" ++ is to be stored opaquely by the caller (N.B. he should make no ++ assumptions, in particular, NULL is a valid handle) and given ++ back to the call to OSMUnmapPMR. ++ The OS implementation is free to use the PrivData handle for ++ any purpose it sees fit. ++@Input hBridge The bridge handle. ++@Input hPMR The handle of the PMR to be mapped. ++@Input uiPMRLength The size of the PMR. ++@Input uiFlags Flags indicating how the mapping should ++ be done (read-only, etc). These may not ++ be honoured if the PMR does not permit ++ them. ++@Output phOSMMapPrivDataOut Returned private data. ++@Output ppvMappingAddressOut The returned mapping. ++@Output puiMappingLengthOut The size of the returned mapping. ++@Return PVRSRV_OK on success, failure code otherwise. ++ */ /*************************************************************************/ ++PVRSRV_ERROR ++OSMMapPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_DEVMEM_SIZE_T uiPMRLength, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_HANDLE *phOSMMapPrivDataOut, ++ void **ppvMappingAddressOut, ++ size_t *puiMappingLengthOut); ++ ++/*************************************************************************/ /*! ++@Function OSMUnmapPMR ++@Description Unmaps the specified PMR from CPU memory. ++ This function is the counterpart to OSMMapPMR. ++ The caller is required to pass the PMR handle back in along ++ with the same 3-tuple of information that was returned by the ++ call to OSMMapPMR in phOSMMapPrivDataOut. ++ It is possible to unmap only part of the original mapping ++ with this call, by specifying only the address range to be ++ unmapped in pvMappingAddress and uiMappingLength. ++@Input hBridge The bridge handle. ++@Input hPMR The handle of the PMR to be unmapped. ++@Input hOSMMapPrivData The OS private data of the mapping. ++@Input pvMappingAddress The address to be unmapped. ++@Input uiMappingLength The size to be unmapped. ++@Return PVRSRV_OK on success, failure code otherwise. ++ */ /*************************************************************************/ ++void ++OSMUnmapPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_HANDLE hOSMMapPrivData, ++ void *pvMappingAddress, ++ size_t uiMappingLength); ++ ++#endif /* OSMMAP_H */ +diff --git a/drivers/gpu/drm/img-rogue/osmmap_stub.c b/drivers/gpu/drm/img-rogue/osmmap_stub.c +new file mode 100644 +index 000000000000..74bad7073c58 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/osmmap_stub.c +@@ -0,0 +1,146 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description OS abstraction for the mmap2 interface for mapping PMRs into ++ User Mode memory ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* our exported API */ ++#include "osmmap.h" ++ ++/* include/ */ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "pvrsrv_error.h" ++ ++/* services/include/ */ ++ ++/* services/include/srvhelper/ */ ++#include "ra.h" ++ ++#include "pmr.h" ++ ++PVRSRV_ERROR ++OSMMapPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_DEVMEM_SIZE_T uiPMRSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_HANDLE *phOSMMapPrivDataOut, ++ void **ppvMappingAddressOut, ++ size_t *puiMappingLengthOut) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMR; ++ void *pvKernelAddress; ++ size_t uiLength; ++ IMG_HANDLE hPriv; ++ ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(uiFlags); ++ ++ /* ++ Normally this function would mmap a PMR into the memory space of ++ user process, but in this case we're taking a PMR and mapping it ++ into kernel virtual space. 
We keep the same function name for ++ symmetry as this allows the higher layers of the software stack ++ to not care whether they are user mode or kernel ++ */ ++ ++ psPMR = hPMR; ++ ++ if (PMR_IsSparse(psPMR)) ++ { ++ eError = PMRAcquireSparseKernelMappingData(psPMR, ++ 0, ++ 0, ++ &pvKernelAddress, ++ &uiLength, ++ &hPriv); ++ } ++ else ++ { ++ eError = PMRAcquireKernelMappingData(psPMR, ++ 0, ++ 0, ++ &pvKernelAddress, ++ &uiLength, ++ &hPriv); ++ } ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ *phOSMMapPrivDataOut = hPriv; ++ *ppvMappingAddressOut = pvKernelAddress; ++ *puiMappingLengthOut = uiLength; ++ ++ /* MappingLength might be rounded up to page size */ ++ PVR_ASSERT(*puiMappingLengthOut >= uiPMRSize); ++ ++ return PVRSRV_OK; ++ ++ /* ++ error exit paths follow ++ */ ++ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++void ++OSMUnmapPMR(IMG_HANDLE hBridge, ++ IMG_HANDLE hPMR, ++ IMG_HANDLE hOSMMapPrivData, ++ void *pvMappingAddress, ++ size_t uiMappingLength) ++{ ++ PMR *psPMR; ++ ++ PVR_UNREFERENCED_PARAMETER(hBridge); ++ PVR_UNREFERENCED_PARAMETER(pvMappingAddress); ++ PVR_UNREFERENCED_PARAMETER(uiMappingLength); ++ ++ psPMR = hPMR; ++ PMRReleaseKernelMappingData(psPMR, ++ hOSMMapPrivData); ++} +diff --git a/drivers/gpu/drm/img-rogue/ospvr_gputrace.h b/drivers/gpu/drm/img-rogue/ospvr_gputrace.h +new file mode 100644 +index 000000000000..0d6b89f23e3b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/ospvr_gputrace.h +@@ -0,0 +1,167 @@ ++/*************************************************************************/ /*! ++@File ospvr_gputrace.h ++@Title PVR GPU Trace module common environment interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_GPUTRACE_H_ ++#define PVR_GPUTRACE_H_ ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "rgx_hwperf.h" ++#include "device.h" ++ ++#if defined(__linux__) ++ ++void PVRGpuTraceEnqueueEvent( ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32FirmwareCtx, ++ IMG_UINT32 ui32ExternalJobRef, ++ IMG_UINT32 ui32InternalJobRef, ++ RGX_HWPERF_KICK_TYPE eKickType); ++ ++/* Early initialisation of GPU Trace events logic. ++ * This function is called on *driver* initialisation. */ ++PVRSRV_ERROR PVRGpuTraceSupportInit(void); ++ ++/* GPU Trace resources final cleanup. ++ * This function is called on driver de-initialisation. */ ++void PVRGpuTraceSupportDeInit(void); ++ ++/* Initialisation for AppHints callbacks. ++ * This function is called during the late stage of driver initialisation but ++ * before the device initialisation but after the debugfs sub-system has been ++ * initialised. */ ++void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/* Per-device initialisation of the GPU Trace resources */ ++PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/* Per-device cleanup for the GPU Trace resources. */ ++void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/* Enables the gpu trace sub-system for a given device. */ ++PVRSRV_ERROR PVRGpuTraceSetEnabled( ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bNewValue); ++ ++/* Returns IMG_TRUE if the gpu trace sub-system has been enabled (but not ++ * necessarily initialised). */ ++IMG_BOOL PVRGpuTraceIsEnabled(void); ++ ++/* Performs some initialisation steps if the feature was enabled ++ * on driver startup. 
*/
++void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/* FTrace events callbacks interface */
++
++void PVRGpuTraceEnableUfoCallback(void);
++void PVRGpuTraceDisableUfoCallback(void);
++
++void PVRGpuTraceEnableFirmwareActivityCallback(void);
++void PVRGpuTraceDisableFirmwareActivityCallback(void);
++
++#else /* defined(__linux__) */
++
++static inline void PVRGpuTraceEnqueueEvent(
++	PVRSRV_DEVICE_NODE *psDevNode,
++	IMG_UINT32 ui32FirmwareCtx,
++	IMG_UINT32 ui32ExternalJobRef,
++	IMG_UINT32 ui32InternalJobRef,
++	RGX_HWPERF_KICK_TYPE eKickType)
++{
++	PVR_UNREFERENCED_PARAMETER(psDevNode);
++	PVR_UNREFERENCED_PARAMETER(ui32FirmwareCtx);
++	PVR_UNREFERENCED_PARAMETER(ui32ExternalJobRef);
++	PVR_UNREFERENCED_PARAMETER(ui32InternalJobRef);
++	PVR_UNREFERENCED_PARAMETER(eKickType);
++}
++
++static inline PVRSRV_ERROR PVRGpuTraceSupportInit(void) {
++	return PVRSRV_OK;
++}
++
++static inline void PVRGpuTraceSupportDeInit(void) {}
++
++static inline void PVRGpuTraceInitAppHintCallbacks(
++	const PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++static inline PVRSRV_ERROR PVRGpuTraceInitDevice(
++	PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++	return PVRSRV_OK;
++}
++
++static inline void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++static inline PVRSRV_ERROR PVRGpuTraceSetEnabled(
++	PVRSRV_DEVICE_NODE *psDeviceNode,
++	IMG_BOOL bNewValue)
++{
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++	PVR_UNREFERENCED_PARAMETER(bNewValue);
++	return PVRSRV_OK;
++}
++
++static inline IMG_BOOL PVRGpuTraceIsEnabled(void)
++{
++	return IMG_FALSE;
++}
++
++static inline void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++static inline void PVRGpuTraceEnableUfoCallback(void) {}
++static inline void PVRGpuTraceDisableUfoCallback(void) {}
++
++static inline void PVRGpuTraceEnableFirmwareActivityCallback(void) {}
++static inline void PVRGpuTraceDisableFirmwareActivityCallback(void) {}
++
++#endif /* defined(__linux__) */
++
++#endif /* PVR_GPUTRACE_H_ */
+diff --git a/drivers/gpu/drm/img-rogue/pci_support.c b/drivers/gpu/drm/img-rogue/pci_support.c
+new file mode 100644
+index 000000000000..c3bbcc46cb2c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pci_support.c
+@@ -0,0 +1,726 @@
++/*************************************************************************/ /*!
++@File
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include ++#include ++ ++#if defined(CONFIG_MTRR) ++#include ++#endif ++ ++#include "pci_support.h" ++#include "allocmem.h" ++ ++typedef struct _PVR_PCI_DEV_TAG ++{ ++ struct pci_dev *psPCIDev; ++ HOST_PCI_INIT_FLAGS ePCIFlags; ++ IMG_BOOL abPCIResourceInUse[DEVICE_COUNT_RESOURCE]; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ int iMTRR[DEVICE_COUNT_RESOURCE]; ++#endif ++} PVR_PCI_DEV; ++ ++/*************************************************************************/ /*! ++@Function OSPCISetDev ++@Description Set a PCI device for subsequent use. 
++@Input          pvPCICookie          Pointer to OS specific PCI structure
++@Input          eFlags               Flags
++@Return         PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
++*/ /**************************************************************************/
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags)
++{
++	int err;
++	IMG_UINT32 i;
++	PVR_PCI_DEV *psPVRPCI;
++
++	psPVRPCI = OSAllocMem(sizeof(*psPVRPCI));
++	if (psPVRPCI == NULL)
++	{
++		printk(KERN_ERR "OSPCISetDev: Couldn't allocate PVR PCI structure\n");
++		return NULL;
++	}
++
++	psPVRPCI->psPCIDev = (struct pci_dev *)pvPCICookie;
++	psPVRPCI->ePCIFlags = eFlags;
++
++	err = pci_enable_device(psPVRPCI->psPCIDev);
++	if (err != 0)
++	{
++		printk(KERN_ERR "OSPCISetDev: Couldn't enable device (%d)\n", err);
++		OSFreeMem(psPVRPCI);
++		return NULL;
++	}
++
++	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */
++	{
++		pci_set_master(psPVRPCI->psPCIDev);
++	}
++
++	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */
++	{
++#if defined(CONFIG_PCI_MSI)
++		err = pci_enable_msi(psPVRPCI->psPCIDev);
++		if (err != 0)
++		{
++			printk(KERN_ERR "OSPCISetDev: Couldn't enable MSI (%d)", err);
++			psPVRPCI->ePCIFlags &= ~HOST_PCI_INIT_FLAG_MSI; /* PRQA S 1474,3358,4130 */ /* misuse of enums */
++		}
++#else
++		printk(KERN_ERR "OSPCISetDev: MSI support not enabled in the kernel");
++#endif
++	}
++
++	/* Initialise the PCI resource and MTRR tracking array */
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++	{
++		psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
++		psPVRPCI->iMTRR[i] = -1;
++#endif
++	}
++
++	return (PVRSRV_PCI_DEV_HANDLE)psPVRPCI;
++}
++
++/*************************************************************************/ /*!
++@Function       OSPCIAcquireDev
++@Description    Acquire a PCI device for subsequent use.
++@Input          ui16VendorID         Vendor PCI ID
++@Input          ui16DeviceID         Device PCI ID
++@Input          eFlags               Flags
++@Return         PVRSRV_PCI_DEV_HANDLE   Pointer to PCI device handle
++*/ /**************************************************************************/
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID,
++                                      IMG_UINT16 ui16DeviceID,
++                                      HOST_PCI_INIT_FLAGS eFlags)
++{
++	struct pci_dev *psPCIDev;
++
++	psPCIDev = pci_get_device(ui16VendorID, ui16DeviceID, NULL);
++	if (psPCIDev == NULL)
++	{
++		return NULL;
++	}
++
++	return OSPCISetDev((void *)psPCIDev, eFlags);
++}
++
++/*************************************************************************/ /*!
++@Function       OSPCIIRQ
++@Description    Get the interrupt number for the device.
++@Input          hPVRPCI              PCI device handle
++@Output         pui32IRQ             Pointer to where the interrupt number
++                                     should be returned
++@Return         PVRSRV_ERROR         Services error code
++*/ /**************************************************************************/
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ)
++{
++	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++	if (pui32IRQ == NULL)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	*pui32IRQ = psPVRPCI->psPCIDev->irq;
++
++	return PVRSRV_OK;
++}
++
++/* Functions supported by OSPCIAddrRangeFunc */
++enum HOST_PCI_ADDR_RANGE_FUNC
++{
++	HOST_PCI_ADDR_RANGE_FUNC_LEN,
++	HOST_PCI_ADDR_RANGE_FUNC_START,
++	HOST_PCI_ADDR_RANGE_FUNC_END,
++	HOST_PCI_ADDR_RANGE_FUNC_REQUEST,
++	HOST_PCI_ADDR_RANGE_FUNC_RELEASE
++};
++
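A hedged sketch of how the acquire/IRQ helpers above combine during bring-up (the vendor and device IDs are placeholders, not values taken from this patch):

    /* Illustrative bring-up: find the device, then fetch its IRQ. */
    PVRSRV_PCI_DEV_HANDLE hPVRPCI;
    IMG_UINT32 ui32IRQ;

    hPVRPCI = OSPCIAcquireDev(0x1010 /* placeholder */, 0x0001 /* placeholder */,
                              HOST_PCI_INIT_FLAG_BUS_MASTER);
    if (hPVRPCI != NULL && OSPCIIRQ(hPVRPCI, &ui32IRQ) == PVRSRV_OK)
    {
        /* ui32IRQ can now be registered with the OS interrupt layer. */
    }

++/*************************************************************************/ /*!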
++@Function       OSPCIAddrRangeFunc
++@Description    Internal support function for various address range related
++                functions
++@Input          eFunc                Function to perform
++@Input          hPVRPCI              PCI device handle
++@Input          ui32Index            Address range index
++@Return         IMG_UINT64           Function dependent value
++*/ /**************************************************************************/
++static IMG_UINT64 OSPCIAddrRangeFunc(enum HOST_PCI_ADDR_RANGE_FUNC eFunc,
++                                     PVRSRV_PCI_DEV_HANDLE hPVRPCI,
++                                     IMG_UINT32 ui32Index)
++{
++	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++
++	if (ui32Index >= DEVICE_COUNT_RESOURCE)
++	{
++		printk(KERN_ERR "OSPCIAddrRangeFunc: Index out of range");
++		return 0;
++	}
++
++	switch (eFunc)
++	{
++		case HOST_PCI_ADDR_RANGE_FUNC_LEN:
++		{
++			return pci_resource_len(psPVRPCI->psPCIDev, ui32Index);
++		}
++		case HOST_PCI_ADDR_RANGE_FUNC_START:
++		{
++			return pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
++		}
++		case HOST_PCI_ADDR_RANGE_FUNC_END:
++		{
++			return pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
++		}
++		case HOST_PCI_ADDR_RANGE_FUNC_REQUEST:
++		{
++			int err = pci_request_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index, PVRSRV_MODNAME);
++			if (err != 0)
++			{
++				printk(KERN_ERR "OSPCIAddrRangeFunc: pci_request_region failed (%d)", err);
++				return 0;
++			}
++			psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_TRUE;
++			return 1;
++		}
++		case HOST_PCI_ADDR_RANGE_FUNC_RELEASE:
++		{
++			if (psPVRPCI->abPCIResourceInUse[ui32Index])
++			{
++				pci_release_region(psPVRPCI->psPCIDev, (IMG_INT)ui32Index);
++				psPVRPCI->abPCIResourceInUse[ui32Index] = IMG_FALSE;
++			}
++			return 1;
++		}
++		default:
++		{
++			printk(KERN_ERR "OSPCIAddrRangeFunc: Unknown function");
++			break;
++		}
++	}
++
++	return 0;
++}
++
++/*************************************************************************/ /*!
++@Function       OSPCIAddrRangeLen
++@Description    Returns length of a given address range
++@Input          hPVRPCI              PCI device handle
++@Input          ui32Index            Address range index
++@Return         IMG_UINT64           Length of address range or 0 if no
++                                     such range
++*/ /**************************************************************************/
++IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_LEN, hPVRPCI, ui32Index);
++}
++
++/*************************************************************************/ /*!
++@Function       OSPCIAddrRangeStart
++@Description    Returns the start of a given address range
++@Input          hPVRPCI              PCI device handle
++@Input          ui32Index            Address range index
++@Return         IMG_UINT64           Start of address range or 0 if no
++                                     such range
++*/ /**************************************************************************/
++IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_START, hPVRPCI, ui32Index);
++}
++
++/*************************************************************************/ /*!
++@Function       OSPCIAddrRangeEnd
++@Description    Returns the end of a given address range
++@Input          hPVRPCI              PCI device handle
++@Input          ui32Index            Address range index
++@Return         IMG_UINT64           End of address range or 0 if no such
++                                     range
++*/ /**************************************************************************/
++IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++	return OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_END, hPVRPCI, ui32Index);
++}
++
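The Len/Start/End accessors above, together with the request helper defined next, are typically combined to claim and map a BAR; a hedged sketch (the BAR index and ioremap usage are illustrative, not taken from this patch):

    /* Illustrative: claim BAR 0 and map its registers.
     * Pair with OSPCIReleaseAddrRange() and iounmap() on teardown. */
    void __iomem *pvRegs = NULL;

    if (OSPCIRequestAddrRange(hPVRPCI, 0) == PVRSRV_OK)
    {
        pvRegs = ioremap(OSPCIAddrRangeStart(hPVRPCI, 0),
                         OSPCIAddrRangeLen(hPVRPCI, 0));
    }

++/*************************************************************************/ /*!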
++@Function OSPCIRequestAddrRange ++@Description Request a given address range index for subsequent use ++@Input hPVRPCI PCI device handle ++@Input ui32Index Address range index ++@Return PVRSRV_ERROR Services error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, ++ IMG_UINT32 ui32Index) ++{ ++ if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_REQUEST, hPVRPCI, ui32Index) == 0) ++ { ++ return PVRSRV_ERROR_PCI_CALL_FAILED; ++ } ++ else ++ { ++ return PVRSRV_OK; ++ } ++} ++ ++/*************************************************************************/ /*! ++@Function OSPCIReleaseAddrRange ++@Description Release a given address range that is no longer being used ++@Input hPVRPCI PCI device handle ++@Input ui32Index Address range index ++@Return PVRSRV_ERROR Services error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) ++{ ++ if (OSPCIAddrRangeFunc(HOST_PCI_ADDR_RANGE_FUNC_RELEASE, hPVRPCI, ui32Index) == 0) ++ { ++ return PVRSRV_ERROR_PCI_CALL_FAILED; ++ } ++ else ++ { ++ return PVRSRV_OK; ++ } ++} ++ ++/*************************************************************************/ /*! ++@Function OSPCIRequestAddrRegion ++@Description Request a given region from an address range for subsequent use ++@Input hPVRPCI PCI device handle ++@Input ui32Index Address range index ++@Input uiOffset Offset into the address range that forms ++ the start of the region ++@Input uiLength Length of the region ++@Return PVRSRV_ERROR Services error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, ++ IMG_UINT32 ui32Index, ++ IMG_UINT64 uiOffset, ++ IMG_UINT64 uiLength) ++{ ++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; ++ resource_size_t start; ++ resource_size_t end; ++ ++ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); ++ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index); ++ ++ /* Check that the requested region is valid */ ++ if ((start + uiOffset + uiLength - 1) > end) ++ { ++ return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH; ++ } ++ ++ if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO) ++ { ++ if (request_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) ++ { ++ return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; ++ } ++ } ++ else ++ { ++ if (request_mem_region(start + uiOffset, uiLength, PVRSRV_MODNAME) == NULL) ++ { ++ return PVRSRV_ERROR_PCI_REGION_UNAVAILABLE; ++ } ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/*************************************************************************/ /*! 
++@Function       OSPCIReleaseAddrRegion
++@Description    Release a given region, from an address range, that is no
++                longer in use
++@Input          hPVRPCI    PCI device handle
++@Input          ui32Index  Address range index
++@Input          uiOffset   Offset into the address range that forms
++                           the start of the region
++@Input          uiLength   Length of the region
++@Return         PVRSRV_ERROR Services error code
++*/ /**************************************************************************/
++PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI,
++				    IMG_UINT32 ui32Index,
++				    IMG_UINT64 uiOffset,
++				    IMG_UINT64 uiLength)
++{
++	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++	resource_size_t start;
++	resource_size_t end;
++
++	start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index);
++	end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index);
++
++	/* Check that the region is valid */
++	if ((start + uiOffset + uiLength - 1) > end)
++	{
++		return PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH;
++	}
++
++	if (pci_resource_flags(psPVRPCI->psPCIDev, ui32Index) & IORESOURCE_IO)
++	{
++		release_region(start + uiOffset, uiLength);
++	}
++	else
++	{
++		release_mem_region(start + uiOffset, uiLength);
++	}
++
++	return PVRSRV_OK;
++}
++
++/*************************************************************************/ /*!
++@Function       OSPCIReleaseDev
++@Description    Release a PCI device that is no longer being used
++@Input          hPVRPCI    PCI device handle
++@Return         PVRSRV_ERROR Services error code
++*/ /**************************************************************************/
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++	int i;
++
++	/* Release all PCI regions that are currently in use */
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++	{
++		if (psPVRPCI->abPCIResourceInUse[i])
++		{
++			pci_release_region(psPVRPCI->psPCIDev, i);
++			psPVRPCI->abPCIResourceInUse[i] = IMG_FALSE;
++		}
++	}
++
++#if defined(CONFIG_PCI_MSI)
++	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_MSI) /* PRQA S 3358 */ /* misuse of enums */
++	{
++		pci_disable_msi(psPVRPCI->psPCIDev);
++	}
++#endif
++
++	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */
++	{
++		pci_clear_master(psPVRPCI->psPCIDev);
++	}
++
++	pci_disable_device(psPVRPCI->psPCIDev);
++
++	OSFreeMem(psPVRPCI);
++	/* Not NULLing the pointer: the handle was passed by value, so only a
++	 * stack copy would be cleared */
++
++	return PVRSRV_OK;
++}
++
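++/* Illustrative sketch, not part of the original source: region requests
++ * are expected to be balanced with releases using the same index, offset
++ * and length. The values 0, 0x1000 and 0x100 are assumptions made for the
++ * example only.
++ */
++#if 0
++static PVRSRV_ERROR ExampleRegionRoundTrip(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++	PVRSRV_ERROR eError;
++
++	eError = OSPCIRequestAddrRegion(hPVRPCI, 0, 0x1000, 0x100);
++	if (eError != PVRSRV_OK)
++		return eError;
++
++	/* ... access the region ... */
++
++	return OSPCIReleaseAddrRegion(hPVRPCI, 0, 0x1000, 0x100);
++}
++#endif
++
++/*************************************************************************/ /*!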
++@Function       OSPCISuspendDev
++@Description    Prepare PCI device to be turned off by power management
++@Input          hPVRPCI    PCI device handle
++@Return         PVRSRV_ERROR Services error code
++*/ /**************************************************************************/
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++	int i;
++	int err;
++
++	/* Release all PCI regions that are currently in use */
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++	{
++		if (psPVRPCI->abPCIResourceInUse[i])
++		{
++			pci_release_region(psPVRPCI->psPCIDev, i);
++		}
++	}
++
++	err = pci_save_state(psPVRPCI->psPCIDev);
++	if (err != 0)
++	{
++		printk(KERN_ERR "OSPCISuspendDev: pci_save_state failed (%d)", err);
++		return PVRSRV_ERROR_PCI_CALL_FAILED;
++	}
++
++	pci_disable_device(psPVRPCI->psPCIDev);
++
++	err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_SUSPEND));
++	switch (err)
++	{
++		case 0:
++			break;
++		case -EIO:
++			printk(KERN_ERR "OSPCISuspendDev: device doesn't support PCI PM");
++			break;
++		case -EINVAL:
++			printk(KERN_ERR "OSPCISuspendDev: can't enter requested power state");
++			break;
++		default:
++			printk(KERN_ERR "OSPCISuspendDev: pci_set_power_state failed (%d)", err);
++			break;
++	}
++
++	return PVRSRV_OK;
++}
++
++/*************************************************************************/ /*!
++@Function       OSPCIResumeDev
++@Description    Prepare a PCI device to be resumed by power management
++@Input          hPVRPCI    PCI device handle
++@Return         PVRSRV_ERROR Services error code
++*/ /**************************************************************************/
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++	PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI;
++	int err;
++	int i;
++
++	err = pci_set_power_state(psPVRPCI->psPCIDev, pci_choose_state(psPVRPCI->psPCIDev, PMSG_ON));
++	switch (err)
++	{
++		case 0:
++			break;
++		case -EIO:
++			printk(KERN_ERR "OSPCIResumeDev: device doesn't support PCI PM");
++			break;
++		case -EINVAL:
++			printk(KERN_ERR "OSPCIResumeDev: can't enter requested power state");
++			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
++		default:
++			printk(KERN_ERR "OSPCIResumeDev: pci_set_power_state failed (%d)", err);
++			return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
++	}
++
++	pci_restore_state(psPVRPCI->psPCIDev);
++
++	err = pci_enable_device(psPVRPCI->psPCIDev);
++	if (err != 0)
++	{
++		printk(KERN_ERR "OSPCIResumeDev: Couldn't enable device (%d)", err);
++		return PVRSRV_ERROR_PCI_CALL_FAILED;
++	}
++
++	if (psPVRPCI->ePCIFlags & HOST_PCI_INIT_FLAG_BUS_MASTER) /* PRQA S 3358 */ /* misuse of enums */
++		pci_set_master(psPVRPCI->psPCIDev);
++
++	/* Restore the PCI resource tracking array */
++	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
++	{
++		if (psPVRPCI->abPCIResourceInUse[i])
++		{
++			err = pci_request_region(psPVRPCI->psPCIDev, i, PVRSRV_MODNAME);
++			if (err != 0)
++			{
++				printk(KERN_ERR "OSPCIResumeDev: pci_request_region failed (region %d, error %d)", i, err);
++			}
++		}
++	}
++
++	return PVRSRV_OK;
++}
++
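++/* Illustrative sketch, not part of the original source: the suspend and
++ * resume helpers are intended to be called symmetrically, e.g. from a
++ * driver's power management hooks. The wrapper function below is an
++ * assumption made for the example only.
++ */
++#if 0
++static PVRSRV_ERROR ExamplePowerCycle(PVRSRV_PCI_DEV_HANDLE hPVRPCI)
++{
++	PVRSRV_ERROR eError;
++
++	eError = OSPCISuspendDev(hPVRPCI);	/* e.g. from a .suspend hook */
++	if (eError != PVRSRV_OK)
++		return eError;
++
++	return OSPCIResumeDev(hPVRPCI);		/* e.g. from a .resume hook */
++}
++#endif
++
++/*************************************************************************/ /*!
++@Function       OSPCIGetVendorDeviceIDs
++@Description    Retrieve PCI vendor ID and device ID.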
++@Input hPVRPCI PCI device handle ++@Output pui16VendorID Vendor ID ++@Output pui16DeviceID Device ID ++@Return PVRSRV_ERROR Services error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, ++ IMG_UINT16 *pui16VendorID, ++ IMG_UINT16 *pui16DeviceID) ++{ ++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; ++ struct pci_dev *psPCIDev; ++ ++ if (psPVRPCI == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psPCIDev = psPVRPCI->psPCIDev; ++ if (psPCIDev == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ *pui16VendorID = psPCIDev->vendor; ++ *pui16DeviceID = psPCIDev->device; ++ ++ return PVRSRV_OK; ++} ++ ++#if defined(CONFIG_MTRR) ++ ++/*************************************************************************/ /*! ++@Function OSPCIClearResourceMTRRs ++@Description Clear any BIOS-configured MTRRs for a PCI memory region ++@Input hPVRPCI PCI device handle ++@Input ui32Index Address range index ++@Return PVRSRV_ERROR Services error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) ++{ ++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; ++ resource_size_t start, end; ++ int res; ++ ++ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); ++ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) ++ res = arch_io_reserve_memtype_wc(start, end - start); ++ if (res) ++ { ++ return PVRSRV_ERROR_PCI_CALL_FAILED; ++ } ++#endif ++ res = arch_phys_wc_add(start, end - start); ++ if (res < 0) ++ { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) ++ arch_io_free_memtype_wc(start, end - start); ++#endif ++ ++ return PVRSRV_ERROR_PCI_CALL_FAILED; ++ } ++ psPVRPCI->iMTRR[ui32Index] = res; ++#else ++ ++ res = mtrr_add(start, end - start, MTRR_TYPE_UNCACHABLE, 0); ++ if (res < 0) ++ { ++ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); ++ return PVRSRV_ERROR_PCI_CALL_FAILED; ++ } ++ ++ res = mtrr_del(res, start, end - start); ++ if (res < 0) ++ { ++ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); ++ return PVRSRV_ERROR_PCI_CALL_FAILED; ++ } ++ ++ /* Workaround for overlapping MTRRs. */ ++ { ++ IMG_BOOL bGotMTRR0 = IMG_FALSE; ++ ++ /* Current mobo BIOSes will normally set up a WRBACK MTRR spanning ++ * 0->4GB, and then another 4GB->6GB. If the PCI card's automatic & ++ * overlapping UNCACHABLE MTRR is deleted, we see WRBACK behaviour. ++ * ++ * WRBACK is incompatible with some PCI devices, so try to split ++ * the UNCACHABLE regions up and insert a WRCOMB region instead. ++ */ ++ res = mtrr_add(start, end - start, MTRR_TYPE_WRBACK, 0); ++ if (res < 0) ++ { ++ /* If this fails, services has probably run before and created ++ * a write-combined MTRR for the test chip. Assume it has, and ++ * don't return an error here. 
++ */ ++ return PVRSRV_OK; ++ } ++ ++ if (res == 0) ++ bGotMTRR0 = IMG_TRUE; ++ ++ res = mtrr_del(res, start, end - start); ++ if (res < 0) ++ { ++ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_del failed (%d)", res); ++ return PVRSRV_ERROR_PCI_CALL_FAILED; ++ } ++ ++ if (bGotMTRR0) ++ { ++ /* Replace 0 with a non-overlapping WRBACK MTRR */ ++ res = mtrr_add(0, start, MTRR_TYPE_WRBACK, 0); ++ if (res < 0) ++ { ++ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); ++ return PVRSRV_ERROR_PCI_CALL_FAILED; ++ } ++ ++ /* Add a WRCOMB MTRR for the PCI device memory bar */ ++ res = mtrr_add(start, end - start, MTRR_TYPE_WRCOMB, 0); ++ if (res < 0) ++ { ++ printk(KERN_ERR "OSPCIClearResourceMTRRs: mtrr_add failed (%d)", res); ++ return PVRSRV_ERROR_PCI_CALL_FAILED; ++ } ++ } ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*************************************************************************/ /*! ++@Function OSPCIReleaseResourceMTRRs ++@Description Release resources allocated by OSPCIClearResourceMTRRs ++@Input hPVRPCI PCI device handle ++@Input ui32Index Address range index ++*/ /**************************************************************************/ ++void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ PVR_PCI_DEV *psPVRPCI = (PVR_PCI_DEV *)hPVRPCI; ++ ++ if (psPVRPCI->iMTRR[ui32Index] >= 0) ++ { ++ arch_phys_wc_del(psPVRPCI->iMTRR[ui32Index]); ++ psPVRPCI->iMTRR[ui32Index] = -1; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) ++ { ++ resource_size_t start, end; ++ ++ start = pci_resource_start(psPVRPCI->psPCIDev, ui32Index); ++ end = pci_resource_end(psPVRPCI->psPCIDev, ui32Index) + 1; ++ ++ arch_io_free_memtype_wc(start, end - start); ++ } ++#endif ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(hPVRPCI); ++ PVR_UNREFERENCED_PARAMETER(ui32Index); ++#endif ++} ++#endif /* defined(CONFIG_MTRR) */ +diff --git a/drivers/gpu/drm/img-rogue/pci_support.h b/drivers/gpu/drm/img-rogue/pci_support.h +new file mode 100644 +index 000000000000..29746c672047 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pci_support.h +@@ -0,0 +1,99 @@ ++/*************************************************************************/ /*! ++@File ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef PCI_SUPPORT_H
++#define PCI_SUPPORT_H
++
++#include "img_types.h"
++#include "img_defs.h"
++#include "pvrsrv_error.h"
++
++#if defined(__linux__)
++#include <linux/pci.h>
++#define TO_PCI_COOKIE(dev) to_pci_dev((struct device *)(dev))
++#else
++#define TO_PCI_COOKIE(dev) (dev)
++#endif
++
++typedef enum _HOST_PCI_INIT_FLAGS_
++{
++	HOST_PCI_INIT_FLAG_BUS_MASTER = 0x00000001,
++	HOST_PCI_INIT_FLAG_MSI        = 0x00000002,
++	HOST_PCI_INIT_FLAG_FORCE_I32  = 0x7fffffff
++} HOST_PCI_INIT_FLAGS;
++
++struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_;
++typedef struct _PVRSRV_PCI_DEV_OPAQUE_STRUCT_ *PVRSRV_PCI_DEV_HANDLE;
++
++PVRSRV_PCI_DEV_HANDLE OSPCIAcquireDev(IMG_UINT16 ui16VendorID, IMG_UINT16 ui16DeviceID, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_PCI_DEV_HANDLE OSPCISetDev(void *pvPCICookie, HOST_PCI_INIT_FLAGS eFlags);
++PVRSRV_ERROR OSPCIReleaseDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIIRQ(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 *pui32IRQ);
++IMG_UINT64 OSPCIAddrRangeLen(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT64 OSPCIAddrRangeStart(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++IMG_UINT64 OSPCIAddrRangeEnd(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIReleaseAddrRange(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++PVRSRV_ERROR OSPCIRequestAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
++PVRSRV_ERROR OSPCIReleaseAddrRegion(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index, IMG_UINT64 uiOffset, IMG_UINT64 uiLength);
++PVRSRV_ERROR OSPCISuspendDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIResumeDev(PVRSRV_PCI_DEV_HANDLE hPVRPCI);
++PVRSRV_ERROR OSPCIGetVendorDeviceIDs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT16 *pui16VendorID, IMG_UINT16 *pui16DeviceID);
++
++#if defined(CONFIG_MTRR)
++PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index);
++#else
++static inline PVRSRV_ERROR OSPCIClearResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++	PVR_UNREFERENCED_PARAMETER(hPVRPCI);
++	PVR_UNREFERENCED_PARAMETER(ui32Index);
++	return PVRSRV_OK;
++}
++
++static inline void OSPCIReleaseResourceMTRRs(PVRSRV_PCI_DEV_HANDLE hPVRPCI, IMG_UINT32 ui32Index)
++{
++	PVR_UNREFERENCED_PARAMETER(hPVRPCI);
++	PVR_UNREFERENCED_PARAMETER(ui32Index);
++}
++#endif
++
++#endif /* PCI_SUPPORT_H */
+diff --git a/drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk b/drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk
+new file mode 100644
+index 000000000000..f4ac53c4a7d2
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pdp/drm_pdp.mk
+@@ -0,0 +1,13 @@
++drm_pdp-y += \
++	../apollo/drm_pdp_crtc.o \
++	../apollo/drm_pdp_debugfs.o \
++	../apollo/drm_pdp_drv.o \
++	../apollo/drm_pdp_dvi.o \
++	../apollo/drm_pdp_fb.o \
++	../apollo/drm_pdp_gem.o \
++	../apollo/drm_pdp_modeset.o \
++	../apollo/drm_pdp_plane.o \
++	../apollo/drm_pdp_tmds.o \
++	../apollo/pdp_apollo.o \
++	../apollo/pdp_odin.o \
++	../apollo/pdp_plato.o
+diff --git a/drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h b/drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h
+new file mode 100644
+index 000000000000..6164c5814140
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pdp2_mmu_regs.h
+@@ -0,0 +1,764 @@
++/*************************************************************************/ /*!
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++ ++#ifndef _PDP2_MMU_REGS_H ++#define _PDP2_MMU_REGS_H ++ ++/* Hardware register definitions */ ++ ++#define PDP_BIF_DIR_BASE_ADDR_OFFSET (0x0020) ++#define PDP_BIF_DIR_BASE_ADDR_STRIDE (4) ++#define PDP_BIF_DIR_BASE_ADDR_NO_ENTRIES (4) ++ ++/* PDP_BIF, DIR_BASE_ADDR, MMU_DIR_BASE_ADDR ++Base address in physical memory for MMU Directory n Entries. When MMU_ENABLE_EXT_ADDRESSING is '1', the bits 31:0 are assigned to the address 31+EXT_ADDR_RANGE:0+EXT_ADDR_RANGE, but then any address offset within a page is forced to 0. When MMU_ENABLE_EXT_ADDRESSING is '0', bits 31:12 are assigned to address 31:12 ++*/ ++#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_MASK (0xFFFFFFFF) ++#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SHIFT (0) ++#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_LENGTH (32) ++#define PDP_BIF_DIR_BASE_ADDR_MMU_DIR_BASE_ADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_TILE_CFG_OFFSET (0x0040) ++#define PDP_BIF_TILE_CFG_STRIDE (4) ++#define PDP_BIF_TILE_CFG_NO_ENTRIES (4) ++ ++/* PDP_BIF, TILE_CFG, TILE_128INTERLEAVE ++*/ ++#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_MASK (0x00000010) ++#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LSBMASK (0x00000001) ++#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SHIFT (4) ++#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_LENGTH (1) ++#define PDP_BIF_TILE_CFG_TILE_128INTERLEAVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, TILE_CFG, TILE_ENABLE ++*/ ++#define PDP_BIF_TILE_CFG_TILE_ENABLE_MASK (0x00000008) ++#define PDP_BIF_TILE_CFG_TILE_ENABLE_LSBMASK (0x00000001) ++#define PDP_BIF_TILE_CFG_TILE_ENABLE_SHIFT (3) ++#define PDP_BIF_TILE_CFG_TILE_ENABLE_LENGTH (1) ++#define PDP_BIF_TILE_CFG_TILE_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, TILE_CFG, TILE_STRIDE ++*/ ++#define PDP_BIF_TILE_CFG_TILE_STRIDE_MASK (0x00000007) ++#define PDP_BIF_TILE_CFG_TILE_STRIDE_LSBMASK (0x00000007) ++#define PDP_BIF_TILE_CFG_TILE_STRIDE_SHIFT (0) ++#define PDP_BIF_TILE_CFG_TILE_STRIDE_LENGTH (3) ++#define PDP_BIF_TILE_CFG_TILE_STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_TILE_MIN_ADDR_OFFSET (0x0050) ++#define PDP_BIF_TILE_MIN_ADDR_STRIDE (4) ++#define PDP_BIF_TILE_MIN_ADDR_NO_ENTRIES (4) ++ ++/* PDP_BIF, TILE_MIN_ADDR, TILE_MIN_ADDR ++*/ ++#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_MASK (0xFFFFFFFF) ++#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SHIFT (0) ++#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_LENGTH (32) ++#define PDP_BIF_TILE_MIN_ADDR_TILE_MIN_ADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_TILE_MAX_ADDR_OFFSET (0x0060) ++#define PDP_BIF_TILE_MAX_ADDR_STRIDE (4) ++#define PDP_BIF_TILE_MAX_ADDR_NO_ENTRIES (4) ++ ++/* PDP_BIF, TILE_MAX_ADDR, TILE_MAX_ADDR ++*/ ++#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_MASK (0xFFFFFFFF) ++#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SHIFT (0) ++#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_LENGTH (32) ++#define PDP_BIF_TILE_MAX_ADDR_TILE_MAX_ADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_CONTROL0_OFFSET (0x0000) ++ ++/* PDP_BIF, CONTROL0, MMU_TILING_SCHEME ++*/ ++#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_MASK (0x00000001) ++#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SHIFT (0) ++#define PDP_BIF_CONTROL0_MMU_TILING_SCHEME_LENGTH (1) ++#define 
PDP_BIF_CONTROL0_MMU_TILING_SCHEME_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL0, MMU_CACHE_POLICY ++*/ ++#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_MASK (0x00000100) ++#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SHIFT (8) ++#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_LENGTH (1) ++#define PDP_BIF_CONTROL0_MMU_CACHE_POLICY_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL0, FORCE_CACHE_POLICY_BYPASS ++*/ ++#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_MASK (0x00000200) ++#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SHIFT (9) ++#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_LENGTH (1) ++#define PDP_BIF_CONTROL0_FORCE_CACHE_POLICY_BYPASS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL0, STALL_ON_PROTOCOL_FAULT ++*/ ++#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_MASK (0x00001000) ++#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SHIFT (12) ++#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_LENGTH (1) ++#define PDP_BIF_CONTROL0_STALL_ON_PROTOCOL_FAULT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_CONTROL1_OFFSET (0x0008) ++ ++/* PDP_BIF, CONTROL1, MMU_FLUSH0 ++*/ ++#define PDP_BIF_CONTROL1_MMU_FLUSH0_MASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_FLUSH0_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_FLUSH0_SHIFT (0) ++#define PDP_BIF_CONTROL1_MMU_FLUSH0_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_FLUSH0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_FLUSH1 ++*/ ++#define PDP_BIF_CONTROL1_MMU_FLUSH1_MASK (0x00000002) ++#define PDP_BIF_CONTROL1_MMU_FLUSH1_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_FLUSH1_SHIFT (1) ++#define PDP_BIF_CONTROL1_MMU_FLUSH1_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_FLUSH1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_FLUSH2 ++*/ ++#define PDP_BIF_CONTROL1_MMU_FLUSH2_MASK (0x00000004) ++#define PDP_BIF_CONTROL1_MMU_FLUSH2_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_FLUSH2_SHIFT (2) ++#define PDP_BIF_CONTROL1_MMU_FLUSH2_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_FLUSH2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_FLUSH3 ++*/ ++#define PDP_BIF_CONTROL1_MMU_FLUSH3_MASK (0x00000008) ++#define PDP_BIF_CONTROL1_MMU_FLUSH3_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_FLUSH3_SHIFT (3) ++#define PDP_BIF_CONTROL1_MMU_FLUSH3_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_FLUSH3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_INVALDC0 ++*/ ++#define PDP_BIF_CONTROL1_MMU_INVALDC0_MASK (0x00000100) ++#define PDP_BIF_CONTROL1_MMU_INVALDC0_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_INVALDC0_SHIFT (8) ++#define PDP_BIF_CONTROL1_MMU_INVALDC0_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_INVALDC0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_INVALDC1 ++*/ ++#define PDP_BIF_CONTROL1_MMU_INVALDC1_MASK (0x00000200) ++#define PDP_BIF_CONTROL1_MMU_INVALDC1_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_INVALDC1_SHIFT (9) ++#define PDP_BIF_CONTROL1_MMU_INVALDC1_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_INVALDC1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_INVALDC2 ++*/ ++#define PDP_BIF_CONTROL1_MMU_INVALDC2_MASK (0x00000400) ++#define PDP_BIF_CONTROL1_MMU_INVALDC2_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_INVALDC2_SHIFT (10) ++#define PDP_BIF_CONTROL1_MMU_INVALDC2_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_INVALDC2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_INVALDC3 ++*/ 
++#define PDP_BIF_CONTROL1_MMU_INVALDC3_MASK (0x00000800) ++#define PDP_BIF_CONTROL1_MMU_INVALDC3_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_INVALDC3_SHIFT (11) ++#define PDP_BIF_CONTROL1_MMU_INVALDC3_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_INVALDC3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_FAULT_CLEAR ++*/ ++#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_MASK (0x00010000) ++#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SHIFT (16) ++#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_FAULT_CLEAR_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, PROTOCOL_FAULT_CLEAR ++*/ ++#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_MASK (0x00100000) ++#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SHIFT (20) ++#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_LENGTH (1) ++#define PDP_BIF_CONTROL1_PROTOCOL_FAULT_CLEAR_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_PAUSE_SET ++*/ ++#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_MASK (0x01000000) ++#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SHIFT (24) ++#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_PAUSE_SET_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_PAUSE_CLEAR ++*/ ++#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_MASK (0x02000000) ++#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SHIFT (25) ++#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_PAUSE_CLEAR_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONTROL1, MMU_SOFT_RESET ++*/ ++#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_MASK (0x10000000) ++#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LSBMASK (0x00000001) ++#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SHIFT (28) ++#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_LENGTH (1) ++#define PDP_BIF_CONTROL1_MMU_SOFT_RESET_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_BANK_INDEX_OFFSET (0x0010) ++ ++/* PDP_BIF, BANK_INDEX, MMU_BANK_INDEX ++*/ ++#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_MASK (0xC0000000) ++#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LSBMASK (0x00000003) ++#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SHIFT (30) ++#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_LENGTH (2) ++#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIGNED_FIELD IMG_FALSE ++#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_NO_REPS (16) ++#define PDP_BIF_BANK_INDEX_MMU_BANK_INDEX_SIZE (2) ++ ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_OFFSET (0x0018) ++ ++/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_PRIORITY_ENABLE ++*/ ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_MASK (0x00008000) ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LSBMASK (0x00000001) ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SHIFT (15) ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_LENGTH (1) ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIGNED_FIELD IMG_FALSE ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_NO_REPS (16) ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_PRIORITY_ENABLE_SIZE (1) ++ ++/* PDP_BIF, REQUEST_PRIORITY_ENABLE, CMD_MMU_PRIORITY_ENABLE ++*/ ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_MASK (0x00010000) ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LSBMASK (0x00000001) ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SHIFT (16) ++#define 
PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_LENGTH (1) ++#define PDP_BIF_REQUEST_PRIORITY_ENABLE_CMD_MMU_PRIORITY_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_OFFSET (0x001C) ++ ++/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, LIMITED_WORDS ++*/ ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_MASK (0x000003FF) ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LSBMASK (0x000003FF) ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SHIFT (0) ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_LENGTH (10) ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_LIMITED_WORDS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, REQUEST_LIMITED_THROUGHPUT, REQUEST_GAP ++*/ ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_MASK (0x0FFF0000) ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LSBMASK (0x00000FFF) ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SHIFT (16) ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_LENGTH (12) ++#define PDP_BIF_REQUEST_LIMITED_THROUGHPUT_REQUEST_GAP_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_ADDRESS_CONTROL_OFFSET (0x0070) ++ ++/* PDP_BIF, ADDRESS_CONTROL, MMU_BYPASS ++*/ ++#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_MASK (0x00000001) ++#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LSBMASK (0x00000001) ++#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SHIFT (0) ++#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_LENGTH (1) ++#define PDP_BIF_ADDRESS_CONTROL_MMU_BYPASS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, ADDRESS_CONTROL, MMU_ENABLE_EXT_ADDRESSING ++*/ ++#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_MASK (0x00000010) ++#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LSBMASK (0x00000001) ++#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SHIFT (4) ++#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_LENGTH (1) ++#define PDP_BIF_ADDRESS_CONTROL_MMU_ENABLE_EXT_ADDRESSING_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, ADDRESS_CONTROL, UPPER_ADDRESS_FIXED ++*/ ++#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_MASK (0x00FF0000) ++#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LSBMASK (0x000000FF) ++#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SHIFT (16) ++#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_LENGTH (8) ++#define PDP_BIF_ADDRESS_CONTROL_UPPER_ADDRESS_FIXED_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_CONFIG0_OFFSET (0x0080) ++ ++/* PDP_BIF, CONFIG0, NUM_REQUESTORS ++*/ ++#define PDP_BIF_CONFIG0_NUM_REQUESTORS_MASK (0x0000000F) ++#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LSBMASK (0x0000000F) ++#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SHIFT (0) ++#define PDP_BIF_CONFIG0_NUM_REQUESTORS_LENGTH (4) ++#define PDP_BIF_CONFIG0_NUM_REQUESTORS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG0, EXTENDED_ADDR_RANGE ++*/ ++#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_MASK (0x000000F0) ++#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LSBMASK (0x0000000F) ++#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SHIFT (4) ++#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_LENGTH (4) ++#define PDP_BIF_CONFIG0_EXTENDED_ADDR_RANGE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG0, GROUP_OVERRIDE_SIZE ++*/ ++#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_MASK (0x00000700) ++#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LSBMASK (0x00000007) ++#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SHIFT (8) ++#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_LENGTH (3) ++#define PDP_BIF_CONFIG0_GROUP_OVERRIDE_SIZE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG0, ADDR_COHERENCY_SUPPORTED ++*/ ++#define 
PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_MASK (0x00001000) ++#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LSBMASK (0x00000001) ++#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SHIFT (12) ++#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_LENGTH (1) ++#define PDP_BIF_CONFIG0_ADDR_COHERENCY_SUPPORTED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG0, MMU_SUPPORTED ++*/ ++#define PDP_BIF_CONFIG0_MMU_SUPPORTED_MASK (0x00002000) ++#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LSBMASK (0x00000001) ++#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SHIFT (13) ++#define PDP_BIF_CONFIG0_MMU_SUPPORTED_LENGTH (1) ++#define PDP_BIF_CONFIG0_MMU_SUPPORTED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG0, TILE_ADDR_GRANULARITY ++*/ ++#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_MASK (0x001F0000) ++#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LSBMASK (0x0000001F) ++#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SHIFT (16) ++#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_LENGTH (5) ++#define PDP_BIF_CONFIG0_TILE_ADDR_GRANULARITY_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG0, NO_READ_REORDER ++*/ ++#define PDP_BIF_CONFIG0_NO_READ_REORDER_MASK (0x00200000) ++#define PDP_BIF_CONFIG0_NO_READ_REORDER_LSBMASK (0x00000001) ++#define PDP_BIF_CONFIG0_NO_READ_REORDER_SHIFT (21) ++#define PDP_BIF_CONFIG0_NO_READ_REORDER_LENGTH (1) ++#define PDP_BIF_CONFIG0_NO_READ_REORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG0, TAGS_SUPPORTED ++*/ ++#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_MASK (0xFFC00000) ++#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LSBMASK (0x000003FF) ++#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SHIFT (22) ++#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_LENGTH (10) ++#define PDP_BIF_CONFIG0_TAGS_SUPPORTED_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_CONFIG1_OFFSET (0x0084) ++ ++/* PDP_BIF, CONFIG1, PAGE_SIZE ++*/ ++#define PDP_BIF_CONFIG1_PAGE_SIZE_MASK (0x0000000F) ++#define PDP_BIF_CONFIG1_PAGE_SIZE_LSBMASK (0x0000000F) ++#define PDP_BIF_CONFIG1_PAGE_SIZE_SHIFT (0) ++#define PDP_BIF_CONFIG1_PAGE_SIZE_LENGTH (4) ++#define PDP_BIF_CONFIG1_PAGE_SIZE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG1, PAGE_CACHE_ENTRIES ++*/ ++#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_MASK (0x0000FF00) ++#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LSBMASK (0x000000FF) ++#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SHIFT (8) ++#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_LENGTH (8) ++#define PDP_BIF_CONFIG1_PAGE_CACHE_ENTRIES_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG1, DIR_CACHE_ENTRIES ++*/ ++#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_MASK (0x001F0000) ++#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LSBMASK (0x0000001F) ++#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SHIFT (16) ++#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_LENGTH (5) ++#define PDP_BIF_CONFIG1_DIR_CACHE_ENTRIES_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG1, BANDWIDTH_COUNT_SUPPORTED ++*/ ++#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_MASK (0x01000000) ++#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LSBMASK (0x00000001) ++#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SHIFT (24) ++#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_LENGTH (1) ++#define PDP_BIF_CONFIG1_BANDWIDTH_COUNT_SUPPORTED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG1, STALL_COUNT_SUPPORTED ++*/ ++#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_MASK (0x02000000) ++#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LSBMASK (0x00000001) ++#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SHIFT (25) ++#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_LENGTH (1) ++#define PDP_BIF_CONFIG1_STALL_COUNT_SUPPORTED_SIGNED_FIELD IMG_FALSE ++ 
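++/* Illustrative sketch, not part of the original register map: fields in
++ * these registers are read by combining the corresponding _MASK and _SHIFT
++ * pairs, as below for the CONFIG1 page size field. Obtaining the register
++ * value itself is assumed to happen elsewhere.
++ */
++#if 0
++static IMG_UINT32 ExampleGetConfig1PageSize(IMG_UINT32 ui32Config1)
++{
++	return (ui32Config1 & PDP_BIF_CONFIG1_PAGE_SIZE_MASK) >>
++	       PDP_BIF_CONFIG1_PAGE_SIZE_SHIFT;
++}
++#endif
++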
++/* PDP_BIF, CONFIG1, LATENCY_COUNT_SUPPORTED ++*/ ++#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_MASK (0x04000000) ++#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LSBMASK (0x00000001) ++#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SHIFT (26) ++#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_LENGTH (1) ++#define PDP_BIF_CONFIG1_LATENCY_COUNT_SUPPORTED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, CONFIG1, SUPPORT_READ_INTERLEAVE ++*/ ++#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_MASK (0x10000000) ++#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LSBMASK (0x00000001) ++#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SHIFT (28) ++#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_LENGTH (1) ++#define PDP_BIF_CONFIG1_SUPPORT_READ_INTERLEAVE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_STATUS0_OFFSET (0x0088) ++ ++/* PDP_BIF, STATUS0, MMU_PF_N_RW ++*/ ++#define PDP_BIF_STATUS0_MMU_PF_N_RW_MASK (0x00000001) ++#define PDP_BIF_STATUS0_MMU_PF_N_RW_LSBMASK (0x00000001) ++#define PDP_BIF_STATUS0_MMU_PF_N_RW_SHIFT (0) ++#define PDP_BIF_STATUS0_MMU_PF_N_RW_LENGTH (1) ++#define PDP_BIF_STATUS0_MMU_PF_N_RW_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, STATUS0, MMU_FAULT_ADDR ++*/ ++#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_MASK (0xFFFFF000) ++#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LSBMASK (0x000FFFFF) ++#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SHIFT (12) ++#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_LENGTH (20) ++#define PDP_BIF_STATUS0_MMU_FAULT_ADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_STATUS1_OFFSET (0x008C) ++ ++/* PDP_BIF, STATUS1, MMU_FAULT_REQ_STAT ++*/ ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_MASK (0x0000FFFF) ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LSBMASK (0x0000FFFF) ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SHIFT (0) ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_LENGTH (16) ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_STAT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, STATUS1, MMU_FAULT_REQ_ID ++*/ ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_MASK (0x000F0000) ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LSBMASK (0x0000000F) ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SHIFT (16) ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_LENGTH (4) ++#define PDP_BIF_STATUS1_MMU_FAULT_REQ_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, STATUS1, MMU_FAULT_INDEX ++*/ ++#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_MASK (0x03000000) ++#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LSBMASK (0x00000003) ++#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SHIFT (24) ++#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_LENGTH (2) ++#define PDP_BIF_STATUS1_MMU_FAULT_INDEX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, STATUS1, MMU_FAULT_RNW ++*/ ++#define PDP_BIF_STATUS1_MMU_FAULT_RNW_MASK (0x10000000) ++#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LSBMASK (0x00000001) ++#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SHIFT (28) ++#define PDP_BIF_STATUS1_MMU_FAULT_RNW_LENGTH (1) ++#define PDP_BIF_STATUS1_MMU_FAULT_RNW_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_MEM_REQ_OFFSET (0x0090) ++ ++/* PDP_BIF, MEM_REQ, TAG_OUTSTANDING ++*/ ++#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_MASK (0x000003FF) ++#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LSBMASK (0x000003FF) ++#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SHIFT (0) ++#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_LENGTH (10) ++#define PDP_BIF_MEM_REQ_TAG_OUTSTANDING_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, MEM_REQ, EXT_WRRESP_FAULT ++*/ ++#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_MASK (0x00001000) ++#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LSBMASK (0x00000001) ++#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SHIFT (12) ++#define 
PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_LENGTH (1) ++#define PDP_BIF_MEM_REQ_EXT_WRRESP_FAULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, MEM_REQ, EXT_RDRESP_FAULT ++*/ ++#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_MASK (0x00002000) ++#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LSBMASK (0x00000001) ++#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SHIFT (13) ++#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_LENGTH (1) ++#define PDP_BIF_MEM_REQ_EXT_RDRESP_FAULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, MEM_REQ, EXT_READ_BURST_FAULT ++*/ ++#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_MASK (0x00004000) ++#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LSBMASK (0x00000001) ++#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SHIFT (14) ++#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_LENGTH (1) ++#define PDP_BIF_MEM_REQ_EXT_READ_BURST_FAULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, MEM_REQ, INT_PROTOCOL_FAULT ++*/ ++#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_MASK (0x80000000) ++#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LSBMASK (0x00000001) ++#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SHIFT (31) ++#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_LENGTH (1) ++#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIGNED_FIELD IMG_FALSE ++#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_NO_REPS (16) ++#define PDP_BIF_MEM_REQ_INT_PROTOCOL_FAULT_SIZE (1) ++ ++#define PDP_BIF_MEM_EXT_OUTSTANDING_OFFSET (0x0094) ++ ++/* PDP_BIF, MEM_EXT_OUTSTANDING, READ_WORDS_OUTSTANDING ++*/ ++#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_MASK (0x0000FFFF) ++#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LSBMASK (0x0000FFFF) ++#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SHIFT (0) ++#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_LENGTH (16) ++#define PDP_BIF_MEM_EXT_OUTSTANDING_READ_WORDS_OUTSTANDING_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_FAULT_SELECT_OFFSET (0x00A0) ++ ++/* PDP_BIF, FAULT_SELECT, MMU_FAULT_SELECT ++*/ ++#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_MASK (0x0000000F) ++#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LSBMASK (0x0000000F) ++#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SHIFT (0) ++#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_LENGTH (4) ++#define PDP_BIF_FAULT_SELECT_MMU_FAULT_SELECT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_PROTOCOL_FAULT_OFFSET (0x00A8) ++ ++/* PDP_BIF, PROTOCOL_FAULT, FAULT_PAGE_BREAK ++*/ ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_MASK (0x00000001) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LSBMASK (0x00000001) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SHIFT (0) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_LENGTH (1) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_PAGE_BREAK_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, PROTOCOL_FAULT, FAULT_WRITE ++*/ ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_MASK (0x00000010) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LSBMASK (0x00000001) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SHIFT (4) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_LENGTH (1) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_WRITE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, PROTOCOL_FAULT, FAULT_READ ++*/ ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_MASK (0x00000020) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LSBMASK (0x00000001) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SHIFT (5) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_LENGTH (1) ++#define PDP_BIF_PROTOCOL_FAULT_FAULT_READ_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_TOTAL_READ_REQ_OFFSET (0x0100) ++ ++/* PDP_BIF, TOTAL_READ_REQ, TOTAL_READ_REQ ++*/ ++#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_MASK 
(0xFFFFFFFF) ++#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SHIFT (0) ++#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_LENGTH (32) ++#define PDP_BIF_TOTAL_READ_REQ_TOTAL_READ_REQ_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_TOTAL_WRITE_REQ_OFFSET (0x0104) ++ ++/* PDP_BIF, TOTAL_WRITE_REQ, TOTAL_WRITE_REQ ++*/ ++#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_MASK (0xFFFFFFFF) ++#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SHIFT (0) ++#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_LENGTH (32) ++#define PDP_BIF_TOTAL_WRITE_REQ_TOTAL_WRITE_REQ_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_READS_LESS_64_REQ_OFFSET (0x0108) ++ ++/* PDP_BIF, READS_LESS_64_REQ, READS_LESS_64_REQ ++*/ ++#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_MASK (0xFFFFFFFF) ++#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SHIFT (0) ++#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_LENGTH (32) ++#define PDP_BIF_READS_LESS_64_REQ_READS_LESS_64_REQ_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_WRITES_LESS_64_REQ_OFFSET (0x010C) ++ ++/* PDP_BIF, WRITES_LESS_64_REQ, WRITES_LESS_64_REQ ++*/ ++#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_MASK (0xFFFFFFFF) ++#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SHIFT (0) ++#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_LENGTH (32) ++#define PDP_BIF_WRITES_LESS_64_REQ_WRITES_LESS_64_REQ_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_EXT_CMD_STALL_OFFSET (0x0120) ++ ++/* PDP_BIF, EXT_CMD_STALL, EXT_CMD_STALL ++*/ ++#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_MASK (0xFFFFFFFF) ++#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SHIFT (0) ++#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_LENGTH (32) ++#define PDP_BIF_EXT_CMD_STALL_EXT_CMD_STALL_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_WRITE_REQ_STALL_OFFSET (0x0124) ++ ++/* PDP_BIF, WRITE_REQ_STALL, WRITE_REQ_STALL ++*/ ++#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_MASK (0xFFFFFFFF) ++#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SHIFT (0) ++#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_LENGTH (32) ++#define PDP_BIF_WRITE_REQ_STALL_WRITE_REQ_STALL_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_MISS_STALL_OFFSET (0x0128) ++ ++/* PDP_BIF, MISS_STALL, MMU_MISS_STALL ++*/ ++#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_MASK (0xFFFFFFFF) ++#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SHIFT (0) ++#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_LENGTH (32) ++#define PDP_BIF_MISS_STALL_MMU_MISS_STALL_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_ADDRESS_STALL_OFFSET (0x012C) ++ ++/* PDP_BIF, ADDRESS_STALL, ADDRESS_STALL ++*/ ++#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_MASK (0xFFFFFFFF) ++#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SHIFT (0) ++#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_LENGTH (32) ++#define PDP_BIF_ADDRESS_STALL_ADDRESS_STALL_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_TAG_STALL_OFFSET (0x0130) ++ ++/* PDP_BIF, TAG_STALL, TAG_STALL ++*/ ++#define PDP_BIF_TAG_STALL_TAG_STALL_MASK (0xFFFFFFFF) ++#define PDP_BIF_TAG_STALL_TAG_STALL_LSBMASK (0xFFFFFFFF) ++#define 
PDP_BIF_TAG_STALL_TAG_STALL_SHIFT (0) ++#define PDP_BIF_TAG_STALL_TAG_STALL_LENGTH (32) ++#define PDP_BIF_TAG_STALL_TAG_STALL_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_PEAK_READ_OUTSTANDING_OFFSET (0x0140) ++ ++/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_TAG_OUTSTANDING ++*/ ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_MASK (0x000003FF) ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LSBMASK (0x000003FF) ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SHIFT (0) ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_LENGTH (10) ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_TAG_OUTSTANDING_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, PEAK_READ_OUTSTANDING, PEAK_READ_LATENCY ++*/ ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_MASK (0xFFFF0000) ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LSBMASK (0x0000FFFF) ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SHIFT (16) ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_LENGTH (16) ++#define PDP_BIF_PEAK_READ_OUTSTANDING_PEAK_READ_LATENCY_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_AVERAGE_READ_LATENCY_OFFSET (0x0144) ++ ++/* PDP_BIF, AVERAGE_READ_LATENCY, AVERAGE_READ_LATENCY ++*/ ++#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_MASK (0xFFFFFFFF) ++#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LSBMASK (0xFFFFFFFF) ++#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SHIFT (0) ++#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_LENGTH (32) ++#define PDP_BIF_AVERAGE_READ_LATENCY_AVERAGE_READ_LATENCY_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_STATISTICS_CONTROL_OFFSET (0x0160) ++ ++/* PDP_BIF, STATISTICS_CONTROL, BANDWIDTH_STATS_INIT ++*/ ++#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_MASK (0x00000001) ++#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LSBMASK (0x00000001) ++#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SHIFT (0) ++#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_LENGTH (1) ++#define PDP_BIF_STATISTICS_CONTROL_BANDWIDTH_STATS_INIT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, STATISTICS_CONTROL, STALL_STATS_INIT ++*/ ++#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_MASK (0x00000002) ++#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LSBMASK (0x00000001) ++#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SHIFT (1) ++#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_LENGTH (1) ++#define PDP_BIF_STATISTICS_CONTROL_STALL_STATS_INIT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, STATISTICS_CONTROL, LATENCY_STATS_INIT ++*/ ++#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_MASK (0x00000004) ++#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LSBMASK (0x00000001) ++#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SHIFT (2) ++#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_LENGTH (1) ++#define PDP_BIF_STATISTICS_CONTROL_LATENCY_STATS_INIT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BIF_VERSION_OFFSET (0x01D0) ++ ++/* PDP_BIF, VERSION, MMU_MAJOR_REV ++*/ ++#define PDP_BIF_VERSION_MMU_MAJOR_REV_MASK (0x00FF0000) ++#define PDP_BIF_VERSION_MMU_MAJOR_REV_LSBMASK (0x000000FF) ++#define PDP_BIF_VERSION_MMU_MAJOR_REV_SHIFT (16) ++#define PDP_BIF_VERSION_MMU_MAJOR_REV_LENGTH (8) ++#define PDP_BIF_VERSION_MMU_MAJOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, VERSION, MMU_MINOR_REV ++*/ ++#define PDP_BIF_VERSION_MMU_MINOR_REV_MASK (0x0000FF00) ++#define PDP_BIF_VERSION_MMU_MINOR_REV_LSBMASK (0x000000FF) ++#define PDP_BIF_VERSION_MMU_MINOR_REV_SHIFT (8) ++#define 
PDP_BIF_VERSION_MMU_MINOR_REV_LENGTH (8) ++#define PDP_BIF_VERSION_MMU_MINOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP_BIF, VERSION, MMU_MAINT_REV ++*/ ++#define PDP_BIF_VERSION_MMU_MAINT_REV_MASK (0x000000FF) ++#define PDP_BIF_VERSION_MMU_MAINT_REV_LSBMASK (0x000000FF) ++#define PDP_BIF_VERSION_MMU_MAINT_REV_SHIFT (0) ++#define PDP_BIF_VERSION_MMU_MAINT_REV_LENGTH (8) ++#define PDP_BIF_VERSION_MMU_MAINT_REV_SIGNED_FIELD IMG_FALSE ++ ++#endif /* _PDP2_MMU_REGS_H */ +diff --git a/drivers/gpu/drm/img-rogue/pdp2_regs.h b/drivers/gpu/drm/img-rogue/pdp2_regs.h +new file mode 100644 +index 000000000000..bf85386b1df5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pdp2_regs.h +@@ -0,0 +1,8565 @@ ++/*************************************************************************/ /*! ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++ ++#ifndef _PDP2_REGS_H ++#define _PDP2_REGS_H ++ ++/* ++ * Bitfield operations ++ * For each argument field, the following preprocessor macros must exist ++ * field##_MASK - the number of bits in the bit field ++ * field##_SHIFT - offset from the first bit ++ */ ++#define PLACE_FIELD(field, val) \ ++ (((u32)(val) << (field##_SHIFT)) & (field##_MASK)) ++ ++#define ADJ_FIELD(x, field, val) \ ++ (((x) & ~(field##_MASK)) \ ++ | PLACE_FIELD(field, val)) ++ ++#define SET_FIELD(x, field, val) \ ++ (x) = ADJ_FIELD(x, field, val) ++ ++#define GET_FIELD(x, field) \ ++ (((x) & (field##_MASK)) >> (field##_SHIFT)) ++ ++/* Keeps most significant bits */ ++#define MOVE_FIELD(x, o1, l1, o2, l2) \ ++ (((x) >> ((o1) + (l1) - (l2))) << (o2)) ++ ++#define MAX_FIELD_VALUE(field) \ ++ ((field##_MASK) >> (field##_SHIFT)) ++ ++/* Hardware register definitions */ ++ ++#define PDP_GRPH1SURF_OFFSET (0x0000) ++ ++/* PDP, GRPH1SURF, GRPH1PIXFMT ++*/ ++#define PDP_GRPH1SURF_GRPH1PIXFMT_MASK (0xF8000000) ++#define PDP_GRPH1SURF_GRPH1PIXFMT_LSBMASK (0x0000001F) ++#define PDP_GRPH1SURF_GRPH1PIXFMT_SHIFT (27) ++#define PDP_GRPH1SURF_GRPH1PIXFMT_LENGTH (5) ++#define PDP_GRPH1SURF_GRPH1PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1USEGAMMA ++*/ ++#define PDP_GRPH1SURF_GRPH1USEGAMMA_MASK (0x04000000) ++#define PDP_GRPH1SURF_GRPH1USEGAMMA_LSBMASK (0x00000001) ++#define PDP_GRPH1SURF_GRPH1USEGAMMA_SHIFT (26) ++#define PDP_GRPH1SURF_GRPH1USEGAMMA_LENGTH (1) ++#define PDP_GRPH1SURF_GRPH1USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1USECSC ++*/ ++#define PDP_GRPH1SURF_GRPH1USECSC_MASK (0x02000000) ++#define PDP_GRPH1SURF_GRPH1USECSC_LSBMASK (0x00000001) ++#define PDP_GRPH1SURF_GRPH1USECSC_SHIFT (25) ++#define PDP_GRPH1SURF_GRPH1USECSC_LENGTH (1) ++#define PDP_GRPH1SURF_GRPH1USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1LUTRWCHOICE ++*/ ++#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_MASK (0x01000000) ++#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LSBMASK (0x00000001) ++#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SHIFT (24) ++#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_LENGTH (1) ++#define PDP_GRPH1SURF_GRPH1LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SURF, GRPH1USELUT ++*/ ++#define PDP_GRPH1SURF_GRPH1USELUT_MASK (0x00800000) ++#define PDP_GRPH1SURF_GRPH1USELUT_LSBMASK (0x00000001) ++#define PDP_GRPH1SURF_GRPH1USELUT_SHIFT (23) ++#define PDP_GRPH1SURF_GRPH1USELUT_LENGTH (1) ++#define PDP_GRPH1SURF_GRPH1USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2SURF_OFFSET (0x0004) ++ ++/* PDP, GRPH2SURF, GRPH2PIXFMT ++*/ ++#define PDP_GRPH2SURF_GRPH2PIXFMT_MASK (0xF8000000) ++#define PDP_GRPH2SURF_GRPH2PIXFMT_LSBMASK (0x0000001F) ++#define PDP_GRPH2SURF_GRPH2PIXFMT_SHIFT (27) ++#define PDP_GRPH2SURF_GRPH2PIXFMT_LENGTH (5) ++#define PDP_GRPH2SURF_GRPH2PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2USEGAMMA ++*/ ++#define PDP_GRPH2SURF_GRPH2USEGAMMA_MASK (0x04000000) ++#define PDP_GRPH2SURF_GRPH2USEGAMMA_LSBMASK (0x00000001) ++#define PDP_GRPH2SURF_GRPH2USEGAMMA_SHIFT (26) ++#define PDP_GRPH2SURF_GRPH2USEGAMMA_LENGTH (1) ++#define PDP_GRPH2SURF_GRPH2USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2USECSC ++*/ ++#define PDP_GRPH2SURF_GRPH2USECSC_MASK (0x02000000) ++#define PDP_GRPH2SURF_GRPH2USECSC_LSBMASK (0x00000001) ++#define PDP_GRPH2SURF_GRPH2USECSC_SHIFT (25) ++#define PDP_GRPH2SURF_GRPH2USECSC_LENGTH (1) ++#define PDP_GRPH2SURF_GRPH2USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, 
GRPH2LUTRWCHOICE ++*/ ++#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_MASK (0x01000000) ++#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LSBMASK (0x00000001) ++#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SHIFT (24) ++#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_LENGTH (1) ++#define PDP_GRPH2SURF_GRPH2LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SURF, GRPH2USELUT ++*/ ++#define PDP_GRPH2SURF_GRPH2USELUT_MASK (0x00800000) ++#define PDP_GRPH2SURF_GRPH2USELUT_LSBMASK (0x00000001) ++#define PDP_GRPH2SURF_GRPH2USELUT_SHIFT (23) ++#define PDP_GRPH2SURF_GRPH2USELUT_LENGTH (1) ++#define PDP_GRPH2SURF_GRPH2USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3SURF_OFFSET (0x0008) ++ ++/* PDP, GRPH3SURF, GRPH3PIXFMT ++*/ ++#define PDP_GRPH3SURF_GRPH3PIXFMT_MASK (0xF8000000) ++#define PDP_GRPH3SURF_GRPH3PIXFMT_LSBMASK (0x0000001F) ++#define PDP_GRPH3SURF_GRPH3PIXFMT_SHIFT (27) ++#define PDP_GRPH3SURF_GRPH3PIXFMT_LENGTH (5) ++#define PDP_GRPH3SURF_GRPH3PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3USEGAMMA ++*/ ++#define PDP_GRPH3SURF_GRPH3USEGAMMA_MASK (0x04000000) ++#define PDP_GRPH3SURF_GRPH3USEGAMMA_LSBMASK (0x00000001) ++#define PDP_GRPH3SURF_GRPH3USEGAMMA_SHIFT (26) ++#define PDP_GRPH3SURF_GRPH3USEGAMMA_LENGTH (1) ++#define PDP_GRPH3SURF_GRPH3USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3USECSC ++*/ ++#define PDP_GRPH3SURF_GRPH3USECSC_MASK (0x02000000) ++#define PDP_GRPH3SURF_GRPH3USECSC_LSBMASK (0x00000001) ++#define PDP_GRPH3SURF_GRPH3USECSC_SHIFT (25) ++#define PDP_GRPH3SURF_GRPH3USECSC_LENGTH (1) ++#define PDP_GRPH3SURF_GRPH3USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3LUTRWCHOICE ++*/ ++#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_MASK (0x01000000) ++#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LSBMASK (0x00000001) ++#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SHIFT (24) ++#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_LENGTH (1) ++#define PDP_GRPH3SURF_GRPH3LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SURF, GRPH3USELUT ++*/ ++#define PDP_GRPH3SURF_GRPH3USELUT_MASK (0x00800000) ++#define PDP_GRPH3SURF_GRPH3USELUT_LSBMASK (0x00000001) ++#define PDP_GRPH3SURF_GRPH3USELUT_SHIFT (23) ++#define PDP_GRPH3SURF_GRPH3USELUT_LENGTH (1) ++#define PDP_GRPH3SURF_GRPH3USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4SURF_OFFSET (0x000C) ++ ++/* PDP, GRPH4SURF, GRPH4PIXFMT ++*/ ++#define PDP_GRPH4SURF_GRPH4PIXFMT_MASK (0xF8000000) ++#define PDP_GRPH4SURF_GRPH4PIXFMT_LSBMASK (0x0000001F) ++#define PDP_GRPH4SURF_GRPH4PIXFMT_SHIFT (27) ++#define PDP_GRPH4SURF_GRPH4PIXFMT_LENGTH (5) ++#define PDP_GRPH4SURF_GRPH4PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4USEGAMMA ++*/ ++#define PDP_GRPH4SURF_GRPH4USEGAMMA_MASK (0x04000000) ++#define PDP_GRPH4SURF_GRPH4USEGAMMA_LSBMASK (0x00000001) ++#define PDP_GRPH4SURF_GRPH4USEGAMMA_SHIFT (26) ++#define PDP_GRPH4SURF_GRPH4USEGAMMA_LENGTH (1) ++#define PDP_GRPH4SURF_GRPH4USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4USECSC ++*/ ++#define PDP_GRPH4SURF_GRPH4USECSC_MASK (0x02000000) ++#define PDP_GRPH4SURF_GRPH4USECSC_LSBMASK (0x00000001) ++#define PDP_GRPH4SURF_GRPH4USECSC_SHIFT (25) ++#define PDP_GRPH4SURF_GRPH4USECSC_LENGTH (1) ++#define PDP_GRPH4SURF_GRPH4USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, GRPH4LUTRWCHOICE ++*/ ++#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_MASK (0x01000000) ++#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LSBMASK (0x00000001) ++#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SHIFT (24) ++#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_LENGTH (1) ++#define PDP_GRPH4SURF_GRPH4LUTRWCHOICE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SURF, 
GRPH4USELUT ++*/ ++#define PDP_GRPH4SURF_GRPH4USELUT_MASK (0x00800000) ++#define PDP_GRPH4SURF_GRPH4USELUT_LSBMASK (0x00000001) ++#define PDP_GRPH4SURF_GRPH4USELUT_SHIFT (23) ++#define PDP_GRPH4SURF_GRPH4USELUT_LENGTH (1) ++#define PDP_GRPH4SURF_GRPH4USELUT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1SURF_OFFSET (0x0010) ++ ++/* PDP, VID1SURF, VID1PIXFMT ++*/ ++#define PDP_VID1SURF_VID1PIXFMT_MASK (0xF8000000) ++#define PDP_VID1SURF_VID1PIXFMT_LSBMASK (0x0000001F) ++#define PDP_VID1SURF_VID1PIXFMT_SHIFT (27) ++#define PDP_VID1SURF_VID1PIXFMT_LENGTH (5) ++#define PDP_VID1SURF_VID1PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEGAMMA ++*/ ++#define PDP_VID1SURF_VID1USEGAMMA_MASK (0x04000000) ++#define PDP_VID1SURF_VID1USEGAMMA_LSBMASK (0x00000001) ++#define PDP_VID1SURF_VID1USEGAMMA_SHIFT (26) ++#define PDP_VID1SURF_VID1USEGAMMA_LENGTH (1) ++#define PDP_VID1SURF_VID1USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USECSC ++*/ ++#define PDP_VID1SURF_VID1USECSC_MASK (0x02000000) ++#define PDP_VID1SURF_VID1USECSC_LSBMASK (0x00000001) ++#define PDP_VID1SURF_VID1USECSC_SHIFT (25) ++#define PDP_VID1SURF_VID1USECSC_LENGTH (1) ++#define PDP_VID1SURF_VID1USECSC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEI2P ++*/ ++#define PDP_VID1SURF_VID1USEI2P_MASK (0x01000000) ++#define PDP_VID1SURF_VID1USEI2P_LSBMASK (0x00000001) ++#define PDP_VID1SURF_VID1USEI2P_SHIFT (24) ++#define PDP_VID1SURF_VID1USEI2P_LENGTH (1) ++#define PDP_VID1SURF_VID1USEI2P_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1COSITED ++*/ ++#define PDP_VID1SURF_VID1COSITED_MASK (0x00800000) ++#define PDP_VID1SURF_VID1COSITED_LSBMASK (0x00000001) ++#define PDP_VID1SURF_VID1COSITED_SHIFT (23) ++#define PDP_VID1SURF_VID1COSITED_LENGTH (1) ++#define PDP_VID1SURF_VID1COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEHQCD ++*/ ++#define PDP_VID1SURF_VID1USEHQCD_MASK (0x00400000) ++#define PDP_VID1SURF_VID1USEHQCD_LSBMASK (0x00000001) ++#define PDP_VID1SURF_VID1USEHQCD_SHIFT (22) ++#define PDP_VID1SURF_VID1USEHQCD_LENGTH (1) ++#define PDP_VID1SURF_VID1USEHQCD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SURF, VID1USEINSTREAM ++*/ ++#define PDP_VID1SURF_VID1USEINSTREAM_MASK (0x00200000) ++#define PDP_VID1SURF_VID1USEINSTREAM_LSBMASK (0x00000001) ++#define PDP_VID1SURF_VID1USEINSTREAM_SHIFT (21) ++#define PDP_VID1SURF_VID1USEINSTREAM_LENGTH (1) ++#define PDP_VID1SURF_VID1USEINSTREAM_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2SURF_OFFSET (0x0014) ++ ++/* PDP, VID2SURF, VID2PIXFMT ++*/ ++#define PDP_VID2SURF_VID2PIXFMT_MASK (0xF8000000) ++#define PDP_VID2SURF_VID2PIXFMT_LSBMASK (0x0000001F) ++#define PDP_VID2SURF_VID2PIXFMT_SHIFT (27) ++#define PDP_VID2SURF_VID2PIXFMT_LENGTH (5) ++#define PDP_VID2SURF_VID2PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SURF, VID2COSITED ++*/ ++#define PDP_VID2SURF_VID2COSITED_MASK (0x00800000) ++#define PDP_VID2SURF_VID2COSITED_LSBMASK (0x00000001) ++#define PDP_VID2SURF_VID2COSITED_SHIFT (23) ++#define PDP_VID2SURF_VID2COSITED_LENGTH (1) ++#define PDP_VID2SURF_VID2COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SURF, VID2USEGAMMA ++*/ ++#define PDP_VID2SURF_VID2USEGAMMA_MASK (0x04000000) ++#define PDP_VID2SURF_VID2USEGAMMA_LSBMASK (0x00000001) ++#define PDP_VID2SURF_VID2USEGAMMA_SHIFT (26) ++#define PDP_VID2SURF_VID2USEGAMMA_LENGTH (1) ++#define PDP_VID2SURF_VID2USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SURF, VID2USECSC ++*/ ++#define PDP_VID2SURF_VID2USECSC_MASK (0x02000000) ++#define PDP_VID2SURF_VID2USECSC_LSBMASK (0x00000001) ++#define PDP_VID2SURF_VID2USECSC_SHIFT 
(25) ++#define PDP_VID2SURF_VID2USECSC_LENGTH (1) ++#define PDP_VID2SURF_VID2USECSC_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3SURF_OFFSET (0x0018) ++ ++/* PDP, VID3SURF, VID3PIXFMT ++*/ ++#define PDP_VID3SURF_VID3PIXFMT_MASK (0xF8000000) ++#define PDP_VID3SURF_VID3PIXFMT_LSBMASK (0x0000001F) ++#define PDP_VID3SURF_VID3PIXFMT_SHIFT (27) ++#define PDP_VID3SURF_VID3PIXFMT_LENGTH (5) ++#define PDP_VID3SURF_VID3PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SURF, VID3COSITED ++*/ ++#define PDP_VID3SURF_VID3COSITED_MASK (0x00800000) ++#define PDP_VID3SURF_VID3COSITED_LSBMASK (0x00000001) ++#define PDP_VID3SURF_VID3COSITED_SHIFT (23) ++#define PDP_VID3SURF_VID3COSITED_LENGTH (1) ++#define PDP_VID3SURF_VID3COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SURF, VID3USEGAMMA ++*/ ++#define PDP_VID3SURF_VID3USEGAMMA_MASK (0x04000000) ++#define PDP_VID3SURF_VID3USEGAMMA_LSBMASK (0x00000001) ++#define PDP_VID3SURF_VID3USEGAMMA_SHIFT (26) ++#define PDP_VID3SURF_VID3USEGAMMA_LENGTH (1) ++#define PDP_VID3SURF_VID3USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SURF, VID3USECSC ++*/ ++#define PDP_VID3SURF_VID3USECSC_MASK (0x02000000) ++#define PDP_VID3SURF_VID3USECSC_LSBMASK (0x00000001) ++#define PDP_VID3SURF_VID3USECSC_SHIFT (25) ++#define PDP_VID3SURF_VID3USECSC_LENGTH (1) ++#define PDP_VID3SURF_VID3USECSC_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4SURF_OFFSET (0x001C) ++ ++/* PDP, VID4SURF, VID4PIXFMT ++*/ ++#define PDP_VID4SURF_VID4PIXFMT_MASK (0xF8000000) ++#define PDP_VID4SURF_VID4PIXFMT_LSBMASK (0x0000001F) ++#define PDP_VID4SURF_VID4PIXFMT_SHIFT (27) ++#define PDP_VID4SURF_VID4PIXFMT_LENGTH (5) ++#define PDP_VID4SURF_VID4PIXFMT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SURF, VID4COSITED ++*/ ++#define PDP_VID4SURF_VID4COSITED_MASK (0x00800000) ++#define PDP_VID4SURF_VID4COSITED_LSBMASK (0x00000001) ++#define PDP_VID4SURF_VID4COSITED_SHIFT (23) ++#define PDP_VID4SURF_VID4COSITED_LENGTH (1) ++#define PDP_VID4SURF_VID4COSITED_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SURF, VID4USEGAMMA ++*/ ++#define PDP_VID4SURF_VID4USEGAMMA_MASK (0x04000000) ++#define PDP_VID4SURF_VID4USEGAMMA_LSBMASK (0x00000001) ++#define PDP_VID4SURF_VID4USEGAMMA_SHIFT (26) ++#define PDP_VID4SURF_VID4USEGAMMA_LENGTH (1) ++#define PDP_VID4SURF_VID4USEGAMMA_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SURF, VID4USECSC ++*/ ++#define PDP_VID4SURF_VID4USECSC_MASK (0x02000000) ++#define PDP_VID4SURF_VID4USECSC_LSBMASK (0x00000001) ++#define PDP_VID4SURF_VID4USECSC_SHIFT (25) ++#define PDP_VID4SURF_VID4USECSC_LENGTH (1) ++#define PDP_VID4SURF_VID4USECSC_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1CTRL_OFFSET (0x0020) ++ ++/* PDP, GRPH1CTRL, GRPH1STREN ++*/ ++#define PDP_GRPH1CTRL_GRPH1STREN_MASK (0x80000000) ++#define PDP_GRPH1CTRL_GRPH1STREN_LSBMASK (0x00000001) ++#define PDP_GRPH1CTRL_GRPH1STREN_SHIFT (31) ++#define PDP_GRPH1CTRL_GRPH1STREN_LENGTH (1) ++#define PDP_GRPH1CTRL_GRPH1STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1CKEYEN ++*/ ++#define PDP_GRPH1CTRL_GRPH1CKEYEN_MASK (0x40000000) ++#define PDP_GRPH1CTRL_GRPH1CKEYEN_LSBMASK (0x00000001) ++#define PDP_GRPH1CTRL_GRPH1CKEYEN_SHIFT (30) ++#define PDP_GRPH1CTRL_GRPH1CKEYEN_LENGTH (1) ++#define PDP_GRPH1CTRL_GRPH1CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1CKEYSRC ++*/ ++#define PDP_GRPH1CTRL_GRPH1CKEYSRC_MASK (0x20000000) ++#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LSBMASK (0x00000001) ++#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SHIFT (29) ++#define PDP_GRPH1CTRL_GRPH1CKEYSRC_LENGTH (1) ++#define PDP_GRPH1CTRL_GRPH1CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, 
GRPH1BLEND ++*/ ++#define PDP_GRPH1CTRL_GRPH1BLEND_MASK (0x18000000) ++#define PDP_GRPH1CTRL_GRPH1BLEND_LSBMASK (0x00000003) ++#define PDP_GRPH1CTRL_GRPH1BLEND_SHIFT (27) ++#define PDP_GRPH1CTRL_GRPH1BLEND_LENGTH (2) ++#define PDP_GRPH1CTRL_GRPH1BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1BLENDPOS ++*/ ++#define PDP_GRPH1CTRL_GRPH1BLENDPOS_MASK (0x07000000) ++#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LSBMASK (0x00000007) ++#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SHIFT (24) ++#define PDP_GRPH1CTRL_GRPH1BLENDPOS_LENGTH (3) ++#define PDP_GRPH1CTRL_GRPH1BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CTRL, GRPH1DITHEREN ++*/ ++#define PDP_GRPH1CTRL_GRPH1DITHEREN_MASK (0x00800000) ++#define PDP_GRPH1CTRL_GRPH1DITHEREN_LSBMASK (0x00000001) ++#define PDP_GRPH1CTRL_GRPH1DITHEREN_SHIFT (23) ++#define PDP_GRPH1CTRL_GRPH1DITHEREN_LENGTH (1) ++#define PDP_GRPH1CTRL_GRPH1DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2CTRL_OFFSET (0x0024) ++ ++/* PDP, GRPH2CTRL, GRPH2STREN ++*/ ++#define PDP_GRPH2CTRL_GRPH2STREN_MASK (0x80000000) ++#define PDP_GRPH2CTRL_GRPH2STREN_LSBMASK (0x00000001) ++#define PDP_GRPH2CTRL_GRPH2STREN_SHIFT (31) ++#define PDP_GRPH2CTRL_GRPH2STREN_LENGTH (1) ++#define PDP_GRPH2CTRL_GRPH2STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2CKEYEN ++*/ ++#define PDP_GRPH2CTRL_GRPH2CKEYEN_MASK (0x40000000) ++#define PDP_GRPH2CTRL_GRPH2CKEYEN_LSBMASK (0x00000001) ++#define PDP_GRPH2CTRL_GRPH2CKEYEN_SHIFT (30) ++#define PDP_GRPH2CTRL_GRPH2CKEYEN_LENGTH (1) ++#define PDP_GRPH2CTRL_GRPH2CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2CKEYSRC ++*/ ++#define PDP_GRPH2CTRL_GRPH2CKEYSRC_MASK (0x20000000) ++#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LSBMASK (0x00000001) ++#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SHIFT (29) ++#define PDP_GRPH2CTRL_GRPH2CKEYSRC_LENGTH (1) ++#define PDP_GRPH2CTRL_GRPH2CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2BLEND ++*/ ++#define PDP_GRPH2CTRL_GRPH2BLEND_MASK (0x18000000) ++#define PDP_GRPH2CTRL_GRPH2BLEND_LSBMASK (0x00000003) ++#define PDP_GRPH2CTRL_GRPH2BLEND_SHIFT (27) ++#define PDP_GRPH2CTRL_GRPH2BLEND_LENGTH (2) ++#define PDP_GRPH2CTRL_GRPH2BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2BLENDPOS ++*/ ++#define PDP_GRPH2CTRL_GRPH2BLENDPOS_MASK (0x07000000) ++#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LSBMASK (0x00000007) ++#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SHIFT (24) ++#define PDP_GRPH2CTRL_GRPH2BLENDPOS_LENGTH (3) ++#define PDP_GRPH2CTRL_GRPH2BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CTRL, GRPH2DITHEREN ++*/ ++#define PDP_GRPH2CTRL_GRPH2DITHEREN_MASK (0x00800000) ++#define PDP_GRPH2CTRL_GRPH2DITHEREN_LSBMASK (0x00000001) ++#define PDP_GRPH2CTRL_GRPH2DITHEREN_SHIFT (23) ++#define PDP_GRPH2CTRL_GRPH2DITHEREN_LENGTH (1) ++#define PDP_GRPH2CTRL_GRPH2DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3CTRL_OFFSET (0x0028) ++ ++/* PDP, GRPH3CTRL, GRPH3STREN ++*/ ++#define PDP_GRPH3CTRL_GRPH3STREN_MASK (0x80000000) ++#define PDP_GRPH3CTRL_GRPH3STREN_LSBMASK (0x00000001) ++#define PDP_GRPH3CTRL_GRPH3STREN_SHIFT (31) ++#define PDP_GRPH3CTRL_GRPH3STREN_LENGTH (1) ++#define PDP_GRPH3CTRL_GRPH3STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3CKEYEN ++*/ ++#define PDP_GRPH3CTRL_GRPH3CKEYEN_MASK (0x40000000) ++#define PDP_GRPH3CTRL_GRPH3CKEYEN_LSBMASK (0x00000001) ++#define PDP_GRPH3CTRL_GRPH3CKEYEN_SHIFT (30) ++#define PDP_GRPH3CTRL_GRPH3CKEYEN_LENGTH (1) ++#define PDP_GRPH3CTRL_GRPH3CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3CKEYSRC ++*/ ++#define PDP_GRPH3CTRL_GRPH3CKEYSRC_MASK (0x20000000) ++#define 
PDP_GRPH3CTRL_GRPH3CKEYSRC_LSBMASK (0x00000001) ++#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SHIFT (29) ++#define PDP_GRPH3CTRL_GRPH3CKEYSRC_LENGTH (1) ++#define PDP_GRPH3CTRL_GRPH3CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3BLEND ++*/ ++#define PDP_GRPH3CTRL_GRPH3BLEND_MASK (0x18000000) ++#define PDP_GRPH3CTRL_GRPH3BLEND_LSBMASK (0x00000003) ++#define PDP_GRPH3CTRL_GRPH3BLEND_SHIFT (27) ++#define PDP_GRPH3CTRL_GRPH3BLEND_LENGTH (2) ++#define PDP_GRPH3CTRL_GRPH3BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3BLENDPOS ++*/ ++#define PDP_GRPH3CTRL_GRPH3BLENDPOS_MASK (0x07000000) ++#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LSBMASK (0x00000007) ++#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SHIFT (24) ++#define PDP_GRPH3CTRL_GRPH3BLENDPOS_LENGTH (3) ++#define PDP_GRPH3CTRL_GRPH3BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CTRL, GRPH3DITHEREN ++*/ ++#define PDP_GRPH3CTRL_GRPH3DITHEREN_MASK (0x00800000) ++#define PDP_GRPH3CTRL_GRPH3DITHEREN_LSBMASK (0x00000001) ++#define PDP_GRPH3CTRL_GRPH3DITHEREN_SHIFT (23) ++#define PDP_GRPH3CTRL_GRPH3DITHEREN_LENGTH (1) ++#define PDP_GRPH3CTRL_GRPH3DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4CTRL_OFFSET (0x002C) ++ ++/* PDP, GRPH4CTRL, GRPH4STREN ++*/ ++#define PDP_GRPH4CTRL_GRPH4STREN_MASK (0x80000000) ++#define PDP_GRPH4CTRL_GRPH4STREN_LSBMASK (0x00000001) ++#define PDP_GRPH4CTRL_GRPH4STREN_SHIFT (31) ++#define PDP_GRPH4CTRL_GRPH4STREN_LENGTH (1) ++#define PDP_GRPH4CTRL_GRPH4STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4CKEYEN ++*/ ++#define PDP_GRPH4CTRL_GRPH4CKEYEN_MASK (0x40000000) ++#define PDP_GRPH4CTRL_GRPH4CKEYEN_LSBMASK (0x00000001) ++#define PDP_GRPH4CTRL_GRPH4CKEYEN_SHIFT (30) ++#define PDP_GRPH4CTRL_GRPH4CKEYEN_LENGTH (1) ++#define PDP_GRPH4CTRL_GRPH4CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4CKEYSRC ++*/ ++#define PDP_GRPH4CTRL_GRPH4CKEYSRC_MASK (0x20000000) ++#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LSBMASK (0x00000001) ++#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SHIFT (29) ++#define PDP_GRPH4CTRL_GRPH4CKEYSRC_LENGTH (1) ++#define PDP_GRPH4CTRL_GRPH4CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4BLEND ++*/ ++#define PDP_GRPH4CTRL_GRPH4BLEND_MASK (0x18000000) ++#define PDP_GRPH4CTRL_GRPH4BLEND_LSBMASK (0x00000003) ++#define PDP_GRPH4CTRL_GRPH4BLEND_SHIFT (27) ++#define PDP_GRPH4CTRL_GRPH4BLEND_LENGTH (2) ++#define PDP_GRPH4CTRL_GRPH4BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4BLENDPOS ++*/ ++#define PDP_GRPH4CTRL_GRPH4BLENDPOS_MASK (0x07000000) ++#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LSBMASK (0x00000007) ++#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SHIFT (24) ++#define PDP_GRPH4CTRL_GRPH4BLENDPOS_LENGTH (3) ++#define PDP_GRPH4CTRL_GRPH4BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CTRL, GRPH4DITHEREN ++*/ ++#define PDP_GRPH4CTRL_GRPH4DITHEREN_MASK (0x00800000) ++#define PDP_GRPH4CTRL_GRPH4DITHEREN_LSBMASK (0x00000001) ++#define PDP_GRPH4CTRL_GRPH4DITHEREN_SHIFT (23) ++#define PDP_GRPH4CTRL_GRPH4DITHEREN_LENGTH (1) ++#define PDP_GRPH4CTRL_GRPH4DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1CTRL_OFFSET (0x0030) ++ ++/* PDP, VID1CTRL, VID1STREN ++*/ ++#define PDP_VID1CTRL_VID1STREN_MASK (0x80000000) ++#define PDP_VID1CTRL_VID1STREN_LSBMASK (0x00000001) ++#define PDP_VID1CTRL_VID1STREN_SHIFT (31) ++#define PDP_VID1CTRL_VID1STREN_LENGTH (1) ++#define PDP_VID1CTRL_VID1STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1CKEYEN ++*/ ++#define PDP_VID1CTRL_VID1CKEYEN_MASK (0x40000000) ++#define PDP_VID1CTRL_VID1CKEYEN_LSBMASK (0x00000001) ++#define PDP_VID1CTRL_VID1CKEYEN_SHIFT (30) 
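++
++/* Illustrative usage sketch for the bitfield helpers defined at the top of
++ * this file, using the fully-defined GRPH1CTRL register fields above. The
++ * pdp_read()/pdp_write() MMIO accessors are assumed here purely for the
++ * example and are not provided by this header.
++ *
++ *	u32 val = pdp_read(base, PDP_GRPH1CTRL_OFFSET);
++ *	if (GET_FIELD(val, PDP_GRPH1CTRL_GRPH1STREN)) {
++ *		// Move graphics plane 1 to the last blend position (7).
++ *		SET_FIELD(val, PDP_GRPH1CTRL_GRPH1BLENDPOS,
++ *			  MAX_FIELD_VALUE(PDP_GRPH1CTRL_GRPH1BLENDPOS));
++ *		pdp_write(base, PDP_GRPH1CTRL_OFFSET, val);
++ *	}
++ */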
++#define PDP_VID1CTRL_VID1CKEYEN_LENGTH (1) ++#define PDP_VID1CTRL_VID1CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1CKEYSRC ++*/ ++#define PDP_VID1CTRL_VID1CKEYSRC_MASK (0x20000000) ++#define PDP_VID1CTRL_VID1CKEYSRC_LSBMASK (0x00000001) ++#define PDP_VID1CTRL_VID1CKEYSRC_SHIFT (29) ++#define PDP_VID1CTRL_VID1CKEYSRC_LENGTH (1) ++#define PDP_VID1CTRL_VID1CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1BLEND ++*/ ++#define PDP_VID1CTRL_VID1BLEND_MASK (0x18000000) ++#define PDP_VID1CTRL_VID1BLEND_LSBMASK (0x00000003) ++#define PDP_VID1CTRL_VID1BLEND_SHIFT (27) ++#define PDP_VID1CTRL_VID1BLEND_LENGTH (2) ++#define PDP_VID1CTRL_VID1BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1BLENDPOS ++*/ ++#define PDP_VID1CTRL_VID1BLENDPOS_MASK (0x07000000) ++#define PDP_VID1CTRL_VID1BLENDPOS_LSBMASK (0x00000007) ++#define PDP_VID1CTRL_VID1BLENDPOS_SHIFT (24) ++#define PDP_VID1CTRL_VID1BLENDPOS_LENGTH (3) ++#define PDP_VID1CTRL_VID1BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CTRL, VID1DITHEREN ++*/ ++#define PDP_VID1CTRL_VID1DITHEREN_MASK (0x00800000) ++#define PDP_VID1CTRL_VID1DITHEREN_LSBMASK (0x00000001) ++#define PDP_VID1CTRL_VID1DITHEREN_SHIFT (23) ++#define PDP_VID1CTRL_VID1DITHEREN_LENGTH (1) ++#define PDP_VID1CTRL_VID1DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2CTRL_OFFSET (0x0034) ++ ++/* PDP, VID2CTRL, VID2STREN ++*/ ++#define PDP_VID2CTRL_VID2STREN_MASK (0x80000000) ++#define PDP_VID2CTRL_VID2STREN_LSBMASK (0x00000001) ++#define PDP_VID2CTRL_VID2STREN_SHIFT (31) ++#define PDP_VID2CTRL_VID2STREN_LENGTH (1) ++#define PDP_VID2CTRL_VID2STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2CKEYEN ++*/ ++#define PDP_VID2CTRL_VID2CKEYEN_MASK (0x40000000) ++#define PDP_VID2CTRL_VID2CKEYEN_LSBMASK (0x00000001) ++#define PDP_VID2CTRL_VID2CKEYEN_SHIFT (30) ++#define PDP_VID2CTRL_VID2CKEYEN_LENGTH (1) ++#define PDP_VID2CTRL_VID2CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2CKEYSRC ++*/ ++#define PDP_VID2CTRL_VID2CKEYSRC_MASK (0x20000000) ++#define PDP_VID2CTRL_VID2CKEYSRC_LSBMASK (0x00000001) ++#define PDP_VID2CTRL_VID2CKEYSRC_SHIFT (29) ++#define PDP_VID2CTRL_VID2CKEYSRC_LENGTH (1) ++#define PDP_VID2CTRL_VID2CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2BLEND ++*/ ++#define PDP_VID2CTRL_VID2BLEND_MASK (0x18000000) ++#define PDP_VID2CTRL_VID2BLEND_LSBMASK (0x00000003) ++#define PDP_VID2CTRL_VID2BLEND_SHIFT (27) ++#define PDP_VID2CTRL_VID2BLEND_LENGTH (2) ++#define PDP_VID2CTRL_VID2BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2BLENDPOS ++*/ ++#define PDP_VID2CTRL_VID2BLENDPOS_MASK (0x07000000) ++#define PDP_VID2CTRL_VID2BLENDPOS_LSBMASK (0x00000007) ++#define PDP_VID2CTRL_VID2BLENDPOS_SHIFT (24) ++#define PDP_VID2CTRL_VID2BLENDPOS_LENGTH (3) ++#define PDP_VID2CTRL_VID2BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CTRL, VID2DITHEREN ++*/ ++#define PDP_VID2CTRL_VID2DITHEREN_MASK (0x00800000) ++#define PDP_VID2CTRL_VID2DITHEREN_LSBMASK (0x00000001) ++#define PDP_VID2CTRL_VID2DITHEREN_SHIFT (23) ++#define PDP_VID2CTRL_VID2DITHEREN_LENGTH (1) ++#define PDP_VID2CTRL_VID2DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3CTRL_OFFSET (0x0038) ++ ++/* PDP, VID3CTRL, VID3STREN ++*/ ++#define PDP_VID3CTRL_VID3STREN_MASK (0x80000000) ++#define PDP_VID3CTRL_VID3STREN_LSBMASK (0x00000001) ++#define PDP_VID3CTRL_VID3STREN_SHIFT (31) ++#define PDP_VID3CTRL_VID3STREN_LENGTH (1) ++#define PDP_VID3CTRL_VID3STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3CKEYEN ++*/ ++#define PDP_VID3CTRL_VID3CKEYEN_MASK (0x40000000) ++#define 
PDP_VID3CTRL_VID3CKEYEN_LSBMASK (0x00000001) ++#define PDP_VID3CTRL_VID3CKEYEN_SHIFT (30) ++#define PDP_VID3CTRL_VID3CKEYEN_LENGTH (1) ++#define PDP_VID3CTRL_VID3CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3CKEYSRC ++*/ ++#define PDP_VID3CTRL_VID3CKEYSRC_MASK (0x20000000) ++#define PDP_VID3CTRL_VID3CKEYSRC_LSBMASK (0x00000001) ++#define PDP_VID3CTRL_VID3CKEYSRC_SHIFT (29) ++#define PDP_VID3CTRL_VID3CKEYSRC_LENGTH (1) ++#define PDP_VID3CTRL_VID3CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3BLEND ++*/ ++#define PDP_VID3CTRL_VID3BLEND_MASK (0x18000000) ++#define PDP_VID3CTRL_VID3BLEND_LSBMASK (0x00000003) ++#define PDP_VID3CTRL_VID3BLEND_SHIFT (27) ++#define PDP_VID3CTRL_VID3BLEND_LENGTH (2) ++#define PDP_VID3CTRL_VID3BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3BLENDPOS ++*/ ++#define PDP_VID3CTRL_VID3BLENDPOS_MASK (0x07000000) ++#define PDP_VID3CTRL_VID3BLENDPOS_LSBMASK (0x00000007) ++#define PDP_VID3CTRL_VID3BLENDPOS_SHIFT (24) ++#define PDP_VID3CTRL_VID3BLENDPOS_LENGTH (3) ++#define PDP_VID3CTRL_VID3BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CTRL, VID3DITHEREN ++*/ ++#define PDP_VID3CTRL_VID3DITHEREN_MASK (0x00800000) ++#define PDP_VID3CTRL_VID3DITHEREN_LSBMASK (0x00000001) ++#define PDP_VID3CTRL_VID3DITHEREN_SHIFT (23) ++#define PDP_VID3CTRL_VID3DITHEREN_LENGTH (1) ++#define PDP_VID3CTRL_VID3DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4CTRL_OFFSET (0x003C) ++ ++/* PDP, VID4CTRL, VID4STREN ++*/ ++#define PDP_VID4CTRL_VID4STREN_MASK (0x80000000) ++#define PDP_VID4CTRL_VID4STREN_LSBMASK (0x00000001) ++#define PDP_VID4CTRL_VID4STREN_SHIFT (31) ++#define PDP_VID4CTRL_VID4STREN_LENGTH (1) ++#define PDP_VID4CTRL_VID4STREN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4CKEYEN ++*/ ++#define PDP_VID4CTRL_VID4CKEYEN_MASK (0x40000000) ++#define PDP_VID4CTRL_VID4CKEYEN_LSBMASK (0x00000001) ++#define PDP_VID4CTRL_VID4CKEYEN_SHIFT (30) ++#define PDP_VID4CTRL_VID4CKEYEN_LENGTH (1) ++#define PDP_VID4CTRL_VID4CKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4CKEYSRC ++*/ ++#define PDP_VID4CTRL_VID4CKEYSRC_MASK (0x20000000) ++#define PDP_VID4CTRL_VID4CKEYSRC_LSBMASK (0x00000001) ++#define PDP_VID4CTRL_VID4CKEYSRC_SHIFT (29) ++#define PDP_VID4CTRL_VID4CKEYSRC_LENGTH (1) ++#define PDP_VID4CTRL_VID4CKEYSRC_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4BLEND ++*/ ++#define PDP_VID4CTRL_VID4BLEND_MASK (0x18000000) ++#define PDP_VID4CTRL_VID4BLEND_LSBMASK (0x00000003) ++#define PDP_VID4CTRL_VID4BLEND_SHIFT (27) ++#define PDP_VID4CTRL_VID4BLEND_LENGTH (2) ++#define PDP_VID4CTRL_VID4BLEND_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4BLENDPOS ++*/ ++#define PDP_VID4CTRL_VID4BLENDPOS_MASK (0x07000000) ++#define PDP_VID4CTRL_VID4BLENDPOS_LSBMASK (0x00000007) ++#define PDP_VID4CTRL_VID4BLENDPOS_SHIFT (24) ++#define PDP_VID4CTRL_VID4BLENDPOS_LENGTH (3) ++#define PDP_VID4CTRL_VID4BLENDPOS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CTRL, VID4DITHEREN ++*/ ++#define PDP_VID4CTRL_VID4DITHEREN_MASK (0x00800000) ++#define PDP_VID4CTRL_VID4DITHEREN_LSBMASK (0x00000001) ++#define PDP_VID4CTRL_VID4DITHEREN_SHIFT (23) ++#define PDP_VID4CTRL_VID4DITHEREN_LENGTH (1) ++#define PDP_VID4CTRL_VID4DITHEREN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1UCTRL_OFFSET (0x0050) ++ ++/* PDP, VID1UCTRL, VID1UVHALFSTR ++*/ ++#define PDP_VID1UCTRL_VID1UVHALFSTR_MASK (0xC0000000) ++#define PDP_VID1UCTRL_VID1UVHALFSTR_LSBMASK (0x00000003) ++#define PDP_VID1UCTRL_VID1UVHALFSTR_SHIFT (30) ++#define PDP_VID1UCTRL_VID1UVHALFSTR_LENGTH (2) ++#define 
PDP_VID1UCTRL_VID1UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2UCTRL_OFFSET (0x0054) ++ ++/* PDP, VID2UCTRL, VID2UVHALFSTR ++*/ ++#define PDP_VID2UCTRL_VID2UVHALFSTR_MASK (0xC0000000) ++#define PDP_VID2UCTRL_VID2UVHALFSTR_LSBMASK (0x00000003) ++#define PDP_VID2UCTRL_VID2UVHALFSTR_SHIFT (30) ++#define PDP_VID2UCTRL_VID2UVHALFSTR_LENGTH (2) ++#define PDP_VID2UCTRL_VID2UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3UCTRL_OFFSET (0x0058) ++ ++/* PDP, VID3UCTRL, VID3UVHALFSTR ++*/ ++#define PDP_VID3UCTRL_VID3UVHALFSTR_MASK (0xC0000000) ++#define PDP_VID3UCTRL_VID3UVHALFSTR_LSBMASK (0x00000003) ++#define PDP_VID3UCTRL_VID3UVHALFSTR_SHIFT (30) ++#define PDP_VID3UCTRL_VID3UVHALFSTR_LENGTH (2) ++#define PDP_VID3UCTRL_VID3UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4UCTRL_OFFSET (0x005C) ++ ++/* PDP, VID4UCTRL, VID4UVHALFSTR ++*/ ++#define PDP_VID4UCTRL_VID4UVHALFSTR_MASK (0xC0000000) ++#define PDP_VID4UCTRL_VID4UVHALFSTR_LSBMASK (0x00000003) ++#define PDP_VID4UCTRL_VID4UVHALFSTR_SHIFT (30) ++#define PDP_VID4UCTRL_VID4UVHALFSTR_LENGTH (2) ++#define PDP_VID4UCTRL_VID4UVHALFSTR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1STRIDE_OFFSET (0x0060) ++ ++/* PDP, GRPH1STRIDE, GRPH1STRIDE ++*/ ++#define PDP_GRPH1STRIDE_GRPH1STRIDE_MASK (0xFFC00000) ++#define PDP_GRPH1STRIDE_GRPH1STRIDE_LSBMASK (0x000003FF) ++#define PDP_GRPH1STRIDE_GRPH1STRIDE_SHIFT (22) ++#define PDP_GRPH1STRIDE_GRPH1STRIDE_LENGTH (10) ++#define PDP_GRPH1STRIDE_GRPH1STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2STRIDE_OFFSET (0x0064) ++ ++/* PDP, GRPH2STRIDE, GRPH2STRIDE ++*/ ++#define PDP_GRPH2STRIDE_GRPH2STRIDE_MASK (0xFFC00000) ++#define PDP_GRPH2STRIDE_GRPH2STRIDE_LSBMASK (0x000003FF) ++#define PDP_GRPH2STRIDE_GRPH2STRIDE_SHIFT (22) ++#define PDP_GRPH2STRIDE_GRPH2STRIDE_LENGTH (10) ++#define PDP_GRPH2STRIDE_GRPH2STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3STRIDE_OFFSET (0x0068) ++ ++/* PDP, GRPH3STRIDE, GRPH3STRIDE ++*/ ++#define PDP_GRPH3STRIDE_GRPH3STRIDE_MASK (0xFFC00000) ++#define PDP_GRPH3STRIDE_GRPH3STRIDE_LSBMASK (0x000003FF) ++#define PDP_GRPH3STRIDE_GRPH3STRIDE_SHIFT (22) ++#define PDP_GRPH3STRIDE_GRPH3STRIDE_LENGTH (10) ++#define PDP_GRPH3STRIDE_GRPH3STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4STRIDE_OFFSET (0x006C) ++ ++/* PDP, GRPH4STRIDE, GRPH4STRIDE ++*/ ++#define PDP_GRPH4STRIDE_GRPH4STRIDE_MASK (0xFFC00000) ++#define PDP_GRPH4STRIDE_GRPH4STRIDE_LSBMASK (0x000003FF) ++#define PDP_GRPH4STRIDE_GRPH4STRIDE_SHIFT (22) ++#define PDP_GRPH4STRIDE_GRPH4STRIDE_LENGTH (10) ++#define PDP_GRPH4STRIDE_GRPH4STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1STRIDE_OFFSET (0x0070) ++ ++/* PDP, VID1STRIDE, VID1STRIDE ++*/ ++#define PDP_VID1STRIDE_VID1STRIDE_MASK (0xFFC00000) ++#define PDP_VID1STRIDE_VID1STRIDE_LSBMASK (0x000003FF) ++#define PDP_VID1STRIDE_VID1STRIDE_SHIFT (22) ++#define PDP_VID1STRIDE_VID1STRIDE_LENGTH (10) ++#define PDP_VID1STRIDE_VID1STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2STRIDE_OFFSET (0x0074) ++ ++/* PDP, VID2STRIDE, VID2STRIDE ++*/ ++#define PDP_VID2STRIDE_VID2STRIDE_MASK (0xFFC00000) ++#define PDP_VID2STRIDE_VID2STRIDE_LSBMASK (0x000003FF) ++#define PDP_VID2STRIDE_VID2STRIDE_SHIFT (22) ++#define PDP_VID2STRIDE_VID2STRIDE_LENGTH (10) ++#define PDP_VID2STRIDE_VID2STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3STRIDE_OFFSET (0x0078) ++ ++/* PDP, VID3STRIDE, VID3STRIDE ++*/ ++#define PDP_VID3STRIDE_VID3STRIDE_MASK (0xFFC00000) ++#define PDP_VID3STRIDE_VID3STRIDE_LSBMASK (0x000003FF) ++#define PDP_VID3STRIDE_VID3STRIDE_SHIFT (22) ++#define 
PDP_VID3STRIDE_VID3STRIDE_LENGTH (10) ++#define PDP_VID3STRIDE_VID3STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4STRIDE_OFFSET (0x007C) ++ ++/* PDP, VID4STRIDE, VID4STRIDE ++*/ ++#define PDP_VID4STRIDE_VID4STRIDE_MASK (0xFFC00000) ++#define PDP_VID4STRIDE_VID4STRIDE_LSBMASK (0x000003FF) ++#define PDP_VID4STRIDE_VID4STRIDE_SHIFT (22) ++#define PDP_VID4STRIDE_VID4STRIDE_LENGTH (10) ++#define PDP_VID4STRIDE_VID4STRIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1SIZE_OFFSET (0x0080) ++ ++/* PDP, GRPH1SIZE, GRPH1WIDTH ++*/ ++#define PDP_GRPH1SIZE_GRPH1WIDTH_MASK (0x0FFF0000) ++#define PDP_GRPH1SIZE_GRPH1WIDTH_LSBMASK (0x00000FFF) ++#define PDP_GRPH1SIZE_GRPH1WIDTH_SHIFT (16) ++#define PDP_GRPH1SIZE_GRPH1WIDTH_LENGTH (12) ++#define PDP_GRPH1SIZE_GRPH1WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1SIZE, GRPH1HEIGHT ++*/ ++#define PDP_GRPH1SIZE_GRPH1HEIGHT_MASK (0x00000FFF) ++#define PDP_GRPH1SIZE_GRPH1HEIGHT_LSBMASK (0x00000FFF) ++#define PDP_GRPH1SIZE_GRPH1HEIGHT_SHIFT (0) ++#define PDP_GRPH1SIZE_GRPH1HEIGHT_LENGTH (12) ++#define PDP_GRPH1SIZE_GRPH1HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2SIZE_OFFSET (0x0084) ++ ++/* PDP, GRPH2SIZE, GRPH2WIDTH ++*/ ++#define PDP_GRPH2SIZE_GRPH2WIDTH_MASK (0x0FFF0000) ++#define PDP_GRPH2SIZE_GRPH2WIDTH_LSBMASK (0x00000FFF) ++#define PDP_GRPH2SIZE_GRPH2WIDTH_SHIFT (16) ++#define PDP_GRPH2SIZE_GRPH2WIDTH_LENGTH (12) ++#define PDP_GRPH2SIZE_GRPH2WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2SIZE, GRPH2HEIGHT ++*/ ++#define PDP_GRPH2SIZE_GRPH2HEIGHT_MASK (0x00000FFF) ++#define PDP_GRPH2SIZE_GRPH2HEIGHT_LSBMASK (0x00000FFF) ++#define PDP_GRPH2SIZE_GRPH2HEIGHT_SHIFT (0) ++#define PDP_GRPH2SIZE_GRPH2HEIGHT_LENGTH (12) ++#define PDP_GRPH2SIZE_GRPH2HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3SIZE_OFFSET (0x0088) ++ ++/* PDP, GRPH3SIZE, GRPH3WIDTH ++*/ ++#define PDP_GRPH3SIZE_GRPH3WIDTH_MASK (0x0FFF0000) ++#define PDP_GRPH3SIZE_GRPH3WIDTH_LSBMASK (0x00000FFF) ++#define PDP_GRPH3SIZE_GRPH3WIDTH_SHIFT (16) ++#define PDP_GRPH3SIZE_GRPH3WIDTH_LENGTH (12) ++#define PDP_GRPH3SIZE_GRPH3WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3SIZE, GRPH3HEIGHT ++*/ ++#define PDP_GRPH3SIZE_GRPH3HEIGHT_MASK (0x00000FFF) ++#define PDP_GRPH3SIZE_GRPH3HEIGHT_LSBMASK (0x00000FFF) ++#define PDP_GRPH3SIZE_GRPH3HEIGHT_SHIFT (0) ++#define PDP_GRPH3SIZE_GRPH3HEIGHT_LENGTH (12) ++#define PDP_GRPH3SIZE_GRPH3HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4SIZE_OFFSET (0x008C) ++ ++/* PDP, GRPH4SIZE, GRPH4WIDTH ++*/ ++#define PDP_GRPH4SIZE_GRPH4WIDTH_MASK (0x0FFF0000) ++#define PDP_GRPH4SIZE_GRPH4WIDTH_LSBMASK (0x00000FFF) ++#define PDP_GRPH4SIZE_GRPH4WIDTH_SHIFT (16) ++#define PDP_GRPH4SIZE_GRPH4WIDTH_LENGTH (12) ++#define PDP_GRPH4SIZE_GRPH4WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4SIZE, GRPH4HEIGHT ++*/ ++#define PDP_GRPH4SIZE_GRPH4HEIGHT_MASK (0x00000FFF) ++#define PDP_GRPH4SIZE_GRPH4HEIGHT_LSBMASK (0x00000FFF) ++#define PDP_GRPH4SIZE_GRPH4HEIGHT_SHIFT (0) ++#define PDP_GRPH4SIZE_GRPH4HEIGHT_LENGTH (12) ++#define PDP_GRPH4SIZE_GRPH4HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1SIZE_OFFSET (0x0090) ++ ++/* PDP, VID1SIZE, VID1WIDTH ++*/ ++#define PDP_VID1SIZE_VID1WIDTH_MASK (0x0FFF0000) ++#define PDP_VID1SIZE_VID1WIDTH_LSBMASK (0x00000FFF) ++#define PDP_VID1SIZE_VID1WIDTH_SHIFT (16) ++#define PDP_VID1SIZE_VID1WIDTH_LENGTH (12) ++#define PDP_VID1SIZE_VID1WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SIZE, VID1HEIGHT ++*/ ++#define PDP_VID1SIZE_VID1HEIGHT_MASK (0x00000FFF) ++#define PDP_VID1SIZE_VID1HEIGHT_LSBMASK (0x00000FFF) ++#define PDP_VID1SIZE_VID1HEIGHT_SHIFT 
(0) ++#define PDP_VID1SIZE_VID1HEIGHT_LENGTH (12) ++#define PDP_VID1SIZE_VID1HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2SIZE_OFFSET (0x0094) ++ ++/* PDP, VID2SIZE, VID2WIDTH ++*/ ++#define PDP_VID2SIZE_VID2WIDTH_MASK (0x0FFF0000) ++#define PDP_VID2SIZE_VID2WIDTH_LSBMASK (0x00000FFF) ++#define PDP_VID2SIZE_VID2WIDTH_SHIFT (16) ++#define PDP_VID2SIZE_VID2WIDTH_LENGTH (12) ++#define PDP_VID2SIZE_VID2WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SIZE, VID2HEIGHT ++*/ ++#define PDP_VID2SIZE_VID2HEIGHT_MASK (0x00000FFF) ++#define PDP_VID2SIZE_VID2HEIGHT_LSBMASK (0x00000FFF) ++#define PDP_VID2SIZE_VID2HEIGHT_SHIFT (0) ++#define PDP_VID2SIZE_VID2HEIGHT_LENGTH (12) ++#define PDP_VID2SIZE_VID2HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3SIZE_OFFSET (0x0098) ++ ++/* PDP, VID3SIZE, VID3WIDTH ++*/ ++#define PDP_VID3SIZE_VID3WIDTH_MASK (0x0FFF0000) ++#define PDP_VID3SIZE_VID3WIDTH_LSBMASK (0x00000FFF) ++#define PDP_VID3SIZE_VID3WIDTH_SHIFT (16) ++#define PDP_VID3SIZE_VID3WIDTH_LENGTH (12) ++#define PDP_VID3SIZE_VID3WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SIZE, VID3HEIGHT ++*/ ++#define PDP_VID3SIZE_VID3HEIGHT_MASK (0x00000FFF) ++#define PDP_VID3SIZE_VID3HEIGHT_LSBMASK (0x00000FFF) ++#define PDP_VID3SIZE_VID3HEIGHT_SHIFT (0) ++#define PDP_VID3SIZE_VID3HEIGHT_LENGTH (12) ++#define PDP_VID3SIZE_VID3HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4SIZE_OFFSET (0x009C) ++ ++/* PDP, VID4SIZE, VID4WIDTH ++*/ ++#define PDP_VID4SIZE_VID4WIDTH_MASK (0x0FFF0000) ++#define PDP_VID4SIZE_VID4WIDTH_LSBMASK (0x00000FFF) ++#define PDP_VID4SIZE_VID4WIDTH_SHIFT (16) ++#define PDP_VID4SIZE_VID4WIDTH_LENGTH (12) ++#define PDP_VID4SIZE_VID4WIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SIZE, VID4HEIGHT ++*/ ++#define PDP_VID4SIZE_VID4HEIGHT_MASK (0x00000FFF) ++#define PDP_VID4SIZE_VID4HEIGHT_LSBMASK (0x00000FFF) ++#define PDP_VID4SIZE_VID4HEIGHT_SHIFT (0) ++#define PDP_VID4SIZE_VID4HEIGHT_LENGTH (12) ++#define PDP_VID4SIZE_VID4HEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1POSN_OFFSET (0x00A0) ++ ++/* PDP, GRPH1POSN, GRPH1XSTART ++*/ ++#define PDP_GRPH1POSN_GRPH1XSTART_MASK (0x0FFF0000) ++#define PDP_GRPH1POSN_GRPH1XSTART_LSBMASK (0x00000FFF) ++#define PDP_GRPH1POSN_GRPH1XSTART_SHIFT (16) ++#define PDP_GRPH1POSN_GRPH1XSTART_LENGTH (12) ++#define PDP_GRPH1POSN_GRPH1XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1POSN, GRPH1YSTART ++*/ ++#define PDP_GRPH1POSN_GRPH1YSTART_MASK (0x00000FFF) ++#define PDP_GRPH1POSN_GRPH1YSTART_LSBMASK (0x00000FFF) ++#define PDP_GRPH1POSN_GRPH1YSTART_SHIFT (0) ++#define PDP_GRPH1POSN_GRPH1YSTART_LENGTH (12) ++#define PDP_GRPH1POSN_GRPH1YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2POSN_OFFSET (0x00A4) ++ ++/* PDP, GRPH2POSN, GRPH2XSTART ++*/ ++#define PDP_GRPH2POSN_GRPH2XSTART_MASK (0x0FFF0000) ++#define PDP_GRPH2POSN_GRPH2XSTART_LSBMASK (0x00000FFF) ++#define PDP_GRPH2POSN_GRPH2XSTART_SHIFT (16) ++#define PDP_GRPH2POSN_GRPH2XSTART_LENGTH (12) ++#define PDP_GRPH2POSN_GRPH2XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2POSN, GRPH2YSTART ++*/ ++#define PDP_GRPH2POSN_GRPH2YSTART_MASK (0x00000FFF) ++#define PDP_GRPH2POSN_GRPH2YSTART_LSBMASK (0x00000FFF) ++#define PDP_GRPH2POSN_GRPH2YSTART_SHIFT (0) ++#define PDP_GRPH2POSN_GRPH2YSTART_LENGTH (12) ++#define PDP_GRPH2POSN_GRPH2YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3POSN_OFFSET (0x00A8) ++ ++/* PDP, GRPH3POSN, GRPH3XSTART ++*/ ++#define PDP_GRPH3POSN_GRPH3XSTART_MASK (0x0FFF0000) ++#define PDP_GRPH3POSN_GRPH3XSTART_LSBMASK (0x00000FFF) ++#define PDP_GRPH3POSN_GRPH3XSTART_SHIFT (16) ++#define 
PDP_GRPH3POSN_GRPH3XSTART_LENGTH (12) ++#define PDP_GRPH3POSN_GRPH3XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3POSN, GRPH3YSTART ++*/ ++#define PDP_GRPH3POSN_GRPH3YSTART_MASK (0x00000FFF) ++#define PDP_GRPH3POSN_GRPH3YSTART_LSBMASK (0x00000FFF) ++#define PDP_GRPH3POSN_GRPH3YSTART_SHIFT (0) ++#define PDP_GRPH3POSN_GRPH3YSTART_LENGTH (12) ++#define PDP_GRPH3POSN_GRPH3YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4POSN_OFFSET (0x00AC) ++ ++/* PDP, GRPH4POSN, GRPH4XSTART ++*/ ++#define PDP_GRPH4POSN_GRPH4XSTART_MASK (0x0FFF0000) ++#define PDP_GRPH4POSN_GRPH4XSTART_LSBMASK (0x00000FFF) ++#define PDP_GRPH4POSN_GRPH4XSTART_SHIFT (16) ++#define PDP_GRPH4POSN_GRPH4XSTART_LENGTH (12) ++#define PDP_GRPH4POSN_GRPH4XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4POSN, GRPH4YSTART ++*/ ++#define PDP_GRPH4POSN_GRPH4YSTART_MASK (0x00000FFF) ++#define PDP_GRPH4POSN_GRPH4YSTART_LSBMASK (0x00000FFF) ++#define PDP_GRPH4POSN_GRPH4YSTART_SHIFT (0) ++#define PDP_GRPH4POSN_GRPH4YSTART_LENGTH (12) ++#define PDP_GRPH4POSN_GRPH4YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1POSN_OFFSET (0x00B0) ++ ++/* PDP, VID1POSN, VID1XSTART ++*/ ++#define PDP_VID1POSN_VID1XSTART_MASK (0x0FFF0000) ++#define PDP_VID1POSN_VID1XSTART_LSBMASK (0x00000FFF) ++#define PDP_VID1POSN_VID1XSTART_SHIFT (16) ++#define PDP_VID1POSN_VID1XSTART_LENGTH (12) ++#define PDP_VID1POSN_VID1XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1POSN, VID1YSTART ++*/ ++#define PDP_VID1POSN_VID1YSTART_MASK (0x00000FFF) ++#define PDP_VID1POSN_VID1YSTART_LSBMASK (0x00000FFF) ++#define PDP_VID1POSN_VID1YSTART_SHIFT (0) ++#define PDP_VID1POSN_VID1YSTART_LENGTH (12) ++#define PDP_VID1POSN_VID1YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2POSN_OFFSET (0x00B4) ++ ++/* PDP, VID2POSN, VID2XSTART ++*/ ++#define PDP_VID2POSN_VID2XSTART_MASK (0x0FFF0000) ++#define PDP_VID2POSN_VID2XSTART_LSBMASK (0x00000FFF) ++#define PDP_VID2POSN_VID2XSTART_SHIFT (16) ++#define PDP_VID2POSN_VID2XSTART_LENGTH (12) ++#define PDP_VID2POSN_VID2XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2POSN, VID2YSTART ++*/ ++#define PDP_VID2POSN_VID2YSTART_MASK (0x00000FFF) ++#define PDP_VID2POSN_VID2YSTART_LSBMASK (0x00000FFF) ++#define PDP_VID2POSN_VID2YSTART_SHIFT (0) ++#define PDP_VID2POSN_VID2YSTART_LENGTH (12) ++#define PDP_VID2POSN_VID2YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3POSN_OFFSET (0x00B8) ++ ++/* PDP, VID3POSN, VID3XSTART ++*/ ++#define PDP_VID3POSN_VID3XSTART_MASK (0x0FFF0000) ++#define PDP_VID3POSN_VID3XSTART_LSBMASK (0x00000FFF) ++#define PDP_VID3POSN_VID3XSTART_SHIFT (16) ++#define PDP_VID3POSN_VID3XSTART_LENGTH (12) ++#define PDP_VID3POSN_VID3XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3POSN, VID3YSTART ++*/ ++#define PDP_VID3POSN_VID3YSTART_MASK (0x00000FFF) ++#define PDP_VID3POSN_VID3YSTART_LSBMASK (0x00000FFF) ++#define PDP_VID3POSN_VID3YSTART_SHIFT (0) ++#define PDP_VID3POSN_VID3YSTART_LENGTH (12) ++#define PDP_VID3POSN_VID3YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4POSN_OFFSET (0x00BC) ++ ++/* PDP, VID4POSN, VID4XSTART ++*/ ++#define PDP_VID4POSN_VID4XSTART_MASK (0x0FFF0000) ++#define PDP_VID4POSN_VID4XSTART_LSBMASK (0x00000FFF) ++#define PDP_VID4POSN_VID4XSTART_SHIFT (16) ++#define PDP_VID4POSN_VID4XSTART_LENGTH (12) ++#define PDP_VID4POSN_VID4XSTART_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4POSN, VID4YSTART ++*/ ++#define PDP_VID4POSN_VID4YSTART_MASK (0x00000FFF) ++#define PDP_VID4POSN_VID4YSTART_LSBMASK (0x00000FFF) ++#define PDP_VID4POSN_VID4YSTART_SHIFT (0) ++#define PDP_VID4POSN_VID4YSTART_LENGTH (12) ++#define 
PDP_VID4POSN_VID4YSTART_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1GALPHA_OFFSET (0x00C0) ++ ++/* PDP, GRPH1GALPHA, GRPH1GALPHA ++*/ ++#define PDP_GRPH1GALPHA_GRPH1GALPHA_MASK (0x000003FF) ++#define PDP_GRPH1GALPHA_GRPH1GALPHA_LSBMASK (0x000003FF) ++#define PDP_GRPH1GALPHA_GRPH1GALPHA_SHIFT (0) ++#define PDP_GRPH1GALPHA_GRPH1GALPHA_LENGTH (10) ++#define PDP_GRPH1GALPHA_GRPH1GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2GALPHA_OFFSET (0x00C4) ++ ++/* PDP, GRPH2GALPHA, GRPH2GALPHA ++*/ ++#define PDP_GRPH2GALPHA_GRPH2GALPHA_MASK (0x000003FF) ++#define PDP_GRPH2GALPHA_GRPH2GALPHA_LSBMASK (0x000003FF) ++#define PDP_GRPH2GALPHA_GRPH2GALPHA_SHIFT (0) ++#define PDP_GRPH2GALPHA_GRPH2GALPHA_LENGTH (10) ++#define PDP_GRPH2GALPHA_GRPH2GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3GALPHA_OFFSET (0x00C8) ++ ++/* PDP, GRPH3GALPHA, GRPH3GALPHA ++*/ ++#define PDP_GRPH3GALPHA_GRPH3GALPHA_MASK (0x000003FF) ++#define PDP_GRPH3GALPHA_GRPH3GALPHA_LSBMASK (0x000003FF) ++#define PDP_GRPH3GALPHA_GRPH3GALPHA_SHIFT (0) ++#define PDP_GRPH3GALPHA_GRPH3GALPHA_LENGTH (10) ++#define PDP_GRPH3GALPHA_GRPH3GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4GALPHA_OFFSET (0x00CC) ++ ++/* PDP, GRPH4GALPHA, GRPH4GALPHA ++*/ ++#define PDP_GRPH4GALPHA_GRPH4GALPHA_MASK (0x000003FF) ++#define PDP_GRPH4GALPHA_GRPH4GALPHA_LSBMASK (0x000003FF) ++#define PDP_GRPH4GALPHA_GRPH4GALPHA_SHIFT (0) ++#define PDP_GRPH4GALPHA_GRPH4GALPHA_LENGTH (10) ++#define PDP_GRPH4GALPHA_GRPH4GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1GALPHA_OFFSET (0x00D0) ++ ++/* PDP, VID1GALPHA, VID1GALPHA ++*/ ++#define PDP_VID1GALPHA_VID1GALPHA_MASK (0x000003FF) ++#define PDP_VID1GALPHA_VID1GALPHA_LSBMASK (0x000003FF) ++#define PDP_VID1GALPHA_VID1GALPHA_SHIFT (0) ++#define PDP_VID1GALPHA_VID1GALPHA_LENGTH (10) ++#define PDP_VID1GALPHA_VID1GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2GALPHA_OFFSET (0x00D4) ++ ++/* PDP, VID2GALPHA, VID2GALPHA ++*/ ++#define PDP_VID2GALPHA_VID2GALPHA_MASK (0x000003FF) ++#define PDP_VID2GALPHA_VID2GALPHA_LSBMASK (0x000003FF) ++#define PDP_VID2GALPHA_VID2GALPHA_SHIFT (0) ++#define PDP_VID2GALPHA_VID2GALPHA_LENGTH (10) ++#define PDP_VID2GALPHA_VID2GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3GALPHA_OFFSET (0x00D8) ++ ++/* PDP, VID3GALPHA, VID3GALPHA ++*/ ++#define PDP_VID3GALPHA_VID3GALPHA_MASK (0x000003FF) ++#define PDP_VID3GALPHA_VID3GALPHA_LSBMASK (0x000003FF) ++#define PDP_VID3GALPHA_VID3GALPHA_SHIFT (0) ++#define PDP_VID3GALPHA_VID3GALPHA_LENGTH (10) ++#define PDP_VID3GALPHA_VID3GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4GALPHA_OFFSET (0x00DC) ++ ++/* PDP, VID4GALPHA, VID4GALPHA ++*/ ++#define PDP_VID4GALPHA_VID4GALPHA_MASK (0x000003FF) ++#define PDP_VID4GALPHA_VID4GALPHA_LSBMASK (0x000003FF) ++#define PDP_VID4GALPHA_VID4GALPHA_SHIFT (0) ++#define PDP_VID4GALPHA_VID4GALPHA_LENGTH (10) ++#define PDP_VID4GALPHA_VID4GALPHA_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1CKEY_R_OFFSET (0x00E0) ++ ++/* PDP, GRPH1CKEY_R, GRPH1CKEY_R ++*/ ++#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_MASK (0x000003FF) ++#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LSBMASK (0x000003FF) ++#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SHIFT (0) ++#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_LENGTH (10) ++#define PDP_GRPH1CKEY_R_GRPH1CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1CKEY_GB_OFFSET (0x00E4) ++ ++/* PDP, GRPH1CKEY_GB, GRPH1CKEY_G ++*/ ++#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_MASK (0x03FF0000) ++#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LSBMASK (0x000003FF) ++#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SHIFT (16) ++#define 
PDP_GRPH1CKEY_GB_GRPH1CKEY_G_LENGTH (10) ++#define PDP_GRPH1CKEY_GB_GRPH1CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1CKEY_GB, GRPH1CKEY_B ++*/ ++#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_MASK (0x000003FF) ++#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LSBMASK (0x000003FF) ++#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SHIFT (0) ++#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_LENGTH (10) ++#define PDP_GRPH1CKEY_GB_GRPH1CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2CKEY_R_OFFSET (0x00E8) ++ ++/* PDP, GRPH2CKEY_R, GRPH2CKEY_R ++*/ ++#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_MASK (0x000003FF) ++#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LSBMASK (0x000003FF) ++#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SHIFT (0) ++#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_LENGTH (10) ++#define PDP_GRPH2CKEY_R_GRPH2CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2CKEY_GB_OFFSET (0x00EC) ++ ++/* PDP, GRPH2CKEY_GB, GRPH2CKEY_G ++*/ ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_MASK (0x03FF0000) ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LSBMASK (0x000003FF) ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SHIFT (16) ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_LENGTH (10) ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2CKEY_GB, GRPH2CKEY_B ++*/ ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_MASK (0x000003FF) ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LSBMASK (0x000003FF) ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SHIFT (0) ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_LENGTH (10) ++#define PDP_GRPH2CKEY_GB_GRPH2CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3CKEY_R_OFFSET (0x00F0) ++ ++/* PDP, GRPH3CKEY_R, GRPH3CKEY_R ++*/ ++#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_MASK (0x000003FF) ++#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LSBMASK (0x000003FF) ++#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SHIFT (0) ++#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_LENGTH (10) ++#define PDP_GRPH3CKEY_R_GRPH3CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3CKEY_GB_OFFSET (0x00F4) ++ ++/* PDP, GRPH3CKEY_GB, GRPH3CKEY_G ++*/ ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_MASK (0x03FF0000) ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LSBMASK (0x000003FF) ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SHIFT (16) ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_LENGTH (10) ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3CKEY_GB, GRPH3CKEY_B ++*/ ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_MASK (0x000003FF) ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LSBMASK (0x000003FF) ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SHIFT (0) ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_LENGTH (10) ++#define PDP_GRPH3CKEY_GB_GRPH3CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4CKEY_R_OFFSET (0x00F8) ++ ++/* PDP, GRPH4CKEY_R, GRPH4CKEY_R ++*/ ++#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_MASK (0x000003FF) ++#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LSBMASK (0x000003FF) ++#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SHIFT (0) ++#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_LENGTH (10) ++#define PDP_GRPH4CKEY_R_GRPH4CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4CKEY_GB_OFFSET (0x00FC) ++ ++/* PDP, GRPH4CKEY_GB, GRPH4CKEY_G ++*/ ++#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_MASK (0x03FF0000) ++#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LSBMASK (0x000003FF) ++#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SHIFT (16) ++#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_LENGTH (10) ++#define PDP_GRPH4CKEY_GB_GRPH4CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4CKEY_GB, GRPH4CKEY_B ++*/ ++#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_MASK (0x000003FF) ++#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LSBMASK (0x000003FF) ++#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SHIFT (0) ++#define PDP_GRPH4CKEY_GB_GRPH4CKEY_B_LENGTH (10) ++#define 
PDP_GRPH4CKEY_GB_GRPH4CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1CKEY_R_OFFSET (0x0100) ++ ++/* PDP, VID1CKEY_R, VID1CKEY_R ++*/ ++#define PDP_VID1CKEY_R_VID1CKEY_R_MASK (0x000003FF) ++#define PDP_VID1CKEY_R_VID1CKEY_R_LSBMASK (0x000003FF) ++#define PDP_VID1CKEY_R_VID1CKEY_R_SHIFT (0) ++#define PDP_VID1CKEY_R_VID1CKEY_R_LENGTH (10) ++#define PDP_VID1CKEY_R_VID1CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1CKEY_GB_OFFSET (0x0104) ++ ++/* PDP, VID1CKEY_GB, VID1CKEY_G ++*/ ++#define PDP_VID1CKEY_GB_VID1CKEY_G_MASK (0x03FF0000) ++#define PDP_VID1CKEY_GB_VID1CKEY_G_LSBMASK (0x000003FF) ++#define PDP_VID1CKEY_GB_VID1CKEY_G_SHIFT (16) ++#define PDP_VID1CKEY_GB_VID1CKEY_G_LENGTH (10) ++#define PDP_VID1CKEY_GB_VID1CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1CKEY_GB, VID1CKEY_B ++*/ ++#define PDP_VID1CKEY_GB_VID1CKEY_B_MASK (0x000003FF) ++#define PDP_VID1CKEY_GB_VID1CKEY_B_LSBMASK (0x000003FF) ++#define PDP_VID1CKEY_GB_VID1CKEY_B_SHIFT (0) ++#define PDP_VID1CKEY_GB_VID1CKEY_B_LENGTH (10) ++#define PDP_VID1CKEY_GB_VID1CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2CKEY_R_OFFSET (0x0108) ++ ++/* PDP, VID2CKEY_R, VID2CKEY_R ++*/ ++#define PDP_VID2CKEY_R_VID2CKEY_R_MASK (0x000003FF) ++#define PDP_VID2CKEY_R_VID2CKEY_R_LSBMASK (0x000003FF) ++#define PDP_VID2CKEY_R_VID2CKEY_R_SHIFT (0) ++#define PDP_VID2CKEY_R_VID2CKEY_R_LENGTH (10) ++#define PDP_VID2CKEY_R_VID2CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2CKEY_GB_OFFSET (0x010C) ++ ++/* PDP, VID2CKEY_GB, VID2CKEY_G ++*/ ++#define PDP_VID2CKEY_GB_VID2CKEY_G_MASK (0x03FF0000) ++#define PDP_VID2CKEY_GB_VID2CKEY_G_LSBMASK (0x000003FF) ++#define PDP_VID2CKEY_GB_VID2CKEY_G_SHIFT (16) ++#define PDP_VID2CKEY_GB_VID2CKEY_G_LENGTH (10) ++#define PDP_VID2CKEY_GB_VID2CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2CKEY_GB, VID2CKEY_B ++*/ ++#define PDP_VID2CKEY_GB_VID2CKEY_B_MASK (0x000003FF) ++#define PDP_VID2CKEY_GB_VID2CKEY_B_LSBMASK (0x000003FF) ++#define PDP_VID2CKEY_GB_VID2CKEY_B_SHIFT (0) ++#define PDP_VID2CKEY_GB_VID2CKEY_B_LENGTH (10) ++#define PDP_VID2CKEY_GB_VID2CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3CKEY_R_OFFSET (0x0110) ++ ++/* PDP, VID3CKEY_R, VID3CKEY_R ++*/ ++#define PDP_VID3CKEY_R_VID3CKEY_R_MASK (0x000003FF) ++#define PDP_VID3CKEY_R_VID3CKEY_R_LSBMASK (0x000003FF) ++#define PDP_VID3CKEY_R_VID3CKEY_R_SHIFT (0) ++#define PDP_VID3CKEY_R_VID3CKEY_R_LENGTH (10) ++#define PDP_VID3CKEY_R_VID3CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3CKEY_GB_OFFSET (0x0114) ++ ++/* PDP, VID3CKEY_GB, VID3CKEY_G ++*/ ++#define PDP_VID3CKEY_GB_VID3CKEY_G_MASK (0x03FF0000) ++#define PDP_VID3CKEY_GB_VID3CKEY_G_LSBMASK (0x000003FF) ++#define PDP_VID3CKEY_GB_VID3CKEY_G_SHIFT (16) ++#define PDP_VID3CKEY_GB_VID3CKEY_G_LENGTH (10) ++#define PDP_VID3CKEY_GB_VID3CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3CKEY_GB, VID3CKEY_B ++*/ ++#define PDP_VID3CKEY_GB_VID3CKEY_B_MASK (0x000003FF) ++#define PDP_VID3CKEY_GB_VID3CKEY_B_LSBMASK (0x000003FF) ++#define PDP_VID3CKEY_GB_VID3CKEY_B_SHIFT (0) ++#define PDP_VID3CKEY_GB_VID3CKEY_B_LENGTH (10) ++#define PDP_VID3CKEY_GB_VID3CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4CKEY_R_OFFSET (0x0118) ++ ++/* PDP, VID4CKEY_R, VID4CKEY_R ++*/ ++#define PDP_VID4CKEY_R_VID4CKEY_R_MASK (0x000003FF) ++#define PDP_VID4CKEY_R_VID4CKEY_R_LSBMASK (0x000003FF) ++#define PDP_VID4CKEY_R_VID4CKEY_R_SHIFT (0) ++#define PDP_VID4CKEY_R_VID4CKEY_R_LENGTH (10) ++#define PDP_VID4CKEY_R_VID4CKEY_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4CKEY_GB_OFFSET (0x011C) ++ ++/* PDP, VID4CKEY_GB, VID4CKEY_G ++*/ ++#define 
PDP_VID4CKEY_GB_VID4CKEY_G_MASK (0x03FF0000) ++#define PDP_VID4CKEY_GB_VID4CKEY_G_LSBMASK (0x000003FF) ++#define PDP_VID4CKEY_GB_VID4CKEY_G_SHIFT (16) ++#define PDP_VID4CKEY_GB_VID4CKEY_G_LENGTH (10) ++#define PDP_VID4CKEY_GB_VID4CKEY_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4CKEY_GB, VID4CKEY_B ++*/ ++#define PDP_VID4CKEY_GB_VID4CKEY_B_MASK (0x000003FF) ++#define PDP_VID4CKEY_GB_VID4CKEY_B_LSBMASK (0x000003FF) ++#define PDP_VID4CKEY_GB_VID4CKEY_B_SHIFT (0) ++#define PDP_VID4CKEY_GB_VID4CKEY_B_LENGTH (10) ++#define PDP_VID4CKEY_GB_VID4CKEY_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1BLND2_R_OFFSET (0x0120) ++ ++/* PDP, GRPH1BLND2_R, GRPH1PIXDBL ++*/ ++#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_MASK (0x80000000) ++#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LSBMASK (0x00000001) ++#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SHIFT (31) ++#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_LENGTH (1) ++#define PDP_GRPH1BLND2_R_GRPH1PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1BLND2_R, GRPH1LINDBL ++*/ ++#define PDP_GRPH1BLND2_R_GRPH1LINDBL_MASK (0x20000000) ++#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LSBMASK (0x00000001) ++#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SHIFT (29) ++#define PDP_GRPH1BLND2_R_GRPH1LINDBL_LENGTH (1) ++#define PDP_GRPH1BLND2_R_GRPH1LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1BLND2_R, GRPH1CKEYMASK_R ++*/ ++#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_MASK (0x000003FF) ++#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LSBMASK (0x000003FF) ++#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SHIFT (0) ++#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_LENGTH (10) ++#define PDP_GRPH1BLND2_R_GRPH1CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1BLND2_GB_OFFSET (0x0124) ++ ++/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_G ++*/ ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_MASK (0x03FF0000) ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LSBMASK (0x000003FF) ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SHIFT (16) ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_LENGTH (10) ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1BLND2_GB, GRPH1CKEYMASK_B ++*/ ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_MASK (0x000003FF) ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LSBMASK (0x000003FF) ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SHIFT (0) ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_LENGTH (10) ++#define PDP_GRPH1BLND2_GB_GRPH1CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2BLND2_R_OFFSET (0x0128) ++ ++/* PDP, GRPH2BLND2_R, GRPH2PIXDBL ++*/ ++#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_MASK (0x80000000) ++#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LSBMASK (0x00000001) ++#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SHIFT (31) ++#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_LENGTH (1) ++#define PDP_GRPH2BLND2_R_GRPH2PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2BLND2_R, GRPH2LINDBL ++*/ ++#define PDP_GRPH2BLND2_R_GRPH2LINDBL_MASK (0x20000000) ++#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LSBMASK (0x00000001) ++#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SHIFT (29) ++#define PDP_GRPH2BLND2_R_GRPH2LINDBL_LENGTH (1) ++#define PDP_GRPH2BLND2_R_GRPH2LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2BLND2_R, GRPH2CKEYMASK_R ++*/ ++#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_MASK (0x000003FF) ++#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LSBMASK (0x000003FF) ++#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SHIFT (0) ++#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_LENGTH (10) ++#define PDP_GRPH2BLND2_R_GRPH2CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2BLND2_GB_OFFSET (0x012C) ++ ++/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_G ++*/ ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_MASK 
(0x03FF0000) ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LSBMASK (0x000003FF) ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SHIFT (16) ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_LENGTH (10) ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2BLND2_GB, GRPH2CKEYMASK_B ++*/ ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_MASK (0x000003FF) ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LSBMASK (0x000003FF) ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SHIFT (0) ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_LENGTH (10) ++#define PDP_GRPH2BLND2_GB_GRPH2CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3BLND2_R_OFFSET (0x0130) ++ ++/* PDP, GRPH3BLND2_R, GRPH3PIXDBL ++*/ ++#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_MASK (0x80000000) ++#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LSBMASK (0x00000001) ++#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SHIFT (31) ++#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_LENGTH (1) ++#define PDP_GRPH3BLND2_R_GRPH3PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3BLND2_R, GRPH3LINDBL ++*/ ++#define PDP_GRPH3BLND2_R_GRPH3LINDBL_MASK (0x20000000) ++#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LSBMASK (0x00000001) ++#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SHIFT (29) ++#define PDP_GRPH3BLND2_R_GRPH3LINDBL_LENGTH (1) ++#define PDP_GRPH3BLND2_R_GRPH3LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3BLND2_R, GRPH3CKEYMASK_R ++*/ ++#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_MASK (0x000003FF) ++#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LSBMASK (0x000003FF) ++#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SHIFT (0) ++#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_LENGTH (10) ++#define PDP_GRPH3BLND2_R_GRPH3CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3BLND2_GB_OFFSET (0x0134) ++ ++/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_G ++*/ ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_MASK (0x03FF0000) ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LSBMASK (0x000003FF) ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SHIFT (16) ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_LENGTH (10) ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3BLND2_GB, GRPH3CKEYMASK_B ++*/ ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_MASK (0x000003FF) ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LSBMASK (0x000003FF) ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SHIFT (0) ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_LENGTH (10) ++#define PDP_GRPH3BLND2_GB_GRPH3CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4BLND2_R_OFFSET (0x0138) ++ ++/* PDP, GRPH4BLND2_R, GRPH4PIXDBL ++*/ ++#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_MASK (0x80000000) ++#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_LSBMASK (0x00000001) ++#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SHIFT (31) ++#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_LENGTH (1) ++#define PDP_GRPH4BLND2_R_GRPH4PIXDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4BLND2_R, GRPH4LINDBL ++*/ ++#define PDP_GRPH4BLND2_R_GRPH4LINDBL_MASK (0x20000000) ++#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LSBMASK (0x00000001) ++#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SHIFT (29) ++#define PDP_GRPH4BLND2_R_GRPH4LINDBL_LENGTH (1) ++#define PDP_GRPH4BLND2_R_GRPH4LINDBL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4BLND2_R, GRPH4CKEYMASK_R ++*/ ++#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_MASK (0x000003FF) ++#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LSBMASK (0x000003FF) ++#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SHIFT (0) ++#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_LENGTH (10) ++#define PDP_GRPH4BLND2_R_GRPH4CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4BLND2_GB_OFFSET (0x013C) ++ ++/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_G ++*/ ++#define 
PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_MASK (0x03FF0000) ++#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LSBMASK (0x000003FF) ++#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SHIFT (16) ++#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_LENGTH (10) ++#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4BLND2_GB, GRPH4CKEYMASK_B ++*/ ++#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_MASK (0x000003FF) ++#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LSBMASK (0x000003FF) ++#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SHIFT (0) ++#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_LENGTH (10) ++#define PDP_GRPH4BLND2_GB_GRPH4CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1BLND2_R_OFFSET (0x0140) ++ ++/* PDP, VID1BLND2_R, VID1CKEYMASK_R ++*/ ++#define PDP_VID1BLND2_R_VID1CKEYMASK_R_MASK (0x000003FF) ++#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LSBMASK (0x000003FF) ++#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SHIFT (0) ++#define PDP_VID1BLND2_R_VID1CKEYMASK_R_LENGTH (10) ++#define PDP_VID1BLND2_R_VID1CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1BLND2_GB_OFFSET (0x0144) ++ ++/* PDP, VID1BLND2_GB, VID1CKEYMASK_G ++*/ ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_MASK (0x03FF0000) ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LSBMASK (0x000003FF) ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SHIFT (16) ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_LENGTH (10) ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1BLND2_GB, VID1CKEYMASK_B ++*/ ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_MASK (0x000003FF) ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LSBMASK (0x000003FF) ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SHIFT (0) ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_LENGTH (10) ++#define PDP_VID1BLND2_GB_VID1CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2BLND2_R_OFFSET (0x0148) ++ ++/* PDP, VID2BLND2_R, VID2CKEYMASK_R ++*/ ++#define PDP_VID2BLND2_R_VID2CKEYMASK_R_MASK (0x000003FF) ++#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LSBMASK (0x000003FF) ++#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SHIFT (0) ++#define PDP_VID2BLND2_R_VID2CKEYMASK_R_LENGTH (10) ++#define PDP_VID2BLND2_R_VID2CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2BLND2_GB_OFFSET (0x014C) ++ ++/* PDP, VID2BLND2_GB, VID2CKEYMASK_G ++*/ ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_MASK (0x03FF0000) ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LSBMASK (0x000003FF) ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SHIFT (16) ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_LENGTH (10) ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2BLND2_GB, VID2CKEYMASK_B ++*/ ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_MASK (0x000003FF) ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LSBMASK (0x000003FF) ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SHIFT (0) ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_LENGTH (10) ++#define PDP_VID2BLND2_GB_VID2CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3BLND2_R_OFFSET (0x0150) ++ ++/* PDP, VID3BLND2_R, VID3CKEYMASK_R ++*/ ++#define PDP_VID3BLND2_R_VID3CKEYMASK_R_MASK (0x000003FF) ++#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LSBMASK (0x000003FF) ++#define PDP_VID3BLND2_R_VID3CKEYMASK_R_SHIFT (0) ++#define PDP_VID3BLND2_R_VID3CKEYMASK_R_LENGTH (10) ++#define PDP_VID3BLND2_R_VID3CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3BLND2_GB_OFFSET (0x0154) ++ ++/* PDP, VID3BLND2_GB, VID3CKEYMASK_G ++*/ ++#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_MASK (0x03FF0000) ++#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_LSBMASK (0x000003FF) ++#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SHIFT (16) ++#define 
PDP_VID3BLND2_GB_VID3CKEYMASK_G_LENGTH (10) ++#define PDP_VID3BLND2_GB_VID3CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3BLND2_GB, VID3CKEYMASK_B ++*/ ++#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_MASK (0x000003FF) ++#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LSBMASK (0x000003FF) ++#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SHIFT (0) ++#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_LENGTH (10) ++#define PDP_VID3BLND2_GB_VID3CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4BLND2_R_OFFSET (0x0158) ++ ++/* PDP, VID4BLND2_R, VID4CKEYMASK_R ++*/ ++#define PDP_VID4BLND2_R_VID4CKEYMASK_R_MASK (0x000003FF) ++#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LSBMASK (0x000003FF) ++#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SHIFT (0) ++#define PDP_VID4BLND2_R_VID4CKEYMASK_R_LENGTH (10) ++#define PDP_VID4BLND2_R_VID4CKEYMASK_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4BLND2_GB_OFFSET (0x015C) ++ ++/* PDP, VID4BLND2_GB, VID4CKEYMASK_G ++*/ ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_MASK (0x03FF0000) ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LSBMASK (0x000003FF) ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SHIFT (16) ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_LENGTH (10) ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4BLND2_GB, VID4CKEYMASK_B ++*/ ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_MASK (0x000003FF) ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LSBMASK (0x000003FF) ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SHIFT (0) ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_LENGTH (10) ++#define PDP_VID4BLND2_GB_VID4CKEYMASK_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1INTERLEAVE_CTRL_OFFSET (0x0160) ++ ++/* PDP, GRPH1INTERLEAVE_CTRL, GRPH1INTFIELD ++*/ ++#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_MASK (0x00000001) ++#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LSBMASK (0x00000001) ++#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SHIFT (0) ++#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_LENGTH (1) ++#define PDP_GRPH1INTERLEAVE_CTRL_GRPH1INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2INTERLEAVE_CTRL_OFFSET (0x0164) ++ ++/* PDP, GRPH2INTERLEAVE_CTRL, GRPH2INTFIELD ++*/ ++#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_MASK (0x00000001) ++#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LSBMASK (0x00000001) ++#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SHIFT (0) ++#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_LENGTH (1) ++#define PDP_GRPH2INTERLEAVE_CTRL_GRPH2INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3INTERLEAVE_CTRL_OFFSET (0x0168) ++ ++/* PDP, GRPH3INTERLEAVE_CTRL, GRPH3INTFIELD ++*/ ++#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_MASK (0x00000001) ++#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LSBMASK (0x00000001) ++#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SHIFT (0) ++#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_LENGTH (1) ++#define PDP_GRPH3INTERLEAVE_CTRL_GRPH3INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4INTERLEAVE_CTRL_OFFSET (0x016C) ++ ++/* PDP, GRPH4INTERLEAVE_CTRL, GRPH4INTFIELD ++*/ ++#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_MASK (0x00000001) ++#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LSBMASK (0x00000001) ++#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SHIFT (0) ++#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_LENGTH (1) ++#define PDP_GRPH4INTERLEAVE_CTRL_GRPH4INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1INTERLEAVE_CTRL_OFFSET (0x0170) ++ ++/* PDP, VID1INTERLEAVE_CTRL, VID1INTFIELD ++*/ ++#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_MASK (0x00000001) ++#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LSBMASK (0x00000001) ++#define 
PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SHIFT (0) ++#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_LENGTH (1) ++#define PDP_VID1INTERLEAVE_CTRL_VID1INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2INTERLEAVE_CTRL_OFFSET (0x0174) ++ ++/* PDP, VID2INTERLEAVE_CTRL, VID2INTFIELD ++*/ ++#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_MASK (0x00000001) ++#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LSBMASK (0x00000001) ++#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SHIFT (0) ++#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_LENGTH (1) ++#define PDP_VID2INTERLEAVE_CTRL_VID2INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3INTERLEAVE_CTRL_OFFSET (0x0178) ++ ++/* PDP, VID3INTERLEAVE_CTRL, VID3INTFIELD ++*/ ++#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_MASK (0x00000001) ++#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LSBMASK (0x00000001) ++#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SHIFT (0) ++#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_LENGTH (1) ++#define PDP_VID3INTERLEAVE_CTRL_VID3INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4INTERLEAVE_CTRL_OFFSET (0x017C) ++ ++/* PDP, VID4INTERLEAVE_CTRL, VID4INTFIELD ++*/ ++#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_MASK (0x00000001) ++#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LSBMASK (0x00000001) ++#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SHIFT (0) ++#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_LENGTH (1) ++#define PDP_VID4INTERLEAVE_CTRL_VID4INTFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1BASEADDR_OFFSET (0x0180) ++ ++/* PDP, GRPH1BASEADDR, GRPH1BASEADDR ++*/ ++#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_MASK (0xFFFFFFE0) ++#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SHIFT (5) ++#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_LENGTH (27) ++#define PDP_GRPH1BASEADDR_GRPH1BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2BASEADDR_OFFSET (0x0184) ++ ++/* PDP, GRPH2BASEADDR, GRPH2BASEADDR ++*/ ++#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_MASK (0xFFFFFFE0) ++#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SHIFT (5) ++#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_LENGTH (27) ++#define PDP_GRPH2BASEADDR_GRPH2BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3BASEADDR_OFFSET (0x0188) ++ ++/* PDP, GRPH3BASEADDR, GRPH3BASEADDR ++*/ ++#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_MASK (0xFFFFFFE0) ++#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SHIFT (5) ++#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_LENGTH (27) ++#define PDP_GRPH3BASEADDR_GRPH3BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4BASEADDR_OFFSET (0x018C) ++ ++/* PDP, GRPH4BASEADDR, GRPH4BASEADDR ++*/ ++#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_MASK (0xFFFFFFE0) ++#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SHIFT (5) ++#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_LENGTH (27) ++#define PDP_GRPH4BASEADDR_GRPH4BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1BASEADDR_OFFSET (0x0190) ++ ++/* PDP, VID1BASEADDR, VID1BASEADDR ++*/ ++#define PDP_VID1BASEADDR_VID1BASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID1BASEADDR_VID1BASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID1BASEADDR_VID1BASEADDR_SHIFT (5) ++#define PDP_VID1BASEADDR_VID1BASEADDR_LENGTH (27) ++#define PDP_VID1BASEADDR_VID1BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2BASEADDR_OFFSET (0x0194) ++ ++/* PDP, VID2BASEADDR, VID2BASEADDR ++*/ ++#define PDP_VID2BASEADDR_VID2BASEADDR_MASK (0xFFFFFFE0) ++#define 
PDP_VID2BASEADDR_VID2BASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID2BASEADDR_VID2BASEADDR_SHIFT (5) ++#define PDP_VID2BASEADDR_VID2BASEADDR_LENGTH (27) ++#define PDP_VID2BASEADDR_VID2BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3BASEADDR_OFFSET (0x0198) ++ ++/* PDP, VID3BASEADDR, VID3BASEADDR ++*/ ++#define PDP_VID3BASEADDR_VID3BASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID3BASEADDR_VID3BASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID3BASEADDR_VID3BASEADDR_SHIFT (5) ++#define PDP_VID3BASEADDR_VID3BASEADDR_LENGTH (27) ++#define PDP_VID3BASEADDR_VID3BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4BASEADDR_OFFSET (0x019C) ++ ++/* PDP, VID4BASEADDR, VID4BASEADDR ++*/ ++#define PDP_VID4BASEADDR_VID4BASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID4BASEADDR_VID4BASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID4BASEADDR_VID4BASEADDR_SHIFT (5) ++#define PDP_VID4BASEADDR_VID4BASEADDR_LENGTH (27) ++#define PDP_VID4BASEADDR_VID4BASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1UBASEADDR_OFFSET (0x01B0) ++ ++/* PDP, VID1UBASEADDR, VID1UBASEADDR ++*/ ++#define PDP_VID1UBASEADDR_VID1UBASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID1UBASEADDR_VID1UBASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID1UBASEADDR_VID1UBASEADDR_SHIFT (5) ++#define PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH (27) ++#define PDP_VID1UBASEADDR_VID1UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2UBASEADDR_OFFSET (0x01B4) ++ ++/* PDP, VID2UBASEADDR, VID2UBASEADDR ++*/ ++#define PDP_VID2UBASEADDR_VID2UBASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID2UBASEADDR_VID2UBASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID2UBASEADDR_VID2UBASEADDR_SHIFT (5) ++#define PDP_VID2UBASEADDR_VID2UBASEADDR_LENGTH (27) ++#define PDP_VID2UBASEADDR_VID2UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3UBASEADDR_OFFSET (0x01B8) ++ ++/* PDP, VID3UBASEADDR, VID3UBASEADDR ++*/ ++#define PDP_VID3UBASEADDR_VID3UBASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID3UBASEADDR_VID3UBASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID3UBASEADDR_VID3UBASEADDR_SHIFT (5) ++#define PDP_VID3UBASEADDR_VID3UBASEADDR_LENGTH (27) ++#define PDP_VID3UBASEADDR_VID3UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4UBASEADDR_OFFSET (0x01BC) ++ ++/* PDP, VID4UBASEADDR, VID4UBASEADDR ++*/ ++#define PDP_VID4UBASEADDR_VID4UBASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID4UBASEADDR_VID4UBASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID4UBASEADDR_VID4UBASEADDR_SHIFT (5) ++#define PDP_VID4UBASEADDR_VID4UBASEADDR_LENGTH (27) ++#define PDP_VID4UBASEADDR_VID4UBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VBASEADDR_OFFSET (0x01D0) ++ ++/* PDP, VID1VBASEADDR, VID1VBASEADDR ++*/ ++#define PDP_VID1VBASEADDR_VID1VBASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID1VBASEADDR_VID1VBASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID1VBASEADDR_VID1VBASEADDR_SHIFT (5) ++#define PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH (27) ++#define PDP_VID1VBASEADDR_VID1VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VBASEADDR_OFFSET (0x01D4) ++ ++/* PDP, VID2VBASEADDR, VID2VBASEADDR ++*/ ++#define PDP_VID2VBASEADDR_VID2VBASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID2VBASEADDR_VID2VBASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID2VBASEADDR_VID2VBASEADDR_SHIFT (5) ++#define PDP_VID2VBASEADDR_VID2VBASEADDR_LENGTH (27) ++#define PDP_VID2VBASEADDR_VID2VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VBASEADDR_OFFSET (0x01D8) ++ ++/* PDP, VID3VBASEADDR, VID3VBASEADDR ++*/ ++#define PDP_VID3VBASEADDR_VID3VBASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID3VBASEADDR_VID3VBASEADDR_LSBMASK (0x07FFFFFF) ++#define 
PDP_VID3VBASEADDR_VID3VBASEADDR_SHIFT (5) ++#define PDP_VID3VBASEADDR_VID3VBASEADDR_LENGTH (27) ++#define PDP_VID3VBASEADDR_VID3VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VBASEADDR_OFFSET (0x01DC) ++ ++/* PDP, VID4VBASEADDR, VID4VBASEADDR ++*/ ++#define PDP_VID4VBASEADDR_VID4VBASEADDR_MASK (0xFFFFFFE0) ++#define PDP_VID4VBASEADDR_VID4VBASEADDR_LSBMASK (0x07FFFFFF) ++#define PDP_VID4VBASEADDR_VID4VBASEADDR_SHIFT (5) ++#define PDP_VID4VBASEADDR_VID4VBASEADDR_LENGTH (27) ++#define PDP_VID4VBASEADDR_VID4VBASEADDR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1POSTSKIPCTRL_OFFSET (0x0230) ++ ++/* PDP, VID1POSTSKIPCTRL, VID1HPOSTCLIP ++*/ ++#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK (0x007F0000) ++#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LSBMASK (0x0000007F) ++#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT (16) ++#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_LENGTH (7) ++#define PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1POSTSKIPCTRL, VID1VPOSTCLIP ++*/ ++#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_MASK (0x0000003F) ++#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LSBMASK (0x0000003F) ++#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SHIFT (0) ++#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_LENGTH (6) ++#define PDP_VID1POSTSKIPCTRL_VID1VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2POSTSKIPCTRL_OFFSET (0x0234) ++ ++/* PDP, VID2POSTSKIPCTRL, VID2HPOSTCLIP ++*/ ++#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_MASK (0x007F0000) ++#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LSBMASK (0x0000007F) ++#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SHIFT (16) ++#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_LENGTH (7) ++#define PDP_VID2POSTSKIPCTRL_VID2HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2POSTSKIPCTRL, VID2VPOSTCLIP ++*/ ++#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_MASK (0x0000003F) ++#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LSBMASK (0x0000003F) ++#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SHIFT (0) ++#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_LENGTH (6) ++#define PDP_VID2POSTSKIPCTRL_VID2VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3POSTSKIPCTRL_OFFSET (0x0238) ++ ++/* PDP, VID3POSTSKIPCTRL, VID3HPOSTCLIP ++*/ ++#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_MASK (0x007F0000) ++#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LSBMASK (0x0000007F) ++#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SHIFT (16) ++#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_LENGTH (7) ++#define PDP_VID3POSTSKIPCTRL_VID3HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3POSTSKIPCTRL, VID3VPOSTCLIP ++*/ ++#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_MASK (0x0000003F) ++#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LSBMASK (0x0000003F) ++#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SHIFT (0) ++#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_LENGTH (6) ++#define PDP_VID3POSTSKIPCTRL_VID3VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4POSTSKIPCTRL_OFFSET (0x023C) ++ ++/* PDP, VID4POSTSKIPCTRL, VID4HPOSTCLIP ++*/ ++#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_MASK (0x007F0000) ++#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LSBMASK (0x0000007F) ++#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SHIFT (16) ++#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_LENGTH (7) ++#define PDP_VID4POSTSKIPCTRL_VID4HPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4POSTSKIPCTRL, VID4VPOSTCLIP ++*/ ++#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_MASK (0x0000003F) ++#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LSBMASK (0x0000003F) ++#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SHIFT (0) ++#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_LENGTH (6) 
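
Every field above follows the same generated pattern (_MASK, _LSBMASK, _SHIFT, _LENGTH, _SIGNED_FIELD), so driver code normally goes through one pair of generic get/set helpers rather than open-coding the bit arithmetic per field. A minimal sketch of such helpers, plus one use against the VID1 post-clip field; the pdp_vid1_set_hpostclip() wrapper and its iomem base parameter are illustrative assumptions, not part of this patch:

	/* Needs <linux/types.h> and <linux/io.h> in a real driver. */
	static inline u32 pdp_field_get(u32 regval, u32 mask, u32 shift)
	{
		/* Isolate the field and right-justify it. */
		return (regval & mask) >> shift;
	}

	static inline u32 pdp_field_set(u32 regval, u32 mask, u32 shift, u32 val)
	{
		/* Clear the field, then merge in the new right-justified value. */
		return (regval & ~mask) | ((val << shift) & mask);
	}

	/* Hypothetical example: clip pixels after the VID1 scaler. */
	static void pdp_vid1_set_hpostclip(void __iomem *base, u32 clip)
	{
		u32 v = readl(base + PDP_VID1POSTSKIPCTRL_OFFSET);

		v = pdp_field_set(v, PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_MASK,
				  PDP_VID1POSTSKIPCTRL_VID1HPOSTCLIP_SHIFT, clip);
		writel(v, base + PDP_VID1POSTSKIPCTRL_OFFSET);
	}

Note that throughout these definitions _LSBMASK is simply the mask already shifted down (MASK >> SHIFT), so it can serve as a range check on val before the merge.
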
++#define PDP_VID4POSTSKIPCTRL_VID4VPOSTCLIP_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1DECIMATE_CTRL_OFFSET (0x0240) ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_LINE_DISCARD_MODE ++*/ ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_PIXEL_HALVE ++*/ ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1DECIMATE_CTRL, GRPH1DECIMATE_EN ++*/ ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_MASK (0x00000001) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LSBMASK (0x00000001) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SHIFT (0) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_LENGTH (1) ++#define PDP_GRPH1DECIMATE_CTRL_GRPH1DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2DECIMATE_CTRL_OFFSET (0x0244) ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_LINE_DISCARD_MODE ++*/ ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_PIXEL_HALVE ++*/ ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2DECIMATE_CTRL, GRPH2DECIMATE_EN ++*/ ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_MASK (0x00000001) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LSBMASK 
(0x00000001) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SHIFT (0) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_LENGTH (1) ++#define PDP_GRPH2DECIMATE_CTRL_GRPH2DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3DECIMATE_CTRL_OFFSET (0x0248) ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_LINE_DISCARD_MODE ++*/ ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_PIXEL_HALVE ++*/ ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3DECIMATE_CTRL, GRPH3DECIMATE_EN ++*/ ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_MASK (0x00000001) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LSBMASK (0x00000001) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SHIFT (0) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_LENGTH (1) ++#define PDP_GRPH3DECIMATE_CTRL_GRPH3DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4DECIMATE_CTRL_OFFSET (0x024C) ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_LINE_DISCARD_MODE ++*/ ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4DECIMATE_CTRL, GRPH4DECIMATE_PIXEL_HALVE ++*/ ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4DECIMATE_CTRL, 
GRPH4DECIMATE_EN ++*/ ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_MASK (0x00000001) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LSBMASK (0x00000001) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SHIFT (0) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_LENGTH (1) ++#define PDP_GRPH4DECIMATE_CTRL_GRPH4DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1DECIMATE_CTRL_OFFSET (0x0250) ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_LINE_DISCARD_MODE ++*/ ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_PIXEL_HALVE ++*/ ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1DECIMATE_CTRL, VID1DECIMATE_EN ++*/ ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_MASK (0x00000001) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LSBMASK (0x00000001) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SHIFT (0) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_LENGTH (1) ++#define PDP_VID1DECIMATE_CTRL_VID1DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2DECIMATE_CTRL_OFFSET (0x0254) ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_LINE_DISCARD_MODE ++*/ ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_PIXEL_HALVE ++*/ ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define 
PDP_VID2DECIMATE_CTRL_VID2DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2DECIMATE_CTRL, VID2DECIMATE_EN ++*/ ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_MASK (0x00000001) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LSBMASK (0x00000001) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SHIFT (0) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_LENGTH (1) ++#define PDP_VID2DECIMATE_CTRL_VID2DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3DECIMATE_CTRL_OFFSET (0x0258) ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_LINE_DISCARD_MODE ++*/ ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_PIXEL_HALVE ++*/ ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3DECIMATE_CTRL, VID3DECIMATE_EN ++*/ ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_MASK (0x00000001) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LSBMASK (0x00000001) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SHIFT (0) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_LENGTH (1) ++#define PDP_VID3DECIMATE_CTRL_VID3DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4DECIMATE_CTRL_OFFSET (0x025C) ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_COUNT ++*/ ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_MASK (0x000000F0) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LSBMASK (0x0000000F) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SHIFT (4) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_LENGTH (4) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_COUNT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_LINE_DISCARD_MODE ++*/ ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_MASK (0x00000008) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LSBMASK (0x00000001) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SHIFT (3) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_LENGTH (1) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_LINE_DISCARD_MODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_PIXEL_HALVE ++*/ ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_MASK (0x00000004) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LSBMASK (0x00000001) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SHIFT (2) ++#define 
PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_LENGTH (1) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_PIXEL_HALVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4DECIMATE_CTRL, VID4DECIMATE_EN ++*/ ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_MASK (0x00000001) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LSBMASK (0x00000001) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SHIFT (0) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_LENGTH (1) ++#define PDP_VID4DECIMATE_CTRL_VID4DECIMATE_EN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1SKIPCTRL_OFFSET (0x0270) ++ ++/* PDP, VID1SKIPCTRL, VID1HSKIP ++*/ ++#define PDP_VID1SKIPCTRL_VID1HSKIP_MASK (0x0FFF0000) ++#define PDP_VID1SKIPCTRL_VID1HSKIP_LSBMASK (0x00000FFF) ++#define PDP_VID1SKIPCTRL_VID1HSKIP_SHIFT (16) ++#define PDP_VID1SKIPCTRL_VID1HSKIP_LENGTH (12) ++#define PDP_VID1SKIPCTRL_VID1HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SKIPCTRL, VID1VSKIP ++*/ ++#define PDP_VID1SKIPCTRL_VID1VSKIP_MASK (0x00000FFF) ++#define PDP_VID1SKIPCTRL_VID1VSKIP_LSBMASK (0x00000FFF) ++#define PDP_VID1SKIPCTRL_VID1VSKIP_SHIFT (0) ++#define PDP_VID1SKIPCTRL_VID1VSKIP_LENGTH (12) ++#define PDP_VID1SKIPCTRL_VID1VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2SKIPCTRL_OFFSET (0x0274) ++ ++/* PDP, VID2SKIPCTRL, VID2HSKIP ++*/ ++#define PDP_VID2SKIPCTRL_VID2HSKIP_MASK (0x0FFF0000) ++#define PDP_VID2SKIPCTRL_VID2HSKIP_LSBMASK (0x00000FFF) ++#define PDP_VID2SKIPCTRL_VID2HSKIP_SHIFT (16) ++#define PDP_VID2SKIPCTRL_VID2HSKIP_LENGTH (12) ++#define PDP_VID2SKIPCTRL_VID2HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SKIPCTRL, VID2VSKIP ++*/ ++#define PDP_VID2SKIPCTRL_VID2VSKIP_MASK (0x00000FFF) ++#define PDP_VID2SKIPCTRL_VID2VSKIP_LSBMASK (0x00000FFF) ++#define PDP_VID2SKIPCTRL_VID2VSKIP_SHIFT (0) ++#define PDP_VID2SKIPCTRL_VID2VSKIP_LENGTH (12) ++#define PDP_VID2SKIPCTRL_VID2VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3SKIPCTRL_OFFSET (0x0278) ++ ++/* PDP, VID3SKIPCTRL, VID3HSKIP ++*/ ++#define PDP_VID3SKIPCTRL_VID3HSKIP_MASK (0x0FFF0000) ++#define PDP_VID3SKIPCTRL_VID3HSKIP_LSBMASK (0x00000FFF) ++#define PDP_VID3SKIPCTRL_VID3HSKIP_SHIFT (16) ++#define PDP_VID3SKIPCTRL_VID3HSKIP_LENGTH (12) ++#define PDP_VID3SKIPCTRL_VID3HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SKIPCTRL, VID3VSKIP ++*/ ++#define PDP_VID3SKIPCTRL_VID3VSKIP_MASK (0x00000FFF) ++#define PDP_VID3SKIPCTRL_VID3VSKIP_LSBMASK (0x00000FFF) ++#define PDP_VID3SKIPCTRL_VID3VSKIP_SHIFT (0) ++#define PDP_VID3SKIPCTRL_VID3VSKIP_LENGTH (12) ++#define PDP_VID3SKIPCTRL_VID3VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4SKIPCTRL_OFFSET (0x027C) ++ ++/* PDP, VID4SKIPCTRL, VID4HSKIP ++*/ ++#define PDP_VID4SKIPCTRL_VID4HSKIP_MASK (0x0FFF0000) ++#define PDP_VID4SKIPCTRL_VID4HSKIP_LSBMASK (0x00000FFF) ++#define PDP_VID4SKIPCTRL_VID4HSKIP_SHIFT (16) ++#define PDP_VID4SKIPCTRL_VID4HSKIP_LENGTH (12) ++#define PDP_VID4SKIPCTRL_VID4HSKIP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SKIPCTRL, VID4VSKIP ++*/ ++#define PDP_VID4SKIPCTRL_VID4VSKIP_MASK (0x00000FFF) ++#define PDP_VID4SKIPCTRL_VID4VSKIP_LSBMASK (0x00000FFF) ++#define PDP_VID4SKIPCTRL_VID4VSKIP_SHIFT (0) ++#define PDP_VID4SKIPCTRL_VID4VSKIP_LENGTH (12) ++#define PDP_VID4SKIPCTRL_VID4VSKIP_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1SCALECTRL_OFFSET (0x0460) ++ ++/* PDP, VID1SCALECTRL, VID1HSCALEBP ++*/ ++#define PDP_VID1SCALECTRL_VID1HSCALEBP_MASK (0x80000000) ++#define PDP_VID1SCALECTRL_VID1HSCALEBP_LSBMASK (0x00000001) ++#define PDP_VID1SCALECTRL_VID1HSCALEBP_SHIFT (31) ++#define PDP_VID1SCALECTRL_VID1HSCALEBP_LENGTH (1) ++#define 
PDP_VID1SCALECTRL_VID1HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VSCALEBP ++*/ ++#define PDP_VID1SCALECTRL_VID1VSCALEBP_MASK (0x40000000) ++#define PDP_VID1SCALECTRL_VID1VSCALEBP_LSBMASK (0x00000001) ++#define PDP_VID1SCALECTRL_VID1VSCALEBP_SHIFT (30) ++#define PDP_VID1SCALECTRL_VID1VSCALEBP_LENGTH (1) ++#define PDP_VID1SCALECTRL_VID1VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1HSBEFOREVS ++*/ ++#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_MASK (0x20000000) ++#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LSBMASK (0x00000001) ++#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SHIFT (29) ++#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_LENGTH (1) ++#define PDP_VID1SCALECTRL_VID1HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VSURUNCTRL ++*/ ++#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_MASK (0x08000000) ++#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LSBMASK (0x00000001) ++#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SHIFT (27) ++#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_LENGTH (1) ++#define PDP_VID1SCALECTRL_VID1VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1PAN_EN ++*/ ++#define PDP_VID1SCALECTRL_VID1PAN_EN_MASK (0x00040000) ++#define PDP_VID1SCALECTRL_VID1PAN_EN_LSBMASK (0x00000001) ++#define PDP_VID1SCALECTRL_VID1PAN_EN_SHIFT (18) ++#define PDP_VID1SCALECTRL_VID1PAN_EN_LENGTH (1) ++#define PDP_VID1SCALECTRL_VID1PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VORDER ++*/ ++#define PDP_VID1SCALECTRL_VID1VORDER_MASK (0x00030000) ++#define PDP_VID1SCALECTRL_VID1VORDER_LSBMASK (0x00000003) ++#define PDP_VID1SCALECTRL_VID1VORDER_SHIFT (16) ++#define PDP_VID1SCALECTRL_VID1VORDER_LENGTH (2) ++#define PDP_VID1SCALECTRL_VID1VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALECTRL, VID1VPITCH ++*/ ++#define PDP_VID1SCALECTRL_VID1VPITCH_MASK (0x0000FFFF) ++#define PDP_VID1SCALECTRL_VID1VPITCH_LSBMASK (0x0000FFFF) ++#define PDP_VID1SCALECTRL_VID1VPITCH_SHIFT (0) ++#define PDP_VID1SCALECTRL_VID1VPITCH_LENGTH (16) ++#define PDP_VID1SCALECTRL_VID1VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VSINIT_OFFSET (0x0464) ++ ++/* PDP, VID1VSINIT, VID1VINITIAL1 ++*/ ++#define PDP_VID1VSINIT_VID1VINITIAL1_MASK (0xFFFF0000) ++#define PDP_VID1VSINIT_VID1VINITIAL1_LSBMASK (0x0000FFFF) ++#define PDP_VID1VSINIT_VID1VINITIAL1_SHIFT (16) ++#define PDP_VID1VSINIT_VID1VINITIAL1_LENGTH (16) ++#define PDP_VID1VSINIT_VID1VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1VSINIT, VID1VINITIAL0 ++*/ ++#define PDP_VID1VSINIT_VID1VINITIAL0_MASK (0x0000FFFF) ++#define PDP_VID1VSINIT_VID1VINITIAL0_LSBMASK (0x0000FFFF) ++#define PDP_VID1VSINIT_VID1VINITIAL0_SHIFT (0) ++#define PDP_VID1VSINIT_VID1VINITIAL0_LENGTH (16) ++#define PDP_VID1VSINIT_VID1VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VCOEFF0_OFFSET (0x0468) ++ ++/* PDP, VID1VCOEFF0, VID1VCOEFF0 ++*/ ++#define PDP_VID1VCOEFF0_VID1VCOEFF0_MASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF0_VID1VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF0_VID1VCOEFF0_SHIFT (0) ++#define PDP_VID1VCOEFF0_VID1VCOEFF0_LENGTH (32) ++#define PDP_VID1VCOEFF0_VID1VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VCOEFF1_OFFSET (0x046C) ++ ++/* PDP, VID1VCOEFF1, VID1VCOEFF1 ++*/ ++#define PDP_VID1VCOEFF1_VID1VCOEFF1_MASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF1_VID1VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF1_VID1VCOEFF1_SHIFT (0) ++#define PDP_VID1VCOEFF1_VID1VCOEFF1_LENGTH (32) ++#define PDP_VID1VCOEFF1_VID1VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VCOEFF2_OFFSET (0x0470) ++ ++/* PDP, VID1VCOEFF2, VID1VCOEFF2 ++*/ 
++#define PDP_VID1VCOEFF2_VID1VCOEFF2_MASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF2_VID1VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF2_VID1VCOEFF2_SHIFT (0) ++#define PDP_VID1VCOEFF2_VID1VCOEFF2_LENGTH (32) ++#define PDP_VID1VCOEFF2_VID1VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VCOEFF3_OFFSET (0x0474) ++ ++/* PDP, VID1VCOEFF3, VID1VCOEFF3 ++*/ ++#define PDP_VID1VCOEFF3_VID1VCOEFF3_MASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF3_VID1VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF3_VID1VCOEFF3_SHIFT (0) ++#define PDP_VID1VCOEFF3_VID1VCOEFF3_LENGTH (32) ++#define PDP_VID1VCOEFF3_VID1VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VCOEFF4_OFFSET (0x0478) ++ ++/* PDP, VID1VCOEFF4, VID1VCOEFF4 ++*/ ++#define PDP_VID1VCOEFF4_VID1VCOEFF4_MASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF4_VID1VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF4_VID1VCOEFF4_SHIFT (0) ++#define PDP_VID1VCOEFF4_VID1VCOEFF4_LENGTH (32) ++#define PDP_VID1VCOEFF4_VID1VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VCOEFF5_OFFSET (0x047C) ++ ++/* PDP, VID1VCOEFF5, VID1VCOEFF5 ++*/ ++#define PDP_VID1VCOEFF5_VID1VCOEFF5_MASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF5_VID1VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF5_VID1VCOEFF5_SHIFT (0) ++#define PDP_VID1VCOEFF5_VID1VCOEFF5_LENGTH (32) ++#define PDP_VID1VCOEFF5_VID1VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VCOEFF6_OFFSET (0x0480) ++ ++/* PDP, VID1VCOEFF6, VID1VCOEFF6 ++*/ ++#define PDP_VID1VCOEFF6_VID1VCOEFF6_MASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF6_VID1VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF6_VID1VCOEFF6_SHIFT (0) ++#define PDP_VID1VCOEFF6_VID1VCOEFF6_LENGTH (32) ++#define PDP_VID1VCOEFF6_VID1VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VCOEFF7_OFFSET (0x0484) ++ ++/* PDP, VID1VCOEFF7, VID1VCOEFF7 ++*/ ++#define PDP_VID1VCOEFF7_VID1VCOEFF7_MASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF7_VID1VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1VCOEFF7_VID1VCOEFF7_SHIFT (0) ++#define PDP_VID1VCOEFF7_VID1VCOEFF7_LENGTH (32) ++#define PDP_VID1VCOEFF7_VID1VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1VCOEFF8_OFFSET (0x0488) ++ ++/* PDP, VID1VCOEFF8, VID1VCOEFF8 ++*/ ++#define PDP_VID1VCOEFF8_VID1VCOEFF8_MASK (0x000000FF) ++#define PDP_VID1VCOEFF8_VID1VCOEFF8_LSBMASK (0x000000FF) ++#define PDP_VID1VCOEFF8_VID1VCOEFF8_SHIFT (0) ++#define PDP_VID1VCOEFF8_VID1VCOEFF8_LENGTH (8) ++#define PDP_VID1VCOEFF8_VID1VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HSINIT_OFFSET (0x048C) ++ ++/* PDP, VID1HSINIT, VID1HINITIAL ++*/ ++#define PDP_VID1HSINIT_VID1HINITIAL_MASK (0xFFFF0000) ++#define PDP_VID1HSINIT_VID1HINITIAL_LSBMASK (0x0000FFFF) ++#define PDP_VID1HSINIT_VID1HINITIAL_SHIFT (16) ++#define PDP_VID1HSINIT_VID1HINITIAL_LENGTH (16) ++#define PDP_VID1HSINIT_VID1HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1HSINIT, VID1HPITCH ++*/ ++#define PDP_VID1HSINIT_VID1HPITCH_MASK (0x0000FFFF) ++#define PDP_VID1HSINIT_VID1HPITCH_LSBMASK (0x0000FFFF) ++#define PDP_VID1HSINIT_VID1HPITCH_SHIFT (0) ++#define PDP_VID1HSINIT_VID1HPITCH_LENGTH (16) ++#define PDP_VID1HSINIT_VID1HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF0_OFFSET (0x0490) ++ ++/* PDP, VID1HCOEFF0, VID1HCOEFF0 ++*/ ++#define PDP_VID1HCOEFF0_VID1HCOEFF0_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF0_VID1HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF0_VID1HCOEFF0_SHIFT (0) ++#define PDP_VID1HCOEFF0_VID1HCOEFF0_LENGTH (32) ++#define PDP_VID1HCOEFF0_VID1HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF1_OFFSET (0x0494) ++ ++/* PDP, VID1HCOEFF1, 
VID1HCOEFF1 ++*/ ++#define PDP_VID1HCOEFF1_VID1HCOEFF1_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF1_VID1HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF1_VID1HCOEFF1_SHIFT (0) ++#define PDP_VID1HCOEFF1_VID1HCOEFF1_LENGTH (32) ++#define PDP_VID1HCOEFF1_VID1HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF2_OFFSET (0x0498) ++ ++/* PDP, VID1HCOEFF2, VID1HCOEFF2 ++*/ ++#define PDP_VID1HCOEFF2_VID1HCOEFF2_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF2_VID1HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF2_VID1HCOEFF2_SHIFT (0) ++#define PDP_VID1HCOEFF2_VID1HCOEFF2_LENGTH (32) ++#define PDP_VID1HCOEFF2_VID1HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF3_OFFSET (0x049C) ++ ++/* PDP, VID1HCOEFF3, VID1HCOEFF3 ++*/ ++#define PDP_VID1HCOEFF3_VID1HCOEFF3_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF3_VID1HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF3_VID1HCOEFF3_SHIFT (0) ++#define PDP_VID1HCOEFF3_VID1HCOEFF3_LENGTH (32) ++#define PDP_VID1HCOEFF3_VID1HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF4_OFFSET (0x04A0) ++ ++/* PDP, VID1HCOEFF4, VID1HCOEFF4 ++*/ ++#define PDP_VID1HCOEFF4_VID1HCOEFF4_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF4_VID1HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF4_VID1HCOEFF4_SHIFT (0) ++#define PDP_VID1HCOEFF4_VID1HCOEFF4_LENGTH (32) ++#define PDP_VID1HCOEFF4_VID1HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF5_OFFSET (0x04A4) ++ ++/* PDP, VID1HCOEFF5, VID1HCOEFF5 ++*/ ++#define PDP_VID1HCOEFF5_VID1HCOEFF5_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF5_VID1HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF5_VID1HCOEFF5_SHIFT (0) ++#define PDP_VID1HCOEFF5_VID1HCOEFF5_LENGTH (32) ++#define PDP_VID1HCOEFF5_VID1HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF6_OFFSET (0x04A8) ++ ++/* PDP, VID1HCOEFF6, VID1HCOEFF6 ++*/ ++#define PDP_VID1HCOEFF6_VID1HCOEFF6_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF6_VID1HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF6_VID1HCOEFF6_SHIFT (0) ++#define PDP_VID1HCOEFF6_VID1HCOEFF6_LENGTH (32) ++#define PDP_VID1HCOEFF6_VID1HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF7_OFFSET (0x04AC) ++ ++/* PDP, VID1HCOEFF7, VID1HCOEFF7 ++*/ ++#define PDP_VID1HCOEFF7_VID1HCOEFF7_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF7_VID1HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF7_VID1HCOEFF7_SHIFT (0) ++#define PDP_VID1HCOEFF7_VID1HCOEFF7_LENGTH (32) ++#define PDP_VID1HCOEFF7_VID1HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF8_OFFSET (0x04B0) ++ ++/* PDP, VID1HCOEFF8, VID1HCOEFF8 ++*/ ++#define PDP_VID1HCOEFF8_VID1HCOEFF8_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF8_VID1HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF8_VID1HCOEFF8_SHIFT (0) ++#define PDP_VID1HCOEFF8_VID1HCOEFF8_LENGTH (32) ++#define PDP_VID1HCOEFF8_VID1HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF9_OFFSET (0x04B4) ++ ++/* PDP, VID1HCOEFF9, VID1HCOEFF9 ++*/ ++#define PDP_VID1HCOEFF9_VID1HCOEFF9_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF9_VID1HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF9_VID1HCOEFF9_SHIFT (0) ++#define PDP_VID1HCOEFF9_VID1HCOEFF9_LENGTH (32) ++#define PDP_VID1HCOEFF9_VID1HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF10_OFFSET (0x04B8) ++ ++/* PDP, VID1HCOEFF10, VID1HCOEFF10 ++*/ ++#define PDP_VID1HCOEFF10_VID1HCOEFF10_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF10_VID1HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF10_VID1HCOEFF10_SHIFT (0) ++#define PDP_VID1HCOEFF10_VID1HCOEFF10_LENGTH (32) ++#define PDP_VID1HCOEFF10_VID1HCOEFF10_SIGNED_FIELD 
IMG_FALSE ++ ++#define PDP_VID1HCOEFF11_OFFSET (0x04BC) ++ ++/* PDP, VID1HCOEFF11, VID1HCOEFF11 ++*/ ++#define PDP_VID1HCOEFF11_VID1HCOEFF11_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF11_VID1HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF11_VID1HCOEFF11_SHIFT (0) ++#define PDP_VID1HCOEFF11_VID1HCOEFF11_LENGTH (32) ++#define PDP_VID1HCOEFF11_VID1HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF12_OFFSET (0x04C0) ++ ++/* PDP, VID1HCOEFF12, VID1HCOEFF12 ++*/ ++#define PDP_VID1HCOEFF12_VID1HCOEFF12_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF12_VID1HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF12_VID1HCOEFF12_SHIFT (0) ++#define PDP_VID1HCOEFF12_VID1HCOEFF12_LENGTH (32) ++#define PDP_VID1HCOEFF12_VID1HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF13_OFFSET (0x04C4) ++ ++/* PDP, VID1HCOEFF13, VID1HCOEFF13 ++*/ ++#define PDP_VID1HCOEFF13_VID1HCOEFF13_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF13_VID1HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF13_VID1HCOEFF13_SHIFT (0) ++#define PDP_VID1HCOEFF13_VID1HCOEFF13_LENGTH (32) ++#define PDP_VID1HCOEFF13_VID1HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF14_OFFSET (0x04C8) ++ ++/* PDP, VID1HCOEFF14, VID1HCOEFF14 ++*/ ++#define PDP_VID1HCOEFF14_VID1HCOEFF14_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF14_VID1HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF14_VID1HCOEFF14_SHIFT (0) ++#define PDP_VID1HCOEFF14_VID1HCOEFF14_LENGTH (32) ++#define PDP_VID1HCOEFF14_VID1HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF15_OFFSET (0x04CC) ++ ++/* PDP, VID1HCOEFF15, VID1HCOEFF15 ++*/ ++#define PDP_VID1HCOEFF15_VID1HCOEFF15_MASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF15_VID1HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define PDP_VID1HCOEFF15_VID1HCOEFF15_SHIFT (0) ++#define PDP_VID1HCOEFF15_VID1HCOEFF15_LENGTH (32) ++#define PDP_VID1HCOEFF15_VID1HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1HCOEFF16_OFFSET (0x04D0) ++ ++/* PDP, VID1HCOEFF16, VID1HCOEFF16 ++*/ ++#define PDP_VID1HCOEFF16_VID1HCOEFF16_MASK (0x000000FF) ++#define PDP_VID1HCOEFF16_VID1HCOEFF16_LSBMASK (0x000000FF) ++#define PDP_VID1HCOEFF16_VID1HCOEFF16_SHIFT (0) ++#define PDP_VID1HCOEFF16_VID1HCOEFF16_LENGTH (8) ++#define PDP_VID1HCOEFF16_VID1HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1SCALESIZE_OFFSET (0x04D4) ++ ++/* PDP, VID1SCALESIZE, VID1SCALEWIDTH ++*/ ++#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_MASK (0x0FFF0000) ++#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LSBMASK (0x00000FFF) ++#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SHIFT (16) ++#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_LENGTH (12) ++#define PDP_VID1SCALESIZE_VID1SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1SCALESIZE, VID1SCALEHEIGHT ++*/ ++#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_MASK (0x00000FFF) ++#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SHIFT (0) ++#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_LENGTH (12) ++#define PDP_VID1SCALESIZE_VID1SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CORE_ID_OFFSET (0x04E0) ++ ++/* PDP, PVR_PDP_CORE_ID, GROUP_ID ++*/ ++#define PDP_CORE_ID_GROUP_ID_MASK (0xFF000000) ++#define PDP_CORE_ID_GROUP_ID_LSBMASK (0x000000FF) ++#define PDP_CORE_ID_GROUP_ID_SHIFT (24) ++#define PDP_CORE_ID_GROUP_ID_LENGTH (8) ++#define PDP_CORE_ID_GROUP_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_PDP_CORE_ID, CORE_ID ++*/ ++#define PDP_CORE_ID_CORE_ID_MASK (0x00FF0000) ++#define PDP_CORE_ID_CORE_ID_LSBMASK (0x000000FF) ++#define PDP_CORE_ID_CORE_ID_SHIFT (16) ++#define PDP_CORE_ID_CORE_ID_LENGTH (8) 
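
The CORE_ID register at 0x04E0 packs identification fields that a probe routine can report before touching anything else. A sketch under the same assumptions as above (hypothetical dev/base handles; only the PDP_CORE_ID_* macros come from the patch):

	/* Needs <linux/device.h> and <linux/io.h>. */
	static void pdp_report_core(struct device *dev, void __iomem *base)
	{
		u32 id = readl(base + PDP_CORE_ID_OFFSET);

		dev_info(dev, "PDP group %u core %u\n",
			 (id & PDP_CORE_ID_GROUP_ID_MASK) >> PDP_CORE_ID_GROUP_ID_SHIFT,
			 (id & PDP_CORE_ID_CORE_ID_MASK) >> PDP_CORE_ID_CORE_ID_SHIFT);
	}
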
++#define PDP_CORE_ID_CORE_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_PDP_CORE_ID, CONFIG_ID ++*/ ++#define PDP_CORE_ID_CONFIG_ID_MASK (0x0000FFFF) ++#define PDP_CORE_ID_CONFIG_ID_LSBMASK (0x0000FFFF) ++#define PDP_CORE_ID_CONFIG_ID_SHIFT (0) ++#define PDP_CORE_ID_CONFIG_ID_LENGTH (16) ++#define PDP_CORE_ID_CONFIG_ID_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CORE_REV_OFFSET (0x04F0) ++ ++/* PDP, PVR_PDP_CORE_REV, MAJOR_REV ++*/ ++#define PDP_CORE_REV_MAJOR_REV_MASK (0x00FF0000) ++#define PDP_CORE_REV_MAJOR_REV_LSBMASK (0x000000FF) ++#define PDP_CORE_REV_MAJOR_REV_SHIFT (16) ++#define PDP_CORE_REV_MAJOR_REV_LENGTH (8) ++#define PDP_CORE_REV_MAJOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_PDP_CORE_REV, MINOR_REV ++*/ ++#define PDP_CORE_REV_MINOR_REV_MASK (0x0000FF00) ++#define PDP_CORE_REV_MINOR_REV_LSBMASK (0x000000FF) ++#define PDP_CORE_REV_MINOR_REV_SHIFT (8) ++#define PDP_CORE_REV_MINOR_REV_LENGTH (8) ++#define PDP_CORE_REV_MINOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_PDP_CORE_REV, MAINT_REV ++*/ ++#define PDP_CORE_REV_MAINT_REV_MASK (0x000000FF) ++#define PDP_CORE_REV_MAINT_REV_LSBMASK (0x000000FF) ++#define PDP_CORE_REV_MAINT_REV_SHIFT (0) ++#define PDP_CORE_REV_MAINT_REV_LENGTH (8) ++#define PDP_CORE_REV_MAINT_REV_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2SCALECTRL_OFFSET (0x0500) ++ ++/* PDP, VID2SCALECTRL, VID2HSCALEBP ++*/ ++#define PDP_VID2SCALECTRL_VID2HSCALEBP_MASK (0x80000000) ++#define PDP_VID2SCALECTRL_VID2HSCALEBP_LSBMASK (0x00000001) ++#define PDP_VID2SCALECTRL_VID2HSCALEBP_SHIFT (31) ++#define PDP_VID2SCALECTRL_VID2HSCALEBP_LENGTH (1) ++#define PDP_VID2SCALECTRL_VID2HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VSCALEBP ++*/ ++#define PDP_VID2SCALECTRL_VID2VSCALEBP_MASK (0x40000000) ++#define PDP_VID2SCALECTRL_VID2VSCALEBP_LSBMASK (0x00000001) ++#define PDP_VID2SCALECTRL_VID2VSCALEBP_SHIFT (30) ++#define PDP_VID2SCALECTRL_VID2VSCALEBP_LENGTH (1) ++#define PDP_VID2SCALECTRL_VID2VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2HSBEFOREVS ++*/ ++#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_MASK (0x20000000) ++#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LSBMASK (0x00000001) ++#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SHIFT (29) ++#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_LENGTH (1) ++#define PDP_VID2SCALECTRL_VID2HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VSURUNCTRL ++*/ ++#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_MASK (0x08000000) ++#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LSBMASK (0x00000001) ++#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SHIFT (27) ++#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_LENGTH (1) ++#define PDP_VID2SCALECTRL_VID2VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2PAN_EN ++*/ ++#define PDP_VID2SCALECTRL_VID2PAN_EN_MASK (0x00040000) ++#define PDP_VID2SCALECTRL_VID2PAN_EN_LSBMASK (0x00000001) ++#define PDP_VID2SCALECTRL_VID2PAN_EN_SHIFT (18) ++#define PDP_VID2SCALECTRL_VID2PAN_EN_LENGTH (1) ++#define PDP_VID2SCALECTRL_VID2PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VORDER ++*/ ++#define PDP_VID2SCALECTRL_VID2VORDER_MASK (0x00030000) ++#define PDP_VID2SCALECTRL_VID2VORDER_LSBMASK (0x00000003) ++#define PDP_VID2SCALECTRL_VID2VORDER_SHIFT (16) ++#define PDP_VID2SCALECTRL_VID2VORDER_LENGTH (2) ++#define PDP_VID2SCALECTRL_VID2VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALECTRL, VID2VPITCH ++*/ ++#define PDP_VID2SCALECTRL_VID2VPITCH_MASK (0x0000FFFF) ++#define PDP_VID2SCALECTRL_VID2VPITCH_LSBMASK (0x0000FFFF) ++#define PDP_VID2SCALECTRL_VID2VPITCH_SHIFT (0) ++#define 
PDP_VID2SCALECTRL_VID2VPITCH_LENGTH (16) ++#define PDP_VID2SCALECTRL_VID2VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VSINIT_OFFSET (0x0504) ++ ++/* PDP, VID2VSINIT, VID2VINITIAL1 ++*/ ++#define PDP_VID2VSINIT_VID2VINITIAL1_MASK (0xFFFF0000) ++#define PDP_VID2VSINIT_VID2VINITIAL1_LSBMASK (0x0000FFFF) ++#define PDP_VID2VSINIT_VID2VINITIAL1_SHIFT (16) ++#define PDP_VID2VSINIT_VID2VINITIAL1_LENGTH (16) ++#define PDP_VID2VSINIT_VID2VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2VSINIT, VID2VINITIAL0 ++*/ ++#define PDP_VID2VSINIT_VID2VINITIAL0_MASK (0x0000FFFF) ++#define PDP_VID2VSINIT_VID2VINITIAL0_LSBMASK (0x0000FFFF) ++#define PDP_VID2VSINIT_VID2VINITIAL0_SHIFT (0) ++#define PDP_VID2VSINIT_VID2VINITIAL0_LENGTH (16) ++#define PDP_VID2VSINIT_VID2VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VCOEFF0_OFFSET (0x0508) ++ ++/* PDP, VID2VCOEFF0, VID2VCOEFF0 ++*/ ++#define PDP_VID2VCOEFF0_VID2VCOEFF0_MASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF0_VID2VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF0_VID2VCOEFF0_SHIFT (0) ++#define PDP_VID2VCOEFF0_VID2VCOEFF0_LENGTH (32) ++#define PDP_VID2VCOEFF0_VID2VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VCOEFF1_OFFSET (0x050C) ++ ++/* PDP, VID2VCOEFF1, VID2VCOEFF1 ++*/ ++#define PDP_VID2VCOEFF1_VID2VCOEFF1_MASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF1_VID2VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF1_VID2VCOEFF1_SHIFT (0) ++#define PDP_VID2VCOEFF1_VID2VCOEFF1_LENGTH (32) ++#define PDP_VID2VCOEFF1_VID2VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VCOEFF2_OFFSET (0x0510) ++ ++/* PDP, VID2VCOEFF2, VID2VCOEFF2 ++*/ ++#define PDP_VID2VCOEFF2_VID2VCOEFF2_MASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF2_VID2VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF2_VID2VCOEFF2_SHIFT (0) ++#define PDP_VID2VCOEFF2_VID2VCOEFF2_LENGTH (32) ++#define PDP_VID2VCOEFF2_VID2VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VCOEFF3_OFFSET (0x0514) ++ ++/* PDP, VID2VCOEFF3, VID2VCOEFF3 ++*/ ++#define PDP_VID2VCOEFF3_VID2VCOEFF3_MASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF3_VID2VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF3_VID2VCOEFF3_SHIFT (0) ++#define PDP_VID2VCOEFF3_VID2VCOEFF3_LENGTH (32) ++#define PDP_VID2VCOEFF3_VID2VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VCOEFF4_OFFSET (0x0518) ++ ++/* PDP, VID2VCOEFF4, VID2VCOEFF4 ++*/ ++#define PDP_VID2VCOEFF4_VID2VCOEFF4_MASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF4_VID2VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF4_VID2VCOEFF4_SHIFT (0) ++#define PDP_VID2VCOEFF4_VID2VCOEFF4_LENGTH (32) ++#define PDP_VID2VCOEFF4_VID2VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VCOEFF5_OFFSET (0x051C) ++ ++/* PDP, VID2VCOEFF5, VID2VCOEFF5 ++*/ ++#define PDP_VID2VCOEFF5_VID2VCOEFF5_MASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF5_VID2VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF5_VID2VCOEFF5_SHIFT (0) ++#define PDP_VID2VCOEFF5_VID2VCOEFF5_LENGTH (32) ++#define PDP_VID2VCOEFF5_VID2VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VCOEFF6_OFFSET (0x0520) ++ ++/* PDP, VID2VCOEFF6, VID2VCOEFF6 ++*/ ++#define PDP_VID2VCOEFF6_VID2VCOEFF6_MASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF6_VID2VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF6_VID2VCOEFF6_SHIFT (0) ++#define PDP_VID2VCOEFF6_VID2VCOEFF6_LENGTH (32) ++#define PDP_VID2VCOEFF6_VID2VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VCOEFF7_OFFSET (0x0524) ++ ++/* PDP, VID2VCOEFF7, VID2VCOEFF7 ++*/ ++#define PDP_VID2VCOEFF7_VID2VCOEFF7_MASK (0xFFFFFFFF) ++#define PDP_VID2VCOEFF7_VID2VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define 
PDP_VID2VCOEFF7_VID2VCOEFF7_SHIFT (0) ++#define PDP_VID2VCOEFF7_VID2VCOEFF7_LENGTH (32) ++#define PDP_VID2VCOEFF7_VID2VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2VCOEFF8_OFFSET (0x0528) ++ ++/* PDP, VID2VCOEFF8, VID2VCOEFF8 ++*/ ++#define PDP_VID2VCOEFF8_VID2VCOEFF8_MASK (0x000000FF) ++#define PDP_VID2VCOEFF8_VID2VCOEFF8_LSBMASK (0x000000FF) ++#define PDP_VID2VCOEFF8_VID2VCOEFF8_SHIFT (0) ++#define PDP_VID2VCOEFF8_VID2VCOEFF8_LENGTH (8) ++#define PDP_VID2VCOEFF8_VID2VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HSINIT_OFFSET (0x052C) ++ ++/* PDP, VID2HSINIT, VID2HINITIAL ++*/ ++#define PDP_VID2HSINIT_VID2HINITIAL_MASK (0xFFFF0000) ++#define PDP_VID2HSINIT_VID2HINITIAL_LSBMASK (0x0000FFFF) ++#define PDP_VID2HSINIT_VID2HINITIAL_SHIFT (16) ++#define PDP_VID2HSINIT_VID2HINITIAL_LENGTH (16) ++#define PDP_VID2HSINIT_VID2HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2HSINIT, VID2HPITCH ++*/ ++#define PDP_VID2HSINIT_VID2HPITCH_MASK (0x0000FFFF) ++#define PDP_VID2HSINIT_VID2HPITCH_LSBMASK (0x0000FFFF) ++#define PDP_VID2HSINIT_VID2HPITCH_SHIFT (0) ++#define PDP_VID2HSINIT_VID2HPITCH_LENGTH (16) ++#define PDP_VID2HSINIT_VID2HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF0_OFFSET (0x0530) ++ ++/* PDP, VID2HCOEFF0, VID2HCOEFF0 ++*/ ++#define PDP_VID2HCOEFF0_VID2HCOEFF0_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF0_VID2HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF0_VID2HCOEFF0_SHIFT (0) ++#define PDP_VID2HCOEFF0_VID2HCOEFF0_LENGTH (32) ++#define PDP_VID2HCOEFF0_VID2HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF1_OFFSET (0x0534) ++ ++/* PDP, VID2HCOEFF1, VID2HCOEFF1 ++*/ ++#define PDP_VID2HCOEFF1_VID2HCOEFF1_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF1_VID2HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF1_VID2HCOEFF1_SHIFT (0) ++#define PDP_VID2HCOEFF1_VID2HCOEFF1_LENGTH (32) ++#define PDP_VID2HCOEFF1_VID2HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF2_OFFSET (0x0538) ++ ++/* PDP, VID2HCOEFF2, VID2HCOEFF2 ++*/ ++#define PDP_VID2HCOEFF2_VID2HCOEFF2_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF2_VID2HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF2_VID2HCOEFF2_SHIFT (0) ++#define PDP_VID2HCOEFF2_VID2HCOEFF2_LENGTH (32) ++#define PDP_VID2HCOEFF2_VID2HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF3_OFFSET (0x053C) ++ ++/* PDP, VID2HCOEFF3, VID2HCOEFF3 ++*/ ++#define PDP_VID2HCOEFF3_VID2HCOEFF3_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF3_VID2HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF3_VID2HCOEFF3_SHIFT (0) ++#define PDP_VID2HCOEFF3_VID2HCOEFF3_LENGTH (32) ++#define PDP_VID2HCOEFF3_VID2HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF4_OFFSET (0x0540) ++ ++/* PDP, VID2HCOEFF4, VID2HCOEFF4 ++*/ ++#define PDP_VID2HCOEFF4_VID2HCOEFF4_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF4_VID2HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF4_VID2HCOEFF4_SHIFT (0) ++#define PDP_VID2HCOEFF4_VID2HCOEFF4_LENGTH (32) ++#define PDP_VID2HCOEFF4_VID2HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF5_OFFSET (0x0544) ++ ++/* PDP, VID2HCOEFF5, VID2HCOEFF5 ++*/ ++#define PDP_VID2HCOEFF5_VID2HCOEFF5_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF5_VID2HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF5_VID2HCOEFF5_SHIFT (0) ++#define PDP_VID2HCOEFF5_VID2HCOEFF5_LENGTH (32) ++#define PDP_VID2HCOEFF5_VID2HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF6_OFFSET (0x0548) ++ ++/* PDP, VID2HCOEFF6, VID2HCOEFF6 ++*/ ++#define PDP_VID2HCOEFF6_VID2HCOEFF6_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF6_VID2HCOEFF6_LSBMASK (0xFFFFFFFF) 
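
The VCOEFF/HCOEFF banks are contiguous 32-bit registers (eight full words plus a final register that only implements its low byte), which suggests a packed scaler-tap table written in one sweep. A sketch of loading the VID2 vertical bank; the packing of taps into words is an assumption inferred from the register widths, and the coeffs array is caller-supplied:

	/* Needs <linux/io.h>. coeffs[0..7] fill VCOEFF0..7, coeffs[8] the 8-bit tail. */
	static void pdp_vid2_load_vcoeffs(void __iomem *base, const u32 coeffs[9])
	{
		unsigned int i;

		/* VCOEFF0..7 sit 4 bytes apart, from 0x0508 through 0x0524. */
		for (i = 0; i < 8; i++)
			writel(coeffs[i], base + PDP_VID2VCOEFF0_OFFSET + 4 * i);
		/* The final register only implements bits [7:0]. */
		writel(coeffs[8] & PDP_VID2VCOEFF8_VID2VCOEFF8_MASK,
		       base + PDP_VID2VCOEFF8_OFFSET);
	}
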
++#define PDP_VID2HCOEFF6_VID2HCOEFF6_SHIFT (0) ++#define PDP_VID2HCOEFF6_VID2HCOEFF6_LENGTH (32) ++#define PDP_VID2HCOEFF6_VID2HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF7_OFFSET (0x054C) ++ ++/* PDP, VID2HCOEFF7, VID2HCOEFF7 ++*/ ++#define PDP_VID2HCOEFF7_VID2HCOEFF7_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF7_VID2HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF7_VID2HCOEFF7_SHIFT (0) ++#define PDP_VID2HCOEFF7_VID2HCOEFF7_LENGTH (32) ++#define PDP_VID2HCOEFF7_VID2HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF8_OFFSET (0x0550) ++ ++/* PDP, VID2HCOEFF8, VID2HCOEFF8 ++*/ ++#define PDP_VID2HCOEFF8_VID2HCOEFF8_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF8_VID2HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF8_VID2HCOEFF8_SHIFT (0) ++#define PDP_VID2HCOEFF8_VID2HCOEFF8_LENGTH (32) ++#define PDP_VID2HCOEFF8_VID2HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF9_OFFSET (0x0554) ++ ++/* PDP, VID2HCOEFF9, VID2HCOEFF9 ++*/ ++#define PDP_VID2HCOEFF9_VID2HCOEFF9_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF9_VID2HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF9_VID2HCOEFF9_SHIFT (0) ++#define PDP_VID2HCOEFF9_VID2HCOEFF9_LENGTH (32) ++#define PDP_VID2HCOEFF9_VID2HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF10_OFFSET (0x0558) ++ ++/* PDP, VID2HCOEFF10, VID2HCOEFF10 ++*/ ++#define PDP_VID2HCOEFF10_VID2HCOEFF10_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF10_VID2HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF10_VID2HCOEFF10_SHIFT (0) ++#define PDP_VID2HCOEFF10_VID2HCOEFF10_LENGTH (32) ++#define PDP_VID2HCOEFF10_VID2HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF11_OFFSET (0x055C) ++ ++/* PDP, VID2HCOEFF11, VID2HCOEFF11 ++*/ ++#define PDP_VID2HCOEFF11_VID2HCOEFF11_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF11_VID2HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF11_VID2HCOEFF11_SHIFT (0) ++#define PDP_VID2HCOEFF11_VID2HCOEFF11_LENGTH (32) ++#define PDP_VID2HCOEFF11_VID2HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF12_OFFSET (0x0560) ++ ++/* PDP, VID2HCOEFF12, VID2HCOEFF12 ++*/ ++#define PDP_VID2HCOEFF12_VID2HCOEFF12_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF12_VID2HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF12_VID2HCOEFF12_SHIFT (0) ++#define PDP_VID2HCOEFF12_VID2HCOEFF12_LENGTH (32) ++#define PDP_VID2HCOEFF12_VID2HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF13_OFFSET (0x0564) ++ ++/* PDP, VID2HCOEFF13, VID2HCOEFF13 ++*/ ++#define PDP_VID2HCOEFF13_VID2HCOEFF13_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF13_VID2HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF13_VID2HCOEFF13_SHIFT (0) ++#define PDP_VID2HCOEFF13_VID2HCOEFF13_LENGTH (32) ++#define PDP_VID2HCOEFF13_VID2HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF14_OFFSET (0x0568) ++ ++/* PDP, VID2HCOEFF14, VID2HCOEFF14 ++*/ ++#define PDP_VID2HCOEFF14_VID2HCOEFF14_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF14_VID2HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF14_VID2HCOEFF14_SHIFT (0) ++#define PDP_VID2HCOEFF14_VID2HCOEFF14_LENGTH (32) ++#define PDP_VID2HCOEFF14_VID2HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF15_OFFSET (0x056C) ++ ++/* PDP, VID2HCOEFF15, VID2HCOEFF15 ++*/ ++#define PDP_VID2HCOEFF15_VID2HCOEFF15_MASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF15_VID2HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define PDP_VID2HCOEFF15_VID2HCOEFF15_SHIFT (0) ++#define PDP_VID2HCOEFF15_VID2HCOEFF15_LENGTH (32) ++#define PDP_VID2HCOEFF15_VID2HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2HCOEFF16_OFFSET (0x0570) ++ ++/* PDP, 
VID2HCOEFF16, VID2HCOEFF16 ++*/ ++#define PDP_VID2HCOEFF16_VID2HCOEFF16_MASK (0x000000FF) ++#define PDP_VID2HCOEFF16_VID2HCOEFF16_LSBMASK (0x000000FF) ++#define PDP_VID2HCOEFF16_VID2HCOEFF16_SHIFT (0) ++#define PDP_VID2HCOEFF16_VID2HCOEFF16_LENGTH (8) ++#define PDP_VID2HCOEFF16_VID2HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2SCALESIZE_OFFSET (0x0574) ++ ++/* PDP, VID2SCALESIZE, VID2SCALEWIDTH ++*/ ++#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_MASK (0x0FFF0000) ++#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LSBMASK (0x00000FFF) ++#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SHIFT (16) ++#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_LENGTH (12) ++#define PDP_VID2SCALESIZE_VID2SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2SCALESIZE, VID2SCALEHEIGHT ++*/ ++#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_MASK (0x00000FFF) ++#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SHIFT (0) ++#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_LENGTH (12) ++#define PDP_VID2SCALESIZE_VID2SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3SCALECTRL_OFFSET (0x0578) ++ ++/* PDP, VID3SCALECTRL, VID3HSCALEBP ++*/ ++#define PDP_VID3SCALECTRL_VID3HSCALEBP_MASK (0x80000000) ++#define PDP_VID3SCALECTRL_VID3HSCALEBP_LSBMASK (0x00000001) ++#define PDP_VID3SCALECTRL_VID3HSCALEBP_SHIFT (31) ++#define PDP_VID3SCALECTRL_VID3HSCALEBP_LENGTH (1) ++#define PDP_VID3SCALECTRL_VID3HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VSCALEBP ++*/ ++#define PDP_VID3SCALECTRL_VID3VSCALEBP_MASK (0x40000000) ++#define PDP_VID3SCALECTRL_VID3VSCALEBP_LSBMASK (0x00000001) ++#define PDP_VID3SCALECTRL_VID3VSCALEBP_SHIFT (30) ++#define PDP_VID3SCALECTRL_VID3VSCALEBP_LENGTH (1) ++#define PDP_VID3SCALECTRL_VID3VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3HSBEFOREVS ++*/ ++#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_MASK (0x20000000) ++#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LSBMASK (0x00000001) ++#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SHIFT (29) ++#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_LENGTH (1) ++#define PDP_VID3SCALECTRL_VID3HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VSURUNCTRL ++*/ ++#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_MASK (0x08000000) ++#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LSBMASK (0x00000001) ++#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SHIFT (27) ++#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_LENGTH (1) ++#define PDP_VID3SCALECTRL_VID3VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3PAN_EN ++*/ ++#define PDP_VID3SCALECTRL_VID3PAN_EN_MASK (0x00040000) ++#define PDP_VID3SCALECTRL_VID3PAN_EN_LSBMASK (0x00000001) ++#define PDP_VID3SCALECTRL_VID3PAN_EN_SHIFT (18) ++#define PDP_VID3SCALECTRL_VID3PAN_EN_LENGTH (1) ++#define PDP_VID3SCALECTRL_VID3PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VORDER ++*/ ++#define PDP_VID3SCALECTRL_VID3VORDER_MASK (0x00030000) ++#define PDP_VID3SCALECTRL_VID3VORDER_LSBMASK (0x00000003) ++#define PDP_VID3SCALECTRL_VID3VORDER_SHIFT (16) ++#define PDP_VID3SCALECTRL_VID3VORDER_LENGTH (2) ++#define PDP_VID3SCALECTRL_VID3VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALECTRL, VID3VPITCH ++*/ ++#define PDP_VID3SCALECTRL_VID3VPITCH_MASK (0x0000FFFF) ++#define PDP_VID3SCALECTRL_VID3VPITCH_LSBMASK (0x0000FFFF) ++#define PDP_VID3SCALECTRL_VID3VPITCH_SHIFT (0) ++#define PDP_VID3SCALECTRL_VID3VPITCH_LENGTH (16) ++#define PDP_VID3SCALECTRL_VID3VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VSINIT_OFFSET (0x057C) ++ ++/* PDP, VID3VSINIT, VID3VINITIAL1 ++*/ ++#define 
PDP_VID3VSINIT_VID3VINITIAL1_MASK (0xFFFF0000) ++#define PDP_VID3VSINIT_VID3VINITIAL1_LSBMASK (0x0000FFFF) ++#define PDP_VID3VSINIT_VID3VINITIAL1_SHIFT (16) ++#define PDP_VID3VSINIT_VID3VINITIAL1_LENGTH (16) ++#define PDP_VID3VSINIT_VID3VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3VSINIT, VID3VINITIAL0 ++*/ ++#define PDP_VID3VSINIT_VID3VINITIAL0_MASK (0x0000FFFF) ++#define PDP_VID3VSINIT_VID3VINITIAL0_LSBMASK (0x0000FFFF) ++#define PDP_VID3VSINIT_VID3VINITIAL0_SHIFT (0) ++#define PDP_VID3VSINIT_VID3VINITIAL0_LENGTH (16) ++#define PDP_VID3VSINIT_VID3VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VCOEFF0_OFFSET (0x0580) ++ ++/* PDP, VID3VCOEFF0, VID3VCOEFF0 ++*/ ++#define PDP_VID3VCOEFF0_VID3VCOEFF0_MASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF0_VID3VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF0_VID3VCOEFF0_SHIFT (0) ++#define PDP_VID3VCOEFF0_VID3VCOEFF0_LENGTH (32) ++#define PDP_VID3VCOEFF0_VID3VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VCOEFF1_OFFSET (0x0584) ++ ++/* PDP, VID3VCOEFF1, VID3VCOEFF1 ++*/ ++#define PDP_VID3VCOEFF1_VID3VCOEFF1_MASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF1_VID3VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF1_VID3VCOEFF1_SHIFT (0) ++#define PDP_VID3VCOEFF1_VID3VCOEFF1_LENGTH (32) ++#define PDP_VID3VCOEFF1_VID3VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VCOEFF2_OFFSET (0x0588) ++ ++/* PDP, VID3VCOEFF2, VID3VCOEFF2 ++*/ ++#define PDP_VID3VCOEFF2_VID3VCOEFF2_MASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF2_VID3VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF2_VID3VCOEFF2_SHIFT (0) ++#define PDP_VID3VCOEFF2_VID3VCOEFF2_LENGTH (32) ++#define PDP_VID3VCOEFF2_VID3VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VCOEFF3_OFFSET (0x058C) ++ ++/* PDP, VID3VCOEFF3, VID3VCOEFF3 ++*/ ++#define PDP_VID3VCOEFF3_VID3VCOEFF3_MASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF3_VID3VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF3_VID3VCOEFF3_SHIFT (0) ++#define PDP_VID3VCOEFF3_VID3VCOEFF3_LENGTH (32) ++#define PDP_VID3VCOEFF3_VID3VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VCOEFF4_OFFSET (0x0590) ++ ++/* PDP, VID3VCOEFF4, VID3VCOEFF4 ++*/ ++#define PDP_VID3VCOEFF4_VID3VCOEFF4_MASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF4_VID3VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF4_VID3VCOEFF4_SHIFT (0) ++#define PDP_VID3VCOEFF4_VID3VCOEFF4_LENGTH (32) ++#define PDP_VID3VCOEFF4_VID3VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VCOEFF5_OFFSET (0x0594) ++ ++/* PDP, VID3VCOEFF5, VID3VCOEFF5 ++*/ ++#define PDP_VID3VCOEFF5_VID3VCOEFF5_MASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF5_VID3VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF5_VID3VCOEFF5_SHIFT (0) ++#define PDP_VID3VCOEFF5_VID3VCOEFF5_LENGTH (32) ++#define PDP_VID3VCOEFF5_VID3VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VCOEFF6_OFFSET (0x0598) ++ ++/* PDP, VID3VCOEFF6, VID3VCOEFF6 ++*/ ++#define PDP_VID3VCOEFF6_VID3VCOEFF6_MASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF6_VID3VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF6_VID3VCOEFF6_SHIFT (0) ++#define PDP_VID3VCOEFF6_VID3VCOEFF6_LENGTH (32) ++#define PDP_VID3VCOEFF6_VID3VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VCOEFF7_OFFSET (0x059C) ++ ++/* PDP, VID3VCOEFF7, VID3VCOEFF7 ++*/ ++#define PDP_VID3VCOEFF7_VID3VCOEFF7_MASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF7_VID3VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3VCOEFF7_VID3VCOEFF7_SHIFT (0) ++#define PDP_VID3VCOEFF7_VID3VCOEFF7_LENGTH (32) ++#define PDP_VID3VCOEFF7_VID3VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3VCOEFF8_OFFSET (0x05A0) ++ ++/* PDP, 
VID3VCOEFF8, VID3VCOEFF8 ++*/ ++#define PDP_VID3VCOEFF8_VID3VCOEFF8_MASK (0x000000FF) ++#define PDP_VID3VCOEFF8_VID3VCOEFF8_LSBMASK (0x000000FF) ++#define PDP_VID3VCOEFF8_VID3VCOEFF8_SHIFT (0) ++#define PDP_VID3VCOEFF8_VID3VCOEFF8_LENGTH (8) ++#define PDP_VID3VCOEFF8_VID3VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HSINIT_OFFSET (0x05A4) ++ ++/* PDP, VID3HSINIT, VID3HINITIAL ++*/ ++#define PDP_VID3HSINIT_VID3HINITIAL_MASK (0xFFFF0000) ++#define PDP_VID3HSINIT_VID3HINITIAL_LSBMASK (0x0000FFFF) ++#define PDP_VID3HSINIT_VID3HINITIAL_SHIFT (16) ++#define PDP_VID3HSINIT_VID3HINITIAL_LENGTH (16) ++#define PDP_VID3HSINIT_VID3HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3HSINIT, VID3HPITCH ++*/ ++#define PDP_VID3HSINIT_VID3HPITCH_MASK (0x0000FFFF) ++#define PDP_VID3HSINIT_VID3HPITCH_LSBMASK (0x0000FFFF) ++#define PDP_VID3HSINIT_VID3HPITCH_SHIFT (0) ++#define PDP_VID3HSINIT_VID3HPITCH_LENGTH (16) ++#define PDP_VID3HSINIT_VID3HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF0_OFFSET (0x05A8) ++ ++/* PDP, VID3HCOEFF0, VID3HCOEFF0 ++*/ ++#define PDP_VID3HCOEFF0_VID3HCOEFF0_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF0_VID3HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF0_VID3HCOEFF0_SHIFT (0) ++#define PDP_VID3HCOEFF0_VID3HCOEFF0_LENGTH (32) ++#define PDP_VID3HCOEFF0_VID3HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF1_OFFSET (0x05AC) ++ ++/* PDP, VID3HCOEFF1, VID3HCOEFF1 ++*/ ++#define PDP_VID3HCOEFF1_VID3HCOEFF1_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF1_VID3HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF1_VID3HCOEFF1_SHIFT (0) ++#define PDP_VID3HCOEFF1_VID3HCOEFF1_LENGTH (32) ++#define PDP_VID3HCOEFF1_VID3HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF2_OFFSET (0x05B0) ++ ++/* PDP, VID3HCOEFF2, VID3HCOEFF2 ++*/ ++#define PDP_VID3HCOEFF2_VID3HCOEFF2_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF2_VID3HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF2_VID3HCOEFF2_SHIFT (0) ++#define PDP_VID3HCOEFF2_VID3HCOEFF2_LENGTH (32) ++#define PDP_VID3HCOEFF2_VID3HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF3_OFFSET (0x05B4) ++ ++/* PDP, VID3HCOEFF3, VID3HCOEFF3 ++*/ ++#define PDP_VID3HCOEFF3_VID3HCOEFF3_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF3_VID3HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF3_VID3HCOEFF3_SHIFT (0) ++#define PDP_VID3HCOEFF3_VID3HCOEFF3_LENGTH (32) ++#define PDP_VID3HCOEFF3_VID3HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF4_OFFSET (0x05B8) ++ ++/* PDP, VID3HCOEFF4, VID3HCOEFF4 ++*/ ++#define PDP_VID3HCOEFF4_VID3HCOEFF4_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF4_VID3HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF4_VID3HCOEFF4_SHIFT (0) ++#define PDP_VID3HCOEFF4_VID3HCOEFF4_LENGTH (32) ++#define PDP_VID3HCOEFF4_VID3HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF5_OFFSET (0x05BC) ++ ++/* PDP, VID3HCOEFF5, VID3HCOEFF5 ++*/ ++#define PDP_VID3HCOEFF5_VID3HCOEFF5_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF5_VID3HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF5_VID3HCOEFF5_SHIFT (0) ++#define PDP_VID3HCOEFF5_VID3HCOEFF5_LENGTH (32) ++#define PDP_VID3HCOEFF5_VID3HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF6_OFFSET (0x05C0) ++ ++/* PDP, VID3HCOEFF6, VID3HCOEFF6 ++*/ ++#define PDP_VID3HCOEFF6_VID3HCOEFF6_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF6_VID3HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF6_VID3HCOEFF6_SHIFT (0) ++#define PDP_VID3HCOEFF6_VID3HCOEFF6_LENGTH (32) ++#define PDP_VID3HCOEFF6_VID3HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF7_OFFSET (0x05C4) ++ 
++/* PDP, VID3HCOEFF7, VID3HCOEFF7 ++*/ ++#define PDP_VID3HCOEFF7_VID3HCOEFF7_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF7_VID3HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF7_VID3HCOEFF7_SHIFT (0) ++#define PDP_VID3HCOEFF7_VID3HCOEFF7_LENGTH (32) ++#define PDP_VID3HCOEFF7_VID3HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF8_OFFSET (0x05C8) ++ ++/* PDP, VID3HCOEFF8, VID3HCOEFF8 ++*/ ++#define PDP_VID3HCOEFF8_VID3HCOEFF8_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF8_VID3HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF8_VID3HCOEFF8_SHIFT (0) ++#define PDP_VID3HCOEFF8_VID3HCOEFF8_LENGTH (32) ++#define PDP_VID3HCOEFF8_VID3HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF9_OFFSET (0x05CC) ++ ++/* PDP, VID3HCOEFF9, VID3HCOEFF9 ++*/ ++#define PDP_VID3HCOEFF9_VID3HCOEFF9_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF9_VID3HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF9_VID3HCOEFF9_SHIFT (0) ++#define PDP_VID3HCOEFF9_VID3HCOEFF9_LENGTH (32) ++#define PDP_VID3HCOEFF9_VID3HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF10_OFFSET (0x05D0) ++ ++/* PDP, VID3HCOEFF10, VID3HCOEFF10 ++*/ ++#define PDP_VID3HCOEFF10_VID3HCOEFF10_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF10_VID3HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF10_VID3HCOEFF10_SHIFT (0) ++#define PDP_VID3HCOEFF10_VID3HCOEFF10_LENGTH (32) ++#define PDP_VID3HCOEFF10_VID3HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF11_OFFSET (0x05D4) ++ ++/* PDP, VID3HCOEFF11, VID3HCOEFF11 ++*/ ++#define PDP_VID3HCOEFF11_VID3HCOEFF11_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF11_VID3HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF11_VID3HCOEFF11_SHIFT (0) ++#define PDP_VID3HCOEFF11_VID3HCOEFF11_LENGTH (32) ++#define PDP_VID3HCOEFF11_VID3HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF12_OFFSET (0x05D8) ++ ++/* PDP, VID3HCOEFF12, VID3HCOEFF12 ++*/ ++#define PDP_VID3HCOEFF12_VID3HCOEFF12_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF12_VID3HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF12_VID3HCOEFF12_SHIFT (0) ++#define PDP_VID3HCOEFF12_VID3HCOEFF12_LENGTH (32) ++#define PDP_VID3HCOEFF12_VID3HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF13_OFFSET (0x05DC) ++ ++/* PDP, VID3HCOEFF13, VID3HCOEFF13 ++*/ ++#define PDP_VID3HCOEFF13_VID3HCOEFF13_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF13_VID3HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF13_VID3HCOEFF13_SHIFT (0) ++#define PDP_VID3HCOEFF13_VID3HCOEFF13_LENGTH (32) ++#define PDP_VID3HCOEFF13_VID3HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF14_OFFSET (0x05E0) ++ ++/* PDP, VID3HCOEFF14, VID3HCOEFF14 ++*/ ++#define PDP_VID3HCOEFF14_VID3HCOEFF14_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF14_VID3HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF14_VID3HCOEFF14_SHIFT (0) ++#define PDP_VID3HCOEFF14_VID3HCOEFF14_LENGTH (32) ++#define PDP_VID3HCOEFF14_VID3HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF15_OFFSET (0x05E4) ++ ++/* PDP, VID3HCOEFF15, VID3HCOEFF15 ++*/ ++#define PDP_VID3HCOEFF15_VID3HCOEFF15_MASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF15_VID3HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define PDP_VID3HCOEFF15_VID3HCOEFF15_SHIFT (0) ++#define PDP_VID3HCOEFF15_VID3HCOEFF15_LENGTH (32) ++#define PDP_VID3HCOEFF15_VID3HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3HCOEFF16_OFFSET (0x05E8) ++ ++/* PDP, VID3HCOEFF16, VID3HCOEFF16 ++*/ ++#define PDP_VID3HCOEFF16_VID3HCOEFF16_MASK (0x000000FF) ++#define PDP_VID3HCOEFF16_VID3HCOEFF16_LSBMASK (0x000000FF) ++#define PDP_VID3HCOEFF16_VID3HCOEFF16_SHIFT (0) 
++#define PDP_VID3HCOEFF16_VID3HCOEFF16_LENGTH (8) ++#define PDP_VID3HCOEFF16_VID3HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3SCALESIZE_OFFSET (0x05EC) ++ ++/* PDP, VID3SCALESIZE, VID3SCALEWIDTH ++*/ ++#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_MASK (0x0FFF0000) ++#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LSBMASK (0x00000FFF) ++#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SHIFT (16) ++#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_LENGTH (12) ++#define PDP_VID3SCALESIZE_VID3SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3SCALESIZE, VID3SCALEHEIGHT ++*/ ++#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_MASK (0x00000FFF) ++#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SHIFT (0) ++#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_LENGTH (12) ++#define PDP_VID3SCALESIZE_VID3SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4SCALECTRL_OFFSET (0x05F0) ++ ++/* PDP, VID4SCALECTRL, VID4HSCALEBP ++*/ ++#define PDP_VID4SCALECTRL_VID4HSCALEBP_MASK (0x80000000) ++#define PDP_VID4SCALECTRL_VID4HSCALEBP_LSBMASK (0x00000001) ++#define PDP_VID4SCALECTRL_VID4HSCALEBP_SHIFT (31) ++#define PDP_VID4SCALECTRL_VID4HSCALEBP_LENGTH (1) ++#define PDP_VID4SCALECTRL_VID4HSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VSCALEBP ++*/ ++#define PDP_VID4SCALECTRL_VID4VSCALEBP_MASK (0x40000000) ++#define PDP_VID4SCALECTRL_VID4VSCALEBP_LSBMASK (0x00000001) ++#define PDP_VID4SCALECTRL_VID4VSCALEBP_SHIFT (30) ++#define PDP_VID4SCALECTRL_VID4VSCALEBP_LENGTH (1) ++#define PDP_VID4SCALECTRL_VID4VSCALEBP_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4HSBEFOREVS ++*/ ++#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_MASK (0x20000000) ++#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LSBMASK (0x00000001) ++#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SHIFT (29) ++#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_LENGTH (1) ++#define PDP_VID4SCALECTRL_VID4HSBEFOREVS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VSURUNCTRL ++*/ ++#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_MASK (0x08000000) ++#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LSBMASK (0x00000001) ++#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SHIFT (27) ++#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_LENGTH (1) ++#define PDP_VID4SCALECTRL_VID4VSURUNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4PAN_EN ++*/ ++#define PDP_VID4SCALECTRL_VID4PAN_EN_MASK (0x00040000) ++#define PDP_VID4SCALECTRL_VID4PAN_EN_LSBMASK (0x00000001) ++#define PDP_VID4SCALECTRL_VID4PAN_EN_SHIFT (18) ++#define PDP_VID4SCALECTRL_VID4PAN_EN_LENGTH (1) ++#define PDP_VID4SCALECTRL_VID4PAN_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VORDER ++*/ ++#define PDP_VID4SCALECTRL_VID4VORDER_MASK (0x00030000) ++#define PDP_VID4SCALECTRL_VID4VORDER_LSBMASK (0x00000003) ++#define PDP_VID4SCALECTRL_VID4VORDER_SHIFT (16) ++#define PDP_VID4SCALECTRL_VID4VORDER_LENGTH (2) ++#define PDP_VID4SCALECTRL_VID4VORDER_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALECTRL, VID4VPITCH ++*/ ++#define PDP_VID4SCALECTRL_VID4VPITCH_MASK (0x0000FFFF) ++#define PDP_VID4SCALECTRL_VID4VPITCH_LSBMASK (0x0000FFFF) ++#define PDP_VID4SCALECTRL_VID4VPITCH_SHIFT (0) ++#define PDP_VID4SCALECTRL_VID4VPITCH_LENGTH (16) ++#define PDP_VID4SCALECTRL_VID4VPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VSINIT_OFFSET (0x05F4) ++ ++/* PDP, VID4VSINIT, VID4VINITIAL1 ++*/ ++#define PDP_VID4VSINIT_VID4VINITIAL1_MASK (0xFFFF0000) ++#define PDP_VID4VSINIT_VID4VINITIAL1_LSBMASK (0x0000FFFF) ++#define PDP_VID4VSINIT_VID4VINITIAL1_SHIFT (16) ++#define PDP_VID4VSINIT_VID4VINITIAL1_LENGTH (16) 
++#define PDP_VID4VSINIT_VID4VINITIAL1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4VSINIT, VID4VINITIAL0 ++*/ ++#define PDP_VID4VSINIT_VID4VINITIAL0_MASK (0x0000FFFF) ++#define PDP_VID4VSINIT_VID4VINITIAL0_LSBMASK (0x0000FFFF) ++#define PDP_VID4VSINIT_VID4VINITIAL0_SHIFT (0) ++#define PDP_VID4VSINIT_VID4VINITIAL0_LENGTH (16) ++#define PDP_VID4VSINIT_VID4VINITIAL0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VCOEFF0_OFFSET (0x05F8) ++ ++/* PDP, VID4VCOEFF0, VID4VCOEFF0 ++*/ ++#define PDP_VID4VCOEFF0_VID4VCOEFF0_MASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF0_VID4VCOEFF0_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF0_VID4VCOEFF0_SHIFT (0) ++#define PDP_VID4VCOEFF0_VID4VCOEFF0_LENGTH (32) ++#define PDP_VID4VCOEFF0_VID4VCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VCOEFF1_OFFSET (0x05FC) ++ ++/* PDP, VID4VCOEFF1, VID4VCOEFF1 ++*/ ++#define PDP_VID4VCOEFF1_VID4VCOEFF1_MASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF1_VID4VCOEFF1_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF1_VID4VCOEFF1_SHIFT (0) ++#define PDP_VID4VCOEFF1_VID4VCOEFF1_LENGTH (32) ++#define PDP_VID4VCOEFF1_VID4VCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VCOEFF2_OFFSET (0x0600) ++ ++/* PDP, VID4VCOEFF2, VID4VCOEFF2 ++*/ ++#define PDP_VID4VCOEFF2_VID4VCOEFF2_MASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF2_VID4VCOEFF2_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF2_VID4VCOEFF2_SHIFT (0) ++#define PDP_VID4VCOEFF2_VID4VCOEFF2_LENGTH (32) ++#define PDP_VID4VCOEFF2_VID4VCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VCOEFF3_OFFSET (0x0604) ++ ++/* PDP, VID4VCOEFF3, VID4VCOEFF3 ++*/ ++#define PDP_VID4VCOEFF3_VID4VCOEFF3_MASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF3_VID4VCOEFF3_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF3_VID4VCOEFF3_SHIFT (0) ++#define PDP_VID4VCOEFF3_VID4VCOEFF3_LENGTH (32) ++#define PDP_VID4VCOEFF3_VID4VCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VCOEFF4_OFFSET (0x0608) ++ ++/* PDP, VID4VCOEFF4, VID4VCOEFF4 ++*/ ++#define PDP_VID4VCOEFF4_VID4VCOEFF4_MASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF4_VID4VCOEFF4_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF4_VID4VCOEFF4_SHIFT (0) ++#define PDP_VID4VCOEFF4_VID4VCOEFF4_LENGTH (32) ++#define PDP_VID4VCOEFF4_VID4VCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VCOEFF5_OFFSET (0x060C) ++ ++/* PDP, VID4VCOEFF5, VID4VCOEFF5 ++*/ ++#define PDP_VID4VCOEFF5_VID4VCOEFF5_MASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF5_VID4VCOEFF5_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF5_VID4VCOEFF5_SHIFT (0) ++#define PDP_VID4VCOEFF5_VID4VCOEFF5_LENGTH (32) ++#define PDP_VID4VCOEFF5_VID4VCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VCOEFF6_OFFSET (0x0610) ++ ++/* PDP, VID4VCOEFF6, VID4VCOEFF6 ++*/ ++#define PDP_VID4VCOEFF6_VID4VCOEFF6_MASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF6_VID4VCOEFF6_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF6_VID4VCOEFF6_SHIFT (0) ++#define PDP_VID4VCOEFF6_VID4VCOEFF6_LENGTH (32) ++#define PDP_VID4VCOEFF6_VID4VCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VCOEFF7_OFFSET (0x0614) ++ ++/* PDP, VID4VCOEFF7, VID4VCOEFF7 ++*/ ++#define PDP_VID4VCOEFF7_VID4VCOEFF7_MASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF7_VID4VCOEFF7_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4VCOEFF7_VID4VCOEFF7_SHIFT (0) ++#define PDP_VID4VCOEFF7_VID4VCOEFF7_LENGTH (32) ++#define PDP_VID4VCOEFF7_VID4VCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4VCOEFF8_OFFSET (0x0618) ++ ++/* PDP, VID4VCOEFF8, VID4VCOEFF8 ++*/ ++#define PDP_VID4VCOEFF8_VID4VCOEFF8_MASK (0x000000FF) ++#define PDP_VID4VCOEFF8_VID4VCOEFF8_LSBMASK (0x000000FF) ++#define PDP_VID4VCOEFF8_VID4VCOEFF8_SHIFT (0) ++#define 
PDP_VID4VCOEFF8_VID4VCOEFF8_LENGTH (8) ++#define PDP_VID4VCOEFF8_VID4VCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HSINIT_OFFSET (0x061C) ++ ++/* PDP, VID4HSINIT, VID4HINITIAL ++*/ ++#define PDP_VID4HSINIT_VID4HINITIAL_MASK (0xFFFF0000) ++#define PDP_VID4HSINIT_VID4HINITIAL_LSBMASK (0x0000FFFF) ++#define PDP_VID4HSINIT_VID4HINITIAL_SHIFT (16) ++#define PDP_VID4HSINIT_VID4HINITIAL_LENGTH (16) ++#define PDP_VID4HSINIT_VID4HINITIAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4HSINIT, VID4HPITCH ++*/ ++#define PDP_VID4HSINIT_VID4HPITCH_MASK (0x0000FFFF) ++#define PDP_VID4HSINIT_VID4HPITCH_LSBMASK (0x0000FFFF) ++#define PDP_VID4HSINIT_VID4HPITCH_SHIFT (0) ++#define PDP_VID4HSINIT_VID4HPITCH_LENGTH (16) ++#define PDP_VID4HSINIT_VID4HPITCH_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF0_OFFSET (0x0620) ++ ++/* PDP, VID4HCOEFF0, VID4HCOEFF0 ++*/ ++#define PDP_VID4HCOEFF0_VID4HCOEFF0_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF0_VID4HCOEFF0_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF0_VID4HCOEFF0_SHIFT (0) ++#define PDP_VID4HCOEFF0_VID4HCOEFF0_LENGTH (32) ++#define PDP_VID4HCOEFF0_VID4HCOEFF0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF1_OFFSET (0x0624) ++ ++/* PDP, VID4HCOEFF1, VID4HCOEFF1 ++*/ ++#define PDP_VID4HCOEFF1_VID4HCOEFF1_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF1_VID4HCOEFF1_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF1_VID4HCOEFF1_SHIFT (0) ++#define PDP_VID4HCOEFF1_VID4HCOEFF1_LENGTH (32) ++#define PDP_VID4HCOEFF1_VID4HCOEFF1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF2_OFFSET (0x0628) ++ ++/* PDP, VID4HCOEFF2, VID4HCOEFF2 ++*/ ++#define PDP_VID4HCOEFF2_VID4HCOEFF2_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF2_VID4HCOEFF2_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF2_VID4HCOEFF2_SHIFT (0) ++#define PDP_VID4HCOEFF2_VID4HCOEFF2_LENGTH (32) ++#define PDP_VID4HCOEFF2_VID4HCOEFF2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF3_OFFSET (0x062C) ++ ++/* PDP, VID4HCOEFF3, VID4HCOEFF3 ++*/ ++#define PDP_VID4HCOEFF3_VID4HCOEFF3_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF3_VID4HCOEFF3_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF3_VID4HCOEFF3_SHIFT (0) ++#define PDP_VID4HCOEFF3_VID4HCOEFF3_LENGTH (32) ++#define PDP_VID4HCOEFF3_VID4HCOEFF3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF4_OFFSET (0x0630) ++ ++/* PDP, VID4HCOEFF4, VID4HCOEFF4 ++*/ ++#define PDP_VID4HCOEFF4_VID4HCOEFF4_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF4_VID4HCOEFF4_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF4_VID4HCOEFF4_SHIFT (0) ++#define PDP_VID4HCOEFF4_VID4HCOEFF4_LENGTH (32) ++#define PDP_VID4HCOEFF4_VID4HCOEFF4_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF5_OFFSET (0x0634) ++ ++/* PDP, VID4HCOEFF5, VID4HCOEFF5 ++*/ ++#define PDP_VID4HCOEFF5_VID4HCOEFF5_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF5_VID4HCOEFF5_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF5_VID4HCOEFF5_SHIFT (0) ++#define PDP_VID4HCOEFF5_VID4HCOEFF5_LENGTH (32) ++#define PDP_VID4HCOEFF5_VID4HCOEFF5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF6_OFFSET (0x0638) ++ ++/* PDP, VID4HCOEFF6, VID4HCOEFF6 ++*/ ++#define PDP_VID4HCOEFF6_VID4HCOEFF6_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF6_VID4HCOEFF6_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF6_VID4HCOEFF6_SHIFT (0) ++#define PDP_VID4HCOEFF6_VID4HCOEFF6_LENGTH (32) ++#define PDP_VID4HCOEFF6_VID4HCOEFF6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF7_OFFSET (0x063C) ++ ++/* PDP, VID4HCOEFF7, VID4HCOEFF7 ++*/ ++#define PDP_VID4HCOEFF7_VID4HCOEFF7_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF7_VID4HCOEFF7_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF7_VID4HCOEFF7_SHIFT (0) 
++#define PDP_VID4HCOEFF7_VID4HCOEFF7_LENGTH (32) ++#define PDP_VID4HCOEFF7_VID4HCOEFF7_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF8_OFFSET (0x0640) ++ ++/* PDP, VID4HCOEFF8, VID4HCOEFF8 ++*/ ++#define PDP_VID4HCOEFF8_VID4HCOEFF8_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF8_VID4HCOEFF8_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF8_VID4HCOEFF8_SHIFT (0) ++#define PDP_VID4HCOEFF8_VID4HCOEFF8_LENGTH (32) ++#define PDP_VID4HCOEFF8_VID4HCOEFF8_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF9_OFFSET (0x0644) ++ ++/* PDP, VID4HCOEFF9, VID4HCOEFF9 ++*/ ++#define PDP_VID4HCOEFF9_VID4HCOEFF9_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF9_VID4HCOEFF9_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF9_VID4HCOEFF9_SHIFT (0) ++#define PDP_VID4HCOEFF9_VID4HCOEFF9_LENGTH (32) ++#define PDP_VID4HCOEFF9_VID4HCOEFF9_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF10_OFFSET (0x0648) ++ ++/* PDP, VID4HCOEFF10, VID4HCOEFF10 ++*/ ++#define PDP_VID4HCOEFF10_VID4HCOEFF10_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF10_VID4HCOEFF10_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF10_VID4HCOEFF10_SHIFT (0) ++#define PDP_VID4HCOEFF10_VID4HCOEFF10_LENGTH (32) ++#define PDP_VID4HCOEFF10_VID4HCOEFF10_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF11_OFFSET (0x064C) ++ ++/* PDP, VID4HCOEFF11, VID4HCOEFF11 ++*/ ++#define PDP_VID4HCOEFF11_VID4HCOEFF11_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF11_VID4HCOEFF11_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF11_VID4HCOEFF11_SHIFT (0) ++#define PDP_VID4HCOEFF11_VID4HCOEFF11_LENGTH (32) ++#define PDP_VID4HCOEFF11_VID4HCOEFF11_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF12_OFFSET (0x0650) ++ ++/* PDP, VID4HCOEFF12, VID4HCOEFF12 ++*/ ++#define PDP_VID4HCOEFF12_VID4HCOEFF12_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF12_VID4HCOEFF12_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF12_VID4HCOEFF12_SHIFT (0) ++#define PDP_VID4HCOEFF12_VID4HCOEFF12_LENGTH (32) ++#define PDP_VID4HCOEFF12_VID4HCOEFF12_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF13_OFFSET (0x0654) ++ ++/* PDP, VID4HCOEFF13, VID4HCOEFF13 ++*/ ++#define PDP_VID4HCOEFF13_VID4HCOEFF13_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF13_VID4HCOEFF13_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF13_VID4HCOEFF13_SHIFT (0) ++#define PDP_VID4HCOEFF13_VID4HCOEFF13_LENGTH (32) ++#define PDP_VID4HCOEFF13_VID4HCOEFF13_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF14_OFFSET (0x0658) ++ ++/* PDP, VID4HCOEFF14, VID4HCOEFF14 ++*/ ++#define PDP_VID4HCOEFF14_VID4HCOEFF14_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF14_VID4HCOEFF14_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF14_VID4HCOEFF14_SHIFT (0) ++#define PDP_VID4HCOEFF14_VID4HCOEFF14_LENGTH (32) ++#define PDP_VID4HCOEFF14_VID4HCOEFF14_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF15_OFFSET (0x065C) ++ ++/* PDP, VID4HCOEFF15, VID4HCOEFF15 ++*/ ++#define PDP_VID4HCOEFF15_VID4HCOEFF15_MASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF15_VID4HCOEFF15_LSBMASK (0xFFFFFFFF) ++#define PDP_VID4HCOEFF15_VID4HCOEFF15_SHIFT (0) ++#define PDP_VID4HCOEFF15_VID4HCOEFF15_LENGTH (32) ++#define PDP_VID4HCOEFF15_VID4HCOEFF15_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4HCOEFF16_OFFSET (0x0660) ++ ++/* PDP, VID4HCOEFF16, VID4HCOEFF16 ++*/ ++#define PDP_VID4HCOEFF16_VID4HCOEFF16_MASK (0x000000FF) ++#define PDP_VID4HCOEFF16_VID4HCOEFF16_LSBMASK (0x000000FF) ++#define PDP_VID4HCOEFF16_VID4HCOEFF16_SHIFT (0) ++#define PDP_VID4HCOEFF16_VID4HCOEFF16_LENGTH (8) ++#define PDP_VID4HCOEFF16_VID4HCOEFF16_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4SCALESIZE_OFFSET (0x0664) ++ ++/* PDP, VID4SCALESIZE, VID4SCALEWIDTH ++*/ 
++#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_MASK (0x0FFF0000) ++#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LSBMASK (0x00000FFF) ++#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SHIFT (16) ++#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_LENGTH (12) ++#define PDP_VID4SCALESIZE_VID4SCALEWIDTH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4SCALESIZE, VID4SCALEHEIGHT ++*/ ++#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_MASK (0x00000FFF) ++#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LSBMASK (0x00000FFF) ++#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SHIFT (0) ++#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_LENGTH (12) ++#define PDP_VID4SCALESIZE_VID4SCALEHEIGHT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PORTER_BLND0_OFFSET (0x0668) ++ ++/* PDP, PORTER_BLND0, BLND0BLENDTYPE ++*/ ++#define PDP_PORTER_BLND0_BLND0BLENDTYPE_MASK (0x00000010) ++#define PDP_PORTER_BLND0_BLND0BLENDTYPE_LSBMASK (0x00000001) ++#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SHIFT (4) ++#define PDP_PORTER_BLND0_BLND0BLENDTYPE_LENGTH (1) ++#define PDP_PORTER_BLND0_BLND0BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND0, BLND0PORTERMODE ++*/ ++#define PDP_PORTER_BLND0_BLND0PORTERMODE_MASK (0x0000000F) ++#define PDP_PORTER_BLND0_BLND0PORTERMODE_LSBMASK (0x0000000F) ++#define PDP_PORTER_BLND0_BLND0PORTERMODE_SHIFT (0) ++#define PDP_PORTER_BLND0_BLND0PORTERMODE_LENGTH (4) ++#define PDP_PORTER_BLND0_BLND0PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PORTER_BLND1_OFFSET (0x066C) ++ ++/* PDP, PORTER_BLND1, BLND1BLENDTYPE ++*/ ++#define PDP_PORTER_BLND1_BLND1BLENDTYPE_MASK (0x00000010) ++#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LSBMASK (0x00000001) ++#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SHIFT (4) ++#define PDP_PORTER_BLND1_BLND1BLENDTYPE_LENGTH (1) ++#define PDP_PORTER_BLND1_BLND1BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND1, BLND1PORTERMODE ++*/ ++#define PDP_PORTER_BLND1_BLND1PORTERMODE_MASK (0x0000000F) ++#define PDP_PORTER_BLND1_BLND1PORTERMODE_LSBMASK (0x0000000F) ++#define PDP_PORTER_BLND1_BLND1PORTERMODE_SHIFT (0) ++#define PDP_PORTER_BLND1_BLND1PORTERMODE_LENGTH (4) ++#define PDP_PORTER_BLND1_BLND1PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PORTER_BLND2_OFFSET (0x0670) ++ ++/* PDP, PORTER_BLND2, BLND2BLENDTYPE ++*/ ++#define PDP_PORTER_BLND2_BLND2BLENDTYPE_MASK (0x00000010) ++#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LSBMASK (0x00000001) ++#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SHIFT (4) ++#define PDP_PORTER_BLND2_BLND2BLENDTYPE_LENGTH (1) ++#define PDP_PORTER_BLND2_BLND2BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND2, BLND2PORTERMODE ++*/ ++#define PDP_PORTER_BLND2_BLND2PORTERMODE_MASK (0x0000000F) ++#define PDP_PORTER_BLND2_BLND2PORTERMODE_LSBMASK (0x0000000F) ++#define PDP_PORTER_BLND2_BLND2PORTERMODE_SHIFT (0) ++#define PDP_PORTER_BLND2_BLND2PORTERMODE_LENGTH (4) ++#define PDP_PORTER_BLND2_BLND2PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PORTER_BLND3_OFFSET (0x0674) ++ ++/* PDP, PORTER_BLND3, BLND3BLENDTYPE ++*/ ++#define PDP_PORTER_BLND3_BLND3BLENDTYPE_MASK (0x00000010) ++#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LSBMASK (0x00000001) ++#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SHIFT (4) ++#define PDP_PORTER_BLND3_BLND3BLENDTYPE_LENGTH (1) ++#define PDP_PORTER_BLND3_BLND3BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND3, BLND3PORTERMODE ++*/ ++#define PDP_PORTER_BLND3_BLND3PORTERMODE_MASK (0x0000000F) ++#define PDP_PORTER_BLND3_BLND3PORTERMODE_LSBMASK (0x0000000F) ++#define PDP_PORTER_BLND3_BLND3PORTERMODE_SHIFT (0) ++#define PDP_PORTER_BLND3_BLND3PORTERMODE_LENGTH (4) ++#define 
PDP_PORTER_BLND3_BLND3PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PORTER_BLND4_OFFSET (0x0678) ++ ++/* PDP, PORTER_BLND4, BLND4BLENDTYPE ++*/ ++#define PDP_PORTER_BLND4_BLND4BLENDTYPE_MASK (0x00000010) ++#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LSBMASK (0x00000001) ++#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SHIFT (4) ++#define PDP_PORTER_BLND4_BLND4BLENDTYPE_LENGTH (1) ++#define PDP_PORTER_BLND4_BLND4BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND4, BLND4PORTERMODE ++*/ ++#define PDP_PORTER_BLND4_BLND4PORTERMODE_MASK (0x0000000F) ++#define PDP_PORTER_BLND4_BLND4PORTERMODE_LSBMASK (0x0000000F) ++#define PDP_PORTER_BLND4_BLND4PORTERMODE_SHIFT (0) ++#define PDP_PORTER_BLND4_BLND4PORTERMODE_LENGTH (4) ++#define PDP_PORTER_BLND4_BLND4PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PORTER_BLND5_OFFSET (0x067C) ++ ++/* PDP, PORTER_BLND5, BLND5BLENDTYPE ++*/ ++#define PDP_PORTER_BLND5_BLND5BLENDTYPE_MASK (0x00000010) ++#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LSBMASK (0x00000001) ++#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SHIFT (4) ++#define PDP_PORTER_BLND5_BLND5BLENDTYPE_LENGTH (1) ++#define PDP_PORTER_BLND5_BLND5BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND5, BLND5PORTERMODE ++*/ ++#define PDP_PORTER_BLND5_BLND5PORTERMODE_MASK (0x0000000F) ++#define PDP_PORTER_BLND5_BLND5PORTERMODE_LSBMASK (0x0000000F) ++#define PDP_PORTER_BLND5_BLND5PORTERMODE_SHIFT (0) ++#define PDP_PORTER_BLND5_BLND5PORTERMODE_LENGTH (4) ++#define PDP_PORTER_BLND5_BLND5PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PORTER_BLND6_OFFSET (0x0680) ++ ++/* PDP, PORTER_BLND6, BLND6BLENDTYPE ++*/ ++#define PDP_PORTER_BLND6_BLND6BLENDTYPE_MASK (0x00000010) ++#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LSBMASK (0x00000001) ++#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SHIFT (4) ++#define PDP_PORTER_BLND6_BLND6BLENDTYPE_LENGTH (1) ++#define PDP_PORTER_BLND6_BLND6BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND6, BLND6PORTERMODE ++*/ ++#define PDP_PORTER_BLND6_BLND6PORTERMODE_MASK (0x0000000F) ++#define PDP_PORTER_BLND6_BLND6PORTERMODE_LSBMASK (0x0000000F) ++#define PDP_PORTER_BLND6_BLND6PORTERMODE_SHIFT (0) ++#define PDP_PORTER_BLND6_BLND6PORTERMODE_LENGTH (4) ++#define PDP_PORTER_BLND6_BLND6PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PORTER_BLND7_OFFSET (0x0684) ++ ++/* PDP, PORTER_BLND7, BLND7BLENDTYPE ++*/ ++#define PDP_PORTER_BLND7_BLND7BLENDTYPE_MASK (0x00000010) ++#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LSBMASK (0x00000001) ++#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SHIFT (4) ++#define PDP_PORTER_BLND7_BLND7BLENDTYPE_LENGTH (1) ++#define PDP_PORTER_BLND7_BLND7BLENDTYPE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PORTER_BLND7, BLND7PORTERMODE ++*/ ++#define PDP_PORTER_BLND7_BLND7PORTERMODE_MASK (0x0000000F) ++#define PDP_PORTER_BLND7_BLND7PORTERMODE_LSBMASK (0x0000000F) ++#define PDP_PORTER_BLND7_BLND7PORTERMODE_SHIFT (0) ++#define PDP_PORTER_BLND7_BLND7PORTERMODE_LENGTH (4) ++#define PDP_PORTER_BLND7_BLND7PORTERMODE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06C8) ++ ++/* PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_TRANS ++*/ ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SHIFT (16) ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_LENGTH (10) ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* 
PDP, VID1LUMAKEY_ALPHA_TRANS_OPAQUE, VID1LUMAKEYALPHA_OPAQUE ++*/ ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define PDP_VID1LUMAKEY_ALPHA_TRANS_OPAQUE_VID1LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06CC) ++ ++/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMAX ++*/ ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_MASK (0x03FF0000) ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SHIFT (16) ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_LENGTH (10) ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_LUMA_MAX_MIN, VID1LUMAKEYYMIN ++*/ ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_MASK (0x000003FF) ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SHIFT (0) ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_LENGTH (10) ++#define PDP_VID1LUMAKEY_LUMA_MAX_MIN_VID1LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1LUMAKEY_C_RG_OFFSET (0x06D0) ++ ++/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_R ++*/ ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_MASK (0x0FFF0000) ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SHIFT (16) ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_LENGTH (12) ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_RG, VID1LUMAKEYC_G ++*/ ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_MASK (0x00000FFF) ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SHIFT (0) ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_LENGTH (12) ++#define PDP_VID1LUMAKEY_C_RG_VID1LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1LUMAKEY_C_B_OFFSET (0x06D4) ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYALPHAMULT ++*/ ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_MASK (0x20000000) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SHIFT (29) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_LENGTH (1) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYEN ++*/ ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_MASK (0x10000000) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LSBMASK (0x00000001) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SHIFT (28) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_LENGTH (1) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYOUTOFF ++*/ ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SHIFT (16) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_LENGTH (10) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1LUMAKEY_C_B, VID1LUMAKEYC_B ++*/ ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_MASK (0x00000FFF) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define 
PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SHIFT (0) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_LENGTH (12) ++#define PDP_VID1LUMAKEY_C_B_VID1LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06D8) ++ ++/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_TRANS ++*/ ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SHIFT (16) ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_LENGTH (10) ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_ALPHA_TRANS_OPAQUE, VID2LUMAKEYALPHA_OPAQUE ++*/ ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define PDP_VID2LUMAKEY_ALPHA_TRANS_OPAQUE_VID2LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06DC) ++ ++/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMAX ++*/ ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_MASK (0x03FF0000) ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SHIFT (16) ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_LENGTH (10) ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_LUMA_MAX_MIN, VID2LUMAKEYYMIN ++*/ ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_MASK (0x000003FF) ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SHIFT (0) ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_LENGTH (10) ++#define PDP_VID2LUMAKEY_LUMA_MAX_MIN_VID2LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2LUMAKEY_C_RG_OFFSET (0x06E0) ++ ++/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_R ++*/ ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_MASK (0x0FFF0000) ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SHIFT (16) ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_LENGTH (12) ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_RG, VID2LUMAKEYC_G ++*/ ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_MASK (0x00000FFF) ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SHIFT (0) ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_LENGTH (12) ++#define PDP_VID2LUMAKEY_C_RG_VID2LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2LUMAKEY_C_B_OFFSET (0x06E4) ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYALPHAMULT ++*/ ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_MASK (0x20000000) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SHIFT (29) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_LENGTH (1) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYEN ++*/ ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_MASK (0x10000000) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LSBMASK (0x00000001) ++#define 
PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SHIFT (28) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_LENGTH (1) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYOUTOFF ++*/ ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SHIFT (16) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_LENGTH (10) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2LUMAKEY_C_B, VID2LUMAKEYC_B ++*/ ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_MASK (0x00000FFF) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SHIFT (0) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_LENGTH (12) ++#define PDP_VID2LUMAKEY_C_B_VID2LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06E8) ++ ++/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_TRANS ++*/ ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SHIFT (16) ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_LENGTH (10) ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_ALPHA_TRANS_OPAQUE, VID3LUMAKEYALPHA_OPAQUE ++*/ ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define PDP_VID3LUMAKEY_ALPHA_TRANS_OPAQUE_VID3LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06EC) ++ ++/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMAX ++*/ ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_MASK (0x03FF0000) ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SHIFT (16) ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_LENGTH (10) ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_LUMA_MAX_MIN, VID3LUMAKEYYMIN ++*/ ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_MASK (0x000003FF) ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SHIFT (0) ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_LENGTH (10) ++#define PDP_VID3LUMAKEY_LUMA_MAX_MIN_VID3LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3LUMAKEY_C_RG_OFFSET (0x06F0) ++ ++/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_R ++*/ ++#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_MASK (0x0FFF0000) ++#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SHIFT (16) ++#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_LENGTH (12) ++#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_RG, VID3LUMAKEYC_G ++*/ ++#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_MASK (0x00000FFF) ++#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SHIFT (0) ++#define 
PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_LENGTH (12) ++#define PDP_VID3LUMAKEY_C_RG_VID3LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3LUMAKEY_C_B_OFFSET (0x06F4) ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYALPHAMULT ++*/ ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_MASK (0x20000000) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SHIFT (29) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_LENGTH (1) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYEN ++*/ ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_MASK (0x10000000) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LSBMASK (0x00000001) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SHIFT (28) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_LENGTH (1) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYOUTOFF ++*/ ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SHIFT (16) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_LENGTH (10) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3LUMAKEY_C_B, VID3LUMAKEYC_B ++*/ ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_MASK (0x00000FFF) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SHIFT (0) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_LENGTH (12) ++#define PDP_VID3LUMAKEY_C_B_VID3LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_OFFSET (0x06F8) ++ ++/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_TRANS ++*/ ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_MASK (0x03FF0000) ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LSBMASK (0x000003FF) ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SHIFT (16) ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_LENGTH (10) ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_TRANS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_ALPHA_TRANS_OPAQUE, VID4LUMAKEYALPHA_OPAQUE ++*/ ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_MASK (0x000003FF) ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LSBMASK (0x000003FF) ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SHIFT (0) ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_LENGTH (10) ++#define PDP_VID4LUMAKEY_ALPHA_TRANS_OPAQUE_VID4LUMAKEYALPHA_OPAQUE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_OFFSET (0x06FC) ++ ++/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMAX ++*/ ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_MASK (0x03FF0000) ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LSBMASK (0x000003FF) ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SHIFT (16) ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_LENGTH (10) ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_LUMA_MAX_MIN, VID4LUMAKEYYMIN ++*/ ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_MASK (0x000003FF) ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LSBMASK (0x000003FF) ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SHIFT (0) ++#define PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_LENGTH (10) ++#define 
PDP_VID4LUMAKEY_LUMA_MAX_MIN_VID4LUMAKEYYMIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4LUMAKEY_C_RG_OFFSET (0x0700) ++ ++/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_R ++*/ ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_MASK (0x0FFF0000) ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LSBMASK (0x00000FFF) ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SHIFT (16) ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_LENGTH (12) ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_R_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_RG, VID4LUMAKEYC_G ++*/ ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_MASK (0x00000FFF) ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LSBMASK (0x00000FFF) ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SHIFT (0) ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_LENGTH (12) ++#define PDP_VID4LUMAKEY_C_RG_VID4LUMAKEYC_G_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4LUMAKEY_C_B_OFFSET (0x0704) ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYALPHAMULT ++*/ ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_MASK (0x20000000) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LSBMASK (0x00000001) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SHIFT (29) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_LENGTH (1) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYALPHAMULT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYEN ++*/ ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_MASK (0x10000000) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LSBMASK (0x00000001) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SHIFT (28) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_LENGTH (1) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYOUTOFF ++*/ ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_MASK (0x03FF0000) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LSBMASK (0x000003FF) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SHIFT (16) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_LENGTH (10) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYOUTOFF_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4LUMAKEY_C_B, VID4LUMAKEYC_B ++*/ ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_MASK (0x00000FFF) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LSBMASK (0x00000FFF) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SHIFT (0) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_LENGTH (12) ++#define PDP_VID4LUMAKEY_C_B_VID4LUMAKEYC_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CSCCOEFF0_OFFSET (0x0708) ++ ++/* PDP, CSCCOEFF0, CSCCOEFFRU ++*/ ++#define PDP_CSCCOEFF0_CSCCOEFFRU_MASK (0x003FF800) ++#define PDP_CSCCOEFF0_CSCCOEFFRU_LSBMASK (0x000007FF) ++#define PDP_CSCCOEFF0_CSCCOEFFRU_SHIFT (11) ++#define PDP_CSCCOEFF0_CSCCOEFFRU_LENGTH (11) ++#define PDP_CSCCOEFF0_CSCCOEFFRU_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF0, CSCCOEFFRY ++*/ ++#define PDP_CSCCOEFF0_CSCCOEFFRY_MASK (0x000007FF) ++#define PDP_CSCCOEFF0_CSCCOEFFRY_LSBMASK (0x000007FF) ++#define PDP_CSCCOEFF0_CSCCOEFFRY_SHIFT (0) ++#define PDP_CSCCOEFF0_CSCCOEFFRY_LENGTH (11) ++#define PDP_CSCCOEFF0_CSCCOEFFRY_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CSCCOEFF1_OFFSET (0x070C) ++ ++/* PDP, CSCCOEFF1, CSCCOEFFGY ++*/ ++#define PDP_CSCCOEFF1_CSCCOEFFGY_MASK (0x003FF800) ++#define PDP_CSCCOEFF1_CSCCOEFFGY_LSBMASK (0x000007FF) ++#define PDP_CSCCOEFF1_CSCCOEFFGY_SHIFT (11) ++#define PDP_CSCCOEFF1_CSCCOEFFGY_LENGTH (11) ++#define PDP_CSCCOEFF1_CSCCOEFFGY_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF1, CSCCOEFFRV ++*/ ++#define PDP_CSCCOEFF1_CSCCOEFFRV_MASK (0x000007FF) ++#define PDP_CSCCOEFF1_CSCCOEFFRV_LSBMASK (0x000007FF) ++#define 
PDP_CSCCOEFF1_CSCCOEFFRV_SHIFT (0) ++#define PDP_CSCCOEFF1_CSCCOEFFRV_LENGTH (11) ++#define PDP_CSCCOEFF1_CSCCOEFFRV_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CSCCOEFF2_OFFSET (0x0710) ++ ++/* PDP, CSCCOEFF2, CSCCOEFFGV ++*/ ++#define PDP_CSCCOEFF2_CSCCOEFFGV_MASK (0x003FF800) ++#define PDP_CSCCOEFF2_CSCCOEFFGV_LSBMASK (0x000007FF) ++#define PDP_CSCCOEFF2_CSCCOEFFGV_SHIFT (11) ++#define PDP_CSCCOEFF2_CSCCOEFFGV_LENGTH (11) ++#define PDP_CSCCOEFF2_CSCCOEFFGV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF2, CSCCOEFFGU ++*/ ++#define PDP_CSCCOEFF2_CSCCOEFFGU_MASK (0x000007FF) ++#define PDP_CSCCOEFF2_CSCCOEFFGU_LSBMASK (0x000007FF) ++#define PDP_CSCCOEFF2_CSCCOEFFGU_SHIFT (0) ++#define PDP_CSCCOEFF2_CSCCOEFFGU_LENGTH (11) ++#define PDP_CSCCOEFF2_CSCCOEFFGU_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CSCCOEFF3_OFFSET (0x0714) ++ ++/* PDP, CSCCOEFF3, CSCCOEFFBU ++*/ ++#define PDP_CSCCOEFF3_CSCCOEFFBU_MASK (0x003FF800) ++#define PDP_CSCCOEFF3_CSCCOEFFBU_LSBMASK (0x000007FF) ++#define PDP_CSCCOEFF3_CSCCOEFFBU_SHIFT (11) ++#define PDP_CSCCOEFF3_CSCCOEFFBU_LENGTH (11) ++#define PDP_CSCCOEFF3_CSCCOEFFBU_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CSCCOEFF3, CSCCOEFFBY ++*/ ++#define PDP_CSCCOEFF3_CSCCOEFFBY_MASK (0x000007FF) ++#define PDP_CSCCOEFF3_CSCCOEFFBY_LSBMASK (0x000007FF) ++#define PDP_CSCCOEFF3_CSCCOEFFBY_SHIFT (0) ++#define PDP_CSCCOEFF3_CSCCOEFFBY_LENGTH (11) ++#define PDP_CSCCOEFF3_CSCCOEFFBY_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CSCCOEFF4_OFFSET (0x0718) ++ ++/* PDP, CSCCOEFF4, CSCCOEFFBV ++*/ ++#define PDP_CSCCOEFF4_CSCCOEFFBV_MASK (0x000007FF) ++#define PDP_CSCCOEFF4_CSCCOEFFBV_LSBMASK (0x000007FF) ++#define PDP_CSCCOEFF4_CSCCOEFFBV_SHIFT (0) ++#define PDP_CSCCOEFF4_CSCCOEFFBV_LENGTH (11) ++#define PDP_CSCCOEFF4_CSCCOEFFBV_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BGNDCOL_AR_OFFSET (0x071C) ++ ++/* PDP, BGNDCOL_AR, BGNDCOL_A ++*/ ++#define PDP_BGNDCOL_AR_BGNDCOL_A_MASK (0x03FF0000) ++#define PDP_BGNDCOL_AR_BGNDCOL_A_LSBMASK (0x000003FF) ++#define PDP_BGNDCOL_AR_BGNDCOL_A_SHIFT (16) ++#define PDP_BGNDCOL_AR_BGNDCOL_A_LENGTH (10) ++#define PDP_BGNDCOL_AR_BGNDCOL_A_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, BGNDCOL_AR, BGNDCOL_R ++*/ ++#define PDP_BGNDCOL_AR_BGNDCOL_R_MASK (0x000003FF) ++#define PDP_BGNDCOL_AR_BGNDCOL_R_LSBMASK (0x000003FF) ++#define PDP_BGNDCOL_AR_BGNDCOL_R_SHIFT (0) ++#define PDP_BGNDCOL_AR_BGNDCOL_R_LENGTH (10) ++#define PDP_BGNDCOL_AR_BGNDCOL_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BGNDCOL_GB_OFFSET (0x0720) ++ ++/* PDP, BGNDCOL_GB, BGNDCOL_G ++*/ ++#define PDP_BGNDCOL_GB_BGNDCOL_G_MASK (0x03FF0000) ++#define PDP_BGNDCOL_GB_BGNDCOL_G_LSBMASK (0x000003FF) ++#define PDP_BGNDCOL_GB_BGNDCOL_G_SHIFT (16) ++#define PDP_BGNDCOL_GB_BGNDCOL_G_LENGTH (10) ++#define PDP_BGNDCOL_GB_BGNDCOL_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, BGNDCOL_GB, BGNDCOL_B ++*/ ++#define PDP_BGNDCOL_GB_BGNDCOL_B_MASK (0x000003FF) ++#define PDP_BGNDCOL_GB_BGNDCOL_B_LSBMASK (0x000003FF) ++#define PDP_BGNDCOL_GB_BGNDCOL_B_SHIFT (0) ++#define PDP_BGNDCOL_GB_BGNDCOL_B_LENGTH (10) ++#define PDP_BGNDCOL_GB_BGNDCOL_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BORDCOL_R_OFFSET (0x0724) ++ ++/* PDP, BORDCOL_R, BORDCOL_R ++*/ ++#define PDP_BORDCOL_R_BORDCOL_R_MASK (0x000003FF) ++#define PDP_BORDCOL_R_BORDCOL_R_LSBMASK (0x000003FF) ++#define PDP_BORDCOL_R_BORDCOL_R_SHIFT (0) ++#define PDP_BORDCOL_R_BORDCOL_R_LENGTH (10) ++#define PDP_BORDCOL_R_BORDCOL_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BORDCOL_GB_OFFSET (0x0728) ++ ++/* PDP, BORDCOL_GB, BORDCOL_G ++*/ ++#define PDP_BORDCOL_GB_BORDCOL_G_MASK (0x03FF0000) ++#define 
PDP_BORDCOL_GB_BORDCOL_G_LSBMASK (0x000003FF) ++#define PDP_BORDCOL_GB_BORDCOL_G_SHIFT (16) ++#define PDP_BORDCOL_GB_BORDCOL_G_LENGTH (10) ++#define PDP_BORDCOL_GB_BORDCOL_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, BORDCOL_GB, BORDCOL_B ++*/ ++#define PDP_BORDCOL_GB_BORDCOL_B_MASK (0x000003FF) ++#define PDP_BORDCOL_GB_BORDCOL_B_LSBMASK (0x000003FF) ++#define PDP_BORDCOL_GB_BORDCOL_B_SHIFT (0) ++#define PDP_BORDCOL_GB_BORDCOL_B_LENGTH (10) ++#define PDP_BORDCOL_GB_BORDCOL_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_LINESTAT_OFFSET (0x0734) ++ ++/* PDP, LINESTAT, LINENO ++*/ ++#define PDP_LINESTAT_LINENO_MASK (0x00001FFF) ++#define PDP_LINESTAT_LINENO_LSBMASK (0x00001FFF) ++#define PDP_LINESTAT_LINENO_SHIFT (0) ++#define PDP_LINESTAT_LINENO_LENGTH (13) ++#define PDP_LINESTAT_LINENO_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CR_PDP_PROCAMP_C11C12_OFFSET (0x0738) ++ ++/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C12 ++*/ ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_MASK (0x3FFF0000) ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LSBMASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SHIFT (16) ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_LENGTH (14) ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C12_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_PDP_PROCAMP_C11C12, CR_PROCAMP_C11 ++*/ ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_MASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LSBMASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SHIFT (0) ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_LENGTH (14) ++#define PDP_CR_PDP_PROCAMP_C11C12_CR_PROCAMP_C11_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CR_PDP_PROCAMP_C13C21_OFFSET (0x073C) ++ ++/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C21 ++*/ ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_MASK (0x3FFF0000) ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LSBMASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SHIFT (16) ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_LENGTH (14) ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C21_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_PDP_PROCAMP_C13C21, CR_PROCAMP_C13 ++*/ ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_MASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LSBMASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SHIFT (0) ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_LENGTH (14) ++#define PDP_CR_PDP_PROCAMP_C13C21_CR_PROCAMP_C13_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CR_PDP_PROCAMP_C22C23_OFFSET (0x0740) ++ ++/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C23 ++*/ ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_MASK (0x3FFF0000) ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LSBMASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SHIFT (16) ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_LENGTH (14) ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C23_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_PDP_PROCAMP_C22C23, CR_PROCAMP_C22 ++*/ ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_MASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LSBMASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SHIFT (0) ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_LENGTH (14) ++#define PDP_CR_PDP_PROCAMP_C22C23_CR_PROCAMP_C22_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CR_PDP_PROCAMP_C31C32_OFFSET (0x0744) ++ ++/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C32 ++*/ ++#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_MASK (0x3FFF0000) ++#define 
PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LSBMASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SHIFT (16) ++#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_LENGTH (14) ++#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C32_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_PDP_PROCAMP_C31C32, CR_PROCAMP_C31 ++*/ ++#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_MASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LSBMASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SHIFT (0) ++#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_LENGTH (14) ++#define PDP_CR_PDP_PROCAMP_C31C32_CR_PROCAMP_C31_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CR_PDP_PROCAMP_C33_OFFSET (0x0748) ++ ++/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_C33 ++*/ ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_MASK (0x3FFF0000) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LSBMASK (0x00003FFF) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SHIFT (16) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_LENGTH (14) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_C33_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_RANGE ++*/ ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_MASK (0x00000030) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LSBMASK (0x00000003) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SHIFT (4) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_LENGTH (2) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_RANGE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_PDP_PROCAMP_C33, CR_PROCAMP_EN ++*/ ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_MASK (0x00000001) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LSBMASK (0x00000001) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SHIFT (0) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_LENGTH (1) ++#define PDP_CR_PDP_PROCAMP_C33_CR_PROCAMP_EN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_OFFSET (0x074C) ++ ++/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_G ++*/ ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_MASK (0x0FFF0000) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LSBMASK (0x00000FFF) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SHIFT (16) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_LENGTH (12) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_PDP_PROCAMP_OUTOFFSET_BG, CR_PROCAMP_OUTOFF_B ++*/ ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_MASK (0x00000FFF) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LSBMASK (0x00000FFF) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SHIFT (0) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_LENGTH (12) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_BG_CR_PROCAMP_OUTOFF_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_OFFSET (0x0750) ++ ++/* PDP, CR_PDP_PROCAMP_OUTOFFSET_R, CR_PROCAMP_OUTOFF_R ++*/ ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_MASK (0x00000FFF) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LSBMASK (0x00000FFF) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SHIFT (0) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_LENGTH (12) ++#define PDP_CR_PDP_PROCAMP_OUTOFFSET_R_CR_PROCAMP_OUTOFF_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_OFFSET (0x0754) ++ ++/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_G ++*/ ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_MASK (0x03FF0000) 
++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LSBMASK (0x000003FF) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SHIFT (16) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_LENGTH (10) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, CR_PDP_PROCAMP_INOFFSET_BG, CR_PROCAMP_INOFF_B ++*/ ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_MASK (0x000003FF) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LSBMASK (0x000003FF) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SHIFT (0) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_LENGTH (10) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_BG_CR_PROCAMP_INOFF_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_CR_PDP_PROCAMP_INOFFSET_R_OFFSET (0x0758) ++ ++/* PDP, CR_PDP_PROCAMP_INOFFSET_R, CR_PROCAMP_INOFF_R ++*/ ++#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_MASK (0x000003FF) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LSBMASK (0x000003FF) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SHIFT (0) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_LENGTH (10) ++#define PDP_CR_PDP_PROCAMP_INOFFSET_R_CR_PROCAMP_INOFF_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_SIGNAT_R_OFFSET (0x075C) ++ ++/* PDP, SIGNAT_R, SIGNATURE_R ++*/ ++#define PDP_SIGNAT_R_SIGNATURE_R_MASK (0x000003FF) ++#define PDP_SIGNAT_R_SIGNATURE_R_LSBMASK (0x000003FF) ++#define PDP_SIGNAT_R_SIGNATURE_R_SHIFT (0) ++#define PDP_SIGNAT_R_SIGNATURE_R_LENGTH (10) ++#define PDP_SIGNAT_R_SIGNATURE_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_SIGNAT_GB_OFFSET (0x0760) ++ ++/* PDP, SIGNAT_GB, SIGNATURE_G ++*/ ++#define PDP_SIGNAT_GB_SIGNATURE_G_MASK (0x03FF0000) ++#define PDP_SIGNAT_GB_SIGNATURE_G_LSBMASK (0x000003FF) ++#define PDP_SIGNAT_GB_SIGNATURE_G_SHIFT (16) ++#define PDP_SIGNAT_GB_SIGNATURE_G_LENGTH (10) ++#define PDP_SIGNAT_GB_SIGNATURE_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SIGNAT_GB, SIGNATURE_B ++*/ ++#define PDP_SIGNAT_GB_SIGNATURE_B_MASK (0x000003FF) ++#define PDP_SIGNAT_GB_SIGNATURE_B_LSBMASK (0x000003FF) ++#define PDP_SIGNAT_GB_SIGNATURE_B_SHIFT (0) ++#define PDP_SIGNAT_GB_SIGNATURE_B_LENGTH (10) ++#define PDP_SIGNAT_GB_SIGNATURE_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_REGISTER_UPDATE_CTRL_OFFSET (0x0764) ++ ++/* PDP, REGISTER_UPDATE_CTRL, BYPASS_DOUBLE_BUFFERING ++*/ ++#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_MASK (0x00000004) ++#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LSBMASK (0x00000001) ++#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SHIFT (2) ++#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_LENGTH (1) ++#define PDP_REGISTER_UPDATE_CTRL_BYPASS_DOUBLE_BUFFERING_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, REGISTER_UPDATE_CTRL, REGISTERS_VALID ++*/ ++#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_MASK (0x00000002) ++#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LSBMASK (0x00000001) ++#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SHIFT (1) ++#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_LENGTH (1) ++#define PDP_REGISTER_UPDATE_CTRL_REGISTERS_VALID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, REGISTER_UPDATE_CTRL, USE_VBLANK ++*/ ++#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_MASK (0x00000001) ++#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LSBMASK (0x00000001) ++#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SHIFT (0) ++#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_LENGTH (1) ++#define PDP_REGISTER_UPDATE_CTRL_USE_VBLANK_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_REGISTER_UPDATE_STATUS_OFFSET 
(0x0768) ++ ++/* PDP, REGISTER_UPDATE_STATUS, REGISTERS_UPDATED ++*/ ++#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_MASK (0x00000002) ++#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LSBMASK (0x00000001) ++#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SHIFT (1) ++#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_LENGTH (1) ++#define PDP_REGISTER_UPDATE_STATUS_REGISTERS_UPDATED_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DBGCTRL_OFFSET (0x076C) ++ ++/* PDP, DBGCTRL, DBG_READ ++*/ ++#define PDP_DBGCTRL_DBG_READ_MASK (0x00000002) ++#define PDP_DBGCTRL_DBG_READ_LSBMASK (0x00000001) ++#define PDP_DBGCTRL_DBG_READ_SHIFT (1) ++#define PDP_DBGCTRL_DBG_READ_LENGTH (1) ++#define PDP_DBGCTRL_DBG_READ_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DBGCTRL, DBG_ENAB ++*/ ++#define PDP_DBGCTRL_DBG_ENAB_MASK (0x00000001) ++#define PDP_DBGCTRL_DBG_ENAB_LSBMASK (0x00000001) ++#define PDP_DBGCTRL_DBG_ENAB_SHIFT (0) ++#define PDP_DBGCTRL_DBG_ENAB_LENGTH (1) ++#define PDP_DBGCTRL_DBG_ENAB_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DBGDATA_R_OFFSET (0x0770) ++ ++/* PDP, DBGDATA_R, DBG_DATA_R ++*/ ++#define PDP_DBGDATA_R_DBG_DATA_R_MASK (0x000003FF) ++#define PDP_DBGDATA_R_DBG_DATA_R_LSBMASK (0x000003FF) ++#define PDP_DBGDATA_R_DBG_DATA_R_SHIFT (0) ++#define PDP_DBGDATA_R_DBG_DATA_R_LENGTH (10) ++#define PDP_DBGDATA_R_DBG_DATA_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DBGDATA_GB_OFFSET (0x0774) ++ ++/* PDP, DBGDATA_GB, DBG_DATA_G ++*/ ++#define PDP_DBGDATA_GB_DBG_DATA_G_MASK (0x03FF0000) ++#define PDP_DBGDATA_GB_DBG_DATA_G_LSBMASK (0x000003FF) ++#define PDP_DBGDATA_GB_DBG_DATA_G_SHIFT (16) ++#define PDP_DBGDATA_GB_DBG_DATA_G_LENGTH (10) ++#define PDP_DBGDATA_GB_DBG_DATA_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DBGDATA_GB, DBG_DATA_B ++*/ ++#define PDP_DBGDATA_GB_DBG_DATA_B_MASK (0x000003FF) ++#define PDP_DBGDATA_GB_DBG_DATA_B_LSBMASK (0x000003FF) ++#define PDP_DBGDATA_GB_DBG_DATA_B_SHIFT (0) ++#define PDP_DBGDATA_GB_DBG_DATA_B_LENGTH (10) ++#define PDP_DBGDATA_GB_DBG_DATA_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DBGSIDE_OFFSET (0x0778) ++ ++/* PDP, DBGSIDE, DBG_VAL ++*/ ++#define PDP_DBGSIDE_DBG_VAL_MASK (0x00000008) ++#define PDP_DBGSIDE_DBG_VAL_LSBMASK (0x00000001) ++#define PDP_DBGSIDE_DBG_VAL_SHIFT (3) ++#define PDP_DBGSIDE_DBG_VAL_LENGTH (1) ++#define PDP_DBGSIDE_DBG_VAL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DBGSIDE, DBG_SIDE ++*/ ++#define PDP_DBGSIDE_DBG_SIDE_MASK (0x00000007) ++#define PDP_DBGSIDE_DBG_SIDE_LSBMASK (0x00000007) ++#define PDP_DBGSIDE_DBG_SIDE_SHIFT (0) ++#define PDP_DBGSIDE_DBG_SIDE_LENGTH (3) ++#define PDP_DBGSIDE_DBG_SIDE_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_OUTPUT_OFFSET (0x077C) ++ ++/* PDP, OUTPUT, EIGHT_BIT_OUTPUT ++*/ ++#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_MASK (0x00000002) ++#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LSBMASK (0x00000001) ++#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SHIFT (1) ++#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_LENGTH (1) ++#define PDP_OUTPUT_EIGHT_BIT_OUTPUT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OUTPUT, OUTPUT_CONFIG ++*/ ++#define PDP_OUTPUT_OUTPUT_CONFIG_MASK (0x00000001) ++#define PDP_OUTPUT_OUTPUT_CONFIG_LSBMASK (0x00000001) ++#define PDP_OUTPUT_OUTPUT_CONFIG_SHIFT (0) ++#define PDP_OUTPUT_OUTPUT_CONFIG_LENGTH (1) ++#define PDP_OUTPUT_OUTPUT_CONFIG_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_SYNCCTRL_OFFSET (0x0780) ++ ++/* PDP, SYNCCTRL, SYNCACTIVE ++*/ ++#define PDP_SYNCCTRL_SYNCACTIVE_MASK (0x80000000) ++#define PDP_SYNCCTRL_SYNCACTIVE_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_SYNCACTIVE_SHIFT (31) ++#define PDP_SYNCCTRL_SYNCACTIVE_LENGTH (1) ++#define 
PDP_SYNCCTRL_SYNCACTIVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, PDP_RST ++*/ ++#define PDP_SYNCCTRL_PDP_RST_MASK (0x20000000) ++#define PDP_SYNCCTRL_PDP_RST_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_PDP_RST_SHIFT (29) ++#define PDP_SYNCCTRL_PDP_RST_LENGTH (1) ++#define PDP_SYNCCTRL_PDP_RST_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, POWERDN ++*/ ++#define PDP_SYNCCTRL_POWERDN_MASK (0x10000000) ++#define PDP_SYNCCTRL_POWERDN_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_POWERDN_SHIFT (28) ++#define PDP_SYNCCTRL_POWERDN_LENGTH (1) ++#define PDP_SYNCCTRL_POWERDN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, LOWPWRMODE ++*/ ++#define PDP_SYNCCTRL_LOWPWRMODE_MASK (0x08000000) ++#define PDP_SYNCCTRL_LOWPWRMODE_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_LOWPWRMODE_SHIFT (27) ++#define PDP_SYNCCTRL_LOWPWRMODE_LENGTH (1) ++#define PDP_SYNCCTRL_LOWPWRMODE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDSYNCTRL ++*/ ++#define PDP_SYNCCTRL_UPDSYNCTRL_MASK (0x04000000) ++#define PDP_SYNCCTRL_UPDSYNCTRL_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_UPDSYNCTRL_SHIFT (26) ++#define PDP_SYNCCTRL_UPDSYNCTRL_LENGTH (1) ++#define PDP_SYNCCTRL_UPDSYNCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDINTCTRL ++*/ ++#define PDP_SYNCCTRL_UPDINTCTRL_MASK (0x02000000) ++#define PDP_SYNCCTRL_UPDINTCTRL_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_UPDINTCTRL_SHIFT (25) ++#define PDP_SYNCCTRL_UPDINTCTRL_LENGTH (1) ++#define PDP_SYNCCTRL_UPDINTCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDCTRL ++*/ ++#define PDP_SYNCCTRL_UPDCTRL_MASK (0x01000000) ++#define PDP_SYNCCTRL_UPDCTRL_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_UPDCTRL_SHIFT (24) ++#define PDP_SYNCCTRL_UPDCTRL_LENGTH (1) ++#define PDP_SYNCCTRL_UPDCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, UPDWAIT ++*/ ++#define PDP_SYNCCTRL_UPDWAIT_MASK (0x000F0000) ++#define PDP_SYNCCTRL_UPDWAIT_LSBMASK (0x0000000F) ++#define PDP_SYNCCTRL_UPDWAIT_SHIFT (16) ++#define PDP_SYNCCTRL_UPDWAIT_LENGTH (4) ++#define PDP_SYNCCTRL_UPDWAIT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, FIELD_EN ++*/ ++#define PDP_SYNCCTRL_FIELD_EN_MASK (0x00002000) ++#define PDP_SYNCCTRL_FIELD_EN_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_FIELD_EN_SHIFT (13) ++#define PDP_SYNCCTRL_FIELD_EN_LENGTH (1) ++#define PDP_SYNCCTRL_FIELD_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, CSYNC_EN ++*/ ++#define PDP_SYNCCTRL_CSYNC_EN_MASK (0x00001000) ++#define PDP_SYNCCTRL_CSYNC_EN_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_CSYNC_EN_SHIFT (12) ++#define PDP_SYNCCTRL_CSYNC_EN_LENGTH (1) ++#define PDP_SYNCCTRL_CSYNC_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, CLKPOL ++*/ ++#define PDP_SYNCCTRL_CLKPOL_MASK (0x00000800) ++#define PDP_SYNCCTRL_CLKPOL_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_CLKPOL_SHIFT (11) ++#define PDP_SYNCCTRL_CLKPOL_LENGTH (1) ++#define PDP_SYNCCTRL_CLKPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, VS_SLAVE ++*/ ++#define PDP_SYNCCTRL_VS_SLAVE_MASK (0x00000080) ++#define PDP_SYNCCTRL_VS_SLAVE_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_VS_SLAVE_SHIFT (7) ++#define PDP_SYNCCTRL_VS_SLAVE_LENGTH (1) ++#define PDP_SYNCCTRL_VS_SLAVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, HS_SLAVE ++*/ ++#define PDP_SYNCCTRL_HS_SLAVE_MASK (0x00000040) ++#define PDP_SYNCCTRL_HS_SLAVE_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_HS_SLAVE_SHIFT (6) ++#define PDP_SYNCCTRL_HS_SLAVE_LENGTH (1) ++#define PDP_SYNCCTRL_HS_SLAVE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, BLNKPOL ++*/ ++#define PDP_SYNCCTRL_BLNKPOL_MASK (0x00000020) ++#define PDP_SYNCCTRL_BLNKPOL_LSBMASK (0x00000001) 
++#define PDP_SYNCCTRL_BLNKPOL_SHIFT (5) ++#define PDP_SYNCCTRL_BLNKPOL_LENGTH (1) ++#define PDP_SYNCCTRL_BLNKPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, BLNKDIS ++*/ ++#define PDP_SYNCCTRL_BLNKDIS_MASK (0x00000010) ++#define PDP_SYNCCTRL_BLNKDIS_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_BLNKDIS_SHIFT (4) ++#define PDP_SYNCCTRL_BLNKDIS_LENGTH (1) ++#define PDP_SYNCCTRL_BLNKDIS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, VSPOL ++*/ ++#define PDP_SYNCCTRL_VSPOL_MASK (0x00000008) ++#define PDP_SYNCCTRL_VSPOL_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_VSPOL_SHIFT (3) ++#define PDP_SYNCCTRL_VSPOL_LENGTH (1) ++#define PDP_SYNCCTRL_VSPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, VSDIS ++*/ ++#define PDP_SYNCCTRL_VSDIS_MASK (0x00000004) ++#define PDP_SYNCCTRL_VSDIS_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_VSDIS_SHIFT (2) ++#define PDP_SYNCCTRL_VSDIS_LENGTH (1) ++#define PDP_SYNCCTRL_VSDIS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, HSPOL ++*/ ++#define PDP_SYNCCTRL_HSPOL_MASK (0x00000002) ++#define PDP_SYNCCTRL_HSPOL_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_HSPOL_SHIFT (1) ++#define PDP_SYNCCTRL_HSPOL_LENGTH (1) ++#define PDP_SYNCCTRL_HSPOL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, SYNCCTRL, HSDIS ++*/ ++#define PDP_SYNCCTRL_HSDIS_MASK (0x00000001) ++#define PDP_SYNCCTRL_HSDIS_LSBMASK (0x00000001) ++#define PDP_SYNCCTRL_HSDIS_SHIFT (0) ++#define PDP_SYNCCTRL_HSDIS_LENGTH (1) ++#define PDP_SYNCCTRL_HSDIS_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_HSYNC1_OFFSET (0x0784) ++ ++/* PDP, HSYNC1, HBPS ++*/ ++#define PDP_HSYNC1_HBPS_MASK (0x1FFF0000) ++#define PDP_HSYNC1_HBPS_LSBMASK (0x00001FFF) ++#define PDP_HSYNC1_HBPS_SHIFT (16) ++#define PDP_HSYNC1_HBPS_LENGTH (13) ++#define PDP_HSYNC1_HBPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HSYNC1, HT ++*/ ++#define PDP_HSYNC1_HT_MASK (0x00001FFF) ++#define PDP_HSYNC1_HT_LSBMASK (0x00001FFF) ++#define PDP_HSYNC1_HT_SHIFT (0) ++#define PDP_HSYNC1_HT_LENGTH (13) ++#define PDP_HSYNC1_HT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_HSYNC2_OFFSET (0x0788) ++ ++/* PDP, HSYNC2, HAS ++*/ ++#define PDP_HSYNC2_HAS_MASK (0x1FFF0000) ++#define PDP_HSYNC2_HAS_LSBMASK (0x00001FFF) ++#define PDP_HSYNC2_HAS_SHIFT (16) ++#define PDP_HSYNC2_HAS_LENGTH (13) ++#define PDP_HSYNC2_HAS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HSYNC2, HLBS ++*/ ++#define PDP_HSYNC2_HLBS_MASK (0x00001FFF) ++#define PDP_HSYNC2_HLBS_LSBMASK (0x00001FFF) ++#define PDP_HSYNC2_HLBS_SHIFT (0) ++#define PDP_HSYNC2_HLBS_LENGTH (13) ++#define PDP_HSYNC2_HLBS_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_HSYNC3_OFFSET (0x078C) ++ ++/* PDP, HSYNC3, HFPS ++*/ ++#define PDP_HSYNC3_HFPS_MASK (0x1FFF0000) ++#define PDP_HSYNC3_HFPS_LSBMASK (0x00001FFF) ++#define PDP_HSYNC3_HFPS_SHIFT (16) ++#define PDP_HSYNC3_HFPS_LENGTH (13) ++#define PDP_HSYNC3_HFPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HSYNC3, HRBS ++*/ ++#define PDP_HSYNC3_HRBS_MASK (0x00001FFF) ++#define PDP_HSYNC3_HRBS_LSBMASK (0x00001FFF) ++#define PDP_HSYNC3_HRBS_SHIFT (0) ++#define PDP_HSYNC3_HRBS_LENGTH (13) ++#define PDP_HSYNC3_HRBS_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VSYNC1_OFFSET (0x0790) ++ ++/* PDP, VSYNC1, VBPS ++*/ ++#define PDP_VSYNC1_VBPS_MASK (0x1FFF0000) ++#define PDP_VSYNC1_VBPS_LSBMASK (0x00001FFF) ++#define PDP_VSYNC1_VBPS_SHIFT (16) ++#define PDP_VSYNC1_VBPS_LENGTH (13) ++#define PDP_VSYNC1_VBPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VSYNC1, VT ++*/ ++#define PDP_VSYNC1_VT_MASK (0x00001FFF) ++#define PDP_VSYNC1_VT_LSBMASK (0x00001FFF) ++#define PDP_VSYNC1_VT_SHIFT (0) ++#define PDP_VSYNC1_VT_LENGTH (13) ++#define PDP_VSYNC1_VT_SIGNED_FIELD IMG_FALSE 
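++
++/* Illustrative sketch only -- not part of the original register map.
++ * Every field in this header follows one convention: <FIELD>_MASK is the
++ * bit mask at its in-register position, <FIELD>_LSBMASK is the same mask
++ * shifted down to bit 0, and <FIELD>_SHIFT/_LENGTH give its position and
++ * width.  A hypothetical pair of helpers built on that convention (these
++ * names are ours, not defined anywhere in this header) might look like:
++ *
++ *   #define PDP_FIELD_PREP(f, v) \
++ *           (((u32)(v) & f##_LSBMASK) << f##_SHIFT)
++ *   #define PDP_FIELD_GET(r, f) \
++ *           (((u32)(r) & f##_MASK) >> f##_SHIFT)
++ *
++ * e.g. a read-modify-write of the 13-bit horizontal total in HSYNC1,
++ * assuming 'base' is the ioremap()ed PDP register base:
++ *
++ *   u32 v = readl(base + PDP_HSYNC1_OFFSET);
++ *   v &= ~PDP_HSYNC1_HT_MASK;
++ *   v |= PDP_FIELD_PREP(PDP_HSYNC1_HT, 1056);
++ *   writel(v, base + PDP_HSYNC1_OFFSET);
++ */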
++ ++#define PDP_VSYNC2_OFFSET (0x0794) ++ ++/* PDP, VSYNC2, VAS ++*/ ++#define PDP_VSYNC2_VAS_MASK (0x1FFF0000) ++#define PDP_VSYNC2_VAS_LSBMASK (0x00001FFF) ++#define PDP_VSYNC2_VAS_SHIFT (16) ++#define PDP_VSYNC2_VAS_LENGTH (13) ++#define PDP_VSYNC2_VAS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VSYNC2, VTBS ++*/ ++#define PDP_VSYNC2_VTBS_MASK (0x00001FFF) ++#define PDP_VSYNC2_VTBS_LSBMASK (0x00001FFF) ++#define PDP_VSYNC2_VTBS_SHIFT (0) ++#define PDP_VSYNC2_VTBS_LENGTH (13) ++#define PDP_VSYNC2_VTBS_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VSYNC3_OFFSET (0x0798) ++ ++/* PDP, VSYNC3, VFPS ++*/ ++#define PDP_VSYNC3_VFPS_MASK (0x1FFF0000) ++#define PDP_VSYNC3_VFPS_LSBMASK (0x00001FFF) ++#define PDP_VSYNC3_VFPS_SHIFT (16) ++#define PDP_VSYNC3_VFPS_LENGTH (13) ++#define PDP_VSYNC3_VFPS_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VSYNC3, VBBS ++*/ ++#define PDP_VSYNC3_VBBS_MASK (0x00001FFF) ++#define PDP_VSYNC3_VBBS_LSBMASK (0x00001FFF) ++#define PDP_VSYNC3_VBBS_SHIFT (0) ++#define PDP_VSYNC3_VBBS_LENGTH (13) ++#define PDP_VSYNC3_VBBS_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_INTSTAT_OFFSET (0x079C) ++ ++/* PDP, INTSTAT, INTS_VID4ORUN ++*/ ++#define PDP_INTSTAT_INTS_VID4ORUN_MASK (0x00080000) ++#define PDP_INTSTAT_INTS_VID4ORUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VID4ORUN_SHIFT (19) ++#define PDP_INTSTAT_INTS_VID4ORUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_VID4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID3ORUN ++*/ ++#define PDP_INTSTAT_INTS_VID3ORUN_MASK (0x00040000) ++#define PDP_INTSTAT_INTS_VID3ORUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VID3ORUN_SHIFT (18) ++#define PDP_INTSTAT_INTS_VID3ORUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_VID3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID2ORUN ++*/ ++#define PDP_INTSTAT_INTS_VID2ORUN_MASK (0x00020000) ++#define PDP_INTSTAT_INTS_VID2ORUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VID2ORUN_SHIFT (17) ++#define PDP_INTSTAT_INTS_VID2ORUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_VID2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID1ORUN ++*/ ++#define PDP_INTSTAT_INTS_VID1ORUN_MASK (0x00010000) ++#define PDP_INTSTAT_INTS_VID1ORUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VID1ORUN_SHIFT (16) ++#define PDP_INTSTAT_INTS_VID1ORUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_VID1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH4ORUN ++*/ ++#define PDP_INTSTAT_INTS_GRPH4ORUN_MASK (0x00008000) ++#define PDP_INTSTAT_INTS_GRPH4ORUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_GRPH4ORUN_SHIFT (15) ++#define PDP_INTSTAT_INTS_GRPH4ORUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_GRPH4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH3ORUN ++*/ ++#define PDP_INTSTAT_INTS_GRPH3ORUN_MASK (0x00004000) ++#define PDP_INTSTAT_INTS_GRPH3ORUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_GRPH3ORUN_SHIFT (14) ++#define PDP_INTSTAT_INTS_GRPH3ORUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_GRPH3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH2ORUN ++*/ ++#define PDP_INTSTAT_INTS_GRPH2ORUN_MASK (0x00002000) ++#define PDP_INTSTAT_INTS_GRPH2ORUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_GRPH2ORUN_SHIFT (13) ++#define PDP_INTSTAT_INTS_GRPH2ORUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_GRPH2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH1ORUN ++*/ ++#define PDP_INTSTAT_INTS_GRPH1ORUN_MASK (0x00001000) ++#define PDP_INTSTAT_INTS_GRPH1ORUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_GRPH1ORUN_SHIFT (12) ++#define PDP_INTSTAT_INTS_GRPH1ORUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_GRPH1ORUN_SIGNED_FIELD 
IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID4URUN ++*/ ++#define PDP_INTSTAT_INTS_VID4URUN_MASK (0x00000800) ++#define PDP_INTSTAT_INTS_VID4URUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VID4URUN_SHIFT (11) ++#define PDP_INTSTAT_INTS_VID4URUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_VID4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID3URUN ++*/ ++#define PDP_INTSTAT_INTS_VID3URUN_MASK (0x00000400) ++#define PDP_INTSTAT_INTS_VID3URUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VID3URUN_SHIFT (10) ++#define PDP_INTSTAT_INTS_VID3URUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_VID3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID2URUN ++*/ ++#define PDP_INTSTAT_INTS_VID2URUN_MASK (0x00000200) ++#define PDP_INTSTAT_INTS_VID2URUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VID2URUN_SHIFT (9) ++#define PDP_INTSTAT_INTS_VID2URUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_VID2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VID1URUN ++*/ ++#define PDP_INTSTAT_INTS_VID1URUN_MASK (0x00000100) ++#define PDP_INTSTAT_INTS_VID1URUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VID1URUN_SHIFT (8) ++#define PDP_INTSTAT_INTS_VID1URUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_VID1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH4URUN ++*/ ++#define PDP_INTSTAT_INTS_GRPH4URUN_MASK (0x00000080) ++#define PDP_INTSTAT_INTS_GRPH4URUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_GRPH4URUN_SHIFT (7) ++#define PDP_INTSTAT_INTS_GRPH4URUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_GRPH4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH3URUN ++*/ ++#define PDP_INTSTAT_INTS_GRPH3URUN_MASK (0x00000040) ++#define PDP_INTSTAT_INTS_GRPH3URUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_GRPH3URUN_SHIFT (6) ++#define PDP_INTSTAT_INTS_GRPH3URUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_GRPH3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH2URUN ++*/ ++#define PDP_INTSTAT_INTS_GRPH2URUN_MASK (0x00000020) ++#define PDP_INTSTAT_INTS_GRPH2URUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_GRPH2URUN_SHIFT (5) ++#define PDP_INTSTAT_INTS_GRPH2URUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_GRPH2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_GRPH1URUN ++*/ ++#define PDP_INTSTAT_INTS_GRPH1URUN_MASK (0x00000010) ++#define PDP_INTSTAT_INTS_GRPH1URUN_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_GRPH1URUN_SHIFT (4) ++#define PDP_INTSTAT_INTS_GRPH1URUN_LENGTH (1) ++#define PDP_INTSTAT_INTS_GRPH1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VBLNK1 ++*/ ++#define PDP_INTSTAT_INTS_VBLNK1_MASK (0x00000008) ++#define PDP_INTSTAT_INTS_VBLNK1_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VBLNK1_SHIFT (3) ++#define PDP_INTSTAT_INTS_VBLNK1_LENGTH (1) ++#define PDP_INTSTAT_INTS_VBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_VBLNK0 ++*/ ++#define PDP_INTSTAT_INTS_VBLNK0_MASK (0x00000004) ++#define PDP_INTSTAT_INTS_VBLNK0_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_VBLNK0_SHIFT (2) ++#define PDP_INTSTAT_INTS_VBLNK0_LENGTH (1) ++#define PDP_INTSTAT_INTS_VBLNK0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_HBLNK1 ++*/ ++#define PDP_INTSTAT_INTS_HBLNK1_MASK (0x00000002) ++#define PDP_INTSTAT_INTS_HBLNK1_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_HBLNK1_SHIFT (1) ++#define PDP_INTSTAT_INTS_HBLNK1_LENGTH (1) ++#define PDP_INTSTAT_INTS_HBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTSTAT, INTS_HBLNK0 ++*/ ++#define PDP_INTSTAT_INTS_HBLNK0_MASK (0x00000001) ++#define PDP_INTSTAT_INTS_HBLNK0_LSBMASK (0x00000001) ++#define PDP_INTSTAT_INTS_HBLNK0_SHIFT (0) ++#define 
PDP_INTSTAT_INTS_HBLNK0_LENGTH (1) ++#define PDP_INTSTAT_INTS_HBLNK0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_INTENAB_OFFSET (0x07A0) ++ ++/* PDP, INTENAB, INTEN_VID4ORUN ++*/ ++#define PDP_INTENAB_INTEN_VID4ORUN_MASK (0x00080000) ++#define PDP_INTENAB_INTEN_VID4ORUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VID4ORUN_SHIFT (19) ++#define PDP_INTENAB_INTEN_VID4ORUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_VID4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID3ORUN ++*/ ++#define PDP_INTENAB_INTEN_VID3ORUN_MASK (0x00040000) ++#define PDP_INTENAB_INTEN_VID3ORUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VID3ORUN_SHIFT (18) ++#define PDP_INTENAB_INTEN_VID3ORUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_VID3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID2ORUN ++*/ ++#define PDP_INTENAB_INTEN_VID2ORUN_MASK (0x00020000) ++#define PDP_INTENAB_INTEN_VID2ORUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VID2ORUN_SHIFT (17) ++#define PDP_INTENAB_INTEN_VID2ORUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_VID2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID1ORUN ++*/ ++#define PDP_INTENAB_INTEN_VID1ORUN_MASK (0x00010000) ++#define PDP_INTENAB_INTEN_VID1ORUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VID1ORUN_SHIFT (16) ++#define PDP_INTENAB_INTEN_VID1ORUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_VID1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH4ORUN ++*/ ++#define PDP_INTENAB_INTEN_GRPH4ORUN_MASK (0x00008000) ++#define PDP_INTENAB_INTEN_GRPH4ORUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_GRPH4ORUN_SHIFT (15) ++#define PDP_INTENAB_INTEN_GRPH4ORUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_GRPH4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH3ORUN ++*/ ++#define PDP_INTENAB_INTEN_GRPH3ORUN_MASK (0x00004000) ++#define PDP_INTENAB_INTEN_GRPH3ORUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_GRPH3ORUN_SHIFT (14) ++#define PDP_INTENAB_INTEN_GRPH3ORUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_GRPH3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH2ORUN ++*/ ++#define PDP_INTENAB_INTEN_GRPH2ORUN_MASK (0x00002000) ++#define PDP_INTENAB_INTEN_GRPH2ORUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_GRPH2ORUN_SHIFT (13) ++#define PDP_INTENAB_INTEN_GRPH2ORUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_GRPH2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH1ORUN ++*/ ++#define PDP_INTENAB_INTEN_GRPH1ORUN_MASK (0x00001000) ++#define PDP_INTENAB_INTEN_GRPH1ORUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_GRPH1ORUN_SHIFT (12) ++#define PDP_INTENAB_INTEN_GRPH1ORUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_GRPH1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID4URUN ++*/ ++#define PDP_INTENAB_INTEN_VID4URUN_MASK (0x00000800) ++#define PDP_INTENAB_INTEN_VID4URUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VID4URUN_SHIFT (11) ++#define PDP_INTENAB_INTEN_VID4URUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_VID4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID3URUN ++*/ ++#define PDP_INTENAB_INTEN_VID3URUN_MASK (0x00000400) ++#define PDP_INTENAB_INTEN_VID3URUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VID3URUN_SHIFT (10) ++#define PDP_INTENAB_INTEN_VID3URUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_VID3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID2URUN ++*/ ++#define PDP_INTENAB_INTEN_VID2URUN_MASK (0x00000200) ++#define PDP_INTENAB_INTEN_VID2URUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VID2URUN_SHIFT (9) ++#define PDP_INTENAB_INTEN_VID2URUN_LENGTH (1) ++#define 
PDP_INTENAB_INTEN_VID2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VID1URUN ++*/ ++#define PDP_INTENAB_INTEN_VID1URUN_MASK (0x00000100) ++#define PDP_INTENAB_INTEN_VID1URUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VID1URUN_SHIFT (8) ++#define PDP_INTENAB_INTEN_VID1URUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_VID1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH4URUN ++*/ ++#define PDP_INTENAB_INTEN_GRPH4URUN_MASK (0x00000080) ++#define PDP_INTENAB_INTEN_GRPH4URUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_GRPH4URUN_SHIFT (7) ++#define PDP_INTENAB_INTEN_GRPH4URUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_GRPH4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH3URUN ++*/ ++#define PDP_INTENAB_INTEN_GRPH3URUN_MASK (0x00000040) ++#define PDP_INTENAB_INTEN_GRPH3URUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_GRPH3URUN_SHIFT (6) ++#define PDP_INTENAB_INTEN_GRPH3URUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_GRPH3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH2URUN ++*/ ++#define PDP_INTENAB_INTEN_GRPH2URUN_MASK (0x00000020) ++#define PDP_INTENAB_INTEN_GRPH2URUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_GRPH2URUN_SHIFT (5) ++#define PDP_INTENAB_INTEN_GRPH2URUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_GRPH2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_GRPH1URUN ++*/ ++#define PDP_INTENAB_INTEN_GRPH1URUN_MASK (0x00000010) ++#define PDP_INTENAB_INTEN_GRPH1URUN_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_GRPH1URUN_SHIFT (4) ++#define PDP_INTENAB_INTEN_GRPH1URUN_LENGTH (1) ++#define PDP_INTENAB_INTEN_GRPH1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VBLNK1 ++*/ ++#define PDP_INTENAB_INTEN_VBLNK1_MASK (0x00000008) ++#define PDP_INTENAB_INTEN_VBLNK1_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VBLNK1_SHIFT (3) ++#define PDP_INTENAB_INTEN_VBLNK1_LENGTH (1) ++#define PDP_INTENAB_INTEN_VBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_VBLNK0 ++*/ ++#define PDP_INTENAB_INTEN_VBLNK0_MASK (0x00000004) ++#define PDP_INTENAB_INTEN_VBLNK0_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_VBLNK0_SHIFT (2) ++#define PDP_INTENAB_INTEN_VBLNK0_LENGTH (1) ++#define PDP_INTENAB_INTEN_VBLNK0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_HBLNK1 ++*/ ++#define PDP_INTENAB_INTEN_HBLNK1_MASK (0x00000002) ++#define PDP_INTENAB_INTEN_HBLNK1_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_HBLNK1_SHIFT (1) ++#define PDP_INTENAB_INTEN_HBLNK1_LENGTH (1) ++#define PDP_INTENAB_INTEN_HBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTENAB, INTEN_HBLNK0 ++*/ ++#define PDP_INTENAB_INTEN_HBLNK0_MASK (0x00000001) ++#define PDP_INTENAB_INTEN_HBLNK0_LSBMASK (0x00000001) ++#define PDP_INTENAB_INTEN_HBLNK0_SHIFT (0) ++#define PDP_INTENAB_INTEN_HBLNK0_LENGTH (1) ++#define PDP_INTENAB_INTEN_HBLNK0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_INTCLR_OFFSET (0x07A4) ++ ++/* PDP, INTCLR, INTCLR_VID4ORUN ++*/ ++#define PDP_INTCLR_INTCLR_VID4ORUN_MASK (0x00080000) ++#define PDP_INTCLR_INTCLR_VID4ORUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VID4ORUN_SHIFT (19) ++#define PDP_INTCLR_INTCLR_VID4ORUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VID4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID3ORUN ++*/ ++#define PDP_INTCLR_INTCLR_VID3ORUN_MASK (0x00040000) ++#define PDP_INTCLR_INTCLR_VID3ORUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VID3ORUN_SHIFT (18) ++#define PDP_INTCLR_INTCLR_VID3ORUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VID3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID2ORUN ++*/ ++#define 
PDP_INTCLR_INTCLR_VID2ORUN_MASK (0x00020000) ++#define PDP_INTCLR_INTCLR_VID2ORUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VID2ORUN_SHIFT (17) ++#define PDP_INTCLR_INTCLR_VID2ORUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VID2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID1ORUN ++*/ ++#define PDP_INTCLR_INTCLR_VID1ORUN_MASK (0x00010000) ++#define PDP_INTCLR_INTCLR_VID1ORUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VID1ORUN_SHIFT (16) ++#define PDP_INTCLR_INTCLR_VID1ORUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VID1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH4ORUN ++*/ ++#define PDP_INTCLR_INTCLR_GRPH4ORUN_MASK (0x00008000) ++#define PDP_INTCLR_INTCLR_GRPH4ORUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_GRPH4ORUN_SHIFT (15) ++#define PDP_INTCLR_INTCLR_GRPH4ORUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_GRPH4ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH3ORUN ++*/ ++#define PDP_INTCLR_INTCLR_GRPH3ORUN_MASK (0x00004000) ++#define PDP_INTCLR_INTCLR_GRPH3ORUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_GRPH3ORUN_SHIFT (14) ++#define PDP_INTCLR_INTCLR_GRPH3ORUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_GRPH3ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH2ORUN ++*/ ++#define PDP_INTCLR_INTCLR_GRPH2ORUN_MASK (0x00002000) ++#define PDP_INTCLR_INTCLR_GRPH2ORUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_GRPH2ORUN_SHIFT (13) ++#define PDP_INTCLR_INTCLR_GRPH2ORUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_GRPH2ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH1ORUN ++*/ ++#define PDP_INTCLR_INTCLR_GRPH1ORUN_MASK (0x00001000) ++#define PDP_INTCLR_INTCLR_GRPH1ORUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_GRPH1ORUN_SHIFT (12) ++#define PDP_INTCLR_INTCLR_GRPH1ORUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_GRPH1ORUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID4URUN ++*/ ++#define PDP_INTCLR_INTCLR_VID4URUN_MASK (0x00000800) ++#define PDP_INTCLR_INTCLR_VID4URUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VID4URUN_SHIFT (11) ++#define PDP_INTCLR_INTCLR_VID4URUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VID4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID3URUN ++*/ ++#define PDP_INTCLR_INTCLR_VID3URUN_MASK (0x00000400) ++#define PDP_INTCLR_INTCLR_VID3URUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VID3URUN_SHIFT (10) ++#define PDP_INTCLR_INTCLR_VID3URUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VID3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID2URUN ++*/ ++#define PDP_INTCLR_INTCLR_VID2URUN_MASK (0x00000200) ++#define PDP_INTCLR_INTCLR_VID2URUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VID2URUN_SHIFT (9) ++#define PDP_INTCLR_INTCLR_VID2URUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VID2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VID1URUN ++*/ ++#define PDP_INTCLR_INTCLR_VID1URUN_MASK (0x00000100) ++#define PDP_INTCLR_INTCLR_VID1URUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VID1URUN_SHIFT (8) ++#define PDP_INTCLR_INTCLR_VID1URUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VID1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH4URUN ++*/ ++#define PDP_INTCLR_INTCLR_GRPH4URUN_MASK (0x00000080) ++#define PDP_INTCLR_INTCLR_GRPH4URUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_GRPH4URUN_SHIFT (7) ++#define PDP_INTCLR_INTCLR_GRPH4URUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_GRPH4URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH3URUN ++*/ ++#define PDP_INTCLR_INTCLR_GRPH3URUN_MASK (0x00000040) ++#define PDP_INTCLR_INTCLR_GRPH3URUN_LSBMASK (0x00000001) 
++#define PDP_INTCLR_INTCLR_GRPH3URUN_SHIFT (6) ++#define PDP_INTCLR_INTCLR_GRPH3URUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_GRPH3URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH2URUN ++*/ ++#define PDP_INTCLR_INTCLR_GRPH2URUN_MASK (0x00000020) ++#define PDP_INTCLR_INTCLR_GRPH2URUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_GRPH2URUN_SHIFT (5) ++#define PDP_INTCLR_INTCLR_GRPH2URUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_GRPH2URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_GRPH1URUN ++*/ ++#define PDP_INTCLR_INTCLR_GRPH1URUN_MASK (0x00000010) ++#define PDP_INTCLR_INTCLR_GRPH1URUN_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_GRPH1URUN_SHIFT (4) ++#define PDP_INTCLR_INTCLR_GRPH1URUN_LENGTH (1) ++#define PDP_INTCLR_INTCLR_GRPH1URUN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VBLNK1 ++*/ ++#define PDP_INTCLR_INTCLR_VBLNK1_MASK (0x00000008) ++#define PDP_INTCLR_INTCLR_VBLNK1_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VBLNK1_SHIFT (3) ++#define PDP_INTCLR_INTCLR_VBLNK1_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_VBLNK0 ++*/ ++#define PDP_INTCLR_INTCLR_VBLNK0_MASK (0x00000004) ++#define PDP_INTCLR_INTCLR_VBLNK0_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_VBLNK0_SHIFT (2) ++#define PDP_INTCLR_INTCLR_VBLNK0_LENGTH (1) ++#define PDP_INTCLR_INTCLR_VBLNK0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_HBLNK1 ++*/ ++#define PDP_INTCLR_INTCLR_HBLNK1_MASK (0x00000002) ++#define PDP_INTCLR_INTCLR_HBLNK1_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_HBLNK1_SHIFT (1) ++#define PDP_INTCLR_INTCLR_HBLNK1_LENGTH (1) ++#define PDP_INTCLR_INTCLR_HBLNK1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, INTCLR, INTCLR_HBLNK0 ++*/ ++#define PDP_INTCLR_INTCLR_HBLNK0_MASK (0x00000001) ++#define PDP_INTCLR_INTCLR_HBLNK0_LSBMASK (0x00000001) ++#define PDP_INTCLR_INTCLR_HBLNK0_SHIFT (0) ++#define PDP_INTCLR_INTCLR_HBLNK0_LENGTH (1) ++#define PDP_INTCLR_INTCLR_HBLNK0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_MEMCTRL_OFFSET (0x07A8) ++ ++/* PDP, MEMCTRL, MEMREFRESH ++*/ ++#define PDP_MEMCTRL_MEMREFRESH_MASK (0xC0000000) ++#define PDP_MEMCTRL_MEMREFRESH_LSBMASK (0x00000003) ++#define PDP_MEMCTRL_MEMREFRESH_SHIFT (30) ++#define PDP_MEMCTRL_MEMREFRESH_LENGTH (2) ++#define PDP_MEMCTRL_MEMREFRESH_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, MEMCTRL, BURSTLEN ++*/ ++#define PDP_MEMCTRL_BURSTLEN_MASK (0x000000FF) ++#define PDP_MEMCTRL_BURSTLEN_LSBMASK (0x000000FF) ++#define PDP_MEMCTRL_BURSTLEN_SHIFT (0) ++#define PDP_MEMCTRL_BURSTLEN_LENGTH (8) ++#define PDP_MEMCTRL_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_MEM_THRESH_OFFSET (0x07AC) ++ ++/* PDP, MEM_THRESH, UVTHRESHOLD ++*/ ++#define PDP_MEM_THRESH_UVTHRESHOLD_MASK (0xFF000000) ++#define PDP_MEM_THRESH_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define PDP_MEM_THRESH_UVTHRESHOLD_SHIFT (24) ++#define PDP_MEM_THRESH_UVTHRESHOLD_LENGTH (8) ++#define PDP_MEM_THRESH_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, MEM_THRESH, YTHRESHOLD ++*/ ++#define PDP_MEM_THRESH_YTHRESHOLD_MASK (0x001FF000) ++#define PDP_MEM_THRESH_YTHRESHOLD_LSBMASK (0x000001FF) ++#define PDP_MEM_THRESH_YTHRESHOLD_SHIFT (12) ++#define PDP_MEM_THRESH_YTHRESHOLD_LENGTH (9) ++#define PDP_MEM_THRESH_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, MEM_THRESH, THRESHOLD ++*/ ++#define PDP_MEM_THRESH_THRESHOLD_MASK (0x000001FF) ++#define PDP_MEM_THRESH_THRESHOLD_LSBMASK (0x000001FF) ++#define PDP_MEM_THRESH_THRESHOLD_SHIFT (0) ++#define PDP_MEM_THRESH_THRESHOLD_LENGTH (9) ++#define PDP_MEM_THRESH_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define 
PDP_ALTERNATE_3D_CTRL_OFFSET (0x07B0) ++ ++/* PDP, ALTERNATE_3D_CTRL, ALT3D_ON ++*/ ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_MASK (0x00000010) ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LSBMASK (0x00000001) ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SHIFT (4) ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_LENGTH (1) ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_ON_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, ALTERNATE_3D_CTRL, ALT3D_BLENDSEL ++*/ ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_MASK (0x00000007) ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LSBMASK (0x00000007) ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SHIFT (0) ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_LENGTH (3) ++#define PDP_ALTERNATE_3D_CTRL_ALT3D_BLENDSEL_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA0_R_OFFSET (0x07B4) ++ ++/* PDP, GAMMA0_R, GAMMA0_R ++*/ ++#define PDP_GAMMA0_R_GAMMA0_R_MASK (0x000003FF) ++#define PDP_GAMMA0_R_GAMMA0_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA0_R_GAMMA0_R_SHIFT (0) ++#define PDP_GAMMA0_R_GAMMA0_R_LENGTH (10) ++#define PDP_GAMMA0_R_GAMMA0_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA0_GB_OFFSET (0x07B8) ++ ++/* PDP, GAMMA0_GB, GAMMA0_G ++*/ ++#define PDP_GAMMA0_GB_GAMMA0_G_MASK (0x03FF0000) ++#define PDP_GAMMA0_GB_GAMMA0_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA0_GB_GAMMA0_G_SHIFT (16) ++#define PDP_GAMMA0_GB_GAMMA0_G_LENGTH (10) ++#define PDP_GAMMA0_GB_GAMMA0_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA0_GB, GAMMA0_B ++*/ ++#define PDP_GAMMA0_GB_GAMMA0_B_MASK (0x000003FF) ++#define PDP_GAMMA0_GB_GAMMA0_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA0_GB_GAMMA0_B_SHIFT (0) ++#define PDP_GAMMA0_GB_GAMMA0_B_LENGTH (10) ++#define PDP_GAMMA0_GB_GAMMA0_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA1_R_OFFSET (0x07BC) ++ ++/* PDP, GAMMA1_R, GAMMA1_R ++*/ ++#define PDP_GAMMA1_R_GAMMA1_R_MASK (0x000003FF) ++#define PDP_GAMMA1_R_GAMMA1_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA1_R_GAMMA1_R_SHIFT (0) ++#define PDP_GAMMA1_R_GAMMA1_R_LENGTH (10) ++#define PDP_GAMMA1_R_GAMMA1_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA1_GB_OFFSET (0x07C0) ++ ++/* PDP, GAMMA1_GB, GAMMA1_G ++*/ ++#define PDP_GAMMA1_GB_GAMMA1_G_MASK (0x03FF0000) ++#define PDP_GAMMA1_GB_GAMMA1_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA1_GB_GAMMA1_G_SHIFT (16) ++#define PDP_GAMMA1_GB_GAMMA1_G_LENGTH (10) ++#define PDP_GAMMA1_GB_GAMMA1_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA1_GB, GAMMA1_B ++*/ ++#define PDP_GAMMA1_GB_GAMMA1_B_MASK (0x000003FF) ++#define PDP_GAMMA1_GB_GAMMA1_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA1_GB_GAMMA1_B_SHIFT (0) ++#define PDP_GAMMA1_GB_GAMMA1_B_LENGTH (10) ++#define PDP_GAMMA1_GB_GAMMA1_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA2_R_OFFSET (0x07C4) ++ ++/* PDP, GAMMA2_R, GAMMA2_R ++*/ ++#define PDP_GAMMA2_R_GAMMA2_R_MASK (0x000003FF) ++#define PDP_GAMMA2_R_GAMMA2_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA2_R_GAMMA2_R_SHIFT (0) ++#define PDP_GAMMA2_R_GAMMA2_R_LENGTH (10) ++#define PDP_GAMMA2_R_GAMMA2_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA2_GB_OFFSET (0x07C8) ++ ++/* PDP, GAMMA2_GB, GAMMA2_G ++*/ ++#define PDP_GAMMA2_GB_GAMMA2_G_MASK (0x03FF0000) ++#define PDP_GAMMA2_GB_GAMMA2_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA2_GB_GAMMA2_G_SHIFT (16) ++#define PDP_GAMMA2_GB_GAMMA2_G_LENGTH (10) ++#define PDP_GAMMA2_GB_GAMMA2_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA2_GB, GAMMA2_B ++*/ ++#define PDP_GAMMA2_GB_GAMMA2_B_MASK (0x000003FF) ++#define PDP_GAMMA2_GB_GAMMA2_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA2_GB_GAMMA2_B_SHIFT (0) ++#define PDP_GAMMA2_GB_GAMMA2_B_LENGTH (10) ++#define PDP_GAMMA2_GB_GAMMA2_B_SIGNED_FIELD 
IMG_FALSE ++ ++#define PDP_GAMMA3_R_OFFSET (0x07CC) ++ ++/* PDP, GAMMA3_R, GAMMA3_R ++*/ ++#define PDP_GAMMA3_R_GAMMA3_R_MASK (0x000003FF) ++#define PDP_GAMMA3_R_GAMMA3_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA3_R_GAMMA3_R_SHIFT (0) ++#define PDP_GAMMA3_R_GAMMA3_R_LENGTH (10) ++#define PDP_GAMMA3_R_GAMMA3_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA3_GB_OFFSET (0x07D0) ++ ++/* PDP, GAMMA3_GB, GAMMA3_G ++*/ ++#define PDP_GAMMA3_GB_GAMMA3_G_MASK (0x03FF0000) ++#define PDP_GAMMA3_GB_GAMMA3_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA3_GB_GAMMA3_G_SHIFT (16) ++#define PDP_GAMMA3_GB_GAMMA3_G_LENGTH (10) ++#define PDP_GAMMA3_GB_GAMMA3_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA3_GB, GAMMA3_B ++*/ ++#define PDP_GAMMA3_GB_GAMMA3_B_MASK (0x000003FF) ++#define PDP_GAMMA3_GB_GAMMA3_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA3_GB_GAMMA3_B_SHIFT (0) ++#define PDP_GAMMA3_GB_GAMMA3_B_LENGTH (10) ++#define PDP_GAMMA3_GB_GAMMA3_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA4_R_OFFSET (0x07D4) ++ ++/* PDP, GAMMA4_R, GAMMA4_R ++*/ ++#define PDP_GAMMA4_R_GAMMA4_R_MASK (0x000003FF) ++#define PDP_GAMMA4_R_GAMMA4_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA4_R_GAMMA4_R_SHIFT (0) ++#define PDP_GAMMA4_R_GAMMA4_R_LENGTH (10) ++#define PDP_GAMMA4_R_GAMMA4_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA4_GB_OFFSET (0x07D8) ++ ++/* PDP, GAMMA4_GB, GAMMA4_G ++*/ ++#define PDP_GAMMA4_GB_GAMMA4_G_MASK (0x03FF0000) ++#define PDP_GAMMA4_GB_GAMMA4_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA4_GB_GAMMA4_G_SHIFT (16) ++#define PDP_GAMMA4_GB_GAMMA4_G_LENGTH (10) ++#define PDP_GAMMA4_GB_GAMMA4_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA4_GB, GAMMA4_B ++*/ ++#define PDP_GAMMA4_GB_GAMMA4_B_MASK (0x000003FF) ++#define PDP_GAMMA4_GB_GAMMA4_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA4_GB_GAMMA4_B_SHIFT (0) ++#define PDP_GAMMA4_GB_GAMMA4_B_LENGTH (10) ++#define PDP_GAMMA4_GB_GAMMA4_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA5_R_OFFSET (0x07DC) ++ ++/* PDP, GAMMA5_R, GAMMA5_R ++*/ ++#define PDP_GAMMA5_R_GAMMA5_R_MASK (0x000003FF) ++#define PDP_GAMMA5_R_GAMMA5_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA5_R_GAMMA5_R_SHIFT (0) ++#define PDP_GAMMA5_R_GAMMA5_R_LENGTH (10) ++#define PDP_GAMMA5_R_GAMMA5_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA5_GB_OFFSET (0x07E0) ++ ++/* PDP, GAMMA5_GB, GAMMA5_G ++*/ ++#define PDP_GAMMA5_GB_GAMMA5_G_MASK (0x03FF0000) ++#define PDP_GAMMA5_GB_GAMMA5_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA5_GB_GAMMA5_G_SHIFT (16) ++#define PDP_GAMMA5_GB_GAMMA5_G_LENGTH (10) ++#define PDP_GAMMA5_GB_GAMMA5_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA5_GB, GAMMA5_B ++*/ ++#define PDP_GAMMA5_GB_GAMMA5_B_MASK (0x000003FF) ++#define PDP_GAMMA5_GB_GAMMA5_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA5_GB_GAMMA5_B_SHIFT (0) ++#define PDP_GAMMA5_GB_GAMMA5_B_LENGTH (10) ++#define PDP_GAMMA5_GB_GAMMA5_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA6_R_OFFSET (0x07E4) ++ ++/* PDP, GAMMA6_R, GAMMA6_R ++*/ ++#define PDP_GAMMA6_R_GAMMA6_R_MASK (0x000003FF) ++#define PDP_GAMMA6_R_GAMMA6_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA6_R_GAMMA6_R_SHIFT (0) ++#define PDP_GAMMA6_R_GAMMA6_R_LENGTH (10) ++#define PDP_GAMMA6_R_GAMMA6_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA6_GB_OFFSET (0x07E8) ++ ++/* PDP, GAMMA6_GB, GAMMA6_G ++*/ ++#define PDP_GAMMA6_GB_GAMMA6_G_MASK (0x03FF0000) ++#define PDP_GAMMA6_GB_GAMMA6_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA6_GB_GAMMA6_G_SHIFT (16) ++#define PDP_GAMMA6_GB_GAMMA6_G_LENGTH (10) ++#define PDP_GAMMA6_GB_GAMMA6_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA6_GB, GAMMA6_B ++*/ ++#define 
PDP_GAMMA6_GB_GAMMA6_B_MASK (0x000003FF) ++#define PDP_GAMMA6_GB_GAMMA6_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA6_GB_GAMMA6_B_SHIFT (0) ++#define PDP_GAMMA6_GB_GAMMA6_B_LENGTH (10) ++#define PDP_GAMMA6_GB_GAMMA6_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA7_R_OFFSET (0x07EC) ++ ++/* PDP, GAMMA7_R, GAMMA7_R ++*/ ++#define PDP_GAMMA7_R_GAMMA7_R_MASK (0x000003FF) ++#define PDP_GAMMA7_R_GAMMA7_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA7_R_GAMMA7_R_SHIFT (0) ++#define PDP_GAMMA7_R_GAMMA7_R_LENGTH (10) ++#define PDP_GAMMA7_R_GAMMA7_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA7_GB_OFFSET (0x07F0) ++ ++/* PDP, GAMMA7_GB, GAMMA7_G ++*/ ++#define PDP_GAMMA7_GB_GAMMA7_G_MASK (0x03FF0000) ++#define PDP_GAMMA7_GB_GAMMA7_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA7_GB_GAMMA7_G_SHIFT (16) ++#define PDP_GAMMA7_GB_GAMMA7_G_LENGTH (10) ++#define PDP_GAMMA7_GB_GAMMA7_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA7_GB, GAMMA7_B ++*/ ++#define PDP_GAMMA7_GB_GAMMA7_B_MASK (0x000003FF) ++#define PDP_GAMMA7_GB_GAMMA7_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA7_GB_GAMMA7_B_SHIFT (0) ++#define PDP_GAMMA7_GB_GAMMA7_B_LENGTH (10) ++#define PDP_GAMMA7_GB_GAMMA7_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA8_R_OFFSET (0x07F4) ++ ++/* PDP, GAMMA8_R, GAMMA8_R ++*/ ++#define PDP_GAMMA8_R_GAMMA8_R_MASK (0x000003FF) ++#define PDP_GAMMA8_R_GAMMA8_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA8_R_GAMMA8_R_SHIFT (0) ++#define PDP_GAMMA8_R_GAMMA8_R_LENGTH (10) ++#define PDP_GAMMA8_R_GAMMA8_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA8_GB_OFFSET (0x07F8) ++ ++/* PDP, GAMMA8_GB, GAMMA8_G ++*/ ++#define PDP_GAMMA8_GB_GAMMA8_G_MASK (0x03FF0000) ++#define PDP_GAMMA8_GB_GAMMA8_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA8_GB_GAMMA8_G_SHIFT (16) ++#define PDP_GAMMA8_GB_GAMMA8_G_LENGTH (10) ++#define PDP_GAMMA8_GB_GAMMA8_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA8_GB, GAMMA8_B ++*/ ++#define PDP_GAMMA8_GB_GAMMA8_B_MASK (0x000003FF) ++#define PDP_GAMMA8_GB_GAMMA8_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA8_GB_GAMMA8_B_SHIFT (0) ++#define PDP_GAMMA8_GB_GAMMA8_B_LENGTH (10) ++#define PDP_GAMMA8_GB_GAMMA8_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA9_R_OFFSET (0x07FC) ++ ++/* PDP, GAMMA9_R, GAMMA9_R ++*/ ++#define PDP_GAMMA9_R_GAMMA9_R_MASK (0x000003FF) ++#define PDP_GAMMA9_R_GAMMA9_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA9_R_GAMMA9_R_SHIFT (0) ++#define PDP_GAMMA9_R_GAMMA9_R_LENGTH (10) ++#define PDP_GAMMA9_R_GAMMA9_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA9_GB_OFFSET (0x0800) ++ ++/* PDP, GAMMA9_GB, GAMMA9_G ++*/ ++#define PDP_GAMMA9_GB_GAMMA9_G_MASK (0x03FF0000) ++#define PDP_GAMMA9_GB_GAMMA9_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA9_GB_GAMMA9_G_SHIFT (16) ++#define PDP_GAMMA9_GB_GAMMA9_G_LENGTH (10) ++#define PDP_GAMMA9_GB_GAMMA9_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA9_GB, GAMMA9_B ++*/ ++#define PDP_GAMMA9_GB_GAMMA9_B_MASK (0x000003FF) ++#define PDP_GAMMA9_GB_GAMMA9_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA9_GB_GAMMA9_B_SHIFT (0) ++#define PDP_GAMMA9_GB_GAMMA9_B_LENGTH (10) ++#define PDP_GAMMA9_GB_GAMMA9_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA10_R_OFFSET (0x0804) ++ ++/* PDP, GAMMA10_R, GAMMA10_R ++*/ ++#define PDP_GAMMA10_R_GAMMA10_R_MASK (0x000003FF) ++#define PDP_GAMMA10_R_GAMMA10_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA10_R_GAMMA10_R_SHIFT (0) ++#define PDP_GAMMA10_R_GAMMA10_R_LENGTH (10) ++#define PDP_GAMMA10_R_GAMMA10_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA10_GB_OFFSET (0x0808) ++ ++/* PDP, GAMMA10_GB, GAMMA10_G ++*/ ++#define PDP_GAMMA10_GB_GAMMA10_G_MASK (0x03FF0000) ++#define 
PDP_GAMMA10_GB_GAMMA10_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA10_GB_GAMMA10_G_SHIFT (16) ++#define PDP_GAMMA10_GB_GAMMA10_G_LENGTH (10) ++#define PDP_GAMMA10_GB_GAMMA10_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA10_GB, GAMMA10_B ++*/ ++#define PDP_GAMMA10_GB_GAMMA10_B_MASK (0x000003FF) ++#define PDP_GAMMA10_GB_GAMMA10_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA10_GB_GAMMA10_B_SHIFT (0) ++#define PDP_GAMMA10_GB_GAMMA10_B_LENGTH (10) ++#define PDP_GAMMA10_GB_GAMMA10_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA11_R_OFFSET (0x080C) ++ ++/* PDP, GAMMA11_R, GAMMA11_R ++*/ ++#define PDP_GAMMA11_R_GAMMA11_R_MASK (0x000003FF) ++#define PDP_GAMMA11_R_GAMMA11_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA11_R_GAMMA11_R_SHIFT (0) ++#define PDP_GAMMA11_R_GAMMA11_R_LENGTH (10) ++#define PDP_GAMMA11_R_GAMMA11_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA11_GB_OFFSET (0x0810) ++ ++/* PDP, GAMMA11_GB, GAMMA11_G ++*/ ++#define PDP_GAMMA11_GB_GAMMA11_G_MASK (0x03FF0000) ++#define PDP_GAMMA11_GB_GAMMA11_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA11_GB_GAMMA11_G_SHIFT (16) ++#define PDP_GAMMA11_GB_GAMMA11_G_LENGTH (10) ++#define PDP_GAMMA11_GB_GAMMA11_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA11_GB, GAMMA11_B ++*/ ++#define PDP_GAMMA11_GB_GAMMA11_B_MASK (0x000003FF) ++#define PDP_GAMMA11_GB_GAMMA11_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA11_GB_GAMMA11_B_SHIFT (0) ++#define PDP_GAMMA11_GB_GAMMA11_B_LENGTH (10) ++#define PDP_GAMMA11_GB_GAMMA11_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA12_R_OFFSET (0x0814) ++ ++/* PDP, GAMMA12_R, GAMMA12_R ++*/ ++#define PDP_GAMMA12_R_GAMMA12_R_MASK (0x000003FF) ++#define PDP_GAMMA12_R_GAMMA12_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA12_R_GAMMA12_R_SHIFT (0) ++#define PDP_GAMMA12_R_GAMMA12_R_LENGTH (10) ++#define PDP_GAMMA12_R_GAMMA12_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA12_GB_OFFSET (0x0818) ++ ++/* PDP, GAMMA12_GB, GAMMA12_G ++*/ ++#define PDP_GAMMA12_GB_GAMMA12_G_MASK (0x03FF0000) ++#define PDP_GAMMA12_GB_GAMMA12_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA12_GB_GAMMA12_G_SHIFT (16) ++#define PDP_GAMMA12_GB_GAMMA12_G_LENGTH (10) ++#define PDP_GAMMA12_GB_GAMMA12_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA12_GB, GAMMA12_B ++*/ ++#define PDP_GAMMA12_GB_GAMMA12_B_MASK (0x000003FF) ++#define PDP_GAMMA12_GB_GAMMA12_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA12_GB_GAMMA12_B_SHIFT (0) ++#define PDP_GAMMA12_GB_GAMMA12_B_LENGTH (10) ++#define PDP_GAMMA12_GB_GAMMA12_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA13_R_OFFSET (0x081C) ++ ++/* PDP, GAMMA13_R, GAMMA13_R ++*/ ++#define PDP_GAMMA13_R_GAMMA13_R_MASK (0x000003FF) ++#define PDP_GAMMA13_R_GAMMA13_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA13_R_GAMMA13_R_SHIFT (0) ++#define PDP_GAMMA13_R_GAMMA13_R_LENGTH (10) ++#define PDP_GAMMA13_R_GAMMA13_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA13_GB_OFFSET (0x0820) ++ ++/* PDP, GAMMA13_GB, GAMMA13_G ++*/ ++#define PDP_GAMMA13_GB_GAMMA13_G_MASK (0x03FF0000) ++#define PDP_GAMMA13_GB_GAMMA13_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA13_GB_GAMMA13_G_SHIFT (16) ++#define PDP_GAMMA13_GB_GAMMA13_G_LENGTH (10) ++#define PDP_GAMMA13_GB_GAMMA13_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA13_GB, GAMMA13_B ++*/ ++#define PDP_GAMMA13_GB_GAMMA13_B_MASK (0x000003FF) ++#define PDP_GAMMA13_GB_GAMMA13_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA13_GB_GAMMA13_B_SHIFT (0) ++#define PDP_GAMMA13_GB_GAMMA13_B_LENGTH (10) ++#define PDP_GAMMA13_GB_GAMMA13_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA14_R_OFFSET (0x0824) ++ ++/* PDP, GAMMA14_R, GAMMA14_R ++*/ ++#define PDP_GAMMA14_R_GAMMA14_R_MASK 
(0x000003FF) ++#define PDP_GAMMA14_R_GAMMA14_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA14_R_GAMMA14_R_SHIFT (0) ++#define PDP_GAMMA14_R_GAMMA14_R_LENGTH (10) ++#define PDP_GAMMA14_R_GAMMA14_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA14_GB_OFFSET (0x0828) ++ ++/* PDP, GAMMA14_GB, GAMMA14_G ++*/ ++#define PDP_GAMMA14_GB_GAMMA14_G_MASK (0x03FF0000) ++#define PDP_GAMMA14_GB_GAMMA14_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA14_GB_GAMMA14_G_SHIFT (16) ++#define PDP_GAMMA14_GB_GAMMA14_G_LENGTH (10) ++#define PDP_GAMMA14_GB_GAMMA14_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA14_GB, GAMMA14_B ++*/ ++#define PDP_GAMMA14_GB_GAMMA14_B_MASK (0x000003FF) ++#define PDP_GAMMA14_GB_GAMMA14_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA14_GB_GAMMA14_B_SHIFT (0) ++#define PDP_GAMMA14_GB_GAMMA14_B_LENGTH (10) ++#define PDP_GAMMA14_GB_GAMMA14_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA15_R_OFFSET (0x082C) ++ ++/* PDP, GAMMA15_R, GAMMA15_R ++*/ ++#define PDP_GAMMA15_R_GAMMA15_R_MASK (0x000003FF) ++#define PDP_GAMMA15_R_GAMMA15_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA15_R_GAMMA15_R_SHIFT (0) ++#define PDP_GAMMA15_R_GAMMA15_R_LENGTH (10) ++#define PDP_GAMMA15_R_GAMMA15_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA15_GB_OFFSET (0x0830) ++ ++/* PDP, GAMMA15_GB, GAMMA15_G ++*/ ++#define PDP_GAMMA15_GB_GAMMA15_G_MASK (0x03FF0000) ++#define PDP_GAMMA15_GB_GAMMA15_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA15_GB_GAMMA15_G_SHIFT (16) ++#define PDP_GAMMA15_GB_GAMMA15_G_LENGTH (10) ++#define PDP_GAMMA15_GB_GAMMA15_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA15_GB, GAMMA15_B ++*/ ++#define PDP_GAMMA15_GB_GAMMA15_B_MASK (0x000003FF) ++#define PDP_GAMMA15_GB_GAMMA15_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA15_GB_GAMMA15_B_SHIFT (0) ++#define PDP_GAMMA15_GB_GAMMA15_B_LENGTH (10) ++#define PDP_GAMMA15_GB_GAMMA15_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA16_R_OFFSET (0x0834) ++ ++/* PDP, GAMMA16_R, GAMMA16_R ++*/ ++#define PDP_GAMMA16_R_GAMMA16_R_MASK (0x000003FF) ++#define PDP_GAMMA16_R_GAMMA16_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA16_R_GAMMA16_R_SHIFT (0) ++#define PDP_GAMMA16_R_GAMMA16_R_LENGTH (10) ++#define PDP_GAMMA16_R_GAMMA16_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA16_GB_OFFSET (0x0838) ++ ++/* PDP, GAMMA16_GB, GAMMA16_G ++*/ ++#define PDP_GAMMA16_GB_GAMMA16_G_MASK (0x03FF0000) ++#define PDP_GAMMA16_GB_GAMMA16_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA16_GB_GAMMA16_G_SHIFT (16) ++#define PDP_GAMMA16_GB_GAMMA16_G_LENGTH (10) ++#define PDP_GAMMA16_GB_GAMMA16_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA16_GB, GAMMA16_B ++*/ ++#define PDP_GAMMA16_GB_GAMMA16_B_MASK (0x000003FF) ++#define PDP_GAMMA16_GB_GAMMA16_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA16_GB_GAMMA16_B_SHIFT (0) ++#define PDP_GAMMA16_GB_GAMMA16_B_LENGTH (10) ++#define PDP_GAMMA16_GB_GAMMA16_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA17_R_OFFSET (0x083C) ++ ++/* PDP, GAMMA17_R, GAMMA17_R ++*/ ++#define PDP_GAMMA17_R_GAMMA17_R_MASK (0x000003FF) ++#define PDP_GAMMA17_R_GAMMA17_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA17_R_GAMMA17_R_SHIFT (0) ++#define PDP_GAMMA17_R_GAMMA17_R_LENGTH (10) ++#define PDP_GAMMA17_R_GAMMA17_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA17_GB_OFFSET (0x0840) ++ ++/* PDP, GAMMA17_GB, GAMMA17_G ++*/ ++#define PDP_GAMMA17_GB_GAMMA17_G_MASK (0x03FF0000) ++#define PDP_GAMMA17_GB_GAMMA17_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA17_GB_GAMMA17_G_SHIFT (16) ++#define PDP_GAMMA17_GB_GAMMA17_G_LENGTH (10) ++#define PDP_GAMMA17_GB_GAMMA17_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA17_GB, GAMMA17_B ++*/ ++#define 
PDP_GAMMA17_GB_GAMMA17_B_MASK (0x000003FF) ++#define PDP_GAMMA17_GB_GAMMA17_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA17_GB_GAMMA17_B_SHIFT (0) ++#define PDP_GAMMA17_GB_GAMMA17_B_LENGTH (10) ++#define PDP_GAMMA17_GB_GAMMA17_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA18_R_OFFSET (0x0844) ++ ++/* PDP, GAMMA18_R, GAMMA18_R ++*/ ++#define PDP_GAMMA18_R_GAMMA18_R_MASK (0x000003FF) ++#define PDP_GAMMA18_R_GAMMA18_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA18_R_GAMMA18_R_SHIFT (0) ++#define PDP_GAMMA18_R_GAMMA18_R_LENGTH (10) ++#define PDP_GAMMA18_R_GAMMA18_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA18_GB_OFFSET (0x0848) ++ ++/* PDP, GAMMA18_GB, GAMMA18_G ++*/ ++#define PDP_GAMMA18_GB_GAMMA18_G_MASK (0x03FF0000) ++#define PDP_GAMMA18_GB_GAMMA18_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA18_GB_GAMMA18_G_SHIFT (16) ++#define PDP_GAMMA18_GB_GAMMA18_G_LENGTH (10) ++#define PDP_GAMMA18_GB_GAMMA18_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA18_GB, GAMMA18_B ++*/ ++#define PDP_GAMMA18_GB_GAMMA18_B_MASK (0x000003FF) ++#define PDP_GAMMA18_GB_GAMMA18_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA18_GB_GAMMA18_B_SHIFT (0) ++#define PDP_GAMMA18_GB_GAMMA18_B_LENGTH (10) ++#define PDP_GAMMA18_GB_GAMMA18_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA19_R_OFFSET (0x084C) ++ ++/* PDP, GAMMA19_R, GAMMA19_R ++*/ ++#define PDP_GAMMA19_R_GAMMA19_R_MASK (0x000003FF) ++#define PDP_GAMMA19_R_GAMMA19_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA19_R_GAMMA19_R_SHIFT (0) ++#define PDP_GAMMA19_R_GAMMA19_R_LENGTH (10) ++#define PDP_GAMMA19_R_GAMMA19_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA19_GB_OFFSET (0x0850) ++ ++/* PDP, GAMMA19_GB, GAMMA19_G ++*/ ++#define PDP_GAMMA19_GB_GAMMA19_G_MASK (0x03FF0000) ++#define PDP_GAMMA19_GB_GAMMA19_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA19_GB_GAMMA19_G_SHIFT (16) ++#define PDP_GAMMA19_GB_GAMMA19_G_LENGTH (10) ++#define PDP_GAMMA19_GB_GAMMA19_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA19_GB, GAMMA19_B ++*/ ++#define PDP_GAMMA19_GB_GAMMA19_B_MASK (0x000003FF) ++#define PDP_GAMMA19_GB_GAMMA19_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA19_GB_GAMMA19_B_SHIFT (0) ++#define PDP_GAMMA19_GB_GAMMA19_B_LENGTH (10) ++#define PDP_GAMMA19_GB_GAMMA19_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA20_R_OFFSET (0x0854) ++ ++/* PDP, GAMMA20_R, GAMMA20_R ++*/ ++#define PDP_GAMMA20_R_GAMMA20_R_MASK (0x000003FF) ++#define PDP_GAMMA20_R_GAMMA20_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA20_R_GAMMA20_R_SHIFT (0) ++#define PDP_GAMMA20_R_GAMMA20_R_LENGTH (10) ++#define PDP_GAMMA20_R_GAMMA20_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA20_GB_OFFSET (0x0858) ++ ++/* PDP, GAMMA20_GB, GAMMA20_G ++*/ ++#define PDP_GAMMA20_GB_GAMMA20_G_MASK (0x03FF0000) ++#define PDP_GAMMA20_GB_GAMMA20_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA20_GB_GAMMA20_G_SHIFT (16) ++#define PDP_GAMMA20_GB_GAMMA20_G_LENGTH (10) ++#define PDP_GAMMA20_GB_GAMMA20_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA20_GB, GAMMA20_B ++*/ ++#define PDP_GAMMA20_GB_GAMMA20_B_MASK (0x000003FF) ++#define PDP_GAMMA20_GB_GAMMA20_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA20_GB_GAMMA20_B_SHIFT (0) ++#define PDP_GAMMA20_GB_GAMMA20_B_LENGTH (10) ++#define PDP_GAMMA20_GB_GAMMA20_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA21_R_OFFSET (0x085C) ++ ++/* PDP, GAMMA21_R, GAMMA21_R ++*/ ++#define PDP_GAMMA21_R_GAMMA21_R_MASK (0x000003FF) ++#define PDP_GAMMA21_R_GAMMA21_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA21_R_GAMMA21_R_SHIFT (0) ++#define PDP_GAMMA21_R_GAMMA21_R_LENGTH (10) ++#define PDP_GAMMA21_R_GAMMA21_R_SIGNED_FIELD IMG_FALSE ++ ++#define 
PDP_GAMMA21_GB_OFFSET (0x0860) ++ ++/* PDP, GAMMA21_GB, GAMMA21_G ++*/ ++#define PDP_GAMMA21_GB_GAMMA21_G_MASK (0x03FF0000) ++#define PDP_GAMMA21_GB_GAMMA21_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA21_GB_GAMMA21_G_SHIFT (16) ++#define PDP_GAMMA21_GB_GAMMA21_G_LENGTH (10) ++#define PDP_GAMMA21_GB_GAMMA21_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA21_GB, GAMMA21_B ++*/ ++#define PDP_GAMMA21_GB_GAMMA21_B_MASK (0x000003FF) ++#define PDP_GAMMA21_GB_GAMMA21_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA21_GB_GAMMA21_B_SHIFT (0) ++#define PDP_GAMMA21_GB_GAMMA21_B_LENGTH (10) ++#define PDP_GAMMA21_GB_GAMMA21_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA22_R_OFFSET (0x0864) ++ ++/* PDP, GAMMA22_R, GAMMA22_R ++*/ ++#define PDP_GAMMA22_R_GAMMA22_R_MASK (0x000003FF) ++#define PDP_GAMMA22_R_GAMMA22_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA22_R_GAMMA22_R_SHIFT (0) ++#define PDP_GAMMA22_R_GAMMA22_R_LENGTH (10) ++#define PDP_GAMMA22_R_GAMMA22_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA22_GB_OFFSET (0x0868) ++ ++/* PDP, GAMMA22_GB, GAMMA22_G ++*/ ++#define PDP_GAMMA22_GB_GAMMA22_G_MASK (0x03FF0000) ++#define PDP_GAMMA22_GB_GAMMA22_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA22_GB_GAMMA22_G_SHIFT (16) ++#define PDP_GAMMA22_GB_GAMMA22_G_LENGTH (10) ++#define PDP_GAMMA22_GB_GAMMA22_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA22_GB, GAMMA22_B ++*/ ++#define PDP_GAMMA22_GB_GAMMA22_B_MASK (0x000003FF) ++#define PDP_GAMMA22_GB_GAMMA22_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA22_GB_GAMMA22_B_SHIFT (0) ++#define PDP_GAMMA22_GB_GAMMA22_B_LENGTH (10) ++#define PDP_GAMMA22_GB_GAMMA22_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA23_R_OFFSET (0x086C) ++ ++/* PDP, GAMMA23_R, GAMMA23_R ++*/ ++#define PDP_GAMMA23_R_GAMMA23_R_MASK (0x000003FF) ++#define PDP_GAMMA23_R_GAMMA23_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA23_R_GAMMA23_R_SHIFT (0) ++#define PDP_GAMMA23_R_GAMMA23_R_LENGTH (10) ++#define PDP_GAMMA23_R_GAMMA23_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA23_GB_OFFSET (0x0870) ++ ++/* PDP, GAMMA23_GB, GAMMA23_G ++*/ ++#define PDP_GAMMA23_GB_GAMMA23_G_MASK (0x03FF0000) ++#define PDP_GAMMA23_GB_GAMMA23_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA23_GB_GAMMA23_G_SHIFT (16) ++#define PDP_GAMMA23_GB_GAMMA23_G_LENGTH (10) ++#define PDP_GAMMA23_GB_GAMMA23_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA23_GB, GAMMA23_B ++*/ ++#define PDP_GAMMA23_GB_GAMMA23_B_MASK (0x000003FF) ++#define PDP_GAMMA23_GB_GAMMA23_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA23_GB_GAMMA23_B_SHIFT (0) ++#define PDP_GAMMA23_GB_GAMMA23_B_LENGTH (10) ++#define PDP_GAMMA23_GB_GAMMA23_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA24_R_OFFSET (0x0874) ++ ++/* PDP, GAMMA24_R, GAMMA24_R ++*/ ++#define PDP_GAMMA24_R_GAMMA24_R_MASK (0x000003FF) ++#define PDP_GAMMA24_R_GAMMA24_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA24_R_GAMMA24_R_SHIFT (0) ++#define PDP_GAMMA24_R_GAMMA24_R_LENGTH (10) ++#define PDP_GAMMA24_R_GAMMA24_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA24_GB_OFFSET (0x0878) ++ ++/* PDP, GAMMA24_GB, GAMMA24_G ++*/ ++#define PDP_GAMMA24_GB_GAMMA24_G_MASK (0x03FF0000) ++#define PDP_GAMMA24_GB_GAMMA24_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA24_GB_GAMMA24_G_SHIFT (16) ++#define PDP_GAMMA24_GB_GAMMA24_G_LENGTH (10) ++#define PDP_GAMMA24_GB_GAMMA24_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA24_GB, GAMMA24_B ++*/ ++#define PDP_GAMMA24_GB_GAMMA24_B_MASK (0x000003FF) ++#define PDP_GAMMA24_GB_GAMMA24_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA24_GB_GAMMA24_B_SHIFT (0) ++#define PDP_GAMMA24_GB_GAMMA24_B_LENGTH (10) ++#define 
PDP_GAMMA24_GB_GAMMA24_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA25_R_OFFSET (0x087C) ++ ++/* PDP, GAMMA25_R, GAMMA25_R ++*/ ++#define PDP_GAMMA25_R_GAMMA25_R_MASK (0x000003FF) ++#define PDP_GAMMA25_R_GAMMA25_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA25_R_GAMMA25_R_SHIFT (0) ++#define PDP_GAMMA25_R_GAMMA25_R_LENGTH (10) ++#define PDP_GAMMA25_R_GAMMA25_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA25_GB_OFFSET (0x0880) ++ ++/* PDP, GAMMA25_GB, GAMMA25_G ++*/ ++#define PDP_GAMMA25_GB_GAMMA25_G_MASK (0x03FF0000) ++#define PDP_GAMMA25_GB_GAMMA25_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA25_GB_GAMMA25_G_SHIFT (16) ++#define PDP_GAMMA25_GB_GAMMA25_G_LENGTH (10) ++#define PDP_GAMMA25_GB_GAMMA25_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA25_GB, GAMMA25_B ++*/ ++#define PDP_GAMMA25_GB_GAMMA25_B_MASK (0x000003FF) ++#define PDP_GAMMA25_GB_GAMMA25_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA25_GB_GAMMA25_B_SHIFT (0) ++#define PDP_GAMMA25_GB_GAMMA25_B_LENGTH (10) ++#define PDP_GAMMA25_GB_GAMMA25_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA26_R_OFFSET (0x0884) ++ ++/* PDP, GAMMA26_R, GAMMA26_R ++*/ ++#define PDP_GAMMA26_R_GAMMA26_R_MASK (0x000003FF) ++#define PDP_GAMMA26_R_GAMMA26_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA26_R_GAMMA26_R_SHIFT (0) ++#define PDP_GAMMA26_R_GAMMA26_R_LENGTH (10) ++#define PDP_GAMMA26_R_GAMMA26_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA26_GB_OFFSET (0x0888) ++ ++/* PDP, GAMMA26_GB, GAMMA26_G ++*/ ++#define PDP_GAMMA26_GB_GAMMA26_G_MASK (0x03FF0000) ++#define PDP_GAMMA26_GB_GAMMA26_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA26_GB_GAMMA26_G_SHIFT (16) ++#define PDP_GAMMA26_GB_GAMMA26_G_LENGTH (10) ++#define PDP_GAMMA26_GB_GAMMA26_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA26_GB, GAMMA26_B ++*/ ++#define PDP_GAMMA26_GB_GAMMA26_B_MASK (0x000003FF) ++#define PDP_GAMMA26_GB_GAMMA26_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA26_GB_GAMMA26_B_SHIFT (0) ++#define PDP_GAMMA26_GB_GAMMA26_B_LENGTH (10) ++#define PDP_GAMMA26_GB_GAMMA26_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA27_R_OFFSET (0x088C) ++ ++/* PDP, GAMMA27_R, GAMMA27_R ++*/ ++#define PDP_GAMMA27_R_GAMMA27_R_MASK (0x000003FF) ++#define PDP_GAMMA27_R_GAMMA27_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA27_R_GAMMA27_R_SHIFT (0) ++#define PDP_GAMMA27_R_GAMMA27_R_LENGTH (10) ++#define PDP_GAMMA27_R_GAMMA27_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA27_GB_OFFSET (0x0890) ++ ++/* PDP, GAMMA27_GB, GAMMA27_G ++*/ ++#define PDP_GAMMA27_GB_GAMMA27_G_MASK (0x03FF0000) ++#define PDP_GAMMA27_GB_GAMMA27_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA27_GB_GAMMA27_G_SHIFT (16) ++#define PDP_GAMMA27_GB_GAMMA27_G_LENGTH (10) ++#define PDP_GAMMA27_GB_GAMMA27_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA27_GB, GAMMA27_B ++*/ ++#define PDP_GAMMA27_GB_GAMMA27_B_MASK (0x000003FF) ++#define PDP_GAMMA27_GB_GAMMA27_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA27_GB_GAMMA27_B_SHIFT (0) ++#define PDP_GAMMA27_GB_GAMMA27_B_LENGTH (10) ++#define PDP_GAMMA27_GB_GAMMA27_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA28_R_OFFSET (0x0894) ++ ++/* PDP, GAMMA28_R, GAMMA28_R ++*/ ++#define PDP_GAMMA28_R_GAMMA28_R_MASK (0x000003FF) ++#define PDP_GAMMA28_R_GAMMA28_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA28_R_GAMMA28_R_SHIFT (0) ++#define PDP_GAMMA28_R_GAMMA28_R_LENGTH (10) ++#define PDP_GAMMA28_R_GAMMA28_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA28_GB_OFFSET (0x0898) ++ ++/* PDP, GAMMA28_GB, GAMMA28_G ++*/ ++#define PDP_GAMMA28_GB_GAMMA28_G_MASK (0x03FF0000) ++#define PDP_GAMMA28_GB_GAMMA28_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA28_GB_GAMMA28_G_SHIFT 
(16) ++#define PDP_GAMMA28_GB_GAMMA28_G_LENGTH (10) ++#define PDP_GAMMA28_GB_GAMMA28_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA28_GB, GAMMA28_B ++*/ ++#define PDP_GAMMA28_GB_GAMMA28_B_MASK (0x000003FF) ++#define PDP_GAMMA28_GB_GAMMA28_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA28_GB_GAMMA28_B_SHIFT (0) ++#define PDP_GAMMA28_GB_GAMMA28_B_LENGTH (10) ++#define PDP_GAMMA28_GB_GAMMA28_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA29_R_OFFSET (0x089C) ++ ++/* PDP, GAMMA29_R, GAMMA29_R ++*/ ++#define PDP_GAMMA29_R_GAMMA29_R_MASK (0x000003FF) ++#define PDP_GAMMA29_R_GAMMA29_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA29_R_GAMMA29_R_SHIFT (0) ++#define PDP_GAMMA29_R_GAMMA29_R_LENGTH (10) ++#define PDP_GAMMA29_R_GAMMA29_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA29_GB_OFFSET (0x08A0) ++ ++/* PDP, GAMMA29_GB, GAMMA29_G ++*/ ++#define PDP_GAMMA29_GB_GAMMA29_G_MASK (0x03FF0000) ++#define PDP_GAMMA29_GB_GAMMA29_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA29_GB_GAMMA29_G_SHIFT (16) ++#define PDP_GAMMA29_GB_GAMMA29_G_LENGTH (10) ++#define PDP_GAMMA29_GB_GAMMA29_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA29_GB, GAMMA29_B ++*/ ++#define PDP_GAMMA29_GB_GAMMA29_B_MASK (0x000003FF) ++#define PDP_GAMMA29_GB_GAMMA29_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA29_GB_GAMMA29_B_SHIFT (0) ++#define PDP_GAMMA29_GB_GAMMA29_B_LENGTH (10) ++#define PDP_GAMMA29_GB_GAMMA29_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA30_R_OFFSET (0x08A4) ++ ++/* PDP, GAMMA30_R, GAMMA30_R ++*/ ++#define PDP_GAMMA30_R_GAMMA30_R_MASK (0x000003FF) ++#define PDP_GAMMA30_R_GAMMA30_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA30_R_GAMMA30_R_SHIFT (0) ++#define PDP_GAMMA30_R_GAMMA30_R_LENGTH (10) ++#define PDP_GAMMA30_R_GAMMA30_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA30_GB_OFFSET (0x08A8) ++ ++/* PDP, GAMMA30_GB, GAMMA30_G ++*/ ++#define PDP_GAMMA30_GB_GAMMA30_G_MASK (0x03FF0000) ++#define PDP_GAMMA30_GB_GAMMA30_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA30_GB_GAMMA30_G_SHIFT (16) ++#define PDP_GAMMA30_GB_GAMMA30_G_LENGTH (10) ++#define PDP_GAMMA30_GB_GAMMA30_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA30_GB, GAMMA30_B ++*/ ++#define PDP_GAMMA30_GB_GAMMA30_B_MASK (0x000003FF) ++#define PDP_GAMMA30_GB_GAMMA30_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA30_GB_GAMMA30_B_SHIFT (0) ++#define PDP_GAMMA30_GB_GAMMA30_B_LENGTH (10) ++#define PDP_GAMMA30_GB_GAMMA30_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA31_R_OFFSET (0x08AC) ++ ++/* PDP, GAMMA31_R, GAMMA31_R ++*/ ++#define PDP_GAMMA31_R_GAMMA31_R_MASK (0x000003FF) ++#define PDP_GAMMA31_R_GAMMA31_R_LSBMASK (0x000003FF) ++#define PDP_GAMMA31_R_GAMMA31_R_SHIFT (0) ++#define PDP_GAMMA31_R_GAMMA31_R_LENGTH (10) ++#define PDP_GAMMA31_R_GAMMA31_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA31_GB_OFFSET (0x08B0) ++ ++/* PDP, GAMMA31_GB, GAMMA31_G ++*/ ++#define PDP_GAMMA31_GB_GAMMA31_G_MASK (0x03FF0000) ++#define PDP_GAMMA31_GB_GAMMA31_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA31_GB_GAMMA31_G_SHIFT (16) ++#define PDP_GAMMA31_GB_GAMMA31_G_LENGTH (10) ++#define PDP_GAMMA31_GB_GAMMA31_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA31_GB, GAMMA31_B ++*/ ++#define PDP_GAMMA31_GB_GAMMA31_B_MASK (0x000003FF) ++#define PDP_GAMMA31_GB_GAMMA31_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA31_GB_GAMMA31_B_SHIFT (0) ++#define PDP_GAMMA31_GB_GAMMA31_B_LENGTH (10) ++#define PDP_GAMMA31_GB_GAMMA31_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA32_R_OFFSET (0x08B4) ++ ++/* PDP, GAMMA32_R, GAMMA32_R ++*/ ++#define PDP_GAMMA32_R_GAMMA32_R_MASK (0x000003FF) ++#define PDP_GAMMA32_R_GAMMA32_R_LSBMASK (0x000003FF) ++#define 
PDP_GAMMA32_R_GAMMA32_R_SHIFT (0) ++#define PDP_GAMMA32_R_GAMMA32_R_LENGTH (10) ++#define PDP_GAMMA32_R_GAMMA32_R_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GAMMA32_GB_OFFSET (0x08B8) ++ ++/* PDP, GAMMA32_GB, GAMMA32_G ++*/ ++#define PDP_GAMMA32_GB_GAMMA32_G_MASK (0x03FF0000) ++#define PDP_GAMMA32_GB_GAMMA32_G_LSBMASK (0x000003FF) ++#define PDP_GAMMA32_GB_GAMMA32_G_SHIFT (16) ++#define PDP_GAMMA32_GB_GAMMA32_G_LENGTH (10) ++#define PDP_GAMMA32_GB_GAMMA32_G_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GAMMA32_GB, GAMMA32_B ++*/ ++#define PDP_GAMMA32_GB_GAMMA32_B_MASK (0x000003FF) ++#define PDP_GAMMA32_GB_GAMMA32_B_LSBMASK (0x000003FF) ++#define PDP_GAMMA32_GB_GAMMA32_B_SHIFT (0) ++#define PDP_GAMMA32_GB_GAMMA32_B_LENGTH (10) ++#define PDP_GAMMA32_GB_GAMMA32_B_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VEVENT_OFFSET (0x08BC) ++ ++/* PDP, VEVENT, VEVENT ++*/ ++#define PDP_VEVENT_VEVENT_MASK (0x1FFF0000) ++#define PDP_VEVENT_VEVENT_LSBMASK (0x00001FFF) ++#define PDP_VEVENT_VEVENT_SHIFT (16) ++#define PDP_VEVENT_VEVENT_LENGTH (13) ++#define PDP_VEVENT_VEVENT_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VEVENT, VFETCH ++*/ ++#define PDP_VEVENT_VFETCH_MASK (0x00001FFF) ++#define PDP_VEVENT_VFETCH_LSBMASK (0x00001FFF) ++#define PDP_VEVENT_VFETCH_SHIFT (0) ++#define PDP_VEVENT_VFETCH_LENGTH (13) ++#define PDP_VEVENT_VFETCH_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_HDECTRL_OFFSET (0x08C0) ++ ++/* PDP, HDECTRL, HDES ++*/ ++#define PDP_HDECTRL_HDES_MASK (0x1FFF0000) ++#define PDP_HDECTRL_HDES_LSBMASK (0x00001FFF) ++#define PDP_HDECTRL_HDES_SHIFT (16) ++#define PDP_HDECTRL_HDES_LENGTH (13) ++#define PDP_HDECTRL_HDES_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, HDECTRL, HDEF ++*/ ++#define PDP_HDECTRL_HDEF_MASK (0x00001FFF) ++#define PDP_HDECTRL_HDEF_LSBMASK (0x00001FFF) ++#define PDP_HDECTRL_HDEF_SHIFT (0) ++#define PDP_HDECTRL_HDEF_LENGTH (13) ++#define PDP_HDECTRL_HDEF_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VDECTRL_OFFSET (0x08C4) ++ ++/* PDP, VDECTRL, VDES ++*/ ++#define PDP_VDECTRL_VDES_MASK (0x1FFF0000) ++#define PDP_VDECTRL_VDES_LSBMASK (0x00001FFF) ++#define PDP_VDECTRL_VDES_SHIFT (16) ++#define PDP_VDECTRL_VDES_LENGTH (13) ++#define PDP_VDECTRL_VDES_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VDECTRL, VDEF ++*/ ++#define PDP_VDECTRL_VDEF_MASK (0x00001FFF) ++#define PDP_VDECTRL_VDEF_LSBMASK (0x00001FFF) ++#define PDP_VDECTRL_VDEF_SHIFT (0) ++#define PDP_VDECTRL_VDEF_LENGTH (13) ++#define PDP_VDECTRL_VDEF_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_OPMASK_R_OFFSET (0x08C8) ++ ++/* PDP, OPMASK_R, MASKLEVEL ++*/ ++#define PDP_OPMASK_R_MASKLEVEL_MASK (0x80000000) ++#define PDP_OPMASK_R_MASKLEVEL_LSBMASK (0x00000001) ++#define PDP_OPMASK_R_MASKLEVEL_SHIFT (31) ++#define PDP_OPMASK_R_MASKLEVEL_LENGTH (1) ++#define PDP_OPMASK_R_MASKLEVEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OPMASK_R, BLANKLEVEL ++*/ ++#define PDP_OPMASK_R_BLANKLEVEL_MASK (0x40000000) ++#define PDP_OPMASK_R_BLANKLEVEL_LSBMASK (0x00000001) ++#define PDP_OPMASK_R_BLANKLEVEL_SHIFT (30) ++#define PDP_OPMASK_R_BLANKLEVEL_LENGTH (1) ++#define PDP_OPMASK_R_BLANKLEVEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OPMASK_R, MASKR ++*/ ++#define PDP_OPMASK_R_MASKR_MASK (0x000003FF) ++#define PDP_OPMASK_R_MASKR_LSBMASK (0x000003FF) ++#define PDP_OPMASK_R_MASKR_SHIFT (0) ++#define PDP_OPMASK_R_MASKR_LENGTH (10) ++#define PDP_OPMASK_R_MASKR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_OPMASK_GB_OFFSET (0x08CC) ++ ++/* PDP, OPMASK_GB, MASKG ++*/ ++#define PDP_OPMASK_GB_MASKG_MASK (0x03FF0000) ++#define PDP_OPMASK_GB_MASKG_LSBMASK (0x000003FF) ++#define PDP_OPMASK_GB_MASKG_SHIFT (16) ++#define 
PDP_OPMASK_GB_MASKG_LENGTH (10) ++#define PDP_OPMASK_GB_MASKG_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, OPMASK_GB, MASKB ++*/ ++#define PDP_OPMASK_GB_MASKB_MASK (0x000003FF) ++#define PDP_OPMASK_GB_MASKB_LSBMASK (0x000003FF) ++#define PDP_OPMASK_GB_MASKB_SHIFT (0) ++#define PDP_OPMASK_GB_MASKB_LENGTH (10) ++#define PDP_OPMASK_GB_MASKB_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_REGLD_ADDR_CTRL_OFFSET (0x08D0) ++ ++/* PDP, REGLD_ADDR_CTRL, REGLD_ADDRIN ++*/ ++#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_MASK (0xFFFFFFF0) ++#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LSBMASK (0x0FFFFFFF) ++#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SHIFT (4) ++#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_LENGTH (28) ++#define PDP_REGLD_ADDR_CTRL_REGLD_ADDRIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_REGLD_ADDR_STAT_OFFSET (0x08D4) ++ ++/* PDP, REGLD_ADDR_STAT, REGLD_ADDROUT ++*/ ++#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_MASK (0xFFFFFFF0) ++#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LSBMASK (0x0FFFFFFF) ++#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SHIFT (4) ++#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_LENGTH (28) ++#define PDP_REGLD_ADDR_STAT_REGLD_ADDROUT_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_REGLD_STAT_OFFSET (0x08D8) ++ ++/* PDP, REGLD_STAT, REGLD_ADDREN ++*/ ++#define PDP_REGLD_STAT_REGLD_ADDREN_MASK (0x00800000) ++#define PDP_REGLD_STAT_REGLD_ADDREN_LSBMASK (0x00000001) ++#define PDP_REGLD_STAT_REGLD_ADDREN_SHIFT (23) ++#define PDP_REGLD_STAT_REGLD_ADDREN_LENGTH (1) ++#define PDP_REGLD_STAT_REGLD_ADDREN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_REGLD_CTRL_OFFSET (0x08DC) ++ ++/* PDP, REGLD_CTRL, REGLD_ADDRLEN ++*/ ++#define PDP_REGLD_CTRL_REGLD_ADDRLEN_MASK (0xFF000000) ++#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LSBMASK (0x000000FF) ++#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SHIFT (24) ++#define PDP_REGLD_CTRL_REGLD_ADDRLEN_LENGTH (8) ++#define PDP_REGLD_CTRL_REGLD_ADDRLEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, REGLD_CTRL, REGLD_VAL ++*/ ++#define PDP_REGLD_CTRL_REGLD_VAL_MASK (0x00800000) ++#define PDP_REGLD_CTRL_REGLD_VAL_LSBMASK (0x00000001) ++#define PDP_REGLD_CTRL_REGLD_VAL_SHIFT (23) ++#define PDP_REGLD_CTRL_REGLD_VAL_LENGTH (1) ++#define PDP_REGLD_CTRL_REGLD_VAL_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_UPDCTRL_OFFSET (0x08E0) ++ ++/* PDP, UPDCTRL, UPDFIELD ++*/ ++#define PDP_UPDCTRL_UPDFIELD_MASK (0x00000001) ++#define PDP_UPDCTRL_UPDFIELD_LSBMASK (0x00000001) ++#define PDP_UPDCTRL_UPDFIELD_SHIFT (0) ++#define PDP_UPDCTRL_UPDFIELD_LENGTH (1) ++#define PDP_UPDCTRL_UPDFIELD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_INTCTRL_OFFSET (0x08E4) ++ ++/* PDP, PVR_PDP_INTCTRL, HBLNK_LINE ++*/ ++#define PDP_INTCTRL_HBLNK_LINE_MASK (0x00010000) ++#define PDP_INTCTRL_HBLNK_LINE_LSBMASK (0x00000001) ++#define PDP_INTCTRL_HBLNK_LINE_SHIFT (16) ++#define PDP_INTCTRL_HBLNK_LINE_LENGTH (1) ++#define PDP_INTCTRL_HBLNK_LINE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PVR_PDP_INTCTRL, HBLNK_LINENO ++*/ ++#define PDP_INTCTRL_HBLNK_LINENO_MASK (0x00001FFF) ++#define PDP_INTCTRL_HBLNK_LINENO_LSBMASK (0x00001FFF) ++#define PDP_INTCTRL_HBLNK_LINENO_SHIFT (0) ++#define PDP_INTCTRL_HBLNK_LINENO_LENGTH (13) ++#define PDP_INTCTRL_HBLNK_LINENO_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PDISETUP_OFFSET (0x0900) ++ ++/* PDP, PDISETUP, PDI_BLNKLVL ++*/ ++#define PDP_PDISETUP_PDI_BLNKLVL_MASK (0x00000040) ++#define PDP_PDISETUP_PDI_BLNKLVL_LSBMASK (0x00000001) ++#define PDP_PDISETUP_PDI_BLNKLVL_SHIFT (6) ++#define PDP_PDISETUP_PDI_BLNKLVL_LENGTH (1) ++#define PDP_PDISETUP_PDI_BLNKLVL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_BLNK ++*/ ++#define PDP_PDISETUP_PDI_BLNK_MASK 
(0x00000020) ++#define PDP_PDISETUP_PDI_BLNK_LSBMASK (0x00000001) ++#define PDP_PDISETUP_PDI_BLNK_SHIFT (5) ++#define PDP_PDISETUP_PDI_BLNK_LENGTH (1) ++#define PDP_PDISETUP_PDI_BLNK_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_PWR ++*/ ++#define PDP_PDISETUP_PDI_PWR_MASK (0x00000010) ++#define PDP_PDISETUP_PDI_PWR_LSBMASK (0x00000001) ++#define PDP_PDISETUP_PDI_PWR_SHIFT (4) ++#define PDP_PDISETUP_PDI_PWR_LENGTH (1) ++#define PDP_PDISETUP_PDI_PWR_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_EN ++*/ ++#define PDP_PDISETUP_PDI_EN_MASK (0x00000008) ++#define PDP_PDISETUP_PDI_EN_LSBMASK (0x00000001) ++#define PDP_PDISETUP_PDI_EN_SHIFT (3) ++#define PDP_PDISETUP_PDI_EN_LENGTH (1) ++#define PDP_PDISETUP_PDI_EN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_GDEN ++*/ ++#define PDP_PDISETUP_PDI_GDEN_MASK (0x00000004) ++#define PDP_PDISETUP_PDI_GDEN_LSBMASK (0x00000001) ++#define PDP_PDISETUP_PDI_GDEN_SHIFT (2) ++#define PDP_PDISETUP_PDI_GDEN_LENGTH (1) ++#define PDP_PDISETUP_PDI_GDEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_NFEN ++*/ ++#define PDP_PDISETUP_PDI_NFEN_MASK (0x00000002) ++#define PDP_PDISETUP_PDI_NFEN_LSBMASK (0x00000001) ++#define PDP_PDISETUP_PDI_NFEN_SHIFT (1) ++#define PDP_PDISETUP_PDI_NFEN_LENGTH (1) ++#define PDP_PDISETUP_PDI_NFEN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDISETUP, PDI_CR ++*/ ++#define PDP_PDISETUP_PDI_CR_MASK (0x00000001) ++#define PDP_PDISETUP_PDI_CR_LSBMASK (0x00000001) ++#define PDP_PDISETUP_PDI_CR_SHIFT (0) ++#define PDP_PDISETUP_PDI_CR_LENGTH (1) ++#define PDP_PDISETUP_PDI_CR_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PDITIMING0_OFFSET (0x0904) ++ ++/* PDP, PDITIMING0, PDI_PWRSVGD ++*/ ++#define PDP_PDITIMING0_PDI_PWRSVGD_MASK (0x0F000000) ++#define PDP_PDITIMING0_PDI_PWRSVGD_LSBMASK (0x0000000F) ++#define PDP_PDITIMING0_PDI_PWRSVGD_SHIFT (24) ++#define PDP_PDITIMING0_PDI_PWRSVGD_LENGTH (4) ++#define PDP_PDITIMING0_PDI_PWRSVGD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDITIMING0, PDI_LSDEL ++*/ ++#define PDP_PDITIMING0_PDI_LSDEL_MASK (0x007F0000) ++#define PDP_PDITIMING0_PDI_LSDEL_LSBMASK (0x0000007F) ++#define PDP_PDITIMING0_PDI_LSDEL_SHIFT (16) ++#define PDP_PDITIMING0_PDI_LSDEL_LENGTH (7) ++#define PDP_PDITIMING0_PDI_LSDEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDITIMING0, PDI_PWRSV2GD2 ++*/ ++#define PDP_PDITIMING0_PDI_PWRSV2GD2_MASK (0x000003FF) ++#define PDP_PDITIMING0_PDI_PWRSV2GD2_LSBMASK (0x000003FF) ++#define PDP_PDITIMING0_PDI_PWRSV2GD2_SHIFT (0) ++#define PDP_PDITIMING0_PDI_PWRSV2GD2_LENGTH (10) ++#define PDP_PDITIMING0_PDI_PWRSV2GD2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PDITIMING1_OFFSET (0x0908) ++ ++/* PDP, PDITIMING1, PDI_NLDEL ++*/ ++#define PDP_PDITIMING1_PDI_NLDEL_MASK (0x000F0000) ++#define PDP_PDITIMING1_PDI_NLDEL_LSBMASK (0x0000000F) ++#define PDP_PDITIMING1_PDI_NLDEL_SHIFT (16) ++#define PDP_PDITIMING1_PDI_NLDEL_LENGTH (4) ++#define PDP_PDITIMING1_PDI_NLDEL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDITIMING1, PDI_ACBDEL ++*/ ++#define PDP_PDITIMING1_PDI_ACBDEL_MASK (0x000003FF) ++#define PDP_PDITIMING1_PDI_ACBDEL_LSBMASK (0x000003FF) ++#define PDP_PDITIMING1_PDI_ACBDEL_SHIFT (0) ++#define PDP_PDITIMING1_PDI_ACBDEL_LENGTH (10) ++#define PDP_PDITIMING1_PDI_ACBDEL_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PDICOREID_OFFSET (0x090C) ++ ++/* PDP, PDICOREID, PDI_GROUP_ID ++*/ ++#define PDP_PDICOREID_PDI_GROUP_ID_MASK (0xFF000000) ++#define PDP_PDICOREID_PDI_GROUP_ID_LSBMASK (0x000000FF) ++#define PDP_PDICOREID_PDI_GROUP_ID_SHIFT (24) ++#define PDP_PDICOREID_PDI_GROUP_ID_LENGTH (8) ++#define PDP_PDICOREID_PDI_GROUP_ID_SIGNED_FIELD IMG_FALSE 
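++
++/* Illustrative usage sketch (assumed, not part of the original header;
++ * pdp_base stands for an ioremapped register base): a field is read by
++ * masking and shifting with its _MASK/_SHIFT pair, e.g.
++ *   u32 v = readl(pdp_base + PDP_PDICOREID_OFFSET);
++ *   u32 group = (v & PDP_PDICOREID_PDI_GROUP_ID_MASK) >>
++ *               PDP_PDICOREID_PDI_GROUP_ID_SHIFT;
++ */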
++ ++/* PDP, PDICOREID, PDI_CORE_ID ++*/ ++#define PDP_PDICOREID_PDI_CORE_ID_MASK (0x00FF0000) ++#define PDP_PDICOREID_PDI_CORE_ID_LSBMASK (0x000000FF) ++#define PDP_PDICOREID_PDI_CORE_ID_SHIFT (16) ++#define PDP_PDICOREID_PDI_CORE_ID_LENGTH (8) ++#define PDP_PDICOREID_PDI_CORE_ID_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREID, PDI_CONFIG_ID ++*/ ++#define PDP_PDICOREID_PDI_CONFIG_ID_MASK (0x0000FFFF) ++#define PDP_PDICOREID_PDI_CONFIG_ID_LSBMASK (0x0000FFFF) ++#define PDP_PDICOREID_PDI_CONFIG_ID_SHIFT (0) ++#define PDP_PDICOREID_PDI_CONFIG_ID_LENGTH (16) ++#define PDP_PDICOREID_PDI_CONFIG_ID_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_PDICOREREV_OFFSET (0x0910) ++ ++/* PDP, PDICOREREV, PDI_MAJOR_REV ++*/ ++#define PDP_PDICOREREV_PDI_MAJOR_REV_MASK (0x00FF0000) ++#define PDP_PDICOREREV_PDI_MAJOR_REV_LSBMASK (0x000000FF) ++#define PDP_PDICOREREV_PDI_MAJOR_REV_SHIFT (16) ++#define PDP_PDICOREREV_PDI_MAJOR_REV_LENGTH (8) ++#define PDP_PDICOREREV_PDI_MAJOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREREV, PDI_MINOR_REV ++*/ ++#define PDP_PDICOREREV_PDI_MINOR_REV_MASK (0x0000FF00) ++#define PDP_PDICOREREV_PDI_MINOR_REV_LSBMASK (0x000000FF) ++#define PDP_PDICOREREV_PDI_MINOR_REV_SHIFT (8) ++#define PDP_PDICOREREV_PDI_MINOR_REV_LENGTH (8) ++#define PDP_PDICOREREV_PDI_MINOR_REV_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, PDICOREREV, PDI_MAINT_REV ++*/ ++#define PDP_PDICOREREV_PDI_MAINT_REV_MASK (0x000000FF) ++#define PDP_PDICOREREV_PDI_MAINT_REV_LSBMASK (0x000000FF) ++#define PDP_PDICOREREV_PDI_MAINT_REV_SHIFT (0) ++#define PDP_PDICOREREV_PDI_MAINT_REV_LENGTH (8) ++#define PDP_PDICOREREV_PDI_MAINT_REV_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX2_OFFSET (0x0920) ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y1 ++*/ ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_MASK (0x000000C0) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LSBMASK (0x00000003) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SHIFT (6) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_LENGTH (2) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y1 ++*/ ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_MASK (0x00000030) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LSBMASK (0x00000003) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SHIFT (4) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_LENGTH (2) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X1Y0 ++*/ ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_MASK (0x0000000C) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LSBMASK (0x00000003) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SHIFT (2) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_LENGTH (2) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X1Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX2, DITHERMATRIX2X0Y0 ++*/ ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_MASK (0x00000003) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LSBMASK (0x00000003) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SHIFT (0) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_LENGTH (2) ++#define PDP_DITHERMATRIX2_DITHERMATRIX2X0Y0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX4_0_OFFSET (0x0924) ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y1 ++*/ ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_MASK (0xF0000000) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SHIFT (28) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_LENGTH (4) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y1_SIGNED_FIELD 
IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y1 ++*/ ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_MASK (0x0F000000) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SHIFT (24) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_LENGTH (4) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y1 ++*/ ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_MASK (0x00F00000) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SHIFT (20) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_LENGTH (4) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y1 ++*/ ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_MASK (0x000F0000) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SHIFT (16) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_LENGTH (4) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X3Y0 ++*/ ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_MASK (0x0000F000) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SHIFT (12) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_LENGTH (4) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X3Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X2Y0 ++*/ ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_MASK (0x00000F00) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SHIFT (8) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_LENGTH (4) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X2Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X1Y0 ++*/ ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_MASK (0x000000F0) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SHIFT (4) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_LENGTH (4) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X1Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_0, DITHERMATRIX4X0Y0 ++*/ ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_MASK (0x0000000F) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SHIFT (0) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_LENGTH (4) ++#define PDP_DITHERMATRIX4_0_DITHERMATRIX4X0Y0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX4_1_OFFSET (0x0928) ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y3 ++*/ ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_MASK (0xF0000000) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SHIFT (28) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_LENGTH (4) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y3 ++*/ ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_MASK (0x0F000000) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SHIFT (24) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_LENGTH (4) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y3 ++*/ ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_MASK 
(0x00F00000) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SHIFT (20) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_LENGTH (4) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y3 ++*/ ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_MASK (0x000F0000) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SHIFT (16) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_LENGTH (4) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X3Y2 ++*/ ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_MASK (0x0000F000) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SHIFT (12) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_LENGTH (4) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X3Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X2Y2 ++*/ ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_MASK (0x00000F00) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SHIFT (8) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_LENGTH (4) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X2Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X1Y2 ++*/ ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_MASK (0x000000F0) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SHIFT (4) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_LENGTH (4) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X1Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX4_1, DITHERMATRIX4X0Y2 ++*/ ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_MASK (0x0000000F) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LSBMASK (0x0000000F) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SHIFT (0) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_LENGTH (4) ++#define PDP_DITHERMATRIX4_1_DITHERMATRIX4X0Y2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_0_OFFSET (0x092C) ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X4Y0 ++*/ ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SHIFT (24) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_LENGTH (6) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X4Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X3Y0 ++*/ ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SHIFT (18) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_LENGTH (6) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X3Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X2Y0 ++*/ ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SHIFT (12) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_LENGTH (6) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X2Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X1Y0 ++*/ ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LSBMASK (0x0000003F) ++#define 
PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SHIFT (6) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_LENGTH (6) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X1Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_0, DITHERMATRIX8X0Y0 ++*/ ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SHIFT (0) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_LENGTH (6) ++#define PDP_DITHERMATRIX8_0_DITHERMATRIX8X0Y0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_1_OFFSET (0x0930) ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X1Y1 ++*/ ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SHIFT (24) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_LENGTH (6) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X1Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X0Y1 ++*/ ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SHIFT (18) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_LENGTH (6) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X0Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X7Y0 ++*/ ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SHIFT (12) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_LENGTH (6) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X7Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X6Y0 ++*/ ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SHIFT (6) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_LENGTH (6) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X6Y0_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_1, DITHERMATRIX8X5Y0 ++*/ ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SHIFT (0) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_LENGTH (6) ++#define PDP_DITHERMATRIX8_1_DITHERMATRIX8X5Y0_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_2_OFFSET (0x0934) ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X6Y1 ++*/ ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SHIFT (24) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_LENGTH (6) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X6Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X5Y1 ++*/ ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SHIFT (18) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_LENGTH (6) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X5Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X4Y1 ++*/ ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SHIFT (12) ++#define 
PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_LENGTH (6) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X4Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X3Y1 ++*/ ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SHIFT (6) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_LENGTH (6) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X3Y1_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_2, DITHERMATRIX8X2Y1 ++*/ ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SHIFT (0) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_LENGTH (6) ++#define PDP_DITHERMATRIX8_2_DITHERMATRIX8X2Y1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_3_OFFSET (0x0938) ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X3Y2 ++*/ ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SHIFT (24) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_LENGTH (6) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X3Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X2Y2 ++*/ ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SHIFT (18) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_LENGTH (6) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X2Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X1Y2 ++*/ ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SHIFT (12) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_LENGTH (6) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X1Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X0Y2 ++*/ ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SHIFT (6) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_LENGTH (6) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X0Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_3, DITHERMATRIX8X7Y1 ++*/ ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SHIFT (0) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_LENGTH (6) ++#define PDP_DITHERMATRIX8_3_DITHERMATRIX8X7Y1_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_4_OFFSET (0x093C) ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X0Y3 ++*/ ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SHIFT (24) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_LENGTH (6) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X0Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X7Y2 ++*/ ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SHIFT (18) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_LENGTH (6) ++#define 
PDP_DITHERMATRIX8_4_DITHERMATRIX8X7Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X6Y2 ++*/ ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SHIFT (12) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_LENGTH (6) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X6Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X5Y2 ++*/ ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SHIFT (6) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_LENGTH (6) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X5Y2_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_4, DITHERMATRIX8X4Y2 ++*/ ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SHIFT (0) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_LENGTH (6) ++#define PDP_DITHERMATRIX8_4_DITHERMATRIX8X4Y2_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_5_OFFSET (0x0940) ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X5Y3 ++*/ ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SHIFT (24) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_LENGTH (6) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X5Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X4Y3 ++*/ ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SHIFT (18) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_LENGTH (6) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X4Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X3Y3 ++*/ ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SHIFT (12) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_LENGTH (6) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X3Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X2Y3 ++*/ ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SHIFT (6) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_LENGTH (6) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X2Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_5, DITHERMATRIX8X1Y3 ++*/ ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SHIFT (0) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_LENGTH (6) ++#define PDP_DITHERMATRIX8_5_DITHERMATRIX8X1Y3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_6_OFFSET (0x0944) ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X2Y4 ++*/ ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT (24) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_LENGTH (6) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SIGNED_FIELD IMG_FALSE ++ 
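++/* Illustrative read-modify-write sketch (assumed, not part of the
++ * original header; pdp_base and val are placeholders): a single dither
++ * entry is updated by clearing its _MASK bits and or-ing in the new
++ * value shifted into position, e.g.
++ *   u32 v = readl(pdp_base + PDP_DITHERMATRIX8_6_OFFSET);
++ *   v &= ~PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK;
++ *   v |= (val << PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_SHIFT) &
++ *        PDP_DITHERMATRIX8_6_DITHERMATRIX8X2Y4_MASK;
++ *   writel(v, pdp_base + PDP_DITHERMATRIX8_6_OFFSET);
++ */
++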
++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X1Y4 ++*/ ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SHIFT (18) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_LENGTH (6) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X1Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X0Y4 ++*/ ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SHIFT (12) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_LENGTH (6) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X0Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X7Y3 ++*/ ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SHIFT (6) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_LENGTH (6) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X7Y3_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_6, DITHERMATRIX8X6Y3 ++*/ ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SHIFT (0) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_LENGTH (6) ++#define PDP_DITHERMATRIX8_6_DITHERMATRIX8X6Y3_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_7_OFFSET (0x0948) ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X7Y4 ++*/ ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SHIFT (24) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_LENGTH (6) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X7Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X6Y4 ++*/ ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SHIFT (18) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_LENGTH (6) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X6Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X5Y4 ++*/ ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SHIFT (12) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_LENGTH (6) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X5Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X4Y4 ++*/ ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SHIFT (6) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_LENGTH (6) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X4Y4_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_7, DITHERMATRIX8X3Y4 ++*/ ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SHIFT (0) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_LENGTH (6) ++#define PDP_DITHERMATRIX8_7_DITHERMATRIX8X3Y4_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_8_OFFSET (0x094C) ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X4Y5 ++*/ ++#define 
PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SHIFT (24) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_LENGTH (6) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X4Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X3Y5 ++*/ ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SHIFT (18) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_LENGTH (6) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X3Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X2Y5 ++*/ ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SHIFT (12) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_LENGTH (6) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X2Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X1Y5 ++*/ ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SHIFT (6) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_LENGTH (6) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X1Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_8, DITHERMATRIX8X0Y5 ++*/ ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SHIFT (0) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_LENGTH (6) ++#define PDP_DITHERMATRIX8_8_DITHERMATRIX8X0Y5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_9_OFFSET (0x0950) ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X1Y6 ++*/ ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SHIFT (24) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_LENGTH (6) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X1Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X0Y6 ++*/ ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SHIFT (18) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_LENGTH (6) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X0Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X7Y5 ++*/ ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SHIFT (12) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_LENGTH (6) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X7Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X6Y5 ++*/ ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SHIFT (6) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_LENGTH (6) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X6Y5_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_9, DITHERMATRIX8X5Y5 ++*/ ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LSBMASK 
(0x0000003F) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SHIFT (0) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_LENGTH (6) ++#define PDP_DITHERMATRIX8_9_DITHERMATRIX8X5Y5_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_10_OFFSET (0x0954) ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X6Y6 ++*/ ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SHIFT (24) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_LENGTH (6) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X6Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X5Y6 ++*/ ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SHIFT (18) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_LENGTH (6) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X5Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X4Y6 ++*/ ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SHIFT (12) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_LENGTH (6) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X4Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X3Y6 ++*/ ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SHIFT (6) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_LENGTH (6) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X3Y6_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_10, DITHERMATRIX8X2Y6 ++*/ ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SHIFT (0) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_LENGTH (6) ++#define PDP_DITHERMATRIX8_10_DITHERMATRIX8X2Y6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_11_OFFSET (0x0958) ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X3Y7 ++*/ ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_MASK (0x3F000000) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SHIFT (24) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_LENGTH (6) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X3Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X2Y7 ++*/ ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SHIFT (18) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_LENGTH (6) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X2Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X1Y7 ++*/ ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SHIFT (12) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_LENGTH (6) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X1Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X0Y7 ++*/ ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LSBMASK (0x0000003F) 
++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SHIFT (6) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_LENGTH (6) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X0Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_11, DITHERMATRIX8X7Y6 ++*/ ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SHIFT (0) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_LENGTH (6) ++#define PDP_DITHERMATRIX8_11_DITHERMATRIX8X7Y6_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_DITHERMATRIX8_12_OFFSET (0x095C) ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X7Y7 ++*/ ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_MASK (0x00FC0000) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SHIFT (18) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_LENGTH (6) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X7Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X6Y7 ++*/ ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_MASK (0x0003F000) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SHIFT (12) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_LENGTH (6) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X6Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X5Y7 ++*/ ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_MASK (0x00000FC0) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SHIFT (6) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_LENGTH (6) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X5Y7_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, DITHERMATRIX8_12, DITHERMATRIX8X4Y7 ++*/ ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_MASK (0x0000003F) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LSBMASK (0x0000003F) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SHIFT (0) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_LENGTH (6) ++#define PDP_DITHERMATRIX8_12_DITHERMATRIX8X4Y7_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1_MEMCTRL_OFFSET (0x0960) ++ ++/* PDP, GRPH1_MEMCTRL, GRPH1_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define PDP_GRPH1_MEMCTRL_GRPH1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_MEMCTRL, GRPH1_BURSTLEN ++*/ ++#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_MASK (0x000000FF) ++#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LSBMASK (0x000000FF) ++#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SHIFT (0) ++#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_LENGTH (8) ++#define PDP_GRPH1_MEMCTRL_GRPH1_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1_MEM_THRESH_OFFSET (0x0964) ++ ++/* PDP, GRPH1_MEM_THRESH, GRPH1_UVTHRESHOLD ++*/ ++#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_MASK (0xFF000000) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SHIFT (24) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_LENGTH (8) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_MEM_THRESH, GRPH1_YTHRESHOLD ++*/ ++#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_MASK (0x001FF000) ++#define 
PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LSBMASK (0x000001FF) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SHIFT (12) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_LENGTH (9) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_MEM_THRESH, GRPH1_THRESHOLD ++*/ ++#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_MASK (0x000001FF) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LSBMASK (0x000001FF) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SHIFT (0) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_LENGTH (9) ++#define PDP_GRPH1_MEM_THRESH_GRPH1_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2_MEMCTRL_OFFSET (0x0968) ++ ++/* PDP, GRPH2_MEMCTRL, GRPH2_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define PDP_GRPH2_MEMCTRL_GRPH2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_MEMCTRL, GRPH2_BURSTLEN ++*/ ++#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_MASK (0x000000FF) ++#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LSBMASK (0x000000FF) ++#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SHIFT (0) ++#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_LENGTH (8) ++#define PDP_GRPH2_MEMCTRL_GRPH2_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2_MEM_THRESH_OFFSET (0x096C) ++ ++/* PDP, GRPH2_MEM_THRESH, GRPH2_UVTHRESHOLD ++*/ ++#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_MASK (0xFF000000) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SHIFT (24) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_LENGTH (8) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_MEM_THRESH, GRPH2_YTHRESHOLD ++*/ ++#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_MASK (0x001FF000) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LSBMASK (0x000001FF) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SHIFT (12) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_LENGTH (9) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_MEM_THRESH, GRPH2_THRESHOLD ++*/ ++#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_MASK (0x000001FF) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LSBMASK (0x000001FF) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SHIFT (0) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_LENGTH (9) ++#define PDP_GRPH2_MEM_THRESH_GRPH2_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3_MEMCTRL_OFFSET (0x0970) ++ ++/* PDP, GRPH3_MEMCTRL, GRPH3_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define PDP_GRPH3_MEMCTRL_GRPH3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_MEMCTRL, GRPH3_BURSTLEN ++*/ ++#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_MASK (0x000000FF) ++#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LSBMASK (0x000000FF) ++#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SHIFT (0) ++#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_LENGTH (8) ++#define PDP_GRPH3_MEMCTRL_GRPH3_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3_MEM_THRESH_OFFSET (0x0974) ++ ++/* PDP, GRPH3_MEM_THRESH, GRPH3_UVTHRESHOLD ++*/ ++#define 
PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_MASK (0xFF000000) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SHIFT (24) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_LENGTH (8) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_MEM_THRESH, GRPH3_YTHRESHOLD ++*/ ++#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_MASK (0x001FF000) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LSBMASK (0x000001FF) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SHIFT (12) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_LENGTH (9) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_MEM_THRESH, GRPH3_THRESHOLD ++*/ ++#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_MASK (0x000001FF) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LSBMASK (0x000001FF) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SHIFT (0) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_LENGTH (9) ++#define PDP_GRPH3_MEM_THRESH_GRPH3_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4_MEMCTRL_OFFSET (0x0978) ++ ++/* PDP, GRPH4_MEMCTRL, GRPH4_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define PDP_GRPH4_MEMCTRL_GRPH4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_MEMCTRL, GRPH4_BURSTLEN ++*/ ++#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_MASK (0x000000FF) ++#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LSBMASK (0x000000FF) ++#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SHIFT (0) ++#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_LENGTH (8) ++#define PDP_GRPH4_MEMCTRL_GRPH4_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4_MEM_THRESH_OFFSET (0x097C) ++ ++/* PDP, GRPH4_MEM_THRESH, GRPH4_UVTHRESHOLD ++*/ ++#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_MASK (0xFF000000) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SHIFT (24) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_LENGTH (8) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_MEM_THRESH, GRPH4_YTHRESHOLD ++*/ ++#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_MASK (0x001FF000) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LSBMASK (0x000001FF) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SHIFT (12) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_LENGTH (9) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_MEM_THRESH, GRPH4_THRESHOLD ++*/ ++#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_MASK (0x000001FF) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LSBMASK (0x000001FF) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SHIFT (0) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_LENGTH (9) ++#define PDP_GRPH4_MEM_THRESH_GRPH4_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1_MEMCTRL_OFFSET (0x0980) ++ ++/* PDP, VID1_MEMCTRL, VID1_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define PDP_VID1_MEMCTRL_VID1_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_MEMCTRL, VID1_BURSTLEN 
++*/ ++#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_MASK (0x000000FF) ++#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LSBMASK (0x000000FF) ++#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SHIFT (0) ++#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_LENGTH (8) ++#define PDP_VID1_MEMCTRL_VID1_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1_MEM_THRESH_OFFSET (0x0984) ++ ++/* PDP, VID1_MEM_THRESH, VID1_UVTHRESHOLD ++*/ ++#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_MASK (0xFF000000) ++#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SHIFT (24) ++#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_LENGTH (8) ++#define PDP_VID1_MEM_THRESH_VID1_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_MEM_THRESH, VID1_YTHRESHOLD ++*/ ++#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_MASK (0x001FF000) ++#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LSBMASK (0x000001FF) ++#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SHIFT (12) ++#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_LENGTH (9) ++#define PDP_VID1_MEM_THRESH_VID1_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_MEM_THRESH, VID1_THRESHOLD ++*/ ++#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_MASK (0x000001FF) ++#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LSBMASK (0x000001FF) ++#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SHIFT (0) ++#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_LENGTH (9) ++#define PDP_VID1_MEM_THRESH_VID1_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2_MEMCTRL_OFFSET (0x0988) ++ ++/* PDP, VID2_MEMCTRL, VID2_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define PDP_VID2_MEMCTRL_VID2_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_MEMCTRL, VID2_BURSTLEN ++*/ ++#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_MASK (0x000000FF) ++#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LSBMASK (0x000000FF) ++#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SHIFT (0) ++#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_LENGTH (8) ++#define PDP_VID2_MEMCTRL_VID2_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2_MEM_THRESH_OFFSET (0x098C) ++ ++/* PDP, VID2_MEM_THRESH, VID2_UVTHRESHOLD ++*/ ++#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_MASK (0xFF000000) ++#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SHIFT (24) ++#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_LENGTH (8) ++#define PDP_VID2_MEM_THRESH_VID2_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_MEM_THRESH, VID2_YTHRESHOLD ++*/ ++#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_MASK (0x001FF000) ++#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LSBMASK (0x000001FF) ++#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SHIFT (12) ++#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_LENGTH (9) ++#define PDP_VID2_MEM_THRESH_VID2_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_MEM_THRESH, VID2_THRESHOLD ++*/ ++#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_MASK (0x000001FF) ++#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LSBMASK (0x000001FF) ++#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SHIFT (0) ++#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_LENGTH (9) ++#define PDP_VID2_MEM_THRESH_VID2_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3_MEMCTRL_OFFSET (0x0990) ++ ++/* PDP, VID3_MEMCTRL, VID3_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define 
PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define PDP_VID3_MEMCTRL_VID3_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_MEMCTRL, VID3_BURSTLEN ++*/ ++#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_MASK (0x000000FF) ++#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LSBMASK (0x000000FF) ++#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SHIFT (0) ++#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_LENGTH (8) ++#define PDP_VID3_MEMCTRL_VID3_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3_MEM_THRESH_OFFSET (0x0994) ++ ++/* PDP, VID3_MEM_THRESH, VID3_UVTHRESHOLD ++*/ ++#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_MASK (0xFF000000) ++#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SHIFT (24) ++#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_LENGTH (8) ++#define PDP_VID3_MEM_THRESH_VID3_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_MEM_THRESH, VID3_YTHRESHOLD ++*/ ++#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_MASK (0x001FF000) ++#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LSBMASK (0x000001FF) ++#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SHIFT (12) ++#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_LENGTH (9) ++#define PDP_VID3_MEM_THRESH_VID3_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_MEM_THRESH, VID3_THRESHOLD ++*/ ++#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_MASK (0x000001FF) ++#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LSBMASK (0x000001FF) ++#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SHIFT (0) ++#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_LENGTH (9) ++#define PDP_VID3_MEM_THRESH_VID3_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4_MEMCTRL_OFFSET (0x0998) ++ ++/* PDP, VID4_MEMCTRL, VID4_LOCAL_GLOBAL_MEMCTRL ++*/ ++#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_MASK (0x80000000) ++#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LSBMASK (0x00000001) ++#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SHIFT (31) ++#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_LENGTH (1) ++#define PDP_VID4_MEMCTRL_VID4_LOCAL_GLOBAL_MEMCTRL_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_MEMCTRL, VID4_BURSTLEN ++*/ ++#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_MASK (0x000000FF) ++#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LSBMASK (0x000000FF) ++#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SHIFT (0) ++#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_LENGTH (8) ++#define PDP_VID4_MEMCTRL_VID4_BURSTLEN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4_MEM_THRESH_OFFSET (0x099C) ++ ++/* PDP, VID4_MEM_THRESH, VID4_UVTHRESHOLD ++*/ ++#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_MASK (0xFF000000) ++#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LSBMASK (0x000000FF) ++#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SHIFT (24) ++#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_LENGTH (8) ++#define PDP_VID4_MEM_THRESH_VID4_UVTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_MEM_THRESH, VID4_YTHRESHOLD ++*/ ++#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_MASK (0x001FF000) ++#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LSBMASK (0x000001FF) ++#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SHIFT (12) ++#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_LENGTH (9) ++#define PDP_VID4_MEM_THRESH_VID4_YTHRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_MEM_THRESH, VID4_THRESHOLD ++*/ ++#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_MASK (0x000001FF) ++#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LSBMASK (0x000001FF) ++#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SHIFT (0) 
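++
++/* Illustrative aside (an editorial addition, not part of the original
++ * header): every register field in this file follows the same
++ * _MASK/_LSBMASK/_SHIFT/_LENGTH naming convention, so fields can be read
++ * and written generically. The two helper macros below are hypothetical
++ * and shown only to make the convention concrete:
++ *
++ *   #define PDP_FIELD_GET(reg, F)    (((reg) & F##_MASK) >> F##_SHIFT)
++ *   #define PDP_FIELD_SET(reg, F, v) (((reg) & ~F##_MASK) | \
++ *                                     (((v) & F##_LSBMASK) << F##_SHIFT))
++ *
++ * e.g. PDP_FIELD_GET(ui32Reg, PDP_VID4_MEM_THRESH_VID4_THRESHOLD)
++ * extracts the 9-bit threshold field defined here.
++ */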
++#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_LENGTH (9) ++#define PDP_VID4_MEM_THRESH_VID4_THRESHOLD_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH1_PANIC_THRESH_OFFSET (0x09A0) ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_ENABLE ++*/ ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_MASK (0x80000000) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SHIFT (31) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_LENGTH (1) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_ENABLE ++*/ ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_MASK (0x40000000) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SHIFT (30) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_LENGTH (1) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MAX ++*/ ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_UV_THRESHOLD_MIN ++*/ ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MAX ++*/ ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH1_PANIC_THRESH, GRPH1_ALERT_Y_THRESHOLD_MIN ++*/ ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define PDP_GRPH1_PANIC_THRESH_GRPH1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH2_PANIC_THRESH_OFFSET (0x09A4) ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_ENABLE ++*/ ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_MASK (0x80000000) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SHIFT (31) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_LENGTH (1) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_ENABLE ++*/ ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_MASK (0x40000000) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define 
PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SHIFT (30) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_LENGTH (1) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MAX ++*/ ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_UV_THRESHOLD_MIN ++*/ ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MAX ++*/ ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH2_PANIC_THRESH, GRPH2_ALERT_Y_THRESHOLD_MIN ++*/ ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define PDP_GRPH2_PANIC_THRESH_GRPH2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH3_PANIC_THRESH_OFFSET (0x09A8) ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_ENABLE ++*/ ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_MASK (0x80000000) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SHIFT (31) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_LENGTH (1) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_ENABLE ++*/ ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_MASK (0x40000000) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SHIFT (30) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_LENGTH (1) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MAX ++*/ ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_UV_THRESHOLD_MIN ++*/ ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define 
PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MAX ++*/ ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH3_PANIC_THRESH, GRPH3_ALERT_Y_THRESHOLD_MIN ++*/ ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define PDP_GRPH3_PANIC_THRESH_GRPH3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_GRPH4_PANIC_THRESH_OFFSET (0x09AC) ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_ENABLE ++*/ ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_MASK (0x80000000) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SHIFT (31) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_LENGTH (1) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_ENABLE ++*/ ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_MASK (0x40000000) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SHIFT (30) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_LENGTH (1) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MAX ++*/ ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_UV_THRESHOLD_MIN ++*/ ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MAX ++*/ ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, GRPH4_PANIC_THRESH, GRPH4_ALERT_Y_THRESHOLD_MIN ++*/ ++#define 
PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define PDP_GRPH4_PANIC_THRESH_GRPH4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID1_PANIC_THRESH_OFFSET (0x09B0) ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_ENABLE ++*/ ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_MASK (0x80000000) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SHIFT (31) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_LENGTH (1) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_ENABLE ++*/ ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_MASK (0x40000000) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SHIFT (30) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_LENGTH (1) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MAX ++*/ ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_UV_THRESHOLD_MIN ++*/ ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MAX ++*/ ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID1_PANIC_THRESH, VID1_ALERT_Y_THRESHOLD_MIN ++*/ ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define PDP_VID1_PANIC_THRESH_VID1_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID2_PANIC_THRESH_OFFSET (0x09B4) ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_ENABLE ++*/ ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_MASK (0x80000000) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SHIFT (31) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_LENGTH (1) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_ENABLE ++*/ ++#define 
PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_MASK (0x40000000) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SHIFT (30) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_LENGTH (1) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MAX ++*/ ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_UV_THRESHOLD_MIN ++*/ ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MAX ++*/ ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID2_PANIC_THRESH, VID2_ALERT_Y_THRESHOLD_MIN ++*/ ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define PDP_VID2_PANIC_THRESH_VID2_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID3_PANIC_THRESH_OFFSET (0x09B8) ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_ENABLE ++*/ ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_MASK (0x80000000) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SHIFT (31) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_LENGTH (1) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_ENABLE ++*/ ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_MASK (0x40000000) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SHIFT (30) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_LENGTH (1) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MAX ++*/ ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_UV_THRESHOLD_MIN ++*/ ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_MASK 
(0x007F0000) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MAX ++*/ ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID3_PANIC_THRESH, VID3_ALERT_Y_THRESHOLD_MIN ++*/ ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define PDP_VID3_PANIC_THRESH_VID3_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_VID4_PANIC_THRESH_OFFSET (0x09BC) ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_ENABLE ++*/ ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_MASK (0x80000000) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LSBMASK (0x00000001) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SHIFT (31) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_LENGTH (1) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_ENABLE ++*/ ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_MASK (0x40000000) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LSBMASK (0x00000001) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SHIFT (30) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_LENGTH (1) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_ENABLE_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MAX ++*/ ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_MASK (0x3F800000) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LSBMASK (0x0000007F) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SHIFT (23) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_LENGTH (7) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_UV_THRESHOLD_MIN ++*/ ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_MASK (0x007F0000) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LSBMASK (0x0000007F) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SHIFT (16) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_LENGTH (7) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_UV_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MAX ++*/ ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_MASK (0x0000FF00) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LSBMASK (0x000000FF) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SHIFT (8) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_LENGTH (8) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MAX_SIGNED_FIELD IMG_FALSE ++ ++/* PDP, VID4_PANIC_THRESH, VID4_ALERT_Y_THRESHOLD_MIN ++*/ ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_MASK (0x000000FF) ++#define 
PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LSBMASK (0x000000FF) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SHIFT (0) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_LENGTH (8) ++#define PDP_VID4_PANIC_THRESH_VID4_ALERT_Y_THRESHOLD_MIN_SIGNED_FIELD IMG_FALSE ++ ++#define PDP_BURST_BOUNDARY_OFFSET (0x09C0) ++ ++/* PDP, BURST_BOUNDARY, BURST_BOUNDARY ++*/ ++#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_MASK (0x0000003F) ++#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LSBMASK (0x0000003F) ++#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SHIFT (0) ++#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_LENGTH (6) ++#define PDP_BURST_BOUNDARY_BURST_BOUNDARY_SIGNED_FIELD IMG_FALSE ++ ++ ++/* ------------------------ End of register definitions ------------------------ */ ++ ++/* ++// NUMREG defines the extent of register address space. ++*/ ++ ++#define PDP_NUMREG ((0x09C0 >> 2)+1) ++ ++/* Info about video plane addresses */ ++#define PDP_YADDR_BITS (PDP_VID1BASEADDR_VID1BASEADDR_LENGTH) ++#define PDP_YADDR_ALIGN 5 ++#define PDP_UADDR_BITS (PDP_VID1UBASEADDR_VID1UBASEADDR_LENGTH) ++#define PDP_UADDR_ALIGN 5 ++#define PDP_VADDR_BITS (PDP_VID1VBASEADDR_VID1VBASEADDR_LENGTH) ++#define PDP_VADDR_ALIGN 5 ++ ++#define PDP_YSTRIDE_BITS (PDP_VID1STRIDE_VID1STRIDE_LENGTH) ++#define PDP_YSTRIDE_ALIGN 5 ++ ++#define PDP_MAX_INPUT_WIDTH (PDP_VID1SIZE_VID1WIDTH_LSBMASK + 1) ++#define PDP_MAX_INPUT_HEIGHT (PDP_VID1SIZE_VID1HEIGHT_LSBMASK + 1) ++ ++/* Maximum 6 bytes per pixel for RGB161616 */ ++#define PDP_MAX_IMAGE_BYTES (PDP_MAX_INPUT_WIDTH * PDP_MAX_INPUT_HEIGHT * 6) ++ ++/* Round up */ ++#define PDP_MAX_IMAGE_PAGES ((PDP_MAX_IMAGE_BYTES+PAGE_SIZE-1)/PAGE_SIZE) ++ ++#define PDP_YADDR_MAX (((1 << PDP_YADDR_BITS) - 1) << PDP_YADDR_ALIGN) ++#define PDP_UADDR_MAX (((1 << PDP_UADDR_BITS) - 1) << PDP_UADDR_ALIGN) ++#define PDP_VADDR_MAX (((1 << PDP_VADDR_BITS) - 1) << PDP_VADDR_ALIGN) ++#define PDP_YSTRIDE_MAX ((1 << PDP_YSTRIDE_BITS) << PDP_YSTRIDE_ALIGN) ++#define PDP_YADDR_ALIGNMASK ((1 << PDP_YADDR_ALIGN) - 1) ++#define PDP_UADDR_ALIGNMASK ((1 << PDP_UADDR_ALIGN) - 1) ++#define PDP_VADDR_ALIGNMASK ((1 << PDP_VADDR_ALIGN) - 1) ++#define PDP_YSTRIDE_ALIGNMASK ((1 << PDP_YSTRIDE_ALIGN) - 1) ++ ++/* Field Values */ ++#define PDP_SURF_PIXFMT_RGB332 0x3 ++#define PDP_SURF_PIXFMT_ARGB4444 0x4 ++#define PDP_SURF_PIXFMT_ARGB1555 0x5 ++#define PDP_SURF_PIXFMT_RGB888 0x6 ++#define PDP_SURF_PIXFMT_RGB565 0x7 ++#define PDP_SURF_PIXFMT_ARGB8888 0x8 ++#define PDP_SURF_PIXFMT_420_PL8 0x9 ++#define PDP_SURF_PIXFMT_420_PL8IVU 0xA ++#define PDP_SURF_PIXFMT_420_PL8IUV 0xB ++#define PDP_SURF_PIXFMT_422_UY0VY1_8888 0xC ++#define PDP_SURF_PIXFMT_422_VY0UY1_8888 0xD ++#define PDP_SURF_PIXFMT_422_Y0UY1V_8888 0xE ++#define PDP_SURF_PIXFMT_422_Y0VY1U_8888 0xF ++#define PDP_SURF_PIXFMT_AYUV8888 0x10 ++#define PDP_SURF_PIXFMT_YUV101010 0x15 ++#define PDP_SURF_PIXFMT_RGB101010 0x17 ++#define PDP_SURF_PIXFMT_420_PL10IUV 0x18 ++#define PDP_SURF_PIXFMT_420_PL10IVU 0x19 ++#define PDP_SURF_PIXFMT_422_PL10IUV 0x1A ++#define PDP_SURF_PIXFMT_422_PL10IVU 0x1B ++#define PDP_SURF_PIXFMT_RGB121212 0x1E ++#define PDP_SURF_PIXFMT_RGB161616 0x1F ++ ++#define PDP_CTRL_CKEYSRC_PREV 0x0 ++#define PDP_CTRL_CKEYSRC_CUR 0x1 ++ ++#define PDP_MEMCTRL_MEMREFRESH_ALWAYS 0x0 ++#define PDP_MEMCTRL_MEMREFRESH_HBLNK 0x1 ++#define PDP_MEMCTRL_MEMREFRESH_VBLNK 0x2 ++#define PDP_MEMCTRL_MEMREFRESH_BOTH 0x3 ++ ++#define PDP_3D_CTRL_BLENDSEL_BGND_WITH_POS0 0x0 ++#define PDP_3D_CTRL_BLENDSEL_POS0_WITH_POS1 0x1 ++#define PDP_3D_CTRL_BLENDSEL_POS1_WITH_POS2 0x2 
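++
++/* Illustrative aside (an editorial addition, not part of the original
++ * header): the *_BITS and *_ALIGN values above combine a register field
++ * width with a 32-byte (1 << 5) alignment unit, so a plane base address
++ * is valid only if it is both aligned and representable, e.g. this
++ * hypothetical check:
++ *
++ *   if ((ui32YAddr & PDP_YADDR_ALIGNMASK) != 0 || ui32YAddr > PDP_YADDR_MAX)
++ *       return PVRSRV_ERROR_INVALID_PARAMS;
++ */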
++#define PDP_3D_CTRL_BLENDSEL_POS2_WITH_POS3 0x3 ++#define PDP_3D_CTRL_BLENDSEL_POS3_WITH_POS4 0x4 ++#define PDP_3D_CTRL_BLENDSEL_POS4_WITH_POS5 0x5 ++#define PDP_3D_CTRL_BLENDSEL_POS5_WITH_POS6 0x6 ++#define PDP_3D_CTRL_BLENDSEL_POS6_WITH_POS7 0x7 ++ ++#define PDP_UADDR_UV_STRIDE_EQUAL_TO_Y_STRIDE 0x0 ++#define PDP_UADDR_UV_STRIDE_EQUAL_TO_DOUBLE_Y_STRIDE 0x1 ++#define PDP_UADDR_UV_STRIDE_EQUAL_TO_HALF_Y_STRIDE 0x2 ++ ++#define PDP_PROCAMP_OUTPUT_OFFSET_FRACTIONAL_BITS 1 ++#define PDP_PROCAMP_COEFFICIENT_FRACTIONAL_BITS 10 ++ ++/*-------------------------------------------------------------------------------*/ ++ ++#endif /* _PDP2_REGS_H */ +diff --git a/drivers/gpu/drm/img-rogue/pdp_drm.h b/drivers/gpu/drm/img-rogue/pdp_drm.h +new file mode 100644 +index 000000000000..f5d747d3ad10 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pdp_drm.h +@@ -0,0 +1,105 @@ ++/* ++ * @File ++ * @Title PDP DRM definitions shared between kernel and user space. ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#if !defined(__PDP_DRM_H__)
++#define __PDP_DRM_H__
++
++#if defined(__KERNEL__)
++#include <drm/drm.h>
++#else
++#include <drm.h>
++#endif
++
++struct drm_pdp_gem_create {
++	__u64 size;   /* in */
++	__u32 flags;  /* in */
++	__u32 handle; /* out */
++};
++
++struct drm_pdp_gem_mmap {
++	__u32 handle; /* in */
++	__u32 pad;
++	__u64 offset; /* out */
++};
++
++#define PDP_GEM_CPU_PREP_READ   (1 << 0)
++#define PDP_GEM_CPU_PREP_WRITE  (1 << 1)
++#define PDP_GEM_CPU_PREP_NOWAIT (1 << 2)
++
++struct drm_pdp_gem_cpu_prep {
++	__u32 handle; /* in */
++	__u32 flags;  /* in */
++};
++
++struct drm_pdp_gem_cpu_fini {
++	__u32 handle; /* in */
++	__u32 pad;
++};
++
++/*
++ * DRM command numbers, relative to DRM_COMMAND_BASE.
++ * These defines must be prefixed with "DRM_".
++ */
++#define DRM_PDP_GEM_CREATE   0x00
++#define DRM_PDP_GEM_MMAP     0x01
++#define DRM_PDP_GEM_CPU_PREP 0x02
++#define DRM_PDP_GEM_CPU_FINI 0x03
++
++/* These defines must be prefixed with "DRM_IOCTL_". */
++#define DRM_IOCTL_PDP_GEM_CREATE \
++	DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_CREATE, \
++		 struct drm_pdp_gem_create)
++
++#define DRM_IOCTL_PDP_GEM_MMAP \
++	DRM_IOWR(DRM_COMMAND_BASE + DRM_PDP_GEM_MMAP, \
++		 struct drm_pdp_gem_mmap)
++
++#define DRM_IOCTL_PDP_GEM_CPU_PREP \
++	DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_PREP, \
++		struct drm_pdp_gem_cpu_prep)
++
++#define DRM_IOCTL_PDP_GEM_CPU_FINI \
++	DRM_IOW(DRM_COMMAND_BASE + DRM_PDP_GEM_CPU_FINI, \
++		struct drm_pdp_gem_cpu_fini)
++
++#endif /* defined(__PDP_DRM_H__) */
+diff --git a/drivers/gpu/drm/img-rogue/pdump.h b/drivers/gpu/drm/img-rogue/pdump.h
+new file mode 100644
+index 000000000000..3ef71846446e
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pdump.h
+@@ -0,0 +1,238 @@
++/*************************************************************************/ /*!
++@File
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++#ifndef SERVICES_PDUMP_H
++#define SERVICES_PDUMP_H
++
++#include "img_types.h"
++#include "services_km.h"
++
++
++/* A PDump out2.txt script is made up of 3 sections from three buffers:
++ * *
++ * - Init phase buffer   - holds PDump data written during driver
++ *                         initialisation, non-volatile.
++ * - Main phase buffer   - holds PDump data written after driver init,
++ *                         volatile.
++ * - Deinit phase buffer - holds PDump data needed to shutdown HW/play back,
++ *                         written only during driver initialisation using
++ *                         the DEINIT flag.
++ *
++ * Volatile in this sense means that the buffer is drained and cleared when
++ * the pdump capture application connects and transfers the data to file.
++ *
++ * The PDump sub-system uses the driver state (init/post-init), whether
++ * the pdump capture application is connected or not (capture range set/unset)
++ * and, if pdump connected whether the frame is in the range set, to decide
++ * which of the 3 buffers to write the PDump data. Hence there are several
++ * key time periods in the lifetime of the kernel driver that is enabled
++ * with PDUMP=1 (flag XX labels below time line):
++ *
++ * Events:load       init         pdump      enter          exit        pdump
++ *        driver     done         connects   range          range       disconnects
++ *        |__________________|____________|__________|______________|____________|______ . . .
++ * State: | init phase       | no capture |   <- capture client connected ->     | no capture
++ *        |                  |            |                                      |
++ *        |__________________|____________|______________________________________|_____ . . .
++ * Flag:  | CT,DI            | NONE,CT,PR |             NONE,CT,PR               | See no
++ *        | Never NONE or PR | Never DI   |             Never DI                 | capture
++ *        |__________________|____________|______________________________________|_____ . . .
++ * Write  | NONE -undef      | -No write  | -No write | -Main buf    | -No write | See no
++ * buffer | CT   -Init buf   | -Main buf  | -Main buf | -Main buf    | -Main buf | capture
++ *        | PR   -undef      | -Init buf  | -undef    | -Init & Main | -undef    |
++ *        | DI   -Deinit buf | -undef     | -undef    | -undef       | -undef    |
++ *        |__________________|____________|___________|______________|___________|_____ . . .
++ *
++ * Note: The time line could repeat if the pdump capture application is
++ * disconnected and reconnected without unloading the driver module.
++ *
++ * The DEINIT (DI) | CONTINUOUS (CT) | PERSISTENT (PR) flags must never
++ * be OR'd together and given to a PDump call since undefined behaviour may
++ * result and produce an invalid PDump which does not play back cleanly.
++ *
++ * The decision on which flag to use comes down to which time period the
++ * client or server driver makes the PDump write call AND the nature/purpose
++ * of the data.
++ *
++ * Note: This is a simplified time line, not all conditions represented.
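++ *
++ * For example (illustrative addition, not part of the original comment):
++ * a register write that must be captured regardless of the current frame
++ * range would be emitted with the CONTINUOUS flag, e.g.
++ *
++ *   PDumpReg32(psDeviceNode, "PDP", ui32RegAddr, ui32RegValue,
++ *              PDUMP_FLAGS_CONTINUOUS);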
++ *
++ */
++
++typedef IMG_UINT32 PDUMP_FLAGS_T;
++
++#define PDUMP_FLAGS_NONE PDUMP_NONE /*
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
++  #include <linux/stdarg.h>
++ #else
++  #include <stdarg.h>
++ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */
++ #else
++  #include <stdarg.h>
++ #endif /* __linux__ */
++#endif /* PDUMP */
++
++/* services/srvkm/include/ */
++#include "device.h"
++
++/* include/ */
++#include "pvrsrv_error.h"
++
++
++#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
++#define __pvrsrv_defined_struct_enum__
++#include <services_kernel_client.h>
++#endif
++
++#include "connection_server.h"
++/* Pull in pdump flags from services include */
++#include "pdump.h"
++#include "pdumpdefs.h"
++
++/* Define this to enable the PDUMP_HERE trace in the server */
++#undef PDUMP_TRACE
++
++#if defined(PDUMP_TRACE)
++#define PDUMP_HERE_VAR	IMG_UINT32 here = 0;
++#define PDUMP_HERE(a)	{ here = (a); if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a))); }
++#define PDUMP_HEREA(a)	{ here = (a); PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a))); }
++#else
++#define PDUMP_HERE_VAR	IMG_UINT32 here = 0;
++#define PDUMP_HERE(a)	here = (a);
++#define PDUMP_HEREA(a)	here = (a);
++#endif
++
++#define PDUMP_PD_UNIQUETAG	(IMG_HANDLE)0
++#define PDUMP_PT_UNIQUETAG	(IMG_HANDLE)0
++
++/* Invalid value for PDump block number */
++#define PDUMP_BLOCKNUM_INVALID	IMG_UINT32_MAX
++
++typedef struct _PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA;
++
++/* PDump transition events */
++typedef enum _PDUMP_TRANSITION_EVENT_
++{
++	PDUMP_TRANSITION_EVENT_NONE,           /* No event */
++	PDUMP_TRANSITION_EVENT_BLOCK_FINISHED, /* Block mode event, current PDump-block has finished */
++	PDUMP_TRANSITION_EVENT_BLOCK_STARTED,  /* Block mode event, new PDump-block has started */
++	PDUMP_TRANSITION_EVENT_RANGE_ENTERED,  /* Transition into capture range */
++	PDUMP_TRANSITION_EVENT_RANGE_EXITED,   /* Transition out of capture range */
++} PDUMP_TRANSITION_EVENT;
++
++typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags);
++typedef void (*PFN_PDUMP_SYNCBLOCKS)(PVRSRV_DEVICE_NODE *psDevNode, void *pvData, PDUMP_TRANSITION_EVENT eEvent);
++
++typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION_FENCE_SYNC)(void *pvData, PDUMP_TRANSITION_EVENT eEvent);
++
++#ifdef PDUMP
++
++/*! Macro used to record a panic in the PDump script stream */
++#define PDUMP_PANIC(_dev, _id, _msg) do \
++	{ PVRSRV_ERROR _eE;\
++	  _eE = PDumpPanic((_dev), ((RGX_PDUMP_PANIC_ ## _id) & 0xFFFF), _msg, __func__, __LINE__); \
++	  PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\
++	MSC_SUPPRESS_4127\
++	} while (0)
++
++/*!
Macro used to record a driver error in the PDump script stream to invalidate the capture */ ++#define PDUMP_ERROR(_dev, _err, _msg) \ ++ (void)PDumpCaptureError((_dev), _err, _msg, __func__, __LINE__) ++ ++#define SZ_MSG_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE ++#define SZ_SCRIPT_SIZE_MAX PVRSRV_PDUMP_MAX_COMMENT_SIZE ++#define SZ_FILENAME_SIZE_MAX (PVRSRV_PDUMP_MAX_FILENAME_SIZE+sizeof(PDUMP_PARAM_N_FILE_NAME)) ++ ++#define PDUMP_GET_SCRIPT_STRING() \ ++ IMG_HANDLE hScript; \ ++ void *pvScriptAlloc; \ ++ IMG_UINT32 ui32MaxLen = SZ_SCRIPT_SIZE_MAX-1; \ ++ pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX ); \ ++ if (!pvScriptAlloc) \ ++ { \ ++ PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_STRING() failed to allocate memory for script buffer")); \ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ } \ ++ \ ++ hScript = (IMG_HANDLE) pvScriptAlloc; ++ ++#define PDUMP_GET_MSG_STRING() \ ++ IMG_CHAR *pszMsg; \ ++ void *pvMsgAlloc; \ ++ IMG_UINT32 ui32MaxLen = SZ_MSG_SIZE_MAX-1; \ ++ pvMsgAlloc = OSAllocMem( SZ_MSG_SIZE_MAX ); \ ++ if (!pvMsgAlloc) \ ++ { \ ++ PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_MSG_STRING() failed to allocate memory for message buffer")); \ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ } \ ++ pszMsg = (IMG_CHAR *)pvMsgAlloc; ++ ++#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \ ++ IMG_HANDLE hScript; \ ++ IMG_CHAR *pszFileName; \ ++ IMG_UINT32 ui32MaxLenScript = SZ_SCRIPT_SIZE_MAX-1; \ ++ void *pvScriptAlloc; \ ++ void *pvFileAlloc; \ ++ pvScriptAlloc = OSAllocMem( SZ_SCRIPT_SIZE_MAX ); \ ++ if (!pvScriptAlloc) \ ++ { \ ++ PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for script buffer")); \ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ } \ ++ \ ++ hScript = (IMG_HANDLE) pvScriptAlloc; \ ++ pvFileAlloc = OSAllocMem( SZ_FILENAME_SIZE_MAX ); \ ++ if (!pvFileAlloc) \ ++ { \ ++ PVR_DPF((PVR_DBG_ERROR, "PDUMP_GET_SCRIPT_AND_FILE_STRING() failed to allocate memory for filename buffer")); \ ++ OSFreeMem(pvScriptAlloc); \ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ } \ ++ pszFileName = (IMG_CHAR *)pvFileAlloc; ++ ++#define PDUMP_RELEASE_SCRIPT_STRING() \ ++ if (pvScriptAlloc) \ ++ { \ ++ OSFreeMem(pvScriptAlloc); \ ++ pvScriptAlloc = NULL; \ ++ } ++ ++#define PDUMP_RELEASE_MSG_STRING() \ ++ if (pvMsgAlloc) \ ++ { \ ++ OSFreeMem(pvMsgAlloc); \ ++ pvMsgAlloc = NULL; \ ++ } ++ ++#define PDUMP_RELEASE_FILE_STRING() \ ++ if (pvFileAlloc) \ ++ { \ ++ OSFreeMem(pvFileAlloc); \ ++ pvFileAlloc = NULL; \ ++ } ++ ++#define PDUMP_RELEASE_SCRIPT_AND_FILE_STRING() \ ++ if (pvScriptAlloc) \ ++ { \ ++ OSFreeMem(pvScriptAlloc); \ ++ pvScriptAlloc = NULL; \ ++ } \ ++ if (pvFileAlloc) \ ++ { \ ++ OSFreeMem(pvFileAlloc); \ ++ pvFileAlloc = NULL; \ ++ } ++ ++ ++/* Shared across pdump_x files */ ++PVRSRV_ERROR PDumpInitCommon(void); ++void PDumpDeInitCommon(void); ++PVRSRV_ERROR PDumpReady(void); ++void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset, ++ size_t *puiZeroPageSize, ++ const IMG_CHAR **ppszZeroPageFilename); ++ ++void PDumpConnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode); ++void PDumpDisconnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++void PDumpStopInitPhase(PVRSRV_DEVICE_NODE *psDeviceNode); ++PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Frame); ++PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT32* pui32Frame); ++PVRSRV_ERROR PDumpCommentKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 
ui32CommentSize, ++ IMG_CHAR *pszComment, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Mode, ++ IMG_UINT32 ui32Start, ++ IMG_UINT32 ui32End, ++ IMG_UINT32 ui32Interval, ++ IMG_UINT32 ui32MaxParamFileSize); ++ ++ ++PVRSRV_ERROR PDumpReg32(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT32 ui32RegValue, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpReg64(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT64 ui64RegValue, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpRegLabelToReg64(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32RegDst, ++ IMG_UINT32 ui32RegSrc, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpPhysHandleToInternalVar64(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszInternalVar, ++ IMG_HANDLE hPdumpPages, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar, ++ PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpInternalVarToMemLabel(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_CHAR *pszInternalVar, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpWriteVarORValueOp(PVRSRV_DEVICE_NODE *psDeviceNode, ++ const IMG_CHAR *pszInternalVariable, ++ const IMG_UINT64 ui64Value, ++ const IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR PDumpWriteVarANDValueOp(PVRSRV_DEVICE_NODE *psDeviceNode, ++ const IMG_CHAR *pszInternalVariable, ++ const IMG_UINT64 ui64Value, ++ const IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR PDumpWriteVarSHRValueOp(PVRSRV_DEVICE_NODE *psDeviceNode, ++ const IMG_CHAR *pszInternalVariable, ++ const IMG_UINT64 ui64Value, ++ const IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR PDumpWriteVarORVarOp(PVRSRV_DEVICE_NODE *psDeviceNode, ++ const IMG_CHAR *pszInternalVar, ++ const IMG_CHAR *pszInternalVar2, ++ const IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR PDumpWriteVarANDVarOp(PVRSRV_DEVICE_NODE *psDeviceNode, ++ const IMG_CHAR *pszInternalVar, ++ const IMG_CHAR *pszInternalVar2, ++ const IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR PDumpInternalVarToReg32(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32Reg, ++ IMG_CHAR *pszInternalVar, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpInternalVarToReg64(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32Reg, ++ IMG_CHAR *pszInternalVar, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource, ++ PMR *psPMRDest, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource, ++ PMR *psPMRDest, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32Reg, ++ PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32Reg, ++ PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpRegLabelToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32Reg, ++ IMG_CHAR *pszInternalVar, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR 
PDumpSAW(PVRSRV_DEVICE_NODE *psDeviceNode,
++                      IMG_CHAR *pszDevSpaceName,
++                      IMG_UINT32 ui32HPOffsetBytes,
++                      IMG_UINT32 ui32NumSaveBytes,
++                      IMG_CHAR *pszOutfileName,
++                      IMG_UINT32 ui32OutfileOffsetByte,
++                      PDUMP_FLAGS_T uiPDumpFlags);
++
++PVRSRV_ERROR PDumpRegPolKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++                           IMG_CHAR *pszPDumpRegName,
++                           IMG_UINT32 ui32RegAddr,
++                           IMG_UINT32 ui32RegValue,
++                           IMG_UINT32 ui32Mask,
++                           IMG_UINT32 ui32Flags,
++                           PDUMP_POLL_OPERATOR eOperator);
++
++
++/**************************************************************************/ /*!
++@Function       PDumpImageDescriptor
++@Description    PDumps image data out as an IMGBv2 data section
++@Input          psDeviceNode         Pointer to device node.
++@Input          ui32MMUContextID     PDUMP MMU context ID.
++@Input          pszSABFileName       Pointer to string containing file name of
++                                     Image being SABed
++@Input          sData                GPU virtual address of this surface.
++@Input          ui32DataSize         Image data size
++@Input          ui32LogicalWidth     Image logical width
++@Input          ui32LogicalHeight    Image logical height
++@Input          ui32PhysicalWidth    Image physical width
++@Input          ui32PhysicalHeight   Image physical height
++@Input          ePixFmt              Image pixel format
++@Input          eMemLayout           Image memory layout
++@Input          eFBCompression       FB compression mode
++@Input          paui32FBCClearColour FB clear colour (Only applicable to FBC surfaces)
++@Input          eFBCSwizzle          FBC channel swizzle (Only applicable to FBC surfaces)
++@Input          sHeader              GPU virtual address of the headers of this
++                                     surface (Only applicable to FBC surfaces)
++@Input          ui32HeaderSize       Header size (Only applicable to FBC surfaces)
++@Input          ui32PDumpFlags       PDUMP flags
++@Return         PVRSRV_ERROR:        PVRSRV_OK on success.
Otherwise, a PVRSRV_ ++ error code ++*/ /***************************************************************************/ ++PVRSRV_ERROR PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32MMUContextID, ++ IMG_CHAR *pszSABFileName, ++ IMG_DEV_VIRTADDR sData, ++ IMG_UINT32 ui32DataSize, ++ IMG_UINT32 ui32HeaderType, ++ IMG_UINT32 ui32ElementType, ++ IMG_UINT32 ui32ElementCount, ++ IMG_UINT32 ui32PDumpFlags); ++ ++ ++PVRSRV_ERROR PDumpReadRegKM(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_CHAR *pszFileName, ++ IMG_UINT32 ui32FileOffset, ++ IMG_UINT32 ui32Address, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32PDumpFlags); ++ ++__printf(3, 4) ++PVRSRV_ERROR PDumpCommentWithFlags(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Flags, ++ IMG_CHAR* pszFormat, ++ ...); ++ ++PVRSRV_ERROR PDumpCommentWithFlagsVA(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Flags, ++ const IMG_CHAR * pszFormat, ++ va_list args); ++ ++PVRSRV_ERROR PDumpPanic(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32PanicNo, ++ IMG_CHAR* pszPanicMsg, ++ const IMG_CHAR* pszPPFunc, ++ IMG_UINT32 ui32PPline); ++ ++PVRSRV_ERROR PDumpCaptureError(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_ERROR ui32ErrorNo, ++ IMG_CHAR* pszErrorMsg, ++ const IMG_CHAR* pszPPFunc, ++ IMG_UINT32 ui32PPline); ++ ++PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame); ++ ++PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State); ++ ++PVRSRV_ERROR PDumpForceCaptureStopKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++PVRSRV_ERROR PDumpRegRead32ToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32RegOffset, ++ IMG_CHAR *pszInternalVar, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpRegRead32(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ const IMG_UINT32 dwRegOffset, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpRegRead64(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ const IMG_UINT32 dwRegOffset, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpRegRead64ToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_CHAR *pszInternalVar, ++ const IMG_UINT32 dwRegOffset, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpIDLWithFlags(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Clocks, ++ IMG_UINT32 ui32Flags); ++PVRSRV_ERROR PDumpIDL(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Clocks); ++ ++PVRSRV_ERROR PDumpRegBasedCBP(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32RegOffset, ++ IMG_UINT32 ui32WPosVal, ++ IMG_UINT32 ui32PacketSize, ++ IMG_UINT32 ui32BufferSize, ++ IMG_UINT32 ui32Flags); ++ ++PVRSRV_ERROR PDumpTRG(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszMemSpace, ++ IMG_UINT32 ui32MMUCtxID, ++ IMG_UINT32 ui32RegionID, ++ IMG_BOOL bEnable, ++ IMG_UINT64 ui64VAddr, ++ IMG_UINT64 ui64LenBytes, ++ IMG_UINT32 ui32XStride, ++ IMG_UINT32 ui32Flags); ++ ++void PDumpLock(void); ++void PDumpUnlock(void); ++ ++PVRSRV_ERROR PDumpRegCondStr(IMG_CHAR **ppszPDumpCond, ++ IMG_CHAR *pszPDumpRegName, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT32 ui32RegValue, ++ IMG_UINT32 ui32Mask, ++ IMG_UINT32 ui32Flags, ++ PDUMP_POLL_OPERATOR eOperator); ++ ++PVRSRV_ERROR PDumpInternalValCondStr(IMG_CHAR **ppszPDumpCond, ++ IMG_CHAR *pszInternalVar, ++ IMG_UINT32 ui32RegValue, ++ IMG_UINT32 ui32Mask, ++ IMG_UINT32 ui32Flags, ++ PDUMP_POLL_OPERATOR eOperator); ++ ++PVRSRV_ERROR PDumpIfKM(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR 
*pszPDumpCond, IMG_UINT32 ui32PDumpFlags);
++PVRSRV_ERROR PDumpElseKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++                         IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags);
++PVRSRV_ERROR PDumpFiKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++                       IMG_CHAR *pszPDumpCond, IMG_UINT32 ui32PDumpFlags);
++PVRSRV_ERROR PDumpStartDoLoopKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                IMG_UINT32 ui32PDumpFlags);
++PVRSRV_ERROR PDumpEndDoWhileLoopKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                   IMG_CHAR *pszPDumpWhileCond,
++                                   IMG_UINT32 ui32PDumpFlags);
++PVRSRV_ERROR PDumpCOMCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
++                             IMG_UINT32 ui32PDumpFlags,
++                             const IMG_CHAR *pszPDump);
++
++void PDumpPowerTransitionStart(PVRSRV_DEVICE_NODE *psDeviceNode);
++void PDumpPowerTransitionEnd(PVRSRV_DEVICE_NODE *psDeviceNode);
++IMG_BOOL PDumpCheckFlagsWrite(PVRSRV_DEVICE_NODE *psDeviceNode,
++                              IMG_UINT32 ui32Flags);
++
++/*!
++ * @name   PDumpWriteParameter
++ * @brief  General function for writing to the PDump stream. Used
++ *         mainly for memory dumps to the parameter stream.
++ *         Usually more convenient to use PDumpWriteScript below
++ *         for the script stream.
++ * @param  psDeviceNode - device PDump pertains to
++ * @param  psui8Data - data to write
++ * @param  ui32Size - size of write
++ * @param  ui32Flags - PDump flags
++ * @param  pui32FileOffset - on return contains the file offset to
++ *         the start of the parameter data
++ * @param  aszFilenameStr - pointer to at least a 20 char buffer to
++ *         return the parameter filename
++ * @return error
++ */
++PVRSRV_ERROR PDumpWriteParameter(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                 IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size,
++                                 IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset,
++                                 IMG_CHAR* aszFilenameStr);
++
++/*!
++ * @name   PDumpWriteScript
++ * @brief  Write a PDumpOS-created string to the "script" output stream
++ * @param  psDeviceNode - device PDump pertains to
++ * @param  hString - PDump OS layer handle of string buffer to write
++ * @param  ui32Flags - PDump flags
++ * @return IMG_TRUE on success.
++ */
++IMG_BOOL PDumpWriteScript(PVRSRV_DEVICE_NODE *psDeviceNode,
++                          IMG_HANDLE hString, IMG_UINT32 ui32Flags);
++
++/**************************************************************************/ /*!
++@Function       PDumpSNPrintf
++@Description    Printf to OS-specific PDump state buffer. This function is
++                only called if PDUMP is defined.
++@Input          hBuf               handle of buffer to write into
++@Input          ui32ScriptSizeMax  maximum size of data to write (chars)
++@Input          pszFormat          format string
++@Return         PVRSRV_ERROR:      PVRSRV_OK on success. Otherwise, a PVRSRV_
++                                   error code
++*/ /**************************************************************************/
++__printf(3, 4)
++PVRSRV_ERROR PDumpSNPrintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...);
++
++
++/*
++  PDumpWriteShiftedMaskedValue():
++
++  loads the "reference" address into an internal PDump register,
++  optionally shifts it right,
++  optionally shifts it left,
++  optionally masks it,
++  then finally writes the computed value to the given destination address
++
++  i.e. it emits pdump language equivalent to this expression:
++
++  dest = ((&ref) >> SHRamount << SHLamount) & MASK
++*/
++PVRSRV_ERROR
++PDumpWriteShiftedMaskedValue(PVRSRV_DEVICE_NODE *psDeviceNode,
++                             const IMG_CHAR *pszDestRegspaceName,
++                             const IMG_CHAR *pszDestSymbolicName,
++                             IMG_DEVMEM_OFFSET_T uiDestOffset,
++                             const IMG_CHAR *pszRefRegspaceName,
++                             const IMG_CHAR *pszRefSymbolicName,
++                             IMG_DEVMEM_OFFSET_T uiRefOffset,
++                             IMG_UINT32 uiSHRAmount,
++                             IMG_UINT32 uiSHLAmount,
++                             IMG_UINT32 uiMask,
++                             IMG_DEVMEM_SIZE_T uiWordSize,
++                             IMG_UINT32 uiPDumpFlags);
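++
++/*
++ * Worked example (illustrative only, hypothetical values): with
++ * uiSHRAmount = 12, uiSHLAmount = 0 and uiMask = 0xFFFFFFFF, the
++ * emitted script computes dest = ((&ref) >> 12) & 0xFFFFFFFF, i.e.
++ * it stores the reference's 4K page number at the destination.
++ */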
++
++/*
++  PDumpWriteSymbAddress():
++  writes the address of the "reference" to the offset given
++*/
++PVRSRV_ERROR
++PDumpWriteSymbAddress(PVRSRV_DEVICE_NODE *psDeviceNode,
++                      const IMG_CHAR *pszDestSpaceName,
++                      IMG_DEVMEM_OFFSET_T uiDestOffset,
++                      const IMG_CHAR *pszRefSymbolicName,
++                      IMG_DEVMEM_OFFSET_T uiRefOffset,
++                      const IMG_CHAR *pszPDumpDevName,
++                      IMG_UINT32 ui32WordSize,
++                      IMG_UINT32 ui32AlignShift,
++                      IMG_UINT32 ui32Shift,
++                      IMG_UINT32 uiPDumpFlags);
++
++/* Register the connection with the PDump subsystem */
++PVRSRV_ERROR
++PDumpRegisterConnection(void *hSyncPrivData,
++                        PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
++                        PDUMP_CONNECTION_DATA **ppsPDumpConnectionData);
++
++/* Unregister the connection with the PDump subsystem */
++void
++PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData);
++
++/* Register for notification of PDump Transition into/out of capture range */
++PVRSRV_ERROR
++PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
++                                PFN_PDUMP_TRANSITION pfnCallback,
++                                void *hPrivData,
++                                void *pvDevice,
++                                void **ppvHandle);
++
++/* Unregister notification of PDump Transition */
++void
++PDumpUnregisterTransitionCallback(void *pvHandle);
++
++PVRSRV_ERROR
++PDumpRegisterTransitionCallbackFenceSync(void *hPrivData,
++                                         PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback,
++                                         void **ppvHandle);
++
++void
++PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle);
++
++/* Notify PDump of a Transition into/out of capture range */
++PVRSRV_ERROR
++PDumpTransition(PVRSRV_DEVICE_NODE *psDeviceNode,
++                PDUMP_CONNECTION_DATA *psPDumpConnectionData,
++                PDUMP_TRANSITION_EVENT eEvent,
++                IMG_UINT32 ui32PDumpFlags);
++
++/* Check if writing to a PDump file is permitted for the given device */
++IMG_BOOL PDumpIsDevicePermitted(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/* _ui32PDumpFlags must be a variable in the local scope */
++#define PDUMP_LOCK(_ui32PDumpFlags) do \
++	{ if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\
++		{\
++			PDumpLock();\
++		}\
++	MSC_SUPPRESS_4127\
++	} while (0)
++
++/* _ui32PDumpFlags must be a variable in the local scope */
++#define PDUMP_UNLOCK(_ui32PDumpFlags) do \
++	{ if ((_ui32PDumpFlags & PDUMP_FLAGS_PDUMP_LOCK_HELD) == 0)\
++		{\
++			PDumpUnlock();\
++		}\
++	MSC_SUPPRESS_4127\
++	} while (0)
++
++#define PDUMPINIT      PDumpInitCommon
++#define PDUMPDEINIT    PDumpDeInitCommon
++#define PDUMPREG32     PDumpReg32
++#define PDUMPREG64     PDumpReg64
++#define PDUMPREGREAD32 PDumpRegRead32
++#define PDUMPREGREAD64 PDumpRegRead64
++#define PDUMPCOMMENT(d, ...) PDumpCommentWithFlags(d, PDUMP_FLAGS_CONTINUOUS, __VA_ARGS__)
++#define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags
++#define PDUMPREGPOL           PDumpRegPolKM
++#define PDUMPREGBASEDCBP      PDumpRegBasedCBP
++#define PDUMPENDINITPHASE     PDumpStopInitPhase
++#define PDUMPIDLWITHFLAGS     PDumpIDLWithFlags
++#define PDUMPIDL              PDumpIDL
++#define PDUMPPOWCMDSTART      PDumpPowerTransitionStart
++#define PDUMPPOWCMDEND        PDumpPowerTransitionEnd
++#define PDUMPCOM              PDumpCOMCommand
++
++/* _ui32PDumpFlags must be a variable in the local scope */
++#define PDUMP_BLKSTART(_ui32PDumpFlags) do \
++	{ PDUMP_LOCK(_ui32PDumpFlags);\
++	_ui32PDumpFlags |= PDUMP_FLAGS_PDUMP_LOCK_HELD;\
++	MSC_SUPPRESS_4127\
++	} while (0)
++
++/* _ui32PDumpFlags must be a variable in the local scope */
++#define PDUMP_BLKEND(_ui32PDumpFlags) do \
++	{ _ui32PDumpFlags &= ~PDUMP_FLAGS_PDUMP_LOCK_HELD;\
++	PDUMP_UNLOCK(_ui32PDumpFlags);\
++	MSC_SUPPRESS_4127\
++	} while (0)
++
++/* _ui32PDumpFlags must be a variable in the local scope */
++#define PDUMPIF(_dev,_msg,_ui32PDumpFlags) do \
++	{PDUMP_BLKSTART(_ui32PDumpFlags);\
++	PDumpIfKM(_dev,_msg,_ui32PDumpFlags);\
++	MSC_SUPPRESS_4127\
++	} while (0)
++
++#define PDUMPELSE PDumpElseKM
++
++/* _ui32PDumpFlags must be a variable in the local scope */
++#define PDUMPFI(_dev,_msg,_ui32PDumpFlags) do \
++	{ PDumpFiKM(_dev,_msg,_ui32PDumpFlags);\
++	PDUMP_BLKEND(_ui32PDumpFlags);\
++	MSC_SUPPRESS_4127\
++	} while (0)
++
++#else
++/*
++	We should be clearer about which functions can be called
++	across the bridge as this looks rather unbalanced
++*/
++
++/*! Macro used to record a panic in the PDump script stream */
++#define PDUMP_PANIC(_dev, _id, _msg) ((void)0)
++
++/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
++#define PDUMP_ERROR(_dev, _err, _msg) ((void)0)
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpInitCommon)
++#endif
++static INLINE PVRSRV_ERROR
++PDumpInitCommon(void)
++{
++	return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpConnectionNotify)
++#endif
++static INLINE void
++PDumpConnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpDisconnectionNotify)
++#endif
++static INLINE void
++PDumpDisconnectionNotify(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpLock)
++#endif
++static INLINE void
++PDumpLock(void)
++{
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpUnlock)
++#endif
++static INLINE void
++PDumpUnlock(void)
++{
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpStopInitPhase)
++#endif
++static INLINE void
++PDumpStopInitPhase(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpSetFrameKM)
++#endif
++static INLINE PVRSRV_ERROR
++PDumpSetFrameKM(CONNECTION_DATA *psConnection,
++                PVRSRV_DEVICE_NODE *psDevNode,
++                IMG_UINT32 ui32Frame)
++{
++	PVR_UNREFERENCED_PARAMETER(psConnection);
++	PVR_UNREFERENCED_PARAMETER(psDevNode);
++	PVR_UNREFERENCED_PARAMETER(ui32Frame);
++	return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpGetFrameKM)
++#endif
++static INLINE PVRSRV_ERROR
++PDumpGetFrameKM(CONNECTION_DATA *psConnection,
++                PVRSRV_DEVICE_NODE *psDeviceNode,
++                IMG_UINT32* pui32Frame)
++{
++	PVR_UNREFERENCED_PARAMETER(psConnection);
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++	PVR_UNREFERENCED_PARAMETER(pui32Frame);
++	return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma
inline(PDumpCommentKM) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpCommentKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32CommentSize, ++ IMG_CHAR *pszComment, ++ IMG_UINT32 ui32Flags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(ui32CommentSize); ++ PVR_UNREFERENCED_PARAMETER(pszComment); ++ PVR_UNREFERENCED_PARAMETER(ui32Flags); ++ return PVRSRV_OK; ++} ++ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PDumpSetDefaultCaptureParamsKM) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpSetDefaultCaptureParamsKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Mode, ++ IMG_UINT32 ui32Start, ++ IMG_UINT32 ui32End, ++ IMG_UINT32 ui32Interval, ++ IMG_UINT32 ui32MaxParamFileSize) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(ui32Mode); ++ PVR_UNREFERENCED_PARAMETER(ui32Start); ++ PVR_UNREFERENCED_PARAMETER(ui32End); ++ PVR_UNREFERENCED_PARAMETER(ui32Interval); ++ PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize); ++ ++ return PVRSRV_OK; ++} ++ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PDumpPanic) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpPanic(IMG_UINT32 ui32PanicNo, ++ IMG_CHAR* pszPanicMsg, ++ const IMG_CHAR* pszPPFunc, ++ IMG_UINT32 ui32PPline) ++{ ++ PVR_UNREFERENCED_PARAMETER(ui32PanicNo); ++ PVR_UNREFERENCED_PARAMETER(pszPanicMsg); ++ PVR_UNREFERENCED_PARAMETER(pszPPFunc); ++ PVR_UNREFERENCED_PARAMETER(ui32PPline); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PDumpCaptureError) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo, ++ IMG_CHAR* pszErrorMsg, ++ const IMG_CHAR* pszPPFunc, ++ IMG_UINT32 ui32PPline) ++{ ++ PVR_UNREFERENCED_PARAMETER(ui32ErrorNo); ++ PVR_UNREFERENCED_PARAMETER(pszErrorMsg); ++ PVR_UNREFERENCED_PARAMETER(pszPPFunc); ++ PVR_UNREFERENCED_PARAMETER(ui32PPline); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PDumpIsLastCaptureFrameKM) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame) ++{ ++ *pbIsLastCaptureFrame = IMG_FALSE; ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PDumpGetStateKM) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpGetStateKM(IMG_UINT64 *ui64State) ++{ ++ *ui64State = 0; ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PDumpForceCaptureStopKM) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpForceCaptureStopKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PDumpImageDescriptor) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpImageDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32MMUContextID, ++ IMG_CHAR *pszSABFileName, ++ IMG_DEV_VIRTADDR sData, ++ IMG_UINT32 ui32DataSize, ++ IMG_UINT32 ui32LogicalWidth, ++ IMG_UINT32 ui32LogicalHeight, ++ IMG_UINT32 ui32PhysicalWidth, ++ IMG_UINT32 ui32PhysicalHeight, ++ PDUMP_PIXEL_FORMAT ePixFmt, ++ IMG_MEMLAYOUT eMemLayout, ++ IMG_FB_COMPRESSION eFBCompression, ++ const IMG_UINT32 *paui32FBCClearColour, ++ PDUMP_FBC_SWIZZLE eFBCSwizzle, ++ IMG_DEV_VIRTADDR sHeader, ++ IMG_UINT32 ui32HeaderSize, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(ui32MMUContextID); 
++	PVR_UNREFERENCED_PARAMETER(pszSABFileName);
++	PVR_UNREFERENCED_PARAMETER(sData);
++	PVR_UNREFERENCED_PARAMETER(ui32DataSize);
++	PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
++	PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
++	PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
++	PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
++	PVR_UNREFERENCED_PARAMETER(ePixFmt);
++	PVR_UNREFERENCED_PARAMETER(eMemLayout);
++	PVR_UNREFERENCED_PARAMETER(eFBCompression);
++	PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
++	PVR_UNREFERENCED_PARAMETER(eFBCSwizzle);
++	PVR_UNREFERENCED_PARAMETER(sHeader);
++	PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
++	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
++	return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpDataDescriptor)
++#endif
++static INLINE PVRSRV_ERROR
++PDumpDataDescriptor(PVRSRV_DEVICE_NODE *psDeviceNode,
++                    IMG_UINT32 ui32MMUContextID,
++                    IMG_CHAR *pszSABFileName,
++                    IMG_DEV_VIRTADDR sData,
++                    IMG_UINT32 ui32DataSize,
++                    IMG_UINT32 ui32HeaderType,
++                    IMG_UINT32 ui32ElementType,
++                    IMG_UINT32 ui32ElementCount,
++                    IMG_UINT32 ui32PDumpFlags)
++{
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++	PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
++	PVR_UNREFERENCED_PARAMETER(pszSABFileName);
++	PVR_UNREFERENCED_PARAMETER(sData);
++	PVR_UNREFERENCED_PARAMETER(ui32DataSize);
++	PVR_UNREFERENCED_PARAMETER(ui32HeaderType);
++	PVR_UNREFERENCED_PARAMETER(ui32ElementType);
++	PVR_UNREFERENCED_PARAMETER(ui32ElementCount);
++	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
++	return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpRegisterConnection)
++#endif
++static INLINE PVRSRV_ERROR
++PDumpRegisterConnection(void *hSyncPrivData,
++                        PFN_PDUMP_SYNCBLOCKS pfnPDumpSyncBlocks,
++                        PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
++{
++	PVR_UNREFERENCED_PARAMETER(hSyncPrivData);
++	PVR_UNREFERENCED_PARAMETER(pfnPDumpSyncBlocks);
++	PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData);
++
++	return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpUnregisterConnection)
++#endif
++static INLINE void
++PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
++{
++	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpRegisterTransitionCallback)
++#endif
++static INLINE PVRSRV_ERROR
++PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
++                                PFN_PDUMP_TRANSITION pfnCallback,
++                                void *hPrivData,
++                                void *pvDevice,
++                                void **ppvHandle)
++{
++	PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
++	PVR_UNREFERENCED_PARAMETER(pfnCallback);
++	PVR_UNREFERENCED_PARAMETER(hPrivData);
++	PVR_UNREFERENCED_PARAMETER(pvDevice);
++	PVR_UNREFERENCED_PARAMETER(ppvHandle);
++
++	return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpUnregisterTransitionCallback)
++#endif
++static INLINE void
++PDumpUnregisterTransitionCallback(void *pvHandle)
++{
++	PVR_UNREFERENCED_PARAMETER(pvHandle);
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpRegisterTransitionCallbackFenceSync)
++#endif
++static INLINE PVRSRV_ERROR
++PDumpRegisterTransitionCallbackFenceSync(void *hPrivData,
++                                         PFN_PDUMP_TRANSITION_FENCE_SYNC pfnCallback,
++                                         void **ppvHandle)
++{
++	PVR_UNREFERENCED_PARAMETER(pfnCallback);
++	PVR_UNREFERENCED_PARAMETER(hPrivData);
++	PVR_UNREFERENCED_PARAMETER(ppvHandle);
++
++	return PVRSRV_OK;
++}
++
++#ifdef INLINE_IS_PRAGMA
++#pragma inline(PDumpUnregisterTransitionCallbackFenceSync)
++#endif
++static INLINE void
++PDumpUnregisterTransitionCallbackFenceSync(void *pvHandle)
++{
++	PVR_UNREFERENCED_PARAMETER(pvHandle);
++}
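++
++/*
++ * Usage sketch (illustrative only; MyTransitionCb and pvMyPrivData are
++ * hypothetical, and the PFN_PDUMP_TRANSITION callback type is defined
++ * elsewhere). A client typically registers for capture-range
++ * transitions once and unregisters on teardown:
++ *
++ *   void *pvHandle;
++ *   PVRSRV_ERROR eError;
++ *
++ *   eError = PDumpRegisterTransitionCallback(psPDumpConnectionData,
++ *                                            MyTransitionCb,
++ *                                            pvMyPrivData,
++ *                                            pvDevice,
++ *                                            &pvHandle);
++ *   ...
++ *   PDumpUnregisterTransitionCallback(pvHandle);
++ *
++ * PDumpTransition() is the entry point that notifies PDump when the
++ * capture range is entered or left; registered callbacks are driven
++ * from that path.
++ */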
++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PDumpTransition) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpTransition(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PDUMP_CONNECTION_DATA *psPDumpConnectionData, ++ PDUMP_TRANSITION_EVENT eEvent, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData); ++ PVR_UNREFERENCED_PARAMETER(eEvent); ++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); ++ return PVRSRV_OK; ++} ++ ++#if defined(__linux__) || defined(GCC_IA32) || defined(GCC_ARM) || defined(__QNXNTO__) || defined(INTEGRITY_OS) ++ #define PDUMPINIT PDumpInitCommon ++ #define PDUMPDEINIT(args...) ++ #define PDUMPREG32(args...) ++ #define PDUMPREG64(args...) ++ #define PDUMPREGREAD32(args...) ++ #define PDUMPREGREAD64(args...) ++ #define PDUMPCOMMENT(args...) ++ #define PDUMPREGPOL(args...) ++ #define PDUMPSYNC(args...) ++ #define PDUMPCOPYTOMEM(args...) ++ #define PDUMPWRITE(args...) ++ #define PDUMPREGBASEDCBP(args...) ++ #define PDUMPCOMMENTWITHFLAGS(args...) ++ #define PDUMPENDINITPHASE(args...) ++ #define PDUMPIDLWITHFLAGS(args...) ++ #define PDUMPIDL(args...) ++ #define PDUMPPOWCMDSTART(args...) ++ #define PDUMPPOWCMDEND(args...) ++ #define PDUMP_LOCK(args...) ++ #define PDUMP_UNLOCK(args...) ++ #define PDUMPIF(args...) ++ #define PDUMPFI(args...) ++ #define PDUMPCOM(args...) ++#else ++ #error Compiler not specified ++#endif ++ ++#endif /* PDUMP */ ++ ++#endif /* PDUMP_KM_H */ ++ ++/****************************************************************************** ++ End of file (pdump_km.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/pdump_mmu.h b/drivers/gpu/drm/img-rogue/pdump_mmu.h +new file mode 100644 +index 000000000000..b67a402e53c3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pdump_mmu.h +@@ -0,0 +1,180 @@ ++/**************************************************************************/ /*! ++@File ++@Title Common MMU Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements basic low level control of MMU. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef SRVKM_PDUMP_MMU_H ++#define SRVKM_PDUMP_MMU_H ++ ++/* services/server/include/ */ ++#include "pdump_symbolicaddr.h" ++/* include/ */ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++#include "mmu_common.h" ++ ++#include "opaque_types.h" ++ ++/* ++ * PDUMP MMU attributes ++ */ ++typedef struct _PDUMP_MMU_ATTRIB_DEVICE_ ++{ ++ /* Per-Device Pdump attribs */ ++ ++ /*!< Pdump memory bank name */ ++ IMG_CHAR *pszPDumpMemDevName; ++ ++ /*!< Pdump register bank name */ ++ IMG_CHAR *pszPDumpRegDevName; ++ ++} PDUMP_MMU_ATTRIB_DEVICE; ++ ++typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_ ++{ ++ IMG_UINT32 ui32Dummy; ++} PDUMP_MMU_ATTRIB_CONTEXT; ++ ++typedef struct _PDUMP_MMU_ATTRIB_HEAP_ ++{ ++ /* data page info */ ++ IMG_UINT32 ui32DataPageMask; ++} PDUMP_MMU_ATTRIB_HEAP; ++ ++typedef struct _PDUMP_MMU_ATTRIB_ ++{ ++ struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice; ++ struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext; ++ struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap; ++} PDUMP_MMU_ATTRIB; ++ ++#if defined(PDUMP) ++PVRSRV_ERROR ++PDumpMMUMalloc(PPVRSRV_DEVICE_NODE psDeviceNode, ++ const IMG_CHAR *pszPDumpDevName, ++ MMU_LEVEL eMMULevel, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32Align, ++ PDUMP_MMU_TYPE eMMUType); ++ ++PVRSRV_ERROR ++PDumpMMUFree(PPVRSRV_DEVICE_NODE psDeviceNode, ++ const IMG_CHAR *pszPDumpDevName, ++ MMU_LEVEL eMMULevel, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ PDUMP_MMU_TYPE eMMUType); ++ ++PVRSRV_ERROR ++PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName, ++ PMR *psPMRDest, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest, ++ IMG_UINT32 ui32Flags, ++ MMU_LEVEL eMMULevel, ++ IMG_UINT64 ui64PxSymAddr, ++ IMG_UINT64 ui64PxOffset); ++ ++PVRSRV_ERROR ++PDumpMMUDumpPxEntries(PPVRSRV_DEVICE_NODE psDeviceNode, ++ MMU_LEVEL eMMULevel, ++ const IMG_CHAR *pszPDumpDevName, ++ void *pvPxMem, ++ IMG_DEV_PHYADDR sPxDevPAddr, ++ IMG_UINT32 uiFirstEntry, ++ IMG_UINT32 uiNumEntries, ++ const IMG_CHAR *pszMemspaceName, ++ const IMG_CHAR *pszSymbolicAddr, ++ IMG_UINT64 uiSymbolicAddrOffset, ++ IMG_UINT32 uiBytesPerEntry, ++ IMG_UINT32 uiLog2Align, ++ IMG_UINT32 uiAddrShift, ++ IMG_UINT64 uiAddrMask, ++ IMG_UINT64 uiPxEProtMask, ++ IMG_UINT64 uiDataValidEnable, ++ IMG_UINT32 ui32Flags, ++ PDUMP_MMU_TYPE eMMUType); ++ ++PVRSRV_ERROR ++PDumpMMUAllocMMUContext(PPVRSRV_DEVICE_NODE psDeviceNode, ++ const IMG_CHAR *pszPDumpMemSpaceName, ++ IMG_DEV_PHYADDR sPCDevPAddr, ++ PDUMP_MMU_TYPE eMMUType, ++ IMG_UINT32 *pui32MMUContextID, ++ IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR ++PDumpMMUFreeMMUContext(PPVRSRV_DEVICE_NODE psDeviceNode, ++ const IMG_CHAR *pszPDumpMemSpaceName, ++ IMG_UINT32 
ui32MMUContextID,
++                       IMG_UINT32 ui32PDumpFlags);
++
++PVRSRV_ERROR
++PDumpMMUSAB(PPVRSRV_DEVICE_NODE psDeviceNode,
++            const IMG_CHAR *pszPDumpMemNamespace,
++            IMG_UINT32 uiPDumpMMUCtx,
++            IMG_DEV_VIRTADDR sDevAddrStart,
++            IMG_DEVMEM_SIZE_T uiSize,
++            const IMG_CHAR *pszFilename,
++            IMG_UINT32 uiFileOffset,
++            IMG_UINT32 ui32PDumpFlags);
++
++#define PDUMP_MMU_ALLOC_MMUCONTEXT(psDevNode, pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \
++	PDumpMMUAllocMMUContext(psDevNode, \
++	                        pszPDumpMemDevName, \
++	                        sPCDevPAddr, \
++	                        eMMUType, \
++	                        puiPDumpCtxID, \
++	                        ui32PDumpFlags)
++
++#define PDUMP_MMU_FREE_MMUCONTEXT(psDevNode, pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \
++	PDumpMMUFreeMMUContext(psDevNode, pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags)
++#else /* PDUMP */
++
++#define PDUMP_MMU_ALLOC_MMUCONTEXT(psDevNode, pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID, ui32PDumpFlags) \
++	((void)0)
++#define PDUMP_MMU_FREE_MMUCONTEXT(psDevNode, pszPDumpMemDevName, uiPDumpCtxID, ui32PDumpFlags) \
++	((void)0)
++
++#endif /* PDUMP */
++
++#endif
+diff --git a/drivers/gpu/drm/img-rogue/pdump_physmem.h b/drivers/gpu/drm/img-rogue/pdump_physmem.h
+new file mode 100644
+index 000000000000..a5a6f37409e6
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pdump_physmem.h
+@@ -0,0 +1,257 @@
++/**************************************************************************/ /*!
++@File
++@Title          PDump functions to assist with physmem allocations
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    PDump utility functions to assist with physical memory
++                (physmem) allocations.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef SRVSRV_PDUMP_PHYSMEM_H ++#define SRVSRV_PDUMP_PHYSMEM_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "pmr.h" ++#include "device.h" /* For device node */ ++ ++#define PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH 40 ++#define PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH 60 ++#define PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH (PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH) ++ ++typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T; ++ ++#if defined(PDUMP) ++PVRSRV_ERROR ++PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, ++ IMG_CHAR **ppszSymbolicAddress); ++ ++PVRSRV_ERROR ++PDumpMalloc(PVRSRV_DEVICE_NODE *psDeviceNode, ++ const IMG_CHAR *pszDevSpace, ++ const IMG_CHAR *pszSymbolicAddress, ++ IMG_UINT64 ui64Size, ++ /* alignment is alignment of start of buffer _and_ ++ minimum contiguity - i.e. smallest allowable ++ page-size. */ ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ IMG_BOOL bInitialise, ++ IMG_UINT32 ui32InitValue, ++ IMG_HANDLE *phHandlePtr, ++ IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR ++PDumpFree(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_HANDLE hPDumpAllocationInfoHandle); ++ ++void ++PDumpMakeStringValid(IMG_CHAR *pszString, ++ IMG_UINT32 ui32StrLen); ++#else /* PDUMP */ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PDumpGetSymbolicAddr) ++#endif ++static INLINE PVRSRV_ERROR ++PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle, ++ IMG_CHAR **ppszSymbolicAddress) ++{ ++ PVR_UNREFERENCED_PARAMETER(hPhysmemPDumpHandle); ++ PVR_UNREFERENCED_PARAMETER(ppszSymbolicAddress); ++ return PVRSRV_OK; ++} ++ ++static INLINE PVRSRV_ERROR ++PDumpMalloc(const IMG_CHAR *pszDevSpace, ++ const IMG_CHAR *pszSymbolicAddress, ++ IMG_UINT64 ui64Size, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ IMG_BOOL bInitialise, ++ IMG_UINT32 ui32InitValue, ++ IMG_HANDLE *phHandlePtr, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(pszDevSpace); ++ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress); ++ PVR_UNREFERENCED_PARAMETER(ui64Size); ++ PVR_UNREFERENCED_PARAMETER(uiAlign); ++ PVR_UNREFERENCED_PARAMETER(bInitialise); ++ PVR_UNREFERENCED_PARAMETER(ui32InitValue); ++ PVR_UNREFERENCED_PARAMETER(phHandlePtr); ++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); ++ return PVRSRV_OK; ++} ++ ++static INLINE PVRSRV_ERROR ++PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle) ++{ ++ PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle); ++ return PVRSRV_OK; ++} ++#endif /* PDUMP */ ++ ++#define PMR_DEFAULT_PREFIX "PMR" ++#define PMR_SYMBOLICADDR_FMTSPEC "%s%"IMG_UINT64_FMTSPEC"_%"IMG_UINT64_FMTSPEC"_%s" ++#define PMR_MEMSPACE_FMTSPEC "%s" ++#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC "CC_%s" ++ ++#if defined(PDUMP) ++#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \ ++ PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, 
ui32Align, bInitialise, ui32InitValue, phHandlePtr, PDUMP_NONE)
++#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
++	PDumpFree(hHandle)
++#else
++#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
++	((void)(*phHandlePtr=NULL))
++#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
++	((void)(0))
++#endif /* PDUMP */
++
++PVRSRV_ERROR
++PDumpPMRWRW32(PVRSRV_DEVICE_NODE *psDeviceNode,
++              const IMG_CHAR *pszDevSpace,
++              const IMG_CHAR *pszSymbolicName,
++              IMG_DEVMEM_OFFSET_T uiOffset,
++              IMG_UINT32 ui32Value,
++              PDUMP_FLAGS_T uiPDumpFlags);
++
++PVRSRV_ERROR
++PDumpPMRWRW32InternalVarToMem(PVRSRV_DEVICE_NODE *psDeviceNode,
++                              const IMG_CHAR *pszDevSpace,
++                              const IMG_CHAR *pszSymbolicName,
++                              IMG_DEVMEM_OFFSET_T uiOffset,
++                              const IMG_CHAR *pszInternalVar,
++                              PDUMP_FLAGS_T uiPDumpFlags);
++
++PVRSRV_ERROR
++PDumpPMRRDW32MemToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
++                              const IMG_CHAR *pszInternalVar,
++                              const IMG_CHAR *pszDevSpace,
++                              const IMG_CHAR *pszSymbolicName,
++                              IMG_DEVMEM_OFFSET_T uiOffset,
++                              PDUMP_FLAGS_T uiPDumpFlags);
++
++PVRSRV_ERROR
++PDumpPMRWRW64(PVRSRV_DEVICE_NODE *psDeviceNode,
++              const IMG_CHAR *pszDevSpace,
++              const IMG_CHAR *pszSymbolicName,
++              IMG_DEVMEM_OFFSET_T uiOffset,
++              IMG_UINT64 ui64Value,
++              PDUMP_FLAGS_T uiPDumpFlags);
++
++PVRSRV_ERROR
++PDumpPMRWRW64InternalVarToMem(PVRSRV_DEVICE_NODE *psDeviceNode,
++                              const IMG_CHAR *pszDevSpace,
++                              const IMG_CHAR *pszSymbolicName,
++                              IMG_DEVMEM_OFFSET_T uiOffset,
++                              const IMG_CHAR *pszInternalVar,
++                              PDUMP_FLAGS_T uiPDumpFlags);
++
++PVRSRV_ERROR
++PDumpPMRRDW64MemToInternalVar(PVRSRV_DEVICE_NODE *psDeviceNode,
++                              const IMG_CHAR *pszInternalVar,
++                              const IMG_CHAR *pszDevSpace,
++                              const IMG_CHAR *pszSymbolicName,
++                              IMG_DEVMEM_OFFSET_T uiOffset,
++                              PDUMP_FLAGS_T uiPDumpFlags);
++
++PVRSRV_ERROR
++PDumpPMRLDB(PVRSRV_DEVICE_NODE *psDeviceNode,
++            const IMG_CHAR *pszDevSpace,
++            const IMG_CHAR *pszSymbolicName,
++            IMG_DEVMEM_OFFSET_T uiOffset,
++            IMG_DEVMEM_SIZE_T uiSize,
++            const IMG_CHAR *pszFilename,
++            IMG_UINT32 uiFileOffset,
++            PDUMP_FLAGS_T uiPDumpFlags);
++
++PVRSRV_ERROR
++PDumpPMRSAB(PVRSRV_DEVICE_NODE *psDeviceNode,
++            const IMG_CHAR *pszDevSpace,
++            const IMG_CHAR *pszSymbolicName,
++            IMG_DEVMEM_OFFSET_T uiOffset,
++            IMG_DEVMEM_SIZE_T uiSize,
++            const IMG_CHAR *pszFileName,
++            IMG_UINT32 uiFileOffset);
++
++/*
++  PDumpPMRPOL()
++
++  Emits a POL (poll) command to the PDump script.
++*/
++PVRSRV_ERROR
++PDumpPMRPOL(PVRSRV_DEVICE_NODE *psDeviceNode,
++            const IMG_CHAR *pszMemspaceName,
++            const IMG_CHAR *pszSymbolicName,
++            IMG_DEVMEM_OFFSET_T uiOffset,
++            IMG_UINT32 ui32Value,
++            IMG_UINT32 ui32Mask,
++            PDUMP_POLL_OPERATOR eOperator,
++            IMG_UINT32 uiCount,
++            IMG_UINT32 uiDelay,
++            PDUMP_FLAGS_T uiPDumpFlags);
++
++PVRSRV_ERROR
++PDumpPMRCBP(PVRSRV_DEVICE_NODE *psDeviceNode,
++            const IMG_CHAR *pszMemspaceName,
++            const IMG_CHAR *pszSymbolicName,
++            IMG_DEVMEM_OFFSET_T uiReadOffset,
++            IMG_DEVMEM_OFFSET_T uiWriteOffset,
++            IMG_DEVMEM_SIZE_T uiPacketSize,
++            IMG_DEVMEM_SIZE_T uiBufferSize);
++
++/*
++ * PDumpWriteParameterBlob()
++ *
++ * Writes a binary blob to the pdump param stream containing the current
++ * contents of the memory, and returns the filename and offset of where
++ * that blob is located (for use in a subsequent LDB, for example).
++ *
++ * Caller to provide buffer to receive filename, and declare the size of
++ * that buffer.
++ */ ++PVRSRV_ERROR ++PDumpWriteParameterBlob(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT8 *pcBuffer, ++ size_t uiNumBytes, ++ PDUMP_FLAGS_T uiPDumpFlags, ++ IMG_CHAR *pszFilenameOut, ++ size_t uiFilenameBufSz, ++ PDUMP_FILEOFFSET_T *puiOffsetOut); ++ ++#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */ +diff --git a/drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h b/drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h +new file mode 100644 +index 000000000000..ed912a5096c9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pdump_symbolicaddr.h +@@ -0,0 +1,55 @@ ++/**************************************************************************/ /*! ++@File ++@Title Abstraction of PDUMP symbolic address derivation ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Allows pdump functions to derive symbolic addresses on-the-fly ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /***************************************************************************/ ++ ++#ifndef SRVKM_PDUMP_SYMBOLICADDR_H ++#define SRVKM_PDUMP_SYMBOLICADDR_H ++ ++#include "img_types.h" ++ ++#include "pvrsrv_error.h" ++ ++/* pdump symbolic addresses are generated on-the-fly with a callback */ ++ ++typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset); ++ ++#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */ +diff --git a/drivers/gpu/drm/img-rogue/pdumpdefs.h b/drivers/gpu/drm/img-rogue/pdumpdefs.h +new file mode 100644 +index 000000000000..3f8cccabc824 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pdumpdefs.h +@@ -0,0 +1,249 @@ ++/*************************************************************************/ /*! ++@File ++@Title PDUMP definitions header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description PDUMP definitions header ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PDUMPDEFS_H ++#define PDUMPDEFS_H ++ ++/*! 
PDump Pixel Format Enumeration */ ++typedef enum _PDUMP_PIXEL_FORMAT_ ++{ ++ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2, ++ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9, ++/* PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10, */ ++ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11, ++ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12, ++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13, ++ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15, ++ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16, ++ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17, ++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18, ++ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20, ++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25, ++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26, ++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27, ++ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28, ++ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29, ++ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31, ++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36, ++ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37, ++ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41, ++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44, ++ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45, ++ PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46, ++ PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49, ++ PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50, ++ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51, ++ PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56, ++ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64, ++ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65, ++ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66, ++ ++ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff ++ ++} PDUMP_PIXEL_FORMAT; ++ ++typedef enum _PDUMP_FBC_SWIZZLE_ ++{ ++ PVRSRV_PDUMP_FBC_SWIZZLE_ARGB = 0x0, ++ PVRSRV_PDUMP_FBC_SWIZZLE_ARBG = 0x1, ++ PVRSRV_PDUMP_FBC_SWIZZLE_AGRB = 0x2, ++ PVRSRV_PDUMP_FBC_SWIZZLE_AGBR = 0x3, ++ PVRSRV_PDUMP_FBC_SWIZZLE_ABGR = 0x4, ++ PVRSRV_PDUMP_FBC_SWIZZLE_ABRG = 0x5, ++ PVRSRV_PDUMP_FBC_SWIZZLE_RGBA = 0x8, ++ PVRSRV_PDUMP_FBC_SWIZZLE_RBGA = 0x9, ++ PVRSRV_PDUMP_FBC_SWIZZLE_GRBA = 0xA, ++ PVRSRV_PDUMP_FBC_SWIZZLE_GBRA = 0xB, ++ PVRSRV_PDUMP_FBC_SWIZZLE_BGRA = 0xC, ++ PVRSRV_PDUMP_FBC_SWIZZLE_BRGA = 0xD, 
++} PDUMP_FBC_SWIZZLE;
++
++/*! PDump addrmode */
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT        0
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK         0x000000FF
++
++#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT      8
++#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE   (1U << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT)
++
++#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT     12
++#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK      0x000FF000
++
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT          20
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK           0x00F00000
++
++#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT         24
++#define PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT         25
++
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT        28
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK         0xF0000000
++
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE            (0U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1   (1U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2   (2U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3   (3U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4   (4U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5   (5U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6   (6U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7   (7U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED          (9U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED         (11U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_ZTWIDDLED         (12U << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
++
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE                (0U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT          (1U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT         (2U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT         (3U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT        (4U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT       (5U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE  (6U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE (7U << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
++
++#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR                   (1U << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT)
++
++#define PVRSRV_PDUMP_ADDRMODE_FBC_LOSSY                   (1U << PVRSRV_PDUMP_ADDRMODE_FBCLOSSY_SHIFT)
++
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE              (1U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED          (2U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2                (3U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE        (4U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE       (5U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_SURFACE      (6U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_1_RESOURCE     (7U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4                (8U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4PLUS            (9U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
++#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_TFBCDC            (10U << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
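++
++/*
++ * Example (illustrative only): an address-mode word is built by OR-ing
++ * together one value from each non-overlapping field group above, e.g.
++ * a twiddled surface with direct 8x8 FBC compression targeting FBC v4:
++ *
++ *   IMG_UINT32 ui32AddrMode = PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED |
++ *                             PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT |
++ *                             PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V4;
++ *
++ * Individual fields can be read back by masking, e.g.
++ * (ui32AddrMode & PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK).
++ */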
++
++/*! PDump Poll Operator */
++typedef enum _PDUMP_POLL_OPERATOR
++{
++	PDUMP_POLL_OPERATOR_EQUAL = 0,
++	PDUMP_POLL_OPERATOR_LESS = 1,
++	PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
++	PDUMP_POLL_OPERATOR_GREATER = 3,
++	PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
++	PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
++} PDUMP_POLL_OPERATOR;
++
++
++#define PVRSRV_PDUMP_MAX_FILENAME_SIZE  75  /*!< Max length of a pdump log file name */
++#define PVRSRV_PDUMP_MAX_COMMENT_SIZE   350 /*!< Max length of a pdump comment */
++
++/*!
++	PDump MMU type
++	(Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
++*/
++typedef enum
++{
++	PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE = 1,
++	PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE = 2,
++	PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE = 3,
++	PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE = 4,
++	PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE = 5,
++	PDUMP_MMU_TYPE_VARPAGE_40BIT = 6,
++	PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE = 7,
++	PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE = 8,
++	PDUMP_MMU_TYPE_MIPS_MICROAPTIV = 9,
++	PDUMP_MMU_TYPE_LAST
++} PDUMP_MMU_TYPE;
++
++/*!
++	PDump states
++	These values are used by the bridge call PVRSRVPDumpGetState
++*/
++#define PDUMP_STATE_CAPTURE_FRAME       (1U) /*!< Flag represents the PDump being in capture range or not */
++#define PDUMP_STATE_CONNECTED           (2U) /*!< Flag represents the PDump Client App being connected or not */
++#define PDUMP_STATE_SUSPENDED           (4U) /*!< Flag represents the PDump being suspended or not */
++#define PDUMP_STATE_CAPTURE_IN_INTERVAL (8U) /*!< Flag represents the PDump being in a capture range interval */
++
++/*!
++	PDump Capture modes
++	Values used with calls to PVRSRVPDumpSetDefaultCaptureParams
++*/
++#define PDUMP_CAPMODE_UNSET      0x00000000UL
++#define PDUMP_CAPMODE_FRAMED     0x00000001UL
++#define PDUMP_CAPMODE_CONTINUOUS 0x00000002UL
++#define PDUMP_CAPMODE_BLOCKED    0x00000003UL
++
++#define PDUMP_CAPMODE_MAX        PDUMP_CAPMODE_BLOCKED
++
++#endif /* PDUMPDEFS_H */
++
++/*****************************************************************************
++ End of file (pdumpdefs.h)
++*****************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/pdumpdesc.h b/drivers/gpu/drm/img-rogue/pdumpdesc.h
+new file mode 100644
+index 000000000000..d159bf4ee334
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pdumpdesc.h
+@@ -0,0 +1,226 @@
++/*************************************************************************/ /*!
++@File           pdumpdesc.h
++@Title          PDump Descriptor format
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Describes PDump descriptors that may be passed to the
++                extraction routines (SAB).
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(PDUMPDESC_H) ++#define PDUMPDESC_H ++ ++#include "pdumpdefs.h" ++ ++/* ++ * Common fields ++ */ ++#define HEADER_WORD0_TYPE_SHIFT (0) ++#define HEADER_WORD0_TYPE_CLRMSK (0xFFFFFFFFU) ++ ++#define HEADER_WORD1_SIZE_SHIFT (0) ++#define HEADER_WORD1_SIZE_CLRMSK (0x0000FFFFU) ++#define HEADER_WORD1_VERSION_SHIFT (16) ++#define HEADER_WORD1_VERSION_CLRMSK (0xFFFF0000U) ++ ++#define HEADER_WORD2_DATA_SIZE_SHIFT (0) ++#define HEADER_WORD2_DATA_SIZE_CLRMSK (0xFFFFFFFFU) ++ ++ ++/* ++ * The image type descriptor ++ */ ++ ++/* ++ * Header type (IMGBv2) - 'IMGB' in hex + VERSION 2 ++ * Header size - 64 bytes ++ */ ++#define IMAGE_HEADER_TYPE (0x42474D49) ++#define IMAGE_HEADER_SIZE (64) ++#define IMAGE_HEADER_VERSION (2) ++ ++/* ++ * Image type-specific fields ++ */ ++#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT (0) ++#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT (0) ++#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD5_FORMAT_SHIFT (0) ++#define IMAGE_HEADER_WORD5_FORMAT_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT (0) ++#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT (0) ++#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD8_TWIDDLING_SHIFT (0) ++#define IMAGE_HEADER_WORD8_TWIDDLING_CLRMSK (0x000000FFU) ++#define IMAGE_HEADER_WORD8_TWIDDLING_STRIDED (0 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) ++#define IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE (9 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) ++#define IMAGE_HEADER_WORD8_TWIDDLING_ZTWIDDLE (12 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT) ++ ++ ++#define IMAGE_HEADER_WORD8_STRIDE_SHIFT (8) ++#define IMAGE_HEADER_WORD8_STRIDE_CLRMSK (0x0000FF00U) ++#define IMAGE_HEADER_WORD8_STRIDE_POSITIVE (0 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) ++#define IMAGE_HEADER_WORD8_STRIDE_NEGATIVE (1 << IMAGE_HEADER_WORD8_STRIDE_SHIFT) ++ ++#define IMAGE_HEADER_WORD8_BIFTYPE_SHIFT (16) ++#define IMAGE_HEADER_WORD8_BIFTYPE_CLRMSK (0x00FF0000U) ++#define IMAGE_HEADER_WORD8_BIFTYPE_NONE (0 << IMAGE_HEADER_WORD8_BIFTYPE_SHIFT) ++ ++#define IMAGE_HEADER_WORD8_FBCTYPE_SHIFT (24) ++#define IMAGE_HEADER_WORD8_FBCTYPE_CLRMSK (0xFF000000U) ++#define IMAGE_HEADER_WORD8_FBCTYPE_8X8 (1 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) ++#define IMAGE_HEADER_WORD8_FBCTYPE_16x4 (2 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) ++#define IMAGE_HEADER_WORD8_FBCTYPE_32x2 (3 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT) ++ ++#define IMAGE_HEADER_WORD9_FBCDECOR_SHIFT (0) ++#define IMAGE_HEADER_WORD9_FBCDECOR_CLRMSK (0x000000FFU) ++#define IMAGE_HEADER_WORD9_FBCDECOR_ENABLE (1 << IMAGE_HEADER_WORD9_FBCDECOR_SHIFT) ++ ++/* Align with fbcomp_export_c.h in pdump_tools branch */ ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT (8) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_CLRMSK (0x0000FF00U) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_SAME_AS_GPU (0 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_BASE (1 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_TWIDDLED_EN (2 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* TWIDDLED_ENHANCED */ ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V2 (3 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT1 (4 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_0_LAYOUT2 
(5 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V30_WITH_HEADER_REMAP */ ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT1 (6 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_1_LAYOUT2 (7 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) /* V31_WITH_HEADER_REMAP */ ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4 (8 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_V4_PLUS (9 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++#define IMAGE_HEADER_WORD9_FBCCOMPAT_TFBC (10 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT) ++ ++#define IMAGE_HEADER_WORD9_LOSSY_SHIFT (16) ++#define IMAGE_HEADER_WORD9_LOSSY_CLRMSK (0x00FF0000U) ++/* Non-TFBC */ ++#define IMAGE_HEADER_WORD9_LOSSY_ON (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++ ++/* TFBC */ ++#define IMAGE_HEADER_WORD9_LOSSY_75 (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++#define IMAGE_HEADER_WORD9_LOSSY_37 (1 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++#define IMAGE_HEADER_WORD9_LOSSY_50 (2 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++#define IMAGE_HEADER_WORD9_LOSSY_25 (3 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++#define IMAGE_HEADER_WORD9_LOSSY_OFF (0 << IMAGE_HEADER_WORD9_LOSSY_SHIFT) ++ ++#define IMAGE_HEADER_WORD9_SWIZZLE_SHIFT (24) ++#define IMAGE_HEADER_WORD9_SWIZZLE_CLRMSK (0xFF000000U) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARGB (0x0 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ARBG (0x1 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGRB (0x2 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_AGBR (0x3 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABGR (0x4 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_ABRG (0x5 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RGBA (0x8 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_RBGA (0x9 << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GRBA (0xA << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_GBRA (0xB << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BGRA (0xC << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++#define IMAGE_HEADER_WORD9_SWIZZLE_MODE_BRGA (0xD << IMAGE_HEADER_WORD9_SWIZZLE_SHIFT) ++ ++#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_SHIFT (0) ++#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_SHIFT (0) ++#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_SHIFT (0) ++#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_SHIFT (0) ++#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_CLRMSK (0xFFFFFFFFU) ++ ++#define IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT (0) ++#define IMAGE_HEADER_WORD14_TFBC_GROUP_CLRMSK (0x000000FFU) ++#define IMAGE_HEADER_WORD14_TFBC_GROUP_25_50_75 (0 << IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT) ++#define IMAGE_HEADER_WORD14_TFBC_GROUP_25_37_50 (1 << IMAGE_HEADER_WORD14_TFBC_GROUP_SHIFT) ++ ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT (8) ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_CLRMSK (0x0000FF00U) ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_ALL (0 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_CORR (1 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) ++#define IMAGE_HEADER_WORD14_COMP_SCHEME_D_STD_ONLY (2 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) ++#define 
IMAGE_HEADER_WORD14_COMP_SCHEME_PTC_ONLY (3 << IMAGE_HEADER_WORD14_COMP_SCHEME_SHIFT) ++ ++#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_SHIFT (16) ++#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_CLRMSK (0x00FF0000U) ++#define IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_EN (1 << IMAGE_HEADER_WORD14_YUV10_OPTIMAL_FMT_8_SHIFT) /* Treat YUV10 optimal formats as 8 bits */ ++ ++/* IMAGE_HEADER_WORD15_RESERVED2 */ ++ ++/* ++ * The data type descriptor ++ */ ++ ++/* ++ * Header type (IMGCv1) - 'IMGC' in hex + VERSION 0 ++ * Header size - 20 bytes (5 x 32 bit WORDS) ++ */ ++#define DATA_HEADER_TYPE (0x43474D49) ++#define DATA_HEADER_SIZE (20) ++#define DATA_HEADER_VERSION (0) ++ ++/* ++ * The IBIN type descriptor ++ */ ++ ++/* ++ * Header type (IBIN) - 'IBIN' in hex + VERSION 0 ++ * Header size - 12 bytes (3 x 32 bit WORDS) ++ */ ++#define IBIN_HEADER_TYPE (0x4e494249) ++#define IBIN_HEADER_SIZE (12) ++#define IBIN_HEADER_VERSION (0) ++ ++/* ++ * Data type-specific fields ++ */ ++#define DATA_HEADER_WORD3_ELEMENT_TYPE_SHIFT (0) ++#define DATA_HEADER_WORD3_ELEMENT_TYPE_CLRMSK (0xFFFFFFFFU) ++ ++#define DATA_HEADER_WORD4_ELEMENT_COUNT_SHIFT (0) ++#define DATA_HEADER_WORD4_ELEMENT_COUNT_CLRMSK (0xFFFFFFFFU) ++ ++#endif /* PDUMPDESC_H */ +diff --git a/drivers/gpu/drm/img-rogue/physheap.c b/drivers/gpu/drm/img-rogue/physheap.c +new file mode 100644 +index 000000000000..2155bcbe9342 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physheap.c +@@ -0,0 +1,1184 @@ ++/*************************************************************************/ /*! ++@File physheap.c ++@Title Physical heap management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Management functions for the physical heap(s). A heap contains ++ all the information required by services when using memory from ++ that heap (such as CPU <> Device physical address translation). ++ A system must register one heap but can have more than one, which ++ is why a heap must register with a (system) unique ID. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++#include "img_types.h" ++#include "img_defs.h" ++#include "physheap.h" ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "osfunc.h" ++#include "pvrsrv.h" ++#include "physmem.h" ++#include "physmem_hostmem.h" ++#include "physmem_lma.h" ++#include "physmem_osmem.h" ++ ++struct _PHYS_HEAP_ ++{ ++ /*! The type of this heap */ ++ PHYS_HEAP_TYPE eType; ++ /* Config flags */ ++ PHYS_HEAP_USAGE_FLAGS ui32UsageFlags; ++ ++ /*! Pointer to device node struct */ ++ PPVRSRV_DEVICE_NODE psDevNode; ++ /*! PDump name of this physical memory heap */ ++ IMG_CHAR *pszPDumpMemspaceName; ++ /*! Private data for the translate routines */ ++ IMG_HANDLE hPrivData; ++ /*! Function callbacks */ ++ PHYS_HEAP_FUNCTIONS *psMemFuncs; ++ ++ /*! Refcount */ ++ IMG_UINT32 ui32RefCount; ++ ++ /*! Implementation specific */ ++ PHEAP_IMPL_DATA pvImplData; ++ PHEAP_IMPL_FUNCS *psImplFuncs; ++ ++ /*! Pointer to next physical heap */ ++ struct _PHYS_HEAP_ *psNext; ++}; ++ ++static PHYS_HEAP *g_psPhysHeapList; ++static POS_LOCK g_hPhysHeapLock; ++ ++#if defined(REFCOUNT_DEBUG) ++#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) \ ++ PVRSRVDebugPrintf(PVR_DBG_WARNING, \ ++ __FILE__, \ ++ __LINE__, \ ++ fmt, \ ++ __VA_ARGS__) ++#else ++#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) ++#endif ++ ++ ++ ++typedef struct PHYS_HEAP_PROPERTIES_TAG ++{ ++ PVRSRV_PHYS_HEAP eFallbackHeap; ++ IMG_BOOL bPVRLayerAcquire; ++ IMG_BOOL bUserModeAlloc; ++} PHYS_HEAP_PROPERTIES; ++ ++/* NOTE: Table entries and order must match enum PVRSRV_PHYS_HEAP to ensure ++ * correct operation of PhysHeapCreatePMR(). 
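++ * A heap whose eFallbackHeap entry names itself (DEFAULT and the FW_PREMAP* ++ * rows below) has no further fallback: _PhysHeapFindHeap() stops searching ++ * as soon as a heap falls back to itself. For example, a FW_CODE request ++ * with no FW_CODE heap registered retries FW_MAIN, then GPU_LOCAL, then ++ * the device's default heap.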
++ */ ++static PHYS_HEAP_PROPERTIES gasHeapProperties[PVRSRV_PHYS_HEAP_LAST] = ++{ ++ /* eFallbackHeap, bPVRLayerAcquire, bUserModeAlloc */ ++ { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* DEFAULT */ ++ { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* GPU_LOCAL */ ++ { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* CPU_LOCAL */ ++ { PVRSRV_PHYS_HEAP_DEFAULT, IMG_TRUE, IMG_TRUE }, /* GPU_PRIVATE */ ++ { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_FALSE, IMG_FALSE }, /* FW_MAIN */ ++ { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_FALSE }, /* EXTERNAL */ ++ { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_FALSE }, /* GPU_COHERENT */ ++ { PVRSRV_PHYS_HEAP_GPU_LOCAL, IMG_TRUE, IMG_TRUE }, /* GPU_SECURE */ ++ { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_CONFIG */ ++ { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_CODE */ ++ { PVRSRV_PHYS_HEAP_FW_MAIN, IMG_FALSE, IMG_FALSE }, /* FW_DATA */ ++ { PVRSRV_PHYS_HEAP_FW_PREMAP0, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP0 */ ++ { PVRSRV_PHYS_HEAP_FW_PREMAP1, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP1 */ ++ { PVRSRV_PHYS_HEAP_FW_PREMAP2, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP2 */ ++ { PVRSRV_PHYS_HEAP_FW_PREMAP3, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP3 */ ++ { PVRSRV_PHYS_HEAP_FW_PREMAP4, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP4 */ ++ { PVRSRV_PHYS_HEAP_FW_PREMAP5, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP5 */ ++ { PVRSRV_PHYS_HEAP_FW_PREMAP6, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP6 */ ++ { PVRSRV_PHYS_HEAP_FW_PREMAP7, IMG_FALSE, IMG_FALSE }, /* FW_PREMAP7 */ ++}; ++ ++static_assert((ARRAY_SIZE(gasHeapProperties) == PVRSRV_PHYS_HEAP_LAST), ++ "Size or order of gasHeapProperties entries incorrect for PVRSRV_PHYS_HEAP enum"); ++ ++void PVRSRVGetDevicePhysHeapCount(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 *pui32PhysHeapCount) ++{ ++ *pui32PhysHeapCount = psDevNode->ui32UserAllocHeapCount; ++} ++ ++static IMG_UINT32 PhysHeapOSGetPageShift(void) ++{ ++ return (IMG_UINT32)OSGetPageShift(); ++} ++ ++static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = ++{ ++ .pfnDestroyData = NULL, ++ .pfnGetPMRFactoryMemStats = PhysmemGetOSRamMemStats, ++ .pfnCreatePMR = PhysmemNewOSRamBackedPMR, ++ .pfnPagesAlloc = &OSPhyContigPagesAlloc, ++ .pfnPagesFree = &OSPhyContigPagesFree, ++ .pfnPagesMap = &OSPhyContigPagesMap, ++ .pfnPagesUnMap = &OSPhyContigPagesUnmap, ++ .pfnPagesClean = &OSPhyContigPagesClean, ++ .pfnGetPageShift = &PhysHeapOSGetPageShift, ++}; ++ ++/*************************************************************************/ /*! ++@Function _PhysHeapDebugRequest ++@Description This function is used to output debug information for a given ++ device's PhysHeaps. ++@Input pfnDbgRequestHandle Data required by this function that is ++ passed through the RegisterDeviceDbgRequestNotify ++ function. ++@Input ui32VerbLevel The maximum verbosity of the debug request. ++@Input pfnDumpDebugPrintf The specified print function that should be ++ used to dump any debug information ++ (see PVRSRVDebugRequest). ++@Input pvDumpDebugFile Optional file identifier to be passed to ++ the print function if required. 
++@Return void ++*/ /**************************************************************************/ ++static void _PhysHeapDebugRequest(PVRSRV_DBGREQ_HANDLE pfnDbgRequestHandle, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ static const IMG_CHAR *const pszTypeStrings[] = { ++ "UNKNOWN", ++ "UMA", ++ "LMA", ++ "DMA", ++#if defined(SUPPORT_WRAP_EXTMEMOBJECT) ++ "WRAP" ++#endif ++ }; ++ ++ PPVRSRV_DEVICE_NODE psDeviceNode = (PPVRSRV_DEVICE_NODE)pfnDbgRequestHandle; ++ PHYS_HEAP *psPhysHeap = NULL; ++ IMG_UINT64 ui64TotalSize; ++ IMG_UINT64 ui64FreeSize; ++ IMG_UINT32 i; ++ ++ PVR_LOG_RETURN_VOID_IF_FALSE(psDeviceNode != NULL, ++ "Phys Heap debug request failed. psDeviceNode was NULL"); ++ ++ PVR_DUMPDEBUG_LOG("------[ Device ID: %d - Phys Heaps ]------", ++ psDeviceNode->sDevId.i32OsDeviceID); ++ ++ for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++) ++ { ++ psPhysHeap = psDeviceNode->papsRegisteredPhysHeaps[i]; ++ ++ if (psPhysHeap->eType >= ARRAY_SIZE(pszTypeStrings)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "PhysHeap at address %p eType is not a PHYS_HEAP_TYPE", ++ psPhysHeap)); ++ break; ++ } ++ ++ psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, ++ &ui64TotalSize, ++ &ui64FreeSize); ++ ++ if (psPhysHeap->eType == PHYS_HEAP_TYPE_LMA) ++ { ++ IMG_CPU_PHYADDR sCPUPAddr; ++ IMG_DEV_PHYADDR sGPUPAddr; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetCPUPAddr != NULL); ++ PVR_ASSERT(psPhysHeap->psImplFuncs->pfnGetDevPAddr != NULL); ++ ++ eError = psPhysHeap->psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData, ++ &sCPUPAddr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "pfnGetCPUPAddr"); ++ sCPUPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(IMG_UINT64_MAX); ++ } ++ ++ eError = psPhysHeap->psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData, ++ &sGPUPAddr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "pfnGetDevPAddr"); ++ sGPUPAddr.uiAddr = IMG_UINT64_MAX; ++ } ++ ++ PVR_DUMPDEBUG_LOG("0x%p -> Name: %s, Type: %s, " ++ ++ "CPU PA Base: " CPUPHYADDR_UINT_FMTSPEC", " ++ "GPU PA Base: 0x%08"IMG_UINT64_FMTSPECx", " ++ "Usage Flags: 0x%08x, Refs: %d, " ++ "Free Size: %"IMG_UINT64_FMTSPEC", " ++ "Total Size: %"IMG_UINT64_FMTSPEC, ++ psPhysHeap, ++ psPhysHeap->pszPDumpMemspaceName, ++ pszTypeStrings[psPhysHeap->eType], ++ CPUPHYADDR_FMTARG(sCPUPAddr.uiAddr), ++ sGPUPAddr.uiAddr, ++ psPhysHeap->ui32UsageFlags, ++ psPhysHeap->ui32RefCount, ++ ui64FreeSize, ++ ui64TotalSize); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("0x%p -> Name: %s, Type: %s, " ++ "Usage Flags: 0x%08x, Refs: %d, " ++ "Free Size: %"IMG_UINT64_FMTSPEC", " ++ "Total Size: %"IMG_UINT64_FMTSPEC, ++ psPhysHeap, ++ psPhysHeap->pszPDumpMemspaceName, ++ pszTypeStrings[psPhysHeap->eType], ++ psPhysHeap->ui32UsageFlags, ++ psPhysHeap->ui32RefCount, ++ ui64FreeSize, ++ ui64TotalSize); ++ } ++ } ++} ++ ++PVRSRV_ERROR ++PhysHeapCreateHeapFromConfig(PVRSRV_DEVICE_NODE *psDevNode, ++ PHYS_HEAP_CONFIG *psConfig, ++ PHYS_HEAP **ppsPhysHeap) ++{ ++ PVRSRV_ERROR eResult; ++ ++ if (psConfig->eType == PHYS_HEAP_TYPE_UMA ++#if defined(SUPPORT_WRAP_EXTMEMOBJECT) ++ || psConfig->eType == PHYS_HEAP_TYPE_WRAP ++#endif ++ ) ++ { ++ eResult = PhysHeapCreate(psDevNode, psConfig, NULL, ++ &_sPHEAPImplFuncs, ppsPhysHeap); ++ } ++ else if (psConfig->eType == PHYS_HEAP_TYPE_LMA || ++ psConfig->eType == PHYS_HEAP_TYPE_DMA) ++ { ++ eResult = PhysmemCreateHeapLMA(psDevNode, psConfig, "GPU LMA (Sys)", ppsPhysHeap); ++ } ++ else ++ { ++ 
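++ /* Neither UMA/WRAP nor LMA/DMA: no heap implementation exists for ++ * this type, so reject the config rather than guessing. */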
PVR_DPF((PVR_DBG_ERROR, "%s Invalid phys heap type: %d", ++ __func__, psConfig->eType)); ++ eResult = PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ return eResult; ++} ++ ++PVRSRV_ERROR ++PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode, ++ PHYS_HEAP_CONFIG *pasConfigs, ++ IMG_UINT32 ui32NumConfigs) ++{ ++ IMG_UINT32 i; ++ PVRSRV_ERROR eError; ++ ++ /* Register the physical memory heaps */ ++ psDevNode->papsRegisteredPhysHeaps = ++ OSAllocZMem(sizeof(*psDevNode->papsRegisteredPhysHeaps) * ui32NumConfigs); ++ PVR_LOG_RETURN_IF_NOMEM(psDevNode->papsRegisteredPhysHeaps, "OSAllocZMem"); ++ ++ psDevNode->ui32RegisteredPhysHeaps = 0; ++ ++ for (i = 0; i < ui32NumConfigs; i++) ++ { ++ eError = PhysHeapCreateHeapFromConfig(psDevNode, ++ pasConfigs + i, ++ psDevNode->papsRegisteredPhysHeaps + i); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemCreateHeap"); ++ ++ psDevNode->ui32RegisteredPhysHeaps++; ++ } ++ ++#if defined(SUPPORT_PHYSMEM_TEST) ++ /* For a temporary device node there will never be a debug dump ++ * request targeting it */ ++ if (psDevNode->hDebugTable != NULL) ++#endif ++ { ++ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hPhysHeapDbgReqNotify, ++ psDevNode, ++ _PhysHeapDebugRequest, ++ DEBUG_REQUEST_SYS, ++ psDevNode); ++ ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify"); ++ } ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode, ++ PHYS_HEAP_CONFIG *psConfig, ++ PHEAP_IMPL_DATA pvImplData, ++ PHEAP_IMPL_FUNCS *psImplFuncs, ++ PHYS_HEAP **ppsPhysHeap) ++{ ++ PHYS_HEAP *psNew; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); ++ ++ if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs != NULL, "psImplFuncs"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psImplFuncs->pfnCreatePMR != NULL, "psImplFuncs->pfnCreatePMR"); ++ ++ psNew = OSAllocMem(sizeof(PHYS_HEAP)); ++ PVR_RETURN_IF_NOMEM(psNew); ++ psNew->psDevNode = psDevNode; ++ psNew->eType = psConfig->eType; ++ psNew->psMemFuncs = psConfig->psMemFuncs; ++ psNew->hPrivData = psConfig->hPrivData; ++ psNew->ui32RefCount = 0; ++ psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName; ++ psNew->ui32UsageFlags = psConfig->ui32UsageFlags; ++ ++ psNew->pvImplData = pvImplData; ++ psNew->psImplFuncs = psImplFuncs; ++ ++ psNew->psNext = g_psPhysHeapList; ++ g_psPhysHeapList = psNew; ++ ++ *ppsPhysHeap = psNew; ++ ++ PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap); ++} ++ ++void PhysHeapDestroyDeviceHeaps(PPVRSRV_DEVICE_NODE psDevNode) ++{ ++ IMG_UINT32 i; ++ ++ if (psDevNode->hPhysHeapDbgReqNotify) ++ { ++ PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hPhysHeapDbgReqNotify); ++ } ++ ++ /* Unregister heaps */ ++ for (i = 0; i < psDevNode->ui32RegisteredPhysHeaps; i++) ++ { ++ PhysHeapDestroy(psDevNode->papsRegisteredPhysHeaps[i]); ++ } ++ ++ OSFreeMem(psDevNode->papsRegisteredPhysHeaps); ++} ++ ++void PhysHeapDestroy(PHYS_HEAP *psPhysHeap) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ ++ PVR_DPF_ENTERED1(psPhysHeap); ++ ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) ++#endif ++ { ++ PVR_ASSERT(psPhysHeap->ui32RefCount == 0); ++ } ++ ++ if (g_psPhysHeapList == psPhysHeap) ++ { ++ g_psPhysHeapList = psPhysHeap->psNext; ++ } ++ else ++ { ++ PHYS_HEAP *psTmp = g_psPhysHeapList; ++ ++ while (psTmp->psNext != psPhysHeap) ++ { ++ psTmp = 
psTmp->psNext; ++ } ++ psTmp->psNext = psPhysHeap->psNext; ++ } ++ ++ if (psImplFuncs->pfnDestroyData != NULL) ++ { ++ psImplFuncs->pfnDestroyData(psPhysHeap->pvImplData); ++ } ++ ++ OSFreeMem(psPhysHeap); ++ ++ PVR_DPF_RETURN; ++} ++ ++PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap) ++{ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psPhysHeap != NULL, "psPhysHeap"); ++ ++ psPhysHeap->ui32RefCount++; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag, ++ PPVRSRV_DEVICE_NODE psDevNode, ++ PHYS_HEAP **ppsPhysHeap) ++{ ++ PHYS_HEAP *psNode = g_psPhysHeapList; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(ui32UsageFlag != 0, "ui32UsageFlag"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); ++ ++ PVR_DPF_ENTERED1(ui32UsageFlag); ++ ++ OSLockAcquire(g_hPhysHeapLock); ++ ++ while (psNode) ++ { ++ if (psNode->psDevNode != psDevNode) ++ { ++ psNode = psNode->psNext; ++ continue; ++ } ++ if (BITMASK_ANY(psNode->ui32UsageFlags, ui32UsageFlag)) ++ { ++ break; ++ } ++ psNode = psNode->psNext; ++ } ++ ++ if (psNode == NULL) ++ { ++ eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID; ++ } ++ else ++ { ++ psNode->ui32RefCount++; ++ PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", ++ __func__, psNode, psNode->ui32RefCount); ++ } ++ ++ OSLockRelease(g_hPhysHeapLock); ++ ++ *ppsPhysHeap = psNode; ++ PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap); ++} ++ ++static PHYS_HEAP * _PhysHeapFindHeap(PVRSRV_PHYS_HEAP ePhysHeap, ++ PPVRSRV_DEVICE_NODE psDevNode) ++{ ++ PHYS_HEAP *psPhysHeapNode = g_psPhysHeapList; ++ PVRSRV_PHYS_HEAP eFallback; ++ ++ if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT) ++ { ++ ePhysHeap = psDevNode->psDevConfig->eDefaultHeap; ++ } ++ ++ while (psPhysHeapNode) ++ { ++ if ((psPhysHeapNode->psDevNode == psDevNode) && ++ BIT_ISSET(psPhysHeapNode->ui32UsageFlags, ePhysHeap)) ++ { ++ return psPhysHeapNode; ++ } ++ ++ psPhysHeapNode = psPhysHeapNode->psNext; ++ } ++ ++ eFallback = gasHeapProperties[ePhysHeap].eFallbackHeap; ++ ++ if (ePhysHeap == eFallback) ++ { ++ return NULL; ++ } ++ else ++ { ++ return _PhysHeapFindHeap(eFallback, psDevNode); ++ } ++} ++ ++PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap, ++ PPVRSRV_DEVICE_NODE psDevNode, ++ PHYS_HEAP **ppsPhysHeap) ++{ ++ PHYS_HEAP *psPhysHeap; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap != PVRSRV_PHYS_HEAP_DEFAULT, "eDevPhysHeap"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(eDevPhysHeap < PVRSRV_PHYS_HEAP_LAST, "eDevPhysHeap"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode"); ++ ++ PVR_DPF_ENTERED1(eDevPhysHeap); ++ ++ OSLockAcquire(g_hPhysHeapLock); ++ ++ psPhysHeap = _PhysHeapFindHeap(eDevPhysHeap, psDevNode); ++ ++ if (psPhysHeap != NULL) ++ { ++ psPhysHeap->ui32RefCount++; ++ PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", ++ __func__, psPhysHeap, psPhysHeap->ui32RefCount); ++ } ++ else ++ { ++ eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID; ++ } ++ ++ OSLockRelease(g_hPhysHeapLock); ++ ++ *ppsPhysHeap = psPhysHeap; ++ PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap); ++} ++ ++void PhysHeapRelease(PHYS_HEAP *psPhysHeap) ++{ ++ PVR_DPF_ENTERED1(psPhysHeap); ++ ++ OSLockAcquire(g_hPhysHeapLock); ++ psPhysHeap->ui32RefCount--; ++ PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", ++ __func__, psPhysHeap, psPhysHeap->ui32RefCount); ++ OSLockRelease(g_hPhysHeapLock); ++ ++ PVR_DPF_RETURN; ++} ++ ++PHEAP_IMPL_DATA PhysHeapGetImplData(PHYS_HEAP *psPhysHeap) ++{ ++ return psPhysHeap->pvImplData; 
++} ++ ++PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap) ++{ ++ PVR_ASSERT(psPhysHeap->eType != PHYS_HEAP_TYPE_UNKNOWN); ++ return psPhysHeap->eType; ++} ++ ++PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap) ++{ ++ return psPhysHeap->ui32UsageFlags; ++} ++ ++IMG_BOOL PhysHeapValidateDefaultHeapExists(PPVRSRV_DEVICE_NODE psDevNode) ++{ ++ PHYS_HEAP *psDefaultHeap; ++ IMG_BOOL bDefaultHeapFound; ++ PhysHeapAcquireByUsage(1<<(psDevNode->psDevConfig->eDefaultHeap), psDevNode, &psDefaultHeap); ++ if (psDefaultHeap == NULL) ++ { ++ bDefaultHeapFound = IMG_FALSE; ++ } ++ else ++ { ++ PhysHeapRelease(psDefaultHeap); ++ bDefaultHeapFound = IMG_TRUE; ++ } ++ return bDefaultHeapFound; ++} ++ ++ ++/* ++ * This function will set the psDevPAddr to whatever the system layer ++ * has set it for the referenced region. ++ * It will not fail if the psDevPAddr is invalid. ++ */ ++PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap, ++ IMG_DEV_PHYADDR *psDevPAddr) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; ++ ++ if (psImplFuncs->pfnGetDevPAddr != NULL) ++ { ++ eResult = psImplFuncs->pfnGetDevPAddr(psPhysHeap->pvImplData, ++ psDevPAddr); ++ } ++ ++ return eResult; ++} ++ ++/* ++ * This function will set the psCpuPAddr to whatever the system layer ++ * has set it for the referenced region. ++ * It will not fail if the psCpuPAddr is invalid. ++ */ ++PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap, ++ IMG_CPU_PHYADDR *psCpuPAddr) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; ++ ++ if (psImplFuncs->pfnGetCPUPAddr != NULL) ++ { ++ eResult = psImplFuncs->pfnGetCPUPAddr(psPhysHeap->pvImplData, ++ psCpuPAddr); ++ } ++ ++ return eResult; ++} ++ ++PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap, ++ IMG_UINT64 *puiSize) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; ++ ++ if (psImplFuncs->pfnGetSize != NULL) ++ { ++ eResult = psImplFuncs->pfnGetSize(psPhysHeap->pvImplData, ++ puiSize); ++ } ++ ++ return eResult; ++} ++ ++PVRSRV_ERROR ++PhysHeapGetMemInfo(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP *paePhysHeapID, ++ PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats) ++{ ++ IMG_UINT32 i = 0; ++ PHYS_HEAP *psPhysHeap; ++ ++ PVR_ASSERT(ui32PhysHeapCount <= PVRSRV_PHYS_HEAP_LAST); ++ ++ for (i = 0; i < ui32PhysHeapCount; i++) ++ { ++ if (paePhysHeapID[i] >= PVRSRV_PHYS_HEAP_LAST) ++ { ++ return PVRSRV_ERROR_PHYSHEAP_ID_INVALID; ++ } ++ ++ if (paePhysHeapID[i] == PVRSRV_PHYS_HEAP_DEFAULT) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psPhysHeap = _PhysHeapFindHeap(paePhysHeapID[i], psDevNode); ++ ++ paPhysHeapMemStats[i].ui32PhysHeapFlags = 0; ++ ++ if (psPhysHeap && PhysHeapUserModeAlloc(paePhysHeapID[i]) ++ && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats) ++ { ++ psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, ++ &paPhysHeapMemStats[i].ui64TotalSize, ++ &paPhysHeapMemStats[i].ui64FreeSize); ++ paPhysHeapMemStats[i].ui32PhysHeapFlags |= PhysHeapGetType(psPhysHeap); ++ ++ if (paePhysHeapID[i] == psDevNode->psDevConfig->eDefaultHeap) ++ { ++ paPhysHeapMemStats[i].ui32PhysHeapFlags |= PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT; ++ } ++ } ++ else ++ { ++ paPhysHeapMemStats[i].ui64TotalSize = 0; ++ paPhysHeapMemStats[i].ui64FreeSize = 0; ++ } ++ } ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR 
++PhysHeapGetMemInfoPkd(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP *paePhysHeapID, ++ PHYS_HEAP_MEM_STATS_PKD_PTR paPhysHeapMemStats) ++{ ++ IMG_UINT32 i = 0; ++ PHYS_HEAP *psPhysHeap; ++ ++ PVR_ASSERT(ui32PhysHeapCount <= PVRSRV_PHYS_HEAP_LAST); ++ ++ for (i = 0; i < ui32PhysHeapCount; i++) ++ { ++ if (paePhysHeapID[i] >= PVRSRV_PHYS_HEAP_LAST) ++ { ++ return PVRSRV_ERROR_PHYSHEAP_ID_INVALID; ++ } ++ ++ if (paePhysHeapID[i] == PVRSRV_PHYS_HEAP_DEFAULT) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psPhysHeap = _PhysHeapFindHeap(paePhysHeapID[i], psDevNode); ++ ++ paPhysHeapMemStats[i].ui32PhysHeapFlags = 0; ++ ++ if (psPhysHeap && PhysHeapUserModeAlloc(paePhysHeapID[i]) ++ && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats) ++ { ++ psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, ++ &paPhysHeapMemStats[i].ui64TotalSize, ++ &paPhysHeapMemStats[i].ui64FreeSize); ++ paPhysHeapMemStats[i].ui32PhysHeapFlags |= PhysHeapGetType(psPhysHeap); ++ ++ if (paePhysHeapID[i] == psDevNode->psDevConfig->eDefaultHeap) ++ { ++ paPhysHeapMemStats[i].ui32PhysHeapFlags |= PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT; ++ } ++ } ++ else ++ { ++ paPhysHeapMemStats[i].ui64TotalSize = 0; ++ paPhysHeapMemStats[i].ui64FreeSize = 0; ++ } ++ } ++ ++ return PVRSRV_OK; ++} ++ ++void PhysheapGetPhysMemUsage(PHYS_HEAP *psPhysHeap, IMG_UINT64 *pui64TotalSize, IMG_UINT64 *pui64FreeSize) ++{ ++ if (psPhysHeap && psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats) ++ { ++ psPhysHeap->psImplFuncs->pfnGetPMRFactoryMemStats(psPhysHeap->pvImplData, ++ pui64TotalSize, ++ pui64FreeSize); ++ } ++ else ++ { ++ *pui64TotalSize = *pui64FreeSize = 0; ++ } ++} ++ ++void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap, ++ IMG_UINT32 ui32NumOfAddr, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_CPU_PHYADDR *psCpuPAddr) ++{ ++ psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData, ++ ui32NumOfAddr, ++ psDevPAddr, ++ psCpuPAddr); ++} ++ ++void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap, ++ IMG_UINT32 ui32NumOfAddr, ++ IMG_CPU_PHYADDR *psCpuPAddr, ++ IMG_DEV_PHYADDR *psDevPAddr) ++{ ++ psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData, ++ ui32NumOfAddr, ++ psCpuPAddr, ++ psDevPAddr); ++} ++ ++IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap) ++{ ++ return psPhysHeap->pszPDumpMemspaceName; ++} ++ ++PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, ++ struct _CONNECTION_DATA_ *psConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ ++ return psImplFuncs->pfnCreatePMR(psPhysHeap, ++ psConnection, ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ uiLog2PageSize, ++ uiFlags, ++ pszAnnotation, ++ uiPid, ++ ppsPMRPtr, ++ ui32PDumpFlags); ++} ++ ++PVRSRV_ERROR PhysHeapInit(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ g_psPhysHeapList = NULL; ++ ++ eError = OSLockCreate(&g_hPhysHeapLock); ++ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); ++ ++ return PVRSRV_OK; ++} ++ ++void PhysHeapDeinit(void) ++{ ++ PVR_ASSERT(g_psPhysHeapList == NULL); ++ ++ OSLockDestroy(g_hPhysHeapLock); ++} ++ ++PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap) ++{ ++ 
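++ /* PhysHeapCreate() rejects a NULL psDevNode, so every registered heap ++ * carries a valid device node pointer. */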
PVR_ASSERT(psPhysHeap != NULL); ++ ++ return psPhysHeap->psDevNode; ++} ++ ++IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap) ++{ ++ PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST); ++ ++ return gasHeapProperties[ePhysHeap].bPVRLayerAcquire; ++} ++ ++IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap) ++{ ++ PVR_ASSERT(ePhysHeap < PVRSRV_PHYS_HEAP_LAST); ++ ++ return gasHeapProperties[ePhysHeap].bUserModeAlloc; ++} ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++/*************************************************************************/ /*! ++@Function CreateGpuVirtValArenas ++@Description Create virtualization validation arenas ++@Input psDeviceNode The device node ++@Return PVRSRV_ERROR PVRSRV_OK on success ++*/ /**************************************************************************/ ++static PVRSRV_ERROR CreateGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ /* aui64OSidMin and aui64OSidMax are what we program into HW registers. ++ The values are different from base/size of arenas. */ ++ IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; ++ IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]; ++ PHYS_HEAP_CONFIG *psGPULocalHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_GPU_LOCAL); ++ PHYS_HEAP_CONFIG *psDisplayHeap = FindPhysHeapConfig(psDeviceNode->psDevConfig, PHYS_HEAP_USAGE_DISPLAY); ++ IMG_UINT64 uBase; ++ IMG_UINT64 uSize; ++ IMG_UINT64 uBaseShared; ++ IMG_UINT64 uSizeShared; ++ IMG_UINT64 uSizeSharedReg; ++ IMG_UINT32 i; ++ ++ /* Shared region is fixed size, the remaining space is divided amongst OSes */ ++ uSizeShared = PVR_ALIGN(GPUVIRT_SIZEOF_SHARED, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); ++ uSize = psGPULocalHeap->uiSize - uSizeShared; ++ uSize /= GPUVIRT_VALIDATION_NUM_OS; ++ uSize = uSize & ~((IMG_UINT64)OSGetPageSize() - 1ULL); /* Align, round down */ ++ ++ uBase = psGPULocalHeap->sCardBase.uiAddr; ++ uBaseShared = uBase + uSize * GPUVIRT_VALIDATION_NUM_OS; ++ uSizeShared = psGPULocalHeap->uiSize - (uBaseShared - uBase); ++ ++ PVR_LOG(("GPUVIRT_VALIDATION split GPU_LOCAL base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", ++ psGPULocalHeap->sCardBase.uiAddr, ++ psGPULocalHeap->uiSize)); ++ ++ /* If a display heap config exists, include the display heap in the non-secure regions */ ++ if (psDisplayHeap) ++ { ++ /* Only works when DISPLAY heap follows GPU_LOCAL heap. 
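++ * The shared range programmed into the OSid registers is one contiguous ++ * span starting at the shared base, so DISPLAY is only covered when it ++ * sits immediately after GPU_LOCAL in the card address map.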
*/ ++ PVR_LOG(("GPUVIRT_VALIDATION include DISPLAY in shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", ++ psDisplayHeap->sCardBase.uiAddr, ++ psDisplayHeap->uiSize)); ++ ++ uSizeSharedReg = uSizeShared + psDisplayHeap->uiSize; ++ } ++ else ++ { ++ uSizeSharedReg = uSizeShared; ++ } ++ ++ PVR_ASSERT(uSize >= GPUVIRT_MIN_SIZE); ++ PVR_ASSERT(uSizeSharedReg >= GPUVIRT_SIZEOF_SHARED); ++ ++ for (i = 0; i < GPUVIRT_VALIDATION_NUM_OS; i++) ++ { ++ IMG_CHAR aszOSRAName[RA_MAX_NAME_LENGTH]; ++ ++ PVR_LOG(("GPUVIRT_VALIDATION create arena OS: %d, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", i, uBase, uSize)); ++ ++ OSSNPrintf(aszOSRAName, RA_MAX_NAME_LENGTH, "GPUVIRT_OS%d", i); ++ ++ psDeviceNode->psOSidSubArena[i] = RA_Create_With_Span(aszOSRAName, ++ OSGetPageShift(), ++ 0, ++ uBase, ++ uSize); ++ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSidSubArena[i], "RA_Create_With_Span"); ++ ++ aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i] = uBase; ++ ++ if (i == 0) ++ { ++ /* OSid0 has access to all regions */ ++ aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = psGPULocalHeap->uiSize - 1ULL; ++ } ++ else ++ { ++ aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i] = uBase + uSize - 1ULL; ++ } ++ ++ /* uSizeSharedReg includes display heap */ ++ aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared; ++ aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i] = uBaseShared + uSizeSharedReg - 1ULL; ++ ++ PVR_LOG(("GPUVIRT_VALIDATION HW reg regions %d: min[0]: 0x%" IMG_UINT64_FMTSPECX ", max[0]: 0x%" IMG_UINT64_FMTSPECX ", min[1]: 0x%" IMG_UINT64_FMTSPECX ", max[1]: 0x%" IMG_UINT64_FMTSPECX ",", ++ i, ++ aui64OSidMin[GPUVIRT_VAL_REGION_SECURE][i], ++ aui64OSidMax[GPUVIRT_VAL_REGION_SECURE][i], ++ aui64OSidMin[GPUVIRT_VAL_REGION_SHARED][i], ++ aui64OSidMax[GPUVIRT_VAL_REGION_SHARED][i])); ++ uBase += uSize; ++ } ++ ++ PVR_LOG(("GPUVIRT_VALIDATION create arena Shared, base: 0x%" IMG_UINT64_FMTSPECX ", size: 0x%" IMG_UINT64_FMTSPECX ".", uBaseShared, uSizeShared)); ++ ++ PVR_ASSERT(uSizeShared >= GPUVIRT_SIZEOF_SHARED); ++ ++ /* uSizeShared does not include display heap */ ++ psDeviceNode->psOSSharedArena = RA_Create_With_Span("GPUVIRT_SHARED", ++ OSGetPageShift(), ++ 0, ++ uBaseShared, ++ uSizeShared); ++ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->psOSSharedArena, "RA_Create_With_Span"); ++ ++ if (psDeviceNode->psDevConfig->pfnSysDevVirtInit != NULL) ++ { ++ psDeviceNode->psDevConfig->pfnSysDevVirtInit(aui64OSidMin, aui64OSidMax); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Counter-part to CreateGpuVirtValArenas. ++ */ ++static void DestroyGpuVirtValArenas(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ IMG_UINT32 uiCounter = 0; ++ ++ /* ++ * NOTE: We overload psOSidSubArena[0] into the psLocalMemArena so we must ++ * not free it here as it gets cleared later. 
++ */ ++ for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++) ++ { ++ if (psDeviceNode->psOSidSubArena[uiCounter] == NULL) ++ { ++ continue; ++ } ++ RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]); ++ } ++ ++ if (psDeviceNode->psOSSharedArena != NULL) ++ { ++ RA_Delete(psDeviceNode->psOSSharedArena); ++ } ++} ++#endif ++ ++PVRSRV_ERROR PhysHeapMMUPxSetup(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ PHYS_HEAP_TYPE eHeapType; ++ PVRSRV_ERROR eError; ++ ++ eError = PhysHeapAcquireByDevPhysHeap(psDeviceNode->psDevConfig->eDefaultHeap, ++ psDeviceNode, &psDeviceNode->psMMUPhysHeap); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByDevPhysHeap", ErrorDeinit); ++ ++ eHeapType = PhysHeapGetType(psDeviceNode->psMMUPhysHeap); ++ ++ if (eHeapType == PHYS_HEAP_TYPE_UMA) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses OS System memory (UMA)", __func__)); ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ PVR_DPF((PVR_DBG_ERROR, "%s: Virtualisation Validation builds are currently only" ++ " supported on systems with local memory (LMA).", __func__)); ++ eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ goto ErrorDeinit; ++#endif ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: GPU physical heap uses local memory managed by the driver (LMA)", __func__)); ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ eError = CreateGpuVirtValArenas(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "CreateGpuVirtValArenas", ErrorDeinit); ++#endif ++ } ++ ++ return PVRSRV_OK; ++ErrorDeinit: ++ return eError; ++} ++ ++void PhysHeapMMUPxDeInit(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ /* Remove local LMA subarenas */ ++ DestroyGpuVirtValArenas(psDeviceNode); ++#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ ++ ++ if (psDeviceNode->psMMUPhysHeap != NULL) ++ { ++ PhysHeapRelease(psDeviceNode->psMMUPhysHeap); ++ psDeviceNode->psMMUPhysHeap = NULL; ++ } ++} ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++PVRSRV_ERROR PhysHeapPagesAllocGPV(PHYS_HEAP *psPhysHeap, size_t uiSize, ++ PG_HANDLE *psMemHandle, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_UINT32 ui32OSid, IMG_PID uiPid) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; ++ ++ if (psImplFuncs->pfnPagesAllocGPV != NULL) ++ { ++ eResult = psImplFuncs->pfnPagesAllocGPV(psPhysHeap, ++ uiSize, psMemHandle, psDevPAddr, ui32OSid, uiPid); ++ } ++ ++ return eResult; ++} ++#endif ++ ++PVRSRV_ERROR PhysHeapPagesAlloc(PHYS_HEAP *psPhysHeap, size_t uiSize, ++ PG_HANDLE *psMemHandle, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_PID uiPid) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; ++ ++ if (psImplFuncs->pfnPagesAlloc != NULL) ++ { ++ eResult = psImplFuncs->pfnPagesAlloc(psPhysHeap, ++ uiSize, psMemHandle, psDevPAddr, uiPid); ++ } ++ ++ return eResult; ++} ++ ++void PhysHeapPagesFree(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ ++ PVR_ASSERT(psImplFuncs->pfnPagesFree != NULL); ++ ++ if (psImplFuncs->pfnPagesFree != NULL) ++ { ++ psImplFuncs->pfnPagesFree(psPhysHeap, ++ psMemHandle); ++ } ++} ++ ++PVRSRV_ERROR PhysHeapPagesMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *pshMemHandle, size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, ++ void **pvPtr) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; ++ ++ if (psImplFuncs->pfnPagesMap != NULL) ++ { ++ eResult = 
psImplFuncs->pfnPagesMap(psPhysHeap, ++ pshMemHandle, uiSize, psDevPAddr, pvPtr); ++ } ++ ++ return eResult; ++} ++ ++void PhysHeapPagesUnMap(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle, void *pvPtr) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ ++ PVR_ASSERT(psImplFuncs->pfnPagesUnMap != NULL); ++ ++ if (psImplFuncs->pfnPagesUnMap != NULL) ++ { ++ psImplFuncs->pfnPagesUnMap(psPhysHeap, ++ psMemHandle, pvPtr); ++ } ++} ++ ++PVRSRV_ERROR PhysHeapPagesClean(PHYS_HEAP *psPhysHeap, PG_HANDLE *pshMemHandle, ++ IMG_UINT32 uiOffset, ++ IMG_UINT32 uiLength) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ PVRSRV_ERROR eResult = PVRSRV_ERROR_NOT_IMPLEMENTED; ++ ++ if (psImplFuncs->pfnPagesClean != NULL) ++ { ++ eResult = psImplFuncs->pfnPagesClean(psPhysHeap, ++ pshMemHandle, uiOffset, uiLength); ++ } ++ ++ return eResult; ++} ++ ++IMG_UINT32 PhysHeapGetPageShift(PHYS_HEAP *psPhysHeap) ++{ ++ PHEAP_IMPL_FUNCS *psImplFuncs = psPhysHeap->psImplFuncs; ++ IMG_UINT32 ui32PageShift = 0; ++ ++ PVR_ASSERT(psImplFuncs->pfnGetPageShift != NULL); ++ ++ if (psImplFuncs->pfnGetPageShift != NULL) ++ { ++ ui32PageShift = psImplFuncs->pfnGetPageShift(); ++ } ++ ++ return ui32PageShift; ++} +diff --git a/drivers/gpu/drm/img-rogue/physheap.h b/drivers/gpu/drm/img-rogue/physheap.h +new file mode 100644 +index 000000000000..060c5cd0e4df +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physheap.h +@@ -0,0 +1,497 @@ ++/*************************************************************************/ /*! ++@File ++@Title Physical heap management header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines the interface for the physical heap management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_memallocflags.h" ++#include "devicemem_typedefs.h" ++#include "opaque_types.h" ++#include "pmr_impl.h" ++#include "physheap_config.h" ++ ++#ifndef PHYSHEAP_H ++#define PHYSHEAP_H ++ ++typedef struct _PHYS_HEAP_ PHYS_HEAP; ++#define INVALID_PHYS_HEAP 0xDEADDEAD ++ ++struct _CONNECTION_DATA_; ++ ++typedef struct _PG_HANDLE_ ++{ ++ union ++ { ++ void *pvHandle; ++ IMG_UINT64 ui64Handle; ++ }u; ++ /* The allocation order is log2 value of the number of pages to allocate. ++ * As such this is a correspondingly small value. E.g, for order 4 we ++ * are talking 2^4 * PAGE_SIZE contiguous allocation. ++ * DevPxAlloc API does not need to support orders higher than 4. ++ */ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ IMG_BYTE uiOrder; /* Order of the corresponding allocation */ ++ IMG_BYTE uiOSid; /* OSid to use for allocation arena. ++ * Connection-specific. */ ++ IMG_BYTE uiPad1, ++ uiPad2; /* Spare */ ++#else ++ IMG_BYTE uiOrder; /* Order of the corresponding allocation */ ++ IMG_BYTE uiPad1, ++ uiPad2, ++ uiPad3; /* Spare */ ++#endif ++} PG_HANDLE; ++ ++/*! Pointer to private implementation specific data */ ++typedef void *PHEAP_IMPL_DATA; ++ ++/*************************************************************************/ /*! ++@Function Callback function PFN_DESTROY_DATA ++@Description Destroy private implementation specific data. ++@Input PHEAP_IMPL_DATA Pointer to implementation data. ++*/ /**************************************************************************/ ++typedef void (*PFN_DESTROY_DATA)(PHEAP_IMPL_DATA); ++/*************************************************************************/ /*! ++@Function Callback function PFN_GET_DEV_PADDR ++@Description Get heap device physical address. ++@Input PHEAP_IMPL_DATA Pointer to implementation data. ++@Output IMG_DEV_PHYADDR Device physical address. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_GET_DEV_PADDR)(PHEAP_IMPL_DATA, IMG_DEV_PHYADDR*); ++/*************************************************************************/ /*! ++@Function Callback function PFN_GET_CPU_PADDR ++@Description Get heap CPU physical address. ++@Input PHEAP_IMPL_DATA Pointer to implementation data. ++@Output IMG_CPU_PHYADDR CPU physical address. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_GET_CPU_PADDR)(PHEAP_IMPL_DATA, IMG_CPU_PHYADDR*); ++/*************************************************************************/ /*! ++@Function Callback function PFN_GET_SIZE ++@Description Get size of heap. ++@Input PHEAP_IMPL_DATA Pointer to implementation data. ++@Output IMG_UINT64 Size of heap. 
++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_GET_SIZE)(PHEAP_IMPL_DATA, IMG_UINT64*); ++/*************************************************************************/ /*! ++@Function Callback function PFN_GET_PAGE_SHIFT ++@Description Get heap log2 page shift. ++@Return IMG_UINT32 Log2 page shift ++*/ /**************************************************************************/ ++typedef IMG_UINT32 (*PFN_GET_PAGE_SHIFT)(void); ++ ++/*************************************************************************/ /*! ++@Function Callback function PFN_GET_MEM_STATS ++@Description Get total and free memory size of the physical heap managed by ++ the PMR Factory. ++@Input PHEAP_IMPL_DATA Pointer to implementation data. ++@Output IMG_UINT64 total Size of heap. ++@Output IMG_UINT64 free Size available in a heap. ++@Return none ++*/ /**************************************************************************/ ++typedef void (*PFN_GET_MEM_STATS)(PHEAP_IMPL_DATA, IMG_UINT64 *, IMG_UINT64 *); ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++typedef PVRSRV_ERROR (*PFN_PAGES_ALLOC_GPV)(PHYS_HEAP *psPhysHeap, size_t uiSize, ++ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_UINT32 ui32OSid, IMG_PID uiPid); ++#endif ++typedef PVRSRV_ERROR (*PFN_PAGES_ALLOC)(PHYS_HEAP *psPhysHeap, size_t uiSize, ++ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_PID uiPid); ++ ++typedef void (*PFN_PAGES_FREE)(PHYS_HEAP *psPhysHeap, PG_HANDLE *psMemHandle); ++ ++typedef PVRSRV_ERROR (*PFN_PAGES_MAP)(PHYS_HEAP *psPhysHeap, PG_HANDLE *pshMemHandle, ++ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, ++ void **pvPtr); ++ ++typedef void (*PFN_PAGES_UNMAP)(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *psMemHandle, void *pvPtr); ++ ++typedef PVRSRV_ERROR (*PFN_PAGES_CLEAN)(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *pshMemHandle, ++ IMG_UINT32 uiOffset, ++ IMG_UINT32 uiLength); ++ ++/*************************************************************************/ /*! ++@Function Callback function PFN_CREATE_PMR ++@Description Create a PMR physical allocation and back with RAM on creation, ++ if required. The RAM page comes either directly from ++ the Phys Heap's associated pool of memory or from an OS API. ++@Input psPhysHeap Pointer to Phys Heap. ++@Input psConnection Pointer to device connection. ++@Input uiSize Allocation size. ++@Input uiChunkSize Chunk size. ++@Input ui32NumPhysChunks Physical chunk count. ++@Input ui32NumVirtChunks Virtual chunk count. ++@Input pui32MappingTable Mapping Table. ++@Input uiLog2PageSize Page size. ++@Input uiFlags Memalloc flags. ++@Input pszAnnotation Annotation. ++@Input uiPid Process ID. ++@Output ppsPMRPtr Pointer to PMR. ++@Input ui32PDumpFlag PDump flags. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_CREATE_PMR)(PHYS_HEAP *psPhysHeap, ++ struct _CONNECTION_DATA_ *psConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags); ++ ++/*! 
Implementation specific function table */ ++typedef struct PHEAP_IMPL_FUNCS_TAG ++{ ++ PFN_DESTROY_DATA pfnDestroyData; ++ PFN_GET_DEV_PADDR pfnGetDevPAddr; ++ PFN_GET_CPU_PADDR pfnGetCPUPAddr; ++ PFN_GET_SIZE pfnGetSize; ++ PFN_GET_PAGE_SHIFT pfnGetPageShift; ++ PFN_GET_MEM_STATS pfnGetPMRFactoryMemStats; ++ PFN_CREATE_PMR pfnCreatePMR; ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ PFN_PAGES_ALLOC_GPV pfnPagesAllocGPV; ++#endif ++ PFN_PAGES_ALLOC pfnPagesAlloc; ++ PFN_PAGES_FREE pfnPagesFree; ++ PFN_PAGES_MAP pfnPagesMap; ++ PFN_PAGES_UNMAP pfnPagesUnMap; ++ PFN_PAGES_CLEAN pfnPagesClean; ++} PHEAP_IMPL_FUNCS; ++ ++/*************************************************************************/ /*! ++@Function PhysHeapCreateDeviceHeapsFromConfigs ++@Description Create new heaps for a device from configs. ++@Input psDevNode Pointer to device node struct ++@Input pasConfigs Pointer to array of Heap configurations. ++@Input ui32NumConfigs Number of configurations in array. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PhysHeapCreateDeviceHeapsFromConfigs(PPVRSRV_DEVICE_NODE psDevNode, ++ PHYS_HEAP_CONFIG *pasConfigs, ++ IMG_UINT32 ui32NumConfigs); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapCreateHeapFromConfig ++@Description Create a new heap. Calls specific heap API depending ++ on heap type. ++@Input psDevNode Pointer to device node struct. ++@Input psConfig Heap configuration. ++@Output ppsPhysHeap Pointer to the created heap. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PhysHeapCreateHeapFromConfig(PPVRSRV_DEVICE_NODE psDevNode, ++ PHYS_HEAP_CONFIG *psConfig, ++ PHYS_HEAP **ppsPhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapCreate ++@Description Create a new heap. Allocated and stored internally. ++ Destroy with PhysHeapDestroy when no longer required. ++@Input psDevNode Pointer to device node struct ++@Input psConfig Heap configuration. ++@Input pvImplData Implementation specific data. Can be NULL. ++@Input psImplFuncs Implementation specific function table. Must be ++ a valid pointer. ++@Output ppsPhysHeap Pointer to the created heap. Must be a valid ++ pointer. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR PhysHeapCreate(PPVRSRV_DEVICE_NODE psDevNode, ++ PHYS_HEAP_CONFIG *psConfig, ++ PHEAP_IMPL_DATA pvImplData, ++ PHEAP_IMPL_FUNCS *psImplFuncs, ++ PHYS_HEAP **ppsPhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapDestroyDeviceHeaps ++@Description Destroys all heaps referenced by a device. ++@Input psDevNode Pointer to a device node struct. ++@Return void ++*/ /**************************************************************************/ ++void PhysHeapDestroyDeviceHeaps(PPVRSRV_DEVICE_NODE psDevNode); ++ ++void PhysHeapDestroy(PHYS_HEAP *psPhysHeap); ++ ++PVRSRV_ERROR PhysHeapAcquire(PHYS_HEAP *psPhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapAcquireByUsage ++@Description Acquire PhysHeap by usage flag. ++@Input ui32UsageFlag PhysHeap usage flag ++@Input psDevNode Pointer to device node struct ++@Output ppsPhysHeap PhysHeap if found. 
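++@Note On success the heap's reference count is incremented; callers must ++ balance this with PhysHeapRelease().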
++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR PhysHeapAcquireByUsage(PHYS_HEAP_USAGE_FLAGS ui32UsageFlag, ++ PPVRSRV_DEVICE_NODE psDevNode, ++ PHYS_HEAP **ppsPhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapAcquireByDevPhysHeap ++@Description Acquire PhysHeap by DevPhysHeap. ++@Input eDevPhysHeap Device Phys Heap. ++@Input psDevNode Pointer to device node struct ++@Output ppsPhysHeap PhysHeap if found. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP eDevPhysHeap, ++ PPVRSRV_DEVICE_NODE psDevNode, ++ PHYS_HEAP **ppsPhysHeap); ++ ++void PhysHeapRelease(PHYS_HEAP *psPhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapGetImplData ++@Description Get physical heap implementation specific data. ++@Input psPhysHeap Pointer to physical heap. ++@Return pvImplData Implementation specific data. Can be NULL. ++*/ /**************************************************************************/ ++PHEAP_IMPL_DATA PhysHeapGetImplData(PHYS_HEAP *psPhysHeap); ++ ++PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapGetFlags ++@Description Get phys heap usage flags. ++@Input psPhysHeap Pointer to physical heap. ++@Return PHYS_HEAP_USAGE_FLAGS Phys heap usage flags. ++*/ /**************************************************************************/ ++PHYS_HEAP_USAGE_FLAGS PhysHeapGetFlags(PHYS_HEAP *psPhysHeap); ++ ++IMG_BOOL PhysHeapValidateDefaultHeapExists(PPVRSRV_DEVICE_NODE psDevNode); ++ ++PVRSRV_ERROR PhysHeapGetCpuPAddr(PHYS_HEAP *psPhysHeap, ++ IMG_CPU_PHYADDR *psCpuPAddr); ++ ++ ++PVRSRV_ERROR PhysHeapGetSize(PHYS_HEAP *psPhysHeap, ++ IMG_UINT64 *puiSize); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVGetDevicePhysHeapCount ++@Description Get the physical heap count supported by the device. ++@Input psDevNode Device node, the heap count is requested for. ++@Output pui32PhysHeapCount Buffer that holds the heap count ++@Return None ++*/ /**************************************************************************/ ++void PVRSRVGetDevicePhysHeapCount(PPVRSRV_DEVICE_NODE psDevNode, ++ IMG_UINT32 *pui32PhysHeapCount); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapGetMemInfo ++@Description Get phys heap memory statistics for a given physical heap ID. ++@Input psDevNode Pointer to device node struct ++@Input ui32PhysHeapCount Physical heap count ++@Input paePhysHeapID Physical heap ID ++@Output paPhysHeapMemStats Buffer that holds the memory statistics ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PhysHeapGetMemInfo(PPVRSRV_DEVICE_NODE psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP *paePhysHeapID, ++ PHYS_HEAP_MEM_STATS_PTR paPhysHeapMemStats); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapGetMemInfoPkd ++@Description Get phys heap memory statistics for a given physical heap ID. 
++@Input psDevNode Pointer to device node struct ++@Input ui32PhysHeapCount Physical heap count ++@Input paePhysHeapID Physical heap ID ++@Output paPhysHeapMemStats Buffer that holds the memory statistics ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PhysHeapGetMemInfoPkd(PPVRSRV_DEVICE_NODE psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP *paePhysHeapID, ++ PHYS_HEAP_MEM_STATS_PKD_PTR paPhysHeapMemStats); ++ ++/*************************************************************************/ /*! ++@Function PhysheapGetPhysMemUsage ++@Description Get memory statistics for a given physical heap. ++@Input psPhysHeap Physical heap ++@Output pui64TotalSize Buffer that holds the total memory size of the ++ given physical heap. ++@Output pui64FreeSize Buffer that holds the free memory available in ++ a given physical heap. ++@Return none ++*/ /**************************************************************************/ ++void PhysheapGetPhysMemUsage(PHYS_HEAP *psPhysHeap, ++ IMG_UINT64 *pui64TotalSize, ++ IMG_UINT64 *pui64FreeSize); ++ ++PVRSRV_ERROR PhysHeapGetDevPAddr(PHYS_HEAP *psPhysHeap, ++ IMG_DEV_PHYADDR *psDevPAddr); ++ ++void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap, ++ IMG_UINT32 ui32NumOfAddr, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_CPU_PHYADDR *psCpuPAddr); ++ ++void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap, ++ IMG_UINT32 ui32NumOfAddr, ++ IMG_CPU_PHYADDR *psCpuPAddr, ++ IMG_DEV_PHYADDR *psDevPAddr); ++ ++IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapCreatePMR ++@Description Function calls an implementation-specific function pointer. ++ See function pointer for details. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR PhysHeapCreatePMR(PHYS_HEAP *psPhysHeap, ++ struct _CONNECTION_DATA_ *psConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR PhysHeapInit(void); ++void PhysHeapDeinit(void); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapDeviceNode ++@Description Get pointer to the device node this heap belongs to. ++@Input psPhysHeap Pointer to physical heap. ++@Return PPVRSRV_DEVICE_NODE Pointer to device node. ++*/ /**************************************************************************/ ++PPVRSRV_DEVICE_NODE PhysHeapDeviceNode(PHYS_HEAP *psPhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapPVRLayerAcquire ++@Description Is phys heap to be acquired in PVR layer? ++@Input ePhysHeap phys heap ++@Return IMG_BOOL return IMG_TRUE if yes ++*/ /**************************************************************************/ ++IMG_BOOL PhysHeapPVRLayerAcquire(PVRSRV_PHYS_HEAP ePhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapUserModeAlloc ++@Description Is allocation from UM allowed? 
++@Input ePhysHeap phys heap ++@Return IMG_BOOL return IMG_TRUE if yes ++*/ /**************************************************************************/ ++IMG_BOOL PhysHeapUserModeAlloc(PVRSRV_PHYS_HEAP ePhysHeap); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapMMUPxSetup ++@Description Setup MMU Px allocation function pointers. ++@Input psDeviceNode Pointer to device node struct ++@Return PVRSRV_ERROR PVRSRV_OK on success. ++*/ /**************************************************************************/ ++PVRSRV_ERROR PhysHeapMMUPxSetup(PPVRSRV_DEVICE_NODE psDeviceNode); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapMMUPxDeInit ++@Description Deinit after PhysHeapMMUPxSetup. ++@Input psDeviceNode Pointer to device node struct ++*/ /**************************************************************************/ ++void PhysHeapMMUPxDeInit(PPVRSRV_DEVICE_NODE psDeviceNode); ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++PVRSRV_ERROR PhysHeapPagesAllocGPV(PHYS_HEAP *psPhysHeap, ++ size_t uiSize, ++ PG_HANDLE *psMemHandle, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_UINT32 ui32OSid, IMG_PID uiPid); ++#endif ++ ++PVRSRV_ERROR PhysHeapPagesAlloc(PHYS_HEAP *psPhysHeap, ++ size_t uiSize, ++ PG_HANDLE *psMemHandle, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_PID uiPid); ++ ++void PhysHeapPagesFree(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *psMemHandle); ++ ++PVRSRV_ERROR PhysHeapPagesMap(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *pshMemHandle, ++ size_t uiSize, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ void **pvPtr); ++ ++void PhysHeapPagesUnMap(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *psMemHandle, ++ void *pvPtr); ++ ++PVRSRV_ERROR PhysHeapPagesClean(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *pshMemHandle, ++ IMG_UINT32 uiOffset, ++ IMG_UINT32 uiLength); ++ ++/*************************************************************************/ /*! ++@Function PhysHeapGetPageShift ++@Description Get phys heap page shift. ++@Input psPhysHeap Pointer to physical heap. ++@Return IMG_UINT32 Log2 page shift ++*/ /**************************************************************************/ ++IMG_UINT32 PhysHeapGetPageShift(PHYS_HEAP *psPhysHeap); ++ ++#endif /* PHYSHEAP_H */ +diff --git a/drivers/gpu/drm/img-rogue/physheap_config.h b/drivers/gpu/drm/img-rogue/physheap_config.h +new file mode 100644 +index 000000000000..9d4d786dd078 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physheap_config.h +@@ -0,0 +1,119 @@ ++/*************************************************************************/ /*! ++@File physheap_config.h ++@Title Physical heap Config API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Physical heap configs are created in the system layer and ++ stored against each device node for use in the Services Server ++ common layer. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef PHYSHEAP_CONFIG_H ++#define PHYSHEAP_CONFIG_H ++ ++#include "img_types.h" ++#include "pvrsrv_memallocflags.h" ++#include "pvrsrv_memalloc_physheap.h" ++ ++typedef IMG_UINT32 PHYS_HEAP_USAGE_FLAGS; ++ ++#define PHYS_HEAP_USAGE_GPU_LOCAL (1< ++ ++module_param(gPMRAllocFail, uint, 0644); ++MODULE_PARM_DESC(gPMRAllocFail, "When number of PMR allocs reaches " ++ "this value, it will fail (default value is 0 which " ++ "means that alloc function will behave normally)."); ++#endif /* defined(__linux__) */ ++#endif /* defined(DEBUG) */ ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#include "process_stats.h" ++#include "proc_stats.h" ++#endif ++ ++PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32MemSize, ++ IMG_UINT32 ui32Log2Align, ++ const IMG_UINT8 u8Value, ++ IMG_BOOL bInitPage, ++#if defined(PDUMP) ++ const IMG_CHAR *pszDevSpace, ++ const IMG_CHAR *pszSymbolicAddress, ++ IMG_HANDLE *phHandlePtr, ++#endif ++ IMG_HANDLE hMemHandle, ++ IMG_DEV_PHYADDR *psDevPhysAddr) ++{ ++ void *pvCpuVAddr; ++ PVRSRV_ERROR eError; ++#if defined(PDUMP) ++ IMG_CHAR szFilenameOut[PDUMP_PARAM_MAX_FILE_NAME]; ++ PDUMP_FILEOFFSET_T uiOffsetOut; ++ IMG_UINT32 ui32PageSize; ++ IMG_UINT32 ui32PDumpMemSize = ui32MemSize; ++ PVRSRV_ERROR ePDumpError; ++#endif ++ PG_HANDLE *psMemHandle; ++ IMG_UINT64 uiMask; ++ IMG_DEV_PHYADDR sDevPhysAddr_int; ++ IMG_PID uiPid = 0; ++ ++ psMemHandle = hMemHandle; ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ uiPid = psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT ? ++ PVR_SYS_ALLOC_PID : OSGetCurrentClientProcessIDKM(); ++#endif ++ ++ /* Allocate the pages */ ++ eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap, ++ TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize), ++ psMemHandle, ++ &sDevPhysAddr_int, ++ uiPid); ++ PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:1"); ++ ++ /* Check to see if the page allocator returned pages with our desired ++ * alignment, which is not unlikely ++ */ ++ uiMask = (1 << ui32Log2Align) - 1; ++ if (ui32Log2Align && (sDevPhysAddr_int.uiAddr & uiMask)) ++ { ++ /* use over allocation instead */ ++ PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle); ++ ++ ui32MemSize += (IMG_UINT32) uiMask; ++ eError = PhysHeapPagesAlloc(psDevNode->psMMUPhysHeap, ++ TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize), ++ psMemHandle, ++ &sDevPhysAddr_int, ++ uiPid); ++ PVR_LOG_RETURN_IF_ERROR(eError, "pfnDevPxAlloc:2"); ++ ++ sDevPhysAddr_int.uiAddr += uiMask; ++ sDevPhysAddr_int.uiAddr &= ~uiMask; ++ } ++ *psDevPhysAddr = sDevPhysAddr_int; ++ ++#if defined(PDUMP) ++ ui32PageSize = ui32Log2Align? 
(1 << ui32Log2Align) : OSGetPageSize();
++    eError = PDumpMalloc(psDevNode,
++                         pszDevSpace,
++                         pszSymbolicAddress,
++                         ui32PDumpMemSize,
++                         ui32PageSize,
++                         IMG_FALSE,
++                         0,
++                         phHandlePtr,
++                         PDUMP_NONE);
++    if (PVRSRV_OK != eError)
++    {
++        PDUMPCOMMENT(psDevNode, "Allocating pages failed");
++        *phHandlePtr = NULL;
++    }
++    ePDumpError = eError;
++#endif
++
++    if (bInitPage)
++    {
++        /* Map the page into the CPU VA space */
++        eError = PhysHeapPagesMap(psDevNode->psMMUPhysHeap,
++                                  psMemHandle,
++                                  ui32MemSize,
++                                  &sDevPhysAddr_int,
++                                  &pvCpuVAddr);
++        if (PVRSRV_OK != eError)
++        {
++            PVR_LOG_ERROR(eError, "DevPxMap");
++            PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle);
++            return eError;
++        }
++
++        /* Fill the memory with the given content */
++        OSDeviceMemSet(pvCpuVAddr, u8Value, ui32MemSize);
++
++        /* Clean the CPU cache for the initialised range */
++        eError = PhysHeapPagesClean(psDevNode->psMMUPhysHeap,
++                                    psMemHandle,
++                                    0,
++                                    ui32MemSize);
++        if (PVRSRV_OK != eError)
++        {
++            PVR_LOG_ERROR(eError, "DevPxClean");
++            PhysHeapPagesUnMap(psDevNode->psMMUPhysHeap, psMemHandle, pvCpuVAddr);
++            PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle);
++            return eError;
++        }
++
++#if defined(PDUMP)
++        if (ePDumpError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE)
++        {
++            /* PDumping of the page contents can be done in two ways:
++             * 1. Store the single-byte init value in the .prm file
++             *    and load that value into the entire dummy page buffer.
++             *    This method requires a lot of LDBs inserted into out2.txt.
++             *
++             * 2. Store the entire contents of the buffer in the .prm file
++             *    and load them back.
++             *    This needs only a single LDB instruction in the .prm file,
++             *    which is why this method was chosen. The .prm file may
++             *    grow, but not significantly for an allocation of this size.
++             */
++            /* Write the buffer contents to the prm file */
++            eError = PDumpWriteParameterBlob(psDevNode,
++                                             pvCpuVAddr,
++                                             ui32PDumpMemSize,
++                                             PDUMP_FLAGS_CONTINUOUS,
++                                             szFilenameOut,
++                                             sizeof(szFilenameOut),
++                                             &uiOffsetOut);
++            if (PVRSRV_OK == eError)
++            {
++                /* Load the buffer back into the allocated memory when playing back the pdump */
++                eError = PDumpPMRLDB(psDevNode,
++                                     pszDevSpace,
++                                     pszSymbolicAddress,
++                                     0,
++                                     ui32PDumpMemSize,
++                                     szFilenameOut,
++                                     uiOffsetOut,
++                                     PDUMP_FLAGS_CONTINUOUS);
++                if (PVRSRV_OK != eError)
++                {
++                    PDUMP_ERROR(psDevNode, eError, "Failed to write LDB statement to script file");
++                    PVR_LOG_ERROR(eError, "PDumpPMRLDB");
++                }
++            }
++            else if (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
++            {
++                PDUMP_ERROR(psDevNode, eError, "Failed to write device allocation to parameter file");
++                PVR_LOG_ERROR(eError, "PDumpWriteParameterBlob");
++            }
++            else
++            {
++                /* Otherwise the write to the parameter file was prevented by
++                 * the flags and current state of the driver, so skip the
++                 * script write and the error path. This is expected, e.g.
++                 * when outside the capture range.
++                 */
++                eError = PVRSRV_OK;
++            }
++        }
++#endif
++
++        /* Unmap the page */
++        PhysHeapPagesUnMap(psDevNode->psMMUPhysHeap,
++                           psMemHandle,
++                           pvCpuVAddr);
++    }
++
++    return PVRSRV_OK;
++}
++
++void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
++#if defined(PDUMP)
++                    IMG_HANDLE hPDUMPMemHandle,
++#endif
++                    IMG_HANDLE hMemHandle)
++{
++    PG_HANDLE *psMemHandle;
++
++    psMemHandle = hMemHandle;
++    PhysHeapPagesFree(psDevNode->psMMUPhysHeap, psMemHandle);
++#if defined(PDUMP)
++    if (NULL != hPDUMPMemHandle)
++    {
++        PDumpFree(psDevNode, hPDUMPMemHandle);
++    }
++#endif
++}
++
++/* Checks the input parameters and adjusts them if possible and necessary */
++static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks,
++                                           IMG_UINT32 ui32NumVirtChunks,
++                                           PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                                           IMG_UINT32 *puiLog2AllocPageSize,
++                                           IMG_DEVMEM_SIZE_T *puiSize,
++                                           PMR_SIZE_T *puiChunkSize)
++{
++    IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize;
++    IMG_DEVMEM_SIZE_T uiSize = *puiSize;
++    PMR_SIZE_T uiChunkSize = *puiChunkSize;
++    /* An allocation is sparse if the numbers of virtual and physical chunks
++     * differ and, in general, whenever it has more than one virtual chunk */
++    IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks ||
++                          ui32NumVirtChunks > 1) ? IMG_TRUE : IMG_FALSE;
++
++    if (ui32NumPhysChunks == 0 && ui32NumVirtChunks == 0)
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: Number of physical chunks and number of virtual chunks "
++                 "cannot both be 0",
++                 __func__));
++
++        return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    /* Protect against ridiculous page sizes */
++    if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "Page size is too big: 2^%u.", uiLog2AllocPageSize));
++        return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    /* Range check of the alloc size */
++    if (uiSize >= 0x1000000000ULL)
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: Cancelling allocation request of over 64 GB. "
++                 "This is likely a bug.", __func__));
++        return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    /* Fail if requesting coherency on one side but uncached on the other */
++    if (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) &&
++        (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags)))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached. "
++                 "Please use GPU cached flags for coherency."));
++        return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
++    }
++
++    if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) &&
++        (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached. "
++                 "Please use CPU cached flags for coherency."));
++        return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
++    }
++
++    if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) && PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
++                 __func__));
++        return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    if (bIsSparse)
++    {
++        /* For sparse allocations we need correct parameters, such as a
++         * suitable page size ... */
++        if (OSGetPageShift() > uiLog2AllocPageSize)
++        {
++            PVR_DPF((PVR_DBG_ERROR,
++                     "%s: Invalid log2-contiguity for sparse allocation. "
++                     "Requested %u, required minimum %zd",
++                     __func__,
++                     uiLog2AllocPageSize,
++                     OSGetPageShift() ));
++
++            return PVRSRV_ERROR_INVALID_PARAMS;
++        }
++
++        /* ... and the chunk size must be equal to the page size ... */
++        if (uiChunkSize != (1 << uiLog2AllocPageSize))
++        {
++            PVR_DPF((PVR_DBG_ERROR,
++                     "%s: Invalid chunk size for sparse allocation. Requested "
++                     "%#" IMG_UINT64_FMTSPECx ", must be the same as page size %#x.",
++                     __func__, uiChunkSize, 1 << uiLog2AllocPageSize));
++
++            return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
++        }
++
++        if (ui32NumVirtChunks * uiChunkSize != uiSize)
++        {
++            PVR_DPF((PVR_DBG_ERROR,
++                     "%s: Total alloc size (%#" IMG_UINT64_FMTSPECx ") "
++                     "is not equal to virtual chunks * chunk size "
++                     "(%#" IMG_UINT64_FMTSPECx ")",
++                     __func__, uiSize, ui32NumVirtChunks * uiChunkSize));
++
++            return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
++        }
++
++        if (ui32NumPhysChunks > ui32NumVirtChunks)
++        {
++            PVR_DPF((PVR_DBG_ERROR,
++                     "%s: Number of physical chunks (%u) must not be greater "
++                     "than number of virtual chunks (%u)",
++                     __func__,
++                     ui32NumPhysChunks,
++                     ui32NumVirtChunks));
++
++            return PVRSRV_ERROR_INVALID_PARAMS;
++        }
++    }
++    else
++    {
++        /*
++         * Silently round up the alignment/page size if the request was less
++         * than PAGE_SHIFT, because it is never harmful for memory to be
++         * _more_ contiguous than was desired.
++         */
++        uiLog2AllocPageSize = OSGetPageShift() > uiLog2AllocPageSize ?
++                              OSGetPageShift() : uiLog2AllocPageSize;
++
++        /* Same for the total size */
++        uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
++        *puiChunkSize = uiSize;
++    }
++
++    if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0)
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: Total size (%#" IMG_UINT64_FMTSPECx ") "
++                 "must be a multiple of the requested contiguity (%"
++                 IMG_UINT64_FMTSPEC ")", __func__, uiSize,
++                 (IMG_UINT64) (1ULL << uiLog2AllocPageSize)));
++        return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
++    }
++
++    *puiLog2AllocPageSize = uiLog2AllocPageSize;
++    *puiSize = uiSize;
++
++    return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR _DevPhysHeapFromFlags(PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                                          PVRSRV_PHYS_HEAP *peDevPhysHeap)
++{
++    PVRSRV_PHYS_HEAP eHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags);
++
++    switch (eHeap)
++    {
++        case PVRSRV_PHYS_HEAP_FW_PREMAP0:
++        case PVRSRV_PHYS_HEAP_FW_PREMAP1:
++        case PVRSRV_PHYS_HEAP_FW_PREMAP2:
++        case PVRSRV_PHYS_HEAP_FW_PREMAP3:
++        case PVRSRV_PHYS_HEAP_FW_PREMAP4:
++        case PVRSRV_PHYS_HEAP_FW_PREMAP5:
++        case PVRSRV_PHYS_HEAP_FW_PREMAP6:
++        case PVRSRV_PHYS_HEAP_FW_PREMAP7:
++        {
++            /* keep heap (with check) */
++            PVR_RETURN_IF_INVALID_PARAM(PVRSRV_VZ_MODE_IS(HOST));
++            break;
++        }
++        case PVRSRV_PHYS_HEAP_LAST:
++        {
++            return PVRSRV_ERROR_INVALID_PARAMS;
++        }
++        default:
++        {
++            break;
++        }
++    }
++
++    *peDevPhysHeap = eHeap;
++
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++PhysmemNewRamBackedPMR_direct(CONNECTION_DATA *psConnection,
++                              PVRSRV_DEVICE_NODE *psDevNode,
++                              IMG_DEVMEM_SIZE_T uiSize,
++                              PMR_SIZE_T uiChunkSize,
++                              IMG_UINT32 ui32NumPhysChunks,
++                              IMG_UINT32 ui32NumVirtChunks,
++                              IMG_UINT32 *pui32MappingTable,
++                              IMG_UINT32 uiLog2AllocPageSize,
++                              PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                              IMG_UINT32 uiAnnotationLength,
++                              const IMG_CHAR *pszAnnotation,
++                              IMG_PID uiPid,
++                              PMR **ppsPMRPtr,
++                              IMG_UINT32 ui32PDumpFlags,
++                              PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags)
++{
++    PVRSRV_ERROR eError;
++    PVRSRV_PHYS_HEAP ePhysHeapIdx;
++    PVRSRV_MEMALLOCFLAGS_T uiPMRFlags = uiFlags;
++    PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize =
++        psDevNode->psDevConfig->pfnCheckMemAllocSize;
++
++    PVR_UNREFERENCED_PARAMETER(uiAnnotationLength);
++
++    eError = _ValidateParams(ui32NumPhysChunks,
++                             ui32NumVirtChunks,
++                             uiFlags,
++                             &uiLog2AllocPageSize,
++                             &uiSize,
++                             &uiChunkSize);
++
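++    /* Editor's illustrative note (not part of the original driver code):
++     * for a plain, non-sparse request (ui32NumPhysChunks == 1 and
++     * ui32NumVirtChunks == 1), _ValidateParams() silently rounds the
++     * contiguity and the size up to the OS page granularity. On a
++     * 4 kB-page kernel, for example, uiSize = 0x1800 with
++     * uiLog2AllocPageSize = 10 comes back as uiSize = 0x2000 and
++     * uiLog2AllocPageSize = 12, with uiChunkSize set equal to the
++     * rounded size, so the size-is-a-multiple-of-contiguity check below
++     * passes. */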
PVR_RETURN_IF_ERROR(eError); ++ ++ eError = _DevPhysHeapFromFlags(uiFlags, &ePhysHeapIdx); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_DEFAULT) ++ { ++ ePhysHeapIdx = psDevNode->psDevConfig->eDefaultHeap; ++ PVRSRV_CHANGE_PHYS_HEAP_HINT(ePhysHeapIdx, uiPMRFlags); ++ } ++ ++ if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_GPU_LOCAL) ++ { ++ if ((uiFlags & PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK) == 0) ++ { ++ ePhysHeapIdx = PVRSRV_PHYS_HEAP_GPU_PRIVATE; ++ PVRSRV_SET_PHYS_HEAP_HINT(GPU_PRIVATE, uiPMRFlags); ++ PVR_DPF((PVR_DBG_VERBOSE, "%s: Consider explicit use of GPU_PRIVATE for PMR %s." ++ " Implicit conversion to GPU PRIVATE performed", ++ __func__, pszAnnotation)); ++ } ++ else if (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) && ++ PVRSRVSystemSnoopingOfCPUCache(psDevNode->psDevConfig)) ++ { ++ ePhysHeapIdx = PVRSRV_PHYS_HEAP_GPU_COHERENT; ++ PVRSRV_SET_PHYS_HEAP_HINT(GPU_COHERENT, uiPMRFlags); ++ } ++ } ++ else if (ePhysHeapIdx == PVRSRV_PHYS_HEAP_GPU_PRIVATE) ++ { ++ if (uiFlags & PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid flags for PMR %s!" ++ " Client requested GPU_PRIVATE physical heap with CPU access flags.", ++ __func__, pszAnnotation)); ++ return PVRSRV_ERROR_INVALID_HEAP; ++ } ++ } ++ ++ if (NULL == psDevNode->apsPhysHeap[ePhysHeapIdx]) ++ { ++ /* In case a heap hasn't been acquired for this type, return invalid heap error */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Requested allocation on device node (%p) from " ++ "an invalid heap (HeapIndex=%d)", ++ __func__, psDevNode, ePhysHeapIdx)); ++ return PVRSRV_ERROR_INVALID_HEAP; ++ } ++ ++ /* Apply memory budgeting policy */ ++ if (pfnCheckMemAllocSize) ++ { ++ IMG_UINT64 uiMemSize = (IMG_UINT64)uiChunkSize * ui32NumPhysChunks; ++ ++ eError = pfnCheckMemAllocSize(psDevNode->psDevConfig->hSysData, uiMemSize); ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++#if defined(DEBUG) ++ if (gPMRAllocFail > 0) ++ { ++ static IMG_UINT32 ui32AllocCount = 1; ++ ++ if (ui32AllocCount < gPMRAllocFail) ++ { ++ ui32AllocCount++; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s failed on %d allocation.", ++ __func__, ui32AllocCount)); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ } ++#endif /* defined(DEBUG) */ ++ ++ /* If the driver is in an 'init' state all of the allocated memory ++ * should be attributed to the driver (PID 1) rather than to the ++ * process those allocations are made under. Same applies to the memory ++ * allocated for the Firmware. 
*/ ++ if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT || ++ PVRSRV_CHECK_FW_MAIN(uiFlags)) ++ { ++ uiPid = PVR_SYS_ALLOC_PID; ++ } ++ ++ eError = PhysHeapCreatePMR(psDevNode->apsPhysHeap[ePhysHeapIdx], ++ psConnection, ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ uiLog2AllocPageSize, ++ uiFlags, ++ pszAnnotation, ++ uiPid, ++ ppsPMRPtr, ++ ui32PDumpFlags); ++ ++ if (puiPMRFlags != NULL) ++ { ++ *puiPMRFlags = uiPMRFlags; ++ } ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ if (eError != PVRSRV_OK) ++ { ++ PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, ++ OSGetCurrentClientProcessIDKM()); ++ } ++#endif ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++PhysmemNewRamBackedPMR(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PMR_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2AllocPageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 uiAnnotationLength, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags, ++ PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags) ++{ ++ PVRSRV_PHYS_HEAP ePhysHeap = PVRSRV_GET_PHYS_HEAP_HINT(uiFlags); ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(uiAnnotationLength != 0, "uiAnnotationLength"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation"); ++ ++ if (ePhysHeap == PVRSRV_PHYS_HEAP_DEFAULT) ++ { ++ ePhysHeap = psDevNode->psDevConfig->eDefaultHeap; ++ } ++ ++ if (!PhysHeapUserModeAlloc(ePhysHeap)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid phys heap hint: %d.", __func__, ePhysHeap)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ return PhysmemNewRamBackedPMR_direct(psConnection, ++ psDevNode, ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ uiLog2AllocPageSize, ++ uiFlags, ++ uiAnnotationLength, ++ pszAnnotation, ++ uiPid, ++ ppsPMRPtr, ++ ui32PDumpFlags, ++ puiPMRFlags); ++} ++ ++PVRSRV_ERROR ++PhysmemNewRamBackedLockedPMR(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PMR_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 uiAnnotationLength, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags, ++ PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags) ++{ ++ ++ PVRSRV_ERROR eError; ++ eError = PhysmemNewRamBackedPMR(psConnection, ++ psDevNode, ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ uiLog2PageSize, ++ uiFlags, ++ uiAnnotationLength, ++ pszAnnotation, ++ uiPid, ++ ppsPMRPtr, ++ ui32PDumpFlags, ++ puiPMRFlags); ++ ++ if (eError == PVRSRV_OK) ++ { ++ eError = PMRLockSysPhysAddresses(*ppsPMRPtr); ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++PVRSRVGetMaxPhysHeapCountKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 *pui32PhysHeapCount) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVRSRVGetDevicePhysHeapCount(psDevNode, pui32PhysHeapCount); ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ PVRSRV_PHYS_HEAP *peHeap) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ *peHeap = psDevNode->psDevConfig->eDefaultHeap; ++ return PVRSRV_OK; ++} 
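++
++/* Editor's illustrative sketch (not part of the original driver code):
++ * the heap statistics queries below are intended to be used as a
++ * count-then-fill pair, with the caller owning the stats buffer, e.g.:
++ *
++ *   IMG_UINT32 ui32Count;
++ *   PHYS_HEAP_MEM_STATS *pasStats;
++ *
++ *   PVRSRVGetMaxPhysHeapCountKM(psConnection, psDevNode, &ui32Count);
++ *   pasStats = OSAllocMem(ui32Count * sizeof(*pasStats));
++ *   PVRSRVGetHeapPhysMemUsageKM(psConnection, psDevNode,
++ *                               ui32Count, pasStats);
++ *
++ * PVRSRVGetHeapPhysMemUsageKM() rejects a count that does not match the
++ * device's ui32UserAllocHeapCount, so a stale count fails safely. */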
++ ++PVRSRV_ERROR ++PVRSRVGetHeapPhysMemUsageKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PHYS_HEAP_MEM_STATS *apPhysHeapMemStats) ++{ ++ PHYS_HEAP *psPhysHeap; ++ IMG_UINT uiHeapIndex, i = 0; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if (ui32PhysHeapCount != psDevNode->ui32UserAllocHeapCount) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ for (uiHeapIndex = PVRSRV_PHYS_HEAP_DEFAULT+1; (uiHeapIndex < PVRSRV_PHYS_HEAP_LAST); uiHeapIndex++) ++ { ++ psPhysHeap = psDevNode->apsPhysHeap[uiHeapIndex]; ++ ++ if (psPhysHeap && PhysHeapUserModeAlloc(uiHeapIndex)) ++ { ++ PVR_ASSERT(i < ui32PhysHeapCount); ++ ++ PhysheapGetPhysMemUsage(psPhysHeap, &apPhysHeapMemStats[i].ui64TotalSize, ++ &apPhysHeapMemStats[i].ui64FreeSize); ++ ++ i++; ++ } ++ } ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PVRSRVGetHeapPhysMemUsagePkdKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PHYS_HEAP_MEM_STATS_PKD *apPhysHeapMemStats) ++{ ++ PHYS_HEAP *psPhysHeap; ++ IMG_UINT uiHeapIndex, i = 0; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if (ui32PhysHeapCount != psDevNode->ui32UserAllocHeapCount) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ for (uiHeapIndex = PVRSRV_PHYS_HEAP_DEFAULT+1; (uiHeapIndex < PVRSRV_PHYS_HEAP_LAST); uiHeapIndex++) ++ { ++ psPhysHeap = psDevNode->apsPhysHeap[uiHeapIndex]; ++ ++ if (psPhysHeap && PhysHeapUserModeAlloc(uiHeapIndex)) ++ { ++ PVR_ASSERT(i < ui32PhysHeapCount); ++ ++ PhysheapGetPhysMemUsage(psPhysHeap, &apPhysHeapMemStats[i].ui64TotalSize, ++ &apPhysHeapMemStats[i].ui64FreeSize); ++ ++ i++; ++ } ++ } ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP *paePhysHeapID, ++ PHYS_HEAP_MEM_STATS *paPhysHeapMemStats) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ return PhysHeapGetMemInfo(psDevNode, ++ ui32PhysHeapCount, ++ paePhysHeapID, ++ paPhysHeapMemStats); ++} ++ ++PVRSRV_ERROR ++PVRSRVPhysHeapGetMemInfoPkdKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP *paePhysHeapID, ++ PHYS_HEAP_MEM_STATS_PKD *paPhysHeapMemStats) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ return PhysHeapGetMemInfoPkd(psDevNode, ++ ui32PhysHeapCount, ++ paePhysHeapID, ++ paPhysHeapMemStats); ++} ++ ++/* 'Wrapper' function to call PMRImportPMR(), which first checks the PMR is ++ * for the current device. This avoids the need to do this in pmr.c, which ++ * would then need PVRSRV_DEVICE_NODE (defining this type in pmr.h causes a ++ * typedef redefinition issue). 
++ */ ++PVRSRV_ERROR ++PhysmemImportPMR(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ PMR_EXPORT *psPMRExport, ++ PMR_PASSWORD_T uiPassword, ++ PMR_SIZE_T uiSize, ++ PMR_LOG2ALIGN_T uiLog2Contig, ++ PMR **ppsPMR) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if (PMRGetExportDeviceNode(psPMRExport) != psDevNode) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", __func__)); ++ return PVRSRV_ERROR_PMR_NOT_PERMITTED; ++ } ++ ++ return PMRImportPMR(psPMRExport, ++ uiPassword, ++ uiSize, ++ uiLog2Contig, ++ ppsPMR); ++} +diff --git a/drivers/gpu/drm/img-rogue/physmem.h b/drivers/gpu/drm/img-rogue/physmem.h +new file mode 100644 +index 000000000000..ca293e9c8e09 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem.h +@@ -0,0 +1,321 @@ ++/*************************************************************************/ /*! ++@File ++@Title Physmem header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for common entry point for creation of RAM backed PMR's ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef SRVSRV_PHYSMEM_H ++#define SRVSRV_PHYSMEM_H ++ ++/* include/ */ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_memallocflags.h" ++#include "connection_server.h" ++ ++/* services/server/include/ */ ++#include "pmr.h" ++#include "pmr_impl.h" ++ ++/* Valid values for TC_MEMORY_CONFIG configuration option */ ++#define TC_MEMORY_LOCAL (1) ++#define TC_MEMORY_HOST (2) ++#define TC_MEMORY_HYBRID (3) ++ ++/* Valid values for the PLATO_MEMORY_CONFIG configuration option */ ++#define PLATO_MEMORY_LOCAL (1) ++#define PLATO_MEMORY_HOST (2) ++#define PLATO_MEMORY_HYBRID (3) ++ ++/*************************************************************************/ /*! ++@Function DevPhysMemAlloc ++@Description Allocate memory from device specific heaps directly. ++@Input psDevNode device node to operate on ++@Input ui32MemSize Size of the memory to be allocated ++@Input u8Value Value to be initialised to. ++@Input bInitPage Flag to control initialisation ++@Input pszDevSpace PDUMP memory space in which the ++ allocation is to be done ++@Input pszSymbolicAddress Symbolic name of the allocation ++@Input phHandlePtr PDUMP handle to the allocation ++@Output hMemHandle Handle to the allocated memory ++@Output psDevPhysAddr Device Physical address of allocated ++ page ++@Return PVRSRV_OK if the allocation is successful ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32MemSize, ++ IMG_UINT32 ui32Log2Align, ++ const IMG_UINT8 u8Value, ++ IMG_BOOL bInitPage, ++#if defined(PDUMP) ++ const IMG_CHAR *pszDevSpace, ++ const IMG_CHAR *pszSymbolicAddress, ++ IMG_HANDLE *phHandlePtr, ++#endif ++ IMG_HANDLE hMemHandle, ++ IMG_DEV_PHYADDR *psDevPhysAddr); ++ ++/*************************************************************************/ /*! ++@Function DevPhysMemFree ++@Description Free memory to device specific heaps directly. ++@Input psDevNode device node to operate on ++@Input hPDUMPMemHandle Pdump handle to allocated memory ++@Input hMemHandle Devmem handle to allocated memory ++@Return None ++*/ /**************************************************************************/ ++void ++DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode, ++#if defined(PDUMP) ++ IMG_HANDLE hPDUMPMemHandle, ++#endif ++ IMG_HANDLE hMemHandle); ++ ++/* ++ * PhysmemNewRamBackedPMR ++ * ++ * This function will create a RAM backed PMR using the device specific ++ * callback, this allows control at a per-devicenode level to select the ++ * memory source thus supporting mixed UMA/LMA systems. ++ * ++ * The size must be a multiple of page size. The page size is specified in ++ * log2. It should be regarded as a minimum contiguity of which the ++ * resulting memory must be a multiple. It may be that this should be a fixed ++ * number. It may be that the allocation size needs to be a multiple of some ++ * coarser "page size" than that specified in the page size argument. ++ * For example, take an OS whose page granularity is a fixed 16kB, but the ++ * caller requests memory in page sizes of 4kB. The request can be satisfied ++ * if and only if the SIZE requested is a multiple of 16kB. If the arguments ++ * supplied are such that this OS cannot grant the request, ++ * PVRSRV_ERROR_INVALID_PARAMS will be returned. ++ * ++ * The caller should supply storage of a pointer. 
Upon successful return a ++ * PMR object will have been created and a pointer to it returned in the ++ * PMROut argument. ++ * ++ * A PMR successfully created should be destroyed with PhysmemUnrefPMR. ++ * ++ * Note that this function may cause memory allocations and on some operating ++ * systems this may cause scheduling events, so it is important that this ++ * function be called with interrupts enabled and in a context where ++ * scheduling events and memory allocations are permitted. ++ * ++ * The flags may be used by the implementation to change its behaviour if ++ * required. The flags will also be stored in the PMR as immutable metadata ++ * and returned to mmu_common when it asks for it. ++ * ++ * The PID specified is used to tie this allocation to the process context ++ * that the allocation is made on behalf of. ++ */ ++PVRSRV_ERROR ++PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 uiAnnotationLength, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMROut, ++ IMG_UINT32 ui32PDumpFlags, ++ PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags); ++ ++PVRSRV_ERROR ++PhysmemNewRamBackedPMR_direct(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 uiAnnotationLength, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMROut, ++ IMG_UINT32 ui32PDumpFlags, ++ PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags); ++ ++/* ++ * PhysmemNewRamBackedLockedPMR ++ * ++ * Same as function above but is additionally locking down the PMR. ++ * ++ * Get the physical memory and lock down the PMR directly, we do not want to ++ * defer the actual allocation to mapping time. ++ * ++ * In general the concept of on-demand allocations is not useful for ++ * allocations where we give the users the freedom to map and unmap memory at ++ * will. The user is not expecting their memory contents to suddenly vanish ++ * just because they unmapped the buffer. ++ * Even if they would know and be ok with it, we do not want to check for ++ * every page we unmap whether we have to unlock the underlying PMR. ++*/ ++PVRSRV_ERROR ++PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PMR_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 uiAnnotationLength, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags, ++ PVRSRV_MEMALLOCFLAGS_T *puiPMRFlags); ++ ++/*************************************************************************/ /*! 
++@Function PhysmemImportPMR
++@Description Import a previously exported PMR
++@Input psPMRExport The exported PMR token
++@Input uiPassword Authorisation password
++ for the PMR being imported
++@Input uiSize Size of the PMR being imported
++ (for verification)
++@Input uiLog2Contig Log2 contiguity of the PMR being
++ imported (for verification)
++@Output ppsPMR The imported PMR
++@Return PVRSRV_ERROR_PMR_NOT_PERMITTED if not for the same device
++ PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR if password incorrect
++ PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES if size or contiguity incorrect
++ PVRSRV_OK if successful
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PhysmemImportPMR(CONNECTION_DATA *psConnection,
++                 PVRSRV_DEVICE_NODE *psDevNode,
++                 PMR_EXPORT *psPMRExport,
++                 PMR_PASSWORD_T uiPassword,
++                 PMR_SIZE_T uiSize,
++                 PMR_LOG2ALIGN_T uiLog2Contig,
++                 PMR **ppsPMR);
++
++/*************************************************************************/ /*!
++@Function PVRSRVGetMaxPhysHeapCountKM
++@Description Get the user accessible physical heap count
++@Output puiPhysHeapCount user accessible physical heap count
++@Return PVRSRV_OK if successful
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVGetMaxPhysHeapCountKM(CONNECTION_DATA *psConnection,
++                            PVRSRV_DEVICE_NODE *psDevNode,
++                            IMG_UINT32 *puiPhysHeapCount);
++
++/*************************************************************************/ /*!
++@Function PVRSRVGetDefaultPhysicalHeapKM
++@Description For the specified device, get the physical heap used for
++ allocations when the PVRSRV_PHYS_HEAP_DEFAULT
++ physical heap hint is set in memalloc flags.
++@Output peHeap Default Heap return value
++@Return PVRSRV_OK if successful
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVGetDefaultPhysicalHeapKM(CONNECTION_DATA *psConnection,
++                               PVRSRV_DEVICE_NODE *psDevNode,
++                               PVRSRV_PHYS_HEAP *peHeap);
++
++/*************************************************************************/ /*!
++@Function PVRSRVGetHeapPhysMemUsageKM
++@Description Get the memory usage statistics for all user accessible
++ physical heaps
++@Input ui32PhysHeapCount Total user accessible physical heaps
++@Output apPhysHeapMemStats Buffer to hold the memory statistics
++@Return PVRSRV_OK if successful
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVGetHeapPhysMemUsageKM(CONNECTION_DATA *psConnection,
++                            PVRSRV_DEVICE_NODE *psDevNode,
++                            IMG_UINT32 ui32PhysHeapCount,
++                            PHYS_HEAP_MEM_STATS *apPhysHeapMemStats);
++
++/*************************************************************************/ /*!
++@Function PVRSRVGetHeapPhysMemUsagePkdKM
++@Description Get the memory usage statistics for all user accessible
++ physical heaps
++@Input ui32PhysHeapCount Total user accessible physical heaps
++@Output apPhysHeapMemStats Buffer to hold the memory statistics
++@Return PVRSRV_OK if successful
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVGetHeapPhysMemUsagePkdKM(CONNECTION_DATA *psConnection,
++                               PVRSRV_DEVICE_NODE *psDevNode,
++                               IMG_UINT32 ui32PhysHeapCount,
++                               PHYS_HEAP_MEM_STATS_PKD *apPhysHeapMemStats);
++
++/*************************************************************************/ /*!
++@Function PVRSRVPhysHeapGetMemInfoKM ++@Description Get the memory usage statistics for a given physical heap ID ++@Input ui32PhysHeapCount Physical Heap count ++@Input paePhysHeapID Array of Physical Heap ID's ++@Output paPhysHeapMemStats Buffer to hold the memory statistics ++@Return PVRSRV_OK if successful ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PVRSRVPhysHeapGetMemInfoKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP *paePhysHeapID, ++ PHYS_HEAP_MEM_STATS *paPhysHeapMemStats); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVPhysHeapGetMemInfoPkdKM ++@Description Get the memory usage statistics for a given physical heap ID ++@Input ui32PhysHeapCount Physical Heap count ++@Input paePhysHeapID Array of Physical Heap ID's ++@Output paPhysHeapMemStats Buffer to hold the memory statistics ++@Return PVRSRV_OK if successful ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PVRSRVPhysHeapGetMemInfoPkdKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32PhysHeapCount, ++ PVRSRV_PHYS_HEAP *paePhysHeapID, ++ PHYS_HEAP_MEM_STATS_PKD *paPhysHeapMemStats); ++ ++#endif /* SRVSRV_PHYSMEM_H */ +diff --git a/drivers/gpu/drm/img-rogue/physmem_dmabuf.c b/drivers/gpu/drm/img-rogue/physmem_dmabuf.c +new file mode 100644 +index 000000000000..3fb02aeaeed0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_dmabuf.c +@@ -0,0 +1,1302 @@ ++/*************************************************************************/ /*! ++@File physmem_dmabuf.c ++@Title dmabuf memory allocator ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of the memory management. This module is responsible for ++ implementing the function callbacks for dmabuf memory. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include <linux/version.h>
++
++#include "physmem_dmabuf.h"
++#include "pvrsrv.h"
++#include "pmr.h"
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP)
++
++#include <linux/err.h>
++#include <linux/slab.h>
++#include <linux/dma-buf.h>
++#include <linux/scatterlist.h>
++
++#include "img_types.h"
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "pvrsrv_error.h"
++#include "pvrsrv_memallocflags.h"
++
++#include "allocmem.h"
++#include "osfunc.h"
++#include "pmr_impl.h"
++#include "hash.h"
++#include "private_data.h"
++#include "module_common.h"
++#include "pvr_ion_stats.h"
++
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
++#include "ri_server.h"
++#endif
++
++#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
++#include "mmap_stats.h"
++#endif
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++#include "process_stats.h"
++#endif
++
++#include "kernel_compatibility.h"
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0))
++MODULE_IMPORT_NS(DMA_BUF);
++#endif
++
++/*
++ * dma_buf_ops
++ *
++ * These all return errors if used.
++ * The point is to prevent anyone outside of our driver from importing
++ * and using our dmabuf.
++ */
++
++static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf,
++#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \
++     !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL))))
++                              struct device *psDev,
++#endif
++                              struct dma_buf_attachment *psAttachment)
++{
++    return -ENOSYS;
++}
++
++static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment,
++                                        enum dma_data_direction eDirection)
++{
++    /* Attach hasn't been called yet */
++    return ERR_PTR(-EINVAL);
++}
++
++static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment,
++                              struct sg_table *psTable,
++                              enum dma_data_direction eDirection)
++{
++}
++
++static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf)
++{
++    PMR *psPMR = (PMR *) psDmaBuf->priv;
++
++    PMRUnrefPMR(psPMR);
++}
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))
++static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum)
++{
++    return ERR_PTR(-ENOSYS);
++}
++#endif
++
++static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA)
++{
++    return -ENOSYS;
++}
++
++static const struct dma_buf_ops sPVRDmaBufOps =
++{
++    .attach        = PVRDmaBufOpsAttach,
++    .map_dma_buf   = PVRDmaBufOpsMap,
++    .unmap_dma_buf = PVRDmaBufOpsUnmap,
++    .release       = PVRDmaBufOpsRelease,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
++#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)) && \
++     !((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) && (defined(CHROMIUMOS_KERNEL))))
++    .map_atomic    = PVRDmaBufOpsKMap,
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))
++    .map           = PVRDmaBufOpsKMap,
++#endif
++#else
++    .kmap_atomic   = PVRDmaBufOpsKMap,
++    .kmap          = PVRDmaBufOpsKMap,
++#endif
++    .mmap          = PVRDmaBufOpsMMap,
++};
++
++/* end of dma_buf_ops */
++
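++/* Editor's illustrative note (not part of the original driver code):
++ * because sPVRDmaBufOps.attach always returns -ENOSYS, an external
++ * kernel driver that tries to import one of our exported buffers fails
++ * at the very first step of the dma-buf import sequence:
++ *
++ *   psAttach = dma_buf_attach(psDmaBuf, psDev); // ERR_PTR(-ENOSYS)
++ *
++ * so the map/unmap stubs above are unreachable from outside. Only
++ * PVRDmaBufOpsRelease() does real work: it drops the PMR reference
++ * taken when the dma_buf was exported. */
++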
++ ++typedef struct _PMR_DMA_BUF_DATA_ ++{ ++ /* Filled in at PMR create time */ ++ PHYS_HEAP *psPhysHeap; ++ struct dma_buf_attachment *psAttachment; ++ PFN_DESTROY_DMABUF_PMR pfnDestroy; ++ IMG_BOOL bPoisonOnFree; ++ ++ /* Mapping information. */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ struct iosys_map sMap; ++#else ++ struct dma_buf_map sMap; ++#endif ++ ++ /* Modified by PMR lock/unlock */ ++ struct sg_table *psSgTable; ++ IMG_DEV_PHYADDR *pasDevPhysAddr; ++ IMG_UINT32 ui32PhysPageCount; ++ IMG_UINT32 ui32VirtPageCount; ++} PMR_DMA_BUF_DATA; ++ ++/* Start size of the g_psDmaBufHash hash table */ ++#define DMA_BUF_HASH_SIZE 20 ++ ++static DEFINE_MUTEX(g_HashLock); ++ ++static HASH_TABLE *g_psDmaBufHash; ++static IMG_UINT32 g_ui32HashRefCount; ++ ++#if defined(PVR_ANDROID_ION_USE_SG_LENGTH) ++#define pvr_sg_length(sg) ((sg)->length) ++#else ++#define pvr_sg_length(sg) sg_dma_len(sg) ++#endif ++ ++static int ++DmaBufSetValue(struct dma_buf *psDmaBuf, int iValue, const char *szFunc) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ struct iosys_map sMap; ++#else ++ struct dma_buf_map sMap; ++#endif ++ int err, err_end_access; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)) ++ int i; ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ sMap.is_iomem = false; ++#endif ++ ++ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE); ++ if (err) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to begin cpu access (err=%d)", ++ szFunc, err)); ++ goto err_out; ++ } ++ ++ err = dma_buf_vmap(psDmaBuf, &sMap); ++ if (err) ++ { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)) ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map page (err=%d)", ++ szFunc, err)); ++ goto exit_end_access; ++#else ++ for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++) ++ { ++ void *pvKernAddr; ++ ++ pvKernAddr = dma_buf_kmap(psDmaBuf, i); ++ if (IS_ERR_OR_NULL(pvKernAddr)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map page (err=%ld)", ++ szFunc, ++ pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM)); ++ err = !pvKernAddr ? 
-ENOMEM : -EINVAL;
++
++                goto exit_end_access;
++            }
++
++            memset(pvKernAddr, iValue, PAGE_SIZE);
++
++            dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
++        }
++#endif
++    }
++    else
++    {
++        memset(sMap.vaddr, iValue, psDmaBuf->size);
++
++        dma_buf_vunmap(psDmaBuf, &sMap);
++    }
++
++    err = 0;
++
++exit_end_access:
++    do {
++        err_end_access = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
++    } while (err_end_access == -EAGAIN || err_end_access == -EINTR);
++
++    if (err_end_access)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to end cpu access (err=%d)",
++                 szFunc, err_end_access));
++        if (!err)
++        {
++            err = err_end_access;
++        }
++    }
++
++err_out:
++    return err;
++}
++
++/*****************************************************************************
++ *                          PMR callback functions                          *
++ *****************************************************************************/
++
++static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
++{
++    PMR_DMA_BUF_DATA *psPrivData = pvPriv;
++    struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
++    struct dma_buf *psDmaBuf = psAttachment->dmabuf;
++    struct sg_table *psSgTable = psPrivData->psSgTable;
++    PMR *psPMR;
++    PVRSRV_ERROR eError = PVRSRV_OK;
++
++    if (psDmaBuf->ops != &sPVRDmaBufOps)
++    {
++        if (g_psDmaBufHash)
++        {
++            /* We have a hash table so check if we've seen this dmabuf before */
++            psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
++
++            if (psPMR)
++            {
++                if (!PMRIsPMRLive(psPMR))
++                {
++                    HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf);
++                    g_ui32HashRefCount--;
++
++                    if (g_ui32HashRefCount == 0)
++                    {
++                        HASH_Delete(g_psDmaBufHash);
++                        g_psDmaBufHash = NULL;
++                    }
++                }
++                else
++                {
++                    eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
++                }
++            }
++            PVRSRVIonRemoveMemAllocRecord(psDmaBuf);
++        }
++    }
++    else
++    {
++        psPMR = (PMR *) psDmaBuf->priv;
++        if (PMRIsPMRLive(psPMR))
++        {
++            eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
++        }
++    }
++
++    if (PVRSRV_OK != eError)
++    {
++        return eError;
++    }
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++    PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT,
++                                psPrivData->ui32PhysPageCount << PAGE_SHIFT,
++                                OSGetCurrentClientProcessIDKM());
++#endif
++
++    psPrivData->ui32PhysPageCount = 0;
++
++    dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL);
++
++    if (psPrivData->bPoisonOnFree)
++    {
++        int err;
++
++        err = DmaBufSetValue(psDmaBuf, PVRSRV_POISON_ON_FREE_VALUE, __func__);
++        if (err)
++        {
++            PVR_DPF((PVR_DBG_ERROR, "%s: Failed to poison allocation before "
++                     "free", __func__));
++            PVR_ASSERT(IMG_FALSE);
++        }
++    }
++
++    if (psPrivData->pfnDestroy)
++    {
++        eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
++        if (eError != PVRSRV_OK)
++        {
++            return eError;
++        }
++    }
++
++    OSFreeMem(psPrivData->pasDevPhysAddr);
++    OSFreeMem(psPrivData);
++
++    return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
++{
++    PVR_UNREFERENCED_PARAMETER(pvPriv);
++    return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
++{
++    PVR_UNREFERENCED_PARAMETER(pvPriv);
++    return PVRSRV_OK;
++}
++
++static void PMRGetFactoryLock(void)
++{
++    mutex_lock(&g_HashLock);
++}
++
++static void PMRReleaseFactoryLock(void)
++{
++    mutex_unlock(&g_HashLock);
++}
++
++static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
++                                         IMG_UINT32 ui32Log2PageSize,
++                                         IMG_UINT32 ui32NumOfPages,
++                                         IMG_DEVMEM_OFFSET_T *puiOffset,
++                                         IMG_BOOL *pbValid,
++                                         IMG_DEV_PHYADDR *psDevPAddr)
++{
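++    /* Editor's illustrative walk-through (not part of the original driver
++     * code): with 4 kB pages (PAGE_SHIFT == 12), an offset of
++     * puiOffset[idx] = 0x2340 decomposes into ui32PageIndex = 0x2 and
++     * ui32InPageOffset = 0x340, so the device address returned below is
++     * pasDevPhysAddr[2].uiAddr + 0x340. */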
++ PMR_DMA_BUF_DATA *psPrivData = pvPriv; ++ IMG_UINT32 ui32PageIndex; ++ IMG_UINT32 idx; ++ ++ if (ui32Log2PageSize != PAGE_SHIFT) ++ { ++ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; ++ } ++ ++ for (idx=0; idx < ui32NumOfPages; idx++) ++ { ++ if (pbValid[idx]) ++ { ++ IMG_UINT32 ui32InPageOffset; ++ ++ ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT; ++ ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT); ++ ++ PVR_LOG_RETURN_IF_FALSE(ui32PageIndex < psPrivData->ui32VirtPageCount, ++ "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE); ++ ++ PVR_ASSERT(ui32InPageOffset < PAGE_SIZE); ++ psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset; ++ } ++ } ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv, ++ size_t uiOffset, ++ size_t uiSize, ++ void **ppvKernelAddressOut, ++ IMG_HANDLE *phHandleOut, ++ PMR_FLAGS_T ulFlags) ++{ ++ PMR_DMA_BUF_DATA *psPrivData = pvPriv; ++ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; ++ PVRSRV_ERROR eError; ++ int err; ++ ++ if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Kernel mappings for sparse DMABufs " ++ "are not allowed!", __func__)); ++ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; ++ goto fail; ++ } ++ ++ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); ++ if (err) ++ { ++ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; ++ goto fail; ++ } ++ ++ err = dma_buf_vmap(psDmaBuf, &psPrivData->sMap); ++ if (err != 0 || psPrivData->sMap.vaddr == NULL) ++ { ++ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; ++ goto fail_kmap; ++ } ++ ++ *ppvKernelAddressOut = psPrivData->sMap.vaddr + uiOffset; ++ *phHandleOut = psPrivData->sMap.vaddr; ++ ++ return PVRSRV_OK; ++ ++fail_kmap: ++ do { ++ err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); ++ } while (err == -EAGAIN || err == -EINTR); ++ ++fail: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_HANDLE hHandle) ++{ ++ PMR_DMA_BUF_DATA *psPrivData = pvPriv; ++ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; ++ int err; ++ ++ dma_buf_vunmap(psDmaBuf, &psPrivData->sMap); ++ ++ do { ++ err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL); ++ } while (err == -EAGAIN || err == -EINTR); ++} ++ ++static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv, ++ PMR *psPMR, ++ PMR_MMAP_DATA pOSMMapData) ++{ ++ PMR_DMA_BUF_DATA *psPrivData = pvPriv; ++ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf; ++ struct vm_area_struct *psVma = pOSMMapData; ++ int err; ++ ++ if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Not possible to MMAP sparse DMABufs", ++ __func__)); ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++ } ++ ++ err = dma_buf_mmap(psDmaBuf, psVma, 0); ++ if (err) ++ { ++ return (err == -EINVAL) ? 
PVRSRV_ERROR_NOT_SUPPORTED : PVRSRV_ERROR_BAD_MAPPING; ++ } ++ ++#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) ++ MMapStatsAddOrUpdatePMR(psPMR, psVma->vm_end - psVma->vm_start); ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab = ++{ ++ .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf, ++ .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf, ++ .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf, ++ .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf, ++ .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf, ++ .pfnMMap = PMRMMapDmaBuf, ++ .pfnFinalize = PMRFinalizeDmaBuf, ++ .pfnGetPMRFactoryLock = PMRGetFactoryLock, ++ .pfnReleasePMRFactoryLock = PMRReleaseFactoryLock, ++}; ++ ++/***************************************************************************** ++ * Public facing interface * ++ *****************************************************************************/ ++ ++PVRSRV_ERROR ++PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, ++ struct dma_buf_attachment *psAttachment, ++ PFN_DESTROY_DMABUF_PMR pfnDestroy, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 ui32NameSize, ++ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], ++ PMR **ppsPMRPtr) ++{ ++ struct dma_buf *psDmaBuf = psAttachment->dmabuf; ++ PMR_DMA_BUF_DATA *psPrivData; ++ PMR_FLAGS_T uiPMRFlags; ++ IMG_BOOL bZeroOnAlloc; ++ IMG_BOOL bPoisonOnAlloc; ++ IMG_BOOL bPoisonOnFree; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 i, j; ++ IMG_UINT32 uiPagesPerChunk = uiChunkSize >> PAGE_SHIFT; ++ IMG_UINT32 ui32PageCount = 0; ++ struct scatterlist *sg; ++ struct sg_table *table; ++ IMG_UINT32 uiSglOffset; ++ IMG_CHAR pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN]; ++ ++ bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags); ++ bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags); ++#if defined(DEBUG) ++ bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags); ++#else ++ bPoisonOnFree = IMG_FALSE; ++#endif ++ if (bZeroOnAlloc && bPoisonOnFree) ++ { ++ /* Zero on Alloc and Poison on Alloc are mutually exclusive */ ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto errReturn; ++ } ++ ++ psPrivData = OSAllocZMem(sizeof(*psPrivData)); ++ if (psPrivData == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto errReturn; ++ } ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ psPrivData->sMap.is_iomem = false; ++#endif ++ psPrivData->psPhysHeap = psHeap; ++ psPrivData->psAttachment = psAttachment; ++ psPrivData->pfnDestroy = pfnDestroy; ++ psPrivData->bPoisonOnFree = bPoisonOnFree; ++ psPrivData->ui32VirtPageCount = ++ (ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT; ++ ++ psPrivData->pasDevPhysAddr = ++ OSAllocZMem(sizeof(*(psPrivData->pasDevPhysAddr)) * ++ psPrivData->ui32VirtPageCount); ++ if (!psPrivData->pasDevPhysAddr) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate buffer for physical addresses (oom)", ++ __func__)); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto errFreePrivData; ++ } ++ ++ if (bZeroOnAlloc || bPoisonOnAlloc) ++ { ++ int iValue = bZeroOnAlloc ? 0 : PVRSRV_POISON_ON_ALLOC_VALUE; ++ int err; ++ ++ err = DmaBufSetValue(psDmaBuf, iValue, __func__); ++ if (err) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map buffer for %s", ++ __func__, ++ bZeroOnAlloc ? 
"zeroing" : "poisoning")); ++ ++ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING; ++ goto errFreePhysAddr; ++ } ++ } ++ ++ table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL); ++ if (IS_ERR_OR_NULL(table)) ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto errFreePhysAddr; ++ } ++ ++ /* ++ * We do a two pass process: first work out how many pages there ++ * are and second, fill in the data. ++ */ ++ for_each_sg(table->sgl, sg, table->nents, i) ++ { ++ ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE; ++ } ++ ++ if (WARN_ON(!ui32PageCount)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Number of phys. pages must not be zero", ++ __func__)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto errUnmap; ++ } ++ ++ if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual " ++ "number of physical dma buf pages don't match", ++ __func__)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto errUnmap; ++ } ++ ++ psPrivData->ui32PhysPageCount = ui32PageCount; ++ psPrivData->psSgTable = table; ++ ui32PageCount = 0; ++ sg = table->sgl; ++ uiSglOffset = 0; ++ ++ ++ /* Fill physical address array */ ++ for (i = 0; i < ui32NumPhysChunks; i++) ++ { ++ for (j = 0; j < uiPagesPerChunk; j++) ++ { ++ IMG_UINT32 uiIdx = pui32MappingTable[i] * uiPagesPerChunk + j; ++ ++ psPrivData->pasDevPhysAddr[uiIdx].uiAddr = ++ sg_dma_address(sg) + uiSglOffset; ++ ++ /* Get the next offset for the current sgl or the next sgl */ ++ uiSglOffset += PAGE_SIZE; ++ if (uiSglOffset >= pvr_sg_length(sg)) ++ { ++ sg = sg_next(sg); ++ uiSglOffset = 0; ++ ++ /* Check that we haven't looped */ ++ if (WARN_ON(sg == table->sgl)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to fill phys. address " ++ "array", ++ __func__)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto errUnmap; ++ } ++ } ++ } ++ } ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, ++ psPrivData->ui32PhysPageCount << PAGE_SHIFT, ++ OSGetCurrentClientProcessIDKM()); ++#endif ++ ++ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); ++ ++ /* ++ * Check no significant bits were lost in cast due to different ++ * bit widths for flags ++ */ ++ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); ++ ++ if (OSSNPrintf((IMG_CHAR *)pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN, "ImpDmaBuf:%s", (IMG_CHAR *)pszName) < 0) ++ { ++ pszAnnotation[0] = '\0'; ++ } ++ else ++ { ++ pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN-1] = '\0'; ++ } ++ ++ eError = PMRCreatePMR(psHeap, ++ ui32NumVirtChunks * uiChunkSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ PAGE_SHIFT, ++ uiPMRFlags, ++ pszAnnotation, ++ &_sPMRDmaBufFuncTab, ++ psPrivData, ++ PMR_TYPE_DMABUF, ++ ppsPMRPtr, ++ PDUMP_NONE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ goto errFreePhysAddr; ++ } ++ ++ return PVRSRV_OK; ++ ++errUnmap: ++ dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL); ++errFreePhysAddr: ++ OSFreeMem(psPrivData->pasDevPhysAddr); ++errFreePrivData: ++ OSFreeMem(psPrivData); ++errReturn: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap, ++ struct dma_buf_attachment *psAttachment) ++{ ++ struct dma_buf *psDmaBuf = psAttachment->dmabuf; ++ ++ PVR_UNREFERENCED_PARAMETER(psHeap); ++ ++ dma_buf_detach(psDmaBuf, 
psAttachment);
++	dma_buf_put(psDmaBuf);
++
++	return PVRSRV_OK;
++}
++
++struct dma_buf *
++PhysmemGetDmaBuf(PMR *psPMR)
++{
++	PMR_DMA_BUF_DATA *psPrivData;
++
++	psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab);
++	if (psPrivData)
++	{
++		return psPrivData->psAttachment->dmabuf;
++	}
++
++	return NULL;
++}
++
++PVRSRV_ERROR
++PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
++                    PVRSRV_DEVICE_NODE *psDevNode,
++                    PMR *psPMR,
++                    IMG_INT *piFd)
++{
++	struct dma_buf *psDmaBuf;
++	IMG_DEVMEM_SIZE_T uiPMRSize;
++	PVRSRV_ERROR eError;
++	IMG_INT iFd;
++
++	mutex_lock(&g_HashLock);
++
++	PMRRefPMR(psPMR);
++
++	PMR_LogicalSize(psPMR, &uiPMRSize);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
++	{
++		DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo);
++
++		sDmaBufExportInfo.priv = psPMR;
++		sDmaBufExportInfo.ops = &sPVRDmaBufOps;
++		sDmaBufExportInfo.size = uiPMRSize;
++		sDmaBufExportInfo.flags = O_RDWR;
++
++		psDmaBuf = dma_buf_export(&sDmaBufExportInfo);
++	}
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
++	psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
++	                          uiPMRSize, O_RDWR, NULL);
++#else
++	psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
++	                          uiPMRSize, O_RDWR);
++#endif
++
++	if (IS_ERR_OR_NULL(psDmaBuf))
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)",
++		         __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
++		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++		goto fail_pmr_ref;
++	}
++
++	iFd = dma_buf_fd(psDmaBuf, O_RDWR);
++	if (iFd < 0)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf fd (err=%d)",
++		         __func__, iFd));
++		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++		goto fail_dma_buf;
++	}
++
++	mutex_unlock(&g_HashLock);
++	*piFd = iFd;
++
++	/* A PMR memory layout can't change once exported.
++	 * This makes sure the exported and imported parties see
++	 * the same layout of the memory */
++	PMR_SetLayoutFixed(psPMR, IMG_TRUE);
++
++	return PVRSRV_OK;
++
++fail_dma_buf:
++	dma_buf_put(psDmaBuf);
++
++fail_pmr_ref:
++	mutex_unlock(&g_HashLock);
++	PMRUnrefPMR(psPMR);
++
++	PVR_ASSERT(eError != PVRSRV_OK);
++	return eError;
++}
++
++PVRSRV_ERROR
++PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
++                    PVRSRV_DEVICE_NODE *psDevNode,
++                    IMG_INT fd,
++                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                    IMG_UINT32 ui32NameSize,
++                    const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
++                    PMR **ppsPMRPtr,
++                    IMG_DEVMEM_SIZE_T *puiSize,
++                    IMG_DEVMEM_ALIGN_T *puiAlign)
++{
++	IMG_DEVMEM_SIZE_T uiSize;
++	IMG_UINT32 ui32MappingTable = 0;
++	struct dma_buf *psDmaBuf;
++	PVRSRV_ERROR eError;
++
++	/* Get the buffer handle */
++	psDmaBuf = dma_buf_get(fd);
++	if (IS_ERR_OR_NULL(psDmaBuf))
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
++		         __func__, psDmaBuf ?
PTR_ERR(psDmaBuf) : -ENOMEM)); ++ return PVRSRV_ERROR_BAD_MAPPING; ++ ++ } ++ ++ uiSize = psDmaBuf->size; ++ ++ eError = PhysmemImportSparseDmaBuf(psConnection, ++ psDevNode, ++ fd, ++ uiFlags, ++ uiSize, ++ 1, ++ 1, ++ &ui32MappingTable, ++ ui32NameSize, ++ pszName, ++ ppsPMRPtr, ++ puiSize, ++ puiAlign); ++ ++ dma_buf_put(psDmaBuf); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++PhysmemImportDmaBufLocked(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_INT fd, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 ui32NameSize, ++ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], ++ PMR **ppsPMRPtr, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ IMG_DEVMEM_ALIGN_T *puiAlign) ++{ ++ PMR *psPMRPtr; ++ PVRSRV_ERROR eError; ++ ++ eError = PhysmemImportDmaBuf(psConnection, ++ psDevNode, ++ fd, ++ uiFlags, ++ ui32NameSize, ++ pszName, ++ &psPMRPtr, ++ puiSize, ++ puiAlign); ++ ++ if (eError == PVRSRV_OK) ++ { ++ eError = PMRLockSysPhysAddresses(psPMRPtr); ++ if (eError == PVRSRV_OK) ++ { ++ *ppsPMRPtr = psPMRPtr; ++ } ++ else ++ { ++ PMRUnrefPMR(psPMRPtr); ++ } ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_INT fd, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 ui32NameSize, ++ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], ++ PMR **ppsPMRPtr, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ IMG_DEVMEM_ALIGN_T *puiAlign) ++{ ++ PMR *psPMR = NULL; ++ struct dma_buf_attachment *psAttachment; ++ struct dma_buf *psDmaBuf; ++ PVRSRV_ERROR eError; ++ IMG_BOOL bHashTableCreated = IMG_FALSE; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if (!psDevNode) ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto errReturn; ++ } ++ ++ /* Terminate string from bridge to prevent corrupt annotations in RI */ ++ if (pszName != NULL) ++ { ++ IMG_CHAR* pszName0 = (IMG_CHAR*) pszName; ++ pszName0[ui32NameSize-1] = '\0'; ++ } ++ ++ mutex_lock(&g_HashLock); ++ ++ /* Get the buffer handle */ ++ psDmaBuf = dma_buf_get(fd); ++ if (IS_ERR_OR_NULL(psDmaBuf)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)", ++ __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM)); ++ eError = PVRSRV_ERROR_BAD_MAPPING; ++ goto errUnlockReturn; ++ } ++ ++ if (psDmaBuf->ops == &sPVRDmaBufOps) ++ { ++ PVRSRV_DEVICE_NODE *psPMRDevNode; ++ ++ /* We exported this dma_buf, so we can just get its PMR */ ++ psPMR = (PMR *) psDmaBuf->priv; ++ ++ /* However, we can't import it if it belongs to a different device */ ++ psPMRDevNode = PMR_DeviceNode(psPMR); ++ if (psPMRDevNode != psDevNode) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device", ++ __func__)); ++ eError = PVRSRV_ERROR_PMR_NOT_PERMITTED; ++ goto err; ++ } ++ } ++ else ++ { ++ if (g_psDmaBufHash) ++ { ++ /* We have a hash table so check if we've seen this dmabuf before */ ++ psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf); ++ } ++ else ++ { ++ /* ++ * As different processes may import the same dmabuf we need to ++ * create a hash table so we don't generate a duplicate PMR but ++ * rather just take a reference on an existing one. 
++			 */
++			g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE);
++			if (!g_psDmaBufHash)
++			{
++				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++				goto err;
++			}
++			bHashTableCreated = IMG_TRUE;
++		}
++	}
++
++	if (psPMR)
++	{
++		/* Reuse the PMR we already created */
++		PMRRefPMR(psPMR);
++
++		*ppsPMRPtr = psPMR;
++		PMR_LogicalSize(psPMR, puiSize);
++		*puiAlign = PAGE_SIZE;
++	}
++	/* No errors so far */
++	eError = PVRSRV_OK;
++
++err:
++	if (psPMR || (PVRSRV_OK != eError))
++	{
++		mutex_unlock(&g_HashLock);
++		dma_buf_put(psDmaBuf);
++
++		if (PVRSRV_OK == eError)
++		{
++			/*
++			 * We expect a PMR to be immutable at this point.
++			 * But it's explicitly set here to cover a corner case
++			 * where a PMR created through the non-DMA interface could be
++			 * imported back again through the DMA interface */
++			PMR_SetLayoutFixed(psPMR, IMG_TRUE);
++		}
++		return eError;
++	}
++
++	/* Do we want this to be a sparse PMR? */
++	if (ui32NumVirtChunks > 1)
++	{
++		IMG_UINT32 i;
++
++		/* Parameter validation */
++		if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) ||
++		    uiChunkSize != PAGE_SIZE ||
++		    ui32NumPhysChunks > ui32NumVirtChunks)
++		{
++			PVR_DPF((PVR_DBG_ERROR,
++			         "%s: Requesting sparse buffer: "
++			         "uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to "
++			         "OS page size (%lu). uiChunkSize * ui32NumPhysChunks "
++			         "("IMG_DEVMEM_SIZE_FMTSPEC") must"
++			         " be equal to the buffer size ("IMG_SIZE_FMTSPEC"). "
++			         "ui32NumPhysChunks (%u) must be less than or equal to "
++			         "ui32NumVirtChunks (%u)",
++			         __func__,
++			         uiChunkSize,
++			         PAGE_SIZE,
++			         uiChunkSize * ui32NumPhysChunks,
++			         psDmaBuf->size,
++			         ui32NumPhysChunks,
++			         ui32NumVirtChunks));
++			eError = PVRSRV_ERROR_INVALID_PARAMS;
++			goto errUnlockAndDMAPut;
++		}
++
++		/* Parameter validation - Mapping table entries */
++		for (i = 0; i < ui32NumPhysChunks; i++)
++		{
++			if (pui32MappingTable[i] > ui32NumVirtChunks)
++			{
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s: Requesting sparse buffer: "
++				         "Entry in mapping table (%u) is out of allocation "
++				         "bounds (%u)",
++				         __func__,
++				         (IMG_UINT32) pui32MappingTable[i],
++				         (IMG_UINT32) ui32NumVirtChunks));
++				eError = PVRSRV_ERROR_INVALID_PARAMS;
++				goto errUnlockAndDMAPut;
++			}
++		}
++	}
++	else
++	{
++		/* If ui32NumPhysChunks == 0, pui32MappingTable is NULL; because
++		 * ui32NumPhysChunks is set to 1 below, we don't allow a NULL array */
++		if (pui32MappingTable == NULL)
++		{
++			eError = PVRSRV_ERROR_INVALID_PARAMS;
++			goto errUnlockAndDMAPut;
++		}
++
++		/* Make sure parameters are valid for non-sparse allocations as well */
++		uiChunkSize = psDmaBuf->size;
++		ui32NumPhysChunks = 1;
++		ui32NumVirtChunks = 1;
++	}
++
++
++	psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice);
++	if (IS_ERR_OR_NULL(psAttachment))
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)",
++		         __func__, psAttachment ? PTR_ERR(psAttachment) : -ENOMEM));
++		eError = PVRSRV_ERROR_BAD_MAPPING;
++		goto errUnlockAndDMAPut;
++	}
++
++	/*
++	 * Note:
++	 * While we have no way to determine the type of the buffer we just
++	 * assume that all dmabufs are from the same physical heap.
++ */ ++ eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_EXTERNAL], ++ psAttachment, ++ PhysmemDestroyDmaBuf, ++ uiFlags, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ ui32NameSize, ++ pszName, ++ &psPMR); ++ if (eError != PVRSRV_OK) ++ { ++ goto errDMADetach; ++ } ++ ++ /* First time we've seen this dmabuf so store it in the hash table */ ++ HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR); ++ g_ui32HashRefCount++; ++ ++ mutex_unlock(&g_HashLock); ++ ++ PVRSRVIonAddMemAllocRecord(psDmaBuf); ++ ++ *ppsPMRPtr = psPMR; ++ *puiSize = ui32NumVirtChunks * uiChunkSize; ++ *puiAlign = PAGE_SIZE; ++ ++ /* The memory that's just imported is owned by some other entity. ++ * Hence the memory layout cannot be changed through our API */ ++ PMR_SetLayoutFixed(psPMR, IMG_TRUE); ++ ++ return PVRSRV_OK; ++ ++errDMADetach: ++ dma_buf_detach(psDmaBuf, psAttachment); ++ ++errUnlockAndDMAPut: ++ if (IMG_TRUE == bHashTableCreated) ++ { ++ HASH_Delete(g_psDmaBufHash); ++ g_psDmaBufHash = NULL; ++ } ++ dma_buf_put(psDmaBuf); ++ ++errUnlockReturn: ++ mutex_unlock(&g_HashLock); ++ ++errReturn: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) */ ++ ++PVRSRV_ERROR ++PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap, ++ struct dma_buf_attachment *psAttachment, ++ PFN_DESTROY_DMABUF_PMR pfnDestroy, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 ui32NameSize, ++ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], ++ PMR **ppsPMRPtr) ++{ ++ PVR_UNREFERENCED_PARAMETER(psHeap); ++ PVR_UNREFERENCED_PARAMETER(psAttachment); ++ PVR_UNREFERENCED_PARAMETER(pfnDestroy); ++ PVR_UNREFERENCED_PARAMETER(uiFlags); ++ PVR_UNREFERENCED_PARAMETER(uiChunkSize); ++ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); ++ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); ++ PVR_UNREFERENCED_PARAMETER(pui32MappingTable); ++ PVR_UNREFERENCED_PARAMETER(ui32NameSize); ++ PVR_UNREFERENCED_PARAMETER(pszName); ++ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); ++ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++} ++ ++struct dma_buf * ++PhysmemGetDmaBuf(PMR *psPMR) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ ++ return NULL; ++} ++ ++PVRSRV_ERROR ++PhysmemExportDmaBuf(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ PMR *psPMR, ++ IMG_INT *piFd) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(piFd); ++ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++} ++ ++PVRSRV_ERROR ++PhysmemImportDmaBuf(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_INT fd, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32 ui32NameSize, ++ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], ++ PMR **ppsPMRPtr, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ IMG_DEVMEM_ALIGN_T *puiAlign) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ PVR_UNREFERENCED_PARAMETER(fd); ++ PVR_UNREFERENCED_PARAMETER(uiFlags); ++ PVR_UNREFERENCED_PARAMETER(ui32NameSize); ++ PVR_UNREFERENCED_PARAMETER(pszName); ++ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); ++ PVR_UNREFERENCED_PARAMETER(puiSize); ++ PVR_UNREFERENCED_PARAMETER(puiAlign); ++ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++} ++ ++PVRSRV_ERROR ++PhysmemImportSparseDmaBuf(CONNECTION_DATA 
*psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_INT fd, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 ui32NameSize, ++ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN], ++ PMR **ppsPMRPtr, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ IMG_DEVMEM_ALIGN_T *puiAlign) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ PVR_UNREFERENCED_PARAMETER(fd); ++ PVR_UNREFERENCED_PARAMETER(uiFlags); ++ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr); ++ PVR_UNREFERENCED_PARAMETER(puiSize); ++ PVR_UNREFERENCED_PARAMETER(puiAlign); ++ PVR_UNREFERENCED_PARAMETER(uiChunkSize); ++ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); ++ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks); ++ PVR_UNREFERENCED_PARAMETER(pui32MappingTable); ++ PVR_UNREFERENCED_PARAMETER(ui32NameSize); ++ PVR_UNREFERENCED_PARAMETER(pszName); ++ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++} ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */ +diff --git a/drivers/gpu/drm/img-rogue/physmem_dmabuf.h b/drivers/gpu/drm/img-rogue/physmem_dmabuf.h +new file mode 100644 +index 000000000000..99b5c3365fb0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_dmabuf.h +@@ -0,0 +1,124 @@ ++/**************************************************************************/ /*! ++@File physmem_dmabuf.h ++@Title Header for dmabuf PMR factory ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of the memory management. This module is responsible for ++ implementing the function callbacks importing Ion allocations ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /***************************************************************************/
++
++#if !defined(PHYSMEM_DMABUF_H)
++#define PHYSMEM_DMABUF_H
++
++#include <linux/dma-buf.h>
++
++#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__)
++#define __pvrsrv_defined_struct_enum__
++#include <services_kernel_client.h>
++#endif
++
++#include "img_types.h"
++#include "pvrsrv_error.h"
++#include "pvrsrv_memallocflags.h"
++#include "connection_server.h"
++
++#include "pmr.h"
++
++typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap,
++                                               struct dma_buf_attachment *psAttachment);
++
++PVRSRV_ERROR
++PhysmemCreateNewDmaBufBackedPMR(PHYS_HEAP *psHeap,
++                                struct dma_buf_attachment *psAttachment,
++                                PFN_DESTROY_DMABUF_PMR pfnDestroy,
++                                PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                                IMG_DEVMEM_SIZE_T uiChunkSize,
++                                IMG_UINT32 ui32NumPhysChunks,
++                                IMG_UINT32 ui32NumVirtChunks,
++                                IMG_UINT32 *pui32MappingTable,
++                                IMG_UINT32 ui32NameSize,
++                                const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
++                                PMR **ppsPMRPtr);
++
++struct dma_buf *
++PhysmemGetDmaBuf(PMR *psPMR);
++
++PVRSRV_ERROR
++PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
++                    PVRSRV_DEVICE_NODE *psDevNode,
++                    PMR *psPMR,
++                    IMG_INT *piFd);
++
++PVRSRV_ERROR
++PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
++                    PVRSRV_DEVICE_NODE *psDevNode,
++                    IMG_INT fd,
++                    PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                    IMG_UINT32 ui32NameSize,
++                    const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
++                    PMR **ppsPMRPtr,
++                    IMG_DEVMEM_SIZE_T *puiSize,
++                    IMG_DEVMEM_ALIGN_T *puiAlign);
++
++PVRSRV_ERROR
++PhysmemImportDmaBufLocked(CONNECTION_DATA *psConnection,
++                          PVRSRV_DEVICE_NODE *psDevNode,
++                          IMG_INT fd,
++                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                          IMG_UINT32 ui32NameSize,
++                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
++                          PMR **ppsPMRPtr,
++                          IMG_DEVMEM_SIZE_T *puiSize,
++                          IMG_DEVMEM_ALIGN_T *puiAlign);
++
++PVRSRV_ERROR
++PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
++                          PVRSRV_DEVICE_NODE *psDevNode,
++                          IMG_INT fd,
++                          PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                          IMG_DEVMEM_SIZE_T uiChunkSize,
++                          IMG_UINT32 ui32NumPhysChunks,
++                          IMG_UINT32 ui32NumVirtChunks,
++                          IMG_UINT32 *pui32MappingTable,
++                          IMG_UINT32 ui32NameSize,
++                          const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
++                          PMR **ppsPMRPtr,
++                          IMG_DEVMEM_SIZE_T *puiSize,
++                          IMG_DEVMEM_ALIGN_T *puiAlign);
++
++#endif /* !defined(PHYSMEM_DMABUF_H) */
+diff --git a/drivers/gpu/drm/img-rogue/physmem_hostmem.c b/drivers/gpu/drm/img-rogue/physmem_hostmem.c
+new file mode 100644
+index 000000000000..2f1dc409301a
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/physmem_hostmem.c
+@@ -0,0 +1,206 @@
++/*************************************************************************/ /*!
++@File           physmem_hostmem.c
++@Title          Host memory device node functions
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Functions relevant to device memory allocations made from the
++                host mem device node.
++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include "physmem_hostmem.h"
++
++#include "img_defs.h"
++#include "img_types.h"
++#include "allocmem.h"
++#include "physheap.h"
++#include "pvrsrv_device.h"
++#include "physmem_osmem.h"
++
++static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
++                                      IMG_UINT32 ui32NumOfAddr,
++                                      IMG_DEV_PHYADDR *psDevPAddr,
++                                      IMG_CPU_PHYADDR *psCpuPAddr);
++
++static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
++                                      IMG_UINT32 ui32NumOfAddr,
++                                      IMG_CPU_PHYADDR *psCpuPAddr,
++                                      IMG_DEV_PHYADDR *psDevPAddr);
++
++/* heap callbacks for host driver's device's heap */
++static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs =
++{
++	/* pfnCpuPAddrToDevPAddr */
++	HostMemCpuPAddrToDevPAddr,
++	/* pfnDevPAddrToCpuPAddr */
++	HostMemDevPAddrToCpuPAddr,
++};
++
++static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[];
++
++/* heap configuration for host driver's device */
++static PHYS_HEAP_CONFIG gsPhysHeapConfigHostMemDevice[] =
++{
++	{
++		PHYS_HEAP_TYPE_UMA,
++		"SYSMEM",
++		&gsHostMemDevPhysHeapFuncs,
++		{0},
++		{0},
++		0,
++		(IMG_HANDLE)&gsHostMemDevConfig[0],
++		PHYS_HEAP_USAGE_CPU_LOCAL,
++	}
++};
++
++/* device configuration for host driver's device */
++static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[] =
++{
++	{
++		.pszName = "HostMemDevice",
++		.eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE,
++		.pasPhysHeaps = &gsPhysHeapConfigHostMemDevice[0],
++		.ui32PhysHeapCount = ARRAY_SIZE(gsPhysHeapConfigHostMemDevice),
++	}
++};
++
++static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
++                                      IMG_UINT32 ui32NumOfAddr,
++                                      IMG_DEV_PHYADDR *psDevPAddr,
++                                      IMG_CPU_PHYADDR *psCpuPAddr)
++{
++	PVR_UNREFERENCED_PARAMETER(hPrivData);
++	/* Optimise common case */
++	psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
++	if (ui32NumOfAddr > 1)
++	{
++		IMG_UINT32 ui32Idx;
++		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
++		{
++			psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
++		}
++	}
++}
++
++static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
++                                      IMG_UINT32 ui32NumOfAddr,
++                                      IMG_CPU_PHYADDR *psCpuPAddr,
++                                      IMG_DEV_PHYADDR *psDevPAddr)
++{
++	PVR_UNREFERENCED_PARAMETER(hPrivData);
++	/* Optimise common case */
++	psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr);
++	if (ui32NumOfAddr > 1)
++	{
++		IMG_UINT32 ui32Idx;
++		for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
++		{
++			psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr);
++		}
++	}
++}
++
++PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++	PVRSRV_DEVICE_CONFIG *psDevConfig = &gsHostMemDevConfig[0];
++
++	/* Assert ensures HostMemory device isn't already created and
++	 * that data is initialised */
++	PVR_ASSERT(*ppsDeviceNode == NULL);
++
++	/* for now, we only know a single heap (UMA) config for host device */
++	PVR_ASSERT(psDevConfig->ui32PhysHeapCount == 1 &&
++	           psDevConfig->pasPhysHeaps[0].eType == PHYS_HEAP_TYPE_UMA);
++
++	/* N.B. In case of any failures in this function, we just return the error
++	   to the caller, as clean-up is taken care of by HostMemDeviceDestroy */
++
++	psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
++	PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem");
++
++	/* save the return pointer early to aid clean-up */
++	*ppsDeviceNode = psDeviceNode;
++
++	psDeviceNode->psDevConfig = psDevConfig;
++	psDeviceNode->papsRegisteredPhysHeaps =
++
OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) * ++ psDevConfig->ui32PhysHeapCount); ++ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps, "OSAllocZMem"); ++ ++ eError = PhysHeapCreateHeapFromConfig(psDeviceNode, ++ &psDevConfig->pasPhysHeaps[0], ++ &psDeviceNode->papsRegisteredPhysHeaps[0]); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapCreateHeapFromConfig"); ++ psDeviceNode->ui32RegisteredPhysHeaps = 1; ++ ++ /* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks */ ++ eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_CPU_LOCAL, ++ psDeviceNode, ++ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquire"); ++ ++ return PVRSRV_OK; ++} ++ ++void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ if (!psDeviceNode) ++ { ++ return; ++ } ++ ++ if (psDeviceNode->papsRegisteredPhysHeaps) ++ { ++ if (psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]) ++ { ++ PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]); ++ } ++ ++ if (psDeviceNode->papsRegisteredPhysHeaps[0]) ++ { ++ /* clean-up function as well is aware of only one heap */ ++ PVR_ASSERT(psDeviceNode->ui32RegisteredPhysHeaps == 1); ++ PhysHeapDestroy(psDeviceNode->papsRegisteredPhysHeaps[0]); ++ } ++ ++ OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps); ++ } ++ OSFreeMem(psDeviceNode); ++} +diff --git a/drivers/gpu/drm/img-rogue/physmem_hostmem.h b/drivers/gpu/drm/img-rogue/physmem_hostmem.h +new file mode 100644 +index 000000000000..cfa453de343a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_hostmem.h +@@ -0,0 +1,65 @@ ++/*************************************************************************/ /*! ++@File physmem_hostmem.h ++@Title Host memory device node header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(PHYSMEM_HOSTMEM_H) ++#define PHYSMEM_HOSTMEM_H ++ ++#include "pvrsrv_device.h" ++#include "device.h" ++ ++/*************************************************************************/ /*! ++@Function HostMemDeviceCreate ++@Description Allocate memory for and create host memory device node. ++@Output ppsDeviceNode Pointer to device node pointer. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR HostMemDeviceCreate(PVRSRV_DEVICE_NODE **ppsDeviceNode); ++ ++/*************************************************************************/ /*! ++@Function HostMemDeviceDestroy ++@Description Destroy host memory device node. ++@Input psDeviceNode Pointer to device node. ++*/ /**************************************************************************/ ++void HostMemDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++#endif /* !defined(PHYSMEM_HOSTMEM_H) */ +diff --git a/drivers/gpu/drm/img-rogue/physmem_lma.c b/drivers/gpu/drm/img-rogue/physmem_lma.c +new file mode 100644 +index 000000000000..4fa61ac18fcf +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_lma.c +@@ -0,0 +1,2003 @@ ++/*************************************************************************/ /*! ++@File physmem_lma.c ++@Title Local card memory allocator ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of the memory management. This module is responsible for ++ implementing the function callbacks for local card memory. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include "img_types.h"
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "pvrsrv_error.h"
++#include "pvrsrv_memallocflags.h"
++#include "rgx_pdump_panics.h"
++#include "allocmem.h"
++#include "osfunc.h"
++#include "pvrsrv.h"
++#include "devicemem_server_utils.h"
++#include "physmem_lma.h"
++#include "pdump_km.h"
++#include "pmr.h"
++#include "pmr_impl.h"
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++#include "process_stats.h"
++#endif
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++#include "rgxutils.h"
++#endif
++
++#if defined(INTEGRITY_OS)
++#include "mm.h"
++#include "integrity_memobject.h"
++#endif
++
++/* Since 0x0 is a valid DevPAddr, we rely on max 64-bit value to be an invalid
++ * page address */
++#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0)
++
++typedef struct _PMR_LMALLOCARRAY_DATA_ {
++	IMG_PID uiPid;
++	IMG_INT32 iNumPagesAllocated;
++	/*
++	 * uiTotalNumPages:
++	 * Total number of pages supported by this PMR.
++	 * (Fixed as of now due to the fixed Page table array size)
++	 */
++	IMG_UINT32 uiTotalNumPages;
++	IMG_UINT32 uiPagesToAlloc;
++
++	IMG_UINT32 uiLog2AllocSize;
++	IMG_UINT32 uiContigAllocSize;
++	IMG_DEV_PHYADDR *pasDevPAddr;
++
++	IMG_BOOL bZeroOnAlloc;
++	IMG_BOOL bPoisonOnAlloc;
++
++	IMG_BOOL bOnDemand;
++
++	/*
++	   Record at alloc time whether poisoning will be required when the
++	   PMR is freed.
++	*/
++	IMG_BOOL bPoisonOnFree;
++
++	/* Physical heap and arena pointers for this allocation */
++	PHYS_HEAP* psPhysHeap;
++	RA_ARENA* psArena;
++	PVRSRV_MEMALLOCFLAGS_T uiAllocFlags;
++
++	/*
++	   Connection data for this request's originating process. NULL for
++	   direct-bridge originating calls
++	*/
++	CONNECTION_DATA *psConnection;
++} PMR_LMALLOCARRAY_DATA;
++
++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__)
++/* Global structure to manage GPU memory leak */
++static DEFINE_MUTEX(g_sLMALeakMutex);
++static IMG_UINT32 g_ui32LMALeakCounter = 0;
++#endif
++
++typedef struct PHYSMEM_LMA_DATA_TAG {
++	RA_ARENA *psRA;
++
++	IMG_CPU_PHYADDR sStartAddr;
++	IMG_DEV_PHYADDR sCardBase;
++	IMG_UINT64 uiSize;
++} PHYSMEM_LMA_DATA;
++
++/*
++ * This function will set the psDevPAddr to whatever the system layer
++ * has set it for the referenced heap.
++ * It will not fail if the psDevPAddr is invalid.
++ */
++static PVRSRV_ERROR
++_GetDevPAddr(PHEAP_IMPL_DATA pvImplData,
++             IMG_DEV_PHYADDR *psDevPAddr)
++{
++	PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
++
++	*psDevPAddr = psLMAData->sCardBase;
++
++	return PVRSRV_OK;
++}
++
++/*
++ * This function will set the psCpuPAddr to whatever the system layer
++ * has set it for the referenced heap.
++ * It will not fail if the psCpuPAddr is invalid.
++ */
++static PVRSRV_ERROR
++_GetCPUPAddr(PHEAP_IMPL_DATA pvImplData,
++             IMG_CPU_PHYADDR *psCpuPAddr)
++{
++	PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
++
++	*psCpuPAddr = psLMAData->sStartAddr;
++
++	return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_GetSize(PHEAP_IMPL_DATA pvImplData,
++         IMG_UINT64 *puiSize)
++{
++	PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
++
++	*puiSize = psLMAData->uiSize;
++
++	return PVRSRV_OK;
++}
++
++static IMG_UINT32
++_GetPageShift(void)
++{
++	return PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT;
++}
++
++static void PhysmemGetLocalRamMemStats(PHEAP_IMPL_DATA pvImplData,
++                                       IMG_UINT64 *pui64TotalSize,
++                                       IMG_UINT64 *pui64FreeSize)
++{
++	PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
++	RA_USAGE_STATS sRAUsageStats;
++
++	RA_Get_Usage_Stats(psLMAData->psRA, &sRAUsageStats);
++
++	*pui64TotalSize = sRAUsageStats.ui64TotalArenaSize;
++	*pui64FreeSize = sRAUsageStats.ui64FreeArenaSize;
++}
++
++static PVRSRV_ERROR
++PhysmemGetArenaLMA(PHYS_HEAP *psPhysHeap,
++                   RA_ARENA **ppsArena)
++{
++	PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)PhysHeapGetImplData(psPhysHeap);
++
++	PVR_LOG_RETURN_IF_FALSE(psLMAData != NULL, "psLMAData", PVRSRV_ERROR_NOT_IMPLEMENTED);
++
++	*ppsArena = psLMAData->psRA;
++
++	return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_CreateArenas(PHEAP_IMPL_DATA pvImplData, IMG_CHAR *pszLabel)
++{
++	PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
++
++	psLMAData->psRA = RA_Create_With_Span(pszLabel,
++	                                      OSGetPageShift(),
++	                                      psLMAData->sStartAddr.uiAddr,
++	                                      psLMAData->sCardBase.uiAddr,
++	                                      psLMAData->uiSize);
++	PVR_LOG_RETURN_IF_NOMEM(psLMAData->psRA, "RA_Create_With_Span");
++
++	return PVRSRV_OK;
++}
++
++static void
++_DestroyArenas(PHEAP_IMPL_DATA pvImplData)
++{
++	PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
++
++	/* Remove RAs and RA names for local card memory */
++	if (psLMAData->psRA)
++	{
++		RA_Delete(psLMAData->psRA);
++		psLMAData->psRA = NULL;
++	}
++}
++
++static void
++_DestroyImplData(PHEAP_IMPL_DATA pvImplData)
++{
++	PHYSMEM_LMA_DATA *psLMAData = (PHYSMEM_LMA_DATA*)pvImplData;
++
++	_DestroyArenas(pvImplData);
++
++	OSFreeMem(psLMAData);
++}
++
++struct _PHYS_HEAP_ITERATOR_ {
++	PHYS_HEAP *psPhysHeap;
++	RA_ARENA_ITERATOR *psRAIter;
++
++	IMG_UINT64 uiTotalSize;
++	IMG_UINT64 uiInUseSize;
++};
++
++PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode,
++                                    PHYS_HEAP_USAGE_FLAGS ui32Flags,
++                                    PHYS_HEAP_ITERATOR **ppsIter)
++{
++	PVRSRV_ERROR eError;
++	PHYSMEM_LMA_DATA *psLMAData;
++	PHYS_HEAP_ITERATOR *psHeapIter;
++	PHYS_HEAP *psPhysHeap = NULL;
++	RA_USAGE_STATS sStats;
++
++	PVR_LOG_RETURN_IF_INVALID_PARAM(ppsIter != NULL, "ppsIter");
++	PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode != NULL, "psDevNode");
++	PVR_LOG_RETURN_IF_INVALID_PARAM(ui32Flags != 0, "ui32Flags");
++
++	eError = PhysHeapAcquireByUsage(ui32Flags, psDevNode, &psPhysHeap);
++	PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapAcquireByUsage");
++
++	PVR_LOG_GOTO_IF_FALSE(PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA,
++	                      "PhysHeap must be of LMA type", release_heap);
++
++	psLMAData = (PHYSMEM_LMA_DATA *) PhysHeapGetImplData(psPhysHeap);
++
++	psHeapIter = OSAllocMem(sizeof(*psHeapIter));
++	PVR_LOG_GOTO_IF_NOMEM(psHeapIter, eError, release_heap);
++
++	psHeapIter->psPhysHeap = psPhysHeap;
++	psHeapIter->psRAIter = RA_IteratorAcquire(psLMAData->psRA, IMG_FALSE);
++	PVR_LOG_GOTO_IF_NOMEM(psHeapIter->psRAIter, eError, free_heap_iter);
++
++	/* get
heap usage */ ++ RA_Get_Usage_Stats(psLMAData->psRA, &sStats); ++ ++ psHeapIter->uiTotalSize = sStats.ui64TotalArenaSize; ++ psHeapIter->uiInUseSize = sStats.ui64TotalArenaSize - sStats.ui64FreeArenaSize; ++ ++ *ppsIter = psHeapIter; ++ ++ return PVRSRV_OK; ++ ++free_heap_iter: ++ OSFreeMem(psHeapIter); ++ ++release_heap: ++ PhysHeapRelease(psPhysHeap); ++ ++ return eError; ++} ++ ++void LMA_HeapIteratorDestroy(PHYS_HEAP_ITERATOR *psIter) ++{ ++ PHYS_HEAP_ITERATOR *psHeapIter = psIter; ++ ++ PVR_LOG_RETURN_VOID_IF_FALSE(psHeapIter != NULL, "psHeapIter is NULL"); ++ ++ PhysHeapRelease(psHeapIter->psPhysHeap); ++ RA_IteratorRelease(psHeapIter->psRAIter); ++ OSFreeMem(psHeapIter); ++} ++ ++PVRSRV_ERROR LMA_HeapIteratorReset(PHYS_HEAP_ITERATOR *psIter) ++{ ++ PHYS_HEAP_ITERATOR *psHeapIter = psIter; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psHeapIter != NULL, "ppsIter"); ++ ++ RA_IteratorReset(psHeapIter->psRAIter); ++ ++ return PVRSRV_OK; ++} ++ ++IMG_BOOL LMA_HeapIteratorNext(PHYS_HEAP_ITERATOR *psIter, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_UINT64 *puiSize) ++{ ++ PHYS_HEAP_ITERATOR *psHeapIter = psIter; ++ RA_ITERATOR_DATA sData = {0}; ++ ++ if (psHeapIter == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "psHeapIter in %s() is NULL", __func__)); ++ return IMG_FALSE; ++ } ++ ++ if (!RA_IteratorNext(psHeapIter->psRAIter, &sData)) ++ { ++ return IMG_FALSE; ++ } ++ ++ PVR_ASSERT(sData.uiSize != 0); ++ ++ psDevPAddr->uiAddr = sData.uiAddr; ++ *puiSize = sData.uiSize; ++ ++ return IMG_TRUE; ++} ++ ++PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter, ++ IMG_UINT64 *puiTotalSize, ++ IMG_UINT64 *puiInUseSize) ++{ ++ PHYS_HEAP_ITERATOR *psHeapIter = psIter; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psHeapIter != NULL, "psHeapIter"); ++ ++ *puiTotalSize = psHeapIter->uiTotalSize; ++ *puiInUseSize = psHeapIter->uiInUseSize; ++ ++ return PVRSRV_OK; ++} ++ ++ ++static PVRSRV_ERROR ++_LMA_DoPhyContigPagesAlloc(RA_ARENA *pArena, ++ size_t uiSize, ++ PG_HANDLE *psMemHandle, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_PID uiPid) ++{ ++ RA_BASE_T uiCardAddr = 0; ++ RA_LENGTH_T uiActualSize; ++ PVRSRV_ERROR eError; ++#if defined(DEBUG) ++ static IMG_UINT32 ui32MaxLog2NumPages = 4; /* 16 pages => 64KB */ ++#endif /* defined(DEBUG) */ ++ ++ IMG_UINT32 ui32Log2NumPages = 0; ++ ++ PVR_ASSERT(uiSize != 0); ++ ui32Log2NumPages = OSGetOrder(uiSize); ++ uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); ++ ++ eError = RA_Alloc(pArena, ++ uiSize, ++ RA_NO_IMPORT_MULTIPLIER, ++ 0, /* No flags */ ++ uiSize, ++ "LMA_PhyContigPagesAlloc", ++ &uiCardAddr, ++ &uiActualSize, ++ NULL); /* No private handle */ ++ ++ PVR_ASSERT(uiSize == uiActualSize); ++ ++ psMemHandle->u.ui64Handle = uiCardAddr; ++ psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr; ++ ++ if (PVRSRV_OK == eError) ++ { ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, ++ uiSize, ++ uiCardAddr, ++ uiPid); ++#else ++ IMG_CPU_PHYADDR sCpuPAddr; ++ sCpuPAddr.uiAddr = psDevPAddr->uiAddr; ++ ++ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, ++ NULL, ++ sCpuPAddr, ++ uiSize, ++ NULL, ++ uiPid ++ DEBUG_MEMSTATS_VALUES); ++#endif ++#endif ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: (GPU Virtualisation) Allocated 0x" IMG_SIZE_FMTSPECX " at 0x%" IMG_UINT64_FMTSPECX ", Arena ID %u", ++ __func__, uiSize, psDevPAddr->uiAddr, psMemHandle->uiOSid)); ++#endif ++ ++#if defined(DEBUG) ++ 
PVR_ASSERT((ui32Log2NumPages <= ui32MaxLog2NumPages)); ++ if (ui32Log2NumPages > ui32MaxLog2NumPages) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: ui32MaxLog2NumPages = %u, increasing to %u", __func__, ++ ui32MaxLog2NumPages, ui32Log2NumPages )); ++ ui32MaxLog2NumPages = ui32Log2NumPages; ++ } ++#endif /* defined(DEBUG) */ ++ psMemHandle->uiOrder = ui32Log2NumPages; ++ } ++ ++ return eError; ++} ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++static PVRSRV_ERROR ++LMA_PhyContigPagesAllocGPV(PHYS_HEAP *psPhysHeap, ++ size_t uiSize, ++ PG_HANDLE *psMemHandle, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_UINT32 ui32OSid, ++ IMG_PID uiPid) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); ++ RA_ARENA *pArena; ++ IMG_UINT32 ui32Log2NumPages = 0; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(uiSize != 0); ++ ui32Log2NumPages = OSGetOrder(uiSize); ++ uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); ++ ++ PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS); ++ if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u defaulting to 0", ++ __func__, ui32OSid)); ++ ui32OSid = 0; ++ } ++ ++ pArena = psDevNode->psOSidSubArena[ui32OSid]; ++ ++ if (psMemHandle->uiOSid != ui32OSid) ++ { ++ PVR_LOG(("%s: Unexpected OSid value %u - expecting %u", __func__, ++ psMemHandle->uiOSid, ui32OSid)); ++ } ++ ++ psMemHandle->uiOSid = ui32OSid; /* For Free() use */ ++ ++ eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, ++ psDevPAddr, uiPid); ++ PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc"); ++ ++ return eError; ++} ++#endif ++ ++static PVRSRV_ERROR ++LMA_PhyContigPagesAlloc(PHYS_HEAP *psPhysHeap, ++ size_t uiSize, ++ PG_HANDLE *psMemHandle, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_PID uiPid) ++{ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ IMG_UINT32 ui32OSid = 0; ++ return LMA_PhyContigPagesAllocGPV(psPhysHeap, uiSize, psMemHandle, psDevPAddr, ++ ui32OSid, uiPid); ++#else ++ PVRSRV_ERROR eError; ++ ++ RA_ARENA *pArena; ++ IMG_UINT32 ui32Log2NumPages = 0; ++ ++ eError = PhysmemGetArenaLMA(psPhysHeap, &pArena); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA"); ++ ++ PVR_ASSERT(uiSize != 0); ++ ui32Log2NumPages = OSGetOrder(uiSize); ++ uiSize = (1 << ui32Log2NumPages) * OSGetPageSize(); ++ ++ eError = _LMA_DoPhyContigPagesAlloc(pArena, uiSize, psMemHandle, ++ psDevPAddr, uiPid); ++ PVR_LOG_IF_ERROR(eError, "_LMA_DoPhyContigPagesAlloc"); ++ ++ return eError; ++#endif ++} ++ ++static void ++LMA_PhyContigPagesFree(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *psMemHandle) ++{ ++ RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle; ++ RA_ARENA *pArena; ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); ++ IMG_UINT32 ui32OSid = psMemHandle->uiOSid; ++ ++ /* ++ * The Arena ID is set by the originating allocation, and maintained via ++ * the call stacks into this function. We have a limited range of IDs ++ * and if the passed value falls outside this we simply treat it as a ++ * 'global' arena ID of 0. This is where all default OS-specific allocations ++ * are created. ++ */ ++ PVR_ASSERT(ui32OSid < GPUVIRT_VALIDATION_NUM_OS); ++ if (ui32OSid >= GPUVIRT_VALIDATION_NUM_OS) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Arena index %u PhysAddr 0x%" ++ IMG_UINT64_FMTSPECx " Reverting to Arena 0", __func__, ++ ui32OSid, uiCardAddr)); ++ /* ++ * No way of determining what we're trying to free so default to the ++ * global default arena index 0. 
++ */ ++ ui32OSid = 0; ++ } ++ ++ pArena = psDevNode->psOSidSubArena[ui32OSid]; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: (GPU Virtualisation) Freeing 0x%" ++ IMG_UINT64_FMTSPECx ", Arena %u", __func__, ++ uiCardAddr, ui32OSid)); ++ ++#else ++ PhysmemGetArenaLMA(psPhysHeap, &pArena); ++#endif ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, ++ (IMG_UINT64)uiCardAddr); ++#else ++ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, ++ (IMG_UINT64)uiCardAddr, ++ OSGetCurrentClientProcessIDKM()); ++#endif ++#endif ++ ++ RA_Free(pArena, uiCardAddr); ++ psMemHandle->uiOrder = 0; ++} ++ ++static PVRSRV_ERROR ++LMA_PhyContigPagesMap(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *psMemHandle, ++ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr, ++ void **pvPtr) ++{ ++ IMG_CPU_PHYADDR sCpuPAddr; ++ IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ ++ PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr); ++ *pvPtr = OSMapPhysToLin(sCpuPAddr, ++ ui32NumPages * OSGetPageSize(), ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC); ++ PVR_RETURN_IF_NOMEM(*pvPtr); ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, ++ ui32NumPages * OSGetPageSize(), ++ OSGetCurrentClientProcessIDKM()); ++#else ++ { ++ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, ++ *pvPtr, ++ sCpuPAddr, ++ ui32NumPages * OSGetPageSize(), ++ NULL, ++ OSGetCurrentClientProcessIDKM() ++ DEBUG_MEMSTATS_VALUES); ++ } ++#endif ++#endif ++ return PVRSRV_OK; ++} ++ ++static void ++LMA_PhyContigPagesUnmap(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *psMemHandle, ++ void *pvPtr) ++{ ++ IMG_UINT32 ui32NumPages = (1 << psMemHandle->uiOrder); ++ PVR_UNREFERENCED_PARAMETER(psPhysHeap); ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, ++ ui32NumPages * OSGetPageSize(), ++ OSGetCurrentClientProcessIDKM()); ++#else ++ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, ++ (IMG_UINT64)(uintptr_t)pvPtr, ++ OSGetCurrentClientProcessIDKM()); ++#endif ++#endif ++ ++ OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize()); ++} ++ ++static PVRSRV_ERROR ++LMA_PhyContigPagesClean(PHYS_HEAP *psPhysHeap, ++ PG_HANDLE *psMemHandle, ++ IMG_UINT32 uiOffset, ++ IMG_UINT32 uiLength) ++{ ++ /* No need to flush because we map as uncached */ ++ PVR_UNREFERENCED_PARAMETER(psPhysHeap); ++ PVR_UNREFERENCED_PARAMETER(psMemHandle); ++ PVR_UNREFERENCED_PARAMETER(uiOffset); ++ PVR_UNREFERENCED_PARAMETER(uiLength); ++ ++ return PVRSRV_OK; ++} ++ ++static PHEAP_IMPL_FUNCS _sPHEAPImplFuncs = ++{ ++ .pfnDestroyData = &_DestroyImplData, ++ .pfnGetDevPAddr = &_GetDevPAddr, ++ .pfnGetCPUPAddr = &_GetCPUPAddr, ++ .pfnGetSize = &_GetSize, ++ .pfnGetPageShift = &_GetPageShift, ++ .pfnGetPMRFactoryMemStats = &PhysmemGetLocalRamMemStats, ++ .pfnCreatePMR = &PhysmemNewLocalRamBackedPMR, ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ .pfnPagesAllocGPV = &LMA_PhyContigPagesAllocGPV, ++#endif ++ .pfnPagesAlloc = &LMA_PhyContigPagesAlloc, ++ .pfnPagesFree = &LMA_PhyContigPagesFree, ++ .pfnPagesMap = &LMA_PhyContigPagesMap, ++ .pfnPagesUnMap = &LMA_PhyContigPagesUnmap, ++ .pfnPagesClean = &LMA_PhyContigPagesClean, ++}; ++ ++PVRSRV_ERROR ++PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode, 
++ PHYS_HEAP_CONFIG *psConfig, ++ IMG_CHAR *pszLabel, ++ PHYS_HEAP **ppsPhysHeap) ++{ ++ PHYSMEM_LMA_DATA *psLMAData; ++ PVRSRV_ERROR eError; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszLabel != NULL, "pszLabel"); ++ ++ psLMAData = OSAllocMem(sizeof(*psLMAData)); ++ PVR_LOG_RETURN_IF_NOMEM(psLMAData, "OSAllocMem"); ++ ++ psLMAData->sStartAddr = psConfig->sStartAddr; ++ psLMAData->sCardBase = psConfig->sCardBase; ++ psLMAData->uiSize = psConfig->uiSize; ++ ++ ++ eError = PhysHeapCreate(psDevNode, ++ psConfig, ++ (PHEAP_IMPL_DATA)psLMAData, ++ &_sPHEAPImplFuncs, ++ ppsPhysHeap); ++ if (eError != PVRSRV_OK) ++ { ++ OSFreeMem(psLMAData); ++ return eError; ++ } ++ ++ eError = _CreateArenas(psLMAData, pszLabel); ++ PVR_LOG_RETURN_IF_ERROR(eError, "_CreateArenas"); ++ ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR _MapAlloc(PHYS_HEAP *psPhysHeap, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ size_t uiSize, ++ PMR_FLAGS_T ulFlags, ++ void **pvPtr) ++{ ++ IMG_UINT32 ui32CPUCacheFlags; ++ IMG_CPU_PHYADDR sCpuPAddr; ++ PVRSRV_ERROR eError; ++ ++ eError = DevmemCPUCacheMode(PhysHeapDeviceNode(psPhysHeap), ulFlags, &ui32CPUCacheFlags); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr); ++ ++ *pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags); ++ PVR_RETURN_IF_NOMEM(*pvPtr); ++ ++ return PVRSRV_OK; ++} ++ ++static void _UnMapAlloc(size_t uiSize, ++ void *pvPtr) ++{ ++ OSUnMapPhysToLin(pvPtr, uiSize); ++} ++ ++static PVRSRV_ERROR ++_PoisonAlloc(PHYS_HEAP *psPhysHeap, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_UINT32 uiContigAllocSize, ++ IMG_BYTE ui8PoisonValue) ++{ ++ PVRSRV_ERROR eError; ++ void *pvKernLin = NULL; ++ ++ eError = _MapAlloc(psPhysHeap, ++ psDevPAddr, ++ uiContigAllocSize, ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, ++ &pvKernLin); ++ PVR_GOTO_IF_ERROR(eError, map_failed); ++ ++ OSCachedMemSetWMB(pvKernLin, ui8PoisonValue, uiContigAllocSize); ++ ++ _UnMapAlloc(uiContigAllocSize, pvKernLin); ++ ++ return PVRSRV_OK; ++ ++map_failed: ++ PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation")); ++ return eError; ++} ++ ++static PVRSRV_ERROR ++_ZeroAlloc(PHYS_HEAP *psPhysHeap, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_UINT32 uiContigAllocSize) ++{ ++ void *pvKernLin = NULL; ++ PVRSRV_ERROR eError; ++ ++ eError = _MapAlloc(psPhysHeap, ++ psDevPAddr, ++ uiContigAllocSize, ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, ++ &pvKernLin); ++ PVR_GOTO_IF_ERROR(eError, map_failed); ++ ++ OSCachedMemSetWMB(pvKernLin, 0, uiContigAllocSize); ++ ++ _UnMapAlloc(uiContigAllocSize, pvKernLin); ++ ++ return PVRSRV_OK; ++ ++map_failed: ++ PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation")); ++ return eError; ++} ++ ++static PVRSRV_ERROR ++_AllocLMPageArray(PMR_SIZE_T uiSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pabMappingTable, ++ IMG_UINT32 uiLog2AllocPageSize, ++ IMG_BOOL bZero, ++ IMG_BOOL bPoisonOnAlloc, ++ IMG_BOOL bPoisonOnFree, ++ IMG_BOOL bContig, ++ IMG_BOOL bOnDemand, ++ PHYS_HEAP* psPhysHeap, ++ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, ++ IMG_PID uiPid, ++ PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr, ++ CONNECTION_DATA *psConnection ++ ) ++{ ++ PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL; ++ IMG_UINT32 ui32Index; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(!bZero || !bPoisonOnAlloc); ++ PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize); ++ ++ psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA)); ++ PVR_GOTO_IF_NOMEM(psPageArrayData, eError, errorOnAllocArray); ++ ++ if (bContig) ++ { ++ /* ++ Some allocations 
require kernel mappings in which case in order ++ to be virtually contiguous we also have to be physically contiguous. ++ */ ++ psPageArrayData->uiTotalNumPages = 1; ++ psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages; ++ psPageArrayData->uiContigAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize); ++ psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize; ++ } ++ else ++ { ++ IMG_UINT32 uiNumPages; ++ ++ /* Use of cast below is justified by the assertion that follows to ++ prove that no significant bits have been truncated */ ++ uiNumPages = (IMG_UINT32)(((uiSize - 1) >> uiLog2AllocPageSize) + 1); ++ PVR_ASSERT(((PMR_SIZE_T)uiNumPages << uiLog2AllocPageSize) == uiSize); ++ ++ psPageArrayData->uiTotalNumPages = uiNumPages; ++ ++ if ((ui32NumVirtChunks != ui32NumPhysChunks) || (1 < ui32NumVirtChunks)) ++ { ++ psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks; ++ } ++ else ++ { ++ psPageArrayData->uiPagesToAlloc = uiNumPages; ++ } ++ psPageArrayData->uiContigAllocSize = 1 << uiLog2AllocPageSize; ++ psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize; ++ } ++ psPageArrayData->psConnection = psConnection; ++ psPageArrayData->uiPid = uiPid; ++ psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) * ++ psPageArrayData->uiTotalNumPages); ++ PVR_GOTO_IF_NOMEM(psPageArrayData->pasDevPAddr, eError, errorOnAllocAddr); ++ ++ /* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */ ++ for (ui32Index = 0; ui32Index < psPageArrayData->uiTotalNumPages; ui32Index++) ++ { ++ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR; ++ } ++ ++ psPageArrayData->iNumPagesAllocated = 0; ++ psPageArrayData->bZeroOnAlloc = bZero; ++ psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc; ++ psPageArrayData->bPoisonOnFree = bPoisonOnFree; ++ psPageArrayData->bOnDemand = bOnDemand; ++ psPageArrayData->psPhysHeap = psPhysHeap; ++ psPageArrayData->uiAllocFlags = uiAllocFlags; ++ ++ *ppsPageArrayDataPtr = psPageArrayData; ++ ++ return PVRSRV_OK; ++ ++ /* ++ error exit paths follow: ++ */ ++errorOnAllocAddr: ++ OSFreeMem(psPageArrayData); ++ ++errorOnAllocArray: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++ ++static PVRSRV_ERROR ++_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable) ++{ ++ PVRSRV_ERROR eError; ++ RA_BASE_T uiCardAddr; ++ RA_LENGTH_T uiActualSize; ++ IMG_UINT32 i, ui32Index = 0; ++ IMG_UINT32 uiContigAllocSize; ++ IMG_UINT32 uiLog2AllocSize; ++ PVRSRV_DEVICE_NODE *psDevNode; ++ IMG_BOOL bPoisonOnAlloc; ++ IMG_BOOL bZeroOnAlloc; ++ RA_ARENA *pArena; ++ ++ PVR_ASSERT(NULL != psPageArrayData); ++ PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated); ++ ++ psDevNode = PhysHeapDeviceNode(psPageArrayData->psPhysHeap); ++ uiContigAllocSize = psPageArrayData->uiContigAllocSize; ++ uiLog2AllocSize = psPageArrayData->uiLog2AllocSize; ++ bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc; ++ bZeroOnAlloc = psPageArrayData->bZeroOnAlloc; ++ ++ /* Get suitable local memory region for this GPU physheap allocation */ ++ eError = PhysmemGetArenaLMA(psPageArrayData->psPhysHeap, &pArena); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemGetArenaLMA"); ++ ++ if (psPageArrayData->uiTotalNumPages < ++ (psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Pages requested to allocate don't fit PMR alloc Size. 
" ++ "Allocated: %u + Requested: %u > Total Allowed: %u", ++ psPageArrayData->iNumPagesAllocated, ++ psPageArrayData->uiPagesToAlloc, ++ psPageArrayData->uiTotalNumPages)); ++ return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; ++ } ++ ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ { ++ IMG_UINT32 ui32OSid=0; ++ ++ /* Obtain the OSid specific data from our connection handle */ ++ if (psPageArrayData->psConnection != NULL) ++ { ++ ui32OSid = psPageArrayData->psConnection->ui32OSid; ++ } ++ ++ if (PVRSRV_CHECK_SHARED_BUFFER(psPageArrayData->uiAllocFlags)) ++ { ++ pArena=psDevNode->psOSSharedArena; ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "(GPU Virtualization Validation): Giving from shared mem")); ++ } ++ else ++ { ++ pArena=psDevNode->psOSidSubArena[ui32OSid]; ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "(GPU Virtualization Validation): Giving from OS slot %d", ++ ui32OSid)); ++ } ++ } ++#endif ++ ++ psPageArrayData->psArena = pArena; ++ ++ for (i = 0; i < psPageArrayData->uiPagesToAlloc; i++) ++ { ++ /* This part of index finding should happen before allocating the page. ++ * Just avoiding intricate paths */ ++ if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc) ++ { ++ ui32Index = i; ++ } ++ else ++ { ++ if (NULL == pui32MapTable) ++ { ++ PVR_LOG_GOTO_WITH_ERROR("pui32MapTable", eError, PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY, errorOnRAAlloc); ++ } ++ ++ ui32Index = pui32MapTable[i]; ++ if (ui32Index >= psPageArrayData->uiTotalNumPages) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Page alloc request Index out of bounds for PMR @0x%p", ++ __func__, ++ psPageArrayData)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, errorOnRAAlloc); ++ } ++ ++ if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr) ++ { ++ PVR_LOG_GOTO_WITH_ERROR("Mapping already exists", eError, PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS, errorOnRAAlloc); ++ } ++ } ++ ++ eError = RA_Alloc(pArena, ++ uiContigAllocSize, ++ RA_NO_IMPORT_MULTIPLIER, ++ 0, /* No flags */ ++ 1ULL << uiLog2AllocSize, ++ "LMA_Page_Alloc", ++ &uiCardAddr, ++ &uiActualSize, ++ NULL); /* No private handle */ ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to Allocate the page @index:%d, size = 0x%llx", ++ ui32Index, 1ULL << uiLog2AllocSize)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES, errorOnRAAlloc); ++ } ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++{ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "(GPU Virtualization Validation): Address: 0x%"IMG_UINT64_FMTSPECX, ++ uiCardAddr)); ++} ++#endif ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ /* Allocation is done a page at a time */ ++ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize, psPageArrayData->uiPid); ++#else ++ { ++ IMG_CPU_PHYADDR sLocalCpuPAddr; ++ ++ sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr; ++ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, ++ NULL, ++ sLocalCpuPAddr, ++ uiActualSize, ++ NULL, ++ psPageArrayData->uiPid ++ DEBUG_MEMSTATS_VALUES); ++ } ++#endif ++#endif ++ ++ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr; ++ if (bPoisonOnAlloc) ++ { ++ eError = _PoisonAlloc(psPageArrayData->psPhysHeap, ++ &psPageArrayData->pasDevPAddr[ui32Index], ++ uiContigAllocSize, ++ PVRSRV_POISON_ON_ALLOC_VALUE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "_PoisonAlloc", errorOnPoison); ++ } ++ ++ if (bZeroOnAlloc) ++ { ++ eError = _ZeroAlloc(psPageArrayData->psPhysHeap, ++ &psPageArrayData->pasDevPAddr[ui32Index], ++ 
uiContigAllocSize); ++ PVR_LOG_GOTO_IF_ERROR(eError, "_ZeroAlloc", errorOnZero); ++ } ++ } ++ psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc; ++ ++ return PVRSRV_OK; ++ ++ /* ++ error exit paths follow: ++ */ ++errorOnZero: ++errorOnPoison: ++ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; ++errorOnRAAlloc: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: alloc_pages failed to honour request %d @index: %d of %d pages: (%s)", ++ __func__, ++ ui32Index, ++ i, ++ psPageArrayData->uiPagesToAlloc, ++ PVRSRVGetErrorString(eError))); ++ while (--i < psPageArrayData->uiPagesToAlloc) ++ { ++ if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc) ++ { ++ ui32Index = i; ++ } ++ else ++ { ++ if (NULL == pui32MapTable) ++ { ++ break; ++ } ++ ++ ui32Index = pui32MapTable[i]; ++ } ++ ++ if (ui32Index < psPageArrayData->uiTotalNumPages) ++ { ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++ /* Allocation is done a page at a time */ ++ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, ++ uiContigAllocSize, ++ psPageArrayData->uiPid); ++#else ++ { ++ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, ++ psPageArrayData->pasDevPAddr[ui32Index].uiAddr, ++ psPageArrayData->uiPid); ++ } ++#endif ++#endif ++ RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr); ++ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR; ++ } ++ } ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static PVRSRV_ERROR ++_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData) ++{ ++ OSFreeMem(psPageArrayData->pasDevPAddr); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "physmem_lma.c: freed local memory array structure for PMR @0x%p", ++ psPageArrayData)); ++ ++ OSFreeMem(psPageArrayData); ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, ++ IMG_UINT32 *pui32FreeIndices, ++ IMG_UINT32 ui32FreePageCount) ++{ ++ IMG_UINT32 uiContigAllocSize; ++ IMG_UINT32 i, ui32PagesToFree=0, ui32PagesFreed=0, ui32Index=0; ++ RA_ARENA *pArena = psPageArrayData->psArena; ++ ++ PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0); ++ ++ uiContigAllocSize = psPageArrayData->uiContigAllocSize; ++ ++ ui32PagesToFree = (NULL == pui32FreeIndices) ? 
++			psPageArrayData->uiTotalNumPages : ui32FreePageCount;
++
++	for (i = 0; i < ui32PagesToFree; i++)
++	{
++		if (NULL == pui32FreeIndices)
++		{
++			ui32Index = i;
++		}
++		else
++		{
++			ui32Index = pui32FreeIndices[i];
++		}
++
++		if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
++		{
++			ui32PagesFreed++;
++			if (psPageArrayData->bPoisonOnFree)
++			{
++				_PoisonAlloc(psPageArrayData->psPhysHeap,
++				             &psPageArrayData->pasDevPAddr[ui32Index],
++				             uiContigAllocSize,
++				             PVRSRV_POISON_ON_FREE_VALUE);
++			}
++
++			RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
++			/* Allocation is done a page at a time */
++			PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
++			                            uiContigAllocSize,
++			                            psPageArrayData->uiPid);
++#else
++			{
++				PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
++				                                psPageArrayData->pasDevPAddr[ui32Index].uiAddr,
++				                                psPageArrayData->uiPid);
++			}
++#endif
++#endif
++			psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
++		}
++	}
++	psPageArrayData->iNumPagesAllocated -= ui32PagesFreed;
++
++	PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
++
++	PVR_DPF((PVR_DBG_MESSAGE,
++	         "%s: freed %d bytes of local memory for PMR @0x%p",
++	         __func__,
++	         (ui32PagesFreed * uiContigAllocSize),
++	         psPageArrayData));
++
++	return PVRSRV_OK;
++}
++
++/*
++ *
++ * Implementation of callback functions
++ *
++ */
++
++/* The destructor is called after the last reference disappears, but
++   before the PMR itself is freed. */
++static PVRSRV_ERROR
++PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv)
++{
++	PVRSRV_ERROR eError;
++	PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
++
++	psLMAllocArrayData = pvPriv;
++
++	/* We can't free pages until now. */
++	if (psLMAllocArrayData->iNumPagesAllocated != 0)
++	{
++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) && defined(__linux__)
++		PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++		IMG_UINT32 ui32LMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU;
++
++		mutex_lock(&g_sLMALeakMutex);
++
++		g_ui32LMALeakCounter++;
++		if (ui32LMALeakMax && g_ui32LMALeakCounter >= ui32LMALeakMax)
++		{
++			g_ui32LMALeakCounter = 0;
++			mutex_unlock(&g_sLMALeakMutex);
++
++			PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv));
++			return PVRSRV_OK;
++		}
++
++		mutex_unlock(&g_sLMALeakMutex);
++#endif
++		eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
++		PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */
++	}
++
++	eError = _FreeLMPageArray(psLMAllocArrayData);
++	PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */
++
++	return PVRSRV_OK;
++}
++
++/* Callback function for locking the system physical page addresses.
++   As we are LMA there is nothing to do as we control physical memory.
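++   For UMA (OS-backed) PMRs this callback is where pages would have to
++   be pinned; for LMA the only remaining work is the deferred
++   (on-demand) page allocation performed below.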
*/ ++static PVRSRV_ERROR ++PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) ++{ ++ ++ PVRSRV_ERROR eError; ++ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData; ++ ++ psLMAllocArrayData = pvPriv; ++ ++ if (psLMAllocArrayData->bOnDemand) ++ { ++ /* Allocate Memory for deferred allocation */ ++ eError = _AllocLMPages(psLMAllocArrayData, NULL); ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData; ++ ++ psLMAllocArrayData = pvPriv; ++ ++ if (psLMAllocArrayData->bOnDemand) ++ { ++ /* Free Memory for deferred allocation */ ++ eError = _FreeLMPages(psLMAllocArrayData, NULL, 0); ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++ PVR_ASSERT(eError == PVRSRV_OK); ++ return eError; ++} ++ ++/* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */ ++static PVRSRV_ERROR ++PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_DEVMEM_OFFSET_T *puiOffset, ++ IMG_BOOL *pbValid, ++ IMG_DEV_PHYADDR *psDevPAddr) ++{ ++ IMG_UINT32 idx; ++ IMG_UINT32 uiLog2AllocSize; ++ IMG_UINT32 uiNumAllocs; ++ IMG_UINT64 uiAllocIndex; ++ IMG_DEVMEM_OFFSET_T uiInAllocOffset; ++ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv; ++ ++ if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Requested physical addresses from PMR " ++ "for incompatible contiguity %u!", ++ __func__, ++ ui32Log2PageSize)); ++ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY; ++ } ++ ++ uiNumAllocs = psLMAllocArrayData->uiTotalNumPages; ++ if (uiNumAllocs > 1) ++ { ++ PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0); ++ uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize; ++ ++ for (idx=0; idx < ui32NumOfPages; idx++) ++ { ++ if (pbValid[idx]) ++ { ++ uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize; ++ uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize); ++ ++ PVR_LOG_RETURN_IF_FALSE(uiAllocIndex < uiNumAllocs, ++ "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE); ++ ++ PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize)); ++ ++ psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset; ++ } ++ } ++ } ++ else ++ { ++ for (idx=0; idx < ui32NumOfPages; idx++) ++ { ++ if (pbValid[idx]) ++ { ++ psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx]; ++ } ++ } ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, ++ size_t uiOffset, ++ size_t uiSize, ++ void **ppvKernelAddressOut, ++ IMG_HANDLE *phHandleOut, ++ PMR_FLAGS_T ulFlags) ++{ ++ PVRSRV_ERROR eError; ++ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; ++ void *pvKernLinAddr = NULL; ++ IMG_UINT32 ui32PageIndex = 0; ++ size_t uiOffsetMask = uiOffset; ++ ++ psLMAllocArrayData = pvPriv; ++ ++ /* Check that we can map this in contiguously */ ++ if (psLMAllocArrayData->uiTotalNumPages != 1) ++ { ++ size_t uiStart = uiOffset; ++ size_t uiEnd = uiOffset + uiSize - 1; ++ size_t uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1); ++ ++ /* We can still map if only one page is required */ ++ if ((uiStart & uiPageMask) != (uiEnd & uiPageMask)) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY, e0); ++ } ++ ++ /* Locate the desired physical page to map in */ ++ ui32PageIndex = uiOffset 
>> psLMAllocArrayData->uiLog2AllocSize; ++ uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1; ++ } ++ ++ PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages); ++ ++ eError = _MapAlloc(psLMAllocArrayData->psPhysHeap, ++ &psLMAllocArrayData->pasDevPAddr[ui32PageIndex], ++ psLMAllocArrayData->uiContigAllocSize, ++ ulFlags, ++ &pvKernLinAddr); ++ ++ *ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask); ++ *phHandleOut = pvKernLinAddr; ++ ++ return eError; ++ ++ /* ++ error exit paths follow: ++ */ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_HANDLE hHandle) ++{ ++ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; ++ void *pvKernLinAddr = NULL; ++ ++ psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv; ++ pvKernLinAddr = (void *) hHandle; ++ ++ _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize, ++ pvKernLinAddr); ++} ++ ++ ++static PVRSRV_ERROR ++CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes, ++ void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer, ++ IMG_UINT8 *pcPMR, ++ size_t uiSize)) ++{ ++ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL; ++ size_t uiBytesCopied; ++ size_t uiBytesToCopy; ++ size_t uiBytesCopyableFromAlloc; ++ void *pvMapping = NULL; ++ IMG_UINT8 *pcKernelPointer = NULL; ++ size_t uiBufferOffset; ++ IMG_UINT64 uiAllocIndex; ++ IMG_DEVMEM_OFFSET_T uiInAllocOffset; ++ PVRSRV_ERROR eError; ++ ++ psLMAllocArrayData = pvPriv; ++ ++ uiBytesCopied = 0; ++ uiBytesToCopy = uiBufSz; ++ uiBufferOffset = 0; ++ ++ if (psLMAllocArrayData->uiTotalNumPages > 1) ++ { ++ while (uiBytesToCopy > 0) ++ { ++ /* we have to map one alloc in at a time */ ++ PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0); ++ uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize; ++ uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize); ++ uiBytesCopyableFromAlloc = uiBytesToCopy; ++ if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize)) ++ { ++ uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset); ++ } ++ ++ PVR_ASSERT(uiBytesCopyableFromAlloc != 0); ++ PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages); ++ PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize)); ++ ++ eError = _MapAlloc(psLMAllocArrayData->psPhysHeap, ++ &psLMAllocArrayData->pasDevPAddr[uiAllocIndex], ++ psLMAllocArrayData->uiContigAllocSize, ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, ++ &pvMapping); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ pcKernelPointer = pvMapping; ++ pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc); ++ ++ _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize, ++ pvMapping); ++ ++ uiBufferOffset += uiBytesCopyableFromAlloc; ++ uiBytesToCopy -= uiBytesCopyableFromAlloc; ++ uiOffset += uiBytesCopyableFromAlloc; ++ uiBytesCopied += uiBytesCopyableFromAlloc; ++ } ++ } ++ else ++ { ++ PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiContigAllocSize); ++ PVR_ASSERT(psLMAllocArrayData->uiContigAllocSize != 0); ++ eError = _MapAlloc(psLMAllocArrayData->psPhysHeap, ++ &psLMAllocArrayData->pasDevPAddr[0], ++ psLMAllocArrayData->uiContigAllocSize, ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC, ++ &pvMapping); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ pcKernelPointer = pvMapping; ++ 
pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz); ++ ++ _UnMapAlloc(psLMAllocArrayData->uiContigAllocSize, ++ pvMapping); ++ ++ uiBytesCopied = uiBufSz; ++ } ++ *puiNumBytes = uiBytesCopied; ++ return PVRSRV_OK; ++e0: ++ *puiNumBytes = uiBytesCopied; ++ return eError; ++} ++ ++static void ReadLocalMem(IMG_UINT8 *pcBuffer, ++ IMG_UINT8 *pcPMR, ++ size_t uiSize) ++{ ++ /* the memory is mapped as WC (and also aligned to page size) so we can ++ * safely call "Cached" memcpy */ ++ OSCachedMemCopy(pcBuffer, pcPMR, uiSize); ++} ++ ++static PVRSRV_ERROR ++PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes) ++{ ++ return CopyBytesLocalMem(pvPriv, ++ uiOffset, ++ pcBuffer, ++ uiBufSz, ++ puiNumBytes, ++ ReadLocalMem); ++} ++ ++static void WriteLocalMem(IMG_UINT8 *pcBuffer, ++ IMG_UINT8 *pcPMR, ++ size_t uiSize) ++{ ++ /* the memory is mapped as WC (and also aligned to page size) so we can ++ * safely call "Cached" memcpy but need to issue a write memory barrier ++ * to flush the write buffers after */ ++ OSCachedMemCopyWMB(pcPMR, pcBuffer, uiSize); ++} ++ ++static PVRSRV_ERROR ++PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes) ++{ ++ return CopyBytesLocalMem(pvPriv, ++ uiOffset, ++ pcBuffer, ++ uiBufSz, ++ puiNumBytes, ++ WriteLocalMem); ++} ++ ++/*************************************************************************/ /*! ++@Function PMRChangeSparseMemLocalMem ++@Description This function Changes the sparse mapping by allocating and ++ freeing of pages. It also changes the GPU maps accordingly. ++@Return PVRSRV_ERROR failure code ++*/ /**************************************************************************/ ++static PVRSRV_ERROR ++PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv, ++ const PMR *psPMR, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_UINT32 uiFlags) ++{ ++ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ IMG_UINT32 ui32AdtnlAllocPages = 0; ++ IMG_UINT32 ui32AdtnlFreePages = 0; ++ IMG_UINT32 ui32CommonRequstCount = 0; ++ IMG_UINT32 ui32Loop = 0; ++ IMG_UINT32 ui32Index = 0; ++ IMG_UINT32 uiAllocpgidx; ++ IMG_UINT32 uiFreepgidx; ++ ++ PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; ++ IMG_DEV_PHYADDR sPhyAddr; ++ ++#if defined(DEBUG) ++ IMG_BOOL bPoisonFail = IMG_FALSE; ++ IMG_BOOL bZeroFail = IMG_FALSE; ++#endif ++ ++ /* Fetch the Page table array represented by the PMR */ ++ IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr; ++ PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR); ++ ++ /* The incoming request is classified into two operations independent of ++ * each other: alloc & free pages. ++ * These operations can be combined with two mapping operations as well ++ * which are GPU & CPU space mappings. ++ * ++ * From the alloc and free page requests, the net amount of pages to be ++ * allocated or freed is computed. Pages that were requested to be freed ++ * will be reused to fulfil alloc requests. ++ * ++ * The order of operations is: ++ * 1. Allocate new pages from the OS ++ * 2. Move the free pages from free request to alloc positions. ++ * 3. Free the rest of the pages not used for alloc ++ * ++ * Alloc parameters are validated at the time of allocation ++ * and any error will be handled then. 
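++	 *
++	 * E.g. a request with both resize flags set, ui32AllocPageCount = 5
++	 * and ui32FreePageCount = 3 gives ui32CommonRequstCount = 3,
++	 * ui32AdtnlAllocPages = 2 and ui32AdtnlFreePages = 0: two pages are
++	 * newly allocated from the arena and the three pages being freed
++	 * are moved across to the remaining three alloc indices.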
*/
++
++	if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
++	{
++		ui32CommonRequstCount = (ui32AllocPageCount > ui32FreePageCount) ?
++				ui32FreePageCount : ui32AllocPageCount;
++
++		PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
++	}
++
++	if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
++	{
++		ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequstCount;
++	}
++	else
++	{
++		ui32AllocPageCount = 0;
++	}
++
++	if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
++	{
++		ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequstCount;
++	}
++	else
++	{
++		ui32FreePageCount = 0;
++	}
++
++	PVR_LOG_RETURN_IF_FALSE(
++		(ui32CommonRequstCount | ui32AdtnlAllocPages | ui32AdtnlFreePages) != 0,
++		"Invalid combination of parameters: ui32CommonRequstCount,"
++		" ui32AdtnlAllocPages and ui32AdtnlFreePages.",
++		PVRSRV_ERROR_INVALID_PARAMS
++	);
++
++	{
++		/* Validate the free page indices */
++		if (ui32FreePageCount)
++		{
++			if (NULL != pai32FreeIndices)
++			{
++				for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
++				{
++					uiFreepgidx = pai32FreeIndices[ui32Loop];
++
++					if (uiFreepgidx >= psPMRPageArrayData->uiTotalNumPages)
++					{
++						PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0);
++					}
++
++					if (INVALID_PAGE_ADDR == psPageArray[uiFreepgidx].uiAddr)
++					{
++						PVR_LOG_GOTO_WITH_ERROR("psPageArray[uiFreepgidx].uiAddr", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
++					}
++				}
++			}
++			else
++			{
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s: Given non-zero free count but missing indices array",
++				         __func__));
++				return PVRSRV_ERROR_INVALID_PARAMS;
++			}
++		}
++
++		/* The following block of code verifies any issues with common alloc page indices */
++		for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
++		{
++			uiAllocpgidx = pai32AllocIndices[ui32Loop];
++			if (uiAllocpgidx >= psPMRPageArrayData->uiTotalNumPages)
++			{
++				PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE, e0);
++			}
++
++			if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
++			{
++				if ((INVALID_PAGE_ADDR != psPageArray[uiAllocpgidx].uiAddr) ||
++				    (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
++				{
++					PVR_LOG_GOTO_WITH_ERROR("Trying to allocate already allocated page again", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
++				}
++			}
++			else
++			{
++				if ((INVALID_PAGE_ADDR == psPageArray[uiAllocpgidx].uiAddr) ||
++				    (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]))
++				{
++					PVR_LOG_GOTO_WITH_ERROR("Unable to remap memory due to missing page", eError, PVRSRV_ERROR_INVALID_PARAMS, e0);
++				}
++			}
++		}
++
++		ui32Loop = 0;
++
++		/* Allocate new pages */
++		if (0 != ui32AdtnlAllocPages)
++		{
++			/* Say how many pages to allocate */
++			psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages;
++
++			eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices);
++			PVR_LOG_GOTO_IF_ERROR(eError, "_AllocLMPages", e0);
++
++			/* Mark the corresponding pages of translation table as valid */
++			for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
++			{
++				psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
++			}
++
++			psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
++		}
++
++		ui32Index = ui32Loop;
++
++		/* Move the corresponding free pages to alloc request */
++		for (ui32Loop = 0; ui32Loop < ui32CommonRequstCount; ui32Loop++, ui32Index++)
++		{
++			uiAllocpgidx = pai32AllocIndices[ui32Index];
++			uiFreepgidx = pai32FreeIndices[ui32Loop];
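++
++			/* Re-point the alloc index at the page that backed the free
++			 * index; sPhyAddr preserves the alloc index's previous entry
++			 * so the SPARSE_REMAP_MEM case below can complete the swap. */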
++ sPhyAddr = psPageArray[uiAllocpgidx]; ++ psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx]; ++ ++ /* Is remap mem used in real world scenario? Should it be turned to a ++ * debug feature? The condition check needs to be out of loop, will be ++ * done at later point though after some analysis */ ++ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) ++ { ++ psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; ++ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; ++ psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE_ADDR; ++ } ++ else ++ { ++ psPageArray[uiFreepgidx] = sPhyAddr; ++ psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx; ++ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; ++ } ++ ++ /* Be sure to honour the attributes associated with the allocation ++ * such as zeroing, poisoning etc. */ ++ if (psPMRPageArrayData->bPoisonOnAlloc) ++ { ++ eError = _PoisonAlloc(psPMRPageArrayData->psPhysHeap, ++ &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx], ++ psPMRPageArrayData->uiContigAllocSize, ++ PVRSRV_POISON_ON_ALLOC_VALUE); ++ ++ /* Consider this as a soft failure and go ahead but log error to kernel log */ ++ if (eError != PVRSRV_OK) ++ { ++#if defined(DEBUG) ++ bPoisonFail = IMG_TRUE; ++#endif ++ } ++ } ++ else ++ { ++ if (psPMRPageArrayData->bZeroOnAlloc) ++ { ++ eError = _ZeroAlloc(psPMRPageArrayData->psPhysHeap, ++ &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx], ++ psPMRPageArrayData->uiContigAllocSize); ++ /* Consider this as a soft failure and go ahead but log error to kernel log */ ++ if (eError != PVRSRV_OK) ++ { ++#if defined(DEBUG) ++ /*Don't think we need to zero any pages further*/ ++ bZeroFail = IMG_TRUE; ++#endif ++ } ++ } ++ } ++ } ++ ++ /*Free the additional free pages */ ++ if (0 != ui32AdtnlFreePages) ++ { ++ ui32Index = ui32Loop; ++ _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages); ++ ui32Loop = 0; ++ ++ while (ui32Loop++ < ui32AdtnlFreePages) ++ { ++ /*Set the corresponding mapping table entry to invalid address */ ++ psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID; ++ } ++ ++ psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; ++ } ++ ++ } ++ ++#if defined(DEBUG) ++ if (IMG_TRUE == bPoisonFail) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __func__)); ++ } ++ ++ if (IMG_TRUE == bZeroFail) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __func__)); ++ } ++#endif ++ ++ /* Update the PMR memory holding information */ ++ eError = PVRSRV_OK; ++ ++e0: ++ return eError; ++} ++ ++/*************************************************************************/ /*! 
++@Function PMRChangeSparseMemCPUMapLocalMem ++@Description This function Changes CPU maps accordingly ++@Return PVRSRV_ERROR failure code ++*/ /**************************************************************************/ ++static ++PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv, ++ const PMR *psPMR, ++ IMG_UINT64 sCpuVAddrBase, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices) ++{ ++ PVRSRV_ERROR eError; ++ IMG_DEV_PHYADDR *psPageArray; ++ PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv; ++ uintptr_t sCpuVABase = sCpuVAddrBase; ++ IMG_CPU_PHYADDR sCpuAddrPtr; ++ IMG_BOOL bValid = IMG_FALSE; ++ ++ /*Get the base address of the heap */ ++ eError = PMR_CpuPhysAddr(psPMR, ++ psPMRPageArrayData->uiLog2AllocSize, ++ 1, ++ 0, /* offset zero here mean first page in the PMR */ ++ &sCpuAddrPtr, ++ &bValid); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PMR_CpuPhysAddr"); ++ ++ /* Phys address of heap is computed here by subtracting the offset of this page ++ * basically phys address of any page = Base address of heap + offset of the page */ ++ sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr; ++ psPageArray = psPMRPageArrayData->pasDevPAddr; ++ ++ return OSChangeSparseMemCPUAddrMap((void **)psPageArray, ++ sCpuVABase, ++ sCpuAddrPtr, ++ ui32AllocPageCount, ++ pai32AllocIndices, ++ ui32FreePageCount, ++ pai32FreeIndices, ++ IMG_TRUE); ++} ++ ++static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = { ++ /* pfnLockPhysAddresses */ ++ &PMRLockSysPhysAddressesLocalMem, ++ /* pfnUnlockPhysAddresses */ ++ &PMRUnlockSysPhysAddressesLocalMem, ++ /* pfnDevPhysAddr */ ++ &PMRSysPhysAddrLocalMem, ++ /* pfnAcquireKernelMappingData */ ++ &PMRAcquireKernelMappingDataLocalMem, ++ /* pfnReleaseKernelMappingData */ ++ &PMRReleaseKernelMappingDataLocalMem, ++ /* pfnReadBytes */ ++ &PMRReadBytesLocalMem, ++ /* pfnWriteBytes */ ++ &PMRWriteBytesLocalMem, ++ /* pfnUnpinMem */ ++ NULL, ++ /* pfnPinMem */ ++ NULL, ++ /* pfnChangeSparseMem*/ ++ &PMRChangeSparseMemLocalMem, ++ /* pfnChangeSparseMemCPUMap */ ++ &PMRChangeSparseMemCPUMapLocalMem, ++ /* pfnMMap */ ++ NULL, ++ /* pfnFinalize */ ++ &PMRFinalizeLocalMem ++}; ++ ++PVRSRV_ERROR ++PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap, ++ CONNECTION_DATA *psConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2AllocPageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_ERROR eError2; ++ PMR *psPMR = NULL; ++ PMR_LMALLOCARRAY_DATA *psPrivData = NULL; ++ PMR_FLAGS_T uiPMRFlags; ++ IMG_BOOL bZero; ++ IMG_BOOL bPoisonOnAlloc; ++ IMG_BOOL bPoisonOnFree; ++ IMG_BOOL bOnDemand; ++ IMG_BOOL bContig; ++ ++ /* For sparse requests we have to do the allocation ++ * in chunks rather than requesting one contiguous block */ ++ if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1) ++ { ++ if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: LMA kernel mapping functions currently " ++ "don't work with discontiguous memory.", ++ __func__)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, errorOnParam); ++ } ++ bContig = IMG_FALSE; ++ } ++ else ++ { ++ bContig = IMG_TRUE; ++ } ++ ++ bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 
IMG_TRUE : IMG_FALSE; ++ bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; ++ bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE; ++#if defined(DEBUG) ++ bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE; ++#else ++ bPoisonOnFree = IMG_FALSE; ++#endif ++ ++ /* Create Array structure that holds the physical pages */ ++ eError = _AllocLMPageArray(uiChunkSize * ui32NumVirtChunks, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ uiLog2AllocPageSize, ++ bZero, ++ bPoisonOnAlloc, ++ bPoisonOnFree, ++ bContig, ++ bOnDemand, ++ psPhysHeap, ++ uiFlags, ++ uiPid, ++ &psPrivData, ++ psConnection); ++ PVR_GOTO_IF_ERROR(eError, errorOnAllocPageArray); ++ ++ if (!bOnDemand) ++ { ++ /* Allocate the physical pages */ ++ eError = _AllocLMPages(psPrivData, pui32MappingTable); ++ PVR_GOTO_IF_ERROR(eError, errorOnAllocPages); ++ } ++ ++ /* In this instance, we simply pass flags straight through. ++ ++ Generically, uiFlags can include things that control the PMR ++ factory, but we don't need any such thing (at the time of ++ writing!), and our caller specifies all PMR flags so we don't ++ need to meddle with what was given to us. ++ */ ++ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); ++ /* check no significant bits were lost in cast due to different ++ bit widths for flags */ ++ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); ++ ++ if (bOnDemand) ++ { ++ PDUMPCOMMENT(PhysHeapDeviceNode(psPhysHeap), "Deferred Allocation PMR (LMA)"); ++ } ++ ++ eError = PMRCreatePMR(psPhysHeap, ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ uiLog2AllocPageSize, ++ uiPMRFlags, ++ pszAnnotation, ++ &_sPMRLMAFuncTab, ++ psPrivData, ++ PMR_TYPE_LMA, ++ &psPMR, ++ ui32PDumpFlags); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PMRCreatePMR", errorOnCreate); ++ ++ *ppsPMRPtr = psPMR; ++ return PVRSRV_OK; ++ ++errorOnCreate: ++ if (!bOnDemand && psPrivData->iNumPagesAllocated) ++ { ++ eError2 = _FreeLMPages(psPrivData, NULL, 0); ++ PVR_ASSERT(eError2 == PVRSRV_OK); ++ } ++ ++errorOnAllocPages: ++ eError2 = _FreeLMPageArray(psPrivData); ++ PVR_ASSERT(eError2 == PVRSRV_OK); ++ ++errorOnAllocPageArray: ++errorOnParam: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/physmem_lma.h b/drivers/gpu/drm/img-rogue/physmem_lma.h +new file mode 100644 +index 000000000000..51f4257b5a82 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_lma.h +@@ -0,0 +1,93 @@ ++/**************************************************************************/ /*! ++@File ++@Title Header for local card memory allocator ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of the memory management. This module is responsible for ++ implementing the function callbacks for local card memory. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef SRVSRV_PHYSMEM_LMA_H ++#define SRVSRV_PHYSMEM_LMA_H ++ ++/* include/ */ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_memallocflags.h" ++ ++/* services/server/include/ */ ++#include "pmr.h" ++#include "pmr_impl.h" ++ ++/*************************************************************************/ /*! ++@Function PhysmemCreateHeapLMA ++@Description Create and register new LMA heap with LMA specific details. ++@Input psDevNode Pointer to device node struct. ++@Input psConfig Heap configuration. ++@Input pszLabel Debug identifier label ++@Output ppsPhysHeap Pointer to the created heap. ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PhysmemCreateHeapLMA(PVRSRV_DEVICE_NODE *psDevNode, ++ PHYS_HEAP_CONFIG *psConfig, ++ IMG_CHAR *pszLabel, ++ PHYS_HEAP **ppsPhysHeap); ++ ++/* ++ * PhysmemNewLocalRamBackedPMR ++ * ++ * This function will create a PMR using the local card memory and is OS ++ * agnostic. 
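++ *
++ * A minimal non-sparse call might look like this (local variable
++ * names and the annotation string are illustrative; the flags, size
++ * and PID come from the caller, and PDUMP_FLAGS_NONE is assumed to be
++ * available from pdump_km.h):
++ *
++ *   IMG_UINT32 ui32MapTable = 0;
++ *   eError = PhysmemNewLocalRamBackedPMR(psPhysHeap, psConnection,
++ *                                        uiSize, uiSize, 1, 1,
++ *                                        &ui32MapTable,
++ *                                        OSGetPageShift(), uiFlags,
++ *                                        "LMA_Example", uiPid,
++ *                                        &psPMR, PDUMP_FLAGS_NONE);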
++ */ ++PVRSRV_ERROR ++PhysmemNewLocalRamBackedPMR(PHYS_HEAP *psPhysHeap, ++ CONNECTION_DATA *psConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags); ++ ++#endif /* #ifndef SRVSRV_PHYSMEM_LMA_H */ +diff --git a/drivers/gpu/drm/img-rogue/physmem_osmem.h b/drivers/gpu/drm/img-rogue/physmem_osmem.h +new file mode 100644 +index 000000000000..1eb756530902 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_osmem.h +@@ -0,0 +1,142 @@ ++/*************************************************************************/ /*! ++@File physmem_osmem.h ++@Title OS memory PMR factory API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of Services memory management. This file defines the ++ OS memory PMR factory API that must be defined so that the ++ common & device layer code in the Services Server can allocate ++ new PMRs back with pages from the OS page allocator. Applicable ++ for UMA based platforms, such platforms must implement this API ++ in the OS Porting layer, in the "env" directory for that ++ system. ++ ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef PHYSMEM_OSMEM_H ++#define PHYSMEM_OSMEM_H ++ ++/* include/ */ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_memallocflags.h" ++ ++/* services/server/include/ */ ++#include "pmr.h" ++#include "pmr_impl.h" ++#include "connection_server.h" ++#include "physheap.h" ++ ++/*************************************************************************/ /*! ++@Function PhysmemNewOSRamBackedPMR ++@Description Rogue Services will call this function to allocate GPU device ++ memory from the PMR factory supported by the OS DDK port. This ++ factory typically obtains physical memory from the kernel/OS ++ API that allocates memory from the default heap of shared ++ system memory available on the platform. The allocated memory ++ must be page-aligned and be a whole number of pages. ++ After allocating the required memory, the implementation must ++ then call PMRCreatePMR() to obtain the PMR structure that ++ describes this allocation to the upper layers of the Services. ++ memory management sub-system. ++ NB. Implementation of this function is mandatory. If shared ++ system memory is not to be used in the OS port then the ++ implementation must return PVRSRV_ERROR_NOT_SUPPORTED. ++ ++@Input psPhysHeap the phys heap ++@Input psConnection the connection to the originator process ++@Input uiSize the size of the allocation ++ (must be a multiple of page size) ++@Input uiChunkSize when sparse allocations are requested, ++ this is the allocated chunk size. ++ For regular allocations, this will be ++ the same as uiSize. ++ (must be a multiple of page size) ++@Input ui32NumPhysChunks when sparse allocations are requested, ++ this is the number of physical chunks ++ to be allocated. ++ For regular allocations, this will be 1. ++@Input ui32NumVirtChunks when sparse allocations are requested, ++ this is the number of virtual chunks ++ covering the sparse allocation. ++ For regular allocations, this will be 1. ++@Input pui32MappingTable when sparse allocations are requested, ++ this is the list of the indices of ++ each physically-backed virtual chunk ++ For regular allocations, this will ++ be NULL. ++@Input uiLog2PageSize the physical pagesize in log2(bytes). ++@Input uiFlags the allocation flags. ++@Input pszAnnotation string describing the PMR (for debug). ++ This should be passed into the function ++ PMRCreatePMR(). ++@Input uiPid The process ID that this allocation should ++ be associated with. ++@Output ppsPMROut pointer to the PMR created for the ++ new allocation ++@Input ui32PDumpFlags the pdump flags. ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, ++ CONNECTION_DATA *psConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ IMG_UINT32 uiLog2PageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMROut, ++ IMG_UINT32 ui32PDumpFlags); ++ ++/*************************************************************************/ /*! ++@Function PhysmemGetOSRamMemStats ++@Description Function that gets the OS memory usage statistics ++@Input pvImplData Physical heap private data. 
++@Output pui64TotalSize Buffer that holds the total OS memory size ++@Output pui64FreeSize Buffer that holds the free OS memory size ++@Return None. ++*/ /**************************************************************************/ ++void PhysmemGetOSRamMemStats(PHEAP_IMPL_DATA pvImplData, ++ IMG_UINT64 *pui64TotalSize, ++ IMG_UINT64 *pui64FreeSize); ++ ++#endif /* PHYSMEM_OSMEM_H */ +diff --git a/drivers/gpu/drm/img-rogue/physmem_osmem_linux.c b/drivers/gpu/drm/img-rogue/physmem_osmem_linux.c +new file mode 100644 +index 000000000000..efea3372e3b0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_osmem_linux.c +@@ -0,0 +1,3952 @@ ++/*************************************************************************/ /*! ++@File ++@Title Implementation of PMR functions for OS managed memory ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of the memory management. This module is responsible for ++ implementing the function callbacks for physical memory borrowed ++ from that normally managed by the operating system. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if defined(CONFIG_X86) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) ++#include ++#else ++#include ++#endif ++#endif ++ ++/* include/ */ ++#include "rgx_heaps.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_memallocflags.h" ++#include "rgx_pdump_panics.h" ++/* services/server/include/ */ ++#include "allocmem.h" ++#include "osfunc.h" ++#include "pdump_km.h" ++#include "pmr.h" ++#include "pmr_impl.h" ++#include "cache_km.h" ++#include "devicemem_server_utils.h" ++#include "pvr_vmap.h" ++#include "physheap.h" ++ ++/* ourselves */ ++#include "physmem_osmem.h" ++#include "physmem_osmem_linux.h" ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#include "process_stats.h" ++#if !defined(PVRSRV_ENABLE_MEMORY_STATS) ++#include "hash.h" ++#endif ++#endif ++ ++#include "kernel_compatibility.h" ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) ++static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM; ++#else ++/* split_page not available on older kernels */ ++#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM ++#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0 ++static IMG_UINT32 g_uiMaxOrder; ++#endif ++ ++/* ++ These corresponds to the MMU min/max page sizes and associated PTE ++ alignment that can be used on the device for an allocation. It is ++ 4KB (min) and 2MB (max) respectively. ++*/ ++#define PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_4KB_PAGE_SHIFT ++#define PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_2MB_PAGE_SHIFT ++ ++/* Defines how many pages should be mapped at once to the kernel */ ++#define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */ ++ ++/* ++ These are used to get/set/mask lower-order bits in a dma_addr_t ++ to provide side-band information associated with that address. ++ These includes whether the address was obtained via alloc_page ++ or dma_alloc and if address came allocated pre-aligned or an ++ adjustment was made manually to aligned it. ++*/ ++#define DMA_SET_ADJUSTED_ADDR(x) ((x) | ((dma_addr_t)0x02)) ++#define DMA_IS_ADDR_ADJUSTED(x) ((x) & ((dma_addr_t)0x02)) ++#define DMA_SET_ALLOCPG_ADDR(x) ((x) | ((dma_addr_t)0x01)) ++#define DMA_IS_ALLOCPG_ADDR(x) ((x) & ((dma_addr_t)0x01)) ++#define DMA_GET_ALIGN_ADJUSTMENT(x) ((x>>2) & ((dma_addr_t)0x3ff)) ++#define DMA_SET_ALIGN_ADJUSTMENT(x,y) ((x) | (((dma_addr_t)y)<<0x02)) ++#define DMA_GET_ADDR(x) (((dma_addr_t)x) & ((dma_addr_t)~0xfff)) ++#define DMA_VADDR_NOT_IN_USE 0xCAFEF00DDEADBEEFULL ++ ++#define PVRSRV_ZERO_VALUE 0 ++ ++typedef struct _PMR_OSPAGEARRAY_DATA_ { ++ /* Device for which this allocation has been made */ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ /* The pid that made this allocation */ ++ IMG_PID uiPid; ++ ++ /* ++ * iNumOSPagesAllocated: ++ * Number of pages allocated in this PMR so far. ++ * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR. ++ */ ++ IMG_INT32 iNumOSPagesAllocated; ++ ++ /* ++ * uiTotalNumOSPages: ++ * Total number of pages supported by this PMR. (Fixed as of now due the fixed Page table array size) ++ * number of "pages" (a.k.a. macro pages, compound pages, higher order pages, etc...) 
++ */ ++ IMG_UINT32 uiTotalNumOSPages; ++ ++ /* ++ uiLog2AllocPageSize; ++ ++ size of each "page" -- this would normally be the same as ++ PAGE_SHIFT, but we support the idea that we may allocate pages ++ in larger chunks for better contiguity, using order>0 in the ++ call to alloc_pages() ++ */ ++ IMG_UINT32 uiLog2AllocPageSize; ++ ++ /* ++ ui64DmaMask; ++ */ ++ IMG_UINT64 ui64DmaMask; ++ ++ /* ++ For non DMA/CMA allocation, pagearray references the pages ++ thus allocated; one entry per compound page when compound ++ pages are used. In addition, for DMA/CMA allocations, we ++ track the returned cpu virtual and device bus address. ++ */ ++ struct page **pagearray; ++ dma_addr_t *dmaphysarray; ++ void **dmavirtarray; ++ ++ ++#define FLAG_ZERO (0U) ++#define FLAG_POISON_ON_FREE (1U) ++#define FLAG_POISON_ON_ALLOC (2U) ++#define FLAG_ONDEMAND (3U) ++#define FLAG_UNPINNED (4U) ++#define FLAG_IS_CMA (5U) ++#define FLAG_UNSET_MEMORY_TYPE (6U) ++ ++ /* ++ * Allocation flags related to the pages: ++ * Zero - Should we Zero memory on alloc ++ * Poison on free - Should we Poison the memory on free. ++ * Poison on alloc - Should we Poison the memory on alloc. ++ * On demand - Is the allocation on Demand i.e Do we defer allocation to time of use. ++ * Unpinned - Should be protected by page pool lock ++ * CMA - Is CMA memory allocated via DMA framework ++ * Unset Memory Type - Upon free do we need to revert the cache type before return to OS ++ * */ ++ IMG_UINT32 ui32AllocFlags; ++ ++ /* ++ The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean ++ flag, advising us to do cache maintenance on behalf of the caller. ++ Boolean used to track if we need to revert the cache attributes ++ of the pages used in this allocation. Depends on OS/architecture. ++ */ ++ IMG_UINT32 ui32CPUCacheFlags; ++ /* ++ * In CMA allocation path, algorithm can allocate double the size of ++ * requested allocation size to satisfy the alignment. In this case ++ * the additional pages allocated are tracked through this additional ++ * variable and are accounted for in the memory statistics */ ++ IMG_UINT32 ui32CMAAdjustedPageCount; ++} PMR_OSPAGEARRAY_DATA; ++ ++/*********************************** ++ * Page pooling for uncached pages * ++ ***********************************/ ++ ++static INLINE void ++_FreeOSPage_CMA(struct device *dev, ++ size_t alloc_size, ++ IMG_UINT32 uiOrder, ++ void *virt_addr, ++ dma_addr_t dev_addr, ++ struct page *psPage); ++ ++static void ++_FreeOSPage(IMG_UINT32 uiOrder, ++ IMG_BOOL bUnsetMemoryType, ++ struct page *psPage); ++ ++static PVRSRV_ERROR ++_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_UINT32 ui32FreePageCount); ++ ++static PVRSRV_ERROR ++_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, ++ IMG_UINT32 *puiPagesFreed); ++ ++/* A struct for our page pool holding an array of zeroed (!) pages. 
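++ * Entries sit on one of the per-cache-mode LRU lists
++ * (g_sPagePoolList_UC, plus g_sPagePoolList_WC on x86) and are
++ * drained by the shrinker when the OS asks for memory back.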
++ * We always put units of page arrays to the pool but are ++ * able to take individual pages */ ++typedef struct ++{ ++ /* Linkage for page pool LRU list */ ++ struct list_head sPagePoolItem; ++ ++ /* How many items are still in the page array */ ++ IMG_UINT32 uiItemsRemaining; ++ /* Array of the actual pages */ ++ struct page **ppsPageArray; ++ ++} LinuxPagePoolEntry; ++ ++/* CleanupThread structure to put allocation in page pool */ ++typedef struct ++{ ++ PVRSRV_CLEANUP_THREAD_WORK sCleanupWork; ++ IMG_UINT32 ui32CPUCacheMode; ++ LinuxPagePoolEntry *psPoolEntry; ++} LinuxCleanupData; ++ ++/* A struct for the unpinned items */ ++typedef struct ++{ ++ struct list_head sUnpinPoolItem; ++ PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr; ++} LinuxUnpinEntry; ++ ++ ++/* Caches to hold page pool and page array structures */ ++static struct kmem_cache *g_psLinuxPagePoolCache; ++static struct kmem_cache *g_psLinuxPageArray; ++ ++/* Track what is live, all protected by pool lock. ++ * x86 needs two page pools because we have to change the memory attributes ++ * of the pages which is expensive due to an implicit flush. ++ * See set_pages_array_uc/wc/wb. */ ++static IMG_UINT32 g_ui32UnpinPageCount; ++static IMG_UINT32 g_ui32PagePoolUCCount; ++#if defined(CONFIG_X86) ++static IMG_UINT32 g_ui32PagePoolWCCount; ++#endif ++/* Tracks asynchronous tasks currently accessing the page pool. ++ * It is incremented if a defer free task ++ * is created. Both will decrement the value when they finished the work. ++ * The atomic prevents piling up of deferred work in case the deferred thread ++ * cannot keep up with the application.*/ ++static ATOMIC_T g_iPoolCleanTasks; ++/* We don't want too many asynchronous threads trying to access the page pool ++ * at the same time */ ++#define PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS 128 ++ ++/* Defines how many pages the page cache should hold. */ ++#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES) ++static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES; ++#else ++static const IMG_UINT32 g_ui32PagePoolMaxEntries; ++#endif ++ ++/* We double check if we would exceed this limit if we are below MAX_POOL_PAGES ++ and want to add an allocation to the pool. ++ This prevents big allocations being given back to the OS just because they ++ exceed the MAX_POOL_PAGES limit even though the pool is currently empty. 
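++   E.g. with a 10240-page pool limit and an empty pool, a 12000-page
++   array can still be pooled, provided the excess limit defined below
++   is not exceeded (numbers illustrative).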
*/ ++#if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES) ++static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES; ++#else ++static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries; ++#endif ++ ++#if defined(CONFIG_X86) ++#define PHYSMEM_OSMEM_NUM_OF_POOLS 2 ++static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED, ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC ++}; ++#else ++#define PHYSMEM_OSMEM_NUM_OF_POOLS 1 ++static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = { ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED ++}; ++#endif ++ ++/* Global structures we use to manage the page pool */ ++static DEFINE_MUTEX(g_sPagePoolMutex); ++ ++/* List holding the page array pointers: */ ++static LIST_HEAD(g_sPagePoolList_WC); ++static LIST_HEAD(g_sPagePoolList_UC); ++static LIST_HEAD(g_sUnpinList); ++ ++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) ++/* Global structure to manage GPU memory leak */ ++static DEFINE_MUTEX(g_sUMALeakMutex); ++static IMG_UINT32 g_ui32UMALeakCounter = 0; ++#endif ++ ++static inline IMG_UINT32 ++_PagesInPoolUnlocked(void) ++{ ++ IMG_UINT32 uiCnt = g_ui32PagePoolUCCount; ++#if defined(CONFIG_X86) ++ uiCnt += g_ui32PagePoolWCCount; ++#endif ++ return uiCnt; ++} ++ ++static inline void ++_PagePoolLock(void) ++{ ++ mutex_lock(&g_sPagePoolMutex); ++} ++ ++static inline int ++_PagePoolTrylock(void) ++{ ++ return mutex_trylock(&g_sPagePoolMutex); ++} ++ ++static inline void ++_PagePoolUnlock(void) ++{ ++ mutex_unlock(&g_sPagePoolMutex); ++} ++ ++static PVRSRV_ERROR ++_AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData) ++{ ++ LinuxUnpinEntry *psUnpinEntry; ++ ++ psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry)); ++ if (!psUnpinEntry) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: OSAllocMem failed. Cannot add entry to unpin list.", ++ __func__)); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData; ++ ++ /* Add into pool that the shrinker can access easily*/ ++ list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList); ++ ++ g_ui32UnpinPageCount += psOSPageArrayData->iNumOSPagesAllocated; ++ ++ return PVRSRV_OK; ++} ++ ++static void ++_RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData) ++{ ++ LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry; ++ ++ /* Remove from pool */ ++ list_for_each_entry_safe(psUnpinEntry, ++ psTempUnpinEntry, ++ &g_sUnpinList, ++ sUnpinPoolItem) ++ { ++ if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData) ++ { ++ list_del(&psUnpinEntry->sUnpinPoolItem); ++ break; ++ } ++ } ++ ++ OSFreeMem(psUnpinEntry); ++ ++ g_ui32UnpinPageCount -= psOSPageArrayData->iNumOSPagesAllocated; ++} ++ ++static inline IMG_BOOL ++_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags, ++ struct list_head **ppsPoolHead, ++ IMG_UINT32 **ppuiCounter) ++{ ++ switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) ++ { ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: ++#if defined(CONFIG_X86) ++ /* ++ For x86 we need to keep different lists for uncached ++ and write-combined as we must always honour the PAT ++ setting which cares about this difference. ++ */ ++ ++ *ppsPoolHead = &g_sPagePoolList_WC; ++ *ppuiCounter = &g_ui32PagePoolWCCount; ++ break; ++#endif ++ ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: ++ *ppsPoolHead = &g_sPagePoolList_UC; ++ *ppuiCounter = &g_ui32PagePoolUCCount; ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unknown CPU caching mode. 
" ++ "Using default UC pool.", ++ __func__)); ++ *ppsPoolHead = &g_sPagePoolList_UC; ++ *ppuiCounter = &g_ui32PagePoolUCCount; ++ PVR_ASSERT(0); ++ return IMG_FALSE; ++ } ++ return IMG_TRUE; ++} ++ ++static struct shrinker g_sShrinker; ++ ++/* Returning the number of pages that still reside in the page pool. */ ++static unsigned long ++_GetNumberOfPagesInPoolUnlocked(void) ++{ ++ return _PagesInPoolUnlocked() + g_ui32UnpinPageCount; ++} ++ ++/* Linux shrinker function that informs the OS about how many pages we are caching and ++ * it is able to reclaim. */ ++static unsigned long ++_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) ++{ ++ int remain; ++ ++ PVR_ASSERT(psShrinker == &g_sShrinker); ++ (void)psShrinker; ++ (void)psShrinkControl; ++ ++ /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */ ++ if (_PagePoolTrylock() == 0) ++ return 0; ++ remain = _GetNumberOfPagesInPoolUnlocked(); ++ _PagePoolUnlock(); ++ ++ return remain; ++} ++ ++/* Linux shrinker function to reclaim the pages from our page pool */ ++static unsigned long ++_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) ++{ ++ unsigned long uNumToScan = psShrinkControl->nr_to_scan; ++ unsigned long uSurplus = 0; ++ LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry; ++ IMG_UINT32 uiPagesFreed; ++ ++ PVR_ASSERT(psShrinker == &g_sShrinker); ++ (void)psShrinker; ++ ++ /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */ ++ if (_PagePoolTrylock() == 0) ++ return SHRINK_STOP; ++ ++ _FreePagesFromPoolUnlocked(uNumToScan, ++ &uiPagesFreed); ++ uNumToScan -= uiPagesFreed; ++ ++ if (uNumToScan == 0) ++ { ++ goto e_exit; ++ } ++ ++ /* Free unpinned memory, starting with LRU entries */ ++ list_for_each_entry_safe(psUnpinEntry, ++ psTempUnpinEntry, ++ &g_sUnpinList, ++ sUnpinPoolItem) ++ { ++ PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr; ++ IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumOSPages > psPageArrayDataPtr->iNumOSPagesAllocated)? ++ psPageArrayDataPtr->iNumOSPagesAllocated:psPageArrayDataPtr->uiTotalNumOSPages; ++ PVRSRV_ERROR eError; ++ ++ /* Free associated pages */ ++ eError = _FreeOSPages(psPageArrayDataPtr, ++ NULL, ++ 0); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Shrinker is unable to free unpinned pages. 
Error: %s (%d)", ++ __func__, ++ PVRSRVGetErrorString(eError), ++ eError)); ++ goto e_exit; ++ } ++ ++ /* Remove item from pool */ ++ list_del(&psUnpinEntry->sUnpinPoolItem); ++ ++ g_ui32UnpinPageCount -= uiNumPages; ++ ++ /* Check if there is more to free or if we already surpassed the limit */ ++ if (uiNumPages < uNumToScan) ++ { ++ uNumToScan -= uiNumPages; ++ ++ } ++ else if (uiNumPages > uNumToScan) ++ { ++ uSurplus += uiNumPages - uNumToScan; ++ uNumToScan = 0; ++ goto e_exit; ++ } ++ else ++ { ++ uNumToScan -= uiNumPages; ++ goto e_exit; ++ } ++ } ++ ++e_exit: ++ if (list_empty(&g_sUnpinList)) ++ { ++ PVR_ASSERT(g_ui32UnpinPageCount == 0); ++ } ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) ++ { ++ int remain; ++ remain = _GetNumberOfPagesInPoolUnlocked(); ++ _PagePoolUnlock(); ++ return remain; ++ } ++#else ++ /* Returning the number of pages freed during the scan */ ++ _PagePoolUnlock(); ++ return psShrinkControl->nr_to_scan - uNumToScan + uSurplus; ++#endif ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0)) ++static int ++_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl) ++{ ++ if (psShrinkControl->nr_to_scan != 0) ++ { ++ return _ScanObjectsInPagePool(psShrinker, psShrinkControl); ++ } ++ else ++ { ++ /* No pages are being reclaimed so just return the page count */ ++ return _CountObjectsInPagePool(psShrinker, psShrinkControl); ++ } ++} ++ ++static struct shrinker g_sShrinker = ++{ ++ .shrink = _ShrinkPagePool, ++ .seeks = DEFAULT_SEEKS ++}; ++#else ++static struct shrinker g_sShrinker = ++{ ++ .count_objects = _CountObjectsInPagePool, ++ .scan_objects = _ScanObjectsInPagePool, ++ .seeks = DEFAULT_SEEKS ++}; ++#endif ++ ++/* Register the shrinker so Linux can reclaim cached pages */ ++void LinuxInitPhysmem(void) ++{ ++ g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL); ++ ++ g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL); ++ if (g_psLinuxPagePoolCache) ++ { ++ /* Only create the shrinker if we created the cache OK */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ register_shrinker(&g_sShrinker, "pvr-shrinker"); ++#else ++ register_shrinker(&g_sShrinker); ++#endif ++ } ++ ++ OSAtomicWrite(&g_iPoolCleanTasks, 0); ++} ++ ++/* Unregister the shrinker and remove all pages from the pool that are still left */ ++void LinuxDeinitPhysmem(void) ++{ ++ IMG_UINT32 uiPagesFreed; ++ ++ if (OSAtomicRead(&g_iPoolCleanTasks) > 0) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "Still deferred cleanup tasks running " ++ "while deinitialising memory subsystem.")); ++ } ++ ++ _PagePoolLock(); ++ if (_FreePagesFromPoolUnlocked(IMG_UINT32_MAX, &uiPagesFreed) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when " ++ "deinitialising memory subsystem.")); ++ PVR_ASSERT(0); ++ } ++ ++ PVR_ASSERT(_PagesInPoolUnlocked() == 0); ++ ++ /* Free the page cache */ ++ kmem_cache_destroy(g_psLinuxPagePoolCache); ++ ++ unregister_shrinker(&g_sShrinker); ++ _PagePoolUnlock(); ++ ++ kmem_cache_destroy(g_psLinuxPageArray); ++} ++ ++static void EnableOOMKiller(void) ++{ ++ current->flags &= ~PF_DUMPCORE; ++} ++ ++static void DisableOOMKiller(void) ++{ ++ /* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled. ++ * ++ * As oom_killer_disable() is an inline, non-exported function, we ++ * can't use it from a modular driver. Furthermore, the OOM killer ++ * API doesn't look thread safe, which 'current' is. 
++ */ ++ WARN_ON(current->flags & PF_DUMPCORE); ++ current->flags |= PF_DUMPCORE; ++} ++ ++/* Prints out the addresses in a page array for debugging purposes ++ * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */ ++/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */ ++static inline void ++_DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint) ++{ ++#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY) ++ IMG_UINT32 i; ++ if (pagearray) ++ { ++ printk("Array %p:\n", pagearray); ++ for (i = 0; i < uiPagesToPrint; i++) ++ { ++ printk("%p | ", (pagearray)[i]); ++ } ++ printk("\n"); ++ } ++ else ++ { ++ printk("Array is NULL:\n"); ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(pagearray); ++ PVR_UNREFERENCED_PARAMETER(uiPagesToPrint); ++#endif ++} ++ ++/* Debugging function that dumps out the number of pages for every ++ * page array that is currently in the page pool. ++ * Not defined by default. Define locally to activate feature: */ ++/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */ ++static void ++_DumpPoolStructure(void) ++{ ++#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL) ++ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; ++ struct list_head *psPoolHead = NULL; ++ IMG_UINT32 j; ++ IMG_UINT32 *puiCounter; ++ ++ printk("\n"); ++ /* Empty all pools */ ++ for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++) ++ { ++ ++ printk("pool = %u\n", j); ++ ++ /* Get the correct list for this caching mode */ ++ if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) ++ { ++ break; ++ } ++ ++ list_for_each_entry_safe(psPagePoolEntry, ++ psTempPoolEntry, ++ psPoolHead, ++ sPagePoolItem) ++ { ++ printk("%u | ", psPagePoolEntry->uiItemsRemaining); ++ } ++ printk("\n"); ++ } ++#endif ++} ++ ++/* Free a certain number of pages from the page pool. ++ * Mainly used in error paths or at deinitialisation to ++ * empty the whole pool. 
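
The accounting of the function that follows is easy to model in isolation: walk the pooled page arrays in list order, consume whole arrays until the request is met, and shrink the last one in place if only part of it is needed. A standalone sketch with invented array sizes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* three pooled page arrays in list (LRU) order; sizes are invented */
    uint32_t entries[3] = { 500, 300, 200 };
    uint32_t to_free = 600, freed = 0;

    for (unsigned i = 0; i < 3 && to_free != 0; i++) {
        uint32_t n = entries[i] <= to_free ? entries[i] : to_free;
        entries[i] -= n; /* a partial free shrinks the array in place */
        to_free -= n;
        freed += n;
    }
    /* prints: freed 600, remaining [0 200 200] */
    printf("freed %u, remaining [%u %u %u]\n",
           freed, entries[0], entries[1], entries[2]);
    return 0;
}
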
*/ ++static PVRSRV_ERROR ++_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree, ++ IMG_UINT32 *puiPagesFreed) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; ++ struct list_head *psPoolHead = NULL; ++ IMG_UINT32 i, j; ++ IMG_UINT32 *puiCounter; ++ ++ *puiPagesFreed = uiMaxPagesToFree; ++ ++ /* Empty all pools */ ++ for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++) ++ { ++ ++ /* Get the correct list for this caching mode */ ++ if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter)) ++ { ++ break; ++ } ++ ++ /* Free the pages and remove page arrays from the pool if they are exhausted */ ++ list_for_each_entry_safe(psPagePoolEntry, ++ psTempPoolEntry, ++ psPoolHead, ++ sPagePoolItem) ++ { ++ IMG_UINT32 uiItemsToFree; ++ struct page **ppsPageArray; ++ ++ /* Check if we are going to free the whole page array or just parts */ ++ if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree) ++ { ++ uiItemsToFree = psPagePoolEntry->uiItemsRemaining; ++ ppsPageArray = psPagePoolEntry->ppsPageArray; ++ } ++ else ++ { ++ uiItemsToFree = uiMaxPagesToFree; ++ ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]); ++ } ++ ++#if defined(CONFIG_X86) ++ /* Set the correct page caching attributes on x86 */ ++ if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j])) ++ { ++ int ret; ++ ret = set_pages_array_wb(ppsPageArray, uiItemsToFree); ++ if (ret) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to reset page attributes", ++ __func__)); ++ eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES; ++ goto e_exit; ++ } ++ } ++#endif ++ ++ /* Free the actual pages */ ++ for (i = 0; i < uiItemsToFree; i++) ++ { ++ __free_pages(ppsPageArray[i], 0); ++ ppsPageArray[i] = NULL; ++ } ++ ++ /* Reduce counters */ ++ uiMaxPagesToFree -= uiItemsToFree; ++ *puiCounter -= uiItemsToFree; ++ psPagePoolEntry->uiItemsRemaining -= uiItemsToFree; ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ /* ++ * MemStats usually relies on having the bridge lock held, however ++ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and ++ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so ++ * the page pool lock is used to ensure these calls are mutually ++ * exclusive ++ */ ++ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree); ++#endif ++ ++ /* Is this pool entry exhausted, delete it */ ++ if (psPagePoolEntry->uiItemsRemaining == 0) ++ { ++ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); ++ list_del(&psPagePoolEntry->sPagePoolItem); ++ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); ++ } ++ ++ /* Return if we have all our pages */ ++ if (uiMaxPagesToFree == 0) ++ { ++ goto e_exit; ++ } ++ } ++ } ++ ++e_exit: ++ *puiPagesFreed -= uiMaxPagesToFree; ++ _DumpPoolStructure(); ++ return eError; ++} ++ ++/* Get a certain number of pages from the page pool and ++ * copy them directly into a given page array. 
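
Callers such as _AllocOSPages_Fast below use this in a pool-first pattern: take whatever the pool can provide, then allocate only the remainder from the OS. A standalone sketch of that shape, with pool_get standing in for the pool lookup:

#include <stdint.h>
#include <stdio.h>

/* stand-in for _GetPagesFromPoolLocked: the pool can hand out at most 64 */
static uint32_t pool_get(uint32_t want) { return want < 64 ? want : 64; }

int main(void)
{
    uint32_t want = 256;
    uint32_t from_pool = pool_get(want);
    uint32_t from_os = 0;

    for (uint32_t i = from_pool; i < want; i++) {
        /* alloc_pages(gfp_flags, order) would fill slot i here */
        from_os++;
    }
    printf("%u from pool, %u from the OS\n", from_pool, from_os); /* 64, 192 */
    return 0;
}
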
*/ ++static void ++_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags, ++ IMG_UINT32 uiMaxNumPages, ++ struct page **ppsPageArray, ++ IMG_UINT32 *puiNumReceivedPages) ++{ ++ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry; ++ struct list_head *psPoolHead = NULL; ++ IMG_UINT32 i; ++ IMG_UINT32 *puiCounter; ++ ++ *puiNumReceivedPages = 0; ++ ++ /* Get the correct list for this caching mode */ ++ if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) ++ { ++ return; ++ } ++ ++ /* Check if there are actually items in the list */ ++ if (list_empty(psPoolHead)) ++ { ++ return; ++ } ++ ++ PVR_ASSERT(*puiCounter > 0); ++ ++ /* Receive pages from the pool */ ++ list_for_each_entry_safe(psPagePoolEntry, ++ psTempPoolEntry, ++ psPoolHead, ++ sPagePoolItem) ++ { ++ /* Get the pages from this pool entry */ ++ for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--) ++ { ++ ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1]; ++ (*puiNumReceivedPages)++; ++ psPagePoolEntry->uiItemsRemaining--; ++ } ++ ++ /* Is this pool entry exhausted, delete it */ ++ if (psPagePoolEntry->uiItemsRemaining == 0) ++ { ++ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); ++ list_del(&psPagePoolEntry->sPagePoolItem); ++ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); ++ } ++ ++ /* Return if we have all our pages */ ++ if (*puiNumReceivedPages == uiMaxNumPages) ++ { ++ goto exit_ok; ++ } ++ } ++ ++exit_ok: ++ ++ /* Update counters */ ++ *puiCounter -= *puiNumReceivedPages; ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ /* MemStats usually relies on having the bridge lock held, however ++ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and ++ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so ++ * the page pool lock is used to ensure these calls are mutually ++ * exclusive ++ */ ++ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages)); ++#endif ++ ++ _DumpPoolStructure(); ++ return; ++} ++ ++/* Same as _GetPagesFromPoolUnlocked but handles locking and ++ * checks first whether pages from the pool are a valid option. */ ++static inline void ++_GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32CPUCacheFlags, ++ IMG_UINT32 uiPagesToAlloc, ++ IMG_UINT32 uiOrder, ++ IMG_BOOL bZero, ++ struct page **ppsPageArray, ++ IMG_UINT32 *puiPagesFromPool) ++{ ++#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) ++ PVR_UNREFERENCED_PARAMETER(bZero); ++#else ++ /* Don't get pages from pool if it doesn't provide zeroed pages */ ++ if (bZero) ++ { ++ return; ++ } ++#endif ++ ++ /* The page pool stores only order 0 pages. If we need zeroed memory we ++ * directly allocate from the OS because it is faster than ++ * doing it within the driver. */ ++ if (uiOrder == 0 && ++ !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) ++ { ++ ++ _PagePoolLock(); ++ _GetPagesFromPoolUnlocked(ui32CPUCacheFlags, ++ uiPagesToAlloc, ++ ppsPageArray, ++ puiPagesFromPool); ++ _PagePoolUnlock(); ++ } ++ ++ return; ++} ++ ++/* Takes a page array and maps it into the kernel to write zeros */ ++static PVRSRV_ERROR ++_MemsetPageArray(IMG_UINT32 uiNumToClean, ++ struct page **ppsCleanArray, ++ pgprot_t pgprot, ++ IMG_UINT8 ui8Pattern) ++{ ++ IMG_CPU_VIRTADDR pvAddr; ++ IMG_UINT32 uiMaxPagesToMap = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES, ++ uiNumToClean); ++ ++ /* Map and fill the pages with zeros. ++ * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE ++ * at a time. 
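
The fallback strategy when a mapping fails is to halve the batch size and retry, giving up only once even a single page cannot be mapped. A standalone model of that loop, with map_ok standing in for a pvr_vmap that fails above some size:

#include <stdint.h>
#include <stdio.h>

/* stand-in for pvr_vmap: pretend mappings above 256 pages fail */
static int map_ok(uint32_t batch) { return batch <= 256; }

int main(void)
{
    uint32_t remaining = 2000, batch = 1024;

    while (remaining != 0) {
        uint32_t n = remaining < batch ? remaining : batch;
        if (!map_ok(n)) {
            if (batch <= 1) { puts("out of vmalloc space"); return 1; }
            batch >>= 1; /* halve the batch and retry, as the loop below does */
            continue;
        }
        /* ...memset the mapping, then unmap... */
        remaining -= n;
    }
    printf("done, settled on batches of %u\n", batch); /* prints 256 */
    return 0;
}
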
*/ ++ while (uiNumToClean != 0) ++ { ++ IMG_UINT32 uiToClean = MIN(uiNumToClean, uiMaxPagesToMap); ++ ++ pvAddr = pvr_vmap(ppsCleanArray, uiToClean, VM_WRITE, pgprot); ++ if (!pvAddr) ++ { ++ if (uiMaxPagesToMap <= 1) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Out of vmalloc memory, unable to map pages for %s.", ++ __func__, ++ ui8Pattern == PVRSRV_ZERO_VALUE ? "zeroing" : "poisoning")); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ else ++ { ++ /* Halve the pages to map at once and try again. */ ++ uiMaxPagesToMap = uiMaxPagesToMap >> 1; ++ continue; ++ } ++ } ++ ++ if (pgprot_val(pgprot) == pgprot_val(pgprot_noncached(PAGE_KERNEL))) ++ { ++ /* this is most likely unnecessary as all pages must be 8-bytes ++ * aligned so there unaligned access is impossible */ ++ OSDeviceMemSet(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean); ++ } ++ else if (pgprot_val(pgprot) == pgprot_val(pgprot_writecombine(PAGE_KERNEL))) ++ { ++ OSCachedMemSetWMB(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean); ++ } ++ else ++ { ++ OSCachedMemSet(pvAddr, ui8Pattern, PAGE_SIZE * uiToClean); ++ } ++ pvr_vunmap(pvAddr, uiToClean, pgprot); ++ ppsCleanArray = &(ppsCleanArray[uiToClean]); ++ uiNumToClean -= uiToClean; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++_CleanupThread_CleanPages(void *pvData) ++{ ++ LinuxCleanupData *psCleanupData = (LinuxCleanupData*) pvData; ++ LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry; ++ struct list_head *psPoolHead = NULL; ++ IMG_UINT32 *puiCounter = NULL; ++#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) ++ PVRSRV_ERROR eError; ++ pgprot_t pgprot; ++ IMG_UINT32 i; ++#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ ++ ++ /* Get the correct pool for this caching mode. */ ++ _GetPoolListHead(psCleanupData->ui32CPUCacheMode , &psPoolHead, &puiCounter); ++ ++#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) ++ switch (PVRSRV_CPU_CACHE_MODE(psCleanupData->ui32CPUCacheMode)) ++ { ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: ++#if defined(CONFIG_X86) ++ /* For x86 we can only map with the same attributes ++ * as in the PAT settings*/ ++ pgprot = pgprot_noncached(PAGE_KERNEL); ++ break; ++#endif ++ ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: ++ pgprot = pgprot_writecombine(PAGE_KERNEL); ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unknown caching mode to set page protection flags.", ++ __func__)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto eExit; ++ } ++ ++ /* Map and fill the pages with zeros. ++ * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE ++ * at a time. */ ++ eError = _MemsetPageArray(psPagePoolEntry->uiItemsRemaining, ++ psPagePoolEntry->ppsPageArray, ++ pgprot, PVRSRV_ZERO_VALUE); ++ if (eError != PVRSRV_OK) ++ { ++ goto eExit; ++ } ++#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ ++ ++ /* Lock down pool and add item */ ++ _PagePoolLock(); ++ ++ /* Pool counters were already updated so don't do it here again*/ ++ ++ /* The pages are all zeroed so return them to the pool. */ ++ list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead); ++ ++ _DumpPoolStructure(); ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ /* Calling PVRSRVStatsIncrMemAllocPoolStat and PVRSRVStatsDecrMemAllocPoolStat ++ * inside page pool lock ensures that the stat reflects the state of the pool. 
*/ ++ PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * psPagePoolEntry->uiItemsRemaining); ++#endif ++ ++ _PagePoolUnlock(); ++ ++ OSFreeMem(pvData); ++ OSAtomicDecrement(&g_iPoolCleanTasks); ++ ++ return PVRSRV_OK; ++ ++#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) ++eExit: ++ /* we failed to zero the pages so return the error so we can ++ * retry during the next spin */ ++ if ((psCleanupData->sCleanupWork.ui32RetryCount - 1) > 0) ++ { ++ return eError; ++ } ++ ++ /* this was the last retry, give up and free pages to OS */ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Deferred task error, freeing pages to OS.", ++ __func__)); ++ _PagePoolLock(); ++ ++ *puiCounter -= psPagePoolEntry->uiItemsRemaining; ++ ++ _PagePoolUnlock(); ++ ++ for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++) ++ { ++ _FreeOSPage(0, IMG_TRUE, psPagePoolEntry->ppsPageArray[i]); ++ } ++ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray); ++ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry); ++ OSFreeMem(psCleanupData); ++ ++ OSAtomicDecrement(&g_iPoolCleanTasks); ++ ++ return PVRSRV_OK; ++#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */ ++} ++ ++ ++/* Put page array to the page pool. ++ * Handles locking and checks whether the pages are ++ * suitable to be stored in the pool. */ ++static inline IMG_BOOL ++_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags, ++ struct page **ppsPageArray, ++ IMG_BOOL bUnpinned, ++ IMG_UINT32 uiOrder, ++ IMG_UINT32 uiNumPages) ++{ ++ LinuxCleanupData *psCleanupData; ++ PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn; ++#if defined(SUPPORT_PHYSMEM_TEST) ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++#endif ++ ++ if (uiOrder == 0 && ++ !bUnpinned && ++ !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags)) ++ { ++ IMG_UINT32 uiEntries; ++ IMG_UINT32 *puiCounter; ++ struct list_head *psPoolHead; ++ ++ ++ _PagePoolLock(); ++ ++ uiEntries = _PagesInPoolUnlocked(); ++ ++ /* Check for number of current page pool entries and whether ++ * we have other asynchronous tasks in-flight */ ++ if ( (uiEntries < g_ui32PagePoolMaxEntries) && ++ ((uiEntries + uiNumPages) < ++ (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) )) ++ { ++ if (OSAtomicIncrement(&g_iPoolCleanTasks) <= ++ PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS) ++ { ++#if defined(SUPPORT_PHYSMEM_TEST) ++ if (!psPVRSRVData->hCleanupThread) ++ { ++ goto eDecrement; ++ } ++#endif ++ ++ psCleanupData = OSAllocMem(sizeof(*psCleanupData)); ++ ++ if (!psCleanupData) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to get memory for deferred page pool cleanup. " ++ "Trying to free pages immediately", ++ __func__)); ++ goto eDecrement; ++ } ++ ++ psCleanupThreadFn = &psCleanupData->sCleanupWork; ++ psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags; ++ psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL); ++ ++ if (!psCleanupData->psPoolEntry) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to get memory for deferred page pool cleanup. 
" ++ "Trying to free pages immediately", ++ __func__)); ++ goto eFreeCleanupData; ++ } ++ ++ if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to get correct page pool", ++ __func__)); ++ goto eFreePoolEntry; ++ } ++ ++ /* Increase counter here to avoid deferred cleanup tasks piling up */ ++ *puiCounter = *puiCounter + uiNumPages; ++ ++ psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray; ++ psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages; ++ ++ psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages; ++ psCleanupThreadFn->pvData = psCleanupData; ++ psCleanupThreadFn->bDependsOnHW = IMG_FALSE; ++ CLEANUP_THREAD_SET_RETRY_COUNT(psCleanupThreadFn, ++ CLEANUP_THREAD_RETRY_COUNT_DEFAULT); ++ ++ /* We must not hold the pool lock when calling AddWork because it might call us back to ++ * free pooled pages directly when unloading the driver */ ++ _PagePoolUnlock(); ++ ++ PVRSRVCleanupThreadAddWork(psCleanupThreadFn); ++ ++ ++ } ++ else ++ { ++ goto eDecrement; ++ } ++ ++ } ++ else ++ { ++ goto eUnlock; ++ } ++ } ++ else ++ { ++ goto eExitFalse; ++ } ++ ++ return IMG_TRUE; ++ ++eFreePoolEntry: ++ OSFreeMem(psCleanupData->psPoolEntry); ++eFreeCleanupData: ++ OSFreeMem(psCleanupData); ++eDecrement: ++ OSAtomicDecrement(&g_iPoolCleanTasks); ++eUnlock: ++ _PagePoolUnlock(); ++eExitFalse: ++ return IMG_FALSE; ++} ++ ++/* Get the GFP flags that we pass to the page allocator */ ++static inline gfp_t ++_GetGFPFlags(IMG_BOOL bZero, ++ PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ struct device *psDev = psDevNode->psDevConfig->pvOSDevice; ++ gfp_t gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC; ++ ++#if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) ++ /* Force use of HIGHMEM */ ++ gfp_flags |= __GFP_HIGHMEM; ++ ++ PVR_UNREFERENCED_PARAMETER(psDev); ++#else ++ if (psDev) ++ { ++#if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) ++ if (*psDev->dma_mask > DMA_BIT_MASK(32)) ++ { ++ /* If our system is able to handle large addresses use highmem */ ++ gfp_flags |= __GFP_HIGHMEM; ++ } ++ else if (*psDev->dma_mask == DMA_BIT_MASK(32)) ++ { ++ /* Limit to 32 bit. ++ * Achieved by setting __GFP_DMA32 for 64 bit systems */ ++ gfp_flags |= __GFP_DMA32; ++ } ++ else ++ { ++ /* Limit to size of DMA zone. */ ++ gfp_flags |= __GFP_DMA; ++ } ++#else ++ if (*psDev->dma_mask < DMA_BIT_MASK(32)) ++ { ++ gfp_flags |= __GFP_DMA; ++ } ++ else ++ { ++ gfp_flags |= __GFP_HIGHMEM; ++ } ++#endif /* if defined(CONFIG_64BIT) || defined(CONFIG_ARM_LPAE) || defined(CONFIG_X86_PAE) */ ++ } ++ ++#endif /* if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY) */ ++ ++ if (bZero) ++ { ++ gfp_flags |= __GFP_ZERO; ++ } ++ ++ return gfp_flags; ++} ++ ++/* ++ * @Function _PoisonDevicePage ++ * ++ * @Description Poisons a device page. In normal case the device page has the ++ * same size as the OS page and so the ui32DevPageOrder will be ++ * equal to 0 and page argument will point to one OS page ++ * structure. In case of Non4K pages the order will be greater ++ * than 0 and page argument will point to an array of OS ++ * allocated pages. 
++ * ++ * @Input psDevNode pointer to the device object ++ * @Input page array of the pages allocated by from the OS ++ * @Input ui32DevPageOrder order of the page (same as the one used to allocate ++ * the page array by alloc_pages()) ++ * @Input ui32CPUCacheFlags CPU cache flags applied to the page ++ * @Input ui8PoisonValue value used to poison the page ++ */ ++static void ++_PoisonDevicePage(PVRSRV_DEVICE_NODE *psDevNode, ++ struct page *page, ++ IMG_UINT32 ui32DevPageOrder, ++ IMG_UINT32 ui32CPUCacheFlags, ++ IMG_BYTE ui8PoisonValue) ++{ ++ IMG_UINT32 ui32OsPageIdx; ++ ++ for (ui32OsPageIdx = 0; ++ ui32OsPageIdx < (1U << ui32DevPageOrder); ++ ui32OsPageIdx++) ++ { ++ struct page *current_page = page + ui32OsPageIdx; ++ IMG_CPU_PHYADDR sCPUPhysAddrStart = {page_to_phys(current_page)}; ++ IMG_CPU_PHYADDR sCPUPhysAddrEnd = {sCPUPhysAddrStart.uiAddr + PAGE_SIZE}; ++ ++ void *kvaddr = kmap_atomic(current_page); ++ ++ /* kmap_atomic maps pages as cached so it's safe to use OSCachedMemSet ++ * here (also pages are always 8 bytes aligned anyway) */ ++ OSCachedMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE); ++ ++ OSCPUCacheFlushRangeKM(psDevNode, kvaddr, kvaddr + PAGE_SIZE, ++ sCPUPhysAddrStart, sCPUPhysAddrEnd); ++ ++ kunmap_atomic(kvaddr); ++ } ++} ++ ++/* Allocate and initialise the structure to hold the metadata of the allocation */ ++static PVRSRV_ERROR ++_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode, ++ PMR_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 uiLog2AllocPageSize, ++ IMG_UINT32 ui32AllocFlags, ++ IMG_UINT32 ui32CPUCacheFlags, ++ IMG_PID uiPid, ++ PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr) ++{ ++ PVRSRV_ERROR eError; ++ PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks; ++ IMG_UINT32 uiNumOSPageSizeVirtPages; ++ IMG_UINT32 uiNumDevPageSizeVirtPages; ++ PMR_OSPAGEARRAY_DATA *psPageArrayData; ++ IMG_UINT64 ui64DmaMask = 0; ++ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks); ++ ++ /* Use of cast below is justified by the assertion that follows to ++ * prove that no significant bits have been truncated */ ++ uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1); ++ PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize); ++ ++ uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2AllocPageSize - PAGE_SHIFT); ++ ++ /* Allocate the struct to hold the metadata */ ++ psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL); ++ if (psPageArrayData == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: OS refused the memory allocation for the private data.", ++ __func__)); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e_freed_none; ++ } ++ ++ /* ++ * Allocate the page array ++ * ++ * We avoid tracking this memory because this structure might go into the page pool. ++ * The OS can drain the pool asynchronously and when doing that we have to avoid ++ * any potential deadlocks. ++ * ++ * In one scenario the process stats vmalloc hash table lock is held and then ++ * the oom-killer softirq is trying to call _ScanObjectsInPagePool(), it must not ++ * try to acquire the vmalloc hash table lock again. 
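
The page-count arithmetic at the top of _AllocOSPageArray is worth a worked example; the sizes below are invented, assuming 4 KiB OS pages and 16 KiB device pages:

#include <stdint.h>
#include <stdio.h>

#define OS_PAGE_SHIFT 12 /* 4 KiB OS pages assumed */

int main(void)
{
    uint64_t size = 96 * 1024;   /* example PMR size, device-page aligned */
    uint32_t log2_dev_page = 14; /* 16 KiB device pages */

    uint32_t os_pages = (uint32_t)(((size - 1) >> OS_PAGE_SHIFT) + 1);
    uint32_t dev_pages = os_pages >> (log2_dev_page - OS_PAGE_SHIFT);

    printf("%u OS pages, %u device pages\n", os_pages, dev_pages); /* 24, 6 */
    return 0;
}
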
++ */ ++ psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages); ++ if (psPageArrayData->pagearray == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e_free_kmem_cache; ++ } ++ else ++ { ++ if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ /* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */ ++ psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages); ++ if (psPageArrayData->dmavirtarray == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e_free_pagearray; ++ } ++ ++ psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages); ++ if (psPageArrayData->dmaphysarray == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e_free_cpuvirtaddrarray; ++ } ++ } ++ } ++ ++ if (psDevNode->psDevConfig && psDevNode->psDevConfig->pvOSDevice) ++ { ++ struct device *psDev = psDevNode->psDevConfig->pvOSDevice; ++ ui64DmaMask = *psDev->dma_mask; ++ } ++ ++ /* Init metadata */ ++ psPageArrayData->psDevNode = psDevNode; ++ psPageArrayData->uiPid = uiPid; ++ psPageArrayData->iNumOSPagesAllocated = 0; ++ psPageArrayData->uiTotalNumOSPages = uiNumOSPageSizeVirtPages; ++ psPageArrayData->uiLog2AllocPageSize = uiLog2AllocPageSize; ++ psPageArrayData->ui64DmaMask = ui64DmaMask; ++ psPageArrayData->ui32AllocFlags = ui32AllocFlags; ++ psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags; ++ psPageArrayData->ui32CMAAdjustedPageCount = 0; ++ ++ *ppsPageArrayDataPtr = psPageArrayData; ++ return PVRSRV_OK; ++ ++/* Error path */ ++e_free_cpuvirtaddrarray: ++ OSFreeMemNoStats(psPageArrayData->dmavirtarray); ++ ++e_free_pagearray: ++ OSFreeMemNoStats(psPageArrayData->pagearray); ++ ++e_free_kmem_cache: ++ kmem_cache_free(g_psLinuxPageArray, psPageArrayData); ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: OS refused the memory allocation for the page pointer table. " ++ "Did you ask for too much?", ++ __func__)); ++ ++e_freed_none: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static inline void ++_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode, ++ struct page **ppsPage, ++ IMG_UINT32 uiNumPages) ++{ ++ void * pvAddr; ++ ++ if (OSCPUCacheOpAddressType() == OS_CACHE_OP_ADDR_TYPE_VIRTUAL) ++ { ++ pgprot_t pgprot = PAGE_KERNEL; ++ ++ IMG_UINT32 uiNumToClean = uiNumPages; ++ struct page **ppsCleanArray = ppsPage; ++ ++ /* Map and flush page. ++ * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_SIZE ++ * at a time. 
*/ ++ while (uiNumToClean != 0) ++ { ++ IMG_UINT32 uiToClean = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES, ++ uiNumToClean); ++ IMG_CPU_PHYADDR sUnused = ++ { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) }; ++ ++ pvAddr = pvr_vmap(ppsCleanArray, uiToClean, -1, pgprot); ++ if (!pvAddr) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Unable to flush page cache for new allocation, skipping flush.")); ++ return; ++ } ++ ++ CacheOpExec(psDevNode, ++ pvAddr, ++ pvAddr + PAGE_SIZE, ++ sUnused, ++ sUnused, ++ PVRSRV_CACHE_OP_FLUSH); ++ ++ pvr_vunmap(pvAddr, uiToClean, pgprot); ++ ppsCleanArray = &(ppsCleanArray[uiToClean]); ++ uiNumToClean -= uiToClean; ++ } ++ } ++ else ++ { ++ IMG_UINT32 ui32Idx; ++ ++ for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx) ++ { ++ IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd; ++ ++ pvAddr = kmap(ppsPage[ui32Idx]); ++ sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]); ++ sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE; ++ ++ /* If we're zeroing, we need to make sure the cleared memory is pushed out ++ * of the cache before the cache lines are invalidated */ ++ CacheOpExec(psDevNode, ++ pvAddr, ++ pvAddr + PAGE_SIZE, ++ sCPUPhysAddrStart, ++ sCPUPhysAddrEnd, ++ PVRSRV_CACHE_OP_FLUSH); ++ ++ kunmap(ppsPage[ui32Idx]); ++ } ++ } ++} ++ ++/* Change the caching attribute of pages on x86 systems and takes care of ++ * cache maintenance. This function is supposed to be called once for pages that ++ * came from alloc_pages(). It expects an array of OS page sized pages! ++ * ++ * Flush/Invalidate pages in case the allocation is not cached. Necessary to ++ * remove pages from the cache that might be flushed later and corrupt memory. */ ++static inline PVRSRV_ERROR ++_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode, ++ struct page **ppsPage, ++ IMG_UINT32 uiNumPages, ++ IMG_BOOL bFlush, ++ IMG_UINT32 ui32CPUCacheFlags) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags); ++ IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags); ++ IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags); ++ ++ if (ppsPage != NULL && uiNumPages != 0) ++ { ++#if defined(CONFIG_X86) ++ /* On x86 we have to set page cache attributes for non-cached pages. ++ * The call is implicitly taking care of all flushing/invalidating ++ * and therefore we can skip the usual cache maintenance after this. */ ++ if (bCPUUncached || bCPUWriteCombine) ++ { ++ /* On x86 if we already have a mapping (e.g. 
low memory) we need to change the mode of ++ current mapping before we map it ourselves */ ++ int ret = IMG_FALSE; ++ ++ switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags)) ++ { ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: ++ ret = set_pages_array_uc(ppsPage, uiNumPages); ++ if (ret) ++ { ++ eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE; ++ PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret)); ++ } ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: ++ ret = set_pages_array_wc(ppsPage, uiNumPages); ++ if (ret) ++ { ++ eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE; ++ PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret)); ++ } ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_CPU_CACHED: ++ break; ++ ++ default: ++ break; ++ } ++ } ++ else ++#endif ++ { ++ if ( bFlush || ++ bCPUUncached || bCPUWriteCombine || ++ (bCPUCached && PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)) ) ++ { ++ /* We can be given pages which still remain in the cache. ++ In order to make sure that the data we write through our mappings ++ doesn't get overwritten by later cache evictions we invalidate the ++ pages that are given to us. ++ ++ Note: ++ This still seems to be true if we request cold pages, it's just less ++ likely to be in the cache. */ ++ _ApplyCacheMaintenance(psDevNode, ++ ppsPage, ++ uiNumPages); ++ } ++ } ++ } ++ ++ return eError; ++} ++ ++/* Same as _AllocOSPage except it uses DMA framework to perform allocation. ++ * uiPageIndex is expected to be the pagearray index where to store the higher order page. */ ++static PVRSRV_ERROR ++_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData, ++ gfp_t gfp_flags, ++ IMG_UINT32 ui32AllocOrder, ++ IMG_UINT32 ui32MinOrder, ++ IMG_UINT32 uiPageIndex) ++{ ++ void *virt_addr; ++ struct page *page; ++ dma_addr_t bus_addr; ++ IMG_UINT32 uiAllocIsMisaligned; ++ size_t alloc_size = PAGE_SIZE << ui32AllocOrder; ++ struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice; ++ PVR_ASSERT(ui32AllocOrder == ui32MinOrder); ++ ++ do ++ { ++ DisableOOMKiller(); ++#if defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC) ++ virt_addr = NULL; ++#else ++ virt_addr = dma_alloc_coherent(dev, alloc_size, &bus_addr, gfp_flags); ++#endif ++ if (virt_addr == NULL) ++ { ++ /* The idea here is primarily to support some older kernels with ++ broken or non-functioning DMA/CMA implementations (< Linux-3.4) ++ and to also handle DMA/CMA allocation failures by attempting a ++ normal page allocation though we expect dma_alloc_coherent() ++ already attempts this internally also before failing but ++ nonetheless it does no harm to retry the allocation ourselves */ ++ page = alloc_pages(gfp_flags, ui32AllocOrder); ++ if (page) ++ { ++ /* Taint bus_addr as alloc_page, needed when freeing; ++ also acquire the low memory page address only, this ++ prevents mapping possible high memory pages into ++ kernel virtual address space which might exhaust ++ the VMALLOC address space */ ++ bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page)); ++ virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE; ++ } ++ else ++ { ++ EnableOOMKiller(); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ } ++ else ++ { ++#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64) ++ page = pfn_to_page(bus_addr >> PAGE_SHIFT); ++#else ++ /* Assumes bus address space is identical to physical address space */ ++ page = phys_to_page(bus_addr); ++#endif ++ } ++ EnableOOMKiller(); ++ ++ /* Physical allocation alignment works/hidden behind the scene transparently, ++ we do 
this here if the allocated buffer address does not meet its alignment
++       requirement by over-allocating using the next power-2 order and reporting
++       aligned-adjusted values back to meet the requested alignment constraint.
++       Evidently we waste memory by doing this so should only do so if we do not
++       initially meet the alignment constraint. */
++        uiAllocIsMisaligned = DMA_GET_ADDR(bus_addr) & ((PAGE_SIZE<<ui32MinOrder)-1);
++        if (uiAllocIsMisaligned || ui32AllocOrder > ui32MinOrder)
++        {
++            IMG_BOOL bUsedAllocPages = DMA_IS_ALLOCPG_ADDR(bus_addr);
++            if (ui32AllocOrder == ui32MinOrder)
++            {
++                if (bUsedAllocPages)
++                {
++                    __free_pages(page, ui32AllocOrder);
++                }
++                else
++                {
++                    dma_free_coherent(dev, alloc_size, virt_addr, bus_addr);
++                }
++
++                ui32AllocOrder = ui32AllocOrder + 1;
++                alloc_size = PAGE_SIZE << ui32AllocOrder;
++
++                PVR_ASSERT(uiAllocIsMisaligned != 0);
++            }
++            else
++            {
++                size_t align_adjust = PAGE_SIZE << ui32MinOrder;
++
++                /* Adjust virtual/bus addresses to meet alignment */
++                bus_addr = bUsedAllocPages ? page_to_phys(page) : bus_addr;
++                align_adjust = PVR_ALIGN((size_t)bus_addr, align_adjust);
++                align_adjust -= (size_t)bus_addr;
++
++                if (align_adjust)
++                {
++                    if (bUsedAllocPages)
++                    {
++                        page += align_adjust >> PAGE_SHIFT;
++                        bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
++                        virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE;
++                    }
++                    else
++                    {
++                        bus_addr += align_adjust;
++                        virt_addr += align_adjust;
++#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
++                        page = pfn_to_page(bus_addr >> PAGE_SHIFT);
++#else
++                        /* Assumes bus address space is identical to physical address space */
++                        page = phys_to_page(bus_addr);
++#endif
++                    }
++
++                    /* Store adjustments in PAGE_SIZE counts */
++                    align_adjust = align_adjust >> PAGE_SHIFT;
++                    bus_addr = DMA_SET_ALIGN_ADJUSTMENT(bus_addr, align_adjust);
++                }
++
++                /* Taint bus_addr due to over-allocation, allows us to free
++                 * memory correctly */
++                bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr);
++                uiAllocIsMisaligned = 0;
++            }
++        }
++    } while (uiAllocIsMisaligned);
++
++    /* Convert OSPageSize-based index into DevicePageSize-based index */
++    psPageArrayData->ui32CMAAdjustedPageCount += (alloc_size - (PAGE_SIZE << ui32AllocOrder ));
++
++    psPageArrayData->dmavirtarray[uiPageIndex] = virt_addr;
++    psPageArrayData->dmaphysarray[uiPageIndex] = bus_addr;
++    psPageArrayData->pagearray[uiPageIndex] = page;
++
++    return PVRSRV_OK;
++}
++
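
The alignment fix-up above reduces to round-up arithmetic on the bus address. A standalone model, assuming PVR_ALIGN rounds up to the given power-of-two multiple as its use here implies:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12
#define MODEL_PAGE_SIZE (1ULL << MODEL_PAGE_SHIFT)
/* round x up to a multiple of the power-of-two a */
#define MODEL_ALIGN(x, a) (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint32_t min_order = 2; /* 16 KiB device page */
    uint64_t alignment = MODEL_PAGE_SIZE << min_order;
    uint64_t bus_addr = 0x10003000ULL; /* example misaligned address */
    uint64_t adjust = MODEL_ALIGN(bus_addr, alignment) - bus_addr;

    /* prints: skip 1 OS pages to reach 0x10004000 */
    printf("skip %llu OS pages to reach 0x%llx\n",
           (unsigned long long)(adjust >> MODEL_PAGE_SHIFT),
           (unsigned long long)(bus_addr + adjust));
    assert(((bus_addr + adjust) & (alignment - 1)) == 0);
    return 0;
}
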
++/* Allocate a page of order uiAllocOrder and store it in the page array ppsPage at
++ * position uiPageIndex.
++ *
++ * If the order is higher than 0, it splits the page into multiples and
++ * stores them at position uiPageIndex to uiPageIndex+(1<<uiAllocOrder)-1.
++ */
++static PVRSRV_ERROR
++_AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData,
++             gfp_t gfp_flags,
++             IMG_UINT32 uiAllocOrder,
++             IMG_UINT32 uiMinOrder,
++             IMG_UINT32 uiPageIndex)
++{
++    struct page *psPage;
++    IMG_UINT32 ui32Count;
++
++    /* Allocate the page */
++    DisableOOMKiller();
++    psPage = alloc_pages(gfp_flags, uiAllocOrder);
++    EnableOOMKiller();
++
++    if (psPage == NULL)
++    {
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
++    /* In case we need to, split the higher order page;
++       this should only be used for order-0 allocations
++       as higher order allocations should use DMA/CMA */
++    if (uiAllocOrder != 0)
++    {
++        split_page(psPage, uiAllocOrder);
++    }
++#endif
++
++    /* Store the page (or multiple split pages) in the page array */
++    for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++)
++    {
++        psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]);
++    }
++
++    return PVRSRV_OK;
++}
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++#if defined(PVRSRV_ENABLE_MEMORY_STATS)
++
++static inline void _AddMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
++                                               struct page *psPage)
++{
++    IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) };
++    PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
++                                 NULL, sCPUPhysAddr,
++                                 1 << psPageArrayData->uiLog2AllocPageSize,
++                                 NULL, psPageArrayData->uiPid
++                                 DEBUG_MEMSTATS_VALUES);
++}
++
++static inline void _RemoveMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
++                                                  struct page *psPage)
++{
++    PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
++                                    (IMG_UINT64) page_to_phys(psPage),
++                                    psPageArrayData->uiPid);
++}
++
++#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
++
++static inline void _IncrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid)
++{
++    PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
++                                uiSize, uiPid);
++}
++
++static inline void _DecrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid)
++{
++    PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
++                                uiSize, uiPid);
++}
++
++#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
++#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */
++
++/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons.
++ *
++ * Firstly to support device pages which are larger than OS pages. By asking the OS for 2^N
++ * order OS pages at a time we guarantee the device page is contiguous.
++ *
++ * Secondly for performance where we may ask for 2^N order pages to reduce the number
++ * of calls to alloc_pages, and thus reduce time for huge allocations.
++ *
++ * Regardless of page order requested, we need to break them down to track _OS pages.
++ * The maximum order requested is increased if all max order allocations were successful.
++ * If any request fails we reduce the max order.
++ */
++static PVRSRV_ERROR
++_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
++{
++    PVRSRV_ERROR eError;
++    IMG_UINT32 uiArrayIndex = 0;
++    IMG_UINT32 ui32Order;
++    IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
++    IMG_BOOL bIncreaseMaxOrder = IMG_TRUE;
++
++    IMG_UINT32 ui32NumPageReq;
++    IMG_UINT32 uiOSPagesToAlloc;
++    IMG_UINT32 uiDevPagesFromPool = 0;
++
++    gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ?
BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) : IMG_FALSE, /* Zero all pages later as batch */ ++ psPageArrayData->psDevNode); ++ gfp_t ui32GfpFlags; ++ gfp_t ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY); ++ ++ struct page **ppsPageArray = psPageArrayData->pagearray; ++ struct page **ppsPageAttributeArray = NULL; ++ ++ uiOSPagesToAlloc = psPageArrayData->uiTotalNumOSPages; ++ ++ /* Try to get pages from the pool since it is faster; ++ the page pool currently only supports zero-order pages ++ thus currently excludes all DMA/CMA allocated memory */ ++ _GetPagesFromPoolLocked(psPageArrayData->psDevNode, ++ psPageArrayData->ui32CPUCacheFlags, ++ uiOSPagesToAlloc, ++ ui32MinOrder, ++ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO), ++ ppsPageArray, ++ &uiDevPagesFromPool); ++ ++ uiArrayIndex = uiDevPagesFromPool; ++ ++ if ((uiOSPagesToAlloc - uiDevPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD) ++ { /* Small allocations: ask for one device page at a time */ ++ ui32Order = ui32MinOrder; ++ bIncreaseMaxOrder = IMG_FALSE; ++ } ++ else ++ { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) ++ /* Large zero-order or none zero-order allocations, ask for ++ MAX(max-order, min-order) order pages at a time; alloc ++ failures throttles this down to ZeroOrder allocations */ ++ ui32Order = MAX(g_uiMaxOrder, ui32MinOrder); ++#else ++ /* Because split_page() is not available on older kernels ++ we cannot mix-and-match any-order pages in the PMR; ++ only same-order pages must be present in page array. ++ So we unconditionally force it to use ui32MinOrder on ++ these older kernels */ ++ ui32Order = ui32MinOrder; ++#if defined(DEBUG) ++ if (! BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ /* Check that this is zero */ ++ PVR_ASSERT(! ui32Order); ++ } ++#endif ++#endif ++ } ++ ++ /* Only if asking for more contiguity than we actually need, let it fail */ ++ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags; ++ ui32NumPageReq = (1 << ui32Order); ++ ++ while (uiArrayIndex < uiOSPagesToAlloc) ++ { ++ IMG_UINT32 ui32PageRemain = uiOSPagesToAlloc - uiArrayIndex; ++ ++ while (ui32NumPageReq > ui32PageRemain) ++ { ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) ++ /* Pages to request is larger than that remaining ++ so ask for less so never over allocate */ ++ ui32Order = MAX(ui32Order >> 1, ui32MinOrder); ++#else ++ /* Pages to request is larger than that remaining so ++ do nothing thus over allocate as we do not support ++ mix/match of any-order pages in PMR page-array in ++ older kernels (simplifies page free logic) */ ++ PVR_ASSERT(ui32Order == ui32MinOrder); ++#endif ++ ui32NumPageReq = (1 << ui32Order); ++ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags; ++ } ++ ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ /* As the DMA/CMA framework rounds-up request to the ++ next power-of-two, we request multiple uiMinOrder ++ pages to satisfy allocation request in order to ++ minimise wasting memory */ ++ eError = _AllocOSPage_CMA(psPageArrayData, ++ ui32GfpFlags, ++ ui32Order, ++ ui32MinOrder, ++ uiArrayIndex >> ui32MinOrder); ++ } ++ else ++ { ++ /* Allocate uiOrder pages at uiArrayIndex */ ++ eError = _AllocOSPage(psPageArrayData, ++ ui32GfpFlags, ++ ui32Order, ++ ui32MinOrder, ++ uiArrayIndex); ++ } ++ ++ if (eError == PVRSRV_OK) ++ { ++ /* Successful request. Move onto next. 
*/ ++ uiArrayIndex += ui32NumPageReq; ++ } ++ else ++ { ++ if (ui32Order > ui32MinOrder) ++ { ++ /* Last request failed. Let's ask for less next time */ ++ ui32Order = MAX(ui32Order >> 1, ui32MinOrder); ++ bIncreaseMaxOrder = IMG_FALSE; ++ ui32NumPageReq = (1 << ui32Order); ++ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags; ++ g_uiMaxOrder = ui32Order; ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)) ++ /* We should not trigger this code path in older kernels, ++ this is enforced by ensuring ui32Order == ui32MinOrder */ ++ PVR_ASSERT(ui32Order == ui32MinOrder); ++#endif ++ } ++ else ++ { ++ /* Failed to alloc pages at required contiguity. Failed allocation */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: %s failed to honour request at %u of %u, flags = %x, order = %u (%s)", ++ __func__, ++ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA) ? "dma_alloc_coherent" : "alloc_pages", ++ uiArrayIndex, ++ uiOSPagesToAlloc, ++ ui32GfpFlags, ++ ui32Order, ++ PVRSRVGetErrorString(eError))); ++ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; ++ goto e_free_pages; ++ } ++ } ++ } ++ ++ if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM)) ++ { /* All successful allocations on max order. Let's ask for more next time */ ++ g_uiMaxOrder++; ++ } ++ ++ /* Construct table of page pointers to apply attributes */ ++ ppsPageAttributeArray = &ppsPageArray[uiDevPagesFromPool]; ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ IMG_UINT32 uiIdx, uiIdy, uiIdz; ++ ++ ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiOSPagesToAlloc); ++ PVR_LOG_GOTO_IF_NOMEM(ppsPageAttributeArray, eError, e_free_pages); ++ ++ for (uiIdx = 0; uiIdx < uiOSPagesToAlloc; uiIdx += ui32NumPageReq) ++ { ++ uiIdy = uiIdx >> ui32Order; ++ for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++) ++ { ++ ppsPageAttributeArray[uiIdx+uiIdz] = ppsPageArray[uiIdy]; ++ ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz; ++ } ++ } ++ } ++ ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) && ui32MinOrder == 0) ++ { ++ eError = _MemsetPageArray(uiOSPagesToAlloc - uiDevPagesFromPool, ++ ppsPageAttributeArray, PAGE_KERNEL, ++ PVRSRV_ZERO_VALUE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (fast)")); ++ goto e_free_pages; ++ } ++ } ++ else if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_ALLOC)) ++ { ++ /* need to call twice because ppsPageArray and ppsPageAttributeArray ++ * can point to different allocations: first for pages obtained from ++ * the pool and then the remaining pages */ ++ eError = _MemsetPageArray(uiDevPagesFromPool, ppsPageArray, PAGE_KERNEL, ++ PVRSRV_POISON_ON_ALLOC_VALUE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to poison pages (fast)")); ++ } ++ eError = _MemsetPageArray(uiOSPagesToAlloc - uiDevPagesFromPool, ++ ppsPageAttributeArray, PAGE_KERNEL, ++ PVRSRV_POISON_ON_ALLOC_VALUE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to poison pages (fast)")); ++ } ++ ++ /* for poisoning need to also flush the pool pages as the 0s have ++ * been overwritten */ ++ _ApplyCacheMaintenance(psPageArrayData->psDevNode, ppsPageArray, ++ uiDevPagesFromPool); ++ } ++ ++ /* Do the cache management as required */ ++ eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode, ++ ppsPageAttributeArray, ++ uiOSPagesToAlloc - uiDevPagesFromPool, ++ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_ZERO) || ++ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_ALLOC), ++ 
psPageArrayData->ui32CPUCacheFlags); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes")); ++ goto e_free_pages; ++ } ++ else ++ { ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ OSFreeMem(ppsPageAttributeArray); ++ } ++ } ++ ++ /* Update metadata */ ++ psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages; ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ { ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++ IMG_UINT32 ui32NumPages = ++ psPageArrayData->iNumOSPagesAllocated >> ui32MinOrder; ++ IMG_UINT32 i; ++ ++ for (i = 0; i < ui32NumPages; i++) ++ { ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]); ++ } ++ else ++ { ++ _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << ui32MinOrder]); ++ } ++ } ++#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ ++ _IncrMemAllocStat_UmaPages(((uiOSPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)), ++ psPageArrayData->uiPid); ++#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ ++ } ++#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */ ++ ++ return PVRSRV_OK; ++ ++/* Error path */ ++e_free_pages: ++ { ++ IMG_UINT32 ui32PageToFree; ++ ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order; ++ IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order; ++ PVR_ASSERT(ui32Order == ui32MinOrder); ++ ++ if (ppsPageAttributeArray) ++ { ++ OSFreeMem(ppsPageAttributeArray); ++ } ++ ++ for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++) ++ { ++ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, ++ uiDevPageSize, ++ ui32MinOrder, ++ psPageArrayData->dmavirtarray[ui32PageToFree], ++ psPageArrayData->dmaphysarray[ui32PageToFree], ++ ppsPageArray[ui32PageToFree]); ++ psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0; ++ psPageArrayData->dmavirtarray[ui32PageToFree] = NULL; ++ ppsPageArray[ui32PageToFree] = NULL; ++ } ++ } ++ else ++ { ++ /* Free the pages we got from the pool */ ++ for (ui32PageToFree = 0; ui32PageToFree < uiDevPagesFromPool; ui32PageToFree++) ++ { ++ _FreeOSPage(ui32MinOrder, ++ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE), ++ ppsPageArray[ui32PageToFree]); ++ ppsPageArray[ui32PageToFree] = NULL; ++ } ++ ++ for (ui32PageToFree = uiDevPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++) ++ { ++ _FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]); ++ ppsPageArray[ui32PageToFree] = NULL; ++ } ++ } ++ ++ return eError; ++ } ++} ++ ++static INLINE PVRSRV_ERROR ++_CheckIfIndexInRange(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, IMG_UINT32 ui32Limit) ++{ ++ if (pui32Indices[ui32Index] >= ui32Limit) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Given alloc index %u at %u is larger than page array %u.", ++ __func__, pui32Indices[ui32Index], ui32Index, ui32Limit)); ++ return PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static INLINE PVRSRV_ERROR ++_CheckIfPageNotAllocated(IMG_UINT32 ui32Index, IMG_UINT32 *pui32Indices, struct page **ppsPageArray) ++{ ++ if (ppsPageArray[pui32Indices[ui32Index]] != NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping number %u at page array index %u already exists. 
" ++ "Page struct %p", __func__, pui32Indices[ui32Index], ui32Index, ++ ppsPageArray[pui32Indices[ui32Index]])); ++ return PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/* Allocation of OS pages: This function is used for sparse allocations. ++ * ++ * Sparse allocations provide only a proportion of sparse physical backing within the total ++ * virtual range. */ ++static PVRSRV_ERROR ++_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData, ++ IMG_UINT32 *puiAllocIndices, ++ IMG_UINT32 uiDevPagesToAlloc) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 i; ++ struct page **ppsPageArray = psPageArrayData->pagearray; ++ IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT; ++ IMG_UINT32 uiDevPagesFromPool = 0; ++ IMG_UINT32 uiOSPagesToAlloc = uiDevPagesToAlloc * (1 << uiOrder); ++ IMG_UINT32 uiDevPagesAllocated = psPageArrayData->uiTotalNumOSPages >> uiOrder; ++ const IMG_UINT32 ui32AllocFlags = psPageArrayData->ui32AllocFlags; ++ gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? BIT_ISSET(ui32AllocFlags, FLAG_ZERO): ++ IMG_FALSE, /* Zero pages later as batch */ ++ psPageArrayData->psDevNode); ++ ++ /* We use this page array to receive pages from the pool and then reuse it afterwards to ++ * store pages that need their cache attribute changed on x86 */ ++ struct page **ppsTempPageArray; ++ IMG_UINT32 uiTempPageArrayIndex = 0; ++ ++ /* Allocate the temporary page array that we need here to receive pages ++ * from the pool and to store pages that need their caching attributes changed. ++ * Allocate number of OS pages to be able to use the attribute function later. */ ++ ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiOSPagesToAlloc); ++ PVR_LOG_GOTO_IF_NOMEM(ppsTempPageArray, eError, e_exit); ++ ++ /* Check the requested number of pages if they fit in the page array */ ++ if (uiDevPagesAllocated < ++ ((psPageArrayData->iNumOSPagesAllocated >> uiOrder) + uiDevPagesToAlloc)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Trying to allocate more pages (Order %u) than this buffer can handle, " ++ "Request + Allocated < Max! Request %u, Allocated %u, Max %u.", ++ __func__, ++ uiOrder, ++ uiDevPagesToAlloc, ++ psPageArrayData->iNumOSPagesAllocated >> uiOrder, ++ uiDevPagesAllocated)); ++ eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE; ++ goto e_free_temp_array; ++ } ++ ++ /* Try to get pages from the pool since it is faster. The pages from pool are going to be ++ * allocated only if: ++ * - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 1 && uiOrder == 0 ++ * - PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES == 0 && uiOrder == 0 && ++ * !BIT_ISSET(ui32AllocFlags, FLAG_ZERO) */ ++ _GetPagesFromPoolLocked(psPageArrayData->psDevNode, ++ psPageArrayData->ui32CPUCacheFlags, ++ uiDevPagesToAlloc, ++ uiOrder, ++ BIT_ISSET(ui32AllocFlags, FLAG_ZERO), ++ ppsTempPageArray, ++ &uiDevPagesFromPool); ++ ++ /* In general device pages can have higher order than 0 but page pool always provides only 0 ++ * order pages so they can be assigned to the OS pages values (in other words if we're ++ * allocating non-4k pages uiDevPagesFromPool will always be 0) */ ++ uiTempPageArrayIndex = uiDevPagesFromPool; ++ ++ /* Move pages we got from the pool to the array. 
*/ ++ for (i = 0; i < uiDevPagesFromPool; i++) ++ { ++ eError = _CheckIfIndexInRange(i, puiAllocIndices, uiDevPagesAllocated); ++ PVR_GOTO_IF_ERROR(eError, e_free_pool_pages); ++ eError = _CheckIfPageNotAllocated(i, puiAllocIndices, ppsPageArray); ++ PVR_GOTO_IF_ERROR(eError, e_free_pool_pages); ++ ++ ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[i]; ++ } ++ ++ /* Allocate pages from the OS */ ++ for (i = uiDevPagesFromPool; i < uiDevPagesToAlloc; i++) ++ { ++ eError = _CheckIfIndexInRange(i, puiAllocIndices, uiDevPagesAllocated); ++ PVR_GOTO_IF_ERROR(eError, e_free_pages); ++ eError = _CheckIfPageNotAllocated(i, puiAllocIndices, ppsPageArray); ++ PVR_GOTO_IF_ERROR(eError, e_free_pages); ++ ++ /* Allocated pages and assign them the array. */ ++ if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ /* As the DMA/CMA framework rounds-up request to the ++ next power-of-two, we request multiple uiMinOrder ++ pages to satisfy allocation request in order to ++ minimise wasting memory */ ++ eError = _AllocOSPage_CMA(psPageArrayData, ++ ui32GfpFlags, ++ uiOrder, ++ uiOrder, ++ puiAllocIndices[i]); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages")); ++ goto e_free_pages; ++ } ++ } ++ else ++ { ++ DisableOOMKiller(); ++ ppsPageArray[puiAllocIndices[i]] = alloc_pages(ui32GfpFlags, uiOrder); ++ EnableOOMKiller(); ++ } ++ ++ if (ppsPageArray[puiAllocIndices[i]] != NULL) ++ { ++ /* Append pages to the temporary array so it's easier to process ++ * them later on. */ ++ ++ if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ IMG_UINT32 idx; ++ struct page *psPageAddr; ++ ++ psPageAddr = ppsPageArray[puiAllocIndices[i]]; ++ ++ /* "divide" CMA pages into OS pages if they have higher order */ ++ for (idx = 0; idx < (1 << uiOrder); idx++) ++ { ++ ppsTempPageArray[uiTempPageArrayIndex + idx] = psPageAddr; ++ psPageAddr++; ++ } ++ uiTempPageArrayIndex += (1 << uiOrder); ++ } ++ else ++ { ++ ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]]; ++ uiTempPageArrayIndex++; ++ } ++ } ++ else ++ { ++ /* Failed to alloc pages at required contiguity. Failed allocation */ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u", ++ __func__, i, uiDevPagesToAlloc, ui32GfpFlags, uiOrder)); ++ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; ++ goto e_free_pages; ++ } ++ } ++ ++ if (BIT_ISSET(ui32AllocFlags, FLAG_ZERO) && uiOrder == 0) ++ { ++ /* At this point this array contains pages allocated from the page pool at its start ++ * and pages allocated from the OS after that. ++ * If there are pages from the pool here they must be zeroed already hence we don't have ++ * to do it again. This is because if PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES is enabled pool pages ++ * are zeroed in the cleanup thread. If it's disabled they aren't, and in that case we never ++ * allocate pages with FLAG_ZERO from the pool. This is why those pages need to be zeroed ++ * here. ++ * All of the above is true for the 0 order pages. For higher order we never allocated from ++ * the pool and those pages are allocated already zeroed from the OS. ++ * Long story short we can always skip pages allocated from the pool because they are either ++ * zeroed or we didn't allocate any of them. 
++		eError = _MemsetPageArray(uiTempPageArrayIndex - uiDevPagesFromPool,
++		                          &ppsTempPageArray[uiDevPagesFromPool],
++		                          PAGE_KERNEL, PVRSRV_ZERO_VALUE);
++		PVR_LOG_GOTO_IF_FALSE(eError == PVRSRV_OK, "failed to zero pages (sparse)", e_free_pages);
++	}
++	else if (BIT_ISSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC))
++	{
++		/* Here we need to poison all of the pages regardless of whether they were
++		 * allocated from the pool or from the system. */
++		eError = _MemsetPageArray(uiTempPageArrayIndex, ppsTempPageArray,
++		                          PAGE_KERNEL, PVRSRV_POISON_ON_ALLOC_VALUE);
++		PVR_LOG_IF_FALSE(eError == PVRSRV_OK, "failed to poison pages (sparse)");
++
++		/* We need to flush the cache for the poisoned pool pages here. The flush for the
++		 * pages allocated from the system is done below because we also need to add the
++		 * appropriate cache attributes to them. Pages allocated from the pool already come
++		 * with the correct caching mode. */
++		_ApplyCacheMaintenance(psPageArrayData->psDevNode, ppsTempPageArray, uiDevPagesFromPool);
++	}
++
++	/* Do the cache management as required */
++	eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
++	                                &ppsTempPageArray[uiDevPagesFromPool],
++	                                uiTempPageArrayIndex - uiDevPagesFromPool,
++	                                BIT_ISSET(ui32AllocFlags, FLAG_ZERO) ||
++	                                BIT_ISSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC),
++	                                psPageArrayData->ui32CPUCacheFlags);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
++		goto e_free_pages;
++	}
++
++	/* Update metadata */
++	psPageArrayData->iNumOSPagesAllocated += uiOSPagesToAlloc;
++
++	/* Free temporary page array */
++	OSFreeMem(ppsTempPageArray);
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++#if defined(PVRSRV_ENABLE_MEMORY_STATS)
++	for (i = 0; i < uiDevPagesToAlloc; i++)
++	{
++		_AddMemAllocRecord_UmaPages(psPageArrayData,
++		                            ppsPageArray[puiAllocIndices[i]]);
++	}
++#else
++	_IncrMemAllocStat_UmaPages(((uiOSPagesToAlloc * PAGE_SIZE)+(psPageArrayData->ui32CMAAdjustedPageCount)),
++	                           psPageArrayData->uiPid);
++#endif
++#endif
++
++	return PVRSRV_OK;
++
++e_free_pages:
++	if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA))
++	{
++		IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
++
++		/* Free the pages we just allocated from the CMA */
++		for (; i > uiDevPagesFromPool; i--)
++		{
++			_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
++			                uiDevPageSize,
++			                uiOrder,
++			                psPageArrayData->dmavirtarray[puiAllocIndices[i-1]],
++			                psPageArrayData->dmaphysarray[puiAllocIndices[i-1]],
++			                ppsPageArray[puiAllocIndices[i-1]]);
++			psPageArrayData->dmaphysarray[puiAllocIndices[i-1]] = (dma_addr_t)0;
++			psPageArrayData->dmavirtarray[puiAllocIndices[i-1]] = NULL;
++			ppsPageArray[puiAllocIndices[i-1]] = NULL;
++		}
++	}
++	else
++	{
++		/* Free the pages we just allocated from the OS */
++		for (; i > uiDevPagesFromPool; i--)
++		{
++			_FreeOSPage(0, IMG_FALSE, ppsPageArray[puiAllocIndices[i-1]]);
++			ppsPageArray[puiAllocIndices[i-1]] = NULL;
++		}
++	}
++
++e_free_pool_pages:
++	/* And now free all of the pages we allocated from the pool. */
++	for (i = 0; i < uiDevPagesFromPool; i++)
++	{
++		_FreeOSPage(0, BIT_ISSET(ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE),
++		            ppsTempPageArray[i]);
++
++		/* Not using _CheckIfIndexInRange() here so as not to print an error message */
++		if (puiAllocIndices[i] < uiDevPagesAllocated)
++		{
++			ppsPageArray[puiAllocIndices[i]] = NULL;
++		}
++	}
++
++e_free_temp_array:
++	OSFreeMem(ppsTempPageArray);
++
++e_exit:
++	return eError;
++}
++
++/* Allocate pages for a given page array.
++ * ++ * The executed allocation path depends whether an array with allocation ++ * indices has been passed or not */ ++static PVRSRV_ERROR ++_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, ++ IMG_UINT32 *puiAllocIndices, ++ IMG_UINT32 uiPagesToAlloc) ++{ ++ PVRSRV_ERROR eError; ++ struct page **ppsPageArray; ++ ++ /* Parameter checks */ ++ PVR_ASSERT(NULL != psPageArrayData); ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ PVR_ASSERT(psPageArrayData->dmaphysarray != NULL); ++ PVR_ASSERT(psPageArrayData->dmavirtarray != NULL); ++ } ++ PVR_ASSERT(psPageArrayData->pagearray != NULL); ++ PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated); ++ ++ ppsPageArray = psPageArrayData->pagearray; ++ ++ /* Go the sparse alloc path if we have an array with alloc indices.*/ ++ if (puiAllocIndices != NULL) ++ { ++ eError = _AllocOSPages_Sparse(psPageArrayData, ++ puiAllocIndices, ++ uiPagesToAlloc); ++ } ++ else ++ { ++ eError = _AllocOSPages_Fast(psPageArrayData); ++ } ++ ++ if (eError != PVRSRV_OK) ++ { ++ goto e_exit; ++ } ++ ++ _DumpPageArray(ppsPageArray, ++ psPageArrayData->uiTotalNumOSPages >> ++ (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData)); ++ return PVRSRV_OK; ++ ++e_exit: ++ return eError; ++} ++ ++/* Same as _FreeOSPage except free memory using DMA framework */ ++static INLINE void ++_FreeOSPage_CMA(struct device *dev, ++ size_t alloc_size, ++ IMG_UINT32 uiOrder, ++ void *virt_addr, ++ dma_addr_t dev_addr, ++ struct page *psPage) ++{ ++ if (DMA_IS_ALLOCPG_ADDR(dev_addr)) ++ { ++#if defined(CONFIG_X86) ++ void *pvPageVAddr = page_address(psPage); ++ if (pvPageVAddr) ++ { ++ int ret = set_memory_wb((unsigned long)pvPageVAddr, 1); ++ if (ret) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to reset page attribute", ++ __func__)); ++ } ++ } ++#endif ++ ++ if (DMA_IS_ADDR_ADJUSTED(dev_addr)) ++ { ++ psPage -= DMA_GET_ALIGN_ADJUSTMENT(dev_addr); ++ uiOrder += 1; ++ } ++ ++ __free_pages(psPage, uiOrder); ++ } ++ else ++ { ++ if (DMA_IS_ADDR_ADJUSTED(dev_addr)) ++ { ++ size_t align_adjust; ++ ++ align_adjust = DMA_GET_ALIGN_ADJUSTMENT(dev_addr); ++ alloc_size = alloc_size << 1; ++ ++ dev_addr = DMA_GET_ADDR(dev_addr); ++ dev_addr -= align_adjust << PAGE_SHIFT; ++ virt_addr -= align_adjust << PAGE_SHIFT; ++ } ++ ++ dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr)); ++ } ++} ++ ++/* Free a single page back to the OS. ++ * Make sure the cache type is set back to the default value. ++ * ++ * Note: ++ * We must _only_ check bUnsetMemoryType in the case where we need to free ++ * the page back to the OS since we may have to revert the cache properties ++ * of the page to the default as given by the OS when it was allocated. 
*/
++static void
++_FreeOSPage(IMG_UINT32 uiOrder,
++            IMG_BOOL bUnsetMemoryType,
++            struct page *psPage)
++{
++#if defined(CONFIG_X86)
++	void *pvPageVAddr;
++	pvPageVAddr = page_address(psPage);
++
++	if (pvPageVAddr && bUnsetMemoryType)
++	{
++		int ret;
++
++		ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
++		if (ret)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute",
++			         __func__));
++		}
++	}
++#else
++	PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType);
++#endif
++	__free_pages(psPage, uiOrder);
++}
++
++/* Free the struct holding the metadata */
++static PVRSRV_ERROR
++_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData)
++{
++	PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
++
++	/* Check if the page array actually still exists.
++	 * It might be the case that it has been moved to the page pool */
++	if (psPageArrayData->pagearray != NULL)
++	{
++		OSFreeMemNoStats(psPageArrayData->pagearray);
++	}
++
++	kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
++
++	return PVRSRV_OK;
++}
++
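The sparse free path below flattens device pages into order-0 OS pages before batch-processing them: a device page of order k contributes 1 << k consecutive page entries, starting from the stored first page. A standalone sketch of that expansion with hypothetical numbers (plain ints stand in for struct page pointers):

#include <stdio.h>

/* Hypothetical demo: 3 device pages of order 2 (4 OS pages each) are
 * expanded into a flat array of 12 OS page "handles", mirroring the
 * (1 << uiOrder) loop in _FreeOSPages_Sparse(). */
int main(void)
{
	unsigned order = 2;
	unsigned dev_pages[] = { 100, 200, 300 }; /* first OS page of each device page */
	unsigned os_pages[12];
	unsigned tmp_idx = 0;
	unsigned i, j;

	for (i = 0; i < 3; i++)
	{
		for (j = 0; j < (1u << order); j++)
		{
			/* consecutive OS pages follow the first one of the device page */
			os_pages[tmp_idx++] = dev_pages[i] + j;
		}
	}

	printf("expanded %u device pages into %u OS pages\n", i, tmp_idx);
	return 0; /* tmp_idx == 3 << order == 12 */
}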
++/* Free all or some pages from a sparse page array */
++static PVRSRV_ERROR
++_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
++                    IMG_UINT32 *pai32FreeIndices,
++                    IMG_UINT32 ui32FreePageCount)
++{
++	IMG_BOOL bSuccess;
++	IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
++	IMG_UINT32 uiPageIndex, i, j, uiTempIdx = 0;
++	struct page **ppsPageArray = psPageArrayData->pagearray;
++	IMG_UINT32 uiNumPages;
++
++	struct page **ppsTempPageArray;
++	IMG_UINT32 uiTempArraySize;
++
++	/* We really should have something to free before we call this */
++	PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
++
++	if (pai32FreeIndices == NULL)
++	{
++		uiNumPages = psPageArrayData->uiTotalNumOSPages >> uiOrder;
++		uiTempArraySize = psPageArrayData->iNumOSPagesAllocated;
++	}
++	else
++	{
++		uiNumPages = ui32FreePageCount;
++		uiTempArraySize = ui32FreePageCount << uiOrder;
++	}
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
++	for (i = 0; i < uiNumPages; i++)
++	{
++		IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
++
++		if (NULL != ppsPageArray[idx])
++		{
++			_RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[idx]);
++		}
++	}
++#endif
++
++	if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_FREE))
++	{
++		for (i = 0; i < uiNumPages; i++)
++		{
++			IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
++
++			if (NULL != ppsPageArray[idx])
++			{
++				_PoisonDevicePage(psPageArrayData->psDevNode,
++				                  ppsPageArray[idx],
++				                  uiOrder,
++				                  psPageArrayData->ui32CPUCacheFlags,
++				                  PVRSRV_POISON_ON_FREE_VALUE);
++			}
++		}
++	}
++
++	if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
++	{
++		IMG_UINT32 uiDevNumPages = uiNumPages;
++		IMG_UINT32 uiDevPageSize = 1 << psPageArrayData->uiLog2AllocPageSize;
++
++		for (i = 0; i < uiDevNumPages; i++)
++		{
++			IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
++			if (NULL != ppsPageArray[idx])
++			{
++				_FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
++				                uiDevPageSize,
++				                uiOrder,
++				                psPageArrayData->dmavirtarray[idx],
++				                psPageArrayData->dmaphysarray[idx],
++				                ppsPageArray[idx]);
++				psPageArrayData->dmaphysarray[idx] = (dma_addr_t)0;
++				psPageArrayData->dmavirtarray[idx] = NULL;
++				ppsPageArray[idx] = NULL;
++				uiTempIdx++;
++			}
++		}
++		uiTempIdx <<= uiOrder;
++	}
++	else
++	{
++		/* OSAllocMemNoStats required because this code may be run without the bridge lock held */
++		ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize);
++		if (ppsTempPageArray == NULL)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __func__));
++			return PVRSRV_ERROR_OUT_OF_MEMORY;
++		}
++
++		/* Put pages in a contiguous array so further processing is easier */
++		for (i = 0; i < uiNumPages; i++)
++		{
++			uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i;
++			if (NULL != ppsPageArray[uiPageIndex])
++			{
++				struct page *psPage = ppsPageArray[uiPageIndex];
++
++				for (j = 0; j < (1 << uiOrder); j++)
++				{
++					ppsTempPageArray[uiTempIdx] = psPage;
++					psPage++;
++					uiTempIdx++;
++				}
++
++				ppsPageArray[uiPageIndex] = NULL;
++			}
++		}
++
++		/* Try to move the temp page array to the pool */
++		bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
++		                                 ppsTempPageArray,
++		                                 BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNPINNED),
++		                                 0,
++		                                 uiTempIdx);
++		if (bSuccess)
++		{
++			goto exit_ok;
++		}
++
++		/* Free pages and reset page caching attributes on x86 */
++#if defined(CONFIG_X86)
++		if (uiTempIdx != 0 && BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE))
++		{
++			int iError;
++			iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx);
++
++			if (iError)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__));
++			}
++		}
++#endif
++
++		/* Free the pages */
++		for (i = 0; i < uiTempIdx; i++)
++		{
++			__free_pages(ppsTempPageArray[i], 0);
++		}
++
++		/* Free the temp page array here if it did not move to the pool */
++		OSFreeMemNoStats(ppsTempPageArray);
++	}
++
++exit_ok:
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
++	_DecrMemAllocStat_UmaPages(((uiTempIdx * PAGE_SIZE)-(psPageArrayData->ui32CMAAdjustedPageCount)),
++	                           psPageArrayData->uiPid);
++#endif
++
++	if (pai32FreeIndices && ((uiTempIdx >> uiOrder) != ui32FreePageCount))
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Probable sparse duplicate indices: ReqFreeCount: %d "
++		         "ActualFreedCount: %d", __func__, ui32FreePageCount, (uiTempIdx >> uiOrder)));
++	}
++
++	/* Update metadata */
++	psPageArrayData->iNumOSPagesAllocated -= uiTempIdx;
++	PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated);
++	return PVRSRV_OK;
++}
++
++/* Free all the pages in a page array */
++static PVRSRV_ERROR
++_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
++{
++	IMG_BOOL bSuccess;
++	IMG_UINT32 i;
++	IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages;
++	IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
++	IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder;
++	IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
++	struct page **ppsPageArray = psPageArrayData->pagearray;
++
++	/* We really should have something to free before we call this */
++	PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++#if defined(PVRSRV_ENABLE_MEMORY_STATS)
++	for (i = 0; i < uiDevNumPages; i++)
++	{
++		if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA))
++		{
++			_RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]);
++		}
++		else
++		{
++			_RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i << uiOrder]);
++		}
++	}
++#else
_DecrMemAllocStat_UmaPages(((uiNumPages * PAGE_SIZE)-(psPageArrayData->ui32CMAAdjustedPageCount)), ++ psPageArrayData->uiPid); ++#endif ++#endif ++ ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_POISON_ON_FREE)) ++ { ++ for (i = 0; i < uiDevNumPages; i++) ++ { ++ _PoisonDevicePage(psPageArrayData->psDevNode, ++ ppsPageArray[i], ++ uiOrder, ++ psPageArrayData->ui32CPUCacheFlags, ++ PVRSRV_POISON_ON_FREE_VALUE); ++ } ++ } ++ ++ /* Try to move the page array to the pool */ ++ bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags, ++ ppsPageArray, ++ BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNPINNED), ++ uiOrder, ++ uiNumPages); ++ if (bSuccess) ++ { ++ psPageArrayData->pagearray = NULL; ++ goto exit_ok; ++ } ++ ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ for (i = 0; i < uiDevNumPages; i++) ++ { ++ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice, ++ uiDevPageSize, ++ uiOrder, ++ psPageArrayData->dmavirtarray[i], ++ psPageArrayData->dmaphysarray[i], ++ ppsPageArray[i]); ++ psPageArrayData->dmaphysarray[i] = (dma_addr_t)0; ++ psPageArrayData->dmavirtarray[i] = NULL; ++ ppsPageArray[i] = NULL; ++ } ++ } ++ else ++ { ++#if defined(CONFIG_X86) ++ if (BIT_ISSET(psPageArrayData->ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE)) ++ { ++ int ret; ++ ++ ret = set_pages_array_wb(ppsPageArray, uiNumPages); ++ if (ret) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", ++ __func__)); ++ } ++ } ++#endif ++ ++ for (i = 0; i < uiNumPages; i++) ++ { ++ _FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]); ++ ppsPageArray[i] = NULL; ++ } ++ } ++ ++exit_ok: ++ /* Update metadata */ ++ psPageArrayData->iNumOSPagesAllocated = 0; ++ return PVRSRV_OK; ++} ++ ++/* Free pages from a page array. ++ * Takes care of mem stats and chooses correct free path depending on parameters. */ ++static PVRSRV_ERROR ++_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_UINT32 ui32FreePageCount) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Go the sparse or non-sparse path */ ++ if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages ++ || pai32FreeIndices != NULL) ++ { ++ eError = _FreeOSPages_Sparse(psPageArrayData, ++ pai32FreeIndices, ++ ui32FreePageCount); ++ } ++ else ++ { ++ eError = _FreeOSPages_Fast(psPageArrayData); ++ } ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed")); ++ } ++ ++ _DumpPageArray(psPageArrayData->pagearray, ++ psPageArrayData->uiTotalNumOSPages >> ++ (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) ); ++ ++ return eError; ++} ++ ++/* ++ * ++ * Implementation of callback functions ++ * ++ */ ++ ++/* Destruction function is called after last reference disappears, ++ * but before PMR itself is freed. ++ */ ++static PVRSRV_ERROR ++PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv) ++{ ++ PVRSRV_ERROR eError; ++ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; ++ ++ /* We can't free pages until now. 
*/ ++ if (psOSPageArrayData->iNumOSPagesAllocated != 0) ++ { ++#if defined(DEBUG) && defined(SUPPORT_VALIDATION) ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ IMG_UINT32 ui32UMALeakMax = psPVRSRVData->sMemLeakIntervals.ui32GPU; ++ ++ mutex_lock(&g_sUMALeakMutex); ++ ++ g_ui32UMALeakCounter++; ++ if (ui32UMALeakMax && g_ui32UMALeakCounter >= ui32UMALeakMax) ++ { ++ g_ui32UMALeakCounter = 0; ++ mutex_unlock(&g_sUMALeakMutex); ++ ++ PVR_DPF((PVR_DBG_WARNING, "%s: Skipped freeing of PMR 0x%p to trigger memory leak.", __func__, pvPriv)); ++ return PVRSRV_OK; ++ } ++ ++ mutex_unlock(&g_sUMALeakMutex); ++#endif ++ _PagePoolLock(); ++ if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED)) ++ { ++ _RemoveUnpinListEntryUnlocked(psOSPageArrayData); ++ } ++ _PagePoolUnlock(); ++ ++ eError = _FreeOSPages(psOSPageArrayData, ++ NULL, ++ 0); ++ PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */ ++ } ++ ++ eError = _FreeOSPagesArray(psOSPageArrayData); ++ PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */ ++ return PVRSRV_OK; ++} ++ ++/* Callback function for locking the system physical page addresses. ++ * This function must be called before the lookup address func. */ ++static PVRSRV_ERROR ++PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) ++{ ++ PVRSRV_ERROR eError; ++ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; ++ ++ if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND)) ++ { ++ /* Allocate Memory for deferred allocation */ ++ eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ } ++ ++ eError = PVRSRV_OK; ++ return eError; ++} ++ ++static PVRSRV_ERROR ++PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv) ++{ ++ /* Just drops the refcount. */ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv; ++ ++ if (BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND)) ++ { ++ /* Free Memory for deferred allocation */ ++ eError = _FreeOSPages(psOSPageArrayData, ++ NULL, ++ 0); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ } ++ ++ PVR_ASSERT(eError == PVRSRV_OK); ++ return eError; ++} ++ ++static INLINE IMG_BOOL IsOffsetValid(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData, ++ IMG_UINT32 ui32Offset) ++{ ++ return (ui32Offset >> psOSPageArrayData->uiLog2AllocPageSize) < ++ psOSPageArrayData->uiTotalNumOSPages; ++} ++ ++/* Determine PA for specified offset into page array. */ ++static IMG_DEV_PHYADDR GetOffsetPA(const PMR_OSPAGEARRAY_DATA *psOSPageArrayData, ++ IMG_UINT32 ui32Offset) ++{ ++ IMG_UINT32 ui32Log2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize; ++ IMG_UINT32 ui32PageIndex = ui32Offset >> ui32Log2AllocPageSize; ++ IMG_UINT32 ui32InPageOffset = ui32Offset - (ui32PageIndex << ui32Log2AllocPageSize); ++ IMG_DEV_PHYADDR sPA; ++ ++ PVR_ASSERT(ui32InPageOffset < (1U << ui32Log2AllocPageSize)); ++ ++ sPA.uiAddr = page_to_phys(psOSPageArrayData->pagearray[ui32PageIndex]); ++ sPA.uiAddr += ui32InPageOffset; ++ ++ return sPA; ++} ++ ++/* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! 
*/
++static PVRSRV_ERROR
++PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv,
++                    IMG_UINT32 ui32Log2PageSize,
++                    IMG_UINT32 ui32NumOfPages,
++                    IMG_DEVMEM_OFFSET_T *puiOffset,
++                    IMG_BOOL *pbValid,
++                    IMG_DEV_PHYADDR *psDevPAddr)
++{
++	const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
++	IMG_UINT32 uiIdx;
++
++	if (psOSPageArrayData->uiLog2AllocPageSize < ui32Log2PageSize)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "%s: Requested physical addresses from PMR "
++		         "for incompatible contiguity %u!",
++		         __func__,
++		         ui32Log2PageSize));
++		return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
++	}
++
++	for (uiIdx = 0; uiIdx < ui32NumOfPages; uiIdx++)
++	{
++		if (pbValid[uiIdx])
++		{
++			PVR_LOG_RETURN_IF_FALSE(IsOffsetValid(psOSPageArrayData, puiOffset[uiIdx]),
++			                        "puiOffset out of range", PVRSRV_ERROR_OUT_OF_RANGE);
++
++			psDevPAddr[uiIdx] = GetOffsetPA(psOSPageArrayData, puiOffset[uiIdx]);
++
++#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
++			/* This is just a precaution; normally this should always be
++			 * available */
++			if (psOSPageArrayData->ui64DmaMask)
++			{
++				if (psDevPAddr[uiIdx].uiAddr > psOSPageArrayData->ui64DmaMask)
++				{
++					PVR_DPF((PVR_DBG_ERROR, "%s: physical address"
++					         " (%" IMG_UINT64_FMTSPECX ") out of allowable range"
++					         " [0; %" IMG_UINT64_FMTSPECX "]", __func__,
++					         psDevPAddr[uiIdx].uiAddr,
++					         psOSPageArrayData->ui64DmaMask));
++					BUG();
++				}
++			}
++#endif
++		}
++	}
++
++	return PVRSRV_OK;
++}
++
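The lookup above rests on the GetOffsetPA() arithmetic: a byte offset is split into a page index and an in-page remainder using the allocation's log2 page size, and the physical address is the page base plus the remainder. A standalone sketch with hypothetical numbers (the array stands in for page_to_phys() results):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical demo of the GetOffsetPA() arithmetic: with a 4 KiB allocation
 * page size (log2 = 12), offset 0x5123 falls into page 5 at in-page offset
 * 0x123; the physical address is the page base plus that offset. */
int main(void)
{
	uint32_t log2_page_size = 12;
	uint32_t offset = 0x5123;
	uint64_t page_base[8] = { 0 }; /* stands in for page_to_phys() results */
	uint32_t page_index, in_page;
	uint64_t pa;

	page_base[5] = 0x80000000ull;  /* pretend PA of page 5 */

	page_index = offset >> log2_page_size;             /* 0x5123 >> 12 == 5 */
	in_page = offset - (page_index << log2_page_size); /* 0x123 */
	pa = page_base[page_index] + in_page;              /* 0x80000123 */

	printf("page %u, in-page 0x%x, PA 0x%llx\n",
	       page_index, in_page, (unsigned long long)pa);
	return 0;
}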
++typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ {
++	void *pvBase;
++	IMG_UINT32 ui32PageCount;
++	pgprot_t PageProps;
++} PMR_OSPAGEARRAY_KERNMAP_DATA;
++
++static PVRSRV_ERROR
++PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
++                                 size_t uiOffset,
++                                 size_t uiSize,
++                                 void **ppvKernelAddressOut,
++                                 IMG_HANDLE *phHandleOut,
++                                 PMR_FLAGS_T ulFlags)
++{
++	PVRSRV_ERROR eError;
++	PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
++	void *pvAddress;
++	pgprot_t prot = PAGE_KERNEL;
++	IMG_UINT32 ui32PageOffset = 0;
++	size_t uiMapOffset = 0;
++	IMG_UINT32 ui32PageCount = 0;
++	IMG_UINT32 uiLog2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize;
++	IMG_UINT32 uiOSPageShift = OSGetPageShift();
++	IMG_UINT32 uiPageSizeDiff = 0;
++	struct page **pagearray;
++	PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
++
++	/* For cases where the device page size is greater than the OS page size,
++	 * multiple physically contiguous OS pages constitute one device page.
++	 * However, only the first page address of such an ensemble is stored
++	 * as part of the mapping table in the driver. Hence when mapping the PMR
++	 * in part/full, all OS pages that constitute the device page
++	 * must also be mapped to kernel.
++	 *
++	 * For the case where the device page size is less than the OS page size,
++	 * treat it the same way as when the page sizes are equal */
++	if (uiLog2AllocPageSize > uiOSPageShift)
++	{
++		uiPageSizeDiff = uiLog2AllocPageSize - uiOSPageShift;
++	}
++
++	/*
++	 * A zero offset and size have a special meaning: map in the whole of the
++	 * PMR. This is because the places that call this callback might not be
++	 * able to determine the physical size.
++	 */
++	if ((uiOffset == 0) && (uiSize == 0))
++	{
++		ui32PageOffset = 0;
++		uiMapOffset = 0;
++		/* Page count = amount of OS pages */
++		ui32PageCount = psOSPageArrayData->iNumOSPagesAllocated;
++	}
++	else
++	{
++		size_t uiEndoffset;
++
++		ui32PageOffset = uiOffset >> uiLog2AllocPageSize;
++		uiMapOffset = uiOffset - (ui32PageOffset << uiLog2AllocPageSize);
++		uiEndoffset = uiOffset + uiSize - 1;
++		/* Add one as we want the count, not the offset */
++		/* Page count = amount of device pages (note uiLog2AllocPageSize being used) */
++		ui32PageCount = (uiEndoffset >> uiLog2AllocPageSize) + 1;
++		ui32PageCount -= ui32PageOffset;
++
++		/* The OS page count to be mapped might be different if the
++		 * OS page size is smaller than the device page size */
++		ui32PageCount <<= uiPageSizeDiff;
++	}
++
++	switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags))
++	{
++		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
++			prot = pgprot_noncached(prot);
++			break;
++
++		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC:
++			prot = pgprot_writecombine(prot);
++			break;
++
++		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
++			break;
++
++		default:
++			eError = PVRSRV_ERROR_INVALID_PARAMS;
++			goto e0;
++	}
++
++	if (uiPageSizeDiff)
++	{
++		/* Each device page can be broken down into ui32SubPageCount OS pages */
++		IMG_UINT32 ui32SubPageCount = 1 << uiPageSizeDiff;
++		IMG_UINT32 i;
++		struct page **psPage = &psOSPageArrayData->pagearray[ui32PageOffset];
++
++		/* Allocate enough memory for the OS page pointers for this mapping */
++		pagearray = OSAllocMem(ui32PageCount * sizeof(pagearray[0]));
++		if (pagearray == NULL)
++		{
++			eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++			goto e0;
++		}
++
++		/* Construct the array that holds the page pointers that constitute the
++		 * requested mapping */
++		for (i = 0; i < ui32PageCount; i++)
++		{
++			IMG_UINT32 ui32OSPageArrayIndex = i / ui32SubPageCount;
++			IMG_UINT32 ui32OSPageArrayOffset = i % ui32SubPageCount;
++
++			/*
++			 * The driver only stores OS page pointers for the first OS page
++			 * within each device page (psPage[ui32OSPageArrayIndex]).
++			 * Get the next OS page structure at device page granularity,
++			 * then calculate OS page pointers for all the other pages.
++ */ ++ pagearray[i] = psPage[ui32OSPageArrayIndex] + ui32OSPageArrayOffset; ++ } ++ } ++ else ++ { ++ pagearray = &psOSPageArrayData->pagearray[ui32PageOffset]; ++ } ++ ++ psData = OSAllocMem(sizeof(*psData)); ++ if (psData == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e1; ++ } ++ ++ pvAddress = pvr_vmap(pagearray, ui32PageCount, VM_READ | VM_WRITE, prot); ++ if (pvAddress == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e2; ++ } ++ ++ *ppvKernelAddressOut = pvAddress + uiMapOffset; ++ psData->pvBase = pvAddress; ++ psData->ui32PageCount = ui32PageCount; ++ psData->PageProps = prot; ++ *phHandleOut = psData; ++ ++ if (uiPageSizeDiff) ++ { ++ OSFreeMem(pagearray); ++ } ++ ++ return PVRSRV_OK; ++ ++ /* ++ error exit paths follow ++ */ ++e2: ++ OSFreeMem(psData); ++e1: ++ if (uiPageSizeDiff) ++ { ++ OSFreeMem(pagearray); ++ } ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_HANDLE hHandle) ++{ ++ PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle; ++ PVR_UNREFERENCED_PARAMETER(pvPriv); ++ ++ pvr_vunmap(psData->pvBase, psData->ui32PageCount, psData->PageProps); ++ OSFreeMem(psData); ++} ++ ++static ++PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv) ++{ ++ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ /* Lock down the pool and add the array to the unpin list */ ++ _PagePoolLock(); ++ ++ /* Check current state */ ++ PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED) == IMG_FALSE); ++ PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_ONDEMAND) == IMG_FALSE); ++ ++ eError = _AddUnpinListEntryUnlocked(psOSPageArrayData); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unable to add allocation to unpinned list (%d).", ++ __func__, ++ eError)); ++ ++ goto e_exit; ++ } ++ ++ /* Set the Unpinned bit */ ++ BIT_SET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED); ++ ++e_exit: ++ _PagePoolUnlock(); ++ return eError; ++} ++ ++static ++PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv, ++ PMR_MAPPING_TABLE *psMappingTable) ++{ ++ PVRSRV_ERROR eError; ++ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv; ++ IMG_UINT32 *pui32MapTable = NULL; ++ IMG_UINT32 i, j = 0, ui32Temp = 0; ++ ++ _PagePoolLock(); ++ ++ /* Check current state */ ++ PVR_ASSERT(BIT_ISSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED)); ++ ++ /* Clear unpinned bit */ ++ BIT_UNSET(psOSPageArrayData->ui32AllocFlags, FLAG_UNPINNED); ++ ++ /* If there are still pages in the array remove entries from the pool */ ++ if (psOSPageArrayData->iNumOSPagesAllocated != 0) ++ { ++ _RemoveUnpinListEntryUnlocked(psOSPageArrayData); ++ _PagePoolUnlock(); ++ ++ eError = PVRSRV_OK; ++ goto e_exit_mapalloc_failure; ++ } ++ _PagePoolUnlock(); ++ ++ /* If pages were reclaimed we allocate new ones and ++ * return PVRSRV_ERROR_PMR_NEW_MEMORY */ ++ if (psMappingTable->ui32NumVirtChunks == 1) ++ { ++ eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages); ++ } ++ else ++ { ++ pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks); ++ if (NULL == pui32MapTable) ++ { ++ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unable to Alloc Map Table.", ++ __func__)); ++ goto e_exit_mapalloc_failure; ++ } ++ ++ for (i = 0, j = 0; i < psMappingTable->ui32NumVirtChunks; i++) ++ { ++ ui32Temp = psMappingTable->aui32Translation[i]; ++ if 
(TRANSLATION_INVALID != ui32Temp)
++			{
++				pui32MapTable[j++] = ui32Temp;
++			}
++		}
++		eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks);
++	}
++
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "%s: Unable to get new pages for unpinned allocation.",
++		         __func__));
++
++		eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
++		goto e_exit;
++	}
++
++	PVR_DPF((PVR_DBG_MESSAGE,
++	         "%s: Allocating new pages for unpinned allocation. "
++	         "Old content is lost!",
++	         __func__));
++
++	eError = PVRSRV_ERROR_PMR_NEW_MEMORY;
++
++e_exit:
++	OSFreeMem(pui32MapTable);
++e_exit_mapalloc_failure:
++	return eError;
++}
++
++/*************************************************************************/ /*!
++@Function       PMRChangeSparseMemOSMem
++@Description    This function changes the sparse mapping by allocating and
++                freeing pages. It changes the GPU and CPU maps accordingly.
++@Return         PVRSRV_ERROR failure code
++*/ /**************************************************************************/
++static PVRSRV_ERROR
++PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
++                        const PMR *psPMR,
++                        IMG_UINT32 ui32AllocPageCount,
++                        IMG_UINT32 *pai32AllocIndices,
++                        IMG_UINT32 ui32FreePageCount,
++                        IMG_UINT32 *pai32FreeIndices,
++                        IMG_UINT32 uiFlags)
++{
++	PVRSRV_ERROR eError;
++
++	PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappingTable(psPMR);
++	PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
++	struct page **psPageArray = psPMRPageArrayData->pagearray;
++	void **psDMAVirtArray = psPMRPageArrayData->dmavirtarray;
++	dma_addr_t *psDMAPhysArray = psPMRPageArrayData->dmaphysarray;
++
++	struct page *psPage;
++	dma_addr_t psDMAPAddr;
++	void *pvDMAVAddr;
++
++	IMG_UINT32 ui32AdtnlAllocPages = 0;    /* Number of pages to alloc from the OS */
++	IMG_UINT32 ui32AdtnlFreePages = 0;     /* Number of pages to free back to the OS */
++	IMG_UINT32 ui32CommonRequestCount = 0; /* Number of pages to move position in the page array */
++	IMG_UINT32 ui32Loop = 0;
++	IMG_UINT32 ui32Index = 0;
++	IMG_UINT32 uiAllocpgidx;
++	IMG_UINT32 uiFreepgidx;
++	IMG_UINT32 uiOrder = psPMRPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
++	IMG_BOOL bCMA = BIT_ISSET(psPMRPageArrayData->ui32AllocFlags, FLAG_IS_CMA);
++
++	/* Check SPARSE flags and calculate pages to allocate and free */
++	if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
++	{
++		ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
++				ui32FreePageCount : ui32AllocPageCount;
++
++		PDUMP_PANIC(PMR_DeviceNode(psPMR), SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
++	}
++
++	if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
++	{
++		ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
++	}
++	else
++	{
++		ui32AllocPageCount = 0;
++	}
++
++	if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
++	{
++		ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
++	}
++	else
++	{
++		ui32FreePageCount = 0;
++	}
++
++	if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
++	{
++		eError = PVRSRV_ERROR_INVALID_PARAMS;
++		PVR_DPF((PVR_DBG_ERROR,
++		         "%s: Missing parameters for number of pages to alloc/free",
++		         __func__));
++		return eError;
++	}
++
++	/* The incoming request is classified into two operations independent of
++	 * each other: alloc & free pages.
++	 * These operations can be combined with two mapping operations as well,
++	 * which are the GPU & CPU space mappings.
++	 *
++	 * From the alloc and free page requests, the net amount of pages to be
++	 * allocated or freed is computed. Pages that were requested to be freed
++	 * will be reused to fulfil alloc requests.
++	 *
++	 * The order of operations is:
++	 * 1. Allocate new pages from the OS
++	 * 2. Move the free pages from the free request to the alloc positions.
++	 * 3. Free the rest of the pages not used for alloc
++	 *
++	 * Alloc parameters are validated at the time of allocation
++	 * and any error will be handled then. */
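With both resize flags set, the smaller of the two requests becomes the common count whose pages merely change position in the page array; only the remainders reach the OS allocator or the free path. A standalone sketch of that bookkeeping with hypothetical counts:

#include <stdio.h>

/* Hypothetical demo of the SPARSE_RESIZE_BOTH bookkeeping: the smaller of
 * the two requests is the "common" count whose pages just move position in
 * the page array; only the remainders hit the OS allocator / free path. */
int main(void)
{
	unsigned alloc_count = 5;
	unsigned free_count = 3;
	unsigned common = (alloc_count > free_count) ? free_count : alloc_count;
	unsigned adtnl_alloc = alloc_count - common; /* new pages from the OS */
	unsigned adtnl_free = free_count - common;   /* pages returned to the OS */

	printf("move %u in place, alloc %u, free %u\n", common, adtnl_alloc, adtnl_free);
	/* prints: move 3 in place, alloc 2, free 0 */
	return 0;
}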
++	/* Validate the free indices */
++	if (ui32FreePageCount)
++	{
++		if (NULL != pai32FreeIndices)
++		{
++			for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
++			{
++				uiFreepgidx = pai32FreeIndices[ui32Loop];
++
++				if (uiFreepgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
++				{
++					eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
++					goto e0;
++				}
++
++				if (NULL == psPageArray[uiFreepgidx])
++				{
++					eError = PVRSRV_ERROR_INVALID_PARAMS;
++					PVR_DPF((PVR_DBG_ERROR,
++					         "%s: Trying to free non-allocated page",
++					         __func__));
++					goto e0;
++				}
++			}
++		}
++		else
++		{
++			eError = PVRSRV_ERROR_INVALID_PARAMS;
++			PVR_DPF((PVR_DBG_ERROR,
++			         "%s: Given non-zero free count but missing indices array",
++			         __func__));
++			return eError;
++		}
++	}
++
++	/* Validate the alloc indices */
++	for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
++	{
++		uiAllocpgidx = pai32AllocIndices[ui32Loop];
++
++		if (uiAllocpgidx > (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
++		{
++			eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
++			goto e0;
++		}
++
++		if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
++		{
++			if ((NULL != psPageArray[uiAllocpgidx]) ||
++			    (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
++			{
++				eError = PVRSRV_ERROR_INVALID_PARAMS;
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s: Trying to allocate already allocated page again",
++				         __func__));
++				goto e0;
++			}
++		}
++		else
++		{
++			if ((NULL == psPageArray[uiAllocpgidx]) ||
++			    (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]))
++			{
++				eError = PVRSRV_ERROR_INVALID_PARAMS;
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s: Unable to remap memory due to missing page",
++				         __func__));
++				goto e0;
++			}
++		}
++	}
++
++	ui32Loop = 0;
++
++	/* Allocate new pages from the OS */
++	if (0 != ui32AdtnlAllocPages)
++	{
++		eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages);
++		if (PVRSRV_OK != eError)
++		{
++			PVR_DPF((PVR_DBG_MESSAGE,
++			         "%s: New Addtl Allocation of pages failed",
++			         __func__));
++			goto e0;
++		}
++
++		psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
++		/* Mark the corresponding pages of the translation table as valid */
++		for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
++		{
++			psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
++		}
++	}
++
++	ui32Index = ui32Loop;
++
++	/* Move the corresponding free pages to the alloc request */
++	for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
++	{
++		uiAllocpgidx = pai32AllocIndices[ui32Index];
++		uiFreepgidx = pai32FreeIndices[ui32Loop];
++
++		psPage = psPageArray[uiAllocpgidx];
++		psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
++
++		if (bCMA)
++		{
++			pvDMAVAddr = psDMAVirtArray[uiAllocpgidx];
++			psDMAPAddr = psDMAPhysArray[uiAllocpgidx];
++			psDMAVirtArray[uiAllocpgidx] = psDMAVirtArray[uiFreepgidx];
++			psDMAPhysArray[uiAllocpgidx] = psDMAPhysArray[uiFreepgidx];
++		}
++
++		/* Is remap mem used in a real-world scenario? Should it be turned into a
++		 * debug feature?
The condition check needs to be out of loop, will be ++ * done at later point though after some analysis */ ++ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM)) ++ { ++ psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID; ++ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; ++ psPageArray[uiFreepgidx] = NULL; ++ if (bCMA) ++ { ++ psDMAVirtArray[uiFreepgidx] = NULL; ++ psDMAPhysArray[uiFreepgidx] = (dma_addr_t)0; ++ } ++ } ++ else ++ { ++ psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx; ++ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx; ++ psPageArray[uiFreepgidx] = psPage; ++ if (bCMA) ++ { ++ psDMAVirtArray[uiFreepgidx] = pvDMAVAddr; ++ psDMAPhysArray[uiFreepgidx] = psDMAPAddr; ++ } ++ } ++ } ++ ++ /* Free the additional free pages */ ++ if (0 != ui32AdtnlFreePages) ++ { ++ eError = _FreeOSPages(psPMRPageArrayData, ++ &pai32FreeIndices[ui32Loop], ++ ui32AdtnlFreePages); ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages; ++ while (ui32Loop < ui32FreePageCount) ++ { ++ psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID; ++ ui32Loop++; ++ } ++ } ++ ++ eError = PVRSRV_OK; ++ ++e0: ++ return eError; ++} ++ ++/*************************************************************************/ /*! ++@Function PMRChangeSparseMemCPUMapOSMem ++@Description This function Changes CPU maps accordingly ++@Return PVRSRV_ERROR failure code ++*/ /**************************************************************************/ ++static ++PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv, ++ const PMR *psPMR, ++ IMG_UINT64 sCpuVAddrBase, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices) ++{ ++ struct page **psPageArray; ++ PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv; ++ IMG_CPU_PHYADDR sCPUPAddr; ++ ++ sCPUPAddr.uiAddr = 0; ++ psPageArray = psPMRPageArrayData->pagearray; ++ ++ return OSChangeSparseMemCPUAddrMap((void **)psPageArray, ++ sCpuVAddrBase, ++ sCPUPAddr, ++ ui32AllocPageCount, ++ pai32AllocIndices, ++ ui32FreePageCount, ++ pai32FreeIndices, ++ IMG_FALSE); ++} ++ ++static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = { ++ .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem, ++ .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem, ++ .pfnDevPhysAddr = &PMRSysPhysAddrOSMem, ++ .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem, ++ .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem, ++ .pfnReadBytes = NULL, ++ .pfnWriteBytes = NULL, ++ .pfnUnpinMem = &PMRUnpinOSMem, ++ .pfnPinMem = &PMRPinOSMem, ++ .pfnChangeSparseMem = &PMRChangeSparseMemOSMem, ++ .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem, ++ .pfnFinalize = &PMRFinalizeOSMem, ++}; ++ ++/* Wrapper around OS page allocation. */ ++static PVRSRV_ERROR ++DoPageAlloc(PMR_OSPAGEARRAY_DATA *psPrivData, ++ IMG_UINT32 *puiAllocIndices, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32Log2AllocPageSize) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ /* Do we fill the whole page array or just parts (sparse)? 
*/ ++ if (ui32NumPhysChunks == ui32NumVirtChunks) ++ { ++ /* Allocate the physical pages */ ++ eError = _AllocOSPages(psPrivData, ++ NULL, ++ psPrivData->uiTotalNumOSPages >> ++ (ui32Log2AllocPageSize - PAGE_SHIFT)); ++ } ++ else if (ui32NumPhysChunks != 0) ++ { ++ /* Calculate the number of pages we want to allocate */ ++ IMG_UINT32 ui32PagesToAlloc = ++ (IMG_UINT32)((((ui32NumPhysChunks * uiChunkSize) - 1) >> ui32Log2AllocPageSize) + 1); ++ ++ /* Make sure calculation is correct */ ++ PVR_ASSERT(((PMR_SIZE_T) ui32PagesToAlloc << ui32Log2AllocPageSize) == ++ (ui32NumPhysChunks * uiChunkSize)); ++ ++ /* Allocate the physical pages */ ++ eError = _AllocOSPages(psPrivData, puiAllocIndices, ++ ui32PagesToAlloc); ++ } ++ ++ return eError; ++} ++ ++static void _EncodeAllocationFlags(IMG_UINT32 uiLog2AllocPageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ IMG_UINT32* ui32AllocFlags) ++{ ++ ++ /* ++ * Use CMA framework if order is greater than OS page size; please note ++ * that OSMMapPMRGeneric() has the same expectation as well. ++ */ ++ /* IsCMA? */ ++ if (uiLog2AllocPageSize > PAGE_SHIFT) ++ { ++ BIT_SET(*ui32AllocFlags, FLAG_IS_CMA); ++ } ++ ++ /* OnDemand? */ ++ if (PVRSRV_CHECK_ON_DEMAND(uiFlags)) ++ { ++ BIT_SET(*ui32AllocFlags, FLAG_ONDEMAND); ++ } ++ ++ /* Zero? */ ++ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags)) ++ { ++ BIT_SET(*ui32AllocFlags, FLAG_ZERO); ++ } ++ ++ /* Poison on alloc? */ ++ if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags)) ++ { ++ BIT_SET(*ui32AllocFlags, FLAG_POISON_ON_ALLOC); ++ } ++ ++#if defined(DEBUG) ++ /* Poison on free? */ ++ if (PVRSRV_CHECK_POISON_ON_FREE(uiFlags)) ++ { ++ BIT_SET(*ui32AllocFlags, FLAG_POISON_ON_FREE); ++ } ++#endif ++ ++ /* Indicate whether this is an allocation with default caching attribute (i.e cached) or not */ ++ if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || ++ PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags)) ++ { ++ BIT_SET(*ui32AllocFlags, FLAG_UNSET_MEMORY_TYPE); ++ } ++ ++} ++ ++void PhysmemGetOSRamMemStats(PHEAP_IMPL_DATA pvImplData, ++ IMG_UINT64 *pui64TotalSize, ++ IMG_UINT64 *pui64FreeSize) ++{ ++ struct sysinfo sMeminfo; ++ si_meminfo(&sMeminfo); ++ ++ PVR_UNREFERENCED_PARAMETER(pvImplData); ++ ++ *pui64TotalSize = sMeminfo.totalram * sMeminfo.mem_unit; ++ *pui64FreeSize = sMeminfo.freeram * sMeminfo.mem_unit; ++ ++} ++ ++PVRSRV_ERROR ++PhysmemNewOSRamBackedPMR(PHYS_HEAP *psPhysHeap, ++ CONNECTION_DATA *psConnection, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *puiAllocIndices, ++ IMG_UINT32 uiLog2AllocPageSize, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszAnnotation, ++ IMG_PID uiPid, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_ERROR eError2; ++ PMR *psPMR; ++ struct _PMR_OSPAGEARRAY_DATA_ *psPrivData; ++ PMR_FLAGS_T uiPMRFlags; ++ IMG_UINT32 ui32CPUCacheFlags; ++ IMG_UINT32 ui32AllocFlags = 0; ++ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPhysHeap); ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ /* ++ * The host driver (but not guest) can still use this factory for firmware ++ * allocations ++ */ ++ if (PVRSRV_VZ_MODE_IS(GUEST) && PVRSRV_CHECK_FW_MAIN(uiFlags)) ++ { ++ PVR_ASSERT(0); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto errorOnParam; ++ } ++ ++ /* Select correct caching mode */ ++ eError = DevmemCPUCacheMode(psDevNode, uiFlags, &ui32CPUCacheFlags); ++ if (eError != PVRSRV_OK) ++ { ++ goto errorOnParam; ++ } ++ ++ if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags)) ++ { ++ 
ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN; ++ } ++ ++ _EncodeAllocationFlags(uiLog2AllocPageSize, uiFlags, &ui32AllocFlags); ++ ++ ++#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) ++ /* Overwrite flags and always zero pages that could go back to UM */ ++ BIT_SET(ui32AllocFlags, FLAG_ZERO); ++ BIT_UNSET(ui32AllocFlags, FLAG_POISON_ON_ALLOC); ++#endif ++ ++ /* Physical allocation alignment is generally not supported except under ++ very restrictive conditions, also there is a maximum alignment value ++ which must not exceed the largest device page-size. If these are not ++ met then fail the aligned-requested allocation */ ++ if (BIT_ISSET(ui32AllocFlags, FLAG_IS_CMA)) ++ { ++ IMG_UINT32 uiAlign = 1 << uiLog2AllocPageSize; ++ if (uiAlign > uiSize || uiAlign > (1 << PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid PA alignment: size 0x%llx, align 0x%x", ++ __func__, uiSize, uiAlign)); ++ eError = PVRSRV_ERROR_INVALID_ALIGNMENT; ++ goto errorOnParam; ++ } ++ PVR_ASSERT(uiLog2AllocPageSize > PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ); ++ } ++ ++ /* Create Array structure that hold the physical pages */ ++ eError = _AllocOSPageArray(psDevNode, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ uiLog2AllocPageSize, ++ ui32AllocFlags, ++ ui32CPUCacheFlags, ++ uiPid, ++ &psPrivData); ++ if (eError != PVRSRV_OK) ++ { ++ goto errorOnAllocPageArray; ++ } ++ ++ if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) ++ { ++ eError = DoPageAlloc(psPrivData, puiAllocIndices, ui32NumPhysChunks, ++ ui32NumVirtChunks, uiChunkSize, uiLog2AllocPageSize); ++ if (eError != PVRSRV_OK) ++ { ++ goto errorOnAllocPages; ++ } ++ } ++ ++ /* ++ * In this instance, we simply pass flags straight through. ++ * ++ * Generically, uiFlags can include things that control the PMR factory, but ++ * we don't need any such thing (at the time of writing!), and our caller ++ * specifies all PMR flags so we don't need to meddle with what was given to ++ * us. ++ */ ++ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK); ++ ++ /* ++ * Check no significant bits were lost in cast due to different bit widths ++ * for flags ++ */ ++ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK)); ++ ++ if (BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) ++ { ++ PDUMPCOMMENT(PhysHeapDeviceNode(psPhysHeap), "Deferred Allocation PMR (UMA)"); ++ } ++ ++ eError = PMRCreatePMR(psPhysHeap, ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ puiAllocIndices, ++ uiLog2AllocPageSize, ++ uiPMRFlags, ++ pszAnnotation, ++ &_sPMROSPFuncTab, ++ psPrivData, ++ PMR_TYPE_OSMEM, ++ &psPMR, ++ ui32PDumpFlags); ++ if (eError != PVRSRV_OK) ++ { ++ goto errorOnCreate; ++ } ++ ++ *ppsPMRPtr = psPMR; ++ ++ return PVRSRV_OK; ++ ++errorOnCreate: ++ if (!BIT_ISSET(ui32AllocFlags, FLAG_ONDEMAND)) ++ { ++ eError2 = _FreeOSPages(psPrivData, NULL, 0); ++ PVR_ASSERT(eError2 == PVRSRV_OK); ++ } ++ ++errorOnAllocPages: ++ eError2 = _FreeOSPagesArray(psPrivData); ++ PVR_ASSERT(eError2 == PVRSRV_OK); ++ ++errorOnAllocPageArray: ++errorOnParam: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/physmem_osmem_linux.h b/drivers/gpu/drm/img-rogue/physmem_osmem_linux.h +new file mode 100644 +index 000000000000..89706fffdc5c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_osmem_linux.h +@@ -0,0 +1,49 @@ ++/*************************************************************************/ /*! 
++@File ++@Title Linux OS physmem implementation ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PHYSMEM_OSMEM_LINUX_H ++#define PHYSMEM_OSMEM_LINUX_H ++ ++void LinuxInitPhysmem(void); ++void LinuxDeinitPhysmem(void); ++ ++#endif /* PHYSMEM_OSMEM_LINUX_H */ +diff --git a/drivers/gpu/drm/img-rogue/physmem_test.c b/drivers/gpu/drm/img-rogue/physmem_test.c +new file mode 100644 +index 000000000000..3874594dcdb0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_test.c +@@ -0,0 +1,710 @@ ++/*************************************************************************/ /*! ++@Title Physmem_test ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Single entry point for testing of page factories ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /***************************************************************************/ ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "physmem_test.h" ++#include "device.h" ++#include "syscommon.h" ++#include "pmr.h" ++#include "osfunc.h" ++#include "physmem.h" ++#include "physmem_osmem.h" ++#include "physmem_lma.h" ++#include "pvrsrv.h" ++ ++#define PHYSMEM_TEST_PAGES 2 /* Mem test pages */ ++#define PHYSMEM_TEST_PASSES_MAX 1000 /* Limit number of passes to some reasonable value */ ++ ++ ++/* Test patterns for mem test */ ++ ++static const IMG_UINT64 gui64Patterns[] = { ++ 0, ++ 0xffffffffffffffffULL, ++ 0x5555555555555555ULL, ++ 0xaaaaaaaaaaaaaaaaULL, ++ 0x1111111111111111ULL, ++ 0x2222222222222222ULL, ++ 0x4444444444444444ULL, ++ 0x8888888888888888ULL, ++ 0x3333333333333333ULL, ++ 0x6666666666666666ULL, ++ 0x9999999999999999ULL, ++ 0xccccccccccccccccULL, ++ 0x7777777777777777ULL, ++ 0xbbbbbbbbbbbbbbbbULL, ++ 0xddddddddddddddddULL, ++ 0xeeeeeeeeeeeeeeeeULL, ++ 0x7a6c7258554e494cULL, ++}; ++ ++static const IMG_UINT32 gui32Patterns[] = { ++ 0, ++ 0xffffffffU, ++ 0x55555555U, ++ 0xaaaaaaaaU, ++ 0x11111111U, ++ 0x22222222U, ++ 0x44444444U, ++ 0x88888888U, ++ 0x33333333U, ++ 0x66666666U, ++ 0x99999999U, ++ 0xccccccccU, ++ 0x77777777U, ++ 0xbbbbbbbbU, ++ 0xddddddddU, ++ 0xeeeeeeeeU, ++ 0x7a6c725cU, ++}; ++ ++static const IMG_UINT16 gui16Patterns[] = { ++ 0, ++ 0xffffU, ++ 0x5555U, ++ 0xaaaaU, ++ 0x1111U, ++ 0x2222U, ++ 0x4444U, ++ 0x8888U, ++ 0x3333U, ++ 0x6666U, ++ 0x9999U, ++ 0xccccU, ++ 0x7777U, ++ 0xbbbbU, ++ 0xddddU, ++ 0xeeeeU, ++ 0x7a6cU, ++}; ++ ++static const IMG_UINT8 gui8Patterns[] = { ++ 0, ++ 0xffU, ++ 0x55U, ++ 0xaaU, ++ 0x11U, ++ 0x22U, ++ 0x44U, ++ 0x88U, ++ 0x33U, ++ 0x66U, ++ 0x99U, ++ 0xccU, ++ 0x77U, ++ 0xbbU, ++ 0xddU, ++ 0xeeU, ++ 0x6cU, ++}; ++ ++ ++/* Following function does minimal required initialisation for mem test using dummy device node */ ++static PVRSRV_ERROR ++PhysMemTestInit(PVRSRV_DEVICE_NODE **ppsDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ PVRSRV_ERROR eError; ++ ++ /* Dummy device node */ ++ psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode)); ++ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "OSAllocZMem"); ++ ++ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT; ++ psDeviceNode->psDevConfig = psDevConfig; ++ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; ++ ++ /* Initialise Phys mem heaps */ ++ eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPhysMemHeapsInit", ErrorSysDevDeInit); ++ ++ *ppsDeviceNode = psDeviceNode; ++ ++ return PVRSRV_OK; ++ ++ErrorSysDevDeInit: ++ psDevConfig->psDevNode = NULL; ++ OSFreeMem(psDeviceNode); ++ return eError; ++} ++ ++/* Undo initialisation done for mem test */ ++static void ++PhysMemTestDeInit(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ /* Deinitialise Phys mem heaps */ ++ PVRSRVPhysMemHeapsDeinit(psDeviceNode); ++ ++ OSFreeMem(psDeviceNode); ++} ++ ++/* Test for PMR factory validation */ ++static PVRSRV_ERROR ++PMRValidationTest(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) ++{ ++ PVRSRV_ERROR eError, eError1; ++ IMG_UINT32 i = 0, j = 0, ui32Index = 0; ++ IMG_UINT32 *pui32MappingTable = NULL; ++ PMR *psPMR = NULL; ++ IMG_BOOL *pbValid; ++ IMG_DEV_PHYADDR *apsDevPAddr; ++ IMG_UINT32 ui32NumOfPages = 10, ui32NumOfPhysPages = 5; ++ size_t uiMappedSize, uiPageSize; ++ IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; ++ IMG_HANDLE hPrivData = NULL; ++ void *pvKernAddr = 
NULL; ++ ++ uiPageSize = OSGetPageSize(); ++ ++ /* Allocate OS memory for PMR page list */ ++ apsDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); ++ PVR_LOG_RETURN_IF_NOMEM(apsDevPAddr, "OSAllocMem"); ++ ++ /* Allocate OS memory for PMR page state */ ++ pbValid = OSAllocMem(ui32NumOfPages * sizeof(IMG_BOOL)); ++ PVR_LOG_GOTO_IF_NOMEM(pbValid, eError, ErrorFreePMRPageListMem); ++ OSCachedMemSet(pbValid, 0, ui32NumOfPages * sizeof(IMG_BOOL)); ++ ++ /* Allocate OS memory for write buffer */ ++ pcWriteBuffer = OSAllocMem(uiPageSize); ++ PVR_LOG_GOTO_IF_NOMEM(pcWriteBuffer, eError, ErrorFreePMRPageStateMem); ++ OSCachedMemSet(pcWriteBuffer, 0xF, uiPageSize); ++ ++ /* Allocate OS memory for read buffer */ ++ pcReadBuffer = OSAllocMem(uiPageSize); ++ PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); ++ ++ /* Allocate OS memory for mapping table */ ++ pui32MappingTable = (IMG_UINT32 *)OSAllocMem(ui32NumOfPhysPages * sizeof(*pui32MappingTable)); ++ PVR_LOG_GOTO_IF_NOMEM(pui32MappingTable, eError, ErrorFreeReadBuffer); ++ ++ /* Pages having even index will have physical backing in PMR */ ++ for (ui32Index=0; ui32Index < ui32NumOfPages; ui32Index+=2) ++ { ++ pui32MappingTable[i++] = ui32Index; ++ } ++ ++ /* Allocate Sparse PMR with SPARSE | READ | WRITE | UNCACHED_WC attributes */ ++ uiFlags |= PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \ ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; ++ ++ /* Allocate a sparse PMR from given physical heap - CPU/GPU/FW */ ++ eError = PhysmemNewRamBackedPMR(NULL, ++ psDeviceNode, ++ ui32NumOfPages * uiPageSize, ++ uiPageSize, ++ ui32NumOfPhysPages, ++ ui32NumOfPages, ++ pui32MappingTable, ++ OSGetPageShift(), ++ uiFlags, ++ sizeof("PMR ValidationTest"), ++ "PMR ValidationTest", ++ OSGetCurrentClientProcessIDKM(), ++ &psPMR, ++ PDUMP_NONE, ++ NULL); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate a PMR")); ++ goto ErrorFreeMappingTable; ++ } ++ ++ /* Check whether allocated PMR can be locked and obtain physical addresses ++ * of underlying memory pages. ++ */ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to lock PMR")); ++ goto ErrorUnrefPMR; ++ } ++ ++ /* Get the Device physical addresses of the pages */ ++ eError = PMR_DevPhysAddr(psPMR, OSGetPageShift(), ui32NumOfPages, 0, apsDevPAddr, pbValid); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses")); ++ goto ErrorUnlockPhysAddresses; ++ } ++ ++ /* Check whether device address of each physical page is OS PAGE_SIZE aligned */ ++ for (i = 0; i < ui32NumOfPages; i++) ++ { ++ if (pbValid[i]) ++ { ++ if ((apsDevPAddr[i].uiAddr & OSGetPageMask()) != 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Physical memory of PMR is not page aligned")); ++ eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; ++ goto ErrorUnlockPhysAddresses; ++ } ++ } ++ } ++ ++ /* Acquire kernel virtual address of each physical page and write to it ++ * and then release it. 
++ */ ++ for (i = 0; i < ui32NumOfPages; i++) ++ { ++ if (pbValid[i]) ++ { ++ eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); ++ goto ErrorUnlockPhysAddresses; ++ } ++ OSCachedMemCopyWMB(pvKernAddr, pcWriteBuffer, OSGetPageSize()); ++ ++ eError = PMRReleaseKernelMappingData(psPMR, hPrivData); ++ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); ++ } ++ } ++ ++ /* Acquire kernel virtual address of each physical page and read ++ * from it and check where contents are intact. ++ */ ++ for (i = 0; i < ui32NumOfPages; i++) ++ { ++ if (pbValid[i]) ++ { ++ eError = PMRAcquireSparseKernelMappingData(psPMR, (i * uiPageSize), uiPageSize, &pvKernAddr, &uiMappedSize, &hPrivData); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to Acquire Kernel Mapping of PMR")); ++ goto ErrorUnlockPhysAddresses; ++ } ++ OSCachedMemSet(pcReadBuffer, 0x0, uiPageSize); ++ OSCachedMemCopy(pcReadBuffer, pvKernAddr, uiMappedSize); ++ ++ eError = PMRReleaseKernelMappingData(psPMR, hPrivData); ++ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData"); ++ ++ for (j = 0; j < uiPageSize; j++) ++ { ++ if (pcReadBuffer[j] != pcWriteBuffer[j]) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Test failed. Got (0x%hhx), expected (0x%hhx)!", ++ __func__, pcReadBuffer[j], pcWriteBuffer[j])); ++ eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; ++ goto ErrorUnlockPhysAddresses; ++ } ++ } ++ } ++ } ++ ++ErrorUnlockPhysAddresses: ++ /* Unlock and Unref the PMR to destroy it */ ++ eError1 = PMRUnlockSysPhysAddresses(psPMR); ++ if (eError1 != PVRSRV_OK) ++ { ++ eError = (eError == PVRSRV_OK)? eError1 : eError; ++ PVR_DPF((PVR_DBG_ERROR, "Failed to unlock PMR")); ++ } ++ ++ErrorUnrefPMR: ++ eError1 = PMRUnrefPMR(psPMR); ++ if (eError1 != PVRSRV_OK) ++ { ++ eError = (eError == PVRSRV_OK)? eError1 : eError; ++ PVR_DPF((PVR_DBG_ERROR, "Failed to free PMR")); ++ } ++ErrorFreeMappingTable: ++ OSFreeMem(pui32MappingTable); ++ErrorFreeReadBuffer: ++ OSFreeMem(pcReadBuffer); ++ErrorFreeWriteBuffer: ++ OSFreeMem(pcWriteBuffer); ++ErrorFreePMRPageStateMem: ++ OSFreeMem(pbValid); ++ErrorFreePMRPageListMem: ++ OSFreeMem(apsDevPAddr); ++ ++ return eError; ++} ++ ++#define DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, Patterns, NumOfPatterns, Error, ptr, i) \ ++ for (i = 0; i < NumOfPatterns; i++) \ ++ { \ ++ /* Write pattern */ \ ++ for (ptr = StartAddr; ptr < EndAddr; ptr++) \ ++ { \ ++ *ptr = Patterns[i]; \ ++ } \ ++ \ ++ /* Read back and validate pattern */ \ ++ for (ptr = StartAddr; ptr < EndAddr ; ptr++) \ ++ { \ ++ if (*ptr != Patterns[i]) \ ++ { \ ++ Error = PVRSRV_ERROR_MEMORY_TEST_FAILED; \ ++ break; \ ++ } \ ++ } \ ++ \ ++ if (Error != PVRSRV_OK) \ ++ { \ ++ break; \ ++ } \ ++ } ++ ++static PVRSRV_ERROR ++TestPatternU8(void *pvKernAddr, size_t uiMappedSize) ++{ ++ IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr; ++ IMG_UINT8 *EndAddr = ((IMG_UINT8 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT8)); ++ IMG_UINT8 *p; ++ IMG_UINT32 i; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT8)) == 0); ++ ++ DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui8Patterns, sizeof(gui8Patterns)/sizeof(IMG_UINT8), eError, p, i); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Test failed. 
Got (0x%hhx), expected (0x%hhx)!", ++ __func__, *p, gui8Patterns[i])); ++ } ++ ++ return eError; ++} ++ ++ ++static PVRSRV_ERROR ++TestPatternU16(void *pvKernAddr, size_t uiMappedSize) ++{ ++ IMG_UINT16 *StartAddr = (IMG_UINT16 *) pvKernAddr; ++ IMG_UINT16 *EndAddr = ((IMG_UINT16 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT16)); ++ IMG_UINT16 *p; ++ IMG_UINT32 i; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT16)) == 0); ++ ++ DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui16Patterns, sizeof(gui16Patterns)/sizeof(IMG_UINT16), eError, p, i); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Test failed. Got (0x%hx), expected (0x%hx)!", ++ __func__, *p, gui16Patterns[i])); ++ } ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR ++TestPatternU32(void *pvKernAddr, size_t uiMappedSize) ++{ ++ IMG_UINT32 *StartAddr = (IMG_UINT32 *) pvKernAddr; ++ IMG_UINT32 *EndAddr = ((IMG_UINT32 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT32)); ++ IMG_UINT32 *p; ++ IMG_UINT32 i; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT32)) == 0); ++ ++ DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui32Patterns, sizeof(gui32Patterns)/sizeof(IMG_UINT32), eError, p, i); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Test failed. Got (0x%x), expected (0x%x)!", ++ __func__, *p, gui32Patterns[i])); ++ } ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR ++TestPatternU64(void *pvKernAddr, size_t uiMappedSize) ++{ ++ IMG_UINT64 *StartAddr = (IMG_UINT64 *) pvKernAddr; ++ IMG_UINT64 *EndAddr = ((IMG_UINT64 *) pvKernAddr) + (uiMappedSize / sizeof(IMG_UINT64)); ++ IMG_UINT64 *p; ++ IMG_UINT32 i; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_ASSERT((uiMappedSize % sizeof(IMG_UINT64)) == 0); ++ ++ DO_MEMTEST_FOR_PATTERNS(StartAddr, EndAddr, gui64Patterns, sizeof(gui64Patterns)/sizeof(IMG_UINT64), eError, p, i); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Test failed. Got (0x%llx), expected (0x%llx)!", ++ __func__, *p, gui64Patterns[i])); ++ } ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR ++TestSplitCacheline(void *pvKernAddr, size_t uiMappedSize) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ size_t uiCacheLineSize; ++ size_t uiBlockSize; ++ size_t j; ++ IMG_UINT8 *pcWriteBuffer, *pcReadBuffer; ++ IMG_UINT8 *StartAddr = (IMG_UINT8 *) pvKernAddr; ++ IMG_UINT8 *EndAddr, *p; ++ ++ uiCacheLineSize = OSCPUCacheAttributeSize(OS_CPU_CACHE_ATTRIBUTE_LINE_SIZE); ++ ++ if (uiCacheLineSize > 0) ++ { ++ uiBlockSize = (uiCacheLineSize * 2)/3; /* split cacheline */ ++ ++ pcWriteBuffer = OSAllocMem(uiBlockSize); ++ PVR_LOG_RETURN_IF_NOMEM(pcWriteBuffer, "OSAllocMem"); ++ ++ /* Fill the write buffer with test data, 0xAB*/ ++ OSCachedMemSet(pcWriteBuffer, 0xAB, uiBlockSize); ++ ++ pcReadBuffer = OSAllocMem(uiBlockSize); ++ PVR_LOG_GOTO_IF_NOMEM(pcReadBuffer, eError, ErrorFreeWriteBuffer); ++ ++ /* Fit only complete blocks in uiMappedSize, ignore leftover bytes */ ++ EndAddr = StartAddr + (uiBlockSize * (uiMappedSize / uiBlockSize)); ++ ++ /* Write blocks into the memory */ ++ for (p = StartAddr; p < EndAddr; p += uiBlockSize) ++ { ++ OSCachedMemCopy(p, pcWriteBuffer, uiBlockSize); ++ } ++ ++ /* Read back blocks and check */ ++ for (p = StartAddr; p < EndAddr; p += uiBlockSize) ++ { ++ OSCachedMemCopy(pcReadBuffer, p, uiBlockSize); ++ ++ for (j = 0; j < uiBlockSize; j++) ++ { ++ if (pcReadBuffer[j] != pcWriteBuffer[j]) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Test failed. 
Got (0x%hhx), expected (0x%hhx)!", __func__, pcReadBuffer[j], pcWriteBuffer[j])); ++ eError = PVRSRV_ERROR_MEMORY_TEST_FAILED; ++ goto ErrorMemTestFailed; ++ } ++ } ++ } ++ ++ErrorMemTestFailed: ++ OSFreeMem(pcReadBuffer); ++ErrorFreeWriteBuffer: ++ OSFreeMem(pcWriteBuffer); ++ } ++ ++ return eError; ++} ++ ++/* Memory test - writes and reads back different patterns to memory and validate the same */ ++static PVRSRV_ERROR ++MemTestPatterns(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32MappingTable = 0; ++ PMR *psPMR = NULL; ++ size_t uiMappedSize, uiPageSize; ++ IMG_HANDLE hPrivData = NULL; ++ void *pvKernAddr = NULL; ++ ++ uiPageSize = OSGetPageSize(); ++ ++ /* Allocate PMR with READ | WRITE | WRITE_COMBINE attributes */ ++ uiFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC; ++ ++ /*Allocate a PMR from given physical heap */ ++ eError = PhysmemNewRamBackedPMR(NULL, ++ psDeviceNode, ++ uiPageSize * PHYSMEM_TEST_PAGES, ++ uiPageSize * PHYSMEM_TEST_PAGES, ++ 1, ++ 1, ++ &ui32MappingTable, ++ OSGetPageShift(), ++ uiFlags, ++ sizeof("PMR PhysMemTest"), ++ "PMR PhysMemTest", ++ OSGetCurrentClientProcessIDKM(), ++ &psPMR, ++ PDUMP_NONE, ++ NULL); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PhysmemNewRamBackedPMR"); ++ ++ /* Check whether allocated PMR can be locked and obtain physical ++ * addresses of underlying memory pages. ++ */ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PMRLockSysPhysAddresses", ErrorUnrefPMR); ++ ++ /* Map the physical page(s) into kernel space, acquire kernel mapping ++ * for PMR. ++ */ ++ eError = PMRAcquireKernelMappingData(psPMR, 0, uiPageSize * PHYSMEM_TEST_PAGES, &pvKernAddr, &uiMappedSize, &hPrivData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", ErrorUnlockPhysAddresses); ++ ++ PVR_ASSERT((uiPageSize * PHYSMEM_TEST_PAGES) == uiMappedSize); ++ ++ /* Test various patterns */ ++ eError = TestPatternU64(pvKernAddr, uiMappedSize); ++ if (eError != PVRSRV_OK) ++ { ++ goto ErrorReleaseKernelMappingData; ++ } ++ ++ eError = TestPatternU32(pvKernAddr, uiMappedSize); ++ if (eError != PVRSRV_OK) ++ { ++ goto ErrorReleaseKernelMappingData; ++ } ++ ++ eError = TestPatternU16(pvKernAddr, uiMappedSize); ++ if (eError != PVRSRV_OK) ++ { ++ goto ErrorReleaseKernelMappingData; ++ } ++ ++ eError = TestPatternU8(pvKernAddr, uiMappedSize); ++ if (eError != PVRSRV_OK) ++ { ++ goto ErrorReleaseKernelMappingData; ++ } ++ ++ /* Test split cachelines */ ++ eError = TestSplitCacheline(pvKernAddr, uiMappedSize); ++ ++ErrorReleaseKernelMappingData: ++ (void) PMRReleaseKernelMappingData(psPMR, hPrivData); ++ ++ErrorUnlockPhysAddresses: ++ /* Unlock and Unref the PMR to destroy it, ignore returned value */ ++ (void) PMRUnlockSysPhysAddresses(psPMR); ++ErrorUnrefPMR: ++ (void) PMRUnrefPMR(psPMR); ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR ++PhysMemTestRun(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_MEMALLOCFLAGS_T uiFlags, IMG_UINT32 ui32Passes) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 i; ++ ++ /* PMR validation test */ ++ eError = PMRValidationTest(psDeviceNode, uiFlags); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: PMR validation test failed!", ++ __func__)); ++ return eError; ++ } ++ ++ for (i = 0; i < ui32Passes; i++) ++ { ++ /* Mem test */ ++ eError = MemTestPatterns(psDeviceNode, uiFlags); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: [Pass#%u] 
MemTestPatterns failed!", ++ __func__, i)); ++ break; ++ } ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = pvDevConfig; ++ PVRSRV_ERROR eError; ++ ++ /* validate memtest passes requested */ ++ ui32MemTestPasses = (ui32MemTestPasses > PHYSMEM_TEST_PASSES_MAX)? PHYSMEM_TEST_PASSES_MAX : ui32MemTestPasses; ++ ++ /* Do minimal initialisation before test */ ++ eError = PhysMemTestInit(&psDeviceNode, psDevConfig); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Test failed to initialize", __func__)); ++ return eError; ++ } ++ ++ /* GPU local mem */ ++ eError = PhysMemTestRun(psDeviceNode, 0, ui32MemTestPasses); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "GPU local memory test failed!")); ++ goto ErrorPhysMemTestDeinit; ++ } ++ ++ /* CPU local mem */ ++ eError = PhysMemTestRun(psDeviceNode, PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL), ui32MemTestPasses); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "CPU local memory test failed!")); ++ goto ErrorPhysMemTestDeinit; ++ } ++ ++ PVR_LOG(("PhysMemTest: Passed.")); ++ goto PhysMemTestPassed; ++ ++ErrorPhysMemTestDeinit: ++ PVR_DPF((PVR_DBG_ERROR, "PhysMemTest: Failed.")); ++PhysMemTestPassed: ++ PhysMemTestDeInit(psDeviceNode); ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/physmem_test.h b/drivers/gpu/drm/img-rogue/physmem_test.h +new file mode 100644 +index 000000000000..684c729d0e51 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/physmem_test.h +@@ -0,0 +1,51 @@ ++/*************************************************************************/ /*! ++@Title Physmem test header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for single entry point for testing of page factories ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef SRVSRV_PHYSMEM_TEST_H ++#define SRVSRV_PHYSMEM_TEST_H ++/* ++ * PhysMemTest ++ */ ++PVRSRV_ERROR ++PhysMemTest(void *pvDevConfig, IMG_UINT32 ui32MemTestPasses); ++ ++#endif /* SRVSRV_PHYSMEM_TEST_H */ +diff --git a/drivers/gpu/drm/img-rogue/plato_drv.h b/drivers/gpu/drm/img-rogue/plato_drv.h +new file mode 100644 +index 000000000000..3a0414b86a73 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/plato_drv.h +@@ -0,0 +1,416 @@ ++/* ++ * @File plato_drv.h ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#ifndef _PLATO_DRV_H
++#define _PLATO_DRV_H
++
++/*
++ * This contains the hooks for the plato pci driver, as used by the
++ * Rogue and PDP sub-devices, and the platform data passed to each of their
++ * drivers
++ */
++
++/* (Include list inferred from the types and helpers used in this header.) */
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/version.h>
++
++// Debug output:
++// Sometimes we will want to always output info or error even in release mode.
++// In that case use dev_info, dev_err directly.
++#if defined(PLATO_DRM_DEBUG)
++    #define plato_dev_info(dev, fmt, ...) \
++        dev_info(dev, fmt, ##__VA_ARGS__)
++    #define plato_dev_warn(dev, fmt, ...) \
++        dev_warn(dev, fmt, ##__VA_ARGS__)
++    #define plato_dev_error(dev, fmt, ...) \
++        dev_err(dev, fmt, ##__VA_ARGS__)
++    #define PLATO_DRM_CHECKPOINT pr_info("line %d\n", __LINE__)
++#else
++    #define plato_dev_info(dev, fmt, ...)
++    #define plato_dev_warn(dev, fmt, ...)
++    #define plato_dev_error(dev, fmt, ...)
++    #define PLATO_DRM_CHECKPOINT
++#endif
++
++#define PLATO_INIT_SUCCESS 0
++#define PLATO_INIT_FAILURE 1
++#define PLATO_INIT_RETRY 2
++
++#define PCI_VENDOR_ID_PLATO (0x1AEE)
++#define PCI_DEVICE_ID_PLATO (0x0003)
++
++#define PLATO_SYSTEM_NAME "Plato"
++
++/* Interrupt defines */
++enum PLATO_INTERRUPT {
++    PLATO_INTERRUPT_GPU = 0,
++    PLATO_INTERRUPT_PDP,
++    PLATO_INTERRUPT_HDMI,
++    PLATO_INTERRUPT_MAX,
++};
++
++#define PLATO_INT_SHIFT_GPU (0)
++#define PLATO_INT_SHIFT_PDP (8)
++#define PLATO_INT_SHIFT_HDMI (9)
++#define PLATO_INT_SHIFT_HDMI_WAKEUP (11)
++#define PLATO_INT_SHIFT_TEMP_A (12)
++
++
++struct plato_region {
++    resource_size_t base;
++    resource_size_t size;
++};
++
++struct plato_io_region {
++    struct plato_region region;
++    void __iomem *registers;
++};
++
++/* The following structs are initialised and passed down by the parent plato
++ * driver to the respective sub-drivers
++ */
++
++#define PLATO_DEVICE_NAME_PDP "plato_pdp"
++#define PLATO_PDP_RESOURCE_REGS "pdp-regs"
++#define PLATO_PDP_RESOURCE_BIF_REGS "pdp-bif-regs"
++
++#define PLATO_DEVICE_NAME_HDMI "plato_hdmi"
++#define PLATO_HDMI_RESOURCE_REGS "hdmi-regs"
++
++struct plato_pdp_platform_data {
++    resource_size_t memory_base;
++
++    /* The following is used by the drm_pdp driver as it manages the
++     * pdp memory
++     */
++    resource_size_t pdp_heap_memory_base;
++    resource_size_t pdp_heap_memory_size;
++
++    /* Used to export host address instead of pdp address,
++     * defaults to false.
++ */ ++ bool dma_map_export_host_addr; ++}; ++ ++struct plato_hdmi_platform_data { ++ resource_size_t plato_memory_base; ++}; ++ ++ ++#define PLATO_DEVICE_NAME_ROGUE "plato_rogue" ++#define PLATO_ROGUE_RESOURCE_REGS "rogue-regs" ++ ++struct plato_rogue_platform_data { ++ ++ /* The base address of the plato memory (CPU physical address) - ++ * used to convert from CPU-Physical to device-physical addresses ++ */ ++ resource_size_t plato_memory_base; ++ ++ /* The following is used to setup the services heaps */ ++ int has_nonmappable; ++ struct plato_region rogue_heap_mappable; ++ resource_size_t rogue_heap_dev_addr; ++ struct plato_region rogue_heap_nonmappable; ++#if defined(SUPPORT_PLATO_DISPLAY) ++ struct plato_region pdp_heap; ++#endif ++}; ++ ++struct plato_interrupt_handler { ++ bool enabled; ++ void (*handler_function)(void *data); ++ void *handler_data; ++}; ++ ++struct plato_device { ++ struct pci_dev *pdev; ++ ++ struct plato_io_region sys_io; ++ struct plato_io_region aon_regs; ++ ++ spinlock_t interrupt_handler_lock; ++ spinlock_t interrupt_enable_lock; ++ ++ struct plato_interrupt_handler interrupt_handlers[PLATO_INTERRUPT_MAX]; ++ ++ struct plato_region rogue_mem; ++ struct plato_region rogue_heap_mappable; ++ struct plato_region rogue_heap_nonmappable; ++ int has_nonmappable; ++ ++ resource_size_t dev_mem_base; /* Pointer to device memory base */ ++ ++ struct platform_device *rogue_dev; ++ ++#if defined(SUPPORT_PLATO_DISPLAY) ++ struct platform_device *pdp_dev; ++ struct plato_region pdp_heap; ++ ++ struct platform_device *hdmi_dev; ++#endif ++ ++#if defined(CONFIG_MTRR) || (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)) ++ int mtrr; ++#endif ++}; ++ ++#if defined(PLATO_LOG_CHECKPOINTS) ++#define PLATO_CHECKPOINT(p) dev_info(&p->pdev->dev, \ ++ "- %s: %d", __func__, __LINE__) ++#else ++#define PLATO_CHECKPOINT(p) ++#endif ++ ++#define plato_write_reg32(base, offset, value) \ ++ iowrite32(value, (base) + (offset)) ++#define plato_read_reg32(base, offset) ioread32(base + offset) ++#define plato_sleep_ms(x) msleep(x) ++#define plato_sleep_us(x) msleep(x/1000) ++ ++/* Valid values for the PLATO_MEMORY_CONFIG configuration option */ ++#define PLATO_MEMORY_LOCAL (1) ++#define PLATO_MEMORY_HOST (2) ++#define PLATO_MEMORY_HYBRID (3) ++ ++#if defined(PLATO_MEMORY_CONFIG) ++#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HYBRID) ++#define PVRSRV_PHYS_HEAP_CONFIG_PDP_LOCAL_ID 2 ++#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL) ++#define PVRSRV_PHYS_HEAP_CONFIG_PDP_LOCAL_ID 1 ++#endif ++#endif /* PLATO_MEMORY_CONFIG */ ++ ++#define DCPDP_PHYS_HEAP_ID PVRSRV_PHYS_HEAP_CONFIG_PDP_LOCAL_ID ++ ++#define PLATO_PDP_MEM_SIZE (384 * 1024 * 1024) ++ ++#define SYS_PLATO_REG_PCI_BASENUM (1) ++#define SYS_PLATO_REG_REGION_SIZE (4 * 1024 * 1024) ++ ++/* ++ * Give system region a whole span of the reg space including ++ * RGX registers. That's because there are sys register segments ++ * both before and after the RGX segment. 
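++ * (See the offsets below: the NOC segment at 0x160000 and the AON
++ * segment at 0x180000 bracket the RGX segment at 0x170000.)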
++ */ ++#define SYS_PLATO_REG_SYS_OFFSET (0x0) ++#define SYS_PLATO_REG_SYS_SIZE (4 * 1024 * 1024) ++ ++/* Entire Peripheral region */ ++#define SYS_PLATO_REG_PERIP_OFFSET (0x20000) ++#define SYS_PLATO_REG_PERIP_SIZE (164 * 1024) ++ ++/* Chip level registers */ ++#define SYS_PLATO_REG_CHIP_LEVEL_OFFSET (SYS_PLATO_REG_PERIP_OFFSET) ++#define SYS_PLATO_REG_CHIP_LEVEL_SIZE (64 * 1024) ++ ++#define SYS_PLATO_REG_TEMPA_OFFSET (0x80000) ++#define SYS_PLATO_REG_TEMPA_SIZE (64 * 1024) ++ ++/* USB, DMA not included */ ++ ++#define SYS_PLATO_REG_DDR_A_CTRL_OFFSET (0x120000) ++#define SYS_PLATO_REG_DDR_A_CTRL_SIZE (64 * 1024) ++ ++#define SYS_PLATO_REG_DDR_B_CTRL_OFFSET (0x130000) ++#define SYS_PLATO_REG_DDR_B_CTRL_SIZE (64 * 1024) ++ ++#define SYS_PLATO_REG_DDR_A_PUBL_OFFSET (0x140000) ++#define SYS_PLATO_REG_DDR_A_PUBL_SIZE (64 * 1024) ++ ++#define SYS_PLATO_REG_DDR_B_PUBL_OFFSET (0x150000) ++#define SYS_PLATO_REG_DDR_B_PUBL_SIZE (64 * 1024) ++ ++#define SYS_PLATO_REG_NOC_OFFSET (0x160000) ++#define SYS_PLATO_REG_NOC_SIZE (64 * 1024) ++ ++/* Debug NOC registers */ ++#define SYS_PLATO_REG_NOC_DBG_DDR_A_CTRL_OFFSET (0x1500) ++#define SYS_PLATO_REG_NOC_DBG_DDR_A_DATA_OFFSET (0x1580) ++#define SYS_PLATO_REG_NOC_DBG_DDR_A_PUBL_OFFSET (0x1600) ++#define SYS_PLATO_REG_NOC_DBG_DDR_B_CTRL_OFFSET (0x1680) ++#define SYS_PLATO_REG_NOC_DBG_DDR_B_DATA_OFFSET (0x1700) ++#define SYS_PLATO_REG_NOC_DBG_DDR_B_PUBL_OFFSET (0x1780) ++#define SYS_PLATO_REG_NOC_DBG_DISPLAY_S_OFFSET (0x1800) ++#define SYS_PLATO_REG_NOC_DBG_GPIO_0_S_OFFSET (0x1900) ++#define SYS_PLATO_REG_NOC_DBG_GPIO_1_S_OFFSET (0x1980) ++#define SYS_PLATO_REG_NOC_DBG_GPU_S_OFFSET (0x1A00) ++#define SYS_PLATO_REG_NOC_DBG_PCI_PHY_OFFSET (0x1A80) ++#define SYS_PLATO_REG_NOC_DBG_PCI_REG_OFFSET (0x1B00) ++#define SYS_PLATO_REG_NOC_DBG_PCI_S_OFFSET (0x1B80) ++#define SYS_PLATO_REG_NOC_DBG_PERIPH_S_OFFSET (0x1c00) ++#define SYS_PLATO_REG_NOC_DBG_RET_REG_OFFSET (0x1D00) ++#define SYS_PLATO_REG_NOC_DBG_SERVICE_OFFSET (0x1E00) ++ ++#define SYS_PLATO_REG_RGX_OFFSET (0x170000) ++#define SYS_PLATO_REG_RGX_SIZE (64 * 1024) ++ ++#define SYS_PLATO_REG_AON_OFFSET (0x180000) ++#define SYS_PLATO_REG_AON_SIZE (64 * 1024) ++ ++#define SYS_PLATO_REG_PDP_OFFSET (0x200000) ++#define SYS_PLATO_REG_PDP_SIZE (0x1000) ++ ++#define SYS_PLATO_REG_PDP_BIF_OFFSET \ ++ (SYS_PLATO_REG_PDP_OFFSET + SYS_PLATO_REG_PDP_SIZE) ++#define SYS_PLATO_REG_PDP_BIF_SIZE (0x200) ++ ++#define SYS_PLATO_REG_HDMI_OFFSET \ ++ (SYS_PLATO_REG_PDP_OFFSET + 0x20000) ++#define SYS_PLATO_REG_HDMI_SIZE (128 * 1024) ++ ++/* Device memory (including HP mapping) on base register 4 */ ++#define SYS_DEV_MEM_PCI_BASENUM (4) ++ ++/* Device memory size */ ++#define ONE_GB_IN_BYTES (0x40000000ULL) ++#define SYS_DEV_MEM_REGION_SIZE \ ++ (PLATO_MEMORY_SIZE_GIGABYTES * ONE_GB_IN_BYTES) ++ ++/* Plato DDR offset in device memory map at 32GB */ ++#define PLATO_DDR_DEV_PHYSICAL_BASE (0x800000000) ++ ++/* DRAM is split at 48GB */ ++#define PLATO_DRAM_SPLIT_ADDR (0xc00000000) ++ ++/* ++ * Plato DDR region is aliased if less than 32GB memory is present. ++ * This defines memory base closest to the DRAM split point. 
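++ * For example, with a hypothetical 16GB fit-out
++ * (PLATO_MEMORY_SIZE_GIGABYTES = 16), the aliased base works out as
++ * 0xc00000000 - 8GB = 0xa00000000 and the aliased end as
++ * 0xc00000000 + 8GB = 0xe00000000.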
++ * If 32GB is present this is equal to PLATO_DDR_DEV_PHYSICAL_BASE ++ */ ++#define PLATO_DDR_ALIASED_DEV_PHYSICAL_BASE \ ++ (PLATO_DRAM_SPLIT_ADDR - (SYS_DEV_MEM_REGION_SIZE >> 1)) ++ ++#define PLATO_DDR_ALIASED_DEV_PHYSICAL_END \ ++ (PLATO_DRAM_SPLIT_ADDR + (SYS_DEV_MEM_REGION_SIZE >> 1)) ++ ++#define PLATO_DDR_ALIASED_DEV_SEGMENT_SIZE \ ++ ((32ULL / PLATO_MEMORY_SIZE_GIGABYTES) * ONE_GB_IN_BYTES) ++ ++/* Plato Host memory offset in device memory map at 512GB */ ++#define PLATO_HOSTRAM_DEV_PHYSICAL_BASE (0x8000000000) ++ ++/* Plato PLL, DDR/GPU, PDP and HDMI-SFR/CEC clocks */ ++#define PLATO_PLL_REF_CLOCK_SPEED (19200000) ++ ++/* 600 MHz */ ++#define PLATO_MEM_CLOCK_SPEED (600000000) ++#define PLATO_MIN_MEM_CLOCK_SPEED (600000000) ++#define PLATO_MAX_MEM_CLOCK_SPEED (800000000) ++ ++/* 396 MHz (~400 MHz) on HW, around 1MHz on the emulator */ ++#if defined(EMULATOR) || defined(VIRTUAL_PLATFORM) ++#define PLATO_RGX_CORE_CLOCK_SPEED (1000000) ++#define PLATO_RGX_MIN_CORE_CLOCK_SPEED (1000000) ++#define PLATO_RGX_MAX_CORE_CLOCK_SPEED (1000000) ++#else ++ ++#define PLATO_RGX_CORE_CLOCK_SPEED (396000000) ++#define PLATO_RGX_MIN_CORE_CLOCK_SPEED (396000000) ++#define PLATO_RGX_MAX_CORE_CLOCK_SPEED (742500000) ++#endif ++ ++#define PLATO_MIN_PDP_CLOCK_SPEED (165000000) ++#define PLATO_TARGET_HDMI_SFR_CLOCK_SPEED (27000000) ++#define PLATO_TARGET_HDMI_CEC_CLOCK_SPEED (32768) ++ ++#define REG_TO_CELSIUS(reg) (((reg) * 352/4096) - 109) ++#define CELSIUS_TO_REG(temp) ((((temp) + 109) * 4096) / 352) ++#define PLATO_MAX_TEMP_CELSIUS (100) ++ ++#define PLATO_LMA_HEAP_REGION_MAPPABLE 0 ++#define PLATO_LMA_HEAP_REGION_NONMAPPABLE 1 ++ ++struct plato_debug_register { ++ char *description; ++ unsigned int offset; ++ unsigned int value; ++}; ++ ++#if defined(ENABLE_PLATO_HDMI) ++ ++#if defined(HDMI_PDUMP) ++/* Hard coded video formats for pdump type run only */ ++#define VIDEO_FORMAT_1280_720p 0 ++#define VIDEO_FORMAT_1920_1080p 1 ++#define DC_DEFAULT_VIDEO_FORMAT (VIDEO_FORMAT_1920_1080p) ++#endif ++ ++#endif /* ENABLE_PLATO_HDMI */ ++ ++/* Exposed APIs */ ++int plato_enable(struct device *dev); ++void plato_disable(struct device *dev); ++ ++int plato_enable_interrupt(struct device *dev, ++ enum PLATO_INTERRUPT interrupt_id); ++int plato_disable_interrupt(struct device *dev, ++ enum PLATO_INTERRUPT interrupt_id); ++ ++int plato_set_interrupt_handler(struct device *dev, ++ enum PLATO_INTERRUPT interrupt_id, ++ void (*handler_function)(void *), ++ void *handler_data); ++unsigned int plato_core_clock_speed(struct device *dev); ++unsigned int plato_mem_clock_speed(struct device *dev); ++unsigned int plato_pll_clock_speed(struct device *dev, ++ unsigned int clock_speed); ++void plato_enable_pdp_clock(struct device *dev); ++void plato_enable_pixel_clock(struct device *dev, u32 pixel_clock); ++ ++int plato_debug_info(struct device *dev, ++ struct plato_debug_register *noc_dbg_regs, ++ struct plato_debug_register *aon_dbg_regs); ++ ++/* Internal */ ++int plato_memory_init(struct plato_device *plato); ++void plato_memory_deinit(struct plato_device *plato); ++int plato_cfg_init(struct plato_device *plato); ++int request_pci_io_addr(struct pci_dev *pdev, u32 index, ++ resource_size_t offset, resource_size_t length); ++void release_pci_io_addr(struct pci_dev *pdev, u32 index, ++ resource_size_t start, resource_size_t length); ++ ++#endif /* _PLATO_DRV_H */ +diff --git a/drivers/gpu/drm/img-rogue/pmr.c b/drivers/gpu/drm/img-rogue/pmr.c +new file mode 100644 +index 000000000000..8c4575b7b61b +--- /dev/null ++++ 
b/drivers/gpu/drm/img-rogue/pmr.c +@@ -0,0 +1,3697 @@ ++/*************************************************************************/ /*! ++@File ++@Title Physmem (PMR) abstraction ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of the memory management. This module is responsible for ++ the "PMR" abstraction. A PMR (Physical Memory Resource) ++ represents some unit of physical memory which is ++ allocated/freed/mapped/unmapped as an indivisible unit ++ (higher software levels provide an abstraction above that ++ to deal with dividing this down into smaller manageable units). ++ Importantly, this module knows nothing of virtual memory, or ++ of MMUs etc., with one excusable exception. We have the ++ concept of a "page size", which really means nothing in ++ physical memory, but represents a "contiguity quantum" such ++ that the higher level modules which map this memory are able ++ to verify that it matches the needs of the page size for the ++ virtual realm into which it is being mapped. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ /**************************************************************************/ ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "pvrsrv_error.h" ++ ++#include "pdump.h" ++#include "devicemem_server_utils.h" ++ ++#include "osfunc.h" ++#include "pdump_km.h" ++#include "pdump_physmem.h" ++#include "pmr_impl.h" ++#include "pmr_os.h" ++#include "pvrsrv.h" ++ ++#include "allocmem.h" ++#include "lock.h" ++#include "uniq_key_splay_tree.h" ++ ++#if defined(SUPPORT_SECURE_EXPORT) ++#include "secure_export.h" ++#include "ossecure_export.h" ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++#include "ri_server.h" ++#endif ++ ++/* ourselves */ ++#include "pmr.h" ++ ++#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) ++#include "mmap_stats.h" ++#endif ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#include "process_stats.h" ++#include "proc_stats.h" ++#endif ++ ++#include "pdump_km.h" ++ ++/* Memalloc flags can be converted into pmr, ra or psplay flags. ++ * Ensure flags types are same size. ++ */ ++static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(PMR_FLAGS_T), ++ "Mismatch memalloc and pmr flags type size."); ++static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(RA_FLAGS_T), ++ "Mismatch memalloc and ra flags type size."); ++static_assert(sizeof(PVRSRV_MEMALLOCFLAGS_T) == sizeof(IMG_PSPLAY_FLAGS_T), ++ "Mismatch memalloc and psplay flags type size."); ++ ++/* A "context" for the physical memory block resource allocator. ++ * ++ * Context is probably the wrong word. ++ * ++ * There is almost certainly only one of these, ever, in the system. ++ * But, let's keep the notion of a context anyway, "just-in-case". ++ */ ++static struct _PMR_CTX_ ++{ ++ /* For debugging, and PDump, etc., let's issue a forever incrementing ++ * serial number to each allocation. ++ */ ++ IMG_UINT64 uiNextSerialNum; ++ ++ /* For security, we only allow a PMR to be mapped if the caller knows ++ * its key. We can pseudo-randomly generate keys ++ */ ++ IMG_UINT64 uiNextKey; ++ ++ /* For debugging only, I guess: Number of live PMRs */ ++ IMG_UINT32 uiNumLivePMRs; ++ ++ /* Lock for this structure */ ++ POS_LOCK hLock; ++ ++ /* In order to seed the uiNextKey, we enforce initialisation at driver ++ * load time. Also, we can debug check at driver unload that the PMR ++ * count is zero. ++ */ ++ IMG_BOOL bModuleInitialised; ++} _gsSingletonPMRContext = { 1, 0, 0, NULL, IMG_FALSE }; ++ ++ ++/* A PMR. One per physical allocation. May be "shared". ++ * ++ * "shared" is ambiguous. We need to be careful with terminology. ++ * There are two ways in which a PMR may be "shared" and we need to be sure ++ * that we are clear which we mean. ++ * ++ * i) multiple small allocations living together inside one PMR. ++ * ++ * ii) one single allocation filling a PMR but mapped into multiple memory ++ * contexts. ++ * ++ * This is more important further up the stack - at this level, all we care is ++ * that the PMR is being referenced multiple times. ++ */ ++struct _PMR_ ++{ ++ /* This object is strictly refcounted. 
References include: ++ * - mapping ++ * - live handles (to this object) ++ * - live export handles ++ * (thus it is normal for allocated and exported memory to have a refcount of 3) ++ * The object is destroyed when and only when the refcount reaches 0 ++ */ ++ ++ /* Physical address translation (device <> cpu) is done on a per device ++ * basis which means we need the physical heap info ++ */ ++ PHYS_HEAP *psPhysHeap; ++ ++ ATOMIC_T iRefCount; ++ ++ /* Lock count - this is the number of times PMRLockSysPhysAddresses() ++ * has been called, less the number of PMRUnlockSysPhysAddresses() ++ * calls. This is arguably here for debug reasons only, as the refcount ++ * is already incremented as a matter of course. ++ * Really, this just allows us to trap protocol errors: i.e. calling ++ * PMRSysPhysAddr(), without a lock, or calling ++ * PMRUnlockSysPhysAddresses() too many or too few times. ++ */ ++ ATOMIC_T iLockCount; ++ ++ /* Lock for this structure */ ++ POS_LOCK hLock; ++ ++ /* Incrementing serial number to each allocation. */ ++ IMG_UINT64 uiSerialNum; ++ ++ /* For security, we only allow a PMR to be mapped if the caller knows ++ * its key. We can pseudo-randomly generate keys ++ */ ++ PMR_PASSWORD_T uiKey; ++ ++ /* Callbacks for per-flavour functions */ ++ const PMR_IMPL_FUNCTAB *psFuncTab; ++ ++ /* Data associated with the "subtype" */ ++ PMR_IMPL_PRIVDATA pvFlavourData; ++ ++ /* What kind of PMR do we have? */ ++ PMR_IMPL_TYPE eFlavour; ++ ++ /* And for pdump */ ++ const IMG_CHAR *pszPDumpDefaultMemspaceName; ++ ++ /* Allocation annotation */ ++ IMG_CHAR szAnnotation[DEVMEM_ANNOTATION_MAX_LEN]; ++ ++#if defined(PDUMP) ++ ++ IMG_HANDLE hPDumpAllocHandle; ++ ++ IMG_UINT32 uiNumPDumpBlocks; ++#endif ++ ++ /* Logical size of allocation. "logical", because a PMR can represent ++ * memory that will never physically exist. This is the amount of ++ * virtual space that the PMR would consume when it's mapped into a ++ * virtual allocation. ++ */ ++ PMR_SIZE_T uiLogicalSize; ++ ++ /* Mapping table for the allocation. ++ * PMR's can be sparse in which case not all the "logic" addresses in ++ * it are valid. We need to know which addresses are and aren't valid ++ * when mapping or reading the PMR. ++ * The mapping table translates "logical" offsets into physical offsets ++ * which is what we always pass to the PMR factory (so it doesn't have ++ * to be concerned about sparseness issues) ++ */ ++ PMR_MAPPING_TABLE *psMappingTable; ++ ++ /* Indicates whether this PMR has been allocated as sparse. ++ * The condition for this variable to be set at allocation time is: ++ * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1) ++ */ ++ IMG_BOOL bSparseAlloc; ++ ++ /* Indicates whether this PMR has been unpinned. ++ * By default, all PMRs are pinned at creation. ++ */ ++ IMG_BOOL bIsUnpinned; ++ ++ /* ++ * Flag that conveys mutability of the PMR: ++ * - TRUE indicates the PMR is immutable (no more memory changes) ++ * - FALSE means the memory layout associated with the PMR is mutable ++ * ++ * A PMR is always mutable by default but is marked immutable on the ++ * first export for the rest of its life. ++ * ++ * Also, any PMRs that track the same memory through imports are ++ * marked immutable as well. ++ */ ++ IMG_BOOL bNoLayoutChange; ++ ++ /* Minimum Physical Contiguity Guarantee. Might be called "page size", ++ * but that would be incorrect, as page size is something meaningful ++ * only in virtual realm. 
This contiguity guarantee provides an ++ * inequality that can be verified/asserted/whatever to ensure that ++ * this PMR conforms to the page size requirement of the place the PMR ++ * gets mapped. (May be used to select an appropriate heap in variable ++ * page size systems) ++ * ++ * The absolutely necessary condition is this: ++ * ++ * device MMU page size <= actual physical contiguity. ++ * ++ * We go one step further in order to be able to provide an early ++ * warning / early compatibility check and say this: ++ * ++ * device MMU page size <= ++ * 2**(uiLog2ContiguityGuarantee) <= ++ * actual physical contiguity. ++ * ++ * In this way, it is possible to make the page table reservation ++ * in the device MMU without even knowing the granularity of the ++ * physical memory (i.e. useful for being able to allocate virtual ++ * before physical) ++ */ ++ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee; ++ ++ /* Flags. We store a copy of the "PMR flags" (usually a subset of the ++ * flags given at allocation time) and return them to any caller of ++ * PMR_Flags(). The intention of these flags is that the ones stored ++ * here are used to represent permissions, such that no one is able ++ * to map a PMR in a mode in which they are not allowed, e.g., ++ * writeable for a read-only PMR, etc. ++ */ ++ PMR_FLAGS_T uiFlags; ++ ++ /* Do we really need this? ++ * For now we'll keep it, until we know we don't. ++ * NB: this is not the "memory context" in client terms - this is ++ * _purely_ the "PMR" context, of which there is almost certainly only ++ * ever one per system as a whole, but we'll keep the concept anyway, ++ * just-in-case. ++ */ ++ struct _PMR_CTX_ *psContext; ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ /* Stored handle to PMR RI entry */ ++ void *hRIHandle; ++#endif ++}; ++ ++/* Do we need a struct for the export handle? 
++ * I'll use one for now, but if nothing goes in it, we'll lose it ++ */ ++struct _PMR_EXPORT_ ++{ ++ struct _PMR_ *psPMR; ++}; ++ ++struct _PMR_PAGELIST_ ++{ ++ struct _PMR_ *psReferencePMR; ++}; ++ ++#if defined(PDUMP) ++static INLINE IMG_BOOL _IsHostDevicePMR(const PMR *const psPMR) ++{ ++ const PVRSRV_DEVICE_NODE *psDevNode = PVRSRVGetPVRSRVData()->psHostMemDeviceNode; ++ return psPMR->psPhysHeap == psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_CPU_LOCAL]; ++} ++ ++static void ++PDumpPMRFreePMR(PMR *psPMR, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiBlockSize, ++ IMG_UINT32 uiLog2Contiguity, ++ IMG_HANDLE hPDumpAllocationInfoHandle); ++ ++static void ++PDumpPMRMallocPMR(PMR *psPMR, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiBlockSize, ++ IMG_UINT32 ui32ChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *puiMappingTable, ++ IMG_UINT32 uiLog2Contiguity, ++ IMG_BOOL bInitialise, ++ IMG_UINT32 ui32InitValue, ++ IMG_HANDLE *phPDumpAllocInfoOut, ++ IMG_UINT32 ui32PDumpFlags); ++ ++static void ++PDumpPMRChangeSparsePMR(PMR *psPMR, ++ IMG_UINT32 uiBlockSize, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_BOOL bInitialise, ++ IMG_UINT32 ui32InitValue, ++ IMG_HANDLE *phPDumpAllocInfoOut); ++#endif /* defined PDUMP */ ++ ++PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR) ++{ ++ PPVRSRV_DEVICE_NODE psReturnedDeviceNode = NULL; ++ ++ PVR_ASSERT(psExportPMR != NULL); ++ if (psExportPMR) ++ { ++ PVR_ASSERT(psExportPMR->psPMR != NULL); ++ if (psExportPMR->psPMR) ++ { ++ PVR_ASSERT(OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0); ++ if (OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0) ++ { ++ psReturnedDeviceNode = PMR_DeviceNode(psExportPMR->psPMR); ++ } ++ } ++ } ++ ++ return psReturnedDeviceNode; ++} ++ ++static PVRSRV_ERROR ++_PMRCreate(PMR_SIZE_T uiLogicalSize, ++ PMR_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, ++ PMR_FLAGS_T uiFlags, ++ PMR **ppsPMR) ++{ ++ void *pvPMRLinAddr; ++ PMR *psPMR; ++ PMR_MAPPING_TABLE *psMappingTable; ++ struct _PMR_CTX_ *psContext; ++ IMG_UINT32 i, ui32Temp = 0; ++ IMG_UINT32 ui32Remainder; ++ PVRSRV_ERROR eError; ++ IMG_BOOL bSparse = IMG_FALSE; ++ ++ psContext = &_gsSingletonPMRContext; ++ ++ /* Do we have a sparse allocation? 
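++ * (i.e. the virtual chunk count differs from the physical chunk
++ * count, or the PMR spans more than one virtual chunk)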
*/
++    if ( (ui32NumVirtChunks != ui32NumPhysChunks) ||
++         (ui32NumVirtChunks > 1) )
++    {
++        bSparse = IMG_TRUE;
++    }
++
++    /* Extra checks required for sparse PMRs */
++    if (uiLogicalSize != uiChunkSize)
++    {
++        /* Check the logical size and chunk information agree with each other */
++        if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks))
++        {
++            PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)",
++                     __func__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks));
++            return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
++        }
++
++        /* Check that the chunk size is a multiple of the contiguity */
++        OSDivide64(uiChunkSize, (1 << uiLog2ContiguityGuarantee), &ui32Remainder);
++        if (ui32Remainder)
++        {
++            PVR_DPF((PVR_DBG_ERROR,
++                     "%s: Bad chunk size, must be a multiple of the contiguity "
++                     "(uiChunkSize = 0x%llx, uiLog2ContiguityGuarantee = %u)",
++                     __func__,
++                     (unsigned long long) uiChunkSize,
++                     uiLog2ContiguityGuarantee));
++            return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE;
++        }
++    }
++
++    pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks);
++    PVR_RETURN_IF_NOMEM(pvPMRLinAddr);
++
++    psPMR = (PMR *) pvPMRLinAddr;
++    psMappingTable = IMG_OFFSET_ADDR(pvPMRLinAddr, sizeof(*psPMR));
++
++    /* Setup the mapping table */
++    psMappingTable->uiChunkSize = uiChunkSize;
++    psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks;
++    psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks;
++    OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF, sizeof(psMappingTable->aui32Translation[0])*
++                   ui32NumVirtChunks);
++    for (i = 0; i < ui32NumPhysChunks; i++)
++    {
++        ui32Temp = pui32MappingTable[i];
++        if (ui32Temp < ui32NumVirtChunks)
++        {
++            psMappingTable->aui32Translation[ui32Temp] = ui32Temp;
++        }
++        else
++        {
++            OSFreeMem(psPMR);
++            return PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY;
++        }
++    }
++
++    eError = OSLockCreate(&psPMR->hLock);
++    if (eError != PVRSRV_OK)
++    {
++        OSFreeMem(psPMR);
++        return eError;
++    }
++
++    /* Setup the PMR */
++    OSAtomicWrite(&psPMR->iRefCount, 0);
++
++    /* If allocation is not made on demand, it will be backed now and
++     * backing will not be removed until the PMR is destroyed, therefore
++     * we can initialise the iLockCount to 1 rather than 0.
++     */
++    OSAtomicWrite(&psPMR->iLockCount, (PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 0 : 1));
++
++    psPMR->psContext = psContext;
++    psPMR->uiLogicalSize = uiLogicalSize;
++    psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee;
++    psPMR->uiFlags = uiFlags;
++    psPMR->psMappingTable = psMappingTable;
++    psPMR->bSparseAlloc = bSparse;
++    psPMR->bIsUnpinned = IMG_FALSE;
++    psPMR->bNoLayoutChange = IMG_FALSE;
++    psPMR->szAnnotation[0] = '\0';
++
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
++    psPMR->hRIHandle = NULL;
++#endif
++
++    OSLockAcquire(psContext->hLock);
++    psPMR->uiKey = psContext->uiNextKey;
++    psPMR->uiSerialNum = psContext->uiNextSerialNum;
++    psContext->uiNextKey = (0x80200003 * psContext->uiNextKey)
++                           ^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr);
++    psContext->uiNextSerialNum++;
++    *ppsPMR = psPMR;
++    PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR));
++    /* Increment live PMR count */
++    psContext->uiNumLivePMRs++;
++    OSLockRelease(psContext->hLock);
++
++    return PVRSRV_OK;
++}
++
++/* This function returns true if the PMR is in use and false otherwise.
++ * This function is not thread safe and hence the caller ++ * needs to ensure the thread safety by explicitly taking ++ * the lock on the PMR or through other means */ ++IMG_BOOL PMRIsPMRLive(PMR *psPMR) ++{ ++ return (OSAtomicRead(&psPMR->iRefCount) > 0); ++} ++ ++static IMG_UINT32 ++_Ref(PMR *psPMR) ++{ ++ PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) >= 0); ++ return OSAtomicIncrement(&psPMR->iRefCount); ++} ++ ++static IMG_UINT32 ++_Unref(PMR *psPMR) ++{ ++ PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) > 0); ++ return OSAtomicDecrement(&psPMR->iRefCount); ++} ++ ++static void ++_UnrefAndMaybeDestroy(PMR *psPMR) ++{ ++ PVRSRV_ERROR eError2; ++ struct _PMR_CTX_ *psCtx; ++ IMG_INT iRefCount; ++ ++ PVR_ASSERT(psPMR != NULL); ++ ++ /* Acquire PMR factory lock if provided */ ++ if (psPMR->psFuncTab->pfnGetPMRFactoryLock) ++ { ++ psPMR->psFuncTab->pfnGetPMRFactoryLock(); ++ } ++ ++ iRefCount = _Unref(psPMR); ++ ++ if (iRefCount == 0) ++ { ++ if (psPMR->psFuncTab->pfnFinalize != NULL) ++ { ++ eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData); ++ ++ /* PMR unref can be called asynchronously by the kernel or other ++ * third party modules (eg. display) which doesn't go through the ++ * usual services bridge. The same PMR can be referenced simultaneously ++ * in a different path that results in a race condition. ++ * Hence depending on the race condition, a factory may refuse to destroy ++ * the resource associated with this PMR if a reference on it was taken ++ * prior to unref. In that case the PMR factory function returns the error. ++ * ++ * When such an error is encountered, the factory needs to ensure the state ++ * associated with PMR is undisturbed. At this point we just bail out from ++ * freeing the PMR itself. The PMR handle will then be freed at a later point ++ * when the same PMR is unreferenced. ++ * */ ++ if (PVRSRV_ERROR_PMR_STILL_REFERENCED == eError2) ++ { ++ if (psPMR->psFuncTab->pfnReleasePMRFactoryLock) ++ { ++ psPMR->psFuncTab->pfnReleasePMRFactoryLock(); ++ } ++ return; ++ } ++ PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */ ++ } ++#if defined(PDUMP) ++ /* if allocation is done on the host node don't include it in the PDUMP */ ++ if (!_IsHostDevicePMR(psPMR)) ++ { ++ PDumpPMRFreePMR(psPMR, ++ psPMR->uiLogicalSize, ++ (1 << psPMR->uiLog2ContiguityGuarantee), ++ psPMR->uiLog2ContiguityGuarantee, ++ psPMR->hPDumpAllocHandle); ++ } ++#endif ++ ++#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) ++ /* This PMR is about to be destroyed, update its mmap stats record (if present) ++ * to avoid dangling pointer. Additionally, this is required because mmap stats ++ * are identified by PMRs and a new PMR down the line "might" get the same address ++ * as the one we're about to free and we'd like 2 different entries in mmaps ++ * stats for such cases */ ++ MMapStatsRemovePMR(psPMR); ++#endif ++ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ /* If not backed on demand, iLockCount should be 1 otherwise it should be 0 */ ++ PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 
0 : 1));
++#endif
++
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
++        {
++            PVRSRV_ERROR eError;
++
++            /* Delete RI entry */
++            if (psPMR->hRIHandle)
++            {
++                eError = RIDeletePMREntryKM (psPMR->hRIHandle);
++
++                if (eError != PVRSRV_OK)
++                {
++                    PVR_DPF((PVR_DBG_ERROR, "%s: RIDeletePMREntryKM failed: %s",
++                             __func__,
++                             PVRSRVGetErrorString(eError)));
++                    /* continue destroying the PMR */
++                }
++            }
++        }
++#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
++        psCtx = psPMR->psContext;
++
++        OSLockDestroy(psPMR->hLock);
++
++        /* Release PMR factory lock acquired if any */
++        if (psPMR->psFuncTab->pfnReleasePMRFactoryLock)
++        {
++            psPMR->psFuncTab->pfnReleasePMRFactoryLock();
++        }
++
++        OSFreeMem(psPMR);
++
++        /* Decrement live PMR count. Probably only of interest for debugging */
++        PVR_ASSERT(psCtx->uiNumLivePMRs > 0);
++
++        OSLockAcquire(psCtx->hLock);
++        psCtx->uiNumLivePMRs--;
++        OSLockRelease(psCtx->hLock);
++    }
++    else
++    {
++        /* Release PMR factory lock acquired if any */
++        if (psPMR->psFuncTab->pfnReleasePMRFactoryLock)
++        {
++            psPMR->psFuncTab->pfnReleasePMRFactoryLock();
++        }
++    }
++}
++
++static IMG_BOOL _PMRIsSparse(const PMR *psPMR)
++{
++    return psPMR->bSparseAlloc;
++}
++
++PVRSRV_ERROR
++PMRCreatePMR(PHYS_HEAP *psPhysHeap,
++             PMR_SIZE_T uiLogicalSize,
++             PMR_SIZE_T uiChunkSize,
++             IMG_UINT32 ui32NumPhysChunks,
++             IMG_UINT32 ui32NumVirtChunks,
++             IMG_UINT32 *pui32MappingTable,
++             PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
++             PMR_FLAGS_T uiFlags,
++             const IMG_CHAR *pszAnnotation,
++             const PMR_IMPL_FUNCTAB *psFuncTab,
++             PMR_IMPL_PRIVDATA pvPrivData,
++             PMR_IMPL_TYPE eType,
++             PMR **ppsPMRPtr,
++             IMG_UINT32 ui32PDumpFlags)
++{
++    PMR *psPMR = NULL;
++    PVRSRV_ERROR eError;
++
++    PVR_LOG_RETURN_IF_INVALID_PARAM(pszAnnotation != NULL, "pszAnnotation");
++
++    eError = _PMRCreate(uiLogicalSize,
++                        uiChunkSize,
++                        ui32NumPhysChunks,
++                        ui32NumVirtChunks,
++                        pui32MappingTable,
++                        uiLog2ContiguityGuarantee,
++                        uiFlags,
++                        &psPMR);
++    PVR_GOTO_IF_ERROR(eError, e0);
++
++    psPMR->psPhysHeap = psPhysHeap;
++    psPMR->psFuncTab = psFuncTab;
++    psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap);
++    psPMR->pvFlavourData = pvPrivData;
++    psPMR->eFlavour = eType;
++    OSAtomicWrite(&psPMR->iRefCount, 1);
++
++    OSStringLCopy(psPMR->szAnnotation, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN);
++
++#if defined(PDUMP)
++    /* if allocation was done on the host node don't include it in the PDUMP */
++    if (!_IsHostDevicePMR(psPMR))
++    {
++        PMR_FLAGS_T uiFlags = psPMR->uiFlags;
++        IMG_BOOL bInitialise = IMG_FALSE;
++        IMG_UINT32 ui32InitValue = 0;
++
++        if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
++        {
++            bInitialise = IMG_TRUE;
++        }
++        else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
++        {
++            ui32InitValue = 0xDEADBEEF;
++            bInitialise = IMG_TRUE;
++        }
++
++        /* (Argument list reconstructed to match the PDumpPMRMallocPMR
++         * declaration earlier in this file.) */
++        PDumpPMRMallocPMR(psPMR,
++                          (uiChunkSize * ui32NumVirtChunks),
++                          1ULL << uiLog2ContiguityGuarantee,
++                          TRUNCATE_64BITS_TO_32BITS(uiChunkSize),
++                          ui32NumPhysChunks,
++                          ui32NumVirtChunks,
++                          pui32MappingTable,
++                          uiLog2ContiguityGuarantee,
++                          bInitialise,
++                          ui32InitValue,
++                          &psPMR->hPDumpAllocHandle,
++                          ui32PDumpFlags);
++    }
++#endif
++
++    *ppsPMRPtr = psPMR;
++
++    return PVRSRV_OK;
++
++    /* Error exit paths follow */
++e0:
++    PVR_ASSERT(eError != PVRSRV_OK);
++    return eError;
++}
++
++PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR,
++                                           IMG_UINT32 ui32NestingLevel)
++{
++    PVRSRV_ERROR eError;
++
++    PVR_ASSERT(psPMR != NULL);
++
++    /* Note: taking this lock is not required to protect the PMR reference
++     * count, because the PMR reference count is atomic. Rather, taking
++     * the lock here guarantees that no caller will exit this function
++     * without the underlying physical addresses being locked.
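++     *
++     * Illustrative caller pattern (cf. PMRValidationTest above):
++     *   eError = PMRLockSysPhysAddresses(psPMR);
++     *   ...PMR_DevPhysAddr() / kernel-mapping accesses...
++     *   eError = PMRUnlockSysPhysAddresses(psPMR);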
++ */ ++ OSLockAcquireNested(psPMR->hLock, ui32NestingLevel); ++ /* We also count the locks as references, so that the PMR is not freed ++ * while someone is using a physical address. ++ * "lock" here simply means incrementing the refcount. It means the ++ * refcount is multipurpose, but that's okay. We only have to promise ++ * that physical addresses are valid after this point, and remain valid ++ * until the corresponding PMRUnlockSysPhysAddressesOSMem() ++ */ ++ _Ref(psPMR); ++ ++ /* Also count locks separately from other types of references, to ++ * allow for debug assertions ++ */ ++ ++ /* Only call callback if lockcount transitions from 0 to 1 (or 1 to 2 if not backed on demand) */ ++ if (OSAtomicIncrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 1 : 2)) ++ { ++ if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL) ++ { ++ /* must always have lock and unlock in pairs! */ ++ PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL); ++ ++ eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData); ++ ++ PVR_GOTO_IF_ERROR(eError, e1); ++ } ++ } ++ OSLockRelease(psPMR->hLock); ++ ++ return PVRSRV_OK; ++ ++e1: ++ OSAtomicDecrement(&psPMR->iLockCount); ++ _Unref(psPMR); ++ PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) != 0); ++ OSLockRelease(psPMR->hLock); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMRLockSysPhysAddresses(PMR *psPMR) ++{ ++ return PMRLockSysPhysAddressesNested(psPMR, 0); ++} ++ ++PVRSRV_ERROR ++PMRUnlockSysPhysAddresses(PMR *psPMR) ++{ ++ return PMRUnlockSysPhysAddressesNested(psPMR, 2); ++} ++ ++PVRSRV_ERROR ++PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psPMR != NULL); ++ ++ /* Acquiring the lock here, as well as during the Lock operation ensures ++ * the lock count hitting zero and the unlocking of the phys addresses is ++ * an atomic operation ++ */ ++ OSLockAcquireNested(psPMR->hLock, ui32NestingLevel); ++ PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); ++ ++ if (OSAtomicDecrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)) ++ { ++ if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL) ++ { ++ PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL); ++ ++ eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData); ++ /* must never fail */ ++ PVR_ASSERT(eError == PVRSRV_OK); ++ } ++ } ++ ++ OSLockRelease(psPMR->hLock); ++ ++ /* We also count the locks as references, so that the PMR is not ++ * freed while someone is using a physical address. ++ */ ++ _UnrefAndMaybeDestroy(psPMR); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_ASSERT(psPMR != NULL); ++ ++ OSLockAcquire(psPMR->hLock); ++ /* Stop if we still have references on the PMR */ ++ if ( ( bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 2)) ++ || (!bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 1)) ) ++ { ++ OSLockRelease(psPMR->hLock); ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: PMR is still referenced %u times. " ++ "That means this PMR is probably exported or used somewhere else. 
" ++ "Allowed are 2 references if it is mapped to device, otherwise 1.", ++ __func__, ++ OSAtomicRead(&psPMR->iRefCount))); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_STILL_REFERENCED, e_exit); ++ } ++ OSLockRelease(psPMR->hLock); ++ ++ if (psPMR->psFuncTab->pfnUnpinMem != NULL) ++ { ++ eError = psPMR->psFuncTab->pfnUnpinMem(psPMR->pvFlavourData); ++ if (eError == PVRSRV_OK) ++ { ++ psPMR->bIsUnpinned = IMG_TRUE; ++ } ++ } ++ ++e_exit: ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMRPinPMR(PMR *psPMR) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_ASSERT(psPMR != NULL); ++ ++ if (psPMR->psFuncTab->pfnPinMem != NULL) ++ { ++ eError = psPMR->psFuncTab->pfnPinMem(psPMR->pvFlavourData, ++ psPMR->psMappingTable); ++ if (eError == PVRSRV_OK) ++ { ++ psPMR->bIsUnpinned = IMG_FALSE; ++ } ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMRMakeLocalImportHandle(PMR *psPMR, ++ PMR **ppsPMR) ++{ ++ PMRRefPMR(psPMR); ++ *ppsPMR = psPMR; ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PMRUnmakeLocalImportHandle(PMR *psPMR) ++{ ++ PMRUnrefPMR(psPMR); ++ return PVRSRV_OK; ++} ++ ++/* ++ Note: ++ We pass back the PMR as it was passed in as a different handle type ++ (DEVMEM_MEM_IMPORT) and it allows us to change the import structure ++ type if we should need to embed any meta data in it. ++ */ ++PVRSRV_ERROR ++PMRLocalImportPMR(PMR *psPMR, ++ PMR **ppsPMR, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ IMG_DEVMEM_ALIGN_T *puiAlign) ++{ ++ _Ref(psPMR); ++ ++ /* Return the PMR */ ++ *ppsPMR = psPMR; ++ *puiSize = psPMR->uiLogicalSize; ++ *puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee; ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PMRGetUID(PMR *psPMR, ++ IMG_UINT64 *pui64UID) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ ++ *pui64UID = psPMR->uiSerialNum; ++ ++ return PVRSRV_OK; ++} ++ ++#if defined(SUPPORT_INSECURE_EXPORT) ++PVRSRV_ERROR ++PMRExportPMR(PMR *psPMR, ++ PMR_EXPORT **ppsPMRExportPtr, ++ PMR_SIZE_T *puiSize, ++ PMR_LOG2ALIGN_T *puiLog2Contig, ++ PMR_PASSWORD_T *puiPassword) ++{ ++ IMG_UINT64 uiPassword; ++ PMR_EXPORT *psPMRExport; ++ ++ uiPassword = psPMR->uiKey; ++ ++ psPMRExport = OSAllocMem(sizeof(*psPMRExport)); ++ PVR_RETURN_IF_NOMEM(psPMRExport); ++ ++ psPMRExport->psPMR = psPMR; ++ _Ref(psPMR); ++ /* The layout of a PMR can't change once exported ++ * to make sure the importers view of the memory is ++ * the same as exporter. 
*/ ++ psPMR->bNoLayoutChange = IMG_TRUE; ++ ++ *ppsPMRExportPtr = psPMRExport; ++ *puiSize = psPMR->uiLogicalSize; ++ *puiLog2Contig = psPMR->uiLog2ContiguityGuarantee; ++ *puiPassword = uiPassword; ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR ++PMRUnexportPMR(PMR_EXPORT *psPMRExport) ++{ ++ PVR_ASSERT(psPMRExport != NULL); ++ PVR_ASSERT(psPMRExport->psPMR != NULL); ++ PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); ++ ++ _UnrefAndMaybeDestroy(psPMRExport->psPMR); ++ ++ OSFreeMem(psPMRExport); ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR ++PMRImportPMR(PMR_EXPORT *psPMRExport, ++ PMR_PASSWORD_T uiPassword, ++ PMR_SIZE_T uiSize, ++ PMR_LOG2ALIGN_T uiLog2Contig, ++ PMR **ppsPMR) ++{ ++ PMR *psPMR; ++ ++ PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0); ++ ++ psPMR = psPMRExport->psPMR; ++ ++ PVR_ASSERT((psPMR->bNoLayoutChange == IMG_TRUE)); ++ ++ if (psPMR->uiKey != uiPassword) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "PMRImport: Import failed, password specified does not match the export")); ++ return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR; ++ } ++ ++ if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig) ++ { ++ return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES; ++ } ++ ++ _Ref(psPMR); ++ ++ *ppsPMR = psPMR; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PMRUnimportPMR(PMR *psPMR) ++{ ++ _UnrefAndMaybeDestroy(psPMR); ++ ++ return PVRSRV_OK; ++} ++ ++#else /* if defined(SUPPORT_INSECURE_EXPORT) */ ++ ++PVRSRV_ERROR ++PMRExportPMR(PMR *psPMR, ++ PMR_EXPORT **ppsPMRExportPtr, ++ PMR_SIZE_T *puiSize, ++ PMR_LOG2ALIGN_T *puiLog2Contig, ++ PMR_PASSWORD_T *puiPassword) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(ppsPMRExportPtr); ++ PVR_UNREFERENCED_PARAMETER(puiSize); ++ PVR_UNREFERENCED_PARAMETER(puiLog2Contig); ++ PVR_UNREFERENCED_PARAMETER(puiPassword); ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR ++PMRUnexportPMR(PMR_EXPORT *psPMRExport) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMRExport); ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR ++PMRImportPMR(PMR_EXPORT *psPMRExport, ++ PMR_PASSWORD_T uiPassword, ++ PMR_SIZE_T uiSize, ++ PMR_LOG2ALIGN_T uiLog2Contig, ++ PMR **ppsPMR) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMRExport); ++ PVR_UNREFERENCED_PARAMETER(uiPassword); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(uiLog2Contig); ++ PVR_UNREFERENCED_PARAMETER(ppsPMR); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PMRUnimportPMR(PMR *psPMR) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ return PVRSRV_OK; ++} ++#endif /* if defined(SUPPORT_INSECURE_EXPORT) */ ++ ++#if defined(SUPPORT_SECURE_EXPORT) ++PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR) ++{ ++ _UnrefAndMaybeDestroy(psPMR); ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR _ReleaseSecurePMR(void *psExport) ++{ ++ return PMRSecureUnexportPMR(psExport); ++} ++ ++PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDevNode, ++ PMR *psPMR, ++ IMG_SECURE_TYPE *phSecure, ++ PMR **ppsPMR, ++ CONNECTION_DATA **ppsSecureConnection) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ PVR_UNREFERENCED_PARAMETER(ppsSecureConnection); ++ ++ /* We are acquiring reference to PMR here because OSSecureExport ++ * releases bridge lock and PMR lock for a moment and we don't want PMR ++ * to be removed by other thread in the meantime. 
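++     * The reference taken here is dropped again on the e0 error path
++     * below if OSSecureExport fails.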
*/ ++ _Ref(psPMR); ++ ++ eError = OSSecureExport("secure_pmr", ++ _ReleaseSecurePMR, ++ (void *) psPMR, ++ phSecure); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ *ppsPMR = psPMR; ++ ++ /* Mark the PMR immutable once exported ++ * This allows the importers and exporter to have ++ * the same view of the memory */ ++ psPMR->bNoLayoutChange = IMG_TRUE; ++ ++ return PVRSRV_OK; ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ _UnrefAndMaybeDestroy(psPMR); ++ return eError; ++} ++ ++PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_SECURE_TYPE hSecure, ++ PMR **ppsPMR, ++ IMG_DEVMEM_SIZE_T *puiSize, ++ IMG_DEVMEM_ALIGN_T *puiAlign) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psPMR; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ eError = OSSecureImport(hSecure, (void **) &psPMR); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ PVR_LOG_RETURN_IF_FALSE(PhysHeapDeviceNode(psPMR->psPhysHeap) == psDevNode, ++ "PMR invalid for this device", ++ PVRSRV_ERROR_PMR_NOT_PERMITTED); ++ ++ _Ref(psPMR); ++ /* The PMR should be immutable once exported ++ * This allows the importers and exporter to have ++ * the same view of the memory */ ++ PVR_ASSERT(psPMR->bNoLayoutChange == IMG_TRUE); ++ ++ /* Return the PMR */ ++ *ppsPMR = psPMR; ++ *puiSize = psPMR->uiLogicalSize; ++ *puiAlign = 1ull << psPMR->uiLog2ContiguityGuarantee; ++ return PVRSRV_OK; ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR) ++{ ++ _UnrefAndMaybeDestroy(psPMR); ++ return PVRSRV_OK; ++} ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++PVRSRV_ERROR ++PMRStoreRIHandle(PMR *psPMR, ++ void *hRIHandle) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ ++ psPMR->hRIHandle = hRIHandle; ++ return PVRSRV_OK; ++} ++#endif ++ ++static PVRSRV_ERROR ++_PMRAcquireKernelMappingData(PMR *psPMR, ++ size_t uiLogicalOffset, ++ size_t uiSize, ++ void **ppvKernelAddressOut, ++ size_t *puiLengthOut, ++ IMG_HANDLE *phPrivOut, ++ IMG_BOOL bMapSparse) ++{ ++ PVRSRV_ERROR eError; ++ void *pvKernelAddress; ++ IMG_HANDLE hPriv; ++ ++ PVR_ASSERT(psPMR != NULL); ++ ++ if (_PMRIsSparse(psPMR) && !bMapSparse) ++ { ++ /* Mapping of sparse allocations must be signalled. */ ++ return PVRSRV_ERROR_PMR_NOT_PERMITTED; ++ } ++ ++ /* Acquire/Release functions must be overridden in pairs */ ++ if (psPMR->psFuncTab->pfnAcquireKernelMappingData == NULL) ++ { ++ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == NULL); ++ ++ /* If PMR implementation does not supply this pair of ++ * functions, it means they do not permit the PMR to be mapped ++ * into kernel memory at all ++ */ ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0); ++ } ++ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL); ++ ++ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, ++ uiLogicalOffset, ++ uiSize, ++ &pvKernelAddress, ++ &hPriv, ++ psPMR->uiFlags); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ *ppvKernelAddressOut = pvKernelAddress; ++ if (uiSize == 0) ++ { ++ /* Zero size means map in the whole PMR ... */ ++ *puiLengthOut = (size_t)psPMR->uiLogicalSize; ++ } ++ else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee)) ++ { ++ /* ... map in the requested pages ... */ ++ *puiLengthOut = uiSize; ++ } ++ else ++ { ++ /* ... 
otherwise we just map in one page */ ++ *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee; ++ } ++ *phPrivOut = hPriv; ++ ++ return PVRSRV_OK; ++ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMRAcquireKernelMappingData(PMR *psPMR, ++ size_t uiLogicalOffset, ++ size_t uiSize, ++ void **ppvKernelAddressOut, ++ size_t *puiLengthOut, ++ IMG_HANDLE *phPrivOut) ++{ ++ return _PMRAcquireKernelMappingData(psPMR, ++ uiLogicalOffset, ++ uiSize, ++ ppvKernelAddressOut, ++ puiLengthOut, ++ phPrivOut, ++ IMG_FALSE); ++} ++ ++PVRSRV_ERROR ++PMRAcquireSparseKernelMappingData(PMR *psPMR, ++ size_t uiLogicalOffset, ++ size_t uiSize, ++ void **ppvKernelAddressOut, ++ size_t *puiLengthOut, ++ IMG_HANDLE *phPrivOut) ++{ ++ return _PMRAcquireKernelMappingData(psPMR, ++ uiLogicalOffset, ++ uiSize, ++ ppvKernelAddressOut, ++ puiLengthOut, ++ phPrivOut, ++ IMG_TRUE); ++} ++ ++PVRSRV_ERROR ++PMRReleaseKernelMappingData(PMR *psPMR, ++ IMG_HANDLE hPriv) ++{ ++ PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL); ++ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL); ++ ++ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, ++ hPriv); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ _PMRLogicalOffsetToPhysicalOffset ++ ++ Translate between the "logical" offset which the upper levels ++ provide and the physical offset which is what the PMR ++ factories work on. ++ ++ As well as returning the physical offset, we return the number of ++ bytes remaining till the next chunk and whether this chunk is valid. ++ ++ For multi-page operations, upper layers communicate their ++ Log2PageSize; otherwise the argument is redundant (set to zero). ++ */ ++ ++static void ++_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset, ++ IMG_UINT32 *pui32BytesRemain, ++ IMG_BOOL *bValid) ++{ ++ PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable; ++ IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize; ++ IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset; ++ IMG_UINT64 ui64ChunkIndex; ++ IMG_UINT32 ui32Remain; ++ IMG_UINT32 idx; ++ ++ /* Must be translating at least a page */ ++ PVR_ASSERT(ui32NumOfPages); ++ ++ if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks) ++ { ++ /* Fast path the common case: as logical and physical offsets are ++ equal we assume the ui32NumOfPages span is also valid */ ++ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset); ++ puiPhysicalOffset[0] = uiOffset; ++ bValid[0] = IMG_TRUE; ++ ++ if (ui32NumOfPages > 1) ++ { ++ /* initial offset may not be page aligned, round down */ ++ uiOffset &= ~(uiPageSize-1); ++ for (idx=1; idx < ui32NumOfPages; idx++) ++ { ++ uiOffset += uiPageSize; ++ puiPhysicalOffset[idx] = uiOffset; ++ bValid[idx] = IMG_TRUE; ++ } ++ } ++ } ++ else ++ { ++ for (idx=0; idx < ui32NumOfPages; idx++) ++ { ++ ui64ChunkIndex = OSDivide64r64( ++ uiOffset, ++ TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize), ++ &ui32Remain); ++ ++ if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID) ++ { ++ bValid[idx] = IMG_FALSE; ++ } ++ else ++ { ++ bValid[idx] = IMG_TRUE; ++ } ++ ++ if (idx == 0) ++ { ++ if (ui32Remain == 0) ++ { ++ /* Start of chunk so return the chunk size */ ++ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize); ++ } ++ else ++ { ++ *pui32BytesRemain =
TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain); ++ } ++ ++ puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) + ui32Remain; ++ ++ /* initial offset may not be page aligned, round down */ ++ uiOffset &= ~(uiPageSize-1); ++ } ++ else ++ { ++ puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize + ui32Remain; ++ } ++ uiOffset += uiPageSize; ++ } ++ } ++} ++ ++static PVRSRV_ERROR ++_PMR_ReadBytesPhysical(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiPhysicalOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (psPMR->psFuncTab->pfnReadBytes != NULL) ++ { ++ /* defer to callback if present */ ++ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData, ++ uiPhysicalOffset, ++ pcBuffer, ++ uiBufSz, ++ puiNumBytes); ++ PMRUnlockSysPhysAddresses(psPMR); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ } ++ else if (psPMR->psFuncTab->pfnAcquireKernelMappingData) ++ { ++ /* "default" handler for reading bytes */ ++ ++ IMG_HANDLE hKernelMappingHandle; ++ IMG_UINT8 *pcKernelAddress; ++ ++ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, ++ (size_t) uiPhysicalOffset, ++ uiBufSz, ++ (void **)&pcKernelAddress, ++ &hKernelMappingHandle, ++ psPMR->uiFlags); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ /* Use the conservative 'DeviceMemCopy' here because we can't ++ * know if this PMR will be mapped cached. ++ */ ++ ++ OSDeviceMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz); ++ *puiNumBytes = uiBufSz; ++ ++ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, ++ hKernelMappingHandle); ++ } ++ else ++ { ++ OSPanic(); ++ PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_INVALID_PARAMS, e0); ++ } ++ ++ return PVRSRV_OK; ++ ++ /* Error exit paths follow */ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ *puiNumBytes = 0; ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMR_ReadBytes(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_DEVMEM_OFFSET_T uiPhysicalOffset; ++ size_t uiBytesCopied = 0; ++ ++ if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) ++ { ++ uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); ++ } ++ PVR_ASSERT(uiBufSz > 0); ++ PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize); ++ ++ /* PMR implementations can override this. If they don't, a "default" ++ * handler uses kernel virtual mappings. If the kernel can't ++ * provide a kernel virtual mapping, this function fails. 
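++ * (The per-chunk dispatch between pfnReadBytes and the kernel-mapping fallback is in _PMR_ReadBytesPhysical() above.)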
++ */ ++ PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL || ++ psPMR->psFuncTab->pfnReadBytes != NULL); ++ ++ while (uiBytesCopied != uiBufSz) ++ { ++ IMG_UINT32 ui32Remain; ++ size_t uiBytesToCopy; ++ size_t uiRead; ++ IMG_BOOL bValid; ++ ++ _PMRLogicalOffsetToPhysicalOffset(psPMR, ++ 0, ++ 1, ++ uiLogicalOffset, ++ &uiPhysicalOffset, ++ &ui32Remain, ++ &bValid); ++ /* Copy till either the end of the chunk or the end ++ * of the buffer ++ */ ++ uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain); ++ ++ if (bValid) ++ { ++ /* Read the data from the PMR */ ++ eError = _PMR_ReadBytesPhysical(psPMR, ++ uiPhysicalOffset, ++ &pcBuffer[uiBytesCopied], ++ uiBytesToCopy, ++ &uiRead); ++ if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to read chunk (eError = %s, uiRead = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")", ++ __func__, ++ PVRSRVGetErrorString(eError), ++ uiRead, ++ uiBytesToCopy)); ++ /* Bail out as soon as we hit an error */ ++ break; ++ } ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Invalid phys offset at logical offset (" IMG_DEVMEM_OFFSET_FMTSPEC ") logical size (" IMG_DEVMEM_OFFSET_FMTSPEC ")", ++ __func__, ++ uiLogicalOffset, ++ psPMR->uiLogicalSize)); ++ /* Fill invalid chunks with 0 */ ++ OSCachedMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy); ++ uiRead = uiBytesToCopy; ++ eError = PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR; ++ } ++ uiLogicalOffset += uiRead; ++ uiBytesCopied += uiRead; ++ } ++ ++ *puiNumBytes = uiBytesCopied; ++ return eError; ++} ++ ++static PVRSRV_ERROR ++_PMR_WriteBytesPhysical(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiPhysicalOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (psPMR->psFuncTab->pfnWriteBytes != NULL) ++ { ++ /* defer to callback if present */ ++ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData, ++ uiPhysicalOffset, ++ pcBuffer, ++ uiBufSz, ++ puiNumBytes); ++ PMRUnlockSysPhysAddresses(psPMR); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ } ++ else if (psPMR->psFuncTab->pfnAcquireKernelMappingData) ++ { ++ /* "default" handler for writing bytes */ ++ ++ IMG_HANDLE hKernelMappingHandle; ++ IMG_UINT8 *pcKernelAddress; ++ ++ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData, ++ (size_t) uiPhysicalOffset, ++ uiBufSz, ++ (void **)&pcKernelAddress, ++ &hKernelMappingHandle, ++ psPMR->uiFlags); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ /* Use the conservative 'DeviceMemCopy' here because we can't know ++ * if this PMR will be mapped cached.
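++ * (A cached mapping would typically require explicit cache maintenance that a plain copy does not perform.)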
++ */ ++ ++ OSDeviceMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz); ++ *puiNumBytes = uiBufSz; ++ ++ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData, ++ hKernelMappingHandle); ++ } ++ else ++ { ++ /* The write callback is optional as it's only required by the ++ * debug tools ++ */ ++ OSPanic(); ++ PVR_LOG_GOTO_WITH_ERROR("psPMR->psFuncTab", eError, PVRSRV_ERROR_PMR_NOT_PERMITTED, e0); ++ } ++ ++ return PVRSRV_OK; ++ ++ /* Error exit paths follow */ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ *puiNumBytes = 0; ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMR_WriteBytes(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_DEVMEM_OFFSET_T uiPhysicalOffset; ++ size_t uiBytesCopied = 0; ++ ++ if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize) ++ { ++ uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset); ++ } ++ PVR_ASSERT(uiBufSz > 0); ++ PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize); ++ ++ /* PMR implementations can override this. If they don't, a "default" ++ * handler uses kernel virtual mappings. If the kernel can't provide ++ * a kernel virtual mapping, this function fails. ++ */ ++ PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL || ++ psPMR->psFuncTab->pfnWriteBytes != NULL); ++ ++ while (uiBytesCopied != uiBufSz) ++ { ++ IMG_UINT32 ui32Remain; ++ size_t uiBytesToCopy; ++ size_t uiWrite; ++ IMG_BOOL bValid; ++ ++ _PMRLogicalOffsetToPhysicalOffset(psPMR, ++ 0, ++ 1, ++ uiLogicalOffset, ++ &uiPhysicalOffset, ++ &ui32Remain, ++ &bValid); ++ ++ /* Copy till either the end of the chunk or the end of the buffer ++ */ ++ uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain); ++ ++ if (bValid) ++ { ++ /* Write the data to the PMR */ ++ eError = _PMR_WriteBytesPhysical(psPMR, ++ uiPhysicalOffset, ++ &pcBuffer[uiBytesCopied], ++ uiBytesToCopy, ++ &uiWrite); ++ if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to write chunk (eError = %s, uiWrite = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")", ++ __func__, ++ PVRSRVGetErrorString(eError), ++ uiWrite, ++ uiBytesToCopy)); ++ /* Bail out as soon as we hit an error */ ++ break; ++ } ++ } ++ else ++ { ++ /* Ignore writes to invalid pages */ ++ uiWrite = uiBytesToCopy; ++ } ++ uiLogicalOffset += uiWrite; ++ uiBytesCopied += uiWrite; ++ } ++ ++ *puiNumBytes = uiBytesCopied; ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) ++{ ++ if (psPMR->psFuncTab->pfnMMap) ++ { ++ return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData); ++ } ++ ++ return OSMMapPMRGeneric(psPMR, pOSMMapData); ++} ++ ++void ++PMRRefPMR(PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ _Ref(psPMR); ++} ++ ++PVRSRV_ERROR ++PMRUnrefPMR(PMR *psPMR) ++{ ++ _UnrefAndMaybeDestroy(psPMR); ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PMRUnrefUnlockPMR(PMR *psPMR) ++{ ++ PMRUnlockSysPhysAddresses(psPMR); ++ ++ PMRUnrefPMR(psPMR); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_DEVICE_NODE * ++PMR_DeviceNode(const PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ ++ return PhysHeapDeviceNode(psPMR->psPhysHeap); ++} ++ ++PMR_FLAGS_T ++PMR_Flags(const PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ ++ return psPMR->uiFlags; ++} ++ ++IMG_BOOL ++PMR_IsSparse(const PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ ++ return _PMRIsSparse(psPMR); ++} ++ ++IMG_BOOL ++PMR_IsUnpinned(const PMR *psPMR) ++{ ++
PVR_ASSERT(psPMR != NULL); ++ ++ return psPMR->bIsUnpinned; ++} ++ ++/* Function that alters the mutability property ++ * of the PMR ++ * Setting it to TRUE makes sure the PMR memory layout ++ * can't be changed through future calls */ ++void ++PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ ++ psPMR->bNoLayoutChange = bFlag; ++} ++ ++IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ ++ return psPMR->bNoLayoutChange; ++} ++ ++void ++PMR_LogicalSize(const PMR *psPMR, ++ IMG_DEVMEM_SIZE_T *puiLogicalSize) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ ++ *puiLogicalSize = psPMR->uiLogicalSize; ++} ++ ++PVRSRV_ERROR ++PMR_PhysicalSize(const PMR *psPMR, ++ IMG_DEVMEM_SIZE_T *puiPhysicalSize) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ ++ /* iLockCount will be > 0 for any backed PMR (backed on demand or not) */ ++ if ((OSAtomicRead(&psPMR->iLockCount) > 0) && !psPMR->bIsUnpinned) ++ { ++ if (psPMR->bSparseAlloc) ++ { ++ *puiPhysicalSize = psPMR->psMappingTable->uiChunkSize * psPMR->psMappingTable->ui32NumPhysChunks; ++ } ++ else ++ { ++ *puiPhysicalSize = psPMR->uiLogicalSize; ++ } ++ } ++ else ++ { ++ *puiPhysicalSize = 0; ++ } ++ return PVRSRV_OK; ++} ++ ++PHYS_HEAP * ++PMR_PhysHeap(const PMR *psPMR) ++{ ++ return psPMR->psPhysHeap; ++} ++ ++PVRSRV_ERROR ++PMR_IsOffsetValid(const PMR *psPMR, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_BOOL *pbValid) ++{ ++ IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_UINT32 aui32BytesRemain[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset; ++ IMG_UINT32 *pui32BytesRemain = aui32BytesRemain; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_ASSERT(psPMR != NULL); ++ PVR_ASSERT(psPMR->uiLogicalSize >= uiLogicalOffset); ++ ++ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) ++ { ++ puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T)); ++ PVR_GOTO_IF_NOMEM(puiPhysicalOffset, eError, e0); ++ ++ pui32BytesRemain = OSAllocMem(ui32NumOfPages * sizeof(IMG_UINT32)); ++ PVR_GOTO_IF_NOMEM(pui32BytesRemain, eError, e0); ++ } ++ ++ _PMRLogicalOffsetToPhysicalOffset(psPMR, ++ ui32Log2PageSize, ++ ui32NumOfPages, ++ uiLogicalOffset, ++ puiPhysicalOffset, ++ pui32BytesRemain, ++ pbValid); ++ ++e0: ++ if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL) ++ { ++ OSFreeMem(puiPhysicalOffset); ++ } ++ ++ if (pui32BytesRemain != aui32BytesRemain && pui32BytesRemain != NULL) ++ { ++ OSFreeMem(pui32BytesRemain); ++ } ++ ++ return eError; ++} ++ ++PMR_MAPPING_TABLE * ++PMR_GetMappingTable(const PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ return psPMR->psMappingTable; ++ ++} ++ ++IMG_UINT32 ++PMR_GetLog2Contiguity(const PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ return psPMR->uiLog2ContiguityGuarantee; ++} ++ ++const IMG_CHAR * ++PMR_GetAnnotation(const PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ return psPMR->szAnnotation; ++} ++ ++PMR_IMPL_TYPE ++PMR_GetType(const PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ return psPMR->eFlavour; ++} ++ ++IMG_INT32 ++PMR_GetRefCount(const PMR *psPMR) ++{ ++ PVR_ASSERT(psPMR != NULL); ++ return OSAtomicRead(&psPMR->iRefCount); ++} ++ ++/* must have called PMRLockSysPhysAddresses() before calling this! 
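++ * (Debug builds assert on iLockCount below to catch violations.)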
*/ ++PVRSRV_ERROR ++PMR_DevPhysAddr(const PMR *psPMR, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_DEV_PHYADDR *psDevAddrPtr, ++ IMG_BOOL *pbValid) ++{ ++ IMG_UINT32 ui32Remain; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset; ++ ++ PVR_ASSERT(psPMR != NULL); ++ PVR_ASSERT(ui32NumOfPages > 0); ++ PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL); ++ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1)); ++#endif ++ ++ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) ++ { ++ puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T)); ++ PVR_RETURN_IF_NOMEM(puiPhysicalOffset); ++ } ++ ++ _PMRLogicalOffsetToPhysicalOffset(psPMR, ++ ui32Log2PageSize, ++ ui32NumOfPages, ++ uiLogicalOffset, ++ puiPhysicalOffset, ++ &ui32Remain, ++ pbValid); ++ if (*pbValid || _PMRIsSparse(psPMR)) ++ { ++ /* Sparse PMR may not always have the first page valid */ ++ eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData, ++ ui32Log2PageSize, ++ ui32NumOfPages, ++ puiPhysicalOffset, ++ pbValid, ++ psDevAddrPtr); ++ PVR_GOTO_IF_ERROR(eError, FreeOffsetArray); ++ ++#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) ++ /* Currently excluded from the default build because of performance ++ * concerns. ++ * We do not need this part in all systems because the GPU has the same ++ * address view of system RAM as the CPU. ++ * Alternatively this could be implemented as part of the PMR-factories ++ * directly */ ++ if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA || ++ PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA) ++ { ++ IMG_UINT32 i; ++ IMG_DEV_PHYADDR sDevPAddrCorrected; ++ ++ /* Copy the translated addresses to the correct array */ ++ for (i = 0; i < ui32NumOfPages; i++) ++ { ++ PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap, ++ 1, ++ &sDevPAddrCorrected, ++ (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]); ++ psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr; ++ } ++ } ++#endif ++ } ++ ++FreeOffsetArray: ++ if (puiPhysicalOffset != auiPhysicalOffset) ++ { ++ OSFreeMem(puiPhysicalOffset); ++ } ++ ++ return eError; ++} ++ ++/* must have called PMRLockSysPhysAddresses() before calling this! */ ++PVRSRV_ERROR ++PMR_CpuPhysAddr(const PMR *psPMR, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_CPU_PHYADDR *psCpuAddrPtr, ++ IMG_BOOL *pbValid) ++{ ++ IMG_UINT32 idx; ++ PVRSRV_ERROR eError; ++ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr; ++ ++ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) ++ { ++ psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR)); ++ PVR_GOTO_IF_NOMEM(psDevPAddr, eError, e0); ++ } ++ ++ eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages, ++ uiLogicalOffset, psDevPAddr, pbValid); ++ PVR_GOTO_IF_ERROR(eError, e1); ++ ++ if (_PMRIsSparse(psPMR)) ++ { ++ /* Loop over each page. 
++ * If Dev addr valid, populate the CPU addr from the Dev addr ++ */ ++ for (idx = 0; idx < ui32NumOfPages; idx++) ++ { ++ if (pbValid[idx]) ++ { ++ PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, 1, &psCpuAddrPtr[idx], &psDevPAddr[idx]); ++ } ++ } ++ } ++ else ++ { ++ /* In this case all addrs will be valid, so we can block translate */ ++ PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr); ++ } ++ ++ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC) ++ { ++ OSFreeMem(psDevPAddr); ++ } ++ ++ return PVRSRV_OK; ++e1: ++ if (psDevPAddr != asDevPAddr) ++ { ++ OSFreeMem(psDevPAddr); ++ } ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_UINT32 uiSparseFlags) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (IMG_TRUE == psPMR->bNoLayoutChange) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: This PMR layout cannot be changed", ++ __func__)); ++ return PVRSRV_ERROR_PMR_NOT_PERMITTED; ++ } ++ ++ if (NULL == psPMR->psFuncTab->pfnChangeSparseMem) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: This type of sparse PMR cannot be changed.", ++ __func__)); ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++ } ++ ++ eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData, ++ psPMR, ++ ui32AllocPageCount, ++ pai32AllocIndices, ++ ui32FreePageCount, ++ pai32FreeIndices, ++ uiSparseFlags); ++ if (eError != PVRSRV_OK) ++ { ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ if (eError == PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) ++ { ++ PVRSRVStatsUpdateOOMStats(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, ++ OSGetCurrentClientProcessIDKM()); ++ } ++#endif ++ goto e0; ++ } ++ ++#if defined(PDUMP) ++ { ++ IMG_BOOL bInitialise = IMG_FALSE; ++ IMG_UINT32 ui32InitValue = 0; ++ ++ if (PVRSRV_CHECK_ZERO_ON_ALLOC(PMR_Flags(psPMR))) ++ { ++ bInitialise = IMG_TRUE; ++ } ++ else if (PVRSRV_CHECK_POISON_ON_ALLOC(PMR_Flags(psPMR))) ++ { ++ ui32InitValue = 0xDEADBEEF; ++ bInitialise = IMG_TRUE; ++ } ++ ++ PDumpPMRChangeSparsePMR(psPMR, ++ 1 << psPMR->uiLog2ContiguityGuarantee, ++ ui32AllocPageCount, ++ pai32AllocIndices, ++ ui32FreePageCount, ++ pai32FreeIndices, ++ bInitialise, ++ ui32InitValue, ++ &psPMR->hPDumpAllocHandle); ++ } ++ ++#endif ++ ++e0: ++ return eError; ++} ++ ++ ++PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR, ++ IMG_UINT64 sCpuVAddrBase, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices) ++{ ++ PVRSRV_ERROR eError; ++ ++ if ((NULL == psPMR->psFuncTab) || ++ (NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: This type of sparse PMR cannot be changed.", ++ __func__)); ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++ } ++ ++ if (IMG_TRUE == psPMR->bNoLayoutChange) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: This PMR layout cannot be changed", ++ __func__)); ++ return PVRSRV_ERROR_PMR_NOT_PERMITTED; ++ } ++ ++ eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData, ++ psPMR, ++ sCpuVAddrBase, ++ ui32AllocPageCount, ++ pai32AllocIndices, ++ ui32FreePageCount, ++ pai32FreeIndices); ++ ++ return eError; ++} ++ ++ ++#if defined(PDUMP) ++ ++static PVRSRV_ERROR ++_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiPhysicalOffset, ++ IMG_UINT32 ui32MemspaceNameLen, ++ IMG_CHAR *pszMemspaceName, ++ IMG_UINT32 ui32SymbolicAddrLen, ++ IMG_CHAR 
*pszSymbolicAddr, ++ IMG_DEVMEM_OFFSET_T *puiNewOffset, ++ IMG_DEVMEM_OFFSET_T *puiNextSymName) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = PhysHeapDeviceNode(psPMR->psPhysHeap); ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++ if (PVRSRV_CHECK_PHYS_HEAP(FW_CODE, psPMR->uiFlags) || ++ PVRSRV_CHECK_PHYS_HEAP(FW_PRIV_DATA, psPMR->uiFlags) || ++ PVRSRV_CHECK_PHYS_HEAP(GPU_SECURE, psPMR->uiFlags)) ++ { ++ OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC, ++ psPMR->pszPDumpDefaultMemspaceName); ++ } ++ else ++#endif ++ if (DevmemCPUCacheCoherency(psDevNode, psPMR->uiFlags) || ++ DevmemDeviceCacheCoherency(psDevNode, psPMR->uiFlags)) ++ { ++ OSSNPrintf(pszMemspaceName, ++ ui32MemspaceNameLen, ++ PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC, ++ psPMR->pszPDumpDefaultMemspaceName); ++ } ++ else ++ { ++ OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC, ++ psPMR->pszPDumpDefaultMemspaceName); ++ } ++ ++ OSSNPrintf(pszSymbolicAddr, ++ ui32SymbolicAddrLen, ++ PMR_SYMBOLICADDR_FMTSPEC, ++ PMR_DEFAULT_PREFIX, ++ psPMR->uiSerialNum, ++ uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR), ++ psPMR->szAnnotation); ++ ++ if (pszSymbolicAddr) ++ { ++ PDumpMakeStringValid(pszSymbolicAddr, OSStringLength(pszSymbolicAddr)); ++ } ++ ++ ++ *puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1); ++ *puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1) ++ << PMR_GetLog2Contiguity(psPMR)); ++ ++ return eError; ++} ++ ++ ++PVRSRV_ERROR ++PMR_PDumpSymbolicAddr(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32MemspaceNameLen, ++ IMG_CHAR *pszMemspaceName, ++ IMG_UINT32 ui32SymbolicAddrLen, ++ IMG_CHAR *pszSymbolicAddr, ++ IMG_DEVMEM_OFFSET_T *puiNewOffset, ++ IMG_DEVMEM_OFFSET_T *puiNextSymName ++) ++{ ++ IMG_DEVMEM_OFFSET_T uiPhysicalOffset; ++ IMG_UINT32 ui32Remain; ++ IMG_BOOL bValid; ++ ++ PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize); ++ ++ /* Confirm that the device node's ui32InternalID matches the bound ++ * PDump device stored* in PVRSRV_DATA. ++ */ ++ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) ++ { ++ return PVRSRV_OK; ++ } ++ ++ _PMRLogicalOffsetToPhysicalOffset(psPMR, ++ 0, ++ 1, ++ uiLogicalOffset, ++ &uiPhysicalOffset, ++ &ui32Remain, ++ &bValid); ++ ++ if (!bValid) ++ { ++ /* For sparse allocations, for a given logical address, there ++ * may not be a physical memory backing, the virtual range can ++ * still be valid. ++ */ ++ uiPhysicalOffset = uiLogicalOffset; ++ } ++ ++ return _PMR_PDumpSymbolicAddrPhysical(psPMR, ++ uiPhysicalOffset, ++ ui32MemspaceNameLen, ++ pszMemspaceName, ++ ui32SymbolicAddrLen, ++ pszSymbolicAddr, ++ puiNewOffset, ++ puiNextSymName); ++} ++ ++/*! ++ * @brief Writes a WRW command to the script2 buffer, representing a ++ * dword write to a physical allocation. Size is always ++ * sizeof(IMG_UINT32). 
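++ * The write must not straddle a PMR contiguity block; this is asserted below.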
++ * @param psPMR - PMR object representing allocation ++ * @param uiLogicalOffset - offset ++ * @param ui32Value - value to write ++ * @param uiPDumpFlags - pdump flags ++ * @return PVRSRV_ERROR ++ */ ++PVRSRV_ERROR ++PMRPDumpLoadMemValue32(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Value, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; ++ ++ /* Confirm that the device node's ui32InternalID matches the bound ++ * PDump device stored* in PVRSRV_DATA. ++ */ ++ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) ++ { ++ return PVRSRV_OK; ++ } ++ ++ PVR_ASSERT(uiLogicalOffset + sizeof(ui32Value) <= psPMR->uiLogicalSize); ++ /* Especially make sure to not cross a block boundary */ ++ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) ++ <= uiPMRPageSize)); ++ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Get the symbolic address of the PMR */ ++ eError = PMR_PDumpSymbolicAddr(psPMR, ++ uiLogicalOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiPDumpSymbolicOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Write the WRW script command */ ++ eError = PDumpPMRWRW32(PMR_DeviceNode(psPMR), ++ aszMemspaceName, ++ aszSymbolicName, ++ uiPDumpSymbolicOffset, ++ ui32Value, ++ uiPDumpFlags); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = PMRUnlockSysPhysAddresses(psPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++ * @brief Writes a RDW followed by a WRW command to the pdump script to perform ++ * an effective copy from memory to memory. 
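++ * (The copy is staged through the pszTmpVar internal PDump variable.)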
Memory copied is of size ++ * sizeof(IMG_UINT32) ++ * ++ * @param psDstPMR - PMR object representing allocation of destination ++ * @param uiDstLogicalOffset - destination offset ++ * @param psSrcPMR - PMR object representing allocation of source ++ * @param uiSrcLogicalOffset - source offset ++ * @param pszTmpVar - pdump temporary variable used during the copy ++ * @param uiPDumpFlags - pdump flags ++ * @return PVRSRV_ERROR ++ */ ++PVRSRV_ERROR ++PMRPDumpCopyMem32(PMR *psDstPMR, ++ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, ++ PMR *psSrcPMR, ++ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, ++ const IMG_CHAR *pszTmpVar, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee; ++ const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee; ++ ++ PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize); ++ /* Especially make sure to not cross a block boundary */ ++ PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32)) ++ <= uiSrcPMRPageSize)); ++ ++ PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize); ++ /* Especially make sure to not cross a block boundary */ ++ PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32)) ++ <= uiDstPMRPageSize)); ++ ++ eError = PMRLockSysPhysAddresses(psSrcPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Get the symbolic address of the source PMR */ ++ eError = PMR_PDumpSymbolicAddr(psSrcPMR, ++ uiSrcLogicalOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiPDumpSymbolicOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Issue PDump read command */ ++ eError = PDumpPMRRDW32MemToInternalVar(PMR_DeviceNode(psSrcPMR), ++ pszTmpVar, ++ aszMemspaceName, ++ aszSymbolicName, ++ uiPDumpSymbolicOffset, ++ uiPDumpFlags); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = PMRUnlockSysPhysAddresses(psSrcPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ ++ ++ eError = PMRLockSysPhysAddresses(psDstPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ ++ /* Get the symbolic address of the destination PMR */ ++ eError = PMR_PDumpSymbolicAddr(psDstPMR, ++ uiDstLogicalOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiPDumpSymbolicOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ ++ /* Write the WRW script command */ ++ eError = PDumpPMRWRW32InternalVarToMem(PMR_DeviceNode(psDstPMR), ++ aszMemspaceName, ++ aszSymbolicName, ++ uiPDumpSymbolicOffset, ++ pszTmpVar, ++ uiPDumpFlags); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ ++ eError = PMRUnlockSysPhysAddresses(psDstPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++ * @brief Writes a WRW64 command to the script2 buffer, representing a ++ * dword write to a physical allocation. Size is always ++ * sizeof(IMG_UINT64). 
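++ * The 64-bit counterpart of PMRPDumpLoadMemValue32() above.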
++ * @param psPMR - PMR object representing allocation ++ * @param uiLogicalOffset - offset ++ * @param ui64Value - value to write ++ * @param uiPDumpFlags - pdump flags ++ * @return PVRSRV_ERROR ++ */ ++PVRSRV_ERROR ++PMRPDumpLoadMemValue64(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT64 ui64Value, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; ++ ++ /* Confirm that the device node's ui32InternalID matches the bound ++ * PDump device stored in PVRSRV_DATA. ++ */ ++ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) ++ { ++ return PVRSRV_OK; ++ } ++ ++ PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize); ++ /* Especially make sure to not cross a block boundary */ ++ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value)) ++ <= uiPMRPageSize)); ++ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Get the symbolic address of the PMR */ ++ eError = PMR_PDumpSymbolicAddr(psPMR, ++ uiLogicalOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiPDumpSymbolicOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Write the WRW script command */ ++ eError = PDumpPMRWRW64(PMR_DeviceNode(psPMR), ++ aszMemspaceName, ++ aszSymbolicName, ++ uiPDumpSymbolicOffset, ++ ui64Value, ++ uiPDumpFlags); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = PMRUnlockSysPhysAddresses(psPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++ * @brief Writes a RDW64 followed by a WRW64 command to the pdump script to ++ * perform an effective copy from memory to memory. 
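++ * (The 64-bit counterpart of PMRPDumpCopyMem32() above; the copy is again staged through pszTmpVar.)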
Memory copied is of ++ * size sizeof(IMG_UINT32) ++ * ++ * @param psDstPMR - PMR object representing allocation of destination ++ * @param uiDstLogicalOffset - destination offset ++ * @param psSrcPMR - PMR object representing allocation of source ++ * @param uiSrcLogicalOffset - source offset ++ * @param pszTmpVar - pdump temporary variable used during the copy ++ * @param uiPDumpFlags - pdump flags ++ * @return PVRSRV_ERROR ++ */ ++PVRSRV_ERROR ++PMRPDumpCopyMem64(PMR *psDstPMR, ++ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset, ++ PMR *psSrcPMR, ++ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset, ++ const IMG_CHAR *pszTmpVar, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee; ++ const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee; ++ ++ PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize); ++ /* Especially make sure to not cross a block boundary */ ++ PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32)) ++ <= uiSrcPMRPageSize)); ++ ++ PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize); ++ /* Especially make sure to not cross a block boundary */ ++ PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32)) ++ <= uiDstPMRPageSize)); ++ ++ eError = PMRLockSysPhysAddresses(psSrcPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Get the symbolic address of the source PMR */ ++ eError = PMR_PDumpSymbolicAddr(psSrcPMR, ++ uiSrcLogicalOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiPDumpSymbolicOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Issue PDump read command */ ++ eError = PDumpPMRRDW64MemToInternalVar(PMR_DeviceNode(psSrcPMR), ++ pszTmpVar, ++ aszMemspaceName, ++ aszSymbolicName, ++ uiPDumpSymbolicOffset, ++ uiPDumpFlags); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = PMRUnlockSysPhysAddresses(psSrcPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ ++ ++ eError = PMRLockSysPhysAddresses(psDstPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ ++ /* Get the symbolic address of the destination PMR */ ++ eError = PMR_PDumpSymbolicAddr(psDstPMR, ++ uiDstLogicalOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiPDumpSymbolicOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ ++ /* Write the WRW script command */ ++ eError = PDumpPMRWRW64InternalVarToMem(PMR_DeviceNode(psDstPMR), ++ aszMemspaceName, ++ aszSymbolicName, ++ uiPDumpSymbolicOffset, ++ pszTmpVar, ++ uiPDumpFlags); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ ++ eError = PMRUnlockSysPhysAddresses(psDstPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++ * @brief PDumps the contents of the given allocation. ++ * If bZero is IMG_TRUE then the zero page in the parameter stream is used ++ * as the source of data, rather than the allocation's actual backing. 
++ * @param psPMR - PMR object representing allocation ++ * @param uiLogicalOffset - Offset to write at ++ * @param uiSize - Number of bytes to write ++ * @param uiPDumpFlags - PDump flags ++ * @param bZero - Use the PDump zero page as the source ++ * @return PVRSRV_ERROR ++ */ ++PVRSRV_ERROR ++PMRPDumpLoadMem(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PDUMP_FLAGS_T uiPDumpFlags, ++ IMG_BOOL bZero) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiOutOffset; ++ IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName = 0; ++ const IMG_CHAR *pszParamStreamFileName; ++ PDUMP_FILEOFFSET_T uiParamStreamFileOffset; ++ ++ /* required when !bZero */ ++#define PMR_MAX_PDUMP_BUFSZ (1<<21) ++ IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME]; ++ IMG_UINT8 *pcBuffer = NULL; ++ size_t uiBufSz; ++ IMG_BOOL bValid; ++ IMG_DEVMEM_SIZE_T uiSizeRemain = uiSize; ++ PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); ++ ++ /* Confirm that the device node's ui32InternalID matches the bound ++ * PDump device stored* in PVRSRV_DATA. ++ */ ++ if (!PDumpIsDevicePermitted(psDevNode)) ++ { ++ return PVRSRV_OK; ++ } ++ ++ PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize); ++ ++ /* Check if pdump client is connected */ ++ if (!PDumpCheckFlagsWrite(psDevNode, ++ PDUMP_FLAGS_CONTINUOUS)) ++ { ++ /* Dumping of memory in Pdump buffer will be rejected for no client connected case. ++ * So return early and save reading of data from PMR. */ ++ return PVRSRV_OK; ++ } ++ ++ /* Get the correct PDump stream file name */ ++ if (bZero) ++ { ++ PDumpCommentWithFlags(psDevNode, ++ uiPDumpFlags, ++ "Zeroing allocation (" IMG_DEVMEM_SIZE_FMTSPEC " bytes)", ++ uiSize); ++ ++ /* get the zero page information. it is constant for this function */ ++ PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset, ++ &uiBufSz, ++ &pszParamStreamFileName); ++ } ++ else ++ { ++ ++ uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR); ++ PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ); ++ ++ pcBuffer = OSAllocMem(uiBufSz); ++ ++ PVR_LOG_RETURN_IF_NOMEM(pcBuffer, "OSAllocMem"); ++ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ pszParamStreamFileName = aszParamStreamFilename; ++ } ++ ++ /* Loop over all touched symbolic addresses of the PMR and ++ * emit LDBs to load the contents. */ ++ while (uiCurrentOffset < (uiLogicalOffset + uiSize)) ++ { ++ /* Get the correct symbolic name for the current offset */ ++ eError = PMR_PDumpSymbolicAddr(psPMR, ++ uiCurrentOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiOutOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz); ++ ++ PMR_IsOffsetValid(psPMR, ++ 0, ++ 1, ++ uiCurrentOffset, ++ &bValid); ++ ++ /* Either just LDB the zeros or read from the PMR and store that ++ * in the pdump stream */ ++ if (bValid) ++ { ++ size_t uiNumBytes; ++ ++ if (bZero) ++ { ++ uiNumBytes = MIN(uiSizeRemain, uiNextSymName - uiCurrentOffset); ++ } ++ else ++ { ++ IMG_DEVMEM_OFFSET_T uiReadOffset; ++ uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ? 
++ uiLogicalOffset + uiSize - uiCurrentOffset : ++ uiNextSymName - uiCurrentOffset); ++ ++ eError = PMR_ReadBytes(psPMR, ++ uiCurrentOffset, ++ pcBuffer, ++ uiReadOffset, ++ &uiNumBytes); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = PDumpWriteParameterBlob(psDevNode, ++ pcBuffer, ++ uiNumBytes, ++ uiPDumpFlags, ++ &aszParamStreamFilename[0], ++ sizeof(aszParamStreamFilename), ++ &uiParamStreamFileOffset); ++ if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED) ++ { ++ /* Write to parameter file prevented under the flags and ++ * current state of the driver so skip further writes. ++ */ ++ eError = PVRSRV_OK; ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PDUMP_ERROR(psDevNode, ++ eError, "Failed to write PMR memory to parameter file"); ++ } ++ } ++ ++ /* Emit the LDB command to the current symbolic address */ ++ eError = PDumpPMRLDB(psDevNode, ++ aszMemspaceName, ++ aszSymbolicName, ++ uiOutOffset, ++ uiNumBytes, ++ pszParamStreamFileName, ++ uiParamStreamFileOffset, ++ uiPDumpFlags); ++ uiSizeRemain = uiSizeRemain - uiNumBytes; ++ } ++ uiCurrentOffset = uiNextSymName; ++ } ++ ++ if (!bZero) ++ { ++ eError = PMRUnlockSysPhysAddresses(psPMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ OSFreeMem(pcBuffer); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++ ++ ++PVRSRV_ERROR ++PMRPDumpSaveToFile(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 uiArraySize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 uiFileOffset) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiOutOffset; ++ IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName = 0; ++ IMG_UINT32 uiCurrentFileOffset = uiFileOffset; ++ ++ PVR_UNREFERENCED_PARAMETER(uiArraySize); ++ ++ /* Confirm that the device node's ui32InternalID matches the bound ++ * PDump device stored* in PVRSRV_DATA. ++ */ ++ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) ++ { ++ return PVRSRV_OK; ++ } ++ ++ PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize); ++ ++ while (uiCurrentOffset < (uiLogicalOffset + uiSize)) ++ { ++ IMG_DEVMEM_OFFSET_T uiReadOffset; ++ ++ eError = PMR_PDumpSymbolicAddr(psPMR, ++ uiCurrentOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiOutOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ PVR_ASSERT(uiNextSymName <= psPMR->uiLogicalSize); ++ ++ uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ? 
++ uiLogicalOffset + uiSize - uiCurrentOffset : ++ uiNextSymName - uiCurrentOffset); ++ ++ eError = PDumpPMRSAB(PMR_DeviceNode(psPMR), ++ aszMemspaceName, ++ aszSymbolicName, ++ uiOutOffset, ++ uiReadOffset, ++ pszFilename, ++ uiCurrentFileOffset); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ uiCurrentFileOffset += uiNextSymName - uiCurrentOffset; ++ uiCurrentOffset = uiNextSymName; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PMRPDumpPol32(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiPDumpOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; ++ ++ /* Confirm that the device node's ui32InternalID matches the bound ++ * PDump device stored* in PVRSRV_DATA. ++ */ ++ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) ++ { ++ return PVRSRV_OK; ++ } ++ ++ /* Make sure to not cross a block boundary */ ++ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) ++ <= uiPMRPageSize)); ++ ++ eError = PMR_PDumpSymbolicAddr(psPMR, ++ uiLogicalOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiPDumpOffset, ++ &uiNextSymName); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++#define _MEMPOLL_DELAY (1000) ++#define _MEMPOLL_COUNT (2000000000 / _MEMPOLL_DELAY) ++ ++ eError = PDumpPMRPOL(PMR_DeviceNode(psPMR), ++ aszMemspaceName, ++ aszSymbolicName, ++ uiPDumpOffset, ++ ui32Value, ++ ui32Mask, ++ eOperator, ++ _MEMPOLL_COUNT, ++ _MEMPOLL_DELAY, ++ uiPDumpFlags); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ return PVRSRV_OK; ++ ++ /* Error exit paths follow */ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMRPDumpCheck32(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiPDumpOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee; ++ ++ /* Confirm that the device node's ui32InternalID matches the bound ++ * PDump device stored* in PVRSRV_DATA. 
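++ * Dumps targeting a different device are silently skipped (PVRSRV_OK).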
++ */ ++ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) ++ { ++ return PVRSRV_OK; ++ } ++ ++ /* Make sure to not cross a block boundary */ ++ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value)) ++ < uiPMRPageSize)); ++ ++ eError = PMR_PDumpSymbolicAddr(psPMR, ++ uiLogicalOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiPDumpOffset, ++ &uiNextSymName); ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ eError = PDumpPMRPOL(PMR_DeviceNode(psPMR), ++ aszMemspaceName, ++ aszSymbolicName, ++ uiPDumpOffset, ++ ui32Value, ++ ui32Mask, ++ eOperator, ++ 1, ++ 1, ++ uiPDumpFlags); ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ return PVRSRV_OK; ++ ++ /* Error exit paths follow */ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMRPDumpCBP(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiReadOffset, ++ IMG_DEVMEM_OFFSET_T uiWriteOffset, ++ IMG_DEVMEM_SIZE_T uiPacketSize, ++ IMG_DEVMEM_SIZE_T uiBufferSize) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiPDumpOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ ++ /* Confirm that the device node's ui32InternalID matches the bound ++ * PDump device stored* in PVRSRV_DATA. ++ */ ++ if (!PDumpIsDevicePermitted(PMR_DeviceNode(psPMR))) ++ { ++ return PVRSRV_OK; ++ } ++ ++ eError = PMR_PDumpSymbolicAddr(psPMR, ++ uiReadOffset, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiPDumpOffset, ++ &uiNextSymName); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ eError = PDumpPMRCBP(PMR_DeviceNode(psPMR), ++ aszMemspaceName, ++ aszSymbolicName, ++ uiPDumpOffset, ++ uiWriteOffset, ++ uiPacketSize, ++ uiBufferSize); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ return PVRSRV_OK; ++ ++ /* Error exit paths follow */ ++e0: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static void ++PDumpPMRChangeSparsePMR(PMR *psPMR, ++ IMG_UINT32 uiBlockSize, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_BOOL bInitialise, ++ IMG_UINT32 ui32InitValue, ++ IMG_HANDLE *phPDumpAllocInfoOut) ++{ ++ PVRSRV_ERROR eError; ++ IMG_HANDLE *phPDumpAllocInfo = (IMG_HANDLE*) psPMR->hPDumpAllocHandle; ++ ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ IMG_UINT32 i, uiIndex; ++ PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); ++ ++ /* Remove pages from the PMR */ ++ for (i = 0; i < ui32FreePageCount; i++) ++ { ++ uiIndex = pai32FreeIndices[i]; ++ ++ eError = PDumpFree(psDevNode, ++ phPDumpAllocInfo[uiIndex]); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ phPDumpAllocInfo[uiIndex] = NULL; ++ } ++ ++ /* Add new pages to the PMR */ ++ for (i = 0; i < ui32AllocPageCount; i++) ++ { ++ uiIndex = pai32AllocIndices[i]; ++ ++ PVR_ASSERT(phPDumpAllocInfo[uiIndex] == NULL); ++ ++ eError = PMR_PDumpSymbolicAddr(psPMR, ++ uiIndex * uiBlockSize, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = PDumpMalloc(psDevNode, ++ aszMemspaceName, ++ aszSymbolicName, ++ uiBlockSize, ++ uiBlockSize, ++ bInitialise, ++ 
ui32InitValue, ++ &phPDumpAllocInfo[uiIndex], ++ PDUMP_NONE); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ } ++ ++ /* (IMG_HANDLE) <- (IMG_HANDLE*) */ ++ *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo; ++} ++ ++static void ++PDumpPMRFreePMR(PMR *psPMR, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiBlockSize, ++ IMG_UINT32 uiLog2Contiguity, ++ IMG_HANDLE hPDumpAllocationInfoHandle) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 i; ++ ++ /* (IMG_HANDLE*) <- (IMG_HANDLE) */ ++ IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle; ++ ++ for (i = 0; i < psPMR->uiNumPDumpBlocks; i++) ++ { ++ if (ahPDumpAllocHandleArray[i] != NULL) ++ { ++ eError = PDumpFree(PMR_DeviceNode(psPMR), ++ ahPDumpAllocHandleArray[i]); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ahPDumpAllocHandleArray[i] = NULL; ++ } ++ } ++ ++ OSFreeMem(ahPDumpAllocHandleArray); ++} ++ ++static void ++PDumpPMRMallocPMR(PMR *psPMR, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiBlockSize, ++ IMG_UINT32 ui32ChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *puiMappingTable, ++ IMG_UINT32 uiLog2Contiguity, ++ IMG_BOOL bInitialise, ++ IMG_UINT32 ui32InitValue, ++ IMG_HANDLE *phPDumpAllocInfoOut, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_HANDLE *phPDumpAllocInfo; ++ ++ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++ IMG_UINT32 uiNumPhysBlocks; ++ IMG_UINT32 uiNumVirtBlocks; ++ IMG_UINT32 i, uiIndex; ++ ++ if (PMR_IsSparse(psPMR)) ++ { ++ uiNumPhysBlocks = (ui32ChunkSize * ui32NumPhysChunks) >> uiLog2Contiguity; ++ /* Make sure we did not cut off anything */ ++ PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == (ui32ChunkSize * ui32NumPhysChunks)); ++ } ++ else ++ { ++ uiNumPhysBlocks = uiSize >> uiLog2Contiguity; ++ /* Make sure we did not cut off anything */ ++ PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == uiSize); ++ } ++ ++ uiNumVirtBlocks = uiSize >> uiLog2Contiguity; ++ PVR_ASSERT(uiNumVirtBlocks << uiLog2Contiguity == uiSize); ++ ++ psPMR->uiNumPDumpBlocks = uiNumVirtBlocks; ++ ++ phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE)); ++ ++ ++ for (i = 0; i < uiNumPhysBlocks; i++) ++ { ++ uiIndex = PMR_IsSparse(psPMR) ? puiMappingTable[i] : i; ++ ++ eError = PMR_PDumpSymbolicAddr(psPMR, ++ uiIndex * uiBlockSize, ++ sizeof(aszMemspaceName), ++ &aszMemspaceName[0], ++ sizeof(aszSymbolicName), ++ &aszSymbolicName[0], ++ &uiOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = PDumpMalloc(PMR_DeviceNode(psPMR), ++ aszMemspaceName, ++ aszSymbolicName, ++ uiBlockSize, ++ uiBlockSize, ++ bInitialise, ++ ui32InitValue, ++ &phPDumpAllocInfo[uiIndex], ++ ui32PDumpFlags); ++ PVR_LOG_RETURN_VOID_IF_FALSE((eError != PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE), ++ "PDumpPMRMalloc PDump capture bound to other device"); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ } ++ ++ /* (IMG_HANDLE) <- (IMG_HANDLE*) */ ++ *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo; ++ ++} ++#endif /* PDUMP */ ++ ++ ++void *PMRGetPrivateData(const PMR *psPMR, ++ const PMR_IMPL_FUNCTAB *psFuncTab) ++{ ++ return (psFuncTab == psPMR->psFuncTab) ? 
psPMR->pvFlavourData : NULL; ++} ++ ++#define PMR_PM_WORD_SIZE 4 ++ ++PVRSRV_ERROR ++PMRWritePMPageList(/* Target PMR, offset, and length */ ++ PMR *psPageListPMR, ++ IMG_DEVMEM_OFFSET_T uiTableOffset, ++ IMG_DEVMEM_SIZE_T uiTableLength, ++ /* Referenced PMR, and "page" granularity */ ++ PMR *psReferencePMR, ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, ++ PMR_PAGELIST **ppsPageList) ++{ ++ PVRSRV_ERROR eError; ++ IMG_DEVMEM_SIZE_T uiWordSize; ++ IMG_UINT32 uiNumPages; ++ IMG_UINT32 uiPageIndex; ++ PMR_FLAGS_T uiFlags = psPageListPMR->uiFlags; ++ PMR_PAGELIST *psPageList; ++#if defined(PDUMP) ++ IMG_CHAR aszTableEntryMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszTableEntrySymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset; ++ IMG_CHAR aszPageMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH]; ++ IMG_CHAR aszPageSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiPagePDumpOffset; ++ IMG_DEVMEM_OFFSET_T uiNextSymName; ++#endif ++#if !defined(NO_HARDWARE) ++ IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee; ++ IMG_UINT64 uiPageListPMRPage = 0; ++ IMG_UINT64 uiPrevPageListPMRPage = 0; ++ IMG_HANDLE hPrivData = NULL; ++ void *pvKernAddr = NULL; ++ IMG_UINT32 *pui32DataPtr = NULL; ++ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_DEV_PHYADDR *pasDevAddrPtr; ++ IMG_BOOL *pbPageIsValid; ++#endif ++ ++ uiWordSize = PMR_PM_WORD_SIZE; ++ ++ /* check we're being asked to write the same number of 4-byte units as there are pages */ ++ uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize); ++ ++ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize) ++ { ++ /* Strictly speaking, it's possible to provoke this error in two ways: ++ (i) if it's not a whole multiple of the page size; or ++ (ii) if there are more than 4 billion pages. ++ The latter is unlikely. :) but the check is required in order to justify the cast. 
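++ * (uiNumPages is truncated to IMG_UINT32 a few lines above, hence the guard.)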
++ */ ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error); ++ } ++ uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages; ++ if (uiNumPages * uiWordSize != uiTableLength) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, return_error); ++ } ++ ++ /* Check we're not being asked to write off the end of the PMR */ ++ PVR_GOTO_IF_INVALID_PARAM(uiTableOffset + uiTableLength <= psPageListPMR->uiLogicalSize, eError, return_error); ++ ++ /* the PMR into which we are writing must not be user CPU mappable: */ ++ if (PVRSRV_CHECK_CPU_READABLE(uiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(uiFlags)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Masked flags = 0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC, ++ (PMR_FLAGS_T)(uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)))); ++ PVR_DPF((PVR_DBG_ERROR, ++ "Page list PMR allows CPU mapping (0x%" PVRSRV_MEMALLOCFLAGS_FMTSPEC ")", ++ uiFlags)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS, return_error); ++ } ++ ++ if (_PMRIsSparse(psPageListPMR)) ++ { ++ PVR_LOG_GOTO_WITH_ERROR("psPageListPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, return_error); ++ } ++ ++ if (_PMRIsSparse(psReferencePMR)) ++ { ++ PVR_LOG_GOTO_WITH_ERROR("psReferencePMR", eError, PVRSRV_ERROR_INVALID_PARAMS, return_error); ++ } ++ ++ psPageList = OSAllocMem(sizeof(PMR_PAGELIST)); ++ PVR_LOG_GOTO_IF_NOMEM(psPageList, eError, return_error); ++ ++ psPageList->psReferencePMR = psReferencePMR; ++ ++ /* Need to lock down the physical addresses of the reference PMR */ ++ /* N.B. This also checks that the requested "contiguity" is achievable */ ++ eError = PMRLockSysPhysAddresses(psReferencePMR); ++ PVR_GOTO_IF_ERROR(eError, free_page_list); ++ ++#if !defined(NO_HARDWARE) ++ if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC) ++ { ++ pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR)); ++ PVR_LOG_GOTO_IF_NOMEM(pasDevAddrPtr, eError, unlock_phys_addrs); ++ ++ pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL)); ++ if (pbPageIsValid == NULL) ++ { ++ /* Clean-up before exit */ ++ OSFreeMem(pasDevAddrPtr); ++ ++ PVR_LOG_GOTO_WITH_ERROR("pbPageIsValid", eError, PVRSRV_ERROR_OUT_OF_MEMORY, free_devaddr_array); ++ } ++ } ++ else ++ { ++ pasDevAddrPtr = asDevPAddr; ++ pbPageIsValid = abValid; ++ } ++ ++ eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0, ++ pasDevAddrPtr, pbPageIsValid); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PMR_DevPhysAddr", free_valid_array); ++#endif ++ ++ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++) ++ { ++ IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex); ++ ++#if defined(PDUMP) ++ eError = PMR_PDumpSymbolicAddr(psPageListPMR, ++ uiPMROffset, ++ sizeof(aszTableEntryMemspaceName), ++ &aszTableEntryMemspaceName[0], ++ sizeof(aszTableEntrySymbolicName), ++ &aszTableEntrySymbolicName[0], ++ &uiTableEntryPDumpOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = PMR_PDumpSymbolicAddr(psReferencePMR, ++ (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize, ++ sizeof(aszPageMemspaceName), ++ &aszPageMemspaceName[0], ++ sizeof(aszPageSymbolicName), ++ &aszPageSymbolicName[0], ++ &uiPagePDumpOffset, ++ &uiNextSymName); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ eError = PDumpWriteShiftedMaskedValue(PMR_DeviceNode(psReferencePMR), ++ /* destination */ ++ aszTableEntryMemspaceName, ++ aszTableEntrySymbolicName, ++ uiTableEntryPDumpOffset, ++ /* source */ ++ aszPageMemspaceName, ++ aszPageSymbolicName, ++ 
uiPagePDumpOffset, ++ /* shift right */ ++ uiLog2PageSize, ++ /* shift left */ ++ 0, ++ /* mask */ ++ 0xffffffff, ++ /* word size */ ++ uiWordSize, ++ /* flags */ ++ PDUMP_FLAGS_CONTINUOUS); ++ PVR_ASSERT(eError == PVRSRV_OK); ++#else ++ PVR_UNREFERENCED_PARAMETER(uiPMROffset); ++#endif ++ ++#if !defined(NO_HARDWARE) ++ ++ /* ++ We check for sparse PMR's at function entry, but as we can, ++ check that every page is valid ++ */ ++ PVR_ASSERT(pbPageIsValid[uiPageIndex]); ++ PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0); ++ PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); ++ ++ uiPageListPMRPage = uiPMROffset >> psReferencePMR->uiLog2ContiguityGuarantee; ++ ++ if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage)) ++ { ++ size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1)); ++ size_t uiMappedSize; ++ ++ /* If we already had a page list mapped, we need to unmap it... */ ++ if (pui32DataPtr != NULL) ++ { ++ PMRReleaseKernelMappingData(psPageListPMR, hPrivData); ++ } ++ ++ eError = PMRAcquireKernelMappingData(psPageListPMR, ++ uiMappingOffset, ++ uiPageListPageSize, ++ &pvKernAddr, ++ &uiMappedSize, ++ &hPrivData); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%" IMG_UINT64_FMTSPEC ") into kernel (%d)", ++ uiPageListPMRPage, eError)); ++ goto free_valid_array; ++ } ++ ++ uiPrevPageListPMRPage = uiPageListPMRPage; ++ PVR_ASSERT(uiMappedSize >= uiPageListPageSize); ++ PVR_ASSERT(pvKernAddr != NULL); ++ ++ pui32DataPtr = IMG_OFFSET_ADDR(pvKernAddr, (uiPMROffset & (uiPageListPageSize - 1))); ++ } ++ ++ PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0); ++ ++ /* Write the physical page index into the page list PMR */ ++ *pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize); ++ ++ /* Last page so unmap */ ++ if (uiPageIndex == (uiNumPages - 1)) ++ { ++ PMRReleaseKernelMappingData(psPageListPMR, hPrivData); ++ } ++#endif ++ } ++ ++ /* if this memory is allocated as write-combine we must flush write ++ * buffers */ ++ if (PVRSRV_CHECK_CPU_WRITE_COMBINE(psPageListPMR->uiFlags)) ++ { ++ OSWriteMemoryBarrier(NULL); ++ } ++ ++#if !defined(NO_HARDWARE) ++ if (pasDevAddrPtr != asDevPAddr) ++ { ++ OSFreeMem(pbPageIsValid); ++ OSFreeMem(pasDevAddrPtr); ++ } ++#endif ++ *ppsPageList = psPageList; ++ return PVRSRV_OK; ++ ++ /* Error exit paths follow */ ++#if !defined(NO_HARDWARE) ++ ++free_valid_array: ++ if (pbPageIsValid != abValid) ++ { ++ OSFreeMem(pbPageIsValid); ++ } ++ ++free_devaddr_array: ++ if (pasDevAddrPtr != asDevPAddr) ++ { ++ OSFreeMem(pasDevAddrPtr); ++ } ++ ++unlock_phys_addrs: ++ PMRUnlockSysPhysAddresses(psReferencePMR); ++#endif ++ ++free_page_list: ++ OSFreeMem(psPageList); ++ ++return_error: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++ ++PVRSRV_ERROR ++PMRUnwritePMPageList(PMR_PAGELIST *psPageList) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = PMRUnlockSysPhysAddresses(psPageList->psReferencePMR); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ OSFreeMem(psPageList); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PMRZeroingPMR(PMR *psPMR, ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize) ++{ ++ IMG_UINT32 uiNumPages; ++ IMG_UINT32 uiPageIndex; ++ IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize; ++ IMG_HANDLE hPrivData = NULL; ++ void *pvKernAddr = NULL; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ size_t uiMappedSize; ++ ++ PVR_ASSERT(psPMR); ++ ++ /* Calculate number of 
pages in this PMR */
++	uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
++
++	/* Verify the logical size is a multiple of the physical page size */
++	if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		        "%s: PMR is not a multiple of %u",
++		        __func__,
++		        ui32PageSize));
++		PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error);
++	}
++
++	if (_PMRIsSparse(psPMR))
++	{
++		PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error);
++	}
++
++	/* Scan through all pages of the PMR */
++	for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
++	{
++		/* map the physical page (for a given PMR offset) into kernel space */
++		eError = PMRAcquireKernelMappingData(psPMR,
++		                                     (size_t)uiPageIndex << uiLog2PageSize,
++		                                     ui32PageSize,
++		                                     &pvKernAddr,
++		                                     &uiMappedSize,
++		                                     &hPrivData);
++		PVR_LOG_GOTO_IF_ERROR(eError, "PMRAcquireKernelMappingData", AcquireKernelMapping_Error);
++
++		/* ensure the mapped page size is the same as the physical page size */
++		if (uiMappedSize != ui32PageSize)
++		{
++			PVR_DPF((PVR_DBG_ERROR,
++			        "%s: Physical Page size = 0x%08x, Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx,
++			        __func__,
++			        ui32PageSize,
++			        (IMG_UINT64)uiMappedSize));
++			PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, MappingSize_Error);
++		}
++
++		/* Use the conservative 'DeviceMemSet' here because we can't know
++		 * if this PMR will be mapped cached.
++		 */
++		OSDeviceMemSet(pvKernAddr, 0, ui32PageSize);
++
++		/* release mapping */
++		PMRReleaseKernelMappingData(psPMR, hPrivData);
++
++	}
++
++	PVR_DPF((PVR_DBG_MESSAGE,
++	        "%s: Zeroing PMR %p done (num pages %u, page size %u)",
++	        __func__,
++	        psPMR,
++	        uiNumPages,
++	        ui32PageSize));
++
++	return PVRSRV_OK;
++
++
++	/* Error handling */
++
++MappingSize_Error:
++	PMRReleaseKernelMappingData(psPMR, hPrivData);
++
++AcquireKernelMapping_Error:
++Sparse_Error:
++MultiPage_Error:
++
++	PVR_ASSERT(eError != PVRSRV_OK);
++	return eError;
++}
++
++PVRSRV_ERROR
++PMRDumpPageList(PMR *psPMR,
++                IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
++{
++	IMG_DEV_PHYADDR sDevAddrPtr;
++	IMG_UINT32 uiNumPages;
++	IMG_UINT32 uiPageIndex;
++	IMG_BOOL bPageIsValid;
++	IMG_UINT32 ui32Col = 16;
++	IMG_UINT32 ui32SizePerCol = 11;
++	IMG_UINT32 ui32ByteCount = 0;
++	IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1];
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	/* Get number of pages */
++	uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
++
++	/* Verify the logical size is a multiple of the physical page size */
++	if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: PMR is not a multiple of %" IMG_UINT64_FMTSPEC,
++		        __func__, (IMG_UINT64) (1ULL << uiLog2PageSize)));
++		PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE, MultiPage_Error);
++	}
++
++	if (_PMRIsSparse(psPMR))
++	{
++		PVR_LOG_GOTO_WITH_ERROR("psPMR", eError, PVRSRV_ERROR_INVALID_PARAMS, Sparse_Error);
++	}
++
++	PVR_LOG((" PMR %p, Number of pages %u, Log2PageSize %d", psPMR, uiNumPages, uiLog2PageSize));
++
++	/* Print the address of the physical pages */
++	for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
++	{
++		/* Get Device physical Address */
++		eError = PMR_DevPhysAddr(psPMR,
++		                         uiLog2PageSize,
++		                         1,
++		                         (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
++		                         &sDevAddrPtr,
++		                         &bPageIsValid);
++		if (eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: PMR %p failed to get
DevPhysAddr with error %u", ++ __func__, ++ psPMR, ++ eError)); ++ goto DevPhysAddr_Error; ++ } ++ ++ ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize)); ++ PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol); ++ ++ if (uiPageIndex % ui32Col == ui32Col-1) ++ { ++ PVR_LOG((" Phys Page: %s", pszBuffer)); ++ ui32ByteCount = 0; ++ } ++ } ++ if (ui32ByteCount > 0) ++ { ++ PVR_LOG((" Phys Page: %s", pszBuffer)); ++ } ++ ++ return PVRSRV_OK; ++ ++ /* Error handling */ ++DevPhysAddr_Error: ++Sparse_Error: ++MultiPage_Error: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMRInit(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Singleton PMR context already initialised */ ++ if (_gsSingletonPMRContext.bModuleInitialised) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); ++ } ++ ++ eError = OSLockCreate(&_gsSingletonPMRContext.hLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", out); ++ ++ _gsSingletonPMRContext.uiNextSerialNum = 1; ++ ++ _gsSingletonPMRContext.uiNextKey = 0x8300f001 * (uintptr_t)&_gsSingletonPMRContext; ++ ++ _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE; ++ ++ _gsSingletonPMRContext.uiNumLivePMRs = 0; ++ ++#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) ++ eError = MMapStatsInit(); ++ PVR_LOG_GOTO_IF_ERROR(eError, "MMapStatsInit", out); ++#endif ++ ++out: ++ PVR_ASSERT(eError == PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR ++PMRDeInit(void) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ goto out; ++ } ++ ++ /* Singleton PMR context is not initialised */ ++ if (!_gsSingletonPMRContext.bModuleInitialised) ++ { ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); ++ } ++ ++#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS) ++ MMapStatsDeInit(); ++#endif ++ ++ if (_gsSingletonPMRContext.uiNumLivePMRs != 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Error: %d live PMRs remain", ++ __func__, ++ _gsSingletonPMRContext.uiNumLivePMRs)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: This is an unrecoverable error; a subsequent crash is inevitable", ++ __func__)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, out); ++ } ++ ++ OSLockDestroy(_gsSingletonPMRContext.hLock); ++ ++ _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE; ++ ++out: ++ PVR_ASSERT(eError == PVRSRV_OK); ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/pmr.h b/drivers/gpu/drm/img-rogue/pmr.h +new file mode 100644 +index 000000000000..6a1f0e8442f7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pmr.h +@@ -0,0 +1,1023 @@ ++/*************************************************************************/ /*! ++@File ++@Title Physmem (PMR) abstraction ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of the memory management. This module is responsible for ++ the "PMR" abstraction. A PMR (Physical Memory Resource) ++ represents some unit of physical memory which is ++ allocated/freed/mapped/unmapped as an indivisible unit ++ (higher software levels provide an abstraction above that ++ to deal with dividing this down into smaller manageable units). ++ Importantly, this module knows nothing of virtual memory, or ++ of MMUs etc., with one excusable exception. 
We have the ++ concept of a "page size", which really means nothing in ++ physical memory, but represents a "contiguity quantum" such ++ that the higher level modules which map this memory are able ++ to verify that it matches the needs of the page size for the ++ virtual realm into which it is being mapped. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#ifndef SRVSRV_PMR_H
++#define SRVSRV_PMR_H
++
++/* include/ */
++#include "img_types.h"
++#include "img_defs.h"
++#include "pdumpdefs.h"
++#include "pvrsrv_error.h"
++#include "pvrsrv_memallocflags.h"
++#include "devicemem_typedefs.h" /* Required for export DEVMEM_EXPORTCOOKIE */
++
++/* services/include */
++#include "pdump.h"
++#include "physheap.h"
++
++/* services/server/include/ */
++#include "pmr_impl.h"
++#include "opaque_types.h"
++
++#define PMR_MAX_TRANSLATION_STACK_ALLOC (32)
++
++/* Maximum number of pages a PMR can have is 1G of memory */
++#define PMR_MAX_SUPPORTED_PAGE_COUNT (262144)
++
++typedef IMG_UINT64 PMR_BASE_T;
++typedef IMG_UINT64 PMR_SIZE_T;
++#define PMR_SIZE_FMTSPEC "0x%010"IMG_UINT64_FMTSPECX
++#define PMR_VALUE32_FMTSPEC "0x%08X"
++#define PMR_VALUE64_FMTSPEC "0x%016"IMG_UINT64_FMTSPECX
++typedef IMG_UINT32 PMR_LOG2ALIGN_T;
++typedef IMG_UINT64 PMR_PASSWORD_T;
++
++struct _PMR_MAPPING_TABLE_
++{
++	PMR_SIZE_T uiChunkSize; /*!< Size of a "chunk" */
++	IMG_UINT32 ui32NumPhysChunks; /*!< Number of physical chunks that are valid */
++	IMG_UINT32 ui32NumVirtChunks; /*!< Number of virtual chunks in the mapping */
++	/* Must be last */
++	IMG_UINT32 aui32Translation[1]; /*!< Translation mapping for "logical" to physical */
++};
++
++#define TRANSLATION_INVALID 0xFFFFFFFFUL
++
++typedef struct _PMR_EXPORT_ PMR_EXPORT;
++
++typedef struct _PMR_PAGELIST_ PMR_PAGELIST;
++
++/*
++ * PMRCreatePMR
++ *
++ * Not to be called directly, only via implementations of PMR
++ * factories, e.g. in physmem_osmem.c, deviceclass.c, etc.
++ *
++ * Creates a PMR object, with callbacks and private data as per the
++ * FuncTab/PrivData args.
++ *
++ * Note that at creation time the PMR must set in stone the "logical
++ * size" and the "contiguity guarantee"
++ *
++ * Flags are also set at this time. (T.B.D. flags also immutable for
++ * the life of the PMR?)
++ *
++ * Logical size is the amount of Virtual space this allocation would
++ * take up when mapped. Note that this does not have to be the same
++ * as the actual physical size of the memory. For example, consider
++ * the sparsely allocated non-power-of-2 texture case. In this
++ * instance, the "logical size" would be the virtual size of the
++ * rounded-up power-of-2 texture. That some pages of physical memory
++ * may not exist does not affect the logical size calculation.
++ *
++ * The PMR must also supply the "contiguity guarantee" which is the
++ * finest granularity of alignment and size of physical pages that the
++ * PMR will provide after LockSysPhysAddresses is called. Note that
++ * the calling code may choose to call PMRSysPhysAddr with a finer
++ * granularity than this, for example if it were to map into a device
++ * MMU with a smaller page size, and it's also OK for the PMR to
++ * supply physical memory in larger chunks than this. But
++ * importantly, never the other way around.
++ *
++ * More precisely, the following inequality must be maintained
++ * whenever mappings and/or physical addresses exist:
++ *
++ * (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory)
++ *
++ * The function table will contain the following callbacks which may
++ * be overridden by the PMR implementation:
++ *
++ * pfnLockPhysAddresses
++ *
++ * Called when someone requests that physical pages are to
++ * be locked down via the PMRLockSysPhysAddresses() API.
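++ *
++ *       For illustration only (hypothetical variables, error handling
++ *       elided), the caller-side sequence that drives this pair of
++ *       callbacks is:
++ *
++ *         eError = PMRLockSysPhysAddresses(psPMR);
++ *         ... physical addresses may now be queried, e.g. ...
++ *         eError = PMR_DevPhysAddr(psPMR, uiLog2PageSize, 1, 0,
++ *                                  &sDevPAddr, &bValid);
++ *         PMRUnlockSysPhysAddresses(psPMR);
++ *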
Note ++ * that if physical pages are prefaulted at PMR creation time and ++ * therefore static, it would not be necessary to override this ++ * function, in which case NULL may be supplied. ++ * ++ * pfnUnlockPhysAddresses ++ * ++ * The reverse of pfnLockPhysAddresses. Note that this should be ++ * NULL if and only if pfnLockPhysAddresses is NULL ++ * ++ * pfnSysPhysAddr ++ * ++ * This function is mandatory. This is the one which returns the ++ * system physical address for a given offset into this PMR. The ++ * "lock" function will have been called, if overridden, before ++ * this function, thus the implementation should not increase any ++ * refcount when answering this call. Refcounting, if necessary, ++ * should be done in the lock/unlock calls. Refcounting would ++ * not be necessary in the prefaulted/static scenario, as the ++ * pmr.c abstraction will handle the refcounting for the whole ++ * PMR. ++ * ++ * pfnFinalize ++ * ++ * Called when the PMR's refcount reaches zero and it gets ++ * destroyed. This allows the implementation to free up any ++ * resource acquired during creation time. ++ * ++ */ ++PVRSRV_ERROR ++PMRCreatePMR(PHYS_HEAP *psPhysHeap, ++ PMR_SIZE_T uiLogicalSize, ++ PMR_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee, ++ PMR_FLAGS_T uiFlags, ++ const IMG_CHAR *pszAnnotation, ++ const PMR_IMPL_FUNCTAB *psFuncTab, ++ PMR_IMPL_PRIVDATA pvPrivData, ++ PMR_IMPL_TYPE eType, ++ PMR **ppsPMRPtr, ++ IMG_UINT32 ui32PDumpFlags); ++ ++/* ++ * PMRLockSysPhysAddresses() ++ * ++ * Calls the relevant callback to lock down the system physical addresses of ++ * the memory that makes up the whole PMR. ++ * ++ * Before this call, it is not valid to use any of the information ++ * getting APIs: PMR_Flags(), PMR_SysPhysAddr(), ++ * [ see note below about lock/unlock semantics ] ++ * ++ * The caller of this function does not have to care about how the PMR ++ * is implemented. He only has to know that he is allowed access to ++ * the physical addresses _after_ calling this function and _until_ ++ * calling PMRUnlockSysPhysAddresses(). ++ * ++ * ++ * Notes to callback implementers (authors of PMR Factories): ++ * ++ * Some PMR implementations will be such that the physical memory exists for ++ * the lifetime of the PMR, with a static address, (and normally flags and ++ * symbolic address are static too) and so it is legal for a PMR ++ * implementation to not provide an implementation for the lock callback. ++ * ++ * Some PMR implementation may wish to page memory in from secondary storage ++ * on demand. The lock/unlock callbacks _may_ be the place to do this. ++ * (More likely, there would be a separate API for doing this, but this API ++ * provides a useful place to assert that it has been done) ++ */ ++ ++PVRSRV_ERROR ++PMRLockSysPhysAddresses(PMR *psPMR); ++ ++PVRSRV_ERROR ++PMRLockSysPhysAddressesNested(PMR *psPMR, ++ IMG_UINT32 ui32NestingLevel); ++ ++/* ++ * PMRUnlockSysPhysAddresses() ++ * ++ * the reverse of PMRLockSysPhysAddresses() ++ */ ++PVRSRV_ERROR ++PMRUnlockSysPhysAddresses(PMR *psPMR); ++ ++PVRSRV_ERROR ++PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel); ++ ++ ++/*************************************************************************/ /*! ++@Function PMRUnpinPMR ++@Description This is the counterpart to PMRPinPMR(). It is meant to be ++ called before repinning an allocation. 
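++
++             A minimal, purely illustrative round trip (hypothetical
++             variables, error handling elided) looks like:
++
++               eError = PMRUnpinPMR(psPMR, IMG_FALSE);
++               ... the OS may now reclaim the backing pages ...
++               eError = PMRPinPMR(psPMR);
++               if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
++                   ... contents were lost; the caller must
++                       reinitialise the allocation ...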
++ ++ For a detailed description see client API documentation. ++ ++@Input psPMR The physical memory to unpin. ++ ++@Input bDevMapped A flag that indicates if this PMR has been ++ mapped to device virtual space. ++ Needed to check if this PMR is allowed to be ++ unpinned or not. ++ ++@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is ++ registered to be reclaimed. Error otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped); ++ ++/*************************************************************************/ /*! ++@Function PMRPinPMR ++@Description This is the counterpart to PMRUnpinPMR(). It is meant to be ++ called after unpinning an allocation. ++ ++ For a detailed description see client API documentation. ++ ++@Input psPMR The physical memory to pin. ++ ++@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content ++ was successfully restored. ++ ++ PVRSRV_ERROR_PMR_NEW_MEMORY when the content ++ could not be restored and new physical memory ++ was allocated. ++ ++ A different error otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR PMRPinPMR(PMR *psPMR); ++ ++/* ++ * PhysmemPMRExport() ++ * ++ * Given a PMR, creates a PMR "Export", which is a handle that ++ * provides sufficient data to be able to "import" this PMR elsewhere. ++ * The PMR Export is an object in its own right, whose existence ++ * implies a reference on the PMR, thus the PMR cannot be destroyed ++ * while the PMR Export exists. The intention is that the PMR Export ++ * will be wrapped in the devicemem layer by a cross process handle, ++ * and some IPC by which to communicate the handle value and password ++ * to other processes. The receiving process is able to unwrap this ++ * to gain access to the same PMR Export in this layer, and, via ++ * PhysmemPMRImport(), obtain a reference to the original PMR. ++ * ++ * The caller receives, along with the PMR Export object, information ++ * about the size and contiguity guarantee for the PMR, and also the ++ * PMRs secret password, in order to authenticate the subsequent ++ * import. ++ * ++ * N.B. If you call PMRExportPMR() (and it succeeds), you are ++ * promising to later call PMRUnexportPMR() ++ */ ++PVRSRV_ERROR ++PMRExportPMR(PMR *psPMR, ++ PMR_EXPORT **ppsPMRExport, ++ PMR_SIZE_T *puiSize, ++ PMR_LOG2ALIGN_T *puiLog2Contig, ++ PMR_PASSWORD_T *puiPassword); ++ ++/*! ++******************************************************************************* ++ ++ @Function PMRMakeLocalImportHandle ++ ++ @Description ++ ++ Transform a general handle type into one that we are able to import. ++ Takes a PMR reference. ++ ++ @Input psPMR The input PMR. ++ @Output ppsPMR The output PMR that is going to be transformed to the ++ correct handle type. ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR ++PMRMakeLocalImportHandle(PMR *psPMR, ++ PMR **ppsPMR); ++ ++/*! ++******************************************************************************* ++ ++ @Function PMRUnmakeLocalImportHandle ++ ++ @Description ++ ++ Take a PMR, destroy the handle and release a reference. ++ Counterpart to PMRMakeServerExportClientExport(). ++ ++ @Input psPMR PMR to destroy. ++ Created by PMRMakeLocalImportHandle(). 
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR
++PMRUnmakeLocalImportHandle(PMR *psPMR);
++
++/*
++ * PMRUnexportPMR()
++ *
++ * The reverse of PMRExportPMR(). This causes the PMR to no longer be
++ * exported. If the PMR has already been imported, the imported PMR
++ * reference will still be valid, but no further imports will be possible.
++ */
++PVRSRV_ERROR
++PMRUnexportPMR(PMR_EXPORT *psPMRExport);
++
++/*
++ * PMRImportPMR()
++ *
++ * Takes a PMR Export object, as obtained by PMRExportPMR(), and
++ * obtains a reference to the original PMR.
++ *
++ * The password must match, and is assumed to have been (by whatever
++ * means, IPC etc.) preserved intact from the former call to
++ * PMRExportPMR()
++ *
++ * The size and contiguity arguments are entirely irrelevant for the
++ * import, however they are verified in order to trap bugs.
++ *
++ * N.B. If you call PhysmemPMRImport() (and it succeeds), you are
++ * promising to later call PhysmemPMRUnimport()
++ */
++PVRSRV_ERROR
++PMRImportPMR(PMR_EXPORT *psPMRExport,
++             PMR_PASSWORD_T uiPassword,
++             PMR_SIZE_T uiSize,
++             PMR_LOG2ALIGN_T uiLog2Contig,
++             PMR **ppsPMR);
++
++/* Function that alters the mutability property
++ * of the PMR
++ * Setting it to TRUE makes sure the PMR memory layout
++ * can't be changed through future calls */
++void
++PMR_SetLayoutFixed(PMR *psPMR, IMG_BOOL bFlag);
++
++IMG_BOOL PMR_IsMemLayoutFixed(PMR *psPMR);
++
++/*
++ * PMRUnimportPMR()
++ *
++ * releases the reference on the PMR as obtained by PMRImportPMR()
++ */
++PVRSRV_ERROR
++PMRUnimportPMR(PMR *psPMR);
++
++PVRSRV_ERROR
++PMRLocalImportPMR(PMR *psPMR,
++                  PMR **ppsPMR,
++                  IMG_DEVMEM_SIZE_T *puiSize,
++                  IMG_DEVMEM_ALIGN_T *puiAlign);
++
++/*
++ * Equivalent mapping functions when in kernel mode.
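++ *
++ * For illustration, a typical acquire/use/release sequence (sketch
++ * only; variable names hypothetical, error handling elided) mirrors
++ * what PMRZeroingPMR() does internally:
++ *
++ *   void *pvKernAddr;
++ *   size_t uiMappedSize;
++ *   IMG_HANDLE hPriv;
++ *
++ *   eError = PMRAcquireKernelMappingData(psPMR, 0, uiSize,
++ *                                        &pvKernAddr, &uiMappedSize,
++ *                                        &hPriv);
++ *   OSDeviceMemSet(pvKernAddr, 0, uiMappedSize);
++ *   PMRReleaseKernelMappingData(psPMR, hPriv);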
++ */ ++PVRSRV_ERROR ++PMRAcquireKernelMappingData(PMR *psPMR, ++ size_t uiLogicalOffset, ++ size_t uiSize, ++ void **ppvKernelAddressOut, ++ size_t *puiLengthOut, ++ IMG_HANDLE *phPrivOut); ++ ++PVRSRV_ERROR ++PMRAcquireSparseKernelMappingData(PMR *psPMR, ++ size_t uiLogicalOffset, ++ size_t uiSize, ++ void **ppvKernelAddressOut, ++ size_t *puiLengthOut, ++ IMG_HANDLE *phPrivOut); ++ ++PVRSRV_ERROR ++PMRReleaseKernelMappingData(PMR *psPMR, ++ IMG_HANDLE hPriv); ++ ++/* ++ * PMR_ReadBytes() ++ * ++ * calls into the PMR implementation to read up to uiBufSz bytes, ++ * returning the actual number read in *puiNumBytes ++ * ++ * this will read up to the end of the PMR, or the next symbolic name ++ * boundary, or until the requested number of bytes is read, whichever ++ * comes first ++ * ++ * In the case of sparse PMR's the caller doesn't know what offsets are ++ * valid and which ones aren't so we will just write 0 to invalid offsets ++ */ ++PVRSRV_ERROR ++PMR_ReadBytes(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes); ++ ++/* ++ * PMR_WriteBytes() ++ * ++ * calls into the PMR implementation to write up to uiBufSz bytes, ++ * returning the actual number read in *puiNumBytes ++ * ++ * this will write up to the end of the PMR, or the next symbolic name ++ * boundary, or until the requested number of bytes is written, whichever ++ * comes first ++ * ++ * In the case of sparse PMR's the caller doesn't know what offsets are ++ * valid and which ones aren't so we will just ignore data at invalid offsets ++ */ ++PVRSRV_ERROR ++PMR_WriteBytes(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes); ++ ++/*************************************************************************/ /*! ++@Function PMRMMapPMR ++@Description Performs the necessary steps to map the PMR into a user process ++ address space. The caller does not need to call ++ PMRLockSysPhysAddresses before calling this function. ++ ++@Input psPMR PMR to map. ++ ++@Input pOSMMapData OS specific data needed to create a mapping. ++ ++@Return PVRSRV_ERROR: PVRSRV_OK on success or an error otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData); ++ ++/* ++ * PMRRefPMR() ++ * ++ * Take a reference on the passed in PMR ++ */ ++void ++PMRRefPMR(PMR *psPMR); ++ ++/* ++ * PMRUnrefPMR() ++ * ++ * This undoes a call to any of the PhysmemNew* family of APIs ++ * (i.e. any PMR factory "constructor") ++ * ++ * This relinquishes a reference to the PMR, and, where the refcount ++ * reaches 0, causes the PMR to be destroyed (calling the finalizer ++ * callback on the PMR, if there is one) ++ */ ++PVRSRV_ERROR ++PMRUnrefPMR(PMR *psPMR); ++ ++/* ++ * PMRUnrefUnlockPMR() ++ * ++ * Same as above but also unlocks the PMR. ++ */ ++PVRSRV_ERROR ++PMRUnrefUnlockPMR(PMR *psPMR); ++ ++PPVRSRV_DEVICE_NODE ++PMR_DeviceNode(const PMR *psPMR); ++ ++/* ++ * PMRIsPMRLive() ++ * ++ * This function returns true if the PMR is in use and false otherwise. ++ * This function is not thread safe and hence the caller needs to ensure the ++ * thread safety by explicitly taking PMR or through other means. ++ */ ++IMG_BOOL PMRIsPMRLive(PMR *psPMR); ++ ++/* ++ * PMR_Flags() ++ * ++ * Flags are static and guaranteed for the life of the PMR. Thus this ++ * function is idempotent and acquire/release semantics is not required. 
++ * ++ * Returns the flags as specified on the PMR. The flags are to be ++ * interpreted as mapping permissions ++ */ ++PMR_FLAGS_T ++PMR_Flags(const PMR *psPMR); ++ ++IMG_BOOL ++PMR_IsSparse(const PMR *psPMR); ++ ++IMG_BOOL ++PMR_IsUnpinned(const PMR *psPMR); ++ ++void ++PMR_LogicalSize(const PMR *psPMR, ++ IMG_DEVMEM_SIZE_T *puiLogicalSize); ++ ++PVRSRV_ERROR ++PMR_PhysicalSize(const PMR *psPMR, ++ IMG_DEVMEM_SIZE_T *puiPhysicalSize); ++ ++PHYS_HEAP * ++PMR_PhysHeap(const PMR *psPMR); ++ ++PMR_MAPPING_TABLE * ++PMR_GetMappingTable(const PMR *psPMR); ++ ++IMG_UINT32 ++PMR_GetLog2Contiguity(const PMR *psPMR); ++ ++const IMG_CHAR * ++PMR_GetAnnotation(const PMR *psPMR); ++ ++/* ++ * PMR_IsOffsetValid() ++ * ++ * Returns if an address offset inside a PMR has a valid ++ * physical backing. ++ */ ++PVRSRV_ERROR ++PMR_IsOffsetValid(const PMR *psPMR, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_BOOL *pbValid); ++ ++PMR_IMPL_TYPE ++PMR_GetType(const PMR *psPMR); ++ ++IMG_INT32 ++PMR_GetRefCount(const PMR *psPMR); ++ ++/* ++ * PMR_DevPhysAddr() ++ * ++ * A note regarding Lock/Unlock semantics ++ * ====================================== ++ * ++ * PMR_DevPhysAddr may only be called after PMRLockSysPhysAddresses() ++ * has been called. The data returned may be used only until ++ * PMRUnlockSysPhysAddresses() is called after which time the licence ++ * to use the data is revoked and the information may be invalid. ++ * ++ * Given an offset, this function returns the device physical address of the ++ * corresponding page in the PMR. It may be called multiple times ++ * until the address of all relevant pages has been determined. ++ * ++ * If caller only wants one physical address it is sufficient to pass in: ++ * ui32Log2PageSize==0 and ui32NumOfPages==1 ++ */ ++PVRSRV_ERROR ++PMR_DevPhysAddr(const PMR *psPMR, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_DEV_PHYADDR *psDevAddr, ++ IMG_BOOL *pbValid); ++ ++/* ++ * PMR_CpuPhysAddr() ++ * ++ * See note above about Lock/Unlock semantics. ++ * ++ * Given an offset, this function returns the CPU physical address of the ++ * corresponding page in the PMR. It may be called multiple times ++ * until the address of all relevant pages has been determined. ++ * ++ */ ++PVRSRV_ERROR ++PMR_CpuPhysAddr(const PMR *psPMR, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_CPU_PHYADDR *psCpuAddrPtr, ++ IMG_BOOL *pbValid); ++ ++PVRSRV_ERROR ++PMRGetUID(PMR *psPMR, ++ IMG_UINT64 *pui64UID); ++/* ++ * PMR_ChangeSparseMem() ++ * ++ * See note above about Lock/Unlock semantics. ++ * ++ * This function alters the memory map of the given PMR in device space by ++ * adding/deleting the pages as requested. ++ * ++ */ ++PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_UINT32 uiSparseFlags); ++ ++/* ++ * PMR_ChangeSparseMemCPUMap() ++ * ++ * See note above about Lock/Unlock semantics. ++ * ++ * This function alters the memory map of the given PMR in CPU space by ++ * adding/deleting the pages as requested. 
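++ *
++ * For illustration, giving pages 0 and 2 of a sparse allocation a CPU
++ * mapping while dropping page 5 might look like this (indices are
++ * hypothetical):
++ *
++ *   IMG_UINT32 aui32Alloc[] = { 0, 2 };
++ *   IMG_UINT32 aui32Free[]  = { 5 };
++ *
++ *   eError = PMR_ChangeSparseMemCPUMap(psPMR, sCpuVAddrBase,
++ *                                      2, aui32Alloc,
++ *                                      1, aui32Free);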
++ */
++PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
++                                       IMG_UINT64 sCpuVAddrBase,
++                                       IMG_UINT32 ui32AllocPageCount,
++                                       IMG_UINT32 *pai32AllocIndices,
++                                       IMG_UINT32 ui32FreePageCount,
++                                       IMG_UINT32 *pai32FreeIndices);
++
++#if defined(PDUMP)
++
++/*
++ * PMR_PDumpSymbolicAddr()
++ *
++ * Given an offset, returns the pdump memspace name and symbolic
++ * address of the corresponding page in the PMR.
++ *
++ * Note that PDump memspace names and symbolic addresses are static
++ * and valid for the lifetime of the PMR, therefore we don't require
++ * acquire/release semantics here.
++ *
++ * Note that it is expected that the pdump "mapping" code will call
++ * this function multiple times as each page is mapped in turn
++ *
++ * Note that NextSymName is the offset from the base of the PMR to the
++ * next pdump symbolic address (or the end of the PMR if the PMR only
++ * had one PDUMPMALLOC).
++ */
++PVRSRV_ERROR
++PMR_PDumpSymbolicAddr(const PMR *psPMR,
++                      IMG_DEVMEM_OFFSET_T uiLogicalOffset,
++                      IMG_UINT32 ui32NamespaceNameLen,
++                      IMG_CHAR *pszNamespaceName,
++                      IMG_UINT32 ui32SymbolicAddrLen,
++                      IMG_CHAR *pszSymbolicAddr,
++                      IMG_DEVMEM_OFFSET_T *puiNewOffset,
++                      IMG_DEVMEM_OFFSET_T *puiNextSymName
++                      );
++
++/*
++ * PMRPDumpLoadMemValue32()
++ *
++ * writes the current contents of a dword in PMR memory to the pdump
++ * script stream. Useful for patching a buffer by simply editing the
++ * script output file in ASCII plain text.
++ *
++ */
++PVRSRV_ERROR
++PMRPDumpLoadMemValue32(PMR *psPMR,
++                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
++                       IMG_UINT32 ui32Value,
++                       PDUMP_FLAGS_T uiPDumpFlags);
++
++/*
++ * PMRPDumpCopyMem32
++ *
++ * Adds in the pdump script stream a copy of a dword in one PMR memory
++ * location to another PMR memory location.
++ *
++ */
++PVRSRV_ERROR
++PMRPDumpCopyMem32(PMR *psDstPMR,
++                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
++                  PMR *psSrcPMR,
++                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
++                  const IMG_CHAR *pszTmpVar,
++                  PDUMP_FLAGS_T uiPDumpFlags);
++
++/*
++ * PMRPDumpLoadMemValue64()
++ *
++ * writes the current contents of a quadword in PMR memory to the pdump
++ * script stream. Useful for patching a buffer by simply editing the
++ * script output file in ASCII plain text.
++ *
++ */
++PVRSRV_ERROR
++PMRPDumpLoadMemValue64(PMR *psPMR,
++                       IMG_DEVMEM_OFFSET_T uiLogicalOffset,
++                       IMG_UINT64 ui64Value,
++                       PDUMP_FLAGS_T uiPDumpFlags);
++
++/*
++ * PMRPDumpCopyMem64
++ *
++ * Adds in the pdump script stream a copy of a quadword in one PMR memory
++ * location to another PMR memory location.
++ */
++PVRSRV_ERROR
++PMRPDumpCopyMem64(PMR *psDstPMR,
++                  IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
++                  PMR *psSrcPMR,
++                  IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
++                  const IMG_CHAR *pszTmpVar,
++                  PDUMP_FLAGS_T uiPDumpFlags);
++
++/*
++ * PMRPDumpLoadMem()
++ *
++ * Writes the current contents of the PMR memory to the pdump PRM stream,
++ * and emits some PDump code to the script stream to LDB said bytes from
++ * said file. If bZero is IMG_TRUE then the PDump zero page is used as the
++ * source for the LDB.
++ */
++PVRSRV_ERROR
++PMRPDumpLoadMem(PMR *psPMR,
++                IMG_DEVMEM_OFFSET_T uiLogicalOffset,
++                IMG_DEVMEM_SIZE_T uiSize,
++                PDUMP_FLAGS_T uiPDumpFlags,
++                IMG_BOOL bZero);
++
++/*
++ * PMRPDumpSaveToFile()
++ *
++ * Emits some PDump that does an SAB (save bytes) using the PDump symbolic
++ * address of the PMR. Note that this is generally not the preferred way to
++ * dump the buffer contents.
There is an equivalent function in ++ * devicemem_server.h which also emits SAB but using the virtual address, ++ * which is the "right" way to dump the buffer contents to a file. ++ * This function exists just to aid testing by providing a means to dump ++ * the PMR directly by symbolic address also. ++ */ ++PVRSRV_ERROR ++PMRPDumpSaveToFile(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 uiArraySize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 uiFileOffset); ++#else /* PDUMP */ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PMR_PDumpSymbolicAddr) ++#endif ++static INLINE PVRSRV_ERROR ++PMR_PDumpSymbolicAddr(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32NamespaceNameLen, ++ IMG_CHAR *pszNamespaceName, ++ IMG_UINT32 ui32SymbolicAddrLen, ++ IMG_CHAR *pszSymbolicAddr, ++ IMG_DEVMEM_OFFSET_T *puiNewOffset, ++ IMG_DEVMEM_OFFSET_T *puiNextSymName) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); ++ PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen); ++ PVR_UNREFERENCED_PARAMETER(pszNamespaceName); ++ PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen); ++ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr); ++ PVR_UNREFERENCED_PARAMETER(puiNewOffset); ++ PVR_UNREFERENCED_PARAMETER(puiNextSymName); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PMRPDumpLoadMemValue32) ++#endif ++static INLINE PVRSRV_ERROR ++PMRPDumpLoadMemValue32(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Value, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PMRPDumpLoadMemValue64) ++#endif ++static INLINE PVRSRV_ERROR ++PMRPDumpLoadMemValue64(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT64 ui64Value, ++ PDUMP_FLAGS_T uiPDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); ++ PVR_UNREFERENCED_PARAMETER(ui64Value); ++ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PMRPDumpLoadMem) ++#endif ++static INLINE PVRSRV_ERROR ++PMRPDumpLoadMem(PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ PDUMP_FLAGS_T uiPDumpFlags, ++ IMG_BOOL bZero) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); ++ PVR_UNREFERENCED_PARAMETER(bZero); ++ return PVRSRV_OK; ++} ++ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PMRPDumpSaveToFile) ++#endif ++static INLINE PVRSRV_ERROR ++PMRPDumpSaveToFile(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT32 uiArraySize, ++ const IMG_CHAR *pszFilename, ++ IMG_UINT32 uiFileOffset) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); ++ PVR_UNREFERENCED_PARAMETER(uiSize); ++ PVR_UNREFERENCED_PARAMETER(uiArraySize); ++ PVR_UNREFERENCED_PARAMETER(pszFilename); ++ PVR_UNREFERENCED_PARAMETER(uiFileOffset); ++ return PVRSRV_OK; ++} ++ ++#endif /* PDUMP */ ++ ++/* This function returns the private data that a pmr subtype embedded in ++ * here. We use the function table pointer as "authorisation" that this ++ * function is being called by the pmr subtype implementation. 
We can ++ * assume (assert) that. It would be a bug in the implementation of the ++ * pmr subtype if this assertion ever fails. ++ */ ++void * ++PMRGetPrivateData(const PMR *psPMR, ++ const PMR_IMPL_FUNCTAB *psFuncTab); ++ ++PVRSRV_ERROR ++PMRZeroingPMR(PMR *psPMR, ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize); ++ ++PVRSRV_ERROR ++PMRDumpPageList(PMR *psReferencePMR, ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize); ++ ++PVRSRV_ERROR ++PMRWritePMPageList(/* Target PMR, offset, and length */ ++ PMR *psPageListPMR, ++ IMG_DEVMEM_OFFSET_T uiTableOffset, ++ IMG_DEVMEM_SIZE_T uiTableLength, ++ /* Referenced PMR, and "page" granularity */ ++ PMR *psReferencePMR, ++ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize, ++ PMR_PAGELIST **ppsPageList); ++ ++/* Doesn't actually erase the page list - just releases ++ * the appropriate refcounts ++ */ ++PVRSRV_ERROR // should be void, surely ++PMRUnwritePMPageList(PMR_PAGELIST *psPageList); ++ ++#if defined(PDUMP) ++PVRSRV_ERROR ++PMRPDumpPol32(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiFlags); ++ ++PVRSRV_ERROR ++PMRPDumpCheck32(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiPDumpFlags); ++ ++PVRSRV_ERROR ++PMRPDumpCBP(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiReadOffset, ++ IMG_DEVMEM_OFFSET_T uiWriteOffset, ++ IMG_DEVMEM_SIZE_T uiPacketSize, ++ IMG_DEVMEM_SIZE_T uiBufferSize); ++#else ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PMRPDumpPol32) ++#endif ++static INLINE PVRSRV_ERROR ++PMRPDumpPol32(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ PVR_UNREFERENCED_PARAMETER(eOperator); ++ PVR_UNREFERENCED_PARAMETER(uiFlags); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PMRPDumpCheck32) ++#endif ++static INLINE PVRSRV_ERROR ++PMRPDumpCheck32(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ PVR_UNREFERENCED_PARAMETER(eOperator); ++ PVR_UNREFERENCED_PARAMETER(uiFlags); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PMRPDumpCBP) ++#endif ++static INLINE PVRSRV_ERROR ++PMRPDumpCBP(const PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiReadOffset, ++ IMG_DEVMEM_OFFSET_T uiWriteOffset, ++ IMG_DEVMEM_SIZE_T uiPacketSize, ++ IMG_DEVMEM_SIZE_T uiBufferSize) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMR); ++ PVR_UNREFERENCED_PARAMETER(uiReadOffset); ++ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); ++ PVR_UNREFERENCED_PARAMETER(uiPacketSize); ++ PVR_UNREFERENCED_PARAMETER(uiBufferSize); ++ return PVRSRV_OK; ++} ++#endif ++ ++PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR); ++ ++/* ++ * PMRInit() ++ * ++ * To be called once and only once to initialise the internal data in ++ * the PMR module (mutexes and such) ++ * ++ * Not for general use. Only PVRSRVInit(); should be calling this. 
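++ *
++ * A second call before PMRDeInit() fails with
++ * PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR, so the expected call order
++ * is simply (illustrative sketch):
++ *
++ *   eError = PMRInit();    once, at driver initialisation
++ *   ...
++ *   eError = PMRDeInit();  once, at shutdown, after the last PMR
++ *                          has been destroyed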
++ */ ++PVRSRV_ERROR ++PMRInit(void); ++ ++/* ++ * PMRDeInit() ++ * ++ * To be called once and only once to deinitialise the internal data in ++ * the PMR module (mutexes and such) and for debug checks ++ * ++ * Not for general use. Only PVRSRVDeInit(); should be calling this. ++ */ ++PVRSRV_ERROR ++PMRDeInit(void); ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++PVRSRV_ERROR ++PMRStoreRIHandle(PMR *psPMR, void *hRIHandle); ++#endif ++ ++#endif /* #ifdef SRVSRV_PMR_H */ +diff --git a/drivers/gpu/drm/img-rogue/pmr_impl.h b/drivers/gpu/drm/img-rogue/pmr_impl.h +new file mode 100644 +index 000000000000..cae0b7eef165 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pmr_impl.h +@@ -0,0 +1,539 @@ ++/**************************************************************************/ /*! ++@File ++@Title Implementation Callbacks for Physmem (PMR) abstraction ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Part of the memory management. This file is for definitions ++ that are private to the world of PMRs, but that need to be ++ shared between pmr.c itself and the modules that implement the ++ callbacks for the PMR. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef SRVSRV_PMR_IMPL_H ++#define SRVSRV_PMR_IMPL_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++/*! Physical Memory Resource type. ++ */ ++typedef struct _PMR_ PMR; ++ ++/*! 
Per-flavour callbacks need to be shared with generic implementation ++ * (pmr.c). ++ */ ++typedef void *PMR_IMPL_PRIVDATA; ++ ++/*! Type for holding flags passed to the PMR factory. ++ */ ++typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T; ++ ++/*! Mapping table for the allocation. ++ * ++ * PMR's can be sparse in which case not all the logical addresses in it are ++ * valid. The mapping table translates logical offsets into physical offsets. ++ * ++ * This table is always passed to the PMR factory regardless if the memory is ++ * sparse or not. In case of non-sparse memory all virtual offsets are mapped ++ * to physical offsets. ++ */ ++typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE; ++ ++/*! Private data passed to the ::PFN_MMAP_FN function. ++ */ ++typedef void *PMR_MMAP_DATA; ++ ++/*! PMR factory type. ++ */ ++typedef enum _PMR_IMPL_TYPE_ ++{ ++ PMR_TYPE_NONE = 0, ++ PMR_TYPE_OSMEM, ++ PMR_TYPE_LMA, ++ PMR_TYPE_DMABUF, ++ PMR_TYPE_EXTMEM, ++ PMR_TYPE_DC, ++ PMR_TYPE_TDFWMEM, ++ PMR_TYPE_TDSECBUF ++} PMR_IMPL_TYPE; ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_LOCK_PHYS_ADDRESSES_FN ++ ++@Description Called to lock down the physical addresses for all pages ++ allocated for a PMR. ++ The default implementation is to simply increment a ++ lock-count for debugging purposes. ++ If overridden, the PFN_LOCK_PHYS_ADDRESSES_FN function will ++ be called when someone first requires a physical address, ++ and the PFN_UNLOCK_PHYS_ADDRESSES_FN counterpart will be ++ called when the last such reference is released. ++ The PMR implementation may assume that physical addresses ++ will have been "locked" in this manner before any call is ++ made to the pfnDevPhysAddr() callback ++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++ ++@Return PVRSRV_OK if the operation was successful, an error code ++ otherwise. ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_UNLOCK_PHYS_ADDRESSES_FN ++ ++@Description Called to release the lock taken on the physical addresses ++ for all pages allocated for a PMR. ++ The default implementation is to simply decrement a ++ lock-count for debugging purposes. ++ If overridden, the PFN_UNLOCK_PHYS_ADDRESSES_FN will be ++ called when the last reference taken on the PMR is ++ released. ++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++ ++@Return PVRSRV_OK if the operation was successful, an error code ++ otherwise. ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv); ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_DEV_PHYS_ADDR_FN ++ ++@Description Called to obtain one or more physical addresses for given ++ offsets within a PMR. ++ ++ The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is ++ guaranteed to have been called prior to calling the ++ PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to ++ rely on the physical address thus obtained after the ++ PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called. ++ ++ Implementation of this callback is mandatory. 
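++
++             A trivial implementation for a physically contiguous
++             factory might look like this (sketch only; it assumes the
++             factory keeps its base device address in its private
++             data, here a hypothetical psPriv->sBase):
++
++               for (i = 0; i < ui32NumOfAddr; i++)
++               {
++                   psDevAddrPtr[i].uiAddr = psPriv->sBase.uiAddr + puiOffset[i];
++                   pbValid[i] = IMG_TRUE;
++               }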
++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++@Input ui32Log2PageSize The log2 page size. ++@Input ui32NumOfAddr The number of addresses to be returned ++@Input puiOffset The offset from the start of the PMR ++ (in bytes) for which the physical ++ address is required. Where multiple ++ addresses are requested, this will ++ contain a list of offsets. ++@Output pbValid List of boolean flags indicating which ++ addresses in the returned list ++ (psDevAddrPtr) are valid (for sparse ++ allocations, not all pages may have a ++ physical backing) ++@Output psDevAddrPtr Returned list of physical addresses ++ ++@Return PVRSRV_OK if the operation was successful, an error code ++ otherwise. ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfAddr, ++ IMG_DEVMEM_OFFSET_T *puiOffset, ++ IMG_BOOL *pbValid, ++ IMG_DEV_PHYADDR *psDevAddrPtr); ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN ++ ++@Description Called to obtain a kernel-accessible address (mapped to a ++ virtual address if required) for the PMR for use internally ++ in Services. ++ ++ Implementation of this function for the (default) PMR factory providing ++ OS-allocations is mandatory (the driver will expect to be able to call ++ this function for OS-provided allocations). ++ For other PMR factories, implementation of this function is only necessary ++ where an MMU mapping is required for the Kernel to be able to access the ++ allocated memory. ++ If no mapping is needed, this function can remain unimplemented and the ++ pfn may be set to NULL. ++@Input pvPriv Private data (which was generated by ++ the PMR factory when PMR was created) ++@Input uiOffset Offset from the beginning of the PMR ++ at which mapping is to start ++@Input uiSize Size of mapping (in bytes) ++@Output ppvKernelAddressOut Mapped kernel address ++@Output phHandleOut Returned handle of the new mapping ++@Input ulFlags Mapping flags ++ ++@Return PVRSRV_OK if the mapping was successful, an error code ++ otherwise. ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv, ++ size_t uiOffset, ++ size_t uiSize, ++ void **ppvKernelAddressOut, ++ IMG_HANDLE *phHandleOut, ++ PMR_FLAGS_T ulFlags); ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_RELEASE_KERNEL_MAPPING_DATA_FN ++ ++@Description Called to release a mapped kernel virtual address ++ ++ Implementation of this callback is mandatory if ++ PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN is provided for the PMR factory, ++ otherwise this function can remain unimplemented and the pfn may be set ++ to NULL. ++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++@Input hHandle Handle of the mapping to be released ++ ++@Return None ++*/ /**************************************************************************/ ++typedef void (*PFN_RELEASE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_HANDLE hHandle); ++ ++/*************************************************************************/ /*! 
++@Brief Callback function type PFN_READ_BYTES_FN ++ ++@Description Called to read bytes from an unmapped allocation ++ ++ Implementation of this callback is optional - where it is not provided, ++ the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire ++ PMR (if an MMU mapping is required for the Kernel to be able to access the ++ allocated memory). ++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++@Input uiOffset Offset from the beginning of the PMR at ++ which to begin reading ++@Output pcBuffer Buffer in which to return the read data ++@Input uiBufSz Number of bytes to be read ++@Output puiNumBytes Number of bytes actually read (may be ++ less than uiBufSz) ++ ++@Return PVRSRV_OK if the read was successful, an error code ++ otherwise. ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_READ_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes); ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_WRITE_BYTES_FN ++ ++@Description Called to write bytes into an unmapped allocation ++ ++ Implementation of this callback is optional - where it is not provided, ++ the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN to map the entire ++ PMR (if an MMU mapping is required for the Kernel to be able to access the ++ allocated memory). ++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++@Input uiOffset Offset from the beginning of the PMR at ++ which to begin writing ++@Input pcBuffer Buffer containing the data to be written ++@Input uiBufSz Number of bytes to be written ++@Output puiNumBytes Number of bytes actually written (may be ++ less than uiBufSz) ++ ++@Return PVRSRV_OK if the write was successful, an error code ++ otherwise. ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv, ++ IMG_DEVMEM_OFFSET_T uiOffset, ++ IMG_UINT8 *pcBuffer, ++ size_t uiBufSz, ++ size_t *puiNumBytes); ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_UNPIN_MEM_FN ++ ++@Description Called to unpin an allocation. ++ Once unpinned, the pages backing the allocation may be ++ re-used by the Operating System for another purpose. ++ When the pages are required again, they may be re-pinned ++ (by calling PFN_PIN_MEM_FN). The driver will try to return ++ same pages as before. The caller will be told if the ++ content of these returned pages has been modified or if ++ the pages returned are not the original pages. ++ ++ Implementation of this callback is optional. ++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++ ++@Return PVRSRV_OK if the unpin was successful, an error code ++ otherwise. ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_UNPIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv); ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_PIN_MEM_FN ++ ++@Description Called to pin a previously unpinned allocation. ++ The driver will try to return same pages as were previously ++ assigned to the allocation. 
The caller will be told if the ++ content of these returned pages has been modified or if ++ the pages returned are not the original pages. ++ ++ Implementation of this callback is optional. ++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++ ++@Input psMappingTable Mapping table, which describes how ++ virtual 'chunks' are to be mapped to ++ physical 'chunks' for the allocation. ++ ++@Return PVRSRV_OK if the original pages were returned unmodified. ++ PVRSRV_ERROR_PMR_NEW_MEMORY if the memory returned was modified ++ or different pages were returned. ++ Another PVRSRV_ERROR code on failure. ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_PIN_MEM_FN)(PMR_IMPL_PRIVDATA pPriv, ++ PMR_MAPPING_TABLE *psMappingTable); ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_CHANGE_SPARSE_MEM_FN ++ ++@Description Called to modify the physical backing for a given sparse ++ allocation. ++ The caller provides a list of the pages within the sparse ++ allocation which should be backed with a physical allocation ++ and a list of the pages which do not require backing. ++ ++ Implementation of this callback is mandatory. ++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++@Input psPMR The PMR of the sparse allocation to be ++ modified ++@Input ui32AllocPageCount The number of pages specified in ++ pai32AllocIndices ++@Input pai32AllocIndices The list of pages in the sparse ++ allocation that should be backed with a ++ physical allocation. Pages are ++ referenced by their index within the ++ sparse allocation (e.g. in a 10 page ++ allocation, pages are denoted by ++ indices 0 to 9) ++@Input ui32FreePageCount The number of pages specified in ++ pai32FreeIndices ++@Input pai32FreeIndices The list of pages in the sparse ++ allocation that do not require ++ a physical allocation. ++@Input ui32Flags Allocation flags ++ ++@Return PVRSRV_OK if the sparse allocation physical backing was updated ++ successfully, an error code otherwise. ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pPriv, ++ const PMR *psPMR, ++ IMG_UINT32 ui32AllocPageCount, ++ IMG_UINT32 *pai32AllocIndices, ++ IMG_UINT32 ui32FreePageCount, ++ IMG_UINT32 *pai32FreeIndices, ++ IMG_UINT32 uiFlags); ++ ++/*************************************************************************/ /*! ++@Brief Callback function type PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN ++ ++@Description Called to modify which pages are mapped for a given sparse ++ allocation. ++ The caller provides a list of the pages within the sparse ++ allocation which should be given a CPU mapping and a list ++ of the pages which do not require a CPU mapping. ++ ++ Implementation of this callback is mandatory. ++ ++@Input pvPriv Private data (which was generated by the ++ PMR factory when PMR was created) ++@Input psPMR The PMR of the sparse allocation to be ++ modified ++@Input sCpuVAddrBase The virtual base address of the sparse ++ allocation ++@Input ui32AllocPageCount The number of pages specified in ++ pai32AllocIndices ++@Input pai32AllocIndices The list of pages in the sparse ++ allocation that should be given a CPU ++ mapping. Pages are referenced by their ++ index within the sparse allocation (e.g. 
++ in a 10 page allocation, pages are
++ denoted by indices 0 to 9)
++@Input ui32FreePageCount The number of pages specified in
++ pai32FreeIndices
++@Input pai32FreeIndices The list of pages in the sparse
++ allocation that do not require a CPU
++ mapping.
++
++@Return PVRSRV_OK if the page mappings were updated successfully, an
++ error code otherwise.
++*/ /**************************************************************************/
++typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN)(PMR_IMPL_PRIVDATA pPriv,
++ const PMR *psPMR,
++ IMG_UINT64 sCpuVAddrBase,
++ IMG_UINT32 ui32AllocPageCount,
++ IMG_UINT32 *pai32AllocIndices,
++ IMG_UINT32 ui32FreePageCount,
++ IMG_UINT32 *pai32FreeIndices);
++
++/*************************************************************************/ /*!
++@Brief Callback function type PFN_MMAP_FN
++
++@Description Called to map pages in the specified PMR.
++
++ Implementation of this callback is optional.
++ Where it is provided, it will be used in place of OSMMapPMRGeneric().
++
++@Input pvPriv Private data (which was generated by the
++ PMR factory when PMR was created)
++@Input psPMR The PMR of the allocation to be mapped
++@Input pMMapData OS-specific data to describe how mapping
++ should be performed
++
++@Return PVRSRV_OK if the mapping was successful, an error code
++ otherwise.
++*/ /**************************************************************************/
++typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pPriv,
++ PMR *psPMR,
++ PMR_MMAP_DATA pMMapData);
++
++/*************************************************************************/ /*!
++@Brief Callback function type PFN_FINALIZE_FN
++
++@Description Called to destroy the PMR.
++ This callback will be called only when all references to
++ the PMR have been dropped.
++ The PMR was created via a call to PhysmemNewRamBackedPMR()
++ and is destroyed via this callback.
++
++ Implementation of this callback is mandatory.
++
++@Input pvPriv Private data (which was generated by the
++ PMR factory when PMR was created)
++
++@Return PVRSRV_OK if the PMR destruction was successful, an error
++ code otherwise.
++ Currently PVRSRV_ERROR_PMR_STILL_REFERENCED is the only
++ error returned from the physmem_dmabuf.c layer; on this
++ error, destruction of the PMR is aborted without disturbing
++ the PMR state.
++*/ /**************************************************************************/
++typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv);
++
++/*************************************************************************/ /*!
++@Brief Callback function type PFN_ACQUIRE_PMR_FACTORY_LOCK_FN
++
++@Description Called to acquire the PMR factory's global lock, if it has
++ one; the callback is therefore optional. Factories that
++ support entry points in addition to the normal bridge calls
++ (for example, calls from the native OS that manipulate the
++ PMR reference count) should create a factory lock and
++ implement these callbacks.
++
++ Implementation of this callback is optional.
++
++@Return None
++*/
++/*****************************************************************************/
++typedef void (*PFN_ACQUIRE_PMR_FACTORY_LOCK_FN)(void);
++
++/*************************************************************************/ /*!
++@Brief Callback function type PFN_RELEASE_PMR_FACTORY_LOCK_FN
++
++@Description Called to release the PMR factory's global lock acquired by
++ the PFN_ACQUIRE_PMR_FACTORY_LOCK_FN callback.
++
++ Implementation of this callback is optional.
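
Where a factory does need the lock pair described here, a minimal sketch on Linux is a single static mutex. The g_sMyFactoryLock name and the MyPMRFactoryLock* functions below are hypothetical, not taken from this patch; they match the void(void) typedefs and are what a factory would wire into the pfnGetPMRFactoryLock and pfnReleasePMRFactoryLock fields of the callback table defined below.

#include <linux/mutex.h>

/* Hypothetical global factory lock; not part of this patch. */
static DEFINE_MUTEX(g_sMyFactoryLock);

static void MyPMRFactoryLockAcquire(void)
{
	mutex_lock(&g_sMyFactoryLock);
}

static void MyPMRFactoryLockRelease(void)
{
	mutex_unlock(&g_sMyFactoryLock);
}
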
++ ++@Return None ++*/ /**************************************************************************/ ++typedef void (*PFN_RELEASE_PMR_FACTORY_LOCK_FN)(void); ++ ++/*! PMR factory callback table. ++ */ ++struct _PMR_IMPL_FUNCTAB_ { ++ /*! Callback function pointer, see ::PFN_LOCK_PHYS_ADDRESSES_FN */ ++ PFN_LOCK_PHYS_ADDRESSES_FN pfnLockPhysAddresses; ++ /*! Callback function pointer, see ::PFN_UNLOCK_PHYS_ADDRESSES_FN */ ++ PFN_UNLOCK_PHYS_ADDRESSES_FN pfnUnlockPhysAddresses; ++ ++ /*! Callback function pointer, see ::PFN_DEV_PHYS_ADDR_FN */ ++ PFN_DEV_PHYS_ADDR_FN pfnDevPhysAddr; ++ ++ /*! Callback function pointer, see ::PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN */ ++ PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN pfnAcquireKernelMappingData; ++ /*! Callback function pointer, see ::PFN_RELEASE_KERNEL_MAPPING_DATA_FN */ ++ PFN_RELEASE_KERNEL_MAPPING_DATA_FN pfnReleaseKernelMappingData; ++ ++ /*! Callback function pointer, see ::PFN_READ_BYTES_FN */ ++ PFN_READ_BYTES_FN pfnReadBytes; ++ /*! Callback function pointer, see ::PFN_WRITE_BYTES_FN */ ++ PFN_WRITE_BYTES_FN pfnWriteBytes; ++ ++ /*! Callback function pointer, see ::PFN_UNPIN_MEM_FN */ ++ PFN_UNPIN_MEM_FN pfnUnpinMem; ++ /*! Callback function pointer, see ::PFN_PIN_MEM_FN */ ++ PFN_PIN_MEM_FN pfnPinMem; ++ ++ /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_FN */ ++ PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem; ++ /*! Callback function pointer, see ::PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN */ ++ PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap; ++ ++ /*! Callback function pointer, see ::PFN_MMAP_FN */ ++ PFN_MMAP_FN pfnMMap; ++ ++ /*! Callback function pointer, see ::PFN_FINALIZE_FN */ ++ PFN_FINALIZE_FN pfnFinalize; ++ ++ /*! Callback function pointer, see ::PFN_ACQUIRE_PMR_FACTORY_LOCK_FN */ ++ PFN_ACQUIRE_PMR_FACTORY_LOCK_FN pfnGetPMRFactoryLock; ++ ++ /*! Callback function pointer, see ::PFN_RELEASE_PMR_FACTORY_LOCK_FN */ ++ PFN_RELEASE_PMR_FACTORY_LOCK_FN pfnReleasePMRFactoryLock; ++}; ++ ++/*! PMR factory callback table. ++ */ ++typedef struct _PMR_IMPL_FUNCTAB_ PMR_IMPL_FUNCTAB; ++ ++#endif /* SRVSRV_PMR_IMPL_H */ +diff --git a/drivers/gpu/drm/img-rogue/pmr_os.c b/drivers/gpu/drm/img-rogue/pmr_os.c +new file mode 100644 +index 000000000000..5dacfe28e1c7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pmr_os.c +@@ -0,0 +1,619 @@ ++/*************************************************************************/ /*! ++@File ++@Title Linux OS PMR functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
++#include <linux/pfn_t.h>
++#include <linux/pfn.h>
++#endif
++
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "allocmem.h"
++#include "devicemem_server_utils.h"
++#include "pmr.h"
++#include "pmr_os.h"
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++#include "process_stats.h"
++#endif
++
++#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
++#include "mmap_stats.h"
++#endif
++
++#include "kernel_compatibility.h"
++
++/*
++ * x86_32:
++ * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM
++ * pages with default memory attributes; these HIGHMEM pages are skipped in
++ * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range().
++ * Also vm_insert_page is faster.
++ *
++ * x86_64:
++ * Use vm_insert_page because it is faster.
++ *
++ * Other platforms:
++ * Use remap_pfn_range by default because it does not issue a cache flush.
++ * It is known that ARM32 benefits from this. Whether the assumption holds
++ * for other platforms should be verified as they become available.
++ *
++ * Since vm_insert_page does more precise memory accounting we have the build
++ * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug
++ * feature.
++ *
++ */
++#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT)
++#define PMR_OS_USE_VM_INSERT_PAGE 1
++#endif
++
++static void MMapPMROpen(struct vm_area_struct *ps_vma)
++{
++ PMR *psPMR = ps_vma->vm_private_data;
++
++ /* Our VM flags should ensure this function never gets called */
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: Unexpected mmap open call, this is probably an application bug.",
++ __func__));
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p",
++ __func__,
++ ps_vma,
++ ps_vma->vm_start,
++ ps_vma->vm_end - ps_vma->vm_start,
++ psPMR));
++
++ /* In case we get called anyway let's do things right by increasing the refcount and
++ * locking down the physical addresses.
*/
++ PMRRefPMR(psPMR);
++
++ if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__));
++ PMRUnrefPMR(psPMR);
++ }
++}
++
++static void MMapPMRClose(struct vm_area_struct *ps_vma)
++{
++ PMR *psPMR = ps_vma->vm_private_data;
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++#if defined(PVRSRV_ENABLE_MEMORY_STATS)
++ {
++ uintptr_t vAddr = ps_vma->vm_start;
++
++ while (vAddr < ps_vma->vm_end)
++ {
++ /* USER MAPPING */
++ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
++ (IMG_UINT64)vAddr,
++ OSGetCurrentClientProcessIDKM());
++ vAddr += PAGE_SIZE;
++ }
++ }
++#else
++ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
++ ps_vma->vm_end - ps_vma->vm_start,
++ OSGetCurrentClientProcessIDKM());
++#endif
++#endif
++
++ PMRUnlockSysPhysAddresses(psPMR);
++ PMRUnrefPMR(psPMR);
++}
++
++/*
++ * This vma operation is used to read data from mmap regions. It is called
++ * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
++ * requests and reads from /proc/<pid>/mem.
++ */
++static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
++ void *buf, int len, int write)
++{
++ PMR *psPMR = ps_vma->vm_private_data;
++ unsigned long ulOffset = addr - ps_vma->vm_start;
++ size_t uiBytesCopied;
++ PVRSRV_ERROR eError;
++ int iRetVal = -EINVAL;
++
++ if (write)
++ {
++ eError = PMR_WriteBytes(psPMR,
++ (IMG_DEVMEM_OFFSET_T) ulOffset,
++ buf,
++ len,
++ &uiBytesCopied);
++ }
++ else
++ {
++ eError = PMR_ReadBytes(psPMR,
++ (IMG_DEVMEM_OFFSET_T) ulOffset,
++ buf,
++ len,
++ &uiBytesCopied);
++ }
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
++ __func__,
++ write ? "PMR_WriteBytes" : "PMR_ReadBytes",
++ eError));
++ }
++ else
++ {
++ iRetVal = uiBytesCopied;
++ }
++
++ return iRetVal;
++}
++
++static const struct vm_operations_struct gsMMapOps =
++{
++ .open = &MMapPMROpen,
++ .close = &MMapPMRClose,
++ .access = MMapVAccess,
++};
++
++static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode,
++ struct vm_area_struct *ps_vma,
++ IMG_DEVMEM_OFFSET_T uiOffset,
++ IMG_CPU_PHYADDR *psCpuPAddr,
++ IMG_UINT32 uiLog2PageSize,
++ IMG_BOOL bUseVMInsertPage,
++ IMG_BOOL bUseMixedMap)
++{
++ IMG_INT32 iStatus;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
++ pfn_t sPFN;
++#else
++ unsigned long uiPFN;
++#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
++ sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0);
++#else
++ uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT;
++ PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr);
++#endif
++
++ /*
++ * vm_insert_page() allows insertion of individual pages into user
++ * VMA space _only_ if the page is an order-zero allocated page
++ */
++ if (bUseVMInsertPage)
++ {
++ if (bUseMixedMap)
++ {
++ /*
++ * This path is just for debugging. It should be
++ * equivalent to the remap_pfn_range() path.
++ */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0))
++ vm_fault_t vmf;
++
++ vmf = vmf_insert_mixed(ps_vma,
++ ps_vma->vm_start + uiOffset,
++ sPFN);
++ if (vmf & VM_FAULT_ERROR)
++ {
++ iStatus = vm_fault_to_errno(vmf, 0);
++ }
++ else
++ {
++ iStatus = 0;
++ }
++#else
++ iStatus = vm_insert_mixed(ps_vma,
++ ps_vma->vm_start + uiOffset,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
++ sPFN);
++#else
++ uiPFN);
++#endif
++#endif
++ }
++ else
++ {
++ /* Since kernel 3.7 this sets VM_MIXEDMAP internally */
++ iStatus = vm_insert_page(ps_vma,
++ ps_vma->vm_start + uiOffset,
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
++ pfn_t_to_page(sPFN));
++#else
++ pfn_to_page(uiPFN));
++#endif
++ }
++ }
++ else
++ {
++ /*
++ NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR()
++
++ The current services mmap model maps a PMR's full-length size
++ into the user VMA & applies any user specified offset to the kernel
++ returned zero-offset based VA in the services client; this essentially
++ means the services server ignores ps_vma->vm_pgoff (which houses the
++ hPMR) during a mmap call.
++
++ Furthermore, during a DMA/CMA memory allocation, multiple order-n
++ pages are used to satisfy an allocation request due to the DMA/CMA
++ framework rounding up the allocation size to the next power of two,
++ which can lead to wasted memory (so we don't allocate using a single
++ call).
++
++ The combination of the above two issues means that we cannot use
++ dma_mmap_coherent() for a number of reasons outlined below:
++
++ - Services mmap semantics do not fit with dma_mmap_coherent(),
++ which requires a proper ps_vma->vm_pgoff; since this field
++ houses a hPMR handle value, calls into dma_mmap_coherent()
++ fail. This could be avoided by forcing ps_vma->vm_pgoff to
++ zero, but the ps_vma->vm_pgoff is applied to the DMA bus
++ address PFN and not the user VMA, which is always mapped at
++ ps_vma->vm_start.
++
++ - As multiple order-n pages are used for DMA/CMA allocations, a
++ single dma_mmap_coherent() call with vma->vm_pgoff set to
++ zero cannot safely be used, because there is no guarantee that
++ all of the multiple order-n pages in the PMR are physically
++ contiguous from the first entry to the last. Whilst this is
++ highly likely to be the case, there is no guarantee, so we
++ cannot depend on it.
++
++ The solution is to manually mmap DMA/CMA pages into the user VMA
++ using remap_pfn_range() directly. Furthermore, accounting is
++ always compromised for DMA/CMA allocations.
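
To compress the fallback path this note describes into one place, the helper below maps a single physically contiguous, page-aligned chunk into the user VMA with remap_pfn_range(). MapOneChunk and its parameters are hypothetical, a sketch of the pattern rather than code from this patch.

/* Sketch: map one physically contiguous chunk of the PMR into the user
 * VMA. sChunkPAddr and uiChunkBytes are assumed inputs; uiOffset is the
 * chunk's byte offset within the mapping.
 */
static int MapOneChunk(struct vm_area_struct *ps_vma,
                       IMG_CPU_PHYADDR sChunkPAddr,
                       size_t uiChunkBytes,
                       unsigned long uiOffset)
{
	/* remap_pfn_range() expects a page-aligned physical start address. */
	unsigned long uiPFN = sChunkPAddr.uiAddr >> PAGE_SHIFT;

	return remap_pfn_range(ps_vma,
	                       ps_vma->vm_start + uiOffset,
	                       uiPFN,
	                       uiChunkBytes,
	                       ps_vma->vm_page_prot);
}
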
++ */ ++ size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize; ++ ++ iStatus = remap_pfn_range(ps_vma, ++ ps_vma->vm_start + uiOffset, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) ++ pfn_t_to_pfn(sPFN), ++#else ++ uiPFN, ++#endif ++ uiNumContiguousBytes, ++ ps_vma->vm_page_prot); ++ } ++ ++ return iStatus; ++} ++ ++PVRSRV_ERROR ++OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData) ++{ ++ struct vm_area_struct *ps_vma = pOSMMapData; ++ PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR); ++ PVRSRV_ERROR eError; ++ size_t uiLength; ++ IMG_INT32 iStatus; ++ IMG_DEVMEM_OFFSET_T uiOffset; ++ IMG_UINT32 ui32CPUCacheFlags; ++ pgprot_t sPageProt; ++ IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC]; ++ IMG_UINT32 uiOffsetIdx; ++ IMG_UINT32 uiNumOfPFNs; ++ IMG_UINT32 uiLog2PageSize; ++ IMG_CPU_PHYADDR *psCpuPAddr; ++ IMG_BOOL *pbValid; ++ IMG_BOOL bUseMixedMap = IMG_FALSE; ++ IMG_BOOL bUseVMInsertPage = IMG_FALSE; ++ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ if (((ps_vma->vm_flags & VM_WRITE) != 0) && ++ ((ps_vma->vm_flags & VM_SHARED) == 0)) ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto e1; ++ } ++ ++ sPageProt = vm_get_page_prot(ps_vma->vm_flags); ++ ++ eError = DevmemCPUCacheMode(psDevNode, ++ PMR_Flags(psPMR), ++ &ui32CPUCacheFlags); ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ switch (ui32CPUCacheFlags) ++ { ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED: ++ sPageProt = pgprot_noncached(sPageProt); ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC: ++ sPageProt = pgprot_writecombine(sPageProt); ++ break; ++ ++ case PVRSRV_MEMALLOCFLAG_CPU_CACHED: ++ { ++/* Do not set to write-combine for plato */ ++#if !defined(PLATO_MEMORY_CONFIG) ++ PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR); ++ ++ if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA) ++ sPageProt = pgprot_writecombine(sPageProt); ++#endif ++ break; ++ } ++ ++ default: ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto e1; ++ } ++ ps_vma->vm_page_prot = sPageProt; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ vm_flags_set(ps_vma, VM_IO); ++ ++ /* Don't include the mapping in core dumps */ ++ vm_flags_set(ps_vma, VM_DONTDUMP); ++ ++ /* ++ * Disable mremap because our nopage handler assumes all ++ * page requests have already been validated. ++ */ ++ vm_flags_set(ps_vma, VM_DONTEXPAND); ++ ++ /* Don't allow mapping to be inherited across a process fork */ ++ vm_flags_set(ps_vma, VM_DONTCOPY); ++#else ++ ps_vma->vm_flags |= VM_IO; ++ ++ /* Don't include the mapping in core dumps */ ++ ps_vma->vm_flags |= VM_DONTDUMP; ++ ++ /* ++ * Disable mremap because our nopage handler assumes all ++ * page requests have already been validated. ++ */ ++ ps_vma->vm_flags |= VM_DONTEXPAND; ++ ++ /* Don't allow mapping to be inherited across a process fork */ ++ ps_vma->vm_flags |= VM_DONTCOPY; ++#endif ++ ++ uiLength = ps_vma->vm_end - ps_vma->vm_start; ++ ++ /* Is this mmap targeting non order-zero pages or does it use pfn mappings? 
++ * If yes, don't use vm_insert_page */
++ uiLog2PageSize = PMR_GetLog2Contiguity(psPMR);
++
++#if defined(PMR_OS_USE_VM_INSERT_PAGE)
++ bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM);
++#endif
++
++ /* Can we use stack allocations */
++ uiNumOfPFNs = uiLength >> uiLog2PageSize;
++ if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
++ {
++ psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr));
++ if (psCpuPAddr == NULL)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto e1;
++ }
++
++ /* Should allocation fail, clean-up here before exiting */
++ pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid));
++ if (pbValid == NULL)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ OSFreeMem(psCpuPAddr);
++ goto e2;
++ }
++ }
++ else
++ {
++ psCpuPAddr = asCpuPAddr;
++ pbValid = abValid;
++ }
++
++ /* Obtain map range pfns */
++ eError = PMR_CpuPhysAddr(psPMR,
++ uiLog2PageSize,
++ uiNumOfPFNs,
++ 0,
++ psCpuPAddr,
++ pbValid);
++ if (eError != PVRSRV_OK)
++ {
++ goto e3;
++ }
++
++ /*
++ * Scan the map range for pfns without struct page* handling. If
++ * we find one, this is a mixed map, and we can't use vm_insert_page()
++ * NOTE: vm_insert_page() allows insertion of individual pages into user
++ * VMA space _only_ if said page is an order-zero allocated page.
++ */
++ if (bUseVMInsertPage)
++ {
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
++ pfn_t sPFN;
++#else
++ unsigned long uiPFN;
++#endif
++
++ for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
++ {
++ if (pbValid[uiOffsetIdx])
++ {
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
++ sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0);
++
++ if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
++#else
++ uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
++ PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
++
++ if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
++ {
++ bUseMixedMap = IMG_TRUE;
++ break;
++ }
++ }
++ }
++
++ if (bUseMixedMap)
++ {
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0))
++ vm_flags_set(ps_vma, VM_MIXEDMAP);
++#else
++ ps_vma->vm_flags |= VM_MIXEDMAP;
++#endif
++ }
++ }
++ else
++ {
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0))
++ vm_flags_set(ps_vma, VM_PFNMAP);
++#else
++ ps_vma->vm_flags |= VM_PFNMAP;
++#endif
++ }
++
++ /* For each PMR page-size chunk of contiguous bytes, map page(s) into the user VMA */
++ for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL << uiLog2PageSize)
++ {
++ uiOffsetIdx = uiOffset >> uiLog2PageSize;
++ /*
++ * Only map in pages that are valid, any that aren't will be
++ * picked up by the nopage handler which will return a zeroed
++ * page for us.
++ */
++ if (pbValid[uiOffsetIdx])
++ {
++ iStatus = _OSMMapPMR(psDevNode,
++ ps_vma,
++ uiOffset,
++ &psCpuPAddr[uiOffsetIdx],
++ uiLog2PageSize,
++ bUseVMInsertPage,
++ bUseMixedMap);
++ if (iStatus)
++ {
++ /* Failure error code doesn't get propagated */
++ eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
++ PVR_ASSERT(0);
++ goto e3;
++ }
++ }
++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
++#define PMR_OS_BAD_CPUADDR 0x0BAD0BAD
++ {
++ IMG_CPU_PHYADDR sPAddr;
++ sPAddr.uiAddr = pbValid[uiOffsetIdx] ?
++ psCpuPAddr[uiOffsetIdx].uiAddr :
++ IMG_CAST_TO_CPUPHYADDR_UINT(PMR_OS_BAD_CPUADDR);
++
++ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
++ (void*)(uintptr_t)(ps_vma->vm_start + uiOffset),
++ sPAddr,
++ 1<<uiLog2PageSize,
++ OSGetCurrentClientProcessIDKM());
++ }
++#undef PMR_OS_BAD_CPUADDR
++#endif
++ }
++
++ if (pbValid != abValid)
++ {
++ OSFreeMem(pbValid);
++ }
++
++ if (psCpuPAddr != asCpuPAddr)
++ {
++ OSFreeMem(psCpuPAddr);
++ }
++
++ /* let the PMR know about this mapping */
++ ps_vma->vm_private_data = psPMR;
++
++ /* Install open and close handlers for ref-counting */
++ ps_vma->vm_ops = &gsMMapOps;
++
++ /*
++ * Take a reference on the PMR so that it can't be freed while mapped
++ * into the user process.
++ */
++ PMRRefPMR(psPMR);
++
++#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
++ /* record the stats */
++ MMapStatsAddOrUpdatePMR(psPMR, uiLength);
++#endif
++
++ return PVRSRV_OK;
++
++ /* Error exit paths follow */
++e3:
++ if (pbValid != abValid)
++ {
++ OSFreeMem(pbValid);
++ }
++e2:
++ if (psCpuPAddr != asCpuPAddr)
++ {
++ OSFreeMem(psCpuPAddr);
++ }
++e1:
++ PMRUnlockSysPhysAddresses(psPMR);
++e0:
++ return eError;
++}
+diff --git a/drivers/gpu/drm/img-rogue/pmr_os.h b/drivers/gpu/drm/img-rogue/pmr_os.h
+new file mode 100644
+index 000000000000..7b4a2117091c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pmr_os.h
+@@ -0,0 +1,62 @@
++/*************************************************************************/ /*!
++@File
++@Title OS PMR functions
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description OS specific PMR functions
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++#if !defined(PMR_OS_H) ++#define PMR_OS_H ++ ++#include "pmr_impl.h" ++ ++/*************************************************************************/ /*! ++@Function OSMMapPMRGeneric ++@Description Implements a generic PMR mapping function, which is used ++ to CPU map a PMR where the PMR does not have a mapping ++ function defined by the creating PMR factory. ++@Input psPMR the PMR to be mapped ++@Output pOSMMapData pointer to any private data ++ needed by the generic mapping function ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData); ++ ++#endif /* !defined(PMR_OS_H) */ +diff --git a/drivers/gpu/drm/img-rogue/power.c b/drivers/gpu/drm/img-rogue/power.c +new file mode 100644 +index 000000000000..5ad6695fc574 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/power.c +@@ -0,0 +1,929 @@ ++/*************************************************************************/ /*! ++@File power.c ++@Title Power management functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Main APIs for power management functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "pdump_km.h" ++#include "allocmem.h" ++#include "osfunc.h" ++ ++#include "lock.h" ++#include "pvrsrv.h" ++#include "pvr_debug.h" ++#include "process_stats.h" ++ ++ ++struct _PVRSRV_POWER_DEV_TAG_ ++{ ++ PFN_PRE_POWER pfnDevicePrePower; ++ PFN_POST_POWER pfnDevicePostPower; ++ PFN_SYS_PRE_POWER pfnSystemPrePower; ++ PFN_SYS_POST_POWER pfnSystemPostPower; ++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange; ++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange; ++ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest; ++ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest; ++ PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange; ++ IMG_HANDLE hSysData; ++ IMG_HANDLE hDevCookie; ++ PVRSRV_DEV_POWER_STATE eDefaultPowerState; ++ ATOMIC_T eCurrentPowerState; ++}; ++ ++/*! ++ Typedef for a pointer to a function that will be called for re-acquiring ++ device powerlock after releasing it temporarily for some timeout period ++ in function PVRSRVDeviceIdleRequestKM ++ */ ++typedef PVRSRV_ERROR (*PFN_POWER_LOCK_ACQUIRE) (PPVRSRV_DEVICE_NODE psDevNode); ++ ++static inline IMG_UINT64 PVRSRVProcessStatsGetTimeNs(void) ++{ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ return OSClockns64(); ++#else ++ return 0; ++#endif ++} ++ ++static inline IMG_UINT64 PVRSRVProcessStatsGetTimeUs(void) ++{ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ return OSClockus(); ++#else ++ return 0; ++#endif ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function _IsSystemStatePowered ++ ++ @Description Tests whether a given system state represents powered-up. ++ ++ @Input eSystemPowerState : a system power state ++ ++ @Return IMG_BOOL ++ ++******************************************************************************/ ++static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState) ++{ ++ return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON); ++} ++ ++/* We don't expect PID=0 to acquire device power-lock */ ++#define PWR_LOCK_OWNER_PID_CLR_VAL 0 ++ ++PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = OSLockCreate(&psDeviceNode->hPowerLock); ++ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); ++ ++ psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; ++ return PVRSRV_OK; ++} ++ ++void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; ++ OSLockDestroy(psDeviceNode->hPowerLock); ++} ++ ++IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ return OSLockIsLocked(psDeviceNode->hPowerLock) && ++ OSGetCurrentClientProcessIDKM() == psDeviceNode->uiPwrLockOwnerPID; ++} ++ ++PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ OSLockAcquire(psDeviceNode->hPowerLock); ++ ++ /* Only allow to take powerlock when the system power is on */ ++ if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) ++ { ++ psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); ++ return PVRSRV_OK; ++ } ++ ++ OSLockRelease(psDeviceNode->hPowerLock); ++ ++ return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; ++} ++ ++PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ if (!(OSTryLockAcquire(psDeviceNode->hPowerLock))) ++ { ++ return PVRSRV_ERROR_RETRY; ++ } ++ ++ /* Only allow to take powerlock when the system power is on */ ++ if 
(_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState)) ++ { ++ psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); ++ ++ /* System is powered ON, return OK */ ++ return PVRSRV_OK; ++ } ++ else ++ { ++ /* System is powered OFF, release the lock and return error */ ++ OSLockRelease(psDeviceNode->hPowerLock); ++ return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF; ++ } ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function _PVRSRVForcedPowerLock ++ ++ @Description Obtain the mutex for power transitions regardless of system ++ power state ++ ++ @Return Always returns PVRSRV_OK. Function prototype required same as ++ PFN_POWER_LOCK_ACQUIRE ++ ++******************************************************************************/ ++static PVRSRV_ERROR _PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ OSLockAcquire(psDeviceNode->hPowerLock); ++ psDeviceNode->uiPwrLockOwnerPID = OSGetCurrentClientProcessIDKM(); ++ ++ return PVRSRV_OK; ++} ++ ++void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDeviceNode)); ++ ++ /* Reset uiPwrLockOwnerPID before releasing lock */ ++ psDeviceNode->uiPwrLockOwnerPID = PWR_LOCK_OWNER_PID_CLR_VAL; ++ OSLockRelease(psDeviceNode->hPowerLock); ++} ++ ++IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice) ++{ ++ return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF); ++} ++ ++PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, ++ PVRSRV_DEV_POWER_STATE eNewPowerState) ++{ ++ PVRSRV_POWER_DEV *psPowerDevice; ++ ++ psPowerDevice = psDeviceNode->psPowerDev; ++ if (psPowerDevice == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_DEVICE; ++ } ++ ++ psPowerDevice->eDefaultPowerState = eNewPowerState; ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ @Input pfnPowerLockAcquire : Function to re-acquire power-lock in-case ++ it was necessary to release it. ++*/ ++static PVRSRV_ERROR _PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, ++ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, ++ IMG_BOOL bDeviceOffPermitted, ++ PFN_POWER_LOCK_ACQUIRE pfnPowerLockAcquire) ++{ ++ PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; ++ PVRSRV_ERROR eError; ++ ++ if ((psPowerDev && psPowerDev->pfnForcedIdleRequest) && ++ (!pfnIsDefaultStateOff || pfnIsDefaultStateOff(psPowerDev))) ++ { ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie, ++ bDeviceOffPermitted); ++ if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) ++ { ++ PVRSRV_ERROR eErrPwrLockAcq; ++ /* FW denied idle request */ ++ PVRSRVPowerUnlock(psDeviceNode); ++ ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ ++ eErrPwrLockAcq = pfnPowerLockAcquire(psDeviceNode); ++ if (eErrPwrLockAcq != PVRSRV_OK) ++ { ++ /* We only understand PVRSRV_ERROR_RETRY, so assert on others. 
++ * Moreover, we've ended-up releasing the power-lock which was ++ * originally "held" by caller before calling this function - ++ * since this needs vigilant handling at call-site, we pass ++ * back an explicit error, for caller(s) to "avoid" calling ++ * PVRSRVPowerUnlock */ ++ PVR_ASSERT(eErrPwrLockAcq == PVRSRV_ERROR_RETRY); ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to re-acquire power-lock " ++ "(%s) after releasing it for a time-out", ++ __func__, PVRSRVGetErrorString(eErrPwrLockAcq))); ++ return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED; ++ } ++ } ++ else ++ { ++ /* idle request successful or some other error occurred, return */ ++ break; ++ } ++ } END_LOOP_UNTIL_TIMEOUT(); ++ } ++ else ++ { ++ return PVRSRV_OK; ++ } ++ ++ return eError; ++} ++ ++/* ++ * Wrapper function helps limiting calling complexity of supplying additional ++ * PFN_POWER_LOCK_ACQUIRE argument (required by _PVRSRVDeviceIdleRequestKM) ++ */ ++inline PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode, ++ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff, ++ IMG_BOOL bDeviceOffPermitted) ++{ ++ return _PVRSRVDeviceIdleRequestKM(psDeviceNode, ++ pfnIsDefaultStateOff, ++ bDeviceOffPermitted, ++ PVRSRVPowerLock); ++} ++ ++PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev; ++ ++ if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest) ++ { ++ return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function PVRSRVDevicePrePowerStateKM ++ ++ @Description ++ ++ Perform device-specific processing required before a power transition ++ ++ @Input psPowerDevice : Power device ++ @Input eNewPowerState : New power state ++ @Input ePwrFlags : Power state change flags ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++static ++PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState; ++ IMG_UINT64 ui64SysTimer1 = 0; ++ IMG_UINT64 ui64SysTimer2 = 0; ++ IMG_UINT64 ui64DevTimer1 = 0; ++ IMG_UINT64 ui64DevTimer2 = 0; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); ++ ++ eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); ++ ++ if (psPowerDevice->pfnDevicePrePower != NULL) ++ { ++ ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); ++ ++ /* Call the device's power callback. */ ++ eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie, ++ eNewPowerState, ++ eCurrentPowerState, ++ BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED)); ++ ++ ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs(); ++ ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++ /* Do any required system-layer processing. */ ++ if (psPowerDevice->pfnSystemPrePower != NULL) ++ { ++ ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs(); ++ ++ eError = psPowerDevice->pfnSystemPrePower(psPowerDevice->hSysData, ++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) ? ++ PVRSRV_SYS_POWER_STATE_ON : ++ PVRSRV_SYS_POWER_STATE_OFF, ++ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) ? 
++ PVRSRV_SYS_POWER_STATE_ON : ++ PVRSRV_SYS_POWER_STATE_OFF, ++ ePwrFlags); ++ ++ ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs(); ++ ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++ InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2, ++ ui64DevTimer1, ui64DevTimer2, ++ BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED), ++ eNewPowerState == PVRSRV_DEV_POWER_STATE_ON, ++ IMG_TRUE); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function PVRSRVDevicePostPowerStateKM ++ ++ @Description ++ ++ Perform device-specific processing required after a power transition ++ ++ @Input psPowerDevice : Power device ++ @Input eNewPowerState : New power state ++ @Input ePwrFlags : Power state change flags ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++static ++PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV *psPowerDevice, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState; ++ IMG_UINT64 ui64SysTimer1 = 0; ++ IMG_UINT64 ui64SysTimer2 = 0; ++ IMG_UINT64 ui64DevTimer1 = 0; ++ IMG_UINT64 ui64DevTimer2 = 0; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); ++ ++ eCurrentPowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); ++ ++ /* Do any required system-layer processing. */ ++ if (psPowerDevice->pfnSystemPostPower != NULL) ++ { ++ ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs(); ++ ++ eError = psPowerDevice->pfnSystemPostPower(psPowerDevice->hSysData, ++ (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) ? ++ PVRSRV_SYS_POWER_STATE_ON : ++ PVRSRV_SYS_POWER_STATE_OFF, ++ (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) ? ++ PVRSRV_SYS_POWER_STATE_ON : ++ PVRSRV_SYS_POWER_STATE_OFF, ++ ePwrFlags); ++ ++ ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs(); ++ ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++ if (psPowerDevice->pfnDevicePostPower != NULL) ++ { ++ ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs(); ++ ++ /* Call the device's power callback. 
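
For reference, a device post-power callback invoked here might look roughly like the hypothetical sketch below, matching the PFN_POST_POWER typedef declared in power.h; it is illustrative only and not part of this patch.

static PVRSRV_ERROR MyDevicePostPower(IMG_HANDLE hDevHandle,
                                      PVRSRV_DEV_POWER_STATE eNewPowerState,
                                      PVRSRV_DEV_POWER_STATE eCurrentPowerState,
                                      PVRSRV_POWER_FLAGS ePwrFlags)
{
	PVR_UNREFERENCED_PARAMETER(hDevHandle);
	PVR_UNREFERENCED_PARAMETER(ePwrFlags);

	if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON &&
	    eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
	{
		/* The power island is up again: reprogram any register state
		 * lost across the OFF period before work is resumed.
		 */
	}

	return PVRSRV_OK; /* a non-OK return aborts the transition */
}
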
*/ ++ eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie, ++ eNewPowerState, ++ eCurrentPowerState, ++ BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED)); ++ ++ ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs(); ++ ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++ InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2, ++ ui64DevTimer1, ui64DevTimer2, ++ BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED), ++ eNewPowerState == PVRSRV_DEV_POWER_STATE_ON, ++ IMG_FALSE); ++ ++ OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eNewPowerState); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_POWER_DEV *psPowerDevice; ++ ++ psPowerDevice = psDeviceNode->psPowerDev; ++ if (!psPowerDevice) ++ { ++ return PVRSRV_OK; ++ } ++ ++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ++ { ++ eNewPowerState = psPowerDevice->eDefaultPowerState; ++ } ++ ++ if (OSAtomicRead(&psPowerDevice->eCurrentPowerState) != eNewPowerState) ++ { ++ eError = PVRSRVDevicePrePowerStateKM(psPowerDevice, ++ eNewPowerState, ++ ePwrFlags); ++ PVR_GOTO_IF_ERROR(eError, ErrorExit); ++ ++ eError = PVRSRVDevicePostPowerStateKM(psPowerDevice, ++ eNewPowerState, ++ ePwrFlags); ++ PVR_GOTO_IF_ERROR(eError, ErrorExit); ++ ++ /* Signal Device Watchdog Thread about power mode change. */ ++ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON) ++ { ++ psPVRSRVData->ui32DevicesWatchdogPwrTrans++; ++#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++ if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT) ++#endif ++ { ++ eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ } ++ } ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++ else if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF) ++ { ++ /* signal watchdog thread and give it a chance to switch to ++ * longer / infinite wait time */ ++ eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ } ++#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ ++ } ++ ++ return PVRSRV_OK; ++ ++ErrorExit: ++ ++ if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: Transition to %d was denied, Flags=0x%08x", ++ __func__, eNewPowerState, ePwrFlags)); ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Transition to %d FAILED (%s)", ++ __func__, eNewPowerState, PVRSRVGetErrorString(eError))); ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode, ++ PVRSRV_SYS_POWER_STATE eNewSysPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT uiStage = 0; ++ ++ PVRSRV_DEV_POWER_STATE eNewDevicePowerState = ++ _IsSystemStatePowered(eNewSysPowerState)? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF; ++ ++ /* If setting devices to default state, force idle all devices whose default state is off */ ++ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff = ++ (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? 
PVRSRVDeviceIsDefaultStateOFF : NULL; ++ ++ /* require a proper power state */ ++ if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* Prevent simultaneous SetPowerStateKM calls */ ++ _PVRSRVForcedPowerLock(psDeviceNode); ++ ++ /* no power transition requested, so do nothing */ ++ if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState) ++ { ++ PVRSRVPowerUnlock(psDeviceNode); ++ return PVRSRV_OK; ++ } ++ ++ eError = _PVRSRVDeviceIdleRequestKM(psDeviceNode, pfnIsDefaultStateOff, ++ IMG_TRUE, _PVRSRVForcedPowerLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "_PVRSRVDeviceIdleRequestKM"); ++ uiStage++; ++ goto ErrorExit; ++ } ++ ++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState, ++ ePwrFlags | PVRSRV_POWER_FLAGS_FORCED); ++ if (eError != PVRSRV_OK) ++ { ++ uiStage++; ++ goto ErrorExit; ++ } ++ ++ psDeviceNode->eCurrentSysPowerState = eNewSysPowerState; ++ ++ PVRSRVPowerUnlock(psDeviceNode); ++ ++ return PVRSRV_OK; ++ ++ErrorExit: ++ PVRSRVPowerUnlock(psDeviceNode); ++ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Transition from %d to %d FAILED (%s) at stage %u. Dumping debug info.", ++ __func__, psDeviceNode->eCurrentSysPowerState, eNewSysPowerState, ++ PVRSRVGetErrorString(eError), uiStage)); ++ ++ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG *psDevConfig, ++ PVRSRV_SYS_POWER_STATE eNewSysPowerState) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_DEVICE_NODE *psDevNode = psDevConfig->psDevNode; ++ PVRSRV_SYS_POWER_STATE eCurrentSysPowerState; ++ ++ if (psDevNode != NULL) ++ { ++ eCurrentSysPowerState = psDevNode->eCurrentSysPowerState; ++ } ++ else ++ { ++ /* assume power is off if no device node */ ++ eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF; ++ } ++ ++ /* no power transition requested, so do nothing */ ++ if (eNewSysPowerState == eCurrentSysPowerState) ++ { ++ return PVRSRV_OK; ++ } ++ ++ if (psDevConfig->pfnPrePowerState != NULL) ++ { ++ eError = psDevConfig->pfnPrePowerState(psDevConfig->hSysData, ++ eNewSysPowerState, ++ eCurrentSysPowerState, ++ PVRSRV_POWER_FLAGS_FORCED); ++ ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++ if (psDevConfig->pfnPostPowerState != NULL) ++ { ++ eError = psDevConfig->pfnPostPowerState(psDevConfig->hSysData, ++ eNewSysPowerState, ++ eCurrentSysPowerState, ++ PVRSRV_POWER_FLAGS_FORCED); ++ ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++ if (psDevNode != NULL) ++ { ++ psDevNode->eCurrentSysPowerState = eNewSysPowerState; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE psDeviceNode, ++ PVRSRV_POWER_DEV *psPowerDevice, ++ PFN_PRE_POWER pfnDevicePrePower, ++ PFN_POST_POWER pfnDevicePostPower, ++ PFN_SYS_PRE_POWER pfnSystemPrePower, ++ PFN_SYS_POST_POWER pfnSystemPostPower, ++ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, ++ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest) ++{ ++ if (psPowerDevice != NULL) ++ { ++ if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) ++ { ++ psPowerDevice->pfnSystemPrePower = NULL; ++ psPowerDevice->pfnSystemPostPower = NULL; ++ } ++ else ++ { ++ psPowerDevice->pfnSystemPrePower = pfnSystemPrePower; ++ psPowerDevice->pfnSystemPostPower = pfnSystemPostPower; ++ } ++ ++ psPowerDevice->pfnDevicePrePower = pfnDevicePrePower; ++ psPowerDevice->pfnDevicePostPower = pfnDevicePostPower; ++ psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest; ++ 
psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest; ++ } ++} ++ ++PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode, ++ PFN_PRE_POWER pfnDevicePrePower, ++ PFN_POST_POWER pfnDevicePostPower, ++ PFN_SYS_PRE_POWER pfnSystemPrePower, ++ PFN_SYS_POST_POWER pfnSystemPostPower, ++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange, ++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange, ++ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest, ++ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest, ++ PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange, ++ IMG_HANDLE hDevCookie, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_DEV_POWER_STATE eDefaultPowerState) ++{ ++ PVRSRV_POWER_DEV *psPowerDevice; ++ ++ PVR_ASSERT(!psDeviceNode->psPowerDev); ++ ++ PVR_ASSERT(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); ++ PVR_ASSERT(eDefaultPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT); ++ ++ psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV)); ++ PVR_LOG_RETURN_IF_NOMEM(psPowerDevice, "psPowerDevice"); ++ ++ /* setup device for power manager */ ++ PVRSRVSetPowerCallbacks(psDeviceNode, ++ psPowerDevice, ++ pfnDevicePrePower, ++ pfnDevicePostPower, ++ pfnSystemPrePower, ++ pfnSystemPostPower, ++ pfnForcedIdleRequest, ++ pfnForcedIdleCancelRequest); ++ ++ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange; ++ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange; ++ psPowerDevice->pfnGPUUnitsPowerChange = pfnGPUUnitsPowerChange; ++ psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData; ++ psPowerDevice->hDevCookie = hDevCookie; ++ OSAtomicWrite(&psPowerDevice->eCurrentPowerState, eCurrentPowerState); ++ psPowerDevice->eDefaultPowerState = eDefaultPowerState; ++ ++ psDeviceNode->psPowerDev = psPowerDevice; ++ ++ return PVRSRV_OK; ++} ++ ++void PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ if (psDeviceNode->psPowerDev) ++ { ++ OSFreeMem(psDeviceNode->psPowerDev); ++ psDeviceNode->psPowerDev = NULL; ++ } ++} ++ ++PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode, ++ PPVRSRV_DEV_POWER_STATE pePowerState) ++{ ++ PVRSRV_POWER_DEV *psPowerDevice; ++ ++ psPowerDevice = psDeviceNode->psPowerDev; ++ if (psPowerDevice == NULL) ++ { ++ return PVRSRV_ERROR_UNKNOWN_POWER_STATE; ++ } ++ ++ *pePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); ++ ++ return PVRSRV_OK; ++} ++ ++IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode) ++{ ++ PVRSRV_DEV_POWER_STATE ePowerState; ++ ++ if (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) != PVRSRV_OK) ++ { ++ return IMG_FALSE; ++ } ++ ++ return (ePowerState == PVRSRV_DEV_POWER_STATE_ON); ++} ++ ++PVRSRV_ERROR ++PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, ++ IMG_BOOL bIdleDevice, ++ void* pvInfo) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_POWER_DEV *psPowerDevice; ++ IMG_UINT64 ui64StartTimer, ui64StopTimer; ++ ++ PVR_UNREFERENCED_PARAMETER(pvInfo); ++ ++ ui64StartTimer = PVRSRVProcessStatsGetTimeUs(); ++ ++ /* This mutex is released in PVRSRVDevicePostClockSpeedChange. 
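
Seen from the caller's side, this pre/post pair brackets the actual frequency change. The sketch below is hypothetical, not code from this patch; SetMyGpuClockRate() stands in for a platform-specific clock call.

extern void SetMyGpuClockRate(IMG_UINT32 ui32NewClockHz); /* hypothetical */

static PVRSRV_ERROR MyChangeGpuClock(PPVRSRV_DEVICE_NODE psDeviceNode,
                                     IMG_UINT32 ui32NewClockHz)
{
	PVRSRV_ERROR eError;

	/* Takes the power lock and, with bIdleDevice = IMG_TRUE, force-idles
	 * the device so the frequency can be changed safely.
	 */
	eError = PVRSRVDevicePreClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	SetMyGpuClockRate(ui32NewClockHz);

	/* Cancels the idle request and releases the power lock taken above. */
	PVRSRVDevicePostClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);

	return PVRSRV_OK;
}
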
*/ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); ++ ++ psPowerDevice = psDeviceNode->psPowerDev; ++ if (psPowerDevice) ++ { ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState = ++ OSAtomicRead(&psPowerDevice->eCurrentPowerState); ++ ++ if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice) ++ { ++ /* We can change the clock speed if the device is either IDLE or OFF */ ++ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE); ++ ++ if (eError != PVRSRV_OK) ++ { ++ /* FW Can signal denied when busy with SPM or other work it can not idle */ ++ if (eError != PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Error (%s) from %s()", __func__, ++ PVRSRVGETERRORSTRING(eError), "PVRSRVDeviceIdleRequestKM")); ++ } ++ if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) ++ { ++ PVRSRVPowerUnlock(psDeviceNode); ++ } ++ return eError; ++ } ++ } ++ ++ eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie, ++ eCurrentPowerState); ++ } ++ ++ ui64StopTimer = PVRSRVProcessStatsGetTimeUs(); ++ ++ InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer); ++ ++ return eError; ++} ++ ++void ++PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode, ++ IMG_BOOL bIdleDevice, ++ void* pvInfo) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_POWER_DEV *psPowerDevice; ++ IMG_UINT64 ui64StartTimer, ui64StopTimer; ++ ++ PVR_UNREFERENCED_PARAMETER(pvInfo); ++ ++ ui64StartTimer = PVRSRVProcessStatsGetTimeUs(); ++ ++ psPowerDevice = psDeviceNode->psPowerDev; ++ if (psPowerDevice) ++ { ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState = ++ OSAtomicRead(&psPowerDevice->eCurrentPowerState); ++ ++ eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie, ++ eCurrentPowerState); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)", ++ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); ++ } ++ ++ if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice) ++ { ++ eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode); ++ PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM"); ++ } ++ } ++ ++ /* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. 
*/ ++ PVRSRVPowerUnlock(psDeviceNode); ++ ++ OSAtomicIncrement(&psDeviceNode->iNumClockSpeedChanges); ++ ++ ui64StopTimer = PVRSRVProcessStatsGetTimeUs(); ++ ++ InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer); ++} ++ ++PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode, ++ IMG_UINT32 ui32NewValue) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_POWER_DEV *psPowerDevice; ++ ++ psPowerDevice = psDeviceNode->psPowerDev; ++ if (psPowerDevice) ++ { ++ PVRSRV_DEV_POWER_STATE eDevicePowerState; ++ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); ++ ++ eDevicePowerState = OSAtomicRead(&psPowerDevice->eCurrentPowerState); ++ if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON) ++ { ++ /* Device must be idle to change GPU unit(s) power state */ ++ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); ++ if (eError == PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) ++ { ++ goto ErrorExit; ++ } ++ goto ErrorUnlockAndExit; ++ } ++ } ++ ++ if (psPowerDevice->pfnGPUUnitsPowerChange != NULL) ++ { ++ PVRSRV_ERROR eError2 = psPowerDevice->pfnGPUUnitsPowerChange(psPowerDevice->hDevCookie, ui32NewValue); ++ ++ if (eError2 != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)", ++ __func__, psDeviceNode, ++ PVRSRVGetErrorString(eError2))); ++ } ++ } ++ ++ if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON) ++ { ++ eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDeviceIdleCancelRequestKM", ErrorUnlockAndExit); ++ } ++ ++ PVRSRVPowerUnlock(psDeviceNode); ++ } ++ ++ return eError; ++ ++ErrorUnlockAndExit: ++ PVRSRVPowerUnlock(psDeviceNode); ++ErrorExit: ++ return eError; ++} ++ ++/****************************************************************************** ++ End of file (power.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/power.h b/drivers/gpu/drm/img-rogue/power.h +new file mode 100644 +index 000000000000..333e7992eb34 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/power.h +@@ -0,0 +1,430 @@ ++/*************************************************************************/ /*! ++@File ++@Title Power Management Functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Main APIs for power management functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef POWER_H ++#define POWER_H ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_device.h" ++#include "pvrsrv_error.h" ++#include "servicesext.h" ++#include "opaque_types.h" ++ ++/*! ++ ***************************************************************************** ++ * Power management ++ *****************************************************************************/ ++ ++typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV; ++ ++typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice); ++ ++/* Power transition handler prototypes */ ++ ++/*! ++ Typedef for a pointer to a Function that will be called before a transition ++ from one power state to another. See also PFN_POST_POWER. ++ */ ++typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags); ++/*! ++ Typedef for a pointer to a Function that will be called after a transition ++ from one power state to another. See also PFN_PRE_POWER. ++ */ ++typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags); ++ ++PVRSRV_ERROR PVRSRVPowerLockInit(PPVRSRV_DEVICE_NODE psDeviceNode); ++void PVRSRVPowerLockDeInit(PPVRSRV_DEVICE_NODE psDeviceNode); ++ ++/*! ++****************************************************************************** ++ ++ @Function PVRSRVPowerLock ++ ++ @Description Obtain the mutex for power transitions. Only allowed when ++ system power is on. ++ ++ @Return PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or PVRSRV_OK ++ ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode); ++ ++/*! ++****************************************************************************** ++ ++ @Function PVRSRVPowerUnlock ++ ++ @Description Release the mutex for power transitions ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++void PVRSRVPowerUnlock(PPVRSRV_DEVICE_NODE psDeviceNode); ++ ++/*! 
++******************************************************************************
++
++ @Function PVRSRVPowerTryLock
++
++ @Description Try to obtain the mutex for power transitions. Only allowed when
++ system power is on.
++
++ @Return PVRSRV_ERROR_RETRY or PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF or
++ PVRSRV_OK
++
++******************************************************************************/
++PVRSRV_ERROR PVRSRVPowerTryLock(PPVRSRV_DEVICE_NODE psDeviceNode);
++
++/*!
++******************************************************************************
++
++ @Function PVRSRVPwrLockIsLockedByMe
++
++ @Description Determine if the calling context is holding the device power-lock
++
++ @Return IMG_BOOL
++
++******************************************************************************/
++IMG_BOOL PVRSRVPwrLockIsLockedByMe(PCPVRSRV_DEVICE_NODE psDeviceNode);
++IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice);
++
++/*!
++******************************************************************************
++
++ @Function PVRSRVSetDevicePowerStateKM
++
++ @Description Set the Device into a new state
++
++ @Input psDeviceNode : Device node
++ @Input eNewPowerState : New power state
++ @Input ePwrFlags : Power state change flags
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode,
++ PVRSRV_DEV_POWER_STATE eNewPowerState,
++ PVRSRV_POWER_FLAGS ePwrFlags);
++
++/*************************************************************************/ /*!
++@Function PVRSRVSetDeviceSystemPowerState
++@Description Set the device into a new power state based on the system's power
++ state
++@Input psDeviceNode Device node
++@Input eNewSysPowerState New system power state
++@Input ePwrFlags Power state change flags
++@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise
++*/ /**************************************************************************/
++PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode,
++ PVRSRV_SYS_POWER_STATE eNewSysPowerState,
++ PVRSRV_POWER_FLAGS ePwrFlags);
++
++/*!
++******************************************************************************
++
++ @Function PVRSRVSetDeviceDefaultPowerState
++
++ @Description Set the default device power state to eNewPowerState
++
++ @Input psDeviceNode : Device node
++ @Input eNewPowerState : New power state
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
++ PVRSRV_DEV_POWER_STATE eNewPowerState);
++
++/*!
++******************************************************************************
++
++ @Function PVRSRVSetSystemPowerState
++
++ @Description Set the system power state to eNewSysPowerState
++
++ @Input psDeviceConfig : Device config
++ @Input eNewSysPowerState : New system power state
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR PVRSRVSetSystemPowerState(PVRSRV_DEVICE_CONFIG * psDeviceConfig,
++ PVRSRV_SYS_POWER_STATE eNewSysPowerState);
++
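
A minimal usage sketch for the lock helpers declared above (the function name is illustrative; PVRSRVGetDevicePowerState() is declared a little further below):

/* Illustrative only: query the device power state under the power lock. */
static PVRSRV_ERROR example_query_power_state(PPVRSRV_DEVICE_NODE psDeviceNode,
                                              PPVRSRV_DEV_POWER_STATE pePowerState)
{
	/* In contexts that must not block, PVRSRVPowerTryLock() can be used
	 * instead and PVRSRV_ERROR_RETRY handled by the caller. */
	PVRSRV_ERROR eError = PVRSRVPowerLock(psDeviceNode);

	if (eError != PVRSRV_OK)
	{
		return eError; /* e.g. PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF */
	}

	eError = PVRSRVGetDevicePowerState(psDeviceNode, pePowerState);
	PVRSRVPowerUnlock(psDeviceNode);

	return eError;
}
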
++/*!
++******************************************************************************
++
++ @Function PVRSRVSetPowerCallbacks
++
++ @Description Initialise the Power Device's function pointers
++ to the appropriate callbacks depending on driver mode and
++ system setup.
++
++ @Input psDeviceNode : Device node
++ @Input psPowerDevice : Power device
++ @Input pfnDevicePrePower : regular device pre power callback
++ @Input pfnDevicePostPower : regular device post power callback
++ @Input pfnSystemPrePower : regular system pre power callback
++ @Input pfnSystemPostPower : regular system post power callback
++ @Input pfnForcedIdleRequest : forced idle request callback
++ @Input pfnForcedIdleCancelRequest : forced idle request cancel callback
++
++******************************************************************************/
++void PVRSRVSetPowerCallbacks(PPVRSRV_DEVICE_NODE psDeviceNode,
++ PVRSRV_POWER_DEV *psPowerDevice,
++ PFN_PRE_POWER pfnDevicePrePower,
++ PFN_POST_POWER pfnDevicePostPower,
++ PFN_SYS_PRE_POWER pfnSystemPrePower,
++ PFN_SYS_POST_POWER pfnSystemPostPower,
++ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest,
++ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest);
++
++/* Type PFN_DC_REGISTER_POWER */
++PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode,
++ PFN_PRE_POWER pfnDevicePrePower,
++ PFN_POST_POWER pfnDevicePostPower,
++ PFN_SYS_PRE_POWER pfnSystemPrePower,
++ PFN_SYS_POST_POWER pfnSystemPostPower,
++ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
++ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
++ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest,
++ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest,
++ PFN_GPU_UNITS_POWER_CHANGE pfnGPUUnitsPowerChange,
++ IMG_HANDLE hDevCookie,
++ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
++ PVRSRV_DEV_POWER_STATE eDefaultPowerState);
++
++/*!
++******************************************************************************
++
++ @Function PVRSRVRemovePowerDevice
++
++ @Description
++
++ Removes device from power management register. Device is located by Device Index
++
++ @Input psDeviceNode : Device node
++
++******************************************************************************/
++void PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode);
++
++/*!
++******************************************************************************
++
++ @Function PVRSRVGetDevicePowerState
++
++ @Description
++
++ Return the device power state
++
++ @Input psDeviceNode : Device node
++ @Output pePowerState : Current power state
++
++ @Return PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found.
++ PVRSRV_OK otherwise.
++
++******************************************************************************/
++PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
++ PPVRSRV_DEV_POWER_STATE pePowerState);
++
++/*!
++******************************************************************************
++
++ @Function PVRSRVIsDevicePowered
++
++ @Description
++
++ Whether the device is powered, for the purposes of lockup detection.
++
++ @Input psDeviceNode : Device node
++
++ @Return IMG_BOOL
++
++******************************************************************************/
++IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode);
++
++/**************************************************************************/ /*!
++@Function PVRSRVDevicePreClockSpeedChange
++
++@Description This function is called before a voltage/frequency change is
++ made to the GPU HW. It informs the host driver of the intention
++ to make a DVFS change. It allows the host driver to idle
++ the GPU and begin a hold off period from starting new work
++ on the GPU.
++ When this call succeeds the caller *must* call
++ PVRSRVDevicePostClockSpeedChange() to end the hold off period
++ to allow new work to be submitted to the GPU.
++
++ Called from system layer or OS layer implementation that
++ is responsible for triggering a GPU DVFS transition.
++
++@Input psDeviceNode pointer to the device affected by DVFS transition.
++@Input bIdleDevice when True, the driver will wait for the GPU to
++ reach an idle state before the call returns.
++@Input pvInfo unused
++
++@Return PVRSRV_OK on success, power lock acquired and held on exit,
++ GPU idle.
++ PVRSRV_ERROR on failure, power lock not held on exit, do not
++ call PVRSRVDevicePostClockSpeedChange().
++*/ /**************************************************************************/
++PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
++ IMG_BOOL bIdleDevice,
++ void *pvInfo);
++
++/**************************************************************************/ /*!
++@Function PVRSRVDevicePostClockSpeedChange
++
++@Description This function is called after a voltage/frequency change has
++ been made to the GPU HW following a call to
++ PVRSRVDevicePreClockSpeedChange().
++ Before calling this function the caller must ensure the system
++ data RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed has
++ been updated with the new frequency set, measured in Hz.
++ The function informs the host driver that the DVFS change has
++ completed. The driver will end the work hold off period, cancel
++ the device idle period and update its time data records.
++ When this call returns work submissions are unblocked and
++ are submitted to the GPU as normal.
++ This function *must* not be called if the preceding call to
++ PVRSRVDevicePreClockSpeedChange() failed.
++
++ Called from system layer or OS layer implementation that
++ is responsible for triggering a GPU DVFS transition.
++
++@Input psDeviceNode pointer to the device affected by DVFS transition.
++@Input bIdleDevice when True, the driver will cancel the GPU
++ device idle state before the call returns. Value
++ given must match that used in the call to
++ PVRSRVDevicePreClockSpeedChange() otherwise
++ undefined behaviour will result.
++@Input pvInfo unused
++
++@Return void power lock released, no longer held on exit.
++*/ /**************************************************************************/
++void PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
++ IMG_BOOL bIdleDevice,
++ void *pvInfo);
++
++/*!
++******************************************************************************
++
++ @Function PVRSRVDeviceIdleRequestKM
++
++ @Description Perform device-specific processing required to force the device
++ idle. The device power-lock might be temporarily released (and
++ again re-acquired) during the course of this call, hence to
++ maintain lock-ordering power-lock should be the last acquired
++ lock before calling this function.
++
++ @Input psDeviceNode : Device node
++
++ @Input pfnIsDefaultStateOff : When specified, the idle request is only
++ processed if this function passes.
++
++ @Input bDeviceOffPermitted : IMG_TRUE if the transition should not fail
++ if device off
++ IMG_FALSE if the transition should fail if
++ device off
++
++ @Return PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED
++ When re-acquisition of power-lock failed.
++ This error NEEDS EXPLICIT HANDLING at call
++ site as it signifies the caller needs to
++ AVOID calling PVRSRVPowerUnlock, since
++ power-lock is no longer "possessed" by
++ this context.
++
++ PVRSRV_OK When idle request succeeded.
++ PVRSRV_ERROR Other system errors.
++
++******************************************************************************/
++PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
++ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff,
++ IMG_BOOL bDeviceOffPermitted);
++
++/*!
++******************************************************************************
++
++ @Function PVRSRVDeviceIdleCancelRequestKM
++
++ @Description Perform device-specific processing required to cancel the forced idle state
++ on the device, returning to normal operation.
++
++ @Input psDeviceNode : Device node
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode);
++
++/*!
++******************************************************************************
++
++@Function PVRSRVDeviceGPUUnitsPowerChange
++@Description Request from system layer for changing power state of GPU
++ units
++@Input psDeviceNode RGX Device Node.
++@Input ui32NewValue Value indicating the new power state
++ of GPU units. How this is interpreted
++ depends upon the device-specific
++ function subsequently called by the
++ server via a pfn.
++@Return PVRSRV_ERROR.
++*/ /**************************************************************************/
++PVRSRV_ERROR PVRSRVDeviceGPUUnitsPowerChange(PPVRSRV_DEVICE_NODE psDeviceNode,
++ IMG_UINT32 ui32NewValue);
++
++
++#endif /* POWER_H */
++
++/******************************************************************************
++ End of file (power.h)
++******************************************************************************/
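
Taken together, the two DVFS entry points documented above form a strict bracket. The sketch below shows the expected call sequence; set_platform_gpu_clock() is an assumed platform helper, the timing-data update is left abstract, and the function name is illustrative:

/* Illustrative sketch only: the DVFS bracket documented above. */
extern void set_platform_gpu_clock(IMG_UINT32 ui32NewClockHz); /* assumed */

static PVRSRV_ERROR example_dvfs_transition(PPVRSRV_DEVICE_NODE psDeviceNode,
                                            IMG_UINT32 ui32NewClockHz)
{
	/* On success the power lock is held and the GPU is idle. */
	PVRSRV_ERROR eError = PVRSRVDevicePreClockSpeedChange(psDeviceNode,
	                                                      IMG_TRUE, NULL);
	if (eError != PVRSRV_OK)
	{
		/* Lock not held: PVRSRVDevicePostClockSpeedChange() must not
		 * be called on this path. */
		return eError;
	}

	/* Program the new frequency and mirror it into the timing data
	 * (RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed, in Hz). */
	set_platform_gpu_clock(ui32NewClockHz);

	/* Ends the hold-off period and releases the power lock; bIdleDevice
	 * must match the value passed to the Pre call. */
	PVRSRVDevicePostClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);

	return PVRSRV_OK;
}
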
+diff --git a/drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h b/drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h
+new file mode 100644
+index 000000000000..41eaaaecd19e
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/powervr/buffer_attribs.h
+@@ -0,0 +1,193 @@
++/*************************************************************************/ /*!
++@File
++@Title 3D types for use by IMG APIs
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License MIT
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++THE SOFTWARE.
++*/ /**************************************************************************/
++#ifndef POWERVR_BUFFER_ATTRIBS_H
++#define POWERVR_BUFFER_ATTRIBS_H
++
++/*!
++ * Memory layouts
++ * Defines how pixels are laid out within a surface.
++ */
++typedef enum
++{
++ IMG_MEMLAYOUT_STRIDED, /**< Resource is strided, one row at a time */
++ IMG_MEMLAYOUT_TWIDDLED, /**< Resource is 2D twiddled to match HW */
++ IMG_MEMLAYOUT_3DTWIDDLED, /**< Resource is 3D twiddled, classic style */
++ IMG_MEMLAYOUT_TILED, /**< Resource is tiled, tiling config specified elsewhere. */
++ IMG_MEMLAYOUT_PAGETILED, /**< Resource is pagetiled */
++ IMG_MEMLAYOUT_INVNTWIDDLED, /**< Resource is 2D twiddled !N style */
++} IMG_MEMLAYOUT;
++
++/*!
++ * Rotation types
++ */
++typedef enum
++{
++ IMG_ROTATION_0DEG = 0,
++ IMG_ROTATION_90DEG = 1,
++ IMG_ROTATION_180DEG = 2,
++ IMG_ROTATION_270DEG = 3,
++ IMG_ROTATION_FLIP_Y = 4,
++
++ IMG_ROTATION_BAD = 255,
++} IMG_ROTATION;
++
++/*!
++ * Colour space formats.
++ */
++typedef enum
++{
++ IMG_COLOURSPACE_FORMAT_UNKNOWN = 0x0UL << 16,
++ IMG_COLOURSPACE_FORMAT_LINEAR = 0x1UL << 16,
++ IMG_COLOURSPACE_FORMAT_SRGB = 0x2UL << 16,
++ IMG_COLOURSPACE_FORMAT_SCRGB = 0x3UL << 16,
++ IMG_COLOURSPACE_FORMAT_SCRGB_LINEAR = 0x4UL << 16,
++ IMG_COLOURSPACE_FORMAT_DISPLAY_P3_LINEAR = 0x5UL << 16,
++ IMG_COLOURSPACE_FORMAT_DISPLAY_P3 = 0x6UL << 16,
++ IMG_COLOURSPACE_FORMAT_BT2020_PQ = 0x7UL << 16,
++ IMG_COLOURSPACE_FORMAT_BT2020_LINEAR = 0x8UL << 16,
++ IMG_COLOURSPACE_FORMAT_DISPLAY_P3_PASSTHROUGH = 0x9UL << 16,
++ IMG_COLOURSPACE_FORMAT_MASK = 0xFUL << 16,
++} IMG_COLOURSPACE_FORMAT;
++
++/*!
++ * Determines if FB Compression is Lossy
++ */
++#define IS_FBCDC_LOSSY(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_TRUE : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_TRUE : IMG_FALSE)
++
++/*!
++ * Determines if FB Compression is Packed
++ */
++#define IS_FBCDC_PACKED(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_TRUE : IMG_FALSE)
++
++/*!
++ * Returns type of FB Compression
++ */
++#define GET_FBCDC_BLOCK_TYPE(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : mode)
++
++/*!
++ * Adds Packing compression setting to mode if viable
++ */
++#define FBCDC_MODE_ADD_PACKING(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_PACKED_8x8 : mode)
++
++/*!
++ * Removes Packing compression setting from mode
++ */
++#define FBCDC_MODE_REMOVE_PACKING(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_PACKED_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : mode)
++
++/*!
++ * Adds Lossy25 compression setting to mode if viable
++ */
++#define FBCDC_MODE_ADD_LOSSY25(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2 : mode)
++
++/*!
++ * Adds Lossy37 compression setting to mode if viable
++ */
++#define FBCDC_MODE_ADD_LOSSY37(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2 : mode)
++
++/*!
++ * Adds Lossy50 compression setting to mode if viable
++ */
++#define FBCDC_MODE_ADD_LOSSY50(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2 : mode)
++
++/*!
++ * Adds Lossy75 compression setting to mode if viable
++ */
++#define FBCDC_MODE_ADD_LOSSY75(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_8x8) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_16x4) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_32x2) ? IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2 : mode)
++
++/*!
++ * Removes Lossy compression setting from mode
++ */
++#define FBCDC_MODE_REMOVE_LOSSY(mode) ((mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8) ? IMG_FB_COMPRESSION_DIRECT_8x8 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4) ? IMG_FB_COMPRESSION_DIRECT_16x4 : \
++ (mode == IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2) ? IMG_FB_COMPRESSION_DIRECT_32x2 : mode)
++
++/*!
++ * Types of framebuffer compression
++ */
++typedef enum
++{
++ IMG_FB_COMPRESSION_NONE,
++ IMG_FB_COMPRESSION_DIRECT_8x8,
++ IMG_FB_COMPRESSION_DIRECT_16x4,
++ IMG_FB_COMPRESSION_DIRECT_32x2,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY25_8x8,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY25_16x4,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY25_32x2,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY75_8x8,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY50_16x4,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY50_32x2,
++ IMG_FB_COMPRESSION_DIRECT_PACKED_8x8,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY75_16x4,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY75_32x2,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY37_8x8,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY37_16x4,
++ IMG_FB_COMPRESSION_DIRECT_LOSSY37_32x2,
++} IMG_FB_COMPRESSION;
++
++
++#endif /* POWERVR_BUFFER_ATTRIBS_H */
+diff --git a/drivers/gpu/drm/img-rogue/powervr/img_drm_fourcc.h b/drivers/gpu/drm/img-rogue/powervr/img_drm_fourcc.h
+new file mode 100644
+index 000000000000..5fd79a6c413e
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/powervr/img_drm_fourcc.h
+@@ -0,0 +1,140 @@
++/*************************************************************************/ /*!
++@File
++@Title Wrapper around drm_fourcc.h
++@Description FourCCs and DRM framebuffer modifiers that are not in the
++ Kernel's and libdrm's drm_fourcc.h can be added here.
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License MIT
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef IMG_DRM_FOURCC_H
++#define IMG_DRM_FOURCC_H
++
++#if defined(__KERNEL__)
++#include <drm/drm_fourcc.h>
++#else
++/*
++ * Include types.h to workaround versions of libdrm older than 2.4.68
++ * not including the correct headers.
++ */
++#include <linux/types.h>
++
++#include <drm_fourcc.h>
++#endif
++
++/*
++ * Don't get too inspired by this example :)
++ * ADF doesn't support DRM modifiers, so the memory layout had to be
++ * included in the fourcc name, but the proper way to specify information
++ * additional to pixel formats is to use DRM modifiers.
++ *
++ * See upstream drm_fourcc.h for the proper naming convention.
++ */
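
Stepping back to the FBCDC helpers in buffer_attribs.h above, a short illustrative walk-through (the function name is an assumption for the example, not driver code):

/* Illustrative only: derive a lossy variant of a mode and recover its
 * base block type using the buffer_attribs.h macros above. */
static IMG_FB_COMPRESSION example_fbcdc_roundtrip(void)
{
	IMG_FB_COMPRESSION eMode = IMG_FB_COMPRESSION_DIRECT_8x8;

	eMode = FBCDC_MODE_ADD_LOSSY50(eMode);
	/* eMode == IMG_FB_COMPRESSION_DIRECT_LOSSY50_8x8, so
	 * IS_FBCDC_LOSSY(eMode) == IMG_TRUE and
	 * GET_FBCDC_BLOCK_TYPE(eMode) == IMG_FB_COMPRESSION_DIRECT_8x8. */

	return FBCDC_MODE_REMOVE_LOSSY(eMode); /* IMG_FB_COMPRESSION_DIRECT_8x8 */
}
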
++#ifndef DRM_FORMAT_BGRA8888_DIRECT_16x4
++#define DRM_FORMAT_BGRA8888_DIRECT_16x4 fourcc_code('I', 'M', 'G', '0')
++#endif
++
++#if !defined(__KERNEL__)
++/*
++ * A definition for the same format was added in Linux kernel 5.2 in commit
++ * 88ab9c76d191ad8645b483f31e2b394b0f3e280e. As such, this definition has been
++ * deprecated and the DRM_FORMAT_ABGR16161616F kernel define should be used
++ * instead of this one.
++ */
++#define DRM_FORMAT_ABGR16_IMG_DEPRECATED fourcc_code('I', 'M', 'G', '1')
++#endif
++
++/*
++ * Upstream does not have a packed 10 Bits Per Channel YVU format yet,
++ * so let's make one up.
++ * Note: at the moment this format is not intended to be used with
++ * a framebuffer, so the kernel's core DRM doesn't need to know
++ * about this format. This means that the kernel doesn't need
++ * to be patched.
++ */
++#if !defined(__KERNEL__)
++#define DRM_FORMAT_YVU444_PACK10_IMG fourcc_code('I', 'M', 'G', '2')
++#define DRM_FORMAT_YUV422_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '3')
++#define DRM_FORMAT_YUV420_2PLANE_PACK10_IMG fourcc_code('I', 'M', 'G', '4')
++#endif
++
++/*
++ * Value chosen in the middle of the 255 pool to minimise the chance of hitting
++ * the same value potentially defined by other vendors in drm_fourcc.h
++ */
++#define DRM_FORMAT_MOD_VENDOR_PVR 0x92
++
++#ifndef DRM_FORMAT_MOD_VENDOR_NONE
++#define DRM_FORMAT_MOD_VENDOR_NONE 0
++#endif
++
++#ifndef DRM_FORMAT_RESERVED
++#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
++#endif
++
++#define img_fourcc_mod_combine(uiModHi, uiModLo) \
++ ((__u64) ((__u32) (uiModHi)) << 32 | (__u64) ((__u32) (uiModLo)))
++
++#define img_fourcc_mod_hi(ui64Mod) \
++ ((__u32) ((__u64) (ui64Mod) >> 32))
++
++#define img_fourcc_mod_lo(ui64Mod) \
++ ((__u32) ((__u64) (ui64Mod)) & 0xffffffff)
++
++#ifndef fourcc_mod_code
++#define fourcc_mod_code(vendor, val) \
++ ((((__u64)DRM_FORMAT_MOD_VENDOR_## vendor) << 56) | (val & 0x00ffffffffffffffULL))
++#endif
++
++#ifndef DRM_FORMAT_MOD_INVALID
++#define DRM_FORMAT_MOD_INVALID fourcc_mod_code(NONE, DRM_FORMAT_RESERVED)
++#endif
++
++#ifndef DRM_FORMAT_MOD_LINEAR
++#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
++#endif
++
++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V1 fourcc_mod_code(PVR, 3)
++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V1 fourcc_mod_code(PVR, 9)
++
++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V7 fourcc_mod_code(PVR, 6)
++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V7 fourcc_mod_code(PVR, 12)
++
++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V10 fourcc_mod_code(PVR, 21)
++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V10 fourcc_mod_code(PVR, 22)
++#define DRM_FORMAT_MOD_PVR_FBCDC_32x2_V10 fourcc_mod_code(PVR, 23)
++
++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V12 fourcc_mod_code(PVR, 15)
++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V12 fourcc_mod_code(PVR, 16)
++
++#define DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13 fourcc_mod_code(PVR, 24)
++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_8x8_V13 fourcc_mod_code(PVR, 25)
++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_8x8_V13 fourcc_mod_code(PVR, 26)
++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_8x8_V13 fourcc_mod_code(PVR, 27)
++
++#define DRM_FORMAT_MOD_PVR_FBCDC_16x4_V13 fourcc_mod_code(PVR, 28)
++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY25_16x4_V13 fourcc_mod_code(PVR, 29)
++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY50_16x4_V13 fourcc_mod_code(PVR, 30)
++#define DRM_FORMAT_MOD_PVR_FBCDC_LOSSY75_16x4_V13 fourcc_mod_code(PVR, 31)
++
++#endif /* IMG_DRM_FOURCC_H */
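
A worked example of the modifier packing defined above: fourcc_mod_code() places the vendor byte in the top 8 bits and the vendor-specific value in the low 56 bits, and img_fourcc_mod_hi()/img_fourcc_mod_lo() split the 64-bit value for transport through 32-bit fields (the function name is illustrative):

/* Illustrative only: how the 64-bit modifier value above is laid out.
 * DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13 == fourcc_mod_code(PVR, 24)
 *                                  == (0x92ULL << 56) | 24
 *                                  == 0x9200000000000018 */
static void example_modifier_layout(void)
{
	__u64 ui64Mod = DRM_FORMAT_MOD_PVR_FBCDC_8x8_V13;
	__u32 ui32Hi = img_fourcc_mod_hi(ui64Mod); /* 0x92000000 */
	__u32 ui32Lo = img_fourcc_mod_lo(ui64Mod); /* 0x00000018, i.e. 24 */

	/* img_fourcc_mod_combine(ui32Hi, ui32Lo) rebuilds ui64Mod. */
	(void)ui32Hi;
	(void)ui32Lo;
}
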
+diff --git a/drivers/gpu/drm/img-rogue/powervr/mem_types.h b/drivers/gpu/drm/img-rogue/powervr/mem_types.h
+new file mode 100644
+index 000000000000..a6dce8fe9889
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/powervr/mem_types.h
+@@ -0,0 +1,64 @@
++/*************************************************************************/ /*!
++@File
++@Title Public types
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License MIT
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef POWERVR_TYPES_H
++#define POWERVR_TYPES_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#if defined(_MSC_VER)
++ #include "msvc_types.h"
++#elif defined(__linux__) && defined(__KERNEL__)
++ #include <linux/types.h>
++ #include <linux/compiler.h>
++#else
++ #include <stdint.h>
++ #define __iomem
++#endif
++
++typedef void *IMG_CPU_VIRTADDR;
++
++/* device virtual address */
++typedef struct
++{
++ uint64_t uiAddr;
++#define IMG_CAST_TO_DEVVADDR_UINT(var) (uint64_t)(var)
++
++} IMG_DEV_VIRTADDR;
++
++typedef uint64_t IMG_DEVMEM_SIZE_T;
++typedef uint64_t IMG_DEVMEM_ALIGN_T;
++typedef uint64_t IMG_DEVMEM_OFFSET_T;
++typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T;
++
++#if defined(__cplusplus)
++}
++#endif
++
++#endif
+diff --git a/drivers/gpu/drm/img-rogue/powervr/pvrsrv_sync_ext.h b/drivers/gpu/drm/img-rogue/powervr/pvrsrv_sync_ext.h
+new file mode 100644
+index 000000000000..30f7972444cd
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/powervr/pvrsrv_sync_ext.h
+@@ -0,0 +1,72 @@
++/*************************************************************************/ /*!
++@File
++@Title Services external synchronisation interface header
++@Description Defines synchronisation structures that are visible internally
++ and externally
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License MIT
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++ ++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE ++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef POWERVR_SYNC_EXT_H ++#define POWERVR_SYNC_EXT_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/*! ++ * Number of sync prims still used internally in operations ++ */ ++#define PVRSRV_MAX_SYNC_PRIMS 4U ++ ++/*! ++ * Maximum number of dev var updates passed in a kick call ++ */ ++#define PVRSRV_MAX_DEV_VARS 13U ++ ++/*! ++ * Number of UFOs in operations ++ */ ++#define PVRSRV_MAX_SYNCS (PVRSRV_MAX_SYNC_PRIMS + PVRSRV_MAX_DEV_VARS) ++ ++/*! Implementation independent types for passing fence/timeline to Services. ++ */ ++typedef int32_t PVRSRV_FENCE; ++typedef int32_t PVRSRV_TIMELINE; ++ ++/*! Maximum length for an annotation name string for fence sync model objects. ++ */ ++#define PVRSRV_SYNC_NAME_LENGTH 32U ++ ++/* Macros for API callers using the fence sync model ++ */ ++#define PVRSRV_NO_TIMELINE ((PVRSRV_TIMELINE) -1) ++#define PVRSRV_NO_FENCE ((PVRSRV_FENCE) -1) ++#define PVRSRV_NO_FENCE_PTR NULL ++#define PVRSRV_NO_TIMELINE_PTR NULL ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif +diff --git a/drivers/gpu/drm/img-rogue/private_data.h b/drivers/gpu/drm/img-rogue/private_data.h +new file mode 100644 +index 000000000000..60a1fac0b970 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/private_data.h +@@ -0,0 +1,59 @@ ++/*************************************************************************/ /*! ++@File ++@Title Linux private data structure ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(INCLUDED_PRIVATE_DATA_H)
++#define INCLUDED_PRIVATE_DATA_H
++
++#include <linux/fs.h>
++
++#include "connection_server.h"
++#include "pvr_drm.h"
++
++#define PVR_SRVKM_PRIV_DATA_IDX 0
++#define PVR_SYNC_PRIV_DATA_IDX 1
++
++#define PVR_NUM_PRIV_DATA_IDXS 2
++
++CONNECTION_DATA *LinuxServicesConnectionFromFile(struct file *pFile);
++CONNECTION_DATA *LinuxSyncConnectionFromFile(struct file *pFile);
++
++#endif /* !defined(INCLUDED_PRIVATE_DATA_H) */
+diff --git a/drivers/gpu/drm/img-rogue/proc_stats.h b/drivers/gpu/drm/img-rogue/proc_stats.h
+new file mode 100644
+index 000000000000..a4e9c786518c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/proc_stats.h
+@@ -0,0 +1,135 @@
++/*************************************************************************/ /*!
++@File
++@Title Process and driver statistic definitions
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PROC_STATS_H ++#define PROC_STATS_H ++ ++/* X-Macro for Process stat keys */ ++#define PVRSRV_PROCESS_STAT_KEY \ ++ X(PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS, "Connections") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_RC_GROWS, "RenderContextGrows") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES, "RenderContextTDMStores") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, "MemoryUsageAllocPTMemoryLMA") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, 
"MemoryUsageTotal") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_OOM_VIRTMEM_COUNT, "MemoryOOMCountDeviceVirtual") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_OOM_PHYSMEM_COUNT, "MemoryOOMCountPhysicalHeap") \ ++ X(PVRSRV_PROCESS_STAT_TYPE_INVALID_VIRTMEM, "MemoryOOMCountDeviceVirtualAtAddress") ++ ++ ++/* X-Macro for Driver stat keys */ ++#define PVRSRV_DRIVER_STAT_KEY \ ++ X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, "MemoryUsageAllocPTMemoryUMA") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, "MemoryUsageAllocPTMemoryLMA") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, "MemoryUsageAllocGPUMemLMA") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX, "MemoryUsageAllocGPUMemLMAMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, "MemoryUsageAllocGPUMemUMA") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX, "MemoryUsageAllocGPUMemUMAMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, "MemoryUsageAllocGPUMemUMAPool") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX, "MemoryUsageAllocGPUMemUMAPoolMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, "MemoryUsageMappedGPUMemUMA/LMA") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, "MemoryUsageDmaBufImport") \ ++ X(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT_MAX, "MemoryUsageDmaBufImportMax") ++ ++ ++typedef enum { ++#define X(stat_type, stat_str) stat_type, ++ PVRSRV_PROCESS_STAT_KEY ++#undef X ++ PVRSRV_PROCESS_STAT_TYPE_COUNT ++}PVRSRV_PROCESS_STAT_TYPE; ++ ++typedef enum { ++#define X(stat_type, stat_str) stat_type, ++ PVRSRV_DRIVER_STAT_KEY ++#undef X ++ PVRSRV_DRIVER_STAT_TYPE_COUNT ++}PVRSRV_DRIVER_STAT_TYPE; ++ ++extern const IMG_CHAR *const pszProcessStatType[]; ++ ++extern const IMG_CHAR *const pszDriverStatType[]; ++ ++#endif // PROC_STATS_H +diff --git a/drivers/gpu/drm/img-rogue/process_stats.c b/drivers/gpu/drm/img-rogue/process_stats.c +new file mode 100644 +index 000000000000..5867e2aef28a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/process_stats.c +@@ -0,0 +1,3358 @@ ++/*************************************************************************/ /*! ++@File ++@Title Process based statistics ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Manages a collection of statistics based around a process ++ and referenced via OS agnostic methods. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvr_debug.h" ++#include "lock.h" ++#include "allocmem.h" ++#include "osfunc.h" ++#include "lists.h" ++#include "process_stats.h" ++#include "ri_server.h" ++#include "hash.h" ++#include "connection_server.h" ++#include "pvrsrv.h" ++#include "proc_stats.h" ++#include "htbuffer.h" ++#include "pvr_ricommon.h" ++#include "di_server.h" ++#if defined(__linux__) ++#include "trace_events.h" ++#endif ++ ++/* Enabled OS Statistics entries: DEBUGFS on Linux, undefined for other OSs */ ++#if defined(__linux__) && ( \ ++ defined(PVRSRV_ENABLE_PERPID_STATS) || \ ++ defined(PVRSRV_ENABLE_CACHEOP_STATS) || \ ++ defined(PVRSRV_ENABLE_MEMORY_STATS) || \ ++ defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ) ++#define ENABLE_DEBUGFS_PIDS ++#endif ++ ++/* Enable GPU memory accounting tracepoint */ ++#if defined(__linux__) && ( \ ++ defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) ) ++#define ENABLE_GPU_MEM_TRACEPOINT ++#endif ++ ++/* ++ * Maximum history of process statistics that will be kept. ++ */ ++#define MAX_DEAD_LIST_PROCESSES (10) ++ ++/* ++ * Definition of all the strings used to format process based statistics. 
++ */ ++ ++#if defined(PVRSRV_ENABLE_PERPID_STATS) ++/* Array of Process stat type defined using the X-Macro */ ++#define X(stat_type, stat_str) stat_str, ++const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY }; ++#undef X ++#endif ++ ++/* Array of Driver stat type defined using the X-Macro */ ++#define X(stat_type, stat_str) stat_str, ++const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY }; ++#undef X ++ ++/* structure used in hash table to track statistic entries */ ++typedef struct { ++ size_t uiSizeInBytes; ++ IMG_PID uiPid; ++} _PVR_STATS_TRACKING_HASH_ENTRY; ++ ++/* Function used internally to decrement tracked per-process statistic entries */ ++static void _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry, ++ PVRSRV_MEM_ALLOC_TYPE eAllocType); ++ ++#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) ++int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); ++#endif ++int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); ++int GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData); ++ ++/* Note: all of the accesses to the global stats should be protected ++ * by the gsGlobalStats.hGlobalStatsLock lock. This means all of the ++ * invocations of macros *_GLOBAL_STAT_VALUE. */ ++ ++/* Macros for fetching stat values */ ++#define GET_STAT_VALUE(ptr,var) (ptr)->i32StatValue[(var)] ++#define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui64StatValue[idx] ++ ++#define GET_GPUMEM_GLOBAL_STAT_VALUE() \ ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + \ ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA) + \ ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + \ ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + \ ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT) ++ ++#define GET_GPUMEM_PERPID_STAT_VALUE(ptr) \ ++ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA) + \ ++ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA) + \ ++ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) + \ ++ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES) + \ ++ GET_STAT_VALUE((ptr), PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT) ++/* ++ * Macros for updating stat values. 
++ */ ++#define UPDATE_MAX_VALUE(a,b) do { if ((b) > (a)) {(a) = (b);} } while (0) ++#define INCREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while (0) ++#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui64StatValue[(idx)] += (val); if ((var).ui64StatValue[(idx)] > (var).ui64StatValue[(idx##_MAX)]) {(var).ui64StatValue[(idx##_MAX)] = (var).ui64StatValue[(idx)];} } while (0) ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++/* Allow stats to go negative */ ++#define DECREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] -= (val); } while (0) ++#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui64StatValue[(idx)] -= (val); } while (0) ++#else ++#define DECREASE_STAT_VALUE(ptr,var,val) do { if ((ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while (0) ++#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { if ((var).ui64StatValue[(idx)] >= (val)) { (var).ui64StatValue[(idx)] -= (val); } else { (var).ui64StatValue[(idx)] = 0; } } while (0) ++#endif ++#define MAX_CACHEOP_STAT 16 ++#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x+1) >= MAX_CACHEOP_STAT ? 0 : (x+1)) ++#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x-1) < 0 ? (MAX_CACHEOP_STAT-1) : (x-1)) ++ ++/* ++ * Structures for holding statistics... ++ */ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++typedef struct _PVRSRV_MEM_ALLOC_REC_ ++{ ++ PVRSRV_MEM_ALLOC_TYPE eAllocType; ++ IMG_UINT64 ui64Key; ++ void* pvCpuVAddr; ++ IMG_CPU_PHYADDR sCpuPAddr; ++ size_t uiBytes; ++ void* pvPrivateData; ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) ++ void* pvAllocdFromFile; ++ IMG_UINT32 ui32AllocdFromLine; ++#endif ++ IMG_PID pid; ++ struct _PVRSRV_MEM_ALLOC_REC_* psNext; ++ struct _PVRSRV_MEM_ALLOC_REC_** ppsThis; ++} PVRSRV_MEM_ALLOC_REC; ++#endif ++ ++typedef struct _PVRSRV_PROCESS_STATS_ { ++ ++ /* Linked list pointers */ ++ struct _PVRSRV_PROCESS_STATS_* psNext; ++ struct _PVRSRV_PROCESS_STATS_* psPrev; ++ ++ /* Create per process lock that need to be held ++ * to edit of its members */ ++ POS_LOCK hLock; ++ ++ /* OS level process ID */ ++ IMG_PID pid; ++ IMG_UINT32 ui32RefCount; ++ ++ /* Stats... 
*/ ++ IMG_INT32 i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT]; ++ IMG_UINT32 ui32StatAllocFlags; ++ ++#if defined(PVRSRV_ENABLE_CACHEOP_STATS) ++ struct _CACHEOP_STRUCT_ { ++ PVRSRV_CACHE_OP uiCacheOp; ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ IMG_DEV_VIRTADDR sDevVAddr; ++ IMG_DEV_PHYADDR sDevPAddr; ++ RGXFWIF_DM eFenceOpType; ++#endif ++ IMG_DEVMEM_SIZE_T uiOffset; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_UINT64 ui64ExecuteTime; ++ IMG_BOOL bUserModeFlush; ++ IMG_BOOL bIsFence; ++ IMG_PID ownerPid; ++ } asCacheOp[MAX_CACHEOP_STAT]; ++ IMG_INT32 uiCacheOpWriteIndex; ++#endif ++ ++ /* Other statistics structures */ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++ PVRSRV_MEM_ALLOC_REC* psMemoryRecords; ++#endif ++} PVRSRV_PROCESS_STATS; ++ ++#if defined(ENABLE_DEBUGFS_PIDS) ++ ++typedef struct _PVRSRV_OS_STAT_ENTRY_ ++{ ++ DI_GROUP *psStatsDIGroup; ++ DI_ENTRY *psProcessStatsDIEntry; ++ DI_ENTRY *psMemStatsDIEntry; ++ DI_ENTRY *psRIMemStatsDIEntry; ++ DI_ENTRY *psCacheOpStatsDIEntry; ++} PVRSRV_OS_STAT_ENTRY; ++ ++static PVRSRV_OS_STAT_ENTRY gsLiveStatEntries; ++static PVRSRV_OS_STAT_ENTRY gsRetiredStatEntries; ++ ++int GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData); ++int GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData); ++ ++/* ++ * Functions for printing the information stored... ++ */ ++#if defined(PVRSRV_ENABLE_PERPID_STATS) ++void ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_PROCESS_STATS *psProcessStats); ++#endif ++ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++void MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_PROCESS_STATS *psProcessStats); ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_PROCESS_STATS *psProcessStats); ++#endif ++ ++#if defined(PVRSRV_ENABLE_CACHEOP_STATS) ++void CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_PROCESS_STATS *psProcessStats); ++#endif ++ ++typedef void (PVRSRV_STATS_PRINT_ELEMENTS)(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_PROCESS_STATS *psProcessStats); ++ ++typedef enum ++{ ++ PVRSRV_STAT_TYPE_PROCESS, ++ PVRSRV_STAT_TYPE_MEMORY, ++ PVRSRV_STAT_TYPE_RIMEMORY, ++ PVRSRV_STAT_TYPE_CACHEOP, ++ PVRSRV_STAT_TYPE_LAST ++} PVRSRV_STAT_TYPE; ++ ++#define SEPARATOR_STR_LEN 166 ++ ++typedef struct _PVRSRV_STAT_PV_DATA_ { ++ ++ PVRSRV_STAT_TYPE eStatType; ++ PVRSRV_STATS_PRINT_ELEMENTS* pfnStatsPrintElements; ++ IMG_CHAR szLiveStatsHeaderStr[SEPARATOR_STR_LEN + 1]; ++ IMG_CHAR szRetiredStatsHeaderStr[SEPARATOR_STR_LEN + 1]; ++ ++} PVRSRV_STAT_PV_DATA; ++ ++static PVRSRV_STAT_PV_DATA g_StatPvDataArr[] = { ++ { PVRSRV_STAT_TYPE_PROCESS, NULL, " Process" , " Process" }, ++ { PVRSRV_STAT_TYPE_MEMORY, NULL, " Memory Allocation" , " Memory Allocation" }, ++ { PVRSRV_STAT_TYPE_RIMEMORY, NULL, " Resource Allocation" , " Resource Allocation" }, ++ { PVRSRV_STAT_TYPE_CACHEOP, NULL, " Cache Maintenance Ops" , " Cache Maintenance Ops" } ++ }; ++ ++#define GET_STAT_ENTRY_ID(STAT_TYPE) &g_StatPvDataArr[(STAT_TYPE)] ++ ++/* Generic header strings */ ++static const IMG_CHAR g_szLiveHeaderStr[] = " Statistics for LIVE Processes "; ++static const IMG_CHAR g_szRetiredHeaderStr[] = " Statistics for RETIRED Processes "; ++ ++/* Separator string used for separating stats for different PIDs */ ++static IMG_CHAR g_szSeparatorStr[SEPARATOR_STR_LEN + 1] = ""; ++ ++static inline void ++_prepareStatsHeaderString(IMG_CHAR *pszStatsSpecificStr, const IMG_CHAR* pszGenericHeaderStr) ++{ ++ IMG_UINT32 ui32NumSeparators; 
++ IMG_CHAR szStatsHeaderFooterStr[75]; ++ ++ /* Prepare text content of the header in a local string */ ++ OSStringLCopy(szStatsHeaderFooterStr, pszStatsSpecificStr, ARRAY_SIZE(szStatsHeaderFooterStr)); ++ OSStringLCat(szStatsHeaderFooterStr, pszGenericHeaderStr, ARRAY_SIZE(szStatsHeaderFooterStr)); ++ ++ /* Write all '-' characters to the header string */ ++ memset(pszStatsSpecificStr, '-', SEPARATOR_STR_LEN); ++ pszStatsSpecificStr[SEPARATOR_STR_LEN] = '\0'; ++ ++ /* Find the spot for text content in the header string */ ++ ui32NumSeparators = (SEPARATOR_STR_LEN - OSStringLength(szStatsHeaderFooterStr)) >> 1; ++ ++ /* Finally write the text content */ ++ OSSNPrintf(pszStatsSpecificStr + ui32NumSeparators, ++ OSStringLength(szStatsHeaderFooterStr), ++ "%s", szStatsHeaderFooterStr); ++ ++ /* Overwrite the '\0' character added by OSSNPrintf() */ ++ if (OSStringLength(szStatsHeaderFooterStr) > 0) ++ { ++ pszStatsSpecificStr[ui32NumSeparators + OSStringLength(szStatsHeaderFooterStr) - 1] = ' '; ++ } ++} ++ ++static inline void ++_prepareSeparatorStrings(void) ++{ ++ IMG_UINT32 i; ++ ++ /* Prepare header strings for each stat type */ ++ for (i = 0; i < PVRSRV_STAT_TYPE_LAST; ++i) ++ { ++ _prepareStatsHeaderString(g_StatPvDataArr[i].szLiveStatsHeaderStr, g_szLiveHeaderStr); ++ _prepareStatsHeaderString(g_StatPvDataArr[i].szRetiredStatsHeaderStr, g_szRetiredHeaderStr); ++ } ++ ++ /* Prepare separator string to separate stats for different PIDs */ ++ memset(g_szSeparatorStr, '-', SEPARATOR_STR_LEN); ++ g_szSeparatorStr[SEPARATOR_STR_LEN] = '\0'; ++} ++ ++static inline void ++_prepareStatsPrivateData(void) ++{ ++#if defined(PVRSRV_ENABLE_PERPID_STATS) ++ g_StatPvDataArr[PVRSRV_STAT_TYPE_PROCESS].pfnStatsPrintElements = ProcessStatsPrintElements; ++#endif ++ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++ g_StatPvDataArr[PVRSRV_STAT_TYPE_MEMORY].pfnStatsPrintElements = MemStatsPrintElements; ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ g_StatPvDataArr[PVRSRV_STAT_TYPE_RIMEMORY].pfnStatsPrintElements = RIMemStatsPrintElements; ++#endif ++ ++#if defined(PVRSRV_ENABLE_CACHEOP_STATS) ++ g_StatPvDataArr[PVRSRV_STAT_TYPE_CACHEOP].pfnStatsPrintElements = CacheOpStatsPrintElements; ++#endif ++ ++ _prepareSeparatorStrings(); ++} ++ ++#endif ++ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC) ++static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC) ++#endif ++ ++/* ++ * Global Boolean to flag when the statistics are ready to monitor ++ * memory allocations. ++ */ ++static IMG_BOOL bProcessStatsInitialised = IMG_FALSE; ++ ++/* ++ * Linked lists for process stats. Live stats are for processes which are still running ++ * and the dead list holds those that have exited. ++ */ ++static PVRSRV_PROCESS_STATS *g_psLiveList; ++static PVRSRV_PROCESS_STATS *g_psDeadList; ++ ++static POS_LOCK g_psLinkedListLock; ++/* Lockdep feature in the kernel cannot differentiate between different instances of same lock type. ++ * This allows it to group all such instances of the same lock type under one class ++ * The consequence of this is that, if lock acquisition is nested on different instances, it generates ++ * a false warning message about the possible occurrence of deadlock due to recursive lock acquisition. 
++ * Hence we create the following subclasses to explicitly apprise Lockdep of such safe lock nesting */
++#define PROCESS_LOCK_SUBCLASS_CURRENT 1
++#define PROCESS_LOCK_SUBCLASS_PREV    2
++#define PROCESS_LOCK_SUBCLASS_NEXT    3
++#if defined(ENABLE_DEBUGFS_PIDS)
++/*
++ * Pointer to OS folder to hold PID folders.
++ */
++static DI_GROUP *psProcStatsDIGroup;
++#endif
++#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
++static DI_ENTRY *psProcStatsDIEntry;
++#endif
++
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
++/* Global driver PID stats registration handle */
++static IMG_HANDLE g_hDriverProcessStats;
++#endif
++
++/* Global driver-data folders */
++typedef struct _GLOBAL_STATS_
++{
++	IMG_UINT64 ui64StatValue[PVRSRV_DRIVER_STAT_TYPE_COUNT];
++	POS_LOCK   hGlobalStatsLock;
++} GLOBAL_STATS;
++
++static DI_ENTRY *psGlobalMemDIEntry;
++static GLOBAL_STATS gsGlobalStats;
++
++#define HASH_INITIAL_SIZE 5
++/* A hash table used to store the size of any vmalloc'd allocation
++ * against its address (not needed for kmallocs as we can use ksize()) */
++static HASH_TABLE* gpsSizeTrackingHashTable;
++static POS_LOCK gpsSizeTrackingHashTableLock;
++
++static PVRSRV_ERROR _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid);
++
++static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats);
++static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats);
++static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats);
++
++static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats);
++
++static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
++                                   PVRSRV_PROCESS_STATS* psProcessStats,
++                                   IMG_UINT32 uiBytes);
++/*
++ * Power statistics related definitions
++ */
++
++/* For the meantime, use an exponentially weighted moving average with a
++ * 1/4 weighting for the new measurement; e.g. with a previous mean of 100
++ * and a new sample of 20, MEAN_TIME(100, 20) = 75 + 5 = 80.
++ */
++#define MEAN_TIME(A, B) ( ((3*(A))/4) + ((1 * (B))/4) )
++
++#define UPDATE_TIME(time, newtime) \
++	((time) > 0 ?
MEAN_TIME((time), (newtime)) : (newtime)) ++ ++/* Enum to be used as input to GET_POWER_STAT_INDEX */ ++typedef enum ++{ ++ DEVICE = 0, ++ SYSTEM = 1, ++ POST_POWER = 0, ++ PRE_POWER = 2, ++ POWER_OFF = 0, ++ POWER_ON = 4, ++ NOT_FORCED = 0, ++ FORCED = 8, ++} PVRSRV_POWER_STAT_TYPE; ++ ++/* Macro used to access one of the power timing statistics inside an array */ ++#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \ ++ ((forced) + (powon) + (prepow) + (system)) ++ ++/* For the power timing stats we need 16 variables to store all the ++ * combinations of forced/not forced, power-on/power-off, pre-power/post-power ++ * and device/system statistics ++ */ ++#define NUM_POWER_STATS (16) ++static IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS]; ++ ++static DI_ENTRY *psPowerStatsDIEntry; ++ ++typedef struct _EXTRA_POWER_STATS_ ++{ ++ IMG_UINT64 ui64PreClockSpeedChangeDuration; ++ IMG_UINT64 ui64BetweenPreEndingAndPostStartingDuration; ++ IMG_UINT64 ui64PostClockSpeedChangeDuration; ++} EXTRA_POWER_STATS; ++ ++#define NUM_EXTRA_POWER_STATS 10 ++ ++static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS]; ++static IMG_UINT32 ui32ClockSpeedIndexStart, ui32ClockSpeedIndexEnd; ++ ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, ++ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, ++ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) ++{ ++ IMG_UINT32 *pui32Stat; ++ IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime; ++ IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime; ++ IMG_UINT32 ui32Index; ++ ++ if (bPrePower) ++ { ++ HTBLOGK(HTB_SF_MAIN_PRE_POWER, bPowerOn, ui64DeviceDiff, ui64SystemDiff); ++ } ++ else ++ { ++ HTBLOGK(HTB_SF_MAIN_POST_POWER, bPowerOn, ui64SystemDiff, ui64DeviceDiff); ++ } ++ ++ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED, ++ bPowerOn ? POWER_ON : POWER_OFF, ++ bPrePower ? PRE_POWER : POST_POWER, ++ DEVICE); ++ pui32Stat = &aui32PowerTimingStats[ui32Index]; ++ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff); ++ ++ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED, ++ bPowerOn ? POWER_ON : POWER_OFF, ++ bPrePower ? PRE_POWER : POST_POWER, ++ SYSTEM); ++ pui32Stat = &aui32PowerTimingStats[ui32Index]; ++ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff); ++} ++ ++static IMG_UINT64 ui64PreClockSpeedChangeMark; ++ ++void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer) ++{ ++ asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64Stoptimer - ui64StartTimer; ++ ++ ui64PreClockSpeedChangeMark = OSClockus(); ++} ++ ++void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) ++{ ++ IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark; ++ ++ PVR_ASSERT(ui64PreClockSpeedChangeMark > 0); ++ ++ asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration; ++ asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer; ++ ++ ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS; ++ ++ if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart) ++ { ++ ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS; ++ } ++ ++ ui64PreClockSpeedChangeMark = 0; ++} ++#endif ++ ++/*************************************************************************/ /*! 
++@Function _FindProcessStatsInLiveList ++@Description Searches the Live Process List for a statistics structure that ++ matches the PID given. ++@Input pid Process to search for. ++@Return Pointer to stats structure for the process. ++*/ /**************************************************************************/ ++static PVRSRV_PROCESS_STATS* ++_FindProcessStatsInLiveList(IMG_PID pid) ++{ ++ PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList; ++ ++ while (psProcessStats != NULL) ++ { ++ if (psProcessStats->pid == pid) ++ { ++ return psProcessStats; ++ } ++ ++ psProcessStats = psProcessStats->psNext; ++ } ++ ++ return NULL; ++} /* _FindProcessStatsInLiveList */ ++ ++/*************************************************************************/ /*! ++@Function _FindProcessStatsInDeadList ++@Description Searches the Dead Process List for a statistics structure that ++ matches the PID given. ++@Input pid Process to search for. ++@Return Pointer to stats structure for the process. ++*/ /**************************************************************************/ ++static PVRSRV_PROCESS_STATS* ++_FindProcessStatsInDeadList(IMG_PID pid) ++{ ++ PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList; ++ ++ while (psProcessStats != NULL) ++ { ++ if (psProcessStats->pid == pid) ++ { ++ return psProcessStats; ++ } ++ ++ psProcessStats = psProcessStats->psNext; ++ } ++ ++ return NULL; ++} /* _FindProcessStatsInDeadList */ ++ ++/*************************************************************************/ /*! ++@Function _FindProcessStats ++@Description Searches the Live and Dead Process Lists for a statistics ++ structure that matches the PID given. ++@Input pid Process to search for. ++@Return Pointer to stats structure for the process. ++*/ /**************************************************************************/ ++static PVRSRV_PROCESS_STATS* ++_FindProcessStats(IMG_PID pid) ++{ ++ PVRSRV_PROCESS_STATS* psProcessStats = _FindProcessStatsInLiveList(pid); ++ ++ if (psProcessStats == NULL) ++ { ++ psProcessStats = _FindProcessStatsInDeadList(pid); ++ } ++ ++ return psProcessStats; ++} /* _FindProcessStats */ ++ ++/*************************************************************************/ /*! ++@Function _CompressMemoryUsage ++@Description Reduces memory usage by deleting old statistics data. ++ This function requires that the list lock is not held! ++*/ /**************************************************************************/ ++static void ++_CompressMemoryUsage(void) ++{ ++ PVRSRV_PROCESS_STATS* psProcessStats; ++ PVRSRV_PROCESS_STATS* psProcessStatsToBeFreed; ++ IMG_UINT32 ui32ItemsRemaining; ++ ++ /* ++ * We hold the lock whilst checking the list, but we'll release it ++ * before freeing memory (as that will require the lock too)! ++ */ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ /* Check that the dead list is not bigger than the max size... */ ++ psProcessStats = g_psDeadList; ++ psProcessStatsToBeFreed = NULL; ++ ui32ItemsRemaining = MAX_DEAD_LIST_PROCESSES; ++ ++ while (psProcessStats != NULL && ui32ItemsRemaining > 0) ++ { ++ ui32ItemsRemaining--; ++ if (ui32ItemsRemaining == 0) ++ { ++ /* This is the last allowed process, cut the linked list here! */ ++ psProcessStatsToBeFreed = psProcessStats->psNext; ++ psProcessStats->psNext = NULL; ++ } ++ else ++ { ++ psProcessStats = psProcessStats->psNext; ++ } ++ } ++ ++ OSLockRelease(g_psLinkedListLock); ++ ++ /* Any processes stats remaining will need to be destroyed... 
*/
++	while (psProcessStatsToBeFreed != NULL)
++	{
++		PVRSRV_PROCESS_STATS* psNextProcessStats = psProcessStatsToBeFreed->psNext;
++
++		psProcessStatsToBeFreed->psNext = NULL;
++		_DestroyProcessStat(psProcessStatsToBeFreed);
++		psProcessStatsToBeFreed = psNextProcessStats;
++	}
++} /* _CompressMemoryUsage */
++
++/* Moves the process stats from the live to the dead list.
++ * _MoveProcessToDeadList only relinks the entry in the global lists, so it
++ * needs to be protected by g_psLinkedListLock; any OS calls that destroy the
++ * entry afterwards should be made outside g_psLinkedListLock, because they
++ * could otherwise generate a lockdep warning. */
++static void
++_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
++{
++	/* Take the element out of the live list and append to the dead list... */
++	_RemoveProcessStatsFromList(psProcessStats);
++	_AddProcessStatsToFrontOfDeadList(psProcessStats);
++} /* _MoveProcessToDeadList */
++
++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
++/* Moves the process stats from the dead back to the live list.
++ * _MoveProcessToLiveList only relinks the entry in the global lists, so it
++ * needs to be protected by g_psLinkedListLock. */
++static void
++_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
++{
++	/* Take the element out of the dead list and append to the live list... */
++	_RemoveProcessStatsFromList(psProcessStats);
++	_AddProcessStatsToFrontOfLiveList(psProcessStats);
++} /* _MoveProcessToLiveList */
++#endif
++
++/*************************************************************************/ /*!
++@Function       _AddProcessStatsToFrontOfLiveList
++@Description    Add a statistic to the live list head.
++@Input          psProcessStats  Process stats to add.
++*/ /**************************************************************************/
++static void
++_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
++{
++	/* This function should always be called under global list lock
++	 * g_psLinkedListLock.
++	 */
++	PVR_ASSERT(psProcessStats != NULL);
++
++	OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
++
++	if (g_psLiveList != NULL)
++	{
++		PVR_ASSERT(psProcessStats != g_psLiveList);
++		OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
++		g_psLiveList->psPrev = psProcessStats;
++		OSLockRelease(g_psLiveList->hLock);
++		psProcessStats->psNext = g_psLiveList;
++	}
++
++	g_psLiveList = psProcessStats;
++
++	OSLockRelease(psProcessStats->hLock);
++} /* _AddProcessStatsToFrontOfLiveList */
++
++/*************************************************************************/ /*!
++@Function       _AddProcessStatsToFrontOfDeadList
++@Description    Add a statistic to the dead list head.
++@Input          psProcessStats  Process stats to add.
++*/ /**************************************************************************/ ++static void ++_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats) ++{ ++ PVR_ASSERT(psProcessStats != NULL); ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ ++ if (g_psDeadList != NULL) ++ { ++ PVR_ASSERT(psProcessStats != g_psDeadList); ++ OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV); ++ g_psDeadList->psPrev = psProcessStats; ++ OSLockRelease(g_psDeadList->hLock); ++ psProcessStats->psNext = g_psDeadList; ++ } ++ ++ g_psDeadList = psProcessStats; ++ ++ OSLockRelease(psProcessStats->hLock); ++} /* _AddProcessStatsToFrontOfDeadList */ ++ ++/*************************************************************************/ /*! ++@Function _RemoveProcessStatsFromList ++@Description Detaches a process from either the live or dead list. ++@Input psProcessStats Process stats to remove. ++*/ /**************************************************************************/ ++static void ++_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats) ++{ ++ PVR_ASSERT(psProcessStats != NULL); ++ ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ ++ /* Remove the item from the linked lists... */ ++ if (g_psLiveList == psProcessStats) ++ { ++ g_psLiveList = psProcessStats->psNext; ++ ++ if (g_psLiveList != NULL) ++ { ++ PVR_ASSERT(psProcessStats != g_psLiveList); ++ OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV); ++ g_psLiveList->psPrev = NULL; ++ OSLockRelease(g_psLiveList->hLock); ++ ++ } ++ } ++ else if (g_psDeadList == psProcessStats) ++ { ++ g_psDeadList = psProcessStats->psNext; ++ ++ if (g_psDeadList != NULL) ++ { ++ PVR_ASSERT(psProcessStats != g_psDeadList); ++ OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV); ++ g_psDeadList->psPrev = NULL; ++ OSLockRelease(g_psDeadList->hLock); ++ } ++ } ++ else ++ { ++ PVRSRV_PROCESS_STATS* psNext = psProcessStats->psNext; ++ PVRSRV_PROCESS_STATS* psPrev = psProcessStats->psPrev; ++ ++ if (psProcessStats->psNext != NULL) ++ { ++ PVR_ASSERT(psProcessStats != psNext); ++ OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT); ++ psProcessStats->psNext->psPrev = psPrev; ++ OSLockRelease(psNext->hLock); ++ } ++ if (psProcessStats->psPrev != NULL) ++ { ++ PVR_ASSERT(psProcessStats != psPrev); ++ OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV); ++ psProcessStats->psPrev->psNext = psNext; ++ OSLockRelease(psPrev->hLock); ++ } ++ } ++ ++ ++ /* Reset the pointers in this cell, as it is not attached to anything */ ++ psProcessStats->psNext = NULL; ++ psProcessStats->psPrev = NULL; ++ ++ OSLockRelease(psProcessStats->hLock); ++ ++} /* _RemoveProcessStatsFromList */ ++ ++static PVRSRV_ERROR ++_AllocateProcessStats(PVRSRV_PROCESS_STATS **ppsProcessStats, IMG_PID ownerPid) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_PROCESS_STATS *psProcessStats; ++ ++ psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS)); ++ PVR_RETURN_IF_NOMEM(psProcessStats); ++ ++ psProcessStats->pid = ownerPid; ++ psProcessStats->ui32RefCount = 1; ++ ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1; ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1; ++ ++ eError = OSLockCreateNoStats(&psProcessStats->hLock); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ *ppsProcessStats = psProcessStats; ++ return PVRSRV_OK; ++ ++e0: ++ OSFreeMemNoStats(psProcessStats); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++} ++ 
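++/* Usage sketch for the registration API defined later in this file
++ * (illustrative only, not driver code): a connection registers its process
++ * once and keeps the returned handle for deregistration.
++ *
++ *     IMG_HANDLE hProcessStats;
++ *
++ *     if (PVRSRVStatsRegisterProcess(&hProcessStats) == PVRSRV_OK)
++ *     {
++ *         // allocations are now recorded against the calling PID
++ *         PVRSRVStatsDeregisterProcess(hProcessStats);
++ *     }
++ *
++ * _AllocateProcessStats() above deliberately uses the *NoStats alloc/lock
++ * variants so that creating the bookkeeping for a process does not itself
++ * generate memstat records. */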
++/*************************************************************************/ /*! ++@Function _DestroyProcessStat ++@Description Frees memory and resources held by a process statistic. ++@Input psProcessStats Process stats to destroy. ++*/ /**************************************************************************/ ++static void ++_DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats) ++{ ++ PVR_ASSERT(psProcessStats != NULL); ++ ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ ++ /* Free the memory statistics... */ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++ while (psProcessStats->psMemoryRecords) ++ { ++ List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryRecords); ++ } ++#endif ++ OSLockRelease(psProcessStats->hLock); ++ ++ /*Destroy the lock */ ++ OSLockDestroyNoStats(psProcessStats->hLock); ++ ++ /* Free the memory... */ ++ OSFreeMemNoStats(psProcessStats); ++} /* _DestroyProcessStat */ ++ ++#if defined(ENABLE_DEBUGFS_PIDS) ++static inline void ++_createStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries, ++ DI_PFN_SHOW pfnStatsShow) ++{ ++ PVRSRV_ERROR eError; ++ DI_ITERATOR_CB sIterator = {.pfnShow = pfnStatsShow}; ++ ++#if defined(PVRSRV_ENABLE_PERPID_STATS) ++ eError = DICreateEntry("process_stats", psStatsEntries->psStatsDIGroup, ++ &sIterator, ++ GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_PROCESS), ++ DI_ENTRY_TYPE_GENERIC, ++ &psStatsEntries->psProcessStatsDIEntry); ++ PVR_LOG_IF_ERROR(eError, "DICreateEntry (1)"); ++#endif ++ ++#if defined(PVRSRV_ENABLE_CACHEOP_STATS) ++ eError = DICreateEntry("cache_ops_exec", psStatsEntries->psStatsDIGroup, ++ &sIterator, ++ GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_CACHEOP), ++ DI_ENTRY_TYPE_GENERIC, ++ &psStatsEntries->psCacheOpStatsDIEntry); ++ PVR_LOG_IF_ERROR(eError, "DICreateEntry (2)"); ++#endif ++ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++ eError = DICreateEntry("mem_area", psStatsEntries->psStatsDIGroup, ++ &sIterator, ++ GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_MEMORY), ++ DI_ENTRY_TYPE_GENERIC, ++ &psStatsEntries->psMemStatsDIEntry); ++ PVR_LOG_IF_ERROR(eError, "DICreateEntry (3)"); ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ eError = DICreateEntry("gpu_mem_area", psStatsEntries->psStatsDIGroup, ++ &sIterator, ++ GET_STAT_ENTRY_ID(PVRSRV_STAT_TYPE_RIMEMORY), ++ DI_ENTRY_TYPE_GENERIC, ++ &psStatsEntries->psRIMemStatsDIEntry); ++ PVR_LOG_IF_ERROR(eError, "DICreateEntry (4)"); ++#endif ++} ++ ++static inline void ++_createStatisticsEntries(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = DICreateGroup("proc_stats", NULL, &psProcStatsDIGroup); ++ PVR_LOG_IF_ERROR(eError, "DICreateGroup (1)"); ++ eError = DICreateGroup("live_pids_stats", psProcStatsDIGroup, ++ &gsLiveStatEntries.psStatsDIGroup); ++ PVR_LOG_IF_ERROR(eError, "DICreateGroup (2)"); ++ eError = DICreateGroup("retired_pids_stats", psProcStatsDIGroup, ++ &gsRetiredStatEntries.psStatsDIGroup); ++ PVR_LOG_IF_ERROR(eError, "DICreateGroup (3)"); ++ ++ _createStatsFiles(&gsLiveStatEntries, GenericStatsPrintElementsLive); ++ _createStatsFiles(&gsRetiredStatEntries, GenericStatsPrintElementsRetired); ++ ++ _prepareStatsPrivateData(); ++} ++ ++static inline void ++_removeStatsFiles(PVRSRV_OS_STAT_ENTRY* psStatsEntries) ++{ ++#if defined(PVRSRV_ENABLE_PERPID_STATS) ++ DIDestroyEntry(psStatsEntries->psProcessStatsDIEntry); ++ psStatsEntries->psProcessStatsDIEntry = NULL; ++#endif ++ ++#if defined(PVRSRV_ENABLE_CACHEOP_STATS) ++ DIDestroyEntry(psStatsEntries->psCacheOpStatsDIEntry); ++ psStatsEntries->psCacheOpStatsDIEntry = NULL; ++#endif ++ ++#if 
defined(PVRSRV_ENABLE_MEMORY_STATS)
++	DIDestroyEntry(psStatsEntries->psMemStatsDIEntry);
++	psStatsEntries->psMemStatsDIEntry = NULL;
++#endif
++
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
++	DIDestroyEntry(psStatsEntries->psRIMemStatsDIEntry);
++	psStatsEntries->psRIMemStatsDIEntry = NULL;
++#endif
++}
++
++static inline void
++_removeStatisticsEntries(void)
++{
++	_removeStatsFiles(&gsLiveStatEntries);
++	_removeStatsFiles(&gsRetiredStatEntries);
++
++	DIDestroyGroup(gsLiveStatEntries.psStatsDIGroup);
++	gsLiveStatEntries.psStatsDIGroup = NULL;
++	DIDestroyGroup(gsRetiredStatEntries.psStatsDIGroup);
++	gsRetiredStatEntries.psStatsDIGroup = NULL;
++	DIDestroyGroup(psProcStatsDIGroup);
++	psProcStatsDIGroup = NULL;
++}
++#endif
++
++/*************************************************************************/ /*!
++@Function       PVRSRVStatsInitialise
++@Description    Entry point for initialising the statistics module.
++@Return         Standard PVRSRV_ERROR error code.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVStatsInitialise(void)
++{
++	PVRSRV_ERROR error;
++
++	PVR_ASSERT(g_psLiveList == NULL);
++	PVR_ASSERT(g_psDeadList == NULL);
++	PVR_ASSERT(g_psLinkedListLock == NULL);
++	PVR_ASSERT(gpsSizeTrackingHashTable == NULL);
++	PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE);
++
++	/* We need a lock to protect the linked lists... */
++	error = OSLockCreate(&g_psLinkedListLock);
++	PVR_GOTO_IF_ERROR(error, return_);
++
++	/* We also need a lock to protect the hash table used for size tracking. */
++	error = OSLockCreate(&gpsSizeTrackingHashTableLock);
++	PVR_GOTO_IF_ERROR(error, destroy_linked_list_lock_);
++
++	/* We also need a lock to protect the GlobalStat counters */
++	error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock);
++	PVR_GOTO_IF_ERROR(error, destroy_hashtable_lock_);
++
++	gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE);
++	PVR_GOTO_IF_NOMEM(gpsSizeTrackingHashTable, error, destroy_stats_lock_);
++
++	OSCachedMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges));
++
++	/* Flag that we are ready to start monitoring memory allocations. */
++	bProcessStatsInitialised = IMG_TRUE;
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
++	/* Register our 'system' PID to hold driver-wide alloc stats */
++	_RegisterProcess(&g_hDriverProcessStats, PVR_SYS_ALLOC_PID);
++#endif
++
++#if defined(ENABLE_DEBUGFS_PIDS)
++	_createStatisticsEntries();
++#endif
++
++#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
++	{
++		DI_ITERATOR_CB sIterator = {.pfnShow = RawProcessStatsPrintElements};
++		error = DICreateEntry("memtrack_stats", NULL, &sIterator, NULL,
++		                      DI_ENTRY_TYPE_GENERIC, &psProcStatsDIEntry);
++		PVR_LOG_IF_ERROR(error, "DICreateEntry (1)");
++	}
++#endif
++
++	{
++		DI_ITERATOR_CB sIterator = {.pfnShow = PowerStatsPrintElements};
++		/* Create power stats entry... */
++		error = DICreateEntry("power_timing_stats", NULL, &sIterator, NULL,
++		                      DI_ENTRY_TYPE_GENERIC, &psPowerStatsDIEntry);
++		PVR_LOG_IF_ERROR(error, "DICreateEntry (2)");
++	}
++
++	{
++		DI_ITERATOR_CB sIterator = {.pfnShow = GlobalStatsPrintElements};
++		error = DICreateEntry("driver_stats", NULL, &sIterator, NULL,
++		                      DI_ENTRY_TYPE_GENERIC, &psGlobalMemDIEntry);
++		PVR_LOG_IF_ERROR(error, "DICreateEntry (3)");
++	}
++
++	return PVRSRV_OK;
++
++destroy_stats_lock_:
++	OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
++	gsGlobalStats.hGlobalStatsLock = NULL;
++destroy_hashtable_lock_:
++	OSLockDestroy(gpsSizeTrackingHashTableLock);
++	gpsSizeTrackingHashTableLock = NULL;
++destroy_linked_list_lock_:
++	OSLockDestroy(g_psLinkedListLock);
++	g_psLinkedListLock = NULL;
++return_:
++	return error;
++}
++
++static PVRSRV_ERROR _DumpAllVMallocEntries(uintptr_t k, uintptr_t v, void* pvPriv)
++{
++#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN)
++	_PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)(uintptr_t)v;
++	IMG_UINT64 uiCpuVAddr = (IMG_UINT64)k;
++
++	PVR_DPF((PVR_DBG_ERROR, "%s: " IMG_SIZE_FMTSPEC " bytes @ 0x%" IMG_UINT64_FMTSPECx " (PID %u)", __func__,
++	         psNewTrackingHashEntry->uiSizeInBytes,
++	         uiCpuVAddr,
++	         psNewTrackingHashEntry->uiPid));
++
++	PVR_UNREFERENCED_PARAMETER(pvPriv);
++#endif
++	return PVRSRV_OK;
++}
++
++/*************************************************************************/ /*!
++@Function       PVRSRVStatsDestroy
++@Description    Method for destroying the statistics module data.
++*/ /**************************************************************************/
++void
++PVRSRVStatsDestroy(void)
++{
++	PVR_ASSERT(bProcessStatsInitialised);
++
++#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
++	if (psProcStatsDIEntry != NULL)
++	{
++		DIDestroyEntry(psProcStatsDIEntry);
++		psProcStatsDIEntry = NULL;
++	}
++#endif
++
++	/* Destroy the power stats entry... */
++	if (psPowerStatsDIEntry != NULL)
++	{
++		DIDestroyEntry(psPowerStatsDIEntry);
++		psPowerStatsDIEntry = NULL;
++	}
++
++	/* Destroy the global data entry */
++	if (psGlobalMemDIEntry != NULL)
++	{
++		DIDestroyEntry(psGlobalMemDIEntry);
++		psGlobalMemDIEntry = NULL;
++	}
++
++#if defined(ENABLE_DEBUGFS_PIDS)
++	_removeStatisticsEntries();
++#endif
++
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
++	/* Deregister our 'system' PID which holds driver-wide alloc stats */
++	PVRSRVStatsDeregisterProcess(g_hDriverProcessStats);
++#endif
++
++	/* Stop monitoring memory allocations... */
++	bProcessStatsInitialised = IMG_FALSE;
++
++	/* Destroy the locks... */
++	if (g_psLinkedListLock != NULL)
++	{
++		OSLockDestroy(g_psLinkedListLock);
++		g_psLinkedListLock = NULL;
++	}
++
++	/* Free the live and dead lists...
*/ ++ while (g_psLiveList != NULL) ++ { ++ PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList; ++ _RemoveProcessStatsFromList(psProcessStats); ++ _DestroyProcessStat(psProcessStats); ++ } ++ ++ while (g_psDeadList != NULL) ++ { ++ PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList; ++ _RemoveProcessStatsFromList(psProcessStats); ++ _DestroyProcessStat(psProcessStats); ++ } ++ ++ if (gpsSizeTrackingHashTable != NULL) ++ { ++ /* Dump all remaining entries in HASH table (list any remaining vmallocs) */ ++ HASH_Iterate(gpsSizeTrackingHashTable, (HASH_pfnCallback)_DumpAllVMallocEntries, NULL); ++ HASH_Delete(gpsSizeTrackingHashTable); ++ } ++ if (gpsSizeTrackingHashTableLock != NULL) ++ { ++ OSLockDestroy(gpsSizeTrackingHashTableLock); ++ gpsSizeTrackingHashTableLock = NULL; ++ } ++ ++ if (NULL != gsGlobalStats.hGlobalStatsLock) ++ { ++ OSLockDestroy(gsGlobalStats.hGlobalStatsLock); ++ gsGlobalStats.hGlobalStatsLock = NULL; ++ } ++ ++} ++ ++static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ size_t uiBytes) ++{ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ IMG_UINT64 ui64InitialSize; ++#endif ++ ++ OSLockAcquire(gsGlobalStats.hGlobalStatsLock); ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ ui64InitialSize = GET_GPUMEM_GLOBAL_STAT_VALUE(); ++#endif ++ ++ switch (eAllocType) ++ { ++ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: ++ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); ++ break; ++ ++ default: ++ PVR_ASSERT(0); ++ break; ++ } ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ { ++ IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE(); ++ if (ui64Size != ui64InitialSize) ++ { ++ TracepointUpdateGPUMemGlobal(0, ui64Size); ++ } ++ } ++#endif ++ ++ OSLockRelease(gsGlobalStats.hGlobalStatsLock); ++} ++ ++static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ size_t uiBytes) ++{ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ IMG_UINT64 ui64InitialSize; ++#endif ++ ++ 
OSLockAcquire(gsGlobalStats.hGlobalStatsLock); ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ ui64InitialSize = GET_GPUMEM_GLOBAL_STAT_VALUE(); ++#endif ++ ++ switch (eAllocType) ++ { ++ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes); ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: ++ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT, uiBytes); ++ break; ++ ++ default: ++ PVR_ASSERT(0); ++ break; ++ } ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ { ++ IMG_UINT64 ui64Size = GET_GPUMEM_GLOBAL_STAT_VALUE(); ++ if (ui64Size != ui64InitialSize) ++ { ++ TracepointUpdateGPUMemGlobal(0, ui64Size); ++ } ++ } ++#endif ++ ++ OSLockRelease(gsGlobalStats.hGlobalStatsLock); ++} ++ ++static PVRSRV_ERROR ++_RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid) ++{ ++ PVRSRV_PROCESS_STATS* psProcessStats=NULL; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(phProcessStats != NULL); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Register process PID %d [%s]", ++ __func__, ownerPid, (ownerPid == PVR_SYS_ALLOC_PID) ++ ? "system" : OSGetCurrentClientProcessNameKM())); ++ ++ /* Check the PID has not already moved to the dead list... */ ++ OSLockAcquire(g_psLinkedListLock); ++ psProcessStats = _FindProcessStatsInDeadList(ownerPid); ++ if (psProcessStats != NULL) ++ { ++ /* Move it back onto the live list! */ ++ _RemoveProcessStatsFromList(psProcessStats); ++ _AddProcessStatsToFrontOfLiveList(psProcessStats); ++ } ++ else ++ { ++ /* Check the PID is not already registered in the live list... */ ++ psProcessStats = _FindProcessStatsInLiveList(ownerPid); ++ } ++ ++ /* If the PID is on the live list then just increment the ref count and return... 
*/
++	if (psProcessStats != NULL)
++	{
++		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
++
++		psProcessStats->ui32RefCount++;
++		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
++		UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS],
++		                 psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]);
++		OSLockRelease(psProcessStats->hLock);
++		OSLockRelease(g_psLinkedListLock);
++
++		*phProcessStats = psProcessStats;
++
++		return PVRSRV_OK;
++	}
++	OSLockRelease(g_psLinkedListLock);
++
++	/* Allocate a new node structure and initialise it... */
++	eError = _AllocateProcessStats(&psProcessStats, ownerPid);
++	PVR_GOTO_IF_ERROR(eError, e0);
++
++	/* Add it to the live list... */
++	OSLockAcquire(g_psLinkedListLock);
++	_AddProcessStatsToFrontOfLiveList(psProcessStats);
++	OSLockRelease(g_psLinkedListLock);
++
++	/* Done */
++	*phProcessStats = (IMG_HANDLE) psProcessStats;
++
++	return PVRSRV_OK;
++
++e0:
++	*phProcessStats = (IMG_HANDLE) NULL;
++	return PVRSRV_ERROR_OUT_OF_MEMORY;
++} /* _RegisterProcess */
++
++/*************************************************************************/ /*!
++@Function       PVRSRVStatsRegisterProcess
++@Description    Registers a process in the statistics list.
++@Output         phProcessStats  Handle to the process stats, used to
++                                deregister it later.
++@Return         Standard PVRSRV_ERROR error code.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats)
++{
++	return _RegisterProcess(phProcessStats, OSGetCurrentClientProcessIDKM());
++}
++
++/*************************************************************************/ /*!
++@Function       PVRSRVStatsDeregisterProcess
++@Input          hProcessStats  Handle to the process returned when registered.
++@Description    Drops a connection reference on the process statistics entry;
++                when the reference count reaches zero the entry is moved to
++                the dead (retired) list.
++*/ /**************************************************************************/ ++void ++PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats) ++{ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Deregister process entered PID %d [%s]", ++ __func__, OSGetCurrentClientProcessIDKM(), ++ OSGetCurrentProcessName())); ++ ++ if (hProcessStats != (IMG_HANDLE) NULL) ++ { ++ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats; ++ ++ /* Lower the reference count, if zero then move it to the dead list */ ++ OSLockAcquire(g_psLinkedListLock); ++ if (psProcessStats->ui32RefCount > 0) ++ { ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ psProcessStats->ui32RefCount--; ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount; ++ ++#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ if (psProcessStats->ui32RefCount == 0) ++ { ++ OSLockRelease(psProcessStats->hLock); ++ _MoveProcessToDeadList(psProcessStats); ++ }else ++#endif ++ { ++ OSLockRelease(psProcessStats->hLock); ++ } ++ } ++ OSLockRelease(g_psLinkedListLock); ++ ++ /* Check if the dead list needs to be reduced */ ++ _CompressMemoryUsage(); ++ } ++} /* PVRSRVStatsDeregisterProcess */ ++ ++void ++PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ void *pvCpuVAddr, ++ IMG_CPU_PHYADDR sCpuPAddr, ++ size_t uiBytes, ++ void *pvPrivateData, ++ IMG_PID currentPid ++ DEBUG_MEMSTATS_PARAMS) ++{ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); ++ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_MEM_ALLOC_REC* psRecord = NULL; ++ PVRSRV_PROCESS_STATS* psProcessStats; ++ enum { PVRSRV_PROC_NOTFOUND, ++ PVRSRV_PROC_FOUND, ++ PVRSRV_PROC_RESURRECTED ++ } eProcSearch = PVRSRV_PROC_FOUND; ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ IMG_UINT64 ui64InitialSize; ++#endif ++ ++ /* Don't do anything if we are not initialised or we are shutting down! */ ++ if (!bProcessStatsInitialised) ++ { ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Called when process statistics module is not initialised", ++ __func__)); ++#endif ++ return; ++ } ++ ++ /* ++ * To prevent a recursive loop, we make the memory allocations for our ++ * memstat records via OSAllocMemNoStats(), which does not try to ++ * create a memstat record entry. ++ */ ++ ++ /* Allocate the memory record... */ ++ psRecord = OSAllocZMemNoStats(sizeof(PVRSRV_MEM_ALLOC_REC)); ++ if (psRecord == NULL) ++ { ++ return; ++ } ++ ++ psRecord->eAllocType = eAllocType; ++ psRecord->pvCpuVAddr = pvCpuVAddr; ++ psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr; ++ psRecord->uiBytes = uiBytes; ++ psRecord->pvPrivateData = pvPrivateData; ++ ++ psRecord->pid = currentPid; ++ ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) ++ psRecord->pvAllocdFromFile = pvAllocFromFile; ++ psRecord->ui32AllocdFromLine = ui32AllocFromLine; ++#endif ++ ++ _increase_global_stat(eAllocType, uiBytes); ++ /* Lock while we find the correct process... 
*/ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ if (psPVRSRVData) ++ { ++ if ((currentPid == psPVRSRVData->cleanupThreadPid) && ++ (currentCleanupPid != 0)) ++ { ++ psProcessStats = _FindProcessStats(currentCleanupPid); ++ } ++ else ++ { ++ psProcessStats = _FindProcessStatsInLiveList(currentPid); ++ if (!psProcessStats) ++ { ++ psProcessStats = _FindProcessStatsInDeadList(currentPid); ++ eProcSearch = PVRSRV_PROC_RESURRECTED; ++ } ++ } ++ } ++ else ++ { ++ psProcessStats = _FindProcessStatsInLiveList(currentPid); ++ if (!psProcessStats) ++ { ++ psProcessStats = _FindProcessStatsInDeadList(currentPid); ++ eProcSearch = PVRSRV_PROC_RESURRECTED; ++ } ++ } ++ ++ if (psProcessStats == NULL) ++ { ++ eProcSearch = PVRSRV_PROC_NOTFOUND; ++ ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Process stat increment called for 'unknown' process PID(%d)", ++ __func__, currentPid)); ++ ++ if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) ++ { ++ OSLockRelease(g_psLinkedListLock); ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s UNABLE TO CREATE process_stats entry for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)", ++ __func__, currentPid, OSGetCurrentProcessName(), uiBytes)); ++ goto free_record; ++ } ++ ++ /* Add it to the live list... */ ++ _AddProcessStatsToFrontOfLiveList(psProcessStats); ++ ++ OSLockRelease(g_psLinkedListLock); ++ ++#else /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ ++ OSLockRelease(g_psLinkedListLock); ++ goto free_record; ++#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ ++ } ++ else ++ { ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ if (eProcSearch == PVRSRV_PROC_RESURRECTED) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Process stat incremented on 'dead' process PID(%d)", ++ __func__, currentPid)); ++ /* Move process from dead list to live list */ ++ _MoveProcessToLiveList(psProcessStats); ++ } ++#endif ++ OSLockRelease(g_psLinkedListLock); ++ } ++ ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ ++ /* Insert the memory record... */ ++ if (psRecord != NULL) ++ { ++ List_PVRSRV_MEM_ALLOC_REC_Insert(&psProcessStats->psMemoryRecords, psRecord); ++ } ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); ++#endif ++ ++ /* Update the memory watermarks... 
*/ ++ switch (eAllocType) ++ { ++ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: ++ { ++ if (psRecord != NULL) ++ { ++ if (pvCpuVAddr == NULL) ++ { ++ break; ++ } ++ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; ++ } ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: ++ { ++ if (psRecord != NULL) ++ { ++ if (pvCpuVAddr == NULL) ++ { ++ break; ++ } ++ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; ++ } ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: ++ { ++ if (psRecord != NULL) ++ { ++ if (pvCpuVAddr == NULL) ++ { ++ break; ++ } ++ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; ++ } ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: ++ { ++ if (psRecord != NULL) ++ { ++ if (pvCpuVAddr == NULL) ++ { ++ break; ++ } ++ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; ++ } ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: ++ { ++ if (psRecord != NULL) ++ { ++ psRecord->ui64Key = sCpuPAddr.uiAddr; ++ } ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: ++ { ++ if (psRecord != NULL) ++ { ++ if (pvCpuVAddr == NULL) ++ { ++ break; ++ } ++ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; ++ } ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: ++ { ++ if (psRecord != NULL) ++ { ++ psRecord->ui64Key = sCpuPAddr.uiAddr; ++ } ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: ++ { ++ if (psRecord != NULL) ++ { 
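++			/* Physically-backed page allocations (LMA/UMA) key the
++			 * record on the CPU physical address, as a CPU virtual
++			 * mapping may not exist; the virtually-mapped cases above
++			 * use pvCpuVAddr as the key instead. */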
++ psRecord->ui64Key = sCpuPAddr.uiAddr; ++ } ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: ++ { ++ if (psRecord != NULL) ++ { ++ if (pvCpuVAddr == NULL) ++ { ++ break; ++ } ++ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr; ++ } ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ default: ++ { ++ PVR_ASSERT(0); ++ } ++ break; ++ } ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ if (psProcessStats->pid != PVR_SYS_ALLOC_PID) ++ { ++ IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); ++ if (ui64Size != ui64InitialSize) ++ { ++ TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); ++ } ++ } ++#endif ++ ++ OSLockRelease(psProcessStats->hLock); ++ ++ return; ++ ++free_record: ++ if (psRecord != NULL) ++ { ++ OSFreeMemNoStats(psRecord); ++ } ++#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */ ++} /* PVRSRVStatsAddMemAllocRecord */ ++ ++void ++PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ IMG_UINT64 ui64Key, ++ IMG_PID currentPid) ++{ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); ++ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_PROCESS_STATS* psProcessStats = NULL; ++ PVRSRV_MEM_ALLOC_REC* psRecord = NULL; ++ IMG_BOOL bFound = IMG_FALSE; ++ ++ /* Don't do anything if we are not initialised or we are shutting down! */ ++ if (!bProcessStatsInitialised) ++ { ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Called when process statistics module is not initialised", ++ __func__)); ++#endif ++ return; ++ } ++ ++ /* Lock while we find the correct process and remove this record... */ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ if (psPVRSRVData) ++ { ++ if ((currentPid == psPVRSRVData->cleanupThreadPid) && ++ (currentCleanupPid != 0)) ++ { ++ psProcessStats = _FindProcessStats(currentCleanupPid); ++ } ++ else ++ { ++ psProcessStats = _FindProcessStats(currentPid); ++ } ++ } ++ else ++ { ++ psProcessStats = _FindProcessStats(currentPid); ++ } ++ if (psProcessStats != NULL) ++ { ++ psRecord = psProcessStats->psMemoryRecords; ++ while (psRecord != NULL) ++ { ++ if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) ++ { ++ bFound = IMG_TRUE; ++ break; ++ } ++ ++ psRecord = psRecord->psNext; ++ } ++ } ++ ++ /* If not found, we need to do a full search in case it was allocated to a different PID... */ ++ if (!bFound) ++ { ++ PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats; ++ ++ /* Search all live lists first... 
*/ ++ psProcessStats = g_psLiveList; ++ while (psProcessStats != NULL) ++ { ++ if (psProcessStats != psProcessStatsAlreadyChecked) ++ { ++ psRecord = psProcessStats->psMemoryRecords; ++ while (psRecord != NULL) ++ { ++ if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) ++ { ++ bFound = IMG_TRUE; ++ break; ++ } ++ ++ psRecord = psRecord->psNext; ++ } ++ } ++ ++ if (bFound) ++ { ++ break; ++ } ++ ++ psProcessStats = psProcessStats->psNext; ++ } ++ ++ /* If not found, then search all dead lists next... */ ++ if (!bFound) ++ { ++ psProcessStats = g_psDeadList; ++ while (psProcessStats != NULL) ++ { ++ if (psProcessStats != psProcessStatsAlreadyChecked) ++ { ++ psRecord = psProcessStats->psMemoryRecords; ++ while (psRecord != NULL) ++ { ++ if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType) ++ { ++ bFound = IMG_TRUE; ++ break; ++ } ++ ++ psRecord = psRecord->psNext; ++ } ++ } ++ ++ if (bFound) ++ { ++ break; ++ } ++ ++ psProcessStats = psProcessStats->psNext; ++ } ++ } ++ } ++ ++ /* Update the watermark and remove this record...*/ ++ if (bFound) ++ { ++ _decrease_global_stat(eAllocType, psRecord->uiBytes); ++ ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ ++ _DecreaseProcStatValue(eAllocType, ++ psProcessStats, ++ psRecord->uiBytes); ++ ++ List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord); ++ OSLockRelease(psProcessStats->hLock); ++ OSLockRelease(g_psLinkedListLock); ++ ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ /* If all stats are now zero, remove the entry for this thread */ ++ if (psProcessStats->ui32StatAllocFlags == 0) ++ { ++ OSLockAcquire(g_psLinkedListLock); ++ _MoveProcessToDeadList(psProcessStats); ++ OSLockRelease(g_psLinkedListLock); ++ ++ /* Check if the dead list needs to be reduced */ ++ _CompressMemoryUsage(); ++ } ++#endif ++ /* ++ * Free the record outside the lock so we don't deadlock and so we ++ * reduce the time the lock is held. 
++		 */
++		OSFreeMemNoStats(psRecord);
++	}
++	else
++	{
++		OSLockRelease(g_psLinkedListLock);
++	}
++
++#else
++	PVR_UNREFERENCED_PARAMETER(eAllocType);
++	PVR_UNREFERENCED_PARAMETER(ui64Key);
++	PVR_UNREFERENCED_PARAMETER(currentPid);
++#endif
++} /* PVRSRVStatsRemoveMemAllocRecord */
++
++void
++PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
++                                    size_t uiBytes,
++                                    IMG_UINT64 uiCpuVAddr,
++                                    IMG_PID uiPid)
++{
++	IMG_BOOL bRes = IMG_FALSE;
++	_PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = NULL;
++
++	if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL))
++	{
++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
++		PVR_DPF((PVR_DBG_WARNING,
++		         "%s: Called when process statistics module is not initialised",
++		         __func__));
++#endif
++		return;
++	}
++
++	/* Alloc untracked memory for the new hash table entry */
++	psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)OSAllocMemNoStats(sizeof(*psNewTrackingHashEntry));
++	if (psNewTrackingHashEntry == NULL)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "*** %s : @ line %d Failed to alloc memory for psNewTrackingHashEntry!",
++		         __func__, __LINE__));
++		return;
++	}
++
++	/* Fill in the size of the allocation and PID of the allocating process */
++	psNewTrackingHashEntry->uiSizeInBytes = uiBytes;
++	psNewTrackingHashEntry->uiPid = uiPid;
++	OSLockAcquire(gpsSizeTrackingHashTableLock);
++	/* Insert address of the new struct into the hash table */
++	bRes = HASH_Insert(gpsSizeTrackingHashTable, uiCpuVAddr, (uintptr_t)psNewTrackingHashEntry);
++	OSLockRelease(gpsSizeTrackingHashTableLock);
++	if (bRes)
++	{
++		PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes, uiPid);
++	}
++	else
++	{
++		PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!",
++		         __func__, __LINE__));
++		/* Free the memory allocated for psNewTrackingHashEntry, as we
++		 * failed to insert it into the Hash table.
++		 */
++		OSFreeMemNoStats(psNewTrackingHashEntry);
++	}
++}
++
++void
++PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
++                            size_t uiBytes,
++                            IMG_PID currentPid)
++{
++	IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
++	PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
++	PVRSRV_PROCESS_STATS* psProcessStats = NULL;
++	enum { PVRSRV_PROC_NOTFOUND,
++	       PVRSRV_PROC_FOUND,
++	       PVRSRV_PROC_RESURRECTED
++	} eProcSearch = PVRSRV_PROC_FOUND;
++
++#if defined(ENABLE_GPU_MEM_TRACEPOINT)
++	IMG_UINT64 ui64InitialSize;
++#endif
++
++	/* Don't do anything if we are not initialised or we are shutting down!
*/ ++ if (!bProcessStatsInitialised) ++ { ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Called when process statistics module is not initialised", ++ __func__)); ++#endif ++ return; ++ } ++ ++ _increase_global_stat(eAllocType, uiBytes); ++ OSLockAcquire(g_psLinkedListLock); ++ if (psPVRSRVData) ++ { ++ if ((currentPid == psPVRSRVData->cleanupThreadPid) && ++ (currentCleanupPid != 0)) ++ { ++ psProcessStats = _FindProcessStats(currentCleanupPid); ++ } ++ else ++ { ++ psProcessStats = _FindProcessStatsInLiveList(currentPid); ++ if (!psProcessStats) ++ { ++ psProcessStats = _FindProcessStatsInDeadList(currentPid); ++ eProcSearch = PVRSRV_PROC_RESURRECTED; ++ } ++ } ++ } ++ else ++ { ++ psProcessStats = _FindProcessStatsInLiveList(currentPid); ++ if (!psProcessStats) ++ { ++ psProcessStats = _FindProcessStatsInDeadList(currentPid); ++ eProcSearch = PVRSRV_PROC_RESURRECTED; ++ } ++ } ++ ++ if (psProcessStats == NULL) ++ { ++ eProcSearch = PVRSRV_PROC_NOTFOUND; ++ ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Process stat increment called for 'unknown' process PID(%d)", ++ __func__, currentPid)); ++ ++ if (bProcessStatsInitialised) ++ { ++ if (_AllocateProcessStats(&psProcessStats, currentPid) != PVRSRV_OK) ++ { ++ OSLockRelease(g_psLinkedListLock); ++ return; ++ } ++ /* Add it to the live list... */ ++ _AddProcessStatsToFrontOfLiveList(psProcessStats); ++ } ++#else ++ OSLockRelease(g_psLinkedListLock); ++#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */ ++ ++ } ++ ++ if (psProcessStats != NULL) ++ { ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ if (eProcSearch == PVRSRV_PROC_RESURRECTED) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Process stat incremented on 'dead' process PID(%d)", ++ __func__, currentPid)); ++ ++ /* Move process from dead list to live list */ ++ _MoveProcessToLiveList(psProcessStats); ++ } ++#endif ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ /* Release the list lock as soon as we acquire the process lock, ++ * this ensures if the process is in deadlist the entry cannot be ++ * deleted or modified ++ */ ++ OSLockRelease(g_psLinkedListLock); ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); ++#endif ++ ++ /* Update the memory watermarks... 
*/ ++ switch (eAllocType) ++ { ++ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes); ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: ++ { ++ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes); ++ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << 
(PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ break; ++ ++ default: ++ { ++ PVR_ASSERT(0); ++ } ++ break; ++ } ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ if (psProcessStats->pid != PVR_SYS_ALLOC_PID) ++ { ++ IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); ++ if (ui64Size != ui64InitialSize) ++ { ++ TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ++ ui64Size); ++ } ++ } ++#endif ++ ++ OSLockRelease(psProcessStats->hLock); ++ } ++ ++} ++ ++static void ++_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ PVRSRV_PROCESS_STATS* psProcessStats, ++ IMG_UINT32 uiBytes) ++{ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ IMG_UINT64 ui64InitialSize = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); ++#endif ++ ++ switch (eAllocType) ++ { ++ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes); ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes); ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes); ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes); ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ 
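++		/* The remaining cases follow the same pattern: decrease the
++		 * per-type counter (and TOTAL where the type contributes to it),
++		 * then clear the type's bit in ui32StatAllocFlags once the counter
++		 * reaches zero so a fully-drained process can be retired to the
++		 * dead list. */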
++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes); ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes); ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ ++ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: ++ { ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT, (IMG_UINT32)uiBytes); ++ if (psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT] == 0) ++ { ++ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT-PVRSRV_PROCESS_STAT_TYPE_KMALLOC)); ++ } ++ } ++ break; ++ ++ default: ++ { ++ PVR_ASSERT(0); ++ } ++ break; ++ } ++ ++#if defined(ENABLE_GPU_MEM_TRACEPOINT) ++ if (psProcessStats->pid != PVR_SYS_ALLOC_PID) ++ { ++ IMG_UINT64 ui64Size = GET_GPUMEM_PERPID_STAT_VALUE(psProcessStats); ++ if (ui64Size != ui64InitialSize) ++ { ++ TracepointUpdateGPUMemPerProcess(0, psProcessStats->pid, ui64Size); ++ } ++ } ++#endif ++} ++ ++#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE) ++int RawProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_PROCESS_STATS *psProcessStats; ++ ++ DIPrintf(psEntry, ++ "%s,%s,%s,%s,%s,%s,%s\n", ++ "PID", ++ "MemoryUsageKMalloc", // PVRSRV_PROCESS_STAT_TYPE_KMALLOC ++ "MemoryUsageAllocPTMemoryUMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA ++ "MemoryUsageAllocPTMemoryLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA ++ "MemoryUsageAllocGPUMemLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES ++ "MemoryUsageAllocGPUMemUMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES ++ "MemoryUsageDmaBufImport"); // PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT ++ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ psProcessStats = g_psLiveList; ++ ++ while (psProcessStats != NULL) ++ { ++ if (psProcessStats->pid != PVR_SYS_ALLOC_PID) ++ { ++ DIPrintf(psEntry, ++ "%d,%d,%d,%d,%d,%d,%d\n", ++ psProcessStats->pid, ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC], ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA], ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA], ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES], ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES], ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_DMA_BUF_IMPORT]); ++ } 
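++
++ /* A sketch of the resulting CSV, one row per live process (the values
++ * shown are illustrative only):
++ *
++ * PID,MemoryUsageKMalloc,MemoryUsageAllocPTMemoryUMA,MemoryUsageAllocPTMemoryLMA,MemoryUsageAllocGPUMemLMA,MemoryUsageAllocGPUMemUMA,MemoryUsageDmaBufImport
++ * 1234,8192,65536,0,1048576,524288,0
++ */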
++ ++ psProcessStats = psProcessStats->psNext; ++ } ++ ++ OSLockRelease(g_psLinkedListLock); ++ ++ return 0; ++} /* RawProcessStatsPrintElements */ ++#endif ++ ++void ++PVRSRVStatsDecrMemKAllocStat(size_t uiBytes, ++ IMG_PID decrPID) ++{ ++ PVRSRV_PROCESS_STATS* psProcessStats; ++ ++ /* Don't do anything if we are not initialised or we are shutting down! */ ++ if (!bProcessStatsInitialised) ++ { ++ return; ++ } ++ ++ _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, uiBytes); ++ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ psProcessStats = _FindProcessStats(decrPID); ++ ++ if (psProcessStats != NULL) ++ { ++ /* Decrement the kmalloc memory stat... */ ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes); ++ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes); ++ } ++ ++ OSLockRelease(g_psLinkedListLock); ++} ++ ++static void ++_StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry, ++ PVRSRV_MEM_ALLOC_TYPE eAllocType) ++{ ++ PVRSRV_PROCESS_STATS* psProcessStats; ++ ++ /* Don't do anything if we are not initialised or we are shutting down! */ ++ if (!bProcessStatsInitialised) ++ { ++ return; ++ } ++ ++ _decrease_global_stat(eAllocType, psTrackingHashEntry->uiSizeInBytes); ++ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ psProcessStats = _FindProcessStats(psTrackingHashEntry->uiPid); ++ ++ if (psProcessStats != NULL) ++ { ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ /* Decrement the memory stat... */ ++ _DecreaseProcStatValue(eAllocType, ++ psProcessStats, ++ psTrackingHashEntry->uiSizeInBytes); ++ OSLockRelease(psProcessStats->hLock); ++ } ++ ++ OSLockRelease(g_psLinkedListLock); ++} ++ ++void ++PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ IMG_UINT64 uiCpuVAddr) ++{ ++ _PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry = NULL; ++ ++ if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL)) ++ { ++ return; ++ } ++ ++ OSLockAcquire(gpsSizeTrackingHashTableLock); ++ psTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)HASH_Remove(gpsSizeTrackingHashTable, uiCpuVAddr); ++ OSLockRelease(gpsSizeTrackingHashTableLock); ++ if (psTrackingHashEntry) ++ { ++ _StatsDecrMemTrackedStat(psTrackingHashEntry, eAllocType); ++ OSFreeMemNoStats(psTrackingHashEntry); ++ } ++} ++ ++void ++PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ size_t uiBytes, ++ IMG_PID currentPid) ++{ ++ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid(); ++ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_PROCESS_STATS* psProcessStats = NULL; ++ ++ /* Don't do anything if we are not initialised or we are shutting down! */ ++ if (!bProcessStatsInitialised) ++ { ++ return; ++ } ++ ++ _decrease_global_stat(eAllocType, uiBytes); ++ ++ OSLockAcquire(g_psLinkedListLock); ++ if (psPVRSRVData) ++ { ++ if ((currentPid == psPVRSRVData->cleanupThreadPid) && ++ (currentCleanupPid != 0)) ++ { ++ psProcessStats = _FindProcessStats(currentCleanupPid); ++ } ++ else ++ { ++ psProcessStats = _FindProcessStats(currentPid); ++ } ++ } ++ else ++ { ++ psProcessStats = _FindProcessStats(currentPid); ++ } ++ ++ ++ if (psProcessStats != NULL) ++ { ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ /* Release the list lock as soon as we acquire the process lock, ++ * this ensures if the process is in deadlist the entry cannot be ++ * deleted or modified ++ */ ++ OSLockRelease(g_psLinkedListLock); ++ /* Update the memory watermarks... 
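++ *
++ * The lock hand-over above follows one fixed order; a compact sketch of
++ * the pattern (names as in this file):
++ *
++ *     OSLockAcquire(g_psLinkedListLock);
++ *     psProcessStats = _FindProcessStats(pid);
++ *     OSLockAcquireNested(psProcessStats->hLock,
++ *                         PROCESS_LOCK_SUBCLASS_CURRENT);
++ *     OSLockRelease(g_psLinkedListLock);
++ *     ... update the stats of psProcessStats ...
++ *     OSLockRelease(psProcessStats->hLock);
++ *
++ * Taking hLock before dropping the list lock pins the entry, so even if
++ * the process has moved to the dead list the record cannot be deleted
++ * mid-update.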
*/
++ _DecreaseProcStatValue(eAllocType,
++ psProcessStats,
++ uiBytes);
++ OSLockRelease(psProcessStats->hLock);
++
++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
++ /* If all stats are now zero, move this process's entry to the dead list */
++ if (psProcessStats->ui32StatAllocFlags == 0)
++ {
++ OSLockAcquire(g_psLinkedListLock);
++ _MoveProcessToDeadList(psProcessStats);
++ OSLockRelease(g_psLinkedListLock);
++
++ /* Check if the dead list needs to be reduced */
++ _CompressMemoryUsage();
++ }
++#endif
++ }
++ else
++ {
++ OSLockRelease(g_psLinkedListLock);
++ }
++}
++
++/* For now we do not want to expose the global stats API
++ * so we wrap it into this specific function for pooled pages.
++ * As soon as we need to modify the global stats directly somewhere else
++ * we want to replace these functions with more general ones.
++ */
++void
++PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes)
++{
++ _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
++}
++
++void
++PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes)
++{
++ _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
++}
++
++void
++PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType,
++ IMG_PID pidOwner)
++{
++ PVRSRV_PROCESS_STAT_TYPE eOOMStatType = (PVRSRV_PROCESS_STAT_TYPE) ui32OOMStatType;
++ IMG_PID pidCurrent = pidOwner;
++ PVRSRV_PROCESS_STATS* psProcessStats;
++
++ /* Don't do anything if we are not initialised or we are shutting down! */
++ if (!bProcessStatsInitialised)
++ {
++ return;
++ }
++
++ /* Lock while we find the correct process and update the record...
*/ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ psProcessStats = _FindProcessStats(pidCurrent); ++ if (psProcessStats != NULL) ++ { ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS] += ui32TotalNumPartialRenders; ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS] += ui32TotalNumOutOfMemory; ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores; ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores; ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores; ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TDM_STORES]+= ui32NumTDMStores; ++ OSLockRelease(psProcessStats->hLock); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Process not found for Pid=%d", pidCurrent)); ++ } ++ ++ OSLockRelease(g_psLinkedListLock); ++} /* PVRSRVStatsUpdateRenderContextStats */ ++ ++void ++PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp, ++ IMG_UINT32 ui32NumReqByFW, ++ IMG_PID owner) ++{ ++ IMG_PID currentPid = (owner==0)?OSGetCurrentClientProcessIDKM():owner; ++ PVRSRV_PROCESS_STATS* psProcessStats; ++ ++ ++ /* Don't do anything if we are not initialised or we are shutting down! */ ++ if (!bProcessStatsInitialised) ++ { ++ return; ++ } ++ ++ /* Lock while we find the correct process and update the record... */ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ psProcessStats = _FindProcessStats(currentPid); ++ if (psProcessStats != NULL) ++ { ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp; ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW] += ui32NumReqByFW; ++ OSLockRelease(psProcessStats->hLock); ++ } ++ ++ OSLockRelease(g_psLinkedListLock); ++} /* PVRSRVStatsUpdateZSBufferStats */ ++ ++void ++PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp, ++ IMG_UINT32 ui32NumGrowReqByFW, ++ IMG_UINT32 ui32InitFLPages, ++ IMG_UINT32 ui32NumHighPages, ++ IMG_PID ownerPid) ++{ ++ IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM(); ++ PVRSRV_PROCESS_STATS* psProcessStats; ++ ++ /* Don't do anything if we are not initialised or we are shutting down! */ ++ if (!bProcessStatsInitialised) ++ { ++ return; ++ } ++ ++ /* Lock while we find the correct process and update the record... 
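++ *
++ * UPDATE_MAX_VALUE below keeps a high-water mark rather than a running
++ * sum; it is presumably defined earlier in this file along these lines
++ * (a sketch, not the verbatim definition):
++ *
++ *     #define UPDATE_MAX_VALUE(foo, bar) \
++ *         do { if ((bar) > (foo)) { (foo) = (bar); } } while (0)
++ *
++ * so FREELIST_PAGES_INIT and FREELIST_MAX_PAGES only ever ratchet
++ * upwards.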
*/ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ psProcessStats = _FindProcessStats(currentPid); ++ ++ if (psProcessStats != NULL) ++ { ++ ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp; ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW] += ui32NumGrowReqByFW; ++ ++ UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT], ++ (IMG_INT32) ui32InitFLPages); ++ ++ UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES], ++ (IMG_INT32) ui32NumHighPages); ++ ++ OSLockRelease(psProcessStats->hLock); ++ ++ } ++ ++ OSLockRelease(g_psLinkedListLock); ++} /* PVRSRVStatsUpdateFreelistStats */ ++ ++ ++#if defined(ENABLE_DEBUGFS_PIDS) ++ ++int ++GenericStatsPrintElementsLive(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); ++ PVRSRV_PROCESS_STATS* psProcessStats; ++ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL); ++ ++ DIPrintf(psEntry, "%s\n", psStatType->szLiveStatsHeaderStr); ++ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ psProcessStats = g_psLiveList; ++ ++ if (psProcessStats == NULL) ++ { ++ DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); ++ } ++ else ++ { ++ while (psProcessStats != NULL) ++ { ++ psStatType->pfnStatsPrintElements(psEntry, psProcessStats); ++ psProcessStats = psProcessStats->psNext; ++ DIPrintf(psEntry, "%s\n", g_szSeparatorStr); ++ } ++ } ++ OSLockRelease(g_psLinkedListLock); ++ ++ return 0; ++} ++ ++int ++GenericStatsPrintElementsRetired(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ PVRSRV_STAT_PV_DATA *psStatType = DIGetPrivData(psEntry); ++ PVRSRV_PROCESS_STATS* psProcessStats; ++ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ PVR_ASSERT(psStatType->pfnStatsPrintElements != NULL); ++ ++ DIPrintf(psEntry, "%s\n", psStatType->szRetiredStatsHeaderStr); ++ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ psProcessStats = g_psDeadList; ++ ++ if (psProcessStats == NULL) ++ { ++ DIPrintf(psEntry, "No Stats to display\n%s\n", g_szSeparatorStr); ++ } ++ else ++ { ++ while (psProcessStats != NULL) ++ { ++ psStatType->pfnStatsPrintElements(psEntry, psProcessStats); ++ psProcessStats = psProcessStats->psNext; ++ DIPrintf(psEntry, "%s\n", g_szSeparatorStr); ++ } ++ } ++ OSLockRelease(g_psLinkedListLock); ++ ++ return 0; ++} ++ ++#if defined(PVRSRV_ENABLE_PERPID_STATS) ++/*************************************************************************/ /*! ++@Function ProcessStatsPrintElements ++@Description Prints all elements for this process statistic record. ++@Input pvStatPtr Pointer to statistics structure. ++@Input pfnOSStatsPrintf Printf function to use for output. ++*/ /**************************************************************************/ ++void ++ProcessStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_PROCESS_STATS *psProcessStats) ++{ ++ IMG_UINT32 ui32StatNumber; ++ ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ ++ DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); ++ ++ /* Loop through all the values and print them... 
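++ *
++ * Entries whose name string in pszProcessStatType is empty act as
++ * padding and are skipped. Stats in the KMALLOC..TOTAL_MAX range are
++ * byte counts, so they are additionally printed right-shifted by 10,
++ * i.e. in KiB.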
*/ ++ for (ui32StatNumber = 0; ++ ui32StatNumber < ARRAY_SIZE(pszProcessStatType); ++ ui32StatNumber++) ++ { ++ if (OSStringNCompare(pszProcessStatType[ui32StatNumber], "", 1) != 0) ++ { ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ if ((ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) || ++ (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES)) ++ { ++ /* get the stat from RI */ ++ IMG_INT32 ui32Total = RITotalAllocProcessKM(psProcessStats->pid, ++ (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) ++ ? PHYS_HEAP_TYPE_LMA : PHYS_HEAP_TYPE_UMA); ++ ++ DIPrintf(psEntry, "%-34s%10d %8dK\n", ++ pszProcessStatType[ui32StatNumber], ui32Total, ui32Total>>10); ++ } ++ else ++#endif ++ { ++ if (ui32StatNumber >= PVRSRV_PROCESS_STAT_TYPE_KMALLOC && ++ ui32StatNumber <= PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX) ++ { ++ DIPrintf(psEntry, "%-34s%10d %8dK\n", ++ pszProcessStatType[ui32StatNumber], ++ psProcessStats->i32StatValue[ui32StatNumber], ++ psProcessStats->i32StatValue[ui32StatNumber] >> 10); ++ } ++ else ++ { ++ DIPrintf(psEntry, "%-34s%10d\n", ++ pszProcessStatType[ui32StatNumber], ++ psProcessStats->i32StatValue[ui32StatNumber]); ++ } ++ } ++ } ++ } ++ ++ OSLockRelease(psProcessStats->hLock); ++} /* ProcessStatsPrintElements */ ++#endif ++ ++#if defined(PVRSRV_ENABLE_CACHEOP_STATS) ++void ++PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp, ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEV_PHYADDR sDevPAddr, ++#endif ++ IMG_DEVMEM_SIZE_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT64 ui64ExecuteTime, ++ IMG_BOOL bUserModeFlush, ++ IMG_PID ownerPid) ++{ ++ IMG_PID currentPid = (ownerPid!=0)?ownerPid:OSGetCurrentClientProcessIDKM(); ++ PVRSRV_PROCESS_STATS* psProcessStats; ++ ++ /* Don't do anything if we are not initialised or we are shutting down! */ ++ if (!bProcessStatsInitialised) ++ { ++ return; ++ } ++ ++ /* Lock while we find the correct process and update the record... */ ++ OSLockAcquire(g_psLinkedListLock); ++ ++ psProcessStats = _FindProcessStats(currentPid); ++ ++ if (psProcessStats != NULL) ++ { ++ IMG_INT32 Idx; ++ ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ ++ /* Look-up next buffer write index */ ++ Idx = psProcessStats->uiCacheOpWriteIndex; ++ psProcessStats->uiCacheOpWriteIndex = INCREMENT_CACHEOP_STAT_IDX_WRAP(Idx); ++ ++ /* Store all CacheOp meta-data */ ++ psProcessStats->asCacheOp[Idx].uiCacheOp = uiCacheOp; ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ psProcessStats->asCacheOp[Idx].sDevVAddr = sDevVAddr; ++ psProcessStats->asCacheOp[Idx].sDevPAddr = sDevPAddr; ++#endif ++ psProcessStats->asCacheOp[Idx].uiOffset = uiOffset; ++ psProcessStats->asCacheOp[Idx].uiSize = uiSize; ++ psProcessStats->asCacheOp[Idx].bUserModeFlush = bUserModeFlush; ++ psProcessStats->asCacheOp[Idx].ui64ExecuteTime = ui64ExecuteTime; ++ ++ OSLockRelease(psProcessStats->hLock); ++ } ++ ++ OSLockRelease(g_psLinkedListLock); ++} /* PVRSRVStatsUpdateCacheOpStats */ ++ ++/*************************************************************************/ /*! ++@Function CacheOpStatsPrintElements ++@Description Prints all elements for this process statistic CacheOp record. ++@Input pvStatPtr Pointer to statistics structure. ++@Input pfnOSStatsPrintf Printf function to use for output. 
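++
++             The records live in a fixed-size ring: the writer advances
++             uiCacheOpWriteIndex with INCREMENT_CACHEOP_STAT_IDX_WRAP and
++             this printer walks backwards from a snapshot of that index.
++             The wrap helpers are presumably defined earlier in this file
++             along these lines (a sketch, not the verbatim definitions):
++
++               #define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) \
++                 (((x) + 1) >= MAX_CACHEOP_STAT ? 0 : ((x) + 1))
++               #define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) \
++                 (((x) - 1) < 0 ? (MAX_CACHEOP_STAT - 1) : ((x) - 1))
++
++             Because the walk stops when it returns to the snapshot, at
++             most MAX_CACHEOP_STAT - 1 records are printed; the slot at
++             the write index itself is never shown.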
++*/ /**************************************************************************/ ++void ++CacheOpStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_PROCESS_STATS *psProcessStats) ++{ ++ IMG_CHAR *pszCacheOpType, *pszFlushType, *pszFlushMode; ++ IMG_INT32 i32WriteIdx, i32ReadIdx; ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ #define CACHEOP_RI_PRINTF_HEADER \ ++ "%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12s\n" ++ #define CACHEOP_RI_PRINTF \ ++ "%-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-12llu\n" ++#else ++ #define CACHEOP_PRINTF_HEADER \ ++ "%-10s %-10s %-5s %-10s %-10s %-12s\n" ++ #define CACHEOP_PRINTF \ ++ "%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu\n" ++#endif ++ ++ DIPrintf(psEntry, "PID %u\n", psProcessStats->pid); ++ ++ /* File header info */ ++ DIPrintf(psEntry, ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ CACHEOP_RI_PRINTF_HEADER, ++#else ++ CACHEOP_PRINTF_HEADER, ++#endif ++ "CacheOp", ++ "Type", ++ "Mode", ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ "DevVAddr", ++ "DevPAddr", ++#endif ++ "Offset", ++ "Size", ++ "Time (us)"); ++ ++ /* Take a snapshot of write index, read backwards in buffer ++ and wrap round at boundary */ ++ i32WriteIdx = psProcessStats->uiCacheOpWriteIndex; ++ for (i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32WriteIdx); ++ i32ReadIdx != i32WriteIdx; ++ i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32ReadIdx)) ++ { ++ IMG_UINT64 ui64ExecuteTime = psProcessStats->asCacheOp[i32ReadIdx].ui64ExecuteTime; ++ IMG_DEVMEM_SIZE_T ui64NumOfPages = psProcessStats->asCacheOp[i32ReadIdx].uiSize >> OSGetPageShift(); ++ ++ if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC) ++ { ++ pszFlushType = "RBF.Fast"; ++ } ++ else ++ { ++ pszFlushType = "RBF.Slow"; ++ } ++ ++ if (psProcessStats->asCacheOp[i32ReadIdx].bUserModeFlush) ++ { ++ pszFlushMode = "UM"; ++ } ++ else ++ { ++ pszFlushMode = "KM"; ++ } ++ ++ switch (psProcessStats->asCacheOp[i32ReadIdx].uiCacheOp) ++ { ++ case PVRSRV_CACHE_OP_NONE: ++ pszCacheOpType = "None"; ++ break; ++ case PVRSRV_CACHE_OP_CLEAN: ++ pszCacheOpType = "Clean"; ++ break; ++ case PVRSRV_CACHE_OP_INVALIDATE: ++ pszCacheOpType = "Invalidate"; ++ break; ++ case PVRSRV_CACHE_OP_FLUSH: ++ pszCacheOpType = "Flush"; ++ break; ++ default: ++ pszCacheOpType = "Unknown"; ++ break; ++ } ++ ++ DIPrintf(psEntry, ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ CACHEOP_RI_PRINTF, ++#else ++ CACHEOP_PRINTF, ++#endif ++ pszCacheOpType, ++ pszFlushType, ++ pszFlushMode, ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ psProcessStats->asCacheOp[i32ReadIdx].sDevVAddr.uiAddr, ++ psProcessStats->asCacheOp[i32ReadIdx].sDevPAddr.uiAddr, ++#endif ++ psProcessStats->asCacheOp[i32ReadIdx].uiOffset, ++ psProcessStats->asCacheOp[i32ReadIdx].uiSize, ++ ui64ExecuteTime); ++ } ++ ++} /* CacheOpStatsPrintElements */ ++#endif ++ ++#if defined(PVRSRV_ENABLE_MEMORY_STATS) ++/*************************************************************************/ /*! ++@Function MemStatsPrintElements ++@Description Prints all elements for the memory statistic record. ++@Input pvStatPtr Pointer to statistics structure. ++@Input pfnOSStatsPrintf Printf function to use for output. 
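++
++             The dump below prints each CPU address as
++             sizeof(void*)/sizeof(IMG_UINT32) separate 32-bit words: two
++             on a 64-bit kernel. On a little-endian machine the word at
++             the higher index is the more significant one, hence the
++             backwards indexing, roughly (pui32Words and n are
++             illustrative names for the loop variables below):
++
++               for (i = 0; i < n; i++)
++                 DIPrintf(psEntry, "%08x", pui32Words[n - i - 1]);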
++*/ /**************************************************************************/ ++void ++MemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_PROCESS_STATS *psProcessStats) ++{ ++ IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32); ++ IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32); ++ PVRSRV_MEM_ALLOC_REC *psRecord; ++ IMG_UINT32 ui32ItemNumber; ++ ++ /* Write the header... */ ++ DIPrintf(psEntry, "PID "); ++ ++ DIPrintf(psEntry, "Type VAddress"); ++ for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) ++ { ++ DIPrintf(psEntry, " "); ++ } ++ ++ DIPrintf(psEntry, " PAddress"); ++ for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) ++ { ++ DIPrintf(psEntry, " "); ++ } ++ ++ DIPrintf(psEntry, " Size(bytes)\n"); ++ ++ psRecord = psProcessStats->psMemoryRecords; ++ if (psRecord == NULL) ++ { ++ DIPrintf(psEntry, "%-5d\n", psProcessStats->pid); ++ } ++ ++ while (psRecord != NULL) ++ { ++ IMG_BOOL bPrintStat = IMG_TRUE; ++ ++ DIPrintf(psEntry, "%-5d ", psProcessStats->pid); ++ ++ switch (psRecord->eAllocType) ++ { ++ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: DIPrintf(psEntry, "KMALLOC "); break; ++ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: DIPrintf(psEntry, "VMALLOC "); break; ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_LMA "); break; ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: DIPrintf(psEntry, "ALLOC_PAGES_PT_UMA "); break; ++ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: DIPrintf(psEntry, "IOREMAP_PT_LMA "); break; ++ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: DIPrintf(psEntry, "VMAP_PT_UMA "); break; ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: DIPrintf(psEntry, "ALLOC_LMA_PAGES "); break; ++ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: DIPrintf(psEntry, "ALLOC_UMA_PAGES "); break; ++ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: DIPrintf(psEntry, "MAP_UMA_LMA_PAGES "); break; ++ case PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT: DIPrintf(psEntry, "DMA_BUF_IMPORT "); break; ++ default: DIPrintf(psEntry, "INVALID "); break; ++ } ++ ++ if (bPrintStat) ++ { ++ for (ui32ItemNumber = 0; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++) ++ { ++ DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1)); ++ } ++ DIPrintf(psEntry, " "); ++ ++ for (ui32ItemNumber = 0; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++) ++ { ++ DIPrintf(psEntry, "%08x", *(((IMG_UINT32*) &psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1)); ++ } ++ ++#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS_ON) ++ DIPrintf(psEntry, " " IMG_SIZE_FMTSPEC, psRecord->uiBytes); ++ ++ DIPrintf(psEntry, " %s", (IMG_CHAR*) psRecord->pvAllocdFromFile); ++ ++ DIPrintf(psEntry, " %d\n", psRecord->ui32AllocdFromLine); ++#else ++ DIPrintf(psEntry, " " IMG_SIZE_FMTSPEC "\n", psRecord->uiBytes); ++#endif ++ } ++ /* Move to next record... */ ++ psRecord = psRecord->psNext; ++ } ++} /* MemStatsPrintElements */ ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++/*************************************************************************/ /*! ++@Function RIMemStatsPrintElements ++@Description Prints all elements for the RI Memory record. ++@Input pvStatPtr Pointer to statistics structure. ++@Input pfnOSStatsPrintf Printf function to use for output. 
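++
++             RIGetListEntryKM() below behaves as a stateful iterator:
++             pRIHandle carries the cursor, each call yields the next
++             pre-formatted line of text for the given PID, and the loop
++             ends once the call returns false, so the whole dump is
++             produced under a single RILockAcquireKM()/RILockReleaseKM()
++             pair.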
++*/ /**************************************************************************/ ++void RIMemStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_PROCESS_STATS *psProcessStats) ++{ ++ IMG_CHAR *pszStatFmtText = NULL; ++ IMG_HANDLE *pRIHandle = NULL; ++ ++ /* Acquire RI lock */ ++ RILockAcquireKM(); ++ ++ /* ++ * Loop through the RI system to get each line of text. ++ */ ++ while (RIGetListEntryKM(psProcessStats->pid, ++ &pRIHandle, ++ &pszStatFmtText)) ++ { ++ DIPrintf(psEntry, "%s", pszStatFmtText); ++ } ++ ++ /* Release RI lock */ ++ RILockReleaseKM(); ++ ++} /* RIMemStatsPrintElements */ ++#endif ++ ++#endif ++ ++static IMG_UINT32 ui32FirmwareStartTimestamp; ++static IMG_UINT64 ui64FirmwareIdleDuration; ++ ++void SetFirmwareStartTime(IMG_UINT32 ui32Time) ++{ ++ ui32FirmwareStartTimestamp = UPDATE_TIME(ui32FirmwareStartTimestamp, ui32Time); ++} ++ ++void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration) ++{ ++ ui64FirmwareIdleDuration = UPDATE_TIME(ui64FirmwareIdleDuration, ui64Duration); ++} ++ ++static INLINE void PowerStatsPrintGroup(IMG_UINT32 *pui32Stats, ++ OSDI_IMPL_ENTRY *psEntry, ++ PVRSRV_POWER_STAT_TYPE eForced, ++ PVRSRV_POWER_STAT_TYPE ePowerOn) ++{ ++ IMG_UINT32 ui32Index; ++ ++ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE); ++ DIPrintf(psEntry, " Pre-Device: %9u\n", pui32Stats[ui32Index]); ++ ++ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM); ++ DIPrintf(psEntry, " Pre-System: %9u\n", pui32Stats[ui32Index]); ++ ++ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM); ++ DIPrintf(psEntry, " Post-System: %9u\n", pui32Stats[ui32Index]); ++ ++ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE); ++ DIPrintf(psEntry, " Post-Device: %9u\n", pui32Stats[ui32Index]); ++} ++ ++int PowerStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ IMG_UINT32 *pui32Stats = &aui32PowerTimingStats[0]; ++ IMG_UINT32 ui32Idx; ++ ++ PVR_UNREFERENCED_PARAMETER(pvData); ++ ++ DIPrintf(psEntry, "Forced Power-on Transition (nanoseconds):\n"); ++ PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_ON); ++ DIPrintf(psEntry, "\n"); ++ ++ DIPrintf(psEntry, "Forced Power-off Transition (nanoseconds):\n"); ++ PowerStatsPrintGroup(pui32Stats, psEntry, FORCED, POWER_OFF); ++ DIPrintf(psEntry, "\n"); ++ ++ DIPrintf(psEntry, "Not Forced Power-on Transition (nanoseconds):\n"); ++ PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_ON); ++ DIPrintf(psEntry, "\n"); ++ ++ DIPrintf(psEntry, "Not Forced Power-off Transition (nanoseconds):\n"); ++ PowerStatsPrintGroup(pui32Stats, psEntry, NOT_FORCED, POWER_OFF); ++ DIPrintf(psEntry, "\n"); ++ ++ ++ DIPrintf(psEntry, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp); ++ DIPrintf(psEntry, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration)); ++ DIPrintf(psEntry, "\n"); ++ ++ DIPrintf(psEntry, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS); ++ DIPrintf(psEntry, "Prepare DVFS\tDVFS Change\tPost DVFS\n"); ++ ++ for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx !=ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS) ++ { ++ DIPrintf(psEntry, "%12llu\t%11llu\t%9llu\n",asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration, ++ asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration, ++ asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration); ++ } ++ ++ return 0; ++} /* PowerStatsPrintElements */ ++ ++int 
GlobalStatsPrintElements(OSDI_IMPL_ENTRY *psEntry, void *pvData)
++{
++ IMG_UINT32 ui32StatNumber;
++ PVR_UNREFERENCED_PARAMETER(pvData);
++
++ OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
++
++ for (ui32StatNumber = 0;
++ ui32StatNumber < ARRAY_SIZE(pszDriverStatType);
++ ui32StatNumber++)
++ {
++ if (OSStringNCompare(pszDriverStatType[ui32StatNumber], "", 1) != 0)
++ {
++ DIPrintf(psEntry, "%-34s%12llu\n",
++ pszDriverStatType[ui32StatNumber],
++ GET_GLOBAL_STAT_VALUE(ui32StatNumber));
++ }
++ }
++
++ OSLockRelease(gsGlobalStats.hGlobalStatsLock);
++
++ return 0;
++}
++
++/*************************************************************************/ /*!
++@Function PVRSRVFindProcessMemStats
++@Description Using the provided PID, find memory stats for that process.
++ Memstats will be provided for live/connected processes only.
++ Memstat values provided by this API relate only to the physical
++ memory allocated by the process and do not relate to any of
++ the mapped or imported memory.
++@Input pid Process to search for.
++@Input ui32ArrSize Size of the array where memstat
++ records will be stored.
++@Input bAllProcessStats Flag to denote whether stats are
++ requested for an individual process
++ or for all processes.
++@Output pui32MemoryStats Handle to the memory where the
++ memstat records for the requested
++ pid are stored.
++*/ /**************************************************************************/
++PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats)
++{
++ IMG_INT i;
++ PVRSRV_PROCESS_STATS* psProcessStats;
++
++ PVR_LOG_RETURN_IF_INVALID_PARAM(pui32MemoryStats, "pui32MemoryStats");
++
++ if (bAllProcessStats)
++ {
++ PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_DRIVER_STAT_TYPE_COUNT,
++ "MemStats array size is incorrect",
++ PVRSRV_ERROR_INVALID_PARAMS);
++
++ OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
++
++ for (i = 0; i < ui32ArrSize; i++)
++ {
++ pui32MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i);
++ }
++
++ OSLockRelease(gsGlobalStats.hGlobalStatsLock);
++
++ return PVRSRV_OK;
++ }
++
++ PVR_LOG_RETURN_IF_FALSE(ui32ArrSize == PVRSRV_PROCESS_STAT_TYPE_COUNT,
++ "MemStats array size is incorrect",
++ PVRSRV_ERROR_INVALID_PARAMS);
++
++ OSLockAcquire(g_psLinkedListLock);
++
++ /* Search for the given PID in the Live List */
++ psProcessStats = _FindProcessStatsInLiveList(pid);
++
++ if (psProcessStats == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "Process %d not found. This process may not be live anymore.", (IMG_INT)pid));
++ OSLockRelease(g_psLinkedListLock);
++
++ return PVRSRV_ERROR_PROCESS_NOT_FOUND;
++ }
++
++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
++ for (i = 0; i < ui32ArrSize; i++)
++ {
++ pui32MemoryStats[i] = psProcessStats->i32StatValue[i];
++ }
++ OSLockRelease(psProcessStats->hLock);
++
++ OSLockRelease(g_psLinkedListLock);
++
++ return PVRSRV_OK;
++
++} /* PVRSRVFindProcessMemStats */
++
++/*************************************************************************/ /*!
++@Function PVRSRVGetProcessMemUsage
++@Description Calculate allocated kernel and graphics memory for all live or
++ connected processes. Memstat values provided by this API relate
++ only to the physical memory allocated by the process and do not
++ relate to any of the mapped or imported memory.
++@Output pui32TotalMem Total memory usage for all live
++ PIDs connected to the driver.
++@Output pui32NumberOfLivePids Number of live pids currently ++ connected to the server. ++@Output ppsPerProcessMemUsageData Handle to an array of ++ PVRSRV_PER_PROCESS_MEM_USAGE, ++ number of elements defined by ++ pui32NumberOfLivePids. ++@Return PVRSRV_OK Success ++ PVRSRV_ERROR_PROCESS_NOT_FOUND No live processes. ++ PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate memory for ++ ppsPerProcessMemUsageData. ++*/ /**************************************************************************/ ++PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem, ++ IMG_UINT32 *pui32NumberOfLivePids, ++ PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData) ++{ ++ IMG_UINT32 ui32Counter = 0; ++ IMG_UINT32 ui32NumberOfLivePids = 0; ++ PVRSRV_ERROR eError = PVRSRV_ERROR_PROCESS_NOT_FOUND; ++ PVRSRV_PROCESS_STATS* psProcessStats = NULL; ++ PVRSRV_PER_PROCESS_MEM_USAGE* psPerProcessMemUsageData = NULL; ++ ++ OSLockAcquire(gsGlobalStats.hGlobalStatsLock); ++ ++ *pui32TotalMem = GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC) + ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC) + ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA) + ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA) + ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA) + ++ GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA); ++ ++ OSLockRelease(gsGlobalStats.hGlobalStatsLock); ++ ++ OSLockAcquire(g_psLinkedListLock); ++ psProcessStats = g_psLiveList; ++ ++ while (psProcessStats != NULL) ++ { ++ psProcessStats = psProcessStats->psNext; ++ ui32NumberOfLivePids++; ++ } ++ ++ if (ui32NumberOfLivePids > 0) ++ { ++ /* Use OSAllocZMemNoStats to prevent deadlock. */ ++ psPerProcessMemUsageData = OSAllocZMemNoStats(ui32NumberOfLivePids * sizeof(*psPerProcessMemUsageData)); ++ ++ if (psPerProcessMemUsageData) ++ { ++ psProcessStats = g_psLiveList; ++ ++ while (psProcessStats != NULL) ++ { ++ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT); ++ ++ psPerProcessMemUsageData[ui32Counter].ui32Pid = (IMG_UINT32)psProcessStats->pid; ++ ++ psPerProcessMemUsageData[ui32Counter].ui32KernelMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] + ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC]; ++ ++ psPerProcessMemUsageData[ui32Counter].ui32GraphicsMemUsage = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] + ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] + ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] + ++ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]; ++ ++ OSLockRelease(psProcessStats->hLock); ++ psProcessStats = psProcessStats->psNext; ++ ui32Counter++; ++ } ++ eError = PVRSRV_OK; ++ } ++ else ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ } ++ ++ OSLockRelease(g_psLinkedListLock); ++ *pui32NumberOfLivePids = ui32NumberOfLivePids; ++ *ppsPerProcessMemUsageData = psPerProcessMemUsageData; ++ ++ return eError; ++ ++} /* PVRSRVGetProcessMemUsage */ +diff --git a/drivers/gpu/drm/img-rogue/process_stats.h b/drivers/gpu/drm/img-rogue/process_stats.h +new file mode 100644 +index 000000000000..4003997363aa +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/process_stats.h +@@ -0,0 +1,223 @@ ++/*************************************************************************/ /*! ++@File ++@Title Functions for creating and reading proc filesystem entries. ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PROCESS_STATS_H ++#define PROCESS_STATS_H ++ ++#include ++ ++#include "pvrsrv_error.h" ++#include "allocmem.h" ++#include "cache_ops.h" ++ ++/* ++ * The publishing of Process Stats is controlled by the ++ * PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all Memory ++ * allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option. ++ * ++ * Note: There will be a performance degradation with memory allocation ++ * recording enabled! ++ */ ++ ++ ++/* ++ * Memory types which can be tracked... 
++ */ ++typedef enum { ++ PVRSRV_MEM_ALLOC_TYPE_KMALLOC, /* memory allocated by kmalloc() */ ++ PVRSRV_MEM_ALLOC_TYPE_VMALLOC, /* memory allocated by vmalloc() */ ++ PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, /* pages allocated from UMA to hold page table information */ ++ PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, /* ALLOC_PAGES_PT_UMA mapped to kernel address space */ ++ PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, /* pages allocated from LMA to hold page table information */ ++ PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, /* ALLOC_PAGES_PT_LMA mapped to kernel address space */ ++ PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, /* pages allocated from LMA */ ++ PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, /* pages allocated from UMA */ ++ PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, /* mapped UMA/LMA pages */ ++ PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, /* pages in the page pool */ ++ PVRSRV_MEM_ALLOC_TYPE_DMA_BUF_IMPORT, /* dma-buf imports */ ++ ++ /* Must be the last enum...*/ ++ PVRSRV_MEM_ALLOC_TYPE_COUNT ++} PVRSRV_MEM_ALLOC_TYPE; ++ ++ ++/* ++ * Functions for managing the processes recorded... ++ */ ++PVRSRV_ERROR PVRSRVStatsInitialise(void); ++void PVRSRVStatsDestroy(void); ++ ++PVRSRV_ERROR PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats); ++ ++void PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats); ++ ++#define MAX_POWER_STAT_ENTRIES 51 ++ ++/* ++ * Functions for recording the statistics... ++ */ ++ ++void PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ void *pvCpuVAddr, ++ IMG_CPU_PHYADDR sCpuPAddr, ++ size_t uiBytes, ++ void *pvPrivateData, ++ IMG_PID uiPid ++ DEBUG_MEMSTATS_PARAMS); ++ ++void PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ IMG_UINT64 ui64Key, ++ IMG_PID uiPid); ++ ++void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ size_t uiBytes, ++ IMG_PID uiPid); ++ ++/* ++ * Increases the memory stat for eAllocType. Tracks the allocation size value ++ * by inserting a value into a hash table with uiCpuVAddr as key. ++ * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack(). ++ */ ++void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ size_t uiBytes, ++ IMG_UINT64 uiCpuVAddr, ++ IMG_PID uiPid); ++ ++void PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ size_t uiBytes, ++ IMG_PID uiPid); ++ ++void PVRSRVStatsDecrMemKAllocStat(size_t uiBytes, ++ IMG_PID decrPID); ++ ++/* ++ * Decrease the memory stat for eAllocType. Takes the allocation size value ++ * from the hash table with uiCpuVAddr as key. ++ * Pair with PVRSRVStatsIncrMemAllocStatAndTrack(). 
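++ *
++ * A typical call pairing (a sketch; pvAddr and uiBytes are illustrative
++ * names):
++ *
++ *     PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
++ *                                         uiBytes,
++ *                                         (IMG_UINT64)(uintptr_t)pvAddr,
++ *                                         OSGetCurrentClientProcessIDKM());
++ *     ...
++ *     PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
++ *                                           (IMG_UINT64)(uintptr_t)pvAddr);
++ *
++ * The untrack call recovers uiBytes from the hash table, so the caller
++ * does not need to remember the allocation size at free time.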
++ */ ++void PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType, ++ IMG_UINT64 uiCpuVAddr); ++ ++void ++PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes); ++ ++void ++PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes); ++ ++void ++PVRSRVStatsUpdateOOMStats(IMG_UINT32 ui32OOMStatType, ++ IMG_PID pidOwner); ++ ++PVRSRV_ERROR ++PVRSRVServerUpdateOOMStats(IMG_UINT32 ui32OOMStatType, ++ IMG_PID pidOwner); ++ ++void PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders, ++ IMG_UINT32 ui32TotalNumOutOfMemory, ++ IMG_UINT32 ui32TotalTAStores, ++ IMG_UINT32 ui32Total3DStores, ++ IMG_UINT32 ui32TotalCDMStores, ++ IMG_UINT32 ui32TotalTDMStores, ++ IMG_PID owner); ++ ++void PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp, ++ IMG_UINT32 ui32NumReqByFW, ++ IMG_PID owner); ++ ++void PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp, ++ IMG_UINT32 ui32NumGrowReqByFW, ++ IMG_UINT32 ui32InitFLPages, ++ IMG_UINT32 ui32NumHighPages, ++ IMG_PID ownerPid); ++#if defined(PVRSRV_ENABLE_CACHEOP_STATS) ++void PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp, ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) && defined(DEBUG) ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEV_PHYADDR sDevPAddr, ++#endif ++ IMG_DEVMEM_SIZE_T uiOffset, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_UINT64 ui64ExecuteTimeMs, ++ IMG_BOOL bUserModeFlush, ++ IMG_PID ownerPid); ++#endif ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++/* Update pre/post power transition timing statistics */ ++void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, ++ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, ++ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower); ++ ++void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer); ++void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer); ++#else ++/* Update pre/post power transition timing statistics */ ++static inline ++void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime, ++ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime, ++ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) {} ++static inline ++void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64Stoptimer) {} ++ ++static inline ++void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {} ++#endif ++ ++void SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp); ++ ++void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration); ++ ++/* Functions used for calculating the memory usage statistics of a process */ ++PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, ++ IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemoryStats); ++ ++typedef struct { ++ IMG_UINT32 ui32Pid; ++ IMG_UINT32 ui32KernelMemUsage; ++ IMG_UINT32 ui32GraphicsMemUsage; ++} PVRSRV_PER_PROCESS_MEM_USAGE; ++ ++PVRSRV_ERROR PVRSRVGetProcessMemUsage(IMG_UINT32 *pui32TotalMem, ++ IMG_UINT32 *pui32NumberOfLivePids, ++ PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsageData); ++ ++#endif /* PROCESS_STATS_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_bridge.h b/drivers/gpu/drm/img-rogue/pvr_bridge.h +new file mode 100644 +index 000000000000..dc3cf769f7ee +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_bridge.h +@@ -0,0 +1,457 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR Bridge Functionality ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Header for the PVR Bridge code ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef PVR_BRIDGE_H ++#define PVR_BRIDGE_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include "pvrsrv_error.h" ++#if defined(SUPPORT_DISPLAY_CLASS) ++#include "common_dc_bridge.h" ++#if defined(SUPPORT_DCPLAT_BRIDGE) ++#include "common_dcplat_bridge.h" ++#endif ++#endif ++#include "common_mm_bridge.h" ++#if defined(SUPPORT_MMPLAT_BRIDGE) ++#include "common_mmplat_bridge.h" ++#endif ++#if defined(SUPPORT_WRAP_EXTMEM) ++#include "common_mmextmem_bridge.h" ++#endif ++#if !defined(EXCLUDE_CMM_BRIDGE) ++#include "common_cmm_bridge.h" ++#endif ++#if defined(__linux__) ++#include "common_dmabuf_bridge.h" ++#endif ++#if defined(PDUMP) ++#include "common_pdump_bridge.h" ++#include "common_pdumpctrl_bridge.h" ++#include "common_pdumpmm_bridge.h" ++#endif ++#include "common_cache_bridge.h" ++#if defined(SUPPORT_DMA_TRANSFER) ++#include "common_dma_bridge.h" ++#endif ++#include "common_srvcore_bridge.h" ++#include "common_sync_bridge.h" ++#if defined(SUPPORT_SECURE_EXPORT) ++#include "common_smm_bridge.h" ++#endif ++#if !defined(EXCLUDE_HTBUFFER_BRIDGE) ++#include "common_htbuffer_bridge.h" ++#endif ++#include "common_pvrtl_bridge.h" ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++#include "common_ri_bridge.h" ++#endif ++ ++#if defined(SUPPORT_VALIDATION_BRIDGE) ++#include "common_validation_bridge.h" ++#endif ++ ++#if defined(PVR_TESTING_UTILS) ++#include "common_tutils_bridge.h" ++#endif ++ ++#include "common_devicememhistory_bridge.h" ++#include "common_synctracking_bridge.h" ++ ++#if defined(SUPPORT_FALLBACK_FENCE_SYNC) ++#include "common_syncfallback_bridge.h" ++#endif ++ ++#if defined(SUPPORT_DI_BRG_IMPL) ++#include "common_di_bridge.h" ++#endif ++ ++/* ++ * Bridge Cmd Ids ++ */ ++ ++ ++/* Note: The pattern ++ * #define PVRSRV_BRIDGE_FEATURE (PVRSRV_BRIDGE_PREVFEATURE + 1) ++ * #if defined(SUPPORT_FEATURE) ++ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST + 1) ++ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST + PVRSRV_BRIDGE_FEATURE_CMD_LAST) ++ * #else ++ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST 0 ++ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST) ++ * #endif ++ * is used in the macro definitions below to make PVRSRV_BRIDGE_FEATURE_* ++ * take up no space in the dispatch table if SUPPORT_FEATURE is disabled. ++ * ++ * Note however that a bridge always defines PVRSRV_BRIDGE_FEATURE, even where ++ * the feature is not enabled (each bridge group retains its own ioctl number). 
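++ *
++ * A worked example of the arithmetic: if the previous group's
++ * DISPATCH_LAST is 57 and the feature defines four commands
++ * (PVRSRV_BRIDGE_FEATURE_CMD_LAST == 3), then with SUPPORT_FEATURE
++ * enabled DISPATCH_FIRST is 58 and DISPATCH_LAST is 61 (four dispatch
++ * entries), while with it disabled DISPATCH_FIRST is 0 and DISPATCH_LAST
++ * collapses back to 57, so the next group still continues from 58 and
++ * the table wastes no slots. (The numbers here are illustrative.)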
++ */ ++ ++#define PVRSRV_BRIDGE_FIRST 0UL ++ ++/* 0: Default handler */ ++#define PVRSRV_BRIDGE_DEFAULT 0UL ++#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST 0UL ++#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST) ++/* 1: CORE functions */ ++#define PVRSRV_BRIDGE_SRVCORE 1UL ++#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST+1) ++#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST + PVRSRV_BRIDGE_SRVCORE_CMD_LAST) ++ ++/* 2: SYNC functions */ ++#define PVRSRV_BRIDGE_SYNC 2UL ++#define PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_SYNC_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNC_CMD_LAST) ++ ++/* 3,4: Reserved */ ++#define PVRSRV_BRIDGE_RESERVED1 3UL ++#define PVRSRV_BRIDGE_RESERVED1_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST) ++ ++#define PVRSRV_BRIDGE_RESERVED2 4UL ++#define PVRSRV_BRIDGE_RESERVED2_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_RESERVED2_DISPATCH_LAST (PVRSRV_BRIDGE_RESERVED1_DISPATCH_LAST) ++ ++/* 5: PDUMP CTRL layer functions */ ++#define PVRSRV_BRIDGE_PDUMPCTRL 5UL ++#if defined(PDUMP) ++#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST) ++#endif ++ ++/* 6: Memory Management functions */ ++#define PVRSRV_BRIDGE_MM 6UL ++#define PVRSRV_BRIDGE_MM_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_MM_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_FIRST + PVRSRV_BRIDGE_MM_CMD_LAST) ++ ++/* 7: Non-Linux Memory Management functions */ ++#define PVRSRV_BRIDGE_MMPLAT 7UL ++#if defined(SUPPORT_MMPLAT_BRIDGE) ++#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_MM_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_MMPLAT_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_LAST) ++#endif ++ ++/* 8: Context Memory Management functions */ ++#define PVRSRV_BRIDGE_CMM 8UL ++#if !defined(EXCLUDE_CMM_BRIDGE) ++#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_FIRST + PVRSRV_BRIDGE_CMM_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST) ++#endif ++ ++/* 9: PDUMP Memory Management functions */ ++#define PVRSRV_BRIDGE_PDUMPMM 9UL ++#if defined(PDUMP) ++#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPMM_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST) ++#endif ++ ++/* 10: PDUMP functions */ ++#define PVRSRV_BRIDGE_PDUMP 10UL ++#if defined(PDUMP) ++#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMP_CMD_LAST) ++#else ++#define 
PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST) ++#endif ++ ++/* 11: DMABUF functions */ ++#define PVRSRV_BRIDGE_DMABUF 11UL ++#if defined(__linux__) ++#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST + PVRSRV_BRIDGE_DMABUF_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST) ++#endif ++ ++/* 12: Display Class functions */ ++#define PVRSRV_BRIDGE_DC 12UL ++#if defined(SUPPORT_DISPLAY_CLASS) ++#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DC_DISPATCH_FIRST + PVRSRV_BRIDGE_DC_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST) ++#endif ++ ++/* 13: Cache interface functions */ ++#define PVRSRV_BRIDGE_CACHE 13UL ++#define PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST (PVRSRV_BRIDGE_DC_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_CACHE_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST + PVRSRV_BRIDGE_CACHE_CMD_LAST) ++ ++/* 14: Secure Memory Management functions */ ++#define PVRSRV_BRIDGE_SMM 14UL ++#if defined(SUPPORT_SECURE_EXPORT) ++#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_SMM_DISPATCH_FIRST + PVRSRV_BRIDGE_SMM_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST) ++#endif ++ ++/* 15: Transport Layer interface functions */ ++#define PVRSRV_BRIDGE_PVRTL 15UL ++#define PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST (PVRSRV_BRIDGE_SMM_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST + PVRSRV_BRIDGE_PVRTL_CMD_LAST) ++ ++/* 16: Resource Information (RI) interface functions */ ++#define PVRSRV_BRIDGE_RI 16UL ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_FIRST + PVRSRV_BRIDGE_RI_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST) ++#endif ++ ++/* 17: Validation interface functions */ ++#define PVRSRV_BRIDGE_VALIDATION 17UL ++#if defined(SUPPORT_VALIDATION_BRIDGE) ++#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST (PVRSRV_BRIDGE_RI_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST + PVRSRV_BRIDGE_VALIDATION_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_LAST) ++#endif ++ ++/* 18: TUTILS interface functions */ ++#define PVRSRV_BRIDGE_TUTILS 18UL ++#if defined(PVR_TESTING_UTILS) ++#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST) ++#endif ++ ++/* 19: DevMem history interface functions */ ++#define PVRSRV_BRIDGE_DEVICEMEMHISTORY 19UL ++#define 
PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST + 1)
++#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST + PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST)
++
++/* 20: Host Trace Buffer interface functions */
++#define PVRSRV_BRIDGE_HTBUFFER 20UL
++#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
++#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST + 1)
++#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST + PVRSRV_BRIDGE_HTBUFFER_CMD_LAST)
++#else
++#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST 0
++#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)
++#endif
++
++/* 21: Non-Linux Display functions */
++#define PVRSRV_BRIDGE_DCPLAT 21UL
++#if defined(SUPPORT_DISPLAY_CLASS) && defined(SUPPORT_DCPLAT_BRIDGE)
++#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST + 1)
++#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_DCPLAT_CMD_LAST)
++#else
++#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST 0
++#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST)
++#endif
++
++/* 22: Extmem functions */
++#define PVRSRV_BRIDGE_MMEXTMEM 22UL
++#if defined(SUPPORT_WRAP_EXTMEM)
++#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST + 1)
++#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST + PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST)
++#else
++#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST 0
++#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST)
++#endif
++
++/* 23: Sync tracking functions */
++#define PVRSRV_BRIDGE_SYNCTRACKING 23UL
++#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST + 1)
++#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST)
++
++/* 24: Sync fallback functions */
++#define PVRSRV_BRIDGE_SYNCFALLBACK 24UL
++#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
++#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST + 1)
++#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCFALLBACK_CMD_LAST)
++#else
++#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST 0
++#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST)
++#endif
++
++/* 25: Debug Information (DI) interface functions */
++#define PVRSRV_BRIDGE_DI 25UL
++#if defined(SUPPORT_DI_BRG_IMPL)
++#define PVRSRV_BRIDGE_DI_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST + 1)
++#define PVRSRV_BRIDGE_DI_DISPATCH_LAST (PVRSRV_BRIDGE_DI_DISPATCH_FIRST + PVRSRV_BRIDGE_DI_CMD_LAST)
++#else
++#define PVRSRV_BRIDGE_DI_DISPATCH_FIRST 0
++#define PVRSRV_BRIDGE_DI_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST)
++#endif
++
++/* 26: DMA transfer functions */
++
++#define PVRSRV_BRIDGE_DMA 26UL
++#if defined(SUPPORT_DMA_TRANSFER)
++#define PVRSRV_BRIDGE_DMA_DISPATCH_FIRST (PVRSRV_BRIDGE_DI_DISPATCH_LAST + 1)
++#define PVRSRV_BRIDGE_DMA_DISPATCH_LAST (PVRSRV_BRIDGE_DMA_DISPATCH_FIRST + PVRSRV_BRIDGE_DMA_CMD_LAST)
++#else
++#define PVRSRV_BRIDGE_DMA_DISPATCH_FIRST 0
++#define PVRSRV_BRIDGE_DMA_DISPATCH_LAST (PVRSRV_BRIDGE_DI_DISPATCH_LAST)
++#endif
++
++/* NB PVRSRV_BRIDGE_LAST below must be the last bridge group defined above (currently PVRSRV_BRIDGE_DMA) */
++#define PVRSRV_BRIDGE_LAST (PVRSRV_BRIDGE_DMA)
++/* NB PVRSRV_BRIDGE_DISPATCH_LAST below must be the last dispatch entry defined above (currently PVRSRV_BRIDGE_DMA_DISPATCH_LAST) */
++#define PVRSRV_BRIDGE_DISPATCH_LAST (PVRSRV_BRIDGE_DMA_DISPATCH_LAST)
++
++/* bit mask representing the enabled PVR bridges */
++
++static const IMG_UINT32 gui32PVRBridges =
++	(1U << (PVRSRV_BRIDGE_DEFAULT - PVRSRV_BRIDGE_FIRST))
++	| (1U << (PVRSRV_BRIDGE_SRVCORE - PVRSRV_BRIDGE_FIRST))
++	| (1U << (PVRSRV_BRIDGE_SYNC - PVRSRV_BRIDGE_FIRST))
++
++#if defined(PDUMP)
++	| (1U << (PVRSRV_BRIDGE_PDUMPCTRL - PVRSRV_BRIDGE_FIRST))
++#endif
++	| (1U << (PVRSRV_BRIDGE_MM - PVRSRV_BRIDGE_FIRST))
++#if defined(SUPPORT_MMPLAT_BRIDGE)
++	| (1U << (PVRSRV_BRIDGE_MMPLAT - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(SUPPORT_CMM)
++	| (1U << (PVRSRV_BRIDGE_CMM - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(PDUMP)
++	| (1U << (PVRSRV_BRIDGE_PDUMPMM - PVRSRV_BRIDGE_FIRST))
++	| (1U << (PVRSRV_BRIDGE_PDUMP - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(__linux__)
++	| (1U << (PVRSRV_BRIDGE_DMABUF - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(SUPPORT_DISPLAY_CLASS)
++	| (1U << (PVRSRV_BRIDGE_DC - PVRSRV_BRIDGE_FIRST))
++#endif
++	| (1U << (PVRSRV_BRIDGE_CACHE - PVRSRV_BRIDGE_FIRST))
++#if defined(SUPPORT_SECURE_EXPORT)
++	| (1U << (PVRSRV_BRIDGE_SMM - PVRSRV_BRIDGE_FIRST))
++#endif
++	| (1U << (PVRSRV_BRIDGE_PVRTL - PVRSRV_BRIDGE_FIRST))
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
++	| (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(SUPPORT_VALIDATION)
++	| (1U << (PVRSRV_BRIDGE_VALIDATION - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(PVR_TESTING_UTILS)
++	| (1U << (PVRSRV_BRIDGE_TUTILS - PVRSRV_BRIDGE_FIRST))
++#endif
++	| (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST))
++#if defined(SUPPORT_HTBUFFER)
++	| (1U << (PVRSRV_BRIDGE_HTBUFFER - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(SUPPORT_DISPLAY_CLASS) && defined(SUPPORT_DCPLAT_BRIDGE)
++	| (1U << (PVRSRV_BRIDGE_DCPLAT - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(SUPPORT_WRAP_EXTMEM)
++	| (1U << (PVRSRV_BRIDGE_MMEXTMEM - PVRSRV_BRIDGE_FIRST))
++#endif
++	| (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST))
++#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
++	| (1U << (PVRSRV_BRIDGE_SYNCFALLBACK - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(SUPPORT_DI_BRG_IMPL)
++	| (1U << (PVRSRV_BRIDGE_DI - PVRSRV_BRIDGE_FIRST))
++#endif
++#if defined(SUPPORT_DMA_TRANSFER)
++	| (1U << (PVRSRV_BRIDGE_DMA - PVRSRV_BRIDGE_FIRST))
++#endif
++	;
++
++/* bit field representing which PVR bridge groups may optionally not
++ * be present in the server
++ */
++#define PVR_BRIDGES_OPTIONAL \
++	( \
++		(1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) \
++	)
++
++/******************************************************************************
++ * Generic bridge structures
++ *****************************************************************************/
++
++
++/******************************************************************************
++ * bridge packaging structure
++ *****************************************************************************/
++typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
++{
++	IMG_UINT32 ui32BridgeID; /*!< ioctl bridge group */
++	IMG_UINT32 ui32FunctionID; /*!< ioctl function index */
++	IMG_UINT32 ui32Size; /*!< size of structure */
++	void __user *pvParamIn; /*!< input data buffer */
++	IMG_UINT32 ui32InBufferSize; /*!< size of input data buffer */
++	void __user *pvParamOut; /*!< output data buffer */
++	IMG_UINT32 ui32OutBufferSize; /*!<
size of output data buffer */ ++}PVRSRV_BRIDGE_PACKAGE; ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* PVR_BRIDGE_H */ ++ ++/****************************************************************************** ++ End of file (pvr_bridge.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/pvr_bridge_k.c b/drivers/gpu/drm/img-rogue/pvr_bridge_k.c +new file mode 100644 +index 000000000000..7211ef025de3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_bridge_k.c +@@ -0,0 +1,582 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR Bridge Module (kernel side) ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Receives calls from the user portion of services and ++ despatches them to functions in the kernel portion. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include <linux/mm.h>
++
++#include <linux/version.h>
++
++#include "img_defs.h"
++#include "pvr_bridge.h"
++#include "pvr_bridge_k.h"
++#include "connection_server.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++#include "di_server.h"
++#include "private_data.h"
++#include "linkage.h"
++#include "pmr.h"
++#include "rgx_bvnc_defs_km.h"
++#include "pvrsrv_bridge_init.h"
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0))
++#include <drm/drm_file.h>
++#else
++#include <drm/drmP.h>
++#endif
++
++#include "pvr_drm.h"
++#include "pvr_drv.h"
++
++#include "env_connection.h"
++#include <linux/sched.h>
++#include <linux/freezer.h>
++
++/* RGX: */
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++
++#include "srvcore.h"
++#include "common_srvcore_bridge.h"
++
++PVRSRV_ERROR InitDMABUFBridge(void);
++void DeinitDMABUFBridge(void);
++
++#if defined(MODULE_TEST)
++/************************************************************************/
++// additional includes for services testing
++/************************************************************************/
++#include "pvr_test_bridge.h"
++#include "kern_test.h"
++/************************************************************************/
++// end of additional includes
++/************************************************************************/
++#endif
++
++/* The mmap code has its own mutex, to prevent possible re-entrant issues
++ * when the same PMR is mapped from two different connections/processes.
++ */
++static DEFINE_MUTEX(g_sMMapMutex);
++
++#define _DRIVER_SUSPENDED 1
++#define _DRIVER_NOT_SUSPENDED 0
++static ATOMIC_T g_iDriverSuspended;
++static ATOMIC_T g_iNumActiveDriverThreads;
++static ATOMIC_T g_iNumActiveKernelThreads;
++static IMG_HANDLE g_hDriverThreadEventObject;
++
++#if defined(DEBUG_BRIDGE_KM)
++static DI_ENTRY *gpsDIBridgeStatsEntry;
++
++static void *BridgeStatsDIStart(OSDI_IMPL_ENTRY *psEntry, IMG_UINT64 *pui64Pos)
++{
++	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry);
++
++	if (psDispatchTable == NULL || *pui64Pos > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
++	{
++		return NULL;
++	}
++
++	if (*pui64Pos == 0)
++	{
++		return DI_START_TOKEN;
++	}
++
++	return &(psDispatchTable[*pui64Pos - 1]);
++}
++
++static void BridgeStatsDIStop(OSDI_IMPL_ENTRY *psEntry, void *pvData)
++{
++	PVR_UNREFERENCED_PARAMETER(psEntry);
++	PVR_UNREFERENCED_PARAMETER(pvData);
++}
++
++static void *BridgeStatsDINext(OSDI_IMPL_ENTRY *psEntry, void *pvData,
++			       IMG_UINT64 *pui64Pos)
++{
++	PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = DIGetPrivData(psEntry);
++	IMG_UINT64 uiItemAskedFor = *pui64Pos; /* pui64Pos on entry is the index to return */
++
++	PVR_UNREFERENCED_PARAMETER(pvData);
++
++	/* Is the item asked for (starts at 0) a valid table index?
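++	 * (The DI layer hands out DI_START_TOKEN for position 0, so by the
++	 * time we get here *pui64Pos is already the zero-based index of the
++	 * dispatch table entry to hand back.)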
*/ ++ if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) ++ { ++ (*pui64Pos)++; /* on exit it is the next DI index to ask for */ ++ return &(psDispatchTable[uiItemAskedFor]); ++ } ++ ++ /* Now passed the end of the table to indicate stop */ ++ return NULL; ++} ++ ++static int BridgeStatsDIShow(OSDI_IMPL_ENTRY *psEntry, void *pvData) ++{ ++ if (pvData == DI_START_TOKEN) ++ { ++ DIPrintf(psEntry, ++ "Total ioctl call count = %u\n" ++ "Total number of bytes copied via copy_from_user = %u\n" ++ "Total number of bytes copied via copy_to_user = %u\n" ++ "Total number of bytes copied via copy_*_user = %u\n\n" ++ "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s\n", ++ g_BridgeGlobalStats.ui32IOCTLCount, ++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes, ++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes, ++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + ++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes, ++ "#", ++ "Bridge Name", ++ "Wrapper Function", ++ "Call Count", ++ "copy_from_user (B)", ++ "copy_to_user (B)", ++ "Total Time (us)", ++ "Max Time (us)"); ++ } ++ else if (pvData != NULL) ++ { ++ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psTableEntry = pvData; ++ IMG_UINT32 ui32Remainder; ++ ++ DIPrintf(psEntry, ++ "%3d: %-60s %-48s %-10u %-20u %-20u %-20" IMG_UINT64_FMTSPEC " %-20" IMG_UINT64_FMTSPEC "\n", ++ (IMG_UINT32)(((size_t)psTableEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)), ++ psTableEntry->pszIOCName, ++ (psTableEntry->pfFunction != NULL) ? psTableEntry->pszFunctionName : "(null)", ++ psTableEntry->ui32CallCount, ++ psTableEntry->ui32CopyFromUserTotalBytes, ++ psTableEntry->ui32CopyToUserTotalBytes, ++ OSDivide64r64(psTableEntry->ui64TotalTimeNS, 1000, &ui32Remainder), ++ OSDivide64r64(psTableEntry->ui64MaxTimeNS, 1000, &ui32Remainder)); ++ } ++ ++ return 0; ++} ++ ++static IMG_INT64 BridgeStatsWrite(const IMG_CHAR *pcBuffer, ++ IMG_UINT64 ui64Count, IMG_UINT64 *pui64Pos, ++ void *pvData) ++{ ++ IMG_UINT32 i; ++ ++ PVR_RETURN_IF_FALSE(pcBuffer != NULL, -EIO); ++ PVR_RETURN_IF_FALSE(pui64Pos != NULL && *pui64Pos == 0, -EIO); ++ PVR_RETURN_IF_FALSE(ui64Count >= 1, -EINVAL); ++ PVR_RETURN_IF_FALSE(pcBuffer[0] == '0', -EINVAL); ++ PVR_RETURN_IF_FALSE(pcBuffer[ui64Count - 1] == '\0', -EINVAL); ++ ++ /* Reset stats. 
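++	 * User space triggers this by writing the single character '0'
++	 * (validated above) to the bridge_stats DI entry.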
*/ ++ ++ BridgeGlobalStatsLock(); ++ ++ g_BridgeGlobalStats.ui32IOCTLCount = 0; ++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes = 0; ++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes = 0; ++ ++ for (i = 0; i < ARRAY_SIZE(g_BridgeDispatchTable); i++) ++ { ++ g_BridgeDispatchTable[i].ui32CallCount = 0; ++ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0; ++ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0; ++ g_BridgeDispatchTable[i].ui64TotalTimeNS = 0; ++ g_BridgeDispatchTable[i].ui64MaxTimeNS = 0; ++ } ++ ++ BridgeGlobalStatsUnlock(); ++ ++ return ui64Count; ++} ++ ++#endif /* defined(DEBUG_BRIDGE_KM) */ ++ ++PVRSRV_ERROR OSPlatformBridgeInit(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = InitDMABUFBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitDMABUFBridge"); ++ ++ OSAtomicWrite(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED); ++ OSAtomicWrite(&g_iNumActiveDriverThreads, 0); ++ OSAtomicWrite(&g_iNumActiveKernelThreads, 0); ++ ++ eError = OSEventObjectCreate("Global driver thread event object", ++ &g_hDriverThreadEventObject); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", error_); ++ ++#if defined(DEBUG_BRIDGE_KM) ++ { ++ DI_ITERATOR_CB sIter = { ++ .pfnStart = BridgeStatsDIStart, ++ .pfnStop = BridgeStatsDIStop, ++ .pfnNext = BridgeStatsDINext, ++ .pfnShow = BridgeStatsDIShow, ++ .pfnWrite = BridgeStatsWrite, ++ ++ //Expects '0' + Null terminator ++ .ui32WriteLenMax = ((1U)+1U) ++ }; ++ ++ eError = DICreateEntry("bridge_stats", NULL, &sIter, ++ &g_BridgeDispatchTable[0], ++ DI_ENTRY_TYPE_GENERIC, ++ &gpsDIBridgeStatsEntry); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DICreateEntry", error_); ++ } ++#endif ++ ++ return PVRSRV_OK; ++ ++error_: ++ if (g_hDriverThreadEventObject) { ++ OSEventObjectDestroy(g_hDriverThreadEventObject); ++ g_hDriverThreadEventObject = NULL; ++ } ++ ++ return eError; ++} ++ ++void OSPlatformBridgeDeInit(void) ++{ ++#if defined(DEBUG_BRIDGE_KM) ++ if (gpsDIBridgeStatsEntry != NULL) ++ { ++ DIDestroyEntry(gpsDIBridgeStatsEntry); ++ } ++#endif ++ ++ DeinitDMABUFBridge(); ++ ++ if (g_hDriverThreadEventObject != NULL) { ++ OSEventObjectDestroy(g_hDriverThreadEventObject); ++ g_hDriverThreadEventObject = NULL; ++ } ++} ++ ++PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown) ++{ ++ PVRSRV_ERROR eError; ++ IMG_HANDLE hEvent; ++ ++ eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__)); ++ return eError; ++ } ++ ++ if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED, ++ _DRIVER_SUSPENDED) == _DRIVER_SUSPENDED) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Driver is already suspended", __func__)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto out_put; ++ } ++ ++ /* now wait for any threads currently in the server to exit */ ++ while (OSAtomicRead(&g_iNumActiveDriverThreads) != 0 || ++ (OSAtomicRead(&g_iNumActiveKernelThreads) != 0 && !bShutdown)) ++ { ++ if (OSAtomicRead(&g_iNumActiveDriverThreads) != 0) ++ { ++ PVR_LOG(("%s: waiting for user threads (%d)", __func__, ++ OSAtomicRead(&g_iNumActiveDriverThreads))); ++ } ++ if (OSAtomicRead(&g_iNumActiveKernelThreads) != 0) ++ { ++ PVR_LOG(("%s: waiting for kernel threads (%d)", __func__, ++ OSAtomicRead(&g_iNumActiveKernelThreads))); ++ } ++ /* Regular wait is called here (and not OSEventObjectWaitKernel) because ++ * this code is executed by the caller of .suspend/.shutdown callbacks ++ * which is most likely PM (or other actor responsible for suspend ++ * process). 
Because of that this thread shouldn't and most likely
++		 * even cannot be frozen. */
++		OSEventObjectWait(hEvent);
++	}
++
++out_put:
++	OSEventObjectClose(hEvent);
++
++	return eError;
++}
++
++PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void)
++{
++	PVRSRV_ERROR eError;
++
++	/* resume the driver and then signal so any waiting threads wake up */
++	if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_SUSPENDED,
++	                            _DRIVER_NOT_SUSPENDED) == _DRIVER_NOT_SUSPENDED)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Driver is not suspended", __func__));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	eError = OSEventObjectSignal(g_hDriverThreadEventObject);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s",
++		        __func__, PVRSRVGetErrorString(eError)));
++	}
++
++	return eError;
++}
++
++static PVRSRV_ERROR LinuxBridgeSignalIfSuspended(void)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
++	{
++		/* assign to the outer eError (a second local declaration here
++		 * would shadow it and make this function always return OK) */
++		eError = OSEventObjectSignal(g_hDriverThreadEventObject);
++		if (eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal driver thread event"
++			        " object: %s", __func__, PVRSRVGetErrorString(eError)));
++		}
++	}
++
++	return eError;
++}
++
++void LinuxBridgeNumActiveKernelThreadsIncrement(void)
++{
++	OSAtomicIncrement(&g_iNumActiveKernelThreads);
++}
++
++void LinuxBridgeNumActiveKernelThreadsDecrement(void)
++{
++	OSAtomicDecrement(&g_iNumActiveKernelThreads);
++	PVR_ASSERT(OSAtomicRead(&g_iNumActiveKernelThreads) >= 0);
++
++	/* Signal on every decrement in case LinuxBridgeBlockClientsAccess() is
++	 * waiting for the threads to freeze.
++	 * (error is logged in called function so ignore, we can't do much with
++	 * it anyway) */
++	(void) LinuxBridgeSignalIfSuspended();
++}
++
++static PVRSRV_ERROR _WaitForDriverUnsuspend(void)
++{
++	PVRSRV_ERROR eError;
++	IMG_HANDLE hEvent;
++
++	eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
++		return eError;
++	}
++
++	while (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
++	{
++		/* we should be able to use normal (not kernel) wait here since
++		 * we were just unfrozen and most likely we're not going to
++		 * be frozen again (?)
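++		 * (if that assumption is ever wrong, no harm is done: the loop
++		 * re-checks g_iDriverSuspended once the thread is thawed and
++		 * simply goes back to waiting)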
*/ ++ OSEventObjectWait(hEvent); ++ } ++ ++ OSEventObjectClose(hEvent); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PVRSRVDriverThreadEnter(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* increment first so there is no race between this value and ++ * g_iDriverSuspended in LinuxBridgeBlockClientsAccess() */ ++ OSAtomicIncrement(&g_iNumActiveDriverThreads); ++ ++ if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED) ++ { ++ /* decrement here because the driver is going to be suspended and ++ * this thread is going to be frozen so we don't want to wait for ++ * it in LinuxBridgeBlockClientsAccess() */ ++ OSAtomicDecrement(&g_iNumActiveDriverThreads); ++ ++ /* during suspend procedure this will put the current thread to ++ * the freezer but during shutdown this will just return */ ++ try_to_freeze(); ++ ++ /* if the thread was unfrozen but the flag is not yet set to ++ * _DRIVER_NOT_SUSPENDED wait for it ++ * in case this is a shutdown the thread was not frozen so we'll ++ * wait here indefinitely but this is ok (and this is in fact what ++ * we want) because no thread should be entering the driver in such ++ * case */ ++ eError = _WaitForDriverUnsuspend(); ++ ++ /* increment here because that means that the thread entered the ++ * driver */ ++ OSAtomicIncrement(&g_iNumActiveDriverThreads); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wait for driver" ++ " unsuspend: %s", __func__, ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ } ++ ++ return PVRSRV_OK; ++} ++ ++void PVRSRVDriverThreadExit(void) ++{ ++ OSAtomicDecrement(&g_iNumActiveDriverThreads); ++ /* if the driver is being suspended then we need to signal the ++ * event object as the thread suspending the driver is waiting ++ * for active threads to exit ++ * error is logged in called function so ignore returned error ++ */ ++ (void) LinuxBridgeSignalIfSuspended(); ++} ++ ++int ++PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *pDRMFile) ++{ ++ struct drm_pvr_srvkm_cmd *psSrvkmCmd = (struct drm_pvr_srvkm_cmd *) arg; ++ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM = { 0 }; ++ CONNECTION_DATA *psConnection = LinuxServicesConnectionFromFile(pDRMFile->filp); ++ PVRSRV_ERROR error; ++ ++ if (psConnection == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Invalid connection data")); ++ return -EFAULT; ++ } ++ ++ PVR_ASSERT(psSrvkmCmd != NULL); ++ ++ DRM_DEBUG("tgid=%d, tgid_connection=%d, bridge_id=%d, func_id=%d", ++ task_tgid_nr(current), ++ ((ENV_CONNECTION_DATA *)PVRSRVConnectionPrivateData(psConnection))->owner, ++ psSrvkmCmd->bridge_id, ++ psSrvkmCmd->bridge_func_id); ++ ++ error = PVRSRVDriverThreadEnter(); ++ PVR_LOG_GOTO_IF_ERROR(error, "PVRSRVDriverThreadEnter", e0); ++ ++ sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id; ++ sBridgePackageKM.ui32FunctionID = psSrvkmCmd->bridge_func_id; ++ sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM); ++ sBridgePackageKM.pvParamIn = (void __user *)(uintptr_t)psSrvkmCmd->in_data_ptr; ++ sBridgePackageKM.ui32InBufferSize = psSrvkmCmd->in_data_size; ++ sBridgePackageKM.pvParamOut = (void __user *)(uintptr_t)psSrvkmCmd->out_data_ptr; ++ sBridgePackageKM.ui32OutBufferSize = psSrvkmCmd->out_data_size; ++ ++ error = BridgedDispatchKM(psConnection, &sBridgePackageKM); ++ ++ PVRSRVDriverThreadExit(); ++ ++e0: ++ return OSPVRSRVToNativeError(error); ++} ++ ++int ++PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma) ++{ ++ CONNECTION_DATA *psConnection = LinuxServicesConnectionFromFile(pFile); ++ IMG_HANDLE 
hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff); ++ PMR *psPMR; ++ PVRSRV_ERROR eError; ++ ++ if (psConnection == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Invalid connection data")); ++ return -ENOENT; ++ } ++ ++ eError = PVRSRVDriverThreadEnter(); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVDriverThreadEnter", e0); ++ ++ /* ++ * The bridge lock used here to protect PVRSRVLookupHandle is replaced ++ * by a specific lock considering that the handle functions have now ++ * their own lock. This change was necessary to solve the lockdep issues ++ * related with the PVRSRV_MMap. ++ */ ++ ++ eError = PVRSRVLookupHandle(psConnection->psHandleBase, ++ (void **)&psPMR, ++ hSecurePMRHandle, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, ++ IMG_TRUE); ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ mutex_lock(&g_sMMapMutex); ++ /* Note: PMRMMapPMR will take a reference on the PMR. ++ * Unref the handle immediately, because we have now done ++ * the required operation on the PMR (whether it succeeded or not) ++ */ ++ eError = PMRMMapPMR(psPMR, ps_vma); ++ mutex_unlock(&g_sMMapMutex); ++ PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: PMRMMapPMR failed (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ goto e0; ++ } ++ ++ PVRSRVDriverThreadExit(); ++ ++ return 0; ++ ++e0: ++ PVRSRVDriverThreadExit(); ++ ++ PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError)); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ ++ return -ENOENT; // -EAGAIN // or what? ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_bridge_k.h b/drivers/gpu/drm/img-rogue/pvr_bridge_k.h +new file mode 100644 +index 000000000000..859ec641bd1b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_bridge_k.h +@@ -0,0 +1,103 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR Bridge Module (kernel side) ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Receives calls from the user portion of services and ++ despatches them to functions in the kernel portion. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_BRIDGE_K_H ++#define PVR_BRIDGE_K_H ++ ++#include "pvrsrv_error.h" ++ ++/*! ++****************************************************************************** ++ @Function LinuxBridgeBlockClientsAccess ++ @Description This function will wait for any existing threads in the Server ++ to exit and then disable access to the driver. New threads will ++ not be allowed to enter the Server until the driver is ++ unsuspended (see LinuxBridgeUnblockClientsAccess). ++ @Input bShutdown this flag indicates that the function was called ++ from a shutdown callback and therefore it will ++ not wait for the kernel threads to get frozen ++ (because this doesn't happen during shutdown ++ procedure) ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown); ++ ++/*! ++****************************************************************************** ++ @Function LinuxBridgeUnblockClientsAccess ++ @Description This function will re-enable the bridge and allow any threads ++ waiting to enter the Server to continue. ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void); ++ ++void LinuxBridgeNumActiveKernelThreadsIncrement(void); ++void LinuxBridgeNumActiveKernelThreadsDecrement(void); ++ ++/*! ++****************************************************************************** ++ @Function PVRSRVDriverThreadEnter ++ @Description Increments number of client threads currently operating ++ in the driver's context. ++ If the driver is currently being suspended this function ++ will call try_to_freeze() on behalf of the client thread. ++ When the driver is resumed the function will exit and allow ++ the thread into the driver. ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVDriverThreadEnter(void); ++ ++/*! ++****************************************************************************** ++ @Function PVRSRVDriverThreadExit ++ @Description Decrements the number of client threads currently operating ++ in the driver's context to match the call to ++ PVRSRVDriverThreadEnter(). ++ The function also signals the driver that a thread left the ++ driver context so if it's waiting to suspend it knows that ++ the number of threads decreased. 
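++
++              A minimal usage sketch (hypothetical caller), pairing the two
++              calls as the descriptions above require:
++
++                  eError = PVRSRVDriverThreadEnter();
++                  if (eError == PVRSRV_OK)
++                  {
++                      /* ... operate inside the driver ... */
++                      PVRSRVDriverThreadExit();
++                  }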
++******************************************************************************/ ++void PVRSRVDriverThreadExit(void); ++ ++#endif /* PVR_BRIDGE_K_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_buffer_sync.c b/drivers/gpu/drm/img-rogue/pvr_buffer_sync.c +new file mode 100644 +index 000000000000..eedfbf2c9be3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_buffer_sync.c +@@ -0,0 +1,646 @@ ++/* ++ * @File ++ * @Title Linux buffer sync interface ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#include <linux/dma-buf.h>
++
++#include "services_kernel_client.h"
++#include "pvr_dma_resv.h"
++#include "pvr_buffer_sync.h"
++#include "pvr_buffer_sync_shared.h"
++#include "pvr_drv.h"
++#include "pvr_fence.h"
++
++struct pvr_buffer_sync_context {
++	struct mutex ctx_lock;
++	struct pvr_fence_context *fence_ctx;
++	struct ww_acquire_ctx acquire_ctx;
++};
++
++struct pvr_buffer_sync_check_data {
++	struct dma_fence_cb base;
++
++	u32 nr_fences;
++	struct pvr_fence **fences;
++};
++
++struct pvr_buffer_sync_append_data {
++	struct pvr_buffer_sync_context *ctx;
++
++	u32 nr_pmrs;
++	struct _PMR_ **pmrs;
++	u32 *pmr_flags;
++
++	struct pvr_fence *update_fence;
++	struct pvr_buffer_sync_check_data *check_data;
++};
++
++static struct dma_resv *
++pmr_reservation_object_get(struct _PMR_ *pmr)
++{
++	struct dma_buf *dmabuf;
++
++	dmabuf = PhysmemGetDmaBuf(pmr);
++	if (dmabuf)
++		return dmabuf->resv;
++
++	return NULL;
++}
++
++static int
++pvr_buffer_sync_pmrs_lock(struct pvr_buffer_sync_context *ctx,
++			  u32 nr_pmrs,
++			  struct _PMR_ **pmrs)
++{
++	struct dma_resv *resv, *cresv = NULL, *lresv = NULL;
++	int i, err;
++	struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
++
++	mutex_lock(&ctx->ctx_lock);
++
++	ww_acquire_init(acquire_ctx, &reservation_ww_class);
++retry:
++	for (i = 0; i < nr_pmrs; i++) {
++		resv = pmr_reservation_object_get(pmrs[i]);
++		if (!resv) {
++			pr_err("%s: Failed to get reservation object from pmr %p\n",
++			       __func__, pmrs[i]);
++			err = -EINVAL;
++			goto fail;
++		}
++
++		if (resv != lresv) {
++			err = ww_mutex_lock_interruptible(&resv->lock,
++							  acquire_ctx);
++			if (err) {
++				cresv = (err == -EDEADLK) ? resv : NULL;
++				goto fail;
++			}
++		} else {
++			lresv = NULL;
++		}
++	}
++
++	ww_acquire_done(acquire_ctx);
++
++	return 0;
++
++fail:
++	while (i--) {
++		resv = pmr_reservation_object_get(pmrs[i]);
++		if (WARN_ON_ONCE(!resv))
++			continue;
++		ww_mutex_unlock(&resv->lock);
++	}
++
++	if (lresv)
++		ww_mutex_unlock(&lresv->lock);
++
++	if (cresv) {
++		err = ww_mutex_lock_slow_interruptible(&cresv->lock,
++						       acquire_ctx);
++		if (!err) {
++			lresv = cresv;
++			cresv = NULL;
++			goto retry;
++		}
++	}
++
++	ww_acquire_fini(acquire_ctx);
++
++	mutex_unlock(&ctx->ctx_lock);
++	return err;
++}
++
++static void
++pvr_buffer_sync_pmrs_unlock(struct pvr_buffer_sync_context *ctx,
++			    u32 nr_pmrs,
++			    struct _PMR_ **pmrs)
++{
++	struct dma_resv *resv;
++	int i;
++	struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
++
++	for (i = 0; i < nr_pmrs; i++) {
++		resv = pmr_reservation_object_get(pmrs[i]);
++		if (WARN_ON_ONCE(!resv))
++			continue;
++		ww_mutex_unlock(&resv->lock);
++	}
++
++	ww_acquire_fini(acquire_ctx);
++
++	mutex_unlock(&ctx->ctx_lock);
++}
++
++static u32
++pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs,
++				 u32 *pmr_flags)
++{
++	struct dma_resv *resv;
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0))
++	struct dma_resv_iter cursor;
++#else
++	struct dma_resv_list *resv_list;
++#endif
++	struct dma_fence *fence;
++	u32 fence_count = 0;
++	bool exclusive;
++	int i;
++
++	for (i = 0; i < nr_pmrs; i++) {
++		exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
++
++		resv = pmr_reservation_object_get(pmrs[i]);
++		if (WARN_ON_ONCE(!resv))
++			continue;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0))
++		dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
++			fence_count++;
++		}
++#else
++		resv_list = dma_resv_shared_list(resv);
++		fence = dma_resv_excl_fence(resv);
++
++		if (fence &&
++		    (!exclusive || !resv_list || !resv_list->shared_count))
++ fence_count++; ++ ++ if (exclusive && resv_list) ++ fence_count += resv_list->shared_count; ++#endif ++ } ++ ++ return fence_count; ++} ++ ++static struct pvr_buffer_sync_check_data * ++pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx, ++ PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, ++ u32 nr_pmrs, ++ struct _PMR_ **pmrs, ++ u32 *pmr_flags) ++{ ++ struct pvr_buffer_sync_check_data *data; ++ struct dma_resv *resv; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ struct dma_resv_iter cursor; ++#else ++ struct dma_resv_list *resv_list; ++ int j; ++#endif ++ struct dma_fence *fence; ++ u32 fence_count; ++ bool exclusive; ++ int i; ++ int err; ++ ++ data = kzalloc(sizeof(*data), GFP_KERNEL); ++ if (!data) ++ return NULL; ++ ++ fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs, ++ pmr_flags); ++ if (fence_count) { ++ data->fences = kcalloc(fence_count, sizeof(*data->fences), ++ GFP_KERNEL); ++ if (!data->fences) ++ goto err_check_data_free; ++ } ++ ++ for (i = 0; i < nr_pmrs; i++) { ++ resv = pmr_reservation_object_get(pmrs[i]); ++ if (WARN_ON_ONCE(!resv)) ++ continue; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); ++ if (!exclusive) { ++ err = dma_resv_reserve_fences(resv, 1); ++ if (err) ++ goto err_destroy_fences; ++ } ++ ++ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) { ++ data->fences[data->nr_fences++] = ++ pvr_fence_create_from_fence(fence_ctx, ++ sync_checkpoint_ctx, ++ fence, ++ PVRSRV_NO_FENCE, ++ "exclusive check fence"); ++ if (!data->fences[data->nr_fences - 1]) { ++ data->nr_fences--; ++ PVR_FENCE_TRACE(fence, ++ "waiting on exclusive fence\n"); ++ WARN_ON(dma_fence_wait(fence, true) <= 0); ++ } ++ } ++#else ++ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE); ++ if (!exclusive) { ++ err = dma_resv_reserve_shared(resv ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) ++ , 1 ++#endif ++ ); ++ if (err) ++ goto err_destroy_fences; ++ } ++ ++ resv_list = dma_resv_shared_list(resv); ++ fence = dma_resv_excl_fence(resv); ++ ++ if (fence && ++ (!exclusive || !resv_list || !resv_list->shared_count)) { ++ data->fences[data->nr_fences++] = ++ pvr_fence_create_from_fence(fence_ctx, ++ sync_checkpoint_ctx, ++ fence, ++ PVRSRV_NO_FENCE, ++ "exclusive check fence"); ++ if (!data->fences[data->nr_fences - 1]) { ++ data->nr_fences--; ++ PVR_FENCE_TRACE(fence, ++ "waiting on exclusive fence\n"); ++ WARN_ON(dma_fence_wait(fence, true) <= 0); ++ } ++ } ++ ++ if (exclusive && resv_list) { ++ for (j = 0; j < resv_list->shared_count; j++) { ++ fence = rcu_dereference_protected(resv_list->shared[j], ++ dma_resv_held(resv)); ++ data->fences[data->nr_fences++] = ++ pvr_fence_create_from_fence(fence_ctx, ++ sync_checkpoint_ctx, ++ fence, ++ PVRSRV_NO_FENCE, ++ "check fence"); ++ if (!data->fences[data->nr_fences - 1]) { ++ data->nr_fences--; ++ PVR_FENCE_TRACE(fence, ++ "waiting on non-exclusive fence\n"); ++ WARN_ON(dma_fence_wait(fence, true) <= 0); ++ } ++ } ++ } ++#endif ++ } ++ ++ WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count)); ++ ++ return data; ++ ++err_destroy_fences: ++ for (i = 0; i < data->nr_fences; i++) ++ pvr_fence_destroy(data->fences[i]); ++ kfree(data->fences); ++err_check_data_free: ++ kfree(data); ++ return NULL; ++} ++ ++static void ++pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data) ++{ ++ int i; ++ ++ for (i = 0; i < data->nr_fences; i++) ++ pvr_fence_destroy(data->fences[i]); ++ ++ 
kfree(data->fences); ++ kfree(data); ++} ++ ++struct pvr_buffer_sync_context * ++pvr_buffer_sync_context_create(struct device *dev, const char *name) ++{ ++ struct drm_device *ddev = dev_get_drvdata(dev); ++ struct pvr_drm_private *priv = ddev->dev_private; ++ struct pvr_buffer_sync_context *ctx; ++ int err; ++ ++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ++ if (!ctx) { ++ err = -ENOMEM; ++ goto err_exit; ++ } ++ ++ ctx->fence_ctx = pvr_fence_context_create(priv->dev_node, ++ NativeSyncGetFenceStatusWq(), ++ name); ++ if (!ctx->fence_ctx) { ++ err = -ENOMEM; ++ goto err_free_ctx; ++ } ++ ++ mutex_init(&ctx->ctx_lock); ++ ++ return ctx; ++ ++err_free_ctx: ++ kfree(ctx); ++err_exit: ++ return ERR_PTR(err); ++} ++ ++void ++pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx) ++{ ++ pvr_fence_context_destroy(ctx->fence_ctx); ++ kfree(ctx); ++} ++ ++int ++pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx, ++ PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx, ++ u32 nr_pmrs, ++ struct _PMR_ **pmrs, ++ u32 *pmr_flags, ++ u32 *nr_fence_checkpoints_out, ++ PSYNC_CHECKPOINT **fence_checkpoints_out, ++ PSYNC_CHECKPOINT *update_checkpoints_out, ++ struct pvr_buffer_sync_append_data **data_out) ++{ ++ struct pvr_buffer_sync_append_data *data; ++ PSYNC_CHECKPOINT *fence_checkpoints; ++ const size_t data_size = sizeof(*data); ++ const size_t pmrs_size = sizeof(*pmrs) * nr_pmrs; ++ const size_t pmr_flags_size = sizeof(*pmr_flags) * nr_pmrs; ++ int i; ++ int j; ++ int err; ++ ++ if (unlikely((nr_pmrs && !(pmrs && pmr_flags)) || ++ !nr_fence_checkpoints_out || !fence_checkpoints_out || ++ !update_checkpoints_out)) ++ return -EINVAL; ++ ++ for (i = 0; i < nr_pmrs; i++) { ++ if (unlikely(!(pmr_flags[i] & PVR_BUFFER_FLAG_MASK))) { ++ pr_err("%s: Invalid flags %#08x for pmr %p\n", ++ __func__, pmr_flags[i], pmrs[i]); ++ return -EINVAL; ++ } ++ } ++ ++#if defined(NO_HARDWARE) ++ /* ++ * For NO_HARDWARE there's no checking or updating of sync checkpoints ++ * which means SW waits on our fences will cause a deadlock (since they ++ * will never be signalled). Avoid this by not creating any fences. ++ */ ++ nr_pmrs = 0; ++#endif ++ ++ if (!nr_pmrs) { ++ *nr_fence_checkpoints_out = 0; ++ *fence_checkpoints_out = NULL; ++ *update_checkpoints_out = NULL; ++ *data_out = NULL; ++ ++ return 0; ++ } ++ ++ data = kzalloc(data_size + pmrs_size + pmr_flags_size, GFP_KERNEL); ++ if (unlikely(!data)) ++ return -ENOMEM; ++ ++ data->ctx = ctx; ++ data->pmrs = (struct _PMR_ **)(void *)(data + 1); ++ data->pmr_flags = (u32 *)(void *)(data->pmrs + nr_pmrs); ++ ++ /* ++ * It's expected that user space will provide a set of unique PMRs ++ * but, as a PMR can have multiple handles, it's still possible to ++ * end up here with duplicates. Take this opportunity to filter out ++ * any remaining duplicates (updating flags when necessary) before ++ * trying to process them further. ++ */ ++ for (i = 0; i < nr_pmrs; i++) { ++ for (j = 0; j < data->nr_pmrs; j++) { ++ if (data->pmrs[j] == pmrs[i]) { ++ data->pmr_flags[j] |= pmr_flags[i]; ++ break; ++ } ++ } ++ ++ if (j == data->nr_pmrs) { ++ data->pmrs[j] = pmrs[i]; ++ data->pmr_flags[j] = pmr_flags[i]; ++ data->nr_pmrs++; ++ } ++ } ++ ++ err = pvr_buffer_sync_pmrs_lock(ctx, data->nr_pmrs, data->pmrs); ++ if (unlikely(err)) { ++ /* ++ * -EINTR is returned if a signal arrives while trying to acquire a PMR ++ * lock. In this case the operation should be retried after the signal ++ * has been serviced. 
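++		 * (the -EINTR originates from ww_mutex_lock_interruptible()
++		 * inside pvr_buffer_sync_pmrs_lock().)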
As this is expected behaviour, don't print an ++ * error in this case. ++ */ ++ if (err != -EINTR) { ++ pr_err("%s: failed to lock pmrs (errno=%d)\n", ++ __func__, err); ++ } ++ goto err_free_data; ++ } ++ ++ /* create the check data */ ++ data->check_data = pvr_buffer_sync_check_fences_create(ctx->fence_ctx, ++ sync_checkpoint_ctx, ++ data->nr_pmrs, ++ data->pmrs, ++ data->pmr_flags); ++ if (unlikely(!data->check_data)) { ++ err = -ENOMEM; ++ goto err_pmrs_unlock; ++ } ++ ++ fence_checkpoints = kcalloc(data->check_data->nr_fences, ++ sizeof(*fence_checkpoints), ++ GFP_KERNEL); ++ if (fence_checkpoints) { ++ pvr_fence_get_checkpoints(data->check_data->fences, ++ data->check_data->nr_fences, ++ fence_checkpoints); ++ } else { ++ if (unlikely(data->check_data->nr_fences)) { ++ err = -ENOMEM; ++ goto err_free_check_data; ++ } ++ } ++ ++ /* create the update fence */ ++ data->update_fence = pvr_fence_create(ctx->fence_ctx, ++ sync_checkpoint_ctx, ++ SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, "update fence"); ++ if (unlikely(!data->update_fence)) { ++ err = -ENOMEM; ++ goto err_free_fence_checkpoints; ++ } ++ ++ /* ++ * We need to clean up the fences once the HW has finished with them. ++ * We can do this using fence callbacks. However, instead of adding a ++ * callback to every fence, which would result in more work, we can ++ * simply add one to the update fence since this will be the last fence ++ * to be signalled. This callback can do all the necessary clean up. ++ * ++ * Note: we take an additional reference on the update fence in case ++ * it signals before we can add it to a reservation object. ++ */ ++ PVR_FENCE_TRACE(&data->update_fence->base, ++ "create fence calling dma_fence_get\n"); ++ dma_fence_get(&data->update_fence->base); ++ ++ *nr_fence_checkpoints_out = data->check_data->nr_fences; ++ *fence_checkpoints_out = fence_checkpoints; ++ *update_checkpoints_out = pvr_fence_get_checkpoint(data->update_fence); ++ *data_out = data; ++ ++ return 0; ++ ++err_free_fence_checkpoints: ++ kfree(fence_checkpoints); ++err_free_check_data: ++ pvr_buffer_sync_check_fences_destroy(data->check_data); ++err_pmrs_unlock: ++ pvr_buffer_sync_pmrs_unlock(ctx, data->nr_pmrs, data->pmrs); ++err_free_data: ++ kfree(data); ++ return err; ++} ++ ++void ++pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data) ++{ ++ struct dma_resv *resv; ++ int i; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ int r; ++#endif ++ ++ dma_fence_enable_sw_signaling(&data->update_fence->base); ++ ++ for (i = 0; i < data->nr_pmrs; i++) { ++ resv = pmr_reservation_object_get(data->pmrs[i]); ++ if (WARN_ON_ONCE(!resv)) ++ continue; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ r = dma_resv_reserve_fences(resv, 1); ++ if (r) { ++ /* As last resort on OOM we block for the fence */ ++ dma_fence_wait(&data->update_fence->base, false); ++ continue; ++ } ++#endif ++ ++ if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) { ++ PVR_FENCE_TRACE(&data->update_fence->base, ++ "added exclusive fence (%s) to resv %p\n", ++ data->update_fence->name, resv); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ dma_resv_add_fence(resv, ++ &data->update_fence->base, ++ DMA_RESV_USAGE_WRITE); ++#else ++ dma_resv_add_excl_fence(resv, ++ &data->update_fence->base); ++#endif ++ } else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) { ++ PVR_FENCE_TRACE(&data->update_fence->base, ++ "added non-exclusive fence (%s) to resv %p\n", ++ data->update_fence->name, resv); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ 
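++			/* on v6.6+ kernels the DMA_RESV_USAGE_READ argument
++			 * takes the place of the separate shared-fence list
++			 * that dma_resv_add_shared_fence() appends to in the
++			 * #else branch below
++			 */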
dma_resv_add_fence(resv, ++ &data->update_fence->base, ++ DMA_RESV_USAGE_READ); ++#else ++ dma_resv_add_shared_fence(resv, ++ &data->update_fence->base); ++#endif ++ } ++ } ++ ++ /* ++ * Now that the fence has been added to the necessary ++ * reservation objects we can safely drop the extra reference ++ * we took in pvr_buffer_sync_resolve_and_create_fences(). ++ */ ++ dma_fence_put(&data->update_fence->base); ++ pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs, ++ data->pmrs); ++ ++ /* destroy the check fences */ ++ pvr_buffer_sync_check_fences_destroy(data->check_data); ++ /* destroy the update fence */ ++ pvr_fence_destroy(data->update_fence); ++ ++ /* free the append data */ ++ kfree(data); ++} ++ ++void ++pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data) ++{ ++ ++ /* drop the extra reference we took on the update fence in ++ * pvr_buffer_sync_resolve_and_create_fences(). ++ */ ++ dma_fence_put(&data->update_fence->base); ++ ++ if (data->nr_pmrs > 0) ++ pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs, ++ data->pmrs); ++ ++ /* destroy the check fences */ ++ pvr_buffer_sync_check_fences_destroy(data->check_data); ++ /* destroy the update fence */ ++ pvr_fence_destroy(data->update_fence); ++ ++ /* free the append data */ ++ kfree(data); ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_buffer_sync.h b/drivers/gpu/drm/img-rogue/pvr_buffer_sync.h +new file mode 100644 +index 000000000000..b6aadf940479 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_buffer_sync.h +@@ -0,0 +1,125 @@ ++/* ++ * @File pvr_buffer_sync.h ++ * @Title PowerVR Linux buffer sync interface ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef PVR_BUFFER_SYNC_H
++#define PVR_BUFFER_SYNC_H
++
++#include <linux/device.h>
++#include <linux/dma-buf.h>
++#include <linux/types.h>
++
++struct _PMR_;
++struct pvr_buffer_sync_context;
++struct pvr_buffer_sync_append_data;
++
++/**
++ * pvr_buffer_sync_context_create - creates a buffer sync context
++ * @dev: Linux device
++ * @name: context name (used for debugging)
++ *
++ * pvr_buffer_sync_context_destroy() should be used to clean up the buffer
++ * sync context.
++ *
++ * Return: A buffer sync context or NULL if it fails for any reason.
++ */
++struct pvr_buffer_sync_context *
++pvr_buffer_sync_context_create(struct device *dev, const char *name);
++
++/**
++ * pvr_buffer_sync_context_destroy() - frees a buffer sync context
++ * @ctx: buffer sync context
++ */
++void
++pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx);
++
++/**
++ * pvr_buffer_sync_resolve_and_create_fences() - create checkpoints from
++ *                                               buffers
++ * @ctx: buffer sync context
++ * @sync_checkpoint_ctx: context in which to create sync checkpoints
++ * @nr_pmrs: number of buffer objects (PMRs)
++ * @pmrs: buffer array
++ * @pmr_flags: internal flags
++ * @nr_fence_checkpoints_out: returned number of fence sync checkpoints
++ * @fence_checkpoints_out: returned array of fence sync checkpoints
++ * @update_checkpoint_out: returned update sync checkpoint
++ * @data_out: returned buffer sync data
++ *
++ * After this call, either pvr_buffer_sync_kick_succeeded() or
++ * pvr_buffer_sync_kick_failed() must be called.
++ *
++ * Return: 0 on success or an error code otherwise.
++ */
++int
++pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx,
++					  PSYNC_CHECKPOINT_CONTEXT sync_checkpoint_ctx,
++					  u32 nr_pmrs,
++					  struct _PMR_ **pmrs,
++					  u32 *pmr_flags,
++					  u32 *nr_fence_checkpoints_out,
++					  PSYNC_CHECKPOINT **fence_checkpoints_out,
++					  PSYNC_CHECKPOINT *update_checkpoint_out,
++					  struct pvr_buffer_sync_append_data **data_out);
++
++/**
++ * pvr_buffer_sync_kick_succeeded() - cleans up after a successful kick
++ *                                    operation
++ * @data: buffer sync data returned by
++ *        pvr_buffer_sync_resolve_and_create_fences()
++ *
++ * Should only be called following pvr_buffer_sync_resolve_and_create_fences().
++ */
++void
++pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data);
++
++/**
++ * pvr_buffer_sync_kick_failed() - cleans up after a failed kick operation
++ * @data: buffer sync data returned by
++ *        pvr_buffer_sync_resolve_and_create_fences()
++ *
++ * Should only be called following pvr_buffer_sync_resolve_and_create_fences().
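++ *
++ * A minimal sketch of the expected pairing (kick_hw() is a hypothetical
++ * stand-in for the caller's hardware kick; note @data is NULL when no PMRs
++ * were passed in, in which case neither kick function may be called):
++ *
++ *   err = pvr_buffer_sync_resolve_and_create_fences(ctx, cp_ctx, nr_pmrs,
++ *                                                   pmrs, flags, &nr_cps,
++ *                                                   &cps, &update_cp, &data);
++ *   if (!err && data) {
++ *           if (kick_hw() == 0)
++ *                   pvr_buffer_sync_kick_succeeded(data);
++ *           else
++ *                   pvr_buffer_sync_kick_failed(data);
++ *   }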
++ */ ++void ++pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data); ++ ++#endif /* PVR_BUFFER_SYNC_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h b/drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h +new file mode 100644 +index 000000000000..7a110910dbd6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_buffer_sync_shared.h +@@ -0,0 +1,57 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR buffer sync shared ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Shared definitions between client and server ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef PVR_BUFFER_SYNC_SHARED_H ++#define PVR_BUFFER_SYNC_SHARED_H ++ ++#define PVR_BUFFER_FLAG_READ (1U << 0) ++#define PVR_BUFFER_FLAG_WRITE (1U << 1) ++#define PVR_BUFFER_FLAG_MASK (PVR_BUFFER_FLAG_READ | \ ++ PVR_BUFFER_FLAG_WRITE) ++ ++/* Maximum number of PMRs passed ++ * in a kick when using buffer sync ++ */ ++#define PVRSRV_MAX_BUFFERSYNC_PMRS 32 ++ ++#endif /* PVR_BUFFER_SYNC_SHARED_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_counting_timeline.c b/drivers/gpu/drm/img-rogue/pvr_counting_timeline.c +new file mode 100644 +index 000000000000..3fa890316dcc +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_counting_timeline.c +@@ -0,0 +1,308 @@ ++/* ++ * @File ++ * @Title PowerVR Linux software "counting" timeline fence implementation ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include ++ ++#include "services_kernel_client.h" ++#include "pvr_counting_timeline.h" ++#include "pvr_sw_fence.h" ++ ++#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ ++ do { \ ++ if (pfnDumpDebugPrintf) \ ++ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ ++ ## __VA_ARGS__); \ ++ else \ ++ pr_err(fmt "\n", ## __VA_ARGS__); \ ++ } while (0) ++ ++struct pvr_counting_fence_timeline { ++ struct pvr_sw_fence_context *context; ++ ++ void *dbg_request_handle; ++ ++ spinlock_t active_fences_lock; ++ u64 current_value; /* guarded by active_fences_lock */ ++ u64 next_value; /* guarded by active_fences_lock */ ++ struct list_head active_fences; ++ ++ struct kref kref; ++}; ++ ++struct pvr_counting_fence { ++ u64 value; ++ struct dma_fence *fence; ++ struct list_head active_list_entry; ++}; ++ ++void pvr_counting_fence_timeline_dump_timeline( ++ void *data, ++ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, ++ void *dump_debug_file) ++{ ++ ++ struct pvr_counting_fence_timeline *timeline = ++ (struct pvr_counting_fence_timeline *) data; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&timeline->active_fences_lock, flags); ++ ++ PVR_DUMPDEBUG_LOG(dump_debug_printf, ++ dump_debug_file, ++ "TL:%s SeqNum: %llu/%llu", ++ pvr_sw_fence_context_name( ++ timeline->context), ++ timeline->current_value, ++ timeline->next_value); ++ ++ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); ++} ++ ++static void ++pvr_counting_fence_timeline_debug_request(void *data, u32 verbosity, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ struct pvr_counting_fence_timeline *timeline = ++ (struct pvr_counting_fence_timeline *)data; ++ struct pvr_counting_fence *obj; ++ unsigned long flags; ++ char value[128]; ++ ++ if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) { ++ spin_lock_irqsave(&timeline->active_fences_lock, flags); ++ pvr_sw_fence_context_value_str(timeline->context, value, ++ sizeof(value)); ++ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ++ "sw: %s @%s cur=%llu", ++ pvr_sw_fence_context_name(timeline->context), ++ value, timeline->current_value); ++ list_for_each_entry(obj, &timeline->active_fences, ++ active_list_entry) { ++ obj->fence->ops->fence_value_str(obj->fence, ++ value, sizeof(value)); ++ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ++ " @%s: val=%llu", value, obj->value); ++ } ++ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); ++ } ++} ++ ++struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( ++ const char *name) ++{ ++ PVRSRV_ERROR srv_err; ++ struct pvr_counting_fence_timeline *timeline = ++ kzalloc(sizeof(*timeline), GFP_KERNEL); ++ ++ if (!timeline) ++ goto err_out; ++ ++ timeline->context = pvr_sw_fence_context_create(name, ++ "pvr_sw_sync"); ++ if (!timeline->context) ++ goto err_free_timeline; ++ ++ srv_err = PVRSRVRegisterDriverDbgRequestNotify( ++ &timeline->dbg_request_handle, ++ pvr_counting_fence_timeline_debug_request, ++ DEBUG_REQUEST_LINUXFENCE, ++ timeline); ++ if (srv_err != PVRSRV_OK) { ++ pr_err("%s: failed to register debug request callback (%s)\n", ++ __func__, PVRSRVGetErrorString(srv_err)); ++ goto err_free_timeline_ctx; ++ } ++ ++ timeline->current_value = 0; ++ timeline->next_value = 1; ++ kref_init(&timeline->kref); ++ spin_lock_init(&timeline->active_fences_lock); ++ INIT_LIST_HEAD(&timeline->active_fences); ++ ++err_out: ++ return timeline; ++ ++err_free_timeline_ctx: ++ pvr_sw_fence_context_destroy(timeline->context); ++ ++err_free_timeline: ++ kfree(timeline); ++ timeline = NULL; ++ goto err_out; ++} ++ ++void pvr_counting_fence_timeline_force_complete( ++ struct pvr_counting_fence_timeline *timeline) ++{ ++ struct list_head *entry, *tmp; 
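++ /* Everything still on the active list below is signalled and released
++ * here, so no waiter is left blocked once the timeline itself goes
++ * away. */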
++ unsigned long flags; ++ ++ spin_lock_irqsave(&timeline->active_fences_lock, flags); ++ ++#if defined(DEBUG) && !defined(SUPPORT_AUTOVZ) ++ /* This is just a safety measure. Normally we should never see any ++ * unsignaled sw fences when we come here. Warn if we still do! ++ */ ++ WARN_ON(!list_empty(&timeline->active_fences)); ++#endif ++ ++ list_for_each_safe(entry, tmp, &timeline->active_fences) { ++ struct pvr_counting_fence *fence = ++ list_entry(entry, struct pvr_counting_fence, ++ active_list_entry); ++ dma_fence_signal(fence->fence); ++ dma_fence_put(fence->fence); ++ fence->fence = NULL; ++ list_del(&fence->active_list_entry); ++ kfree(fence); ++ } ++ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); ++} ++ ++static void pvr_counting_fence_timeline_destroy( ++ struct kref *kref) ++{ ++ struct pvr_counting_fence_timeline *timeline = ++ container_of(kref, struct pvr_counting_fence_timeline, kref); ++ ++ WARN_ON(!list_empty(&timeline->active_fences)); ++ ++ PVRSRVUnregisterDriverDbgRequestNotify(timeline->dbg_request_handle); ++ ++ pvr_sw_fence_context_destroy(timeline->context); ++ kfree(timeline); ++} ++ ++void pvr_counting_fence_timeline_put( ++ struct pvr_counting_fence_timeline *timeline) ++{ ++ kref_put(&timeline->kref, pvr_counting_fence_timeline_destroy); ++} ++ ++struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get( ++ struct pvr_counting_fence_timeline *timeline) ++{ ++ if (!timeline) ++ return NULL; ++ kref_get(&timeline->kref); ++ return timeline; ++} ++ ++struct dma_fence *pvr_counting_fence_create( ++ struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx) ++{ ++ unsigned long flags; ++ struct dma_fence *sw_fence; ++ struct pvr_counting_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL); ++ ++ if (!fence) ++ return NULL; ++ ++ sw_fence = pvr_sw_fence_create(timeline->context); ++ if (!sw_fence) ++ goto err_free_fence; ++ ++ fence->fence = dma_fence_get(sw_fence); ++ ++ spin_lock_irqsave(&timeline->active_fences_lock, flags); ++ ++ fence->value = timeline->next_value++; ++ if (sync_pt_idx) ++ *sync_pt_idx = fence->value; ++ ++ list_add_tail(&fence->active_list_entry, &timeline->active_fences); ++ ++ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); ++ ++ /* Counting fences can be signalled any time after creation */ ++ dma_fence_enable_sw_signaling(sw_fence); ++ ++ return sw_fence; ++ ++err_free_fence: ++ kfree(fence); ++ return NULL; ++} ++ ++bool pvr_counting_fence_timeline_inc( ++ struct pvr_counting_fence_timeline *timeline, u64 *sync_pt_idx) ++{ ++ struct list_head *entry, *tmp; ++ unsigned long flags; ++ bool res; ++ ++ spin_lock_irqsave(&timeline->active_fences_lock, flags); ++ ++ if (timeline->current_value == timeline->next_value-1) { ++ res = false; ++ goto exit_unlock; ++ } ++ ++ timeline->current_value++; ++ ++ if (sync_pt_idx) ++ *sync_pt_idx = timeline->current_value; ++ ++ list_for_each_safe(entry, tmp, &timeline->active_fences) { ++ struct pvr_counting_fence *fence = ++ list_entry(entry, struct pvr_counting_fence, ++ active_list_entry); ++ if (fence->value <= timeline->current_value) { ++ dma_fence_signal(fence->fence); ++ dma_fence_put(fence->fence); ++ fence->fence = NULL; ++ list_del(&fence->active_list_entry); ++ kfree(fence); ++ } ++ } ++ ++ res = true; ++ ++exit_unlock: ++ spin_unlock_irqrestore(&timeline->active_fences_lock, flags); ++ ++ return res; ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_counting_timeline.h b/drivers/gpu/drm/img-rogue/pvr_counting_timeline.h +new file mode 100644 +index 
000000000000..2cb8db1e5c50 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_counting_timeline.h +@@ -0,0 +1,68 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#if !defined(__PVR_COUNTING_TIMELINE_H__) ++#define __PVR_COUNTING_TIMELINE_H__ ++ ++#include "pvr_linux_fence.h" ++ ++struct pvr_counting_fence_timeline; ++ ++void pvr_counting_fence_timeline_dump_timeline( ++ void *data, ++ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, ++ void *dump_debug_file); ++ ++struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create( ++ const char *name); ++void pvr_counting_fence_timeline_put( ++ struct pvr_counting_fence_timeline *fence_timeline); ++struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get( ++ struct pvr_counting_fence_timeline *fence_timeline); ++struct dma_fence *pvr_counting_fence_create( ++ struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx); ++bool pvr_counting_fence_timeline_inc( ++ struct pvr_counting_fence_timeline *fence_timeline, u64 *sync_pt_idx); ++void pvr_counting_fence_timeline_force_complete( ++ struct pvr_counting_fence_timeline *fence_timeline); ++ ++#endif /* !defined(__PVR_COUNTING_TIMELINE_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_debug.c b/drivers/gpu/drm/img-rogue/pvr_debug.c +new file mode 100644 +index 000000000000..8cd34dc221ad +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_debug.c +@@ -0,0 +1,481 @@ ++/*************************************************************************/ /*! ++@File ++@Title Debug Functionality ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Provides kernel side Debug Functionality. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include ++#include ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "linkage.h" ++#include "pvrsrv.h" ++#include "osfunc.h" ++#include "di_server.h" ++ ++#if defined(PVRSRV_NEED_PVR_DPF) ++ ++/******** BUFFERED LOG MESSAGES ********/ ++ ++/* Because we don't want to have to handle CCB wrapping, each buffered ++ * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means ++ * there is the same fixed number of messages that can be stored, ++ * regardless of message length. ++ */ ++ ++#if defined(PVRSRV_DEBUG_CCB_MAX) ++ ++#define PVRSRV_DEBUG_CCB_MESG_MAX PVR_MAX_DEBUG_MESSAGE_LEN ++ ++typedef struct ++{ ++ const IMG_CHAR *pszFile; ++ IMG_INT iLine; ++ IMG_UINT32 ui32TID; ++ IMG_UINT32 ui32PID; ++ IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX]; ++ struct timeval sTimeVal; ++} ++PVRSRV_DEBUG_CCB; ++ ++static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX]; ++ ++static IMG_UINT giOffset; ++ ++/* protects access to gsDebugCCB */ ++static DEFINE_SPINLOCK(gsDebugCCBLock); ++ ++static void ++AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, ++ const IMG_CHAR *szBuffer) ++{ ++ unsigned long uiFlags; ++ ++ spin_lock_irqsave(&gsDebugCCBLock, uiFlags); ++ ++ gsDebugCCB[giOffset].pszFile = pszFileName; ++ gsDebugCCB[giOffset].iLine = ui32Line; ++ gsDebugCCB[giOffset].ui32TID = current->pid; ++ gsDebugCCB[giOffset].ui32PID = current->tgid; ++ ++ do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal); ++ ++ OSStringLCopy(gsDebugCCB[giOffset].pcMesg, szBuffer, ++ PVRSRV_DEBUG_CCB_MESG_MAX); ++ ++ giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX; ++ ++ spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags); ++} ++ ++void PVRSRVDebugPrintfDumpCCB(void) ++{ ++ int i; ++ unsigned long uiFlags; ++ ++ spin_lock_irqsave(&gsDebugCCBLock, uiFlags); ++ ++ for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++) ++ { ++ PVRSRV_DEBUG_CCB *psDebugCCBEntry = ++ &gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX]; ++ ++ /* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */ ++ if (!psDebugCCBEntry->pszFile) ++ { ++ continue; ++ } ++ ++ printk(KERN_ERR "%s:%d: (%ld.%ld, tid=%u, pid=%u) %s\n", ++ psDebugCCBEntry->pszFile, ++ psDebugCCBEntry->iLine, ++ (long)psDebugCCBEntry->sTimeVal.tv_sec, ++ (long)psDebugCCBEntry->sTimeVal.tv_usec, ++ psDebugCCBEntry->ui32TID, ++ psDebugCCBEntry->ui32PID, ++ psDebugCCBEntry->pcMesg); ++ ++ /* Clear this entry so it doesn't get printed the next time again. 
*/ ++ psDebugCCBEntry->pszFile = NULL; ++ } ++ ++ spin_unlock_irqrestore(&gsDebugCCBLock, uiFlags); ++} ++ ++#else /* defined(PVRSRV_DEBUG_CCB_MAX) */ ++ ++static INLINE void ++AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line, ++ const IMG_CHAR *szBuffer) ++{ ++ (void)pszFileName; ++ (void)szBuffer; ++ (void)ui32Line; ++} ++ ++void PVRSRVDebugPrintfDumpCCB(void) ++{ ++ /* Not available */ ++} ++ ++#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */ ++ ++static IMG_UINT32 gPVRDebugLevel = ++ ( ++ DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING ++#if defined(PVRSRV_DEBUG_CCB_MAX) ++ | DBGPRIV_BUFFERED ++#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */ ++#if defined(PVR_DPF_ADHOC_DEBUG_ON) ++ | DBGPRIV_DEBUG ++#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */ ++ ); ++ ++module_param(gPVRDebugLevel, uint, 0644); ++MODULE_PARM_DESC(gPVRDebugLevel, ++ "Sets the level of debug output (default 0x7)"); ++ ++IMG_UINT32 OSDebugLevel(void) ++{ ++ return gPVRDebugLevel; ++} ++ ++void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel) ++{ ++ gPVRDebugLevel = ui32DebugLevel; ++} ++ ++IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel) ++{ ++ return (gPVRDebugLevel & ui32DebugLevel) != 0; ++} ++ ++#else /* defined(PVRSRV_NEED_PVR_DPF) */ ++ ++IMG_UINT32 OSDebugLevel(void) ++{ ++ return 0; ++} ++ ++void OSSetDebugLevel(IMG_UINT32 ui32DebugLevel) ++{ ++ PVR_UNREFERENCED_PARAMETER(ui32DebugLevel); ++} ++ ++IMG_BOOL OSIsDebugLevel(IMG_UINT32 ui32DebugLevel) ++{ ++ PVR_UNREFERENCED_PARAMETER(ui32DebugLevel); ++ return IMG_FALSE; ++} ++ ++#endif /* defined(PVRSRV_NEED_PVR_DPF) */ ++ ++#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN ++ ++/* Message buffer for messages */ ++static IMG_CHAR gszBuffer[PVR_MAX_MSG_LEN + 1]; ++ ++/* The lock is used to control access to gszBuffer */ ++static DEFINE_SPINLOCK(gsDebugLock); ++ ++/* ++ * Append a string to a buffer using formatted conversion. ++ * The function takes a variable number of arguments, pointed ++ * to by the var args list. ++ */ ++__printf(3, 0) ++static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs) ++{ ++ IMG_UINT32 ui32Used; ++ IMG_UINT32 ui32Space; ++ IMG_INT32 i32Len; ++ ++ ui32Used = OSStringLength(pszBuf); ++ BUG_ON(ui32Used >= ui32BufSiz); ++ ui32Space = ui32BufSiz - ui32Used; ++ ++ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs); ++ pszBuf[ui32BufSiz - 1] = 0; ++ ++ /* Return true if string was truncated */ ++ return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space; ++} ++ ++/*************************************************************************/ /*! ++@Function PVRSRVReleasePrintf ++@Description To output an important message to the user in release builds ++@Input pszFormat The message format string ++@Input ... Zero or more arguments for use by the format string ++*/ /**************************************************************************/ ++void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) 
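++/* The shared gszBuffer is not re-entrant: both the "PVR_K: <pid>: " prefix
++ * written by snprintf() and the caller's message appended by VBAppend()
++ * are produced under gsDebugLock with interrupts disabled.
++ */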
++{ ++ va_list vaArgs; ++ unsigned long ulLockFlags = 0; ++ IMG_CHAR *pszBuf = gszBuffer; ++ IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); ++ IMG_INT32 result; ++ ++ va_start(vaArgs, pszFormat); ++ ++ spin_lock_irqsave(&gsDebugLock, ulLockFlags); ++ ++ result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K: %u: ", current->pid); ++ PVR_ASSERT(result>0); ++ ui32BufSiz -= result; ++ ++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) ++ { ++ printk(KERN_INFO "%s (truncated)\n", pszBuf); ++ } ++ else ++ { ++ printk(KERN_INFO "%s\n", pszBuf); ++ } ++ ++ spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); ++ va_end(vaArgs); ++} ++ ++#if defined(PVRSRV_NEED_PVR_TRACE) ++ ++/*************************************************************************/ /*! ++@Function PVRTrace ++@Description To output a debug message to the user ++@Input pszFormat The message format string ++@Input ... Zero or more arguments for use by the format string ++*/ /**************************************************************************/ ++void PVRSRVTrace(const IMG_CHAR *pszFormat, ...) ++{ ++ va_list VArgs; ++ unsigned long ulLockFlags = 0; ++ IMG_CHAR *pszBuf = gszBuffer; ++ IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); ++ IMG_INT32 result; ++ ++ va_start(VArgs, pszFormat); ++ ++ spin_lock_irqsave(&gsDebugLock, ulLockFlags); ++ ++ result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR: %u: ", current->pid); ++ PVR_ASSERT(result>0); ++ ui32BufSiz -= result; ++ ++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs)) ++ { ++ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf); ++ } ++ else ++ { ++ printk(KERN_ERR "%s\n", pszBuf); ++ } ++ ++ spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); ++ ++ va_end(VArgs); ++} ++ ++#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ ++ ++#if defined(PVRSRV_NEED_PVR_DPF) ++ ++/* ++ * Append a string to a buffer using formatted conversion. ++ * The function takes a variable number of arguments, calling ++ * VBAppend to do the actual work. ++ */ ++__printf(3, 4) ++static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...) ++{ ++ va_list VArgs; ++ IMG_BOOL bTrunc; ++ ++ va_start (VArgs, pszFormat); ++ ++ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs); ++ ++ va_end (VArgs); ++ ++ return bTrunc; ++} ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDebugPrintf ++@Description To output a debug message to the user ++@Input uDebugLevel The current debug level ++@Input pszFile The source file generating the message ++@Input uLine The line of the source file ++@Input pszFormat The message format string ++@Input ... Zero or more arguments for use by the format string ++*/ /**************************************************************************/ ++void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, ++ const IMG_CHAR *pszFullFileName, ++ IMG_UINT32 ui32Line, ++ const IMG_CHAR *pszFormat, ++ ...) 
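++/* Messages masked out by gPVRDebugLevel are rejected by the first test in
++ * the body, before va_start() and before gsDebugLock is taken, so disabled
++ * levels cost only a bitwise test.
++ */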
++{ ++ const IMG_CHAR *pszFileName = pszFullFileName; ++ IMG_CHAR *pszLeafName; ++ va_list vaArgs; ++ unsigned long ulLockFlags = 0; ++ IMG_CHAR *pszBuf = gszBuffer; ++ IMG_UINT32 ui32BufSiz = sizeof(gszBuffer); ++ ++ if (!(gPVRDebugLevel & ui32DebugLevel)) ++ { ++ return; ++ } ++ ++ va_start(vaArgs, pszFormat); ++ ++ spin_lock_irqsave(&gsDebugLock, ulLockFlags); ++ ++ switch (ui32DebugLevel) ++ { ++ case DBGPRIV_FATAL: ++ { ++ OSStringLCopy(pszBuf, "PVR_K:(Fatal): ", ui32BufSiz); ++ PVRSRV_REPORT_ERROR(); ++ break; ++ } ++ case DBGPRIV_ERROR: ++ { ++ OSStringLCopy(pszBuf, "PVR_K:(Error): ", ui32BufSiz); ++ PVRSRV_REPORT_ERROR(); ++ break; ++ } ++ case DBGPRIV_WARNING: ++ { ++ OSStringLCopy(pszBuf, "PVR_K:(Warn): ", ui32BufSiz); ++ break; ++ } ++ case DBGPRIV_MESSAGE: ++ { ++ OSStringLCopy(pszBuf, "PVR_K:(Mesg): ", ui32BufSiz); ++ break; ++ } ++ case DBGPRIV_VERBOSE: ++ { ++ OSStringLCopy(pszBuf, "PVR_K:(Verb): ", ui32BufSiz); ++ break; ++ } ++ case DBGPRIV_DEBUG: ++ { ++ OSStringLCopy(pszBuf, "PVR_K:(Debug): ", ui32BufSiz); ++ break; ++ } ++ case DBGPRIV_CALLTRACE: ++ case DBGPRIV_ALLOC: ++ case DBGPRIV_BUFFERED: ++ default: ++ { ++ OSStringLCopy(pszBuf, "PVR_K: ", ui32BufSiz); ++ break; ++ } ++ } ++ ++ if (current->pid == task_tgid_nr(current)) ++ { ++ (void) BAppend(pszBuf, ui32BufSiz, "%5u: ", current->pid); ++ } ++ else ++ { ++ (void) BAppend(pszBuf, ui32BufSiz, "%5u-%5u: ", task_tgid_nr(current) /* pid id of group*/, current->pid /* task id */); ++ } ++ ++ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs)) ++ { ++ printk(KERN_ERR "%s (truncated)\n", pszBuf); ++ } ++ else ++ { ++ IMG_BOOL bTruncated = IMG_FALSE; ++ ++#if !defined(__sh__) ++ pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '/'); ++ ++ if (pszLeafName) ++ { ++ pszFileName = pszLeafName+1; ++ } ++#endif /* __sh__ */ ++ ++#if defined(DEBUG) ++ { ++ static const IMG_CHAR *lastFile; ++ ++ if (lastFile == pszFileName) ++ { ++ bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line); ++ } ++ else ++ { ++ bTruncated = BAppend(pszBuf, ui32BufSiz, " [%s:%u]", pszFileName, ui32Line); ++ lastFile = pszFileName; ++ } ++ } ++#else ++ bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line); ++#endif ++ ++ if (bTruncated) ++ { ++ printk(KERN_ERR "%s (truncated)\n", pszBuf); ++ } ++ else ++ { ++ if (ui32DebugLevel & DBGPRIV_BUFFERED) ++ { ++ AddToBufferCCB(pszFileName, ui32Line, pszBuf); ++ } ++ else ++ { ++ printk(KERN_ERR "%s\n", pszBuf); ++ } ++ } ++ } ++ ++ spin_unlock_irqrestore(&gsDebugLock, ulLockFlags); ++ ++ va_end (vaArgs); ++} ++ ++#endif /* PVRSRV_NEED_PVR_DPF */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_debug.h b/drivers/gpu/drm/img-rogue/pvr_debug.h +new file mode 100644 +index 000000000000..56bbb13f1c16 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_debug.h +@@ -0,0 +1,898 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR Debug Declarations ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Provides debug functionality ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_DEBUG_H ++#define PVR_DEBUG_H ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++/*! @cond Doxygen_Suppress */ ++#if defined(_MSC_VER) ++# define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127)) ++#else ++# define MSC_SUPPRESS_4127 ++#endif ++/*! @endcond */ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define PVR_MAX_DEBUG_MESSAGE_LEN (512) /*!< Max length of a Debug Message */ ++ ++/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */ ++#define DBGPRIV_FATAL 0x001UL /*!< Debug-Fatal. Privately used by pvr_debug. */ ++#define DBGPRIV_ERROR 0x002UL /*!< Debug-Error. Privately used by pvr_debug. */ ++#define DBGPRIV_WARNING 0x004UL /*!< Debug-Warning. Privately used by pvr_debug. */ ++#define DBGPRIV_MESSAGE 0x008UL /*!< Debug-Message. Privately used by pvr_debug. */ ++#define DBGPRIV_VERBOSE 0x010UL /*!< Debug-Verbose. Privately used by pvr_debug. */ ++#define DBGPRIV_CALLTRACE 0x020UL /*!< Debug-CallTrace. Privately used by pvr_debug. */ ++#define DBGPRIV_ALLOC 0x040UL /*!< Debug-Alloc. Privately used by pvr_debug. */ ++#define DBGPRIV_BUFFERED 0x080UL /*!< Debug-Buffered. Privately used by pvr_debug. */ ++#define DBGPRIV_DEBUG 0x100UL /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */ ++#define DBGPRIV_LAST 0x100UL /*!< Always set to highest mask value. Privately used by pvr_debug. 
*/ ++ ++/* Enable DPF logging for locally from some make targets */ ++#if defined(PVRSRV_NEED_PVR_DPF_LOCAL) ++#undef PVRSRV_NEED_PVR_DPF ++#define PVRSRV_NEED_PVR_DPF ++#endif ++ ++#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG) ++#define PVRSRV_NEED_PVR_ASSERT ++#endif ++ ++#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF) ++#define PVRSRV_NEED_PVR_DPF ++#endif ++ ++#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING)) ++#define PVRSRV_NEED_PVR_TRACE ++#endif ++ ++#if !defined(DOXYGEN) ++/*************************************************************************/ /* ++PVRSRVGetErrorString ++Returns a string describing the provided PVRSRV_ERROR code ++NB No doxygen comments provided as this function does not require porting ++ for other operating systems ++*/ /**************************************************************************/ ++const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError); ++#define PVRSRVGETERRORSTRING PVRSRVGetErrorString ++#endif ++ ++/* PVR_ASSERT() and PVR_DBG_BREAK handling */ ++ ++#if defined(__KLOCWORK__) ++/* A dummy no-return function to be used under Klocwork to mark unreachable ++ paths instead of abort() in order to avoid MISRA.STDLIB.ABORT issues. */ ++__noreturn void klocwork_abort(void); ++#endif ++ ++#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN) ++ ++/* Unfortunately the Klocwork static analysis checker doesn't understand our ++ * ASSERT macros. Thus it reports lots of false positive. Defining our Assert ++ * macros in a special way when the code is analysed by Klocwork avoids ++ * them. ++ */ ++#if defined(__KLOCWORK__) ++#define PVR_ASSERT(x) do { if (!(x)) {klocwork_abort();} } while (false) ++#else /* ! __KLOCWORKS__ */ ++ ++#if defined(_WIN32) ++#define PVR_ASSERT(expr) do \ ++ { \ ++ MSC_SUPPRESS_4127 \ ++ if (unlikely(!(expr))) \ ++ { \ ++ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\ ++ "*** Debug assertion failed!"); \ ++ __debugbreak(); \ ++ } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++#else ++ ++#if defined(__linux__) && defined(__KERNEL__) ++#include ++#include ++ ++/* In Linux kernel mode, use WARN_ON() directly. This produces the ++ * correct filename and line number in the warning message. ++ */ ++#define PVR_ASSERT(EXPR) do \ ++ { \ ++ if (unlikely(!(EXPR))) \ ++ { \ ++ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__, \ ++ "Debug assertion failed!"); \ ++ WARN_ON(1); \ ++ } \ ++ } while (false) ++ ++#else /* defined(__linux__) && defined(__KERNEL__) */ ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDebugAssertFail ++@Description Indicate to the user that a debug assertion has failed and ++ prevent the program from continuing. ++ Invoked from the macro PVR_ASSERT(). ++@Input pszFile The name of the source file where the assertion failed ++@Input ui32Line The line number of the failed assertion ++@Input pszAssertion String describing the assertion ++@Return NEVER! 
++*/ /**************************************************************************/ ++IMG_EXPORT void IMG_CALLCONV __noreturn ++PVRSRVDebugAssertFail(const IMG_CHAR *pszFile, ++ IMG_UINT32 ui32Line, ++ const IMG_CHAR *pszAssertion); ++ ++#define PVR_ASSERT(EXPR) do \ ++ { \ ++ if (unlikely(!(EXPR))) \ ++ { \ ++ PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR); \ ++ } \ ++ } while (false) ++ ++#endif /* defined(__linux__) && defined(__KERNEL__) */ ++#endif /* defined(_WIN32) */ ++#endif /* defined(__KLOCWORK__) */ ++ ++#if defined(__KLOCWORK__) ++ #define PVR_DBG_BREAK do { klocwork_abort(); } while (false) ++#else ++ #if defined(WIN32) ++ #define PVR_DBG_BREAK __debugbreak() /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */ ++ #else ++ #if defined(PVR_DBG_BREAK_ASSERT_FAIL) ++ /*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */ ++ #if defined(_WIN32) ++ #define PVR_DBG_BREAK DBG_BREAK ++ #else ++ #if defined(__linux__) && defined(__KERNEL__) ++ #define PVR_DBG_BREAK BUG() ++ #else ++ #define PVR_DBG_BREAK PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK") ++ #endif ++ #endif ++ #else ++ /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ ++ #define PVR_DBG_BREAK ++ #endif ++ #endif ++#endif ++ ++ ++#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ ++ /* Unfortunately the Klocwork static analysis checker doesn't understand our ++ * ASSERT macros. Thus it reports lots of false positive. Defining our Assert ++ * macros in a special way when the code is analysed by Klocwork avoids ++ * them. ++ */ ++ #if defined(__KLOCWORK__) && !defined(SERVICES_SC) ++ #define PVR_ASSERT(EXPR) do { if (!(EXPR)) {klocwork_abort();} } while (false) ++ #else ++ #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */ ++ #endif ++ ++ #define PVR_DBG_BREAK /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */ ++ ++#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ ++ ++ ++/* PVR_DPF() handling */ ++ ++#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN) ++ ++ /* New logging mechanism */ ++ #define PVR_DBG_FATAL DBGPRIV_FATAL /*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */ ++ #define PVR_DBG_ERROR DBGPRIV_ERROR /*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */ ++ #define PVR_DBG_WARNING DBGPRIV_WARNING /*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */ ++ #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE /*!< Debug level passed to PVRSRVDebugPrintf() for information only. */ ++ #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE /*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */ ++ #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE /*!< Debug level passed to PVRSRVDebugPrintf() for function tracing purposes. */ ++ #define PVR_DBG_ALLOC DBGPRIV_ALLOC /*!< Debug level passed to PVRSRVDebugPrintf() for tracking some of drivers memory operations. */ ++ #define PVR_DBG_BUFFERED DBGPRIV_BUFFERED /*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */ ++ #define PVR_DBG_DEBUG DBGPRIV_DEBUG /*!< Debug level passed to PVRSRVDebugPrintf() for debug messages. */ ++ ++ /* These levels are always on with PVRSRV_NEED_PVR_DPF */ ++ /*! @cond Doxygen_Suppress */ ++ #define PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__) ++ #define PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__) ++ #define PVR_DPF_0x080UL(...) 
PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__) ++ ++ /* ++ * The AdHoc-Debug level is only supported when enabled in the local ++ * build environment and may need to be used in both debug and release ++ * builds. An error is generated in the formal build if it is checked in. ++ */ ++#if defined(PVR_DPF_ADHOC_DEBUG_ON) ++ #define PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__) ++#else ++ /* Use an undefined token here to stop compilation dead in the offending module */ ++ #define PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing ++#endif ++ ++ /* Some are compiled out completely in release builds */ ++#if defined(DEBUG) || defined(DOXYGEN) ++ #define PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__) ++ #define PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__) ++ #define PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__) ++ #define PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__) ++ #define PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__) ++#else ++ #define PVR_DPF_0x004UL(...) ++ #define PVR_DPF_0x008UL(...) ++ #define PVR_DPF_0x010UL(...) ++ #define PVR_DPF_0x020UL(...) ++ #define PVR_DPF_0x040UL(...) ++#endif ++ ++ /* Translate the different log levels to separate macros ++ * so they can each be compiled out. ++ */ ++#if defined(DEBUG) ++ #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__) ++#else ++ #define PVR_DPF_EX(lvl, ...) PVR_DPF_ ## lvl ("", __LINE__, __VA_ARGS__) ++#endif ++ /*! @endcond */ ++ ++ /* Get rid of the double bracketing */ ++ #define PVR_DPF(x) PVR_DPF_EX x ++ ++ #define PVR_LOG_ERROR(_rc, _call) \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)) ++ ++ #define PVR_LOG_IF_ERROR(_rc, _call) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_WARN_IF_ERROR(_rc, _call) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do \ ++ { if (unlikely(_expr == NULL)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do \ ++ { if (unlikely(_expr == NULL)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", #_expr, __func__)); \ ++ _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ return _rc; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ return; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do \ ++ { if (unlikely(_rc 
!= PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do \ ++ { PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \ ++ _err = _rc; \ ++ goto _go; \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_IF_FALSE(_expr, _msg) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ ++ return _rc; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ ++ return; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", _param, __func__)); \ ++ return PVRSRV_ERROR_INVALID_PARAMS; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s invalid in %s()", #_expr, __func__)); \ ++ _err = PVRSRV_ERROR_INVALID_PARAMS; \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_MSG(_lvl, _msg) \ ++ PVR_DPF((_lvl, ("In %s() "_msg), __func__)) ++ ++ #define PVR_LOG_VA(_lvl, _msg, ...) \ ++ PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)) ++ ++ #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((_lvl, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ return _rc; \ ++ } MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ goto _go; \ ++ } MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, ("At %s: "_msg), __func__, __VA_ARGS__)); \ ++ return _rc; \ ++ } MSC_SUPPRESS_4127\ ++ } while (false) ++ ++ #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do \ ++ { if (unlikely(!(_expr))) { \ ++ PVR_DPF((PVR_DBG_ERROR, ("In %s() "_msg), __func__, __VA_ARGS__)); \ ++ goto _go; \ ++ } MSC_SUPPRESS_4127\ ++ } while (false) ++ ++#else /* defined(PVRSRV_NEED_PVR_DPF) */ ++ ++ #define PVR_DPF(X) /*!< Null Implementation of PowerVR Debug Printf (does nothing) */ ++ ++ #define PVR_LOG_MSG(_lvl, _msg) ++ #define PVR_LOG_VA(_lvl, _msg, ...) 
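++ /* Note that several of the stubs below still expand to (void)(_rc) or
++ * (void)(_expr): the argument is evaluated and discarded so variables
++ * referenced only for logging do not become unused-variable warnings. */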
++ #define PVR_LOG_ERROR(_rc, _call) (void)(_rc) ++ #define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc) ++ #define PVR_WARN_IF_ERROR(_rc, _call) (void)(_rc) ++ ++ #define PVR_LOG_IF_ERROR_VA(_lvl, _rc, _msg, ...) (void)(_rc) ++ #define PVR_LOG_IF_FALSE_VA(_lvl, _expr, _msg, ...) (void)(_expr) ++ ++ #define PVR_LOG_RETURN_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return PVRSRV_ERROR_OUT_OF_MEMORY; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_NOMEM(_expr, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_RETURN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_RETURN_IF_ERROR_VA(_rc, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_RETURN_VOID_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_GOTO_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_ERROR_VA(_rc, _go, _msg, ...) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_WITH_ERROR(_call, _err, _rc, _go) do { _err = _rc; goto _go; MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr) ++ #define PVR_LOG_RETURN_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_RETURN_IF_FALSE_VA(_expr, _rc, _msg, ...) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_RETURN_VOID_IF_FALSE(_expr, _msg) do { if (unlikely(!(_expr))) { return; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_FALSE_VA(_expr, _go, _msg, ...) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_LOG_RETURN_IF_INVALID_PARAM(_expr, _param) do { if (unlikely(!(_expr))) { return PVRSRV_ERROR_INVALID_PARAMS; } MSC_SUPPRESS_4127 } while (false) ++ #define PVR_LOG_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do { if (unlikely(!(_expr))) { _err = PVRSRV_ERROR_INVALID_PARAMS; goto _go; } MSC_SUPPRESS_4127 } while (false) ++ ++ #undef PVR_DPF_FUNCTION_TRACE_ON ++ ++#endif /* defined(PVRSRV_NEED_PVR_DPF) */ ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDebugPrintf ++@Description Output a debug message to the user, using an OS-specific ++ method, to a log or console which can be read by developers ++ Invoked from the macro PVR_DPF(). ++@Input ui32DebugLevel The debug level of the message. This can ++ be used to restrict the output of debug ++ messages based on their severity. ++ If this is PVR_DBG_BUFFERED, the message ++ should be written into a debug circular ++ buffer instead of being output immediately ++ (useful when performance would otherwise ++ be adversely affected). ++ The debug circular buffer shall only be ++ output when PVRSRVDebugPrintfDumpCCB() is ++ called. ++@Input pszFileName The source file containing the code that is ++ generating the message ++@Input ui32Line The line number in the source file ++@Input pszFormat The formatted message string ++@Input ... 
Zero or more arguments for use by the ++ formatted string ++@Return None ++*/ /**************************************************************************/ ++IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel, ++ const IMG_CHAR *pszFileName, ++ IMG_UINT32 ui32Line, ++ const IMG_CHAR *pszFormat, ++ ...) __printf(4, 5); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDebugPrintfDumpCCB ++@Description When PVRSRVDebugPrintf() is called with the ui32DebugLevel ++ specified as DBGPRIV_BUFFERED, the debug shall be written to ++ the debug circular buffer instead of being output immediately. ++ (This could be used to obtain debug without incurring a ++ performance hit by printing it at that moment). ++ This function shall dump the contents of that debug circular ++ buffer to be output in an OS-specific method to a log or ++ console which can be read by developers. ++@Return None ++*/ /**************************************************************************/ ++IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void); ++ ++#if !defined(DOXYGEN) ++#define PVR_DPF_FUNC__(lvl, message, ...) PVR_DPF((lvl, "%s: " message, __func__, ##__VA_ARGS__)) ++#define PVR_DPF_FUNC(x) PVR_DPF_FUNC__ x ++#endif /*!defined(DOXYGEN) */ ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_RETURN_IF_ERROR macro. ++ */ ++#define PVR_RETURN_IF_ERROR(_rc) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ return _rc; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_RETURN_IF_FALSE macro. ++ */ ++#define PVR_RETURN_IF_FALSE(_expr, _rc) do \ ++ { if (unlikely(!(_expr))) { \ ++ return _rc; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_RETURN_IF_INVALID_PARAM macro. ++ */ ++#define PVR_RETURN_IF_INVALID_PARAM(_expr) do \ ++ { if (unlikely(!(_expr))) { \ ++ return PVRSRV_ERROR_INVALID_PARAMS; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_RETURN_IF_NOMEM macro. ++ */ ++#define PVR_RETURN_IF_NOMEM(_expr) do \ ++ { if (unlikely(!(_expr))) { \ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_IF_NOMEM macro. ++ */ ++#define PVR_GOTO_IF_NOMEM(_expr, _err, _go) do \ ++ { if (unlikely(_expr == NULL)) { \ ++ _err = PVRSRV_ERROR_OUT_OF_MEMORY; \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_IF_INVALID_PARAM macro. ++ */ ++#define PVR_GOTO_IF_INVALID_PARAM(_expr, _err, _go) do \ ++ { if (unlikely(!(_expr))) { \ ++ _err = PVRSRV_ERROR_INVALID_PARAMS; \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_IF_FALSE macro. 
++ */ ++#define PVR_GOTO_IF_FALSE(_expr, _go) do \ ++ { if (unlikely(!(_expr))) { \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_IF_ERROR macro. ++ */ ++#define PVR_GOTO_IF_ERROR(_rc, _go) do \ ++ { if (unlikely(_rc != PVRSRV_OK)) { \ ++ goto _go; } \ ++ MSC_SUPPRESS_4127\ ++ } while (false) ++ ++/* Note: Use only when a log message due to the error absolutely should not ++ * be printed. Otherwise use PVR_LOG_GOTO_WITH_ERROR macro. ++ */ ++#define PVR_GOTO_WITH_ERROR(_err, _rc, _go) do \ ++ { _err = _rc; goto _go; \ ++ MSC_SUPPRESS_4127 \ ++ } while (false) ++ ++/*! @cond Doxygen_Suppress */ ++#if defined(PVR_DPF_FUNCTION_TRACE_ON) ++ ++ #define PVR_DPF_ENTERED \ ++ PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered", __func__, __LINE__)) ++ ++ #define PVR_DPF_ENTERED1(p1) \ ++ PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1))) ++ ++ #define PVR_DPF_RETURN_RC(a) \ ++ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_DPF_RETURN_RC1(a,p1) \ ++ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_DPF_RETURN_VAL(a) \ ++ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned with value", __func__, __LINE__)); return (a); MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_DPF_RETURN_OK \ ++ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (false) ++ ++ #define PVR_DPF_RETURN \ ++ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (false) ++ ++ #if !defined(DEBUG) ++ #error PVR DPF Function trace enabled in release build, rectify ++ #endif ++ ++#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ ++ ++ #define PVR_DPF_ENTERED ++ #define PVR_DPF_ENTERED1(p1) ++ #define PVR_DPF_RETURN_RC(a) return (a) ++ #define PVR_DPF_RETURN_RC1(a,p1) return (a) ++ #define PVR_DPF_RETURN_VAL(a) return (a) ++ #define PVR_DPF_RETURN_OK return PVRSRV_OK ++ #define PVR_DPF_RETURN return ++ ++#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */ ++/*! @endcond */ ++ ++#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__) ++/*Use PVR_DPF() unless message is necessary in release build */ ++#define PVR_LOG(X) PVRSRVReleasePrintf X ++ ++/*************************************************************************/ /*! ++@Function PVRSRVReleasePrintf ++@Description Output an important message, using an OS-specific method, ++ to the Server log or console which will always be output in ++ both release and debug builds. ++ Invoked from the macro PVR_LOG(). Used in Services Server only. ++@Input pszFormat The message format string ++@Input ... Zero or more arguments for use by the format string ++@Return None ++*/ /**************************************************************************/ ++void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) 
__printf(1, 2); ++#endif ++ ++/* PVR_TRACE() handling */ ++ ++#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN) ++ ++ #define PVR_TRACE(X) PVRSRVTrace X /*!< PowerVR Debug Trace Macro */ ++ /* Empty string implementation that is -O0 build friendly */ ++ #define PVR_TRACE_EMPTY_LINE() PVR_TRACE(("%s", "")) ++ ++/*************************************************************************/ /*! ++@Function PVRTrace ++@Description Output a debug message to the user ++ Invoked from the macro PVR_TRACE(). ++@Input pszFormat The message format string ++@Input ... Zero or more arguments for use by the format string ++*/ /**************************************************************************/ ++IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... ) ++ __printf(1, 2); ++ ++#else /* defined(PVRSRV_NEED_PVR_TRACE) */ ++ /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */ ++ #define PVR_TRACE(X) ++ ++#endif /* defined(PVRSRV_NEED_PVR_TRACE) */ ++ ++ ++#if defined(PVRSRV_NEED_PVR_ASSERT) ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(TRUNCATE_64BITS_TO_32BITS) ++#endif ++ INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput) ++ { ++ IMG_UINT32 uiTruncated; ++ ++ uiTruncated = (IMG_UINT32)uiInput; ++ PVR_ASSERT(uiInput == uiTruncated); ++ return uiTruncated; ++ } ++ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(TRUNCATE_64BITS_TO_SIZE_T) ++#endif ++ INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput) ++ { ++ size_t uiTruncated; ++ ++ uiTruncated = (size_t)uiInput; ++ PVR_ASSERT(uiInput == uiTruncated); ++ return uiTruncated; ++ } ++ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(TRUNCATE_SIZE_T_TO_32BITS) ++#endif ++ INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput) ++ { ++ IMG_UINT32 uiTruncated; ++ ++ uiTruncated = (IMG_UINT32)uiInput; ++ PVR_ASSERT(uiInput == uiTruncated); ++ return uiTruncated; ++ } ++ ++ ++#else /* defined(PVRSRV_NEED_PVR_ASSERT) */ ++ #define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr)) ++ #define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr)) ++ #define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr)) ++#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */ ++ ++/*! @cond Doxygen_Suppress */ ++/* Macros used to trace calls */ ++#if defined(DEBUG) ++ #define PVR_DBG_FILELINE , (__FILE__), (__LINE__) ++ #define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line ++ #define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line ++ #define PVR_DBG_FILELINE_FMT " %s:%u" ++ #define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \ ++ PVR_UNREFERENCED_PARAMETER(ui32Line); } while (false) ++#else ++ #define PVR_DBG_FILELINE ++ #define PVR_DBG_FILELINE_PARAM ++ #define PVR_DBG_FILELINE_ARG ++ #define PVR_DBG_FILELINE_FMT ++ #define PVR_DBG_FILELINE_UNREF() ++#endif ++/*! @endcond */ ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++/*! ++ @def PVR_ASSERT ++ @brief Aborts the program if assertion fails. ++ ++ The macro will be defined only when PVRSRV_NEED_PVR_ASSERT macro is ++ enabled. It's ignored otherwise. ++ ++ @def PVR_DPF ++ @brief PowerVR Debug Printf logging macro used throughout the driver. ++ ++ The macro allows to print logging messages to appropriate log. The ++ destination log is based on the component (user space / kernel space) and ++ operating system (Linux, Android, etc.). ++ ++ The macro also supports severity levels that allow to turn on/off messages ++ based on their importance. 
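++
++ A typical call site (the error variable here is hypothetical), showing
++ the double parentheses that the PVR_DPF(x) -> PVR_DPF_EX x expansion
++ consumes:
++
++ PVR_DPF((PVR_DBG_ERROR, "%s: request failed (%d)", __func__, err));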
++
++ This macro will print messages with severity level higher than error only
++ if the PVRSRV_NEED_PVR_DPF macro is defined.
++
++ @def PVR_LOG_ERROR
++ @brief Logs error.
++
++ @def PVR_LOG_IF_ERROR
++ @brief Logs error if not PVRSRV_OK.
++
++ @def PVR_WARN_IF_ERROR
++ @brief Logs warning if not PVRSRV_OK.
++
++ @def PVR_LOG_RETURN_IF_NOMEM
++ @brief Logs error if expression is NULL and returns PVRSRV_ERROR_OUT_OF_MEMORY.
++
++ @def PVR_LOG_GOTO_IF_NOMEM
++ @brief Logs error if expression is NULL and jumps to given label.
++
++ @def PVR_LOG_RETURN_IF_ERROR
++ @brief Logs error if not PVRSRV_OK and returns the error.
++
++ @def PVR_LOG_RETURN_VOID_IF_ERROR
++ @brief Logs error if not PVRSRV_OK and returns (used in functions that return void).
++
++ @def PVR_LOG_GOTO_IF_ERROR
++ @brief Logs error if not PVRSRV_OK and jumps to label.
++
++ @def PVR_LOG_GOTO_WITH_ERROR
++ @brief Logs error, sets the error code and goes to a label.
++
++ @def PVR_LOG_IF_FALSE
++ @brief Prints error message if expression is false.
++
++ @def PVR_LOG_RETURN_IF_FALSE
++ @brief Prints error message if expression is false and returns given error.
++
++ @def PVR_LOG_RETURN_VOID_IF_FALSE
++ @brief Prints error message if expression is false and returns (used in functions that return void).
++
++ @def PVR_LOG_GOTO_IF_FALSE
++ @brief Prints error message if expression is false and jumps to label.
++
++ @def PVR_LOG_RETURN_IF_INVALID_PARAM
++ @brief Prints error message if expression is false and returns PVRSRV_ERROR_INVALID_PARAMS.
++
++ @def PVR_LOG_GOTO_IF_INVALID_PARAM
++ @brief Prints error message if expression is false and jumps to label.
++
++ @def PVR_RETURN_IF_ERROR
++ @brief Returns the passed error code if it's different from PVRSRV_OK.
++
++ @def PVR_RETURN_IF_FALSE
++ @brief Returns the passed error code if expression is false.
++
++ @def PVR_RETURN_IF_INVALID_PARAM
++ @brief Returns PVRSRV_ERROR_INVALID_PARAMS if expression is false.
++
++ @def PVR_RETURN_IF_NOMEM
++ @brief Returns PVRSRV_ERROR_OUT_OF_MEMORY if expression is NULL.
++
++ @def PVR_GOTO_IF_NOMEM
++ @brief Goes to a label if expression is NULL.
++
++ @def PVR_GOTO_IF_INVALID_PARAM
++ @brief Goes to a label if expression is false.
++
++ @def PVR_GOTO_IF_FALSE
++ @brief Goes to a label if expression is false.
++
++ @def PVR_GOTO_IF_ERROR
++ @brief Goes to a label if the error code is different from PVRSRV_OK.
++
++ @def PVR_GOTO_WITH_ERROR
++ @brief Sets the error code and goes to a label.
++
++ @def PVR_LOG
++ @brief Prints message to a log unconditionally, regardless of severity level.
++
++ This macro prints messages only if the PVRSRV_NEED_PVR_LOG macro is defined.
++
++ @def PVR_LOG_MSG
++ @brief Prints message to a log with the given log-level.
++
++ @def PVR_LOG_VA
++ @brief Prints message with var-args to a log with the given log-level.
++
++ @def PVR_LOG_IF_ERROR_VA
++ @brief Prints message with var-args to a log if the error code is different from PVRSRV_OK.
++
++ @def PVR_LOG_IF_FALSE_VA
++ @brief Prints message with var-args if expression is false.
++
++ @def PVR_LOG_RETURN_IF_ERROR_VA
++ @brief Prints message with var-args to a log and returns the error code.
++
++ @def PVR_LOG_GOTO_IF_ERROR_VA
++ @brief Prints message with var-args to a log and goes to a label if the error code is different from PVRSRV_OK.
++
++ @def PVR_LOG_RETURN_IF_FALSE_VA
++ @brief Logs the error message with var-args if the expression is false and returns the error code.
++
++ @def PVR_LOG_GOTO_IF_FALSE_VA
++ @brief Logs the error message with var-args and goes to a label if the expression is false.
++
++ @def PVR_TRACE_EMPTY_LINE
++ @brief Prints an empty line to a log (PVRSRV_NEED_PVR_TRACE must be defined).
++
++ @def TRUNCATE_64BITS_TO_32BITS
++ @brief Truncates a 64-bit value to a 32-bit value (with possible precision loss).
++
++ @def TRUNCATE_64BITS_TO_SIZE_T
++ @brief Truncates a 64-bit value to a size_t value (with possible precision loss).
++
++ @def TRUNCATE_SIZE_T_TO_32BITS
++ @brief Truncates a size_t value to a 32-bit value (with possible precision loss).
++ */
++
++#endif /* PVR_DEBUG_H */
++
++/******************************************************************************
++ End of file (pvr_debug.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/pvr_debugfs.c b/drivers/gpu/drm/img-rogue/pvr_debugfs.c
+new file mode 100644
+index 000000000000..fa6a94c646bd
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pvr_debugfs.c
+@@ -0,0 +1,623 @@
++/*************************************************************************/ /*!
++@File
++@Title          DebugFS implementation of Debug Info interface.
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Implements the osdi_impl.h API to provide access to the
++                driver's debug data via DebugFS.
++
++                Note about locking in the DebugFS module.
++
++                Access to DebugFS is protected against the race where any
++                file could be removed while being accessed or accessed while
++                being removed. Any calls to debugfs_remove() will block
++                until all operations are finished.
++
++                See the implementation of the proxy file operations
++                (FULL_PROXY_FUNC) and of debugfs_file_[get|put]() in
++                fs/debugfs/file.c in the Linux kernel sources for more
++                details.
++
++                Note about locking for sequential files.
++
++                The seq_file objects have a mutex that protects access
++                to all of the file operations, hence all of the sequential
++                *read* operations are protected.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include ++#include ++#include ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "pvr_debugfs.h" ++#include "osfunc.h" ++#include "allocmem.h" ++#include "pvr_bridge_k.h" ++#include "pvr_uaccess.h" ++#include "osdi_impl.h" ++ ++#define _DRIVER_THREAD_ENTER() \ ++ do { \ ++ PVRSRV_ERROR eLocalError = PVRSRVDriverThreadEnter(); \ ++ if (eLocalError != PVRSRV_OK) \ ++ { \ ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s", \ ++ __func__, PVRSRVGetErrorString(eLocalError))); \ ++ return OSPVRSRVToNativeError(eLocalError); \ ++ } \ ++ } while (0) ++ ++#define _DRIVER_THREAD_EXIT() \ ++ PVRSRVDriverThreadExit() ++ ++#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBG_ERROR ++ ++typedef struct DFS_DIR ++{ ++ struct dentry *psDirEntry; ++ struct DFS_DIR *psParentDir; ++} DFS_DIR; ++ ++typedef struct DFS_ENTRY ++{ ++ OSDI_IMPL_ENTRY sImplEntry; ++ DI_ITERATOR_CB sIterCb; ++} DFS_ENTRY; ++ ++typedef struct DFS_FILE ++{ ++ struct dentry *psFileEntry; ++ struct DFS_DIR *psParentDir; ++ const struct seq_operations *psSeqOps; ++ struct DFS_ENTRY sEntry; ++ DI_ENTRY_TYPE eType; ++} DFS_FILE; ++ ++/* ----- native callbacks interface ----------------------------------------- */ ++ ++static void _WriteData(void *pvNativeHandle, const void *pvData, ++ IMG_UINT32 uiSize) ++{ ++ seq_write(pvNativeHandle, pvData, uiSize); ++} ++ ++static void _VPrintf(void *pvNativeHandle, const IMG_CHAR *pszFmt, ++ va_list pArgs) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) ++ seq_vprintf(pvNativeHandle, pszFmt, pArgs); ++#else ++ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; ++ ++ vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFmt, pArgs); ++ seq_printf(pvNativeHandle, "%s", szBuffer); ++#endif ++} ++ ++static void _Puts(void *pvNativeHandle, const IMG_CHAR *pszStr) ++{ ++ seq_puts(pvNativeHandle, pszStr); ++} ++ ++static IMG_BOOL _HasOverflowed(void *pvNativeHandle) ++{ ++ struct seq_file *psSeqFile = pvNativeHandle; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) ++ return seq_has_overflowed(psSeqFile); ++#else ++ return psSeqFile->count == psSeqFile->size; ++#endif ++} ++ ++static OSDI_IMPL_ENTRY_CB _g_sEntryCallbacks = { ++ .pfnWrite = _WriteData, ++ .pfnVPrintf = _VPrintf, ++ .pfnPuts = _Puts, ++ .pfnHasOverflowed = _HasOverflowed, ++}; ++ ++/* ----- sequential file operations ----------------------------------------- */ ++ ++static void *_Start(struct seq_file *psSeqFile, loff_t *puiPos) ++{ ++ DFS_ENTRY *psEntry = psSeqFile->private; ++ ++ void *pvRet = psEntry->sIterCb.pfnStart(&psEntry->sImplEntry, puiPos); ++ ++ if (pvRet == DI_START_TOKEN) ++ { ++ return SEQ_START_TOKEN; ++ } ++ ++ return pvRet; ++} ++ ++static void _Stop(struct seq_file *psSeqFile, void *pvPriv) ++{ ++ DFS_ENTRY *psEntry = psSeqFile->private; ++ ++ psEntry->sIterCb.pfnStop(&psEntry->sImplEntry, pvPriv); ++} ++ ++static void *_Next(struct seq_file *psSeqFile, void 
*pvPriv, loff_t *puiPos) ++{ ++ DFS_ENTRY *psEntry = psSeqFile->private; ++ ++ return psEntry->sIterCb.pfnNext(&psEntry->sImplEntry, pvPriv, puiPos); ++} ++ ++static int _Show(struct seq_file *psSeqFile, void *pvPriv) ++{ ++ DFS_ENTRY *psEntry = psSeqFile->private; ++ ++ if (pvPriv == SEQ_START_TOKEN) ++ { ++ pvPriv = DI_START_TOKEN; ++ } ++ ++ return psEntry->sIterCb.pfnShow(&psEntry->sImplEntry, pvPriv); ++} ++ ++static struct seq_operations _g_sSeqOps = { ++ .start = _Start, ++ .stop = _Stop, ++ .next = _Next, ++ .show = _Show ++}; ++ ++/* ----- file operations ---------------------------------------------------- */ ++ ++static int _Open(struct inode *psINode, struct file *psFile) ++{ ++ DFS_FILE *psDFSFile; ++ int iRes; ++ ++ PVR_LOG_RETURN_IF_FALSE(psINode != NULL && psINode->i_private != NULL, ++ "psDFSFile is NULL", -EIO); ++ ++ _DRIVER_THREAD_ENTER(); ++ ++ psDFSFile = psINode->i_private; ++ ++ if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) ++ { ++ iRes = seq_open(psFile, psDFSFile->psSeqOps); ++ } ++ else ++ { ++ /* private data is NULL as it's going to be set below */ ++ iRes = single_open(psFile, _Show, NULL); ++ } ++ ++ if (iRes == 0) ++ { ++ struct seq_file *psSeqFile = psFile->private_data; ++ ++ DFS_ENTRY *psEntry = OSAllocMem(sizeof(*psEntry)); ++ if (psEntry == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocMem() failed", __func__)); ++ iRes = -ENOMEM; ++ goto return_; ++ } ++ ++ *psEntry = psDFSFile->sEntry; ++ psSeqFile->private = psEntry; ++ psEntry->sImplEntry.pvNative = psSeqFile; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d", ++ __func__, iRes)); ++ } ++ ++return_: ++ _DRIVER_THREAD_EXIT(); ++ ++ return iRes; ++} ++ ++static int _Close(struct inode *psINode, struct file *psFile) ++{ ++ DFS_FILE *psDFSFile = psINode->i_private; ++ DFS_ENTRY *psEntry; ++ int iRes; ++ ++ PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL", ++ -EIO); ++ ++ _DRIVER_THREAD_ENTER(); ++ ++ /* save pointer to DFS_ENTRY */ ++ psEntry = ((struct seq_file *) psFile->private_data)->private; ++ ++ if (psDFSFile->sEntry.sIterCb.pfnStart != NULL) ++ { ++ iRes = seq_release(psINode, psFile); ++ } ++ else ++ { ++ iRes = single_release(psINode, psFile); ++ } ++ ++ /* free DFS_ENTRY allocated in _Open */ ++ OSFreeMem(psEntry); ++ ++ /* Validation check as seq_release (and single_release which calls it) ++ * never fail */ ++ if (iRes != 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release psFile, returning %d", ++ __func__, iRes)); ++ } ++ ++ _DRIVER_THREAD_EXIT(); ++ ++ return iRes; ++} ++ ++static ssize_t _Read(struct file *psFile, char __user *pcBuffer, ++ size_t uiCount, loff_t *puiPos) ++{ ++ DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private; ++ ssize_t iRes = -1; ++ ++ _DRIVER_THREAD_ENTER(); ++ ++ if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC) ++ { ++ iRes = seq_read(psFile, pcBuffer, uiCount, puiPos); ++ if (iRes < 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() " ++ "returned %zd", __func__, iRes)); ++ goto return_; ++ } ++ } ++ else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS) ++ { ++ DFS_ENTRY *psEntry = &psDFSFile->sEntry; ++ IMG_UINT64 ui64Count = uiCount; ++ ++ IMG_CHAR *pcLocalBuffer = OSAllocMem(uiCount); ++ PVR_GOTO_IF_FALSE(pcLocalBuffer != NULL, return_); ++ ++ iRes = psEntry->sIterCb.pfnRead(pcLocalBuffer, ui64Count, puiPos, ++ psEntry->sImplEntry.pvPrivData); ++ if (iRes < 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to read from file, pfnRead() " ++ "returned %zd", 
++	                __func__, iRes));
++			OSFreeMem(pcLocalBuffer);
++			goto return_;
++		}
++
++		if (pvr_copy_to_user(pcBuffer, pcLocalBuffer, iRes) != 0)
++		{
++			iRes = -1;
++		}
++
++		OSFreeMem(pcLocalBuffer);
++	}
++
++return_:
++	_DRIVER_THREAD_EXIT();
++
++	return iRes;
++}
++
++static loff_t _LSeek(struct file *psFile, loff_t iOffset, int iOrigin)
++{
++	DFS_FILE *psDFSFile = psFile->f_path.dentry->d_inode->i_private;
++	loff_t iRes = -1;
++
++	_DRIVER_THREAD_ENTER();
++
++	if (psDFSFile->eType == DI_ENTRY_TYPE_GENERIC)
++	{
++		iRes = seq_lseek(psFile, iOffset, iOrigin);
++		if (iRes < 0)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position in psFile<%p> to offset "
++			        "%lld, iOrigin %d, seq_lseek() returned %lld (dentry='%s')", __func__,
++			        psFile, iOffset, iOrigin, iRes, psFile->f_path.dentry->d_name.name));
++			goto return_;
++		}
++	}
++	else if (psDFSFile->eType == DI_ENTRY_TYPE_RANDOM_ACCESS)
++	{
++		DFS_ENTRY *psEntry = &psDFSFile->sEntry;
++		IMG_UINT64 ui64Pos;
++
++		switch (iOrigin)
++		{
++			case SEEK_SET:
++				/* absolute position */
++				ui64Pos = iOffset;
++				break;
++			case SEEK_CUR:
++				/* position relative to the current one */
++				ui64Pos = psFile->f_pos + iOffset;
++				break;
++			case SEEK_END:
++				/* not supported as we don't know the file size here */
++				/* fall through */
++			default:
++				/* iRes is already -1; leave via the common path so that
++				 * _DRIVER_THREAD_EXIT() is not skipped */
++				goto return_;
++		}
++
++		/* only pass the absolute position to the callback; it's up to the
++		 * implementer to determine if the position is valid */
++
++		iRes = psEntry->sIterCb.pfnSeek(ui64Pos,
++		                                psEntry->sImplEntry.pvPrivData);
++		if (iRes < 0)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: failed to set file position to offset "
++			        "%lld, pfnSeek() returned %lld", __func__,
++			        iOffset, iRes));
++			goto return_;
++		}
++
++		psFile->f_pos = ui64Pos;
++	}
++
++return_:
++	_DRIVER_THREAD_EXIT();
++
++	return iRes;
++}
++
++static ssize_t _Write(struct file *psFile, const char __user *pszBuffer,
++                      size_t uiCount, loff_t *puiPos)
++{
++	struct inode *psINode = psFile->f_path.dentry->d_inode;
++	DFS_FILE *psDFSFile = psINode->i_private;
++	DI_ITERATOR_CB *psIter = &psDFSFile->sEntry.sIterCb;
++	IMG_CHAR *pcLocalBuffer;
++	IMG_UINT64 ui64Count;
++	IMG_INT64 i64Res = -EIO;
++	IMG_UINT64 ui64Pos = *puiPos;
++
++	PVR_LOG_RETURN_IF_FALSE(psDFSFile != NULL, "psDFSFile is NULL",
++	                        -EIO);
++	PVR_LOG_RETURN_IF_FALSE(psIter->pfnWrite != NULL, "pfnWrite is NULL",
++	                        -EIO);
++
++	_DRIVER_THREAD_ENTER();
++
++	/* Make sure we allocate the smallest amount of memory needed. From here
++	 * on, failures must exit through the return_ label so that the
++	 * _DRIVER_THREAD_ENTER() above is balanced by _DRIVER_THREAD_EXIT(). */
++	ui64Count = psIter->ui32WriteLenMax;
++	PVR_LOG_GOTO_IF_FALSE(uiCount <= ui64Count, "uiCount too long", return_);
++	ui64Count = MIN(uiCount + 1, ui64Count);
++
++	/* allocate buffer with one additional byte for NUL character */
++	pcLocalBuffer = OSAllocMem(ui64Count);
++	PVR_LOG_GOTO_IF_FALSE(pcLocalBuffer != NULL, "OSAllocMem() failed",
++	                      return_);
++
++	i64Res = pvr_copy_from_user(pcLocalBuffer, pszBuffer, ui64Count);
++	PVR_LOG_GOTO_IF_FALSE(i64Res == 0, "pvr_copy_from_user() failed",
++	                      free_local_buffer_);
++
++	/* ensure that the framework user gets a NUL terminated buffer */
++	pcLocalBuffer[ui64Count - 1] = '\0';
++
++	i64Res = psIter->pfnWrite(pcLocalBuffer, ui64Count, &ui64Pos,
++	                          psDFSFile->sEntry.sImplEntry.pvPrivData);
++	PVR_LOG_GOTO_IF_FALSE(i64Res >= 0, "pfnWrite failed", free_local_buffer_);
++
++	*puiPos = ui64Pos;
++
++free_local_buffer_:
++	OSFreeMem(pcLocalBuffer);
++
++return_:
++	_DRIVER_THREAD_EXIT();
++
++	return i64Res;
++}
++
++static const struct file_operations _g_psFileOpsGen = {
++	.owner = THIS_MODULE,
++	.open = _Open,
++	.release = _Close,
++	.read = _Read,
++	.llseek = _LSeek,
++	.write =
_Write, ++}; ++ ++static const struct file_operations _g_psFileOpsRndAcc = { ++ .owner = THIS_MODULE, ++ .read = _Read, ++ .llseek = _LSeek, ++ .write = _Write, ++}; ++ ++/* ----- DI implementation interface ---------------------------------------- */ ++ ++static PVRSRV_ERROR _Init(void) ++{ ++ return PVRSRV_OK; ++} ++ ++static void _DeInit(void) ++{ ++} ++ ++static PVRSRV_ERROR _CreateFile(const IMG_CHAR *pszName, ++ DI_ENTRY_TYPE eType, ++ const DI_ITERATOR_CB *psIterCb, ++ void *pvPrivData, ++ void *pvParentDir, ++ void **pvFile) ++{ ++ DFS_DIR *psParentDir = pvParentDir; ++ DFS_FILE *psFile; ++ umode_t uiMode = S_IFREG; ++ struct dentry *psEntry; ++ const struct file_operations *psFileOps = NULL; ++ PVRSRV_ERROR eError; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pvFile != NULL, "pvFile"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pvParentDir != NULL, "pvParentDir"); ++ ++ switch (eType) ++ { ++ case DI_ENTRY_TYPE_GENERIC: ++ psFileOps = &_g_psFileOpsGen; ++ break; ++ case DI_ENTRY_TYPE_RANDOM_ACCESS: ++ psFileOps = &_g_psFileOpsRndAcc; ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "eType invalid in %s()", __func__)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto return_; ++ } ++ ++ psFile = OSAllocMem(sizeof(*psFile)); ++ PVR_LOG_GOTO_IF_NOMEM(psFile, eError, return_); ++ ++ uiMode |= psIterCb->pfnShow != NULL || psIterCb->pfnRead != NULL ? ++ S_IRUGO : 0; ++ uiMode |= psIterCb->pfnWrite != NULL ? S_IWUSR : 0; ++ ++ psEntry = debugfs_create_file(pszName, uiMode, psParentDir->psDirEntry, ++ psFile, psFileOps); ++ if (IS_ERR_OR_NULL(psEntry)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create debugfs '%s' file", ++ __func__, pszName)); ++ ++ eError = psEntry == NULL ? ++ PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_DEVICE; ++ goto free_file_; ++ } ++ ++ psFile->eType = eType; ++ psFile->psSeqOps = &_g_sSeqOps; ++ psFile->sEntry.sIterCb = *psIterCb; ++ psFile->sEntry.sImplEntry.pvPrivData = pvPrivData; ++ psFile->sEntry.sImplEntry.pvNative = NULL; ++ psFile->sEntry.sImplEntry.psCb = &_g_sEntryCallbacks; ++ psFile->psParentDir = psParentDir; ++ psFile->psFileEntry = psEntry; ++ ++ *pvFile = psFile; ++ ++ return PVRSRV_OK; ++ ++free_file_: ++ OSFreeMem(psFile); ++ ++return_: ++ return eError; ++} ++ ++static void _DestroyFile(void *pvFile) ++{ ++ DFS_FILE *psFile = pvFile; ++ ++ PVR_ASSERT(psFile != NULL); ++ ++ psFile->psFileEntry->d_inode->i_private = NULL; ++ ++ debugfs_remove(psFile->psFileEntry); ++ OSFreeMem(psFile); ++} ++ ++static PVRSRV_ERROR _CreateDir(const IMG_CHAR *pszName, ++ void *pvParentDir, ++ void **ppvDir) ++{ ++ DFS_DIR *psNewDir; ++ struct dentry *psDirEntry, *psParentDir = NULL; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pszName != NULL, "pszName"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(ppvDir != NULL, "ppvDir"); ++ ++ psNewDir = OSAllocMem(sizeof(*psNewDir)); ++ PVR_LOG_RETURN_IF_NOMEM(psNewDir, "OSAllocMem"); ++ ++ psNewDir->psParentDir = pvParentDir; ++ ++ if (pvParentDir != NULL) ++ { ++ psParentDir = psNewDir->psParentDir->psDirEntry; ++ } ++ ++ psDirEntry = debugfs_create_dir(pszName, psParentDir); ++ if (IS_ERR_OR_NULL(psDirEntry)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Cannot create '%s' debugfs directory", ++ __func__, pszName)); ++ OSFreeMem(psNewDir); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ psNewDir->psDirEntry = psDirEntry; ++ *ppvDir = psNewDir; ++ ++ return PVRSRV_OK; ++} ++ ++static void _DestroyDir(void *pvDir) ++{ ++ DFS_DIR *psDir = pvDir; ++ ++ PVR_ASSERT(psDir != NULL); ++ ++ debugfs_remove(psDir->psDirEntry); ++ OSFreeMem(psDir); ++} ++ 
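++/* Illustrative sketch only (not wired into the driver): the kind of "show"
++ * callback that the file operations above drive. When a DI entry supplies no
++ * pfnStart/pfnStop/pfnNext iterators, _Open() falls back to single_open()
++ * and _Show() calls pfnShow once with the OSDI_IMPL_ENTRY. The callback
++ * name and the string private data are hypothetical; the entry fields and
++ * pfnPuts are the ones wired up in _g_sEntryCallbacks above.
++ */
++static __maybe_unused int _ExampleShowOneLine(OSDI_IMPL_ENTRY *psEntry,
++                                              void *pvPriv)
++{
++	/* pvPrivData is the pointer that was handed to _CreateFile() */
++	const IMG_CHAR *pszLine = psEntry->pvPrivData;
++
++	PVR_UNREFERENCED_PARAMETER(pvPriv);
++
++	/* writes through the native handle, i.e. the underlying seq_file */
++	psEntry->psCb->pfnPuts(psEntry->pvNative, pszLine);
++
++	return 0;
++}
++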
++PVRSRV_ERROR PVRDebugFsRegister(void) ++{ ++ OSDI_IMPL_CB sImplCb = { ++ .pfnInit = _Init, ++ .pfnDeInit = _DeInit, ++ .pfnCreateEntry = _CreateFile, ++ .pfnDestroyEntry = _DestroyFile, ++ .pfnCreateGroup = _CreateDir, ++ .pfnDestroyGroup = _DestroyDir ++ }; ++ ++ return DIRegisterImplementation("debugfs", &sImplCb); ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_debugfs.h b/drivers/gpu/drm/img-rogue/pvr_debugfs.h +new file mode 100644 +index 000000000000..23ae55b12069 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_debugfs.h +@@ -0,0 +1,50 @@ ++/*************************************************************************/ /*! ++@File ++@Title DebugFS implementation of Debug Info interface. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_DEBUGFS_H ++#define PVR_DEBUGFS_H ++ ++#include "pvrsrv_error.h" ++ ++PVRSRV_ERROR PVRDebugFsRegister(void); ++ ++#endif /* PVR_DEBUGFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_dicommon.h b/drivers/gpu/drm/img-rogue/pvr_dicommon.h +new file mode 100644 +index 000000000000..c729dde2fab6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_dicommon.h +@@ -0,0 +1,59 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services Debug Information (DI) common types and definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Debug Information (DI) common types and definitions included ++ in both user mode and kernel mode source. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_DICOMMON_H ++#define PVR_DICOMMON_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/*! Maximum DI entry path length including the null byte. */ ++#define DI_IMPL_BRG_PATH_LEN 64 ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* PVR_DICOMMON_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_dma_resv.h b/drivers/gpu/drm/img-rogue/pvr_dma_resv.h +new file mode 100644 +index 000000000000..a51c9de84ada +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_dma_resv.h +@@ -0,0 +1,80 @@ ++/*************************************************************************/ /*! ++@Title Kernel reservation object compatibility header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Per-version macros to allow code to seamlessly use older kernel ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#ifndef __PVR_DMA_RESV_H__
++#define __PVR_DMA_RESV_H__
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
++#include <linux/dma-resv.h>
++#else
++#include <linux/reservation.h>
++
++/* Reservation object types */
++#define dma_resv reservation_object
++#define dma_resv_list reservation_object_list
++
++/* Reservation object functions */
++#define dma_resv_add_excl_fence reservation_object_add_excl_fence
++#define dma_resv_add_shared_fence reservation_object_add_shared_fence
++#define dma_resv_fini reservation_object_fini
++#define dma_resv_get_excl reservation_object_get_excl
++#define dma_resv_get_list reservation_object_get_list
++#define dma_resv_held reservation_object_held
++#define dma_resv_init reservation_object_init
++#define dma_resv_reserve_shared reservation_object_reserve_shared
++#define dma_resv_test_signaled_rcu reservation_object_test_signaled_rcu
++#define dma_resv_wait_timeout_rcu reservation_object_wait_timeout_rcu
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0))
++
++#define dma_resv_shared_list dma_resv_get_list
++#define dma_resv_excl_fence dma_resv_get_excl
++#define dma_resv_wait_timeout dma_resv_wait_timeout_rcu
++#define dma_resv_test_signaled dma_resv_test_signaled_rcu
++#define dma_resv_get_fences dma_resv_get_fences_rcu
++
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)) */
++
++#endif /* __PVR_DMA_RESV_H__ */
+diff --git a/drivers/gpu/drm/img-rogue/pvr_drm.c b/drivers/gpu/drm/img-rogue/pvr_drm.c
+new file mode 100644
+index 000000000000..65a8e511e730
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pvr_drm.c
+@@ -0,0 +1,336 @@
++/*
++ * @File
++ * @Title       PowerVR DRM driver
++ * @Codingstyle LinuxKernel
++ * @Copyright   Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++ * @License     Dual MIT/GPLv2
++ *
++ * The contents of this file are subject to the MIT license as set out below.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to deal
++ * in the Software without restriction, including without limitation the rights
++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++ * copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * Alternatively, the contents of this file may be used under the terms of
++ * the GNU General Public License Version 2 ("GPL") in which case the provisions
++ * of GPL are applicable instead of those above.
++ *
++ * If you wish to allow use of your version of this file only under the terms of
++ * GPL, and not to allow others to use your version of this file under the terms
++ * of the MIT license, indicate your decision by deleting the provisions above
++ * and replace them with the notice and other provisions required by GPL as set
++ * out in the file called "GPL-COPYING" included in this distribution. If you do
++ * not delete the provisions above, a recipient may use your version of this file
++ * under the terms of either the MIT license or GPL.
++ *
++ * This License is also included in this distribution in the file called
++ * "MIT-COPYING".
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#include ++#include ++#else ++#include /* include before drm_crtc.h for kernels older than 3.9 */ ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "module_common.h" ++#include "pvr_drm.h" ++#include "pvr_drv.h" ++#include "pvrversion.h" ++#include "services_kernel_client.h" ++#include "pvr_sync_ioctl_drm.h" ++ ++#include "kernel_compatibility.h" ++ ++#define PVR_DRM_DRIVER_NAME PVR_DRM_NAME ++#define PVR_DRM_DRIVER_DESC "Imagination Technologies PVR DRM" ++#define PVR_DRM_DRIVER_DATE "20170530" ++ ++/* ++ * Protects global PVRSRV_DATA on a multi device system. i.e. this is used to ++ * protect the PVRSRVCommonDeviceXXXX() APIs in the Server common layer which ++ * are not re-entrant for device creation and initialisation. ++ */ ++static DEFINE_MUTEX(g_device_mutex); ++ ++static int pvr_pm_suspend(struct device *dev) ++{ ++ struct drm_device *ddev = dev_get_drvdata(dev); ++ struct pvr_drm_private *priv = ddev->dev_private; ++ ++ DRM_DEBUG_DRIVER("device %p\n", dev); ++ ++ return PVRSRVDeviceSuspend(priv->dev_node); ++} ++ ++static int pvr_pm_resume(struct device *dev) ++{ ++ struct drm_device *ddev = dev_get_drvdata(dev); ++ struct pvr_drm_private *priv = ddev->dev_private; ++ ++ DRM_DEBUG_DRIVER("device %p\n", dev); ++ ++ return PVRSRVDeviceResume(priv->dev_node); ++} ++ ++const struct dev_pm_ops pvr_pm_ops = { ++ .suspend = pvr_pm_suspend, ++ .resume = pvr_pm_resume, ++}; ++ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) ++static ++#endif ++int pvr_drm_load(struct drm_device *ddev, unsigned long flags) ++{ ++ struct pvr_drm_private *priv; ++ enum PVRSRV_ERROR_TAG srv_err; ++ int err, deviceId; ++ ++ DRM_DEBUG_DRIVER("device %p\n", ddev->dev); ++ ++ dev_set_drvdata(ddev->dev, ddev); ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) ++ /* ++ * Older kernels do not have render drm_minor member in drm_device, ++ * so we fallback to primary node for device identification ++ */ ++ deviceId = ddev->primary->index; ++#else ++ if (ddev->render) ++ deviceId = ddev->render->index; ++ else /* when render node is NULL, fallback to primary node */ ++ deviceId = ddev->primary->index; ++#endif ++ ++ priv = kzalloc(sizeof(*priv), GFP_KERNEL); ++ if (!priv) { ++ err = -ENOMEM; ++ goto err_exit; ++ } ++ ddev->dev_private = priv; ++ ++ if (!ddev->dev->dma_parms) ++ ddev->dev->dma_parms = &priv->dma_parms; ++ dma_set_max_seg_size(ddev->dev, DMA_BIT_MASK(32)); ++ ++ mutex_lock(&g_device_mutex); ++ ++ srv_err = PVRSRVCommonDeviceCreate(ddev->dev, deviceId, &priv->dev_node); ++ if (srv_err != PVRSRV_OK) { ++ DRM_ERROR("failed to create device node for device %p (%s)\n", ++ ddev->dev, PVRSRVGetErrorString(srv_err)); ++ if (srv_err == PVRSRV_ERROR_PROBE_DEFER) ++ err = -EPROBE_DEFER; ++ else ++ err = -ENODEV; ++ goto err_unset_dma_parms; ++ } ++ ++ err = 
PVRSRVDeviceInit(priv->dev_node); ++ if (err) { ++ DRM_ERROR("device %p initialisation failed (err=%d)\n", ++ ddev->dev, err); ++ goto err_device_destroy; ++ } ++ ++ drm_mode_config_init(ddev); ++ ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) ++ srv_err = PVRSRVCommonDeviceInitialise(priv->dev_node); ++ if (srv_err != PVRSRV_OK) { ++ err = -ENODEV; ++ DRM_ERROR("device %p initialisation failed (err=%d)\n", ++ ddev->dev, err); ++ goto err_device_deinit; ++ } ++#endif ++ ++ mutex_unlock(&g_device_mutex); ++ ++ return 0; ++ ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) ++err_device_deinit: ++ drm_mode_config_cleanup(ddev); ++ PVRSRVDeviceDeinit(priv->dev_node); ++#endif ++err_device_destroy: ++ PVRSRVCommonDeviceDestroy(priv->dev_node); ++err_unset_dma_parms: ++ mutex_unlock(&g_device_mutex); ++ if (ddev->dev->dma_parms == &priv->dma_parms) ++ ddev->dev->dma_parms = NULL; ++ kfree(priv); ++err_exit: ++ return err; ++} ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) ++static ++#endif ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++int pvr_drm_unload(struct drm_device *ddev) ++#else ++void pvr_drm_unload(struct drm_device *ddev) ++#endif ++{ ++ struct pvr_drm_private *priv = ddev->dev_private; ++ ++ DRM_DEBUG_DRIVER("device %p\n", ddev->dev); ++ ++ drm_mode_config_cleanup(ddev); ++ ++ PVRSRVDeviceDeinit(priv->dev_node); ++ ++ mutex_lock(&g_device_mutex); ++ PVRSRVCommonDeviceDestroy(priv->dev_node); ++ mutex_unlock(&g_device_mutex); ++ ++ if (ddev->dev->dma_parms == &priv->dma_parms) ++ ddev->dev->dma_parms = NULL; ++ ++ kfree(priv); ++ ddev->dev_private = NULL; ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++ return 0; ++#endif ++} ++ ++static int pvr_drm_open(struct drm_device *ddev, struct drm_file *dfile) ++{ ++#if (PVRSRV_DEVICE_INIT_MODE != PVRSRV_LINUX_DEV_INIT_ON_CONNECT) ++ struct pvr_drm_private *priv = ddev->dev_private; ++ int err; ++#endif ++ ++ if (!try_module_get(THIS_MODULE)) { ++ DRM_ERROR("failed to get module reference\n"); ++ return -ENOENT; ++ } ++ ++#if (PVRSRV_DEVICE_INIT_MODE != PVRSRV_LINUX_DEV_INIT_ON_CONNECT) ++ err = PVRSRVDeviceServicesOpen(priv->dev_node, dfile); ++ if (err) ++ module_put(THIS_MODULE); ++ ++ return err; ++#else ++ return 0; ++#endif ++} ++ ++static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile) ++{ ++ struct pvr_drm_private *priv = ddev->dev_private; ++ ++ PVRSRVDeviceRelease(priv->dev_node, dfile); ++ ++ module_put(THIS_MODULE); ++} ++ ++/* ++ * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is set. 
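++ * On kernels where drm_ioctl() only takes that lock for DRIVER_LEGACY
++ * drivers the flag is effectively a no-op, but it is kept so the table
++ * behaves the same on the older kernels this driver supports.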
++ */ ++static struct drm_ioctl_desc pvr_drm_ioctls[] = { ++ DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM, ++ DRM_RENDER_ALLOW | DRM_UNLOCKED), ++ DRM_IOCTL_DEF_DRV(PVR_SRVKM_INIT, drm_pvr_srvkm_init, ++ DRM_RENDER_ALLOW | DRM_UNLOCKED), ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) && !defined(USE_PVRSYNC_DEVNODE) ++ DRM_IOCTL_DEF_DRV(PVR_SYNC_RENAME_CMD, pvr_sync_rename_ioctl, ++ DRM_RENDER_ALLOW | DRM_UNLOCKED), ++ DRM_IOCTL_DEF_DRV(PVR_SYNC_FORCE_SW_ONLY_CMD, pvr_sync_force_sw_only_ioctl, ++ DRM_RENDER_ALLOW | DRM_UNLOCKED), ++ DRM_IOCTL_DEF_DRV(PVR_SW_SYNC_CREATE_FENCE_CMD, pvr_sw_sync_create_fence_ioctl, ++ DRM_RENDER_ALLOW | DRM_UNLOCKED), ++ DRM_IOCTL_DEF_DRV(PVR_SW_SYNC_INC_CMD, pvr_sw_sync_inc_ioctl, ++ DRM_RENDER_ALLOW | DRM_UNLOCKED), ++#endif ++}; ++ ++#if defined(CONFIG_COMPAT) ++static long pvr_compat_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg) ++{ ++ unsigned int nr = DRM_IOCTL_NR(cmd); ++ ++ if (nr < DRM_COMMAND_BASE) ++ return drm_compat_ioctl(file, cmd, arg); ++ ++ return drm_ioctl(file, cmd, arg); ++} ++#endif /* defined(CONFIG_COMPAT) */ ++ ++const struct file_operations pvr_drm_fops = { ++ .owner = THIS_MODULE, ++ .open = drm_open, ++ .release = drm_release, ++ .unlocked_ioctl = drm_ioctl, ++#if defined(CONFIG_COMPAT) ++ .compat_ioctl = pvr_compat_ioctl, ++#endif ++ .mmap = PVRSRV_MMap, ++ .poll = drm_poll, ++ .read = drm_read, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) ++ .fasync = drm_fasync, ++#endif ++}; ++ ++const struct drm_driver pvr_drm_generic_driver = { ++ .driver_features = DRIVER_MODESET | DRIVER_RENDER, ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ .load = NULL, ++ .unload = NULL, ++#else ++ .load = pvr_drm_load, ++ .unload = pvr_drm_unload, ++#endif ++ .open = pvr_drm_open, ++ .postclose = pvr_drm_release, ++ ++ .ioctls = pvr_drm_ioctls, ++ .num_ioctls = ARRAY_SIZE(pvr_drm_ioctls), ++ .fops = &pvr_drm_fops, ++ ++ .name = PVR_DRM_DRIVER_NAME, ++ .desc = PVR_DRM_DRIVER_DESC, ++ .date = PVR_DRM_DRIVER_DATE, ++ .major = PVRVERSION_MAJ, ++ .minor = PVRVERSION_MIN, ++ .patchlevel = PVRVERSION_BUILD, ++}; +diff --git a/drivers/gpu/drm/img-rogue/pvr_drm.h b/drivers/gpu/drm/img-rogue/pvr_drm.h +new file mode 100644 +index 000000000000..c0d00c98d8c0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_drm.h +@@ -0,0 +1,146 @@ ++/* ++ * @File ++ * @Title PVR DRM definitions shared between kernel and user space. ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. 
++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#if !defined(__PVR_DRM_H__) ++#define __PVR_DRM_H__ ++ ++#include ++ ++#if defined(__KERNEL__) ++#include ++#else ++#include ++#endif ++ ++/* ++ * IMPORTANT: ++ * All structures below are designed to be the same size when compiled for 32 ++ * and/or 64 bit architectures, i.e. there should be no compiler inserted ++ * padding. This is achieved by sticking to the following rules: ++ * 1) only use fixed width types ++ * 2) always naturally align fields by arranging them appropriately and by using ++ * padding fields when necessary ++ * ++ * These rules should _always_ be followed when modifying or adding new ++ * structures to this file. ++ */ ++ ++struct drm_pvr_srvkm_cmd { ++ __u32 bridge_id; ++ __u32 bridge_func_id; ++ __u64 in_data_ptr; ++ __u64 out_data_ptr; ++ __u32 in_data_size; ++ __u32 out_data_size; ++}; ++ ++struct pvr_sync_rename_ioctl_data { ++ char szName[32]; ++}; ++ ++struct pvr_sw_sync_create_fence_data { ++ char name[32]; ++ __s32 fence; ++ __u32 pad; ++ __u64 sync_pt_idx; ++}; ++ ++struct pvr_sw_timeline_advance_data { ++ __u64 sync_pt_idx; ++}; ++ ++#define PVR_SRVKM_SERVICES_INIT 1 ++#define PVR_SRVKM_SYNC_INIT 2 ++struct drm_pvr_srvkm_init_data { ++ __u32 init_module; ++}; ++ ++/* Values used to configure the PVRSRV_DEVICE_INIT_MODE tunable (Linux-only) */ ++#define PVRSRV_LINUX_DEV_INIT_ON_PROBE 1 ++#define PVRSRV_LINUX_DEV_INIT_ON_OPEN 2 ++#define PVRSRV_LINUX_DEV_INIT_ON_CONNECT 3 ++ ++/* ++ * DRM command numbers, relative to DRM_COMMAND_BASE. ++ * These defines must be prefixed with "DRM_". ++ */ ++ ++/* PVR Services command */ ++#define DRM_PVR_SRVKM_CMD 0 ++ ++/* PVR Sync commands */ ++#define DRM_PVR_SYNC_RENAME_CMD 1 ++#define DRM_PVR_SYNC_FORCE_SW_ONLY_CMD 2 ++ ++/* PVR Software Sync commands */ ++#define DRM_PVR_SW_SYNC_CREATE_FENCE_CMD 3 ++#define DRM_PVR_SW_SYNC_INC_CMD 4 ++ ++/* PVR Services Render Device Init command */ ++#define DRM_PVR_SRVKM_INIT 5 ++ ++/* These defines must be prefixed with "DRM_IOCTL_". 
*/ ++#define DRM_IOCTL_PVR_SRVKM_CMD \ ++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, \ ++ struct drm_pvr_srvkm_cmd) ++ ++#define DRM_IOCTL_PVR_SYNC_RENAME_CMD \ ++ DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_SYNC_RENAME_CMD, \ ++ struct pvr_sync_rename_ioctl_data) ++ ++#define DRM_IOCTL_PVR_SYNC_FORCE_SW_ONLY_CMD \ ++ DRM_IO(DRM_COMMAND_BASE + DRM_PVR_SYNC_FORCE_SW_ONLY_CMD) ++ ++#define DRM_IOCTL_PVR_SW_SYNC_CREATE_FENCE_CMD \ ++ DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SW_SYNC_CREATE_FENCE_CMD, \ ++ struct pvr_sw_sync_create_fence_data) ++ ++#define DRM_IOCTL_PVR_SW_SYNC_INC_CMD \ ++ DRM_IOR(DRM_COMMAND_BASE + DRM_PVR_SW_SYNC_INC_CMD, \ ++ struct pvr_sw_timeline_advance_data) ++ ++#define DRM_IOCTL_PVR_SRVKM_INIT \ ++ DRM_IOW(DRM_COMMAND_BASE + DRM_PVR_SRVKM_INIT, \ ++ struct drm_pvr_srvkm_init_data) ++ ++#endif /* defined(__PVR_DRM_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_drv.h b/drivers/gpu/drm/img-rogue/pvr_drv.h +new file mode 100644 +index 000000000000..15887da30f0f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_drv.h +@@ -0,0 +1,106 @@ ++/* ++ * @File ++ * @Title PowerVR DRM driver ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#if !defined(__PVR_DRV_H__) ++#define __PVR_DRV_H__ ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#include ++#else ++#include ++#endif ++ ++#include ++ ++struct file; ++struct _PVRSRV_DEVICE_NODE_; ++struct workqueue_struct; ++struct vm_area_struct; ++ ++/* This structure is used to store Linux specific per-device information. */ ++struct pvr_drm_private { ++ struct _PVRSRV_DEVICE_NODE_ *dev_node; ++ ++ /* ++ * This is needed for devices that don't already have their own dma ++ * parameters structure, e.g. platform devices, and, if necessary, will ++ * be assigned to the 'struct device' during device initialisation. It ++ * should therefore never be accessed directly via this structure as ++ * this may not be the version of dma parameters in use. ++ */ ++ struct device_dma_parameters dma_parms; ++ ++ /* PVR Sync debug notify handle */ ++ void *sync_debug_notify_handle; ++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) ++ /* Only used in fence sync as sync_debug_notify_handle is used ++ * to print a header only. Content is registered separately. ++ * Used to print foreign sync debug ++ */ ++ void *sync_foreign_debug_notify_handle; ++#endif ++}; ++ ++extern const struct dev_pm_ops pvr_pm_ops; ++extern const struct drm_driver pvr_drm_generic_driver; ++extern const struct file_operations pvr_drm_fops; ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++int pvr_drm_load(struct drm_device *ddev, unsigned long flags); ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++int pvr_drm_unload(struct drm_device *ddev); ++#else ++void pvr_drm_unload(struct drm_device *ddev); ++#endif ++#endif ++ ++int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg, ++ struct drm_file *file); ++int PVRSRV_MMap(struct file *file, struct vm_area_struct *ps_vma); ++ ++#endif /* !defined(__PVR_DRV_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_fd_sync_kernel.h b/drivers/gpu/drm/img-rogue/pvr_fd_sync_kernel.h +new file mode 100644 +index 000000000000..3645e29079b1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_fd_sync_kernel.h +@@ -0,0 +1,64 @@ ++/*************************************************************************/ /*! ++@File pvr_fd_sync_kernel.h ++@Title Kernel/userspace interface definitions to use the kernel sync ++ driver ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++ ++#ifndef _PVR_FD_SYNC_KERNEL_H_ ++#define _PVR_FD_SYNC_KERNEL_H_ ++ ++#include ++#include ++ ++#include "pvr_drm.h" ++ ++#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14 ++ ++struct pvr_sync_pt_info { ++ /* Output */ ++ __u32 id; ++ __u32 ui32FWAddr; ++ __u32 ui32CurrOp; ++ __u32 ui32NextOp; ++ __u32 ui32TlTaken; ++} __attribute__((packed, aligned(8))); ++ ++#endif /* _PVR_FD_SYNC_KERNEL_H_ */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_fence.c b/drivers/gpu/drm/img-rogue/pvr_fence.c +new file mode 100644 +index 000000000000..39f71da0e8df +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_fence.c +@@ -0,0 +1,1149 @@ ++/* ++ * @File ++ * @Title PowerVR Linux fence interface ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. 
++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include "pvr_fence.h" ++#include "services_kernel_client.h" ++#include "sync_checkpoint_external.h" ++ ++#define CREATE_TRACE_POINTS ++#include "pvr_fence_trace.h" ++ ++/* This header must always be included last */ ++#include "kernel_compatibility.h" ++ ++/* Global kmem_cache for pvr_fence object allocations */ ++static struct kmem_cache *pvr_fence_cache; ++static DEFINE_MUTEX(pvr_fence_cache_mutex); ++static u32 pvr_fence_cache_refcount; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++struct workqueue_struct *gpFenceDestroyWq; ++#endif ++ ++#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \ ++ do { \ ++ if (pfnDumpDebugPrintf) \ ++ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ ++ ## __VA_ARGS__); \ ++ else \ ++ pr_err(fmt "\n", ## __VA_ARGS__); \ ++ } while (0) ++ ++static inline void ++pvr_fence_sync_signal(struct pvr_fence *pvr_fence, u32 fence_sync_flags) ++{ ++ SyncCheckpointSignal(pvr_fence->sync_checkpoint, fence_sync_flags); ++} ++ ++static inline bool ++pvr_fence_sync_is_signaled(struct pvr_fence *pvr_fence, u32 fence_sync_flags) ++{ ++ return SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, ++ fence_sync_flags); ++} ++ ++static inline u32 ++pvr_fence_sync_value(struct pvr_fence *pvr_fence) ++{ ++ if (SyncCheckpointIsErrored(pvr_fence->sync_checkpoint, ++ PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) ++ return PVRSRV_SYNC_CHECKPOINT_ERRORED; ++ else if (SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, ++ PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) ++ return PVRSRV_SYNC_CHECKPOINT_SIGNALLED; ++ else ++ return PVRSRV_SYNC_CHECKPOINT_ACTIVE; ++} ++ ++static void ++pvr_fence_context_check_status(struct work_struct *data) ++{ ++ PVRSRVCheckStatus(NULL); ++} ++ ++void ++pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size) ++{ ++ snprintf(str, size, ++ "%u ctx=%llu refs=%u", ++ atomic_read(&fctx->fence_seqno), ++ fctx->fence_context, ++ refcount_read(&fctx->kref.refcount)); ++} ++ ++static void ++pvr_fence_context_fences_dump(struct pvr_fence_context *fctx, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ struct pvr_fence *pvr_fence; ++ unsigned long flags; ++ char value[128]; ++ ++ spin_lock_irqsave(&fctx->list_lock, flags); ++ pvr_context_value_str(fctx, value, sizeof(value)); ++ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ++ "%s: @%s", fctx->name, value); ++ list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) { ++ struct dma_fence *fence = pvr_fence->fence; ++ const char *timeline_value_str = "unknown timeline value"; ++ const char *fence_value_str = "unknown fence value"; ++ ++ pvr_fence->base.ops->fence_value_str(&pvr_fence->base, value, ++ sizeof(value)); ++ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ++ " @%s", value); ++ ++ if (is_pvr_fence(fence)) ++ continue; 
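++		/* Everything below only runs for foreign (non-PVR) fences:
++		 * query the owning driver's dma_fence ops, where implemented,
++		 * so the dump also shows the foreign timeline and fence state. */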
++ ++ if (fence->ops->timeline_value_str) { ++ fence->ops->timeline_value_str(fence, value, ++ sizeof(value)); ++ timeline_value_str = value; ++ } ++ ++ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ++ " | %s: %s (driver: %s)", ++ fence->ops->get_timeline_name(fence), ++ timeline_value_str, ++ fence->ops->get_driver_name(fence)); ++ ++ if (fence->ops->fence_value_str) { ++ fence->ops->fence_value_str(fence, value, ++ sizeof(value)); ++ fence_value_str = value; ++ } ++ ++ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ++ " | @%s (foreign)", value); ++ } ++ spin_unlock_irqrestore(&fctx->list_lock, flags); ++} ++ ++static inline unsigned int ++pvr_fence_context_seqno_next(struct pvr_fence_context *fctx) ++{ ++ return atomic_inc_return(&fctx->fence_seqno) - 1; ++} ++ ++/* This function prepends seqno to fence name */ ++static inline void ++pvr_fence_prepare_name(char *fence_name, size_t fence_name_size, ++ const char *name, unsigned int seqno) ++{ ++ unsigned int len; ++ ++ len = OSStringUINT32ToStr(fence_name, fence_name_size, seqno); ++ if (likely((len > 0) && (fence_name_size >= (len + 1)))) { ++ fence_name[len] = '-'; ++ fence_name[len + 1] = '\0'; ++ } ++ strlcat(fence_name, name, fence_name_size); ++} ++ ++static void ++pvr_fence_sched_free(struct rcu_head *rcu) ++{ ++ struct pvr_fence *pvr_fence = container_of(rcu, struct pvr_fence, rcu); ++ ++ kmem_cache_free(pvr_fence_cache, pvr_fence); ++} ++ ++static inline void ++pvr_fence_context_free_deferred(struct pvr_fence_context *fctx) ++{ ++ struct pvr_fence *pvr_fence, *tmp; ++ LIST_HEAD(deferred_free_list); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&fctx->list_lock, flags); ++ list_for_each_entry_safe(pvr_fence, tmp, ++ &fctx->deferred_free_list, ++ fence_head) ++ list_move(&pvr_fence->fence_head, &deferred_free_list); ++ spin_unlock_irqrestore(&fctx->list_lock, flags); ++ ++ list_for_each_entry_safe(pvr_fence, tmp, ++ &deferred_free_list, ++ fence_head) { ++ list_del(&pvr_fence->fence_head); ++ SyncCheckpointFree(pvr_fence->sync_checkpoint); ++ call_rcu(&pvr_fence->rcu, pvr_fence_sched_free); ++ module_put(THIS_MODULE); ++ } ++} ++ ++void ++pvr_fence_context_free_deferred_callback(void *data) ++{ ++ struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; ++ ++ /* ++ * Free up any fence objects we have deferred freeing. ++ */ ++ pvr_fence_context_free_deferred(fctx); ++} ++ ++static void ++pvr_fence_context_signal_fences(void *data) ++{ ++ struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; ++ struct pvr_fence *pvr_fence, *tmp; ++ unsigned long flags1; ++ ++ LIST_HEAD(signal_list); ++ ++ /* ++ * We can't call fence_signal while holding the lock as we can end up ++ * in a situation whereby pvr_fence_foreign_signal_sync, which also ++ * takes the list lock, ends up being called as a result of the ++ * fence_signal below, i.e. fence_signal(fence) -> fence->callback() ++ * -> fence_signal(foreign_fence) -> foreign_fence->callback() where ++ * the foreign_fence callback is pvr_fence_foreign_signal_sync. ++ * ++ * So extract the items we intend to signal and add them to their own ++ * queue. 
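The deadlock-avoidance pattern this comment describes, reduced to its skeleton (generic names, illustrative only): detach the ready entries while holding the lock, then signal with the lock dropped, so a callback such as pvr_fence_foreign_signal_sync can safely re-take list_lock.

struct pvr_fence *pf, *tmp;
unsigned long flags;
LIST_HEAD(ready);

/* Phase 1: under list_lock, move ready fences onto a private list. */
spin_lock_irqsave(&fctx->list_lock, flags);
list_for_each_entry_safe(pf, tmp, &fctx->signal_list, signal_head)
	if (fence_ready(pf))	/* stand-in for the signalled check */
		list_move_tail(&pf->signal_head, &ready);
spin_unlock_irqrestore(&fctx->list_lock, flags);

/* Phase 2: lock dropped; signalling may recurse into list_lock users. */
list_for_each_entry_safe(pf, tmp, &ready, signal_head)
	dma_fence_signal(pf->fence);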
++ */ ++ spin_lock_irqsave(&fctx->list_lock, flags1); ++ list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list, signal_head) { ++ if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) ++ list_move_tail(&pvr_fence->signal_head, &signal_list); ++ } ++ spin_unlock_irqrestore(&fctx->list_lock, flags1); ++ ++ list_for_each_entry_safe(pvr_fence, tmp, &signal_list, signal_head) { ++ ++ PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n", ++ pvr_fence->name); ++ trace_pvr_fence_signal_fence(pvr_fence); ++ spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags1); ++ list_del(&pvr_fence->signal_head); ++ spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags1); ++ dma_fence_signal(pvr_fence->fence); ++ dma_fence_put(pvr_fence->fence); ++ } ++ ++ /* ++ * Take this opportunity to free up any fence objects we ++ * have deferred freeing. ++ */ ++ pvr_fence_context_free_deferred(fctx); ++} ++ ++void ++pvr_fence_context_signal_fences_nohw(void *data) ++{ ++ pvr_fence_context_signal_fences(data); ++} ++ ++static void ++pvr_fence_context_destroy_internal(struct pvr_fence_context *fctx) ++{ ++ pvr_fence_context_free_deferred(fctx); ++ ++ if (WARN_ON(!list_empty_careful(&fctx->fence_list))) ++ pvr_fence_context_fences_dump(fctx, NULL, NULL); ++ ++ PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle); ++ ++ // wait for all fences to be freed before kmem_cache_destroy() is called ++ rcu_barrier(); ++ ++ /* Destroy pvr_fence object cache, if no one is using it */ ++ WARN_ON(pvr_fence_cache == NULL); ++ mutex_lock(&pvr_fence_cache_mutex); ++ if (--pvr_fence_cache_refcount == 0) ++ kmem_cache_destroy(pvr_fence_cache); ++ mutex_unlock(&pvr_fence_cache_mutex); ++ ++ kfree(fctx); ++} ++ ++static void ++pvr_fence_context_unregister_dbg(void *dbg_request_handle) ++{ ++ PVRSRVUnregisterDeviceDbgRequestNotify(dbg_request_handle); ++} ++ ++static void ++pvr_fence_foreign_context_destroy_work(struct work_struct *data) ++{ ++ struct pvr_fence_context *fctx = ++ container_of(data, struct pvr_fence_context, destroy_work); ++ ++ pvr_fence_context_destroy_internal(fctx); ++} ++ ++static void ++pvr_fence_context_destroy_work(struct work_struct *data) ++{ ++ struct pvr_fence_context *fctx = ++ container_of(data, struct pvr_fence_context, destroy_work); ++ ++ pvr_fence_context_unregister_dbg(fctx->dbg_request_handle); ++ pvr_fence_context_destroy_internal(fctx); ++} ++ ++static void ++pvr_fence_context_debug_request(void *data, u32 verbosity, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ struct pvr_fence_context *fctx = (struct pvr_fence_context *)data; ++ ++ if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) ++ pvr_fence_context_fences_dump(fctx, pfnDumpDebugPrintf, ++ pvDumpDebugFile); ++} ++ ++static struct pvr_fence_context * ++pvr_fence_context_create_internal(struct workqueue_struct *fence_status_wq, ++ const char *name, ++ work_func_t destroy_callback) ++{ ++ struct pvr_fence_context *fctx; ++ PVRSRV_ERROR srv_err; ++ ++ fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); ++ if (!fctx) ++ return NULL; ++ ++ spin_lock_init(&fctx->lock); ++ atomic_set(&fctx->fence_seqno, 0); ++ INIT_WORK(&fctx->check_status_work, pvr_fence_context_check_status); ++ INIT_WORK(&fctx->destroy_work, destroy_callback); ++ spin_lock_init(&fctx->list_lock); ++ INIT_LIST_HEAD(&fctx->signal_list); ++ INIT_LIST_HEAD(&fctx->fence_list); ++ INIT_LIST_HEAD(&fctx->deferred_free_list); ++ ++ fctx->fence_wq = fence_status_wq; ++ ++ fctx->fence_context = 
dma_fence_context_alloc(1); ++ strlcpy(fctx->name, name, sizeof(fctx->name)); ++ ++ srv_err = PVRSRVRegisterCmdCompleteNotify(&fctx->cmd_complete_handle, ++ pvr_fence_context_signal_fences, ++ fctx); ++ if (srv_err != PVRSRV_OK) { ++ pr_err("%s: failed to register command complete callback (%s)\n", ++ __func__, PVRSRVGetErrorString(srv_err)); ++ goto err_free_fctx; ++ } ++ ++ /* Create pvr_fence object cache, if not already created */ ++ mutex_lock(&pvr_fence_cache_mutex); ++ if (pvr_fence_cache_refcount == 0) { ++ pvr_fence_cache = KMEM_CACHE(pvr_fence, 0); ++ if (!pvr_fence_cache) { ++ pr_err("%s: failed to allocate pvr_fence cache\n", ++ __func__); ++ mutex_unlock(&pvr_fence_cache_mutex); ++ goto err_unregister_cmd_complete_notify; ++ } ++ } ++ pvr_fence_cache_refcount++; ++ mutex_unlock(&pvr_fence_cache_mutex); ++ ++ kref_init(&fctx->kref); ++ ++ PVR_FENCE_CTX_TRACE(fctx, "created fence context (%s)\n", name); ++ trace_pvr_fence_context_create(fctx); ++ ++ return fctx; ++ ++err_unregister_cmd_complete_notify: ++ PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle); ++err_free_fctx: ++ kfree(fctx); ++ return NULL; ++} ++ ++/** ++ * pvr_fence_context_register_dbg - registers the debug handler for a ++ * fence context ++ * ++ * @dbg_request_handle: handle used to keep a reference for deregister ++ * @dev: device to attach the debug notifier. ++ * @pvr_fence_context: context used as data to the callback for debug ++ * ++ * Registers a debug notifier for a given context for a given device. ++ * ++ * Returns PVRSRV_OK if successful. ++ */ ++PVRSRV_ERROR pvr_fence_context_register_dbg(void *dbg_request_handle, ++ void *dev, ++ struct pvr_fence_context *fctx) ++{ ++ PVRSRV_ERROR srv_err; ++ ++ srv_err = PVRSRVRegisterDeviceDbgRequestNotify(dbg_request_handle, ++ dev, ++ pvr_fence_context_debug_request, ++ DEBUG_REQUEST_LINUXFENCE, ++ fctx); ++ if (srv_err != PVRSRV_OK) { ++ pr_err("%s: failed to register debug request callback (%s)\n", ++ __func__, PVRSRVGetErrorString(srv_err)); ++ } ++ ++ return srv_err; ++} ++ ++/** ++ * pvr_fence_foreign_context_create - creates a PVR fence context ++ * @fence_status_wq: linux workqueue used to signal foreign fences ++ * @name: context name (used for debugging) ++ * ++ * Creates a PVR foreign fence context that can be used to create PVR fences ++ * or to create PVR fences from an existing fence. ++ * ++ * pvr_fence_context_destroy should be called to clean up the fence context. ++ * ++ * Returns NULL if a context cannot be created. ++ */ ++struct pvr_fence_context * ++pvr_fence_foreign_context_create(struct workqueue_struct *fence_status_wq, ++ const char *name) ++{ ++ return pvr_fence_context_create_internal(fence_status_wq, name, ++ pvr_fence_foreign_context_destroy_work); ++} ++ ++/** ++ * pvr_fence_context_create - creates a PVR fence context ++ * @dev_cookie: services device cookie ++ * @fence_status_wq: Status workqueue to queue fence update CBs. ++ * @name: context name (used for debugging) ++ * ++ * Creates a PVR fence context that can be used to create PVR fences or to ++ * create PVR fences from an existing fence. ++ * ++ * pvr_fence_context_destroy should be called to clean up the fence context. ++ * ++ * Returns NULL if a context cannot be created. 
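A hypothetical caller, to make the create/destroy pairing concrete; the workqueue name, timeline name, and dev_cookie are invented for illustration:

struct workqueue_struct *wq;
struct pvr_fence_context *fctx;

wq = create_freezable_workqueue("fence-status");	/* illustrative name */
if (!wq)
	return -ENOMEM;

fctx = pvr_fence_context_create(dev_cookie, wq, "gpu0-fences");
if (!fctx)
	return -ENOMEM;

/* ... create and use PVR fences on fctx ... */

pvr_fence_context_destroy(fctx);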
++ */ ++struct pvr_fence_context * ++pvr_fence_context_create(void *dev_cookie, ++ struct workqueue_struct *fence_status_wq, ++ const char *name) ++{ ++ struct pvr_fence_context *fctx; ++ PVRSRV_ERROR eError; ++ ++ fctx = pvr_fence_context_create_internal(fence_status_wq, name, ++ pvr_fence_context_destroy_work); ++ if (fctx == NULL) { ++ pr_err("%s: failed to create fence context", __func__); ++ goto err_out; ++ } ++ ++ eError = pvr_fence_context_register_dbg(&fctx->dbg_request_handle, ++ dev_cookie, ++ fctx); ++ if (eError != PVRSRV_OK) { ++ pr_err("%s: failed to register fence context debug (%s)\n", ++ __func__, PVRSRVGetErrorString(eError)); ++ goto err_destroy_ctx; ++ } ++ ++ return fctx; ++ ++err_destroy_ctx: ++ pvr_fence_context_destroy(fctx); ++err_out: ++ return NULL; ++} ++ ++static void pvr_fence_context_destroy_kref(struct kref *kref) ++{ ++ struct pvr_fence_context *fctx = ++ container_of(kref, struct pvr_fence_context, kref); ++ ++ PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name); ++ ++ trace_pvr_fence_context_destroy_kref(fctx); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ if (!gpFenceDestroyWq) { ++ gpFenceDestroyWq = create_freezable_workqueue("pvr_fence_destroy"); ++ if (!gpFenceDestroyWq) { ++ pr_err("%s: Failed to create fence destroy workqueue", __func__); ++ return; ++ } ++ } ++ queue_work(gpFenceDestroyWq, &fctx->destroy_work); ++#else ++ schedule_work(&fctx->destroy_work); ++#endif ++} ++ ++/** ++ * pvr_fence_context_destroy - destroys a context ++ * @fctx: PVR fence context to destroy ++ * ++ * Destroys a PVR fence context with the expectation that all fences have been ++ * destroyed. ++ */ ++void ++pvr_fence_context_destroy(struct pvr_fence_context *fctx) ++{ ++ trace_pvr_fence_context_destroy(fctx); ++ ++ kref_put(&fctx->kref, pvr_fence_context_destroy_kref); ++} ++ ++static const char * ++pvr_fence_get_driver_name(struct dma_fence *fence) ++{ ++ return PVR_LDM_DRIVER_REGISTRATION_NAME; ++} ++ ++static const char * ++pvr_fence_get_timeline_name(struct dma_fence *fence) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ ++ if (pvr_fence) ++ return pvr_fence->fctx->name; ++ return NULL; ++} ++ ++static ++void pvr_fence_fence_value_str(struct dma_fence *fence, char *str, int size) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ ++ if (!pvr_fence) ++ return; ++ ++ snprintf(str, size, ++ "%llu: (%s%s) refs=%u fwaddr=%#08x enqueue=%u status=%-9s %s%s", ++ (u64) pvr_fence->fence->seqno, ++ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, ++ &pvr_fence->fence->flags) ? "+" : "-", ++ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, ++ &pvr_fence->fence->flags) ? "+" : "-", ++ refcount_read(&pvr_fence->fence->refcount.refcount), ++ SyncCheckpointGetFirmwareAddr( ++ pvr_fence->sync_checkpoint), ++ SyncCheckpointGetEnqueuedCount(pvr_fence->sync_checkpoint), ++ SyncCheckpointGetStateString(pvr_fence->sync_checkpoint), ++ pvr_fence->name, ++ (&pvr_fence->base != pvr_fence->fence) ? 
++ "(foreign)" : ""); ++} ++ ++static ++void pvr_fence_timeline_value_str(struct dma_fence *fence, char *str, int size) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ ++ if (pvr_fence) ++ pvr_context_value_str(pvr_fence->fctx, str, size); ++} ++ ++static bool ++pvr_fence_enable_signaling(struct dma_fence *fence) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ unsigned long flags; ++ ++ if (!pvr_fence) ++ return false; ++ ++ WARN_ON_SMP(!spin_is_locked(&pvr_fence->fctx->lock)); ++ ++ if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) ++ return false; ++ ++ dma_fence_get(&pvr_fence->base); ++ ++ spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags); ++ list_add_tail(&pvr_fence->signal_head, &pvr_fence->fctx->signal_list); ++ spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags); ++ ++ PVR_FENCE_TRACE(&pvr_fence->base, "signalling enabled (%s)\n", ++ pvr_fence->name); ++ trace_pvr_fence_enable_signaling(pvr_fence); ++ ++ return true; ++} ++ ++static bool ++pvr_fence_is_signaled(struct dma_fence *fence) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ ++ if (pvr_fence) ++ return pvr_fence_sync_is_signaled(pvr_fence, ++ PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT); ++ return false; ++} ++ ++static void ++pvr_fence_release(struct dma_fence *fence) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ unsigned long flags; ++ ++ if (pvr_fence) { ++ struct pvr_fence_context *fctx = pvr_fence->fctx; ++ ++ PVR_FENCE_TRACE(&pvr_fence->base, "released fence (%s)\n", ++ pvr_fence->name); ++ trace_pvr_fence_release(pvr_fence); ++ ++ spin_lock_irqsave(&fctx->list_lock, flags); ++ list_move(&pvr_fence->fence_head, ++ &fctx->deferred_free_list); ++ spin_unlock_irqrestore(&fctx->list_lock, flags); ++ ++ kref_put(&fctx->kref, pvr_fence_context_destroy_kref); ++ } ++} ++ ++const struct dma_fence_ops pvr_fence_ops = { ++ .get_driver_name = pvr_fence_get_driver_name, ++ .get_timeline_name = pvr_fence_get_timeline_name, ++ .fence_value_str = pvr_fence_fence_value_str, ++ .timeline_value_str = pvr_fence_timeline_value_str, ++ .enable_signaling = pvr_fence_enable_signaling, ++ .signaled = pvr_fence_is_signaled, ++ .wait = dma_fence_default_wait, ++ .release = pvr_fence_release, ++}; ++ ++/** ++ * pvr_fence_create - creates a PVR fence ++ * @fctx: PVR fence context on which the PVR fence should be created ++ * @sync_checkpoint_ctx: context in which to create sync checkpoints ++ * @timeline_fd: timeline on which the PVR fence should be created ++ * @name: PVR fence name (used for debugging) ++ * ++ * Creates a PVR fence. ++ * ++ * Once the fence is finished with, pvr_fence_destroy should be called. ++ * ++ * Returns NULL if a PVR fence cannot be created. 
++ */ ++struct pvr_fence * ++pvr_fence_create(struct pvr_fence_context *fctx, ++ struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx, ++ int timeline_fd, const char *name) ++{ ++ struct pvr_fence *pvr_fence; ++ unsigned int seqno; ++ unsigned long flags; ++ PVRSRV_ERROR srv_err; ++ ++ if (!try_module_get(THIS_MODULE)) ++ goto err_exit; ++ ++ /* Note: As kmem_cache is used to allocate pvr_fence objects, ++ * make sure that all members of pvr_fence struct are initialized ++ * here ++ */ ++ pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL); ++ if (unlikely(!pvr_fence)) ++ goto err_module_put; ++ ++ srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx, ++ (PVRSRV_TIMELINE) timeline_fd, PVRSRV_NO_FENCE, ++ name, &pvr_fence->sync_checkpoint); ++ if (unlikely(srv_err != PVRSRV_OK)) ++ goto err_free_fence; ++ ++ INIT_LIST_HEAD(&pvr_fence->fence_head); ++ INIT_LIST_HEAD(&pvr_fence->signal_head); ++ pvr_fence->fctx = fctx; ++ seqno = pvr_fence_context_seqno_next(fctx); ++ /* Add the seqno to the fence name for easier debugging */ ++ pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name), ++ name, seqno); ++ ++ /* Reset cb to zero */ ++ memset(&pvr_fence->cb, 0, sizeof(pvr_fence->cb)); ++ pvr_fence->fence = &pvr_fence->base; ++ ++ dma_fence_init(&pvr_fence->base, &pvr_fence_ops, &fctx->lock, ++ fctx->fence_context, seqno); ++ ++ spin_lock_irqsave(&fctx->list_lock, flags); ++ list_add_tail(&pvr_fence->fence_head, &fctx->fence_list); ++ spin_unlock_irqrestore(&fctx->list_lock, flags); ++ ++ kref_get(&fctx->kref); ++ ++ PVR_FENCE_TRACE(&pvr_fence->base, "created fence (%s)\n", name); ++ trace_pvr_fence_create(pvr_fence); ++ ++ return pvr_fence; ++ ++err_free_fence: ++ kmem_cache_free(pvr_fence_cache, pvr_fence); ++err_module_put: ++ module_put(THIS_MODULE); ++err_exit: ++ return NULL; ++} ++ ++static const char * ++pvr_fence_foreign_get_driver_name(struct dma_fence *fence) ++{ ++ return PVR_LDM_DRIVER_REGISTRATION_NAME; ++} ++ ++static const char * ++pvr_fence_foreign_get_timeline_name(struct dma_fence *fence) ++{ ++ return "foreign"; ++} ++ ++static ++void pvr_fence_foreign_fence_value_str(struct dma_fence *fence, char *str, ++ int size) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ u32 sync_addr = 0; ++ u32 sync_value_next; ++ ++ if (WARN_ON(!pvr_fence)) ++ return; ++ ++ sync_addr = SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint); ++ sync_value_next = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; ++ ++ /* ++ * Include the fence flag bits from the foreign fence instead of our ++ * shadow copy. This is done as the shadow fence flag bits aren't used. ++ */ ++ snprintf(str, size, ++ "%llu: (%s%s) refs=%u fwaddr=%#08x cur=%#08x nxt=%#08x %s", ++ (u64) fence->seqno, ++ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, ++ &pvr_fence->fence->flags) ? "+" : "-", ++ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, ++ &pvr_fence->fence->flags) ? 
"+" : "-", ++ refcount_read(&fence->refcount.refcount), ++ sync_addr, ++ pvr_fence_sync_value(pvr_fence), ++ sync_value_next, ++ pvr_fence->name); ++} ++ ++static ++void pvr_fence_foreign_timeline_value_str(struct dma_fence *fence, char *str, ++ int size) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ ++ if (pvr_fence) ++ pvr_context_value_str(pvr_fence->fctx, str, size); ++} ++ ++static bool ++pvr_fence_foreign_enable_signaling(struct dma_fence *fence) ++{ ++ WARN_ON("cannot enable signalling on foreign fence"); ++ return false; ++} ++ ++static signed long ++pvr_fence_foreign_wait(struct dma_fence *fence, bool intr, signed long timeout) ++{ ++ WARN_ON("cannot wait on foreign fence"); ++ return 0; ++} ++ ++static void ++pvr_fence_foreign_release(struct dma_fence *fence) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ unsigned long flags; ++ ++ if (pvr_fence) { ++ struct pvr_fence_context *fctx = pvr_fence->fctx; ++ struct dma_fence *foreign_fence = pvr_fence->fence; ++ ++ PVR_FENCE_TRACE(&pvr_fence->base, ++ "released fence for foreign fence %llu#%d (%s)\n", ++ (u64) pvr_fence->fence->context, ++ pvr_fence->fence->seqno, pvr_fence->name); ++ trace_pvr_fence_foreign_release(pvr_fence); ++ ++ spin_lock_irqsave(&fctx->list_lock, flags); ++ list_move(&pvr_fence->fence_head, ++ &fctx->deferred_free_list); ++ spin_unlock_irqrestore(&fctx->list_lock, flags); ++ ++ dma_fence_put(foreign_fence); ++ ++ kref_put(&fctx->kref, ++ pvr_fence_context_destroy_kref); ++ } ++} ++ ++const struct dma_fence_ops pvr_fence_foreign_ops = { ++ .get_driver_name = pvr_fence_foreign_get_driver_name, ++ .get_timeline_name = pvr_fence_foreign_get_timeline_name, ++ .fence_value_str = pvr_fence_foreign_fence_value_str, ++ .timeline_value_str = pvr_fence_foreign_timeline_value_str, ++ .enable_signaling = pvr_fence_foreign_enable_signaling, ++ .wait = pvr_fence_foreign_wait, ++ .release = pvr_fence_foreign_release, ++}; ++ ++static void ++pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb) ++{ ++ struct pvr_fence *pvr_fence = container_of(cb, struct pvr_fence, cb); ++ struct pvr_fence_context *fctx = pvr_fence->fctx; ++ ++ WARN_ON_ONCE(is_pvr_fence(fence)); ++ ++ /* Callback registered by dma_fence_add_callback can be called from an atomic ctx */ ++ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_CTX_ATOMIC); ++ ++ trace_pvr_fence_foreign_signal(pvr_fence); ++ ++ queue_work(fctx->fence_wq, &fctx->check_status_work); ++ ++ PVR_FENCE_TRACE(&pvr_fence->base, ++ "foreign fence %llu#%d signalled (%s)\n", ++ (u64) pvr_fence->fence->context, ++ pvr_fence->fence->seqno, pvr_fence->name); ++ ++ /* Drop the reference on the base fence */ ++ dma_fence_put(&pvr_fence->base); ++} ++ ++/** ++ * pvr_fence_create_from_fence - creates a PVR fence from a fence ++ * @fctx: PVR fence context on which the PVR fence should be created ++ * @sync_checkpoint_ctx: context in which to create sync checkpoints ++ * @fence: fence from which the PVR fence should be created ++ * @fence_fd: fd for the sync file to which the fence belongs. If it doesn't ++ * belong to a sync file then PVRSRV_NO_FENCE should be given ++ * instead. ++ * @name: PVR fence name (used for debugging) ++ * ++ * Creates a PVR fence from an existing fence. If the fence is a foreign fence, ++ * i.e. one that doesn't originate from a PVR fence context, then a new PVR ++ * fence will be created using the specified sync_checkpoint_context. 
++ * Otherwise, a reference will be taken on the underlying fence and the PVR ++ * fence will be returned. ++ * ++ * Once the fence is finished with, pvr_fence_destroy should be called. ++ * ++ * Returns NULL if a PVR fence cannot be created. ++ */ ++ ++struct pvr_fence * ++pvr_fence_create_from_fence(struct pvr_fence_context *fctx, ++ struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx, ++ struct dma_fence *fence, ++ PVRSRV_FENCE fence_fd, ++ const char *name) ++{ ++ struct pvr_fence *pvr_fence = to_pvr_fence(fence); ++ unsigned int seqno; ++ unsigned long flags; ++ PVRSRV_ERROR srv_err; ++ int err; ++ ++ if (pvr_fence) { ++ if (WARN_ON(fence->ops == &pvr_fence_foreign_ops)) ++ return NULL; ++ dma_fence_get(fence); ++ ++ PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n", ++ name); ++ return pvr_fence; ++ } ++ ++ if (!try_module_get(THIS_MODULE)) ++ goto err_exit; ++ ++ /* Note: As kmem_cache is used to allocate pvr_fence objects, ++ * make sure that all members of pvr_fence struct are initialized ++ * here ++ */ ++ pvr_fence = kmem_cache_alloc(pvr_fence_cache, GFP_KERNEL); ++ if (!pvr_fence) ++ goto err_module_put; ++ ++ srv_err = SyncCheckpointAlloc(sync_checkpoint_ctx, ++ SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, ++ fence_fd, ++ name, &pvr_fence->sync_checkpoint); ++ if (srv_err != PVRSRV_OK) ++ goto err_free_pvr_fence; ++ ++ INIT_LIST_HEAD(&pvr_fence->fence_head); ++ INIT_LIST_HEAD(&pvr_fence->signal_head); ++ pvr_fence->fctx = fctx; ++ pvr_fence->fence = dma_fence_get(fence); ++ seqno = pvr_fence_context_seqno_next(fctx); ++ /* Add the seqno to the fence name for easier debugging */ ++ pvr_fence_prepare_name(pvr_fence->name, sizeof(pvr_fence->name), ++ name, seqno); ++ ++ /* ++ * We use the base fence to refcount the PVR fence and to do the ++ * necessary clean up once the refcount drops to 0. ++ */ ++ dma_fence_init(&pvr_fence->base, &pvr_fence_foreign_ops, &fctx->lock, ++ fctx->fence_context, seqno); ++ ++ /* ++ * Take an extra reference on the base fence that gets dropped when the ++ * foreign fence is signalled. ++ */ ++ dma_fence_get(&pvr_fence->base); ++ ++ spin_lock_irqsave(&fctx->list_lock, flags); ++ list_add_tail(&pvr_fence->fence_head, &fctx->fence_list); ++ spin_unlock_irqrestore(&fctx->list_lock, flags); ++ kref_get(&fctx->kref); ++ ++ PVR_FENCE_TRACE(&pvr_fence->base, ++ "created fence from foreign fence %llu#%d (%s)\n", ++ (u64) pvr_fence->fence->context, ++ pvr_fence->fence->seqno, name); ++ ++ err = dma_fence_add_callback(fence, &pvr_fence->cb, ++ pvr_fence_foreign_signal_sync); ++ if (err) { ++ if (err != -ENOENT) { ++ pr_err("%s: failed to add fence callback (err=%d)", ++ __func__, err); ++ goto err_put_ref; ++ } ++ ++ /* ++ * The fence has already signalled so set the sync as signalled. ++ * The "signalled" hwperf packet should be emitted because the ++ * callback won't be called for already signalled fence hence, ++ * PVRSRV_FENCE_FLAG_NONE flag. 
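The error handling just below leans on the dma_fence_add_callback() contract: 0 means the callback is installed and will fire on signal, -ENOENT means the fence has already signalled (the callback will never run), and anything else is a genuine failure. Schematically, using the names from this function:

err = dma_fence_add_callback(fence, &pvr_fence->cb,
			     pvr_fence_foreign_signal_sync);
if (err == -ENOENT) {
	/* Already signalled: do the callback's work inline. */
	pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE);
	dma_fence_put(&pvr_fence->base);
} else if (err) {
	/* Genuine failure: unwind the partially built fence. */
}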
++ */ ++ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE); ++ PVR_FENCE_TRACE(&pvr_fence->base, ++ "foreign fence %llu#%d already signaled (%s)\n", ++ (u64) pvr_fence->fence->context, ++ pvr_fence->fence->seqno, ++ name); ++ dma_fence_put(&pvr_fence->base); ++ } ++ ++ trace_pvr_fence_foreign_create(pvr_fence); ++ ++ return pvr_fence; ++ ++err_put_ref: ++ kref_put(&fctx->kref, pvr_fence_context_destroy_kref); ++ spin_lock_irqsave(&fctx->list_lock, flags); ++ list_del(&pvr_fence->fence_head); ++ spin_unlock_irqrestore(&fctx->list_lock, flags); ++ SyncCheckpointFree(pvr_fence->sync_checkpoint); ++err_free_pvr_fence: ++ kmem_cache_free(pvr_fence_cache, pvr_fence); ++err_module_put: ++ module_put(THIS_MODULE); ++err_exit: ++ return NULL; ++} ++ ++/** ++ * pvr_fence_destroy - destroys a PVR fence ++ * @pvr_fence: PVR fence to destroy ++ * ++ * Destroys a PVR fence. Upon return, the PVR fence may still exist if something ++ * else still references the underlying fence, e.g. a reservation object, or if ++ * software signalling has been enabled and the fence hasn't yet been signalled. ++ */ ++void ++pvr_fence_destroy(struct pvr_fence *pvr_fence) ++{ ++ PVR_FENCE_TRACE(&pvr_fence->base, "destroyed fence (%s)\n", ++ pvr_fence->name); ++ ++ dma_fence_put(&pvr_fence->base); ++} ++ ++/** ++ * pvr_fence_sw_signal - signals a PVR fence sync ++ * @pvr_fence: PVR fence to signal ++ * ++ * Sets the PVR fence sync value to signalled. ++ * ++ * Returns -EINVAL if the PVR fence represents a foreign fence. ++ */ ++int ++pvr_fence_sw_signal(struct pvr_fence *pvr_fence) ++{ ++ if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base)) ++ return -EINVAL; ++ ++ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE); ++ ++ queue_work(pvr_fence->fctx->fence_wq, ++ &pvr_fence->fctx->check_status_work); ++ ++ PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync signalled (%s)\n", ++ pvr_fence->name); ++ ++ return 0; ++} ++ ++/** ++ * pvr_fence_sw_error - errors the sync checkpoint backing a PVR fence ++ * @pvr_fence: PVR fence to error ++ * ++ * Sets the PVR fence sync checkpoint value to errored. ++ * ++ * Returns -EINVAL if the PVR fence represents a foreign fence. ++ */ ++int ++pvr_fence_sw_error(struct pvr_fence *pvr_fence) ++{ ++ if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base)) ++ return -EINVAL; ++ ++ SyncCheckpointError(pvr_fence->sync_checkpoint, PVRSRV_FENCE_FLAG_NONE); ++ PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync errored (%s)\n", ++ pvr_fence->name); ++ ++ return 0; ++} ++ ++int ++pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences, ++ struct SYNC_CHECKPOINT_TAG **fence_checkpoints) ++{ ++ struct SYNC_CHECKPOINT_TAG **next_fence_checkpoint = fence_checkpoints; ++ struct pvr_fence **next_pvr_fence = pvr_fences; ++ int fence_checkpoint_idx; ++ ++ if (nr_fences > 0) { ++ ++ for (fence_checkpoint_idx = 0; fence_checkpoint_idx < nr_fences; ++ fence_checkpoint_idx++) { ++ struct pvr_fence *next_fence = *next_pvr_fence++; ++ *next_fence_checkpoint++ = next_fence->sync_checkpoint; ++ /* Take reference on sync checkpoint (will be dropped ++ * later by kick code) ++ */ ++ SyncCheckpointTakeRef(next_fence->sync_checkpoint); ++ } ++ } ++ ++ return 0; ++} ++ ++struct SYNC_CHECKPOINT_TAG * ++pvr_fence_get_checkpoint(struct pvr_fence *update_fence) ++{ ++ return update_fence->sync_checkpoint; ++} ++ ++/** ++ * pvr_fence_dump_info_on_stalled_ufos - displays debug ++ * information on a native fence associated with any of ++ * the ufos provided. 
This function will be called from ++ * pvr_sync_file.c if the driver determines any GPU work ++ * is stuck waiting for a sync checkpoint representing a ++ * foreign sync to be signalled. ++ * @fctx: fence context whose active fence list is searched ++ * @nr_ufos: number of ufos in vaddrs ++ * @vaddrs: array of FW addresses of UFOs which the ++ * driver is waiting on. ++ * ++ * Outputs debug information to the kernel log for the Linux fences ++ * that would be responsible for signalling the sync ++ * checkpoints indicated by the UFO virtual addresses. ++ * ++ * Returns the number of ufos in the array which were found ++ * to be associated with foreign syncs. ++ */ ++u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx, ++ u32 nr_ufos, u32 *vaddrs) ++{ ++ int our_ufo_ct = 0; ++ struct pvr_fence *pvr_fence; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&fctx->list_lock, flags); ++ /* dump info on any ufos in our active list */ ++ list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) { ++ u32 *this_ufo_vaddr = vaddrs; ++ int ufo_num; ++ DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL; ++ ++ for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++) { ++ struct SYNC_CHECKPOINT_TAG *checkpoint = ++ pvr_fence->sync_checkpoint; ++ const u32 fence_ufo_addr = ++ SyncCheckpointGetFirmwareAddr(checkpoint); ++ ++ if (fence_ufo_addr != this_ufo_vaddr[ufo_num]) ++ continue; ++ ++ /* Dump sync info */ ++ PVR_DUMPDEBUG_LOG(pfnDummy, NULL, ++ "\tSyncID = %d, FWAddr = 0x%08x: TLID = %d (Foreign Fence - [%p] %s)", ++ SyncCheckpointGetId(checkpoint), ++ fence_ufo_addr, ++ SyncCheckpointGetTimeline(checkpoint), ++ pvr_fence->fence, ++ pvr_fence->name); ++ our_ufo_ct++; ++ } ++ } ++ spin_unlock_irqrestore(&fctx->list_lock, flags); ++ return our_ufo_ct; ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_fence.h b/drivers/gpu/drm/img-rogue/pvr_fence.h +new file mode 100644 +index 000000000000..48ce32af0b5c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_fence.h +@@ -0,0 +1,248 @@ ++/* ++ * @File ++ * @Title PowerVR Linux fence interface ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution.
If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#if !defined(__PVR_FENCE_H__) ++#define __PVR_FENCE_H__ ++ ++#include ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) ++static inline void pvr_fence_cleanup(void) ++{ ++} ++#else ++#include "services_kernel_client.h" ++#include "pvr_linux_fence.h" ++#include ++#include ++#include ++ ++struct SYNC_CHECKPOINT_CONTEXT_TAG; ++struct SYNC_CHECKPOINT_TAG; ++ ++/** ++ * pvr_fence_context - PVR fence context used to create and manage PVR fences ++ * @lock: protects the context and fences created on the context ++ * @name: fence context name (used for debugging) ++ * @dbg_request_handle: handle for callback used to dump debug data ++ * @fence_context: fence context with which to associate fences ++ * @fence_seqno: sequence number to use for the next fence ++ * @fence_wq: work queue for signalled fence work ++ * @check_status_work: work item used to inform services when a foreign fence ++ * has signalled ++ * @cmd_complete_handle: handle for callback used to signal fences when fence ++ * syncs are met ++ * @list_lock: protects the active and active foreign lists ++ * @signal_list: list of fences waiting to be signalled ++ * @fence_list: list of fences (used for debugging) ++ * @deferred_free_list: list of fences that we will free when we are no longer ++ * holding spinlocks. The frees get implemented when an update fence is ++ * signalled or the context is freed. 
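One invariant worth spelling out, taken from pvr_fence.c above: every fence holds a reference on its context, so the context cannot be destroyed while fences are live. A sketch of the pairing:

/* At fence creation (pvr_fence_create / pvr_fence_create_from_fence): */
kref_get(&fctx->kref);

/* At fence release (pvr_fence_release / pvr_fence_foreign_release): */
kref_put(&fctx->kref, pvr_fence_context_destroy_kref);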
++ */ ++struct pvr_fence_context { ++ spinlock_t lock; ++ char name[32]; ++ void *dbg_request_handle; ++ u64 fence_context; ++ atomic_t fence_seqno; ++ ++ struct workqueue_struct *fence_wq; ++ struct work_struct check_status_work; ++ ++ void *cmd_complete_handle; ++ ++ spinlock_t list_lock; ++ struct list_head signal_list; ++ struct list_head fence_list; ++ struct list_head deferred_free_list; ++ ++ struct kref kref; ++ struct work_struct destroy_work; ++}; ++ ++/** ++ * pvr_fence - PVR fence that represents both native and foreign fences ++ * @base: fence structure ++ * @fctx: fence context on which this fence was created ++ * @name: fence name (used for debugging) ++ * @fence: pointer to base fence structure or foreign fence ++ * @sync_checkpoint: services sync checkpoint used by hardware ++ * @fence_head: entry on the context fence and deferred free list ++ * @signal_head: entry on the context signal list ++ * @cb: foreign fence callback to set the sync to signalled ++ */ ++struct pvr_fence { ++ struct dma_fence base; ++ struct pvr_fence_context *fctx; ++ char name[32]; ++ ++ struct dma_fence *fence; ++ struct SYNC_CHECKPOINT_TAG *sync_checkpoint; ++ ++ struct list_head fence_head; ++ struct list_head signal_head; ++ struct dma_fence_cb cb; ++ struct rcu_head rcu; ++}; ++ ++extern const struct dma_fence_ops pvr_fence_ops; ++extern const struct dma_fence_ops pvr_fence_foreign_ops; ++ ++static inline bool is_our_fence(struct pvr_fence_context *fctx, ++ struct dma_fence *fence) ++{ ++ return (fence->context == fctx->fence_context); ++} ++ ++static inline bool is_pvr_fence(struct dma_fence *fence) ++{ ++ return ((fence->ops == &pvr_fence_ops) || ++ (fence->ops == &pvr_fence_foreign_ops)); ++} ++ ++static inline struct pvr_fence *to_pvr_fence(struct dma_fence *fence) ++{ ++ if (is_pvr_fence(fence)) ++ return container_of(fence, struct pvr_fence, base); ++ ++ return NULL; ++} ++ ++PVRSRV_ERROR pvr_fence_context_register_dbg(void *dbg_request_handle, ++ void *dev, ++ struct pvr_fence_context *fctx); ++struct pvr_fence_context * ++pvr_fence_foreign_context_create(struct workqueue_struct *fence_status_wq, ++ const char *name); ++struct pvr_fence_context * ++pvr_fence_context_create(void *dev_cookie, ++ struct workqueue_struct *fence_status_wq, ++ const char *name); ++void pvr_fence_context_destroy(struct pvr_fence_context *fctx); ++void pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size); ++ ++struct pvr_fence * ++pvr_fence_create(struct pvr_fence_context *fctx, ++ struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx, ++ int timeline_fd, const char *name); ++struct pvr_fence * ++pvr_fence_create_from_fence(struct pvr_fence_context *fctx, ++ struct SYNC_CHECKPOINT_CONTEXT_TAG *sync_checkpoint_ctx, ++ struct dma_fence *fence, ++ PVRSRV_FENCE fence_fd, ++ const char *name); ++void pvr_fence_destroy(struct pvr_fence *pvr_fence); ++int pvr_fence_sw_signal(struct pvr_fence *pvr_fence); ++int pvr_fence_sw_error(struct pvr_fence *pvr_fence); ++ ++int pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences, ++ struct SYNC_CHECKPOINT_TAG **fence_checkpoints); ++struct SYNC_CHECKPOINT_TAG * ++pvr_fence_get_checkpoint(struct pvr_fence *update_fence); ++ ++void pvr_fence_context_signal_fences_nohw(void *data); ++ ++void pvr_fence_context_free_deferred_callback(void *data); ++ ++u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx, ++ u32 nr_ufos, ++ u32 *vaddrs); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++extern struct 
workqueue_struct *gpFenceDestroyWq; ++#endif ++ ++static inline void pvr_fence_cleanup(void) ++{ ++ /* ++ * Ensure all PVR fence contexts have been destroyed, by flushing ++ * the global workqueue. ++ */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) ++ if (gpFenceDestroyWq) ++ flush_workqueue(gpFenceDestroyWq); ++#else ++ flush_scheduled_work(); ++#endif ++} ++ ++#if defined(PVR_FENCE_DEBUG) ++#define PVR_FENCE_CTX_TRACE(c, fmt, ...) \ ++ do { \ ++ struct pvr_fence_context *__fctx = (c); \ ++ pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ ++ ## __VA_ARGS__); \ ++ } while (0) ++#else ++#define PVR_FENCE_CTX_TRACE(c, fmt, ...) ++#endif ++ ++#define PVR_FENCE_CTX_WARN(c, fmt, ...) \ ++ do { \ ++ struct pvr_fence_context *__fctx = (c); \ ++ pr_warn("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ ++ ## __VA_ARGS__); \ ++ } while (0) ++ ++#define PVR_FENCE_CTX_ERR(c, fmt, ...) \ ++ do { \ ++ struct pvr_fence_context *__fctx = (c); \ ++ pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \ ++ ## __VA_ARGS__); \ ++ } while (0) ++ ++#if defined(PVR_FENCE_DEBUG) ++#define PVR_FENCE_TRACE(f, fmt, ...) \ ++ DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) ++#else ++#define PVR_FENCE_TRACE(f, fmt, ...) ++#endif ++ ++#define PVR_FENCE_WARN(f, fmt, ...) \ ++ DMA_FENCE_WARN(f, "(PVR) " fmt, ## __VA_ARGS__) ++ ++#define PVR_FENCE_ERR(f, fmt, ...) \ ++ DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__) ++ ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */ ++#endif /* !defined(__PVR_FENCE_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_fence_trace.h b/drivers/gpu/drm/img-rogue/pvr_fence_trace.h +new file mode 100644 +index 000000000000..e2f044caad1c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_fence_trace.h +@@ -0,0 +1,225 @@ ++/* ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM pvr_fence ++ ++#if !defined(_TRACE_PVR_FENCE_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_PVR_FENCE_H ++ ++#include ++ ++struct pvr_fence; ++struct pvr_fence_context; ++ ++DECLARE_EVENT_CLASS(pvr_fence_context, ++ ++ TP_PROTO(struct pvr_fence_context *fctx), ++ TP_ARGS(fctx), ++ ++ TP_STRUCT__entry( ++ __string(name, fctx->name) ++ __array(char, val, 128) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(name, fctx->name) ++ pvr_context_value_str(fctx, __entry->val, ++ sizeof(__entry->val)); ++ ), ++ ++ TP_printk("name=%s val=%s", ++ __get_str(name), ++ __entry->val ++ ) ++); ++ ++DEFINE_EVENT(pvr_fence_context, pvr_fence_context_create, ++ TP_PROTO(struct pvr_fence_context *fctx), ++ TP_ARGS(fctx) ++); ++ ++DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy, ++ TP_PROTO(struct pvr_fence_context *fctx), ++ TP_ARGS(fctx) ++); ++ ++DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy_kref, ++ TP_PROTO(struct pvr_fence_context *fctx), ++ TP_ARGS(fctx) ++); ++ ++DEFINE_EVENT(pvr_fence_context, pvr_fence_context_signal_fences, ++ TP_PROTO(struct pvr_fence_context *fctx), ++ TP_ARGS(fctx) ++); ++ ++DECLARE_EVENT_CLASS(pvr_fence, ++ TP_PROTO(struct pvr_fence *fence), ++ TP_ARGS(fence), ++ ++ TP_STRUCT__entry( ++ __string(driver, ++ fence->base.ops->get_driver_name(&fence->base)) ++ __string(timeline, ++ fence->base.ops->get_timeline_name(&fence->base)) ++ __array(char, val, 128) ++ __field(u64, context) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(driver, ++ fence->base.ops->get_driver_name(&fence->base)) ++ __assign_str(timeline, ++ fence->base.ops->get_timeline_name(&fence->base)) ++ fence->base.ops->fence_value_str(&fence->base, ++ __entry->val, sizeof(__entry->val)); ++ __entry->context = fence->base.context; ++ ), ++ ++ TP_printk("driver=%s timeline=%s ctx=%llu val=%s", ++ __get_str(driver), __get_str(timeline), ++ __entry->context, __entry->val ++ ) ++); ++ ++DEFINE_EVENT(pvr_fence, pvr_fence_create, ++ TP_PROTO(struct pvr_fence *fence), ++ TP_ARGS(fence) ++); ++ ++DEFINE_EVENT(pvr_fence, pvr_fence_release, ++ TP_PROTO(struct pvr_fence *fence), ++ TP_ARGS(fence) ++); ++ ++DEFINE_EVENT(pvr_fence, pvr_fence_enable_signaling, ++ TP_PROTO(struct pvr_fence *fence), ++ TP_ARGS(fence) ++); ++ ++DEFINE_EVENT(pvr_fence, pvr_fence_signal_fence, ++ TP_PROTO(struct pvr_fence *fence), ++ TP_ARGS(fence) ++); ++ ++DECLARE_EVENT_CLASS(pvr_fence_foreign, ++ TP_PROTO(struct pvr_fence *fence), ++ TP_ARGS(fence), ++ ++ TP_STRUCT__entry( ++ __string(driver, ++ fence->base.ops->get_driver_name(&fence->base)) ++ __string(timeline, ++ fence->base.ops->get_timeline_name(&fence->base)) ++ __array(char, val, 128) ++ __field(u64, context) ++ __string(foreign_driver, ++ fence->fence->ops->get_driver_name ? ++ fence->fence->ops->get_driver_name(fence->fence) : ++ "unknown") ++ __string(foreign_timeline, ++ fence->fence->ops->get_timeline_name ? 
++ fence->fence->ops->get_timeline_name(fence->fence) : ++ "unknown") ++ __array(char, foreign_val, 128) ++ __field(u64, foreign_context) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(driver, ++ fence->base.ops->get_driver_name(&fence->base)) ++ __assign_str(timeline, ++ fence->base.ops->get_timeline_name(&fence->base)) ++ fence->base.ops->fence_value_str(&fence->base, __entry->val, ++ sizeof(__entry->val)); ++ __entry->context = fence->base.context; ++ __assign_str(foreign_driver, ++ fence->fence->ops->get_driver_name ? ++ fence->fence->ops->get_driver_name(fence->fence) : ++ "unknown") ++ __assign_str(foreign_timeline, ++ fence->fence->ops->get_timeline_name ? ++ fence->fence->ops->get_timeline_name(fence->fence) : ++ "unknown") ++ fence->fence->ops->fence_value_str ? ++ fence->fence->ops->fence_value_str( ++ fence->fence, __entry->foreign_val, ++ sizeof(__entry->foreign_val)) : ++ (void) strlcpy(__entry->foreign_val, ++ "unknown", sizeof(__entry->foreign_val)); ++ __entry->foreign_context = fence->fence->context; ++ ), ++ ++ TP_printk("driver=%s timeline=%s ctx=%llu val=%s foreign: driver=%s timeline=%s ctx=%llu val=%s", ++ __get_str(driver), __get_str(timeline), __entry->context, ++ __entry->val, __get_str(foreign_driver), ++ __get_str(foreign_timeline), __entry->foreign_context, ++ __entry->foreign_val ++ ) ++); ++ ++DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_create, ++ TP_PROTO(struct pvr_fence *fence), ++ TP_ARGS(fence) ++); ++ ++DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_release, ++ TP_PROTO(struct pvr_fence *fence), ++ TP_ARGS(fence) ++); ++ ++DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_signal, ++ TP_PROTO(struct pvr_fence *fence), ++ TP_ARGS(fence) ++); ++ ++#endif /* _TRACE_PVR_FENCE_H */ ++ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++#define TRACE_INCLUDE_PATH . ++ ++/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */ ++#define TRACE_INCLUDE_FILE pvr_fence_trace ++ ++/* This part must be outside protection */ ++#include +diff --git a/drivers/gpu/drm/img-rogue/pvr_gputrace.c b/drivers/gpu/drm/img-rogue/pvr_gputrace.c +new file mode 100644 +index 000000000000..3e65aa3de4a7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_gputrace.c +@@ -0,0 +1,1281 @@ ++/*************************************************************************/ /*! ++@File pvr_gputrace.c ++@Title PVR GPU Trace module Linux implementation ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) ++#include ++#else ++#include ++#endif ++ ++#include "pvrsrv_error.h" ++#include "pvrsrv_apphint.h" ++#include "pvr_debug.h" ++#include "ospvr_gputrace.h" ++#include "rgxhwperf.h" ++#include "rgxtimecorr.h" ++#include "device.h" ++#include "trace_events.h" ++#include "pvrsrv.h" ++#include "pvrsrv_tlstreams.h" ++#include "tlclient.h" ++#include "pvr_debug.h" ++#define CREATE_TRACE_POINTS ++#include "rogue_trace_events.h" ++ ++/****************************************************************************** ++ Module internal implementation ++******************************************************************************/ ++ ++typedef enum { ++ PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0, ++ ++ PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1, ++ PVR_GPUTRACE_SWITCH_TYPE_END = 2, ++ PVR_GPUTRACE_SWITCH_TYPE_SINGLE = 3 ++} PVR_GPUTRACE_SWITCH_TYPE; ++ ++typedef struct RGX_HWPERF_FTRACE_DATA { ++ /* This lock ensures the HWPerf TL stream reading resources are not destroyed ++ * by one thread disabling it while another is reading from it. Keeps the ++ * state and resource create/destroy atomic and consistent. */ ++ POS_LOCK hFTraceResourceLock; ++ ++ IMG_HANDLE hGPUTraceCmdCompleteHandle; ++ IMG_HANDLE hGPUTraceTLStream; ++ IMG_UINT64 ui64LastSampledTimeCorrOSTimeStamp; ++ IMG_UINT32 ui32FTraceLastOrdinal; ++} RGX_HWPERF_FTRACE_DATA; ++ ++/* This lock ensures state change of GPU_TRACING on/off is done atomically */ ++static POS_LOCK ghGPUTraceStateLock; ++static IMG_BOOL gbFTraceGPUEventsEnabled = PVRSRV_APPHINT_ENABLEFTRACEGPU; ++ ++/* This lock ensures that the reference counting operation on the FTrace UFO ++ * events and enable/disable operation on firmware event are performed as ++ * one atomic operation. This should ensure that there are no race conditions ++ * between reference counting and firmware event state change. ++ * See below comment for guiUfoEventRef. ++ */ ++static POS_LOCK ghLockFTraceEventLock; ++ ++/* Multiple FTrace UFO events are reflected in the firmware as only one event. When ++ * we enable FTrace UFO event we want to also at the same time enable it in ++ * the firmware. Since there is a multiple-to-one relation between those events ++ * we count how many FTrace UFO events is enabled. 
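The reference-counting scheme this comment describes reduces to the following sketch; the enable/disable helpers are hypothetical stand-ins for the real firmware filter calls:

/* On FTrace UFO event enable: */
OSLockAcquire(ghLockFTraceEventLock);
if (guiUfoEventRef++ == 0)
	enable_fw_ufo_event();		/* hypothetical stand-in */
OSLockRelease(ghLockFTraceEventLock);

/* On FTrace UFO event disable: */
OSLockAcquire(ghLockFTraceEventLock);
if (--guiUfoEventRef == 0)
	disable_fw_ufo_event();		/* hypothetical stand-in */
OSLockRelease(ghLockFTraceEventLock);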
If at least one event is ++ * enabled we enabled the firmware event. When all FTrace UFO events are disabled ++ * we disable firmware event. */ ++static IMG_UINT guiUfoEventRef; ++ ++/****************************************************************************** ++ Module In-bound API ++******************************************************************************/ ++ ++static PVRSRV_ERROR _GpuTraceDisable( ++ PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_BOOL bDeInit); ++ ++static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE); ++ ++PVRSRV_ERROR PVRGpuTraceSupportInit(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (ghLockFTraceEventLock != NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "FTrace Support is already initialized")); ++ return PVRSRV_OK; ++ } ++ ++ /* common module params initialization */ ++ eError = OSLockCreate(&ghLockFTraceEventLock); ++ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); ++ ++ eError = OSLockCreate(&ghGPUTraceStateLock); ++ PVR_LOG_RETURN_IF_ERROR (eError, "OSLockCreate"); ++ ++ return PVRSRV_OK; ++} ++ ++void PVRGpuTraceSupportDeInit(void) ++{ ++ if (ghGPUTraceStateLock) ++ { ++ OSLockDestroy(ghGPUTraceStateLock); ++ } ++ ++ if (ghLockFTraceEventLock) ++ { ++ OSLockDestroy(ghLockFTraceEventLock); ++ ghLockFTraceEventLock = NULL; ++ } ++} ++ ++PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ RGX_HWPERF_FTRACE_DATA *psData; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ psData = OSAllocZMem(sizeof(RGX_HWPERF_FTRACE_DATA)); ++ psDevInfo->pvGpuFtraceData = psData; ++ PVR_LOG_GOTO_IF_NOMEM(psData, eError, e0); ++ ++ /* We initialise it only once because we want to track if any ++ * packets were dropped. */ ++ psData->ui32FTraceLastOrdinal = IMG_UINT32_MAX - 1; ++ ++ eError = OSLockCreate(&psData->hFTraceResourceLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0); ++ ++ return PVRSRV_OK; ++ ++e0: ++ PVRGpuTraceDeInitDevice(psDeviceNode); ++ return eError; ++} ++ ++void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData; ++ ++ PVRSRV_VZ_RETN_IF_MODE(GUEST); ++ if (psData) ++ { ++ /* first disable the tracing, to free up TL resources */ ++ if (psData->hFTraceResourceLock) ++ { ++ OSLockAcquire(psData->hFTraceResourceLock); ++ _GpuTraceDisable(psDeviceNode->pvDevice, IMG_TRUE); ++ OSLockRelease(psData->hFTraceResourceLock); ++ ++ /* now free all the FTrace resources */ ++ OSLockDestroy(psData->hFTraceResourceLock); ++ } ++ OSFreeMem(psData); ++ psDevInfo->pvGpuFtraceData = NULL; ++ } ++} ++ ++IMG_BOOL PVRGpuTraceIsEnabled(void) ++{ ++ return gbFTraceGPUEventsEnabled; ++} ++ ++void PVRGpuTraceInitIfEnabled(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ if (PVRGpuTraceIsEnabled()) ++ { ++ PVRSRV_ERROR eError = PVRGpuTraceSetEnabled(psDeviceNode, IMG_TRUE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to initialise GPU event tracing" ++ " (%s)", PVRSRVGetErrorString(eError))); ++ } ++ ++ /* below functions will enable FTrace events which in turn will ++ * execute HWPerf callbacks that set appropriate filter values ++ * note: unfortunately the functions don't allow to pass private ++ * data so they enable events for all of the devices ++ * at once, which means that this can happen more than once ++ * if there is more than one device */ ++ ++ /* single events can be enabled by calling trace_set_clr_event() ++ * 
with the event name, e.g.: ++ * trace_set_clr_event("rogue", "rogue_ufo_update", 1) */ ++#if defined(CONFIG_EVENT_TRACING) /* this is a kernel config option */ ++#if defined(ANDROID) || defined(CHROMIUMOS_KERNEL) ++ if (trace_set_clr_event("gpu", NULL, 1)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu\" event" ++ " group")); ++ } ++ else ++ { ++ PVR_LOG(("FTrace events from \"gpu\" group enabled")); ++ } ++#endif /* defined(ANDROID) || defined(CHROMIUMOS_KERNEL) */ ++ if (trace_set_clr_event("rogue", NULL, 1)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"rogue\" event" ++ " group")); ++ } ++ else ++ { ++ PVR_LOG(("FTrace events from \"rogue\" group enabled")); ++ } ++#endif /* defined(CONFIG_EVENT_TRACING) */ ++ } ++} ++ ++/* Caller must now hold hFTraceResourceLock before calling this method. ++ */ ++static PVRSRV_ERROR _GpuTraceEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGX_HWPERF_FTRACE_DATA *psFtraceData; ++ PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode; ++ IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psRgxDevInfo); ++ ++ psFtraceData = psRgxDevInfo->pvGpuFtraceData; ++ ++ PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock)); ++ ++ /* return if already enabled */ ++ if (psFtraceData->hGPUTraceTLStream) ++ { ++ return PVRSRV_OK; ++ } ++ ++#if defined(SUPPORT_RGX) ++ /* Signal FW to enable event generation */ ++ if (psRgxDevInfo->bFirmwareInitialised) ++ { ++ IMG_UINT64 ui64UFOFilter = psRgxDevInfo->ui64HWPerfFilter & ++ (RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO); ++ ++ /* Do not call into PVRSRVRGXCtrlHWPerfKM if we're in GUEST mode. */ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ eError = PVRSRV_OK; ++ } ++ else ++ { ++ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode, ++ RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, ++ RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | ++ ui64UFOFilter); ++ } ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out); ++ } ++ else ++#endif ++ { ++ /* only set filter and exit */ ++ psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_HW_KICKFINISH | ++ ((RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO) & ++ psRgxDevInfo->ui64HWPerfFilter); ++ ++ PVR_DPF((PVR_DBG_WARNING, ++ "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", ++ psRgxDevInfo->ui64HWPerfFilter)); ++ ++ return PVRSRV_OK; ++ } ++ ++ /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ ++ if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", ++ PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32OsDeviceID) < 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to form HWPerf stream name for device %d", ++ __func__, ++ psRgxDevNode->sDevId.i32OsDeviceID)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* Open the TL Stream for HWPerf data consumption */ ++ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, ++ pszHWPerfStreamName, ++ PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, ++ &psFtraceData->hGPUTraceTLStream); ++ PVR_LOG_GOTO_IF_ERROR(eError, "TLClientOpenStream", err_out); ++ ++#if defined(SUPPORT_RGX) ++ if (RGXTimeCorrGetClockSource(psRgxDevNode) != RGXTIMECORR_CLOCK_SCHED) ++ { ++ /* Set clock source for timer correlation data to sched_clock */ ++ psRgxDevInfo->ui32LastClockSource = RGXTimeCorrGetClockSource(psRgxDevNode); ++ RGXTimeCorrSetClockSource(psRgxDevNode, RGXTIMECORR_CLOCK_SCHED); ++ } ++#endif ++ ++ /* Reset the OS timestamp coming from 
the timer correlation data ++ * associated with the latest HWPerf event we processed. ++ */ ++ psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = 0; ++ ++ /* Register a notifier to collect HWPerf data whenever the HW completes ++ * an operation. ++ */ ++ eError = PVRSRVRegisterCmdCompleteNotify( ++ &psFtraceData->hGPUTraceCmdCompleteHandle, ++ &_GpuTraceCmdCompleteNotify, ++ psRgxDevInfo); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream); ++ ++err_out: ++ PVR_DPF_RETURN_RC(eError); ++ ++err_close_stream: ++ TLClientCloseStream(DIRECT_BRIDGE_HANDLE, ++ psFtraceData->hGPUTraceTLStream); ++ psFtraceData->hGPUTraceTLStream = NULL; ++ goto err_out; ++} ++ ++/* Caller must now hold hFTraceResourceLock before calling this method. ++ */ ++static PVRSRV_ERROR _GpuTraceDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bDeInit) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGX_HWPERF_FTRACE_DATA *psFtraceData; ++#if defined(SUPPORT_RGX) ++ PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode; ++#endif ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psRgxDevInfo); ++ ++ psFtraceData = psRgxDevInfo->pvGpuFtraceData; ++ ++ PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock)); ++ ++ /* if FW is not yet initialised, just set filter and exit */ ++ if (!psRgxDevInfo->bFirmwareInitialised) ++ { ++ psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_NONE; ++#if !defined(NO_HARDWARE) ++ PVR_DPF((PVR_DBG_WARNING, ++ "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", ++ psRgxDevInfo->ui64HWPerfFilter)); ++#endif ++ return PVRSRV_OK; ++ } ++ ++ if (NULL == psFtraceData->hGPUTraceTLStream) ++ { ++ /* Tracing already disabled, just return */ ++ return PVRSRV_OK; ++ } ++ ++#if defined(SUPPORT_RGX) ++ if (!bDeInit) ++ { ++ /* Do not call into PVRSRVRGXCtrlHWPerfKM if we are in GUEST mode. */ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ eError = PVRSRV_OK; ++ } ++ else ++ { ++ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode, ++ RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE, ++ (RGX_HWPERF_EVENT_MASK_NONE)); ++ } ++ PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); ++ } ++#endif ++ ++ if (psFtraceData->hGPUTraceCmdCompleteHandle) ++ { ++ /* Tracing is being turned off. Unregister the notifier. */ ++ eError = PVRSRVUnregisterCmdCompleteNotify( ++ psFtraceData->hGPUTraceCmdCompleteHandle); ++ PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify"); ++ psFtraceData->hGPUTraceCmdCompleteHandle = NULL; ++ } ++ ++ if (psFtraceData->hGPUTraceTLStream) ++ { ++ IMG_PBYTE pbTmp = NULL; ++ IMG_UINT32 ui32Tmp = 0; ++ ++ /* We have to flush both the L1 (FW) and L2 (Host) buffers in case there ++ * are some events left unprocessed in this FTrace/systrace "session" ++ * (note that even if we have just disabled HWPerf on the FW some packets ++ * could have been generated and already copied to L2 by the MISR handler). ++ * ++ * With the following calls we will both copy new data to the Host buffer ++ * (done by the producer callback in TLClientAcquireData) and advance ++ * the read offset in the buffer to catch up with the latest events. 
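++	 *
++	 * For illustration only, a sketch of the usual TL consumer pairing
++	 * (same client calls as below; "ProcessPackets" is a hypothetical
++	 * stand-in for the packet handling done elsewhere in this file):
++	 *
++	 *   eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
++	 *                                psFtraceData->hGPUTraceTLStream,
++	 *                                &pbTmp, &ui32Tmp);
++	 *   if (eError == PVRSRV_OK && ui32Tmp > 0)
++	 *   {
++	 *       ProcessPackets(pbTmp, ui32Tmp);
++	 *       TLClientReleaseData(DIRECT_BRIDGE_HANDLE,
++	 *                           psFtraceData->hGPUTraceTLStream);
++	 *   }
++	 *
++	 * Here the explicit release is deliberately skipped:
++	 * TLClientCloseStream() below releases any outstanding acquired data.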
++	 */
++	eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
++	                             psFtraceData->hGPUTraceTLStream,
++	                             &pbTmp, &ui32Tmp);
++	PVR_LOG_IF_ERROR(eError, "TLClientAcquireData");
++
++	/* Let close stream perform the release data on the outstanding acquired data */
++	eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
++	                             psFtraceData->hGPUTraceTLStream);
++	PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
++
++	psFtraceData->hGPUTraceTLStream = NULL;
++	}
++
++#if defined(SUPPORT_RGX)
++	if (psRgxDevInfo->ui32LastClockSource != RGXTIMECORR_CLOCK_SCHED)
++	{
++		RGXTimeCorrSetClockSource(psRgxDevNode, psRgxDevInfo->ui32LastClockSource);
++	}
++#endif
++
++	PVR_DPF_RETURN_RC(eError);
++}
++
++static PVRSRV_ERROR _GpuTraceSetEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
++                                        IMG_BOOL bNewValue)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++	RGX_HWPERF_FTRACE_DATA *psFtraceData;
++
++	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
++
++	PVR_DPF_ENTERED;
++
++	PVR_ASSERT(psRgxDevInfo);
++	psFtraceData = psRgxDevInfo->pvGpuFtraceData;
++
++	/* About to create/destroy FTrace resources, lock critical section
++	 * to avoid HWPerf MISR thread contention.
++	 */
++	OSLockAcquire(psFtraceData->hFTraceResourceLock);
++
++	eError = (bNewValue ? _GpuTraceEnable(psRgxDevInfo)
++	                    : _GpuTraceDisable(psRgxDevInfo, IMG_FALSE));
++
++	OSLockRelease(psFtraceData->hFTraceResourceLock);
++
++	PVR_DPF_RETURN_RC(eError);
++}
++
++static PVRSRV_ERROR _GpuTraceSetEnabledForAllDevices(IMG_BOOL bNewValue)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++	PVRSRV_DEVICE_NODE *psDeviceNode;
++
++	OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock);
++	psDeviceNode = psPVRSRVData->psDeviceNodeList;
++
++	/* enable/disable GPU trace on all devices */
++	while (psDeviceNode)
++	{
++		eError = _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue);
++		if (eError != PVRSRV_OK)
++		{
++			break;
++		}
++		psDeviceNode = psDeviceNode->psNext;
++	}
++
++	OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock);
++
++	PVR_DPF_RETURN_RC(eError);
++}
++
++PVRSRV_ERROR PVRGpuTraceSetEnabled(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                   IMG_BOOL bNewValue)
++{
++	return _GpuTraceSetEnabled(psDeviceNode->pvDevice, bNewValue);
++}
++
++/* ----- HWPerf to FTrace packet processing and events injection ------------ */
++
++static const IMG_CHAR *_HWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType)
++{
++	static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE_LAST+1] = {
++#if defined(RGX_FEATURE_HWPERF_VOLCANIC)
++		"TA3D", "CDM", "RS", "SHG", "TQTDM", "SYNC", "TA", "3D", "LAST"
++#else
++		"TA3D", "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "TA", "3D", "LAST"
++#endif
++	};
++
++	/* cast in case of negative value */
++	if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE_LAST)
++	{
++		return "";
++	}
++
++	return aszKickType[eKickType];
++}
++
++void PVRGpuTraceEnqueueEvent(
++		PVRSRV_DEVICE_NODE *psDevNode,
++		IMG_UINT32 ui32FirmwareCtx,
++		IMG_UINT32 ui32ExtJobRef,
++		IMG_UINT32 ui32IntJobRef,
++		RGX_HWPERF_KICK_TYPE eKickType)
++{
++	const IMG_CHAR *pszKickType = _HWPerfKickTypeToStr(eKickType);
++
++	PVR_DPF((PVR_DBG_MESSAGE, "PVRGpuTraceEnqueueEvent(%s): contextId %u, "
++	        "jobId %u", pszKickType, ui32FirmwareCtx, ui32IntJobRef));
++
++	if (PVRGpuTraceIsEnabled())
++	{
++		trace_rogue_job_enqueue(ui32FirmwareCtx, ui32IntJobRef, ui32ExtJobRef,
++		                        pszKickType);
++	}
++}
++
++static void _GpuTraceWorkSwitch(
++		IMG_UINT64 ui64HWTimestampInOSTime,
++		IMG_UINT32 ui32CtxId,
++		IMG_UINT32 ui32CtxPriority,
++
IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ const IMG_CHAR* pszWorkType, ++ PVR_GPUTRACE_SWITCH_TYPE eSwType) ++{ ++ PVR_ASSERT(pszWorkType); ++ trace_rogue_sched_switch(pszWorkType, eSwType, ui64HWTimestampInOSTime, ++ ui32CtxId, 2-ui32CtxPriority, ui32IntJobRef, ui32ExtJobRef); ++} ++ ++static void _GpuTraceUfo( ++ IMG_UINT64 ui64OSTimestamp, ++ const RGX_HWPERF_UFO_EV eEvType, ++ const IMG_UINT32 ui32CtxId, ++ const IMG_UINT32 ui32ExtJobRef, ++ const IMG_UINT32 ui32IntJobRef, ++ const IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) ++{ ++ switch (eEvType) { ++ case RGX_HWPERF_UFO_EV_UPDATE: ++ trace_rogue_ufo_updates(ui64OSTimestamp, ui32CtxId, ++ ui32ExtJobRef, ui32IntJobRef, ui32UFOCount, puData); ++ break; ++ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: ++ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId, ++ ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount, ++ puData); ++ break; ++ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: ++ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId, ++ ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount, ++ puData); ++ break; ++ case RGX_HWPERF_UFO_EV_CHECK_FAIL: ++ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId, ++ ui32ExtJobRef, ui32IntJobRef, IMG_FALSE, ui32UFOCount, ++ puData); ++ break; ++ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: ++ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId, ++ ui32ExtJobRef, ui32IntJobRef, IMG_TRUE, ui32UFOCount, ++ puData); ++ break; ++ default: ++ break; ++ } ++} ++ ++static void _GpuTraceFirmware( ++ IMG_UINT64 ui64HWTimestampInOSTime, ++ const IMG_CHAR* pszWorkType, ++ PVR_GPUTRACE_SWITCH_TYPE eSwType) ++{ ++ trace_rogue_firmware_activity(ui64HWTimestampInOSTime, pszWorkType, eSwType); ++} ++ ++static void _GpuTraceEventsLost( ++ const RGX_HWPERF_STREAM_ID eStreamId, ++ const IMG_UINT32 ui32LastOrdinal, ++ const IMG_UINT32 ui32CurrOrdinal) ++{ ++ trace_rogue_events_lost(eStreamId, ui32LastOrdinal, ui32CurrOrdinal); ++} ++ ++/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. */ ++static uint64_t CalculateEventTimestamp( ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ uint32_t ui32TimeCorrIndex, ++ uint64_t ui64EventTimestamp) ++{ ++ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; ++ RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; ++ RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex]; ++ uint64_t ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp; ++ uint64_t ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp; ++ uint64_t ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs; ++ uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns; ++ ++ if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp) ++ { ++ /* The previous packet had a time reference (time correlation data) more ++ * recent than the one in the current packet, it means the timer ++ * correlation array wrapped too quickly (buffer too small) and in the ++ * previous call to _GpuTraceUfoEvent we read one of the ++ * newest timer correlations rather than one of the oldest ones. ++ */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: The timestamps computed so far could be " ++ "wrong! 
The time correlation array size should be increased " ++ "to avoid this.", __func__)); ++ } ++ ++ psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp; ++ ++ /* RGX CR timer ticks delta */ ++ deltaRgxTimer = ui64EventTimestamp - ui64CRTimeStamp; ++ /* RGX time delta in nanoseconds */ ++ delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs); ++ /* Calculate OS time of HWPerf event */ ++ ui64EventOSTimestamp = ui64OSTimeStamp + delta_ns; ++ ++ PVR_DPF((PVR_DBG_VERBOSE, "%s: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u", ++ __func__, ui64CRTimeStamp, ui64OSTimeStamp, ++ psTimeCorr->ui32CoreClockSpeed)); ++ ++ return ui64EventOSTimestamp; ++} ++ ++static void _GpuTraceSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName, ++ PVR_GPUTRACE_SWITCH_TYPE eSwType) ++{ ++ IMG_UINT64 ui64Timestamp; ++ RGX_HWPERF_HW_DATA* psHWPerfPktData; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psHWPerfPkt); ++ PVR_ASSERT(pszWorkName); ++ ++ psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); ++ ++ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, ++ psHWPerfPkt->ui64Timestamp); ++ ++ PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceSwitchEvent: %s ui32ExtJobRef=%d, ui32IntJobRef=%d, eSwType=%d", ++ pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType)); ++ ++ _GpuTraceWorkSwitch(ui64Timestamp, ++ psHWPerfPktData->ui32DMContext, ++ psHWPerfPktData->ui32CtxPriority, ++ psHWPerfPktData->ui32ExtJobRef, ++ psHWPerfPktData->ui32IntJobRef, ++ pszWorkName, ++ eSwType); ++ ++ PVR_DPF_RETURN; ++} ++ ++static void _GpuTraceUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) ++{ ++ IMG_UINT64 ui64Timestamp; ++ RGX_HWPERF_UFO_DATA *psHWPerfPktData; ++ IMG_UINT32 ui32UFOCount; ++ RGX_HWPERF_UFO_DATA_ELEMENT *puData; ++ ++ psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); ++ ++ ui32UFOCount = RGX_HWPERF_GET_UFO_STREAMSIZE(psHWPerfPktData->ui32StreamInfo); ++ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) IMG_OFFSET_ADDR(psHWPerfPktData, RGX_HWPERF_GET_UFO_STREAMOFFSET(psHWPerfPktData->ui32StreamInfo)); ++ ++ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, ++ psHWPerfPkt->ui64Timestamp); ++ ++ PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceUfoEvent: ui32ExtJobRef=%d, " ++ "ui32IntJobRef=%d", psHWPerfPktData->ui32ExtJobRef, ++ psHWPerfPktData->ui32IntJobRef)); ++ ++ _GpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType, ++ psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32ExtJobRef, ++ psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData); ++} ++ ++static void _GpuTraceFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName, ++ PVR_GPUTRACE_SWITCH_TYPE eSwType) ++ ++{ ++ uint64_t ui64Timestamp; ++ RGX_HWPERF_FW_DATA *psHWPerfPktData = RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt); ++ ++ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex, ++ psHWPerfPkt->ui64Timestamp); ++ ++ _GpuTraceFirmware(ui64Timestamp, pszWorkName, eSwType); ++} ++ ++static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt) ++{ ++ RGX_HWPERF_EVENT_TYPE eType; ++ RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData; ++ IMG_UINT32 ui32HwEventTypeIndex; ++ static const struct { ++ IMG_CHAR* pszName; ++ PVR_GPUTRACE_SWITCH_TYPE eSwType; ++ } aszHwEventTypeMap[] = { ++#define 
_T(T) PVR_GPUTRACE_SWITCH_TYPE_##T ++ { "BG", _T(BEGIN) }, /* RGX_HWPERF_FW_BGSTART */ ++ { "BG", _T(END) }, /* RGX_HWPERF_FW_BGEND */ ++ { "IRQ", _T(BEGIN) }, /* RGX_HWPERF_FW_IRQSTART */ ++ { "IRQ", _T(END) }, /* RGX_HWPERF_FW_IRQEND */ ++ { "DBG", _T(BEGIN) }, /* RGX_HWPERF_FW_DBGSTART */ ++ { "DBG", _T(END) }, /* RGX_HWPERF_FW_DBGEND */ ++ { "PMOOM_TAPAUSE", _T(END) }, /* RGX_HWPERF_HW_PMOOM_TAPAUSE */ ++ { "TA", _T(BEGIN) }, /* RGX_HWPERF_HW_TAKICK */ ++ { "TA", _T(END) }, /* RGX_HWPERF_HW_TAFINISHED */ ++ { "TQ3D", _T(BEGIN) }, /* RGX_HWPERF_HW_3DTQKICK */ ++ { "3D", _T(BEGIN) }, /* RGX_HWPERF_HW_3DKICK */ ++ { "3D", _T(END) }, /* RGX_HWPERF_HW_3DFINISHED */ ++ { "CDM", _T(BEGIN) }, /* RGX_HWPERF_HW_CDMKICK */ ++ { "CDM", _T(END) }, /* RGX_HWPERF_HW_CDMFINISHED */ ++ { "TQ2D", _T(BEGIN) }, /* RGX_HWPERF_HW_TLAKICK */ ++ { "TQ2D", _T(END) }, /* RGX_HWPERF_HW_TLAFINISHED */ ++ { "3DSPM", _T(BEGIN) }, /* RGX_HWPERF_HW_3DSPMKICK */ ++ { NULL, 0 }, /* RGX_HWPERF_HW_PERIODIC (unsupported) */ ++ { "RTU", _T(BEGIN) }, /* RGX_HWPERF_HW_RTUKICK */ ++ { "RTU", _T(END) }, /* RGX_HWPERF_HW_RTUFINISHED */ ++ { "SHG", _T(BEGIN) }, /* RGX_HWPERF_HW_SHGKICK */ ++ { "SHG", _T(END) }, /* RGX_HWPERF_HW_SHGFINISHED */ ++ { "TQ3D", _T(END) }, /* RGX_HWPERF_HW_3DTQFINISHED */ ++ { "3DSPM", _T(END) }, /* RGX_HWPERF_HW_3DSPMFINISHED */ ++ { "PMOOM_TARESUME", _T(BEGIN) }, /* RGX_HWPERF_HW_PMOOM_TARESUME */ ++ { "TDM", _T(BEGIN) }, /* RGX_HWPERF_HW_TDMKICK */ ++ { "TDM", _T(END) }, /* RGX_HWPERF_HW_TDMFINISHED */ ++ { "NULL", _T(SINGLE) }, /* RGX_HWPERF_HW_NULLKICK */ ++#undef _T ++ }; ++ static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1, ++ "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE"); ++ ++ PVR_ASSERT(psHWPerfPkt); ++ eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt); ++ ++ if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1) ++ { ++ RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt); ++ _GpuTraceEventsLost(eStreamId, ++ psFtraceData->ui32FTraceLastOrdinal, ++ psHWPerfPkt->ui32Ordinal); ++ PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)", ++ eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal)); ++ } ++ ++ psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal; ++ ++ /* Process UFO packets */ ++ if (eType == RGX_HWPERF_UFO) ++ { ++ _GpuTraceUfoEvent(psDevInfo, psHWPerfPkt); ++ return IMG_TRUE; ++ } ++ ++ if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) ++ { ++ /* this ID belongs to range 0, so index directly in range 0 */ ++ ui32HwEventTypeIndex = eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; ++ } ++ else ++ { ++ /* this ID belongs to range 1, so first index in range 1 and skip number of slots used up for range 0 */ ++ ui32HwEventTypeIndex = (eType - RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE) + ++ (RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE + 1); ++ } ++ ++ if (ui32HwEventTypeIndex >= ARRAY_SIZE(aszHwEventTypeMap)) ++ goto err_unsupported; ++ ++ if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL) ++ { ++ /* Not supported map entry, ignore event */ ++ goto err_unsupported; ++ } ++ ++ if (HWPERF_PACKET_IS_HW_TYPE(eType)) ++ { ++ if (aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType == PVR_GPUTRACE_SWITCH_TYPE_SINGLE) ++ { ++ _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, ++ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, ++ PVR_GPUTRACE_SWITCH_TYPE_BEGIN); ++ _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, ++ 
aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, ++ PVR_GPUTRACE_SWITCH_TYPE_END); ++ } ++ else ++ { ++ _GpuTraceSwitchEvent(psDevInfo, psHWPerfPkt, ++ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, ++ aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); ++ } ++ } ++ else if (HWPERF_PACKET_IS_FW_TYPE(eType)) ++ { ++ _GpuTraceFirmwareEvent(psDevInfo, psHWPerfPkt, ++ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName, ++ aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType); ++ } ++ else ++ { ++ goto err_unsupported; ++ } ++ ++ return IMG_TRUE; ++ ++err_unsupported: ++ PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType)); ++ return IMG_FALSE; ++} ++ ++ ++static void _GpuTraceProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo, ++ void *pBuffer, IMG_UINT32 ui32ReadLen) ++{ ++ IMG_UINT32 ui32TlPackets = 0; ++ IMG_UINT32 ui32HWPerfPackets = 0; ++ IMG_UINT32 ui32HWPerfPacketsSent = 0; ++ void *pBufferEnd; ++ PVRSRVTL_PPACKETHDR psHDRptr; ++ PVRSRVTL_PACKETTYPE ui16TlType; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psDevInfo); ++ PVR_ASSERT(pBuffer); ++ PVR_ASSERT(ui32ReadLen); ++ ++ /* Process the TL Packets ++ */ ++ pBufferEnd = IMG_OFFSET_ADDR(pBuffer, ui32ReadLen); ++ psHDRptr = GET_PACKET_HDR(pBuffer); ++ while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd ) ++ { ++ ui16TlType = GET_PACKET_TYPE(psHDRptr); ++ if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA) ++ { ++ IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr); ++ if (0 == ui16DataLen) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "_GpuTraceProcessPackets: ZERO Data in TL data packet: %p", psHDRptr)); ++ } ++ else ++ { ++ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt; ++ RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd; ++ ++ /* Check for lost hwperf data packets */ ++ psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen); ++ psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)); ++ do ++ { ++ if (ValidAndEmitFTraceEvent(psDevInfo, psHWPerfPkt)) ++ { ++ ui32HWPerfPacketsSent++; ++ } ++ ui32HWPerfPackets++; ++ psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt); ++ } ++ while (psHWPerfPkt < psHWPerfEnd); ++ } ++ } ++ else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Indication that the transport buffer was full")); ++ } ++ else ++ { ++ /* else Ignore padding packet type and others */ ++ PVR_DPF((PVR_DBG_MESSAGE, "_GpuTraceProcessPackets: Ignoring TL packet, type %d", ui16TlType )); ++ } ++ ++ psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr); ++ ui32TlPackets++; ++ } ++ ++ PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceProcessPackets: TL " ++ "Packets processed %03d, HWPerf packets %03d, sent %03d", ++ ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent)); ++ ++ PVR_DPF_RETURN; ++} ++ ++ ++static void _GpuTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) ++{ ++ PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle; ++ RGX_HWPERF_FTRACE_DATA* psFtraceData; ++ PVRSRV_ERROR eError; ++ IMG_PBYTE pBuffer; ++ IMG_UINT32 ui32ReadLen; ++ IMG_BOOL bFTraceLockAcquired = IMG_FALSE; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psDeviceInfo != NULL); ++ ++ psFtraceData = psDeviceInfo->pvGpuFtraceData; ++ ++ /* Command-complete notifiers can run concurrently. If this is ++ * happening, just bail out and let the previous call finish. ++ * This is ok because we can process the queued packets on the next call. 
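++	 *
++	 * A minimal sketch of this try-lock pattern ("DrainPackets" is a
++	 * hypothetical stand-in for the acquire/process/release sequence
++	 * performed below):
++	 *
++	 *   if (!OSTryLockAcquire(psFtraceData->hFTraceResourceLock))
++	 *       return;   -- a concurrent notifier call is already draining
++	 *   DrainPackets();
++	 *   OSLockRelease(psFtraceData->hFTraceResourceLock);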
++ */ ++ bFTraceLockAcquired = OSTryLockAcquire(psFtraceData->hFTraceResourceLock); ++ if (IMG_FALSE == bFTraceLockAcquired) ++ { ++ PVR_DPF_RETURN; ++ } ++ ++ /* If this notifier is called, it means the TL resources will be valid at-least ++ * until the end of this call, since the DeInit function will wait on the hFTraceResourceLock ++ * to clean-up the TL resources and un-register the notifier, so just assert here. ++ */ ++ PVR_ASSERT(psFtraceData->hGPUTraceTLStream); ++ ++ /* If we have a valid stream attempt to acquire some data */ ++ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen); ++ if (eError == PVRSRV_OK) ++ { ++ /* Process the HWPerf packets and release the data */ ++ if (ui32ReadLen > 0) ++ { ++ PVR_DPF((PVR_DBG_VERBOSE, "_GpuTraceCmdCompleteNotify: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen)); ++ ++ /* Process the transport layer data for HWPerf packets... */ ++ _GpuTraceProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen); ++ ++ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "TLClientReleaseData"); ++ ++ /* Serious error, disable FTrace GPU events */ ++ ++ /* Release TraceLock so we always have the locking ++ * order BridgeLock->TraceLock to prevent AB-BA deadlocks*/ ++ OSLockRelease(psFtraceData->hFTraceResourceLock); ++ OSLockAcquire(psFtraceData->hFTraceResourceLock); ++ _GpuTraceDisable(psDeviceInfo, IMG_FALSE); ++ OSLockRelease(psFtraceData->hFTraceResourceLock); ++ goto out; ++ ++ } ++ } /* else no data, ignore */ ++ } ++ else if (eError != PVRSRV_ERROR_TIMEOUT) ++ { ++ PVR_LOG_ERROR(eError, "TLClientAcquireData"); ++ } ++ if (bFTraceLockAcquired) ++ { ++ OSLockRelease(psFtraceData->hFTraceResourceLock); ++ } ++out: ++ PVR_DPF_RETURN; ++} ++ ++/* ----- AppHint interface -------------------------------------------------- */ ++ ++static PVRSRV_ERROR _GpuTraceIsEnabledCallback( ++ const PVRSRV_DEVICE_NODE *device, ++ const void *private_data, ++ IMG_BOOL *value) ++{ ++ PVR_UNREFERENCED_PARAMETER(device); ++ PVR_UNREFERENCED_PARAMETER(private_data); ++ ++ *value = gbFTraceGPUEventsEnabled; ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR _GpuTraceSetEnabledCallback( ++ const PVRSRV_DEVICE_NODE *device, ++ const void *private_data, ++ IMG_BOOL value) ++{ ++ PVR_UNREFERENCED_PARAMETER(device); ++ ++ /* Lock down the state to avoid concurrent writes */ ++ OSLockAcquire(ghGPUTraceStateLock); ++ ++ if (value != gbFTraceGPUEventsEnabled) ++ { ++ PVRSRV_ERROR eError; ++ if ((eError = _GpuTraceSetEnabledForAllDevices(value)) == PVRSRV_OK) ++ { ++ PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED")); ++ gbFTraceGPUEventsEnabled = value; ++ } ++ else ++ { ++ PVR_TRACE(("FAILED to %s GPU FTrace", value ? "enable" : "disable")); ++ /* On failure, partial enable/disable might have resulted. ++ * Try best to restore to previous state. Ignore error */ ++ _GpuTraceSetEnabledForAllDevices(gbFTraceGPUEventsEnabled); ++ ++ OSLockRelease(ghGPUTraceStateLock); ++ return eError; ++ } ++ } ++ else ++ { ++ PVR_TRACE(("GPU FTrace already %s!", value ? 
"enabled" : "disabled")); ++ } ++ ++ OSLockRelease(ghGPUTraceStateLock); ++ ++ return PVRSRV_OK; ++} ++ ++void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFTraceGPU, ++ _GpuTraceIsEnabledCallback, ++ _GpuTraceSetEnabledCallback, ++ psDeviceNode, NULL); ++} ++ ++/* ----- FTrace event callbacks -------------------------------------------- */ ++ ++void PVRGpuTraceEnableUfoCallback(void) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psRgxDevInfo; ++ PVRSRV_ERROR eError; ++#endif ++ ++ /* Lock down events state, for consistent value of guiUfoEventRef */ ++ OSLockAcquire(ghLockFTraceEventLock); ++ if (guiUfoEventRef++ == 0) ++ { ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = psPVRSRVData->psDeviceNodeList; ++ ++ /* make sure UFO events are enabled on all rogue devices */ ++ while (psDeviceNode) ++ { ++#if defined(SUPPORT_RGX) ++ IMG_UINT64 ui64Filter; ++ ++ psRgxDevInfo = psDeviceNode->pvDevice; ++ ui64Filter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) | ++ psRgxDevInfo->ui64HWPerfFilter; ++ /* Small chance exists that ui64HWPerfFilter can be changed here and ++ * the newest filter value will be changed to the old one + UFO event. ++ * This is not a critical problem. */ ++ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, ++ IMG_FALSE, ui64Filter); ++ if (eError == PVRSRV_ERROR_NOT_INITIALISED) ++ { ++ /* If we land here that means that the FW is not initialised yet. ++ * We stored the filter and it will be passed to the firmware ++ * during its initialisation phase. So ignore. */ ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32OsDeviceID)); ++ } ++#endif ++ psDeviceNode = psDeviceNode->psNext; ++ } ++ ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ } ++ OSLockRelease(ghLockFTraceEventLock); ++} ++ ++void PVRGpuTraceDisableUfoCallback(void) ++{ ++#if defined(SUPPORT_RGX) ++ PVRSRV_ERROR eError; ++#endif ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ ++ /* We have to check if lock is valid because on driver unload ++ * PVRGpuTraceSupportDeInit is called before kernel disables the ftrace ++ * events. This means that the lock will be destroyed before this callback ++ * is called. ++ * We can safely return if that situation happens because driver will be ++ * unloaded so we don't care about HWPerf state anymore. */ ++ if (ghLockFTraceEventLock == NULL) ++ return; ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = psPVRSRVData->psDeviceNodeList; ++ ++ /* Lock down events state, for consistent value of guiUfoEventRef */ ++ OSLockAcquire(ghLockFTraceEventLock); ++ if (--guiUfoEventRef == 0) ++ { ++ /* make sure UFO events are disabled on all rogue devices */ ++ while (psDeviceNode) ++ { ++#if defined(SUPPORT_RGX) ++ IMG_UINT64 ui64Filter; ++ PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice; ++ ++ ui64Filter = ~(RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) & ++ psRgxDevInfo->ui64HWPerfFilter; ++ /* Small chance exists that ui64HWPerfFilter can be changed here and ++ * the newest filter value will be changed to the old one + UFO event. ++ * This is not a critical problem. 
*/ ++ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, ++ IMG_FALSE, ui64Filter); ++ if (eError == PVRSRV_ERROR_NOT_INITIALISED) ++ { ++ /* If we land here that means that the FW is not initialised yet. ++ * We stored the filter and it will be passed to the firmware ++ * during its initialisation phase. So ignore. */ ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d", ++ psDeviceNode->sDevId.i32OsDeviceID)); ++ } ++#endif ++ ++ psDeviceNode = psDeviceNode->psNext; ++ } ++ } ++ OSLockRelease(ghLockFTraceEventLock); ++ ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++} ++ ++void PVRGpuTraceEnableFirmwareActivityCallback(void) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psRgxDevInfo; ++ uint64_t ui64Filter, ui64FWEventsFilter = 0; ++ int i; ++ ++ for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; ++ i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) ++ { ++ ui64FWEventsFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i); ++ } ++#endif ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = psPVRSRVData->psDeviceNodeList; ++ ++ OSLockAcquire(ghLockFTraceEventLock); ++ /* Enable all FW events on all the devices */ ++ while (psDeviceNode) ++ { ++#if defined(SUPPORT_RGX) ++ PVRSRV_ERROR eError; ++ psRgxDevInfo = psDeviceNode->pvDevice; ++ ui64Filter = psRgxDevInfo->ui64HWPerfFilter | ui64FWEventsFilter; ++ ++ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, ++ IMG_FALSE, ui64Filter); ++ if ((eError != PVRSRV_OK) && !PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware" ++ " task timings (%s).", PVRSRVGetErrorString(eError))); ++ } ++#endif ++ psDeviceNode = psDeviceNode->psNext; ++ } ++ OSLockRelease(ghLockFTraceEventLock); ++ ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++} ++ ++void PVRGpuTraceDisableFirmwareActivityCallback(void) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++#if defined(SUPPORT_RGX) ++ IMG_UINT64 ui64FWEventsFilter = ~0; ++ int i; ++#endif ++ ++ /* We have to check if lock is valid because on driver unload ++ * PVRGpuTraceSupportDeInit is called before kernel disables the ftrace ++ * events. This means that the lock will be destroyed before this callback ++ * is called. ++ * We can safely return if that situation happens because driver will be ++ * unloaded so we don't care about HWPerf state anymore. 
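++	 *
++	 * As an aside, the per-event loop below is equivalent to clearing one
++	 * contiguous run of bits. A closed-form sketch (FIRST/LAST abbreviate
++	 * RGX_HWPERF_FW_EVENT_RANGE_{FIRST,LAST}_TYPE, and we assume
++	 * RGX_HWPERF_EVENT_MASK_VALUE(e) is the single-bit mask (1ULL << (e))):
++	 *
++	 *   IMG_UINT64 ui64Range = ((1ULL << (LAST - FIRST + 1)) - 1) << FIRST;
++	 *   ui64FWEventsFilter = ~ui64Range;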
*/ ++ if (ghLockFTraceEventLock == NULL) ++ return; ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = psPVRSRVData->psDeviceNodeList; ++ ++#if defined(SUPPORT_RGX) ++ for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE; ++ i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++) ++ { ++ ui64FWEventsFilter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i); ++ } ++#endif ++ ++ OSLockAcquire(ghLockFTraceEventLock); ++ ++ /* Disable all FW events on all the devices */ ++ while (psDeviceNode) ++ { ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice; ++ IMG_UINT64 ui64Filter = psRgxDevInfo->ui64HWPerfFilter & ui64FWEventsFilter; ++ ++ if ((PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW, ++ IMG_FALSE, ui64Filter) != PVRSRV_OK) && ++ !PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings.")); ++ } ++#endif ++ psDeviceNode = psDeviceNode->psNext; ++ } ++ ++ OSLockRelease(ghLockFTraceEventLock); ++ ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++} ++ ++/****************************************************************************** ++ End of file (pvr_gputrace.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/pvr_intrinsics.h b/drivers/gpu/drm/img-rogue/pvr_intrinsics.h +new file mode 100644 +index 000000000000..410a2f5a50b5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_intrinsics.h +@@ -0,0 +1,70 @@ ++/*************************************************************************/ /*! ++@File ++@Title Intrinsics definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_INTRINSICS_H ++#define PVR_INTRINSICS_H ++ ++/* PVR_CTZLL: ++ * Count the number of trailing zeroes in a long long integer ++ */ ++ ++#if defined(__GNUC__) ++#if defined(__x86_64__) ++ ++ #define PVR_CTZLL __builtin_ctzll ++#endif ++#endif ++ ++/* PVR_CLZLL: ++ * Count the number of leading zeroes in a long long integer ++ */ ++ ++#if defined(__GNUC__) ++#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \ ++ defined(__arm__) || defined(__mips) ++ ++#define PVR_CLZLL __builtin_clzll ++ ++#endif ++#endif ++ ++#endif /* PVR_INTRINSICS_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_ion_stats.h b/drivers/gpu/drm/img-rogue/pvr_ion_stats.h +new file mode 100644 +index 000000000000..c34180785453 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_ion_stats.h +@@ -0,0 +1,80 @@ ++/*************************************************************************/ /*! ++@File ++@Title Functions for recording ION memory stats. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_ION_STATS_H ++#define PVR_ION_STATS_H ++ ++#include "pvrsrv_error.h" ++#include "img_defs.h" ++ ++struct dma_buf; ++ ++#if defined(PVRSRV_ENABLE_PVR_ION_STATS) ++PVRSRV_ERROR PVRSRVIonStatsInitialise(void); ++ ++void PVRSRVIonStatsDestroy(void); ++ ++void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf); ++ ++void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf); ++#else ++static INLINE PVRSRV_ERROR PVRSRVIonStatsInitialise(void) ++{ ++ return PVRSRV_OK; ++} ++ ++static INLINE void PVRSRVIonStatsDestroy(void) ++{ ++} ++ ++static INLINE void PVRSRVIonAddMemAllocRecord(struct dma_buf *psDmaBuf) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDmaBuf); ++} ++ ++static INLINE void PVRSRVIonRemoveMemAllocRecord(struct dma_buf *psDmaBuf) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDmaBuf); ++} ++#endif /* defined(PVRSRV_ENABLE_PVR_ION_STATS) */ ++ ++#endif /* PVR_ION_STATS_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_linux_fence.h b/drivers/gpu/drm/img-rogue/pvr_linux_fence.h +new file mode 100644 +index 000000000000..b9c542a26a65 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_linux_fence.h +@@ -0,0 +1,103 @@ ++/* ++ * @File ++ * @Title PowerVR Linux fence compatibility header ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#if !defined(__PVR_LINUX_FENCE_H__)
++#define __PVR_LINUX_FENCE_H__
++
++#include <linux/version.h>
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
++	!defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE)
++#include <linux/fence.h>
++#else
++#include <linux/dma-fence.h>
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
++	!defined(CHROMIUMOS_KERNEL_HAS_DMA_FENCE)
++/* Structures */
++#define dma_fence fence
++#define dma_fence_array fence_array
++#define dma_fence_cb fence_cb
++#define dma_fence_ops fence_ops
++
++/* Defines and Enums */
++#define DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
++#define DMA_FENCE_FLAG_SIGNALED_BIT FENCE_FLAG_SIGNALED_BIT
++#define DMA_FENCE_FLAG_USER_BITS FENCE_FLAG_USER_BITS
++
++#define DMA_FENCE_ERR FENCE_ERR
++#define DMA_FENCE_TRACE FENCE_TRACE
++#define DMA_FENCE_WARN FENCE_WARN
++
++/* Functions */
++#define dma_fence_add_callback fence_add_callback
++#define dma_fence_context_alloc fence_context_alloc
++#define dma_fence_default_wait fence_default_wait
++#define dma_fence_is_signaled fence_is_signaled
++#define dma_fence_enable_sw_signaling fence_enable_sw_signaling
++#define dma_fence_free fence_free
++#define dma_fence_get fence_get
++#define dma_fence_get_rcu fence_get_rcu
++#define dma_fence_init fence_init
++#define dma_fence_is_array fence_is_array
++#define dma_fence_put fence_put
++#define dma_fence_signal fence_signal
++#define dma_fence_wait fence_wait
++#define to_dma_fence_array to_fence_array
++
++static inline signed long
++dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
++{
++	signed long lret;
++
++	lret = fence_wait_timeout(fence, intr, timeout);
++	if (lret || timeout)
++		return lret;
++
++	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? 1 : 0;
++}
++
++#endif
++
++#endif /* !defined(__PVR_LINUX_FENCE_H__) */
+diff --git a/drivers/gpu/drm/img-rogue/pvr_notifier.c b/drivers/gpu/drm/img-rogue/pvr_notifier.c
+new file mode 100644
+index 000000000000..583d4c517d2c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pvr_notifier.c
+@@ -0,0 +1,647 @@
++/*************************************************************************/ /*!
++@File
++@Title          PowerVR notifier interface
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "allocmem.h" ++#include "dllist.h" ++ ++#include "device.h" ++#include "pvr_notifier.h" ++#include "pvrsrv.h" ++#include "pvrversion.h" ++#include "connection_server.h" ++ ++#include "osfunc.h" ++#include "sofunc_pvr.h" ++ ++#define PVR_DUMP_DRIVER_INFO(x, y) \ ++ PVR_DUMPDEBUG_LOG("%s info: %d.%d @ %8d (%s) build options: 0x%08x", \ ++ (x), \ ++ PVRVERSION_UNPACK_MAJ((y).ui32BuildVersion), \ ++ PVRVERSION_UNPACK_MIN((y).ui32BuildVersion), \ ++ (y).ui32BuildRevision, \ ++ (BUILD_TYPE_DEBUG == (y).ui32BuildType) ? "debug":"release", \ ++ (y).ui32BuildOptions); ++ ++#if !defined(WINDOW_SYSTEM) ++#define WINDOW_SYSTEM "Unknown" ++#endif ++ ++#define IS_DECLARED(x) (x[0] != '\0') ++ ++/*************************************************************************/ /*! 
Command Complete Notifier Interface
++*/ /**************************************************************************/
++
++typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG
++{
++	PVRSRV_CMDCOMP_HANDLE hCmdCompHandle;
++	PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify;
++	DLLIST_NODE sListNode;
++} PVRSRV_CMDCOMP_NOTIFY;
++
++/* Head of the list of callbacks called when command complete happens */
++static DLLIST_NODE g_sCmdCompNotifyHead;
++static POSWR_LOCK g_hCmdCompNotifyLock;
++
++PVRSRV_ERROR
++PVRSRVCmdCompleteInit(void)
++{
++	PVRSRV_ERROR eError;
++
++	eError = OSWRLockCreate(&g_hCmdCompNotifyLock);
++	PVR_RETURN_IF_ERROR(eError);
++
++	dllist_init(&g_sCmdCompNotifyHead);
++
++	return PVRSRV_OK;
++}
++
++void
++PVRSRVCmdCompleteDeinit(void)
++{
++	/* Check that all notify functions have been unregistered */
++	if (!dllist_is_empty(&g_sCmdCompNotifyHead))
++	{
++		PDLLIST_NODE psNode;
++
++		PVR_DPF((PVR_DBG_ERROR,
++		        "%s: Command complete notify list is not empty!", __func__));
++
++		/* Clean up any stragglers */
++		psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
++		while (psNode)
++		{
++			PVRSRV_CMDCOMP_NOTIFY *psNotify;
++
++			dllist_remove_node(psNode);
++
++			psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
++			OSFreeMem(psNotify);
++
++			psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
++		}
++	}
++
++	if (g_hCmdCompNotifyLock)
++	{
++		OSWRLockDestroy(g_hCmdCompNotifyLock);
++	}
++}
++
++PVRSRV_ERROR
++PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
++                                PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
++                                PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
++{
++	PVRSRV_CMDCOMP_NOTIFY *psNotify;
++
++	PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify");
++	PVR_LOG_RETURN_IF_INVALID_PARAM(pfnCmdCompleteNotify, "pfnCmdCompleteNotify");
++	PVR_LOG_RETURN_IF_INVALID_PARAM(hCmdCompHandle, "hCmdCompHandle");
++
++	psNotify = OSAllocMem(sizeof(*psNotify));
++	PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify");
++
++	/* Set-up the notify data */
++	psNotify->hCmdCompHandle = hCmdCompHandle;
++	psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify;
++
++	/* Add it to the list of Notify functions */
++	OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
++	dllist_add_to_tail(&g_sCmdCompNotifyHead, &psNotify->sListNode);
++	OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
++
++	*phNotify = psNotify;
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify)
++{
++	PVRSRV_CMDCOMP_NOTIFY *psNotify;
++
++	psNotify = (PVRSRV_CMDCOMP_NOTIFY *) hNotify;
++	PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "hNotify");
++
++	OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
++	dllist_remove_node(&psNotify->sListNode);
++	OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
++
++	OSFreeMem(psNotify);
++
++	return PVRSRV_OK;
++}
++
++void
++PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle)
++{
++#if !defined(NO_HARDWARE)
++	DLLIST_NODE *psNode, *psNext;
++
++	/* Call notify callbacks to check if blocked work items can now proceed */
++	OSWRLockAcquireRead(g_hCmdCompNotifyLock);
++	dllist_foreach_node(&g_sCmdCompNotifyHead, psNode, psNext)
++	{
++		PVRSRV_CMDCOMP_NOTIFY *psNotify =
++			IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
++
++		if (hCmdCompCallerHandle != psNotify->hCmdCompHandle)
++		{
++			psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle);
++		}
++	}
++	OSWRLockReleaseRead(g_hCmdCompNotifyLock);
++#endif
++}
++
++inline void
++PVRSRVSignalGlobalEO(void)
++{
++	PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++
++	if (psPVRSRVData->hGlobalEventObject)
++	{
++		OSEventObjectSignal(psPVRSRVData->hGlobalEventObject);
++	}
++}
++
++inline void
++PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle)
++{
++	PVRSRVNotifyCommandCompletion(hCmdCompCallerHandle);
++	PVRSRVSignalGlobalEO();
++}
++
++/*************************************************************************/ /*!
++Debug Notifier Interface
++*/ /**************************************************************************/
++
++/* Lockdep sees both locks as the same class because they share the same
++ * structure, and thus warns about a possible deadlock (a false positive);
++ * using the nested API we can supply separate lock classes.
++ */
++#define DN_LOCKCLASS_DRIVER 0
++#define DN_LOCKCLASS_DEVICE 1
++
++typedef struct DEBUG_REQUEST_ENTRY_TAG
++{
++	IMG_UINT32 ui32RequesterID;
++	DLLIST_NODE sListHead;
++} DEBUG_REQUEST_ENTRY;
++
++typedef struct DEBUG_REQUEST_TABLE_TAG
++{
++	POSWR_LOCK hLock;
++	DEBUG_REQUEST_ENTRY asEntry[1];
++} DEBUG_REQUEST_TABLE;
++
++typedef struct DEBUG_REQUEST_NOTIFY_TAG
++{
++	IMG_HANDLE hDebugTable;
++	PVRSRV_DBGREQ_HANDLE hDbgRequestHandle;
++	PFN_DBGREQ_NOTIFY pfnDbgRequestNotify;
++	IMG_UINT32 ui32RequesterID;
++	DLLIST_NODE sListNode;
++} DEBUG_REQUEST_NOTIFY;
++
++static DEBUG_REQUEST_TABLE *g_psDriverDebugTable;
++
++static const IMG_UINT32 g_aui32DebugOrderTable[] = {
++	DEBUG_REQUEST_SRV,
++	DEBUG_REQUEST_RGX,
++	DEBUG_REQUEST_SYS,
++	DEBUG_REQUEST_APPHINT,
++	DEBUG_REQUEST_HTB,
++	DEBUG_REQUEST_DC,
++	DEBUG_REQUEST_SYNCCHECKPOINT,
++	DEBUG_REQUEST_SYNCTRACKING,
++	DEBUG_REQUEST_ANDROIDSYNC,
++	DEBUG_REQUEST_FALLBACKSYNC,
++	DEBUG_REQUEST_LINUXFENCE
++};
++static const IMG_UINT32 g_ui32DebugOrderTableReqCount = ARRAY_SIZE(g_aui32DebugOrderTable);
++
++static PVRSRV_ERROR
++_RegisterDebugTableI(DEBUG_REQUEST_TABLE **ppsDebugTable)
++{
++	DEBUG_REQUEST_TABLE *psDebugTable;
++	IMG_UINT32 i;
++	PVRSRV_ERROR eError;
++
++	if (*ppsDebugTable)
++	{
++		return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED;
++	}
++
++	psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) +
++	        (sizeof(DEBUG_REQUEST_ENTRY) * (g_ui32DebugOrderTableReqCount-1)));
++	PVR_RETURN_IF_NOMEM(psDebugTable);
++
++	eError = OSWRLockCreate(&psDebugTable->hLock);
++	PVR_GOTO_IF_ERROR(eError, ErrorFreeDebugTable);
++
++	/* Init the list heads */
++	for (i = 0; i < g_ui32DebugOrderTableReqCount; i++)
++	{
++		psDebugTable->asEntry[i].ui32RequesterID = g_aui32DebugOrderTable[i];
++		dllist_init(&psDebugTable->asEntry[i].sListHead);
++	}
++
++	*ppsDebugTable = psDebugTable;
++
++	return PVRSRV_OK;
++
++ErrorFreeDebugTable:
++	OSFreeMem(psDebugTable);
++
++	return eError;
++}
++
++PVRSRV_ERROR
++PVRSRVRegisterDeviceDbgTable(PVRSRV_DEVICE_NODE *psDevNode)
++{
++	return _RegisterDebugTableI((DEBUG_REQUEST_TABLE**)&psDevNode->hDebugTable);
++}
++
++PVRSRV_ERROR
++PVRSRVRegisterDriverDbgTable(void)
++{
++	return _RegisterDebugTableI(&g_psDriverDebugTable);
++}
++
++static void _UnregisterDbgTableI(DEBUG_REQUEST_TABLE **ppsDebugTable)
++{
++	DEBUG_REQUEST_TABLE *psDebugTable;
++	IMG_UINT32 i;
++
++	PVR_ASSERT(*ppsDebugTable);
++	psDebugTable = *ppsDebugTable;
++	*ppsDebugTable = NULL;
++
++	for (i = 0; i < g_ui32DebugOrderTableReqCount; i++)
++	{
++		if (!dllist_is_empty(&psDebugTable->asEntry[i].sListHead))
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: Found registered callback(s) on %d",
++			        __func__, i));
++		}
++	}
++
++	OSWRLockDestroy(psDebugTable->hLock);
++	psDebugTable->hLock = NULL;
++
++	OSFreeMem(psDebugTable);
++}
++
++void
++PVRSRVUnregisterDeviceDbgTable(PVRSRV_DEVICE_NODE *psDevNode)
++{
++
_UnregisterDbgTableI((DEBUG_REQUEST_TABLE**)&psDevNode->hDebugTable); ++ PVR_ASSERT(!psDevNode->hDebugTable); ++} ++ ++void ++PVRSRVUnregisterDriverDbgTable(void) ++{ ++ _UnregisterDbgTableI(&g_psDriverDebugTable); ++ PVR_ASSERT(!g_psDriverDebugTable); ++} ++ ++static PVRSRV_ERROR ++_RegisterDbgRequestNotifyI(IMG_HANDLE *phNotify, ++ DEBUG_REQUEST_TABLE *psDebugTable, ++ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, ++ IMG_UINT32 ui32RequesterID, ++ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) ++{ ++ DEBUG_REQUEST_NOTIFY *psNotify; ++ PDLLIST_NODE psHead = NULL; ++ IMG_UINT32 i; ++ PVRSRV_ERROR eError; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(phNotify, "phNotify"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psDebugTable, "psDebugTable"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM(pfnDbgRequestNotify, "pfnDbRequestNotify"); ++ ++ /* NoStats used since this may be called outside of the register/de-register ++ * process calls which track memory use. */ ++ psNotify = OSAllocMemNoStats(sizeof(*psNotify)); ++ PVR_LOG_RETURN_IF_NOMEM(psNotify, "psNotify"); ++ ++ /* Set-up the notify data */ ++ psNotify->hDebugTable = psDebugTable; ++ psNotify->hDbgRequestHandle = hDbgRequestHandle; ++ psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify; ++ psNotify->ui32RequesterID = ui32RequesterID; ++ ++ /* Lock down all the lists */ ++ OSWRLockAcquireWrite(psDebugTable->hLock); ++ ++ /* Find which list to add it to */ ++ for (i = 0; i < g_ui32DebugOrderTableReqCount; i++) ++ { ++ if (psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID) ++ { ++ psHead = &psDebugTable->asEntry[i].sListHead; ++ } ++ } ++ ++ /* Failed to find debug requester */ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(psHead, eError, ErrorReleaseLock); ++ ++ /* Add it to the list of Notify functions */ ++ dllist_add_to_tail(psHead, &psNotify->sListNode); ++ ++ /* Unlock the lists */ ++ OSWRLockReleaseWrite(psDebugTable->hLock); ++ ++ *phNotify = psNotify; ++ ++ return PVRSRV_OK; ++ ++ErrorReleaseLock: ++ OSWRLockReleaseWrite(psDebugTable->hLock); ++ OSFreeMem(psNotify); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++PVRSRVRegisterDeviceDbgRequestNotify(IMG_HANDLE *phNotify, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, ++ IMG_UINT32 ui32RequesterID, ++ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) ++{ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psDevNode, "psDevNode"); ++ if (!psDevNode->hDebugTable) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: psDevNode->hDebugTable not yet initialised!", ++ __func__)); ++ return PVRSRV_ERROR_NOT_INITIALISED; ++ } ++ ++ return _RegisterDbgRequestNotifyI(phNotify, ++ (DEBUG_REQUEST_TABLE *)psDevNode->hDebugTable, ++ pfnDbgRequestNotify, ++ ui32RequesterID, ++ hDbgRequestHandle); ++} ++ ++PVRSRV_ERROR ++PVRSRVRegisterDriverDbgRequestNotify(IMG_HANDLE *phNotify, ++ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, ++ IMG_UINT32 ui32RequesterID, ++ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) ++{ ++ if (!g_psDriverDebugTable) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: g_psDriverDebugTable not yet initialised!", ++ __func__)); ++ return PVRSRV_ERROR_NOT_INITIALISED; ++ } ++ ++ return _RegisterDbgRequestNotifyI(phNotify, ++ g_psDriverDebugTable, ++ pfnDbgRequestNotify, ++ ui32RequesterID, ++ hDbgRequestHandle); ++} ++ ++PVRSRV_ERROR ++SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, ++ IMG_UINT32 ui32RequesterID, ++ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle) ++{ ++ return PVRSRVRegisterDeviceDbgRequestNotify(phNotify, ++ psDevNode, ++ pfnDbgRequestNotify, ++ ui32RequesterID, ++ 
hDbgRequestHandle); ++} ++ ++static PVRSRV_ERROR ++_UnregisterDbgRequestNotify(IMG_HANDLE hNotify) ++{ ++ DEBUG_REQUEST_NOTIFY *psNotify = (DEBUG_REQUEST_NOTIFY *) hNotify; ++ DEBUG_REQUEST_TABLE *psDebugTable; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psNotify, "psNotify"); ++ ++ psDebugTable = (DEBUG_REQUEST_TABLE *) psNotify->hDebugTable; ++ ++ OSWRLockAcquireWrite(psDebugTable->hLock); ++ dllist_remove_node(&psNotify->sListNode); ++ OSWRLockReleaseWrite(psDebugTable->hLock); ++ ++ OSFreeMemNoStats(psNotify); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PVRSRVUnregisterDeviceDbgRequestNotify(IMG_HANDLE hNotify) ++{ ++ return _UnregisterDbgRequestNotify(hNotify); ++} ++ ++PVRSRV_ERROR ++PVRSRVUnregisterDriverDbgRequestNotify(IMG_HANDLE hNotify) ++{ ++ return _UnregisterDbgRequestNotify(hNotify); ++} ++ ++PVRSRV_ERROR ++SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify) ++{ ++ return _UnregisterDbgRequestNotify(hNotify); ++} ++ ++void ++PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ DEBUG_REQUEST_TABLE *psDebugTable = ++ (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable; ++ DEBUG_REQUEST_TABLE *psDriverDebugTable = ++ (DEBUG_REQUEST_TABLE *) g_psDriverDebugTable; ++ static const IMG_CHAR *apszVerbosityTable[] = { "Low", "Medium", "High" }; ++ const IMG_CHAR *szVerbosityLevel; ++ const IMG_CHAR *Bit32 = "32 Bit", *Bit64 = "64 Bit"; ++ IMG_UINT32 i; ++ ++ static_assert(ARRAY_SIZE(apszVerbosityTable) == DEBUG_REQUEST_VERBOSITY_MAX+1, ++ "Incorrect number of verbosity levels"); ++ ++ PVR_ASSERT(psDebugTable); ++ PVR_ASSERT(psDriverDebugTable); ++ ++ if (ui32VerbLevel < ARRAY_SIZE(apszVerbosityTable)) ++ { ++ szVerbosityLevel = apszVerbosityTable[ui32VerbLevel]; ++ } ++ else ++ { ++ szVerbosityLevel = "unknown"; ++ PVR_ASSERT(!"Invalid verbosity level received"); ++ } ++ ++ PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------", ++ szVerbosityLevel); ++ ++#if defined(RGX_IRQ_HYPERV_HANDLER) ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++#endif ++ { ++ OSDumpVersionInfo(pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++ ++ PVR_DUMPDEBUG_LOG("DDK info: %s (%s) %s", ++ PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR); ++ ++ PVR_DUMPDEBUG_LOG("Time now: %" IMG_UINT64_FMTSPEC "us", ++ OSClockus64()); ++ ++ switch (psPVRSRVData->eServicesState) ++ { ++ case PVRSRV_SERVICES_STATE_OK: ++ PVR_DUMPDEBUG_LOG("Services State: OK"); ++ break; ++ case PVRSRV_SERVICES_STATE_BAD: ++ PVR_DUMPDEBUG_LOG("Services State: BAD"); ++ break; ++ case PVRSRV_SERVICES_STATE_UNDEFINED: ++ PVR_DUMPDEBUG_LOG("Services State: UNDEFINED"); ++ break; ++ default: ++ PVR_DUMPDEBUG_LOG("Services State: UNKNOWN (%d)", ++ psPVRSRVData->eServicesState); ++ break; ++ } ++ ++ PVR_DUMPDEBUG_LOG("Server Errors: %d", ++ PVRSRV_KM_ERRORS); ++ ++ PVRSRVConnectionDebugNotify(psDevNode, pfnDumpDebugPrintf, pvDumpDebugFile); ++ ++ PVR_DUMPDEBUG_LOG("------[ Driver Info ]------"); ++ ++ PVR_DUMPDEBUG_LOG("Comparison of UM/KM components: %s", ++ (psPVRSRVData->sDriverInfo.bIsNoMatch) ? "MISMATCH" : "MATCHING"); ++ ++ PVR_DUMPDEBUG_LOG("KM Arch: %s", ++ (psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); ++ ++ if (!PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ PVR_DUMPDEBUG_LOG("Driver Mode: %s", ++ (PVRSRV_VZ_MODE_IS(HOST)) ? 
"Host":"Guest"); ++ } ++ ++ if (psPVRSRVData->sDriverInfo.ui8UMSupportedArch) ++ { ++ if ((psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_BOTH) == ++ BUILD_ARCH_BOTH) ++ { ++ PVR_DUMPDEBUG_LOG("UM Connected Clients Arch: %s and %s", Bit64, Bit32); ++ ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("UM Connected Clients: %s", ++ (psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_64BIT) ? Bit64 : Bit32); ++ } ++ } ++ ++ PVR_DUMP_DRIVER_INFO("UM", psPVRSRVData->sDriverInfo.sUMBuildInfo); ++ PVR_DUMP_DRIVER_INFO("KM", psPVRSRVData->sDriverInfo.sKMBuildInfo); ++ ++ PVR_DUMPDEBUG_LOG("Window system: %s", (IS_DECLARED(WINDOW_SYSTEM)) ? (WINDOW_SYSTEM) : "Not declared"); ++ ++ /* Driver debug table */ ++ OSWRLockAcquireReadNested(psDriverDebugTable->hLock, DN_LOCKCLASS_DRIVER); ++ /* Device debug table*/ ++ OSWRLockAcquireReadNested(psDebugTable->hLock, DN_LOCKCLASS_DEVICE); ++ ++ /* For each requester in Driver and Device table */ ++ for (i = 0; i < g_ui32DebugOrderTableReqCount; i++) ++ { ++ DLLIST_NODE *psNode; ++ DLLIST_NODE *psNext; ++ ++ /* For each notifier on this requestor */ ++ dllist_foreach_node(&psDriverDebugTable->asEntry[i].sListHead, psNode, psNext) ++ { ++ DEBUG_REQUEST_NOTIFY *psNotify = ++ IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode); ++ psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, ui32VerbLevel, ++ pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++ ++ /* For each notifier on this requestor */ ++ dllist_foreach_node(&psDebugTable->asEntry[i].sListHead, psNode, psNext) ++ { ++ DEBUG_REQUEST_NOTIFY *psNotify = ++ IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode); ++ psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, ui32VerbLevel, ++ pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++ } ++ ++ OSWRLockReleaseRead(psDebugTable->hLock); ++ OSWRLockReleaseRead(psDriverDebugTable->hLock); ++ ++ PVR_DUMPDEBUG_LOG("------------[ PVR DBG: END ]------------"); ++ ++ if (!pfnDumpDebugPrintf) ++ { ++ /* Only notify OS of an issue if the debug dump has gone there */ ++ OSWarnOn(IMG_TRUE); ++ } ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_notifier.h b/drivers/gpu/drm/img-rogue/pvr_notifier.h +new file mode 100644 +index 000000000000..57172363ca14 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_notifier.h +@@ -0,0 +1,326 @@ ++/*************************************************************************/ /*! ++@File ++@Title PowerVR notifier interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(PVR_NOTIFIER_H)
++#define PVR_NOTIFIER_H
++
++#include "img_types.h"
++#include "pvr_debug.h"
++
++
++/*************************************************************************/ /*!
++Command Complete Notifier Interface
++*/ /**************************************************************************/
++
++typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE;
++#ifndef CMDCOMPNOTIFY_PFN
++typedef void (*PFN_CMDCOMP_NOTIFY)(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
++#define CMDCOMPNOTIFY_PFN
++#endif
++
++/*************************************************************************/ /*!
++@Function       PVRSRVCmdCompleteInit
++@Description    Performs initialisation of the command complete notifier
++                interface.
++@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVCmdCompleteInit(void);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVCmdCompleteDeinit
++@Description    Performs cleanup for the command complete notifier interface.
++@Return         void
++*/ /**************************************************************************/
++void
++PVRSRVCmdCompleteDeinit(void);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVRegisterCmdCompleteNotify
++@Description    Register a callback function that is called when some device
++                finishes some work, which is signalled via a call to
++                PVRSRVCheckStatus.
++@Output         phNotify             On success, points to command complete
++                                     notifier handle
++@Input          pfnCmdCompleteNotify Function callback
++@Input          hPrivData            Data to be passed back to the caller via
++                                     the callback function
++@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
++                                PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
++                                PVRSRV_CMDCOMP_HANDLE hPrivData);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVUnregisterCmdCompleteNotify
++@Description    Unregister a previously registered callback function.
++@Input hNotify Command complete notifier handle ++@Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVCheckStatus ++@Description Calls PVRSRVNotifyCommandCompletion() to notify registered ++ command complete handlers of work completion and then calls ++ PVRSRVSignalGlobalEO() to signal the global event object. ++@Input hCmdCompCallerHandle Used to prevent a handler from being ++ notified. A NULL value results in all ++ handlers being notified. ++*/ /**************************************************************************/ ++void ++PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVNotifyCommandCompletion ++@Description Notify any registered command complete handlers that some work ++ has been finished (unless hCmdCompCallerHandle matches a ++ handler's hPrivData). ++@Input hCmdCompCallerHandle Used to prevent a handler from being ++ notified. A NULL value results in all ++ handlers being notified. ++*/ /**************************************************************************/ ++void ++PVRSRVNotifyCommandCompletion(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVSignalGlobalEO ++@Description Signals the global event object. ++*/ /**************************************************************************/ ++void ++PVRSRVSignalGlobalEO(void); ++ ++ ++/*************************************************************************/ /*! ++Debug Notifier Interface ++*/ /**************************************************************************/ ++ ++#define DEBUG_REQUEST_DC 0 ++#define DEBUG_REQUEST_SYNCTRACKING 1 ++#define DEBUG_REQUEST_SRV 2 ++#define DEBUG_REQUEST_SYS 3 ++#define DEBUG_REQUEST_RGX 4 ++#define DEBUG_REQUEST_ANDROIDSYNC 5 ++#define DEBUG_REQUEST_LINUXFENCE 6 ++#define DEBUG_REQUEST_SYNCCHECKPOINT 7 ++#define DEBUG_REQUEST_HTB 8 ++#define DEBUG_REQUEST_APPHINT 9 ++#define DEBUG_REQUEST_FALLBACKSYNC 10 ++ ++#define DEBUG_REQUEST_VERBOSITY_LOW 0 ++#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1 ++#define DEBUG_REQUEST_VERBOSITY_HIGH 2 ++#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH ++ ++#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk)) ++ ++/* ++ * Macro used within debug dump functions to send output either to PVR_LOG or ++ * a custom function. The custom function should be stored as a function ++ * pointer in a local variable called 'pfnDumpDebugPrintf'. 'pvDumpDebugFile' ++ * is also required as a local variable to serve as a file identifier for the ++ * printf function if required. ++ */ ++#define PVR_DUMPDEBUG_LOG(...) 
\
++    do \
++    { \
++        if (pfnDumpDebugPrintf) \
++            pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \
++        else \
++            PVR_LOG((__VA_ARGS__)); \
++    } while (0)
++
++struct _PVRSRV_DEVICE_NODE_;
++
++typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE;
++#ifndef DBGNOTIFY_PFNS
++typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
++                                     const IMG_CHAR *pszFormat, ...);
++typedef void (*PFN_DBGREQ_NOTIFY)(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
++                                  IMG_UINT32 ui32VerbLevel,
++                                  DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
++                                  void *pvDumpDebugFile);
++#define DBGNOTIFY_PFNS
++#endif
++
++/*************************************************************************/ /*!
++@Function       PVRSRVRegisterDeviceDbgTable
++@Description    Registers a debug requester table for the given device. The
++                order in which the debug requester IDs appear in the table
++                determines the order in which a set of notifier callbacks
++                will be called. In other words, the requester ID that appears
++                first will have all of its associated debug notifier callbacks
++                called first. This will then be followed by all the callbacks
++                associated with the next requester ID in the table and so on.
++                The order table is handled internally.
++@Input          psDevNode            Device node to register requester table with
++@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVRegisterDeviceDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVRegisterDriverDbgTable
++@Description    Registers a debug requester table for the driver. The
++                order in which the debug requester IDs appear in the table
++                determines the order in which a set of notifier callbacks
++                will be called. In other words, the requester ID that appears
++                first will have all of its associated debug notifier callbacks
++                called first. This will then be followed by all the callbacks
++                associated with the next requester ID in the table and so on.
++                The order table is handled internally.
++@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVRegisterDriverDbgTable(void);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVUnregisterDeviceDbgTable
++@Description    Unregisters a debug requester table.
++@Input          psDevNode            Device node for which the requester table
++                                     should be unregistered
++@Return         void
++*/ /**************************************************************************/
++void
++PVRSRVUnregisterDeviceDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVUnregisterDriverDbgTable
++@Description    Unregisters the driver debug requester table.
++@Return         void
++*/ /**************************************************************************/
++void
++PVRSRVUnregisterDriverDbgTable(void);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVRegisterDeviceDbgRequestNotify
++@Description    Register a callback function on a given device that is called
++                when a debug request is made via a call to PVRSRVDebugRequest.
++                There are a number of verbosity levels ranging from
++                DEBUG_REQUEST_VERBOSITY_LOW up to
++                DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
++                for each level up to the highest level specified to
++                PVRSRVDebugRequest.
++@Output         phNotify             Points to debug notifier handle on success
++@Input          psDevNode            Device node for which the debug callback
++                                     should be registered
++@Input          pfnDbgRequestNotify  Function callback
++@Input          ui32RequesterID      Requester ID. This is used to determine
++                                     the order in which callbacks are called
++@Input          hDbgRequestHandle    Data to be passed back to the caller via
++                                     the callback function
++@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVRegisterDeviceDbgRequestNotify(IMG_HANDLE *phNotify,
++                                     struct _PVRSRV_DEVICE_NODE_ *psDevNode,
++                                     PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
++                                     IMG_UINT32 ui32RequesterID,
++                                     PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVRegisterDriverDbgRequestNotify
++@Description    Register a callback function that is called when a debug request
++                is made via a call to PVRSRVDebugRequest. There are a number of
++                verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to
++                DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
++                for each level up to the highest level specified to
++                PVRSRVDebugRequest.
++@Output         phNotify             Points to debug notifier handle on success
++@Input          pfnDbgRequestNotify  Function callback
++@Input          ui32RequesterID      Requester ID. This is used to determine
++                                     the order in which callbacks are called
++@Input          hDbgRequestHandle    Data to be passed back to the caller via
++                                     the callback function
++@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVRegisterDriverDbgRequestNotify(IMG_HANDLE *phNotify,
++                                     PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
++                                     IMG_UINT32 ui32RequesterID,
++                                     PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVUnregisterDeviceDbgRequestNotify
++@Description    Unregister a previously registered (device context) callback
++                function.
++@Input          hNotify              Debug notifier handle.
++@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVUnregisterDeviceDbgRequestNotify(IMG_HANDLE hNotify);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVUnregisterDriverDbgRequestNotify
++@Description    Unregister a previously registered (driver context) callback
++                function.
++@Input          hNotify              Debug notifier handle.
++@Return         PVRSRV_ERROR         PVRSRV_OK on success otherwise an error
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVUnregisterDriverDbgRequestNotify(IMG_HANDLE hNotify);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVDebugRequest
++@Description    Notify any registered debug request handlers that a debug
++                request has been made and at what level.
++@Input          psDevNode            Device node for which the debug request
++                                     has been made
++@Input          ui32VerbLevel        The maximum verbosity level to dump
++@Input          pfnDumpDebugPrintf   Used to specify the print function that
++                                     should be used to dump any debug
++                                     information.
If this argument is NULL then ++ PVR_LOG() will be used as the default ++ print function. ++@Input pvDumpDebugFile Optional file identifier to be passed to ++ the print function if required. ++@Return void ++*/ /**************************************************************************/ ++void ++PVRSRVDebugRequest(struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++ ++#endif /* !defined(PVR_NOTIFIER_H) */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_platform_drv.c b/drivers/gpu/drm/img-rogue/pvr_platform_drv.c +new file mode 100644 +index 000000000000..172f8e52faa3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_platform_drv.c +@@ -0,0 +1,326 @@ ++/* ++ * @File ++ * @Title PowerVR DRM platform driver ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
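One consumer-side note before the platform driver: as the implementation earlier in this patch shows, a NULL pfnDumpDebugPrintf routes the dump to PVR_LOG() and also triggers OSWarnOn(), so consumers that want the dump in their own buffer (debugfs/procfs style) normally supply a sink. A sketch under those assumptions — editorial, not part of the patch; MySeqPrintf and MyDebugShow are invented names:

#include <linux/seq_file.h>
#include <linux/stdarg.h>

/* Adapter matching DUMPDEBUG_PRINTF_FUNC that writes into a seq_file. */
static void MySeqPrintf(void *pvDumpDebugFile, const IMG_CHAR *pszFormat, ...)
{
    struct seq_file *psSeq = pvDumpDebugFile;
    va_list args;

    va_start(args, pszFormat);
    seq_vprintf(psSeq, pszFormat, args);
    seq_putc(psSeq, '\n');   /* dump lines carry no trailing newline */
    va_end(args);
}

/* A seq_file show handler that pulls a full-verbosity dump. */
static int MyDebugShow(struct seq_file *psSeq, void *pvData)
{
    PVRSRV_DEVICE_NODE *psDevNode = psSeq->private;

    PVRSRVDebugRequest(psDevNode, DEBUG_REQUEST_VERBOSITY_MAX,
                       MySeqPrintf, psSeq);
    return 0;
}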
++ */ ++ ++#include ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) ++#include ++#include ++#include ++#include ++#include ++#include ++#else ++#include ++#endif ++ ++#include ++#include ++ ++#include "module_common.h" ++#include "pvr_drv.h" ++#include "pvrmodule.h" ++#include "sysinfo.h" ++ ++ ++/* This header must always be included last */ ++#include "kernel_compatibility.h" ++ ++static struct drm_driver pvr_drm_platform_driver; ++ ++#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) ++/* ++ * This is an arbitrary value. If it's changed then the 'num_devices' module ++ * parameter description should also be updated to match. ++ */ ++#define MAX_DEVICES 16 ++ ++static unsigned int pvr_num_devices = 1; ++static struct platform_device **pvr_devices; ++ ++#if defined(NO_HARDWARE) ++static int pvr_num_devices_set(const char *val, ++ const struct kernel_param *param) ++{ ++ int err; ++ ++ err = param_set_uint(val, param); ++ if (err) ++ return err; ++ ++ if (pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static const struct kernel_param_ops pvr_num_devices_ops = { ++ .set = pvr_num_devices_set, ++ .get = param_get_uint, ++}; ++ ++module_param_cb(num_devices, &pvr_num_devices_ops, &pvr_num_devices, 0444); ++MODULE_PARM_DESC(num_devices, ++ "Number of platform devices to register (default: 1 - max: 16)"); ++#endif /* defined(NO_HARDWARE) */ ++#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ ++ ++static int pvr_devices_register(void) ++{ ++#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) ++ struct platform_device_info pvr_dev_info = { ++ .name = SYS_RGX_DEV_NAME, ++ .id = -2, ++#if defined(NO_HARDWARE) ++ /* Not all cores have 40 bit physical support, but this ++ * will work unless > 32 bit address is returned on those cores. ++ * In the future this will be fixed more correctly. 
++ */ ++ .dma_mask = DMA_BIT_MASK(40), ++#else ++ .dma_mask = DMA_BIT_MASK(32), ++#endif ++ }; ++ unsigned int i; ++ ++ BUG_ON(pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES); ++ ++ pvr_devices = kmalloc_array(pvr_num_devices, sizeof(*pvr_devices), ++ GFP_KERNEL); ++ if (!pvr_devices) ++ return -ENOMEM; ++ ++ for (i = 0; i < pvr_num_devices; i++) { ++ pvr_devices[i] = platform_device_register_full(&pvr_dev_info); ++ if (IS_ERR(pvr_devices[i])) { ++ DRM_ERROR("unable to register device %u (err=%ld)\n", ++ i, PTR_ERR(pvr_devices[i])); ++ pvr_devices[i] = NULL; ++ return -ENODEV; ++ } ++ } ++#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ ++ ++ return 0; ++} ++ ++static void pvr_devices_unregister(void) ++{ ++#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) ++ unsigned int i; ++ ++ BUG_ON(!pvr_devices); ++ ++ for (i = 0; i < pvr_num_devices && pvr_devices[i]; i++) ++ platform_device_unregister(pvr_devices[i]); ++ ++ kfree(pvr_devices); ++ pvr_devices = NULL; ++#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */ ++} ++ ++static int pvr_probe(struct platform_device *pdev) ++{ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ struct drm_device *ddev; ++ int ret; ++ ++ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); ++ ++ ddev = drm_dev_alloc(&pvr_drm_platform_driver, &pdev->dev); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) ++ if (IS_ERR(ddev)) ++ return PTR_ERR(ddev); ++#else ++ if (!ddev) ++ return -ENOMEM; ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) ++ /* Needed by drm_platform_set_busid */ ++ ddev->platformdev = pdev; ++#endif ++ ++ /* ++ * The load callback, called from drm_dev_register, is deprecated, ++ * because of potential race conditions. Calling the function here, ++ * before calling drm_dev_register, avoids those potential races. ++ */ ++ BUG_ON(pvr_drm_platform_driver.load != NULL); ++ ret = pvr_drm_load(ddev, 0); ++ if (ret) ++ goto err_drm_dev_put; ++ ++ ret = drm_dev_register(ddev, 0); ++ if (ret) ++ goto err_drm_dev_unload; ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) ++ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", ++ pvr_drm_platform_driver.name, ++ pvr_drm_platform_driver.major, ++ pvr_drm_platform_driver.minor, ++ pvr_drm_platform_driver.patchlevel, ++ pvr_drm_platform_driver.date, ++ ddev->primary->index); ++#endif ++ return 0; ++ ++err_drm_dev_unload: ++ pvr_drm_unload(ddev); ++err_drm_dev_put: ++ drm_dev_put(ddev); ++ return ret; ++#else ++ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); ++ ++ return drm_platform_init(&pvr_drm_platform_driver, pdev); ++#endif ++} ++ ++static int pvr_remove(struct platform_device *pdev) ++{ ++ struct drm_device *ddev = platform_get_drvdata(pdev); ++ ++ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) ++ drm_dev_unregister(ddev); ++ ++ /* The unload callback, called from drm_dev_unregister, is ++ * deprecated. Call the unload function directly. 
++ */ ++ BUG_ON(pvr_drm_platform_driver.unload != NULL); ++ pvr_drm_unload(ddev); ++ ++ drm_dev_put(ddev); ++#else ++ drm_put_dev(ddev); ++#endif ++ return 0; ++} ++ ++static void pvr_shutdown(struct platform_device *pdev) ++{ ++ struct drm_device *ddev = platform_get_drvdata(pdev); ++ struct pvr_drm_private *priv = ddev->dev_private; ++ ++ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev); ++ ++ PVRSRVDeviceShutdown(priv->dev_node); ++} ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) ++static const struct of_device_id pvr_of_ids[] = { ++#if defined(SYS_RGX_OF_COMPATIBLE) ++ { .compatible = SYS_RGX_OF_COMPATIBLE, }, ++#endif ++ {}, ++}; ++ ++#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE) ++//MODULE_DEVICE_TABLE(of, pvr_of_ids); ++#endif ++#endif ++ ++static struct platform_device_id pvr_platform_ids[] = { ++#if defined(SYS_RGX_DEV_NAME) ++ { SYS_RGX_DEV_NAME, 0 }, ++#endif ++ { } ++}; ++ ++#if !defined(CHROMIUMOS_KERNEL) || !defined(MODULE) ++//MODULE_DEVICE_TABLE(platform, pvr_platform_ids); ++#endif ++ ++static struct platform_driver pvr_platform_driver = { ++ .driver = { ++ .name = DRVNAME, ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)) ++ .of_match_table = of_match_ptr(pvr_of_ids), ++#endif ++ .pm = &pvr_pm_ops, ++ }, ++ .id_table = pvr_platform_ids, ++ .probe = pvr_probe, ++ .remove = pvr_remove, ++ .shutdown = pvr_shutdown, ++}; ++ ++static int __init pvr_init(void) ++{ ++ int err; ++ ++ DRM_DEBUG_DRIVER("\n"); ++ ++ pvr_drm_platform_driver = pvr_drm_generic_driver; ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) ++ pvr_drm_platform_driver.set_busid = drm_platform_set_busid; ++#endif ++ ++ err = PVRSRVDriverInit(); ++ if (err) ++ return err; ++ ++ err = platform_driver_register(&pvr_platform_driver); ++ if (err) ++ return err; ++ ++ return pvr_devices_register(); ++} ++ ++static void __exit pvr_exit(void) ++{ ++ DRM_DEBUG_DRIVER("\n"); ++ ++ pvr_devices_unregister(); ++ platform_driver_unregister(&pvr_platform_driver); ++ PVRSRVDriverDeinit(); ++ ++ DRM_DEBUG_DRIVER("done\n"); ++} ++ ++late_initcall(pvr_init); ++module_exit(pvr_exit); +diff --git a/drivers/gpu/drm/img-rogue/pvr_procfs.h b/drivers/gpu/drm/img-rogue/pvr_procfs.h +new file mode 100644 +index 000000000000..61a1f0ee28d3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_procfs.h +@@ -0,0 +1,50 @@ ++/*************************************************************************/ /*! ++@File ++@Title ProcFS implementation of Debug Info interface. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
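The probe path above deliberately performs all device setup before drm_dev_register() rather than through the deprecated drm_driver.load hook, so userspace never sees a half-initialised device. Reduced to its generic shape, under the assumption of invented my_* names (an editorial sketch, not the patch's code):

#include <drm/drm_drv.h>
#include <linux/platform_device.h>

static struct drm_driver my_driver;               /* assumed filled in elsewhere */
static int my_drm_load(struct drm_device *ddev);  /* hypothetical, was .load */
static void my_drm_unload(struct drm_device *ddev);

static int my_probe(struct platform_device *pdev)
{
    struct drm_device *ddev;
    int ret;

    ddev = drm_dev_alloc(&my_driver, &pdev->dev);
    if (IS_ERR(ddev))
        return PTR_ERR(ddev);

    ret = my_drm_load(ddev);           /* all setup happens first */
    if (ret)
        goto err_put;

    ret = drm_dev_register(ddev, 0);   /* device nodes appear here */
    if (ret)
        goto err_unload;

    return 0;

err_unload:
    my_drm_unload(ddev);
err_put:
    drm_dev_put(ddev);
    return ret;
}

The BUG_ON checks in pvr_probe/pvr_remove assert exactly this contract: the deprecated hooks must stay NULL because their work has been moved into the probe/remove path.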
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVR_PROCFS_H ++#define PVR_PROCFS_H ++ ++#include "pvrsrv_error.h" ++ ++PVRSRV_ERROR PVRProcFsRegister(void); ++ ++#endif /* PVR_PROCFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_ricommon.h b/drivers/gpu/drm/img-rogue/pvr_ricommon.h +new file mode 100644 +index 000000000000..0521aa151794 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_ricommon.h +@@ -0,0 +1,68 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services Resource Information (RI) common types and definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Resource Information (RI) common types and definitions included ++ in both user mode and kernel mode source. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
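PVRProcFsRegister()'s body is not part of this hunk. For orientation only, a typical minimal /proc debug-info entry on kernels >= 5.6 (where struct proc_ops replaced file_operations for procfs) looks like the following — an editorial sketch with invented my_* names, not the driver's implementation:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int my_status_show(struct seq_file *m, void *v)
{
    seq_puts(m, "ok\n");
    return 0;
}

static int my_status_open(struct inode *inode, struct file *file)
{
    return single_open(file, my_status_show, NULL);
}

static const struct proc_ops my_status_ops = {
    .proc_open    = my_status_open,
    .proc_read    = seq_read,
    .proc_lseek   = seq_lseek,
    .proc_release = single_release,
};

/* Registration, as a PVRProcFsRegister()-style init path might do it: */
static int my_procfs_register(void)
{
    return proc_create("pvr_status", 0444, NULL, &my_status_ops) ? 0 : -ENOMEM;
}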
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef PVR_RICOMMON_H ++#define PVR_RICOMMON_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include "img_defs.h" ++ ++/*! Maximum text string length including the null byte */ ++#define PRVSRVRI_MAX_TEXT_LENGTH 20U ++ ++/* PID used to hold PMR allocations which are driver-wide (i.e. have a lifetime ++ * longer than an application process) ++ */ ++#define PVR_SYS_ALLOC_PID 1 ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* PVR_RICOMMON_H */ ++/****************************************************************************** ++ End of file (pvr_ricommon.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/pvr_sw_fence.c b/drivers/gpu/drm/img-rogue/pvr_sw_fence.c +new file mode 100644 +index 000000000000..4f2404a21161 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_sw_fence.c +@@ -0,0 +1,199 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pvr_sw_fence.h" ++ ++struct pvr_sw_fence_context { ++ struct kref kref; ++ unsigned int context; ++ char context_name[32]; ++ char driver_name[32]; ++ atomic_t seqno; ++ atomic_t fence_count; ++}; ++ ++struct pvr_sw_fence { ++ struct dma_fence base; ++ struct pvr_sw_fence_context *fence_context; ++ spinlock_t lock; ++}; ++ ++#define to_pvr_sw_fence(fence) container_of(fence, struct pvr_sw_fence, base) ++ ++const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx) ++{ ++ return fctx->context_name; ++} ++ ++void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx, ++ char *str, int size) ++{ ++ snprintf(str, size, "%d", atomic_read(&fctx->seqno)); ++} ++ ++static inline unsigned ++pvr_sw_fence_context_seqno_next(struct pvr_sw_fence_context *fence_context) ++{ ++ return atomic_inc_return(&fence_context->seqno) - 1; ++} ++ ++static const char *pvr_sw_fence_get_driver_name(struct dma_fence *fence) ++{ ++ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); ++ ++ return pvr_sw_fence->fence_context->driver_name; ++} ++ ++static const char *pvr_sw_fence_get_timeline_name(struct dma_fence *fence) ++{ ++ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); ++ ++ return pvr_sw_fence_context_name(pvr_sw_fence->fence_context); ++} ++ ++static void pvr_sw_fence_value_str(struct dma_fence *fence, char *str, int size) ++{ ++ snprintf(str, size, "%llu", (u64) fence->seqno); ++} ++ ++static void pvr_sw_fence_timeline_value_str(struct dma_fence *fence, ++ char *str, int size) ++{ ++ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); ++ ++ pvr_sw_fence_context_value_str(pvr_sw_fence->fence_context, str, size); ++} ++ ++static bool pvr_sw_fence_enable_signaling(struct dma_fence *fence) ++{ ++ return true; ++} ++ ++static void pvr_sw_fence_context_destroy_kref(struct kref *kref) ++{ ++ struct pvr_sw_fence_context *fence_context = ++ container_of(kref, struct pvr_sw_fence_context, kref); ++ unsigned int fence_count; ++ ++ fence_count = atomic_read(&fence_context->fence_count); ++ if (WARN_ON(fence_count)) ++ pr_debug("%s context has %u fence(s) remaining\n", ++ fence_context->context_name, fence_count); ++ ++ kfree(fence_context); ++} ++ ++static void pvr_sw_fence_release(struct dma_fence *fence) ++{ ++ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence); ++ ++ atomic_dec(&pvr_sw_fence->fence_context->fence_count); ++ kref_put(&pvr_sw_fence->fence_context->kref, ++ pvr_sw_fence_context_destroy_kref); ++ kfree(pvr_sw_fence); ++} ++ ++static const struct dma_fence_ops pvr_sw_fence_ops = { ++ .get_driver_name = pvr_sw_fence_get_driver_name, ++ .get_timeline_name = pvr_sw_fence_get_timeline_name, ++ .fence_value_str = pvr_sw_fence_value_str, ++ .timeline_value_str = pvr_sw_fence_timeline_value_str, ++ .enable_signaling = pvr_sw_fence_enable_signaling, ++ .wait = dma_fence_default_wait, ++ .release = 
pvr_sw_fence_release, ++}; ++ ++struct pvr_sw_fence_context * ++pvr_sw_fence_context_create(const char *context_name, const char *driver_name) ++{ ++ struct pvr_sw_fence_context *fence_context; ++ ++ fence_context = kmalloc(sizeof(*fence_context), GFP_KERNEL); ++ if (!fence_context) ++ return NULL; ++ ++ fence_context->context = dma_fence_context_alloc(1); ++ strlcpy(fence_context->context_name, context_name, ++ sizeof(fence_context->context_name)); ++ strlcpy(fence_context->driver_name, driver_name, ++ sizeof(fence_context->driver_name)); ++ atomic_set(&fence_context->seqno, 0); ++ atomic_set(&fence_context->fence_count, 0); ++ kref_init(&fence_context->kref); ++ ++ return fence_context; ++} ++ ++void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context) ++{ ++ kref_put(&fence_context->kref, pvr_sw_fence_context_destroy_kref); ++} ++ ++struct dma_fence * ++pvr_sw_fence_create(struct pvr_sw_fence_context *fence_context) ++{ ++ struct pvr_sw_fence *pvr_sw_fence; ++ unsigned int seqno; ++ ++ pvr_sw_fence = kmalloc(sizeof(*pvr_sw_fence), GFP_KERNEL); ++ if (!pvr_sw_fence) ++ return NULL; ++ ++ spin_lock_init(&pvr_sw_fence->lock); ++ pvr_sw_fence->fence_context = fence_context; ++ ++ seqno = pvr_sw_fence_context_seqno_next(fence_context); ++ dma_fence_init(&pvr_sw_fence->base, &pvr_sw_fence_ops, ++ &pvr_sw_fence->lock, fence_context->context, seqno); ++ ++ atomic_inc(&fence_context->fence_count); ++ kref_get(&fence_context->kref); ++ ++ return &pvr_sw_fence->base; ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_sw_fence.h b/drivers/gpu/drm/img-rogue/pvr_sw_fence.h +new file mode 100644 +index 000000000000..bebbcb75e23b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_sw_fence.h +@@ -0,0 +1,60 @@ ++/* ++ * @File ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
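Two lifetime rules are worth noting in the code above: dma_fence_init() hands the caller one fence reference, and each fence pins its context via kref_get(), so the context's kfree only happens after the creator's reference and every fence's reference are gone. A usage sketch (editorial; my_sw_fence_demo is an invented name):

#include <linux/dma-fence.h>
#include "pvr_sw_fence.h"

static void my_sw_fence_demo(void)
{
    struct pvr_sw_fence_context *fctx;
    struct dma_fence *fence;

    fctx = pvr_sw_fence_context_create("my-timeline", "my-driver");
    if (!fctx)
        return;

    fence = pvr_sw_fence_create(fctx);   /* bumps fence_count + context kref */
    if (fence) {
        dma_fence_signal(fence);         /* plain dma_fence signalling */
        dma_fence_put(fence);            /* release drops both references */
    }

    pvr_sw_fence_context_destroy(fctx);  /* drops the creator's reference */
}

If fences were still outstanding at the final kref_put, the WARN_ON in pvr_sw_fence_context_destroy_kref would flag the accounting mismatch.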
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#if !defined(__PVR_SW_FENCES_H__) ++#define __PVR_SW_FENCES_H__ ++ ++#include "pvr_linux_fence.h" ++ ++struct pvr_sw_fence_context; ++ ++struct pvr_sw_fence_context *pvr_sw_fence_context_create(const char *name, ++ const char *driver_name); ++void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context); ++struct dma_fence *pvr_sw_fence_create(struct pvr_sw_fence_context * ++ fence_context); ++ ++const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx); ++void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx, ++ char *str, int size); ++ ++#endif /* !defined(__PVR_SW_FENCES_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_sync.h b/drivers/gpu/drm/img-rogue/pvr_sync.h +new file mode 100644 +index 000000000000..a8ecd8b2caa0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_sync.h +@@ -0,0 +1,120 @@ ++/* ++ * @File pvr_sync.h ++ * @Title Kernel driver for Android's sync mechanism ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef _PVR_SYNC_H ++#define _PVR_SYNC_H ++ ++#include ++ ++#include "pvr_fd_sync_kernel.h" ++#include "services_kernel_client.h" ++ ++ ++/* Services internal interface */ ++ ++/** ++ * pvr_sync_register_functions() ++ * ++ * Return: PVRSRV_OK on success. ++ */ ++enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void); ++ ++/** ++ * pvr_sync_init - register the pvr_sync misc device ++ * ++ * Return: error code, 0 on success. ++ */ ++int pvr_sync_init(void); ++ ++/** ++ * pvr_sync_deinit - unregister the pvr_sync misc device ++ */ ++void pvr_sync_deinit(void); ++ ++/** ++ * pvr_sync_device_init() - create an internal sync context ++ * @dev: Linux device ++ * ++ * Return: PVRSRV_OK on success. ++ */ ++enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev); ++ ++/** ++ * pvr_sync_device_deinit() - destroy an internal sync context ++ * ++ * Drains any work items with outstanding sync fence updates/dependencies. ++ */ ++void pvr_sync_device_deinit(struct device *dev); ++ ++enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms); ++ ++enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence); ++ ++enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **fence_out); ++ ++enum PVRSRV_ERROR_TAG ++pvr_sync_sw_timeline_fence_create(struct _PVRSRV_DEVICE_NODE_ *pvrsrv_dev_node, ++ int timeline_fd, ++ const char *fence_name, ++ int *fence_fd_out, ++ u64 *sync_pt_idx); ++ ++enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline, ++ u64 *sync_pt_idx); ++ ++enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline); ++ ++enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd, ++ void **timeline_out); ++ ++enum PVRSRV_ERROR_TAG ++sync_dump_fence(void *sw_fence_obj, ++ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, ++ void *dump_debug_file); ++ ++enum PVRSRV_ERROR_TAG ++sync_sw_dump_timeline(void *sw_timeline_obj, ++ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, ++ void *dump_debug_file); ++ ++#endif /* _PVR_SYNC_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_api.h b/drivers/gpu/drm/img-rogue/pvr_sync_api.h +new file mode 100644 +index 000000000000..060dd85a6158 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_sync_api.h +@@ -0,0 +1,61 @@ ++/* ++ * @File pvr_sync_api.h ++ * @Title Kernel driver for Android's sync mechanism ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. 
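Taken together, the comments in pvr_sync.h imply a two-level bring-up: driver-wide registration of the sync entry points and the misc device, then a per-device sync context. One plausible ordering, shown as an editorial sketch (my_* names invented; not the driver's actual init code):

#include "pvr_sync.h"

static int my_driver_sync_bringup(void)
{
    /* Hand the sync entry points to services first... */
    if (pvr_sync_register_functions() != PVRSRV_OK)
        return -ENODEV;

    /* ...then expose the pvr_sync misc device to userspace. */
    return pvr_sync_init();
}

/* Per-device half, called from device initialisation. It is undone by
 * pvr_sync_device_deinit(dev), which drains any work items that still
 * carry fence updates or dependencies. */
static int my_device_sync_bringup(struct device *dev)
{
    return (pvr_sync_device_init(dev) == PVRSRV_OK) ? 0 : -ENODEV;
}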
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef _PVR_SYNC_API_H ++#define _PVR_SYNC_API_H ++ ++int pvr_sync_api_init(void *file_handle, void **api_priv); ++int pvr_sync_api_deinit(void *api_priv, bool is_sw); ++int pvr_sync_api_rename(void *api_priv, void *user_data); ++int pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new); ++int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data); ++int pvr_sync_api_sw_inc(void *api_priv, void *user_data); ++ ++struct file; ++ ++int pvr_sync_ioctl_init(void); ++void pvr_sync_ioctl_deinit(void); ++void *pvr_sync_get_api_priv(struct file *file); ++struct file *pvr_sync_get_file_struct(void *file_handle); ++ ++#endif /* _PVR_SYNC_API_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_file.c b/drivers/gpu/drm/img-rogue/pvr_sync_file.c +new file mode 100644 +index 000000000000..e3656108e963 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_sync_file.c +@@ -0,0 +1,1094 @@ ++/* ++ * @File pvr_sync_file.c ++ * @Title Kernel driver for Android's sync mechanism ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. 
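The pvr_sync_api_* functions above form an OS-neutral dispatch surface: the per-file private state travels as an opaque pointer, so an ioctl front end only needs to look it up and forward the request. A hedged sketch of such a dispatcher (editorial; the MY_IOC_* numbers are placeholders, not the driver's real ABI, and the real driver copies user data before this point):

#include <linux/errno.h>
#include "pvr_sync_api.h"

#define MY_IOC_RENAME          1   /* placeholder command numbers */
#define MY_IOC_SW_CREATE_FENCE 2
#define MY_IOC_SW_INC          3

static long my_sync_ioctl(struct file *file, unsigned int cmd, void *user_data)
{
    void *api_priv = pvr_sync_get_api_priv(file);

    switch (cmd) {
    case MY_IOC_RENAME:
        return pvr_sync_api_rename(api_priv, user_data);
    case MY_IOC_SW_CREATE_FENCE:
        return pvr_sync_api_sw_create_fence(api_priv, user_data);
    case MY_IOC_SW_INC:
        return pvr_sync_api_sw_inc(api_priv, user_data);
    default:
        return -ENOTTY;
    }
}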
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include "services_kernel_client.h" ++#include "pvr_drv.h" ++#include "pvr_sync.h" ++#include "pvr_fence.h" ++#include "pvr_counting_timeline.h" ++ ++#include "linux_sw_sync.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "pvr_sync_api.h" ++ ++/* This header must always be included last */ ++#include "kernel_compatibility.h" ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) && !defined(CHROMIUMOS_KERNEL) ++#define sync_file_user_name(s) ((s)->name) ++#else ++#define sync_file_user_name(s) ((s)->user_name) ++#endif ++ ++#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) 
\ ++ do { \ ++ if (pfnDumpDebugPrintf) \ ++ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \ ++ ## __VA_ARGS__); \ ++ else \ ++ pr_err(fmt "\n", ## __VA_ARGS__); \ ++ } while (0) ++ ++#define FILE_NAME "pvr_sync_file" ++ ++struct sw_sync_create_fence_data { ++ __u32 value; ++ char name[32]; ++ __s32 fence; ++}; ++#define SW_SYNC_IOC_MAGIC 'W' ++#define SW_SYNC_IOC_CREATE_FENCE \ ++ (_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data)) ++#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32) ++ ++/* Global data for the sync driver */ ++static struct { ++ struct pvr_fence_context *foreign_fence_context; ++ PFN_SYNC_CHECKPOINT_STRUCT sync_checkpoint_ops; ++} pvr_sync_data; ++ ++#if defined(NO_HARDWARE) ++static DEFINE_MUTEX(pvr_timeline_active_list_lock); ++static struct list_head pvr_timeline_active_list; ++#endif ++ ++/* This is the actual timeline metadata. We might keep this around after the ++ * base sync driver has destroyed the pvr_sync_timeline_wrapper object. ++ */ ++struct pvr_sync_timeline { ++ char name[32]; ++ void *file_handle; ++ bool is_sw; ++ /* Fence context used for hw fences */ ++ struct pvr_fence_context *hw_fence_context; ++ /* Timeline and context for sw fences */ ++ struct pvr_counting_fence_timeline *sw_fence_timeline; ++#if defined(NO_HARDWARE) ++ /* List of all timelines (used to advance all timelines in nohw builds) */ ++ struct list_head list; ++#endif ++}; ++ ++static ++void pvr_sync_free_checkpoint_list_mem(void *mem_ptr) ++{ ++ kfree(mem_ptr); ++} ++ ++#if defined(NO_HARDWARE) ++/* function used to signal pvr fence in nohw builds */ ++static ++void pvr_sync_nohw_signal_fence(void *fence_data_to_signal) ++{ ++ struct pvr_sync_timeline *this_timeline; ++ ++ mutex_lock(&pvr_timeline_active_list_lock); ++ list_for_each_entry(this_timeline, &pvr_timeline_active_list, list) { ++ pvr_fence_context_signal_fences_nohw(this_timeline->hw_fence_context); ++ } ++ mutex_unlock(&pvr_timeline_active_list_lock); ++} ++#endif ++ ++static struct pvr_sync_timeline *pvr_sync_timeline_fget(int fd) ++{ ++ struct file *file = fget(fd); ++ struct pvr_sync_timeline *timeline; ++ ++ if (!file) ++ return NULL; ++ ++ timeline = pvr_sync_get_api_priv(file); ++ if (!timeline) ++ fput(file); ++ ++ return timeline; ++} ++ ++static void pvr_sync_timeline_fput(struct pvr_sync_timeline *timeline) ++{ ++ struct file *file = pvr_sync_get_file_struct(timeline->file_handle); ++ ++ if (file) ++ fput(file); ++ else ++ pr_err(FILE_NAME ": %s: Timeline incomplete\n", __func__); ++} ++ ++/* ioctl and fops handling */ ++ ++int pvr_sync_api_init(void *file_handle, void **api_priv) ++{ ++ struct pvr_sync_timeline *timeline; ++ char task_comm[TASK_COMM_LEN]; ++ ++ get_task_comm(task_comm, current); ++ ++ timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); ++ if (!timeline) ++ return -ENOMEM; ++ ++ strlcpy(timeline->name, task_comm, sizeof(timeline->name)); ++ timeline->file_handle = file_handle; ++ timeline->is_sw = false; ++ ++ *api_priv = (void *)timeline; ++ ++ return 0; ++} ++ ++int pvr_sync_api_deinit(void *api_priv, bool is_sw) ++{ ++ struct pvr_sync_timeline *timeline = api_priv; ++ ++ if (!timeline) ++ return 0; ++ ++ if (timeline->sw_fence_timeline) { ++ /* This makes sure any outstanding SW syncs are marked as ++ * complete at timeline close time. 
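The sw_sync_create_fence_data struct and SW_SYNC_IOC_* numbers defined above are a userspace-visible ABI. From the user side, a timeline fd is used to mint fences at a target value and later signal them by incrementing the timeline — sketched below as an editorial example (my_sw_sync_user_demo is invented, and how the timeline fd is obtained depends on how the driver exposes the device node):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/types.h>

static int my_sw_sync_user_demo(int timeline_fd)
{
    struct sw_sync_create_fence_data data = { .value = 1, .name = "my-fence" };
    __u32 inc = 1;

    if (ioctl(timeline_fd, SW_SYNC_IOC_CREATE_FENCE, &data))
        return -1;
    /* data.fence now holds the new fence fd; hand it to a waiter. */

    return ioctl(timeline_fd, SW_SYNC_IOC_INC, &inc);  /* signal up to 1 */
}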
Otherwise it'll leak the ++ * timeline (as outstanding fences hold a ref) and possibly ++ * wedge the system if something is waiting on one of those ++ * fences ++ */ ++ pvr_counting_fence_timeline_force_complete( ++ timeline->sw_fence_timeline); ++ pvr_counting_fence_timeline_put(timeline->sw_fence_timeline); ++ } ++ ++ if (timeline->hw_fence_context) { ++#if defined(NO_HARDWARE) ++ mutex_lock(&pvr_timeline_active_list_lock); ++ list_del(&timeline->list); ++ mutex_unlock(&pvr_timeline_active_list_lock); ++#endif ++ pvr_fence_context_destroy(timeline->hw_fence_context); ++ } ++ ++ kfree(timeline); ++ ++ return 0; ++} ++ ++/* ++ * This is the function that kick code will call in order to 'finalise' a ++ * created output fence just prior to returning from the kick function. ++ * The OS native sync code needs to implement a function meeting this ++ * specification - the implementation may be a nop if the OS does not need ++ * to perform any actions at this point. ++ * ++ * Input: fence_fd The PVRSRV_FENCE to be 'finalised'. This value ++ * will have been returned by an earlier call to ++ * pvr_sync_create_fence(). ++ * Input: finalise_data The finalise data returned by an earlier call ++ * to pvr_sync_create_fence(). ++ */ ++static enum PVRSRV_ERROR_TAG ++pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd, void *finalise_data) ++{ ++ struct sync_file *sync_file = finalise_data; ++ struct pvr_fence *pvr_fence; ++ ++ if (!sync_file || (fence_fd < 0)) { ++ pr_err(FILE_NAME ": %s: Invalid input fence\n", __func__); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ pvr_fence = to_pvr_fence(sync_file->fence); ++ ++ if (!pvr_fence) { ++ pr_err(FILE_NAME ": %s: Fence not a pvr fence\n", __func__); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* pvr fences can be signalled any time after creation */ ++ dma_fence_enable_sw_signaling(&pvr_fence->base); ++ ++ fd_install(fence_fd, sync_file->file); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * This is the function that kick code will call in order to obtain a new ++ * PVRSRV_FENCE from the OS native sync code and the PSYNC_CHECKPOINT used ++ * in that fence. The OS native sync code needs to implement a function ++ * meeting this specification. ++ * ++ * Input: device Device node to use in creating a hw_fence_ctx ++ * Input: fence_name A string to annotate the fence with (for ++ * debug). ++ * Input: timeline The timeline on which the new fence is to be ++ * created. ++ * Output: new_fence The new PVRSRV_FENCE to be returned by the ++ * kick call. ++ * Output: fence_uid Unique ID of the update fence. ++ * Output: fence_finalise_data Pointer to data needed to finalise the fence. ++ * Output: new_checkpoint_handle The PSYNC_CHECKPOINT used by the new fence. 
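++ *
++ * A sketch of the expected call sequence from kick code, for orientation
++ * only (the variable names here are illustrative, not part of the API):
++ *
++ *   err = pvr_sync_create_fence(dev, "update", timeline_fd, checkpoint_ctx,
++ *                               &fence_fd, &uid, &finalise_data,
++ *                               &checkpoint, &tl_sync, &tl_value);
++ *   // ... submit work that signals 'checkpoint' ...
++ *   // on success: pvr_sync_finalise_fence(fence_fd, finalise_data);
++ *   // on failure: pvr_sync_rollback_fence_data(fence_fd, finalise_data);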
++ */ ++static enum PVRSRV_ERROR_TAG ++pvr_sync_create_fence( ++ struct _PVRSRV_DEVICE_NODE_ *device, ++ const char *fence_name, ++ PVRSRV_TIMELINE new_fence_timeline, ++ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, ++ PVRSRV_FENCE *new_fence, u64 *fence_uid, ++ void **fence_finalise_data, ++ PSYNC_CHECKPOINT *new_checkpoint_handle, ++ void **timeline_update_sync, ++ __u32 *timeline_update_value) ++{ ++ PVRSRV_ERROR err = PVRSRV_OK; ++ PVRSRV_FENCE new_fence_fd = -1; ++ struct pvr_sync_timeline *timeline; ++ struct pvr_fence *pvr_fence; ++ PSYNC_CHECKPOINT checkpoint; ++ struct sync_file *sync_file; ++ ++ if (new_fence_timeline < 0 || !new_fence || !new_checkpoint_handle ++ || !fence_finalise_data) { ++ pr_err(FILE_NAME ": %s: Invalid input params\n", __func__); ++ err = PVRSRV_ERROR_INVALID_PARAMS; ++ goto err_out; ++ } ++ ++ /* We reserve the new fence FD before taking any operations ++ * as we do not want to fail (e.g. run out of FDs) ++ */ ++ new_fence_fd = get_unused_fd_flags(O_CLOEXEC); ++ if (new_fence_fd < 0) { ++ pr_err(FILE_NAME ": %s: Failed to get fd\n", __func__); ++ err = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; ++ goto err_out; ++ } ++ ++ timeline = pvr_sync_timeline_fget(new_fence_timeline); ++ if (!timeline) { ++ pr_err(FILE_NAME ": %s: Failed to open supplied timeline fd (%d)\n", ++ __func__, new_fence_timeline); ++ err = PVRSRV_ERROR_INVALID_PARAMS; ++ goto err_put_fd; ++ } ++ ++ if (timeline->is_sw) { ++ /* This should never happen! */ ++ pr_err(FILE_NAME ": %s: Request to create a pvr fence on sw timeline (%d)\n", ++ __func__, new_fence_timeline); ++ err = PVRSRV_ERROR_INVALID_PARAMS; ++ goto err_put_timeline; ++ } ++ ++ if (!timeline->hw_fence_context) { ++ /* First time we use this timeline, so create a context. */ ++ timeline->hw_fence_context = ++ pvr_fence_context_create( ++ device, ++ NativeSyncGetFenceStatusWq(), ++ timeline->name); ++ if (!timeline->hw_fence_context) { ++ pr_err(FILE_NAME ": %s: Failed to create fence context (%d)\n", ++ __func__, new_fence_timeline); ++ err = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_put_timeline; ++ } ++#if defined(NO_HARDWARE) ++ /* Add timeline to active list */ ++ INIT_LIST_HEAD(&timeline->list); ++ mutex_lock(&pvr_timeline_active_list_lock); ++ list_add_tail(&timeline->list, &pvr_timeline_active_list); ++ mutex_unlock(&pvr_timeline_active_list_lock); ++#endif ++ } ++ ++ pvr_fence = pvr_fence_create(timeline->hw_fence_context, ++ psSyncCheckpointContext, ++ new_fence_timeline, ++ fence_name); ++ if (!pvr_fence) { ++ pr_err(FILE_NAME ": %s: Failed to create new pvr_fence\n", ++ __func__); ++ err = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_put_timeline; ++ } ++ ++ checkpoint = pvr_fence_get_checkpoint(pvr_fence); ++ if (!checkpoint) { ++ pr_err(FILE_NAME ": %s: Failed to get fence checkpoint\n", ++ __func__); ++ err = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_destroy_fence; ++ } ++ ++ sync_file = sync_file_create(&pvr_fence->base); ++ if (!sync_file) { ++ pr_err(FILE_NAME ": %s: Failed to create sync_file\n", ++ __func__); ++ err = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_destroy_fence; ++ } ++ strlcpy(sync_file_user_name(sync_file), ++ pvr_fence->name, ++ sizeof(sync_file_user_name(sync_file))); ++ dma_fence_put(&pvr_fence->base); ++ ++ *new_fence = new_fence_fd; ++ *fence_finalise_data = sync_file; ++ *new_checkpoint_handle = checkpoint; ++ *fence_uid = OSGetCurrentClientProcessIDKM(); ++ *fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX); ++ /* not used but don't want to return dangling pointers */ ++ 
*timeline_update_sync = NULL; ++ *timeline_update_value = 0; ++ ++ pvr_sync_timeline_fput(timeline); ++err_out: ++ return err; ++ ++err_destroy_fence: ++ pvr_fence_destroy(pvr_fence); ++err_put_timeline: ++ pvr_sync_timeline_fput(timeline); ++err_put_fd: ++ put_unused_fd(new_fence_fd); ++ *fence_uid = PVRSRV_NO_FENCE; ++ goto err_out; ++} ++ ++/* ++ * This is the function that kick code will call in order to 'rollback' a ++ * created output fence should an error occur when submitting the kick. ++ * The OS native sync code needs to implement a function meeting this ++ * specification. ++ * ++ * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence ++ * should be destroyed and any actions taken due to ++ * its creation that need to be undone should be ++ * reverted. ++ * Input: finalise_data The finalise data for the fence to be 'rolled back'. ++ */ ++static enum PVRSRV_ERROR_TAG ++pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback, ++ void *fence_data_to_rollback) ++{ ++ struct sync_file *sync_file = fence_data_to_rollback; ++ struct pvr_fence *pvr_fence; ++ ++ if (!sync_file || fence_to_rollback < 0) { ++ pr_err(FILE_NAME ": %s: Invalid fence (%d)\n", __func__, ++ fence_to_rollback); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ pvr_fence = to_pvr_fence(sync_file->fence); ++ if (!pvr_fence) { ++ pr_err(FILE_NAME ++ ": %s: Non-PVR fence (%p)\n", ++ __func__, sync_file->fence); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ fput(sync_file->file); ++ ++ put_unused_fd(fence_to_rollback); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * This is the function that kick code will call in order to obtain a list of ++ * the PSYNC_CHECKPOINTs for a given PVRSRV_FENCE passed to a kick function. ++ * The OS native sync code will allocate the memory to hold the returned list ++ * of PSYNC_CHECKPOINT ptrs. The caller will free this memory once it has ++ * finished referencing it. ++ * ++ * Input: fence The input (check) fence ++ * Output: nr_checkpoints The number of PVRSRV_SYNC_CHECKPOINT ptrs ++ * returned in the checkpoint_handles ++ * parameter. ++ * Output: fence_uid Unique ID of the check fence ++ * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs. 
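++ *
++ * Note: each returned checkpoint has had a reference taken on the caller's
++ * behalf (via SyncCheckpointTakeRef), and the array itself is allocated
++ * here; the caller is expected to drop those references when finished and
++ * to release the array through the registered pfnFreeCheckpointListMem
++ * callback (kfree() in this implementation). A sketch, with the loop
++ * variable purely illustrative:
++ *
++ *   for (i = 0; i < nr_checkpoints; i++)
++ *       SyncCheckpointDropRef(checkpoint_handles[i]);
++ *   pfnFreeCheckpointListMem(checkpoint_handles);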
++ */
++static enum PVRSRV_ERROR_TAG
++pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
++		       PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints,
++		       PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid)
++{
++	PSYNC_CHECKPOINT *checkpoints = NULL;
++	unsigned int i, num_fences = 0, num_used_fences = 0;
++	struct dma_fence **fences = NULL;
++	struct dma_fence *fence;
++	PVRSRV_ERROR err = PVRSRV_OK;
++
++	if (!nr_checkpoints || !checkpoint_handles || !fence_uid) {
++		pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n",
++		       __func__);
++		err = PVRSRV_ERROR_INVALID_PARAMS;
++		goto err_out;
++	}
++
++	*nr_checkpoints = 0;
++	*checkpoint_handles = NULL;
++	*fence_uid = 0;
++
++	if (fence_to_resolve < 0)
++		goto err_out;
++
++	fence = sync_file_get_fence(fence_to_resolve);
++	if (!fence) {
++		pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n",
++		       __func__, fence_to_resolve);
++		err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
++		goto err_out;
++	}
++
++	if (dma_fence_is_array(fence)) {
++		struct dma_fence_array *array = to_dma_fence_array(fence);
++
++		if (array) {
++			fences = array->fences;
++			num_fences = array->num_fences;
++		}
++	} else {
++		fences = &fence;
++		num_fences = 1;
++	}
++
++	checkpoints = kmalloc_array(num_fences, sizeof(PSYNC_CHECKPOINT),
++				    GFP_KERNEL);
++	if (!checkpoints) {
++		err = PVRSRV_ERROR_OUT_OF_MEMORY;
++		goto err_put_fence;
++	}
++	for (i = 0; i < num_fences; i++) {
++		/*
++		 * Only return the checkpoint if the fence is still active.
++		 * Don't check for signalled fences on PDUMP drivers, as we
++		 * need to make sure that all fences make it to the pdump.
++		 */
++#if !defined(PDUMP)
++		if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
++			      &fences[i]->flags))
++#endif
++		{
++			struct pvr_fence *pvr_fence =
++				pvr_fence_create_from_fence(
++					pvr_sync_data.foreign_fence_context,
++					psSyncCheckpointContext,
++					fences[i],
++					fence_to_resolve,
++					"foreign");
++			if (!pvr_fence) {
++				pr_err(FILE_NAME ": %s: Failed to create fence\n",
++				       __func__);
++				err = PVRSRV_ERROR_OUT_OF_MEMORY;
++				goto err_free_checkpoints;
++			}
++			checkpoints[num_used_fences] =
++				pvr_fence_get_checkpoint(pvr_fence);
++			SyncCheckpointTakeRef(checkpoints[num_used_fences]);
++			++num_used_fences;
++			dma_fence_put(&pvr_fence->base);
++		}
++	}
++	/* If we don't return any checkpoints, delete the array because
++	 * the caller will not.
++	 */
++	if (num_used_fences == 0) {
++		kfree(checkpoints);
++		checkpoints = NULL;
++	}
++
++	*checkpoint_handles = checkpoints;
++	*nr_checkpoints = num_used_fences;
++	*fence_uid = OSGetCurrentClientProcessIDKM();
++	*fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX);
++
++err_put_fence:
++	dma_fence_put(fence);
++err_out:
++	return err;
++
++err_free_checkpoints:
++	for (i = 0; i < num_used_fences; i++) {
++		if (checkpoints[i])
++			SyncCheckpointDropRef(checkpoints[i]);
++	}
++	kfree(checkpoints);
++	goto err_put_fence;
++}
++
++/*
++ * This is the function that driver code will call in order to request the
++ * sync implementation to output debug information relating to any sync
++ * checkpoints it may have created which appear in the provided array of
++ * FW addresses of Unified Fence Objects (UFOs).
++ *
++ * Input: nr_ufos The number of FW addresses provided in the
++ * vaddrs parameter.
++ * Input: vaddrs The array of FW addresses of UFOs. 
The sync ++ * implementation should check each of these to ++ * see if any relate to sync checkpoints it has ++ * created and where they do output debug information ++ * pertaining to the native/fallback sync with ++ * which it is associated. ++ */ ++static u32 ++pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs) ++{ ++ return pvr_fence_dump_info_on_stalled_ufos(pvr_sync_data.foreign_fence_context, ++ nr_ufos, ++ vaddrs); ++} ++ ++#if defined(PDUMP) ++static enum PVRSRV_ERROR_TAG ++pvr_sync_fence_get_checkpoints(PVRSRV_FENCE fence_to_pdump, u32 *nr_checkpoints, ++ struct SYNC_CHECKPOINT_TAG ***checkpoint_handles) ++{ ++ struct dma_fence **fences = NULL; ++ struct dma_fence *fence; ++ struct pvr_fence *pvr_fence; ++ struct SYNC_CHECKPOINT_TAG **checkpoints = NULL; ++ unsigned int i, num_fences, num_used_fences = 0; ++ enum PVRSRV_ERROR_TAG err; ++ ++ if (fence_to_pdump < 0) { ++ err = PVRSRV_ERROR_INVALID_PARAMS; ++ goto err_out; ++ } ++ ++ if (!nr_checkpoints || !checkpoint_handles) { ++ pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n", ++ __func__); ++ err = PVRSRV_ERROR_INVALID_PARAMS; ++ goto err_out; ++ } ++ ++ fence = sync_file_get_fence(fence_to_pdump); ++ if (!fence) { ++ pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n", ++ __func__, fence_to_pdump); ++ err = PVRSRV_ERROR_HANDLE_NOT_FOUND; ++ goto err_out; ++ } ++ ++ if (dma_fence_is_array(fence)) { ++ struct dma_fence_array *array = to_dma_fence_array(fence); ++ ++ fences = array->fences; ++ num_fences = array->num_fences; ++ } else { ++ fences = &fence; ++ num_fences = 1; ++ } ++ ++ checkpoints = kmalloc_array(num_fences, sizeof(*checkpoints), ++ GFP_KERNEL); ++ if (!checkpoints) { ++ pr_err("pvr_sync_file: %s: Failed to alloc memory for returned list of sync checkpoints\n", ++ __func__); ++ err = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_put_fence; ++ } ++ ++ for (i = 0; i < num_fences; i++) { ++ pvr_fence = to_pvr_fence(fences[i]); ++ if (!pvr_fence) ++ continue; ++ checkpoints[num_used_fences] = pvr_fence_get_checkpoint(pvr_fence); ++ ++num_used_fences; ++ } ++ ++ *checkpoint_handles = checkpoints; ++ *nr_checkpoints = num_used_fences; ++ err = PVRSRV_OK; ++ ++err_put_fence: ++ dma_fence_put(fence); ++err_out: ++ return err; ++} ++#endif ++ ++int pvr_sync_api_rename(void *api_priv, void *user_data) ++{ ++ struct pvr_sync_timeline *timeline = api_priv; ++ struct pvr_sync_rename_ioctl_data *data = user_data; ++ ++ data->szName[sizeof(data->szName) - 1] = '\0'; ++ strlcpy(timeline->name, data->szName, sizeof(timeline->name)); ++ if (timeline->hw_fence_context) ++ strlcpy(timeline->hw_fence_context->name, data->szName, ++ sizeof(timeline->hw_fence_context->name)); ++ ++ return 0; ++} ++ ++int pvr_sync_api_force_sw_only(void *api_priv, void **api_priv_new) ++{ ++ struct pvr_sync_timeline *timeline = api_priv; ++ ++ /* Already in SW mode? 
*/ ++ if (timeline->sw_fence_timeline) ++ return 0; ++ ++ /* Create a sw_sync timeline with the old GPU timeline's name */ ++ timeline->sw_fence_timeline = pvr_counting_fence_timeline_create( ++ timeline->name); ++ if (!timeline->sw_fence_timeline) ++ return -ENOMEM; ++ ++ timeline->is_sw = true; ++ ++ return 0; ++} ++ ++int pvr_sync_api_sw_create_fence(void *api_priv, void *user_data) ++{ ++ struct pvr_sync_timeline *timeline = api_priv; ++ struct pvr_sw_sync_create_fence_data *data = user_data; ++ struct sync_file *sync_file; ++ int fd = get_unused_fd_flags(O_CLOEXEC); ++ struct dma_fence *fence; ++ int err; ++ ++ if (fd < 0) { ++ pr_err(FILE_NAME ": %s: Failed to find unused fd (%d)\n", ++ __func__, fd); ++ err = -EMFILE; ++ goto err_out; ++ } ++ ++ fence = pvr_counting_fence_create(timeline->sw_fence_timeline, &data->sync_pt_idx); ++ if (!fence) { ++ pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n", ++ __func__, fd); ++ err = -ENOMEM; ++ goto err_put_fd; ++ } ++ ++ sync_file = sync_file_create(fence); ++ dma_fence_put(fence); ++ if (!sync_file) { ++ pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n", ++ __func__, fd); ++ err = -ENOMEM; ++ goto err_put_fd; ++ } ++ ++ data->fence = fd; ++ ++ fd_install(fd, sync_file->file); ++ ++ return 0; ++ ++err_put_fd: ++ put_unused_fd(fd); ++err_out: ++ return err; ++} ++ ++int pvr_sync_api_sw_inc(void *api_priv, void *user_data) ++{ ++ struct pvr_sync_timeline *timeline = api_priv; ++ struct pvr_sw_timeline_advance_data *data = user_data; ++ bool res; ++ ++ res = pvr_counting_fence_timeline_inc(timeline->sw_fence_timeline, &data->sync_pt_idx); ++ ++ /* pvr_counting_fence_timeline_inc won't allow sw timeline to be ++ * advanced beyond the last defined point ++ */ ++ if (!res) { ++ pr_err("pvr_sync_file: attempt to advance SW timeline beyond last defined point\n"); ++ return -EPERM; ++ } ++ ++ return 0; ++} ++ ++static void ++pvr_sync_debug_request_heading(void *data, u32 verbosity, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ if (DD_VERB_LVL_ENABLED(verbosity, DEBUG_REQUEST_VERBOSITY_MEDIUM)) ++ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, ++ "------[ Native Fence Sync: timelines ]------"); ++} ++ ++enum PVRSRV_ERROR_TAG pvr_sync_register_functions(void) ++{ ++ /* Register the resolve fence and create fence functions with ++ * sync_checkpoint.c ++ * The pvr_fence context registers its own EventObject callback to ++ * update sync status ++ */ ++ /* Initialise struct and register with sync_checkpoint.c */ ++ pvr_sync_data.sync_checkpoint_ops.pfnFenceResolve = pvr_sync_resolve_fence; ++ pvr_sync_data.sync_checkpoint_ops.pfnFenceCreate = pvr_sync_create_fence; ++ pvr_sync_data.sync_checkpoint_ops.pfnFenceDataRollback = pvr_sync_rollback_fence_data; ++ pvr_sync_data.sync_checkpoint_ops.pfnFenceFinalise = pvr_sync_finalise_fence; ++#if defined(NO_HARDWARE) ++ pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = pvr_sync_nohw_signal_fence; ++#else ++ pvr_sync_data.sync_checkpoint_ops.pfnNoHWUpdateTimelines = NULL; ++#endif ++ pvr_sync_data.sync_checkpoint_ops.pfnFreeCheckpointListMem = ++ pvr_sync_free_checkpoint_list_mem; ++ pvr_sync_data.sync_checkpoint_ops.pfnDumpInfoOnStalledUFOs = ++ pvr_sync_dump_info_on_stalled_ufos; ++ strlcpy(pvr_sync_data.sync_checkpoint_ops.pszImplName, "pvr_sync_file", ++ SYNC_CHECKPOINT_IMPL_MAX_STRLEN); ++#if defined(PDUMP) ++ pvr_sync_data.sync_checkpoint_ops.pfnSyncFenceGetCheckpoints = ++ pvr_sync_fence_get_checkpoints; ++#endif ++ ++ return 
SyncCheckpointRegisterFunctions(&pvr_sync_data.sync_checkpoint_ops); ++} ++ ++int pvr_sync_init(void) ++{ ++ int err; ++ ++ pvr_sync_data.foreign_fence_context = ++ pvr_fence_foreign_context_create( ++ NativeSyncGetFenceStatusWq(), ++ "foreign_sync"); ++ if (!pvr_sync_data.foreign_fence_context) { ++ pr_err(FILE_NAME ": %s: Failed to create foreign sync context\n", ++ __func__); ++ err = -ENOMEM; ++ goto err_out; ++ } ++ ++#if defined(NO_HARDWARE) ++ INIT_LIST_HEAD(&pvr_timeline_active_list); ++#endif ++ ++ err = pvr_sync_ioctl_init(); ++ if (err) { ++ pr_err(FILE_NAME ": %s: Failed to register pvr_sync device (%d)\n", ++ __func__, err); ++ goto err_ioctl_init; ++ } ++ ++ return 0; ++ ++err_ioctl_init: ++ pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context); ++ pvr_fence_cleanup(); ++err_out: ++ return err; ++} ++ ++void pvr_sync_deinit(void) ++{ ++ pvr_sync_ioctl_deinit(); ++ pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context); ++ pvr_fence_cleanup(); ++} ++ ++enum PVRSRV_ERROR_TAG pvr_sync_device_init(struct device *dev) ++{ ++ struct drm_device *ddev = dev_get_drvdata(dev); ++ struct pvr_drm_private *priv = ddev->dev_private; ++ enum PVRSRV_ERROR_TAG error; ++ ++ error = PVRSRVRegisterDeviceDbgRequestNotify( ++ &priv->sync_debug_notify_handle, ++ priv->dev_node, ++ pvr_sync_debug_request_heading, ++ DEBUG_REQUEST_LINUXFENCE, ++ NULL); ++ if (error != PVRSRV_OK) { ++ pr_err("%s: failed to register debug request callback (%s)\n", ++ __func__, PVRSRVGetErrorString(error)); ++ goto err_out; ++ } ++ ++ /* Register the foreign sync context debug notifier on each device */ ++ error = pvr_fence_context_register_dbg( ++ &priv->sync_foreign_debug_notify_handle, ++ priv->dev_node, ++ pvr_sync_data.foreign_fence_context); ++ if (error != PVRSRV_OK) { ++ pr_err("%s: failed to register fence debug request callback (%s)\n", ++ __func__, PVRSRVGetErrorString(error)); ++ goto err_context_regdbg; ++ } ++ ++#if defined(NO_HARDWARE) ++ INIT_LIST_HEAD(&pvr_timeline_active_list); ++#endif ++ ++ return PVRSRV_OK; ++ ++err_context_regdbg: ++ PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_debug_notify_handle); ++err_out: ++ return error; ++} ++ ++void pvr_sync_device_deinit(struct device *dev) ++{ ++ struct drm_device *ddev = dev_get_drvdata(dev); ++ struct pvr_drm_private *priv = ddev->dev_private; ++ ++ PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_foreign_debug_notify_handle); ++ PVRSRVUnregisterDeviceDbgRequestNotify(priv->sync_debug_notify_handle); ++} ++ ++enum PVRSRV_ERROR_TAG pvr_sync_fence_wait(void *fence, u32 timeout_in_ms) ++{ ++ long timeout = msecs_to_jiffies(timeout_in_ms); ++ int err; ++ ++ err = dma_fence_wait_timeout(fence, true, timeout); ++ /* ++ * dma_fence_wait_timeout returns: ++ * - the remaining timeout on success ++ * - 0 on timeout ++ * - -ERESTARTSYS if interrupted ++ */ ++ if (err > 0) ++ return PVRSRV_OK; ++ else if (err == 0) ++ return PVRSRV_ERROR_TIMEOUT; ++ ++ return PVRSRV_ERROR_FAILED_DEPENDENCIES; ++} ++ ++enum PVRSRV_ERROR_TAG pvr_sync_fence_release(void *fence) ++{ ++ dma_fence_put(fence); ++ ++ return PVRSRV_OK; ++} ++ ++enum PVRSRV_ERROR_TAG pvr_sync_fence_get(int fence_fd, void **fence_out) ++{ ++ struct dma_fence *fence; ++ ++ fence = sync_file_get_fence(fence_fd); ++ if (fence == NULL) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ *fence_out = fence; ++ ++ return PVRSRV_OK; ++} ++ ++enum PVRSRV_ERROR_TAG ++pvr_sync_sw_timeline_fence_create(struct _PVRSRV_DEVICE_NODE_ *pvrsrv_dev_node, ++ int timeline_fd, ++ const char *fence_name, ++ int 
*fence_fd_out, ++ u64 *sync_pt_idx) ++{ ++ enum PVRSRV_ERROR_TAG srv_err; ++ struct pvr_sync_timeline *timeline; ++ struct dma_fence *fence = NULL; ++ struct sync_file *sync_file = NULL; ++ int fd; ++ ++ (void)(pvrsrv_dev_node); ++ ++ fd = get_unused_fd_flags(O_CLOEXEC); ++ if (fd < 0) ++ return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE; ++ ++ timeline = pvr_sync_timeline_fget(timeline_fd); ++ if (!timeline) { ++ /* unrecognised timeline */ ++ srv_err = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; ++ goto err_put_fd; ++ } ++ if (!timeline->is_sw) { ++ pvr_sync_timeline_fput(timeline); ++ srv_err = PVRSRV_ERROR_INVALID_PARAMS; ++ goto err_put_fd; ++ } ++ ++ fence = pvr_counting_fence_create(timeline->sw_fence_timeline, sync_pt_idx); ++ pvr_sync_timeline_fput(timeline); ++ if (!fence) { ++ srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_put_fd; ++ } ++ ++ sync_file = sync_file_create(fence); ++ dma_fence_put(fence); ++ if (!sync_file) { ++ srv_err = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_put_fd; ++ } ++ ++ fd_install(fd, sync_file->file); ++ ++ *fence_fd_out = fd; ++ ++ return PVRSRV_OK; ++ ++err_put_fd: ++ put_unused_fd(fd); ++ return srv_err; ++} ++ ++enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_advance(void *timeline, u64 *sync_pt_idx) ++{ ++ if (timeline == NULL) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ pvr_counting_fence_timeline_inc(timeline, sync_pt_idx); ++ ++ return PVRSRV_OK; ++} ++ ++enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_release(void *timeline) ++{ ++ if (timeline == NULL) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ pvr_counting_fence_timeline_put(timeline); ++ ++ return PVRSRV_OK; ++} ++ ++enum PVRSRV_ERROR_TAG pvr_sync_sw_timeline_get(int timeline_fd, ++ void **timeline_out) ++{ ++ struct pvr_counting_fence_timeline *sw_timeline; ++ struct pvr_sync_timeline *timeline; ++ ++ timeline = pvr_sync_timeline_fget(timeline_fd); ++ if (!timeline) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ sw_timeline = ++ pvr_counting_fence_timeline_get(timeline->sw_fence_timeline); ++ pvr_sync_timeline_fput(timeline); ++ if (!sw_timeline) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ *timeline_out = sw_timeline; ++ ++ return PVRSRV_OK; ++} ++static void _dump_sync_point(struct dma_fence *fence, ++ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, ++ void *dump_debug_file) ++{ ++ const struct dma_fence_ops *fence_ops = fence->ops; ++ bool signaled = dma_fence_is_signaled(fence); ++ char time[16] = { '\0' }; ++ ++ fence_ops->timeline_value_str(fence, time, sizeof(time)); ++ ++ PVR_DUMPDEBUG_LOG(dump_debug_printf, ++ dump_debug_file, ++ "<%p> Seq#=%llu TS=%s State=%s TLN=%s", ++ fence, ++ (u64) fence->seqno, ++ time, ++ (signaled) ? 
"Signalled" : "Active", ++ fence_ops->get_timeline_name(fence)); ++} ++ ++static void _dump_fence(struct dma_fence *fence, ++ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, ++ void *dump_debug_file) ++{ ++ if (dma_fence_is_array(fence)) { ++ struct dma_fence_array *fence_array = to_dma_fence_array(fence); ++ int i; ++ ++ if (fence_array) { ++ PVR_DUMPDEBUG_LOG(dump_debug_printf, ++ dump_debug_file, ++ "Fence: [%p] Sync Points:\n", ++ fence_array); ++ ++ for (i = 0; i < fence_array->num_fences; i++) ++ _dump_sync_point(fence_array->fences[i], ++ dump_debug_printf, ++ dump_debug_file); ++ } ++ ++ } else { ++ _dump_sync_point(fence, dump_debug_printf, dump_debug_file); ++ } ++} ++ ++enum PVRSRV_ERROR_TAG ++sync_dump_fence(void *sw_fence_obj, ++ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, ++ void *dump_debug_file) ++{ ++ struct dma_fence *fence = (struct dma_fence *) sw_fence_obj; ++ ++ _dump_fence(fence, dump_debug_printf, dump_debug_file); ++ ++ return PVRSRV_OK; ++} ++ ++enum PVRSRV_ERROR_TAG ++sync_sw_dump_timeline(void *sw_timeline_obj, ++ DUMPDEBUG_PRINTF_FUNC *dump_debug_printf, ++ void *dump_debug_file) ++{ ++ pvr_counting_fence_timeline_dump_timeline(sw_timeline_obj, ++ dump_debug_printf, ++ dump_debug_file); ++ ++ return PVRSRV_OK; ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.c b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.c +new file mode 100644 +index 000000000000..60ba3555e779 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.c +@@ -0,0 +1,277 @@ ++/* ++ * @File pvr_sync_ioctl_common.c ++ * @Title Kernel driver for Android's sync mechanism ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". 
++ *
++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++/* Kernel header names inferred from usage (struct file, kzalloc/kfree) */
++#include <linux/fs.h>
++#include <linux/slab.h>
++
++#include "pvr_drm.h"
++#include "pvr_sync_api.h"
++#include "pvr_sync_ioctl_common.h"
++
++/*
++ * The PVR Sync API is unusual in that some operations configure the
++ * timeline for use, and are no longer allowed once the timeline is
++ * in use. A locking mechanism, such as a read/write semaphore, would
++ * be one way of ensuring the API rules are followed, but it would add
++ * unnecessary overhead once the timeline has been configured, as read
++ * locks would still have to be taken after the timeline is in use.
++ * To avoid locks, two atomic variables are used, together with memory
++ * barriers. The in_setup variable indicates a "rename" or "force
++ * software only" ioctl is in progress. At most one of these two
++ * configuration ioctls can be in progress at any one time, and they
++ * can't overlap with any other Sync ioctl. The in_use variable
++ * indicates one of the other Sync ioctls has started. Once set, in_use
++ * stays set, and prevents any further configuration ioctls.
++ * Non-configuration ioctls are allowed to overlap.
++ * It is possible for a configuration and a non-configuration ioctl to
++ * race, but at most one will be allowed to proceed, and perhaps neither.
++ * Given the intended usage of the API in user space, where the timeline
++ * is fully configured before being used, this race behaviour won't be
++ * an issue.
++ */
++
++struct pvr_sync_file_data {
++	atomic_t in_setup;
++	atomic_t in_use;
++	void *api_private;
++	bool is_sw;
++};
++
++static bool pvr_sync_set_in_use(struct pvr_sync_file_data *fdata)
++{
++	if (atomic_read(&fdata->in_use) < 2) {
++		atomic_set(&fdata->in_use, 1);
++		/* Ensure in_use change is visible before in_setup is read */
++		smp_mb();
++		if (atomic_read(&fdata->in_setup) != 0)
++			return false;
++
++		atomic_set(&fdata->in_use, 2);
++	} else {
++		/* Ensure stale private data isn't read */
++		smp_rmb();
++	}
++
++	return true;
++}
++
++static bool pvr_sync_set_in_setup(struct pvr_sync_file_data *fdata)
++{
++	int in_setup;
++
++	in_setup = atomic_inc_return(&fdata->in_setup);
++	if (in_setup > 1 || atomic_read(&fdata->in_use) != 0) {
++		atomic_dec(&fdata->in_setup);
++		return false;
++	}
++
++	return true;
++}
++
++static inline void pvr_sync_reset_in_setup(struct pvr_sync_file_data *fdata)
++{
++	/*
++	 * Ensure setup changes are visible before allowing other
++	 * operations to proceed.
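++	 *
++	 * This decrement pairs with the smp_mb() in pvr_sync_set_in_use():
++	 * the setup path publishes its changes before dropping in_setup,
++	 * while the use path sets in_use before sampling in_setup, so at
++	 * most one side of a race can proceed. An illustrative
++	 * interleaving (a sketch, not a formal proof):
++	 *
++	 *   setup ioctl                      other ioctl
++	 *   -----------                      -----------
++	 *   atomic_inc(in_setup)             atomic_set(in_use, 1)
++	 *   in_use != 0? -> back off         smp_mb()
++	 *   ...configure timeline...         in_setup != 0? -> back off
++	 *   smp_mb__before_atomic()          atomic_set(in_use, 2)
++	 *   atomic_dec(in_setup)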
++ */ ++ smp_mb__before_atomic(); ++ atomic_dec(&fdata->in_setup); ++} ++ ++void *pvr_sync_get_api_priv_common(struct file *file) ++{ ++ if (file != NULL && pvr_sync_is_timeline(file)) { ++ struct pvr_sync_file_data *fdata = pvr_sync_get_private_data(file); ++ ++ if (fdata != NULL && pvr_sync_set_in_use(fdata)) ++ return fdata->api_private; ++ } ++ ++ return NULL; ++} ++ ++int pvr_sync_open_common(void *connection_data, void *file_handle) ++{ ++ void *data = NULL; ++ struct pvr_sync_file_data *fdata; ++ int err; ++ ++ fdata = kzalloc(sizeof(*fdata), GFP_KERNEL); ++ if (!fdata) ++ return -ENOMEM; ++ ++ atomic_set(&fdata->in_setup, 0); ++ atomic_set(&fdata->in_use, 0); ++ ++ if (!pvr_sync_set_private_data(connection_data, fdata)) { ++ kfree(fdata); ++ return -EINVAL; ++ } ++ ++ err = pvr_sync_api_init(file_handle, &data); ++ if (err) ++ kfree(fdata); ++ else ++ fdata->api_private = data; ++ ++ return err; ++} ++ ++int pvr_sync_close_common(void *connection_data) ++{ ++ struct pvr_sync_file_data *fdata; ++ ++ fdata = pvr_sync_connection_private_data(connection_data); ++ if (fdata) { ++ int err; ++ ++ err = pvr_sync_api_deinit(fdata->api_private, fdata->is_sw); ++ ++ kfree(fdata); ++ ++ return err; ++ } ++ ++ return 0; ++} ++ ++static inline int pvr_sync_ioctl_rename(void *api_priv, void *arg) ++{ ++ struct pvr_sync_rename_ioctl_data *data = arg; ++ ++ return pvr_sync_api_rename(api_priv, data); ++} ++ ++static inline int pvr_sync_ioctl_force_sw_only(struct pvr_sync_file_data *fdata) ++{ ++ void *data = fdata->api_private; ++ int err; ++ ++ err = pvr_sync_api_force_sw_only(fdata->api_private, &data); ++ if (!err) { ++ if (data != fdata->api_private) ++ fdata->api_private = data; ++ ++ fdata->is_sw = true; ++ } ++ ++ return err; ++} ++ ++static inline int pvr_sync_ioctl_sw_create_fence(void *api_priv, void *arg) ++{ ++ struct pvr_sw_sync_create_fence_data *data = arg; ++ ++ return pvr_sync_api_sw_create_fence(api_priv, data); ++} ++ ++static inline int pvr_sync_ioctl_sw_inc(void *api_priv, void *arg) ++{ ++ struct pvr_sw_timeline_advance_data *data = arg; ++ ++ return pvr_sync_api_sw_inc(api_priv, data); ++} ++ ++int pvr_sync_ioctl_common(struct file *file, unsigned int cmd, void *arg) ++{ ++ int err = -ENOTTY; ++ struct pvr_sync_file_data *fdata; ++ bool in_setup; ++ ++ fdata = pvr_sync_get_private_data(file); ++ if (!fdata) ++ return -EINVAL; ++ ++ switch (cmd) { ++ case DRM_PVR_SYNC_RENAME_CMD: ++ case DRM_PVR_SYNC_FORCE_SW_ONLY_CMD: ++ if (!pvr_sync_set_in_setup(fdata)) ++ return -EBUSY; ++ ++ in_setup = true; ++ break; ++ default: ++ if (!pvr_sync_set_in_use(fdata)) ++ return -EBUSY; ++ ++ in_setup = false; ++ break; ++ } ++ ++ if (in_setup) { ++ if (fdata->is_sw) ++ err = -ENOTTY; ++ else ++ switch (cmd) { ++ case DRM_PVR_SYNC_RENAME_CMD: ++ err = pvr_sync_ioctl_rename(fdata->api_private, ++ arg); ++ break; ++ case DRM_PVR_SYNC_FORCE_SW_ONLY_CMD: ++ err = pvr_sync_ioctl_force_sw_only(fdata); ++ break; ++ default: ++ break; ++ } ++ } else { ++ if (!fdata->is_sw) ++ err = -ENOTTY; ++ else ++ switch (cmd) { ++ case DRM_PVR_SW_SYNC_CREATE_FENCE_CMD: ++ err = pvr_sync_ioctl_sw_create_fence(fdata->api_private, ++ arg); ++ break; ++ case DRM_PVR_SW_SYNC_INC_CMD: ++ err = pvr_sync_ioctl_sw_inc(fdata->api_private, ++ arg); ++ break; ++ default: ++ break; ++ } ++ } ++ ++ if (in_setup) ++ pvr_sync_reset_in_setup(fdata); ++ ++ return err; ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.h b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.h +new file mode 100644 +index 
000000000000..ef12dc298368 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_common.h +@@ -0,0 +1,71 @@ ++/* ++ * @File pvr_sync_ioctl_common.h ++ * @Title Kernel driver for Android's sync mechanism ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#ifndef _PVR_SYNC_IOCTL_COMMON_H ++#define _PVR_SYNC_IOCTL_COMMON_H ++ ++struct file; ++ ++/* Functions provided by pvr_sync_ioctl_common */ ++ ++int pvr_sync_open_common(void *connection_data, void *file_handle); ++int pvr_sync_close_common(void *connection_data); ++int pvr_sync_ioctl_common(struct file *file, unsigned int cmd, void *arg); ++void *pvr_sync_get_api_priv_common(struct file *file); ++ ++struct pvr_sync_file_data; ++ ++/* Functions required by pvr_sync_ioctl_common */ ++ ++bool pvr_sync_set_private_data(void *connection_data, ++ struct pvr_sync_file_data *fdata); ++ ++struct pvr_sync_file_data * ++pvr_sync_connection_private_data(void *connection_data); ++ ++struct pvr_sync_file_data * ++pvr_sync_get_private_data(struct file *file); ++ ++bool pvr_sync_is_timeline(struct file *file); ++ ++#endif /* _PVR_SYNC_IOCTL_COMMON_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.c b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.c +new file mode 100644 +index 000000000000..423c8d3a75ef +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.c +@@ -0,0 +1,168 @@ ++/* ++ * @File pvr_sync_ioctl_drm.c ++ * @Title Kernel driver for Android's sync mechanism ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ ++ ++#include "pvr_drv.h" ++#include "pvr_drm.h" ++#include "private_data.h" ++#include "env_connection.h" ++#include "pvr_sync_api.h" ++#include "pvr_sync_ioctl_common.h" ++#include "pvr_sync_ioctl_drm.h" ++ ++bool pvr_sync_set_private_data(void *connection_data, ++ struct pvr_sync_file_data *fdata) ++{ ++ if (connection_data) { ++ ENV_CONNECTION_DATA *env_data; ++ ++ env_data = PVRSRVConnectionPrivateData(connection_data); ++ if (env_data) { ++ env_data->pvPvrSyncPrivateData = fdata; ++ ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++struct pvr_sync_file_data * ++pvr_sync_connection_private_data(void *connection_data) ++{ ++ if (connection_data) { ++ ENV_CONNECTION_DATA *env_data; ++ ++ env_data = PVRSRVConnectionPrivateData(connection_data); ++ ++ if (env_data) ++ return env_data->pvPvrSyncPrivateData; ++ } ++ ++ return NULL; ++} ++ ++struct pvr_sync_file_data * ++pvr_sync_get_private_data(struct file *file) ++{ ++ CONNECTION_DATA *connection_data = LinuxSyncConnectionFromFile(file); ++ ++ return pvr_sync_connection_private_data(connection_data); ++} ++ ++bool pvr_sync_is_timeline(struct file *file) ++{ ++ return file->f_op == &pvr_drm_fops; ++} ++ ++void *pvr_sync_get_api_priv(struct file *file) ++{ ++ return pvr_sync_get_api_priv_common(file); ++} ++ ++struct file *pvr_sync_get_file_struct(void *file_handle) ++{ ++ if (file_handle) { ++ struct drm_file *file = file_handle; ++ ++ return file->filp; ++ } ++ ++ return NULL; ++} ++ ++int pvr_sync_open(void *connection_data, struct drm_file *file) ++{ ++ /* ++ * The file structure pointer (file->filp) may not have been ++ * initialised at this point, so pass down a pointer to the ++ * drm_file structure instead. ++ */ ++ return pvr_sync_open_common(connection_data, file); ++} ++ ++void pvr_sync_close(void *connection_data) ++{ ++ int iErr = pvr_sync_close_common(connection_data); ++ ++ if (iErr < 0) ++ pr_err("%s: ERROR (%d) returned by pvr_sync_close_common()\n", ++ __func__, iErr); ++} ++ ++ ++int pvr_sync_rename_ioctl(struct drm_device __maybe_unused *dev, ++ void *arg, struct drm_file *file) ++{ ++ return pvr_sync_ioctl_common(file->filp, ++ DRM_PVR_SYNC_RENAME_CMD, arg); ++} ++ ++int pvr_sync_force_sw_only_ioctl(struct drm_device __maybe_unused *dev, ++ void *arg, struct drm_file *file) ++{ ++ return pvr_sync_ioctl_common(file->filp, ++ DRM_PVR_SYNC_FORCE_SW_ONLY_CMD, arg); ++} ++ ++int pvr_sw_sync_create_fence_ioctl(struct drm_device __maybe_unused *dev, ++ void *arg, struct drm_file *file) ++{ ++ return pvr_sync_ioctl_common(file->filp, ++ DRM_PVR_SW_SYNC_CREATE_FENCE_CMD, arg); ++} ++ ++int pvr_sw_sync_inc_ioctl(struct drm_device __maybe_unused *dev, ++ void *arg, struct drm_file *file) ++{ ++ return pvr_sync_ioctl_common(file->filp, ++ DRM_PVR_SW_SYNC_INC_CMD, arg); ++} ++ ++int pvr_sync_ioctl_init(void) ++{ ++ return 0; ++} ++ ++void pvr_sync_ioctl_deinit(void) ++{ ++} +diff --git a/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.h b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.h +new file mode 100644 +index 000000000000..756ce4bf71e6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_sync_ioctl_drm.h +@@ -0,0 +1,62 @@ ++/* ++ * @File pvr_sync_ioctl_drm.h ++ * @Title Kernel driver for Android's sync mechanism ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. 
++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef _PVR_SYNC_IOCTL_DRM_H ++#define _PVR_SYNC_IOCTL_DRM_H ++ ++struct drm_device; ++struct drm_file; ++ ++int pvr_sync_open(void *connection_data, struct drm_file *file); ++void pvr_sync_close(void *connection_data); ++ ++int pvr_sync_rename_ioctl(struct drm_device *dev, void *arg, ++ struct drm_file *file); ++int pvr_sync_force_sw_only_ioctl(struct drm_device *dev, void *arg, ++ struct drm_file *file); ++int pvr_sw_sync_create_fence_ioctl(struct drm_device *dev, void *arg, ++ struct drm_file *file); ++int pvr_sw_sync_inc_ioctl(struct drm_device *dev, void *arg, ++ struct drm_file *file); ++ ++#endif /* _PVR_SYNC_IOCTL_DRM_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_uaccess.h b/drivers/gpu/drm/img-rogue/pvr_uaccess.h +new file mode 100644 +index 000000000000..13864eab4c7a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_uaccess.h +@@ -0,0 +1,99 @@ ++/*************************************************************************/ /*! ++@File ++@Title Utility functions for user space access ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++#ifndef PVR_UACCESS_H
++#define PVR_UACCESS_H
++
++/* Kernel header names inferred from the version checks and uaccess use below */
++#include <linux/version.h>
++#include <linux/uaccess.h>
++
++static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
++{
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
++	if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
++#else
++	if (access_ok(pvTo, ulBytes))
++#endif
++	{
++		return __copy_to_user(pvTo, pvFrom, ulBytes);
++	}
++
++	return ulBytes;
++}
++
++
++#if defined(__KLOCWORK__)
++	/* this part is only to tell Klocwork not to report false positives,
++	   because it doesn't understand that pvr_copy_from_user will initialise
++	   the memory pointed to by pvTo */
++#include <linux/string.h> /* get the memset prototype */
++static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
++{
++	if (pvTo != NULL)
++	{
++		memset(pvTo, 0xAA, ulBytes);
++		return 0;
++	}
++	return 1;
++}
++
++#else /* real implementation */
++
++static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
++{
++	/*
++	 * The compile time correctness checking introduced for copy_from_user in
++	 * Linux 2.6.33 isn't fully compatible with our usage of the function.
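++	 *
++	 * Note also that access_ok() lost its type argument (VERIFY_READ /
++	 * VERIFY_WRITE) in Linux 5.0, which is why the calls in this header
++	 * are split on KERNEL_VERSION(5, 0, 0).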
++ */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)) ++ if (access_ok(VERIFY_READ, pvFrom, ulBytes)) ++#else ++ if (access_ok(pvFrom, ulBytes)) ++#endif ++ { ++ return __copy_from_user(pvTo, pvFrom, ulBytes); ++ } ++ ++ return ulBytes; ++} ++#endif /* klocworks */ ++ ++#endif /* PVR_UACCESS_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvr_vmap.h b/drivers/gpu/drm/img-rogue/pvr_vmap.h +new file mode 100644 +index 000000000000..19fe8b8f190b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvr_vmap.h +@@ -0,0 +1,83 @@ ++/* ++ * @File pvr_vmap.h ++ * @Title Utility functions for virtual memory mapping ++ * @Codingstyle LinuxKernel ++ * @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++ * @License Dual MIT/GPLv2 ++ * ++ * The contents of this file are subject to the MIT license as set out below. ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * Alternatively, the contents of this file may be used under the terms of ++ * the GNU General Public License Version 2 ("GPL") in which case the provisions ++ * of GPL are applicable instead of those above. ++ * ++ * If you wish to allow use of your version of this file only under the terms of ++ * GPL, and not to allow others to use your version of this file under the terms ++ * of the MIT license, indicate your decision by deleting the provisions above ++ * and replace them with the notice and other provisions required by GPL as set ++ * out in the file called "GPL-COPYING" included in this distribution. If you do ++ * not delete the provisions above, a recipient may use your version of this file ++ * under the terms of either the MIT license or GPL. ++ * ++ * This License is also included in this distribution in the file called ++ * "MIT-COPYING". ++ * ++ * EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++ * PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++ * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++ * PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */
++
++#ifndef PVR_VMAP_H
++#define PVR_VMAP_H
++
++/* Kernel header names inferred from the version checks and vmap use below */
++#include <linux/version.h>
++#include <linux/vmalloc.h>
++
++static inline void *pvr_vmap(struct page **pages,
++			     unsigned int count,
++			     __maybe_unused unsigned long flags,
++			     pgprot_t prot)
++{
++#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
++	return vmap(pages, count, flags, prot);
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
++	return vm_map_ram(pages, count, -1, prot);
++#else
++	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
++		return vm_map_ram(pages, count, -1);
++	else
++		return vmap(pages, count, flags, prot);
++#endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */
++}
++
++static inline void pvr_vunmap(void *pages,
++			      __maybe_unused unsigned int count,
++			      __maybe_unused pgprot_t prot)
++{
++#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
++	vunmap(pages);
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
++	vm_unmap_ram(pages, count);
++#else
++	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
++		vm_unmap_ram(pages, count);
++	else
++		vunmap(pages);
++#endif /* !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS) */
++}
++
++#endif /* PVR_VMAP_H */
+diff --git a/drivers/gpu/drm/img-rogue/pvrmodule.h b/drivers/gpu/drm/img-rogue/pvrmodule.h
+new file mode 100644
+index 000000000000..267c7b687487
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pvrmodule.h
+@@ -0,0 +1,48 @@
++/*************************************************************************/ /*!
++@Title          Module Author and License.
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef _PVRMODULE_H_
++#define _PVRMODULE_H_
++
++MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
++MODULE_LICENSE("Dual MIT/GPL");
++
++#endif /* _PVRMODULE_H_ */
+diff --git a/drivers/gpu/drm/img-rogue/pvrsrv.c b/drivers/gpu/drm/img-rogue/pvrsrv.c
+new file mode 100644
+index 000000000000..c023870f562c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pvrsrv.c
+@@ -0,0 +1,3028 @@
++/*************************************************************************/ /*!
++@File
++@Title          core services functions
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Main APIs for core services functions
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "rgxdebug.h" ++#include "handle.h" ++#include "connection_server.h" ++#include "osconnection_server.h" ++#include "pdump_km.h" ++#include "ra.h" ++#include "allocmem.h" ++#include "pmr.h" ++#include "pvrsrv.h" ++#include "srvcore.h" ++#include "services_km.h" ++#include "pvrsrv_device.h" ++#include "pvr_debug.h" ++#include "debug_common.h" ++#include "pvr_notifier.h" ++#include "sync.h" ++#include "sync_server.h" ++#include "sync_checkpoint.h" ++#include "sync_fallback_server.h" ++#include "sync_checkpoint_init.h" ++#include "devicemem.h" ++#include "cache_km.h" ++#include "info_page.h" ++#include "info_page_defs.h" ++#include "pvrsrv_bridge_init.h" ++#include "devicemem_server.h" ++#include "km_apphint_defs.h" ++#include "di_server.h" ++#include "di_impl_brg.h" ++#include "htb_debug.h" ++#include "dma_km.h" ++ ++#include "log2.h" ++ ++#include "lists.h" ++#include "dllist.h" ++#include "syscommon.h" ++#include "sysvalidation.h" ++ ++#include "physmem_lma.h" ++#include "physmem_osmem.h" ++#include "physmem_hostmem.h" ++ ++#include "tlintern.h" ++#include "htbserver.h" ++ ++//#define MULTI_DEVICE_BRINGUP ++ ++#if defined(MULTI_DEVICE_BRINGUP) ++#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) PVR_DPF((PVR_DBG_MESSAGE, msg, __VA_ARGS__)) ++#else ++#define MULTI_DEVICE_BRINGUP_DPF(msg, ...) ++#endif ++ ++#if defined(SUPPORT_RGX) ++#include "rgxinit.h" ++#include "rgxhwperf.h" ++#include "rgxfwutils.h" ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++#include "ri_server.h" ++#endif ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#include "process_stats.h" ++#endif ++ ++#include "vz_vmm_pvz.h" ++ ++#include "devicemem_history_server.h" ++ ++#if defined(SUPPORT_LINUX_DVFS) ++#include "pvr_dvfs_device.h" ++#endif ++ ++#if defined(SUPPORT_DISPLAY_CLASS) ++#include "dc_server.h" ++#endif ++ ++#include "rgx_options.h" ++#include "srvinit.h" ++#include "rgxutils.h" ++ ++#include "oskm_apphint.h" ++#include "pvrsrv_apphint.h" ++ ++#include "pvrsrv_tlstreams.h" ++#include "tlstream.h" ++ ++#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) ++#include "physmem_test.h" ++#endif ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#include "virt_validation_defs.h" ++#endif ++ ++#if defined(__linux__) ++#include "km_apphint.h" ++#endif /* defined(__linux__) */ ++ ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++#define INFINITE_SLEEP_TIMEOUT 0ULL ++#endif ++ ++/*! Wait 100ms before retrying deferred clean-up again */ ++#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 100000ULL ++ ++/*! Wait 8hrs when no deferred clean-up required. Allows a poll several times ++ * a day to check for any missed clean-up. */ ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT INFINITE_SLEEP_TIMEOUT ++#else ++#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL ++#endif ++ ++/*! 
When unloading try a few times to free everything remaining on the list */ ++#define CLEANUP_THREAD_UNLOAD_RETRY 4 ++ ++#define PVRSRV_TL_CTLR_STREAM_SIZE 4096 ++ ++static PVRSRV_DATA *gpsPVRSRVData; ++static IMG_UINT32 g_ui32InitFlags; ++ ++static IMG_BOOL g_CleanupThread_work; ++static IMG_BOOL g_DevicesWatchdogThread_work; ++ ++/* mark which parts of Services were initialised */ ++#define INIT_DATA_ENABLE_PDUMPINIT 0x1U ++ ++/* Callback to dump info of cleanup thread in debug_dump */ ++static void CleanupThreadDumpInfo(DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PVRSRV_DATA *psPVRSRVData; ++ psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items Queued : %u", ++ OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsQueued)); ++ PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items dropped after " ++ "retry limit reached : %u", ++ OSAtomicRead(&psPVRSRVData->i32NumCleanupItemsNotCompleted)); ++} ++ ++/* Add work to the cleanup thread work list. ++ * The work item will be executed by the cleanup thread ++ */ ++void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData) ++{ ++ PVRSRV_DATA *psPVRSRVData; ++ PVRSRV_ERROR eError; ++ ++ psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ PVR_ASSERT(psData != NULL); ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload) ++#else ++ if (psPVRSRVData->bUnload) ++#endif ++ { ++ CLEANUP_THREAD_FN pfnFree = psData->pfnFree; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately")); ++ ++ eError = pfnFree(psData->pvData); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to free resource " ++ "(callback " IMG_PFN_FMTSPEC "). 
" ++ "Immediate free will not be retried.", ++ pfnFree)); ++ } ++ } ++ else ++ { ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++ /* add this work item to the list */ ++ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); ++ dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode); ++ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); ++ ++ OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsQueued); ++ ++ /* signal the cleanup thread to ensure this item gets processed */ ++ eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ } ++} ++ ++/* Pop an item from the head of the cleanup thread work list */ ++static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData) ++{ ++ DLLIST_NODE *psNode; ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); ++ psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList); ++ if (psNode != NULL) ++ { ++ dllist_remove_node(psNode); ++ } ++ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); ++ ++ return psNode; ++} ++ ++/* Process the cleanup thread work list */ ++static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData, ++ IMG_BOOL *pbUseGlobalEO) ++{ ++ DLLIST_NODE *psNodeIter, *psNodeLast; ++ PVRSRV_ERROR eError; ++ IMG_BOOL bNeedRetry = IMG_FALSE; ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++ /* any callback functions which return error will be ++ * moved to the back of the list, and additional items can be added ++ * to the list at any time so we ensure we only iterate from the ++ * head of the list to the current tail (since the tail may always ++ * be changing) ++ */ ++ ++ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); ++ psNodeLast = dllist_get_prev_node(&psPVRSRVData->sCleanupThreadWorkList); ++ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); ++ ++ if (psNodeLast == NULL) ++ { ++ /* no elements to clean up */ ++ return IMG_FALSE; ++ } ++ ++ do ++ { ++ psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData); ++ ++ if (psNodeIter != NULL) ++ { ++ PVRSRV_CLEANUP_THREAD_WORK *psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode); ++ CLEANUP_THREAD_FN pfnFree; ++ ++ /* get the function pointer address here so we have access to it ++ * in order to report the error in case of failure, without having ++ * to depend on psData not having been freed ++ */ ++ pfnFree = psData->pfnFree; ++ ++ *pbUseGlobalEO = psData->bDependsOnHW; ++ eError = pfnFree(psData->pvData); ++ ++ if (eError != PVRSRV_OK) ++ { ++ /* move to back of the list, if this item's ++ * retry count hasn't hit zero. ++ */ ++ if (CLEANUP_THREAD_IS_RETRY_TIMEOUT(psData)) ++ { ++ if (CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(psData)) ++ { ++ bNeedRetry = IMG_TRUE; ++ } ++ } ++ else ++ { ++ if (psData->ui32RetryCount-- > 0) ++ { ++ bNeedRetry = IMG_TRUE; ++ } ++ } ++ ++ if (bNeedRetry) ++ { ++ OSSpinLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); ++ dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter); ++ OSSpinLockRelease(psPVRSRVData->hCleanupThreadWorkListLock, uiFlags); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to free resource " ++ "(callback " IMG_PFN_FMTSPEC "). 
" ++ "Retry limit reached", ++ pfnFree)); ++ OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItemsQueued); ++ OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItemsNotCompleted); ++ ++ } ++ } ++ else ++ { ++ OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItemsQueued); ++ } ++ } ++ } while ((psNodeIter != NULL) && (psNodeIter != psNodeLast)); ++ ++ return bNeedRetry; ++} ++ ++// #define CLEANUP_DPFL PVR_DBG_WARNING ++#define CLEANUP_DPFL PVR_DBG_MESSAGE ++ ++/* Create/initialise data required by the cleanup thread, ++ * before the cleanup thread is started ++ */ ++static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Create the clean up event object */ ++ ++ eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &gpsPVRSRVData->hCleanupEventObject); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Exit); ++ ++ /* initialise the mutex and linked list required for the cleanup thread work list */ ++ ++ eError = OSSpinLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", Exit); ++ ++ dllist_init(&psPVRSRVData->sCleanupThreadWorkList); ++ ++Exit: ++ return eError; ++} ++ ++static void CleanupThread(void *pvData) ++{ ++ PVRSRV_DATA *psPVRSRVData = pvData; ++ IMG_BOOL bRetryWorkList = IMG_FALSE; ++ IMG_HANDLE hGlobalEvent; ++ IMG_HANDLE hOSEvent; ++ PVRSRV_ERROR eRc; ++ IMG_BOOL bUseGlobalEO = IMG_FALSE; ++ IMG_UINT32 uiUnloadRetry = 0; ++ ++ /* Store the process id (pid) of the clean-up thread */ ++ psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID(); ++ psPVRSRVData->cleanupThreadTid = OSGetCurrentThreadID(); ++ OSAtomicWrite(&psPVRSRVData->i32NumCleanupItemsQueued, 0); ++ OSAtomicWrite(&psPVRSRVData->i32NumCleanupItemsNotCompleted, 0); ++ ++ PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... ")); ++ ++ /* Open an event on the clean up event object so we can listen on it, ++ * abort the clean up thread and driver if this fails. ++ */ ++ eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent); ++ PVR_ASSERT(eRc == PVRSRV_OK); ++ ++ eRc = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hGlobalEvent); ++ PVR_ASSERT(eRc == PVRSRV_OK); ++ ++ /* While the driver is in a good state and is not being unloaded ++ * try to free any deferred items when signalled ++ */ ++ while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) ++ { ++ IMG_HANDLE hEvent; ++ ++ if (psPVRSRVData->bUnload) ++ { ++ if (dllist_is_empty(&psPVRSRVData->sCleanupThreadWorkList) || ++ uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY) ++ { ++ break; ++ } ++ uiUnloadRetry++; ++ } ++ ++ /* Wait until signalled for deferred clean up OR wait for a ++ * short period if the previous deferred clean up was not able ++ * to release all the resources before trying again. ++ * Bridge lock re-acquired on our behalf before the wait call returns. ++ */ ++ ++ if (bRetryWorkList && bUseGlobalEO) ++ { ++ hEvent = hGlobalEvent; ++ } ++ else ++ { ++ hEvent = hOSEvent; ++ } ++ ++ eRc = OSEventObjectWaitKernel(hEvent, ++ bRetryWorkList ? 
++ CLEANUP_THREAD_WAIT_RETRY_TIMEOUT : ++ CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT); ++ if (eRc == PVRSRV_ERROR_TIMEOUT) ++ { ++ PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout")); ++ } ++ else if (eRc == PVRSRV_OK) ++ { ++ PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received")); ++ } ++ else if (g_CleanupThread_work) ++ { ++ PVR_LOG_ERROR(eRc, "OSEventObjectWaitKernel"); ++ } ++ ++ bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, &bUseGlobalEO); ++ } ++ ++ OSSpinLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock); ++ ++ eRc = OSEventObjectClose(hOSEvent); ++ PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose"); ++ ++ eRc = OSEventObjectClose(hGlobalEvent); ++ PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose"); ++ ++ PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... ")); ++} ++ ++IMG_PID PVRSRVCleanupThreadGetPid(void) ++{ ++ return gpsPVRSRVData->cleanupThreadPid; ++} ++ ++uintptr_t PVRSRVCleanupThreadGetTid(void) ++{ ++ return gpsPVRSRVData->cleanupThreadTid; ++} ++ ++static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, ++ va_list va) ++{ ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; ++#endif ++ PVRSRV_DEVICE_HEALTH_STATUS *pePreviousHealthStatus, eHealthStatus; ++ PVRSRV_ERROR eError; ++ PVRSRV_DEVICE_DEBUG_DUMP_STATUS eDebugDumpState; ++ IMG_BOOL bCheckAfterTimePassed; ++ ++ pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *); ++ bCheckAfterTimePassed = va_arg(va, IMG_BOOL); ++ ++ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) ++ { ++ return; ++ } ++ ++ if (psDeviceNode->pfnUpdateHealthStatus != NULL) ++ { ++ eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, bCheckAfterTimePassed); ++ PVR_WARN_IF_ERROR(eError, "pfnUpdateHealthStatus"); ++ } ++ eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus); ++ ++ if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK) ++ { ++ if (eHealthStatus != *pePreviousHealthStatus) ++ { ++#if defined(SUPPORT_RGX) ++ if (!(psDevInfo->ui32DeviceFlags & ++ RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)) ++#else ++ /* In this case we don't have an RGX device */ ++ if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED) ++#endif ++ { ++ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: " ++ "Device status not OK!!!")); ++ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, ++ NULL, NULL); ++ } ++ } ++ } ++ ++ *pePreviousHealthStatus = eHealthStatus; ++ ++ /* Have we received request from FW to capture debug dump(could be due to HWR) */ ++ eDebugDumpState = (PVRSRV_DEVICE_DEBUG_DUMP_STATUS)OSAtomicCompareExchange( ++ &psDeviceNode->eDebugDumpRequested, ++ PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE, ++ PVRSRV_DEVICE_DEBUG_DUMP_NONE); ++ if (PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE == eDebugDumpState) ++ { ++ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); ++ } ++ ++} ++ ++#if defined(SUPPORT_RGX) ++static void HWPerfPeriodicHostEventsThread(void *pvData) ++{ ++ PVRSRV_DATA *psPVRSRVData = pvData; ++ IMG_HANDLE hOSEvent; ++ PVRSRV_ERROR eError; ++ ++ eError = OSEventObjectOpen(psPVRSRVData->hHWPerfHostPeriodicEvObj, &hOSEvent); ++ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); ++ ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && ++ !psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop) ++#else ++ while (!psPVRSRVData->bUnload && !psPVRSRVData->bHWPerfHostThreadStop) ++#endif ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ 
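[Editorial note] Stepping back from the thread bodies: the deferred-cleanup machinery above (PVRSRVCleanupThreadAddWork feeding _CleanupThreadProcessWorkList) boils down to a small pattern: pop queued items, run each item's free callback, and re-queue a failing item until its retry budget is spent, after which it is dropped and counted as not completed. The sketch below shows that pattern with illustrative names; the driver's spinlock and timeout-based retry accounting are omitted, and survivors are parked on a side list so one pass never revisits them, mirroring the driver's iterate-to-a-tail-snapshot trick.

    #include <stdbool.h>
    #include <stddef.h>

    typedef int (*cleanup_fn)(void *priv);   /* 0 on success, like PVRSRV_OK */

    struct work_item {
        struct work_item *next;
        cleanup_fn        pfn_free;          /* psData->pfnFree */
        void             *priv;              /* psData->pvData */
        unsigned int      retries_left;      /* psData->ui32RetryCount */
    };

    static struct work_item *pop_head(struct work_item **head)
    {
        struct work_item *it = *head;

        if (it != NULL)
            *head = it->next;
        return it;
    }

    static void push_tail(struct work_item **head, struct work_item *it)
    {
        struct work_item **pp = head;

        while (*pp != NULL)
            pp = &(*pp)->next;
        it->next = NULL;
        *pp = it;
    }

    /* One pass over the queue. Returns true if something was re-queued, in
     * which case the caller sleeps for the short retry timeout (~100 ms)
     * instead of the long idle timeout (8 h, or indefinitely). */
    static bool process_work(struct work_item **head)
    {
        bool need_retry = false;
        struct work_item *requeued = NULL;
        struct work_item *it;

        while ((it = pop_head(head)) != NULL)
        {
            if (it->pfn_free(it->priv) == 0)
                continue;                    /* freed, nothing more to do */

            if (it->retries_left-- > 0)
            {
                push_tail(&requeued, it);    /* retry on a later wakeup */
                need_retry = true;
            }
            /* else: retry limit reached; the driver logs an error, counts
             * the item as "not completed" and stops tracking it. */
        }

        /* Put survivors back for the next pass. */
        while ((it = pop_head(&requeued)) != NULL)
            push_tail(head, it);

        return need_retry;
    }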
IMG_BOOL bInfiniteSleep = IMG_TRUE; ++ ++ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)psPVRSRVData->ui32HWPerfHostThreadTimeout * 1000); ++ if (eError == PVRSRV_OK && (psPVRSRVData->bUnload || psPVRSRVData->bHWPerfHostThreadStop)) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "HWPerfPeriodicHostEventsThread: Shutdown event received.")); ++ break; ++ } ++ ++ for (psDeviceNode = psPVRSRVData->psDeviceNodeList; ++ psDeviceNode != NULL; ++ psDeviceNode = psDeviceNode->psNext) ++ { ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ /* If the psDevInfo or hHWPerfHostStream are NULL it most ++ * likely means that this device or stream has not been ++ * initialised yet, so just skip */ ++ if (psDevInfo == NULL || psDevInfo->hHWPerfHostStream == NULL) ++ { ++ continue; ++ } ++ ++ /* Check if the HWPerf host stream is open for reading before writing ++ * a packet, this covers cases where the event filter is not zeroed ++ * before a reader disconnects. */ ++ if (TLStreamIsOpenForReading(psDevInfo->hHWPerfHostStream)) ++ { ++ /* As long as any of the streams is opened don't go into ++ * indefinite sleep. */ ++ bInfiniteSleep = IMG_FALSE; ++#if defined(SUPPORT_RGX) ++ RGXSRV_HWPERF_HOST_INFO(psDevInfo, RGX_HWPERF_INFO_EV_MEM_USAGE); ++#endif ++ } ++ } ++ ++ if (bInfiniteSleep) ++ { ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++ psPVRSRVData->ui32HWPerfHostThreadTimeout = INFINITE_SLEEP_TIMEOUT; ++#else ++ /* Use an 8 hour timeout if indefinite sleep is not supported. */ ++ psPVRSRVData->ui32HWPerfHostThreadTimeout = 60 * 60 * 8 * 1000; ++#endif ++ } ++ } ++ ++ eError = OSEventObjectClose(hOSEvent); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); ++} ++#endif ++ ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++ ++typedef enum ++{ ++ DWT_ST_INIT, ++ DWT_ST_SLEEP_POWERON, ++ DWT_ST_SLEEP_POWEROFF, ++ DWT_ST_SLEEP_DEFERRED, ++ DWT_ST_FINAL ++} DWT_STATE; ++ ++typedef enum ++{ ++ DWT_SIG_POWERON, ++ DWT_SIG_POWEROFF, ++ DWT_SIG_TIMEOUT, ++ DWT_SIG_UNLOAD, ++ DWT_SIG_ERROR ++} DWT_SIGNAL; ++ ++static inline IMG_BOOL _DwtIsPowerOn(PVRSRV_DATA *psPVRSRVData) ++{ ++ return List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList, ++ PVRSRVIsDevicePowered); ++} ++ ++static inline void _DwtCheckHealthStatus(PVRSRV_DATA *psPVRSRVData, ++ PVRSRV_DEVICE_HEALTH_STATUS *peStatus, ++ IMG_BOOL bTimeOut) ++{ ++ List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, ++ DevicesWatchdogThread_ForEachVaCb, ++ peStatus, ++ bTimeOut); ++} ++ ++static DWT_SIGNAL _DwtWait(PVRSRV_DATA *psPVRSRVData, IMG_HANDLE hOSEvent, ++ IMG_UINT32 ui32Timeout) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64) ui32Timeout * 1000); ++ ++#ifdef PVR_TESTING_UTILS ++ psPVRSRVData->ui32DevicesWdWakeupCounter++; ++#endif ++ ++ if (eError == PVRSRV_OK) ++ { ++ if (psPVRSRVData->bUnload) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event" ++ " received.")); ++ return DWT_SIG_UNLOAD; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state " ++ "change event received.")); ++ ++ if (_DwtIsPowerOn(psPVRSRVData)) ++ { ++ return DWT_SIG_POWERON; ++ } ++ else ++ { ++ return DWT_SIG_POWEROFF; ++ } ++ } ++ } ++ else if (eError == PVRSRV_ERROR_TIMEOUT) ++ { ++ return DWT_SIG_TIMEOUT; ++ } ++ ++ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: Error (%d) when" ++ " waiting for event!", eError)); ++ return DWT_SIG_ERROR; ++} ++ ++#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ ++ ++static void 
DevicesWatchdogThread(void *pvData) ++{ ++ PVRSRV_DATA *psPVRSRVData = pvData; ++ PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK; ++ IMG_HANDLE hOSEvent; ++ PVRSRV_ERROR eError; ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++ DWT_STATE eState = DWT_ST_INIT; ++ const IMG_UINT32 ui32OnTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; ++ const IMG_UINT32 ui32OffTimeout = INFINITE_SLEEP_TIMEOUT; ++#else ++ IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; ++ /* Flag used to defer the sleep timeout change by 1 loop iteration. ++ * This helps to ensure at least two health checks are performed before a long sleep. ++ */ ++ IMG_BOOL bDoDeferredTimeoutChange = IMG_FALSE; ++#endif ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.", ++ DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)); ++ ++ /* Open an event on the devices watchdog event object so we can listen on it ++ and abort the devices watchdog thread. */ ++ eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent); ++ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); ++ ++ /* Loop continuously checking the device status every few seconds. */ ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && ++ !psPVRSRVData->bUnload) ++#else ++ while (!psPVRSRVData->bUnload) ++#endif ++ { ++#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++ ++ switch (eState) ++ { ++ case DWT_ST_INIT: ++ { ++ if (_DwtIsPowerOn(psPVRSRVData)) ++ { ++ eState = DWT_ST_SLEEP_POWERON; ++ } ++ else ++ { ++ eState = DWT_ST_SLEEP_POWEROFF; ++ } ++ ++ break; ++ } ++ case DWT_ST_SLEEP_POWERON: ++ { ++ DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent, ++ ui32OnTimeout); ++ ++ switch (eSignal) { ++ case DWT_SIG_POWERON: ++ /* self-transition, nothing to do */ ++ break; ++ case DWT_SIG_POWEROFF: ++ eState = DWT_ST_SLEEP_DEFERRED; ++ break; ++ case DWT_SIG_TIMEOUT: ++ _DwtCheckHealthStatus(psPVRSRVData, ++ &ePreviousHealthStatus, ++ IMG_TRUE); ++ /* self-transition */ ++ break; ++ case DWT_SIG_UNLOAD: ++ eState = DWT_ST_FINAL; ++ break; ++ case DWT_SIG_ERROR: ++ /* deliberately ignored */ ++ break; ++ } ++ ++ break; ++ } ++ case DWT_ST_SLEEP_POWEROFF: ++ { ++ DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent, ++ ui32OffTimeout); ++ ++ switch (eSignal) { ++ case DWT_SIG_POWERON: ++ eState = DWT_ST_SLEEP_POWERON; ++ _DwtCheckHealthStatus(psPVRSRVData, ++ &ePreviousHealthStatus, ++ IMG_FALSE); ++ break; ++ case DWT_SIG_POWEROFF: ++ /* self-transition, nothing to do */ ++ break; ++ case DWT_SIG_TIMEOUT: ++ /* self-transition */ ++ _DwtCheckHealthStatus(psPVRSRVData, ++ &ePreviousHealthStatus, ++ IMG_TRUE); ++ break; ++ case DWT_SIG_UNLOAD: ++ eState = DWT_ST_FINAL; ++ break; ++ case DWT_SIG_ERROR: ++ /* deliberately ignored */ ++ break; ++ } ++ ++ break; ++ } ++ case DWT_ST_SLEEP_DEFERRED: ++ { ++ DWT_SIGNAL eSignal =_DwtWait(psPVRSRVData, hOSEvent, ++ ui32OnTimeout); ++ ++ switch (eSignal) { ++ case DWT_SIG_POWERON: ++ eState = DWT_ST_SLEEP_POWERON; ++ _DwtCheckHealthStatus(psPVRSRVData, ++ &ePreviousHealthStatus, ++ IMG_FALSE); ++ break; ++ case DWT_SIG_POWEROFF: ++ /* self-transition, nothing to do */ ++ break; ++ case DWT_SIG_TIMEOUT: ++ eState = DWT_ST_SLEEP_POWEROFF; ++ _DwtCheckHealthStatus(psPVRSRVData, ++ &ePreviousHealthStatus, ++ IMG_FALSE); ++ break; ++ case DWT_SIG_UNLOAD: ++ eState = DWT_ST_FINAL; ++ break; ++ case DWT_SIG_ERROR: ++ /* deliberately ignored */ ++ break; ++ } ++ ++ break; ++ 
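/* [Editorial note] Transition summary for the indefinite-sleep watchdog
 * state machine above (UNLOAD sends every state to FINAL, and DWT_SIG_ERROR
 * is deliberately ignored in all of them):
 *
 *   state             POWERON            POWEROFF      TIMEOUT
 *   ---------------   ----------------   -----------   ------------------
 *   SLEEP_POWERON     stay               -> DEFERRED   health check, stay
 *   SLEEP_POWEROFF    -> POWERON, check  stay          health check, stay
 *   SLEEP_DEFERRED    -> POWERON, check  stay          -> POWEROFF, check
 *
 * SLEEP_DEFERRED exists so that, after power-off, at least one more pass
 * runs at the short power-on timeout before committing to the indefinite
 * sleep, giving the health check a chance to compare two state snapshots. */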
} ++ case DWT_ST_FINAL: ++ /* the loop should terminate on next spin if this state is ++ * reached so nothing to do here. */ ++ break; ++ } ++ ++#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ ++ IMG_BOOL bPwrIsOn = IMG_FALSE; ++ IMG_BOOL bTimeOut = IMG_FALSE; ++ ++ /* Wait time between polls (done at the start of the loop to allow devices ++ to initialise) or for the event signal (shutdown or power on). */ ++ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000); ++ ++#ifdef PVR_TESTING_UTILS ++ psPVRSRVData->ui32DevicesWdWakeupCounter++; ++#endif ++ if (eError == PVRSRV_OK) ++ { ++ if (psPVRSRVData->bUnload) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received.")); ++ break; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received.")); ++ } ++ } ++ else if (eError != PVRSRV_ERROR_TIMEOUT && g_DevicesWatchdogThread_work) ++ { ++ /* If timeout do nothing otherwise print warning message. */ ++ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: " ++ "Error (%d) when waiting for event!", eError)); ++ } ++ else ++ { ++ bTimeOut = IMG_TRUE; ++ } ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList, ++ DevicesWatchdogThread_ForEachVaCb, ++ &ePreviousHealthStatus, ++ bTimeOut); ++ bPwrIsOn = List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList, ++ PVRSRVIsDevicePowered); ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ ++ if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans) ++ { ++ psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0; ++ ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; ++ bDoDeferredTimeoutChange = IMG_FALSE; ++ } ++ else ++ { ++ /* First, check if the previous loop iteration signalled a need to change the timeout period */ ++ if (bDoDeferredTimeoutChange) ++ { ++ ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT; ++ bDoDeferredTimeoutChange = IMG_FALSE; ++ } ++ else ++ { ++ /* Signal that we need to change the sleep timeout in the next loop iteration ++ * to allow the device health check code a further iteration at the current ++ * sleep timeout in order to determine bad health (e.g. stalled cCCB) by ++ * comparing past and current state snapshots */ ++ bDoDeferredTimeoutChange = IMG_TRUE; ++ } ++ } ++ ++#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */ ++ } ++ ++ eError = OSEventObjectClose(hOSEvent); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); ++} ++ ++#if defined(SUPPORT_AUTOVZ) ++static void AutoVzWatchdogThread_ForEachCb(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) ++ { ++ return; ++ } ++ else if (psDeviceNode->pfnUpdateAutoVzWatchdog != NULL) ++ { ++ psDeviceNode->pfnUpdateAutoVzWatchdog(psDeviceNode); ++ } ++} ++ ++static void AutoVzWatchdogThread(void *pvData) ++{ ++ PVRSRV_DATA *psPVRSRVData = pvData; ++ IMG_HANDLE hOSEvent; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32Timeout = PVR_AUTOVZ_WDG_PERIOD_MS / 3; ++ ++ /* Open an event on the devices watchdog event object so we can listen on it ++ and abort the devices watchdog thread. 
*/ ++ eError = OSEventObjectOpen(psPVRSRVData->hAutoVzWatchdogEvObj, &hOSEvent); ++ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectOpen"); ++ ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && ++ !psPVRSRVData->bUnload) ++#else ++ while (!psPVRSRVData->bUnload) ++#endif ++ { ++ /* Wait time between polls (done at the start of the loop to allow devices ++ to initialise) or for the event signal (shutdown or power on). */ ++ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000); ++ ++ List_PVRSRV_DEVICE_NODE_ForEach(psPVRSRVData->psDeviceNodeList, ++ AutoVzWatchdogThread_ForEachCb); ++ } ++ ++ eError = OSEventObjectClose(hOSEvent); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose"); ++} ++#endif /* SUPPORT_AUTOVZ */ ++ ++PVRSRV_DATA *PVRSRVGetPVRSRVData(void) ++{ ++ return gpsPVRSRVData; ++} ++ ++static PVRSRV_ERROR InitialiseInfoPageTimeouts(PVRSRV_DATA *psPVRSRVData) ++{ ++ if (NULL == psPVRSRVData) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_RETRIES] = WAIT_TRY_COUNT; ++ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_VALUE_TIMEOUT_MS] = ++ ((MAX_HW_TIME_US / 10000) + 1000); ++ /* TIMEOUT_INFO_VALUE_TIMEOUT_MS resolves to... ++ vp : 2000 + 1000 ++ emu : 2000 + 1000 ++ rgx_nohw : 50 + 1000 ++ plato : 30000 + 1000 (VIRTUAL_PLATFORM or EMULATOR) ++ 50 + 1000 (otherwise) ++ */ ++ ++ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_RETRIES] = 5; ++ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_CONDITION_TIMEOUT_MS] = ++ ((MAX_HW_TIME_US / 10000) + 100); ++ /* TIMEOUT_INFO_CONDITION_TIMEOUT_MS resolves to... ++ vp : 2000 + 100 ++ emu : 2000 + 100 ++ rgx_nohw : 50 + 100 ++ plato : 30000 + 100 (VIRTUAL_PLATFORM or EMULATOR) ++ 50 + 100 (otherwise) ++ */ ++ ++ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_RETRIES] = 10; ++#if defined(VIRTUAL_PLATFORM) ++ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1200000U; ++#else ++#if defined(EMULATOR) ++ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 20000U; ++#else ++ psPVRSRVData->pui32InfoPage[TIMEOUT_INFO_TASK_QUEUE_FLUSH_TIMEOUT_MS] = 1000U; ++#endif /* EMULATOR */ ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR PopulateInfoPageBridges(PVRSRV_DATA *psPVRSRVData) ++{ ++ PVR_RETURN_IF_INVALID_PARAM(psPVRSRVData); ++ ++ psPVRSRVData->pui32InfoPage[BRIDGE_INFO_PVR_BRIDGES] = gui32PVRBridges; ++ ++#if defined(SUPPORT_RGX) ++ psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = gui32RGXBridges; ++#else ++ psPVRSRVData->pui32InfoPage[BRIDGE_INFO_RGX_BRIDGES] = 0; ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++static void _ThreadsDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PVR_UNREFERENCED_PARAMETER(hDbgRequestHandle); ++ ++ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) ++ { ++ PVR_DUMPDEBUG_LOG("------[ Server Thread Summary ]------"); ++ OSThreadDumpInfo(pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++} ++ ++PVRSRV_ERROR ++PVRSRVCommonDriverInit(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVRSRV_DATA *psPVRSRVData = NULL; ++ ++ IMG_UINT32 ui32AppHintCleanupThreadPriority; ++ IMG_UINT32 ui32AppHintWatchdogThreadPriority; ++ IMG_BOOL bEnablePageFaultDebug; ++ IMG_BOOL bEnableFullSyncTracking; ++ ++ void *pvAppHintState = NULL; ++ IMG_UINT32 ui32AppHintDefault; ++ ++ /* ++ * As this function performs one time driver 
initialisation, use the ++ * Services global device-independent data to determine whether or not ++ * this function has already been called. ++ */ ++ if (gpsPVRSRVData) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Driver already initialised", __func__)); ++ return PVRSRV_ERROR_ALREADY_EXISTS; ++ } ++ ++ /* ++ * Allocate the device-independent data ++ */ ++ psPVRSRVData = OSAllocZMem(sizeof(*gpsPVRSRVData)); ++ PVR_GOTO_IF_NOMEM(psPVRSRVData, eError, Error); ++ ++ /* Now it is set up, point gpsPVRSRVData to the actual data */ ++ gpsPVRSRVData = psPVRSRVData; ++ ++ eError = OSWRLockCreate(&gpsPVRSRVData->hDeviceNodeListLock); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ /* Register the driver context debug table */ ++ eError = PVRSRVRegisterDriverDbgTable(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ /* Register the Server Thread Debug notifier */ ++ eError = PVRSRVRegisterDriverDbgRequestNotify(&gpsPVRSRVData->hThreadsDbgReqNotify, ++ _ThreadsDebugRequestNotify, ++ DEBUG_REQUEST_SRV, ++ NULL); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ eError = DIInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++#if defined(SUPPORT_DI_BRG_IMPL) ++ eError = PVRDIImplBrgRegister(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++#endif ++ ++#ifdef PVRSRV_ENABLE_PROCESS_STATS ++ eError = PVRSRVStatsInitialise(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++#endif /* PVRSRV_ENABLE_PROCESS_STATS */ ++ ++ eError = HTB_CreateDIEntry(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ /* ++ * Initialise the server bridges ++ */ ++ eError = ServerBridgeInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ eError = PhysHeapInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ eError = DevmemIntInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ eError = DebugCommonInitDriver(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ eError = BridgeDispatcherInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ /* Init any OS specific's */ ++ eError = OSInitEnvData(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ /* Early init. 
server cache maintenance */ ++ eError = CacheOpInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ RIInitKM(); ++#endif ++ ++ ui32AppHintDefault = PVRSRV_APPHINT_ENABLEPAGEFAULTDEBUG; ++ OSCreateKMAppHintState(&pvAppHintState); ++ OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnablePageFaultDebug, ++ &ui32AppHintDefault, &bEnablePageFaultDebug); ++ OSFreeKMAppHintState(pvAppHintState); ++ ++ if (bEnablePageFaultDebug) ++ { ++ eError = DevicememHistoryInitKM(); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevicememHistoryInitKM", Error); ++ } ++ ++ eError = PMRInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++#if defined(SUPPORT_DISPLAY_CLASS) ++ eError = DCInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++#endif ++ ++ /* Initialise overall system state */ ++ gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK; ++ ++ /* Create an event object */ ++ eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0; ++ ++ eError = PVRSRVCmdCompleteInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ eError = PVRSRVHandleInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ OSCreateKMAppHintState(&pvAppHintState); ++ ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY; ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, CleanupThreadPriority, ++ &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority); ++ ++ ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY; ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, WatchdogThreadPriority, ++ &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority); ++ ++ ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFULLSYNCTRACKING; ++ OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, EnableFullSyncTracking, ++ &ui32AppHintDefault, &bEnableFullSyncTracking); ++ OSFreeKMAppHintState(pvAppHintState); ++ pvAppHintState = NULL; ++ ++ eError = _CleanupThreadPrepare(gpsPVRSRVData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "_CleanupThreadPrepare", Error); ++ ++ /* Create a thread which is used to do the deferred cleanup */ ++ g_CleanupThread_work = IMG_TRUE; ++ eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread, ++ "pvr_defer_free", ++ CleanupThread, ++ CleanupThreadDumpInfo, ++ IMG_TRUE, ++ gpsPVRSRVData, ++ ui32AppHintCleanupThreadPriority); ++ if (eError != PVRSRV_OK) ++ g_CleanupThread_work = IMG_FALSE; ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:1", Error); ++ ++ /* Create the devices watchdog event object */ ++ eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Error); ++ ++ /* Create a thread which is used to detect fatal errors */ ++ g_DevicesWatchdogThread_work = IMG_TRUE; ++ eError = OSThreadCreatePriority(&gpsPVRSRVData->hDevicesWatchdogThread, ++ "pvr_device_wdg", ++ DevicesWatchdogThread, ++ NULL, ++ IMG_TRUE, ++ gpsPVRSRVData, ++ ui32AppHintWatchdogThreadPriority); ++ if (eError != PVRSRV_OK) ++ g_DevicesWatchdogThread_work = IMG_FALSE; ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:2", Error); ++ ++#if defined(SUPPORT_AUTOVZ) ++ /* Create the devices watchdog event object */ ++ eError = OSEventObjectCreate("PVRSRV_AUTOVZ_WATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hAutoVzWatchdogEvObj); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectCreate", Error); ++ ++ /* Create a thread that maintains the FW-KM connection by regularly updating the 
virtualization watchdog */ ++ eError = OSThreadCreatePriority(&gpsPVRSRVData->hAutoVzWatchdogThread, ++ "pvr_autovz_wdg", ++ AutoVzWatchdogThread, ++ NULL, ++ IMG_TRUE, ++ gpsPVRSRVData, ++ OS_THREAD_HIGHEST_PRIORITY); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSThreadCreatePriority:3", Error); ++#endif /* SUPPORT_AUTOVZ */ ++ ++#if defined(SUPPORT_RGX) ++ eError = OSLockCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", Error); ++#endif ++ ++ eError = HostMemDeviceCreate(&gpsPVRSRVData->psHostMemDeviceNode); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ /* Initialise the Transport Layer */ ++ eError = TLInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ /* Initialise pdump */ ++ eError = PDUMPINIT(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT; ++ ++ /* Initialise TL control stream */ ++ eError = TLStreamCreate(&psPVRSRVData->hTLCtrlStream, ++ PVRSRV_TL_CTLR_STREAM, PVRSRV_TL_CTLR_STREAM_SIZE, ++ TL_OPMODE_DROP_OLDEST, NULL, NULL, NULL, ++ NULL); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "TLStreamCreate"); ++ psPVRSRVData->hTLCtrlStream = NULL; ++ } ++ ++ eError = InfoPageCreate(psPVRSRVData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "InfoPageCreate", Error); ++ ++ ++ /* Initialise the Timeout Info */ ++ eError = InitialiseInfoPageTimeouts(psPVRSRVData); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ eError = PopulateInfoPageBridges(psPVRSRVData); ++ ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++ if (bEnableFullSyncTracking) ++ { ++ psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED; ++ } ++ if (bEnablePageFaultDebug) ++ { ++ psPVRSRVData->pui32InfoPage[DEBUG_FEATURE_FLAGS] |= DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; ++ } ++ ++ /* Initialise the Host Trace Buffer */ ++ eError = HTBInit(); ++ PVR_GOTO_IF_ERROR(eError, Error); ++ ++#if defined(SUPPORT_RGX) ++ RGXHWPerfClientInitAppHintCallbacks(); ++#endif ++ ++ /* Late init. client cache maintenance via info. page */ ++ eError = CacheOpInit2(); ++ PVR_LOG_GOTO_IF_ERROR(eError, "CacheOpInit2", Error); ++ ++#if defined(SUPPORT_FALLBACK_FENCE_SYNC) ++ eError = SyncFbRegisterSyncFunctions(); ++ PVR_LOG_GOTO_IF_ERROR(eError, "SyncFbRegisterSyncFunctions", Error); ++#endif ++ ++#if defined(PDUMP) ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) ++ /* If initialising the device on first connection, we will ++ * bind PDump capture to the first device we connect to later. ++ */ ++ psPVRSRVData->ui32PDumpBoundDevice = PVRSRV_MAX_DEVICES; ++#else ++ /* If not initialising the device on first connection, bind PDump ++ * capture to device 0. This is because we need to capture PDump ++ * during device initialisation but only want to capture PDump for ++ * a single device (by default, device 0). 
++ */ ++ psPVRSRVData->ui32PDumpBoundDevice = 0; ++#endif ++#endif ++ ++ return 0; ++ ++Error: ++ PVRSRVCommonDriverDeInit(); ++ return eError; ++} ++ ++void ++PVRSRVCommonDriverDeInit(void) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_BOOL bEnablePageFaultDebug = IMG_FALSE; ++ ++ if (gpsPVRSRVData == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: missing device-independent data", ++ __func__)); ++ return; ++ } ++ ++ if (gpsPVRSRVData->pui32InfoPage != NULL) ++ { ++ bEnablePageFaultDebug = GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED; ++ } ++ ++ gpsPVRSRVData->bUnload = IMG_TRUE; ++ ++#if defined(SUPPORT_RGX) ++ PVRSRVDestroyHWPerfHostThread(); ++ if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock) ++ { ++ OSLockDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); ++ gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock = NULL; ++ } ++#endif ++ ++ if (gpsPVRSRVData->hGlobalEventObject) ++ { ++ OSEventObjectSignal(gpsPVRSRVData->hGlobalEventObject); ++ } ++ ++#if defined(SUPPORT_AUTOVZ) ++ /* Stop and cleanup the devices watchdog thread */ ++ if (gpsPVRSRVData->hAutoVzWatchdogThread) ++ { ++ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) ++ { ++ if (gpsPVRSRVData->hAutoVzWatchdogEvObj) ++ { ++ eError = OSEventObjectSignal(gpsPVRSRVData->hAutoVzWatchdogEvObj); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ } ++ ++ eError = OSThreadDestroy(gpsPVRSRVData->hAutoVzWatchdogThread); ++ if (PVRSRV_OK == eError) ++ { ++ gpsPVRSRVData->hAutoVzWatchdogThread = NULL; ++ break; ++ } ++ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); ++ } ++ ++ if (gpsPVRSRVData->hAutoVzWatchdogEvObj) ++ { ++ eError = OSEventObjectDestroy(gpsPVRSRVData->hAutoVzWatchdogEvObj); ++ gpsPVRSRVData->hAutoVzWatchdogEvObj = NULL; ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); ++ } ++#endif /* SUPPORT_AUTOVZ */ ++ ++ /* Stop and cleanup the devices watchdog thread */ ++ if (gpsPVRSRVData->hDevicesWatchdogThread) ++ { ++ g_DevicesWatchdogThread_work = IMG_FALSE; ++ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) ++ { ++ if (gpsPVRSRVData->hDevicesWatchdogEvObj) ++ { ++ eError = OSEventObjectSignal(gpsPVRSRVData->hDevicesWatchdogEvObj); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ } ++ ++ eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread); ++ if (PVRSRV_OK == eError) ++ { ++ gpsPVRSRVData->hDevicesWatchdogThread = NULL; ++ break; ++ } ++ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ if (eError != PVRSRV_OK) ++ g_DevicesWatchdogThread_work = IMG_TRUE; ++ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); ++ } ++ ++ if (gpsPVRSRVData->hDevicesWatchdogEvObj) ++ { ++ eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj); ++ gpsPVRSRVData->hDevicesWatchdogEvObj = NULL; ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); ++ } ++ ++ /* Stop and cleanup the deferred clean up thread, event object and ++ * deferred context list. 
++ */ ++ if (gpsPVRSRVData->hCleanupThread) ++ { ++ g_CleanupThread_work = IMG_FALSE; ++ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) ++ { ++ if (gpsPVRSRVData->hCleanupEventObject) ++ { ++ eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ } ++ ++ eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread); ++ if (PVRSRV_OK == eError) ++ { ++ gpsPVRSRVData->hCleanupThread = NULL; ++ break; ++ } ++ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ if (eError != PVRSRV_OK) ++ g_CleanupThread_work = IMG_TRUE; ++ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); ++ } ++ ++ if (gpsPVRSRVData->hCleanupEventObject) ++ { ++ eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject); ++ gpsPVRSRVData->hCleanupEventObject = NULL; ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); ++ } ++ ++ /* Tear down the HTB before PVRSRVHandleDeInit() removes its TL handle */ ++ /* HTB De-init happens in device de-registration currently */ ++ eError = HTBDeInit(); ++ PVR_LOG_IF_ERROR(eError, "HTBDeInit"); ++ ++ /* Tear down CacheOp framework information page first */ ++ CacheOpDeInit2(); ++ ++ /* Clean up information page */ ++ InfoPageDestroy(gpsPVRSRVData); ++ ++ /* Close the TL control plane stream. */ ++ if (gpsPVRSRVData->hTLCtrlStream != NULL) ++ { ++ TLStreamClose(gpsPVRSRVData->hTLCtrlStream); ++ } ++ ++ /* deinitialise pdump */ ++ if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0) ++ { ++ PDUMPDEINIT(); ++ } ++ ++ /* Clean up Transport Layer resources that remain */ ++ TLDeInit(); ++ ++ HostMemDeviceDestroy(gpsPVRSRVData->psHostMemDeviceNode); ++ gpsPVRSRVData->psHostMemDeviceNode = NULL; ++ ++ eError = PVRSRVHandleDeInit(); ++ PVR_LOG_IF_ERROR(eError, "PVRSRVHandleDeInit"); ++ ++ /* destroy event object */ ++ if (gpsPVRSRVData->hGlobalEventObject) ++ { ++ OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject); ++ gpsPVRSRVData->hGlobalEventObject = NULL; ++ } ++ ++ PVRSRVCmdCompleteDeinit(); ++ ++#if defined(SUPPORT_DISPLAY_CLASS) ++ eError = DCDeInit(); ++ PVR_LOG_IF_ERROR(eError, "DCDeInit"); ++#endif ++ ++ eError = PMRDeInit(); ++ PVR_LOG_IF_ERROR(eError, "PMRDeInit"); ++ ++ BridgeDispatcherDeinit(); ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ RIDeInitKM(); ++#endif ++ ++ if (bEnablePageFaultDebug) ++ { ++ DevicememHistoryDeInitKM(); ++ } ++ ++ CacheOpDeInit(); ++ ++ OSDeInitEnvData(); ++ ++ (void) DevmemIntDeInit(); ++ ++ ServerBridgeDeInit(); ++ ++ PhysHeapDeinit(); ++ ++ HTB_DestroyDIEntry(); ++ ++#ifdef PVRSRV_ENABLE_PROCESS_STATS ++ PVRSRVStatsDestroy(); ++#endif /* PVRSRV_ENABLE_PROCESS_STATS */ ++ ++ DebugCommonDeInitDriver(); ++ ++ DIDeInit(); ++ ++ if (gpsPVRSRVData->hThreadsDbgReqNotify) ++ { ++ PVRSRVUnregisterDriverDbgRequestNotify(gpsPVRSRVData->hThreadsDbgReqNotify); ++ } ++ ++ PVRSRVUnregisterDriverDbgTable(); ++ ++ OSWRLockDestroy(gpsPVRSRVData->hDeviceNodeListLock); ++ ++ OSFreeMem(gpsPVRSRVData); ++ gpsPVRSRVData = NULL; ++} ++ ++static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ /* Only dump info once */ ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hDebugRequestHandle; ++ ++ PVR_DUMPDEBUG_LOG("------[ System Summary Device ID:%d ]------", psDeviceNode->sDevId.ui32InternalID); ++ ++ switch (psDeviceNode->eCurrentSysPowerState) ++ { ++ case PVRSRV_SYS_POWER_STATE_OFF: ++ PVR_DUMPDEBUG_LOG("Device 
System Power State: OFF"); ++ break; ++ case PVRSRV_SYS_POWER_STATE_ON: ++ PVR_DUMPDEBUG_LOG("Device System Power State: ON"); ++ break; ++ default: ++ PVR_DUMPDEBUG_LOG("Device System Power State: UNKNOWN (%d)", ++ psDeviceNode->eCurrentSysPowerState); ++ break; ++ } ++ ++ PVR_DUMPDEBUG_LOG("MaxHWTOut: %dus, WtTryCt: %d, WDGTOut(on,off): (%dms,%dms)", ++ MAX_HW_TIME_US, WAIT_TRY_COUNT, DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT, DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT); ++ ++ SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile); ++} ++ ++#define PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE (0x100000ULL * 32ULL) /* 32MB */ ++ ++static PVRSRV_ERROR PVRSRVValidatePhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ IMG_UINT32 ui32FlagsAccumulate = 0; ++ IMG_UINT32 i; ++ ++ PVR_LOG_RETURN_IF_FALSE(psDevConfig->ui32PhysHeapCount > 0, ++ "Device config must specify at least one phys heap config.", ++ PVRSRV_ERROR_PHYSHEAP_CONFIG); ++ ++ for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) ++ { ++ PHYS_HEAP_CONFIG *psHeapConf = &psDevConfig->pasPhysHeaps[i]; ++ ++ PVR_LOG_RETURN_IF_FALSE_VA(psHeapConf->ui32UsageFlags != 0, ++ PVRSRV_ERROR_PHYSHEAP_CONFIG, ++ "Phys heap config %d: must specify usage flags.", i); ++ ++ PVR_LOG_RETURN_IF_FALSE_VA((ui32FlagsAccumulate & psHeapConf->ui32UsageFlags) == 0, ++ PVRSRV_ERROR_PHYSHEAP_CONFIG, ++ "Phys heap config %d: duplicate usage flags.", i); ++ ++ ui32FlagsAccumulate |= psHeapConf->ui32UsageFlags; ++ ++ /* Output message if default heap is LMA and smaller than recommended minimum */ ++ if ((i == psDevConfig->eDefaultHeap) && ++#if defined(__KERNEL__) ++ ((psHeapConf->eType == PHYS_HEAP_TYPE_LMA) || ++ (psHeapConf->eType == PHYS_HEAP_TYPE_DMA)) && ++#else ++ (psHeapConf->eType == PHYS_HEAP_TYPE_LMA) && ++#endif ++ (psHeapConf->uiSize < PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Size of default heap is 0x%" IMG_UINT64_FMTSPECX ++ " (recommended minimum heap size is 0x%llx)", ++ __func__, psHeapConf->uiSize, ++ PVRSRV_MIN_DEFAULT_LMA_PHYS_HEAP_SIZE)); ++ } ++ } ++ ++ if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_GPU_LOCAL) ++ { ++ PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_GPU_LOCAL) != 0) , ++ "Device config must specify GPU local phys heap config.", ++ PVRSRV_ERROR_PHYSHEAP_CONFIG); ++ } ++ else if (psDevConfig->eDefaultHeap == PVRSRV_PHYS_HEAP_CPU_LOCAL) ++ { ++ PVR_LOG_RETURN_IF_FALSE(((ui32FlagsAccumulate & PHYS_HEAP_USAGE_CPU_LOCAL) != 0) , ++ "Device config must specify CPU local phys heap config.", ++ PVRSRV_ERROR_PHYSHEAP_CONFIG); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_PHYS_HEAP ePhysHeap; ++ ++ eError = PVRSRVValidatePhysHeapConfig(psDevConfig); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVValidatePhysHeapConfig"); ++ ++ eError = PhysHeapCreateDeviceHeapsFromConfigs(psDeviceNode, ++ psDevConfig->pasPhysHeaps, ++ psDevConfig->ui32PhysHeapCount); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapCreateDeviceHeapsFromConfigs", ErrorDeinit); ++ ++ for (ePhysHeap = PVRSRV_PHYS_HEAP_DEFAULT+1; ePhysHeap < PVRSRV_PHYS_HEAP_LAST; ePhysHeap++) ++ { ++ if (PhysHeapPVRLayerAcquire(ePhysHeap)) ++ { ++ eError = PhysHeapAcquireByDevPhysHeap(ePhysHeap, psDeviceNode, &psDeviceNode->apsPhysHeap[ePhysHeap]); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquireByDevPhysHeap", ErrorDeinit); ++ } ++ ++ /* Calculate the total number of user accessible physical 
heaps */ ++ if (psDeviceNode->apsPhysHeap[ePhysHeap] && PhysHeapUserModeAlloc(ePhysHeap)) ++ { ++ psDeviceNode->ui32UserAllocHeapCount++; ++ } ++ } ++ ++ if (PhysHeapValidateDefaultHeapExists(psDeviceNode)) ++ { ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPhysHeapCheckUsageFlags", ErrorDeinit); ++ } ++ ++ eError = PhysHeapMMUPxSetup(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapMMUPxSetup", ErrorDeinit); ++ ++ return PVRSRV_OK; ++ ++ErrorDeinit: ++ PVR_ASSERT(IMG_FALSE); ++ PVRSRVPhysMemHeapsDeinit(psDeviceNode); ++ ++ return eError; ++} ++ ++void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_PHYS_HEAP ePhysHeapIdx; ++ IMG_UINT32 i; ++ ++#if defined(SUPPORT_AUTOVZ) ++ if (psDeviceNode->psFwMMUReservedPhysHeap) ++ { ++ PhysHeapDestroy(psDeviceNode->psFwMMUReservedPhysHeap); ++ psDeviceNode->psFwMMUReservedPhysHeap = NULL; ++ } ++#endif ++ ++ PhysHeapMMUPxDeInit(psDeviceNode); ++ ++ /* Release heaps */ ++ for (ePhysHeapIdx = 0; ++ ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap); ++ ePhysHeapIdx++) ++ { ++ if (psDeviceNode->apsPhysHeap[ePhysHeapIdx]) ++ { ++ PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]); ++ } ++ } ++ ++ if (psDeviceNode->psFWMainPhysHeap) ++ { ++ PhysHeapDestroy(psDeviceNode->psFWMainPhysHeap); ++ psDeviceNode->psFWMainPhysHeap = NULL; ++ } ++ ++ if (psDeviceNode->psFWCfgPhysHeap) ++ { ++ PhysHeapDestroy(psDeviceNode->psFWCfgPhysHeap); ++ psDeviceNode->psFWCfgPhysHeap = NULL; ++ } ++ ++ for (i = 0; i < RGX_NUM_OS_SUPPORTED; i++) ++ { ++ if (psDeviceNode->apsFWPremapPhysHeap[i]) ++ { ++ PhysHeapDestroy(psDeviceNode->apsFWPremapPhysHeap[i]); ++ psDeviceNode->apsFWPremapPhysHeap[i] = NULL; ++ } ++ } ++ ++ PhysHeapDestroyDeviceHeaps(psDeviceNode); ++} ++ ++PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, ++ PHYS_HEAP_USAGE_FLAGS ui32Flags) ++{ ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++) ++ { ++ if (psDevConfig->pasPhysHeaps[i].ui32UsageFlags == ui32Flags) ++ { ++ return &psDevConfig->pasPhysHeaps[i]; ++ } ++ } ++ ++ return NULL; ++} ++ ++PVRSRV_ERROR PVRSRVCommonDeviceCreate(void *pvOSDevice, ++ IMG_INT32 i32OsDeviceID, ++ PVRSRV_DEVICE_NODE **ppsDeviceNode) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_ERROR eError; ++ PVRSRV_DEVICE_CONFIG *psDevConfig; ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ IMG_UINT32 ui32AppHintDefault; ++ IMG_UINT32 ui32AppHintDriverMode; ++#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) ++ IMG_UINT32 ui32AppHintPhysMemTestPasses; ++#endif ++ void *pvAppHintState = NULL; ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ IMG_HANDLE hProcessStats; ++#endif ++ ++ MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceCreate: DevId %d", i32OsDeviceID); ++ ++ /* Read driver mode (i.e. 
native, host or guest) AppHint early as it is ++ required by SysDevInit */ ++ ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; ++ OSCreateKMAppHintState(&pvAppHintState); ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, DriverMode, ++ &ui32AppHintDefault, &ui32AppHintDriverMode); ++ psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode); ++ psPVRSRVData->bForceApphintDriverMode = PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode); ++ OSFreeKMAppHintState(pvAppHintState); ++ pvAppHintState = NULL; ++ ++ psDeviceNode = OSAllocZMemNoStats(sizeof(*psDeviceNode)); ++ PVR_LOG_RETURN_IF_NOMEM(psDeviceNode, "psDeviceNode"); ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ /* Allocate process statistics */ ++ eError = PVRSRVStatsRegisterProcess(&hProcessStats); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVStatsRegisterProcess", ErrorFreeDeviceNode); ++#endif ++ ++ psDeviceNode->sDevId.i32OsDeviceID = i32OsDeviceID; ++ psDeviceNode->sDevId.ui32InternalID = psPVRSRVData->ui32RegisteredDevices; ++ ++ eError = SysDevInit(pvOSDevice, &psDevConfig); ++ PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats); ++ ++ PVR_ASSERT(psDevConfig); ++ PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice); ++ PVR_ASSERT(!psDevConfig->psDevNode); ++ ++ if ((psDevConfig->eDefaultHeap != PVRSRV_PHYS_HEAP_GPU_LOCAL) && ++ (psDevConfig->eDefaultHeap != PVRSRV_PHYS_HEAP_CPU_LOCAL)) ++ { ++ PVR_LOG_MSG(PVR_DBG_ERROR, "DEFAULT Heap is invalid, " ++ "it must be GPU_LOCAL or CPU_LOCAL"); ++ PVR_LOG_GOTO_IF_ERROR(eError, "SysDevInit", ErrorDeregisterStats); ++ } ++ ++ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT; ++ ++ if (psDevConfig->pfnGpuDomainPower) ++ { ++ psDeviceNode->eCurrentSysPowerState = psDevConfig->pfnGpuDomainPower(psDeviceNode); ++ } ++ else ++ { ++ /* If the System Layer doesn't provide a function to query the power state ++ * of the system hardware, use a default implementation that keeps track of ++ * the power state locally and assumes the system starting state */ ++ psDevConfig->pfnGpuDomainPower = PVRSRVDefaultDomainPower; ++ ++#if defined(SUPPORT_AUTOVZ) ++ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; ++#else ++ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_OFF; ++#endif ++ } ++ ++ psDeviceNode->psDevConfig = psDevConfig; ++ psDevConfig->psDevNode = psDeviceNode; ++ ++#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) ++ if (PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ /* Read AppHint - Configurable memory test pass count */ ++ ui32AppHintDefault = 0; ++ OSCreateKMAppHintState(&pvAppHintState); ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, PhysMemTestPasses, ++ &ui32AppHintDefault, &ui32AppHintPhysMemTestPasses); ++ OSFreeKMAppHintState(pvAppHintState); ++ pvAppHintState = NULL; ++ ++ if (ui32AppHintPhysMemTestPasses > 0) ++ { ++ eError = PhysMemTest(psDevConfig, ui32AppHintPhysMemTestPasses); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysMemTest", ErrorSysDevDeInit); ++ } ++ } ++#endif ++ ++ /* Initialise the paravirtualised connection */ ++ if (!PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ /* If a device already exists */ ++ if (psPVRSRVData->psDeviceNodeList != NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Virtualization is currently supported only on single device systems.", ++ __func__)); ++ eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ goto ErrorSysDevDeInit; ++ } ++ ++ PvzConnectionInit(psDevConfig); ++ PVR_GOTO_IF_ERROR(eError, ErrorSysDevDeInit); ++ } ++ ++ eError = 
PVRSRVRegisterDeviceDbgTable(psDeviceNode); ++ PVR_GOTO_IF_ERROR(eError, ErrorPvzConnectionDeInit); ++ ++ eError = PVRSRVPowerLockInit(psDeviceNode); ++ PVR_GOTO_IF_ERROR(eError, ErrorUnregisterDbgTable); ++ ++ eError = PVRSRVPhysMemHeapsInit(psDeviceNode, psDevConfig); ++ PVR_GOTO_IF_ERROR(eError, ErrorPowerLockDeInit); ++ ++#if defined(SUPPORT_RGX) ++ /* Requirements: ++ * registered GPU and FW local heaps */ ++ /* debug table */ ++ eError = RGXRegisterDevice(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "RGXRegisterDevice"); ++ eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED; ++ goto ErrorPhysMemHeapsDeinit; ++ } ++#endif ++ ++ if (psDeviceNode->pfnPhysMemDeviceHeapsInit != NULL) ++ { ++ eError = psDeviceNode->pfnPhysMemDeviceHeapsInit(psDeviceNode); ++ PVR_GOTO_IF_ERROR(eError, ErrorPhysMemHeapsDeinit); ++ } ++ ++ if (psDeviceNode->pfnFwMMUInit != NULL) ++ { ++ eError = psDeviceNode->pfnFwMMUInit(psDeviceNode); ++ PVR_GOTO_IF_ERROR(eError, ErrorFwMMUDeinit); ++ } ++ ++ eError = SyncServerInit(psDeviceNode); ++ PVR_GOTO_IF_ERROR(eError, ErrorDeInitRgx); ++ ++ eError = SyncCheckpointInit(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointInit", ErrorSyncCheckpointInit); ++ ++ /* ++ * This is registered before doing device specific initialisation to ensure ++ * generic device information is dumped first during a debug request. ++ */ ++ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDeviceNode->hDbgReqNotify, ++ psDeviceNode, ++ _SysDebugRequestNotify, ++ DEBUG_REQUEST_SYS, ++ psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterDeviceDbgRequestNotify", ErrorRegDbgReqNotify); ++ ++#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) ++ eError = InitDVFS(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "InitDVFS", ErrorDVFSInitFail); ++#endif ++ ++ OSAtomicWrite(&psDeviceNode->iNumClockSpeedChanges, 0); ++ ++#if defined(PVR_TESTING_UTILS) ++ TUtilsInit(psDeviceNode); ++#endif ++ ++ OSWRLockCreate(&psDeviceNode->hMemoryContextPageFaultNotifyListLock); ++ if (psDeviceNode->hMemoryContextPageFaultNotifyListLock == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for PF notify list", ++ __func__)); ++ goto ErrorPageFaultLockFailCreate; ++ } ++ ++ dllist_init(&psDeviceNode->sMemoryContextPageFaultNotifyListHead); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "Registered device %p", psDeviceNode)); ++ PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx", ++ (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr)); ++ PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ)); ++ ++/* SUPPORT_ALT_REGBASE is defined for rogue cores only */ ++#if defined(SUPPORT_RGX) && defined(SUPPORT_ALT_REGBASE) ++ { ++ IMG_DEV_PHYADDR sRegsGpuPBase; ++ ++ PhysHeapCpuPAddrToDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], ++ 1, ++ &sRegsGpuPBase, ++ &(psDeviceNode->psDevConfig->sRegsCpuPBase)); ++ ++ PVR_LOG(("%s: Using alternate Register bank GPU address: 0x%08lx (orig: 0x%08lx)", __func__, ++ (unsigned long)psDevConfig->sAltRegsGpuPBase.uiAddr, ++ (unsigned long)sRegsGpuPBase.uiAddr)); ++ } ++#endif ++ ++#if defined(__linux__) ++ /* register the AppHint device control before device initialisation ++ * so individual AppHints can be configured during the init phase ++ */ ++ { ++ int iError = pvr_apphint_device_register(psDeviceNode); ++ PVR_LOG_IF_FALSE(iError == 0, "pvr_apphint_device_register() failed"); ++ } ++#endif /* defined(__linux__) */ ++ ++#if defined(SUPPORT_RGX) ++ RGXHWPerfInitAppHintCallbacks(psDeviceNode); ++#endif ++ ++ eError 
= DebugCommonInitDevice(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DebugCommonInitDevice", ++ ErrorDestroyMemoryContextPageFaultNotifyListLock); ++ ++ /* Finally insert the device into the dev-list and set it as active */ ++ OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock); ++ List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList, ++ psDeviceNode); ++ psPVRSRVData->ui32RegisteredDevices++; ++ OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock); ++ ++ *ppsDeviceNode = psDeviceNode; ++ ++#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) ++ /* Register the DVFS device now the device node is present in the dev-list */ ++ eError = RegisterDVFSDevice(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RegisterDVFSDevice", ErrorRegisterDVFSDeviceFail); ++#endif ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ /* Close the process statistics */ ++ PVRSRVStatsDeregisterProcess(hProcessStats); ++#endif ++ ++#if defined(SUPPORT_VALIDATION) ++ OSLockCreateNoStats(&psDeviceNode->hValidationLock); ++#endif ++ ++ return PVRSRV_OK; ++ ++#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) ++ErrorRegisterDVFSDeviceFail: ++ /* Remove the device from the list */ ++ OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock); ++ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); ++ psPVRSRVData->ui32RegisteredDevices--; ++ OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock); ++#endif ++ ++ErrorDestroyMemoryContextPageFaultNotifyListLock: ++ OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock); ++ psDeviceNode->hMemoryContextPageFaultNotifyListLock = NULL; ++ ++ErrorPageFaultLockFailCreate: ++#if defined(PVR_TESTING_UTILS) ++ TUtilsDeinit(psDeviceNode); ++#endif ++ ++#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) ++ErrorDVFSInitFail: ++#endif ++ ++ if (psDeviceNode->hDbgReqNotify) ++ { ++ PVRSRVUnregisterDeviceDbgRequestNotify(psDeviceNode->hDbgReqNotify); ++ } ++ ++ErrorRegDbgReqNotify: ++ SyncCheckpointDeinit(psDeviceNode); ++ ++ErrorSyncCheckpointInit: ++ SyncServerDeinit(psDeviceNode); ++ ++ErrorDeInitRgx: ++#if defined(SUPPORT_RGX) ++ DevDeInitRGX(psDeviceNode); ++#endif ++ErrorFwMMUDeinit: ++ErrorPhysMemHeapsDeinit: ++ PVRSRVPhysMemHeapsDeinit(psDeviceNode); ++ErrorPowerLockDeInit: ++ PVRSRVPowerLockDeInit(psDeviceNode); ++ErrorUnregisterDbgTable: ++ PVRSRVUnregisterDeviceDbgTable(psDeviceNode); ++ErrorPvzConnectionDeInit: ++ psDevConfig->psDevNode = NULL; ++ if (!PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ PvzConnectionDeInit(); ++ } ++ErrorSysDevDeInit: ++ SysDevDeInit(psDevConfig); ++ErrorDeregisterStats: ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ /* Close the process statistics */ ++ PVRSRVStatsDeregisterProcess(hProcessStats); ++ErrorFreeDeviceNode: ++#endif ++ OSFreeMemNoStats(psDeviceNode); ++ ++ return eError; ++} ++ ++#if defined(SUPPORT_RGX) ++static PVRSRV_ERROR _SetDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice, ++ const void *psPrivate, IMG_BOOL bValue) ++{ ++ PVRSRV_ERROR eResult = PVRSRV_OK; ++ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); ++ ++ PVR_RETURN_IF_INVALID_PARAM(ui32Flag); ++ PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE, ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ eResult = RGXSetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, ++ ui32Flag, bValue); ++ ++ return eResult; ++} ++ ++static PVRSRV_ERROR _ReadDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice, ++ const void *psPrivate, IMG_BOOL *pbValue) ++{ ++ 
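/* Read the current RGX device flags via RGXGetDeviceFlags() and report whether the flag encoded in psPrivate is set. */ ++ 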
PVRSRV_ERROR eResult = PVRSRV_OK; ++ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); ++ IMG_UINT32 ui32State; ++ ++ PVR_RETURN_IF_INVALID_PARAM(ui32Flag); ++ PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE, ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ eResult = RGXGetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, ++ &ui32State); ++ ++ if (PVRSRV_OK == eResult) ++ { ++ *pbValue = (ui32State & ui32Flag) ? IMG_TRUE : IMG_FALSE; ++ } ++ ++ return eResult; ++} ++ ++static PVRSRV_ERROR _SetStateFlag(const PVRSRV_DEVICE_NODE *psDevice, ++ const void *psPrivate, IMG_BOOL bValue) ++{ ++ PVRSRV_ERROR eResult = PVRSRV_OK; ++ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); ++ ++ PVR_RETURN_IF_INVALID_PARAM(ui32Flag); ++ PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE, ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice, ++ ui32Flag, NULL, bValue); ++ ++ return eResult; ++} ++ ++static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice, ++ const void *psPrivate, IMG_BOOL *pbValue) ++{ ++ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate); ++ IMG_UINT32 ui32State; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_RETURN_IF_INVALID_PARAM(ui32Flag); ++ PVR_RETURN_IF_FALSE(psDevice != APPHINT_OF_DRIVER_NO_DEVICE, ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevice->pvDevice; ++ ui32State = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags; ++ ++ if (pbValue) ++ { ++ *pbValue = (ui32State & ui32Flag) ? IMG_TRUE : IMG_FALSE; ++ } ++ ++ return PVRSRV_OK; ++} ++#endif ++ ++PVRSRV_ERROR PVRSRVCommonDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ IMG_BOOL bInitSuccessful = IMG_FALSE; ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ IMG_HANDLE hProcessStats; ++#endif ++ PVRSRV_ERROR eError; ++ ++ MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceInitialise: DevId %d", psDeviceNode->sDevId.i32OsDeviceID); ++ ++ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__)); ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ ++#if defined(PDUMP) ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_CONNECT) ++ { ++ PVRSRV_DATA *psSRVData = PVRSRVGetPVRSRVData(); ++ ++ /* If first connection, bind this and future PDump clients to use this device */ ++ if (psSRVData->ui32PDumpBoundDevice == PVRSRV_MAX_DEVICES) ++ { ++ psSRVData->ui32PDumpBoundDevice = psDeviceNode->sDevId.ui32InternalID; ++ } ++ } ++#endif ++#endif ++ ++ /* Initialise Connection_Data access mechanism */ ++ dllist_init(&psDeviceNode->sConnections); ++ eError = OSLockCreate(&psDeviceNode->hConnectionsLock); ++ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); ++ ++ /* Allocate process statistics */ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ eError = PVRSRVStatsRegisterProcess(&hProcessStats); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVStatsRegisterProcess"); ++#endif ++ ++#if defined(SUPPORT_RGX) ++ eError = RGXInit(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInit", Exit); ++#endif ++ ++#if defined(SUPPORT_DMA_TRANSFER) ++ eError = PVRSRVInitialiseDMA(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVInitialiseDMA", Exit); ++#endif ++ ++ bInitSuccessful = IMG_TRUE; ++ ++#if defined(SUPPORT_RGX) || defined(SUPPORT_DMA_TRANSFER) ++Exit: ++#endif ++ eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccessful); ++ PVR_LOG_IF_ERROR(eError, "PVRSRVDeviceFinalise"); ++ ++#if defined(SUPPORT_RGX) ++ if 
(!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating, ++ _ReadStateFlag, _SetStateFlag, ++ APPHINT_OF_DRIVER_NO_DEVICE, ++ (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN)); ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap, ++ _ReadStateFlag, _SetStateFlag, ++ APPHINT_OF_DRIVER_NO_DEVICE, ++ (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP)); ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger, ++ _ReadStateFlag, _SetStateFlag, ++ psDeviceNode, ++ (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER)); ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory, ++ _ReadStateFlag, _SetStateFlag, ++ psDeviceNode, ++ (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY)); ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList, ++ _ReadStateFlag, _SetStateFlag, ++ psDeviceNode, ++ (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN)); ++ } ++ ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging, ++ _ReadDeviceFlag, _SetDeviceFlag, ++ psDeviceNode, ++ (void*)((uintptr_t)RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN)); ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist, ++ _ReadDeviceFlag, _SetDeviceFlag, ++ psDeviceNode, ++ (void*)((uintptr_t)RGXKM_DEVICE_STATE_ZERO_FREELIST)); ++#if defined(SUPPORT_VALIDATION) ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_GPUUnitsPowerChange, ++ _ReadDeviceFlag, _SetDeviceFlag, ++ psDeviceNode, ++ (void*)((uintptr_t)RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN)); ++#endif ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic, ++ RGXQueryPdumpPanicDisable, RGXSetPdumpPanicDisable, ++ psDeviceNode, ++ NULL); ++#endif ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) ++ /* Close the process statistics */ ++ PVRSRVStatsDeregisterProcess(hProcessStats); ++#endif ++ ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVCommonDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_ERROR eError; ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ IMG_BOOL bForceUnload = IMG_FALSE; ++ ++ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ bForceUnload = IMG_TRUE; ++ } ++#endif ++ ++ MULTI_DEVICE_BRINGUP_DPF("PVRSRVCommonDeviceDestroy: DevId %d", psDeviceNode->sDevId.i32OsDeviceID); ++ ++ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT; ++ ++#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) ++ UnregisterDVFSDevice(psDeviceNode); ++#endif ++ ++ OSWRLockAcquireWrite(psPVRSRVData->hDeviceNodeListLock); ++ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode); ++ psPVRSRVData->ui32RegisteredDevices--; ++ OSWRLockReleaseWrite(psPVRSRVData->hDeviceNodeListLock); ++ ++#if defined(__linux__) ++ pvr_apphint_device_unregister(psDeviceNode); ++#endif /* defined(__linux__) */ ++ ++ DebugCommonDeInitDevice(psDeviceNode); ++ ++ if (psDeviceNode->hMemoryContextPageFaultNotifyListLock != NULL) ++ { ++ OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock); ++ } ++ ++#if defined(SUPPORT_VALIDATION) ++ OSLockDestroyNoStats(psDeviceNode->hValidationLock); ++ psDeviceNode->hValidationLock = NULL; ++#endif ++ ++#if defined(SUPPORT_FALLBACK_FENCE_SYNC) ++ SyncFbDeregisterDevice(psDeviceNode); ++#endif ++ /* Counter part to what gets done in PVRSRVDeviceFinalise */ ++ if (psDeviceNode->hSyncCheckpointContext) ++ { ++ SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext); ++ psDeviceNode->hSyncCheckpointContext = 
NULL; ++ } ++ if (psDeviceNode->hSyncPrimContext) ++ { ++ if (psDeviceNode->psMMUCacheSyncPrim) ++ { ++ PVRSRV_CLIENT_SYNC_PRIM *psSync = psDeviceNode->psMMUCacheSyncPrim; ++ ++ /* Ensure there are no pending MMU Cache Ops in progress before freeing this sync. */ ++ eError = PVRSRVPollForValueKM(psDeviceNode, ++ psSync->pui32LinAddr, ++ psDeviceNode->ui32NextMMUInvalidateUpdate-1, ++ 0xFFFFFFFF, ++ POLL_FLAG_LOG_ERROR); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPollForValueKM"); ++ ++ /* Important to set the device node pointer to NULL ++ * before we free the sync-prim to make sure we don't ++ * defer the freeing of the sync-prim's page tables itself. ++ * The sync is used to defer the MMU page table ++ * freeing. */ ++ psDeviceNode->psMMUCacheSyncPrim = NULL; ++ ++ /* Free general purpose sync primitive */ ++ SyncPrimFree(psSync); ++ } ++ ++ SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext); ++ psDeviceNode->hSyncPrimContext = NULL; ++ } ++ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ if (eError == PVRSRV_OK) ++ { ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ /* ++ * Firmware probably not responding if bForceUnload is set, but we still want to unload the ++ * driver. ++ */ ++ if (!bForceUnload) ++#endif ++ { ++ /* Force idle device */ ++ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); ++ if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) ++ { ++ PVRSRVPowerUnlock(psDeviceNode); ++ } ++ return eError; ++ } ++ } ++ ++ /* Power down the device if necessary */ ++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, ++ PVRSRV_DEV_POWER_STATE_OFF, ++ PVRSRV_POWER_FLAGS_FORCED); ++ PVRSRVPowerUnlock(psDeviceNode); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "PVRSRVSetDevicePowerStateKM"); ++ ++ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); ++ ++ /* ++ * If the driver is okay then return the error, otherwise we can ignore ++ * this error. ++ */ ++ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK) ++ { ++ return eError; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: Will continue to unregister as driver status is not OK", ++ __func__)); ++ } ++ } ++ } ++ ++#if defined(PVR_TESTING_UTILS) ++ TUtilsDeinit(psDeviceNode); ++#endif ++ ++#if defined(SUPPORT_LINUX_DVFS) && !defined(NO_HARDWARE) ++ DeinitDVFS(psDeviceNode); ++#endif ++ ++ if (psDeviceNode->hDbgReqNotify) ++ { ++ PVRSRVUnregisterDeviceDbgRequestNotify(psDeviceNode->hDbgReqNotify); ++ } ++ ++ SyncCheckpointDeinit(psDeviceNode); ++ ++ SyncServerDeinit(psDeviceNode); ++ ++#if defined(SUPPORT_RGX) ++ DevDeInitRGX(psDeviceNode); ++#endif ++ ++ PVRSRVPhysMemHeapsDeinit(psDeviceNode); ++ PVRSRVPowerLockDeInit(psDeviceNode); ++ ++ PVRSRVUnregisterDeviceDbgTable(psDeviceNode); ++ ++ /* Release the Connection-Data lock as late as possible. */ ++ if (psDeviceNode->hConnectionsLock) ++ { ++ OSLockDestroy(psDeviceNode->hConnectionsLock); ++ } ++ ++ psDeviceNode->psDevConfig->psDevNode = NULL; ++ ++ if (!PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ PvzConnectionDeInit(); ++ } ++ SysDevDeInit(psDeviceNode->psDevConfig); ++ ++ OSFreeMemNoStats(psDeviceNode); ++ ++ return PVRSRV_OK; ++} ++ ++/**************************************************************************/ /*! ++@Function PVRSRVDeviceFinalise ++@Description Performs the final parts of device initialisation. 
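On success the device is moved into the ACTIVE state; if initialisation failed it is marked BAD and PVRSRV_ERROR_NOT_INITIALISED is returned.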
++@Input psDeviceNode Device node of the device to finish ++ initialising ++@Input bInitSuccessful Whether or not device specific ++ initialisation was successful ++@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise ++*/ /***************************************************************************/ ++PVRSRV_ERROR PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bInitSuccessful) ++{ ++ PVRSRV_ERROR eError; ++ __maybe_unused PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice); ++ ++ if (bInitSuccessful) ++ { ++ eError = SyncCheckpointContextCreate(psDeviceNode, ++ &psDeviceNode->hSyncCheckpointContext); ++ PVR_LOG_GOTO_IF_ERROR(eError, "SyncCheckpointContextCreate", ErrorExit); ++#if defined(SUPPORT_FALLBACK_FENCE_SYNC) ++ eError = SyncFbRegisterDevice(psDeviceNode); ++ PVR_GOTO_IF_ERROR(eError, ErrorExit); ++#endif ++ eError = SyncPrimContextCreate(psDeviceNode, ++ &psDeviceNode->hSyncPrimContext); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "SyncPrimContextCreate"); ++ SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext); ++ goto ErrorExit; ++ } ++ ++ /* Allocate MMU cache invalidate sync */ ++ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext, ++ &psDeviceNode->psMMUCacheSyncPrim, ++ "pvrsrv dev MMU cache"); ++ PVR_LOG_GOTO_IF_ERROR(eError, "SyncPrimAlloc", ErrorExit); ++ ++ /* Set the sync prim value to a much higher value near the ++ * wrapping range. This is so any wrapping bugs would be ++ * seen early in the driver start-up. ++ */ ++ SyncPrimSet(psDeviceNode->psMMUCacheSyncPrim, 0xFFFFFFF6UL); ++ ++ /* Next update value will be 0xFFFFFFF7 since sync prim starts with 0xFFFFFFF6 */ ++ psDeviceNode->ui32NextMMUInvalidateUpdate = 0xFFFFFFF7UL; ++ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPowerLock", ErrorExit); ++ ++ /* ++ * Always ensure a single power on command appears in the pdump. This ++ * should be the only power related call outside of PDUMPPOWCMDSTART ++ * and PDUMPPOWCMDEND. ++ */ ++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, ++ PVRSRV_DEV_POWER_STATE_ON, ++ PVRSRV_POWER_FLAGS_FORCED); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to set device %p power state to 'on' (%s)", ++ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); ++ PVRSRVPowerUnlock(psDeviceNode); ++ goto ErrorExit; ++ } ++ ++#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) ++ eError = ValidateFWOnLoad(psDeviceNode->pvDevice); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "ValidateFWOnLoad"); ++ PVRSRVPowerUnlock(psDeviceNode); ++ return eError; ++ } ++#endif ++ ++ eError = PVRSRVDevInitCompatCheck(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed compatibility check for device %p (%s)", ++ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); ++ PVRSRVPowerUnlock(psDeviceNode); ++ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); ++ goto ErrorExit; ++ } ++ ++ PDUMPPOWCMDSTART(psDeviceNode); ++ ++ /* Force the device to idle if its default power state is off */ ++ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, ++ &PVRSRVDeviceIsDefaultStateOFF, ++ IMG_TRUE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "PVRSRVDeviceIdleRequestKM"); ++ if (eError != PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) ++ { ++ PVRSRVPowerUnlock(psDeviceNode); ++ } ++ goto ErrorExit; ++ } ++ ++ /* Place device into its default power state. 
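For systems whose default power state is OFF, this powers the device back down after the forced power-on and compatibility checks above.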
*/ ++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, ++ PVRSRV_DEV_POWER_STATE_DEFAULT, ++ PVRSRV_POWER_FLAGS_FORCED); ++ PDUMPPOWCMDEND(psDeviceNode); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to set device %p into its default power state (%s)", ++ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); ++ ++ PVRSRVPowerUnlock(psDeviceNode); ++ goto ErrorExit; ++ } ++ ++ PVRSRVPowerUnlock(psDeviceNode); ++ ++ /* ++ * If PDUMP is enabled and RGX device is supported, then initialise the ++ * performance counters that can be further modified in PDUMP. Then, ++ * before ending the init phase of the pdump, drain the commands put in ++ * the kCCB during the init phase. ++ */ ++#if defined(SUPPORT_RGX) ++#if defined(PDUMP) ++ { ++ eError = RGXInitHWPerfCounters(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitHWPerfCounters", ErrorExit); ++ ++ eError = RGXPdumpDrainKCCB(psDevInfo, ++ psDevInfo->psKernelCCBCtl->ui32WriteOffset); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", ErrorExit); ++ } ++#endif ++#endif /* defined(SUPPORT_RGX) */ ++ /* Now that the device(s) are fully initialised set them as active */ ++ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE; ++ eError = PVRSRV_OK; ++ } ++ else ++ { ++ /* Initialisation failed so set the device(s) into a bad state */ ++ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD; ++ eError = PVRSRV_ERROR_NOT_INITIALISED; ++ } ++ ++ /* Give PDump control a chance to end the init phase, depends on OS */ ++ PDUMPENDINITPHASE(psDeviceNode); ++ return eError; ++ ++ErrorExit: ++ /* Initialisation failed so set the device(s) into a bad state */ ++ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD; ++ ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ /* Only check devices which specify a compatibility check callback */ ++ if (psDeviceNode->pfnInitDeviceCompatCheck) ++ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode); ++ else ++ return PVRSRV_OK; ++} ++ ++/* ++ PollForValueKM ++*/ ++static ++PVRSRV_ERROR PollForValueKM (volatile IMG_UINT32 __iomem * pui32LinMemAddr, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ IMG_UINT32 ui32Timeoutus, ++ IMG_UINT32 ui32PollPeriodus, ++ POLL_FLAGS ePollFlags) ++{ ++#if defined(NO_HARDWARE) ++ PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ PVR_UNREFERENCED_PARAMETER(ui32Timeoutus); ++ PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus); ++ PVR_UNREFERENCED_PARAMETER(ePollFlags); ++ return PVRSRV_OK; ++#else ++ IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */ ++ ++ LOOP_UNTIL_TIMEOUT(ui32Timeoutus) ++ { ++ ui32ActualValue = OSReadHWReg32((void __iomem *)pui32LinMemAddr, 0) & ui32Mask; ++ ++ if (ui32ActualValue == ui32Value) ++ { ++ return PVRSRV_OK; ++ } ++ ++ if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ OSWaitus(ui32PollPeriodus); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (BITMASK_HAS(ePollFlags, POLL_FLAG_LOG_ERROR)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "PollForValueKM: Timeout. 
Expected 0x%x but found 0x%x (mask 0x%x).", ++ ui32Value, ui32ActualValue, ui32Mask)); ++ } ++ ++ return PVRSRV_ERROR_TIMEOUT; ++#endif /* NO_HARDWARE */ ++} ++ ++ ++/* ++ PVRSRVPollForValueKM ++*/ ++PVRSRV_ERROR PVRSRVPollForValueKM (PVRSRV_DEVICE_NODE *psDevNode, ++ volatile IMG_UINT32 __iomem *pui32LinMemAddr, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ POLL_FLAGS ePollFlags) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, ++ MAX_HW_TIME_US, ++ MAX_HW_TIME_US/WAIT_TRY_COUNT, ++ ePollFlags); ++ if (eError != PVRSRV_OK && BITMASK_HAS(ePollFlags, POLL_FLAG_DEBUG_DUMP)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed! Error(%s) CPU linear address(%p) Expected value(%u)", ++ __func__, PVRSRVGetErrorString(eError), ++ pui32LinMemAddr, ui32Value)); ++ PVRSRVDebugRequest(psDevNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask) ++{ ++#if defined(NO_HARDWARE) ++ PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ return PVRSRV_OK; ++#else ++ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ IMG_HANDLE hOSEvent; ++ PVRSRV_ERROR eError; ++ PVRSRV_ERROR eErrorWait; ++ IMG_UINT32 ui32ActualValue; ++ ++ eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSEventObjectOpen", EventObjectOpenError); ++ ++ eError = PVRSRV_ERROR_TIMEOUT; /* Initialiser for following loop */ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ ui32ActualValue = (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask); ++ ++ if (ui32ActualValue == ui32Value) ++ { ++ /* Expected value has been found */ ++ eError = PVRSRV_OK; ++ break; ++ } ++ else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ /* Services in bad state, don't wait any more */ ++ eError = PVRSRV_ERROR_NOT_READY; ++ break; ++ } ++ else ++ { ++ /* wait for event and retry */ ++ eErrorWait = OSEventObjectWait(hOSEvent); ++ if (eErrorWait != PVRSRV_OK && eErrorWait != PVRSRV_ERROR_TIMEOUT) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Failed with error %d. Found value 0x%x but was expected " ++ "to be 0x%x (Mask 0x%08x). Retrying", ++ __func__, ++ eErrorWait, ++ ui32ActualValue, ++ ui32Value, ++ ui32Mask)); ++ } ++ } ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ OSEventObjectClose(hOSEvent); ++ ++ /* One last check in case the object wait ended after the loop timeout... */ ++ if (eError != PVRSRV_OK && ++ (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask) == ui32Value) ++ { ++ eError = PVRSRV_OK; ++ } ++ ++ /* Provide event timeout information to aid the Device Watchdog Thread... 
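(ui32GEOConsecutiveTimeouts is cleared on success and incremented on timeout).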
*/ ++ if (eError == PVRSRV_OK) ++ { ++ psPVRSRVData->ui32GEOConsecutiveTimeouts = 0; ++ } ++ else if (eError == PVRSRV_ERROR_TIMEOUT) ++ { ++ psPVRSRVData->ui32GEOConsecutiveTimeouts++; ++ } ++ ++EventObjectOpenError: ++ ++ return eError; ++ ++#endif /* NO_HARDWARE */ ++} ++ ++int PVRSRVGetDriverStatus(void) ++{ ++ return PVRSRVGetPVRSRVData()->eServicesState; ++} ++ ++/* ++ PVRSRVSystemHasCacheSnooping ++*/ ++IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ if ((psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_NONE) && ++ (psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_EMULATED)) ++ { ++ return IMG_TRUE; ++ } ++ return IMG_FALSE; ++} ++ ++IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ if (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_EMULATED) ++ { ++ return IMG_TRUE; ++ } ++ return IMG_FALSE; ++} ++ ++IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CPU_ONLY) || ++ (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS)) ++ { ++ return IMG_TRUE; ++ } ++ return IMG_FALSE; ++} ++ ++IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) || ++ (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS)) ++ { ++ return IMG_TRUE; ++ } ++ return IMG_FALSE; ++} ++ ++IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ return psDevConfig->bHasNonMappableLocalMemory; ++} ++ ++/* ++ PVRSRVSystemWaitCycles ++*/ ++void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles) ++{ ++ /* Delay in us */ ++ IMG_UINT32 ui32Delayus = 1; ++ ++ /* obtain the device freq */ ++ if (psDevConfig->pfnClockFreqGet != NULL) ++ { ++ IMG_UINT32 ui32DeviceFreq; ++ ++ ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData); ++ ++ ui32Delayus = (ui32Cycles*1000000)/ui32DeviceFreq; ++ ++ if (ui32Delayus == 0) ++ { ++ ui32Delayus = 1; ++ } ++ } ++ ++ OSWaitus(ui32Delayus); ++} ++ ++static void * ++PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode, ++ va_list va) ++{ ++ void *pvOSDevice = va_arg(va, void *); ++ ++ if (psDeviceNode->psDevConfig->pvOSDevice == pvOSDevice) ++ { ++ return psDeviceNode; ++ } ++ ++ return NULL; ++} ++ ++PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice, ++ IMG_UINT32 ui32IRQ, ++ const IMG_CHAR *pszName, ++ PFN_LISR pfnLISR, ++ void *pvData, ++ IMG_HANDLE *phLISRData) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = ++ List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList, ++ &PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb, ++ pvOSDevice); ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ ++ if (!psDeviceNode) ++ { ++ /* Device can't be found in the list so it isn't in the system */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: device %p with irq %d is not present", ++ __func__, pvOSDevice, ui32IRQ)); ++ return PVRSRV_ERROR_INVALID_DEVICE; ++ } ++ ++ return SysInstallDeviceLISR(psDeviceNode->psDevConfig->hSysData, ui32IRQ, ++ pszName, pfnLISR, pvData, phLISRData); ++} ++ ++PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData) ++{ ++ return SysUninstallDeviceLISR(hLISRData); ++} ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) ++/* functions 
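SetAxiProtOSid() and SetTrustedDeviceAceEnabled() below are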
only used on rogue, but header defining them is common */ ++void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState) ++{ ++ SysSetAxiProtOSid(ui32OSid, bState); ++} ++ ++void SetTrustedDeviceAceEnabled(void) ++{ ++ SysSetTrustedDeviceAceEnabled(); ++} ++#endif ++ ++#if defined(SUPPORT_RGX) ++PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (!ui32Timeout) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock) { ++ OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); ++ } ++ ++ /* Create only once */ ++ if (gpsPVRSRVData->hHWPerfHostPeriodicThread == NULL) ++ { ++ /* Create the HWPerf event object */ ++ eError = OSEventObjectCreate("PVRSRV_HWPERFHOSTPERIODIC_EVENTOBJECT", &gpsPVRSRVData->hHWPerfHostPeriodicEvObj); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectCreate"); ++ ++ if (eError == PVRSRV_OK) ++ { ++ gpsPVRSRVData->bHWPerfHostThreadStop = IMG_FALSE; ++ gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout; ++ /* Create a thread which is used to periodically emit host stream packets */ ++ eError = OSThreadCreate(&gpsPVRSRVData->hHWPerfHostPeriodicThread, ++ "pvr_hwperf_host", ++ HWPerfPeriodicHostEventsThread, ++ NULL, IMG_TRUE, gpsPVRSRVData); ++ PVR_LOG_IF_ERROR(eError, "OSThreadCreate"); ++ } ++ } ++ /* If the thread has already been created then just update the timeout and wake up thread */ ++ else ++ { ++ gpsPVRSRVData->ui32HWPerfHostThreadTimeout = ui32Timeout; ++ eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ } ++ ++ if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock) { ++ OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock) { ++ OSLockAcquire(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); ++ } ++ ++ /* Stop and cleanup the HWPerf periodic thread */ ++ if (gpsPVRSRVData->hHWPerfHostPeriodicThread) ++ { ++ if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj) ++ { ++ gpsPVRSRVData->bHWPerfHostThreadStop = IMG_TRUE; ++ eError = OSEventObjectSignal(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal"); ++ } ++ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US) ++ { ++ eError = OSThreadDestroy(gpsPVRSRVData->hHWPerfHostPeriodicThread); ++ if (PVRSRV_OK == eError) ++ { ++ gpsPVRSRVData->hHWPerfHostPeriodicThread = NULL; ++ break; ++ } ++ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy"); ++ ++ if (gpsPVRSRVData->hHWPerfHostPeriodicEvObj) ++ { ++ eError = OSEventObjectDestroy(gpsPVRSRVData->hHWPerfHostPeriodicEvObj); ++ gpsPVRSRVData->hHWPerfHostPeriodicEvObj = NULL; ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); ++ } ++ } ++ ++ if (gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock) { ++ OSLockRelease(gpsPVRSRVData->hHWPerfHostPeriodicThread_Lock); ++ } ++ ++ return eError; ++} ++#endif ++ ++/* ++ * Scan the list of known devices until we find the specific instance or ++ * exhaust the list ++ */ ++PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstance(IMG_UINT32 uiInstance) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++ if (uiInstance >= gpsPVRSRVData->ui32RegisteredDevices) ++ { ++ return NULL; ++ } ++ OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock); ++ for (psDevNode = 
gpsPVRSRVData->psDeviceNodeList; ++ psDevNode != NULL; psDevNode = psDevNode->psNext) ++ { ++ if (uiInstance == psDevNode->sDevId.ui32InternalID) ++ { ++ break; ++ } ++ } ++ OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock); ++ ++ return psDevNode; ++} ++ ++PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++ OSWRLockAcquireRead(gpsPVRSRVData->hDeviceNodeListLock); ++ for (psDevNode = gpsPVRSRVData->psDeviceNodeList; ++ psDevNode != NULL; psDevNode = psDevNode->psNext) ++ { ++ if (i32OSInstance == psDevNode->sDevId.i32OsDeviceID) ++ { ++ break; ++ } ++ } ++ OSWRLockReleaseRead(gpsPVRSRVData->hDeviceNodeListLock); ++ ++ return psDevNode; ++} ++ ++/* Default function for querying the power state of the system */ ++PVRSRV_SYS_POWER_STATE PVRSRVDefaultDomainPower(PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ return psDevNode->eCurrentSysPowerState; ++} ++/***************************************************************************** ++ End of file (pvrsrv.c) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv.h b/drivers/gpu/drm/img-rogue/pvrsrv.h +new file mode 100644 +index 000000000000..97454f5ad8cd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv.h +@@ -0,0 +1,542 @@ ++/*************************************************************************/ /*! ++@File ++@Title PowerVR services server header file ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVRSRV_H ++#define PVRSRV_H ++ ++#include "connection_server.h" ++#include "pvrsrv_pool.h" ++#include "device.h" ++#include "power.h" ++#include "syscommon.h" ++#include "sysinfo.h" ++#include "physheap.h" ++#include "cache_ops.h" ++#include "pvr_notifier.h" ++#include "pvr_bridge.h" ++#if defined(SUPPORT_RGX) ++#include "rgx_bridge.h" ++#endif ++#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) ++#define __pvrsrv_defined_struct_enum__ ++#include ++#endif ++ ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#include "virt_validation_defs.h" ++#endif ++ ++#include "dma_support.h" ++#include "vz_vmm_pvz.h" ++ ++/*! ++ * For OSThreadDestroy(), which may require a retry ++ * Try for 100 ms to destroy an OS thread before failing ++ */ ++#define OS_THREAD_DESTROY_TIMEOUT_US 100000ULL ++#define OS_THREAD_DESTROY_RETRY_COUNT 10 ++ ++typedef enum _POLL_FLAGS_ ++{ ++ POLL_FLAG_NONE = 0, /* No message or dump is printed on poll timeout */ ++ POLL_FLAG_LOG_ERROR = 1, /* Log error on poll timeout */ ++ POLL_FLAG_DEBUG_DUMP = 2 /* Print debug dump on poll timeout */ ++} POLL_FLAGS; ++ ++typedef struct _BUILD_INFO_ ++{ ++ IMG_UINT32 ui32BuildOptions; ++ IMG_UINT32 ui32BuildVersion; ++ IMG_UINT32 ui32BuildRevision; ++ IMG_UINT32 ui32BuildType; ++#define BUILD_TYPE_DEBUG 0 ++#define BUILD_TYPE_RELEASE 1 ++ /* The above fields are self explanatory */ ++ /* B.V.N.C can be added later if required */ ++} BUILD_INFO; ++ ++typedef struct _DRIVER_INFO_ ++{ ++ BUILD_INFO sUMBuildInfo; ++ BUILD_INFO sKMBuildInfo; ++ IMG_UINT8 ui8UMSupportedArch; ++ IMG_UINT8 ui8KMBitArch; ++ ++#define BUILD_ARCH_64BIT (1 << 0) ++#define BUILD_ARCH_32BIT (1 << 1) ++#define BUILD_ARCH_BOTH (BUILD_ARCH_32BIT | BUILD_ARCH_64BIT) ++ IMG_BOOL bIsNoMatch; ++}DRIVER_INFO; ++ ++#if defined(SUPPORT_VALIDATION) && defined(__linux__) ++typedef struct MEM_LEAK_INTERVALS_TAG ++{ ++ IMG_UINT32 ui32OSAlloc; ++ IMG_UINT32 ui32GPU; ++ IMG_UINT32 ui32MMU; ++} MEM_LEAK_INTERVALS; ++#endif ++ ++typedef struct PVRSRV_DATA_TAG ++{ ++ PVRSRV_DRIVER_MODE eDriverMode; /*!< Driver mode (i.e. native, host or guest) */ ++ IMG_BOOL bForceApphintDriverMode; /*!< Indicate if driver mode is forced via apphint */ ++ DRIVER_INFO sDriverInfo; ++ IMG_UINT32 ui32DPFErrorCount; /*!< Number of Fatal/Error DPFs */ ++ ++ POSWR_LOCK hDeviceNodeListLock; /*!< Read-Write lock to protect the list of devices */ ++ PVRSRV_DEVICE_NODE *psDeviceNodeList; /*!< List head of device nodes */ ++ IMG_UINT32 ui32RegisteredDevices; ++ PVRSRV_DEVICE_NODE *psHostMemDeviceNode; /*!< DeviceNode to be used for device independent ++ host based memory allocations where the DevMem ++ framework is to be used e.g. 
TL */ ++ PVRSRV_SERVICES_STATE eServicesState; /*!< global driver state */ ++ ++ IMG_HANDLE hGlobalEventObject; /*!< OS Global Event Object */ ++ IMG_UINT32 ui32GEOConsecutiveTimeouts; /*!< OS Global Event Object Timeouts */ ++ ++ IMG_HANDLE hCleanupThread; /*!< Cleanup thread */ ++ IMG_HANDLE hCleanupEventObject; /*!< Event object to drive cleanup thread */ ++ POS_SPINLOCK hCleanupThreadWorkListLock; /*!< Lock protecting the cleanup thread work list */ ++ DLLIST_NODE sCleanupThreadWorkList; /*!< List of work for the cleanup thread */ ++ IMG_PID cleanupThreadPid; /*!< Cleanup thread process id */ ++ uintptr_t cleanupThreadTid; /*!< Cleanup thread id */ ++ ATOMIC_T i32NumCleanupItemsQueued; /*!< Number of items in cleanup thread work list */ ++ ATOMIC_T i32NumCleanupItemsNotCompleted; /*!< Number of items dropped from cleanup thread work list ++ after retry limit reached */ ++ ++ IMG_HANDLE hDevicesWatchdogThread; /*!< Devices watchdog thread */ ++ IMG_HANDLE hDevicesWatchdogEvObj; /*! Event object to drive devices watchdog thread */ ++ volatile IMG_UINT32 ui32DevicesWatchdogPwrTrans; /*! Number of off -> on power state transitions */ ++#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) ++ volatile IMG_UINT32 ui32DevicesWatchdogTimeout; /*! Timeout for the Devices watchdog Thread */ ++#endif ++#ifdef PVR_TESTING_UTILS ++ volatile IMG_UINT32 ui32DevicesWdWakeupCounter; /* Need this for the unit tests. */ ++#endif ++ ++#if defined(SUPPORT_AUTOVZ) ++ IMG_HANDLE hAutoVzWatchdogThread; /*!< Devices watchdog thread */ ++ IMG_HANDLE hAutoVzWatchdogEvObj; /*! Event object to drive devices watchdog thread */ ++#endif ++ ++ POS_LOCK hHWPerfHostPeriodicThread_Lock; /*!< Lock for the HWPerf Host periodic thread */ ++ IMG_HANDLE hHWPerfHostPeriodicThread; /*!< HWPerf Host periodic thread */ ++ IMG_HANDLE hHWPerfHostPeriodicEvObj; /*! Event object to drive HWPerf thread */ ++ volatile IMG_BOOL bHWPerfHostThreadStop; ++ IMG_UINT32 ui32HWPerfHostThreadTimeout; ++ ++ IMG_HANDLE hPvzConnection; /*!< PVZ connection used for cross-VM hyper-calls */ ++ POS_LOCK hPvzConnectionLock; /*!< Lock protecting PVZ connection */ ++ IMG_BOOL abVmOnline[RGX_NUM_OS_SUPPORTED]; ++ ++ IMG_BOOL bUnload; /*!< Driver unload is in progress */ ++ ++ IMG_HANDLE hTLCtrlStream; /*! Control plane for TL streams */ ++ ++ IMG_HANDLE hDriverThreadEventObject; /*! Event object relating to multi-threading in the Server */ ++ IMG_BOOL bDriverSuspended; /*! if TRUE, the driver is suspended and new threads should not enter */ ++ ATOMIC_T iNumActiveDriverThreads; /*! Number of threads active in the Server */ ++ ++ PMR *psInfoPagePMR; /*! Handle to exportable PMR of the information page. */ ++ IMG_UINT32 *pui32InfoPage; /*! CPU memory mapping for information page. */ ++ DEVMEM_MEMDESC *psInfoPageMemDesc; /*! Memory descriptor of the information page. */ ++ POS_LOCK hInfoPageLock; /*! Lock guarding access to information page. */ ++ ++#if defined(SUPPORT_VALIDATION) && defined(__linux__) ++ MEM_LEAK_INTERVALS sMemLeakIntervals; /*!< How often certain memory leak types will trigger */ ++#endif ++ IMG_HANDLE hThreadsDbgReqNotify; ++ ++ IMG_UINT32 ui32PDumpBoundDevice; /*!< PDump is bound to the device first connected to */ ++} PVRSRV_DATA; ++ ++ ++/*! 
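****************************************************************************** Usage sketch (illustrative only, not a declaration from this header): callers across the driver obtain the global state and check its health like so: ++ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ return PVRSRV_ERROR_NOT_READY; ++ } ++******************************************************************************/ ++ ++/*! 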
++****************************************************************************** ++ @Function PVRSRVGetPVRSRVData ++ ++ @Description Get a pointer to the global data ++ ++ @Return PVRSRV_DATA * ++******************************************************************************/ ++PVRSRV_DATA *PVRSRVGetPVRSRVData(void); ++ ++#define PVRSRV_KM_ERRORS (PVRSRVGetPVRSRVData()->ui32DPFErrorCount) ++#define PVRSRV_ERROR_LIMIT_REACHED (PVRSRV_KM_ERRORS == IMG_UINT32_MAX) ++#define PVRSRV_REPORT_ERROR() do { if (PVRSRVGetPVRSRVData()) { if (!(PVRSRV_ERROR_LIMIT_REACHED)) { PVRSRVGetPVRSRVData()->ui32DPFErrorCount++; } } } while (0) ++ ++#define PVRSRV_VZ_MODE_IS(_expr) (DRIVER_MODE_##_expr == PVRSRVGetPVRSRVData()->eDriverMode) ++#define PVRSRV_VZ_RETN_IF_MODE(_expr) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) ++#define PVRSRV_VZ_RETN_IF_NOT_MODE(_expr) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return; } } while (0) ++#define PVRSRV_VZ_RET_IF_MODE(_expr, _rc) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) ++#define PVRSRV_VZ_RET_IF_NOT_MODE(_expr, _rc) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while (0) ++ ++/*! ++****************************************************************************** ++@Note The driver execution mode AppHint (i.e. PVRSRV_APPHINT_DRIVERMODE) ++ can be an override or non-override 32-bit value. An override value ++ has the MSB bit set & a non-override value has this MSB bit cleared. ++ Excluding this MSB bit & interpreting the remaining 31-bit as a ++ signed 31-bit integer, the mode values are: ++ [-1 native : 0 host : +1 guest ]. ++******************************************************************************/ ++#define PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(_expr) ((IMG_UINT32)(_expr)&(IMG_UINT32)(1<<31)) ++#define PVRSRV_VZ_APPHINT_MODE(_expr) \ ++ ((((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) == (IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_NATIVE : \ ++ !((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_HOST : \ ++ ((IMG_UINT32)((IMG_UINT32)(_expr)&(IMG_UINT)0x7FFFFFFF)==(IMG_UINT32)0x1) ? DRIVER_MODE_GUEST : \ ++ ((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF)) ++ ++typedef struct _PHYS_HEAP_ITERATOR_ PHYS_HEAP_ITERATOR; ++ ++/*! ++****************************************************************************** ++ @Function LMA_HeapIteratorCreate ++ ++ @Description ++ Creates iterator for traversing physical heap requested by ui32Flags. The ++ iterator will go through all of the segments (a segment is physically ++ contiguous) of the physical heap and return their CPU physical address and ++ size. ++ ++ @Input psDevNode: Pointer to device node struct. ++ @Input ui32Flags: Find heap that matches flags. ++ @Output ppsIter: Pointer to the iterator object. ++ ++ @Return PVRSRV_OK upon success and PVRSRV_ERROR otherwise. ++******************************************************************************/ ++PVRSRV_ERROR LMA_HeapIteratorCreate(PVRSRV_DEVICE_NODE *psDevNode, ++ PHYS_HEAP_USAGE_FLAGS ui32Flags, ++ PHYS_HEAP_ITERATOR **ppsIter); ++ ++/*! ++****************************************************************************** ++ @Function LMA_HeapIteratorDestroy ++ ++ @Description ++ Frees the iterator object created with LMA_HeapIteratorCreate. ++ ++ @Input psIter: Pointer to the iterator object. ++******************************************************************************/ ++void LMA_HeapIteratorDestroy(PHYS_HEAP_ITERATOR *psIter); ++ ++/*! 
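****************************************************************************** Usage sketch for the LMA heap iterator (illustrative; error checking omitted and uiHeapFlags is a placeholder): ++ ++ PHYS_HEAP_ITERATOR *psIter; ++ IMG_DEV_PHYADDR sDevPAddr; ++ IMG_UINT64 uiSegSize; ++ ++ LMA_HeapIteratorCreate(psDevNode, uiHeapFlags, &psIter); ++ ++ while (LMA_HeapIteratorNext(psIter, &sDevPAddr, &uiSegSize)) ++ { ++ ... process one physically contiguous segment ... ++ } ++ ++ LMA_HeapIteratorDestroy(psIter); ++******************************************************************************/ ++ ++/*! 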
++****************************************************************************** ++ @Function LMA_HeapIteratorReset ++ ++ @Description ++ Resets the iterator to the first segment of the physical heap. ++ ++ @Input psIter: Pointer to the iterator object. ++ ++ @Return PVRSRV_OK upon success and a PVRSRV_ERROR code otherwise. ++******************************************************************************/ ++PVRSRV_ERROR LMA_HeapIteratorReset(PHYS_HEAP_ITERATOR *psIter); ++ ++/*! ++****************************************************************************** ++ @Function LMA_HeapIteratorNext ++ ++ @Description ++ Retrieves the current segment's physical device address and size and moves ++ the iterator to the next element (if one exists). If the iterator reached ++ the end of the heap and no segment was retrieved, this function returns ++ IMG_FALSE. ++ ++ @Input psIter: Pointer to the iterator object. ++ @Output psDevPAddr: Device physical address of the current segment. ++ @Output puiSize: Size of the current segment. ++ ++ @Return IMG_TRUE if a segment was found and retrieved, IMG_FALSE otherwise. ++******************************************************************************/ ++IMG_BOOL LMA_HeapIteratorNext(PHYS_HEAP_ITERATOR *psIter, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ IMG_UINT64 *puiSize); ++ ++/*! ++****************************************************************************** ++ @Function LMA_HeapIteratorGetHeapStats ++ ++ @Description ++ Retrieves the physical heap's usage statistics. ++ ++ @Input psIter: Pointer to the iterator object. ++ @Output puiTotalSize: Total size of the physical heap. ++ @Output puiInUseSize: Used space in the physical heap. ++ ++ @Return PVRSRV_OK upon success and a PVRSRV_ERROR code otherwise. ++******************************************************************************/ ++PVRSRV_ERROR LMA_HeapIteratorGetHeapStats(PHYS_HEAP_ITERATOR *psIter, ++ IMG_UINT64 *puiTotalSize, ++ IMG_UINT64 *puiInUseSize); ++ ++/*! ++****************************************************************************** ++ @Function PVRSRVPollForValueKM ++ ++ @Description ++ Polls for a value to match a masked read ++ ++ @Input psDevNode : Pointer to device node struct ++ @Input pui32LinMemAddr : CPU linear address to poll ++ @Input ui32Value : Required value ++ @Input ui32Mask : Mask to be applied before checking against ui32Value ++ @Input ePollFlags : Flags controlling failure behaviour (see POLL_FLAGS). ++ CAUTION: When calling this function from code paths which are ++ also used by debug-dumping code, POLL_FLAG_DEBUG_DUMP MUST NOT ++ be set, otherwise we might end up requesting a debug dump in ++ recursion and eventually blow up the call stack. ++ ++ @Return PVRSRV_ERROR : PVRSRV_OK on success, PVRSRV_ERROR_TIMEOUT if the ++ value was not observed before the poll period expired. ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVPollForValueKM(PVRSRV_DEVICE_NODE *psDevNode, ++ volatile IMG_UINT32 __iomem *pui32LinMemAddr, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ POLL_FLAGS ePollFlags); ++ ++/*! ++****************************************************************************** ++ @Function PVRSRVWaitForValueKM ++ ++ @Description ++ Waits (using EventObjects) for a value to match a masked read ++ ++ @Input pui32LinMemAddr : CPU linear address to poll ++ @Input ui32Value : Required value ++ @Input ui32Mask : Mask to be applied before checking against ++ ui32Value ++ @Return PVRSRV_ERROR : PVRSRV_OK on success, PVRSRV_ERROR_TIMEOUT on ++ timeout or PVRSRV_ERROR_NOT_READY if the driver is in a bad state. ++******************************************************************************/ ++PVRSRV_ERROR ++PVRSRVWaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask); ++ ++/*! 
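****************************************************************************** Usage sketch (illustrative; ui32Expected is a placeholder), mirroring the MMU-cache sync poll in PVRSRVCommonDeviceDestroy(): ++ ++ eError = PVRSRVPollForValueKM(psDeviceNode, ++ psSync->pui32LinAddr, ++ ui32Expected, ++ 0xFFFFFFFF, ++ POLL_FLAG_LOG_ERROR); ++******************************************************************************/ ++ ++/*! 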
++****************************************************************************** ++ @Function : PVRSRVSystemHasCacheSnooping ++ ++ @Description : Returns whether the system has cache snooping ++ ++ @Return : IMG_TRUE if the system has cache snooping ++******************************************************************************/ ++IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++/*! ++****************************************************************************** ++ @Function : PVRSRVSystemSnoopingIsEmulated ++ ++ @Description : Returns whether system cache snooping support is emulated ++ ++ @Return : IMG_TRUE if the system cache snooping is emulated in software ++******************************************************************************/ ++IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++/*! ++****************************************************************************** ++ @Function : PVRSRVSystemSnoopingOfCPUCache ++ ++ @Description : Returns whether the system supports snooping of the CPU cache ++ ++ @Return : IMG_TRUE if the system has CPU cache snooping ++******************************************************************************/ ++IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++/*! ++****************************************************************************** ++ @Function : PVRSRVSystemSnoopingOfDeviceCache ++ ++ @Description : Returns whether the system supports snooping of the device cache ++ ++ @Return : IMG_TRUE if the system has device cache snooping ++******************************************************************************/ ++IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++/*! ++****************************************************************************** ++ @Function : PVRSRVSystemHasNonMappableLocalMemory ++ ++ @Description : Returns whether the device has non-mappable part of local memory ++ ++ @Return : IMG_TRUE if the device has non-mappable part of local memory ++******************************************************************************/ ++IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++/*! ++****************************************************************************** ++ @Function : PVRSRVSystemWaitCycles ++ ++ @Description : Waits for at least ui32Cycles of the Device clk. ++******************************************************************************/ ++void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles); ++ ++PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice, ++ IMG_UINT32 ui32IRQ, ++ const IMG_CHAR *pszName, ++ PFN_LISR pfnLISR, ++ void *pvData, ++ IMG_HANDLE *phLISRData); ++ ++PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData); ++ ++int PVRSRVGetDriverStatus(void); ++ ++/*! 
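****************************************************************************** Usage sketch (illustrative; the handler DeviceLISRHandler, its context pvDeviceData and the name string are placeholders): installing and later removing a device LISR using the IRQ details held in the device config: ++ ++ IMG_HANDLE hLISRData; ++ ++ eError = PVRSRVSystemInstallDeviceLISR(psDevConfig->pvOSDevice, ++ psDevConfig->ui32IRQ, ++ "device_lisr", ++ DeviceLISRHandler, ++ pvDeviceData, ++ &hLISRData); ++ ++ PVRSRVSystemUninstallDeviceLISR(hLISRData); ++******************************************************************************/ ++ ++/*! 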
++****************************************************************************** ++ @Function : PVRSRVIsBridgeEnabled ++ ++ @Description : Returns whether the given bridge group is enabled ++ ++ @Return : IMG_TRUE if the given bridge group is enabled ++******************************************************************************/ ++static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui32BridgeGroup) ++{ ++ IMG_UINT32 ui32Bridges; ++ IMG_UINT32 ui32Offset; ++ ++ PVR_UNREFERENCED_PARAMETER(hServices); ++ ++#if defined(SUPPORT_RGX) ++ if (ui32BridgeGroup >= PVRSRV_BRIDGE_RGX_FIRST) ++ { ++ ui32Bridges = gui32RGXBridges; ++ ui32Offset = PVRSRV_BRIDGE_RGX_FIRST; ++ } ++ else ++#endif /* SUPPORT_RGX */ ++ { ++ ui32Bridges = gui32PVRBridges; ++ ui32Offset = PVRSRV_BRIDGE_FIRST; ++ } ++ ++ return (IMG_BOOL)(((1U << (ui32BridgeGroup - ui32Offset)) & ui32Bridges) != 0); ++} ++ ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#if defined(EMULATOR) ++ void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState); ++ void SetTrustedDeviceAceEnabled(void); ++#endif ++#endif ++ ++/*! ++****************************************************************************** ++ @Function : PVRSRVCreateHWPerfHostThread ++ ++ @Description : Creates HWPerf event object and thread unless already created ++ ++ @Input ui32Timeout : Initial timeout (ms) between updates on the HWPerf thread ++ ++ @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR ++ code ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVCreateHWPerfHostThread(IMG_UINT32 ui32Timeout); ++ ++/*! ++****************************************************************************** ++ @Function : PVRSRVDestroyHWPerfHostThread ++ ++ @Description : Destroys HWPerf event object and thread if created ++ ++ @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR ++ code ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVDestroyHWPerfHostThread(void); ++ ++/*! ++****************************************************************************** ++ @Function : PVRSRVPhysMemHeapsInit ++ ++ @Description : Registers and acquires physical memory heaps ++ ++ @Return : PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR ++ code ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVPhysMemHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode, PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++/*! ++****************************************************************************** ++ @Function : PVRSRVPhysMemHeapsDeinit ++ ++ @Description : Releases and unregisters physical memory heaps ++ ++ @Return : None ++******************************************************************************/ ++void PVRSRVPhysMemHeapsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/*************************************************************************/ /*! ++@Function FindPhysHeapConfig ++@Description Find Phys Heap Config from Device Config. ++@Input psDevConfig Pointer to device config. ++@Input ui32Flags Find heap that matches flags. ++@Return PHYS_HEAP_CONFIG* Return a config, or NULL if not found. 
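Callers must check for NULL before dereferencing the returned config.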
++*/ /**************************************************************************/ ++PHYS_HEAP_CONFIG* FindPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig, ++ PHYS_HEAP_USAGE_FLAGS ui32Flags); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVGetDeviceInstance ++@Description Return the specified device instance from Device node list. ++@Input ui32Instance Device instance to find ++@Return PVRSRV_DEVICE_NODE* Return a device node, or NULL if not found. ++*/ /**************************************************************************/ ++PVRSRV_DEVICE_NODE* PVRSRVGetDeviceInstance(IMG_UINT32 ui32Instance); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVGetDeviceInstanceByOSId ++@Description Return the specified device instance by OS Id. ++@Input i32OSInstance OS device Id to find ++@Return PVRSRV_DEVICE_NODE* Return a device node, or NULL if not found. ++*/ /**************************************************************************/ ++PVRSRV_DEVICE_NODE *PVRSRVGetDeviceInstanceByOSId(IMG_INT32 i32OSInstance); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDefaultDomainPower ++@Description Returns psDevNode->eCurrentSysPowerState ++@Input PVRSRV_DEVICE_NODE* Device node ++@Return PVRSRV_SYS_POWER_STATE System power state tracked internally ++*/ /**************************************************************************/ ++PVRSRV_SYS_POWER_STATE PVRSRVDefaultDomainPower(PVRSRV_DEVICE_NODE *psDevNode); ++ ++#endif /* PVRSRV_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_apphint.h b/drivers/gpu/drm/img-rogue/pvrsrv_apphint.h +new file mode 100644 +index 000000000000..e35426680795 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_apphint.h +@@ -0,0 +1,71 @@ ++/**************************************************************************/ /*! ++@File ++@Title PowerVR AppHint generic interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. 
++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#if !defined(PVRSRV_APPHINT_H) ++#define PVRSRV_APPHINT_H ++ ++/* Supplied to PVRSRVAppHintRegisterHandlers*() functions when the apphint ++ * is a global driver apphint, i.e. apphints not present in ++ * APPHINT_DEBUGFS_DEVICE_ID, i.e. not per device. ++ */ ++#define APPHINT_OF_DRIVER_NO_DEVICE ((void*)-1U) ++ ++#if defined(__linux__) ++ ++#include "km_apphint.h" ++#define PVRSRVAppHintDumpState(d) pvr_apphint_dump_state(d) ++#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) pvr_apphint_register_handlers_uint64(i,q,s,d,p) ++#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) pvr_apphint_register_handlers_uint32(i,q,s,d,p) ++#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) pvr_apphint_register_handlers_bool(i,q,s,d,p) ++#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) pvr_apphint_register_handlers_string(i,q,s,d,p) ++ ++#else ++ ++#define PVRSRVAppHintDumpState(d) ++#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) ++#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) ++#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) ++#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) ++ ++#endif ++ ++#endif /* PVRSRV_APPHINT_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.c b/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.c +new file mode 100644 +index 000000000000..2ce4ae0a858a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.c +@@ -0,0 +1,385 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR Common Bridge Init/Deinit Module (kernel side) ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements common PVR Bridge init/deinit code ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "pvrsrv_bridge_init.h" ++#include "srvcore.h" ++ ++/* These will go when full bridge gen comes in */ ++#if defined(PDUMP) ++PVRSRV_ERROR InitPDUMPCTRLBridge(void); ++void DeinitPDUMPCTRLBridge(void); ++PVRSRV_ERROR InitPDUMPBridge(void); ++void DeinitPDUMPBridge(void); ++PVRSRV_ERROR InitRGXPDUMPBridge(void); ++void DeinitRGXPDUMPBridge(void); ++#endif ++#if defined(SUPPORT_DISPLAY_CLASS) ++PVRSRV_ERROR InitDCBridge(void); ++void DeinitDCBridge(void); ++#endif ++PVRSRV_ERROR InitMMBridge(void); ++void DeinitMMBridge(void); ++#if !defined(EXCLUDE_CMM_BRIDGE) ++PVRSRV_ERROR InitCMMBridge(void); ++void DeinitCMMBridge(void); ++#endif ++PVRSRV_ERROR InitPDUMPMMBridge(void); ++void DeinitPDUMPMMBridge(void); ++PVRSRV_ERROR InitSRVCOREBridge(void); ++void DeinitSRVCOREBridge(void); ++PVRSRV_ERROR InitSYNCBridge(void); ++void DeinitSYNCBridge(void); ++#if defined(SUPPORT_DMA_TRANSFER) ++PVRSRV_ERROR InitDMABridge(void); ++void DeinitDMABridge(void); ++#endif ++ ++#if defined(SUPPORT_RGX) ++PVRSRV_ERROR InitRGXTA3DBridge(void); ++void DeinitRGXTA3DBridge(void); ++#if defined(SUPPORT_RGXTQ_BRIDGE) ++PVRSRV_ERROR InitRGXTQBridge(void); ++void DeinitRGXTQBridge(void); ++#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ ++ ++#if defined(SUPPORT_USC_BREAKPOINT) ++PVRSRV_ERROR InitRGXBREAKPOINTBridge(void); ++void DeinitRGXBREAKPOINTBridge(void); ++#endif ++PVRSRV_ERROR InitRGXFWDBGBridge(void); ++void DeinitRGXFWDBGBridge(void); ++PVRSRV_ERROR InitRGXHWPERFBridge(void); ++void DeinitRGXHWPERFBridge(void); ++#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) ++PVRSRV_ERROR InitRGXREGCONFIGBridge(void); ++void DeinitRGXREGCONFIGBridge(void); ++#endif ++PVRSRV_ERROR InitRGXKICKSYNCBridge(void); ++void DeinitRGXKICKSYNCBridge(void); ++#endif /* SUPPORT_RGX */ ++PVRSRV_ERROR InitCACHEBridge(void); ++void DeinitCACHEBridge(void); ++#if defined(SUPPORT_SECURE_EXPORT) ++PVRSRV_ERROR InitSMMBridge(void); ++void DeinitSMMBridge(void); ++#endif ++#if !defined(EXCLUDE_HTBUFFER_BRIDGE) ++PVRSRV_ERROR InitHTBUFFERBridge(void); ++void DeinitHTBUFFERBridge(void); ++#endif ++PVRSRV_ERROR InitPVRTLBridge(void); ++void DeinitPVRTLBridge(void); ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++PVRSRV_ERROR InitRIBridge(void); ++void DeinitRIBridge(void); ++#endif ++PVRSRV_ERROR 
InitDEVICEMEMHISTORYBridge(void); ++void DeinitDEVICEMEMHISTORYBridge(void); ++#if defined(SUPPORT_VALIDATION_BRIDGE) ++PVRSRV_ERROR InitVALIDATIONBridge(void); ++void DeinitVALIDATIONBridge(void); ++#endif ++#if defined(PVR_TESTING_UTILS) ++PVRSRV_ERROR InitTUTILSBridge(void); ++void DeinitTUTILSBridge(void); ++#endif ++PVRSRV_ERROR InitSYNCTRACKINGBridge(void); ++void DeinitSYNCTRACKINGBridge(void); ++#if defined(SUPPORT_WRAP_EXTMEM) ++PVRSRV_ERROR InitMMEXTMEMBridge(void); ++void DeinitMMEXTMEMBridge(void); ++#endif ++#if defined(SUPPORT_FALLBACK_FENCE_SYNC) ++PVRSRV_ERROR InitSYNCFALLBACKBridge(void); ++void DeinitSYNCFALLBACKBridge(void); ++#endif ++PVRSRV_ERROR InitRGXTIMERQUERYBridge(void); ++void DeinitRGXTIMERQUERYBridge(void); ++#if defined(SUPPORT_DI_BRG_IMPL) ++PVRSRV_ERROR InitDIBridge(void); ++void DeinitDIBridge(void); ++#endif ++ ++PVRSRV_ERROR ++ServerBridgeInit(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ BridgeDispatchTableStartOffsetsInit(); ++ ++ eError = InitSRVCOREBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitSRVCOREBridge"); ++ ++ eError = InitSYNCBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitSYNCBridge"); ++ ++#if defined(PDUMP) ++ eError = InitPDUMPCTRLBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitPDUMPCTRLBridge"); ++#endif ++ ++ eError = InitMMBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitMMBridge"); ++ ++#if !defined(EXCLUDE_CMM_BRIDGE) ++ eError = InitCMMBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitCMMBridge"); ++#endif ++ ++#if defined(PDUMP) ++ eError = InitPDUMPMMBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitPDUMPMMBridge"); ++ ++ eError = InitPDUMPBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitPDUMPBridge"); ++#endif ++ ++#if defined(SUPPORT_DISPLAY_CLASS) ++ eError = InitDCBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitDCBridge"); ++#endif ++ ++ eError = InitCACHEBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitCACHEBridge"); ++ ++#if defined(SUPPORT_SECURE_EXPORT) ++ eError = InitSMMBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitSMMBridge"); ++#endif ++ ++#if !defined(EXCLUDE_HTBUFFER_BRIDGE) ++ eError = InitHTBUFFERBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitHTBUFFERBridge"); ++#endif ++ ++ eError = InitPVRTLBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitPVRTLBridge"); ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ eError = InitRIBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRIBridge"); ++#endif ++ ++#if defined(SUPPORT_VALIDATION_BRIDGE) ++ eError = InitVALIDATIONBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitVALIDATIONBridge"); ++#endif ++ ++#if defined(PVR_TESTING_UTILS) ++ eError = InitTUTILSBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitTUTILSBridge"); ++#endif ++ ++ eError = InitDEVICEMEMHISTORYBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitDEVICEMEMHISTORYBridge"); ++ ++ eError = InitSYNCTRACKINGBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitSYNCTRACKINGBridge"); ++ ++#if defined(SUPPORT_DMA_TRANSFER) ++ eError = InitDMABridge(); ++ PVR_LOG_IF_ERROR(eError, "InitDMABridge"); ++#endif ++ ++#if defined(SUPPORT_RGX) ++ ++#if defined(SUPPORT_RGXTQ_BRIDGE) ++ eError = InitRGXTQBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRGXTQBridge"); ++#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ ++ ++ eError = InitRGXTA3DBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRGXTA3DBridge"); ++ ++ #if defined(SUPPORT_USC_BREAKPOINT) ++ eError = InitRGXBREAKPOINTBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRGXBREAKPOINTBridge"); ++#endif ++ ++ eError = InitRGXFWDBGBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRGXFWDBGBridge"); ++ ++#if defined(PDUMP) ++ eError = InitRGXPDUMPBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRGXPDUMPBridge"); ++#endif ++ 
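++ /* As with the calls above, a failure here is only logged: eError is
++ * overwritten by each subsequent init call, so ServerBridgeInit()
++ * ultimately returns the result of the last call it makes. */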
++ eError = InitRGXHWPERFBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRGXHWPERFBridge"); ++ ++#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) ++ eError = InitRGXREGCONFIGBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRGXREGCONFIGBridge"); ++#endif ++ ++ eError = InitRGXKICKSYNCBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRGXKICKSYNCBridge"); ++ ++ eError = InitRGXTIMERQUERYBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitRGXTIMERQUERYBridge"); ++ ++#endif /* SUPPORT_RGX */ ++ ++#if defined(SUPPORT_WRAP_EXTMEM) ++ eError = InitMMEXTMEMBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitMMEXTMEMBridge"); ++#endif ++ ++#if defined(SUPPORT_FALLBACK_FENCE_SYNC) ++ eError = InitSYNCFALLBACKBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitSYNCFALLBACKBridge"); ++#endif ++ ++#if defined(SUPPORT_DI_BRG_IMPL) ++ eError = InitDIBridge(); ++ PVR_LOG_IF_ERROR(eError, "InitDIBridge"); ++#endif ++ ++ eError = OSPlatformBridgeInit(); ++ PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeInit"); ++ ++ return eError; ++} ++ ++void ServerBridgeDeInit(void) ++{ ++ OSPlatformBridgeDeInit(); ++ ++#if defined(SUPPORT_DI_BRG_IMPL) ++ DeinitDIBridge(); ++#endif ++ ++#if defined(SUPPORT_FALLBACK_FENCE_SYNC) ++ DeinitSYNCFALLBACKBridge(); ++#endif ++ ++#if defined(SUPPORT_WRAP_EXTMEM) ++ DeinitMMEXTMEMBridge(); ++#endif ++ ++ DeinitSRVCOREBridge(); ++ ++ DeinitSYNCBridge(); ++ ++#if defined(PDUMP) ++ DeinitPDUMPCTRLBridge(); ++#endif ++ ++ DeinitMMBridge(); ++ ++#if !defined(EXCLUDE_CMM_BRIDGE) ++ DeinitCMMBridge(); ++#endif ++ ++#if defined(PDUMP) ++ DeinitPDUMPMMBridge(); ++ ++ DeinitPDUMPBridge(); ++#endif ++ ++#if defined(PVR_TESTING_UTILS) ++ DeinitTUTILSBridge(); ++#endif ++ ++#if defined(SUPPORT_DISPLAY_CLASS) ++ DeinitDCBridge(); ++#endif ++ ++ DeinitCACHEBridge(); ++ ++#if defined(SUPPORT_SECURE_EXPORT) ++ DeinitSMMBridge(); ++#endif ++ ++#if !defined(EXCLUDE_HTBUFFER_BRIDGE) ++ DeinitHTBUFFERBridge(); ++#endif ++ ++ DeinitPVRTLBridge(); ++ ++#if defined(SUPPORT_VALIDATION_BRIDGE) ++ DeinitVALIDATIONBridge(); ++#endif ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ DeinitRIBridge(); ++#endif ++ ++ DeinitDEVICEMEMHISTORYBridge(); ++ ++ DeinitSYNCTRACKINGBridge(); ++ ++#if defined(SUPPORT_DMA_TRANSFER) ++ DeinitDMABridge(); ++#endif ++ ++#if defined(SUPPORT_RGX) ++ ++#if defined(SUPPORT_RGXTQ_BRIDGE) ++ DeinitRGXTQBridge(); ++#endif /* defined(SUPPORT_RGXTQ_BRIDGE) */ ++ ++ DeinitRGXTA3DBridge(); ++ ++#if defined(SUPPORT_USC_BREAKPOINT) ++ DeinitRGXBREAKPOINTBridge(); ++#endif ++ ++ DeinitRGXFWDBGBridge(); ++ ++#if defined(PDUMP) ++ DeinitRGXPDUMPBridge(); ++#endif ++ ++ DeinitRGXHWPERFBridge(); ++ ++#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) ++ DeinitRGXREGCONFIGBridge(); ++#endif ++ ++ DeinitRGXKICKSYNCBridge(); ++ ++ DeinitRGXTIMERQUERYBridge(); ++ ++#endif /* SUPPORT_RGX */ ++} +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.h b/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.h +new file mode 100644 +index 000000000000..750c9816c8ac +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_bridge_init.h +@@ -0,0 +1,53 @@ ++/**************************************************************************/ /*! ++@File ++@Title PVR Common Bridge Init/Deinit Module (kernel side) ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the common PVR Bridge init/deinit code ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef PVRSRV_BRIDGE_INIT_H ++#define PVRSRV_BRIDGE_INIT_H ++ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++PVRSRV_ERROR ServerBridgeInit(void); ++void ServerBridgeDeInit(void); ++ ++#endif /* PVRSRV_BRIDGE_INIT_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h b/drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h +new file mode 100644 +index 000000000000..9eb454f5e1d1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_cleanup.h +@@ -0,0 +1,177 @@ ++/**************************************************************************/ /*! ++@File ++@Title PowerVR SrvKM cleanup thread deferred work interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /***************************************************************************/
++
++#ifndef PVRSRV_CLEANUP_H
++#define PVRSRV_CLEANUP_H
++
++#include "dllist.h"
++
++/**************************************************************************/ /*!
++@Brief CLEANUP_THREAD_FN
++
++@Description This is the function prototype for the pfnFree member found in
++ the structure PVRSRV_CLEANUP_THREAD_WORK. The function is
++ responsible for carrying out the clean up work and if successful
++ freeing the memory originally supplied to the call
++ PVRSRVCleanupThreadAddWork().
++
++@Input pvParam This is private data originally supplied by the caller
++ to PVRSRVCleanupThreadAddWork() when registering the
++ clean up work item, psData->pvData. It can be cast
++ to a relevant type within the using module.
++
++@Return PVRSRV_OK if the cleanup operation was successful and the
++ callback has freed the PVRSRV_CLEANUP_THREAD_WORK* work item
++ memory originally supplied to PVRSRVCleanupThreadAddWork().
++ Any other error code will lead to the work item
++ being re-queued and hence the original
++ PVRSRV_CLEANUP_THREAD_WORK* must not be freed.
++*/ /***************************************************************************/
++
++typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam);
++
++
++/* Typical number of times a caller should want the work to be retried in case
++ * of the callback function (pfnFree) returning an error.
++ * Callers to PVRSRVCleanupThreadAddWork should provide this value as the retry
++ * count (ui32RetryCount) unless there are special requirements.
++ * A value of 200 corresponds to roughly 20s (200 * 100ms). If it has not
++ * succeeded by then, give up, as an unrecoverable problem has occurred.
++ */
++#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 200u
++/* As for CLEANUP_THREAD_RETRY_COUNT_DEFAULT, but the call waits for
++ * a specified amount of time rather than a number of retries.
++ */
++#define CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT 20000u /* 20s */
++
++/* Use to set retry count on a cleanup item.
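++ * Callers normally pass CLEANUP_THREAD_RETRY_COUNT_DEFAULT as _count
++ * (see the note above that constant).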
++ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
++ * _count - retry count
++ */
++#define CLEANUP_THREAD_SET_RETRY_COUNT(_item,_count) \
++ do { \
++ (_item)->ui32RetryCount = (_count); \
++ (_item)->ui32TimeStart = 0; \
++ (_item)->ui32TimeEnd = 0; \
++ } while (0)
++
++/* Use to set timeout deadline on a cleanup item.
++ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
++ * _timeout - timeout in milliseconds, if 0
++ * CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT is used
++ */
++#define CLEANUP_THREAD_SET_RETRY_TIMEOUT(_item,_timeout) \
++ do { \
++ (_item)->ui32RetryCount = 0; \
++ (_item)->ui32TimeStart = OSClockms(); \
++ (_item)->ui32TimeEnd = (_item)->ui32TimeStart + ((_timeout) > 0 ? \
++ (_timeout) : CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); \
++ } while (0)
++
++/* Indicates if the timeout on a given item has been reached, i.e. the
++ * elapsed time is at least the allotted time.
++ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
++ */
++#define CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(_item) \
++ ((_item)->ui32TimeEnd - (_item)->ui32TimeStart <= \
++ OSClockms() - (_item)->ui32TimeStart)
++
++/* Indicates if the current item is waiting on timeout or retry count.
++ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
++ */
++#define CLEANUP_THREAD_IS_RETRY_TIMEOUT(_item) \
++ ((_item)->ui32TimeStart != (_item)->ui32TimeEnd)
++
++/* Clean up work item specifics so that the task can be managed by the
++ * pvr_defer_free cleanup thread in the Server.
++ */
++typedef struct _PVRSRV_CLEANUP_THREAD_WORK_
++{
++ DLLIST_NODE sNode; /*!< List node used internally by the cleanup
++ thread */
++ CLEANUP_THREAD_FN pfnFree; /*!< Pointer to the function to be called to
++ carry out the deferred cleanup */
++ void *pvData; /*!< private data for pfnFree, usually a way back
++ to the original PVRSRV_CLEANUP_THREAD_WORK*
++ pointer supplied in the call to
++ PVRSRVCleanupThreadAddWork(). */
++ IMG_UINT32 ui32TimeStart; /*!< Timestamp in ms of the moment when the
++ cleanup item was created. */
++ IMG_UINT32 ui32TimeEnd; /*!< Time in ms after which no further retry
++ attempts will be made; the item is discarded
++ and an error logged when this is reached. */
++ IMG_UINT32 ui32RetryCount; /*!< Number of times the callback should be
++ retried when it returns an error. */
++ IMG_BOOL bDependsOnHW; /*!< Retry again after the RGX interrupt signals
++ the global event object */
++} PVRSRV_CLEANUP_THREAD_WORK;
++
++
++/**************************************************************************/ /*!
++@Function PVRSRVCleanupThreadAddWork
++
++@Description Add a work item to be called from the cleanup thread
++
++@Input psData : The function pointer and private data for the callback
++
++@Return None
++*/ /***************************************************************************/
++void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData);
++
++/**************************************************************************/ /*!
++@Function PVRSRVCleanupThreadGetPid
++
++@Description Returns Cleanup Thread's PID.
++
++@Return PID of the Cleanup Thread
++*/ /***************************************************************************/
++IMG_PID PVRSRVCleanupThreadGetPid(void);
++
++/**************************************************************************/ /*!
++@Function PVRSRVCleanupThreadGetTid
++
++@Description Returns Cleanup Thread's TID.
++ ++@Return TID of the Cleanup Thread ++*/ /***************************************************************************/ ++uintptr_t PVRSRVCleanupThreadGetTid(void); ++ ++#endif /* PVRSRV_CLEANUP_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_device.h b/drivers/gpu/drm/img-rogue/pvrsrv_device.h +new file mode 100644 +index 000000000000..b97e015cb700 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_device.h +@@ -0,0 +1,401 @@ ++/**************************************************************************/ /*! ++@File ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /***************************************************************************/
++
++#ifndef PVRSRV_DEVICE_H
++#define PVRSRV_DEVICE_H
++
++#include "img_types.h"
++#include "physheap.h"
++#include "pvrsrv_error.h"
++#include "pvrsrv_memalloc_physheap.h"
++#include "pvrsrv_firmware_boot.h"
++#include "rgx_fwif_km.h"
++#include "servicesext.h"
++#include "cache_ops.h"
++
++#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS)
++#include "pvr_dvfs.h"
++#endif
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++#include "virt_validation_defs.h"
++#endif
++
++typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG;
++typedef enum _DRIVER_MODE_
++{
++/* Do not use these enumerations directly; to query the
++ current driver mode, use the PVRSRV_VZ_MODE_IS()
++ macro */
++ DRIVER_MODE_NATIVE = -1,
++ DRIVER_MODE_HOST = 0,
++ DRIVER_MODE_GUEST
++} PVRSRV_DRIVER_MODE;
++
++typedef enum
++{
++ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_MAPPABLE = 0,
++ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_NON_MAPPABLE = 1,
++ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_LAST
++} PVRSRV_DEVICE_LOCAL_MEMORY_ARENA;
++
++typedef enum _PVRSRV_DEVICE_SNOOP_MODE_
++{
++ PVRSRV_DEVICE_SNOOP_NONE = 0,
++ PVRSRV_DEVICE_SNOOP_CPU_ONLY,
++ PVRSRV_DEVICE_SNOOP_DEVICE_ONLY,
++ PVRSRV_DEVICE_SNOOP_CROSS,
++ PVRSRV_DEVICE_SNOOP_EMULATED,
++} PVRSRV_DEVICE_SNOOP_MODE;
++
++#if defined(SUPPORT_SOC_TIMER)
++typedef IMG_UINT64
++(*PFN_SYS_DEV_SOC_TIMER_READ)(IMG_HANDLE hSysData);
++#endif
++
++typedef enum _PVRSRV_DEVICE_FABRIC_TYPE_
++{
++ PVRSRV_DEVICE_FABRIC_NONE = 0,
++ PVRSRV_DEVICE_FABRIC_ACELITE,
++ PVRSRV_DEVICE_FABRIC_FULLACE,
++} PVRSRV_DEVICE_FABRIC_TYPE;
++
++typedef IMG_UINT32
++(*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData);
++
++typedef PVRSRV_ERROR
++(*PFN_SYS_PRE_POWER)(IMG_HANDLE hSysData,
++ PVRSRV_SYS_POWER_STATE eNewPowerState,
++ PVRSRV_SYS_POWER_STATE eCurrentPowerState,
++ PVRSRV_POWER_FLAGS ePwrFlags);
++
++typedef PVRSRV_ERROR
++(*PFN_SYS_POST_POWER)(IMG_HANDLE hSysData,
++ PVRSRV_SYS_POWER_STATE eNewPowerState,
++ PVRSRV_SYS_POWER_STATE eCurrentPowerState,
++ PVRSRV_POWER_FLAGS ePwrFlags);
++
++/*************************************************************************/ /*!
++@Brief Callback function type PFN_SYS_GET_POWER
++
++@Description This function queries the SoC power registers to determine
++ if the power domain on which the GPU resides is powered on.
++
++ Implementation of this callback is optional - where it is not provided,
++ the driver will assume the domain power state depending on driver type:
++ regular drivers assume it is unpowered at startup, while drivers with
++ AutoVz support expect the GPU domain to be powered on initially. The power
++ state will then be tracked internally according to the pfnPrePowerState
++ and pfnPostPowerState calls using a fallback function.
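++
++ A minimal sketch of such a callback (illustrative only: the register
++ handle, offset and status bit below are assumptions about a hypothetical
++ SoC, not something this interface mandates):
++
++   static PVRSRV_SYS_POWER_STATE SysGpuDomainPower(struct _PVRSRV_DEVICE_NODE_ *psDevNode)
++   {
++       IMG_UINT32 ui32Status;
++
++       PVR_UNREFERENCED_PARAMETER(psDevNode);
++
++       /* Read a SoC-specific power status register (names assumed) */
++       ui32Status = OSReadHWReg32(gpvSocRegBase, SOC_GPU_POWER_STATUS_OFFSET);
++
++       return (ui32Status & SOC_GPU_POWERED_BIT) ? PVRSRV_SYS_POWER_STATE_ON
++                                                 : PVRSRV_SYS_POWER_STATE_OFF;
++   }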
++ ++@Input psDevNode Pointer to node struct of the ++ device being initialised ++ ++@Return PVRSRV_SYS_POWER_STATE_ON if the respective device's hardware ++ domain is powered on ++ PVRSRV_SYS_POWER_STATE_OFF if the domain is powered off ++*/ /**************************************************************************/ ++typedef PVRSRV_SYS_POWER_STATE ++(*PFN_SYS_GET_POWER)(struct _PVRSRV_DEVICE_NODE_ *psDevNode); ++ ++typedef void ++(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++typedef PVRSRV_ERROR ++(*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(IMG_HANDLE hSysData, ++ IMG_UINT64 ui64MemSize); ++ ++typedef void (*PFN_SYS_DEV_FEAT_DEP_INIT)(PVRSRV_DEVICE_CONFIG *, IMG_UINT64); ++ ++typedef void ++(*PFN_SYS_DEV_HOST_CACHE_MAINTENANCE)(IMG_HANDLE hSysData, ++ PVRSRV_CACHE_OP eRequestType, ++ void *pvVirtStart, ++ void *pvVirtEnd, ++ IMG_CPU_PHYADDR sCPUPhysStart, ++ IMG_CPU_PHYADDR sCPUPhysEnd); ++ ++typedef void* ++(*PFN_SLAVE_DMA_CHAN)(PVRSRV_DEVICE_CONFIG*, char*); ++ ++typedef void ++(*PFN_SLAVE_DMA_FREE)(PVRSRV_DEVICE_CONFIG*, ++ void*); ++ ++typedef void ++(*PFN_DEV_PHY_ADDR_2_DMA_ADDR)(PVRSRV_DEVICE_CONFIG *, ++ IMG_DMA_ADDR *, ++ IMG_DEV_PHYADDR *, ++ IMG_BOOL *, ++ IMG_UINT32, ++ IMG_BOOL); ++ ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ ++typedef struct _PVRSRV_TD_FW_PARAMS_ ++{ ++ const void *pvFirmware; ++ IMG_UINT32 ui32FirmwareSize; ++ PVRSRV_FW_BOOT_PARAMS uFWP; ++} PVRSRV_TD_FW_PARAMS; ++ ++typedef PVRSRV_ERROR ++(*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData, ++ PVRSRV_TD_FW_PARAMS *psTDFWParams); ++ ++typedef struct _PVRSRV_TD_POWER_PARAMS_ ++{ ++ IMG_DEV_PHYADDR sPCAddr; ++ ++ /* MIPS-only fields */ ++ IMG_DEV_PHYADDR sGPURegAddr; ++ IMG_DEV_PHYADDR sBootRemapAddr; ++ IMG_DEV_PHYADDR sCodeRemapAddr; ++ IMG_DEV_PHYADDR sDataRemapAddr; ++} PVRSRV_TD_POWER_PARAMS; ++ ++typedef PVRSRV_ERROR ++(*PFN_TD_SET_POWER_PARAMS)(IMG_HANDLE hSysData, ++ PVRSRV_TD_POWER_PARAMS *psTDPowerParams); ++ ++typedef PVRSRV_ERROR ++(*PFN_TD_RGXSTART)(IMG_HANDLE hSysData); ++ ++typedef PVRSRV_ERROR ++(*PFN_TD_RGXSTOP)(IMG_HANDLE hSysData); ++ ++#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++typedef void (*PFN_SYS_DEV_VIRT_INIT)(IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], IMG_UINT64[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); ++#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ ++ ++typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG_ ++{ ++ IMG_UINT32 ui32Status; /*!< FW status */ ++ IMG_UINT32 ui32Reason; /*!< Reason for FW status */ ++} PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG; ++ ++typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF_ ++{ ++ IMG_DEV_VIRTADDR sFWFaultAddr; /*!< FW page fault address */ ++} PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF; ++ ++typedef struct _PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM_ ++{ ++ IMG_UINT32 ui32ExtJobRef; /*!< External Job Reference of any affected GPU work */ ++ RGXFWIF_DM eDM; /*!< Data Master which was running any affected GPU work */ ++} PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM; ++ ++typedef struct _PVRSRV_ROBUSTNESS_NOTIFY_DATA_ ++{ ++ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for error/reset */ ++ IMG_PID pid; /*!< Pid of process which created the errored context */ ++ union ++ { ++ PVRSRV_ROBUSTNESS_ERR_DATA_CHECKSUM sChecksumErrData; /*!< Data returned for checksum errors */ ++ PVRSRV_ROBUSTNESS_ERR_DATA_FW_PF sFwPFErrData; /*!< Data returned for FW page faults */ ++ PVRSRV_ROBUSTNESS_ERR_DATA_HOST_WDG sHostWdgData; /*!< Data returned for Host Wdg FW faults */ ++ } 
uErrData; ++} PVRSRV_ROBUSTNESS_NOTIFY_DATA; ++ ++typedef void ++(*PFN_SYS_DEV_ERROR_NOTIFY)(IMG_HANDLE hSysData, ++ PVRSRV_ROBUSTNESS_NOTIFY_DATA *psRobustnessErrorData); ++ ++struct _PVRSRV_DEVICE_CONFIG_ ++{ ++ /*! OS device passed to SysDevInit (linux: 'struct device') */ ++ void *pvOSDevice; ++ ++ /*! ++ *! Service representation of pvOSDevice. Should be set to NULL when the ++ *! config is created in SysDevInit. Set by Services once a device node has ++ *! been created for this config and unset before SysDevDeInit is called. ++ */ ++ struct _PVRSRV_DEVICE_NODE_ *psDevNode; ++ ++ /*! Name of the device */ ++ IMG_CHAR *pszName; ++ ++ /*! Version of the device (optional) */ ++ IMG_CHAR *pszVersion; ++ ++ /*! Register bank address */ ++ IMG_CPU_PHYADDR sRegsCpuPBase; ++ /*! Register bank size */ ++ IMG_UINT32 ui32RegsSize; ++ /*! Device interrupt number */ ++ IMG_UINT32 ui32IRQ; ++ ++ PVRSRV_DEVICE_SNOOP_MODE eCacheSnoopingMode; ++ ++ /*! Device specific data handle */ ++ IMG_HANDLE hDevData; ++ ++ /*! System specific data that gets passed into system callback functions. */ ++ IMG_HANDLE hSysData; ++ ++ IMG_BOOL bHasNonMappableLocalMemory; ++ ++ /*! Indicates if system supports FBCDC v3.1 */ ++ IMG_BOOL bHasFBCDCVersion31; ++ ++ /*! Physical Heap definitions for this device. ++ * eDefaultHeap must be set to GPU_LOCAL or CPU_LOCAL. Specifying any other value ++ * (e.g. DEFAULT) will lead to an error at device discovery. ++ * pasPhysHeap array must contain at least one PhysHeap, the declared default heap. ++ */ ++ PVRSRV_PHYS_HEAP eDefaultHeap; ++ PHYS_HEAP_CONFIG *pasPhysHeaps; ++ IMG_UINT32 ui32PhysHeapCount; ++ ++ /*! ++ *! Callbacks to change system device power state at the beginning and end ++ *! of a power state change (optional). ++ */ ++ PFN_SYS_PRE_POWER pfnPrePowerState; ++ PFN_SYS_POST_POWER pfnPostPowerState; ++ PFN_SYS_GET_POWER pfnGpuDomainPower; ++ ++ /*! Callback to obtain the clock frequency from the device (optional). */ ++ PFN_SYS_DEV_CLK_FREQ_GET pfnClockFreqGet; ++ ++#if defined(SUPPORT_SOC_TIMER) ++ /*! Callback to read SoC timer register value (mandatory). */ ++ PFN_SYS_DEV_SOC_TIMER_READ pfnSoCTimerRead; ++#endif ++ ++ /*! ++ *! Callback to handle memory budgeting. Can be used to reject allocations ++ *! over a certain size (optional). ++ */ ++ PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize; ++ ++ /*! ++ *! Callback to perform host CPU cache maintenance. Might be needed for ++ *! architectures which allow extensions such as RISC-V (optional). ++ */ ++ PFN_SYS_DEV_HOST_CACHE_MAINTENANCE pfnHostCacheMaintenance; ++ IMG_BOOL bHasPhysicalCacheMaintenance; ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ /*! ++ *! Callback to send FW image and FW boot time parameters to the trusted ++ *! device. ++ */ ++ PFN_TD_SEND_FW_IMAGE pfnTDSendFWImage; ++ ++ /*! ++ *! Callback to send parameters needed in a power transition to the trusted ++ *! device. ++ */ ++ PFN_TD_SET_POWER_PARAMS pfnTDSetPowerParams; ++ ++ /*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */ ++ PFN_TD_RGXSTART pfnTDRGXStart; ++ PFN_TD_RGXSTOP pfnTDRGXStop; ++#endif /* defined(SUPPORT_TRUSTED_DEVICE) */ ++ ++ /*! Function that does device feature specific system layer initialisation */ ++ PFN_SYS_DEV_FEAT_DEP_INIT pfnSysDevFeatureDepInit; ++ ++#if defined(SUPPORT_LINUX_DVFS) || defined(SUPPORT_PDVFS) ++ PVRSRV_DVFS sDVFS; ++#endif ++ ++#if defined(SUPPORT_ALT_REGBASE) ++ IMG_DEV_PHYADDR sAltRegsGpuPBase; ++#endif ++ ++ /*! ++ *! 
Indicates if device physical address 0x0 might be used as GPU memory ++ *! (e.g. LMA system or UMA system with CPU PA 0x0 reserved by the OS, ++ *! but CPU PA != device PA and device PA 0x0 available for the GPU) ++ */ ++ IMG_BOOL bDevicePA0IsValid; ++ ++ /*! ++ *! Function to initialize System-specific virtualization. If not supported ++ *! this should be a NULL reference. Only present if ++ *! SUPPORT_GPUVIRT_VALIDATION is defined. ++ */ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ PFN_SYS_DEV_VIRT_INIT pfnSysDevVirtInit; ++#endif ++ ++ /*! ++ *! Callback to notify system layer of device errors. ++ *! NB. implementers should ensure that the minimal amount of work is ++ *! done in the callback function, as it will be executed in the main ++ *! RGX MISR. (e.g. any blocking or lengthy work should be performed by ++ *! a worker queue/thread instead.) ++ */ ++ PFN_SYS_DEV_ERROR_NOTIFY pfnSysDevErrorNotify; ++ ++ /*! ++ *! Slave DMA channel request callbacks ++ */ ++ PFN_SLAVE_DMA_CHAN pfnSlaveDMAGetChan; ++ PFN_SLAVE_DMA_FREE pfnSlaveDMAFreeChan; ++ /*! ++ *! Conversion of device memory to DMA addresses ++ */ ++ PFN_DEV_PHY_ADDR_2_DMA_ADDR pfnDevPhysAddr2DmaAddr; ++ /*! ++ *! DMA channel names ++ */ ++ IMG_CHAR *pszDmaTxChanName; ++ IMG_CHAR *pszDmaRxChanName; ++ /*! ++ *! DMA device transfer restrictions ++ */ ++ IMG_UINT32 ui32DmaAlignment; ++ IMG_UINT32 ui32DmaTransferUnit; ++ /*! ++ *! System-wide presence of DMA capabilities ++ */ ++ IMG_BOOL bHasDma; ++ ++}; ++ ++#endif /* PVRSRV_DEVICE_H*/ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_device_types.h b/drivers/gpu/drm/img-rogue/pvrsrv_device_types.h +new file mode 100644 +index 000000000000..662e3bc17163 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_device_types.h +@@ -0,0 +1,55 @@ ++/*************************************************************************/ /*! ++@File ++@Title PowerVR device type definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(PVRSRV_DEVICE_TYPES_H) ++#define PVRSRV_DEVICE_TYPES_H ++ ++#include "img_types.h" ++ ++#define PVRSRV_MAX_DEVICES 16U /*!< Largest supported number of devices on the system */ ++ ++#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) ++#define __pvrsrv_defined_struct_enum__ ++#include ++#endif ++ ++#endif /* PVRSRV_DEVICE_TYPES_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_devvar.h b/drivers/gpu/drm/img-rogue/pvrsrv_devvar.h +new file mode 100644 +index 000000000000..a8c64e309fda +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_devvar.h +@@ -0,0 +1,291 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services Device Variable interface header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines the client side interface for device variables ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVRSRV_DEVVAR_H ++#define PVRSRV_DEVVAR_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define DEVVAR_MAX_NAME_LEN 32 ++ ++typedef struct SYNC_PRIM_CONTEXT_TAG *PDEVVARCTX; ++typedef struct PVRSRV_CLIENT_SYNC_PRIM_TAG *PDEVVAR; ++ ++typedef struct PVRSRV_DEV_VAR_UPDATE_TAG ++{ ++ PDEVVAR psDevVar; /*!< Pointer to the dev var */ ++ IMG_UINT32 ui32UpdateValue; /*!< the update value */ ++} PVRSRV_DEV_VAR_UPDATE; ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarContextCreate ++ ++@Description Create a new device variable context ++ ++@Input psDevConnection Device to create the device ++ variable context on ++ ++@Output phDevVarContext Handle to the created device ++ variable context ++ ++@Return PVRSRV_OK if the device variable context was successfully ++ created ++*/ ++/*****************************************************************************/ ++IMG_EXPORT PVRSRV_ERROR ++PVRSRVDevVarContextCreate(const PVRSRV_DEV_CONNECTION *psDevConnection, ++ PDEVVARCTX *phDevVarContext); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarContextDestroy ++ ++@Description Destroy a device variable context ++ ++@Input hDevVarContext Handle to the device variable ++ context to destroy ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarContextDestroy(PDEVVARCTX hDevVarContext); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarAlloc ++ ++@Description Allocate a new device variable on the specified device ++ variable context. The device variable's value is initialised ++ with the value passed in ui32InitialValue. ++ ++@Input hDevVarContext Handle to the device variable ++ context ++@Input ui32InitialValue Value to initially assign to the ++ new variable ++@Input pszDevVarName Name assigned to the device variable ++ (for debug purposes) ++ ++@Output ppsDevVar Created device variable ++ ++@Return PVRSRV_OK if the device variable was successfully created ++*/ ++/*****************************************************************************/ ++IMG_EXPORT PVRSRV_ERROR ++PVRSRVDevVarAllocI(PDEVVARCTX hDevVarContext, ++ PDEVVAR *ppsDevVar, ++ IMG_UINT32 ui32InitialValue, ++ const IMG_CHAR *pszDevVarName ++ PVR_DBG_FILELINE_PARAM); ++#define PVRSRVDevVarAlloc(hDevVarContext, ppsDevVar, ui32InitialValue, pszDevVarName) \ ++ PVRSRVDevVarAllocI( (hDevVarContext), (ppsDevVar), (ui32InitialValue), (pszDevVarName) \ ++ PVR_DBG_FILELINE ) ++ ++/*************************************************************************/ /*! 
++@Function PVRSRVDevVarFree ++ ++@Description Free a device variable ++ ++@Input psDevVar The device variable to free ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarFree(PDEVVAR psDevVar); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarSet ++ ++@Description Set the device variable to a value ++ ++@Input psDevVar The device variable to set ++ ++@Input ui32Value Value to set it to ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarSet(PDEVVAR psDevVar, ++ IMG_UINT32 ui32Value); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarGet ++ ++@Description Get the current value of the device variable ++ ++@Input psDevVar The device variable to get the ++ value of ++ ++@Return Value of the variable ++*/ ++/*****************************************************************************/ ++IMG_EXPORT IMG_UINT32 ++PVRSRVDevVarGet(PDEVVAR psDevVar); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarGetFirmwareAddr ++ ++@Description Returns the address of the associated firmware value for a ++ specified device integer (not exposed to client) ++ ++@Input psDevVar The device variable to resolve ++ ++@Return The firmware address of the device variable ++*/ ++/*****************************************************************************/ ++IMG_EXPORT IMG_UINT32 ++PVRSRVDevVarGetFirmwareAddr(PDEVVAR psDevVar); ++ ++#if defined(PDUMP) ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarPDump ++ ++@Description PDump the current value of the device variable ++ ++@Input psDevVar The device variable to PDump ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarPDump(PDEVVAR psDevVar); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVDevVarPDumpPol ++ ++@Description Do a PDump poll of the device variable ++ ++@Input psDevVar The device variable to PDump ++ ++@Input ui32Value Value to Poll for ++ ++@Input ui32Mask PDump mask operator ++ ++@Input ui32PDumpFlags PDump flags ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ IMG_UINT32 ui32PDumpFlags); ++ ++/*************************************************************************/ /*! 
++@Function PVRSRVDevVarPDumpCBP ++ ++@Description Do a PDump CB poll using the device variable ++ ++@Input psDevVar The device variable to PDump ++ ++@Input uiWriteOffset Current write offset of buffer ++ ++@Input uiPacketSize Size of the packet to write into CB ++ ++@Input uiBufferSize Size of the CB ++ ++@Return None ++*/ ++/*****************************************************************************/ ++IMG_EXPORT void ++PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, ++ IMG_UINT64 uiWriteOffset, ++ IMG_UINT64 uiPacketSize, ++ IMG_UINT64 uiBufferSize); ++#else /* PDUMP */ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVDevVarPDump) ++#endif ++static INLINE void ++PVRSRVDevVarPDump(PDEVVAR psDevVar) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevVar); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVDevVarPDumpPol) ++#endif ++static INLINE void ++PVRSRVDevVarPDumpPol(PDEVVAR psDevVar, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevVar); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ PVR_UNREFERENCED_PARAMETER(eOperator); ++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVDevVarPDumpCBP) ++#endif ++static INLINE void ++PVRSRVDevVarPDumpCBP(PDEVVAR psDevVar, ++ IMG_UINT64 uiWriteOffset, ++ IMG_UINT64 uiPacketSize, ++ IMG_UINT64 uiBufferSize) ++{ ++ PVR_UNREFERENCED_PARAMETER(psDevVar); ++ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); ++ PVR_UNREFERENCED_PARAMETER(uiPacketSize); ++ PVR_UNREFERENCED_PARAMETER(uiBufferSize); ++} ++#endif /* PDUMP */ ++ ++#if defined(__cplusplus) ++} ++#endif ++#endif /* PVRSRV_DEVVAR_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_error.c b/drivers/gpu/drm/img-rogue/pvrsrv_error.c +new file mode 100644 +index 000000000000..5cd02a28d5a1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_error.c +@@ -0,0 +1,61 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services error support ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "pvr_debug.h" ++ ++IMG_EXPORT ++const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError) ++{ ++ switch (eError) ++ { ++ case PVRSRV_OK: ++ return "PVRSRV_OK"; ++#define PVRE(x) \ ++ case x: \ ++ return #x; ++#include "pvrsrv_errors.h" ++#undef PVRE ++ default: ++ return "Unknown PVRSRV error number"; ++ } ++} +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_error.h b/drivers/gpu/drm/img-rogue/pvrsrv_error.h +new file mode 100644 +index 000000000000..0bbf8431bedc +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_error.h +@@ -0,0 +1,61 @@ ++/*************************************************************************/ /*! ++@File pvrsrv_error.h ++@Title services error enumerant ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines error codes used by any/all services modules ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(PVRSRV_ERROR_H) ++#define PVRSRV_ERROR_H ++ ++/*! ++ ***************************************************************************** ++ * Error values ++ *****************************************************************************/ ++typedef enum PVRSRV_ERROR_TAG ++{ ++ PVRSRV_OK, ++#define PVRE(x) x, ++#include "pvrsrv_errors.h" ++#undef PVRE ++ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff ++ ++} PVRSRV_ERROR; ++ ++#endif /* !defined(PVRSRV_ERROR_H) */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_errors.h b/drivers/gpu/drm/img-rogue/pvrsrv_errors.h +new file mode 100644 +index 000000000000..59b9cfe84cea +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_errors.h +@@ -0,0 +1,410 @@ ++/*************************************************************************/ /*! ++@File pvrsrv_errors.h ++@Title services error codes ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines error codes used by any/all services modules ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* Don't add include guards to this file! */ ++ ++PVRE(PVRSRV_ERROR_OUT_OF_MEMORY) ++PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS) ++PVRE(PVRSRV_ERROR_INVALID_PARAMS) ++PVRE(PVRSRV_ERROR_INIT_FAILURE) ++PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK) ++PVRE(PVRSRV_ERROR_INVALID_DEVICE) ++PVRE(PVRSRV_ERROR_NOT_OWNER) ++PVRE(PVRSRV_ERROR_BAD_MAPPING) ++PVRE(PVRSRV_ERROR_TIMEOUT) ++PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED) ++PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS) ++PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL) ++PVRE(PVRSRV_ERROR_SCENE_INVALID) ++PVRE(PVRSRV_ERROR_STREAM_ERROR) ++PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES) ++PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED) ++PVRE(PVRSRV_ERROR_CMD_TOO_BIG) ++PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED) ++PVRE(PVRSRV_ERROR_TOOMANYBUFFERS) ++PVRE(PVRSRV_ERROR_NOT_SUPPORTED) ++PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED) ++PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE) ++PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE) ++PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS) ++PVRE(PVRSRV_ERROR_RETRY) ++PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH) ++PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH) ++PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH) ++PVRE(PVRSRV_ERROR_BVNC_MISMATCH) ++PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH) ++PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG) ++PVRE(PVRSRV_ERROR_INVALID_FLAGS) ++PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS) ++PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY) ++PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR) ++PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED) ++PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED) ++PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED) ++PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR) ++PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG) ++PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND) ++PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES) ++PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP) ++PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE) ++PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE) ++PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE) ++PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP) ++PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP) ++PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY) ++PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED) ++PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED) ++PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES) ++PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY) ++PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES) ++PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE) ++PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED) ++PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED) ++PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR) ++PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR) ++PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE) ++PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS) ++PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE) ++PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE) ++PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH) ++PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK) ++PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING) ++PVRE(PVRSRV_ERROR_PMR_EMPTY) ++PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND) ++PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED) 
++PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED) ++PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED) ++PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY) ++PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP) ++PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE) ++PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION) ++PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX) ++PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX) ++PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED) ++PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT) ++PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE) ++PVRE(PVRSRV_ERROR_DEVICEMEM_VA_ALLOC_FAILED) ++PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA) ++PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM) ++PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED) ++PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING) ++PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS) ++PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP) ++PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE) ++PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND) ++PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT) ++PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND) ++PVRE(PVRSRV_ERROR_PCI_CALL_FAILED) ++PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL) ++PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE) ++PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH) ++PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY) ++PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC) ++PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL) ++PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY) ++PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES) ++PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES) ++PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES) ++PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES) ++PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES) ++PVRE(PVRSRV_ERROR_STILL_MAPPED) ++PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND) ++PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT) ++PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE) ++PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK) ++PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA) ++PVRE(PVRSRV_ERROR_INVALID_DEVINFO) ++PVRE(PVRSRV_ERROR_INVALID_MEMINFO) ++PVRE(PVRSRV_ERROR_INVALID_MISCINFO) ++PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL) ++PVRE(PVRSRV_ERROR_INVALID_CONTEXT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT) ++PVRE(PVRSRV_ERROR_INVALID_HEAP) ++PVRE(PVRSRV_ERROR_INVALID_KERNELINFO) ++PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE) ++PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE) ++PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE) ++PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR) ++PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR) ++PVRE(PVRSRV_ERROR_INVALID_HEAPINFO) ++PVRE(PVRSRV_ERROR_INVALID_PERPROC) ++PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO) ++PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST) ++PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST) ++PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP) ++PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE) ++PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS) ++PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD) ++PVRE(PVRSRV_ERROR_THREAD_READ_ERROR) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER) ++PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR) ++PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR) ++PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED) ++PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED) 
++PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE) ++PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND) ++PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL) ++PVRE(PVRSRV_ERROR_FLIP_FAILED) ++PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED) ++PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE) ++PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB) ++PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED) ++PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG) ++PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG) ++PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG) ++PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID) ++PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED) ++PVRE(PVRSRV_ERROR_SUBMIT_NEEDED) ++PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE) ++PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL) ++PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW) ++PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE) ++PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES) ++PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED) ++PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR) ++PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND) ++PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED) ++PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND) ++PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED) ++PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED) ++PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED) ++PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED) ++PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE) ++PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND) ++PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE) ++PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH) ++PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE) ++PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND) ++PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND) ++PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND) ++PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER) ++PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT) ++PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE) ++PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED) ++PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE) ++PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE) ++PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND) ++PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE) ++PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE) ++PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE) ++PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP) ++PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE) ++PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE) ++PVRE(PVRSRV_ERROR_INVALID_DEVICEID) ++PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND) ++PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED) ++PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED) ++PVRE(PVRSRV_ERROR_COPY_TEST_FAILED) ++PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED) ++PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK) ++PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED) ++PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK) 
++PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK) ++PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK) ++PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR) ++PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE) ++PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE) ++PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED) ++PVRE(PVRSRV_ERROR_BAD_SYNC_STATE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE) ++PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID) ++PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION) ++PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION) ++PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE) ++PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID) ++PVRE(PVRSRV_ERROR_PHYSHEAP_CONFIG) ++PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG) ++PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM) ++PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP) ++PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT) ++PVRE(PVRSRV_ERROR_BP_NOT_SET) ++PVRE(PVRSRV_ERROR_BP_ALREADY_SET) ++PVRE(PVRSRV_ERROR_FEATURE_DISABLED) ++PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED) ++PVRE(PVRSRV_ERROR_REG_CONFIG_FULL) ++PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE) ++PVRE(PVRSRV_ERROR_MEMORY_ACCESS) ++PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER) ++PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG) ++PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT) ++PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT) ++PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS) ++PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM) ++PVRE(PVRSRV_ERROR_DC_INVALID_SCALE) ++PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM) ++PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES) ++PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA) ++PVRE(PVRSRV_ERROR_NOT_READY) ++PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS) ++PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER) ++PVRE(PVRSRV_ERROR_NOT_FOUND) ++PVRE(PVRSRV_ERROR_ALREADY_OPEN) ++PVRE(PVRSRV_ERROR_STREAM_MISUSE) ++PVRE(PVRSRV_ERROR_STREAM_FULL) ++PVRE(PVRSRV_ERROR_STREAM_READLIMIT_REACHED) ++PVRE(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE) ++PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED) ++PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX) ++PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN) ++PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG) ++PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED) ++PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED) ++PVRE(PVRSRV_ERROR_REQUEST_TDFWMEM_PAGES_FAIL) ++PVRE(PVRSRV_ERROR_INIT_TDFWMEM_PAGES_FAIL) ++PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL) ++PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL) ++PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED) ++PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED) ++PVRE(PVRSRV_ERROR_ALREADY_EXISTS) ++PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE) ++PVRE(PVRSRV_ERROR_TASK_FAILED) ++PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED) ++PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR) ++PVRE(PVRSRV_ERROR_INVALID_OFFSET) ++PVRE(PVRSRV_ERROR_CCCB_STALLED) ++PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE) ++PVRE(PVRSRV_ERROR_NOT_ENABLED) ++PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL) ++PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH) ++PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED) ++PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL) ++PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX) ++PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT) ++PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED) ++PVRE(PVRSRV_ERROR_UNABLE_TO_COMPILE_PDS) ++PVRE(PVRSRV_ERROR_INTERNAL_ERROR) ++PVRE(PVRSRV_ERROR_BRIDGE_EFAULT) ++PVRE(PVRSRV_ERROR_BRIDGE_EINVAL) ++PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM) ++PVRE(PVRSRV_ERROR_BRIDGE_ERANGE) 
++PVRE(PVRSRV_ERROR_BRIDGE_EPERM) ++PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY) ++PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) ++PVRE(PVRSRV_ERROR_PROBE_DEFER) ++PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT) ++PVRE(PVRSRV_ERROR_CLOSE_FAILED) ++PVRE(PVRSRV_ERROR_NOT_INITIALISED) ++PVRE(PVRSRV_ERROR_CONVERSION_FAILED) ++PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL) ++PVRE(PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL) ++PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED) ++PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED) ++PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED) ++PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED) ++PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS) ++PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE) ++PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT) ++PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED) ++PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED) ++PVRE(PVRSRV_ERROR_SIGNAL_FAILED) ++PVRE(PVRSRV_ERROR_INVALID_NOTIF_STREAM) ++PVRE(PVRSRV_ERROR_INVALID_SPU_MASK) ++PVRE(PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED) ++PVRE(PVRSRV_ERROR_INVALID_PVZ_CONFIG) ++PVRE(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED) ++PVRE(PVRSRV_ERROR_NOT_SW_TIMELINE) ++PVRE(PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT) ++PVRE(PVRSRV_ERROR_INVALID_PVZ_OSID) ++PVRE(PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE) ++PVRE(PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG) ++PVRE(PVRSRV_ERROR_INTERRUPTED) ++PVRE(PVRSRV_ERROR_PWLOCK_RELEASED_REACQ_FAILED) ++PVRE(PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN) ++PVRE(PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF) ++PVRE(PVRSRV_ERROR_MULTIPLE_SECURITY_PDUMPS) ++PVRE(PVRSRV_ERROR_BAD_PARAM_SIZE) ++PVRE(PVRSRV_ERROR_INVALID_REQUEST) ++PVRE(PVRSRV_ERROR_FAILED_TO_ACQUIRE_PAGES) ++PVRE(PVRSRV_ERROR_TEST_FAILED) ++PVRE(PVRSRV_ERROR_SYNC_PRIM_OP_NOT_SUPPORTED) ++PVRE(PVRSRV_ERROR_FAILED_TO_GET_VIRT_ADDR) ++PVRE(PVRSRV_ERROR_UNABLE_TO_FREE_RESOURCE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_SEMAPHORE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_SEMAPHORE) ++PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_SEMAPHORE) ++PVRE(PVRSRV_ERROR_TOO_MANY_SYNCS) ++PVRE(PVRSRV_ERROR_ION_NO_CLIENT) ++PVRE(PVRSRV_ERROR_ION_FAILED_TO_ALLOC) ++PVRE(PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE) ++PVRE(PVRSRV_ERROR_REFCOUNT_OVERFLOW) ++PVRE(PVRSRV_ERROR_OUT_OF_RANGE) +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_firmware_boot.h b/drivers/gpu/drm/img-rogue/pvrsrv_firmware_boot.h +new file mode 100644 +index 000000000000..14a196d2767c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_firmware_boot.h +@@ -0,0 +1,87 @@ ++/**************************************************************************/ /*! ++@File ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
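++
++As a usage sketch (eError and the failing call SomeServicesCall() are
++hypothetical), any code in the preceding list can be reported by name through
++the PVRSRVGetErrorString() helper defined earlier, here with the PVR_DPF()
++logging macro from pvr_debug.h:
++
++    PVRSRV_ERROR eError = SomeServicesCall();
++    if (eError != PVRSRV_OK)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "call failed: %s",
++                 PVRSRVGetErrorString(eError)));
++    }
++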
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef PVRSRV_FIRMWARE_BOOT_H ++#define PVRSRV_FIRMWARE_BOOT_H ++ ++#include "img_types.h" ++#include "rgx_fwif_shared.h" ++ ++#define TD_MAX_NUM_MIPS_PAGETABLE_PAGES (4U) ++ ++typedef union _PVRSRV_FW_BOOT_PARAMS_ ++{ ++ struct ++ { ++ IMG_DEV_VIRTADDR sFWCodeDevVAddr; ++ IMG_DEV_VIRTADDR sFWDataDevVAddr; ++ IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; ++ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; ++ IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; ++ IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; ++ RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; ++ IMG_UINT32 ui32NumThreads; ++ } sMeta; ++ ++ struct ++ { ++ IMG_DEV_PHYADDR sGPURegAddr; ++ IMG_DEV_PHYADDR asFWPageTableAddr[TD_MAX_NUM_MIPS_PAGETABLE_PAGES]; ++ IMG_DEV_PHYADDR sFWStackAddr; ++ IMG_UINT32 ui32FWPageTableLog2PageSize; ++ IMG_UINT32 ui32FWPageTableNumPages; ++ } sMips; ++ ++ struct ++ { ++ IMG_DEV_VIRTADDR sFWCorememCodeDevVAddr; ++ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; ++ IMG_DEVMEM_SIZE_T uiFWCorememCodeSize; ++ ++ IMG_DEV_VIRTADDR sFWCorememDataDevVAddr; ++ RGXFWIF_DEV_VIRTADDR sFWCorememDataFWAddr; ++ IMG_DEVMEM_SIZE_T uiFWCorememDataSize; ++ } sRISCV; ++ ++} PVRSRV_FW_BOOT_PARAMS; ++ ++ ++#endif /* PVRSRV_FIRMWARE_BOOT_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_memalloc_physheap.h b/drivers/gpu/drm/img-rogue/pvrsrv_memalloc_physheap.h +new file mode 100644 +index 000000000000..1072ba857c9a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_memalloc_physheap.h +@@ -0,0 +1,170 @@ ++/*************************************************************************/ /*! ++@File pvrsrv_memalloc_physheap.h ++@Title Services Phys Heap types ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Used in creating and allocating from Physical Heaps. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
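++
++As a sketch of how the PVRSRV_FW_BOOT_PARAMS union above is meant to be
++filled in (sCodeDevVAddr, sCodeFWAddr and uiCodeSize are hypothetical locals
++of a loader function), a RISC-V firmware path populates only its own view:
++
++    PVRSRV_FW_BOOT_PARAMS uParams = { 0 };
++
++    uParams.sRISCV.sFWCorememCodeDevVAddr = sCodeDevVAddr;
++    uParams.sRISCV.sFWCorememCodeFWAddr   = sCodeFWAddr;
++    uParams.sRISCV.uiFWCorememCodeSize    = uiCodeSize;
++
++One union keeps the boot interface common while reflecting that a device
++embeds exactly one of the META, MIPS or RISC-V firmware processors.
++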
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef PVRSRV_MEMALLOC_PHYSHEAP_H ++#define PVRSRV_MEMALLOC_PHYSHEAP_H ++ ++#include "img_defs.h" ++ ++/* ++ * These IDs are replicated in the Device Memory allocation flags to allow ++ * allocations to be made in terms of their locality/use to ensure the correct ++ * physical heap is accessed for the given system/platform configuration. ++ * A system Phys Heap Config is linked to one or more Phys Heaps. When a heap ++ * is not present in the system configuration the allocation will fall back to ++ * the default GPU_LOCAL physical heap which all systems must define. ++ * See PVRSRV_MEMALLOCFLAGS_*_MAPPABLE_MASK. ++ * ++ * NOTE: Enum order important, table in physheap.c must change if order changed. ++ */ ++typedef IMG_UINT32 PVRSRV_PHYS_HEAP; ++/* Services client accessible heaps */ ++#define PVRSRV_PHYS_HEAP_DEFAULT 0U /* default phys heap for device memory allocations */ ++#define PVRSRV_PHYS_HEAP_GPU_LOCAL 1U /* used for buffers with more GPU access than CPU */ ++#define PVRSRV_PHYS_HEAP_CPU_LOCAL 2U /* used for buffers with more CPU access than GPU */ ++#define PVRSRV_PHYS_HEAP_GPU_PRIVATE 3U /* used for buffers that only require GPU read/write access, not visible to the CPU.
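++ Like the other client heaps, it falls back to the default GPU_LOCAL heap when the platform configuration does not provide it (see the note above).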
*/ ++ ++#define HEAPSTR(x) #x ++static inline const IMG_CHAR *PVRSRVGetClientPhysHeapName(PVRSRV_PHYS_HEAP ePhysHeapID) ++{ ++ switch (ePhysHeapID) ++ { ++ case PVRSRV_PHYS_HEAP_DEFAULT: ++ return HEAPSTR(PVRSRV_PHYS_HEAP_DEFAULT); ++ case PVRSRV_PHYS_HEAP_GPU_LOCAL: ++ return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_LOCAL); ++ case PVRSRV_PHYS_HEAP_CPU_LOCAL: ++ return HEAPSTR(PVRSRV_PHYS_HEAP_CPU_LOCAL); ++ case PVRSRV_PHYS_HEAP_GPU_PRIVATE: ++ return HEAPSTR(PVRSRV_PHYS_HEAP_GPU_PRIVATE); ++ default: ++ return "Unknown Heap"; ++ } ++} ++ ++/* Services internal heaps */ ++#define PVRSRV_PHYS_HEAP_FW_MAIN 4U /* runtime data, e.g. CCBs, sync objects */ ++#define PVRSRV_PHYS_HEAP_EXTERNAL 5U /* used by some PMR import/export factories where the physical memory heap is not managed by the pvrsrv driver */ ++#define PVRSRV_PHYS_HEAP_GPU_COHERENT 6U /* used for a cache coherent region */ ++#define PVRSRV_PHYS_HEAP_GPU_SECURE 7U /* used by security validation */ ++#define PVRSRV_PHYS_HEAP_FW_CONFIG 8U /* subheap of FW_MAIN, configuration data for FW init */ ++#define PVRSRV_PHYS_HEAP_FW_CODE 9U /* used by security validation or dedicated fw */ ++#define PVRSRV_PHYS_HEAP_FW_PRIV_DATA 10U /* internal FW data (like the stack, FW control data structures, etc.) */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP0 11U /* Host OS premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP1 12U /* Guest OS 1 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP2 13U /* Guest OS 2 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP3 14U /* Guest OS 3 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP4 15U /* Guest OS 4 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP5 16U /* Guest OS 5 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP6 17U /* Guest OS 6 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_FW_PREMAP7 18U /* Guest OS 7 premap fw heap */ ++#define PVRSRV_PHYS_HEAP_LAST 19U ++ ++ ++static_assert(PVRSRV_PHYS_HEAP_LAST <= (0x1FU + 1U), "Ensure enum fits in memalloc flags bitfield."); ++ ++/*! Type conveys the class of physical heap to instantiate within Services ++ * for the physical pool of memory. */ ++typedef enum _PHYS_HEAP_TYPE_ ++{ ++ PHYS_HEAP_TYPE_UNKNOWN = 0, /*!< Not a valid value for any config */ ++ PHYS_HEAP_TYPE_UMA, /*!< Heap represents OS managed physical memory heap ++ i.e. system RAM. Unified Memory Architecture ++ physmem_osmem PMR factory */ ++ PHYS_HEAP_TYPE_LMA, /*!< Heap represents physical memory pool managed by ++ Services i.e. carve out from system RAM or local ++ card memory. Local Memory Architecture ++ physmem_lma PMR factory */ ++#if defined(__KERNEL__) ++ PHYS_HEAP_TYPE_DMA, /*!< Heap represents a physical memory pool managed by ++ Services, alias of LMA and is only used on ++ VZ non-native system configurations for ++ a heap used for PHYS_HEAP_USAGE_FW_MAIN tagged ++ buffers */ ++#if defined(SUPPORT_WRAP_EXTMEMOBJECT) ++ PHYS_HEAP_TYPE_WRAP, /*!< Heap used to group UM buffers given ++ to Services. Integrity OS port only. */ ++#endif ++#endif ++} PHYS_HEAP_TYPE; ++ ++/* Defines used when interpreting the ui32PhysHeapFlags in PHYS_HEAP_MEM_STATS ++ 0x000000000000dttt ++ d = is this the default heap? 
(1=yes, 0=no) ++ ttt = heap type (000 = PHYS_HEAP_TYPE_UNKNOWN, ++ 001 = PHYS_HEAP_TYPE_UMA, ++ 010 = PHYS_HEAP_TYPE_LMA, ++ 011 = PHYS_HEAP_TYPE_DMA) ++*/ ++#define PVRSRV_PHYS_HEAP_FLAGS_TYPE_MASK (0x7U << 0) ++#define PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT (0x1U << 7) ++ ++typedef struct PHYS_HEAP_MEM_STATS_TAG ++{ ++ IMG_UINT64 ui64TotalSize; ++ IMG_UINT64 ui64FreeSize; ++ IMG_UINT32 ui32PhysHeapFlags; ++}PHYS_HEAP_MEM_STATS, *PHYS_HEAP_MEM_STATS_PTR; ++ ++typedef struct PHYS_HEAP_MEM_STATS_PKD_TAG ++{ ++ IMG_UINT64 ui64TotalSize; ++ IMG_UINT64 ui64FreeSize; ++ IMG_UINT32 ui32PhysHeapFlags; ++ IMG_UINT32 ui32Dummy; ++}PHYS_HEAP_MEM_STATS_PKD, *PHYS_HEAP_MEM_STATS_PKD_PTR; ++ ++static inline const IMG_CHAR *PVRSRVGetClientPhysHeapTypeName(PHYS_HEAP_TYPE ePhysHeapType) ++{ ++ switch (ePhysHeapType) ++ { ++ case PHYS_HEAP_TYPE_UMA: ++ return HEAPSTR(PHYS_HEAP_TYPE_UMA); ++ case PHYS_HEAP_TYPE_LMA: ++ return HEAPSTR(PHYS_HEAP_TYPE_LMA); ++ default: ++ return "Unknown Heap Type"; ++ } ++} ++#undef HEAPSTR ++ ++#endif /* PVRSRV_MEMALLOC_PHYSHEAP_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h b/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h +new file mode 100644 +index 000000000000..3b87dbf498d5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags.h +@@ -0,0 +1,969 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This file defines flags used on memory allocations and mappings ++ These flags are relevant throughout the memory management ++ software stack and are specified by users of services and ++ understood by all levels of the memory management in both ++ client and server. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
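++
++As a brief sketch (psStats is a hypothetical pointer to a populated
++PHYS_HEAP_MEM_STATS from pvrsrv_memalloc_physheap.h above), the
++ui32PhysHeapFlags field decodes with the two masks defined next to it:
++
++    PHYS_HEAP_TYPE eType = (PHYS_HEAP_TYPE)
++        (psStats->ui32PhysHeapFlags & PVRSRV_PHYS_HEAP_FLAGS_TYPE_MASK);
++    IMG_BOOL bIsDefault =
++        (psStats->ui32PhysHeapFlags & PVRSRV_PHYS_HEAP_FLAGS_IS_DEFAULT) != 0U;
++
++PVRSRVGetClientPhysHeapTypeName(eType) then gives the printable type name.
++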
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVRSRV_MEMALLOCFLAGS_H ++#define PVRSRV_MEMALLOCFLAGS_H ++ ++#include "img_types.h" ++#include "pvrsrv_memalloc_physheap.h" ++ ++/*! ++ Type for specifying memory allocation flags. ++ */ ++ ++typedef IMG_UINT64 PVRSRV_MEMALLOCFLAGS_T; ++#define PVRSRV_MEMALLOCFLAGS_FMTSPEC IMG_UINT64_FMTSPECx ++ ++#if defined(__KERNEL__) ++#include "pvrsrv_memallocflags_internal.h" ++#endif /* __KERNEL__ */ ++ ++/* ++ * --- MAPPING FLAGS 0..14 (15-bits) --- ++ * | 0-3 | 4-7 | 8-10 | 11-13 | 14 | ++ * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable | ++ * ++ * --- MISC FLAGS 15..23 (9-bits) --- ++ * | 15 | 17 | 18 | 19 | 20 | ++ * | Defer | SVM | Sparse-Dummy-Page | CPU-Cache-Clean | Sparse-Zero-Page | ++ * ++ * --- DEV CONTROL FLAGS 26..27 (2-bits) --- ++ * | 26-27 | ++ * | Device-Flags | ++ * ++ * --- MISC FLAGS 28..31 (4-bits) --- ++ * | 28 | 29 | 30 | 31 | ++ * | No-Cache-Align | Poison-On-Free | P.-On-Alloc | Zero-On-Alloc | ++ * ++ * --- VALIDATION FLAGS --- ++ * | 35 | ++ * | Shared-buffer | ++ * ++ * --- PHYS HEAP HINTS --- ++ * | 59-63 | ++ * | PhysHeap Hints | ++ * ++ */ ++ ++/* ++ * ********************************************************** ++ * * * ++ * * MAPPING FLAGS * ++ * * * ++ * ********************************************************** ++ */ ++ ++/*! ++ * This flag affects the device MMU protection flags, and specifies ++ * that the memory may be read by the GPU. ++ * ++ * Typically all device memory allocations would specify this flag. ++ * ++ * At the moment, memory allocations without this flag are not supported ++ * ++ * This flag will live with the PMR, thus subsequent mappings would ++ * honour this flag. ++ * ++ * This is a dual purpose flag. It specifies that memory is permitted ++ * to be read by the GPU, and also requests that the allocation is ++ * mapped into the GPU as a readable mapping ++ * ++ * To be clear: ++ * - When used as an argument on PMR creation; it specifies ++ * that GPU readable mappings will be _permitted_ ++ * - When used as an argument to a "map" function: it specifies ++ * that a GPU readable mapping is _desired_ ++ * - When used as an argument to "AllocDeviceMem": it specifies ++ * that the PMR will be created with permission to be mapped ++ * with a GPU readable mapping, _and_ that this PMR will be ++ * mapped with a GPU readable mapping. ++ * This distinction becomes important when (a) we export allocations; ++ * and (b) when we separate the creation of the PMR from the mapping. ++ */ ++#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (1ULL<<0) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0U) ++ ++/*! 
++ * This flag affects the device MMU protection flags, and specifies ++ * that the memory may be written by the GPU ++ * ++ * Using this flag on an allocation signifies that the allocation is ++ * intended to be written by the GPU. ++ * ++ * Omitting this flag causes a read-only mapping. ++ * ++ * This flag will live with the PMR, thus subsequent mappings would ++ * honour this flag. ++ * ++ * This is a dual purpose flag. It specifies that memory is permitted ++ * to be written by the GPU, and also requests that the allocation is ++ * mapped into the GPU as a writable mapping (see note above about ++ * permission vs. mapping mode, and why this flag causes permissions ++ * to be inferred from mapping mode on first allocation) ++ * ++ * N.B. This flag has no relevance to the CPU's MMU mapping, if any, ++ * and would therefore not enforce read-only mapping on CPU. ++ */ ++#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (1ULL<<1) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0U) ++ ++/*! ++ The flag indicates whether an allocation can be mapped as GPU readable in another GPU memory context. ++ */ ++#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (1ULL<<2) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0U) ++ ++/*! ++ The flag indicates whether an allocation can be mapped as GPU writable in another GPU memory context. ++ */ ++#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1ULL<<3) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0U) ++ ++/*! ++ The flag indicates that an allocation is mapped as readable to the CPU. ++ */ ++#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (1ULL<<4) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0U) ++ ++/*! ++ The flag indicates that an allocation is mapped as writable to the CPU. ++ */ ++#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (1ULL<<5) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0U) ++ ++/*! ++ The flag indicates whether an allocation can be mapped as CPU readable in another CPU memory context. ++ */ ++#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (1ULL<<6) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED flag is set. ++ @Input uiFlags Allocation flags. 
++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0U) ++ ++/*! ++ The flag indicates whether an allocation can be mapped as CPU writable in another CPU memory context. ++ */ ++#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1ULL<<7) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0U) ++ ++ ++/* ++ * ********************************************************** ++ * * * ++ * * CACHE CONTROL FLAGS * ++ * * * ++ * ********************************************************** ++ */ ++ ++/* ++ GPU domain ++ ========== ++ ++ The following defines are used to control the GPU cache bit field. ++ The defines are mutually exclusive. ++ ++ A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU ++ cache bit field from the flags. This should be used whenever the GPU cache ++ mode needs to be determined. ++*/ ++ ++/*! ++ GPU domain. Flag indicating uncached memory. This means that any writes to memory ++ allocated with this flag are written straight to memory and thus are ++ coherent for any device in the system. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (1ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED) ++ ++/*! ++ GPU domain. Use write combiner (if supported) to combine sequential writes ++ together to reduce memory access by doing burst writes. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC (0ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC) ++ ++/*! ++ GPU domain. This flag affects the GPU MMU protection flags. ++ The allocation will be cached. ++ Services will try to set the coherent bit in the GPU MMU tables so the ++ GPU cache is snooping the CPU cache. If coherency is not supported the ++ caller is responsible to ensure the caches are up to date. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (2ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) ++ ++/*! ++ GPU domain. Request cached memory, but not coherent (i.e. no cache ++ snooping). Services will flush the GPU internal caches after every GPU ++ task so no cache maintenance requests from the users are necessary. ++ ++ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future ++ expansion. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (3ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT mode is set. ++ @Input uiFlags Allocation flags. 
++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT) ++ ++/*! ++ GPU domain. This flag is for internal use only and is used to indicate ++ that the underlying allocation should be cached on the GPU after all ++ the snooping and coherent checks have been done ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (7ULL<<8) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_GPU_CACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_GPU_CACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED) ++ ++/*! ++ GPU domain. GPU cache mode mask. ++*/ ++#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (7ULL<<8) ++ ++/*! ++ @Description A helper macro to obtain just the GPU cache bit field from the flags. ++ This should be used whenever the GPU cache mode needs to be determined. ++ @Input uiFlags Allocation flags. ++ @Return Value of the GPU cache bit field. ++ */ ++#define PVRSRV_GPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) ++ ++ ++/* ++ CPU domain ++ ========== ++ ++ The following defines are used to control the CPU cache bit field. ++ The defines are mutually exclusive. ++ ++ A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU ++ cache bit field from the flags. This should be used whenever the CPU cache ++ mode needs to be determined. ++*/ ++ ++/*! ++ CPU domain. Use write combiner (if supported) to combine sequential writes ++ together to reduce memory access by doing burst writes. ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC (0ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC) ++ ++/*! ++ CPU domain. This flag affects the CPU MMU protection flags. ++ The allocation will be cached. ++ Services will try to set the coherent bit in the CPU MMU tables so the ++ CPU cache is snooping the GPU cache. If coherency is not supported the ++ caller is responsible to ensure the caches are up to date. ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (2ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) ++ ++/*! ++ CPU domain. Request cached memory, but not coherent (i.e. no cache ++ snooping). This means that if the allocation needs to transition from ++ one device to another services has to be informed so it can ++ flush/invalidate the appropriate caches. ++ ++ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future ++ expansion. ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (3ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) ++ ++/*! 
++ CPU domain. This flag is for internal use only and is used to indicate ++ that the underlying allocation should be cached on the CPU ++ after all the snooping and coherent checks have been done ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (7ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_CACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED) ++ ++/*! ++ CPU domain. CPU cache mode mask ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (7ULL<<11) ++ ++/*! ++ @Description A helper macro to obtain just the CPU cache bit field from the flags. ++ This should be used whenever the CPU cache mode needs to be determined. ++ @Input uiFlags Allocation flags. ++ @Return Value of the CPU cache bit field. ++ */ ++#define PVRSRV_CPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) ++ ++/* Helper flags for usual cases */ ++ ++/*! ++ * Memory will be write-combined on CPU and GPU ++ */ ++#define PVRSRV_MEMALLOCFLAG_UNCACHED_WC (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED_WC | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED_WC mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED_WC) ++ ++/*! ++ * Memory will be cached. ++ * Services will try to set the correct flags in the MMU tables. ++ * In case there is no coherency support the caller has to ensure caches are up to date */ ++#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_COHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_COHERENT) ++ ++/*! ++ * Memory will be cache-incoherent on CPU and GPU ++ */ ++#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT) ++ ++/*! ++ Cache mode mask ++*/ ++#define PVRSRV_CACHE_MODE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags)) ++ ++ ++/*! ++ CPU MMU Flags mask -- intended for use internal to services only ++ */ ++#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK) ++ ++/*! ++ MMU Flags mask -- intended for use internal to services only - used for ++ partitioning the flags bits and determining which flags to pass down to ++ mmu_common.c ++ */ ++#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \ ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \ ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK) ++ ++/*! ++ Indicates that the PMR created due to this allocation will support ++ in-kernel CPU mappings. 
Only privileged processes may use this flag as ++ it may cause wastage of precious kernel virtual memory on some platforms. ++ */ ++#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (1ULL<<14) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0U) ++ ++ ++ ++/* ++ * ++ * ********************************************************** ++ * * * ++ * * ALLOC MEMORY FLAGS * ++ * * * ++ * ********************************************************** ++ * ++ * (Bits 15) ++ * ++ */ ++#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC (1ULL<<15) ++#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0U) ++ ++/*! ++ Indicates that the allocation will be accessed by the CPU and GPU using ++ the same virtual address, i.e. for all SVM allocs, ++ IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR ++ */ ++#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC (1ULL<<17) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SVM_ALLOC flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_SVM_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0U) ++ ++/*! ++ Indicates the particular memory that's being allocated is sparse and the ++ sparse regions should not be backed by dummy page ++ */ ++#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING (1ULL << 18) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0U) ++ ++/*! ++ Used to force Services to carry out at least one CPU cache invalidate on a ++ CPU cached buffer during allocation of the memory. Applicable to incoherent ++ systems, it must be used for buffers which are CPU cached and which will not ++ be 100% written to by the CPU before the GPU accesses it. For performance ++ reasons, avoid usage if the whole buffer that is allocated is written to by ++ the CPU anyway before the next GPU kick, or if the system is coherent. ++ */ ++#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (1ULL<<19) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0U) ++ ++/*! PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING ++ ++ Indicates the particular memory that's being allocated is sparse and the ++ sparse regions should be backed by zero page. This is different with ++ zero on alloc flag such that only physically unbacked pages are backed ++ by zero page at the time of mapping. ++ The zero backed page is always with read only attribute irrespective of its ++ original attributes. ++ */ ++#define PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING (1ULL << 20) ++#define PVRSRV_IS_SPARSE_ZERO_BACKING_REQUIRED(uiFlags) (((uiFlags) & \ ++ PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) == PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING) ++ ++/*! 
++ @Description Macro extracting the OS id from a variable containing memalloc flags ++ @Input uiFlags Allocation flags ++ @Return returns the value of the FW_ALLOC_OSID bitfield ++ */ ++#define PVRSRV_FW_RAW_ALLOC_OSID(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \ ++ >> PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) ++ ++/*! ++ @Description Macro converting an OS id value into a memalloc bitfield ++ @Input uiFlags OS id ++ @Return returns a shifted bitfield with the OS id value ++ */ ++#define PVRSRV_MEMALLOCFLAG_FW_RAW_ALLOC_OSID(osid) (((osid) << PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_SHIFT) \ ++ & PVRSRV_MEMALLOCFLAG_FW_ALLOC_OSID_MASK) \ ++ ++/* ++ * ++ * ********************************************************** ++ * * * ++ * * MEMORY ZEROING AND POISONING FLAGS * ++ * * * ++ * ********************************************************** ++ * ++ * Zero / Poison, on alloc/free ++ * ++ * We think the following usecases are required: ++ * ++ * don't poison or zero on alloc or free ++ * (normal operation, also most efficient) ++ * poison on alloc ++ * (for helping to highlight bugs) ++ * poison on alloc and free ++ * (for helping to highlight bugs) ++ * zero on alloc ++ * (avoid highlighting security issues in other uses of memory) ++ * zero on alloc and poison on free ++ * (avoid highlighting security issues in other uses of memory, while ++ * helping to highlight a subset of bugs e.g. memory freed prematurely) ++ * ++ * Since there are more than 4, we can't encode this in just two bits, ++ * so we might as well have a separate flag for each of the three ++ * actions. ++ */ ++ ++/*! ++ Ensures that the memory allocated is initialised with zeroes. ++ */ ++#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (1ULL<<31) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0U) ++ ++/*! ++ Scribbles over the allocated memory with a poison value ++ ++ Not compatible with ZERO_ON_ALLOC ++ ++ Poisoning is very deliberately _not_ reflected in PDump as we want ++ a simulation to cry loudly if the initialised data propagates to a ++ result. ++ */ ++#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (1ULL<<30) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0U) ++ ++#if defined(DEBUG) || defined(SERVICES_SC) ++/*! ++ Causes memory to be trashed when freed, used when debugging only, not to be used ++ as a security measure. ++ */ ++#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1ULL<<29) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_POISON_ON_FREE flag is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0U) ++#endif /* DEBUG */ ++ ++/*! ++ Avoid address alignment to a CPU or GPU cache line size. ++ */ ++#define PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN (1ULL<<28) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_CHECK_NO_CACHE_LINE_ALIGN flag is set. ++ @Input uiFlags Allocation flags. 
++ @Return True if the flag is set, false otherwise ++ */ ++#define PVRSRV_CHECK_NO_CACHE_LINE_ALIGN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_CACHE_LINE_ALIGN) != 0U) ++ ++ ++/* ++ * ++ * ********************************************************** ++ * * * ++ * * Device specific MMU flags * ++ * * * ++ * ********************************************************** ++ * ++ * (Bits 26 to 27) ++ * ++ * Some services controlled devices have device specific control bits in ++ * their page table entries, we need to allow these flags to be passed down ++ * the memory management layers so the user can control these bits. ++ * For example, RGX device has the file rgx_memallocflags.h ++ */ ++ ++/*! ++ * Offset of device specific MMU flags. ++ */ ++#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET 26 ++ ++/*! ++ * Mask for retrieving device specific MMU flags. ++ */ ++#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK (0x3ULL << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) ++ ++/*! ++ @Description Helper macro for setting device specific MMU flags. ++ @Input n Flag index. ++ @Return Flag vector with the specified bit set. ++ */ ++#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n) \ ++ (((PVRSRV_MEMALLOCFLAGS_T)(n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \ ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK) ++ ++/* ++ * ++ * ********************************************************** ++ * * * ++ * * Secure validation flags * ++ * * * ++ * ********************************************************** ++ * ++ * (Bit 35) ++ * ++ */ ++ ++/*! ++ PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER ++ */ ++ ++#define PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER (1ULL<<35) ++#define PVRSRV_CHECK_SHARED_BUFFER(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER) != 0U) ++ ++/* ++ * ++ * ********************************************************** ++ * * * ++ * * Phys Heap Hints * ++ * * * ++ * ********************************************************** ++ * ++ * (Bits 59 to 63) ++ * ++ */ ++ ++/*! ++ * Value of enum PVRSRV_PHYS_HEAP stored in memalloc flags. If not set ++ * i.e. PVRSRV_PHYS_HEAP_DEFAULT (value 0) used, the system layer defined default physical heap is used. ++ */ ++#define PVRSRV_PHYS_HEAP_HINT_SHIFT (59) ++#define PVRSRV_PHYS_HEAP_HINT_MASK (0x1FULL << PVRSRV_PHYS_HEAP_HINT_SHIFT) ++ ++ ++/*! ++ @Description Macro extracting the Phys Heap hint from memalloc flag value. ++ @Input uiFlags Allocation flags ++ @Return returns the value of the PHYS_HEAP_HINT bitfield ++ */ ++#define PVRSRV_GET_PHYS_HEAP_HINT(uiFlags) (((uiFlags) & PVRSRV_PHYS_HEAP_HINT_MASK) \ ++ >> PVRSRV_PHYS_HEAP_HINT_SHIFT) ++ ++/*! ++ @Description Macro converting a Phys Heap value into a memalloc bitfield ++ @Input uiFlags Device Phys Heap ++ @Return returns a shifted bitfield with the Device Phys Heap value ++ */ ++#define PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap) ((((PVRSRV_MEMALLOCFLAGS_T)PVRSRV_PHYS_HEAP_ ## PhysHeap) << \ ++ PVRSRV_PHYS_HEAP_HINT_SHIFT) \ ++ & PVRSRV_PHYS_HEAP_HINT_MASK) ++/*! ++ @Description Macro to replace an existing phys heap hint value in flags. ++ @Input PhysHeap Phys Heap Macro ++ @Input uiFlags Allocation flags ++ @Return N/A ++ */ ++#define PVRSRV_SET_PHYS_HEAP_HINT(PhysHeap, uiFlags) (uiFlags) = ((uiFlags) & ~PVRSRV_PHYS_HEAP_HINT_MASK) | \ ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(PhysHeap) ++ ++/*! ++ @Description Macro to replace an existing phys heap hint value using Phys Heap value. 
++ @Input PhysHeap Phys Heap Value ++ @Input uiFlags Allocation flags ++ @Return N/A ++ */ ++#define PVRSRV_CHANGE_PHYS_HEAP_HINT(Physheap, uiFlags) (uiFlags) = ((uiFlags) & ~PVRSRV_PHYS_HEAP_HINT_MASK) | \ ++ (((PVRSRV_MEMALLOCFLAGS_T)(Physheap) << \ ++ PVRSRV_PHYS_HEAP_HINT_SHIFT) \ ++ & PVRSRV_PHYS_HEAP_HINT_MASK) ++ ++/*! ++ @Description Macros checking if a Phys Heap hint is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the hint is set, false otherwise ++ */ ++#define PVRSRV_CHECK_PHYS_HEAP(PhysHeap, uiFlags) (PVRSRV_PHYS_HEAP_ ## PhysHeap == PVRSRV_GET_PHYS_HEAP_HINT(uiFlags)) ++ ++#define PVRSRV_CHECK_FW_MAIN(uiFlags) (PVRSRV_CHECK_PHYS_HEAP(FW_MAIN, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_CONFIG, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_CODE, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_PRIV_DATA, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP0, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP1, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP2, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP3, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP4, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP5, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP6, uiFlags) || \ ++ PVRSRV_CHECK_PHYS_HEAP(FW_PREMAP7, uiFlags)) ++ ++/*! ++ * Secure buffer mask -- Flags in the mask are allowed for secure buffers ++ * because they are not related to CPU mappings. ++ */ ++#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \ ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ ++ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ ++ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) ++ ++/*! ++ * Trusted device mask -- Flags in the mask are allowed for trusted device ++ * because the driver cannot access the memory ++ */ ++#if defined(DEBUG) || defined(SERVICES_SC) ++#define PVRSRV_MEMALLOCFLAGS_TDFWMASK ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ ++ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ ++ PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) ++#else ++#define PVRSRV_MEMALLOCFLAGS_TDFWMASK ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \ ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ ++ PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) ++#endif ++ ++/*! ++ PMR flags mask -- for internal services use only. This is the set of flags ++ that will be passed down and stored with the PMR, this also includes the ++ MMU flags which the PMR has to pass down to mm_common.c at PMRMap time. 
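++
++ For example (an illustrative sketch only -- uiAllocFlags stands for the
++ caller-supplied allocation flags), a server-side path would typically
++ sanitise flags before storing them with the PMR:
++
++     uiPMRFlags = uiAllocFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK;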
++*/
++#if defined(DEBUG) || defined(SERVICES_SC)
++#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
++                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
++                                           PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
++                                           PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
++                                           PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
++                                           PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
++                                           PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \
++                                           PVRSRV_PHYS_HEAP_HINT_MASK)
++#else
++#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
++                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
++                                           PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
++                                           PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
++                                           PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
++                                           PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER | \
++                                           PVRSRV_PHYS_HEAP_HINT_MASK)
++#endif
++
++/*!
++ * CPU mappable mask -- Any flag set in the mask requires memory to be CPU mappable
++ */
++#define PVRSRV_MEMALLOCFLAGS_CPU_MAPPABLE_MASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
++                                                PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
++                                                PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                                PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                                PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE)
++/*!
++ RA differentiation mask
++
++ For internal Services use only.
++
++ This is the set of flag bits that determines whether a pair of
++ allocations is permitted to live in the same page table. Allocations
++ whose flags differ in any of these places are allocated from separate
++ RA Imports and therefore never coexist in the same page.
++ Zeroing and poisoning of memory are special cases: the caller is
++ responsible for setting each sub-allocation to the required value,
++ since differentiating between zeroed and poisoned RA Imports makes no
++ sense when the memory might be reused.
++
++*/
++#if defined(DEBUG) || defined(SERVICES_SC)
++#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
++                                                      & \
++                                                      ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
++                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_FREE))
++#else
++#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
++                                                      & \
++                                                      ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                                        PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
++#endif
++/*!
++ Flags that affect _allocation_
++*/
++#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU)
++
++/*!
++ Flags that affect _mapping_
++*/
++#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
++                                                  PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
++                                                  PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
++                                                  PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
++                                                  PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
++                                                  PVRSRV_MEMALLOCFLAG_SPARSE_ZERO_BACKING | \
++                                                  PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
++
++#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0U)
++#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
++#endif
++
++
++/*!
++ Flags that affect _physical allocations_ in the DevMemX API ++ */ ++#if defined(DEBUG) || defined(SERVICES_SC) ++#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ ++ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \ ++ PVRSRV_PHYS_HEAP_HINT_MASK) ++#else ++#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \ ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK | \ ++ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \ ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \ ++ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \ ++ PVRSRV_PHYS_HEAP_HINT_MASK) ++#endif ++ ++/*! ++ Flags that affect _virtual allocations_ in the DevMemX API ++ */ ++#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \ ++ PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \ ++ PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) ++ ++#endif /* PVRSRV_MEMALLOCFLAGS_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags_internal.h b/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags_internal.h +new file mode 100644 +index 000000000000..4fee3d49b927 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_memallocflags_internal.h +@@ -0,0 +1,78 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device Memory Management allocation flags for internal Services ++ use only ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This file defines flags used on memory allocations and mappings ++ These flags are relevant throughout the memory management ++ software stack and are specified by users of services and ++ understood by all levels of the memory management in the server ++ and in special cases in the client. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. 
++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef PVRSRV_MEMALLOCFLAGS_INTERNAL_H ++#define PVRSRV_MEMALLOCFLAGS_INTERNAL_H ++ ++/*! ++ CPU domain. Request uncached memory. This means that any writes to memory ++ allocated with this flag are written straight to memory and thus are ++ coherent for any device in the system. ++*/ ++#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED (1ULL<<11) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_CPU_UNCACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) ++ ++/*! ++ * Memory will be uncached on CPU and GPU ++ */ ++#define PVRSRV_MEMALLOCFLAG_UNCACHED (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED) ++ ++/*! ++ @Description Macro checking whether the PVRSRV_MEMALLOCFLAG_UNCACHED mode is set. ++ @Input uiFlags Allocation flags. ++ @Return True if the mode is set, false otherwise ++ */ ++#define PVRSRV_CHECK_UNCACHED(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED) ++ ++#endif /* PVRSRV_MEMALLOCFLAGS_INTERNAL_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_pool.c b/drivers/gpu/drm/img-rogue/pvrsrv_pool.c +new file mode 100644 +index 000000000000..d62a062a944c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_pool.c +@@ -0,0 +1,260 @@ ++/**************************************************************************/ /*! ++@File ++@Title Services pool implementation ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Provides a generic pool implementation ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "pvrsrv.h" ++#include "lock.h" ++#include "dllist.h" ++#include "allocmem.h" ++ ++struct _PVRSRV_POOL_ ++{ ++ POS_LOCK hLock; ++ /* total max number of permitted entries in the pool */ ++ IMG_UINT uiMaxEntries; ++ /* currently number of pool entries created. these may be in the pool ++ * or in-use ++ */ ++ IMG_UINT uiNumBusy; ++ /* number of not-in-use entries currently free in the pool */ ++ IMG_UINT uiNumFree; ++ ++ DLLIST_NODE sFreeList; ++ ++ const IMG_CHAR *pszName; ++ ++ PVRSRV_POOL_ALLOC_FUNC *pfnAlloc; ++ PVRSRV_POOL_FREE_FUNC *pfnFree; ++ void *pvPrivData; ++}; ++ ++typedef struct _PVRSRV_POOL_ENTRY_ ++{ ++ DLLIST_NODE sNode; ++ void *pvData; ++} PVRSRV_POOL_ENTRY; ++ ++PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc, ++ PVRSRV_POOL_FREE_FUNC *pfnFree, ++ IMG_UINT32 ui32MaxEntries, ++ const IMG_CHAR *pszName, ++ void *pvPrivData, ++ PVRSRV_POOL **ppsPool) ++{ ++ PVRSRV_POOL *psPool; ++ PVRSRV_ERROR eError; ++ ++ psPool = OSAllocMem(sizeof(PVRSRV_POOL)); ++ PVR_GOTO_IF_NOMEM(psPool, eError, err_alloc); ++ ++ eError = OSLockCreate(&psPool->hLock); ++ ++ PVR_GOTO_IF_ERROR(eError, err_lock_create); ++ ++ psPool->uiMaxEntries = ui32MaxEntries; ++ psPool->uiNumBusy = 0; ++ psPool->uiNumFree = 0; ++ psPool->pfnAlloc = pfnAlloc; ++ psPool->pfnFree = pfnFree; ++ psPool->pvPrivData = pvPrivData; ++ psPool->pszName = pszName; ++ ++ dllist_init(&psPool->sFreeList); ++ ++ *ppsPool = psPool; ++ ++ return PVRSRV_OK; ++ ++err_lock_create: ++ OSFreeMem(psPool); ++err_alloc: ++ return eError; ++} ++ ++static PVRSRV_ERROR _DestroyPoolEntry(PVRSRV_POOL *psPool, ++ PVRSRV_POOL_ENTRY *psEntry) ++{ ++ psPool->pfnFree(psPool->pvPrivData, psEntry->pvData); ++ OSFreeMem(psEntry); ++ ++ return PVRSRV_OK; ++} ++ ++void PVRSRVPoolDestroy(PVRSRV_POOL *psPool) ++{ ++ if (psPool->uiNumBusy != 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to destroy pool %s " ++ "with %u entries still in use", ++ __func__, ++ psPool->pszName, ++ psPool->uiNumBusy)); ++ return; ++ } ++ ++ OSLockDestroy(psPool->hLock); ++ ++ if (psPool->uiNumFree) ++ { ++ PVRSRV_POOL_ENTRY *psEntry; ++ DLLIST_NODE *psChosenNode; ++ ++ psChosenNode = dllist_get_next_node(&psPool->sFreeList); ++ ++ while (psChosenNode) ++ { ++ 
dllist_remove_node(psChosenNode);
++
++			psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
++			_DestroyPoolEntry(psPool, psEntry);
++
++			psPool->uiNumFree--;
++
++			psChosenNode = dllist_get_next_node(&psPool->sFreeList);
++		}
++
++		PVR_ASSERT(psPool->uiNumFree == 0);
++	}
++
++	OSFreeMem(psPool);
++}
++
++static PVRSRV_ERROR _CreateNewPoolEntry(PVRSRV_POOL *psPool,
++                                        PVRSRV_POOL_ENTRY **ppsEntry)
++{
++	PVRSRV_POOL_ENTRY *psNewEntry;
++	PVRSRV_ERROR eError;
++
++	psNewEntry = OSAllocMem(sizeof(PVRSRV_POOL_ENTRY));
++	PVR_GOTO_IF_NOMEM(psNewEntry, eError, err_allocmem);
++
++	dllist_init(&psNewEntry->sNode);
++
++	eError = psPool->pfnAlloc(psPool->pvPrivData, &psNewEntry->pvData);
++
++	PVR_GOTO_IF_ERROR(eError, err_pfn_alloc);
++
++	*ppsEntry = psNewEntry;
++
++	return PVRSRV_OK;
++
++err_pfn_alloc:
++	OSFreeMem(psNewEntry);
++err_allocmem:
++	return eError;
++}
++
++PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
++                           PVRSRV_POOL_TOKEN *hToken,
++                           void **ppvDataOut)
++{
++	PVRSRV_POOL_ENTRY *psEntry;
++	PVRSRV_ERROR eError = PVRSRV_OK;
++	DLLIST_NODE *psChosenNode;
++
++	OSLockAcquire(psPool->hLock);
++
++	psChosenNode = dllist_get_next_node(&psPool->sFreeList);
++	if (unlikely(psChosenNode == NULL))
++	{
++		/* No available elements in the pool, so try to create one. */
++
++		eError = _CreateNewPoolEntry(psPool, &psEntry);
++
++		PVR_GOTO_IF_ERROR(eError, out_unlock);
++	}
++	else
++	{
++		dllist_remove_node(psChosenNode);
++
++		psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
++
++		psPool->uiNumFree--;
++	}
++
++#if defined(DEBUG) || defined(SUPPORT_VALIDATION)
++	/* Don't poison the IN buffer as that is copied from the client and
++	 * would be a waste of cycles.
++	 */
++	OSCachedMemSet(((IMG_PBYTE)psEntry->pvData)+PVRSRV_MAX_BRIDGE_IN_SIZE,
++	               PVRSRV_POISON_ON_ALLOC_VALUE, PVRSRV_MAX_BRIDGE_OUT_SIZE);
++#endif
++
++	psPool->uiNumBusy++;
++	*hToken = psEntry;
++	*ppvDataOut = psEntry->pvData;
++
++out_unlock:
++	OSLockRelease(psPool->hLock);
++	return eError;
++}
++
++PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, PVRSRV_POOL_TOKEN hToken)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++	PVRSRV_POOL_ENTRY *psEntry = hToken;
++
++	PVR_ASSERT(psPool->uiNumBusy > 0);
++
++	OSLockAcquire(psPool->hLock);
++
++	/* Put this entry in the pool if the pool has space,
++	 * otherwise free it.
++	 */
++	if (psPool->uiNumFree < psPool->uiMaxEntries)
++	{
++		dllist_add_to_tail(&psPool->sFreeList, &psEntry->sNode);
++		psPool->uiNumFree++;
++	}
++	else
++	{
++		eError = _DestroyPoolEntry(psPool, psEntry);
++	}
++
++	psPool->uiNumBusy--;
++
++	OSLockRelease(psPool->hLock);
++
++	return eError;
++}
+diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_pool.h b/drivers/gpu/drm/img-rogue/pvrsrv_pool.h
+new file mode 100644
+index 000000000000..2272fc50ce6e
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pvrsrv_pool.h
+@@ -0,0 +1,135 @@
++/**************************************************************************/ /*!
++@File
++@Title Services pool implementation
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Provides a generic pool implementation.
++ Entries are dynamically retrieved from and returned to the
++ pool with the function pair PVRSRVPoolGet/PVRSRVPoolPut.
++ Entries are created lazily, i.e. not until first use.
++ The pool API accepts an allocation/free function pair;
++ the allocation function allocates an entry's private data,
++ which is returned to the caller on every entry 'Get'.
++ The pool keeps up to ui32MaxEntries entries allocated.
++ Any entry that is 'Put' back when the pool already holds
++ that many free entries is freed on the spot instead of
++ being returned to the pool.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /***************************************************************************/
++
++#if !defined(PVRSRVPOOL_H)
++#define PVRSRVPOOL_H
++
++/**************************************************************************/ /*!
++ @Description Callback function called during creation of a new element. This
++ function allocates an object that will be stored in the pool.
++ The object can be retrieved from the pool by calling
++ PVRSRVPoolGet.
++ @Input pvPrivData Private data passed to the alloc function.
++ @Output pvOut Allocated object.
++ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
++*/ /***************************************************************************/
++typedef PVRSRV_ERROR (PVRSRV_POOL_ALLOC_FUNC)(void *pvPrivData, void **pvOut);
++
++/**************************************************************************/ /*!
++ @Description Callback function called to free the object allocated by
++ the counterpart alloc function.
++ @Input pvPrivData Private data passed to the free function.
++ @Input pvFreeData Object allocated by PVRSRV_POOL_ALLOC_FUNC.
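++
++ A minimal callback pair for use with PVRSRVPoolCreate might look like
++ this (an illustrative sketch only; MY_ENTRY is a hypothetical payload
++ type):
++
++     static PVRSRV_ERROR MyAlloc(void *pvPrivData, void **ppvOut)
++     {
++         PVR_UNREFERENCED_PARAMETER(pvPrivData);
++         *ppvOut = OSAllocMem(sizeof(MY_ENTRY));
++         return (*ppvOut != NULL) ? PVRSRV_OK : PVRSRV_ERROR_OUT_OF_MEMORY;
++     }
++
++     static void MyFree(void *pvPrivData, void *pvFreeData)
++     {
++         PVR_UNREFERENCED_PARAMETER(pvPrivData);
++         OSFreeMem(pvFreeData);
++     }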
++*/ /***************************************************************************/
++typedef void (PVRSRV_POOL_FREE_FUNC)(void *pvPrivData, void *pvFreeData);
++
++typedef IMG_HANDLE PVRSRV_POOL_TOKEN;
++
++typedef struct _PVRSRV_POOL_ PVRSRV_POOL;
++
++/**************************************************************************/ /*!
++ @Function PVRSRVPoolCreate
++ @Description Creates a new buffer pool.
++ @Input pfnAlloc Allocation function pointer. Function is used
++ to allocate new pool entries' data.
++ @Input pfnFree Free function pointer. Function is used to
++ free memory allocated by the pfnAlloc function.
++ @Input ui32MaxEntries Total maximum number of entries in the pool.
++ @Input pszName Name of the pool. String has to be
++ null-terminated.
++ @Input pvPrivData Private data that will be passed to the
++ pfnAlloc and pfnFree functions.
++ @Output ppsPool New buffer pool object.
++ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
++*/ /***************************************************************************/
++PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
++                              PVRSRV_POOL_FREE_FUNC *pfnFree,
++                              IMG_UINT32 ui32MaxEntries,
++                              const IMG_CHAR *pszName,
++                              void *pvPrivData,
++                              PVRSRV_POOL **ppsPool);
++
++/**************************************************************************/ /*!
++ @Function PVRSRVPoolDestroy
++ @Description Destroys a pool created by PVRSRVPoolCreate.
++ @Input psPool Buffer pool object meant to be destroyed.
++*/ /***************************************************************************/
++void PVRSRVPoolDestroy(PVRSRV_POOL *psPool);
++
++/**************************************************************************/ /*!
++ @Function PVRSRVPoolGet
++ @Description Retrieves an entry from a pool. If no free elements are
++ available, a new entry will be allocated.
++ @Input psPool Pointer to the pool.
++ @Output hToken Pointer to the entry handle.
++ @Output ppvDataOut Pointer to data stored in the entry (the data
++ allocated by the pfnAlloc function).
++ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
++*/ /***************************************************************************/
++PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
++                           PVRSRV_POOL_TOKEN *hToken,
++                           void **ppvDataOut);
++
++/**************************************************************************/ /*!
++ @Function PVRSRVPoolPut
++ @Description Returns an entry to the pool. If the pool already holds
++ ui32MaxEntries free entries (the limit set at pool
++ creation), the entry is freed instead.
++ @Input psPool Pointer to the pool.
++ @Input hToken Entry handle.
++ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
++*/ /***************************************************************************/
++PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool,
++                           PVRSRV_POOL_TOKEN hToken);
++
++#endif /* PVRSRVPOOL_H */
+diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h b/drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h
+new file mode 100644
+index 000000000000..04611f9f7cee
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/pvrsrv_sync_km.h
+@@ -0,0 +1,65 @@
++/*************************************************************************/ /*!
++@File
++@Title PVR synchronisation interface
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Types for server-side code
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef PVRSRV_SYNC_KM_H ++#define PVRSRV_SYNC_KM_H ++ ++#include ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#define SYNC_FB_FILE_STRING_MAX 256 ++#define SYNC_FB_MODULE_STRING_LEN_MAX (32) ++#define SYNC_FB_DESC_STRING_LEN_MAX (32) ++ ++/* By default, fence-sync module emits into HWPerf (of course, if enabled) and ++ * considers a process (sleepable) context */ ++#define PVRSRV_FENCE_FLAG_NONE (0U) ++#define PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT (1U << 0) ++#define PVRSRV_FENCE_FLAG_CTX_ATOMIC (1U << 1) ++ ++#if defined(__cplusplus) ++} ++#endif ++#endif /* PVRSRV_SYNC_KM_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h b/drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h +new file mode 100644 +index 000000000000..5d1a10cd0a9c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_sync_server.h +@@ -0,0 +1,278 @@ ++/**************************************************************************/ /*! ++@File ++@Title Fence sync server interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /***************************************************************************/ ++ ++#ifndef PVRSRV_SYNC_SERVER_H ++#define PVRSRV_SYNC_SERVER_H ++ ++#if defined(SUPPORT_FALLBACK_FENCE_SYNC) ++#include "sync_fallback_server.h" ++#include "pvr_notifier.h" ++#include "img_types.h" ++#include "pvrsrv_sync_km.h" ++#elif defined(SUPPORT_NATIVE_FENCE_SYNC) ++#include "pvr_sync.h" ++#endif ++ ++#include "rgxhwperf.h" ++ ++#define SYNC_SW_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH ++#define SYNC_SW_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH ++ ++typedef struct _SYNC_TIMELINE_OBJ_ ++{ ++ void *pvTlObj; /* Implementation specific timeline object */ ++ ++ PVRSRV_TIMELINE hTimeline; /* Reference to implementation-independent timeline object */ ++} SYNC_TIMELINE_OBJ; ++ ++typedef struct _SYNC_FENCE_OBJ_ ++{ ++ void *pvFenceObj; /* Implementation specific fence object */ ++ ++ PVRSRV_FENCE hFence; /* Reference to implementation-independent fence object */ ++} SYNC_FENCE_OBJ; ++ ++static inline void SyncClearTimelineObj(SYNC_TIMELINE_OBJ *psSTO) ++{ ++ psSTO->pvTlObj = NULL; ++ psSTO->hTimeline = PVRSRV_NO_TIMELINE; ++} ++ ++static inline IMG_BOOL SyncIsTimelineObjValid(const SYNC_TIMELINE_OBJ *psSTO) ++{ ++ return (IMG_BOOL)(psSTO->pvTlObj != NULL); ++} ++ ++static inline void SyncClearFenceObj(SYNC_FENCE_OBJ *psSFO) ++{ ++ psSFO->pvFenceObj = NULL; ++ psSFO->hFence = PVRSRV_NO_FENCE; ++} ++ ++static inline IMG_BOOL SyncIsFenceObjValid(const SYNC_FENCE_OBJ *psSFO) ++{ ++ return (IMG_BOOL)(psSFO->pvFenceObj != NULL); ++} ++ ++ ++/* Mapping of each required function to its appropriate sync-implementation function */ ++#if defined(SUPPORT_FALLBACK_FENCE_SYNC) ++ #define SyncFenceWaitKM_ SyncFbFenceWait ++ #define SyncGetFenceObj_ SyncFbGetFenceObj ++ #define SyncFenceReleaseKM_ SyncFbFenceReleaseKM ++ #define SyncSWTimelineFenceCreateKM_ SyncFbSWTimelineFenceCreateKM ++ #define SyncSWTimelineAdvanceKM_ SyncFbSWTimelineAdvanceKM ++ #define SyncSWGetTimelineObj_ SyncFbSWGetTimelineObj ++ #define SyncSWTimelineReleaseKM_ SyncFbTimelineRelease ++ #define SyncDumpFence_ SyncFbDumpFenceKM ++ #define SyncSWDumpTimeline_ SyncFbSWDumpTimelineKM ++#elif defined(SUPPORT_NATIVE_FENCE_SYNC) ++ #define SyncFenceWaitKM_ pvr_sync_fence_wait ++ #define SyncGetFenceObj_ pvr_sync_fence_get ++ #define SyncFenceReleaseKM_ pvr_sync_fence_release ++ #define SyncSWTimelineFenceCreateKM_ pvr_sync_sw_timeline_fence_create ++ #define SyncSWTimelineAdvanceKM_ pvr_sync_sw_timeline_advance ++ #define SyncSWGetTimelineObj_ pvr_sync_sw_timeline_get ++ #define SyncSWTimelineReleaseKM_ pvr_sync_sw_timeline_release ++ #define SyncDumpFence_ sync_dump_fence ++ #define SyncSWDumpTimeline_ sync_sw_dump_timeline ++#endif ++ ++/*************************************************************************/ /*! ++@Function SyncFenceWaitKM ++ ++@Description Wait for all the sync points in the fence to be signalled. 
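++
++ A typical call sequence (an illustrative sketch only: error handling
++ is elided, iFence and psDevNode are assumed to be in scope, and the
++ 1000 ms timeout is an arbitrary choice):
++
++     SYNC_FENCE_OBJ sFenceObj;
++
++     if (SyncGetFenceObj(iFence, &sFenceObj) == PVRSRV_OK)
++     {
++         (void) SyncFenceWaitKM(psDevNode, &sFenceObj, 1000U);
++         (void) SyncFenceReleaseKM(&sFenceObj);
++     }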
++
++@Input psFenceObj Fence to wait on
++
++@Input ui32TimeoutInMs Maximum time to wait (in milliseconds)
++
++@Return PVRSRV_OK once the fence has been passed (all
++ contained check points have either
++ signalled or errored)
++ PVRSRV_ERROR_TIMEOUT if the poll has exceeded the timeout
++ PVRSRV_ERROR_FAILED_DEPENDENCIES Other sync-impl specific error
++*/ /**************************************************************************/
++static inline PVRSRV_ERROR
++SyncFenceWaitKM(PVRSRV_DEVICE_NODE *psDevNode,
++                const SYNC_FENCE_OBJ *psFenceObj,
++                IMG_UINT32 ui32TimeoutInMs)
++{
++	PVRSRV_ERROR eError;
++
++	RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice,
++	                              BEGIN,
++	                              OSGetCurrentProcessID(),
++	                              psFenceObj->hFence,
++	                              ui32TimeoutInMs);
++
++	eError = SyncFenceWaitKM_(psFenceObj->pvFenceObj, ui32TimeoutInMs);
++
++	RGXSRV_HWPERF_SYNC_FENCE_WAIT(psDevNode->pvDevice,
++	                              END,
++	                              OSGetCurrentProcessID(),
++	                              psFenceObj->hFence,
++	                              ((eError == PVRSRV_OK) ?
++	                               RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED :
++	                               ((eError == PVRSRV_ERROR_TIMEOUT) ?
++	                                RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT :
++	                                RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR)));
++	return eError;
++}
++
++/*************************************************************************/ /*!
++@Function SyncGetFenceObj
++
++@Description Get the implementation-specific server fence object from the
++ opaque, implementation-independent PVRSRV_FENCE type.
++ When successful, this function takes a reference on the base
++ fence, which must be dropped using SyncFenceReleaseKM once
++ the fence object is no longer in use.
++
++@Input iFence Input opaque fence object
++
++@Output psFenceObj Pointer to implementation specific fence object
++
++@Return PVRSRV_ERROR PVRSRV_OK on success
++*/ /**************************************************************************/
++static inline PVRSRV_ERROR
++SyncGetFenceObj(PVRSRV_FENCE iFence,
++                SYNC_FENCE_OBJ *psFenceObj)
++{
++	psFenceObj->hFence = iFence;
++	return SyncGetFenceObj_(iFence, &psFenceObj->pvFenceObj);
++}
++
++/*************************************************************************/ /*!
++@Function SyncFenceReleaseKM
++
++@Description Release reference on this fence.
++ ++@Input psFenceObj Fence to be released ++ ++@Return PVRSRV_ERROR ++*/ /**************************************************************************/ ++static inline ++PVRSRV_ERROR SyncFenceReleaseKM(const SYNC_FENCE_OBJ *psFenceObj) ++{ ++ return SyncFenceReleaseKM_(psFenceObj->pvFenceObj); ++} ++ ++/*****************************************************************************/ ++/* */ ++/* SW TIMELINE SPECIFIC FUNCTIONS */ ++/* */ ++/*****************************************************************************/ ++ ++static inline PVRSRV_ERROR ++SyncSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDevNode, ++ PVRSRV_TIMELINE hSWTimeline, ++ const IMG_CHAR *pszFenceName, ++ PVRSRV_FENCE *phOutFence) ++{ ++ IMG_UINT64 ui64SyncPtIdx; ++ PVRSRV_ERROR eError; ++ eError = SyncSWTimelineFenceCreateKM_(psDevNode, ++ hSWTimeline, ++ pszFenceName, ++ phOutFence, ++ &ui64SyncPtIdx); ++ if (eError == PVRSRV_OK) ++ { ++ RGXSRV_HWPERF_ALLOC_SW_FENCE(psDevNode, OSGetCurrentProcessID(), ++ *phOutFence, hSWTimeline, ui64SyncPtIdx, ++ pszFenceName, OSStringLength(pszFenceName)); ++ } ++ return eError; ++} ++ ++static inline PVRSRV_ERROR ++SyncSWTimelineAdvanceKM(PVRSRV_DEVICE_NODE *psDevNode, ++ const SYNC_TIMELINE_OBJ *psSWTimelineObj) ++{ ++ IMG_UINT64 ui64SyncPtIdx; ++ PVRSRV_ERROR eError; ++ eError = SyncSWTimelineAdvanceKM_(psSWTimelineObj->pvTlObj, ++ &ui64SyncPtIdx); ++ ++ if (eError == PVRSRV_OK) ++ { ++ RGXSRV_HWPERF_SYNC_SW_TL_ADV(psDevNode->pvDevice, ++ OSGetCurrentProcessID(), ++ psSWTimelineObj->hTimeline, ++ ui64SyncPtIdx); ++ } ++ return eError; ++} ++ ++static inline PVRSRV_ERROR ++SyncSWGetTimelineObj(PVRSRV_TIMELINE hSWTimeline, ++ SYNC_TIMELINE_OBJ *psSWTimelineObj) ++{ ++ psSWTimelineObj->hTimeline = hSWTimeline; ++ return SyncSWGetTimelineObj_(hSWTimeline, &psSWTimelineObj->pvTlObj); ++} ++ ++static inline PVRSRV_ERROR ++SyncSWTimelineReleaseKM(const SYNC_TIMELINE_OBJ *psSWTimelineObj) ++{ ++ return SyncSWTimelineReleaseKM_(psSWTimelineObj->pvTlObj); ++} ++ ++static inline PVRSRV_ERROR ++SyncDumpFence(const SYNC_FENCE_OBJ *psFenceObj, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ return SyncDumpFence_(psFenceObj->pvFenceObj, pfnDumpDebugPrintf, pvDumpDebugFile); ++} ++ ++static inline PVRSRV_ERROR ++SyncSWDumpTimeline(const SYNC_TIMELINE_OBJ *psSWTimelineObj, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ return SyncSWDumpTimeline_(psSWTimelineObj->pvTlObj, pfnDumpDebugPrintf, pvDumpDebugFile); ++} ++ ++ ++#endif /* PVRSRV_SYNC_SERVER_H */ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h b/drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h +new file mode 100644 +index 000000000000..28999e5d21b7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_tlcommon.h +@@ -0,0 +1,260 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services Transport Layer common types and definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Transport layer common types and definitions included into ++ both user mode and kernel mode source. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++#ifndef PVR_TLCOMMON_H
++#define PVR_TLCOMMON_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#include "img_defs.h"
++
++
++/*! Handle type for stream descriptor objects as created by this API */
++typedef IMG_HANDLE PVRSRVTL_SD;
++
++/*! Maximum stream name length including the null byte */
++#define PRVSRVTL_MAX_STREAM_NAME_SIZE 40U
++
++/*! Maximum number of streams expected to exist */
++#define PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER (32*PRVSRVTL_MAX_STREAM_NAME_SIZE)
++
++/*! Packet lengths are always rounded up to a multiple of 8 bytes */
++#define PVRSRVTL_PACKET_ALIGNMENT 8U
++#define PVRSRVTL_ALIGN(x) (((x)+PVRSRVTL_PACKET_ALIGNMENT-1U) & ~(PVRSRVTL_PACKET_ALIGNMENT-1U))
++
++
++/*! A packet is made up of a header structure followed by the data bytes.
++ * There are 3 types of packet: normal (has data), data lost and padding,
++ * see packet flags. Header kept small to reduce data overhead.
++ *
++ * If the ORDER of the structure members is changed, please UPDATE the
++ * PVRSRVTL_PACKETHDR_*_OFFSET macros below.
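++ *
++ * A consumer would normally walk a returned buffer with the GET_* macros
++ * defined below rather than decoding uiTypeSize by hand, e.g. (an
++ * illustrative sketch; ProcessData is a hypothetical handler and the
++ * caller must additionally bound the walk by the acquired buffer length):
++ *
++ *     PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(pbBuf);
++ *     while (GET_PACKET_TYPE(psHdr) != PVRSRVTL_PACKETTYPE_MARKER_EOS)
++ *     {
++ *         if (GET_PACKET_TYPE(psHdr) == PVRSRVTL_PACKETTYPE_DATA)
++ *             ProcessData(GET_PACKET_DATA_PTR(psHdr), GET_PACKET_DATA_LEN(psHdr));
++ *         psHdr = GET_NEXT_PACKET_ADDR(psHdr);
++ *     }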
++ * ++ * Layout of uiTypeSize member is : ++ * ++ * |<---------------------------32-bits------------------------------>| ++ * |<----8---->|<-----1----->|<----7--->|<------------16------------->| ++ * | Type | Drop-Oldest | UNUSED | Size | ++ * ++ */ ++typedef struct ++{ ++ IMG_UINT32 uiTypeSize; /*!< Type, Drop-Oldest flag & number of bytes following header */ ++ IMG_UINT32 uiReserved; /*!< Reserve, packets and data must be 8 byte aligned */ ++ ++ /* First bytes of TL packet data follow header ... */ ++} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR; ++ ++/* Structure must always be a size multiple of 8 as stream buffer ++ * still an array of IMG_UINT32s. ++ */ ++static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8"); ++ ++/*! Packet header reserved word fingerprint "TLP1" */ ++#define PVRSRVTL_PACKETHDR_RESERVED 0x31504C54U ++ ++/*! Packet header mask used to extract the size from the uiTypeSize member. ++ * Do not use directly, see GET macros. ++ */ ++#define PVRSRVTL_PACKETHDR_SIZE_MASK 0x0000FFFFU ++#define PVRSRVTL_MAX_PACKET_SIZE (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU) ++ ++ ++/*! Packet header mask used to extract the type from the uiTypeSize member. ++ * Do not use directly, see GET macros. ++ */ ++#define PVRSRVTL_PACKETHDR_TYPE_MASK 0xFF000000U ++#define PVRSRVTL_PACKETHDR_TYPE_OFFSET 24U ++ ++/*! Packet header mask used to check if packets before this one were dropped ++ * or not. Do not use directly, see GET macros. ++ */ ++#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK 0x00800000U ++#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET 23U ++ ++/*! Packet type enumeration. ++ */ ++typedef IMG_UINT32 PVRSRVTL_PACKETTYPE; ++ ++/*! Undefined packet */ ++#define PVRSRVTL_PACKETTYPE_UNDEF 0U ++ ++/*! Normal packet type. Indicates data follows the header. ++ */ ++#define PVRSRVTL_PACKETTYPE_DATA 1U ++ ++/*! When seen this packet type indicates that at this moment in the stream ++ * packet(s) were not able to be accepted due to space constraints and ++ * that recent data may be lost - depends on how the producer handles the ++ * error. Such packets have no data, data length is 0. ++ */ ++#define PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED 2U ++ ++/*! Packets with this type set are padding packets that contain undefined ++ * data and must be ignored/skipped by the client. They are used when the ++ * circular stream buffer wraps around and there is not enough space for ++ * the data at the end of the buffer. Such packets have a length of 0 or ++ * more. ++ */ ++#define PVRSRVTL_PACKETTYPE_PADDING 3U ++ ++/*! This packet type conveys to the stream consumer that the stream ++ * producer has reached the end of data for that data sequence. The ++ * TLDaemon has several options for processing these packets that can ++ * be selected on a per stream basis. ++ */ ++#define PVRSRVTL_PACKETTYPE_MARKER_EOS 4U ++ ++/*! This is same as PVRSRVTL_PACKETTYPE_MARKER_EOS but additionally removes ++ * old data record output file before opening new/next one ++ */ ++#define PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD 5U ++ ++/*! Packet emitted on first stream opened by writer. Packet carries a name ++ * of the opened stream in a form of null-terminated string. ++ */ ++#define PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE 6U ++ ++/*! Packet emitted on last stream closed by writer. Packet carries a name ++ * of the closed stream in a form of null-terminated string. 
++ */
++#define PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE 7U
++
++#define PVRSRVTL_PACKETTYPE_LAST 8U
++
++/* The SET_PACKET_* macros rely on the order in which the PVRSRVTL_PACKETHDR
++ * members are declared: the type and drop-oldest fields live in the upper
++ * bits of the 32-bit uiTypeSize member.
++ */
++#define PVRSRVTL_SET_PACKET_DATA(len) (len) | (PVRSRVTL_PACKETTYPE_DATA << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++#define PVRSRVTL_SET_PACKET_PADDING(len) (len) | (PVRSRVTL_PACKETTYPE_PADDING << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++#define PVRSRVTL_SET_PACKET_WRITE_FAILED (0U) | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++#define PVRSRVTL_SET_PACKET_HDR(len, type) (len) | ((type) << PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++
++/*! Returns the number of bytes of data in the packet.
++ * p may be any address type.
++ */
++#define GET_PACKET_DATA_LEN(p) \
++	((IMG_UINT32) ((PVRSRVTL_PPACKETHDR) (void *) (p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK)
++
++
++/*! Returns an IMG_BYTE* pointer to the first byte of data in the packet */
++#define GET_PACKET_DATA_PTR(p) \
++	(((IMG_UINT8 *) (void *) (p)) + sizeof(PVRSRVTL_PACKETHDR))
++
++/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type.
++ */
++#define GET_PACKET_HDR(p) ((PVRSRVTL_PPACKETHDR) ((void *) (p)))
++
++/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next packet.
++ * It is up to the caller to determine if the new address is within the
++ * packet buffer.
++ */
++#define GET_NEXT_PACKET_ADDR(p) \
++	GET_PACKET_HDR( \
++		GET_PACKET_DATA_PTR(p) + \
++		( \
++			(GET_PACKET_DATA_LEN(p) + (PVRSRVTL_PACKET_ALIGNMENT-1U)) & \
++			(~(PVRSRVTL_PACKET_ALIGNMENT-1U)) \
++		) \
++	)
++
++/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR.
++ */
++#define GET_PACKET_TYPE(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET)
++
++/*! Set PACKETS_DROPPED flag in packet header as a part of uiTypeSize.
++ * p is of type PVRSRVTL_PPACKETHDR.
++ */
++#define SET_PACKETS_DROPPED(p) (((p)->uiTypeSize) | (1UL << PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET))
++
++/*! Check if packets were dropped before this packet.
++ * p is of type PVRSRVTL_PPACKETHDR.
++ */
++#define CHECK_PACKETS_DROPPED(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)
++
++/*! Flags for use with PVRSRVTLOpenStream
++ * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available.
++ * 0x02 - When the stream does not exist, wait for a bit (2s) in
++ *        PVRSRVTLOpenStream() and then exit with a timeout error if it still
++ *        does not exist.
++ * 0x04 - Open stream for write-only operations.
++ *        If the flag is not used, the stream is opened read-only. This flag
++ *        is required if one wants to call the reserve/commit/write functions
++ *        on the stream descriptor. Reads from a stream descriptor opened
++ *        with this flag will fail.
++ * 0x08 - Disable Producer Callback.
++ *        If this flag is set and the stream becomes empty, do not call any
++ *        associated producer callback to generate more data from the reader
++ *        context.
++ * 0x10 - Reset stream on open.
++ *        When this flag is used, the stream will drop all of the stored data.
++ * 0x20 - Limit read position to the write position at the time the stream
++ *        was opened. Hence this flag will freeze the content read to that
++ *        produced before the stream was opened for reading.
++ * 0x40 - Ignore Open Callback.
++ *        When this flag is set, ignore any OnReaderOpenCallback setting for
++ *        the stream.
This allows access to the stream to be made without ++ * generating any extra packets into the stream. ++ */ ++ ++#define PVRSRV_STREAM_FLAG_NONE (0U) ++#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING (1U<<0) ++#define PVRSRV_STREAM_FLAG_OPEN_WAIT (1U<<1) ++#define PVRSRV_STREAM_FLAG_OPEN_WO (1U<<2) ++#define PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK (1U<<3) ++#define PVRSRV_STREAM_FLAG_RESET_ON_OPEN (1U<<4) ++#define PVRSRV_STREAM_FLAG_READ_LIMIT (1U<<5) ++#define PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK (1U<<6) ++ ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* PVR_TLCOMMON_H */ ++/****************************************************************************** ++ End of file (pvrsrv_tlcommon.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h b/drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h +new file mode 100644 +index 000000000000..9064075ad5c0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrv_tlstreams.h +@@ -0,0 +1,61 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services Transport Layer stream names ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Transport layer common types and definitions included into ++ both user mode and kernel mode source. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef PVRSRV_TLSTREAMS_H ++#define PVRSRV_TLSTREAMS_H ++ ++#define PVRSRV_TL_CTLR_STREAM "tlctrl" ++ ++#define PVRSRV_TL_HWPERF_RGX_FW_STREAM "hwperf_fw_" ++#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_" ++ ++/* Host HWPerf client stream names are of the form 'hwperf_client_' */ ++#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM "hwperf_client_" ++#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u" ++ ++#endif /* PVRSRV_TLSTREAMS_H */ ++ ++/****************************************************************************** ++ End of file (pvrsrv_tlstreams.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/pvrsrvkm.mk b/drivers/gpu/drm/img-rogue/pvrsrvkm.mk +new file mode 100644 +index 000000000000..5d57e765685e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrsrvkm.mk +@@ -0,0 +1,148 @@ ++pvrsrvkm-y += \ ++ client_cache_direct_bridge.o \ ++ server_cache_bridge.o \ ++ server_cmm_bridge.o \ ++ client_devicememhistory_direct_bridge.o \ ++ server_devicememhistory_bridge.o \ ++ server_di_bridge.o \ ++ server_dmabuf_bridge.o \ ++ client_htbuffer_direct_bridge.o \ ++ server_htbuffer_bridge.o \ ++ client_mm_direct_bridge.o \ ++ server_mm_bridge.o \ ++ client_pvrtl_direct_bridge.o \ ++ server_pvrtl_bridge.o \ ++ server_rgxbreakpoint_bridge.o \ ++ server_rgxcmp_bridge.o \ ++ server_rgxfwdbg_bridge.o \ ++ server_rgxhwperf_bridge.o \ ++ server_rgxkicksync_bridge.o \ ++ server_rgxregconfig_bridge.o \ ++ server_rgxta3d_bridge.o \ ++ server_rgxtimerquery_bridge.o \ ++ server_rgxtq2_bridge.o \ ++ server_rgxtq_bridge.o \ ++ server_srvcore_bridge.o \ ++ client_sync_direct_bridge.o \ ++ server_sync_bridge.o \ ++ client_synctracking_direct_bridge.o \ ++ server_synctracking_bridge.o \ ++ cache_km.o \ ++ connection_server.o \ ++ debug_common.o \ ++ devicemem_heapcfg.o \ ++ devicemem_history_server.o \ ++ devicemem_server.o \ ++ di_impl_brg.o \ ++ di_server.o \ ++ handle.o \ ++ htb_debug.o \ ++ htbserver.o \ ++ info_page_km.o \ ++ lists.o \ ++ mmu_common.o \ ++ physheap.o \ ++ physmem.o \ ++ physmem_hostmem.o \ ++ physmem_lma.o \ ++ pmr.o \ ++ power.o \ ++ process_stats.o \ ++ pvr_notifier.o \ ++ pvrsrv.o \ ++ pvrsrv_bridge_init.o \ ++ pvrsrv_pool.o \ ++ srvcore.o \ ++ sync_checkpoint.o \ ++ sync_server.o \ ++ tlintern.o \ ++ tlserver.o \ ++ tlstream.o \ ++ vmm_pvz_client.o \ ++ vmm_pvz_server.o \ ++ vz_vmm_pvz.o \ ++ vz_vmm_vm.o \ ++ rgx_bridge_init.o \ ++ rgxbreakpoint.o \ ++ rgxbvnc.o \ ++ rgxccb.o \ ++ rgxfwdbg.o \ ++ rgxfwimageutils.o \ ++ rgxfwtrace_strings.o \ ++ rgxhwperf_common.o \ ++ rgxkicksync.o \ ++ rgxmem.o \ ++ rgxregconfig.o \ ++ rgxshader.o \ ++ rgxsyncutils.o \ ++ rgxtimecorr.o \ ++ rgxtimerquery.o \ ++ rgxutils.o \ ++ rgxcompute.o \ ++ rgxdebug.o \ ++ rgxfwutils.o \ ++ rgxhwperf.o \ ++ rgxinit.o \ ++ rgxlayer_impl.o \ ++ rgxmipsmmuinit.o \ ++ rgxmmuinit.o \ ++ rgxmulticore.o \ ++ rgxpower.o \ ++ rgxsrvinit.o \ ++ rgxstartstop.o \ ++ rgxta3d.o \ ++ rgxtdmtransfer.o \ ++ rgxtransfer.o \ ++ allocmem.o \ ++ event.o \ ++ fwload.o \ ++ handle_idr.o \ ++ km_apphint.o \ ++ module_common.o \ ++ osconnection_server.o \ ++ osfunc.o \ ++ osmmap_stub.o \ ++ physmem_dmabuf.o \ ++ physmem_osmem_linux.o \ ++ physmem_test.o \ ++ pmr_os.o \ ++ pvr_bridge_k.o \ ++ pvr_buffer_sync.o \ ++ pvr_counting_timeline.o \ ++ pvr_debug.o \ ++ pvr_debugfs.o \ ++ pvr_drm.o \ ++ pvr_fence.o \ ++ pvr_gputrace.o \ ++ pvr_platform_drv.o \ ++ 
pvr_sw_fence.o \ ++ pvr_sync_file.o \ ++ pvr_sync_ioctl_common.o \ ++ pvr_sync_ioctl_drm.o \ ++ devicemem.o \ ++ devicemem_utils.o \ ++ hash.o \ ++ htbuffer.o \ ++ mem_utils.o \ ++ pvrsrv_error.o \ ++ ra.o \ ++ sync.o \ ++ tlclient.o \ ++ uniq_key_splay_tree.o \ ++ rgx_hwperf_table.o \ ++ interrupt_support.o \ ++ pci_support.o \ ++ sysconfig_cmn.o \ ++ dma_support.o \ ++ vmm_type_stub.o \ ++ sysconfig.o \ ++ xuantie_sys.o ++pvrsrvkm-$(CONFIG_DRM_POWERVR_ROGUE_DEBUG) += \ ++ client_ri_direct_bridge.o \ ++ server_ri_bridge.o \ ++ ri_server.o ++pvrsrvkm-$(CONFIG_ARM) += osfunc_arm.o ++pvrsrvkm-$(CONFIG_ARM64) += osfunc_arm64.o ++pvrsrvkm-$(CONFIG_EVENT_TRACING) += trace_events.o ++pvrsrvkm-$(CONFIG_RISCV) += osfunc_riscv.o ++pvrsrvkm-$(CONFIG_X86) += osfunc_x86.o +diff --git a/drivers/gpu/drm/img-rogue/pvrversion.h b/drivers/gpu/drm/img-rogue/pvrversion.h +new file mode 100644 +index 000000000000..c62b3f752b1b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/pvrversion.h +@@ -0,0 +1,68 @@ ++/*************************************************************************/ /*! ++@File pvrversion.h ++@Title PowerVR version numbers and strings. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Version numbers and strings for PowerVR components. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef PVRVERSION_H ++#define PVRVERSION_H ++ ++#define PVRVERSION_MAJ 1U ++#define PVRVERSION_MIN 17U ++ ++#define PVRVERSION_FAMILY "rogueddk" ++#define PVRVERSION_BRANCHNAME "1.17" ++#define PVRVERSION_BUILD 6210866 ++#define PVRVERSION_BSCONTROL "Rogue_DDK_Linux_WS" ++ ++#define PVRVERSION_STRING "Rogue_DDK_Linux_WS rogueddk 1.17@6210866" ++#define PVRVERSION_STRING_SHORT "1.17@6210866" ++ ++#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved." ++ ++#define PVRVERSION_BUILD_HI 621 ++#define PVRVERSION_BUILD_LO 866 ++#define PVRVERSION_STRING_NUMERIC "1.17.621.866" ++ ++#define PVRVERSION_PACK(MAJOR,MINOR) (((IMG_UINT32)((IMG_UINT32)(MAJOR) & 0xFFFFU) << 16U) | (((MINOR) & 0xFFFFU) << 0U)) ++#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16U) & 0xFFFFU) ++#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0U) & 0xFFFFU) ++ ++#endif /* PVRVERSION_H */ +diff --git a/drivers/gpu/drm/img-rogue/ra.c b/drivers/gpu/drm/img-rogue/ra.c +new file mode 100644 +index 000000000000..4c2981e57563 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/ra.c +@@ -0,0 +1,2166 @@ ++/*************************************************************************/ /*! ++@File ++@Title Resource Allocator ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ ++@Description ++ Implements generic resource allocation. 
The resource allocator was originally
++ intended to manage address spaces. In practice the resource allocator is
++ generic and can manage arbitrary sets of integers.
++
++ Resources are allocated from arenas. Arenas can be created with an initial
++ span of resources. Further resource spans can be added to arenas. A
++ callback mechanism allows an arena to request further resource spans on
++ demand.
++
++ Each arena maintains an ordered list of resource segments, each described by
++ a boundary tag. Each boundary tag describes a segment of resources which are
++ either 'free', available for allocation, or 'busy', currently allocated.
++ Adjacent 'free' segments are always coalesced to avoid fragmentation.
++
++ For allocation, all 'free' segments are kept on lists of 'free' segments in
++ a table indexed by pvr_log2(segment size), i.e. each table index n holds
++ 'free' segments in the size range 2^n -> 2^(n+1) - 1.
++
++ Allocation policy is based on an *almost* good fit strategy.
++
++ Allocated segments are inserted into a self-scaling hash table which maps
++ the base resource of the span to the relevant boundary tag. This allows the
++ code to get back to the boundary tag without exporting explicit boundary tag
++ references through the API.
++
++ Each arena has an associated quantum size; all allocations from the arena
++ are made in multiples of the basic quantum.
++
++ On resource exhaustion in an arena, a callback, if provided, will be used to
++ request further resources. Resource spans allocated by the callback mechanism
++ will be returned when freed (through one of the two callbacks).
++*/ /**************************************************************************/
++
++/* Issues:
++ * - flags: flags are passed into the resource allocator but are not currently used.
++ * - determination of import size is currently braindead.
++ * - debug code should be moved out to its own module and #ifdef'd
++ */
++
++#include "img_types.h"
++#include "img_defs.h"
++#include "pvr_debug.h"
++#include "pvrsrv_error.h"
++#include "uniq_key_splay_tree.h"
++
++#include "hash.h"
++#include "ra.h"
++#include "pvrsrv_memallocflags.h"
++
++#include "osfunc.h"
++#include "allocmem.h"
++#include "lock.h"
++#include "pvr_intrinsics.h"
++
++/* The initial, and minimum, size of the live address -> boundary tag
++ * structure hash table. The value 64 is a fairly arbitrary choice. The hash
++ * table resizes on demand so the value chosen is not critical.
++ */
++#define MINIMUM_HASH_SIZE (64)
++
++
++/* #define RA_VALIDATE */
++
++#if defined(__KLOCWORK__)
++    /* Make sure Klocwork analyses all the code (including the debug one) */
++    #if !defined(RA_VALIDATE)
++        #define RA_VALIDATE
++    #endif
++#endif
++
++#if !defined(PVRSRV_NEED_PVR_ASSERT) || !defined(RA_VALIDATE)
++/* Disable the asserts unless explicitly told otherwise.
++ * They slow the driver too much for general use.
++ */
++
++#undef PVR_ASSERT
++/* Use a macro that really does not do anything when compiling in release
++ * mode!
++ */ ++#define PVR_ASSERT(x) ++#endif ++ ++/* boundary tags, used to describe a resource segment */ ++struct _BT_ ++{ ++ enum bt_type ++ { ++ btt_free, /* free resource segment */ ++ btt_live /* allocated resource segment */ ++ } type; ++ ++ unsigned int is_leftmost; ++ unsigned int is_rightmost; ++ unsigned int free_import; ++ ++ /* The base resource and extent of this segment */ ++ RA_BASE_T base; ++ RA_LENGTH_T uSize; ++ ++ /* doubly linked ordered list of all segments within the arena */ ++ struct _BT_ *pNextSegment; ++ struct _BT_ *pPrevSegment; ++ ++ /* doubly linked un-ordered list of free segments with the same flags. */ ++ struct _BT_ *next_free; ++ struct _BT_ *prev_free; ++ ++ /* A user reference associated with this span, user references are ++ * currently only provided in the callback mechanism ++ */ ++ IMG_HANDLE hPriv; ++ ++ /* Flags to match on this span */ ++ RA_FLAGS_T uFlags; ++ ++}; ++typedef struct _BT_ BT; ++ ++ ++/* resource allocation arena */ ++struct _RA_ARENA_ ++{ ++ /* arena name for diagnostics output */ ++ IMG_CHAR name[RA_MAX_NAME_LENGTH]; ++ ++ /* allocations within this arena are quantum sized */ ++ RA_LENGTH_T uQuantum; ++ ++ /* import interface, if provided */ ++ PFN_RA_ALLOC pImportAlloc; ++ ++ PFN_RA_FREE pImportFree; ++ ++ /* Arbitrary handle provided by arena owner to be passed into the ++ * import alloc and free hooks ++ */ ++ void *pImportHandle; ++ ++ IMG_PSPLAY_TREE per_flags_buckets; ++ ++ /* resource segment list */ ++ BT *pHeadSegment; ++ ++ /* segment address to boundary tag hash table */ ++ HASH_TABLE *pSegmentHash; ++ ++ /* Lock for this arena */ ++ POS_LOCK hLock; ++ ++ /* Policies that govern the resource area */ ++ IMG_UINT32 ui32PolicyFlags; ++ ++ /* LockClass of this arena. This is used within lockdep to decide if a ++ * recursive call sequence with the same lock class is allowed or not. ++ */ ++ IMG_UINT32 ui32LockClass; ++ ++ /* Total Size of the Arena */ ++ IMG_UINT64 ui64TotalArenaSize; ++ ++ /* Size available for allocation in the arena */ ++ IMG_UINT64 ui64FreeArenaSize; ++ ++}; ++ ++struct _RA_ARENA_ITERATOR_ ++{ ++ RA_ARENA *pArena; ++ BT *pCurrent; ++ IMG_BOOL bIncludeFreeSegments; ++}; ++ ++/*************************************************************************/ /*! ++@Function _RequestAllocFail ++@Description Default callback allocator used if no callback is specified, ++ always fails to allocate further resources to the arena. ++@Input _h - callback handle ++@Input _uSize - requested allocation size ++@Input _uflags - allocation flags ++@Input _pBase - receives allocated base ++@Output _pActualSize - actual allocation size ++@Input _pRef - user reference ++@Return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always fails ++ to allocate. 
++*/ /**************************************************************************/
++static PVRSRV_ERROR
++_RequestAllocFail(RA_PERARENA_HANDLE _h,
++                  RA_LENGTH_T _uSize,
++                  RA_FLAGS_T _uFlags,
++                  const IMG_CHAR *_pszAnnotation,
++                  RA_BASE_T *_pBase,
++                  RA_LENGTH_T *_pActualSize,
++                  RA_PERISPAN_HANDLE *_phPriv)
++{
++    PVR_UNREFERENCED_PARAMETER(_h);
++    PVR_UNREFERENCED_PARAMETER(_uSize);
++    PVR_UNREFERENCED_PARAMETER(_pActualSize);
++    PVR_UNREFERENCED_PARAMETER(_phPriv);
++    PVR_UNREFERENCED_PARAMETER(_uFlags);
++    PVR_UNREFERENCED_PARAMETER(_pBase);
++    PVR_UNREFERENCED_PARAMETER(_pszAnnotation);
++
++    return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL;
++}
++
++
++#if defined(PVR_CTZLL)
++    /* Make sure to trigger an error if someone changes the buckets or the
++       bHasEltsMapping size. The bHasEltsMapping is used to quickly determine
++       the smallest bucket containing elements, therefore it must have at
++       least as many bits as the buckets array has buckets. The RA
++       implementation actually uses one more bit. */
++    static_assert(ARRAY_SIZE(((IMG_PSPLAY_TREE)0)->buckets)
++                  < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping),
++                  "Too many buckets for bHasEltsMapping bitmap");
++#endif
++
++
++/*************************************************************************/ /*!
++@Function       pvr_log2
++@Description    Computes the floor of the log base 2 of an unsigned integer
++@Input          n       Unsigned integer
++@Return         Floor(Log2(n))
++*/ /**************************************************************************/
++#if defined(PVR_CLZLL)
++/* Make sure to trigger a problem if someone changes the RA_LENGTH_T type;
++   indeed, __builtin_clzll is for unsigned long long variables.
++
++   If someone changes RA_LENGTH_T to unsigned long, then use __builtin_clzl;
++   if it changes to unsigned int, use __builtin_clz.
++
++   If it changes to something bigger than unsigned long long,
++   then revert pvr_log2 to the classic implementation. */
++static_assert(sizeof(RA_LENGTH_T) == sizeof(unsigned long long),
++              "RA log routines not tuned for sizeof(RA_LENGTH_T)");
++
++static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n)
++{
++    PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */
++
++    return (8 * sizeof(RA_LENGTH_T)) - 1 - PVR_CLZLL(n);
++}
++#else
++static IMG_UINT32
++pvr_log2(RA_LENGTH_T n)
++{
++    IMG_UINT32 l = 0;
++
++    PVR_ASSERT(n != 0); /* Log2 is not defined on 0 */
++
++    n >>= 1;
++    while (n > 0)
++    {
++        n >>= 1;
++        l++;
++    }
++    return l;
++}
++#endif
++
++
++#if defined(RA_VALIDATE)
++/*************************************************************************/ /*!
++@Function       _IsInSegmentList
++@Description    Tests if a BT is in the segment list.
++@Input          pArena  The arena.
++@Input          pBT     The boundary tag to look for.
++@Return         IMG_FALSE  BT was not in the arena's segment list.
++                IMG_TRUE   BT was in the arena's segment list.
++*/ /**************************************************************************/
++static IMG_BOOL
++_IsInSegmentList(RA_ARENA *pArena, BT *pBT)
++{
++    BT* pBTScan;
++
++    PVR_ASSERT(pArena != NULL);
++    PVR_ASSERT(pBT != NULL);
++
++    /* Walk the segment list until we see the BT pointer... */
++    pBTScan = pArena->pHeadSegment;
++    while (pBTScan != NULL && pBTScan != pBT)
++    {
++        pBTScan = pBTScan->pNextSegment;
++    }
++
++    /* Test if we found it and then return */
++    return (pBTScan == pBT);
++}
++
++/*************************************************************************/ /*!
++@Function       _IsInFreeList
++@Description    Tests if a BT is in the free list.
++@Input          pArena  The arena.
++@Input          pBT     The boundary tag to look for.
++@Return         IMG_FALSE  BT was not in the arena's free list.
++                IMG_TRUE   BT was in the arena's free list.
++*/ /**************************************************************************/
++static IMG_BOOL
++_IsInFreeList(RA_ARENA *pArena, BT *pBT)
++{
++    BT* pBTScan;
++    IMG_UINT32 uIndex;
++
++    PVR_ASSERT(pArena != NULL);
++    PVR_ASSERT(pBT != NULL);
++
++    /* Look for the free list that holds BTs of this size... */
++    uIndex = pvr_log2(pBT->uSize);
++    PVR_ASSERT(uIndex < FREE_TABLE_LIMIT);
++
++    pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
++    if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->uiFlags != pBT->uFlags))
++    {
++        return 0;
++    }
++    else
++    {
++        pBTScan = pArena->per_flags_buckets->buckets[uIndex];
++        while (pBTScan != NULL && pBTScan != pBT)
++        {
++            pBTScan = pBTScan->next_free;
++        }
++
++        /* Test if we found it and then return */
++        return (pBTScan == pBT);
++    }
++}
++
++/* is_arena_valid should only be used in debug mode.
++ * It checks that the properties an arena must have are indeed verified.
++ */
++static int is_arena_valid(struct _RA_ARENA_ *arena)
++{
++    struct _BT_ *chunk;
++#if defined(PVR_CTZLL)
++    unsigned int i;
++#endif
++
++    for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment)
++    {
++        /* if next segment is NULL, then it must be a rightmost */
++        PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost));
++        /* if prev segment is NULL, then it must be a leftmost */
++        PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost));
++
++        if (chunk->type == btt_free)
++        {
++            /* checks the correctness of the type field */
++            PVR_ASSERT(_IsInFreeList(arena, chunk));
++
++            /* check that there can't be two consecutive free chunks.
++               Indeed, instead of having two consecutive free chunks,
++               there should be only one that spans the size of the two. */
++            PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free));
++            PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free));
++        }
++        else
++        {
++            /* checks the correctness of the type field */
++            PVR_ASSERT(!_IsInFreeList(arena, chunk));
++        }
++
++        PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base));
++        PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base));
++
++        /* all segments of the same import must have the same flags ... */
++        PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags));
++        /* ... and the same import handle */
++        PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv));
++
++
++        /* if a free chunk spans a whole import, then it must be a
++           'not to free' import. Otherwise it should have been freed.
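++           (free_import is only set on spans obtained through the import
++           callback; spans added directly with RA_Add keep it clear and
++           are never handed back to the importer.)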
*/ ++ PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import)); ++ } ++ ++#if defined(PVR_CTZLL) ++ if (arena->per_flags_buckets != NULL) ++ { ++ for (i = 0; i < FREE_TABLE_LIMIT; ++i) ++ { ++ /* verify that the bHasEltsMapping is correct for this flags bucket */ ++ PVR_ASSERT( ++ ((arena->per_flags_buckets->buckets[i] == NULL) && ++ (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0))) ++ || ++ ((arena->per_flags_buckets->buckets[i] != NULL) && ++ (((arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0))) ++ ); ++ } ++ } ++#endif ++ ++ /* if arena was not valid, an earlier assert should have triggered */ ++ return 1; ++} ++#endif ++ ++/*************************************************************************/ /*! ++@Function _SegmentListInsertAfter ++@Description Insert a boundary tag into an arena segment list after a ++ specified boundary tag. ++@Input pInsertionPoint The insertion point. ++@Input pBT The boundary tag to insert. ++*/ /**************************************************************************/ ++static INLINE void ++_SegmentListInsertAfter(BT *pInsertionPoint, ++ BT *pBT) ++{ ++ PVR_ASSERT(pBT != NULL); ++ PVR_ASSERT(pInsertionPoint != NULL); ++ ++ pBT->pNextSegment = pInsertionPoint->pNextSegment; ++ pBT->pPrevSegment = pInsertionPoint; ++ if (pInsertionPoint->pNextSegment != NULL) ++ { ++ pInsertionPoint->pNextSegment->pPrevSegment = pBT; ++ } ++ pInsertionPoint->pNextSegment = pBT; ++} ++ ++/*************************************************************************/ /*! ++@Function _SegmentListInsert ++@Description Insert a boundary tag into an arena segment list ++@Input pArena The arena. ++@Input pBT The boundary tag to insert. ++*/ /**************************************************************************/ ++static INLINE void ++_SegmentListInsert(RA_ARENA *pArena, BT *pBT) ++{ ++ PVR_ASSERT(!_IsInSegmentList(pArena, pBT)); ++ ++ /* insert into the segment chain */ ++ pBT->pNextSegment = pArena->pHeadSegment; ++ pArena->pHeadSegment = pBT; ++ if (pBT->pNextSegment != NULL) ++ { ++ pBT->pNextSegment->pPrevSegment = pBT; ++ } ++ ++ pBT->pPrevSegment = NULL; ++} ++ ++/*************************************************************************/ /*! ++@Function _SegmentListRemove ++@Description Remove a boundary tag from an arena segment list. ++@Input pArena The arena. ++@Input pBT The boundary tag to remove. ++*/ /**************************************************************************/ ++static void ++_SegmentListRemove(RA_ARENA *pArena, BT *pBT) ++{ ++ PVR_ASSERT(_IsInSegmentList(pArena, pBT)); ++ ++ if (pBT->pPrevSegment == NULL) ++ pArena->pHeadSegment = pBT->pNextSegment; ++ else ++ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment; ++ ++ if (pBT->pNextSegment != NULL) ++ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment; ++} ++ ++ ++/*************************************************************************/ /*! ++@Function _BuildBT ++@Description Construct a boundary tag for a free segment. ++@Input base The base of the resource segment. ++@Input uSize The extent of the resource segment. 
++@Input          uFlags  The flags to give to the boundary tag
++@Return         Boundary tag or NULL
++*/ /**************************************************************************/
++static BT *
++_BuildBT(RA_BASE_T base, RA_LENGTH_T uSize, RA_FLAGS_T uFlags)
++{
++    BT *pBT;
++
++    pBT = OSAllocZMem(sizeof(BT));
++    if (pBT == NULL)
++    {
++        return NULL;
++    }
++
++    pBT->is_leftmost = 1;
++    pBT->is_rightmost = 1;
++    /* pBT->free_import = 0; */
++    pBT->type = btt_live;
++    pBT->base = base;
++    pBT->uSize = uSize;
++    pBT->uFlags = uFlags;
++
++    return pBT;
++}
++
++
++/*************************************************************************/ /*!
++@Function       _SegmentSplit
++@Description    Split a segment into two, maintaining the arena segment list.
++                The boundary tag should not be in the free table. Neither the
++                original nor the new neighbour boundary tag will be in the
++                free table.
++@Input          pBT     The boundary tag to split.
++@Input          uSize   The required segment size of the boundary tag after
++                        splitting.
++@Return         New neighbour boundary tag or NULL.
++*/ /**************************************************************************/
++static BT *
++_SegmentSplit(BT *pBT, RA_LENGTH_T uSize)
++{
++    BT *pNeighbour;
++
++    pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags);
++    if (pNeighbour == NULL)
++    {
++        return NULL;
++    }
++
++    _SegmentListInsertAfter(pBT, pNeighbour);
++
++    pNeighbour->is_leftmost = 0;
++    pNeighbour->is_rightmost = pBT->is_rightmost;
++    pNeighbour->free_import = pBT->free_import;
++    pBT->is_rightmost = 0;
++    pNeighbour->hPriv = pBT->hPriv;
++    pBT->uSize = uSize;
++    pNeighbour->uFlags = pBT->uFlags;
++
++    return pNeighbour;
++}
++
++/*************************************************************************/ /*!
++@Function       _FreeListInsert
++@Description    Insert a boundary tag into an arena free table.
++@Input          pArena  The arena.
++@Input          pBT     The boundary tag.
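++@Note           With RA_POLICY_ALLOC_OPTIMAL the bucket is kept sorted by
++                ascending size; with the default policy the tag is simply
++                pushed at the head of the bucket.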
++*/ /**************************************************************************/ ++static void ++_FreeListInsert(RA_ARENA *pArena, BT *pBT) ++{ ++ IMG_UINT32 uIndex; ++ BT *pBTTemp = NULL; ++ uIndex = pvr_log2(pBT->uSize); ++ ++ PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); ++ PVR_ASSERT(!_IsInFreeList(pArena, pBT)); ++ ++ pBT->type = btt_free; ++ ++ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); ++ /* the flags item in the splay tree must have been created before-hand by ++ _InsertResource */ ++ PVR_ASSERT(pArena->per_flags_buckets != NULL); ++ PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL); ++ ++ /* Handle NULL values for RELEASE builds and/or disabled ASSERT DEBUG builds */ ++ if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL))) ++ { ++ return; ++ } ++ ++ /* Get the first node in the bucket */ ++ pBTTemp = pArena->per_flags_buckets->buckets[uIndex]; ++ ++ if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_ALLOC_NODE_SELECT_MASK) == RA_POLICY_ALLOC_OPTIMAL)) ++ { ++ /* Add the node to the start if the bucket is empty */ ++ if (NULL == pBTTemp) ++ { ++ pArena->per_flags_buckets->buckets[uIndex] = pBT; ++ pBT->next_free = NULL; ++ pBT->prev_free = NULL; ++ ++ } ++ else ++ { ++ BT *pBTPrev = NULL; ++ /* Traverse the list and identify the appropriate ++ * place based on the size of the Boundary being inserted */ ++ while (pBTTemp && (pBTTemp->uSize < pBT->uSize)) ++ { ++ pBTPrev = pBTTemp; ++ pBTTemp = pBTTemp->next_free; ++ } ++ /* point the new node to the first higher size element */ ++ pBT->next_free = pBTTemp; ++ pBT->prev_free = pBTPrev; ++ ++ if (pBTPrev) ++ { ++ /* Set the lower size element in the ++ * chain to point new node */ ++ pBTPrev->next_free = pBT; ++ } ++ else ++ { ++ /* Assign the new node to the start of the bucket ++ * if the bucket is empty */ ++ pArena->per_flags_buckets->buckets[uIndex] = pBT; ++ } ++ /* Make sure the higher size element in the chain points back ++ * to the new node to be introduced */ ++ if (pBTTemp) ++ { ++ pBTTemp->prev_free = pBT; ++ } ++ } ++ } ++ else ++ { ++ pBT->next_free = pBTTemp; ++ if (pBT->next_free != NULL) ++ { ++ pBT->next_free->prev_free = pBT; ++ } ++ pBT->prev_free = NULL; ++ pArena->per_flags_buckets->buckets[uIndex] = pBT; ++ } ++ ++#if defined(PVR_CTZLL) ++ /* tells that bucket[index] now contains elements */ ++ pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex); ++#endif ++ ++} ++ ++/*************************************************************************/ /*! ++@Function _FreeListRemove ++@Description Remove a boundary tag from an arena free table. ++@Input pArena The arena. ++@Input pBT The boundary tag. 
++*/ /**************************************************************************/ ++static void ++_FreeListRemove(RA_ARENA *pArena, BT *pBT) ++{ ++ IMG_UINT32 uIndex; ++ uIndex = pvr_log2(pBT->uSize); ++ ++ PVR_ASSERT(uIndex < FREE_TABLE_LIMIT); ++ PVR_ASSERT(_IsInFreeList(pArena, pBT)); ++ ++ if (pBT->next_free != NULL) ++ { ++ pBT->next_free->prev_free = pBT->prev_free; ++ } ++ ++ if (pBT->prev_free != NULL) ++ { ++ pBT->prev_free->next_free = pBT->next_free; ++ } ++ else ++ { ++ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets); ++ /* the flags item in the splay tree must have already been created ++ (otherwise how could there be a segment with these flags */ ++ PVR_ASSERT(pArena->per_flags_buckets != NULL); ++ PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL); ++ ++ /* Handle unlikely NULL values for RELEASE or ASSERT-disabled builds */ ++ if (unlikely((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->buckets == NULL))) ++ { ++ pBT->type = btt_live; ++ return; ++ } ++ ++ pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free; ++#if defined(PVR_CTZLL) ++ if (pArena->per_flags_buckets->buckets[uIndex] == NULL) ++ { ++ /* there is no more elements in this bucket. Update the mapping. */ ++ pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex); ++ } ++#endif ++ } ++ ++ PVR_ASSERT(!_IsInFreeList(pArena, pBT)); ++ pBT->type = btt_live; ++} ++ ++ ++/*************************************************************************/ /*! ++@Function _InsertResource ++@Description Add a free resource segment to an arena. ++@Input pArena The arena. ++@Input base The base of the resource segment. ++@Input uSize The extent of the resource segment. ++@Input uFlags The flags of the new resources. ++@Return New bucket pointer ++ NULL on failure ++*/ /**************************************************************************/ ++static BT * ++_InsertResource(RA_ARENA *pArena, RA_BASE_T base, RA_LENGTH_T uSize, ++ RA_FLAGS_T uFlags) ++{ ++ BT *pBT; ++ PVR_ASSERT(pArena!=NULL); ++ ++ pBT = _BuildBT(base, uSize, uFlags); ++ ++ if (pBT != NULL) ++ { ++ IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets); ++ if (tmp == NULL) ++ { ++ OSFreeMem(pBT); ++ return NULL; ++ } ++ ++ pArena->per_flags_buckets = tmp; ++ _SegmentListInsert(pArena, pBT); ++ _FreeListInsert(pArena, pBT); ++ } ++ return pBT; ++} ++ ++/*************************************************************************/ /*! ++@Function _InsertResourceSpan ++@Description Add a free resource span to an arena, marked for free_import. ++@Input pArena The arena. ++@Input base The base of the resource segment. ++@Input uSize The extent of the resource segment. ++@Return The boundary tag representing the free resource segment, ++ or NULL on failure. ++*/ /**************************************************************************/ ++static INLINE BT * ++_InsertResourceSpan(RA_ARENA *pArena, ++ RA_BASE_T base, ++ RA_LENGTH_T uSize, ++ RA_FLAGS_T uFlags) ++{ ++ BT *pBT = _InsertResource(pArena, base, uSize, uFlags); ++ if (pBT != NULL) ++ { ++ pBT->free_import = 1; ++ } ++ return pBT; ++} ++ ++ ++/*************************************************************************/ /*! ++@Function _RemoveResourceSpan ++@Description Frees a resource span from an arena, returning the imported ++ span via the callback. ++@Input pArena The arena. ++@Input pBT The boundary tag to free. 
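++@Note           The span is only returned to the importer when pBT is the
++                sole remaining segment of its import, i.e. it is marked
++                free_import and is both leftmost and rightmost.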
++@Return         IMG_FALSE  failure - span was still in use
++                IMG_TRUE   success - span was removed and returned
++*/ /**************************************************************************/
++static INLINE IMG_BOOL
++_RemoveResourceSpan(RA_ARENA *pArena, BT *pBT)
++{
++    PVR_ASSERT(pArena!=NULL);
++    PVR_ASSERT(pBT!=NULL);
++
++    if (pBT->free_import &&
++        pBT->is_leftmost &&
++        pBT->is_rightmost)
++    {
++        _SegmentListRemove(pArena, pBT);
++        pArena->pImportFree(pArena->pImportHandle, pBT->base, pBT->hPriv);
++        OSFreeMem(pBT);
++
++        return IMG_TRUE;
++    }
++
++    return IMG_FALSE;
++}
++
++/*************************************************************************/ /*!
++@Function       _FreeBT
++@Description    Free a boundary tag taking care of the segment list and the
++                boundary tag free table.
++@Input          pArena  The arena.
++@Input          pBT     The boundary tag to free.
++*/ /**************************************************************************/
++static void
++_FreeBT(RA_ARENA *pArena, BT *pBT)
++{
++    BT *pNeighbour;
++
++    PVR_ASSERT(pArena!=NULL);
++    PVR_ASSERT(pBT!=NULL);
++    PVR_ASSERT(!_IsInFreeList(pArena, pBT));
++
++    /* try and coalesce with left neighbour */
++    pNeighbour = pBT->pPrevSegment;
++    if ((!pBT->is_leftmost) && (pNeighbour->type == btt_free))
++    {
++        /* Verify list correctness */
++        PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base);
++
++        _FreeListRemove(pArena, pNeighbour);
++        _SegmentListRemove(pArena, pNeighbour);
++        pBT->base = pNeighbour->base;
++
++        pBT->uSize += pNeighbour->uSize;
++        pBT->is_leftmost = pNeighbour->is_leftmost;
++        OSFreeMem(pNeighbour);
++    }
++
++    /* try to coalesce with right neighbour */
++    pNeighbour = pBT->pNextSegment;
++    if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free))
++    {
++        /* Verify list correctness */
++        PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base);
++
++        _FreeListRemove(pArena, pNeighbour);
++        _SegmentListRemove(pArena, pNeighbour);
++        pBT->uSize += pNeighbour->uSize;
++        pBT->is_rightmost = pNeighbour->is_rightmost;
++        OSFreeMem(pNeighbour);
++    }
++
++    if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE)
++    {
++        _FreeListInsert(pArena, pBT);
++        PVR_ASSERT((!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import));
++    }
++
++    PVR_ASSERT(is_arena_valid(pArena));
++}
++
++
++/*
++    This function returns the first element in a bucket that can be split
++    in a way that one of the sub-segments can meet the size and alignment
++    criteria.
++
++    The first_elt is the bucket to look into. Remember that a bucket is
++    implemented as a pointer to the first element of the linked list.
++
++    nb_max_try is used to limit the number of elements considered.
++    This is used to only consider the first nb_max_try elements in the
++    free-list. The special value ~0 is used to say unlimited, i.e. consider
++    all elements in the free list.
++ */
++static INLINE
++struct _BT_ *find_chunk_in_bucket(struct _BT_ * first_elt,
++                                  RA_LENGTH_T uSize,
++                                  RA_LENGTH_T uAlignment,
++                                  unsigned int nb_max_try)
++{
++    struct _BT_ *walker;
++
++    for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free)
++    {
++        const RA_BASE_T aligned_base = (uAlignment > 1) ?
++                                       (walker->base + uAlignment - 1) & ~(uAlignment - 1)
++                                       : walker->base;
++
++        if (walker->base + walker->uSize >= aligned_base + uSize)
++        {
++            return walker;
++        }
++
++        /* 0xFFFF...FFFF is used as nb_max_try = infinity.
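++         * The decrement below is skipped for that sentinel value, so the
++         * walk is effectively unbounded.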
*/ ++ if (nb_max_try != (unsigned int) ~0) ++ { ++ nb_max_try--; ++ } ++ } ++ ++ return NULL; ++} ++ ++/*************************************************************************/ /*! ++@Function _AllocAlignSplit ++@Description Given a valid BT, trim the start and end of the BT according ++ to alignment and size requirements. Also add the resulting ++ BT to the live hash table. ++@Input pArena The arena. ++@Input pBT The BT to trim and add to live hash table ++@Input uSize The requested allocation size. ++@Input uAlignment The alignment requirements of the allocation ++ Required uAlignment, or 0. ++ Must be a power of 2 if not 0 ++@Output pBase Allocated, corrected, resource base ++ (non-optional, must not be NULL) ++@Output phPriv The user references associated with ++ the imported segment. (optional) ++@Return IMG_FALSE failure ++ IMG_TRUE success ++*/ /**************************************************************************/ ++static IMG_BOOL ++_AllocAlignSplit(RA_ARENA *pArena, ++ BT *pBT, ++ RA_LENGTH_T uSize, ++ RA_LENGTH_T uAlignment, ++ RA_BASE_T *pBase, ++ RA_PERISPAN_HANDLE *phPriv) ++{ ++ RA_BASE_T aligned_base; ++ ++ aligned_base = (uAlignment > 1) ? (pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base; ++ ++ _FreeListRemove(pArena, pBT); ++ ++ if ((pArena->ui32PolicyFlags & RA_POLICY_NO_SPLIT_MASK) == RA_POLICY_NO_SPLIT) ++ { ++ goto nosplit; ++ } ++ ++ /* with uAlignment we might need to discard the front of this segment */ ++ if (aligned_base > pBT->base) ++ { ++ BT *pNeighbour; ++ pNeighbour = _SegmentSplit(pBT, (RA_LENGTH_T)(aligned_base - pBT->base)); ++ /* partition the buffer, create a new boundary tag */ ++ if (pNeighbour == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Front split failed", __func__)); ++ /* Put pBT back in the list */ ++ _FreeListInsert(pArena, pBT); ++ return IMG_FALSE; ++ } ++ ++ _FreeListInsert(pArena, pBT); ++ pBT = pNeighbour; ++ } ++ ++ /* the segment might be too big, if so, discard the back of the segment */ ++ if (pBT->uSize > uSize) ++ { ++ BT *pNeighbour; ++ pNeighbour = _SegmentSplit(pBT, uSize); ++ /* partition the buffer, create a new boundary tag */ ++ if (pNeighbour == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Back split failed", __func__)); ++ /* Put pBT back in the list */ ++ _FreeListInsert(pArena, pBT); ++ return IMG_FALSE; ++ } ++ ++ _FreeListInsert(pArena, pNeighbour); ++ } ++nosplit: ++ pBT->type = btt_live; ++ ++ if (!HASH_Insert_Extended(pArena->pSegmentHash, &aligned_base, (uintptr_t)pBT)) ++ { ++ _FreeBT(pArena, pBT); ++ return IMG_FALSE; ++ } ++ ++ if (phPriv != NULL) ++ *phPriv = pBT->hPriv; ++ ++ *pBase = aligned_base; ++ ++ return IMG_TRUE; ++} ++ ++/*************************************************************************/ /*! ++@Function _AttemptAllocAligned ++@Description Attempt an allocation from an arena. ++@Input pArena The arena. ++@Input uSize The requested allocation size. ++@Input uFlags Allocation flags ++@Output phPriv The user references associated with ++ the imported segment. (optional) ++@Input uAlignment Required uAlignment, or 0. 
++ Must be a power of 2 if not 0 ++@Output base Allocated resource base (non-optional, must not ++ be NULL) ++@Return IMG_FALSE failure ++ IMG_TRUE success ++*/ /**************************************************************************/ ++static IMG_BOOL ++_AttemptAllocAligned(RA_ARENA *pArena, ++ RA_LENGTH_T uSize, ++ RA_FLAGS_T uFlags, ++ RA_LENGTH_T uAlignment, ++ RA_BASE_T *base, ++ RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */ ++{ ++ ++ IMG_UINT32 index_low; ++ IMG_UINT32 index_high; ++ IMG_UINT32 i; ++ struct _BT_ *pBT = NULL; ++ ++ PVR_ASSERT(pArena!=NULL); ++ PVR_ASSERT(base != NULL); ++ ++ pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets); ++ if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->uiFlags != uFlags)) ++ { ++ /* no chunks with these flags. */ ++ return IMG_FALSE; ++ } ++ ++ index_low = pvr_log2(uSize); ++ if (uAlignment) ++ { ++ index_high = pvr_log2(uSize + uAlignment - 1); ++ } ++ else ++ { ++ index_high = index_low; ++ } ++ ++ PVR_ASSERT(index_low < FREE_TABLE_LIMIT); ++ PVR_ASSERT(index_high < FREE_TABLE_LIMIT); ++ PVR_ASSERT(index_low <= index_high); ++ ++ if (unlikely((pArena->ui32PolicyFlags & RA_POLICY_BUCKET_MASK) == RA_POLICY_BUCKET_BEST_FIT)) ++ { ++ /* This policy ensures the selection of the first lowest size bucket that ++ * satisfies the request size is selected */ ++#if defined(PVR_CTZLL) ++ i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_low )) - 1)) & pArena->per_flags_buckets->bHasEltsMapping); ++#else ++ i = index_low; ++#endif ++ for ( ; (i < FREE_TABLE_LIMIT) && (pBT == NULL); ++i) ++ { ++ if (pArena->per_flags_buckets->buckets[i]) ++ { ++ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0); ++ } ++ } ++ } ++ else ++ { ++#if defined(PVR_CTZLL) ++ i = PVR_CTZLL((~(((IMG_ELTS_MAPPINGS)1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping); ++#else ++ for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i) ++ { ++ } ++#endif ++ PVR_ASSERT(i <= FREE_TABLE_LIMIT); ++ ++ if (i != FREE_TABLE_LIMIT) ++ { ++ /* since we start at index_high + 1, we are guaranteed to exit */ ++ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1); ++ } ++ else ++ { ++ for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i) ++ { ++ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0); ++ } ++ } ++ } ++ ++ if (pBT == NULL) ++ { ++ return IMG_FALSE; ++ } ++ ++ return _AllocAlignSplit(pArena, pBT, uSize, uAlignment, base, phPriv); ++} ++ ++/*************************************************************************/ /*! ++@Function _AttemptImportSpanAlloc ++@Description Attempt to Import more memory and create a new span. ++ Function attempts to import more memory from the callback ++ provided at RA creation time, if successful the memory ++ will form a new span in the RA. ++@Input pArena The arena. ++@Input uRequestSize The requested allocation size. ++@Input uImportMultiplier Import x-times more for future requests if ++ we have to import new memory. ++@Input uImportFlags Flags influencing allocation policy. ++@Input uAlignment The alignment requirements of the allocation ++ Required uAlignment, or 0. 
++ Must be a power of 2 if not 0 ++@Input pszAnnotation String to describe the allocation ++@Output pImportBase Allocated import base ++ (non-optional, must not be NULL) ++@Output pImportSize Allocated import size ++@Output pImportBT Allocated import BT ++@Return PVRSRV_OK - success ++*/ /**************************************************************************/ ++static PVRSRV_ERROR ++_AttemptImportSpanAlloc(RA_ARENA *pArena, ++ RA_LENGTH_T uRequestSize, ++ IMG_UINT8 uImportMultiplier, ++ RA_FLAGS_T uImportFlags, ++ RA_LENGTH_T uAlignment, ++ const IMG_CHAR *pszAnnotation, ++ RA_BASE_T *pImportBase, ++ RA_LENGTH_T *pImportSize, ++ BT **pImportBT) ++{ ++ IMG_HANDLE hPriv; ++ RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK); ++ BT *pBT; ++ PVRSRV_ERROR eError; ++ ++ *pImportSize = uRequestSize; ++ /* ++ Ensure that we allocate sufficient space to meet the uAlignment ++ constraint ++ */ ++ if (uAlignment > pArena->uQuantum) ++ { ++ *pImportSize += (uAlignment - pArena->uQuantum); ++ } ++ ++ /* apply over-allocation multiplier after all alignment adjustments */ ++ *pImportSize *= uImportMultiplier; ++ ++ /* ensure that we import according to the quanta of this arena */ ++ *pImportSize = (*pImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1); ++ ++ eError = pArena->pImportAlloc(pArena->pImportHandle, ++ *pImportSize, uImportFlags, ++ pszAnnotation, ++ pImportBase, pImportSize, ++ &hPriv); ++ if (PVRSRV_OK != eError) ++ { ++ return eError; ++ } ++ ++ /* If we successfully import more resource, create a span to ++ * represent it else free the resource we imported. ++ */ ++ pBT = _InsertResourceSpan(pArena, *pImportBase, *pImportSize, uFlags); ++ if (pBT == NULL) ++ { ++ /* insufficient resources to insert the newly acquired span, ++ so free it back again */ ++ pArena->pImportFree(pArena->pImportHandle, *pImportBase, hPriv); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', " ++ "size=0x%llx failed!", __func__, pArena->name, ++ (unsigned long long)uRequestSize)); ++ /* RA_Dump (arena); */ ++ ++ return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED; ++ } ++ ++ pBT->hPriv = hPriv; ++ *pImportBT = pBT; ++ ++ return eError; ++} ++ ++IMG_INTERNAL RA_ARENA * ++RA_Create(IMG_CHAR *name, ++ RA_LOG2QUANTUM_T uLog2Quantum, ++ IMG_UINT32 ui32LockClass, ++ PFN_RA_ALLOC imp_alloc, ++ PFN_RA_FREE imp_free, ++ RA_PERARENA_HANDLE arena_handle, ++ IMG_UINT32 ui32PolicyFlags) ++{ ++ RA_ARENA *pArena; ++ PVRSRV_ERROR eError; ++ ++ if (name == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter 'name' (NULL not accepted)", __func__)); ++ return NULL; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s'", __func__, name)); ++ ++ pArena = OSAllocMem(sizeof(*pArena)); ++ if (pArena == NULL) ++ { ++ goto arena_fail; ++ } ++ ++ eError = OSLockCreate(&pArena->hLock); ++ if (eError != PVRSRV_OK) ++ { ++ goto lock_fail; ++ } ++ ++ pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default); ++ ++ if (pArena->pSegmentHash==NULL) ++ { ++ goto hash_fail; ++ } ++ ++ OSStringLCopy(pArena->name, name, RA_MAX_NAME_LENGTH); ++ pArena->pImportAlloc = (imp_alloc!=NULL) ? 
imp_alloc : &_RequestAllocFail; ++ pArena->pImportFree = imp_free; ++ pArena->pImportHandle = arena_handle; ++ pArena->pHeadSegment = NULL; ++ pArena->uQuantum = 1ULL << uLog2Quantum; ++ pArena->per_flags_buckets = NULL; ++ pArena->ui32LockClass = ui32LockClass; ++ pArena->ui32PolicyFlags = ui32PolicyFlags; ++ pArena->ui64TotalArenaSize = 0; ++ pArena->ui64FreeArenaSize = 0; ++ ++ PVR_ASSERT(is_arena_valid(pArena)); ++ return pArena; ++ ++hash_fail: ++ OSLockDestroy(pArena->hLock); ++lock_fail: ++ OSFreeMem(pArena); ++ /* not nulling pointer, out of scope */ ++arena_fail: ++ return NULL; ++} ++ ++static void _LogRegionCreation(const char *pszMemType, ++ IMG_UINT64 ui64CpuPA, ++ IMG_UINT64 ui64DevPA, ++ IMG_UINT64 ui64Size) ++{ ++#if !defined(DEBUG) ++ PVR_UNREFERENCED_PARAMETER(pszMemType); ++ PVR_UNREFERENCED_PARAMETER(ui64CpuPA); ++ PVR_UNREFERENCED_PARAMETER(ui64DevPA); ++ PVR_UNREFERENCED_PARAMETER(ui64Size); ++#else ++ if ((ui64CpuPA != 0) && (ui64DevPA != 0) && (ui64CpuPA != ui64DevPA)) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "Creating RA for \"%s\" memory" ++ " - Cpu PA 0x%016" IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx ++ " - Dev PA 0x%016" IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx, ++ pszMemType, ++ ui64CpuPA, ui64CpuPA + ui64Size, ++ ui64DevPA, ui64DevPA + ui64Size)); ++ } ++ else ++ { ++ __maybe_unused IMG_UINT64 ui64PA = ++ ui64CpuPA != 0 ? ui64CpuPA : ui64DevPA; ++ __maybe_unused const IMG_CHAR *pszAddrType = ++ ui64CpuPA == ui64DevPA ? "Cpu/Dev" : (ui64CpuPA != 0 ? "Cpu" : "Dev"); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "Creating RA for \"%s\" memory - %s PA 0x%016" ++ IMG_UINT64_FMTSPECx "-0x%016" IMG_UINT64_FMTSPECx, ++ pszMemType, pszAddrType, ++ ui64PA, ui64PA + ui64Size)); ++ } ++#endif ++} ++ ++IMG_INTERNAL RA_ARENA * ++RA_Create_With_Span(IMG_CHAR *name, ++ RA_LOG2QUANTUM_T uLog2Quantum, ++ IMG_UINT64 ui64CpuBase, ++ IMG_UINT64 ui64SpanDevBase, ++ IMG_UINT64 ui64SpanSize) ++{ ++ RA_ARENA *psRA; ++ IMG_BOOL bSuccess; ++ ++ psRA = RA_Create(name, ++ uLog2Quantum, /* Use OS page size, keeps things simple */ ++ RA_LOCKCLASS_0, /* This arena doesn't use any other arenas. 
*/
++                     NULL, /* No Import */
++                     NULL, /* No free import */
++                     NULL, /* No import handle */
++                     RA_POLICY_DEFAULT); /* No restriction on import splitting */
++    PVR_LOG_GOTO_IF_FALSE(psRA != NULL, "RA_Create() failed", return_);
++
++    bSuccess = RA_Add(psRA, (RA_BASE_T) ui64SpanDevBase, (RA_LENGTH_T) ui64SpanSize, 0, NULL);
++    PVR_LOG_GOTO_IF_FALSE(bSuccess, "RA_Add() failed", cleanup_);
++
++    _LogRegionCreation(name, ui64CpuBase, ui64SpanDevBase, ui64SpanSize);
++
++    return psRA;
++
++cleanup_:
++    RA_Delete(psRA);
++return_:
++    return NULL;
++}
++
++IMG_INTERNAL void
++RA_Delete(RA_ARENA *pArena)
++{
++    IMG_UINT32 uIndex;
++    IMG_BOOL bWarn = IMG_TRUE;
++
++    PVR_ASSERT(pArena != NULL);
++
++    if (pArena == NULL)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__));
++        return;
++    }
++
++    PVR_ASSERT(is_arena_valid(pArena));
++
++    PVR_DPF((PVR_DBG_MESSAGE,
++             "%s: name='%s'", __func__, pArena->name));
++
++    while (pArena->pHeadSegment != NULL)
++    {
++        BT *pBT = pArena->pHeadSegment;
++
++        if (pBT->type != btt_free)
++        {
++            if (bWarn)
++            {
++                PVR_DPF((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__));
++                PVR_DPF((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__));
++                PVR_DPF((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__,
++                         (unsigned long long)pBT->base, (unsigned long long)pBT->uSize));
++                PVR_DPF((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__));
++                bWarn = IMG_FALSE;
++            }
++        }
++        else
++        {
++            _FreeListRemove(pArena, pBT);
++        }
++
++        _SegmentListRemove(pArena, pBT);
++        OSFreeMem(pBT);
++        /* not nulling original pointer, it has changed */
++    }
++
++    while (pArena->per_flags_buckets != NULL)
++    {
++        for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
++        {
++            PVR_ASSERT(pArena->per_flags_buckets->buckets[uIndex] == NULL);
++        }
++
++        pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->uiFlags, pArena->per_flags_buckets);
++    }
++
++    HASH_Delete(pArena->pSegmentHash);
++    OSLockDestroy(pArena->hLock);
++    OSFreeMem(pArena);
++    /* not nulling pointer, copy on stack */
++}
++
++IMG_INTERNAL IMG_BOOL
++RA_Add(RA_ARENA *pArena,
++       RA_BASE_T base,
++       RA_LENGTH_T uSize,
++       RA_FLAGS_T uFlags,
++       RA_PERISPAN_HANDLE hPriv)
++{
++    struct _BT_* bt;
++    PVR_ASSERT(pArena != NULL);
++    PVR_ASSERT(uSize != 0);
++
++    if (pArena == NULL)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__));
++        return IMG_FALSE;
++    }
++
++    if (uSize == 0)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: invalid size 0 added to arena %s", __func__, pArena->name));
++        return IMG_FALSE;
++    }
++
++    OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
++    PVR_ASSERT(is_arena_valid(pArena));
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', "
++             "base=0x%llx, size=0x%llx", __func__, pArena->name,
++             (unsigned long long)base, (unsigned long long)uSize));
++
++    uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
++    bt = _InsertResource(pArena, base, uSize, uFlags);
++    if (bt != NULL)
++    {
++        bt->hPriv = hPriv;
++    }
++
++    PVR_ASSERT(is_arena_valid(pArena));
++
++    pArena->ui64TotalArenaSize += uSize;
++    pArena->ui64FreeArenaSize += uSize;
++    OSLockRelease(pArena->hLock);
++
++    return bt != NULL;
++}
++
++IMG_INTERNAL PVRSRV_ERROR
++RA_Alloc(RA_ARENA *pArena,
++         RA_LENGTH_T uRequestSize,
++         IMG_UINT8 uImportMultiplier,
++         RA_FLAGS_T uImportFlags,
++         RA_LENGTH_T uAlignment,
++         const IMG_CHAR *pszAnnotation,
++         RA_BASE_T *base,
++         RA_LENGTH_T *pActualSize,
++
RA_PERISPAN_HANDLE *phPriv)
++{
++    PVRSRV_ERROR eError;
++    IMG_BOOL bResult;
++    RA_LENGTH_T uSize = uRequestSize;
++    RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);
++
++    if (pArena == NULL || uImportMultiplier == 0 || uSize == 0)
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: One of the necessary parameters is 0", __func__));
++        return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
++    PVR_ASSERT(is_arena_valid(pArena));
++
++    if (pActualSize != NULL)
++    {
++        *pActualSize = uSize;
++    }
++
++    /* Must be a power of 2 or 0 */
++    PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
++
++    PVR_DPF((PVR_DBG_MESSAGE,
++             "%s: arena='%s', size=0x%llx(0x%llx), "
++             "alignment=0x%llx", __func__, pArena->name,
++             (unsigned long long)uSize, (unsigned long long)uRequestSize,
++             (unsigned long long)uAlignment));
++
++    /* if the allocation fails then we might have an import source which
++       can provide more resource, else we will have to fail the
++       allocation to the caller. */
++    bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
++    if (!bResult)
++    {
++        RA_BASE_T uImportBase;
++        RA_LENGTH_T uImportSize;
++        BT *pBT = NULL;
++
++        eError = _AttemptImportSpanAlloc(pArena,
++                                         uSize,
++                                         uImportMultiplier,
++                                         uFlags,
++                                         uAlignment,
++                                         pszAnnotation,
++                                         &uImportBase,
++                                         &uImportSize,
++                                         &pBT);
++        if (eError != PVRSRV_OK)
++        {
++            OSLockRelease(pArena->hLock);
++            return eError;
++        }
++
++        bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
++        if (!bResult)
++        {
++            PVR_DPF((PVR_DBG_ERROR,
++                     "%s: name='%s' second alloc failed!",
++                     __func__, pArena->name));
++
++            /*
++              On failure of _AttemptAllocAligned(), depending on the exact
++              point of failure, the imported segment may have been used and
++              freed, or left untouched. If the latter, we need to return it.
++            */
++            _FreeBT(pArena, pBT);
++
++            OSLockRelease(pArena->hLock);
++            return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
++        }
++        else
++        {
++            /* Check if the new allocation was in the span we just added... */
++            if (*base < uImportBase || *base > (uImportBase + uImportSize))
++            {
++                PVR_DPF((PVR_DBG_ERROR,
++                         "%s: name='%s' alloc did not occur in the imported span!",
++                         __func__, pArena->name));
++
++                /*
++                  Remove the imported span, which should not be in use (if it
++                  is then that is okay, but essentially no span should exist
++                  that is not used).
++                */
++                _FreeBT(pArena, pBT);
++            }
++            else
++            {
++                pArena->ui64FreeArenaSize += uImportSize;
++                pArena->ui64TotalArenaSize += uImportSize;
++            }
++        }
++    }
++
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', size=0x%llx, "
++             "*base=0x%llx = %d", __func__, pArena->name, (unsigned long long)uSize,
++             (unsigned long long)*base, bResult));
++
++    PVR_ASSERT(is_arena_valid(pArena));
++
++    pArena->ui64FreeArenaSize -= uSize;
++
++    OSLockRelease(pArena->hLock);
++    return PVRSRV_OK;
++}
++
++/*************************************************************************/ /*!
++@Function       RA_Find_BT_VARange
++@Description    Find the boundary tag associated with the given device
++                virtual address.
++@Input          pArena          The arena.
++@Input          base            Allocated base resource
++@Input          uRequestSize    The size of resource segment requested.
++@Input          uImportFlags    Flags influencing allocation policy.
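++                                Only free segments whose flags exactly match
++                                uImportFlags are considered.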
++@Return         Boundary Tag - success, NULL on failure
++*/ /**************************************************************************/
++static BT *RA_Find_BT_VARange(RA_ARENA *pArena,
++                              RA_BASE_T base,
++                              RA_LENGTH_T uRequestSize,
++                              RA_FLAGS_T uImportFlags)
++{
++    IMG_PSPLAY_TREE psSplaynode;
++    BT *pBT = pArena->pHeadSegment;
++    IMG_UINT32 uIndex;
++
++    uIndex = pvr_log2 (uRequestSize);
++
++    /* Find the splay node associated with these import flags */
++    psSplaynode = PVRSRVFindNode(uImportFlags, pArena->per_flags_buckets);
++
++    if (psSplaynode == NULL)
++    {
++        return NULL;
++    }
++
++    /* Find the free Boundary Tag from the bucket that holds the requested range */
++    while (uIndex < FREE_TABLE_LIMIT)
++    {
++        pBT = psSplaynode->buckets[uIndex];
++
++        while (pBT)
++        {
++            if ((pBT->base <= base) && ((pBT->base + pBT->uSize) >= (base + uRequestSize)))
++            {
++                if (pBT->type == btt_free)
++                {
++                    return pBT;
++                }
++                else
++                {
++                    PVR_ASSERT(pBT->type == btt_free);
++                }
++            }
++            else
++            {
++                pBT = pBT->next_free;
++            }
++        }
++
++#if defined(PVR_CTZLL)
++        /* This could further be optimised to get the next valid bucket */
++        while (!(psSplaynode->bHasEltsMapping & (1ULL << ++uIndex)));
++#else
++        uIndex++;
++#endif
++    }
++
++    return NULL;
++}
++
++IMG_INTERNAL PVRSRV_ERROR
++RA_Alloc_Range(RA_ARENA *pArena,
++               RA_LENGTH_T uRequestSize,
++               RA_FLAGS_T uImportFlags,
++               RA_LENGTH_T uAlignment,
++               RA_BASE_T base,
++               RA_LENGTH_T *pActualSize)
++{
++    RA_LENGTH_T uSize = uRequestSize;
++    BT *pBT = NULL;
++    PVRSRV_ERROR eError = PVRSRV_OK;
++
++    if (pArena == NULL || uSize == 0)
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: One of the necessary parameters is 0", __func__));
++        return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++
++    OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
++    PVR_ASSERT(is_arena_valid(pArena));
++
++    /* Align the requested size to the Arena Quantum */
++    uSize = ((uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1));
++
++    /* Must be a power of 2 or 0 */
++    PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
++
++    if (uAlignment > 1)
++    {
++        if (base != ((base + uAlignment - 1) & ~(uAlignment - 1)))
++        {
++            PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, unlock_);
++        }
++    }
++
++    /* Find if the segment in the range exists and is free
++     * Check if the segment can be split
++     * Find the bucket that points to this segment
++     * Check that the free segment is in the free list
++     * remove the free segment
++     * split the segment into three segments one prior free, alloc range,
++     * free segment after the range.
++     * remove the allocated range segment from the free list
++     * hook up the prior and after segments back to free list
++     * For each free, find the bucket the segment should go to
++     */
++
++    pBT = RA_Find_BT_VARange(pArena, base, uSize, uImportFlags);
++
++    if (pBT == NULL)
++    {
++        PVR_GOTO_WITH_ERROR(eError,
++                            PVRSRV_ERROR_RA_REQUEST_VIRT_ADDR_FAIL,
++                            unlock_);
++    }
++
++    /* Remove the boundary tag from the free list */
++    _FreeListRemove (pArena, pBT);
++
++    /* if the requested VA starts in the middle of the BT, split the BT accordingly */
++    if (base > pBT->base)
++    {
++        BT *pNeighbour;
++        pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(base - pBT->base));
++        /* partition the buffer, create a new boundary tag */
++        if (pNeighbour == NULL)
++        {
++            /* Put pBT back in the list */
++            _FreeListInsert (pArena, pBT);
++            PVR_LOG_GOTO_WITH_ERROR("_SegmentSplit (1)", eError,
++                                    PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL,
++                                    unlock_);
++        }
++
++        /* Insert back the free BT to the free list */
++        _FreeListInsert(pArena, pBT);
++        pBT = pNeighbour;
++    }
++
++    /* the segment might be too big, if so, discard the back of the segment */
++    if (pBT->uSize > uSize)
++    {
++        BT *pNeighbour;
++        pNeighbour = _SegmentSplit(pBT, uSize);
++        /* partition the buffer, create a new boundary tag */
++        if (pNeighbour == NULL)
++        {
++            /* Put pBT back in the list */
++            _FreeListInsert (pArena, pBT);
++            PVR_LOG_GOTO_WITH_ERROR("_SegmentSplit (2)", eError,
++                                    PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL,
++                                    unlock_);
++        }
++
++        /* Insert back the free BT to the free list */
++        _FreeListInsert (pArena, pNeighbour);
++    }
++
++    pBT->type = btt_live;
++
++    if (!HASH_Insert_Extended (pArena->pSegmentHash, &base, (uintptr_t)pBT))
++    {
++        _FreeBT (pArena, pBT);
++        PVR_GOTO_WITH_ERROR(eError,
++                            PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED,
++                            unlock_);
++    }
++
++    if (pActualSize != NULL)
++    {
++        *pActualSize = uSize;
++    }
++
++    pArena->ui64FreeArenaSize -= uSize;
++
++unlock_:
++    OSLockRelease(pArena->hLock);
++
++    return eError;
++}
++
++IMG_INTERNAL void
++RA_Free(RA_ARENA *pArena, RA_BASE_T base)
++{
++    BT *pBT;
++
++    PVR_ASSERT(pArena != NULL);
++
++    if (pArena == NULL)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: invalid parameter - pArena", __func__));
++        return;
++    }
++
++    OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
++    PVR_ASSERT(is_arena_valid(pArena));
++
++    PVR_DPF((PVR_DBG_MESSAGE, "%s: name='%s', base=0x%llx", __func__, pArena->name,
++             (unsigned long long)base));
++
++    pBT = (BT *) HASH_Remove_Extended(pArena->pSegmentHash, &base);
++    PVR_ASSERT(pBT != NULL);
++
++    if (pBT)
++    {
++        pArena->ui64FreeArenaSize += pBT->uSize;
++
++        PVR_ASSERT(pBT->base == base);
++        _FreeBT(pArena, pBT);
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR,
++                 "%s: no resource span found for given base (0x%llX) in arena %s",
++                 __func__, (unsigned long long) base, pArena->name));
++    }
++
++    PVR_ASSERT(is_arena_valid(pArena));
++    OSLockRelease(pArena->hLock);
++}
++
++IMG_INTERNAL void
++RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats)
++{
++    psRAStats->ui64TotalArenaSize = pArena->ui64TotalArenaSize;
++    psRAStats->ui64FreeArenaSize = pArena->ui64FreeArenaSize;
++}
++
++/* #define _DBG(...) PVR_LOG((__VA_ARGS__)) */
++#define _DBG(...)
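++
++/* Illustrative usage sketch of the arena API (not called by the driver;
++ * error handling is elided and MY_HEAP_BASE/MY_HEAP_SIZE are hypothetical
++ * values). A caller typically creates an arena, seeds it with a span, then
++ * allocates and frees blocks from it:
++ *
++ *     RA_ARENA *pArena = RA_Create("example", 12, RA_LOCKCLASS_0,
++ *                                  NULL, NULL, NULL, RA_POLICY_DEFAULT);
++ *     RA_BASE_T uiBase;
++ *
++ *     RA_Add(pArena, MY_HEAP_BASE, MY_HEAP_SIZE, 0, NULL);
++ *     if (RA_Alloc(pArena, 0x2000, 1, 0, 0x1000, "example alloc",
++ *                  &uiBase, NULL, NULL) == PVRSRV_OK)
++ *     {
++ *         RA_Free(pArena, uiBase);
++ *     }
++ *     RA_Delete(pArena);
++ *
++ * With uLog2Quantum = 12 the quantum is 4KiB, so 0x2000 is already a
++ * quantum multiple, and the 0x1000 alignment satisfies the power-of-two
++ * requirement asserted in RA_Alloc().
++ */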
++ ++IMG_INTERNAL RA_ARENA_ITERATOR * ++RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments) ++{ ++ RA_ARENA_ITERATOR *pIter = OSAllocMem(sizeof(*pIter)); ++ PVR_LOG_RETURN_IF_FALSE(pIter != NULL, "OSAllocMem", NULL); ++ ++ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass); ++ ++ pIter->pArena = pArena; ++ pIter->bIncludeFreeSegments = bIncludeFreeSegments; ++ ++ RA_IteratorReset(pIter); ++ ++ return pIter; ++} ++ ++IMG_INTERNAL void ++RA_IteratorRelease(RA_ARENA_ITERATOR *pIter) ++{ ++ PVR_ASSERT(pIter != NULL); ++ ++ if (pIter == NULL) ++ { ++ return; ++ } ++ ++ OSLockRelease(pIter->pArena->hLock); ++ ++ OSFreeMem(pIter); ++} ++ ++IMG_INTERNAL void ++RA_IteratorReset(RA_ARENA_ITERATOR *pIter) ++{ ++ BT *pNext; ++ ++ PVR_ASSERT(pIter != NULL); ++ ++ pNext = pIter->pArena->pHeadSegment; ++ ++ /* find next element if we're not including the free ones */ ++ if (!pIter->bIncludeFreeSegments) ++ { ++ while (pNext != NULL && pNext->type != btt_live) ++ { ++ _DBG("(%s()) skipping segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " ++ "type=%u", __func__, (void *) pNext->base, pNext->uSize, ++ pNext->type); ++ pNext = pNext->pNextSegment; ++ } ++ } ++ ++ _DBG("(%s()) current segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " ++ "type=%u", __func__, ++ pNext != NULL ? (void *) pNext->base : NULL, ++ pNext != NULL ? pNext->uSize : 0, ++ pNext != NULL ? pNext->type : 0); ++ ++ /* if bIncludeFreeSegments then pNext here is either a valid pointer to ++ * "live" segment or NULL and if !bIncludeFreeSegments then it's either ++ * a valid pointer to any next segment or NULL */ ++ pIter->pCurrent = pNext; ++} ++ ++IMG_INTERNAL IMG_BOOL ++RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData) ++{ ++ BT *pNext; ++ ++ PVR_ASSERT(pIter != NULL); ++ ++ if (pIter == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "pIter in %s() is NULL", __func__)); ++ return IMG_FALSE; ++ } ++ ++ if (pIter->pCurrent == NULL) ++ { ++ return IMG_FALSE; ++ } ++ ++ pNext = pIter->pCurrent; ++ ++ _DBG("(%s()) current segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " ++ "type=%u", __func__, (void *) pNext->base, pNext->uSize, ++ pNext->type); ++ ++ pData->uiAddr = pIter->pCurrent->base; ++ pData->uiSize = pIter->pCurrent->uSize; ++ pData->bFree = pIter->pCurrent->type == btt_free; ++ ++ /* combine contiguous segments */ ++ while ((pNext = pNext->pNextSegment) != NULL && ++ pNext->type == btt_live && ++ pNext->base == pData->uiAddr + pData->uiSize) ++ { ++ _DBG("(%s()) combining segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " ++ "type=%u", __func__, (void *) pNext->base, pNext->uSize, ++ pNext->type); ++ pData->uiSize += pNext->uSize; ++ } ++ ++ /* advance to next */ ++ if (!pIter->bIncludeFreeSegments) ++ { ++ while (pNext != NULL && pNext->type != btt_live) ++ { ++ _DBG("(%s()) skipping segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " ++ "type=%u", __func__, (void *) pNext->base, pNext->uSize, ++ pNext->type); ++ pNext = pNext->pNextSegment; ++ } ++ } ++ ++ _DBG("(%s()) next segment=%px, size=0x%" IMG_UINT64_FMTSPECx ", " ++ "type=%u", __func__, ++ pNext != NULL ? (void *) pNext->base : NULL, ++ pNext != NULL ? pNext->uSize : 0, ++ pNext != NULL ? 
pNext->type : 0); ++ ++ /* if bIncludeFreeSegments then pNext here is either a valid pointer to ++ * "live" segment or NULL and if !bIncludeFreeSegments then it's either ++ * a valid pointer to any next segment or NULL */ ++ pIter->pCurrent = pNext; ++ ++ return IMG_TRUE; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++RA_BlockDump(RA_ARENA *pArena, void (*pfnLogDump)(void*, IMG_CHAR*, ...), void *pPrivData) ++{ ++ RA_ARENA_ITERATOR *pIter = NULL; ++ RA_ITERATOR_DATA sIterData; ++ const IMG_UINT32 uiLineWidth = 64; ++ ++ IMG_UINT32 **papRegionArray = NULL; ++ IMG_UINT32 uiRegionCount = 0; ++ ++ const IMG_UINT32 uiChunkSize = 32; /* 32-bit chunks */ ++ const IMG_UINT32 uiChunkCount = (uiLineWidth / uiChunkSize) * 2; /* This should equal 2 or a multiple of 2 */ ++ const IMG_UINT32 uiRegionSize = uiChunkSize * uiChunkCount; ++ ++ IMG_UINT32 uiRecognisedQuantum = 0; ++ ++ IMG_UINT32 uiLastBase = 0; ++ IMG_UINT32 uiLastSize = 0; ++ ++ IMG_UINT32 i; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ /* -- papRegionArray Structure -- ++ * papRegionArray Indexes ++ * | Chunk 0 Chunk 1 Chunk 2 Chunk 3 ++ * v |------------|------------|------------|------------| ++ * [0] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | -- | ++ * [1] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | ++ * [2] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | ++ * [3] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | Regions ++ * [4] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | ++ * [5] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | | ++ * [6] -> | 0000000000 | 0000000000 | 0000000000 | 0000000000 | -- | ++ * ... ++ */ ++ ++ if (pArena == NULL || pfnLogDump == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ pIter = RA_IteratorAcquire(pArena, IMG_FALSE); ++ PVR_LOG_RETURN_IF_NOMEM(pIter, "RA_IteratorAcquire"); ++ ++ uiRecognisedQuantum = pArena->uQuantum > 0 ? 
pArena->uQuantum : 4096; ++ ++ while (RA_IteratorNext(pIter, &sIterData)) ++ { ++ if (sIterData.uiAddr >= uiLastBase) ++ { ++ uiLastBase = sIterData.uiAddr; ++ uiLastSize = sIterData.uiSize; ++ } ++ } ++ ++ uiRegionCount = ((uiLastBase + uiLastSize) / uiRecognisedQuantum) / uiRegionSize; ++ if (((uiLastBase + uiLastSize) / uiRecognisedQuantum) % uiRegionSize != 0 ++ || uiRegionCount == 0) ++ { ++ uiRegionCount += 1; ++ } ++ ++ papRegionArray = OSAllocZMem(sizeof(IMG_UINT32*) * uiRegionCount); ++ PVR_LOG_GOTO_IF_NOMEM(papRegionArray, eError, cleanup_array); ++ ++ RA_IteratorReset(pIter); ++ ++ while (RA_IteratorNext(pIter, &sIterData)) ++ { ++ IMG_UINT32 uiAddrRegionIdx = 0; ++ IMG_UINT32 uiAddrRegionOffset = 0; ++ IMG_UINT32 uiAddrChunkIdx = 0; ++ IMG_UINT32 uiAddrChunkOffset = 0; ++ IMG_UINT32 uiAddrChunkShift; /* The bit-shift needed to fill the chunk */ ++ ++ IMG_UINT32 uiQuantisedSize; ++ IMG_UINT32 uiQuantisedSizeMod; ++ IMG_UINT32 uiAllocLastRegionIdx = 0; /* The last region that this alloc appears in */ ++ IMG_UINT32 uiAllocChunkSize = 0; /* The number of chunks this alloc spans */ ++ ++ IMG_INT32 iBitSetCount = 0; ++ IMG_INT32 iOverflowCheck = 0; ++ IMG_INT32 iOverflow = 0; ++ IMG_UINT32 uiRegionIdx = 0; ++ IMG_UINT32 uiChunkIdx = 0; ++ ++#if defined(__KERNEL__) && defined(__linux__) ++ IMG_UINT64 uiDataDivRecQuant = sIterData.uiSize; ++ uiQuantisedSizeMod = do_div(uiDataDivRecQuant, uiRecognisedQuantum); ++ uiQuantisedSize = (IMG_UINT32)uiDataDivRecQuant; ++ ++ uiDataDivRecQuant = sIterData.uiAddr; ++ do_div(uiDataDivRecQuant, uiRecognisedQuantum); ++ uiAddrRegionOffset = do_div(uiDataDivRecQuant, uiRegionSize); ++ uiAddrRegionIdx = (IMG_UINT32)uiDataDivRecQuant; ++ ++ uiDataDivRecQuant = sIterData.uiAddr; ++ do_div(uiDataDivRecQuant, uiRecognisedQuantum); ++#else ++ IMG_UINT64 uiDataDivRecQuant = sIterData.uiAddr / uiRecognisedQuantum; ++ uiAddrRegionIdx = uiDataDivRecQuant / uiRegionSize; ++ uiAddrRegionOffset = uiDataDivRecQuant % uiRegionSize; ++ ++ uiQuantisedSize = sIterData.uiSize / uiRecognisedQuantum; ++ uiQuantisedSizeMod = sIterData.uiSize % uiRecognisedQuantum; ++#endif ++ uiAddrChunkIdx = uiAddrRegionOffset / uiChunkSize; ++ uiAddrChunkOffset = uiAddrRegionOffset % uiChunkSize; ++ uiAddrChunkShift = uiChunkSize - uiAddrChunkOffset; ++ uiRegionIdx = uiAddrRegionIdx; ++ uiChunkIdx = uiAddrChunkIdx; ++ ++ if ((uiQuantisedSize == 0) || (uiQuantisedSizeMod != 0)) ++ { ++ uiQuantisedSize += 1; ++ } ++ ++#if defined(__KERNEL__) && defined(__linux__) ++ uiDataDivRecQuant += uiQuantisedSize - 1; ++ do_div(uiDataDivRecQuant, uiRegionSize); ++ uiAllocLastRegionIdx = (IMG_UINT32)uiDataDivRecQuant; ++#else ++ uiAllocLastRegionIdx = ++ (uiDataDivRecQuant + uiQuantisedSize - 1) / uiRegionSize; ++#endif ++ uiAllocChunkSize = (uiAddrChunkOffset + uiQuantisedSize) / uiChunkSize; ++ ++ if ((uiAddrChunkOffset + uiQuantisedSize) % uiChunkSize > 0) ++ { ++ uiAllocChunkSize += 1; ++ } ++ ++ iBitSetCount = uiQuantisedSize; ++ iOverflowCheck = uiQuantisedSize - uiAddrChunkShift; ++ ++ if (iOverflowCheck > 0) ++ { ++ iOverflow = iOverflowCheck; ++ iBitSetCount = uiQuantisedSize - iOverflow; ++ } ++ ++ /** ++ * Allocate memory to represent the chunks for each region the allocation ++ * spans. If one was already allocated before don't do it again. 
++ */ ++ for (i = 0; uiAddrRegionIdx + i <= uiAllocLastRegionIdx; i++) ++ { ++ if (papRegionArray[uiAddrRegionIdx + i] == NULL) ++ { ++ papRegionArray[uiAddrRegionIdx + i] = OSAllocZMem(sizeof(IMG_UINT32) * uiChunkCount); ++ PVR_LOG_GOTO_IF_NOMEM(papRegionArray[uiAddrRegionIdx + i], eError, cleanup_regions); ++ } ++ } ++ ++ for (i = 0; i < uiAllocChunkSize; i++) ++ { ++ if (uiChunkIdx >= uiChunkCount) ++ { ++ uiRegionIdx++; ++ uiChunkIdx = 0; ++ } ++ ++ if ((IMG_UINT32)iBitSetCount != uiChunkSize) ++ { ++ IMG_UINT32 uiBitMask = 0; ++ ++ uiBitMask = (1U << iBitSetCount) - 1; ++ uiBitMask <<= (uiAddrChunkShift - iBitSetCount); ++ ++ papRegionArray[uiRegionIdx][uiChunkIdx] |= uiBitMask; ++ } ++ else ++ { ++ papRegionArray[uiRegionIdx][uiChunkIdx] |= 0xFFFFFFFF; ++ } ++ ++ uiChunkIdx++; ++ iOverflow -= uiChunkSize; ++ iBitSetCount = iOverflow >= 0 ? uiChunkSize : uiChunkSize + iOverflow; ++ if (iOverflow < 0) ++ { ++ uiAddrChunkShift = 32; ++ } ++ } ++ } ++ ++ RA_IteratorRelease(pIter); ++ ++ pfnLogDump(pPrivData, "~~~ '%s' Resource Arena Block Dump", pArena->name); ++ pfnLogDump(pPrivData, " Block Size: %uB", uiRecognisedQuantum); ++ pfnLogDump(pPrivData, ++ " Span Memory Usage: %"IMG_UINT64_FMTSPEC"B" ++ " Free Span Memory: %"IMG_UINT64_FMTSPEC"B", ++ pArena->ui64TotalArenaSize, ++ pArena->ui64FreeArenaSize); ++ pfnLogDump(pPrivData, ++ "==============================================================================="); ++ ++ for (i = 0; i < uiRegionCount; i++) ++ { ++ static IMG_BOOL bEmptyRegion = IMG_FALSE; ++ if (papRegionArray[i] != NULL) ++ { ++ IMG_CHAR pszLine[65]; ++ IMG_UINT32 j; ++ ++ bEmptyRegion = IMG_FALSE; ++ pszLine[64] = '\0'; ++ ++ for (j = 0; j < uiChunkCount; j+=2) ++ { ++ IMG_UINT8 uiBit = 0; ++ IMG_UINT32 k; ++ IMG_UINT64 uiLineAddress = ++ (i * uiRegionSize + (j >> 1) * uiLineWidth) * uiRecognisedQuantum; ++ ++ /** ++ * Move through each of the 32 bits in the chunk and check their ++ * value. If it is 1 we set the corresponding character to '#', ++ * otherwise it is set to '.' representing empty space ++ */ ++ for (k = 1 << 31; k != 0; k >>= 1) ++ { ++ pszLine[uiBit] = papRegionArray[i][j] & k ? '#' : '.'; ++ pszLine[32 + uiBit] = papRegionArray[i][j+1] & k ? '#' : '.'; ++ uiBit++; ++ } ++ ++ pfnLogDump(pPrivData, ++ "| 0x%08"IMG_UINT64_FMTSPECx" | %s", ++ uiLineAddress, ++ pszLine); ++ } ++ OSFreeMem(papRegionArray[i]); ++ } ++ else ++ { ++ /* We only print this once per gap of n regions */ ++ if (!bEmptyRegion) ++ { ++ pfnLogDump(pPrivData, " ...."); ++ bEmptyRegion = IMG_TRUE; ++ } ++ } ++ } ++ OSFreeMem(papRegionArray); ++ return eError; ++ ++cleanup_regions: ++ for (i = 0; i < uiRegionCount; i++) ++ { ++ if (papRegionArray[i] != NULL) ++ { ++ OSFreeMem(papRegionArray[i]); ++ } ++ } ++ ++cleanup_array: ++ OSFreeMem(papRegionArray); ++ RA_IteratorRelease(pIter); ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/ra.h b/drivers/gpu/drm/img-rogue/ra.h +new file mode 100644 +index 000000000000..d306af7eda9f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/ra.h +@@ -0,0 +1,386 @@ ++/*************************************************************************/ /*! ++@File ++@Title Resource Allocator API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef RA_H
++#define RA_H
++
++#include "img_types.h"
++#include "pvrsrv_error.h"
++
++#define RA_MAX_NAME_LENGTH 20
++
++/** Resource arena.
++ * struct _RA_ARENA_ deliberately opaque
++ */
++typedef struct _RA_ARENA_ RA_ARENA; //PRQA S 3313
++
++/** Resource arena's iterator.
++ * struct _RA_ARENA_ITERATOR_ deliberately opaque
++ */
++typedef struct _RA_ARENA_ITERATOR_ RA_ARENA_ITERATOR;
++
++typedef struct _RA_ITERATOR_DATA_ {
++ IMG_UINT64 uiAddr;
++ IMG_UINT64 uiSize;
++ IMG_BOOL bFree;
++} RA_ITERATOR_DATA;
++
++/** Resource arena usage statistics.
++ * struct _RA_USAGE_STATS
++ */
++typedef struct _RA_USAGE_STATS {
++ IMG_UINT64 ui64TotalArenaSize;
++ IMG_UINT64 ui64FreeArenaSize;
++} RA_USAGE_STATS, *PRA_USAGE_STATS;
++
++/*
++ * Per-Arena handle - this is private data for the caller of the RA.
++ * The RA knows nothing about this data. It is given it in RA_Create, and
++ * promises to pass it to calls to the ImportAlloc and ImportFree callbacks.
++ */
++typedef IMG_HANDLE RA_PERARENA_HANDLE;
++/*
++ * Per-Import handle - this is private data for the caller of the RA.
++ * The RA knows nothing about this data. It is given it on a per-import
++ * basis, either the "initial" import at RA_Create time, or further imports
++ * via the ImportAlloc callback. It sends it back via the ImportFree callback,
++ * and also provides it in answer to any RA_Alloc request to signify from
++ * which "import" the allocation came.
++ */ ++typedef IMG_HANDLE RA_PERISPAN_HANDLE; ++ ++typedef IMG_UINT64 RA_BASE_T; ++typedef IMG_UINT32 RA_LOG2QUANTUM_T; ++typedef IMG_UINT64 RA_LENGTH_T; ++ ++/* Lock classes: describes the level of nesting between different arenas. */ ++#define RA_LOCKCLASS_0 0 ++#define RA_LOCKCLASS_1 1 ++#define RA_LOCKCLASS_2 2 ++ ++#define RA_NO_IMPORT_MULTIPLIER 1 ++ ++/* ++ * Allocation Policies that govern the resource areas. ++ * */ ++ ++/* --- Resource allocation policy definitions --- ++* | 31.........4|......3....|........2.............|1...................0| ++* | Reserved | No split | Area bucket selection| Alloc node selection| ++*/ ++ ++/* ++ * Fast allocation policy allows to pick the first node ++ * that satisfies the request. ++ * It is the default policy for all arenas. ++ * */ ++#define RA_POLICY_ALLOC_FAST (0U) ++/* ++ * Optimal allocation policy allows to pick the lowest size node ++ * that satisfies the request. This picking policy helps in reducing the fragmentation. ++ * This minimises the necessity to split the nodes more often as the optimal ++ * ones are picked. ++ * As a result any future higher size allocation requests are likely to succeed ++ */ ++#define RA_POLICY_ALLOC_OPTIMAL (1U) ++#define RA_POLICY_ALLOC_NODE_SELECT_MASK (3U) ++ ++/* ++ * Bucket selection policies ++ * */ ++/* Assured bucket policy makes sure the selected bucket is guaranteed ++ * to satisfy the given request. Generally Nodes picked up from such a ++ * bucket need to be further split. However picking node that belongs to this ++ * bucket is likely to succeed and thus promises better response times */ ++#define RA_POLICY_BUCKET_ASSURED_FIT (0U) ++/* ++ * Best fit bucket policy selects a bucket with free nodes that are likely ++ * to satisfy the request and nodes that are close to the requested size. ++ * Nodes picked up from this bucket may likely to satisfy the request but not ++ * guaranteed. Failing to satisfy the request from this bucket mean further ++ * higher size buckets are selected in the later iterations till the request ++ * is satisfied. ++ * ++ * Hence response times may vary depending on availability of free nodes ++ * that satisfy the request. ++ * */ ++#define RA_POLICY_BUCKET_BEST_FIT (4U) ++#define RA_POLICY_BUCKET_MASK (4U) ++ ++/* This flag ensures the imports will not be split up and Allocations will always get ++ * their own import ++ */ ++#define RA_POLICY_NO_SPLIT (8U) ++#define RA_POLICY_NO_SPLIT_MASK (8U) ++ ++/* ++ * Default Arena Policy ++ * */ ++#define RA_POLICY_DEFAULT (RA_POLICY_ALLOC_FAST | RA_POLICY_BUCKET_ASSURED_FIT) ++ ++/* ++ * Flags in an "import" must match the flags for an allocation ++ */ ++typedef IMG_UINT64 RA_FLAGS_T; ++ ++/*************************************************************************/ /*! ++@Function Callback function PFN_RA_ALLOC ++@Description RA import allocate function ++@Input RA_PERARENA_HANDLE RA handle ++@Input RA_LENGTH_T Request size ++@Input RA_FLAGS_T RA flags ++@Input IMG_CHAR Annotation ++@Input RA_BASE_T Allocation base ++@Input RA_LENGTH_T Actual size ++@Input RA_PERISPAN_HANDLE Per import private data ++@Return PVRSRV_ERROR PVRSRV_OK or error code ++*/ /**************************************************************************/ ++typedef PVRSRV_ERROR (*PFN_RA_ALLOC)(RA_PERARENA_HANDLE, ++ RA_LENGTH_T, ++ RA_FLAGS_T, ++ const IMG_CHAR*, ++ RA_BASE_T*, ++ RA_LENGTH_T*, ++ RA_PERISPAN_HANDLE*); ++ ++/*************************************************************************/ /*! 
++@Function Callback function PFN_RA_FREE
++@Description RA free imported allocation
++@Input RA_PERARENA_HANDLE RA handle
++@Input RA_BASE_T Allocation base
++@Output RA_PERISPAN_HANDLE Per import private data
++*/ /**************************************************************************/
++typedef void (*PFN_RA_FREE)(RA_PERARENA_HANDLE,
++ RA_BASE_T,
++ RA_PERISPAN_HANDLE);
++
++/**
++ * @Function RA_Create
++ *
++ * @Description To create a resource arena.
++ *
++ * @Input name - the name of the arena for diagnostic purposes.
++ * @Input uLog2Quantum - the arena allocation quantum.
++ * @Input ui32LockClass - the lock class level this arena uses.
++ * @Input imp_alloc - a resource allocation callback or 0.
++ * @Input imp_free - a resource de-allocation callback or 0.
++ * @Input per_arena_handle - private handle passed to alloc and free or 0.
++ * @Input ui32PolicyFlags - Policies that govern the arena.
++ * @Return pointer to arena, or NULL.
++ */
++RA_ARENA *
++RA_Create(IMG_CHAR *name,
++ /* subsequent imports: */
++ RA_LOG2QUANTUM_T uLog2Quantum,
++ IMG_UINT32 ui32LockClass,
++ PFN_RA_ALLOC imp_alloc,
++ PFN_RA_FREE imp_free,
++ RA_PERARENA_HANDLE per_arena_handle,
++ IMG_UINT32 ui32PolicyFlags);
++
++/**
++ * @Function RA_Create_With_Span
++ *
++ * @Description
++ *
++ * Creates a resource arena and initialises it with a given resource span.
++ *
++ * @Input name - String briefly describing the RA's purpose.
++ * @Input uLog2Quantum - the arena allocation quantum.
++ * @Input ui64CpuBase - CPU Physical Base Address of the RA.
++ * @Input ui64SpanDevBase - Device Physical Base Address of the RA.
++ * @Input ui64SpanSize - Size of the span to add to the created RA.
++ * @Return pointer to arena, or NULL.
++*/
++RA_ARENA *
++RA_Create_With_Span(IMG_CHAR *name,
++ RA_LOG2QUANTUM_T uLog2Quantum,
++ IMG_UINT64 ui64CpuBase,
++ IMG_UINT64 ui64SpanDevBase,
++ IMG_UINT64 ui64SpanSize);
++
++/**
++ * @Function RA_Delete
++ *
++ * @Description
++ *
++ * To delete a resource arena. All resources allocated from the arena
++ * must be freed before deleting the arena.
++ *
++ * @Input pArena - the arena to delete.
++ * @Return None
++ */
++void
++RA_Delete(RA_ARENA *pArena);
++
++/**
++ * @Function RA_Add
++ *
++ * @Description
++ *
++ * To add a resource span to an arena. The span must not overlap with
++ * any span previously added to the arena.
++ *
++ * @Input pArena - the arena to add a span into.
++ * @Input base - the base of the span.
++ * @Input uSize - the extent of the span.
++ * @Input uFlags - import flags to associate with the span.
++ * @Input hPriv - handle associated with the span (reserved for the user's use)
++ * @Return IMG_TRUE - success, IMG_FALSE - failure
++ */
++IMG_BOOL
++RA_Add(RA_ARENA *pArena,
++ RA_BASE_T base,
++ RA_LENGTH_T uSize,
++ RA_FLAGS_T uFlags,
++ RA_PERISPAN_HANDLE hPriv);
++
++/**
++ * @Function RA_Alloc
++ *
++ * @Description To allocate a resource from an arena.
++ *
++ * @Input pArena - the arena
++ * @Input uRequestSize - the size of resource segment requested.
++ * @Input uImportMultiplier - Import x-times the uRequestSize
++ * for future RA_Alloc calls.
++ * Use RA_NO_IMPORT_MULTIPLIER to import the exact size.
++ * @Input uImportFlags - flags influencing allocation policy.
++ * @Input uAlignment - the alignment constraint required for the
++ * allocated segment, use 0 if alignment not required.
++ * @Input pszAnnotation - a string to describe the allocation
++ * @Output base - the allocated base resource
++ * @Output pActualSize - the actual size of the resource segment allocated,
++ * typically rounded up by quantum.
++ * @Output phPriv - the user reference associated with allocated
++ * resource span.
++ * @Return PVRSRV_OK - success
++ */
++PVRSRV_ERROR
++RA_Alloc(RA_ARENA *pArena,
++ RA_LENGTH_T uRequestSize,
++ IMG_UINT8 uImportMultiplier,
++ RA_FLAGS_T uImportFlags,
++ RA_LENGTH_T uAlignment,
++ const IMG_CHAR *pszAnnotation,
++ RA_BASE_T *base,
++ RA_LENGTH_T *pActualSize,
++ RA_PERISPAN_HANDLE *phPriv);
++
++/**
++ * @Function RA_Alloc_Range
++ *
++ * @Description
++ *
++ * To allocate a resource at a specified base from an arena.
++ *
++ * @Input pArena - the arena
++ * @Input uRequestSize - the size of resource segment requested.
++ * @Input uImportFlags - flags influencing allocation policy.
++ * @Input uAlignment - the alignment constraint required for the
++ * allocated segment, use 0 if alignment not required.
++ * @Input base - the requested base of the resource segment.
++ * @Output pActualSize - the actual size of the resource segment allocated,
++ * typically rounded up by quantum.
++ * @Return PVRSRV_OK - success
++ */
++PVRSRV_ERROR
++RA_Alloc_Range(RA_ARENA *pArena,
++ RA_LENGTH_T uRequestSize,
++ RA_FLAGS_T uImportFlags,
++ RA_LENGTH_T uAlignment,
++ RA_BASE_T base,
++ RA_LENGTH_T *pActualSize);
++
++/**
++ * @Function RA_Free
++ *
++ * @Description To free a resource segment.
++ *
++ * @Input pArena - the arena the segment was originally allocated from.
++ * @Input base - the base of the resource span to free.
++ *
++ * @Return None
++ */
++void
++RA_Free(RA_ARENA *pArena, RA_BASE_T base);
++
++/**
++ * @Function RA_Get_Usage_Stats
++ *
++ * @Description To collect the arena usage statistics.
++ *
++ * @Input pArena - the arena to acquire usage statistics from.
++ * @Input psRAStats - the buffer to hold the usage statistics of the arena.
++ *
++ * @Return None
++ */
++IMG_INTERNAL void
++RA_Get_Usage_Stats(RA_ARENA *pArena, PRA_USAGE_STATS psRAStats);
++
++IMG_INTERNAL RA_ARENA_ITERATOR *
++RA_IteratorAcquire(RA_ARENA *pArena, IMG_BOOL bIncludeFreeSegments);
++
++IMG_INTERNAL void
++RA_IteratorReset(RA_ARENA_ITERATOR *pIter);
++
++IMG_INTERNAL void
++RA_IteratorRelease(RA_ARENA_ITERATOR *pIter);
++
++IMG_INTERNAL IMG_BOOL
++RA_IteratorNext(RA_ARENA_ITERATOR *pIter, RA_ITERATOR_DATA *pData);
++
++/*************************************************************************/ /*!
++@Function RA_BlockDump
++@Description Debug dump of all memory allocations within the RA and the space
++ between. A '#' represents a block of memory (the arena's quantum
++ in size) that has been allocated whereas a '.' represents a free
++ block.
++@Input pArena The arena to dump.
++@Input pfnLogDump The dumping method.
++@Input pPrivData Data to be passed into the pfnLogDump method.
++*/ /**************************************************************************/
++IMG_INTERNAL PVRSRV_ERROR
++RA_BlockDump(RA_ARENA *pArena,
++ __printf(2, 3) void (*pfnLogDump)(void*, IMG_CHAR*, ...),
++ void *pPrivData);
++
++#endif
+diff --git a/drivers/gpu/drm/img-rogue/rgx_bridge.h b/drivers/gpu/drm/img-rogue/rgx_bridge.h
+new file mode 100644
+index 000000000000..fa4ca1ff50b3
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgx_bridge.h
+@@ -0,0 +1,243 @@
++/*************************************************************************/ /*!
++@File
++@Title RGX Bridge Functionality
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Header for the Rogue Bridge code
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef RGX_BRIDGE_H
++#define RGX_BRIDGE_H
++
++#include "pvr_bridge.h"
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#include "rgx_fwif_km.h"
++
++#define RGXFWINITPARAMS_VERSION 1
++#define RGXFWINITPARAMS_EXTENSION 128
++
++#include "common_rgxta3d_bridge.h"
++#include "common_rgxcmp_bridge.h"
++#if defined(SUPPORT_FASTRENDER_DM)
++#include "common_rgxtq2_bridge.h"
++#endif
++#if defined(SUPPORT_RGXTQ_BRIDGE)
++#include "common_rgxtq_bridge.h"
++#endif
++#if defined(SUPPORT_USC_BREAKPOINT)
++#include "common_rgxbreakpoint_bridge.h"
++#endif
++#include "common_rgxfwdbg_bridge.h"
++#if defined(PDUMP)
++#include "common_rgxpdump_bridge.h"
++#endif
++#include "common_rgxhwperf_bridge.h"
++#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE)
++#include "common_rgxregconfig_bridge.h"
++#endif
++#include "common_rgxkicksync_bridge.h"
++#include "common_rgxtimerquery_bridge.h"
++#if defined(SUPPORT_RGXRAY_BRIDGE)
++#include "common_rgxray_bridge.h"
++#endif
++/*
++ * Bridge Cmd Ids
++ */
++
++/* *REMEMBER* to update PVRSRV_BRIDGE_RGX_LAST if you add/remove a bridge
++ * group!
++ * Also you need to ensure all PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST offsets
++ * follow on from the previous bridge group's commands!
++ *
++ * If a bridge group is optional, ensure you *ALWAYS* define its index
++ * (e.g. PVRSRV_BRIDGE_RGXCMP is always 151, even if the feature is not
++ * defined).
If an optional bridge group is not defined you must still ++ * define PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST for it with an assigned ++ * value of 0. ++ */ ++ ++/* The RGX bridge groups start at 128 (PVRSRV_BRIDGE_RGX_FIRST) rather than ++ * follow-on from the other non-device bridge groups (meaning that they then ++ * won't be displaced if other non-device bridge groups are added). ++ */ ++ ++#define PVRSRV_BRIDGE_RGX_FIRST 128UL ++ ++/* 128: RGX TQ interface functions */ ++#define PVRSRV_BRIDGE_RGXTQ 128UL ++/* The RGXTQ bridge is conditional since the definitions in this header file ++ * support both the rogue and volcanic servers, but the RGXTQ bridge is not ++ * required at all on the Volcanic architecture. ++ */ ++#if defined(SUPPORT_RGXTQ_BRIDGE) ++#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST (PVRSRV_BRIDGE_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST (PVRSRV_BRIDGE_DISPATCH_LAST) ++#endif ++ ++/* 129: RGX Compute interface functions */ ++#define PVRSRV_BRIDGE_RGXCMP 129UL ++#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXCMP_CMD_LAST) ++ ++/* 130: RGX TA/3D interface functions */ ++#define PVRSRV_BRIDGE_RGXTA3D 130UL ++#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTA3D_CMD_LAST) ++ ++/* 131: RGX Breakpoint interface functions */ ++#define PVRSRV_BRIDGE_RGXBREAKPOINT 131UL ++#if defined(SUPPORT_USC_BREAKPOINT) ++#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXBREAKPOINT_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST) ++#endif ++ ++/* 132: RGX Debug/Misc interface functions */ ++#define PVRSRV_BRIDGE_RGXFWDBG 132UL ++#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXFWDBG_CMD_LAST) ++ ++/* 133: RGX PDump interface functions */ ++#define PVRSRV_BRIDGE_RGXPDUMP 133UL ++#if defined(PDUMP) ++#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST) ++#endif ++ ++/* 134: RGX HWPerf interface functions */ ++#define PVRSRV_BRIDGE_RGXHWPERF 134UL ++#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST) ++ ++/* 135: RGX Register Configuration interface functions */ ++#define PVRSRV_BRIDGE_RGXREGCONFIG 135UL ++#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) ++#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST + 1) ++#define 
PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXREGCONFIG_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST) ++#endif ++ ++/* 136: RGX kicksync interface */ ++#define PVRSRV_BRIDGE_RGXKICKSYNC 136UL ++#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST) ++ ++/* 137: RGX TQ2 interface */ ++#define PVRSRV_BRIDGE_RGXTQ2 137UL ++#if defined(SUPPORT_FASTRENDER_DM) ++#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ2_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST (0) ++#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST) ++#endif ++ ++/* 138: RGX timer query interface */ ++#define PVRSRV_BRIDGE_RGXTIMERQUERY 138UL ++#define PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTIMERQUERY_CMD_LAST) ++ ++/* 139: RGX Ray tracing interface */ ++#define PVRSRV_BRIDGE_RGXRAY 139UL ++#if defined(SUPPORT_RGXRAY_BRIDGE) ++#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST + 1) ++#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXRAY_CMD_LAST) ++#else ++#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST 0 ++#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST) ++#endif ++ ++#define PVRSRV_BRIDGE_RGX_LAST (PVRSRV_BRIDGE_RGXRAY) ++#define PVRSRV_BRIDGE_RGX_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST) ++ ++/* bit mask representing the enabled RGX bridges */ ++ ++static const IMG_UINT32 gui32RGXBridges = ++ (1U << (PVRSRV_BRIDGE_RGXTQ - PVRSRV_BRIDGE_RGX_FIRST)) ++#if defined(RGX_FEATURE_COMPUTE) || defined(__KERNEL__) ++ | (1U << (PVRSRV_BRIDGE_RGXCMP - PVRSRV_BRIDGE_RGX_FIRST)) ++#endif ++ | (1U << (PVRSRV_BRIDGE_RGXTA3D - PVRSRV_BRIDGE_RGX_FIRST)) ++#if defined(SUPPORT_BREAKPOINT) ++ | (1U << (PVRSRV_BRIDGE_BREAKPOINT - PVRSRV_BRIDGE_RGX_FIRST)) ++#endif ++ | (1U << (PVRSRV_BRIDGE_RGXFWDBG - PVRSRV_BRIDGE_RGX_FIRST)) ++#if defined(PDUMP) ++ | (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST)) ++#endif ++ | (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST)) ++#if defined(SUPPORT_REGCONFIG) ++ | (1U << (PVRSRV_BRIDGE_RGXREGCONFIG - PVRSRV_BRIDGE_RGX_FIRST)) ++#endif ++ | (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST)) ++#if defined(SUPPORT_FASTRENDER_DM) || defined(__KERNEL__) ++ | (1U << (PVRSRV_BRIDGE_RGXTQ2 - PVRSRV_BRIDGE_RGX_FIRST)) ++#endif ++#if defined(SUPPORT_TIMERQUERY) ++ | (1U << (PVRSRV_BRIDGE_RGXTIMERQUERY - PVRSRV_BRIDGE_RGX_FIRST)) ++#endif ++ | (1U << (PVRSRV_BRIDGE_RGXRAY - PVRSRV_BRIDGE_RGX_FIRST)) ++ ; ++/* bit field representing which RGX bridge groups may optionally not ++ * be present in the server ++ */ ++ ++#define RGX_BRIDGES_OPTIONAL \ ++ ( \ ++ 0 /* no RGX bridges are currently optional */ \ ++ ) ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_BRIDGE_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_bridge_init.c b/drivers/gpu/drm/img-rogue/rgx_bridge_init.c 
+new file mode 100644 +index 000000000000..1b1f81d788bc +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_bridge_init.c +@@ -0,0 +1,111 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR device dependent bridge Init/Deinit Module (kernel side) ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements device dependent PVR Bridge init/deinit code ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "rgx_bridge_init.h" ++#include "rgxdevice.h" ++ ++#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) ++PVRSRV_ERROR InitRGXTQ2Bridge(void); ++void DeinitRGXTQ2Bridge(void); ++#endif ++PVRSRV_ERROR InitRGXCMPBridge(void); ++void DeinitRGXCMPBridge(void); ++#if defined(SUPPORT_RGXRAY_BRIDGE) ++PVRSRV_ERROR InitRGXRAYBridge(void); ++void DeinitRGXRAYBridge(void); ++#endif ++ ++PVRSRV_ERROR DeviceDepBridgeInit(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) ++ { ++ eError = InitRGXCMPBridge(); ++ PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXCMPBridge"); ++ } ++ ++#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) ++ { ++ eError = InitRGXTQ2Bridge(); ++ PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXTQ2Bridge"); ++ } ++#endif ++ ++#if defined(SUPPORT_RGXRAY_BRIDGE) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && ++ RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 0) ++ { ++ eError = InitRGXRAYBridge(); ++ PVR_LOG_RETURN_IF_ERROR(eError, "InitRGXRAYBridge"); ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++void DeviceDepBridgeDeInit(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) ++ { ++ DeinitRGXCMPBridge(); ++ } ++ ++#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) ++ { ++ DeinitRGXTQ2Bridge(); ++ } ++#endif ++ ++#if defined(SUPPORT_RGXRAY_BRIDGE) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && ++ RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 0) ++ { ++ DeinitRGXRAYBridge(); ++ } ++#endif ++} +diff --git a/drivers/gpu/drm/img-rogue/rgx_bridge_init.h b/drivers/gpu/drm/img-rogue/rgx_bridge_init.h +new file mode 100644 +index 000000000000..10e8e72ca095 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_bridge_init.h +@@ -0,0 +1,55 @@ ++/*************************************************************************/ /*! ++@File ++@Title PVR device dependent bridge Init/Deinit Module (kernel side) ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements device dependent PVR Bridge init/deinit code ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_BRIDGE_INIT_H) ++#define RGX_BRIDGE_INIT_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "device.h" ++#include "rgxdevice.h" ++ ++PVRSRV_ERROR DeviceDepBridgeInit(PVRSRV_RGXDEV_INFO *psDevInfo); ++void DeviceDepBridgeDeInit(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++#endif /* RGX_BRIDGE_INIT_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_common.h b/drivers/gpu/drm/img-rogue/rgx_common.h +new file mode 100644 +index 000000000000..b6ae1500acc3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_common.h +@@ -0,0 +1,235 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Common Types and Defines Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Common types and definitions for RGX software ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. 
++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_COMMON_H ++#define RGX_COMMON_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include "img_defs.h" ++ ++/* Included to get the BVNC_KM_N defined and other feature defs */ ++#include "km/rgxdefs_km.h" ++ ++#include "rgx_common_asserts.h" ++ ++ ++/* Virtualisation validation builds are meant to test the VZ-related hardware without a fully virtualised platform. ++ * As such a driver can support either the vz-validation code or real virtualisation. ++ * Note: PVRSRV_VZ_NUM_OSID is the external build option, while RGX_NUM_OS_SUPPORTED is the internal symbol used in the DDK */ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) && (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)) ++#error "Invalid build configuration: Virtualisation support (PVRSRV_VZ_NUM_OSID > 1) and virtualisation validation code (SUPPORT_GPUVIRT_VALIDATION) are mutually exclusive." ++#endif ++ ++/* The RGXFWIF_DM defines assume only one of RGX_FEATURE_TLA or ++ * RGX_FEATURE_FASTRENDER_DM is present. Ensure this with a compile-time check. ++ */ ++#if defined(RGX_FEATURE_TLA) && defined(RGX_FEATURE_FASTRENDER_DM) ++#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!" ++#endif ++ ++/*! The master definition for data masters known to the firmware of RGX. ++ * When a new DM is added to this list, relevant entry should be added to ++ * RGX_HWPERF_DM enum list. ++ * The DM in a V1 HWPerf packet uses this definition. */ ++ ++typedef IMG_UINT32 RGXFWIF_DM; ++ ++#define RGXFWIF_DM_GP IMG_UINT32_C(0) ++/* Either TDM or 2D DM is present. 
The above build time error is present to verify this */ ++#define RGXFWIF_DM_2D IMG_UINT32_C(1) /* when RGX_FEATURE_TLA defined */ ++#define RGXFWIF_DM_TDM IMG_UINT32_C(1) /* when RGX_FEATURE_FASTRENDER_DM defined */ ++ ++#define RGXFWIF_DM_GEOM IMG_UINT32_C(2) ++#define RGXFWIF_DM_3D IMG_UINT32_C(3) ++#define RGXFWIF_DM_CDM IMG_UINT32_C(4) ++#define RGXFWIF_DM_RAY IMG_UINT32_C(5) ++#define RGXFWIF_DM_GEOM2 IMG_UINT32_C(6) ++#define RGXFWIF_DM_GEOM3 IMG_UINT32_C(7) ++#define RGXFWIF_DM_GEOM4 IMG_UINT32_C(8) ++ ++#define RGXFWIF_DM_LAST RGXFWIF_DM_GEOM4 ++ ++typedef IMG_UINT32 RGX_KICK_TYPE_DM; ++#define RGX_KICK_TYPE_DM_GP IMG_UINT32_C(0x001) ++#define RGX_KICK_TYPE_DM_TDM_2D IMG_UINT32_C(0x002) ++#define RGX_KICK_TYPE_DM_TA IMG_UINT32_C(0x004) ++#define RGX_KICK_TYPE_DM_3D IMG_UINT32_C(0x008) ++#define RGX_KICK_TYPE_DM_CDM IMG_UINT32_C(0x010) ++#define RGX_KICK_TYPE_DM_RTU IMG_UINT32_C(0x020) ++#define RGX_KICK_TYPE_DM_SHG IMG_UINT32_C(0x040) ++#define RGX_KICK_TYPE_DM_TQ2D IMG_UINT32_C(0x080) ++#define RGX_KICK_TYPE_DM_TQ3D IMG_UINT32_C(0x100) ++#define RGX_KICK_TYPE_DM_RAY IMG_UINT32_C(0x200) ++#define RGX_KICK_TYPE_DM_LAST IMG_UINT32_C(0x400) ++ ++/* Maximum number of DM in use: GP, 2D/TDM, GEOM, 3D, CDM, RDM, GEOM2, GEOM3, GEOM4 */ ++#define RGXFWIF_DM_MAX (RGXFWIF_DM_LAST + 1U) ++ ++/* ++ * Data Master Tags to be appended to resources created on behalf of each RGX ++ * Context. ++ */ ++#define RGX_RI_DM_TAG_KS 'K' ++#define RGX_RI_DM_TAG_CDM 'C' ++#define RGX_RI_DM_TAG_RC 'R' /* To be removed once TA/3D Timelines are split */ ++#define RGX_RI_DM_TAG_TA 'V' ++#define RGX_RI_DM_TAG_GEOM 'V' ++#define RGX_RI_DM_TAG_3D 'P' ++#define RGX_RI_DM_TAG_TDM 'T' ++#define RGX_RI_DM_TAG_TQ2D '2' ++#define RGX_RI_DM_TAG_TQ3D 'Q' ++#define RGX_RI_DM_TAG_RAY 'r' ++ ++/* ++ * Client API Tags to be appended to resources created on behalf of each ++ * Client API. ++ */ ++#define RGX_RI_CLIENT_API_GLES1 '1' ++#define RGX_RI_CLIENT_API_GLES3 '3' ++#define RGX_RI_CLIENT_API_VULKAN 'V' ++#define RGX_RI_CLIENT_API_EGL 'E' ++#define RGX_RI_CLIENT_API_OPENCL 'C' ++#define RGX_RI_CLIENT_API_OPENGL 'G' ++#define RGX_RI_CLIENT_API_SERVICES 'S' ++#define RGX_RI_CLIENT_API_WSEGL 'W' ++#define RGX_RI_CLIENT_API_ANDROID 'A' ++#define RGX_RI_CLIENT_API_LWS 'L' ++ ++/* ++ * Format a RI annotation for a given RGX Data Master context ++ */ ++#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do \ ++ { \ ++ (annotation)[0] = (dmTag); \ ++ (annotation)[1] = (clientAPI); \ ++ (annotation)[2] = '\0'; \ ++ } while (false) ++ ++/*! ++ ****************************************************************************** ++ * RGXFW Compiler alignment definitions ++ *****************************************************************************/ ++#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES) || defined(INTEGRITY_OS) ++#define RGXFW_ALIGN __attribute__ ((aligned (8))) ++#define RGXFW_ALIGN_DCACHEL __attribute__((aligned (64))) ++#elif defined(_MSC_VER) ++#define RGXFW_ALIGN __declspec(align(8)) ++#define RGXFW_ALIGN_DCACHEL __declspec(align(64)) ++#pragma warning (disable : 4324) ++#else ++#error "Align MACROS need to be defined for this compiler" ++#endif ++ ++/*! ++ ****************************************************************************** ++ * Force 8-byte alignment for structures allocated uncached. ++ *****************************************************************************/ ++#define UNCACHED_ALIGN RGXFW_ALIGN ++ ++ ++/*! 
++ ****************************************************************************** ++ * GPU Utilisation states ++ *****************************************************************************/ ++#define RGXFWIF_GPU_UTIL_STATE_IDLE (0U) ++#define RGXFWIF_GPU_UTIL_STATE_ACTIVE (1U) ++#define RGXFWIF_GPU_UTIL_STATE_BLOCKED (2U) ++#define RGXFWIF_GPU_UTIL_STATE_NUM (3U) ++#define RGXFWIF_GPU_UTIL_STATE_MASK IMG_UINT64_C(0x0000000000000003) ++ ++ ++/* ++ * Maximum amount of register writes that can be done by the register ++ * programmer (FW or META DMA). This is not a HW limitation, it is only ++ * a protection against malformed inputs to the register programmer. ++ */ ++#define RGX_MAX_NUM_REGISTER_PROGRAMMER_WRITES (128U) ++ ++/* FW common context priority. */ ++/*! ++ * @AddToGroup WorkloadContexts ++ * @{ ++ */ ++#define RGX_CTX_PRIORITY_REALTIME (INT32_MAX) ++#define RGX_CTX_PRIORITY_HIGH (2U) /*!< HIGH priority */ ++#define RGX_CTX_PRIORITY_MEDIUM (1U) /*!< MEDIUM priority */ ++#define RGX_CTX_PRIORITY_LOW (0) /*!< LOW priority */ ++/*! ++ * @} End of AddToGroup WorkloadContexts ++ */ ++ ++ ++/* ++ * Use of the 32-bit context property flags mask ++ * ( X = taken/in use, - = available/unused ) ++ * ++ * 0 ++ * | ++ * -------------------------------x ++ */ ++/* ++ * Context creation flags ++ * (specify a context's properties at creation time) ++ */ ++#define RGX_CONTEXT_FLAG_DISABLESLR (1UL << 0) /*!< Disable SLR */ ++ ++/* Bitmask of context flags allowed to be modified after context create. */ ++#define RGX_CONTEXT_FLAGS_WRITEABLE_MASK (RGX_CONTEXT_FLAG_DISABLESLR) ++ ++/* List of attributes that may be set for a context */ ++typedef enum _RGX_CONTEXT_PROPERTY_ ++{ ++ RGX_CONTEXT_PROPERTY_FLAGS = 0, /*!< Context flags */ ++} RGX_CONTEXT_PROPERTY; ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_COMMON_H */ ++ ++/****************************************************************************** ++ End of file ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_common_asserts.h b/drivers/gpu/drm/img-rogue/rgx_common_asserts.h +new file mode 100644 +index 000000000000..c571cc6f008e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_common_asserts.h +@@ -0,0 +1,73 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Common Types and Defines Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Common types and definitions for RGX software ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_COMMON_ASSERTS_H ++#define RGX_COMMON_ASSERTS_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/*! This macro represents a mask of LSBs that must be zero on data structure ++ * sizes and offsets to ensure they are 8-byte granular on types shared between ++ * the FW and host driver */ ++#define RGX_FW_ALIGNMENT_LSB (7U) ++ ++/*! Macro to test structure size alignment */ ++#define RGX_FW_STRUCT_SIZE_ASSERT(_a) \ ++ static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0U, \ ++ "Size of " #_a " is not properly aligned") ++ ++/*! Macro to test structure member alignment */ ++#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b) \ ++ static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0U, \ ++ "Offset of " #_a "." #_b " is not properly aligned") ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_COMMON_ASSERTS_H */ ++ ++/****************************************************************************** ++ End of file ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h b/drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h +new file mode 100644 +index 000000000000..c3e1333cdb0f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_compat_bvnc.h +@@ -0,0 +1,140 @@ ++/*************************************************************************/ /*! ++@File rgx_compat_bvnc.h ++@Title BVNC compatibility check utilities ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Utility functions used for packing BNC and V. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGX_COMPAT_BVNC_H)
++#define RGX_COMPAT_BVNC_H
++
++#include "img_types.h"
++
++#if defined(RGX_FIRMWARE) /* Services firmware */
++# include "rgxfw_utils.h"
++# define PVR_COMPAT_ASSERT RGXFW_ASSERT
++#elif !defined(RGX_BUILD_BINARY) /* Services host driver code */
++# include "pvr_debug.h"
++# define PVR_COMPAT_ASSERT PVR_ASSERT
++#else /* FW user-mode tools */
++# include <assert.h>
++# define PVR_COMPAT_ASSERT assert
++#endif
++
++/* 64bit endian conversion macros */
++#if defined(__BIG_ENDIAN__)
++#define RGX_INT64_TO_BE(N) (N)
++#define RGX_INT64_FROM_BE(N) (N)
++#define RGX_INT32_TO_BE(N) (N)
++#define RGX_INT32_FROM_BE(N) (N)
++#else
++#define RGX_INT64_TO_BE(N) \
++ ((((N) >> 56) & 0xff) \
++ | (((N) >> 40) & 0xff00) \
++ | (((N) >> 24) & 0xff0000) \
++ | (((N) >> 8) & 0xff000000U) \
++ | ((N) << 56) \
++ | (((N) & 0xff00) << 40) \
++ | (((N) & 0xff0000) << 24) \
++ | (((N) & 0xff000000U) << 8))
++#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N)
++
++#define RGX_INT32_TO_BE(N) \
++ ((((N) >> 24) & 0xff) \
++ | (((N) >> 8) & 0xff00) \
++ | ((N) << 24) \
++ | ((((N) & 0xff00) << 8)))
++#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N)
++#endif
++
++/******************************************************************************
++ * RGX Version packed into 64-bit (BVNC) to be used by Compatibility Check
++ *****************************************************************************/
++
++#define RGX_BVNC_PACK_SHIFT_B 48
++#define RGX_BVNC_PACK_SHIFT_V 32
++#define RGX_BVNC_PACK_SHIFT_N 16
++#define RGX_BVNC_PACK_SHIFT_C 0
++
++#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0xFFFF000000000000))
++#define RGX_BVNC_PACK_MASK_V (IMG_UINT64_C(0x0000FFFF00000000))
++#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000))
++#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF))
++
++#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B))
++#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_V) >> RGX_BVNC_PACK_SHIFT_V))
++#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N))
++#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC) & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C))
++
++#define RGX_BVNC_EQUAL(L,R,all,version,bvnc) do { \
++ (bvnc) = IMG_FALSE; \
++ (version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion); \
++ if (version) \
++ { \
++ (bvnc) = ((L).ui64BVNC == (R).ui64BVNC); \
++ } \
++ (all) = (version) && (bvnc); \
++ } while (false)
++
++
++/**************************************************************************//**
++ * Utility function for packing BVNC
++ *****************************************************************************/
++static inline IMG_UINT64 rgx_bvnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
++{
++ /*
++ * Test for input B, V, N and C exceeding max bit width.
++ */
++ PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0U);
++ PVR_COMPAT_ASSERT((ui32V & (~(RGX_BVNC_PACK_MASK_V >> RGX_BVNC_PACK_SHIFT_V))) == 0U);
++ PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0U);
++ PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0U);
++
++ return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) |
++ ((IMG_UINT64)ui32V << RGX_BVNC_PACK_SHIFT_V) |
++ ((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) |
++ ((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C));
++}
++
++
++#endif /* RGX_COMPAT_BVNC_H */
++
++/******************************************************************************
++ End of file (rgx_compat_bvnc.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/rgx_fw_info.h b/drivers/gpu/drm/img-rogue/rgx_fw_info.h
+new file mode 100644
+index 000000000000..2f012d59ba5a
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgx_fw_info.h
+@@ -0,0 +1,135 @@
++/*************************************************************************/ /*!
++@File
++@Title FW image information
++
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Structures and defines describing the FW binary image layout
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_FW_INFO_H) ++#define RGX_FW_INFO_H ++ ++#include "img_types.h" ++#include "rgx_common.h" ++ ++/* ++ * Firmware binary block unit in bytes. ++ * Raw data stored in FW binary will be aligned to this size. ++ */ ++#define FW_BLOCK_SIZE 4096L ++ ++typedef enum ++{ ++ META_CODE = 0, ++ META_PRIVATE_DATA, ++ META_COREMEM_CODE, ++ META_COREMEM_DATA, ++ MIPS_CODE, ++ MIPS_EXCEPTIONS_CODE, ++ MIPS_BOOT_CODE, ++ MIPS_PRIVATE_DATA, ++ MIPS_BOOT_DATA, ++ MIPS_STACK, ++ RISCV_UNCACHED_CODE, ++ RISCV_CACHED_CODE, ++ RISCV_PRIVATE_DATA, ++ RISCV_COREMEM_CODE, ++ RISCV_COREMEM_DATA, ++} RGX_FW_SECTION_ID; ++ ++typedef enum ++{ ++ NONE = 0, ++ FW_CODE, ++ FW_DATA, ++ FW_COREMEM_CODE, ++ FW_COREMEM_DATA ++} RGX_FW_SECTION_TYPE; ++ ++ ++/* ++ * FW binary format with FW info attached: ++ * ++ * Contents Offset ++ * +-----------------+ ++ * | | 0 ++ * | | ++ * | Original binary | ++ * | file | ++ * | (.ldr/.elf) | ++ * | | ++ * | | ++ * +-----------------+ ++ * | FW info header | FILE_SIZE - 4K ++ * +-----------------+ ++ * | | ++ * | FW layout table | ++ * | | ++ * +-----------------+ ++ * FILE_SIZE ++ */ ++ ++#define FW_INFO_VERSION (1) ++ ++typedef struct ++{ ++ IMG_UINT32 ui32InfoVersion; /* FW info version */ ++ IMG_UINT32 ui32HeaderLen; /* Header length */ ++ IMG_UINT32 ui32LayoutEntryNum; /* Number of entries in the layout table */ ++ IMG_UINT32 ui32LayoutEntrySize; /* Size of an entry in the layout table */ ++ IMG_UINT64 RGXFW_ALIGN ui64BVNC; /* BVNC */ ++ IMG_UINT32 ui32FwPageSize; /* Page size of processor on which firmware executes */ ++ IMG_UINT32 ui32Flags; /* Compatibility flags */ ++} RGX_FW_INFO_HEADER; ++ ++typedef struct ++{ ++ RGX_FW_SECTION_ID eId; ++ RGX_FW_SECTION_TYPE eType; ++ IMG_UINT32 ui32BaseAddr; ++ IMG_UINT32 ui32MaxSize; ++ IMG_UINT32 ui32AllocSize; ++ IMG_UINT32 ui32AllocOffset; ++} RGX_FW_LAYOUT_ENTRY; ++ ++#endif /* RGX_FW_INFO_H */ ++ ++/****************************************************************************** ++ End of file (rgx_fw_info.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h b/drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h +new file mode 100644 +index 000000000000..4f82b23743be +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_fwif_alignchecks.h +@@ -0,0 +1,192 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX fw interface alignment checks ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved
++@Description Checks to avoid misalignment in RGX fw data structures
++ shared with the host
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGX_FWIF_ALIGNCHECKS_H)
++#define RGX_FWIF_ALIGNCHECKS_H
++
++/* for the offsetof macro */
++#if defined(__KERNEL__) && defined(__linux__)
++#include <linux/stddef.h>
++#else
++#include <stddef.h>
++#endif
++
++/*! 
++ ****************************************************************************** ++ * Alignment UM/FW checks array ++ *****************************************************************************/ ++ ++#define RGXFW_ALIGN_CHECKS_UM_MAX 128U ++ ++#define RGXFW_ALIGN_CHECKS_INIT0 \ ++ sizeof(RGXFWIF_TRACEBUF), \ ++ offsetof(RGXFWIF_TRACEBUF, ui32LogType), \ ++ offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \ ++ offsetof(RGXFWIF_TRACEBUF, ui32TraceBufSizeInDWords), \ ++ offsetof(RGXFWIF_TRACEBUF, ui32TracebufFlags), \ ++ \ ++ sizeof(RGXFWIF_SYSDATA), \ ++ offsetof(RGXFWIF_SYSDATA, ePowState), \ ++ offsetof(RGXFWIF_SYSDATA, ui32HWPerfDropCount), \ ++ offsetof(RGXFWIF_SYSDATA, ui32LastDropOrdinal), \ ++ offsetof(RGXFWIF_SYSDATA, ui32FWFaults), \ ++ offsetof(RGXFWIF_SYSDATA, ui32HWRStateFlags), \ ++ \ ++ sizeof(RGXFWIF_OSDATA), \ ++ offsetof(RGXFWIF_OSDATA, ui32HostSyncCheckMark), \ ++ offsetof(RGXFWIF_OSDATA, ui32KCCBCmdsExecuted), \ ++ \ ++ sizeof(RGXFWIF_HWRINFOBUF), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmLockedUpCount), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmOverranCount), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmRecoveredCount), \ ++ offsetof(RGXFWIF_HWRINFOBUF, aui32HwrDmFalseDetectCount), \ ++ \ ++ /* RGXFWIF_CMDTA checks */ \ ++ sizeof(RGXFWIF_CMDTA), \ ++ offsetof(RGXFWIF_CMDTA, sGeomRegs), \ ++ \ ++ /* RGXFWIF_CMD3D checks */ \ ++ sizeof(RGXFWIF_CMD3D), \ ++ offsetof(RGXFWIF_CMD3D, s3DRegs), \ ++ \ ++ /* RGXFWIF_CMDTRANSFER checks */ \ ++ sizeof(RGXFWIF_CMDTRANSFER), \ ++ offsetof(RGXFWIF_CMDTRANSFER, sTransRegs), \ ++ \ ++ \ ++ /* RGXFWIF_CMD_COMPUTE checks */ \ ++ sizeof(RGXFWIF_CMD_COMPUTE), \ ++ offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \ ++ \ ++ /* RGXFWIF_FREELIST checks */ \ ++ sizeof(RGXFWIF_FREELIST), \ ++ offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr), \ ++ offsetof(RGXFWIF_FREELIST, ui32MaxPages), \ ++ offsetof(RGXFWIF_FREELIST, ui32CurrentPages), \ ++ \ ++ /* RGXFWIF_HWRTDATA checks */ \ ++ sizeof(RGXFWIF_HWRTDATA), \ ++ offsetof(RGXFWIF_HWRTDATA, psVHeapTableDevVAddr), \ ++ offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr), \ ++ offsetof(RGXFWIF_HWRTDATA, apsFreeLists), \ ++ offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase), \ ++ offsetof(RGXFWIF_HWRTDATA, eState), \ ++ \ ++ /* RGXFWIF_HWRTDATA_COMMON checks */ \ ++ sizeof(RGXFWIF_HWRTDATA_COMMON), \ ++ offsetof(RGXFWIF_HWRTDATA_COMMON, bTACachesNeedZeroing),\ ++ \ ++ /* RGXFWIF_HWPERF_CTL_BLK checks */ \ ++ sizeof(RGXFWIF_HWPERF_CTL_BLK), \ ++ offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg), \ ++ \ ++ /* RGXFWIF_HWPERF_CTL checks */ \ ++ sizeof(RGXFWIF_HWPERF_CTL), \ ++ offsetof(RGXFWIF_HWPERF_CTL, SelCntr) ++ ++#if defined(RGX_FEATURE_TLA) ++#define RGXFW_ALIGN_CHECKS_INIT1 \ ++ RGXFW_ALIGN_CHECKS_INIT0, \ ++ /* RGXFWIF_CMD2D checks */ \ ++ sizeof(RGXFWIF_CMD2D), \ ++ offsetof(RGXFWIF_CMD2D, s2DRegs) ++#else ++#define RGXFW_ALIGN_CHECKS_INIT1 RGXFW_ALIGN_CHECKS_INIT0 ++#endif /* RGX_FEATURE_TLA */ ++ ++ ++#if defined(RGX_FEATURE_FASTRENDER_DM) ++#define RGXFW_ALIGN_CHECKS_INIT \ ++ RGXFW_ALIGN_CHECKS_INIT1, \ ++ /* RGXFWIF_CMDTDM checks */ \ ++ sizeof(RGXFWIF_CMDTDM), \ ++ offsetof(RGXFWIF_CMDTDM, sTDMRegs) ++#else ++#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT1 ++#endif /* ! RGX_FEATURE_FASTRENDER_DM */ ++ ++ ++ ++/*! 
++ ****************************************************************************** ++ * Alignment KM checks array ++ *****************************************************************************/ ++ ++#define RGXFW_ALIGN_CHECKS_INIT_KM \ ++ sizeof(RGXFWIF_SYSINIT), \ ++ offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr), \ ++ offsetof(RGXFWIF_SYSINIT, sPDSExecBase), \ ++ offsetof(RGXFWIF_SYSINIT, sUSCExecBase), \ ++ offsetof(RGXFWIF_SYSINIT, asSigBufCtl), \ ++ offsetof(RGXFWIF_SYSINIT, sTraceBufCtl), \ ++ offsetof(RGXFWIF_SYSINIT, sFwSysData), \ ++ sizeof(RGXFWIF_OSINIT), \ ++ offsetof(RGXFWIF_OSINIT, psKernelCCBCtl), \ ++ offsetof(RGXFWIF_OSINIT, psKernelCCB), \ ++ offsetof(RGXFWIF_OSINIT, psFirmwareCCBCtl), \ ++ offsetof(RGXFWIF_OSINIT, psFirmwareCCB), \ ++ offsetof(RGXFWIF_OSINIT, sFwOsData), \ ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks), \ ++ \ ++ /* RGXFWIF_FWRENDERCONTEXT checks */ \ ++ sizeof(RGXFWIF_FWRENDERCONTEXT), \ ++ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \ ++ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \ ++ \ ++ sizeof(RGXFWIF_FWCOMMONCONTEXT), \ ++ offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \ ++ offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \ ++ offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \ ++ \ ++ sizeof(RGXFWIF_MMUCACHEDATA), \ ++ offsetof(RGXFWIF_MMUCACHEDATA, ui32CacheFlags), \ ++ offsetof(RGXFWIF_MMUCACHEDATA, sMMUCacheSync), \ ++ offsetof(RGXFWIF_MMUCACHEDATA, ui32MMUCacheSyncUpdateValue) ++ ++#endif /* RGX_FWIF_ALIGNCHECKS_H */ ++ ++/****************************************************************************** ++ End of file (rgx_fwif_alignchecks.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h b/drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h +new file mode 100644 +index 000000000000..7001092c7221 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_fwif_hwperf.h +@@ -0,0 +1,252 @@ ++/*************************************************************************/ /*! ++@File rgx_fwif_hwperf.h ++@Title RGX HWPerf support ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Shared header between RGX firmware and Init process ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++#ifndef RGX_FWIF_HWPERF_H
++#define RGX_FWIF_HWPERF_H
++
++#include "rgx_fwif_shared.h"
++#include "rgx_hwperf.h"
++#include "rgxdefs_km.h"
++
++
++/*****************************************************************************/
++
++/* Structure to hold a block's parameters for passing between the BG context
++ * and the IRQ context when applying a configuration request. */
++typedef struct
++{
++ IMG_BOOL bValid;
++ IMG_BOOL bEnabled;
++ IMG_UINT32 eBlockID;
++ IMG_UINT32 uiCounterMask;
++ IMG_UINT64 RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_MUX_COUNTERS_MAX];
++} RGXFWIF_HWPERF_CTL_BLK;
++
++/* Structure used to hold the configuration of the non-mux counter blocks */
++typedef struct
++{
++ IMG_UINT32 ui32NumSelectedCounters;
++ IMG_UINT32 aui32SelectedCountersIDs[RGX_HWPERF_MAX_CUSTOM_CNTRS];
++} RGXFW_HWPERF_SELECT;
++
++/* Structure used to hold a Direct-Addressable block's parameters for passing
++ * between the BG context and the IRQ context when applying a configuration
++ * request. RGX_FEATURE_HWPERF_OCEANIC use only.
++ */
++typedef struct
++{
++ IMG_UINT32 uiEnabled;
++ IMG_UINT32 uiNumCounters;
++ IMG_UINT32 eBlockID;
++ RGXFWIF_DEV_VIRTADDR psModel;
++ IMG_UINT32 aui32Counters[RGX_CNTBLK_COUNTERS_MAX];
++} RGXFWIF_HWPERF_DA_BLK;
++
++
++/* Structure to hold the whole configuration request details for all blocks.
++ * The block masks and counts are used to optimise reading of this data. */
++typedef struct
++{
++ IMG_UINT32 ui32HWPerfCtlFlags;
++
++ IMG_UINT32 ui32SelectedCountersBlockMask;
++ RGXFW_HWPERF_SELECT RGXFW_ALIGN SelCntr[RGX_HWPERF_MAX_CUSTOM_BLKS];
++
++ IMG_UINT32 ui32EnabledMUXBlksCount;
++ RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[RGX_HWPERF_MAX_MUX_BLKS];
++} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL;
++
++/* NOTE: The switch statement in this function must be kept in alignment with
++ * the enumeration RGX_HWPERF_CNTBLK_ID defined in rgx_hwperf.h. ASSERTs may
++ * result if not.
++ * The function provides a hash lookup to get a handle on a block's
++ * configuration store within the global store, from its block ID.
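++ *
++ * For example (see the switch below): a direct block such as
++ * RGX_CNTBLK_ID_HUB maps to an index equal to its own block ID, while an
++ * indirect unit such as RGX_CNTBLK_ID_USC3 maps to
++ * RGX_CNTBLK_ID_DIRECT_LAST + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + 3,
++ * i.e. its slot in the USC range that follows the direct and TPU_MCU
++ * entries.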
++ */ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(rgxfw_hwperf_get_block_ctl) ++#endif ++static INLINE RGXFWIF_HWPERF_CTL_BLK *rgxfw_hwperf_get_block_ctl( ++ RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData) ++{ ++ IMG_UINT32 ui32Idx; ++ ++ /* Hash the block ID into a control configuration array index */ ++ switch (eBlockID) ++ { ++ case RGX_CNTBLK_ID_TA: ++ case RGX_CNTBLK_ID_RASTER: ++ case RGX_CNTBLK_ID_HUB: ++ case RGX_CNTBLK_ID_TORNADO: ++ case RGX_CNTBLK_ID_JONES: ++ { ++ ui32Idx = eBlockID; ++ break; ++ } ++ case RGX_CNTBLK_ID_TPU_MCU0: ++ case RGX_CNTBLK_ID_TPU_MCU1: ++ case RGX_CNTBLK_ID_TPU_MCU2: ++ case RGX_CNTBLK_ID_TPU_MCU3: ++ case RGX_CNTBLK_ID_TPU_MCU4: ++ case RGX_CNTBLK_ID_TPU_MCU5: ++ case RGX_CNTBLK_ID_TPU_MCU6: ++ case RGX_CNTBLK_ID_TPU_MCU7: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_USC0: ++ case RGX_CNTBLK_ID_USC1: ++ case RGX_CNTBLK_ID_USC2: ++ case RGX_CNTBLK_ID_USC3: ++ case RGX_CNTBLK_ID_USC4: ++ case RGX_CNTBLK_ID_USC5: ++ case RGX_CNTBLK_ID_USC6: ++ case RGX_CNTBLK_ID_USC7: ++ case RGX_CNTBLK_ID_USC8: ++ case RGX_CNTBLK_ID_USC9: ++ case RGX_CNTBLK_ID_USC10: ++ case RGX_CNTBLK_ID_USC11: ++ case RGX_CNTBLK_ID_USC12: ++ case RGX_CNTBLK_ID_USC13: ++ case RGX_CNTBLK_ID_USC14: ++ case RGX_CNTBLK_ID_USC15: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_TEXAS0: ++ case RGX_CNTBLK_ID_TEXAS1: ++ case RGX_CNTBLK_ID_TEXAS2: ++ case RGX_CNTBLK_ID_TEXAS3: ++ case RGX_CNTBLK_ID_TEXAS4: ++ case RGX_CNTBLK_ID_TEXAS5: ++ case RGX_CNTBLK_ID_TEXAS6: ++ case RGX_CNTBLK_ID_TEXAS7: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_RASTER0: ++ case RGX_CNTBLK_ID_RASTER1: ++ case RGX_CNTBLK_ID_RASTER2: ++ case RGX_CNTBLK_ID_RASTER3: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + ++ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_BLACKPEARL0: ++ case RGX_CNTBLK_ID_BLACKPEARL1: ++ case RGX_CNTBLK_ID_BLACKPEARL2: ++ case RGX_CNTBLK_ID_BLACKPEARL3: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + ++ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ case RGX_CNTBLK_ID_PBE0: ++ case RGX_CNTBLK_ID_PBE1: ++ case RGX_CNTBLK_ID_PBE2: ++ case RGX_CNTBLK_ID_PBE3: ++ case RGX_CNTBLK_ID_PBE4: ++ case RGX_CNTBLK_ID_PBE5: ++ case RGX_CNTBLK_ID_PBE6: ++ case RGX_CNTBLK_ID_PBE7: ++ case RGX_CNTBLK_ID_PBE8: ++ case RGX_CNTBLK_ID_PBE9: ++ case RGX_CNTBLK_ID_PBE10: ++ case RGX_CNTBLK_ID_PBE11: ++ case RGX_CNTBLK_ID_PBE12: ++ case RGX_CNTBLK_ID_PBE13: ++ case RGX_CNTBLK_ID_PBE14: ++ case RGX_CNTBLK_ID_PBE15: ++ { ++ ui32Idx = RGX_CNTBLK_ID_DIRECT_LAST + ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) + ++ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) + ++ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) + ++ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) + ++ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK); ++ break; ++ } ++ default: ++ { ++ ui32Idx = RGX_HWPERF_MAX_DEFINED_BLKS; ++ break; ++ } ++ } ++ if (ui32Idx >= 
RGX_HWPERF_MAX_DEFINED_BLKS) ++ { ++ return NULL; ++ } ++ return &psHWPerfInitData->sBlkCfg[ui32Idx]; ++} ++ ++/* Stub routine for rgxfw_hwperf_get_da_block_ctl() for non ++ * RGX_FEATURE_HWPERF_OCEANIC systems. Just return a NULL. ++ */ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(rgxfw_hwperf_get_da_block_ctl) ++#endif ++static INLINE RGXFWIF_HWPERF_DA_BLK* rgxfw_hwperf_get_da_block_ctl( ++ RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData) ++{ ++ PVR_UNREFERENCED_PARAMETER(eBlockID); ++ PVR_UNREFERENCED_PARAMETER(psHWPerfInitData); ++ ++ return NULL; ++} ++#endif +diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_km.h b/drivers/gpu/drm/img-rogue/rgx_fwif_km.h +new file mode 100644 +index 000000000000..724f6eecd6fd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_fwif_km.h +@@ -0,0 +1,2341 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX firmware interface structures used by pvrsrvkm ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX firmware interface structures used by pvrsrvkm ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_FWIF_KM_H) ++#define RGX_FWIF_KM_H ++ ++#include "img_types.h" ++#include "rgx_fwif_shared.h" ++#include "rgxdefs_km.h" ++#include "dllist.h" ++#include "rgx_hwperf.h" ++ ++ ++/*************************************************************************/ /*! 
++ Logging type ++*/ /**************************************************************************/ ++#define RGXFWIF_LOG_TYPE_NONE 0x00000000U ++#define RGXFWIF_LOG_TYPE_TRACE 0x00000001U ++#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002U ++#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004U ++#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008U ++#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010U ++#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020U ++#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040U ++#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080U ++#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100U ++#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200U ++#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400U ++#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800U ++#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000U ++#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000U ++#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000U ++#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000U ++#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFEU ++#define RGXFWIF_LOG_TYPE_MASK 0x80007FFFU ++ ++/* String used in pvrdebug -h output */ ++#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug" ++ ++/* Table entry to map log group strings to log type value */ ++typedef struct { ++ const IMG_CHAR* pszLogGroupName; ++ IMG_UINT32 ui32LogGroupType; ++} RGXFWIF_LOG_GROUP_MAP_ENTRY; ++ ++/* ++ Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup ++ table where needed. Keep log group names short, no more than 20 chars. ++*/ ++#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \ ++ { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \ ++ { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \ ++ { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \ ++ { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \ ++ { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \ ++ { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \ ++ { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \ ++ { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \ ++ { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \ ++ { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \ ++ { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \ ++ { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \ ++ { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \ ++ { "misc", RGXFWIF_LOG_TYPE_GROUP_MISC }, \ ++ { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG } ++ ++ ++/* Used in print statements to display log group state, one %s per group defined */ ++#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" ++ ++/* Used in a print statement to display log group state, one per group */ ++#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) ((((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) != 0U) ?("main ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) != 0U) ?("mts ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) != 0U) ?("cleanup ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) != 0U) ?("csw ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) != 0U) ?("bif ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_PM) != 0U) ?("pm ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) != 0U) ?("rtd ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) != 0U) ?("spm ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_POW) != 0U) ?("pow ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) != 0U) ?("hwr ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) != 0U) ?("hwp ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) != 0U) ?("rpm ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) != 0U) ?("dma ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_MISC) 
!= 0U) ?("misc ") :("")), \ ++ ((((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) != 0U) ?("debug ") :("")) ++ ++ ++/************************************************************************ ++* RGX FW signature checks ++************************************************************************/ ++#define RGXFW_SIG_BUFFER_SIZE_MIN (8192) ++ ++#define RGXFWIF_TIMEDIFF_ID ((0x1UL << 28) | RGX_CR_TIMER) ++ ++/*! ++ ****************************************************************************** ++ * Trace Buffer ++ *****************************************************************************/ ++ ++/*! Default size of RGXFWIF_TRACEBUF_SPACE in DWords */ ++#define RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS 12000U ++#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200U ++#if defined(RGXFW_META_SUPPORT_2ND_THREAD) ++#define RGXFW_THREAD_NUM 2U ++#else ++#define RGXFW_THREAD_NUM 1U ++#endif ++ ++#define RGXFW_POLL_TYPE_SET 0x80000000U ++ ++typedef struct ++{ ++ IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; ++ IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE]; ++ IMG_UINT32 ui32LineNum; ++} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF; ++ ++/*! ++ * @Defgroup SRVAndFWTracing Services and Firmware Tracing data interface ++ * @Brief The document groups/lists the data structures and the interfaces related to Services and Firmware Tracing ++ * @{ ++ */ ++ ++/*! ++ * @Brief Firmware trace buffer details ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32TracePointer; /*!< Trace pointer (write index into Trace Buffer)*/ ++ ++#if defined(RGX_FIRMWARE) ++ IMG_UINT32 *pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address), to be used by firmware for writing into trace buffer */ ++#else ++ RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer; /*!< Trace buffer address (FW address)*/ ++#endif ++ IMG_PUINT32 pui32TraceBuffer; /*!< Trace buffer address (Host address), to be used by host when reading from trace buffer */ ++ ++ RGXFWIF_FILE_INFO_BUF sAssertBuf; ++} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE; ++ ++/*! 
@} End of Defgroup SRVAndFWTracing */ ++ ++#define RGXFWIF_FWFAULTINFO_MAX (8U) /* Total number of FW fault logs stored */ ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; ++ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; ++ IMG_UINT32 RGXFW_ALIGN ui32Data; ++ IMG_UINT32 ui32Reserved; ++ RGXFWIF_FILE_INFO_BUF sFaultBuf; ++} UNCACHED_ALIGN RGX_FWFAULTINFO; ++ ++ ++#define RGXFWIF_POW_STATES \ ++ X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \ ++ X(RGXFWIF_POW_ON) /* running HW commands */ \ ++ X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \ ++ X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */ ++ ++typedef enum ++{ ++#define X(NAME) NAME, ++ RGXFWIF_POW_STATES ++#undef X ++} RGXFWIF_POW_STATE; ++ ++/* Firmware HWR states */ ++#define RGXFWIF_HWR_HARDWARE_OK (IMG_UINT32_C(0x1) << 0U) /*!< The HW state is ok or locked up */ ++#define RGXFWIF_HWR_RESET_IN_PROGRESS (IMG_UINT32_C(0x1) << 1U) /*!< Tells if a HWR reset is in progress */ ++#define RGXFWIF_HWR_GENERAL_LOCKUP (IMG_UINT32_C(0x1) << 3U) /*!< A DM unrelated lockup has been detected */ ++#define RGXFWIF_HWR_DM_RUNNING_OK (IMG_UINT32_C(0x1) << 4U) /*!< At least one DM is running without being close to a lockup */ ++#define RGXFWIF_HWR_DM_STALLING (IMG_UINT32_C(0x1) << 5U) /*!< At least one DM is close to lockup */ ++#define RGXFWIF_HWR_FW_FAULT (IMG_UINT32_C(0x1) << 6U) /*!< The FW has faulted and needs to restart */ ++#define RGXFWIF_HWR_RESTART_REQUESTED (IMG_UINT32_C(0x1) << 7U) /*!< The FW has requested the host to restart it */ ++ ++#define RGXFWIF_PHR_STATE_SHIFT (8U) ++#define RGXFWIF_PHR_RESTART_REQUESTED (IMG_UINT32_C(1) << RGXFWIF_PHR_STATE_SHIFT) /*!< The FW has requested the host to restart it, per PHR configuration */ ++#define RGXFWIF_PHR_RESTART_FINISHED (IMG_UINT32_C(2) << RGXFWIF_PHR_STATE_SHIFT) /*!< A PHR triggered GPU reset has just finished */ ++#define RGXFWIF_PHR_RESTART_MASK (RGXFWIF_PHR_RESTART_REQUESTED | RGXFWIF_PHR_RESTART_FINISHED) ++ ++#define RGXFWIF_PHR_MODE_OFF (0UL) ++#define RGXFWIF_PHR_MODE_RD_RESET (1UL) ++#define RGXFWIF_PHR_MODE_FULL_RESET (2UL) ++ ++typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS; ++ ++/* Firmware per-DM HWR states */ ++#define RGXFWIF_DM_STATE_WORKING (0x00U) /*!< DM is working if all flags are cleared */ ++#define RGXFWIF_DM_STATE_READY_FOR_HWR (IMG_UINT32_C(0x1) << 0) /*!< DM is idle and ready for HWR */ ++#define RGXFWIF_DM_STATE_NEEDS_SKIP (IMG_UINT32_C(0x1) << 2) /*!< DM need to skip to next cmd before resuming processing */ ++#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (IMG_UINT32_C(0x1) << 3) /*!< DM need partial render cleanup before resuming processing */ ++#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR (IMG_UINT32_C(0x1) << 4) /*!< DM need to increment Recovery Count once fully recovered */ ++#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (IMG_UINT32_C(0x1) << 5) /*!< DM was identified as locking up and causing HWR */ ++#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (IMG_UINT32_C(0x1) << 6) /*!< DM was innocently affected by another lockup which caused HWR */ ++#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (IMG_UINT32_C(0x1) << 7) /*!< DM was identified as over-running and causing HWR */ ++#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (IMG_UINT32_C(0x1) << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */ ++#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (IMG_UINT32_C(0x1) << 9) /*!< DM was forced into HWR as it delayed more important workloads */ ++#define RGXFWIF_DM_STATE_GPU_ECC_HWR (IMG_UINT32_C(0x1) << 10) /*!< DM was 
forced into HWR due to an uncorrected GPU ECC error */ ++ ++/* Firmware's connection state */ ++typedef IMG_UINT32 RGXFWIF_CONNECTION_FW_STATE; ++#define RGXFW_CONNECTION_FW_OFFLINE 0U /*!< Firmware is offline */ ++#define RGXFW_CONNECTION_FW_READY 1U /*!< Firmware is initialised */ ++#define RGXFW_CONNECTION_FW_ACTIVE 2U /*!< Firmware connection is fully established */ ++#define RGXFW_CONNECTION_FW_OFFLOADING 3U /*!< Firmware is clearing up connection data */ ++#define RGXFW_CONNECTION_FW_STATE_COUNT 4U ++ ++/* OS' connection state */ ++typedef enum ++{ ++ RGXFW_CONNECTION_OS_OFFLINE = 0, /*!< OS is offline */ ++ RGXFW_CONNECTION_OS_READY, /*!< OS's KM driver is setup and waiting */ ++ RGXFW_CONNECTION_OS_ACTIVE, /*!< OS connection is fully established */ ++ RGXFW_CONNECTION_OS_STATE_COUNT ++} RGXFWIF_CONNECTION_OS_STATE; ++ ++typedef struct ++{ ++ IMG_UINT bfOsState : 3; ++ IMG_UINT bfFLOk : 1; ++ IMG_UINT bfFLGrowPending : 1; ++ IMG_UINT bfIsolatedOS : 1; ++ IMG_UINT bfReserved : 26; ++} RGXFWIF_OS_RUNTIME_FLAGS; ++ ++typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS; ++ ++#if defined(PVRSRV_STALLED_CCB_ACTION) ++#define PVR_SLR_LOG_ENTRIES 10U ++#define PVR_SLR_LOG_STRLEN 30 /*!< MAX_CLIENT_CCB_NAME not visible to this header */ ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64Timestamp; ++ IMG_UINT32 ui32FWCtxAddr; ++ IMG_UINT32 ui32NumUFOs; ++ IMG_CHAR aszCCBName[PVR_SLR_LOG_STRLEN]; ++} UNCACHED_ALIGN RGXFWIF_SLR_ENTRY; ++#endif ++ ++/*! ++ * @InGroup SRVAndFWTracing ++ * @Brief Firmware trace control data ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32LogType; /*!< FW trace log group configuration */ ++ RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM]; /*!< FW Trace buffer */ ++ IMG_UINT32 ui32TraceBufSizeInDWords; /*!< FW Trace buffer size in dwords, Member initialised only when sTraceBuf is actually allocated ++ (in RGXTraceBufferInitOnDemandResources) */ ++ IMG_UINT32 ui32TracebufFlags; /*!< Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_TRACEBUF; ++ ++/*! 
@Brief Firmware system data shared with the Host driver */ ++typedef struct ++{ ++ IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */ ++ IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */ ++ volatile RGXFWIF_POW_STATE ePowState; ++ volatile IMG_UINT32 ui32HWPerfRIdx; ++ volatile IMG_UINT32 ui32HWPerfWIdx; ++ volatile IMG_UINT32 ui32HWPerfWrapCount; ++ IMG_UINT32 ui32HWPerfSize; /*!< Constant after setup, needed in FW */ ++ IMG_UINT32 ui32HWPerfDropCount; /*!< The number of times the FW drops a packet due to buffer full */ ++ ++ /* ui32HWPerfUt, ui32FirstDropOrdinal, ui32LastDropOrdinal only valid when FW is built with ++ * RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined in rgxfw_hwperf.c */ ++ IMG_UINT32 ui32HWPerfUt; /*!< Buffer utilisation, high watermark of bytes in use */ ++ IMG_UINT32 ui32FirstDropOrdinal; /*!< The ordinal of the first packet the FW dropped */ ++ IMG_UINT32 ui32LastDropOrdinal; /*!< The ordinal of the last packet the FW dropped */ ++ RGXFWIF_OS_RUNTIME_FLAGS asOsRuntimeFlagsMirror[RGXFW_MAX_NUM_OS];/*!< State flags for each Operating System mirrored from Fw coremem */ ++ RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX]; /*!< Firmware fault info */ ++ IMG_UINT32 ui32FWFaults; /*!< Firmware faults count */ ++ IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM]; /*!< Failed poll address */ ++ IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM]; /*!< Failed poll mask */ ++ IMG_UINT32 aui32CrPollCount[RGXFW_THREAD_NUM]; /*!< Failed poll count */ ++ IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime; ++#if defined(SUPPORT_POWMON_COMPONENT) ++#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) ++ RGXFWIF_TRACEBUF_SPACE sPowerMonBuf; ++ IMG_UINT32 ui32PowerMonBufSizeInDWords; ++#endif ++#endif ++ ++#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) ++#define RGXFWIF_STATS_FRAMEWORK_LINESIZE (8) ++#define RGXFWIF_STATS_FRAMEWORK_MAX (2048*RGXFWIF_STATS_FRAMEWORK_LINESIZE) ++ IMG_UINT32 RGXFW_ALIGN aui32FWStatsBuf[RGXFWIF_STATS_FRAMEWORK_MAX]; ++#endif ++ RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags; /*!< Firmware's Current HWR state */ ++ RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_MAX]; /*!< Each DM's HWR state */ ++ IMG_UINT32 ui32FwSysDataFlags; /*!< Compatibility and other flags */ ++ IMG_UINT32 ui32McConfig; /*!< Identify whether MC config is P-P or P-S */ ++} UNCACHED_ALIGN RGXFWIF_SYSDATA; ++ ++/*! 
++ * @InGroup ContextSwitching ++ * @Brief Firmware per-os data and configuration ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32FwOsConfigFlags; /*!< Configuration flags from an OS */ ++ IMG_UINT32 ui32FWSyncCheckMark; /*!< Markers to signal that the host should perform a full sync check */ ++ IMG_UINT32 ui32HostSyncCheckMark; /*!< Markers to signal that the Firmware should perform a full sync check */ ++#if defined(PVRSRV_STALLED_CCB_ACTION) ++ IMG_UINT32 ui32ForcedUpdatesRequested; ++ IMG_UINT8 ui8SLRLogWp; ++ RGXFWIF_SLR_ENTRY sSLRLogFirst; ++ RGXFWIF_SLR_ENTRY sSLRLog[PVR_SLR_LOG_ENTRIES]; ++ IMG_UINT64 RGXFW_ALIGN ui64LastForcedUpdateTime; ++#endif ++ volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */ ++ IMG_UINT32 ui32KCCBCmdsExecuted; /*!< Executed Kernel CCB command count */ ++ RGXFWIF_DEV_VIRTADDR sPowerSync; /*!< Sync prim used to signal the host the power off state */ ++ IMG_UINT32 ui32FwOsDataFlags; /*!< Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_OSDATA; ++ ++/* Firmware trace time-stamp field breakup */ ++ ++/* RGX_CR_TIMER register read (48 bits) value*/ ++#define RGXFWT_TIMESTAMP_TIME_SHIFT (0U) ++#define RGXFWT_TIMESTAMP_TIME_CLRMSK (IMG_UINT64_C(0xFFFF000000000000)) ++ ++/* Extra debug-info (16 bits) */ ++#define RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT (48U) ++#define RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK ~RGXFWT_TIMESTAMP_TIME_CLRMSK ++ ++ ++/* Debug-info sub-fields */ ++/* Bit 0: RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT bit from RGX_CR_EVENT_STATUS register */ ++#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT (0U) ++#define RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET (1U << RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SHIFT) ++ ++/* Bit 1: RGX_CR_BIF_MMU_ENTRY_PENDING bit from RGX_CR_BIF_MMU_ENTRY register */ ++#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT (1U) ++#define RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET (1U << RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SHIFT) ++ ++/* Bit 2: RGX_CR_SLAVE_EVENT register is non-zero */ ++#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT (2U) ++#define RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET (1U << RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SHIFT) ++ ++/* Bit 3-15: Unused bits */ ++ ++#define RGXFWT_DEBUG_INFO_STR_MAXLEN 64 ++#define RGXFWT_DEBUG_INFO_STR_PREPEND " (debug info: " ++#define RGXFWT_DEBUG_INFO_STR_APPEND ")" ++ ++/* Table of debug info sub-field's masks and corresponding message strings ++ * to be appended to firmware trace ++ * ++ * Mask : 16 bit mask to be applied to debug-info field ++ * String : debug info message string ++ */ ++ ++#define RGXFWT_DEBUG_INFO_MSKSTRLIST \ ++/*Mask, String*/ \ ++X(RGXFWT_DEBUG_INFO_MMU_PAGE_FAULT_SET, "mmu pf") \ ++X(RGXFWT_DEBUG_INFO_MMU_ENTRY_PENDING_SET, "mmu pending") \ ++X(RGXFWT_DEBUG_INFO_SLAVE_EVENTS_SET, "slave events") ++ ++/*! ++ ****************************************************************************** ++ * HWR Data ++ *****************************************************************************/ ++/*! ++ * @Defgroup HWRInfo FW HWR shared data interface ++ * @Brief Types grouping data structures and defines used in realising the HWR record. ++ * @{ ++ */ ++/*! 
@Brief HWR Lockup types */ ++typedef enum ++{ ++ RGX_HWRTYPE_UNKNOWNFAILURE = 0, /*!< Unknown failure */ ++ RGX_HWRTYPE_OVERRUN = 1, /*!< DM overrun */ ++ RGX_HWRTYPE_POLLFAILURE = 2, /*!< Poll failure */ ++ RGX_HWRTYPE_BIF0FAULT = 3, /*!< BIF0 fault */ ++ RGX_HWRTYPE_BIF1FAULT = 4, /*!< BIF1 fault */ ++ RGX_HWRTYPE_TEXASBIF0FAULT = 5, /*!< TEXASBIF0 fault */ ++ RGX_HWRTYPE_MMUFAULT = 6, /*!< MMU fault */ ++ RGX_HWRTYPE_MMUMETAFAULT = 7, /*!< MMU META fault */ ++ RGX_HWRTYPE_MIPSTLBFAULT = 8, /*!< MIPS TLB fault */ ++ RGX_HWRTYPE_ECCFAULT = 9, /*!< ECC fault */ ++ RGX_HWRTYPE_MMURISCVFAULT = 10, /*!< MMU RISCV fault */ ++} RGX_HWRTYPE; ++ ++#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1) ++ ++#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((((eHWRType) == RGX_HWRTYPE_BIF0FAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_BIF1FAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_TEXASBIF0FAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MMUFAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MMUMETAFAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MIPSTLBFAULT) || \ ++ ((eHWRType) == RGX_HWRTYPE_MMURISCVFAULT)) ? true : false) ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus; /*!< BIF request status */ ++ IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus; /*!< MMU status */ ++ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; ++} RGX_BIFINFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32FaultGPU; /*!< ECC fault in GPU */ ++} RGX_ECCINFO; ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN aui64MMUStatus[2]; /*!< MMU status */ ++ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; ++} RGX_MMUINFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32ThreadNum; /*!< Thread ID performing poll operation */ ++ IMG_UINT32 ui32CrPollAddr; /*!< CR Poll Address */ ++ IMG_UINT32 ui32CrPollMask; /*!< CR Poll mask */ ++ IMG_UINT32 ui32CrPollLastValue; /*!< CR Poll last value */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved; ++} UNCACHED_ALIGN RGX_POLLINFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32BadVAddr; /*!< VA address */ ++ IMG_UINT32 ui32EntryLo; ++} RGX_TLBINFO; ++ ++/*! @Brief Structure to keep information specific to a lockup e.g. DM, timer, lockup type etc. 
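++ * For example, a poll failure (eHWRType == RGX_HWRTYPE_POLLFAILURE) records
++ * the failing poll address and mask in uHWRData.sPollInfo, while a BIF fault
++ * fills in uHWRData.sBIFInfo instead.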
*/ ++typedef struct ++{ ++ union ++ { ++ RGX_BIFINFO sBIFInfo; /*!< BIF failure details */ ++ RGX_MMUINFO sMMUInfo; /*!< MMU failure details */ ++ RGX_POLLINFO sPollInfo; /*!< Poll failure details */ ++ RGX_TLBINFO sTLBInfo; /*!< TLB failure details */ ++ RGX_ECCINFO sECCInfo; /*!< ECC failure details */ ++ } uHWRData; ++ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimer; /*!< Timer value at the time of lockup */ ++ IMG_UINT64 RGXFW_ALIGN ui64OSTimer; /*!< OS timer value at the time of lockup */ ++ IMG_UINT32 ui32FrameNum; /*!< Frame number of the workload */ ++ IMG_UINT32 ui32PID; /*!< PID belonging to the workload */ ++ IMG_UINT32 ui32ActiveHWRTData; /*!< HWRT data of the workload */ ++ IMG_UINT32 ui32HWRNumber; /*!< HWR number */ ++ IMG_UINT32 ui32EventStatus; /*!< Core specific event status register at the time of lockup */ ++ IMG_UINT32 ui32HWRRecoveryFlags; /*!< DM state flags */ ++ RGX_HWRTYPE eHWRType; /*!< Type of lockup */ ++ RGXFWIF_DM eDM; /*!< Recovery triggered for the DM */ ++ IMG_UINT32 ui32CoreID; /*!< Core ID of the GPU */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick; /*!< Workload kick time */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart; /*!< HW reset start time */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish; /*!< HW reset stop time */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady; /*!< freelist ready time on the last HWR */ ++ IMG_UINT64 RGXFW_ALIGN ui64Reserved[2]; ++} UNCACHED_ALIGN RGX_HWRINFO; ++ ++#define RGXFWIF_HWINFO_MAX_FIRST 8U /* Number of first HWR logs recorded (never overwritten by newer logs) */ ++#define RGXFWIF_HWINFO_MAX_LAST 8U /* Number of latest HWR logs (older logs are overwritten by newer logs) */ ++#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */ ++#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1U) /* Index of the last log in the HWR log buffer */ ++ ++/*! @Brief Firmware HWR information structure allocated by the Services and used by the Firmware to update recovery information. */ ++typedef struct ++{ ++ RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX]; /*!< Max number of recovery record */ ++ IMG_UINT32 ui32HwrCounter; /*!< HWR counter used in FL reconstruction */ ++ IMG_UINT32 ui32WriteIndex; /*!< Index for updating recovery information in sHWRInfo */ ++ IMG_UINT32 ui32DDReqCount; /*!< Count of DebugDump requested to the host after recovery */ ++ IMG_UINT32 ui32HWRInfoBufFlags; /* Compatibility and other flags */ ++ IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_MAX]; /*!< Lockup count for each DM */ ++ IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_MAX]; /*!< Overrun count for each DM */ ++ IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_MAX]; /*!< Lockup + Overrun count for each DM */ ++ IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_MAX]; /*!< False lockup detection count for each DM */ ++} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF; ++ ++/*! @} End of HWRInfo */ ++ ++#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (IMG_UINT32_C(0x1)) ++#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (IMG_UINT32_C(0x2)) ++#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (IMG_UINT32_C(0x3)) ++#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (IMG_UINT32_C(0x4)) ++ ++#define RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN (IMG_UINT32_C(0x1)) ++#define RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN (IMG_UINT32_C(0x2)) ++ ++#define RGXFWIF_ISP_SCHEDMODE_VER1_IPP (IMG_UINT32_C(0x1)) ++#define RGXFWIF_ISP_SCHEDMODE_VER2_ISP (IMG_UINT32_C(0x2)) ++/*! 
++ ****************************************************************************** ++ * RGX firmware Init Config Data ++ *****************************************************************************/ ++ ++/* Flag definitions affecting the firmware globally */ ++#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (IMG_UINT32_C(0x1) << 0) ++#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (IMG_UINT32_C(0x1) << 1) ++#define RGXFWIF_INICFG_HWPERF_EN (IMG_UINT32_C(0x1) << 2) ++#define RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN (IMG_UINT32_C(0x1) << 3) ++#define RGXFWIF_INICFG_POW_RASCALDUST (IMG_UINT32_C(0x1) << 4) ++/* 5 unused */ ++#define RGXFWIF_INICFG_FBCDC_V3_1_EN (IMG_UINT32_C(0x1) << 6) ++#define RGXFWIF_INICFG_CHECK_MLIST_EN (IMG_UINT32_C(0x1) << 7) ++#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (IMG_UINT32_C(0x1) << 8) ++/* 9 unused */ ++/* 10 unused */ ++/* 11 unused */ ++#define RGXFWIF_INICFG_REGCONFIG_EN (IMG_UINT32_C(0x1) << 12) ++#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (IMG_UINT32_C(0x1) << 13) ++#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (IMG_UINT32_C(0x1) << 14) ++/* 15 unused */ ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (16) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (IMG_UINT32_C(0x7) << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) ++#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (IMG_UINT32_C(0x1) << 19) ++#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (IMG_UINT32_C(0x1) << 20) ++#define RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED (IMG_UINT32_C(0x1) << 21) ++#define RGXFWIF_INICFG_VALIDATE_IRQ (IMG_UINT32_C(0x1) << 22) ++#define RGXFWIF_INICFG_DISABLE_PDP_EN (IMG_UINT32_C(0x1) << 23) ++#define RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN (IMG_UINT32_C(0x1) << 24) ++#define RGXFWIF_INICFG_WORKEST (IMG_UINT32_C(0x1) << 25) ++#define RGXFWIF_INICFG_PDVFS (IMG_UINT32_C(0x1) << 26) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT (27) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND (RGXFWIF_CDM_ARBITRATION_TASK_DEMAND_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN (RGXFWIF_CDM_ARBITRATION_ROUND_ROBIN_EN << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) ++#define RGXFWIF_INICFG_CDM_ARBITRATION_MASK (IMG_UINT32_C(0x3) << RGXFWIF_INICFG_CDM_ARBITRATION_SHIFT) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT (29) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_NONE (0) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP (RGXFWIF_ISP_SCHEDMODE_VER1_IPP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP (RGXFWIF_ISP_SCHEDMODE_VER2_ISP << RGXFWIF_INICFG_ISPSCHEDMODE_SHIFT) ++#define RGXFWIF_INICFG_ISPSCHEDMODE_MASK (RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP |\ ++ RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP) ++#define RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER (IMG_UINT32_C(0x1) << 31) ++ ++#define RGXFWIF_INICFG_ALL (0xFFFFFFFFU) ++ ++/* Extended Flag definitions affecting the firmware globally */ ++#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT (0) ++/* [7] YUV10 override ++ * [6:4] Quality ++ * [3] Quality enable ++ * [2:1] Compression scheme ++ * [0] Lossy group 
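++ *
++ * Worked example (illustrative values only): lossy group 1 (bit 0),
++ * compression scheme 2 (bits 2:1) and quality enable set (bit 3) encode
++ * as 0x1 | (0x2 << 1) | (0x1 << 3) = 0x0D.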
*/ ++#define RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK (IMG_UINT32_C(0xFF)) /* RGX_CR_TFBC_COMPRESSION_CONTROL_MASKFULL */ ++#define RGXFWIF_INICFG_EXT_ALL (RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) ++ ++#define RGXFWIF_INICFG_SYS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \ ++ RGXFWIF_INICFG_CTXSWITCH_SRESET_EN) ++ ++/* Flag definitions affecting only workloads submitted by a particular OS */ ++ ++/*! ++ * @AddToGroup ContextSwitching ++ * @{ ++ * @Name Per-OS DM context switch configuration flags ++ * @{ ++ */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN (IMG_UINT32_C(0x1) << 0) /*!< Enables TDM context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN (IMG_UINT32_C(0x1) << 1) /*!< Enables GEOM DM context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN (IMG_UINT32_C(0x1) << 2) /*!< Enables FRAG DM context switch */ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN (IMG_UINT32_C(0x1) << 3) /*!< Enables CDM context switch */ ++ ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM (IMG_UINT32_C(0x1) << 4) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM (IMG_UINT32_C(0x1) << 5) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D (IMG_UINT32_C(0x1) << 6) ++#define RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM (IMG_UINT32_C(0x1) << 7) ++ ++#define RGXFWIF_INICFG_OS_ALL (0xFFU) ++ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN | \ ++ RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN) ++ ++#define RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_OS_CTXSWITCH_DM_ALL) ++ ++/*! ++ * @} End of Per-OS Context switch configuration flags ++ * @} End of AddToGroup ContextSwitching ++ */ ++ ++#define RGXFWIF_FILTCFG_TRUNCATE_HALF (IMG_UINT32_C(0x1) << 3) ++#define RGXFWIF_FILTCFG_TRUNCATE_INT (IMG_UINT32_C(0x1) << 2) ++#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (IMG_UINT32_C(0x1) << 1) ++ ++typedef IMG_UINT32 RGX_ACTIVEPM_CONF; ++#define RGX_ACTIVEPM_FORCE_OFF 0U ++#define RGX_ACTIVEPM_FORCE_ON 1U ++#define RGX_ACTIVEPM_DEFAULT 2U ++ ++typedef enum ++{ ++ RGX_RD_POWER_ISLAND_FORCE_OFF = 0, ++ RGX_RD_POWER_ISLAND_FORCE_ON = 1, ++ RGX_RD_POWER_ISLAND_DEFAULT = 2 ++} RGX_RD_POWER_ISLAND_CONF; ++ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++/* Unused registers re-purposed for storing counters of the Firmware's ++ * interrupts for each OS ++ */ ++#define IRQ_COUNTER_STORAGE_REGS \ ++ 0x2028U, /* RGX_CR_PM_TA_MMU_FSTACK */ \ ++ 0x2050U, /* RGX_CR_PM_3D_MMU_FSTACK */ \ ++ 0x2030U, /* RGX_CR_PM_START_OF_MMU_TACONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ \ ++ 0x2058U, /* RGX_CR_PM_START_OF_MMU_3DCONTEXT*/ ++#endif ++ ++typedef struct ++{ ++ IMG_UINT16 ui16RegNum; /*!< Register number */ ++ IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */ ++ IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */ ++ IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */ ++} RGXFW_REGISTER_LIST; ++ ++#if defined(RGX_FIRMWARE) ++typedef DLLIST_NODE RGXFWIF_DLLIST_NODE; ++#else ++typedef struct {RGXFWIF_DEV_VIRTADDR p; ++ RGXFWIF_DEV_VIRTADDR n;} RGXFWIF_DLLIST_NODE; ++#endif ++ ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SYSDATA; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_OSDATA; ++#if defined(SUPPORT_TBI_INTERFACE) ++typedef 
RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF;
++#endif
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL;
++typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_MUX_CNTBLK;
++typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK;
++typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_SELECT_CUSTOM_CNTRS;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_RTN_SLOTS;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COUNTERBUFFER;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FIRMWAREGCOVBUFFER;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR;
++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD;
++
++/*!
++ * This number is used to represent an invalid page catalogue physical address
++ */
++#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
++
++/*!
++ * This number is used to represent an unallocated page catalogue base register
++ */
++#define RGXFW_BIF_INVALID_PCSET 0xFFFFFFFFU
++
++/*!
++ Firmware memory context.
++*/
++typedef struct
++{
++ IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */
++ IMG_UINT32 uiPageCatBaseRegSet; /*!< associated page catalogue base register (RGXFW_BIF_INVALID_PCSET == unallocated) */
++ IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */
++ IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */
++ IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */
++ IMG_UINT32 ui32FwMemCtxFlags; /*!< Compatibility and other flags */
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++ IMG_UINT32 ui32OSid;
++ IMG_BOOL bOSidAxiProt;
++#endif
++
++} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
++
++/*!
++ * FW context state flags
++ */
++#define RGXFWIF_CONTEXT_FLAGS_NEED_RESUME (0x00000001U)
++#define RGXFWIF_CONTEXT_FLAGS_MC_NEED_RESUME_MASKFULL (0x000000FFU)
++#define RGXFWIF_CONTEXT_FLAGS_TDM_HEADER_STALE (0x00000100U)
++#define RGXFWIF_CONTEXT_FLAGS_LAST_KICK_SECURE (0x00000200U)
++
++/*!
++ * @InGroup ContextSwitching
++ * @Brief Firmware GEOM/TA context suspend state
++ */
++typedef struct
++{
++ /* FW-accessible TA state which must be written out to memory on context store */
++ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER; /*!< VDM control stream stack pointer, stored mid-TA */
++ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init; /*!< Initial value of VDM control stream stack pointer (in case it is 'lost' due to a lock-up) */
++ IMG_UINT32 uTAReg_VBS_SO_PRIM[4];
++ IMG_UINT16 ui16TACurrentIdx;
++} UNCACHED_ALIGN RGXFWIF_TACTX_STATE_PER_GEOM;
++
++typedef struct
++{
++ /* FW-accessible TA state which must be written out to memory on context store */
++ RGXFWIF_TACTX_STATE_PER_GEOM asGeomCore[RGX_NUM_GEOM_CORES];
++} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
++
++/*!
++ * @InGroup ContextSwitching ++ * @Brief Firmware FRAG/3D context suspend state ++ */ ++typedef struct ++{ ++ /* FW-accessible ISP state which must be written out to memory on context store */ ++ IMG_UINT32 u3DReg_PM_DEALLOCATED_MASK_STATUS; /*!< PM deallocation status */ ++ IMG_UINT32 u3DReg_PM_PDS_MTILEFREE_STATUS; /*!< Macro-tiles (MTs) finished status */ ++ IMG_UINT32 ui32CtxStateFlags; /*!< Compatibility and other flags */ ++ /* au3DReg_ISP_STORE should be the last element of the structure ++ * as this is an array whose size is determined at runtime ++ * after detecting the RGX core */ ++ IMG_UINT32 au3DReg_ISP_STORE[]; /*!< ISP state (per-pipe) */ ++} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE; ++ ++static_assert(sizeof(RGXFWIF_3DCTX_STATE) <= 16U, ++ "Size of structure RGXFWIF_3DCTX_STATE exceeds maximum expected size."); ++ ++#define RGXFWIF_CTX_USING_BUFFER_A (0) ++#define RGXFWIF_CTX_USING_BUFFER_B (1U) ++ ++typedef struct ++{ ++ IMG_UINT32 ui32CtxStateFlags; /*!< Target buffer and other flags */ ++} RGXFWIF_COMPUTECTX_STATE; ++ ++/*! ++ * @InGroup WorkloadContexts ++ * @Brief Firmware Common Context (or FWCC) ++ */ ++typedef struct RGXFWIF_FWCOMMONCONTEXT_ ++{ ++ /* CCB details for this firmware context */ ++ PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */ ++ PRGXFWIF_CCCB psCCB; /*!< CCB base */ ++ RGXFWIF_DMA_ADDR sCCBMetaDMAAddr; ++ ++ /* Context suspend state */ ++ PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */ ++ ++ /* Flags e.g. for context switching */ ++ IMG_UINT32 ui32FWComCtxFlags; ++ IMG_INT32 i32Priority; /*!< Priority level */ ++ IMG_UINT32 ui32PrioritySeqNum; ++ ++ /* Framework state */ ++ PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */ ++ ++ /* Statistic updates waiting to be passed back to the host... 
*/ ++ IMG_BOOL bStatsPending; /*!< True when some stats are pending */ ++ IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */ ++ IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */ ++ IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */ ++ RGXFWIF_DM eDM; /*!< Data Master type */ ++ IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitSignalNode; /*!< List entry for the wait-signal list */ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */ ++ IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */ ++ ++ IMG_UINT64 RGXFW_ALIGN ui64RobustnessAddress; ++ IMG_UINT32 ui32MaxDeadlineMS; /*!< Max HWR deadline limit in ms */ ++ bool bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */ ++ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */ ++ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sRunNode; /*!< List entry for the run list */ ++ RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */ ++ ++ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ ++ ++ /* References to the host side originators */ ++ IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */ ++ IMG_UINT32 ui32PID; /*!< associated process ID */ ++ ++ IMG_BOOL bGeomOOMDisabled; /*!< True when Geom DM OOM is not allowed */ ++ ++} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT; ++ ++static_assert(sizeof(RGXFWIF_FWCOMMONCONTEXT) <= 256U, ++ "Size of structure RGXFWIF_FWCOMMONCONTEXT exceeds maximum expected size."); ++ ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_TQ[RGX_TRP_MAX_NUM_CORES][1]; ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_2D[RGX_TRP_MAX_NUM_CORES][2]; ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_3D[RGX_TRP_MAX_NUM_CORES][4]; ++typedef IMG_UINT64 RGXFWIF_TRP_CHECKSUM_GEOM[RGX_TRP_MAX_NUM_CORES][2]; ++ ++/*! ++ * @InGroup WorkloadContexts ++ * @Brief Firmware render context. ++ */ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */ ++ RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */ ++ ++ RGXFWIF_STATIC_RENDERCONTEXT_STATE sStaticRenderContextState; ++ ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++ ++ IMG_UINT32 ui32FwRenderCtxFlags; /*!< Compatibility and other flags */ ++ ++#if defined(SUPPORT_TRP) ++ RGXFWIF_TRP_CHECKSUM_3D aui64TRPChecksums3D; ++ RGXFWIF_TRP_CHECKSUM_GEOM aui64TRPChecksumsGeom; ++#endif ++} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT; ++ ++/*! ++ Firmware compute context. ++*/ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sCDMContext; /*!< Firmware context for the CDM */ ++ ++ RGXFWIF_STATIC_COMPUTECONTEXT_STATE sStaticComputeContextState; ++ ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++ ++ IMG_UINT32 ui32ComputeCtxFlags; /*!< Compatibility and other flags */ ++ ++ IMG_UINT32 ui32WGPState; ++ IMG_UINT32 ui32WGPChecksum; ++ IMG_UINT32 ui32CoreMaskA; ++ IMG_UINT32 ui32CoreMaskB; ++} UNCACHED_ALIGN RGXFWIF_FWCOMPUTECONTEXT; ++ ++/*! ++ Firmware TDM context. 
++*/ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sTDMContext; /*!< Firmware context for the TDM */ ++ ++ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */ ++ ++} UNCACHED_ALIGN RGXFWIF_FWTDMCONTEXT; ++ ++/*! ++ * @InGroup WorkloadContexts ++ * @Brief Firmware transfer context. ++ */ ++typedef struct ++{ ++ RGXFWIF_FWCOMMONCONTEXT sTQContext; /*!< Firmware context for TQ3D */ ++ ++#if defined(SUPPORT_TRP) ++ IMG_UINT32 ui32TRPState; ++ RGXFWIF_TRP_CHECKSUM_TQ aui64TRPChecksumsTQ; ++#endif ++} UNCACHED_ALIGN RGXFWIF_FWTRANSFERCONTEXT; ++ ++/*! ++ ****************************************************************************** ++ * Defines for CMD_TYPE corruption detection and forward compatibility check ++ *****************************************************************************/ ++ ++/* CMD_TYPE 32bit contains: ++ * 31:16 Reserved for magic value to detect corruption (16 bits) ++ * 15 Reserved for RGX_CCB_TYPE_TASK (1 bit) ++ * 14:0 Bits available for CMD_TYPEs (15 bits) */ ++ ++ ++/* Magic value to detect corruption */ ++#define RGX_CMD_MAGIC_DWORD IMG_UINT32_C(0x2ABC) ++#define RGX_CMD_MAGIC_DWORD_MASK (0xFFFF0000U) ++#define RGX_CMD_MAGIC_DWORD_SHIFT (16U) ++#define RGX_CMD_MAGIC_DWORD_SHIFTED (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT) ++ ++/*! ++ * @InGroup KCCBTypes ClientCCBTypes ++ * @Brief Generic CCB control structure ++ */ ++typedef struct ++{ ++ volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */ ++ volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */ ++ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */ ++ IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */ ++} UNCACHED_ALIGN RGXFWIF_CCB_CTL; ++ ++/*! ++ * @Defgroup KCCBTypes Kernel CCB data interface ++ * @Brief Types grouping data structures and defines used in realising the KCCB functionality ++ * @{ ++ */ ++ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1U) /* MMU_CTRL_INVAL_PT_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2U) /* MMU_CTRL_INVAL_PD_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4U) /* MMU_CTRL_INVAL_PC_EN */ ++ ++#if !defined(__KERNEL) ++ ++#if !defined(RGX_FEATURE_SLC_VIVT) ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10U) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE < 2) ++#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8U) /* BIF_CTRL_INVAL_TLB1_EN */ ++#else ++#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB) ++#endif ++#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0U) /* not used */ ++ ++#else /* RGX_FEATURE_SLC_VIVT */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x0) /* not used */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ ++#endif ++ ++#else ++#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */ ++#endif ++ ++#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000U) /* indicates FW should interrupt the host */ ++ ++/*! 
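++ * Illustrative sketch only, not part of the original interface: the two
++ * helpers below show how a consumer of this header might check the magic
++ * bits described in the CMD_TYPE layout comment above, and advance a CCB
++ * offset using the wrap mask from RGXFWIF_CCB_CTL. Both helper names are
++ * hypothetical.
++ */
++static inline IMG_BOOL RGXCmdTypeMagicIsValid(IMG_UINT32 ui32CmdTypeWord)
++{
++ /* Bits 31:16 must hold RGX_CMD_MAGIC_DWORD, otherwise the word is
++ * considered corrupted. */
++ return ((ui32CmdTypeWord & RGX_CMD_MAGIC_DWORD_MASK) ==
++         RGX_CMD_MAGIC_DWORD_SHIFTED) ? IMG_TRUE : IMG_FALSE;
++}
++
++static inline IMG_UINT32 RGXCCBAdvanceOffset(IMG_UINT32 ui32Offset,
++                                             IMG_UINT32 ui32Bytes,
++                                             IMG_UINT32 ui32WrapMask)
++{
++ /* ui32WrapMask is the total CCB capacity minus one, so a bitwise AND
++ * wraps the offset back to the start of the buffer. */
++ return (ui32Offset + ui32Bytes) & ui32WrapMask;
++}
++
++/*!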
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_MMUCACHE type command ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32CacheFlags; ++ RGXFWIF_DEV_VIRTADDR sMMUCacheSync; ++ IMG_UINT32 ui32MMUCacheSyncUpdateValue; ++} RGXFWIF_MMUCACHEDATA; ++ ++#define RGXFWIF_BPDATA_FLAGS_ENABLE (1U << 0) ++#define RGXFWIF_BPDATA_FLAGS_WRITE (1U << 1) ++#define RGXFWIF_BPDATA_FLAGS_CTL (1U << 2) ++#define RGXFWIF_BPDATA_FLAGS_REGS (1U << 3) ++ ++typedef struct ++{ ++ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */ ++ IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */ ++ IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */ ++ IMG_UINT32 ui32BPDM; /*!< Breakpoint control */ ++ IMG_UINT32 ui32BPDataFlags; ++ IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */ ++ IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */ ++ RGXFWIF_DM eDM; /*!< DM associated with the breakpoint */ ++} RGXFWIF_BPDATA; ++ ++#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS (RGXFWIF_PRBUFFER_MAXSUPPORTED + 1U) /* +1 is RTDATASET cleanup */ ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_KICK type command ++ */ ++typedef struct ++{ ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ ++ IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */ ++ IMG_UINT32 ui32CWrapMaskUpdate; /*!< Client CCB wrap mask update after CCCB growth */ ++ IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */ ++ PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */ ++ IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */ ++} RGXFWIF_KCCB_CMD_KICK_DATA; ++ ++/*! ++ * @Brief Command data for @Ref RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK type command ++ */ ++typedef struct ++{ ++ RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; /*!< GEOM DM kick command data */ ++ RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; /*!< FRAG DM kick command data */ ++} RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FORCE_UPDATE type command ++ */ ++typedef struct ++{ ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */ ++ IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */ ++} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA; ++ ++/*! ++ * @Brief Resource types supported by \ref RGXFWIF_KCCB_CMD_CLEANUP type command ++ */ ++typedef enum ++{ ++ RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */ ++ RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */ ++ RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */ ++ RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */ ++} RGXFWIF_CLEANUP_TYPE; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CLEANUP type command ++ */ ++typedef struct ++{ ++ RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type */ ++ union { ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */ ++ PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */ ++ PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */ ++ PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */ ++ } uCleanupData; ++} RGXFWIF_CLEANUP_REQUEST; ++ ++/*! 
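++ * Illustrative sketch only, not part of the original interface: populating
++ * a cleanup request for a firmware common context. The helper name is
++ * hypothetical.
++ */
++static inline void RGXSketchCleanupRequestInit(RGXFWIF_CLEANUP_REQUEST *psReq,
++                                               PRGXFWIF_FWCOMMONCONTEXT psContext)
++{
++ psReq->eCleanupType = RGXFWIF_CLEANUP_FWCOMMONCONTEXT;
++ psReq->uCleanupData.psContext = psContext;
++}
++
++/*!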
++ * @Brief Type of power requests supported in \ref RGXFWIF_KCCB_CMD_POW type command ++ */ ++typedef enum ++{ ++ RGXFWIF_POW_OFF_REQ = 1, /*!< GPU power-off request */ ++ RGXFWIF_POW_FORCED_IDLE_REQ, /*!< Force-idle related request */ ++ RGXFWIF_POW_NUM_UNITS_CHANGE, /*!< Request to change default powered scalable units */ ++ RGXFWIF_POW_APM_LATENCY_CHANGE /*!< Request to change the APM latency period */ ++} RGXFWIF_POWER_TYPE; ++ ++/*! ++ * @Brief Supported force-idle related requests with \ref RGXFWIF_POW_FORCED_IDLE_REQ type request ++ */ ++typedef enum ++{ ++ RGXFWIF_POWER_FORCE_IDLE = 1, /*!< Request to force-idle GPU */ ++ RGXFWIF_POWER_CANCEL_FORCED_IDLE, /*!< Request to cancel a previously successful force-idle transition */ ++ RGXFWIF_POWER_HOST_TIMEOUT, /*!< Notification that host timed-out waiting for force-idle state */ ++} RGXFWIF_POWER_FORCE_IDLE_TYPE; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_POW type command ++ */ ++typedef struct ++{ ++ RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */ ++ union ++ { ++ IMG_UINT32 ui32NumOfDusts; /*!< Number of active Dusts */ ++ IMG_BOOL bForced; /*!< If the operation is mandatory */ ++ RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of Request. Consolidating Force Idle, Cancel Forced Idle, Host Timeout */ ++ } uPowerReqData; ++} RGXFWIF_POWER_REQUEST; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_SLCFLUSHINVAL type command ++ */ ++typedef struct ++{ ++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */ ++ IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing */ ++ IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */ ++ IMG_UINT64 RGXFW_ALIGN ui64Address; /*!< Optional address of range (only useful when bDMContext == FALSE) */ ++ IMG_UINT64 RGXFW_ALIGN ui64Size; /*!< Optional size of range (only useful when bDMContext == FALSE) */ ++} RGXFWIF_SLCFLUSHINVALDATA; ++ ++typedef enum ++{ ++ RGXFWIF_HWPERF_CTRL_TOGGLE = 0, ++ RGXFWIF_HWPERF_CTRL_SET = 1, ++ RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV = 2 ++} RGXFWIF_HWPERF_UPDATE_CONFIG; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG type command ++ */ ++typedef struct ++{ ++ RGXFWIF_HWPERF_UPDATE_CONFIG eOpCode; /*!< Control operation code */ ++ IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */ ++} RGXFWIF_HWPERF_CTRL; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_MUX_CNTBLK in the array */ ++ PRGX_HWPERF_CONFIG_MUX_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_MUX_CNTBLK array */ ++} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */ ++ PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */ ++} RGXFWIF_HWPERF_CONFIG_DA_BLKS; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE type command ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */ ++} RGXFWIF_CORECLKSPEEDCHANGE_DATA; ++ ++#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16U ++ ++/*! 
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS type command
++ */
++typedef struct
++{
++ bool bEnable;
++ IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */
++ IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */
++} RGXFWIF_HWPERF_CTRL_BLKS;
++
++
++typedef struct
++{
++ IMG_UINT16 ui16CustomBlock;
++ IMG_UINT16 ui16NumCounters;
++ PRGX_HWPERF_SELECT_CUSTOM_CNTRS sCustomCounterIDs;
++} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS;
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE & \ref RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE type commands
++ */
++typedef struct
++{
++ RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */
++ IMG_BOOL bDone; /*!< action backing/unbacking succeeded */
++} RGXFWIF_ZSBUFFER_BACKING_DATA;
++
++#if defined(SUPPORT_VALIDATION)
++typedef struct
++{
++ IMG_UINT32 ui32RegWidth;
++ IMG_BOOL bWriteOp;
++ IMG_UINT32 ui32RegAddr;
++ IMG_UINT64 RGXFW_ALIGN ui64RegVal;
++} RGXFWIF_RGXREG_DATA;
++
++typedef struct
++{
++ IMG_UINT64 ui64BaseAddress;
++ PRGXFWIF_FWCOMMONCONTEXT psContext;
++ IMG_UINT32 ui32Size;
++} RGXFWIF_GPUMAP_DATA;
++#endif
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE type command
++ */
++typedef struct
++{
++ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */
++ IMG_UINT32 ui32DeltaPages; /*!< Amount of the Freelist change */
++ IMG_UINT32 ui32NewPages; /*!< New amount of pages on the freelist (including ready pages) */
++ IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */
++} RGXFWIF_FREELIST_GS_DATA;
++
++#define RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS * 2U)
++#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000U
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE type command
++ */
++typedef struct
++{
++ IMG_UINT32 ui32FreelistsCount;
++ IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT];
++} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
++
++/*!
++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE type command
++ */
++typedef struct
++{
++ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context that may need to be resumed following a write offset update */
++} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
++
++/*!
++ ******************************************************************************
++ * Proactive DVFS Structures
++ *****************************************************************************/
++#define NUM_OPP_VALUES 16
++
++typedef struct
++{
++ IMG_UINT32 ui32Volt; /* V */
++ IMG_UINT32 ui32Freq; /* Hz */
++} UNCACHED_ALIGN PDVFS_OPP;
++
++typedef struct
++{
++ PDVFS_OPP asOPPValues[NUM_OPP_VALUES];
++#if defined(DEBUG)
++ IMG_UINT32 ui32MinOPPPoint;
++#endif
++ IMG_UINT32 ui32MaxOPPPoint;
++} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP;
++
++typedef struct
++{
++ IMG_UINT32 ui32MaxOPPPoint;
++} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA;
++
++typedef struct
++{
++ IMG_UINT32 ui32MinOPPPoint;
++} UNCACHED_ALIGN RGXFWIF_PDVFS_MIN_FREQ_DATA;
++
++/*!
++ ****************************************************************************** ++ * Register configuration structures ++ *****************************************************************************/ ++ ++#define RGXFWIF_REG_CFG_MAX_SIZE 512 ++ ++typedef enum ++{ ++ RGXFWIF_REGCFG_CMD_ADD = 101, ++ RGXFWIF_REGCFG_CMD_CLEAR = 102, ++ RGXFWIF_REGCFG_CMD_ENABLE = 103, ++ RGXFWIF_REGCFG_CMD_DISABLE = 104 ++} RGXFWIF_REGDATA_CMD_TYPE; ++ ++typedef enum ++{ ++ RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */ ++ RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */ ++ RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */ ++ RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */ ++ RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */ ++ RGXFWIF_REG_CFG_TYPE_TLA, /* TLA kick */ ++ RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */ ++ RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. Keep as last element */ ++} RGXFWIF_REG_CFG_TYPE; ++ ++typedef struct ++{ ++ IMG_UINT64 ui64Addr; ++ IMG_UINT64 ui64Mask; ++ IMG_UINT64 ui64Value; ++} RGXFWIF_REG_CFG_REC; ++ ++typedef struct ++{ ++ RGXFWIF_REGDATA_CMD_TYPE eCmdType; ++ RGXFWIF_REG_CFG_TYPE eRegConfigType; ++ RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig; ++ ++} RGXFWIF_REGCONFIG_DATA; ++ ++typedef struct ++{ ++ /** ++ * PDump WRW command write granularity is 32 bits. ++ * Add padding to ensure array size is 32 bit granular. ++ */ ++ IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[PVR_ALIGN((IMG_UINT32)RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))]; ++ RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE]; ++} UNCACHED_ALIGN RGXFWIF_REG_CFG; ++ ++typedef enum ++{ ++ RGXFWIF_OS_ONLINE = 1, ++ RGXFWIF_OS_OFFLINE ++} RGXFWIF_OS_STATE_CHANGE; ++ ++/*! ++ * @Brief Command data for \ref RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE type command ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32OSid; ++ RGXFWIF_OS_STATE_CHANGE eNewOSState; ++} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA; ++ ++typedef enum ++{ ++ RGXFWIF_PWR_COUNTER_DUMP_START = 1, ++ RGXFWIF_PWR_COUNTER_DUMP_STOP, ++ RGXFWIF_PWR_COUNTER_DUMP_SAMPLE, ++} RGXFWIF_COUNTER_DUMP_REQUEST; ++ ++typedef struct ++{ ++ RGXFWIF_COUNTER_DUMP_REQUEST eCounterDumpRequest; ++} RGXFW_ALIGN RGXFWIF_COUNTER_DUMP_DATA; ++ ++/*! 
++ * @Brief List of command types supported by the Kernel CCB ++ */ ++typedef enum ++{ ++ /* Common commands */ ++ RGXFWIF_KCCB_CMD_KICK = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< DM workload kick command */ ++ RGXFWIF_KCCB_CMD_MMUCACHE = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< MMU cache invalidation request */ ++ RGXFWIF_KCCB_CMD_BP = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++ RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< SLC flush and invalidation request */ ++ RGXFWIF_KCCB_CMD_CLEANUP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */ ++ RGXFWIF_KCCB_CMD_POW = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */ ++ RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */ ++ RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */ ++ RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */ ++ RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */ ++ /* RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE */ ++ RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 114U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */ ++ RGXFWIF_KCCB_CMD_HEALTH_CHECK = 115U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */ ++ RGXFWIF_KCCB_CMD_FORCE_UPDATE = 116U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */ ++ ++ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK = 117U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< There is a TA and a 3D command in this single kick */ ++ RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 118U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. */ ++ ++ /* Commands only permitted to the native or host OS */ ++ RGXFWIF_KCCB_CMD_REGCONFIG = 200U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++ RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 201U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */ ++ /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS */ ++ RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 203U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */ ++ RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 204U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Core clock speed change event */ ++ /* RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT*/ ++ RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 206U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */ ++ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 207U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a maximum frequency/OPP point */ ++ RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 208U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSid. 
It can only be serviced for the Host DDK */ ++ RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 209U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */ ++ /* RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE */ ++ /*RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE */ ++ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MIN_FREQ = 212U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set a minimum frequency/OPP point */ ++ RGXFWIF_KCCB_CMD_PHR_CFG = 213U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Periodic Hardware Reset behaviour */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_KCCB_CMD_RGXREG = 214U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Read RGX Register from FW */ ++#endif ++ RGXFWIF_KCCB_CMD_WDG_CFG = 215U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure Safety Firmware Watchdog */ ++ RGXFWIF_KCCB_CMD_COUNTER_DUMP = 216U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */ ++ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 217U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */ ++ RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 218U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_KCCB_CMD_GPUMAP = 219U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request a FW GPU mapping which is written into by the FW with a pattern */ ++#endif ++ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS = 220U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure directly addressable counters for HWPerf */ ++} RGXFWIF_KCCB_CMD_TYPE; ++ ++#define RGXFWIF_LAST_ALLOWED_GUEST_KCCB_CMD (RGXFWIF_KCCB_CMD_REGCONFIG - 1) ++ ++/*! @Brief Kernel CCB command packet */ ++typedef struct ++{ ++ RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type */ ++ IMG_UINT32 ui32KCCBFlags; /*!< Compatibility and other flags */ ++ ++ /* NOTE: Make sure that uCmdData is the last member of this struct ++ * This is to calculate actual command size for device mem copy. 
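++ * (For illustration: the size actually copied can be computed as
++ * offsetof(RGXFWIF_KCCB_CMD, uCmdData) plus the size of the type-specific
++ * command data, instead of sizeof(RGXFWIF_KCCB_CMD) with its full union.)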
++ * (Refer RGXGetCmdMemCopySize()) ++ * */ ++ union ++ { ++ RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */ ++ RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA sCombinedTA3DCmdKickData; /*!< Data for combined TA/3D Kick command */ ++ RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMU cache command */ ++ RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */ ++ RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */ ++ RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */ ++ RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */ ++ RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */ ++ RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */ ++ RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */ ++ RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */ ++ RGXFWIF_HWPERF_CONFIG_DA_BLKS sHWPerfCfgDABlks; /*!< Data for HWPerf configure Directly Addressable blocks */ ++ RGXFWIF_CORECLKSPEEDCHANGE_DATA sCoreClkSpeedChangeData;/*!< Data for core clock speed change */ ++ RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */ ++ RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */ ++ RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */ ++ RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */ ++ RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */ ++ RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData; /*!< Data for setting the max frequency/OPP */ ++ RGXFWIF_PDVFS_MIN_FREQ_DATA sPDVFSMinFreqData; /*!< Data for setting the min frequency/OPP */ ++ RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */ ++ RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */ ++ RGXFWIF_COUNTER_DUMP_DATA sCounterDumpConfigData; /*!< Data for dumping of register ranges */ ++ RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_RGXREG_DATA sFwRgxData; /*!< Data for reading off an RGX register */ ++ RGXFWIF_GPUMAP_DATA sGPUMapData; /*!< Data for requesting a FW GPU mapping which is written into by the FW with a pattern */ ++#endif ++ } UNCACHED_ALIGN uCmdData; ++} UNCACHED_ALIGN RGXFWIF_KCCB_CMD; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD); ++ ++/*! @} End of KCCBTypes */ ++ ++/*! ++ * @Defgroup FWCCBTypes Firmware CCB data interface ++ * @Brief Types grouping data structures and defines used in realising the Firmware CCB functionality ++ * @{ ++ */ ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING and the ++ * \ref RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING Firmware CCB commands ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32ZSBufferID; /*!< ZS buffer ID */ ++} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA; ++ ++/*! 
++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELIST_GROW Firmware CCB ++ * command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32FreelistID; /*!< Freelist ID */ ++} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA; ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION ++ * Firmware CCB command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32FreelistsCount; /*!< Freelists count */ ++ IMG_UINT32 ui32HwrCounter; /*!< HWR counter */ ++ IMG_UINT32 aui32FreelistIDs[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT]; /*!< Array of freelist IDs to reconstruct */ ++} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA; ++ ++#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF (1U<<0) /*!< 1 if a page fault happened */ ++#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS (1U<<1) /*!< 1 if applicable to all contexts */ ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION ++ * Firmware CCB command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */ ++ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */ ++ RGXFWIF_DM eDM; /*!< Data Master affected by the reset */ ++ IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */ ++ IMG_UINT32 ui32Flags; /*!< RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG bitfield */ ++ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress; /*!< Page fault address (only when applicable) */ ++} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA; ++ ++/*! ++ ****************************************************************************** ++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION ++ * Firmware CCB command ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_DEV_VIRTADDR sFWFaultAddr; /*!< Page fault address */ ++} RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA; ++ ++/*! 
++ ****************************************************************************** ++ * List of command types supported by the Firmware CCB ++ *****************************************************************************/ ++typedef enum ++{ ++ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be backed with physical pages ++ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ ++ RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be unbacked ++ \n Command data: RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA */ ++ RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand freelist grow ++ \n Command data: RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA */ ++ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests freelists reconstruction ++ \n Command data: RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA */ ++ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a HWR event on a context ++ \n Command data: RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA */ ++ RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand debug dump ++ \n Command data: None */ ++ RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand update on process stats ++ \n Command data: RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA */ ++ ++ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 108U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++ RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 109U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests GPU restart ++ \n Command data: None */ ++#if defined(SUPPORT_VALIDATION) ++ RGXFWIF_FWCCB_CMD_REG_READ = 110U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++#if defined(SUPPORT_SOC_TIMER) ++ RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS = 111U | RGX_CMD_MAGIC_DWORD_SHIFTED, ++#endif ++#endif ++ RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION = 112U | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a FW pagefault ++ \n Command data: RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA */ ++} RGXFWIF_FWCCB_CMD_TYPE; ++ ++/*! ++ ****************************************************************************** ++ * List of the various stats of the process to update/increment ++ *****************************************************************************/ ++typedef enum ++{ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */ ++ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTDMStores stat */ ++} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE; ++ ++/*! 
++ ******************************************************************************
++ * @Brief Command data of the \ref RGXFWIF_FWCCB_CMD_UPDATE_STATS Firmware CCB
++ * command
++ *****************************************************************************/
++typedef struct
++{
++ RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */
++ IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */
++ IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */
++} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
++
++typedef struct
++{
++ IMG_UINT32 ui32CoreClkRate;
++} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
++
++#if defined(SUPPORT_VALIDATION)
++typedef struct
++{
++ IMG_UINT64 ui64RegValue;
++} RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA;
++
++#if defined(SUPPORT_SOC_TIMER)
++typedef struct
++{
++ IMG_UINT64 ui64timerGray;
++ IMG_UINT64 ui64timerBinary;
++ IMG_UINT64 aui64uscTimers[RGX_FEATURE_NUM_CLUSTERS];
++} RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA;
++#endif
++#endif
++
++/*!
++ ******************************************************************************
++ * @Brief Firmware CCB command structure
++ *****************************************************************************/
++typedef struct
++{
++ RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */
++ IMG_UINT32 ui32FWCCBFlags; /*!< Compatibility and other flags */
++
++ union
++ {
++ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing*/
++ RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */
++ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */
++ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */
++ RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */
++ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange;
++ RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA sCmdFWPagefault; /*!< Data for FW pagefault notification */
++#if defined(SUPPORT_VALIDATION)
++ RGXFWIF_FWCCB_CMD_RGXREG_READ_DATA sCmdRgxRegReadData;
++#if defined(SUPPORT_SOC_TIMER)
++ RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS_DATA sCmdTimers;
++#endif
++#endif
++ } RGXFW_ALIGN uCmdData;
++} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
++
++/*! @} End of FWCCBTypes */
++
++/*!
++ ******************************************************************************
++ * Workload estimation Firmware CCB command structure for RGX
++ *****************************************************************************/
++typedef struct
++{
++ IMG_UINT16 ui16ReturnDataIndex; /*!< Index for return data array */
++ IMG_UINT32 ui32CyclesTaken; /*!< The cycles the workload took on the hardware */
++} RGXFWIF_WORKEST_FWCCB_CMD;
++
++/*!
++ * @Defgroup ClientCCBTypes Client CCB data interface ++ * @Brief Types grouping data structures and defines used in realising Client CCB commands/functionality ++ * @{ ++ */ ++ ++/* Required memory alignment for 64-bit variables accessible by Meta ++ (The gcc meta aligns 64-bit variables to 64-bit; therefore, memory shared ++ between the host and meta that contains 64-bit variables has to maintain ++ this alignment) */ ++#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64) ++ ++#define RGX_CCB_TYPE_TASK (IMG_UINT32_C(1) << 15) ++#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1U)) & ~(RGXFWIF_FWALLOC_ALIGN - 1U)) ++ ++typedef IMG_UINT32 RGXFWIF_CCB_CMD_TYPE; ++ ++/*! ++ * @Name Client CCB command types ++ * @{ ++ */ ++#define RGXFWIF_CCB_CMD_TYPE_GEOM (201U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TA DM command */ ++#define RGXFWIF_CCB_CMD_TYPE_TQ_3D (202U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for TQ operation */ ++#define RGXFWIF_CCB_CMD_TYPE_3D (203U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command */ ++#define RGXFWIF_CCB_CMD_TYPE_3D_PR (204U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 3D DM command for Partial render */ ++#define RGXFWIF_CCB_CMD_TYPE_CDM (205U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< Compute DM command */ ++#define RGXFWIF_CCB_CMD_TYPE_TQ_TDM (206U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< TDM command */ ++#define RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE (207U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++#define RGXFWIF_CCB_CMD_TYPE_TQ_2D (208U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) /*!< 2D DM command for TQ operation */ ++#define RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP (209U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++#define RGXFWIF_CCB_CMD_TYPE_NULL (210U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++#define RGXFWIF_CCB_CMD_TYPE_ABORT (211U | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK) ++ ++/* Leave a gap between CCB specific commands and generic commands */ ++#define RGXFWIF_CCB_CMD_TYPE_FENCE (212U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a command */ ++#define RGXFWIF_CCB_CMD_TYPE_UPDATE (213U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates of a command */ ++#define RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE (214U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence updates related to workload resources */ ++#define RGXFWIF_CCB_CMD_TYPE_FENCE_PR (215U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Fence dependencies of a PR command */ ++#define RGXFWIF_CCB_CMD_TYPE_PRIORITY (216U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Context priority update command */ ++/* Pre and Post timestamp commands are supposed to sandwich the DM cmd. The ++ padding code with the CCB wrap upsets the FW if we don't have the task type ++ bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. ++*/ ++#define RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP (217U | RGX_CMD_MAGIC_DWORD_SHIFTED) ++#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE (218U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates of a command */ ++#define RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE (219U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Unfenced fence updates related to workload resources */ ++ ++#if defined(SUPPORT_VALIDATION) ++#define RGXFWIF_CCB_CMD_TYPE_REG_READ (220U | RGX_CMD_MAGIC_DWORD_SHIFTED) ++#endif ++ ++#define RGXFWIF_CCB_CMD_TYPE_PADDING (221U | RGX_CMD_MAGIC_DWORD_SHIFTED) /*!< Skip without action type command */ ++/*! 
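++ * Worked examples, assuming the values defined above: RGXFWIF_CCB_CMD_TYPE_GEOM
++ * carries 201 in bits 14:0, RGX_CCB_TYPE_TASK (bit 15) set and
++ * RGX_CMD_MAGIC_DWORD (0x2ABC) in bits 31:16, giving 0x2ABC80C9; fence/update
++ * types such as RGXFWIF_CCB_CMD_TYPE_FENCE leave bit 15 clear. Similarly,
++ * with RGXFWIF_FWALLOC_ALIGN equal to 8 (sizeof(IMG_UINT64)),
++ * RGX_CCB_FWALLOC_ALIGN(13) rounds up to 16 and RGX_CCB_FWALLOC_ALIGN(16)
++ * stays 16.
++ */
++
++/*!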
@} End of Client CCB command types */ ++ ++typedef struct ++{ ++ /* Index for the KM Workload estimation return data array */ ++ IMG_UINT16 RGXFW_ALIGN ui16ReturnDataIndex; ++ /* Predicted time taken to do the work in cycles */ ++ IMG_UINT32 RGXFW_ALIGN ui32CyclesPrediction; ++ /* Deadline for the workload (in usecs) */ ++ IMG_UINT64 RGXFW_ALIGN ui64Deadline; ++} RGXFWIF_WORKEST_KICK_DATA; ++ ++/*! @Brief Command header of a command in the client CCB buffer. ++ * ++ * Followed by this header is the command-data specific to the ++ * command-type as specified in the header. ++ */ ++typedef struct ++{ ++ RGXFWIF_CCB_CMD_TYPE eCmdType; /*!< Command data type following this command header */ ++ IMG_UINT32 ui32CmdSize; /*!< Size of the command following this header */ ++ IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */ ++ IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */ ++ RGXFWIF_WORKEST_KICK_DATA RGXFW_ALIGN sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */ ++} RGXFWIF_CCB_CMD_HEADER; ++ ++/* ++ ****************************************************************************** ++ * Client CCB commands which are only required by the kernel ++ *****************************************************************************/ ++ ++/*! @Brief Command data for \ref RGXFWIF_CCB_CMD_TYPE_PRIORITY type client CCB command */ ++typedef struct ++{ ++ IMG_INT32 i32Priority; /*!< Priority level */ ++} RGXFWIF_CMD_PRIORITY; ++ ++/*! @} End of ClientCCBTypes */ ++ ++/*! ++ ****************************************************************************** ++ * Signature and Checksums Buffer ++ *****************************************************************************/ ++typedef struct ++{ ++ PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */ ++ IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */ ++} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL; ++ ++typedef struct ++{ ++ PRGXFWIF_COUNTERBUFFER sBuffer; /*!< Ptr to counter dump buffer */ ++ IMG_UINT32 ui32SizeInDwords; /*!< Amount of space for storing in the buffer */ ++} UNCACHED_ALIGN RGXFWIF_COUNTER_DUMP_CTL; ++ ++typedef struct ++{ ++ PRGXFWIF_FIRMWAREGCOVBUFFER sBuffer; /*!< Ptr to firmware gcov buffer */ ++ IMG_UINT32 ui32Size; /*!< Amount of space for storing in the buffer */ ++} UNCACHED_ALIGN RGXFWIF_FIRMWARE_GCOV_CTL; ++ ++/*! ++ ***************************************************************************** ++ * RGX Compatibility checks ++ *****************************************************************************/ ++ ++/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the ++ following define should be increased by 1 to indicate to the ++ compatibility logic that layout has changed. 
*/ ++#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 3 ++ ++typedef struct ++{ ++ IMG_UINT32 ui32LayoutVersion; /* WARNING: This field must be defined as first one in this structure */ ++ IMG_UINT64 RGXFW_ALIGN ui64BVNC; ++} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC; ++ ++typedef struct ++{ ++ IMG_UINT8 ui8OsCountSupport; ++} UNCACHED_ALIGN RGXFWIF_INIT_OPTIONS; ++ ++#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \ ++ RGXFWIF_COMPCHECKS_BVNC (name) = { \ ++ RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \ ++ 0, \ ++ } ++#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \ ++ do { \ ++ (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \ ++ (name).ui64BVNC = 0; \ ++ } while (false) ++ ++typedef struct ++{ ++ RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BVNC (from the RGX registers) */ ++ RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BVNC */ ++ IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the FW processor version */ ++ IMG_UINT32 ui32DDKVersion; /*!< software DDK version */ ++ IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */ ++ IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */ ++ RGXFWIF_INIT_OPTIONS sInitOptions; /*!< initialisation options bit-field */ ++ IMG_BOOL bUpdated; /*!< Information is valid */ ++} UNCACHED_ALIGN RGXFWIF_COMPCHECKS; ++ ++/*! ++ ****************************************************************************** ++ * Updated configuration post FW data init. ++ *****************************************************************************/ ++typedef struct ++{ ++ IMG_UINT32 ui32ActivePMLatencyms; /* APM latency in ms before signalling IDLE to the host */ ++ IMG_UINT32 ui32RuntimeCfgFlags; /* Compatibility and other flags */ ++ IMG_BOOL bActivePMLatencyPersistant; /* If set, APM latency does not reset to system default each GPU power transition */ ++ IMG_UINT32 ui32CoreClockSpeed; /* Core clock speed, currently only used to calculate timer ticks */ ++ IMG_UINT32 ui32DefaultDustsNumInit; /* Last number of dusts change requested by the host */ ++ IMG_UINT32 ui32PHRMode; /* Periodic Hardware Reset configuration values */ ++ IMG_UINT32 ui32HCSDeadlineMS; /* New number of milliseconds C/S is allowed to last */ ++ IMG_UINT32 ui32WdgPeriodUs; /* The watchdog period in microseconds */ ++ IMG_UINT32 aui32OSidPriority[RGXFW_MAX_NUM_OS]; /*!< Array of priorities per OS */ ++ PRGXFWIF_HWPERFBUF sHWPerfBuf; /* On-demand allocated HWPerf buffer address, to be passed to the FW */ ++} RGXFWIF_RUNTIME_CFG; ++ ++/*! ++ ***************************************************************************** ++ * Control data for RGX ++ *****************************************************************************/ ++ ++#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999U) ++ ++#if defined(PDUMP) ++ ++#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32U ++ ++typedef enum ++{ ++ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, ++ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT ++} RGXFWIF_PID_FILTER_MODE; ++ ++typedef struct ++{ ++ IMG_PID uiPID; ++ IMG_UINT32 ui32OSID; ++} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM; ++ ++typedef struct ++{ ++ RGXFWIF_PID_FILTER_MODE eMode; ++ /* each process in the filter list is specified by a PID and OS ID pair. ++ * each PID and OS pair is an item in the items array (asItems). ++ * if the array contains less than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries ++ * then it must be terminated by an item with pid of zero. 
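++ * for illustration: to exclude all processes except PIDs 100 and 200 on
++ * OS 0, set eMode to RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT, fill
++ * asItems[0] = {100, 0} and asItems[1] = {200, 0}, and terminate the
++ * list with an item whose uiPID is 0.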
++ */ ++ RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS]; ++} RGXFW_ALIGN RGXFWIF_PID_FILTER; ++#endif ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA (0x1U << 0) ++#define RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE (0x1U << 1) ++#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE (0x1U << 2) ++#define RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE (0x1U << 3) ++#endif ++ ++typedef enum ++{ ++ RGXFWIF_TPU_DM_PDM = 0, ++ RGXFWIF_TPU_DM_VDM = 1, ++ RGXFWIF_TPU_DM_CDM = 2, ++ RGXFWIF_TPU_DM_TDM = 3, ++ RGXFWIF_TPU_DM_LAST ++} RGXFWIF_TPU_DM; ++ ++typedef enum ++{ ++ RGXFWIF_GPIO_VAL_OFF = 0, /*!< No GPIO validation */ ++ RGXFWIF_GPIO_VAL_GENERAL = 1, /*!< Simple test case that ++ initiates by sending data via the ++ GPIO and then sends back any data ++ received over the GPIO */ ++ RGXFWIF_GPIO_VAL_AP = 2, /*!< More complex test case that writes ++ and reads data across the entire ++ GPIO AP address range.*/ ++#if defined(SUPPORT_STRIP_RENDERING) ++ RGXFWIF_GPIO_VAL_SR_BASIC = 3, /*!< Strip Rendering AP based basic test.*/ ++ RGXFWIF_GPIO_VAL_SR_COMPLEX = 4, /*!< Strip Rendering AP based complex test.*/ ++#endif ++ RGXFWIF_GPIO_VAL_TESTBENCH = 5, /*!< Validates the GPIO Testbench. */ ++ RGXFWIF_GPIO_VAL_LOOPBACK = 6, /*!< Send and then receive each byte ++ in the range 0-255. */ ++ RGXFWIF_GPIO_VAL_LOOPBACK_LITE = 7, /*!< Send and then receive each power-of-2 ++ byte in the range 0-255. */ ++ RGXFWIF_GPIO_VAL_LAST ++} RGXFWIF_GPIO_VAL_MODE; ++ ++typedef enum ++{ ++ FW_PERF_CONF_NONE = 0, ++ FW_PERF_CONF_ICACHE = 1, ++ FW_PERF_CONF_DCACHE = 2, ++ FW_PERF_CONF_JTLB_INSTR = 5, ++ FW_PERF_CONF_INSTRUCTIONS = 6 ++} FW_PERF_CONF; ++ ++typedef enum ++{ ++ FW_BOOT_STAGE_TLB_INIT_FAILURE = -2, ++ FW_BOOT_STAGE_NOT_AVAILABLE = -1, ++ FW_BOOT_NOT_STARTED = 0, ++ FW_BOOT_BLDR_STARTED = 1, ++ FW_BOOT_CACHE_DONE, ++ FW_BOOT_TLB_DONE, ++ FW_BOOT_MAIN_STARTED, ++ FW_BOOT_ALIGNCHECKS_DONE, ++ FW_BOOT_INIT_DONE, ++} FW_BOOT_STAGE; ++ ++/*! ++ * @AddToGroup KCCBTypes ++ * @{ ++ * @Name Kernel CCB return slot responses ++ * @{ ++ * Usage of bit-fields instead of bare integers ++ * allows FW to possibly pack-in several responses for each single kCCB command. ++ */ ++ ++#define RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED (1U << 0) /*!< Command executed (return status from FW) */ ++#define RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY (1U << 1) /*!< A cleanup was requested but resource busy */ ++#define RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE (1U << 2) /*!< Poll failed in FW for a HW operation to complete */ ++ ++#define RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE 0x0U /*!< Reset value of a kCCB return slot (set by host) */ ++/*! ++ * @} End of Name Kernel CCB return slot responses ++ * @} End of AddToGroup KCCBTypes ++ */ ++ ++typedef struct ++{ ++ /* Fw-Os connection states */ ++ volatile RGXFWIF_CONNECTION_FW_STATE eConnectionFwState; ++ volatile RGXFWIF_CONNECTION_OS_STATE eConnectionOsState; ++ volatile IMG_UINT32 ui32AliveFwToken; ++ volatile IMG_UINT32 ui32AliveOsToken; ++} UNCACHED_ALIGN RGXFWIF_CONNECTION_CTL; ++ ++/*! 
@Brief Firmware OS Initialization data \ref RGXFWIF_OSINIT ++ * allocated by services and used by the Firmware on boot ++ **/ ++typedef struct ++{ ++ /* Kernel CCB */ ++ PRGXFWIF_CCB_CTL psKernelCCBCtl; /*!< Kernel CCB Control */ ++ PRGXFWIF_CCB psKernelCCB; /*!< Kernel CCB */ ++ PRGXFWIF_CCB_RTN_SLOTS psKernelCCBRtnSlots; /*!< Kernel CCB return slots */ ++ ++ /* Firmware CCB */ ++ PRGXFWIF_CCB_CTL psFirmwareCCBCtl; /*!< Firmware CCB control */ ++ PRGXFWIF_CCB psFirmwareCCB; /*!< Firmware CCB */ ++ ++ /* Workload Estimation Firmware CCB */ ++ PRGXFWIF_CCB_CTL psWorkEstFirmwareCCBCtl; /*!< Workload estimation control */ ++ PRGXFWIF_CCB psWorkEstFirmwareCCB; /*!< Workload estimation buffer */ ++ ++ PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl; /*!< HWRecoveryInfo control */ ++ ++ IMG_UINT32 ui32HWRDebugDumpLimit; /*!< Firmware debug dump maximum limit */ ++ ++ PRGXFWIF_OSDATA sFwOsData; /*!< Firmware per-os shared data */ ++ ++ RGXFWIF_COMPCHECKS sRGXCompChecks; /*!< Compatibility checks to be populated by the Firmware */ ++ ++} UNCACHED_ALIGN RGXFWIF_OSINIT; ++ ++/*! @Brief Firmware System Initialization data \ref RGXFWIF_SYSINIT ++ * allocated by services and used by the Firmware on boot ++ **/ ++typedef struct ++{ ++ IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr; /*!< Fault read address */ ++ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase; /*!< PDS execution base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase; /*!< USC execution base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCStateTableBase; /*!< FBCDC bindless texture state table base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sFBCDCLargeStateTableBase; ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sTextureHeapBase; /*!< Texture state base */ ++ ++ IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter; /*! Event filter for Firmware events */ ++ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr; ++ ++ IMG_UINT32 RGXFW_ALIGN aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; ++ ++ RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_MAX]; /*!< Signature and Checksum Buffers for DMs */ ++ ++ RGXFWIF_PDVFS_OPP sPDVFSOPPInfo; ++ ++ RGXFWIF_DMA_ADDR sCorememDataStore; /*!< Firmware coremem data */ ++ ++ RGXFWIF_COUNTER_DUMP_CTL sCounterDumpCtl; ++ ++#if defined(SUPPORT_FIRMWARE_GCOV) ++ RGXFWIF_FIRMWARE_GCOV_CTL sFirmwareGcovCtl; /*!< Firmware gcov buffer control */ ++#endif ++ ++ IMG_UINT32 ui32FilterFlags; ++ ++ PRGXFWIF_RUNTIME_CFG sRuntimeCfg; /*!< Firmware Runtime configuration */ ++ ++ PRGXFWIF_TRACEBUF sTraceBufCtl; /*!< Firmware Trace buffer control */ ++ PRGXFWIF_SYSDATA sFwSysData; /*!< Firmware System shared data */ ++#if defined(SUPPORT_TBI_INTERFACE) ++ PRGXFWIF_TBIBUF sTBIBuf; /*!< Tbi log buffer */ ++#endif ++ ++ PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl; /*!< GPU utilization buffer */ ++ PRGXFWIF_REG_CFG sRegCfg; /*!< Firmware register user configuration */ ++ PRGXFWIF_HWPERF_CTL sHWPerfCtl; /*!< HWPerf counter block configuration.*/ ++ ++ RGXFWIF_DEV_VIRTADDR sAlignChecks; /*!< Array holding Server structures alignment data */ ++ ++ IMG_UINT32 ui32InitialCoreClockSpeed; /*!< Core clock speed at FW boot time */ ++ ++ IMG_UINT32 ui32InitialActivePMLatencyms; /*!< APM latency in ms before signalling IDLE to the host */ ++ ++ IMG_BOOL bFirmwareStarted; /*!< Flag to be set by the Firmware after successful start */ ++ ++ IMG_UINT32 ui32MarkerVal; /*!< Host/FW Trace synchronisation Partition Marker */ ++ ++ IMG_UINT32 ui32FirmwareStartedTimeStamp; /*!< Firmware initialization complete time */ ++ ++ IMG_UINT32 ui32JonesDisableMask; ++ ++ FW_PERF_CONF eFirmwarePerf; /*!< Firmware performance 
counter config */ ++ ++ /** ++ * FW Pointer to memory containing core clock rate in Hz. ++ * Firmware (PDVFS) updates the memory when running on non primary FW thread ++ * to communicate to host driver. ++ */ ++ PRGXFWIF_CORE_CLK_RATE sCoreClockRate; ++ ++#if defined(PDUMP) ++ RGXFWIF_PID_FILTER sPIDFilter; ++#endif ++ ++ RGXFWIF_GPIO_VAL_MODE eGPIOValidationMode; ++ ++ RGX_HWPERF_BVNC sBvncKmFeatureFlags; /*!< Used in HWPerf for decoding BVNC Features*/ ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++ IMG_UINT32 ui32SecurityTestFlags; ++ RGXFWIF_DEV_VIRTADDR pbSecureBuffer; ++ RGXFWIF_DEV_VIRTADDR pbNonSecureBuffer; ++#endif ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++ /* ++ * Used when validation is enabled to allow the host to check ++ * that MTS sent the correct sideband in response to a kick ++ * from a given OSes schedule register. ++ * Testing is enabled if RGXFWIF_KICK_TEST_ENABLED_BIT is set ++ * ++ * Set by the host to: ++ * (osid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT ++ * reset to 0 by FW when kicked by the given OSid ++ */ ++ IMG_UINT32 ui32OSKickTest; ++#endif ++ ++ /* Value to write into RGX_CR_TFBC_COMPRESSION_CONTROL */ ++ IMG_UINT32 ui32TFBCCompressionControl; ++ ++#if defined(SUPPORT_AUTOVZ) ++ IMG_UINT32 ui32VzWdgPeriod; ++#endif ++ ++} UNCACHED_ALIGN RGXFWIF_SYSINIT; ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#define RGXFWIF_KICK_TEST_ENABLED_BIT 0x1 ++#define RGXFWIF_KICK_TEST_OSID_SHIFT 0x1 ++#endif ++ ++/*! ++ ***************************************************************************** ++ * Timer correlation shared data and defines ++ *****************************************************************************/ ++ ++typedef struct ++{ ++ IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp; ++ IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp; ++ IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp; ++ ++ /* Utility variable used to convert CR timer deltas to OS timer deltas (nS), ++ * where the deltas are relative to the timestamps above: ++ * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */ ++ IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs; ++ ++ IMG_UINT32 ui32CoreClockSpeed; ++ IMG_UINT32 ui32Reserved; ++} UNCACHED_ALIGN RGXFWIF_TIME_CORR; ++ ++ ++/* The following macros are used to help converting FW timestamps to the Host ++ * time domain. On the FW the RGX_CR_TIMER counter is used to keep track of ++ * time; it increments by 1 every 256 GPU clock ticks, so the general ++ * formula to perform the conversion is: ++ * ++ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in nS, ++ * otherwise if (scale == 10^6) then deltaOS is in uS ] ++ * ++ * deltaCR * 256 256 * scale ++ * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ] ++ * GPUclockspeed GPUclockspeed ++ * ++ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20) ++ * to get some better accuracy and to avoid returning 0 in the integer ++ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz. ++ * This is the same as keeping K as a decimal number. ++ * ++ * The maximum deltaOS is slightly more than 5hrs for all GPU frequencies ++ * (deltaCR * K is more or less a constant), and it's relative to the base ++ * OS timestamp sampled as a part of the timer correlation data. 
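++ *
++ * Worked example (editor's illustration with assumed figures): for a GPU
++ * clock of 500 MHz and scale = 10^9 (nS):
++ *
++ *   K = (256 * 10^9 / 500000000) << 20 = 512 << 20 = 536870912
++ *
++ * One second of wall-clock time advances RGX_CR_TIMER by
++ * 500000000 / 256 = 1953125 ticks, and indeed
++ * (1953125 * 536870912) >> 20 = 10^9 nS, i.e. exactly one second.
++ *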
++ * This base is refreshed on GPU power-on, DVFS transition and periodic ++ * frequency calibration (executed every few seconds if the FW is doing ++ * some work), so as long as the GPU is doing something and one of these ++ * events is triggered then deltaCR * K will not overflow and deltaOS will be ++ * correct. ++ */ ++ ++#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20) ++ ++#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \ ++ (((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT) ++ ++ ++/*! ++ ****************************************************************************** ++ * GPU Utilisation ++ *****************************************************************************/ ++ ++/* See rgx_common.h for a list of GPU states */ ++#define RGXFWIF_GPU_UTIL_TIME_MASK (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF) & ~RGXFWIF_GPU_UTIL_STATE_MASK) ++ ++#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK) ++#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK) ++ ++/* The OS timestamps computed by the FW are approximations of the real time, ++ * which means they could be slightly behind or ahead the real timer on the Host. ++ * In some cases we can perform subtractions between FW approximated ++ * timestamps and real OS timestamps, so we need a form of protection against ++ * negative results if for instance the FW one is a bit ahead of time. ++ */ ++#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \ ++ (((newtime) > (oldtime)) ? ((newtime) - (oldtime)) : 0U) ++ ++#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \ ++ (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state)) ++ ++ ++/* The timer correlation array must be big enough to ensure old entries won't be ++ * overwritten before all the HWPerf events linked to those entries are processed ++ * by the MISR. The update frequency of this array depends on how fast the system ++ * can change state (basically how small the APM latency is) and perform DVFS transitions. ++ * ++ * The minimum size is 2 (not 1) to avoid race conditions between the FW reading ++ * an entry while the Host is updating it. With 2 entries in the worst case the FW ++ * will read old data, which is still quite ok if the Host is updating the timer ++ * correlation at that time. 
++ */ ++#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256U ++#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE) ++ ++/* Make sure the timer correlation array size is a power of 2 */ ++static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1U)) == 0U, ++ "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two"); ++ ++typedef struct ++{ ++ RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE]; ++ IMG_UINT32 ui32TimeCorrSeqCount; ++ ++ /* Compatibility and other flags */ ++ IMG_UINT32 ui32GpuUtilFlags; ++ ++ /* Last GPU state + OS time of the last state update */ ++ IMG_UINT64 RGXFW_ALIGN ui64LastWord; ++ ++ /* Counters for the amount of time the GPU was active/idle/blocked */ ++ IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM]; ++} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32RenderTargetIndex; //Render number ++ IMG_UINT32 ui32CurrentRenderTarget; //index in RTA ++ IMG_UINT32 ui32ActiveRenderTargets; //total active RTs ++ IMG_UINT32 ui32CumulActiveRenderTargets; //total active RTs from the first TA kick, for OOM ++ RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices ++ RGXFWIF_DEV_VIRTADDR sRTANumPartialRenders; //Array of number of occurred partial renders per render target ++ IMG_UINT32 ui32MaxRTs; //Number of render targets in the array ++ IMG_UINT32 ui32RTACtlFlags; /* Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_RTA_CTL; ++ ++/*! ++ * @InGroup RenderTarget ++ * @Brief Firmware Freelist holding usage state of the Parameter Buffers ++ */ ++typedef struct ++{ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN psFreeListDevVAddr; /*!< Freelist page table base */ ++ IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;/*!< Freelist page table entry for current free page */ ++ IMG_UINT32 ui32CurrentStackTop; /*!< Freelist current free page */ ++ IMG_UINT32 ui32MaxPages; /*!< Max no. of pages can be added to the freelist */ ++ IMG_UINT32 ui32GrowPages; /*!< No pages to add in each freelist grow */ ++ IMG_UINT32 ui32CurrentPages; /*!< Total no. of pages made available to the PM HW */ ++ IMG_UINT32 ui32AllocatedPageCount; /*!< No. of pages allocated by PM HW */ ++ IMG_UINT32 ui32AllocatedMMUPageCount; /*!< No. of pages allocated for GPU MMU for PM*/ ++#if defined(SUPPORT_SHADOW_FREELISTS) ++ IMG_UINT32 ui32HWRCounter; ++ PRGXFWIF_FWMEMCONTEXT psFWMemContext; ++#endif ++ IMG_UINT32 ui32FreeListID; /*!< Unique Freelist ID */ ++ IMG_BOOL bGrowPending; /*!< Freelist grow is pending */ ++ IMG_UINT32 ui32ReadyPages; /*!< Reserved pages to be used only on PM OOM event */ ++ IMG_UINT32 ui32FreelistFlags; /*!< Compatibility and other flags */ ++#if defined(SUPPORT_AGP) ++ IMG_UINT32 ui32PmGlobalPb; /*!< PM Global PB on which Freelist is loaded */ ++#endif ++} UNCACHED_ALIGN RGXFWIF_FREELIST; ++ ++/*! 
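++ * Editor's note (illustrative sketch, not part of the original header): on
++ * one plausible reading of the RGXFWIF_FREELIST fields above, a host-side
++ * helper deciding whether a further grow can be honoured might look like:
++ *
++ *   static IMG_BOOL _FreeListCanGrow(const RGXFWIF_FREELIST *psFL)
++ *   {
++ *       // each grow adds ui32GrowPages to the pages made available to the
++ *       // PM HW, bounded by ui32MaxPages; bGrowPending guards against
++ *       // issuing a second grow while one is still outstanding
++ *       return (IMG_BOOL)(!psFL->bGrowPending &&
++ *                         (psFL->ui32CurrentPages + psFL->ui32GrowPages <=
++ *                          psFL->ui32MaxPages));
++ *   }
++ */
++
++/*!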
++ ****************************************************************************** ++ * HWRTData ++ *****************************************************************************/ ++ ++/* HWRTData flags */ ++/* Deprecated flags 1:0 */ ++#define HWRTDATA_HAS_LAST_TA (1UL << 2) ++#define HWRTDATA_PARTIAL_RENDERED (1UL << 3) ++#define HWRTDATA_DISABLE_TILE_REORDERING (1UL << 4) ++#define HWRTDATA_NEED_BRN65101_BLIT (1UL << 5) ++#define HWRTDATA_FIRST_BRN65101_STRIP (1UL << 6) ++#define HWRTDATA_NEED_BRN67182_2ND_RENDER (1UL << 7) ++#if defined(SUPPORT_AGP) ++#define HWRTDATA_GLOBAL_PB_NUMBER_BIT0 (1UL << 8) ++#if defined(SUPPORT_AGP4) ++#define HWRTDATA_GLOBAL_PB_NUMBER_BIT1 (1UL << 9) ++#endif ++#define HWRTDATA_GEOM_NEEDS_RESUME (1UL << 10) ++#endif ++ ++typedef enum ++{ ++ RGXFWIF_RTDATA_STATE_NONE = 0, ++ RGXFWIF_RTDATA_STATE_KICKTA, ++ RGXFWIF_RTDATA_STATE_KICKTAFIRST, ++ RGXFWIF_RTDATA_STATE_TAFINISHED, ++ RGXFWIF_RTDATA_STATE_KICK3D, ++ RGXFWIF_RTDATA_STATE_3DFINISHED, ++ RGXFWIF_RTDATA_STATE_3DCONTEXTSTORED, ++ RGXFWIF_RTDATA_STATE_TAOUTOFMEM, ++ RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED, ++ /* In case of HWR, we can't set the RTDATA state to NONE, ++ * as this will cause any TA to become a first TA. ++ * To ensure all related TA's are skipped, we use the HWR state */ ++ RGXFWIF_RTDATA_STATE_HWR, ++ RGXFWIF_RTDATA_STATE_UNKNOWN = 0x7FFFFFFFU ++} RGXFWIF_RTDATA_STATE; ++ ++typedef struct ++{ ++ IMG_BOOL bTACachesNeedZeroing; ++ ++ IMG_UINT32 ui32ScreenPixelMax; ++ IMG_UINT64 RGXFW_ALIGN ui64MultiSampleCtl; ++ IMG_UINT64 ui64FlippedMultiSampleCtl; ++ IMG_UINT32 ui32TPCStride; ++ IMG_UINT32 ui32TPCSize; ++ IMG_UINT32 ui32TEScreen; ++ IMG_UINT32 ui32MTileStride; ++ IMG_UINT32 ui32TEAA; ++ IMG_UINT32 ui32TEMTILE1; ++ IMG_UINT32 ui32TEMTILE2; ++ IMG_UINT32 ui32ISPMergeLowerX; ++ IMG_UINT32 ui32ISPMergeLowerY; ++ IMG_UINT32 ui32ISPMergeUpperX; ++ IMG_UINT32 ui32ISPMergeUpperY; ++ IMG_UINT32 ui32ISPMergeScaleX; ++ IMG_UINT32 ui32ISPMergeScaleY; ++ IMG_UINT32 uiRgnHeaderSize; ++ IMG_UINT32 ui32ISPMtileSize; ++} UNCACHED_ALIGN RGXFWIF_HWRTDATA_COMMON; ++ ++/*! ++ * @InGroup RenderTarget ++ * @Brief Firmware Render Target data i.e. 
HWRTDATA used to hold the PM context ++ */ ++typedef struct ++{ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */ ++ ++ IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[4]; /*!< VCE Page Catalogue base */ ++ IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[4]; ++ IMG_UINT64 RGXFW_ALIGN ui64TECatBase[4]; /*!< TE Page Catalogue base */ ++ IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[4]; ++ IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase; /*!< Alist Page Catalogue base */ ++ IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase; ++ ++ IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer; /*!< Freelist page table entry for current Mlist page */ ++ IMG_UINT32 ui32PMMListStackPointer; /*!< Current Mlist page */ ++ ++ RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; /*!< Render target dimension dependent data */ ++ ++ IMG_UINT32 ui32HWRTDataFlags; ++ RGXFWIF_RTDATA_STATE eState; /*!< Current workload processing state of HWRTDATA */ ++ ++ PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS]; /*!< Freelist to use */ ++ IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS]; ++ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap table base */ ++ ++ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Render target clean up state */ ++ ++ RGXFWIF_RTA_CTL sRTACtl; /*!< Render target array data */ ++ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr; /*!< Tail pointers base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sMacrotileArrayDevVAddr; /*!< Macrotiling array base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sRgnHeaderDevVAddr; /*!< Region headers base */ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN sRTCDevVAddr; /*!< Render target cache base */ ++#if defined(RGX_FIRMWARE) ++ struct RGXFWIF_FWCOMMONCONTEXT_* RGXFW_ALIGN psOwnerGeom; ++#else ++ RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN pui32OwnerGeomNotUsedByHost; ++#endif ++#if defined(SUPPORT_TRP) ++ IMG_UINT32 ui32KickFlagsCopy; ++ IMG_UINT32 ui32TRPState; ++ IMG_UINT32 ui32TEPageCopy; ++ IMG_UINT32 ui32VCEPageCopy; ++#endif ++#if defined(SUPPORT_AGP) ++ IMG_BOOL bTACachesNeedZeroing; ++#endif ++} UNCACHED_ALIGN RGXFWIF_HWRTDATA; ++ ++/* Sync_checkpoint firmware object. ++ * This is the FW-addressable structure use to hold the sync checkpoint's ++ * state and other information which needs to be accessed by the firmware. ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */ ++ IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */ ++} SYNC_CHECKPOINT_FW_OBJ; ++ ++/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */ ++#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0) ++ ++#endif /* RGX_FWIF_KM_H */ ++ ++/****************************************************************************** ++ End of file (rgx_fwif_km.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h b/drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h +new file mode 100644 +index 000000000000..e60bafd84536 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_fwif_resetframework.h +@@ -0,0 +1,70 @@ ++/*************************************************************************/ /*! ++@File rgx_fwif_resetframework.h ++@Title Post-reset work-around framework FW interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGX_FWIF_RESETFRAMEWORK_H)
++#define RGX_FWIF_RESETFRAMEWORK_H
++
++#include "img_types.h"
++#include "rgx_fwif_shared.h"
++
++typedef struct
++{
++ union
++ {
++ IMG_UINT64 uCDMReg_CDM_CB_BASE; // defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
++ IMG_UINT64 uCDMReg_CDM_CTRL_STREAM_BASE; // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
++ };
++ IMG_UINT64 uCDMReg_CDM_CB_QUEUE; // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
++ IMG_UINT64 uCDMReg_CDM_CB; // !defined(RGX_FEATURE_CDM_USER_MODE_QUEUE)
++} RGXFWIF_RF_REGISTERS;
++
++typedef struct
++{
++ /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
++ RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters;
++
++} RGXFWIF_RF_CMD;
++
++/* to opaquely allocate and copy in the kernel */
++#define RGXFWIF_RF_CMD_SIZE sizeof(RGXFWIF_RF_CMD)
++
++#endif /* RGX_FWIF_RESETFRAMEWORK_H */
+diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_sf.h b/drivers/gpu/drm/img-rogue/rgx_fwif_sf.h
+new file mode 100644
+index 000000000000..9238cf8ca589
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgx_fwif_sf.h
+@@ -0,0 +1,931 @@
++/*************************************************************************/ /*!
++@File rgx_fwif_sf.h
++@Title RGX firmware interface string format specifiers
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Header for the rgx firmware logging messages. The following
++ list contains the messages the firmware prints.
Changing anything ++ but the first column or spelling mistakes in the strings will ++ break compatibility with log files created with older/newer ++ firmware versions. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_FWIF_SF_H ++#define RGX_FWIF_SF_H ++ ++/****************************************************************************** ++ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you ++ * WILL BREAK fw tracing message compatibility with previous ++ * fw versions. Only add new ones, if so required. ++ *****************************************************************************/ ++/* Available log groups */ ++#define RGXFW_LOG_SFGROUPLIST \ ++ X(RGXFW_GROUP_NULL,NULL) \ ++ X(RGXFW_GROUP_MAIN,MAIN) \ ++ X(RGXFW_GROUP_CLEANUP,CLEANUP) \ ++ X(RGXFW_GROUP_CSW,CSW) \ ++ X(RGXFW_GROUP_PM, PM) \ ++ X(RGXFW_GROUP_RTD,RTD) \ ++ X(RGXFW_GROUP_SPM,SPM) \ ++ X(RGXFW_GROUP_MTS,MTS) \ ++ X(RGXFW_GROUP_BIF,BIF) \ ++ X(RGXFW_GROUP_MISC,MISC) \ ++ X(RGXFW_GROUP_POW,POW) \ ++ X(RGXFW_GROUP_HWR,HWR) \ ++ X(RGXFW_GROUP_HWP,HWP) \ ++ X(RGXFW_GROUP_RPM,RPM) \ ++ X(RGXFW_GROUP_DMA,DMA) \ ++ X(RGXFW_GROUP_DBG,DBG) ++ ++/*! 
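++ * Editor's note (illustrative, not part of the original header): the
++ * two-argument X(A,B) entries let RGXFW_LOG_SFGROUPLIST expand into several
++ * tables without hand editing. The enum below expands X(A,B) to A; a table
++ * of group-name strings is an equally plausible expansion:
++ *
++ *   static const IMG_CHAR *const apszTraceGroupName[] = {
++ *   #define X(A,B) #B,
++ *       RGXFW_LOG_SFGROUPLIST
++ *   #undef X
++ *   };
++ */
++
++/*!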
++ * @InGroup SRVAndFWTracing ++ * @Brief FW Trace log groups(GID) list ++ */ ++enum RGXFW_LOG_SFGROUPS { ++#define X(A,B) A, ++ RGXFW_LOG_SFGROUPLIST ++#undef X ++}; ++ ++#define IMG_SF_STRING_MAX_SIZE 256U ++ ++typedef struct { ++ IMG_UINT32 ui32Id; ++ IMG_CHAR sName[IMG_SF_STRING_MAX_SIZE]; ++} RGXFW_STID_FMT; /* pair of string format id and string formats */ ++ ++typedef struct { ++ IMG_UINT32 ui32Id; ++ const IMG_CHAR *psName; ++} RGXKM_STID_FMT; /* pair of string format id and string formats */ ++ ++/* Table of String Format specifiers, the group they belong and the number of ++ * arguments each expects. Xmacro styled macros are used to generate what is ++ * needed without requiring hand editing. ++ * ++ * id : id within a group ++ * gid : group id ++ * Sym name : name of enumerations used to identify message strings ++ * String : Actual string ++ * #args : number of arguments the string format requires ++ */ ++#define RGXFW_LOG_SFIDLIST \ ++/*id, gid, id name, string, # arguments */ \ ++X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string", 0) \ ++\ ++X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d", 6) \ ++X( 2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%x, HWRTData1State=%x", 2) \ ++X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d, prio: %d", 4) \ ++X( 4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished", 0) \ ++X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ ++X( 6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished", 0) \ ++X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d", 7) \ ++X( 8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished", 0) \ ++X( 9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render", 0) \ ++X( 10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render", 0) \ ++X( 11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x", 2) \ ++X( 12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ ++X( 13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished", 0) \ ++X( 14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8x", 3) \ ++X( 16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx 0x%08.8x @ %d", 2) \ ++X( 17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ ++X( 18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded", 0) \ ++X( 19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ ++X( 20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx 0x%08.8x", 1) \ ++X( 21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= ????????, [0x%08.8x] is ???????? 
requires 0x%08.8x", 4) \ ++X( 22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx 0x%08.8x @ %d", 2) \ ++X( 23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ ++X( 24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of:", 1) \ ++X( 25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ ++X( 26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ ++X( 27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW", 0) \ ++X( 28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.", 0) \ ++X( 29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u", 1) \ ++X( 30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = 0x%x, fw = 0x%x", 3) \ ++X( 31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered", 0) \ ++X( 32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler", 2) \ ++X( 33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8x", 1) \ ++X( 34, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state", 0) \ ++X( 35, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers", 0) \ ++X( 36, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u", 1) \ ++X( 37, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets", 1) \ ++X( 38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER_DEPRECATED, "Estimated Power 0x%x", 1) \ ++X( 39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u", 1) \ ++X( 40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u", 2) \ ++X( 41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK_DEPRECATED, "HWR sizes check %d failed: addresses = %d, sizes = %d", 3) \ ++X( 42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%x", 1) \ ++X( 43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ ++X( 44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d", 2) \ ++X( 45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down", 0) \ ++X( 46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)", 2) \ ++X( 47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)", 0) \ ++X( 48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)", 0) \ ++X( 49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: 0x%08.8x", 2) \ ++X( 50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d", 7) \ ++X( 51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x", 3) \ ++X( 52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK", 1) \ ++X( 53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty", 1) \ 
++X( 54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08x%08x, VCE=0x%08x%08x, ALIST=0x%08x%08x, IsTA=%d", 8) \ ++X( 55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick", 0) \ ++X( 56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device", 1) \ ++X( 57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8x DM%u", 2) \ ++X( 58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ ++X( 59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED_DEPRECATED, "RDM finished on context %u", 1) \ ++X( 60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8x @ %d, prio: %d", 3) \ ++X( 61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED_DEPRECATED, "SHG finished", 0) \ ++X( 62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED_DEPRECATED, "FBA finished on context %u", 1) \ ++X( 63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed", 0) \ ++X( 64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start", 1) \ ++X( 65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete", 1) \ ++X( 66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE_DEPRECATED, "FC%u cCCB Woff update = %u", 2) \ ++X( 67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ ++X( 68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_INIT, "GPU init", 0) \ ++X( 69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_INIT, "GPU Units init (# mask: 0x%x)", 1) \ ++X( 70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d", 3) \ ++X( 71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x", 3) \ ++X( 72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)", 1) \ ++X( 73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.", 0) \ ++X( 74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DEPRECATED, "GPU has locked up (see HWR logs for more info)", 0) \ ++X( 75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)", 0) \ ++X( 76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)", 0) \ ++X( 77, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM_DEPRECATED, "Doppler out of memory event for FC %u", 1) \ ++X( 78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires >= 0x%08.8x", 3) \ ++X( 79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ ++X( 80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [0x%08.8x]", 1) \ ++X( 81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx 0x%08.8x @ %d", 2) \ ++X( 82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [0x%08.8x] = 0x%08.8x", 2) \ ++X( 83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8x @ %d", 2) \ ++X( 84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM_DEPRECATED, "RPM Out of memory! 
Context 0x%08x, SH requestor %d", 2) \ ++X( 85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD_DEPRECATED, "Discard RTU due to RPM abort: FWCtx 0x%08.8x @ %d, prio: %d, Frame Context: %d", 4) \ ++X( 86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ ++X( 87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN_DEPRECATED, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)", 4) \ ++X( 88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)", 4) \ ++X( 89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ ++X( 90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)", 3) \ ++X( 91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d", 1) \ ++X( 92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d", 1) \ ++X( 93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz", 1) \ ++X( 94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ ++X( 95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DEPRECATED, "Signal check failed, Required Data: 0x%x, Address: 0x%08x%08x", 3) \ ++X( 96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_DEPRECATED, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x", 5) \ ++X( 97, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ ++X( 98, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled", 0) \ ++X( 99, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)", 3) \ ++X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)", 3) \ ++X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x", 4) \ ++X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM_DEPRECATED, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x", 4) \ ++X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM_DEPRECATED, "DM: %u signal check failed", 1) \ ++X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8x @ %d, prio:%d", 3) \ ++X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished", 0) \ ++X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS_DEPRECATED, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08x 0x%08x)", 4) \ ++X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT_DEPRECATED, "BRN 54141 HIT", 0) \ ++X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA_DEPRECATED, "BRN 54141 Dummy TA kicked", 0) \ ++X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA_DEPRECATED, "BRN 54141 resume TA", 0) \ ++X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT_DEPRECATED, "BRN 54141 double hit after applying WA", 0) \ ++X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE_DEPRECATED, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x", 2) \ ++X(112, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%x, Current Data: 0x%x, Address: 0x%08x%08x", 4) \ ++X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL_DEPRECATED, "TDM stalled (Roff = %u, Woff = %u)", 2) \ ++X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx 0x%08.8x", 1) \ ++X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE_DEPRECATED, "Changing OSid %d's priority from %u to %u", 3) \ ++X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed", 0) \ ++X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ ++X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ ++X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \ ++X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \ ++X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8x @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ ++X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 6) \ ++X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED3, "Kick RTU: FWCtx 0x%08.8x @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \ ++X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED2, "Kick SHG: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7) \ ++X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.", 1) \ ++X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.", 1) \ ++X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.", 1) \ ++X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed", 1) \ ++X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \ ++X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ ++X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. 
Triggered HCS (see HWR logs).", 1) \ ++X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET_DEPRECATED, "HCS changed to %d ms", 1) \ ++X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT_DEPRECATED, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)", 4) \ ++X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %d: USCTiles=%d", 2) \ ++X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF_DEPRECATED, "Isolation grouping is disabled", 0) \ ++X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_DEPRECATED, "Isolation group configured with a priority threshold of %d", 1) \ ++X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE_DEPRECATED, "OS %d has come online", 1) \ ++X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE_DEPRECATED, "OS %d has gone offline", 1) \ ++X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8x, OSId: %u, Signal Address: 0x%08x%08x", 4) \ ++X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS_DEPRECATED, "TDM Queue: FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 7) \ ++X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes 0, Woff = %u, Size = %u)", 6) \ ++X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 5) \ ++X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_DEINIT, "GPU deinit", 0) \ ++X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNITS_DEINIT, "GPU units deinit", 0) \ ++X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x", 2) \ ++X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_LIMIT, "UFO limit exceeded %d/%d", 2) \ ++X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store", 0) \ ++X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x", 3) \ ++X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND_DEPRECATED, "Unknown Command (eCmdType=0x%08x)", 1) \ ++X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x", 4) \ ++X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx 0x%08.8x @ %d [0x%08.8x] = 0x%08.8x, reason %d", 5) \ ++X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u", 3) \ ++X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x", 6) \ ++X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u", 2) \ ++X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 3) \ ++X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_DEPRECATED, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x", 3) \ ++X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x", 3) \ ++X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x", 4) \ ++X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8x DM%u usc_breakpoint_ctrl_dm = %u", 4) \ ++X(161, RGXFW_GROUP_MAIN, 
RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID_DEPRECATED, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x", 3) \ ++X(162, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE_DEPRECATED, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x", 4) \ ++X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD_DEPRECATED, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u", 4) \ ++X(164, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FULL_CHPTCCB, "Checkpoint CCB for OSid %u is full, signalling host for full check state (Roff = %u, Woff = %u)", 3) \ ++X(165, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS_DEPRECATED, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x chptCCBCtl@0x%x chptCCB@0x%x", 8) \ ++X(166, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_STATE_CHANGE, "OSid %d fw state transition request: from %d to %d (0-offline 1-ready 2-active 3-offloading). Status %d (1-ok 0-fail)", 4) \ ++X(167, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_STALE_KCCB_CMDS, "OSid %u has %u stale commands in its KCCB", 2) \ ++X(168, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_VCE_PAUSE, "Applying VCE pause", 0) \ ++X(169, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KCCB_UPDATE_RTN_SLOT_DEPRECATED, "OSid %u KCCB slot %u value updated to %u", 3) \ ++X(170, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_KCCB_COMMAND, "Unknown KCCB Command: KCCBCtl=0x%08x, KCCB=0x%08x, Roff=%u, Woff=%u, Wrap=%u, Cmd=0x%08x, CmdType=0x%08x", 7) \ ++X(171, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND1, "Unknown Client CCB Command processing fences: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ ++X(172, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_CCB_COMMAND2, "Unknown Client CCB Command executing kick: FWCtx=0x%08x, CCBCtl=0x%08x, CCB=0x%08x, Roff=%u, Doff=%u, Woff=%u, Wrap=%u, CmdHdr=0x%08x, CmdType=0x%08x, CmdSize=%u", 10) \ ++X(173, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u with WOff %u", 2) \ ++X(174, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for OSid %u, FWCtx 0x%08x", 2) \ ++X(175, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u: FWCtx 0x%08x, MemCtx 0x%08x", 3) \ ++X(176, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_INIT_CONFIG, "Initialised Firmware with config flags 0x%08x and extended config flags 0x%08x", 2) \ ++X(177, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_CONFIG, "Set Periodic Hardware Reset Mode: %d", 1) \ ++X(179, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_TRIG, "PHR mode %d, FW state: 0x%08x, HWR flags: 0x%08x", 3) \ ++X(180, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PHR_RESET_DEPRECATED, "PHR mode %d triggered a reset", 1) \ ++X(181, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, Signal Id: %u", 2) \ ++X(182, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEV_SERIES8_DEPRECATED, "WARNING: Skipping FW KCCB Cmd type %d which is not yet supported on Series8.", 1) \ ++X(183, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INCONSISTENT_MMU_FLAGS, "MMU context cache data NULL, but cache flags=0x%x (sync counter=%u, update value=%u) OSId=%u", 4) \ ++X(184, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SLC_FLUSH, "SLC range based flush: Context=%u VAddr=0x%02x%08x, Size=0x%08x, Invalidate=%d", 5) \ ++X(185, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBSC_INVAL, "FBSC invalidate for Context Set [0x%08x]: Entry mask 0x%08x%08x.", 3) \ 
++X(186, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66284_UPDATE, "TDM context switch check: Roff %u was not valid for kick starting at %u, moving back to %u", 3) \ ++X(187, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SPFILTER_UPDATES, "Signal updates: FIFO: %u, Signals: 0x%08x", 2) \ ++X(188, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_FBSC_CMD, "Invalid FBSC cmd: FWCtx 0x%08x, MemCtx 0x%08x", 2) \ ++X(189, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN68497_BLIT, "Insert BRN68497 WA blit after TDM Context store.", 0) \ ++X(190, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PENDING_UFO_UPDATE_START, "UFO Updates for previously finished FWCtx 0x%08.8x", 1) \ ++X(191, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_RTA_PRESENT, "RTC with RTA present, %u active render targets", 1) \ ++X(192, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULL_RTAS, "Invalid RTA Set-up. The ValidRenderTargets array in RTACtl is Null!", 0) \ ++X(193, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_COUNTER, "Block 0x%x / Counter 0x%x INVALID and ignored", 2) \ ++X(194, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_DEPRECATED, "ECC fault GPU=0x%08x FW=0x%08x", 2) \ ++X(195, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PROCESS_XPU_EVENT, "Processing XPU event on DM = %d", 1) \ ++X(196, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_VZ_WDG_TRIGGER, "OSid %u failed to respond to the virtualisation watchdog in time. Timestamp of its last input = %u", 2) \ ++X(197, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU-%u has locked up (see HWR logs for more info)", 1) \ ++X(198, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x)", 3) \ ++X(199, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP_DM, "GPU has locked up (see HWR logs for more info)", 0) \ ++X(200, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REPROCESS_XPU_EVENTS, "Reprocessing outstanding XPU events from cores 0x%02x", 1) \ ++X(201, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SECONDARY_XPU_EVENT, "Secondary XPU event on DM=%d, CoreMask=0x%02x, Raised=0x%02x", 3) \ ++X(202, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ ++X(203, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled Core %u (Roff = %u, Woff = %u)", 3) \ ++X(204, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_OFFSETS, "Compute Queue: Core %u, FWCtx 0x%08.8x, prio: %d, queue: 0x%08x%08x (Roff = %u, Woff = %u, Size = %u)", 8) \ ++X(205, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_STALLED, "Compute stalled core %u (Roff = %u, Woff = %u, Size = %u)", 4) \ ++X(206, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_CORE_READ_OFFSET, "User Mode Queue mismatched stream start: Core %u, FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u, StreamStartOffset = %u)", 6) \ ++X(207, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_RESUMED_FROM_STALL, "TDM resumed core %u (Roff = %u, Woff = %u)", 3) \ ++X(208, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_CORE_RESUMED_FROM_STALL, "Compute resumed core %u (Roff = %u, Woff = %u, Size = %u)", 4) \ ++X(209, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_MTS_PERMISSION_CHANGED, " Updated permission for OSid %u to perform MTS kicks: %u (1 = allowed, 0 = not allowed)", 2) \ ++X(210, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST1, "Mask = 0x%X, mask2 = 0x%X", 2) \ ++X(211, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TEST2, " core %u, reg = %u, mask = 0x%X)", 3) \ ++X(212, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ECC_FAULT_SAFETY_BUS, "ECC fault received from safety bus: 0x%08x", 1) \ ++X(213, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_CONFIG, "Safety Watchdog threshold period set 
to 0x%x clock cycles", 1) \
++X(214, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SAFETY_WDG_TRIGGER, "MTS Safety Event triggered by the safety watchdog.", 0) \
++X(215, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_USC_TASKS_RANGE, "DM%d USC tasks range limit 0 - %d, stride %d", 3) \
++X(216, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_ECC_FAULT, "ECC fault GPU=0x%08x", 1) \
++X(217, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GPU_SAFETY_RESET, "GPU Hardware units reset to prevent transient faults.", 0) \
++X(218, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ABORTCMD, "Kick Abort cmd: FWCtx 0x%08.8x @ %d", 2) \
++X(219, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_DEPRECATED, "Kick Ray: FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 7)\
++X(220, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_DEPRECATED, "Ray finished", 0) \
++X(221, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWDATA_INIT_STATUS, "State of firmware's private data at boot time: %d (0 = uninitialised, 1 = initialised); Fw State Flags = 0x%08X", 2) \
++X(222, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT, "CFI Timeout detected (%d increasing to %d)", 2) \
++X(223, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CFI_TIMEOUT_FBM, "CFI Timeout detected for FBM (%d increasing to %d)", 2) \
++X(224, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GEOM_OOM_DISALLOWED, "Geom OOM event not allowed", 0) \
++X(225, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u; Isolation = %u (0 = off; 1 = on)", 4) \
++X(226, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SKIP_ALREADY_RUN_GEOM, "Skipping already executed TA FWCtx 0x%08.8x @ %d", 2) \
++X(227, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ATTEMPT_TO_RUN_AHEAD_GEOM, "Attempt to execute TA FWCtx 0x%08.8x @ %d ahead of time on other GEOM", 2) \
++X(228, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED2, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8) \
++X(229, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_PIPELINE, "Kick TA: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 12) \
++X(230, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_PIPELINE, "Kick 3D: Kick ID %u FWCtx 0x%08.8x @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 11) \
++X(231, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_PIPELINE, "Kick Compute: Kick ID %u FWCtx 0x%08.8x @ %d. (PID:%d, prio:%d, ext:0x%08x, int:0x%08x)", 7) \
++X(232, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED_PIPELINE, "TDM finished: Kick ID %u ", 1) \
++X(233, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED_PIPELINE, "TA finished: Kick ID %u ", 1) \
++X(234, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED_PIPELINE, "3D finished: Kick ID %u , HWRTData0State=%x, HWRTData1State=%x", 3) \
++X(235, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED_PIPELINE, "Compute finished: Kick ID %u ", 1) \
++X(236, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_PIPELINE, "Kick TDM: Kick ID %u FWCtx 0x%08.8x @ %d, Base 0x%08x%08x. (PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 10) \
++X(237, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RAY_PIPELINE, "Kick Ray: Kick ID %u FWCtx 0x%08.8x @ %d. 
(PID:%d, prio:%d, frame:%d, ext:0x%08x, int:0x%08x)", 8)\ ++X(238, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RAY_FINISHED_PIPELINE, "Ray finished: Kick ID %u ", 1) \ ++\ ++X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d", 2) \ ++X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u", 1) \ ++X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK_DEPRECATED, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%x", 3) \ ++X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u", 1) \ ++X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL_DEPRECATED, "Kick MTS Bg task DM=All", 0) \ ++X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d", 1) \ ++X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d", 2) \ ++X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x", 2) \ ++X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = 0x%x, cmd = 0x%x", 3) \ ++X( 10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x", 3) \ ++X( 11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED2, "Ready queue debug DM = %u, celltype = %d, OSid = %u", 3) \ ++X( 12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED2, "Bg Task DM = %u, counted = %d, OSid = %u", 3) \ ++X( 13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u", 1) \ ++X( 14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.", 0) \ ++X( 15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = 0x%x, due to USC breakpoint hit by OS ID = %d PID = %d.", 7) \ ++X( 16, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC_DEPRECATED, "KCCB Slot %u: DM=%u, Cmd=0x%08x, OSid=%u", 4) \ ++X( 17, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_RTN_VALUE, "KCCB Slot %u: Return value %u", 2) \ ++X( 18, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task OSid = %u", 1) \ ++X( 19, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KCCBCMD_EXEC, "KCCB Slot %u: Cmd=0x%08x, OSid=%u", 3) \ ++X( 20, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task (EVENT_STATUS=0x%08x)", 1) \ ++X( 21, RGXFW_GROUP_MTS, RGXFW_SF_MTS_VZ_SIDEBAND, "VZ sideband test, kicked with OSid=%u from MTS, OSid for test=%u", 2) \ ++\ ++X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned", 1) \ ++X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d", 3) \ ++X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP_DEPRECATED, "HWRTData [0x%08x] for DM=%d, received cleanup request", 2) \ ++X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM_DEPRECATED, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d", 3) \ ++X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy", 2) \ ++X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_DEPRECATED, "HWRTData [0x%08x] HW Context %u cleaned", 2) \ ++X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned", 1) \ ++X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned", 1) \ ++X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d", 3) \ ++X( 10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED2, "HWRTData 
[0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d", 4) \ ++X( 11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP_DEPRECATED, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request", 2) \ ++X( 12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM_DEPRECATED, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d", 3) \ ++X( 13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY_DEPRECATED, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d", 4) \ ++X( 14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_DEPRECATED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned", 2) \ ++X( 15, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x", 1) \ ++X( 16, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "Received cleanup request for HWRTData [0x%08x]", 1) \ ++X( 17, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context is busy: submitted = %d, executed = %d", 3) \ ++X( 18, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned, executed commands = %d", 3) \ ++\ ++X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8x needs resume", 1) \ ++X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_DEPRECATED, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ ++X( 3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%x", 1) \ ++X( 4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete", 0) \ ++X( 5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start", 0) \ ++X( 6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset", 0) \ ++X( 7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8x needs resume", 1) \ ++X( 8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8x resume", 1) \ ++X( 9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete", 0) \ ++X( 10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8x 0x%08.8x 0x%08.8x", 3) \ ++X( 11, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start", 0) \ ++X( 12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8x resume", 1) \ ++X( 13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8x needs resume", 1) \ ++X( 14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x", 3) \ ++X( 15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%x, load 0x%x", 2) \ ++X( 16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete", 0) \ ++X( 17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start", 0) \ ++X( 18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d", 3) \ ++X( 19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u", 2) \ ++X( 20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8x", 2) \ ++X( 21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8x", 2) \ ++X( 22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME_DEPRECATED, "SHG FWCtx 0x%08.8x needs resume", 1) \ ++X( 23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME_DEPRECATED, "*** SHG FWCtx 0x%08.8x resume from 
snapshot buffer 0x%08x%08x", 3) \ ++X( 24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED_DEPRECATED, "SHG context shared alloc size store 0x%x, load 0x%x", 2) \ ++X( 25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE_DEPRECATED, "*** SHG context store complete", 0) \ ++X( 26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START_DEPRECATED, "*** SHG context store start", 0) \ ++X( 27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d", 1) \ ++X( 28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.", 0) \ ++X( 29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8x resume from snapshot buffer 0x%08x%08x, shader state %u", 4) \ ++X( 30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)", 2) \ ++X( 31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT_DEPRECATED, "TA context store hit BRN 52563: vertex store tasks outstanding", 0) \ ++X( 32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)", 1) \ ++X( 33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED_DEPRECATED, "TA context store deferred due to BRN 54141.", 0) \ ++X( 34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED2, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u", 7) \ ++X( 35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start", 0) \ ++X( 36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete", 0) \ ++X( 37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME_DEPRECATED, "TDM context needs resume, header [0x%08.8x, 0x%08.8x]", 2) \ ++X( 38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u", 8) \ ++X( 39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8x", 3) \ ++X( 40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8x", 3) \ ++X( 41, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START_VOLCANIC, "*** 3D context store start version %d (1=IPP_TILE, 2=ISP_TILE)", 1) \ ++X( 42, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_VOLCANIC, "3D context store pipe%d state: 0x%08.8x%08x", 3) \ ++X( 43, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_VOLCANIC, "3D context resume pipe%d state: 0x%08.8x%08x", 3) \ ++X( 44, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_IPP_STATE, "3D context resume IPP state: 0x%08.8x%08x", 2) \ ++X( 45, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_PIPES_EMPTY, "All 3D pipes empty after ISP tile mode store! 
IPP_status: 0x%08x", 1) \ ++X( 46, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE_DEPRECATED, "TDM context resume pipe%d state: 0x%08.8x%08x", 3) \ ++X( 47, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_LEVEL4_STORE_START, "*** 3D context store start version 4", 0) \ ++X( 48, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RESUME_MULTICORE, "Multicore context resume on DM%d active core mask 0x%04.4x", 2) \ ++X( 49, RGXFW_GROUP_CSW, RGXFW_SF_CSW_STORE_MULTICORE, "Multicore context store on DM%d active core mask 0x%04.4x", 2) \ ++X( 50, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_RESUME_PIPE_STATE, "TDM context resume Core %d, pipe%d state: 0x%08.8x%08x%08x", 5) \ ++X( 51, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_COMPLETE, "*** RDM FWCtx store complete", 0) \ ++X( 52, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_STORE_START, "*** RDM FWCtx store start", 0) \ ++X( 53, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_NEEDS_RESUME, "RDM FWCtx 0x%08.8x needs resume", 1) \ ++X( 54, RGXFW_GROUP_CSW, RGXFW_SF_CSW_RDM_RESUME, "RDM FWCtx 0x%08.8x resume", 1) \ ++\ ++X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_BIFREQ_DEPRECATED, "Activate MemCtx=0x%08x BIFreq=%d secure=%d", 3) \ ++X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x", 1) \ ++X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC_DEPRECATED, "Alloc PC reg %d", 1) \ ++X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_GRAB, "Grab reg set %d refcount now %d", 2) \ ++X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_UNGRAB, "Ungrab reg set %d refcount now %d", 2) \ ++X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_BIFREQ_DEPRECATED, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ ++X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DEPRECATED, "Trust enabled:%d, for BIFreq=%d", 2) \ ++X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG_DEPRECATED, "BIF Tiling Cfg %d base 0x%08x%08x len 0x%08x%08x enable %d stride %d --> 0x%08x%08x", 9) \ ++X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now 0x%08x 0x%08x", 4) \ ++X( 10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context %d, Register's contents are now 0x%04x", 3) \ ++X( 11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = 0x%x, Reg index = %u, Bitshift index = %u, Val = 0x%08x%08x", 7) \ ++X( 12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_BIFREQ_DEPRECATED, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u", 5) \ ++X( 13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)", 1) \ ++X( 14, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE_DM, "Activate MemCtx=0x%08x DM=%d secure=%d", 3) \ ++X( 15, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM_DEPRECATED, "Setup reg=%d DM=%d, expect=0x%08x%08x, actual=0x%08x%08x", 6) \ ++X( 16, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u", 4) \ ++X( 17, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST_DM, "Trust enabled:%d, for DM=%d", 2) \ ++X( 18, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY_DM, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, DM %u", 5) \ ++X( 19, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG_DM, "Setup register set=%d DM=%d, PC address=0x%08x%08x, OSid=%u, NewPCRegRequired=%d", 6) \ ++X( 20, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCSET_ALLOC, "Alloc PC set %d as register range [%u - %u]", 3) \ ++\ ++X( 1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x", 1) \ ++X( 2, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x", 1) \ ++X( 3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, 
"GPIO enabled", 0) \ ++X( 4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled", 0) \ ++X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)", 1) \ ++X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))", 2) \ ++X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))", 2) \ ++X( 8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!", 0) \ ++X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)", 1) \ ++X( 10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x", 1) \ ++X( 11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d", 2) \ ++X( 12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d", 1) \ ++X( 13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)", 2) \ ++X( 14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)", 2) \ ++X( 15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame", 1) \ ++X( 16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame", 1) \ ++X( 17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)", 1) \ ++X( 18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d", 1) \ ++X( 19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)", 1) \ ++X( 20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)", 1) \ ++X( 21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %d)", 1) \ ++X( 22, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_STATE, "TRP state: %d", 1) \ ++X( 23, RGXFW_GROUP_MISC, RGXFW_SF_MISC_TRP_FAILURE, "TRP failure: %d", 1) \ ++X( 24, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_STATE, "SW TRP State: %d", 1) \ ++X( 25, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SW_TRP_FAILURE, "SW TRP failure: %d", 1) \ ++X( 26, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HW_KICK, "HW kick event (%u)", 1) \ ++X( 27, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_CHECKSUMS, "GPU core (%u/%u): checksum 0x%08x vs. 0x%08x", 4) \ ++X( 28, RGXFW_GROUP_MISC, RGXFW_SF_MISC_WGP_UNIT_CHECKSUMS, "GPU core (%u/%u), unit (%u,%u): checksum 0x%08x vs. 0x%08x", 6) \ ++X( 29, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_CHECK_REG, "HWR: Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ ++X( 30, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_SLOTS_CHECK, "HWR: USC Core%u, ui32TotalSlotsUsedByDM=0x%08x, psDMHWCtl->ui32USCSlotsUsedByDM=0x%08x, bHWRNeeded=%u", 4) \ ++X( 31, RGXFW_GROUP_MISC, RGXFW_SF_MISC_HWR_USC_REG_CHECK, "HWR: USC Core%u, Register=0x%08x, OldValue=0x%08x%08x, CurrValue=0x%08x%08x", 6) \ ++\ ++X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)", 10) \ ++X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%d, local:%d, mmu:%d", 8) \ ++X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ ++X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)", 14) \ ++X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE_DEPRECATED, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ ++X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED_DEPRECATED, "Grow for freelist ID=0x%08x denied by host", 1) \ ++X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 5) \ ++X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed", 1) \ ++X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d", 2) \ ++X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: 0x%08x, status(1:success, 0:fail): %d", 2)\ ++X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ ++X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ ++X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ ++X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x", 1) \ ++X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x", 1) \ ++X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). 
FL different between TA/3D: global:%d, local:%d", 7) \ ++X( 17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ ++X( 18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ ++X( 19, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE_VOLCANIC, "Freelist update completed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ ++X( 20, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_FAILED, "Freelist update failed [0x%08x / FL State 0x%08x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x", 7) \ ++X( 21, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_VOLCANIC, "UFL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ ++X( 22, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_VOLCANIC, "UFL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-State-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)", 10) \ ++X( 23, RGXFW_GROUP_PM, RGXFW_SF_PM_CHECK_FL_BASEADDR, "Freelist 0x%08x base address from HW: 0x%02x%08x (expected value: 0x%02x%08x)", 5) \ ++X( 24, RGXFW_GROUP_PM, RGXFW_SF_PM_ANALYSE_FL_GROW, "Analysis of FL grow: Pause=(%u,%u) Paused+Valid(%u,%u) PMStateBuffer=0x%x", 5) \ ++X( 25, RGXFW_GROUP_PM, RGXFW_SF_PM_ATTEMPT_FL_GROW, "Attempt FL grow for FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ ++X( 26, RGXFW_GROUP_PM, RGXFW_SF_PM_DEFER_FL_GROW, "Deferring FL grow for non-loaded FL: 0x%08x, new dev address: 0x%02x%08x, new page count: %u, new ready count: %u", 5) \ ++X( 27, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_ALBIORIX, "Is GEOM: %d, finished: %d (HWRTData = 0x%08x, MemCtx = 0x%08x)", 4) \ ++X( 28, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT, "3D Timeout Now for FWCtx 0x%08.8x", 1) \ ++X( 29, RGXFW_GROUP_PM, RGXFW_SF_PM_RECYCLE, "GEOM PM Recycle for FWCtx 0x%08.8x", 1) \ ++X( 30, RGXFW_GROUP_PM, RGXFW_SF_PM_PRIMARY_CONFIG, "PM running primary config (Core %d)", 1) \ ++X( 31, RGXFW_GROUP_PM, RGXFW_SF_PM_SECONDARY_CONFIG, "PM running secondary config (Core %d)", 1) \ ++X( 32, RGXFW_GROUP_PM, RGXFW_SF_PM_TERTIARY_CONFIG, "PM running tertiary config (Core %d)", 1) \ ++X( 33, RGXFW_GROUP_PM, RGXFW_SF_PM_QUATERNARY_CONFIG, "PM running quaternary config (Core %d)", 1) \ ++\ ++X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS_DEPRECATED, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ ++X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS_DEPRECATED, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x", 3) \ ++X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW_DEPRECATED, "RPM request failed. Waiting for freelist grow.", 0) \ ++X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT_DEPRECATED, "RPM request failed. 
Aborting the current frame.", 0) \ ++X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW_DEPRECATED, "RPM waiting for pending grow on freelist 0x%08x", 1) \ ++X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW_DEPRECATED, "Request freelist grow [0x%08x] current pages %d, grow size %d", 3) \ ++X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD_DEPRECATED, "Freelist load: SHF = 0x%08x, SHG = 0x%08x", 2) \ ++X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08x.0x%08x", 2) \ ++X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08x.0x%08x", 2) \ ++X( 10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST_DEPRECATED, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)", 5) \ ++X( 11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART_DEPRECATED, "Restarting SHG", 0) \ ++X( 12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED_DEPRECATED, "Grow failed, aborting the current frame.", 0) \ ++X( 13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE_DEPRECATED, "RPM abort complete on HWFrameData [0x%08x].", 1) \ ++X( 14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT_DEPRECATED, "RPM freelist cleanup [0x%08x] requires abort to proceed.", 1) \ ++X( 15, RGXFW_GROUP_RPM, RGXFW_SF_RPM_RPM_PT_DEPRECATED, "RPM page table base register: 0x%08x.0x%08x", 2) \ ++X( 16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT_DEPRECATED, "Issuing RPM abort.", 0) \ ++X( 17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL_DEPRECATED, "RPM OOM received but toggle bits indicate free pages available", 0) \ ++X( 18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT_DEPRECATED, "RPM hardware timeout. Unable to process OOM event.", 0) \ ++X( 19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD_DEPRECATED_DEPRECATED, "SHF FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ ++X( 20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD_DEPRECATED, "SHG FL (0x%08x) load, FPL: 0x%08x.0x%08x, roff: 0x%08x, woff: 0x%08x", 5) \ ++X( 21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE_DEPRECATED, "SHF FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ ++X( 22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE_DEPRECATED, "SHG FL (0x%08x) store, roff: 0x%08x, woff: 0x%08x", 3) \ ++\ ++X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u", 2) \ ++X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u", 2) \ ++X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d, mmu: %d", 4) \ ++X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D_DEPRECATED, "Loading VFP table 0x%08x%08x for 3D", 2) \ ++X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA_DEPRECATED, "Loading VFP table 0x%08x%08x for TA", 2) \ ++X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ ++X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store", 0) \ ++X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No", 2) \ ++X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No", 2) \ ++X( 10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: 
Load=Yes, Store=Yes", 3) \ ++X( 11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes", 3) \ ++X( 12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x", 5) \ ++X( 13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 10) \ ++X( 14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u", 2) \ ++X( 15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u", 2) \ ++X( 16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ ++X( 17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%x type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ ++X( 18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG_DEPRECATED, "Freelist 0x%x RESET!!!!!!!!", 1) \ ++X( 19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2_DEPRECATED, "Freelist 0x%x stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 5) \ ++X( 20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%x type: %d (0:local,1:global,2:mmu) on HW context %u", 3) \ ++X( 21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)", 1) \ ++X( 22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed", 0) \ ++X( 23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d", 3) \ ++X( 24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)", 3) \ ++X( 25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS_DEPRECATED, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 8) \ ++X( 26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED_DEPRECATED, "3D RTData 0x%08x loaded on HW context %u", 2) \ ++X( 27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)", 4) \ ++X( 28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=0x%x, HWRTData1State=0x%x", 2) \ ++X( 29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to 0x%x, FL different between TA/3D: local: %d, global: %d", 3) \ ++X( 30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ ++X( 31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u", 12) \ ++X( 32, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS_DEPRECATED2, "3D 
Buffers: FWCtx 0x%08x, parent RT 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 5) \ ++X( 33, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)", 7) \ ++X( 34, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RTData 0x%08x on ctx %d, (MemCtx 0x%08x)", 4) \ ++X( 35, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_V2, "Load Freelist 0x%x type: %d (0:local,1:global) for PMDM%d: FL Total Pages %u (max=%u,grow size=%u)", 6) \ ++X( 36, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_TA, "TA RTData 0x%08x marked as killed.", 1) \ ++X( 37, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILLED_3D, "3D RTData 0x%08x marked as killed.", 1) \ ++X( 38, RGXFW_GROUP_RTD, RGXFW_SF_RTD_KILL_TA_AFTER_RESTART, "RTData 0x%08x will be killed after TA restart.", 1) \ ++X( 39, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RENDERSTATE_RESET, "RTData 0x%08x Render State Buffer 0x%02x%08x will be reset.", 3) \ ++X( 40, RGXFW_GROUP_RTD, RGXFW_SF_RTD_GEOM_RENDERSTATE, "GEOM RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \ ++X( 41, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FRAG_RENDERSTATE, "FRAG RTData 0x%08x using Render State Buffer 0x%02x%08x.", 3) \ ++\ ++X( 1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render", 0) \ ++X( 2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render", 0) \ ++X( 3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL_DEPRECATED, "3D MemFree: Local FL 0x%08x", 1) \ ++X( 4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU_DEPRECATED, "3D MemFree: MMU FL 0x%08x", 1) \ ++X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL_DEPRECATED, "3D MemFree: Global FL 0x%08x", 1) \ ++X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_DEPRECATED, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x, HardwareSync Fence [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 6) \ ++X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x", 3) \ ++X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL_DEPRECATED, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x", 5) \ ++X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED_DEPRECATED, "Partial render avoided", 0) \ ++X( 10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED_DEPRECATED, "Partial render discarded", 0) \ ++X( 11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished", 0) \ ++X( 12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG_DEPRECATED, "SPM Owner = 3D-BG", 0) \ ++X( 13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ_DEPRECATED, "SPM Owner = 3D-IRQ", 0) \ ++X( 14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE_DEPRECATED, "SPM Owner = NONE", 0) \ ++X( 15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG_DEPRECATED, "SPM Owner = TA-BG", 0) \ ++X( 16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ_DEPRECATED, "SPM Owner = TA-IRQ", 0) \ ++X( 17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x", 2) \ ++X( 18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x", 2) \ ++X( 19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x", 2) \ ++X( 20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x", 2) \ ++X( 21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER_DEPRECATED, "No deferred ZS Buffer provided", 0) \ ++X( 22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)", 1) \ ++X( 23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED_DEPRECATED, "No need 
to populate ZS Buffer (ID=0x%08x)", 1) \ ++X( 24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)", 1) \ ++X( 25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED_DEPRECATED, "No need to unpopulate ZS Buffer (ID=0x%08x)", 1) \ ++X( 26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_DEPRECATED, "Send ZS-Buffer backing request to host (ID=0x%08x)", 1) \ ++X( 27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_DEPRECATED, "Send ZS-Buffer unbacking request to host (ID=0x%08x)", 1) \ ++X( 28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)", 1) \ ++X( 29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \ ++X( 30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)", 1) \ ++X( 31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)", 1) \ ++X( 32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none", 0) \ ++X( 33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked", 0) \ ++X( 34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow", 0) \ ++X( 35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW", 0) \ ++X( 36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running", 0) \ ++X( 37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided", 0) \ ++X( 38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed", 0) \ ++X( 39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)", 2) \ ++X( 40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDedected flag", 0) \ ++X( 41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x", 1) \ ++X( 42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)", 1) \ ++X( 43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u", 5) \ ++X( 44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA_WITH_SP, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u", 4) \ ++X( 45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u", 5) \ ++X( 46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided", 1) \ ++X( 47, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)", 1) \ ++X( 48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)", 1) \ ++X( 49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)", 1) \ ++X( 50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)", 1) \ ++X( 51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. 
Previous request still pending (ID=0x%08x)", 1) \ ++X( 52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)", 1) \ ++X( 53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)", 2) \ ++X( 54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u", 4) \ ++X( 66, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [0x%08.8x] is 0x%08.8x requires 0x%08.8x", 3) \ ++X( 67, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u", 3) \ ++X( 68, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PR_DEADLOCK_UNBLOCKED, "OOM TA/3D PR deadlock unblocked reordering DM%d runlist head from Context 0x%08x to 0x%08x", 3) \ ++X( 69, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_FORCEFREE, "SPM State = PR force free", 0) \ ++\ ++X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state DM%d int: 0x%x, ext: 0x%x, pow flags: 0x%x", 4) \ ++X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_IDLE, "GPU idle (might be powered down). Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ_DEPRECATED, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%x", 3) \ ++X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d", 4) \ ++X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \ ++X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_GPU_OFF, "GPU ready to be powered down. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8x", 2) \ ++X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)", 2) \ ++X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d", 2) \ ++X( 11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init", 0) \ ++X( 12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: 0x%x)", 1) \ ++X( 13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.", 0) \ ++X( 14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.", 0) \ ++X( 15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d", 2) \ ++X( 16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d", 2) \ ++X( 17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37270_DEPRECATED, "Request power up due to BRN37270. Pow stat int: 0x%x", 1) \ ++X( 18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%x, ext: 0x%x, pow flags: 0x%x", 3) \ ++X( 19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%x", 1) \ ++X( 20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%x", 1) \ ++X( 21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. 
Core clock: %d Hz", 2) \ ++X( 24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%x, %d dusts powered.", 2) \ ++X( 25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.", 0) \ ++X( 26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u", 1) \ ++X( 27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED2, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x", 3) \ ++X( 28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x", 2) \ ++X( 29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x", 2) \ ++X( 30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = %u", 1) \ ++X( 31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = %u percent", 1) \ ++X( 32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = %u.%u", 2) \ ++X( 33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x", 1) \ ++X( 34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x", 2) \ ++X( 35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x", 2) \ ++X( 36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x", 1) \ ++X( 37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.", 0) \ ++X( 38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE_DEPRECATED, "Proactive DVFS: Invalid node passed to function.", 0) \ ++X( 39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS_DEPRECATED, "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u", 1) \ ++X( 40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: %u", 1) \ ++X( 41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: %u", 1) \ ++X( 42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.", 0) \ ++X( 43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d", 2) \ ++X( 44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042_DEPRECATED, "Allowed number of dusts is %d due to BRN59042.", 1) \ ++X( 45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%x, ext: 0x%x, flags: 0x%x", 3) \ ++X( 46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%x, Ext: 0x%x, Pow flags: 0x%x, Fence Counters: Check: %u - Update: %u", 5) \ ++X( 47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = 0x%x", 2) \ ++X( 48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle", 0) \ ++X( 49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active", 0) \ ++X( 50, RGXFW_GROUP_POW, RGXFW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. 
Buffer too small.", 1) \ ++X( 51, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT, "Power controller returned ABORT for last request so retrying.", 0) \ ++X( 52, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST_DEPRECATED, "Discarding invalid power request: type 0x%x, DM %u", 2) \ ++X( 53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ ++X( 54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)", 2) \ ++X( 55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)", 1) \ ++X( 56, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESULT, "Power monitor: Type = %d (0 = power, 1 = energy), Estimate result = 0x%08x%08x", 3) \ ++X( 57, RGXFW_GROUP_POW, RGXFW_SF_POW_MINMAX_CONFLICT, "Conflicting clock frequency range: OPP min = %u, max = %u", 2) \ ++X( 58, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_FLOOR, "Proactive DVFS: Set floor to a minimum = 0x%x", 1) \ ++X( 59, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), pow flags: 0x%x", 2) \ ++X( 60, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x", 1) \ ++X( 61, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x. Pow flags: 0x%x", 3) \ ++X( 62, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x", 2) \ ++X( 63, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_POW_CHANGE_NOT_IDLE, "Detected attempt to change SPU power state mask while not forced idle (pow state 0x%x)", 1) \ ++X( 64, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_SPU_POWER_MASK, "Invalid SPU power mask 0x%x! Changing to 1", 1) \ ++X( 65, RGXFW_GROUP_POW, RGXFW_SF_POW_CLKDIV_UPDATE, "Proactive DVFS: Send OPP %u with clock divider value %u", 2) \ ++X( 66, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_PERF_MODE, "PPA block started in perf validation mode.", 0) \ ++X( 67, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_RESET, "Reset PPA block state %u (1=reset, 0=recalculate).", 1) \ ++X( 68, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT_WITH_CORE, "Power controller returned ABORT for Core-%d last request so retrying.", 1) \ ++X( 69, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ64BIT, "HW Request On(1)/Off(0): %d, Units: 0x%08x%08x", 3) \ ++X( 70, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE_REQ, "Request to change SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x. Pow flags: 0x%x", 5) \ ++X( 71, RGXFW_GROUP_POW, RGXFW_SF_POW_SPU_RAC_POW_STATE_CHANGE, "Changing SPU power state mask from 0x%x to 0x%x and RAC from 0x%x to 0x%x", 4) \ ++X( 72, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RAC, "RAC pending? %d, RAC Active? 
%d", 2) \ ++X( 73, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RAC, "Initiate powoff query for RAC.", 0) \ ++\ ++X( 1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: 0x%08.8x", 2) \ ++X( 2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: 0x%08.8x, MemCtx: 0x%08.8x", 3) \ ++X( 3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW", 0) \ ++X( 4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.", 0) \ ++X( 5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: 0x%08.8x", 2) \ ++X( 6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ ++X( 7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08x)", 3) \ ++X( 8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due lockup: GLB(%d), PER-DM(0x%08x->0x%08x)", 3) \ ++X( 9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ ++X( 10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08x->0x%08x)", 4) \ ++X( 11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08x)", 3) \ ++X( 12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08x)", 4) \ ++X( 13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08x val:0x%08x)", 3) \ ++X( 14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08x", 2) \ ++X( 15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08x (st: %d), FWCtx 0x%08x @ %d", 6) \ ++X( 16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE_DEPRECATED, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d", 2) \ ++X( 17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8x - local (0x%08.8x): %d, global (0x%08.8x): %d", 5) \ ++X( 18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8x, discard: %d - local (0x%08.8x): s%d?=c%d, global (0x%08.8x): s%d?=c%d", 8) \ ++X( 19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8x c%d", 2) \ ++X( 20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8x @ %d, RTD 0x%08x.", 3) \ ++X( 21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)", 2) \ ++X( 22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: 0x%08.8x (TPC addr: 0x%08x%08x, size: %d bytes)", 4) \ ++X( 23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED2, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08x", 2) \ ++X( 24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08x", 5) \ ++X( 25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered", 1) \ ++X( 26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x", 2) \ ++X( 27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction", 0) \ ++X( 28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: 0x%08.8x. 
Need to skip to next command", 2) \ ++X( 29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: 0x%08.8x. Need to skip to next command", 2) \ ++X( 30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: 0x%08.8x. Need PR cleanup", 2) \ ++X( 31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED2, "GPU has locked up", 0) \ ++X( 32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR", 1) \ ++X( 33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08x", 2) \ ++X( 34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED2, "Analysis: BRN37729 detected, reset TA and re-kicked 0x%08x)", 1) \ ++X( 35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out", 1) \ ++X( 36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x", 1) \ ++X( 37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08x", 2) \ ++X( 38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline", 0) \ ++X( 39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll", 0) \ ++X( 40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x", 2) \ ++X( 41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED3, "Reset HW (loop:%d, poll failures: 0x%08x)", 2) \ ++X( 42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08x", 1) \ ++X( 43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08x)", 1) \ ++X( 44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).", 1) \ ++X( 45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08x%08x)", 2) \ ++X( 46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_DEPRECATED, "Fast CRC Check result for DM%u is HWRNeeded=%u", 2) \ ++X( 47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK_DEPRECATED, "Full Signature Check result for DM%u is HWRNeeded=%u", 2) \ ++X( 48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u", 3) \ ++X( 49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK_DEPRECATED, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 3) \ ++X( 50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK_DEPRECATED, "Deadline counter for DM%u is HWRDeadline=%u", 2) \ ++X( 51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction", 1) \ ++X( 52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%x (ID=%d)", 2) \ ++X( 53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete", 1) \ ++X( 54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u", 4) \ ++X( 55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed", 1) \ ++X( 56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ ++X( 57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02x, StallingMask=0x%02x, PDS_CTRL=0x%08x%08x)", 4) \ ++X( 58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u 
used by DM%u", 2) \ ++X( 59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty", 1) \ ++X( 60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: 0x%08x%08x, deadline: 0x%08x%08x", 5) \ ++X( 61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)", 1) \ ++X( 62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)", 1) \ ++X( 63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction", 2) \ ++X( 64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8x, queue: 0x%08x%08x (Roff = %u becomes StreamStartOffset = %u)", 5) \ ++X( 65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED2, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) on HW context %u", 4) \ ++X( 66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "Mips page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)", 3) \ ++X( 67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance", 1) \ ++X( 68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%x (ID=%d)", 2) \ ++X( 69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: 0x%08.8x (RTC addr: 0x%08x%08x, size: %d bytes)", 4) \ ++X( 70, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED3, "Reconstruction needed for freelist 0x%x (ID=%d) type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 5) \ ++X( 71, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_LONG_HW_POLL, "Start long HW poll %u (0-Unset 1-Set) for (reg:0x%08x val:0x%08x)", 3) \ ++X( 72, RGXFW_GROUP_HWR, RGXFW_SF_HWR_END_LONG_HW_POLL, "End long HW poll (result=%d)", 1) \ ++X( 73, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "DM%u has taken %d ticks and deadline is %d ticks", 3) \ ++X( 74, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK_DEPRECATED, "USC Watchdog result for DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 5) \ ++X( 75, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%x (ID=%d) OSid: %d type: %d (0:local,1:global) phase: %d (0:TA, 1:3D) on HW context %u", 6) \ ++X( 76, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU-%u has locked up", 1) \ ++X( 77, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DM, "DM%u has locked up", 1) \ ++X( 78, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_EVENT_STATUS_REG, "Core %d RGX_CR_EVENT_STATUS=0x%08x", 2) \ ++X( 79, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MULTICORE_EVENT_STATUS_REG, "RGX_CR_MULTICORE_EVENT_STATUS%u=0x%08x", 2) \ ++X( 80, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_BIF0_FAULT, "BIF0 page fault detected (Core %d MMU Status: 0x%08x%08x Req Status: 0x%08x%08x)", 5) \ ++X( 81, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT_S7, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x)", 3) \ ++X( 82, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CORE_MMU_FAULT, "MMU page fault detected (Core %d MMU Status: 0x%08x%08x 0x%08x)", 4) \ ++X( 83, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (core:%d of %d, loop:%d, poll failures: 0x%08x)", 4) \ ++X( 84, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ ++X( 85, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ ++X( 86, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for Core%u, 
DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d", 4) \ ++X( 87, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WATCHDOG_CHECK, "USC Watchdog result for Core%u DM%u is HWRNeeded=%u Status=%u USCs={0x%x} with HWRChecksToGo=%u", 6) \ ++X( 88, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_RISCV_FAULT, "RISC-V MMU page fault detected (FWCORE MMU Status 0x%08x Req Status 0x%08x%08x)", 3) \ ++X( 89, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS1_PFS_DEPRECATED, "TEXAS1_PFS poll failed on core %d with value 0x%08x", 2) \ ++X( 90, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_PFS, "BIF_PFS poll failed on core %d with value 0x%08x", 2) \ ++X( 91, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS set poll failed on core %d with value 0x%08x", 2) \ ++X( 92, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_UNSET_ABORT_PM_STATUS, "MMU_ABORT_PM_STATUS unset poll failed on core %d with value 0x%08x", 2) \ ++X( 93, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLC_INVAL, "MMU_CTRL_INVAL poll (all but fw) failed on core %d with value 0x%08x", 2) \ ++X( 94, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_SLCMMU_INVAL, "MMU_CTRL_INVAL poll (all) failed on core %d with value 0x%08x", 2) \ ++X( 95, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HWR_FAULT_POLL_BIF_TEXAS_PFS, "TEXAS%d_PFS poll failed on core %d with value 0x%08x", 3) \ ++X( 96, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EXTRA_CHECK, "Extra Registers Check result for Core%u, DM%u is HWRNeeded=%u", 3) \ ++X( 97, RGXFW_GROUP_HWR, RGXFW_SF_HWR_WRITE_TO_GPU_READONLY_ADDR, "FW attempted to write to read-only GPU address 0x%08x", 1) \ ++\ ++X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u", 2) \ ++X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW", 1) \ ++X( 3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW", 1) \ ++X( 4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x", 2) \ ++X( 5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x", 1) \ ++X( 6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x", 1) \ ++X( 7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping", 1) \ ++X( 8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x", 1) \ ++X( 9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x", 1) \ ++X( 10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x", 1) \ ++X( 11, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x", 2) \ ++X( 12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x", 1) \ ++X( 13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x", 2) \ ++X( 14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver", 1) \ ++X( 15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver", 0) \ ++X( 16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: 0x%x value: 0x%x", 2) \ ++X( 17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u ID:0x%x", 2) \ ++X( 18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID 0x%x is not allowed. The package [b:%u, n:%u] will be discarded", 3) \ ++X( 19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS_CUSTOM, "Custom Counters filter status %d", 1) \ ++X( 20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. 
Use only blocks lower than %d. The package will be discarded", 2) \ ++X( 21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counters IDs while the upper limit is %d", 2) \ ++X( 22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter 0x%x is 0x%x ?", 2) \ ++X( 23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset", 1) \ ++X( 24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD_DEPRECATED, "Encountered an invalid command (%d)", 1) \ ++X( 25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)", 2) \ ++X( 26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)", 3) \ ++X( 27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d", 1) \ ++X( 28, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKENA, "Block 0x%x ENABLED", 1) \ ++X( 29, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKDIS, "Block 0x%x DISABLED", 1) \ ++X( 30, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK_INSTANCE, "Accessing Indirect block 0x%x, instance %u", 2) \ ++X( 31, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTRVAL, "Counter register 0x%x, Value 0x%x", 2) \ ++X( 32, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Counters filter status %d", 1) \ ++X( 33, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTLBLK, "Block 0x%x mapped to Ctl Idx %u", 2) \ ++X( 34, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_WORKEST_EN, "Block(s) in use for workload estimation.", 0) \ ++X( 35, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCCTR, "GPU %u Cycle counter 0x%x, Value 0x%x", 3) \ ++X( 36, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CYCMAX, "GPU Mask 0x%x Cycle counter 0x%x, Value 0x%x", 3) \ ++X( 37, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_IGNORE_BLOCKS, "Blocks IGNORED for GPU %u", 1) \ ++\ ++X( 1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST_DEPRECATED, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u", 5) \ ++X( 2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u", 4) \ ++X( 3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x", 1) \ ++X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer of type 0x%02x completion...", 1) \ ++X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed", 3) \ ++X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)", 3) \ ++X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure", 1) \ ++X( 8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead", 2) \ ++X( 9, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request on ch. 
%u: system 0x%02x%08x, coremem 0x%08x, flags 0x%x, size %u", 7) \ ++\ ++X( 1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%08x 0x%08x", 2) \ ++X( 2, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1HEX, "0x%08x", 1) \ ++X( 3, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2HEX, "0x%08x 0x%08x", 2) \ ++X( 4, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3HEX, "0x%08x 0x%08x 0x%08x", 3) \ ++X( 5, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4HEX, "0x%08x 0x%08x 0x%08x 0x%08x", 4) \ ++X( 6, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 5) \ ++X( 7, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 6) \ ++X( 8, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 7) \ ++X( 9, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x", 8) \ ++X( 10, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1SIGNED, "%d", 1) \ ++X( 11, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2SIGNED, "%d %d", 2) \ ++X( 12, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3SIGNED, "%d %d %d", 3) \ ++X( 13, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4SIGNED, "%d %d %d %d", 4) \ ++X( 14, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5SIGNED, "%d %d %d %d %d", 5) \ ++X( 15, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6SIGNED, "%d %d %d %d %d %d", 6) \ ++X( 16, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7SIGNED, "%d %d %d %d %d %d %d", 7) \ ++X( 17, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8SIGNED, "%d %d %d %d %d %d %d %d", 8) \ ++X( 18, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1UNSIGNED, "%u", 1) \ ++X( 19, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2UNSIGNED, "%u %u", 2) \ ++X( 20, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3UNSIGNED, "%u %u %u", 3) \ ++X( 21, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4UNSIGNED, "%u %u %u %u", 4) \ ++X( 22, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5UNSIGNED, "%u %u %u %u %u", 5) \ ++X( 23, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6UNSIGNED, "%u %u %u %u %u %u", 6) \ ++X( 24, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u", 7) \ ++X( 25, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u", 8) \ ++\ ++X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string", 15) ++ ++ ++/* The symbolic names found in the table above are assigned an ui32 value of ++ * the following format: ++ * 31 30 28 27 20 19 16 15 12 11 0 bits ++ * - --- ---- ---- ---- ---- ---- ---- ---- ++ * 0-11: id number ++ * 12-15: group id number ++ * 16-19: number of parameters ++ * 20-27: unused ++ * 28-30: active: identify SF packet, otherwise regular int32 ++ * 31: reserved for signed/unsigned compatibility ++ * ++ * The following macro assigns those values to the enum generated SF ids list. 
++ */ ++#define RGXFW_LOG_IDMARKER (0x70000000U) ++#define RGXFW_LOG_CREATESFID(a,b,e) ((IMG_UINT32)(a) | ((IMG_UINT32)(b)<<12U) | ((IMG_UINT32)(e)<<16U)) | RGXFW_LOG_IDMARKER ++ ++#define RGXFW_LOG_IDMASK (0xFFF00000U) ++#define RGXFW_LOG_VALIDID(I) (((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER) ++ ++typedef enum { ++#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e), ++ RGXFW_LOG_SFIDLIST ++#undef X ++} RGXFW_LOG_SFids; ++ ++/* Return the group id that the given (enum generated) id belongs to */ ++#define RGXFW_SF_GID(x) (((IMG_UINT32)(x)>>12) & 0xfU) ++/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */ ++#define RGXFW_SF_PARAMNUM(x) (((IMG_UINT32)(x)>>16) & 0xfU) ++ ++#endif /* RGX_FWIF_SF_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_fwif_shared.h b/drivers/gpu/drm/img-rogue/rgx_fwif_shared.h +new file mode 100644 +index 000000000000..13844ad4e801 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_fwif_shared.h +@@ -0,0 +1,335 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX firmware interface structures ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX firmware interface structures shared by both host client ++ and host server ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(RGX_FWIF_SHARED_H) ++#define RGX_FWIF_SHARED_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "rgx_common.h" ++#include "powervr/mem_types.h" ++ ++/* Indicates the number of RTDATAs per RTDATASET */ ++#if defined(SUPPORT_AGP) ++#define RGXMKIF_NUM_RTDATAS 4U ++#define RGXMKIF_NUM_GEOMDATAS 4U ++#define RGXMKIF_NUM_RTDATA_FREELISTS 12U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */ ++#define RGX_NUM_GEOM_CORES (2U) ++#else ++#define RGXMKIF_NUM_RTDATAS 2U ++#define RGXMKIF_NUM_GEOMDATAS 1U ++#define RGXMKIF_NUM_RTDATA_FREELISTS 2U /* RGXMKIF_NUM_RTDATAS * RGXFW_MAX_FREELISTS */ ++#define RGX_NUM_GEOM_CORES (1U) ++#endif ++ ++/* Maximum number of UFOs in a CCB command. ++ * The number is based on having 32 sync prims (as originally), plus 32 sync ++ * checkpoints. ++ * Once the use of sync prims is no longer supported, we will retain ++ * the same total (64) as the number of sync checkpoints which may be ++ * supporting a fence is not visible to the client driver and has to ++ * allow for the number of different timelines involved in fence merges. ++ */ ++#define RGXFWIF_CCB_CMD_MAX_UFOS (32U+32U) ++ ++/* ++ * This is a generic limit imposed on any DM (TA,3D,CDM,TDM,2D,TRANSFER) ++ * command passed through the bridge. ++ * Just across the bridge in the server, any incoming kick command size is ++ * checked against this maximum limit. ++ * In case the incoming command size is larger than the specified limit, ++ * the bridge call is retired with error. ++ */ ++#define RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U) ++ ++typedef struct RGXFWIF_DEV_VIRTADDR_ ++{ ++ IMG_UINT32 ui32Addr; ++} RGXFWIF_DEV_VIRTADDR; ++ ++typedef struct ++{ ++ IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr; ++ RGXFWIF_DEV_VIRTADDR pbyFWAddr; ++} UNCACHED_ALIGN RGXFWIF_DMA_ADDR; ++ ++typedef IMG_UINT8 RGXFWIF_CCCB; ++ ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR; ++typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL; ++ ++ ++/*! ++ * @InGroup ClientCCBTypes ++ * @Brief Command data for fence & update types Client CCB commands. ++ */ ++typedef struct ++{ ++ PRGXFWIF_UFO_ADDR puiAddrUFO; /*!< Address to be checked/updated */ ++ IMG_UINT32 ui32Value; /*!< Value to check-against/update-to */ ++} RGXFWIF_UFO; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */ ++ IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */ ++} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL; ++ ++#define RGXFWIF_PRBUFFER_START IMG_UINT32_C(0) ++#define RGXFWIF_PRBUFFER_ZSBUFFER IMG_UINT32_C(0) ++#define RGXFWIF_PRBUFFER_MSAABUFFER IMG_UINT32_C(1) ++#define RGXFWIF_PRBUFFER_MAXSUPPORTED IMG_UINT32_C(2) ++ ++typedef IMG_UINT32 RGXFWIF_PRBUFFER_TYPE; ++ ++typedef enum ++{ ++ RGXFWIF_PRBUFFER_UNBACKED = 0, ++ RGXFWIF_PRBUFFER_BACKED, ++ RGXFWIF_PRBUFFER_BACKING_PENDING, ++ RGXFWIF_PRBUFFER_UNBACKING_PENDING, ++}RGXFWIF_PRBUFFER_STATE; ++ ++/*! 
++ * @InGroup RenderTarget ++ * @Brief OnDemand Z/S/MSAA Buffers ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32BufferID; /*!< Buffer ID*/ ++ IMG_BOOL bOnDemand; /*!< Needs On-demand Z/S/MSAA Buffer allocation */ ++ RGXFWIF_PRBUFFER_STATE eState; /*!< Z/S/MSAA -Buffer state */ ++ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */ ++ IMG_UINT32 ui32PRBufferFlags; /*!< Compatibility and other flags */ ++} UNCACHED_ALIGN RGXFWIF_PRBUFFER; ++ ++/* ++ * Used to share frame numbers across UM-KM-FW, ++ * frame number is set in UM, ++ * frame number is required in both KM for HTB and FW for FW trace. ++ * ++ * May be used to house Kick flags in the future. ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32FrameNum; /*!< associated frame number */ ++} CMD_COMMON; ++ ++/* ++ * TA and 3D commands require set of firmware addresses that are stored in the ++ * Kernel. Client has handle(s) to Kernel containers storing these addresses, ++ * instead of raw addresses. We have to patch/write these addresses in KM to ++ * prevent UM from controlling FW addresses directly. ++ * Typedefs for TA and 3D commands are shared between Client and Firmware (both ++ * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use ++ * TA|3D CMD type definitions directly. Therefore we have a SHARED block that ++ * is shared between UM-KM-FW across all BVNC configurations. ++ */ ++typedef struct ++{ ++ CMD_COMMON sCmn; /*!< Common command attributes */ ++ RGXFWIF_DEV_VIRTADDR sHWRTData; /* RTData associated with this command, ++ this is used for context selection and for storing out HW-context, ++ when TA is switched out for continuing later */ ++ ++ RGXFWIF_DEV_VIRTADDR asPRBuffer[RGXFWIF_PRBUFFER_MAXSUPPORTED]; /* Supported PR Buffers like Z/S/MSAA Scratch */ ++ ++} CMDTA3D_SHARED; ++ ++/*! ++ * Client Circular Command Buffer (CCCB) control structure. ++ * This is shared between the Server and the Firmware and holds byte offsets ++ * into the CCCB as well as the wrapping mask to aid wrap around. A given ++ * snapshot of this queue with Cmd 1 running on the GPU might be: ++ * ++ * Roff Doff Woff ++ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........] ++ * < runnable commands >< !ready to run > ++ * ++ * Cmd 1 : Currently executing on the GPU data master. ++ * Cmd 2,3,4: Fence dependencies met, commands runnable. ++ * Cmd 5... : Fence dependency not met yet. ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32WriteOffset; /*!< Host write offset into CCB. This ++ * must be aligned to 16 bytes. */ ++ IMG_UINT32 ui32ReadOffset; /*!< Firmware read offset into CCB. ++ Points to the command that is ++ * runnable on GPU, if R!=W */ ++ IMG_UINT32 ui32DepOffset; /*!< Firmware fence dependency offset. ++ * Points to commands not ready, i.e. ++ * fence dependencies are not met. */ ++ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask, total capacity ++ in bytes of the CCB-1 */ ++#if defined(SUPPORT_AGP) ++ IMG_UINT32 ui32ReadOffset2; ++#if defined(SUPPORT_AGP4) ++ IMG_UINT32 ui32ReadOffset3; ++ IMG_UINT32 ui32ReadOffset4; ++#endif ++#endif ++ ++} UNCACHED_ALIGN RGXFWIF_CCCB_CTL; ++ ++ ++typedef IMG_UINT32 RGXFW_FREELIST_TYPE; ++ ++#define RGXFW_LOCAL_FREELIST IMG_UINT32_C(0) ++#define RGXFW_GLOBAL_FREELIST IMG_UINT32_C(1) ++#if defined(SUPPORT_AGP) ++#define RGXFW_GLOBAL2_FREELIST IMG_UINT32_C(2) ++#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL2_FREELIST + 1U) ++#else ++#define RGXFW_MAX_FREELISTS (RGXFW_GLOBAL_FREELIST + 1U) ++#endif ++#define RGXFW_MAX_HWFREELISTS (2U) ++ ++/*! 
++ * @Defgroup ContextSwitching Context switching data interface ++ * @Brief Types grouping data structures and defines used in realising the Context Switching (CSW) functionality ++ * @{ ++ */ ++ ++/*! ++ * @Brief GEOM DM or TA register controls for context switch ++ */ ++typedef struct ++{ ++ IMG_UINT64 uTAReg_VDM_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the VDM's context state buffer */ ++ IMG_UINT64 uTAReg_VDM_CONTEXT_STATE_RESUME_ADDR; ++ IMG_UINT64 uTAReg_TA_CONTEXT_STATE_BASE_ADDR; /*!< The base address of the TA's context state buffer */ ++ ++ struct ++ { ++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK0; /*!< VDM context store task 0 */ ++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK1; /*!< VDM context store task 1 */ ++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK2; /*!< VDM context store task 2 */ ++ ++ /* VDM resume state update controls */ ++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK0; /*!< VDM context resume task 0 */ ++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK1; /*!< VDM context resume task 1 */ ++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK2; /*!< VDM context resume task 2 */ ++ ++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK3; ++ IMG_UINT64 uTAReg_VDM_CONTEXT_STORE_TASK4; ++ ++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK3; ++ IMG_UINT64 uTAReg_VDM_CONTEXT_RESUME_TASK4; ++ } asTAState[2]; ++ ++} RGXFWIF_TAREGISTERS_CSWITCH; ++/*! @} End of Defgroup ContextSwitching */ ++ ++#define RGXFWIF_TAREGISTERS_CSWITCH_SIZE sizeof(RGXFWIF_TAREGISTERS_CSWITCH) ++ ++typedef struct ++{ ++ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0; ++ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS1; ++ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS; ++ IMG_UINT64 uCDMReg_CDM_TERMINATE_PDS1; ++ ++ /* CDM resume controls */ ++ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0; ++ IMG_UINT64 uCDMReg_CDM_CONTEXT_PDS0_B; ++ IMG_UINT64 uCDMReg_CDM_RESUME_PDS0_B; ++ ++} RGXFWIF_CDM_REGISTERS_CSWITCH; ++ ++/*! ++ * @InGroup ContextSwitching ++ * @Brief Render context static register controls for context switch ++ */ ++typedef struct ++{ ++ RGXFWIF_TAREGISTERS_CSWITCH RGXFW_ALIGN asCtxSwitch_GeomRegs[RGX_NUM_GEOM_CORES]; /*!< Geom registers for ctx switch */ ++} RGXFWIF_STATIC_RENDERCONTEXT_STATE; ++ ++#define RGXFWIF_STATIC_RENDERCONTEXT_SIZE sizeof(RGXFWIF_STATIC_RENDERCONTEXT_STATE) ++ ++typedef struct ++{ ++ RGXFWIF_CDM_REGISTERS_CSWITCH RGXFW_ALIGN sCtxSwitch_Regs; /*!< CDM registers for ctx switch */ ++} RGXFWIF_STATIC_COMPUTECONTEXT_STATE; ++ ++#define RGXFWIF_STATIC_COMPUTECONTEXT_SIZE sizeof(RGXFWIF_STATIC_COMPUTECONTEXT_STATE) ++ ++/*! ++ @Brief Context reset reason. Last reset reason for a reset context. 
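++ For example, the context that caused a lockup reports
++ RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP, while any other context reset
++ alongside it reports RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP.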
++*/ ++typedef enum ++{ ++ RGX_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */ ++ RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */ ++ RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */ ++ RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */ ++ RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */ ++ RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, /*!< Forced reset to ensure scheduling requirements */ ++ RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM = 6, /*!< CDM Mission/safety checksum mismatch */ ++ RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM = 7, /*!< TRP checksum mismatch */ ++ RGX_CONTEXT_RESET_REASON_GPU_ECC_OK = 8, /*!< GPU ECC error (corrected, OK) */ ++ RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR = 9, /*!< GPU ECC error (uncorrected, HWR) */ ++ RGX_CONTEXT_RESET_REASON_FW_ECC_OK = 10, /*!< FW ECC error (corrected, OK) */ ++ RGX_CONTEXT_RESET_REASON_FW_ECC_ERR = 11, /*!< FW ECC error (uncorrected, ERR) */ ++ RGX_CONTEXT_RESET_REASON_FW_WATCHDOG = 12, /*!< FW Safety watchdog triggered */ ++ RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13, /*!< FW page fault (no HWR) */ ++ RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14, /*!< FW execution error (GPU reset requested) */ ++ RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15, /*!< Host watchdog detected FW error */ ++ RGX_CONTEXT_GEOM_OOM_DISABLED = 16, /*!< Geometry DM OOM event is not allowed */ ++} RGX_CONTEXT_RESET_REASON; ++ ++/*! ++ @Brief Context reset data shared with the host ++*/ ++typedef struct ++{ ++ RGX_CONTEXT_RESET_REASON eResetReason; /*!< Reset reason */ ++ IMG_UINT32 ui32ResetExtJobRef; /*!< External Job ID */ ++} RGX_CONTEXT_RESET_REASON_DATA; ++#endif /* RGX_FWIF_SHARED_H */ ++ ++/****************************************************************************** ++ End of file (rgx_fwif_shared.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_heap_firmware.h b/drivers/gpu/drm/img-rogue/rgx_heap_firmware.h +new file mode 100644 +index 000000000000..db2b90b9f2a7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_heap_firmware.h +@@ -0,0 +1,120 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX FW heap definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGX_HEAP_FIRMWARE_H)
++#define RGX_HEAP_FIRMWARE_H
++
++/* Start at 903GiB. Size of 32MB per OSID (see rgxheapconfig.h)
++ * NOTE:
++ * The firmware heaps bases and sizes are defined here to
++ * simplify #include dependencies, see rgxheapconfig.h
++ * for the full RGX virtual address space layout.
++ */
++
++/*
++ * The Config heap holds initialisation data shared between
++ * the driver and firmware (e.g. pointers to the KCCB and FWCCB).
++ * The Main Firmware heap size is adjusted accordingly but most
++ * of the map / unmap functions must take into consideration
++ * the entire range (i.e. main and config heap).
++ */
++#define RGX_FIRMWARE_NUMBER_OF_FW_HEAPS (IMG_UINT32_C(2))
++#define RGX_FIRMWARE_HEAP_SHIFT RGX_FW_HEAP_SHIFT
++#define RGX_FIRMWARE_RAW_HEAP_BASE (0xE1C0000000ULL)
++#define RGX_FIRMWARE_RAW_HEAP_SIZE (IMG_UINT32_C(1) << RGX_FIRMWARE_HEAP_SHIFT)
++
++/* To enable the firmware to compute the exact address of structures allocated by the KM
++ * in the Fw Config subheap, regardless of the KM's page size (and PMR granularity),
++ * objects allocated consecutively but from different PMRs (due to differing memalloc flags)
++ * are allocated with a 64KB offset. This way, all structures will be located at the same base
++ * addresses when the KM is running with a page size of 4k, 16k or 64k. */
++#define RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY (IMG_UINT32_C(0x10000))
++
++/* Ensure the heap can hold 3 PMRs of maximum supported granularity (192KB):
++ * 1st PMR: RGXFWIF_CONNECTION_CTL
++ * 2nd PMR: RGXFWIF_OSINIT
++ * 3rd PMR: RGXFWIF_SYSINIT */
++#define RGX_FIRMWARE_CONFIG_HEAP_SIZE (IMG_UINT32_C(3)*RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY)
++
++#define RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE)
++/*
++ * MIPS FW needs space in the Main heap to map GPU memory.
++ * This space is taken from the MAIN heap, to avoid creating a new heap.
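++ * As a worked illustration (numbers follow from the defines above and
++ * the RGX_FW_HEAP_SHIFT range checked below): with the maximum shift of
++ * 25 the raw heap is 1 << 25 = 32MiB, the Config sub-heap takes
++ * 3 * 64KB = 192KB of it, and a MIPS firmware reserves a further 1MB
++ * (4MB when BRN65101 applies), leaving the remainder as the usable
++ * Main heap.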
++ */ ++#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL (IMG_UINT32_C(0x100000)) /* 1MB */ ++#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101 (IMG_UINT32_C(0x400000)) /* 4MB */ ++ ++#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \ ++ RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL) ++ ++#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101 (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - \ ++ RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101) ++ ++#if !defined(__KERNEL__) ++#if defined(FIX_HW_BRN_65101) ++#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_BRN65101 ++#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101 ++ ++#include "img_defs.h" ++static_assert((RGX_FIRMWARE_RAW_HEAP_SIZE) >= IMG_UINT32_C(0x800000), "MIPS GPU map size cannot be increased due to BRN65101 with a small FW heap"); ++ ++#else ++#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE_NORMAL ++#define RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL ++#endif ++#endif /* !defined(__KERNEL__) */ ++ ++#define RGX_FIRMWARE_MAIN_HEAP_BASE RGX_FIRMWARE_RAW_HEAP_BASE ++#define RGX_FIRMWARE_CONFIG_HEAP_BASE (RGX_FIRMWARE_MAIN_HEAP_BASE + \ ++ RGX_FIRMWARE_RAW_HEAP_SIZE - \ ++ RGX_FIRMWARE_CONFIG_HEAP_SIZE) ++ ++/* ++ * The maximum configurable size via RGX_FW_HEAP_SHIFT is 32MiB (1<<25) and ++ * the minimum is 4MiB (1<<22); the default firmware heap size is set to ++ * maximum 32MiB. ++ */ ++#if defined(RGX_FW_HEAP_SHIFT) && (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25) ++#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]" ++#endif ++ ++#endif /* RGX_HEAP_FIRMWARE_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_heaps.h b/drivers/gpu/drm/img-rogue/rgx_heaps.h +new file mode 100644 +index 000000000000..e41e4002b2c4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_heaps.h +@@ -0,0 +1,68 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX heap definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_HEAPS_H) ++#define RGX_HEAPS_H ++ ++/* ++ Identify heaps by their names ++*/ ++#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< RGX General SVM (shared virtual memory) Heap Identifier */ ++#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */ ++#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */ ++#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */ ++#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */ ++#define RGX_VK_CAPT_REPLAY_HEAP_IDENT "Vulkan Capture Replay" /*!< RGX Vulkan capture replay buffer Heap Identifier */ ++#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Signals Heap Identifier */ ++#define RGX_FBCDC_HEAP_IDENT "FBCDC" /*!< RGX FBCDC State Table Heap Identifier */ ++#define RGX_FBCDC_LARGE_HEAP_IDENT "Large FBCDC" /*!< RGX Large FBCDC State Table Heap Identifier */ ++#define RGX_CMP_MISSION_RMW_HEAP_IDENT "Compute Mission RMW" /*!< Compute Mission RMW Heap Identifier */ ++#define RGX_CMP_SAFETY_RMW_HEAP_IDENT "Compute Safety RMW" /*!< Compute Safety RMW Heap Identifier */ ++#define RGX_TEXTURE_STATE_HEAP_IDENT "Texture State" /*!< Texture State Heap Identifier */ ++#define RGX_VISIBILITY_TEST_HEAP_IDENT "Visibility Test" /*!< Visibility Test Heap Identifier */ ++ ++/* Services client internal heap identification */ ++#define RGX_RGNHDR_BRN_63142_HEAP_IDENT "RgnHdr BRN63142" /*!< RGX RgnHdr BRN63142 Heap Identifier */ ++#define RGX_TQ3DPARAMETERS_HEAP_IDENT "TQ3DParameters" /*!< RGX TQ 3D Parameters Heap Identifier */ ++#define RGX_MMU_INIA_BRN_65273_HEAP_IDENT "MMU INIA BRN65273" /*!< MMU BRN65273 Heap A Identifier */ ++#define RGX_MMU_INIB_BRN_65273_HEAP_IDENT "MMU INIB BRN65273" /*!< MMU BRN65273 Heap B Identifier */ ++#endif /* RGX_HEAPS_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_hwperf.h b/drivers/gpu/drm/img-rogue/rgx_hwperf.h +new file mode 100644 +index 000000000000..fa711b0b6df2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_hwperf.h +@@ -0,0 +1,1607 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX HWPerf and Debug Types and Defines Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Common data types definitions for hardware performance API ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_HWPERF_H_ ++#define RGX_HWPERF_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* These structures are used on both GPU and CPU and must be a size that is a ++ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at ++ * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. ++ */ ++ ++/****************************************************************************** ++ * Includes and Defines ++ *****************************************************************************/ ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++#include "rgx_common.h" ++#include "rgx_hwperf_common.h" ++#include "pvrsrv_tlcommon.h" ++#include "pvrsrv_sync_km.h" ++ ++ ++#if !defined(__KERNEL__) ++/* User-mode and Firmware definitions only */ ++ ++#if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) ++ ++/* HWPerf interface assumption checks */ ++static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16U, "Cluster count too large for HWPerf protocol definition"); ++ ++/*! The number of indirectly addressable TPU_MSC blocks in the GPU */ ++# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST MAX(((IMG_UINT32)RGX_FEATURE_NUM_CLUSTERS >> 1), 1U) ++ ++/*! The number of indirectly addressable USC blocks in the GPU */ ++# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER (RGX_FEATURE_NUM_CLUSTERS) ++ ++# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) ++ ++ /*! 
Defines the number of performance counter blocks that are directly ++ * addressable in the RGX register map for S. */ ++# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 1 /* JONES */ ++# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS) ++# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 1 /* BLACKPEARL */ ++# define RGX_HWPERF_PHANTOM_DUST_BLKS 2 /* TPU, TEXAS */ ++# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 2 /* USC, PBE */ ++ ++# elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) ++ ++ /*! Defines the number of performance counter blocks that are directly ++ * addressable in the RGX register map. */ ++# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 2 /* TORNADO, TA */ ++ ++# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS) ++# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 2 /* RASTER, TEXAS */ ++# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */ ++# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */ ++ ++# else /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) i.e. S6 */ ++ ++ /*! Defines the number of performance counter blocks that are ++ * addressable in the RGX register map for Series 6. */ ++# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 3 /* TA, RASTER, HUB */ ++# define RGX_HWPERF_INDIRECT_BY_PHANTOM 0 /* PHANTOM is not there in Rogue1. Just using it to keep naming same as later series (RogueXT n Rogue XT+) */ ++# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 0 ++# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */ ++# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */ ++ ++# endif ++ ++/*! The number of performance counters in each layout block defined for UM/FW code */ ++#if defined(RGX_FEATURE_CLUSTER_GROUPING) ++ #define RGX_HWPERF_CNTRS_IN_BLK 6 ++ #else ++ #define RGX_HWPERF_CNTRS_IN_BLK 4 ++#endif ++ ++#endif /* #if defined(RGX_BVNC_CORE_KM_HEADER) && defined(RGX_BNC_CONFIG_KM_HEADER) */ ++#else /* defined(__KERNEL__) */ ++/* Kernel/server definitions - not used, hence invalid definitions */ ++ ++# define RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC 0xFF ++ ++# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC ++# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC ++ ++# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC ++# define RGX_HWPERF_INDIRECT_BY_PHANTOM RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC ++# define RGX_HWPERF_PHANTOM_NONDUST_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC ++# define RGX_HWPERF_PHANTOM_DUST_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC ++# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC ++ ++#endif ++ ++/*! The number of custom non-mux counter blocks supported */ ++#define RGX_HWPERF_MAX_CUSTOM_BLKS 5U ++ ++/*! The number of counters supported in each non-mux counter block */ ++#define RGX_HWPERF_MAX_CUSTOM_CNTRS 8U ++ ++/*! The number of directly-addressable counters allowed in non-mux counter blocks */ ++#define RGX_CNTBLK_COUNTERS_MAX ((IMG_UINT32)PVRSRV_HWPERF_COUNTERS_PERBLK + 0U) ++ ++ ++/****************************************************************************** ++ * Data Stream Common Types ++ *****************************************************************************/ ++ ++/*! All the Data Masters HWPerf is aware of. When a new DM is added to this ++ * list, it should be appended at the end to maintain backward compatibility ++ * of HWPerf data. 
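++ * (For example, RGX_HWPERF_DM_TDM below sits after the DMs that predate
++ * it, so DM values already recorded in captured HWPerf streams remain
++ * valid.)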
++ */ ++typedef enum { ++ ++ RGX_HWPERF_DM_GP, ++ RGX_HWPERF_DM_2D, ++ RGX_HWPERF_DM_TA, ++ RGX_HWPERF_DM_3D, ++ RGX_HWPERF_DM_CDM, ++ RGX_HWPERF_DM_RTU, ++ RGX_HWPERF_DM_SHG, ++ RGX_HWPERF_DM_TDM, ++ ++ RGX_HWPERF_DM_LAST, ++ ++ RGX_HWPERF_DM_INVALID = 0x1FFFFFFF ++} RGX_HWPERF_DM; ++ ++/*! Define containing bit position for 32bit feature flags used in hwperf and api */ ++typedef IMG_UINT32 RGX_HWPERF_FEATURE_FLAGS; ++#define RGX_HWPERF_FEATURE_PERFBUS_FLAG 0x0001U ++#define RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG 0x0002U ++#define RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG 0x0004U ++#define RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG 0x0008U ++#define RGX_HWPERF_FEATURE_ROGUEXE_FLAG 0x0010U ++#define RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG 0x0020U ++#define RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG 0x0040U ++#define RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION 0x0080U ++#define RGX_HWPERF_FEATURE_MULTICORE_FLAG 0x0100U ++#define RGX_HWPERF_FEATURE_VOLCANIC_FLAG 0x0800U ++#define RGX_HWPERF_FEATURE_ROGUE_FLAG 0x1000U ++#define RGX_HWPERF_FEATURE_OCEANIC_FLAG 0x2000U ++ ++/*! This structure holds the data of a firmware packet. */ ++typedef struct ++{ ++ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ ++ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ ++ IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */ ++ IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */ ++ IMG_UINT32 ui32TimeCorrIndex; /*!< Internal field */ ++ IMG_UINT32 ui32Padding; /*!< Reserved */ ++} RGX_HWPERF_FW_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA); ++ ++/*! This structure holds the data of a hardware packet, including counters. */ ++typedef struct ++{ ++ IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ ++ IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ ++ IMG_UINT32 ui32PID; /*!< Process identifier */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ ++ IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ ++ IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */ ++ IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */ ++ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */ ++ IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */ ++ IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */ ++ IMG_UINT32 ui32CtxPriority; /*!< Context priority */ ++ IMG_UINT32 ui32GPUIdMask; /*!< GPU IDs active within this event */ ++ IMG_UINT32 ui32KickInfo; /*!< <31..8> Reserved <7..0> GPU Pipeline DM kick ID, 0 if not using Pipeline DMs */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. To ensure correct alignment */ ++ IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Optional variable length Counter data */ ++ IMG_UINT32 ui32Padding2; /*!< Reserved. 
To ensure correct alignment (not written in the packet) */ ++} RGX_HWPERF_HW_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA); ++RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_HW_DATA, aui32CountBlksStream); ++ ++typedef struct ++{ ++ IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */ ++ IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */ ++ IMG_UINT32 ui32PID; /*!< Process identifier */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ ++ IMG_UINT32 ui32WorkTarget[4]; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */ ++ /*!< V2A Block count / Client driver context job reference used for tracking/debugging */ ++ /*!< RGX Data master context job reference used for tracking/debugging */ ++ /*!< V2 Block count / Index to the time correlation at the time the packet was generated */ ++} RGX_HWPERF_HW_DATA_V2; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA_V2); ++ ++/*! Mask for use with the aui32CountBlksStream field when decoding the ++ * counter block ID and mask word. */ ++#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U ++#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U ++ ++/*! Obtains the counter block ID word from an aui32CountBlksStream field. ++ * The word combines Control bits (15-12), GPU-Id (11-8), Group (7-4), Unit ++ * within group (3-0) */ ++#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT)) ++ ++/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address ++ * and stream index. May be used in decoding the counter block stream words of ++ * a RGX_HWPERF_HW_DATA structure. */ ++#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) RGX_HWPERF_GET_CNTBLK_IDW((_data_addr)->aui32CountBlksStream[(_idx)]) ++ ++/*! Obtains the GPU ID from the supplied RGX_HWPERF_HW_DATA CNTBLK_IDW */ ++#define RGX_HWPERF_GET_CNTBLK_GPUW(_word) ((IMG_UINT16)(((_word)&RGX_CNTBLK_ID_MC_GPU_MASK)>>RGX_CNTBLK_ID_MC_GPU_SHIFT)) ++ ++#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&(~RGX_HWPERF_CNTBLK_ID_MASK))) ++ ++/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address ++ * and stream index. May be used in decoding the counter block stream words ++ * of a RGX_HWPERF_HW_DATA structure. */ ++#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) RGX_HWPERF_GET_CNT_MASKW((_data_addr)->aui32CountBlksStream[(_idx)]) ++ ++/*! Context switch packet event */ ++typedef struct ++{ ++ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ ++ IMG_UINT32 ui32FrameNum; /*!< Client Frame number (TA, 3D only) */ ++ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */ ++ IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */ ++ IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */ ++ IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */ ++} RGX_HWPERF_CSW_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA); ++ ++/*! Enumeration of clocks supporting this event */ ++typedef enum ++{ ++ RGX_HWPERF_CLKS_CHG_INVALID = 0, ++ ++ RGX_HWPERF_CLKS_CHG_NAME_CORE = 1, ++ ++ RGX_HWPERF_CLKS_CHG_LAST, ++} RGX_HWPERF_CLKS_CHG_NAME; ++ ++/*! This structure holds the data of a clocks change packet. 
*/
++typedef struct
++{
++ IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */
++ RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */
++ IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */
++ IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */
++ IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and
++ correlated to OSTimeStamp */
++} RGX_HWPERF_CLKS_CHG_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA);
++
++/*! Enumeration of GPU utilisation states supported by this event */
++typedef IMG_UINT32 RGX_HWPERF_GPU_STATE;
++
++/*! This structure holds the data of a GPU utilisation state change packet. */
++typedef struct
++{
++ RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */
++ IMG_UINT32 uiUnused1; /*!< Padding */
++ IMG_UINT32 uiUnused2; /*!< Padding */
++ IMG_UINT32 uiUnused3; /*!< Padding */
++} RGX_HWPERF_GPU_STATE_CHG_DATA;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA);
++
++
++/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */
++#define HWPERF_PWR_EST_V1_SIG 0x48504531
++
++/*! Macros to obtain a component field from a counter ID word */
++#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31)
++#define RGX_HWPERF_GET_PWR_EST_GPUID(_word) (((_word)&0x70000000)>>28)
++/*!< Obtains the GPU ID from a counter ID word */
++#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24)
++#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF)
++
++#define RGX_HWPERF_PWR_EST_HIGH_OFFSET (31)
++#define RGX_HWPERF_PWR_EST_GPUID_OFFSET (28)
++#define RGX_HWPERF_PWR_EST_GPUID_MASK (0x7U)
++#define RGX_HWPERF_PWR_EST_UNIT_OFFSET (24)
++#define RGX_HWPERF_PWR_EST_UNIT_MASK (0xFU)
++#define RGX_HWPERF_PWR_EST_VALUE_MASK (0xFFFFU)
++
++/*! This macro constructs a counter ID for a power estimate data stream from
++ * the component parts of: high word flag, unit id, GPU id, counter number */
++#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _core, _number) \
++ ((IMG_UINT32)(((IMG_UINT32)((IMG_UINT32)(_high)&0x1U)<<RGX_HWPERF_PWR_EST_HIGH_OFFSET) | \
++ ((IMG_UINT32)((IMG_UINT32)(_core)&RGX_HWPERF_PWR_EST_GPUID_MASK)<<RGX_HWPERF_PWR_EST_GPUID_OFFSET) | \
++ ((IMG_UINT32)((IMG_UINT32)(_unit)&RGX_HWPERF_PWR_EST_UNIT_MASK)<<RGX_HWPERF_PWR_EST_UNIT_OFFSET) | \
++ ((IMG_UINT32)(_number)&RGX_HWPERF_PWR_EST_VALUE_MASK)))
++
++#define RGX_HWPERF_MAX_BVNC_LEN (24)
++
++static_assert((RGX_HWPERF_MAX_BVNC_LEN >= RGX_BVNC_STR_SIZE_MAX),
++ "Space inside HWPerf packet data for BVNC string insufficient");
++
++#define RGX_HWPERF_MAX_BVNC_BLOCK_LEN (16U)
++
++/*! BVNC Features */
++typedef struct
++{
++ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
++ IMG_UINT16 ui16BlockID;
++
++ /*! Number of counters in this block type */
++ IMG_UINT16 ui16NumCounters;
++
++ /*! Number of blocks of this type */
++ IMG_UINT16 ui16NumBlocks;
++
++ /*! Reserved for future use */
++ IMG_UINT16 ui16Reserved;
++} RGX_HWPERF_BVNC_BLOCK;
++
++/*! BVNC Features */
++typedef struct
++{
++ IMG_CHAR aszBvncString[RGX_HWPERF_MAX_BVNC_LEN]; /*!< BVNC string */
++ IMG_UINT32 ui32BvncKmFeatureFlags; /*!< See RGX_HWPERF_FEATURE_FLAGS */
++ IMG_UINT16 ui16BvncBlocks; /*!< Number of blocks described in aBvncBlocks */
++ IMG_UINT16 ui16BvncGPUCores; /*!< Number of GPU cores present */
++ RGX_HWPERF_BVNC_BLOCK aBvncBlocks[RGX_HWPERF_MAX_BVNC_BLOCK_LEN]; /*!< Supported Performance Blocks for BVNC. See RGX_HWPERF_BVNC_BLOCK */
++} RGX_HWPERF_BVNC;
++
++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_BVNC);
++
++/*! Performance Counter Configuration data element. */
++typedef struct
++{
++ IMG_UINT32 ui32BlockID; /*!< Counter Block ID.
See RGX_HWPERF_CNTBLK_ID */ ++ IMG_UINT32 ui32NumCounters; /*!< Number of counters configured */ ++ IMG_UINT32 ui32CounterVals[RGX_CNTBLK_COUNTERS_MAX]; /*!< Counters configured (ui32NumCounters worth of entries) */ ++} RGX_HWPERF_COUNTER_CFG_DATA_EL; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG_DATA_EL); ++ ++/*! Performance Counter Configuration data. */ ++typedef struct ++{ ++ IMG_UINT32 ui32EnabledBlocks; /*!< Number of Enabled Blocks. */ ++ RGX_HWPERF_COUNTER_CFG_DATA_EL uData; /*!< Start of variable length data. See RGX_HWPERF_COUNTER_CFG_DATA_EL */ ++ IMG_UINT32 ui32Padding; /*!< reserved */ ++} RGX_HWPERF_COUNTER_CFG; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_COUNTER_CFG); ++ ++/*! Sub-event's data. */ ++typedef union ++{ ++ struct ++ { ++ RGX_HWPERF_DM eDM; /*!< Data Master ID. */ ++ RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */ ++ IMG_UINT32 ui32DMContext; /*!< FW render context */ ++ } sHWR; /*!< HWR sub-event data. */ ++ ++ RGX_HWPERF_BVNC sBVNC; /*!< BVNC Features. See RGX_HWPERF_BVNC */ ++ struct ++ { ++ IMG_UINT32 ui32EvMaskLo; /*!< Low order 32 bits of Filter Mask */ ++ IMG_UINT32 ui32EvMaskHi; /*!< High order 32 bits of Filter Mask */ ++ } sEvMsk; /*!< HW Filter Mask */ ++ RGX_HWPERF_COUNTER_CFG sPCC; /*!< Performance Counter Config. See RGX_HWPERF_COUNTER_CFG */ ++} RGX_HWPERF_FWACT_DETAIL; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DETAIL); ++ ++/*! This structure holds the data of a FW activity event packet */ ++typedef struct ++{ ++ RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */ ++ RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. */ ++} RGX_HWPERF_FWACT_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA); ++ ++ ++typedef enum { ++ RGX_HWPERF_UFO_EV_UPDATE, /*!< Update on the UFO objects. */ ++ RGX_HWPERF_UFO_EV_CHECK_SUCCESS, /*!< Successful check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS, /*!< Successful partial render check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_CHECK_FAIL, /*!< Unsuccessful check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_PRCHECK_FAIL, /*!< Unsuccessful partial render check on UFO objects. */ ++ RGX_HWPERF_UFO_EV_FORCE_UPDATE, /*!< Forced erroring of the UFO objects. */ ++ ++ RGX_HWPERF_UFO_EV_LAST /*!< Reserved. Do not use. */ ++} RGX_HWPERF_UFO_EV; ++ ++/*! Data stream tuple. */ ++typedef union ++{ ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ ++ IMG_UINT32 ui32Value; /*!< Value of the UFO object */ ++ } sCheckSuccess; ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ ++ IMG_UINT32 ui32Value; /*!< Value of the UFO object */ ++ IMG_UINT32 ui32Required; /*!< Value of the UFO object required by the fence */ ++ } sCheckFail; ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< UFO's unique address */ ++ IMG_UINT32 ui32OldValue; /*!< Value of UFO object before update */ ++ IMG_UINT32 ui32NewValue; /*!< Value of UFO object after update */ ++ } sUpdate; ++} RGX_HWPERF_UFO_DATA_ELEMENT; ++ ++/*! This structure holds the packet payload data for UFO event. */ ++typedef struct ++{ ++ RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event. See RGX_HWPERF_UFO_EV */ ++ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the timer correlation data ++ at the time the packet was generated. ++ Used to approximate Host timestamps for ++ these events. 
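++ (The correlation entries themselves are carried by the
++ RGX_HWPERF_HOST_CLK_SYNC_DATA packets defined later in this file.)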
*/ ++ IMG_UINT32 ui32PID; /*!< Client process identifier */ ++ IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX ++ API to track submitted work (for ++ debugging/trace purposes) */ ++ IMG_UINT32 ui32IntJobRef; /*!< Internal reference used to track ++ submitted work (for debugging / trace ++ purposes) */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context. ++ RenderContext for TA and 3D, Common ++ Context for other DMs */ ++ IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the ++ stream and stream data offset in the ++ payload */ ++ RGX_HWPERF_DM eDM; /*!< Data Master number, see RGX_HWPERF_DM */ ++ IMG_UINT32 ui32Padding; /*!< Unused, reserved */ ++ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Series of tuples holding UFO objects data */ ++} RGX_HWPERF_UFO_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA); ++ ++ ++/*! ++ * RGX_HWPERF_KICK_TYPE describes the type of kick for events received / sent ++ * between KICK_START / KICK_END inclusively for all event types. ++ */ ++typedef enum ++{ ++ RGX_HWPERF_KICK_TYPE_TA3D, /*!< Replaced by separate TA and 3D types */ ++ RGX_HWPERF_KICK_TYPE_TQ2D, /*!< 2D TQ Kick */ ++ RGX_HWPERF_KICK_TYPE_TQ3D, /*!< 3D TQ Kick */ ++ RGX_HWPERF_KICK_TYPE_CDM, /*!< Compute Kick */ ++ RGX_HWPERF_KICK_TYPE_RS, /*!< Ray Store Kick */ ++ RGX_HWPERF_KICK_TYPE_VRDM, /*!< Vertex Ray Data Master Kick */ ++ RGX_HWPERF_KICK_TYPE_TQTDM,/*!< 2D Data Master TQ Kick */ ++ RGX_HWPERF_KICK_TYPE_SYNC, /*!< Sync Kick */ ++ RGX_HWPERF_KICK_TYPE_TA, /*!< TA Kick */ ++ RGX_HWPERF_KICK_TYPE_3D, /*!< 3D Kick */ ++ RGX_HWPERF_KICK_TYPE_LAST, ++ ++ RGX_HWPERF_KICK_TYPE_FORCE_32BIT = 0x7fffffff ++} RGX_HWPERF_KICK_TYPE; ++ ++typedef struct ++{ ++ RGX_HWPERF_KICK_TYPE ui32EnqType; /*!< Workload type sent to FW for ++ scheduling on GPU hardware. ++ See RGX_HWPERF_KICK_TYPE */ ++ IMG_UINT32 ui32PID; /*!< Client process identifier */ ++ IMG_UINT32 ui32ExtJobRef; /*!< Reference used by callers of the RGX API ++ to track submitted work (for debugging / ++ trace purposes) */ ++ IMG_UINT32 ui32IntJobRef; /*!< internal reference used to track submitted ++ work (for debugging / trace purposes) */ ++ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */ ++ IMG_UINT32 ui32Padding; /*!< Unused, reserved */ ++ IMG_UINT64 ui64CheckFence_UID; /*!< ID of fence gating work execution on GPU */ ++ IMG_UINT64 ui64UpdateFence_UID; /*!< ID of fence triggered after work completes on GPU */ ++ IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */ ++ IMG_UINT32 ui32CycleEstimate; /*!< Estimated cycle time for the workload */ ++ PVRSRV_FENCE hCheckFence; /*!< Fence this enqueue task waits for, before starting */ ++ PVRSRV_FENCE hUpdateFence; /*!< Fence this enqueue task signals, on completion */ ++ PVRSRV_TIMELINE hUpdateTimeline; /*!< Timeline on which the above hUpdateFence is created */ ++ ++ /* Align structure size to 8 bytes */ ++} RGX_HWPERF_HOST_ENQ_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. 
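++ * The static_assert below is the usual power-of-two remainder test:
++ * assuming PVRSRVTL_PACKET_ALIGNMENT is 8, (size & 7U) == 0U holds
++ * exactly when the size is a multiple of 8.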
*/ ++static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef struct ++{ ++ RGX_HWPERF_UFO_EV eEvType; /*!< Subtype of the event */ ++ IMG_UINT32 ui32StreamInfo; /*!< Encoded number of elements in the stream and ++ stream data offset in the payload */ ++#ifdef __CHECKER__ ++ /* Since we're not conforming to the C99 standard by not using a flexible ++ * array member need to add a special case for Smatch static code analyser. */ ++ IMG_UINT32 aui32StreamData[]; ++#else ++ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; ++ /*!< Series of tuples holding UFO objects data */ ++ ++ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ ++#endif ++} RGX_HWPERF_HOST_UFO_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/*! ++ * RGX_HWPERF_HOST_RESOURCE_TYPE describes the type of resource which has been ++ * Allocated, Freed or Modified. The values are used to determine which event ++ * data structure to use to decode the data from the event stream ++ */ ++typedef enum ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID, /*!< Invalid */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /*!< SyncPrim */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE_DEPRECATED, ++ /*!< Timeline resource packets are ++ now emitted in client hwperf buffer */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /*!< Fence for use on GPU (SYNC_CP backed) */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, /*!< Sync Checkpoint */ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, /*!< Fence created on SW timeline */ ++ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_LAST /*!< End of enumeration */ ++} RGX_HWPERF_HOST_RESOURCE_TYPE; ++ ++typedef union ++{ ++ /*! Data for TYPE_TIMELINE (*Deprecated*). This sub-event is no longer ++ * generated in the HOST stream. Timeline data is now provided in the ++ * CLIENT stream instead. ++ */ ++ struct ++ { ++ IMG_UINT32 uiPid; /*!< Identifier of owning process */ ++ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for timeline resource */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ } sTimelineAlloc; ++ ++ /*! Data for TYPE_FENCE_PVR */ ++ struct ++ { ++ IMG_PID uiPID; /*!< Identifier of owning process */ ++ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ ++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier of the check point ++ backing this fence on the GPU */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sFenceAlloc; ++ ++ /*! Data for TYPE_SYNC_CP */ ++ struct ++ { ++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ ++ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ ++ IMG_PID uiPID; /*!< Identifier of owning process */ ++ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sSyncCheckPointAlloc; ++ ++ /*! 
Data for TYPE_FENCE_SW */ ++ struct ++ { ++ IMG_PID uiPID; /*!< Identifier of owning process */ ++ PVRSRV_FENCE hSWFence; /*!< Unique identifier for the SWFence resource */ ++ PVRSRV_TIMELINE hSWTimeline; /*!< Unique identifier for the timeline resource */ ++ IMG_UINT64 ui64SyncPtIndex; /*!< Sync-pt index where this SW timeline has reached */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sSWFenceAlloc; ++ ++ /*! Data for TYPE_SYNC */ ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< Identifier of sync resource */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ } sSyncAlloc; ++} RGX_HWPERF_HOST_ALLOC_DETAIL; ++ ++typedef struct ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType; ++ /*!< This describes the type of the resource ++ allocated in the driver. See ++ RGX_HWPERF_HOST_RESOURCE_TYPE */ ++ RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail; ++ /*!< Union of structures providing further ++ data regarding the resource allocated. ++ Size of data varies with union member that ++ is present, check ``ui32AllocType`` value ++ to decode */ ++} RGX_HWPERF_HOST_ALLOC_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef union ++{ ++ /*! Data for TYPE_TIMELINE (*Deprecated*) */ ++ struct ++ { ++ IMG_UINT32 uiPid; /*!< Identifier of owning process */ ++ IMG_UINT64 ui64Timeline_UID1; /*!< Unique identifier for the timeline resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ } sTimelineDestroy; ++ ++ /*! Data for TYPE_FENCE_PVR */ ++ struct ++ { ++ IMG_UINT64 ui64Fence_UID; /*!< Unique identifier for the fence resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. */ ++ } sFenceDestroy; ++ ++ /*! Data for TYPE_SYNC_CP */ ++ struct ++ { ++ IMG_UINT32 ui32CheckPt_FWAddr; /*!< Unique identifier for the check point resource */ ++ } sSyncCheckPointFree; ++ ++ /*! Data for TYPE_SYNC */ ++ struct ++ { ++ IMG_UINT32 ui32FWAddr; /*!< Unique identifier for the sync resource */ ++ } sSyncFree; ++} RGX_HWPERF_HOST_FREE_DETAIL; ++ ++typedef struct ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType; ++ /*!< This describes the type of the resource ++ freed or released by the driver. See ++ RGX_HWPERF_HOST_RESOURCE_TYPE */ ++ RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail; ++ /*!< Union of structures providing further data ++ regarding the resource freed. Size of data ++ varies with union member that is present, ++ check ``ui32FreeType`` value to decode */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++} RGX_HWPERF_HOST_FREE_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. 
*/ ++static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef struct ++{ ++ IMG_UINT64 ui64CRTimestamp; /*!< CR timer value from the latest entry of ++ the time domains correlation table */ ++ IMG_UINT64 ui64OSTimestamp; /*!< OS timestamp from the latest entry of the ++ time domains correlation table */ ++ IMG_UINT32 ui32ClockSpeed; /*!< GPU clock speed from the latest entry of ++ the time domains correlation table */ ++ IMG_UINT32 ui32Padding; /*!< Reserved, align structure size to 8 bytes */ ++} RGX_HWPERF_HOST_CLK_SYNC_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef union ++{ ++ /*! Data for TYPE_FENCE_PVR */ ++ struct ++ { ++ IMG_UINT64 ui64NewFence_UID; /*!< Unique identifier for the new merged fence ++ resource that has been created */ ++ IMG_UINT64 ui64InFence1_UID; /*!< Unique identifier for the fence resource */ ++ IMG_UINT64 ui64InFence2_UID; /*!< Unique identifier of the check point backing ++ the fence on the GPU */ ++ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; ++ /*!< Label or name given to the sync resource */ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ } sFenceMerge; ++} RGX_HWPERF_HOST_MODIFY_DETAIL; ++ ++typedef struct ++{ ++ RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType; ++ /*!< Describes the type of the resource ++ modified by the driver. See ++ RGX_HWPERF_HOST_RESOURCE_TYPE */ ++ ++ RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail; ++ /*!< Union of structures providing further ++ data regarding the resource modified. ++ Size of data varies with union member that ++ is present. ++ Check ``uiModifyType`` value to decode */ ++} RGX_HWPERF_HOST_MODIFY_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef enum ++{ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED = 0, /*!< Invalid */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK, /*!< Device OK */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING, /*!< Device responding to requests */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD, /*!< Device not responding */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT, /*!< Device has faulted */ ++ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_LAST ++} RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS; ++ ++typedef enum ++{ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED = 0, /*!< Invalid */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE, /*!< No underlying health reason. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED, /*!< Device has asserted. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING, /*!< Device poll has failed. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS, /*!< Device timeout has fired. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT, /*!< Queue has become corrupt. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED, /*!< Queue has stalled. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING, /*!< Device is idling. */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING, /*!< Device restarting. 
*/ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS,/*!< Interrupts have been discarded. */ ++ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_LAST ++} RGX_HWPERF_HOST_DEVICE_HEALTH_REASON; ++ ++/*! RGX_HWPERF_DEV_INFO_EV values */ ++typedef enum ++{ ++ RGX_HWPERF_DEV_INFO_EV_HEALTH, /*!< Health sub-event */ ++ ++ RGX_HWPERF_DEV_INFO_EV_LAST /*!< Last enumeration value */ ++} RGX_HWPERF_DEV_INFO_EV; ++ ++/*! RGX_HWPERF_HOST_DEV_INFO_DETAIL is a union of structures providing ++ * further data regarding the device's status ++ */ ++typedef union ++{ ++ /*! Data for device status event */ ++ struct ++ { ++ RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS eDeviceHealthStatus; ++ /*!< Device's health status */ ++ RGX_HWPERF_HOST_DEVICE_HEALTH_REASON eDeviceHealthReason; ++ /*!< Reason for device's health status */ ++ } sDeviceStatus; ++} RGX_HWPERF_HOST_DEV_INFO_DETAIL; ++ ++/*! RGX_HWPERF_HOST_DEV_INFO_DATA contains device health status information */ ++typedef struct ++{ ++ IMG_UINT32 ui32Padding; ++ /*!< Reserved. Align structure size to 8 bytes */ ++ RGX_HWPERF_DEV_INFO_EV eEvType; ++ /*!< Type of the sub-event. See ++ RGX_HWPERF_DEV_INFO_EV */ ++ RGX_HWPERF_HOST_DEV_INFO_DETAIL uDevInfoDetail; ++ /*!< Union of structures providing further data ++ regarding the device's status. Size of data ++ varies with union member that is present, ++ check ``eEvType`` value to decode */ ++} RGX_HWPERF_HOST_DEV_INFO_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_DEV_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/*! RGX_HWPERF_INFO_EV event subtype for RGX_HWPERF_HOST_INFO_DATA events */ ++typedef enum ++{ ++ RGX_HWPERF_INFO_EV_MEM_USAGE, /*!< Memory usage event */ ++ RGX_HWPERF_INFO_EV_LAST /*!< End of enumeration */ ++} RGX_HWPERF_INFO_EV; ++ ++/*! RGX_HWPERF_HOST_INFO_DETAIL contains the data payload for the ++ * RGX_HWPERF_HOST_INFO_DATA event. ++ */ ++typedef union ++{ ++ /*! Host Memory usage statistics */ ++ struct ++ { ++ IMG_UINT32 ui32TotalMemoryUsage; /*!< Total memory usage */ ++ /*! Detailed memory usage */ ++ struct ++ { ++ IMG_UINT32 ui32Pid; /*!< Process ID */ ++ IMG_UINT32 ui32KernelMemUsage; /*!< Kernel memory usage */ ++ IMG_UINT32 ui32GraphicsMemUsage; /*!< GPU memory usage */ ++ } sPerProcessUsage[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; ++ } sMemUsageStats; ++} RGX_HWPERF_HOST_INFO_DETAIL; ++ ++/*! RGX_HWPERF_HOST_INFO_DATA. Host Info data event payload contains device ++ * memory usage information. ++ */ ++typedef struct ++{ ++ IMG_UINT32 ui32Padding; /*!< Reserved. Align structure size to 8 bytes */ ++ RGX_HWPERF_INFO_EV eEvType; /*!< Type of subevent. See RGX_HWPERF_INFO_EV */ ++ RGX_HWPERF_HOST_INFO_DETAIL uInfoDetail; ++ /*!< Union of structures providing further data ++ regarding memory usage. Size varies with union ++ member that is present, check ``eEvType`` ++ value to decode */ ++} RGX_HWPERF_HOST_INFO_DATA; ++ ++/* Payload size must be multiple of 8 bytes to align start of next packet. */ ++static_assert((sizeof(RGX_HWPERF_HOST_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/*! 
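++ A sizing note: variable-length payloads such as the sPerProcessUsage
++ array above carry no explicit element count; a consumer recovers it by
++ subtracting the fixed-size prefix of the payload from
++ RGX_HWPERF_GET_DATA_SIZE() of the packet and dividing by the size of
++ one array element.
++ */
++
++/*!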
FENCE_WAIT_TYPE definitions */ ++typedef enum ++{ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN = 0, /*!< Begin */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END, /*!< End */ ++ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_LAST, /*!< Do not use */ ++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE; ++ ++/*! FENCE_WAIT_RESULT definitions */ ++typedef enum ++{ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_INVALID = 0, /*!< Invalid */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_TIMEOUT, /*!< Timed Out */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_PASSED, /*!< Passed */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_ERROR, /*!< Errored */ ++ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT_LAST, /*!< Do not use */ ++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT; ++ ++/*! FENCE_WAIT_DETAIL Event Payload */ ++typedef union ++{ ++/*! Data for SYNC_FENCE_WAIT_TYPE_BEGIN */ ++ struct ++ { ++ IMG_UINT32 ui32TimeoutInMs; /*!< Wait timeout (ms) */ ++ } sBegin; ++ ++ /*! Data for SYNC_FENCE_WAIT_TYPE_END */ ++ struct ++ { ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT eResult; /*!< Wait result */ ++ } sEnd; ++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL; ++ ++/*! RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA Event Payload. This data structure ++ * is received whenever the host driver handles a wait for sync event request. ++ */ ++typedef struct ++{ ++ IMG_PID uiPID; /*!< Identifier of the owning process */ ++ PVRSRV_FENCE hFence; /*!< Unique identifier for the fence resource */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType; ++ /*!< Type of the subevent, see ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DETAIL uDetail; ++ /*!< Union of structures providing further data ++ regarding device's status. Size of data varies with ++ union member that is present, check ``eType`` value ++ to decode */ ++ ++} RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA; ++ ++static_assert((sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/*! RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA. ++ * Software Timeline Advanced Event Payload. This data structure is received ++ * whenever the host driver processes a Software Timeline Advanced event. 
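++ * Together with earlier TYPE_FENCE_SW allocation events that record the
++ * same timeline handle, it lets a consumer track how far each software
++ * timeline has progressed via ui64SyncPtIndex.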
++ */ ++typedef struct ++{ ++ IMG_PID uiPID; /*!< Identifier of the owning process */ ++ PVRSRV_TIMELINE hTimeline; /*!< Unique identifier for the timeline resource */ ++ IMG_UINT64 ui64SyncPtIndex; /*!< Index of the sync point to which the ++ timeline has advanced */ ++ ++} RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA; ++ ++static_assert((sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef enum ++{ ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_INVALID = 0, /*!< Invalid */ ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME, /*!< Process Name */ ++ ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE_LAST, /*!< Do not use */ ++} RGX_HWPERF_HOST_CLIENT_INFO_TYPE; ++ ++typedef struct ++{ ++ IMG_PID uiClientPID; /*!< Client process identifier */ ++ IMG_UINT32 ui32Length; /*!< Number of bytes present in ``acName`` */ ++ IMG_CHAR acName[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Process name string, null terminated */ ++} RGX_HWPERF_HOST_CLIENT_PROC_NAME; ++ ++#define RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen) \ ++ ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_PROC_NAME, acName) + (ui32NameLen))) ++ ++typedef union ++{ ++ struct ++ { ++ IMG_UINT32 ui32Count; /*!< Number of elements in ``asProcNames`` */ ++ RGX_HWPERF_HOST_CLIENT_PROC_NAME asProcNames[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; ++ } sProcName; ++} RGX_HWPERF_HOST_CLIENT_INFO_DETAIL; ++ ++typedef struct ++{ ++ IMG_UINT32 uiReserved1; /*!< Reserved. Align structure size to 8 bytes */ ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE eType; ++ /*!< Type of the subevent, see ++ RGX_HWPERF_HOST_CLIENT_INFO_TYPE */ ++ RGX_HWPERF_HOST_CLIENT_INFO_DETAIL uDetail; ++ /*!< Union of structures. Size of data ++ varies with union member that is present, ++ check ``eType`` value to decode */ ++ ++} RGX_HWPERF_HOST_CLIENT_INFO_DATA; ++ ++static_assert((sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1U)) == 0U, ++ "sizeof(RGX_HWPERF_HOST_CLIENT_INFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT"); ++ ++typedef enum ++{ ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_NONE, ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_DEFAULT_FRAMEBUFFER, ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_OFFSCREEN_FB_ATTACHMENTS, ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE_TILE_LIFETIME_DATA, ++ ++ RGX_HWPERF_RESOURCE_TYPE_COUNT ++} RGX_HWPERF_RESOURCE_CAPTURE_TYPE; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32Height; ++ IMG_UINT32 ui32Width; ++ IMG_UINT32 ui32BPP; ++ IMG_UINT32 ui32PixFormat; ++} RGX_RESOURCE_PER_SURFACE_INFO, *PRGX_RESOURCE_PER_SURFACE_INFO; ++ ++typedef struct ++{ ++ IMG_INT32 i32XOffset; /*!< render surface X shift */ ++ IMG_INT32 i32YOffset; /*!< render surface Y shift */ ++ IMG_UINT32 ui32WidthInTiles; /*!< number of TLT data points in X */ ++ IMG_UINT32 ui32HeightInTiles; /*!< number of TLT data points in Y */ ++} RGX_RESOURCE_PER_TLT_BUFFER_INFO, *PRGX_RESOURCE_PER_TLT_BUFFER_INFO; ++ ++typedef union ++{ ++ struct RGX_RESOURCE_CAPTURE_RENDER_SURFACES ++ { ++ IMG_UINT32 ui32RenderSurfaceCount; ++ RGX_RESOURCE_PER_SURFACE_INFO sSurface[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; ++ } sRenderSurfaces; ++ ++ struct RGX_RESOURCE_CAPTURE_TILE_LIFETIME_BUFFERS ++ { ++ RGX_RESOURCE_PER_TLT_BUFFER_INFO sTLTBufInfo[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; ++ } sTLTBuffers; ++} RGX_RESOURCE_CAPTURE_DETAIL; ++ ++typedef struct ++{ ++ RGX_HWPERF_RESOURCE_CAPTURE_TYPE eType; ++ IMG_PID uPID; ++ IMG_UINT32 ui32ContextID; ++ IMG_UINT32 ui32FrameNum; ++ IMG_UINT32 ui32CapturedTaskJobRef; /* The job ref of 
the HW task that emitted the data */ ++ IMG_INT32 eClientModule; /* RGX_HWPERF_CLIENT_API - ID that the capture is originating from. */ ++ RGX_RESOURCE_CAPTURE_DETAIL uDetail; /* eType determines the value of the union */ ++} RGX_RESOURCE_CAPTURE_INFO, *PRGX_RESOURCE_CAPTURE_INFO; ++ ++#define RGX_RESOURCE_CAPTURE_INFO_BASE_SIZE() offsetof(RGX_RESOURCE_CAPTURE_INFO, uDetail) ++ ++/*! Tile Lifetime Tracking header size. Only available if ++ * RGX_FEATURE_ISP_TILE_LIFETIME_TRACKING is present and enabled via ++ * SUPPORT_TLT_PERF ++ */ ++#define RGX_TLT_HARDWARE_HDR_SIZE (16U) ++ ++/* PVRSRVGetHWPerfResourceCaptureResult */ ++typedef enum ++{ ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NONE = 0, ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK, /* We got data ok, expect more packets for this request. */ ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_NOT_READY, /* Signals a timeout on the connection - no data available yet. */ ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_SUCCESS, /* The request completed successfully, signals the end of packets for the request. */ ++ RGX_HWPERF_RESOURCE_CAPTURE_RESULT_COMPLETE_FAILURE /* The request failed, signals the end of packets for the request. */ ++} RGX_HWPERF_RESOURCE_CAPTURE_RESULT_STATUS; ++ ++typedef struct ++{ ++ IMG_PID uPID; /* In case of a failed request pass the caller the PID and context ID. */ ++ IMG_UINT32 ui32CtxID; ++ RGX_RESOURCE_CAPTURE_INFO *psInfo; /* Various meta-data regarding the captured resource which aid the requester when, ++ unpacking the resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ ++ IMG_BYTE *pbData; /* Buffer containing the captured resource data, valid if RGX_HWPERF_RESOURCE_CAPTURE_RESULT_OK is returned. */ ++} RGX_RESOURCE_CAPTURE_RESULT; ++ ++/*! This type is a union of packet payload data structures associated with ++ * various FW and Host events */ ++typedef union ++{ ++ RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data, ++ events ``0x01-0x06`` */ ++ RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data, ++ events ``0x07-0x19``, ``0x28-0x29`` */ ++ RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet ++ data, events ``0x1A`` */ ++ RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state ++ change event packet data, ++ events ``0x1B`` */ ++ RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event ++ packet data, ++ events ``0x20-0x22`` */ ++ RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data, ++ events ``0x23`` */ ++ RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data, ++ events ``0x30-0x31`` */ ++ RGX_HWPERF_DVFS_DATA sDVFS; /*!< DVFS activity data, ++ events ``0x32`` */ ++ RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data, events ``0x38`` */ ++ RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event ++ packet data, ++ events ``0x39`` */ ++ /* */ ++ RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data, ++ events ``0x01`` (Host) */ ++ RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data, ++ events ``0x02`` (Host) */ ++ RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data, ++ events ``0x03`` (Host) */ ++ RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data, ++ events ``0x04`` (Host) */ ++ RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data, ++ events ``0x05`` (Host) */ ++ RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data, ++ events ``0x06`` (Host) */ ++ RGX_HWPERF_HOST_DEV_INFO_DATA sHDEVINFO; /*!< Host device info data, ++ events ``0x07`` (Host) */ ++ RGX_HWPERF_HOST_INFO_DATA sHINFO; /*!< Host info data, ++ events ``0x08`` (Host) */ ++ 
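++ /* The members below complete the Host stream events. A consumer
++ * selects the member from the event type in the packet header
++ * (RGX_HWPERF_GET_TYPE()) together with the stream ID
++ * (RGX_HWPERF_GET_STREAM_ID()), e.g. sWAIT for a Host
++ * RGX_HWPERF_HOST_SYNC_FENCE_WAIT event. */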
RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA sWAIT; /*!< Host fence-wait data, ++ events ``0x09`` (Host) */ ++ RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA sSWTLADV; /*!< Host SW-timeline advance ++ data, events ``0x0A`` (Host) */ ++ RGX_HWPERF_HOST_CLIENT_INFO_DATA sHClientInfo; /*!< Host client info, ++ events ``0x0B`` (Host) */ ++ ++} RGX_HWPERF_V2_PACKET_DATA, *RGX_PHWPERF_V2_PACKET_DATA; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_DATA); ++ ++#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR)))) ++ ++#define RGX_HWPERF_GET_DVFS_EVENT_TYPE_PTR(_packet_addr) \ ++ ((RGX_HWPERF_DVFS_EV*) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR) + offsetof(RGX_HWPERF_DVFS_DATA,eEventType)))) ++ ++/****************************************************************************** ++ * API Types ++ *****************************************************************************/ ++ ++/*! Counter block IDs for all the hardware blocks with counters. ++ * Directly addressable blocks must have a value between 0..15 [0..0xF]. ++ * Indirect groups have following encoding: ++ * First hex digit (LSB) represents a unit number within the group ++ * and the second hex digit represents the group number. ++ * Group 0 is the direct group, all others are indirect groups. ++ */ ++typedef IMG_UINT32 RGX_HWPERF_CNTBLK_ID; ++ ++/*! Directly addressable counter blocks */ ++#if defined(DOXYGEN) ++/*! _RGX_HWPERF_CNTBLK_ID */ ++#endif ++#define RGX_CNTBLK_ID_TA 0x0000U ++#define RGX_CNTBLK_ID_RASTER 0x0001U /*!< Non-cluster grouping cores */ ++#define RGX_CNTBLK_ID_HUB 0x0002U /*!< Non-cluster grouping cores */ ++#define RGX_CNTBLK_ID_TORNADO 0x0003U /*!< XT cores */ ++#define RGX_CNTBLK_ID_JONES 0x0004U /*!< S7 cores */ ++#if defined(RGX_FEATURE_HWPERF_OCEANIC) ++#define RGX_CNTBLK_ID_DIRECT_LAST 0x0003U /*!< Indirect blocks start from here */ ++#else ++#define RGX_CNTBLK_ID_DIRECT_LAST 0x0005U /*!< Indirect blocks start from here */ ++#endif /* defined(RGX_FEATURE_HWPERF_OCEANIC) */ ++ ++#define RGX_CNTBLK_ID_BF_DEPRECATED 0x0005U /*!< Doppler unit (DEPRECATED) */ ++#define RGX_CNTBLK_ID_BT_DEPRECATED 0x0006U /*!< Doppler unit (DEPRECATED) */ ++#define RGX_CNTBLK_ID_RT_DEPRECATED 0x0007U /*!< Doppler unit (DEPRECATED) */ ++#define RGX_CNTBLK_ID_SH_DEPRECATED 0x0008U /*!< Ray tracing unit (DEPRECATED) */ ++ ++ ++/*! Indirectly addressable counter blocks. 
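++ * For example, RGX_CNTBLK_ID_TPU_MCU2 (0x0012) encodes unit 2 within
++ * indirect group 1; see RGX_CNTBLK_ID_UNIT_MASK and
++ * RGX_CNTBLK_ID_GROUP_MASK below.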
DA blocks indicate counter blocks ++ * where the counter registers are directly accessible ++ */ ++#define RGX_CNTBLK_ID_TPU_MCU0 0x0010U /*!< Addressable by Dust */ ++#define RGX_CNTBLK_ID_TPU_MCU0_DA 0x8010U ++#define RGX_CNTBLK_ID_TPU_MCU1 0x0011U ++#define RGX_CNTBLK_ID_TPU_MCU1_DA 0x8011U ++#define RGX_CNTBLK_ID_TPU_MCU2 0x0012U ++#define RGX_CNTBLK_ID_TPU_MCU2_DA 0x8012U ++#define RGX_CNTBLK_ID_TPU_MCU3 0x0013U ++#define RGX_CNTBLK_ID_TPU_MCU3_DA 0x8013U ++#define RGX_CNTBLK_ID_TPU_MCU4 0x0014U ++#define RGX_CNTBLK_ID_TPU_MCU4_DA 0x8014U ++#define RGX_CNTBLK_ID_TPU_MCU5 0x0015U ++#define RGX_CNTBLK_ID_TPU_MCU5_DA 0x8015U ++#define RGX_CNTBLK_ID_TPU_MCU6 0x0016U ++#define RGX_CNTBLK_ID_TPU_MCU6_DA 0x8016U ++#define RGX_CNTBLK_ID_TPU_MCU7 0x0017U ++#define RGX_CNTBLK_ID_TPU_MCU7_DA 0x8017U ++#define RGX_CNTBLK_ID_TPU_MCU_ALL 0x4010U ++#define RGX_CNTBLK_ID_TPU_MCU_ALL_DA 0xC010U ++ ++#define RGX_CNTBLK_ID_USC0 0x0020U /*!< Addressable by Cluster */ ++#define RGX_CNTBLK_ID_USC0_DA 0x8020U ++#define RGX_CNTBLK_ID_USC1 0x0021U ++#define RGX_CNTBLK_ID_USC1_DA 0x8021U ++#define RGX_CNTBLK_ID_USC2 0x0022U ++#define RGX_CNTBLK_ID_USC2_DA 0x8022U ++#define RGX_CNTBLK_ID_USC3 0x0023U ++#define RGX_CNTBLK_ID_USC3_DA 0x8023U ++#define RGX_CNTBLK_ID_USC4 0x0024U ++#define RGX_CNTBLK_ID_USC4_DA 0x8024U ++#define RGX_CNTBLK_ID_USC5 0x0025U ++#define RGX_CNTBLK_ID_USC5_DA 0x8025U ++#define RGX_CNTBLK_ID_USC6 0x0026U ++#define RGX_CNTBLK_ID_USC6_DA 0x8026U ++#define RGX_CNTBLK_ID_USC7 0x0027U ++#define RGX_CNTBLK_ID_USC7_DA 0x8027U ++#define RGX_CNTBLK_ID_USC8 0x0028U ++#define RGX_CNTBLK_ID_USC8_DA 0x8028U ++#define RGX_CNTBLK_ID_USC9 0x0029U ++#define RGX_CNTBLK_ID_USC9_DA 0x8029U ++#define RGX_CNTBLK_ID_USC10 0x002AU ++#define RGX_CNTBLK_ID_USC10_DA 0x802AU ++#define RGX_CNTBLK_ID_USC11 0x002BU ++#define RGX_CNTBLK_ID_USC11_DA 0x802BU ++#define RGX_CNTBLK_ID_USC12 0x002CU ++#define RGX_CNTBLK_ID_USC12_DA 0x802CU ++#define RGX_CNTBLK_ID_USC13 0x002DU ++#define RGX_CNTBLK_ID_USC13_DA 0x802DU ++#define RGX_CNTBLK_ID_USC14 0x002EU ++#define RGX_CNTBLK_ID_USC14_DA 0x802EU ++#define RGX_CNTBLK_ID_USC15 0x002FU ++#define RGX_CNTBLK_ID_USC15_DA 0x802FU ++#define RGX_CNTBLK_ID_USC_ALL 0x4020U ++#define RGX_CNTBLK_ID_USC_ALL_DA 0xC020U ++ ++#define RGX_CNTBLK_ID_TEXAS0 0x0030U /*!< Addressable by Phantom in XT, Dust in S7 */ ++#define RGX_CNTBLK_ID_TEXAS1 0x0031U ++#define RGX_CNTBLK_ID_TEXAS2 0x0032U ++#define RGX_CNTBLK_ID_TEXAS3 0x0033U ++#define RGX_CNTBLK_ID_TEXAS4 0x0034U ++#define RGX_CNTBLK_ID_TEXAS5 0x0035U ++#define RGX_CNTBLK_ID_TEXAS6 0x0036U ++#define RGX_CNTBLK_ID_TEXAS7 0x0037U ++#define RGX_CNTBLK_ID_TEXAS_ALL 0x4030U ++ ++#define RGX_CNTBLK_ID_RASTER0 0x0040U /*!< Addressable by Phantom, XT only */ ++#define RGX_CNTBLK_ID_RASTER1 0x0041U ++#define RGX_CNTBLK_ID_RASTER2 0x0042U ++#define RGX_CNTBLK_ID_RASTER3 0x0043U ++#define RGX_CNTBLK_ID_RASTER_ALL 0x4040U ++ ++#define RGX_CNTBLK_ID_BLACKPEARL0 0x0050U /*!< Addressable by Phantom, S7, only */ ++#define RGX_CNTBLK_ID_BLACKPEARL1 0x0051U ++#define RGX_CNTBLK_ID_BLACKPEARL2 0x0052U ++#define RGX_CNTBLK_ID_BLACKPEARL3 0x0053U ++#define RGX_CNTBLK_ID_BLACKPEARL_ALL 0x4050U ++ ++#define RGX_CNTBLK_ID_PBE0 0x0060U /*!< Addressable by Cluster in S7 and PBE2_IN_XE */ ++#define RGX_CNTBLK_ID_PBE1 0x0061U ++#define RGX_CNTBLK_ID_PBE2 0x0062U ++#define RGX_CNTBLK_ID_PBE3 0x0063U ++#define RGX_CNTBLK_ID_PBE4 0x0064U ++#define RGX_CNTBLK_ID_PBE5 0x0065U ++#define RGX_CNTBLK_ID_PBE6 0x0066U ++#define RGX_CNTBLK_ID_PBE7 0x0067U ++#define 
RGX_CNTBLK_ID_PBE8 0x0068U ++#define RGX_CNTBLK_ID_PBE9 0x0069U ++#define RGX_CNTBLK_ID_PBE10 0x006AU ++#define RGX_CNTBLK_ID_PBE11 0x006BU ++#define RGX_CNTBLK_ID_PBE12 0x006CU ++#define RGX_CNTBLK_ID_PBE13 0x006DU ++#define RGX_CNTBLK_ID_PBE14 0x006EU ++#define RGX_CNTBLK_ID_PBE15 0x006FU ++#define RGX_CNTBLK_ID_PBE_ALL 0x4060U ++ ++#define RGX_CNTBLK_ID_LAST 0x0070U /*!< End of PBE block */ ++ ++#define RGX_CNTBLK_ID_BX_TU0_DEPRECATED 0x0070U /*!< Doppler unit, DEPRECATED */ ++#define RGX_CNTBLK_ID_BX_TU1_DEPRECATED 0x0071U ++#define RGX_CNTBLK_ID_BX_TU2_DEPRECATED 0x0072U ++#define RGX_CNTBLK_ID_BX_TU3_DEPRECATED 0x0073U ++#define RGX_CNTBLK_ID_BX_TU_ALL_DEPRECATED 0x4070U ++ ++#define RGX_CNTBLK_ID_CUSTOM0 0x70F0U ++#define RGX_CNTBLK_ID_CUSTOM1 0x70F1U ++#define RGX_CNTBLK_ID_CUSTOM2 0x70F2U ++#define RGX_CNTBLK_ID_CUSTOM3 0x70F3U ++#define RGX_CNTBLK_ID_CUSTOM4_FW 0x70F4U /*!< Custom block used for getting statistics held in the FW */ ++#define RGX_CNTBLK_ID_CUSTOM_MASK 0x70FFU ++ ++ ++/* Masks for the counter block ID*/ ++#define RGX_CNTBLK_ID_UNIT_MASK (0x000FU) ++#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U) ++#define RGX_CNTBLK_ID_GROUP_SHIFT (4U) ++#define RGX_CNTBLK_ID_MC_GPU_MASK (0x0F00U) ++#define RGX_CNTBLK_ID_MC_GPU_SHIFT (8U) ++#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U) ++#define RGX_CNTBLK_ID_DA_MASK (0x8000U) /*!< Block with directly accessible counter registers */ ++ ++#define RGX_CNTBLK_INDIRECT_COUNT(_class, _n) ((IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## _n) - (IMG_UINT32)(RGX_CNTBLK_ID_ ## _class ## 0) + 1u) ++ ++/*! The number of layout blocks defined with configurable multiplexed ++ * performance counters, hence excludes custom counter blocks. ++ */ ++#if defined(RGX_FEATURE_HWPERF_OCEANIC) ++#define RGX_HWPERF_MAX_MUX_BLKS (\ ++ (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ ++ RGX_CNTBLK_INDIRECT_COUNT(PBE, 0) ) ++ ++#define RGX_HWPERF_MAX_DA_BLKS (\ ++ (IMG_UINT32)RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 0)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 0) ) ++ ++#define RGX_HWPERF_MAX_DEFINED_BLKS (\ ++ (IMG_UINT32)RGX_HWPERF_MAX_MUX_BLKS +\ ++ RGX_HWPERF_MAX_DA_BLKS ) ++#else ++#define RGX_HWPERF_MAX_DEFINED_BLKS (\ ++ (IMG_UINT32)RGX_CNTBLK_ID_DIRECT_LAST +\ ++ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(USC, 15)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3)+\ ++ RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) ) ++#define RGX_HWPERF_MAX_MUX_BLKS (\ ++ RGX_HWPERF_MAX_DEFINED_BLKS ) ++#endif ++ ++static_assert( ++ ((RGX_CNTBLK_ID_DIRECT_LAST + ((RGX_CNTBLK_ID_LAST & RGX_CNTBLK_ID_GROUP_MASK) >> RGX_CNTBLK_ID_GROUP_SHIFT)) <= RGX_HWPERF_MAX_BVNC_BLOCK_LEN), ++ "RGX_HWPERF_MAX_BVNC_BLOCK_LEN insufficient"); ++ ++#define RGX_HWPERF_EVENT_MASK_VALUE(e) (IMG_UINT64_C(1) << (IMG_UINT32)(e)) ++ ++#define RGX_CUSTOM_FW_CNTRS \ ++ X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ ++ \ ++ X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \ ++ \ ++ X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ ++ 
RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ ++ \ ++ X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \ ++ \ ++ X(ISP_TILES_IN_FLIGHT, 0x4, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DSPMKICK)) ++ ++/*! Counter IDs for the firmware held statistics */ ++typedef enum ++{ ++#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id, ++ RGX_CUSTOM_FW_CNTRS ++#undef X ++ ++ /* always the last entry in the list */ ++ RGX_CUSTOM_FW_CNTR_LAST ++} RGX_HWPERF_CUSTOM_FW_CNTR_ID; ++ ++/*! Identifier for each counter in a performance counting module */ ++typedef IMG_UINT32 RGX_HWPERF_CNTBLK_COUNTER_ID; ++ ++#define RGX_CNTBLK_COUNTER0_ID 0U ++#define RGX_CNTBLK_COUNTER1_ID 1U ++#define RGX_CNTBLK_COUNTER2_ID 2U ++#define RGX_CNTBLK_COUNTER3_ID 3U ++#define RGX_CNTBLK_COUNTER4_ID 4U ++#define RGX_CNTBLK_COUNTER5_ID 5U ++ /* MAX value used in server handling of counter config arrays */ ++#define RGX_CNTBLK_MUX_COUNTERS_MAX 6U ++ ++ ++/* sets all the bits from bit _b1 to _b2, in a IMG_UINT64 type */ ++#define MASK_RANGE_IMPL(b1, b2) ((IMG_UINT64)((IMG_UINT64_C(1) << ((IMG_UINT32)(b2)-(IMG_UINT32)(b1) + 1U)) - 1U) << (IMG_UINT32)(b1)) ++#define MASK_RANGE(R) MASK_RANGE_IMPL(R##_FIRST_TYPE, R##_LAST_TYPE) ++#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) (IMG_UINT32_C(1) << (e)) ++ ++/*! Mask macros for use with RGXCtrlHWPerf() API. ++ */ ++#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000)) ++#define RGX_HWPERF_EVENT_MASK_DEFAULT RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_FWACT) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG) | \ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) ++#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF)) ++ ++/*! HWPerf Firmware event masks ++ * @par ++ * All FW Start/End/Debug (SED) events. */ ++#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE)) ++ ++#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) ++#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED)) ++/*! All FW events. */ ++#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\ ++ RGX_HWPERF_EVENT_MASK_FW_UFO |\ ++ RGX_HWPERF_EVENT_MASK_FW_CSW) ++ ++/*! HW Periodic events (1ms interval). */ ++#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC)) ++/*! All HW Kick/Finish events. */ ++#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\ ++ MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\ ++ ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC)) ++ ++#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\ ++ RGX_HWPERF_EVENT_MASK_HW_PERIODIC) ++ ++#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE)) ++ ++#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\ ++ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG)) ++ ++/*! HWPerf Host event masks ++ */ ++#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ)) ++#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO)) ++#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC)) ++ ++ ++/*! 
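++ An example composition (RGX_HWPERF_EVENT_MASK_EXAMPLE_PROFILE is
++ illustrative only, not one of the predefined masks): a session tracing
++ firmware activity together with hardware kick/finish events can OR the
++ masks above into a single filter word for RGXCtrlHWPerf():
++ */
++#define RGX_HWPERF_EVENT_MASK_EXAMPLE_PROFILE (RGX_HWPERF_EVENT_MASK_ALL_FW |\
++ RGX_HWPERF_EVENT_MASK_HW_KICKFINISH)
++
++/*!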
Type used in the RGX API RGXConfigMuxHWPerfCounters() */ ++typedef struct ++{ ++ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ ++ IMG_UINT16 ui16BlockID; ++ ++ /*! 4 or 6 LSBs used to select counters to configure in this block. */ ++ IMG_UINT8 ui8CounterSelect; ++ ++ /*! 4 or 6 LSBs used as MODE bits for the counters in the group. */ ++ IMG_UINT8 ui8Mode; ++ ++ /*! 5 or 6 LSBs used as the GROUP_SELECT value for the counter. */ ++ IMG_UINT8 aui8GroupSelect[RGX_CNTBLK_MUX_COUNTERS_MAX]; ++ ++ /*! 16 LSBs used as the BIT_SELECT value for the counter. */ ++ IMG_UINT16 aui16BitSelect[RGX_CNTBLK_MUX_COUNTERS_MAX]; ++ ++ /*! 14 LSBs used as the BATCH_MAX value for the counter. */ ++ IMG_UINT32 aui32BatchMax[RGX_CNTBLK_MUX_COUNTERS_MAX]; ++ ++ /*! 14 LSBs used as the BATCH_MIN value for the counter. */ ++ IMG_UINT32 aui32BatchMin[RGX_CNTBLK_MUX_COUNTERS_MAX]; ++} UNCACHED_ALIGN RGX_HWPERF_CONFIG_MUX_CNTBLK; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_MUX_CNTBLK); ++ ++/*! Type used in the RGX API RGXConfigHWPerfCounters() */ ++typedef struct ++{ ++ /*! Reserved for future use */ ++ IMG_UINT32 ui32Reserved; ++ ++ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */ ++ IMG_UINT16 ui16BlockID; ++ ++ /*! Number of configured counters within this block */ ++ IMG_UINT16 ui16NumCounters; ++ ++ /*! Counter register values */ ++ IMG_UINT16 ui16Counters[RGX_CNTBLK_COUNTERS_MAX]; ++} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK; ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK); ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_HWPERF_H_ */ ++ ++/****************************************************************************** ++ End of file ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_hwperf_common.h b/drivers/gpu/drm/img-rogue/rgx_hwperf_common.h +new file mode 100644 +index 000000000000..0635a51578a8 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_hwperf_common.h +@@ -0,0 +1,482 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX HWPerf and Debug Types and Defines Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Common data types definitions for hardware performance API ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGX_HWPERF_COMMON_H_ ++#define RGX_HWPERF_COMMON_H_ ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++/* These structures are used on both GPU and CPU and must be a size that is a ++ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities at ++ * 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this. ++ */ ++ ++/****************************************************************************** ++ * Includes and Defines ++ *****************************************************************************/ ++ ++#include "img_types.h" ++#include "img_defs.h" ++ ++#include "rgx_common_asserts.h" ++#include "pvrsrv_tlcommon.h" ++ ++ ++/****************************************************************************** ++ * Packet Event Type Enumerations ++ *****************************************************************************/ ++ ++/*! Type used to encode the event that generated the packet. ++ * NOTE: When this type is updated the corresponding hwperfbin2json tool ++ * source needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will ++ * also need updating when adding new types. ++ * ++ * @par ++ * The event type values are incrementing integers for use as a shift ordinal ++ * in the event filtering process at the point events are generated. ++ * This scheme thus implies a limit of 63 event types. ++ */ ++ ++typedef IMG_UINT32 RGX_HWPERF_EVENT_TYPE; ++ ++#define RGX_HWPERF_INVALID 0x00U /*!< Invalid. Reserved value. */ ++ ++/*! FW types 0x01..0x06 */ ++#define RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE 0x01U ++ ++#define RGX_HWPERF_FW_BGSTART 0x01U /*!< Background task processing start */ ++#define RGX_HWPERF_FW_BGEND 0x02U /*!< Background task end */ ++#define RGX_HWPERF_FW_IRQSTART 0x03U /*!< IRQ task processing start */ ++ ++#define RGX_HWPERF_FW_IRQEND 0x04U /*!< IRQ task end */ ++#define RGX_HWPERF_FW_DBGSTART 0x05U /*!< Debug event start */ ++#define RGX_HWPERF_FW_DBGEND 0x06U /*!< Debug event end */ ++ ++#define RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE 0x06U ++ ++/*! 
HW types 0x07..0x19 */
++#define RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE 0x07U
++
++#define RGX_HWPERF_HW_PMOOM_TAPAUSE 0x07U /*!< TA Pause at PM Out of Memory */
++
++#define RGX_HWPERF_HW_TAKICK 0x08U /*!< TA task started */
++#define RGX_HWPERF_HW_TAFINISHED 0x09U /*!< TA task finished */
++#define RGX_HWPERF_HW_3DTQKICK 0x0AU /*!< 3D TQ started */
++#define RGX_HWPERF_HW_3DKICK 0x0BU /*!< 3D task started */
++#define RGX_HWPERF_HW_3DFINISHED 0x0CU /*!< 3D task finished */
++#define RGX_HWPERF_HW_CDMKICK 0x0DU /*!< CDM task started */
++#define RGX_HWPERF_HW_CDMFINISHED 0x0EU /*!< CDM task finished */
++#define RGX_HWPERF_HW_TLAKICK 0x0FU /*!< TLA task started */
++#define RGX_HWPERF_HW_TLAFINISHED 0x10U /*!< TLA task finished */
++#define RGX_HWPERF_HW_3DSPMKICK 0x11U /*!< 3D SPM task started */
++#define RGX_HWPERF_HW_PERIODIC 0x12U /*!< Periodic event with updated HW counters */
++#define RGX_HWPERF_HW_RTUKICK 0x13U /*!< Reserved, future use */
++#define RGX_HWPERF_HW_RTUFINISHED 0x14U /*!< Reserved, future use */
++#define RGX_HWPERF_HW_SHGKICK 0x15U /*!< Reserved, future use */
++#define RGX_HWPERF_HW_SHGFINISHED 0x16U /*!< Reserved, future use */
++#define RGX_HWPERF_HW_3DTQFINISHED 0x17U /*!< 3D TQ finished */
++#define RGX_HWPERF_HW_3DSPMFINISHED 0x18U /*!< 3D SPM task finished */
++
++#define RGX_HWPERF_HW_PMOOM_TARESUME 0x19U /*!< TA Resume after PM Out of Memory */
++
++/*! HW_EVENT_RANGE0 used up. Use next empty range below to add new hardware events */
++#define RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE 0x19U
++
++/*! other types 0x1A..0x1F */
++#define RGX_HWPERF_CLKS_CHG 0x1AU /*!< Clock speed change in GPU */
++#define RGX_HWPERF_GPU_STATE_CHG 0x1BU /*!< GPU work state change */
++
++/*! power types 0x20..0x27 */
++#define RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE 0x20U
++#define RGX_HWPERF_PWR_EST_REQUEST 0x20U /*!< Power estimate requested (via GPIO) */
++#define RGX_HWPERF_PWR_EST_READY 0x21U /*!< Power estimate inputs ready */
++#define RGX_HWPERF_PWR_EST_RESULT 0x22U /*!< Power estimate result calculated */
++#define RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE 0x22U
++
++#define RGX_HWPERF_PWR_CHG 0x23U /*!< Power state change */
++
++/*! HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */
++#define RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE 0x28U
++
++#define RGX_HWPERF_HW_TDMKICK 0x28U /*!< TDM task started */
++#define RGX_HWPERF_HW_TDMFINISHED 0x29U /*!< TDM task finished */
++#define RGX_HWPERF_HW_NULLKICK 0x2AU /*!< NULL event */
++
++#define RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE 0x2AU
++
++/*! context switch types 0x30..0x31 */
++#define RGX_HWPERF_CSW_START 0x30U /*!< HW context store started */
++#define RGX_HWPERF_CSW_FINISHED 0x31U /*!< HW context store finished */
++
++/*! DVFS events */
++#define RGX_HWPERF_DVFS 0x32U /*!< Dynamic voltage/frequency scaling events */
++
++/*! firmware misc 0x38..0x39 */
++#define RGX_HWPERF_UFO 0x38U /*!< FW UFO Check / Update */
++#define RGX_HWPERF_FWACT 0x39U /*!< FW Activity notification */
++
++/*! last */
++#define RGX_HWPERF_LAST_TYPE 0x3BU
++
++/*! This enumeration must have a value that is a power of two as it is
++ * used in masks and a filter bit field (currently 64 bits long).
++ */
++#define RGX_HWPERF_MAX_TYPE 0x40U
++
++static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types");
++
++/*!
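++ Each event type above is a shift ordinal into the 64-bit firmware event
++ filter, so the mask for a single event is one bit; e.g. a filter that
++ passes only TA kick/finish events is
++ (IMG_UINT64_C(1) << RGX_HWPERF_HW_TAKICK) |
++ (IMG_UINT64_C(1) << RGX_HWPERF_HW_TAFINISHED).
++ */
++
++/*!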
Macro used to check if an event type ID is present in the known set of hardware type events */ ++#define HWPERF_PACKET_IS_HW_TYPE(_etype) (((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \ ++ ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE)) ++ ++/*! Macro used to check if an event type ID is present in the known set of firmware type events */ ++#define HWPERF_PACKET_IS_FW_TYPE(_etype) \ ++ ((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE && \ ++ (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE) ++ ++ ++typedef enum { ++ RGX_HWPERF_HOST_INVALID = 0x00, /*!< Invalid, do not use. */ ++ RGX_HWPERF_HOST_ENQ = 0x01, /*!< ``0x01`` Kernel driver has queued GPU work. ++ See RGX_HWPERF_HOST_ENQ_DATA */ ++ RGX_HWPERF_HOST_UFO = 0x02, /*!< ``0x02`` UFO updated by the driver. ++ See RGX_HWPERF_HOST_UFO_DATA */ ++ RGX_HWPERF_HOST_ALLOC = 0x03, /*!< ``0x03`` Resource allocated. ++ See RGX_HWPERF_HOST_ALLOC_DATA */ ++ RGX_HWPERF_HOST_CLK_SYNC = 0x04, /*!< ``0x04`` GPU / Host clocks correlation data. ++ See RGX_HWPERF_HOST_CLK_SYNC_DATA */ ++ RGX_HWPERF_HOST_FREE = 0x05, /*!< ``0x05`` Resource freed, ++ See RGX_HWPERF_HOST_FREE_DATA */ ++ RGX_HWPERF_HOST_MODIFY = 0x06, /*!< ``0x06`` Resource modified / updated. ++ See RGX_HWPERF_HOST_MODIFY_DATA */ ++ RGX_HWPERF_HOST_DEV_INFO = 0x07, /*!< ``0x07`` Device Health status. ++ See RGX_HWPERF_HOST_DEV_INFO_DATA */ ++ RGX_HWPERF_HOST_INFO = 0x08, /*!< ``0x08`` Device memory usage information. ++ See RGX_HWPERF_HOST_INFO_DATA */ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT = 0x09, /*!< ``0x09`` Wait for sync event. ++ See RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA */ ++ RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE = 0x0A, /*!< ``0x0A`` Software timeline advanced. ++ See RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA */ ++ RGX_HWPERF_HOST_CLIENT_INFO = 0x0B, /*!< ``0x0B`` Additional client info. ++ See RGX_HWPERF_HOST_CLIENT_INFO_DATA */ ++ ++ /*! last */ ++ RGX_HWPERF_HOST_LAST_TYPE, ++ ++ /*! This enumeration must have a value that is a power of two as it is ++ * used in masks and a filter bit field (currently 32 bits long). ++ */ ++ RGX_HWPERF_HOST_MAX_TYPE = 0x20 ++} RGX_HWPERF_HOST_EVENT_TYPE; ++ ++/*!< The event type values are incrementing integers for use as a shift ordinal ++ * in the event filtering process at the point events are generated. ++ * This scheme thus implies a limit of 31 event types. ++ */ ++static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types"); ++ ++ ++/****************************************************************************** ++ * Packet Header Format Version 2 Types ++ *****************************************************************************/ ++ ++/*! Major version number of the protocol in operation ++ */ ++#define RGX_HWPERF_V2_FORMAT 2 ++ ++/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet ++ */ ++#define HWPERF_PACKET_V2_SIG 0x48575032 ++ ++/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet ++ */ ++#define HWPERF_PACKET_V2A_SIG 0x48575041 ++ ++/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet ++ */ ++#define HWPERF_PACKET_V2B_SIG 0x48575042 ++ ++/*! 
Signature ASCII pattern 'HWPC' found in the first word of a HWPerfV2c packet ++ */ ++#define HWPERF_PACKET_V2C_SIG 0x48575043 ++ ++#define HWPERF_PACKET_ISVALID(_val) (((_val) == HWPERF_PACKET_V2_SIG) || ((_val) == HWPERF_PACKET_V2A_SIG) || ((_val) == HWPERF_PACKET_V2B_SIG) || ((_val) == HWPERF_PACKET_V2C_SIG)) ++/*!< Checks that the packet signature is one of the supported versions */ ++ ++/*! Type defines the HWPerf packet header common to all events. */ ++typedef struct ++{ ++ IMG_UINT32 ui32Sig; /*!< Always the value HWPERF_PACKET_SIG */ ++ IMG_UINT32 ui32Size; /*!< Overall packet size in bytes */ ++ IMG_UINT32 eTypeId; /*!< Event type information field */ ++ IMG_UINT32 ui32Ordinal; /*!< Sequential number of the packet */ ++ IMG_UINT64 ui64Timestamp; /*!< Event timestamp */ ++} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR; ++ ++RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp); ++ ++RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR); ++ ++ ++/*! Mask for use with the IMG_UINT32 ui32Size header field */ ++#define RGX_HWPERF_SIZE_MASK 0xFFFFU ++ ++/*! This macro defines an upper limit to which the size of the largest variable ++ * length HWPerf packet must fall within, currently 3KB. This constant may be ++ * used to allocate a buffer to hold one packet. ++ * This upper limit is policed by packet producing code. ++ */ ++#define RGX_HWPERF_MAX_PACKET_SIZE 0xC00U ++ ++/*! Defines an upper limit to the size of a variable length packet payload. ++ */ ++#define RGX_HWPERF_MAX_PAYLOAD_SIZE ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\ ++ sizeof(RGX_HWPERF_V2_PACKET_HDR))) ++ ++/*! Macro which takes a structure name and provides the packet size for ++ * a fixed size payload packet, rounded up to 8 bytes to align packets ++ * for 64 bit architectures. */ ++#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT)))) ++ ++/*! Macro which takes the number of bytes written in the data payload of a ++ * packet for a variable size payload packet, rounded up to 8 bytes to ++ * align packets for 64 bit architectures. */ ++#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&((IMG_UINT32)sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN((_size), PVRSRVTL_PACKET_ALIGNMENT)))) ++ ++/*! Macro to obtain the size of the packet */ ++#define RGX_HWPERF_GET_SIZE(_packet_addr) ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK)) ++ ++/*! Macro to obtain the size of the packet data */ ++#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr) (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR)) ++ ++/*! Masks for use with the IMG_UINT32 eTypeId header field */ ++#define RGX_HWPERF_TYPEID_MASK 0x0007FFFFU ++#define RGX_HWPERF_TYPEID_EVENT_MASK 0x00007FFFU ++#define RGX_HWPERF_TYPEID_THREAD_MASK 0x00008000U ++#define RGX_HWPERF_TYPEID_STREAM_MASK 0x00070000U ++#define RGX_HWPERF_TYPEID_META_DMA_MASK 0x00080000U ++#define RGX_HWPERF_TYPEID_M_CORE_MASK 0x00100000U ++#define RGX_HWPERF_TYPEID_OSID_MASK 0x07000000U ++ ++/*! Meta thread macros for encoding the ID into the type field of a packet */ ++#define RGX_HWPERF_META_THREAD_SHIFT 15U ++#define RGX_HWPERF_META_THREAD_ID0 0x0U /*!< Meta Thread 0 ID */ ++#define RGX_HWPERF_META_THREAD_ID1 0x1U /*!< Meta Thread 1 ID */ ++/*! Obsolete, kept for source compatibility */ ++#define RGX_HWPERF_META_THREAD_MASK 0x1U ++/*! 
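++ For example, a packet generated on META thread 1 carries
++ (RGX_HWPERF_META_THREAD_ID1 << RGX_HWPERF_META_THREAD_SHIFT), i.e. bit 15,
++ in its eTypeId word, while a thread 0 packet leaves that bit clear.
++ */
++
++/*!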
Stream ID macros for encoding the ID into the type field of a packet */ ++#define RGX_HWPERF_STREAM_SHIFT 16U ++/*! Meta DMA macro for encoding how the packet was generated into the type field of a packet */ ++#define RGX_HWPERF_META_DMA_SHIFT 19U ++/*! Bit-shift macro used for encoding multi-core data into the type field of a packet */ ++#define RGX_HWPERF_M_CORE_SHIFT 20U ++/*! OSID bit-shift macro used for encoding OSID into type field of a packet */ ++#define RGX_HWPERF_OSID_SHIFT 24U ++typedef enum { ++ RGX_HWPERF_STREAM_ID0_FW, /*!< Events from the Firmware/GPU */ ++ RGX_HWPERF_STREAM_ID1_HOST, /*!< Events from the Server host driver component */ ++ RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */ ++ RGX_HWPERF_STREAM_ID_LAST, ++} RGX_HWPERF_STREAM_ID; ++ ++/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */ ++static_assert(((IMG_UINT32)RGX_HWPERF_STREAM_ID_LAST - 1U) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT), ++ "Too many HWPerf stream IDs."); ++ ++/*! Compile-time value used to seed the Multi-Core (MC) bit in the typeID field. ++ * Only set by RGX_FIRMWARE builds. ++ */ ++#if defined(RGX_FIRMWARE) ++# if defined(RGX_FEATURE_GPU_MULTICORE_SUPPORT) ++#define RGX_HWPERF_M_CORE_VALUE 1U /*!< 1 => Multi-core supported */ ++# else ++#define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ ++# endif ++#else ++#define RGX_HWPERF_M_CORE_VALUE 0U /*!< 0 => Multi-core not supported */ ++#endif ++ ++/*! Macros used to set the packet type and encode meta thread ID (0|1), ++ * HWPerf stream ID, multi-core capability and OSID within the typeID */ ++#define RGX_HWPERF_MAKE_TYPEID(_stream, _type, _thread, _metadma, _osid)\ ++ ((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((IMG_UINT32)(_stream) << RGX_HWPERF_STREAM_SHIFT)) | \ ++ (RGX_HWPERF_TYPEID_THREAD_MASK & ((IMG_UINT32)(_thread) << RGX_HWPERF_META_THREAD_SHIFT)) | \ ++ (RGX_HWPERF_TYPEID_EVENT_MASK & (IMG_UINT32)(_type)) | \ ++ (RGX_HWPERF_TYPEID_META_DMA_MASK & ((IMG_UINT32)(_metadma) << RGX_HWPERF_META_DMA_SHIFT)) | \ ++ (RGX_HWPERF_TYPEID_OSID_MASK & ((IMG_UINT32)(_osid) << RGX_HWPERF_OSID_SHIFT)) | \ ++ (RGX_HWPERF_TYPEID_M_CORE_MASK & ((IMG_UINT32)(RGX_HWPERF_M_CORE_VALUE) << RGX_HWPERF_M_CORE_SHIFT)))) ++ ++/*! Obtains the event type that generated the packet */ ++#define RGX_HWPERF_GET_TYPE(_packet_addr) (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK) ++ ++/*! Obtains the META Thread number that generated the packet */ ++#define RGX_HWPERF_GET_THREAD_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT)) ++ ++/*! Determines if the packet generated contains multi-core data */ ++#define RGX_HWPERF_GET_M_CORE(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_M_CORE_MASK) >> RGX_HWPERF_M_CORE_SHIFT) ++ ++/*! Obtains the guest OSID which resulted in packet generation */ ++#define RGX_HWPERF_GET_OSID(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT) ++ ++/*! Obtain stream id */ ++#define RGX_HWPERF_GET_STREAM_ID(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT)) ++ ++/*! Obtain information about how the packet was generated, which might affect payload total size */ ++#define RGX_HWPERF_GET_META_DMA_INFO(_packet_addr) (((((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_META_DMA_MASK) >> RGX_HWPERF_META_DMA_SHIFT)) ++ ++/*! 
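++ A decoding sketch; RGXExampleDecodeTypeId is hypothetical and only
++ illustrates the accessors above:
++ */
++static inline void RGXExampleDecodeTypeId(const RGX_HWPERF_V2_PACKET_HDR *psHdr,
++ IMG_UINT32 *pui32EvType,
++ IMG_UINT32 *pui32StreamId)
++{
++ *pui32EvType = RGX_HWPERF_GET_TYPE(psHdr); /* event ordinal, e.g. RGX_HWPERF_HW_TAKICK */
++ *pui32StreamId = RGX_HWPERF_GET_STREAM_ID(psHdr); /* RGX_HWPERF_STREAM_ID0_FW etc. */
++}
++
++/*!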
Obtains a typed pointer to a packet given a buffer address */ ++#define RGX_HWPERF_GET_PACKET(_buffer_addr) ((RGX_HWPERF_V2_PACKET_HDR *)(void *) (_buffer_addr)) ++/*! Obtains a typed pointer to a data structure given a packet address */ ++#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) (IMG_OFFSET_ADDR((_packet_addr), sizeof(RGX_HWPERF_V2_PACKET_HDR))) ++/*! Obtains a typed pointer to the next packet given a packet address */ ++#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), RGX_HWPERF_SIZE_MASK&((_packet_addr)->ui32Size)))) ++ ++/*! Obtains a typed pointer to a packet header given the packet data address */ ++#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR *) (IMG_OFFSET_ADDR((_packet_addr), -(IMG_INT32)sizeof(RGX_HWPERF_V2_PACKET_HDR)))) ++ ++ ++/****************************************************************************** ++ * Other Common Defines ++ *****************************************************************************/ ++ ++/*! This macro is not a real array size, but indicates the array has a variable ++ * length only known at run-time but always contains at least 1 element. The ++ * final size of the array is deduced from the size field of a packet header. ++ */ ++#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS 1U ++ ++/*! This macro is not a real array size, but indicates the array is optional ++ * and if present has a variable length only known at run-time. The final ++ * size of the array is deduced from the size field of a packet header. */ ++#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U ++ ++ ++/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */ ++#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK 0xFFFF0000U ++#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK 0x0000FFFFU ++ ++/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */ ++#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT 16U ++#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U ++ ++/*! Macro used to set the block info word as a combination of two 16-bit integers */ ++#define RGX_HWPERF_MAKE_BLKINFO(_numblks, _blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)))) ++ ++/*! Macro used to obtain the number of counter blocks present in the packet */ ++#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT) ++ ++/*! Obtains the offset of the counter block stream in the packet */ ++#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo) (((_blkinfo) & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT) ++ ++/*! This macro gets the number of blocks depending on the packet version */ ++#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks) \ ++ do { \ ++ if (HWPERF_PACKET_V2B_SIG == (_sig) || HWPERF_PACKET_V2C_SIG == (_sig)) \ ++ { \ ++ (_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo);\ ++ } \ ++ else \ ++ { \ ++ IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 1 : 3);\ ++ (_numblocks) = *(IMG_UINT16 *)(IMG_OFFSET_ADDR(&(_packet_data)->ui32WorkTarget, ui32VersionOffset)); \ ++ } \ ++ } while (0) ++ ++/*! 
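++ A traversal sketch; RGXExampleNextPacket is hypothetical and assumes the
++ caller bound-checks the returned pointer against the end of the stream
++ buffer before dereferencing further:
++ */
++static inline RGX_HWPERF_V2_PACKET_HDR *RGXExampleNextPacket(RGX_HWPERF_V2_PACKET_HDR *psPkt)
++{
++ /* Advance by the byte size recorded in the current header */
++ RGX_HWPERF_V2_PACKET_HDR *psNext = RGX_HWPERF_GET_NEXT_PACKET(psPkt);
++
++ /* Only trust the new header if its signature is a known version */
++ return HWPERF_PACKET_ISVALID(psNext->ui32Sig) ? psNext : NULL;
++}
++
++/*!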
This macro gets the counter stream pointer depending on the packet version */ ++#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr) \ ++{ \ ++ if (HWPERF_PACKET_V2B_SIG == (_sig) || HWPERF_PACKET_V2C_SIG == (_sig)) \ ++ { \ ++ (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR((_hw_packet_data), RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo))); \ ++ } \ ++ else \ ++ { \ ++ IMG_UINT32 ui32BlkStreamOffsetInWords = (((_sig) == HWPERF_PACKET_V2_SIG) ? 6 : 8); \ ++ (_cntstream_ptr) = (IMG_UINT32 *)(IMG_OFFSET_ADDR_DW((_hw_packet_data), ui32BlkStreamOffsetInWords)); \ ++ } \ ++} ++ ++/*! Masks for use with the IMG_UINT32 ui32KickInfo field */ ++#define RGX_HWPERF_KICKINFO_KICKID_MASK 0x000000FFU ++ ++/*! Shift for the Kick ID field in ui32KickInfo */ ++#define RGX_HWPERF_KICKINFO_KICKID_SHIFT 0U ++ ++/*! Macro used to set the kick info field. */ ++#define RGX_HWPERF_MAKE_KICKINFO(_kickid) ((IMG_UINT32) (RGX_HWPERF_KICKINFO_KICKID_MASK&((_kickid) << RGX_HWPERF_KICKINFO_KICKID_SHIFT))) ++ ++/*! Macro used to obtain the Kick ID if present in the packet */ ++#define RGX_HWPERF_GET_KICKID(_kickinfo) (((_kickinfo) & RGX_HWPERF_KICKINFO_KICKID_MASK) >> RGX_HWPERF_KICKINFO_KICKID_SHIFT) ++ ++/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */ ++#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U ++#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU ++ ++/*! Shift for the UFO count and data stream fields */ ++#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U ++#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U ++ ++/*! Macro used to set UFO stream info word as a combination of two 16-bit integers */ ++#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize, _soff) \ ++ ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) | \ ++ (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)))) ++ ++/*! Macro used to obtain UFO count*/ ++#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo) \ ++ (((_streaminfo) & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT) ++ ++/*! Obtains the offset of the UFO stream in the packet */ ++#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo) \ ++ (((_streaminfo) & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT) ++ ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGX_HWPERF_COMMON_H_ */ ++ ++/****************************************************************************** ++ End of file ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_hwperf_table.c b/drivers/gpu/drm/img-rogue/rgx_hwperf_table.c +new file mode 100644 +index 000000000000..268ba65207ae +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_hwperf_table.c +@@ -0,0 +1,635 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX HW Performance counter table ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX HW Performance counters table ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "rgx_fwif_hwperf.h" ++#if defined(__KERNEL__) ++#include "rgxdefs_km.h" ++#else ++#include "rgxdefs.h" ++#endif ++#include "rgx_hwperf_table.h" ++ ++/* Includes needed for PVRSRVKM (Server) context */ ++# include "rgx_bvnc_defs_km.h" ++# if defined(__KERNEL__) ++# include "rgxdevice.h" ++# endif ++ ++/* Shared compile-time context ASSERT macro */ ++#if defined(RGX_FIRMWARE) ++# include "rgxfw_utils.h" ++/* firmware context */ ++# define DBG_ASSERT(_c) RGXFW_ASSERT((_c)) ++#else ++# include "pvr_debug.h" ++/* host client/server context */ ++# define DBG_ASSERT(_c) PVR_ASSERT((_c)) ++#endif ++ ++/***************************************************************************** ++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() ++ ++ Referenced in gasCntBlkTypeModel[] table below and only called from ++ RGX_FIRMWARE run-time context. Therefore compile time configuration is used. 
++ *****************************************************************************/ ++ ++#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS) ++# include "rgxfw_pow.h" ++# include "rgxfw_utils.h" ++ ++static bool rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) ++{ ++ PVR_UNREFERENCED_PARAMETER(eBlkType); ++ PVR_UNREFERENCED_PARAMETER(ui8UnitId); ++ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) ++ /* S7XT: JONES */ ++ return (eBlkType == RGX_CNTBLK_ID_JONES); ++#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) ++ /* S6XT: TA, TORNADO */ ++ return true; ++#else ++ /* S6 : TA, HUB, RASTER (RASCAL) */ ++ return (gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U; ++#endif ++} ++ ++/* Only use conditional compilation when counter blocks appear in different ++ * islands for different Rogue families. ++ */ ++static bool rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId) ++{ ++ IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_units(); ++ ++ if (((gsPowCtl.eUnitsPowState & RGXFW_POW_ST_RD_ON) != 0U) && ++ (ui32NumDustsEnabled > 0U)) ++ { ++#if defined(RGX_FEATURE_DYNAMIC_DUST_POWER) ++ IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2U; ++ ++ switch (eBlkType) ++ { ++ case RGX_CNTBLK_ID_TPU_MCU0: /* S6 and S6XT */ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) ++ case RGX_CNTBLK_ID_TEXAS0: /* S7 */ ++#endif ++ if (ui8UnitId >= ui32NumDustsEnabled) ++ { ++ return false; ++ } ++ break; ++ case RGX_CNTBLK_ID_USC0: /* S6, S6XT, S7 */ ++ case RGX_CNTBLK_ID_PBE0: /* S7, PBE2_IN_XE */ ++ /* Handle single cluster cores */ ++ if (ui8UnitId >= ((ui32NumUscEnabled > RGX_FEATURE_NUM_CLUSTERS) ? RGX_FEATURE_NUM_CLUSTERS : ui32NumUscEnabled)) ++ { ++ return false; ++ } ++ break; ++ case RGX_CNTBLK_ID_BLACKPEARL0: /* S7 */ ++ case RGX_CNTBLK_ID_RASTER0: /* S6XT */ ++#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) ++ case RGX_CNTBLK_ID_TEXAS0: /* S6XT */ ++#endif ++ if (ui8UnitId >= (RGX_REQ_NUM_PHANTOMS(ui32NumUscEnabled))) ++ { ++ return false; ++ } ++ break; ++ default: ++ RGXFW_ASSERT(false); /* should never get here, table error */ ++ break; ++ } ++#else ++ /* Always true, no fused DUSTs, all powered so do not check unit */ ++ PVR_UNREFERENCED_PARAMETER(eBlkType); ++ PVR_UNREFERENCED_PARAMETER(ui8UnitId); ++#endif ++ } ++ else ++ { ++ return false; ++ } ++ return true; ++} ++ ++#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ ++ ++# define rgxfw_hwperf_pow_st_direct ((void*)NULL) ++# define rgxfw_hwperf_pow_st_indirect ((void*)NULL) ++ ++#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */ ++ ++/***************************************************************************** ++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end ++ *****************************************************************************/ ++ ++/***************************************************************************** ++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start ++ ++ Referenced in gasCntBlkTypeModel[] table below and called from all build ++ contexts: ++ RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server). ++ ++ Therefore each function has two implementations, one for compile time and one ++ run time configuration depending on the context. The functions will inform the ++ caller whether this block is valid for this particular RGX device. Other ++ run-time dependent data is returned in psRtInfo for the caller to use. 
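++
++ In the Server context this is decided from the probed device's BVNC
++ feature configuration; in the RGX_FIRMWARE context the RGX_FEATURE_*
++ compile-time defines decide it, as the functions below show.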
++ *****************************************************************************/ ++ ++/* Used for block types: USC */ ++static IMG_BOOL rgx_hwperf_blk_present_perfbus(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) ++{ ++ DBG_ASSERT(psBlkTypeDesc != NULL); ++ DBG_ASSERT(psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_USC0); ++ ++#if defined(__KERNEL__) /* Server context */ ++ PVR_ASSERT(pvDev_km != NULL); ++ PVR_ASSERT(pvRtInfo != NULL); ++ { ++ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; ++ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) ++ { ++ psRtInfo->ui32NumUnits = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) : 0; ++ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; ++ return IMG_TRUE; ++ } ++ } ++#else /* FW context */ ++ PVR_UNREFERENCED_PARAMETER(pvDev_km); ++ PVR_UNREFERENCED_PARAMETER(pvRtInfo); ++ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); ++# if defined(RGX_FEATURE_PERFBUS) ++ return IMG_TRUE; ++# endif ++#endif ++ return IMG_FALSE; ++} ++ ++/* Used for block types: Direct RASTERISATION, HUB */ ++static IMG_BOOL rgx_hwperf_blk_present_not_clustergrouping(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) ++{ ++ DBG_ASSERT(psBlkTypeDesc != NULL); ++ DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER) || ++ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_HUB)); ++ ++#if defined(__KERNEL__) /* Server context */ ++ PVR_ASSERT(pvDev_km != NULL); ++ PVR_ASSERT(pvRtInfo != NULL); ++ { ++ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; ++ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; ++ if ((!RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) && ++ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))) ++ { ++ psRtInfo->ui32NumUnits = 1; ++ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; ++ return IMG_TRUE; ++ } ++ } ++#else /* FW context */ ++ PVR_UNREFERENCED_PARAMETER(pvDev_km); ++ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); ++ PVR_UNREFERENCED_PARAMETER(pvRtInfo); ++# if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) ++ return IMG_TRUE; ++# endif ++#endif ++ return IMG_FALSE; ++} ++ ++#if defined(__KERNEL__) /* Server context */ ++static IMG_UINT32 rgx_units_indirect_by_phantom(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg) ++{ ++ /* Run-time math for RGX_HWPERF_INDIRECT_BY_PHANTOM */ ++ return ((psFeatCfg->ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) ? 
1
++ : (psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]+3)/4;
++}
++
++static IMG_UINT32 rgx_units_phantom_indirect_by_dust(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
++{
++ /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST */
++ return MAX((psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]>>1),1);
++}
++
++static IMG_UINT32 rgx_units_phantom_indirect_by_cluster(const PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
++{
++ /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER */
++ return psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX];
++}
++#endif /* defined(__KERNEL__) */
++
++/* Used for block types: TORNADO, TEXAS, Indirect RASTERISATION */
++static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
++{
++ DBG_ASSERT(psBlkTypeDesc != NULL);
++ DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ||
++ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
++ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0));
++
++#if defined(__KERNEL__) /* Server context */
++ PVR_ASSERT(pvDev_km != NULL);
++ PVR_ASSERT(pvRtInfo != NULL);
++ {
++ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo;
++ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km;
++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
++ {
++ if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TORNADO)
++ {
++ psRtInfo->ui32NumUnits = 1;
++ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
++ return IMG_TRUE;
++ }
++ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0)
++ {
++ psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
++ psRtInfo->ui32IndirectReg = RGX_CR_TEXAS_PERF_INDIRECT;
++ return IMG_TRUE;
++ }
++ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_RASTER0)
++ {
++ psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
++ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg;
++ return IMG_TRUE;
++ }
++ }
++ }
++#else /* FW context */
++ PVR_UNREFERENCED_PARAMETER(pvDev_km);
++ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
++ PVR_UNREFERENCED_PARAMETER(pvRtInfo);
++# if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
++ return IMG_TRUE;
++# endif
++#endif
++ return IMG_FALSE;
++}
++
++/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPEARL, PBE */
++static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo)
++{
++ DBG_ASSERT(psBlkTypeDesc != NULL);
++ DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES) ||
++ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) ||
++ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
++ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) ||
++ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0));
++
++#if defined(__KERNEL__) /* Server context */
++ PVR_ASSERT(pvDev_km != NULL);
++ PVR_ASSERT(pvRtInfo != NULL);
++ {
++ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo;
++ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km;
++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
++ {
++ if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
++ {
++ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
++
psRtInfo->ui32IndirectReg = RGX_CR_TPU_PERF_INDIRECT; ++ return IMG_TRUE; ++ } ++ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ++ { ++ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); ++ psRtInfo->ui32IndirectReg = RGX_CR_TEXAS3_PERF_INDIRECT; ++ return IMG_TRUE; ++ } ++ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) ++ { ++ psRtInfo->ui32NumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); ++ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; ++ return IMG_TRUE; ++ } ++ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) ++ { ++ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg); ++ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; ++ return IMG_TRUE; ++ } ++ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_JONES) ++ { ++ psRtInfo->ui32NumUnits = 1; ++ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; ++ return IMG_TRUE; ++ } ++ } ++ } ++#else /* FW context */ ++ PVR_UNREFERENCED_PARAMETER(pvDev_km); ++ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); ++ PVR_UNREFERENCED_PARAMETER(pvRtInfo); ++# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) ++ return IMG_TRUE; ++# else ++# endif ++#endif ++ return IMG_FALSE; ++} ++ ++/* Used for block types: TA, TPU_MCU. Also PBE when PBE2_IN_XE is present */ ++static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) ++{ ++ DBG_ASSERT(psBlkTypeDesc != NULL); ++ DBG_ASSERT((psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) || ++ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) || ++ (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0)); ++ ++#if defined(__KERNEL__) /* Server context */ ++ PVR_ASSERT(pvDev_km != NULL); ++ PVR_ASSERT(pvRtInfo != NULL); ++ { ++ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo = (RGX_HWPERF_CNTBLK_RT_INFO *) pvRtInfo; ++ const PVRSRV_RGXDEV_INFO *psDevInfo = (const PVRSRV_RGXDEV_INFO *)pvDev_km; ++ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE) && ++ RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) ++ { ++ if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TA) ++ { ++ psRtInfo->ui32NumUnits = 1; ++ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; ++ return IMG_TRUE; ++ } ++ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) ++ { ++ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) ++ { ++ /* PBE counters are not present on this config */ ++ return IMG_FALSE; ++ } ++ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg); ++ psRtInfo->ui32IndirectReg = psBlkTypeDesc->ui32IndirectReg; ++ return IMG_TRUE; ++ } ++ else if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) ++ { ++ psRtInfo->ui32NumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); ++ psRtInfo->ui32IndirectReg = RGX_CR_TPU_MCU_L0_PERF_INDIRECT; ++ return IMG_TRUE; ++ } ++ } ++ } ++#else /* FW context */ ++ PVR_UNREFERENCED_PARAMETER(pvDev_km); ++ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); ++ PVR_UNREFERENCED_PARAMETER(pvRtInfo); ++# if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) ++# if !defined(RGX_FEATURE_PBE2_IN_XE) ++ if (psBlkTypeDesc->ui32CntBlkIdBase == RGX_CNTBLK_ID_PBE0) ++ { ++ /* No support for PBE counters without PBE2_IN_XE */ ++ return IMG_FALSE; ++ } ++# endif ++ return IMG_TRUE; ++# endif ++#endif ++ return IMG_FALSE; ++} ++ ++static IMG_BOOL 
rgx_hwperf_blk_present_check_s7top_or_not(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) ++{ ++#if defined(__KERNEL__) ++ return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo) ++ || rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo)); ++ ++#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) ++ return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); ++ ++#elif defined(RGX_FEATURE_PBE2_IN_XE) || defined(RGX_FEATURE_PERFBUS) ++ return rgx_hwperf_blk_present_not_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); ++#else ++ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); ++ PVR_UNREFERENCED_PARAMETER(pvDev_km); ++ PVR_UNREFERENCED_PARAMETER(pvRtInfo); ++ return IMG_FALSE; ++#endif ++} ++ ++static IMG_BOOL rgx_hwperf_blk_present_check_s7top_or_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) ++{ ++#if defined(__KERNEL__) ++ return (rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo) ++ || rgx_hwperf_blk_present_xttop(psBlkTypeDesc, pvDev_km, pvRtInfo)); ++ ++#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) ++ return rgx_hwperf_blk_present_s7top(psBlkTypeDesc, pvDev_km, pvRtInfo); ++ ++#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) ++ return rgx_hwperf_blk_present_xttop(psBlkTypeDesc, pvDev_km, pvRtInfo); ++#else ++ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); ++ PVR_UNREFERENCED_PARAMETER(pvDev_km); ++ PVR_UNREFERENCED_PARAMETER(pvRtInfo); ++ return IMG_FALSE; ++#endif ++} ++ ++#if !defined(__KERNEL__) /* Firmware or User-mode context */ ++static IMG_BOOL rgx_hwperf_blk_present_false(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, const void *pvDev_km, void *pvRtInfo) ++{ ++ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc); ++ PVR_UNREFERENCED_PARAMETER(pvDev_km); ++ PVR_UNREFERENCED_PARAMETER(pvRtInfo); ++ ++ /* Some functions not used on some BVNCs, silence compiler warnings */ ++ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_perfbus); ++ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_clustergrouping); ++ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_xttop); ++ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_s7top); ++ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_s7top); ++ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_not); ++ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_check_s7top_or_xttop); ++ ++ return IMG_FALSE; ++} ++ ++/* Used to instantiate a null row in the block type model table below where the ++ * block is not supported for a given build BVNC in firmware/user mode context. ++ * This is needed as the blockid to block type lookup uses the table as well ++ * and clients may try to access blocks not in the hardware. 
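++ * A null row zeroes every register and counter column, keeps the
++ * stringified block id as its name and wires pfnIsBlkPresent to
++ * rgx_hwperf_blk_present_false, so a lookup on an unsupported id still
++ * resolves to a row whose presence check returns IMG_FALSE.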
*/ ++#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) {_blkid, 0, 0, 0, 0, 0, 0, 0, 0, #_blkid, NULL, rgx_hwperf_blk_present_false} ++ ++#endif ++ ++ ++/***************************************************************************** ++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end ++ *****************************************************************************/ ++ ++#if defined(__KERNEL__) /* Values will be calculated at run-time */ ++#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_NUM_BLOCK_UNITS_RUNTIME_CALC ++#define RGX_INDIRECT_REG_TEXAS 0xFFFFFFFF ++#define RGX_INDIRECT_REG_TPU 0xFFFFFFFF ++ ++#elif defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) ++#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST ++#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS3_PERF_INDIRECT ++#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_PERF_INDIRECT ++ ++#else ++ ++#if defined(RGX_FEATURE_PERFBUS) ++#define RGX_INDIRECT_REG_TPU RGX_CR_TPU_MCU_L0_PERF_INDIRECT ++#endif ++ ++#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) ++#define RGX_HWPERF_NUM_BLOCK_UNITS RGX_HWPERF_INDIRECT_BY_PHANTOM ++#define RGX_INDIRECT_REG_TEXAS RGX_CR_TEXAS_PERF_INDIRECT ++#endif ++ ++#endif ++ ++ ++/***************************************************************************** ++ RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table ++ ++ This table holds the entries for the performance counter block type model. ++ Where the block is not present on an RGX device in question the ++ pfnIsBlkPresent() returns false, if valid and present it returns true. ++ Columns in the table with a ** indicate the value is a default and the ++ value returned in RGX_HWPERF_CNTBLK_RT_INFO when calling pfnIsBlkPresent() ++ should be used at runtime by the caller. These columns are only valid for ++ compile time BVNC configured contexts. ++ ++ Order of table rows must match order of counter block IDs in the enumeration ++ RGX_HWPERF_CNTBLK_ID. 
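++
++ For example, in the RGX_CNTBLK_ID_USC0 row the ui32NumUnits column holds
++ the compile-time default RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER; kernel
++ context callers must instead use the unit count that
++ rgx_hwperf_blk_present_perfbus() returns in RGX_HWPERF_CNTBLK_RT_INFO.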
++ *****************************************************************************/ ++ ++static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] = ++{ ++ /* ui32CntBlkIdBase, ui32IndirectReg, ui32PerfReg, ui32Select0BaseReg, ui32Counter0BaseReg ui8NumCounters, ui32NumUnits**, ui8SelectRegModeShift, ui8SelectRegOffsetShift, pfnIsBlkPowered pfnIsBlkPresent ++ * pszBlockNameComment, */ ++ /*RGX_CNTBLK_ID_TA*/ ++#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) ++ {RGX_CNTBLK_ID_TA, 0, /* direct */ RGX_CR_TA_PERF, RGX_CR_TA_PERF_SELECT0, RGX_CR_TA_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_TA_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_s7top }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TA), ++#endif ++ ++ /*RGX_CNTBLK_ID_RASTER*/ ++#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) ++ {RGX_CNTBLK_ID_RASTER, 0, /* direct */ RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER), ++#endif ++ ++ /*RGX_CNTBLK_ID_HUB*/ ++#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) ++ {RGX_CNTBLK_ID_HUB, 0, /* direct */ RGX_CR_HUB_BIFPMCACHE_PERF, RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0, RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_HUB_BIFPMCACHE_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_HUB), ++#endif ++ ++ /*RGX_CNTBLK_ID_TORNADO*/ ++#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__) ++ {RGX_CNTBLK_ID_TORNADO, 0, /* direct */ RGX_CR_TORNADO_PERF, RGX_CR_TORNADO_PERF_SELECT0, RGX_CR_TORNADO_PERF_COUNTER_0, 4, 1, 21, 4, "RGX_CR_TORNADO_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_xttop }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TORNADO), ++#endif ++ ++ /*RGX_CNTBLK_ID_JONES*/ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__) ++ {RGX_CNTBLK_ID_JONES, 0, /* direct */ RGX_CR_JONES_PERF, RGX_CR_JONES_PERF_SELECT0, RGX_CR_JONES_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_JONES_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_s7top }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_JONES), ++#endif ++ ++ /*RGX_CNTBLK_ID_TPU_MCU0*/ ++#if defined(__KERNEL__) || (defined(RGX_FEATURE_PERFBUS) && !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) ++ {RGX_CNTBLK_ID_TPU_MCU0, RGX_INDIRECT_REG_TPU, RGX_CR_TPU_MCU_L0_PERF, RGX_CR_TPU_MCU_L0_PERF_SELECT0, RGX_CR_TPU_MCU_L0_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 21, 3, "RGX_CR_TPU_MCU_L0_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0), ++#endif ++ ++ /*RGX_CNTBLK_ID_USC0*/ ++#if defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__) ++ {RGX_CNTBLK_ID_USC0, RGX_CR_USC_PERF_INDIRECT, RGX_CR_USC_PERF, RGX_CR_USC_PERF_SELECT0, RGX_CR_USC_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_USC_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_perfbus }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_USC0), ++#endif ++ ++ /*RGX_CNTBLK_ID_TEXAS0*/ ++#if defined(__KERNEL__) 
|| defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) ++ {RGX_CNTBLK_ID_TEXAS0, RGX_INDIRECT_REG_TEXAS, RGX_CR_TEXAS_PERF, RGX_CR_TEXAS_PERF_SELECT0, RGX_CR_TEXAS_PERF_COUNTER_0, 6, RGX_HWPERF_NUM_BLOCK_UNITS, 31, 3, "RGX_CR_TEXAS_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_xttop }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0), ++#endif ++ ++ /*RGX_CNTBLK_ID_RASTER0*/ ++#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__) ++ {RGX_CNTBLK_ID_RASTER0, RGX_CR_RASTERISATION_PERF_INDIRECT, RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER0), ++#endif ++ ++ /*RGX_CNTBLK_ID_BLACKPEARL0*/ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__) ++ {RGX_CNTBLK_ID_BLACKPEARL0, RGX_CR_BLACKPEARL_PERF_INDIRECT, RGX_CR_BLACKPEARL_PERF, RGX_CR_BLACKPEARL_PERF_SELECT0, RGX_CR_BLACKPEARL_PERF_COUNTER_0, 6, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_BLACKPEARL_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BLACKPEARL0), ++#endif ++ ++ /*RGX_CNTBLK_ID_PBE0*/ ++#if defined(__KERNEL__) || defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_PBE2_IN_XE) ++ {RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_PERF_INDIRECT, RGX_CR_PBE_PERF, RGX_CR_PBE_PERF_SELECT0, RGX_CR_PBE_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_PBE_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_check_s7top_or_not }, ++#else ++ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0), ++#endif ++}; ++ ++ ++IMG_INTERNAL IMG_UINT32 ++RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel) ++{ ++ *ppsModel = gasCntBlkTypeModel; ++ return ARRAY_SIZE(gasCntBlkTypeModel); ++} ++ ++/****************************************************************************** ++ End of file (rgx_hwperf_table.c) ++ ******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_hwperf_table.h b/drivers/gpu/drm/img-rogue/rgx_hwperf_table.h +new file mode 100644 +index 000000000000..449885cdcb64 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_hwperf_table.h +@@ -0,0 +1,116 @@ ++/*************************************************************************/ /*! ++@File ++@Title HWPerf counter table header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Utility functions used internally for HWPerf data retrieval ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGX_HWPERF_TABLE_H ++#define RGX_HWPERF_TABLE_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "rgx_fwif_hwperf.h" ++#if defined(__KERNEL__) ++#include "rgxdevice.h" ++#endif ++/*****************************************************************************/ ++ ++/* Forward declaration */ ++typedef struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL; ++ ++/* Function pointer type for functions to check dynamic power state of ++ * counter block instance. Used only in firmware. */ ++typedef bool (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)( ++ RGX_HWPERF_CNTBLK_ID eBlkType, ++ IMG_UINT8 ui8UnitId); ++ ++#if defined(__KERNEL__) ++/* Counter block run-time info */ ++typedef struct ++{ ++ IMG_UINT32 ui32IndirectReg; /* 0 if direct type otherwise the indirect control register to select indirect unit */ ++ IMG_UINT32 ui32NumUnits; /* Number of instances of this block type in the core */ ++} RGX_HWPERF_CNTBLK_RT_INFO; ++#endif ++ ++/* Function pointer type for functions to check block is valid and present ++ * on that RGX Device at runtime. It may have compile logic or run-time ++ * logic depending on where the code executes: server, srvinit or firmware. ++ * Values in the psRtInfo output parameter are only valid if true returned. ++ */ ++typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)( ++ const struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc, ++ const void *pvDev_km, ++ void *pvRtInfo); ++ ++/* This structure encodes properties of a type of performance counter block. ++ * The structure is sometimes referred to as a block type descriptor. These ++ * properties contained in this structure represent the columns in the block ++ * type model table variable below. These values vary depending on the build ++ * BVNC and core type. ++ * Each direct block has a unique type descriptor and each indirect group has ++ * a type descriptor. ++ */ ++struct RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ ++{ ++ /* Could use RGXFW_ALIGN_DCACHEL here but then we would waste 40% of the cache line? 
*/ ++ IMG_UINT32 ui32CntBlkIdBase; /* The starting block id for this block type */ ++ IMG_UINT32 ui32IndirectReg; /* 0 if direct type otherwise the indirect control register to select indirect unit */ ++ IMG_UINT32 ui32PerfReg; /* RGX_CR_*_PERF register for this block type */ ++ IMG_UINT32 ui32Select0BaseReg; /* RGX_CR_*_PERF_SELECT0 register for this block type */ ++ IMG_UINT32 ui32Counter0BaseReg; /* RGX_CR_*_PERF_COUNTER_0 register for this block type */ ++ IMG_UINT8 ui8NumCounters; /* Number of counters in this block type */ ++ IMG_UINT8 ui8NumUnits; /* Number of instances of this block type in the core */ ++ IMG_UINT8 ui8SelectRegModeShift; /* Mode field shift value of select registers */ ++ IMG_UINT8 ui8SelectRegOffsetShift; /* Interval between select registers, either 8 bytes or 16, hence << 3 or << 4 */ ++ const IMG_CHAR *pszBlockNameComment; /* Name of the PERF register. Used while dumping the perf counters to pdumps */ ++ PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */ ++ PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */ ++}; ++ ++/*****************************************************************************/ ++ ++IMG_INTERNAL IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel); ++ ++#endif /* RGX_HWPERF_TABLE_H */ ++ ++/****************************************************************************** ++ End of file (rgx_hwperf_table.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_memallocflags.h b/drivers/gpu/drm/img-rogue/rgx_memallocflags.h +new file mode 100644 +index 000000000000..e26f42c4f935 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_memallocflags.h +@@ -0,0 +1,58 @@ ++/**************************************************************************/ /*! ++@File ++@Title RGX device specific memory allocation flags ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. 
++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGX_MEMALLOCFLAGS_H ++#define RGX_MEMALLOCFLAGS_H ++ ++ ++/* Include pvrsrv layer header as the flags below are used in the device ++ * field defined in this header inside Services code. ++ * See PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK */ ++#include "pvrsrv_memallocflags.h" ++ ++ ++/* Device specific MMU flags */ ++#define PMMETA_PROTECT (1U << 0) /*!< Memory that only the PM and Meta can access */ ++#define FIRMWARE_CACHED (1U << 1) /*!< Memory that is cached in META/MIPS */ ++ ++ ++#endif /* RGX_MEMALLOCFLAGS_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_meta.h b/drivers/gpu/drm/img-rogue/rgx_meta.h +new file mode 100644 +index 000000000000..bdff11ffbdc1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_meta.h +@@ -0,0 +1,385 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX META definitions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX META helper definitions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_META_H) ++#define RGX_META_H ++ ++ ++/***** The META HW register definitions in the file are updated manually *****/ ++ ++ ++#include "img_defs.h" ++#include "km/rgxdefs_km.h" ++ ++ ++/****************************************************************************** ++* META registers and MACROS ++******************************************************************************/ ++#define META_CR_CTRLREG_BASE(T) (0x04800000U + (0x1000U*(T))) ++ ++#define META_CR_TXPRIVEXT (0x048000E8) ++#define META_CR_TXPRIVEXT_MINIM_EN (IMG_UINT32_C(0x1) << 7) ++ ++#define META_CR_SYSC_JTAG_THREAD (0x04830030) ++#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004) ++ ++#define META_CR_PERF_COUNT0 (0x0480FFE0) ++#define META_CR_PERF_COUNT1 (0x0480FFE8) ++#define META_CR_PERF_COUNT_CTRL_SHIFT (28) ++#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000) ++#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (IMG_UINT32_C(0x8) << META_CR_PERF_COUNT_CTRL_SHIFT) ++#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (IMG_UINT32_C(0x9) << META_CR_PERF_COUNT_CTRL_SHIFT) ++#define META_CR_PERF_COUNT_CTRL_ICACHEMISS (IMG_UINT32_C(0xA) << META_CR_PERF_COUNT_CTRL_SHIFT) ++#define META_CR_PERF_COUNT_CTRL_ICORE (IMG_UINT32_C(0xD) << META_CR_PERF_COUNT_CTRL_SHIFT) ++#define META_CR_PERF_COUNT_THR_SHIFT (24) ++#define META_CR_PERF_COUNT_THR_MASK (0x0F000000) ++#define META_CR_PERF_COUNT_THR_0 (IMG_UINT32_C(0x1) << META_CR_PERF_COUNT_THR_SHIFT) ++#define META_CR_PERF_COUNT_THR_1 (IMG_UINT32_C(0x2) << META_CR_PERF_COUNT_THR_SHIFT) ++ ++#define META_CR_TxVECINT_BHALT (0x04820500) ++#define META_CR_PERF_ICORE0 (0x0480FFD0) ++#define META_CR_PERF_ICORE1 (0x0480FFD8) ++#define META_CR_PERF_ICORE_DCACHEMISS (0x8) ++ ++#define META_CR_PERF_COUNT(CTRL, THR) ((META_CR_PERF_COUNT_CTRL_##CTRL << META_CR_PERF_COUNT_CTRL_SHIFT) | \ ++ (THR << META_CR_PERF_COUNT_THR_SHIFT)) ++ ++#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF0U) ++#define META_CR_TXUXXRXRQ_OFFSET (META_CR_CTRLREG_BASE(0U) + 0x0000FFF8U) ++ ++#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000U) /* Poll for done */ ++#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000U) /* Set for read */ ++#define META_CR_TXUXXRXRQ_TX_S (12) ++#define META_CR_TXUXXRXRQ_RX_S (4) ++#define META_CR_TXUXXRXRQ_UXX_S (0) ++ ++#define META_CR_TXUIN_ID (0x0) /* Internal ctrl regs */ ++#define META_CR_TXUD0_ID (0x1) /* Data unit regs */ ++#define META_CR_TXUD1_ID (0x2) /* Data unit regs */ ++#define META_CR_TXUA0_ID (0x3) /* Address unit regs */ ++#define META_CR_TXUA1_ID (0x4) /* Address unit regs */ ++#define META_CR_TXUPC_ID (0x5) /* PC registers */ ++ ++/* Macros to calculate register access values */ ++#define META_CR_CORE_REG(Thr, RegNum, Unit) (((IMG_UINT32)(Thr) << META_CR_TXUXXRXRQ_TX_S) | \ ++ ((IMG_UINT32)(RegNum) << META_CR_TXUXXRXRQ_RX_S) | \ ++ ((IMG_UINT32)(Unit) << META_CR_TXUXXRXRQ_UXX_S)) ++ ++#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, 
META_CR_TXUPC_ID) ++#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID) ++#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID) ++ ++#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID) ++#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID) ++#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID) ++ ++#define SP_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID) ++#define PC_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID) ++ ++#define META_CR_COREREG_ENABLE (0x0000000U) ++#define META_CR_COREREG_STATUS (0x0000010U) ++#define META_CR_COREREG_DEFR (0x00000A0U) ++#define META_CR_COREREG_PRIVEXT (0x00000E8U) ++ ++#define META_CR_T0ENABLE_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_ENABLE) ++#define META_CR_T0STATUS_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_STATUS) ++#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_DEFR) ++#define META_CR_T0PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(0U) + META_CR_COREREG_PRIVEXT) ++ ++#define META_CR_T1ENABLE_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_ENABLE) ++#define META_CR_T1STATUS_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_STATUS) ++#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_DEFR) ++#define META_CR_T1PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(1U) + META_CR_COREREG_PRIVEXT) ++ ++#define META_CR_TXENABLE_ENABLE_BIT (0x00000001U) /* Set if running */ ++#define META_CR_TXSTATUS_PRIV (0x00020000U) ++#define META_CR_TXPRIVEXT_MINIM (0x00000080U) ++ ++#define META_MEM_GLOBAL_RANGE_BIT (0x80000000U) ++ ++#define META_CR_TXCLKCTRL (0x048000B0) ++#define META_CR_TXCLKCTRL_ALL_ON (0x55111111) ++#define META_CR_TXCLKCTRL_ALL_AUTO (0xAA222222) ++ ++ ++/****************************************************************************** ++* META LDR Format ++******************************************************************************/ ++/* Block header structure */ ++typedef struct ++{ ++ IMG_UINT32 ui32DevID; ++ IMG_UINT32 ui32SLCode; ++ IMG_UINT32 ui32SLData; ++ IMG_UINT16 ui16PLCtrl; ++ IMG_UINT16 ui16CRC; ++ ++} RGX_META_LDR_BLOCK_HDR; ++ ++/* High level data stream block structure */ ++typedef struct ++{ ++ IMG_UINT16 ui16Cmd; ++ IMG_UINT16 ui16Length; ++ IMG_UINT32 ui32Next; ++ IMG_UINT32 aui32CmdData[4]; ++ ++} RGX_META_LDR_L1_DATA_BLK; ++ ++/* High level data stream block structure */ ++typedef struct ++{ ++ IMG_UINT16 ui16Tag; ++ IMG_UINT16 ui16Length; ++ IMG_UINT32 aui32BlockData[4]; ++ ++} RGX_META_LDR_L2_DATA_BLK; ++ ++/* Config command structure */ ++typedef struct ++{ ++ IMG_UINT32 ui32Type; ++ IMG_UINT32 aui32BlockData[4]; ++ ++} RGX_META_LDR_CFG_BLK; ++ ++/* Block type definitions */ ++#define RGX_META_LDR_COMMENT_TYPE_MASK (0x0010U) ++#define RGX_META_LDR_BLK_IS_COMMENT(X) ((X & RGX_META_LDR_COMMENT_TYPE_MASK) != 0U) ++ ++/* Command definitions ++ * Value Name Description ++ * 0 LoadMem Load memory with binary data. ++ * 1 LoadCore Load a set of core registers. ++ * 2 LoadMMReg Load a set of memory mapped registers. ++ * 3 StartThreads Set each thread PC and SP, then enable threads. ++ * 4 ZeroMem Zeros a memory region. ++ * 5 Config Perform a configuration command. 
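++ *
++ * A loader is expected to walk the RGX_META_LDR_L1_DATA_BLK chain via
++ * ui32Next, dispatch on (ui16Cmd & RGX_META_LDR_CMD_MASK) against the
++ * values below and skip blocks flagged by RGX_META_LDR_BLK_IS_COMMENT().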
++ */ ++#define RGX_META_LDR_CMD_MASK (0x000FU) ++ ++#define RGX_META_LDR_CMD_LOADMEM (0x0000U) ++#define RGX_META_LDR_CMD_LOADCORE (0x0001U) ++#define RGX_META_LDR_CMD_LOADMMREG (0x0002U) ++#define RGX_META_LDR_CMD_START_THREADS (0x0003U) ++#define RGX_META_LDR_CMD_ZEROMEM (0x0004U) ++#define RGX_META_LDR_CMD_CONFIG (0x0005U) ++ ++/* Config Command definitions ++ * Value Name Description ++ * 0 Pause Pause for x times 100 instructions ++ * 1 Read Read a value from register - No value return needed. ++ * Utilises effects of issuing reads to certain registers ++ * 2 Write Write to mem location ++ * 3 MemSet Set mem to value ++ * 4 MemCheck check mem for specific value. ++ */ ++#define RGX_META_LDR_CFG_PAUSE (0x0000) ++#define RGX_META_LDR_CFG_READ (0x0001) ++#define RGX_META_LDR_CFG_WRITE (0x0002) ++#define RGX_META_LDR_CFG_MEMSET (0x0003) ++#define RGX_META_LDR_CFG_MEMCHECK (0x0004) ++ ++ ++/****************************************************************************** ++* RGX FW segmented MMU definitions ++******************************************************************************/ ++/* All threads can access the segment */ ++#define RGXFW_SEGMMU_ALLTHRS (IMG_UINT32_C(0xf) << 8U) ++/* Writable */ ++#define RGXFW_SEGMMU_WRITEABLE (0x1U << 1U) ++/* All threads can access and writable */ ++#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE (RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE) ++ ++/* Direct map region 10 used for mapping GPU memory - max 8MB */ ++#define RGXFW_SEGMMU_DMAP_GPU_ID (10U) ++#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START (0x07000000U) ++#define RGXFW_SEGMMU_DMAP_GPU_MAX_SIZE (0x00800000U) ++ ++/* Segment IDs */ ++#define RGXFW_SEGMMU_DATA_ID (1U) ++#define RGXFW_SEGMMU_BOOTLDR_ID (2U) ++#define RGXFW_SEGMMU_TEXT_ID (RGXFW_SEGMMU_BOOTLDR_ID) ++ ++/* ++ * SLC caching strategy in S7 and volcanic is emitted through the segment MMU. ++ * All the segments configured through the macro RGXFW_SEGMMU_OUTADDR_TOP are ++ * CACHED in the SLC. ++ * The interface has been kept the same to simplify the code changes. ++ * The bifdm argument is ignored (no longer relevant) in S7 and volcanic. 
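++ *
++ * Concretely, RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC below packs the persistence
++ * field into bits 53:52, the MMU context into bits 51:44 and the SLC policy
++ * into bit 40 of the 64-bit segment output address; the _CACHED/_UNCACHED
++ * variants fix pers/policy to 0x3/0x0 and 0x0/0x1 respectively.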
++ */ ++#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(pers, slc_policy, mmu_ctx) ((((IMG_UINT64) ((pers) & 0x3U)) << 52) | \ ++ (((IMG_UINT64) ((mmu_ctx) & 0xFFU)) << 44) | \ ++ (((IMG_UINT64) ((slc_policy) & 0x1U)) << 40)) ++#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x3U, 0x0U, mmu_ctx) ++#define RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC(0x0U, 0x1U, mmu_ctx) ++ ++/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten ++ * accesses through this segment ++ */ ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, bifdm) (((IMG_UINT64)((IMG_UINT64)(pc) & 0xFU) << 44U) | \ ++ ((IMG_UINT64)((IMG_UINT64)(bifdm) & 0xFU) << 40U)) ++ ++#define RGXFW_SEGMMU_META_BIFDM_ID (0x7U) ++#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) ++#if defined(RGX_FEATURE_SLC_VIVT) ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED ++#define RGXFW_SEGMMU_OUTADDR_TOP_META RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED ++#else ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_CACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC ++#define RGXFW_SEGMMU_OUTADDR_TOP_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_SLC ++#define RGXFW_SEGMMU_OUTADDR_TOP_META(pc) RGXFW_SEGMMU_OUTADDR_TOP_SLC(pc, RGXFW_SEGMMU_META_BIFDM_ID) ++#endif ++#endif ++ ++/* META segments have 4kB minimum size */ ++#define RGXFW_SEGMMU_ALIGN (0x1000U) ++ ++/* Segmented MMU registers (n = segment id) */ ++#define META_CR_MMCU_SEGMENTn_BASE(n) (0x04850000U + ((n)*0x10U)) ++#define META_CR_MMCU_SEGMENTn_LIMIT(n) (0x04850004U + ((n)*0x10U)) ++#define META_CR_MMCU_SEGMENTn_OUTA0(n) (0x04850008U + ((n)*0x10U)) ++#define META_CR_MMCU_SEGMENTn_OUTA1(n) (0x0485000CU + ((n)*0x10U)) ++ ++/* The following defines must be recalculated if the Meta MMU segments used ++ * to access Host-FW data are changed ++ * Current combinations are: ++ * - SLC uncached, META cached, FW base address 0x70000000 ++ * - SLC uncached, META uncached, FW base address 0xF0000000 ++ * - SLC cached, META cached, FW base address 0x10000000 ++ * - SLC cached, META uncached, FW base address 0x90000000 ++ */ ++#define RGXFW_SEGMMU_DATA_BASE_ADDRESS (0x10000000U) ++#define RGXFW_SEGMMU_DATA_META_CACHED (0x0U) ++#define RGXFW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT) // 0x80000000 ++#define RGXFW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT) ++/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected in ++ * the PTEs for the FW data, not in the Meta Segment MMU, which means these ++ * defines have no real effect in those cases ++ */ ++#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0U) ++#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000U) ++#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000U) ++ ++ ++#if defined(SECURE_FW_CODE_OSID) && defined(RGX_FEATURE_META) ++#error "SECURE_FW_CODE_OSID is not supported on META cores" ++#endif ++ ++ ++/****************************************************************************** ++* RGX FW Bootloader defaults ++******************************************************************************/ ++#define RGXFW_BOOTLDR_META_ADDR (0x40000000U) ++#define RGXFW_BOOTLDR_DEVV_ADDR_0 (0xC0000000U) ++#define RGXFW_BOOTLDR_DEVV_ADDR_1 (0x000000E1) ++#define RGXFW_BOOTLDR_DEVV_ADDR ((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0) ++#define RGXFW_BOOTLDR_LIMIT (0x1FFFF000) ++#define RGXFW_MAX_BOOTLDR_OFFSET (0x1000) ++ 
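++/* Worked example of the composition above:
++ * RGXFW_BOOTLDR_DEVV_ADDR == ((IMG_UINT64)0x000000E1 << 32) | 0xC0000000U
++ *                         == 0x000000E1C0000000.
++ */
++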
++/* Bootloader configuration offset is in dwords (512 bytes) */ ++#define RGXFW_BOOTLDR_CONF_OFFSET (0x80) ++ ++ ++/****************************************************************************** ++* RGX META Stack ++******************************************************************************/ ++#define RGX_META_STACK_SIZE (0x1000U) ++ ++/****************************************************************************** ++ RGX META Core memory ++******************************************************************************/ ++/* code and data both map to the same physical memory */ ++#define RGX_META_COREMEM_CODE_ADDR (0x80000000U) ++#define RGX_META_COREMEM_DATA_ADDR (0x82000000U) ++#define RGX_META_COREMEM_OFFSET_MASK (0x01ffffffU) ++ ++#if defined(__KERNEL__) ++#define RGX_META_IS_COREMEM_CODE(A, B) (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B)))) ++#define RGX_META_IS_COREMEM_DATA(A, B) (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B)))) ++#endif ++ ++/****************************************************************************** ++* 2nd thread ++******************************************************************************/ ++#define RGXFW_THR1_PC (0x18930000) ++#define RGXFW_THR1_SP (0x78890000) ++ ++/****************************************************************************** ++* META compatibility ++******************************************************************************/ ++ ++#define META_CR_CORE_ID (0x04831000) ++#define META_CR_CORE_ID_VER_SHIFT (16U) ++#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU) ++ ++#if !defined(__KERNEL__) && defined(RGX_FEATURE_META) ++ ++ #if (RGX_FEATURE_META == MTP218) ++ #define RGX_CR_META_CORE_ID_VALUE 0x19 ++ #elif (RGX_FEATURE_META == MTP219) ++ #define RGX_CR_META_CORE_ID_VALUE 0x1E ++ #elif (RGX_FEATURE_META == LTP218) ++ #define RGX_CR_META_CORE_ID_VALUE 0x1C ++ #elif (RGX_FEATURE_META == LTP217) ++ #define RGX_CR_META_CORE_ID_VALUE 0x1F ++ #else ++ #error "Unknown META ID" ++ #endif ++#else ++ ++ #define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19 ++ #define RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E ++ #define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C ++ #define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F ++ ++#endif ++#define RGXFW_PROCESSOR_META "META" ++ ++ ++#endif /* RGX_META_H */ ++ ++/****************************************************************************** ++ End of file (rgx_meta.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgx_mips.h b/drivers/gpu/drm/img-rogue/rgx_mips.h +new file mode 100644 +index 000000000000..c2f381882f74 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_mips.h +@@ -0,0 +1,374 @@ ++/*************************************************************************/ /*! ++@File rgx_mips.h ++@Title ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Platform RGX ++@Description RGX MIPS definitions, kernel/user space ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(RGX_MIPS_H) ++#define RGX_MIPS_H ++ ++/* ++ * Utility defines for memory management ++ */ ++#define RGXMIPSFW_LOG2_PAGE_SIZE_4K (12) ++#define RGXMIPSFW_PAGE_SIZE_4K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4K) ++#define RGXMIPSFW_PAGE_MASK_4K (RGXMIPSFW_PAGE_SIZE_4K - 1) ++#define RGXMIPSFW_LOG2_PAGE_SIZE_64K (16) ++#define RGXMIPSFW_PAGE_SIZE_64K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_64K) ++#define RGXMIPSFW_PAGE_MASK_64K (RGXMIPSFW_PAGE_SIZE_64K - 1) ++#define RGXMIPSFW_LOG2_PAGE_SIZE_256K (18) ++#define RGXMIPSFW_PAGE_SIZE_256K (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_256K) ++#define RGXMIPSFW_PAGE_MASK_256K (RGXMIPSFW_PAGE_SIZE_256K - 1) ++#define RGXMIPSFW_LOG2_PAGE_SIZE_1MB (20) ++#define RGXMIPSFW_PAGE_SIZE_1MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_1MB) ++#define RGXMIPSFW_PAGE_MASK_1MB (RGXMIPSFW_PAGE_SIZE_1MB - 1) ++#define RGXMIPSFW_LOG2_PAGE_SIZE_4MB (22) ++#define RGXMIPSFW_PAGE_SIZE_4MB (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE_4MB) ++#define RGXMIPSFW_PAGE_MASK_4MB (RGXMIPSFW_PAGE_SIZE_4MB - 1) ++#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE (2) ++/* log2 page table sizes dependent on FW heap size and page size (for each OS) */ ++#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_4K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) ++#define RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K (RGX_FIRMWARE_HEAP_SHIFT - RGXMIPSFW_LOG2_PAGE_SIZE_64K + RGXMIPSFW_LOG2_PTE_ENTRY_SIZE) ++/* Maximum number of page table pages (both Host and MIPS pages) */ ++#define RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES (4) ++/* Total number of TLB entries */ ++#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES (16) ++/* "Uncached" caching policy */ ++#define RGXMIPSFW_UNCACHED_CACHE_POLICY (0X00000002U) ++/* "Write-back write-allocate" caching policy */ ++#define RGXMIPSFW_WRITEBACK_CACHE_POLICY (0X00000003) ++/* "Write-through no write-allocate" caching policy */ ++#define RGXMIPSFW_WRITETHROUGH_CACHE_POLICY (0X00000001) ++/* Cached policy used by MIPS in case of physical bus on 32 bit */ ++#define RGXMIPSFW_CACHED_POLICY (RGXMIPSFW_WRITEBACK_CACHE_POLICY) ++/* Cached policy used by MIPS in case of physical bus on more than 32 bit */ ++#define RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT (RGXMIPSFW_WRITETHROUGH_CACHE_POLICY) ++/* Total number of Remap entries */ ++#define RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * RGXMIPSFW_NUMBER_OF_TLB_ENTRIES) ++ ++ ++/* ++ * MIPS EntryLo/PTE format ++ */ ++ ++#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U) ++#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0X7FFFFFFF) ++#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN (0X80000000U) ++ ++#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U) ++#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0XBFFFFFFF) ++#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0X40000000U) ++ ++/* Page Frame Number */ ++#define RGXMIPSFW_ENTRYLO_PFN_SHIFT (6) ++#define RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12) ++/* Mask used for the MIPS Page Table in case of physical bus on 32 bit */ ++#define RGXMIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0) ++#define RGXMIPSFW_ENTRYLO_PFN_SIZE (20) ++/* Mask used for the MIPS Page Table in case of physical bus on more than 32 bit */ ++#define RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0U) ++#define RGXMIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24) ++#define RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \ ++ RGXMIPSFW_ENTRYLO_PFN_SHIFT) ++ ++#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U) ++#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0XFFFFFFC7U) ++ ++#define 
RGXMIPSFW_ENTRYLO_DIRTY_SHIFT (2U) ++#define RGXMIPSFW_ENTRYLO_DIRTY_CLRMSK (0XFFFFFFFB) ++#define RGXMIPSFW_ENTRYLO_DIRTY_EN (0X00000004U) ++ ++#define RGXMIPSFW_ENTRYLO_VALID_SHIFT (1U) ++#define RGXMIPSFW_ENTRYLO_VALID_CLRMSK (0XFFFFFFFD) ++#define RGXMIPSFW_ENTRYLO_VALID_EN (0X00000002U) ++ ++#define RGXMIPSFW_ENTRYLO_GLOBAL_SHIFT (0U) ++#define RGXMIPSFW_ENTRYLO_GLOBAL_CLRMSK (0XFFFFFFFE) ++#define RGXMIPSFW_ENTRYLO_GLOBAL_EN (0X00000001U) ++ ++#define RGXMIPSFW_ENTRYLO_DVG (RGXMIPSFW_ENTRYLO_DIRTY_EN | \ ++ RGXMIPSFW_ENTRYLO_VALID_EN | \ ++ RGXMIPSFW_ENTRYLO_GLOBAL_EN) ++#define RGXMIPSFW_ENTRYLO_UNCACHED (RGXMIPSFW_UNCACHED_CACHE_POLICY << \ ++ RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT) ++#define RGXMIPSFW_ENTRYLO_DVG_UNCACHED (RGXMIPSFW_ENTRYLO_DVG | RGXMIPSFW_ENTRYLO_UNCACHED) ++ ++ ++/* Remap Range Config Addr Out */ ++/* These defines refer to the upper half of the Remap Range Config register */ ++#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0) ++#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* wrt upper half of the register */ ++#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12) ++#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \ ++ RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT) ++ ++#if defined(SECURE_FW_CODE_OSID) && (SECURE_FW_CODE_OSID + 1 > 2) ++#define MIPS_FW_CODE_OSID (SECURE_FW_CODE_OSID) ++#elif defined(SECURE_FW_CODE_OSID) ++#define MIPS_FW_CODE_OSID (1U) ++#endif ++ ++ ++/* ++ * Pages to trampoline problematic physical addresses: ++ * - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000 ++ * - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000 ++ * - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000 ++ * - (benign trampoline) : 0x1FC0_3000 ++ * that would otherwise be erroneously remapped by the MIPS wrapper ++ * (see "Firmware virtual layout and remap configuration" section below) ++ */ ++ ++#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2) ++#define RGXMIPSFW_TRAMPOLINE_NUMPAGES (1U << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES) ++#define RGXMIPSFW_TRAMPOLINE_SIZE (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE_4K) ++#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE_4K) ++ ++#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) ++#define RGXMIPSFW_TRAMPOLINE_OFFSET(a) (a - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN) ++ ++#define RGXMIPSFW_SENSITIVE_ADDR(a) (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1UL << RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1U) & a)) ++ ++/* ++ * Firmware virtual layout and remap configuration ++ */ ++/* ++ * For each remap region we define: ++ * - the virtual base used by the Firmware to access code/data through that region ++ * - the microAptivAP physical address correspondent to the virtual base address, ++ * used as input address and remapped to the actual physical address ++ * - log2 of size of the region remapped by the MIPS wrapper, i.e. 
number of bits from ++ * the bottom of the base input address that survive onto the output address ++ * (this defines both the alignment and the maximum size of the remapped region) ++ * - one or more code/data segments within the remapped region ++ */ ++ ++/* Boot remap setup */ ++#define RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE (0xBFC00000) ++#define RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN (0x1FC00000U) ++#define RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE (12) ++#define RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE (RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE) ++ ++/* Data remap setup */ ++#define RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE (0xBFC01000) ++#define RGXMIPSFW_DATA_CACHED_REMAP_VIRTUAL_BASE (0x9FC01000) ++#define RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN (0x1FC01000U) ++#define RGXMIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE (12) ++#define RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE (RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE) ++ ++/* Code remap setup */ ++#define RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE (0x9FC02000) ++#define RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN (0x1FC02000U) ++#define RGXMIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE (12) ++#define RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE (RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE) ++ ++/* Permanent mappings setup */ ++#define RGXMIPSFW_PT_VIRTUAL_BASE (0xCF000000) ++#define RGXMIPSFW_REGISTERS_VIRTUAL_BASE (0xCF800000) ++#define RGXMIPSFW_STACK_VIRTUAL_BASE (0xCF600000) ++ ++ ++/* ++ * Bootloader configuration data ++ */ ++/* Bootloader configuration offset (where RGXMIPSFW_BOOT_DATA lives) ++ * within the bootloader/NMI data page */ ++#define RGXMIPSFW_BOOTLDR_CONF_OFFSET (0x0U) ++ ++ ++/* ++ * NMI shared data ++ */ ++/* Base address of the shared data within the bootloader/NMI data page */ ++#define RGXMIPSFW_NMI_SHARED_DATA_BASE (0x100) ++/* Size used by Debug dump data */ ++#define RGXMIPSFW_NMI_SHARED_SIZE (0x2B0) ++/* Offsets in the NMI shared area in 32-bit words */ ++#define RGXMIPSFW_NMI_SYNC_FLAG_OFFSET (0x0) ++#define RGXMIPSFW_NMI_STATE_OFFSET (0x1) ++#define RGXMIPSFW_NMI_ERROR_STATE_SET (0x1) ++ ++/* ++ * MIPS boot stage ++ */ ++#define RGXMIPSFW_BOOT_STAGE_OFFSET (0x400) ++ ++/* ++ * MIPS private data in the bootloader data page. ++ * Memory below this offset is used by the FW only, no interface data allowed. 
++ */ ++#define RGXMIPSFW_PRIVATE_DATA_OFFSET (0x800) ++ ++ ++/* The things that follow are excluded when compiling assembly sources */ ++#if !defined(RGXMIPSFW_ASSEMBLY_CODE) ++#include "img_types.h" ++#include "km/rgxdefs_km.h" ++ ++typedef struct ++{ ++ IMG_UINT64 ui64StackPhyAddr; ++ IMG_UINT64 ui64RegBase; ++ IMG_UINT64 aui64PTPhyAddr[RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES]; ++ IMG_UINT32 ui32PTLog2PageSize; ++ IMG_UINT32 ui32PTNumPages; ++ IMG_UINT32 ui32Reserved1; ++ IMG_UINT32 ui32Reserved2; ++} RGXMIPSFW_BOOT_DATA; ++ ++#define RGXMIPSFW_GET_OFFSET_IN_DWORDS(offset) (offset / sizeof(IMG_UINT32)) ++#define RGXMIPSFW_GET_OFFSET_IN_QWORDS(offset) (offset / sizeof(IMG_UINT64)) ++ ++/* Used for compatibility checks */ ++#define RGXMIPSFW_ARCHTYPE_VER_CLRMSK (0xFFFFE3FFU) ++#define RGXMIPSFW_ARCHTYPE_VER_SHIFT (10U) ++#define RGXMIPSFW_CORE_ID_VALUE (0x001U) ++#define RGXFW_PROCESSOR_MIPS "MIPS" ++ ++/* microAptivAP cache line size */ ++#define RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE (16U) ++ ++/* The SOCIF transactions are identified with the top 16 bits of the physical address emitted by the MIPS */ ++#define RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN (16U) ++ ++/* Values to put in the MIPS selectors for performance counters */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0 (9U) /* Icache accesses in COUNTER0 */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1 (9U) /* Icache misses in COUNTER1 */ ++ ++#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0 (10U) /* Dcache accesses in COUNTER0 */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1 (11U) /* Dcache misses in COUNTER1 */ ++ ++#define RGXMIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0 (5U) /* ITLB instruction accesses in COUNTER0 */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1 (7U) /* JTLB instruction access misses in COUNTER1 */ ++ ++#define RGXMIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0 (1U) /* Instructions completed in COUNTER0 */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1 (8U) /* JTLB data misses in COUNTER1 */ ++ ++#define RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT (5U) /* Shift for the Event field in the MIPS perf ctrl registers */ ++/* Additional flags for performance counters.
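The selector and mode-flag values above combine into a single control word per counter (the flag comment concludes just below). A sketch, assuming from the defines here alone that the event number sits at bit 5 and the mode bits occupy the low nibble; the demo_ names are illustrative mirrors, not driver identifiers.

#include <stdint.h>
#include <stdio.h>

/* Assumed mirrors of the defines above. */
#define DEMO_EVENT_SHIFT        5u  /* RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT */
#define DEMO_ICACHE_ACCESSES_C0 9u  /* event selector for COUNTER0 */
#define DEMO_COUNT_USER_MODE    8u
#define DEMO_COUNT_KERNEL_MODE  2u

int main(void)
{
    /* Event 9 in COUNTER0 (I-cache accesses), counted in user + kernel mode. */
    uint32_t ctrl = (DEMO_ICACHE_ACCESSES_C0 << DEMO_EVENT_SHIFT)
                  | DEMO_COUNT_USER_MODE
                  | DEMO_COUNT_KERNEL_MODE;
    printf("perf ctrl word: 0x%03X\n", (unsigned)ctrl); /* prints 0x12A */
    return 0;
}

Note that MIPS performance-counter event numbers are interpreted per counter, so event 9 means I-cache accesses in COUNTER0 but I-cache misses in COUNTER1, which is presumably why both selectors above carry the same value.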
See MIPS manual for further reference */ ++#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE (8U) ++#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE (2U) ++#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_EXL (1U) ++ ++ ++#define RGXMIPSFW_C0_NBHWIRQ 8 ++ ++/* Macros to decode C0_Cause register */ ++#define RGXMIPSFW_C0_CAUSE_EXCCODE(CAUSE) (((CAUSE) & 0x7cU) >> 2U) ++#define RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR 9 ++/* Use only when Coprocessor Unusable exception */ ++#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28U) & 0x3U) ++#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10) ++#define RGXMIPSFW_C0_CAUSE_FDCIPENDING (1UL << 21) ++#define RGXMIPSFW_C0_CAUSE_IV (1UL << 23) ++#define RGXMIPSFW_C0_CAUSE_IC (1UL << 25) ++#define RGXMIPSFW_C0_CAUSE_PCIPENDING (1UL << 26) ++#define RGXMIPSFW_C0_CAUSE_TIPENDING (1UL << 30) ++#define RGXMIPSFW_C0_CAUSE_BRANCH_DELAY (1UL << 31) ++ ++/* Macros to decode C0_Debug register */ ++#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10U) & 0x1fU) ++#define RGXMIPSFW_C0_DEBUG_DSS (1UL << 0) ++#define RGXMIPSFW_C0_DEBUG_DBP (1UL << 1) ++#define RGXMIPSFW_C0_DEBUG_DDBL (1UL << 2) ++#define RGXMIPSFW_C0_DEBUG_DDBS (1UL << 3) ++#define RGXMIPSFW_C0_DEBUG_DIB (1UL << 4) ++#define RGXMIPSFW_C0_DEBUG_DINT (1UL << 5) ++#define RGXMIPSFW_C0_DEBUG_DIBIMPR (1UL << 6) ++#define RGXMIPSFW_C0_DEBUG_DDBLIMPR (1UL << 18) ++#define RGXMIPSFW_C0_DEBUG_DDBSIMPR (1UL << 19) ++#define RGXMIPSFW_C0_DEBUG_IEXI (1UL << 20) ++#define RGXMIPSFW_C0_DEBUG_DBUSEP (1UL << 21) ++#define RGXMIPSFW_C0_DEBUG_CACHEEP (1UL << 22) ++#define RGXMIPSFW_C0_DEBUG_MCHECKP (1UL << 23) ++#define RGXMIPSFW_C0_DEBUG_IBUSEP (1UL << 24) ++#define RGXMIPSFW_C0_DEBUG_DM (1UL << 30) ++#define RGXMIPSFW_C0_DEBUG_DBD (1UL << 31) ++ ++/* Macros to decode TLB entries */ ++#define RGXMIPSFW_TLB_GET_MASK(PAGE_MASK) (((PAGE_MASK) >> 13) & 0XFFFFU) ++#define RGXMIPSFW_TLB_GET_PAGE_SIZE(PAGE_MASK) ((((PAGE_MASK) | 0x1FFFU) + 1U) >> 11U) /* page size in KB */ ++#define RGXMIPSFW_TLB_GET_PAGE_MASK(PAGE_SIZE) ((((PAGE_SIZE) << 11) - 1) & ~0x7FF) /* page size in KB */ ++#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI) ((ENTRY_HI) >> 13) ++#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO) (((ENTRY_LO) >> 3) & 0x7U) ++#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO) (((ENTRY_LO) >> 6) & 0XFFFFFU) ++/* GET_PA uses a non-standard PFN mask for 36 bit addresses */ ++#define RGXMIPSFW_TLB_GET_PA(ENTRY_LO) (((IMG_UINT64)(ENTRY_LO) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) << 6) ++#define RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO) (((ENTRY_LO) >> 30) & 0x3U) ++#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO) ((ENTRY_LO) & 0x7U) ++#define RGXMIPSFW_TLB_GLOBAL (1U) ++#define RGXMIPSFW_TLB_VALID (1U << 1) ++#define RGXMIPSFW_TLB_DIRTY (1U << 2) ++#define RGXMIPSFW_TLB_XI (1U << 30) ++#define RGXMIPSFW_TLB_RI (1U << 31) ++ ++typedef struct { ++ IMG_UINT32 ui32TLBPageMask; ++ IMG_UINT32 ui32TLBHi; ++ IMG_UINT32 ui32TLBLo0; ++ IMG_UINT32 ui32TLBLo1; ++} RGX_MIPS_TLB_ENTRY; ++ ++typedef struct { ++ IMG_UINT32 ui32RemapAddrIn; /* always 4k aligned */ ++ IMG_UINT32 ui32RemapAddrOut; /* always 4k aligned */ ++ IMG_UINT32 ui32RemapRegionSize; ++} RGX_MIPS_REMAP_ENTRY; ++ ++typedef struct { ++ IMG_UINT32 ui32ErrorState; /* This must come first in the structure */ ++ IMG_UINT32 ui32ErrorEPC; ++ IMG_UINT32 ui32StatusRegister; ++ IMG_UINT32 ui32CauseRegister; ++ IMG_UINT32 ui32BadRegister; ++ IMG_UINT32 ui32EPC; ++ IMG_UINT32 ui32SP; ++ IMG_UINT32 ui32Debug; ++ IMG_UINT32 ui32DEPC; ++ IMG_UINT32 ui32BadInstr; ++ IMG_UINT32 ui32UnmappedAddress; 
++ RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES]; ++ RGX_MIPS_REMAP_ENTRY asRemap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES]; ++} RGX_MIPS_STATE; ++ ++#endif /* RGXMIPSFW_ASSEMBLY_CODE */ ++ ++#endif /* RGX_MIPS_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_options.h b/drivers/gpu/drm/img-rogue/rgx_options.h +new file mode 100644 +index 000000000000..91fc6522d7ee +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_options.h +@@ -0,0 +1,304 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX build options ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* Each build option listed here is packed into a dword which provides up to ++ * log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM and ++ * (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM. ++ * The corresponding bit is set if the build option was enabled at compile ++ * time. ++ * ++ * In order to extract the enabled build flags the INTERNAL_TEST switch should ++ * be enabled in a client program which includes this header. Then the client ++ * can test specific build flags by reading the bit value at ++ * ##OPTIONNAME##_SET_OFFSET ++ * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS. ++ * ++ * IMPORTANT: add new options to unused bits or define a new dword ++ * (e.g. 
RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield ++ * remains backwards compatible. ++ */ ++ ++#ifndef RGX_OPTIONS_H ++#define RGX_OPTIONS_H ++ ++#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL ++ ++#define NO_HARDWARE_OPTION "NO_HARDWARE " ++#if defined(NO_HARDWARE) || defined(INTERNAL_TEST) ++ #define NO_HARDWARE_SET_OFFSET OPTIONS_BIT0 ++ #define OPTIONS_BIT0 (0x1UL << 0) ++ #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT0 0x0UL ++#endif /* NO_HARDWARE */ ++ ++#define PDUMP_OPTION "PDUMP " ++#if defined(PDUMP) || defined(INTERNAL_TEST) ++ #define PDUMP_SET_OFFSET OPTIONS_BIT1 ++ #define OPTIONS_BIT1 (0x1UL << 1) ++ #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT1 0x0UL ++#endif /* PDUMP */ ++ ++/* No longer used */ ++#define INTERNAL_TEST_OPTION "INTERNAL_TEST " ++#if defined(INTERNAL_TEST) ++ #define UNUSED_SET_OFFSET OPTIONS_BIT2 ++ #define OPTIONS_BIT2 (0x1UL << 2) ++ #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT2 0x0UL ++#endif ++ ++/* No longer used */ ++#define UNUSED_OPTION " " ++#if defined(INTERNAL_TEST) ++ #define OPTIONS_BIT3 (0x1UL << 3) ++ #define INTERNAL_TEST_OPTION "INTERNAL_TEST " ++ #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT3 0x0UL ++#endif ++ ++#define SUPPORT_RGX_OPTION " " ++#if defined(SUPPORT_RGX) || defined(INTERNAL_TEST) ++ #define SUPPORT_RGX_SET_OFFSET OPTIONS_BIT4 ++ #define OPTIONS_BIT4 (0x1UL << 4) ++ #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT4 0x0UL ++#endif /* SUPPORT_RGX */ ++ ++#define SUPPORT_SECURE_EXPORT_OPTION "SECURE_EXPORTS " ++#if defined(SUPPORT_SECURE_EXPORT) || defined(INTERNAL_TEST) ++ #define SUPPORT_SECURE_EXPORT_SET_OFFSET OPTIONS_BIT5 ++ #define OPTIONS_BIT5 (0x1UL << 5) ++ #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT5 0x0UL ++#endif /* SUPPORT_SECURE_EXPORT */ ++ ++#define SUPPORT_INSECURE_EXPORT_OPTION "INSECURE_EXPORTS " ++#if defined(SUPPORT_INSECURE_EXPORT) || defined(INTERNAL_TEST) ++ #define SUPPORT_INSECURE_EXPORT_SET_OFFSET OPTIONS_BIT6 ++ #define OPTIONS_BIT6 (0x1UL << 6) ++ #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT6 0x0UL ++#endif /* SUPPORT_INSECURE_EXPORT */ ++ ++#define SUPPORT_VFP_OPTION "VFP " ++#if defined(SUPPORT_VFP) || defined(INTERNAL_TEST) ++ #define SUPPORT_VFP_SET_OFFSET OPTIONS_BIT7 ++ #define OPTIONS_BIT7 (0x1UL << 7) ++ #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT7 0x0UL ++#endif /* SUPPORT_VFP */ ++ ++#define SUPPORT_WORKLOAD_ESTIMATION_OPTION "WORKLOAD_ESTIMATION " ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined(INTERNAL_TEST) ++ #define SUPPORT_WORKLOAD_ESTIMATION_OFFSET OPTIONS_BIT8 ++ #define OPTIONS_BIT8 (0x1UL << 8) ++ #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT8 0x0UL ++#endif /* SUPPORT_WORKLOAD_ESTIMATION */ ++#define OPTIONS_WORKLOAD_ESTIMATION_MASK (0x1UL << 8) ++ ++#define SUPPORT_PDVFS_OPTION "PDVFS " ++#if defined(SUPPORT_PDVFS) || defined(INTERNAL_TEST) ++ #define 
SUPPORT_PDVFS_OFFSET OPTIONS_BIT9 ++ #define OPTIONS_BIT9 (0x1UL << 9) ++ #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT9 0x0UL ++#endif /* SUPPORT_PDVFS */ ++#define OPTIONS_PDVFS_MASK (0x1UL << 9) ++ ++#define DEBUG_OPTION "DEBUG " ++#if defined(DEBUG) || defined(INTERNAL_TEST) ++ #define DEBUG_SET_OFFSET OPTIONS_BIT10 ++ #define OPTIONS_BIT10 (0x1UL << 10) ++ #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT10 0x0UL ++#endif /* DEBUG */ ++/* The bit position of this should be the same as DEBUG_SET_OFFSET option ++ * when defined. ++ */ ++#define OPTIONS_DEBUG_MASK (0x1UL << 10) ++ ++#define SUPPORT_BUFFER_SYNC_OPTION "BUFFER_SYNC " ++#if defined(SUPPORT_BUFFER_SYNC) || defined(INTERNAL_TEST) ++ #define SUPPORT_BUFFER_SYNC_SET_OFFSET OPTIONS_BIT11 ++ #define OPTIONS_BIT11 (0x1UL << 11) ++ #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT11 0x0UL ++#endif /* SUPPORT_BUFFER_SYNC */ ++ ++#define SUPPORT_AUTOVZ_OPTION "AUTOVZ " ++#if defined(SUPPORT_AUTOVZ) ++ #define SUPPORT_AUTOVZ_OFFSET OPTIONS_BIT12 ++ #define OPTIONS_BIT12 (0x1UL << 12) ++ #if OPTIONS_BIT12 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT12 0x0UL ++#endif /* SUPPORT_AUTOVZ */ ++ ++#define SUPPORT_AUTOVZ_HW_REGS_OPTION "AUTOVZ_HW_REGS " ++#if defined(SUPPORT_AUTOVZ_HW_REGS) ++ #define SUPPORT_AUTOVZ_HW_REGS_OFFSET OPTIONS_BIT13 ++ #define OPTIONS_BIT13 (0x1UL << 13) ++ #if OPTIONS_BIT13 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT13 0x0UL ++#endif /* SUPPORT_AUTOVZ_HW_REGS */ ++ ++#define RGX_FW_IRQ_OS_COUNTERS_OPTION "FW_IRQ_OS_COUNTERS " ++#if defined(RGX_FW_IRQ_OS_COUNTERS) || defined(INTERNAL_TEST) ++ #define SUPPORT_FW_IRQ_REG_COUNTERS OPTIONS_BIT14 ++ #define OPTIONS_BIT14 (0x1UL << 14) ++ #if OPTIONS_BIT14 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT14 0x0UL ++#endif /* RGX_FW_IRQ_OS_COUNTERS */ ++ ++#define VALIDATION_EN_MASK (0x1UL << 15) ++#define SUPPORT_VALIDATION_OPTION "VALIDATION " ++#if defined(SUPPORT_VALIDATION) ++ #define SUPPORT_VALIDATION_OFFSET OPTIONS_BIT15 ++ #define OPTIONS_BIT15 (0x1UL << 15) ++ #if OPTIONS_BIT15 > RGX_BUILD_OPTIONS_MASK_KM ++ #error "Bit exceeds reserved range" ++ #endif ++#else ++ #define OPTIONS_BIT15 0x0UL ++#endif /* SUPPORT_VALIDATION */ ++ ++#define RGX_BUILD_OPTIONS_KM \ ++ (OPTIONS_BIT0 |\ ++ OPTIONS_BIT1 |\ ++ OPTIONS_BIT2 |\ ++ OPTIONS_BIT3 |\ ++ OPTIONS_BIT4 |\ ++ OPTIONS_BIT6 |\ ++ OPTIONS_BIT7 |\ ++ OPTIONS_BIT8 |\ ++ OPTIONS_BIT9 |\ ++ OPTIONS_BIT10 |\ ++ OPTIONS_BIT11 |\ ++ OPTIONS_BIT12 |\ ++ OPTIONS_BIT13 |\ ++ OPTIONS_BIT14 |\ ++ OPTIONS_BIT15) ++ ++#define RGX_BUILD_OPTIONS_LIST \ ++ { \ ++ NO_HARDWARE_OPTION, \ ++ PDUMP_OPTION, \ ++ INTERNAL_TEST_OPTION, \ ++ UNUSED_OPTION, \ ++ SUPPORT_RGX_OPTION, \ ++ SUPPORT_SECURE_EXPORT_OPTION, \ ++ SUPPORT_INSECURE_EXPORT_OPTION, \ ++ SUPPORT_VFP_OPTION, \ ++ SUPPORT_WORKLOAD_ESTIMATION_OPTION, \ ++ SUPPORT_PDVFS_OPTION, \ ++ DEBUG_OPTION, \ ++ SUPPORT_BUFFER_SYNC_OPTION, \ ++ SUPPORT_AUTOVZ_OPTION, \ ++ SUPPORT_AUTOVZ_HW_REGS_OPTION, \ ++ RGX_FW_IRQ_OS_COUNTERS_OPTION, \ ++ SUPPORT_VALIDATION_OPTION \ ++ } ++ ++#define RGX_BUILD_OPTIONS_MASK_FW \ ++ (RGX_BUILD_OPTIONS_MASK_KM & \ ++ ~OPTIONS_BIT11) ++ ++#define 
OPTIONS_BIT31 (0x1UL << 31) ++#if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM ++#error "Bit exceeds reserved range" ++#endif ++#define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET OPTIONS_BIT31 ++ ++#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31) ++ ++#define OPTIONS_STRICT (RGX_BUILD_OPTIONS & \ ++ ~(OPTIONS_DEBUG_MASK | \ ++ OPTIONS_WORKLOAD_ESTIMATION_MASK | \ ++ OPTIONS_PDVFS_MASK)) ++ ++#endif /* RGX_OPTIONS_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_pdump_panics.h b/drivers/gpu/drm/img-rogue/rgx_pdump_panics.h +new file mode 100644 +index 000000000000..fce2b3efab69 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_pdump_panics.h +@@ -0,0 +1,64 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX PDump panic definitions header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX PDump panic definitions header ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGX_PDUMP_PANICS_H_) ++#define RGX_PDUMP_PANICS_H_ ++ ++/*! Unique device specific IMG_UINT16 panic IDs to identify the cause of an ++ * RGX PDump panic in a PDump script. */ ++typedef enum ++{ ++ RGX_PDUMP_PANIC_UNDEFINED = 0, ++ ++ /* These panics occur when test parameters and driver configuration ++ * enable features that require the firmware and host driver to ++ * communicate. Such features are not supported with off-line playback. 
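Looking back at the options dword completed just above (the PDump panic ID list resumes immediately below): the packed bits exist so that two independently built components can compare configurations, with OPTIONS_STRICT masking out the options that are allowed to differ. A sketch of such a comparison, using demo_ mirrors of the masks and an invented options value:

#include <stdint.h>
#include <stdio.h>

/* Assumed mirrors of the masks defined in rgx_options.h above. */
#define DEMO_OPTIONS_DEBUG_MASK (0x1u << 10)
#define DEMO_OPTIONS_WLE_MASK   (0x1u << 8)  /* workload estimation */
#define DEMO_OPTIONS_PDVFS_MASK (0x1u << 9)
#define DEMO_STRICT_IGNORED     (DEMO_OPTIONS_DEBUG_MASK | \
                                 DEMO_OPTIONS_WLE_MASK   | \
                                 DEMO_OPTIONS_PDVFS_MASK)

/* Nonzero when two builds agree on every option that must match. */
static int demo_options_compatible(uint32_t fw_options, uint32_t km_options)
{
    return (fw_options & ~DEMO_STRICT_IGNORED) ==
           (km_options & ~DEMO_STRICT_IGNORED);
}

int main(void)
{
    uint32_t km = 0x0411u;                       /* e.g. NO_HARDWARE + RGX + DEBUG */
    uint32_t fw = km & ~DEMO_OPTIONS_DEBUG_MASK; /* same build, non-DEBUG firmware */
    printf("compatible: %d\n", demo_options_compatible(fw, km)); /* 1 */
    return 0;
}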
++ */ ++ RGX_PDUMP_PANIC_ZSBUFFER_BACKING = 101, /*!< Requests ZSBuffer to be backed with physical pages */ ++ RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING = 102, /*!< Requests ZSBuffer to be unbacked */ ++ RGX_PDUMP_PANIC_FREELIST_GROW = 103, /*!< Requests an on-demand freelist grow/shrink */ ++ RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */ ++ RGX_PDUMP_PANIC_SPARSEMEM_SWAP = 105, /*!< Requests sparse remap memory swap feature */ ++} RGX_PDUMP_PANIC; ++ ++#endif /* RGX_PDUMP_PANICS_H_ */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_riscv.h b/drivers/gpu/drm/img-rogue/rgx_riscv.h +new file mode 100644 +index 000000000000..e5be2a562f34 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_riscv.h +@@ -0,0 +1,250 @@ ++/*************************************************************************/ /*! ++@File rgx_riscv.h ++@Title ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Platform RGX ++@Description RGX RISCV definitions, kernel/user space ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
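The panic IDs above are emitted for an off-line PDump script reader to interpret (the rgx_riscv.h licence comment concludes just below). A trivial, purely illustrative decoder such as a playback tool might carry; the demo_ names are not part of the driver.

#include <stdio.h>

/* Mirrors of the RGX_PDUMP_PANIC values above. */
enum demo_pdump_panic {
    DEMO_PANIC_UNDEFINED                = 0,
    DEMO_PANIC_ZSBUFFER_BACKING         = 101,
    DEMO_PANIC_ZSBUFFER_UNBACKING       = 102,
    DEMO_PANIC_FREELIST_GROW            = 103,
    DEMO_PANIC_FREELISTS_RECONSTRUCTION = 104,
    DEMO_PANIC_SPARSEMEM_SWAP           = 105,
};

static const char *demo_panic_str(unsigned id)
{
    switch (id) {
    case DEMO_PANIC_ZSBUFFER_BACKING:         return "ZS-buffer backing request";
    case DEMO_PANIC_ZSBUFFER_UNBACKING:       return "ZS-buffer unbacking request";
    case DEMO_PANIC_FREELIST_GROW:            return "on-demand freelist grow/shrink";
    case DEMO_PANIC_FREELISTS_RECONSTRUCTION: return "freelist reconstruction";
    case DEMO_PANIC_SPARSEMEM_SWAP:           return "sparse remap memory swap";
    default:                                  return "undefined";
    }
}

int main(void)
{
    printf("%s\n", demo_panic_str(103)); /* on-demand freelist grow/shrink */
    return 0;
}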
++*/ /**************************************************************************/ ++ ++#if !defined(RGX_RISCV_H) ++#define RGX_RISCV_H ++ ++#include "km/rgxdefs_km.h" ++ ++ ++/* Utility defines to convert regions to virtual addresses and remaps */ ++#define RGXRISCVFW_GET_REGION_BASE(r) IMG_UINT32_C((r) << 28) ++#define RGXRISCVFW_GET_REGION(a) IMG_UINT32_C((a) >> 28) ++#define RGXRISCVFW_MAX_REGION_SIZE IMG_UINT32_C(1 << 28) ++#define RGXRISCVFW_GET_REMAP(r) (RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 + ((r) * 8U)) ++ ++/* RISCV remap output is aligned to 4K */ ++#define RGXRISCVFW_REMAP_CONFIG_DEVVADDR_ALIGN (0x1000U) ++ ++/* ++ * FW bootloader defines ++ */ ++#define RGXRISCVFW_BOOTLDR_CODE_REGION IMG_UINT32_C(0xC) ++#define RGXRISCVFW_BOOTLDR_DATA_REGION IMG_UINT32_C(0x5) ++#define RGXRISCVFW_BOOTLDR_CODE_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_CODE_REGION)) ++#define RGXRISCVFW_BOOTLDR_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_BOOTLDR_DATA_REGION)) ++#define RGXRISCVFW_BOOTLDR_CODE_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_CODE_REGION)) ++#define RGXRISCVFW_BOOTLDR_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_BOOTLDR_DATA_REGION)) ++ ++/* Bootloader data offset in dwords from the beginning of the FW data allocation */ ++#define RGXRISCVFW_BOOTLDR_CONF_OFFSET (0x0) ++ ++/* ++ * FW coremem region defines ++ */ ++#define RGXRISCVFW_COREMEM_REGION IMG_UINT32_C(0x8) ++#define RGXRISCVFW_COREMEM_MAX_SIZE IMG_UINT32_C(0x10000000) /* 256 MB */ ++#define RGXRISCVFW_COREMEM_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_COREMEM_REGION)) ++#define RGXRISCVFW_COREMEM_END (RGXRISCVFW_COREMEM_BASE + RGXRISCVFW_COREMEM_MAX_SIZE - 1U) ++ ++ ++/* ++ * Host-FW shared data defines ++ */ ++#define RGXRISCVFW_SHARED_CACHED_DATA_REGION (0x6UL) ++#define RGXRISCVFW_SHARED_UNCACHED_DATA_REGION (0xDUL) ++#define RGXRISCVFW_SHARED_CACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) ++#define RGXRISCVFW_SHARED_UNCACHED_DATA_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) ++#define RGXRISCVFW_SHARED_CACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_CACHED_DATA_REGION)) ++#define RGXRISCVFW_SHARED_UNCACHED_DATA_REMAP (RGXRISCVFW_GET_REMAP(RGXRISCVFW_SHARED_UNCACHED_DATA_REGION)) ++ ++ ++/* ++ * GPU SOCIF access defines ++ */ ++#define RGXRISCVFW_SOCIF_REGION (0x2U) ++#define RGXRISCVFW_SOCIF_BASE (RGXRISCVFW_GET_REGION_BASE(RGXRISCVFW_SOCIF_REGION)) ++ ++ ++/* The things that follow are excluded when compiling assembly sources */ ++#if !defined(RGXRISCVFW_ASSEMBLY_CODE) ++#include "img_types.h" ++ ++#define RGXFW_PROCESSOR_RISCV "RISCV" ++#define RGXRISCVFW_CORE_ID_VALUE (0x00450B02U) ++#define RGXRISCVFW_MISA_ADDR (0x301U) ++#define RGXRISCVFW_MISA_VALUE (0x40001104U) ++#define RGXRISCVFW_MSCRATCH_ADDR (0x340U) ++ ++typedef struct ++{ ++ IMG_UINT64 ui64CorememCodeDevVAddr; ++ IMG_UINT64 ui64CorememDataDevVAddr; ++ IMG_UINT32 ui32CorememCodeFWAddr; ++ IMG_UINT32 ui32CorememDataFWAddr; ++ IMG_UINT32 ui32CorememCodeSize; ++ IMG_UINT32 ui32CorememDataSize; ++ IMG_UINT32 ui32Flags; ++ IMG_UINT32 ui32Reserved; ++} RGXRISCVFW_BOOT_DATA; ++ ++/* ++ * List of registers to be printed in debug dump. 
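The region helpers above give each 256 MB slice of the RISC-V firmware address map a fixed virtual base and one 64-bit remap register (the register-list comment continues below). A sketch of that arithmetic, treating RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 as a symbolic zero offset; the demo_ names mirror the macros above and are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define DEMO_REGION_BASE(r) ((uint32_t)((r) << 28)) /* RGXRISCVFW_GET_REGION_BASE */
#define DEMO_REGION_OF(a)   ((uint32_t)((a) >> 28)) /* RGXRISCVFW_GET_REGION */
#define DEMO_REMAP_CONFIG0  0x0u /* stand-in for RGX_CR_FWCORE_ADDR_REMAP_CONFIG0 */
#define DEMO_REMAP_REG(r)   (DEMO_REMAP_CONFIG0 + ((r) * 8u)) /* RGXRISCVFW_GET_REMAP */

int main(void)
{
    /* Bootloader code lives in region 0xC: virtual base 0xC0000000,
     * programmed through the remap register at byte offset 0x60. */
    printf("code base: 0x%08X\n", (unsigned)DEMO_REGION_BASE(0xCu)); /* 0xC0000000 */
    printf("remap reg: +0x%02X\n", (unsigned)DEMO_REMAP_REG(0xCu));  /* 0x60 */
    printf("region of 0x8001F000: %u\n",
           (unsigned)DEMO_REGION_OF(0x8001F000u)); /* 8, the coremem region */
    return 0;
}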
++ * First column: register names (general purpose or control/status registers) ++ * Second column: register number to be used in abstract access register command ++ * (see RISC-V debug spec v0.13) ++ */ ++#define RGXRISCVFW_DEBUG_DUMP_REGISTERS \ ++ X(pc, 0x7b1) /* dpc */ \ ++ X(ra, 0x1001) \ ++ X(sp, 0x1002) \ ++ X(mepc, 0x341) \ ++ X(mcause, 0x342) \ ++ X(mdseac, 0xfc0) \ ++ X(mstatus, 0x300) \ ++ X(mie, 0x304) \ ++ X(mip, 0x344) \ ++ X(mscratch, 0x340) \ ++ X(mbvnc0, 0xffe) \ ++ X(mbvnc1, 0xfff) \ ++ X(micect, 0x7f0) \ ++ X(mdcect, 0x7f3) \ ++ X(mdcrfct, 0x7f4) \ ++ ++typedef struct ++{ ++#define X(name, address) \ ++ IMG_UINT32 name; ++ ++ RGXRISCVFW_DEBUG_DUMP_REGISTERS ++#undef X ++} RGXRISCVFW_STATE; ++ ++ ++#define RGXRISCVFW_MCAUSE_INTERRUPT (1U << 31) ++ ++#define RGXRISCVFW_MCAUSE_TABLE \ ++ X(0x00000000U, IMG_FALSE, "NMI pin assertion") /* Also reset value */ \ ++ X(0x00000001U, IMG_TRUE, "Instruction access fault") \ ++ X(0x00000002U, IMG_TRUE, "Illegal instruction") \ ++ X(0x00000003U, IMG_TRUE, "Breakpoint") \ ++ X(0x00000004U, IMG_TRUE, "Load address misaligned") \ ++ X(0x00000005U, IMG_TRUE, "Load access fault") \ ++ X(0x00000006U, IMG_TRUE, "Store/AMO address misaligned") \ ++ X(0x00000007U, IMG_TRUE, "Store/AMO access fault") \ ++ X(0x0000000BU, IMG_TRUE, "Environment call from M-mode (FW assert)") \ ++ X(0x80000007U, IMG_FALSE, "Machine timer interrupt") \ ++ X(0x8000000BU, IMG_FALSE, "Machine external interrupt") \ ++ X(0x8000001EU, IMG_FALSE, "Machine correctable error local interrupt") \ ++ X(0xF0000000U, IMG_TRUE, "Machine D-bus store error NMI") \ ++ X(0xF0000001U, IMG_TRUE, "Machine D-bus non-blocking load error NMI") \ ++ X(0xF0000002U, IMG_TRUE, "dCache unrecoverable NMI") ++ ++ ++/* Debug module HW defines */ ++#define RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER (0U) ++#define RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY (2U) ++#define RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT (2UL << 20) ++#define RGXRISCVFW_DMI_COMMAND_WRITE (1UL << 16) ++#define RGXRISCVFW_DMI_COMMAND_READ (0UL << 16) ++#define RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT (2U) ++ ++/* Abstract command error codes (descriptions from RISC-V debug spec v0.13) */ ++typedef enum ++{ ++ /* No error. */ ++ RISCV_ABSTRACT_CMD_NO_ERROR = 0, ++ ++ /* ++ * An abstract command was executing while command, abstractcs, or abstractauto ++ * was written, or when one of the data or progbuf registers was read or ++ * written. This status is only written if cmderr contains 0. ++ */ ++ RISCV_ABSTRACT_CMD_BUSY = 1, ++ ++ /* ++ * The requested command is not supported, regardless of whether ++ * the hart is running or not. ++ */ ++ RISCV_ABSTRACT_CMD_NOT_SUPPORTED = 2, ++ ++ /* ++ * An exception occurred while executing the command ++ * (e.g. while executing the Program Buffer). ++ */ ++ RISCV_ABSTRACT_CMD_EXCEPTION = 3, ++ ++ /* ++ * The abstract command couldn't execute because the hart wasn't in the required ++ * state (running/halted), or unavailable. ++ */ ++ RISCV_ABSTRACT_CMD_HALT_RESUME = 4, ++ ++ /* ++ * The abstract command failed due to a bus error ++ * (e.g. alignment, access size, or timeout). ++ */ ++ RISCV_ABSTRACT_CMD_BUS_ERROR = 5, ++ ++ /* The command failed for another reason. */ ++ RISCV_ABSTRACT_CMD_OTHER_ERROR = 7 ++ ++} RGXRISCVFW_ABSTRACT_CMD_ERR; ++ ++/* System Bus error codes (descriptions from RISC-V debug spec v0.13) */ ++typedef enum ++{ ++ /* There was no bus error. */ ++ RISCV_SYSBUS_NO_ERROR = 0, ++ ++ /* There was a timeout. */ ++ RISCV_SYSBUS_TIMEOUT = 1, ++ ++ /* A bad address was accessed. 
*/ ++ RISCV_SYSBUS_BAD_ADDRESS = 2, ++ ++ /* There was an alignment error. */ ++ RISCV_SYSBUS_BAD_ALIGNMENT = 3, ++ ++ /* An access of unsupported size was requested. */ ++ RISCV_SYSBUS_UNSUPPORTED_SIZE = 4, ++ ++ /* Other. */ ++ RISCV_SYSBUS_OTHER_ERROR = 7 ++ ++} RGXRISCVFW_SYSBUS_ERR; ++ ++#endif /* RGXRISCVFW_ASSEMBLY_CODE */ ++ ++#endif /* RGX_RISCV_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgx_tq_shared.h b/drivers/gpu/drm/img-rogue/rgx_tq_shared.h +new file mode 100644 +index 000000000000..dc10b6eecc91 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgx_tq_shared.h +@@ -0,0 +1,63 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX transfer queue shared ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Shared definitions between client and server ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
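Stepping back to the X-macro lists in rgx_riscv.h above (the TQ header's licence comment concludes below): RGXRISCVFW_DEBUG_DUMP_REGISTERS is expanded once, with X defined as a field declaration, to build RGXRISCVFW_STATE, and can be expanded again to generate matching iteration code. A self-contained two-entry illustration of the pattern; the demo_ list is a stand-in, not the driver's list.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for RGXRISCVFW_DEBUG_DUMP_REGISTERS (two entries only). */
#define DEMO_DUMP_REGISTERS \
    X(pc, 0x7b1)            \
    X(ra, 0x1001)

/* First expansion: one uint32_t field per register. */
typedef struct {
#define X(name, address) uint32_t name;
    DEMO_DUMP_REGISTERS
#undef X
} DEMO_STATE;

/* Second expansion: print every field with its abstract-access register number. */
static void demo_dump(const DEMO_STATE *s)
{
#define X(name, address) \
    printf("%-4s (abstract reg 0x%04x) = 0x%08x\n", \
           #name, (unsigned)(address), (unsigned)s->name);
    DEMO_DUMP_REGISTERS
#undef X
}

int main(void)
{
    DEMO_STATE s = { .pc = 0xC0000080u, .ra = 0xC0000000u };
    demo_dump(&s);
    return 0;
}

Keeping the register list in one place guarantees the struct layout and the dump loop can never drift apart; the same trick drives RGXRISCVFW_MCAUSE_TABLE above.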
++*/ /**************************************************************************/ ++ ++#ifndef RGX_TQ_SHARED_H ++#define RGX_TQ_SHARED_H ++ ++#define TQ_MAX_PREPARES_PER_SUBMIT 16U ++ ++#define TQ_PREP_FLAGS_COMMAND_3D 0x0U ++#define TQ_PREP_FLAGS_COMMAND_2D 0x1U ++#define TQ_PREP_FLAGS_COMMAND_MASK (0xfU) ++#define TQ_PREP_FLAGS_COMMAND_SHIFT 0 ++#define TQ_PREP_FLAGS_PDUMPCONTINUOUS (1U << 4) ++#define TQ_PREP_FLAGS_START (1U << 5) ++#define TQ_PREP_FLAGS_END (1U << 6) ++ ++#define TQ_PREP_FLAGS_COMMAND_SET(m) \ ++ ((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK) ++ ++#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \ ++ (((m & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT) == TQ_PREP_FLAGS_COMMAND_##n) ++ ++#endif /* RGX_TQ_SHARED_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxapi_km.h b/drivers/gpu/drm/img-rogue/rgxapi_km.h +new file mode 100644 +index 000000000000..65ba85d20b4b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxapi_km.h +@@ -0,0 +1,336 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX API Header kernel mode ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exported RGX API details ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
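The TQ_PREP_FLAGS_* helpers above pack the command type into the low nibble and submit-level markers into the bits above it (rgxapi_km.h begins after this aside). A sketch with mirrored demo_ values, illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Assumed mirrors of the TQ_PREP_FLAGS_* defines above. */
#define DEMO_CMD_3D       0x0u
#define DEMO_CMD_2D       0x1u
#define DEMO_CMD_MASK     0xFu
#define DEMO_CMD_SHIFT    0
#define DEMO_FLAG_START   (1u << 5)
#define DEMO_FLAG_END     (1u << 6)

#define DEMO_CMD_SET(c)   (((c) << DEMO_CMD_SHIFT) & DEMO_CMD_MASK)
#define DEMO_CMD_IS(w, c) ((((w) & DEMO_CMD_MASK) >> DEMO_CMD_SHIFT) == (c))

int main(void)
{
    /* A single 2D prepare that both starts and ends its submit. */
    uint32_t word = DEMO_CMD_SET(DEMO_CMD_2D) | DEMO_FLAG_START | DEMO_FLAG_END;
    printf("word = 0x%02X, is2D = %d, is3D = %d\n", (unsigned)word,
           DEMO_CMD_IS(word, DEMO_CMD_2D), DEMO_CMD_IS(word, DEMO_CMD_3D));
    return 0; /* prints: word = 0x61, is2D = 1, is3D = 0 */
}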
++*/ /**************************************************************************/ ++ ++#ifndef RGXAPI_KM_H ++#define RGXAPI_KM_H ++ ++#if defined(SUPPORT_SHARED_SLC) ++/*************************************************************************/ /*! ++@Function RGXInitSLC ++@Description Initialise the SLC after a power up. Calling this function ++ is required when using SUPPORT_SHARED_SLC; otherwise it must ++ not be called. ++ ++@Input hDevHandle RGX Device Node ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle); ++#endif ++ ++#include "rgx_hwperf.h" ++ ++ ++/****************************************************************************** ++ * RGX HW Performance Profiling Control API(s) ++ *****************************************************************************/ ++ ++/*! HWPerf device identification structure */ ++typedef struct _RGX_HWPERF_DEVICE_ ++{ ++ IMG_CHAR pszName[20]; /*!< Helps identify this device uniquely */ ++ IMG_HANDLE hDevData; /*!< Handle for the server */ ++ ++ struct _RGX_HWPERF_DEVICE_ *psNext; /*!< Next device if any */ ++} RGX_HWPERF_DEVICE; ++ ++/*! HWPerf connection structure */ ++typedef struct ++{ ++ RGX_HWPERF_DEVICE *psHWPerfDevList; /*!< Pointer to list of devices */ ++} RGX_HWPERF_CONNECTION; ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfLazyConnect ++@Description Obtain a HWPerf connection object to the RGX device(s). The ++ connections to devices are not actually opened until ++ HWPerfOpen() is called. ++ ++@Output ppsHWPerfConnection Address of a HWPerf connection object ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); ++ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfOpen ++@Description Opens connection(s) to the RGX device(s). A valid handle to ++ the connection object has to be provided, which means this ++ function must be preceded by a call to ++ RGXHWPerfLazyConnect(). ++ ++@Input psHWPerfConnection HWPerf connection object ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection); ++ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfConnect ++@Description Obtain a connection object to the RGX HWPerf module. Allocated ++ connection object(s) reference opened connection(s). Calling ++ this function is equivalent to calling RGXHWPerfLazyConnect() ++ and RGXHWPerfOpen(). This connect should be used when the caller ++ will be retrieving event data. ++ ++@Output ppsHWPerfConnection Address of HWPerf connection object ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); ++ ++ ++/*************************************************************************/ /*!
++@Function RGXHWPerfFreeConnection ++@Description Frees the HWPerf connection object ++ ++@Input psHWPerfConnection Pointer to connection object as returned ++ from RGXHWPerfLazyConnect() ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** psHWPerfConnection); ++ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfClose ++@Description Closes all the opened connection(s) to RGX device(s) ++ ++@Input psHWPerfConnection Pointer to HWPerf connection object as ++ returned from RGXHWPerfConnect() or ++ RGXHWPerfOpen() ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection); ++ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfDisconnect ++@Description Disconnect from the RGX device ++ ++@Input ppsHWPerfConnection Pointer to HWPerf connection object as ++ returned from RGXHWPerfConnect() or ++ RGXHWPerfOpen(). Calling this function is ++ equivalent to calling RGXHWPerfClose() ++ and RGXHWPerfFreeConnection(). ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection); ++ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfControl ++@Description Enable or disable the generation of RGX HWPerf event packets. ++ See RGXCtrlHWPerf(). ++ ++@Input psHWPerfConnection Pointer to HWPerf connection object ++@Input eStreamId ID of the HWPerf stream ++@Input bToggle Switch to toggle or apply mask. ++@Input ui64Mask Mask of events to control. ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfControl( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ RGX_HWPERF_STREAM_ID eStreamId, ++ IMG_BOOL bToggle, ++ IMG_UINT64 ui64Mask); ++ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfGetFilter ++@Description Reads the HWPerf stream filter for the stream identified by ++ the given stream ID. ++ ++@Input hDevData Handle to connection/device object ++@Input eStreamId ID of the HWPerf stream ++@Output ui64Filter HWPerf filter value ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfGetFilter( ++ IMG_HANDLE hDevData, ++ RGX_HWPERF_STREAM_ID eStreamId, ++ IMG_UINT64 *ui64Filter ++); ++ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfConfigMuxCounters ++@Description Enable and configure the performance counter block for one or ++ more device layout modules. ++ See RGXHWPerfConfigureAndEnableCustomCounters().
++ ++@Input psHWPerfConnection Pointer to HWPerf connection object ++@Input ui32NumBlocks Number of elements in the array ++@Input asBlockConfigs Address of the array of configuration blocks ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfConfigMuxCounters( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ IMG_UINT32 ui32NumBlocks, ++ RGX_HWPERF_CONFIG_MUX_CNTBLK *asBlockConfigs); ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfConfigureAndEnableCustomCounters ++@Description Enable and configure custom performance counters ++ ++@Input psHWPerfConnection Pointer to HWPerf connection object ++@Input ui16CustomBlockID ID of the custom block to configure ++@Input ui16NumCustomCounters Number of custom counters ++@Input pui32CustomCounterIDs Pointer to array containing custom ++ counter IDs ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ IMG_UINT16 ui16CustomBlockID, ++ IMG_UINT16 ui16NumCustomCounters, ++ IMG_UINT32 *pui32CustomCounterIDs); ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfDisableCounters ++@Description Disable the performance counter block for one or more device ++ layout modules. ++ ++@Input psHWPerfConnection Pointer to HWPerf connection object ++@Input ui32NumBlocks Number of elements in the array ++@Input aeBlockIDs An array of words with values taken from ++ the RGX_HWPERF_CNTBLK_ID ++ enumeration. ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfDisableCounters( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ IMG_UINT32 ui32NumBlocks, ++ IMG_UINT16* aeBlockIDs); ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfEnableCounters ++@Description Enable the performance counter block for one or more device ++ layout modules. ++ ++@Input psHWPerfConnection Pointer to HWPerf connection object ++@Input ui32NumBlocks Number of elements in the array ++@Input aeBlockIDs An array of words with values taken from the ++ RGX_HWPERF_CNTBLK_ID enumeration. ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfEnableCounters( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ IMG_UINT32 ui32NumBlocks, ++ IMG_UINT16* aeBlockIDs); ++ ++/****************************************************************************** ++ * RGX HW Performance Profiling Retrieval API(s) ++ * ++ * The client must ensure that its use of this acquire/release API for a ++ * single connection/stream is not shared between multiple execution contexts, ++ * e.g. between a kernel thread and an ISR handler. It is the client's ++ * responsibility to ensure this API is not interrupted by a high priority ++ * thread/ISR. ++ *****************************************************************************/ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfAcquireEvents ++@Description When there is data available to read, this call returns with OK ++ and the address and length of the data buffer the client can ++ safely read.
This buffer may contain one or more event packets. ++ When there is no data to read, this call returns with OK and ++ sets *pui32BufLen to 0 on exit. ++ Clients must pair this call with a RGXHWPerfReleaseEvents() ++ call. ++ Data returned in ppBuf will be in the form of a sequence of ++ HWPerf packets which should be traversed using the pointers, ++ structures and macros provided by rgx_hwperf.h. ++ ++@Input hDevData Handle to connection/device object ++@Input eStreamId ID of the HWPerf stream ++@Output ppBuf Address of a pointer to a byte buffer. On exit it ++ contains the address of the buffer to read from ++@Output pui32BufLen Pointer to an integer. On exit it is the size of ++ the data to read from the buffer ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfAcquireEvents( ++ IMG_HANDLE hDevData, ++ RGX_HWPERF_STREAM_ID eStreamId, ++ IMG_PBYTE* ppBuf, ++ IMG_UINT32* pui32BufLen); ++ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfReleaseEvents ++@Description Called after the client has read the event data out of the buffer ++ retrieved from the Acquire Events call, to release resources. ++ ++@Input hDevData Handle to connection/device object ++@Input eStreamId ID of the HWPerf stream ++@Return PVRSRV_ERROR System error code ++*/ /**************************************************************************/ ++IMG_INTERNAL ++PVRSRV_ERROR RGXHWPerfReleaseEvents( ++ IMG_HANDLE hDevData, ++ RGX_HWPERF_STREAM_ID eStreamId); ++ ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfConvertCRTimeStamp ++@Description Converts the timestamp given by FW events to the common OS ++ timestamp. The first three inputs are obtained via a CLK_SYNC ++ event, ui64CRTimeStamp is the CR timestamp from the FW event ++ to be converted. ++ ++@Input ui32ClkSpeed Clock speed given by sync event ++@Input ui64CorrCRTimeStamp CR Timestamp given by sync event ++@Input ui64CorrOSTimeStamp Correlating OS Timestamp given by sync ++ event ++@Input ui64CRTimeStamp CR Timestamp to convert ++@Return IMG_UINT64 Calculated OS Timestamp ++*/ /**************************************************************************/ ++IMG_UINT64 RGXHWPerfConvertCRTimeStamp( ++ IMG_UINT32 ui32ClkSpeed, ++ IMG_UINT64 ui64CorrCRTimeStamp, ++ IMG_UINT64 ui64CorrOSTimeStamp, ++ IMG_UINT64 ui64CRTimeStamp); ++ ++#endif /* RGXAPI_KM_H */ ++ ++/****************************************************************************** ++ End of file (rgxapi_km.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxbreakpoint.c b/drivers/gpu/drm/img-rogue/rgxbreakpoint.c +new file mode 100644 +index 000000000000..bd147dc62e1a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxbreakpoint.c +@@ -0,0 +1,290 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Breakpoint routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX Breakpoint routines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
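Before rgxbreakpoint.c proper begins: the HWPerf entry points declared in rgxapi_km.h above compose into a fixed sequence, namely connect, set a filter, then a paired acquire/release loop. A hedged caller-side sketch; it compiles only against the driver headers, RGX_HWPERF_STREAM_ID0_FW is assumed to be declared in rgx_hwperf.h, and the 0x3 event mask is purely illustrative.

#include "rgxapi_km.h"   /* the declarations quoted above */

/* Illustrative only: drain whatever HWPerf events are currently buffered
 * for the first device on the connection. */
static PVRSRV_ERROR demo_drain_hwperf(void)
{
    RGX_HWPERF_CONNECTION *psConn = NULL;
    IMG_HANDLE hDev;
    IMG_PBYTE pBuf;
    IMG_UINT32 ui32Len;
    PVRSRV_ERROR eError;

    eError = RGXHWPerfConnect(&psConn);   /* LazyConnect + Open in one call */
    if (eError != PVRSRV_OK)
        return eError;

    if (psConn->psHWPerfDevList == NULL)  /* no devices: nothing to drain */
        goto done;
    hDev = psConn->psHWPerfDevList->hDevData;

    /* Apply an event mask (the 0x3 value is a made-up example). */
    eError = RGXHWPerfControl(psConn, RGX_HWPERF_STREAM_ID0_FW,
                              IMG_FALSE, 0x3);
    if (eError != PVRSRV_OK)
        goto done;

    /* Acquire and release are always paired; a zero *pui32BufLen simply
     * means "nothing buffered yet" and ends the loop. */
    do
    {
        eError = RGXHWPerfAcquireEvents(hDev, RGX_HWPERF_STREAM_ID0_FW,
                                        &pBuf, &ui32Len);
        if (eError != PVRSRV_OK)
            break;
        /* ... parse ui32Len bytes of HWPerf packets from pBuf here ... */
        eError = RGXHWPerfReleaseEvents(hDev, RGX_HWPERF_STREAM_ID0_FW);
    } while (eError == PVRSRV_OK && ui32Len != 0);

done:
    RGXHWPerfDisconnect(&psConn);         /* Close + FreeConnection */
    return eError;
}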
++*/ /**************************************************************************/ ++ ++#include "rgxbreakpoint.h" ++#include "pvr_debug.h" ++#include "rgxutils.h" ++#include "rgxfwutils.h" ++#include "rgxmem.h" ++#include "device.h" ++#include "sync_internal.h" ++#include "pdump_km.h" ++#include "pvrsrv.h" ++ ++PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_HANDLE hMemCtxPrivData, ++ RGXFWIF_DM eFWDataMaster, ++ IMG_UINT32 ui32BPAddr, ++ IMG_UINT32 ui32HandlerAddr, ++ IMG_UINT32 ui32DataMaster) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sBPCmd; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ OSLockAcquire(psDevInfo->hBPLock); ++ ++ if (psDevInfo->bBPSet) ++ { ++ eError = PVRSRV_ERROR_BP_ALREADY_SET; ++ goto unlock; ++ } ++ ++ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; ++ sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr; ++ sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr; ++ sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster; ++ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_ENABLE; ++ sBPCmd.uCmdData.sBPData.eDM = eFWDataMaster; ++ ++ eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, ++ psFWMemContextMemDesc, ++ 0 , ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); ++ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, ++ eFWDataMaster, ++ &sBPCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); ++ ++ /* Wait for FW to complete command execution */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); ++ ++ psDevInfo->eBPDM = eFWDataMaster; ++ psDevInfo->bBPSet = IMG_TRUE; ++ ++unlock: ++ OSLockRelease(psDevInfo->hBPLock); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_HANDLE hMemCtxPrivData) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sBPCmd; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; ++ sBPCmd.uCmdData.sBPData.ui32BPAddr = 0; ++ sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0; ++ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL; ++ sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; ++ ++ OSLockAcquire(psDevInfo->hBPLock); ++ ++ eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, ++ psFWMemContextMemDesc, ++ 0 , ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); ++ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, ++ psDevInfo->eBPDM, ++ &sBPCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); ++ ++ /* Wait for FW to complete command execution */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ 
PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); ++ ++ psDevInfo->bBPSet = IMG_FALSE; ++ ++unlock: ++ OSLockRelease(psDevInfo->hBPLock); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_HANDLE hMemCtxPrivData) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sBPCmd; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ OSLockAcquire(psDevInfo->hBPLock); ++ ++ if (psDevInfo->bBPSet == IMG_FALSE) ++ { ++ eError = PVRSRV_ERROR_BP_NOT_SET; ++ goto unlock; ++ } ++ ++ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; ++ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL | RGXFWIF_BPDATA_FLAGS_ENABLE; ++ sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; ++ ++ eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, ++ psFWMemContextMemDesc, ++ 0 , ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); ++ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, ++ psDevInfo->eBPDM, ++ &sBPCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); ++ ++ /* Wait for FW to complete command execution */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); ++ ++unlock: ++ OSLockRelease(psDevInfo->hBPLock); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_HANDLE hMemCtxPrivData) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sBPCmd; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ OSLockAcquire(psDevInfo->hBPLock); ++ ++ if (psDevInfo->bBPSet == IMG_FALSE) ++ { ++ eError = PVRSRV_ERROR_BP_NOT_SET; ++ goto unlock; ++ } ++ ++ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; ++ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_CTL; ++ sBPCmd.uCmdData.sBPData.eDM = psDevInfo->eBPDM; ++ ++ eError = RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext, ++ psFWMemContextMemDesc, ++ 0 , ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", unlock); ++ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, ++ psDevInfo->eBPDM, ++ &sBPCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); ++ ++ /* Wait for FW to complete command execution */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); ++ ++unlock: ++ OSLockRelease(psDevInfo->hBPLock); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT32 ui32TempRegs, ++ IMG_UINT32 ui32SharedRegs) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sBPCmd; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ 
PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP; ++ sBPCmd.uCmdData.sBPData.ui32BPDataFlags = RGXFWIF_BPDATA_FLAGS_REGS; ++ sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs; ++ sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs; ++ sBPCmd.uCmdData.sBPData.psFWMemContext.ui32Addr = 0U; ++ sBPCmd.uCmdData.sBPData.eDM = RGXFWIF_DM_GP; ++ ++ OSLockAcquire(psDevInfo->hBPLock); ++ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDeviceNode->pvDevice, ++ RGXFWIF_DM_GP, ++ &sBPCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", unlock); ++ ++ /* Wait for FW to complete command execution */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", unlock); ++ ++unlock: ++ OSLockRelease(psDevInfo->hBPLock); ++ ++ return eError; ++} ++ ++/****************************************************************************** ++ End of file (rgxbreakpoint.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxbreakpoint.h b/drivers/gpu/drm/img-rogue/rgxbreakpoint.h +new file mode 100644 +index 000000000000..1a0b87b4252b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxbreakpoint.h +@@ -0,0 +1,141 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX breakpoint functionality ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX breakpoint functionality ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGXBREAKPOINT_H)
++#define RGXBREAKPOINT_H
++
++#include "pvr_debug.h"
++#include "rgxutils.h"
++#include "rgxfwutils.h"
++#include "rgx_fwif_km.h"
++
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXSetBreakpointKM
++
++ @Description
++ Server-side implementation of RGXSetBreakpoint
++
++ @Input psDeviceNode - RGX Device node
++ @Input hMemCtxPrivData - memory context private data
++ @Input eFWDataMaster - Firmware Data Master to schedule the command for
++ @Input ui32BPAddr - Address of breakpoint
++ @Input ui32HandlerAddr - Address of breakpoint handler
++ @Input ui32DataMaster - Data Master on which the breakpoint is set
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection,
++ PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_HANDLE hMemCtxPrivData,
++ RGXFWIF_DM eFWDataMaster,
++ IMG_UINT32 ui32BPAddr,
++ IMG_UINT32 ui32HandlerAddr,
++ IMG_UINT32 ui32DataMaster);
++
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXClearBreakpointKM
++
++ @Description
++ Server-side implementation of RGXClearBreakpoint
++
++ @Input psDeviceNode - RGX Device node
++ @Input hMemCtxPrivData - memory context private data
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection,
++ PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_HANDLE hMemCtxPrivData);
++
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXEnableBreakpointKM
++
++ @Description
++ Server-side implementation of RGXEnableBreakpoint
++
++ @Input psDeviceNode - RGX Device node
++ @Input hMemCtxPrivData - memory context private data
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection,
++ PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_HANDLE hMemCtxPrivData);
++
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXDisableBreakpointKM
++
++ @Description
++ Server-side implementation of RGXDisableBreakpoint
++
++ @Input psDeviceNode - RGX Device node
++ @Input hMemCtxPrivData - memory context private data
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection,
++ PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_HANDLE hMemCtxPrivData);
++
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXOverallocateBPRegistersKM
++
++ @Description
++ Server-side implementation of RGXOverallocateBPRegisters
++
++ @Input psDeviceNode - RGX Device node
++ @Input ui32TempRegs - Number of temporary registers to overallocate
++ @Input ui32SharedRegs - Number of shared registers to overallocate
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection,
++ PVRSRV_DEVICE_NODE * psDeviceNode,
++ IMG_UINT32 ui32TempRegs,
++ IMG_UINT32 ui32SharedRegs);
++#endif /* RGXBREAKPOINT_H */
+diff --git a/drivers/gpu/drm/img-rogue/rgxbvnc.c b/drivers/gpu/drm/img-rogue/rgxbvnc.c
+new file mode 100644
+index 000000000000..6c29beef22b5
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxbvnc.c
+@@ -0,0 +1,852 @@
++/*************************************************************************/ /*!
++@File
++@Title BVNC handling specific routines
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Functions used for BVNC related work
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "rgxbvnc.h" ++#define RGXBVNC_C ++#include "rgx_bvnc_table_km.h" ++#undef RGXBVNC_C ++#include "oskm_apphint.h" ++#include "pvrsrv.h" ++#include "pdump_km.h" ++#include "rgx_compat_bvnc.h" ++ ++#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(RGX_BVNC_STR_SIZE_MAX))+1) ++ ++/* This function searches the given array for a given search value */ ++static IMG_UINT64* _RGXSearchBVNCTable( IMG_UINT64 *pui64Array, ++ IMG_UINT uiEnd, ++ IMG_UINT64 ui64SearchValue, ++ IMG_UINT uiColCount) ++{ ++ IMG_UINT uiStart = 0, index; ++ IMG_UINT64 value, *pui64Ptr = NULL; ++ ++ while (uiStart < uiEnd) ++ { ++ index = (uiStart + uiEnd)/2; ++ pui64Ptr = pui64Array + (index * uiColCount); ++ value = *(pui64Ptr); ++ ++ if (value == ui64SearchValue) ++ { ++ return pui64Ptr; ++ } ++ ++ if (value > ui64SearchValue) ++ { ++ uiEnd = index; ++ }else ++ { ++ uiStart = index + 1; ++ } ++ } ++ return NULL; ++} ++#define RGX_SEARCH_BVNC_TABLE(t, b) (_RGXSearchBVNCTable((IMG_UINT64*)(t), \ ++ ARRAY_SIZE(t), (b), \ ++ sizeof((t)[0])/sizeof(IMG_UINT64)) ) ++ ++ ++#if defined(DEBUG) ++ ++#define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature) \ ++ if ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED ) \ ++ { PVR_LOG(("%s %d", szShortName, psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX])); } \ ++ else \ ++ { PVR_LOG(("%s N/A", szShortName)); } ++ ++static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++ IMG_UINT64 ui64Mask = 0, ui32IdOrNameIdx = 1; ++ ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC: ", NUM_CLUSTERS); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "CSF: ", CDM_CONTROL_STREAM_FORMAT); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "FBCDCA: ", FBCDC_ARCHITECTURE); ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "META: ", META); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMB: ", META_COREMEM_BANKS); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMS: ", META_COREMEM_SIZE); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MDMACnt: ", META_DMA_CHANNEL_COUNT); ++#endif ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIIP: ", NUM_ISP_IPP_PIPES); ++#if defined(RGX_FEATURE_NUM_ISP_PER_SPU_MAX_VALUE_IDX) ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIPS: ", NUM_ISP_PER_SPU); ++#endif ++#if defined(RGX_FEATURE_PBE_PER_SPU_MAX_VALUE_IDX) ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PPS: ", PBE_PER_SPU); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NSPU: ", NUM_SPU); ++#endif ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PBW: ", PHYS_BUS_WIDTH); ++#if defined(RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX) ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "STEArch: ", SCALABLE_TE_ARCH); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SVCEA: ", SCALABLE_VCE); ++#endif ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCBanks: ", SLC_BANKS); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCCLS: ", SLC_CACHE_LINE_SIZE_BITS); ++ PVR_LOG(("SLCSize: %d", psDevInfo->sDevFeatureCfg.ui32SLCSizeInBytes)); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "VASB: ", VIRTUAL_ADDRESS_SPACE_BITS); ++ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NOSIDS: ", NUM_OSIDS); ++ ++#if defined(FEATURE_NO_VALUES_NAMES_MAX_IDX) ++ /* Dump the features with no values */ ++ ui64Mask = psDevInfo->sDevFeatureCfg.ui64Features; ++ while (ui64Mask) ++ { ++ if (ui64Mask & 0x01) ++ { ++ if 
(ui32IdOrNameIdx <= FEATURE_NO_VALUES_NAMES_MAX_IDX) ++ { ++ PVR_LOG(("%s", gaszFeaturesNoValuesNames[ui32IdOrNameIdx - 1])); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "Feature with Mask doesn't exist: 0x%016" IMG_UINT64_FMTSPECx, ++ ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); ++ } ++ } ++ ui64Mask >>= 1; ++ ui32IdOrNameIdx++; ++ } ++#endif ++ ++#if defined(ERNSBRNS_IDS_MAX_IDX) ++ /* Dump the ERN and BRN flags for this core */ ++ ui64Mask = psDevInfo->sDevFeatureCfg.ui64ErnsBrns; ++ ui32IdOrNameIdx = 1; ++ ++ while (ui64Mask) ++ { ++ if (ui64Mask & 0x1) ++ { ++ if (ui32IdOrNameIdx <= ERNSBRNS_IDS_MAX_IDX) ++ { ++ PVR_LOG(("ERN/BRN : %d", gaui64ErnsBrnsIDs[ui32IdOrNameIdx - 1])); ++ } ++ else ++ { ++ PVR_LOG(("Unknown ErnBrn bit: 0x%0" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1)))); ++ } ++ } ++ ui64Mask >>= 1; ++ ui32IdOrNameIdx++; ++ } ++#endif ++ ++} ++#endif ++ ++static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 *pui64Cfg) ++{ ++ IMG_UINT32 ui32Index; ++ ++ /* Read the feature values for the runtime BVNC */ ++ for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++) ++ { ++ IMG_UINT16 bitPosition = aui16FeaturesWithValuesBitPositions[ui32Index]; ++ IMG_UINT64 ui64PackedValues = pui64Cfg[2 + bitPosition / 64]; ++ IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> (bitPosition % 64); ++ ++ if (ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]) ++ { ++ if (gaFeaturesValues[ui32Index][ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED) ++ { ++ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_DISABLED; ++ } ++ else ++ { ++ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = gaFeaturesValues[ui32Index][ui16ValueIndex]; ++ } ++ } ++ else ++ { ++ /* This case should never be reached */ ++ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID; ++ PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex)); ++ PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]); ++ } ++ } ++ ++#if defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) ++ /* Code path for Volcanic */ ++ ++ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_CDM+1; ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && ++ RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) ++ { ++ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_RAY+1); ++ } ++#if defined(SUPPORT_AGP) ++ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM2+1); ++#if defined(SUPPORT_AGP4) ++ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM4+1); ++#endif ++#endif ++ ++ /* Get the max number of dusts in the core */ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS)) ++ { ++ RGX_LAYER_PARAMS sParams = {.psDevInfo = psDevInfo}; ++ ++ if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) == 1) ++ { ++ /* per SPU power island */ ++ psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); ++ } ++ else if (RGX_DEVICE_GET_FEATURE_VALUE(&sParams, POWER_ISLAND_VERSION) >= 2) ++ { ++ /* per Cluster power island */ ++ psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS); ++ } ++ else ++ { ++ /* All volcanic cores 
support power islanding */ ++ psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; ++ PVR_DPF((PVR_DBG_ERROR, "%s: Power island feature version not found!", __func__)); ++ PVR_ASSERT(0); ++ } ++ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, RAY_TRACING_ARCH) && ++ RGX_GET_FEATURE_VALUE(psDevInfo, RAY_TRACING_ARCH) > 1) ++ { ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RT_RAC_PER_SPU)) ++ { ++ psDevInfo->sDevFeatureCfg.ui32MAXRACCount = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_SPU); ++ } ++ else ++ { ++ psDevInfo->sDevFeatureCfg.ui32MAXRACCount = 1; ++ } ++ } ++ } ++ else ++ { ++ /* This case should never be reached as all cores have clusters */ ++ psDevInfo->sDevFeatureCfg.ui32MAXPowUnitCount = RGX_FEATURE_VALUE_INVALID; ++ PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); ++ PVR_ASSERT(0); ++ } ++#else /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */ ++ /* Code path for Rogue and Oceanic */ ++ ++ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_CDM+1; ++#if defined(SUPPORT_AGP) ++ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = MAX(psDevInfo->sDevFeatureCfg.ui32MAXDMCount, RGXFWIF_DM_GEOM2+1); ++#endif ++ ++ /* Meta feature not present in oceanic */ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_IDX] = RGX_FEATURE_VALUE_DISABLED; ++ } ++#endif ++ ++ /* Get the max number of dusts in the core */ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS)) ++ { ++ psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2)); ++ } ++ else ++ { ++ /* This case should never be reached as all cores have clusters */ ++ psDevInfo->sDevFeatureCfg.ui32MAXDustCount = RGX_FEATURE_VALUE_INVALID; ++ PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__)); ++ PVR_ASSERT(0); ++ } ++#endif /* defined(RGX_FEATURE_POWER_ISLAND_VERSION_MAX_VALUE_IDX) */ ++ ++ /* Meta feature not present in oceanic */ ++#if defined(RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX) ++ /* Transform the META coremem size info in bytes */ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE)) ++ { ++ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024; ++ } ++#endif ++} ++ ++static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNC, const IMG_UINT32 ui32RGXDevCount) ++{ ++ const IMG_CHAR *pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC; ++ void *pvAppHintState = NULL; ++ IMG_UINT32 ui32BVNCCount = 0; ++ IMG_BOOL bRet; ++ IMG_CHAR szBVNCAppHint[RGXBVNC_BUFFER_SIZE]; ++ IMG_CHAR *pszCurrentBVNC = szBVNCAppHint; ++ szBVNCAppHint[0] = '\0'; ++ ++ OSCreateKMAppHintState(&pvAppHintState); ++ ++ bRet = (IMG_BOOL)OSGetKMAppHintSTRING(APPHINT_NO_DEVICE, ++ pvAppHintState, ++ RGXBVNC, ++ pszAppHintDefault, ++ szBVNCAppHint, ++ sizeof(szBVNCAppHint)); ++ ++ OSFreeKMAppHintState(pvAppHintState); ++ ++ if (!bRet || (szBVNCAppHint[0] == '\0')) ++ { ++ return; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list: %s",__func__, szBVNCAppHint)); ++ ++ while (*pszCurrentBVNC != '\0') ++ { ++ IMG_CHAR *pszNext = pszCurrentBVNC; ++ ++ if (ui32BVNCCount >= PVRSRV_MAX_DEVICES) ++ { ++ break; ++ } ++ ++ while (1) ++ { ++ if (*pszNext == ',') ++ { ++ pszNext[0] = '\0'; ++ pszNext++; ++ break; ++ } else if (*pszNext == '\0') ++ { ++ break; ++ } ++ pszNext++; ++ } ++ ++ if (ui32BVNCCount == ui32RGXDevCount) ++ { ++ OSStringLCopy(pszBVNC, 
pszCurrentBVNC, RGX_BVNC_STR_SIZE_MAX); ++ return; ++ } ++ ++ ui32BVNCCount++; ++ pszCurrentBVNC = pszNext; ++ } ++ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Given module parameters list is shorter than " ++ "number of actual devices", __func__)); ++ ++ /* If only one BVNC parameter is specified, the same is applied for all RGX ++ * devices detected */ ++ if (1 == ui32BVNCCount) ++ { ++ OSStringLCopy(pszBVNC, szBVNCAppHint, RGX_BVNC_STR_SIZE_MAX); ++ } ++} ++ ++/* Function that parses the BVNC List passed as module parameter */ ++static PVRSRV_ERROR _RGXBvncParseList(IMG_UINT32 *pB, ++ IMG_UINT32 *pV, ++ IMG_UINT32 *pN, ++ IMG_UINT32 *pC, ++ const IMG_UINT32 ui32RGXDevCount) ++{ ++ unsigned int ui32ScanCount = 0; ++ IMG_CHAR aszBVNCString[RGX_BVNC_STR_SIZE_MAX]; ++ ++ aszBVNCString[0] = '\0'; ++ ++ /* 4 components of a BVNC string is B, V, N & C */ ++#define RGX_BVNC_INFO_PARAMS (4) ++ ++ _RGXBvncAcquireAppHint(aszBVNCString, ui32RGXDevCount); ++ ++ if ('\0' == aszBVNCString[0]) ++ { ++ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; ++ } ++ ++ /* Parse the given RGX_BVNC string */ ++ ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STR_FMTSPEC, pB, pV, pN, pC); ++ if (RGX_BVNC_INFO_PARAMS != ui32ScanCount) ++ { ++ ui32ScanCount = OSVSScanf(aszBVNCString, RGX_BVNC_STRP_FMTSPEC, pB, pV, pN, pC); ++ } ++ if (RGX_BVNC_INFO_PARAMS != ui32ScanCount) ++ { ++ return PVRSRV_ERROR_INVALID_BVNC_PARAMS; ++ } ++ PVR_LOG(("BVNC module parameter honoured: %s", aszBVNCString)); ++ ++ return PVRSRV_OK; ++} ++ ++#if !defined(NO_HARDWARE) ++/* ++ * This function obtains the SLCSize from the physical device for GPUs which provide ++ * this information. If the GPU does not provide support we return a value of 0 which will ++ * result in the BVNC supplied definition being used to provide the SLCSize. ++ * Must only be called from driver-live with hardware powered-on. ++ */ ++static IMG_UINT32 _RGXBvncReadSLCSize(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ IMG_UINT64 ui64SLCSize = 0ULL; ++ ++#if defined(RGX_CR_SLC_SIZE_IN_KB) ++ /* Rogue and Oceanic hardware */ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_CONFIGURABLE)) ++ { ++ ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_SIZE_IN_KB); ++ if (ui64SLCSize == 0ULL) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Unexpected 0 SLC size. Using default", __func__)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: RGX_CR_SIZE_IN_KB = %u", __func__, ++ (IMG_UINT32) ui64SLCSize)); ++ } ++ } ++#else ++ /* Volcanic hardware */ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_SIZE_ADJUSTMENT)) ++ { ++ ui64SLCSize = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SLC_STATUS2); ++ ui64SLCSize &= ~RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_CLRMSK; ++ ui64SLCSize >>= RGX_CR_SLC_STATUS2_SLC_SIZE_IN_KB_SHIFT; ++ ++ if (ui64SLCSize == 0ULL) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Unexpected 0 SLC size. Using default", __func__)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: SLC_SIZE_IN_KILOBYTES = %u", __func__, ++ (IMG_UINT32) ui64SLCSize)); ++ } ++ } ++#endif ++ ++ return (IMG_UINT32)ui64SLCSize * 1024U; ++} ++#endif /* !defined(NO_HARDWARE) */ ++ ++/* This function detects the Rogue variant and configures the essential ++ * config info associated with such a device. 
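++ * A BVNC identifies a core as four fields, Branch.Version.Number.Config,
++ * packed into a single 64-bit lookup key. As an editorial illustration
++ * (assuming the 16-bit-per-field layout implied by the 0xffff masks used
++ * with RGX_BVNC_PACK_SHIFT_* in RGXVerifyBVNC below, with B in the top
++ * word and C in the bottom), a core reporting B=27, V=5, N=4, C=6 would
++ * pack to (27ULL << 48) | (5ULL << 32) | (4ULL << 16) | 6ULL =
++ * 0x001B000500040006. Note that the feature-table lookup below packs V
++ * as 0, while the ERN/BRN lookup packs the real V.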
++ * The config info includes features, errata, etc ++ */ ++PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ static IMG_UINT32 ui32RGXDevCnt = 0; ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ IMG_UINT64 ui64BVNC=0; ++ IMG_UINT32 B=0, V=0, N=0, C=0; ++ IMG_UINT64 *pui64Cfg = NULL; ++ IMG_UINT32 ui32Cores = 1U; ++ IMG_UINT32 ui32SLCSize = 0; ++ ++ /* Check for load time RGX BVNC parameter */ ++ eError = _RGXBvncParseList(&B,&V,&N,&C, ui32RGXDevCnt); ++ if (PVRSRV_OK == eError) ++ { ++ PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC ++ " from driver load parameter", B, V, N, C)); ++ ++ /* Extract the BVNC config from the Features table */ ++ ui64BVNC = BVNC_PACK(B,0,N,C); ++ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); ++ PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Driver parameter BVNC configuration not found!"); ++ } ++ ++ { ++ void *pvAppHintState = NULL; ++ const IMG_BOOL bAppHintDefault = PVRSRV_APPHINT_IGNOREHWREPORTEDBVNC; ++ ++ OSCreateKMAppHintState(&pvAppHintState); ++ OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, ++ pvAppHintState, ++ IgnoreHWReportedBVNC, ++ &bAppHintDefault, ++ &psDevInfo->bIgnoreHWReportedBVNC); ++ OSFreeKMAppHintState(pvAppHintState); ++ } ++ ++#if !defined(NO_HARDWARE) ++ ++ /* Try to detect the RGX BVNC from the HW device */ ++ if ((NULL == pui64Cfg) && !psDevInfo->bIgnoreHWReportedBVNC) ++ { ++ IMG_UINT64 ui32ID; ++ IMG_BOOL bPowerDown = (psDeviceNode->eCurrentSysPowerState == PVRSRV_SYS_POWER_STATE_OFF); ++ ++ /* Power-up the device as required to read the registers */ ++ if (bPowerDown) ++ { ++ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON"); ++ } ++ ++#if defined(RGX_CR_CORE_ID__PBVNC) ++ /* Core ID reading code for Rogue */ ++ ++ /* Read the BVNC, in to new way first, if B not set, use old scheme */ ++ ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC); ++ ++ if (GET_B(ui32ID)) ++ { ++ B = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >> ++ RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT; ++ V = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >> ++ RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT; ++ N = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> ++ RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT; ++ C = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >> ++ RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT; ++ ++ } ++ else ++ { ++ IMG_UINT64 ui32CoreID, ui32CoreRev; ++ ui32CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION); ++ ui32CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID); ++ B = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >> ++ RGX_CR_CORE_REVISION_MAJOR_SHIFT; ++ V = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >> ++ RGX_CR_CORE_REVISION_MINOR_SHIFT; ++ N = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >> ++ RGX_CR_CORE_ID_CONFIG_N_SHIFT; ++ C = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >> ++ RGX_CR_CORE_ID_CONFIG_C_SHIFT; ++ } ++#else ++ /* Core ID reading code for Volcanic */ ++ ++ ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID); ++ ++ B = (ui32ID & ~RGX_CR_CORE_ID_BRANCH_ID_CLRMSK) >> ++ RGX_CR_CORE_ID_BRANCH_ID_SHIFT; ++ V = (ui32ID & ~RGX_CR_CORE_ID_VERSION_ID_CLRMSK) >> ++ RGX_CR_CORE_ID_VERSION_ID_SHIFT; ++ N = (ui32ID & ~RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_CLRMSK) >> ++ RGX_CR_CORE_ID_NUMBER_OF_SCALABLE_UNITS_SHIFT; ++ C = (ui32ID & 
~RGX_CR_CORE_ID_CONFIG_ID_CLRMSK) >> ++ RGX_CR_CORE_ID_CONFIG_ID_SHIFT; ++#endif ++ ++ PVR_LOG(("Read BVNC " RGX_BVNC_STR_FMTSPEC ++ " from HW device registers", B, V, N, C)); ++ ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ /* Read the number of cores in the system for newer BVNC (Branch ID > 20) */ ++ if (B > 20) ++ { ++ ui32Cores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM); ++ } ++ } ++ ++ /* Obtain the SLC size from the device */ ++ ui32SLCSize = _RGXBvncReadSLCSize(psDeviceNode); ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: SLC Size reported as %u", __func__, ui32SLCSize)); ++ ++ if (bPowerDown) ++ { ++ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); ++ } ++ ++ /* Extract the BVNC config from the Features table */ ++ ui64BVNC = BVNC_PACK(B,0,N,C); ++ if (ui64BVNC != 0) ++ { ++ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); ++ PVR_LOG_IF_FALSE((pui64Cfg != NULL), "HW device BVNC configuration not found!"); ++ } ++ else if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ /* ++ * On host OS we should not get here as CORE_ID should not be zero, so flag an error. ++ * On older cores, guest OS only has CORE_ID if defined(RGX_FEATURE_COREID_PER_OS) ++ */ ++ PVR_LOG_ERROR(PVRSRV_ERROR_DEVICE_REGISTER_FAILED, "CORE_ID register returns zero. Unknown BVNC"); ++ } ++ } ++#endif ++ ++#if defined(RGX_BVNC_KM_B) && defined(RGX_BVNC_KM_N) && defined(RGX_BVNC_KM_C) ++ if (NULL == pui64Cfg) ++ { ++ /* We reach here if the HW is not present, ++ * or we are running in a guest OS with no COREID_PER_OS feature, ++ * or HW is unstable during register read giving invalid values, ++ * or runtime detection has been disabled - fall back to compile time BVNC ++ */ ++ B = RGX_BVNC_KM_B; ++ N = RGX_BVNC_KM_N; ++ C = RGX_BVNC_KM_C; ++ { ++ IMG_UINT32 ui32ScanCount = 0; ++ ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%u", &V); ++ if (1 != ui32ScanCount) ++ { ++ ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%up", &V); ++ if (1 != ui32ScanCount) ++ { ++ V = 0; ++ } ++ } ++ } ++ PVR_LOG(("Reverting to compile time BVNC %s", RGX_BVNC_KM)); ++ ++ /* Extract the BVNC config from the Features table */ ++ ui64BVNC = BVNC_PACK(B,0,N,C); ++ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC); ++ PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Compile time BVNC configuration not found!"); ++ } ++#endif /* defined(RGX_BVNC) */ ++ ++ /* Have we failed to identify the BVNC to use? */ ++ if (NULL == pui64Cfg) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Detection and feature lookup failed. " ++ "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC)); ++ return PVRSRV_ERROR_BVNC_UNSUPPORTED; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature found config: 0x%016" ++ IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016" ++ IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx "\n", __func__, ++ pui64Cfg[0], pui64Cfg[1], pui64Cfg[2], pui64Cfg[3])); ++ ++ /* Parsing feature config depends on available features on the core ++ * hence this parsing should always follow the above feature assignment */ ++ psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1]; ++ _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg); ++ ++ /* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. */ ++ ui64BVNC = BVNC_PACK(B,V,N,C); ++ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaErnsBrns, ui64BVNC); ++ if (NULL == pui64Cfg) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN lookup failed. 
" ++ "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC)); ++ psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0; ++ return PVRSRV_ERROR_BVNC_UNSUPPORTED; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016" IMG_UINT64_FMTSPECx ++ " 0x%016" IMG_UINT64_FMTSPECx, __func__, *pui64Cfg, pui64Cfg[1])); ++ psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1]; ++ ++ psDevInfo->sDevFeatureCfg.ui32B = B; ++ psDevInfo->sDevFeatureCfg.ui32V = V; ++ psDevInfo->sDevFeatureCfg.ui32N = N; ++ psDevInfo->sDevFeatureCfg.ui32C = C; ++ ++ ++ /* ++ * Store the SLCSize in the device info field. If 0 it means the device uses the BVNC ++ * values so grab them here as we've already populated the internal structures. ++ */ ++ if (ui32SLCSize == 0U) ++ { ++ ui32SLCSize = RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_KILOBYTES) * 1024U; ++ ++ /* Verify that we have a valid value returned from the BVNC */ ++ PVR_ASSERT(ui32SLCSize != 0U); ++ } ++ psDevInfo->sDevFeatureCfg.ui32SLCSizeInBytes = ui32SLCSize; ++ ++ /* Message to confirm configuration look up was a success */ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) ++ { ++#if defined(NO_HARDWARE) ++ { ++ PVR_UNREFERENCED_PARAMETER(ui32Cores); ++ PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC, ++ B, V, N, C)); ++ } ++#else ++ { ++ PVR_LOG(("RGX Device registered BVNC " RGX_BVNC_STR_FMTSPEC ++ " with %u %s in the system", B ,V ,N ,C, ui32Cores , ++ ((ui32Cores == 1U)?"core":"cores"))); ++ } ++#endif ++ } ++ else ++ { ++ PVR_LOG(("RGX Device registered with BVNC " RGX_BVNC_STR_FMTSPEC, ++ B, V, N, C)); ++ } ++ ++ ui32RGXDevCnt++; ++ ++#if defined(DEBUG) ++ _RGXBvncDumpParsedConfig(psDeviceNode); ++#endif ++ return PVRSRV_OK; ++} ++ ++/* ++ * This function checks if a particular feature is available on the given rgx device */ ++IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ if (psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask) ++ { ++ return IMG_TRUE; ++ } ++ return IMG_FALSE; ++} ++ ++/* ++ * This function returns the value of a feature on the given rgx device */ ++IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ if (eFeatureIndex >= RGX_FEATURE_WITH_VALUES_MAX_IDX) ++ { ++ return -1; ++ } ++ ++ if (psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex] == RGX_FEATURE_VALUE_DISABLED) ++ { ++ return -1; ++ } ++ ++ return psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex]; ++} ++ ++/**************************************************************************/ /*! ++@Function RGXVerifyBVNC ++@Description Checks that the device's BVNC registers have the correct values. 
++@Input psDeviceNode Device node ++@Return PVRSRV_ERROR ++*/ /***************************************************************************/ ++#define NUM_RGX_CORE_IDS 8 ++PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT64 ui64MatchBVNC; ++ IMG_UINT32 i; ++ ++ PVR_ASSERT(psDeviceNode != NULL); ++ PVR_ASSERT(psDeviceNode->pvDevice != NULL); ++ ++ /* The device info */ ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ PDUMPCOMMENT(psDeviceNode, "PDUMP VERIFY CORE_ID registers for all OSIDs\n"); ++ ++ /* construct the value to match against */ ++ if ((ui64GivenBVNC | ui64CoreIdMask) == 0) /* both zero means use configured DDK value */ ++ { ++ ui64MatchBVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, ++ psDevInfo->sDevFeatureCfg.ui32V, ++ psDevInfo->sDevFeatureCfg.ui32N, ++ psDevInfo->sDevFeatureCfg.ui32C); ++ } ++ else ++ { ++ /* use the value in CORE_ID for any zero elements in the BVNC */ ++ ui64MatchBVNC = (ui64GivenBVNC & ~ui64CoreIdMask) | (OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID) & ui64CoreIdMask); ++ } ++ PVR_LOG(("matchBVNC %d.%d.%d.%d", ++ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), ++ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), ++ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), ++ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); ++ ++ /* read in all the CORE_ID registers */ ++ for (i = 0; i < NUM_RGX_CORE_IDS; ++i) ++ { ++#if !defined(NO_HARDWARE) ++ IMG_UINT64 ui64BVNC = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID + (i << 16)); ++ ++ PVR_LOG(("CORE_ID%d returned %d.%d.%d.%d", i, ++ (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), ++ (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), ++ (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), ++ (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); ++ ++ if (ui64BVNC != ui64MatchBVNC) ++ { ++ eError = PVRSRV_ERROR_BVNC_MISMATCH; ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CORE_ID%d %d.%d.%d.%d, Expected %d.%d.%d.%d", __func__, i, ++ (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), ++ (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), ++ (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), ++ (int) ((ui64BVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff), ++ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_B) & 0xffff), ++ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_V) & 0xffff), ++ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_N) & 0xffff), ++ (int) ((ui64MatchBVNC >> RGX_BVNC_PACK_SHIFT_C) & 0xffff))); ++ break; ++ } ++#endif ++ ++#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) ++ /* check upper DWORD */ ++ eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME, ++ (RGX_CR_CORE_ID + 4) + (i << 16), ++ (IMG_UINT32)(ui64MatchBVNC >> 32), ++ 0xFFFFFFFF, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ if (eError == PVRSRV_OK) ++ { ++ /* check lower DWORD */ ++ eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME, ++ RGX_CR_CORE_ID + (i << 16), ++ (IMG_UINT32)(ui64MatchBVNC & 0xFFFFFFFF), ++ 0xFFFFFFFF, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ } ++#endif ++ } ++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/rgxbvnc.h b/drivers/gpu/drm/img-rogue/rgxbvnc.h +new file mode 100644 +index 000000000000..64c418bc4edb +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxbvnc.h +@@ -0,0 +1,90 @@ 
++/*************************************************************************/ /*!
++@File
++@Title BVNC handling specific header file
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Header for the BVNC related work
++ (see hwdefs/km/rgx_bvnc_table_km.h and
++ hwdefs/km/rgx_bvnc_defs_km.h)
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGXBVNC_H)
++#define RGXBVNC_H
++
++#include "pvrsrv_error.h"
++#include "img_types.h"
++#include "rgxdevice.h"
++
++/*************************************************************************/ /*!
++@brief This function detects the Rogue variant and configures the
++ essential config info associated with such a device.
++ The config info includes features, errata, etc
++@param psDeviceNode - Device Node pointer
++@return PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/*************************************************************************/ /*!
++@brief This function checks if a particular feature is available on ++ the given rgx device ++@param psDeviceNode - Device Node pointer ++@param ui64FeatureMask - feature to be checked ++@return true if feature is supported, false otherwise ++*/ /**************************************************************************/ ++IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask); ++ ++/*************************************************************************/ /*! ++@brief This function returns the value of a feature on the given ++ rgx device ++@param psDeviceNode - Device Node pointer ++@param ui64FeatureMask - feature for which to return the value ++@return the value for the specified feature ++*/ /**************************************************************************/ ++IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex); ++ ++/*************************************************************************/ /*! ++@brief This function validates that the BVNC values in CORE_ID regs are ++ consistent and correct. ++@param psDeviceNode - Device Node pointer ++@param GivenBVNC - BVNC to be verified against as supplied by caller ++@param CoreIdMask - mask of components to pull from CORE_ID register ++@return success or fail ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXVerifyBVNC(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64GivenBVNC, IMG_UINT64 ui64CoreIdMask); ++ ++#endif /* RGXBVNC_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxccb.c b/drivers/gpu/drm/img-rogue/rgxccb.c +new file mode 100644 +index 000000000000..7a76f8023b28 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxccb.c +@@ -0,0 +1,2803 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX CCB routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX CCB routines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include "pvr_debug.h"
++#include "rgxdevice.h"
++#include "pdump_km.h"
++#include "allocmem.h"
++#include "devicemem.h"
++#include "rgxfwutils.h"
++#include "osfunc.h"
++#include "rgxccb.h"
++#include "rgx_memallocflags.h"
++#include "devicemem_pdump.h"
++#include "dllist.h"
++#if defined(__linux__)
++#include "trace_events.h"
++#endif
++#include "sync_checkpoint_external.h"
++#include "sync_checkpoint.h"
++#include "rgxutils.h"
++#include "info_page.h"
++#include "rgxtimerquery.h"
++
++#if defined(PVRSRV_FORCE_FLUSH_CCCB_ON_KICK)
++#include "cache_km.h"
++#endif
++
++/*
++ * Uncomment the PVRSRV_ENABLE_CCCB_UTILISATION_INFO define for verbose
++ * info and statistics regarding CCB usage.
++ */
++//#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO
++
++/* Default threshold (as a percentage) for the PVRSRV_ENABLE_CCCB_UTILISATION_INFO feature. */
++#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD (90)
++
++/*
++ * Defines the number of fence updates to record, so that future fences in the
++ * CCB can be checked to see if they are already known to be satisfied.
++ */
++#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE (32)
++
++#define RGX_UFO_PTR_ADDR(ufoptr) \
++ (((ufoptr)->puiAddrUFO.ui32Addr) & 0xFFFFFFFC)
++
++#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
++ ((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1))
++
++#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
++ (Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1))
++
++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
++
++#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1
++#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2
++#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB 0x4
++
++typedef struct _RGX_CLIENT_CCB_UTILISATION_
++{
++ /* The threshold in bytes.
++ * When the CCB utilisation hits the threshold we will print
++ * a warning message.
++ */
++ IMG_UINT32 ui32ThresholdBytes;
++ /* Maximum cCCB usage at some point in time */
++ IMG_UINT32 ui32HighWaterMark;
++ /* Keep track of the warnings already printed.
++ * Bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz
++ */
++ IMG_UINT32 ui32Warnings;
++ /* Keep track of how many times the CCB was full.
++ * Counters are reset after every grow.
++ */ ++ IMG_UINT32 ui32CCBFull; ++ IMG_UINT32 ui32CCBAcquired; ++} RGX_CLIENT_CCB_UTILISATION; ++ ++#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ ++ ++struct _RGX_CLIENT_CCB_ { ++ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; /*!< CPU mapping of the CCB control structure used by the fw */ ++ void *pvClientCCB; /*!< CPU mapping of the CCB */ ++ DEVMEM_MEMDESC *psClientCCBMemDesc; /*!< MemDesc for the CCB */ ++ DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; /*!< MemDesc for the CCB control */ ++ IMG_UINT32 ui32HostWriteOffset; /*!< CCB write offset from the driver side */ ++ IMG_UINT32 ui32LastPDumpWriteOffset; /*!< CCB write offset from the last time we submitted a command in capture range */ ++ IMG_UINT32 ui32FinishedPDumpWriteOffset; /*!< Trails LastPDumpWriteOffset for last finished command, used for HW CB driven DMs */ ++ IMG_UINT32 ui32LastROff; /*!< Last CCB Read offset to help detect any CCB wedge */ ++ IMG_UINT32 ui32LastWOff; /*!< Last CCB Write offset to help detect any CCB wedge */ ++ IMG_UINT32 ui32ByteCount; /*!< Count of the number of bytes written to CCCB */ ++ IMG_UINT32 ui32LastByteCount; /*!< Last value of ui32ByteCount to help detect any CCB wedge */ ++ IMG_UINT32 ui32Size; /*!< Size of the CCB */ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ POS_LOCK hCCBGrowLock; /*!< Prevents CCB Grow while DumpCCB() is called and vice versa */ ++ IMG_UINT32 ui32VirtualAllocSize; /*!< Virtual size of the CCB */ ++ IMG_UINT32 ui32ChunkSize; /*!< CCB Sparse allocation chunk size */ ++ IMG_PUINT32 pui32MappingTable; /*!< Mapping table for sparse allocation of the CCB */ ++#endif ++ DLLIST_NODE sNode; /*!< Node used to store this CCB on the per connection list */ ++ PDUMP_CONNECTION_DATA *psPDumpConnectionData; /*!< Pointer to the per connection data in which we reside */ ++ void *hTransition; /*!< Handle for Transition callback */ ++ IMG_CHAR szName[MAX_CLIENT_CCB_NAME]; /*!< Name of this client CCB */ ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; /*!< Parent server common context that this CCB belongs to */ ++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) ++ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor; ++ RGX_CLIENT_CCB_UTILISATION sUtilisation; /*!< CCB utilisation data */ ++#endif ++#if defined(DEBUG) ++ IMG_UINT32 ui32UpdateEntries; /*!< Number of Fence Updates in asFenceUpdateList */ ++ RGXFWIF_UFO asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE]; /*!< List of recent updates written in this CCB */ ++#endif ++ IMG_UINT32 ui32CCBFlags; /*!< Bitmask for various flags relating to CCB. Bit defines in rgxccb.h */ ++}; ++ ++/* Forms a table, with array of strings for each requestor type (listed in RGX_CCB_REQUESTORS X macro), to be used for ++ DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings: ++ { "FwClientCCB:" , "FwClientCCBControl:" , }, ++ The first string being used as comment when allocating ClientCCB for the given requestor, the second for CCBControl ++ structure, and the 3rd one for use in PDUMP comments. The number of tuples in the table must adhere to the following ++ build assert. 
*/ ++const IMG_CHAR *const aszCCBRequestors[][3] = ++{ ++#define REQUESTOR_STRING(prefix,req) #prefix ":" #req ++#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req }, ++ RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE) ++#undef FORM_REQUESTOR_TUPLE ++}; ++ ++PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ ++ IMG_UINT32 ui32PollOffset; ++#if defined(PDUMP) ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); ++#endif ++ ++ if (BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) ++ { ++ /* Draining CCB on a command that hasn't finished, and FW isn't expected ++ * to have updated Roff up to Woff. Only drain to the first ++ * finished command prior to this. The Roff for this ++ * is stored in ui32FinishedPDumpWriteOffset. ++ */ ++ ui32PollOffset = psClientCCB->ui32FinishedPDumpWriteOffset; ++ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ++ ui32PDumpFlags, ++ "cCCB(%s@%p): Draining open CCB rgxfw_roff < woff (%d)", ++ psClientCCB->szName, ++ psClientCCB, ++ ui32PollOffset); ++ } ++ else ++ { ++ /* Command to a finished CCB stream and FW is drained to empty ++ * out remaining commands until R==W. ++ */ ++ ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset; ++ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ++ ui32PDumpFlags, ++ "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)", ++ psClientCCB->szName, ++ psClientCCB, ++ ui32PollOffset); ++ } ++ ++ return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc, ++ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), ++ ui32PollOffset, ++ 0xffffffff, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ ui32PDumpFlags); ++} ++ ++/****************************************************************************** ++ FUNCTION : RGXCCBPDumpSyncCCB ++ ++ PURPOSE : Synchronise Client CCBs from both live and playback contexts. ++ Waits for live-FW to empty live-CCB. ++ Waits for sim-FW to empty sim-CCB by adding POL ++ ++ PARAMETERS : psClientCCB - The client CCB ++ ui32PDumpFlags - PDump flags ++ ++ RETURNS : PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXCCBPDumpSyncCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Wait for the live FW to catch up/empty CCB. This is done by returning ++ * retry which will get pushed back out to Services client where it ++ * waits on the event object and then resubmits the command. ++ */ ++ if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset) ++ { ++ return PVRSRV_ERROR_RETRY; ++ } ++ ++ /* Wait for the sim FW to catch up/empty sim CCB. ++ * We drain whenever capture range is entered, even if no commands ++ * have been issued on this CCB when out of capture range. We have to ++ * wait for commands that might have been issued in the last capture ++ * range to finish so the connection's sync block snapshot dumped after ++ * all the PDumpTransition callbacks have been execute doesn't clobber ++ * syncs which the sim FW is currently working on. ++ * ++ * Although this is sub-optimal for play-back - while out of capture ++ * range for every continuous operation we synchronise the sim ++ * play-back processing the script and the sim FW, there is no easy ++ * solution. 
Not all modules that work with syncs register a ++ * PDumpTransition callback and thus we have no way of knowing if we ++ * can skip this sim CCB drain and sync block dump or not. ++ */ ++ ++ eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags); ++ PVR_LOG_IF_ERROR(eError, "RGXCCBPDumpDrainCCB"); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ /* Live CCB and simulation CCB now empty, FW idle on CCB in both ++ * contexts. ++ */ ++ return PVRSRV_OK; ++} ++ ++/****************************************************************************** ++ FUNCTION : RGXCCBPDumpFastForwardCCB ++ ++ PURPOSE : Fast-forward sim-CCB and live-CCB offsets to live app-thread ++ values. ++ This helps to skip any commands submitted when out of capture ++ range and start with first command in capture range in both ++ live and playback contexts. In case of Block mode, this helps ++ to playback any intermediate PDump block directly after first ++ block. ++ ++ ++ PARAMETERS : psClientCCB - The client CCB ++ ui32PDumpFlags - PDump flags ++ ++ RETURNS : void ++******************************************************************************/ ++static void RGXCCBPDumpFastForwardCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32PDumpFlags) ++{ ++ volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl; ++#if defined(PDUMP) ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); ++#endif ++ ++ /* Make sure that we have synced live-FW and live-App threads */ ++ PVR_ASSERT(psCCBCtl->ui32ReadOffset == psClientCCB->ui32HostWriteOffset); ++ ++ psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; ++ psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; ++ psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset; ++#if defined(SUPPORT_AGP) ++ psCCBCtl->ui32ReadOffset2 = psClientCCB->ui32HostWriteOffset; ++#if defined(SUPPORT_AGP4) ++ psCCBCtl->ui32ReadOffset3 = psClientCCB->ui32HostWriteOffset; ++ psCCBCtl->ui32ReadOffset4 = psClientCCB->ui32HostWriteOffset; ++#endif ++#endif ++ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ++ ui32PDumpFlags, ++ "cCCB(%s@%p): Fast-forward from %d to %d", ++ psClientCCB->szName, ++ psClientCCB, ++ psClientCCB->ui32LastPDumpWriteOffset, ++ psClientCCB->ui32HostWriteOffset); ++ ++ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, ++ 0, ++ sizeof(RGXFWIF_CCCB_CTL), ++ ui32PDumpFlags); ++ ++ /* Although we've entered capture range for this process connection ++ * we might not do any work on this CCB so update the ++ * ui32LastPDumpWriteOffset to reflect where we got to for next ++ * time so we start the drain from where we got to last time. ++ */ ++ psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset; ++ ++} ++ ++static PVRSRV_ERROR _RGXCCBPDumpTransition(void *pvData, void *pvDevice, PDUMP_TRANSITION_EVENT eEvent, IMG_UINT32 ui32PDumpFlags) ++{ ++ RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData; ++#if defined(PDUMP) ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) pvDevice; ++#endif ++ PVRSRV_ERROR eError; ++ ++ /* Block mode: ++ * Here is block structure at transition (ui32BlockLength=N frames): ++ * ++ * ... ++ * ... ++ * PDUMP_BLOCK_START_0x0000000x{ ++ * ++ * ++ * ... ++ * ... ++ * ... (N frames data) ++ * ... ++ * ... 
++ * <(1) Drain sim-KCCB> ''| ++ * <(2) Sync live and sim CCCB> | ++ * }PDUMP_BLOCK_END_0x0000000x | <- BlockTransition Steps ++ * <(3) Split MAIN and BLOCK stream script> | ++ * PDUMP_BLOCK_START_0x0000000y{ | ++ * <(4) Fast-forward sim-CCCB> | ++ * <(5) Re-dump SyncBlocks> ,,| ++ * ... ++ * ... ++ * ... (N frames data) ++ * ... ++ * ... ++ * ++ * ++ * }PDUMP_BLOCK_END_0x0000000y ++ * ... ++ * ... ++ * ++ * Steps (3) and (5) are done in pdump_server.c ++ * */ ++ switch (eEvent) ++ { ++ case PDUMP_TRANSITION_EVENT_RANGE_ENTERED: ++ { ++ /* We're about to transition into capture range and we've submitted ++ * new commands since the last time we entered capture range so drain ++ * the live CCB and simulation (sim) CCB as required, i.e. leave CCB ++ * idle in both live and sim contexts. ++ * This requires the host driver to ensure the live FW & the sim FW ++ * have both emptied out the remaining commands until R==W (CCB empty). ++ */ ++ ++ eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ if (psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset) ++ { ++ /* If new commands have been written when out of capture range in ++ * the live CCB then we need to fast forward the sim CCBCtl ++ * offsets past uncaptured commands. This is done by PDUMPing ++ * the CCBCtl memory to align sim values with the live CCBCtl ++ * values. Both live and sim FWs can start with the 1st command ++ * which is in the new capture range. ++ */ ++ RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags); ++ } ++ break; ++ } ++ case PDUMP_TRANSITION_EVENT_RANGE_EXITED: ++ { ++ /* Nothing to do */ ++ break; ++ } ++ case PDUMP_TRANSITION_EVENT_BLOCK_FINISHED: ++ { ++ /* (1) Drain KCCB from current block before starting new: ++ * ++ * At playback, this will ensure that sim-FW drains all commands in KCCB ++ * belongs to current block before 'jumping' to any future commands (from ++ * next block). This will synchronise script-thread and sim-FW thread KCCBs ++ * at end of each pdump block. ++ * ++ * This will additionally force redump of KCCBCtl structure at start of next/new block. ++ * */ ++ ++#if defined(PDUMP) ++ eError = RGXPdumpDrainKCCB(psDevInfo, psDevInfo->psKernelCCBCtl->ui32WriteOffset); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXPdumpDrainKCCB"); ++#endif ++ ++ /* (2) Synchronise Client CCBs from live and playback contexts before starting new block: ++ * ++ * This operation will, ++ * a. Force synchronisation between app-thread and live-FW thread (i.e. Wait ++ * for live-FW to empty live Client CCB). ++ * ++ * b. Next, it will dump poll command to drain Client CCB at end of every ++ * pdump block. At playback time this will synchronise sim-FW and ++ * script-thread Client CCBs at end of each block. ++ * ++ * This is to ensure that all commands in CCB from current block are processed ++ * before moving on to future commands. ++ * */ ++ ++ eError = RGXCCBPDumpSyncCCB(psClientCCB, ui32PDumpFlags); ++ PVR_RETURN_IF_ERROR(eError); ++ break; ++ } ++ case PDUMP_TRANSITION_EVENT_BLOCK_STARTED: ++ { ++ /* (4) Fast-forward CCB write offsets to current live values: ++ * ++ * We have already synchronised live-FW and app-thread above at end of each ++ * block (in Step 2a above), now fast-forward Client CCBCtl write offsets to that of ++ * current app-thread values at start of every block. This will allow us to ++ * skip any intermediate pdump blocks and start with last (or any next) block ++ * immediately after first pdump block. 
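++ *
++ * (Editorial note: as RGXCCBPDumpFastForwardCCB above shows, this
++ * fast-forward just sets the CCBCtl read/dependency/write offsets to
++ * the live ui32HostWriteOffset and re-dumps the CCBCtl structure, so
++ * playback resumes as if every skipped command had already completed.)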
++			 * */
++
++			RGXCCBPDumpFastForwardCCB(psClientCCB, ui32PDumpFlags);
++			break;
++		}
++		case PDUMP_TRANSITION_EVENT_NONE:
++			/* Invalid event for transition */
++		default:
++		{
++			/* Unknown Transition event */
++			return PVRSRV_ERROR_INVALID_PARAMS;
++		}
++	}
++	return PVRSRV_OK;
++}
++
++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
++
++static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
++{
++	psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* initialize ui32HighWaterMark level to zero */
++	psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size *
++					PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD) / 100;
++	psClientCCB->sUtilisation.ui32Warnings = 0;
++	psClientCCB->sUtilisation.ui32CCBAcquired = 0;
++	psClientCCB->sUtilisation.ui32CCBFull = 0;
++}
++
++static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB,
++					IMG_UINT32 ui32WarningType,
++					IMG_UINT32 ui32CmdSize)
++{
++	/* in VERBOSE mode we will print a message for each different
++	 * event type as they happen.
++	 */
++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE)
++	if (!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType))
++	{
++		if (ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED)
++		{
++			PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize));
++		}
++
++		PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)",
++				__func__,
++				psClientCCB->szName,
++				psClientCCB->sUtilisation.ui32HighWaterMark,
++				psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size,
++				psClientCCB->ui32Size));
++
++		/* record that we have issued a warning of this type */
++		psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType;
++	}
++#else
++	PVR_UNREFERENCED_PARAMETER(psClientCCB);
++	PVR_UNREFERENCED_PARAMETER(ui32WarningType);
++	PVR_UNREFERENCED_PARAMETER(ui32CmdSize);
++#endif
++}
++
++/* Check the current CCB utilisation. Print a one-time warning message if it is above the
++ * specified threshold
++ */
++static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
++{
++	/* Print a warning message if the cCCB watermark is above the threshold value */
++	if (psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes)
++	{
++		_RGXCCBUtilisationEvent(psClientCCB,
++					PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD,
++					0);
++	}
++}
++
++/* Update the cCCB high watermark level if necessary */
++static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
++{
++	IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage;
++
++	ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
++				      psClientCCB->psClientCCBCtrl->ui32ReadOffset,
++				      psClientCCB->ui32Size);
++	ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace;
++
++	if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark)
++	{
++		psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage;
++
++		/* The high water mark has increased. Check if it is above the
++		 * threshold so we can print a warning if necessary.
++ */ ++ _RGXCheckCCBUtilisation(psClientCCB); ++ } ++} ++ ++#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ ++ ++PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32CCBSizeLog2, ++ IMG_UINT32 ui32CCBMaxSizeLog2, ++ IMG_UINT32 ui32ContextFlags, ++ CONNECTION_DATA *psConnectionData, ++ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, ++ RGX_CLIENT_CCB **ppsClientCCB, ++ DEVMEM_MEMDESC **ppsClientCCBMemDesc, ++ DEVMEM_MEMDESC **ppsClientCCBCtrlMemDesc) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_MEMALLOCFLAGS_T uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags; ++ IMG_UINT32 ui32FWMainLog2PageSize = DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap); ++ IMG_UINT32 ui32ChunkSize = (1U << ui32FWMainLog2PageSize); ++ IMG_UINT32 ui32AllocSize = MAX((1U << ui32CCBSizeLog2), ui32ChunkSize); ++ IMG_UINT32 ui32MinAllocSize = MAX((1U << MIN_SAFE_CCB_SIZE_LOG2), ui32ChunkSize); ++ RGX_CLIENT_CCB *psClientCCB; ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ IMG_UINT32 ui32NumChunks = ui32AllocSize / ui32ChunkSize; ++ IMG_UINT32 ui32VirtualAllocSize = (1U << ui32CCBMaxSizeLog2); ++ IMG_UINT32 ui32NumVirtChunks = ui32VirtualAllocSize / ui32ChunkSize; ++ IMG_UINT32 i; ++ ++ /* For the allocation request to be valid, at least one page is required. ++ * This is relevant on systems where the page size is greater than the client CCB size. */ ++ ui32NumVirtChunks = MAX(1, ui32NumVirtChunks); ++ PVR_ASSERT((ui32ChunkSize >= (1U << PAGE_SHIFT))); ++#else ++ PVR_UNREFERENCED_PARAMETER(ui32CCBMaxSizeLog2); ++#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ ++ ++ /* All client CCBs should be at-least of the "minimum" size and not to exceed "maximum" */ ++ if ((ui32CCBSizeLog2 < MIN_SAFE_CCB_SIZE_LOG2) || ++ (ui32CCBSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s CCB size is invalid (%d). Should be from %d to %d", ++ __func__, ++ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ++ ui32CCBSizeLog2, MIN_SAFE_CCB_SIZE_LOG2, MAX_SAFE_CCB_SIZE_LOG2)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ if ((ui32CCBMaxSizeLog2 < ui32CCBSizeLog2) || ++ (ui32CCBMaxSizeLog2 > MAX_SAFE_CCB_SIZE_LOG2)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s CCB maximum size is invalid (%d). 
Should be from %d to %d", ++ __func__, ++ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ++ ui32CCBMaxSizeLog2, ui32CCBSizeLog2, MAX_SAFE_CCB_SIZE_LOG2)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++#endif ++ ++ psClientCCB = OSAllocMem(sizeof(*psClientCCB)); ++ if (psClientCCB == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto fail_alloc; ++ } ++ psClientCCB->psServerCommonContext = psServerCommonContext; ++ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ psClientCCB->ui32VirtualAllocSize = 0; ++ psClientCCB->pui32MappingTable = NULL; ++ psClientCCB->ui32ChunkSize = ui32ChunkSize; ++#endif ++ ++ uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); ++ ++ uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); ++ ++ /* If connection data indicates Sync Lockup Recovery (SLR) should be disabled, ++ * or if the caller has set ui32ContextFlags to disable SLR for this context, ++ * indicate this in psClientCCB->ui32CCBFlags. ++ */ ++ if ((psConnectionData->ui32ClientFlags & SRV_FLAGS_CLIENT_SLR_DISABLED) || ++ (ui32ContextFlags & RGX_CONTEXT_FLAG_DISABLESLR)) ++ { ++ BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); ++ } ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGXFW cCCB"); ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ if (BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN)) ++ { ++ PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; ++ PHYS_HEAP_TYPE eHeapType = PhysHeapGetType(psPhysHeap); ++ ++ psClientCCB->ui32VirtualAllocSize = ui32VirtualAllocSize; ++ ++ /* ++ * Growing CCB is doubling the size. Last grow would require only ui32NumVirtChunks/2 new chunks ++ * because another ui32NumVirtChunks/2 is already allocated. ++ * Sometimes initial chunk count would be higher (when CCB size is equal to CCB maximum size) so MAX is needed. ++ */ ++ psClientCCB->pui32MappingTable = OSAllocMem(MAX(ui32NumChunks, ui32NumVirtChunks/2) * sizeof(IMG_UINT32)); ++ if (psClientCCB->pui32MappingTable == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto fail_alloc_mtable; ++ } ++ for (i = 0; i < ui32NumChunks; i++) ++ { ++ psClientCCB->pui32MappingTable[i] = i; ++ } ++ ++ if (eHeapType == PHYS_HEAP_TYPE_LMA || ++ eHeapType == PHYS_HEAP_TYPE_DMA) ++ { ++ /* ++ * On LMA sparse memory can't be mapped to kernel. ++ * To work around this whole ccb memory is allocated at once as contiguous. 
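++			 *
++			 * As a worked example, assuming 4 KiB chunks with
++			 * ui32CCBSizeLog2 = 16 and ui32CCBMaxSizeLog2 = 18: a sparse
++			 * heap starts with 16 of 64 virtual chunks mapped (64 KiB of
++			 * 256 KiB) and grows later by doubling, whereas an LMA/DMA
++			 * heap simply allocates the full 256 KiB up front.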
++ */ ++ eError = DevmemFwAllocate(psDevInfo, ++ ui32VirtualAllocSize, ++ uiClientCCBMemAllocFlags, ++ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], ++ &psClientCCB->psClientCCBMemDesc); ++ } ++ else ++ { ++ eError = DevmemFwAllocateSparse(psDevInfo, ++ ui32VirtualAllocSize, ++ ui32ChunkSize, ++ ui32NumChunks, ++ ui32NumVirtChunks, ++ psClientCCB->pui32MappingTable, ++ uiClientCCBMemAllocFlags, ++ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], ++ &psClientCCB->psClientCCBMemDesc); ++ } ++ } ++ ++ if (eError != PVRSRV_OK) ++ { ++ OSFreeMem(psClientCCB->pui32MappingTable); ++ psClientCCB->pui32MappingTable = NULL; ++ psClientCCB->ui32VirtualAllocSize = 0; ++ } ++ ++ if (!BITMASK_HAS(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN) || ++ (eError != PVRSRV_OK)) ++#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ ++ { ++ /* Allocate ui32AllocSize, or the next best POT allocation */ ++ do ++ { ++ eError = DevmemFwAllocate(psDevInfo, ++ ui32AllocSize, ++ uiClientCCBMemAllocFlags, ++ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING], ++ &psClientCCB->psClientCCBMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ /* Failed to allocate - ensure CCB grow is disabled from ++ * now on for this device. ++ */ ++ BITMASK_UNSET(psDevInfo->ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); ++ ++ /* Failed to allocate, try next POT down */ ++ ui32AllocSize >>= 1; ++ } ++ } while ((eError != PVRSRV_OK) && (ui32AllocSize > ui32MinAllocSize)); ++ } ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate RGX client CCB (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_alloc_ccb; ++ } ++ ++ OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s", ++ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ++ (unsigned long) OSGetCurrentClientProcessIDKM(), ++ (unsigned long) OSGetCurrentClientThreadIDKM(), ++ OSGetCurrentClientProcessNameKM()); ++ ++ if (ui32AllocSize < (1U << ui32CCBSizeLog2)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Unable to allocate %d bytes for RGX client CCB (%s) but allocated %d bytes", ++ __func__, ++ (1U << ui32CCBSizeLog2), ++ psClientCCB->szName, ++ ui32AllocSize)); ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, ++ &psClientCCB->pvClientCCB); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map RGX client CCB (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_map_ccb; ++ } ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGXFW cCCB control"); ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(RGXFWIF_CCCB_CTL), ++ uiClientCCBCtlMemAllocFlags, ++ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING], ++ &psClientCCB->psClientCCBCtrlMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate RGX client CCB control (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_alloc_ccbctrl; ++ } ++ ++ ++ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc, ++ (void **) &psClientCCB->psClientCCBCtrl); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map RGX client CCB control (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_map_ccbctrl; ++ } ++ ++ /* psClientCCBCtrlMemDesc was zero alloc'd so no need to initialise offsets. 
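++	 * Only the wrap mask needs a value: for the power-of-two sizes used
++	 * here it is ui32AllocSize - 1, e.g. a 64 KiB CCB gives a mask of
++	 * 0xFFFF, so offsets wrap with a single AND.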
*/ ++ psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1; ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "cCCB control"); ++ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, ++ 0, ++ sizeof(RGXFWIF_CCCB_CTL), ++ PDUMP_FLAGS_CONTINUOUS); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ psClientCCB->ui32HostWriteOffset = 0; ++ psClientCCB->ui32LastPDumpWriteOffset = 0; ++ psClientCCB->ui32FinishedPDumpWriteOffset = 0; ++ psClientCCB->ui32Size = ui32AllocSize; ++ psClientCCB->ui32LastROff = ui32AllocSize - 1; ++ psClientCCB->ui32ByteCount = 0; ++ psClientCCB->ui32LastByteCount = 0; ++ BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); ++ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ eError = OSLockCreate(&psClientCCB->hCCBGrowLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to create hCCBGrowLock (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_create_ccbgrow_lock; ++ } ++#endif ++#if defined(DEBUG) ++ psClientCCB->ui32UpdateEntries = 0; ++#endif ++ ++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) ++ _RGXInitCCBUtilisation(psClientCCB); ++ psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor; ++#endif ++ eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData, ++ _RGXCCBPDumpTransition, ++ psClientCCB, ++ psDevInfo, ++ &psClientCCB->hTransition); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_pdumpreg; ++ } ++ ++ /* ++ * Note: ++ * Save the PDump specific structure, which is ref counted unlike ++ * the connection data, to ensure it's not freed too early ++ */ ++ psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData; ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "New RGXFW cCCB(%s@%p) created", ++ psClientCCB->szName, ++ psClientCCB); ++ ++ *ppsClientCCB = psClientCCB; ++ *ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc; ++ *ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc; ++ return PVRSRV_OK; ++ ++fail_pdumpreg: ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ OSLockDestroy(psClientCCB->hCCBGrowLock); ++fail_create_ccbgrow_lock: ++#endif ++ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); ++fail_map_ccbctrl: ++ DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); ++fail_alloc_ccbctrl: ++ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); ++fail_map_ccb: ++ DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++fail_alloc_ccb: ++ if ( psClientCCB->ui32VirtualAllocSize > 0) ++ { ++ OSFreeMem(psClientCCB->pui32MappingTable); ++ } ++fail_alloc_mtable: ++#else ++fail_alloc_ccb: ++#endif ++ OSFreeMem(psClientCCB); ++fail_alloc: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB) ++{ ++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) ++ if (psClientCCB->sUtilisation.ui32CCBFull) ++ { ++ PVR_LOG(("CCBUtilisationInfo: GPU %s command buffer was full %d times out of %d. 
" ++ "This is not an error but the application may not run optimally.", ++ aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT], ++ psClientCCB->sUtilisation.ui32CCBFull, ++ psClientCCB->sUtilisation.ui32CCBAcquired)); ++ } ++#endif ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ OSLockDestroy(psClientCCB->hCCBGrowLock); ++#endif ++ PDumpUnregisterTransitionCallback(psClientCCB->hTransition); ++ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc); ++ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psClientCCB->psClientCCBMemDesc); ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ if (psClientCCB->pui32MappingTable) ++ { ++ OSFreeMem(psClientCCB->pui32MappingTable); ++ } ++#endif ++ OSFreeMem(psClientCCB); ++} ++ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++static PVRSRV_ERROR _RGXCCBMemChangeSparse(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32AllocPageCount) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 i; ++ ++#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE ++ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc); ++#endif ++ ++ for (i = 0; i < ui32AllocPageCount; i++) ++ { ++ psClientCCB->pui32MappingTable[i] = ui32AllocPageCount + i; ++ } ++ ++ /* Double the CCB size (CCB must be POT) by adding ui32AllocPageCount new pages */ ++ eError = DeviceMemChangeSparse(psClientCCB->psClientCCBMemDesc, ++ ui32AllocPageCount, ++ psClientCCB->pui32MappingTable, ++ 0, ++ NULL, ++#if !defined(PVRSRV_UNMAP_ON_SPARSE_CHANGE) ++ SPARSE_MAP_CPU_ADDR | ++#endif ++ SPARSE_RESIZE_ALLOC); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to grow RGX client CCB (%s)", ++ PVRSRVGetErrorString(eError))); ++ ++#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE ++ if (DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, ++ &psClientCCB->pvClientCCB) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to reacquire CCB mapping")); ++ psClientCCB->pvClientCCB = NULL; ++ } ++#endif ++ ++ return eError; ++ } ++ ++#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE ++ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc, ++ &psClientCCB->pvClientCCB); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXAcquireCCB: Failed to map RGX client CCB (%s)", ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ ++ ++PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize) ++{ ++ IMG_UINT32 ui32FreeSpace; ++ ++ /* Check that the CCB can hold this command + padding */ ++ if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB" ++ " (%d bytes)", ui32CmdSize, psClientCCB->ui32Size)); ++ return PVRSRV_ERROR_CMD_TOO_BIG; ++ } ++ ++ /* ++ Check we don't overflow the end of the buffer and make sure we have ++ enough space for the padding command. If we don't have enough space ++ (including the minimum amount for the padding command) we need to make ++ sure we insert a padding command now and wrap before adding the main ++ command. 
++ */ ++ if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size) ++ { ++ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, ++ psClientCCB->psClientCCBCtrl->ui32ReadOffset, ++ psClientCCB->ui32Size); ++ ++ /* Don't allow all the space to be used */ ++ if (ui32FreeSpace > ui32CmdSize) ++ { ++ return PVRSRV_OK; ++ } ++ ++ goto e_retry; ++ } ++ else ++ { ++ IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset; ++ ++ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, ++ psClientCCB->psClientCCBCtrl->ui32ReadOffset, ++ psClientCCB->ui32Size); ++ ++ /* Check there is space for both the command and the padding command */ ++ if (ui32FreeSpace > ui32Remain + ui32CmdSize) ++ { ++ return PVRSRV_OK; ++ } ++ ++ goto e_retry; ++ } ++ ++e_retry: ++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) ++ _RGXCCBUtilisationEvent(psClientCCB, ++ PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_FULL_CCB, ++ ui32CmdSize); ++#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ ++ ++ return PVRSRV_ERROR_RETRY; ++} ++ ++/****************************************************************************** ++ FUNCTION : RGXAcquireCCB ++ ++ PURPOSE : Obtains access to write some commands to a CCB ++ ++ PARAMETERS : psClientCCB - The client CCB ++ ui32CmdSize - How much space is required ++ ppvBufferSpace - Pointer to space in the buffer ++ ui32PDumpFlags - Should this be PDump continuous? ++ ++ RETURNS : PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32CmdSize, ++ void **ppvBufferSpace, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ IMG_UINT32 ui32RetryCount = 2; ++#endif ++ ++#if defined(PDUMP) ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext); ++ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; ++ IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, ui32PDumpFlags); ++ IMG_BOOL bPDumpFlagsContinuous = PDUMP_IS_CONTINUOUS(ui32PDumpFlags); ++ ++ /* ++ PDumpSetFrame will detect as we Transition into capture range for ++ frame based data but if we are PDumping continuous data then we ++ need to inform the PDump layer ourselves ++ ++ First check is to confirm we are in continuous mode ++ Second check is to confirm the pdump client is connected and ready. ++ Third check is to confirm we are not in capture range. ++ */ ++ if (bPDumpFlagsContinuous && ++ bPDumpEnabled && ++ !PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_NONE)) ++ { ++ eError = PDumpTransition(psDeviceNode, ++ psClientCCB->psPDumpConnectionData, ++ PDUMP_TRANSITION_EVENT_RANGE_ENTERED, ++ ui32PDumpFlags); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ } ++#endif ++ ++ /* Check that the CCB can hold this command + padding */ ++ if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)", ++ ui32CmdSize, psClientCCB->ui32Size)); ++ return PVRSRV_ERROR_CMD_TOO_BIG; ++ } ++ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ while (ui32RetryCount--) ++#endif ++ { ++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) ++ psClientCCB->sUtilisation.ui32CCBAcquired++; ++#endif ++ ++ /* ++ Check we don't overflow the end of the buffer and make sure we have ++ enough space for the padding command. 
If we don't have enough space (including the
++		minimum amount for the padding command) we will need to insert a
++		padding command now and wrap before adding the main command.
++		*/
++		if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size)
++		{
++			/* The command can fit without wrapping... */
++			IMG_UINT32 ui32FreeSpace;
++
++#if defined(PDUMP)
++			/* Wait for sufficient CCB space to become available */
++			PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0,
++			                      "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s",
++			                      ui32CmdSize, psClientCCB->ui32HostWriteOffset,
++			                      psClientCCB->szName);
++			DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
++			               offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
++			               psClientCCB->ui32HostWriteOffset,
++			               ui32CmdSize,
++			               psClientCCB->ui32Size);
++#endif
++
++			ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
++			                              psClientCCB->psClientCCBCtrl->ui32ReadOffset,
++			                              psClientCCB->ui32Size);
++
++			/* Can command fit? */
++			if (ui32FreeSpace > ui32CmdSize)
++			{
++				*ppvBufferSpace = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
++				return PVRSRV_OK;
++			}
++			/* There is not enough free space in CCB. */
++			goto e_retry;
++		}
++		else
++		{
++			/*
++				We're at the end of the buffer without enough contiguous space.
++				The command cannot fit without wrapping, we need to insert a
++				padding command and wrap. We need to do this in one go otherwise
++				we would be leaving unflushed commands and forcing the client to
++				deal with flushing the padding command but not the command they
++				wanted to write. Therefore we either do all or nothing.
++			*/
++			RGXFWIF_CCB_CMD_HEADER *psHeader;
++			IMG_UINT32 ui32FreeSpace;
++			IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
++
++#if defined(PVRSRV_ENABLE_CCCB_GROW)
++			/* Check this is a growable CCB */
++			if (psClientCCB->ui32VirtualAllocSize > 0)
++			{
++				PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext);
++
++				ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
++				                              psClientCCB->psClientCCBCtrl->ui32ReadOffset,
++				                              psClientCCB->ui32Size);
++				/*
++				 * Check whether the CCB should grow or be wrapped.
++				 * Wrap the CCB if there is no need to grow (the CCB is at least half empty)
++				 * or the CCB can't grow, and there is free space for the command and padding.
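++				 * For example, with ui32Size = 64 KiB grown from a 256 KiB
++				 * virtual allocation, a 3 KiB command at write offset 62 KiB
++				 * wraps if more than half the CCB (32 KiB) is free and the
++				 * 2 KiB remainder plus the 3 KiB command also fit; otherwise
++				 * the CCB grows to 128 KiB and the command is retried.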
++ */ ++ if (((ui32FreeSpace > psClientCCB->ui32Size/2) || (psClientCCB->ui32Size == psClientCCB->ui32VirtualAllocSize)) && ++ (ui32FreeSpace > ui32Remain + ui32CmdSize)) ++ { ++ /* Wrap CCB */ ++ psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); ++ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING; ++ psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER); ++ ++#if defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, ++ "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize); ++ if (bPDumpEnabled) ++ { ++ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, ++ psClientCCB->ui32HostWriteOffset, ++ ui32Remain, ++ ui32PDumpFlags); ++ } ++#endif ++ ++ *ppvBufferSpace = psClientCCB->pvClientCCB; ++ return PVRSRV_OK; ++ } ++ else if ((psClientCCB->ui32Size < psClientCCB->ui32VirtualAllocSize) && ++ (psClientCCB->ui32HostWriteOffset >= psClientCCB->psClientCCBCtrl->ui32ReadOffset)) ++ { ++ /* Grow CCB */ ++ PHYS_HEAP *psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; ++ PHYS_HEAP_TYPE eHeapType = PhysHeapGetType(psPhysHeap); ++ PVRSRV_ERROR eErr = PVRSRV_OK; ++ ++ /* Something went wrong if we are here a second time */ ++ PVR_ASSERT(ui32RetryCount != 0); ++ OSLockAcquire(psClientCCB->hCCBGrowLock); ++ ++ /* ++ * On LMA sparse memory can't be mapped to kernel. ++ * To work around this whole ccb memory was allocated at once as contiguous. ++ * In such case below sparse change is not needed because memory is already allocated. ++ */ ++ if (eHeapType != PHYS_HEAP_TYPE_LMA && ++ eHeapType != PHYS_HEAP_TYPE_DMA) ++ { ++ IMG_UINT32 ui32AllocChunkCount = psClientCCB->ui32Size / psClientCCB->ui32ChunkSize; ++ ++ eErr = _RGXCCBMemChangeSparse(psClientCCB, ui32AllocChunkCount); ++ } ++ ++ /* Setup new CCB size */ ++ if (eErr == PVRSRV_OK) ++ { ++ psClientCCB->ui32Size += psClientCCB->ui32Size; ++ } ++ else ++ { ++ PVR_LOG(("%s: Client CCB (%s) grow failed (%s)", __func__, psClientCCB->szName, PVRSRVGetErrorString(eErr))); ++ OSLockRelease(psClientCCB->hCCBGrowLock); ++ goto e_retry; ++ } ++ ++#if defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, "cCCB update for grow"); ++ if (bPDumpEnabled) ++ { ++ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc, ++ offsetof(RGXFWIF_CCCB_CTL, ui32WrapMask), ++ sizeof(psClientCCB->psClientCCBCtrl->ui32WrapMask), ++ ui32PDumpFlags); ++ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, ++ offsetof(RGX_CLIENT_CCB, ui32Size), ++ sizeof(psClientCCB->ui32Size), ++ ui32PDumpFlags); ++ } ++#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ ++ ++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) ++ PVR_LOG(("%s: Client CCB (%s) grew to %u", __func__, psClientCCB->szName, psClientCCB->ui32Size)); ++ /* Reset counters */ ++ _RGXInitCCBUtilisation(psClientCCB); ++#endif ++ ++ /* CCB doubled the size so retry now. 
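++				 * The outer while (ui32RetryCount--) loop then re-runs the
++				 * space checks against the doubled ui32Size; a second grow
++				 * attempt is not expected (see the PVR_ASSERT on
++				 * ui32RetryCount above).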
*/ ++ OSLockRelease(psClientCCB->hCCBGrowLock); ++ } ++ else ++ { ++ /* CCB can't grow anymore and can't be wrapped */ ++#if defined(PDUMP) ++ /* Wait for sufficient CCB space to become available */ ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0, ++ "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", ++ ui32Remain, psClientCCB->ui32HostWriteOffset, ++ psClientCCB->szName); ++ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, ++ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), ++ psClientCCB->ui32HostWriteOffset, ++ ui32Remain, ++ psClientCCB->ui32Size); ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0, ++ "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", ++ ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */, ++ psClientCCB->szName); ++ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, ++ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), ++ 0 /*ui32HostWriteOffset after wrap */, ++ ui32CmdSize, ++ psClientCCB->ui32Size); ++ /* CCB has now space for our command so try wrapping again. Retry now. */ ++#else /* defined(PDUMP) */ ++ goto e_retry; ++#endif /* defined(PDUMP) */ ++ } ++ } ++ else ++#endif /* defined(PVRSRV_ENABLE_CCCB_GROW) */ ++ { ++#if defined(PDUMP) ++ /* Wait for sufficient CCB space to become available */ ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0, ++ "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", ++ ui32Remain, psClientCCB->ui32HostWriteOffset, ++ psClientCCB->szName); ++ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, ++ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), ++ psClientCCB->ui32HostWriteOffset, ++ ui32Remain, ++ psClientCCB->ui32Size); ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, 0, ++ "Wait for %u bytes to become available according cCCB Ctl (woff=%x) for %s", ++ ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */, ++ psClientCCB->szName); ++ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc, ++ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset), ++ 0 /*ui32HostWriteOffset after wrap */, ++ ui32CmdSize, ++ psClientCCB->ui32Size); ++#endif ++ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset, ++ psClientCCB->psClientCCBCtrl->ui32ReadOffset, ++ psClientCCB->ui32Size); ++ ++ if (ui32FreeSpace > ui32Remain + ui32CmdSize) ++ { ++ psHeader = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset); ++ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING; ++ psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER); ++#if defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize); ++ if (bPDumpEnabled) ++ { ++ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc, ++ psClientCCB->ui32HostWriteOffset, ++ ui32Remain, ++ ui32PDumpFlags); ++ } ++#endif ++ ++ *ppvBufferSpace = psClientCCB->pvClientCCB; ++ return PVRSRV_OK; ++ } ++ ++ goto e_retry; ++ } ++ } ++ } ++e_retry: ++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) ++ psClientCCB->sUtilisation.ui32CCBFull++; ++ _RGXCCBUtilisationEvent(psClientCCB, ++ PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED, ++ ui32CmdSize); ++#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */ ++ return PVRSRV_ERROR_RETRY; ++} ++ ++/****************************************************************************** ++ FUNCTION : RGXReleaseCCB ++ ++ PURPOSE : Release a CCB that we have been writing to. 
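++                Advances the host write offset past the released command
++                (first accounting for any padding command inserted at a
++                wrap), PDumps the command bytes when in capture range and,
++                on NO_HARDWARE builds, also advances the firmware read
++                offset on the firmware's behalf.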
++
++ PARAMETERS	: psClientCCB		- The client CCB
++				  ui32CmdSize		- Size of the command being released
++				  ui32PDumpFlags	- PDump flags
++
++ RETURNS	: None
++******************************************************************************/
++void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
++                   IMG_UINT32 ui32CmdSize,
++                   IMG_UINT32 ui32PDumpFlags)
++{
++#if defined(PDUMP)
++	PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psClientCCB->psServerCommonContext);
++	PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
++	IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, ui32PDumpFlags);
++	IMG_BOOL bPDumpFlagsContinuous = PDUMP_IS_CONTINUOUS(ui32PDumpFlags);
++#endif
++
++#if defined(PVRSRV_ENABLE_CCCB_GROW)
++	OSLockAcquire(psClientCCB->hCCBGrowLock);
++#endif
++	/*
++	 * If a padding command was needed then we should now move ui32HostWriteOffset
++	 * forward. The command has already been dumped (if bPDumpEnabled).
++	 */
++	if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size)
++	{
++		IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
++
++		UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
++		                  ui32Remain,
++		                  psClientCCB->ui32Size);
++		psClientCCB->ui32ByteCount += ui32Remain;
++	}
++
++#if defined(PDUMP)
++	/* Dump the CCB data */
++	if (bPDumpEnabled)
++	{
++		DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
++		                   psClientCCB->ui32HostWriteOffset,
++		                   ui32CmdSize,
++		                   ui32PDumpFlags);
++	}
++#endif
++
++	/*
++	 * Check if there are any fences being written that will already be
++	 * satisfied by the last written update command in this CCB. At the
++	 * same time we can ASSERT that all sync addresses are not NULL.
++	 */
++#if defined(DEBUG)
++	{
++		void *pvBufferStart = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset);
++		void *pvBufferEnd = IMG_OFFSET_ADDR(psClientCCB->pvClientCCB, psClientCCB->ui32HostWriteOffset + ui32CmdSize);
++		IMG_BOOL bMessagePrinted = IMG_FALSE;
++
++		/* Walk through the commands in this section of CCB being released... */
++		while (pvBufferStart < pvBufferEnd)
++		{
++			RGXFWIF_CCB_CMD_HEADER *psCmdHeader = pvBufferStart;
++
++			if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
++			{
++				/* If an UPDATE then record the values in case an adjacent fence uses them. */
++				IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
++				RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER));
++
++				psClientCCB->ui32UpdateEntries = 0;
++				while (ui32NumUFOs-- > 0)
++				{
++					PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
++					if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE)
++					{
++						psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++;
++					}
++				}
++			}
++			else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
++			{
++				/* If a FENCE then check the values against the last UPDATE issued.
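++				 * A fence on an (address, value) pair that the preceding
++				 * update command already wrote is redundant; the walk below
++				 * reports the first such match per release.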
*/ ++ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); ++ RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ ++ while (ui32NumUFOs-- > 0) ++ { ++ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); ++ ++ if (bMessagePrinted == IMG_FALSE) ++ { ++ RGXFWIF_UFO *psUpdatePtr = psClientCCB->asFenceUpdateList; ++ IMG_UINT32 ui32UpdateIndex; ++ ++ for (ui32UpdateIndex = 0; ui32UpdateIndex < psClientCCB->ui32UpdateEntries; ui32UpdateIndex++) ++ { ++ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) ++ { ++ if (RGX_UFO_PTR_ADDR(psUFOPtr) == RGX_UFO_PTR_ADDR(psUpdatePtr)) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "Redundant sync checkpoint check found in cCCB(%p) - 0x%x -> 0x%x", ++ psClientCCB, RGX_UFO_PTR_ADDR(psUFOPtr), psUFOPtr->ui32Value)); ++ bMessagePrinted = IMG_TRUE; ++ break; ++ } ++ } ++ else ++ { ++ if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr && ++ psUFOPtr->ui32Value == psUpdatePtr->ui32Value) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x", ++ psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); ++ bMessagePrinted = IMG_TRUE; ++ break; ++ } ++ } ++ psUpdatePtr++; ++ } ++ } ++ ++ psUFOPtr++; ++ } ++ } ++ else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR || ++ psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE) ++ { ++ /* For all other UFO ops check the UFO address is not NULL. */ ++ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); ++ RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ ++ while (ui32NumUFOs-- > 0) ++ { ++ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0); ++ psUFOPtr++; ++ } ++ } ++ ++ /* Move to the next command in this section of CCB being released... */ ++ pvBufferStart = IMG_OFFSET_ADDR(pvBufferStart, sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize); ++ } ++ } ++#endif /* REDUNDANT_SYNCS_DEBUG */ ++ ++ ++#if defined(PVRSRV_FORCE_FLUSH_CCCB_ON_KICK) ++ { ++ DEVMEM_MEMDESC* psClientCCBMemDesc = psClientCCB->psClientCCBMemDesc; ++ void *pvClientCCBAddr = psClientCCB->pvClientCCB; ++ PMR *psClientCCBMemDescPMR = NULL; ++ IMG_DEVMEM_OFFSET_T uiPMROffset; ++ ++ DevmemGetPMRData(psClientCCBMemDesc, ++ (IMG_HANDLE*)&psClientCCBMemDescPMR, ++ &uiPMROffset); ++ ++ CacheOpValExec(psClientCCBMemDescPMR, ++ (IMG_UINT64)(uintptr_t) pvClientCCBAddr, ++ uiPMROffset, ++ psClientCCBMemDesc->uiAllocSize, ++ PVRSRV_CACHE_OP_FLUSH); ++ ++ } ++#endif ++ /* ++ * Update the CCB write offset. ++ */ ++ UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset, ++ ui32CmdSize, ++ psClientCCB->ui32Size); ++ psClientCCB->ui32ByteCount += ui32CmdSize; ++ ++#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO) ++ _RGXUpdateCCBUtilisation(psClientCCB); ++#endif ++ /* ++ PDumpSetFrame will detect as we Transition out of capture range for ++ frame based data but if we are PDumping continuous data then we ++ need to inform the PDump layer ourselves ++ ++ First check is to confirm we are in continuous mode ++ Second check is to confirm the pdump client is connected and ready. ++ Third check is to confirm we are not in capture range. 
++ */ ++#if defined(PDUMP) ++ if (bPDumpFlagsContinuous && ++ bPDumpEnabled && ++ !PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_NONE)) ++ { ++ PVRSRV_ERROR eError; ++ ++ /* Only Transitioning into capture range can cause an error */ ++ eError = PDumpTransition(psDeviceNode, ++ psClientCCB->psPDumpConnectionData, ++ PDUMP_TRANSITION_EVENT_RANGE_EXITED, ++ ui32PDumpFlags); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ } ++ ++ if (bPDumpEnabled) ++ { ++ if (!BIT_ISSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN)) ++ { ++ /* Store offset to last finished CCB command. This offset can ++ * be needed when appending commands to a non finished CCB. ++ */ ++ psClientCCB->ui32FinishedPDumpWriteOffset = psClientCCB->ui32LastPDumpWriteOffset; ++ } ++ ++ /* Update the PDump write offset to show we PDumped this command */ ++ psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset; ++ } ++#endif ++ ++#if defined(NO_HARDWARE) ++ /* ++ The firmware is not running, it cannot update these; we do here instead. ++ */ ++ psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset; ++ psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset; ++#if defined(SUPPORT_AGP) ++ psClientCCB->psClientCCBCtrl->ui32ReadOffset2 = psClientCCB->ui32HostWriteOffset; ++#endif ++#endif ++ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ OSLockRelease(psClientCCB->hCCBGrowLock); ++#endif ++} ++ ++IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB) ++{ ++ return psClientCCB->ui32HostWriteOffset; ++} ++ ++IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB) ++{ ++ return psClientCCB->ui32Size-1; ++} ++ ++PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32Flags) ++{ ++ if ((ui32Flags & RGX_CONTEXT_FLAG_DISABLESLR)) ++ { ++ BIT_SET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); ++ } ++ else ++ { ++ BIT_UNSET(psClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED); ++ } ++ return PVRSRV_OK; ++} ++ ++void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT64 ui64FBSCEntryMask, ++ IMG_UINT32 ui32ClientFenceCount, ++ IMG_UINT32 ui32ClientUpdateCount, ++ IMG_UINT32 ui32CmdSize, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, ++ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; ++ IMG_BOOL bCacheInval = IMG_TRUE; ++ /* Init the generated data members */ ++ psCmdHelperData->ui32FBSCInvalCmdSize = 0; ++ psCmdHelperData->ui64FBSCEntryMask = 0; ++ psCmdHelperData->ui32FenceCmdSize = 0; ++ psCmdHelperData->ui32UpdateCmdSize = 0; ++ psCmdHelperData->ui32PreTimeStampCmdSize = 0; ++ psCmdHelperData->ui32PostTimeStampCmdSize = 0; ++ psCmdHelperData->ui32RMWUFOCmdSize = 0; ++ ++ /* Only compile if RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE is defined to avoid ++ * compilation errors on rogue cores. 
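++	 * In other words, an explicit FBSC invalidate command is only sized
++	 * into the stream when at least one auto-invalidate feature is
++	 * absent, or one of the listed BRN workarounds applies.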
++ */ ++#if defined(RGX_FEATURE_PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE) ++ bCacheInval = !(PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, PDS_INSTRUCTION_CACHE_AUTO_INVALIDATE) && ++ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, USC_INSTRUCTION_CACHE_AUTO_INVALIDATE) && ++ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, TDM_SLC_MMU_AUTO_CACHE_OPS) && ++ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GEOM_SLC_MMU_AUTO_CACHE_OPS) && ++ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, FRAG_SLC_MMU_AUTO_CACHE_OPS) && ++ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, COMPUTE_SLC_MMU_AUTO_CACHE_OPS)) || ++ RGX_IS_BRN_SUPPORTED(psDevInfo, 71960) || ++ RGX_IS_BRN_SUPPORTED(psDevInfo, 72143); ++#else ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++#endif ++ ++ /* Total FBSC invalidate command size (header plus command data) */ ++ if (bCacheInval) ++ { ++ if (ui64FBSCEntryMask != 0) ++ { ++ psCmdHelperData->ui32FBSCInvalCmdSize = ++ RGX_CCB_FWALLOC_ALIGN(sizeof(psCmdHelperData->ui64FBSCEntryMask) + ++ sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ psCmdHelperData->ui64FBSCEntryMask = ui64FBSCEntryMask; ++ } ++ } ++ ++ /* total DM command size (header plus command data) */ ++ ++ psCmdHelperData->ui32DMCmdSize = ++ RGX_CCB_FWALLOC_ALIGN(ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ ++ if (ui32ClientFenceCount != 0) ++ { ++ psCmdHelperData->ui32FenceCmdSize = ++ RGX_CCB_FWALLOC_ALIGN(ui32ClientFenceCount * sizeof(RGXFWIF_UFO) + ++ sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ } ++ ++ if (ui32ClientUpdateCount != 0) ++ { ++ psCmdHelperData->ui32UpdateCmdSize = ++ RGX_CCB_FWALLOC_ALIGN(ui32ClientUpdateCount * sizeof(RGXFWIF_UFO) + ++ sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ } ++ ++ if (ppPreAddr && (ppPreAddr->ui32Addr != 0)) ++ { ++ psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) ++ + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1)); ++ } ++ ++ if (ppPostAddr && (ppPostAddr->ui32Addr != 0)) ++ { ++ psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) ++ + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1)); ++ } ++ ++ if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0)) ++ { ++ psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO); ++ } ++} ++ ++/* ++ Work out how much space this command will require ++*/ ++void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32ClientFenceCount, ++ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, ++ IMG_UINT32 *paui32FenceValue, ++ IMG_UINT32 ui32ClientUpdateCount, ++ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, ++ IMG_UINT32 *paui32UpdateValue, ++ IMG_UINT32 ui32CmdSize, ++ IMG_PBYTE pui8DMCmd, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, ++ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, ++ RGXFWIF_CCB_CMD_TYPE eType, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_UINT32 ui32PDumpFlags, ++ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, ++ IMG_CHAR *pszCommandName, ++ IMG_BOOL bCCBStateOpen, ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = NULL; ++ ++ /* Job reference values */ ++ psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef; ++ psCmdHelperData->ui32IntJobRef = ui32IntJobRef; ++ ++ /* Save the data we require in the submit call */ ++ psCmdHelperData->psClientCCB = psClientCCB; ++#if defined(PDUMP) ++ psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags; ++ psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext); ++#else ++ 
PVR_UNREFERENCED_PARAMETER(psDevInfo); ++#endif ++ psCmdHelperData->pszCommandName = pszCommandName; ++ if (bCCBStateOpen) ++ { ++ BIT_SET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); ++ } ++ else ++ { ++ BIT_UNSET(psCmdHelperData->psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); ++ } ++ ++ /* Client sync data */ ++ psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount; ++ psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress; ++ psCmdHelperData->paui32FenceValue = paui32FenceValue; ++ psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount; ++ psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress; ++ psCmdHelperData->paui32UpdateValue = paui32UpdateValue; ++ ++ /* Command data */ ++ psCmdHelperData->ui32CmdSize = ui32CmdSize; ++ psCmdHelperData->pui8DMCmd = pui8DMCmd; ++ psCmdHelperData->eType = eType; ++ ++ if (ppPreAddr) ++ { ++ psCmdHelperData->pPreTimestampAddr = *ppPreAddr; ++ } ++ ++ if (ppPostAddr) ++ { ++ psCmdHelperData->pPostTimestampAddr = *ppPostAddr; ++ } ++ ++ if (ppRMWUFOAddr) ++ { ++ psCmdHelperData->pRMWUFOAddr = *ppRMWUFOAddr; ++ } ++ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, ++ "%s Command Server Init on FWCtx %08x", pszCommandName, ++ FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr); ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Workload Data added */ ++ psCmdHelperData->psWorkEstKickData = psWorkEstKickData; ++#endif ++} ++ ++/* ++ Work out how much space this command will require ++*/ ++void RGXCmdHelperInitCmdCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT64 ui64FBSCEntryMask, ++ IMG_UINT32 ui32ClientFenceCount, ++ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, ++ IMG_UINT32 *paui32FenceValue, ++ IMG_UINT32 ui32ClientUpdateCount, ++ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, ++ IMG_UINT32 *paui32UpdateValue, ++ IMG_UINT32 ui32CmdSize, ++ IMG_PBYTE pui8DMCmd, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, ++ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, ++ RGXFWIF_CCB_CMD_TYPE eType, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_UINT32 ui32PDumpFlags, ++ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, ++ IMG_CHAR *pszCommandName, ++ IMG_BOOL bCCBStateOpen, ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) ++{ ++ RGXCmdHelperInitCmdCCB_CommandSize(psDevInfo, ++ ui64FBSCEntryMask, ++ ui32ClientFenceCount, ++ ui32ClientUpdateCount, ++ ui32CmdSize, ++ ppPreAddr, ++ ppPostAddr, ++ ppRMWUFOAddr, ++ psCmdHelperData); ++ ++ RGXCmdHelperInitCmdCCB_OtherData(psClientCCB, ++ ui32ClientFenceCount, ++ pauiFenceUFOAddress, ++ paui32FenceValue, ++ ui32ClientUpdateCount, ++ pauiUpdateUFOAddress, ++ paui32UpdateValue, ++ ui32CmdSize, ++ pui8DMCmd, ++ ppPreAddr, ++ ppPostAddr, ++ ppRMWUFOAddr, ++ eType, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ ui32PDumpFlags, ++ psWorkEstKickData, ++ pszCommandName, ++ bCCBStateOpen, ++ psCmdHelperData); ++} ++ ++/* ++ Reserve space in the CCB and fill in the command and client sync data ++*/ ++PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, ++ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData) ++{ ++ const IMG_UINT32 ui32MaxUFOCmdSize = RGX_CCB_FWALLOC_ALIGN((RGXFWIF_CCB_CMD_MAX_UFOS * sizeof(RGXFWIF_UFO)) + ++ sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ IMG_UINT32 ui32AllocSize = 0; ++ IMG_UINT32 i; ++ void *pvStartPtr; ++ PVRSRV_ERROR eError; ++#if defined(PDUMP) ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(asCmdHelperData->psClientCCB->psServerCommonContext); ++#endif 
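++
++	/* As a worked example, assuming an 8-byte RGXFWIF_UFO and
++	 * RGXFWIF_CCB_CMD_MAX_UFOS = 32, ui32MaxUFOCmdSize is the FW-aligned
++	 * size of one command header plus 256 bytes of UFOs; any entry whose
++	 * fence or update command exceeds it is rejected below with
++	 * PVRSRV_ERROR_TOO_MANY_SYNCS. */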
++ ++ /* ++ Check the number of fences & updates are valid. ++ */ ++ for (i = 0; i < ui32CmdCount; i++) ++ { ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i]; ++ ++ if (psCmdHelperData->ui32FenceCmdSize > ui32MaxUFOCmdSize || ++ psCmdHelperData->ui32UpdateCmdSize > ui32MaxUFOCmdSize) ++ { ++ return PVRSRV_ERROR_TOO_MANY_SYNCS; ++ } ++ } ++ ++ /* ++ Work out how much space we need for all the command(s) ++ */ ++ ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData); ++ ++#if defined(PDUMP) ++ for (i = 0; i < ui32CmdCount; i++) ++ { ++ if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d", ++ __func__, ++ PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", ++ PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE", ++ ui32CmdCount)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ } ++#endif ++ ++ /* ++ Acquire space in the CCB for all the command(s). ++ */ ++ eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB, ++ ui32AllocSize, ++ &pvStartPtr, ++ asCmdHelperData[0].ui32PDumpFlags); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ return eError; ++ } ++ ++ /* ++ For each command fill in the fence, DM, and update command ++ ++ */ ++ for (i = 0; i < ui32CmdCount; i++) ++ { ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i]; ++ void *pvCmdPtr; ++#if defined(PDUMP) ++ IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr; ++ IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext)); ++#endif ++ ++ if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0) ++ { ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes", ++ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff); ++ } ++ ++ pvCmdPtr = pvStartPtr; ++ ++ /* ++ Create the fence command. 
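++
++			For example, a fence command carrying two client fences is laid
++			out as:
++
++			    RGXFWIF_CCB_CMD_HEADER  (eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE)
++			    RGXFWIF_UFO             (puiAddrUFO, ui32Value)  x 2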
++ */ ++ if (psCmdHelperData->ui32FenceCmdSize) ++ { ++ RGXFWIF_CCB_CMD_HEADER *psHeader; ++ IMG_UINT k, uiNextValueIndex; ++ ++ psHeader = pvCmdPtr; ++ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE; ++ ++ psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); ++ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; ++ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0; ++ psHeader->sWorkEstKickData.ui64Deadline = 0; ++ psHeader->sWorkEstKickData.ui32CyclesPrediction = 0; ++#endif ++ ++ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ ++ /* Fill in the client fences */ ++ uiNextValueIndex = 0; ++ for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++) ++ { ++ RGXFWIF_UFO *psUFOPtr = pvCmdPtr; ++ ++ psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k]; ++ ++ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) ++ { ++ psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; ++ } ++ else ++ { ++ /* Only increment uiNextValueIndex for non sync checkpoints ++ * (as paui32FenceValue only contains values for sync prims) ++ */ ++ psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++]; ++ } ++ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); ++ ++#if defined(SYNC_COMMAND_DEBUG) ++ PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x", ++ psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value)); ++#endif ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ ".. %s client sync fence - 0x%x -> 0x%x", ++ psCmdHelperData->psClientCCB->szName, ++ psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value); ++ ++ ++ } ++ } ++ ++ /* ++ Create the FBSC invalidate command. ++ */ ++ if (psCmdHelperData->ui32FBSCInvalCmdSize) ++ { ++ RGXFWIF_CCB_CMD_HEADER *psHeader; ++ IMG_UINT64 *pui64FBSCInvalCmdData; ++ ++ /* pui8CmdPtr */ ++ ++ psHeader = pvCmdPtr; ++ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE; ++ ++ psHeader->ui32CmdSize = psCmdHelperData->ui32FBSCInvalCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); ++ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; ++ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0; ++ psHeader->sWorkEstKickData.ui64Deadline = 0; ++ psHeader->sWorkEstKickData.ui32CyclesPrediction = 0; ++#endif ++ pui64FBSCInvalCmdData = IMG_OFFSET_ADDR(psHeader, sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ *pui64FBSCInvalCmdData = psCmdHelperData->ui64FBSCEntryMask; ++ /* leap over the FBSC invalidate command */ ++ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32FBSCInvalCmdSize); ++ ++ } ++ ++ /* ++ Create the pre DM timestamp commands. Pre and Post timestamp commands are supposed to ++ sandwich the DM cmd. The padding code with the CCB wrap upsets the FW if we don't have ++ the task type bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types. 
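++			(RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP and
++			RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP, both written via
++			RGXWriteTimestampCommand() below.)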
++ */ ++ if (psCmdHelperData->ui32PreTimeStampCmdSize != 0) ++ { ++ RGXWriteTimestampCommand(&pvCmdPtr, ++ RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP, ++ psCmdHelperData->pPreTimestampAddr); ++ } ++ ++ /* ++ Create the DM command ++ */ ++ if (psCmdHelperData->ui32DMCmdSize) ++ { ++ RGXFWIF_CCB_CMD_HEADER *psHeader; ++ ++ psHeader = pvCmdPtr; ++ psHeader->eCmdType = psCmdHelperData->eType; ++ ++ psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); ++ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; ++ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ if (psCmdHelperData->psWorkEstKickData != NULL && ++ psCmdHelperData->eType != RGXFWIF_CCB_CMD_TYPE_NULL) ++ { ++ PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_GEOM || ++ psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D || ++ psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_CDM || ++ psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TQ_TDM); ++ psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData; ++ } ++ else ++ { ++ psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0; ++ psHeader->sWorkEstKickData.ui64Deadline = 0; ++ psHeader->sWorkEstKickData.ui32CyclesPrediction = 0; ++ } ++#endif ++ ++ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ ++ /* The buffer is write-combine, so no special device memory treatment required. */ ++ OSCachedMemCopy(pvCmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize); ++ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, psCmdHelperData->ui32CmdSize); ++ } ++ ++ ++ if (psCmdHelperData->ui32PostTimeStampCmdSize != 0) ++ { ++ RGXWriteTimestampCommand(&pvCmdPtr, ++ RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP, ++ psCmdHelperData->pPostTimestampAddr); ++ } ++ ++ ++ if (psCmdHelperData->ui32RMWUFOCmdSize != 0) ++ { ++ RGXFWIF_CCB_CMD_HEADER * psHeader; ++ RGXFWIF_UFO * psUFO; ++ ++ psHeader = (RGXFWIF_CCB_CMD_HEADER *) pvCmdPtr; ++ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE; ++ psHeader->ui32CmdSize = psCmdHelperData->ui32RMWUFOCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER); ++ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef; ++ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef; ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0; ++ psHeader->sWorkEstKickData.ui64Deadline = 0; ++ psHeader->sWorkEstKickData.ui32CyclesPrediction = 0; ++#endif ++ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ ++ psUFO = (RGXFWIF_UFO *) pvCmdPtr; ++ psUFO->puiAddrUFO = psCmdHelperData->pRMWUFOAddr; ++ ++ pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO)); ++ } ++ ++ /* ++ Create the update command. 
++		 */
++		if (psCmdHelperData->ui32UpdateCmdSize)
++		{
++			RGXFWIF_CCB_CMD_HEADER *psHeader;
++			IMG_UINT k, uiNextValueIndex;
++
++			psHeader = pvCmdPtr;
++			psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE;
++			psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
++			psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
++			psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
++#if defined(SUPPORT_WORKLOAD_ESTIMATION)
++			psHeader->sWorkEstKickData.ui16ReturnDataIndex = 0;
++			psHeader->sWorkEstKickData.ui64Deadline = 0;
++			psHeader->sWorkEstKickData.ui32CyclesPrediction = 0;
++#endif
++			pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
++
++			/* Fill in the client updates */
++			uiNextValueIndex = 0;
++			for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++)
++			{
++				RGXFWIF_UFO *psUFOPtr = pvCmdPtr;
++
++				psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k];
++				if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
++				{
++					psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
++				}
++				else
++				{
++					/* Only increment uiNextValueIndex for non sync checkpoints
++					 * (as paui32UpdateValue only contains values for sync prims)
++					 */
++					psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++];
++				}
++				pvCmdPtr = IMG_OFFSET_ADDR(pvCmdPtr, sizeof(RGXFWIF_UFO));
++
++#if defined(SYNC_COMMAND_DEBUG)
++				PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x",
++				         psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
++#endif
++				PDUMPCOMMENT(psDevInfo->psDeviceNode,
++				             ".. %s client sync update - 0x%x -> 0x%x",
++				             psCmdHelperData->psClientCCB->szName,
++				             psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
++
++			}
++		}
++
++		/* Set the start pointer for the next iteration around the loop */
++		pvStartPtr = IMG_OFFSET_ADDR(pvStartPtr,
++			psCmdHelperData->ui32FenceCmdSize +
++			psCmdHelperData->ui32FBSCInvalCmdSize +
++			psCmdHelperData->ui32PreTimeStampCmdSize +
++			psCmdHelperData->ui32DMCmdSize +
++			psCmdHelperData->ui32PostTimeStampCmdSize +
++			psCmdHelperData->ui32RMWUFOCmdSize +
++			psCmdHelperData->ui32UpdateCmdSize);
++
++		if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
++		{
++			PDUMPCOMMENT(psDevInfo->psDeviceNode,
++			             "End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
++			             psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
++		}
++		else
++		{
++			PDUMPCOMMENT(psDevInfo->psDeviceNode,
++			             "No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
++			             psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
++		}
++	}
++
++	return PVRSRV_OK;
++}
++
++/*
++	Fill in the server sync data and release the CCB space
++*/
++void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
++                               RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
++                               const IMG_CHAR *pcszDMName,
++                               IMG_UINT32 ui32CtxAddr)
++{
++	IMG_UINT32 ui32AllocSize = 0;
++	IMG_UINT32 i;
++#if defined(__linux__)
++	IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced();
++	IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced();
++#endif
++
++	/*
++		Work out how much space we need for all the command(s)
++	*/
++	ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
++	/*
++		For each command fill in the server sync info
++	*/
++	for (i=0;i<ui32CmdCount;i++)
++	{
++		RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
++#if defined(PDUMP)
++		PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext);
++#endif
++
++#if (!defined(__linux__) || !defined(SUPPORT_RGX)) && !defined(PDUMP)
++		PVR_UNREFERENCED_PARAMETER(psCmdHelperData);
++#endif
++
++#if
defined(__linux__) && defined(SUPPORT_RGX) ++ if (bTraceChecks) ++ { ++ trace_rogue_fence_checks(psCmdHelperData->pszCommandName, ++ pcszDMName, ++ ui32CtxAddr, ++ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize, ++ psCmdHelperData->ui32ClientFenceCount, ++ psCmdHelperData->pauiFenceUFOAddress, ++ psCmdHelperData->paui32FenceValue); ++ } ++ if (bTraceUpdates) ++ { ++ trace_rogue_fence_updates(psCmdHelperData->pszCommandName, ++ pcszDMName, ++ ui32CtxAddr, ++ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize, ++ psCmdHelperData->ui32ClientUpdateCount, ++ psCmdHelperData->pauiUpdateUFOAddress, ++ psCmdHelperData->paui32UpdateValue); ++ } ++#endif ++ ++ /* ++ All the commands have been filled in so release the CCB space. ++ The FW still won't run this command until we kick it ++ */ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ++ psCmdHelperData->ui32PDumpFlags, ++ "%s Command Server Release on FWCtx %08x", ++ psCmdHelperData->pszCommandName, ui32CtxAddr); ++ } ++ ++ RGXReleaseCCB(asCmdHelperData[0].psClientCCB, ++ ui32AllocSize, ++ asCmdHelperData[0].ui32PDumpFlags); ++ ++ BIT_UNSET(asCmdHelperData[0].psClientCCB->ui32CCBFlags, CCB_FLAGS_CCB_STATE_OPEN); ++} ++ ++IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, ++ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData) ++{ ++ IMG_UINT32 ui32AllocSize = 0; ++ IMG_UINT32 i; ++ ++ /* ++ Work out how much space we need for all the command(s) ++ */ ++ for (i = 0; i < ui32CmdCount; i++) ++ { ++ ui32AllocSize += ++ asCmdHelperData[i].ui32FenceCmdSize + ++ asCmdHelperData[i].ui32FBSCInvalCmdSize + ++ asCmdHelperData[i].ui32DMCmdSize + ++ asCmdHelperData[i].ui32UpdateCmdSize + ++ asCmdHelperData[i].ui32PreTimeStampCmdSize + ++ asCmdHelperData[i].ui32PostTimeStampCmdSize + ++ asCmdHelperData[i].ui32RMWUFOCmdSize; ++ } ++ ++ return ui32AllocSize; ++} ++ ++/* Work out how much of an offset there is to a specific command. 
*/ ++IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, ++ IMG_UINT32 ui32Cmdindex) ++{ ++ IMG_UINT32 ui32Offset = 0; ++ IMG_UINT32 i; ++ ++ for (i = 0; i < ui32Cmdindex; i++) ++ { ++ ui32Offset += ++ asCmdHelperData[i].ui32FenceCmdSize + ++ asCmdHelperData[i].ui32FBSCInvalCmdSize + ++ asCmdHelperData[i].ui32DMCmdSize + ++ asCmdHelperData[i].ui32UpdateCmdSize + ++ asCmdHelperData[i].ui32PreTimeStampCmdSize + ++ asCmdHelperData[i].ui32PostTimeStampCmdSize + ++ asCmdHelperData[i].ui32RMWUFOCmdSize; ++ } ++ ++ return ui32Offset; ++} ++ ++/* Returns the offset of the data master command from a write offset */ ++IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData) ++{ ++ return psCmdHelperData->ui32FenceCmdSize + ++ psCmdHelperData->ui32PreTimeStampCmdSize + ++ psCmdHelperData->ui32FBSCInvalCmdSize; ++} ++ ++static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType) ++{ ++ switch (cmdType) ++ { ++ case RGXFWIF_CCB_CMD_TYPE_GEOM: return "TA"; ++ case RGXFWIF_CCB_CMD_TYPE_3D: return "3D"; ++ case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR"; ++ case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM"; ++ case RGXFWIF_CCB_CMD_TYPE_TQ_3D: return "TQ_3D"; ++ case RGXFWIF_CCB_CMD_TYPE_TQ_2D: return "TQ_2D"; ++ case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM"; ++ case RGXFWIF_CCB_CMD_TYPE_FBSC_INVALIDATE: return "FBSC_INVALIDATE"; ++ case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL"; ++ case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE"; ++ case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE"; ++ case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR"; ++ case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY"; ++ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE"; ++ case RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP: return "PRE_TIMESTAMP"; ++ case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: return "RMW_UPDATE"; ++ case RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP: return "POST_TIMESTAMP"; ++ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: return "UNFENCED_RMW_UPDATE"; ++ case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING"; ++ ++ default: ++ PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++ ++ return "INVALID"; ++} ++ ++PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM) ++{ ++ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; ++ IMG_UINT32 ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff, ui32WrapMask; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (psCurrentClientCCB == NULL) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is NULL")); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ /* If CCB grow is enabled, take the lock while sampling offsets ++ * (to guard against a grow happening mid-sample) ++ */ ++ OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); ++#endif ++ /* NB. use psCurrentClientCCB->ui32Size as basis for wrap mask (rather than psClientCCBCtrl->ui32WrapMask) ++ * as if CCB grow happens, psCurrentClientCCB->ui32Size will have been updated but ++ * psClientCCBCtrl->ui32WrapMask is only updated once the firmware sees the CCB has grown. ++ * If we use the wrong value, we might incorrectly determine that the offsets are invalid. 
++ */ ++ ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); ++ psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; ++ ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; ++ ui32SampledDpOff = psClientCCBCtrl->ui32DepOffset; ++ ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ OSLockRelease(psCurrentClientCCB->hCCBGrowLock); ++#endif ++ ++ if (ui32SampledRdOff > ui32WrapMask || ++ ui32SampledDpOff > ui32WrapMask || ++ ui32SampledWrOff > ui32WrapMask) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d DOFF=%d WOFF=%d)", ++ ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff)); ++ return PVRSRV_ERROR_INVALID_OFFSET; ++ } ++ ++ if (ui32SampledRdOff != ui32SampledWrOff && ++ psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff && ++ ui32SampledRdOff == psCurrentClientCCB->ui32LastROff && ++ (psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size) ++ { ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice; ++ ++ /* Only log a stalled CCB if GPU is idle (any state other than POW_ON is considered idle). ++ * Guest drivers do not initialize psRGXFWIfFwSysData, so they assume FW internal state is ON. */ ++ if (((psDevInfo->psRGXFWIfFwSysData == NULL) || (psDevInfo->psRGXFWIfFwSysData->ePowState != RGXFWIF_POW_ON)) && ++ (psDevInfo->ui32SLRHoldoffCounter == 0)) ++ { ++ static __maybe_unused const char *pszStalledAction = ++#if defined(PVRSRV_STALLED_CCB_ACTION) ++ "force"; ++#else ++ "warn"; ++#endif ++ /* Don't log this by default unless debugging since a higher up ++ * function will log the stalled condition. Helps avoid double ++ * messages in the log. ++ */ ++ PVR_DPF((PVR_DBG_ERROR, "%s (%s): CCCB has not progressed (ROFF=%d DOFF=%d WOFF=%d) for \"%s\"", ++ __func__, pszStalledAction, ui32SampledRdOff, ++ ui32SampledDpOff, ui32SampledWrOff, ++ psCurrentClientCCB->szName)); ++ eError = PVRSRV_ERROR_CCCB_STALLED; ++ ++ { ++ void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; ++ RGXFWIF_CCB_CMD_HEADER *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); ++ ++ /* Special case - if readOffset is on a PADDING packet, CCB has wrapped. ++ * In this case, skip over the PADDING packet. ++ */ ++ if (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_PADDING) ++ { ++ psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ++ ((ui32SampledRdOff + ++ psCommandHeader->ui32CmdSize + ++ sizeof(RGXFWIF_CCB_CMD_HEADER)) ++ & psCurrentClientCCB->psClientCCBCtrl->ui32WrapMask)); ++ } ++ ++ /* Only try to recover a 'stalled' context (ie one waiting on a fence), as some work (eg compute) could ++ * take a long time to complete, during which time the CCB ptrs would not advance. 
++ */ ++ if (((psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) || ++ (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) && ++ (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff))) ++ { ++ /* Acquire the cCCB recovery lock */ ++ OSLockAcquire(psDevInfo->hCCBRecoveryLock); ++ ++ if (!psDevInfo->pvEarliestStalledClientCCB) ++ { ++ psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; ++ psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; ++ } ++ else ++ { ++ /* Check if this fence cmd header has an older submission stamp than the one we are currently considering unblocking ++ * (account for submission stamp wrap by checking diff is less than 0x80000000) - if it is older, then this becomes ++ * our preferred fence to be unblocked. ++ */ ++ if ((psCommandHeader->ui32IntJobRef < psDevInfo->ui32OldestSubmissionOrdinal) && ++ ((psDevInfo->ui32OldestSubmissionOrdinal - psCommandHeader->ui32IntJobRef) < 0x80000000)) ++ { ++ psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB; ++ psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32IntJobRef; ++ } ++ } ++ ++ /* Release the cCCB recovery lock */ ++ OSLockRelease(psDevInfo->hCCBRecoveryLock); ++ } ++ } ++ } ++ } ++ ++ psCurrentClientCCB->ui32LastROff = ui32SampledRdOff; ++ psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff; ++ psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount; ++ ++ return eError; ++} ++ ++void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ++ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, ++ RGX_CLIENT_CCB *psCurrentClientCCB, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; ++ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; ++ void *pvClientCCBBuff; ++ IMG_UINT32 ui32Offset; ++ IMG_UINT32 ui32DepOffset; ++ IMG_UINT32 ui32EndOffset; ++ IMG_UINT32 ui32WrapMask; ++ IMG_CHAR * pszState = "Ready"; ++ ++ /* Ensure hCCBGrowLock is acquired before reading ++ * psCurrentClientCCB->pvClientCCB as a CCB grow ++ * could remap the virtual addresses. ++ */ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ OSLockAcquire(psCurrentClientCCB->hCCBGrowLock); ++#endif ++ psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; ++ pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; ++ ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset; ++ OSMemoryBarrier(NULL); ++ ui32Offset = psClientCCBCtrl->ui32ReadOffset; ++ ui32DepOffset = psClientCCBCtrl->ui32DepOffset; ++ /* NB. Use psCurrentClientCCB->ui32Size as basis for wrap mask (rather ++ * than psClientCCBCtrl->ui32WrapMask) as if CCB grow happened, ++ * psCurrentClientCCB->ui32Size will have been updated but ++ * psClientCCBCtrl->ui32WrapMask is only updated once the firmware ++ * sees the CCB has grown. If we use the wrong value, ui32NextOffset ++ * can end up being wrapped prematurely and pointing to garbage. ++ */ ++ ui32WrapMask = RGXGetWrapMaskCCB(psCurrentClientCCB); ++ ++ PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr, psCurrentClientCCB->szName); ++ if (ui32Offset == ui32EndOffset) ++ { ++ PVR_DUMPDEBUG_LOG(" `--"); ++ } ++ ++ while (ui32Offset != ui32EndOffset) ++ { ++ RGXFWIF_CCB_CMD_HEADER *psCmdHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset); ++ IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask; ++ IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)?
IMG_TRUE: IMG_FALSE; ++ IMG_BOOL bLastUFO; ++ #define CCB_SYNC_INFO_LEN 80 ++ IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN]; ++ IMG_UINT32 ui32NoOfUpdates, i; ++ RGXFWIF_UFO *psUFOPtr; ++ ++ ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO); ++ psUFOPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER)); ++ pszSyncInfo[0] = '\0'; ++ ++ if (ui32Offset == ui32DepOffset) ++ { ++ pszState = "Waiting"; ++ } ++ ++ PVR_DUMPDEBUG_LOG(" %s--%s %s @ %u Int=%u Ext=%u", ++ bLastCommand? "`": "|", ++ pszState, _CCBCmdTypename(psCmdHeader->eCmdType), ++ ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef ++ ); ++ ++ /* switch on type and write checks and updates */ ++ switch (psCmdHeader->eCmdType) ++ { ++ case RGXFWIF_CCB_CMD_TYPE_UPDATE: ++ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: ++ case RGXFWIF_CCB_CMD_TYPE_FENCE: ++ case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: ++ { ++ for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++) ++ { ++ bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE; ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ { ++ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) ++ { ++ SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, ++ pszSyncInfo, CCB_SYNC_INFO_LEN); ++ } ++ else ++ { ++ SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, ++ pszSyncInfo, CCB_SYNC_INFO_LEN); ++ } ++ } ++ ++ PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val=0x%08x %s", ++ bLastCommand? " ": "|", ++ bLastUFO? "`": "|", ++ psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value, ++ pszSyncInfo ++ ); ++ } ++ break; ++ } ++ case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: ++ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: ++ { ++ for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++) ++ { ++ bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE; ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ { ++ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr)) ++ { ++ SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, ++ pszSyncInfo, CCB_SYNC_INFO_LEN); ++ } ++ else ++ { ++ SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr, ++ pszSyncInfo, CCB_SYNC_INFO_LEN); ++ } ++ } ++ ++ PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val++ %s", ++ bLastCommand? " ": "|", ++ bLastUFO? "`": "|", ++ psUFOPtr->puiAddrUFO.ui32Addr, ++ pszSyncInfo ++ ); ++ } ++ break; ++ } ++ default: ++ break; ++ } ++ ui32Offset = ui32NextOffset; ++ } ++ ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ OSLockRelease(psCurrentClientCCB->hCCBGrowLock); ++#endif ++} ++ ++void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, ++ RGX_CLIENT_CCB *psCurrentClientCCB, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl; ++ void *pvClientCCBBuff = psCurrentClientCCB->pvClientCCB; ++ volatile void *pvPtr; ++ IMG_UINT32 ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset; ++ IMG_UINT32 ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset; ++ IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset; ++ ++ pvPtr = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); ++ ++ if ((ui32SampledRdOff == ui32SampledDepOff) && ++ (ui32SampledRdOff != ui32SampledWrOff)) ++ { ++ volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledRdOff); ++ RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType; ++ volatile void *pvPtr = psCommandHeader; ++ ++ /* CCB is stalled on a fence... 
*/ ++ if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) ++ { ++#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext); ++ IMG_UINT32 ui32Val; ++#endif ++ RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); ++ IMG_UINT32 jj; ++ ++ /* Display details of the fence object on which the context is pending */ ++ PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:", ++ sFWCommonContext.ui32Addr, ++ ui32SampledRdOff, ++ psCurrentClientCCB->szName, ++ _CCBCmdTypename(eCommandType)); ++ for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++) ++ { ++#if !defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) ++ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); ++#else ++ ui32Val = 0; ++ RGXReadFWModuleAddr(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); ++ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", ++ psUFOPtr[jj].puiAddrUFO.ui32Addr, ++ psUFOPtr[jj].ui32Value, ui32Val); ++#endif ++ } ++ ++ /* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */ ++ pvPtr = IMG_OFFSET_ADDR(psUFOPtr, psCommandHeader->ui32CmdSize); ++ psCommandHeader = pvPtr; ++ if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) ++ { ++ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType)); ++ /* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */ ++ pvPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize); ++ psCommandHeader = pvPtr; ++ /* If the next command is an update, display details of that so we can see what would then become unblocked */ ++ if (psCommandHeader != IMG_OFFSET_ADDR(pvClientCCBBuff, ui32SampledWrOff)) ++ { ++ eCommandType = psCommandHeader->eCmdType; ++ ++ if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE) ++ { ++ psUFOPtr = IMG_OFFSET_ADDR(psCommandHeader, sizeof(*psCommandHeader)); ++ PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType)); ++ for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++) ++ { ++#if !defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) ++ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value); ++#else ++ ui32Val = 0; ++ RGXReadFWModuleAddr(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val); ++ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x", ++ psUFOPtr[jj].puiAddrUFO.ui32Addr, ++ psUFOPtr[jj].ui32Value, ++ ui32Val); ++#endif ++ } ++ } ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); ++ } ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr); ++ } ++ } ++ } ++} ++ ++void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGX_CLIENT_CCB *psStalledClientCCB; ++ ++ PVR_ASSERT(psDevInfo); ++ ++ psStalledClientCCB = (RGX_CLIENT_CCB *)psDevInfo->pvEarliestStalledClientCCB; ++ ++ if (psStalledClientCCB) ++ { ++ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl; ++ IMG_UINT32 ui32SampledDepOffset = psClientCCBCtrl->ui32DepOffset; ++ void *pvPtr = IMG_OFFSET_ADDR(psStalledClientCCB->pvClientCCB, ui32SampledDepOffset); ++ RGXFWIF_CCB_CMD_HEADER *psCommandHeader = pvPtr; ++ RGXFWIF_CCB_CMD_TYPE eCommandType =
psCommandHeader->eCmdType; ++ ++ if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)) ++ { ++ RGXFWIF_UFO *psUFOPtr = IMG_OFFSET_ADDR(pvPtr, sizeof(*psCommandHeader)); ++ IMG_UINT32 jj; ++ IMG_UINT32 ui32NumUnsignalledUFOs = 0; ++ IMG_UINT32 ui32UnsignalledUFOVaddrs[PVRSRV_MAX_SYNCS]; ++ ++#if defined(PVRSRV_STALLED_CCB_ACTION) ++ if (!psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName[0]) ++ { ++ OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui64Timestamp); ++ psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); ++ psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; ++ OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLogFirst.aszCCBName, ++ psStalledClientCCB->szName, ++ MAX_CLIENT_CCB_NAME); ++ } ++ else ++ { ++ OSClockMonotonicns64(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui64Timestamp); ++ psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32NumUFOs = (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)); ++ psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].ui32FWCtxAddr = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr; ++ OSStringLCopy(psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp].aszCCBName, ++ psStalledClientCCB->szName, ++ MAX_CLIENT_CCB_NAME); ++ psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp = (psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp + 1) % PVR_SLR_LOG_ENTRIES; ++ } ++ psDevInfo->psRGXFWIfFwOsData->ui32ForcedUpdatesRequested++; ++ /* flush write buffers for psRGXFWIfFwOsData */ ++ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfFwOsData->sSLRLog[psDevInfo->psRGXFWIfFwOsData->ui8SLRLogWp]); ++#endif ++ PVR_LOG(("Fence found on context 0x%x '%s' @ %d has %d UFOs", ++ FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr, ++ psStalledClientCCB->szName, ui32SampledDepOffset, ++ (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)))); ++ ++ for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++) ++ { ++ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT((RGXFWIF_UFO *)&psUFOPtr[jj])) ++ { ++ IMG_UINT32 ui32ReadValue = SyncCheckpointStateFromUFO(psDevInfo->psDeviceNode, ++ psUFOPtr[jj].puiAddrUFO.ui32Addr); ++ PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x (currently 0x%x)", jj+1, ++ (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), ++ psUFOPtr[jj].puiAddrUFO.ui32Addr, ++ psUFOPtr[jj].ui32Value, ++ ui32ReadValue)); ++ /* If fence is unmet, dump debug info on it */ ++ if (ui32ReadValue != psUFOPtr[jj].ui32Value) ++ { ++ /* Add to our list to pass to pvr_sync */ ++ ui32UnsignalledUFOVaddrs[ui32NumUnsignalledUFOs] = psUFOPtr[jj].puiAddrUFO.ui32Addr; ++ ui32NumUnsignalledUFOs++; ++ } ++ } ++ else ++ { ++ PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x", jj+1, ++ (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)), ++ psUFOPtr[jj].puiAddrUFO.ui32Addr, ++ psUFOPtr[jj].ui32Value)); ++ } ++ } ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_FALLBACK_FENCE_SYNC) ++ if (ui32NumUnsignalledUFOs > 0) ++ { ++ IMG_UINT32 ui32NumSyncsOwned; ++ PVRSRV_ERROR eErr = SyncCheckpointDumpInfoOnStalledUFOs(ui32NumUnsignalledUFOs, &ui32UnsignalledUFOVaddrs[0], &ui32NumSyncsOwned); ++ ++ PVR_LOG_IF_ERROR(eErr, "SyncCheckpointDumpInfoOnStalledUFOs() call failed."); ++ } ++#endif
++#if defined(PVRSRV_STALLED_CCB_ACTION) ++ if (BIT_ISSET(psStalledClientCCB->ui32CCBFlags, CCB_FLAGS_SLR_DISABLED)) ++ { ++ PRGXFWIF_FWCOMMONCONTEXT psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); ++ ++ PVR_LOG(("SLR disabled for FWCtx 0x%08X", psContext.ui32Addr)); ++ } ++ else ++ { ++ if (ui32NumUnsignalledUFOs > 0) ++ { ++ RGXFWIF_KCCB_CMD sSignalFencesCmd; ++ ++ sSignalFencesCmd.eCmdType = RGXFWIF_KCCB_CMD_FORCE_UPDATE; ++ sSignalFencesCmd.ui32KCCBFlags = 0; ++ sSignalFencesCmd.uCmdData.sForceUpdateData.psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext); ++ sSignalFencesCmd.uCmdData.sForceUpdateData.ui32CCBFenceOffset = ui32SampledDepOffset; ++ ++ PVR_LOG(("Forced update command issued for FWCtx 0x%08X", sSignalFencesCmd.uCmdData.sForceUpdateData.psContext.ui32Addr)); ++ ++ RGXScheduleCommand(FWCommonContextGetRGXDevInfo(psStalledClientCCB->psServerCommonContext), ++ RGXFWIF_DM_GP, ++ &sSignalFencesCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ } ++#endif ++ } ++ psDevInfo->pvEarliestStalledClientCCB = NULL; ++ } ++} ++ ++/****************************************************************************** ++ End of file (rgxccb.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxccb.h b/drivers/gpu/drm/img-rogue/rgxccb.h +new file mode 100644 +index 000000000000..0dddee171a7e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxccb.h +@@ -0,0 +1,356 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Circular Command Buffer functionality. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX Circular Command Buffer functionality. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXCCB_H) ++#define RGXCCB_H ++ ++#include "devicemem.h" ++#include "device.h" ++#include "rgxdevice.h" ++#include "sync_server.h" ++#include "connection_server.h" ++#include "rgxdebug.h" ++#include "rgxdefs_km.h" ++#include "pvr_notifier.h" ++ ++#define MAX_CLIENT_CCB_NAME 30 ++#define SYNC_FLAG_MASK_ALL IMG_UINT32_MAX ++ ++/* ++ * This size is to be used when a client CCB is found to consume very ++ * negligible space (e.g. a few hundred bytes to few KBs - less than a page). ++ * In such a case, instead of allocating CCB of size of only a few KBs, we ++ * allocate at-least this much to be future risk-free. ++ */ ++#define MIN_SAFE_CCB_SIZE_LOG2 13 /* 8K (2 Pages) */ ++#define MAX_SAFE_CCB_SIZE_LOG2 18 /* 256K (64 Pages) */ ++ ++#define RGX_TQ3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D ++static_assert(RGX_TQ3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && ++ RGX_TQ3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D CCB size is invalid"); ++#define RGX_TQ3D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ3D ++static_assert(RGX_TQ3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D ++ && RGX_TQ3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ3D max CCB size is invalid"); ++ ++#define RGX_TQ2D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D ++static_assert(RGX_TQ2D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && ++ RGX_TQ2D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D CCB size is invalid"); ++#define RGX_TQ2D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TQ2D ++static_assert(RGX_TQ2D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D && ++ RGX_TQ2D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TQ2D max CCB size is invalid"); ++ ++#define RGX_CDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM ++static_assert(RGX_CDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && ++ RGX_CDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM CCB size is invalid"); ++#define RGX_CDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_CDM ++static_assert(RGX_CDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM && ++ RGX_CDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "CDM max CCB size is invalid"); ++ ++#define RGX_TA_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA ++static_assert(RGX_TA_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && ++ RGX_TA_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA CCB size is invalid"); ++#define RGX_TA_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TA ++static_assert(RGX_TA_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA && ++ RGX_TA_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TA max CCB size is invalid"); ++ ++#define RGX_3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D ++static_assert(RGX_3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && ++ RGX_3D_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D CCB size is invalid"); ++#define RGX_3D_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_3D 
++static_assert(RGX_3D_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D && ++ RGX_3D_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "3D max CCB size is invalid"); ++ ++#define RGX_KICKSYNC_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC ++static_assert(RGX_KICKSYNC_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && ++ RGX_KICKSYNC_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync CCB size is invalid"); ++#define RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_KICKSYNC ++static_assert(RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC && ++ RGX_KICKSYNC_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "KickSync max CCB size is invalid"); ++ ++#define RGX_TDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM ++static_assert(RGX_TDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && ++ RGX_TDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM CCB size is invalid"); ++#define RGX_TDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_TDM ++static_assert(RGX_TDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TDM && ++ RGX_TDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "TDM max CCB size is invalid"); ++ ++#define RGX_RDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM ++static_assert(RGX_RDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2 && ++ RGX_RDM_CCB_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "RDM CCB size is invalid"); ++#define RGX_RDM_CCB_MAX_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_MAX_SIZE_RDM ++static_assert(RGX_RDM_CCB_MAX_SIZE_LOG2 >= PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RDM && ++ RGX_RDM_CCB_MAX_SIZE_LOG2 <= MAX_SAFE_CCB_SIZE_LOG2, "RDM max CCB size is invalid"); ++ ++typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB; ++ ++/* ++ This structure is declared here as it's allocated on the heap by ++ the callers ++*/ ++ ++typedef struct _RGX_CCB_CMD_HELPER_DATA_ { ++ /* Data setup at command init time */ ++ RGX_CLIENT_CCB *psClientCCB; ++ IMG_CHAR *pszCommandName; ++ IMG_UINT32 ui32PDumpFlags; ++ ++ IMG_UINT32 ui32ClientFenceCount; ++ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress; ++ IMG_UINT32 *paui32FenceValue; ++ IMG_UINT32 ui32ClientUpdateCount; ++ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress; ++ IMG_UINT32 *paui32UpdateValue; ++ RGXFWIF_CCB_CMD_TYPE eType; ++ IMG_UINT32 ui32CmdSize; ++ IMG_UINT8 *pui8DMCmd; ++ IMG_UINT32 ui32FenceCmdSize; ++ IMG_UINT32 ui32FBSCInvalCmdSize; ++ IMG_UINT32 ui32DMCmdSize; ++ IMG_UINT32 ui32UpdateCmdSize; ++ ++ /* data for FBSC invalidate command */ ++ IMG_UINT64 ui64FBSCEntryMask; ++ ++ /* timestamp commands */ ++ PRGXFWIF_TIMESTAMP_ADDR pPreTimestampAddr; ++ IMG_UINT32 ui32PreTimeStampCmdSize; ++ PRGXFWIF_TIMESTAMP_ADDR pPostTimestampAddr; ++ IMG_UINT32 ui32PostTimeStampCmdSize; ++ PRGXFWIF_UFO_ADDR pRMWUFOAddr; ++ IMG_UINT32 ui32RMWUFOCmdSize; ++ ++ /* Job reference fields */ ++ IMG_UINT32 ui32ExtJobRef; ++ IMG_UINT32 ui32IntJobRef; ++ ++ /* FW Memdesc for Workload information */ ++ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData; ++ ++} RGX_CCB_CMD_HELPER_DATA; ++ ++#define PADDING_COMMAND_SIZE (sizeof(RGXFWIF_CCB_CMD_HEADER)) ++ ++ ++#define RGX_CCB_REQUESTORS(TYPE) \ ++ /* for debugging purposes */ TYPE(UNDEF) \ ++ TYPE(TA) \ ++ TYPE(3D) \ ++ TYPE(CDM) \ ++ TYPE(SH) \ ++ TYPE(RS) \ ++ TYPE(TQ_3D) \ ++ TYPE(TQ_2D) \ ++ TYPE(TQ_TDM) \ ++ TYPE(KICKSYNC) \ ++ TYPE(RAY) \ ++ ++/* Forms an enum constant for each type present in RGX_CCB_REQUESTORS list. The enum is mainly used as ++ an index to the aszCCBRequestors table defined in rgxccb.c. The total number of enums must adhere ++ to the following build assert. 
++*/ ++typedef enum _RGX_CCB_REQUESTOR_TYPE_ ++{ ++#define CONSTRUCT_ENUM(req) REQ_TYPE_##req, ++ RGX_CCB_REQUESTORS (CONSTRUCT_ENUM) ++#undef CONSTRUCT_ENUM ++ ++ /* should always be at the end */ ++ REQ_TYPE_TOTAL_COUNT, ++} RGX_CCB_REQUESTOR_TYPE; ++ ++/* Tuple describing the columns of the following table */ ++typedef enum _RGX_CCB_REQUESTOR_TUPLE_ ++{ ++ REQ_RGX_FW_CLIENT_CCB_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCB for this requestor */ ++ REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCBControl for this requestor */ ++ REQ_PDUMP_COMMENT, /* Index to comment to be dumped in PDUMPs */ ++ ++ /* should always be at the end */ ++ REQ_TUPLE_CARDINALITY, ++} RGX_CCB_REQUESTOR_TUPLE; ++ ++/* Unpack U8 values from U32. */ ++#define U32toU8_Unpack1(U32Packed) (U32Packed & 0xFF) ++#define U32toU8_Unpack2(U32Packed) ((U32Packed>>8) & 0xFF) ++#define U32toU8_Unpack3(U32Packed) ((U32Packed>>16) & 0xFF) ++#define U32toU8_Unpack4(U32Packed) ((U32Packed>>24) & 0xFF) ++ ++/* Defines for bit meanings within the ui32CCBFlags member of struct _RGX_CLIENT_CCB_ ++ * ++ * ( X = taken/in use, - = available/unused ) ++ * ++ * 31 10 ++ * | || ++ * ------------------------------XX ++ * Bit Meaning ++ * 0 = If set, CCB is still open and commands will be appended to it ++ * 1 = If set, do not perform Sync Lockup Recovery (SLR) for this CCB ++ */ ++#define CCB_FLAGS_CCB_STATE_OPEN (0) /*!< This bit is set to indicate CCB is in the 'Open' state. */ ++#define CCB_FLAGS_SLR_DISABLED (1) /*!< This bit is set to disable Sync Lockup Recovery (SLR) for this CCB. */ ++ ++ ++/* Table containing an array of strings for each requestor type in the list of RGX_CCB_REQUESTORS. In addition to its use in ++ this module (rgxccb.c), this table is also used to access string to be dumped in PDUMP comments, hence, marking it extern for ++ use in other modules. 
++*/ ++extern const IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY]; ++ ++PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32CCBSizeLog2, ++ IMG_UINT32 ui32CCBMaxSizeLog2, ++ IMG_UINT32 ui32ContextFlags, ++ CONNECTION_DATA *psConnectionData, ++ RGX_CCB_REQUESTOR_TYPE eCCBRequestor, ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, ++ RGX_CLIENT_CCB **ppsClientCCB, ++ DEVMEM_MEMDESC **ppsClientCCBMemDesc, ++ DEVMEM_MEMDESC **ppsClientCCBCtlMemDesc); ++ ++void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB); ++ ++PVRSRV_ERROR RGXCheckSpaceCCB(RGX_CLIENT_CCB *psClientCCB, IMG_UINT32 ui32CmdSize); ++ ++PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32CmdSize, ++ void **ppvBufferSpace, ++ IMG_UINT32 ui32PDumpFlags); ++ ++void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32CmdSize, ++ IMG_UINT32 ui32PDumpFlags); ++ ++IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB); ++IMG_UINT32 RGXGetWrapMaskCCB(RGX_CLIENT_CCB *psClientCCB); ++ ++PVRSRV_ERROR RGXSetCCBFlags(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32Flags); ++ ++void RGXCmdHelperInitCmdCCB_CommandSize(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT64 ui64FBSCEntryMask, ++ IMG_UINT32 ui32ClientFenceCount, ++ IMG_UINT32 ui32ClientUpdateCount, ++ IMG_UINT32 ui32CmdSize, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, ++ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); ++ ++void RGXCmdHelperInitCmdCCB_OtherData(RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT32 ui32ClientFenceCount, ++ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, ++ IMG_UINT32 *paui32FenceValue, ++ IMG_UINT32 ui32ClientUpdateCount, ++ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, ++ IMG_UINT32 *paui32UpdateValue, ++ IMG_UINT32 ui32CmdSize, ++ IMG_PBYTE pui8DMCmd, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, ++ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, ++ RGXFWIF_CCB_CMD_TYPE eType, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_UINT32 ui32PDumpFlags, ++ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, ++ IMG_CHAR *pszCommandName, ++ IMG_BOOL bCCBStateOpen, ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); ++ ++void RGXCmdHelperInitCmdCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGX_CLIENT_CCB *psClientCCB, ++ IMG_UINT64 ui64FBSCEntryMask, ++ IMG_UINT32 ui32ClientFenceCount, ++ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress, ++ IMG_UINT32 *paui32FenceValue, ++ IMG_UINT32 ui32ClientUpdateCount, ++ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress, ++ IMG_UINT32 *paui32UpdateValue, ++ IMG_UINT32 ui32CmdSize, ++ IMG_UINT8 *pui8DMCmd, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr, ++ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr, ++ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr, ++ RGXFWIF_CCB_CMD_TYPE eType, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_UINT32 ui32PDumpFlags, ++ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData, ++ IMG_CHAR *pszCommandName, ++ IMG_BOOL bCCBStateOpen, ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); ++ ++PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount, ++ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); ++ ++void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount, ++ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, ++ const IMG_CHAR *pcszDMName, ++ IMG_UINT32 ui32CtxAddr); ++ ++IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount, ++ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData); ++ ++IMG_UINT32 
RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData, ++ IMG_UINT32 ui32Cmdindex); ++ ++IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData); ++ ++void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, ++ RGX_CLIENT_CCB *psCurrentClientCCB, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++ ++void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ++ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext, ++ RGX_CLIENT_CCB *psCurrentClientCCB, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++ ++PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM); ++ ++void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo); ++#endif /* RGXCCB_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxcompute.c b/drivers/gpu/drm/img-rogue/rgxcompute.c +new file mode 100644 +index 000000000000..952940f6f67e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxcompute.c +@@ -0,0 +1,1324 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Compute routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX Compute routines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "srvkm.h" ++#include "pdump_km.h" ++#include "pvr_debug.h" ++#include "rgxutils.h" ++#include "rgxfwutils.h" ++#include "rgxcompute.h" ++#include "rgx_bvnc_defs_km.h" ++#include "rgxmem.h" ++#include "allocmem.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "osfunc.h" ++#include "rgxccb.h" ++#include "rgxhwperf.h" ++#include "ospvr_gputrace.h" ++#include "htbuffer.h" ++ ++#include "sync_server.h" ++#include "sync_internal.h" ++#include "sync.h" ++#include "rgx_memallocflags.h" ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++#include "pvr_buffer_sync.h" ++#endif ++ ++#include "sync_checkpoint.h" ++#include "sync_checkpoint_internal.h" ++ ++#include "rgxtimerquery.h" ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++#include "rgxworkest.h" ++ ++#define HASH_CLEAN_LIMIT 6 ++#endif ++ ++/* Enable this to dump the compiled list of UFOs prior to kick call */ ++#define ENABLE_CMP_UFO_DUMP 0 ++ ++//#define CMP_CHECKPOINT_DEBUG 1 ++ ++#if defined(CMP_CHECKPOINT_DEBUG) ++#define CHKPT_DBG(X) PVR_DPF(X) ++#else ++#define CHKPT_DBG(X) ++#endif ++ ++struct _RGX_SERVER_COMPUTE_CONTEXT_ { ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; ++ DEVMEM_MEMDESC *psFWComputeContextMemDesc; ++ DEVMEM_MEMDESC *psFWFrameworkMemDesc; ++ DEVMEM_MEMDESC *psFWComputeContextStateMemDesc; ++ DLLIST_NODE sListNode; ++ SYNC_ADDR_LIST sSyncAddrListFence; ++ SYNC_ADDR_LIST sSyncAddrListUpdate; ++ POS_LOCK hLock; ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ WORKEST_HOST_DATA sWorkEstData; ++#endif ++#if defined(SUPPORT_BUFFER_SYNC) ++ struct pvr_buffer_sync_context *psBufferSyncContext; ++#endif ++}; ++ ++PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Priority, ++ IMG_UINT32 ui32FrameworkCommandSize, ++ IMG_PBYTE pbyFrameworkCommand, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32StaticComputecontextStateSize, ++ IMG_PBYTE pStaticComputecontextState, ++ IMG_UINT32 ui32PackedCCBSizeU88, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress, ++ IMG_UINT32 ui32MaxDeadlineMS, ++ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); ++ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext; ++ RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; ++ IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2; ++ ++ /* Prepare cleanup struct */ ++ *ppsComputeContext = NULL; ++ ++ psComputeContext = OSAllocZMem(sizeof(*psComputeContext)); ++ if (psComputeContext == NULL) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ /* ++ Create the FW compute context, this has the CDM common ++ context embedded within it ++ */ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(RGXFWIF_FWCOMPUTECONTEXT), ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwComputeContext", ++ &psComputeContext->psFWComputeContextMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_fwcomputecontext; ++ } ++ ++ eError = OSLockCreate(&psComputeContext->hLock); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to create lock (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_createlock; ++ } ++ ++ psComputeContext->psDeviceNode = psDeviceNode; ++ ++ /* ++ 
Allocate device memory for the firmware GPU context suspend state. ++ Note: the FW reads/writes the state to memory by accessing the GPU register interface. ++ */ ++ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware compute context suspend state"); ++ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(RGXFWIF_COMPUTECTX_STATE), ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwComputeContextState", ++ &psComputeContext->psFWComputeContextStateMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate firmware GPU context suspend state (%d)", ++ __func__, ++ eError)); ++ goto fail_contextsuspendalloc; ++ } ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ WorkEstInitCompute(psDevInfo, &psComputeContext->sWorkEstData); ++#endif ++ ++ if (ui32FrameworkCommandSize) ++ { ++ /* ++ * Create the FW framework buffer ++ */ ++ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, ++ &psComputeContext->psFWFrameworkMemDesc, ++ ui32FrameworkCommandSize); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate firmware GPU framework state (%d)", ++ __func__, ++ eError)); ++ goto fail_frameworkcreate; ++ } ++ ++ /* Copy the Framework client data into the framework buffer */ ++ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode, ++ psComputeContext->psFWFrameworkMemDesc, ++ pbyFrameworkCommand, ++ ui32FrameworkCommandSize); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to populate the framework buffer (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_frameworkcopy; ++ } ++ ++ sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc; ++ } ++ ++ ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88); ++ ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88); ++ eError = FWCommonContextAllocate(psConnection, ++ psDeviceNode, ++ REQ_TYPE_CDM, ++ RGXFWIF_DM_CDM, ++ NULL, ++ psComputeContext->psFWComputeContextMemDesc, ++ offsetof(RGXFWIF_FWCOMPUTECONTEXT, sCDMContext), ++ psFWMemContextMemDesc, ++ psComputeContext->psFWComputeContextStateMemDesc, ++ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_CDM_CCB_SIZE_LOG2, ++ ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_CDM_CCB_MAX_SIZE_LOG2, ++ ui32ContextFlags, ++ ui32Priority, ++ ui32MaxDeadlineMS, ++ ui64RobustnessAddress, ++ &sInfo, ++ &psComputeContext->psServerCommonContext); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_contextalloc; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, ++ (void **)&psFWComputeContext); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_acquire_cpu_mapping; ++ } ++ ++ OSDeviceMemCopy(&psFWComputeContext->sStaticComputeContextState, pStaticComputecontextState, ui32StaticComputecontextStateSize); ++ DevmemPDumpLoadMem(psComputeContext->psFWComputeContextMemDesc, 0, sizeof(RGXFWIF_FWCOMPUTECONTEXT), PDUMP_FLAGS_CONTINUOUS); ++ DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ psComputeContext->psBufferSyncContext = ++ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, ++ "rogue-cdm"); ++ if (IS_ERR(psComputeContext->psBufferSyncContext)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to create buffer_sync context (err=%ld)", ++ __func__, PTR_ERR(psComputeContext->psBufferSyncContext))); ++ ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto fail_buffer_sync_context_create; ++ } ++#endif ++ ++ SyncAddrListInit(&psComputeContext->sSyncAddrListFence); ++ SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate); ++ ++ { ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); ++ dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); ++ } ++ ++ *ppsComputeContext = psComputeContext; ++ return PVRSRV_OK; ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++fail_buffer_sync_context_create: ++#endif ++fail_acquire_cpu_mapping: ++ FWCommonContextFree(psComputeContext->psServerCommonContext); ++fail_contextalloc: ++fail_frameworkcopy: ++ if (psComputeContext->psFWFrameworkMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc); ++ } ++fail_frameworkcreate: ++ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc); ++fail_contextsuspendalloc: ++ OSLockDestroy(psComputeContext->hLock); ++fail_createlock: ++ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); ++fail_fwcomputecontext: ++ OSFreeMem(psComputeContext); ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ RGXFWIF_FWCOMPUTECONTEXT *psFWComputeContext; ++ IMG_UINT32 ui32WorkEstCCBSubmitted; ++#endif ++ ++ /* Check if the FW has finished with this resource ... 
*/ ++ eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode, ++ psComputeContext->psServerCommonContext, ++ RGXFWIF_DM_CDM, ++ PDUMP_FLAGS_NONE); ++ ++ if (eError == PVRSRV_ERROR_RETRY) ++ { ++ return eError; ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ /* remove after RGXFWRequestCommonContextCleanUp() because we might return ++ * RETRY and don't want to be calling this twice */ ++ if (psComputeContext->psBufferSyncContext != NULL) ++ { ++ pvr_buffer_sync_context_destroy(psComputeContext->psBufferSyncContext); ++ psComputeContext->psBufferSyncContext = NULL; ++ } ++#endif ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ eError = DevmemAcquireCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc, ++ (void **)&psFWComputeContext); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map firmware compute context (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++ ui32WorkEstCCBSubmitted = psFWComputeContext->ui32WorkEstCCBSubmitted; ++ ++ DevmemReleaseCpuVirtAddr(psComputeContext->psFWComputeContextMemDesc); ++ ++ /* Check if all of the workload estimation CCB commands for this workload are read */ ++ if (ui32WorkEstCCBSubmitted != psComputeContext->sWorkEstData.ui32WorkEstCCBReceived) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", ++ __func__, ui32WorkEstCCBSubmitted, ++ psComputeContext->sWorkEstData.ui32WorkEstCCBReceived)); ++ ++ return PVRSRV_ERROR_RETRY; ++ } ++#endif ++ ++ /* ... it has so we can free its resources */ ++ ++ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock); ++ dllist_remove_node(&(psComputeContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock); ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ WorkEstDeInitCompute(psDevInfo, &psComputeContext->sWorkEstData); ++#endif ++ ++ FWCommonContextFree(psComputeContext->psServerCommonContext); ++ if (psComputeContext->psFWFrameworkMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc); ++ } ++ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psComputeContext->psFWComputeContextMemDesc); ++ ++ OSLockDestroy(psComputeContext->hLock); ++ OSFreeMem(psComputeContext); ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, ++ IMG_UINT32 ui32ClientUpdateCount, ++ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, ++ IMG_UINT32 *paui32ClientUpdateSyncOffset, ++ IMG_UINT32 *paui32ClientUpdateValue, ++ PVRSRV_FENCE iCheckFence, ++ PVRSRV_TIMELINE iUpdateTimeline, ++ PVRSRV_FENCE *piUpdateFence, ++ IMG_CHAR pszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], ++ IMG_UINT32 ui32CmdSize, ++ IMG_PBYTE pui8DMCmd, ++ IMG_UINT32 ui32PDumpFlags, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32SyncPMRCount, ++ IMG_UINT32 *paui32SyncPMRFlags, ++ PMR **ppsSyncPMRs, ++ IMG_UINT32 ui32NumWorkgroups, ++ IMG_UINT32 ui32NumWorkitems, ++ IMG_UINT64 ui64DeadlineInus) ++{ ++ RGXFWIF_KCCB_CMD sCmpKCCBCmd; ++ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; ++ PVRSRV_ERROR eError; ++ PVRSRV_ERROR eError2; ++ IMG_UINT32 ui32CDMCmdOffset = 0; ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psComputeContext->psServerCommonContext); ++ RGX_CLIENT_CCB 
*psClientCCB = FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext); ++ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); ++ IMG_UINT32 ui32FWCtx; ++ IMG_BOOL bCCBStateOpen = IMG_FALSE; ++ ++ PRGXFWIF_TIMESTAMP_ADDR pPreAddr; ++ PRGXFWIF_TIMESTAMP_ADDR pPostAddr; ++ PRGXFWIF_UFO_ADDR pRMWUFOAddr; ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataCompute = {0}; ++ IMG_UINT32 ui32CDMWorkloadDataRO = 0; ++ IMG_UINT32 ui32CDMCmdHeaderOffset = 0; ++ IMG_UINT32 ui32CDMCmdOffsetWrapCheck = 0; ++ RGX_WORKLOAD sWorkloadCharacteristics = {0}; ++#endif ++ ++ IMG_UINT32 ui32IntClientFenceCount = 0; ++ PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL; ++ IMG_UINT32 ui32IntClientUpdateCount = 0; ++ PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL; ++ IMG_UINT32 *paui32IntUpdateValue = NULL; ++ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; ++ IMG_UINT64 uiCheckFenceUID = 0; ++ IMG_UINT64 uiUpdateFenceUID = 0; ++ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; ++ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; ++ IMG_UINT32 ui32FenceSyncCheckpointCount = 0; ++ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; ++ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; ++ IMG_UINT32 ui32FenceTimelineUpdateValue = 0; ++ void *pvUpdateFenceFinaliseData = NULL; ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; ++ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; ++ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; ++ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ ++ CMD_COMMON *psComputeCmdCmn = IMG_OFFSET_ADDR(pui8DMCmd, 0); ++ ++ if (iUpdateTimeline >= 0 && !piUpdateFence) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* Ensure we haven't been given a null ptr to ++ * update values if we have been told we ++ * have updates ++ */ ++ if (ui32ClientUpdateCount > 0) ++ { ++ PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, ++ "paui32ClientUpdateValue NULL but " ++ "ui32ClientUpdateCount > 0", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ /* Ensure the string is null-terminated (Required for safety) */ ++ pszUpdateFenceName[31] = '\0'; ++ ++ OSLockAcquire(psComputeContext->hLock); ++ ++ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence, ++ 0, ++ NULL, ++ NULL); ++ if (eError != PVRSRV_OK) ++ { ++ goto err_populate_sync_addr_list; ++ } ++ ++ ui32IntClientUpdateCount = ui32ClientUpdateCount; ++ ++ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate, ++ ui32ClientUpdateCount, ++ pauiClientUpdateUFODevVarBlock, ++ paui32ClientUpdateSyncOffset); ++ if (eError != PVRSRV_OK) ++ { ++ goto err_populate_sync_addr_list; ++ } ++ if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress) ++ { ++ pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs; ++ } ++ paui32IntUpdateValue = paui32ClientUpdateValue; ++ ++ if (ui32SyncPMRCount != 0) ++ { ++#if defined(SUPPORT_BUFFER_SYNC) ++ int err; ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling " ++ "pvr_buffer_sync_resolve_and_create_fences", __func__)); ++ ++ err = pvr_buffer_sync_resolve_and_create_fences( ++ psComputeContext->psBufferSyncContext, ++ psComputeContext->psDeviceNode->hSyncCheckpointContext, ++ ui32SyncPMRCount, ++ ppsSyncPMRs, ++ paui32SyncPMRFlags, ++ &ui32BufferFenceSyncCheckpointCount, ++ &apsBufferFenceSyncCheckpoints, ++ &psBufferUpdateSyncCheckpoint, ++ &psBufferSyncData ++ ); ++ ++ if 
(unlikely(err))
++		{
++			switch (err)
++			{
++				case -EINTR:
++					eError = PVRSRV_ERROR_RETRY;
++					break;
++				case -ENOMEM:
++					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++					break;
++				default:
++					eError = PVRSRV_ERROR_INVALID_PARAMS;
++					break;
++			}
++
++			if (eError != PVRSRV_ERROR_RETRY)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: "
++						"pvr_buffer_sync_resolve_and_create_fences failed (%d)",
++						__func__, eError));
++			}
++
++			goto fail_resolve_input_fence;
++		}
++
++		/* Append buffer sync fences */
++		if (ui32BufferFenceSyncCheckpointCount > 0)
++		{
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints "
++					  "to CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>, "
++					  "pauiIntFenceUFOAddress=<%p>)...", __func__,
++					  ui32BufferFenceSyncCheckpointCount,
++					  (void *) &psComputeContext->sSyncAddrListFence,
++					  (void *) pauiIntFenceUFOAddress));
++
++			SyncAddrListAppendAndDeRefCheckpoints(&psComputeContext->sSyncAddrListFence,
++			                                      ui32BufferFenceSyncCheckpointCount,
++			                                      apsBufferFenceSyncCheckpoints);
++			if (pauiIntFenceUFOAddress == NULL)
++			{
++				pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
++			}
++			ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
++		}
++
++		/* Append the update (from output fence) */
++		if (psBufferUpdateSyncCheckpoint)
++		{
++			SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
++			                              1, &psBufferUpdateSyncCheckpoint);
++			if (pauiIntUpdateUFOAddress == NULL)
++			{
++				pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
++			}
++			ui32IntClientUpdateCount++;
++		}
++#else /* defined(SUPPORT_BUFFER_SYNC) */
++		PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers",
++				__func__, ui32SyncPMRCount));
++		eError = PVRSRV_ERROR_INVALID_PARAMS;
++		goto err_populate_sync_addr_list;
++#endif /* defined(SUPPORT_BUFFER_SYNC) */
++	}
++
++	CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
++	/* Resolve the sync checkpoints that make up the input fence */
++	eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext,
++	                                    iCheckFence,
++	                                    &ui32FenceSyncCheckpointCount,
++	                                    &apsFenceSyncCheckpoints,
++	                                    &uiCheckFenceUID, ui32PDumpFlags);
++	if (eError != PVRSRV_OK)
++	{
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
++		goto fail_free_buffer_sync_data;
++	}
++	CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
++#if defined(CMP_CHECKPOINT_DEBUG)
++	if (ui32FenceSyncCheckpointCount > 0)
++	{
++		IMG_UINT32 ii;
++		for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
++		{
++			PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
++		}
++	}
++#endif
++	/* Create the output fence (if required) */
++	if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
++	{
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __func__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
++		eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode,
++		                                   pszUpdateFenceName,
++		                                   iUpdateTimeline,
++		                                   psComputeContext->psDeviceNode->hSyncCheckpointContext,
++		                                   &iUpdateFence,
++		                                   &uiUpdateFenceUID,
++		                                   &pvUpdateFenceFinaliseData,
++		                                   &psUpdateSyncCheckpoint,
++		                                   (void*)&psFenceTimelineUpdateSync,
++		                                   &ui32FenceTimelineUpdateValue,
++		                                   ui32PDumpFlags);
++		if (eError != PVRSRV_OK)
++		{
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __func__, eError));
++			goto fail_create_output_fence;
++		}
++
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __func__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
++
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __func__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync));
++		/* Append the sync prim update for the timeline (if required) */
++		if (psFenceTimelineUpdateSync)
++		{
++			IMG_UINT32 *pui32TimelineUpdateWp = NULL;
++
++			/* Allocate memory to hold the list of update values (including our timeline update) */
++			pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
++			if (!pui32IntAllocatedUpdateValues)
++			{
++				/* Failed to allocate memory */
++				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++				goto fail_alloc_update_values_mem;
++			}
++			OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
++			/* Copy the update values into the new memory, then append our timeline update value */
++			if (paui32IntUpdateValue)
++			{
++				OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
++			}
++#if defined(CMP_CHECKPOINT_DEBUG)
++			if (ui32IntClientUpdateCount > 0)
++			{
++				IMG_UINT32 iii;
++				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
++
++				CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
++				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++				{
++					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++					pui32Tmp++;
++				}
++			}
++#endif
++			/* Now set the additional update value */
++			pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
++			*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
++			ui32IntClientUpdateCount++;
++			/* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
++			paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
++
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __func__, (void*)psFenceTimelineUpdateSync));
++			/* Now append the timeline sync prim addr to the compute context update list */
++			SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate,
++			                           psFenceTimelineUpdateSync);
++#if defined(CMP_CHECKPOINT_DEBUG)
++			if (ui32IntClientUpdateCount > 0)
++			{
++				IMG_UINT32 iii;
++				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
++
++				CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __func__, ui32IntClientUpdateCount));
++				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++				{
++					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++					pui32Tmp++;
++				}
++			}
++#endif
++			/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
++			paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
++		}
++	}
++
++	/* Append the checks (from input fence) */
++	if (ui32FenceSyncCheckpointCount > 0)
++	{
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence));
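++		/* By this point the internal update-value array holds the client
++		 * update values in slots [0 .. ui32ClientUpdateCount-1], followed by
++		 * the timeline sync prim value when an update timeline was supplied.
++		 * The append below folds the checkpoints already resolved from
++		 * iCheckFence into the context's fence address list, and
++		 * ui32IntClientFenceCount is grown to match. */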
++#if defined(CMP_CHECKPOINT_DEBUG)
++		if (ui32IntClientUpdateCount > 0)
++		{
++			IMG_UINT32 iii;
++			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
++
++			for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++			{
++				CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntFenceUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++				pui32Tmp++;
++			}
++		}
++#endif
++		SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence,
++		                              ui32FenceSyncCheckpointCount,
++		                              apsFenceSyncCheckpoints);
++		if (!pauiIntFenceUFOAddress)
++		{
++			pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
++		}
++		ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
++	}
++#if defined(CMP_CHECKPOINT_DEBUG)
++	if (ui32IntClientUpdateCount > 0)
++	{
++		IMG_UINT32 iii;
++		IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue;
++
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: Dumping %d update values (paui32IntUpdateValue=<%p>)...", __func__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue));
++		for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++		{
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __func__, iii, (void*)pui32Tmp));
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __func__, iii, *pui32Tmp));
++			pui32Tmp++;
++		}
++	}
++#endif
++
++	if (psUpdateSyncCheckpoint)
++	{
++		/* Append the update (from output fence) */
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __func__, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)psUpdateSyncCheckpoint));
++		SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
++		                              1,
++		                              &psUpdateSyncCheckpoint);
++		if (!pauiIntUpdateUFOAddress)
++		{
++			pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
++		}
++		ui32IntClientUpdateCount++;
++#if defined(CMP_CHECKPOINT_DEBUG)
++		if (ui32IntClientUpdateCount > 0)
++		{
++			IMG_UINT32 iii;
++			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
++
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __func__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
++			for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++			{
++				CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++				pui32Tmp++;
++			}
++		}
++#endif
++	}
++	CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
++
++#if (ENABLE_CMP_UFO_DUMP == 1)
++	PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __func__));
++	{
++		IMG_UINT32 ii;
++		PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
++		PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
++		IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
++
++		/* Dump Fence syncs and Update syncs */
++		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
++		for (ii=0; ii<ui32IntClientFenceCount; ii++)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
++			psTmpIntFenceUFOAddress++;
++		}
++		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
++		for (ii=0; ii<ui32IntClientUpdateCount; ii++)
++		{
++			if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
++			}
++			else
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
++				pui32TmpIntUpdateValue++;
++			}
++			psTmpIntUpdateUFOAddress++;
++		}
++	}
++#endif
++
++#if defined(SUPPORT_WORKLOAD_ESTIMATION)
++	sWorkloadCharacteristics.sCompute.ui32NumberOfWorkgroups = ui32NumWorkgroups;
++	sWorkloadCharacteristics.sCompute.ui32NumberOfWorkitems = ui32NumWorkitems;
++
++	/* Prepare workload estimation */
++	WorkEstPrepare(psComputeContext->psDeviceNode->pvDevice,
++	               &psComputeContext->sWorkEstData,
++	               &psComputeContext->sWorkEstData.uWorkloadMatchingData.sCompute.sDataCDM,
++	               RGXFWIF_CCB_CMD_TYPE_CDM,
++	               &sWorkloadCharacteristics,
++	               ui64DeadlineInus,
++	               &sWorkloadKickDataCompute);
++#endif
++
++	RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice,
++	                          &pPreAddr,
++	                          &pPostAddr,
++	                          &pRMWUFOAddr);
++
++	RGXCmdHelperInitCmdCCB(psDevInfo,
++	                       psClientCCB,
++	                       0,
++	                       ui32IntClientFenceCount,
++	                       pauiIntFenceUFOAddress,
++	                       NULL,
++	                       ui32IntClientUpdateCount,
++	                       pauiIntUpdateUFOAddress,
++	                       paui32IntUpdateValue,
++	                       ui32CmdSize,
++	                       pui8DMCmd,
++	                       &pPreAddr,
++	                       &pPostAddr,
++	                       &pRMWUFOAddr,
++	                       RGXFWIF_CCB_CMD_TYPE_CDM,
++	                       ui32ExtJobRef,
++	                       ui32IntJobRef,
++	                       ui32PDumpFlags,
++#if defined(SUPPORT_WORKLOAD_ESTIMATION)
++	                       &sWorkloadKickDataCompute,
++#else
++	                       NULL,
++#endif
++	                       "Compute",
++	                       bCCBStateOpen,
++	                       asCmdHelperData);
++
++	eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
++	if (eError != PVRSRV_OK)
++	{
++		goto fail_cmdaquire;
++	}
++
++
++	/*
++	   We should reserve space in the kernel CCB here and fill in the command
++	   directly.
++	   This is so if there isn't space in the kernel CCB we can return with
++	   retry back to the services client before we take any operations
++	*/
++
++	/*
++	   We might only be kicking to flush out a padding packet so only submit
++	   the command if the create was successful
++	*/
++	if (eError == PVRSRV_OK)
++	{
++		/*
++		   All the required resources are ready at this point, we can't fail so
++		   take the required server sync operations and commit all the resources
++		*/
++
++		ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(psClientCCB);
++		RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
++	}
++
++#if defined(SUPPORT_WORKLOAD_ESTIMATION)
++	/* The following is used to determine the offset of the command header containing
++	   the workload estimation data so that it can be accessed when the KCCB is read */
++	ui32CDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(asCmdHelperData);
++
++	ui32CDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
++
++	/* This checks if the command would wrap around at the end of the CCB and
++	 * therefore would start at an offset of 0 rather than the current command
++	 * offset */
++	if (ui32CDMCmdOffset < ui32CDMCmdOffsetWrapCheck)
++	{
++		ui32CDMWorkloadDataRO = ui32CDMCmdOffset;
++	}
++	else
++	{
++		ui32CDMWorkloadDataRO = 0;
++	}
++#endif
++
++	/* Construct the kernel compute CCB command. */
++	sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
++	sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
++	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
++	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
++	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
++
++	/* Add the Workload data into the KCCB kick */
++#if defined(SUPPORT_WORKLOAD_ESTIMATION)
++	/* Store the offset to the CCCB command header so that it can be referenced
++	 * when the KCCB command reaches the FW */
++	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32CDMWorkloadDataRO + ui32CDMCmdHeaderOffset;
++#else
++	sCmpKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
++#endif
++
++	ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr;
++
++	if (psComputeCmdCmn)
++	{
++		HTBLOGK(HTB_SF_MAIN_KICK_CDM,
++		        sCmpKCCBCmd.uCmdData.sCmdKickData.psContext,
++		        ui32CDMCmdOffset,
++		        psComputeCmdCmn->ui32FrameNum,
++		        ui32ExtJobRef,
++		        ui32IntJobRef);
++	}
++
++	RGXSRV_HWPERF_ENQ(psComputeContext,
++	                  OSGetCurrentClientProcessIDKM(),
++	                  ui32FWCtx,
++	                  ui32ExtJobRef,
++	                  ui32IntJobRef,
++	                  RGX_HWPERF_KICK_TYPE_CDM,
++	                  iCheckFence,
++	                  iUpdateFence,
++	                  iUpdateTimeline,
++	                  uiCheckFenceUID,
++	                  uiUpdateFenceUID,
++	                  NO_DEADLINE,
++	                  NO_CYCEST);
++
++	/*
++	 * Submit the compute command to the firmware.
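++	 * RGXScheduleCommand is retried for as long as it reports
++	 * PVRSRV_ERROR_RETRY (e.g. while there is no free space in the kernel
++	 * CCB), waiting MAX_HW_TIME_US/WAIT_TRY_COUNT between attempts until
++	 * the LOOP_UNTIL_TIMEOUT window expires.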
++ */ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice, ++ RGXFWIF_DM_CDM, ++ &sCmpKCCBCmd, ++ ui32PDumpFlags); ++ if (eError2 != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (eError2 != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s failed to schedule kernel CCB command (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError2))); ++ } ++ else ++ { ++ PVRGpuTraceEnqueueEvent(psComputeContext->psDeviceNode->pvDevice, ++ ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, ++ RGX_HWPERF_KICK_TYPE_CDM); ++ } ++ /* ++ * Now check eError (which may have returned an error from our earlier call ++ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first ++ * so we check it now... ++ */ ++ if (eError != PVRSRV_OK ) ++ { ++ goto fail_cmdaquire; ++ } ++ ++#if defined(NO_HARDWARE) ++ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ ++ if (psUpdateSyncCheckpoint) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint))); ++ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint); ++ } ++ if (psFenceTimelineUpdateSync) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __func__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue)); ++ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue); ++ } ++ SyncCheckpointNoHWUpdateTimelines(NULL); ++#endif /* defined(NO_HARDWARE) */ ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ if (psBufferSyncData) ++ { ++ pvr_buffer_sync_kick_succeeded(psBufferSyncData); ++ } ++ if (apsBufferFenceSyncCheckpoints) ++ { ++ kfree(apsBufferFenceSyncCheckpoints); ++ } ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ ++ *piUpdateFence = iUpdateFence; ++ ++ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE)) ++ { ++ SyncCheckpointFinaliseFence(psComputeContext->psDeviceNode, iUpdateFence, ++ pvUpdateFenceFinaliseData, ++ psUpdateSyncCheckpoint, pszUpdateFenceName); ++ } ++ /* Drop the references taken on the sync checkpoints in the ++ * resolved input fence */ ++ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, ++ apsFenceSyncCheckpoints); ++ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ ++ if (apsFenceSyncCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); ++ } ++ /* Free memory allocated to hold the internal list of update values */ ++ if (pui32IntAllocatedUpdateValues) ++ { ++ OSFreeMem(pui32IntAllocatedUpdateValues); ++ pui32IntAllocatedUpdateValues = NULL; ++ } ++ ++ OSLockRelease(psComputeContext->hLock); ++ ++ return PVRSRV_OK; ++ ++fail_cmdaquire: ++ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence); ++ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate); ++fail_alloc_update_values_mem: ++ if (iUpdateFence != PVRSRV_NO_FENCE) ++ { ++ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); ++ } ++fail_create_output_fence: ++ /* Drop the references taken on the sync checkpoints in the ++ * resolved input fence */ ++ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, ++ apsFenceSyncCheckpoints); ++ ++fail_free_buffer_sync_data: ++#if 
defined(SUPPORT_BUFFER_SYNC) ++ if (psBufferSyncData) ++ { ++ pvr_buffer_sync_kick_failed(psBufferSyncData); ++ } ++ if (apsBufferFenceSyncCheckpoints) ++ { ++ kfree(apsBufferFenceSyncCheckpoints); ++ } ++ ++fail_resolve_input_fence: ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ ++err_populate_sync_addr_list: ++ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ ++ if (apsFenceSyncCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); ++ } ++ /* Free memory allocated to hold the internal list of update values */ ++ if (pui32IntAllocatedUpdateValues) ++ { ++ OSFreeMem(pui32IntAllocatedUpdateValues); ++ pui32IntAllocatedUpdateValues = NULL; ++ } ++ OSLockRelease(psComputeContext->hLock); ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) ++{ ++ RGXFWIF_KCCB_CMD sFlushCmd; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; ++ ++#if defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psComputeContext->psDeviceNode, ++ PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush"); ++#endif ++ sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; ++ sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE; ++ sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE; ++ sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext); ++ ++ OSLockAcquire(psComputeContext->hLock); ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, ++ RGXFWIF_DM_CDM, ++ &sFlushCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ /* Iterate if we hit a PVRSRV_ERROR_KERNEL_CCB_FULL error */ ++ if ((eError != PVRSRV_ERROR_RETRY) && ++ (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (eError != PVRSRV_OK) ++ { ++ /* If we hit a temporary KCCB exhaustion, return a RETRY to caller */ ++ if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Returning RETRY to caller", __func__)); ++ eError = PVRSRV_ERROR_RETRY; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to schedule SLC flush command (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ } ++ } ++ else ++ { ++ /* Wait for the SLC flush to complete */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Compute flush aborted (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ } ++ else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & ++ RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); ++ } ++ } ++ ++ OSLockRelease(psComputeContext->hLock); ++ return eError; ++} ++ ++ ++PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice; ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) && ++ 2 == RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT)) ++ { ++ ++ RGXFWIF_KCCB_CMD sKCCBCmd; ++ PVRSRV_ERROR eError; ++ ++ OSLockAcquire(psComputeContext->hLock); ++ ++ /* Schedule the firmware command */ ++ sKCCBCmd.eCmdType = 
RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
++		sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
++
++		LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++		{
++			eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
++			                            RGXFWIF_DM_CDM,
++			                            &sKCCBCmd,
++			                            PDUMP_FLAGS_NONE);
++			if (eError != PVRSRV_ERROR_RETRY)
++			{
++				break;
++			}
++			OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++		} END_LOOP_UNTIL_TIMEOUT();
++
++		if (eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,
++					"%s: Failed to schedule the FW command %d (%s)",
++					__func__,
++					eError,
++					PVRSRVGetErrorString(eError)));
++		}
++
++		OSLockRelease(psComputeContext->hLock);
++		return eError;
++	}
++	else
++	{
++		return PVRSRV_ERROR_NOT_SUPPORTED;
++	}
++}
++
++
++PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
++                                                  PVRSRV_DEVICE_NODE *psDeviceNode,
++                                                  RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
++                                                  IMG_UINT32 ui32Priority)
++{
++	PVRSRV_ERROR eError;
++
++	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
++
++	OSLockAcquire(psComputeContext->hLock);
++
++	eError = ContextSetPriority(psComputeContext->psServerCommonContext,
++	                            psConnection,
++	                            psComputeContext->psDeviceNode->pvDevice,
++	                            ui32Priority,
++	                            RGXFWIF_DM_CDM);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __func__, PVRSRVGetErrorString(eError)));
++	}
++
++	OSLockRelease(psComputeContext->hLock);
++	return eError;
++}
++
++/*
++ * PVRSRVRGXSetComputeContextPropertyKM
++ */
++PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
++                                                  RGX_CONTEXT_PROPERTY eContextProperty,
++                                                  IMG_UINT64 ui64Input,
++                                                  IMG_UINT64 *pui64Output)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	switch (eContextProperty)
++	{
++		case RGX_CONTEXT_PROPERTY_FLAGS:
++		{
++			IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input;
++
++			OSLockAcquire(psComputeContext->hLock);
++			eError = FWCommonContextSetFlags(psComputeContext->psServerCommonContext,
++			                                 ui32ContextFlags);
++			OSLockRelease(psComputeContext->hLock);
++			break;
++		}
++
++		default:
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty));
++			eError = PVRSRV_ERROR_NOT_SUPPORTED;
++		}
++	}
++
++	return eError;
++}
++
++void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
++                          DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
++                          void *pvDumpDebugFile,
++                          IMG_UINT32 ui32VerbLevel)
++{
++	DLLIST_NODE *psNode, *psNext;
++	OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
++	dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
++	{
++		RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
++			IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
++		DumpFWCommonContextInfo(psCurrentServerComputeCtx->psServerCommonContext,
++		                        pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel);
++	}
++	OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
++}
++
++IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
++{
++	IMG_UINT32 ui32ContextBitMask = 0;
++	DLLIST_NODE *psNode, *psNext;
++	OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
++	dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
++	{
++		RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
++			IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
++
++		if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->psServerCommonContext,
RGX_KICK_TYPE_DM_CDM) ++ == PVRSRV_ERROR_CCCB_STALLED) ++ { ++ ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM; ++ } ++ } ++ OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock); ++ return ui32ContextBitMask; ++} ++ ++/* ++ * PVRSRVRGXGetLastDeviceErrorKM ++ */ ++PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 *ui32Error) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ *ui32Error = psDevInfo->eLastDeviceError; ++ psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_NONE; ++ return PVRSRV_OK; ++} ++ ++/****************************************************************************** ++ End of file (rgxcompute.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxcompute.h b/drivers/gpu/drm/img-rogue/rgxcompute.h +new file mode 100644 +index 000000000000..0ac6e4a3491e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxcompute.h +@@ -0,0 +1,173 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX compute functionality ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX compute functionality ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(RGXCOMPUTE_H) ++#define RGXCOMPUTE_H ++ ++#include "devicemem.h" ++#include "device.h" ++#include "rgxfwutils.h" ++#include "rgx_fwif_resetframework.h" ++#include "rgxdebug.h" ++#include "pvr_notifier.h" ++ ++#include "sync_server.h" ++#include "sync_internal.h" ++#include "connection_server.h" ++ ++ ++typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT; ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVRGXCreateComputeContextKM ++ ++ @Description ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Priority, ++ IMG_UINT32 ui32FrameworkRegisterSize, ++ IMG_PBYTE pbyFrameworkRegisters, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32StaticComputecontextStateSize, ++ IMG_PBYTE pStaticComputecontextState, ++ IMG_UINT32 ui32PackedCCBSizeU88, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress, ++ IMG_UINT32 ui32MaxDeadlineMS, ++ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext); ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVRGXDestroyComputeContextKM ++ ++ @Description ++ Server-side implementation of RGXDestroyComputeContext ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); ++ ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVRGXKickCDMKM ++ ++ @Description ++ Server-side implementation of RGXKickCDM ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, ++ IMG_UINT32 ui32ClientUpdateCount, ++ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFODevVarBlock, ++ IMG_UINT32 *paui32ClientUpdateSyncOffset, ++ IMG_UINT32 *paui32ClientUpdateValue, ++ PVRSRV_FENCE iCheckFence, ++ PVRSRV_TIMELINE iUpdateTimeline, ++ PVRSRV_FENCE *piUpdateFence, ++ IMG_CHAR pcszUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], ++ IMG_UINT32 ui32CmdSize, ++ IMG_PBYTE pui8DMCmd, ++ IMG_UINT32 ui32PDumpFlags, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32SyncPMRCount, ++ IMG_UINT32 *paui32SyncPMRFlags, ++ PMR **ppsSyncPMRs, ++ IMG_UINT32 ui32NumWorkgroups, ++ IMG_UINT32 ui32NumWorkitems, ++ IMG_UINT64 ui64DeadlineInus); ++ ++/*! ++******************************************************************************* ++ @Function PVRSRVRGXFlushComputeDataKM ++ ++ @Description ++ Server-side implementation of RGXFlushComputeData ++ ++ @Input psComputeContext - Compute context to flush ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); ++ ++/*! 
++******************************************************************************* ++ ++ @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM ++ @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate ++ ++ @Input psComputeContext - Compute context to flush ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext); ++ ++PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, ++ IMG_UINT32 ui32Priority); ++ ++PVRSRV_ERROR PVRSRVRGXSetComputeContextPropertyKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext, ++ RGX_CONTEXT_PROPERTY eContextProperty, ++ IMG_UINT64 ui64Input, ++ IMG_UINT64 *pui64Output); ++ ++PVRSRV_ERROR PVRSRVRGXGetLastDeviceErrorKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 *ui32Error); ++ ++/* Debug - Dump debug info of compute contexts on this device */ ++void DumpComputeCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel); ++ ++/* Debug/Watchdog - check if client compute contexts are stalled */ ++IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++#endif /* RGXCOMPUTE_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxdebug.c b/drivers/gpu/drm/img-rogue/rgxdebug.c +new file mode 100644 +index 000000000000..92695696a6a7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxdebug.c +@@ -0,0 +1,5792 @@ ++/*************************************************************************/ /*! ++@File ++@Title Rgx debug information ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX debugging functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++//#define PVR_DPF_FUNCTION_TRACE_ON 1 ++#undef PVR_DPF_FUNCTION_TRACE_ON ++ ++#include "img_defs.h" ++#include "rgxdefs_km.h" ++#include "rgxdevice.h" ++#include "rgxmem.h" ++#include "allocmem.h" ++#include "cache_km.h" ++#include "osfunc.h" ++ ++#include "rgxdebug.h" ++#include "pvrversion.h" ++#include "pvr_debug.h" ++#include "srvkm.h" ++#include "rgxutils.h" ++#include "tlstream.h" ++#include "rgxfwutils.h" ++#include "pvrsrv.h" ++#include "services_km.h" ++ ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "devicemem_utils.h" ++#include "rgx_fwif_km.h" ++#include "rgx_fwif_sf.h" ++#include "rgxfw_log_helper.h" ++#include "fwtrace_string.h" ++#include "rgxfwimageutils.h" ++#include "fwload.h" ++ ++#include "rgxta3d.h" ++#include "rgxkicksync.h" ++#include "rgxcompute.h" ++#include "rgxtransfer.h" ++#include "rgxtdmtransfer.h" ++#include "rgxtimecorr.h" ++#include "rgx_options.h" ++#include "rgxinit.h" ++#include "devicemem_history_server.h" ++#include "info_page.h" ++#include "rgx_bvnc_defs_km.h" ++ ++#define PVR_DUMP_FIRMWARE_INFO(x) \ ++ PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x", \ ++ PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \ ++ PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \ ++ (x).ui32DDKBuild, \ ++ ((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? 
"debug":"release",\ ++ (x).ui32BuildOptions); ++ ++#define DD_SUMMARY_INDENT "" ++#define DD_NORMAL_INDENT " " ++ ++#define RGX_DEBUG_STR_SIZE (150U) ++#define MAX_FW_DESCRIPTION_LENGTH (500U) ++ ++#define RGX_CR_BIF_CAT_BASE0 (0x1200U) ++#define RGX_CR_BIF_CAT_BASE1 (0x1208U) ++ ++#define RGX_CR_BIF_CAT_BASEN(n) \ ++ RGX_CR_BIF_CAT_BASE0 + \ ++ ((RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) * n) ++ ++ ++#define RGXDBG_BIF_IDS \ ++ X(BIF0)\ ++ X(BIF1)\ ++ X(TEXAS_BIF)\ ++ X(DPX_BIF) \ ++ X(FWCORE) ++ ++#define RGXDBG_SIDEBAND_TYPES \ ++ X(META)\ ++ X(TLA)\ ++ X(DMA)\ ++ X(VDMM)\ ++ X(CDM)\ ++ X(IPP)\ ++ X(PM)\ ++ X(TILING)\ ++ X(MCU)\ ++ X(PDS)\ ++ X(PBE)\ ++ X(VDMS)\ ++ X(IPF)\ ++ X(ISP)\ ++ X(TPF)\ ++ X(USCS)\ ++ X(PPP)\ ++ X(VCE)\ ++ X(TPF_CPF)\ ++ X(IPF_CPF)\ ++ X(FBCDC) ++ ++typedef enum ++{ ++#define X(NAME) RGXDBG_##NAME, ++ RGXDBG_BIF_IDS ++#undef X ++} RGXDBG_BIF_ID; ++ ++typedef enum ++{ ++#define X(NAME) RGXDBG_##NAME, ++ RGXDBG_SIDEBAND_TYPES ++#undef X ++} RGXDBG_SIDEBAND_TYPE; ++ ++static const IMG_CHAR *const pszPowStateName[] = ++{ ++#define X(NAME) #NAME, ++ RGXFWIF_POW_STATES ++#undef X ++}; ++ ++static const IMG_CHAR *const pszBIFNames[] = ++{ ++#define X(NAME) #NAME, ++ RGXDBG_BIF_IDS ++#undef X ++}; ++ ++typedef struct _IMG_FLAGS2DESC_ ++{ ++ IMG_UINT32 uiFlag; ++ const IMG_CHAR *pszLabel; ++} IMG_FLAGS2DESC; ++ ++static const IMG_FLAGS2DESC asCswOpts2Description[] = ++{ ++ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, " Fast CSW profile;"}, ++ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, " Medium CSW profile;"}, ++ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, " Slow CSW profile;"}, ++ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, " No Delay CSW profile;"}, ++ {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, " Random Csw enabled;"}, ++ {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, " SoftReset;"}, ++}; ++ ++static const IMG_FLAGS2DESC asMisc2Description[] = ++{ ++ {RGXFWIF_INICFG_POW_RASCALDUST, " Power Rascal/Dust;"}, ++ {RGXFWIF_INICFG_HWPERF_EN, " HwPerf EN;"}, ++ {RGXFWIF_INICFG_FBCDC_V3_1_EN, " FBCDCv3.1;"}, ++ {RGXFWIF_INICFG_CHECK_MLIST_EN, " Check MList;"}, ++ {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, " ClockGating Off;"}, ++ {RGXFWIF_INICFG_REGCONFIG_EN, " Register Config;"}, ++ {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, " Assert on OOM;"}, ++ {RGXFWIF_INICFG_HWP_DISABLE_FILTER, " HWP Filter Off;"}, ++ {RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN, " CDM Random kill;"}, ++ {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, " DM Overlap Off;"}, ++ {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, " Assert on HWR;"}, ++ {RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED, " Coherent fabric on;"}, ++ {RGXFWIF_INICFG_VALIDATE_IRQ, " Validate IRQ;"}, ++ {RGXFWIF_INICFG_DISABLE_PDP_EN, " PDUMP Panic off;"}, ++ {RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN, " SPU Pow mask change on;"}, ++ {RGXFWIF_INICFG_WORKEST, " Workload Estim;"}, ++ {RGXFWIF_INICFG_PDVFS, " PDVFS;"}, ++ {RGXFWIF_INICFG_CDM_ARBITRATION_TASK_DEMAND, " CDM task demand arbitration;"}, ++ {RGXFWIF_INICFG_CDM_ARBITRATION_ROUND_ROBIN, " CDM round-robin arbitration;"}, ++ {RGXFWIF_INICFG_ISPSCHEDMODE_VER1_IPP, " ISP v1 scheduling;"}, ++ {RGXFWIF_INICFG_ISPSCHEDMODE_VER2_ISP, " ISP v2 scheduling;"}, ++ {RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER, " Validate SOC&USC timers;"}, ++}; ++ ++static const IMG_FLAGS2DESC asFwOsCfg2Description[] = ++{ ++ {RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN, " TDM;"}, ++ {RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN, " TA;"}, ++ {RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN, " 3D;"}, ++ {RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN, " CDM;"}, ++ {RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM, " LowPrio TDM;"}, ++ 
{RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM, " LowPrio TA;"}, ++ {RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D, " LowPrio 3D;"}, ++ {RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM, " LowPrio CDM;"}, ++}; ++ ++static const IMG_FLAGS2DESC asHwrState2Description[] = ++{ ++ {RGXFWIF_HWR_HARDWARE_OK, " HWR OK;"}, ++ {RGXFWIF_HWR_GENERAL_LOCKUP, " General lockup;"}, ++ {RGXFWIF_HWR_DM_RUNNING_OK, " DM running ok;"}, ++ {RGXFWIF_HWR_DM_STALLING, " DM stalling;"}, ++ {RGXFWIF_HWR_FW_FAULT, " FW fault;"}, ++ {RGXFWIF_HWR_RESTART_REQUESTED, " Restarting;"}, ++}; ++ ++static const IMG_FLAGS2DESC asDmState2Description[] = ++{ ++ {RGXFWIF_DM_STATE_READY_FOR_HWR, " ready for hwr;"}, ++ {RGXFWIF_DM_STATE_NEEDS_SKIP, " needs skip;"}, ++ {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, " needs PR cleanup;"}, ++ {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, " needs trace clear;"}, ++ {RGXFWIF_DM_STATE_GUILTY_LOCKUP, " guilty lockup;"}, ++ {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, " innocent lockup;"}, ++ {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, " guilty overrunning;"}, ++ {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, " innocent overrunning;"}, ++ {RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH, " hard context switching;"}, ++ {RGXFWIF_DM_STATE_GPU_ECC_HWR, " GPU ECC hwr;"}, ++}; ++ ++const IMG_CHAR * const gapszMipsPermissionPTFlags[4] = ++{ ++ " ", ++ "XI ", ++ "RI ", ++ "RIXI" ++}; ++ ++const IMG_CHAR * const gapszMipsCoherencyPTFlags[8] = ++{ ++ "C", ++ "C", ++ " ", ++ "C", ++ "C", ++ "C", ++ "C", ++ " " ++}; ++ ++const IMG_CHAR * const gapszMipsDirtyGlobalValidPTFlags[8] = ++{ ++ " ", ++ " G", ++ " V ", ++ " VG", ++ "D ", ++ "D G", ++ "DV ", ++ "DVG" ++}; ++ ++#if !defined(SUPPORT_TRUSTED_DEVICE) ++#if !defined(NO_HARDWARE) ++/* Translation of MIPS exception encoding */ ++typedef struct _MIPS_EXCEPTION_ENCODING_ ++{ ++ const IMG_CHAR *const pszStr; /* Error type */ ++ const IMG_BOOL bIsFatal; /* Error is fatal or non-fatal */ ++} MIPS_EXCEPTION_ENCODING; ++ ++static const MIPS_EXCEPTION_ENCODING apsMIPSExcCodes[] = ++{ ++ {"Interrupt", IMG_FALSE}, ++ {"TLB modified exception", IMG_FALSE}, ++ {"TLB exception (load/instruction fetch)", IMG_FALSE}, ++ {"TLB exception (store)", IMG_FALSE}, ++ {"Address error exception (load/instruction fetch)", IMG_TRUE}, ++ {"Address error exception (store)", IMG_TRUE}, ++ {"Bus error exception (instruction fetch)", IMG_TRUE}, ++ {"Bus error exception (load/store)", IMG_TRUE}, ++ {"Syscall exception", IMG_FALSE}, ++ {"Breakpoint exception (FW assert)", IMG_FALSE}, ++ {"Reserved instruction exception", IMG_TRUE}, ++ {"Coprocessor Unusable exception", IMG_FALSE}, ++ {"Arithmetic Overflow exception", IMG_FALSE}, ++ {"Trap exception", IMG_FALSE}, ++ {NULL, IMG_FALSE}, ++ {NULL, IMG_FALSE}, ++ {"Implementation-Specific Exception 1 (COP2)", IMG_FALSE}, ++ {"CorExtend Unusable", IMG_FALSE}, ++ {"Coprocessor 2 exceptions", IMG_FALSE}, ++ {"TLB Read-Inhibit", IMG_TRUE}, ++ {"TLB Execute-Inhibit", IMG_TRUE}, ++ {NULL, IMG_FALSE}, ++ {NULL, IMG_FALSE}, ++ {"Reference to WatchHi/WatchLo address", IMG_FALSE}, ++ {"Machine check", IMG_FALSE}, ++ {NULL, IMG_FALSE}, ++ {"DSP Module State Disabled exception", IMG_FALSE}, ++ {NULL, IMG_FALSE}, ++ {NULL, IMG_FALSE}, ++ {NULL, IMG_FALSE}, ++ /* Can only happen in MIPS debug mode */ ++ {"Parity error", IMG_FALSE}, ++ {NULL, IMG_FALSE} ++}; ++ ++static IMG_CHAR const *_GetMIPSExcString(IMG_UINT32 ui32ExcCode) ++{ ++ if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "Only %lu exceptions available in MIPS, %u is not a valid exception code", ++ (unsigned 
long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); ++ return NULL; ++ } ++ ++ return apsMIPSExcCodes[ui32ExcCode].pszStr; ++} ++#endif ++#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */ ++ ++typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_ ++{ ++ IMG_UINT32 ui32Mask; ++ const IMG_CHAR * pszExplanation; ++} RGXMIPSFW_C0_DEBUG_TBL_ENTRY; ++ ++#if !defined(SUPPORT_TRUSTED_DEVICE) ++#if !defined(NO_HARDWARE) ++static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] = ++{ ++ { RGXMIPSFW_C0_DEBUG_DSS, "Debug single-step exception occurred" }, ++ { RGXMIPSFW_C0_DEBUG_DBP, "Debug software breakpoint exception occurred" }, ++ { RGXMIPSFW_C0_DEBUG_DDBL, "Debug data break exception occurred on a load" }, ++ { RGXMIPSFW_C0_DEBUG_DDBS, "Debug data break exception occurred on a store" }, ++ { RGXMIPSFW_C0_DEBUG_DIB, "Debug instruction break exception occurred" }, ++ { RGXMIPSFW_C0_DEBUG_DINT, "Debug interrupt exception occurred" }, ++ { RGXMIPSFW_C0_DEBUG_DIBIMPR, "Imprecise debug instruction break exception occurred" }, ++ { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" }, ++ { RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" }, ++ { RGXMIPSFW_C0_DEBUG_IEXI, "Imprecise error exception inhibit controls exception occurred" }, ++ { RGXMIPSFW_C0_DEBUG_DBUSEP, "Data access Bus Error exception pending" }, ++ { RGXMIPSFW_C0_DEBUG_CACHEEP, "Imprecise Cache Error pending" }, ++ { RGXMIPSFW_C0_DEBUG_MCHECKP, "Imprecise Machine Check exception pending" }, ++ { RGXMIPSFW_C0_DEBUG_IBUSEP, "Instruction fetch Bus Error exception pending" }, ++ { (IMG_UINT32)RGXMIPSFW_C0_DEBUG_DBD, "Debug exception occurred in branch delay slot" } ++}; ++#endif ++#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */ ++ ++static const IMG_CHAR * const apszFwOsStateName[RGXFW_CONNECTION_FW_STATE_COUNT] = ++{ ++ "offline", ++ "ready", ++ "active", ++ "offloading" ++}; ++ ++#if defined(PVR_ENABLE_PHR) ++static const IMG_FLAGS2DESC asPHRConfig2Description[] = ++{ ++ {BIT_ULL(RGXFWIF_PHR_MODE_OFF), "off"}, ++ {BIT_ULL(RGXFWIF_PHR_MODE_RD_RESET), "reset RD hardware"}, ++ {BIT_ULL(RGXFWIF_PHR_MODE_FULL_RESET), "full gpu reset "}, ++}; ++#endif ++ ++static PVRSRV_ERROR ++RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset, ++ IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask) ++{ ++ IMG_UINT32 ui32RegValue, ui32NumPolls = 0; ++ PVRSRV_ERROR eError; ++ ++ do ++ { ++ eError = RGXReadFWModuleAddr(psDevInfo, ui32RegOffset, &ui32RegValue); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ } while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000)); ++ ++ return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY; ++} ++ ++static PVRSRV_ERROR ++RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Core Read Ready? */ ++ eError = RGXPollMetaRegThroughSP(psDevInfo, ++ META_CR_TXUXXRXRQ_OFFSET, ++ META_CR_TXUXXRXRQ_DREADY_BIT, ++ META_CR_TXUXXRXRQ_DREADY_BIT); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); ++ ++ /* Set the reg we are interested in reading */ ++ eError = RGXWriteFWModuleAddr(psDevInfo, META_CR_TXUXXRXRQ_OFFSET, ++ ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXWriteFWModuleAddr"); ++ ++ /* Core Read Done? 
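++	 * (the preceding write only posts the read request; DREADY must be
++	 * polled again before the result in TXUXXRXDT is valid)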
*/ ++ eError = RGXPollMetaRegThroughSP(psDevInfo, ++ META_CR_TXUXXRXRQ_OFFSET, ++ META_CR_TXUXXRXRQ_DREADY_BIT, ++ META_CR_TXUXXRXRQ_DREADY_BIT); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXPollMetaRegThroughSP"); ++ ++ /* Read the value */ ++ return RGXReadFWModuleAddr(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal); ++} ++ ++#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) ++static PVRSRV_ERROR _ValidateWithFWModule(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_DEV_VIRTADDR *psFWAddr, ++ void *pvHostCodeAddr, ++ IMG_UINT32 ui32MaxLen, ++ const IMG_CHAR *pszDesc, ++ IMG_UINT32 ui32StartOffset) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT32 ui32Value = 0; ++ IMG_UINT32 ui32FWCodeDevVAAddr = psFWAddr->ui32Addr + ui32StartOffset; ++ IMG_UINT32 *pui32FWCode = (IMG_PUINT32) ((IMG_PBYTE)pvHostCodeAddr + ui32StartOffset); ++ IMG_UINT32 i; ++ ++#if defined(EMULATOR) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ return PVRSRV_OK; ++ } ++#endif ++ ++ ui32MaxLen -= ui32StartOffset; ++ ui32MaxLen /= sizeof(IMG_UINT32); /* Byte -> 32 bit words */ ++ ++ for (i = 0; i < ui32MaxLen; i++) ++ { ++ eError = RGXReadFWModuleAddr(psDevInfo, ui32FWCodeDevVAAddr, &ui32Value); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: %s", __func__, PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++#if defined(EMULATOR) ++ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++#endif ++ { ++ PVR_DPF((PVR_DBG_VERBOSE, "0x%x: CPU 0x%08x, FW 0x%08x", i * 4, pui32FWCode[i], ui32Value)); ++ ++ if (pui32FWCode[i] != ui32Value) ++ { ++ PVR_DUMPDEBUG_LOG("%s: Mismatch while validating %s at offset 0x%x: CPU 0x%08x (%p), FW 0x%08x (%x)", ++ __func__, pszDesc, ++ (i * 4) + ui32StartOffset, pui32FWCode[i], pui32FWCode, ui32Value, ui32FWCodeDevVAAddr); ++ return PVRSRV_ERROR_FW_IMAGE_MISMATCH; ++ } ++ } ++ ++ ui32FWCodeDevVAAddr += 4; ++ } ++ ++ PVR_DUMPDEBUG_LOG("Match between Host and Firmware view of the %s", pszDesc); ++ return PVRSRV_OK; ++} ++#endif ++ ++static PVRSRV_ERROR _ValidateFWImage(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) ++ PVRSRV_ERROR eError; ++ IMG_UINT32 *pui32HostFWCode = NULL, *pui32HostFWCoremem = NULL; ++ OS_FW_IMAGE *psRGXFW = NULL; ++ const IMG_BYTE *pbRGXFirmware = NULL; ++ IMG_UINT32 *pui32CodeMemoryPointer; ++ RGXFWIF_DEV_VIRTADDR sFWAddr; ++ IMG_UINT32 ui32StartOffset = 0; ++ RGX_LAYER_PARAMS sLayerParams; ++ sLayerParams.psDevInfo = psDevInfo; ++ ++#if defined(EMULATOR) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ PVR_DUMPDEBUG_LOG("Validation of RISC-V FW code is disabled on emulator"); ++ return PVRSRV_OK; ++ } ++#endif ++ ++ if (psDevInfo->pvRegsBaseKM == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: RGX registers not mapped yet!", __func__)); ++ return PVRSRV_ERROR_BAD_MAPPING; ++ } ++ ++ /* Load FW from system for code verification */ ++ pui32HostFWCode = OSAllocZMem(psDevInfo->ui32FWCodeSizeInBytes); ++ if (pui32HostFWCode == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed in allocating memory for FW code. 
" ++ "So skipping FW code verification", ++ __func__)); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ if (psDevInfo->ui32FWCorememCodeSizeInBytes) ++ { ++ pui32HostFWCoremem = OSAllocZMem(psDevInfo->ui32FWCorememCodeSizeInBytes); ++ if (pui32HostFWCoremem == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed in allocating memory for FW core code. " ++ "So skipping FW code verification", ++ __func__)); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto freeHostFWCode; ++ } ++ } ++ ++ /* Load FW image */ ++ eError = RGXLoadAndGetFWData(psDevInfo->psDeviceNode, &psRGXFW, &pbRGXFirmware); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load FW image file (%s).", ++ __func__, PVRSRVGetErrorString(eError))); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto cleanup_initfw; ++ } ++ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ eError = ProcessLDRCommandStream(&sLayerParams, pbRGXFirmware, ++ (void*) pui32HostFWCode, NULL, ++ (void*) pui32HostFWCoremem, NULL, NULL); ++ } ++ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, ++ pui32HostFWCode, NULL, ++ NULL, NULL); ++ } ++ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ eError = ProcessELFCommandStream(&sLayerParams, pbRGXFirmware, ++ pui32HostFWCode, NULL, ++ pui32HostFWCoremem, NULL); ++ } ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed in parsing FW image file.", __func__)); ++ goto cleanup_initfw; ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32CodeMemoryPointer); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error in acquiring MIPS FW code memory area (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto cleanup_initfw; ++ } ++ ++ if (OSMemCmp(pui32HostFWCode, pui32CodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes) == 0) ++ { ++ PVR_DUMPDEBUG_LOG("Match between Host and MIPS views of the FW code" ); ++ } ++ else ++ { ++ IMG_UINT32 ui32Count = 10; /* Show only the first 10 mismatches */ ++ IMG_UINT32 ui32Offset; ++ ++ PVR_DUMPDEBUG_LOG("Mismatch between Host and MIPS views of the FW code"); ++ for (ui32Offset = 0; (ui32Offset*4 < psDevInfo->ui32FWCodeSizeInBytes) || (ui32Count == 0); ui32Offset++) ++ { ++ if (pui32HostFWCode[ui32Offset] != pui32CodeMemoryPointer[ui32Offset]) ++ { ++ PVR_DUMPDEBUG_LOG("At %d bytes, code should be 0x%x but it is instead 0x%x", ++ ui32Offset*4, pui32HostFWCode[ui32Offset], pui32CodeMemoryPointer[ui32Offset]); ++ ui32Count--; ++ } ++ } ++ } ++ ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); ++ } ++ else ++ { ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ /* starting checking after BOOT LOADER config */ ++ sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; ++ ++ ui32StartOffset = RGXFW_MAX_BOOTLDR_OFFSET; ++ } ++ else ++ { ++ /* Use bootloader code remap which is always configured before the FW is started */ ++ sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE; ++ } ++ ++ eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, ++ psDevInfo, &sFWAddr, ++ pui32HostFWCode, psDevInfo->ui32FWCodeSizeInBytes, ++ "FW code", ui32StartOffset); ++ if (eError != PVRSRV_OK) ++ { ++ goto cleanup_initfw; ++ } ++ ++ if (psDevInfo->ui32FWCorememCodeSizeInBytes) ++ { ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); ++ 
} ++ else ++ { ++ sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); ++ ++ /* Core must be halted while issuing abstract commands */ ++ eError = RGXRiscvHalt(psDevInfo); ++ PVR_GOTO_IF_ERROR(eError, cleanup_initfw); ++ } ++ ++ eError = _ValidateWithFWModule(pfnDumpDebugPrintf, pvDumpDebugFile, ++ psDevInfo, &sFWAddr, ++ pui32HostFWCoremem, psDevInfo->ui32FWCorememCodeSizeInBytes, ++ "FW coremem code", 0); ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ eError = RGXRiscvResume(psDevInfo); ++ PVR_GOTO_IF_ERROR(eError, cleanup_initfw); ++ } ++ } ++ } ++ ++cleanup_initfw: ++ if (psRGXFW) ++ { ++ OSUnloadFirmware(psRGXFW); ++ } ++ ++ if (pui32HostFWCoremem) ++ { ++ OSFreeMem(pui32HostFWCoremem); ++ } ++freeHostFWCode: ++ if (pui32HostFWCode) ++ { ++ OSFreeMem(pui32HostFWCode); ++ } ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf); ++ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile); ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ return PVRSRV_OK; ++#endif ++} ++ ++#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) ++PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE) ++ IMG_PBYTE pbCodeMemoryPointer; ++ PVRSRV_ERROR eError; ++ RGXFWIF_DEV_VIRTADDR sFWAddr; ++ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pbCodeMemoryPointer); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR; ++ } ++ else ++ { ++ PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); ++ sFWAddr.ui32Addr = RGXRISCVFW_BOOTLDR_CODE_BASE; ++ }; ++ ++ eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, psDevInfo->ui32FWCodeSizeInBytes, "FW code", 0); ++ if (eError != PVRSRV_OK) ++ { ++ goto releaseFWCodeMapping; ++ } ++ ++ if (psDevInfo->ui32FWCorememCodeSizeInBytes) ++ { ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, (void **)&pbCodeMemoryPointer); ++ if (eError != PVRSRV_OK) ++ { ++ goto releaseFWCoreCodeMapping; ++ } ++ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, META_COREMEM_CODE); ++ } ++ else ++ { ++ PVR_ASSERT(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)); ++ sFWAddr.ui32Addr = RGXGetFWImageSectionAddress(NULL, RISCV_COREMEM_CODE); ++ } ++ ++ eError = _ValidateWithFWModule(NULL, NULL, psDevInfo, &sFWAddr, pbCodeMemoryPointer, ++ psDevInfo->ui32FWCorememCodeSizeInBytes, "FW coremem code", 0); ++ } ++ ++releaseFWCoreCodeMapping: ++ if (psDevInfo->ui32FWCorememCodeSizeInBytes) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); ++ } ++releaseFWCodeMapping: ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ return PVRSRV_OK; ++#endif ++} ++#endif ++ ++/*! 
++******************************************************************************* ++ ++ @Function _RGXDecodePMPC ++ ++ @Description ++ ++ Return the name for the PM managed Page Catalogue ++ ++ @Input ui32PC - Page Catalogue number ++ ++ @Return const IMG_CHAR* - String with the name of the PM managed Page Catalogue ++ ++******************************************************************************/ ++static const IMG_CHAR* _RGXDecodePMPC(IMG_UINT32 ui32PC) ++{ ++ const IMG_CHAR* pszPMPC = " (-)"; ++ ++ switch (ui32PC) ++ { ++ case 0x8: pszPMPC = " (PM-VCE0)"; break; ++ case 0x9: pszPMPC = " (PM-TE0)"; break; ++ case 0xA: pszPMPC = " (PM-ZLS0)"; break; ++ case 0xB: pszPMPC = " (PM-ALIST0)"; break; ++ case 0xC: pszPMPC = " (PM-VCE1)"; break; ++ case 0xD: pszPMPC = " (PM-TE1)"; break; ++ case 0xE: pszPMPC = " (PM-ZLS1)"; break; ++ case 0xF: pszPMPC = " (PM-ALIST1)"; break; ++ } ++ ++ return pszPMPC; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function _RGXDecodeBIFReqTags ++ ++ @Description ++ ++ Decode the BIF Tag ID and sideband data fields from BIF_FAULT_BANK_REQ_STATUS regs ++ ++ @Input eBankID - BIF identifier ++ @Input ui32TagID - Tag ID value ++ @Input ui32TagSB - Tag Sideband data ++ @Output ppszTagID - Decoded string from the Tag ID ++ @Output ppszTagSB - Decoded string from the Tag SB ++ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings ++ @Input ui32ScratchBufSize - Size of the provided buffer ++ ++ @Return void ++ ++******************************************************************************/ ++#include "rgxmhdefs_km.h" ++ ++static void _RGXDecodeBIFReqTagsXE(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32TagID, ++ IMG_UINT32 ui32TagSB, ++ IMG_CHAR **ppszTagID, ++ IMG_CHAR **ppszTagSB, ++ IMG_CHAR *pszScratchBuf, ++ IMG_UINT32 ui32ScratchBufSize) ++{ ++ /* default to unknown */ ++ IMG_CHAR *pszTagID = "-"; ++ IMG_CHAR *pszTagSB = "-"; ++ IMG_BOOL bNewTagEncoding = IMG_FALSE; ++ ++ PVR_ASSERT(ppszTagID != NULL); ++ PVR_ASSERT(ppszTagSB != NULL); ++ ++ /* tags updated for all cores (auto & consumer) with branch > 36 or only auto cores with branch = 36 */ ++ if ((psDevInfo->sDevFeatureCfg.ui32B > 36) || ++ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TILE_REGION_PROTECTION) && (psDevInfo->sDevFeatureCfg.ui32B == 36))) ++ { ++ bNewTagEncoding = IMG_TRUE; ++ } ++ ++ switch (ui32TagID) ++ { ++ /* MMU tags */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_MMU: ++ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU: ++ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU: ++ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU: ++ { ++ switch (ui32TagID) ++ { ++ case RGX_MH_TAG_ENCODING_MH_TAG_MMU: pszTagID = "MMU"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU: pszTagID = "CPU MMU"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU: pszTagID = "CPU IFU"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU: pszTagID = "CPU LSU"; break; ++ } ++ switch (ui32TagSB) ++ { ++ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST: pszTagSB = "PT"; break; ++ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST: pszTagSB = "PD"; break; ++ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST: pszTagSB = "PC"; break; ++ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST: pszTagSB = "PM PT"; break; ++ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST: pszTagSB = "PM PD"; break; ++ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST: pszTagSB = "PM PC"; break; ++ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST: pszTagSB = "PM PD W"; break; ++ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST: pszTagSB = "PM PC
W"; break; ++ } ++ break; ++ } ++ ++ /* MIPS */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_MIPS: ++ { ++ pszTagID = "MIPS"; ++ switch (ui32TagSB) ++ { ++ case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH: pszTagSB = "Opcode"; break; ++ case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS: pszTagSB = "Data"; break; ++ } ++ break; ++ } ++ ++ /* CDM tags */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0: ++ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1: ++ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2: ++ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3: ++ { ++ switch (ui32TagID) ++ { ++ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0: pszTagID = "CDM Stage 0"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1: pszTagID = "CDM Stage 1"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2: pszTagID = "CDM Stage 2"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3: pszTagID = "CDM Stage 3"; break; ++ } ++ switch (ui32TagSB) ++ { ++ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM: pszTagSB = "Control"; break; ++ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA: pszTagSB = "Indirect"; break; ++ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA: pszTagSB = "Event"; break; ++ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE: pszTagSB = "Context"; break; ++ } ++ break; ++ } ++ ++ /* VDM tags */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0: ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1: ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2: ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3: ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4: ++ { ++ switch (ui32TagID) ++ { ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0: pszTagID = "VDM Stage 0"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1: pszTagID = "VDM Stage 1"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2: pszTagID = "VDM Stage 2"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3: pszTagID = "VDM Stage 3"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4: pszTagID = "VDM Stage 4"; break; ++ } ++ switch (ui32TagSB) ++ { ++ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL: pszTagSB = "Control"; break; ++ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE: pszTagSB = "State"; break; ++ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX: pszTagSB = "Index"; break; ++ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK: pszTagSB = "Stack"; break; ++ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT: pszTagSB = "Context"; break; ++ } ++ break; ++ } ++ ++ /* PDS */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_PDS_0: ++ pszTagID = "PDS req 0"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_PDS_1: ++ pszTagID = "PDS req 1"; break; ++ ++ /* MCU */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA: ++ pszTagID = "MCU USCA"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB: ++ pszTagID = "MCU USCB"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC: ++ pszTagID = "MCU USCC"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD: ++ pszTagID = "MCU USCD"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA: ++ pszTagID = "MCU PDS USCA"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB: ++ pszTagID = "MCU PDS USCB"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC: ++ pszTagID = "MCU PDS USCC"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD: ++ pszTagID = "MCU PDSUSCD"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW: ++ pszTagID = "MCU PDS PDSRW"; break; ++ ++ /* TCU */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_TCU_0: ++ pszTagID = "TCU req 0"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TCU_1: ++ pszTagID = "TCU req 1"; break; ++ ++ /* FBCDC */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0: ++ pszTagID 
= bNewTagEncoding ? "TFBDC_TCU0" : "FBCDC0"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1: ++ pszTagID = bNewTagEncoding ? "TFBDC_ZLS0" : "FBCDC1"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_2: ++ pszTagID = bNewTagEncoding ? "TFBDC_TCU1" : "FBCDC2"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_3: ++ pszTagID = bNewTagEncoding ? "TFBDC_ZLS1" : "FBCDC3"; break; ++ ++ /* USC Shared */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_USC: ++ pszTagID = "USCS"; break; ++ ++ /* ISP */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS: ++ pszTagID = "ISP0 ZLS"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS: ++ pszTagID = "ISP0 DS"; break; ++ ++ /* TPF */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_TPF: ++ case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS: ++ case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF: ++ { ++ switch (ui32TagID) ++ { ++ case RGX_MH_TAG_ENCODING_MH_TAG_TPF: pszTagID = "TPF0"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS: pszTagID = "TPF0 DBIAS"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF: pszTagID = "TPF0 SPF"; break; ++ } ++ switch (ui32TagSB) ++ { ++ case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE: pszTagSB = "PDS state"; break; ++ case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS: pszTagSB = "Depth bias"; break; ++ case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA: pszTagSB = "Floor offset"; break; ++ case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA: pszTagSB = "Delta"; break; ++ } ++ break; ++ } ++ ++ /* IPF */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ: ++ case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS: ++ { ++ switch (ui32TagID) ++ { ++ case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ: pszTagID = "IPF0"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS: pszTagID = "IPF0"; break; ++ } ++ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_ISP_IPP_PIPES)) ++ { ++ if (ui32TagSB < RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES)) ++ { ++ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "CReq%d", ui32TagSB); ++ pszTagSB = pszScratchBuf; ++ } ++ else if (ui32TagSB < 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES)) ++ { ++ ui32TagSB -= RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES); ++ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "PReq%d", ui32TagSB); ++ pszTagSB = pszScratchBuf; ++ } ++ else ++ { ++ switch (ui32TagSB - 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES)) ++ { ++ case 0: pszTagSB = "RReq"; break; ++ case 1: pszTagSB = "DBSC"; break; ++ case 2: pszTagSB = "CPF"; break; ++ case 3: pszTagSB = "Delta"; break; ++ } ++ } ++ } ++ break; ++ } ++ ++ /* VDM Stage 5 (temporary) */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5: ++ pszTagID = "VDM Stage 5"; break; ++ ++ /* TA */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP: ++ pszTagID = "PPP"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC: ++ pszTagID = "TPW RTC"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC: ++ pszTagID = "TEAC RTC"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC: ++ pszTagID = "PSG RTC"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION: ++ pszTagID = "PSG Region"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM: ++ pszTagID = "PSG Stream"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW: ++ pszTagID = "TPW"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC: ++ pszTagID = "TPC"; break; ++ ++ /* PM */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC: ++ { ++ pszTagID = "PMA"; ++ switch (ui32TagSB) ++ { ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK: pszTagSB = "TA Fstack"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST: pszTagSB = "TA MList";
break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK: pszTagSB = "3D Fstack"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST: pszTagSB = "3D MList"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0: pszTagSB = "Context0"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1: pszTagSB = "Context1"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP: pszTagSB = "MAVP"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK: pszTagSB = "UFstack"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK: pszTagSB = "TA MMUstack"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK: pszTagSB = "3D MMUstack"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK: pszTagSB = "TA UFstack"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK: pszTagSB = "3D UFstack"; break; ++ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP: pszTagSB = "TA VFP"; break; ++ } ++ break; ++ } ++ case RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC: ++ { ++ pszTagID = "PMD"; ++ switch (ui32TagSB) ++ { ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK: pszTagSB = "TA Fstack"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST: pszTagSB = "TA MList"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK: pszTagSB = "3D Fstack"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST: pszTagSB = "3D MList"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0: pszTagSB = "Context0"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1: pszTagSB = "Context1"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK: pszTagSB = "UFstack"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK: pszTagSB = "TA MMUstack"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK: pszTagSB = "3D MMUstack"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK: pszTagSB = "TA UFstack"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK: pszTagSB = "3D UFstack"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP: pszTagSB = "TA VFP"; break; ++ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP: pszTagSB = "3D VFP"; break; ++ } ++ break; ++ } ++ ++ /* TDM */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA: ++ { ++ pszTagID = "TDM DMA"; ++ switch (ui32TagSB) ++ { ++ case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM: pszTagSB = "Ctl stream"; break; ++ case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER: pszTagSB = "Ctx buffer"; break; ++ case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL: pszTagSB = "Queue ctl"; break; ++ } ++ break; ++ } ++ case RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL: ++ { ++ pszTagID = "TDM CTL"; ++ switch (ui32TagSB) ++ { ++ case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE: pszTagSB = "Fence"; break; ++ case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT: pszTagSB = "Context"; break; ++ case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE: pszTagSB = "Queue"; break; ++ } ++ break; ++ } ++ ++ /* PBE */ ++ case RGX_MH_TAG_ENCODING_MH_TAG_PBE0: ++ pszTagID = "PBE0"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_PBE1: ++ pszTagID = "PBE1"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_PBE2: ++ pszTagID = "PBE2"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_PBE3: ++ pszTagID = "PBE3"; break; ++ } ++ ++ *ppszTagID = pszTagID; ++ *ppszTagSB = pszTagSB; ++} ++ ++/* RISC-V pf tags */ ++#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU (0x00000001U) ++#define RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU (0x00000002U) ++#define 
RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU (0x00000003U) ++ ++static void _RGXDecodeBIFReqTagsFwcore(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32TagID, ++ IMG_UINT32 ui32TagSB, ++ IMG_CHAR **ppszTagID, ++ IMG_CHAR **ppszTagSB) ++{ ++ /* default to unknown */ ++ IMG_CHAR *pszTagID = "-"; ++ IMG_CHAR *pszTagSB = "-"; ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ pszTagSB = "RISC-V"; ++ ++ switch (ui32TagID) ++ { ++ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_MMU: pszTagID = "RISC-V MMU"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_IFU: pszTagID = "RISC-V Instruction Fetch Unit"; break; ++ case RGX_MH_TAG_ENCODING_MH_TAG_CPU_LSU: pszTagID = "RISC-V Load/Store Unit"; break; /* Or Debug Module System Bus */ ++ } ++ } ++ ++ *ppszTagID = pszTagID; ++ *ppszTagSB = pszTagSB; ++} ++ ++static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXDBG_BIF_ID eBankID, ++ IMG_UINT32 ui32TagID, ++ IMG_UINT32 ui32TagSB, ++ IMG_CHAR **ppszTagID, ++ IMG_CHAR **ppszTagSB, ++ IMG_CHAR *pszScratchBuf, ++ IMG_UINT32 ui32ScratchBufSize) ++{ ++ /* default to unknown */ ++ IMG_CHAR *pszTagID = "-"; ++ IMG_CHAR *pszTagSB = "-"; ++ ++ PVR_ASSERT(ppszTagID != NULL); ++ PVR_ASSERT(ppszTagSB != NULL); ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) ++ { ++ if (eBankID == RGXDBG_FWCORE) ++ { ++ _RGXDecodeBIFReqTagsFwcore(psDevInfo, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB); ++ } ++ else ++ { ++ _RGXDecodeBIFReqTagsXE(psDevInfo, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize); ++ } ++ return; ++ } ++ ++ switch (ui32TagID) ++ { ++ case 0x0: ++ { ++ pszTagID = "MMU"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Table"; break; ++ case 0x1: pszTagSB = "Directory"; break; ++ case 0x2: pszTagSB = "Catalogue"; break; ++ } ++ break; ++ } ++ case 0x1: ++ { ++ pszTagID = "TLA"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Pixel data"; break; ++ case 0x1: pszTagSB = "Command stream data"; break; ++ case 0x2: pszTagSB = "Fence or flush"; break; ++ } ++ break; ++ } ++ case 0x2: ++ { ++ pszTagID = "HOST"; ++ break; ++ } ++ case 0x3: ++ { ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ pszTagID = "META"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "DCache - Thread 0"; break; ++ case 0x1: pszTagSB = "ICache - Thread 0"; break; ++ case 0x2: pszTagSB = "JTag - Thread 0"; break; ++ case 0x3: pszTagSB = "Slave bus - Thread 0"; break; ++ case 0x4: pszTagSB = "DCache - Thread 1"; break; ++ case 0x5: pszTagSB = "ICache - Thread 1"; break; ++ case 0x6: pszTagSB = "JTag - Thread 1"; break; ++ case 0x7: pszTagSB = "Slave bus - Thread 1"; break; ++ } ++ } ++ else if (RGX_IS_ERN_SUPPORTED(psDevInfo, 57596)) ++ { ++ pszTagID = "TCU"; ++ } ++ else ++ { ++ /* Unreachable code */ ++ PVR_ASSERT(IMG_FALSE); ++ } ++ break; ++ } ++ case 0x4: ++ { ++ pszTagID = "USC"; ++ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, ++ "Cache line %d", (ui32TagSB & 0x3f)); ++ pszTagSB = pszScratchBuf; ++ break; ++ } ++ case 0x5: ++ { ++ pszTagID = "PBE"; ++ break; ++ } ++ case 0x6: ++ { ++ pszTagID = "ISP"; ++ switch (ui32TagSB) ++ { ++ case 0x00: pszTagSB = "ZLS"; break; ++ case 0x20: pszTagSB = "Occlusion Query"; break; ++ } ++ break; ++ } ++ case 0x7: ++ { ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) ++ { ++ if (eBankID == RGXDBG_TEXAS_BIF) ++ { ++ pszTagID = "IPF"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "CPF"; break; ++ case 0x1: pszTagSB = "DBSC"; break; ++ case 0x2: ++ case 0x4: ++ case 0x6: ++ case 0x8: pszTagSB = "Control
Stream"; break; ++ case 0x3: ++ case 0x5: ++ case 0x7: ++ case 0x9: pszTagSB = "Primitive Block"; break; ++ } ++ } ++ else ++ { ++ pszTagID = "IPP"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Macrotile Header"; break; ++ case 0x1: pszTagSB = "Region Header"; break; ++ } ++ } ++ } ++ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIMPLE_INTERNAL_PARAMETER_FORMAT)) ++ { ++ pszTagID = "IPF"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Region Header"; break; ++ case 0x1: pszTagSB = "DBSC"; break; ++ case 0x2: pszTagSB = "CPF"; break; ++ case 0x3: pszTagSB = "Control Stream"; break; ++ case 0x4: pszTagSB = "Primitive Block"; break; ++ } ++ } ++ else ++ { ++ pszTagID = "IPF"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Macrotile Header"; break; ++ case 0x1: pszTagSB = "Region Header"; break; ++ case 0x2: pszTagSB = "DBSC"; break; ++ case 0x3: pszTagSB = "CPF"; break; ++ case 0x4: ++ case 0x6: ++ case 0x8: pszTagSB = "Control Stream"; break; ++ case 0x5: ++ case 0x7: ++ case 0x9: pszTagSB = "Primitive Block"; break; ++ } ++ } ++ break; ++ } ++ case 0x8: ++ { ++ pszTagID = "CDM"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Control Stream"; break; ++ case 0x1: pszTagSB = "Indirect Data"; break; ++ case 0x2: pszTagSB = "Event Write"; break; ++ case 0x3: pszTagSB = "Context State"; break; ++ } ++ break; ++ } ++ case 0x9: ++ { ++ pszTagID = "VDM"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Control Stream"; break; ++ case 0x1: pszTagSB = "PPP State"; break; ++ case 0x2: pszTagSB = "Index Data"; break; ++ case 0x4: pszTagSB = "Call Stack"; break; ++ case 0x8: pszTagSB = "Context State"; break; ++ } ++ break; ++ } ++ case 0xA: ++ { ++ pszTagID = "PM"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "PMA_TAFSTACK"; break; ++ case 0x1: pszTagSB = "PMA_TAMLIST"; break; ++ case 0x2: pszTagSB = "PMA_3DFSTACK"; break; ++ case 0x3: pszTagSB = "PMA_3DMLIST"; break; ++ case 0x4: pszTagSB = "PMA_PMCTX0"; break; ++ case 0x5: pszTagSB = "PMA_PMCTX1"; break; ++ case 0x6: pszTagSB = "PMA_MAVP"; break; ++ case 0x7: pszTagSB = "PMA_UFSTACK"; break; ++ case 0x8: pszTagSB = "PMD_TAFSTACK"; break; ++ case 0x9: pszTagSB = "PMD_TAMLIST"; break; ++ case 0xA: pszTagSB = "PMD_3DFSTACK"; break; ++ case 0xB: pszTagSB = "PMD_3DMLIST"; break; ++ case 0xC: pszTagSB = "PMD_PMCTX0"; break; ++ case 0xD: pszTagSB = "PMD_PMCTX1"; break; ++ case 0xF: pszTagSB = "PMD_UFSTACK"; break; ++ case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break; ++ case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break; ++ case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break; ++ case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break; ++ case 0x14: pszTagSB = "PMA_TAUFSTACK"; break; ++ case 0x15: pszTagSB = "PMA_3DUFSTACK"; break; ++ case 0x16: pszTagSB = "PMD_TAUFSTACK"; break; ++ case 0x17: pszTagSB = "PMD_3DUFSTACK"; break; ++ case 0x18: pszTagSB = "PMA_TAVFP"; break; ++ case 0x19: pszTagSB = "PMD_3DVFP"; break; ++ case 0x1A: pszTagSB = "PMD_TAVFP"; break; ++ } ++ break; ++ } ++ case 0xB: ++ { ++ pszTagID = "TA"; ++ switch (ui32TagSB) ++ { ++ case 0x1: pszTagSB = "VCE"; break; ++ case 0x2: pszTagSB = "TPC"; break; ++ case 0x3: pszTagSB = "TE Control Stream"; break; ++ case 0x4: pszTagSB = "TE Region Header"; break; ++ case 0x5: pszTagSB = "TE Render Target Cache"; break; ++ case 0x6: pszTagSB = "TEAC Render Target Cache"; break; ++ case 0x7: pszTagSB = "VCE Render Target Cache"; break; ++ case 0x8: pszTagSB = "PPP Context State"; break; ++ } ++ break; ++ } ++ case 0xC: ++ { ++ pszTagID = "TPF"; ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = 
"TPF0: Primitive Block"; break; ++ case 0x1: pszTagSB = "TPF0: Depth Bias"; break; ++ case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break; ++ case 0x3: pszTagSB = "CPF - Tables"; break; ++ case 0x4: pszTagSB = "TPF1: Primitive Block"; break; ++ case 0x5: pszTagSB = "TPF1: Depth Bias"; break; ++ case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break; ++ case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break; ++ case 0x8: pszTagSB = "TPF2: Primitive Block"; break; ++ case 0x9: pszTagSB = "TPF2: Depth Bias"; break; ++ case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break; ++ case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break; ++ case 0xC: pszTagSB = "TPF3: Primitive Block"; break; ++ case 0xD: pszTagSB = "TPF3: Depth Bias"; break; ++ case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break; ++ case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break; ++ } ++ break; ++ } ++ case 0xD: ++ { ++ pszTagID = "PDS"; ++ break; ++ } ++ case 0xE: ++ { ++ pszTagID = "MCU"; ++ { ++ IMG_UINT32 ui32Burst = (ui32TagSB >> 5) & 0x7; ++ IMG_UINT32 ui32GroupEnc = (ui32TagSB >> 2) & 0x7; ++ IMG_UINT32 ui32Group = ui32TagSB & 0x3; ++ ++ IMG_CHAR* pszBurst = ""; ++ IMG_CHAR* pszGroupEnc = ""; ++ IMG_CHAR* pszGroup = ""; ++ ++ switch (ui32Burst) ++ { ++ case 0x0: ++ case 0x1: pszBurst = "128bit word within the Lower 256bits"; break; ++ case 0x2: ++ case 0x3: pszBurst = "128bit word within the Upper 256bits"; break; ++ case 0x4: pszBurst = "Lower 256bits"; break; ++ case 0x5: pszBurst = "Upper 256bits"; break; ++ case 0x6: pszBurst = "512 bits"; break; ++ } ++ switch (ui32GroupEnc) ++ { ++ case 0x0: pszGroupEnc = "TPUA_USC"; break; ++ case 0x1: pszGroupEnc = "TPUB_USC"; break; ++ case 0x2: pszGroupEnc = "USCA_USC"; break; ++ case 0x3: pszGroupEnc = "USCB_USC"; break; ++ case 0x4: pszGroupEnc = "PDS_USC"; break; ++ case 0x5: ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && ++ 6 > RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) ++ { ++ pszGroupEnc = "PDSRW"; ++ } else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && ++ 6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) ++ { ++ pszGroupEnc = "UPUC_USC"; ++ } ++ break; ++ case 0x6: ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && ++ 6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) ++ { ++ pszGroupEnc = "TPUC_USC"; ++ } ++ break; ++ case 0x7: ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) && ++ 6 == RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) ++ { ++ pszGroupEnc = "PDSRW"; ++ } ++ break; ++ } ++ switch (ui32Group) ++ { ++ case 0x0: pszGroup = "Banks 0-3"; break; ++ case 0x1: pszGroup = "Banks 4-7"; break; ++ case 0x2: pszGroup = "Banks 8-11"; break; ++ case 0x3: pszGroup = "Banks 12-15"; break; ++ } ++ ++ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, ++ "%s, %s, %s", pszBurst, pszGroupEnc, pszGroup); ++ pszTagSB = pszScratchBuf; ++ } ++ break; ++ } ++ case 0xF: ++ { ++ pszTagID = "FB_CDC"; ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)) ++ { ++ IMG_UINT32 ui32Req = (ui32TagSB >> 0) & 0xf; ++ IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3; ++ IMG_CHAR* pszReqOrig = ""; ++ ++ switch (ui32Req) ++ { ++ case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break; ++ case 0x1: pszReqOrig = "FBC Request, originator PBE"; break; ++ case 0x2: pszReqOrig = "FBC Request, originator Host"; break; ++ case 0x3: pszReqOrig = "FBC Request, originator TLA"; break; ++ case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break; ++ case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break; ++ case 0x6: 
pszReqOrig = "FBDC Request, originator Host"; break; ++ case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break; ++ case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break; ++ case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break; ++ case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break; ++ case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break; ++ case 0xc: pszReqOrig = "Reserved"; break; ++ case 0xd: pszReqOrig = "Reserved"; break; ++ case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break; ++ case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break; ++ } ++ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, ++ "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB); ++ pszTagSB = pszScratchBuf; ++ } ++ else ++ { ++ IMG_UINT32 ui32Req = (ui32TagSB >> 2) & 0x7; ++ IMG_UINT32 ui32MCUSB = (ui32TagSB >> 0) & 0x3; ++ IMG_CHAR* pszReqOrig = ""; ++ ++ switch (ui32Req) ++ { ++ case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break; ++ case 0x1: pszReqOrig = "FBC Request, originator PBE"; break; ++ case 0x2: pszReqOrig = "FBC Request, originator Host"; break; ++ case 0x3: pszReqOrig = "FBC Request, originator TLA"; break; ++ case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break; ++ case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break; ++ case 0x6: pszReqOrig = "FBDC Request, originator Host"; break; ++ case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break; ++ } ++ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, ++ "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB); ++ pszTagSB = pszScratchBuf; ++ } ++ break; ++ } ++ } /* switch (TagID) */ ++ ++ *ppszTagID = pszTagID; ++ *ppszTagSB = pszTagSB; ++} ++ ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function _RGXDecodeMMULevel ++ ++ @Description ++ ++ Return the name for the MMU level that faulted. ++ ++ @Input ui32MMULevel - MMU level ++ ++ @Return IMG_CHAR* to the sting describing the MMU level that faulted. ++ ++******************************************************************************/ ++static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel) ++{ ++ const IMG_CHAR* pszMMULevel = ""; ++ ++ switch (ui32MMULevel) ++ { ++ case 0x0: pszMMULevel = " (Page Table)"; break; ++ case 0x1: pszMMULevel = " (Page Directory)"; break; ++ case 0x2: pszMMULevel = " (Page Catalog)"; break; ++ case 0x3: pszMMULevel = " (Cat Base Reg)"; break; ++ } ++ ++ return pszMMULevel; ++} ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function _RGXDecodeMMUReqTags ++ ++ @Description ++ ++ Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and ++ RGX_CR_MMU_FAULT_STATUS regs. 
++ ++ @Input ui32TagID - Tag ID value ++ @Input ui32TagSB - Tag Sideband data ++ @Input bRead - Read flag ++ @Output ppszTagID - Decoded string from the Tag ID ++ @Output ppszTagSB - Decoded string from the Tag SB ++ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings ++ @Input ui32ScratchBufSize - Size of the provided buffer ++ ++ @Return void ++ ++******************************************************************************/ ++static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32TagID, ++ IMG_UINT32 ui32TagSB, ++ IMG_BOOL bRead, ++ IMG_CHAR **ppszTagID, ++ IMG_CHAR **ppszTagSB, ++ IMG_CHAR *pszScratchBuf, ++ IMG_UINT32 ui32ScratchBufSize) ++{ ++ IMG_INT32 i32SideBandType = -1; ++ IMG_CHAR *pszTagID = "-"; ++ IMG_CHAR *pszTagSB = "-"; ++ ++ PVR_ASSERT(ppszTagID != NULL); ++ PVR_ASSERT(ppszTagSB != NULL); ++ ++ ++ switch (ui32TagID) ++ { ++ case 0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break; ++ case 1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break; ++ case 2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break; ++ case 3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break; ++ case 4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break; ++ case 5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break; ++ case 6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break; ++ case 7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break; ++ case 8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break; ++ case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break; ++ case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break; ++ case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break; ++ case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break; ++ case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break; ++ case 17: pszTagID = "PPP (Black Pearl 0)"; i32SideBandType = RGXDBG_PPP; break; ++ case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break; ++ case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break; ++ case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break; ++ case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break; ++ case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break; ++ case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break; ++ case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break; ++ case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break; ++ case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break; ++ case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break; ++ case 44: pszTagID = "VDMS (Black Pearl 2)"; i32SideBandType = RGXDBG_VDMS; break; ++ case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break; ++ case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break; ++ case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break; ++ case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break; ++ case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break; ++ case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break; ++ case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; 
break; ++ case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break; ++ case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break; ++ case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break; ++ case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break; ++ case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break; ++ case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break; ++ case 68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break; ++ } ++ if (('-' == pszTagID[0]) && ('\0' == pszTagID[1])) ++ { ++ ++ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539) || ++ (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, FBCDC_ARCHITECTURE) && RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC_ARCHITECTURE) >= 3)) ++ { ++ switch (ui32TagID) ++ { ++ case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break; ++ case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break; ++ case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break; ++ case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break; ++ case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break; ++ case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break; ++ case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break; ++ case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break; ++ } ++ ++ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539)) ++ { ++ switch (ui32TagID) ++ { ++ case 9: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; ++ case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; ++ case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; ++ case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; ++ case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; ++ case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; ++ case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; ++ case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; ++ case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; ++ case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; ++ case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; ++ case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; ++ case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; ++ case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; ++ case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; ++ case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; ++ case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
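++ /* Note: with ERN 50539 the PBE and PDS slots within each Texas unit are swapped relative to the layout used in the else branch below. */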
++ } ++ }else ++ { ++ switch (ui32TagID) ++ { ++ case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; ++ case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; ++ case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; ++ case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; ++ case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; ++ case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; ++ case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; ++ case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; ++ case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; ++ case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; ++ case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; ++ case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; ++ case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; ++ case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; ++ case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; ++ case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; ++ case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break; ++ } ++ } ++ }else ++ { ++ switch (ui32TagID) ++ { ++ case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break; ++ case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; ++ case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break; ++ case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break; ++ case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break; ++ case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; ++ case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break; ++ case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break; ++ case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; ++ case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break; ++ case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break; ++ case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break; ++ case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; ++ case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break; ++ case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break; ++ case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; ++ case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break; ++ case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = 
RGXDBG_VCE; break; ++ case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break; ++ case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; ++ case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break; ++ case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break; ++ case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; ++ case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break; ++ case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break; ++ case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break; ++ case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break; ++ case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; ++ case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break; ++ } ++ } ++ ++ } ++ ++ switch (i32SideBandType) ++ { ++ case RGXDBG_META: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "DCache - Thread 0"; break; ++ case 0x1: pszTagSB = "ICache - Thread 0"; break; ++ case 0x2: pszTagSB = "JTag - Thread 0"; break; ++ case 0x3: pszTagSB = "Slave bus - Thread 0"; break; ++ case 0x4: pszTagSB = "DCache - Thread 1"; break; ++ case 0x5: pszTagSB = "ICache - Thread 1"; break; ++ case 0x6: pszTagSB = "JTag - Thread 1"; break; ++ case 0x7: pszTagSB = "Slave bus - Thread 1"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_TLA: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Pixel data"; break; ++ case 0x1: pszTagSB = "Command stream data"; break; ++ case 0x2: pszTagSB = "Fence or flush"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_VDMM: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Control Stream - Read Only"; break; ++ case 0x1: pszTagSB = "PPP State - Read Only"; break; ++ case 0x2: pszTagSB = "Indices - Read Only"; break; ++ case 0x4: pszTagSB = "Call Stack - Read/Write"; break; ++ case 0x6: pszTagSB = "DrawIndirect - Read Only"; break; ++ case 0xA: pszTagSB = "Context State - Write Only"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_CDM: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Control Stream"; break; ++ case 0x1: pszTagSB = "Indirect Data"; break; ++ case 0x2: pszTagSB = "Event Write"; break; ++ case 0x3: pszTagSB = "Context State"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_IPP: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Macrotile Header"; break; ++ case 0x1: pszTagSB = "Region Header"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_PM: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "PMA_TAFSTACK"; break; ++ case 0x1: pszTagSB = "PMA_TAMLIST"; break; ++ case 0x2: pszTagSB = "PMA_3DFSTACK"; break; ++ case 0x3: pszTagSB = "PMA_3DMLIST"; break; ++ case 0x4: pszTagSB = "PMA_PMCTX0"; break; ++ case 0x5: pszTagSB = "PMA_PMCTX1"; break; ++ case 0x6: pszTagSB = "PMA_MAVP"; break; ++ case 0x7: pszTagSB = "PMA_UFSTACK"; break; ++ case 0x8: pszTagSB = "PMD_TAFSTACK"; break; ++ case 0x9: pszTagSB = "PMD_TAMLIST"; break; ++ case 0xA: pszTagSB = "PMD_3DFSTACK"; break; ++ case 0xB: pszTagSB = "PMD_3DMLIST"; break; ++ case 0xC: pszTagSB = "PMD_PMCTX0"; break; ++ case 0xD: pszTagSB = "PMD_PMCTX1"; break; ++ case 0xF: pszTagSB = "PMD_UFSTACK"; break; ++ case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break; ++ case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break; ++ case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break; ++ case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break; ++ 
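/* 0x14..0x17: TA/3D UFSTACK entries; 0x18..0x1A: VFP entries */ ++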
case 0x14: pszTagSB = "PMA_TAUFSTACK"; break; ++ case 0x15: pszTagSB = "PMA_3DUFSTACK"; break; ++ case 0x16: pszTagSB = "PMD_TAUFSTACK"; break; ++ case 0x17: pszTagSB = "PMD_3DUFSTACK"; break; ++ case 0x18: pszTagSB = "PMA_TAVFP"; break; ++ case 0x19: pszTagSB = "PMD_3DVFP"; break; ++ case 0x1A: pszTagSB = "PMD_TAVFP"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_TILING: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "PSG Control Stream TP0"; break; ++ case 0x1: pszTagSB = "TPC TP0"; break; ++ case 0x2: pszTagSB = "VCE0"; break; ++ case 0x3: pszTagSB = "VCE1"; break; ++ case 0x4: pszTagSB = "PSG Control Stream TP1"; break; ++ case 0x5: pszTagSB = "TPC TP1"; break; ++ case 0x8: pszTagSB = "PSG Region Header TP0"; break; ++ case 0xC: pszTagSB = "PSG Region Header TP1"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_VDMS: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "Context State - Write Only"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_IPF: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x00: ++ case 0x20: pszTagSB = "CPF"; break; ++ case 0x01: pszTagSB = "DBSC"; break; ++ case 0x02: ++ case 0x04: ++ case 0x06: ++ case 0x08: ++ case 0x0A: ++ case 0x0C: ++ case 0x0E: ++ case 0x10: pszTagSB = "Control Stream"; break; ++ case 0x03: ++ case 0x05: ++ case 0x07: ++ case 0x09: ++ case 0x0B: ++ case 0x0D: ++ case 0x0F: ++ case 0x11: pszTagSB = "Primitive Block"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_ISP: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x00: pszTagSB = "ZLS read/write"; break; ++ case 0x20: pszTagSB = "Occlusion query read/write"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_TPF: ++ { ++ switch (ui32TagSB) ++ { ++ case 0x0: pszTagSB = "TPF0: Primitive Block"; break; ++ case 0x1: pszTagSB = "TPF0: Depth Bias"; break; ++ case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break; ++ case 0x3: pszTagSB = "CPF - Tables"; break; ++ case 0x4: pszTagSB = "TPF1: Primitive Block"; break; ++ case 0x5: pszTagSB = "TPF1: Depth Bias"; break; ++ case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break; ++ case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break; ++ case 0x8: pszTagSB = "TPF2: Primitive Block"; break; ++ case 0x9: pszTagSB = "TPF2: Depth Bias"; break; ++ case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break; ++ case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break; ++ case 0xC: pszTagSB = "TPF3: Primitive Block"; break; ++ case 0xD: pszTagSB = "TPF3: Depth Bias"; break; ++ case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break; ++ case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_FBCDC: ++ { ++ /* ++ * FBC faults on a 4-cluster phantom does not always set SB ++ * bit 5, but since FBC is write-only and FBDC is read-only, ++ * we can set bit 5 if this is a write fault, before decoding. 
++ */ ++ if (bRead == IMG_FALSE) ++ { ++ ui32TagSB |= 0x20; ++ } ++ ++ switch (ui32TagSB) ++ { ++ case 0x00: pszTagSB = "FBDC Request, originator ZLS"; break; ++ case 0x02: pszTagSB = "FBDC Request, originator MCU Dust 0"; break; ++ case 0x03: pszTagSB = "FBDC Request, originator MCU Dust 1"; break; ++ case 0x20: pszTagSB = "FBC Request, originator ZLS"; break; ++ case 0x22: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0"; break; ++ case 0x23: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1"; break; ++ case 0x24: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0"; break; ++ case 0x25: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1"; break; ++ case 0x28: pszTagSB = "FBC Request, originator ZLS Fence"; break; ++ case 0x2a: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0, Fence"; break; ++ case 0x2b: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1, Fence"; break; ++ case 0x2c: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0, Fence"; break; ++ case 0x2d: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1, Fence"; break; ++ } ++ break; ++ } ++ ++ case RGXDBG_MCU: ++ { ++ IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7; ++ IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7; ++ IMG_UINT32 ui32Group = ui32TagSB & 0x3; ++ ++ IMG_CHAR* pszGroup = ""; ++ ++ switch (ui32Group) ++ { ++ case 0x0: pszGroup = "Banks 0-1"; break; ++ case 0x1: pszGroup = "Banks 2-3"; break; ++ case 0x2: pszGroup = "Banks 4-5"; break; ++ case 0x3: pszGroup = "Banks 6-7"; break; ++ } ++ ++ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, ++ "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup); ++ pszTagSB = pszScratchBuf; ++ break; ++ } ++ ++ default: ++ { ++ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB); ++ pszTagSB = pszScratchBuf; ++ break; ++ } ++ } ++ ++ *ppszTagID = pszTagID; ++ *ppszTagSB = pszTagSB; ++} ++ ++ ++static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer, ++ IMG_UINT64 *pui64Seconds, ++ IMG_UINT64 *pui64Nanoseconds) ++{ ++ IMG_UINT32 ui32Remainder; ++ ++ *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder); ++ *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL); ++} ++ ++ ++typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_ ++{ ++ DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING, ++ DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED, ++ DEVICEMEM_HISTORY_QUERY_INDEX_NEXT, ++ DEVICEMEM_HISTORY_QUERY_INDEX_COUNT, ++} DEVICEMEM_HISTORY_QUERY_INDEX; ++ ++ ++/*! 
++******************************************************************************* ++ ++ @Function _PrintDevicememHistoryQueryResult ++ ++ @Description ++ ++ Print details of a single result from a DevicememHistory query ++ ++ @Input pfnDumpDebugPrintf - Debug printf function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psFaultProcessInfo - The process info derived from the page fault ++ @Input psResult - The DevicememHistory result to be printed ++ @Input ui32Index - The index of the result ++ ++ @Return void ++ ++******************************************************************************/ ++static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ RGXMEM_PROCESS_INFO *psFaultProcessInfo, ++ DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult, ++ IMG_UINT32 ui32Index, ++ const IMG_CHAR* pszIndent) ++{ ++ IMG_UINT32 ui32Remainder; ++ IMG_UINT64 ui64Seconds, ui64Nanoseconds; ++ ++ ConvertOSTimestampToSAndNS(psResult->ui64When, ++ &ui64Seconds, ++ &ui64Nanoseconds); ++ ++ if (psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE) ++ { ++ PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC ++ " Size: " IMG_DEVMEM_SIZE_FMTSPEC ++ " Operation: %s Modified: %" IMG_UINT64_FMTSPEC ++ " us ago (OS time %" IMG_UINT64_FMTSPEC ++ ".%09" IMG_UINT64_FMTSPEC " s)", ++ pszIndent, ++ ui32Index, ++ psResult->szString, ++ psResult->sBaseDevVAddr.uiAddr, ++ psResult->uiSize, ++ psResult->bMap ? "Map": "Unmap", ++ OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), ++ ui64Seconds, ++ ui64Nanoseconds); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("%s [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC ++ " Size: " IMG_DEVMEM_SIZE_FMTSPEC ++ " Operation: %s Modified: %" IMG_UINT64_FMTSPEC ++ " us ago (OS time %" IMG_UINT64_FMTSPEC ++ ".%09" IMG_UINT64_FMTSPEC ++ ") PID: %u (%s)", ++ pszIndent, ++ ui32Index, ++ psResult->szString, ++ psResult->sBaseDevVAddr.uiAddr, ++ psResult->uiSize, ++ psResult->bMap ? "Map": "Unmap", ++ OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder), ++ ui64Seconds, ++ ui64Nanoseconds, ++ psResult->sProcessInfo.uiPID, ++ psResult->sProcessInfo.szProcessName); ++ } ++ ++ if (!psResult->bRange) ++ { ++ PVR_DUMPDEBUG_LOG("%s Whole allocation was %s", pszIndent, psResult->bMap ? "mapped": "unmapped"); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("%s Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s", ++ pszIndent, ++ psResult->ui32StartPage, ++ psResult->ui32StartPage + psResult->ui32PageCount - 1, ++ psResult->sMapStartAddr.uiAddr, ++ psResult->sMapEndAddr.uiAddr, ++ psResult->bAll ? "(whole allocation) " : "", ++ psResult->bMap ? "mapped": "unmapped"); ++ } ++} ++ ++/*! 
++******************************************************************************* ++ ++ @Function _PrintDevicememHistoryQueryOut ++ ++ @Description ++ ++ Print details of all the results from a DevicememHistory query ++ ++ @Input pfnDumpDebugPrintf - Debug printf function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psFaultProcessInfo - The process info derived from the page fault ++ @Input psQueryOut - Storage for the query results ++ ++ @Return void ++ ++******************************************************************************/ ++static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ RGXMEM_PROCESS_INFO *psFaultProcessInfo, ++ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut, ++ const IMG_CHAR* pszIndent) ++{ ++ IMG_UINT32 i; ++ ++ if (psQueryOut->ui32NumResults == 0) ++ { ++ PVR_DUMPDEBUG_LOG("%s No results", pszIndent); ++ } ++ else ++ { ++ for (i = 0; i < psQueryOut->ui32NumResults; i++) ++ { ++ _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile, ++ psFaultProcessInfo, ++ &psQueryOut->sResults[i], ++ i, ++ pszIndent); ++ } ++ } ++} ++ ++/* table of HW page size values and their equivalent sizes in bytes */ ++static const unsigned int aui32HWPageSizeTable[][2] = ++{ ++ { 0, PVRSRV_4K_PAGE_SIZE }, ++ { 1, PVRSRV_16K_PAGE_SIZE }, ++ { 2, PVRSRV_64K_PAGE_SIZE }, ++ { 3, PVRSRV_256K_PAGE_SIZE }, ++ { 4, PVRSRV_1M_PAGE_SIZE }, ++ { 5, PVRSRV_2M_PAGE_SIZE } ++}; ++ ++/*! ++******************************************************************************* ++ ++ @Function _PageSizeHWToBytes ++ ++ @Description ++ ++ Convert a HW page size value to its size in bytes ++ ++ @Input ui32PageSizeHW - The HW page size value ++ ++ @Return IMG_UINT32 The page size in bytes ++ ++******************************************************************************/ ++static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW) ++{ ++ if (ui32PageSizeHW > 5) ++ { ++ /* This is invalid, so return a default value as we cannot ASSERT in this code! */ ++ return PVRSRV_4K_PAGE_SIZE; ++ } ++ ++ return aui32HWPageSizeTable[ui32PageSizeHW][1]; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function _GetDevicememHistoryData ++ ++ @Description ++ ++ Get the DevicememHistory results for the given PID and faulting device virtual address. ++ The function will query DevicememHistory for information about the faulting page, as well ++ as the page before and after.
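++ The preceding and next pages are derived by aligning the fault address down to ui32PageSizeBytes and stepping one page either side.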
++ ++ @Input uiPID - The process ID to search for allocations belonging to ++ @Input sFaultDevVAddr - The device address to search for allocations at/before/after ++ @Input asQueryOut - Storage for the query results ++ @Input ui32PageSizeBytes - Faulted page size in bytes ++ ++ @Return IMG_BOOL - IMG_TRUE if any results were found for this page fault ++ ++******************************************************************************/ ++static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr, ++ DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT], ++ IMG_UINT32 ui32PageSizeBytes) ++{ ++ DEVICEMEM_HISTORY_QUERY_IN sQueryIn; ++ IMG_BOOL bAnyHits = IMG_FALSE; ++ ++ /* if the page fault originated in the firmware then the allocation may ++ * appear to belong to any PID, because FW allocations are attributed ++ * to the client process creating the allocation, so instruct the ++ * devicemem_history query to search all available PIDs ++ */ ++ if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) ++ { ++ sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY; ++ } ++ else ++ { ++ sQueryIn.uiPID = uiPID; ++ } ++ ++ /* Query the DevicememHistory for all allocations in the previous page... */ ++ sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - ui32PageSizeBytes; ++ if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING], ++ ui32PageSizeBytes, IMG_TRUE)) ++ { ++ bAnyHits = IMG_TRUE; ++ } ++ ++ /* Query the DevicememHistory for any record at the exact address... */ ++ sQueryIn.sDevVAddr = sFaultDevVAddr; ++ if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], ++ ui32PageSizeBytes, IMG_FALSE)) ++ { ++ bAnyHits = IMG_TRUE; ++ } ++ else ++ { ++ /* If not matched then try matching any record in the faulting page... */ ++ if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED], ++ ui32PageSizeBytes, IMG_TRUE)) ++ { ++ bAnyHits = IMG_TRUE; ++ } ++ } ++ ++ /* Query the DevicememHistory for all allocations in the next page... */ ++ sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes; ++ if (DevicememHistoryQuery(&sQueryIn, &asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_NEXT], ++ ui32PageSizeBytes, IMG_TRUE)) ++ { ++ bAnyHits = IMG_TRUE; ++ } ++ ++ return bAnyHits; ++} ++ ++/* stored data about one page fault */ ++typedef struct _FAULT_INFO_ ++{ ++ /* the process info of the memory context that page faulted */ ++ RGXMEM_PROCESS_INFO sProcessInfo; ++ IMG_DEV_VIRTADDR sFaultDevVAddr; ++ MMU_FAULT_DATA sMMUFaultData; ++ DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT]; ++ /* the CR timer value at the time of the fault, recorded by the FW. ++ * used to differentiate different page faults ++ */ ++ IMG_UINT64 ui64CRTimer; ++ /* time when this FAULT_INFO entry was added. used for timing ++ * reference against the map/unmap information ++ */ ++ IMG_UINT64 ui64When; ++ IMG_UINT32 ui32FaultInfoFlags; ++} FAULT_INFO; ++ ++/* history list of page faults. 
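++ * Each entry snapshots the faulting process details and the devmem history query results captured at fault time.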
++ * Keeps the first `n` page faults and the last `n` page faults, like the FW ++ * HWR log ++ */ ++typedef struct _FAULT_INFO_LOG_ ++{ ++ IMG_UINT32 ui32Head; ++ /* the number of faults in this log need not correspond exactly to ++ * the HWINFO number of the FW, as the FW HWINFO log may contain ++ * non-page fault HWRs ++ */ ++ FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX]; ++} FAULT_INFO_LOG; ++ ++#define FAULT_INFO_PROC_INFO (0x1U) ++#define FAULT_INFO_DEVMEM_HIST (0x2U) ++ ++static FAULT_INFO_LOG gsFaultInfoLog = { 0 }; ++ ++static void _FillAppForFWFaults(PVRSRV_RGXDEV_INFO *psDevInfo, ++ FAULT_INFO *psInfo, ++ RGXMEM_PROCESS_INFO *psProcInfo) ++{ ++ IMG_UINT32 i, j; ++ ++ PVR_UNREFERENCED_PARAMETER(psProcInfo); ++ ++ for (i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) ++ { ++ for (j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++) ++ { ++ IMG_BOOL bFound; ++ ++ RGXMEM_PROCESS_INFO *psProcInfoResult = &psInfo->asQueryOut[i].sResults[j].sProcessInfo; ++ bFound = RGXPCPIDToProcessInfo(psDevInfo, ++ psProcInfoResult->uiPID, ++ psProcInfoResult); ++ if (!bFound) ++ { ++ OSStringLCopy(psProcInfoResult->szProcessName, ++ "(unknown)", ++ sizeof(psProcInfoResult->szProcessName)); ++ } ++ } ++ } ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function _PrintFaultInfo ++ ++ @Description ++ ++ Print all the details of a page fault from a FAULT_INFO structure ++ ++ @Input pfnDumpDebugPrintf - The debug printf function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psInfo - The page fault occurrence to print ++ ++ @Return void ++ ++******************************************************************************/ ++static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ FAULT_INFO *psInfo, ++ const IMG_CHAR* pszIndent) ++{ ++ IMG_UINT32 i; ++ IMG_UINT64 ui64Seconds, ui64Nanoseconds; ++ ++ ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds); ++ ++ if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_PROC_INFO)) ++ { ++ IMG_PID uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ?
++ 0 : psInfo->sProcessInfo.uiPID; ++ ++ PVR_DUMPDEBUG_LOG("%sDevice memory history for page fault address " IMG_DEV_VIRTADDR_FMTSPEC ++ ", PID: %u " ++ "(%s, unregistered: %u) OS time: " ++ "%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, ++ pszIndent, ++ psInfo->sFaultDevVAddr.uiAddr, ++ uiPID, ++ psInfo->sProcessInfo.szProcessName, ++ psInfo->sProcessInfo.bUnregistered, ++ ui64Seconds, ++ ui64Nanoseconds); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("%sCould not find PID for device memory history on PC of the fault", pszIndent); ++ } ++ ++ if (BITMASK_HAS(psInfo->ui32FaultInfoFlags, FAULT_INFO_DEVMEM_HIST)) ++ { ++ for (i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++) ++ { ++ const IMG_CHAR *pszWhich = NULL; ++ ++ switch (i) ++ { ++ case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING: ++ pszWhich = "Preceding page"; ++ break; ++ case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED: ++ pszWhich = "Faulted page"; ++ break; ++ case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT: ++ pszWhich = "Next page"; ++ break; ++ } ++ ++ PVR_DUMPDEBUG_LOG("%s %s:", pszIndent, pszWhich); ++ _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile, ++ &psInfo->sProcessInfo, ++ &psInfo->asQueryOut[i], ++ pszIndent); ++ } ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("%s No matching Devmem History for fault address", pszIndent); ++ } ++} ++ ++static void _RecordFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ FAULT_INFO *psInfo, ++ IMG_DEV_VIRTADDR sFaultDevVAddr, ++ IMG_DEV_PHYADDR sPCDevPAddr, ++ IMG_UINT64 ui64CRTimer, ++ IMG_UINT32 ui32PageSizeBytes) ++{ ++ IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE; ++ RGXMEM_PROCESS_INFO sProcessInfo; ++ ++ psInfo->ui32FaultInfoFlags = 0; ++ psInfo->sFaultDevVAddr = sFaultDevVAddr; ++ psInfo->ui64CRTimer = ui64CRTimer; ++ psInfo->ui64When = OSClockns64(); ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) ++ { ++ /* Check if this is PM fault */ ++ if (psInfo->sMMUFaultData.eType == MMU_FAULT_TYPE_PM) ++ { ++ bIsPMFault = IMG_TRUE; ++ bFound = IMG_TRUE; ++ sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM; ++ OSStringLCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName)); ++ sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0'; ++ sProcessInfo.bUnregistered = IMG_FALSE; ++ } ++ else ++ { ++ /* look up the process details for the faulting page catalogue */ ++ bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo); ++ } ++ ++ if (bFound) ++ { ++ IMG_BOOL bHits; ++ ++ psInfo->ui32FaultInfoFlags = FAULT_INFO_PROC_INFO; ++ psInfo->sProcessInfo = sProcessInfo; ++ ++ if (bIsPMFault) ++ { ++ bHits = IMG_TRUE; ++ } ++ else ++ { ++ /* get any DevicememHistory data for the faulting address */ ++ bHits = _GetDevicememHistoryData(sProcessInfo.uiPID, ++ sFaultDevVAddr, ++ psInfo->asQueryOut, ++ ui32PageSizeBytes); ++ ++ if (bHits) ++ { ++ psInfo->ui32FaultInfoFlags |= FAULT_INFO_DEVMEM_HIST; ++ ++ /* if the page fault was caused by the firmware then get information about ++ * which client application created the related allocations. ++ * ++ * Fill in the process info data for each query result. ++ */ ++ ++ if (sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE) ++ { ++ _FillAppForFWFaults(psDevInfo, psInfo, &sProcessInfo); ++ } ++ } ++ } ++ } ++ } ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function _DumpFaultAddressHostView ++ ++ @Description ++ ++ Dump FW HWR fault status in human readable form. 
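++
++ Note (summary of the logic below): the "host view" is the driver's own walk
++ of the faulting context's page tables. Each MMU level entry recorded in
++ psFaultData (PCE, PDE, PTE) is printed in turn, stopping at the first
++ index found to be out of bounds.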
++
++ @Input psFaultData - The MMU fault data recorded for the faulting address
++ @Input pfnDumpDebugPrintf - The debug printf function
++ @Input pvDumpDebugFile - Optional file identifier to be passed to the
++ 'printf' function if required
++ @Input pszIndent - String prepended to every line of output
++ @Return void
++
++******************************************************************************/
++static void _DumpFaultAddressHostView(MMU_FAULT_DATA *psFaultData,
++                                      DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
++                                      void *pvDumpDebugFile,
++                                      const IMG_CHAR* pszIndent)
++{
++    MMU_LEVEL eTopLevel;
++    const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" };
++    const IMG_CHAR szPageError[][3] = {"", "PT", "PD", "PC" };
++
++    eTopLevel = psFaultData->eTopLevel;
++
++    if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN)
++    {
++        PVR_DUMPDEBUG_LOG("%sNo live host MMU data available", pszIndent);
++        return;
++    }
++    else if (psFaultData->eType == MMU_FAULT_TYPE_PM)
++    {
++        PVR_DUMPDEBUG_LOG("%sPM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, pszIndent, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address);
++    }
++    else
++    {
++        MMU_LEVEL eCurrLevel;
++        PVR_ASSERT(eTopLevel < MMU_LEVEL_LAST);
++
++        for (eCurrLevel = eTopLevel; eCurrLevel > MMU_LEVEL_0; eCurrLevel--)
++        {
++            MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eCurrLevel];
++            if (psMMULevelData->ui64Address)
++            {
++                if (psMMULevelData->uiBytesPerEntry == 4)
++                {
++                    PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%08x and is %s",
++                                      pszIndent,
++                                      szPageLevel[eCurrLevel],
++                                      psMMULevelData->ui32Index,
++                                      (IMG_UINT) psMMULevelData->ui64Address,
++                                      psMMULevelData->psDebugStr);
++                }
++                else
++                {
++                    PVR_DUMPDEBUG_LOG("%s%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
++                                      pszIndent,
++                                      szPageLevel[eCurrLevel],
++                                      psMMULevelData->ui32Index,
++                                      psMMULevelData->ui64Address,
++                                      psMMULevelData->psDebugStr);
++                }
++            }
++            else
++            {
++                PVR_DUMPDEBUG_LOG("%s%s index (%d) out of bounds (%d)",
++                                  pszIndent,
++                                  szPageError[eCurrLevel],
++                                  psMMULevelData->ui32Index,
++                                  psMMULevelData->ui32NumOfEntries);
++                break;
++            }
++        }
++    }
++
++}
++
++/*!
++*******************************************************************************
++
++ @Function _RGXDumpRGXBIFBank
++
++ @Description
++
++ Dump BIF Bank state in human readable form.
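++
++ A zero MMU status is reported as "OK". Otherwise the MMU status field is
++ decoded (page catalogue base, page size and fault type) followed by the
++ request status (tag ID, sideband, read/write direction and the device
++ virtual address of the faulting request).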
++ ++ @Input pfnDumpDebugPrintf - The debug printf function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psDevInfo - RGX device info ++ @Input eBankID - BIF identifier ++ @Input ui64MMUStatus - MMU Status register value ++ @Input ui64ReqStatus - BIF request Status register value ++ @Return void ++ ++******************************************************************************/ ++static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXDBG_BIF_ID eBankID, ++ IMG_UINT64 ui64MMUStatus, ++ IMG_UINT64 ui64ReqStatus, ++ const IMG_CHAR *pszIndent) ++{ ++ if (ui64MMUStatus == 0x0) ++ { ++ PVR_DUMPDEBUG_LOG("%s - OK", pszBIFNames[eBankID]); ++ } ++ else ++ { ++ IMG_UINT32 ui32PageSize; ++ IMG_UINT32 ui32PC = ++ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; ++ ++ /* Bank 0 & 1 share the same fields */ ++ PVR_DUMPDEBUG_LOG("%s%s - FAULT:", ++ pszIndent, ++ pszBIFNames[eBankID]); ++ ++ /* MMU Status */ ++ { ++ IMG_UINT32 ui32MMUDataType = ++ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT; ++ ++ IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0; ++ IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0; ++ ++ ui32PageSize = (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; ++ ++ PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d%s, Page Size = %d%s%s%s.", ++ pszIndent, ++ ui64MMUStatus, ++ ui32PC, ++ (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC), ++ ui32PageSize, ++ (bROFault)?", Read Only fault":"", ++ (bProtFault)?", PM/META protection fault":"", ++ _RGXDecodeMMULevel(ui32MMUDataType)); ++ } ++ ++ /* Req Status */ ++ { ++ IMG_CHAR *pszTagID; ++ IMG_CHAR *pszTagSB; ++ IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE]; ++ IMG_BOOL bRead; ++ IMG_UINT32 ui32TagSB, ui32TagID; ++ IMG_UINT64 ui64Addr; ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) ++ { ++ bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN) != 0; ++ ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT; ++ ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT; ++ } ++ else ++ { ++ bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0; ++ ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT; ++ ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT; ++ } ++ ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) << ++ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT; ++ ++ _RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE); ++ ++ PVR_DUMPDEBUG_LOG("%s * Request (0x%016" IMG_UINT64_FMTSPECx ++ "): %s (%s), %s " IMG_DEV_VIRTADDR_FMTSPEC ".", ++ pszIndent, ++ ui64ReqStatus, ++ pszTagID, ++ pszTagSB, ++ (bRead)?"Reading 
from":"Writing to", ++ ui64Addr); ++ } ++ } ++} ++static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__RNW_EN == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN), ++ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_RNW_EN mismatch!"); ++static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK), ++ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_CLRMSK mismatch!"); ++static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_SB_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT), ++ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_SB_SHIFT mismatch!"); ++static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK), ++ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_CLRMSK mismatch!"); ++static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEM__TAG_ID_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT), ++ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_TAG_ID_SHIFT mismatch!"); ++static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK), ++ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK mismatch!"); ++static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT), ++ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_SHIFT mismatch!"); ++static_assert((RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT == RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT), ++ "RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_ALIGNSHIFT mismatch!"); ++ ++/*! ++******************************************************************************* ++ ++ @Function _RGXDumpRGXMMUFaultStatus ++ ++ @Description ++ ++ Dump MMU Fault status in human readable form. ++ ++ @Input pfnDumpDebugPrintf - The debug printf function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psDevInfo - RGX device info ++ @Input ui64MMUStatus - MMU Status register value ++ @Input pszMetaOrCore - string representing call is for META or MMU core ++ @Return void ++ ++******************************************************************************/ ++static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT64 ui64MMUStatus, ++ const IMG_PCHAR pszMetaOrCore, ++ const IMG_CHAR *pszIndent) ++{ ++ if (ui64MMUStatus == 0x0) ++ { ++ PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore); ++ } ++ else ++ { ++ IMG_UINT32 ui32PC = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >> ++ RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT; ++ IMG_UINT64 ui64Addr = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >> ++ RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT) << 4; /* align shift */ ++ IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >> ++ RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT; ++ IMG_UINT32 ui32SideBand = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >> ++ RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT; ++ IMG_UINT32 ui32MMULevel = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >> ++ RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT; ++ IMG_BOOL bRead = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0; ++ IMG_BOOL bFault = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0; ++ IMG_BOOL bROFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >> ++ RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2; ++ IMG_BOOL bProtFault = ((ui64MMUStatus & 
~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
++                                     RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3;
++        IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
++        IMG_CHAR *pszTagID;
++        IMG_CHAR *pszTagSB;
++
++        _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32SideBand, bRead, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE);
++
++        PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore);
++        PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECx "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECx ", %s (%s)%s%s%s%s.",
++                          pszIndent,
++                          ui64MMUStatus,
++                          ui32PC,
++                          (bRead)?"Reading from":"Writing to",
++                          ui64Addr,
++                          pszTagID,
++                          pszTagSB,
++                          (bFault)?", Fault":"",
++                          (bROFault)?", Read Only fault":"",
++                          (bProtFault)?", PM/META protection fault":"",
++                          _RGXDecodeMMULevel(ui32MMULevel));
++
++    }
++}
++static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT),
++              "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
++
++
++
++#if !defined(SUPPORT_TRUSTED_DEVICE)
++#if !defined(NO_HARDWARE)
++static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_MIPS_STATE *psMIPSState)
++{
++    void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
++    IMG_UINT32 ui32RegRead;
++    PVRSRV_ERROR eError = PVRSRV_OK;
++    IMG_UINT32 *pui32NMIMemoryPointer;
++    IMG_UINT32 volatile *pui32SyncFlag;
++    IMG_DEVMEM_OFFSET_T uiNMIMemoryBootOffset;
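++
++    /* Overview of the NMI handshake implemented below: clear the shared
++     * sync flag, enable and trigger an NMI in the MIPS wrapper, wait for
++     * the FW to take the NMI, raise the sync flag so the FW may proceed,
++     * then wait for the FW to leave the error level (ERL) before copying
++     * the saved MIPS state out of the shared page.
++     */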
++ ++ /* Map the FW data area to the kernel */ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, ++ (void **)&pui32NMIMemoryPointer); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to acquire NMI shared memory area (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto map_error_fail; ++ } ++ ++ /* Calculate offset to the boot/NMI data page */ ++ uiNMIMemoryBootOffset = RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA)); ++ ++ /* Jump to the NMI shared data area within the page above */ ++ pui32NMIMemoryPointer += uiNMIMemoryBootOffset + RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_NMI_SHARED_DATA_BASE); ++ ++ /* Acquire the NMI operations lock */ ++ OSLockAcquire(psDevInfo->hNMILock); ++ ++ /* Make sure the synchronisation flag is set to 0 */ ++ pui32SyncFlag = &pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET]; ++ *pui32SyncFlag = 0; ++ ++ /* Readback performed as a part of memory barrier */ ++ OSWriteMemoryBarrier(pui32SyncFlag); ++ ++ /* Enable NMI issuing in the MIPS wrapper */ ++ OSWriteHWReg64(pvRegsBaseKM, ++ RGX_CR_MIPS_WRAPPER_NMI_ENABLE, ++ RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN); ++ (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE); ++ ++ /* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */ ++ ui32RegRead = OSReadHWReg32(pvRegsBaseKM, ++ RGX_CR_MIPS_EXCEPTION_STATUS); ++ if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)) ++ { ++ ++ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; ++ goto fail; ++ } ++ ui32RegRead = 0; ++ ++ /* Issue NMI */ ++ OSWriteHWReg32(pvRegsBaseKM, ++ RGX_CR_MIPS_WRAPPER_NMI_EVENT, ++ RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN); ++ (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_EVENT); ++ ++ ++ /* Wait for NMI Taken to be asserted */ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ ui32RegRead = OSReadHWReg32(pvRegsBaseKM, ++ RGX_CR_MIPS_EXCEPTION_STATUS); ++ if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0) ++ { ++ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; ++ goto fail; ++ } ++ ui32RegRead = 0; ++ ++ /* Allow the firmware to proceed */ ++ *pui32SyncFlag = 1; ++ ++ /* Readback performed as a part of memory barrier */ ++ OSWriteMemoryBarrier(pui32SyncFlag); ++ ++ /* Wait for the FW to have finished the NMI routine */ ++ ui32RegRead = OSReadHWReg32(pvRegsBaseKM, ++ RGX_CR_MIPS_EXCEPTION_STATUS); ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ ui32RegRead = OSReadHWReg32(pvRegsBaseKM, ++ RGX_CR_MIPS_EXCEPTION_STATUS); ++ if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) ++ { ++ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE; ++ goto fail; ++ } ++ ui32RegRead = 0; ++ ++ /* Copy state */ ++ OSDeviceMemCopy(psMIPSState, pui32NMIMemoryPointer + RGXMIPSFW_NMI_STATE_OFFSET, sizeof(*psMIPSState)); ++ ++ --(psMIPSState->ui32ErrorEPC); ++ --(psMIPSState->ui32EPC); ++ ++ /* Disable NMI issuing in the MIPS wrapper */ ++ OSWriteHWReg32(pvRegsBaseKM, ++ RGX_CR_MIPS_WRAPPER_NMI_ENABLE, ++ 0); ++ (void) OSReadHWReg64(pvRegsBaseKM, RGX_CR_MIPS_WRAPPER_NMI_ENABLE); ++ ++fail: ++ /* Release 
the NMI operations lock */ ++ OSLockRelease(psDevInfo->hNMILock); ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); ++map_error_fail: ++ return eError; ++} ++ ++/* Print decoded information from cause register */ ++static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32Cause, ++ IMG_UINT32 ui32ErrorState) ++{ ++#define INDENT " " ++ const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause); ++ const IMG_CHAR * const pszException = _GetMIPSExcString(ui32ExcCode); ++ ++ if (ui32ErrorState == RGXMIPSFW_NMI_ERROR_STATE_SET && ++ pszException != NULL) ++ { ++ PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException); ++ } ++ ++ if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING) ++ { ++ PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending"); ++ } ++ ++ if (!(ui32Cause & RGXMIPSFW_C0_CAUSE_IV)) ++ { ++ PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses general interrupt vector"); ++ } ++ ++ if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING) ++ { ++ PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending"); ++ } ++ ++ /* Unusable Coproc exception */ ++ if (ui32ExcCode == 11) ++ { ++ PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause)); ++ } ++ ++#undef INDENT ++} ++ ++static IMG_BOOL _IsFWCodeException(IMG_UINT32 ui32ExcCode) ++{ ++ if (ui32ExcCode >= sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "Only %lu exceptions available in MIPS, %u is not a valid exception code", ++ (unsigned long)sizeof(apsMIPSExcCodes)/sizeof(MIPS_EXCEPTION_ENCODING), ui32ExcCode)); ++ return IMG_FALSE; ++ } ++ ++ return apsMIPSExcCodes[ui32ExcCode].bIsFatal; ++} ++ ++static void _RGXMipsDumpDebugDecode(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32Debug, ++ IMG_UINT32 ui32DEPC) ++{ ++ const IMG_CHAR *pszDException = NULL; ++ IMG_UINT32 i; ++#define INDENT " " ++ ++ if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM)) ++ { ++ return; ++ } ++ ++ PVR_DUMPDEBUG_LOG("DEBUG :"); ++ ++ pszDException = _GetMIPSExcString(RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug)); ++ ++ if (pszDException != NULL) ++ { ++ PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException); ++ } ++ ++ for (i = 0; i < ARRAY_SIZE(sMIPS_C0_DebugTable); ++i) ++ { ++ const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i]; ++ ++ if (ui32Debug & psDebugEntry->ui32Mask) ++ { ++ PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation); ++ } ++ } ++#undef INDENT ++ PVR_DUMPDEBUG_LOG("DEPC :0x%08X", ui32DEPC); ++} ++ ++static inline void _GetMipsTLBPARanges(const RGX_MIPS_TLB_ENTRY *psTLBEntry, ++ const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, ++ const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, ++ IMG_UINT64 *pui64PA0Start, ++ IMG_UINT64 *pui64PA0End, ++ IMG_UINT64 *pui64PA1Start, ++ IMG_UINT64 *pui64PA1End) ++{ ++ IMG_BOOL bUseRemapOutput = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? 
IMG_TRUE : IMG_FALSE; ++ IMG_UINT64 ui64PageSize = RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask); ++ ++ if ((psTLBEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) == 0) ++ { ++ /* Dummy values to fail the range checks later */ ++ *pui64PA0Start = -1ULL; ++ *pui64PA0End = -1ULL; ++ } ++ else if (bUseRemapOutput) ++ { ++ *pui64PA0Start = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; ++ *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; ++ } ++ else ++ { ++ *pui64PA0Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); ++ *pui64PA0End = *pui64PA0Start + ui64PageSize - 1; ++ } ++ ++ if ((psTLBEntry->ui32TLBLo1 & RGXMIPSFW_TLB_VALID) == 0) ++ { ++ /* Dummy values to fail the range checks later */ ++ *pui64PA1Start = -1ULL; ++ *pui64PA1End = -1ULL; ++ } ++ else if (bUseRemapOutput) ++ { ++ *pui64PA1Start = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; ++ *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; ++ } ++ else ++ { ++ *pui64PA1Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); ++ *pui64PA1End = *pui64PA1Start + ui64PageSize - 1; ++ } ++} ++ ++static void _CheckMipsTLBDuplicatePAs(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ const RGX_MIPS_TLB_ENTRY *psTLB, ++ const RGX_MIPS_REMAP_ENTRY *psRemap) ++{ ++ IMG_UINT64 ui64PA0StartI, ui64PA1StartI, ui64PA0StartJ, ui64PA1StartJ; ++ IMG_UINT64 ui64PA0EndI, ui64PA1EndI, ui64PA0EndJ, ui64PA1EndJ; ++ IMG_UINT32 i, j; ++ ++#define RANGES_OVERLAP(start0,end0,start1,end1) ((start0) < (end1) && (start1) < (end0)) ++ ++ for (i = 0; i < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; i++) ++ { ++ _GetMipsTLBPARanges(&psTLB[i], ++ psRemap ? &psRemap[i] : NULL, ++ psRemap ? &psRemap[i + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, ++ &ui64PA0StartI, &ui64PA0EndI, ++ &ui64PA1StartI, &ui64PA1EndI); ++ ++ for (j = i + 1; j < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; j++) ++ { ++ _GetMipsTLBPARanges(&psTLB[j], ++ psRemap ? &psRemap[j] : NULL, ++ psRemap ? &psRemap[j + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL, ++ &ui64PA0StartJ, &ui64PA0EndJ, ++ &ui64PA1StartJ, &ui64PA1EndJ); ++ ++ if (RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA0StartJ, ui64PA0EndJ) || ++ RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA1StartJ, ui64PA1EndJ) || ++ RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA0StartJ, ui64PA0EndJ) || ++ RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA1StartJ, ui64PA1EndJ) ) ++ { ++ PVR_DUMPDEBUG_LOG("Overlap between TLB entry %u and %u", i , j); ++ } ++ } ++ } ++} ++ ++static inline IMG_UINT32 _GetMIPSRemapRegionSize(IMG_UINT32 ui32RegionSizeEncoding) ++{ ++ return 1U << ((ui32RegionSizeEncoding + 1U) << 1U); ++} ++ ++static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ const RGX_MIPS_TLB_ENTRY *psTLBEntry, ++ const RGX_MIPS_REMAP_ENTRY *psRemapEntry0, ++ const RGX_MIPS_REMAP_ENTRY *psRemapEntry1, ++ IMG_UINT32 ui32Index) ++{ ++ IMG_BOOL bDumpRemapEntries = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? 
IMG_TRUE : IMG_FALSE; ++ IMG_UINT64 ui64PA0 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0); ++ IMG_UINT64 ui64PA1 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1); ++ IMG_UINT64 ui64Remap0AddrOut = 0, ui64Remap1AddrOut = 0; ++ IMG_UINT32 ui32Remap0AddrIn = 0, ui32Remap1AddrIn = 0; ++ ++ if (bDumpRemapEntries) ++ { ++ /* RemapAddrIn is always 4k aligned and on 32 bit */ ++ ui32Remap0AddrIn = psRemapEntry0->ui32RemapAddrIn << 12; ++ ui32Remap1AddrIn = psRemapEntry1->ui32RemapAddrIn << 12; ++ ++ /* RemapAddrOut is always 4k aligned and on 32 or 36 bit */ ++ ui64Remap0AddrOut = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12; ++ ui64Remap1AddrOut = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12; ++ ++ /* If TLB and remap entries match, then merge them else, print them separately */ ++ if ((IMG_UINT32)ui64PA0 == ui32Remap0AddrIn && ++ (IMG_UINT32)ui64PA1 == ui32Remap1AddrIn) ++ { ++ ui64PA0 = ui64Remap0AddrOut; ++ ui64PA1 = ui64Remap1AddrOut; ++ bDumpRemapEntries = IMG_FALSE; ++ } ++ } ++ ++ PVR_DUMPDEBUG_LOG("%2u) VA 0x%08X (%3uk) -> PA0 0x%08" IMG_UINT64_FMTSPECx " %s%s%s, " ++ "PA1 0x%08" IMG_UINT64_FMTSPECx " %s%s%s", ++ ui32Index, ++ psTLBEntry->ui32TLBHi, ++ RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask), ++ ui64PA0, ++ gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo0)], ++ gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo0)], ++ gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo0)], ++ ui64PA1, ++ gapszMipsPermissionPTFlags[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo1)], ++ gapszMipsDirtyGlobalValidPTFlags[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo1)], ++ gapszMipsCoherencyPTFlags[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo1)]); ++ ++ if (bDumpRemapEntries) ++ { ++ PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, ++ ui32Index, ++ ui32Remap0AddrIn, ++ _GetMIPSRemapRegionSize(psRemapEntry0->ui32RemapRegionSize), ++ ui64Remap0AddrOut); ++ ++ PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECx, ++ ui32Index + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES, ++ ui32Remap1AddrIn, ++ _GetMIPSRemapRegionSize(psRemapEntry1->ui32RemapRegionSize), ++ ui64Remap1AddrOut); ++ } ++} ++ ++#endif /* !defined(NO_HARDWARE) */ ++#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */ ++ ++static inline IMG_CHAR const *_GetRISCVException(IMG_UINT32 ui32Mcause) ++{ ++ switch (ui32Mcause) ++ { ++#define X(value, fatal, description) \ ++ case value: \ ++ if (fatal) \ ++ return description; \ ++ return NULL; ++ ++ RGXRISCVFW_MCAUSE_TABLE ++#undef X ++ ++ default: ++ PVR_DPF((PVR_DBG_WARNING, "Invalid RISC-V FW mcause value 0x%08x", ui32Mcause)); ++ return NULL; ++ } ++} ++ ++/* ++ Appends flags strings to a null-terminated string buffer - each flag ++ description string starts with a space. ++*/ ++static void _Flags2Description(IMG_CHAR *psDesc, ++ IMG_UINT32 ui32DescSize, ++ const IMG_FLAGS2DESC *psConvTable, ++ IMG_UINT32 ui32TableSize, ++ IMG_UINT32 ui32Flags) ++{ ++ IMG_UINT32 ui32Idx; ++ ++ for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++) ++ { ++ if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag) ++ { ++ OSStringLCat(psDesc, psConvTable[ui32Idx].pszLabel, ui32DescSize); ++ } ++ } ++} ++ ++/* ++ Writes flags strings to an uninitialised buffer. 
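++ The helpers below reserve roughly half of the buffer for the context
++ switch options (see uiBytesPerDesc) before appending the remaining flag
++ descriptions.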
++*/
++static void _GetFwSysFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
++{
++    const IMG_CHAR szCswLabel[] = "Ctx switch options:";
++    size_t uLabelLen = sizeof(szCswLabel) - 1;
++    const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U;
++
++    OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
++
++    _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asCswOpts2Description, ARRAY_SIZE(asCswOpts2Description), ui32RawFlags);
++    _Flags2Description(psDesc, ui32DescSize, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
++}
++
++static void _GetFwOsFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32DescSize, IMG_UINT32 ui32RawFlags)
++{
++    const IMG_CHAR szCswLabel[] = "Ctx switch:";
++    size_t uLabelLen = sizeof(szCswLabel) - 1;
++    const size_t uiBytesPerDesc = (ui32DescSize - uLabelLen) / 2U - 1U;
++
++    OSStringLCopy(psDesc, szCswLabel, ui32DescSize);
++
++    _Flags2Description(psDesc, uiBytesPerDesc + uLabelLen, asFwOsCfg2Description, ARRAY_SIZE(asFwOsCfg2Description), ui32RawFlags);
++}
++
++
++/*!
++*******************************************************************************
++
++ @Function _RGXDumpFWAssert
++
++ @Description
++
++ Dump FW assert strings when a thread asserts.
++
++ @Input pfnDumpDebugPrintf - The debug printf function
++ @Input pvDumpDebugFile - Optional file identifier to be passed to the
++ 'printf' function if required
++ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer
++
++ @Return void
++
++******************************************************************************/
++static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
++                             void *pvDumpDebugFile,
++                             const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
++{
++    const IMG_CHAR *pszTraceAssertPath;
++    const IMG_CHAR *pszTraceAssertInfo;
++    IMG_INT32 ui32TraceAssertLine;
++    IMG_UINT32 i;
++
++    for (i = 0; i < RGXFW_THREAD_NUM; i++)
++    {
++        pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath;
++        pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo;
++        ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum;
++
++        /* print non-null assert strings */
++        if (*pszTraceAssertInfo)
++        {
++            PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)",
++                              i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine);
++        }
++    }
++}
++
++/*!
++*******************************************************************************
++
++ @Function _RGXDumpFWFaults
++
++ @Description
++
++ Dump the FW fault information recorded by the firmware.
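++
++ The firmware records faults in a circular buffer of RGXFWIF_FWFAULTINFO_MAX
++ entries, so once the fault counter exceeds the buffer size only the most
++ recent faults remain available to print.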
++ ++ @Input pfnDumpDebugPrintf - The debug printf function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psFwSysData - RGX FW shared system data ++ ++ @Return void ++ ++******************************************************************************/ ++static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ const RGXFWIF_SYSDATA *psFwSysData) ++{ ++ if (psFwSysData->ui32FWFaults > 0) ++ { ++ IMG_UINT32 ui32StartFault = psFwSysData->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX; ++ IMG_UINT32 ui32EndFault = psFwSysData->ui32FWFaults - 1; ++ IMG_UINT32 ui32Index; ++ ++ if (psFwSysData->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX) ++ { ++ ui32StartFault = 0; ++ } ++ ++ for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++) ++ { ++ const RGX_FWFAULTINFO *psFaultInfo = &psFwSysData->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX]; ++ IMG_UINT64 ui64Seconds, ui64Nanoseconds; ++ ++ /* Split OS timestamp in seconds and nanoseconds */ ++ ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); ++ ++ PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)", ++ ui32Index+1, psFaultInfo->sFaultBuf.szInfo, ++ psFaultInfo->sFaultBuf.szPath, ++ psFaultInfo->sFaultBuf.ui32LineNum); ++ PVR_DUMPDEBUG_LOG(" Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECx", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC, ++ psFaultInfo->ui32Data, ++ psFaultInfo->ui64CRTimer, ++ ui64Seconds, ui64Nanoseconds); ++ } ++ } ++} ++ ++static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ const RGXFWIF_SYSDATA *psFwSysData) ++{ ++ IMG_UINT32 i; ++ for (i = 0; i < RGXFW_THREAD_NUM; i++) ++ { ++ if (psFwSysData->aui32CrPollAddr[i]) ++ { ++ PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)", ++ i, ++ ((psFwSysData->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), ++ psFwSysData->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET, ++ psFwSysData->aui32CrPollMask[i]); ++ } ++ } ++ ++} ++ ++static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ const RGXFWIF_SYSDATA *psFwSysData, ++ const RGXFWIF_HWRINFOBUF *psHWRInfoBuf, ++ PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_BOOL bAnyLocked = IMG_FALSE; ++ IMG_UINT32 dm, i; ++ IMG_UINT32 ui32LineSize; ++ IMG_CHAR *pszLine, *pszTemp; ++ const IMG_CHAR *apszDmNames[RGXFWIF_DM_MAX] = {"GP", "TDM", "TA", "3D", "CDM", "RAY", "TA2", "TA3", "TA4"}; ++ const IMG_CHAR szMsgHeader[] = "Number of HWR: "; ++ const IMG_CHAR szMsgFalse[] = "FALSE("; ++ IMG_CHAR *pszLockupType = ""; ++ const IMG_UINT32 ui32MsgHeaderCharCount = ARRAY_SIZE(szMsgHeader) - 1; /* size includes the null */ ++ const IMG_UINT32 ui32MsgFalseCharCount = ARRAY_SIZE(szMsgFalse) - 1; ++ IMG_UINT32 ui32HWRRecoveryFlags; ++ IMG_UINT32 ui32ReadIndex; ++ ++ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))) ++ { ++ apszDmNames[RGXFWIF_DM_TDM] = "2D"; ++ } ++ ++ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) ++ { ++ if (psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] || ++ psHWRInfoBuf->aui32HwrDmOverranCount[dm]) ++ { ++ bAnyLocked = IMG_TRUE; ++ break; ++ } ++ } ++ ++ if (!PVRSRV_VZ_MODE_IS(GUEST) && !bAnyLocked && (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK)) ++ { ++ /* No HWR situation, print nothing */ ++ return; ++ } ++ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ IMG_BOOL bAnyHWROccured = IMG_FALSE; ++ ++ for (dm = 0; dm < 
psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) ++ { ++ if (psHWRInfoBuf->aui32HwrDmRecoveredCount[dm] != 0 || ++ psHWRInfoBuf->aui32HwrDmLockedUpCount[dm] != 0 || ++ psHWRInfoBuf->aui32HwrDmOverranCount[dm] !=0) ++ { ++ bAnyHWROccured = IMG_TRUE; ++ break; ++ } ++ } ++ ++ if (!bAnyHWROccured) ++ { ++ return; ++ } ++ } ++ ++ ui32LineSize = sizeof(IMG_CHAR) * ( ++ ui32MsgHeaderCharCount + ++ (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*( 4/*DM name + left parenthesis*/ + ++ 10/*UINT32 max num of digits*/ + ++ 1/*slash*/ + ++ 10/*UINT32 max num of digits*/ + ++ 3/*right parenthesis + comma + space*/)) + ++ ui32MsgFalseCharCount + 1 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6) + 1 ++ /* 'FALSE(' + ')' + (UINT16 max num + comma) per DM + \0 */ ++ ); ++ ++ pszLine = OSAllocMem(ui32LineSize); ++ if (pszLine == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Out of mem allocating line string (size: %d)", ++ __func__, ++ ui32LineSize)); ++ return; ++ } ++ ++ OSStringLCopy(pszLine, szMsgHeader, ui32LineSize); ++ pszTemp = pszLine + ui32MsgHeaderCharCount; ++ ++ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) ++ { ++ pszTemp += OSSNPrintf(pszTemp, ++ 4 + 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1 ++ /* (name + left parenthesis) + UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */, ++ "%s(%u/%u+%u), ", ++ apszDmNames[dm], ++ psHWRInfoBuf->aui32HwrDmRecoveredCount[dm], ++ psHWRInfoBuf->aui32HwrDmLockedUpCount[dm], ++ psHWRInfoBuf->aui32HwrDmOverranCount[dm]); ++ } ++ ++ OSStringLCat(pszLine, szMsgFalse, ui32LineSize); ++ pszTemp += ui32MsgFalseCharCount; ++ ++ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) ++ { ++ pszTemp += OSSNPrintf(pszTemp, ++ 10 + 1 + 1 /* UINT32 max num + comma + \0 */, ++ (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? 
"%u," : "%u)"), ++ psHWRInfoBuf->aui32HwrDmFalseDetectCount[dm]); ++ } ++ ++ PVR_DUMPDEBUG_LOG("%s", pszLine); ++ ++ OSFreeMem(pszLine); ++ ++ /* Print out per HWR info */ ++ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++) ++ { ++ if (dm == RGXFWIF_DM_GP) ++ { ++ PVR_DUMPDEBUG_LOG("DM %d (GP)", dm); ++ } ++ else ++ { ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ IMG_UINT32 ui32HWRRecoveryFlags = psFwSysData->aui32HWRRecoveryFlags[dm]; ++ IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE]; ++ sPerDmHwrDescription[0] = '\0'; ++ ++ if (ui32HWRRecoveryFlags == RGXFWIF_DM_STATE_WORKING) ++ { ++ OSStringLCopy(sPerDmHwrDescription, " working;", RGX_DEBUG_STR_SIZE); ++ } ++ else ++ { ++ _Flags2Description(sPerDmHwrDescription, RGX_DEBUG_STR_SIZE, ++ asDmState2Description, ARRAY_SIZE(asDmState2Description), ++ ui32HWRRecoveryFlags); ++ } ++ PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x:%s)", dm, ui32HWRRecoveryFlags, sPerDmHwrDescription); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("DM %d", dm); ++ } ++ } ++ ++ ui32ReadIndex = 0; ++ for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) ++ { ++ IMG_BOOL bPMFault = IMG_FALSE; ++ IMG_UINT32 ui32PC; ++ IMG_UINT32 ui32PageSize = 0; ++ IMG_DEV_PHYADDR sPCDevPAddr = { 0 }; ++ const RGX_HWRINFO *psHWRInfo = &psHWRInfoBuf->sHWRInfo[ui32ReadIndex]; ++ ++ if ((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0)) ++ { ++ IMG_CHAR aui8RecoveryNum[10+10+1]; ++ IMG_UINT64 ui64Seconds, ui64Nanoseconds; ++ IMG_BOOL bPageFault = IMG_FALSE; ++ IMG_DEV_VIRTADDR sFaultDevVAddr; ++ ++ /* Split OS timestamp in seconds and nanoseconds */ ++ ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds); ++ ++ ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags; ++ if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; } ++ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; } ++ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; } ++ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; } ++ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; } ++ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GPU_ECC_HWR) { pszLockupType = ", GPU ECC HWR"; } ++ ++ OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber); ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) ++ { ++ PVR_DUMPDEBUG_LOG(" %s Core = %u, PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", ++ aui8RecoveryNum, ++ psHWRInfo->ui32CoreID, ++ psHWRInfo->ui32PID, ++ psHWRInfo->ui32FrameNum, ++ psHWRInfo->ui32ActiveHWRTData, ++ psHWRInfo->ui32EventStatus, ++ pszLockupType); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG(" %s PID = %u, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s", ++ aui8RecoveryNum, ++ psHWRInfo->ui32PID, ++ psHWRInfo->ui32FrameNum, ++ psHWRInfo->ui32ActiveHWRTData, ++ psHWRInfo->ui32EventStatus, ++ pszLockupType); ++ } ++ pszTemp = &aui8RecoveryNum[0]; ++ while (*pszTemp != '\0') ++ { ++ *pszTemp++ = ' '; ++ } ++ ++ /* There's currently no time correlation for the Guest OSes on the Firmware so there's no point printing OS Timestamps on Guests */ ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd, ++ 
aui8RecoveryNum, ++ psHWRInfo->ui64CRTimer, ++ ui64Seconds, ++ ui64Nanoseconds, ++ (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", CyclesElapsed = %" IMG_INT64_FMTSPECd, ++ aui8RecoveryNum, ++ psHWRInfo->ui64CRTimer, ++ (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256); ++ } ++ ++ if (psHWRInfo->ui64CRTimeHWResetFinish != 0) ++ { ++ if (psHWRInfo->ui64CRTimeFreelistReady != 0) ++ { ++ /* If ui64CRTimeFreelistReady is less than ui64CRTimeHWResetFinish it means APM kicked in and the time is not valid. */ ++ if (psHWRInfo->ui64CRTimeHWResetFinish < psHWRInfo->ui64CRTimeFreelistReady) ++ { ++ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd, ++ aui8RecoveryNum, ++ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, ++ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, ++ (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256, ++ (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = , TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, ++ aui8RecoveryNum, ++ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, ++ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, ++ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); ++ } ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalResetTimeInCycles = %" IMG_INT64_FMTSPECd, ++ aui8RecoveryNum, ++ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256, ++ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256, ++ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256); ++ } ++ } ++ ++ switch (psHWRInfo->eHWRType) ++ { ++ case RGX_HWRTYPE_BIF0FAULT: ++ case RGX_HWRTYPE_BIF1FAULT: ++ { ++ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))) ++ { ++ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType), ++ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, ++ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, ++ DD_NORMAL_INDENT); ++ ++ bPageFault = IMG_TRUE; ++ sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); ++ ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; ++ bPMFault = (ui32PC >= 8); ++ ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; ++ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; ++ } ++ } ++ break; ++ case RGX_HWRTYPE_TEXASBIF0FAULT: ++ { ++ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))) ++ { ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) ++ { ++ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ++ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, ++ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, ++ 
DD_NORMAL_INDENT); ++ ++ bPageFault = IMG_TRUE; ++ sFaultDevVAddr.uiAddr = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK); ++ ui32PC = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT; ++ bPMFault = (ui32PC >= 8); ++ ui32PageSize = (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >> ++ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT; ++ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; ++ } ++ } ++ } ++ break; ++ ++ case RGX_HWRTYPE_ECCFAULT: ++ { ++ PVR_DUMPDEBUG_LOG(" ECC fault GPU=0x%08x", psHWRInfo->uHWRData.sECCInfo.ui32FaultGPU); ++ } ++ break; ++ ++ case RGX_HWRTYPE_MMUFAULT: ++ { ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) ++ { ++ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ++ psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], ++ "Core", ++ DD_NORMAL_INDENT); ++ ++ bPageFault = IMG_TRUE; ++ sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; ++ sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK; ++ sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT; ++ sFaultDevVAddr.uiAddr <<= 4; /* align shift */ ++ ui32PC = (psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0] & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >> ++ RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT; ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ ui32PC = ui32PC - 1; ++#endif ++ bPMFault = (ui32PC <= 8); ++ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; ++ } ++ } ++ break; ++ ++ case RGX_HWRTYPE_MMUMETAFAULT: ++ { ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) ++ { ++ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ++ psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0], ++ "Meta", ++ DD_NORMAL_INDENT); ++ ++ bPageFault = IMG_TRUE; ++ sFaultDevVAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.aui64MMUStatus[0]; ++ sFaultDevVAddr.uiAddr &= ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK; ++ sFaultDevVAddr.uiAddr >>= RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT; ++ sFaultDevVAddr.uiAddr <<= 4; /* align shift */ ++ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress; ++ } ++ } ++ break; ++ ++ case RGX_HWRTYPE_POLLFAILURE: ++ { ++ PVR_DUMPDEBUG_LOG(" T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)", ++ psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum, ++ ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")), ++ psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET, ++ psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask, ++ psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue); ++ } ++ break; ++ ++ case RGX_HWRTYPE_MIPSTLBFAULT: ++ { ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ IMG_UINT32 ui32EntryLo = psHWRInfo->uHWRData.sTLBInfo.ui32EntryLo; ++ ++ /* This is not exactly what the MMU code does, but the result should be the same */ ++ const IMG_UINT32 ui32UnmappedEntry = ++ ((IMG_UINT32)(MMU_BAD_PHYS_ADDR & 0xffffffff) & RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT) | RGXMIPSFW_ENTRYLO_UNCACHED; ++ ++ PVR_DUMPDEBUG_LOG(" MIPS TLB fault: BadVA = 0x%08X, EntryLo = 0x%08X" ++ " (page PA 0x%" IMG_UINT64_FMTSPECx", V %u)", ++ psHWRInfo->uHWRData.sTLBInfo.ui32BadVAddr, ++ ui32EntryLo, ++ RGXMIPSFW_TLB_GET_PA(ui32EntryLo), ++ ui32EntryLo & RGXMIPSFW_TLB_VALID ? 
1 : 0); ++ ++ if (ui32EntryLo == ui32UnmappedEntry) ++ { ++ PVR_DUMPDEBUG_LOG(" Potential use-after-free detected"); ++ } ++ } ++ } ++ break; ++ ++ case RGX_HWRTYPE_MMURISCVFAULT: ++ { ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE, ++ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus, ++ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus, ++ DD_NORMAL_INDENT); ++ ++ bPageFault = IMG_TRUE; ++ bPMFault = IMG_FALSE; ++ sFaultDevVAddr.uiAddr = ++ (psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus & ++ ~RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS_ADDRESS_CLRMSK); ++ ui32PageSize = ++ (psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus & ++ ~RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_CLRMSK) >> ++ RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS_PAGE_SIZE_SHIFT; ++ sPCDevPAddr.uiAddr = psHWRInfo->uHWRData.sBIFInfo.ui64PCAddress; ++ } ++ } ++ break; ++ ++ case RGX_HWRTYPE_OVERRUN: ++ case RGX_HWRTYPE_UNKNOWNFAILURE: ++ { ++ /* Nothing to dump */ ++ } ++ break; ++ ++ default: ++ { ++ PVR_DUMPDEBUG_LOG(" Unknown HWR Info type: 0x%x", psHWRInfo->eHWRType); ++ } ++ break; ++ } ++ ++ if (bPageFault) ++ { ++ ++ FAULT_INFO *psInfo; ++ ++ OSLockAcquire(psDevInfo->hDebugFaultInfoLock); ++ ++ /* Find the matching Fault Info for this HWRInfo */ ++ psInfo = &gsFaultInfoLog.asFaults[ui32ReadIndex]; ++ ++ /* if they do not match, we need to update the psInfo */ ++ if ((psInfo->ui64CRTimer != psHWRInfo->ui64CRTimer) || ++ (psInfo->sFaultDevVAddr.uiAddr != sFaultDevVAddr.uiAddr)) ++ { ++ MMU_FAULT_DATA *psFaultData = &psInfo->sMMUFaultData; ++ ++ psFaultData->eType = MMU_FAULT_TYPE_UNKNOWN; ++ ++ if (bPMFault) ++ { ++ /* PM fault and we dump PC details only */ ++ psFaultData->eTopLevel = MMU_LEVEL_0; ++ psFaultData->eType = MMU_FAULT_TYPE_PM; ++ psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = sPCDevPAddr.uiAddr; ++ } ++ else ++ { ++ RGXCheckFaultAddress(psDevInfo, &sFaultDevVAddr, &sPCDevPAddr, psFaultData); ++ } ++ ++ _RecordFaultInfo(psDevInfo, psInfo, ++ sFaultDevVAddr, sPCDevPAddr, psHWRInfo->ui64CRTimer, ++ _PageSizeHWToBytes(ui32PageSize)); ++ ++ } ++ ++ _DumpFaultAddressHostView(&psInfo->sMMUFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_NORMAL_INDENT); ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) ++ { ++ _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, DD_NORMAL_INDENT); ++ } ++ ++ OSLockRelease(psDevInfo->hDebugFaultInfoLock); ++ } ++ ++ } ++ ++ if (ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1) ++ ui32ReadIndex = psHWRInfoBuf->ui32WriteIndex; ++ else ++ ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST; ++ } ++ } ++} ++ ++#if !defined(NO_HARDWARE) ++ ++/*! ++******************************************************************************* ++ ++ @Function _CheckForPendingPage ++ ++ @Description ++ ++ Check if the MMU indicates it is blocked on a pending page ++ ++ @Input psDevInfo - RGX device info ++ ++ @Return IMG_BOOL - IMG_TRUE if there is a pending page ++ ++******************************************************************************/ ++static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_UINT32 ui32BIFMMUEntry; ++ ++ ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY); ++ ++ if (ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN) ++ { ++ return IMG_TRUE; ++ } ++ else ++ { ++ return IMG_FALSE; ++ } ++} ++ ++/*! 
++******************************************************************************* ++ ++ @Function _GetPendingPageInfo ++ ++ @Description ++ ++ Get information about the pending page from the MMU status registers ++ ++ @Input psDevInfo - RGX device info ++ @Output psDevVAddr - The device virtual address of the pending MMU address translation ++ @Output pui32CatBase - The page catalog base ++ @Output pui32DataType - The MMU entry data type ++ ++ @Return void ++ ++******************************************************************************/ ++static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr, ++ IMG_UINT32 *pui32CatBase, ++ IMG_UINT32 *pui32DataType) ++{ ++ IMG_UINT64 ui64BIFMMUEntryStatus; ++ ++ ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS); ++ ++ psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK); ++ ++ *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >> ++ RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT; ++ ++ *pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >> ++ RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT; ++} ++ ++#endif ++ ++void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_BOOL bRGXPoweredON) ++{ ++ IMG_CHAR *pszState, *pszReason; ++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; ++ const RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; ++ IMG_UINT32 ui32OSid; ++ const RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; ++ /* space for the current clock speed and 3 previous */ ++ RGXFWIF_TIME_CORR asTimeCorrs[4]; ++ IMG_UINT32 ui32NumClockSpeedChanges; ++ ++#if defined(NO_HARDWARE) ++ PVR_UNREFERENCED_PARAMETER(bRGXPoweredON); ++#else ++ if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) ++ { ++ ++ IMG_UINT64 ui64RegValMMUStatus; ++ ++ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS); ++ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Core", DD_SUMMARY_INDENT); ++ ++ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META); ++ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, "Meta", DD_SUMMARY_INDENT); ++ } ++ else ++ { ++ IMG_UINT64 ui64RegValMMUStatus, ui64RegValREQStatus; ++ ++ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS); ++ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS); ++ ++ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); ++ ++ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SINGLE_BIF))) ++ { ++ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS); ++ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS); ++ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_MEM_FAULT_MMU_STATUS); ++ 
ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_MEM_FAULT_REQ_STATUS); ++ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_FWCORE, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) ++ { ++ IMG_UINT32 ui32PhantomCnt = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_REQ_NUM_PHANTOMS(RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) : 0; ++ ++ if (ui32PhantomCnt > 1) ++ { ++ IMG_UINT32 ui32Phantom; ++ for (ui32Phantom = 0; ui32Phantom < ui32PhantomCnt; ui32Phantom++) ++ { ++ /* This can't be done as it may interfere with the FW... */ ++ /*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/ ++ ++ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); ++ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); ++ ++ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); ++ } ++ }else ++ { ++ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS); ++ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS); ++ ++ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, DD_SUMMARY_INDENT); ++ } ++ } ++ } ++ ++ if (_CheckForPendingPage(psDevInfo)) ++ { ++ IMG_UINT32 ui32CatBase; ++ IMG_UINT32 ui32DataType; ++ IMG_DEV_VIRTADDR sDevVAddr; ++ ++ PVR_DUMPDEBUG_LOG("MMU Pending page: Yes"); ++ ++ _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType); ++ ++ if (ui32CatBase >= 8) ++ { ++ PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase); ++ } ++ else ++ { ++ IMG_DEV_PHYADDR sPCDevPAddr; ++ MMU_FAULT_DATA sFaultData; ++ ++ sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase)); ++ ++ PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC ++ " on cat base %u. 
++ RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr, &sFaultData);
++ _DumpFaultAddressHostView(&sFaultData, pfnDumpDebugPrintf, pvDumpDebugFile, DD_SUMMARY_INDENT);
++ }
++ }
++ }
++#endif /* NO_HARDWARE */
++
++ /* Firmware state */
++ switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus))
++ {
++ case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszState = "OK"; break;
++ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszState = "NOT RESPONDING"; break;
++ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszState = "DEAD"; break;
++ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszState = "FAULT"; break;
++ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszState = "UNDEFINED"; break;
++ default: pszState = "UNKNOWN"; break;
++ }
++
++ switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason))
++ {
++ case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break;
++ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " - Asserted"; break;
++ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " - Poll failing"; break;
++ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " - Global Event Object timeouts rising"; break;
++ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " - KCCB offset invalid"; break;
++ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " - KCCB stalled"; break;
++ case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " - Idling"; break;
++ case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " - Restarting"; break;
++ case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS: pszReason = " - Missing interrupts"; break;
++ default: pszReason = " - Unknown reason"; break;
++ }
++
++#if !defined(NO_HARDWARE)
++ /* Determine the type of virtualisation support used */
++#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1)
++ if (!PVRSRV_VZ_MODE_IS(NATIVE))
++ {
++#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
++#if defined(SUPPORT_AUTOVZ)
++#if defined(SUPPORT_AUTOVZ_HW_REGS)
++ PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with HW register support");
++#else
++ PVR_DUMPDEBUG_LOG("RGX Virtualisation type: AutoVz with shared memory");
++#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */
++#else
++ PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with static Fw heap allocation");
++#endif /* defined(SUPPORT_AUTOVZ) */
++#else
++ PVR_DUMPDEBUG_LOG("RGX Virtualisation type: Hypervisor-assisted with dynamic Fw heap allocation");
++#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */
++ }
++#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
++
++#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1))
++ if (!PVRSRV_VZ_MODE_IS(NATIVE))
++ {
++ RGXFWIF_CONNECTION_FW_STATE eFwState = KM_GET_FW_CONNECTION(psDevInfo);
++ RGXFWIF_CONNECTION_OS_STATE eOsState = KM_GET_OS_CONNECTION(psDevInfo);
++
++ PVR_DUMPDEBUG_LOG("RGX Virtualisation firmware connection state: %s (Fw=%s; OS=%s)",
++ ((eFwState == RGXFW_CONNECTION_FW_ACTIVE) && (eOsState == RGXFW_CONNECTION_OS_ACTIVE)) ? ("UP") : ("DOWN"),
++ (eFwState < RGXFW_CONNECTION_FW_STATE_COUNT) ? (apszFwOsStateName[eFwState]) : ("invalid"),
++ (eOsState < RGXFW_CONNECTION_OS_STATE_COUNT) ?
(apszFwOsStateName[eOsState]) : ("invalid")); ++ ++ } ++#endif ++ ++#if defined(SUPPORT_AUTOVZ) && defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) ++ if (!PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ IMG_UINT32 ui32FwAliveTS = KM_GET_FW_ALIVE_TOKEN(psDevInfo); ++ IMG_UINT32 ui32OsAliveTS = KM_GET_OS_ALIVE_TOKEN(psDevInfo); ++ ++ PVR_DUMPDEBUG_LOG("RGX Virtualisation watchdog timestamps (in GPU timer ticks): Fw=%u; OS=%u; diff(FW, OS) = %u", ++ ui32FwAliveTS, ui32OsAliveTS, ui32FwAliveTS - ui32OsAliveTS); ++ } ++#endif ++#endif /* !defined(NO_HARDWARE) */ ++ ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE]; ++ IMG_BOOL bOsIsolationEnabled = IMG_FALSE; ++ ++ if (psFwSysData == NULL) ++ { ++ /* can't dump any more information */ ++ PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason); ++ return; ++ } ++ ++ sHwrStateDescription[0] = '\0'; ++ ++ _Flags2Description(sHwrStateDescription, RGX_DEBUG_STR_SIZE, ++ asHwrState2Description, ARRAY_SIZE(asHwrState2Description), ++ psFwSysData->ui32HWRStateFlags); ++ PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x:%s)", pszState, pszReason, psFwSysData->ui32HWRStateFlags, sHwrStateDescription); ++ PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d retry, %d other, %d total. Latency: %u ms)", ++ pszPowStateName[psFwSysData->ePowState], ++ (psDevInfo->pvAPMISRData)?"enabled":"disabled", ++ psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle, ++ psDevInfo->ui32ActivePMReqDenied, ++ psDevInfo->ui32ActivePMReqNonIdle, ++ psDevInfo->ui32ActivePMReqRetry, ++ psDevInfo->ui32ActivePMReqTotal - ++ psDevInfo->ui32ActivePMReqOk - ++ psDevInfo->ui32ActivePMReqDenied - ++ psDevInfo->ui32ActivePMReqRetry - ++ psDevInfo->ui32ActivePMReqNonIdle, ++ psDevInfo->ui32ActivePMReqTotal, ++ psRuntimeCfg->ui32ActivePMLatencyms); ++ ++ ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges); ++ RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs)); ++ ++ PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. " ++ "Current frequency: %u.%03u MHz (sampled at %" IMG_UINT64_FMTSPEC " ns). " ++ "FW frequency: %u.%03u MHz.", ++ ui32NumClockSpeedChanges, ++ asTimeCorrs[0].ui32CoreClockSpeed / 1000000, ++ (asTimeCorrs[0].ui32CoreClockSpeed / 1000) % 1000, ++ asTimeCorrs[0].ui64OSTimeStamp, ++ psRuntimeCfg->ui32CoreClockSpeed / 1000000, ++ (psRuntimeCfg->ui32CoreClockSpeed / 1000) % 1000); ++ if (ui32NumClockSpeedChanges > 0) ++ { ++ PVR_DUMPDEBUG_LOG(" Previous frequencies: %u.%03u, %u.%03u, %u.%03u MHz (Sampled at " ++ "%" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ", %" IMG_UINT64_FMTSPEC ")", ++ asTimeCorrs[1].ui32CoreClockSpeed / 1000000, ++ (asTimeCorrs[1].ui32CoreClockSpeed / 1000) % 1000, ++ asTimeCorrs[2].ui32CoreClockSpeed / 1000000, ++ (asTimeCorrs[2].ui32CoreClockSpeed / 1000) % 1000, ++ asTimeCorrs[3].ui32CoreClockSpeed / 1000000, ++ (asTimeCorrs[3].ui32CoreClockSpeed / 1000) % 1000, ++ asTimeCorrs[1].ui64OSTimeStamp, ++ asTimeCorrs[2].ui64OSTimeStamp, ++ asTimeCorrs[3].ui64OSTimeStamp); ++ } ++ ++ for (ui32OSid = 0; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) ++ { ++ RGXFWIF_OS_RUNTIME_FLAGS sFwRunFlags = psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; ++ IMG_BOOL bMTSEnabled = IMG_FALSE; ++ ++#if !defined(NO_HARDWARE) ++ if (bRGXPoweredON) ++ { ++ bMTSEnabled = (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)) ? 
IMG_TRUE : ++ ((OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE_ENABLE) & BIT(ui32OSid)) != 0); ++ } ++#endif ++ ++ ++ PVR_DUMPDEBUG_LOG("RGX FW OS %u - State: %s; Freelists: %s%s; Priority: %d;%s %s", ui32OSid, ++ apszFwOsStateName[sFwRunFlags.bfOsState], ++ (sFwRunFlags.bfFLOk) ? "Ok" : "Not Ok", ++ (sFwRunFlags.bfFLGrowPending) ? "; Grow Request Pending" : "", ++ psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid], ++ (sFwRunFlags.bfIsolatedOS) ? " Isolated;" : "", ++ (bMTSEnabled) ? "MTS on;" : "MTS off;" ++ ); ++ ++ bOsIsolationEnabled |= sFwRunFlags.bfIsolatedOS; ++ } ++ ++#if defined(PVR_ENABLE_PHR) ++ { ++ IMG_CHAR sPHRConfigDescription[RGX_DEBUG_STR_SIZE]; ++ ++ sPHRConfigDescription[0] = '\0'; ++ _Flags2Description(sPHRConfigDescription, RGX_DEBUG_STR_SIZE, ++ asPHRConfig2Description, ARRAY_SIZE(asPHRConfig2Description), ++ BIT_ULL(psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode)); ++ ++ PVR_DUMPDEBUG_LOG("RGX PHR configuration: (%d) %s", psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode, sPHRConfigDescription); ++ } ++#endif ++ ++ if (bRGXPoweredON && RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) ++ { ++ if (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM) > 1U) ++ { ++ PVR_DUMPDEBUG_LOG("RGX MC Configuration: 0x%X (1:primary, 0:secondary)", psFwSysData->ui32McConfig); ++ } ++ } ++ ++ if (bOsIsolationEnabled) ++ { ++ PVR_DUMPDEBUG_LOG("RGX Hard Context Switch deadline: %u ms", psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); ++ } ++ ++ _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBufCtl); ++ _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); ++ _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation"); ++ PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation"); ++ } ++ ++ _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psFwSysData, psDevInfo->psRGXFWIfHWRInfoBufCtl, psDevInfo); ++ ++#if defined(SUPPORT_RGXFW_STATS_FRAMEWORK) ++ /* Dump all non-zero values in lines of 8... */ ++ { ++ IMG_CHAR pszLine[(9*RGXFWIF_STATS_FRAMEWORK_LINESIZE)+1]; ++ const IMG_UINT32 *pui32FWStatsBuf = psFwSysData->aui32FWStatsBuf; ++ IMG_UINT32 ui32Index1, ui32Index2; ++ ++ PVR_DUMPDEBUG_LOG("STATS[START]: RGXFWIF_STATS_FRAMEWORK_MAX=%d", RGXFWIF_STATS_FRAMEWORK_MAX); ++ for (ui32Index1 = 0; ui32Index1 < RGXFWIF_STATS_FRAMEWORK_MAX; ui32Index1 += RGXFWIF_STATS_FRAMEWORK_LINESIZE) ++ { ++ IMG_UINT32 ui32OrOfValues = 0; ++ IMG_CHAR *pszBuf = pszLine; ++ ++ /* Print all values in this line and skip if all zero... 
*/ ++ for (ui32Index2 = 0; ui32Index2 < RGXFWIF_STATS_FRAMEWORK_LINESIZE; ui32Index2++) ++ { ++ ui32OrOfValues |= pui32FWStatsBuf[ui32Index1+ui32Index2]; ++ OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32FWStatsBuf[ui32Index1+ui32Index2]); ++ pszBuf += 9; /* write over the '\0' */ ++ } ++ ++ if (ui32OrOfValues != 0) ++ { ++ PVR_DUMPDEBUG_LOG("STATS[%08x]:%s", ui32Index1, pszLine); ++ } ++ } ++ PVR_DUMPDEBUG_LOG("STATS[END]"); ++ } ++#endif ++} ++ ++static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++/* List of extra META Slave Port debug registers */ ++#define RGX_META_SP_EXTRA_DEBUG \ ++ X(RGX_CR_META_SP_MSLVCTRL0) \ ++ X(RGX_CR_META_SP_MSLVCTRL1) \ ++ X(RGX_CR_META_SP_MSLVDATAX) \ ++ X(RGX_CR_META_SP_MSLVIRQSTATUS) \ ++ X(RGX_CR_META_SP_MSLVIRQENABLE) \ ++ X(RGX_CR_META_SP_MSLVIRQLEVEL) ++ ++ IMG_UINT32 ui32Idx, ui32RegIdx; ++ IMG_UINT32 ui32RegVal; ++ IMG_UINT32 ui32RegAddr; ++ ++ const IMG_UINT32 aui32DebugRegAddr[] = { ++#define X(A) A, ++ RGX_META_SP_EXTRA_DEBUG ++#undef X ++ }; ++ ++ const IMG_CHAR* apszDebugRegName[] = { ++#define X(A) #A, ++ RGX_META_SP_EXTRA_DEBUG ++#undef X ++ }; ++ ++ const IMG_UINT32 aui32Debug2RegAddr[] = {0xA28, 0x0A30, 0x0A38}; ++ ++ PVR_DUMPDEBUG_LOG("META Slave Port extra debug:"); ++ ++ /* dump first set of Slave Port debug registers */ ++ for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++) ++ { ++ const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx]; ++ ++ ui32RegAddr = aui32DebugRegAddr[ui32Idx]; ++ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); ++ PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal); ++ } ++ ++ /* dump second set of Slave Port debug registers */ ++ for (ui32Idx = 0; ui32Idx < 4; ui32Idx++) ++ { ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx); ++ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20); ++ PVR_DUMPDEBUG_LOG(" * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal); ++ ++ } ++ ++ for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++) ++ { ++ ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx]; ++ for (ui32Idx = 0; ui32Idx < 2; ui32Idx++) ++ { ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx); ++ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr); ++ PVR_DUMPDEBUG_LOG(" * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal); ++ } ++ } ++ ++} ++ ++/* ++ * Array of all the Firmware Trace log IDs used to convert the trace data. ++ */ ++typedef struct _TRACEBUF_LOG_ { ++ RGXFW_LOG_SFids eSFId; ++ const IMG_CHAR *pszName; ++ const IMG_CHAR *pszFmt; ++ IMG_UINT32 ui32ArgNum; ++} TRACEBUF_LOG; ++ ++static const TRACEBUF_LOG aLogDefinitions[] = ++{ ++#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e}, ++ RGXFW_LOG_SFIDLIST ++#undef X ++}; ++ ++#define NARGS_MASK ~(0xF<<16) ++static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ const TRACEBUF_LOG *psLogDef = &aLogDefinitions[0]; ++ IMG_BOOL bIntegrityOk = IMG_TRUE; ++ ++ /* ++ * For every log ID, check the format string and number of arguments is valid. 
++ */ ++ while (psLogDef->eSFId != RGXFW_SF_LAST) ++ { ++ const TRACEBUF_LOG *psLogDef2; ++ const IMG_CHAR *pszString; ++ IMG_UINT32 ui32Count; ++ ++ /* ++ * Check the number of arguments matches the number of '%' in the string and ++ * check that no string uses %s which is not supported as it requires a ++ * pointer to memory that is not going to be valid. ++ */ ++ pszString = psLogDef->pszFmt; ++ ui32Count = 0; ++ ++ while (*pszString != '\0') ++ { ++ if (*pszString++ == '%') ++ { ++ ui32Count++; ++ if (*pszString == 's') ++ { ++ bIntegrityOk = IMG_FALSE; ++ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has an unsupported type not recognized (fmt: %%%c). Please fix.", ++ psLogDef->pszName, *pszString); ++ } ++ else if (*pszString == '%') ++ { ++ /* Double % is a printable % sign and not a format string... */ ++ ui32Count--; ++ } ++ } ++ } ++ ++ if (ui32Count != psLogDef->ui32ArgNum) ++ { ++ bIntegrityOk = IMG_FALSE; ++ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but only %d are specified. Please fix.", ++ psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum); ++ } ++ ++ /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */ ++ if (ui32Count > 20) ++ { ++ bIntegrityOk = IMG_FALSE; ++ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.", ++ psLogDef->pszName, ui32Count); ++ } ++ ++ /* Check the id number is unique (don't take into account the number of arguments) */ ++ ui32Count = 0; ++ psLogDef2 = &aLogDefinitions[0]; ++ ++ while (psLogDef2->eSFId != RGXFW_SF_LAST) ++ { ++ if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK)) ++ { ++ ui32Count++; ++ } ++ psLogDef2++; ++ } ++ ++ if (ui32Count != 1) ++ { ++ bIntegrityOk = IMG_FALSE; ++ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.", ++ psLogDef->pszName, psLogDef->eSFId, ui32Count - 1); ++ } ++ ++ /* Move to the next log ID... */ ++ psLogDef++; ++ } ++ ++ return bIntegrityOk; ++} ++ ++typedef struct { ++ IMG_UINT16 ui16Mask; ++ const IMG_CHAR *pszStr; ++} RGXFWT_DEBUG_INFO_MSKSTR; /* pair of bit mask and debug info message string */ ++ ++ ++/*! 
++*******************************************************************************
++
++ @Function RGXPrepareExtraDebugInfo
++
++ @Description
++
++ Prepares debug info string by decoding ui16DebugInfo value passed
++
++ @Input pszBuffer - pointer to debug info string buffer
++ @Input ui32BufferSize - size of the debug info string buffer
++ @Input ui16DebugInfo - debug info value to be decoded
++
++ @Return void
++
++******************************************************************************/
++static void RGXPrepareExtraDebugInfo(IMG_CHAR *pszBuffer, IMG_UINT32 ui32BufferSize, IMG_UINT16 ui16DebugInfo)
++{
++ const RGXFWT_DEBUG_INFO_MSKSTR aDebugInfoMskStr[] =
++ {
++#define X(a, b) {a, b},
++ RGXFWT_DEBUG_INFO_MSKSTRLIST
++#undef X
++ };
++
++ IMG_UINT32 ui32NumFields = sizeof(aDebugInfoMskStr)/sizeof(RGXFWT_DEBUG_INFO_MSKSTR);
++ IMG_UINT32 i;
++ IMG_BOOL bHasExtraDebugInfo = IMG_FALSE;
++
++ /* Add prepend string */
++ OSStringLCopy(pszBuffer, RGXFWT_DEBUG_INFO_STR_PREPEND, ui32BufferSize);
++
++ /* Add debug info strings */
++ for (i = 0; i < ui32NumFields; i++)
++ {
++ if (ui16DebugInfo & aDebugInfoMskStr[i].ui16Mask)
++ {
++ if (bHasExtraDebugInfo)
++ {
++ OSStringLCat(pszBuffer, ", ", ui32BufferSize); /* Add comma separator */
++ }
++ OSStringLCat(pszBuffer, aDebugInfoMskStr[i].pszStr, ui32BufferSize);
++ bHasExtraDebugInfo = IMG_TRUE;
++ }
++ }
++
++ /* Add append string */
++ OSStringLCat(pszBuffer, RGXFWT_DEBUG_INFO_STR_APPEND, ui32BufferSize);
++}
++
++void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
++ void *pvDumpDebugFile,
++ PVRSRV_RGXDEV_INFO *psDevInfo)
++{
++ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
++ static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE;
++
++ /* Check that the firmware trace is correctly defined... */
++ if (!bIntegrityCheckPassed)
++ {
++ bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile);
++ if (!bIntegrityCheckPassed)
++ {
++ return;
++ }
++ }
++
++ /* Dump FW trace information... */
++ if (psRGXFWIfTraceBufCtl != NULL)
++ {
++ IMG_UINT32 tid;
++ IMG_UINT32 ui32TraceBufSizeInDWords = psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords;
++
++ PVR_DUMPDEBUG_LOG("Device ID: %u", psDevInfo->psDeviceNode->sDevId.ui32InternalID);
++
++ /* Print the log type settings... */
++ if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
++ {
++ PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
++ ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
++ RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
++ );
++ }
++ else
++ {
++ PVR_DUMPDEBUG_LOG("Debug log type: none");
++ }
++
++ /* Print the decoded log for each thread... */
++ for (tid = 0; tid < RGXFW_THREAD_NUM; tid++)
++ {
++ volatile IMG_UINT32 *pui32FWWrapCount = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
++ volatile IMG_UINT32 *pui32FWTracePtr = &(psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
++ IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
++ IMG_UINT32 ui32HostWrapCount = *pui32FWWrapCount;
++ IMG_UINT32 ui32HostTracePtr = *pui32FWTracePtr;
++ IMG_UINT32 ui32Count = 0;
++
++ if (pui32TraceBuf == NULL)
++ {
++ /* trace buffer not yet allocated */
++ continue;
++ }
++
++ while (ui32Count < ui32TraceBufSizeInDWords)
++ {
++ IMG_UINT32 ui32Data, ui32DataToId;
++
++ /* Find the first valid log ID, skipping whitespace... 
*/ ++ do ++ { ++ ui32Data = pui32TraceBuf[ui32HostTracePtr]; ++ ui32DataToId = idToStringID(ui32Data, SFs); ++ ++ /* If an unrecognized id is found it may be inconsistent data or a firmware trace error. */ ++ if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data)) ++ { ++ PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data); ++ } ++ ++ /* Update the trace pointer... */ ++ ui32HostTracePtr++; ++ if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) ++ { ++ ui32HostTracePtr = 0; ++ ui32HostWrapCount++; ++ } ++ ui32Count++; ++ } while ((RGXFW_SF_LAST == ui32DataToId) && ++ ui32Count < ui32TraceBufSizeInDWords); ++ ++ if (ui32Count < ui32TraceBufSizeInDWords) ++ { ++ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%" IMG_UINT64_FMTSPEC ":T%u-%s> "; ++ IMG_CHAR szDebugInfoBuffer[RGXFWT_DEBUG_INFO_STR_MAXLEN] = ""; ++ IMG_UINT64 ui64Timestamp; ++ IMG_UINT16 ui16DebugInfo; ++ ++ /* If we hit the ASSERT message then this is the end of the log... */ ++ if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED) ++ { ++ PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u", ++ psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo, ++ psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath, ++ psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum); ++ break; ++ } ++ ++ ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 0) % ui32TraceBufSizeInDWords]) << 32 | ++ (IMG_UINT64)(pui32TraceBuf[(ui32HostTracePtr + 1) % ui32TraceBufSizeInDWords]); ++ ++ ui16DebugInfo = (IMG_UINT16) ((ui64Timestamp & ~RGXFWT_TIMESTAMP_DEBUG_INFO_CLRMSK) >> RGXFWT_TIMESTAMP_DEBUG_INFO_SHIFT); ++ ui64Timestamp = (ui64Timestamp & ~RGXFWT_TIMESTAMP_TIME_CLRMSK) >> RGXFWT_TIMESTAMP_TIME_SHIFT; ++ ++ /* ++ * Print the trace string and provide up to 20 arguments which ++ * printf function will be able to use. We have already checked ++ * that no string uses more than this. 
++ */ ++ OSStringLCat(szBuffer, SFs[ui32DataToId].psName, PVR_MAX_DEBUG_MESSAGE_LEN); ++ ++ /* Check and append any extra debug info available */ ++ if (ui16DebugInfo) ++ { ++ /* Prepare debug info string */ ++ RGXPrepareExtraDebugInfo(szDebugInfoBuffer, RGXFWT_DEBUG_INFO_STR_MAXLEN, ui16DebugInfo); ++ ++ /* Append debug info string */ ++ OSStringLCat(szBuffer, szDebugInfoBuffer, PVR_MAX_DEBUG_MESSAGE_LEN); ++ } ++ ++ PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)], ++ pui32TraceBuf[(ui32HostTracePtr + 2) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 3) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 4) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 5) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 6) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 7) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 8) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 9) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 10) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 11) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 12) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 13) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 14) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 15) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 16) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 17) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 18) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 19) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 20) % ui32TraceBufSizeInDWords], ++ pui32TraceBuf[(ui32HostTracePtr + 21) % ui32TraceBufSizeInDWords]); ++ ++ /* Update the trace pointer... */ ++ ui32HostTracePtr = ui32HostTracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data); ++ if (ui32HostTracePtr >= ui32TraceBufSizeInDWords) ++ { ++ ui32HostTracePtr = ui32HostTracePtr % ui32TraceBufSizeInDWords; ++ ui32HostWrapCount++; ++ } ++ ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data)); ++ ++ /* Has the FW trace buffer overtaken the host pointer during the last line printed??? */ ++ if ((*pui32FWWrapCount > ui32HostWrapCount) || ++ ((*pui32FWWrapCount == ui32HostWrapCount) && (*pui32FWTracePtr > ui32HostTracePtr))) ++ { ++ /* Move forward to the oldest entry again... */ ++ PVR_DUMPDEBUG_LOG(". . ."); ++ ui32HostWrapCount = *pui32FWWrapCount; ++ ui32HostTracePtr = *pui32FWTracePtr; ++ } ++ } ++ } ++ } ++ } ++} ++ ++#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) ++void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; ++ ++ /* Print the power monitoring counters... 
*/ ++ if (psFwSysData != NULL) ++ { ++ const IMG_UINT32 *pui32TraceBuf = psFwSysData->sPowerMonBuf.pui32TraceBuffer; ++ IMG_UINT32 ui32TracePtr = 0; //psFwSysData->sPowerMonBuf.ui32TracePointer; ++ IMG_UINT32 ui32PowerMonBufSizeInDWords = psFwSysData->ui32PowerMonBufSizeInDWords; ++ IMG_UINT32 ui32Count = 0; ++ IMG_UINT64 ui64Timestamp; ++ ++ if (pui32TraceBuf == NULL) ++ { ++ /* power monitoring buffer not yet allocated */ ++ return; ++ } ++ ++ if (pui32TraceBuf[ui32TracePtr] != RGX_CR_TIMER) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "Power monitoring data not available.")); ++ return; ++ } ++ ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords]) << 32 | ++ (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords]); ++ ++ /* Update the trace pointer... */ ++ ui32TracePtr = (ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords; ++ ui32Count = (ui32Count + 3); ++ ++ PVR_DPF((PVR_DBG_WARNING, "Dumping power monitoring buffer: CPUVAddr = %p, pointer = 0x%x, size = 0x%x", ++ pui32TraceBuf, ++ ui32TracePtr, ++ ui32PowerMonBufSizeInDWords)); ++ ++ while (ui32Count < ui32PowerMonBufSizeInDWords) ++ { ++ /* power monitoring data is (register, value) dword pairs */ ++ PVR_DUMPDEBUG_LOG("%" IMG_UINT64_FMTSPEC ":POWMON 0x%08x 0x%08x 0x%08x 0x%08x", ++ ui64Timestamp, ++ pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords], ++ pui32TraceBuf[(ui32TracePtr + 1) % ui32PowerMonBufSizeInDWords], ++ pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords], ++ pui32TraceBuf[(ui32TracePtr + 3) % ui32PowerMonBufSizeInDWords]); ++ ++ if (pui32TraceBuf[(ui32TracePtr + 0) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID || ++ pui32TraceBuf[(ui32TracePtr + 2) % ui32PowerMonBufSizeInDWords] == RGXFWIF_TIMEDIFF_ID) ++ { ++ /* end of buffer */ ++ break; ++ } ++ ++ /* Update the trace pointer... 
*/ ++ ui32TracePtr = (ui32TracePtr + 4) % ui32PowerMonBufSizeInDWords; ++ ui32Count = (ui32Count + 4); ++ } ++ } ++} ++#endif ++ ++static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState) ++{ ++ switch (eDevState) ++ { ++ case PVRSRV_DEVICE_STATE_INIT: ++ return "Initialising"; ++ case PVRSRV_DEVICE_STATE_ACTIVE: ++ return "Active"; ++ case PVRSRV_DEVICE_STATE_DEINIT: ++ return "De-initialising"; ++ case PVRSRV_DEVICE_STATE_BAD: ++ return "Bad"; ++ case PVRSRV_DEVICE_STATE_UNDEFINED: ++ PVR_ASSERT(!"Device has undefined state"); ++ __fallthrough; ++ default: ++ return "Unknown"; ++ } ++} ++ ++static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState) ++{ ++ switch (ePowerState) ++ { ++ case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT"; ++ case PVRSRV_DEV_POWER_STATE_OFF: return "OFF"; ++ case PVRSRV_DEV_POWER_STATE_ON: return "ON"; ++ default: return "UNKNOWN"; ++ } ++} ++ ++/* Helper macros to emit data */ ++#define REG32_FMTSPEC "%-30s: 0x%08X" ++#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECx ++#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R)); ++#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R)); ++#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R)); ++#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R)); ++#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V); ++ ++#if !defined(SUPPORT_TRUSTED_DEVICE) ++#if !defined(NO_HARDWARE) ++static void RGXDumpMIPSState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; ++ RGX_MIPS_STATE sMIPSState = {0}; ++ PVRSRV_ERROR eError; ++ ++ eError = _RGXMipsExtraDebug(psDevInfo, &sMIPSState); ++ PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----"); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DUMPDEBUG_LOG("MIPS extra debug not available"); ++ } ++ else ++ { ++ DDLOGVAL32("PC", sMIPSState.ui32ErrorEPC); ++ DDLOGVAL32("STATUS_REGISTER", sMIPSState.ui32StatusRegister); ++ DDLOGVAL32("CAUSE_REGISTER", sMIPSState.ui32CauseRegister); ++ _RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile, ++ sMIPSState.ui32CauseRegister, sMIPSState.ui32ErrorState); ++ DDLOGVAL32("BAD_REGISTER", sMIPSState.ui32BadRegister); ++ DDLOGVAL32("EPC", sMIPSState.ui32EPC); ++ DDLOGVAL32("SP", sMIPSState.ui32SP); ++ DDLOGVAL32("BAD_INSTRUCTION", sMIPSState.ui32BadInstr); ++ _RGXMipsDumpDebugDecode(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ++ sMIPSState.ui32Debug, sMIPSState.ui32DEPC); ++ ++ { ++ IMG_UINT32 ui32Idx; ++ ++ IMG_BOOL bCheckBRN63553WA = ++ RGX_IS_BRN_SUPPORTED(psDevInfo, 63553) && ++ (OSReadHWReg32(pvRegsBaseKM, RGX_CR_MIPS_ADDR_REMAP5_CONFIG1) == (0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN)); ++ ++ IMG_BOOL bUseRemapRanges = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32; ++ ++ PVR_DUMPDEBUG_LOG("TLB :"); ++ ++ for (ui32Idx = 0; ui32Idx < ARRAY_SIZE(sMIPSState.asTLB); ui32Idx++) ++ { ++ RGX_MIPS_REMAP_ENTRY *psRemapEntry0 = NULL; ++ RGX_MIPS_REMAP_ENTRY *psRemapEntry1 = NULL; ++ ++ if (bUseRemapRanges) ++ { ++ psRemapEntry0 = &sMIPSState.asRemap[ui32Idx]; ++ psRemapEntry1 = &sMIPSState.asRemap[ui32Idx+16]; ++ } ++ ++ _RGXMipsDumpTLBEntry(pfnDumpDebugPrintf, ++ pvDumpDebugFile, ++ &sMIPSState.asTLB[ui32Idx], ++ psRemapEntry0, ++ psRemapEntry1, ++ ui32Idx); ++ ++ if 
(bCheckBRN63553WA) ++ { ++ const RGX_MIPS_TLB_ENTRY *psTLBEntry = &sMIPSState.asTLB[ui32Idx]; ++ ++ #define BRN63553_TLB_IS_NUL(X) (((X) & RGXMIPSFW_TLB_VALID) && (RGXMIPSFW_TLB_GET_PA(X) == 0x0)) ++ ++ if (BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo0) || BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo1)) ++ { ++ PVR_DUMPDEBUG_LOG("BRN63553 WA present with a valid TLB entry mapping address 0x0."); ++ } ++ } ++ } ++ ++ /* This implicitly also checks for overlaps between memory and regbank addresses */ ++ _CheckMipsTLBDuplicatePAs(pfnDumpDebugPrintf, ++ pvDumpDebugFile, ++ sMIPSState.asTLB, ++ bUseRemapRanges ? sMIPSState.asRemap : NULL); ++ ++ if (bUseRemapRanges) ++ { ++ /* Dump unmapped address if it was dumped in FW, otherwise it will be 0 */ ++ if (sMIPSState.ui32UnmappedAddress) ++ { ++ PVR_DUMPDEBUG_LOG("Remap unmapped address => 0x%08X", ++ sMIPSState.ui32UnmappedAddress); ++ } ++ } ++ } ++ ++ /* Check FW code corruption in case of known errors */ ++ if (_IsFWCodeException(RGXMIPSFW_C0_CAUSE_EXCCODE(sMIPSState.ui32CauseRegister))) ++ { ++ eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); ++ } ++ } ++ } ++ PVR_DUMPDEBUG_LOG("--------------------------------"); ++} ++#endif ++#endif /* !defined(SUPPORT_TRUSTED_DEVICE) */ ++ ++static PVRSRV_ERROR RGXDumpRISCVState(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; ++ RGXRISCVFW_STATE sRiscvState; ++ const IMG_CHAR *pszException; ++ PVRSRV_ERROR eError; ++ ++ DDLOG64(FWCORE_MEM_CAT_BASE0); ++ DDLOG64(FWCORE_MEM_CAT_BASE1); ++ DDLOG64(FWCORE_MEM_CAT_BASE2); ++ DDLOG64(FWCORE_MEM_CAT_BASE3); ++ DDLOG64(FWCORE_MEM_CAT_BASE4); ++ DDLOG64(FWCORE_MEM_CAT_BASE5); ++ DDLOG64(FWCORE_MEM_CAT_BASE6); ++ DDLOG64(FWCORE_MEM_CAT_BASE7); ++ ++ /* Limit dump to what is currently being used */ ++ DDLOG64(FWCORE_ADDR_REMAP_CONFIG4); ++ DDLOG64(FWCORE_ADDR_REMAP_CONFIG5); ++ DDLOG64(FWCORE_ADDR_REMAP_CONFIG6); ++ DDLOG64(FWCORE_ADDR_REMAP_CONFIG12); ++ DDLOG64(FWCORE_ADDR_REMAP_CONFIG13); ++ DDLOG64(FWCORE_ADDR_REMAP_CONFIG14); ++ ++ DDLOG32(FWCORE_MEM_FAULT_MMU_STATUS); ++ DDLOG64(FWCORE_MEM_FAULT_REQ_STATUS); ++ DDLOG32(FWCORE_MEM_MMU_STATUS); ++ DDLOG32(FWCORE_MEM_READS_EXT_STATUS); ++ DDLOG32(FWCORE_MEM_READS_INT_STATUS); ++ ++ PVR_DUMPDEBUG_LOG("---- [ RISC-V internal state ] ----"); ++ ++#if defined(SUPPORT_VALIDATION) || defined(SUPPORT_RISCV_GDB) ++ if (RGXRiscvIsHalted(psDevInfo)) ++ { ++ /* Avoid resuming the RISC-V FW as most operations ++ * on the debug module require a halted core */ ++ PVR_DUMPDEBUG_LOG("(skipping as RISC-V found halted)"); ++ return PVRSRV_OK; ++ } ++#endif ++ ++ eError = RGXRiscvHalt(psDevInfo); ++ PVR_GOTO_IF_ERROR(eError, _RISCVDMError); ++ ++#define X(name, address) \ ++ eError = RGXRiscvReadReg(psDevInfo, address, &sRiscvState.name); \ ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXRiscvReadReg", _RISCVDMError); \ ++ DDLOGVAL32(#name, sRiscvState.name); ++ ++ RGXRISCVFW_DEBUG_DUMP_REGISTERS ++#undef X ++ ++ eError = RGXRiscvResume(psDevInfo); ++ PVR_GOTO_IF_ERROR(eError, _RISCVDMError); ++ ++ pszException = _GetRISCVException(sRiscvState.mcause); ++ if (pszException != NULL) ++ { ++ PVR_DUMPDEBUG_LOG("RISC-V FW hit an exception: %s", pszException); ++ ++ eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DUMPDEBUG_LOG("Failed to validate any 
FW code corruption"); ++ } ++ } ++ ++ return PVRSRV_OK; ++ ++_RISCVDMError: ++ PVR_DPF((PVR_DBG_ERROR, "Failed to communicate with the Debug Module")); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_UINT32 ui32Meta = (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0; ++ IMG_UINT32 ui32TACycles, ui323DCycles, ui32TAOr3DCycles, ui32TAAnd3DCycles; ++ IMG_UINT32 ui32RegVal; ++ IMG_BOOL bFirmwarePerf; ++ IMG_BOOL bS7Infra = RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE); ++ IMG_BOOL bMulticore = RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT); ++ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; ++ PVRSRV_ERROR eError; ++ ++ PVR_DUMPDEBUG_LOG("------[ RGX registers ]------"); ++ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM); ++ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr); ++ ++ /* Check if firmware perf was set at Init time */ ++ bFirmwarePerf = (psDevInfo->psRGXFWIfSysInit->eFirmwarePerf != FW_PERF_CONF_NONE); ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBVNC_COREID_REG)) ++ { ++ DDLOG64(CORE_ID); ++ } ++ else ++ { ++ DDLOG32(CORE_ID); ++ } ++ DDLOG32(CORE_REVISION); ++ DDLOG32(DESIGNER_REV_FIELD1); ++ DDLOG32(DESIGNER_REV_FIELD2); ++ DDLOG64(CHANGESET_NUMBER); ++ if (ui32Meta) ++ { ++ DDLOG32(META_SP_MSLVIRQSTATUS); ++ } ++ ++ if (bMulticore) ++ { ++ DDLOG32(MULTICORE_SYSTEM); ++ DDLOG32(MULTICORE_GPU); ++ } ++ ++ DDLOG64(CLK_CTRL); ++ DDLOG64(CLK_STATUS); ++ DDLOG64(CLK_CTRL2); ++ DDLOG64(CLK_STATUS2); ++ ++ if (bS7Infra) ++ { ++ DDLOG64(CLK_XTPLUS_CTRL); ++ DDLOG64(CLK_XTPLUS_STATUS); ++ } ++ DDLOG32(EVENT_STATUS); ++ DDLOG64(TIMER); ++ if (bS7Infra) ++ { ++ DDLOG64(MMU_FAULT_STATUS); ++ DDLOG64(MMU_FAULT_STATUS_META); ++ } ++ else ++ { ++ DDLOG32(BIF_FAULT_BANK0_MMU_STATUS); ++ DDLOG64(BIF_FAULT_BANK0_REQ_STATUS); ++ DDLOG32(BIF_FAULT_BANK1_MMU_STATUS); ++ DDLOG64(BIF_FAULT_BANK1_REQ_STATUS); ++ } ++ DDLOG32(BIF_MMU_STATUS); ++ DDLOG32(BIF_MMU_ENTRY); ++ DDLOG64(BIF_MMU_ENTRY_STATUS); ++ ++ if (bS7Infra) ++ { ++ DDLOG32(BIF_JONES_OUTSTANDING_READ); ++ DDLOG32(BIF_BLACKPEARL_OUTSTANDING_READ); ++ DDLOG32(BIF_DUST_OUTSTANDING_READ); ++ } ++ else ++ { ++ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))) ++ { ++ DDLOG32(BIF_STATUS_MMU); ++ DDLOG32(BIF_READS_EXT_STATUS); ++ DDLOG32(BIF_READS_INT_STATUS); ++ } ++ DDLOG32(BIFPM_STATUS_MMU); ++ DDLOG32(BIFPM_READS_EXT_STATUS); ++ DDLOG32(BIFPM_READS_INT_STATUS); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) ++ { ++ DDLOG64(CONTEXT_MAPPING0); ++ DDLOG64(CONTEXT_MAPPING1); ++ DDLOG64(CONTEXT_MAPPING2); ++ DDLOG64(CONTEXT_MAPPING3); ++ DDLOG64(CONTEXT_MAPPING4); ++ } ++ else ++ { ++ DDLOG64(BIF_CAT_BASE_INDEX); ++ DDLOG64(BIF_CAT_BASE0); ++ DDLOG64(BIF_CAT_BASE1); ++ DDLOG64(BIF_CAT_BASE2); ++ DDLOG64(BIF_CAT_BASE3); ++ DDLOG64(BIF_CAT_BASE4); ++ DDLOG64(BIF_CAT_BASE5); ++ DDLOG64(BIF_CAT_BASE6); ++ DDLOG64(BIF_CAT_BASE7); ++ } ++ ++ DDLOG32(BIF_CTRL_INVAL); ++ DDLOG32(BIF_CTRL); ++ ++ DDLOG64(BIF_PM_CAT_BASE_VCE0); ++ DDLOG64(BIF_PM_CAT_BASE_TE0); ++ DDLOG64(BIF_PM_CAT_BASE_ALIST0); ++ DDLOG64(BIF_PM_CAT_BASE_VCE1); ++ DDLOG64(BIF_PM_CAT_BASE_TE1); ++ DDLOG64(BIF_PM_CAT_BASE_ALIST1); ++ ++ if (bMulticore) ++ { ++ DDLOG32(MULTICORE_GEOMETRY_CTRL_COMMON); ++ DDLOG32(MULTICORE_FRAGMENT_CTRL_COMMON); ++ 
DDLOG32(MULTICORE_COMPUTE_CTRL_COMMON); ++ } ++ ++ DDLOG32(PERF_TA_PHASE); ++ DDLOG32(PERF_TA_CYCLE); ++ DDLOG32(PERF_3D_PHASE); ++ DDLOG32(PERF_3D_CYCLE); ++ ++ ui32TACycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_CYCLE); ++ ui323DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_3D_CYCLE); ++ ui32TAOr3DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_OR_3D_CYCLE); ++ ui32TAAnd3DCycles = ((ui32TACycles + ui323DCycles) > ui32TAOr3DCycles) ? (ui32TACycles + ui323DCycles - ui32TAOr3DCycles) : 0; ++ DDLOGVAL32("PERF_TA_OR_3D_CYCLE", ui32TAOr3DCycles); ++ DDLOGVAL32("PERF_TA_AND_3D_CYCLE", ui32TAAnd3DCycles); ++ ++ DDLOG32(PERF_COMPUTE_PHASE); ++ DDLOG32(PERF_COMPUTE_CYCLE); ++ ++ DDLOG32(PM_PARTIAL_RENDER_ENABLE); ++ ++ DDLOG32(ISP_RENDER); ++ DDLOG64(TLA_STATUS); ++ DDLOG64(MCU_FENCE); ++ ++ DDLOG32(VDM_CONTEXT_STORE_STATUS); ++ DDLOG64(VDM_CONTEXT_STORE_TASK0); ++ DDLOG64(VDM_CONTEXT_STORE_TASK1); ++ DDLOG64(VDM_CONTEXT_STORE_TASK2); ++ DDLOG64(VDM_CONTEXT_RESUME_TASK0); ++ DDLOG64(VDM_CONTEXT_RESUME_TASK1); ++ DDLOG64(VDM_CONTEXT_RESUME_TASK2); ++ ++ DDLOG32(ISP_CTL); ++ DDLOG32(ISP_STATUS); ++ DDLOG32(MTS_INTCTX); ++ DDLOG32(MTS_BGCTX); ++ DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE); ++ DDLOG32(MTS_SCHEDULE); ++ DDLOG32(MTS_GPU_INT_STATUS); ++ ++ DDLOG32(CDM_CONTEXT_STORE_STATUS); ++ DDLOG64(CDM_CONTEXT_PDS0); ++ DDLOG64(CDM_CONTEXT_PDS1); ++ DDLOG64(CDM_TERMINATE_PDS); ++ DDLOG64(CDM_TERMINATE_PDS1); ++ ++ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 47025)) ++ { ++ DDLOG64(CDM_CONTEXT_LOAD_PDS0); ++ DDLOG64(CDM_CONTEXT_LOAD_PDS1); ++ } ++ ++ if (bS7Infra) ++ { ++ DDLOG32(JONES_IDLE); ++ } ++ ++ DDLOG32(SIDEKICK_IDLE); ++ ++ if (!bS7Infra) ++ { ++ DDLOG32(SLC_IDLE); ++ DDLOG32(SLC_STATUS0); ++ DDLOG64(SLC_STATUS1); ++ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS) && RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS)) ++ { ++ DDLOG64(SLC_STATUS2); ++ } ++ ++ DDLOG32(SLC_CTRL_BYPASS); ++ DDLOG64(SLC_CTRL_MISC); ++ } ++ else ++ { ++ DDLOG32(SLC3_IDLE); ++ DDLOG64(SLC3_STATUS); ++ DDLOG32(SLC3_FAULT_STOP_STATUS); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE) && ++ RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER)) ++ { ++ DDLOG32(SAFETY_EVENT_STATUS__ROGUEXE); ++ DDLOG32(MTS_SAFETY_EVENT_ENABLE__ROGUEXE); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER)) ++ { ++ DDLOG32(FWCORE_WDT_CTRL); ++ } ++ ++ if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0) ++ { ++ DDLOG32(SCRATCH0); ++ DDLOG32(SCRATCH1); ++ DDLOG32(SCRATCH2); ++ DDLOG32(SCRATCH3); ++ DDLOG32(SCRATCH4); ++ DDLOG32(SCRATCH5); ++ DDLOG32(SCRATCH6); ++ DDLOG32(SCRATCH7); ++ DDLOG32(SCRATCH8); ++ DDLOG32(SCRATCH9); ++ DDLOG32(SCRATCH10); ++ DDLOG32(SCRATCH11); ++ DDLOG32(SCRATCH12); ++ DDLOG32(SCRATCH13); ++ DDLOG32(SCRATCH14); ++ DDLOG32(SCRATCH15); ++ } ++ ++ if (ui32Meta) ++ { ++ IMG_BOOL bIsT0Enabled = IMG_FALSE, bIsFWFaulted = IMG_FALSE; ++ ++ /* Forcing bit 6 of MslvCtrl1 to 0 to avoid internal reg read going through the core */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0); ++ ++ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); ++ DDLOGVAL32("T0 TXENABLE", ui32RegVal); ++ if (ui32RegVal & META_CR_TXENABLE_ENABLE_BIT) ++ { ++ bIsT0Enabled = IMG_TRUE; ++ } ++ ++ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); ++ DDLOGVAL32("T0 TXSTATUS", 
ui32RegVal); ++ ++ /* check for FW fault */ ++ if (((ui32RegVal >> 20) & 0x3) == 0x2) ++ { ++ bIsFWFaulted = IMG_TRUE; ++ } ++ ++ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); ++ DDLOGVAL32("T0 TXDEFR", ui32RegVal); ++ ++ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); ++ DDLOGVAL32("T0 PC", ui32RegVal); ++ ++ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); ++ DDLOGVAL32("T0 PCX", ui32RegVal); ++ ++ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); ++ DDLOGVAL32("T0 SP", ui32RegVal); ++ ++ if ((ui32Meta == MTP218) || (ui32Meta == MTP219)) ++ { ++ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); ++ DDLOGVAL32("T1 TXENABLE", ui32RegVal); ++ ++ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); ++ DDLOGVAL32("T1 TXSTATUS", ui32RegVal); ++ ++ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); ++ DDLOGVAL32("T1 TXDEFR", ui32RegVal); ++ ++ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); ++ DDLOGVAL32("T1 PC", ui32RegVal); ++ ++ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); ++ DDLOGVAL32("T1 PCX", ui32RegVal); ++ ++ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError); ++ DDLOGVAL32("T1 SP", ui32RegVal); ++ } ++ ++ if (bFirmwarePerf) ++ { ++ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); ++ DDLOGVAL32("PERF_COUNT0", ui32RegVal); ++ ++ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXReadFWModuleAddr", _METASPError); ++ DDLOGVAL32("PERF_COUNT1", ui32RegVal); ++ } ++ ++ if (bIsT0Enabled & bIsFWFaulted) ++ { ++ eError = _ValidateFWImage(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DUMPDEBUG_LOG("Failed to validate any FW code corruption"); ++ } ++ } ++ else if (bIsFWFaulted) ++ { ++ PVR_DUMPDEBUG_LOG("Skipping FW code memory corruption checking as META is disabled"); ++ } ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ DDLOG32(MIPS_ADDR_REMAP1_CONFIG1); ++ DDLOG64(MIPS_ADDR_REMAP1_CONFIG2); ++ DDLOG32(MIPS_ADDR_REMAP2_CONFIG1); ++ DDLOG64(MIPS_ADDR_REMAP2_CONFIG2); ++ DDLOG32(MIPS_ADDR_REMAP3_CONFIG1); ++ DDLOG64(MIPS_ADDR_REMAP3_CONFIG2); ++ DDLOG32(MIPS_ADDR_REMAP4_CONFIG1); ++ DDLOG64(MIPS_ADDR_REMAP4_CONFIG2); ++ DDLOG32(MIPS_ADDR_REMAP5_CONFIG1); ++ DDLOG64(MIPS_ADDR_REMAP5_CONFIG2); ++ DDLOG64(MIPS_WRAPPER_CONFIG); ++ DDLOG32(MIPS_EXCEPTION_STATUS); ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ PVR_DUMPDEBUG_LOG("MIPS extra debug not available with SUPPORT_TRUSTED_DEVICE."); ++#elif !defined(NO_HARDWARE) ++ RGXDumpMIPSState(pfnDumpDebugPrintf, 
pvDumpDebugFile, psDevInfo); ++#endif ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ eError = RGXDumpRISCVState(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); ++ PVR_RETURN_IF_ERROR(eError); ++ } ++ ++ return PVRSRV_OK; ++ ++_METASPError: ++ PVR_DUMPDEBUG_LOG("Dump Slave Port debug information"); ++ _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); ++ ++ return eError; ++} ++ ++#undef REG32_FMTSPEC ++#undef REG64_FMTSPEC ++#undef DDLOG32 ++#undef DDLOG64 ++#undef DDLOG32_DPX ++#undef DDLOG64_DPX ++#undef DDLOGVAL32 ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXDebugRequestProcess ++ ++ @Description ++ ++ This function will print out the debug for the specified level of verbosity ++ ++ @Input pfnDumpDebugPrintf - Optional replacement print function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psDevInfo - RGX device info ++ @Input ui32VerbLevel - Verbosity level ++ ++ @Return void ++ ++******************************************************************************/ ++static ++void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32VerbLevel) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; ++ PVRSRV_DEV_POWER_STATE ePowerState; ++ IMG_BOOL bRGXPoweredON; ++ IMG_UINT8 ui8FwOsCount; ++ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; ++ const RGXFWIF_OSDATA *psFwOsData = psDevInfo->psRGXFWIfFwOsData; ++ IMG_BOOL bPwrLockAlreadyHeld; ++ ++ bPwrLockAlreadyHeld = PVRSRVPwrLockIsLockedByMe(psDeviceNode); ++ if (!bPwrLockAlreadyHeld) ++ { ++ /* Only acquire the power-lock if not already held by the calling context */ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ return; ++ } ++ } ++ ++ ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; ++ ++ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error retrieving RGX power state. No debug info dumped.", ++ __func__)); ++ goto Exit; ++ } ++ ++ if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || ++ (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) ++ { ++ PVR_DUMPDEBUG_LOG("Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", ++ (PVRSRV_VZ_MODE_IS(NATIVE)) ? 
(1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount);
++ }
++
++ PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d Start ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID);
++
++ bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
++
++ PVR_DUMPDEBUG_LOG("------[ RGX Info ]------");
++ PVR_DUMPDEBUG_LOG("Device Node (Info): %p (%p)", psDevInfo->psDeviceNode, psDevInfo);
++ PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d (%s)", psDevInfo->sDevFeatureCfg.ui32B,
++ psDevInfo->sDevFeatureCfg.ui32V,
++ psDevInfo->sDevFeatureCfg.ui32N,
++ psDevInfo->sDevFeatureCfg.ui32C,
++ PVR_ARCH_NAME);
++ PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState));
++ PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState));
++ if (psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)
++ {
++ PVR_DUMP_FIRMWARE_INFO(psDevInfo->psRGXFWIfOsInit->sRGXCompChecks);
++ }
++ else
++ {
++ PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED");
++ }
++
++ RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
++
++ /* Dump out the kernel CCB. */
++ {
++ const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
++
++ if (psKCCBCtl != NULL)
++ {
++ PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X",
++ psKCCBCtl->ui32WriteOffset,
++ psKCCBCtl->ui32ReadOffset);
++ }
++ }
++
++ /* Dump out the firmware CCB. */
++ {
++ const RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl;
++
++ if (psFCCBCtl != NULL)
++ {
++ PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X",
++ psFCCBCtl->ui32WriteOffset,
++ psFCCBCtl->ui32ReadOffset);
++ }
++ }
++
++#if defined(SUPPORT_WORKLOAD_ESTIMATION)
++ /* Dump out the Workload estimation CCB. */
++ {
++ const RGXFWIF_CCB_CTL *psWorkEstCCBCtl = psDevInfo->psWorkEstFirmwareCCBCtl;
++
++ if (psWorkEstCCBCtl != NULL)
++ {
++ PVR_DUMPDEBUG_LOG("RGX WorkEst CCB WO:0x%X RO:0x%X",
++ psWorkEstCCBCtl->ui32WriteOffset,
++ psWorkEstCCBCtl->ui32ReadOffset);
++ }
++ }
++#endif
++
++
++ if (psFwOsData != NULL)
++ {
++ /* Dump the KCCB commands executed */
++ PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d",
++ psFwOsData->ui32KCCBCmdsExecuted);
++
++#if defined(PVRSRV_STALLED_CCB_ACTION)
++ /* Dump the number of times we have performed a forced UFO update,
++ * and (if non-zero) the timestamp of the most recent occurrence.
++ */
++ PVR_DUMPDEBUG_LOG("RGX SLR: Forced UFO updates requested = %d",
++ psFwOsData->ui32ForcedUpdatesRequested);
++ if (psFwOsData->ui32ForcedUpdatesRequested > 0)
++ {
++ IMG_UINT8 ui8Idx;
++ IMG_UINT64 ui64Seconds, ui64Nanoseconds;
++
++ if (psFwOsData->ui64LastForcedUpdateTime > 0ULL)
++ {
++ ConvertOSTimestampToSAndNS(psFwOsData->ui64LastForcedUpdateTime, &ui64Seconds, &ui64Nanoseconds);
++ PVR_DUMPDEBUG_LOG("RGX SLR: (most recent forced update was around %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ")",
++ ui64Seconds, ui64Nanoseconds);
++ }
++ else
++ {
++ PVR_DUMPDEBUG_LOG("RGX SLR: (unable to force update as fence contained no sync checkpoints)");
++ }
++ /* Dump SLR log */
++ if (psFwOsData->sSLRLogFirst.aszCCBName[0])
++ {
++ ConvertOSTimestampToSAndNS(psFwOsData->sSLRLogFirst.ui64Timestamp, &ui64Seconds, &ui64Nanoseconds);
++ PVR_DUMPDEBUG_LOG("RGX SLR:{%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC
++ "} Fence found on context 0x%x '%s' has %d UFOs",
++ ui64Seconds, ui64Nanoseconds,
++ psFwOsData->sSLRLogFirst.ui32FWCtxAddr,
++ psFwOsData->sSLRLogFirst.aszCCBName,
++ psFwOsData->sSLRLogFirst.ui32NumUFOs);
++ }
++ for (ui8Idx=0; ui8Idx<PVR_SLR_LOG_ENTRIES; ui8Idx++)
++ {
++ if (psFwOsData->sSLRLog[ui8Idx].aszCCBName[0])
++ {
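++ /* Each retained SLR log entry records the timestamp of the
++ forced update, the firmware context address, the CCB name
++ and the number of UFOs on the fence. */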
ConvertOSTimestampToSAndNS(psFwOsData->sSLRLog[ui8Idx].ui64Timestamp, &ui64Seconds, &ui64Nanoseconds); ++ PVR_DUMPDEBUG_LOG("RGX SLR:[%" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ++ "] Fence found on context 0x%x '%s' has %d UFOs", ++ ui64Seconds, ui64Nanoseconds, ++ psFwOsData->sSLRLog[ui8Idx].ui32FWCtxAddr, ++ psFwOsData->sSLRLog[ui8Idx].aszCCBName, ++ psFwOsData->sSLRLog[ui8Idx].ui32NumUFOs); ++ } ++ } ++ } ++#else ++ PVR_DUMPDEBUG_LOG("RGX SLR: Disabled"); ++#endif ++ ++ /* Dump the error counts */ ++ PVR_DUMPDEBUG_LOG("RGX Errors: WGP:%d, TRP:%d", ++ psDevInfo->sErrorCounts.ui32WGPErrorCount, ++ psDevInfo->sErrorCounts.ui32TRPErrorCount); ++ ++ /* Dump the IRQ info for threads or OS IDs */ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++ /* only Host has access to registers containing IRQ counters */ ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++#endif ++ { ++ IMG_UINT32 ui32idx; ++ ++ for_each_irq_cnt(ui32idx) ++ { ++ IMG_UINT32 ui32IrqCnt; ++ ++ get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); ++ if (ui32IrqCnt) ++ { ++ PVR_DUMPDEBUG_LOG(MSG_IRQ_CNT_TYPE "%u: FW IRQ count = %u", ui32idx, ui32IrqCnt); ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++ if (ui32idx == RGXFW_HOST_OS) ++#endif ++ { ++ PVR_DUMPDEBUG_LOG("Last sampled IRQ count in LISR = %u", psDevInfo->aui32SampleIRQCount[ui32idx]); ++ } ++ } ++ } ++ } ++ } ++ ++ /* Dump the FW Sys config flags on the Host */ ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; ++ IMG_CHAR sFwSysFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; ++ ++ if (!psFwSysData) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Fw Sys Data is not mapped into CPU space", __func__)); ++ goto Exit; ++ } ++ ++ _GetFwSysFlagsDescription(sFwSysFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwSysData->ui32ConfigFlags); ++ PVR_DUMPDEBUG_LOG("FW System config flags = 0x%08X (%s)", psFwSysData->ui32ConfigFlags, sFwSysFlagsDescription); ++ } ++ ++ /* Dump the FW OS config flags */ ++ { ++ IMG_CHAR sFwOsFlagsDescription[MAX_FW_DESCRIPTION_LENGTH]; ++ ++ if (!psFwOsData) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Fw Os Data is not mapped into CPU space", __func__)); ++ goto Exit; ++ } ++ ++ _GetFwOsFlagsDescription(sFwOsFlagsDescription, MAX_FW_DESCRIPTION_LENGTH, psFwOsData->ui32FwOsConfigFlags); ++ PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%08X (%s)", psFwOsData->ui32FwOsConfigFlags, sFwOsFlagsDescription); ++ } ++ ++ if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ ++ eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXDumpRGXRegisters failed (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ } ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(GUEST) ? 
"Guest Mode of operation" : "RGX power is down"); ++ } ++ ++ PVR_DUMPDEBUG_LOG("------[ RGX FW Trace Info ]------"); ++ ++ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) ++ { ++ IMG_INT tid; ++ /* Dump FW trace information */ ++ if (psRGXFWIfTraceBufCtl != NULL) ++ { ++ for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++) ++ { ++ IMG_UINT32 i; ++ IMG_BOOL bPrevLineWasZero = IMG_FALSE; ++ IMG_BOOL bLineIsAllZeros = IMG_FALSE; ++ IMG_UINT32 ui32CountLines = 0; ++ IMG_UINT32 *pui32TraceBuffer; ++ IMG_CHAR *pszLine; ++ ++ if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) ++ { ++ PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")", ++ ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")), ++ RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType) ++ ); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("Debug log type: none"); ++ } ++ ++ pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer; ++ ++ /* Skip if trace buffer is not allocated */ ++ if (pui32TraceBuffer == NULL) ++ { ++ PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid); ++ continue; ++ } ++ ++/* Max number of DWords to be printed per line, in debug dump output */ ++#define PVR_DD_FW_TRACEBUF_LINESIZE 30U ++ /* each element in the line is 8 characters plus a space. The '+ 1' is because of the final trailing '\0'. */ ++ pszLine = OSAllocMem(9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1); ++ if (pszLine == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Out of mem allocating line string (size: %d)", ++ __func__, ++ 9 * PVR_DD_FW_TRACEBUF_LINESIZE + 1)); ++ goto Exit; ++ } ++ ++ PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid); ++ PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer); ++ PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords); ++ ++ for (i = 0; i < psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords; i += PVR_DD_FW_TRACEBUF_LINESIZE) ++ { ++ IMG_UINT32 k = 0; ++ IMG_UINT32 ui32Line = 0x0; ++ IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32); ++ IMG_CHAR *pszBuf = pszLine; ++ ++ for (k = 0; k < PVR_DD_FW_TRACEBUF_LINESIZE; k++) ++ { ++ if ((i + k) >= psRGXFWIfTraceBufCtl->ui32TraceBufSizeInDWords) ++ { ++ /* Stop reading when the index goes beyond trace buffer size. This condition is ++ * hit during printing the last line in DD when ui32TraceBufSizeInDWords is not ++ * a multiple of PVR_DD_FW_TRACEBUF_LINESIZE */ ++ break; ++ } ++ ++ ui32Line |= pui32TraceBuffer[i + k]; ++ ++ /* prepare the line to print it. The '+1' is because of the trailing '\0' added */ ++ OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]); ++ pszBuf += 9; /* write over the '\0' */ ++ } ++ ++ bLineIsAllZeros = (ui32Line == 0x0); ++ ++ if (bLineIsAllZeros) ++ { ++ if (bPrevLineWasZero) ++ { ++ ui32CountLines++; ++ } ++ else ++ { ++ bPrevLineWasZero = IMG_TRUE; ++ ui32CountLines = 1; ++ PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 
00000000", ui32LineOffset); ++ } ++ } ++ else ++ { ++ if (bPrevLineWasZero && ui32CountLines > 1) ++ { ++ PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines); ++ } ++ bPrevLineWasZero = IMG_FALSE; ++ ++ PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine); ++ } ++ ++ } ++ if (bPrevLineWasZero) ++ { ++ PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines); ++ } ++ ++ PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid); ++ ++ OSFreeMem(pszLine); ++ } ++ } ++ ++ { ++ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) ++ { ++ PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------"); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------"); ++ } ++ ++ DumpTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ ++ DumpRenderCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ ++ DumpKickSyncCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE)) ++ { ++ DumpComputeCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ } ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) ++ { ++ DumpTDMTransferCtxtsInfo(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ } ++ } ++ } ++ ++ PVR_DUMPDEBUG_LOG("------[ RGX Device ID:%d End ]------", psDevInfo->psDeviceNode->sDevId.ui32InternalID); ++ ++Exit: ++ if (!bPwrLockAlreadyHeld) ++ { ++ PVRSRVPowerUnlock(psDeviceNode); ++ } ++} ++ ++/*! ++ ****************************************************************************** ++ ++ @Function RGXDebugRequestNotify ++ ++ @Description Dump the debug data for RGX ++ ++ ******************************************************************************/ ++static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle; ++ ++ /* Only action the request if we've fully init'ed */ ++ if (psDevInfo->bDevInit2Done) ++ { ++ RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel); ++ } ++} ++ ++PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ return PVRSRVRegisterDeviceDbgRequestNotify(&psDevInfo->hDbgReqNotify, ++ psDevInfo->psDeviceNode, ++ RGXDebugRequestNotify, ++ DEBUG_REQUEST_RGX, ++ psDevInfo); ++} ++ ++PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ if (psDevInfo->hDbgReqNotify) ++ { ++ return PVRSRVUnregisterDeviceDbgRequestNotify(psDevInfo->hDbgReqNotify); ++ } ++ ++ /* No notifier registered */ ++ return PVRSRV_OK; ++} ++ ++/****************************************************************************** ++ End of file (rgxdebug.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxdebug.h b/drivers/gpu/drm/img-rogue/rgxdebug.h +new file mode 100644 +index 000000000000..f163997ac562 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxdebug.h +@@ -0,0 +1,229 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX debug header file ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX debugging functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXDEBUG_H) ++#define RGXDEBUG_H ++ ++#include "pvrsrv_error.h" ++#include "img_types.h" ++#include "device.h" ++#include "pvr_notifier.h" ++#include "pvrsrv.h" ++#include "rgxdevice.h" ++ ++/** ++ * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in ++ * LISR for each RGX FW thread. ++ * Macro takes pointer to PVRSRV_RGXDEV_INFO as input. ++ */ ++ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++#define for_each_irq_cnt(ui32idx) \ ++ for (ui32idx = 0; ui32idx < RGX_NUM_OS_SUPPORTED; ui32idx++) ++ ++#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ ++ do { \ ++ extern const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS]; \ ++ ui32Dest = PVRSRV_VZ_MODE_IS(GUEST) ? 
0 : OSReadHWReg32((psRgxDevInfo)->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[ui32idx]); \ ++ } while (false) ++ ++#define MSG_IRQ_CNT_TYPE "OS" ++ ++#else ++ ++#define for_each_irq_cnt(ui32idx) \ ++ for (ui32idx = 0; ui32idx < RGXFW_THREAD_NUM; ui32idx++) ++ ++#define get_irq_cnt_val(ui32Dest, ui32idx, psRgxDevInfo) \ ++ ui32Dest = (psRgxDevInfo)->psRGXFWIfFwOsData->aui32InterruptCount[ui32idx] ++ ++#define MSG_IRQ_CNT_TYPE "Thread" ++#endif /* RGX_FW_IRQ_OS_COUNTERS */ ++ ++static inline void RGXDEBUG_PRINT_IRQ_COUNT(PVRSRV_RGXDEV_INFO* psRgxDevInfo) ++{ ++#if defined(PVRSRV_NEED_PVR_DPF) && defined(DEBUG) ++ IMG_UINT32 ui32idx; ++ ++ for_each_irq_cnt(ui32idx) ++ { ++ IMG_UINT32 ui32IrqCnt; ++ ++ get_irq_cnt_val(ui32IrqCnt, ui32idx, psRgxDevInfo); ++ ++ PVR_DPF((DBGPRIV_VERBOSE, MSG_IRQ_CNT_TYPE ++ " %u FW IRQ count = %u", ui32idx, ui32IrqCnt)); ++ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++ if (ui32idx == RGXFW_HOST_OS) ++#endif ++ { ++ PVR_DPF((DBGPRIV_VERBOSE, "Last sampled IRQ count in LISR = %u", ++ (psRgxDevInfo)->aui32SampleIRQCount[ui32idx])); ++ } ++ } ++#endif /* PVRSRV_NEED_PVR_DPF */ ++} ++ ++extern const IMG_CHAR * const gapszMipsPermissionPTFlags[4]; ++extern const IMG_CHAR * const gapszMipsCoherencyPTFlags[8]; ++extern const IMG_CHAR * const gapszMipsDirtyGlobalValidPTFlags[8]; ++/*! ++******************************************************************************* ++ ++ @Function RGXDumpRGXRegisters ++ ++ @Description ++ ++ Dumps an extensive list of RGX registers required for debugging ++ ++ @Input pfnDumpDebugPrintf - Optional replacement print function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psDevInfo - RGX device info ++ ++ @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXDumpFirmwareTrace ++ ++ @Description Dumps the decoded version of the firmware trace buffer. ++ ++ Dump useful debugging info ++ ++ @Input pfnDumpDebugPrintf - Optional replacement print function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psDevInfo - RGX device info ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++#if defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS) ++void RGXDumpPowerMonitoring(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo); ++#endif ++ ++#if defined(SUPPORT_FW_VIEW_EXTRA_DEBUG) ++/*! ++******************************************************************************* ++ ++ @Function ValidateFWOnLoad ++ ++ @Description Compare the Firmware image as seen from the CPU point of view ++ against the same memory area as seen from the firmware point ++ of view after first power up. ++ ++ @Input psDevInfo - Device Info ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR ValidateFWOnLoad(PVRSRV_RGXDEV_INFO *psDevInfo); ++#endif ++ ++/*! 
++******************************************************************************* ++ ++ @Function RGXDumpRGXDebugSummary ++ ++ @Description ++ ++ Dump a summary in human readable form with the RGX state ++ ++ @Input pfnDumpDebugPrintf - The debug printf function ++ @Input pvDumpDebugFile - Optional file identifier to be passed to the ++ 'printf' function if required ++ @Input psDevInfo - RGX device info ++ @Input bRGXPoweredON - IMG_TRUE if RGX device is on ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_BOOL bRGXPoweredON); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXDebugInit ++ ++ @Description ++ ++ Setup debug requests, calls into PVRSRVRegisterDeviceDbgRequestNotify ++ ++ @Input psDevInfo RGX device info ++ @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXDebugInit(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXDebugDeinit ++ ++ @Description ++ ++ Remove debug requests, calls into PVRSRVUnregisterDeviceDbgRequestNotify ++ ++ @Output phNotify Points to debug notifier handle ++ @Return PVRSRV_ERROR PVRSRV_OK on success otherwise an error ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXDebugDeinit(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++#endif /* RGXDEBUG_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxdevice.h b/drivers/gpu/drm/img-rogue/rgxdevice.h +new file mode 100644 +index 000000000000..4ebbd29159db +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxdevice.h +@@ -0,0 +1,828 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX device node header file ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX device node ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXDEVICE_H) ++#define RGXDEVICE_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_device_types.h" ++#include "mmu_common.h" ++#include "rgx_fwif_km.h" ++#include "cache_ops.h" ++#include "device.h" ++#include "osfunc.h" ++#include "rgxlayer_impl.h" ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++#include "hash.h" ++#endif ++typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT; ++ ++typedef struct { ++ DEVMEM_MEMDESC *psFWFrameworkMemDesc; ++} RGX_COMMON_CONTEXT_INFO; ++ ++ ++/*! ++ ****************************************************************************** ++ * Device state flags ++ *****************************************************************************/ ++#define RGXKM_DEVICE_STATE_ZERO_FREELIST (0x1) /*!< Zeroing the physical pages of reconstructed free lists */ ++#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x2) /*!< Used to disable the Devices Watchdog logging */ ++#define RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN (0x4) /*!< Used for validation to inject dust requests every TA/3D kick */ ++#define RGXKM_DEVICE_STATE_CCB_GROW_EN (0x8) /*!< Used to indicate CCB grow is permitted */ ++#define RGXKM_DEVICE_STATE_ENABLE_SPU_UNITS_POWER_MASK_CHANGE_EN (0x10) /*!< Used for validation to enable SPU power state mask change */ ++#define RGXKM_DEVICE_STATE_MASK (0x1F) ++ ++/*! ++ ****************************************************************************** ++ * ECC RAM Fault Validation ++ *****************************************************************************/ ++#define RGXKM_ECC_ERR_INJ_DISABLE 0 ++#define RGXKM_ECC_ERR_INJ_SLC 1 ++#define RGXKM_ECC_ERR_INJ_USC 2 ++#define RGXKM_ECC_ERR_INJ_TPU 3 ++#define RGXKM_ECC_ERR_INJ_RASCAL 4 ++#define RGXKM_ECC_ERR_INJ_MARS 5 ++ ++#define RGXKM_ECC_ERR_INJ_INTERVAL 10U ++ ++/*! ++ ****************************************************************************** ++ * GPU DVFS Table ++ *****************************************************************************/ ++ ++#define RGX_GPU_DVFS_TABLE_SIZE 32 ++#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US 25000 /* Time required to calibrate a clock frequency the first time */ ++#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US 150000 /* Time required for a recalibration after a DVFS transition */ ++#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US 10000000 /* Time before the next periodic calibration and correlation */ ++ ++/*! 
++ ****************************************************************************** ++ * Global flags for driver validation ++ *****************************************************************************/ ++#define RGX_VAL_KZ_SIG_CHECK_NOERR_EN (0x10U) /*!< Enable KZ signature check. Signatures must match */ ++#define RGX_VAL_KZ_SIG_CHECK_ERR_EN (0x20U) /*!< Enable KZ signature check. Signatures must not match */ ++#define RGX_VAL_SIG_CHECK_ERR_EN (0U) /*!< Not supported on Rogue cores */ ++ ++typedef struct _GPU_FREQ_TRACKING_DATA_ ++{ ++ /* Core clock speed estimated by the driver */ ++ IMG_UINT32 ui32EstCoreClockSpeed; ++ ++ /* Amount of successful calculations of the estimated core clock speed */ ++ IMG_UINT32 ui32CalibrationCount; ++} GPU_FREQ_TRACKING_DATA; ++ ++#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) ++#define RGX_GPU_FREQ_TRACKING_SIZE 16 ++ ++typedef struct ++{ ++ IMG_UINT64 ui64BeginCRTimestamp; ++ IMG_UINT64 ui64BeginOSTimestamp; ++ ++ IMG_UINT64 ui64EndCRTimestamp; ++ IMG_UINT64 ui64EndOSTimestamp; ++ ++ IMG_UINT32 ui32EstCoreClockSpeed; ++ IMG_UINT32 ui32CoreClockSpeed; ++} GPU_FREQ_TRACKING_HISTORY; ++#endif ++ ++typedef struct _RGX_GPU_DVFS_TABLE_ ++{ ++ /* Beginning of current calibration period (in us) */ ++ IMG_UINT64 ui64CalibrationCRTimestamp; ++ IMG_UINT64 ui64CalibrationOSTimestamp; ++ ++ /* Calculated calibration period (in us) */ ++ IMG_UINT64 ui64CalibrationCRTimediff; ++ IMG_UINT64 ui64CalibrationOSTimediff; ++ ++ /* Current calibration period (in us) */ ++ IMG_UINT32 ui32CalibrationPeriod; ++ ++ /* System layer frequency table and frequency tracking data */ ++ IMG_UINT32 ui32FreqIndex; ++ IMG_UINT32 aui32GPUFrequency[RGX_GPU_DVFS_TABLE_SIZE]; ++ GPU_FREQ_TRACKING_DATA asTrackingData[RGX_GPU_DVFS_TABLE_SIZE]; ++ ++#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) ++ IMG_UINT32 ui32HistoryIndex; ++ GPU_FREQ_TRACKING_HISTORY asTrackingHistory[RGX_GPU_FREQ_TRACKING_SIZE]; ++#endif ++} RGX_GPU_DVFS_TABLE; ++ ++ ++/*! ++ ****************************************************************************** ++ * GPU utilisation statistics ++ *****************************************************************************/ ++ ++typedef struct _RGXFWIF_GPU_UTIL_STATS_ ++{ ++ IMG_BOOL bValid; /* If TRUE, statistics are valid. ++ FALSE if the driver couldn't get reliable stats. */ ++ IMG_UINT64 ui64GpuStatActive; /* GPU active statistic */ ++ IMG_UINT64 ui64GpuStatBlocked; /* GPU blocked statistic */ ++ IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */ ++ IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */ ++ IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */ ++} RGXFWIF_GPU_UTIL_STATS; ++ ++ ++typedef struct _RGX_REG_CONFIG_ ++{ ++ IMG_BOOL bEnabled; ++ RGXFWIF_REG_CFG_TYPE eRegCfgTypeToPush; ++ IMG_UINT32 ui32NumRegRecords; ++ POS_LOCK hLock; ++} RGX_REG_CONFIG; ++ ++typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC; ++ ++typedef struct ++{ ++ IMG_UINT32 ui32DustCount1; ++ IMG_UINT32 ui32DustCount2; ++ IMG_BOOL bToggle; ++} RGX_DUST_STATE; ++ ++typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_ ++{ ++ IMG_UINT64 ui64ErnsBrns; ++ IMG_UINT64 ui64Features; ++ IMG_UINT32 ui32B; ++ IMG_UINT32 ui32V; ++ IMG_UINT32 ui32N; ++ IMG_UINT32 ui32C; ++ IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX]; ++ IMG_UINT32 ui32MAXDMCount; ++ IMG_UINT32 ui32MAXDustCount; ++ IMG_UINT32 ui32SLCSizeInBytes; ++ IMG_PCHAR pszBVNCString; ++}PVRSRV_DEVICE_FEATURE_CONFIG; ++ ++/* This is used to get the value of a specific feature. 
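++ * A typical guarded use looks like the sketch below (NUM_CLUSTERS is an
++ * illustrative feature name only, standing in for a real RGX_FEATURE_*_IDX
++ * entry):
++ *
++ *   if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS))
++ *   {
++ *       IMG_UINT32 ui32Clusters = RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS);
++ *   }
++ *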
++ * Note that it will assert if the feature is disabled or value is invalid. */ ++#define RGX_GET_FEATURE_VALUE(psDevInfo, Feature) \ ++ ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] ) ++ ++/* This is used to check if the feature value (e.g. with an integer value) is available for the currently running BVNC or not */ ++#define RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, Feature) \ ++ ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] < RGX_FEATURE_VALUE_DISABLED ) ++ ++/* This is used to check if the Boolean feature (e.g. WITHOUT an integer value) is available for the currently running BVNC or not */ ++#define RGX_IS_FEATURE_SUPPORTED(psDevInfo, Feature) \ ++ BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64Features, RGX_FEATURE_##Feature##_BIT_MASK) ++ ++/* This is used to check if the ERN is available for the currently running BVNC or not */ ++#define RGX_IS_ERN_SUPPORTED(psDevInfo, ERN) \ ++ BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, HW_ERN_##ERN##_BIT_MASK) ++ ++/* This is used to check if the BRN is available for the currently running BVNC or not */ ++#define RGX_IS_BRN_SUPPORTED(psDevInfo, BRN) \ ++ BITMASK_HAS(psDevInfo->sDevFeatureCfg.ui64ErnsBrns, FIX_HW_BRN_##BRN##_BIT_MASK) ++ ++/* there is a corresponding define in rgxapi.h */ ++#define RGX_MAX_TIMER_QUERIES 16U ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++/*! ++ * The host maintains a 512-deep cache of submitted workloads per device, ++ * i.e. a global look-up table for TA, 3D and compute (depending on the RGX ++ * hardware support present) ++ */ ++ ++/* ++ * For the workload estimation return data array, the max amount of commands the ++ * MTS can have is 255, therefore 512 (LOG2 = 9) is large enough to account for ++ * all corner cases ++ */ ++#define RETURN_DATA_ARRAY_SIZE_LOG2 (9) ++#define RETURN_DATA_ARRAY_SIZE ((1U) << RETURN_DATA_ARRAY_SIZE_LOG2) ++#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1) ++ ++#define WORKLOAD_HASH_SIZE_LOG2 6 ++#define WORKLOAD_HASH_SIZE ((1U) << WORKLOAD_HASH_SIZE_LOG2) ++#define WORKLOAD_HASH_WRAP_MASK (WORKLOAD_HASH_SIZE - 1) ++ ++/*! ++ * Workload characteristics for supported data masters. ++ * All characteristics must match for the workload estimate to be used/updated. ++ */ ++typedef union _RGX_WORKLOAD_ ++{ ++ struct ++ { ++ IMG_UINT32 ui32RenderTargetSize; ++ IMG_UINT32 ui32NumberOfDrawCalls; ++ IMG_UINT32 ui32NumberOfIndices; ++ IMG_UINT32 ui32NumberOfMRTs; ++ } sTA3D; ++ ++ struct ++ { ++ IMG_UINT32 ui32NumberOfWorkgroups; ++ IMG_UINT32 ui32NumberOfWorkitems; ++ } sCompute; ++ ++ struct ++ { ++ IMG_UINT32 ui32Characteristic1; ++ IMG_UINT32 ui32Characteristic2; ++ } sTransfer; ++} RGX_WORKLOAD; ++ ++/*! ++ * Host data used to match the return data (actual cycles count) to the ++ * submitted command packet. ++ * The hash table is a per-DM circular buffer containing a key based on the ++ * workload characteristics. On job completion, the oldest workload data ++ * is evicted if the CB is full and the driver matches the characteristics ++ * to the matching data. ++ * ++ * o If the driver finds a match the existing cycle estimate is averaged with ++ * the actual cycles used. ++ * o Otherwise a new hash entry is created with the actual cycles for this ++ * workload. ++ * ++ * Subsequently if a match is found during command submission, the estimate ++ * is passed to the scheduler, e.g. adjust the GPU frequency if PDVFS is enabled. 
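++ *
++ * A minimal sketch of the completion-time update described above, using
++ * only the fields declared below (the index look-up itself is elided and
++ * ui64ActualCycles is an assumed name for the returned cycle count):
++ *
++ *   if (asHashKeys[ui32Idx] matches the completed workload)
++ *       aui64HashData[ui32Idx] =
++ *           (aui64HashData[ui32Idx] + ui64ActualCycles) / 2;
++ *   else
++ *       store ui64ActualCycles at ui32HashArrayWO and advance it:
++ *       ui32HashArrayWO = (ui32HashArrayWO + 1) & WORKLOAD_HASH_WRAP_MASK;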
++ */ ++typedef struct _WORKLOAD_MATCHING_DATA_ ++{ ++ POS_LOCK psHashLock; ++ HASH_TABLE *psHashTable; /*! existing workload cycle estimates for this DM */ ++ RGX_WORKLOAD asHashKeys[WORKLOAD_HASH_SIZE]; ++ IMG_UINT64 aui64HashData[WORKLOAD_HASH_SIZE]; ++ IMG_UINT32 ui32HashArrayWO; /*! track the most recent workload estimates */ ++} WORKLOAD_MATCHING_DATA; ++ ++/*! ++ * A generic container for the workload matching data for GPU contexts: ++ * rendering (TA, 3D), compute, etc. ++ */ ++typedef struct _WORKEST_HOST_DATA_ ++{ ++ union ++ { ++ struct ++ { ++ WORKLOAD_MATCHING_DATA sDataTA; /*!< matching data for TA commands */ ++ WORKLOAD_MATCHING_DATA sData3D; /*!< matching data for 3D commands */ ++ } sTA3D; ++ ++ struct ++ { ++ WORKLOAD_MATCHING_DATA sDataCDM; /*!< matching data for CDM commands */ ++ } sCompute; ++ ++ struct ++ { ++ WORKLOAD_MATCHING_DATA sDataTDM; /*!< matching data for TDM-TQ commands */ ++ } sTransfer; ++ } uWorkloadMatchingData; ++ ++ /* ++ * This is a per-context property, hence the TA and 3D share the same ++ * per render context counter. ++ */ ++ IMG_UINT32 ui32WorkEstCCBReceived; /*!< Used to ensure all submitted work ++ estimation commands are received ++ by the host before clean up. */ ++} WORKEST_HOST_DATA; ++ ++/*! ++ * Entries in the list of submitted workloads, used when the completed command ++ * returns data to the host. ++ * ++ * - the matching data is needed as it holds the hash data ++ * - the host data is needed for completion updates, ensuring memory is not ++ * freed while workload estimates are in-flight. ++ * - the workload characteristic is used in the hash table look-up. ++ */ ++typedef struct _WORKEST_RETURN_DATA_ ++{ ++ WORKEST_HOST_DATA *psWorkEstHostData; ++ WORKLOAD_MATCHING_DATA *psWorkloadMatchingData; ++ RGX_WORKLOAD sWorkloadCharacteristics; ++} WORKEST_RETURN_DATA; ++#endif ++ ++ ++typedef struct ++{ ++#if defined(PDUMP) ++ IMG_HANDLE hPdumpPages; ++#endif ++ PG_HANDLE sPages; ++ IMG_DEV_PHYADDR sPhysAddr; ++} RGX_MIPS_ADDRESS_TRAMPOLINE; ++ ++ ++/*! ++ ****************************************************************************** ++ * RGX Device error counts ++ *****************************************************************************/ ++typedef struct _PVRSRV_RGXDEV_ERROR_COUNTS_ ++{ ++ IMG_UINT32 ui32WGPErrorCount; /*!< count of the number of WGP checksum errors */ ++ IMG_UINT32 ui32TRPErrorCount; /*!< count of the number of TRP checksum errors */ ++} PVRSRV_RGXDEV_ERROR_COUNTS; ++ ++/*! 
++ ****************************************************************************** ++ * RGX Device info ++ *****************************************************************************/ ++typedef struct _PVRSRV_RGXDEV_INFO_ ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ ++ PVRSRV_DEVICE_FEATURE_CONFIG sDevFeatureCfg; ++ ++ IMG_BOOL bDevInit2Done; ++ ++ IMG_BOOL bFirmwareInitialised; ++ IMG_BOOL bPDPEnabled; ++ ++ IMG_HANDLE hDbgReqNotify; ++ ++ /* Kernel mode linear address of device registers */ ++ void __iomem *pvRegsBaseKM; ++ ++ IMG_HANDLE hRegMapping; ++ ++ /* System physical address of device registers */ ++ IMG_CPU_PHYADDR sRegsPhysBase; ++ /* Register region size in bytes */ ++ IMG_UINT32 ui32RegSize; ++ ++ PVRSRV_STUB_PBDESC *psStubPBDescListKM; ++ ++ /* Firmware memory context info */ ++ DEVMEM_CONTEXT *psKernelDevmemCtx; ++ DEVMEM_HEAP *psFirmwareMainHeap; ++ DEVMEM_HEAP *psFirmwareConfigHeap; ++ MMU_CONTEXT *psKernelMMUCtx; ++ ++ void *pvDeviceMemoryHeap; ++ ++ /* Kernel CCB */ ++ DEVMEM_MEMDESC *psKernelCCBCtlMemDesc; /*!< memdesc for Kernel CCB control */ ++ RGXFWIF_CCB_CTL *psKernelCCBCtl; /*!< kernel mapping for Kernel CCB control */ ++ DEVMEM_MEMDESC *psKernelCCBMemDesc; /*!< memdesc for Kernel CCB */ ++ IMG_UINT8 *psKernelCCB; /*!< kernel mapping for Kernel CCB */ ++ DEVMEM_MEMDESC *psKernelCCBRtnSlotsMemDesc; /*!< Return slot array for Kernel CCB commands */ ++ IMG_UINT32 *pui32KernelCCBRtnSlots; /*!< kernel mapping for return slot array */ ++ ++ /* Firmware CCB */ ++ DEVMEM_MEMDESC *psFirmwareCCBCtlMemDesc; /*!< memdesc for Firmware CCB control */ ++ RGXFWIF_CCB_CTL *psFirmwareCCBCtl; /*!< kernel mapping for Firmware CCB control */ ++ DEVMEM_MEMDESC *psFirmwareCCBMemDesc; /*!< memdesc for Firmware CCB */ ++ IMG_UINT8 *psFirmwareCCB; /*!< kernel mapping for Firmware CCB */ ++ ++ /* Workload Estimation Firmware CCB */ ++ DEVMEM_MEMDESC *psWorkEstFirmwareCCBCtlMemDesc; /*!< memdesc for Workload Estimation Firmware CCB control */ ++ RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtl; /*!< kernel mapping for Workload Estimation Firmware CCB control */ ++ DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */ ++ IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */ ++ ++#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) ++ /* Counter dumping */ ++ DEVMEM_MEMDESC *psCounterBufferMemDesc; /*!< mem desc for counter dumping buffer */ ++ POS_LOCK hCounterDumpingLock; /*!< Lock for guarding access to counter dumping buffer */ ++#endif ++ ++ PVRSRV_MEMALLOCFLAGS_T uiFWPoisonOnFreeFlag; /*!< Flag for poisoning FW allocations when freed */ ++ ++ IMG_BOOL bIgnoreHWReportedBVNC; /*!< Ignore BVNC reported by HW */ ++ ++ /* ++ if we don't preallocate the pagetables we must ++ insert newly allocated page tables dynamically ++ */ ++ void *pvMMUContextList; ++ ++ IMG_UINT32 ui32ClkGateStatusReg; ++ IMG_UINT32 ui32ClkGateStatusMask; ++ ++ DEVMEM_MEMDESC *psRGXFWCodeMemDesc; ++ IMG_DEV_VIRTADDR sFWCodeDevVAddrBase; ++ IMG_UINT32 ui32FWCodeSizeInBytes; ++ DEVMEM_MEMDESC *psRGXFWDataMemDesc; ++ IMG_DEV_VIRTADDR sFWDataDevVAddrBase; ++ RGX_MIPS_ADDRESS_TRAMPOLINE *psTrampoline; ++ ++ DEVMEM_MEMDESC *psRGXFWCorememCodeMemDesc; ++ IMG_DEV_VIRTADDR sFWCorememCodeDevVAddrBase; ++ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr; ++ IMG_UINT32 ui32FWCorememCodeSizeInBytes; ++ ++ DEVMEM_MEMDESC *psRGXFWIfCorememDataStoreMemDesc; ++ IMG_DEV_VIRTADDR sFWCorememDataStoreDevVAddrBase; ++ RGXFWIF_DEV_VIRTADDR sFWCorememDataStoreFWAddr; ++ ++ 
DEVMEM_MEMDESC *psRGXFWAlignChecksMemDesc; ++ ++#if defined(PDUMP) ++ DEVMEM_MEMDESC *psRGXFWSigTAChecksMemDesc; ++ IMG_UINT32 ui32SigTAChecksSize; ++ ++ DEVMEM_MEMDESC *psRGXFWSig3DChecksMemDesc; ++ IMG_UINT32 ui32Sig3DChecksSize; ++ ++ DEVMEM_MEMDESC *psRGXFWSigTDM2DChecksMemDesc; ++ IMG_UINT32 ui32SigTDM2DChecksSize; ++ ++ IMG_BOOL bDumpedKCCBCtlAlready; ++ ++ POS_SPINLOCK hSyncCheckpointSignalSpinLock; /*!< Guards data shared between an atomic & sleepable-context */ ++#endif ++ ++ POS_LOCK hRGXFWIfBufInitLock; /*!< trace buffer lock for initialisation phase */ ++ ++ DEVMEM_MEMDESC *psRGXFWIfTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */ ++ DEVMEM_MEMDESC *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM]; /*!< memdesc of actual FW trace (log) buffer(s) */ ++ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */ ++ ++ DEVMEM_MEMDESC *psRGXFWIfFwSysDataMemDesc; /*!< memdesc of the firmware-shared system data structure */ ++ RGXFWIF_SYSDATA *psRGXFWIfFwSysData; /*!< structure containing trace control data and actual trace buffer */ ++ ++ DEVMEM_MEMDESC *psRGXFWIfFwOsDataMemDesc; /*!< memdesc of the firmware-shared os structure */ ++ RGXFWIF_OSDATA *psRGXFWIfFwOsData; /*!< structure containing trace control data and actual trace buffer */ ++ ++#if defined(SUPPORT_TBI_INTERFACE) ++ DEVMEM_MEMDESC *psRGXFWIfTBIBufferMemDesc; /*!< memdesc of actual FW TBI buffer */ ++ RGXFWIF_DEV_VIRTADDR sRGXFWIfTBIBuffer; /*!< TBI buffer data */ ++ IMG_UINT32 ui32FWIfTBIBufferSize; ++#endif ++ ++ DEVMEM_MEMDESC *psRGXFWIfHWRInfoBufCtlMemDesc; ++ RGXFWIF_HWRINFOBUF *psRGXFWIfHWRInfoBufCtl; ++ IMG_UINT32 ui32ClockSource; ++ IMG_UINT32 ui32LastClockSource; ++ ++ DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCbCtlMemDesc; ++ RGXFWIF_GPU_UTIL_FWCB *psRGXFWIfGpuUtilFWCb; ++ ++ DEVMEM_MEMDESC *psRGXFWIfHWPerfBufMemDesc; ++ IMG_BYTE *psRGXFWIfHWPerfBuf; ++ IMG_UINT32 ui32RGXFWIfHWPerfBufSize; /* in bytes */ ++ ++ DEVMEM_MEMDESC *psRGXFWIfRegCfgMemDesc; ++ ++ DEVMEM_MEMDESC *psRGXFWIfHWPerfCountersMemDesc; ++ ++ DEVMEM_MEMDESC *psRGXFWIfConnectionCtlMemDesc; ++ RGXFWIF_CONNECTION_CTL *psRGXFWIfConnectionCtl; ++ ++ DEVMEM_MEMDESC *psRGXFWHeapGuardPageReserveMemDesc; ++ DEVMEM_MEMDESC *psRGXFWIfSysInitMemDesc; ++ RGXFWIF_SYSINIT *psRGXFWIfSysInit; ++ ++ DEVMEM_MEMDESC *psRGXFWIfOsInitMemDesc; ++ RGXFWIF_OSINIT *psRGXFWIfOsInit; ++ ++ DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc; ++ RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg; ++ ++ /* Additional guest firmware memory context info */ ++ DEVMEM_HEAP *psGuestFirmwareRawHeap[RGX_NUM_OS_SUPPORTED]; ++ DEVMEM_MEMDESC *psGuestFirmwareRawMemDesc[RGX_NUM_OS_SUPPORTED]; ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Array to store data needed for workload estimation when a workload ++ has finished and its cycle time is returned to the host. */ ++ WORKEST_RETURN_DATA asReturnData[RETURN_DATA_ARRAY_SIZE]; ++ IMG_UINT32 ui32ReturnDataWO; ++ POS_LOCK hWorkEstLock; ++#endif ++ ++#if defined(SUPPORT_PDVFS) ++ /** ++ * Host memdesc and pointer to memory containing core clock rate in Hz. ++ * Firmware updates the memory on changing the core clock rate over GPIO. ++ * Note: Shared memory needs atomic access from Host driver and firmware, ++ * hence size should not be greater than memory transaction granularity. ++ * Currently it is chosen to be 32 bits. ++ */ ++ DEVMEM_MEMDESC *psRGXFWIFCoreClkRateMemDesc; ++ volatile IMG_UINT32 *pui32RGXFWIFCoreClkRate; ++ /** ++ * Last sampled core clk rate. 
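++ * Because the shared location above is a single aligned, volatile 32-bit
++ * word, taking the snapshot is one plain load; illustratively, given a
++ * PVRSRV_RGXDEV_INFO *psDevInfo:
++ *
++ *   psDevInfo->ui32CoreClkRateSnapshot = *psDevInfo->pui32RGXFWIFCoreClkRate;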
++ */ ++ volatile IMG_UINT32 ui32CoreClkRateSnapshot; ++#endif ++ ++ /* ++ HWPerf data for the RGX device ++ */ ++ ++ POS_LOCK hHWPerfLock; /*! Critical section lock that protects HWPerf code ++ * from multiple thread duplicate init/deinit ++ * and loss/freeing of FW & Host resources while in ++ * use in another thread e.g. MSIR. */ ++ ++ IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */ ++ IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */ ++ IMG_UINT32 ui32L2BufMaxPacketSize;/*!< Max allowed packet size in FW HWPerf TL (L2) buffer */ ++ IMG_BOOL bSuspendHWPerfL2DataCopy; /*! Flag to indicate if copying HWPerf data is suspended */ ++ ++ IMG_UINT32 ui32HWPerfHostFilter; /*! Event filter for HWPerfHost stream (settable by AppHint) */ ++ POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */ ++ IMG_HANDLE hHWPerfHostStream; /*! TL Stream buffer for host only event stream */ ++ IMG_UINT32 ui32HWPerfHostBufSize; /*! Host side buffer size in bytes */ ++ IMG_UINT32 ui32HWPerfHostLastOrdinal; /*! Ordinal of the last packet emitted in HWPerfHost TL stream. ++ * Guarded by hLockHWPerfHostStream */ ++ IMG_UINT32 ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events. Guarded by hHWPerfHostSpinLock */ ++ IMG_UINT8 *pui8DeferredEvents; /*! List of HWPerfHost events yet to be emitted in the TL stream. ++ * Events generated from atomic context are deferred "emitted" ++ * as the "emission" code can sleep */ ++ IMG_UINT16 ui16DEReadIdx; /*! Read index in the above deferred events buffer */ ++ IMG_UINT16 ui16DEWriteIdx; /*! Write index in the above deferred events buffer */ ++ void *pvHostHWPerfMISR; /*! MISR to emit pending/deferred events in HWPerfHost TL stream */ ++ POS_SPINLOCK hHWPerfHostSpinLock; /*! Guards data shared between an atomic & sleepable-context */ ++#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) ++ IMG_UINT32 ui32DEHighWatermark; /*! High watermark of deferred events buffer usage. Protected by ++ *! hHWPerfHostSpinLock */ ++ /* Max number of times DeferredEmission waited for an atomic-context to "finish" packet write */ ++ IMG_UINT32 ui32WaitForAtomicCtxPktHighWatermark; /*! Protected by hLockHWPerfHostStream */ ++ /* Whether warning has been logged about an atomic-context packet loss (due to too long wait for "write" finish) */ ++ IMG_BOOL bWarnedAtomicCtxPktLost; ++ /* Max number of times DeferredEmission scheduled-out to give a chance to the right-ordinal packet to be emitted */ ++ IMG_UINT32 ui32WaitForRightOrdPktHighWatermark; /*! 
Protected by hLockHWPerfHostStream */ ++ /* Whether warning has been logged about an packet loss (due to too long wait for right ordinal to emit) */ ++ IMG_BOOL bWarnedPktOrdinalBroke; ++#endif ++ ++ void *pvGpuFtraceData; ++ ++ /* Poll data for detecting firmware fatal errors */ ++ IMG_UINT32 aui32CrLastPollCount[RGXFW_THREAD_NUM]; ++ IMG_UINT32 ui32KCCBCmdsExecutedLastTime; ++ IMG_BOOL bKCCBCmdsWaitingLastTime; ++ IMG_UINT32 ui32GEOTimeoutsLastTime; ++ IMG_UINT32 ui32InterruptCountLastTime; ++ IMG_UINT32 ui32MissingInterruptsLastTime; ++ ++ /* Client stall detection */ ++ IMG_UINT32 ui32StalledClientMask; ++ ++ IMG_BOOL bWorkEstEnabled; ++ IMG_BOOL bPDVFSEnabled; ++ ++ void *pvLISRData; ++ void *pvMISRData; ++ void *pvAPMISRData; ++ RGX_ACTIVEPM_CONF eActivePMConf; ++ ++ volatile IMG_UINT32 aui32SampleIRQCount[RGXFW_THREAD_NUM]; ++ ++ DEVMEM_MEMDESC *psRGXFaultAddressMemDesc; ++ ++ DEVMEM_MEMDESC *psSLC3FenceMemDesc; ++ ++ /* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */ ++ IMG_UINT32 ui32ZSBufferCurrID; /*!< ID assigned to the next deferred devmem allocation */ ++ IMG_UINT32 ui32FreelistCurrID; /*!< ID assigned to the next freelist */ ++ ++ POS_LOCK hLockZSBuffer; /*!< Lock to protect simultaneous access to ZSBuffers */ ++ DLLIST_NODE sZSBufferHead; /*!< List of on-demand ZSBuffers */ ++ POS_LOCK hLockFreeList; /*!< Lock to protect simultaneous access to Freelists */ ++ DLLIST_NODE sFreeListHead; /*!< List of growable Freelists */ ++ PSYNC_PRIM_CONTEXT hSyncPrimContext; ++ PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim; ++ ++ IMG_UINT32 ui32ActivePMReqOk; ++ IMG_UINT32 ui32ActivePMReqDenied; ++ IMG_UINT32 ui32ActivePMReqNonIdle; ++ IMG_UINT32 ui32ActivePMReqRetry; ++ IMG_UINT32 ui32ActivePMReqTotal; ++ ++ IMG_HANDLE hProcessQueuesMISR; ++ ++ IMG_UINT32 ui32DeviceFlags; /*!< Flags to track general device state */ ++ ++ /* GPU DVFS Table */ ++ RGX_GPU_DVFS_TABLE *psGpuDVFSTable; ++ ++ /* Pointer to function returning the GPU utilisation statistics since the last ++ * time the function was called. Supports different users at the same time. ++ * ++ * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked) ++ * in microseconds since the last time the function was called ++ * by a specific user (identified by hGpuUtilUser) ++ * ++ * Returns PVRSRV_OK in case the call completed without errors, ++ * some other value otherwise. ++ */ ++ PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_HANDLE hGpuUtilUser, ++ RGXFWIF_GPU_UTIL_STATS *psReturnStats); ++ ++ /* Pointer to function that checks if the physical GPU IRQ ++ * line has been asserted and clears it if so */ ++ IMG_BOOL (*pfnRGXAckIrq) (struct _PVRSRV_RGXDEV_INFO_ *psDevInfo); ++ ++ POS_LOCK hGPUUtilLock; ++ ++ /* Register configuration */ ++ RGX_REG_CONFIG sRegCongfig; ++ ++ IMG_BOOL bRGXPowered; ++ DLLIST_NODE sMemoryContextList; ++ ++ POSWR_LOCK hRenderCtxListLock; ++ POSWR_LOCK hComputeCtxListLock; ++ POSWR_LOCK hTransferCtxListLock; ++ POSWR_LOCK hTDMCtxListLock; ++ POSWR_LOCK hMemoryCtxListLock; ++ POSWR_LOCK hKickSyncCtxListLock; ++ ++ /* Linked list of deferred KCCB commands due to a full KCCB. ++ * Access to members sKCCBDeferredCommandsListHead and ui32KCCBDeferredCommandsCount ++ * are protected by the hLockKCCBDeferredCommandsList spin lock. 
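++ *
++ * Illustrative enqueue under that lock (a sketch only: psCmd and its
++ * sListNode member are assumed, and OSSpinLockAcquire/Release follow the
++ * OS abstraction layer's flags-by-name convention):
++ *
++ *   OS_SPINLOCK_FLAGS uiFlags;
++ *   OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
++ *   dllist_add_to_tail(&psDevInfo->sKCCBDeferredCommandsListHead,
++ *                      &psCmd->sListNode);
++ *   psDevInfo->ui32KCCBDeferredCommandsCount++;
++ *   OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);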
*/ ++ POS_SPINLOCK hLockKCCBDeferredCommandsList; /*!< Protects deferred KCCB commands list */ ++ DLLIST_NODE sKCCBDeferredCommandsListHead; ++ IMG_UINT32 ui32KCCBDeferredCommandsCount; /*!< No of commands in the deferred list */ ++ ++ /* Linked lists of contexts on this device */ ++ DLLIST_NODE sRenderCtxtListHead; ++ DLLIST_NODE sComputeCtxtListHead; ++ DLLIST_NODE sTransferCtxtListHead; ++ DLLIST_NODE sTDMCtxtListHead; ++ DLLIST_NODE sKickSyncCtxtListHead; ++ ++ DLLIST_NODE sCommonCtxtListHead; ++ POSWR_LOCK hCommonCtxtListLock; ++ IMG_UINT32 ui32CommonCtxtCurrentID; /*!< ID assigned to the next common context */ ++ ++ POS_LOCK hDebugFaultInfoLock; /*!< Lock to protect the debug fault info list */ ++ POS_LOCK hMMUCtxUnregLock; /*!< Lock to protect list of unregistered MMU contexts */ ++ ++ POS_LOCK hNMILock; /*!< Lock to protect NMI operations */ ++ ++#if defined(SUPPORT_VALIDATION) ++ IMG_UINT32 ui32ValidationFlags; /*!< Validation flags for host driver */ ++#endif ++ RGX_DUST_STATE sDustReqState; ++ ++ RGX_LAYER_PARAMS sLayerParams; ++ ++ RGXFWIF_DM eBPDM; /*!< Current breakpoint data master */ ++ IMG_BOOL bBPSet; /*!< A Breakpoint has been set */ ++ POS_LOCK hBPLock; /*!< Lock for break point operations */ ++ ++ IMG_UINT32 ui32CoherencyTestsDone; ++ ++ ATOMIC_T iCCBSubmissionOrdinal; /* Rolling count used to indicate CCB submission order (all CCBs) */ ++ POS_LOCK hCCBRecoveryLock; /* Lock to protect pvEarliestStalledClientCCB and ui32OldestSubmissionOrdinal variables */ ++ void *pvEarliestStalledClientCCB; /* Will point to cCCB command to unblock in the event of a stall */ ++ IMG_UINT32 ui32OldestSubmissionOrdinal; /* Earliest submission ordinal of CCB entry found so far */ ++ IMG_UINT32 ui32SLRHoldoffCounter; /* Decremented each time health check is called until zero. SLR only happen when zero. 
*/ ++ ++ POS_LOCK hCCBStallCheckLock; /* Lock used to guard against multiple threads simultaneously checking for stalled CCBs */ ++ ++#if defined(SUPPORT_FIRMWARE_GCOV) ++ /* Firmware gcov buffer */ ++ DEVMEM_MEMDESC *psFirmwareGcovBufferMemDesc; /*!< mem desc for Firmware gcov dumping buffer */ ++ IMG_UINT32 ui32FirmwareGcovSize; ++#endif ++ ++#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) ++ struct ++ { ++ IMG_UINT64 ui64timerGray; ++ IMG_UINT64 ui64timerBinary; ++ IMG_UINT64 *pui64uscTimers; ++ } sRGXTimerValues; ++#endif ++ ++#if defined(SUPPORT_VALIDATION) ++ struct ++ { ++ IMG_UINT64 ui64RegVal; ++ struct completion sRegComp; ++ } sFwRegs; ++#endif ++ ++ IMG_HANDLE hTQCLISharedMem; /*!< TQ Client Shared Mem PMR */ ++ IMG_HANDLE hTQUSCSharedMem; /*!< TQ USC Shared Mem PMR */ ++ ++#if defined(SUPPORT_VALIDATION) ++ IMG_UINT32 ui32TestSLRInterval; /* Don't enqueue an update sync checkpoint every nth kick */ ++ IMG_UINT32 ui32TestSLRCount; /* (used to test SLR operation) */ ++ IMG_UINT32 ui32SLRSkipFWAddr; ++#endif ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++ DEVMEM_MEMDESC *psRGXFWIfSecureBufMemDesc; ++ DEVMEM_MEMDESC *psRGXFWIfNonSecureBufMemDesc; ++#endif ++ ++ /* Timer Queries */ ++ IMG_UINT32 ui32ActiveQueryId; /*!< id of the active line */ ++ IMG_BOOL bSaveStart; /*!< save the start time of the next kick on the device*/ ++ IMG_BOOL bSaveEnd; /*!< save the end time of the next kick on the device*/ ++ ++ DEVMEM_MEMDESC *psStartTimeMemDesc; /*!< memdesc for Start Times */ ++ IMG_UINT64 *pui64StartTimeById; /*!< CPU mapping of the above */ ++ ++ DEVMEM_MEMDESC *psEndTimeMemDesc; /*!< memdesc for End Timer */ ++ IMG_UINT64 *pui64EndTimeById; /*!< CPU mapping of the above */ ++ ++ IMG_UINT32 aui32ScheduledOnId[RGX_MAX_TIMER_QUERIES]; /*!< kicks Scheduled on QueryId */ ++ DEVMEM_MEMDESC *psCompletedMemDesc; /*!< kicks Completed on QueryId */ ++ IMG_UINT32 *pui32CompletedById; /*!< CPU mapping of the above */ ++ ++#if !defined(PVRSRV_USE_BRIDGE_LOCK) ++ POS_LOCK hTimerQueryLock; /*!< lock to protect simultaneous access to timer query members */ ++#endif ++ ++ PVRSRV_RGXDEV_ERROR_COUNTS sErrorCounts; /*!< struct containing device error counts */ ++ ++ IMG_UINT32 ui32HostSafetyEventMask;/*!< mask of the safety events handled by the driver */ ++ ++ RGX_CONTEXT_RESET_REASON eLastDeviceError; /*!< device error reported to client */ ++#if defined(SUPPORT_VALIDATION) ++ IMG_UINT32 ui32ECCRAMErrInjModule; ++ IMG_UINT32 ui32ECCRAMErrInjInterval; ++#endif ++ ++ IMG_UINT32 ui32Log2Non4KPgSize; /* Page size of Non4k heap in log2 form */ ++} PVRSRV_RGXDEV_INFO; ++ ++ ++ ++typedef struct _RGX_TIMING_INFORMATION_ ++{ ++ /*! GPU default core clock speed in Hz */ ++ IMG_UINT32 ui32CoreClockSpeed; ++ ++ /*! Active Power Management: GPU actively requests the host driver to be powered off */ ++ IMG_BOOL bEnableActivePM; ++ ++ /*! Enable the GPU to power off internal Power Islands independently from the host driver */ ++ IMG_BOOL bEnableRDPowIsland; ++ ++ /*! Active Power Management: Delay between the GPU idle and the request to the host */ ++ IMG_UINT32 ui32ActivePMLatencyms; ++ ++} RGX_TIMING_INFORMATION; ++ ++typedef struct _RGX_DATA_ ++{ ++ /*! 
Timing information */ ++ RGX_TIMING_INFORMATION *psRGXTimingInfo; ++} RGX_DATA; ++ ++ ++/* ++ RGX PDUMP register bank name (prefix) ++*/ ++#define RGX_PDUMPREG_NAME "RGXREG" ++#define RGX_TB_PDUMPREG_NAME "EMUREG" ++ ++#endif /* RGXDEVICE_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxfw_log_helper.h b/drivers/gpu/drm/img-rogue/rgxfw_log_helper.h +new file mode 100644 +index 000000000000..275b63aca46b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxfw_log_helper.h +@@ -0,0 +1,79 @@ ++/*************************************************************************/ /*! ++@File rgxfw_log_helper.h ++@Title Firmware TBI logging helper function ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Platform Generic ++@Description This file contains some helper code to make TBI logging possible ++ Specifically, it uses the SFIDLIST xmacro to trace ids back to ++ the original strings. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef RGXFW_LOG_HELPER_H ++#define RGXFW_LOG_HELPER_H ++ ++#include "rgx_fwif_sf.h" ++ ++static const IMG_CHAR *const groups[]= { ++#define X(A,B) #B, ++ RGXFW_LOG_SFGROUPLIST ++#undef X ++}; ++ ++/* idToStringID : Search SFs tuples {id,string} for a matching id. ++ * return index to array if found or RGXFW_SF_LAST if none found. ++ * bsearch could be used as ids are in increasing order. 
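++ * For example, a binary search over the same table (a sketch: it assumes
++ * the entry count ui32NumSFs is known, whereas the real table is
++ * terminated by RGXFW_SF_LAST; the mid == 0 test guards against unsigned
++ * wrap-around):
++ *
++ *   IMG_UINT32 lo = 0, hi = ui32NumSFs - 1, mid;
++ *   while (lo <= hi)
++ *   {
++ *       mid = lo + ((hi - lo) / 2);
++ *       if (psSFs[mid].ui32Id == ui32CheckData) return mid;
++ *       if (psSFs[mid].ui32Id < ui32CheckData) lo = mid + 1;
++ *       else if (mid == 0) break;
++ *       else hi = mid - 1;
++ *   }
++ *   return (IMG_UINT32)RGXFW_SF_LAST;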
*/ ++#if defined(RGX_FIRMWARE) ++static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXFW_STID_FMT *const psSFs) ++#else ++static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXKM_STID_FMT *const psSFs) ++#endif ++{ ++ IMG_UINT32 i = 0, ui32Id = (IMG_UINT32)RGXFW_SF_LAST; ++ ++ for ( i = 0 ; psSFs[i].ui32Id != (IMG_UINT32)RGXFW_SF_LAST ; i++) ++ { ++ if ( ui32CheckData == psSFs[i].ui32Id ) ++ { ++ ui32Id = i; ++ break; ++ } ++ } ++ return ui32Id; ++} ++ ++#endif /* RGXFW_LOG_HELPER_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxfwdbg.c b/drivers/gpu/drm/img-rogue/rgxfwdbg.c +new file mode 100644 +index 000000000000..1e7a51f5fc83 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxfwdbg.c +@@ -0,0 +1,282 @@ ++/*************************************************************************/ /*! ++@File ++@Title Debugging and miscellaneous functions server implementation ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Kernel services functions for debugging and other ++ miscellaneous functionality. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include "pvrsrv.h"
++#include "pvr_debug.h"
++#include "rgxfwdbg.h"
++#include "rgxfwutils.h"
++#include "rgxta3d.h"
++#include "pdump_km.h"
++#include "mmu_common.h"
++#include "devicemem_server.h"
++#include "osfunc.h"
++
++PVRSRV_ERROR
++PVRSRVRGXFWDebugQueryFWLogKM(
++ const CONNECTION_DATA *psConnection,
++ const PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 *pui32RGXFWLogType)
++{
++ PVRSRV_RGXDEV_INFO *psDevInfo;
++
++ PVR_UNREFERENCED_PARAMETER(psConnection);
++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
++
++ if (!psDeviceNode || !pui32RGXFWLogType)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ psDevInfo = psDeviceNode->pvDevice;
++
++ if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBufCtl)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ *pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType;
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++PVRSRVRGXFWDebugSetFWLogKM(
++ const CONNECTION_DATA *psConnection,
++ const PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32RGXFWLogType)
++{
++ RGXFWIF_KCCB_CMD sLogTypeUpdateCmd;
++ PVRSRV_DEV_POWER_STATE ePowerState;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
++ IMG_UINT32 ui32OldRGXFWLogType;
++ IMG_UINT32 ui32kCCBCommandSlot;
++ IMG_BOOL bWaitForFwUpdate = IMG_FALSE;
++
++ PVR_UNREFERENCED_PARAMETER(psConnection);
++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
++
++ ui32OldRGXFWLogType = psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType;
++
++ /* check log type is valid */
++ if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ OSLockAcquire(psDevInfo->hRGXFWIfBufInitLock);
++
++ /* set the new log type and ensure the new log type is written to memory
++ * before requesting the FW to read it
++ */
++ psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32RGXFWLogType;
++ OSMemoryBarrier(&psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType);
++
++ /* Allocate firmware trace buffer resource(s) if not already done */
++ if (RGXTraceBufferIsInitRequired(psDevInfo))
++ {
++ eError = RGXTraceBufferInitOnDemandResources(psDevInfo, RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS);
++ }
++#if defined(SUPPORT_TBI_INTERFACE)
++ /* Check if LogType is TBI then allocate resource on demand and copy
++ * SFs to it
++ */
++ else if (RGXTBIBufferIsInitRequired(psDevInfo))
++ {
++ eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
++ }
++
++ /* TBI buffer address will be 0 if not initialised */
++ sLogTypeUpdateCmd.uCmdData.sTBIBuffer = psDevInfo->sRGXFWIfTBIBuffer;
++#else
++ sLogTypeUpdateCmd.uCmdData.sTBIBuffer.ui32Addr = 0;
++#endif
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Failed to allocate resource on-demand. Reverting to old value",
++ __func__));
++ psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32OldRGXFWLogType;
++ OSMemoryBarrier(&psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType);
++
++ OSLockRelease(psDevInfo->hRGXFWIfBufInitLock);
++
++ return eError;
++ }
++
++ OSLockRelease(psDevInfo->hRGXFWIfBufInitLock);
++
++ eError = PVRSRVPowerLock((PPVRSRV_DEVICE_NODE) psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Failed to acquire power lock (%u)",
++ __func__,
++ eError));
++ return eError;
++ }
++
++ eError = PVRSRVGetDevicePowerState((PPVRSRV_DEVICE_NODE) psDeviceNode, &ePowerState);
++
++ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
++ {
++ /* Ask the FW to update its cached version of logType value */
++ sLogTypeUpdateCmd.eCmdType = RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE;
++
++ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
++ &sLogTypeUpdateCmd,
++ PDUMP_FLAGS_CONTINUOUS,
++ &ui32kCCBCommandSlot);
++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock);
++ bWaitForFwUpdate = IMG_TRUE;
++ }
++
++unlock:
++ PVRSRVPowerUnlock((PPVRSRV_DEVICE_NODE) psDeviceNode);
++ if (bWaitForFwUpdate)
++ {
++ /* Wait for the LogType value to be updated in FW */
++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS);
++ PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate");
++ }
++ return eError;
++}
++
++PVRSRV_ERROR
++PVRSRVRGXFWDebugSetHCSDeadlineKM(
++ CONNECTION_DATA *psConnection,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32HCSDeadlineMS)
++{
++ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
++ PVR_UNREFERENCED_PARAMETER(psConnection);
++
++ return RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadlineMS);
++}
++
++PVRSRV_ERROR
++PVRSRVRGXFWDebugSetOSidPriorityKM(
++ CONNECTION_DATA *psConnection,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32OSid,
++ IMG_UINT32 ui32OSidPriority)
++{
++ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
++ PVR_UNREFERENCED_PARAMETER(psConnection);
++
++ return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32OSidPriority);
++}
++
++PVRSRV_ERROR
++PVRSRVRGXFWDebugSetOSNewOnlineStateKM(
++ CONNECTION_DATA *psConnection,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ IMG_UINT32 ui32OSid,
++ IMG_UINT32 ui32OSNewState)
++{
++ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
++ RGXFWIF_OS_STATE_CHANGE eState;
++ PVR_UNREFERENCED_PARAMETER(psConnection);
++
++ eState = (ui32OSNewState) ?
(RGXFWIF_OS_ONLINE) : (RGXFWIF_OS_OFFLINE); ++ return RGXFWSetFwOsState(psDevInfo, ui32OSid, eState); ++} ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugPHRConfigureKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32PHRMode) ++{ ++ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ return RGXFWConfigPHR(psDevInfo, ++ ui32PHRMode); ++} ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugWdgConfigureKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32WdgPeriodUs) ++{ ++ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ return RGXFWConfigWdg(psDevInfo, ++ ui32WdgPeriodUs); ++} ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugDumpFreelistPageListKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice; ++ DLLIST_NODE *psNode, *psNext; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if (dllist_is_empty(&psDevInfo->sFreeListHead)) ++ { ++ return PVRSRV_OK; ++ } ++ ++ PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------")); ++ ++ OSLockAcquire(psDevInfo->hLockFreeList); ++ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) ++ { ++ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); ++ RGXDumpFreeListPageList(psFreeList); ++ } ++ OSLockRelease(psDevInfo->hLockFreeList); ++ ++ PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------")); ++ ++ return PVRSRV_OK; ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/rgxfwdbg.h b/drivers/gpu/drm/img-rogue/rgxfwdbg.h +new file mode 100644 +index 000000000000..38d487edbcae +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxfwdbg.h +@@ -0,0 +1,113 @@ ++/*************************************************************************/ /*! ++@File ++@Title Debugging and miscellaneous functions server interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Kernel services functions for debugging and other ++ miscellaneous functionality. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXFWDBG_H) ++#define RGXFWDBG_H ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "device.h" ++#include "pmr.h" ++ ++#include "connection_server.h" ++ ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugInitFWImageKM( ++ PMR *psFWImgDestPMR, ++ PMR *psFWImgSrcPMR, ++ IMG_UINT64 ui64FWImgLen, ++ PMR *psFWImgSigPMR, ++ IMG_UINT64 ui64FWSigLen); ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugQueryFWLogKM( ++ const CONNECTION_DATA *psConnection, ++ const PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 *pui32RGXFWLogType); ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugSetFWLogKM( ++ const CONNECTION_DATA *psConnection, ++ const PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32RGXFWLogType); ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugSetHCSDeadlineKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32HCSDeadlineMS); ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugSetOSidPriorityKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32OSid, ++ IMG_UINT32 ui32OSidPriority); ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugSetOSNewOnlineStateKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32OSid, ++ IMG_UINT32 ui32OSNewState); ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugPHRConfigureKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32PHRMode); ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugWdgConfigureKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32WdgPeriodUs); ++ ++PVRSRV_ERROR ++PVRSRVRGXFWDebugDumpFreelistPageListKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++#endif +diff --git a/drivers/gpu/drm/img-rogue/rgxfwimageutils.c b/drivers/gpu/drm/img-rogue/rgxfwimageutils.c +new file mode 100644 +index 000000000000..0a9813bea84c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxfwimageutils.c +@@ -0,0 +1,1082 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services Firmware image utilities used at init time ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Services Firmware image utilities used at init time ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* The routines implemented here are built on top of an abstraction layer to ++ * hide DDK/OS-specific details in case they are used outside of the DDK ++ * (e.g. when trusted device is enabled). ++ * Any new dependency should be added to rgxlayer.h. ++ * Any new code should be built on top of the existing abstraction layer, ++ * which should be extended when necessary. */ ++#include "rgxfwimageutils.h" ++#include "pvrsrv.h" ++ ++ ++/************************************************************************ ++* FW layout information ++************************************************************************/ ++#define MAX_NUM_ENTRIES (8) ++static RGX_FW_LAYOUT_ENTRY asRGXFWLayoutTable[MAX_NUM_ENTRIES]; ++static IMG_UINT32 ui32LayoutEntryNum; ++ ++ ++static RGX_FW_LAYOUT_ENTRY* GetTableEntry(const void *hPrivate, RGX_FW_SECTION_ID eId) ++{ ++ IMG_UINT32 i; ++ ++ for (i = 0; i < ui32LayoutEntryNum; i++) ++ { ++ if (asRGXFWLayoutTable[i].eId == eId) ++ { ++ return &asRGXFWLayoutTable[i]; ++ } ++ } ++ ++ RGXErrorLog(hPrivate, "%s: id %u not found, returning entry 0\n", ++ __func__, eId); ++ ++ return &asRGXFWLayoutTable[0]; ++} ++ ++/*! 
++*******************************************************************************
++
++ @Function FindMMUSegment
++
++ @Description Given a 32 bit FW address attempt to find the corresponding
++ pointer to FW allocation
++
++ @Input ui32OffsetIn : 32 bit FW address
++ @Input pvHostFWCodeAddr : Pointer to FW code
++ @Input pvHostFWDataAddr : Pointer to FW data
++ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code
++ @Input pvHostFWCorememDataAddr : Pointer to FW coremem data
++ @Output uiHostAddrOut : CPU pointer equivalent to ui32OffsetIn
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn,
++ void *pvHostFWCodeAddr,
++ void *pvHostFWDataAddr,
++ void *pvHostFWCorememCodeAddr,
++ void *pvHostFWCorememDataAddr,
++ void **uiHostAddrOut)
++{
++ IMG_UINT32 i;
++
++ for (i = 0; i < ui32LayoutEntryNum; i++)
++ {
++ if ((ui32OffsetIn >= asRGXFWLayoutTable[i].ui32BaseAddr) &&
++ (ui32OffsetIn < (asRGXFWLayoutTable[i].ui32BaseAddr + asRGXFWLayoutTable[i].ui32AllocSize)))
++ {
++ switch (asRGXFWLayoutTable[i].eType)
++ {
++ case FW_CODE:
++ *uiHostAddrOut = pvHostFWCodeAddr;
++ break;
++
++ case FW_DATA:
++ *uiHostAddrOut = pvHostFWDataAddr;
++ break;
++
++ case FW_COREMEM_CODE:
++ *uiHostAddrOut = pvHostFWCorememCodeAddr;
++ break;
++
++ case FW_COREMEM_DATA:
++ *uiHostAddrOut = pvHostFWCorememDataAddr;
++ break;
++
++ default:
++ return PVRSRV_ERROR_INIT_FAILURE;
++ }
++
++ goto found;
++ }
++ }
++
++ return PVRSRV_ERROR_INIT_FAILURE;
++
++found:
++ if (*uiHostAddrOut == NULL)
++ {
++ return PVRSRV_OK;
++ }
++
++ /* Direct Mem write to mapped memory */
++ ui32OffsetIn -= asRGXFWLayoutTable[i].ui32BaseAddr;
++ ui32OffsetIn += asRGXFWLayoutTable[i].ui32AllocOffset;
++
++ /* Add offset to pointer to FW allocation only if
++ * that allocation is available
++ */
++ if (*uiHostAddrOut)
++ {
++ *(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn;
++ }
++
++ return PVRSRV_OK;
++}
++
++/*!
++*******************************************************************************
++
++ @Function RGXFWConfigureSegID
++
++ @Description Configures a single segment of the Segment MMU
++ (base, limit and out_addr)
++
++ @Input hPrivate : Implementation specific data
++ @Input ui64SegOutAddr : Segment output base address (40 bit devVaddr)
++ @Input ui32SegBase : Segment input base address (32 bit FW address)
++ @Input ui32SegLimit : Segment size
++ @Input ui32SegID : Segment ID
++ @Input ppui32BootConf : Pointer to bootloader data
++
++ @Return void
++
++******************************************************************************/
++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
++static void RGXFWConfigureSegID(const void *hPrivate,
++ IMG_UINT64 ui64SegOutAddr,
++ IMG_UINT32 ui32SegBase,
++ IMG_UINT32 ui32SegLimit,
++ IMG_UINT32 ui32SegID,
++ IMG_UINT32 **ppui32BootConf)
++{
++ IMG_UINT32 *pui32BootConf = *ppui32BootConf;
++ IMG_UINT32 ui32SegOutAddr0 = ui64SegOutAddr & 0x00000000FFFFFFFFUL;
++ IMG_UINT32 ui32SegOutAddr1 = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL;
++
++ /* META segments have a minimum size */
++ IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ?
++ RGXFW_SEGMMU_ALIGN : ui32SegLimit; ++ /* the limit is an offset, therefore off = size - 1 */ ++ ui32LimitOff -= 1; ++ ++ RGXCommentLog(hPrivate, ++ "* Seg%d: meta_addr = 0x%08x, devv_addr = 0x%" IMG_UINT64_FMTSPECx ", limit = 0x%x", ++ ui32SegID, ++ ui32SegBase, ++ ui64SegOutAddr, ++ ui32LimitOff); ++ ++ ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE; ++ ++ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID); ++ *pui32BootConf++ = ui32SegBase; ++ ++ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID); ++ *pui32BootConf++ = ui32LimitOff; ++ ++ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID); ++ *pui32BootConf++ = ui32SegOutAddr0; ++ ++ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID); ++ *pui32BootConf++ = ui32SegOutAddr1; ++ ++ *ppui32BootConf = pui32BootConf; ++} ++#endif ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXFWConfigureSegMMU ++ ++ @Description Configures META's Segment MMU ++ ++ @Input hPrivate : Implementation specific data ++ @Input psFWCodeDevVAddrBase : FW code base device virtual address ++ @Input psFWDataDevVAddrBase : FW data base device virtual address ++ @Input ppui32BootConf : Pointer to bootloader data ++ ++ @Return void ++ ++******************************************************************************/ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++static void RGXFWConfigureSegMMU(const void *hPrivate, ++ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase, ++ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase, ++ IMG_UINT32 **ppui32BootConf) ++{ ++ IMG_UINT64 ui64SegOutAddrTop; ++ IMG_UINT32 i; ++ ++ PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase); ++ ++ /* Configure Segment MMU */ ++ RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********"); ++ ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) ++ { ++ ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWPRIV); ++ } ++ else ++ { ++ ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV, RGXFW_SEGMMU_META_BIFDM_ID); ++ } ++ ++ for (i = 0; i < ui32LayoutEntryNum; i++) ++ { ++ /* ++ * FW code is using the bootloader segment which is already configured on boot. ++ * FW coremem code and data don't use the segment MMU. ++ * Only the FW data segment needs to be configured. ++ */ ++ ++ if (asRGXFWLayoutTable[i].eType == FW_DATA) ++ { ++ IMG_UINT64 ui64SegOutAddr; ++ IMG_UINT32 ui32SegId = RGXFW_SEGMMU_DATA_ID; ++ ++ ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr | ui64SegOutAddrTop) + ++ asRGXFWLayoutTable[i].ui32AllocOffset; ++ ++ RGXFWConfigureSegID(hPrivate, ++ ui64SegOutAddr, ++ asRGXFWLayoutTable[i].ui32BaseAddr, ++ asRGXFWLayoutTable[i].ui32AllocSize, ++ ui32SegId, ++ ppui32BootConf); /*write the sequence to the bootldr */ ++ ++ break; ++ } ++ } ++} ++#endif ++ ++/*! 
++******************************************************************************* ++ ++ @Function RGXFWConfigureMetaCaches ++ ++ @Description Configure and enable the Meta instruction and data caches ++ ++ @Input hPrivate : Implementation specific data ++ @Input ui32NumThreads : Number of FW threads in use ++ @Input ppui32BootConf : Pointer to bootloader data ++ ++ @Return void ++ ++******************************************************************************/ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++static void RGXFWConfigureMetaCaches(const void *hPrivate, ++ IMG_UINT32 ui32NumThreads, ++ IMG_UINT32 **ppui32BootConf) ++{ ++ IMG_UINT32 *pui32BootConf = *ppui32BootConf; ++ IMG_UINT32 ui32DCacheT0, ui32ICacheT0; ++ IMG_UINT32 ui32DCacheT1, ui32ICacheT1; ++ IMG_UINT32 ui32DCacheT2, ui32ICacheT2; ++ IMG_UINT32 ui32DCacheT3, ui32ICacheT3; ++ ++#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600) ++#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14) ++#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6) ++#define META_CR_SYSC_DCPART(n) (0x04830200 + (n)*0x8) ++#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31) ++#define META_CR_SYSC_ICPART(n) (0x04830220 + (n)*0x8) ++#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16) ++#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF) ++#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7) ++#define META_CR_MMCU_DCACHE_CTRL (0x04830018) ++#define META_CR_MMCU_ICACHE_CTRL (0x04830020) ++#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1) ++ ++ RGXCommentLog(hPrivate, "********** Meta caches configuration *********"); ++ ++ /* Initialise I/Dcache settings */ ++ ui32DCacheT0 = ui32DCacheT1 = (IMG_UINT32)META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; ++ ui32DCacheT2 = ui32DCacheT3 = (IMG_UINT32)META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; ++ ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0; ++ ++ if (ui32NumThreads == 1) ++ { ++ ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; ++ ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; ++ } ++ else ++ { ++ ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE; ++ ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE; ++ ++ ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE | ++ META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF; ++ ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE | ++ META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF; ++ } ++ ++ /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */ ++ *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL; ++ *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN | ++ META_CR_MMCU_LOCAL_EBCTRL_DCWIN; ++ ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_MMCU_LOCAL_EBCTRL, ++ META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN); ++ ++ /* Data cache partitioning thread 0 to 3 */ ++ *pui32BootConf++ = META_CR_SYSC_DCPART(0); ++ *pui32BootConf++ = ui32DCacheT0; ++ *pui32BootConf++ = META_CR_SYSC_DCPART(1); ++ *pui32BootConf++ = ui32DCacheT1; ++ *pui32BootConf++ = META_CR_SYSC_DCPART(2); ++ *pui32BootConf++ = ui32DCacheT2; ++ *pui32BootConf++ = META_CR_SYSC_DCPART(3); ++ *pui32BootConf++ = ui32DCacheT3; ++ ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_SYSC_DCPART(0), ui32DCacheT0); ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_SYSC_DCPART(1), ui32DCacheT1); ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_SYSC_DCPART(2), ui32DCacheT2); ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 
++ META_CR_SYSC_DCPART(3), ui32DCacheT3); ++ ++ /* Enable data cache hits */ ++ *pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL; ++ *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN; ++ ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_MMCU_DCACHE_CTRL, ++ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); ++ ++ /* Instruction cache partitioning thread 0 to 3 */ ++ *pui32BootConf++ = META_CR_SYSC_ICPART(0); ++ *pui32BootConf++ = ui32ICacheT0; ++ *pui32BootConf++ = META_CR_SYSC_ICPART(1); ++ *pui32BootConf++ = ui32ICacheT1; ++ *pui32BootConf++ = META_CR_SYSC_ICPART(2); ++ *pui32BootConf++ = ui32ICacheT2; ++ *pui32BootConf++ = META_CR_SYSC_ICPART(3); ++ *pui32BootConf++ = ui32ICacheT3; ++ ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_SYSC_ICPART(0), ui32ICacheT0); ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_SYSC_ICPART(1), ui32ICacheT1); ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_SYSC_ICPART(2), ui32ICacheT2); ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_SYSC_ICPART(3), ui32ICacheT3); ++ ++ /* Enable instruction cache hits */ ++ *pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL; ++ *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN; ++ ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ META_CR_MMCU_ICACHE_CTRL, ++ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); ++ ++ *pui32BootConf++ = 0x040000C0; ++ *pui32BootConf++ = 0; ++ ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 0x040000C0, 0); ++ ++ *ppui32BootConf = pui32BootConf; ++} ++#endif ++ ++/*! ++******************************************************************************* ++ ++ @Function ProcessLDRCommandStream ++ ++ @Description Process the output of the Meta toolchain in the .LDR format ++ copying code and data sections into their final location and ++ passing some information to the Meta bootloader ++ ++ @Input hPrivate : Implementation specific data ++ @Input pbLDR : Pointer to FW blob ++ @Input pvHostFWCodeAddr : Pointer to FW code ++ @Input pvHostFWDataAddr : Pointer to FW data ++ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code ++ @Input pvHostFWCorememDataAddr : Pointer to FW coremem data ++ @Input ppui32BootConf : Pointer to bootloader data ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, ++ const IMG_BYTE* pbLDR, ++ void* pvHostFWCodeAddr, ++ void* pvHostFWDataAddr, ++ void* pvHostFWCorememCodeAddr, ++ void* pvHostFWCorememDataAddr, ++ IMG_UINT32 **ppui32BootConf) ++{ ++ RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR; ++ RGX_META_LDR_L1_DATA_BLK *psL1Data = ++ (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData); ++ ++ IMG_UINT32 *pui32BootConf = ppui32BootConf ? 
*ppui32BootConf : NULL; ++ IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate); ++ ++ RGXCommentLog(hPrivate, "**********************************************"); ++ RGXCommentLog(hPrivate, "************** Begin LDR Parsing *************"); ++ RGXCommentLog(hPrivate, "**********************************************"); ++ ++ while (psL1Data != NULL) ++ { ++ if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd)) ++ { ++ /* Don't process comment blocks */ ++ goto NextBlock; ++ } ++ ++ switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK) ++ { ++ case RGX_META_LDR_CMD_LOADMEM: ++ { ++ RGX_META_LDR_L2_DATA_BLK *psL2Block = ++ (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]); ++ IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0]; ++ IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */; ++ void *pvWriteAddr; ++ PVRSRV_ERROR eError; ++ ++ if (!RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize) && ++ !RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize)) ++ { ++ /* Global range is aliased to local range */ ++ ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT; ++ } ++ ++ eError = FindMMUSegment(ui32Offset, ++ pvHostFWCodeAddr, ++ pvHostFWDataAddr, ++ pvHostFWCorememCodeAddr, ++ pvHostFWCorememDataAddr, ++ &pvWriteAddr); ++ ++ if (eError != PVRSRV_OK) ++ { ++ RGXErrorLog(hPrivate, ++ "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment", ++ ui32Offset, ui32DataSize); ++ return eError; ++ } ++ ++ /* Write to FW allocation only if available */ ++ if (pvWriteAddr) ++ { ++ RGXMemCopy(hPrivate, ++ pvWriteAddr, ++ psL2Block->aui32BlockData, ++ ui32DataSize); ++ } ++ ++ break; ++ } ++ case RGX_META_LDR_CMD_LOADCORE: ++ case RGX_META_LDR_CMD_LOADMMREG: ++ { ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ case RGX_META_LDR_CMD_START_THREADS: ++ { ++ /* Don't process this block */ ++ break; ++ } ++ case RGX_META_LDR_CMD_ZEROMEM: ++ { ++ IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0]; ++ IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1]; ++ void *pvWriteAddr; ++ PVRSRV_ERROR eError; ++ ++ if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize)) ++ { ++ /* cannot zero coremem directly */ ++ break; ++ } ++ ++ /* Global range is aliased to local range */ ++ ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT; ++ ++ eError = FindMMUSegment(ui32Offset, ++ pvHostFWCodeAddr, ++ pvHostFWDataAddr, ++ pvHostFWCorememCodeAddr, ++ pvHostFWCorememDataAddr, ++ &pvWriteAddr); ++ ++ if (eError != PVRSRV_OK) ++ { ++ RGXErrorLog(hPrivate, ++ "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment", ++ ui32Offset, ui32ByteCount); ++ return eError; ++ } ++ ++ /* Write to FW allocation only if available */ ++ if (pvWriteAddr) ++ { ++ RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount); ++ } ++ ++ break; ++ } ++ case RGX_META_LDR_CMD_CONFIG: ++ { ++ RGX_META_LDR_L2_DATA_BLK *psL2Block = ++ (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]); ++ RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData; ++ IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */; ++ IMG_UINT32 ui32CurrBlockSize = 0; ++ ++ while (ui32L2BlockSize) ++ { ++ switch (psConfigCommand->ui32Type) ++ { ++ case RGX_META_LDR_CFG_PAUSE: ++ case RGX_META_LDR_CFG_READ: ++ { ++ ui32CurrBlockSize = 8; ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ case RGX_META_LDR_CFG_WRITE: ++ { ++ IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0]; ++ IMG_UINT32 ui32RegisterValue = 
psConfigCommand->aui32BlockData[1]; ++ ++ /* Only write to bootloader if we got a valid ++ * pointer to the FW code allocation ++ */ ++ if (pui32BootConf) ++ { ++ /* Do register write */ ++ *pui32BootConf++ = ui32RegisterOffset; ++ *pui32BootConf++ = ui32RegisterValue; ++ } ++ ++ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", ++ ui32RegisterOffset, ui32RegisterValue); ++ ++ ui32CurrBlockSize = 12; ++ break; ++ } ++ case RGX_META_LDR_CFG_MEMSET: ++ case RGX_META_LDR_CFG_MEMCHECK: ++ { ++ ui32CurrBlockSize = 20; ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ default: ++ { ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ } ++ ui32L2BlockSize -= ui32CurrBlockSize; ++ psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize); ++ } ++ ++ break; ++ } ++ default: ++ { ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ } ++ ++NextBlock: ++ ++ if (psL1Data->ui32Next == 0xFFFFFFFF) ++ { ++ psL1Data = NULL; ++ } ++ else ++ { ++ psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next); ++ } ++ } ++ ++ if (pui32BootConf) ++ { ++ *ppui32BootConf = pui32BootConf; ++ } ++ ++ RGXCommentLog(hPrivate, "**********************************************"); ++ RGXCommentLog(hPrivate, "************** End Loader Parsing ************"); ++ RGXCommentLog(hPrivate, "**********************************************"); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function ProcessELFCommandStream ++ ++ @Description Process a file in .ELF format copying code and data sections ++ into their final location ++ ++ @Input hPrivate : Implementation specific data ++ @Input pbELF : Pointer to FW blob ++ @Input pvHostFWCodeAddr : Pointer to FW code ++ @Input pvHostFWDataAddr : Pointer to FW data ++ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code ++ @Input pvHostFWCorememDataAddr : Pointer to FW coremem data ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate, ++ const IMG_BYTE *pbELF, ++ void *pvHostFWCodeAddr, ++ void *pvHostFWDataAddr, ++ void* pvHostFWCorememCodeAddr, ++ void* pvHostFWCorememDataAddr) ++{ ++ IMG_UINT32 ui32Entry; ++ IMG_ELF_HDR *psHeader = (IMG_ELF_HDR *)pbELF; ++ IMG_ELF_PROGRAM_HDR *psProgramHeader = ++ (IMG_ELF_PROGRAM_HDR *)(pbELF + psHeader->ui32Ephoff); ++ PVRSRV_ERROR eError; ++ ++ for (ui32Entry = 0; ui32Entry < psHeader->ui32Ephnum; ui32Entry++, psProgramHeader++) ++ { ++ void *pvWriteAddr; ++ ++ /* Only consider loadable entries in the ELF segment table */ ++ if (psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue; ++ ++ eError = FindMMUSegment(psProgramHeader->ui32Pvaddr, ++ pvHostFWCodeAddr, ++ pvHostFWDataAddr, ++ pvHostFWCorememCodeAddr, ++ pvHostFWCorememDataAddr, ++ &pvWriteAddr); ++ ++ if (eError != PVRSRV_OK) ++ { ++ RGXErrorLog(hPrivate, ++ "%s: Addr 0x%x (size: %d) not found in any segment",__func__, ++ psProgramHeader->ui32Pvaddr, ++ psProgramHeader->ui32Pfilesz); ++ return eError; ++ } ++ ++ /* Write to FW allocation only if available */ ++ if (pvWriteAddr) ++ { ++ RGXMemCopy(hPrivate, ++ pvWriteAddr, ++ (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset), ++ psProgramHeader->ui32Pfilesz); ++ ++ RGXMemSet(hPrivate, ++ (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz, ++ 0, ++ psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz); ++ } ++ } ++ ++ return PVRSRV_OK; ++} ++ ++IMG_UINT32 RGXGetFWImageSectionOffset(const 
void *hPrivate, RGX_FW_SECTION_ID eId) ++{ ++ RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); ++ ++ return psEntry->ui32AllocOffset; ++} ++ ++IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, RGX_FW_SECTION_ID eId) ++{ ++ RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); ++ ++ return psEntry->ui32MaxSize; ++} ++ ++IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, RGX_FW_SECTION_ID eId) ++{ ++ RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); ++ ++ return psEntry->ui32AllocSize; ++} ++ ++IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, RGX_FW_SECTION_ID eId) ++{ ++ RGX_FW_LAYOUT_ENTRY *psEntry = GetTableEntry(hPrivate, eId); ++ ++ return psEntry->ui32BaseAddr; ++} ++ ++PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, ++ const IMG_BYTE *pbRGXFirmware, ++ const IMG_UINT32 ui32RGXFirmwareSize, ++ IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, ++ IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, ++ IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, ++ IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize) ++{ ++ RGX_FW_INFO_HEADER *psInfoHeader; ++ const IMG_BYTE *pbRGXFirmwareInfo; ++ const IMG_BYTE *pbRGXFirmwareLayout; ++ IMG_UINT32 i; ++ ++ if (pbRGXFirmware == NULL || ui32RGXFirmwareSize == 0 || ui32RGXFirmwareSize <= FW_BLOCK_SIZE) ++ { ++ RGXErrorLog(hPrivate, "%s: Invalid FW binary at %p, size %u", ++ __func__, pbRGXFirmware, ui32RGXFirmwareSize); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ ++ /* ++ * Acquire pointer to the FW info header within the FW image. ++ * The format of the header in the FW image might not be the one expected ++ * by the driver, but the driver should still be able to correctly read ++ * the information below, as long as new/incompatible elements are added ++ * at the end of the header (they will be ignored by the driver). 
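++ *
++ * Illustrative example (the field name is hypothetical): a newer FW image
++ * that appends an extra ui32Reserved field to the end of the header leaves
++ * every field read below at its original offset, so the checks that follow
++ * still read valid data.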
++ */ ++ ++ pbRGXFirmwareInfo = pbRGXFirmware + ui32RGXFirmwareSize - FW_BLOCK_SIZE; ++ psInfoHeader = (RGX_FW_INFO_HEADER*)pbRGXFirmwareInfo; ++ ++ /* If any of the following checks fails, the FW will likely not work properly */ ++ ++ if (psInfoHeader->ui32InfoVersion != FW_INFO_VERSION) ++ { ++ RGXErrorLog(hPrivate, "%s: FW info version mismatch (expected: %u, found: %u)", ++ __func__, ++ (IMG_UINT32) FW_INFO_VERSION, ++ psInfoHeader->ui32InfoVersion); ++ } ++ ++ if (psInfoHeader->ui32HeaderLen != sizeof(RGX_FW_INFO_HEADER)) ++ { ++ RGXErrorLog(hPrivate, "%s: FW info header sizes mismatch (expected: %u, found: %u)", ++ __func__, ++ (IMG_UINT32) sizeof(RGX_FW_INFO_HEADER), ++ psInfoHeader->ui32HeaderLen); ++ } ++ ++ if (psInfoHeader->ui32LayoutEntrySize != sizeof(RGX_FW_LAYOUT_ENTRY)) ++ { ++ RGXErrorLog(hPrivate, "%s: FW layout entry sizes mismatch (expected: %u, found: %u)", ++ __func__, ++ (IMG_UINT32) sizeof(RGX_FW_LAYOUT_ENTRY), ++ psInfoHeader->ui32LayoutEntrySize); ++ } ++ ++ if (psInfoHeader->ui32LayoutEntryNum > MAX_NUM_ENTRIES) ++ { ++ RGXErrorLog(hPrivate, "%s: Not enough storage for the FW layout table (max: %u entries, found: %u)", ++ __func__, ++ MAX_NUM_ENTRIES, ++ psInfoHeader->ui32LayoutEntryNum); ++ } ++ ++#if defined(RGX_FEATURE_MIPS_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) ++ { ++ if (psInfoHeader->ui32FwPageSize != RGXGetOSPageSize(hPrivate)) ++ { ++ RGXErrorLog(hPrivate, "%s: FW page size mismatch (expected: %u, found: %u)", ++ __func__, ++ (IMG_UINT32) RGXGetOSPageSize(hPrivate), ++ psInfoHeader->ui32FwPageSize); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ } ++#endif ++ ++ ui32LayoutEntryNum = psInfoHeader->ui32LayoutEntryNum; ++ ++ ++ /* ++ * Copy FW layout table from FW image to local array. ++ * One entry is copied at a time and the copy is limited to what the driver ++ * expects to find in it. Assuming that new/incompatible elements ++ * are added at the end of each entry, the loop below adapts the table ++ * in the FW image into the format expected by the driver. 
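++ *
++ * Illustrative sizes only: if the image stores 24-byte layout entries while
++ * the driver's RGX_FW_LAYOUT_ENTRY is 20 bytes, iteration i below copies the
++ * first 20 bytes of the entry found at byte offset i * ui32LayoutEntrySize.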
++ */ ++ ++ pbRGXFirmwareLayout = pbRGXFirmwareInfo + psInfoHeader->ui32HeaderLen; ++ ++ for (i = 0; i < ui32LayoutEntryNum; i++) ++ { ++ RGX_FW_LAYOUT_ENTRY *psOutEntry = &asRGXFWLayoutTable[i]; ++ ++ RGX_FW_LAYOUT_ENTRY *psInEntry = (RGX_FW_LAYOUT_ENTRY*) ++ (pbRGXFirmwareLayout + i * psInfoHeader->ui32LayoutEntrySize); ++ ++ RGXMemCopy(hPrivate, ++ (void*)psOutEntry, ++ (void*)psInEntry, ++ sizeof(RGX_FW_LAYOUT_ENTRY)); ++ } ++ ++ ++ /* Calculate how much memory the FW needs for its code and data segments */ ++ ++ *puiFWCodeAllocSize = 0; ++ *puiFWDataAllocSize = 0; ++ *puiFWCorememCodeAllocSize = 0; ++ *puiFWCorememDataAllocSize = 0; ++ ++ for (i = 0; i < ui32LayoutEntryNum; i++) ++ { ++ switch (asRGXFWLayoutTable[i].eType) ++ { ++ case FW_CODE: ++ *puiFWCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; ++ break; ++ ++ case FW_DATA: ++ *puiFWDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; ++ break; ++ ++ case FW_COREMEM_CODE: ++ *puiFWCorememCodeAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; ++ break; ++ ++ case FW_COREMEM_DATA: ++ *puiFWCorememDataAllocSize += asRGXFWLayoutTable[i].ui32AllocSize; ++ break; ++ ++ default: ++ RGXErrorLog(hPrivate, "%s: Unknown FW section type %u\n", ++ __func__, asRGXFWLayoutTable[i].eType); ++ break; ++ } ++ } ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, ++ const IMG_BYTE *pbRGXFirmware, ++ void *pvFWCode, ++ void *pvFWData, ++ void *pvFWCorememCode, ++ void *pvFWCorememData, ++ PVRSRV_FW_BOOT_PARAMS *puFWParams) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_BOOL bMIPS = IMG_FALSE; ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ IMG_BOOL bRISCV = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR); ++#endif ++ IMG_BOOL bMETA; ++ ++#if defined(RGX_FEATURE_MIPS_BIT_MASK) ++ bMIPS = (IMG_BOOL)RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS); ++#endif ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ bMETA = (IMG_BOOL)(!bMIPS && !bRISCV); ++#else ++ bMETA = !bMIPS; ++#endif ++ ++ if (bMETA) ++ { ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ IMG_UINT32 *pui32BootConf = NULL; ++ /* Skip bootloader configuration if a pointer to the FW code ++ * allocation is not available ++ */ ++ if (pvFWCode) ++ { ++ /* This variable points to the bootloader code which is mostly ++ * a sequence of pairs ++ */ ++ pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET; ++ ++ /* Slave port and JTAG accesses are privileged */ ++ *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD; ++ *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN; ++ ++ RGXFWConfigureSegMMU(hPrivate, ++ &puFWParams->sMeta.sFWCodeDevVAddr, ++ &puFWParams->sMeta.sFWDataDevVAddr, ++ &pui32BootConf); ++ } ++ ++ /* Process FW image data stream */ ++ eError = ProcessLDRCommandStream(hPrivate, ++ pbRGXFirmware, ++ pvFWCode, ++ pvFWData, ++ pvFWCorememCode, ++ pvFWCorememData, ++ &pui32BootConf); ++ if (eError != PVRSRV_OK) ++ { ++ RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError); ++ return eError; ++ } ++ ++ /* Skip bootloader configuration if a pointer to the FW code ++ * allocation is not available ++ */ ++ if (pvFWCode) ++ { ++ IMG_UINT32 ui32NumThreads = puFWParams->sMeta.ui32NumThreads; ++ ++ if ((ui32NumThreads == 0) || (ui32NumThreads > 2)) ++ { ++ RGXErrorLog(hPrivate, ++ "ProcessFWImage: Wrong Meta threads configuration, using one thread only"); ++ ++ ui32NumThreads = 1; ++ } ++ ++ RGXFWConfigureMetaCaches(hPrivate, ++ ui32NumThreads, ++ &pui32BootConf); ++ ++ /* Signal the end of the conf 
sequence */ ++ *pui32BootConf++ = 0x0; ++ *pui32BootConf++ = 0x0; ++ ++ if (puFWParams->sMeta.uiFWCorememCodeSize && (puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr != 0)) ++ { ++ *pui32BootConf++ = puFWParams->sMeta.sFWCorememCodeFWAddr.ui32Addr; ++ *pui32BootConf++ = puFWParams->sMeta.uiFWCorememCodeSize; ++ } ++ else ++ { ++ *pui32BootConf++ = 0; ++ *pui32BootConf++ = 0; ++ } ++ ++#if defined(RGX_FEATURE_META_DMA_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_DMA)) ++ { ++ *pui32BootConf++ = (IMG_UINT32) (puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr >> 32); ++ *pui32BootConf++ = (IMG_UINT32) puFWParams->sMeta.sFWCorememCodeDevVAddr.uiAddr; ++ } ++ else ++#endif ++ { ++ *pui32BootConf++ = 0; ++ *pui32BootConf++ = 0; ++ } ++ } ++#endif /* defined(RGX_FEATURE_META_MAX_VALUE_IDX) */ ++ } ++#if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES) ++ else if (bMIPS) ++ { ++ /* Process FW image data stream */ ++ eError = ProcessELFCommandStream(hPrivate, ++ pbRGXFirmware, ++ pvFWCode, ++ pvFWData, ++ NULL, ++ NULL); ++ if (eError != PVRSRV_OK) ++ { ++ RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError); ++ return eError; ++ } ++ ++ if (pvFWData) ++ { ++ RGXMIPSFW_BOOT_DATA *psBootData = (RGXMIPSFW_BOOT_DATA*) ++ /* To get a pointer to the bootloader configuration data start from a pointer to the FW image... */ ++ IMG_OFFSET_ADDR(pvFWData, ++ /* ... jump to the boot/NMI data page... */ ++ (RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA) ++ /* ... and then jump to the bootloader data offset within the page */ ++ + RGXMIPSFW_BOOTLDR_CONF_OFFSET)); ++ ++ /* Rogue Registers physical address */ ++ psBootData->ui64RegBase = puFWParams->sMips.sGPURegAddr.uiAddr; ++ ++ /* MIPS Page Table physical address */ ++ psBootData->ui32PTLog2PageSize = puFWParams->sMips.ui32FWPageTableLog2PageSize; ++ psBootData->ui32PTNumPages = puFWParams->sMips.ui32FWPageTableNumPages; ++ psBootData->aui64PTPhyAddr[0U] = puFWParams->sMips.asFWPageTableAddr[0U].uiAddr; ++ psBootData->aui64PTPhyAddr[1U] = puFWParams->sMips.asFWPageTableAddr[1U].uiAddr; ++ psBootData->aui64PTPhyAddr[2U] = puFWParams->sMips.asFWPageTableAddr[2U].uiAddr; ++ psBootData->aui64PTPhyAddr[3U] = puFWParams->sMips.asFWPageTableAddr[3U].uiAddr; ++ ++ /* MIPS Stack Pointer Physical Address */ ++ psBootData->ui64StackPhyAddr = puFWParams->sMips.sFWStackAddr.uiAddr; ++ ++ /* Reserved for future use */ ++ psBootData->ui32Reserved1 = 0; ++ psBootData->ui32Reserved2 = 0; ++ } ++ } ++#endif /* #if defined(RGXMIPSFW_MAX_NUM_PAGETABLE_PAGES) */ ++ else ++ { ++ /* Process FW image data stream */ ++ eError = ProcessELFCommandStream(hPrivate, ++ pbRGXFirmware, ++ pvFWCode, ++ pvFWData, ++ pvFWCorememCode, ++ pvFWCorememData); ++ if (eError != PVRSRV_OK) ++ { ++ RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError); ++ return eError; ++ } ++ ++ if (pvFWData) ++ { ++ RGXRISCVFW_BOOT_DATA *psBootData = (RGXRISCVFW_BOOT_DATA*) ++ IMG_OFFSET_ADDR(pvFWData, RGXRISCVFW_BOOTLDR_CONF_OFFSET); ++ ++ psBootData->ui64CorememCodeDevVAddr = puFWParams->sRISCV.sFWCorememCodeDevVAddr.uiAddr; ++ psBootData->ui32CorememCodeFWAddr = puFWParams->sRISCV.sFWCorememCodeFWAddr.ui32Addr; ++ psBootData->ui32CorememCodeSize = puFWParams->sRISCV.uiFWCorememCodeSize; ++ ++ psBootData->ui64CorememDataDevVAddr = puFWParams->sRISCV.sFWCorememDataDevVAddr.uiAddr; ++ psBootData->ui32CorememDataFWAddr = puFWParams->sRISCV.sFWCorememDataFWAddr.ui32Addr; ++ psBootData->ui32CorememDataSize = puFWParams->sRISCV.uiFWCorememDataSize; ++ } ++ } 
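++
++ /* Whichever FW processor path was taken above (META LDR, MIPS ELF or
++ * RISC-V ELF), every failure has already returned early, so eError can
++ * only hold PVRSRV_OK at this point.
++ */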
++ ++ return eError; ++} +diff --git a/drivers/gpu/drm/img-rogue/rgxfwimageutils.h b/drivers/gpu/drm/img-rogue/rgxfwimageutils.h +new file mode 100644 +index 000000000000..e5f9a2afea77 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxfwimageutils.h +@@ -0,0 +1,223 @@ ++/*************************************************************************/ /*! ++@File ++@Title Header for Services Firmware image utilities used at init time ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for Services Firmware image utilities used at init time ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXFWIMAGEUTILS_H ++#define RGXFWIMAGEUTILS_H ++ ++/* The routines declared here are built on top of an abstraction layer to ++ * hide DDK/OS-specific details in case they are used outside of the DDK ++ * (e.g. when DRM security is enabled). ++ * Any new dependency should be added to rgxlayer.h. ++ * Any new code should be built on top of the existing abstraction layer, ++ * which should be extended when necessary. ++ */ ++#include "rgxlayer.h" ++ ++/*! 
++******************************************************************************* ++ ++ @Function RGXGetFWImageSectionOffset ++ ++ @Input hPrivate : Implementation specific data ++ @Input eId : Section id ++ ++ @Description Return offset of a Firmware section, relative to the beginning ++ of the code or data allocation (depending on the section id) ++ ++******************************************************************************/ ++IMG_UINT32 RGXGetFWImageSectionOffset(const void *hPrivate, ++ RGX_FW_SECTION_ID eId); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXGetFWImageSectionMaxSize ++ ++ @Input hPrivate : Implementation specific data ++ @Input eId : Section id ++ ++ @Description Return maximum size (not allocation size) of a Firmware section ++ ++******************************************************************************/ ++IMG_UINT32 RGXGetFWImageSectionMaxSize(const void *hPrivate, ++ RGX_FW_SECTION_ID eId); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXGetFWImageSectionAllocSize ++ ++ @Input hPrivate : Implementation specific data ++ @Input eId : Section id ++ ++ @Description Return allocation size of a Firmware section ++ ++******************************************************************************/ ++IMG_UINT32 RGXGetFWImageSectionAllocSize(const void *hPrivate, ++ RGX_FW_SECTION_ID eId); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXGetFWImageSectionAddress ++ ++ @Input hPrivate : Implementation specific data ++ @Input eId : Section id ++ ++ @Description Return base address of a Firmware section ++ ++******************************************************************************/ ++IMG_UINT32 RGXGetFWImageSectionAddress(const void *hPrivate, ++ RGX_FW_SECTION_ID eId); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXGetFWImageAllocSize ++ ++ @Description Return size of Firmware code/data/coremem code allocations ++ ++ @Input hPrivate : Implementation specific data ++ @Input pbRGXFirmware : Pointer to FW binary ++ @Input ui32RGXFirmwareSize : FW binary size ++ @Output puiFWCodeAllocSize : Code size ++ @Output puiFWDataAllocSize : Data size ++ @Output puiFWCorememCodeAllocSize : Coremem code size (0 if N/A) ++ @Output puiFWCorememDataAllocSize : Coremem data size (0 if N/A) ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate, ++ const IMG_BYTE *pbRGXFirmware, ++ const IMG_UINT32 ui32RGXFirmwareSize, ++ IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize, ++ IMG_DEVMEM_SIZE_T *puiFWDataAllocSize, ++ IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize, ++ IMG_DEVMEM_SIZE_T *puiFWCorememDataAllocSize); ++ ++/*! 
++******************************************************************************* ++ ++ @Function ProcessLDRCommandStream ++ ++ @Description Process the output of the Meta toolchain in the .LDR format ++ copying code and data sections into their final location and ++ passing some information to the Meta bootloader ++ ++ @Input hPrivate : Implementation specific data ++ @Input pbLDR : Pointer to FW blob ++ @Input pvHostFWCodeAddr : Pointer to FW code ++ @Input pvHostFWDataAddr : Pointer to FW data ++ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code ++ @Input pvHostFWCorememDataAddr : Pointer to FW coremem data ++ @Input ppui32BootConf : Pointer to bootloader data ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate, ++ const IMG_BYTE* pbLDR, ++ void* pvHostFWCodeAddr, ++ void* pvHostFWDataAddr, ++ void* pvHostFWCorememCodeAddr, ++ void* pvHostFWCorememDataAddr, ++ IMG_UINT32 **ppui32BootConf); ++ ++/*! ++******************************************************************************* ++ ++ @Function ProcessELFCommandStream ++ ++ @Description Process a file in .ELF format copying code and data sections ++ into their final location ++ ++ @Input hPrivate : Implementation specific data ++ @Input pbELF : Pointer to FW blob ++ @Input pvHostFWCodeAddr : Pointer to FW code ++ @Input pvHostFWDataAddr : Pointer to FW data ++ @Input pvHostFWCorememCodeAddr : Pointer to FW coremem code ++ @Input pvHostFWCorememDataAddr : Pointer to FW coremem data ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate, ++ const IMG_BYTE *pbELF, ++ void *pvHostFWCodeAddr, ++ void *pvHostFWDataAddr, ++ void* pvHostFWCorememCodeAddr, ++ void* pvHostFWCorememDataAddr); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXProcessFWImage ++ ++ @Description Process the Firmware binary blob copying code and data ++ sections into their final location and passing some ++ information to the Firmware bootloader. ++ If a pointer to the final memory location for FW code or data ++ is not valid (NULL) then the relative section will not be ++ processed. ++ ++ @Input hPrivate : Implementation specific data ++ @Input pbRGXFirmware : Pointer to FW blob ++ @Input pvFWCode : Pointer to FW code ++ @Input pvFWData : Pointer to FW data ++ @Input pvFWCorememCode : Pointer to FW coremem code ++ @Input pvFWCorememData : Pointer to FW coremem data ++ @Input puFWParams : Parameters used by the FW at boot time ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate, ++ const IMG_BYTE *pbRGXFirmware, ++ void *pvFWCode, ++ void *pvFWData, ++ void *pvFWCorememCode, ++ void *pvFWCorememData, ++ PVRSRV_FW_BOOT_PARAMS *puFWParams); ++ ++#endif /* RGXFWIMAGEUTILS_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxfwtrace_strings.c b/drivers/gpu/drm/img-rogue/rgxfwtrace_strings.c +new file mode 100644 +index 000000000000..d95050871994 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxfwtrace_strings.c +@@ -0,0 +1,56 @@ ++/*************************************************************************/ /*! ++@File rgxfwtrace_strings.c ++@Title RGX Firmware trace strings ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "rgx_fwif_sf.h" ++#include "fwtrace_string.h" ++ ++/* The tuple pairs that will be generated using XMacros will be stored here. ++ * This macro definition must match the definition of SFids in rgx_fwif_sf.h ++ */ ++const RGXKM_STID_FMT SFs[]= { ++#define X(a, b, c, d, e) { RGXFW_LOG_CREATESFID(a,b,e), d }, ++ RGXFW_LOG_SFIDLIST ++#undef X ++}; ++ ++const IMG_UINT32 g_ui32SFsCount = ARRAY_SIZE(SFs); +diff --git a/drivers/gpu/drm/img-rogue/rgxfwutils.c b/drivers/gpu/drm/img-rogue/rgxfwutils.c +new file mode 100644 +index 000000000000..c26bb50fd5d1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxfwutils.c +@@ -0,0 +1,7825 @@ ++/*************************************************************************/ /*! ++@File ++@Title Rogue firmware utility routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Rogue firmware utility routines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if defined(__linux__) ++#include ++#else ++#include ++#endif ++ ++#include "img_defs.h" ++ ++#include "rgxdefs_km.h" ++#include "rgx_fwif_km.h" ++#include "pdump_km.h" ++#include "osfunc.h" ++#include "oskm_apphint.h" ++#include "cache_km.h" ++#include "allocmem.h" ++#include "physheap.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "devicemem_server.h" ++ ++#include "pvr_debug.h" ++#include "pvr_notifier.h" ++#include "rgxfwutils.h" ++#include "rgx_options.h" ++#include "rgx_fwif_alignchecks.h" ++#include "rgx_fwif_resetframework.h" ++#include "rgx_pdump_panics.h" ++#include "fwtrace_string.h" ++#include "rgxheapconfig.h" ++#include "pvrsrv.h" ++#include "rgxdebug.h" ++#include "rgxhwperf.h" ++#include "rgxccb.h" ++#include "rgxcompute.h" ++#include "rgxtransfer.h" ++#include "rgxpower.h" ++#include "rgxtdmtransfer.h" ++#if defined(SUPPORT_DISPLAY_CLASS) ++#include "dc_server.h" ++#endif ++#include "rgxmem.h" ++#include "rgxmmudefs_km.h" ++#include "rgxmipsmmuinit.h" ++#include "rgxta3d.h" ++#include "rgxkicksync.h" ++#include "rgxutils.h" ++#include "rgxtimecorr.h" ++#include "sync_internal.h" ++#include "sync.h" ++#include "sync_checkpoint.h" ++#include "sync_checkpoint_external.h" ++#include "tlstream.h" ++#include "devicemem_server_utils.h" ++#include "htbuffer.h" ++#include "rgx_bvnc_defs_km.h" ++#include "info_page.h" ++ ++#include "physmem_lma.h" ++#include "physmem_osmem.h" ++ ++#ifdef __linux__ ++#include /* sprintf */ ++#include "rogue_trace_events.h" ++#else ++#include ++#include ++#endif ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#include "process_stats.h" ++#endif ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++#include "rgxworkest.h" ++#endif ++ ++#if defined(SUPPORT_PDVFS) ++#include "rgxpdvfs.h" ++#endif ++ ++#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) ++#include "rgxsoctimer.h" ++#endif ++ ++#include "vz_vmm_pvz.h" ++#include "rgx_heaps.h" ++ ++/*! ++ ****************************************************************************** ++ * HWPERF ++ *****************************************************************************/ ++/* Size of the Firmware L1 HWPERF buffer in bytes (2MB). Accessed by the ++ * Firmware and host driver. */ ++#define RGXFW_HWPERF_L1_SIZE_MIN (16U) ++#define RGXFW_HWPERF_L1_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB ++#define RGXFW_HWPERF_L1_SIZE_MAX (12288U) ++ ++/* Firmware CCB length */ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (10) ++#elif defined(SUPPORT_PDVFS) ++#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (8) ++#else ++#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (5) ++#endif ++ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++const IMG_UINT32 gaui32FwOsIrqCntRegAddr[RGXFW_MAX_NUM_OS] = {IRQ_COUNTER_STORAGE_REGS}; ++#endif ++ ++/* ++ * Maximum length of time a DM can run for before the DM will be marked ++ * as out-of-time. CDM has an increased value due to longer running kernels. ++ * ++ * These deadlines are increased on FPGA, EMU and VP due to the slower ++ * execution time of these platforms. PDUMPS are also included since they ++ * are often run on EMU, FPGA or in CSim. 
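++ *
++ * For reference, the values below correspond to 40 seconds (general DMs)
++ * and 90 seconds (CDM) on silicon, and to 480 and 1000 seconds respectively
++ * on the slower platforms listed above.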
++ */ ++#if defined(FPGA) || defined(EMULATOR) || defined(VIRTUAL_PLATFORM) || defined(PDUMP) ++#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (480000) ++#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (1000000) ++#else ++#define RGXFWIF_MAX_WORKLOAD_DEADLINE_MS (40000) ++#define RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS (90000) ++#endif ++ ++/* Workload Estimation Firmware CCB length */ ++#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2 (7) ++ ++/* Size of memory buffer for firmware gcov data ++ * The actual data size is several hundred kilobytes. The buffer is an order of magnitude larger. */ ++#define RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE (4*1024*1024) ++ ++typedef struct ++{ ++ RGXFWIF_KCCB_CMD sKCCBcmd; ++ DLLIST_NODE sListNode; ++ PDUMP_FLAGS_T uiPDumpFlags; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++} RGX_DEFERRED_KCCB_CMD; ++ ++#if defined(PDUMP) ++/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the ++ * PID filter example entries ++ */ ++static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32), ++ "FW PID filtering assumes the IMG_PID type is 32-bits wide as it " ++ "generates WRW commands for loading the PID values"); ++#endif ++ ++static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo); ++static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) ++static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_SYSINIT* psFwSysInit) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc; ++ IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE( ++ RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); ++ ++ PVR_DPF_ENTERED; ++ ++ eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap, ++ 1, ++ ui32CacheLineSize, ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), ++ "FwSLC3FenceWA", ++ ppsSLC3FenceMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ ++ /* We need to map it so the heap for this allocation is set */ ++ eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc, ++ psDevInfo->psFirmwareMainHeap, ++ &psFwSysInit->sSLC3FenceDevVAddr); ++ if (eError != PVRSRV_OK) ++ { ++ DevmemFree(*ppsSLC3FenceMemDesc); ++ *ppsSLC3FenceMemDesc = NULL; ++ } ++ ++ PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc); ++} ++ ++static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo) ++{ ++ DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc; ++ ++ if (psSLC3FenceMemDesc) ++ { ++ DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc); ++ DevmemFree(psSLC3FenceMemDesc); ++ } ++} ++#endif ++ ++static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value) ++{ ++ /* ensure memory is flushed before kicking MTS */ ++ OSWriteMemoryBarrier(NULL); ++ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value); ++ ++ /* ensure the MTS kick goes through before continuing */ ++#if !defined(NO_HARDWARE) && !defined(INTEGRITY_OS) ++ OSWriteMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + RGX_CR_MTS_SCHEDULE); ++#else ++ OSWriteMemoryBarrier(NULL); ++#endif ++} ++ ++/*************************************************************************/ /*! ++@Function RGXSetupFwAllocation ++ ++@Description Sets a pointer in a firmware data structure. 
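++ More specifically, it allocates firmware-accessible memory of the
++ requested size and then optionally sets up a firmware virtual
++ address and/or a CPU mapping for the new allocation, as requested
++ through psFwPtr and ppvCpuPtr.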
++ ++@Input psDevInfo Device Info struct ++@Input uiAllocFlags Flags determining type of memory allocation ++@Input ui32Size Size of memory allocation ++@Input pszName Allocation label ++@Input ppsMemDesc pointer to the allocation's memory descriptor ++@Input psFwPtr Address of the firmware pointer to set ++@Input ppvCpuPtr Address of the cpu pointer to set ++@Input ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG ++ ++@Return PVRSRV_ERROR ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO* psDevInfo, ++ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags, ++ IMG_UINT32 ui32Size, ++ const IMG_CHAR *pszName, ++ DEVMEM_MEMDESC **ppsMemDesc, ++ RGXFWIF_DEV_VIRTADDR *psFwPtr, ++ void **ppvCpuPtr, ++ IMG_UINT32 ui32DevVAFlags) ++{ ++ PVRSRV_ERROR eError; ++#if defined(SUPPORT_AUTOVZ) ++ IMG_BOOL bClearByMemset; ++ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiAllocFlags)) ++ { ++ /* Under AutoVz the ZERO_ON_ALLOC flag is avoided as it causes the memory to ++ * be allocated from a different PMR than an allocation without the flag. ++ * When the content of an allocation needs to be recovered from physical memory ++ * on a later driver reboot, the memory then cannot be zeroed but the allocation ++ * addresses must still match. ++ * If the memory requires clearing, perform a memset after the allocation. */ ++ uiAllocFlags &= ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC; ++ bClearByMemset = IMG_TRUE; ++ } ++ else ++ { ++ bClearByMemset = IMG_FALSE; ++ } ++#endif ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate %s", pszName); ++ eError = DevmemFwAllocate(psDevInfo, ++ ui32Size, ++ uiAllocFlags, ++ pszName, ++ ppsMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate %u bytes for %s (%u)", ++ __func__, ++ ui32Size, ++ pszName, ++ eError)); ++ goto fail_alloc; ++ } ++ ++ if (psFwPtr) ++ { ++ eError = RGXSetFirmwareAddress(psFwPtr, *ppsMemDesc, 0, ui32DevVAFlags); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to acquire firmware virtual address for %s (%u)", ++ __func__, ++ pszName, ++ eError)); ++ goto fail_fwaddr; ++ } ++ } ++ ++#if defined(SUPPORT_AUTOVZ) ++ if ((bClearByMemset) || (ppvCpuPtr)) ++#else ++ if (ppvCpuPtr) ++#endif ++ { ++ void *pvTempCpuPtr; ++ ++ eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, &pvTempCpuPtr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to acquire CPU virtual address for %s (%u)", ++ __func__, ++ pszName, ++ eError)); ++ goto fail_cpuva; ++ } ++ ++#if defined(SUPPORT_AUTOVZ) ++ if (bClearByMemset) ++ { ++ if (PVRSRV_CHECK_CPU_WRITE_COMBINE(uiAllocFlags)) ++ { ++ OSCachedMemSetWMB(pvTempCpuPtr, 0, ui32Size); ++ } ++ else ++ { ++ OSDeviceMemSet(pvTempCpuPtr, 0, ui32Size); ++ } ++ } ++ if (ppvCpuPtr) ++#endif ++ { ++ *ppvCpuPtr = pvTempCpuPtr; ++ } ++#if defined(SUPPORT_AUTOVZ) ++ else ++ { ++ DevmemReleaseCpuVirtAddr(*ppsMemDesc); ++ pvTempCpuPtr = NULL; ++ } ++#endif ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: %s set up at Fw VA 0x%x and CPU VA 0x%p with alloc flags 0x%" IMG_UINT64_FMTSPECX, ++ __func__, pszName, ++ (psFwPtr) ? (psFwPtr->ui32Addr) : (0), ++ (ppvCpuPtr) ? (*ppvCpuPtr) : (NULL), ++ uiAllocFlags)); ++ ++ return eError; ++ ++fail_cpuva: ++ if (psFwPtr) ++ { ++ RGXUnsetFirmwareAddress(*ppsMemDesc); ++ } ++fail_fwaddr: ++ DevmemFree(*ppsMemDesc); ++fail_alloc: ++ return eError; ++} ++ ++/*************************************************************************/ /*! 
++@Function GetHwPerfBufferSize ++ ++@Description Computes the effective size of the HW Perf Buffer ++@Input ui32HWPerfFWBufSizeKB Device Info struct ++@Return HwPerfBufferSize ++*/ /**************************************************************************/ ++static IMG_UINT32 GetHwPerfBufferSize(IMG_UINT32 ui32HWPerfFWBufSizeKB) ++{ ++ IMG_UINT32 HwPerfBufferSize; ++ ++ /* HWPerf: Determine the size of the FW buffer */ ++ if (ui32HWPerfFWBufSizeKB == 0 || ++ ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT) ++ { ++ /* Under pvrsrvctl 0 size implies AppHint not set or is set to zero, ++ * use default size from driver constant. Set it to the default ++ * size, no logging. ++ */ ++ HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10; ++ } ++ else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX)) ++ { ++ /* Size specified as a AppHint but it is too big */ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)", ++ __func__, ++ ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX)); ++ HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MAX<<10; ++ } ++ else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN)) ++ { ++ /* Size specified as in AppHint HWPerfFWBufSizeInKB */ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Using HWPerf FW buffer size of %u KB", ++ __func__, ++ ui32HWPerfFWBufSizeKB)); ++ HwPerfBufferSize = ui32HWPerfFWBufSizeKB<<10; ++ } ++ else ++ { ++ /* Size specified as a AppHint but it is too small */ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)", ++ __func__, ++ ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN)); ++ HwPerfBufferSize = RGXFW_HWPERF_L1_SIZE_MIN<<10; ++ } ++ ++ return HwPerfBufferSize; ++} ++ ++#if defined(PDUMP) ++/*! ++******************************************************************************* ++ @Function RGXFWSetupSignatureChecks ++ @Description ++ @Input psDevInfo ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo, ++ DEVMEM_MEMDESC** ppsSigChecksMemDesc, ++ IMG_UINT32 ui32SigChecksBufSize, ++ RGXFWIF_SIGBUF_CTL* psSigBufCtl) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Allocate memory for the checks */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, ++ ui32SigChecksBufSize, ++ "FwSignatureChecks", ++ ppsSigChecksMemDesc, ++ &psSigBufCtl->sBuffer, ++ NULL, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); ++ ++ DevmemPDumpLoadMem( *ppsSigChecksMemDesc, ++ 0, ++ ui32SigChecksBufSize, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32); ++fail: ++ return eError; ++} ++#endif ++ ++ ++#if defined(SUPPORT_FIRMWARE_GCOV) ++/*! 
++*******************************************************************************
++ @Function      RGXFWSetupFirmwareGcovBuffer
++ @Description   Allocates the firmware gcov data buffer and records its FW
++                address and size in the gcov control structure.
++ @Input         psDevInfo
++
++ @Return        PVRSRV_ERROR
++******************************************************************************/
++static PVRSRV_ERROR RGXFWSetupFirmwareGcovBuffer(PVRSRV_RGXDEV_INFO* psDevInfo,
++                                                 DEVMEM_MEMDESC** ppsBufferMemDesc,
++                                                 IMG_UINT32 ui32FirmwareGcovBufferSize,
++                                                 RGXFWIF_FIRMWARE_GCOV_CTL* psFirmwareGcovCtl,
++                                                 const IMG_CHAR* pszBufferName)
++{
++	PVRSRV_ERROR eError;
++
++	/* Allocate memory for gcov */
++	eError = RGXSetupFwAllocation(psDevInfo,
++	                              (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS |
++	                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
++	                              ui32FirmwareGcovBufferSize,
++	                              pszBufferName,
++	                              ppsBufferMemDesc,
++	                              &psFirmwareGcovCtl->sBuffer,
++	                              NULL,
++	                              RFW_FWADDR_NOREF_FLAG);
++	PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation");
++
++	psFirmwareGcovCtl->ui32Size = ui32FirmwareGcovBufferSize;
++
++	return PVRSRV_OK;
++}
++#endif
++
++#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
++/*!
++ ******************************************************************************
++ @Function      RGXFWSetupCounterBuffer
++ @Description   Allocates the power-counter dump buffer and records its FW
++                address and size (in dwords) in the counter dump control
++                structure.
++ @Input         psDevInfo
++
++ @Return        PVRSRV_ERROR
++ *****************************************************************************/
++static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo,
++                                            DEVMEM_MEMDESC** ppsBufferMemDesc,
++                                            IMG_UINT32 ui32CounterDataBufferSize,
++                                            RGXFWIF_COUNTER_DUMP_CTL* psCounterDumpCtl,
++                                            const IMG_CHAR* pszBufferName)
++{
++	PVRSRV_ERROR eError;
++
++	eError = RGXSetupFwAllocation(psDevInfo,
++	                              (RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS |
++	                               PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)),
++	                              ui32CounterDataBufferSize,
++	                              "FwCounterBuffer",
++	                              ppsBufferMemDesc,
++	                              &psCounterDumpCtl->sBuffer,
++	                              NULL,
++	                              RFW_FWADDR_NOREF_FLAG);
++	PVR_LOG_RETURN_IF_ERROR(eError, "RGXSetupFwAllocation");
++
++	psCounterDumpCtl->ui32SizeInDwords = ui32CounterDataBufferSize >> 2;
++
++	return PVRSRV_OK;
++}
++#endif
++
++/*!
++ ******************************************************************************
++ @Function      RGXFWSetupAlignChecks
++ @Description   This function allocates and fills the memory needed for the
++                alignment checks of the UM and KM structures shared with the
++                firmware. The format of the data in the memory is as follows:
++                <number of KM structures>
++                <KM alignment/size check values>
++                <number of UM structures>
++                <UM alignment/size check values>
++                The UM array is passed from the user side. The firmware is
++                then responsible for filling this part of the memory. If that
++                happens, the check of the UM structures will be performed
++                by the host driver on the client's connect.
++                If the macro is not defined, the client driver fills the
++                memory and the firmware checks the alignment of all
++                structures.
++ @Input         psDeviceNode
++
++ @Return        PVRSRV_ERROR
++******************************************************************************/
++static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                          RGXFWIF_DEV_VIRTADDR *psAlignChecksDevFW)
++{
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++	IMG_UINT32 aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
++	IMG_UINT32 ui32RGXFWAlignChecksTotal;
++	IMG_UINT32* paui32AlignChecks;
++	PVRSRV_ERROR eError;
++
++	/* In this case we don't know the number of elements in the UM array.
++	 * We have to assume something, so we assume RGXFW_ALIGN_CHECKS_UM_MAX.
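++	 * The words written below then follow the layout described in the
++	 * function header: [0] the KM element count, [1..k] the values from
++	 * RGXFW_ALIGN_CHECKS_INIT_KM, [k+1] a UM element count (zeroed here,
++	 * filled in later), then room for RGXFW_ALIGN_CHECKS_UM_MAX UM values;
++	 * the two count words account for the extra "2 * sizeof(IMG_UINT32)"
++	 * in the total size computed below.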
++ */ ++ ui32RGXFWAlignChecksTotal = sizeof(aui32RGXFWAlignChecksKM) ++ + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32) ++ + 2 * sizeof(IMG_UINT32); ++ ++ /* Allocate memory for the checks */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & ++ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), ++ ui32RGXFWAlignChecksTotal, ++ "FwAlignmentChecks", ++ &psDevInfo->psRGXFWAlignChecksMemDesc, ++ psAlignChecksDevFW, ++ (void**) &paui32AlignChecks, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); ++ ++ if (!psDeviceNode->bAutoVzFwIsUp) ++ { ++ /* Copy the values */ ++ *paui32AlignChecks++ = ARRAY_SIZE(aui32RGXFWAlignChecksKM); ++ OSCachedMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], ++ sizeof(aui32RGXFWAlignChecksKM)); ++ paui32AlignChecks += ARRAY_SIZE(aui32RGXFWAlignChecksKM); ++ ++ *paui32AlignChecks = 0; ++ } ++ ++ OSWriteMemoryBarrier(paui32AlignChecks); ++ ++ DevmemPDumpLoadMem(psDevInfo->psRGXFWAlignChecksMemDesc, ++ 0, ++ ui32RGXFWAlignChecksTotal, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ return PVRSRV_OK; ++ ++fail: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo) ++{ ++ if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc); ++ psDevInfo->psRGXFWAlignChecksMemDesc = NULL; ++ } ++} ++ ++PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest, ++ DEVMEM_MEMDESC *psSrc, ++ IMG_UINT32 uiExtraOffset, ++ IMG_UINT32 ui32Flags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_DEV_VIRTADDR psDevVirtAddr; ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc); ++ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ IMG_UINT32 ui32Offset; ++ IMG_BOOL bCachedInMETA; ++ PVRSRV_MEMALLOCFLAGS_T uiDevFlags; ++ IMG_UINT32 uiGPUCacheMode; ++ ++ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire); ++ ++ /* Convert to an address in META memmap */ ++ ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE; ++ ++ /* Check in the devmem flags whether this memory is cached/uncached */ ++ DevmemGetFlags(psSrc, &uiDevFlags); ++ ++ /* Honour the META cache flags */ ++ bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; ++ ++ /* Honour the SLC cache flags */ ++ eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemDeviceCacheMode", failDevCacheMode); ++ ++ ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS; ++ ++ if (bCachedInMETA) ++ { ++ ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED; ++ } ++ else ++ { ++ ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED; ++ } ++ ++ if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode)) ++ { ++ ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED; ++ } ++ else ++ { ++ ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED; ++ } ++ ppDest->ui32Addr = ui32Offset; ++ } ++ else ++#endif ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); ++ PVR_GOTO_IF_ERROR(eError, failDevVAAcquire); ++ ++ ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) 
& 0xFFFFFFFF); ++ } ++ else ++ { ++ IMG_UINT32 ui32Offset; ++ IMG_BOOL bCachedInRISCV; ++ PVRSRV_MEMALLOCFLAGS_T uiDevFlags; ++ ++ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireDevVirtAddr", failDevVAAcquire); ++ ++ /* Convert to an address in RISCV memmap */ ++ ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE; ++ ++ /* Check in the devmem flags whether this memory is cached/uncached */ ++ DevmemGetFlags(psSrc, &uiDevFlags); ++ ++ /* Honour the RISCV cache flags */ ++ bCachedInRISCV = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0; ++ ++ if (bCachedInRISCV) ++ { ++ ui32Offset |= RGXRISCVFW_SHARED_CACHED_DATA_BASE; ++ } ++ else ++ { ++ ui32Offset |= RGXRISCVFW_SHARED_UNCACHED_DATA_BASE; ++ } ++ ++ ppDest->ui32Addr = ui32Offset; ++ } ++ ++ if ((ppDest->ui32Addr & 0x3U) != 0) ++ { ++ IMG_CHAR *pszAnnotation; ++ /* It is expected that the annotation returned by DevmemGetAnnotation() is always valid */ ++ DevmemGetAnnotation(psSrc, &pszAnnotation); ++ ++ PVR_DPF((PVR_DBG_ERROR, "%s: %s @ 0x%x is not aligned to 32 bit", ++ __func__, pszAnnotation, ppDest->ui32Addr)); ++ ++ return PVRSRV_ERROR_INVALID_ALIGNMENT; ++ } ++ ++ if (ui32Flags & RFW_FWADDR_NOREF_FLAG) ++ { ++ DevmemReleaseDevVirtAddr(psSrc); ++ } ++ ++ return PVRSRV_OK; ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++failDevCacheMode: ++ DevmemReleaseDevVirtAddr(psSrc); ++#endif ++failDevVAAcquire: ++ return eError; ++} ++ ++void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest, ++ DEVMEM_MEMDESC *psSrcMemDesc, ++ RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr, ++ IMG_UINT32 uiOffset) ++{ ++ PVRSRV_ERROR eError; ++ IMG_DEV_VIRTADDR sDevVirtAddr; ++ ++ eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr; ++ psDest->psDevVirtAddr.uiAddr += uiOffset; ++ psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr; ++ ++ DevmemReleaseDevVirtAddr(psSrcMemDesc); ++} ++ ++ ++void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc) ++{ ++ DevmemReleaseDevVirtAddr(psSrc); ++} ++ ++struct _RGX_SERVER_COMMON_CONTEXT_ { ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ DEVMEM_MEMDESC *psFWCommonContextMemDesc; ++ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr; ++ SERVER_MMU_CONTEXT *psServerMMUContext; ++ DEVMEM_MEMDESC *psFWMemContextMemDesc; ++ DEVMEM_MEMDESC *psFWFrameworkMemDesc; ++ DEVMEM_MEMDESC *psContextStateMemDesc; ++ RGX_CLIENT_CCB *psClientCCB; ++ DEVMEM_MEMDESC *psClientCCBMemDesc; ++ DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; ++ IMG_BOOL bCommonContextMemProvided; ++ IMG_UINT32 ui32ContextID; ++ DLLIST_NODE sListNode; ++ RGX_CONTEXT_RESET_REASON eLastResetReason; ++ IMG_UINT32 ui32LastResetJobRef; ++ IMG_INT32 i32Priority; ++ RGX_CCB_REQUESTOR_TYPE eRequestor; ++}; ++ ++/*************************************************************************/ /*! 
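++ (Editorial preamble: RGX_CTX_PRIORITY_REALTIME is treated as exclusive
++  per requestor/data master, so the check below walks the device's list
++  of live common contexts and rejects a second real-time context for the
++  same requestor with PVRSRV_ERROR_INVALID_PARAMS.)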
++@Function _CheckPriority ++@Description Check if priority is allowed for requestor type ++@Input psDevInfo pointer to DevInfo struct ++@Input i32Priority Requested priority ++@Input eRequestor Requestor type specifying data master ++@Return PVRSRV_ERROR PVRSRV_OK on success ++*/ /**************************************************************************/ ++static PVRSRV_ERROR _CheckPriority(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_INT32 i32Priority, ++ RGX_CCB_REQUESTOR_TYPE eRequestor) ++{ ++ /* Only one context allowed with real time priority (highest priority) */ ++ if (i32Priority == RGX_CTX_PRIORITY_REALTIME) ++ { ++ DLLIST_NODE *psNode, *psNext; ++ ++ dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_COMMON_CONTEXT *psThisContext = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); ++ ++ if (psThisContext->i32Priority == RGX_CTX_PRIORITY_REALTIME && ++ psThisContext->eRequestor == eRequestor) ++ { ++ PVR_LOG(("Only one context with real time priority allowed")); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ } ++ } ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, ++ RGXFWIF_DM eDM, ++ SERVER_MMU_CONTEXT *psServerMMUContext, ++ DEVMEM_MEMDESC *psAllocatedMemDesc, ++ IMG_UINT32 ui32AllocatedOffset, ++ DEVMEM_MEMDESC *psFWMemContextMemDesc, ++ DEVMEM_MEMDESC *psContextStateMemDesc, ++ IMG_UINT32 ui32CCBAllocSizeLog2, ++ IMG_UINT32 ui32CCBMaxAllocSizeLog2, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT32 ui32Priority, ++ IMG_UINT32 ui32MaxDeadlineMS, ++ IMG_UINT64 ui64RobustnessAddress, ++ RGX_COMMON_CONTEXT_INFO *psInfo, ++ RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; ++ RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext; ++ IMG_UINT32 ui32FWCommonContextOffset; ++ IMG_UINT8 *pui8Ptr; ++ IMG_INT32 i32Priority = (IMG_INT32)ui32Priority; ++ PVRSRV_ERROR eError; ++ ++ /* ++ * Allocate all the resources that are required ++ */ ++ psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext)); ++ if (psServerCommonContext == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto fail_alloc; ++ } ++ ++ psServerCommonContext->psDevInfo = psDevInfo; ++ psServerCommonContext->psServerMMUContext = psServerMMUContext; ++ ++ if (psAllocatedMemDesc) ++ { ++ PDUMPCOMMENT(psDeviceNode, ++ "Using existing MemDesc for Rogue firmware %s context (offset = %d)", ++ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ++ ui32AllocatedOffset); ++ ui32FWCommonContextOffset = ui32AllocatedOffset; ++ psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc; ++ psServerCommonContext->bCommonContextMemProvided = IMG_TRUE; ++ } ++ else ++ { ++ /* Allocate device memory for the firmware context */ ++ PDUMPCOMMENT(psDeviceNode, ++ "Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(*psFWCommonContext), ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwContext", ++ &psServerCommonContext->psFWCommonContextMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate firmware %s context (%s)", ++ __func__, ++ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ++ PVRSRVGetErrorString(eError))); ++ goto fail_contextalloc; ++ } ++ ui32FWCommonContextOffset = 0; ++ 
psServerCommonContext->bCommonContextMemProvided = IMG_FALSE; ++ } ++ ++ /* Record this context so we can refer to it if the FW needs to tell us it was reset. */ ++ psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; ++ psServerCommonContext->ui32LastResetJobRef = 0; ++ psServerCommonContext->ui32ContextID = psDevInfo->ui32CommonCtxtCurrentID++; ++ ++ /* ++ * Temporarily map the firmware context to the kernel and initialise it ++ */ ++ eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc, ++ (void **)&pui8Ptr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map firmware %s context to CPU (%s)", ++ __func__, ++ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ++ PVRSRVGetErrorString(eError))); ++ goto fail_cpuvirtacquire; ++ } ++ ++ /* Allocate the client CCB */ ++ eError = RGXCreateCCB(psDevInfo, ++ ui32CCBAllocSizeLog2, ++ ui32CCBMaxAllocSizeLog2, ++ ui32ContextFlags, ++ psConnection, ++ eRGXCCBRequestor, ++ psServerCommonContext, ++ &psServerCommonContext->psClientCCB, ++ &psServerCommonContext->psClientCCBMemDesc, ++ &psServerCommonContext->psClientCCBCtrlMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to create CCB for %s context (%s)", ++ __func__, ++ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ++ PVRSRVGetErrorString(eError))); ++ goto fail_allocateccb; ++ } ++ ++ psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset); ++ psFWCommonContext->eDM = eDM; ++ ++ /* Set the firmware CCB device addresses in the firmware common context */ ++ eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCB, ++ psServerCommonContext->psClientCCBMemDesc, ++ 0, RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail_cccbfwaddr); ++ ++ eError = RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl, ++ psServerCommonContext->psClientCCBCtrlMemDesc, ++ 0, RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail_cccbctrlfwaddr); ++ ++#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) ++ { ++ RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr, ++ psServerCommonContext->psClientCCBMemDesc, ++ &psFWCommonContext->psCCB, ++ 0); ++ } ++#endif ++ ++ /* Set the memory context device address */ ++ psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc; ++ eError = RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext, ++ psFWMemContextMemDesc, ++ 0, RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", fail_fwmemctxfwaddr); ++ ++ /* Set the framework register updates address */ ++ psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc; ++ if (psInfo->psFWFrameworkMemDesc != NULL) ++ { ++ eError = RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd, ++ psInfo->psFWFrameworkMemDesc, ++ 0, RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:4", fail_fwframeworkfwaddr); ++ } ++ else ++ { ++ /* This should never be touched in this contexts without a framework ++ * memdesc, but ensure it is zero so we see crashes if it is. 
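++	 * (A zero FW address means any stray firmware dereference of psRFCmd
++	 * lands on address zero instead of silently aliasing a live buffer,
++	 * which makes the misuse visible immediately.)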
++ */ ++ psFWCommonContext->psRFCmd.ui32Addr = 0; ++ } ++ ++ eError = _CheckPriority(psDevInfo, i32Priority, eRGXCCBRequestor); ++ PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); ++ ++ psServerCommonContext->i32Priority = i32Priority; ++ psServerCommonContext->eRequestor = eRGXCCBRequestor; ++ ++ psFWCommonContext->i32Priority = i32Priority; ++ psFWCommonContext->ui32PrioritySeqNum = 0; ++ psFWCommonContext->ui32MaxDeadlineMS = MIN(ui32MaxDeadlineMS, ++ (eDM == RGXFWIF_DM_CDM ? ++ RGXFWIF_MAX_CDM_WORKLOAD_DEADLINE_MS : ++ RGXFWIF_MAX_WORKLOAD_DEADLINE_MS)); ++ psFWCommonContext->ui64RobustnessAddress = ui64RobustnessAddress; ++ ++ /* Store a references to Server Common Context and PID for notifications back from the FW. */ ++ psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID; ++ psFWCommonContext->ui32PID = OSGetCurrentClientProcessIDKM(); ++ ++ /* Set the firmware GPU context state buffer */ ++ psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc; ++ if (psContextStateMemDesc) ++ { ++ eError = RGXSetFirmwareAddress(&psFWCommonContext->psContextState, ++ psContextStateMemDesc, ++ 0, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:5", fail_ctxstatefwaddr); ++ } ++ ++ /* ++ * Dump the created context ++ */ ++ PDUMPCOMMENT(psDeviceNode, ++ "Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]); ++ DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc, ++ ui32FWCommonContextOffset, ++ sizeof(*psFWCommonContext), ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* We've finished the setup so release the CPU mapping */ ++ DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); ++ ++ /* Map this allocation into the FW */ ++ eError = RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr, ++ psServerCommonContext->psFWCommonContextMemDesc, ++ ui32FWCommonContextOffset, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:6", fail_fwcommonctxfwaddr); ++ ++#if defined(__linux__) ++ { ++ IMG_UINT32 ui32FWAddr; ++ switch (eDM) { ++ case RGXFWIF_DM_GEOM: ++ ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) ++ psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext)); ++ break; ++ case RGXFWIF_DM_3D: ++ ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t) ++ psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext)); ++ break; ++ default: ++ ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr; ++ break; ++ } ++ ++ trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(), ++ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT], ++ ui32FWAddr); ++ } ++#endif ++ /*Add the node to the list when finalised */ ++ OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock); ++ dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock); ++ ++ *ppsServerCommonContext = psServerCommonContext; ++ return PVRSRV_OK; ++ ++fail_fwcommonctxfwaddr: ++ if (psContextStateMemDesc) ++ { ++ RGXUnsetFirmwareAddress(psContextStateMemDesc); ++ } ++fail_ctxstatefwaddr: ++fail_checkpriority: ++ if (psInfo->psFWFrameworkMemDesc != NULL) ++ { ++ RGXUnsetFirmwareAddress(psInfo->psFWFrameworkMemDesc); ++ } ++fail_fwframeworkfwaddr: ++ RGXUnsetFirmwareAddress(psFWMemContextMemDesc); ++fail_fwmemctxfwaddr: ++ 
RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); ++fail_cccbctrlfwaddr: ++ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); ++fail_cccbfwaddr: ++ RGXDestroyCCB(psDevInfo, psServerCommonContext->psClientCCB); ++fail_allocateccb: ++ DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc); ++fail_cpuvirtacquire: ++ if (!psServerCommonContext->bCommonContextMemProvided) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc); ++ psServerCommonContext->psFWCommonContextMemDesc = NULL; ++ } ++fail_contextalloc: ++ OSFreeMem(psServerCommonContext); ++fail_alloc: ++ return eError; ++} ++ ++void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) ++{ ++ ++ OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); ++ /* Remove the context from the list of all contexts. */ ++ dllist_remove_node(&psServerCommonContext->sListNode); ++ OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock); ++ ++ /* ++ Unmap the context itself and then all its resources ++ */ ++ ++ /* Unmap the FW common context */ ++ RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc); ++ /* Umap context state buffer (if there was one) */ ++ if (psServerCommonContext->psContextStateMemDesc) ++ { ++ RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc); ++ } ++ /* Unmap the framework buffer */ ++ if (psServerCommonContext->psFWFrameworkMemDesc) ++ { ++ RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc); ++ } ++ /* Unmap client CCB and CCB control */ ++ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc); ++ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc); ++ /* Unmap the memory context */ ++ RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc); ++ ++ /* Destroy the client CCB */ ++ RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB); ++ ++ ++ /* Free the FW common context (if there was one) */ ++ if (!psServerCommonContext->bCommonContextMemProvided) ++ { ++ DevmemFwUnmapAndFree(psServerCommonContext->psDevInfo, ++ psServerCommonContext->psFWCommonContextMemDesc); ++ psServerCommonContext->psFWCommonContextMemDesc = NULL; ++ } ++ /* Free the hosts representation of the common context */ ++ OSFreeMem(psServerCommonContext); ++} ++ ++PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) ++{ ++ return psServerCommonContext->sFWCommonContextFWAddr; ++} ++ ++RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) ++{ ++ return psServerCommonContext->psClientCCB; ++} ++ ++RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, ++ IMG_UINT32 *pui32LastResetJobRef) ++{ ++ RGX_CONTEXT_RESET_REASON eLastResetReason; ++ ++ PVR_ASSERT(psServerCommonContext != NULL); ++ PVR_ASSERT(pui32LastResetJobRef != NULL); ++ ++ /* Take the most recent reason & job ref and reset for next time... 
*/ ++ eLastResetReason = psServerCommonContext->eLastResetReason; ++ *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef; ++ psServerCommonContext->eLastResetReason = RGX_CONTEXT_RESET_REASON_NONE; ++ psServerCommonContext->ui32LastResetJobRef = 0; ++ ++ if (eLastResetReason == RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "A Hard Context Switch was triggered on the GPU to ensure Quality of Service.")); ++ } ++ ++ return eLastResetReason; ++} ++ ++PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext) ++{ ++ return psServerCommonContext->psDevInfo; ++} ++ ++PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, ++ SERVER_MMU_CONTEXT *psServerMMUContext, ++ PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_COMMON_CONTEXT *psThisContext = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); ++ ++ if (psThisContext->psServerMMUContext == psServerMMUContext) ++ { ++ psFWCommonContextFWAddr->ui32Addr = psThisContext->sFWCommonContextFWAddr.ui32Addr; ++ return PVRSRV_OK; ++ } ++ } ++ return PVRSRV_ERROR_INVALID_PARAMS; ++} ++ ++PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, ++ IMG_UINT32 ui32ContextFlags) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (BITMASK_ANY(ui32ContextFlags, ~RGX_CONTEXT_FLAGS_WRITEABLE_MASK)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Context flag(s) invalid or not writeable (%d)", ++ __func__, ui32ContextFlags)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ else ++ { ++ RGXSetCCBFlags(psServerCommonContext->psClientCCB, ++ ui32ContextFlags); ++ } ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function RGXFreeCCB ++ @Description Free the kernel or firmware CCB ++ @Input psDevInfo ++ @Input ppsCCBCtl ++ @Input ppsCCBCtlMemDesc ++ @Input ppsCCBMemDesc ++ @Input psCCBCtlFWAddr ++******************************************************************************/ ++static void RGXFreeCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_CCB_CTL **ppsCCBCtl, ++ DEVMEM_MEMDESC **ppsCCBCtlMemDesc, ++ IMG_UINT8 **ppui8CCB, ++ DEVMEM_MEMDESC **ppsCCBMemDesc) ++{ ++ if (*ppsCCBMemDesc != NULL) ++ { ++ if (*ppui8CCB != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc); ++ *ppui8CCB = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, *ppsCCBMemDesc); ++ *ppsCCBMemDesc = NULL; ++ } ++ if (*ppsCCBCtlMemDesc != NULL) ++ { ++ if (*ppsCCBCtl != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc); ++ *ppsCCBCtl = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, *ppsCCBCtlMemDesc); ++ *ppsCCBCtlMemDesc = NULL; ++ } ++} ++ ++/*! 
++******************************************************************************* ++ @Function RGXFreeCCBReturnSlots ++ @Description Free the kernel CCB's return slot array and associated mappings ++ @Input psDevInfo Device Info struct ++ @Input ppui32CCBRtnSlots CPU mapping of slot array ++ @Input ppsCCBRtnSlotsMemDesc Slot array's device memdesc ++******************************************************************************/ ++static void RGXFreeCCBReturnSlots(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 **ppui32CCBRtnSlots, ++ DEVMEM_MEMDESC **ppsCCBRtnSlotsMemDesc) ++{ ++ /* Free the return slot array if allocated */ ++ if (*ppsCCBRtnSlotsMemDesc != NULL) ++ { ++ /* Before freeing, ensure the CPU mapping as well is released */ ++ if (*ppui32CCBRtnSlots != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(*ppsCCBRtnSlotsMemDesc); ++ *ppui32CCBRtnSlots = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, *ppsCCBRtnSlotsMemDesc); ++ *ppsCCBRtnSlotsMemDesc = NULL; ++ } ++} ++ ++/*! ++******************************************************************************* ++ @Function RGXSetupCCB ++ @Description Allocate and initialise a circular command buffer ++ @Input psDevInfo ++ @Input ppsCCBCtl ++ @Input ppsCCBCtlMemDesc ++ @Input ppui8CCB ++ @Input ppsCCBMemDesc ++ @Input psCCBCtlFWAddr ++ @Input ui32NumCmdsLog2 ++ @Input ui32CmdSize ++ @Input uiCCBMemAllocFlags ++ @Input pszName ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_CCB_CTL **ppsCCBCtl, ++ DEVMEM_MEMDESC **ppsCCBCtlMemDesc, ++ IMG_UINT8 **ppui8CCB, ++ DEVMEM_MEMDESC **ppsCCBMemDesc, ++ PRGXFWIF_CCB_CTL *psCCBCtlFWAddr, ++ PRGXFWIF_CCB *psCCBFWAddr, ++ IMG_UINT32 ui32NumCmdsLog2, ++ IMG_UINT32 ui32CmdSize, ++ PVRSRV_MEMALLOCFLAGS_T uiCCBMemAllocFlags, ++ const IMG_CHAR *pszName) ++{ ++ PVRSRV_ERROR eError; ++ RGXFWIF_CCB_CTL *psCCBCtl; ++ IMG_UINT32 ui32CCBSize = (1U << ui32NumCmdsLog2); ++ IMG_CHAR szCCBCtlName[DEVMEM_ANNOTATION_MAX_LEN]; ++ IMG_INT32 iStrLen; ++ ++ /* Append "Control" to the name for the control struct. */ ++ iStrLen = OSSNPrintf(szCCBCtlName, sizeof(szCCBCtlName), "%sControl", pszName); ++ PVR_ASSERT(iStrLen < sizeof(szCCBCtlName)); ++ ++ if (unlikely(iStrLen < 0)) ++ { ++ OSStringLCopy(szCCBCtlName, "FwCCBControl", DEVMEM_ANNOTATION_MAX_LEN); ++ } ++ ++ /* Allocate memory for the CCB control.*/ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), ++ sizeof(RGXFWIF_CCB_CTL), ++ szCCBCtlName, ++ ppsCCBCtlMemDesc, ++ psCCBCtlFWAddr, ++ (void**) ppsCCBCtl, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); ++ ++ /* ++ * Allocate memory for the CCB. ++ * (this will reference further command data in non-shared CCBs) ++ */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ uiCCBMemAllocFlags, ++ ui32CCBSize * ui32CmdSize, ++ pszName, ++ ppsCCBMemDesc, ++ psCCBFWAddr, ++ (void**) ppui8CCB, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); ++ ++ /* ++ * Initialise the CCB control. 
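++	 * The CCB holds a power-of-two number of fixed-size commands
++	 * (1 << ui32NumCmdsLog2), so producers and consumers can wrap their
++	 * offsets with a cheap bitwise AND rather than a modulo, e.g.:
++	 *   ui32WriteOffset = (ui32WriteOffset + 1) & psCCBCtl->ui32WrapMask;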
++ */ ++ psCCBCtl = *ppsCCBCtl; ++ psCCBCtl->ui32WriteOffset = 0; ++ psCCBCtl->ui32ReadOffset = 0; ++ psCCBCtl->ui32WrapMask = ui32CCBSize - 1; ++ psCCBCtl->ui32CmdSize = ui32CmdSize; ++ ++ /* Pdump the CCB control */ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Initialise %s", szCCBCtlName); ++ DevmemPDumpLoadMem(*ppsCCBCtlMemDesc, ++ 0, ++ sizeof(RGXFWIF_CCB_CTL), ++ 0); ++ ++ return PVRSRV_OK; ++ ++fail: ++ RGXFreeCCB(psDevInfo, ++ ppsCCBCtl, ++ ppsCCBCtlMemDesc, ++ ppui8CCB, ++ ppsCCBMemDesc); ++ ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ PMR *psPMR; ++ ++ if (psDevInfo->psRGXFaultAddressMemDesc) ++ { ++ if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK) ++ { ++ PMRUnlockSysPhysAddresses(psPMR); ++ } ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); ++ psDevInfo->psRGXFaultAddressMemDesc = NULL; ++ } ++} ++ ++static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_SYSINIT *psFwSysInit) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT32 *pui32MemoryVirtAddr; ++ IMG_UINT32 i; ++ size_t ui32PageSize = OSGetPageSize(); ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PMR *psPMR; ++ ++ /* Allocate page of memory to use for page faults on non-blocking memory transactions. ++ * Doesn't need to be cleared as it is initialised with the 0xDEADBEE0 pattern below. */ ++ psDevInfo->psRGXFaultAddressMemDesc = NULL; ++ eError = DevmemFwAllocateExportable(psDeviceNode, ++ ui32PageSize, ++ ui32PageSize, ++ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, ++ "FwExFaultAddress", ++ &psDevInfo->psRGXFaultAddressMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate mem for fault address (%u)", ++ __func__, eError)); ++ goto failFaultAddressDescAlloc; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc, ++ (void **)&pui32MemoryVirtAddr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to acquire mem for fault address (%u)", ++ __func__, eError)); ++ goto failFaultAddressDescAqCpuVirt; ++ } ++ ++ if (!psDeviceNode->bAutoVzFwIsUp) ++ { ++ /* fill the page with a known pattern when booting the firmware */ ++ for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++) ++ { ++ *(pui32MemoryVirtAddr + i) = 0xDEADBEE0; ++ } ++ } ++ ++ OSWriteMemoryBarrier(pui32MemoryVirtAddr); ++ ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc); ++ ++ eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error getting PMR for fault address (%u)", ++ __func__, eError)); ++ ++ goto failFaultAddressDescGetPMR; ++ } ++ else ++ { ++ IMG_BOOL bValid; ++ IMG_UINT32 ui32Log2PageSize = OSGetPageShift(); ++ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error locking physical address for fault address MemDesc (%u)", ++ __func__, eError)); ++ ++ goto failFaultAddressDescLockPhys; ++ } ++ ++ eError = PMR_DevPhysAddr(psPMR,ui32Log2PageSize, 1, 0, &(psFwSysInit->sFaultPhysAddr), &bValid); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error getting physical address for fault address MemDesc (%u)", ++ __func__, eError)); ++ ++ goto failFaultAddressDescGetPhys; ++ } ++ ++ if (!bValid) ++ { ++ 
psFwSysInit->sFaultPhysAddr.uiAddr = 0; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")", ++ __func__, psFwSysInit->sFaultPhysAddr.uiAddr)); ++ ++ goto failFaultAddressDescGetPhys; ++ } ++ } ++ ++ return PVRSRV_OK; ++ ++failFaultAddressDescGetPhys: ++ PMRUnlockSysPhysAddresses(psPMR); ++ ++failFaultAddressDescLockPhys: ++failFaultAddressDescGetPMR: ++failFaultAddressDescAqCpuVirt: ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc); ++ psDevInfo->psRGXFaultAddressMemDesc = NULL; ++ ++failFaultAddressDescAlloc: ++ ++ return eError; ++} ++ ++#if defined(PDUMP) ++/* Replace the DevPhy address with the one Pdump allocates at pdump_player run time */ ++static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ PVRSRV_ERROR eError; ++ PMR *psFWInitPMR, *psFaultAddrPMR; ++ IMG_UINT32 ui32Dstoffset; ++ ++ psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfSysInitMemDesc->psImport->hPMR); ++ ui32Dstoffset = psDevInfo->psRGXFWIfSysInitMemDesc->uiOffset + offsetof(RGXFWIF_SYSINIT, sFaultPhysAddr.uiAddr); ++ ++ psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR); ++ ++ eError = PDumpMemLabelToMem64(psFaultAddrPMR, ++ psFWInitPMR, ++ 0, ++ ui32Dstoffset, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Dump of Fault Page Phys address failed(%u)", __func__, eError)); ++ } ++ return eError; ++} ++#endif ++ ++#if defined(SUPPORT_TBI_INTERFACE) ++/*************************************************************************/ /*! ++@Function RGXTBIBufferIsInitRequired ++ ++@Description Returns true if the firmware tbi buffer is not allocated and ++ might be required by the firmware soon. TBI buffer allocated ++ on-demand to reduce RAM footprint on systems not needing ++ tbi. ++ ++@Input psDevInfo RGX device info ++ ++@Return IMG_BOOL Whether on-demand allocation(s) is/are needed ++ or not ++*/ /**************************************************************************/ ++INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; ++ ++ /* The firmware expects a tbi buffer only when: ++ * - Logtype is "tbi" ++ */ ++ if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL) ++ && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE) ++ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)) ++ { ++ return IMG_TRUE; ++ } ++ ++ return IMG_FALSE; ++} ++ ++/*************************************************************************/ /*! ++@Function RGXTBIBufferDeinit ++ ++@Description Deinitialises all the allocations and references that are made ++ for the FW tbi buffer ++ ++@Input ppsDevInfo RGX device info ++@Return void ++*/ /**************************************************************************/ ++static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc); ++ psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL; ++ psDevInfo->ui32RGXFWIfHWPerfBufSize = 0; ++} ++ ++/*************************************************************************/ /*! ++@Function RGXTBIBufferInitOnDemandResources ++ ++@Description Allocates the firmware TBI buffer required for reading SFs ++ strings and initialize it with SFs. ++ ++@Input psDevInfo RGX device info ++ ++@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. 
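++
++             (Layout note: the buffer holds g_ui32SFsCount consecutive
++             RGXFW_STID_FMT entries, each pairing a 32-bit string ID with
++             a copy of its format string truncated to
++             IMG_SF_STRING_MAX_SIZE - 1 characters, as populated by the
++             copy loop below.)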
++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT32 i, ui32Len; ++ const IMG_UINT32 ui32FWTBIBufsize = g_ui32SFsCount * sizeof(RGXFW_STID_FMT); ++ RGXFW_STID_FMT *psFW_SFs = NULL; ++ ++ /* Firmware address should not be already set */ ++ if (psDevInfo->sRGXFWIfTBIBuffer.ui32Addr) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: FW address for FWTBI is already set. Resetting it with newly allocated one", ++ __func__)); ++ } ++ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS, ++ ui32FWTBIBufsize, ++ "FwTBIBuffer", ++ &psDevInfo->psRGXFWIfTBIBufferMemDesc, ++ &psDevInfo->sRGXFWIfTBIBuffer, ++ (void**)&psFW_SFs, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); ++ ++ /* Copy SFs entries to FW buffer */ ++ for (i = 0; i < g_ui32SFsCount; i++) ++ { ++ OSCachedMemCopy(&psFW_SFs[i].ui32Id, &SFs[i].ui32Id, sizeof(SFs[i].ui32Id)); ++ ui32Len = OSStringLength(SFs[i].psName); ++ OSCachedMemCopy(psFW_SFs[i].sName, SFs[i].psName, MIN(ui32Len, IMG_SF_STRING_MAX_SIZE - 1)); ++ } ++ ++ /* flush write buffers for psFW_SFs */ ++ OSWriteMemoryBarrier(psFW_SFs); ++ ++ /* Set size of TBI buffer */ ++ psDevInfo->ui32FWIfTBIBufferSize = ui32FWTBIBufsize; ++ ++ /* release CPU mapping */ ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc); ++ ++ return PVRSRV_OK; ++fail: ++ RGXTBIBufferDeinit(psDevInfo); ++ return eError; ++} ++#endif ++ ++/*************************************************************************/ /*! ++@Function RGXTraceBufferIsInitRequired ++ ++@Description Returns true if the firmware trace buffer is not allocated and ++ might be required by the firmware soon. Trace buffer allocated ++ on-demand to reduce RAM footprint on systems not needing ++ firmware trace. ++ ++@Input psDevInfo RGX device info ++ ++@Return IMG_BOOL Whether on-demand allocation(s) is/are needed ++ or not ++*/ /**************************************************************************/ ++INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; ++ ++ /* The firmware expects a trace buffer only when: ++ * - Logtype is "trace" AND ++ * - at least one LogGroup is configured ++ * - the Driver Mode is not Guest ++ */ ++ if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL) ++ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE) ++ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK) ++ && !PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ return IMG_TRUE; ++ } ++ ++ return IMG_FALSE; ++} ++ ++/*************************************************************************/ /*! 
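++ (Editorial preamble: one trace buffer exists per firmware thread, so
++  the teardown below iterates over RGXFW_THREAD_NUM entries, dropping
++  each buffer's CPU mapping before DevmemFwUnmapAndFree().)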
++@Function RGXTraceBufferDeinit ++ ++@Description Deinitialises all the allocations and references that are made ++ for the FW trace buffer(s) ++ ++@Input ppsDevInfo RGX device info ++@Return void ++*/ /**************************************************************************/ ++static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; ++ IMG_UINT32 i; ++ ++ for (i = 0; i < RGXFW_THREAD_NUM; i++) ++ { ++ if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i]) ++ { ++ if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); ++ psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL; ++ } ++ ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]); ++ psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL; ++ } ++ } ++} ++ ++/*************************************************************************/ /*! ++@Function RGXTraceBufferInitOnDemandResources ++ ++@Description Allocates the firmware trace buffer required for dumping trace ++ info from the firmware. ++ ++@Input psDevInfo RGX device info ++ ++@Return PVRSRV_OK If all went good, PVRSRV_ERROR otherwise. ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, ++ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags) ++{ ++ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT32 ui32FwThreadNum; ++ IMG_UINT32 ui32DefaultTraceBufSize; ++ IMG_DEVMEM_SIZE_T uiTraceBufSizeInBytes; ++ void *pvAppHintState = NULL; ++ IMG_CHAR pszBufferName[] = "FwTraceBuffer_Thread0"; ++ ++ /* Check AppHint value for module-param FWTraceBufSizeInDWords */ ++ OSCreateKMAppHintState(&pvAppHintState); ++ ui32DefaultTraceBufSize = RGXFW_TRACE_BUF_DEFAULT_SIZE_IN_DWORDS; ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, ++ pvAppHintState, ++ FWTraceBufSizeInDWords, ++ &ui32DefaultTraceBufSize, ++ &psTraceBufCtl->ui32TraceBufSizeInDWords); ++ OSFreeKMAppHintState(pvAppHintState); ++ pvAppHintState = NULL; ++ ++ uiTraceBufSizeInBytes = psTraceBufCtl->ui32TraceBufSizeInDWords * sizeof(IMG_UINT32); ++ ++ for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++) ++ { ++#if !defined(SUPPORT_AUTOVZ) ++ /* Ensure allocation API is only called when not already allocated */ ++ PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL); ++ /* Firmware address should not be already set */ ++ PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0); ++#endif ++ ++ /* update the firmware thread number in the Trace Buffer's name */ ++ pszBufferName[sizeof(pszBufferName) - 2] += ui32FwThreadNum; ++ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ uiAllocFlags, ++ uiTraceBufSizeInBytes, ++ pszBufferName, ++ &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum], ++ &psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer, ++ (void**)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); ++ } ++ ++ return PVRSRV_OK; ++ ++fail: ++ RGXTraceBufferDeinit(psDevInfo); ++ return eError; ++} ++ ++#if defined(PDUMP) ++/*************************************************************************/ /*! 
++@Function RGXPDumpLoadFWInitData
++
++@Description PDumps the initial contents of the firmware interface
++             structures (runtime config, trace control, system/OS data
++             and init structures) so a pdump script can replay and edit
++             them.
++
++@Input psDevInfo RGX device info
++ */ /*************************************************************************/
++static void RGXPDumpLoadFWInitData(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                   IMG_UINT32 ui32HWPerfCountersDataSize,
++                                   IMG_BOOL bEnableSignatureChecks)
++{
++	IMG_UINT32 ui32ConfigFlags = psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags;
++	IMG_UINT32 ui32FwOsCfgFlags = psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags;
++
++	PDUMPCOMMENT(psDevInfo->psDeviceNode, "Dump RGXFW Init data");
++	if (!bEnableSignatureChecks)
++	{
++		PDUMPCOMMENT(psDevInfo->psDeviceNode,
++		             "(to enable rgxfw signatures place the following line after the RTCONF line)");
++		DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
++		                   offsetof(RGXFWIF_SYSINIT, asSigBufCtl),
++		                   sizeof(RGXFWIF_SIGBUF_CTL)*(RGXFWIF_DM_MAX),
++		                   PDUMP_FLAGS_CONTINUOUS);
++	}
++
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Dump initial state of FW runtime configuration");
++	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
++	                   0,
++	                   sizeof(RGXFWIF_RUNTIME_CFG),
++	                   PDUMP_FLAGS_CONTINUOUS);
++
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Dump rgxfw hwperfctl structure");
++	DevmemPDumpLoadZeroMem(psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
++	                       0,
++	                       ui32HWPerfCountersDataSize,
++	                       PDUMP_FLAGS_CONTINUOUS);
++
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Dump rgxfw trace control structure");
++	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
++	                   0,
++	                   sizeof(RGXFWIF_TRACEBUF),
++	                   PDUMP_FLAGS_CONTINUOUS);
++
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Dump firmware system data structure");
++	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwSysDataMemDesc,
++	                   0,
++	                   sizeof(RGXFWIF_SYSDATA),
++	                   PDUMP_FLAGS_CONTINUOUS);
++
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Dump firmware OS data structure");
++	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfFwOsDataMemDesc,
++	                   0,
++	                   sizeof(RGXFWIF_OSDATA),
++	                   PDUMP_FLAGS_CONTINUOUS);
++
++#if defined(SUPPORT_TBI_INTERFACE)
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Dump rgx TBI buffer");
++	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfTBIBufferMemDesc,
++	                   0,
++	                   psDevInfo->ui32FWIfTBIBufferSize,
++	                   PDUMP_FLAGS_CONTINUOUS);
++#endif /* defined(SUPPORT_TBI_INTERFACE) */
++
++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Dump rgxfw register configuration buffer");
++	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfRegCfgMemDesc,
++	                   0,
++	                   sizeof(RGXFWIF_REG_CFG),
++	                   PDUMP_FLAGS_CONTINUOUS);
++#endif /* defined(SUPPORT_USER_REGISTER_CONFIGURATION) */
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Dump rgxfw system init structure");
++	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc,
++	                   0,
++	                   sizeof(RGXFWIF_SYSINIT),
++	                   PDUMP_FLAGS_CONTINUOUS);
++
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Dump rgxfw os init structure");
++	DevmemPDumpLoadMem(psDevInfo->psRGXFWIfOsInitMemDesc,
++	                   0,
++	                   sizeof(RGXFWIF_OSINIT),
++	                   PDUMP_FLAGS_CONTINUOUS);
++
++	/* The RGXFW Init structure needs to be loaded before we overwrite
++	 * FaultPhysAddr, otherwise this address patching won't have any effect. */
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "Overwrite FaultPhysAddr of FwSysInit in pdump with actual physical address");
++	RGXPDumpFaultReadRegister(psDevInfo);
++
++	PDUMPCOMMENT(psDevInfo->psDeviceNode,
++	             "RTCONF: run-time configuration");
++
++	/* Dump the config options so they can be edited.
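++	 * Each PDUMPCOMMENT below names one RGXFWIF_INICFG_* bit, so a pdump
++	 * script author can OR the desired masks together and edit the 32-bit
++	 * value written by the DevmemPDumpLoadMemValue32() call that follows
++	 * the list.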
++ * ++ */ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "(Set the FW system config options here)"); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Enable generic DM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( FBCDC Version 3.1 Enable: 0x%08x)", RGXFWIF_INICFG_FBCDC_V3_1_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Disable HWPerf custom counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Enable Ctx Switch profile mode: 0x%08x (none=b'000, fast=b'001, medium=b'010, slow=b'011, nodelay=b'100))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Enable coherent memory accesses: 0x%08x)", RGXFWIF_INICFG_FABRIC_COHERENCY_ENABLED); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Enable IRQ validation: 0x%08x)", RGXFWIF_INICFG_VALIDATE_IRQ); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( SPU power state mask change Enable: 0x%08x)", RGXFWIF_INICFG_SPU_POWER_STATE_MASK_CHANGE_EN); ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Enable Workload Estimation: 0x%08x)", RGXFWIF_INICFG_WORKEST); ++#if defined(SUPPORT_PDVFS) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Enable Proactive DVFS: 0x%08x)", RGXFWIF_INICFG_PDVFS); ++#endif /* defined(SUPPORT_PDVFS) */ ++#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( CDM Arbitration Mode (task demand=b'01, round robin=b'10): 0x%08x)", RGXFWIF_INICFG_CDM_ARBITRATION_MASK); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( ISP Scheduling Mode (v1=b'01, v2=b'10): 0x%08x)", RGXFWIF_INICFG_ISPSCHEDMODE_MASK); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Validate SOC & USC timers: 0x%08x)", RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER); ++ ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwSysDataMemDesc, ++ offsetof(RGXFWIF_SYSDATA, ui32ConfigFlags), ++ ui32ConfigFlags, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Extended FW system config options not used.)"); ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "(Set the FW OS config options here)"); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Ctx Switch TDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_TDM_EN); ++ 
PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_GEOM_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_3D_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_CTXSWITCH_CDM_EN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Lower Priority Ctx Switch 2D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_TDM); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Lower Priority Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_GEOM); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Lower Priority Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_3D); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_OS_LOW_PRIO_CS_CDM); ++ ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfFwOsDataMemDesc, ++ offsetof(RGXFWIF_OSDATA, ui32FwOsConfigFlags), ++ ui32FwOsCfgFlags, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "(Select one or more security tests here)"); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Read/write FW private data from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_DATA); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Read/write FW code from non-FW contexts: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_READ_WRITE_FW_CODE); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Execute FW code from non-secure memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_NONSECURE); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Execute FW code from secure (non-FW) memory: 0x%08x)", RGXFWIF_SECURE_ACCESS_TEST_RUN_FROM_SECURE); ++ ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, ++ offsetof(RGXFWIF_SYSINIT, ui32SecurityTestFlags), ++ psDevInfo->psRGXFWIfSysInit->ui32SecurityTestFlags, ++ PDUMP_FLAGS_CONTINUOUS); ++#endif ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)", ++ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT, ++ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT); ++ ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, ++ offsetof(RGXFWIF_SYSINIT, sPIDFilter.eMode), ++ psDevInfo->psRGXFWIfSysInit->sPIDFilter.eMode, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))", ++ RGXFWIF_PID_FILTER_MAX_NUM_PIDS); ++ { ++ IMG_UINT32 i; ++ ++ /* generate a few WRWs in the pdump stream as an example */ ++ for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++) ++ { ++ /* ++ * Some compilers cannot cope with the uses of offsetof() below - the specific problem being the use of ++ * a non-const variable in the expression, which it needs to be const. Typical compiler output is ++ * "expression must have a constant value". 
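++	 * The initialisers below therefore open-code offsetof() with the
++	 * classic null-pointer idiom, which remains a plain address
++	 * computation even with a run-time index, i.e. conceptually:
++	 *   #define PID_ITEM_OFFSETOF(m) \
++	 *     ((IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->m))
++	 * (macro name illustrative only, not part of the driver).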
++ */ ++ const IMG_DEVMEM_OFFSET_T uiPIDOff ++ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].uiPID); ++ ++ const IMG_DEVMEM_OFFSET_T uiOSIDOff ++ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_SYSINIT *)0)->sPIDFilter.asItems[i].ui32OSID); ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "(PID and OSID pair %u)", i); ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(PID)"); ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, ++ uiPIDOff, ++ 0, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(OSID)"); ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfSysInitMemDesc, ++ uiOSIDOff, ++ 0, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ } ++ ++ /* ++ * Dump the log config so it can be edited. ++ */ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "(Set the log config here)"); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( Log Type: set bit 0 for TRACE, reset for TBI)"); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP); ++ ++#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) ++ { ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA); ++ } ++#endif ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( MISC Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MISC); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG); ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc, ++ offsetof(RGXFWIF_TRACEBUF, ui32LogType), ++ psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Set the HWPerf Filter config here, see \"hwperfbin2jsont -h\""); ++ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfSysInitMemDesc, ++ offsetof(RGXFWIF_SYSINIT, ui64HWPerfFilter), ++ psDevInfo->psRGXFWIfSysInit->ui64HWPerfFilter, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "(Number of registers configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), tla(%d), TDM(%d))", ++ RGXFWIF_REG_CFG_TYPE_PWR_ON, ++ RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, ++ RGXFWIF_REG_CFG_TYPE_TA, ++ RGXFWIF_REG_CFG_TYPE_3D, ++ RGXFWIF_REG_CFG_TYPE_CDM, ++ RGXFWIF_REG_CFG_TYPE_TLA, ++ RGXFWIF_REG_CFG_TYPE_TDM); ++ ++ { ++ IMG_UINT32 i; ++ ++ /* Write 32 bits in each iteration as 
required by PDUMP WRW command */ ++ for (i = 0; i < RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32)) ++ { ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc, ++ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]), ++ 0, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ } ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "(Set registers here: address, mask, value)"); ++ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc, ++ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr), ++ 0, ++ PDUMP_FLAGS_CONTINUOUS); ++ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc, ++ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask), ++ 0, ++ PDUMP_FLAGS_CONTINUOUS); ++ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc, ++ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value), ++ 0, ++ PDUMP_FLAGS_CONTINUOUS); ++#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */ ++} ++#endif /* defined(PDUMP) */ ++ ++/*! ++******************************************************************************* ++ @Function RGXSetupFwGuardPage ++ ++ @Description Allocate a Guard Page at the start of a Guest's Main Heap ++ ++ @Input psDevceNode ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXSetupFwGuardPage(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN)), ++ OSGetPageSize(), ++ "FwGuardPage", ++ &psDevInfo->psRGXFWHeapGuardPageReserveMemDesc, ++ NULL, ++ NULL, ++ RFW_FWADDR_FLAG_NONE); ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function RGXSetupFwSysData ++ ++ @Description Sets up all system-wide firmware related data ++ ++ @Input psDevInfo ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXSetupFwSysData(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bEnableSignatureChecks, ++ IMG_UINT32 ui32SignatureChecksBufSize, ++ IMG_UINT32 ui32HWPerfFWBufSizeKB, ++ IMG_UINT64 ui64HWPerfFilter, ++ IMG_UINT32 ui32ConfigFlags, ++ IMG_UINT32 ui32ConfigFlagsExt, ++ IMG_UINT32 ui32LogType, ++ IMG_UINT32 ui32FilterFlags, ++ IMG_UINT32 ui32JonesDisableMask, ++ IMG_UINT32 ui32HWPerfCountersDataSize, ++ IMG_UINT32 *pui32TPUTrilinearFracMask, ++ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, ++ FW_PERF_CONF eFirmwarePerf) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGXFWIF_SYSINIT *psFwSysInitScratch = NULL; ++ ++ psFwSysInitScratch = OSAllocZMem(sizeof(*psFwSysInitScratch)); ++ PVR_LOG_GOTO_IF_NOMEM(psFwSysInitScratch, eError, fail); ++ ++ /* Sys Fw init data */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ (RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)) & ++ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), ++ sizeof(RGXFWIF_SYSINIT), ++ "FwSysInitStructure", ++ &psDevInfo->psRGXFWIfSysInitMemDesc, ++ NULL, ++ (void**) &psDevInfo->psRGXFWIfSysInit, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Sys Init structure allocation", fail); ++ ++ /* Setup Fault read register */ ++ eError = RGXSetupFaultReadRegister(psDeviceNode, psFwSysInitScratch); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Fault read register setup", fail); ++ ++#if defined(SUPPORT_AUTOVZ) ++ psFwSysInitScratch->ui32VzWdgPeriod = PVR_AUTOVZ_WDG_PERIOD_MS; ++#endif ++ ++ /* RD Power 
Island */ ++ { ++ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; ++ IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland; ++ IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) || ++ (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON); ++ ++ ui32ConfigFlags |= bEnableRDPowIsland? RGXFWIF_INICFG_POW_RASCALDUST : 0; ++ } ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST; ++#if defined(SUPPORT_PDVFS) ++ { ++ RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo; ++ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg; ++ ++ /* Pro-active DVFS depends on Workload Estimation */ ++ psPDVFSOPPInfo = &psFwSysInitScratch->sPDVFSOPPInfo; ++ psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg; ++ PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFwSysData: Missing OPP Table"); ++ ++ if (psDVFSDeviceCfg->pasOPPTable != NULL) ++ { ++ if (psDVFSDeviceCfg->ui32OPPTableSize > ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: OPP Table too large: Size = %u, Maximum size = %lu", ++ __func__, ++ psDVFSDeviceCfg->ui32OPPTableSize, ++ (unsigned long)(ARRAY_SIZE(psPDVFSOPPInfo->asOPPValues)))); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto fail; ++ } ++ ++ OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues, ++ psDVFSDeviceCfg->pasOPPTable, ++ sizeof(psPDVFSOPPInfo->asOPPValues)); ++ ++ psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1; ++ ++ ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS; ++ } ++ } ++#endif /* defined(SUPPORT_PDVFS) */ ++#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ ++ ++ /* FW trace control structure */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & ++ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), ++ sizeof(RGXFWIF_TRACEBUF), ++ "FwTraceCtlStruct", ++ &psDevInfo->psRGXFWIfTraceBufCtlMemDesc, ++ &psFwSysInitScratch->sTraceBufCtl, ++ (void**) &psDevInfo->psRGXFWIfTraceBufCtl, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); ++ ++ if (!psDeviceNode->bAutoVzFwIsUp) ++ { ++ /* Set initial firmware log type/group(s) */ ++ if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK) ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid initial log type (0x%X)", ++ __func__, ui32LogType)); ++ goto fail; ++ } ++ psDevInfo->psRGXFWIfTraceBufCtl->ui32LogType = ui32LogType; ++ } ++ ++ /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource ++ * (irrespective of loggroup(s) enabled), given that logtype/loggroups can ++ * be set during PDump playback in logconfig, at any point of time, ++ * Otherwise, allocate only if required. 
*/
++#if !defined(PDUMP)
++#if defined(SUPPORT_AUTOVZ)
++	/* always allocate trace buffer for AutoVz Host drivers to allow
++	 * deterministic addresses of all SysData structures */
++	if ((PVRSRV_VZ_MODE_IS(HOST)) || (RGXTraceBufferIsInitRequired(psDevInfo)))
++#else
++	if (RGXTraceBufferIsInitRequired(psDevInfo))
++#endif
++#endif
++	{
++		eError = RGXTraceBufferInitOnDemandResources(psDevInfo,
++		                                             RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS &
++		                                             RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp));
++	}
++	PVR_LOG_GOTO_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail);
++
++	eError = RGXSetupFwAllocation(psDevInfo,
++	                              RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS &
++	                              RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp),
++	                              sizeof(RGXFWIF_SYSDATA),
++	                              "FwSysData",
++	                              &psDevInfo->psRGXFWIfFwSysDataMemDesc,
++	                              &psFwSysInitScratch->sFwSysData,
++	                              (void**) &psDevInfo->psRGXFWIfFwSysData,
++	                              RFW_FWADDR_NOREF_FLAG);
++	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail);
++
++	/* GPIO validation setup */
++	psFwSysInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF;
++#if defined(SUPPORT_VALIDATION)
++	{
++		IMG_INT32 ui32AppHintDefault;
++		IMG_INT32 ui32GPIOValidationMode;
++		void *pvAppHintState = NULL;
++
++		/* Check AppHint for GPIO validation mode */
++		OSCreateKMAppHintState(&pvAppHintState);
++		ui32AppHintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE;
++		OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
++		                     pvAppHintState,
++		                     GPIOValidationMode,
++		                     &ui32AppHintDefault,
++		                     &ui32GPIOValidationMode);
++		OSFreeKMAppHintState(pvAppHintState);
++		pvAppHintState = NULL;
++
++		if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST)
++		{
++			PVR_DPF((PVR_DBG_ERROR,
++			         "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. Disabling GPIO validation.",
++			         __func__,
++			         ui32GPIOValidationMode,
++			         RGXFWIF_GPIO_VAL_LAST));
++		}
++		else
++		{
++			psFwSysInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode;
++		}
++	}
++#endif
++
++#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
++	eError = RGXFWSetupCounterBuffer(psDevInfo,
++	                                 &psDevInfo->psCounterBufferMemDesc,
++	                                 PAGE_SIZE,
++	                                 &psFwSysInitScratch->sCounterDumpCtl,
++	                                 "CounterBuffer");
++	PVR_LOG_GOTO_IF_ERROR(eError, "Counter Buffer allocation", fail);
++#endif /* defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) */
++
++#if defined(SUPPORT_VALIDATION)
++	{
++		IMG_UINT32 ui32EnablePollOnChecksumErrorStatus;
++		IMG_UINT32 ui32ApphintDefault = 0;
++		void *pvAppHintState = NULL;
++
++		/* Check AppHint for polling on GPU Checksum status */
++		OSCreateKMAppHintState(&pvAppHintState);
++		OSGetKMAppHintUINT32(APPHINT_NO_DEVICE,
++		                     pvAppHintState,
++		                     EnablePollOnChecksumErrorStatus,
++		                     &ui32ApphintDefault,
++		                     &ui32EnablePollOnChecksumErrorStatus);
++		OSFreeKMAppHintState(pvAppHintState);
++		pvAppHintState = NULL;
++
++		switch (ui32EnablePollOnChecksumErrorStatus)
++		{
++			case 0: /* no checking */ break;
++			case 3: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_NOERR_EN; break;
++			case 4: psDevInfo->ui32ValidationFlags |= RGX_VAL_KZ_SIG_CHECK_ERR_EN; break;
++			default:
++				PVR_DPF((PVR_DBG_WARNING, "Unsupported value in EnablePollOnChecksumErrorStatus (%d)", ui32EnablePollOnChecksumErrorStatus));
++				break;
++		}
++	}
++#endif /* defined(SUPPORT_VALIDATION) */
++
++#if defined(SUPPORT_FIRMWARE_GCOV)
++	eError = RGXFWSetupFirmwareGcovBuffer(psDevInfo,
++	                                      &psDevInfo->psFirmwareGcovBufferMemDesc,
++	                                      RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE,
++
&psFwSysInitScratch->sFirmwareGcovCtl, ++ "FirmwareGcovBuffer"); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware GCOV buffer allocation", fail); ++ psDevInfo->ui32FirmwareGcovSize = RGXFWIF_FIRMWARE_GCOV_BUFFER_SIZE; ++#endif /* defined(SUPPORT_FIRMWARE_GCOV) */ ++ ++#if defined(PDUMP) ++ /* Require a minimum amount of memory for the signature buffers */ ++ if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN) ++ { ++ ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN; ++ } ++ ++ /* Setup Signature and Checksum Buffers for TDM, GEOM and 3D */ ++ eError = RGXFWSetupSignatureChecks(psDevInfo, ++ &psDevInfo->psRGXFWSigTAChecksMemDesc, ++ ui32SignatureChecksBufSize, ++ &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM]); ++ PVR_LOG_GOTO_IF_ERROR(eError, "TA Signature check setup", fail); ++ psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize; ++ ++ eError = RGXFWSetupSignatureChecks(psDevInfo, ++ &psDevInfo->psRGXFWSig3DChecksMemDesc, ++ ui32SignatureChecksBufSize, ++ &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D]); ++ PVR_LOG_GOTO_IF_ERROR(eError, "3D Signature check setup", fail); ++ psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize; ++ ++ psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL; ++ psDevInfo->ui32SigTDM2DChecksSize = 0; ++ ++#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM)) ++ { ++ /* Buffer allocated only when feature present because, all known TDM ++ * signature registers are dependent on this feature being present */ ++ eError = RGXFWSetupSignatureChecks(psDevInfo, ++ &psDevInfo->psRGXFWSigTDM2DChecksMemDesc, ++ ui32SignatureChecksBufSize, ++ &psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM]); ++ PVR_LOG_GOTO_IF_ERROR(eError, "TDM Signature check setup", fail); ++ psDevInfo->ui32SigTDM2DChecksSize = ui32SignatureChecksBufSize; ++ } ++#endif ++ ++ if (!bEnableSignatureChecks) ++ { ++ psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_TDM].sBuffer.ui32Addr = 0x0; ++ psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_GEOM].sBuffer.ui32Addr = 0x0; ++ psFwSysInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0; ++ } ++#endif /* defined(PDUMP) */ ++ ++ eError = RGXFWSetupAlignChecks(psDeviceNode, ++ &psFwSysInitScratch->sAlignChecks); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Alignment checks setup", fail); ++ ++ psFwSysInitScratch->ui32FilterFlags = ui32FilterFlags; ++ ++ /* Fill the remaining bits of fw the init data */ ++ psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE; ++ psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE; ++ psFwSysInitScratch->sFBCDCStateTableBase.uiAddr = RGX_FBCDC_HEAP_BASE; ++ psFwSysInitScratch->sFBCDCLargeStateTableBase.uiAddr = RGX_FBCDC_LARGE_HEAP_BASE; ++ psFwSysInitScratch->sTextureHeapBase.uiAddr = RGX_TEXTURE_STATE_HEAP_BASE; ++ ++#if defined(FIX_HW_BRN_65273_BIT_MASK) ++ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) ++ { ++ /* Fill the remaining bits of fw the init data */ ++ psFwSysInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_65273_HEAP_BASE; ++ psFwSysInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_BRN_65273_HEAP_BASE; ++ } ++#endif ++ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) ++ { ++ psFwSysInitScratch->ui32JonesDisableMask = ui32JonesDisableMask; ++ } ++#endif ++#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) ++ { ++ eError = _AllocateSLC3Fence(psDevInfo, psFwSysInitScratch); ++ PVR_LOG_GOTO_IF_ERROR(eError, 
"SLC3Fence memory allocation", fail); ++ } ++#endif ++#if defined(SUPPORT_PDVFS) ++ /* Core clock rate */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & ++ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), ++ sizeof(IMG_UINT32), ++ "FwPDVFSCoreClkRate", ++ &psDevInfo->psRGXFWIFCoreClkRateMemDesc, ++ &psFwSysInitScratch->sCoreClockRate, ++ (void**) &psDevInfo->pui32RGXFWIFCoreClkRate, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PDVFS core clock rate memory setup", fail); ++#endif ++ { ++ /* Timestamps */ ++ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags = ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX ?? */ ++ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC; ++ ++ /* ++ the timer query arrays ++ */ ++ PDUMPCOMMENT(psDeviceNode, "Allocate timer query arrays (FW)"); ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES, ++ uiMemAllocFlags | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE, ++ "FwStartTimesArray", ++ &psDevInfo->psStartTimeMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map start times array", ++ __func__)); ++ goto fail; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psStartTimeMemDesc, ++ (void **)& psDevInfo->pui64StartTimeById); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map start times array", ++ __func__)); ++ goto fail; ++ } ++ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES, ++ uiMemAllocFlags | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE, ++ "FwEndTimesArray", ++ & psDevInfo->psEndTimeMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map end times array", ++ __func__)); ++ goto fail; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psEndTimeMemDesc, ++ (void **)& psDevInfo->pui64EndTimeById); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map end times array", ++ __func__)); ++ goto fail; ++ } ++ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(IMG_UINT32) * RGX_MAX_TIMER_QUERIES, ++ uiMemAllocFlags, ++ "FwCompletedOpsArray", ++ & psDevInfo->psCompletedMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to completed ops array", ++ __func__)); ++ goto fail; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCompletedMemDesc, ++ (void **)& psDevInfo->pui32CompletedById); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map completed ops array", ++ __func__)); ++ goto fail; ++ } ++ } ++#if !defined(PVRSRV_USE_BRIDGE_LOCK) ++ eError = OSLockCreate(&psDevInfo->hTimerQueryLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate log for timer query", ++ __func__)); ++ goto fail; ++ } ++#endif ++#if defined(SUPPORT_TBI_INTERFACE) ++#if !defined(PDUMP) ++ /* allocate only if required */ ++ if (RGXTBIBufferIsInitRequired(psDevInfo)) ++#endif /* !defined(PDUMP) */ ++ { ++ /* When PDUMP is enabled, ALWAYS allocate on-demand TBI buffer resource ++ * (irrespective of loggroup(s) enabled), given that logtype/loggroups ++ * can be set during PDump playback in logconfig, at any point of time ++ */ ++ eError = 
RGXTBIBufferInitOnDemandResources(psDevInfo); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources", fail); ++ } ++ ++ psFwSysInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer; ++#endif /* defined(SUPPORT_TBI_INTERFACE) */ ++ ++ /* Allocate shared buffer for GPU utilisation */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS & ++ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), ++ sizeof(RGXFWIF_GPU_UTIL_FWCB), ++ "FwGPUUtilisationBuffer", ++ &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc, ++ &psFwSysInitScratch->sGpuUtilFWCbCtl, ++ (void**) &psDevInfo->psRGXFWIfGpuUtilFWCb, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "GPU Utilisation Buffer ctl allocation", fail); ++ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS & ++ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), ++ sizeof(RGXFWIF_RUNTIME_CFG), ++ "FwRuntimeCfg", ++ &psDevInfo->psRGXFWIfRuntimeCfgMemDesc, ++ &psFwSysInitScratch->sRuntimeCfg, ++ (void**) &psDevInfo->psRGXFWIfRuntimeCfg, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware runtime configuration memory allocation", fail); ++ ++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & ++ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), ++ sizeof(RGXFWIF_REG_CFG), ++ "FwRegisterConfigStructure", ++ &psDevInfo->psRGXFWIfRegCfgMemDesc, ++ &psFwSysInitScratch->sRegCfg, ++ NULL, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware register user configuration structure allocation", fail); ++#endif ++ ++ psDevInfo->ui32RGXFWIfHWPerfBufSize = GetHwPerfBufferSize(ui32HWPerfFWBufSizeKB); ++ /* Second stage initialisation or HWPerf, hHWPerfLock created in first ++ * stage. See RGXRegisterDevice() call to RGXHWPerfInit(). */ ++ if (psDevInfo->ui64HWPerfFilter == 0) ++ { ++ psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter; ++ psFwSysInitScratch->ui64HWPerfFilter = ui64HWPerfFilter; ++ } ++ else ++ { ++ /* The filter has already been modified. This can happen if ++ * pvr/apphint/EnableFTraceGPU was enabled. */ ++ psFwSysInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter; ++ } ++ ++#if !defined(PDUMP) ++ /* Allocate if HWPerf filter has already been set. This is possible either ++ * by setting a proper AppHint or enabling GPU ftrace events. */ ++ if (psDevInfo->ui64HWPerfFilter != 0) ++#endif ++ { ++ /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources ++ * (irrespective of HWPerf enabled or not), given that HWPerf can be ++ * enabled during PDump playback via RTCONF at any point of time. */ ++ eError = RGXHWPerfInitOnDemandResources(psDevInfo); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail); ++ } ++ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS & ++ RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp), ++ ui32HWPerfCountersDataSize, ++ "FwHWPerfControlStructure", ++ &psDevInfo->psRGXFWIfHWPerfCountersMemDesc, ++ &psFwSysInitScratch->sHWPerfCtl, ++ NULL, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware HW Perf control struct allocation", fail); ++ ++ psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_INICFG_DISABLE_PDP_EN) ++ ? 
IMG_FALSE : IMG_TRUE; ++ ++ psFwSysInitScratch->eFirmwarePerf = eFirmwarePerf; ++ ++#if defined(PDUMP) ++ /* default: no filter */ ++ psFwSysInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT; ++ psFwSysInitScratch->sPIDFilter.asItems[0].uiPID = 0; ++#endif ++ ++#if defined(SUPPORT_VALIDATION) ++ { ++ IMG_UINT32 dm; ++ ++ /* TPU trilinear rounding mask override */ ++ for (dm = 0; dm < RGXFWIF_TPU_DM_LAST; dm++) ++ { ++ psFwSysInitScratch->aui32TPUTrilinearFracMask[dm] = pui32TPUTrilinearFracMask[dm]; ++ } ++ } ++#endif ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++ { ++ PVRSRV_MEMALLOCFLAGS_T uiFlags = RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS; ++ PVRSRV_SET_PHYS_HEAP_HINT(GPU_SECURE, uiFlags); ++ ++ PDUMPCOMMENT(psDeviceNode, "Allocate non-secure buffer for security validation test"); ++ eError = DevmemFwAllocateExportable(psDeviceNode, ++ OSGetPageSize(), ++ OSGetPageSize(), ++ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, ++ "FwExNonSecureBuffer", ++ &psDevInfo->psRGXFWIfNonSecureBufMemDesc); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Non-secure buffer allocation", fail); ++ ++ eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbNonSecureBuffer, ++ psDevInfo->psRGXFWIfNonSecureBufMemDesc, ++ 0, RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", fail); ++ ++ PDUMPCOMMENT(psDeviceNode, "Allocate secure buffer for security validation test"); ++ eError = DevmemFwAllocateExportable(psDeviceNode, ++ OSGetPageSize(), ++ OSGetPageSize(), ++ uiFlags, ++ "FwExSecureBuffer", ++ &psDevInfo->psRGXFWIfSecureBufMemDesc); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Secure buffer allocation", fail); ++ ++ eError = RGXSetFirmwareAddress(&psFwSysInitScratch->pbSecureBuffer, ++ psDevInfo->psRGXFWIfSecureBufMemDesc, ++ 0, RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", fail); ++ } ++#endif /* SUPPORT_SECURITY_VALIDATION */ ++ ++#if defined(RGX_FEATURE_TFBC_LOSSY_37_PERCENT_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) || RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION)) ++ { ++ psFwSysInitScratch->ui32TFBCCompressionControl = ++ (ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK) >> RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT; ++ } ++#endif ++ ++ /* Initialize FW started flag */ ++ psFwSysInitScratch->bFirmwareStarted = IMG_FALSE; ++ psFwSysInitScratch->ui32MarkerVal = 1; ++ ++ if (!psDeviceNode->bAutoVzFwIsUp) ++ { ++ IMG_UINT32 ui32OSIndex; ++ ++ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData; ++ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; ++ ++ /* Required info by FW to calculate the ActivePM idle timer latency */ ++ psFwSysInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; ++ psFwSysInitScratch->ui32InitialActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms; ++ ++ /* Initialise variable runtime configuration to the system defaults */ ++ psRuntimeCfg->ui32CoreClockSpeed = psFwSysInitScratch->ui32InitialCoreClockSpeed; ++ psRuntimeCfg->ui32ActivePMLatencyms = psFwSysInitScratch->ui32InitialActivePMLatencyms; ++ psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE; ++ psRuntimeCfg->ui32WdgPeriodUs = RGXFW_SAFETY_WATCHDOG_PERIOD_IN_US; ++ psRuntimeCfg->ui32HCSDeadlineMS = RGX_HCS_DEFAULT_DEADLINE_MS; ++ ++ if (PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ psRuntimeCfg->aui32OSidPriority[RGXFW_HOST_OS] = 0; ++ } ++ else ++ { ++ for (ui32OSIndex = 0; ui32OSIndex < RGX_NUM_OS_SUPPORTED; ui32OSIndex++) ++ { ++ const IMG_INT32 
ai32DefaultOsPriority[RGXFW_MAX_NUM_OS] = ++ {RGX_OSID_0_DEFAULT_PRIORITY, RGX_OSID_1_DEFAULT_PRIORITY, RGX_OSID_2_DEFAULT_PRIORITY, RGX_OSID_3_DEFAULT_PRIORITY, ++ RGX_OSID_4_DEFAULT_PRIORITY, RGX_OSID_5_DEFAULT_PRIORITY, RGX_OSID_6_DEFAULT_PRIORITY, RGX_OSID_7_DEFAULT_PRIORITY}; ++ ++ /* Set up initial priorities between different OSes */ ++ psRuntimeCfg->aui32OSidPriority[ui32OSIndex] = (IMG_UINT32)ai32DefaultOsPriority[ui32OSIndex]; ++ } ++ } ++ ++#if defined(PVR_ENABLE_PHR) && defined(PDUMP) ++ psRuntimeCfg->ui32PHRMode = RGXFWIF_PHR_MODE_RD_RESET; ++#else ++ psRuntimeCfg->ui32PHRMode = 0; ++#endif ++ ++ /* Initialize the DefaultDustsNumInit Field to Max Dusts */ ++ psRuntimeCfg->ui32DefaultDustsNumInit = psDevInfo->sDevFeatureCfg.ui32MAXDustCount; ++ ++ /* flush write buffers for psDevInfo->psRGXFWIfRuntimeCfg */ ++ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfRuntimeCfg); ++ ++ /* Setup FW coremem data */ ++ if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) ++ { ++ psFwSysInitScratch->sCorememDataStore.pbyFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; ++ ++#if defined(RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA)) ++ { ++ RGXSetMetaDMAAddress(&psFwSysInitScratch->sCorememDataStore, ++ psDevInfo->psRGXFWIfCorememDataStoreMemDesc, ++ &psFwSysInitScratch->sCorememDataStore.pbyFWAddr, ++ 0); ++ } ++#endif ++ } ++ ++ psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL; ++ psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlagsExt = ui32ConfigFlagsExt & RGXFWIF_INICFG_EXT_ALL; ++ ++ /* Initialise GPU utilisation buffer */ ++ psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord = ++ RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE); ++ ++ /* init HWPERF data */ ++ psDevInfo->psRGXFWIfFwSysData->ui32HWPerfRIdx = 0; ++ psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWIdx = 0; ++ psDevInfo->psRGXFWIfFwSysData->ui32HWPerfWrapCount = 0; ++ psDevInfo->psRGXFWIfFwSysData->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; ++ psDevInfo->psRGXFWIfFwSysData->ui32HWPerfUt = 0; ++ psDevInfo->psRGXFWIfFwSysData->ui32HWPerfDropCount = 0; ++ psDevInfo->psRGXFWIfFwSysData->ui32FirstDropOrdinal = 0; ++ psDevInfo->psRGXFWIfFwSysData->ui32LastDropOrdinal = 0; ++ ++ /*Send through the BVNC Feature Flags*/ ++ eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psFwSysInitScratch->sBvncKmFeatureFlags); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail); ++ ++ /* populate the real FwOsInit structure with the values stored in the scratch copy */ ++ OSCachedMemCopyWMB(psDevInfo->psRGXFWIfSysInit, psFwSysInitScratch, sizeof(RGXFWIF_SYSINIT)); ++ } ++ ++ OSFreeMem(psFwSysInitScratch); ++ ++ return PVRSRV_OK; ++ ++fail: ++ if (psFwSysInitScratch) ++ { ++ OSFreeMem(psFwSysInitScratch); ++ } ++ ++ RGXFreeFwSysData(psDevInfo); ++ ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++/*! 
++******************************************************************************* ++ @Function RGXSetupFwOsData ++ ++ @Description Sets up all os-specific firmware related data ++ ++ @Input psDevInfo ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXSetupFwOsData(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32KCCBSizeLog2, ++ IMG_UINT32 ui32HWRDebugDumpLimit, ++ IMG_UINT32 ui32FwOsCfgFlags) ++{ ++ PVRSRV_ERROR eError; ++ RGXFWIF_OSINIT sFwOsInitScratch; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ OSCachedMemSet(&sFwOsInitScratch, 0, sizeof(RGXFWIF_OSINIT)); ++ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ eError = RGXSetupFwGuardPage(psDevInfo); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware heap guard pages", fail); ++ } ++ ++ /* Memory tracking the connection state should be non-volatile and ++ * is not cleared on allocation to prevent loss of pre-reset information */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS & ++ ~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, ++ sizeof(RGXFWIF_CONNECTION_CTL), ++ "FwConnectionCtl", ++ &psDevInfo->psRGXFWIfConnectionCtlMemDesc, ++ NULL, ++ (void**) &psDevInfo->psRGXFWIfConnectionCtl, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Connection Control structure allocation", fail); ++ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED), ++ sizeof(RGXFWIF_OSINIT), ++ "FwOsInitStructure", ++ &psDevInfo->psRGXFWIfOsInitMemDesc, ++ NULL, ++ (void**) &psDevInfo->psRGXFWIfOsInit, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware Os Init structure allocation", fail); ++ ++ /* init HWR frame info */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, ++ sizeof(RGXFWIF_HWRINFOBUF), ++ "FwHWRInfoBuffer", ++ &psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc, ++ &sFwOsInitScratch.sRGXFWIfHWRInfoBufCtl, ++ (void**) &psDevInfo->psRGXFWIfHWRInfoBufCtl, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "HWR Info Buffer allocation", fail); ++ ++ /* Might be uncached. 
Be conservative and use a DeviceMemSet */ ++ OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBufCtl, 0, sizeof(RGXFWIF_HWRINFOBUF)); ++ ++ /* Allocate a sync for power management */ ++ eError = SyncPrimContextCreate(psDevInfo->psDeviceNode, ++ &psDevInfo->hSyncPrimContext); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive context allocation", fail); ++ ++ eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack"); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Sync primitive allocation", fail); ++ ++ /* Set up kernel CCB */ ++ eError = RGXSetupCCB(psDevInfo, ++ &psDevInfo->psKernelCCBCtl, ++ &psDevInfo->psKernelCCBCtlMemDesc, ++ &psDevInfo->psKernelCCB, ++ &psDevInfo->psKernelCCBMemDesc, ++ &sFwOsInitScratch.psKernelCCBCtl, ++ &sFwOsInitScratch.psKernelCCB, ++ ui32KCCBSizeLog2, ++ sizeof(RGXFWIF_KCCB_CMD), ++ (RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)), ++ "FwKernelCCB"); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB allocation", fail); ++ ++ /* KCCB additionally uses a return slot array for FW to be able to send back ++ * return codes for each required command ++ */ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, ++ (1U << ui32KCCBSizeLog2) * sizeof(IMG_UINT32), ++ "FwKernelCCBRtnSlots", ++ &psDevInfo->psKernelCCBRtnSlotsMemDesc, ++ &sFwOsInitScratch.psKernelCCBRtnSlots, ++ (void**) &psDevInfo->pui32KernelCCBRtnSlots, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Kernel CCB return slot array allocation", fail); ++ ++ /* Set up firmware CCB */ ++ eError = RGXSetupCCB(psDevInfo, ++ &psDevInfo->psFirmwareCCBCtl, ++ &psDevInfo->psFirmwareCCBCtlMemDesc, ++ &psDevInfo->psFirmwareCCB, ++ &psDevInfo->psFirmwareCCBMemDesc, ++ &sFwOsInitScratch.psFirmwareCCBCtl, ++ &sFwOsInitScratch.psFirmwareCCB, ++ RGXFWIF_FWCCB_NUMCMDS_LOG2, ++ sizeof(RGXFWIF_FWCCB_CMD), ++ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, ++ "FwCCB"); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Firmware CCB allocation", fail); ++ ++ eError = RGXSetupFwAllocation(psDevInfo, ++ RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS, ++ sizeof(RGXFWIF_OSDATA), ++ "FwOsData", ++ &psDevInfo->psRGXFWIfFwOsDataMemDesc, ++ &sFwOsInitScratch.sFwOsData, ++ (void**) &psDevInfo->psRGXFWIfFwOsData, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetupFwAllocation", fail); ++ ++ psDevInfo->psRGXFWIfFwOsData->ui32FwOsConfigFlags = ui32FwOsCfgFlags & RGXFWIF_INICFG_OS_ALL; ++ ++ eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psDevInfo->psRGXFWIfFwOsData->sPowerSync.ui32Addr); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Get Sync Prim FW address", fail); ++ ++ /* flush write buffers for psRGXFWIfFwOsData */ ++ OSWriteMemoryBarrier(psDevInfo->psRGXFWIfFwOsData); ++ ++ sFwOsInitScratch.ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit; ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Set up Workload Estimation firmware CCB */ ++ eError = RGXSetupCCB(psDevInfo, ++ &psDevInfo->psWorkEstFirmwareCCBCtl, ++ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, ++ &psDevInfo->psWorkEstFirmwareCCB, ++ &psDevInfo->psWorkEstFirmwareCCBMemDesc, ++ &sFwOsInitScratch.psWorkEstFirmwareCCBCtl, ++ &sFwOsInitScratch.psWorkEstFirmwareCCB, ++ RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2, ++ sizeof(RGXFWIF_WORKEST_FWCCB_CMD), ++ RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS, ++ "FwWEstCCB"); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Workload Estimation Firmware CCB allocation", fail); ++#endif /* defined(SUPPORT_WORKLOAD_ESTIMATION) */ ++ ++ /* Initialise the compatibility check data */ ++ 
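/* Both here and in RGXSetupFwSysData() above, the init data is assembled
++	 * in a local scratch copy first and only then published to the
++	 * firmware-visible allocation through a single OSCachedMemCopyWMB(),
++	 * so the firmware never observes a partially-initialised structure.
++	 * A minimal sketch of the pattern, as used below (the struct and field
++	 * names in the sketch are illustrative only, not part of this driver):
++	 *
++	 *     typedef struct { IMG_UINT32 ui32A; IMG_UINT32 ui32B; } INIT_BLK;
++	 *
++	 *     INIT_BLK sScratch = { 0 };            // staged locally
++	 *     sScratch.ui32A = 1;                   // fill in every field...
++	 *     sScratch.ui32B = 2;
++	 *     OSCachedMemCopyWMB(psFwVisibleInit,   // ...then publish once,
++	 *                        &sScratch,         // with a write-memory-
++	 *                        sizeof(sScratch)); // barrier copy
++	 */
++	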
RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sFWBVNC); ++ RGXFWIF_COMPCHECKS_BVNC_INIT(sFwOsInitScratch.sRGXCompChecks.sHWBVNC); ++ ++ /* populate the real FwOsInit structure with the values stored in the scratch copy */ ++ OSCachedMemCopyWMB(psDevInfo->psRGXFWIfOsInit, &sFwOsInitScratch, sizeof(RGXFWIF_OSINIT)); ++ ++ return PVRSRV_OK; ++ ++fail: ++ RGXFreeFwOsData(psDevInfo); ++ ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ @Function RGXSetupFirmware ++ ++ @Description Sets up all firmware related data ++ ++ @Input psDevInfo ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bEnableSignatureChecks, ++ IMG_UINT32 ui32SignatureChecksBufSize, ++ IMG_UINT32 ui32HWPerfFWBufSizeKB, ++ IMG_UINT64 ui64HWPerfFilter, ++ IMG_UINT32 ui32ConfigFlags, ++ IMG_UINT32 ui32ConfigFlagsExt, ++ IMG_UINT32 ui32FwOsCfgFlags, ++ IMG_UINT32 ui32LogType, ++ IMG_UINT32 ui32FilterFlags, ++ IMG_UINT32 ui32JonesDisableMask, ++ IMG_UINT32 ui32HWRDebugDumpLimit, ++ IMG_UINT32 ui32HWPerfCountersDataSize, ++ IMG_UINT32 *pui32TPUTrilinearFracMask, ++ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf, ++ FW_PERF_CONF eFirmwarePerf, ++ IMG_UINT32 ui32KCCBSizeLog2) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ eError = RGXSetupFwOsData(psDeviceNode, ++ ui32KCCBSizeLog2, ++ ui32HWRDebugDumpLimit, ++ ui32FwOsCfgFlags); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware os data", fail); ++ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ /* Guest drivers do not configure system-wide firmware data */ ++ psDevInfo->psRGXFWIfSysInit = NULL; ++ } ++ else ++ { ++ /* Native and Host drivers must initialise the firmware's system data */ ++ eError = RGXSetupFwSysData(psDeviceNode, ++ bEnableSignatureChecks, ++ ui32SignatureChecksBufSize, ++ ui32HWPerfFWBufSizeKB, ++ ui64HWPerfFilter, ++ ui32ConfigFlags, ++ ui32ConfigFlagsExt, ++ ui32LogType, ++ ui32FilterFlags, ++ ui32JonesDisableMask, ++ ui32HWPerfCountersDataSize, ++ pui32TPUTrilinearFracMask, ++ eRGXRDPowerIslandConf, ++ eFirmwarePerf); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Setting up firmware system data", fail); ++ } ++ ++ psDevInfo->bFirmwareInitialised = IMG_TRUE; ++ ++#if defined(PDUMP) ++ RGXPDumpLoadFWInitData(psDevInfo, ++ ui32HWPerfCountersDataSize, ++ bEnableSignatureChecks); ++#endif /* PDUMP */ ++ ++fail: ++ return eError; ++} ++ ++/*! 
++******************************************************************************* ++ @Function RGXFreeFwSysData ++ ++ @Description Frees all system-wide firmware related data ++ ++ @Input psDevInfo ++******************************************************************************/ ++static void RGXFreeFwSysData(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ psDevInfo->bFirmwareInitialised = IMG_FALSE; ++ ++ if (psDevInfo->psRGXFWAlignChecksMemDesc) ++ { ++ RGXFWFreeAlignChecks(psDevInfo); ++ } ++ ++#if defined(PDUMP) ++#if defined(RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TDM_PDS_CHECKSUM) && ++ psDevInfo->psRGXFWSigTDM2DChecksMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTDM2DChecksMemDesc); ++ psDevInfo->psRGXFWSigTDM2DChecksMemDesc = NULL; ++ } ++#endif ++ ++ if (psDevInfo->psRGXFWSigTAChecksMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc); ++ psDevInfo->psRGXFWSigTAChecksMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWSig3DChecksMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc); ++ psDevInfo->psRGXFWSig3DChecksMemDesc = NULL; ++ } ++#endif ++ ++#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) ++ if (psDevInfo->psCounterBufferMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCounterBufferMemDesc); ++ psDevInfo->psCounterBufferMemDesc = NULL; ++ } ++#endif ++ ++#if defined(SUPPORT_FIRMWARE_GCOV) ++ if (psDevInfo->psFirmwareGcovBufferMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psFirmwareGcovBufferMemDesc); ++ psDevInfo->psFirmwareGcovBufferMemDesc = NULL; ++ } ++#endif ++ ++ RGXSetupFaultReadRegisterRollback(psDevInfo); ++ ++ if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc) ++ { ++ if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); ++ psDevInfo->psRGXFWIfGpuUtilFWCb = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc); ++ psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc) ++ { ++ if (psDevInfo->psRGXFWIfRuntimeCfg != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc); ++ psDevInfo->psRGXFWIfRuntimeCfg = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc); ++ psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) ++ { ++ psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc) ++ { ++ if (psDevInfo->psRGXFWIfTraceBufCtl != NULL) ++ { ++ /* first deinit/free the tracebuffer allocation */ ++ RGXTraceBufferDeinit(psDevInfo); ++ ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc); ++ psDevInfo->psRGXFWIfTraceBufCtl = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc); ++ psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfFwSysDataMemDesc) ++ { ++ if (psDevInfo->psRGXFWIfFwSysData != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwSysDataMemDesc); ++ psDevInfo->psRGXFWIfFwSysData = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwSysDataMemDesc); ++ psDevInfo->psRGXFWIfFwSysDataMemDesc = NULL; ++ } ++ ++#if defined(SUPPORT_TBI_INTERFACE) ++ if (psDevInfo->psRGXFWIfTBIBufferMemDesc) ++ { ++ RGXTBIBufferDeinit(psDevInfo); ++ } ++#endif ++ ++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) ++ if 
(psDevInfo->psRGXFWIfRegCfgMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc); ++ psDevInfo->psRGXFWIfRegCfgMemDesc = NULL; ++ } ++#endif ++ if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc) ++ { ++ RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc); ++ psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL; ++ } ++ ++#if defined(SUPPORT_SECURITY_VALIDATION) ++ if (psDevInfo->psRGXFWIfNonSecureBufMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfNonSecureBufMemDesc); ++ psDevInfo->psRGXFWIfNonSecureBufMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfSecureBufMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSecureBufMemDesc); ++ psDevInfo->psRGXFWIfSecureBufMemDesc = NULL; ++ } ++#endif ++ ++#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) ++ { ++ _FreeSLC3Fence(psDevInfo); ++ } ++#endif ++#if defined(SUPPORT_PDVFS) ++ if (psDevInfo->psRGXFWIFCoreClkRateMemDesc) ++ { ++ if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc); ++ psDevInfo->pui32RGXFWIFCoreClkRate = NULL; ++ } ++ ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc); ++ psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL; ++ } ++#endif ++} ++ ++/*! ++******************************************************************************* ++ @Function RGXFreeFwOsData ++ ++ @Description Frees all os-specific firmware related data ++ ++ @Input psDevInfo ++******************************************************************************/ ++static void RGXFreeFwOsData(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXFreeCCBReturnSlots(psDevInfo, ++ &psDevInfo->pui32KernelCCBRtnSlots, ++ &psDevInfo->psKernelCCBRtnSlotsMemDesc); ++ RGXFreeCCB(psDevInfo, ++ &psDevInfo->psKernelCCBCtl, ++ &psDevInfo->psKernelCCBCtlMemDesc, ++ &psDevInfo->psKernelCCB, ++ &psDevInfo->psKernelCCBMemDesc); ++ ++ RGXFreeCCB(psDevInfo, ++ &psDevInfo->psFirmwareCCBCtl, ++ &psDevInfo->psFirmwareCCBCtlMemDesc, ++ &psDevInfo->psFirmwareCCB, ++ &psDevInfo->psFirmwareCCBMemDesc); ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ RGXFreeCCB(psDevInfo, ++ &psDevInfo->psWorkEstFirmwareCCBCtl, ++ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc, ++ &psDevInfo->psWorkEstFirmwareCCB, ++ &psDevInfo->psWorkEstFirmwareCCBMemDesc); ++#endif ++ ++ if (psDevInfo->psPowSyncPrim != NULL) ++ { ++ SyncPrimFree(psDevInfo->psPowSyncPrim); ++ psDevInfo->psPowSyncPrim = NULL; ++ } ++ ++ if (psDevInfo->hSyncPrimContext != (IMG_HANDLE) NULL) ++ { ++ SyncPrimContextDestroy(psDevInfo->hSyncPrimContext); ++ psDevInfo->hSyncPrimContext = (IMG_HANDLE) NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc) ++ { ++ if (psDevInfo->psRGXFWIfHWRInfoBufCtl != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc); ++ psDevInfo->psRGXFWIfHWRInfoBufCtl = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc); ++ psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfFwOsDataMemDesc) ++ { ++ if (psDevInfo->psRGXFWIfFwOsData != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfFwOsDataMemDesc); ++ psDevInfo->psRGXFWIfFwOsData = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfFwOsDataMemDesc); ++ psDevInfo->psRGXFWIfFwOsDataMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psCompletedMemDesc) ++ { ++ if 
(psDevInfo->pui32CompletedById) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psCompletedMemDesc); ++ psDevInfo->pui32CompletedById = NULL; ++ } ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psCompletedMemDesc); ++ psDevInfo->psCompletedMemDesc = NULL; ++ } ++ if (psDevInfo->psEndTimeMemDesc) ++ { ++ if (psDevInfo->pui64EndTimeById) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psEndTimeMemDesc); ++ psDevInfo->pui64EndTimeById = NULL; ++ } ++ ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psEndTimeMemDesc); ++ psDevInfo->psEndTimeMemDesc = NULL; ++ } ++ if (psDevInfo->psStartTimeMemDesc) ++ { ++ if (psDevInfo->pui64StartTimeById) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psStartTimeMemDesc); ++ psDevInfo->pui64StartTimeById = NULL; ++ } ++ ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psStartTimeMemDesc); ++ psDevInfo->psStartTimeMemDesc = NULL; ++ } ++#if !defined(PVRSRV_USE_BRIDGE_LOCK) ++ if (psDevInfo->hTimerQueryLock) ++ { ++ OSLockDestroy(psDevInfo->hTimerQueryLock); ++ psDevInfo->hTimerQueryLock = NULL; ++ } ++#endif ++ ++ if (psDevInfo->psRGXFWHeapGuardPageReserveMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWHeapGuardPageReserveMemDesc); ++ } ++} ++ ++/*! ++******************************************************************************* ++ @Function RGXFreeFirmware ++ ++ @Description Frees all the firmware-related allocations ++ ++ @Input psDevInfo ++******************************************************************************/ ++void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXFreeFwOsData(psDevInfo); ++ ++ if (psDevInfo->psRGXFWIfConnectionCtl) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfConnectionCtlMemDesc); ++ psDevInfo->psRGXFWIfConnectionCtl = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfConnectionCtlMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfConnectionCtlMemDesc); ++ psDevInfo->psRGXFWIfConnectionCtlMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfOsInit) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOsInitMemDesc); ++ psDevInfo->psRGXFWIfOsInit = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfOsInitMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfOsInitMemDesc); ++ psDevInfo->psRGXFWIfOsInitMemDesc = NULL; ++ } ++ ++ RGXFreeFwSysData(psDevInfo); ++ if (psDevInfo->psRGXFWIfSysInit) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfSysInitMemDesc); ++ psDevInfo->psRGXFWIfSysInit = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfSysInitMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfSysInitMemDesc); ++ psDevInfo->psRGXFWIfSysInitMemDesc = NULL; ++ } ++} ++ ++/****************************************************************************** ++ FUNCTION : RGXAcquireKernelCCBSlot ++ ++ PURPOSE : Attempts to obtain a slot in the Kernel CCB ++ ++ PARAMETERS : psCCB - the CCB ++ : Address of space if available, NULL otherwise ++ ++ RETURNS : PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXAcquireKernelCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, ++ const RGXFWIF_CCB_CTL *psKCCBCtl, ++ IMG_UINT32 *pui32Offset) ++{ ++ IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset; ++#if defined(PDUMP) ++ const DEVMEM_MEMDESC *psKCCBCtrlMemDesc = psDevInfo->psKernelCCBCtlMemDesc; ++#endif ++ ++ ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; ++ ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask; ++ ++#if defined(PDUMP) ++ /* Wait for sufficient CCB space to become available */ ++ 
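/* The kernel CCB is a power-of-two ring buffer that keeps one slot unused
++	 * to tell "full" apart from "empty": the ring is empty when woff == roff
++	 * and full when ((woff + 1) & wrap) == roff, so one slot is always
++	 * sacrificed. A minimal sketch of the occupancy test used here and in
++	 * RGXPollKernelCCBSlot() below (variable names illustrative only):
++	 *
++	 *     IMG_UINT32 ui32Next = (ui32Woff + 1) & ui32Wrap; // wrap = size - 1
++	 *
++	 *     if (ui32Next == ui32Roff)
++	 *     {
++	 *         return PVRSRV_ERROR_KERNEL_CCB_FULL;         // no free slot
++	 *     }
++	 *     // slot ui32Woff can be written; publish ui32Next afterwards
++	 */
++	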
PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, 0,
++	                      "Wait for kCCB woff=%u", ui32NextWriteOffset);
++	DevmemPDumpCBP(psKCCBCtrlMemDesc,
++	               offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
++	               ui32NextWriteOffset,
++	               1,
++	               (psKCCBCtl->ui32WrapMask + 1));
++#endif
++
++	if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset)
++	{
++		return PVRSRV_ERROR_KERNEL_CCB_FULL;
++	}
++	*pui32Offset = ui32NextWriteOffset;
++	return PVRSRV_OK;
++}
++
++/******************************************************************************
++ FUNCTION   : RGXPollKernelCCBSlot
++
++ PURPOSE    : Poll for space in the Kernel CCB
++
++ PARAMETERS : psKCCBCtrlMemDesc  memdesc of the Kernel CCB control structure
++            : psKCCBCtl          the Kernel CCB control structure
++
++ RETURNS    : PVRSRV_ERROR
++******************************************************************************/
++static PVRSRV_ERROR RGXPollKernelCCBSlot(const DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
++                                         const RGXFWIF_CCB_CTL *psKCCBCtl)
++{
++	IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset;
++
++	ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
++	ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
++
++	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++	{
++		if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset)
++		{
++			return PVRSRV_OK;
++		}
++
++		/*
++		 * The following check doesn't impact performance, since the
++		 * CPU has to wait for the GPU anyway (full kernel CCB).
++		 */
++		if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
++		{
++			return PVRSRV_ERROR_KERNEL_CCB_FULL;
++		}
++
++		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++	} END_LOOP_UNTIL_TIMEOUT();
++
++	return PVRSRV_ERROR_KERNEL_CCB_FULL;
++}
++
++/******************************************************************************
++ FUNCTION   : RGXGetCmdMemCopySize
++
++ PURPOSE    : Calculates the actual size of the KCCB command being used
++
++ PARAMETERS : eCmdType  Type of KCCB command
++
++ RETURNS    : The actual size of the KCCB command on success, zero otherwise
++******************************************************************************/
++static IMG_UINT32 RGXGetCmdMemCopySize(RGXFWIF_KCCB_CMD_TYPE eCmdType)
++{
++	/* First get the offset of uCmdData inside the RGXFWIF_KCCB_CMD struct;
++	 * this accounts for the alignment requirement of the uCmdData union.
++	 *
++	 * Then add the command-data size for the given command type to obtain
++	 * the actual command size required for the mem copy.
++	 *
++	 * NOTE: Make sure that uCmdData is the last member of the RGXFWIF_KCCB_CMD struct.
++ */ ++ switch (eCmdType) ++ { ++ case RGXFWIF_KCCB_CMD_KICK: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_KICK_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_MMUCACHE: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_MMUCACHEDATA); ++ } ++#if defined(SUPPORT_USC_BREAKPOINT) ++ case RGXFWIF_KCCB_CMD_BP: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_BPDATA); ++ } ++#endif ++ case RGXFWIF_KCCB_CMD_SLCFLUSHINVAL: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_SLCFLUSHINVALDATA); ++ } ++ case RGXFWIF_KCCB_CMD_CLEANUP: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CLEANUP_REQUEST); ++ } ++ case RGXFWIF_KCCB_CMD_POW: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_POWER_REQUEST); ++ } ++ case RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE: ++ case RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_ZSBUFFER_BACKING_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELIST_GS_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_FREELISTS_RECONSTRUCTION_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_WRITE_OFFSET_UPDATE_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_FORCE_UPDATE: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA); ++ } ++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) ++ case RGXFWIF_KCCB_CMD_REGCONFIG: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_REGCONFIG_DATA); ++ } ++#endif ++ case RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS); ++ } ++#if defined(SUPPORT_PDVFS) ++ case RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_PDVFS_MAX_FREQ_DATA); ++ } ++#endif ++ case RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_OS_STATE_CHANGE_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_COUNTER_DUMP: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_COUNTER_DUMP_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL); ++ } ++ case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS); ++ } ++ case RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CONFIG_DA_BLKS); ++ } ++ case RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_HWPERF_CTRL_BLKS); ++ } ++ case RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_CORECLKSPEEDCHANGE_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE: ++ case RGXFWIF_KCCB_CMD_WDG_CFG: ++ case RGXFWIF_KCCB_CMD_PHR_CFG: ++ case RGXFWIF_KCCB_CMD_HEALTH_CHECK: ++ case RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE: ++ case RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL: ++ { ++ /* No command 
specific data */ ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData); ++ } ++#if defined(SUPPORT_VALIDATION) ++ case RGXFWIF_KCCB_CMD_RGXREG: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_RGXREG_DATA); ++ } ++ case RGXFWIF_KCCB_CMD_GPUMAP: ++ { ++ return offsetof(RGXFWIF_KCCB_CMD, uCmdData) + sizeof(RGXFWIF_GPUMAP_DATA); ++ } ++#endif ++ default: ++ { ++ /* Invalid (OR) Unused (OR) Newly added command type */ ++ return 0; /* Error */ ++ } ++ } ++} ++ ++PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32SlotNum, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = PVRSRVWaitForValueKM( ++ (IMG_UINT32 __iomem *)&psDevInfo->pui32KernelCCBRtnSlots[ui32SlotNum], ++ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, ++ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVWaitForValueKM"); ++ ++#if defined(PDUMP) ++ /* PDumping conditions same as RGXSendCommandRaw for the actual command and poll command to go in harmony */ ++ if (PDumpCheckFlagsWrite(psDevInfo->psDeviceNode, ui32PDumpFlags)) ++ { ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Poll on KCCB slot %u for value %u (mask: 0x%x)", ui32SlotNum, ++ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED); ++ ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc, ++ ui32SlotNum * sizeof(IMG_UINT32), ++ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, ++ RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ ui32PDumpFlags); ++ PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); ++#endif ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_KCCB_CMD *psKCCBCmd, ++ IMG_UINT32 uiPDumpFlags, ++ IMG_UINT32 *pui32CmdKCCBSlot) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; ++ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; ++ IMG_UINT8 *pui8KCCB = psDevInfo->psKernelCCB; ++ IMG_UINT32 ui32NewWriteOffset; ++ IMG_UINT32 ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset; ++ IMG_UINT32 ui32CmdMemCopySize; ++ ++#if !defined(PDUMP) ++ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags); ++#else ++ IMG_BOOL bContCaptureOn = PDumpCheckFlagsWrite(psDeviceNode, PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); /* client connected or in pdump init phase */ ++ IMG_BOOL bPDumpEnabled = PDumpCheckFlagsWrite(psDeviceNode, uiPDumpFlags); /* Are we in capture range or continuous and not in a power transition */ ++ ++ if (bContCaptureOn) ++ { ++ /* in capture range */ ++ if (bPDumpEnabled) ++ { ++ if (!psDevInfo->bDumpedKCCBCtlAlready) ++ { ++ /* entering capture range */ ++ psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE; ++ ++ /* Wait for the live FW to catch up */ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: waiting on fw to catch-up, roff: %d, woff: %d", ++ __func__, ++ psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset)); ++ PVRSRVPollForValueKM(psDeviceNode, ++ (IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset, ++ ui32OldWriteOffset, 0xFFFFFFFF, ++ POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP); ++ ++ /* Dump Init state of Kernel CCB control (read and write offset) */ ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, ++ "Initial state of kernel CCB Control, roff: %d, woff: %d", ++ psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset); ++ ++ DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc, ++ 0, ++ sizeof(RGXFWIF_CCB_CTL), ++ uiPDumpFlags); ++ } ++ } ++ } ++#endif ++ ++#if defined(SUPPORT_AUTOVZ) ++ if 
(!((KM_FW_CONNECTION_IS(READY, psDevInfo) && KM_OS_CONNECTION_IS(READY, psDevInfo)) || ++ (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) && ++ !PVRSRV_VZ_MODE_IS(NATIVE)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: The firmware-driver connection is invalid:" ++ "driver state = %u / firmware state = %u;" ++ "expected READY (%u/%u) or ACTIVE (%u/%u);", ++ __func__, KM_GET_OS_CONNECTION(psDevInfo), KM_GET_FW_CONNECTION(psDevInfo), ++ RGXFW_CONNECTION_OS_READY, RGXFW_CONNECTION_FW_READY, ++ RGXFW_CONNECTION_OS_ACTIVE, RGXFW_CONNECTION_FW_ACTIVE)); ++ eError = PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE; ++ goto _RGXSendCommandRaw_Exit; ++ } ++#endif ++ ++ PVR_ASSERT(sizeof(RGXFWIF_KCCB_CMD) == psKCCBCtl->ui32CmdSize); ++ if (!OSLockIsLocked(psDeviceNode->hPowerLock)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s called without power lock held!", ++ __func__)); ++ PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); ++ } ++ ++ /* Acquire a slot in the CCB */ ++ eError = RGXAcquireKernelCCBSlot(psDevInfo, psKCCBCtl, &ui32NewWriteOffset); ++ if (eError != PVRSRV_OK) ++ { ++ goto _RGXSendCommandRaw_Exit; ++ } ++ ++ /* Calculate actual size of command to optimize device mem copy */ ++ ui32CmdMemCopySize = RGXGetCmdMemCopySize(psKCCBCmd->eCmdType); ++ PVR_LOG_RETURN_IF_FALSE(ui32CmdMemCopySize !=0, "RGXGetCmdMemCopySize failed", PVRSRV_ERROR_INVALID_CCB_COMMAND); ++ ++ /* Copy the command into the CCB */ ++ OSCachedMemCopyWMB(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize], ++ psKCCBCmd, ui32CmdMemCopySize); ++ ++ /* If non-NULL pui32CmdKCCBSlot passed-in, return the kCCB slot in which the command was enqueued */ ++ if (pui32CmdKCCBSlot) ++ { ++ *pui32CmdKCCBSlot = ui32OldWriteOffset; ++ ++ /* Each such command enqueue needs to reset the slot value first. 
This is so that the caller
++		 * doesn't see a stale/false value in the allotted slot */
++		OSWriteDeviceMem32WithWMB(&psDevInfo->pui32KernelCCBRtnSlots[ui32OldWriteOffset],
++		                          RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE);
++#if defined(PDUMP)
++		PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags,
++		                      "Reset kCCB slot number %u", ui32OldWriteOffset);
++		DevmemPDumpLoadMem(psDevInfo->psKernelCCBRtnSlotsMemDesc,
++		                   ui32OldWriteOffset * sizeof(IMG_UINT32),
++		                   sizeof(IMG_UINT32),
++		                   uiPDumpFlags);
++#endif
++		PVR_DPF((PVR_DBG_MESSAGE, "%s: Device (%p) KCCB slot %u reset with value %u for command type %x",
++		         __func__, psDevInfo, ui32OldWriteOffset, RGXFWIF_KCCB_RTN_SLOT_NO_RESPONSE, psKCCBCmd->eCmdType));
++	}
++
++	/* Move past the current command */
++	psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset;
++
++	OSWriteMemoryBarrier(&psKCCBCtl->ui32WriteOffset);
++
++#if defined(PDUMP)
++	if (bContCaptureOn)
++	{
++		/* in capture range */
++		if (bPDumpEnabled)
++		{
++			/* Dump new Kernel CCB content */
++			PDUMPCOMMENTWITHFLAGS(psDeviceNode,
++			                      uiPDumpFlags, "Dump kCCB cmd woff = %d",
++			                      ui32OldWriteOffset);
++			DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc,
++			                   ui32OldWriteOffset * psKCCBCtl->ui32CmdSize,
++			                   ui32CmdMemCopySize,
++			                   uiPDumpFlags);
++
++			/* Dump new kernel CCB write offset */
++			PDUMPCOMMENTWITHFLAGS(psDeviceNode,
++			                      uiPDumpFlags, "Dump kCCBCtl woff: %d",
++			                      ui32NewWriteOffset);
++			DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
++			                   offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
++			                   sizeof(IMG_UINT32),
++			                   uiPDumpFlags);
++
++			/* mimic the read-back of the write from above */
++			DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
++			                       offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
++			                       ui32NewWriteOffset,
++			                       0xFFFFFFFF,
++			                       PDUMP_POLL_OPERATOR_EQUAL,
++			                       uiPDumpFlags);
++		}
++		/* out of capture range */
++		else
++		{
++			eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset);
++			PVR_LOG_GOTO_IF_ERROR(eError, "RGXPdumpDrainKCCB", _RGXSendCommandRaw_Exit);
++		}
++	}
++#endif
++
++	PDUMPCOMMENTWITHFLAGS(psDeviceNode, uiPDumpFlags, "MTS kick for kernel CCB");
++	/*
++	 * Kick the MTS to schedule the firmware.
++	 */
++	__MTSScheduleWrite(psDevInfo, RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK);
++
++	PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE,
++	           RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK, uiPDumpFlags);
++
++#if defined(SUPPORT_AUTOVZ)
++	RGXUpdateAutoVzWdgToken(psDevInfo);
++#endif
++
++#if defined(NO_HARDWARE)
++	/* keep the roff updated because the fw isn't there to update it */
++	psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset;
++#endif
++
++_RGXSendCommandRaw_Exit:
++	return eError;
++}
++
++/******************************************************************************
++ FUNCTION	: _AllocDeferredCommand
++
++ PURPOSE	: Allocate a KCCB command and add it to the KCCB deferred list
++
++ PARAMETERS	: psDevInfo	RGX device info
++		: psKCCBCmd	Firmware Command
++		: uiPDumpFlags	Pdump flags
++
++ RETURNS	: PVRSRV_OK if all went well, PVRSRV_ERROR_RETRY otherwise.
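++
++ NOTE		: Illustrative usage only (it mirrors the caller further below):
++		  this helper is the fallback taken when the kernel CCB is full,
++		  e.g.
++
++		      if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
++		      {
++		          eError = _AllocDeferredCommand(psDevInfo, psKCCBCmd, uiPDumpFlags);
++		      }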
++******************************************************************************/
++static PVRSRV_ERROR _AllocDeferredCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                          RGXFWIF_KCCB_CMD *psKCCBCmd,
++                                          IMG_UINT32 uiPDumpFlags)
++{
++	RGX_DEFERRED_KCCB_CMD *psDeferredCommand;
++	OS_SPINLOCK_FLAGS uiFlags;
++
++	psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand));
++
++	if (!psDeferredCommand)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "Deferring a KCCB command failed: allocation failure: requesting retry"));
++		return PVRSRV_ERROR_RETRY;
++	}
++
++	psDeferredCommand->sKCCBcmd = *psKCCBCmd;
++	psDeferredCommand->uiPDumpFlags = uiPDumpFlags;
++	psDeferredCommand->psDevInfo = psDevInfo;
++
++	OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
++	dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode));
++	psDevInfo->ui32KCCBDeferredCommandsCount++;
++	OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
++
++	return PVRSRV_OK;
++}
++
++/******************************************************************************
++ FUNCTION	: _FreeDeferredCommand
++
++ PURPOSE	: Remove a sent deferred KCCB command from the deferred list
++		  and free it
++
++ PARAMETERS	: psNode		Node in deferred list
++		: psDeferredKCCBCmd	KCCB Command to free
++
++ RETURNS	: None
++******************************************************************************/
++static void _FreeDeferredCommand(DLLIST_NODE *psNode, RGX_DEFERRED_KCCB_CMD *psDeferredKCCBCmd)
++{
++	dllist_remove_node(psNode);
++	psDeferredKCCBCmd->psDevInfo->ui32KCCBDeferredCommandsCount--;
++	OSFreeMem(psDeferredKCCBCmd);
++}
++
++/******************************************************************************
++ FUNCTION	: RGXSendCommandsFromDeferredList
++
++ PURPOSE	: Try to send the KCCB commands on the deferred list to the KCCB.
++		  Must be called while holding the PowerLock.
++
++ PARAMETERS	: psDevInfo	RGX device info
++		: bPoll		Poll for space in the KCCB
++
++ RETURNS	: PVRSRV_OK if all commands on the deferred list were sent to
++		  the KCCB, PVRSRV_ERROR_KERNEL_CCB_FULL otherwise.
++******************************************************************************/
++PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++	DLLIST_NODE *psNode, *psNext;
++	RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd;
++	DLLIST_NODE sCommandList;
++	OS_SPINLOCK_FLAGS uiFlags;
++
++	PVR_ASSERT(PVRSRVPwrLockIsLockedByMe(psDevInfo->psDeviceNode));
++
++	/* !!! Important !!!
++	 *
++	 * Moving the whole sKCCBDeferredCommandsListHead list (protected by
++	 * hLockKCCBDeferredCommandsList) to the temporary list below is only
++	 * valid under the principle that all of these operations are also
++	 * protected by the power lock. It must be held so that the order of
++	 * the commands doesn't get messed up while we're performing the
++	 * operations on the local list.
++	 *
++	 * Releasing hLockKCCBDeferredCommandsList is necessary because
++	 * _FreeDeferredCommand() frees memory, which can't be done in atomic
++	 * context (inside a section protected by a spin lock).
++	 *
++	 * We're using a spin lock here instead of a mutex so that the MISR can
++	 * quickly check whether the list is empty without any risk of the MISR
++	 * going to sleep on a lock.
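++	 *
++	 * That keeps any MISR-side emptiness check non-blocking; a minimal
++	 * sketch of such a check (illustrative only, not a quote of the MISR):
++	 *
++	 *     OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
++	 *     bNonEmpty = !dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead);
++	 *     OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
++	 *     if (bNonEmpty) ... take the power lock, then call this function.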
++ */ ++ ++ /* move the whole list to a local list so it can be processed without lock */ ++ OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); ++ dllist_replace_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); ++ OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ if (dllist_is_empty(&sCommandList)) ++ { ++ return PVRSRV_OK; ++ } ++ ++ /* For every deferred KCCB command, try to send it*/ ++ dllist_foreach_node(&sCommandList, psNode, psNext) ++ { ++ psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode); ++ eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo, ++ &psTempDeferredKCCBCmd->sKCCBcmd, ++ psTempDeferredKCCBCmd->uiPDumpFlags, ++ NULL /* We surely aren't interested in kCCB slot number of deferred command */); ++ if (eError != PVRSRV_OK) ++ { ++ if (!bPoll) ++ { ++ eError = PVRSRV_ERROR_KERNEL_CCB_FULL; ++ goto cleanup_; ++ } ++ break; ++ } ++ ++ _FreeDeferredCommand(psNode, psTempDeferredKCCBCmd); ++ } ++ ++ if (bPoll) ++ { ++ PVRSRV_ERROR eErrPollForKCCBSlot; ++ ++ /* Don't overwrite eError because if RGXPollKernelCCBSlot returns OK and the ++ * outer loop times-out, we'll still want to return KCCB_FULL to caller ++ */ ++ eErrPollForKCCBSlot = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, ++ psDevInfo->psKernelCCBCtl); ++ if (eErrPollForKCCBSlot == PVRSRV_ERROR_KERNEL_CCB_FULL) ++ { ++ eError = PVRSRV_ERROR_KERNEL_CCB_FULL; ++ goto cleanup_; ++ } ++ } ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++cleanup_: ++ /* if the local list is not empty put it back to the deferred list head ++ * so that the old order of commands is retained */ ++ OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); ++ dllist_insert_list_at_head(&psDevInfo->sKCCBDeferredCommandsListHead, &sCommandList); ++ OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_KCCB_CMD *psKCCBCmd, ++ IMG_UINT32 uiPDumpFlags, ++ IMG_UINT32 *pui32CmdKCCBSlot) ++{ ++ IMG_BOOL bPoll = (pui32CmdKCCBSlot != NULL); ++ PVRSRV_ERROR eError; ++ ++ /* ++ * First try to Flush all the cmds in deferred list. ++ * ++ * We cannot defer an incoming command if the caller is interested in ++ * knowing the command's kCCB slot: it plans to poll/wait for a ++ * response from the FW just after the command is enqueued, so we must ++ * poll for space to be available. ++ */ ++ eError = RGXSendCommandsFromDeferredList(psDevInfo, bPoll); ++ if (eError == PVRSRV_OK) ++ { ++ eError = RGXSendCommandRaw(psDevInfo, ++ psKCCBCmd, ++ uiPDumpFlags, ++ pui32CmdKCCBSlot); ++ } ++ ++ /* ++ * If we don't manage to enqueue one of the deferred commands or the command ++ * passed as argument because the KCCB is full, insert the latter into the deferred commands list. ++ * The deferred commands will also be flushed eventually by: ++ * - one more KCCB command sent for any DM ++ * - RGX_MISRHandler_CheckFWActivePowerState ++ */ ++ if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL) ++ { ++ if (pui32CmdKCCBSlot == NULL) ++ { ++ eError = _AllocDeferredCommand(psDevInfo, psKCCBCmd, uiPDumpFlags); ++ } ++ else ++ { ++ /* Let the caller retry. Otherwise if we deferred the command and returned OK, ++ * the caller can end up looking in a stale CCB slot. 
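++			 * A caller that needs the slot number therefore retries itself,
++			 * e.g. (illustrative only):
++			 *
++			 *     do {
++			 *         eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd,
++			 *                                               uiPDumpFlags, &ui32CmdKCCBSlot);
++			 *     } while (eError == PVRSRV_ERROR_KERNEL_CCB_FULL);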
++ */ ++ PVR_DPF((PVR_DBG_WARNING, "%s: Couldn't flush the deferred queue for a command (Type:%d) " ++ "- will be retried", __func__, psKCCBCmd->eCmdType)); ++ } ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_KCCB_CMD *psKCCBCmd, ++ IMG_UINT32 ui32PDumpFlags, ++ IMG_UINT32 *pui32CmdKCCBSlot) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; ++ ++ /* Ensure Rogue is powered up before kicking MTS */ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: failed to acquire powerlock (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ ++ goto _PVRSRVPowerLock_Exit; ++ } ++ ++ PDUMPPOWCMDSTART(psDeviceNode); ++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, ++ PVRSRV_DEV_POWER_STATE_ON, ++ PVRSRV_POWER_FLAGS_NONE); ++ PDUMPPOWCMDEND(psDeviceNode); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ ++ goto _PVRSRVSetDevicePowerStateKM_Exit; ++ } ++ ++ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, ++ psKCCBCmd, ++ ui32PDumpFlags, ++ pui32CmdKCCBSlot); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to schedule command (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++#if defined(DEBUG) ++ /* PVRSRVDebugRequest must be called without powerlock */ ++ PVRSRVPowerUnlock(psDeviceNode); ++ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); ++ goto _PVRSRVPowerLock_Exit; ++#endif ++ } ++ ++_PVRSRVSetDevicePowerStateKM_Exit: ++ PVRSRVPowerUnlock(psDeviceNode); ++ ++_PVRSRVPowerLock_Exit: ++ return eError; ++} ++ ++void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ OSScheduleMISR(psDevInfo->hProcessQueuesMISR); ++} ++ ++#if defined(SUPPORT_VALIDATION) ++PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT64 ui64RegVal, ++ IMG_UINT64 ui64Size, ++ IMG_UINT32 ui32Offset, ++ IMG_BOOL bWriteOp) ++{ ++ RGXFWIF_KCCB_CMD sRgxRegsCmd = {0}; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ PVRSRV_ERROR eError; ++ ++ sRgxRegsCmd.eCmdType = RGXFWIF_KCCB_CMD_RGXREG; ++ sRgxRegsCmd.uCmdData.sFwRgxData.ui64RegVal = ui64RegVal; ++ sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegWidth = ui64Size; ++ sRgxRegsCmd.uCmdData.sFwRgxData.ui32RegAddr = ui32Offset; ++ sRgxRegsCmd.uCmdData.sFwRgxData.bWriteOp = bWriteOp; ++ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, ++ RGXFWIF_DM_GP, ++ &sRgxRegsCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); ++ ++ if (bWriteOp) ++ { ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ++ ui32kCCBCommandSlot, ++ PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); ++ } ++ ++ return eError; ++} ++#endif ++ ++/*! 
++******************************************************************************* ++ ++ @Function RGX_MISRHandler_ScheduleProcessQueues ++ ++ @Description - Sends uncounted kick to all the DMs (the FW will process all ++ the queue for all the DMs) ++******************************************************************************/ ++static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = pvData; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError; ++ PVRSRV_DEV_POWER_STATE ePowerState; ++ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ return; ++ } ++ ++ /* Check whether it's worth waking up the GPU */ ++ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); ++ ++ if (!PVRSRV_VZ_MODE_IS(GUEST) && ++ (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF)) ++ { ++ /* For now, guest drivers will always wake-up the GPU */ ++ RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; ++ IMG_BOOL bGPUHasWorkWaiting; ++ ++ bGPUHasWorkWaiting = ++ (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED); ++ ++ if (!bGPUHasWorkWaiting) ++ { ++ /* all queues are empty, don't wake up the GPU */ ++ PVRSRVPowerUnlock(psDeviceNode); ++ return; ++ } ++ } ++ ++ PDUMPPOWCMDSTART(psDeviceNode); ++ /* wake up the GPU */ ++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, ++ PVRSRV_DEV_POWER_STATE_ON, ++ PVRSRV_POWER_FLAGS_NONE); ++ PDUMPPOWCMDEND(psDeviceNode); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition Rogue to ON (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ ++ PVRSRVPowerUnlock(psDeviceNode); ++ return; ++ } ++ ++ /* uncounted kick to the FW */ ++ HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED); ++ __MTSScheduleWrite(psDevInfo, (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED); ++ ++ PVRSRVPowerUnlock(psDeviceNode); ++} ++ ++PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ return OSInstallMISR(phMISR, ++ RGX_MISRHandler_ScheduleProcessQueues, ++ psDeviceNode, ++ "RGX_ScheduleProcessQueues"); ++} ++ ++PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_DM eKCCBType, ++ RGXFWIF_KCCB_CMD *psKCCBCmd, ++ IMG_UINT32 ui32PDumpFlags, ++ IMG_UINT32 *pui32CmdKCCBSlot) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 uiMMUSyncUpdate; ++ ++ /* Don't send the command/power up request if the device is de-initialising. ++ * The de-init thread could destroy the device whilst the power up ++ * sequence below is accessing the HW registers. 
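++	 * The same state check is repeated below once the power lock is held,
++	 * in case the de-init thread raced us before the lock was acquired.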
++ */ ++ if (unlikely((psDevInfo == NULL) || ++ (psDevInfo->psDeviceNode == NULL) || ++ (psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) ++ { ++ return PVRSRV_ERROR_INVALID_DEVICE; ++ } ++ ++#if defined(SUPPORT_VALIDATION) ++ /* For validation, force the core to different dust count states with each kick */ ++ if ((eKCCBType == RGXFWIF_DM_GEOM) || (eKCCBType == RGXFWIF_DM_CDM)) ++ { ++ if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN) ++ { ++ IMG_UINT32 ui32NumDusts = RGXGetNextDustCount(&psDevInfo->sDustReqState, psDevInfo->sDevFeatureCfg.ui32MAXDustCount); ++ PVRSRVDeviceGPUUnitsPowerChange(psDevInfo->psDeviceNode, ui32NumDusts); ++ } ++ } ++ ++ if (psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_DISABLE) ++ { ++ if (psDevInfo->ui32ECCRAMErrInjInterval > 0U) ++ { ++ --psDevInfo->ui32ECCRAMErrInjInterval; ++ } ++ else ++ { ++ IMG_UINT64 ui64ECCRegVal = 0U; ++ ++ psDevInfo->ui32ECCRAMErrInjInterval = RGXKM_ECC_ERR_INJ_INTERVAL; ++ ++ if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_SLC) ++ { ++ PVR_LOG(("ECC RAM Error Inject SLC")); ++ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_SLC_SIDEKICK_EN; ++ } ++ else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_USC) ++ { ++ PVR_LOG(("ECC RAM Error Inject USC")); ++ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_USC_EN; ++ } ++ else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_TPU) ++ { ++#if defined(RGX_FEATURE_MAX_TPU_PER_SPU) ++ PVR_LOG(("ECC RAM Error Inject Swift TPU")); ++ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_SWIFT_EN; ++#else ++ PVR_LOG(("ECC RAM Error Inject TPU MCU L0")); ++ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_TPU_MCU_L0_EN; ++#endif ++ } ++ else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_RASCAL) ++ { ++#if defined(RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN) ++ PVR_LOG(("ECC RAM Error Inject RASCAL")); ++ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_RASCAL_EN; ++#else ++ PVR_LOG(("ECC RAM Error Inject USC")); ++ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_USC_EN; ++#endif ++ } ++ else if (psDevInfo->ui32ECCRAMErrInjModule == RGXKM_ECC_ERR_INJ_MARS) ++ { ++ PVR_LOG(("ECC RAM Error Inject MARS")); ++ ui64ECCRegVal = RGX_CR_ECC_RAM_ERR_INJ_MARS_EN; ++ } ++ else ++ { ++ } ++ ++ OSWriteMemoryBarrier(NULL); ++ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_ECC_RAM_ERR_INJ, ui64ECCRegVal); ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Write reg ECC_RAM_ERR_INJ"); ++ PDUMPREG64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_ECC_RAM_ERR_INJ, ui64ECCRegVal, PDUMP_FLAGS_CONTINUOUS); ++ OSWriteMemoryBarrier(NULL); ++ } ++ } ++#endif ++ ++ /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful ++ in a scenario with several applications allocating resources. */ ++ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ ++ /* If system is found powered OFF, Retry scheduling the command */ ++ if (likely(eError == PVRSRV_ERROR_SYSTEM_STATE_POWERED_OFF)) ++ { ++ eError = PVRSRV_ERROR_RETRY; ++ } ++ ++ goto RGXScheduleCommand_exit; ++ } ++ ++ if (unlikely(psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)) ++ { ++ /* If we have the power lock the device is valid but the deinit ++ * thread could be waiting for the lock. 
*/ ++ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); ++ return PVRSRV_ERROR_INVALID_DEVICE; ++ } ++ ++ /* Ensure device is powered up before sending any commands */ ++ PDUMPPOWCMDSTART(psDevInfo->psDeviceNode); ++ eError = PVRSRVSetDevicePowerStateKM(psDevInfo->psDeviceNode, ++ PVRSRV_DEV_POWER_STATE_ON, ++ PVRSRV_POWER_FLAGS_NONE); ++ PDUMPPOWCMDEND(psDevInfo->psDeviceNode); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ goto _PVRSRVSetDevicePowerStateKM_Exit; ++ } ++ ++ eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate); ++ if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; ++ ++ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, pui32CmdKCCBSlot); ++ if (unlikely(eError != PVRSRV_OK)) goto _PVRSRVSetDevicePowerStateKM_Exit; ++ ++_PVRSRVSetDevicePowerStateKM_Exit: ++ PVRSRVPowerUnlock(psDevInfo->psDeviceNode); ++ ++RGXScheduleCommand_exit: ++ return eError; ++} ++ ++/* ++ * RGXCheckFirmwareCCB ++ */ ++void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl; ++ IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB; ++ ++#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) ++ PVR_LOG_RETURN_VOID_IF_FALSE(PVRSRV_VZ_MODE_IS(NATIVE) || ++ (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && ++ KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)), ++ "FW-KM connection is down"); ++#endif ++ ++ while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset) ++ { ++ /* Point to the next command */ ++ const RGXFWIF_FWCCB_CMD *psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset; ++ ++ HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType); ++ switch (psFwCCBCmd->eCmdType) ++ { ++ case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING: ++ { ++ if (psDevInfo->bPDPEnabled) ++ { ++ PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_BACKING, ++ "Request to add backing to ZSBuffer"); ++ } ++ RGXProcessRequestZSBufferBacking(psDevInfo, ++ psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); ++ break; ++ } ++ ++ case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING: ++ { ++ if (psDevInfo->bPDPEnabled) ++ { ++ PDUMP_PANIC(psDevInfo->psDeviceNode, ZSBUFFER_UNBACKING, ++ "Request to remove backing from ZSBuffer"); ++ } ++ RGXProcessRequestZSBufferUnbacking(psDevInfo, ++ psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID); ++ break; ++ } ++ ++ case RGXFWIF_FWCCB_CMD_FREELIST_GROW: ++ { ++ if (psDevInfo->bPDPEnabled) ++ { ++ PDUMP_PANIC(psDevInfo->psDeviceNode, FREELIST_GROW, ++ "Request to grow the free list"); ++ } ++ RGXProcessRequestGrow(psDevInfo, ++ psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID); ++ break; ++ } ++ ++ case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION: ++ { ++ if (psDevInfo->bPDPEnabled) ++ { ++ PDUMP_PANIC(psDevInfo->psDeviceNode, FREELISTS_RECONSTRUCTION, ++ "Request to reconstruct free lists"); ++ } ++ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d) for %d freelists", ++ __func__, ++ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, ++ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); ++ } ++ else ++ { ++ PVR_ASSERT(psDevInfo->psRGXFWIfHWRInfoBufCtl); ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freelist reconstruction request (%d/%d) for %d freelists", ++ __func__, ++ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1, ++ 
psDevInfo->psRGXFWIfHWRInfoBufCtl->ui32HwrCounter+1, ++ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount)); ++ } ++ ++ RGXProcessRequestFreelistsReconstruction(psDevInfo, ++ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount, ++ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs); ++ break; ++ } ++ ++ case RGXFWIF_FWCCB_CMD_CONTEXT_FW_PF_NOTIFICATION: ++ { ++ /* Notify client drivers */ ++ /* Client notification of device error will be achieved by ++ * clients calling UM function RGXGetLastDeviceError() */ ++ psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; ++ ++ /* Notify system layer */ ++ { ++ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; ++ const RGXFWIF_FWCCB_CMD_FW_PAGEFAULT_DATA *psCmdFwPagefault = ++ &psFwCCBCmd->uCmdData.sCmdFWPagefault; ++ ++ if (psDevConfig->pfnSysDevErrorNotify) ++ { ++ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; ++ ++ sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT; ++ sErrorData.uErrData.sFwPFErrData.sFWFaultAddr.uiAddr = psCmdFwPagefault->sFWFaultAddr.uiAddr; ++ ++ psDevConfig->pfnSysDevErrorNotify(psDevConfig, ++ &sErrorData); ++ } ++ } ++ break; ++ } ++ ++ case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION: ++ { ++ DLLIST_NODE *psNode, *psNext; ++ const RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification = ++ &psFwCCBCmd->uCmdData.sCmdContextResetNotification; ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL; ++ IMG_UINT32 ui32ErrorPid = 0; ++ ++ OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock); ++ ++ dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_COMMON_CONTEXT *psThisContext = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode); ++ ++ /* If the notification applies to all contexts update reset info ++ * for all contexts, otherwise only do so for the appropriate ID. 
++ */ ++ if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) ++ { ++ /* Notification applies to all contexts */ ++ psThisContext->eLastResetReason = psCmdContextResetNotification->eResetReason; ++ psThisContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; ++ } ++ else ++ { ++ /* Notification applies to one context only */ ++ if (psThisContext->ui32ContextID == psCmdContextResetNotification->ui32ServerCommonContextID) ++ { ++ psServerCommonContext = psThisContext; ++ psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason; ++ psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef; ++ ui32ErrorPid = RGXGetPIDFromServerMMUContext(psServerCommonContext->psServerMMUContext); ++ break; ++ } ++ } ++ } ++ ++ if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_ALL_CTXS) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: All contexts reset (Reason=%d, JobRef=0x%08x)", ++ __func__, ++ (IMG_UINT32)(psCmdContextResetNotification->eResetReason), ++ psCmdContextResetNotification->ui32ResetJobRef)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)", ++ __func__, ++ psServerCommonContext, ++ psCmdContextResetNotification->ui32ServerCommonContextID, ++ (IMG_UINT32)(psCmdContextResetNotification->eResetReason), ++ psCmdContextResetNotification->ui32ResetJobRef)); ++ } ++ ++ /* Increment error counter (if appropriate) */ ++ if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM) ++ { ++ /* Avoid wrapping the error count (which would then ++ * make it appear we had far fewer errors), by limiting ++ * it to IMG_UINT32_MAX. ++ */ ++ if (psDevInfo->sErrorCounts.ui32WGPErrorCount < IMG_UINT32_MAX) ++ { ++ psDevInfo->sErrorCounts.ui32WGPErrorCount++; ++ } ++ } ++ else if (psCmdContextResetNotification->eResetReason == RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM) ++ { ++ /* Avoid wrapping the error count (which would then ++ * make it appear we had far fewer errors), by limiting ++ * it to IMG_UINT32_MAX. 
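++					 * (i.e. a saturating rather than a wrapping counter;
++					 * the WGP count above is clamped the same way)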
++ */ ++ if (psDevInfo->sErrorCounts.ui32TRPErrorCount < IMG_UINT32_MAX) ++ { ++ psDevInfo->sErrorCounts.ui32TRPErrorCount++; ++ } ++ } ++ OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock); ++ ++ /* Notify system layer */ ++ { ++ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; ++ ++ if (psDevConfig->pfnSysDevErrorNotify) ++ { ++ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; ++ ++ sErrorData.eResetReason = psCmdContextResetNotification->eResetReason; ++ sErrorData.pid = ui32ErrorPid; ++ ++ /* Populate error data according to reset reason */ ++ switch (psCmdContextResetNotification->eResetReason) ++ { ++ case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM: ++ case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM: ++ { ++ sErrorData.uErrData.sChecksumErrData.ui32ExtJobRef = psCmdContextResetNotification->ui32ResetJobRef; ++ sErrorData.uErrData.sChecksumErrData.eDM = psCmdContextResetNotification->eDM; ++ break; ++ } ++ default: ++ { ++ break; ++ } ++ } ++ ++ psDevConfig->pfnSysDevErrorNotify(psDevConfig, ++ &sErrorData); ++ } ++ } ++ ++ /* Notify if a page fault */ ++ if (psCmdContextResetNotification->ui32Flags & RGXFWIF_FWCCB_CMD_CONTEXT_RESET_FLAG_PF) ++ { ++ DevmemIntPFNotify(psDevInfo->psDeviceNode, ++ psCmdContextResetNotification->ui64PCAddress, ++ psCmdContextResetNotification->sFaultAddress); ++ } ++ break; ++ } ++ ++ case RGXFWIF_FWCCB_CMD_DEBUG_DUMP: ++ { ++ PVRSRV_ERROR eError; ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ OSAtomicWrite(&psDevInfo->psDeviceNode->eDebugDumpRequested, PVRSRV_DEVICE_DEBUG_DUMP_CAPTURE); ++ eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal FW Cmd debug dump event, dumping now instead", __func__)); ++ PVRSRVDebugRequest(psDevInfo->psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); ++ } ++ break; ++ } ++ ++ case RGXFWIF_FWCCB_CMD_UPDATE_STATS: ++ { ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner; ++ IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue; ++ ++ switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate) ++ { ++ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS: ++ { ++ PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue,0,0,0,0,0,pidTmp); ++ break; ++ } ++ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY: ++ { ++ PVRSRVStatsUpdateRenderContextStats(0,i32AdjustmentValue,0,0,0,0,pidTmp); ++ break; ++ } ++ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES: ++ { ++ PVRSRVStatsUpdateRenderContextStats(0,0,i32AdjustmentValue,0,0,0,pidTmp); ++ break; ++ } ++ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES: ++ { ++ PVRSRVStatsUpdateRenderContextStats(0,0,0,i32AdjustmentValue,0,0,pidTmp); ++ break; ++ } ++ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES: ++ { ++ PVRSRVStatsUpdateRenderContextStats(0,0,0,0,i32AdjustmentValue,0,pidTmp); ++ break; ++ } ++ case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TDM_STORES: ++ { ++ PVRSRVStatsUpdateRenderContextStats(0,0,0,0,0,i32AdjustmentValue,pidTmp); ++ break; ++ } ++ } ++#endif ++ break; ++ } ++ case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE: ++ { ++#if defined(SUPPORT_PDVFS) ++ PDVFS_PROCESS_CORE_CLK_RATE_CHANGE(psDevInfo, ++ psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate); ++#endif ++ break; ++ } ++ ++ case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART: ++ { ++ if (psDevInfo->psRGXFWIfFwSysData != NULL && ++ psDevInfo->psRGXFWIfFwSysData->ePowState != 
RGXFWIF_POW_OFF) ++ { ++ PVRSRV_ERROR eError; ++ ++ /* Power down... */ ++ eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, ++ PVRSRV_SYS_POWER_STATE_OFF, PVRSRV_POWER_FLAGS_NONE); ++ if (eError == PVRSRV_OK) ++ { ++ /* Clear the FW faulted flags... */ ++ psDevInfo->psRGXFWIfFwSysData->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED); ++ ++ /* Power back up again... */ ++ eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode, ++ PVRSRV_SYS_POWER_STATE_ON, PVRSRV_POWER_FLAGS_NONE); ++ ++ /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... */ ++ if (eError == PVRSRV_OK) ++ { ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXFWHealthCheckCmd(psDevInfo); ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ } ++ } ++ ++ /* Notify client drivers and system layer of FW fault */ ++ { ++ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; ++ ++ /* Client notification of device error will be achieved by ++ * clients calling UM function RGXGetLastDeviceError() */ ++ psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; ++ ++ /* Notify system layer */ ++ if (psDevConfig->pfnSysDevErrorNotify) ++ { ++ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; ++ ++ sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR; ++ psDevConfig->pfnSysDevErrorNotify(psDevConfig, ++ &sErrorData); ++ } ++ } ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ } ++ } ++ break; ++ } ++#if defined(SUPPORT_VALIDATION) ++ case RGXFWIF_FWCCB_CMD_REG_READ: ++ { ++ psDevInfo->sFwRegs.ui64RegVal = psFwCCBCmd->uCmdData.sCmdRgxRegReadData.ui64RegValue; ++ complete(&psDevInfo->sFwRegs.sRegComp); ++ break; ++ } ++#if defined(SUPPORT_SOC_TIMER) ++ case RGXFWIF_FWCCB_CMD_SAMPLE_TIMERS: ++ { ++ if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) ++ { ++ PVRSRV_ERROR eSOCtimerErr = RGXValidateSOCUSCTimer(psDevInfo, ++ PDUMP_NONE, ++ psFwCCBCmd->uCmdData.sCmdTimers.ui64timerGray, ++ psFwCCBCmd->uCmdData.sCmdTimers.ui64timerBinary, ++ psFwCCBCmd->uCmdData.sCmdTimers.aui64uscTimers); ++ if (PVRSRV_OK == eSOCtimerErr) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have increased over time")); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "SoC or USC Timers have NOT increased over time")); ++ } ++ } ++ break; ++ } ++#endif ++#endif ++ default: ++ { ++ /* unknown command */ ++ PVR_DPF((PVR_DBG_WARNING, "%s: Unknown Command (eCmdType=0x%08x)", ++ __func__, psFwCCBCmd->eCmdType)); ++ /* Assert on magic value corruption */ ++ PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD); ++ } ++ } ++ ++ /* Update read offset */ ++ psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask; ++ } ++} ++ ++/* ++ * PVRSRVRGXFrameworkCopyCommand ++*/ ++PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEM_MEMDESC *psFWFrameworkMemDesc, ++ IMG_PBYTE pbyGPUFRegisterList, ++ IMG_UINT32 ui32FrameworkRegisterSize) ++{ ++ PVRSRV_ERROR eError; ++ RGXFWIF_RF_REGISTERS *psRFReg; ++ ++ eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc, ++ (void **)&psRFReg); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map 
firmware render context state (%u)", ++ __func__, eError)); ++ return eError; ++ } ++ ++ OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize); ++ ++ /* Release the CPU mapping */ ++ DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc); ++ ++ /* ++ * Dump the FW framework buffer ++ */ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDeviceNode, "Dump FWFramework buffer"); ++ DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS); ++#else ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * PVRSRVRGXFrameworkCreateKM ++*/ ++PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEM_MEMDESC **ppsFWFrameworkMemDesc, ++ IMG_UINT32 ui32FrameworkCommandSize) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ /* ++ Allocate device memory for the firmware GPU framework state. ++ Sufficient info to kick one or more DMs should be contained in this buffer ++ */ ++ PDUMPCOMMENT(psDeviceNode, "Allocate Rogue firmware framework state"); ++ ++ eError = DevmemFwAllocate(psDevInfo, ++ ui32FrameworkCommandSize, ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwGPUFrameworkState", ++ ppsFWFrameworkMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate firmware framework state (%u)", ++ __func__, eError)); ++ return eError; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode, ++ volatile IMG_UINT32 __iomem *pui32LinMemAddr, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT32 ui32CurrentQueueLength, ui32MaxRetries; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; ++ const RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl; ++ ++ ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 + ++ psKCCBCtl->ui32WriteOffset - ++ psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask; ++ ui32CurrentQueueLength += psDevInfo->ui32KCCBDeferredCommandsCount; ++ ++ for (ui32MaxRetries = ui32CurrentQueueLength + 1; ++ ui32MaxRetries > 0; ++ ui32MaxRetries--) ++ { ++ ++ /* ++ * PVRSRVPollForValueKM flags are set to POLL_FLAG_NONE in this case so that the function ++ * does not generate an error message. In this case, the PollForValueKM is expected to ++ * timeout as there is work ongoing on the GPU which may take longer than the timeout period. ++ */ ++ eError = PVRSRVPollForValueKM(psDevNode, pui32LinMemAddr, ui32Value, ui32Mask, POLL_FLAG_NONE); ++ if (eError != PVRSRV_ERROR_TIMEOUT) ++ { ++ break; ++ } ++ ++ RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE); ++ } ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Failed! 
Error(%s) CPU linear address(%p) Expected value(%u)", ++ __func__, PVRSRVGetErrorString(eError), ++ pui32LinMemAddr, ui32Value)); ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32Config, ++ IMG_UINT32 *pui32ConfigState, ++ IMG_BOOL bSetNotClear) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_DEV_POWER_STATE ePowerState; ++ RGXFWIF_KCCB_CMD sStateFlagCmd = { 0 }; ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ RGXFWIF_SYSDATA *psSysData; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ IMG_BOOL bWaitForFwUpdate = IMG_FALSE; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ if (!psDevInfo) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ psDeviceNode = psDevInfo->psDeviceNode; ++ psSysData = psDevInfo->psRGXFWIfFwSysData; ++ ++ if (NULL == psSysData) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Fw Sys Config is not mapped into CPU space", __func__)); ++ return PVRSRV_ERROR_INVALID_CPU_ADDR; ++ } ++ ++ /* apply change and ensure the new data is written to memory ++ * before requesting the FW to read it ++ */ ++ ui32Config = ui32Config & RGXFWIF_INICFG_ALL; ++ if (bSetNotClear) ++ { ++ psSysData->ui32ConfigFlags |= ui32Config; ++ } ++ else ++ { ++ psSysData->ui32ConfigFlags &= ~ui32Config; ++ } ++ ++ /* return current/new value to caller */ ++ if (pui32ConfigState) ++ { ++ *pui32ConfigState = psSysData->ui32ConfigFlags; ++ } ++ ++ OSMemoryBarrier(&psSysData->ui32ConfigFlags); ++ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVPowerLock"); ++ ++ /* notify FW to update setting */ ++ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); ++ ++ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) ++ { ++ /* Ask the FW to update its cached version of the value */ ++ sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL; ++ ++ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, ++ &sStateFlagCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot", unlock); ++ bWaitForFwUpdate = IMG_TRUE; ++ } ++ ++unlock: ++ PVRSRVPowerUnlock(psDeviceNode); ++ if (bWaitForFwUpdate) ++ { ++ /* Wait for the value to be updated as the FW validates ++ * the parameters and modifies the ui32ConfigFlags ++ * accordingly ++ * (for completeness as registered callbacks should also ++ * not permit invalid transitions) ++ */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); ++ } ++ return eError; ++} ++ ++static ++PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_DM eDM, ++ RGXFWIF_KCCB_CMD *psKCCBCmd, ++ RGXFWIF_CLEANUP_TYPE eCleanupType, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ /* Clean-up commands sent during frame capture intervals must be dumped even when not in capture range... */ ++ ui32PDumpFlags |= PDUMP_FLAGS_INTERVAL; ++ ++ psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP; ++ psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType; ++ ++ /* ++ Send the cleanup request to the firmware. If the resource is still busy ++ the firmware will tell us and we'll drop out with a retry. 
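++
++	 One cleanup attempt therefore looks like this end-to-end (an
++	 illustrative summary of the code below):
++	   1) schedule RGXFWIF_KCCB_CMD_CLEANUP and note its kCCB slot;
++	   2) wait for the FW to mark that slot RGXFWIF_KCCB_RTN_SLOT_CMD_EXECUTED;
++	   3) if the slot also carries RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY, return
++	      PVRSRV_ERROR_RETRY so the caller re-issues the request later.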
++	 */
++	eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
++	                                          eDM,
++	                                          psKCCBCmd,
++	                                          ui32PDumpFlags,
++	                                          &ui32kCCBCommandSlot);
++	if (eError != PVRSRV_OK)
++	{
++		/* If the caller may retry, fail with no error message */
++		if ((eError != PVRSRV_ERROR_RETRY) &&
++		    (eError != PVRSRV_ERROR_KERNEL_CCB_FULL))
++		{
++			PVR_DPF((PVR_DBG_ERROR, "RGXScheduleCommandAndGetKCCBSlot() failed (%s) in %s()",
++			         PVRSRVGETERRORSTRING(eError), __func__));
++		}
++		goto fail_command;
++	}
++
++	/* Wait for the command's kCCB slot to be updated by the FW */
++	PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
++	                      "Wait for the firmware to reply to the cleanup command");
++	eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot,
++	                                  ui32PDumpFlags);
++	/*
++	 If the firmware hasn't got back to us in a timely manner
++	 then bail out and let the caller retry the command.
++	 */
++	if (eError == PVRSRV_ERROR_TIMEOUT)
++	{
++		PVR_DPF((PVR_DBG_WARNING,
++		         "%s: RGXWaitForKCCBSlotUpdate timed out. Dumping debug information.",
++		         __func__));
++
++		eError = PVRSRV_ERROR_RETRY;
++#if defined(DEBUG)
++		PVRSRVDebugRequest(psDevInfo->psDeviceNode,
++		                   DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
++#endif
++		goto fail_poll;
++	}
++	else if (eError != PVRSRV_OK)
++	{
++		goto fail_poll;
++	}
++
++#if defined(PDUMP)
++	/*
++	 * The cleanup request to the firmware will tell us if a given resource is busy or not.
++	 * If the RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY flag is set, this means that the resource is
++	 * still in use. In this case we return a PVRSRV_ERROR_RETRY error to the client drivers
++	 * and they will re-issue the cleanup request until it succeeds.
++	 *
++	 * Since this retry mechanism doesn't work for pdumps, client drivers should ensure
++	 * that cleanup requests are only submitted if the resource is unused.
++	 * If this is not the case, the following poll will block indefinitely, making sure
++	 * the issue doesn't go unnoticed.
++	 */
++	PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
++	                      "Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps",
++	                      eDM,
++	                      psKCCBCmd->uCmdData.sCleanupData.eCleanupType,
++	                      psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr);
++	eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBRtnSlotsMemDesc,
++	                                ui32kCCBCommandSlot * sizeof(IMG_UINT32),
++	                                0,
++	                                RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY,
++	                                PDUMP_POLL_OPERATOR_EQUAL,
++	                                ui32PDumpFlags);
++	PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32");
++#endif
++
++	/*
++	 If the command was run but a resource was busy, then the request
++	 will need to be retried.
++ */ ++ if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_CLEANUP_BUSY)) ++ { ++ if (psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); ++ } ++ eError = PVRSRV_ERROR_RETRY; ++ goto fail_requestbusy; ++ } ++ ++ return PVRSRV_OK; ++ ++fail_requestbusy: ++fail_poll: ++fail_command: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ ++ return eError; ++} ++ ++/* ++ RGXRequestCommonContextCleanUp ++*/ ++PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, ++ RGXFWIF_DM eDM, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ RGXFWIF_KCCB_CMD sRCCleanUpCmd = {0}; ++ PVRSRV_ERROR eError; ++ PRGXFWIF_FWCOMMONCONTEXT psFWCommonContextFWAddr; ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; ++ ++ /* Force retry if this context's CCB is currently being dumped ++ * as part of the stalled CCB debug */ ++ if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Forcing retry as psDevInfo->pvEarliestStalledClientCCB = psServerCommonContext->psClientCCB <%p>", ++ __func__, ++ (void*)psServerCommonContext->psClientCCB)); ++ return PVRSRV_ERROR_RETRY; ++ } ++ ++ psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext); ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDeviceNode, "Common ctx cleanup Request DM%d [context = 0x%08x]", ++ eDM, psFWCommonContextFWAddr.ui32Addr); ++ PDUMPCOMMENT(psDeviceNode, "Wait for CCB to be empty before common ctx cleanup"); ++ ++ RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags); ++#endif ++ ++ /* Setup our command data, the cleanup call will fill in the rest */ ++ sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr; ++ ++ /* Request cleanup of the firmware resource */ ++ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, ++ eDM, ++ &sRCCleanUpCmd, ++ RGXFWIF_CLEANUP_FWCOMMONCONTEXT, ++ ui32PDumpFlags); ++ ++ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to schedule a memory context cleanup with error (%u)", ++ __func__, eError)); ++ } ++ ++ return eError; ++} ++ ++/* ++ * RGXFWRequestHWRTDataCleanUp ++ */ ++ ++PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PRGXFWIF_HWRTDATA psHWRTData) ++{ ++ RGXFWIF_KCCB_CMD sHWRTDataCleanUpCmd = {0}; ++ PVRSRV_ERROR eError; ++ ++ PDUMPCOMMENT(psDeviceNode, "HW RTData cleanup Request [HWRTData = 0x%08x]", psHWRTData.ui32Addr); ++ ++ sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData; ++ ++ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice, ++ RGXFWIF_DM_GP, ++ &sHWRTDataCleanUpCmd, ++ RGXFWIF_CLEANUP_HWRTDATA, ++ PDUMP_FLAGS_NONE); ++ ++ if (eError != PVRSRV_OK) ++ { ++ /* If caller may retry, fail with no error message */ ++ if ((eError != PVRSRV_ERROR_RETRY) && ++ (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to schedule a HWRTData cleanup with error (%u)", ++ __func__, eError)); ++ } ++ } ++ ++ return eError; ++} ++ ++/* ++ RGXFWRequestFreeListCleanUp ++*/ ++PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, ++ PRGXFWIF_FREELIST psFWFreeList) ++{ ++ RGXFWIF_KCCB_CMD sFLCleanUpCmd = {0}; ++ PVRSRV_ERROR eError; ++ ++ 
PDUMPCOMMENT(psDevInfo->psDeviceNode, "Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr); ++ ++ /* Setup our command data, the cleanup call will fill in the rest */ ++ sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList; ++ ++ /* Request cleanup of the firmware resource */ ++ eError = RGXScheduleCleanupCommand(psDevInfo, ++ RGXFWIF_DM_GP, ++ &sFLCleanUpCmd, ++ RGXFWIF_CLEANUP_FREELIST, ++ PDUMP_FLAGS_NONE); ++ ++ if (eError != PVRSRV_OK) ++ { ++ /* If caller may retry, fail with no error message */ ++ if ((eError != PVRSRV_ERROR_RETRY) && ++ (eError != PVRSRV_ERROR_KERNEL_CCB_FULL)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to schedule a memory context cleanup with error (%u)", ++ __func__, eError)); ++ } ++ } ++ ++ return eError; ++} ++ ++/* ++ RGXFWRequestZSBufferCleanUp ++*/ ++PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, ++ PRGXFWIF_ZSBUFFER psFWZSBuffer) ++{ ++ RGXFWIF_KCCB_CMD sZSBufferCleanUpCmd = {0}; ++ PVRSRV_ERROR eError; ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr); ++ ++ /* Setup our command data, the cleanup call will fill in the rest */ ++ sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer; ++ ++ /* Request cleanup of the firmware resource */ ++ eError = RGXScheduleCleanupCommand(psDevInfo, ++ RGXFWIF_DM_3D, ++ &sZSBufferCleanUpCmd, ++ RGXFWIF_CLEANUP_ZSBUFFER, ++ PDUMP_FLAGS_NONE); ++ ++ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to schedule a memory context cleanup with error (%u)", ++ __func__, eError)); ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32HCSDeadlineMs) ++{ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS = ui32HCSDeadlineMs; ++ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32HCSDeadlineMS); ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Updating the Hard Context Switching deadline inside RGXFWIfRuntimeCfg"); ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, ++ offsetof(RGXFWIF_RUNTIME_CFG, ui32HCSDeadlineMS), ++ ui32HCSDeadlineMs, ++ PDUMP_FLAGS_CONTINUOUS); ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXFWIF_KCCB_CMD sCmpKCCBCmd = { 0 }; ++ ++ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; ++ ++ return RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_GP, ++ &sCmpKCCBCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++} ++ ++PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32OSid, ++ RGXFWIF_OS_STATE_CHANGE eOSOnlineState) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sOSOnlineStateCmd = { 0 }; ++ RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; ++ ++ sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE; ++ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid; ++ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState; ++ ++#if defined(SUPPORT_AUTOVZ) ++ { ++ IMG_BOOL bConnectionDown = IMG_FALSE; ++ ++ PVR_UNREFERENCED_PARAMETER(psFwSysData); ++ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = RGXFWIF_OS_OFFLINE; ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ /* Send the offline command regardless if power lock is held or not. 
++ * Under AutoVz this is done during regular driver deinit, store-to-ram suspend ++ * or (optionally) from a kernel panic callback. Deinit and suspend operations ++ * take the lock in the rgx pre/post power functions as expected. ++ * The kernel panic callback is a last resort way of letting the firmware know that ++ * the VM is unrecoverable and the vz connection must be disabled. It cannot wait ++ * on other kernel threads to finish and release the lock. */ ++ eError = RGXSendCommand(psDevInfo, ++ &sOSOnlineStateCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ /* Guests and Host going offline should wait for confirmation ++ * from the Firmware of the state change. If this fails, break ++ * the connection on the OS Driver's end as backup. */ ++ if (PVRSRV_VZ_MODE_IS(GUEST) || (ui32OSid == RGXFW_HOST_OS)) ++ { ++ LOOP_UNTIL_TIMEOUT(SECONDS_TO_MICROSECONDS/2) ++ { ++ if (KM_FW_CONNECTION_IS(READY, psDevInfo)) ++ { ++ bConnectionDown = IMG_TRUE; ++ break; ++ } ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (!bConnectionDown) ++ { ++ KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); ++ } ++ } ++ } ++#else ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ /* no reason for Guests to update their state or any other VM's. ++ * This is the Hypervisor and Host driver's responsibility. */ ++ return PVRSRV_OK; ++ } ++ else if (eOSOnlineState == RGXFWIF_OS_ONLINE) ++ { ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_GP, ++ &sOSOnlineStateCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_ERROR_RETRY) break; ++ ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ } ++ else if (psFwSysData) ++ { ++ const volatile RGXFWIF_OS_RUNTIME_FLAGS *psFwRunFlags = ++ (const volatile RGXFWIF_OS_RUNTIME_FLAGS*) &psFwSysData->asOsRuntimeFlagsMirror[ui32OSid]; ++ ++ /* Attempt several times until the FW manages to offload the OS */ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ /* Send request */ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo, ++ RGXFWIF_DM_GP, ++ &sOSOnlineStateCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ if (unlikely(eError == PVRSRV_ERROR_RETRY)) continue; ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommand", return_); ++ ++ /* Wait for FW to process the cmd */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); ++ ++ /* read the OS state */ ++ OSMemoryBarrier(NULL); ++ /* check if FW finished offloading the OSID and is stopped */ ++ if (psFwRunFlags->bfOsState == RGXFW_CONNECTION_FW_OFFLINE) ++ { ++ eError = PVRSRV_OK; ++ break; ++ } ++ else ++ { ++ eError = PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ } ++ else ++ { ++ eError = PVRSRV_ERROR_NOT_INITIALISED; ++ } ++ ++return_ : ++#endif ++ return eError; ++} ++ ++PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32OSid, ++ IMG_UINT32 ui32Priority) ++{ ++ PVRSRV_ERROR eError; ++ RGXFWIF_KCCB_CMD sOSidPriorityCmd = { 0 }; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE; ++ psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid] = ui32Priority; ++ 
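/* Publish the new priority before kicking the FW: the write barrier
++	 * below orders this store ahead of the KCCB command that asks the
++	 * firmware to re-read RGXFWIfRuntimeCfg. */
++	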
OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->aui32OSidPriority[ui32OSid]); ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Updating the priority of OSID%u inside RGXFWIfRuntimeCfg", ui32OSid); ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, ++ offsetof(RGXFWIF_RUNTIME_CFG, aui32OSidPriority) + (ui32OSid * sizeof(ui32Priority)), ++ ui32Priority , ++ PDUMP_FLAGS_CONTINUOUS); ++#endif ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_GP, ++ &sOSidPriorityCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, ++ CONNECTION_DATA *psConnection, ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32Priority, ++ RGXFWIF_DM eDM) ++{ ++ IMG_UINT32 ui32CmdSize; ++ IMG_UINT8 *pui8CmdPtr; ++ RGXFWIF_KCCB_CMD sPriorityCmd = { 0 }; ++ RGXFWIF_CCB_CMD_HEADER *psCmdHeader; ++ RGXFWIF_CMD_PRIORITY *psCmd; ++ PVRSRV_ERROR eError; ++ IMG_INT32 i32Priority = (IMG_INT32)ui32Priority; ++ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psContext); ++ ++ eError = _CheckPriority(psDevInfo, i32Priority, psContext->eRequestor); ++ PVR_LOG_GOTO_IF_ERROR(eError, "_CheckPriority", fail_checkpriority); ++ ++ /* ++ Get space for command ++ */ ++ ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY)); ++ ++ eError = RGXAcquireCCB(psClientCCB, ++ ui32CmdSize, ++ (void **) &pui8CmdPtr, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __func__)); ++ } ++ goto fail_ccbacquire; ++ } ++ ++ /* ++ Write the command header and command ++ */ ++ psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr; ++ psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY; ++ psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY)); ++ pui8CmdPtr += sizeof(*psCmdHeader); ++ ++ psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr; ++ psCmd->i32Priority = i32Priority; ++ pui8CmdPtr += sizeof(*psCmd); ++ ++ /* ++ We should reserve space in the kernel CCB here and fill in the command ++ directly. ++ This is so if there isn't space in the kernel CCB we can return with ++ retry back to services client before we take any operations ++ */ ++ ++ /* ++ Submit the command ++ */ ++ RGXReleaseCCB(psClientCCB, ++ ui32CmdSize, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __func__)); ++ return eError; ++ } ++ ++ /* Construct the priority command. 
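The new priority value itself travels in the client CCB command written above; this KICK only publishes the new client CCB write offset so the firmware processes it.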
*/ ++ sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; ++ sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext); ++ sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); ++ sPriorityCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); ++ sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; ++ sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psDevInfo, ++ eDM, ++ &sPriorityCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to submit set priority command with error (%u)", ++ __func__, ++ eError)); ++ goto fail_cmdacquire; ++ } ++ ++ psContext->i32Priority = i32Priority; ++ ++ return PVRSRV_OK; ++ ++fail_ccbacquire: ++fail_checkpriority: ++fail_cmdacquire: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32PHRMode) ++{ ++ PVRSRV_ERROR eError; ++ RGXFWIF_KCCB_CMD sCfgPHRCmd = { 0 }; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ sCfgPHRCmd.eCmdType = RGXFWIF_KCCB_CMD_PHR_CFG; ++ psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode = ui32PHRMode; ++ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32PHRMode); ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Updating the Periodic Hardware Reset Mode inside RGXFWIfRuntimeCfg"); ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, ++ offsetof(RGXFWIF_RUNTIME_CFG, ui32PHRMode), ++ ui32PHRMode, ++ PDUMP_FLAGS_CONTINUOUS); ++#endif ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_GP, ++ &sCfgPHRCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32WdgPeriodUs) ++{ ++ PVRSRV_ERROR eError; ++ RGXFWIF_KCCB_CMD sCfgWdgCmd = { 0 }; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ sCfgWdgCmd.eCmdType = RGXFWIF_KCCB_CMD_WDG_CFG; ++ psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs = ui32WdgPeriodUs; ++ OSWriteMemoryBarrier(&psDevInfo->psRGXFWIfRuntimeCfg->ui32WdgPeriodUs); ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Updating the firmware watchdog period inside RGXFWIfRuntimeCfg"); ++ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRuntimeCfgMemDesc, ++ offsetof(RGXFWIF_RUNTIME_CFG, ui32WdgPeriodUs), ++ ui32WdgPeriodUs, ++ PDUMP_FLAGS_CONTINUOUS); ++#endif ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_GP, ++ &sCfgWdgCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ return eError; ++} ++ ++ ++ ++void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious) ++{ ++ /* Attempt to detect and deal with any stalled client contexts. ++ * bIgnorePrevious may be set by the caller if they know a context to be ++ * stalled, as otherwise this function will only identify stalled ++ * contexts which have not been previously reported. 
++ */
++
++	IMG_UINT32 ui32StalledClientMask = 0;
++
++	if (!(OSTryLockAcquire(psDevInfo->hCCBStallCheckLock)))
++	{
++		PVR_LOG(("RGXCheckForStalledClientContexts: Failed to acquire hCCBStallCheckLock, returning..."));
++		return;
++	}
++
++	ui32StalledClientMask |= CheckForStalledClientTransferCtxt(psDevInfo);
++
++	ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);
++
++	ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo);
++
++	if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
++	{
++		ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);
++	}
++
++	/* If at least one DM stalled bit is different from before */
++	if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))
++	{
++		if (ui32StalledClientMask > 0)
++		{
++			static __maybe_unused const char *pszStalledAction =
++#if defined(PVRSRV_STALLED_CCB_ACTION)
++				"force";
++#else
++				"warn";
++#endif
++			/* Print all the stalled DMs */
++			PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s%s%s",
++			        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP),
++			        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D),
++			        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA),
++			        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D),
++			        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM),
++			        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_RTU),
++			        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_SHG),
++			        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D),
++			        RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D)));
++
++			PVR_LOG(("Trying to identify stalled context...(%s) [%d]",
++			        pszStalledAction, bIgnorePrevious));
++
++			DumpStalledContextInfo(psDevInfo);
++		}
++		else
++		{
++			if (psDevInfo->ui32StalledClientMask > 0)
++			{
++				/* Indicate there are no stalled DMs */
++				PVR_LOG(("No further stalled client contexts exist"));
++			}
++		}
++		psDevInfo->ui32StalledClientMask = ui32StalledClientMask;
++		psDevInfo->pvEarliestStalledClientCCB = NULL;
++	}
++	OSLockRelease(psDevInfo->hCCBStallCheckLock);
++}
++
++/*
++	RGXUpdateHealthStatus
++*/
++PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
++                                   IMG_BOOL bCheckAfterTimePassed)
++{
++	const PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
++	PVRSRV_DEVICE_HEALTH_STATUS eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
++	PVRSRV_DEVICE_HEALTH_REASON eNewReason = PVRSRV_DEVICE_HEALTH_REASON_NONE;
++	PVRSRV_RGXDEV_INFO* psDevInfo;
++	const RGXFWIF_TRACEBUF* psRGXFWIfTraceBufCtl;
++	const RGXFWIF_SYSDATA* psFwSysData;
++	const RGXFWIF_OSDATA* psFwOsData;
++	const RGXFWIF_CCB_CTL* psKCCBCtl;
++	IMG_UINT32 ui32ThreadCount;
++	IMG_BOOL bKCCBCmdsWaiting;
++
++	PVR_ASSERT(psDevNode != NULL);
++	psDevInfo = psDevNode->pvDevice;
++
++	/* If the firmware is not yet initialised or has already deinitialised, stop here */
++	if (psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL ||
++	    psDevInfo->psDeviceNode == NULL || psDevInfo->psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT)
++	{
++		return PVRSRV_OK;
++	}
++
++	psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBufCtl;
++	psFwSysData = psDevInfo->psRGXFWIfFwSysData;
++	psFwOsData = psDevInfo->psRGXFWIfFwOsData;
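++
++	/* Each check below may escalate eNewStatus/eNewReason and jump straight
++	 * to the exit label, so the remaining checks are skipped once a problem
++	 * has been detected. */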
++ ++ /* If this is a quick update, then include the last current value... */ ++ if (!bCheckAfterTimePassed) ++ { ++ eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus); ++ eNewReason = OSAtomicRead(&psDevNode->eHealthReason); ++ } ++ ++ /* Decrement the SLR holdoff counter (if non-zero) */ ++ if (psDevInfo->ui32SLRHoldoffCounter > 0) ++ { ++ psDevInfo->ui32SLRHoldoffCounter--; ++ } ++ ++ /* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */ ++ if (PVRSRVIsDevicePowered(psDevNode)) ++ { ++ if (psRGXFWIfTraceBufCtl != NULL) ++ { ++ /* ++ Firmware thread checks... ++ */ ++ for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++) ++ { ++ const IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo; ++ ++ /* ++ Check if the FW has hit an assert... ++ */ ++ if (*pszTraceAssertInfo != '\0') ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)", ++ __func__, ui32ThreadCount, pszTraceAssertInfo, ++ psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath, ++ psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum)); ++ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; ++ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED; ++ goto _RGXUpdateHealthStatus_Exit; ++ } ++ ++ /* ++ Check the threads to see if they are in the same poll locations as last time... ++ */ ++ if (bCheckAfterTimePassed) ++ { ++ if (psFwSysData->aui32CrPollAddr[ui32ThreadCount] != 0 && ++ psFwSysData->aui32CrPollCount[ui32ThreadCount] == psDevInfo->aui32CrLastPollCount[ui32ThreadCount]) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)", ++ __func__, ui32ThreadCount, ++ ((psFwSysData->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")), ++ psFwSysData->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET, ++ psFwSysData->aui32CrPollMask[ui32ThreadCount])); ++ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; ++ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING; ++ goto _RGXUpdateHealthStatus_Exit; ++ } ++ psDevInfo->aui32CrLastPollCount[ui32ThreadCount] = psFwSysData->aui32CrPollCount[ui32ThreadCount]; ++ } ++ } ++ ++ /* ++ Check if the FW has faulted... ++ */ ++ if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Firmware has faulted and needs to restart", ++ __func__)); ++ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT; ++ if (psFwSysData->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED) ++ { ++ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING; ++ } ++ else ++ { ++ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING; ++ } ++ goto _RGXUpdateHealthStatus_Exit; ++ } ++ } ++ ++ /* ++ Event Object Timeouts check... ++ */ ++ if (!bCheckAfterTimePassed) ++ { ++ if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)", ++ __func__, ++ psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts)); ++ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; ++ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS; ++ } ++ psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts; ++ } ++ ++ /* ++ Check the Kernel CCB pointer is valid. If any commands were waiting last time, then check ++ that some have executed since then. 
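++	   If commands were pending but none have executed, the firmware is
++	   reported as stalled; if the queue is idle, a health check command is
++	   scheduled so the next pass can confirm the firmware is still alive.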
++ */ ++ bKCCBCmdsWaiting = IMG_FALSE; ++ psKCCBCtl = psDevInfo->psKernelCCBCtl; ++ ++ if (psKCCBCtl != NULL) ++ { ++ if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask || ++ psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)", ++ __func__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset)); ++ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD; ++ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; ++ } ++ ++ if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset) ++ { ++ bKCCBCmdsWaiting = IMG_TRUE; ++ } ++ } ++ ++ if (bCheckAfterTimePassed && psFwOsData != NULL) ++ { ++ IMG_UINT32 ui32KCCBCmdsExecuted = psFwOsData->ui32KCCBCmdsExecuted; ++ ++ if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted) ++ { ++ /* ++ If something was waiting last time then the Firmware has stopped processing commands. ++ */ ++ if (psDevInfo->bKCCBCmdsWaitingLastTime) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: No KCCB commands executed since check!", ++ __func__)); ++ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; ++ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED; ++ } ++ ++ /* ++ If no commands are currently pending and nothing happened since the last poll, then ++ schedule a dummy command to ping the firmware so we know it is alive and processing. ++ */ ++ if (!bKCCBCmdsWaiting) ++ { ++ /* Protect the PDumpLoadMem. RGXScheduleCommand() cannot take the ++ * PMR lock itself, because some bridge functions will take the PMR lock ++ * before calling RGXScheduleCommand ++ */ ++ PVRSRV_ERROR eError = RGXFWHealthCheckCmd(psDevNode->pvDevice); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Cannot schedule Health Check command! (0x%x)", ++ __func__, eError)); ++ } ++ else ++ { ++ bKCCBCmdsWaiting = IMG_TRUE; ++ } ++ } ++ } ++ ++ psDevInfo->bKCCBCmdsWaitingLastTime = bKCCBCmdsWaiting; ++ psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted; ++ } ++ } ++ ++ /* ++ Interrupt counts check... ++ */ ++ if (bCheckAfterTimePassed && psFwOsData != NULL) ++ { ++ IMG_UINT32 ui32LISRCount = 0; ++ IMG_UINT32 ui32FWCount = 0; ++ IMG_UINT32 ui32MissingInts = 0; ++ ++ /* Add up the total number of interrupts issued, sampled/received and missed... */ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++ /* Only the Host OS has a sample count, so only one counter to check. */ ++ ui32LISRCount += psDevInfo->aui32SampleIRQCount[RGXFW_HOST_OS]; ++ ui32FWCount += OSReadHWReg32(psDevInfo->pvRegsBaseKM, gaui32FwOsIrqCntRegAddr[RGXFW_HOST_OS]); ++#else ++ IMG_UINT32 ui32Index; ++ ++ for (ui32Index = 0; ui32Index < RGXFW_THREAD_NUM; ui32Index++) ++ { ++ ui32LISRCount += psDevInfo->aui32SampleIRQCount[ui32Index]; ++ ui32FWCount += psFwOsData->aui32InterruptCount[ui32Index]; ++ } ++#endif /* RGX_FW_IRQ_OS_COUNTERS */ ++ ++ if (ui32LISRCount < ui32FWCount) ++ { ++ ui32MissingInts = (ui32FWCount-ui32LISRCount); ++ } ++ ++ if (ui32LISRCount == psDevInfo->ui32InterruptCountLastTime && ++ ui32MissingInts >= psDevInfo->ui32MissingInterruptsLastTime && ++ psDevInfo->ui32MissingInterruptsLastTime > 1) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: LISR has not received the last %d interrupts", ++ __func__, ui32MissingInts)); ++ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING; ++ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; ++ ++ /* Schedule the MISRs to help mitigate the problems of missing interrupts. 
*/ ++ OSScheduleMISR(psDevInfo->pvMISRData); ++ if (psDevInfo->pvAPMISRData != NULL) ++ { ++ OSScheduleMISR(psDevInfo->pvAPMISRData); ++ } ++ } ++ psDevInfo->ui32InterruptCountLastTime = ui32LISRCount; ++ psDevInfo->ui32MissingInterruptsLastTime = ui32MissingInts; ++ } ++ ++ /* ++ Stalled CCB check... ++ */ ++ if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus)) ++ { ++ RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE); ++ } ++ ++ /* Notify client driver and system layer of any eNewStatus errors */ ++ if (eNewStatus > PVRSRV_DEVICE_HEALTH_STATUS_OK) ++ { ++ /* Client notification of device error will be achieved by ++ * clients calling UM function RGXGetLastDeviceError() */ ++ psDevInfo->eLastDeviceError = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR; ++ ++ /* Notify system layer */ ++ { ++ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; ++ ++ if (psDevConfig->pfnSysDevErrorNotify) ++ { ++ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; ++ ++ sErrorData.eResetReason = RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR; ++ sErrorData.uErrData.sHostWdgData.ui32Status = (IMG_UINT32)eNewStatus; ++ sErrorData.uErrData.sHostWdgData.ui32Reason = (IMG_UINT32)eNewReason; ++ ++ psDevConfig->pfnSysDevErrorNotify(psDevConfig, ++ &sErrorData); ++ } ++ } ++ } ++ ++ /* ++ Finished, save the new status... ++ */ ++_RGXUpdateHealthStatus_Exit: ++ OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus); ++ OSAtomicWrite(&psDevNode->eHealthReason, eNewReason); ++ RGXSRV_HWPERF_DEVICE_INFO(psDevInfo, RGX_HWPERF_DEV_INFO_EV_HEALTH, eNewStatus, eNewReason); ++ ++ /* ++ * Attempt to service the HWPerf buffer to regularly transport idle/periodic ++ * packets to host buffer. ++ */ ++ if (psDevNode->pfnServiceHWPerf != NULL) ++ { ++ PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: " ++ "Error occurred when servicing HWPerf buffer (%d)", ++ __func__, eError)); ++ } ++ } ++ ++ /* Attempt to refresh timer correlation data */ ++ RGXTimeCorrRestartPeriodic(psDevNode); ++ ++ return PVRSRV_OK; ++} /* RGXUpdateHealthStatus */ ++ ++#if defined(SUPPORT_AUTOVZ) ++void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ if (likely(KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && KM_OS_CONNECTION_IS(ACTIVE, psDevInfo))) ++ { ++ /* read and write back the alive token value to confirm to the ++ * virtualisation watchdog that this connection is healthy */ ++ KM_SET_OS_ALIVE_TOKEN(KM_GET_FW_ALIVE_TOKEN(psDevInfo), psDevInfo); ++ } ++} ++ ++/* ++ RGXUpdateAutoVzWatchdog ++*/ ++void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode) ++{ ++ if (likely(psDevNode != NULL)) ++ { ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; ++ ++ if (unlikely((psDevInfo == NULL || !psDevInfo->bFirmwareInitialised || !psDevInfo->bRGXPowered || ++ psDevInfo->pvRegsBaseKM == NULL || psDevNode->eDevState == PVRSRV_DEVICE_STATE_DEINIT))) ++ { ++ /* If the firmware is not initialised, stop here */ ++ return; ++ } ++ else ++ { ++ PVRSRV_ERROR eError = PVRSRVPowerLock(psDevNode); ++ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "PVRSRVPowerLock"); ++ ++ RGXUpdateAutoVzWdgToken(psDevInfo); ++ PVRSRVPowerUnlock(psDevNode); ++ } ++ } ++} ++#endif /* SUPPORT_AUTOVZ */ ++ ++PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM) ++{ ++ if (psCurrentServerCommonContext == NULL) ++ { ++ /* the context has already been freed so 
there is nothing to do here */ ++ return PVRSRV_OK; ++ } ++ ++ return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode, ++ psCurrentServerCommonContext->psClientCCB, ++ eKickTypeDM); ++} ++ ++void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel) ++{ ++ if (psCurrentServerCommonContext == NULL) ++ { ++ /* the context has already been freed so there is nothing to do here */ ++ return; ++ } ++ ++ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_HIGH)) ++ { ++ /* If high verbosity requested, dump whole CCB */ ++ DumpCCB(psCurrentServerCommonContext->psDevInfo, ++ psCurrentServerCommonContext->sFWCommonContextFWAddr, ++ psCurrentServerCommonContext->psClientCCB, ++ pfnDumpDebugPrintf, ++ pvDumpDebugFile); ++ } ++ else ++ { ++ /* Otherwise, only dump first stalled command in the CCB */ ++ DumpStalledCCBCommand(psCurrentServerCommonContext->sFWCommonContextFWAddr, ++ psCurrentServerCommonContext->psClientCCB, ++ pfnDumpDebugPrintf, ++ pvDumpDebugFile); ++ } ++} ++ ++PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, ++ IMG_UINT32 *pui32NumCleanupCtl, ++ RGXFWIF_DM eDM, ++ IMG_BOOL bKick, ++ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, ++ RGX_ZSBUFFER_DATA *psZSBuffer, ++ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer) ++{ ++ PVRSRV_ERROR eError; ++ PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl; ++ ++ PVR_ASSERT((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); ++ PVR_RETURN_IF_INVALID_PARAM((eDM == RGXFWIF_DM_GEOM) || (eDM == RGXFWIF_DM_3D)); ++ ++ if (bKick) ++ { ++ if (psKMHWRTDataSet) ++ { ++ PRGXFWIF_CLEANUP_CTL psCleanupCtl; ++ ++ eError = RGXSetFirmwareAddress(&psCleanupCtl, psKMHWRTDataSet->psHWRTDataFwMemDesc, ++ offsetof(RGXFWIF_HWRTDATA, sCleanupState), ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ *(psCleanupCtlWrite++) = psCleanupCtl; ++ } ++ ++ if (eDM == RGXFWIF_DM_3D) ++ { ++ RGXFWIF_PRBUFFER_TYPE eBufferType; ++ RGX_ZSBUFFER_DATA *psBuffer = NULL; ++ ++ for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++) ++ { ++ switch (eBufferType) ++ { ++ case RGXFWIF_PRBUFFER_ZSBUFFER: ++ psBuffer = psZSBuffer; ++ break; ++ case RGXFWIF_PRBUFFER_MSAABUFFER: ++ psBuffer = psMSAAScratchBuffer; ++ break; ++ case RGXFWIF_PRBUFFER_MAXSUPPORTED: ++ psBuffer = NULL; ++ break; ++ } ++ if (psBuffer) ++ { ++ (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr + ++ offsetof(RGXFWIF_PRBUFFER, sCleanupState); ++ psBuffer = NULL; ++ } ++ } ++ } ++ } ++ ++ *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl; ++ PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ RGXFWIF_HWRINFOBUF *psHWRInfoBuf; ++ IMG_UINT32 i; ++ ++ if (psDevNode->pvDevice == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_DEVINFO; ++ } ++ psDevInfo = psDevNode->pvDevice; ++ ++ psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBufCtl; ++ ++ for (i = 0 ; i < RGXFWIF_DM_MAX ; i++) ++ { ++ /* Reset the HWR numbers */ ++ psHWRInfoBuf->aui32HwrDmLockedUpCount[i] = 0; ++ psHWRInfoBuf->aui32HwrDmFalseDetectCount[i] = 0; ++ psHWRInfoBuf->aui32HwrDmRecoveredCount[i] = 0; ++ psHWRInfoBuf->aui32HwrDmOverranCount[i] = 0; ++ } ++ ++ for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++) ++ { ++ 
psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0; ++ } ++ ++ psHWRInfoBuf->ui32WriteIndex = 0; ++ psHWRInfoBuf->ui32DDReqCount = 0; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, ++ IMG_DEV_PHYADDR *psPhyAddr, ++ IMG_UINT32 ui32LogicalOffset, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_BOOL *bValid) ++{ ++ ++ PVRSRV_ERROR eError; ++ ++ eError = PMRLockSysPhysAddresses(psPMR); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: PMRLockSysPhysAddresses failed (%u)", ++ __func__, ++ eError)); ++ return eError; ++ } ++ ++ eError = PMR_DevPhysAddr(psPMR, ++ ui32Log2PageSize, ++ ui32NumOfPages, ++ ui32LogicalOffset, ++ psPhyAddr, ++ bValid); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: PMR_DevPhysAddr failed (%u)", ++ __func__, ++ eError)); ++ return eError; ++ } ++ ++ ++ eError = PMRUnlockSysPhysAddresses(psPMR); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: PMRUnLockSysPhysAddresses failed (%u)", ++ __func__, ++ eError)); ++ return eError; ++ } ++ ++ return eError; ++} ++ ++#if defined(PDUMP) ++PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (psDevInfo->bDumpedKCCBCtlAlready) ++ { ++ /* exiting capture range or pdump block */ ++ psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; ++ ++ /* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ++ PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER, ++ "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)", ++ psDevInfo->psKernelCCBCtl, ++ ui32WriteOffset, ++ ui32WriteOffset); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc, ++ offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset), ++ ui32WriteOffset, ++ 0xffffffff, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: problem pdumping POL for kCCBCtl (%d)", __func__, eError)); ++ } ++ } ++ ++ return eError; ++ ++} ++#endif ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXClientConnectCompatCheck_ClientAgainstFW ++ ++ @Description ++ ++ Check compatibility of client and firmware (build options) ++ at the connection time. 
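++ The FW-side compatibility data is polled (for up to MAX_HW_TIME_US) until
++ the firmware flags it as updated; the two build option masks are then
++ XORed and any mismatching bits are reported.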
++ ++ @Input psDeviceNode - device node ++ @Input ui32ClientBuildOptions - build options for the client ++ ++ @Return PVRSRV_ERROR - depending on mismatch found ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32ClientBuildOptions) ++{ ++#if !defined(NO_HARDWARE) || defined(PDUMP) ++#if !defined(NO_HARDWARE) ++ IMG_UINT32 ui32BuildOptionsMismatch; ++ IMG_UINT32 ui32BuildOptionsFW; ++#endif ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++#endif ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++#if !defined(NO_HARDWARE) ++ if (psDevInfo == NULL || psDevInfo->psRGXFWIfOsInitMemDesc == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_OSINIT structure not allocated.", ++ __func__)); ++ return PVRSRV_ERROR_NOT_INITIALISED; ++ } ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ if (*((volatile IMG_BOOL *) &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) ++ { ++ /* No need to wait if the FW has already updated the values */ ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++#endif ++ ++#if defined(PDUMP) ++ { ++ PVRSRV_ERROR eError; ++ ++ PDUMPCOMMENT(psDeviceNode, "Compatibility check: client and FW build options"); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + ++ offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions), ++ ui32ClientBuildOptions, ++ 0xffffffff, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", ++ __func__, ++ eError)); ++ return eError; ++ } ++ } ++#endif ++ ++#if !defined(NO_HARDWARE) ++ ui32BuildOptionsFW = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.ui32BuildOptions; ++ ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW; ++ ++ if (ui32BuildOptionsMismatch != 0) ++ { ++ if ((ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) ++ { ++ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " ++ "extra options present in client: (0x%x). Please check rgx_options.h", ++ ui32ClientBuildOptions & ui32BuildOptionsMismatch )); ++ } ++ ++ if ((ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0) ++ { ++ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; " ++ "extra options present in Firmware: (0x%x). Please check rgx_options.h", ++ ui32BuildOptionsFW & ui32BuildOptionsMismatch )); ++ } ++ ++ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware and client build options match. [ OK ]", __func__)); ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXFwRawHeapAllocMap ++ ++ @Description Register firmware heap for the specified guest OSID ++ ++ @Input psDeviceNode - device node ++ @Input ui32OSID - Guest OSID ++ @Input sDevPAddr - Heap address ++ @Input ui64DevPSize - Heap size ++ ++ @Return PVRSRV_ERROR - PVRSRV_OK if heap setup was successful. 
++ ++******************************************************************************/ ++PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32OSID, ++ IMG_DEV_PHYADDR sDevPAddr, ++ IMG_UINT64 ui64DevPSize) ++{ ++ PVRSRV_ERROR eError; ++ IMG_CHAR szRegionRAName[RA_MAX_NAME_LENGTH]; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_MEMALLOCFLAGS_T uiRawFwHeapAllocFlags = (RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PREMAP0 + ui32OSID)); ++ PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig, ++ PHYS_HEAP_USAGE_FW_MAIN); ++ PHYS_HEAP_CONFIG sFwHeapConfig; ++ ++ PVRSRV_VZ_RET_IF_NOT_MODE(HOST, PVRSRV_OK); ++ ++ if (psFwMainConfig == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "FW_MAIN heap config not found.")); ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++ } ++ ++ OSSNPrintf(szRegionRAName, sizeof(szRegionRAName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); ++ ++ if (!ui64DevPSize || ++ !sDevPAddr.uiAddr || ++ ui32OSID >= RGX_NUM_OS_SUPPORTED || ++ ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Invalid parameters for %s", szRegionRAName)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ sFwHeapConfig = *psFwMainConfig; ++ sFwHeapConfig.sStartAddr.uiAddr = 0; ++ sFwHeapConfig.sCardBase.uiAddr = sDevPAddr.uiAddr; ++ sFwHeapConfig.uiSize = RGX_FIRMWARE_RAW_HEAP_SIZE; ++ sFwHeapConfig.eType = PHYS_HEAP_TYPE_LMA; ++ ++ eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, szRegionRAName, &psDeviceNode->apsFWPremapPhysHeap[ui32OSID]); ++ PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysmemCreateHeapLMA:PREMAP [%d]", ui32OSID); ++ ++ eError = PhysHeapAcquire(psDeviceNode->apsFWPremapPhysHeap[ui32OSID]); ++ PVR_LOG_RETURN_IF_ERROR_VA(eError, "PhysHeapAcquire:PREMAP [%d]", ui32OSID); ++ ++ psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID] = psDeviceNode->apsFWPremapPhysHeap[ui32OSID]; ++ ++ PDUMPCOMMENT(psDeviceNode, "Allocate and map raw firmware heap for OSID: [%d]", ui32OSID); ++ ++#if (RGX_NUM_OS_SUPPORTED > 1) ++ /* don't clear the heap of other guests on allocation */ ++ uiRawFwHeapAllocFlags &= (ui32OSID > RGXFW_HOST_OS) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL); ++#endif ++ ++ /* if the firmware is already powered up, consider the firmware heaps are pre-mapped. */ ++ if (psDeviceNode->bAutoVzFwIsUp) ++ { ++ uiRawFwHeapAllocFlags &= RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); ++ DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE); ++ } ++ ++ eError = DevmemFwAllocate(psDevInfo, ++ RGX_FIRMWARE_RAW_HEAP_SIZE, ++ uiRawFwHeapAllocFlags, ++ psDevInfo->psGuestFirmwareRawHeap[ui32OSID]->pszName, ++ &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]); ++ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); ++ ++ /* Mark this devmem heap as premapped so allocations will not require device mapping. */ ++ DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_TRUE); ++ ++ if (ui32OSID == RGXFW_HOST_OS) ++ { ++ /* if the Host's raw fw heap is premapped, mark its main & config sub-heaps accordingly ++ * No memory allocated from these sub-heaps will be individually mapped into the device's ++ * address space so they can remain marked permanently as premapped. */ ++ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); ++ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); ++ } ++ ++ return eError; ++} ++ ++/*! 
++******************************************************************************* ++ ++ @Function RGXFwRawHeapUnmapFree ++ ++ @Description Unregister firmware heap for the specified guest OSID ++ ++ @Input psDeviceNode - device node ++ @Input ui32OSID - Guest OSID ++ ++******************************************************************************/ ++void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32OSID) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ /* remove the premap status, so the heap can be unmapped and freed */ ++ if (psDevInfo->psGuestFirmwareRawHeap[ui32OSID]) ++ { ++ DevmemHeapSetPremapStatus(psDevInfo->psGuestFirmwareRawHeap[ui32OSID], IMG_FALSE); ++ } ++ ++ if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]); ++ psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL; ++ } ++} ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvHalt ++ ++@Description Halt the RISC-V FW core (required for certain operations ++ done through Debug Module) ++ ++@Input psDevInfo Pointer to device info ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ++ PDUMP_FLAGS_CONTINUOUS, "Halt RISC-V FW"); ++ ++ /* Send halt request (no need to select one or more harts on this RISC-V core) */ ++ PDUMPREG32(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, ++ RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | ++ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Wait until hart is halted */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_DMSTATUS, ++ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, ++ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ /* Clear halt request */ ++ PDUMPREG32(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, ++ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, ++ PDUMP_FLAGS_CONTINUOUS); ++#else ++ IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; ++ ++ /* Send halt request (no need to select one or more harts on this RISC-V core) */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, ++ RGX_CR_FWCORE_DMI_DMCONTROL_HALTREQ_EN | ++ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); ++ ++ /* Wait until hart is halted */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32), ++ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, ++ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Hart not halted (0x%x)", ++ __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS))); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ /* Clear halt request */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, ++ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*! 
++******************************************************************************* ++@Function RGXRiscvIsHalted ++ ++@Description Check if the RISC-V FW is halted ++ ++@Input psDevInfo Pointer to device info ++ ++@Return IMG_BOOL ++******************************************************************************/ ++IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++#if defined(NO_HARDWARE) ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ /* Assume the core is always halted in nohw */ ++ return IMG_TRUE; ++#else ++ ++ return (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS) & ++ RGX_CR_FWCORE_DMI_DMSTATUS_ALLHALTED_EN) != 0U; ++#endif ++} ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvResume ++ ++@Description Resume the RISC-V FW core ++ ++@Input psDevInfo Pointer to device info ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ++ PDUMP_FLAGS_CONTINUOUS, "Resume RISC-V FW"); ++ ++ /* Send resume request (no need to select one or more harts on this RISC-V core) */ ++ PDUMPREG32(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, ++ RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | ++ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Wait until hart is resumed */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_DMSTATUS, ++ RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, ++ RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ /* Clear resume request */ ++ PDUMPREG32(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DMCONTROL, ++ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN, ++ PDUMP_FLAGS_CONTINUOUS); ++#else ++ IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; ++ ++ /* Send resume request (no need to select one or more harts on this RISC-V core) */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, ++ RGX_CR_FWCORE_DMI_DMCONTROL_RESUMEREQ_EN | ++ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); ++ ++ /* Wait until hart is resumed */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ pui32RegsBase + RGX_CR_FWCORE_DMI_DMSTATUS/sizeof(IMG_UINT32), ++ RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, ++ RGX_CR_FWCORE_DMI_DMSTATUS_ALLRESUMEACK_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Hart not resumed (0x%x)", ++ __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMSTATUS))); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ /* Clear resume request */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DMCONTROL, ++ RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*! 
++******************************************************************************* ++@Function RGXRiscvCheckAbstractCmdError ++ ++@Description Check for RISC-V abstract command errors and clear them ++ ++@Input psDevInfo Pointer to GPU device info ++ ++@Return RGXRISCVFW_ABSTRACT_CMD_ERR ++******************************************************************************/ ++static RGXRISCVFW_ABSTRACT_CMD_ERR RGXRiscvCheckAbstractCmdError(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXRISCVFW_ABSTRACT_CMD_ERR eCmdErr; ++ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ eCmdErr = RISCV_ABSTRACT_CMD_NO_ERROR; ++ ++ /* Check error status */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS, ++ RISCV_ABSTRACT_CMD_NO_ERROR << RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT, ++ ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++#else ++ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; ++ ++ /* Check error status */ ++ eCmdErr = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS) ++ & ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK) ++ >> RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_SHIFT; ++ ++ if (eCmdErr != RISCV_ABSTRACT_CMD_NO_ERROR) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "RISC-V FW abstract command error %u", eCmdErr)); ++ ++ /* Clear the error (note CMDERR field is write-1-to-clear) */ ++ OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS, ++ ~RGX_CR_FWCORE_DMI_ABSTRACTCS_CMDERR_CLRMSK); ++ } ++#endif ++ ++ return eCmdErr; ++} ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvReadReg ++ ++@Description Read a value from the given RISC-V register (GPR or CSR) ++ ++@Input psDevInfo Pointer to device info ++@Input ui32RegAddr RISC-V register address ++ ++@Output pui32Value Read value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT32 *pui32Value) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ PVR_UNREFERENCED_PARAMETER(ui32RegAddr); ++ PVR_UNREFERENCED_PARAMETER(pui32Value); ++ ++ /* Reading HW registers is not supported in nohw/pdump */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++#else ++ IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; ++ ++ /* Send abstract register read command */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ++ RGX_CR_FWCORE_DMI_COMMAND, ++ (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | ++ RGXRISCVFW_DMI_COMMAND_READ | ++ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | ++ ui32RegAddr); ++ ++ /* Wait until abstract command is completed */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), ++ 0U, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", ++ __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR) ++ { ++ /* Read register value */ ++ *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0); ++ } ++ else ++ { ++ *pui32Value = 0U; ++ } ++ ++ return PVRSRV_OK; ++#endif ++} ++ ++/*! 
++******************************************************************************* ++@Function RGXRiscvPollReg ++ ++@Description Poll for a value from the given RISC-V register (GPR or CSR) ++ ++@Input psDevInfo Pointer to device info ++@Input ui32RegAddr RISC-V register address ++@Input ui32Value Expected value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT32 ui32Value) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Poll RISC-V register 0x%x (expected 0x%08x)", ++ ui32RegAddr, ui32Value); ++ ++ /* Send abstract register read command */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, ++ (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | ++ RGXRISCVFW_DMI_COMMAND_READ | ++ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | ++ ui32RegAddr, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Wait until abstract command is completed */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS, ++ 0U, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ RGXRiscvCheckAbstractCmdError(psDevInfo); ++ ++ /* Check read value */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_DATA0, ++ ui32Value, ++ 0xFFFFFFFF, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ return PVRSRV_OK; ++#else ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ PVR_UNREFERENCED_PARAMETER(ui32RegAddr); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ ++ /* Polling HW registers is currently not required driverlive */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++#endif ++} ++ ++/*! 
++******************************************************************************* ++@Function RGXRiscvWriteReg ++ ++@Description Write a value to the given RISC-V register (GPR or CSR) ++ ++@Input psDevInfo Pointer to device info ++@Input ui32RegAddr RISC-V register address ++@Input ui32Value Write value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT32 ui32Value) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Write RISC-V register 0x%x (value 0x%08x)", ++ ui32RegAddr, ui32Value); ++ ++ /* Prepare data to be written to register */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0, ++ ui32Value, PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Send abstract register write command */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, ++ (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | ++ RGXRISCVFW_DMI_COMMAND_WRITE | ++ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | ++ ui32RegAddr, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Wait until abstract command is completed */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS, ++ 0U, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++#else ++ IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; ++ ++ /* Prepare data to be written to register */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value); ++ ++ /* Send abstract register write command */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ++ RGX_CR_FWCORE_DMI_COMMAND, ++ (RGXRISCVFW_DMI_COMMAND_ACCESS_REGISTER << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | ++ RGXRISCVFW_DMI_COMMAND_WRITE | ++ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT | ++ ui32RegAddr); ++ ++ /* Wait until abstract command is completed */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), ++ 0U, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", ++ __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*! 
++******************************************************************************* ++@Function RGXRiscvCheckSysBusError ++ ++@Description Check for RISC-V system bus errors and clear them ++ ++@Input psDevInfo Pointer to GPU device info ++ ++@Return RGXRISCVFW_SYSBUS_ERR ++******************************************************************************/ ++static __maybe_unused RGXRISCVFW_SYSBUS_ERR RGXRiscvCheckSysBusError(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXRISCVFW_SYSBUS_ERR eSBError; ++ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ eSBError = RISCV_SYSBUS_NO_ERROR; ++ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_SBCS, ++ RISCV_SYSBUS_NO_ERROR << RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT, ++ ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++#else ++ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM; ++ ++ eSBError = (OSReadHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS) ++ & ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK) ++ >> RGX_CR_FWCORE_DMI_SBCS_SBERROR_SHIFT; ++ ++ if (eSBError != RISCV_SYSBUS_NO_ERROR) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "RISC-V FW system bus error %u", eSBError)); ++ ++ /* Clear the error (note SBERROR field is write-1-to-clear) */ ++ OSWriteHWReg32(pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS, ++ ~RGX_CR_FWCORE_DMI_SBCS_SBERROR_CLRMSK); ++ } ++#endif ++ ++ return eSBError; ++} ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) ++/*! ++******************************************************************************* ++@Function RGXRiscvReadAbstractMem ++ ++@Description Read a value at the given address in RISC-V memory space ++ using RISC-V abstract memory commands ++ ++@Input psDevInfo Pointer to device info ++@Input ui32Addr Address in RISC-V memory space ++ ++@Output pui32Value Read value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR ++RGXRiscvReadAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ PVR_UNREFERENCED_PARAMETER(ui32Addr); ++ PVR_UNREFERENCED_PARAMETER(pui32Value); ++ ++ /* Reading memory is not supported in nohw/pdump */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++#else ++ IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; ++ ++ /* Prepare read address */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr); ++ ++ /* Send abstract memory read command */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ++ RGX_CR_FWCORE_DMI_COMMAND, ++ (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | ++ RGXRISCVFW_DMI_COMMAND_READ | ++ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); ++ ++ /* Wait until abstract command is completed */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), ++ 0U, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", ++ __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ if (RGXRiscvCheckAbstractCmdError(psDevInfo) == RISCV_ABSTRACT_CMD_NO_ERROR) ++ { ++ /* Read memory value */ ++ *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0); ++ } ++ else ++ { ++ *pui32Value = 0U; ++ } ++ ++ return 
PVRSRV_OK; ++#endif ++} ++#endif /* !defined(EMULATOR) */ ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvPollAbstractMem ++ ++@Description Poll for a value at the given address in RISC-V memory space ++ using RISC-V abstract memory commands ++ ++@Input psDevInfo Pointer to device info ++@Input ui32Addr Address in RISC-V memory space ++@Input ui32Value Expected value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR ++RGXRiscvPollAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Poll RISC-V address 0x%x (expected 0x%08x)", ++ ui32Addr, ui32Value); ++ ++ /* Prepare read address */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1, ++ ui32Addr, PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Send abstract memory read command */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, ++ (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | ++ RGXRISCVFW_DMI_COMMAND_READ | ++ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Wait until abstract command is completed */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS, ++ 0U, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ RGXRiscvCheckAbstractCmdError(psDevInfo); ++ ++ /* Check read value */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_DATA0, ++ ui32Value, ++ 0xFFFFFFFF, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ return PVRSRV_OK; ++#else ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ PVR_UNREFERENCED_PARAMETER(ui32Addr); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ ++ /* Polling memory is currently not required driverlive */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++#endif ++} ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) ++/*! 
++******************************************************************************* ++@Function RGXRiscvReadSysBusMem ++ ++@Description Read a value at the given address in RISC-V memory space ++ using the RISC-V system bus ++ ++@Input psDevInfo Pointer to device info ++@Input ui32Addr Address in RISC-V memory space ++ ++@Output pui32Value Read value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR ++RGXRiscvReadSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 *pui32Value) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ PVR_UNREFERENCED_PARAMETER(ui32Addr); ++ PVR_UNREFERENCED_PARAMETER(pui32Value); ++ ++ /* Reading memory is not supported in nohw/pdump */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++#else ++ IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; ++ ++ /* Configure system bus to read 32 bit every time a new address is provided */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ++ RGX_CR_FWCORE_DMI_SBCS, ++ (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | ++ RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN); ++ ++ /* Perform read */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr); ++ ++ /* Wait until system bus is idle */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32), ++ 0U, ++ RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", ++ __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS))); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ if (RGXRiscvCheckSysBusError(psDevInfo) == RISCV_SYSBUS_NO_ERROR) ++ { ++ /* Read value from debug system bus */ ++ *pui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0); ++ } ++ else ++ { ++ *pui32Value = 0U; ++ } ++ ++ return PVRSRV_OK; ++#endif ++} ++#endif /* !defined(EMULATOR) */ ++ ++/*! 
++******************************************************************************* ++@Function RGXRiscvPollSysBusMem ++ ++@Description Poll for a value at the given address in RISC-V memory space ++ using the RISC-V system bus ++ ++@Input psDevInfo Pointer to device info ++@Input ui32Addr Address in RISC-V memory space ++@Input ui32Value Expected value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR ++RGXRiscvPollSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Poll RISC-V address 0x%x (expected 0x%08x)", ++ ui32Addr, ui32Value); ++ ++ /* Configure system bus to read 32 bit every time a new address is provided */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS, ++ (RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT) | ++ RGX_CR_FWCORE_DMI_SBCS_SBREADONADDR_EN, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Perform read */ ++ PDUMPREG32(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0, ++ ui32Addr, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Wait until system bus is idle */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_SBCS, ++ 0U, ++ RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ RGXRiscvCheckSysBusError(psDevInfo); ++ ++ /* Check read value */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_SBDATA0, ++ ui32Value, ++ 0xFFFFFFFF, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ return PVRSRV_OK; ++#else ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ PVR_UNREFERENCED_PARAMETER(ui32Addr); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ ++ /* Polling memory is currently not required driverlive */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++#endif ++} ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) ++/*! ++******************************************************************************* ++@Function RGXRiscvReadMem ++ ++@Description Read a value at the given address in RISC-V memory space ++ ++@Input psDevInfo Pointer to device info ++@Input ui32Addr Address in RISC-V memory space ++ ++@Output pui32Value Read value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXRiscvReadMem(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32Addr, ++ IMG_UINT32 *pui32Value) ++{ ++ if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) ++ { ++ return RGXRiscvReadAbstractMem(psDevInfo, ui32Addr, pui32Value); ++ } ++ ++ return RGXRiscvReadSysBusMem(psDevInfo, ui32Addr, pui32Value); ++} ++#endif /* !defined(EMULATOR) */ ++ ++/*! 
++******************************************************************************* ++@Function RGXRiscvPollMem ++ ++@Description Poll a value at the given address in RISC-V memory space ++ ++@Input psDevInfo Pointer to device info ++@Input ui32Addr Address in RISC-V memory space ++@Input ui32Value Expected value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32Addr, ++ IMG_UINT32 ui32Value) ++{ ++ if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) ++ { ++ return RGXRiscvPollAbstractMem(psDevInfo, ui32Addr, ui32Value); ++ } ++ ++ return RGXRiscvPollSysBusMem(psDevInfo, ui32Addr, ui32Value); ++} ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) ++/*! ++******************************************************************************* ++@Function RGXRiscvWriteAbstractMem ++ ++@Description Write a value at the given address in RISC-V memory space ++ using RISC-V abstract memory commands ++ ++@Input psDevInfo Pointer to device info ++@Input ui32Addr Address in RISC-V memory space ++@Input ui32Value Write value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR ++RGXRiscvWriteAbstractMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Write RISC-V address 0x%x (value 0x%08x)", ++ ui32Addr, ui32Value); ++ ++ /* Prepare write address */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA1, ++ ui32Addr, PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Prepare write data */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_DATA0, ++ ui32Value, PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Send abstract register write command */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_COMMAND, ++ (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | ++ RGXRISCVFW_DMI_COMMAND_WRITE | ++ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Wait until abstract command is completed */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS, ++ 0U, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++#else ++ IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; ++ ++ /* Prepare write address */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA1, ui32Addr); ++ ++ /* Prepare write data */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_DATA0, ui32Value); ++ ++ /* Send abstract memory write command */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ++ RGX_CR_FWCORE_DMI_COMMAND, ++ (RGXRISCVFW_DMI_COMMAND_ACCESS_MEMORY << RGX_CR_FWCORE_DMI_COMMAND_CMDTYPE_SHIFT) | ++ RGXRISCVFW_DMI_COMMAND_WRITE | ++ RGXRISCVFW_DMI_COMMAND_AAxSIZE_32BIT); ++ ++ /* Wait until abstract command is completed */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ pui32RegsBase + RGX_CR_FWCORE_DMI_ABSTRACTCS/sizeof(IMG_UINT32), ++ 0U, ++ RGX_CR_FWCORE_DMI_ABSTRACTCS_BUSY_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Abstract command did not complete in time (abstractcs = 0x%x)", ++ __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_ABSTRACTCS))); ++ 
return PVRSRV_ERROR_TIMEOUT; ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvWriteSysBusMem ++ ++@Description Write a value at the given address in RISC-V memory space ++ using the RISC-V system bus ++ ++@Input psDevInfo Pointer to device info ++@Input ui32Addr Address in RISC-V memory space ++@Input ui32Value Write value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR ++RGXRiscvWriteSysBusMem(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Addr, IMG_UINT32 ui32Value) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Write RISC-V address 0x%x (value 0x%08x)", ++ ui32Addr, ui32Value); ++ ++ /* Configure system bus to read 32 bit every time a new address is provided */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBCS, ++ RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Prepare write address */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBADDRESS0, ++ ui32Addr, PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Prepare write data and initiate write */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_FWCORE_DMI_SBDATA0, ++ ui32Value, PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Wait until system bus is idle */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_FWCORE_DMI_SBCS, ++ 0U, ++ RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, ++ PDUMP_FLAGS_CONTINUOUS, ++ PDUMP_POLL_OPERATOR_EQUAL); ++#else ++ IMG_UINT32 __iomem *pui32RegsBase = psDevInfo->pvRegsBaseKM; ++ ++ /* Configure system bus for 32 bit accesses */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ++ RGX_CR_FWCORE_DMI_SBCS, ++ RGXRISCVFW_DMI_SBCS_SBACCESS_32BIT << RGX_CR_FWCORE_DMI_SBCS_SBACCESS_SHIFT); ++ ++ /* Prepare write address */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBADDRESS0, ui32Addr); ++ ++ /* Prepare write data and initiate write */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBDATA0, ui32Value); ++ ++ /* Wait until system bus is idle */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ pui32RegsBase + RGX_CR_FWCORE_DMI_SBCS/sizeof(IMG_UINT32), ++ 0U, ++ RGX_CR_FWCORE_DMI_SBCS_SBBUSY_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: System Bus did not go idle in time (sbcs = 0x%x)", ++ __func__, OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FWCORE_DMI_SBCS))); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvWriteMem ++ ++@Description Write a value to the given address in RISC-V memory space ++ ++@Input psDevInfo Pointer to device info ++@Input ui32Addr Address in RISC-V memory space ++@Input ui32Value Write value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXRiscvWriteMem(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32Addr, ++ IMG_UINT32 ui32Value) ++{ ++ if (ui32Addr >= RGXRISCVFW_COREMEM_BASE && ui32Addr <= RGXRISCVFW_COREMEM_END) ++ { ++ return RGXRiscvWriteAbstractMem(psDevInfo, ui32Addr, ui32Value); ++ } ++ ++ return RGXRiscvWriteSysBusMem(psDevInfo, ui32Addr, ui32Value); ++} ++#endif /* !defined(EMULATOR) */ ++ ++/*! 
++******************************************************************************* ++@Function RGXRiscvDmiOp ++ ++@Description Acquire the powerlock and perform an operation on the RISC-V ++ Debug Module Interface, but only if the GPU is powered on. ++ ++@Input psDevInfo Pointer to device info ++@InOut pui64DMI Encoding of a request for the RISC-V Debug ++ Module with same format as the 'dmi' register ++ from the RISC-V debug specification (v0.13+). ++ On return, this is updated with the result of ++ the request, encoded the same way. ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT64 *pui64DMI) ++{ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++ PVR_UNREFERENCED_PARAMETER(pui64DMI); ++ ++ /* Accessing DM registers is not supported in nohw/pdump */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++#else ++#define DMI_BASE RGX_CR_FWCORE_DMI_RESERVED00 ++#define DMI_STRIDE (RGX_CR_FWCORE_DMI_RESERVED01 - RGX_CR_FWCORE_DMI_RESERVED00) ++#define DMI_REG(r) ((DMI_BASE) + (DMI_STRIDE) * (r)) ++ ++#define DMI_OP_SHIFT 0U ++#define DMI_OP_MASK 0x3ULL ++#define DMI_DATA_SHIFT 2U ++#define DMI_DATA_MASK 0x3FFFFFFFCULL ++#define DMI_ADDRESS_SHIFT 34U ++#define DMI_ADDRESS_MASK 0xFC00000000ULL ++ ++#define DMI_OP_NOP 0U ++#define DMI_OP_READ 1U ++#define DMI_OP_WRITE 2U ++#define DMI_OP_RESERVED 3U ++ ++#define DMI_OP_STATUS_SUCCESS 0U ++#define DMI_OP_STATUS_RESERVED 1U ++#define DMI_OP_STATUS_FAILED 2U ++#define DMI_OP_STATUS_BUSY 3U ++ ++ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; ++ PVRSRV_DEV_POWER_STATE ePowerState; ++ PVRSRV_ERROR eError; ++ IMG_UINT64 ui64Op, ui64Address, ui64Data; ++ ++ ui64Op = (*pui64DMI & DMI_OP_MASK) >> DMI_OP_SHIFT; ++ ui64Address = (*pui64DMI & DMI_ADDRESS_MASK) >> DMI_ADDRESS_SHIFT; ++ ui64Data = (*pui64DMI & DMI_DATA_MASK) >> DMI_DATA_SHIFT; ++ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ ui64Op = DMI_OP_STATUS_FAILED; ++ goto dmiop_update; ++ } ++ ++ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to retrieve RGX power state (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ ui64Op = DMI_OP_STATUS_FAILED; ++ goto dmiop_release_lock; ++ } ++ ++ if (ePowerState == PVRSRV_DEV_POWER_STATE_ON) ++ { ++ switch (ui64Op) ++ { ++ case DMI_OP_NOP: ++ ui64Op = DMI_OP_STATUS_SUCCESS; ++ break; ++ case DMI_OP_WRITE: ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ++ DMI_REG(ui64Address), ++ (IMG_UINT32)ui64Data); ++ ui64Op = DMI_OP_STATUS_SUCCESS; ++ break; ++ case DMI_OP_READ: ++ ui64Data = (IMG_UINT64)OSReadHWReg32(psDevInfo->pvRegsBaseKM, ++ DMI_REG(ui64Address)); ++ ui64Op = DMI_OP_STATUS_SUCCESS; ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "%s: unknown op %u", __func__, (IMG_UINT32)ui64Op)); ++ ui64Op = DMI_OP_STATUS_FAILED; ++ break; ++ } ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Accessing RISC-V Debug Module is not " ++ "possible while the GPU is powered off", __func__)); ++ ++ ui64Op = DMI_OP_STATUS_FAILED; ++ } ++ ++dmiop_release_lock: ++ PVRSRVPowerUnlock(psDeviceNode); ++ ++dmiop_update: ++ *pui64DMI = (ui64Op << DMI_OP_SHIFT) | ++ (ui64Address << DMI_ADDRESS_SHIFT) | ++ (ui64Data << DMI_DATA_SHIFT); ++ ++ return eError; ++#endif ++} ++ ++#if 
defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++/* ++ RGXReadMETAAddr ++*/ ++static PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value) ++{ ++ IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; ++ IMG_UINT32 ui32Value; ++ ++ /* Wait for Slave Port to be Ready */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ /* Issue the Read */ ++ OSWriteHWReg32( ++ psDevInfo->pvRegsBaseKM, ++ RGX_CR_META_SP_MSLVCTRL0, ++ ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); ++ (void) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0); ++ ++ /* Wait for Slave Port to be Ready: read complete */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ /* Read the value */ ++ ui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX); ++ ++ *pui32Value = ui32Value; ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ RGXWriteMETAAddr ++*/ ++static PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value) ++{ ++ IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM; ++ ++ /* Wait for Slave Port to be Ready */ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ (IMG_UINT32 __iomem *)(pui8RegBase + RGX_CR_META_SP_MSLVCTRL1), ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ /* Issue the Write */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr); ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value); ++ ++ return PVRSRV_OK; ++} ++#endif ++ ++PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value) ++{ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ return RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value); ++ } ++#endif ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ return RGXRiscvReadMem(psDevInfo, ui32FWAddr, pui32Value); ++ } ++#endif ++ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++} ++ ++PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value) ++{ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ return RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value); ++ } ++#endif ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ return RGXRiscvWriteMem(psDevInfo, ui32FWAddr, ui32Value); ++ } ++#endif ++ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++} ++ ++PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, ++ 
IMG_UINT32 ui32FwVA, ++ IMG_CPU_PHYADDR *psCpuPA, ++ IMG_DEV_PHYADDR *psDevPA, ++ IMG_UINT64 *pui64RawPTE) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_CPU_PHYADDR sCpuPA = {0U}; ++ IMG_DEV_PHYADDR sDevPA = {0U}; ++ IMG_UINT64 ui64RawPTE = 0U; ++ MMU_FAULT_DATA sFaultData = {0U}; ++ MMU_CONTEXT *psFwMMUCtx = psDevInfo->psKernelMMUCtx; ++ IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX); ++ IMG_UINT32 ui32FwHeapEnd = ui32FwHeapBase + (RGX_NUM_OS_SUPPORTED * RGX_FIRMWARE_RAW_HEAP_SIZE); ++ IMG_UINT32 ui32OSID = (ui32FwVA - ui32FwHeapBase) / RGX_FIRMWARE_RAW_HEAP_SIZE; ++ IMG_UINT32 ui32HeapId; ++ PHYS_HEAP *psPhysHeap; ++ ++ /* MIPS uses the same page size as the OS, while others default to 4K pages */ ++ IMG_UINT32 ui32FwPageSize = RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? ++ OSGetPageSize() : BIT(RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT); ++ IMG_UINT32 ui32PageOffset = (ui32FwVA & (ui32FwPageSize - 1)); ++ ++ PVR_LOG_GOTO_IF_INVALID_PARAM((ui32OSID < RGX_NUM_OS_SUPPORTED), ++ eError, ErrorExit); ++ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(((psCpuPA != NULL) || ++ (psDevPA != NULL) || ++ (pui64RawPTE != NULL)), ++ eError, ErrorExit); ++ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(((ui32FwVA >= ui32FwHeapBase) && ++ (ui32FwVA < ui32FwHeapEnd)), ++ eError, ErrorExit); ++ ++ ui32HeapId = (ui32OSID == RGXFW_HOST_OS) ? ++ PVRSRV_PHYS_HEAP_FW_MAIN : (PVRSRV_PHYS_HEAP_FW_PREMAP0 + ui32OSID); ++ psPhysHeap = psDevInfo->psDeviceNode->apsPhysHeap[ui32HeapId]; ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ /* MIPS is equipped with a dedicated MMU */ ++ RGXMipsCheckFaultAddress(psFwMMUCtx, ui32FwVA, &sFaultData); ++ } ++ else ++ { ++ IMG_UINT64 ui64FwDataBaseMask; ++ IMG_DEV_VIRTADDR sDevVAddr; ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ ui64FwDataBaseMask = ~(RGXFW_SEGMMU_DATA_META_CACHE_MASK | ++ RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK | ++ RGXFW_SEGMMU_DATA_BASE_ADDRESS); ++ } ++ else ++#endif ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) && !defined(EMULATOR) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ ui64FwDataBaseMask = ~(RGXRISCVFW_GET_REGION_BASE(0xF)); ++ } ++ else ++#endif ++ { ++ PVR_LOG_GOTO_WITH_ERROR("RGXGetFwMapping", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit); ++ } ++ ++ sDevVAddr.uiAddr = (ui32FwVA & ui64FwDataBaseMask) | RGX_FIRMWARE_RAW_HEAP_BASE; ++ ++ /* Fw CPU shares a subset of the GPU's VA space */ ++ MMU_CheckFaultAddress(psFwMMUCtx, &sDevVAddr, &sFaultData); ++ } ++ ++ ui64RawPTE = sFaultData.sLevelData[MMU_LEVEL_1].ui64Address; ++ ++ if (eError == PVRSRV_OK) ++ { ++ IMG_BOOL bValidPage = (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? ++ BITMASK_HAS(ui64RawPTE, RGXMIPSFW_TLB_VALID) : ++ BITMASK_HAS(ui64RawPTE, RGX_MMUCTRL_PT_DATA_VALID_EN); ++ if (!bValidPage) ++ { ++ /* don't report invalid pages */ ++ eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING; ++ } ++ else ++ { ++ sDevPA.uiAddr = ui32PageOffset + ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ? 
++ RGXMIPSFW_TLB_GET_PA(ui64RawPTE) : ++ (ui64RawPTE & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK)); ++ ++ /* Only the Host's Firmware heap is present in the Host's CPU IPA space */ ++ if (ui32OSID == RGXFW_HOST_OS) ++ { ++ PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPA, &sDevPA); ++ } ++ else ++ { ++ sCpuPA.uiAddr = 0U; ++ } ++ } ++ } ++ ++ if (psCpuPA != NULL) ++ { ++ *psCpuPA = sCpuPA; ++ } ++ ++ if (psDevPA != NULL) ++ { ++ *psDevPA = sDevPA; ++ } ++ ++ if (pui64RawPTE != NULL) ++ { ++ *pui64RawPTE = ui64RawPTE; ++ } ++ ++ErrorExit: ++ return eError; ++} ++ ++/****************************************************************************** ++ End of file (rgxfwutils.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxfwutils.h b/drivers/gpu/drm/img-rogue/rgxfwutils.h +new file mode 100644 +index 000000000000..d69f92f8ae9c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxfwutils.h +@@ -0,0 +1,1362 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX firmware utility routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX firmware utility routines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#ifndef RGXFWUTILS_H
++#define RGXFWUTILS_H
++
++#include "rgx_memallocflags.h"
++#include "log2.h"
++#include "rgxdevice.h"
++#include "rgxccb.h"
++#include "devicemem.h"
++#include "device.h"
++#include "pvr_notifier.h"
++#include "pvrsrv.h"
++#include "connection_server.h"
++#include "rgxta3d.h"
++#include "devicemem_utils.h"
++#include "rgxmem.h"
++
++#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "FwRawOSID%d" /*!< RGX Raw Firmware Heap identifier */
++
++static INLINE PVRSRV_ERROR _SelectDevMemHeap(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                             PVRSRV_MEMALLOCFLAGS_T *puiFlags,
++                                             DEVMEM_HEAP **ppsFwHeap)
++{
++	PVRSRV_PHYS_HEAP ePhysHeap = (PVRSRV_PHYS_HEAP)PVRSRV_GET_PHYS_HEAP_HINT(*puiFlags);
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	switch (ePhysHeap)
++	{
++#if defined(SUPPORT_SECURITY_VALIDATION)
++		/* call with GPU_SECURE from RGXSetupFwSysData */
++		case PVRSRV_PHYS_HEAP_GPU_SECURE:
++#endif
++		case PVRSRV_PHYS_HEAP_FW_CODE:
++		case PVRSRV_PHYS_HEAP_FW_PRIV_DATA:
++		case PVRSRV_PHYS_HEAP_FW_MAIN:
++		{
++			*ppsFwHeap = psDevInfo->psFirmwareMainHeap;
++			break;
++		}
++		case PVRSRV_PHYS_HEAP_FW_CONFIG:
++		{
++			*ppsFwHeap = psDevInfo->psFirmwareConfigHeap;
++			break;
++		}
++		case PVRSRV_PHYS_HEAP_FW_PREMAP0:
++		case PVRSRV_PHYS_HEAP_FW_PREMAP1:
++		case PVRSRV_PHYS_HEAP_FW_PREMAP2:
++		case PVRSRV_PHYS_HEAP_FW_PREMAP3:
++		case PVRSRV_PHYS_HEAP_FW_PREMAP4:
++		case PVRSRV_PHYS_HEAP_FW_PREMAP5:
++		case PVRSRV_PHYS_HEAP_FW_PREMAP6:
++		case PVRSRV_PHYS_HEAP_FW_PREMAP7:
++		{
++			IMG_UINT32 ui32OSID = ePhysHeap - PVRSRV_PHYS_HEAP_FW_PREMAP0;
++
++			PVR_LOG_RETURN_IF_INVALID_PARAM(ui32OSID < RGX_NUM_OS_SUPPORTED, "ui32OSID");
++			*ppsFwHeap = psDevInfo->psGuestFirmwareRawHeap[ui32OSID];
++			break;
++		}
++		default:
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: invalid phys heap", __func__));
++			eError = PVRSRV_ERROR_INVALID_PARAMS;
++			break;
++		}
++	}
++
++	return eError;
++}
++
++/*
++ * Firmware-only allocations (which are initialised by the host) must be aligned to the SLC cache line size.
++ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT and this causes problems
++ * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't
++ * make it into the SLC cache because it has already been loaded when accessing the content of the first allocation.
++ */
++static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                            IMG_DEVMEM_SIZE_T uiSize,
++                                            PVRSRV_MEMALLOCFLAGS_T uiFlags,
++                                            const IMG_CHAR *pszText,
++                                            DEVMEM_MEMDESC **ppsMemDescPtr)
++{
++	IMG_DEV_VIRTADDR sTmpDevVAddr;
++	PVRSRV_ERROR eError;
++	DEVMEM_HEAP *psFwHeap;
++	IMG_DEVMEM_ALIGN_T uiAlign;
++
++	PVR_DPF_ENTERED;
++
++	/* Enforce the standard "Fw" prefix naming scheme callers must follow */
++	PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
++
++	/* Imported from AppHint: flag to poison allocations when freed */
++	uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag;
++
++	eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF_RETURN_RC(eError);
++	}
++
++#define MIPS_CACHE_LINE_SIZE_IN_BYTES 16
++	uiAlign = (psFwHeap == psDevInfo->psFirmwareConfigHeap) ?
++	          (RGX_FIRMWARE_CONFIG_HEAP_ALLOC_GRANULARITY) :
++/*
++ * Align firmware allocations for MIPS-based Rogue cores to the MIPS cache line boundary (16 bytes) instead of the SLC line (64 bytes)
++ * for more compact memory with less wastage, hopefully saving some TLB misses.
++ */ ++ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? MIPS_CACHE_LINE_SIZE_IN_BYTES ++ : GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS))); ++ ++ eError = DevmemAllocateAndMap(psFwHeap, ++ uiSize, ++ uiAlign, ++ uiFlags, ++ pszText, ++ ppsMemDescPtr, ++ &sTmpDevVAddr); ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_ALIGN_T uiAlign, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; ++ IMG_DEV_VIRTADDR sTmpDevVAddr; ++ PVRSRV_ERROR eError; ++ DEVMEM_HEAP *psFwHeap; ++ ++ PVR_DPF_ENTERED; ++ ++ /* Enforce the standard pre-fix naming scheme callers must follow */ ++ PVR_ASSERT((pszText != NULL) && ++ (pszText[0] == 'F') && (pszText[1] == 'w') && ++ (pszText[2] == 'E') && (pszText[3] == 'x')); ++ ++ /* Imported from AppHint , flag to poison allocations when freed */ ++ uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; ++ ++ eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ ++ eError = DevmemAllocateExportable(psDeviceNode, ++ uiSize, ++ uiAlign, ++ RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ? ++ ExactLog2(uiAlign) : ++ DevmemGetHeapLog2PageSize(psFwHeap), ++ uiFlags, ++ pszText, ++ ppsMemDescPtr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "FW DevmemAllocateExportable failed (%u)", eError)); ++ PVR_DPF_RETURN_RC(eError); ++ } ++ ++ /* ++ We need to map it so the heap for this allocation ++ is set ++ */ ++ eError = DevmemMapToDevice(*ppsMemDescPtr, ++ psDevInfo->psFirmwareMainHeap, ++ &sTmpDevVAddr); ++ if (eError != PVRSRV_OK) ++ { ++ DevmemFree(*ppsMemDescPtr); ++ PVR_DPF((PVR_DBG_ERROR, "FW DevmemMapToDevice failed (%u)", eError)); ++ } ++ ++ PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr); ++} ++ ++static INLINE PVRSRV_ERROR DevmemFwAllocateSparse(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_DEVMEM_SIZE_T uiChunkSize, ++ IMG_UINT32 ui32NumPhysChunks, ++ IMG_UINT32 ui32NumVirtChunks, ++ IMG_UINT32 *pui32MappingTable, ++ PVRSRV_MEMALLOCFLAGS_T uiFlags, ++ const IMG_CHAR *pszText, ++ DEVMEM_MEMDESC **ppsMemDescPtr) ++{ ++ IMG_DEV_VIRTADDR sTmpDevVAddr; ++ PVRSRV_ERROR eError; ++ DEVMEM_HEAP *psFwHeap; ++ IMG_UINT32 ui32Align; ++ ++ PVR_DPF_ENTERED; ++ ++ /* Enforce the standard pre-fix naming scheme callers must follow */ ++ PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w')); ++ ui32Align = GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)); ++ ++ /* Imported from AppHint , flag to poison allocations when freed */ ++ uiFlags |= psDevInfo->uiFWPoisonOnFreeFlag; ++ ++ eError = _SelectDevMemHeap(psDevInfo, &uiFlags, &psFwHeap); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ ++ eError = DevmemAllocateSparse(psDevInfo->psDeviceNode, ++ uiSize, ++ uiChunkSize, ++ ui32NumPhysChunks, ++ ui32NumVirtChunks, ++ pui32MappingTable, ++ ui32Align, ++ DevmemGetHeapLog2PageSize(psFwHeap), ++ uiFlags | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING, ++ pszText, ++ ppsMemDescPtr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ /* ++ We need to map it so the heap for this allocation ++ is set ++ */ ++ eError = DevmemMapToDevice(*ppsMemDescPtr, ++ psFwHeap, ++ &sTmpDevVAddr); ++ if (eError != PVRSRV_OK) ++ { ++ 
DevmemFree(*ppsMemDescPtr);
++		PVR_DPF_RETURN_RC(eError);
++	}
++
++	PVR_DPF_RETURN_RC(eError);
++}
++
++
++static INLINE void DevmemFwUnmapAndFree(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                        DEVMEM_MEMDESC *psMemDesc)
++{
++	PVR_DPF_ENTERED1(psMemDesc);
++
++	DevmemReleaseDevVirtAddr(psMemDesc);
++	DevmemFree(psMemDesc);
++
++	PVR_DPF_RETURN;
++}
++
++/*
++ * This function returns the value of the hardware register RGX_CR_TIMER,
++ * which is a timer counting in ticks.
++ */
++
++static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo)
++{
++	IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
++
++	/*
++	 * In order to avoid having to issue three 32-bit reads to detect the
++	 * lower 32 bits wrapping, the MSB of the low 32-bit word is duplicated
++	 * in the MSB of the high 32-bit word. If the wrap happens, we just read
++	 * the register again (it will not wrap again so soon).
++	 * (A worked example follows the allocation-flag macros below.)
++	 */
++	if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK)
++	{
++		ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
++	}
++
++	return (ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT;
++}
++
++/*
++ * This FW Common Context is only mapped into the kernel for initialisation and cleanup purposes.
++ * Otherwise this allocation is only used by the FW.
++ * Therefore the GPU cache doesn't need coherency, and write-combine will
++ * suffice on the CPU side (the WC buffer will be flushed at the first kick).
++ */
++#define RGX_FWCOMCTX_ALLOCFLAGS     (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
++                                     PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \
++                                     PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
++                                     PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
++                                     PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \
++                                     PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
++                                     PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
++                                     PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
++                                     PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                     PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                     PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN))
++
++#define RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
++                                         PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
++                                         PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
++                                         PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
++                                         PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
++                                         PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
++                                         PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
++                                         PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                         PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                         PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN))
++
++#define RGX_FWSHAREDMEM_CONFIG_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
++                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
++                                           PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
++                                           PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
++                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CONFIG))
++
++#define RGX_FWSHAREDMEM_GPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
++                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
++                                           PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
++                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
++                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN))
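
To make the torn-read check in RGXReadHWTimerReg above concrete, here is a minimal standalone sketch (an editorial addition, not part of the patch). It assumes, per the register layout implied by the code, that ~RGX_CR_TIMER_BIT31_CLRMSK selects bit 63, the hardware's duplicate of bit 31 of the low word; the helper name is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the check in RGXReadHWTimerReg: the read is
 * torn iff bit 63 (the duplicated MSB of the low word) differs from bit 31.
 * XOR-ing the value with itself shifted left by 32 lines both up at bit 63. */
static int timer_read_is_torn(uint64_t t)
{
	return (int)(((t ^ (t << 32)) >> 63) & 1u);
}

int main(void)
{
	/* Consistent read: low word 0x80000001 (bit 31 = 1), bit-63 copy = 1 */
	uint64_t ok   = 0x8000000080000001ULL;
	/* Torn read: the low word wrapped to 0 (bit 31 = 0) after the bit-63
	 * copy was latched as 1, so the two disagree and we must read again */
	uint64_t torn = 0x8000000100000000ULL;

	printf("%d %d\n", timer_read_is_torn(ok), timer_read_is_torn(torn)); /* prints: 0 1 */
	return 0;
}

This is why a single re-read suffices: immediately after a wrap the counter will not wrap again for another 2^32 ticks.
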
++/* Firmware memory that is not accessible by the CPU. */
++#define RGX_FWSHAREDMEM_GPU_ONLY_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
++                                             PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
++                                             PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
++                                             PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
++                                             PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
++
++/* Firmware shared memory that is supposed to be read-only to the CPU.
++ * In reality it isn't, due to ZERO_ON_ALLOC which enforces the CPU_WRITEABLE
++ * flag on the allocations. */
++#define RGX_FWSHAREDMEM_CPU_RO_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
++                                           PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | \
++                                           PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
++                                           PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
++                                           PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
++                                           PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | \
++                                           PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | \
++                                           PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
++
++/* Data content kept in physical memory from previous boot cycles must not be cleared during allocation */
++#define RGX_AUTOVZ_KEEP_FW_DATA_MASK(bKeepMem) ((bKeepMem) ? (~PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) : (~0ULL))
++
++/******************************************************************************
++ * RGXSetFirmwareAddress Flags
++ *****************************************************************************/
++#define RFW_FWADDR_FLAG_NONE  (0)       /*!< Void flag */
++#define RFW_FWADDR_NOREF_FLAG (1U << 0) /*!< It is safe to immediately release the reference to the pointer,
++                                             otherwise RGXUnsetFirmwareAddress() must be called when finished. */
++
++IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
++PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO* psDevInfo, PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
++
++#if defined(SUPPORT_TBI_INTERFACE)
++IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
++PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
++#endif
++
++PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
++                              IMG_BOOL bEnableSignatureChecks,
++                              IMG_UINT32 ui32SignatureChecksBufSize,
++                              IMG_UINT32 ui32HWPerfFWBufSizeKB,
++                              IMG_UINT64 ui64HWPerfFilter,
++                              IMG_UINT32 ui32ConfigFlags,
++                              IMG_UINT32 ui32ConfigFlagsExt,
++                              IMG_UINT32 ui32FwOsCfgFlags,
++                              IMG_UINT32 ui32LogType,
++                              IMG_UINT32 ui32FilterFlags,
++                              IMG_UINT32 ui32JonesDisableMask,
++                              IMG_UINT32 ui32HWRDebugDumpLimit,
++                              IMG_UINT32 ui32HWPerfCountersDataSize,
++                              IMG_UINT32 *pui32TPUTrilinearFracMask,
++                              RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
++                              FW_PERF_CONF eFirmwarePerf,
++                              IMG_UINT32 ui32KCCBSizeLog2);
++
++void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo);
++
++/*************************************************************************/ /*!
++@Function       RGXSetupFwAllocation
++
++@Description    Allocates a block of firmware memory (from the physical heap
++                hinted at in the allocation flags), maps it, and optionally
++                returns its firmware virtual address and/or a CPU pointer
++                to it.
++
++@Input          psDevInfo      Device Info struct
++@Input          uiAllocFlags   Flags determining type of memory allocation
++@Input          ui32Size       Size of memory allocation
++@Input          pszName        Allocation label
++@Output         ppsMemDesc     Pointer to the MemDesc created for the
++                               allocation
++@Input          psFwPtr        Address of the firmware pointer to set
++@Input          ppvCpuPtr      Address of the cpu pointer to set
++@Input          ui32DevVAFlags Any combination of RFW_FWADDR_*_FLAG
++
++@Return         PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR RGXSetupFwAllocation(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                  PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
++                                  IMG_UINT32 ui32Size,
++                                  const IMG_CHAR *pszName,
++                                  DEVMEM_MEMDESC **ppsMemDesc,
++                                  RGXFWIF_DEV_VIRTADDR *psFwPtr,
++                                  void **ppvCpuPtr,
++                                  IMG_UINT32 ui32DevVAFlags);
++
++/*************************************************************************/ /*!
++@Function       RGXSetFirmwareAddress
++
++@Description    Sets a pointer in a firmware data structure.
++
++@Input          ppDest         Address of the pointer to set
++@Input          psSrc          MemDesc describing the pointer
++@Input          uiOffset       Offset within psSrc
++@Input          ui32Flags      Any combination of RFW_FWADDR_*_FLAG
++
++@Return         PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest,
++                                   DEVMEM_MEMDESC *psSrc,
++                                   IMG_UINT32 uiOffset,
++                                   IMG_UINT32 ui32Flags);
++
++
++/*************************************************************************/ /*!
++@Function       RGXSetMetaDMAAddress
++
++@Description    Fills a Firmware structure used to set up the Meta DMA with
++                two pointers to the same data, one on 40 bit and one on
++                32 bit (pointer in the FW memory space).
++
++@Input          psDest           Address of the structure to set
++@Input          psSrcMemDesc     MemDesc describing the pointer
++@Input          psSrcFWDevVAddr  Firmware memory space pointer
++@Input          uiOffset         Offset within the MemDesc
++
++@Return         void
++*/ /**************************************************************************/
++void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest,
++                          DEVMEM_MEMDESC *psSrcMemDesc,
++                          RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr,
++                          IMG_UINT32 uiOffset);
++
++
++/*************************************************************************/ /*!
++@Function       RGXUnsetFirmwareAddress
++
++@Description    Unsets a pointer in a firmware data structure
++
++@Input          psSrc          MemDesc describing the pointer
++
++@Return         void
++*/ /**************************************************************************/
++void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc);
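
As a usage illustration of the allocation helpers above, here is a minimal editorial sketch (not part of the patch). It allocates a small FW-shared structure with RGXSetupFwAllocation using the flag sets defined earlier; the function and buffer names are hypothetical and error handling is abbreviated.

static PVRSRV_ERROR ExampleSetupFwCounter(PVRSRV_RGXDEV_INFO *psDevInfo,
                                          DEVMEM_MEMDESC **ppsMemDesc,
                                          RGXFWIF_DEV_VIRTADDR *psCounterFwAddr)
{
	void *pvCpuPtr;
	PVRSRV_ERROR eError;

	/* One call allocates from the hinted heap (FW_MAIN here), maps the
	 * memory, publishes a FW virtual address and returns a CPU pointer.
	 * The "Fw" prefix on the label is mandatory (see DevmemFwAllocate). */
	eError = RGXSetupFwAllocation(psDevInfo,
	                              RGX_FWSHAREDMEM_MAIN_ALLOCFLAGS,
	                              sizeof(IMG_UINT32),
	                              "FwExampleCounter",
	                              ppsMemDesc,
	                              psCounterFwAddr,
	                              &pvCpuPtr,
	                              RFW_FWADDR_FLAG_NONE);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	*(IMG_UINT32 *)pvCpuPtr = 0; /* host-side initialisation */

	/* Teardown would be RGXUnsetFirmwareAddress(*ppsMemDesc) followed by
	 * DevmemFwUnmapAndFree(psDevInfo, *ppsMemDesc). */
	return PVRSRV_OK;
}

++
++/*************************************************************************/ /*!
++@Function       FWCommonContextAllocate
++
++@Description    Allocate a FW common context. This allocates the HW memory
++                for the context, the CCB and wires it all together.
++
++@Input          psConnection          Connection this context is being created on
++@Input          psDeviceNode          Device node to create the FW context on
++                                      (must be RGX device node)
++@Input          eRGXCCBRequestor      RGX_CCB_REQUESTOR_TYPE enum constant which
++                                      represents the requestor of this FWCC
++@Input          eDM                   Data Master type
++@Input          psServerMMUContext    Server MMU memory context.
++@Input          psAllocatedMemDesc    Pointer to pre-allocated MemDesc to use
++                                      as the FW context or NULL if this function
++                                      should allocate it
++@Input          ui32AllocatedOffset   Offset into the pre-allocated MemDesc to use
++                                      as the FW context.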
If psAllocatedMemDesc ++ is NULL then this parameter is ignored ++@Input psFWMemContextMemDesc MemDesc of the FW memory context this ++ common context resides on ++@Input psContextStateMemDesc FW context state (context switch) MemDesc ++@Input ui32CCBAllocSizeLog2 Size of the CCB for this context ++@Input ui32CCBMaxAllocSizeLog2 Maximum size to which CCB can grow for this context ++@Input ui32ContextFlags Flags which specify properties of the context ++@Input ui32Priority Priority of the context ++@Input ui32MaxDeadlineMS Max deadline limit in MS that the workload can run ++@Input ui64RobustnessAddress Address for FW to signal a context reset ++@Input psInfo Structure that contains extra info ++ required for the creation of the context ++ (elements might change from core to core) ++@Return PVRSRV_OK if the context was successfully created ++*/ /**************************************************************************/ ++PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor, ++ RGXFWIF_DM eDM, ++ SERVER_MMU_CONTEXT *psServerMMUContext, ++ DEVMEM_MEMDESC *psAllocatedMemDesc, ++ IMG_UINT32 ui32AllocatedOffset, ++ DEVMEM_MEMDESC *psFWMemContextMemDesc, ++ DEVMEM_MEMDESC *psContextStateMemDesc, ++ IMG_UINT32 ui32CCBAllocSizeLog2, ++ IMG_UINT32 ui32CCBMaxAllocSizeLog2, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT32 ui32Priority, ++ IMG_UINT32 ui32MaxDeadlineMS, ++ IMG_UINT64 ui64RobustnessAddress, ++ RGX_COMMON_CONTEXT_INFO *psInfo, ++ RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext); ++ ++void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); ++ ++PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); ++ ++RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); ++ ++RGX_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, ++ IMG_UINT32 *pui32LastResetJobRef); ++ ++PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext); ++ ++PVRSRV_ERROR RGXGetFWCommonContextAddrFromServerMMUCtx(PVRSRV_RGXDEV_INFO *psDevInfo, ++ SERVER_MMU_CONTEXT *psServerMMUContext, ++ PRGXFWIF_FWCOMMONCONTEXT *psFWCommonContextFWAddr); ++ ++PVRSRV_ERROR FWCommonContextSetFlags(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext, ++ IMG_UINT32 ui32ContextFlags); ++/*! ++******************************************************************************* ++@Function RGXScheduleProcessQueuesKM ++ ++@Description Software command complete handler ++ (sends uncounted kicks for all the DMs through the MISR) ++ ++@Input hCmdCompHandle RGX device node ++ ++@Return None ++******************************************************************************/ ++void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle); ++ ++#if defined(SUPPORT_VALIDATION) ++/*! 
++******************************************************************************* ++@Function RGXScheduleRgxRegCommand ++ ++@Input psDevInfo Device Info struct ++@Input ui64RegVal Value to write into FW register ++@Input ui64Size Register size ++@Input ui32Offset Register Offset ++@Input bWriteOp Register Write or Read toggle ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXScheduleRgxRegCommand(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT64 ui64RegVal, ++ IMG_UINT64 ui64Size, ++ IMG_UINT32 ui32Offset, ++ IMG_BOOL bWriteOp); ++ ++#endif ++ ++/*! ++******************************************************************************* ++ ++@Function RGXInstallProcessQueuesMISR ++ ++@Description Installs the MISR to handle Process Queues operations ++ ++@Input phMISR Pointer to the MISR handler ++@Input psDeviceNode RGX Device node ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll); ++ ++/*************************************************************************/ /*! ++@Function RGXSendCommandWithPowLockAndGetKCCBSlot ++ ++@Description Sends a command to a particular DM without honouring ++ pending cache operations but taking the power lock. ++ ++@Input psDevInfo Device Info ++@Input psKCCBCmd The cmd to send. ++@Input ui32PDumpFlags Pdump flags ++@Output pui32CmdKCCBSlot When non-NULL: ++ - Pointer on return contains the kCCB slot ++ number in which the command was enqueued. ++ - Resets the value of the allotted slot to ++ RGXFWIF_KCCB_RTN_SLOT_RST ++@Return PVRSRV_ERROR ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXSendCommandWithPowLockAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_KCCB_CMD *psKCCBCmd, ++ IMG_UINT32 ui32PDumpFlags, ++ IMG_UINT32 *pui32CmdKCCBSlot); ++ ++#define RGXSendCommandWithPowLock(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ ++ RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) ++ ++/*************************************************************************/ /*! ++@Function RGXSendCommandAndGetKCCBSlot ++ ++@Description Sends a command to a particular DM without honouring ++ pending cache operations or the power lock. ++ The function flushes any deferred KCCB commands first. ++ ++@Input psDevInfo Device Info ++@Input psKCCBCmd The cmd to send. ++@Input uiPdumpFlags PDump flags. ++@Output pui32CmdKCCBSlot When non-NULL: ++ - Pointer on return contains the kCCB slot ++ number in which the command was enqueued. ++ - Resets the value of the allotted slot to ++ RGXFWIF_KCCB_RTN_SLOT_RST ++@Return PVRSRV_ERROR ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXSendCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_KCCB_CMD *psKCCBCmd, ++ PDUMP_FLAGS_T uiPdumpFlags, ++ IMG_UINT32 *pui32CmdKCCBSlot); ++ ++#define RGXSendCommand(psDevInfo, psKCCBCmd, ui32PDumpFlags) \ ++ RGXSendCommandAndGetKCCBSlot(psDevInfo, psKCCBCmd, ui32PDumpFlags, NULL) ++ ++/*************************************************************************/ /*! 
++@Function       RGXScheduleCommandAndGetKCCBSlot
++
++@Description    Sends a command to a particular DM and kicks the firmware,
++                but first schedules any commands which have to happen
++                beforehand (such as deferred KCCB commands).
++
++@Input          psDevInfo        Device Info
++@Input          eKCCBType        To which DM the cmd is sent.
++@Input          psKCCBCmd        The cmd to send.
++@Input          ui32PDumpFlags   PDump flags
++@Output         pui32CmdKCCBSlot When non-NULL:
++                                 - Pointer on return contains the kCCB slot
++                                   number in which the command was enqueued.
++                                 - Resets the value of the allotted slot to
++                                   RGXFWIF_KCCB_RTN_SLOT_RST
++@Return         PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR RGXScheduleCommandAndGetKCCBSlot(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                              RGXFWIF_DM eKCCBType,
++                                              RGXFWIF_KCCB_CMD *psKCCBCmd,
++                                              IMG_UINT32 ui32PDumpFlags,
++                                              IMG_UINT32 *pui32CmdKCCBSlot);
++#define RGXScheduleCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags) \
++	RGXScheduleCommandAndGetKCCBSlot(psDevInfo, eKCCBType, psKCCBCmd, ui32PDumpFlags, NULL)
++
++/*************************************************************************/ /*!
++@Function       RGXWaitForKCCBSlotUpdate
++
++@Description    Waits until the required kCCB slot value is updated by the FW
++                (signifies command completion). Additionally, dumps a relevant
++                PDump poll command.
++
++@Input          psDevInfo        Device Info
++@Input          ui32SlotNum      The kCCB slot number to wait for an update on
++@Input          ui32PDumpFlags   PDump flags
++
++@Return         PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR RGXWaitForKCCBSlotUpdate(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                      IMG_UINT32 ui32SlotNum,
++                                      IMG_UINT32 ui32PDumpFlags);
++
++PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo);
++
++/*************************************************************************/ /*!
++@Function       PVRSRVRGXFrameworkCopyCommand
++
++@Description    Copy framework command into FW addressable buffer
++
++@param          psDeviceNode
++@param          psFWFrameworkMemDesc
++@param          pbyGPUFRegisterList
++@param          ui32FrameworkRegisterSize
++
++@Return         PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                           DEVMEM_MEMDESC *psFWFrameworkMemDesc,
++                                           IMG_PBYTE pbyGPUFRegisterList,
++                                           IMG_UINT32 ui32FrameworkRegisterSize);
++
++
++/*************************************************************************/ /*!
++@Function       PVRSRVRGXFrameworkCreateKM
++
++@Description    Create FW addressable buffer for framework
++
++@param          psDeviceNode
++@param          ppsFWFrameworkMemDesc
++@param          ui32FrameworkRegisterSize
++
++@Return         PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                        DEVMEM_MEMDESC **ppsFWFrameworkMemDesc,
++                                        IMG_UINT32 ui32FrameworkRegisterSize);
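
The slot-based completion flow above is easiest to see end to end. Below is a minimal editorial sketch (not part of the patch) of enqueueing a command and waiting for the firmware's response; the command type and PDump flags are illustrative assumptions and error handling is abbreviated.

static PVRSRV_ERROR ExampleKickAndWait(PVRSRV_RGXDEV_INFO *psDevInfo)
{
	RGXFWIF_KCCB_CMD sCmd = { 0 };
	IMG_UINT32 ui32Slot;
	PVRSRV_ERROR eError;

	/* Illustrative command type; real callers fill in a full command */
	sCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;

	/* Enqueue on the general-purpose DM and record the kCCB return slot */
	eError = RGXScheduleCommandAndGetKCCBSlot(psDevInfo,
	                                          RGXFWIF_DM_GP,
	                                          &sCmd,
	                                          PDUMP_FLAGS_CONTINUOUS,
	                                          &ui32Slot);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Block until the FW writes a return code into that slot */
	return RGXWaitForKCCBSlotUpdate(psDevInfo, ui32Slot, PDUMP_FLAGS_CONTINUOUS);
}

++
++/*************************************************************************/ /*!
++@Function       RGXPollForGPCommandCompletion
++
++@Description    Polls for completion of a submitted GP command. Poll is done
++                on a value matching a masked read from the address.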
++
++@Input          psDevNode        Pointer to device node struct
++@Input          pui32LinMemAddr  CPU linear address to poll
++@Input          ui32Value        Required value
++@Input          ui32Mask         Mask
++
++@Return         PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR RGXPollForGPCommandCompletion(PVRSRV_DEVICE_NODE *psDevNode,
++                                           volatile IMG_UINT32 __iomem *pui32LinMemAddr,
++                                           IMG_UINT32 ui32Value,
++                                           IMG_UINT32 ui32Mask);
++
++/*************************************************************************/ /*!
++@Function       RGXStateFlagCtrl
++
++@Description    Set and return FW internal state flags.
++
++@Input          psDevInfo      Device Info
++@Input          ui32Config     AppHint config flags
++@Output         pui32State     Current AppHint state flag configuration
++@Input          bSetNotClear   Set or clear the provided config flags
++
++@Return         PVRSRV_ERROR
++*/ /**************************************************************************/
++PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
++                              IMG_UINT32 ui32Config,
++                              IMG_UINT32 *pui32State,
++                              IMG_BOOL bSetNotClear);
++
++/*!
++*******************************************************************************
++@Function       RGXFWRequestCommonContextCleanUp
++
++@Description    Schedules a FW common context cleanup. The firmware doesn't
++                block waiting for the resource to become idle but rather
++                notifies the host that the resource is busy.
++
++@Input          psDeviceNode          pointer to device node
++@Input          psServerCommonContext context to be cleaned up
++@Input          eDM                   Data master, to which the cleanup command
++                                      should be sent
++@Input          ui32PDumpFlags        PDump continuous flag
++
++@Return         PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                              RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
++                                              RGXFWIF_DM eDM,
++                                              IMG_UINT32 ui32PDumpFlags);
++
++/*!
++*******************************************************************************
++@Function       RGXFWRequestHWRTDataCleanUp
++
++@Description    Schedules a FW HWRTData memory cleanup. The firmware doesn't
++                block waiting for the resource to become idle but rather
++                notifies the host that the resource is busy.
++
++@Input          psDeviceNode   pointer to device node
++@Input          psHWRTData     firmware address of the HWRTData for clean-up
++
++@Return         PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                         PRGXFWIF_HWRTDATA psHWRTData);
++
++/*!
++*******************************************************************************
++@Function       RGXFWRequestFreeListCleanUp
++
++@Description    Schedules a FW FreeList cleanup. The firmware doesn't block
++                waiting for the resource to become idle but rather notifies
++                the host that the resource is busy.
++
++@Input          psDeviceNode   pointer to device info
++@Input          psFWFreeList   firmware address of the FreeList for clean-up
++
++@Return         PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode,
++                                         PRGXFWIF_FREELIST psFWFreeList);
++
++/*!
++*******************************************************************************
++@Function       RGXFWRequestZSBufferCleanUp
++
++@Description    Schedules a FW ZS Buffer cleanup. The firmware doesn't block
++                waiting for the resource to become idle but rather notifies
++                the host that the resource is busy.
++ ++@Input psDevInfo pointer to device node ++@Input psFWZSBuffer firmware address of the ZS Buffer for clean-up ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo, ++ PRGXFWIF_ZSBUFFER psFWZSBuffer); ++ ++PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext, ++ CONNECTION_DATA *psConnection, ++ PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32Priority, ++ RGXFWIF_DM eDM); ++ ++/*! ++******************************************************************************* ++@Function RGXFWSetHCSDeadline ++ ++@Description Requests the Firmware to set a new Hard Context Switch timeout ++ deadline. Context switches that surpass that deadline cause the ++ system to kill the currently running workloads. ++ ++@Input psDeviceNode pointer to device node ++@Input ui32HCSDeadlineMs The deadline in milliseconds. ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32HCSDeadlineMs); ++ ++/*! ++******************************************************************************* ++@Function RGXFWChangeOSidPriority ++ ++@Description Requests the Firmware to change the priority of an operating ++ system. Higher priority number equals higher priority on the ++ scheduling system. ++ ++@Input psDevInfo pointer to device info ++@Input ui32OSid The OSid whose priority is to be altered ++@Input ui32Priority The new priority number for the specified OSid ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32OSid, ++ IMG_UINT32 ui32Priority); ++ ++/*! ++******************************************************************************* ++@Function RGXFWHealthCheckCmd ++ ++@Description Ping the firmware to check if it is responsive. ++ ++@Input psDevInfo pointer to device info ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXFWHealthCheckCmd(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/*! ++******************************************************************************* ++@Function RGXFWSetFwOsState ++ ++@Description Requests the Firmware to change the guest OS Online states. ++ This should be initiated by the VMM when a guest VM comes ++ online or goes offline. If offline, the FW offloads any current ++ resource from that OSID. The request is repeated until the FW ++ has had time to free all the resources or has waited for ++ workloads to finish. ++ ++@Input psDevInfo pointer to device info ++@Input ui32OSid The Guest OSid whose state is being altered ++@Input eOSOnlineState The new state (Online or Offline) ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXFWSetFwOsState(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32OSid, ++ RGXFWIF_OS_STATE_CHANGE eOSOnlineState); ++ ++#if defined(SUPPORT_AUTOVZ) ++/*! ++******************************************************************************* ++@Function RGXUpdateAutoVzWdgToken ++ ++@Description If the driver-firmware connection is active, read the ++ firmware's watchdog token and copy its value back into the OS ++ token. This indicates to the firmware that this driver is alive ++ and responsive. 
++ ++@Input psDevInfo pointer to device info ++******************************************************************************/ ++void RGXUpdateAutoVzWdgToken(PVRSRV_RGXDEV_INFO *psDevInfo); ++#endif ++ ++/*! ++******************************************************************************* ++@Function RGXFWConfigPHR ++ ++@Description Configure the Periodic Hardware Reset functionality ++ ++@Input psDevInfo pointer to device info ++@Input ui32PHRMode desired PHR mode ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXFWConfigPHR(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32PHRMode); ++ ++/*! ++******************************************************************************* ++@Function RGXFWConfigWdg ++ ++@Description Configure the Safety watchdog trigger period ++ ++@Input psDevInfo pointer to device info ++@Input ui32WdgPeriodUs requested period in microseconds ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXFWConfigWdg(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32WdgPeriod); ++ ++/*! ++******************************************************************************* ++@Function RGXCheckFirmwareCCB ++ ++@Description Processes all commands that are found in the Firmware CCB. ++ ++@Input psDevInfo pointer to device ++ ++@Return None ++******************************************************************************/ ++void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/*! ++******************************************************************************* ++@Function RGXCheckForStalledClientContexts ++ ++@Description Checks all client contexts, for the device with device info ++ provided, to see if any are waiting for a fence to signal and ++ optionally force signalling of the fence for the context which ++ has been waiting the longest. ++ This function is called by RGXUpdateHealthStatus() and also ++ may be invoked from other trigger points. ++ ++@Input psDevInfo pointer to device info ++@Input bIgnorePrevious If IMG_TRUE, any stalled contexts will be ++ indicated immediately, rather than only ++ checking against any previous stalled contexts ++ ++@Return None ++******************************************************************************/ ++void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious); ++ ++/*! ++******************************************************************************* ++@Function RGXUpdateHealthStatus ++ ++@Description Tests a number of conditions which might indicate a fatal error ++ has occurred in the firmware. The result is stored in the ++ device node eHealthStatus. ++ ++@Input psDevNode Pointer to device node structure. ++@Input bCheckAfterTimePassed When TRUE, the function will also test ++ for firmware queues and polls not changing ++ since the previous test. ++ ++ Note: if not enough time has passed since the ++ last call, false positives may occur. ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode, ++ IMG_BOOL bCheckAfterTimePassed); ++ ++ ++PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM); ++ ++#if defined(SUPPORT_AUTOVZ) ++/*! 
++******************************************************************************* ++@Function RGXUpdateAutoVzWatchdog ++ ++@Description Updates AutoVz watchdog that maintains the fw-driver connection ++ ++@Input psDevNode Pointer to device node structure. ++******************************************************************************/ ++void RGXUpdateAutoVzWatchdog(PVRSRV_DEVICE_NODE* psDevNode); ++#endif /* SUPPORT_AUTOVZ */ ++ ++void DumpFWCommonContextInfo(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel); ++ ++/*! ++******************************************************************************* ++@Function AttachKickResourcesCleanupCtls ++ ++@Description Attaches the cleanup structures to a kick command so that ++ submission reference counting can be performed when the ++ firmware processes the command ++ ++@Output apsCleanupCtl Array of CleanupCtl structure pointers to populate. ++@Output pui32NumCleanupCtl Number of CleanupCtl structure pointers written out. ++@Input eDM Which data master is the subject of the command. ++@Input bKick TRUE if the client originally wanted to kick this DM. ++@Input psRTDataCleanup Optional RTData cleanup associated with the command. ++@Input psZBuffer Optional ZSBuffer associated with the command. ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl, ++ IMG_UINT32 *pui32NumCleanupCtl, ++ RGXFWIF_DM eDM, ++ IMG_BOOL bKick, ++ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, ++ RGX_ZSBUFFER_DATA *psZSBuffer, ++ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer); ++ ++/*! ++******************************************************************************* ++@Function RGXResetHWRLogs ++ ++@Description Resets the HWR Logs buffer ++ (the hardware recovery count is not reset) ++ ++@Input psDevNode Pointer to the device ++ ++@Return PVRSRV_ERROR PVRSRV_OK on success. ++ Otherwise, a PVRSRV error code ++******************************************************************************/ ++PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode); ++ ++/*! ++******************************************************************************* ++@Function RGXGetPhyAddr ++ ++@Description Get the physical address of a PMR at an offset within it ++ ++@Input psPMR PMR of the allocation ++@Input ui32LogicalOffset Logical offset ++ ++@Output psPhyAddr Physical address of the allocation ++ ++@Return PVRSRV_ERROR PVRSRV_OK on success. ++ Otherwise, a PVRSRV error code ++******************************************************************************/ ++PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR, ++ IMG_DEV_PHYADDR *psPhyAddr, ++ IMG_UINT32 ui32LogicalOffset, ++ IMG_UINT32 ui32Log2PageSize, ++ IMG_UINT32 ui32NumOfPages, ++ IMG_BOOL *bValid); ++ ++#if defined(PDUMP) ++/*! ++******************************************************************************* ++@Function RGXPdumpDrainKCCB ++ ++@Description Wait for the firmware to execute all the commands in the kCCB ++ ++@Input psDevInfo Pointer to the device ++@Input ui32WriteOffset Woff we have to POL for the Roff to be equal to ++ ++@Return PVRSRV_ERROR PVRSRV_OK on success. ++ Otherwise, a PVRSRV error code ++******************************************************************************/ ++PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32WriteOffset); ++#endif /* PDUMP */ ++ ++/*! 
++******************************************************************************* ++@Function RGXFwRawHeapAllocMap ++ ++@Description Register and maps to device, a raw firmware physheap ++ ++@Return PVRSRV_ERROR PVRSRV_OK on success. ++ Otherwise, a PVRSRV error code ++******************************************************************************/ ++PVRSRV_ERROR RGXFwRawHeapAllocMap(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32OSID, ++ IMG_DEV_PHYADDR sDevPAddr, ++ IMG_UINT64 ui64DevPSize); ++ ++/*! ++******************************************************************************* ++@Function RGXFwRawHeapUnmapFree ++ ++@Description Unregister and unmap from device, a raw firmware physheap ++******************************************************************************/ ++void RGXFwRawHeapUnmapFree(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32OSID); ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvHalt ++ ++@Description Halt the RISC-V FW core (required for certain operations ++ done through Debug Module) ++ ++@Input psDevInfo Pointer to device info ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvHalt(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvIsHalted ++ ++@Description Check if the RISC-V FW is halted ++ ++@Input psDevInfo Pointer to device info ++ ++@Return IMG_BOOL ++******************************************************************************/ ++IMG_BOOL RGXRiscvIsHalted(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvResume ++ ++@Description Resume the RISC-V FW core ++ ++@Input psDevInfo Pointer to device info ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvResume(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvReadReg ++ ++@Description Read a value from the given RISC-V register (GPR or CSR) ++ ++@Input psDevInfo Pointer to device info ++@Input ui32RegAddr RISC-V register address ++ ++@Output pui32Value Read value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvReadReg(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT32 *pui32Value); ++ ++/*! ++******************************************************************************* ++@Function RGXRiscvPollReg ++ ++@Description Poll for a value from the given RISC-V register (GPR or CSR) ++ ++@Input psDevInfo Pointer to device info ++@Input ui32RegAddr RISC-V register address ++@Input ui32Value Expected value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXRiscvPollReg(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT32 ui32Value); ++ ++/*! 
++*******************************************************************************
++@Function       RGXRiscvWriteReg
++
++@Description    Write a value to the given RISC-V register (GPR or CSR)
++
++@Input          psDevInfo       Pointer to device info
++@Input          ui32RegAddr     RISC-V register address
++@Input          ui32Value       Write value
++
++@Return         PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR RGXRiscvWriteReg(PVRSRV_RGXDEV_INFO *psDevInfo,
++                              IMG_UINT32 ui32RegAddr,
++                              IMG_UINT32 ui32Value);
++
++/*!
++*******************************************************************************
++@Function       RGXRiscvPollMem
++
++@Description    Poll for a value at the given address in RISC-V memory space
++
++@Input          psDevInfo       Pointer to device info
++@Input          ui32Addr        Address in RISC-V memory space
++@Input          ui32Value       Expected value
++
++@Return         PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR RGXRiscvPollMem(PVRSRV_RGXDEV_INFO *psDevInfo,
++                             IMG_UINT32 ui32Addr,
++                             IMG_UINT32 ui32Value);
++
++/*!
++*******************************************************************************
++@Function       RGXRiscvDmiOp
++
++@Description    Acquire the powerlock and perform an operation on the RISC-V
++                Debug Module Interface, but only if the GPU is powered on.
++
++@Input          psDevInfo       Pointer to device info
++@InOut          pui64DMI        Encoding of a request for the RISC-V Debug
++                                Module with same format as the 'dmi' register
++                                from the RISC-V debug specification (v0.13+).
++                                On return, this is updated with the result of
++                                the request, encoded the same way.
++
++@Return         PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR RGXRiscvDmiOp(PVRSRV_RGXDEV_INFO *psDevInfo,
++                           IMG_UINT64 *pui64DMI);
++
++/*!
++*******************************************************************************
++@Function       RGXReadFWModuleAddr
++
++@Description    Read a value at the given address in META or RISC-V memory
++                space
++
++@Input          psDevInfo       Pointer to device info
++@Input          ui32Addr        Address in META or RISC-V memory space
++
++@Output         pui32Value      Read value
++
++@Return         PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR RGXReadFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                 IMG_UINT32 ui32Addr,
++                                 IMG_UINT32 *pui32Value);
++
++/*!
++*******************************************************************************
++@Function       RGXWriteFWModuleAddr
++
++@Description    Write a value to the given address in META or RISC-V memory
++                space
++
++@Input          psDevInfo       Pointer to device info
++@Input          ui32MemAddr     Address in META or RISC-V memory space
++@Input          ui32Value       Write value
++
++@Return         PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR RGXWriteFWModuleAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                  IMG_UINT32 ui32MemAddr,
++                                  IMG_UINT32 ui32Value);
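
To clarify the pui64DMI encoding used by RGXRiscvDmiOp above, here is a small standalone sketch (an editorial addition, not part of the patch). The bit positions follow the DMI_OP/DMI_DATA/DMI_ADDRESS shifts used by the implementation (op in bits [1:0], data in bits [33:2], DM register address in bits [39:34]); the helper names are hypothetical, and 0x11 (dmstatus) is just a sample Debug Module register from the RISC-V debug specification.

#include <stdint.h>

enum { DMI_OP_NOP = 0, DMI_OP_READ = 1, DMI_OP_WRITE = 2 };

/* Pack a DMI request the way RGXRiscvDmiOp expects it */
static uint64_t dmi_pack(uint32_t op, uint32_t dm_addr, uint32_t data)
{
	return ((uint64_t)(op & 0x3u))              |   /* bits [1:0]   */
	       ((uint64_t)data << 2)                |   /* bits [33:2]  */
	       ((uint64_t)(dm_addr & 0x3Fu) << 34);     /* bits [39:34] */
}

/* On return, bits [1:0] hold the status (0 = success, 2 = failed)
 * and bits [33:2] hold the data read back, if any */
static uint32_t dmi_status(uint64_t dmi) { return (uint32_t)(dmi & 0x3u); }
static uint32_t dmi_data(uint64_t dmi)   { return (uint32_t)((dmi >> 2) & 0xFFFFFFFFu); }

/* Usage sketch, reading dmstatus (DM register 0x11):
 *   IMG_UINT64 ui64Dmi = dmi_pack(DMI_OP_READ, 0x11, 0);
 *   eError = RGXRiscvDmiOp(psDevInfo, &ui64Dmi);
 *   if (dmi_status(ui64Dmi) == 0) { value = dmi_data(ui64Dmi); }
 */

++
++/*!
++*******************************************************************************
++@Function       RGXGetFwMapping
++
++@Description    Retrieve any of the CPU Physical Address, Device Physical
++                Address or the raw value of the page table entry associated
++                with the firmware virtual address given.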
++ ++@Input psDevInfo Pointer to device info ++@Input ui32FwVA The Fw VA that needs decoding ++@Output psCpuPA Pointer to the resulting CPU PA ++@Output psDevPA Pointer to the resulting Dev PA ++@Output pui64RawPTE Pointer to the raw Page Table Entry value ++ ++@Return PVRSRV_ERROR ++******************************************************************************/ ++PVRSRV_ERROR RGXGetFwMapping(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32FwVA, ++ IMG_CPU_PHYADDR *psCpuPA, ++ IMG_DEV_PHYADDR *psDevPA, ++ IMG_UINT64 *pui64RawPTE); ++ ++#if defined(SUPPORT_AUTOVZ_HW_REGS) && !defined(SUPPORT_AUTOVZ) ++#error "VZ build configuration error: use of OS scratch registers supported only in AutoVz drivers." ++#endif ++ ++#if defined(SUPPORT_AUTOVZ_HW_REGS) ++/* AutoVz with hw support */ ++#define KM_GET_FW_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH3) ++#define KM_GET_OS_CONNECTION(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2) ++#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH2, RGXFW_CONNECTION_OS_##val) ++ ++#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH1) ++#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0) ++#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OS0_SCRATCH0, val) ++ ++#else ++ ++#if defined(SUPPORT_AUTOVZ) ++#define KM_GET_FW_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveFwToken) ++#define KM_GET_OS_ALIVE_TOKEN(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken) ++#define KM_SET_OS_ALIVE_TOKEN(val, psDevInfo) OSWriteDeviceMem32WithWMB((volatile IMG_UINT32 *) &psDevInfo->psRGXFWIfConnectionCtl->ui32AliveOsToken, val) ++#endif /* defined(SUPPORT_AUTOVZ) */ ++ ++#if !defined(NO_HARDWARE) && (defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1))) ++/* native, static-vz and AutoVz using shared memory */ ++#define KM_GET_FW_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionFwState) ++#define KM_GET_OS_CONNECTION(psDevInfo) (psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState) ++#define KM_SET_OS_CONNECTION(val, psDevInfo) OSWriteDeviceMem32WithWMB((void*)&psDevInfo->psRGXFWIfConnectionCtl->eConnectionOsState, RGXFW_CONNECTION_OS_##val) ++#else ++/* dynamic-vz & nohw */ ++#define KM_GET_FW_CONNECTION(psDevInfo) (RGXFW_CONNECTION_FW_ACTIVE) ++#define KM_GET_OS_CONNECTION(psDevInfo) (RGXFW_CONNECTION_OS_ACTIVE) ++#define KM_SET_OS_CONNECTION(val, psDevInfo) ++#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) || (RGX_NUM_OS_SUPPORTED == 1) */ ++#endif /* defined(SUPPORT_AUTOVZ_HW_REGS) */ ++ ++#if defined(SUPPORT_AUTOVZ) ++#define RGX_FIRST_RAW_HEAP_OSID RGXFW_HOST_OS ++#else ++#define RGX_FIRST_RAW_HEAP_OSID RGXFW_GUEST_OSID_START ++#endif ++ ++#define KM_OS_CONNECTION_IS(val, psDevInfo) (KM_GET_OS_CONNECTION(psDevInfo) == RGXFW_CONNECTION_OS_##val) ++#define KM_FW_CONNECTION_IS(val, psDevInfo) (KM_GET_FW_CONNECTION(psDevInfo) == RGXFW_CONNECTION_FW_##val) ++ ++#endif /* RGXFWUTILS_H */ ++/****************************************************************************** ++ End of file (rgxfwutils.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxheapconfig.h b/drivers/gpu/drm/img-rogue/rgxheapconfig.h +new file mode 100644 +index 
000000000000..abb63084acef +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxheapconfig.h +@@ -0,0 +1,290 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Device virtual memory map ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Memory heaps device specific configuration ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#ifndef RGXHEAPCONFIG_H
++#define RGXHEAPCONFIG_H
++
++#include "rgxdefs_km.h"
++
++
++#define RGX_HEAP_SIZE_4KiB IMG_UINT64_C(0x0000001000)
++#define RGX_HEAP_SIZE_64KiB IMG_UINT64_C(0x0000010000)
++#define RGX_HEAP_SIZE_256KiB IMG_UINT64_C(0x0000040000)
++
++#define RGX_HEAP_SIZE_1MiB IMG_UINT64_C(0x0000100000)
++#define RGX_HEAP_SIZE_2MiB IMG_UINT64_C(0x0000200000)
++#define RGX_HEAP_SIZE_4MiB IMG_UINT64_C(0x0000400000)
++#define RGX_HEAP_SIZE_16MiB IMG_UINT64_C(0x0001000000)
++#define RGX_HEAP_SIZE_256MiB IMG_UINT64_C(0x0010000000)
++
++#define RGX_HEAP_SIZE_1GiB IMG_UINT64_C(0x0040000000)
++#define RGX_HEAP_SIZE_2GiB IMG_UINT64_C(0x0080000000)
++#define RGX_HEAP_SIZE_4GiB IMG_UINT64_C(0x0100000000)
++#define RGX_HEAP_SIZE_16GiB IMG_UINT64_C(0x0400000000)
++#define RGX_HEAP_SIZE_32GiB IMG_UINT64_C(0x0800000000)
++#define RGX_HEAP_SIZE_64GiB IMG_UINT64_C(0x1000000000)
++#define RGX_HEAP_SIZE_128GiB IMG_UINT64_C(0x2000000000)
++#define RGX_HEAP_SIZE_256GiB IMG_UINT64_C(0x4000000000)
++#define RGX_HEAP_SIZE_512GiB IMG_UINT64_C(0x8000000000)
++
++/*
++ RGX Device Virtual Address Space Definitions
++
++ This file defines the RGX virtual address heaps that are used in
++ application memory contexts. It also shows where the Firmware memory heap
++ fits into this, but the firmware heap is only ever created in the
++ Services KM/server component.
++
++ RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed,
++ on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
++ respectively. Therefore if clients use multiple configs they must still
++ be consistent with their definitions for these heaps.
++
++ Shared virtual memory (GENERAL_SVM) support requires half of the address
++ space (512 GiB) to be reserved for SVM allocations to mirror application
++ CPU addresses. However, if the BRN_65273 WA is active, the SVM heap is
++ disabled. This is reflected in the device connection capability bits
++ returned to user space.
++
++ The GENERAL non-SVM region is 512 GiB to 768 GiB and is shared between the
++ general (4KiB) heap and the general non-4K heap. The first 128 GiB is used
++ for the GENERAL_HEAP (4KiB) and the last 32 GiB is used for the
++ GENERAL_NON4K_HEAP. The latter heap has a default page-size of 16K.
++ The AppHint PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE can be used to force
++ it to one of these values: 4K, 64K, 256K, 1M, 2M.
++
++ The heaps defined for BRN_65273 _replace_ the non-BRN equivalents below
++ when this BRN WA is active on affected cores. This is different to most
++ other BRNs and hence has been given its own header file for clarity,
++ see below. This is a special case; other BRNs that need 1 or 2 additional
++ heaps should be added to this file, like BRN_63142 below.
++ NOTE: All regular heaps below that are larger than 1 GiB require a
++ BRN_65273 WA heap.
++
++ Base addresses have to be a multiple of 4MiB.
++ Heaps must not start at 0x0000000000, as this is reserved for internal
++ use within the device memory layer.
++ Range comments (those starting in column 0 below) act as section headings
++ for the heaps in that range. Often the comment gives the reserved size of
++ the heap within the range.
++*/
++
++/* This BRN requires a different virtual memory map from the standard one
++ * defined in this file below. Hence the alternative heap definitions for this
++ * BRN are provided in a separate file for clarity. */
++#include "rgxheapconfig_65273.h"
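++
++/* Illustrative sketch (an assumption, not existing driver code): the 4MiB
++ * base-alignment rule described above could be checked at compile time with
++ * the size constants defined earlier, e.g.
++ *
++ *   static_assert((IMG_UINT64_C(0x8000000000) & (RGX_HEAP_SIZE_4MiB - 1)) == 0,
++ *                 "heap bases must be a multiple of 4MiB");
++ */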
++
++
++/* 0x00_0000_0000 ************************************************************/
++
++/* 0x00_0000_0000 - 0x00_0040_0000 **/
++ /* 0 MiB to 4 MiB, size of 4 MiB : RESERVED **/
++
++ /* BRN_65273 TQ3DPARAMETERS base 0x0000010000 */
++ /* BRN_65273 GENERAL base 0x65C0000000 */
++ /* BRN_65273 GENERAL_NON4K base 0x73C0000000 */
++
++/* 0x00_0040_0000 - 0x7F_FFC0_0000 **/
++ /* 4 MiB to 512 GiB, size of 512 GiB less 4 MiB : GENERAL_SVM_HEAP **/
++ #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000)
++ #define RGX_GENERAL_SVM_HEAP_SIZE (RGX_HEAP_SIZE_512GiB - RGX_HEAP_SIZE_4MiB)
++
++
++/* 0x80_0000_0000 ************************************************************/
++
++/* 0x80_0000_0000 - 0x9F_FFFF_FFFF **/
++ /* 512 GiB to 640 GiB, size of 128 GiB : GENERAL_HEAP **/
++ #define RGX_GENERAL_HEAP_BASE IMG_UINT64_C(0x8000000000)
++ #define RGX_GENERAL_HEAP_SIZE RGX_HEAP_SIZE_128GiB
++
++ /* BRN_65273 PDSCODEDATA base 0xA800000000 */
++
++/* 0xA0_0000_0000 - 0xAF_FFFF_FFFF **/
++ /* 640 GiB to 704 GiB, size of 64 GiB : FREE **/
++
++/* 0xB0_0000_0000 - 0xB7_FFFF_FFFF **/
++ /* 704 GiB to 736 GiB, size of 32 GiB : FREE **/
++
++ /* BRN_65273 USCCODE base 0xBA00000000 */
++
++/* 0xB8_0000_0000 - 0xBF_FFFF_FFFF **/
++ /* 736 GiB to 768 GiB, size of 32 GiB : GENERAL_NON4K_HEAP **/
++ #define RGX_GENERAL_NON4K_HEAP_BASE IMG_UINT64_C(0xB800000000)
++ #define RGX_GENERAL_NON4K_HEAP_SIZE RGX_HEAP_SIZE_32GiB
++
++
++/* 0xC0_0000_0000 ************************************************************/
++
++/* 0xC0_0000_0000 - 0xD9_FFFF_FFFF **/
++ /* 768 GiB to 872 GiB, size of 104 GiB : FREE **/
++
++/* 0xDA_0000_0000 - 0xDA_FFFF_FFFF **/
++ /* 872 GiB to 876 GiB, size of 4 GiB : PDSCODEDATA_HEAP **/
++ #define RGX_PDSCODEDATA_HEAP_BASE IMG_UINT64_C(0xDA00000000)
++ #define RGX_PDSCODEDATA_HEAP_SIZE RGX_HEAP_SIZE_4GiB
++
++/* 0xDB_0000_0000 - 0xDB_FFFF_FFFF **/
++ /* 876 GiB to 880 GiB, size of 256 MiB (reserved 4GiB) : BRN **/
++ /* HWBRN63142 workaround requires Region Header memory to be at the top
++ of a 16GiB aligned range. This is so when masked with 0x03FFFFFFFF the
++ address will avoid aliasing PB addresses. Start at 879.75GiB. Size of 256MiB. */
++ #define RGX_RGNHDR_BRN_63142_HEAP_BASE IMG_UINT64_C(0xDBF0000000)
++ #define RGX_RGNHDR_BRN_63142_HEAP_SIZE RGX_HEAP_SIZE_256MiB
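++
++ /* Worked example of the masking rule above (illustrative only):
++ 0xDBF0000000 & 0x03FFFFFFFF == 0x03F0000000, and 0x03F0000000 plus
++ 256MiB is exactly 16GiB, so the heap occupies the top 256MiB of its
++ 16GiB-aligned range and masked addresses cannot alias PB addresses. */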
++
++/* 0xDC_0000_0000 - 0xDF_FFFF_FFFF **/
++ /* 880 GiB to 896 GiB, size of 16 GiB : FREE **/
++
++/* 0xE0_0000_0000 - 0xE0_FFFF_FFFF **/
++ /* 896 GiB to 900 GiB, size of 4 GiB : USCCODE_HEAP **/
++ #define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xE000000000)
++ #define RGX_USCCODE_HEAP_SIZE RGX_HEAP_SIZE_4GiB
++
++/* 0xE1_0000_0000 - 0xE1_BFFF_FFFF **/
++ /* 900 GiB to 903 GiB, size of 3 GiB : RESERVED **/
++
++/* 0xE1_C000_0000 - 0xE1_FFFF_FFFF **/
++ /* 903 GiB to 904 GiB, reserved 1 GiB : FIRMWARE_HEAP **/
++
++ /* Firmware heaps are defined in rgx_heap_firmware.h as they are not
++ present in application memory contexts, see:
++ RGX_FIRMWARE_RAW_HEAP_BASE
++ RGX_FIRMWARE_RAW_HEAP_SIZE
++ See that header for details of the other sub-heaps.
++ */
++
++/* 0xE2_0000_0000 - 0xE3_FFFF_FFFF **/
++ /* 904 GiB to 912 GiB, size of 8 GiB : FREE **/
++
++ /* BRN_65273 VISIBILITY_TEST base 0xE400000000 */
++
++/* 0xE4_0000_0000 - 0xE7_FFFF_FFFF **/
++ /* 912 GiB to 928 GiB, size 16 GiB : TQ3DPARAMETERS_HEAP **/
++ /* Aligned to match RGX_CR_ISP_PIXEL_BASE at 16 GiB */
++ #define RGX_TQ3DPARAMETERS_HEAP_BASE IMG_UINT64_C(0xE400000000)
++ #define RGX_TQ3DPARAMETERS_HEAP_SIZE RGX_HEAP_SIZE_16GiB
++
++/* 0xE8_0000_0000 - 0xE8_FFFF_FFFF **/
++ /* 928 GiB to 932 GiB, size of 4 GiB : FREE **/
++
++/* 0xE9_0000_0000 - 0xE9_3FFF_FFFF **/
++ /* 932 GiB to 933 GiB, size of 1 GiB : VK_CAPT_REPLAY_HEAP **/
++ #define RGX_VK_CAPT_REPLAY_HEAP_BASE IMG_UINT64_C(0xE900000000)
++ #define RGX_VK_CAPT_REPLAY_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++
++/* 0xE9_4000_0000 - 0xE9_FFFF_FFFF **/
++ /* 933 GiB to 936 GiB, size of 3 GiB : FREE **/
++
++/* 0xEA_0000_0000 - 0xEA_0000_0FFF **/
++ /* 936 GiB to 937 GiB, size of min heap size : SIGNALS_HEAP **/
++ /* CDM Signals heap (31 signals less one reserved for Services).
++ * Size 960B rounded up to minimum heap size */ ++ #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000) ++ #define RGX_SIGNALS_HEAP_SIZE DEVMEM_HEAP_MINIMUM_SIZE ++ ++/* 0xEA_4000_0000 - 0xEA_FFFF_FFFF **/ ++ /* 937 GiB to 940 GiB, size of 3 GiB : FREE **/ ++ ++/* 0xEB_0000_0000 - 0xEB_FFFF_FFFF **/ ++ /* 940 GiB to 944 GiB, size of 4 GiB : RESERVED VOLCANIC **/ ++ ++/* 0xEC_0000_0000 - 0xEC_001F_FFFF **/ ++ /* 944 GiB to 945 GiB, size 2 MiB : FBCDC_HEAP **/ ++ #define RGX_FBCDC_HEAP_BASE IMG_UINT64_C(0xEC00000000) ++ #define RGX_FBCDC_HEAP_SIZE RGX_HEAP_SIZE_2MiB ++ ++/* 0xEC_4000_0000 - 0xEC_401F_FFFF **/ ++ /* 945 GiB to 946 GiB, size 2 MiB : FBCDC_LARGE_HEAP **/ ++ #define RGX_FBCDC_LARGE_HEAP_BASE IMG_UINT64_C(0xEC40000000) ++ #define RGX_FBCDC_LARGE_HEAP_SIZE RGX_HEAP_SIZE_2MiB ++ ++/* 0xEC_8000_0000 - 0xED_FFFF_FFFF **/ ++ /* 946 GiB to 952 GiB, size of 6 GiB : RESERVED VOLCANIC **/ ++ ++/* 0xEE_0000_0000 - 0xEE_3FFF_FFFF **/ ++ /* 952 GiB to 953 GiB, size of 1 GiB : CMP_MISSION_RMW_HEAP **/ ++ #define RGX_CMP_MISSION_RMW_HEAP_BASE IMG_UINT64_C(0xEE00000000) ++ #define RGX_CMP_MISSION_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB ++ ++/* 0xEE_4000_0000 - 0xEE_FFFF_FFFF **/ ++ /* 953 GiB to 956 GiB, size of 3 GiB : RESERVED **/ ++ ++/* 0xEF_0000_0000 - 0xEF_3FFF_FFFF **/ ++ /* 956 GiB to 957 GiB, size of 1 GiB : CMP_SAFETY_RMW_HEAP **/ ++ #define RGX_CMP_SAFETY_RMW_HEAP_BASE IMG_UINT64_C(0xEF00000000) ++ #define RGX_CMP_SAFETY_RMW_HEAP_SIZE RGX_HEAP_SIZE_1GiB ++ ++/* 0xEF_4000_0000 - 0xEF_FFFF_FFFF **/ ++ /* 957 GiB to 960 GiB, size of 3 GiB : RESERVED **/ ++ ++/* 0xF0_0000_0000 - 0xF0_FFFF_FFFF **/ ++ /* 960 GiB to 964 GiB, size of 4 GiB : TEXTURE_STATE_HEAP (36-bit aligned) */ ++ #define RGX_TEXTURE_STATE_HEAP_BASE IMG_UINT64_C(0xF000000000) ++ #define RGX_TEXTURE_STATE_HEAP_SIZE RGX_HEAP_SIZE_4GiB ++ ++/* 0xF1_0000_0000 - 0xF1_FFFF_FFFF **/ ++ /* 964 GiB to 968 GiB, size of 4 GiB : FREE **/ ++ ++/* 0xF2_0000_0000 - 0xF2_001F_FFFF **/ ++ /* 968 GiB to 969 GiB, size of 2 MiB : VISIBILITY_TEST_HEAP **/ ++ #define RGX_VISIBILITY_TEST_HEAP_BASE IMG_UINT64_C(0xF200000000) ++ #define RGX_VISIBILITY_TEST_HEAP_SIZE RGX_HEAP_SIZE_2MiB ++ ++/* 0xF2_4000_0000 - 0xF2_FFFF_FFFF **/ ++ /* 969 GiB to 972 GiB, size of 3 GiB : FREE **/ ++ ++ /* BRN_65273 MMU_INIA base 0xF800000000 */ ++ /* BRN_65273 MMU_INIB base 0xF900000000 */ ++ ++/* 0xF3_0000_0000 - 0xFF_FFFF_FFFF **/ ++ /* 972 GiB to 1024 GiB, size of 52 GiB : FREE **/ ++ ++ ++ ++/* 0xFF_FFFF_FFFF ************************************************************/ ++ ++/* End of RGX Device Virtual Address Space definitions */ ++ ++#endif /* RGXHEAPCONFIG_H */ ++ ++/****************************************************************************** ++ End of file (rgxheapconfig.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxheapconfig_65273.h b/drivers/gpu/drm/img-rogue/rgxheapconfig_65273.h +new file mode 100644 +index 000000000000..31f90fee9d42 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxheapconfig_65273.h +@@ -0,0 +1,124 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Device virtual memory map for BRN_65273. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Memory heaps device specific configuration ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef RGXHEAPCONFIG_65273_H
++#define RGXHEAPCONFIG_65273_H
++
++/*
++ RGX Device Virtual Address Space Definitions
++
++ This file defines the RGX virtual address replacement heaps that are used
++ in application memory contexts for BRN_65273.
++
++ The heaps defined for BRN_65273 _replace_ the non-BRN equivalents when this
++ BRN WA is active on affected cores. This is different to most other BRNs
++ and hence has been given its own header file for clarity. The SVM_HEAP is
++ also disabled and unavailable when the WA is active. This is reflected
++ in the device connection capability bits returned to user space.
++ NOTE: All regular heaps in rgxheapconfig.h larger than 1 GiB require
++ a BRN_65273 WA heap.
++
++ Base addresses must be a multiple of 4MiB.
++ Heaps must not start at 0x0000000000, as this is reserved for internal
++ use within the device memory layer.
++ Range comments (those starting in column 0 below) act as section headings
++ for the heaps in that range.
++*/
++
++
++/* 0x00_0000_0000 ************************************************************/
++
++/* 0x00_0001_0000 - 0x00_3FFF_FFFF **/
++ /* HWBRN65273 workaround requires TQ memory to start at 64 KiB and use a
++ * unique single 0.99GiB PCE entry. */
++ #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE IMG_UINT64_C(0x0000010000)
++ #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE (RGX_HEAP_SIZE_1GiB - RGX_HEAP_SIZE_64KiB)
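++
++ /* Worked size check (illustrative, not driver code): base plus size is
++ 0x0000010000 + (0x0040000000 - 0x0000010000) == 0x0040000000, i.e. the
++ heap ends exactly on the 1GiB boundary covered by its single PCE entry. */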
++
++/* 0x65_C000_0000 - 0x66_3FFF_FFFF **/
++ /* HWBRN65273 workaround requires General Heap to use a unique PCE entry for each GiB in range */
++ #define RGX_GENERAL_BRN_65273_HEAP_BASE IMG_UINT64_C(0x65C0000000)
++ #define RGX_GENERAL_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2GiB
++
++/* 0x73_C000_0000 - 0x74_3FFF_FFFF **/
++ /* HWBRN65273 workaround requires Non4K memory to use a unique PCE entry for each GiB in range */
++ #define RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE IMG_UINT64_C(0x73C0000000)
++ #define RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2GiB
++
++
++/* 0x80_0000_0000 ************************************************************/
++
++/* 0xA8_0000_0000 - 0xA8_3FFF_FFFF **/
++ /* HWBRN65273 workaround requires PDS memory to use a unique single 1GiB PCE entry. */
++ #define RGX_PDSCODEDATA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xA800000000)
++ #define RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++
++/* 0xBA_0000_0000 - 0xBA_3FFF_FFFF **/
++ /* HWBRN65273 workaround requires USC memory to use a unique single 1GiB PCE entry. */
++ #define RGX_USCCODE_BRN_65273_HEAP_BASE IMG_UINT64_C(0xBA00000000)
++ #define RGX_USCCODE_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++
++
++/* 0xC0_0000_0000 ************************************************************/
++
++/* 0xE4_0000_0000 - 0xE4_001F_FFFF **/
++ /* HWBRN65273 workaround requires Visibility Test memory to use a unique single 1GiB PCE entry. */
++ #define RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE IMG_UINT64_C(0xE400000000)
++ #define RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_2MiB
++
++/* 0xF8_0000_0000 - 0xF9_FFFF_FFFF **/
++ /* HWBRN65273 workaround requires two Region Header buffers 4GiB apart. */
++ #define RGX_MMU_INIA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF800000000)
++ #define RGX_MMU_INIA_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++ #define RGX_MMU_INIB_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF900000000)
++ #define RGX_MMU_INIB_BRN_65273_HEAP_SIZE RGX_HEAP_SIZE_1GiB
++
++
++/* 0xFF_FFFF_FFFF ************************************************************/
++
++/* End of RGX Device Virtual Address Space definitions */
++
++#endif /* RGXHEAPCONFIG_65273_H */
++
++/******************************************************************************
++ End of file (rgxheapconfig_65273.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/rgxhwperf.c b/drivers/gpu/drm/img-rogue/rgxhwperf.c
+new file mode 100644
+index 000000000000..a6e2dd420eaf
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxhwperf.c
+@@ -0,0 +1,694 @@
++/*************************************************************************/ /*!
++@File
++@Title RGX HW Performance implementation
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description RGX HW Performance implementation
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ /**************************************************************************/ ++ ++//#define PVR_DPF_FUNCTION_TRACE_ON 1 ++#undef PVR_DPF_FUNCTION_TRACE_ON ++ ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "rgxdevice.h" ++#include "pvrsrv_error.h" ++#include "pvr_notifier.h" ++#include "osfunc.h" ++#include "allocmem.h" ++ ++#include "pvrsrv.h" ++#include "pvrsrv_tlstreams.h" ++#include "pvrsrv_tlcommon.h" ++#include "tlclient.h" ++#include "tlstream.h" ++ ++#include "rgxhwperf.h" ++#include "rgxapi_km.h" ++#include "rgxfwutils.h" ++#include "rgxtimecorr.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "pdump_km.h" ++#include "pvrsrv_apphint.h" ++#include "process_stats.h" ++#include "rgx_hwperf_table.h" ++#include "rgxinit.h" ++ ++#include "info_page_defs.h" ++ ++/* This is defined by default to enable producer callbacks. ++ * Clients of the TL interface can disable the use of the callback ++ * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. 
*/ ++#define SUPPORT_TL_PRODUCER_CALLBACK 1 ++ ++/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */ ++#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT) ++ ++/* Defines size of buffers returned from acquire/release calls */ ++#define FW_STREAM_BUFFER_SIZE (0x80000) ++#define HOST_STREAM_BUFFER_SIZE (0x20000) ++ ++/* Must be at least as large as two tl packets of maximum size */ ++static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), ++ "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); ++static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), ++ "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); ++ ++static inline IMG_UINT32 ++RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, ++ IMG_UINT32 ui32AllowedSize, ++ RGX_PHWPERF_V2_PACKET_HDR psCurPkt ) ++{ ++ IMG_UINT32 sizeSum = 0; ++ ++ /* Traverse the array to find how many packets will fit in the available space. */ ++ while ( sizeSum < ui32BytesExp && ++ sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize ) ++ { ++ sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt); ++ psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt); ++ } ++ ++ return sizeSum; ++} ++ ++static inline void ++RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo, ++ IMG_BOOL bIsReaderConnected) ++{ ++ if (!bIsReaderConnected) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but host buffer for FW events is full " ++ "and no reader is currently connected, suspending event collection. " ++ "Connect a reader or restart driver to avoid event loss.", __func__)); ++ psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE; ++ } ++} ++ ++/****************************************************************************** ++ * RGX HW Performance Profiling Server API(s) ++ *****************************************************************************/ ++ ++static IMG_BOOL RGXServerFeatureFlagsToHWPerfFlagsAddBlock( ++ RGX_HWPERF_BVNC_BLOCK * const psBlocks, ++ IMG_UINT16 * const pui16Count, ++ const IMG_UINT16 ui16BlockID, /* see RGX_HWPERF_CNTBLK_ID */ ++ const IMG_UINT16 ui16NumCounters, ++ const IMG_UINT16 ui16NumBlocks) ++{ ++ const IMG_UINT16 ui16Count = *pui16Count; ++ ++ if (ui16Count < RGX_HWPERF_MAX_BVNC_BLOCK_LEN) ++ { ++ RGX_HWPERF_BVNC_BLOCK * const psBlock = &psBlocks[ui16Count]; ++ ++ /* If the GROUP is non-zero, convert from e.g. RGX_CNTBLK_ID_USC0 to RGX_CNTBLK_ID_USC_ALL. The table stores the former (plus the ++ number of blocks and counters) but PVRScopeServices expects the latter (plus the number of blocks and counters). The conversion ++ could always be moved to PVRScopeServices, but it's less code this way. */ ++ psBlock->ui16BlockID = (ui16BlockID & RGX_CNTBLK_ID_GROUP_MASK) ? 
(ui16BlockID | RGX_CNTBLK_ID_UNIT_ALL_MASK) : ui16BlockID; ++ if ((ui16BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK) ++ { ++ psBlock->ui16NumCounters = RGX_CNTBLK_COUNTERS_MAX; ++ } ++ else ++ { ++ psBlock->ui16NumCounters = ui16NumCounters; ++ } ++ psBlock->ui16NumBlocks = ui16NumBlocks; ++ ++ *pui16Count = ui16Count + 1; ++ return IMG_TRUE; ++ } ++ return IMG_FALSE; ++} ++ ++PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_HWPERF_BVNC *psBVNC) ++{ ++ IMG_PCHAR pszBVNC; ++ PVR_LOG_RETURN_IF_FALSE((NULL != psDevInfo), "psDevInfo invalid", PVRSRV_ERROR_INVALID_PARAMS); ++ ++ if ((pszBVNC = RGXDevBVNCString(psDevInfo))) ++ { ++ size_t uiStringLength = OSStringNLength(pszBVNC, RGX_HWPERF_MAX_BVNC_LEN - 1); ++ OSStringLCopy(psBVNC->aszBvncString, pszBVNC, uiStringLength + 1); ++ memset(&psBVNC->aszBvncString[uiStringLength], 0, RGX_HWPERF_MAX_BVNC_LEN - uiStringLength); ++ } ++ else ++ { ++ *psBVNC->aszBvncString = 0; ++ } ++ ++ psBVNC->ui32BvncKmFeatureFlags = 0x0; ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)) ++ { ++ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG; ++ } ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) ++ { ++ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG; ++ } ++#endif ++#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)) ++ { ++ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG; ++ } ++#endif ++#if defined(RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERF_COUNTER_BATCH)) ++ { ++ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG; ++ } ++#endif ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE)) ++ { ++ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_ROGUEXE_FLAG; ++ } ++#if defined(RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, DUST_POWER_ISLAND_S7)) ++ { ++ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG; ++ } ++#endif ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE)) ++ { ++ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG; ++ } ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT)) ++ { ++ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_MULTICORE_FLAG; ++ } ++ ++#ifdef SUPPORT_WORKLOAD_ESTIMATION ++ /* Not a part of BVNC feature line and so doesn't need the feature supported check */ ++ psBVNC->ui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION; ++#endif ++ ++ /* Define the HW counter block counts. */ ++ { ++ RGX_HWPERF_BVNC_BLOCK * const psBlocks = psBVNC->aBvncBlocks; ++ IMG_UINT16 * const pui16Count = &psBVNC->ui16BvncBlocks; ++ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel; ++ const IMG_UINT32 ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel); ++ IMG_UINT32 ui32BlkCfgIdx; ++ size_t uiCount; ++ IMG_BOOL bOk = IMG_TRUE; ++ ++ // Initialise to zero blocks ++ *pui16Count = 0; ++ ++ // Add all the blocks ++ for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++) ++ { ++ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL * const psCntBlkInfo = &asCntBlkTypeModel[ui32BlkCfgIdx]; ++ RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo; ++ /* psCntBlkInfo->ui8NumUnits gives compile-time info. 
For BVNC agnosticism, we use this: */ ++ if (psCntBlkInfo->pfnIsBlkPresent(psCntBlkInfo, psDevInfo, &sCntBlkRtInfo)) ++ { ++ bOk &= RGXServerFeatureFlagsToHWPerfFlagsAddBlock(psBlocks, pui16Count, psCntBlkInfo->ui32CntBlkIdBase, psCntBlkInfo->ui8NumCounters, sCntBlkRtInfo.ui32NumUnits); ++ } ++ } ++ ++ /* If this fails, consider why the static_assert didn't fail, and consider increasing RGX_HWPERF_MAX_BVNC_BLOCK_LEN */ ++ PVR_ASSERT(bOk); ++ ++ // Zero the remaining entries ++ uiCount = *pui16Count; ++ OSDeviceMemSet(&psBlocks[uiCount], 0, (RGX_HWPERF_MAX_BVNC_BLOCK_LEN - uiCount) * sizeof(*psBlocks)); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ PVRSRVRGXConfigMuxHWPerfCountersKM ++ */ ++PVRSRV_ERROR PVRSRVRGXConfigMuxHWPerfCountersKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32ArrayLen, ++ RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigs) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sKccbCmd; ++ DEVMEM_MEMDESC* psFwBlkConfigsMemDesc; ++ RGX_HWPERF_CONFIG_MUX_CNTBLK* psFwArray; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ PVRSRV_RGXDEV_INFO *psDevice; ++ ++ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ psDevice = psDeviceNode->pvDevice; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ PVR_LOG_RETURN_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ PVR_DPF_ENTERED; ++ ++ /* Fill in the command structure with the parameters needed ++ */ ++ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS; ++ sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen; ++ ++ /* used for passing counters config to the Firmware, write-only for the CPU */ ++ eError = DevmemFwAllocate(psDevice, ++ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)*ui32ArrayLen, ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), ++ "FwHWPerfCountersConfigBlock", ++ &psFwBlkConfigsMemDesc); ++ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); ++ ++ eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs, ++ psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); ++ ++ eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2); ++ ++ OSCachedMemCopyWMB(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)*ui32ArrayLen); ++ DevmemPDumpLoadMem(psFwBlkConfigsMemDesc, ++ 0, ++ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)*ui32ArrayLen, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM parameters set, calling FW"));*/ ++ ++ /* Ask the FW to carry out the HWPerf configuration command ++ */ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, ++ RGXFWIF_DM_GP, ++ &sKccbCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2); ++ ++ /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM command scheduled for FW"));*/ 
++ ++ /* Wait for FW to complete */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3); ++ ++ /* Release temporary memory used for block configuration ++ */ ++ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); ++ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); ++ DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); ++ ++ /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigMuxHWPerfCountersKM firmware completed"));*/ ++ ++ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen)); ++ ++ PVR_DPF_RETURN_OK; ++ ++fail3: ++ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); ++fail2: ++ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); ++fail1: ++ DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++ ++/* ++ PVRSRVRGXConfigCustomCountersReadingHWPerfKM ++ */ ++PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT16 ui16CustomBlockID, ++ IMG_UINT16 ui16NumCustomCounters, ++ IMG_UINT32 * pui32CustomCounterIDs) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sKccbCmd; ++ DEVMEM_MEMDESC* psFwSelectCntrsMemDesc = NULL; ++ IMG_UINT32* psFwArray; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ PVRSRV_RGXDEV_INFO *psDevice = psDeviceNode->pvDevice; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psDeviceNode); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRGXSelectCustomCountersKM: configure block %u to read %u counters", ui16CustomBlockID, ui16NumCustomCounters)); ++ ++ /* Fill in the command structure with the parameters needed */ ++ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS; ++ sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16NumCounters = ui16NumCustomCounters; ++ sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16CustomBlock = ui16CustomBlockID; ++ ++ if (ui16NumCustomCounters > 0) ++ { ++ PVR_ASSERT(pui32CustomCounterIDs); ++ ++ /* used for passing counters config to the Firmware, write-only for the CPU */ ++ eError = DevmemFwAllocate(psDevice, ++ sizeof(IMG_UINT32) * ui16NumCustomCounters, ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), ++ "FwHWPerfConfigCustomCounters", ++ &psFwSelectCntrsMemDesc); ++ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); ++ ++ eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.sCustomCounterIDs, ++ psFwSelectCntrsMemDesc, 0, RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); ++ ++ eError = DevmemAcquireCpuVirtAddr(psFwSelectCntrsMemDesc, (void **)&psFwArray); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail2); ++ ++ OSCachedMemCopyWMB(psFwArray, pui32CustomCounterIDs, sizeof(IMG_UINT32) * ui16NumCustomCounters); ++ DevmemPDumpLoadMem(psFwSelectCntrsMemDesc, ++ 0, ++ sizeof(IMG_UINT32) * ui16NumCustomCounters, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ ++ /* Push in the KCCB the command to configure the custom counters block */ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, ++ RGXFWIF_DM_GP, ++ &sKccbCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ 
&ui32kCCBCommandSlot); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail3); ++ ++ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: Command scheduled")); ++ ++ /* Wait for FW to complete */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3); ++ ++ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: FW operation completed")); ++ ++ if (ui16NumCustomCounters > 0) ++ { ++ /* Release temporary memory used for block configuration */ ++ RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc); ++ DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc); ++ DevmemFwUnmapAndFree(psDevice, psFwSelectCntrsMemDesc); ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf custom counters %u reading will be sent with the next HW events", ui16NumCustomCounters)); ++ ++ PVR_DPF_RETURN_OK; ++ ++fail3: ++ if (psFwSelectCntrsMemDesc) ++ { ++ DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc); ++ } ++fail2: ++ if (psFwSelectCntrsMemDesc) ++ { ++ RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc); ++ } ++fail1: ++ if (psFwSelectCntrsMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevice, psFwSelectCntrsMemDesc); ++ } ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++/* ++ PVRSRVRGXConfigureHWPerfBlocksKM ++ */ ++PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT32 ui32CtrlWord, ++ IMG_UINT32 ui32ArrayLen, ++ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sKccbCmd; ++ DEVMEM_MEMDESC *psFwBlkConfigsMemDesc; ++ RGX_HWPERF_CONFIG_CNTBLK *psFwArray; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ PVRSRV_RGXDEV_INFO *psDevice; ++ ++ PVR_LOG_RETURN_IF_FALSE(psDeviceNode != NULL, "psDeviceNode is NULL", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ psDevice = psDeviceNode->pvDevice; ++ ++ PVR_UNREFERENCED_PARAMETER(ui32CtrlWord); ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ PVR_LOG_RETURN_IF_FALSE(ui32ArrayLen > 0, "ui32ArrayLen is 0", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ PVR_LOG_RETURN_IF_FALSE(psBlockConfigs != NULL, "psBlockConfigs is NULL", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ PVR_DPF_ENTERED; ++ ++ /* Fill in the command structure with the parameters needed */ ++ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_BLKS; ++ sKccbCmd.uCmdData.sHWPerfCfgDABlks.ui32NumBlocks = ui32ArrayLen; ++ ++ /* used for passing counters config to the Firmware, write-only for the CPU */ ++ eError = DevmemFwAllocate(psDevice, ++ sizeof(RGX_HWPERF_CONFIG_CNTBLK) * ui32ArrayLen, ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), ++ "FwHWPerfCountersDAConfigBlock", ++ &psFwBlkConfigsMemDesc); ++ PVR_LOG_RETURN_IF_ERROR(eError, "DevmemFwAllocate"); ++ ++ eError = RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgDABlks.sBlockConfigs, ++ psFwBlkConfigsMemDesc, 0, RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", fail1); ++ ++ eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevMemAcquireCpuVirtAddr", fail2); ++ ++ OSCachedMemCopyWMB(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen); ++ DevmemPDumpLoadMem(psFwBlkConfigsMemDesc, 
++ 0, ++ sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ /* Ask the FW to carry out the HWPerf configuration command. */ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, ++ RGXFWIF_DM_GP, ++ &sKccbCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot", fail2); ++ ++ /* Wait for FW to complete */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", fail3); ++ ++ /* Release temporary memory used for block configuration. */ ++ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); ++ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); ++ DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); ++ ++ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ++ ui32ArrayLen)); ++ ++ PVR_DPF_RETURN_OK; ++ ++fail3: ++ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc); ++ ++fail2: ++ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc); ++ ++fail1: ++ DevmemFwUnmapAndFree(psDevice, psFwBlkConfigsMemDesc); ++ ++ PVR_DPF_RETURN_RC (eError); ++} ++ ++/****************************************************************************** ++ * Currently only implemented on Linux. Feature can be enabled to provide ++ * an interface to 3rd-party kernel modules that wish to access the ++ * HWPerf data. The API is documented in the rgxapi_km.h header and ++ * the rgx_hwperf* headers. ++ *****************************************************************************/ ++ ++/* Internal HWPerf kernel connection/device data object to track the state ++ * of a client session. ++ */ ++typedef struct ++{ ++ PVRSRV_DEVICE_NODE* psRgxDevNode; ++ PVRSRV_RGXDEV_INFO* psRgxDevInfo; ++ ++ /* TL Open/close state */ ++ IMG_HANDLE hSD[RGX_HWPERF_MAX_STREAM_ID]; ++ ++ /* TL Acquire/release state */ ++ IMG_PBYTE pHwpBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer returned to user in acquire call */ ++ IMG_PBYTE pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to end of HwpBuf */ ++ IMG_PBYTE pTlBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer obtained via TlAcquireData */ ++ IMG_PBYTE pTlBufPos[RGX_HWPERF_MAX_STREAM_ID]; /*!< initial position in TlBuf to acquire packets */ ++ IMG_PBYTE pTlBufRead[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to the last packet read */ ++ IMG_UINT32 ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID]; /*!< length of acquired TlBuf */ ++ IMG_BOOL bRelease[RGX_HWPERF_MAX_STREAM_ID]; /*!< used to determine whether or not to release currently held TlBuf */ ++ ++ ++} RGX_KM_HWPERF_DEVDATA; ++ ++PVRSRV_ERROR RGXHWPerfConfigMuxCounters( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ IMG_UINT32 ui32NumBlocks, ++ RGX_HWPERF_CONFIG_MUX_CNTBLK *asBlockConfigs) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGX_KM_HWPERF_DEVDATA* psDevData; ++ RGX_HWPERF_DEVICE *psHWPerfDev; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ /* Validate input argument values supplied by the caller */ ++ if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; ++ ++ while (psHWPerfDev) ++ { ++ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; ++ ++ /* Call the internal server API */ ++ eError = PVRSRVRGXConfigMuxHWPerfCountersKM(NULL, ++ psDevData->psRgxDevNode, ++ ui32NumBlocks, ++ asBlockConfigs); ++ 
PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); ++ ++ psHWPerfDev = psHWPerfDev->psNext; ++ } ++ ++ return eError; ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ IMG_UINT16 ui16CustomBlockID, ++ IMG_UINT16 ui16NumCustomCounters, ++ IMG_UINT32 *pui32CustomCounterIDs) ++{ ++ PVRSRV_ERROR eError; ++ RGX_HWPERF_DEVICE *psHWPerfDev; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ /* Validate input arguments supplied by the caller */ ++ PVR_LOG_RETURN_IF_FALSE((NULL != psHWPerfConnection), "psHWPerfConnection invalid", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ PVR_LOG_RETURN_IF_FALSE((0 != ui16NumCustomCounters), "uiNumBlocks invalid", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ PVR_LOG_RETURN_IF_FALSE((NULL != pui32CustomCounterIDs),"asBlockConfigs invalid", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ /* Check # of blocks */ ++ PVR_LOG_RETURN_IF_FALSE((!(ui16CustomBlockID > RGX_HWPERF_MAX_CUSTOM_BLKS)),"ui16CustomBlockID invalid", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ /* Check # of counters */ ++ PVR_LOG_RETURN_IF_FALSE((!(ui16NumCustomCounters > RGX_HWPERF_MAX_CUSTOM_CNTRS)),"ui16NumCustomCounters invalid", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; ++ ++ while (psHWPerfDev) ++ { ++ RGX_KM_HWPERF_DEVDATA *psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; ++ ++ eError = PVRSRVRGXConfigCustomCountersKM(NULL, ++ psDevData->psRgxDevNode, ++ ui16CustomBlockID, ui16NumCustomCounters, pui32CustomCounterIDs); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlCustHWPerfKM"); ++ ++ psHWPerfDev = psHWPerfDev->psNext; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++/****************************************************************************** ++ End of file (rgxhwperf.c) ++ ******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxhwperf.h b/drivers/gpu/drm/img-rogue/rgxhwperf.h +new file mode 100644 +index 000000000000..8819fe4f2682 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxhwperf.h +@@ -0,0 +1,74 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX HW Performance header file ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX HWPerf functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXHWPERF_H_ ++#define RGXHWPERF_H_ ++ ++#include "rgxhwperf_common.h" ++ ++/****************************************************************************** ++ * RGX HW Performance Profiling API(s) Rogue specific ++ *****************************************************************************/ ++ ++PVRSRV_ERROR PVRSRVRGXConfigMuxHWPerfCountersKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32ArrayLen, ++ RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigs); ++ ++ ++PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT16 ui16CustomBlockID, ++ IMG_UINT16 ui16NumCustomCounters, ++ IMG_UINT32 * pui32CustomCounterIDs); ++ ++PVRSRV_ERROR PVRSRVRGXConfigureHWPerfBlocksKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT32 ui32CtrlWord, ++ IMG_UINT32 ui32ArrayLen, ++ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs); ++ ++#endif /* RGXHWPERF_H_ */ +diff --git a/drivers/gpu/drm/img-rogue/rgxhwperf_common.c b/drivers/gpu/drm/img-rogue/rgxhwperf_common.c +new file mode 100644 +index 000000000000..e2b472d5150d +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxhwperf_common.c +@@ -0,0 +1,3715 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX HW Performance implementation ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX HW Performance implementation ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ /**************************************************************************/ ++ ++//#define PVR_DPF_FUNCTION_TRACE_ON 1 ++#undef PVR_DPF_FUNCTION_TRACE_ON ++ ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "rgxdevice.h" ++#include "pvrsrv_error.h" ++#include "pvr_notifier.h" ++#include "osfunc.h" ++#include "allocmem.h" ++ ++#include "pvrsrv.h" ++#include "pvrsrv_tlstreams.h" ++#include "pvrsrv_tlcommon.h" ++#include "tlclient.h" ++#include "tlstream.h" ++ ++#include "rgxhwperf.h" ++#include "rgxapi_km.h" ++#include "rgxfwutils.h" ++#include "rgxtimecorr.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "pdump_km.h" ++#include "pvrsrv_apphint.h" ++#include "process_stats.h" ++#include "rgx_hwperf_table.h" ++#include "rgxinit.h" ++ ++#include "info_page_defs.h" ++ ++/* This is defined by default to enable producer callbacks. ++ * Clients of the TL interface can disable the use of the callback ++ * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. */ ++#define SUPPORT_TL_PRODUCER_CALLBACK 1 ++ ++/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */ ++#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT) ++ ++/* Defines size of buffers returned from acquire/release calls */ ++#define FW_STREAM_BUFFER_SIZE (0x80000) ++#define HOST_STREAM_BUFFER_SIZE (0x20000) ++ ++/* Must be at least as large as two tl packets of maximum size */ ++static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), ++ "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); ++static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1), ++ "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)"); ++ ++IMG_INTERNAL /*static inline*/ IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **); ++ ++static inline IMG_UINT32 ++RGXHWPerfGetPackets(IMG_UINT32 ui32BytesExp, ++ IMG_UINT32 ui32AllowedSize, ++ RGX_PHWPERF_V2_PACKET_HDR psCurPkt ) ++{ ++ IMG_UINT32 sizeSum = 0; ++ ++ /* Traverse the array to find how many packets will fit in the available space. 
*/ ++ while ( sizeSum < ui32BytesExp && ++ sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize ) ++ { ++ sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt); ++ psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt); ++ } ++ ++ return sizeSum; ++} ++ ++static inline void ++RGXSuspendHWPerfL2DataCopy(PVRSRV_RGXDEV_INFO* psDeviceInfo, ++ IMG_BOOL bIsReaderConnected) ++{ ++ if (!bIsReaderConnected) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s : HWPerf FW events enabled but host buffer for FW events is full " ++ "and no reader is currently connected, suspending event collection. " ++ "Connect a reader or restart driver to avoid event loss.", __func__)); ++ psDeviceInfo->bSuspendHWPerfL2DataCopy = IMG_TRUE; ++ } ++} ++ ++/* ++ RGXHWPerfCopyDataL1toL2 ++ */ ++static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo, ++ IMG_BYTE *pbFwBuffer, ++ IMG_UINT32 ui32BytesExp) ++{ ++ IMG_HANDLE hHWPerfStream = psDeviceInfo->hHWPerfStream; ++ IMG_BYTE * pbL2Buffer; ++ IMG_UINT32 ui32L2BufFree; ++ IMG_UINT32 ui32BytesCopied = 0; ++ IMG_UINT32 ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer)); ++ PVRSRV_ERROR eError; ++ IMG_BOOL bIsReaderConnected; ++ ++ /* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */ ++#ifdef HWPERF_MISR_FUNC_DEBUG ++ static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX; ++#endif ++ ++ PVR_DPF_ENTERED; ++ ++#ifdef HWPERF_MISR_FUNC_DEBUG ++ PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d", ++ pbFwBuffer, ui32BytesExp)); ++#endif ++ ++#ifdef HWPERF_MISR_FUNC_DEBUG ++ { ++ /* Check the incoming buffer of data has not lost any packets */ ++ IMG_BYTE *pbFwBufferIter = pbFwBuffer; ++ IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp; ++ do ++ { ++ RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter); ++ IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal; ++ if (gui32Ordinal != IMG_UINT32_MAX) ++ { ++ if ((gui32Ordinal+1) != ui32CurOrdinal) ++ { ++ if (gui32Ordinal < ui32CurOrdinal) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "HWPerf [%p] packets lost (%u packets) between ordinal %u...%u", ++ pbFwBufferIter, ++ ui32CurOrdinal - gui32Ordinal - 1, ++ gui32Ordinal, ++ ui32CurOrdinal)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u", ++ pbFwBufferIter, ++ gui32Ordinal, ++ ui32CurOrdinal)); ++ } ++ } ++ } ++ gui32Ordinal = asCurPos->ui32Ordinal; ++ pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos); ++ } while (pbFwBufferIter < pbFwBufferEnd); ++ } ++#endif ++ ++ if (ui32BytesExp > psDeviceInfo->ui32L2BufMaxPacketSize) ++ { ++ IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, ++ psDeviceInfo->ui32L2BufMaxPacketSize, ++ RGX_HWPERF_GET_PACKET(pbFwBuffer)); ++ ++ if (0 != sizeSum) ++ { ++ ui32BytesExp = sizeSum; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer as " ++ "packet is too big and hence it breaches TL " ++ "packet size limit (TLBufferSize / 2.5)")); ++ goto e0; ++ } ++ } ++ ++ /* Try submitting all data in one TL packet. 
*/
++	eError = TLStreamReserve2(hHWPerfStream,
++	                          &pbL2Buffer,
++	                          (size_t)ui32BytesExp, ui32BytesExpMin,
++	                          &ui32L2BufFree, &bIsReaderConnected);
++	if ( eError == PVRSRV_OK )
++	{
++		OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp );
++		eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp);
++		if ( eError != PVRSRV_OK )
++		{
++			PVR_DPF((PVR_DBG_ERROR,
++			         "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
++			         eError, __func__));
++			goto e0;
++		}
++		/* Data were successfully written */
++		ui32BytesCopied = ui32BytesExp;
++	}
++	else if (eError == PVRSRV_ERROR_STREAM_FULL)
++	{
++		/* There was not enough space for all the data; copy as much as possible */
++		IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, ui32L2BufFree, RGX_HWPERF_GET_PACKET(pbFwBuffer));
++
++		PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree));
++
++		if ( 0 != sizeSum )
++		{
++			eError = TLStreamReserve( hHWPerfStream, &pbL2Buffer, (size_t)sizeSum);
++
++			if ( eError == PVRSRV_OK )
++			{
++				OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum );
++				eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum);
++				if ( eError != PVRSRV_OK )
++				{
++					PVR_DPF((PVR_DBG_ERROR,
++					         "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
++					         eError, __func__));
++					goto e0;
++				}
++				/* sizeSum bytes of hwperf packets have been successfully written */
++				ui32BytesCopied = sizeSum;
++			}
++			else if ( PVRSRV_ERROR_STREAM_FULL == eError )
++			{
++				PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
++				RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected);
++			}
++		}
++		else
++		{
++			PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
++			RGXSuspendHWPerfL2DataCopy(psDeviceInfo, bIsReaderConnected);
++		}
++	}
++	if ( PVRSRV_OK != eError &&               /* Some other error occurred */
++	     PVRSRV_ERROR_STREAM_FULL != eError ) /* A full stream is handled by the caller; we return the copied byte count */
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.",
++		         eError));
++	}
++
++e0:
++	/* Return the number of bytes copied; any remaining packets are left to be transported later. */
++	PVR_DPF_RETURN_VAL(ui32BytesCopied);
++}
++
++
++static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx(
++		const IMG_UINT32 ui32BufSize,
++		const IMG_UINT32 ui32Pos,
++		const IMG_UINT32 ui32Size)
++{
++	return ( ui32Pos + ui32Size < ui32BufSize ? ui32Pos + ui32Size : 0 );
++}
++
++
++/*
++	RGXHWPerfDataStore
++ */
++static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo)
++{
++	RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
++	IMG_BYTE* psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf;
++	IMG_UINT32 ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount;
++	IMG_UINT32 ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0;
++#ifdef HWPERF_MISR_FUNC_DEBUG
++	IMG_UINT32 ui32BytesExpSum = 0;
++#endif
++
++	PVR_DPF_ENTERED;
++
++	/* The caller should check this member is valid before calling */
++	PVR_ASSERT(psDevInfo->hHWPerfStream);
++
++	if (psDevInfo->bSuspendHWPerfL2DataCopy)
++	{
++		PVR_DPF((PVR_DBG_MESSAGE,
++		         "%s : Copying data to host buffer for FW events is "
++		         "suspended. 
Start HWPerf consumer or restart driver if " ++ "HWPerf FW events are needed", __func__)); ++ ++ PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); ++ } ++ ++ /* Get a copy of the current ++ * read (first packet to read) ++ * write (empty location for the next write to be inserted) ++ * WrapCount (size in bytes of the buffer at or past end) ++ * indexes of the FW buffer */ ++ ui32SrcRIdx = psFwSysData->ui32HWPerfRIdx; ++ ui32SrcWIdx = psFwSysData->ui32HWPerfWIdx; ++ OSMemoryBarrier(NULL); ++ ui32SrcWrapCount = psFwSysData->ui32HWPerfWrapCount; ++ ++#if defined(HWPERF_MISR_FUNC_DEBUG) || defined(EMULATOR) ++ { ++ IMG_UINT32 ui32SrcBufSize = psDevInfo->ui32RGXFWIfHWPerfBufSize; ++ ++ if (ui32SrcRIdx >= ui32SrcBufSize || ui32SrcWIdx >= ui32SrcBufSize) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s : Invalid read/write offsets found! srcRIdx:%u srcWIdx:%u srcBufSize:%u", ++ __func__, ui32SrcRIdx, ui32SrcWIdx, ui32SrcBufSize)); ++ ++ PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); ++ } ++ } ++#endif ++ ++ /* Is there any data in the buffer not yet retrieved? */ ++ if ( ui32SrcRIdx != ui32SrcWIdx ) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d", ui32SrcRIdx, ui32SrcWIdx)); ++ ++ /* Is the write position higher than the read position? */ ++ if ( ui32SrcWIdx > ui32SrcRIdx ) ++ { ++ /* Yes, buffer has not wrapped */ ++ ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx; ++#ifdef HWPERF_MISR_FUNC_DEBUG ++ ui32BytesExpSum += ui32BytesExp; ++#endif ++ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, ++ psHwPerfInfo + ui32SrcRIdx, ++ ui32BytesExp); ++ ++ /* Advance the read index and the free bytes counter by the number ++ * of bytes transported. Items will be left in buffer if not all data ++ * could be transported. Exit to allow buffer to drain. */ ++ OSWriteDeviceMem32WithWMB(&psFwSysData->ui32HWPerfRIdx, ++ RGXHWPerfAdvanceRIdx(psDevInfo->ui32RGXFWIfHWPerfBufSize, ++ ui32SrcRIdx, ++ ui32BytesCopied)); ++ ++ ui32BytesCopiedSum += ui32BytesCopied; ++ } ++ /* No, buffer has wrapped and write position is behind read position */ ++ else ++ { ++ /* Byte count equal to ++ * number of bytes from read position to the end of the buffer, ++ * + data in the extra space in the end of the buffer. */ ++ ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx; ++ ++#ifdef HWPERF_MISR_FUNC_DEBUG ++ ui32BytesExpSum += ui32BytesExp; ++#endif ++ /* Attempt to transfer the packets to the TL stream buffer */ ++ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, ++ psHwPerfInfo + ui32SrcRIdx, ++ ui32BytesExp); ++ ++ /* Advance read index as before and Update the local copy of the ++ * read index as it might be used in the last if branch*/ ++ ui32SrcRIdx = RGXHWPerfAdvanceRIdx( ++ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, ++ ui32BytesCopied); ++ ++ /* Update Wrap Count */ ++ if ( ui32SrcRIdx == 0) ++ { ++ OSWriteDeviceMem32WithWMB(&psFwSysData->ui32HWPerfWrapCount, ++ psDevInfo->ui32RGXFWIfHWPerfBufSize); ++ } ++ OSWriteDeviceMem32WithWMB(&psFwSysData->ui32HWPerfRIdx, ui32SrcRIdx); ++ ++ ui32BytesCopiedSum += ui32BytesCopied; ++ ++ /* If all the data in the end of the array was copied, try copying ++ * wrapped data in the beginning of the array, assuming there is ++ * any and the RIdx was wrapped. 
*/ ++ if ( (ui32BytesCopied == ui32BytesExp) ++ && (ui32SrcWIdx > 0) ++ && (ui32SrcRIdx == 0) ) ++ { ++ ui32BytesExp = ui32SrcWIdx; ++#ifdef HWPERF_MISR_FUNC_DEBUG ++ ui32BytesExpSum += ui32BytesExp; ++#endif ++ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo, ++ psHwPerfInfo, ++ ui32BytesExp); ++ /* Advance the FW buffer read position. */ ++ psFwSysData->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx( ++ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx, ++ ui32BytesCopied); ++ ++ ui32BytesCopiedSum += ui32BytesCopied; ++ } ++ } ++#ifdef HWPERF_MISR_FUNC_DEBUG ++ if (ui32BytesCopiedSum != ui32BytesExpSum) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psFwSysData->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum)); ++ } ++#endif ++ ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport")); ++ } ++ ++ PVR_DPF_RETURN_VAL(ui32BytesCopiedSum); ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_RGXDEV_INFO* psRgxDevInfo; ++ IMG_UINT32 ui32BytesCopied; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psDevInfo); ++ psRgxDevInfo = psDevInfo->pvDevice; ++ ++ /* Store FW event data if the destination buffer exists.*/ ++ if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) ++ { ++ OSLockAcquire(psRgxDevInfo->hHWPerfLock); ++ ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo); ++ if ( ui32BytesCopied ) ++ { /* Signal consumers that packets may be available to read when ++ * running from a HW kick, not when called by client APP thread ++ * via the transport layer CB as this can lead to stream ++ * corruption.*/ ++ eError = TLStreamSync(psRgxDevInfo->hHWPerfStream); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied")); ++ RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo); ++ } ++ OSLockRelease(psRgxDevInfo->hHWPerfLock); ++ } ++ ++ ++ PVR_DPF_RETURN_OK; ++} ++ ++ ++/* Currently supported by default */ ++#if defined(SUPPORT_TL_PRODUCER_CALLBACK) ++static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream, ++ IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser; ++ ++ PVR_UNREFERENCED_PARAMETER(hStream); ++ PVR_UNREFERENCED_PARAMETER(ui32Resp); ++ ++ PVR_ASSERT(psRgxDevInfo); ++ ++ switch (ui32ReqOp) ++ { ++ case TL_SOURCECB_OP_CLIENT_EOS: ++ /* Keep HWPerf resource init check and use of ++ * resources atomic, they may not be freed during use ++ */ ++ ++ /* This solution is for avoiding a deadlock situation where - ++ * in DoTLStreamReserve(), writer has acquired HWPerfLock and ++ * ReadLock and is waiting on ReadPending (which will be reset ++ * by reader), And ++ * the reader after setting ReadPending in TLStreamAcquireReadPos(), ++ * is waiting for HWPerfLock in RGXHWPerfTLCB(). ++ * So here in RGXHWPerfTLCB(), if HWPerfLock is already acquired we ++ * will return to the reader without waiting to acquire HWPerfLock. 
++ */ ++ if (!OSTryLockAcquire(psRgxDevInfo->hHWPerfLock)) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "hHWPerfLock is already acquired, a write " ++ "operation might already be in process")); ++ return PVRSRV_OK; ++ } ++ ++ if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL) ++ { ++ (void) RGXHWPerfDataStore(psRgxDevInfo); ++ } ++ OSLockRelease(psRgxDevInfo->hHWPerfLock); ++ break; ++ ++ default: ++ break; ++ } ++ ++ return eError; ++} ++#endif ++ ++ ++static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) ++{ ++ if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc) ++ { ++ if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); ++ psRgxDevInfo->psRGXFWIfHWPerfBuf = NULL; ++ } ++ DevmemFwUnmapAndFree(psRgxDevInfo, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); ++ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; ++ } ++} ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfInit ++ ++@Description Called during driver init for initialization of HWPerf module ++ in the Rogue device driver. This function keeps allocated ++ only the minimal necessary resources, which are required for ++ functioning of HWPerf server module. ++ ++@Input psRgxDevInfo RGX Device Info ++ ++@Return PVRSRV_ERROR ++ */ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ PVR_DPF_ENTERED; ++ ++ /* expecting a valid device info */ ++ PVR_RETURN_IF_INVALID_PARAM(psRgxDevInfo != NULL); ++ ++ /* Create a lock for HWPerf server module used for serializing, L1 to L2 ++ * copy calls (e.g. in case of TL producer callback) and L1, L2 resource ++ * allocation */ ++ eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock); ++ PVR_LOG_RETURN_IF_ERROR(eError, "OSLockCreate"); ++ ++ /* avoid uninitialised data */ ++ psRgxDevInfo->hHWPerfStream = (IMG_HANDLE) NULL; ++ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL; ++ ++ PVR_DPF_RETURN_OK; ++} ++ ++/*************************************************************************/ /*! ++@Function RGXHWPerfIsInitRequired ++ ++@Description Returns true if the HWperf firmware buffer (L1 buffer) and host ++ driver TL buffer (L2 buffer) are not already allocated. Caller ++ must possess hHWPerfLock lock before calling this ++ function so the state tested is not inconsistent. ++ ++@Input psRgxDevInfo RGX Device Info, on which init requirement is ++ checked. ++ ++@Return IMG_BOOL Whether initialization (allocation) is required ++ */ /**************************************************************************/ ++static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo) ++{ ++ PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock)); ++ ++#if !defined(NO_HARDWARE) ++ /* Both L1 and L2 buffers are required (for HWPerf functioning) on driver ++ * built for actual hardware (TC, EMU, etc.) ++ */ ++ if (psRgxDevInfo->hHWPerfStream == (IMG_HANDLE) NULL) ++ { ++ /* The allocation API (RGXHWPerfInitOnDemandResources) allocates ++ * device memory for both L1 and L2 without any checks. Hence, ++ * either both should be allocated or both be NULL. ++ * ++ * In-case this changes in future (for e.g. a situation where one ++ * of the 2 buffers is already allocated and other is required), ++ * add required checks before allocation calls to avoid memory leaks. 
++ */ ++ PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL); ++ return IMG_TRUE; ++ } ++ PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL); ++#else ++ /* On a NO-HW driver L2 is not allocated. So, no point in checking its ++ * allocation */ ++ if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL) ++ { ++ return IMG_TRUE; ++ } ++#endif ++ return IMG_FALSE; ++} ++#if !defined(NO_HARDWARE) ++static void _HWPerfFWOnReaderOpenCB(void *pvArg) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg; ++ PVRSRV_DEVICE_NODE* psDevNode = (PVRSRV_DEVICE_NODE*) psRgxDevInfo->psDeviceNode; ++ RGXFWIF_KCCB_CMD sKccbCmd; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ PVRSRV_VZ_RETN_IF_MODE(GUEST); ++ ++ /* Clear any previously suspended state for bSuspendHWPerfL2DataCopy as we ++ * now have a reader attached so the data will be delivered upstream. */ ++ if (psRgxDevInfo->bSuspendHWPerfL2DataCopy) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Resuming HWPerf FW event collection.", ++ __func__)); ++ psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE; ++ } ++ ++ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG; ++ sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = RGXFWIF_HWPERF_CTRL_EMIT_FEATURES_EV; ++ sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = 0; ++ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevNode->pvDevice, ++ RGXFWIF_DM_GP, ++ &sKccbCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to generate feature packet in " ++ "firmware (error = %d)", __func__, eError)); ++ return; ++ } ++ ++ eError = RGXWaitForKCCBSlotUpdate(psRgxDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); ++} ++#endif ++/*************************************************************************/ /*! ++@Function RGXHWPerfInitOnDemandResources ++ ++@Description This function allocates the HWperf firmware buffer (L1 buffer) ++ and host driver TL buffer (L2 buffer) if HWPerf is enabled at ++ driver load time. Otherwise, these buffers are allocated ++ on-demand as and when required. Caller ++ must possess hHWPerfLock lock before calling this ++ function so the state tested is not inconsistent if called ++ outside of driver initialisation. 
++ ++@Input psRgxDevInfo RGX Device Info, on which init is done ++ ++@Return PVRSRV_ERROR ++ */ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo) ++{ ++ IMG_HANDLE hStream = NULL; /* Init required for noHW */ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32L2BufferSize = 0; ++ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags; ++ IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; /* 5 seems reasonable as it can hold ++ names up to "hwperf_9999", which is enough */ ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ PVR_DPF_ENTERED; ++ ++ /* Create the L1 HWPerf buffer on demand, read-only for the CPU ++ * (except for the zero/poison operations) */ ++ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) ++ | PVRSRV_MEMALLOCFLAG_GPU_READABLE ++ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE ++ | PVRSRV_MEMALLOCFLAG_GPU_UNCACHED ++ | PVRSRV_MEMALLOCFLAG_CPU_READABLE ++ | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC ++ | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE ++#if defined(PDUMP) /* Helps show where the packet data ends */ ++ | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC ++#else /* Helps show corruption issues in driver-live */ ++ | PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC ++#endif ++ | PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); ++ ++ /* Allocate HWPerf FW L1 buffer */ ++ eError = DevmemFwAllocate(psRgxDevInfo, ++ /* Pad it enough to hold the biggest variable sized packet. */ ++ psRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGX_HWPERF_MAX_PACKET_SIZE, ++ uiMemAllocFlags, ++ "FwHWPerfBuffer", ++ &psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate kernel fw hwperf buffer (%u)", ++ __func__, eError)); ++ goto e0; ++ } ++ ++ /* Expecting the RuntimeCfg structure is mapped into CPU virtual memory. ++ * Also, make sure the FW address is not already set */ ++ PVR_ASSERT(psRgxDevInfo->psRGXFWIfRuntimeCfg && psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0); ++ ++ /* Meta cached flag removed from this allocation as it was found ++ * FW performance was better without it. */ ++ eError = RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, ++ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, ++ 0, RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e0); ++ ++#if defined(RGX_FEATURE_HWPERF_VOLCANIC) ++ RGXSetMetaDMAAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfDMABuf, ++ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, ++ &psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf, ++ 0); ++#endif ++ ++ /* flush write buffers for psRgxDevInfo->psRGXFWIfRuntimeCfg */ ++ OSWriteMemoryBarrier(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr); ++ ++ eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc, ++ (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to acquire kernel hwperf buffer (%u)", ++ __func__, eError)); ++ goto e0; ++ } ++ ++ /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence, ++ * L2 buffer is not allocated */ ++#if !defined(NO_HARDWARE) ++ /* Host L2 HWPERF buffer size in bytes must be bigger than the L1 buffer ++ * accessed by the FW. The MISR may try to write one packet the size of the L1 ++ * buffer in some scenarios. When logging is enabled in the MISR, it can be seen ++ * if the L2 buffer hits a full condition. 
The closer in size the L2 and L1 buffers ++ * are the more chance of this happening. ++ * Size chosen to allow MISR to write an L1 sized packet and for the client ++ * application/daemon to drain a L1 sized packet e.g. ~ 1.5*L1. ++ */ ++ ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize + ++ (psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1); ++ ++ /* form the HWPerf stream name, corresponding to this DevNode; which can make sense in the UM */ ++ if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d", ++ PVRSRV_TL_HWPERF_RGX_FW_STREAM, ++ psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to form HWPerf stream name for device %d", ++ __func__, ++ psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ eError = TLStreamCreate(&hStream, ++ pszHWPerfStreamName, ++ ui32L2BufferSize, ++ TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT, ++ _HWPerfFWOnReaderOpenCB, psRgxDevInfo, ++#if !defined(SUPPORT_TL_PRODUCER_CALLBACK) ++ NULL, NULL ++#else ++ /* Not enabled by default */ ++ RGXHWPerfTLCB, psRgxDevInfo ++#endif ++ ); ++ PVR_LOG_GOTO_IF_ERROR(eError, "TLStreamCreate", e1); ++ ++ eError = TLStreamSetNotifStream(hStream, ++ PVRSRVGetPVRSRVData()->hTLCtrlStream); ++ /* we can still discover host stream so leave it as is and just log error */ ++ PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); ++ ++ /* send the event here because host stream is implicitly opened for write ++ * in TLStreamCreate and TLStreamOpen is never called (so the event is ++ * never emitted) */ ++ TLStreamMarkStreamOpen(hStream); ++ ++ { ++ TL_STREAM_INFO sTLStreamInfo; ++ ++ TLStreamInfo(hStream, &sTLStreamInfo); ++ psRgxDevInfo->ui32L2BufMaxPacketSize = sTLStreamInfo.maxTLpacketSize; ++ ++ psRgxDevInfo->bSuspendHWPerfL2DataCopy = IMG_FALSE; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d", ++ psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize)); ++ ++#else /* defined(NO_HARDWARE) */ ++ PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize); ++ PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB); ++ PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName); ++ ui32L2BufferSize = 0; ++#endif ++ ++ psRgxDevInfo->hHWPerfStream = hStream; ++ PVR_DPF_RETURN_OK; ++ ++#if !defined(NO_HARDWARE) ++e1: /* L2 buffer initialisation failures */ ++ psRgxDevInfo->hHWPerfStream = NULL; ++#endif ++e0: /* L1 buffer initialisation failures */ ++ RGXHWPerfL1BufferDeinit(psRgxDevInfo); ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++ ++void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) ++{ ++ IMG_HANDLE hStream = psRgxDevInfo->hHWPerfStream; ++ ++ PVRSRV_VZ_RETN_IF_MODE(GUEST); ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psRgxDevInfo); ++ psRgxDevInfo->hHWPerfStream = NULL; ++ ++ /* Clean up the L2 buffer stream object if allocated */ ++ if (hStream) ++ { ++ /* send the event here because host stream is implicitly opened for ++ * write in TLStreamCreate and TLStreamClose is never called (so the ++ * event is never emitted) */ ++ TLStreamMarkStreamClose(hStream); ++ TLStreamClose(hStream); ++ } ++ ++ /* Cleanup L1 buffer resources */ ++ RGXHWPerfL1BufferDeinit(psRgxDevInfo); ++ ++ /* Cleanup the HWPerf server module lock resource */ ++ if (psRgxDevInfo->hHWPerfLock) ++ { ++ OSLockDestroy(psRgxDevInfo->hHWPerfLock); ++ psRgxDevInfo->hHWPerfLock = NULL; ++ } ++ ++ PVR_DPF_RETURN; ++} ++ ++ ++/****************************************************************************** ++ * RGX HW Performance Profiling Server API(s) ++ 
*****************************************************************************/
++
++static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
++                                          IMG_BOOL bToggle,
++                                          IMG_UINT64 ui64Mask)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++	PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
++	RGXFWIF_KCCB_CMD sKccbCmd;
++	IMG_UINT32 ui32kCCBCommandSlot;
++
++	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED);
++
++	/* Whether this method is being used to enable or to disable HWPerf, the
++	 * HWPerf buffers (host and FW) are likely to be needed eventually, so
++	 * create them here; this also helps unit testing. The buffers are
++	 * allocated on demand to reduce the RAM footprint on systems that do not
++	 * need HWPerf resources.
++	 * Obtain the lock first, then test and initialise if required. */
++	OSLockAcquire(psDevice->hHWPerfLock);
++
++	if (!psDevice->bFirmwareInitialised)
++	{
++		psDevice->ui64HWPerfFilter = ui64Mask; // at least set the filter
++		eError = PVRSRV_ERROR_NOT_INITIALISED;
++
++		PVR_DPF((PVR_DBG_ERROR,
++		         "HWPerf has NOT been initialised yet. Mask has been SET to "
++		         "(%" IMG_UINT64_FMTSPECx ")",
++		         ui64Mask));
++
++		goto unlock_and_return;
++	}
++
++	if (RGXHWPerfIsInitRequired(psDevice))
++	{
++		eError = RGXHWPerfInitOnDemandResources(psDevice);
++		if (eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW "
++			        "resources failed", __func__));
++			goto unlock_and_return;
++		}
++	}
++
++#if defined(RGX_FEATURE_HWPERF_VOLCANIC) && defined(SUPPORT_POWMON_COMPONENT) && defined(SUPPORT_POWER_VALIDATION_VIA_DEBUGFS)
++	if (RGXPowmonBufferIsInitRequired(psDeviceNode->pvDevice))
++	{
++		/* Allocate the power monitoring log buffer if enabled */
++		eError = RGXPowmonBufferInitOnDemandResources(psDeviceNode->pvDevice);
++		if (eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand power monitoring "
++			        "resources failed", __func__));
++			goto unlock_and_return;
++		}
++	}
++#endif
++
++	/* Unlock here as no further HWPerf resources are used below that would be
++	 * affected if freed by another thread */
++	OSLockRelease(psDevice->hHWPerfLock);
++
++	/* Return if the filter is unchanged */
++	if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask)
++		goto return_;
++
++	/* Prepare command parameters ... */
++	sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG;
++	sKccbCmd.uCmdData.sHWPerfCtrl.eOpCode = bToggle ? RGXFWIF_HWPERF_CTRL_TOGGLE : RGXFWIF_HWPERF_CTRL_SET;
++	sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask;
++
++	/* Ask the FW to carry out the HWPerf configuration command */
++	eError = RGXScheduleCommandAndGetKCCBSlot(psDevice,
++	                                          RGXFWIF_DM_GP,
++	                                          &sKccbCmd,
++	                                          IMG_TRUE,
++	                                          &ui32kCCBCommandSlot);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in "
++		        "firmware (error = %d)", __func__, eError));
++		goto return_;
++	}
++
++	psDevice->ui64HWPerfFilter = bToggle ?
++ psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask; ++ ++ /* Wait for FW to complete */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate", return_); ++ ++#if defined(DEBUG) ++ if (bToggle) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%" IMG_UINT64_FMTSPECx ") have been TOGGLED", ++ ui64Mask)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")", ++ ui64Mask)); ++ } ++#endif ++ ++ return PVRSRV_OK; ++ ++unlock_and_return: ++ OSLockRelease(psDevice->hHWPerfLock); ++ ++return_: ++ return eError; ++} ++ ++#define HWPERF_HOST_MAX_DEFERRED_PACKETS 800 ++ ++static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bToggle, ++ IMG_UINT32 ui32Mask) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice; ++#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) ++ IMG_UINT32 ui32OldFilter = psDevice->ui32HWPerfHostFilter; ++#endif ++ ++ OSLockAcquire(psDevice->hLockHWPerfHostStream); ++ if (psDevice->hHWPerfHostStream == NULL) ++ { ++ eError = RGXHWPerfHostInitOnDemandResources(psDevice); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Initialisation of on-demand HWPerfHost resources failed", ++ __func__)); ++ OSLockRelease(psDevice->hLockHWPerfHostStream); ++ return eError; ++ } ++ } ++ ++ psDevice->ui32HWPerfHostFilter = bToggle ? ++ psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask; ++ ++ // Deferred creation of host periodic events thread ++ if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)) ++ { ++ eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS); ++ PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); ++ } ++ else if (!(psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO))) ++ { ++ eError = PVRSRVDestroyHWPerfHostThread(); ++ PVR_LOG_IF_ERROR(eError, "PVRSRVDestroyHWPerfHostThread"); ++ } ++ ++#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) ++ // Log deferred events stats if filter changed from non-zero to zero ++ if ((ui32OldFilter != 0) && (psDevice->ui32HWPerfHostFilter == 0)) ++ { ++ PVR_LOG(("HWPerfHost deferred events buffer high-watermark / size: (%u / %u)", ++ psDevice->ui32DEHighWatermark, HWPERF_HOST_MAX_DEFERRED_PACKETS)); ++ ++ PVR_LOG(("HWPerfHost deferred event retries: WaitForAtomicCtxPktHighWatermark(%u) " ++ "WaitForRightOrdPktHighWatermark(%u)", ++ psDevice->ui32WaitForAtomicCtxPktHighWatermark, ++ psDevice->ui32WaitForRightOrdPktHighWatermark)); ++ } ++#endif ++ ++ OSLockRelease(psDevice->hLockHWPerfHostStream); ++ ++#if defined(DEBUG) ++ if (bToggle) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED", ++ ui32Mask)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)", ++ ui32Mask)); ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle, ++ IMG_UINT32 ui32InfoPageIdx, ++ IMG_UINT32 ui32Mask) ++{ ++ PVRSRV_DATA *psData = PVRSRVGetPVRSRVData(); ++ ++ PVR_LOG_RETURN_IF_FALSE(ui32InfoPageIdx >= HWPERF_INFO_IDX_START && ++ ui32InfoPageIdx < HWPERF_INFO_IDX_END, "invalid info" ++ " page index", PVRSRV_ERROR_INVALID_PARAMS); ++ ++ OSLockAcquire(psData->hInfoPageLock); ++ psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ? 
++		psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask;
++	OSLockRelease(psData->hInfoPageLock);
++
++#if defined(DEBUG)
++	if (bToggle)
++	{
++		PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED",
++		         ui32InfoPageIdx, ui32Mask));
++	}
++	else
++	{
++		PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)",
++		         ui32InfoPageIdx, ui32Mask));
++	}
++#endif
++
++	return PVRSRV_OK;
++}
++
++PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection,
++                                                  PVRSRV_DEVICE_NODE *psDeviceNode,
++                                                  RGX_HWPERF_BVNC *psBVNC)
++{
++	PVRSRV_RGXDEV_INFO *psDevInfo;
++	PVRSRV_ERROR eError;
++
++	PVR_LOG_RETURN_IF_FALSE((NULL != psDeviceNode), "psDeviceNode invalid", PVRSRV_ERROR_INVALID_PARAMS);
++
++	psDevInfo = psDeviceNode->pvDevice;
++	eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, psBVNC);
++
++	return eError;
++}
++
++/*
++	AppHint interfaces
++ */
++static
++PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
++                                  const void *psPrivate,
++                                  IMG_UINT64 ui64Value)
++{
++	PVRSRV_ERROR eError;
++	PVRSRV_RGXDEV_INFO *psDeviceInfo;
++
++	PVR_UNREFERENCED_PARAMETER(psPrivate);
++
++	PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL);
++	PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL);
++
++	psDeviceInfo = psDeviceNode->pvDevice;
++
++	eError = RGXHWPerfCtrlFwBuffer(psDeviceNode, IMG_FALSE, ui64Value);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "Failed to set HWPerf firmware filter for device (%u)",
++		         psDeviceNode->sDevId.ui32InternalID));
++		return eError;
++	}
++
++	return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
++                                   const void *psPrivate,
++                                   IMG_UINT64 *pui64Value)
++{
++	PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL);
++	PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL);
++
++	PVR_UNREFERENCED_PARAMETER(psPrivate);
++
++	*pui64Value =
++	    ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui64HWPerfFilter;
++
++	return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
++                                    const void *psPrivate,
++                                    IMG_UINT32 ui32Value)
++{
++	PVRSRV_ERROR eError;
++
++	PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL);
++	PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL);
++
++	PVR_UNREFERENCED_PARAMETER(psPrivate);
++
++	eError = RGXHWPerfCtrlHostBuffer(psDeviceNode, IMG_FALSE, ui32Value);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "Failed to set HWPerf host filter for device (%u)",
++		         psDeviceNode->sDevId.ui32InternalID));
++		return eError;
++	}
++
++	return PVRSRV_OK;
++}
++
++static
++PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
++                                     const void *psPrivate,
++                                     IMG_UINT32 *pui32Value)
++{
++	PVR_RETURN_IF_INVALID_PARAM(psDeviceNode != NULL);
++	PVR_RETURN_IF_INVALID_PARAM(psDeviceNode->pvDevice != NULL);
++
++	PVR_UNREFERENCED_PARAMETER(psPrivate);
++
++	*pui32Value =
++	    ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32HWPerfHostFilter;
++
++	return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR _ReadClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
++                                      const void *psPrivData,
++                                      IMG_UINT32 *pui32Value)
++{
++	PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
++	IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
++	PVR_UNREFERENCED_PARAMETER(psDevice);
++
++	OSLockAcquire(psData->hInfoPageLock);
++	*pui32Value = psData->pui32InfoPage[ui32Idx];
++	OSLockRelease(psData->hInfoPageLock);
++
++	return PVRSRV_OK;
++}
++
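++/* Note on the filter update rule shared by the Ctrl and AppHint paths in
++ * this file: a "set" replaces the whole mask, while a "toggle" XORs the
++ * given bits into the current mask (see RGXHWPerfCtrlClientBuffer and the
++ * RGXHWPerfCtrlFwBuffer/RGXHWPerfCtrlHostBuffer functions). A minimal
++ * standalone sketch of the same rule, for illustration only
++ * (ApplyFilterMask is a hypothetical helper, not part of the driver):
++ *
++ *     static IMG_UINT32 ApplyFilterMask(IMG_UINT32 ui32Current,
++ *                                       IMG_BOOL bToggle,
++ *                                       IMG_UINT32 ui32Mask)
++ *     {
++ *         return bToggle ? (ui32Current ^ ui32Mask) : ui32Mask;
++ *     }
++ *
++ * For example, with ui32Current = 0x6: a set of 0x1 yields 0x1, while a
++ * toggle of 0x3 yields 0x5 (0x6 ^ 0x3).
++ */
++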
++static PVRSRV_ERROR _WriteClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
++                                       const void *psPrivData,
++                                       IMG_UINT32 ui32Value)
++{
++	IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
++	PVR_UNREFERENCED_PARAMETER(psDevice);
++
++	return RGXHWPerfCtrlClientBuffer(IMG_FALSE, ui32Idx, ui32Value);
++}
++
++void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter,
++	                                    RGXHWPerfReadFwFilter,
++	                                    RGXHWPerfSetFwFilter,
++	                                    psDeviceNode,
++	                                    NULL);
++	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter,
++	                                    RGXHWPerfReadHostFilter,
++	                                    RGXHWPerfSetHostFilter,
++	                                    psDeviceNode,
++	                                    NULL);
++}
++
++void RGXHWPerfClientInitAppHintCallbacks(void)
++{
++	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Services,
++	                                    _ReadClientFilter,
++	                                    _WriteClientFilter,
++	                                    APPHINT_OF_DRIVER_NO_DEVICE,
++	                                    (void *) HWPERF_FILTER_SERVICES_IDX);
++	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_EGL,
++	                                    _ReadClientFilter,
++	                                    _WriteClientFilter,
++	                                    APPHINT_OF_DRIVER_NO_DEVICE,
++	                                    (void *) HWPERF_FILTER_EGL_IDX);
++	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGLES,
++	                                    _ReadClientFilter,
++	                                    _WriteClientFilter,
++	                                    APPHINT_OF_DRIVER_NO_DEVICE,
++	                                    (void *) HWPERF_FILTER_OPENGLES_IDX);
++	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenCL,
++	                                    _ReadClientFilter,
++	                                    _WriteClientFilter,
++	                                    APPHINT_OF_DRIVER_NO_DEVICE,
++	                                    (void *) HWPERF_FILTER_OPENCL_IDX);
++	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Vulkan,
++	                                    _ReadClientFilter,
++	                                    _WriteClientFilter,
++	                                    APPHINT_OF_DRIVER_NO_DEVICE,
++	                                    (void *) HWPERF_FILTER_VULKAN_IDX);
++	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGL,
++	                                    _ReadClientFilter,
++	                                    _WriteClientFilter,
++	                                    APPHINT_OF_DRIVER_NO_DEVICE,
++	                                    (void *) HWPERF_FILTER_OPENGL_IDX);
++}
++
++static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB)
++{
++	if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX)
++	{
++		/* Size specified as an AppHint but it is too big */
++		PVR_DPF((PVR_DBG_WARNING,
++		        "RGXHWPerfHostInit: HWPerf Host buffer size "
++		        "value (%u) too big, using maximum (%u)",
++		        ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MAX));
++		return HWPERF_HOST_TL_STREAM_SIZE_MAX<<10;
++	}
++	else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN)
++	{
++		return ui32BufSizeKB<<10;
++	}
++	else if (ui32BufSizeKB > 0)
++	{
++		/* Size specified as an AppHint but it is too small */
++		PVR_DPF((PVR_DBG_WARNING,
++		        "RGXHWPerfHostInit: HWPerf Host buffer size "
++		        "value (%u) too small, using minimum (%u)",
++		        ui32BufSizeKB, HWPERF_HOST_TL_STREAM_SIZE_MIN));
++		return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10;
++	}
++	else
++	{
++		/* A size of 0 implies the AppHint is not set or is set to zero;
++		 * use the default size from the driver constant. */
++		return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10;
++	}
++}
++
++/******************************************************************************
++ * RGX HW Performance Host Stream API
++ *****************************************************************************/
++
++/*************************************************************************/ /*!
++@Function       RGXHWPerfHostInit
++
++@Description    Called during driver init for initialisation of the HWPerfHost
++                stream in the Rogue device driver. This function keeps
++                allocated only the minimal necessary resources that are
++                required for the HWPerf server module to function.
++ ++@Return PVRSRV_ERROR ++ */ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ PVR_RETURN_IF_INVALID_PARAM(psRgxDevInfo != NULL); ++ ++ eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", error); ++ ++ psRgxDevInfo->hHWPerfHostStream = NULL; ++ psRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */ ++ psRgxDevInfo->ui32HWPerfHostNextOrdinal = 1; ++ psRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB); ++ psRgxDevInfo->pvHostHWPerfMISR = NULL; ++ psRgxDevInfo->pui8DeferredEvents = NULL; ++ /* First packet has ordinal=1, so LastOrdinal=0 will ensure ordering logic ++ * is maintained */ ++ psRgxDevInfo->ui32HWPerfHostLastOrdinal = 0; ++ psRgxDevInfo->hHWPerfHostSpinLock = NULL; ++ ++error: ++ return eError; ++} ++ ++#define RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE \ ++ ((IMG_UINT32)(offsetof(RGX_HWPERF_HOST_CLIENT_INFO_DATA, uDetail) + \ ++ sizeof(((RGX_HWPERF_HOST_CLIENT_INFO_DETAIL*)0)->sProcName.ui32Count))) ++ ++static void _HWPerfHostOnConnectCB(void *pvArg) ++{ ++ PVRSRV_RGXDEV_INFO* psDevice; ++ PVRSRV_ERROR eError; ++ ++ RGXSRV_HWPERF_CLK_SYNC(pvArg); ++ ++ psDevice = (PVRSRV_RGXDEV_INFO*) pvArg; ++ ++ /* Handle the case where the RGX_HWPERF_HOST_INFO bit is set in the event filter ++ * before the host stream is opened for reading by a HWPerf client. ++ * Which can result in the host periodic thread sleeping for a long duration as TLStreamIsOpenForReading may return false. */ ++ if (psDevice->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HOST_INFO)) ++ { ++ eError = PVRSRVCreateHWPerfHostThread(PVRSRV_APPHINT_HWPERFHOSTTHREADTIMEOUTINMS); ++ PVR_LOG_IF_ERROR(eError, "PVRSRVCreateHWPerfHostThread"); ++ } ++ ++ if (RGXHWPerfHostIsEventEnabled(psDevice, RGX_HWPERF_HOST_CLIENT_INFO)) ++ { ++ // GCC throws -Werror=frame-larger-than error if the frame size is > 1024 bytes, ++ // so use a heap allocation - is there an alternate solution? 
++		IMG_BYTE *pbPktPayload = (IMG_BYTE*)OSAllocMem(RGX_HWPERF_MAX_PAYLOAD_SIZE);
++
++		if (pbPktPayload)
++		{
++			RGX_HWPERF_HOST_CLIENT_INFO_DATA *psHostClientInfo;
++			RGX_HWPERF_HOST_CLIENT_PROC_NAME *psProcName;
++			IMG_UINT32 ui32TotalPayloadSize, ui32NameLen, ui32ProcNamePktSize;
++			DLLIST_NODE *pNode, *pNext;
++
++			psHostClientInfo = IMG_OFFSET_ADDR(pbPktPayload,0);
++			psHostClientInfo->eType = RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME;
++			psHostClientInfo->uDetail.sProcName.ui32Count = 0U;
++			psProcName = psHostClientInfo->uDetail.sProcName.asProcNames;
++			ui32TotalPayloadSize = RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE;
++
++			OSLockAcquire(psDevice->psDeviceNode->hConnectionsLock);
++
++			// Announce current client connections to the reader
++			dllist_foreach_node(&psDevice->psDeviceNode->sConnections, pNode, pNext)
++			{
++				CONNECTION_DATA *psData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode);
++
++				ui32NameLen = OSStringLength(psData->pszProcName) + 1U;
++				ui32ProcNamePktSize = RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen);
++
++				// Unlikely case where we have too much data to fit into a single hwperf packet
++				if (ui32ProcNamePktSize + ui32TotalPayloadSize > RGX_HWPERF_MAX_PAYLOAD_SIZE)
++				{
++					RGXHWPerfHostPostRaw(psDevice, RGX_HWPERF_HOST_CLIENT_INFO, pbPktPayload, ui32TotalPayloadSize);
++
++					psHostClientInfo->uDetail.sProcName.ui32Count = 0U;
++					psProcName = psHostClientInfo->uDetail.sProcName.asProcNames;
++					ui32TotalPayloadSize = RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE;
++				}
++
++				// Set up packet data
++				psHostClientInfo->uDetail.sProcName.ui32Count++;
++				psProcName->uiClientPID = psData->pid;
++				psProcName->ui32Length = ui32NameLen;
++				(void)OSStringLCopy(psProcName->acName, psData->pszProcName, ui32NameLen);
++
++				psProcName = (RGX_HWPERF_HOST_CLIENT_PROC_NAME*)IMG_OFFSET_ADDR(psProcName, ui32ProcNamePktSize);
++				ui32TotalPayloadSize += ui32ProcNamePktSize;
++			}
++
++			OSLockRelease(psDevice->psDeviceNode->hConnectionsLock);
++			RGXHWPerfHostPostRaw(psDevice, RGX_HWPERF_HOST_CLIENT_INFO, pbPktPayload, ui32TotalPayloadSize);
++			OSFreeMem(pbPktPayload);
++		}
++		else
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. Could not allocate memory for RGX_HWPERF_HOST_CLIENT_INFO_DATA packet.", __func__));
++		}
++	}
++}
++
++/* We avoid a holder struct for the fields below, as a struct brings along
++ * padding, packing, and other compiler dependencies, and we want a
++ * contiguous stream of bytes (header+data) for use in TLStreamWrite. See
++ * _HWPerfHostDeferredEventsEmitter().
++ *
++ * A deferred (UFO) packet is represented in memory as:
++ * - IMG_BOOL                  --> Indicates whether a packet write is
++ *                                 "complete" by atomic context or not.
++ * - RGX_HWPERF_V2_PACKET_HDR --.
++ *                              |--> Fed together to TLStreamWrite for the
++ *                              |    deferred packet to be written to the
++ *                              |    HWPerfHost buffer
++ * - RGX_HWPERF_HOST_UFO_DATA---`
++ *
++ * PS: Currently only UFO events are supported in the deferred list */
++#define HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE (sizeof(IMG_BOOL) +\
++		sizeof(RGX_HWPERF_V2_PACKET_HDR) +\
++		sizeof(RGX_HWPERF_HOST_UFO_DATA))
++
++static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData);
++static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
++                                             IMG_UINT32 ui32MaxOrdinal);
++
++/*************************************************************************/ /*!
++@Function RGXHWPerfHostInitOnDemandResources ++ ++@Description This function allocates the HWPerfHost buffer if HWPerf is ++ enabled at driver load time. Otherwise, these buffers are ++ allocated on-demand as and when required. ++ ++@Return PVRSRV_ERROR ++ */ /**************************************************************************/ ++PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo) ++{ ++ PVRSRV_ERROR eError; ++ /* 5 makes space up to "hwperf_host_9999" streams */ ++ IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ if (psRgxDevInfo->hHWPerfHostStream != NULL) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf host stream already initialised")); ++ return PVRSRV_OK; ++ } ++ ++ /* form the HWPerf host stream name, corresponding to this DevNode; which can make sense in the UM */ ++ if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", ++ PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, ++ psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to form HWPerf host stream name for device %d", ++ __func__, ++ psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ eError = TLStreamCreate(&psRgxDevInfo->hHWPerfHostStream, ++ pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize, ++ TL_OPMODE_DROP_NEWER, ++ _HWPerfHostOnConnectCB, psRgxDevInfo, ++ NULL, NULL); ++ PVR_LOG_RETURN_IF_ERROR(eError, "TLStreamCreate"); ++ ++ eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream, ++ PVRSRVGetPVRSRVData()->hTLCtrlStream); ++ /* we can still discover host stream so leave it as is and just log error */ ++ PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream"); ++ ++ /* send the event here because host stream is implicitly opened for write ++ * in TLStreamCreate and TLStreamOpen is never called (so the event is ++ * never emitted) */ ++ eError = TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfHostStream); ++ PVR_LOG_IF_ERROR(eError, "TLStreamMarkStreamOpen"); ++ ++ /* HWPerfHost deferred events specific initialization */ ++ eError = OSInstallMISR(&psRgxDevInfo->pvHostHWPerfMISR, ++ RGX_MISRHandler_HWPerfPostDeferredHostEvents, ++ psRgxDevInfo, ++ "RGX_HWPerfDeferredEventPoster"); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR", err_install_misr); ++ ++ eError = OSSpinLockCreate(&psRgxDevInfo->hHWPerfHostSpinLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate", err_spinlock_create); ++ ++ psRgxDevInfo->pui8DeferredEvents = OSAllocMem(HWPERF_HOST_MAX_DEFERRED_PACKETS ++ * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE); ++ if (NULL == psRgxDevInfo->pui8DeferredEvents) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. 
Could not allocate memory for " ++ "HWPerfHost deferred events array", __func__)); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_alloc_deferred_events; ++ } ++ psRgxDevInfo->ui16DEReadIdx = 0; ++ psRgxDevInfo->ui16DEWriteIdx = 0; ++#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) ++ psRgxDevInfo->ui32DEHighWatermark = 0; ++ psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = 0; ++ psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = 0; ++#endif ++ ++ PVR_DPF((DBGPRIV_MESSAGE, "HWPerf Host buffer size is %uKB", ++ psRgxDevInfo->ui32HWPerfHostBufSize)); ++ ++ return PVRSRV_OK; ++ ++err_alloc_deferred_events: ++ OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); ++ psRgxDevInfo->hHWPerfHostSpinLock = NULL; ++ ++err_spinlock_create: ++ (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); ++ psRgxDevInfo->pvHostHWPerfMISR = NULL; ++ ++err_install_misr: ++ TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); ++ TLStreamClose(psRgxDevInfo->hHWPerfHostStream); ++ psRgxDevInfo->hHWPerfHostStream = NULL; ++ ++ return eError; ++} ++ ++void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo) ++{ ++ PVRSRV_VZ_RETN_IF_MODE(GUEST); ++ ++ PVR_ASSERT (psRgxDevInfo); ++ ++ if (psRgxDevInfo->pui8DeferredEvents) ++ { ++ OSFreeMem(psRgxDevInfo->pui8DeferredEvents); ++ psRgxDevInfo->pui8DeferredEvents = NULL; ++ } ++ ++ if (psRgxDevInfo->hHWPerfHostSpinLock) ++ { ++ OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock); ++ psRgxDevInfo->hHWPerfHostSpinLock = NULL; ++ } ++ ++ if (psRgxDevInfo->pvHostHWPerfMISR) ++ { ++ (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR); ++ psRgxDevInfo->pvHostHWPerfMISR = NULL; ++ } ++ ++ if (psRgxDevInfo->hHWPerfHostStream) ++ { ++ /* send the event here because host stream is implicitly opened for ++ * write in TLStreamCreate and TLStreamClose is never called (so the ++ * event is never emitted) */ ++ TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream); ++ TLStreamClose(psRgxDevInfo->hHWPerfHostStream); ++ psRgxDevInfo->hHWPerfHostStream = NULL; ++ } ++ ++ if (psRgxDevInfo->hLockHWPerfHostStream) ++ { ++ OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream); ++ psRgxDevInfo->hLockHWPerfHostStream = NULL; ++ } ++} ++ ++inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter) ++{ ++ PVRSRV_VZ_RETN_IF_MODE(GUEST); ++ psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter; ++} ++ ++inline IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent) ++{ ++ PVR_ASSERT(psRgxDevInfo); ++ return (psRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent)) ? 
IMG_TRUE : IMG_FALSE; ++} ++ ++#define MAX_RETRY_COUNT 80 ++static inline void _PostFunctionPrologue(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_UINT32 ui32CurrentOrdinal) ++{ ++ IMG_UINT32 ui32Retry = MAX_RETRY_COUNT; ++ ++ PVR_ASSERT(psRgxDevInfo->hLockHWPerfHostStream != NULL); ++ PVR_ASSERT(psRgxDevInfo->hHWPerfHostStream != NULL); ++ ++ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); ++ ++ /* First, flush pending events (if any) */ ++ _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, ui32CurrentOrdinal); ++ ++ while ((ui32CurrentOrdinal != psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1) ++ && (--ui32Retry != 0)) ++ { ++ /* Release lock and give a chance to a waiting context to emit the ++ * expected packet */ ++ OSLockRelease (psRgxDevInfo->hLockHWPerfHostStream); ++ OSSleepms(100); ++ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); ++ } ++ ++#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) ++ if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedPktOrdinalBroke)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Will warn only once! Potential packet(s) lost after ordinal" ++ " %u (Current ordinal = %u)", ++ __func__, ++ psRgxDevInfo->ui32HWPerfHostLastOrdinal, ui32CurrentOrdinal)); ++ psRgxDevInfo->bWarnedPktOrdinalBroke = IMG_TRUE; ++ } ++ ++ if (psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry)) ++ { ++ psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = MAX_RETRY_COUNT - ui32Retry; ++ } ++#endif ++} ++ ++static inline void _PostFunctionEpilogue(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_UINT32 ui32CurrentOrdinal) ++{ ++ /* update last ordinal emitted */ ++ psRgxDevInfo->ui32HWPerfHostLastOrdinal = ui32CurrentOrdinal; ++ ++ PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream)); ++ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); ++} ++ ++static inline IMG_UINT8 *_ReserveHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size) ++{ ++ IMG_UINT8 *pui8Dest; ++ ++ PVRSRV_ERROR eError = TLStreamReserve(psRgxDevInfo->hHWPerfHostStream, ++ &pui8Dest, ui32Size); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer" ++ " (%d). Dropping packet.", ++ __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); ++ return NULL; ++ } ++ PVR_ASSERT(pui8Dest != NULL); ++ ++ return pui8Dest; ++} ++ ++static inline void _CommitHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size) ++{ ++ PVRSRV_ERROR eError = TLStreamCommit(psRgxDevInfo->hHWPerfHostStream, ++ ui32Size); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s" ++ " (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); ++ } ++} ++ ++/* Returns IMG_TRUE if packet write passes, IMG_FALSE otherwise */ ++static inline IMG_BOOL _WriteHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_V2_PACKET_HDR *psHeader) ++{ ++ PVRSRV_ERROR eError = TLStreamWrite(psRgxDevInfo->hHWPerfHostStream, ++ IMG_OFFSET_ADDR(psHeader, 0), psHeader->ui32Size); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not write packet in %s buffer" ++ " (%d). 
Dropping packet.", ++ __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError)); ++ } ++ ++ /* Regardless of whether write passed/failed, we consider it "written" */ ++ psRgxDevInfo->ui32HWPerfHostLastOrdinal = psHeader->ui32Ordinal; ++ ++ return (eError == PVRSRV_OK); ++} ++ ++/* Helper macros for deferred events operations */ ++#define GET_DE_NEXT_IDX(_curridx) ((_curridx + 1) % HWPERF_HOST_MAX_DEFERRED_PACKETS) ++#define GET_DE_EVENT_BASE(_idx) (IMG_OFFSET_ADDR(psRgxDevInfo->pui8DeferredEvents, \ ++ (_idx) * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE)) ++ ++#define GET_DE_EVENT_WRITE_STATUS(_base) ((IMG_BOOL*)((void *)(_base))) ++#define GET_DE_EVENT_DATA(_base) (IMG_OFFSET_ADDR((_base), sizeof(IMG_BOOL))) ++ ++/* Emits HWPerfHost event packets present in the deferred list stopping when one ++ * of the following cases is hit: ++ * case 1: Packet ordering breaks i.e. a packet found doesn't meet ordering ++ * criteria (ordinal == last_ordinal + 1) ++ * ++ * case 2: A packet with ordinal > ui32MaxOrdinal is found ++ * ++ * case 3: Deferred list's (read == write) i.e. no more deferred packets. ++ * ++ * NOTE: Caller must possess the hLockHWPerfHostStream lock before calling ++ * this function.*/ ++static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_UINT32 ui32MaxOrdinal) ++{ ++ RGX_HWPERF_V2_PACKET_HDR *psHeader; ++ IMG_UINT32 ui32Retry; ++ IMG_UINT8 *pui8DeferredEvent; ++ IMG_BOOL *pbPacketWritten; ++ IMG_BOOL bWritePassed; ++ ++ PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream)); ++ ++ while (psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) ++ { ++ pui8DeferredEvent = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEReadIdx); ++ pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8DeferredEvent); ++ psHeader = (RGX_HWPERF_V2_PACKET_HDR*) GET_DE_EVENT_DATA(pui8DeferredEvent); ++ ++ for (ui32Retry = MAX_RETRY_COUNT; !(*pbPacketWritten) && (ui32Retry != 0); ui32Retry--) ++ { ++ /* Packet not yet written, re-check after a while. Wait for a short period as ++ * atomic contexts are generally expected to finish fast */ ++ OSWaitus(10); ++ } ++ ++#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) ++ if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedAtomicCtxPktLost)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Will warn only once. 
Dropping a deferred packet as the atomic context"
++			         " took too long to write it",
++			         __func__));
++			psRgxDevInfo->bWarnedAtomicCtxPktLost = IMG_TRUE;
++		}
++
++		if (psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry))
++		{
++			psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = MAX_RETRY_COUNT - ui32Retry;
++		}
++#endif
++
++		if (*pbPacketWritten)
++		{
++			if ((psHeader->ui32Ordinal > ui32MaxOrdinal) ||
++			    (psHeader->ui32Ordinal != (psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1)))
++			{
++				/* Leave the remaining events to be emitted by the next call to this function */
++				break;
++			}
++			bWritePassed = _WriteHWPerfStream(psRgxDevInfo, psHeader);
++		}
++		else
++		{
++			PVR_DPF((PVR_DBG_MESSAGE, "%s: Atomic context packet lost!", __func__));
++			bWritePassed = IMG_FALSE;
++		}
++
++		/* Move on to the next packet */
++		psRgxDevInfo->ui16DEReadIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEReadIdx);
++
++		if (!bWritePassed                                                   // if write failed
++		    && ui32MaxOrdinal == IMG_UINT32_MAX                             // and we are from MISR
++		    && psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) // and there are more events
++		{
++			/* Stop emitting here and re-schedule the MISR */
++			OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR);
++			break;
++		}
++	}
++}
++
++static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData)
++{
++	PVRSRV_RGXDEV_INFO *psRgxDevInfo = pvData;
++
++	OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
++
++	/* Since we're called from the MISR, there is no upper cap on the ordinal
++	 * to be emitted. Pass IMG_UINT32_MAX to signify all possible packets. */
++	_HWPerfHostDeferredEventsEmitter(psRgxDevInfo, IMG_UINT32_MAX);
++
++	OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
++}
++
++#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
++static inline void _UpdateDEBufferHighWatermark(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
++{
++	IMG_UINT32 ui32DEWatermark;
++	IMG_UINT16 ui16LRead = psRgxDevInfo->ui16DEReadIdx;
++	IMG_UINT16 ui16LWrite = psRgxDevInfo->ui16DEWriteIdx;
++
++	if (ui16LWrite >= ui16LRead)
++	{
++		ui32DEWatermark = ui16LWrite - ui16LRead;
++	}
++	else
++	{
++		ui32DEWatermark = (HWPERF_HOST_MAX_DEFERRED_PACKETS - ui16LRead) + (ui16LWrite);
++	}
++
++	if (ui32DEWatermark > psRgxDevInfo->ui32DEHighWatermark)
++	{
++		psRgxDevInfo->ui32DEHighWatermark = ui32DEWatermark;
++	}
++}
++#endif
++
++/* @Description Gets the data/members that concern the accuracy of a packet in
++                the HWPerfHost buffer. Since the data returned by this function
++                is required in both an atomic and a process/sleepable context,
++                it is protected by a spinlock.
++
++   @Output pui32Ordinal   Pointer to the ordinal number assigned to this packet
++   @Output pui64Timestamp Timestamp value for this packet
++   @Output ppui8Dest      If the current context cannot sleep, pointer to a place in
++                          the deferred events buffer where the packet data should be
++                          written. Don't care, otherwise.
++ */ ++static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_UINT32 *pui32Ordinal, ++ IMG_UINT64 *pui64Timestamp, ++ IMG_UINT8 **ppui8Dest, ++ IMG_BOOL bSleepAllowed) ++{ ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++ /* Spin lock is required to avoid getting scheduled out by a higher priority ++ * context while we're getting header specific details and packet place in ++ * HWPerf buffer (when in atomic context) for ourselves */ ++ OSSpinLockAcquire(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); ++ ++ *pui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal++; ++ *pui64Timestamp = RGXTimeCorrGetClockus64(psRgxDevInfo->psDeviceNode); ++ ++ if (!bSleepAllowed) ++ { ++ /* We're in an atomic context. So return the next position available in ++ * deferred events buffer */ ++ IMG_UINT16 ui16NewWriteIdx; ++ IMG_BOOL *pbPacketWritten; ++ ++ PVR_ASSERT(ppui8Dest != NULL); ++ ++ ui16NewWriteIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEWriteIdx); ++ if (ui16NewWriteIdx == psRgxDevInfo->ui16DEReadIdx) ++ { ++ /* This shouldn't happen. HWPERF_HOST_MAX_DEFERRED_PACKETS should be ++ * big enough to avoid any such scenario */ ++#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) ++ /* PVR_LOG/printk isn't recommended in atomic context. Perhaps we'll do ++ * this debug output here when trace_printk support is added to DDK */ ++// PVR_LOG(("%s: No more space in deferred events buffer (%u/%u) W=%u,R=%u", ++// __func__, psRgxDevInfo->ui32DEHighWatermark, ++// HWPERF_HOST_MAX_DEFERRED_PACKETS, psRgxDevInfo->ui16DEWriteIdx, ++// psRgxDevInfo->ui16DEReadIdx)); ++#endif ++ *ppui8Dest = NULL; ++ } ++ else ++ { ++ /* Return the position where deferred event would be written */ ++ *ppui8Dest = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEWriteIdx); ++ ++ /* Make sure packet write "state" is "write-pending" _before_ moving write ++ * pointer forward */ ++ pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(*ppui8Dest); ++ *pbPacketWritten = IMG_FALSE; ++ ++ psRgxDevInfo->ui16DEWriteIdx = ui16NewWriteIdx; ++ ++#if defined(PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS) ++ _UpdateDEBufferHighWatermark(psRgxDevInfo); ++#endif ++ } ++ } ++ ++ OSSpinLockRelease(psRgxDevInfo->hHWPerfHostSpinLock, uiFlags); ++} ++ ++static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_UINT8 *pui8Dest, ++ RGX_HWPERF_HOST_EVENT_TYPE eEvType, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32Ordinal, ++ IMG_UINT64 ui64Timestamp) ++{ ++ RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) ((void *)pui8Dest); ++ ++ PVR_ASSERT(ui32Size<=RGX_HWPERF_MAX_PACKET_SIZE); ++ ++ psHeader->ui32Ordinal = ui32Ordinal; ++ psHeader->ui64Timestamp = ui64Timestamp; ++ psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG; ++ psHeader->eTypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST, ++ eEvType, 0, 0, 0); ++ psHeader->ui32Size = ui32Size; ++} ++ ++static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest, ++ RGX_HWPERF_KICK_TYPE eEnqType, ++ IMG_UINT32 ui32Pid, ++ IMG_UINT32 ui32FWDMContext, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ PVRSRV_FENCE hCheckFence, ++ PVRSRV_FENCE hUpdateFence, ++ PVRSRV_TIMELINE hUpdateTimeline, ++ IMG_UINT64 ui64CheckFenceUID, ++ IMG_UINT64 ui64UpdateFenceUID, ++ IMG_UINT64 ui64DeadlineInus, ++ IMG_UINT32 ui32CycleEstimate) ++{ ++ RGX_HWPERF_HOST_ENQ_DATA *psData = (RGX_HWPERF_HOST_ENQ_DATA *) ++ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ psData->ui32EnqType = eEnqType; ++ psData->ui32PID = ui32Pid; ++ psData->ui32ExtJobRef = ui32ExtJobRef; ++ 
psData->ui32IntJobRef = ui32IntJobRef; ++ psData->ui32DMContext = ui32FWDMContext; ++ psData->hCheckFence = hCheckFence; ++ psData->hUpdateFence = hUpdateFence; ++ psData->hUpdateTimeline = hUpdateTimeline; ++ psData->ui64CheckFence_UID = ui64CheckFenceUID; ++ psData->ui64UpdateFence_UID = ui64UpdateFenceUID; ++ psData->ui64DeadlineInus = ui64DeadlineInus; ++ psData->ui32CycleEstimate = ui32CycleEstimate; ++} ++ ++void RGXHWPerfHostPostRaw(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_HOST_EVENT_TYPE eEvType, ++ IMG_BYTE *pbPayload, ++ IMG_UINT32 ui32PayloadSize) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32PktSize; ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ ++ PVR_ASSERT(ui32PayloadSize <= RGX_HWPERF_MAX_PAYLOAD_SIZE); ++ ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ ui32PktSize = RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32PayloadSize); ++ pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32PktSize); ++ ++ if (pui8Dest == NULL) ++ { ++ goto cleanup; ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, eEvType, ui32PktSize, ui32Ordinal, ui64Timestamp); ++ OSDeviceMemCopy((IMG_UINT8*)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)), pbPayload, ui32PayloadSize); ++ _CommitHWPerfStream(psRgxDevInfo, ui32PktSize); ++ ++cleanup: ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++} ++ ++void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_KICK_TYPE eEnqType, ++ IMG_UINT32 ui32Pid, ++ IMG_UINT32 ui32FWDMContext, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ PVRSRV_FENCE hCheckFence, ++ PVRSRV_FENCE hUpdateFence, ++ PVRSRV_TIMELINE hUpdateTimeline, ++ IMG_UINT64 ui64CheckFenceUID, ++ IMG_UINT64 ui64UpdateFenceUID, ++ IMG_UINT64 ui64DeadlineInus, ++ IMG_UINT32 ui32CycleEstimate ) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA); ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, ++ NULL, IMG_TRUE); ++ ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) ++ { ++ goto cleanup; ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size, ++ ui32Ordinal, ui64Timestamp); ++ _SetupHostEnqPacketData(pui8Dest, ++ eEnqType, ++ ui32Pid, ++ ui32FWDMContext, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ hCheckFence, ++ hUpdateFence, ++ hUpdateTimeline, ++ ui64CheckFenceUID, ++ ui64UpdateFenceUID, ++ ui64DeadlineInus, ++ ui32CycleEstimate); ++ ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ ++cleanup: ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++} ++ ++static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType) ++{ ++ IMG_UINT32 ui32Size = ++ (IMG_UINT32) offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData); ++ RGX_HWPERF_UFO_DATA_ELEMENT *puData; ++ ++ switch (eUfoType) ++ { ++ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: ++ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: ++ ui32Size += sizeof(puData->sCheckSuccess); ++ break; ++ case RGX_HWPERF_UFO_EV_CHECK_FAIL: ++ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: ++ ui32Size += sizeof(puData->sCheckFail); ++ break; ++ case RGX_HWPERF_UFO_EV_UPDATE: ++ ui32Size += sizeof(puData->sUpdate); ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" ++ " event type")); ++ 
PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++ ++ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); ++} ++ ++static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest, ++ RGX_HWPERF_UFO_EV eUfoType, ++ RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData) ++{ ++ RGX_HWPERF_HOST_UFO_DATA *psData = (RGX_HWPERF_HOST_UFO_DATA *) ++ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) ++ psData->aui32StreamData; ++ ++ psData->eEvType = eUfoType; ++ /* HWPerfHost always emits 1 UFO at a time, since each UFO has 1-to-1 mapping ++ * with an underlying DevNode, and each DevNode has a dedicated HWPerf buffer */ ++ psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(1, ++ offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData)); ++ ++ switch (eUfoType) ++ { ++ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS: ++ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS: ++ puData->sCheckSuccess.ui32FWAddr = ++ psUFOData->sCheckSuccess.ui32FWAddr; ++ puData->sCheckSuccess.ui32Value = ++ psUFOData->sCheckSuccess.ui32Value; ++ break; ++ case RGX_HWPERF_UFO_EV_CHECK_FAIL: ++ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL: ++ puData->sCheckFail.ui32FWAddr = ++ psUFOData->sCheckFail.ui32FWAddr; ++ puData->sCheckFail.ui32Value = ++ psUFOData->sCheckFail.ui32Value; ++ puData->sCheckFail.ui32Required = ++ psUFOData->sCheckFail.ui32Required; ++ break; ++ case RGX_HWPERF_UFO_EV_UPDATE: ++ puData->sUpdate.ui32FWAddr = ++ psUFOData->sUpdate.ui32FWAddr; ++ puData->sUpdate.ui32OldValue = ++ psUFOData->sUpdate.ui32OldValue; ++ puData->sUpdate.ui32NewValue = ++ psUFOData->sUpdate.ui32NewValue; ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO" ++ " event type")); ++ PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++} ++ ++void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_UFO_EV eUfoType, ++ RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData, ++ const IMG_BOOL bSleepAllowed) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType); ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ IMG_BOOL *pbPacketWritten = NULL; ++ ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, ++ &pui8Dest, bSleepAllowed); ++ ++ if (bSleepAllowed) ++ { ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) ++ { ++ goto cleanup; ++ } ++ } ++ else ++ { ++ if (pui8Dest == NULL) ++ { ++ // Give-up if we couldn't get a place in deferred events buffer ++ goto cleanup; ++ } ++ pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8Dest); ++ pui8Dest = GET_DE_EVENT_DATA(pui8Dest); ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size, ++ ui32Ordinal, ui64Timestamp); ++ _SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData); ++ ++ if (bSleepAllowed) ++ { ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ } ++ else ++ { ++ *pbPacketWritten = IMG_TRUE; ++ OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR); ++ } ++ ++cleanup: ++ if (bSleepAllowed) ++ { ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++ } ++} ++ ++#define UNKNOWN_SYNC_NAME "UnknownSync" ++ ++static_assert(PVRSRV_SYNC_NAME_LENGTH==PVRSRV_SYNC_NAME_LENGTH, "Sync class name max does not match Fence Sync name max"); ++ ++static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize( ++ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, ++ const IMG_CHAR **ppsName, ++ IMG_UINT32 *ui32NameSize) ++{ ++ 
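++	/* Sizing sketch, summarising the computation below: each uAllocDetail
++	 * member ends in a fixed acName[PVRSRV_SYNC_NAME_LENGTH] array, but the
++	 * emitted packet is variable-sized and carries only the bytes of the
++	 * (possibly truncated) name actually supplied:
++	 *
++	 *   size = offsetof(RGX_HWPERF_HOST_ALLOC_DATA, uAllocDetail)
++	 *        + sizeof(<union member>) - PVRSRV_SYNC_NAME_LENGTH + *ui32NameSize
++	 *
++	 * RGX_HWPERF_MAKE_SIZE_VARIABLE() then encodes that length for the
++	 * packet's ui32Size header field. */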
RGX_HWPERF_HOST_ALLOC_DATA *psData; ++ IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_ALLOC_DATA, uAllocDetail); ++ ++ if (*ppsName != NULL && *ui32NameSize > 0) ++ { ++ /* if string longer than maximum cut it (leave space for '\0') */ ++ if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) ++ *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostAllocEvent: Invalid" ++ " resource name given.")); ++ *ppsName = UNKNOWN_SYNC_NAME; ++ *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME); ++ } ++ ++ switch (eAllocType) ++ { ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: ++ ui32Size += sizeof(psData->uAllocDetail.sSyncAlloc) - PVRSRV_SYNC_NAME_LENGTH + ++ *ui32NameSize; ++ break; ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: ++ ui32Size += sizeof(psData->uAllocDetail.sFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + ++ *ui32NameSize; ++ break; ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: ++ ui32Size += sizeof(psData->uAllocDetail.sSWFenceAlloc) - PVRSRV_SYNC_NAME_LENGTH + ++ *ui32NameSize; ++ break; ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: ++ ui32Size += sizeof(psData->uAllocDetail.sSyncCheckPointAlloc) - PVRSRV_SYNC_NAME_LENGTH + ++ *ui32NameSize; ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); ++ PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++ ++ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); ++} ++ ++static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest, ++ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, ++ RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail, ++ const IMG_CHAR *psName, ++ IMG_UINT32 ui32NameSize) ++{ ++ RGX_HWPERF_HOST_ALLOC_DATA *psData = (RGX_HWPERF_HOST_ALLOC_DATA *) ++ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ ++ IMG_CHAR *acName = NULL; ++ ++ psData->ui32AllocType = eAllocType; ++ ++ switch (eAllocType) ++ { ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: ++ psData->uAllocDetail.sSyncAlloc = puAllocDetail->sSyncAlloc; ++ acName = psData->uAllocDetail.sSyncAlloc.acName; ++ break; ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: ++ psData->uAllocDetail.sFenceAlloc = puAllocDetail->sFenceAlloc; ++ acName = psData->uAllocDetail.sFenceAlloc.acName; ++ break; ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW: ++ psData->uAllocDetail.sSWFenceAlloc = puAllocDetail->sSWFenceAlloc; ++ acName = psData->uAllocDetail.sSWFenceAlloc.acName; ++ break; ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: ++ psData->uAllocDetail.sSyncCheckPointAlloc = puAllocDetail->sSyncCheckPointAlloc; ++ acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName; ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXHWPerfHostPostAllocEvent: Invalid alloc event type")); ++ PVR_ASSERT(IMG_FALSE); ++ } ++ ++ ++ if (acName != NULL) ++ { ++ if (ui32NameSize) ++ { ++ OSStringLCopy(acName, psName, ui32NameSize); ++ } ++ else ++ { ++ /* In case no name was given make sure we don't access random ++ * memory */ ++ acName[0] = '\0'; ++ } ++ } ++} ++ ++void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo, ++ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, ++ const IMG_CHAR *psName, ++ IMG_UINT32 ui32NameSize, ++ RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT64 ui64Timestamp; ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT32 ui32Size = _FixNameAndCalculateHostAllocPacketSize(eAllocType, ++ &psName, ++ &ui32NameSize); ++ ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, 
&ui64Timestamp, ++ NULL, IMG_TRUE); ++ ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) ++ { ++ goto cleanup; ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size, ++ ui32Ordinal, ui64Timestamp); ++ ++ _SetupHostAllocPacketData(pui8Dest, ++ eAllocType, ++ puAllocDetail, ++ psName, ++ ui32NameSize); ++ ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ ++cleanup: ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++} ++ ++static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest, ++ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, ++ IMG_UINT64 ui64UID, ++ IMG_UINT32 ui32PID, ++ IMG_UINT32 ui32FWAddr) ++{ ++ RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *) ++ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ ++ psData->ui32FreeType = eFreeType; ++ ++ switch (eFreeType) ++ { ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC: ++ psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr; ++ break; ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: ++ psData->uFreeDetail.sFenceDestroy.ui64Fence_UID = ui64UID; ++ break; ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP: ++ psData->uFreeDetail.sSyncCheckPointFree.ui32CheckPt_FWAddr = ui32FWAddr; ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXHWPerfHostPostFreeEvent: Invalid free event type")); ++ PVR_ASSERT(IMG_FALSE); ++ } ++} ++ ++void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, ++ IMG_UINT64 ui64UID, ++ IMG_UINT32 ui32PID, ++ IMG_UINT32 ui32FWAddr) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA); ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, ++ NULL, IMG_TRUE); ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) ++ { ++ goto cleanup; ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size, ++ ui32Ordinal, ui64Timestamp); ++ _SetupHostFreePacketData(pui8Dest, ++ eFreeType, ++ ui64UID, ++ ui32PID, ++ ui32FWAddr); ++ ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ ++cleanup: ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++} ++ ++static inline IMG_UINT32 _FixNameAndCalculateHostModifyPacketSize( ++ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, ++ const IMG_CHAR **ppsName, ++ IMG_UINT32 *ui32NameSize) ++{ ++ RGX_HWPERF_HOST_MODIFY_DATA *psData; ++ RGX_HWPERF_HOST_MODIFY_DETAIL *puData; ++ IMG_UINT32 ui32Size = sizeof(psData->ui32ModifyType); ++ ++ if (*ppsName != NULL && *ui32NameSize > 0) ++ { ++ /* first strip the terminator */ ++ if ((*ppsName)[*ui32NameSize - 1] == '\0') ++ *ui32NameSize -= 1; ++ /* if string longer than maximum cut it (leave space for '\0') */ ++ if (*ui32NameSize >= PVRSRV_SYNC_NAME_LENGTH) ++ *ui32NameSize = PVRSRV_SYNC_NAME_LENGTH - 1; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostModifyEvent: Invalid" ++ " resource name given.")); ++ *ppsName = UNKNOWN_SYNC_NAME; ++ *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1; ++ } ++ ++ switch (eModifyType) ++ { ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: ++ ui32Size += sizeof(puData->sFenceMerge) - PVRSRV_SYNC_NAME_LENGTH + ++ *ui32NameSize + 1; /* +1 for '\0' */ ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, 
++ "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); ++ PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++ ++ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); ++} ++ ++static inline void _SetupHostModifyPacketData(IMG_UINT8 *pui8Dest, ++ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, ++ IMG_UINT64 ui64NewUID, ++ IMG_UINT64 ui64UID1, ++ IMG_UINT64 ui64UID2, ++ const IMG_CHAR *psName, ++ IMG_UINT32 ui32NameSize) ++{ ++ RGX_HWPERF_HOST_MODIFY_DATA *psData = (RGX_HWPERF_HOST_MODIFY_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ ++ IMG_CHAR *acName = NULL; ++ ++ psData->ui32ModifyType = eModifyType; ++ ++ switch (eModifyType) ++ { ++ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR: ++ psData->uModifyDetail.sFenceMerge.ui64NewFence_UID = ui64NewUID; ++ psData->uModifyDetail.sFenceMerge.ui64InFence1_UID = ui64UID1; ++ psData->uModifyDetail.sFenceMerge.ui64InFence2_UID = ui64UID2; ++ acName = psData->uModifyDetail.sFenceMerge.acName; ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXHWPerfHostPostModifyEvent: Invalid modify event type")); ++ PVR_ASSERT(IMG_FALSE); ++ } ++ ++ if (acName != NULL) ++ { ++ if (ui32NameSize) ++ { ++ OSStringLCopy(acName, psName, ui32NameSize); ++ } ++ else ++ { ++ /* In case no name was given make sure we don't access random ++ * memory */ ++ acName[0] = '\0'; ++ } ++ } ++} ++ ++void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, ++ IMG_UINT64 ui64NewUID, ++ IMG_UINT64 ui64UID1, ++ IMG_UINT64 ui64UID2, ++ const IMG_CHAR *psName, ++ IMG_UINT32 ui32NameSize) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT64 ui64Timestamp; ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT32 ui32Size = _FixNameAndCalculateHostModifyPacketSize(eModifyType, ++ &psName, ++ &ui32NameSize); ++ ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, ++ NULL, IMG_TRUE); ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) ++ { ++ goto cleanup; ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size, ++ ui32Ordinal, ui64Timestamp); ++ _SetupHostModifyPacketData(pui8Dest, ++ eModifyType, ++ ui64NewUID, ++ ui64UID1, ++ ui64UID2, ++ psName, ++ ui32NameSize); ++ ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ ++cleanup: ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++} ++ ++static inline void _SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT8 *pui8Dest) ++{ ++ RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *) ++ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb; ++ IMG_UINT32 ui32CurrIdx = ++ RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); ++ RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx]; ++ ++ psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp; ++ psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp; ++ psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed; ++} ++ ++void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32Size = ++ RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA); ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ ++ /* if the buffer for time correlation data is not yet available (possibly ++ * device not initialised yet) skip this event */ ++ if 
(psRgxDevInfo->psRGXFWIfGpuUtilFWCb == NULL) ++ { ++ return; ++ } ++ ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, ++ NULL, IMG_TRUE); ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) ++ { ++ goto cleanup; ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size, ++ ui32Ordinal, ui64Timestamp); ++ _SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest); ++ ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ ++cleanup: ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++} ++ ++static inline RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS _ConvDeviceHealthStatus(PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus) ++{ ++ switch (eDeviceHealthStatus) ++ { ++ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; ++ case PVRSRV_DEVICE_HEALTH_STATUS_OK: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_OK; ++ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_RESPONDING; ++ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_DEAD; ++ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_FAULT; ++ default: return RGX_HWPERF_HOST_DEVICE_HEALTH_STATUS_UNDEFINED; ++ } ++} ++ ++static inline RGX_HWPERF_HOST_DEVICE_HEALTH_REASON _ConvDeviceHealthReason(PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) ++{ ++ switch (eDeviceHealthReason) ++ { ++ case PVRSRV_DEVICE_HEALTH_REASON_NONE: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_NONE; ++ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_ASSERTED; ++ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_POLL_FAILING; ++ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_TIMEOUTS; ++ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_CORRUPT; ++ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_QUEUE_STALLED; ++ case PVRSRV_DEVICE_HEALTH_REASON_IDLING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_IDLING; ++ case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_RESTARTING; ++ case PVRSRV_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS:return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_MISSING_INTERRUPTS; ++ default: return RGX_HWPERF_HOST_DEVICE_HEALTH_REASON_UNDEFINED; ++ } ++} ++ ++static inline void _SetupHostDeviceInfoPacketData(RGX_HWPERF_DEV_INFO_EV eEvType, ++ PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, ++ PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason, ++ IMG_UINT8 *pui8Dest) ++{ ++ RGX_HWPERF_HOST_DEV_INFO_DATA *psData = (RGX_HWPERF_HOST_DEV_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ psData->eEvType = eEvType; ++ ++ switch (eEvType) ++ { ++ case RGX_HWPERF_DEV_INFO_EV_HEALTH: ++ psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthStatus = _ConvDeviceHealthStatus(eDeviceHealthStatus); ++ psData->uDevInfoDetail.sDeviceStatus.eDeviceHealthReason = _ConvDeviceHealthReason(eDeviceHealthReason); ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); ++ PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++} ++ ++static inline IMG_UINT32 _CalculateHostDeviceInfoPacketSize(RGX_HWPERF_DEV_INFO_EV eEvType) ++{ ++ IMG_UINT32 ui32Size = 
offsetof(RGX_HWPERF_HOST_DEV_INFO_DATA, uDevInfoDetail); ++ ++ switch (eEvType) ++ { ++ case RGX_HWPERF_DEV_INFO_EV_HEALTH: ++ ui32Size += sizeof(((RGX_HWPERF_HOST_DEV_INFO_DATA*)0)->uDevInfoDetail.sDeviceStatus); ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostDeviceInfo: Invalid event type")); ++ PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); ++} ++ ++void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_DEV_INFO_EV eEvType, ++ PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, ++ PVRSRV_DEVICE_HEALTH_REASON eDeviceHealthReason) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ IMG_UINT32 ui32Size; ++ ++ OSLockAcquire(psRgxDevInfo->hHWPerfLock); ++ ++ if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) ++ { ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ui32Size = _CalculateHostDeviceInfoPacketSize(eEvType); ++ ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) ++ { ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_DEV_INFO, ui32Size, ui32Ordinal, ui64Timestamp); ++ _SetupHostDeviceInfoPacketData(eEvType, eDeviceHealthStatus, eDeviceHealthReason, pui8Dest); ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ } ++ ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++ } ++ ++ OSLockRelease(psRgxDevInfo->hHWPerfLock); ++} ++ ++static inline void _SetupHostInfoPacketData(RGX_HWPERF_INFO_EV eEvType, ++ IMG_UINT32 ui32TotalMemoryUsage, ++ IMG_UINT32 ui32LivePids, ++ PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage, ++ IMG_UINT8 *pui8Dest) ++{ ++ IMG_INT i; ++ RGX_HWPERF_HOST_INFO_DATA *psData = (RGX_HWPERF_HOST_INFO_DATA *)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ psData->eEvType = eEvType; ++ ++ switch (eEvType) ++ { ++ case RGX_HWPERF_INFO_EV_MEM_USAGE: ++ psData->uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage = ui32TotalMemoryUsage; ++ ++ if (psPerProcessMemUsage) ++ { ++ for (i = 0; i < ui32LivePids; ++i) ++ { ++ psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32Pid = psPerProcessMemUsage[i].ui32Pid; ++ psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32KernelMemUsage = psPerProcessMemUsage[i].ui32KernelMemUsage; ++ psData->uInfoDetail.sMemUsageStats.sPerProcessUsage[i].ui32GraphicsMemUsage = psPerProcessMemUsage[i].ui32GraphicsMemUsage; ++ } ++ } ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); ++ PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++} ++ ++static inline IMG_UINT32 _CalculateHostInfoPacketSize(RGX_HWPERF_INFO_EV eEvType, ++ IMG_UINT32 *pui32TotalMemoryUsage, ++ IMG_UINT32 *pui32LivePids, ++ PVRSRV_PER_PROCESS_MEM_USAGE **ppsPerProcessMemUsage) ++{ ++ IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail); ++ ++ switch (eEvType) ++ { ++ case RGX_HWPERF_INFO_EV_MEM_USAGE: ++#if !defined(__QNXNTO__) ++ if (PVRSRVGetProcessMemUsage(pui32TotalMemoryUsage, pui32LivePids, ppsPerProcessMemUsage) == PVRSRV_OK) ++ { ++ ui32Size += ((offsetof(RGX_HWPERF_HOST_INFO_DATA, uInfoDetail.sMemUsageStats.ui32TotalMemoryUsage) - ui32Size) ++ + ((*pui32LivePids) * sizeof(((RGX_HWPERF_HOST_INFO_DATA*)0)->uInfoDetail.sMemUsageStats.sPerProcessUsage))); ++ } ++#else ++ PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this 
platform")); ++#endif ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostInfo: Invalid event type")); ++ PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); ++} ++ ++void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_INFO_EV eEvType) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32Size; ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ IMG_UINT32 ui32TotalMemoryUsage = 0; ++ PVRSRV_PER_PROCESS_MEM_USAGE *psPerProcessMemUsage = NULL; ++ IMG_UINT32 ui32LivePids = 0; ++ ++ OSLockAcquire(psRgxDevInfo->hHWPerfLock); ++ ++ if (psRgxDevInfo->hHWPerfHostStream != (IMG_HANDLE) NULL) ++ { ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ ui32Size = _CalculateHostInfoPacketSize(eEvType, &ui32TotalMemoryUsage, &ui32LivePids, &psPerProcessMemUsage); ++ ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) != NULL) ++ { ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_INFO, ui32Size, ui32Ordinal, ui64Timestamp); ++ _SetupHostInfoPacketData(eEvType, ui32TotalMemoryUsage, ui32LivePids, psPerProcessMemUsage, pui8Dest); ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ } ++ ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++ ++ if (psPerProcessMemUsage) ++ OSFreeMemNoStats(psPerProcessMemUsage); // psPerProcessMemUsage was allocated with OSAllocZMemNoStats ++ } ++ ++ OSLockRelease(psRgxDevInfo->hHWPerfLock); ++} ++ ++static inline IMG_UINT32 ++_CalculateHostFenceWaitPacketSize(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType) ++{ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psSizeCalculator; ++ IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA, uDetail); ++ ++ switch (eWaitType) ++ { ++ case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: ++ ui32Size += sizeof(psSizeCalculator->uDetail.sBegin); ++ break; ++ case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: ++ ui32Size += sizeof(psSizeCalculator->uDetail.sEnd); ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid wait event type (%u)", __func__, ++ eWaitType)); ++ PVR_ASSERT(IMG_FALSE); ++ break; ++ } ++ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); ++} ++ ++static inline void ++_SetupHostFenceWaitPacketData(IMG_UINT8 *pui8Dest, ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eWaitType, ++ IMG_PID uiPID, ++ PVRSRV_FENCE hFence, ++ IMG_UINT32 ui32Data) ++{ ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *psData = (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_DATA *) ++ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ ++ psData->eType = eWaitType; ++ psData->uiPID = uiPID; ++ psData->hFence = hFence; ++ ++ switch (eWaitType) ++ { ++ case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_BEGIN: ++ psData->uDetail.sBegin.ui32TimeoutInMs = ui32Data; ++ break; ++ case RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_END: ++ psData->uDetail.sEnd.eResult = ++ (RGX_HWPERF_HOST_SYNC_FENCE_WAIT_RESULT) ui32Data; ++ break; ++ default: ++ // unknown type - this should never happen ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid fence-wait event type", __func__)); ++ PVR_ASSERT(IMG_FALSE); ++ } ++} ++ ++void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType, ++ IMG_PID uiPID, ++ PVRSRV_FENCE hFence, ++ IMG_UINT32 ui32Data) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32Size; ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ ++ 
_GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, ++ NULL, IMG_TRUE); ++ ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ ui32Size = _CalculateHostFenceWaitPacketSize(eType); ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) ++ { ++ goto cleanup; ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_FENCE_WAIT, ++ ui32Size, ui32Ordinal, ui64Timestamp); ++ _SetupHostFenceWaitPacketData(pui8Dest, eType, uiPID, hFence, ui32Data); ++ ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ ++cleanup: ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++} ++ ++static inline IMG_UINT32 _CalculateHostSWTimelineAdvPacketSize(void) ++{ ++ IMG_UINT32 ui32Size = sizeof(RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA); ++ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size); ++} ++ ++static inline void ++_SetupHostSWTimelineAdvPacketData(IMG_UINT8 *pui8Dest, ++ IMG_PID uiPID, ++ PVRSRV_TIMELINE hSWTimeline, ++ IMG_UINT64 ui64SyncPtIndex) ++ ++{ ++ RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *psData = (RGX_HWPERF_HOST_SYNC_SW_TL_ADV_DATA *) ++ IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ ++ psData->uiPID = uiPID; ++ psData->hTimeline = hSWTimeline; ++ psData->ui64SyncPtIndex = ui64SyncPtIndex; ++} ++ ++void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_PID uiPID, ++ PVRSRV_TIMELINE hSWTimeline, ++ IMG_UINT64 ui64SyncPtIndex) ++{ ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32Size; ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, ++ NULL, IMG_TRUE); ++ ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ ui32Size = _CalculateHostSWTimelineAdvPacketSize(); ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) ++ { ++ goto cleanup; ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE, ++ ui32Size, ui32Ordinal, ui64Timestamp); ++ _SetupHostSWTimelineAdvPacketData(pui8Dest, uiPID, hSWTimeline, ui64SyncPtIndex); ++ ++ _CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ ++cleanup: ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++ ++} ++ ++void RGXHWPerfHostPostClientInfoProcName(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_PID uiPID, ++ const IMG_CHAR *psName) ++{ ++ RGX_HWPERF_HOST_CLIENT_INFO_DATA* psPkt; ++ IMG_UINT8 *pui8Dest; ++ IMG_UINT32 ui32Size; ++ IMG_UINT32 ui32NameLen; ++ IMG_UINT32 ui32Ordinal; ++ IMG_UINT64 ui64Timestamp; ++ ++ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp, NULL, IMG_TRUE); ++ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); ++ ++ ui32NameLen = OSStringLength(psName) + 1U; ++ ui32Size = RGX_HWPERF_MAKE_SIZE_VARIABLE(RGX_HWPERF_HOST_CLIENT_INFO_PROC_NAME_BASE_SIZE ++ + RGX_HWPERF_HOST_CLIENT_PROC_NAME_SIZE(ui32NameLen)); ++ ++ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL) ++ { ++ goto cleanup; ++ } ++ ++ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLIENT_INFO, ++ ui32Size, ui32Ordinal, ui64Timestamp); ++ ++ psPkt = (RGX_HWPERF_HOST_CLIENT_INFO_DATA*)IMG_OFFSET_ADDR(pui8Dest, sizeof(RGX_HWPERF_V2_PACKET_HDR)); ++ psPkt->eType = RGX_HWPERF_HOST_CLIENT_INFO_TYPE_PROCESS_NAME; ++ psPkt->uDetail.sProcName.ui32Count = 1U; ++ psPkt->uDetail.sProcName.asProcNames[0].uiClientPID = uiPID; ++ psPkt->uDetail.sProcName.asProcNames[0].ui32Length = ui32NameLen; ++ (void)OSStringLCopy(psPkt->uDetail.sProcName.asProcNames[0].acName, psName, ui32NameLen); ++ ++ 
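++	/* Note: ui32NameLen (OSStringLength(psName) + 1) deliberately counts the
++	 * terminating NUL, and the copy above is bounded by that same value, so
++	 * readers can treat acName as a NUL-terminated string. */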
_CommitHWPerfStream(psRgxDevInfo, ui32Size); ++ ++cleanup: ++ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); ++} ++ ++/****************************************************************************** ++ * Currently only implemented on Linux. Feature can be enabled to provide ++ * an interface to 3rd-party kernel modules that wish to access the ++ * HWPerf data. The API is documented in the rgxapi_km.h header and ++ * the rgx_hwperf* headers. ++ *****************************************************************************/ ++ ++/* Internal HWPerf kernel connection/device data object to track the state ++ * of a client session. ++ */ ++typedef struct ++{ ++ PVRSRV_DEVICE_NODE* psRgxDevNode; ++ PVRSRV_RGXDEV_INFO* psRgxDevInfo; ++ ++ /* TL Open/close state */ ++ IMG_HANDLE hSD[RGX_HWPERF_MAX_STREAM_ID]; ++ ++ /* TL Acquire/release state */ ++ IMG_PBYTE pHwpBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer returned to user in acquire call */ ++ IMG_PBYTE pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to end of HwpBuf */ ++ IMG_PBYTE pTlBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer obtained via TlAcquireData */ ++ IMG_PBYTE pTlBufPos[RGX_HWPERF_MAX_STREAM_ID]; /*!< initial position in TlBuf to acquire packets */ ++ IMG_PBYTE pTlBufRead[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to the last packet read */ ++ IMG_UINT32 ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID]; /*!< length of acquired TlBuf */ ++ IMG_BOOL bRelease[RGX_HWPERF_MAX_STREAM_ID]; /*!< used to determine whether or not to release currently held TlBuf */ ++ ++ ++} RGX_KM_HWPERF_DEVDATA; ++ ++PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ RGX_KM_HWPERF_DEVDATA *psDevData; ++ RGX_HWPERF_DEVICE *psNewHWPerfDevice; ++ RGX_HWPERF_CONNECTION* psHWPerfConnection; ++ IMG_BOOL bFWActive = IMG_FALSE; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ /* avoid uninitialised data */ ++ PVR_ASSERT(*ppsHWPerfConnection == NULL); ++ PVR_ASSERT(psPVRSRVData); ++ ++ /* Allocate connection object */ ++ psHWPerfConnection = OSAllocZMem(sizeof(*psHWPerfConnection)); ++ if (!psHWPerfConnection) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ /* early save the return pointer to aid clean-up if failure occurs */ ++ *ppsHWPerfConnection = psHWPerfConnection; ++ ++ OSWRLockAcquireRead(psPVRSRVData->hDeviceNodeListLock); ++ psDeviceNode = psPVRSRVData->psDeviceNodeList; ++ ++ while (psDeviceNode) ++ { ++ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: HWPerf: Device not currently active. 
ID:%u", ++ __func__, ++ psDeviceNode->sDevId.i32OsDeviceID)); ++ psDeviceNode = psDeviceNode->psNext; ++ continue; ++ } ++ /* Create a list node to be attached to connection object's list */ ++ psNewHWPerfDevice = OSAllocMem(sizeof(*psNewHWPerfDevice)); ++ if (!psNewHWPerfDevice) ++ { ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ /* Insert node at head of the list */ ++ psNewHWPerfDevice->psNext = psHWPerfConnection->psHWPerfDevList; ++ psHWPerfConnection->psHWPerfDevList = psNewHWPerfDevice; ++ ++ /* create a device data object for kernel server */ ++ psDevData = OSAllocZMem(sizeof(*psDevData)); ++ psNewHWPerfDevice->hDevData = (IMG_HANDLE)psDevData; ++ if (!psDevData) ++ { ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName), ++ "hwperf_device_%d", psDeviceNode->sDevId.i32OsDeviceID) < 0) ++ { ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to form HWPerf device name for device %d", ++ __func__, ++ psDeviceNode->sDevId.i32OsDeviceID)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psDevData->psRgxDevNode = psDeviceNode; ++ psDevData->psRgxDevInfo = psDeviceNode->pvDevice; ++ ++ psDeviceNode = psDeviceNode->psNext; ++ ++ /* At least one device is active */ ++ bFWActive = IMG_TRUE; ++ } ++ ++ OSWRLockReleaseRead(psPVRSRVData->hDeviceNodeListLock); ++ ++ if (!bFWActive) ++ { ++ return PVRSRV_ERROR_NOT_READY; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection) ++{ ++ RGX_KM_HWPERF_DEVDATA *psDevData; ++ RGX_HWPERF_DEVICE *psHWPerfDev; ++ PVRSRV_RGXDEV_INFO *psRgxDevInfo; ++ PVRSRV_ERROR eError; ++ IMG_CHAR pszHWPerfFwStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; ++ IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; ++ IMG_UINT32 ui32BufSize; ++ ++ /* Disable producer callback by default for the Kernel API. */ ++ IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING | ++ PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ /* Validate input argument values supplied by the caller */ ++ if (!psHWPerfConnection) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; ++ while (psHWPerfDev) ++ { ++ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; ++ psRgxDevInfo = psDevData->psRgxDevInfo; ++ ++ /* In the case where the AppHint has not been set we need to ++ * initialise the HWPerf resources here. Allocated on-demand ++ * to reduce RAM foot print on systems not needing HWPerf. 
++ */ ++ OSLockAcquire(psRgxDevInfo->hHWPerfLock); ++ if (RGXHWPerfIsInitRequired(psRgxDevInfo)) ++ { ++ eError = RGXHWPerfInitOnDemandResources(psRgxDevInfo); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Initialisation of on-demand HWPerfFW resources failed", ++ __func__)); ++ OSLockRelease(psRgxDevInfo->hHWPerfLock); ++ return eError; ++ } ++ } ++ OSLockRelease(psRgxDevInfo->hHWPerfLock); ++ ++ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream); ++ if (psRgxDevInfo->hHWPerfHostStream == NULL) ++ { ++ eError = RGXHWPerfHostInitOnDemandResources(psRgxDevInfo); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Initialisation of on-demand HWPerfHost resources failed", ++ __func__)); ++ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); ++ return eError; ++ } ++ } ++ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream); ++ ++ /* Form the HWPerf FW stream name corresponding to this DevNode, so that it is meaningful in the UM */ ++ if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d", ++ PVRSRV_TL_HWPERF_RGX_FW_STREAM, ++ psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to form HWPerf stream name for device %d", ++ __func__, ++ psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ /* Open the RGX TL stream for reading in this session */ ++ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, ++ pszHWPerfFwStreamName, ++ ui32StreamFlags, ++ &psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]); ++ PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(RGX_HWPerf)"); ++ ++ /* Form the HWPerf host stream name corresponding to this DevNode, so that it is meaningful in the UM */ ++ if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d", ++ PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, ++ psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID) < 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to form HWPerf host stream name for device %d", ++ __func__, ++ psRgxDevInfo->psDeviceNode->sDevId.i32OsDeviceID)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* Open the host TL stream for reading in this session */ ++ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, ++ pszHWPerfHostStreamName, ++ PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING, ++ &psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]); ++ PVR_LOG_RETURN_IF_ERROR(eError, "TLClientOpenStream(Host_HWPerf)"); ++ ++ /* Allocate a buffer large enough for use during the entire session, to ++ * avoid the need to resize in the Acquire call, as that might be in an ISR. ++ * Choose a size that can contain at least one packet.
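++		 * (If a stream later yields more data than fits here, the copy loop in
++		 * RGXHWPerfAcquireEvents() below stops at the first packet that would
++		 * overflow the client buffer and resumes from that packet on the next
++		 * Acquire call.)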
++ */ ++ /* Allocate buffer for FW Stream */ ++ ui32BufSize = FW_STREAM_BUFFER_SIZE; ++ psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] = OSAllocMem(ui32BufSize); ++ if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] == NULL) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID0_FW] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]+ui32BufSize; ++ ++ /* Allocate buffer for Host Stream */ ++ ui32BufSize = HOST_STREAM_BUFFER_SIZE; ++ psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] = OSAllocMem(ui32BufSize); ++ if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] == NULL) ++ { ++ OSFreeMem(psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID1_HOST] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST]+ui32BufSize; ++ ++ psHWPerfDev = psHWPerfDev->psNext; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ eError = RGXHWPerfLazyConnect(ppsHWPerfConnection); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0); ++ ++ eError = RGXHWPerfOpen(*ppsHWPerfConnection); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfOpen", e1); ++ ++ return PVRSRV_OK; ++ ++e1: /* HWPerfOpen might have opened some, and then failed */ ++ RGXHWPerfClose(*ppsHWPerfConnection); ++e0: /* LazyConnect might have allocated some resources and then failed, ++ * make sure they are cleaned up */ ++ RGXHWPerfFreeConnection(ppsHWPerfConnection); ++ return eError; ++} ++ ++/* ++ PVRSRVRGXControlHWPerfBlocksKM ++ */ ++PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_BOOL bEnable, ++ IMG_UINT32 ui32ArrayLen, ++ IMG_UINT16 * psBlockIDs) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sKccbCmd; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ PVRSRV_RGXDEV_INFO *psDevice; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psBlockIDs != NULL, "psBlockIDs"); ++ PVR_LOG_RETURN_IF_INVALID_PARAM((ui32ArrayLen>0) && (ui32ArrayLen <= RGXFWIF_HWPERF_CTRL_BLKS_MAX), "ui32ArrayLen"); ++ ++ PVR_ASSERT(psDeviceNode); ++ psDevice = psDeviceNode->pvDevice; ++ ++ /* Fill in the command structure with the parameters needed ++ */ ++ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS; ++ sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable; ++ sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen; ++ ++ OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16) * ui32ArrayLen); ++ ++ ++ /* Ask the FW to carry out the HWPerf configuration command ++ */ ++ eError = RGXScheduleCommandAndGetKCCBSlot(psDevice, ++ RGXFWIF_DM_GP, ++ &sKccbCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXScheduleCommandAndGetKCCBSlot"); ++ ++ /* Wait for FW to complete */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevice, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXWaitForKCCBSlotUpdate"); ++ ++ ++#if defined(DEBUG) ++ if (bEnable) ++ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen)); ++ else ++ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen)); ++#endif ++ ++ PVR_DPF_RETURN_OK; ++} ++ ++/* ++ 
PVRSRVRGXCtrlHWPerfKM ++ */ ++PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_HWPERF_STREAM_ID eStreamId, ++ IMG_BOOL bToggle, ++ IMG_UINT64 ui64Mask) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ PVR_DPF_ENTERED; ++ PVR_ASSERT(psDeviceNode); ++ ++ if (eStreamId == RGX_HWPERF_STREAM_ID0_FW) ++ { ++ return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask); ++ } ++ else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST) ++ { ++ return RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask); ++ } ++ else if (eStreamId == RGX_HWPERF_STREAM_ID2_CLIENT) ++ { ++ IMG_UINT32 ui32Index = (IMG_UINT32) (ui64Mask >> 32); ++ IMG_UINT32 ui32Mask = (IMG_UINT32) ui64Mask; ++ ++ return RGXHWPerfCtrlClientBuffer(bToggle, ui32Index, ui32Mask); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown stream id.")); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ PVR_DPF_RETURN_OK; ++} ++ ++PVRSRV_ERROR RGXHWPerfControl( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ RGX_HWPERF_STREAM_ID eStreamId, ++ IMG_BOOL bToggle, ++ IMG_UINT64 ui64Mask) ++{ ++ PVRSRV_ERROR eError; ++ RGX_KM_HWPERF_DEVDATA* psDevData; ++ RGX_HWPERF_DEVICE* psHWPerfDev; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ /* Validate input argument values supplied by the caller */ ++ if (!psHWPerfConnection) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; ++ ++ while (psHWPerfDev) ++ { ++ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; ++ ++ /* Call the internal server API */ ++ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM"); ++ ++ psHWPerfDev = psHWPerfDev->psNext; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++ ++IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ IMG_UINT32 ui32NumBlocks, ++ IMG_UINT16* aeBlockIDs, ++ IMG_BOOL bToggle, ++ const char* szFunctionString); ++ ++IMG_INTERNAL PVRSRV_ERROR RGXHWPerfToggleCounters( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ IMG_UINT32 ui32NumBlocks, ++ IMG_UINT16* aeBlockIDs, ++ IMG_BOOL bToggle, ++ const char* szFunctionString) ++{ ++ PVRSRV_ERROR eError; ++ RGX_KM_HWPERF_DEVDATA* psDevData; ++ RGX_HWPERF_DEVICE* psHWPerfDev; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; ++ ++ while (psHWPerfDev) ++ { ++ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; ++ ++ /* Call the internal server API */ ++ eError = PVRSRVRGXControlHWPerfBlocksKM(NULL, ++ psDevData->psRgxDevNode, ++ bToggle, ++ ui32NumBlocks, ++ aeBlockIDs); ++ ++ PVR_LOG_RETURN_IF_ERROR(eError, szFunctionString); ++ ++ psHWPerfDev = psHWPerfDev->psNext; ++ } ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXHWPerfDisableCounters( ++ RGX_HWPERF_CONNECTION *psHWPerfConnection, ++ IMG_UINT32 ui32NumBlocks, ++ IMG_UINT16* aeBlockIDs) ++{ ++ return RGXHWPerfToggleCounters(psHWPerfConnection, ++ ui32NumBlocks, ++ aeBlockIDs, ++ IMG_FALSE, ++ __func__); ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfEnableCounters( ++ RGX_HWPERF_CONNECTION 
*psHWPerfConnection, ++ IMG_UINT32 ui32NumBlocks, ++ IMG_UINT16* aeBlockIDs) ++{ ++ return RGXHWPerfToggleCounters(psHWPerfConnection, ++ ui32NumBlocks, ++ aeBlockIDs, ++ IMG_TRUE, ++ __func__); ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfAcquireEvents( ++ IMG_HANDLE hDevData, ++ RGX_HWPERF_STREAM_ID eStreamId, ++ IMG_PBYTE* ppBuf, ++ IMG_UINT32* pui32BufLen) ++{ ++ PVRSRV_ERROR eError; ++ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; ++ IMG_PBYTE pDataDest; ++ IMG_UINT32 ui32TlPackets = 0; ++ IMG_PBYTE pBufferEnd; ++ PVRSRVTL_PPACKETHDR psHDRptr; ++ PVRSRVTL_PACKETTYPE ui16TlType; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ /* Reset the output arguments in case we discover an error */ ++ *ppBuf = NULL; ++ *pui32BufLen = 0; ++ ++ /* Valid input argument values supplied by the caller */ ++ if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if (psDevData->pTlBuf[eStreamId] == NULL) ++ { ++ /* Acquire some data to read from the HWPerf TL stream */ ++ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, ++ psDevData->hSD[eStreamId], ++ &psDevData->pTlBuf[eStreamId], ++ &psDevData->ui32AcqDataLen[eStreamId]); ++ PVR_LOG_RETURN_IF_ERROR(eError, "TLClientAcquireData"); ++ ++ psDevData->pTlBufPos[eStreamId] = psDevData->pTlBuf[eStreamId]; ++ } ++ ++ /* TL indicates no data exists so return OK and zero. */ ++ if ((psDevData->pTlBufPos[eStreamId] == NULL) || (psDevData->ui32AcqDataLen[eStreamId] == 0)) ++ { ++ return PVRSRV_OK; ++ } ++ ++ /* Process each TL packet in the data buffer we have acquired */ ++ pBufferEnd = psDevData->pTlBuf[eStreamId]+psDevData->ui32AcqDataLen[eStreamId]; ++ pDataDest = psDevData->pHwpBuf[eStreamId]; ++ psHDRptr = GET_PACKET_HDR(psDevData->pTlBufPos[eStreamId]); ++ psDevData->pTlBufRead[eStreamId] = psDevData->pTlBufPos[eStreamId]; ++ while (psHDRptr < (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd)) ++ { ++ ui16TlType = GET_PACKET_TYPE(psHDRptr); ++ if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA) ++ { ++ IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr); ++ if (0 == ui16DataLen) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireEvents: ZERO Data in TL data packet: %p", psHDRptr)); ++ } ++ else ++ { ++ /* Check next packet does not fill buffer */ ++ if (pDataDest + ui16DataLen > psDevData->pHwpBufEnd[eStreamId]) ++ { ++ break; ++ } ++ ++ /* For valid data copy it into the client buffer and move ++ * the write position on */ ++ OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen); ++ pDataDest += ui16DataLen; ++ } ++ } ++ else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Indication that the transport buffer was full")); ++ } ++ else ++ { ++ /* else Ignore padding packet type and others */ ++ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Ignoring TL packet, type %d", ui16TlType )); ++ } ++ ++ /* Update loop variable to the next packet and increment counts */ ++ psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr); ++ /* Updated to keep track of the next packet to be read. 
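++		 * If the copy loop exited early because the client buffer was full,
++		 * bRelease stays IMG_FALSE and RGXHWPerfReleaseEvents() rewinds
++		 * pTlBufPos to this position instead of releasing the TL data, so no
++		 * packet is skipped.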
*/ ++ psDevData->pTlBufRead[eStreamId] = (IMG_PBYTE) ((void *)psHDRptr); ++ ui32TlPackets++; ++ } ++ ++ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets)); ++ ++ psDevData->bRelease[eStreamId] = IMG_FALSE; ++ if (psHDRptr >= (PVRSRVTL_PPACKETHDR)((void *)pBufferEnd)) ++ { ++ psDevData->bRelease[eStreamId] = IMG_TRUE; ++ } ++ ++ /* Update output arguments with client buffer details and true length */ ++ *ppBuf = psDevData->pHwpBuf[eStreamId]; ++ *pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId]; ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfReleaseEvents( ++ IMG_HANDLE hDevData, ++ RGX_HWPERF_STREAM_ID eStreamId) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ /* Valid input argument values supplied by the caller */ ++ if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if (psDevData->bRelease[eStreamId]) ++ { ++ /* Inform the TL that we are done with reading the data. */ ++ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]); ++ psDevData->ui32AcqDataLen[eStreamId] = 0; ++ psDevData->pTlBuf[eStreamId] = NULL; ++ } ++ else ++ { ++ psDevData->pTlBufPos[eStreamId] = psDevData->pTlBufRead[eStreamId]; ++ } ++ return eError; ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfGetFilter( ++ IMG_HANDLE hDevData, ++ RGX_HWPERF_STREAM_ID eStreamId, ++ IMG_UINT64 *ui64Filter) ++{ ++ PVRSRV_RGXDEV_INFO* psRgxDevInfo = ++ hDevData ? ((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ /* Valid input argument values supplied by the caller */ ++ if (!psRgxDevInfo) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device", ++ __func__)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* No need to take hHWPerfLock here since we are only reading data ++ * from always existing integers to return to debugfs which is an ++ * atomic operation. 
++ */ ++ switch (eStreamId) { ++ case RGX_HWPERF_STREAM_ID0_FW: ++ *ui64Filter = psRgxDevInfo->ui64HWPerfFilter; ++ break; ++ case RGX_HWPERF_STREAM_ID1_HOST: ++ *ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter; ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID", ++ __func__)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) ++{ ++ RGX_HWPERF_DEVICE *psHWPerfDev, *psHWPerfNextDev; ++ RGX_HWPERF_CONNECTION *psHWPerfConnection = *ppsHWPerfConnection; ++ ++ /* if connection object itself is NULL, nothing to free */ ++ if (psHWPerfConnection == NULL) ++ { ++ return PVRSRV_OK; ++ } ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList; ++ while (psHWPerfNextDev) ++ { ++ psHWPerfDev = psHWPerfNextDev; ++ psHWPerfNextDev = psHWPerfNextDev->psNext; ++ ++ /* Free the session memory */ ++ if (psHWPerfDev->hDevData) ++ OSFreeMem(psHWPerfDev->hDevData); ++ OSFreeMem(psHWPerfDev); ++ } ++ OSFreeMem(psHWPerfConnection); ++ *ppsHWPerfConnection = NULL; ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection) ++{ ++ RGX_HWPERF_DEVICE *psHWPerfDev; ++ RGX_KM_HWPERF_DEVDATA* psDevData; ++ IMG_UINT uiStreamId; ++ PVRSRV_ERROR eError; ++ ++ /* Check session connection is not zero */ ++ if (!psHWPerfConnection) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ psHWPerfDev = psHWPerfConnection->psHWPerfDevList; ++ while (psHWPerfDev) ++ { ++ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData; ++ for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++) ++ { ++ /* If the TL buffer exists they have not called ReleaseData ++ * before disconnecting so clean it up */ ++ if (psDevData->pTlBuf[uiStreamId]) ++ { ++ /* TLClientReleaseData call and null out the buffer fields ++ * and length */ ++ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[uiStreamId]); ++ psDevData->ui32AcqDataLen[uiStreamId] = 0; ++ psDevData->pTlBuf[uiStreamId] = NULL; ++ PVR_LOG_IF_ERROR(eError, "TLClientReleaseData"); ++ /* Packets may be lost if release was not required */ ++ if (!psDevData->bRelease[uiStreamId]) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfClose: Events in buffer waiting to be read, remaining events may be lost.")); ++ } ++ } ++ ++ /* Close the TL stream, ignore the error if it occurs as we ++ * are disconnecting */ ++ if (psDevData->hSD[uiStreamId]) ++ { ++ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, ++ psDevData->hSD[uiStreamId]); ++ PVR_LOG_IF_ERROR(eError, "TLClientCloseStream"); ++ psDevData->hSD[uiStreamId] = NULL; ++ } ++ ++ /* Free the client buffer used in session */ ++ if (psDevData->pHwpBuf[uiStreamId]) ++ { ++ OSFreeMem(psDevData->pHwpBuf[uiStreamId]); ++ psDevData->pHwpBuf[uiStreamId] = NULL; ++ } ++ } ++ psHWPerfDev = psHWPerfDev->psNext; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++ ++PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED); ++ ++ eError = RGXHWPerfClose(*ppsHWPerfConnection); ++ PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose"); ++ ++ eError = RGXHWPerfFreeConnection(ppsHWPerfConnection); ++ PVR_LOG_IF_ERROR(eError, "RGXHWPerfFreeConnection"); ++ ++ return eError; ++} ++ ++IMG_UINT64 RGXHWPerfConvertCRTimeStamp( 
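++	/* Sketch of the correlation arithmetic performed below:
++	 *   delta_ticks = ui64CRTimeStamp - ui64CorrCRTimeStamp
++	 *   delta_ns    = delta_ticks scaled by the CR-tick-to-ns factor derived
++	 *                 from ui32ClkSpeed (RGXTimeCorrGetConversionFactor)
++	 *   result      = ui64CorrOSTimeStamp + delta_ns
++	 * A zero in any input means "no correlation data" and yields 0. */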
++ IMG_UINT32 ui32ClkSpeed, ++ IMG_UINT64 ui64CorrCRTimeStamp, ++ IMG_UINT64 ui64CorrOSTimeStamp, ++ IMG_UINT64 ui64CRTimeStamp) ++{ ++ IMG_UINT64 ui64CRDeltaToOSDeltaKNs; ++ IMG_UINT64 ui64EventOSTimestamp, deltaRgxTimer, delta_ns; ++ ++ if (!(ui64CRTimeStamp) || !(ui32ClkSpeed) || !(ui64CorrCRTimeStamp) || !(ui64CorrOSTimeStamp)) ++ { ++ return 0; ++ } ++ ++ ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(ui32ClkSpeed); ++ ++ /* RGX CR timer ticks delta */ ++ deltaRgxTimer = ui64CRTimeStamp - ui64CorrCRTimeStamp; ++ /* RGX time delta in nanoseconds */ ++ delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs); ++ /* Calculate OS time of HWPerf event */ ++ ui64EventOSTimestamp = ui64CorrOSTimeStamp + delta_ns; ++ ++ return ui64EventOSTimestamp; ++} ++ ++/****************************************************************************** ++ End of file (rgxhwperf_common.c) ++ ******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxhwperf_common.h b/drivers/gpu/drm/img-rogue/rgxhwperf_common.h +new file mode 100644 +index 000000000000..76957c35e246 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxhwperf_common.h +@@ -0,0 +1,512 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX HW Performance header file ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX HWPerf functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXHWPERF_COMMON_H_ ++#define RGXHWPERF_COMMON_H_ ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#include "device.h" ++#include "connection_server.h" ++#include "rgxdevice.h" ++#include "rgx_hwperf.h" ++ ++/* HWPerf host buffer size constraints in KBs */ ++#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB ++#define HWPERF_HOST_TL_STREAM_SIZE_MIN (32U) ++#define HWPERF_HOST_TL_STREAM_SIZE_MAX (3072U) ++ ++/****************************************************************************** ++ * RGX HW Performance decode Bvnc Features for HWPerf ++ *****************************************************************************/ ++PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGX_HWPERF_BVNC *psBVNC); ++ ++PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_HWPERF_BVNC *psBVNC); ++ ++/****************************************************************************** ++ * RGX HW Performance Data Transport Routines ++ *****************************************************************************/ ++ ++PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE* psDevInfo); ++ ++PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); ++PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo); ++void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); ++void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); ++void RGXHWPerfClientInitAppHintCallbacks(void); ++ ++/****************************************************************************** ++ * RGX HW Performance Profiling API(s) ++ *****************************************************************************/ ++ ++PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ RGX_HWPERF_STREAM_ID eStreamId, ++ IMG_BOOL bToggle, ++ IMG_UINT64 ui64Mask); ++ ++PVRSRV_ERROR PVRSRVRGXControlHWPerfBlocksKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_BOOL bEnable, ++ IMG_UINT32 ui32ArrayLen, ++ IMG_UINT16 * psBlockIDs); ++ ++/****************************************************************************** ++ * RGX HW Performance Host Stream API ++ *****************************************************************************/ ++ ++PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB); ++PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo); ++void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo); ++ ++void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_UINT32 ui32Filter); ++ ++void RGXHWPerfHostPostRaw(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_HOST_EVENT_TYPE eEvType, ++ IMG_BYTE *pbPayload, ++ IMG_UINT32 ui32PayloadSize); ++ ++void 
RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_KICK_TYPE eEnqType, ++ IMG_UINT32 ui32Pid, ++ IMG_UINT32 ui32FWDMContext, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ PVRSRV_FENCE hCheckFence, ++ PVRSRV_FENCE hUpdateFence, ++ PVRSRV_TIMELINE hUpdateTimeline, ++ IMG_UINT64 ui64CheckFenceUID, ++ IMG_UINT64 ui64UpdateFenceUID, ++ IMG_UINT64 ui64DeadlineInus, ++ IMG_UINT32 ui32CycleEstimate); ++ ++void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType, ++ const IMG_CHAR *psName, ++ IMG_UINT32 ui32NameSize, ++ RGX_HWPERF_HOST_ALLOC_DETAIL *puAllocDetail); ++ ++void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType, ++ IMG_UINT64 ui64UID, ++ IMG_UINT32 ui32PID, ++ IMG_UINT32 ui32FWAddr); ++ ++void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType, ++ IMG_UINT64 ui64NewUID, ++ IMG_UINT64 ui64UID1, ++ IMG_UINT64 ui64UID2, ++ const IMG_CHAR *psName, ++ IMG_UINT32 ui32NameSize); ++ ++void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_UFO_EV eUfoType, ++ RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData, ++ const IMG_BOOL bSleepAllowed); ++ ++void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo); ++ ++void RGXHWPerfHostPostDeviceInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_DEV_INFO_EV eEvType, ++ PVRSRV_DEVICE_HEALTH_STATUS eDeviceHealthStatus, ++ PVRSRV_DEVICE_HEALTH_REASON eDeviceHeathReason); ++ ++void RGXHWPerfHostPostInfo(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_INFO_EV eEvType); ++ ++void RGXHWPerfHostPostFenceWait(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE eType, ++ IMG_PID uiPID, ++ PVRSRV_FENCE hFence, ++ IMG_UINT32 ui32Data); ++ ++void RGXHWPerfHostPostSWTimelineAdv(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_PID uiPID, ++ PVRSRV_TIMELINE hSWTimeline, ++ IMG_UINT64 ui64SyncPtIndex); ++ ++void RGXHWPerfHostPostClientInfoProcName(PVRSRV_RGXDEV_INFO *psRgxDevInfo, ++ IMG_PID uiPID, ++ const IMG_CHAR *psName); ++ ++IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent); ++ ++#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \ ++ (((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \ ++ & RGX_HWPERF_EVENT_MASK_VALUE(EV)) ++ ++#define _RGX_DEVICE_INFO_FROM_CTX(CTX) \ ++ ((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice) ++ ++#define _RGX_DEVICE_INFO_FROM_NODE(DEVNODE) \ ++ ((PVRSRV_RGXDEV_INFO *)DEVNODE->pvDevice) ++ ++/* Deadline and cycle estimate is not supported for all ENQ events */ ++#define NO_DEADLINE 0 ++#define NO_CYCEST 0 ++ ++ ++#if defined(SUPPORT_RGX) ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts event to the HWPerfHost stream. 
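++ *
++ * An illustrative call site (all argument names below are
++ * placeholders, not identifiers defined by this header):
++ *
++ *   RGXSRV_HWPERF_ENQ(psContext, ui32Pid, ui32FWCtx, ui32ExtJobRef,
++ *                     ui32IntJobRef, eKickType, hCheckFence,
++ *                     hUpdateFence, hUpdateTimeline, ui64CheckUID,
++ *                     ui64UpdateUID, NO_DEADLINE, NO_CYCEST);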
++ * ++ * @param C Kick context ++ * @param P Pid of kicking process ++ * @param X Related FW context ++ * @param E External job reference ++ * @param I Job ID ++ * @param K Kick type ++ * @param CF Check fence handle ++ * @param UF Update fence handle ++ * @param UT Update timeline (on which above UF was created) handle ++ * @param CHKUID Check fence UID ++ * @param UPDUID Update fence UID ++ * @param D Deadline ++ * @param CE Cycle estimate ++ */ ++#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) \ ++ do { \ ++ if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \ ++ { \ ++ RGXHWPerfHostPostEnqEvent(_RGX_DEVICE_INFO_FROM_CTX(C), \ ++ (K), (P), (X), (E), (I), \ ++ (CF), (UF), (UT), \ ++ (CHKUID), (UPDUID), (D), (CE)); \ ++ } \ ++ } while (0) ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts event to the HWPerfHost stream. ++ * ++ * @param I Device Info pointer ++ * @param T Host UFO event type ++ * @param D Pointer to UFO data ++ * @param S Is sleeping allowed? ++ */ ++#define RGXSRV_HWPERF_UFO(I, T, D, S) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_UFO)) \ ++ { \ ++ RGXHWPerfHostPostUfoEvent((I), (T), (D), (S)); \ ++ } \ ++ } while (0) ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts event to the HWPerfHost stream. ++ * ++ * @param D Device node pointer ++ * @param T Host ALLOC event type ++ * @param FWADDR sync firmware address ++ * @param N string containing sync name ++ * @param Z string size including null terminating character ++ */ ++#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ ++ { \ ++ RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ ++ uAllocDetail.sSyncAlloc.ui32FWAddr = (FWADDR); \ ++ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ ++ (N), (Z), &uAllocDetail); \ ++ } \ ++ } while (0) ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts event to the HWPerfHost stream. 
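++ * The record is posted with resource type FENCE_PVR (see the macro
++ * body below).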
++ * ++ * @param D Device Node pointer ++ * @param PID ID of allocating process ++ * @param FENCE PVRSRV_FENCE object ++ * @param FWADDR sync firmware address ++ * @param N string containing sync name ++ * @param Z string size including null terminating character ++ */ ++#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ ++ { \ ++ RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ ++ uAllocDetail.sFenceAlloc.uiPID = (PID); \ ++ uAllocDetail.sFenceAlloc.hFence = (FENCE); \ ++ uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = (FWADDR); \ ++ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, \ ++ N, Z, &uAllocDetail); \ ++ } \ ++ } while (0) ++ ++/** ++ * @param D Device Node pointer ++ * @param TL PVRSRV_TIMELINE on which CP is allocated ++ * @param PID Allocating process ID of this TL/FENCE ++ * @param FENCE PVRSRV_FENCE as passed to SyncCheckpointResolveFence OR PVRSRV_NO_FENCE ++ * @param FWADDR sync firmware address ++ * @param N string containing sync name ++ * @param Z string size including null terminating character ++ */ ++#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ ++ { \ ++ RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ ++ uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = (FWADDR); \ ++ uAllocDetail.sSyncCheckPointAlloc.hTimeline = (TL); \ ++ uAllocDetail.sSyncCheckPointAlloc.uiPID = (PID); \ ++ uAllocDetail.sSyncCheckPointAlloc.hFence = (FENCE); \ ++ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC_CP, \ ++ N, Z, &uAllocDetail); \ ++ } \ ++ } while (0) ++ ++/** ++ * @param D Device Node pointer ++ * @param PID ID of allocating process ++ * @param SW_FENCE PVRSRV_FENCE object ++ * @param SW_TL PVRSRV_TIMELINE on which SW_FENCE is allocated ++ * @param SPI Sync point index on the SW_TL on which this SW_FENCE is allocated ++ * @param N string containing sync name ++ * @param Z string size including null terminating character ++ */ ++#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \ ++ { \ ++ RGX_HWPERF_HOST_ALLOC_DETAIL uAllocDetail; \ ++ uAllocDetail.sSWFenceAlloc.uiPID = (PID); \ ++ uAllocDetail.sSWFenceAlloc.hSWFence = (SW_FENCE); \ ++ uAllocDetail.sSWFenceAlloc.hSWTimeline = (SW_TL); \ ++ uAllocDetail.sSWFenceAlloc.ui64SyncPtIndex = (SPI); \ ++ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_SW, \ ++ N, Z, &uAllocDetail); \ ++ } \ ++ } while (0) ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts event to the HWPerfHost stream. ++ * ++ * @param D Device Node pointer ++ * @param T Host ALLOC event type ++ * @param FWADDR sync firmware address ++ */ ++#define RGXSRV_HWPERF_FREE(D, T, FWADDR) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ ++ { \ ++ RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ ++ (0), (0), (FWADDR)); \ ++ } \ ++ } while (0) ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts event to the HWPerfHost stream. 
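++ * Unlike RGXSRV_HWPERF_FREE, this variant also forwards the UID and
++ * PID of the freed object.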
++ * ++ * @param D Device Node pointer ++ * @param T Host ALLOC event type ++ * @param UID ID of input object ++ * @param PID ID of allocating process ++ * @param FWADDR sync firmware address ++ */ ++#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \ ++ { \ ++ RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ ++ (UID), (PID), (FWADDR)); \ ++ } \ ++ } while (0) ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts event to the HWPerfHost stream. ++ * ++ * @param D Device Node pointer ++ * @param T Host ALLOC event type ++ * @param NEWUID ID of output object ++ * @param UID1 ID of first input object ++ * @param UID2 ID of second input object ++ * @param N string containing new object's name ++ * @param Z string size including null terminating character ++ */ ++#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_MODIFY)) \ ++ { \ ++ RGXHWPerfHostPostModifyEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \ ++ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \ ++ (NEWUID), (UID1), (UID2), N, Z); \ ++ } \ ++ } while (0) ++ ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts event to the HWPerfHost stream. ++ * ++ * @param I Device info pointer ++ */ ++#define RGXSRV_HWPERF_CLK_SYNC(I) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_CLK_SYNC)) \ ++ { \ ++ RGXHWPerfHostPostClkSyncEvent((I)); \ ++ } \ ++ } while (0) ++ ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts a device info event to the HWPerfHost stream. ++ * ++ * @param I Device info pointer ++ * @param T Event type ++ * @param H Health status enum ++ * @param R Health reason enum ++ */ ++#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) \ ++ do { \ ++ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_DEV_INFO)) \ ++ { \ ++ RGXHWPerfHostPostDeviceInfo((I), (T), (H), (R)); \ ++ } \ ++ } while (0) ++ ++/** ++ * This macro checks if HWPerfHost and the event are enabled and if they are ++ * it posts event to the HWPerfHost stream. 
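++ * The posted record carries no payload beyond the event type itself
++ * (see RGXHWPerfHostPostInfo above).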
++ * ++ * @param I Device info pointer ++ * @param T Event type ++ */ ++#define RGXSRV_HWPERF_HOST_INFO(I, T) \ ++do { \ ++ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_INFO)) \ ++ { \ ++ RGXHWPerfHostPostInfo((I), (T)); \ ++ } \ ++} while (0) ++ ++/** ++ * @param I Device info pointer ++ * @param T Wait Event type ++ * @param PID Process ID that the following fence belongs to ++ * @param F Fence handle ++ * @param D Data for this wait event type ++ */ ++#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) \ ++do { \ ++ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_FENCE_WAIT)) \ ++ { \ ++ RGXHWPerfHostPostFenceWait(I, RGX_HWPERF_HOST_SYNC_FENCE_WAIT_TYPE_##T, \ ++ (PID), (F), (D)); \ ++ } \ ++} while (0) ++ ++/** ++ * @param I Device info pointer ++ * @param PID Process ID that the following timeline belongs to ++ * @param F SW-timeline handle ++ * @param SPI Sync-pt index where this SW-timeline has reached ++ */ ++#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI)\ ++do { \ ++ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_SYNC_SW_TL_ADVANCE)) \ ++ { \ ++ RGXHWPerfHostPostSWTimelineAdv((I), (PID), (SW_TL), (SPI)); \ ++ } \ ++} while (0) ++ ++/** ++ * @param D Device Node pointer ++ * @param PID Process ID that the following timeline belongs to ++ * @param N Null terminated string containing the process name ++ */ ++#define RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(D, PID, N) \ ++do { \ ++ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_CLIENT_INFO)) \ ++ { \ ++ RGXHWPerfHostPostClientInfoProcName(_RGX_DEVICE_INFO_FROM_NODE(D), (PID), (N)); \ ++ } \ ++} while (0) ++ ++#else ++ ++#define RGXSRV_HWPERF_ENQ(C, P, X, E, I, K, CF, UF, UT, CHKUID, UPDUID, D, CE) ++#define RGXSRV_HWPERF_UFO(I, T, D, S) ++#define RGXSRV_HWPERF_ALLOC(D, T, FWADDR, N, Z) ++#define RGXSRV_HWPERF_ALLOC_FENCE(D, PID, FENCE, FWADDR, N, Z) ++#define RGXSRV_HWPERF_ALLOC_SYNC_CP(D, TL, PID, FENCE, FWADDR, N, Z) ++#define RGXSRV_HWPERF_ALLOC_SW_FENCE(D, PID, SW_FENCE, SW_TL, SPI, N, Z) ++#define RGXSRV_HWPERF_FREE(D, T, FWADDR) ++#define RGXSRV_HWPERF_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) ++#define RGXSRV_HWPERF_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) ++#define RGXSRV_HWPERF_CLK_SYNC(I) ++#define RGXSRV_HWPERF_DEVICE_INFO(I, T, H, R) ++#define RGXSRV_HWPERF_HOST_INFO(I, T) ++#define RGXSRV_HWPERF_SYNC_FENCE_WAIT(I, T, PID, F, D) ++#define RGXSRV_HWPERF_SYNC_SW_TL_ADV(I, PID, SW_TL, SPI) ++#define RGXSRV_HWPERF_HOST_CLIENT_INFO_PROCESS_NAME(D, PID, N) ++ ++#endif ++ ++#endif /* RGXHWPERF_COMMON_H_ */ +diff --git a/drivers/gpu/drm/img-rogue/rgxinit.c b/drivers/gpu/drm/img-rogue/rgxinit.c +new file mode 100644 +index 000000000000..cde6055cc6f5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxinit.c +@@ -0,0 +1,5158 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device specific initialisation routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if defined(__linux__) ++#include ++#else ++#include ++#endif ++ ++#include "img_defs.h" ++#include "pvr_notifier.h" ++#include "pvrsrv.h" ++#include "pvrsrv_bridge_init.h" ++#include "rgx_bridge_init.h" ++#include "syscommon.h" ++#include "rgx_heaps.h" ++#include "rgxheapconfig.h" ++#include "rgxpower.h" ++#include "tlstream.h" ++#include "pvrsrv_tlstreams.h" ++ ++#include "rgxinit.h" ++#include "rgxbvnc.h" ++#include "rgxmulticore.h" ++ ++#include "pdump_km.h" ++#include "handle.h" ++#include "allocmem.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "rgxmem.h" ++#include "sync_internal.h" ++#include "pvrsrv_apphint.h" ++#include "oskm_apphint.h" ++#include "rgxfwdbg.h" ++#include "info_page.h" ++ ++#include "rgxfwimageutils.h" ++#include "rgxutils.h" ++#include "rgxfwutils.h" ++#include "rgx_fwif_km.h" ++ ++#include "rgxmmuinit.h" ++#include "rgxmipsmmuinit.h" ++#include "physmem.h" ++#include "devicemem_utils.h" ++#include "devicemem_server.h" ++#include "physmem_osmem.h" ++#include "physmem_lma.h" ++ ++#include "rgxdebug.h" ++#include "rgxhwperf.h" ++#include "htbserver.h" ++ ++#include "rgx_options.h" ++#include "pvrversion.h" ++ ++#include "rgx_compat_bvnc.h" ++ ++#include "rgx_heaps.h" ++ ++#include "rgxta3d.h" ++#include "rgxtimecorr.h" ++#include "rgxshader.h" ++ ++#include "rgx_bvnc_defs_km.h" ++#if defined(PDUMP) ++#include "rgxstartstop.h" ++#endif ++ ++#include "rgx_fwif_alignchecks.h" ++#include "vmm_pvz_client.h" ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++#include "rgxworkest.h" ++#endif ++ ++#if defined(SUPPORT_PDVFS) ++#include "rgxpdvfs.h" ++#endif ++ ++#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) ++#include "rgxsoctimer.h" ++#endif ++ ++#if defined(PDUMP) && defined(SUPPORT_SECURITY_VALIDATION) ++#include "pdump_physmem.h" ++#endif ++ ++#undef linux ++#define CREATE_TRACE_POINTS ++#include "gpu_trace_point.h" ++ ++static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode); ++static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString); ++static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 pui32RGXClockSpeed); ++static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2); ++static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode); ++static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++#if (RGX_NUM_OS_SUPPORTED > 1) ++static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid); ++static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap); ++#endif ++ ++/* Services internal heap identification used in this file only */ ++#define RGX_FIRMWARE_MAIN_HEAP_IDENT "FwMain" /*!< RGX Main Firmware Heap identifier */ ++#define RGX_FIRMWARE_CONFIG_HEAP_IDENT "FwConfig" /*!< RGX Config firmware Heap identifier */ ++ ++#define RGX_MMU_PAGE_SIZE_4KB ( 4 * 1024) ++#define RGX_MMU_PAGE_SIZE_16KB ( 16 * 1024) ++#define RGX_MMU_PAGE_SIZE_64KB ( 64 * 1024) ++#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024) ++#define RGX_MMU_PAGE_SIZE_1MB (1024 * 1024) ++#define RGX_MMU_PAGE_SIZE_2MB (2048 * 1024) ++#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB ++#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB ++ ++#define VAR(x) #x ++ ++static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo); ++ ++#if !defined(NO_HARDWARE) 
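++/* The handlers below implement the usual top/bottom-half interrupt
++ * split: the LISR acknowledges the hardware IRQ and, when the FW
++ * interrupt counters have advanced, schedules the MISR(s) to do the
++ * heavier work (HWPerf transfer, firmware CCB processing, active
++ * power management) outside interrupt context. A minimal sketch of
++ * that pattern follows; ExampleLISR and AckAndClearHwIrq are
++ * placeholder names, not part of this driver:
++ *
++ *   static IMG_BOOL ExampleLISR(void *pvData)
++ *   {
++ *       PVRSRV_RGXDEV_INFO *psDevInfo = pvData;
++ *
++ *       if (!AckAndClearHwIrq(psDevInfo))
++ *           return IMG_FALSE;            // spurious interrupt
++ *
++ *       // defer the heavy lifting to the bottom half
++ *       OSScheduleMISR(psDevInfo->pvMISRData);
++ *       return IMG_TRUE;
++ *   }
++ */
++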
++/*************************************************************************/ /*! ++@Function SampleIRQCount ++@Description Utility function taking snapshots of RGX FW interrupt count. ++@Input psDevInfo Device Info structure ++ ++@Return IMG_BOOL Returns IMG_TRUE if RGX FW IRQ is not equal to ++ sampled RGX FW IRQ count for any RGX FW thread. ++ */ /**************************************************************************/ ++static INLINE IMG_BOOL SampleIRQCount(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_BOOL bReturnVal = IMG_FALSE; ++ volatile IMG_UINT32 *aui32SampleIrqCount = psDevInfo->aui32SampleIRQCount; ++ IMG_UINT32 ui32IrqCnt; ++ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++ if PVRSRV_VZ_MODE_IS(GUEST) ++ { ++ bReturnVal = IMG_TRUE; ++ } ++ else ++ { ++ get_irq_cnt_val(ui32IrqCnt, RGXFW_HOST_OS, psDevInfo); ++ ++ if (ui32IrqCnt != aui32SampleIrqCount[RGXFW_THREAD_0]) ++ { ++ aui32SampleIrqCount[RGXFW_THREAD_0] = ui32IrqCnt; ++ bReturnVal = IMG_TRUE; ++ } ++ } ++#else ++ IMG_UINT32 ui32TID; ++ ++ for_each_irq_cnt(ui32TID) ++ { ++ get_irq_cnt_val(ui32IrqCnt, ui32TID, psDevInfo); ++ ++ /* treat unhandled interrupts here to align host count with fw count */ ++ if (aui32SampleIrqCount[ui32TID] != ui32IrqCnt) ++ { ++ aui32SampleIrqCount[ui32TID] = ui32IrqCnt; ++ bReturnVal = IMG_TRUE; ++ } ++ } ++#endif ++ ++ return bReturnVal; ++} ++ ++/*************************************************************************/ /*! ++@Function RGXHostSafetyEvents ++@Description Returns the event status masked to keep only the safety ++ events handled by the Host ++@Input psDevInfo Device Info structure ++@Return IMG_UINT32 Status of Host-handled safety events ++ */ /**************************************************************************/ ++static INLINE IMG_UINT32 RGXHostSafetyEvents(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ if (PVRSRV_VZ_MODE_IS(GUEST) || (psDevInfo->ui32HostSafetyEventMask == 0)) ++ { ++ return 0; ++ } ++ else ++ { ++ IMG_UINT32 ui32SafetyEventStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE); ++ return (ui32SafetyEventStatus & psDevInfo->ui32HostSafetyEventMask); ++ } ++} ++ ++/*************************************************************************/ /*! ++@Function RGXSafetyEventCheck ++@Description Clears the Event Status register and checks if any of the ++ safety events need Host handling ++@Input psDevInfo Device Info structure ++@Return IMG_BOOL Are there any safety events for Host to handle ? ++ */ /**************************************************************************/ ++static INLINE IMG_BOOL RGXSafetyEventCheck(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_BOOL bSafetyEvent = IMG_FALSE; ++ ++ if (psDevInfo->ui32HostSafetyEventMask != 0) ++ { ++ IMG_UINT32 ui32EventStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_STATUS); ++ ++ if (BIT_ISSET(ui32EventStatus, RGX_CR_EVENT_STATUS_SAFETY_SHIFT)) ++ { ++ /* clear the safety event */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_EVENT_CLEAR, RGX_CR_EVENT_CLEAR_SAFETY_EN); ++ ++ /* report if there is anything for the Host to handle */ ++ bSafetyEvent = (RGXHostSafetyEvents(psDevInfo) != 0); ++ } ++ } ++ ++ return bSafetyEvent; ++} ++ ++/*************************************************************************/ /*! 
++@Function RGXSafetyEventHandler ++@Description Handles the Safety Events that the Host is responsible for ++@Input psDevInfo Device Info structure ++ */ /**************************************************************************/ ++static void RGXSafetyEventHandler(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_UINT32 ui32HostSafetyStatus = RGXHostSafetyEvents(psDevInfo); ++ RGX_CONTEXT_RESET_REASON eResetReason = RGX_CONTEXT_RESET_REASON_NONE; ++ ++ if (ui32HostSafetyStatus != 0) ++ { ++ /* clear the safety bus events handled by the Host */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SAFETY_EVENT_CLEAR__ROGUEXE, ui32HostSafetyStatus); ++ ++ if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_SHIFT)) ++ { ++ IMG_UINT32 ui32FaultFlag; ++ IMG_UINT32 ui32FaultFW = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_STATUS); ++ IMG_UINT32 ui32CorrectedBitOffset = RGX_CR_FAULT_FW_STATUS_CPU_CORRECT_SHIFT - ++ RGX_CR_FAULT_FW_STATUS_CPU_DETECT_SHIFT; ++ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety fault status: 0x%X", __func__, ui32FaultFW)); ++ ++ for (ui32FaultFlag = 0; ui32FaultFlag < ui32CorrectedBitOffset; ui32FaultFlag++) ++ { ++ if (BIT_ISSET(ui32FaultFW, ui32FaultFlag)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware safety hardware fault detected (0x%lX).", ++ __func__, BIT(ui32FaultFlag))); ++ eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_ERR; ++ } ++ else if BIT_ISSET(ui32FaultFW, ui32FaultFlag + ui32CorrectedBitOffset) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware safety hardware fault corrected.(0x%lX).", ++ __func__, BIT(ui32FaultFlag))); ++ ++ /* Only report this if we haven't detected a more serious error */ ++ if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) ++ { ++ eResetReason = RGX_CONTEXT_RESET_REASON_FW_ECC_OK; ++ } ++ } ++ } ++ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_FAULT_FW_CLEAR, ui32FaultFW); ++ } ++ ++ if (BIT_ISSET(ui32HostSafetyStatus, RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_SHIFT)) ++ { ++ volatile RGXFWIF_POW_STATE ePowState = psDevInfo->psRGXFWIfFwSysData->ePowState; ++ ++ if (ePowState == RGXFWIF_POW_ON) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Safety Watchdog Trigger !", __func__)); ++ ++ /* Only report this if we haven't detected a more serious error */ ++ if (eResetReason != RGX_CONTEXT_RESET_REASON_FW_ECC_ERR) ++ { ++ eResetReason = RGX_CONTEXT_RESET_REASON_FW_WATCHDOG; ++ } ++ } ++ } ++ ++ /* Notify client and system layer of any error */ ++ if (eResetReason != RGX_CONTEXT_RESET_REASON_NONE) ++ { ++ PVRSRV_DEVICE_NODE *psDevNode = psDevInfo->psDeviceNode; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig; ++ ++ /* Client notification of device error will be achieved by ++ * clients calling UM function RGXGetLastDeviceError() */ ++ psDevInfo->eLastDeviceError = eResetReason; ++ ++ /* Notify system layer of any error */ ++ if (psDevConfig->pfnSysDevErrorNotify) ++ { ++ PVRSRV_ROBUSTNESS_NOTIFY_DATA sErrorData = {0}; ++ ++ sErrorData.eResetReason = eResetReason; ++ ++ psDevConfig->pfnSysDevErrorNotify(psDevConfig, ++ &sErrorData); ++ } ++ } ++ } ++} ++ ++static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++#if defined(PVRSRV_DEBUG_LISR_EXECUTION) ++ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; ++ IMG_UINT32 ui32idx; ++#endif ++ ++ RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo); ++ ++#if defined(PVRSRV_DEBUG_LISR_EXECUTION) ++ PVR_DPF((PVR_DBG_ERROR, ++ "Last RGX_LISRHandler State (DevID %u): 0x%08X Clock: %llu", ++ 
psDeviceNode->sDevId.ui32InternalID, ++ psDeviceNode->sLISRExecutionInfo.ui32Status, ++ psDeviceNode->sLISRExecutionInfo.ui64Clockns)); ++ ++ for_each_irq_cnt(ui32idx) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ MSG_IRQ_CNT_TYPE " %u: InterruptCountSnapshot: 0x%X", ++ ui32idx, psDeviceNode->sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32idx])); ++ } ++#else ++ PVR_DPF((PVR_DBG_ERROR, "No further information available. Please enable PVRSRV_DEBUG_LISR_EXECUTION")); ++#endif ++ ++ return SampleIRQCount(psDevInfo); ++} ++ ++void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_BOOL bScheduleMISR; ++ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ bScheduleMISR = IMG_TRUE; ++ } ++ else ++ { ++ bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo); ++ } ++ ++ if (bScheduleMISR) ++ { ++ OSScheduleMISR(psDevInfo->pvMISRData); ++ ++ if (psDevInfo->pvAPMISRData != NULL) ++ { ++ OSScheduleMISR(psDevInfo->pvAPMISRData); ++ } ++ } ++} ++ ++static inline IMG_BOOL RGXAckHwIrq(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32IRQStatusReg, ++ IMG_UINT32 ui32IRQStatusEventMsk, ++ IMG_UINT32 ui32IRQClearReg, ++ IMG_UINT32 ui32IRQClearMask) ++{ ++ IMG_UINT32 ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg); ++ ++ trace_gpu_interrupt(ui32IRQStatusReg, ui32IRQStatus); ++ if (ui32IRQStatus & ui32IRQStatusEventMsk) ++ { ++ /* acknowledge and clear the interrupt */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask); ++ return IMG_TRUE; ++ } ++ else ++ { ++ /* spurious interrupt */ ++ return IMG_FALSE; ++ } ++} ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++static IMG_BOOL RGXAckIrqMETA(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ return RGXAckHwIrq(psDevInfo, ++ RGX_CR_META_SP_MSLVIRQSTATUS, ++ RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN, ++ RGX_CR_META_SP_MSLVIRQSTATUS, ++ RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK); ++} ++#endif ++ ++static IMG_BOOL RGXAckIrqMIPS(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ return RGXAckHwIrq(psDevInfo, ++ RGX_CR_MIPS_WRAPPER_IRQ_STATUS, ++ RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN, ++ RGX_CR_MIPS_WRAPPER_IRQ_CLEAR, ++ RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN); ++} ++ ++static IMG_BOOL RGXAckIrqDedicated(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ /* status & clearing registers are available on both Host and Guests ++ * and are agnostic of the Fw CPU type. 
Due to the remappings done by
++	 * the 2nd stage device MMU, all drivers assume they are accessing
++	 * register bank 0 */
++	return RGXAckHwIrq(psDevInfo,
++	                   RGX_CR_IRQ_OS0_EVENT_STATUS,
++	                   RGX_CR_IRQ_OS0_EVENT_STATUS_SOURCE_EN,
++	                   RGX_CR_IRQ_OS0_EVENT_CLEAR,
++	                   RGX_CR_IRQ_OS0_EVENT_CLEAR_SOURCE_EN);
++}
++
++static IMG_BOOL RGX_LISRHandler(void *pvData)
++{
++	PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++	IMG_BOOL bIrqAcknowledged = IMG_FALSE;
++
++#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
++	IMG_UINT32 ui32idx, ui32IrqCnt;
++
++	for_each_irq_cnt(ui32idx)
++	{
++		get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo);
++		UPDATE_LISR_DBG_SNAPSHOT(ui32idx, ui32IrqCnt);
++	}
++	UPDATE_LISR_DBG_STATUS(RGX_LISR_INIT);
++	UPDATE_LISR_DBG_TIMESTAMP();
++#endif
++
++	UPDATE_LISR_DBG_COUNTER();
++
++	if (psDevInfo->bRGXPowered)
++	{
++		IMG_BOOL bSafetyEvent = RGXSafetyEventCheck(psDevInfo);
++
++		if ((psDevInfo->pfnRGXAckIrq == NULL) || psDevInfo->pfnRGXAckIrq(psDevInfo) || bSafetyEvent)
++		{
++			bIrqAcknowledged = IMG_TRUE;
++
++			if (SampleIRQCount(psDevInfo) || bSafetyEvent)
++			{
++				UPDATE_LISR_DBG_STATUS(RGX_LISR_PROCESSED);
++				UPDATE_MISR_DBG_COUNTER();
++
++				OSScheduleMISR(psDevInfo->pvMISRData);
++
++#if defined(SUPPORT_AUTOVZ)
++				RGXUpdateAutoVzWdgToken(psDevInfo);
++#endif
++				if (psDevInfo->pvAPMISRData != NULL)
++				{
++					OSScheduleMISR(psDevInfo->pvAPMISRData);
++				}
++			}
++			else
++			{
++				UPDATE_LISR_DBG_STATUS(RGX_LISR_FW_IRQ_COUNTER_NOT_UPDATED);
++			}
++		}
++		else
++		{
++			UPDATE_LISR_DBG_STATUS(RGX_LISR_NOT_TRIGGERED_BY_HW);
++		}
++	}
++	else
++	{
++		/* AutoVz drivers rebooting while the firmware is active must acknowledge
++		 * and clear the hw IRQ line before RGXInit() has finished. */
++		if (!(psDevInfo->psDeviceNode->bAutoVzFwIsUp &&
++		      (psDevInfo->pfnRGXAckIrq != NULL) &&
++		      psDevInfo->pfnRGXAckIrq(psDevInfo)))
++		{
++			UPDATE_LISR_DBG_STATUS(RGX_LISR_DEVICE_NOT_POWERED);
++		}
++		/* The GPU may be powered off when an interrupt arrives; return
++		 * IMG_TRUE so the OS does not treat the interrupt as unhandled
++		 * ("nobody cared"). */
++		bIrqAcknowledged = IMG_TRUE;
++	}
++
++	return bIrqAcknowledged;
++}
++
++static void RGX_MISR_ProcessKCCBDeferredList(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++	OS_SPINLOCK_FLAGS uiFlags;
++
++	/* First check whether there are pending commands in the deferred KCCB list */
++	OSSpinLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
++	if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead))
++	{
++		OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
++		return;
++	}
++	OSSpinLockRelease(psDevInfo->hLockKCCBDeferredCommandsList, uiFlags);
++
++	/* Take the power lock to block further power transition requests
++	   while the KCCB deferred list is being processed */
++	eError = PVRSRVPowerLock(psDeviceNode);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "%s: Failed to acquire PowerLock (device: %p, error: %s)",
++		         __func__, psDeviceNode, PVRSRVGetErrorString(eError)));
++		return;
++	}
++
++	/* Try to send the deferred KCCB commands; do not poll from here */
++	eError = RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
++
++	PVRSRVPowerUnlock(psDeviceNode);
++
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_MESSAGE,
++		         "%s could not flush the deferred KCCB list; the KCCB is full.",
++		         __func__));
++	}
++}
++
++static void RGX_MISRHandler_CheckFWActivePowerState(void
*psDevice) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = psDevice; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; ++#if defined(SUPPORT_LINUX_DVFS) ++ IMG_DVFS_DEVICE *psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice; ++ static IMG_BOOL bSuspendDevfreq = IMG_TRUE; ++#endif ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++#if defined(SUPPORT_LINUX_DVFS) ++ if (psFwSysData->ePowState == RGXFWIF_POW_ON) ++ { ++ if (bSuspendDevfreq) ++ { ++ devfreq_resume_device(psDVFSDevice->psDevFreq); ++ bSuspendDevfreq = IMG_FALSE; ++ } ++ } ++ else ++ { ++ if (!bSuspendDevfreq) ++ { ++ devfreq_suspend_device(psDVFSDevice->psDevFreq); ++ bSuspendDevfreq = IMG_TRUE; ++ } ++ } ++#endif ++ ++ if (psFwSysData->ePowState == RGXFWIF_POW_ON || psFwSysData->ePowState == RGXFWIF_POW_IDLE) ++ { ++ RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); ++ } ++ ++ if (psFwSysData->ePowState == RGXFWIF_POW_IDLE) ++ { ++ /* The FW is IDLE and therefore could be shut down */ ++ eError = RGXActivePowerRequest(psDeviceNode); ++ ++ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)) ++ { ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Failed RGXActivePowerRequest call (device: %p) with %s", ++ __func__, psDeviceNode, PVRSRVGetErrorString(eError))); ++ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); ++ } ++ else ++ { ++ /* Re-schedule the power down request as it was deferred. */ ++ OSScheduleMISR(psDevInfo->pvAPMISRData); ++ } ++ } ++ } ++ ++} ++ ++/* Shorter defines to keep the code a bit shorter */ ++#define GPU_IDLE RGXFWIF_GPU_UTIL_STATE_IDLE ++#define GPU_ACTIVE RGXFWIF_GPU_UTIL_STATE_ACTIVE ++#define GPU_BLOCKED RGXFWIF_GPU_UTIL_STATE_BLOCKED ++#define MAX_ITERATIONS 64 ++ ++static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_HANDLE hGpuUtilUser, ++ RGXFWIF_GPU_UTIL_STATS *psReturnStats) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; ++ RGXFWIF_GPU_UTIL_STATS *psAggregateStats; ++ IMG_UINT64 ui64TimeNow; ++ IMG_UINT32 ui32Attempts; ++ IMG_UINT32 ui32Remainder; ++ ++ ++ /***** (1) Initialise return stats *****/ ++ ++ psReturnStats->bValid = IMG_FALSE; ++ psReturnStats->ui64GpuStatIdle = 0; ++ psReturnStats->ui64GpuStatActive = 0; ++ psReturnStats->ui64GpuStatBlocked = 0; ++ psReturnStats->ui64GpuStatCumulative = 0; ++ ++ if (hGpuUtilUser == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ psAggregateStats = hGpuUtilUser; ++ ++ ++ /* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */ ++ for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++) ++ { ++ IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0}; ++ IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0; ++ IMG_UINT32 i = 0; ++ ++ ++ /***** (2) Get latest data from shared area *****/ ++ ++ OSLockAcquire(psDevInfo->hGPUUtilLock); ++ ++ /* ++ * First attempt at detecting if the FW is in the middle of an update. ++ * This should also help if the FW is in the middle of a 64 bit variable update. 
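++ * The loop below keeps re-reading ui64LastWord and the counter of
++ * the last recorded state until two consecutive samples agree (or
++ * MAX_ITERATIONS is reached), i.e. a lock-free, seqlock-style retry
++ * against the FW writer.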
++ */ ++ while (((ui64LastWord != psUtilFWCb->ui64LastWord) || ++ (aui64TmpCounters[ui64LastState] != ++ psUtilFWCb->aui64StatsCounters[ui64LastState])) && ++ (i < MAX_ITERATIONS)) ++ { ++ ui64LastWord = psUtilFWCb->ui64LastWord; ++ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord); ++ aui64TmpCounters[GPU_IDLE] = psUtilFWCb->aui64StatsCounters[GPU_IDLE]; ++ aui64TmpCounters[GPU_ACTIVE] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE]; ++ aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED]; ++ i++; ++ } ++ ++ OSLockRelease(psDevInfo->hGPUUtilLock); ++ ++ if (i == MAX_ITERATIONS) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "RGXGetGpuUtilStats could not get reliable data after trying %u times", i)); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ ++ /***** (3) Compute return stats *****/ ++ ++ /* Update temp counters to account for the time since the last update to the shared ones */ ++ OSMemoryBarrier(NULL); /* Ensure the current time is read after the loop above */ ++ ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDeviceNode)); ++ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord); ++ ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); ++ aui64TmpCounters[ui64LastState] += ui64LastPeriod; ++ ++ /* Get statistics for a user since its last request */ ++ psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE], ++ psAggregateStats->ui64GpuStatIdle); ++ psReturnStats->ui64GpuStatActive = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE], ++ psAggregateStats->ui64GpuStatActive); ++ psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED], ++ psAggregateStats->ui64GpuStatBlocked); ++ psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatIdle + ++ psReturnStats->ui64GpuStatActive + ++ psReturnStats->ui64GpuStatBlocked; ++ ++ if (psAggregateStats->ui64TimeStamp != 0) ++ { ++ IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp; ++ /* We expect to return at least 75% of the time since the last call in GPU stats */ ++ IMG_UINT64 ui64MinReturnedStats = ui64TimeSinceLastCall - (ui64TimeSinceLastCall / 4); ++ ++ /* ++ * If the returned stats are substantially lower than the time since ++ * the last call, then the Host might have read a partial update from the FW. ++ * If this happens, try sampling the shared counters again. 
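++ * At most four sampling attempts are made; see the enclosing loop.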
++ */ ++ if (psReturnStats->ui64GpuStatCumulative < ui64MinReturnedStats) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: Return stats (%" IMG_UINT64_FMTSPEC ") too low " ++ "(call period %" IMG_UINT64_FMTSPEC ")", ++ __func__, psReturnStats->ui64GpuStatCumulative, ui64TimeSinceLastCall)); ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Attempt #%u has failed, trying again", ++ __func__, ui32Attempts)); ++ continue; ++ } ++ } ++ ++ break; ++ } ++ ++ ++ /***** (4) Update aggregate stats for the current user *****/ ++ ++ psAggregateStats->ui64GpuStatIdle += psReturnStats->ui64GpuStatIdle; ++ psAggregateStats->ui64GpuStatActive += psReturnStats->ui64GpuStatActive; ++ psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked; ++ psAggregateStats->ui64TimeStamp = ui64TimeNow; ++ ++ ++ /***** (5) Convert return stats to microseconds *****/ ++ ++ psReturnStats->ui64GpuStatIdle = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &ui32Remainder); ++ psReturnStats->ui64GpuStatActive = OSDivide64(psReturnStats->ui64GpuStatActive, 1000, &ui32Remainder); ++ psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder); ++ psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder); ++ ++ /* Check that the return stats make sense */ ++ if (psReturnStats->ui64GpuStatCumulative == 0) ++ { ++ /* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD ++ * returned 0. This could happen if the GPU frequency value ++ * is not well calibrated and the FW is updating the GPU state ++ * while the Host is reading it. ++ * When such an event happens frequently, timers or the aggregate ++ * stats might not be accurate... ++ */ ++ PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data.")); ++ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE; ++ } ++ ++ psReturnStats->bValid = IMG_TRUE; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser) ++{ ++ RGXFWIF_GPU_UTIL_STATS *psAggregateStats; ++ ++ /* NoStats used since this may be called outside of the register/de-register ++ * process calls which track memory use. */ ++ psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS)); ++ if (psAggregateStats == NULL) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ psAggregateStats->ui64GpuStatIdle = 0; ++ psAggregateStats->ui64GpuStatActive = 0; ++ psAggregateStats->ui64GpuStatBlocked = 0; ++ psAggregateStats->ui64TimeStamp = 0; ++ ++ /* Not used */ ++ psAggregateStats->bValid = IMG_FALSE; ++ psAggregateStats->ui64GpuStatCumulative = 0; ++ ++ *phGpuUtilUser = psAggregateStats; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser) ++{ ++ RGXFWIF_GPU_UTIL_STATS *psAggregateStats; ++ ++ if (hGpuUtilUser == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psAggregateStats = hGpuUtilUser; ++ OSFreeMemNoStats(psAggregateStats); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ RGX MISR Handler ++*/ ++static void RGX_MISRHandler_Main (void *pvData) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = pvData; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ /* Give the HWPerf service a chance to transfer some data from the FW ++ * buffer to the host driver transport layer buffer. 
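++ * The transfer is performed before the global event object is
++ * signalled further below, so waiters woken there see fresh data.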
++ */ ++ RGXHWPerfDataStoreCB(psDeviceNode); ++ ++ /* Inform other services devices that we have finished an operation */ ++ PVRSRVNotifyCommandCompletion(psDeviceNode); ++ ++#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD) ++ /* Normally, firmware CCB only exists for the primary FW thread unless PDVFS ++ is running on the second[ary] FW thread, here we process said CCB */ ++ RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice); ++#endif ++ ++ /* Handle Safety events if necessary */ ++ RGXSafetyEventHandler(psDeviceNode->pvDevice); ++ ++ /* Signal the global event object */ ++ PVRSRVSignalGlobalEO(); ++ ++ /* Process the Firmware CCB for pending commands */ ++ RGXCheckFirmwareCCB(psDeviceNode->pvDevice); ++ ++ /* Calibrate the GPU frequency and recorrelate Host and GPU timers (done every few seconds) */ ++ RGXTimeCorrRestartPeriodic(psDeviceNode); ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Process Workload Estimation Specific commands from the FW */ ++ WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice); ++#endif ++ ++ if (psDevInfo->pvAPMISRData == NULL) ++ { ++ RGX_MISR_ProcessKCCBDeferredList(psDeviceNode); ++ } ++} ++#endif /* !defined(NO_HARDWARE) */ ++ ++ ++#if defined(PDUMP) ++static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ PMR *psFWDataPMR; ++ RGXMIPSFW_BOOT_DATA *psBootData; ++ IMG_DEV_PHYADDR sTmpAddr; ++ IMG_UINT32 ui32BootConfOffset, ui32ParamOffset, i; ++ PVRSRV_ERROR eError; ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); ++ ui32BootConfOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); ++ ui32BootConfOffset += RGXMIPSFW_BOOTLDR_CONF_OFFSET; ++ ++ /* The physical addresses used by a pdump player will be different ++ * than the ones we have put in the MIPS bootloader configuration data. ++ * We have to tell the pdump player to replace the original values with the real ones. 
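++ * This is done below with label-to-memory dumps: the register base,
++ * each page-table physical address and the stack physical address
++ * are patched at playback time.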
++ */ ++ PDUMPCOMMENT(psDeviceNode, "Pass new boot parameters to the FW"); ++ ++ /* Rogue Registers physical address */ ++ ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64RegBase); ++ ++ eError = PDumpRegLabelToMem64(RGX_PDUMPREG_NAME, ++ 0x0, ++ psFWDataPMR, ++ ui32ParamOffset, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of Rogue registers phy address failed (%u)", eError)); ++ return eError; ++ } ++ ++ /* Page Table physical Address */ ++ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sTmpAddr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXBootldrDataInit: MMU_AcquireBaseAddr failed (%u)", ++ eError)); ++ return eError; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, ++ (void **)&psBootData); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire pointer to FW data (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++ psBootData = IMG_OFFSET_ADDR(psBootData, ui32BootConfOffset); ++ ++ for (i = 0; i < psBootData->ui32PTNumPages; i++) ++ { ++ ui32ParamOffset = ui32BootConfOffset + ++ offsetof(RGXMIPSFW_BOOT_DATA, aui64PTPhyAddr[0]) ++ + i * sizeof(psBootData->aui64PTPhyAddr[0]); ++ ++ eError = PDumpPTBaseObjectToMem64(psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName, ++ psFWDataPMR, ++ 0, ++ ui32ParamOffset, ++ PDUMP_FLAGS_CONTINUOUS, ++ MMU_LEVEL_1, ++ sTmpAddr.uiAddr, ++ i << psBootData->ui32PTLog2PageSize); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of page tables phy address failed (%u)", eError)); ++ return eError; ++ } ++ } ++ ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); ++ ++ /* Stack physical address */ ++ ui32ParamOffset = ui32BootConfOffset + offsetof(RGXMIPSFW_BOOT_DATA, ui64StackPhyAddr); ++ ++ eError = PDumpMemLabelToMem64(psFWDataPMR, ++ psFWDataPMR, ++ RGXGetFWImageSectionOffset(NULL, MIPS_STACK), ++ ui32ParamOffset, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of stack phy address failed (%u)", eError)); ++ return eError; ++ } ++ ++ return eError; ++} ++#endif /* PDUMP */ ++ ++static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo, ++ PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ /* Save information used on power transitions for later ++ * (when RGXStart and RGXStop are executed) ++ */ ++ psDevInfo->sLayerParams.psDevInfo = psDevInfo; ++ psDevInfo->sLayerParams.psDevConfig = psDevConfig; ++#if defined(PDUMP) ++ psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS; ++#endif ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) || defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || ++ RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ IMG_DEV_PHYADDR sKernelMMUCtxPCAddr; ++ ++ if (psDevInfo->psDeviceNode->bAutoVzFwIsUp) ++ { ++ /* If AutoVz firmware is up at this stage, the driver initialised it ++ * during a previous life-cycle. The firmware's memory is already pre-mapped ++ * and the MMU page tables reside in the predetermined memory carveout. ++ * The Kernel MMU Context created in this life-cycle is a dummy structure ++ * that is not used for mapping. 
++ * To program the Device's BIF with the correct PC address, use the base ++ * address of the carveout reserved for MMU mappings as Kernel MMU PC Address */ ++#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) ++ sKernelMMUCtxPCAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; ++#else ++ PHYS_HEAP_CONFIG *psFwHeapCfg = FindPhysHeapConfig(psDevConfig, ++ PHYS_HEAP_USAGE_FW_MAIN); ++ eError = (psFwHeapCfg != NULL) ? PVRSRV_OK : PVRSRV_ERROR_PHYSHEAP_CONFIG; ++ PVR_LOG_RETURN_IF_ERROR(eError, "FindPhysHeapConfig(PHYS_HEAP_USAGE_FW_MAIN)"); ++ ++ sKernelMMUCtxPCAddr.uiAddr = psFwHeapCfg->sCardBase.uiAddr + ++ (RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED); ++#endif /* PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR */ ++ } ++ else ++ { ++ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, ++ &sKernelMMUCtxPCAddr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog")); ++ return eError; ++ } ++ } ++ ++ psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr; ++ } ++ else ++#endif ++ { ++ PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR); ++ PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR); ++ IMG_DEV_PHYADDR sPhyAddr; ++ IMG_BOOL bValid; ++ ++#if defined(SUPPORT_ALT_REGBASE) ++ psDevInfo->sLayerParams.sGPURegAddr = psDevConfig->sAltRegsGpuPBase; ++#else ++ /* The physical address of the GPU registers needs to be translated ++ * in case we are in a LMA scenario ++ */ ++ PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], ++ 1, ++ &sPhyAddr, ++ &(psDevConfig->sRegsCpuPBase)); ++ ++ psDevInfo->sLayerParams.sGPURegAddr = sPhyAddr; ++#endif ++ ++ /* Register bank must be aligned to 512KB (as per the core integration) to ++ * prevent the FW accessing incorrect registers */ ++ if ((psDevInfo->sLayerParams.sGPURegAddr.uiAddr & 0x7FFFFU) != 0U) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Register bank must be aligned to 512KB, but current address (0x%016"IMG_UINT64_FMTSPECX") is not", ++ psDevInfo->sLayerParams.sGPURegAddr.uiAddr)); ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ ++ eError = RGXGetPhyAddr(psFWCodePMR, ++ &sPhyAddr, ++ RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE), ++ OSGetPageShift(), /* FW will be using the same page size as the OS */ ++ 1, ++ &bValid); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI code address")); ++ return eError; ++ } ++ ++ psDevInfo->sLayerParams.sBootRemapAddr = sPhyAddr; ++ ++ eError = RGXGetPhyAddr(psFWDataPMR, ++ &sPhyAddr, ++ RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA), ++ OSGetPageShift(), ++ 1, ++ &bValid); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI data address")); ++ return eError; ++ } ++ ++ psDevInfo->sLayerParams.sDataRemapAddr = sPhyAddr; ++ ++ eError = RGXGetPhyAddr(psFWCodePMR, ++ &sPhyAddr, ++ RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE), ++ OSGetPageShift(), ++ 1, ++ &bValid); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW exceptions address")); ++ return eError; ++ } ++ ++ psDevInfo->sLayerParams.sCodeRemapAddr = sPhyAddr; ++ ++ psDevInfo->sLayerParams.sTrampolineRemapAddr.uiAddr = psDevInfo->psTrampoline->sPhysAddr.uiAddr; ++ ++ psDevInfo->sLayerParams.bDevicePA0IsValid = psDevConfig->bDevicePA0IsValid; ++ } ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && 
!defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ /* Send information used on power transitions to the trusted device as ++ * in this setup the driver cannot start/stop the GPU and perform resets ++ */ ++ if (psDevConfig->pfnTDSetPowerParams) ++ { ++ PVRSRV_TD_POWER_PARAMS sTDPowerParams; ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; ++ } ++#endif ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr; ++ } ++#endif ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ sTDPowerParams.sGPURegAddr = psDevInfo->sLayerParams.sGPURegAddr; ++ sTDPowerParams.sBootRemapAddr = psDevInfo->sLayerParams.sBootRemapAddr; ++ sTDPowerParams.sCodeRemapAddr = psDevInfo->sLayerParams.sCodeRemapAddr; ++ sTDPowerParams.sDataRemapAddr = psDevInfo->sLayerParams.sDataRemapAddr; ++ } ++ ++ eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData, ++ &sTDPowerParams); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!")); ++ eError = PVRSRV_ERROR_NOT_IMPLEMENTED; ++ } ++#endif ++ ++ return eError; ++} ++ ++/* ++ RGXSystemHasFBCDCVersion31 ++*/ ++static IMG_BOOL RGXSystemHasFBCDCVersion31(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++#if defined(SUPPORT_VALIDATION) ++ IMG_UINT32 ui32FBCDCVersionOverride = 0; ++#endif ++ ++#if defined(FIX_HW_ERN_66622_BIT_MASK) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 66622)) ++ { ++#if defined(SUPPORT_VALIDATION) ++ void *pvAppHintState = NULL; ++ ++ IMG_UINT32 ui32AppHintDefault; ++ ++ OSCreateKMAppHintState(&pvAppHintState); ++ ui32AppHintDefault = PVRSRV_APPHINT_FBCDCVERSIONOVERRIDE; ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, FBCDCVersionOverride, ++ &ui32AppHintDefault, &ui32FBCDCVersionOverride); ++ OSFreeKMAppHintState(pvAppHintState); ++ ++ if (ui32FBCDCVersionOverride > 0) ++ { ++ if (ui32FBCDCVersionOverride == 2) ++ { ++ return IMG_TRUE; ++ } ++ } ++ else ++#endif ++ { ++ if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) ++ { ++ return IMG_TRUE; ++ } ++ } ++ } ++ else ++#endif ++ { ++ ++#if defined(SUPPORT_VALIDATION) ++ if (ui32FBCDCVersionOverride == 2) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: FBCDCVersionOverride forces FBC3.1 but this core doesn't support it!", ++ __func__)); ++ } ++#endif ++ ++#if !defined(NO_HARDWARE) ++ if (psDeviceNode->psDevConfig->bHasFBCDCVersion31) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: System uses FBCDC3.1 but GPU doesn't support it!", ++ __func__)); ++ } ++#endif ++ } ++ ++ return IMG_FALSE; ++} ++ ++/* ++ RGXDevMMUAttributes ++*/ ++static MMU_DEVICEATTRIBS *RGXDevMMUAttributes(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bKernelMemoryCtx) ++{ ++ MMU_DEVICEATTRIBS *psMMUDevAttrs; ++ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS)) ++ { ++ psMMUDevAttrs = bKernelMemoryCtx ? 
++ psDeviceNode->psFirmwareMMUDevAttrs : ++ psDeviceNode->psMMUDevAttrs; ++ } ++ else ++ { ++ psMMUDevAttrs = psDeviceNode->psMMUDevAttrs; ++ } ++ ++ return psMMUDevAttrs; ++} ++ ++/* ++ * RGXInitDevPart2 ++ */ ++PVRSRV_ERROR RGXInitDevPart2(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32DeviceFlags, ++ IMG_UINT32 ui32HWPerfHostFilter, ++ RGX_ACTIVEPM_CONF eActivePMConf) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_DEV_POWER_STATE eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; ++ ++ /* Assume system layer has turned power on by this point, required before powering device */ ++ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON; ++ ++ PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 2"); ++ ++#if defined(PDUMP) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ RGXPDumpBootldrData(psDeviceNode, psDevInfo); ++ } ++#endif ++#if defined(TIMING) || defined(DEBUG) ++ OSUserModeAccessToPerfCountersEn(); ++#endif ++ ++ /* Initialise Device Flags */ ++ psDevInfo->ui32DeviceFlags = 0; ++ RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE); ++ ++ /* Allocate DVFS Table (needs to be allocated before GPU trace events ++ * component is initialised because there is a dependency between them) */ ++ psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable))); ++ PVR_LOG_GOTO_IF_NOMEM(psDevInfo->psGpuDVFSTable, eError, ErrorExit); ++ ++ if (psDevInfo->ui32HWPerfHostFilter == 0) ++ { ++ RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter); ++ } ++ ++ /* If HWPerf enabled allocate all resources for the host side buffer. */ ++ if (psDevInfo->ui32HWPerfHostFilter != 0) ++ { ++ if (RGXHWPerfHostInitOnDemandResources(psDevInfo) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand" ++ " initialisation failed.")); ++ } ++ } ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Initialise work estimation lock */ ++ eError = OSLockCreate(&psDevInfo->hWorkEstLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(WorkEstLock)", ErrorExit); ++#endif ++ ++ /* Initialise lists of ZSBuffers */ ++ eError = OSLockCreate(&psDevInfo->hLockZSBuffer); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockZSBuffer)", ErrorExit); ++ dllist_init(&psDevInfo->sZSBufferHead); ++ psDevInfo->ui32ZSBufferCurrID = 1; ++ ++ /* Initialise lists of growable Freelists */ ++ eError = OSLockCreate(&psDevInfo->hLockFreeList); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(LockFreeList)", ErrorExit); ++ dllist_init(&psDevInfo->sFreeListHead); ++ psDevInfo->ui32FreelistCurrID = 1; ++ ++ eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(DebugFaultInfoLock)", ErrorExit); ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) ++ { ++ eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(MMUCtxUnregLock)", ErrorExit); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ eError = OSLockCreate(&psDevInfo->hNMILock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(NMILock)", ErrorExit); ++ } ++ ++ /* Setup GPU utilisation stats update callback */ ++ eError = OSLockCreate(&psDevInfo->hGPUUtilLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate(GPUUtilLock)", ErrorExit); ++#if !defined(NO_HARDWARE) ++ psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats; ++#endif ++ ++ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON; ++ 
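/* Summary of the Active Power Management (APM) decision below:
++	 *   RGX_ACTIVEPM_FORCE_ON -> APM enabled
++	 *   RGX_ACTIVEPM_DEFAULT  -> APM follows the system layer's bEnableActivePM flag
++	 *   otherwise             -> APM disabled
++	 * APM is then vetoed in virtualized (non-NATIVE) modes, and AutoVz builds
++	 * assert it is off because the virtualisation watchdog is incompatible
++	 * with APM. With APM enabled, the default power state is lowered to OFF
++	 * so the device is only powered on when there is work for it.
++	 */
++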
psDevInfo->eActivePMConf = eActivePMConf;
++
++	/* Set up the Active Power Mgmt callback */
++#if !defined(NO_HARDWARE)
++	{
++		RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
++		IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM;
++		IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
++		                      (eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
++
++		if (bEnableAPM && (!PVRSRV_VZ_MODE_IS(NATIVE)))
++		{
++			PVR_DPF((PVR_DBG_WARNING, "%s: Active Power Management disabled in virtualization mode", __func__));
++			bEnableAPM = IMG_FALSE;
++		}
++
++#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) && defined(SUPPORT_AUTOVZ)
++		/* The AutoVz driver enables a virtualisation watchdog that is not compatible with APM */
++		PVR_ASSERT(bEnableAPM == IMG_FALSE);
++#endif
++
++		if (bEnableAPM)
++		{
++			eError = OSInstallMISR(&psDevInfo->pvAPMISRData,
++			                       RGX_MISRHandler_CheckFWActivePowerState,
++			                       psDeviceNode,
++			                       "RGX_CheckFWActivePower");
++			PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(APMISR)", ErrorExit);
++
++			/* Prevent the device being woken up before there is something to do. */
++			eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
++		}
++	}
++#endif
++
++	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM,
++	                                    RGXQueryAPMState,
++	                                    RGXSetAPMState,
++	                                    psDeviceNode,
++	                                    NULL);
++
++	RGXTimeCorrInitAppHintCallbacks(psDeviceNode);
++
++	/* Register the device with the power manager */
++	eError = PVRSRVRegisterPowerDevice(psDeviceNode,
++	                                   (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPrePowerState : &RGXVzPrePowerState,
++	                                   (PVRSRV_VZ_MODE_IS(NATIVE)) ? &RGXPostPowerState : &RGXVzPostPowerState,
++	                                   psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState,
++	                                   &RGXPreClockSpeedChange, &RGXPostClockSpeedChange,
++	                                   &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest,
++	                                   &RGXDustCountChange,
++	                                   (IMG_HANDLE)psDeviceNode,
++	                                   PVRSRV_DEV_POWER_STATE_OFF,
++	                                   eDefaultPowerState);
++	PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVRegisterPowerDevice", ErrorExit);
++
++	eError = RGXSetPowerParams(psDevInfo, psDevConfig);
++	PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetPowerParams", ErrorExit);
++
++#if defined(SUPPORT_VALIDATION)
++	{
++		void *pvAppHintState = NULL;
++
++		IMG_UINT32 ui32AppHintDefault;
++
++		OSCreateKMAppHintState(&pvAppHintState);
++		ui32AppHintDefault = PVRSRV_APPHINT_TESTSLRINTERVAL;
++		OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, TestSLRInterval,
++		                     &ui32AppHintDefault, &psDevInfo->ui32TestSLRInterval);
++		PVR_LOG(("OSGetKMAppHintUINT32(TestSLRInterval) ui32AppHintDefault=%d, psDevInfo->ui32TestSLRInterval=%d",
++		         ui32AppHintDefault, psDevInfo->ui32TestSLRInterval));
++		OSFreeKMAppHintState(pvAppHintState);
++		psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval;
++		psDevInfo->ui32SLRSkipFWAddr = 0;
++
++		ui32AppHintDefault = 0;
++		OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, ECCRAMErrInj, &ui32AppHintDefault, &psDevInfo->ui32ECCRAMErrInjModule);
++		psDevInfo->ui32ECCRAMErrInjInterval = RGXKM_ECC_ERR_INJ_INTERVAL;
++
++#if defined(PDUMP) && defined(SUPPORT_VALIDATION)
++		/* POL on ECC RAM GPU fault events; MARS is a FW fault */
++		if (psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_DISABLE &&
++		    psDevInfo->ui32ECCRAMErrInjModule != RGXKM_ECC_ERR_INJ_MARS)
++		{
++			PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, "Verify ECC fault event");
++			eError = PDUMPREGPOL(psDeviceNode, RGX_PDUMPREG_NAME,
++			                     RGX_CR_SCRATCH11,
++			                     1U,
++			                     0xFFFFFFFF,
++			                     PDUMP_FLAGS_DEINIT,
++			                     PDUMP_POLL_OPERATOR_EQUAL);
++		}
++#endif
++	}
++#endif
++
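++	/* The PDUMP block below captures the GPU deinitialisation script at init
++	 * time: RGXStop runs with the DEINIT and NOHW PDump flags so its commands
++	 * are recorded into the deinit buffer without touching the hardware; on
++	 * NO_HARDWARE builds the FW is first kicked and polled until it reports
++	 * the IDLE power state.
++	 */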
++#if defined(PDUMP)
++#if defined(NO_HARDWARE)
++	PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT, "Wait for the FW to signal idle");
++
++	/* Kick the FW once, in case it still needs to detect and set the idle state */
++	PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME,
++	           RGX_CR_MTS_SCHEDULE,
++	           RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK,
++	           PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT);
++
++	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfFwSysDataMemDesc,
++	                                offsetof(RGXFWIF_SYSDATA, ePowState),
++	                                RGXFWIF_POW_IDLE,
++	                                0xFFFFFFFFU,
++	                                PDUMP_POLL_OPERATOR_EQUAL,
++	                                PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_DEINIT);
++	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemPDumpDevmemPol32", ErrorExit);
++#endif
++
++	/* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */
++	PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_DEINIT,
++	                      "RGX deinitialisation commands");
++
++	psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW;
++
++	if (!PVRSRV_VZ_MODE_IS(GUEST))
++	{
++		eError = RGXStop(&psDevInfo->sLayerParams);
++		PVR_LOG_GOTO_IF_ERROR(eError, "RGXStop", ErrorExit);
++	}
++
++	psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW);
++#endif
++
++#if !defined(NO_HARDWARE)
++	eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode);
++	PVR_LOG_GOTO_IF_ERROR(eError, "RGXInstallProcessQueuesMISR", ErrorExit);
++
++	/* Register RGX to receive notifications when other devices complete some work */
++	PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode);
++
++	/* Register the interrupt handlers */
++	eError = OSInstallMISR(&psDevInfo->pvMISRData,
++	                       RGX_MISRHandler_Main,
++	                       psDeviceNode,
++	                       "RGX_Main");
++	PVR_LOG_GOTO_IF_ERROR(eError, "OSInstallMISR(MISR)", ErrorExit);
++
++	/* Register appropriate mechanism for clearing hw interrupts */
++	if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, IRQ_PER_OS)) && (!PVRSRV_VZ_MODE_IS(NATIVE)))
++	{
++		psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated;
++	}
++	else if (PVRSRV_VZ_MODE_IS(GUEST))
++	{
++		psDevInfo->pfnRGXAckIrq = NULL;
++	}
++	else
++	{
++		/* native and host drivers must clear the unique GPU physical interrupt */
++		if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
++		{
++			psDevInfo->pfnRGXAckIrq = RGXAckIrqMIPS;
++		}
++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
++		else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
++		{
++			psDevInfo->pfnRGXAckIrq = RGXAckIrqMETA;
++		}
++#endif
++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
++		else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
++		{
++			psDevInfo->pfnRGXAckIrq = RGXAckIrqDedicated;
++		}
++#endif
++		else
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: GPU IRQ clearing mechanism not implemented "
++			         "for this architecture.", __func__));
++			PVR_LOG_GOTO_WITH_ERROR("pfnRGXAckIrq", eError, PVRSRV_ERROR_NOT_IMPLEMENTED, ErrorExit);
++		}
++	}
++
++#if defined(RGX_IRQ_HYPERV_HANDLER)
++	/* The hypervisor receives and acknowledges the GPU irq, then it injects an
++	 * irq only in the recipient OS.
The KM driver doesn't handle the GPU irq line */ ++ psDevInfo->pfnRGXAckIrq = NULL; ++#endif ++ ++ eError = SysInstallDeviceLISR(psDevConfig->hSysData, ++ psDevConfig->ui32IRQ, ++ PVRSRV_MODNAME, ++ RGX_LISRHandler, ++ psDeviceNode, ++ &psDevInfo->pvLISRData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "SysInstallDeviceLISR", ErrorExit); ++#endif /* !defined(NO_HARDWARE) */ ++ ++#if defined(PDUMP) ++/* We need to wrap the check for S7_CACHE_HIERARCHY being supported inside ++ * #if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK)...#endif, as the ++ * RGX_IS_FEATURE_SUPPORTED macro references a bitmask define derived from its ++ * last parameter which will not exist on architectures which do not have this ++ * feature. ++ * Note we check for RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK rather than for ++ * RGX_FEATURE_S7_CACHE_HIERARCHY (which might seem a better choice) as this ++ * means we can build the kernel driver without having to worry about the BVNC ++ * (the BIT_MASK is defined in rgx_bvnc_defs_km.h for all BVNCs for a given ++ * architecture, whereas the FEATURE is only defined for those BVNCs that ++ * support it). ++ */ ++#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK) ++ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY))) ++#endif ++ { ++ if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) && ++ !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) ++ { ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "System has NO cache snooping"); ++ } ++ else ++ { ++ if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig)) ++ { ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "System has CPU cache snooping"); ++ } ++ if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig)) ++ { ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "System has DEVICE cache snooping"); ++ } ++ } ++ } ++#endif ++ ++#if defined(RGX_FEATURE_COMPUTE_ONLY_BIT_MASK) ++ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE_ONLY)) ++#endif ++ { ++ eError = PVRSRVTQLoadShaders(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVTQLoadShaders", ErrorExit); ++ } ++ ++ psDevInfo->bDevInit2Done = IMG_TRUE; ++ ++ return PVRSRV_OK; ++ ++ErrorExit: ++ DevPart2DeInitRGX(psDeviceNode); ++ ++ return eError; ++} ++ ++#define VZ_RGX_FW_FILENAME_SUFFIX ".vz" ++#define RGX_64K_FW_FILENAME_SUFFIX ".64k" ++#define RGX_FW_FILENAME_MAX_SIZE ((sizeof(RGX_FW_FILENAME)+ \ ++ RGX_BVNC_STR_SIZE_MAX+sizeof(VZ_RGX_FW_FILENAME_SUFFIX) + sizeof(RGX_64K_FW_FILENAME_SUFFIX))) ++ ++static void _GetFWFileName(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_CHAR *pszFWFilenameStr, ++ IMG_CHAR *pszFWpFilenameStr) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ const IMG_CHAR * const pszFWFilenameSuffix = ++ PVRSRV_VZ_MODE_IS(NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX; ++ ++ const IMG_CHAR * const pszFWFilenameSuffix2 = ++ ((OSGetPageSize() == RGX_MMU_PAGE_SIZE_64KB) && ++ RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ ? RGX_64K_FW_FILENAME_SUFFIX : ""; ++ ++ OSSNPrintf(pszFWFilenameStr, RGX_FW_FILENAME_MAX_SIZE, ++ "%s." RGX_BVNC_STR_FMTSPEC "%s%s", ++ RGX_FW_FILENAME, ++ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, ++ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, ++ pszFWFilenameSuffix, pszFWFilenameSuffix2); ++ ++ OSSNPrintf(pszFWpFilenameStr, RGX_FW_FILENAME_MAX_SIZE, ++ "%s." 
RGX_BVNC_STRP_FMTSPEC "%s%s", ++ RGX_FW_FILENAME, ++ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, ++ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C, ++ pszFWFilenameSuffix, pszFWFilenameSuffix2); ++} ++ ++PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, ++ OS_FW_IMAGE **ppsRGXFW, ++ const IMG_BYTE **ppbFWData) ++{ ++ IMG_CHAR aszFWFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; ++ IMG_CHAR aszFWpFilenameStr[RGX_FW_FILENAME_MAX_SIZE]; ++ IMG_CHAR *pszLoadedFwStr; ++ PVRSRV_ERROR eErr; ++ ++ /* Prepare the image filenames to use in the following code */ ++ _GetFWFileName(psDeviceNode, aszFWFilenameStr, aszFWpFilenameStr); ++ ++ /* Get pointer to Firmware image */ ++ pszLoadedFwStr = aszFWFilenameStr; ++ eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW); ++ if (eErr == PVRSRV_ERROR_NOT_FOUND) ++ { ++ pszLoadedFwStr = aszFWpFilenameStr; ++ eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW); ++ if (eErr == PVRSRV_ERROR_NOT_FOUND) ++ { ++ pszLoadedFwStr = RGX_FW_FILENAME; ++ eErr = OSLoadFirmware(psDeviceNode, pszLoadedFwStr, OS_FW_VERIFY_FUNCTION, ppsRGXFW); ++ if (eErr == PVRSRV_ERROR_NOT_FOUND) ++ { ++ PVR_DPF((PVR_DBG_FATAL, "All RGX Firmware image loads failed for '%s' (%s)", ++ aszFWFilenameStr, PVRSRVGetErrorString(eErr))); ++ } ++ } ++ } ++ ++ if (eErr == PVRSRV_OK) ++ { ++ PVR_LOG(("RGX Firmware image '%s' loaded", pszLoadedFwStr)); ++ *ppbFWData = (const IMG_BYTE*)OSFirmwareData(*ppsRGXFW); ++ } ++ else ++ { ++ *ppbFWData = NULL; ++ } ++ ++ return eErr; ++ ++} ++ ++#if defined(PDUMP) ++PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ return PVRSRV_OK; ++} ++#endif ++ ++PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ /* set up fw memory contexts */ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError; ++ ++#if defined(SUPPORT_AUTOVZ) ++ PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; ++ ++ if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp)) ++ { ++ /* Temporarily swap the MMU and default GPU physheap to allow the page ++ * tables of all memory mapped by the FwKernel context to be placed ++ * in a dedicated memory carveout. This should allow the firmware mappings to ++ * persist after a Host kernel crash or driver reset. */ ++ ++ psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap; ++ } ++#endif ++ ++ /* Register callbacks for creation of device memory contexts */ ++ psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; ++ psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; ++ ++ /* Create the memory context for the firmware. 
*/ ++ eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META, ++ &psDevInfo->psKernelDevmemCtx); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed DevmemCreateContext (%u)", ++ __func__, ++ eError)); ++ goto failed_to_create_ctx; ++ } ++ ++ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_MAIN_HEAP_IDENT, ++ &psDevInfo->psFirmwareMainHeap); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed DevmemFindHeapByName (%u)", ++ __func__, ++ eError)); ++ goto failed_to_find_heap; ++ } ++ ++ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_CONFIG_HEAP_IDENT, ++ &psDevInfo->psFirmwareConfigHeap); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed DevmemFindHeapByName (%u)", ++ __func__, ++ eError)); ++ goto failed_to_find_heap; ++ } ++ ++#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) ++ if (PVRSRV_VZ_MODE_IS(HOST)) ++ { ++ IMG_UINT32 ui32OSID; ++ for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) ++ { ++ IMG_CHAR szHeapName[RA_MAX_NAME_LENGTH]; ++ ++ OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID); ++ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName, ++ &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemFindHeapByName", failed_to_find_heap); ++ } ++ } ++#endif ++ ++#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) ++ if (PVRSRV_VZ_MODE_IS(HOST)) ++ { ++ IMG_DEV_PHYADDR sPhysHeapBase; ++ IMG_UINT32 ui32OSID; ++ ++ eError = PhysHeapGetDevPAddr(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN], &sPhysHeapBase); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapGetDevPAddr", failed_to_find_heap); ++ ++ for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) ++ { ++ IMG_DEV_PHYADDR sRawFwHeapBase = {sPhysHeapBase.uiAddr + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE)}; ++ ++ eError = RGXFwRawHeapAllocMap(psDeviceNode, ++ ui32OSID, ++ sRawFwHeapBase, ++ RGX_FIRMWARE_RAW_HEAP_SIZE); ++ if (eError != PVRSRV_OK) ++ { ++ for (; ui32OSID > RGX_FIRST_RAW_HEAP_OSID; ui32OSID--) ++ { ++ RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); ++ } ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", failed_to_find_heap); ++ } ++ } ++ ++#if defined(SUPPORT_AUTOVZ) ++ /* restore default Px setup */ ++ psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; ++#endif ++ } ++#else ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ eError = PvzClientMapDevPhysHeap(psDeviceNode->psDevConfig); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PvzClientMapDevPhysHeap", failed_to_find_heap); ++ } ++#endif /* defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) */ ++ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_TRUE); ++ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_TRUE); ++ } ++ ++ return eError; ++ ++failed_to_find_heap: ++ /* ++ * Clear the mem context create callbacks before destroying the RGX firmware ++ * context to avoid a spurious callback. 
++ */ ++ psDeviceNode->pfnRegisterMemoryContext = NULL; ++ psDeviceNode->pfnUnregisterMemoryContext = NULL; ++ DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); ++ psDevInfo->psKernelDevmemCtx = NULL; ++failed_to_create_ctx: ++ return eError; ++} ++ ++void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError; ++ ++#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) ++ if (PVRSRV_VZ_MODE_IS(HOST)) ++ { ++#if defined(SUPPORT_AUTOVZ) ++ PHYS_HEAP *psDefaultPhysHeap = psDeviceNode->psMMUPhysHeap; ++ ++ psDeviceNode->psMMUPhysHeap = psDeviceNode->psFwMMUReservedPhysHeap; ++ ++ if (!psDeviceNode->bAutoVzFwIsUp) ++#endif ++ { ++ IMG_UINT32 ui32OSID; ++ ++ for (ui32OSID = RGX_FIRST_RAW_HEAP_OSID; ui32OSID < RGX_NUM_OS_SUPPORTED; ui32OSID++) ++ { ++ RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID); ++ } ++ } ++#if defined(SUPPORT_AUTOVZ) ++ psDeviceNode->psMMUPhysHeap = psDefaultPhysHeap; ++#endif ++ } ++#else ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ (void) PvzClientUnmapDevPhysHeap(psDeviceNode->psDevConfig); ++ ++ if (psDevInfo->psFirmwareMainHeap) ++ { ++ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareMainHeap, IMG_FALSE); ++ } ++ if (psDevInfo->psFirmwareConfigHeap) ++ { ++ DevmemHeapSetPremapStatus(psDevInfo->psFirmwareConfigHeap, IMG_FALSE); ++ } ++ } ++#endif ++ ++ /* ++ * Clear the mem context create callbacks before destroying the RGX firmware ++ * context to avoid a spurious callback. ++ */ ++ psDeviceNode->pfnRegisterMemoryContext = NULL; ++ psDeviceNode->pfnUnregisterMemoryContext = NULL; ++ ++ if (psDevInfo->psKernelDevmemCtx) ++ { ++ eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx); ++ PVR_ASSERT(eError == PVRSRV_OK); ++ } ++} ++ ++static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32AlignChecksSizeUM, ++ IMG_UINT32 aui32AlignChecksUM[]) ++{ ++ static const IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM}; ++ IMG_UINT32 ui32UMChecksOffset = ARRAY_SIZE(aui32AlignChecksKM) + 1; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; ++ IMG_UINT32 i, *paui32FWAlignChecks; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ /* Skip the alignment check if the driver is guest ++ since there is no firmware to check against */ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, eError); ++ ++ if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: FW Alignment Check Mem Descriptor is NULL", ++ __func__)); ++ return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc, ++ (void **) &paui32FWAlignChecks); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to acquire kernel address for alignment checks (%u)", ++ __func__, ++ eError)); ++ return eError; ++ } ++ ++ paui32FWAlignChecks += ui32UMChecksOffset; ++ if (*paui32FWAlignChecks++ != ui32AlignChecksSizeUM) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Mismatching sizes of RGXFW_ALIGN_CHECKS_INIT" ++ " array between UM(%d) and FW(%d)", ++ __func__, ++ ui32AlignChecksSizeUM, ++ *paui32FWAlignChecks)); ++ eError = PVRSRV_ERROR_INVALID_ALIGNMENT; ++ goto return_; ++ } ++ ++ for (i = 0; i < ui32AlignChecksSizeUM; i++) ++ { ++ if (aui32AlignChecksUM[i] != paui32FWAlignChecks[i]) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: size/offset mismatch in RGXFW_ALIGN_CHECKS_INIT[%d]" ++ " between UM(%d) and FW(%d)", ++ __func__, i, aui32AlignChecksUM[i], paui32FWAlignChecks[i])); ++ eError = 
PVRSRV_ERROR_INVALID_ALIGNMENT;
++		}
++	}
++
++	if (eError == PVRSRV_ERROR_INVALID_ALIGNMENT)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Check for FW/KM structure"
++		         " alignment failed.", __func__));
++	}
++
++return_:
++
++	DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
++
++	return eError;
++}
++
++static
++PVRSRV_ERROR RGXAllocateFWMemoryRegion(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                       IMG_DEVMEM_SIZE_T ui32Size,
++                                       PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags,
++                                       const IMG_PCHAR pszText,
++                                       DEVMEM_MEMDESC **ppsMemDescPtr)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++	IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift();
++#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY)
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++#endif
++
++#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_MEMORY)
++	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
++	{
++		uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K;
++	}
++#endif
++
++	uiMemAllocFlags = (uiMemAllocFlags |
++	                   PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
++	                   PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &
++	                   RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp);
++
++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION)
++	uiMemAllocFlags &= PVRSRV_MEMALLOCFLAGS_TDFWMASK;
++#endif
++
++	PDUMPCOMMENT(psDeviceNode, "Allocate FW %s memory", pszText);
++
++	eError = DevmemFwAllocateExportable(psDeviceNode,
++	                                    ui32Size,
++	                                    1ULL << uiLog2Align,
++	                                    uiMemAllocFlags,
++	                                    pszText,
++	                                    ppsMemDescPtr);
++
++	return eError;
++}
++
++/*!
++ *******************************************************************************
++
++ @Function	RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver
++
++ @Description
++
++ Validate the FW build options against KM driver build options (KM build options only)
++
++ The following check is redundant, because the next check tests the same bits.
++ The redundancy arises because if the client and server are build-compatible,
++ and the client and firmware are build-compatible, then the server and
++ firmware are build-compatible as well.
++
++ The check is kept so that any incompatibility is reported with a clear
++ error message.
++
++ @Input psFwOsInit - FW init data
++
++ @Return   PVRSRV_ERROR - depending on mismatch found
++
++ ******************************************************************************/
++static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_OSINIT *psFwOsInit)
++{
++#if !defined(NO_HARDWARE)
++	IMG_UINT32 ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch;
++
++	if (psFwOsInit == NULL)
++		return PVRSRV_ERROR_INVALID_PARAMS;
++
++	ui32BuildOptions = (RGX_BUILD_OPTIONS_KM & RGX_BUILD_OPTIONS_MASK_FW);
++
++	ui32BuildOptionsFWKMPart = psFwOsInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_FW;
++
++	/* Check if the FW is missing support for any features required by the driver */
++	if (~ui32BuildOptionsFWKMPart & ui32BuildOptions)
++	{
++		ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
++#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
++		/* Mask out the debug flag option, as combinations of debug vs release between UM & KM are supported */
++		ui32BuildOptionsMismatch &= OPTIONS_STRICT;
++#endif
++		if ((ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
++		{
++			PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; "
++			         "extra options present in the KM driver: (0x%x). 
Please check rgx_options.h", ++ ui32BuildOptions & ui32BuildOptionsMismatch )); ++ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; ++ } ++ ++ if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0) ++ { ++ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; " ++ "extra options present in Firmware: (0x%x). Please check rgx_options.h", ++ ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch )); ++ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH; ++ } ++ PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ.")); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]")); ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++ ******************************************************************************* ++ ++ @Function RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver ++ ++ @Description ++ ++ Validate FW DDK version against driver DDK version ++ ++ @Input psDevInfo - device info ++ @Input psFwOsInit - FW init data ++ ++ @Return PVRSRV_ERROR - depending on mismatch found ++ ++ ******************************************************************************/ ++static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_OSINIT *psFwOsInit) ++{ ++#if defined(PDUMP)||(!defined(NO_HARDWARE)) ++ IMG_UINT32 ui32DDKVersion; ++ PVRSRV_ERROR eError; ++ ++ ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); ++#endif ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Compatibility check: KM driver and FW DDK version"); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + ++ offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion), ++ ui32DDKVersion, ++ 0xffffffff, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); ++ return eError; ++ } ++#endif ++ ++#if !defined(NO_HARDWARE) ++ if (psFwOsInit == NULL) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ if (psFwOsInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion) ++ { ++ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware DDK version (%u.%u).", ++ PVRVERSION_MAJ, PVRVERSION_MIN, ++ PVRVERSION_UNPACK_MAJ(psFwOsInit->sRGXCompChecks.ui32DDKVersion), ++ PVRVERSION_UNPACK_MIN(psFwOsInit->sRGXCompChecks.ui32DDKVersion))); ++ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH; ++ PVR_DBG_BREAK; ++ return eError; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware DDK version (%u.%u) match. [ OK ]", ++ PVRVERSION_MAJ, PVRVERSION_MIN, ++ PVRVERSION_MAJ, PVRVERSION_MIN)); ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*! 
++ ******************************************************************************* ++ ++ @Function RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver ++ ++ @Description ++ ++ Validate FW DDK build against driver DDK build ++ ++ @Input psDevInfo - device info ++ @Input psFwOsInit - FW init data ++ ++ @Return PVRSRV_ERROR - depending on mismatch found ++ ++ ******************************************************************************/ ++static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_OSINIT *psFwOsInit) ++{ ++ PVRSRV_ERROR eError=PVRSRV_OK; ++#if defined(PDUMP)||(!defined(NO_HARDWARE)) ++ IMG_UINT32 ui32DDKBuild; ++ ++ ui32DDKBuild = PVRVERSION_BUILD; ++#endif ++ ++#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Compatibility check: KM driver and FW DDK build"); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + ++ offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild), ++ ui32DDKBuild, ++ 0xffffffff, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); ++ return eError; ++ } ++#endif ++ ++#if !defined(NO_HARDWARE) ++ if (psFwOsInit == NULL) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ if (psFwOsInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild) ++ { ++ PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).", ++ ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); ++#if defined(PVRSRV_STRICT_COMPAT_CHECK) ++ eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH; ++ PVR_DBG_BREAK; ++ return eError; ++#endif ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. [ OK ]", ++ ui32DDKBuild, psFwOsInit->sRGXCompChecks.ui32DDKBuild)); ++ } ++#endif ++ return eError; ++} ++ ++/*! 
++ ******************************************************************************* ++ ++ @Function RGXDevInitCompatCheck_BVNC_FWAgainstDriver ++ ++ @Description ++ ++ Validate FW BVNC against driver BVNC ++ ++ @Input psDevInfo - device info ++ @Input psFwOsInit - FW init data ++ ++ @Return PVRSRV_ERROR - depending on mismatch found ++ ++ ******************************************************************************/ ++static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_OSINIT *psFwOsInit) ++{ ++#if !defined(NO_HARDWARE) ++ IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC; ++#endif ++#if defined(PDUMP)||(!defined(NO_HARDWARE)) ++ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC); ++ PVRSRV_ERROR eError; ++ ++ sBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, ++ psDevInfo->sDevFeatureCfg.ui32V, ++ psDevInfo->sDevFeatureCfg.ui32N, ++ psDevInfo->sDevFeatureCfg.ui32C); ++#endif ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Compatibility check: KM driver and FW BVNC (struct version)"); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + ++ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + ++ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion), ++ sBVNC.ui32LayoutVersion, ++ 0xffffffff, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); ++ } ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Compatibility check: KM driver and FW BVNC (BVNC part - Lower 32 bits)"); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + ++ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + ++ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), ++ (IMG_UINT32)sBVNC.ui64BVNC, ++ 0xffffffff, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); ++ } ++ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Compatibility check: KM driver and FW BVNC (BVNC part - Higher 32 bits)"); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + ++ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) + ++ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + ++ sizeof(IMG_UINT32), ++ (IMG_UINT32)(sBVNC.ui64BVNC >> 32), ++ 0xffffffff, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); ++ } ++#endif ++ ++#if !defined(NO_HARDWARE) ++ if (psFwOsInit == NULL) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ RGX_BVNC_EQUAL(sBVNC, psFwOsInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); ++ ++ if (!bCompatibleAll) ++ { ++ if (!bCompatibleVersion) ++ { ++ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).", ++ __func__, ++ sBVNC.ui32LayoutVersion, ++ psFwOsInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion)); ++ eError = PVRSRV_ERROR_BVNC_MISMATCH; ++ return eError; ++ } ++ ++ if (!bCompatibleBVNC) ++ { ++ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%u.%u.%u) and Firmware BVNC (%u.%u.%u.%u)", ++ RGX_BVNC_PACKED_EXTR_B(sBVNC.ui64BVNC), ++ 
RGX_BVNC_PACKED_EXTR_V(sBVNC.ui64BVNC),
++			         RGX_BVNC_PACKED_EXTR_N(sBVNC.ui64BVNC),
++			         RGX_BVNC_PACKED_EXTR_C(sBVNC.ui64BVNC),
++			         RGX_BVNC_PACKED_EXTR_B(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC),
++			         RGX_BVNC_PACKED_EXTR_V(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC),
++			         RGX_BVNC_PACKED_EXTR_N(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC),
++			         RGX_BVNC_PACKED_EXTR_C(psFwOsInit->sRGXCompChecks.sFWBVNC.ui64BVNC)));
++			eError = PVRSRV_ERROR_BVNC_MISMATCH;
++			return eError;
++		}
++	}
++	else
++	{
++		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BVNC match. [ OK ]"));
++	}
++#endif
++	return PVRSRV_OK;
++}
++
++/*!
++ *******************************************************************************
++
++ @Function	RGXDevInitCompatCheck_BVNC_HWAgainstDriver
++
++ @Description
++
++ Validate HW BVNC against driver BVNC
++
++ @Input psDevInfo - device info
++ @Input psFwOsInit - FW init data
++
++ @Return   PVRSRV_ERROR - depending on mismatch found
++
++ ******************************************************************************/
++static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                                              RGXFWIF_OSINIT *psFwOsInit)
++{
++#if defined(PDUMP) || !defined(NO_HARDWARE)
++	IMG_UINT64 ui64MaskBVNC = RGX_BVNC_PACK_MASK_B |
++	                          RGX_BVNC_PACK_MASK_V |
++	                          RGX_BVNC_PACK_MASK_N |
++	                          RGX_BVNC_PACK_MASK_C;
++
++	PVRSRV_ERROR eError;
++	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC);
++#endif
++
++#if defined(PDUMP)
++	PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
++#endif
++
++#if !defined(NO_HARDWARE)
++	RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC);
++	IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleBVNC;
++#endif
++
++	if (psDevInfo->bIgnoreHWReportedBVNC)
++	{
++		PVR_LOG(("BVNC compatibility checks between driver and HW are disabled (AppHint override)"));
++		return PVRSRV_OK;
++	}
++
++#if defined(PDUMP) || !defined(NO_HARDWARE)
++#if defined(COMPAT_BVNC_MASK_V)
++	ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_V;
++#endif
++#if defined(COMPAT_BVNC_MASK_N)
++	ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_N;
++#endif
++#if defined(COMPAT_BVNC_MASK_C)
++	ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C;
++#endif
++
++	sSWBVNC.ui64BVNC = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B,
++	                                 psDevInfo->sDevFeatureCfg.ui32V,
++	                                 psDevInfo->sDevFeatureCfg.ui32N,
++	                                 psDevInfo->sDevFeatureCfg.ui32C);
++
++#if defined(FIX_HW_BRN_38344_BIT_MASK)
++	if (RGX_IS_BRN_SUPPORTED(psDevInfo, 38344) && (psDevInfo->sDevFeatureCfg.ui32C >= 10))
++	{
++		ui64MaskBVNC &= ~RGX_BVNC_PACK_MASK_C;
++	}
++#endif
++	if (ui64MaskBVNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_V | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C))
++	{
++		PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.",
++		         ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_B))?("B"):("")),
++		         ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_V))?("V"):("")),
++		         ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_N))?("N"):("")),
++		         ((!(ui64MaskBVNC & RGX_BVNC_PACK_MASK_C))?("C"):(""))));
++	}
++#endif
++
++#if defined(PDUMP)
++	PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
++	                      "Compatibility check: Layout version of compchecks struct");
++	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
++	                                offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
++	                                offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
++	                                offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
++	                                sSWBVNC.ui32LayoutVersion,
++	                                0xffffffff,
++	                                PDUMP_POLL_OPERATOR_EQUAL,
++	                                ui32PDumpFlags);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
"RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); ++ return eError; ++ } ++ ++ PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check started"); ++ if (ui64MaskBVNC & (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) ++ { ++ PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags); ++ PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags); ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, ++ "Compatibility check: HW BNC and FW BNC (Lower 32 bits)"); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + ++ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + ++ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC), ++ (IMG_UINT32)sSWBVNC.ui64BVNC , ++ (IMG_UINT32)(ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V), ++ PDUMP_POLL_OPERATOR_EQUAL, ++ ui32PDumpFlags); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); ++ return eError; ++ } ++ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, ++ "Compatibility check: HW BNC and FW BNC (Higher 32 bits)"); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + ++ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + ++ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + ++ sizeof(IMG_UINT32), ++ (IMG_UINT32)(sSWBVNC.ui64BVNC >> 32), ++ (IMG_UINT32)((ui64MaskBVNC & ~RGX_BVNC_PACK_MASK_V) >> 32), ++ PDUMP_POLL_OPERATOR_EQUAL, ++ ui32PDumpFlags); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); ++ return eError; ++ } ++ ++ PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWBNC_CHECK", ui32PDumpFlags); ++ } ++ if (ui64MaskBVNC & RGX_BVNC_PACK_MASK_V) ++ { ++ PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags); ++ PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags); ++ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, ++ "Compatibility check: HW V and FW V"); ++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc, ++ offsetof(RGXFWIF_OSINIT, sRGXCompChecks) + ++ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) + ++ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BVNC) + ++ ((RGX_BVNC_PACK_SHIFT_V >= 32) ? sizeof(IMG_UINT32) : 0), ++ (IMG_UINT32)(sSWBVNC.ui64BVNC >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 32 : 0)), ++ RGX_BVNC_PACK_MASK_V >> ((RGX_BVNC_PACK_SHIFT_V >= 32) ? 
32 : 0), ++ PDUMP_POLL_OPERATOR_EQUAL, ++ ui32PDumpFlags); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError)); ++ return eError; ++ } ++ PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWV_CHECK", ui32PDumpFlags); ++ } ++ PDUMPCOM(psDevInfo->psDeviceNode, ui32PDumpFlags, "BVNC compatibility check finished"); ++#endif ++ ++#if !defined(NO_HARDWARE) ++ if (psFwOsInit == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ sHWBVNC = psFwOsInit->sRGXCompChecks.sHWBVNC; ++ ++ sHWBVNC.ui64BVNC &= ui64MaskBVNC; ++ sSWBVNC.ui64BVNC &= ui64MaskBVNC; ++ ++ RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleBVNC); ++ ++ if (!bCompatibleAll) ++ { ++ if (!bCompatibleVersion) ++ { ++ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).", ++ __func__, ++ sHWBVNC.ui32LayoutVersion, ++ sSWBVNC.ui32LayoutVersion)); ++ eError = PVRSRV_ERROR_BVNC_MISMATCH; ++ return eError; ++ } ++ ++ if (!bCompatibleBVNC) ++ { ++ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d).", ++ RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); ++ eError = PVRSRV_ERROR_BVNC_MISMATCH; ++ return eError; ++ } ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%d.%d.%d) and FW BVNC (%d.%d.%d.%d) match. [ OK ]", ++ RGX_BVNC_PACKED_EXTR_B(sHWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_V(sHWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_N(sHWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_C(sHWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_B(sSWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_V(sSWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_N(sSWBVNC.ui64BVNC), ++ RGX_BVNC_PACKED_EXTR_C(sSWBVNC.ui64BVNC))); ++ } ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/*! 
++ *******************************************************************************
++
++ @Function	RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver
++
++ @Description
++
++ Validate the HW FW processor version against the version expected by the driver
++
++ @Input psDevInfo - device info
++ @Input psFwOsInit - FW init data
++
++ @Return   PVRSRV_ERROR - depending on mismatch found
++
++ ******************************************************************************/
++static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
++                                                                          RGXFWIF_OSINIT *psFwOsInit)
++{
++#if defined(PDUMP)||(!defined(NO_HARDWARE))
++	PVRSRV_ERROR eError;
++#endif
++#if defined(PDUMP)
++	PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS;
++#endif
++	IMG_UINT32 ui32FWCoreIDValue = 0;
++	IMG_CHAR *pcRGXFW_PROCESSOR = NULL;
++
++	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
++	{
++		ui32FWCoreIDValue = RGXMIPSFW_CORE_ID_VALUE;
++		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
++	}
++	else
++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX)
++	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
++	{
++		switch (RGX_GET_FEATURE_VALUE(psDevInfo, META))
++		{
++			case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break;
++			case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break;
++			case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break;
++			case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break;
++			default:
++				PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__));
++				PVR_ASSERT(0);
++		}
++		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
++	}
++	else
++#endif
++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK)
++	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR))
++	{
++		ui32FWCoreIDValue = RGXRISCVFW_CORE_ID_VALUE;
++		pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV;
++	}
++	else
++#endif
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Undefined FW_CORE_ID_VALUE", __func__));
++		PVR_ASSERT(0);
++	}
++
++#if defined(PDUMP)
++	PDUMPIF(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags);
++	PDUMPELSE(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags);
++	PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags,
++	                      "Compatibility check: KM driver and HW FW Processor version");
++	eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfOsInitMemDesc,
++	                                offsetof(RGXFWIF_OSINIT, sRGXCompChecks) +
++	                                offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion),
++	                                ui32FWCoreIDValue,
++	                                0xffffffff,
++	                                PDUMP_POLL_OPERATOR_EQUAL,
++	                                ui32PDumpFlags);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfOsInitMemDesc (%d)", eError));
++		return eError;
++	}
++	PDUMPFI(psDevInfo->psDeviceNode, "DISABLE_HWMETA_CHECK", ui32PDumpFlags);
++#endif
++
++#if !defined(NO_HARDWARE)
++	if (psFwOsInit == NULL)
++		return PVRSRV_ERROR_INVALID_PARAMS;
++
++	if (psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue)
++	{
++		PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).",
++		         pcRGXFW_PROCESSOR,
++		         ui32FWCoreIDValue,
++		         pcRGXFW_PROCESSOR,
++		         psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion));
++		eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH;
++		PVR_DBG_BREAK;
++		return eError;
++	}
++	else
++	{
++		PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].",
++		         pcRGXFW_PROCESSOR,
++		         ui32FWCoreIDValue,
++		         pcRGXFW_PROCESSOR,
++		         psFwOsInit->sRGXCompChecks.ui32FWProcessorVersion));
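++		/* ui32FWProcessorVersion is populated by the firmware in
++		 * sRGXCompChecks; matching it against the driver-selected
++		 * ui32FWCoreIDValue confirms both sides agree on the FW processor
++		 * (MIPS, META or RISC-V).
++		 */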
++ } ++#endif ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXDevInitCompatCheck ++ ++ @Description ++ ++ Check compatibility of host driver and firmware (DDK and build options) ++ for RGX devices at services/device initialisation ++ ++ @Input psDeviceNode - device node ++ ++ @Return PVRSRV_ERROR - depending on mismatch found ++ ++ ******************************************************************************/ ++static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++#if !defined(NO_HARDWARE) ++ IMG_UINT32 ui32RegValue; ++ IMG_UINT8 ui8FwOsCount; ++ IMG_UINT32 ui32FwTimeout = MAX_HW_TIME_US; ++ ++ LOOP_UNTIL_TIMEOUT(ui32FwTimeout) ++ { ++ if (*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) ++ { ++ /* No need to wait if the FW has already updated the values */ ++ break; ++ } ++ OSWaitus(ui32FwTimeout/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ ui32RegValue = 0; ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if ((!PVRSRV_VZ_MODE_IS(GUEST)) && ++ RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ eError = RGXReadFWModuleAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)", ++ __func__, eError)); ++ goto chk_exit; ++ } ++ ++ if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT)) ++ { ++ eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)", ++ __func__, psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated, eError)); ++ goto chk_exit; ++ } ++ } ++#endif ++ ++ if (!*((volatile IMG_BOOL *)&psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated)) ++ { ++ eError = PVRSRV_ERROR_TIMEOUT; ++ PVR_DPF((PVR_DBG_ERROR, "%s: GPU Firmware not responding: failed to supply compatibility info (%u)", ++ __func__, eError)); ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Potential causes: firmware not initialised or the current Guest driver's " ++ "OsConfig initialisation data was not accepted by the firmware", __func__)); ++ } ++ goto chk_exit; ++ } ++ ++ ui8FwOsCount = psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.sInitOptions.ui8OsCountSupport; ++ if ((PVRSRV_VZ_MODE_IS(NATIVE) && (ui8FwOsCount > 1)) || ++ (PVRSRV_VZ_MODE_IS(HOST) && (ui8FwOsCount != RGX_NUM_OS_SUPPORTED))) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch between the number of Operating Systems supported by KM driver (%d) and FW (%d)", ++ __func__, (PVRSRV_VZ_MODE_IS(NATIVE)) ? 
(1) : (RGX_NUM_OS_SUPPORTED), ui8FwOsCount));
++	}
++#endif /* !defined(NO_HARDWARE) */
++
++	eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psDevInfo->psRGXFWIfOsInit);
++	if (eError != PVRSRV_OK)
++	{
++		goto chk_exit;
++	}
++
++	eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
++	if (eError != PVRSRV_OK)
++	{
++		goto chk_exit;
++	}
++
++	eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
++	if (eError != PVRSRV_OK)
++	{
++		goto chk_exit;
++	}
++
++	if (!PVRSRV_VZ_MODE_IS(GUEST))
++	{
++		eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
++		if (eError != PVRSRV_OK)
++		{
++			goto chk_exit;
++		}
++
++		eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
++		if (eError != PVRSRV_OK)
++		{
++			goto chk_exit;
++		}
++	}
++
++	eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psDevInfo->psRGXFWIfOsInit);
++	if (eError != PVRSRV_OK)
++	{
++		goto chk_exit;
++	}
++
++	eError = PVRSRV_OK;
++chk_exit:
++
++	return eError;
++}
++
++/**************************************************************************/ /*!
++@Function       RGXSoftReset
++@Description    Resets some modules of the RGX device
++@Input          psDeviceNode     Device node
++@Input          ui64ResetValue1  A mask for which each bit set corresponds
++                                 to a module to reset (via the SOFT_RESET
++                                 register).
++@Input          ui64ResetValue2  A mask for which each bit set corresponds
++                                 to a module to reset (via the SOFT_RESET2
++                                 register).
++@Return         PVRSRV_ERROR
++ */ /***************************************************************************/
++static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                 IMG_UINT64 ui64ResetValue1,
++                                 IMG_UINT64 ui64ResetValue2)
++{
++	PVRSRV_RGXDEV_INFO *psDevInfo;
++	IMG_BOOL bSoftReset = IMG_FALSE;
++	IMG_UINT64 ui64SoftResetMask = 0;
++
++	PVR_ASSERT(psDeviceNode != NULL);
++	PVR_ASSERT(psDeviceNode->pvDevice != NULL);
++	PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
++
++	/* the device info */
++	psDevInfo = psDeviceNode->pvDevice;
++#if defined(RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL)
++	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
++	{
++		ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL;
++	}
++	else
++#endif
++	{
++		ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL;
++	}
++
++#if defined(RGX_CR_SOFT_RESET2_MASKFULL)
++	if ((RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) &&
++	    ((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2))
++	{
++		bSoftReset = IMG_TRUE;
++	}
++#endif
++
++	if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	/* Set in soft-reset */
++	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1);
++
++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
++	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
++	{
++		OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2);
++	}
++#endif
++
++	/* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
++	(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
++	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
++	{
++		(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
++	}
++#endif
++
++	/* Take the modules out of reset... */
++	OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0);
++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
++	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
++	{
++		OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0);
++	}
++#endif
++
++	/* ...and fence again */
++	(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK)
++	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
++	{
++		(void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
++	}
++#endif
++
++	return PVRSRV_OK;
++}
++
++static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline;
++
++static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++	DevPhysMemFree(psDeviceNode,
++#if defined(PDUMP)
++	               psDevInfo->psTrampoline->hPdumpPages,
++#endif
++	               &psDevInfo->psTrampoline->sPages);
++
++	if (psDevInfo->psTrampoline != &sNullTrampoline)
++	{
++		OSFreeMem(psDevInfo->psTrampoline);
++	}
++	psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline;
++}
++
++#define RANGES_OVERLAP(x,y,size) (x < (y+size) && y < (x+size))
++#define TRAMPOLINE_ALLOC_MAX_RETRIES (3)
++
++static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVRSRV_ERROR eError;
++	IMG_INT32 i, j;
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++	RGX_MIPS_ADDRESS_TRAMPOLINE *pasTrampoline[TRAMPOLINE_ALLOC_MAX_RETRIES];
++
++	PDUMPCOMMENT(psDeviceNode, "Allocate pages for trampoline");
++
++	/* Retry the allocation of the trampoline block (16KB), retaining any
++	 * previous allocations overlapping with the target range until we get an
++	 * allocation that doesn't overlap with the target range.
++	 * Any allocation like this will require a maximum of 3 tries as we are
++	 * allocating a physically contiguous block of memory, not individual pages.
++	 * Free the unused allocations at the end only after the desired range
++	 * is obtained to prevent the alloc function from returning the same bad
++	 * range repeatedly.
++	 */
++	for (i = 0; i < TRAMPOLINE_ALLOC_MAX_RETRIES; i++)
++	{
++		pasTrampoline[i] = OSAllocMem(sizeof(RGX_MIPS_ADDRESS_TRAMPOLINE));
++		eError = DevPhysMemAlloc(psDeviceNode,
++		                         RGXMIPSFW_TRAMPOLINE_SIZE,
++		                         RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE,
++		                         0,         // (init) u8Value
++		                         IMG_FALSE, // bInitPage,
++#if defined(PDUMP)
++		                         psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
++		                         "TrampolineRegion",
++		                         &pasTrampoline[i]->hPdumpPages,
++#endif
++		                         &pasTrampoline[i]->sPages,
++		                         &pasTrampoline[i]->sPhysAddr);
++		if (PVRSRV_OK != eError)
++		{
++			PVR_DPF((PVR_DBG_ERROR,
++			         "%s failed (%u)",
++			         __func__,
++			         eError));
++			goto fail;
++		}
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION)
++		/* Set the persistent uiOSid value so that we free from the correct
++		 * base arena when unloading the driver and freeing the trampoline.
++		 */
++		pasTrampoline[i]->sPages.uiOSid = 0; /* Firmware global arena */
++#endif
++
++		if (!RANGES_OVERLAP(pasTrampoline[i]->sPhysAddr.uiAddr,
++		                    RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
++		                    RGXMIPSFW_TRAMPOLINE_SIZE))
++		{
++			break;
++		}
++	}
++	if (TRAMPOLINE_ALLOC_MAX_RETRIES == i)
++	{
++		/* Failed to find a physical allocation after 3 attempts */
++		eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
++		PVR_DPF((PVR_DBG_ERROR,
++		         "%s failed to allocate non-overlapping pages (%u)",
++		         __func__, eError));
++		/* Fall through, clean up and return error.
*/ ++ } ++ else ++ { ++ /* Remember the last physical block allocated, it will not be freed */ ++ psDevInfo->psTrampoline = pasTrampoline[i]; ++ } ++ ++fail: ++ /* free all unused allocations */ ++ for (j = 0; j < i; j++) ++ { ++ DevPhysMemFree(psDeviceNode, ++#if defined(PDUMP) ++ pasTrampoline[j]->hPdumpPages, ++#endif ++ &pasTrampoline[j]->sPages); ++ OSFreeMem(pasTrampoline[j]); ++ } ++ ++ return eError; ++} ++ ++#undef RANGES_OVERLAP ++ ++ ++PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEVMEM_SIZE_T uiFWCodeLen, ++ IMG_DEVMEM_SIZE_T uiFWDataLen, ++ IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, ++ IMG_DEVMEM_SIZE_T uiFWCorememDataLen) ++{ ++ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError; ++ IMG_DEVMEM_SIZE_T uiDummyLen; ++ DEVMEM_MEMDESC *psDummyMemDesc = NULL; ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && ++ (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) ++ { ++ eError = RGXAllocTrampoline(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to allocate trampoline region (%u)", ++ eError)); ++ goto failTrampolineMemDescAlloc; ++ } ++ } ++ ++ /* ++ * Set up Allocation for FW code section ++ */ ++ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE); ++ ++ eError = RGXAllocateFWMemoryRegion(psDeviceNode, ++ uiFWCodeLen, ++ uiMemAllocFlags, ++ "FwExCodeRegion", ++ &psDevInfo->psRGXFWCodeMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to allocate fw code mem (%u)", ++ eError)); ++ goto failFWCodeMemDescAlloc; ++ } ++ ++ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc, ++ &psDevInfo->sFWCodeDevVAddrBase); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to acquire devVAddr for fw code mem (%u)", ++ eError)); ++ goto failFWCodeMemDescAqDevVirt; ++ } ++ ++ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) || (PVRSRV_VZ_MODE_IS(GUEST)))) ++ { ++ /* ++ * The FW code must be the first allocation in the firmware heap, otherwise ++ * the bootloader will not work (the FW will not be able to find the bootloader). 
++ */ ++ PVR_ASSERT(psDevInfo->sFWCodeDevVAddrBase.uiAddr == RGX_FIRMWARE_RAW_HEAP_BASE); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ /* ++ * Allocate Dummy Pages so that Data segment allocation gets the same ++ * device virtual address as specified in MIPS firmware linker script ++ */ ++ uiDummyLen = RGXGetFWImageSectionMaxSize(NULL, MIPS_CODE) + ++ RGXGetFWImageSectionMaxSize(NULL, MIPS_EXCEPTIONS_CODE) + ++ RGXGetFWImageSectionMaxSize(NULL, MIPS_BOOT_CODE) - ++ uiFWCodeLen; /* code actual size */ ++ ++ if (uiDummyLen > 0) ++ { ++ eError = DevmemFwAllocateExportable(psDeviceNode, ++ uiDummyLen, ++ OSGetPageSize(), ++ uiMemAllocFlags, ++ "FwExDummyPages", ++ &psDummyMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to allocate fw dummy mem (%u)", ++ eError)); ++ goto failDummyMemDescAlloc; ++ } ++ } ++ } ++ ++ /* ++ * Set up Allocation for FW data section ++ */ ++ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA); ++ ++ eError = RGXAllocateFWMemoryRegion(psDeviceNode, ++ uiFWDataLen, ++ uiMemAllocFlags, ++ "FwExDataRegion", ++ &psDevInfo->psRGXFWDataMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to allocate fw data mem (%u)", ++ eError)); ++ goto failFWDataMemDescAlloc; ++ } ++ ++ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc, ++ &psDevInfo->sFWDataDevVAddrBase); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to acquire devVAddr for fw data mem (%u)", ++ eError)); ++ goto failFWDataMemDescAqDevVirt; ++ } ++ ++ if (uiFWCorememCodeLen != 0) ++ { ++ /* ++ * Set up Allocation for FW coremem code section ++ */ ++ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_CODE); ++ ++ eError = RGXAllocateFWMemoryRegion(psDeviceNode, ++ uiFWCorememCodeLen, ++ uiMemAllocFlags, ++ "FwExCorememCodeRegion", ++ &psDevInfo->psRGXFWCorememCodeMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to allocate fw coremem code mem, size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)", ++ uiFWCorememCodeLen, uiMemAllocFlags, eError)); ++ goto failFWCorememCodeMemDescAlloc; ++ } ++ ++ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, ++ &psDevInfo->sFWCorememCodeDevVAddrBase); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to acquire devVAddr for fw coremem mem code (%u)", ++ eError)); ++ goto failFWCorememCodeMemDescAqDevVirt; ++ } ++ ++ eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr, ++ psDevInfo->psRGXFWCorememCodeMemDesc, ++ 0, RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", failFWCorememCodeMemDescFwAddr); ++ } ++ else ++ { ++ psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0; ++ psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0; ++ } ++ ++ if (uiFWCorememDataLen 
!= 0) ++ { ++ /* ++ * Set up Allocation for FW coremem data section ++ */ ++ uiMemAllocFlags = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_PRIV_DATA)) ++ & RGX_AUTOVZ_KEEP_FW_DATA_MASK(psDeviceNode->bAutoVzFwIsUp); ++ ++ eError = RGXAllocateFWMemoryRegion(psDeviceNode, ++ uiFWCorememDataLen, ++ uiMemAllocFlags, ++ "FwExCorememDataRegion", ++ &psDevInfo->psRGXFWIfCorememDataStoreMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to allocate fw coremem data mem, " ++ "size: %" IMG_INT64_FMTSPECd ", flags: %" PVRSRV_MEMALLOCFLAGS_FMTSPEC " (%u)", ++ uiFWCorememDataLen, ++ uiMemAllocFlags, ++ eError)); ++ goto failFWCorememDataMemDescAlloc; ++ } ++ ++ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, ++ &psDevInfo->sFWCorememDataStoreDevVAddrBase); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Failed to acquire devVAddr for fw coremem mem data (%u)", ++ eError)); ++ goto failFWCorememDataMemDescAqDevVirt; ++ } ++ ++ eError = RGXSetFirmwareAddress(&psDevInfo->sFWCorememDataStoreFWAddr, ++ psDevInfo->psRGXFWIfCorememDataStoreMemDesc, ++ 0, RFW_FWADDR_NOREF_FLAG); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", failFWCorememDataMemDescFwAddr); ++ } ++ else ++ { ++ psDevInfo->sFWCorememDataStoreDevVAddrBase.uiAddr = 0; ++ psDevInfo->sFWCorememDataStoreFWAddr.ui32Addr = 0; ++ } ++ ++ /* Free Dummy Pages */ ++ if (psDummyMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); ++ } ++ ++ return PVRSRV_OK; ++ ++failFWCorememDataMemDescFwAddr: ++failFWCorememDataMemDescAqDevVirt: ++ if (uiFWCorememDataLen != 0) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); ++ psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; ++ } ++failFWCorememDataMemDescAlloc: ++failFWCorememCodeMemDescFwAddr: ++failFWCorememCodeMemDescAqDevVirt: ++ if (uiFWCorememCodeLen != 0) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc); ++ psDevInfo->psRGXFWCorememCodeMemDesc = NULL; ++ } ++failFWCorememCodeMemDescAlloc: ++failFWDataMemDescAqDevVirt: ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); ++ psDevInfo->psRGXFWDataMemDesc = NULL; ++failFWDataMemDescAlloc: ++ if (psDummyMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psDummyMemDesc); ++ } ++failDummyMemDescAlloc: ++failFWCodeMemDescAqDevVirt: ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); ++ psDevInfo->psRGXFWCodeMemDesc = NULL; ++failFWCodeMemDescAlloc: ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && ++ (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) ++ { ++ RGXFreeTrampoline(psDeviceNode); ++ } ++failTrampolineMemDescAlloc: ++ return eError; ++} ++ ++/* ++ AppHint parameter interface ++ */ ++static ++PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_UINT32 *pui32Value) ++{ ++ PVRSRV_ERROR eResult; ++ ++ eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value); ++ *pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK; ++ return eResult; ++} ++ ++static ++PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_UINT32 
*pui32Value) ++{ ++ PVRSRV_ERROR eResult; ++ ++ eResult = PVRSRVRGXFWDebugQueryFWLogKM(NULL, psDeviceNode, pui32Value); ++ if (PVRSRV_OK == eResult) ++ { ++ if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE) ++ { ++ *pui32Value = 0; /* Trace */ ++ } ++ else ++ { ++ *pui32Value = 1; /* TBI */ ++ } ++ } ++ return eResult; ++} ++ ++static ++PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_UINT32 ui32Value) ++{ ++ PVRSRV_ERROR eResult; ++ IMG_UINT32 ui32RGXFWLogType; ++ ++ eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType); ++ if (PVRSRV_OK == eResult) ++ { ++ if (0 == ui32RGXFWLogType) ++ { ++ BITMASK_SET(ui32Value, RGXFWIF_LOG_TYPE_TRACE); ++ } ++ eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32Value); ++ } ++ return eResult; ++} ++ ++static ++PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_UINT32 ui32Value) ++{ ++ PVRSRV_ERROR eResult; ++ IMG_UINT32 ui32RGXFWLogType = ui32Value; ++ ++ eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType); ++ if (PVRSRV_OK != eResult) ++ { ++ return eResult; ++ } ++ ++ /* 0 - trace, 1 - tbi */ ++ if (0 == ui32Value) ++ { ++ BITMASK_SET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE); ++ } ++#if defined(SUPPORT_TBI_INTERFACE) ++ else if (1 == ui32Value) ++ { ++ BITMASK_UNSET(ui32RGXFWLogType, RGXFWIF_LOG_TYPE_TRACE); ++ } ++#endif ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid parameter %u specified to set FW log type AppHint.", ++ __func__, ui32Value)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ eResult = PVRSRVRGXFWDebugSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType); ++ return eResult; ++} ++ ++#if defined(DEBUG) ++static ++PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_BOOL *pbValue) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; ++ ++ *pbValue = (PVRSRV_MEMALLOCFLAG_POISON_ON_FREE == psDevInfo->uiFWPoisonOnFreeFlag) ++ ? IMG_TRUE ++ : IMG_FALSE; ++ return PVRSRV_OK; ++} ++ ++static ++PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *psPrivate, ++ IMG_BOOL bValue) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; ++ psDevInfo->uiFWPoisonOnFreeFlag = bValue ++ ? 
PVRSRV_MEMALLOCFLAG_POISON_ON_FREE ++ : 0ULL; ++ ++ return PVRSRV_OK; ++} ++#endif ++ ++/* ++ * RGXInitFirmware ++ */ ++PVRSRV_ERROR ++RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bEnableSignatureChecks, ++ IMG_UINT32 ui32SignatureChecksBufSize, ++ IMG_UINT32 ui32HWPerfFWBufSizeKB, ++ IMG_UINT64 ui64HWPerfFilter, ++ IMG_UINT32 ui32ConfigFlags, ++ IMG_UINT32 ui32LogType, ++ IMG_UINT32 ui32FilterFlags, ++ IMG_UINT32 ui32JonesDisableMask, ++ IMG_UINT32 ui32HWRDebugDumpLimit, ++ IMG_UINT32 ui32HWPerfCountersDataSize, ++ IMG_UINT32 *pui32TPUTrilinearFracMask, ++ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, ++ FW_PERF_CONF eFirmwarePerf, ++ IMG_UINT32 ui32KCCBSizeLog2, ++ IMG_UINT32 ui32ConfigFlagsExt, ++ IMG_UINT32 ui32FwOsCfgFlags) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++#if defined(DEBUG) ++ void *pvAppHintState = NULL; ++ IMG_UINT32 ui32AppHintDefault; ++ IMG_BOOL bEnableFWPoisonOnFree = IMG_FALSE; ++#endif ++ ++ eError = RGXSetupFirmware(psDeviceNode, ++ bEnableSignatureChecks, ++ ui32SignatureChecksBufSize, ++ ui32HWPerfFWBufSizeKB, ++ ui64HWPerfFilter, ++ ui32ConfigFlags, ++ ui32ConfigFlagsExt, ++ ui32FwOsCfgFlags, ++ ui32LogType, ++ ui32FilterFlags, ++ ui32JonesDisableMask, ++ ui32HWRDebugDumpLimit, ++ ui32HWPerfCountersDataSize, ++ pui32TPUTrilinearFracMask, ++ eRGXRDPowerIslandingConf, ++ eFirmwarePerf, ++ ui32KCCBSizeLog2); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)", ++ eError)); ++ goto failed_init_firmware; ++ } ++ ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup, ++ RGXFWTraceQueryFilter, ++ RGXFWTraceSetFilter, ++ psDeviceNode, ++ NULL); ++ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType, ++ RGXFWTraceQueryLogType, ++ RGXFWTraceSetLogType, ++ psDeviceNode, ++ NULL); ++ } ++ ++#if defined(DEBUG) ++ OSCreateKMAppHintState(&pvAppHintState); ++ ++ ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE; ++ OSGetKMAppHintBOOL(psDeviceNode, ++ pvAppHintState, ++ EnableFWPoisonOnFree, ++ &ui32AppHintDefault, ++ &bEnableFWPoisonOnFree); ++ ++ OSFreeKMAppHintState(pvAppHintState); ++ ++ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree, ++ RGXQueryFWPoisonOnFree, ++ RGXSetFWPoisonOnFree, ++ psDeviceNode, ++ NULL); ++ ++ psDevInfo->uiFWPoisonOnFreeFlag = bEnableFWPoisonOnFree ++ ? 
PVRSRV_MEMALLOCFLAG_POISON_ON_FREE ++ : 0ULL; ++#else ++ psDevInfo->uiFWPoisonOnFreeFlag = 0ULL; ++#endif ++ ++ psDevInfo->ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK; ++ psDevInfo->ui32LastClockSource = PVRSRV_APPHINT_TIMECORRCLOCK; ++ ++ return PVRSRV_OK; ++ ++failed_init_firmware: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++/* See device.h for function declaration */ ++static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEM_MEMDESC **psMemDesc, ++ IMG_UINT32 *puiSyncPrimVAddr, ++ IMG_UINT32 *puiSyncPrimBlockSize) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ PVRSRV_ERROR eError; ++ RGXFWIF_DEV_VIRTADDR pFirmwareAddr; ++ IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32); ++ IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32); ++ IMG_UINT32 ui32CoherencyFlag = 0; ++ ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ /* Size and align are 'expanded' because we request an Exportalign allocation */ ++ eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), ++ &uiUFOBlockSize, ++ &ui32UFOBlockAlign); ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && ++ PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) ++ { ++ ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_CACHE_COHERENT; ++ } ++ else ++ { ++ ui32CoherencyFlag = PVRSRV_MEMALLOCFLAG_UNCACHED; ++ } ++ ++ eError = DevmemFwAllocateExportable(psDeviceNode, ++ uiUFOBlockSize, ++ ui32UFOBlockAlign, ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ ui32CoherencyFlag, ++ "FwExUFOBlock", ++ psMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ goto e0; ++ } ++ ++ eError = RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE); ++ PVR_GOTO_IF_ERROR(eError, e1); ++ ++ *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr; ++ *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize); ++ ++ return PVRSRV_OK; ++ ++e1: ++ DevmemFwUnmapAndFree(psDevInfo, *psMemDesc); ++e0: ++ return eError; ++} ++ ++/* See device.h for function declaration */ ++static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEM_MEMDESC *psMemDesc) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ /* ++ If the system has snooping of the device cache then the UFO block ++ might be in the cache so we need to flush it out before freeing ++ the memory ++ ++ When the device is being shutdown/destroyed we don't care anymore. ++ Several necessary data structures to issue a flush were destroyed ++ already. ++ */ ++ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) && ++ psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT) ++ { ++ RGXFWIF_KCCB_CMD sFlushInvalCmd; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ /* Schedule the SLC flush command ... 
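++ * A dirty UFO block may still sit in the SLC on snooping systems, so it is
++ * flushed and invalidated before its backing pages are freed and reused.
++ * The code below is the usual KCCB round trip:
++ *
++ *   sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;  - build
++ *   RGXSendCommandWithPowLockAndGetKCCBSlot(...)               - submit
++ *   RGXWaitForKCCBSlotUpdate(...)                              - wait for ack
++ *
++ * A submission failure is only logged; at free time there is no sensible
++ * recovery beyond carrying on with the free.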
*/ ++#if defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Submit SLC flush and invalidate"); ++#endif ++ sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; ++ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE; ++ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; ++ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0; ++ ++ eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, ++ &sFlushInvalCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to schedule SLC flush command with error (%u)", ++ __func__, ++ eError)); ++ } ++ else ++ { ++ /* Wait for the SLC flush to complete */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: SLC flush and invalidate aborted with error (%u)", ++ __func__, ++ eError)); ++ } ++ else if (unlikely(psDevInfo->pui32KernelCCBRtnSlots[ui32kCCBCommandSlot] & ++ RGXFWIF_KCCB_RTN_SLOT_POLL_FAILURE)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: FW poll on a HW operation failed", __func__)); ++ } ++ } ++ } ++ ++ RGXUnsetFirmwareAddress(psMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psMemDesc); ++} ++ ++static void DevPart2DeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; ++ ++ psDevInfo->bDevInit2Done = IMG_FALSE; ++ ++#if defined(RGX_FEATURE_COMPUTE_ONLY_BIT_MASK) ++ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE_ONLY)) ++#endif ++ { ++ if ((psDevInfo->hTQUSCSharedMem != NULL) && ++ (psDevInfo->hTQCLISharedMem != NULL)) ++ { ++ PVRSRVTQUnloadShaders(psDeviceNode); ++ } ++ } ++ ++#if !defined(NO_HARDWARE) ++ if (psDevInfo->pvLISRData != NULL) ++ { ++ (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData); ++ } ++ if (psDevInfo->pvMISRData != NULL) ++ { ++ (void) OSUninstallMISR(psDevInfo->pvMISRData); ++ } ++ if (psDevInfo->hProcessQueuesMISR != NULL) ++ { ++ (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR); ++ } ++ if (psDevInfo->pvAPMISRData != NULL) ++ { ++ (void) OSUninstallMISR(psDevInfo->pvAPMISRData); ++ } ++ if (psDeviceNode->hCmdCompNotify != NULL) ++ { ++ /* Cancel notifications to this device */ ++ PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify); ++ psDeviceNode->hCmdCompNotify = NULL; ++ } ++#endif /* !NO_HARDWARE */ ++ ++ /* Remove the device from the power manager */ ++ PVRSRVRemovePowerDevice(psDeviceNode); ++ ++ psDevInfo->pfnGetGpuUtilStats = NULL; ++ if (psDevInfo->hGPUUtilLock != NULL) ++ { ++ OSLockDestroy(psDevInfo->hGPUUtilLock); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && ++ (psDevInfo->hNMILock != NULL)) ++ { ++ OSLockDestroy(psDevInfo->hNMILock); ++ } ++ ++ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && ++ (psDevInfo->hMMUCtxUnregLock != NULL)) ++ { ++ OSLockDestroy(psDevInfo->hMMUCtxUnregLock); ++ } ++ ++ if (psDevInfo->hDebugFaultInfoLock != NULL) ++ { ++ OSLockDestroy(psDevInfo->hDebugFaultInfoLock); ++ } ++ ++ /* De-init Freelists/ZBuffers... 
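++ * Only the list locks are destroyed here; the freelists and ZS buffers
++ * themselves are presumably torn down earlier by their owning contexts,
++ * so the lists are expected to be empty by this point.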
*/ ++ if (psDevInfo->hLockFreeList != NULL) ++ { ++ OSLockDestroy(psDevInfo->hLockFreeList); ++ } ++ ++ if (psDevInfo->hLockZSBuffer != NULL) ++ { ++ OSLockDestroy(psDevInfo->hLockZSBuffer); ++ } ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* De-init work estimation lock */ ++ if (psDevInfo->hWorkEstLock != NULL) ++ { ++ OSLockDestroy(psDevInfo->hWorkEstLock); ++ } ++#endif ++ ++ /* Free DVFS Table */ ++ if (psDevInfo->psGpuDVFSTable != NULL) ++ { ++ OSFreeMem(psDevInfo->psGpuDVFSTable); ++ psDevInfo->psGpuDVFSTable = NULL; ++ } ++} ++ ++/* ++ DevDeInitRGX ++ */ ++PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError; ++ DEVICE_MEMORY_INFO *psDevMemoryInfo; ++ IMG_UINT32 ui32Temp=0; ++ ++ if (!psDevInfo) ++ { ++ /* Can happen if DevInitRGX failed */ ++ PVR_DPF((PVR_DBG_ERROR, "DevDeInitRGX: Null DevInfo")); ++ return PVRSRV_OK; ++ } ++ ++ if (psDevInfo->psRGXFWIfOsInit) ++ { ++ KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); ++ } ++ ++ DeviceDepBridgeDeInit(psDevInfo); ++ ++#if defined(PDUMP) ++ DevmemIntFreeDefBackingPage(psDeviceNode, ++ &psDeviceNode->sDummyPage, ++ DUMMY_PAGE); ++ DevmemIntFreeDefBackingPage(psDeviceNode, ++ &psDeviceNode->sDevZeroPage, ++ DEV_ZERO_PAGE); ++#endif ++ ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0); ++ PVR_UNREFERENCED_PARAMETER(ui32Temp); ++ } ++ else ++#else ++ { ++ /*Delete the Dummy page related info */ ++ ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter); ++ if (0 != ui32Temp) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Dummy page reference counter is non zero (%u)", ++ __func__, ++ ui32Temp)); ++ PVR_ASSERT(0); ++ } ++ } ++#endif ++ ++ /*Delete the Dummy page related info */ ++ ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDevZeroPage.atRefCounter); ++ if (0 != ui32Temp) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Zero page reference counter is non zero (%u)", ++ __func__, ++ ui32Temp)); ++ } ++ ++#if defined(PDUMP) ++ if (NULL != psDeviceNode->sDummyPage.hPdumpPg) ++ { ++ PDUMPCOMMENT(psDeviceNode, "Error dummy page handle is still active"); ++ } ++ ++ if (NULL != psDeviceNode->sDevZeroPage.hPdumpPg) ++ { ++ PDUMPCOMMENT(psDeviceNode, "Error Zero page handle is still active"); ++ } ++#endif ++ ++ /*The lock type need to be dispatch type here because it can be acquired from MISR (Z-buffer) path */ ++ OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); ++ ++ /* Destroy the zero page lock */ ++ OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); ++ ++#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) ++ OSLockDestroy(psDevInfo->hCounterDumpingLock); ++#endif ++ ++ RGXDeInitMultiCoreInfo(psDeviceNode); ++ ++ /* Unregister debug request notifiers first as they could depend on anything. */ ++ ++ RGXDebugDeinit(psDevInfo); ++ ++ /* De-initialise in reverse order, so stage 2 init is undone first. 
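++ * Teardown mirrors bring-up in LIFO order, the usual pattern for dependent
++ * resources (names below are illustrative only):
++ *
++ *   init:   A(); B(); C();                  - C may depend on B, B on A
++ *   deinit: undo_C(); undo_B(); undo_A();
++ *
++ * Hence stage-2 state is undone here first, before the stage-1 state
++ * created in RGXRegisterDevice is dismantled further down.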
*/ ++ if (psDevInfo->bDevInit2Done) ++ { ++ DevPart2DeInitRGX(psDeviceNode); ++ } ++ ++ /* Unregister MMU related stuff */ ++ eError = RGXMMUInit_Unregister(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", ++ eError)); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ /* Unregister MMU related stuff */ ++ eError = RGXMipsMMUInit_Unregister(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "DevDeInitRGX: Failed RGXMipsMMUInit_Unregister (0x%x)", ++ eError)); ++ } ++ } ++ ++ /* UnMap Regs */ ++ if (psDevInfo->pvRegsBaseKM != NULL) ++ { ++#if !defined(NO_HARDWARE) ++ OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, ++ psDevInfo->ui32RegSize); ++#endif /* !NO_HARDWARE */ ++ psDevInfo->pvRegsBaseKM = NULL; ++ } ++ ++#if 0 /* not required at this time */ ++ if (psDevInfo->hTimer) ++ { ++ eError = OSRemoveTimer(psDevInfo->hTimer); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "DevDeInitRGX: Failed to remove timer")); ++ return eError; ++ } ++ psDevInfo->hTimer = NULL; ++ } ++#endif ++ ++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; ++ ++ RGXDeInitHeaps(psDevMemoryInfo); ++ ++ if (psDevInfo->psRGXFWCodeMemDesc) ++ { ++ /* Free fw code */ ++ PDUMPCOMMENT(psDeviceNode, "Freeing FW code memory"); ++ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc); ++ psDevInfo->psRGXFWCodeMemDesc = NULL; ++ } ++ else if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "No firmware code memory to free")); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && ++ (RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) == 32)) ++ { ++ if (psDevInfo->psTrampoline->sPages.u.pvHandle) ++ { ++ /* Free trampoline region */ ++ PDUMPCOMMENT(psDeviceNode, "Freeing trampoline memory"); ++ RGXFreeTrampoline(psDeviceNode); ++ } ++ } ++ ++ if (psDevInfo->psRGXFWDataMemDesc) ++ { ++ /* Free fw data */ ++ PDUMPCOMMENT(psDeviceNode, "Freeing FW data memory"); ++ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc); ++ psDevInfo->psRGXFWDataMemDesc = NULL; ++ } ++ else if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "No firmware data memory to free")); ++ } ++ ++ if (psDevInfo->psRGXFWCorememCodeMemDesc) ++ { ++ /* Free fw core mem code */ ++ PDUMPCOMMENT(psDeviceNode, "Freeing FW coremem code memory"); ++ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWCorememCodeMemDesc); ++ psDevInfo->psRGXFWCorememCodeMemDesc = NULL; ++ } ++ ++ if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc) ++ { ++ /* Free fw core mem data */ ++ PDUMPCOMMENT(psDeviceNode, "Freeing FW coremem data store memory"); ++ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc); ++ psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL; ++ } ++ ++ /* ++ Free the firmware allocations. 
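++ RGXFreeFirmware runs while the FW kernel memory context is still alive;
++ the context itself is only destroyed further down by
++ RGXDeInitDestroyFWKernelMemoryContext, presumably so that these frees can
++ still unmap from it.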
++ */ ++ RGXFreeFirmware(psDevInfo); ++ ++ /* De-initialise non-device specific (TL) users of RGX device memory */ ++ RGXHWPerfDeinit(psDevInfo); ++ RGXHWPerfHostDeInit(psDevInfo); ++ eError = HTBDeInit(); ++ PVR_LOG_IF_ERROR(eError, "HTBDeInit"); ++ ++ RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode); ++ ++ /* destroy the stalled CCB locks */ ++ OSLockDestroy(psDevInfo->hCCBRecoveryLock); ++ OSLockDestroy(psDevInfo->hCCBStallCheckLock); ++ ++ /* destroy the context list locks */ ++ OSLockDestroy(psDevInfo->sRegCongfig.hLock); ++ OSLockDestroy(psDevInfo->hBPLock); ++ OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); ++ OSWRLockDestroy(psDevInfo->hRenderCtxListLock); ++ OSWRLockDestroy(psDevInfo->hComputeCtxListLock); ++ OSWRLockDestroy(psDevInfo->hTransferCtxListLock); ++ OSWRLockDestroy(psDevInfo->hTDMCtxListLock); ++ OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); ++ OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); ++ OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); ++ OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); ++ ++ /* Free device BVNC string */ ++ if (NULL != psDevInfo->sDevFeatureCfg.pszBVNCString) ++ { ++ OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString); ++ } ++ ++ /* DeAllocate devinfo */ ++ OSFreeMem(psDevInfo); ++ ++ psDeviceNode->pvDevice = NULL; ++ ++ return PVRSRV_OK; ++} ++ ++#if defined(PDUMP) ++static ++PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice); ++ ++ psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE; ++ ++ return PVRSRV_OK; ++} ++#endif /* PDUMP */ ++ ++/* Takes a log2 page size parameter and calculates a suitable page size ++ * for the RGX heaps. Returns 0 if parameter is wrong.*/ ++static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize) ++{ ++ IMG_BOOL bFound = IMG_FALSE; ++ ++ /* OS page shift must be at least RGX_HEAP_4KB_PAGE_SHIFT, ++ * max RGX_HEAP_2MB_PAGE_SHIFT, non-zero and a power of two*/ ++ if (uiLog2PageSize == 0U || ++ (uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) || ++ (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Provided incompatible log2 page size %u", ++ __func__, ++ uiLog2PageSize)); ++ PVR_ASSERT(0); ++ return 0; ++ } ++ ++ do ++ { ++ switch (uiLog2PageSize) ++ { ++ case RGX_HEAP_4KB_PAGE_SHIFT: ++ case RGX_HEAP_16KB_PAGE_SHIFT: ++ case RGX_HEAP_64KB_PAGE_SHIFT: ++ case RGX_HEAP_256KB_PAGE_SHIFT: ++ case RGX_HEAP_1MB_PAGE_SHIFT: ++ case RGX_HEAP_2MB_PAGE_SHIFT: ++ /* All good, RGX page size equals given page size ++ * => use it as default for heaps */ ++ bFound = IMG_TRUE; ++ break; ++ default: ++ /* We have to fall back to a smaller device ++ * page size than given page size because there ++ * is no exact match for any supported size. 
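++ * Worked example: an OS page shift of 13 (8KB pages) matches no case above,
++ * so the loop decrements once to 12 and selects 4KB device pages. The loop
++ * always terminates at or above RGX_HEAP_4KB_PAGE_SHIFT because smaller
++ * inputs were already rejected by the range check above.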
*/ ++ uiLog2PageSize -= 1U; ++ break; ++ } ++ } while (!bFound); ++ ++ return uiLog2PageSize; ++} ++ ++/* First 16-bits define possible types */ ++#define HEAP_INST_VALUE_MASK (0xFFFF) ++#define HEAP_INST_DEFAULT_VALUE (1U) /* Used to show either the heap is always instantiated by default (pfn = NULL) ++ OR ++ that this is the default configuration of the heap with an Alternative BRN */ ++#define HEAP_INST_BRN_DEP_VALUE (2U) /* The inclusion of this heap is dependent on the brn being present */ ++#define HEAP_INST_FEAT_DEP_VALUE (3U) /* The inclusion of this heap is dependent on the feature being present */ ++#define HEAP_INST_BRN_ALT_VALUE (4U) /* This entry is a possible alternative to the default determined by a BRN */ ++#define HEAP_INST_FEAT_ALT_VALUE (5U) /* The entry is a possible alternative to the default determined by a Feature define */ ++ ++/* Latter 16-bits define other flags we may need */ ++#define HEAP_INST_NON4K_FLAG (1 << 16U) /* This is a possible NON4K Entry and we should use the device ++ NON4K size when instantiating */ ++ ++typedef struct RGX_HEAP_INFO_TAG RGX_HEAP_INFO; // Forward declaration ++typedef IMG_BOOL (*PFN_IS_PRESENT)(PVRSRV_RGXDEV_INFO*, const RGX_HEAP_INFO*); ++ ++struct RGX_HEAP_INFO_TAG ++{ ++ IMG_CHAR *pszName; ++ IMG_UINT64 ui64HeapBase; ++ IMG_DEVMEM_SIZE_T uiHeapLength; ++ IMG_DEVMEM_SIZE_T uiHeapReservedRegionLength; ++ IMG_UINT32 ui32Log2ImportAlignment; ++ PFN_IS_PRESENT pfnIsHeapPresent; ++ IMG_UINT32 ui32HeapInstanceFlags; ++}; ++ ++/* Feature Present function prototypes */ ++ ++static IMG_BOOL BRN65273IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) ++{ ++#if defined(FIX_HW_BRN_65273_BIT_MASK) ++ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) ++ { ++ return (((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_BRN_ALT_VALUE) || ++ ((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_BRN_DEP_VALUE)) ? ++ IMG_TRUE : IMG_FALSE; ++ } ++ else ++#else ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++#endif ++ { ++ return ((pksHeapInfo->ui32HeapInstanceFlags & HEAP_INST_VALUE_MASK) == HEAP_INST_DEFAULT_VALUE) ? 
IMG_TRUE : IMG_FALSE; ++ } ++} ++ ++static IMG_BOOL BRN63142IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) ++{ ++ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); ++ ++#if defined(FIX_HW_BRN_63142_BIT_MASK) ++ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 63142)) ++ { ++ PVR_ASSERT((pksHeapInfo->ui64HeapBase & IMG_UINT64_C(0x3FFFFFFFF)) + ++ pksHeapInfo->uiHeapLength == IMG_UINT64_C(0x400000000)); ++ ++ return IMG_TRUE; ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++#endif ++ ++ return IMG_FALSE; ++} ++ ++static IMG_BOOL FBCDescriptorIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) ++{ ++ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); ++ ++ if (RGX_GET_FEATURE_VALUE(psDevInfo, FBC_MAX_DEFAULT_DESCRIPTORS)) ++ { ++ return IMG_TRUE; ++ } ++ ++ return IMG_FALSE; ++} ++ ++static IMG_BOOL FBCLargeDescriptorIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) ++{ ++ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); ++ ++ if (RGX_GET_FEATURE_VALUE(psDevInfo, FBC_MAX_LARGE_DESCRIPTORS)) ++ { ++ return IMG_TRUE; ++ } ++ ++ return IMG_FALSE; ++} ++ ++static IMG_BOOL TextureStateIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) ++{ ++ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); ++#if defined(RGX_FEATURE_BINDLESS_IMAGE_AND_TEXTURE_STATE_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, BINDLESS_IMAGE_AND_TEXTURE_STATE)) ++ { ++ return IMG_TRUE; ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++#endif ++ return IMG_FALSE; ++} ++ ++static IMG_BOOL SignalSnoopingIsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) ++{ ++ PVR_UNREFERENCED_PARAMETER(pksHeapInfo); ++ ++#if defined(RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING)) ++ { ++ return IMG_TRUE; ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(psDevInfo); ++#endif ++ ++ return IMG_FALSE; ++} ++ ++static IMG_BOOL FWBRN65101IsPresent(PVRSRV_RGXDEV_INFO *psDevInfo, const RGX_HEAP_INFO *pksHeapInfo) ++{ ++ /* Used to determine the correct table row to instantiate as a heap by checking ++ * the Heap size and base at run time VS the current table instance ++ */ ++ IMG_UINT64 ui64MainSubHeapSize; ++ ++ /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST))) ++ { ++#if defined(FIX_HW_BRN_65101_BIT_MASK) ++ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) ++ { ++ ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101; ++ } ++ else ++#endif ++ { ++ ui64MainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL; ++ } ++ } ++ else ++ { ++ ui64MainSubHeapSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE; ++ } ++ ++ /* Determine if we should include this entry based upon previous checks */ ++ return (pksHeapInfo->uiHeapLength == ui64MainSubHeapSize && ++ pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_MAIN_HEAP_BASE) ? ++ IMG_TRUE : IMG_FALSE; ++} ++ ++static IMG_BOOL FWVZConfigPresent(PVRSRV_RGXDEV_INFO* psDevInfo, const RGX_HEAP_INFO* pksHeapInfo) ++{ ++ /* Used to determine the correct table row to instantiate as a heap by checking ++ * the Heap base at run time VS the current table instance ++ */ ++ ++ /* Determine if we should include this entry based upon previous checks */ ++ return (pksHeapInfo->ui64HeapBase == RGX_FIRMWARE_CONFIG_HEAP_BASE) ? IMG_TRUE : IMG_FALSE; ++} ++ ++/* Blueprint array. 
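++ * Each row below is one candidate heap: name, device-virtual base and
++ * length, reserved-region length, import alignment, an optional presence
++ * callback and instance flags. At init time rows are filtered through
++ * pfnIsHeapPresent, so BRN- and feature-dependent rows (e.g. the BRN65273
++ * alternates) replace or extend the defaults without special-case code.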
note: not all heaps are available to clients*/ ++ ++static const RGX_HEAP_INFO gasRGXHeapLayoutApp[] = ++{ ++ /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnPresent HeapInstanceFlags */ ++ {RGX_GENERAL_SVM_HEAP_IDENT, RGX_GENERAL_SVM_HEAP_BASE, RGX_GENERAL_SVM_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE }, ++ {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_HEAP_BASE, RGX_GENERAL_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE }, ++ {RGX_GENERAL_HEAP_IDENT, RGX_GENERAL_BRN_65273_HEAP_BASE, RGX_GENERAL_BRN_65273_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE }, ++ {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_HEAP_BASE, RGX_GENERAL_NON4K_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE | HEAP_INST_NON4K_FLAG }, ++ {RGX_GENERAL_NON4K_HEAP_IDENT, RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE, RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE | HEAP_INST_NON4K_FLAG }, ++ {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_HEAP_BASE, RGX_PDSCODEDATA_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE }, ++ {RGX_PDSCODEDATA_HEAP_IDENT, RGX_PDSCODEDATA_BRN_65273_HEAP_BASE, RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE }, ++ {RGX_RGNHDR_BRN_63142_HEAP_IDENT, RGX_RGNHDR_BRN_63142_HEAP_BASE, RGX_RGNHDR_BRN_63142_HEAP_SIZE, 0, 0, BRN63142IsPresent, HEAP_INST_BRN_DEP_VALUE }, ++ {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_HEAP_BASE, RGX_USCCODE_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE }, ++ {RGX_USCCODE_HEAP_IDENT, RGX_USCCODE_BRN_65273_HEAP_BASE, RGX_USCCODE_BRN_65273_HEAP_SIZE, (1 * DEVMEM_HEAP_RESERVED_SIZE_GRANULARITY), 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE }, ++ {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_HEAP_BASE, RGX_TQ3DPARAMETERS_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE }, ++ {RGX_TQ3DPARAMETERS_HEAP_IDENT, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE, RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE }, ++ {RGX_VK_CAPT_REPLAY_HEAP_IDENT, RGX_VK_CAPT_REPLAY_HEAP_BASE, RGX_VK_CAPT_REPLAY_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE }, ++ {RGX_SIGNALS_HEAP_IDENT, RGX_SIGNALS_HEAP_BASE, RGX_SIGNALS_HEAP_SIZE, 0, 0, SignalSnoopingIsPresent, HEAP_INST_FEAT_DEP_VALUE}, ++ {RGX_FBCDC_HEAP_IDENT, RGX_FBCDC_HEAP_BASE, RGX_FBCDC_HEAP_SIZE, 0, 0, FBCDescriptorIsPresent, HEAP_INST_FEAT_DEP_VALUE}, ++ {RGX_FBCDC_LARGE_HEAP_IDENT, RGX_FBCDC_LARGE_HEAP_BASE, RGX_FBCDC_LARGE_HEAP_SIZE, 0, 0, FBCLargeDescriptorIsPresent, HEAP_INST_FEAT_DEP_VALUE}, ++ {RGX_CMP_MISSION_RMW_HEAP_IDENT, RGX_CMP_MISSION_RMW_HEAP_BASE, RGX_CMP_MISSION_RMW_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE }, ++ {RGX_CMP_SAFETY_RMW_HEAP_IDENT, RGX_CMP_SAFETY_RMW_HEAP_BASE, RGX_CMP_SAFETY_RMW_HEAP_SIZE, 0, 0, NULL, HEAP_INST_DEFAULT_VALUE }, ++ {RGX_TEXTURE_STATE_HEAP_IDENT, RGX_TEXTURE_STATE_HEAP_BASE, RGX_TEXTURE_STATE_HEAP_SIZE, 0, 0, TextureStateIsPresent, HEAP_INST_FEAT_DEP_VALUE}, ++ {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_HEAP_BASE, RGX_VISIBILITY_TEST_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_DEFAULT_VALUE }, ++ {RGX_VISIBILITY_TEST_HEAP_IDENT, RGX_VISIBILITY_TEST_BRN_65273_HEAP_BASE, RGX_VISIBILITY_TEST_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_ALT_VALUE }, ++ 
{RGX_MMU_INIA_BRN_65273_HEAP_IDENT, RGX_MMU_INIA_BRN_65273_HEAP_BASE, RGX_MMU_INIA_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_DEP_VALUE }, ++ {RGX_MMU_INIB_BRN_65273_HEAP_IDENT, RGX_MMU_INIB_BRN_65273_HEAP_BASE, RGX_MMU_INIB_BRN_65273_HEAP_SIZE, 0, 0, BRN65273IsPresent, HEAP_INST_BRN_DEP_VALUE } ++}; ++ ++static const RGX_HEAP_INFO gasRGXHeapLayoutFW[] = ++{ ++ /* Name HeapBase HeapLength HeapReservedRegionLength Log2ImportAlignment pfnIsHeapPresent HeapInstanceFlags*/ ++ {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE, 0, 0, FWBRN65101IsPresent, HEAP_INST_DEFAULT_VALUE}, ++ {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL, 0, 0, FWBRN65101IsPresent, HEAP_INST_DEFAULT_VALUE}, ++ {RGX_FIRMWARE_MAIN_HEAP_IDENT, RGX_FIRMWARE_MAIN_HEAP_BASE, RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101, 0, 0, FWBRN65101IsPresent, HEAP_INST_BRN_ALT_VALUE}, ++ {RGX_FIRMWARE_CONFIG_HEAP_IDENT, RGX_FIRMWARE_CONFIG_HEAP_BASE, RGX_FIRMWARE_CONFIG_HEAP_SIZE, 0, 0, FWVZConfigPresent, HEAP_INST_DEFAULT_VALUE}, ++}; ++ ++/* Generic counting method. */ ++static void _CountRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, ++ const RGX_HEAP_INFO pksHeapInfo[], ++ IMG_UINT32 ui32HeapListSize, ++ IMG_UINT32* ui32HeapCount) ++{ ++ IMG_UINT32 i; ++ ++ /* Loop over rows in the heap data array using callback to decide if we ++ * should include the heap ++ */ ++ for (i = 0; i < ui32HeapListSize; i++) ++ { ++ const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i]; ++ ++ if (psHeapInfo->pfnIsHeapPresent) ++ { ++ if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo)) ++ { ++ /* We don't need to create this heap */ ++ continue; ++ } ++ } ++ ++ (*ui32HeapCount)++; ++ } ++} ++/* Generic heap instantiator */ ++static void _InstantiateRequiredHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, ++ const RGX_HEAP_INFO pksHeapInfo[], ++ IMG_UINT32 ui32HeapListSize, ++ DEVMEM_HEAP_BLUEPRINT **psDeviceMemoryHeapCursor) ++{ ++ IMG_UINT32 i; ++ /* We now have a list of the heaps to include and so we should loop over this ++ * list and instantiate. 
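++ * Together with _CountRequiredHeaps this is the classic two-pass,
++ * count-then-fill pattern: one filtered pass to size the array, a second to
++ * populate it, avoiding reallocation. A minimal sketch (illustrative names):
++ *
++ *   n = 0;
++ *   for (i = 0; i < rows; i++) if (present(&tbl[i])) n++;   - pass 1: count
++ *   cursor = buf = OSAllocMem(n * sizeof(*buf));
++ *   for (i = 0; i < rows; i++) if (present(&tbl[i]))        - pass 2: fill
++ *       init(&tbl[i], cursor++);
++ *
++ * Both passes must apply the same filter, otherwise the cursor would not
++ * match the allocation made from the count.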
++ */ ++ for (i = 0; i < ui32HeapListSize; i++) ++ { ++ IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); ++ IMG_UINT32 ui32Log2DataPageSize = 0; ++ ++ const RGX_HEAP_INFO *psHeapInfo = &pksHeapInfo[i]; ++ ++ if (psHeapInfo->pfnIsHeapPresent) ++ { ++ if (!psHeapInfo->pfnIsHeapPresent(psDevInfo, psHeapInfo)) ++ { ++ /* We don't need to create this heap */ ++ continue; ++ } ++ } ++ ++ if (psHeapInfo->ui32HeapInstanceFlags & HEAP_INST_NON4K_FLAG) ++ { ++ ui32Log2DataPageSize = psDevInfo->ui32Log2Non4KPgSize; ++ } ++ else ++ { ++ ui32Log2DataPageSize = ui32Log2RgxDefaultPageShift; ++ } ++ ++ HeapCfgBlueprintInit(psHeapInfo->pszName, ++ psHeapInfo->ui64HeapBase, ++ psHeapInfo->uiHeapLength, ++ psHeapInfo->uiHeapReservedRegionLength, ++ ui32Log2DataPageSize, ++ psHeapInfo->ui32Log2ImportAlignment, ++ *psDeviceMemoryHeapCursor); ++ ++ (*psDeviceMemoryHeapCursor)++; ++ } ++} ++ ++static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DEVICE_MEMORY_INFO *psNewMemoryInfo) ++{ ++ PVRSRV_ERROR eError; ++ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor; ++ ++ IMG_UINT32 ui32HeapListSize = ARRAY_SIZE(gasRGXHeapLayoutApp); ++ IMG_UINT32 ui32FWHeapListSize = ARRAY_SIZE(gasRGXHeapLayoutFW); ++ IMG_UINT32 ui32CountedHeapSize; ++ ++ IMG_UINT32 ui32HeapCount = 0; ++ IMG_UINT32 ui32FWHeapCount = 0; ++ ++ /* Count heaps required for the app heaps */ ++ _CountRequiredHeaps(psDevInfo, ++ gasRGXHeapLayoutApp, ++ ui32HeapListSize, ++ &ui32HeapCount); ++ ++ /* Count heaps required for the FW heaps */ ++ _CountRequiredHeaps(psDevInfo, ++ gasRGXHeapLayoutFW, ++ ui32FWHeapListSize, ++ &ui32FWHeapCount); ++ ++ ui32CountedHeapSize = (ui32HeapCount + ui32FWHeapCount + RGX_NUM_OS_SUPPORTED); ++ ++ psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * ui32CountedHeapSize); ++ PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeap, eError, e0); ++ ++ /* Initialise the heaps */ ++ psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap; ++ ++ /* Instantiate App Heaps */ ++ _InstantiateRequiredHeaps(psDevInfo, ++ gasRGXHeapLayoutApp, ++ ui32HeapListSize, ++ &psDeviceMemoryHeapCursor); ++ ++ /* Instantiate FW Heaps */ ++ _InstantiateRequiredHeaps(psDevInfo, ++ gasRGXHeapLayoutFW, ++ ui32FWHeapListSize, ++ &psDeviceMemoryHeapCursor); ++ ++ /* set the heap count */ ++ psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap); ++ ++ /* Check we have allocated the correct # of heaps, minus any VZ heaps as these ++ * have not been created at this point ++ */ ++ PVR_ASSERT(psNewMemoryInfo->ui32HeapCount == (ui32CountedHeapSize - RGX_NUM_OS_SUPPORTED)); ++ ++ /* ++ In the new heap setup, we initialise 2 configurations: ++ 1 - One will be for the firmware only (index 1 in array) ++ a. This primarily has the firmware heap in it. ++ b. It also has additional guest OSID firmware heap(s) ++ - Only if the number of support firmware OSID > 1 ++ 2 - Others shall be for clients only (index 0 in array) ++ a. This has all the other client heaps in it. 
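++ A sketch of the resulting blueprint array layout:
++
++   psDeviceMemoryHeap: [ client heaps ..... | FW main | FW cfg | raw OSIDs ]
++                         \__ config[0] __/    \______ config[1] ______/
++
++ config[0] ("Default") spans ui32HeapCount - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS
++ entries; config[1] ("Firmware") takes the trailing firmware heaps, and on a
++ VZ host the per-OSID raw firmware heaps appended below are counted into it.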
++ */ ++ psNewMemoryInfo->uiNumHeapConfigs = 2; ++ psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs); ++ PVR_LOG_GOTO_IF_NOMEM(psNewMemoryInfo->psDeviceMemoryHeapConfigArray, eError, e1); ++ ++ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration"; ++ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; ++ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap; ++ ++ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration"; ++ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; ++ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor - RGX_FIRMWARE_NUMBER_OF_FW_HEAPS; ++ ++#if (RGX_NUM_OS_SUPPORTED > 1) ++ if (PVRSRV_VZ_MODE_IS(HOST)) ++ { ++ IMG_UINT32 ui32OSid; ++ ++ /* Create additional raw firmware heaps */ ++ for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) ++ { ++ if (RGXInitFwRawHeap(psDeviceMemoryHeapCursor, ui32OSid) != PVRSRV_OK) ++ { ++ /* if any allocation fails, free previously allocated heaps and abandon initialisation */ ++ for (; ui32OSid > RGX_FIRST_RAW_HEAP_OSID; ui32OSid--) ++ { ++ RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); ++ psDeviceMemoryHeapCursor--; ++ } ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e1; ++ } ++ ++ /* Append additional firmware heaps to host driver firmware context heap configuration */ ++ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1; ++ ++ /* advance to the next heap */ ++ psDeviceMemoryHeapCursor++; ++ } ++ } ++#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ ++ ++ return PVRSRV_OK; ++e1: ++ OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap); ++e0: ++ return eError; ++} ++ ++static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo) ++{ ++#if (RGX_NUM_OS_SUPPORTED > 1) ++ if (PVRSRV_VZ_MODE_IS(HOST)) ++ { ++ IMG_UINT32 ui32OSid; ++ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap; ++ ++ /* Delete all guest firmware heaps */ ++ for (ui32OSid = RGX_FIRST_RAW_HEAP_OSID; ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) ++ { ++ RGXDeInitFwRawHeap(psDeviceMemoryHeapCursor); ++ psDeviceMemoryHeapCursor++; ++ } ++ } ++#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ ++ ++ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray); ++ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap); ++} ++ ++static PVRSRV_ERROR RGXPhysMemDeviceHeapsInit(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++ PHYS_HEAP_CONFIG *psFwMainConfig = FindPhysHeapConfig(psDeviceNode->psDevConfig, ++ PHYS_HEAP_USAGE_FW_MAIN); ++ ++#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED > 1) ++ /* VZ heap validation */ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ PVR_LOG_RETURN_IF_FALSE(psFwMainConfig != NULL, ++ "FW Main heap is required for VZ Guest.", ++ PVRSRV_ERROR_PHYSHEAP_CONFIG); ++ } ++#endif ++ ++ if (psFwMainConfig != NULL) ++ { ++ /* Check FW_MAIN for multiple usage flags. Because FW_MAIN is divided ++ into subheaps, shared usage with other heaps is not allowed. */ ++ PVR_LOG_RETURN_IF_FALSE(psFwMainConfig->ui32UsageFlags == PHYS_HEAP_USAGE_FW_MAIN, ++ "FW Main phys heap config specified with more than one usage. 
FW Main must be FW Main only.", ++ PVRSRV_ERROR_PHYSHEAP_CONFIG); ++ } ++ ++ if (psFwMainConfig == NULL) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap not set", __func__)); ++ } ++ else if (psFwMainConfig->eType == PHYS_HEAP_TYPE_UMA) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses OS System memory (UMA)", __func__)); ++ } ++ else /* PHYS_HEAP_TYPE_LMA or PHYS_HEAP_TYPE_DMA */ ++ { ++ IMG_UINT64 uFwMainSubHeapSize; ++ PHYS_HEAP_CONFIG sFwHeapConfig; ++ ++ /* MIPS Firmware must reserve some space in its Host/Native heap for GPU memory mappings */ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && (!PVRSRV_VZ_MODE_IS(GUEST))) ++ { ++#if defined(FIX_HW_BRN_65101_BIT_MASK) ++ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65101)) ++ { ++ uFwMainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_BRN65101; ++ } ++ else ++#endif ++ { ++ uFwMainSubHeapSize = RGX_FIRMWARE_HOST_MIPS_MAIN_HEAP_SIZE_NORMAL; ++ } ++ } ++ else ++ { ++ uFwMainSubHeapSize = RGX_FIRMWARE_DEFAULT_MAIN_HEAP_SIZE; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Firmware physical heap uses local memory managed by the driver (LMA)", __func__)); ++ ++ PVR_LOG_GOTO_IF_FALSE(psFwMainConfig->uiSize >= RGX_FIRMWARE_RAW_HEAP_SIZE, ++ "Invalid firmware physical heap size.", ErrorDeinit); ++ ++ /* Now we construct RAs to manage the FW heaps */ ++ ++#if defined(SUPPORT_AUTOVZ) ++ if (PVRSRV_VZ_MODE_IS(HOST)) ++ { ++ /* 1 Mb can hold the maximum amount of page tables for the memory shared between the firmware and all KM drivers: ++ * MAX(RAW_HEAP_SIZE) = 32 Mb; MAX(NUMBER_OS) = 8; Total shared memory = 256 Mb; ++ * MMU objects required: 65536 PTEs; 16 PDEs; 1 PCE; */ ++ IMG_UINT64 uMaxFwMmuPageTableSize = 1 * 1024 * 1024; ++ ++ sFwHeapConfig = *psFwMainConfig; ++ ++ /* By default the firmware MMU's page tables are allocated from the same carveout memory as the firmware heap. ++ * If a different base address is specified for this reserved range, use the overriding define instead. 
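++ * Checking the 1 Mb sizing above: 8 OSIDs x 32 Mb raw heap = 256 Mb of
++ * shared memory; at 4KB per page that is 65536 PTEs, i.e. 512KB assuming
++ * 8-byte entries, plus 16 PDEs and 1 PCE - so 1 Mb leaves comfortable
++ * headroom.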
*/ ++#if defined(PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR) ++ sFwHeapConfig.sStartAddr.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; ++ sFwHeapConfig.sCardBase.uiAddr = PVR_AUTOVZ_OVERRIDE_FW_MMU_CARVEOUT_BASE_ADDR; ++#else ++ sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED; ++ sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE * RGX_NUM_OS_SUPPORTED; ++#endif ++ ++ sFwHeapConfig.uiSize = uMaxFwMmuPageTableSize; ++ sFwHeapConfig.ui32UsageFlags = 0; ++ ++ eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw MMU subheap", ++ &psDeviceNode->psFwMMUReservedPhysHeap); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MMU", ErrorDeinit); ++ } ++#endif ++ ++ /* Subheap layout: Main + (optional MIPS reserved range) + Config */ ++ sFwHeapConfig = *psFwMainConfig; ++ sFwHeapConfig.uiSize = uFwMainSubHeapSize; ++ sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN; ++ ++ eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Main subheap", &psDeviceNode->psFWMainPhysHeap); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:MAIN", ErrorDeinit); ++ ++ sFwHeapConfig = *psFwMainConfig; ++ sFwHeapConfig.sStartAddr.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE; ++ sFwHeapConfig.sCardBase.uiAddr += RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE; ++ sFwHeapConfig.uiSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE; ++ sFwHeapConfig.ui32UsageFlags = PHYS_HEAP_USAGE_FW_CONFIG; ++ ++ eError = PhysmemCreateHeapLMA(psDeviceNode, &sFwHeapConfig, "Fw Cfg subheap", &psDeviceNode->psFWCfgPhysHeap); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysmemCreateHeapLMA:CFG", ErrorDeinit); ++ } ++ ++ /* Acquire FW heaps */ ++ eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_MAIN, psDeviceNode, ++ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_MAIN", ErrorDeinit); ++ ++ eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CONFIG, psDeviceNode, ++ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CONFIG]); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CONFIG", ErrorDeinit); ++ ++ eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_CODE, psDeviceNode, ++ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_CODE]); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_CODE", ErrorDeinit); ++ ++ eError = PhysHeapAcquireByDevPhysHeap(PVRSRV_PHYS_HEAP_FW_PRIV_DATA, psDeviceNode, ++ &psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_PRIV_DATA]); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PhysHeapAcquire:FW_DATA", ErrorDeinit); ++ ++ return eError; ++ ++ErrorDeinit: ++ PVR_ASSERT(IMG_FALSE); ++ PVRSRVPhysMemHeapsDeinit(psDeviceNode); ++ ++ return eError; ++} ++ ++static void _ReadNon4KHeapPageSize(IMG_UINT32 *pui32Log2Non4KPgSize) ++{ ++ void *pvAppHintState = NULL; ++ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERALNON4KHEAPPAGESIZE; ++ IMG_UINT32 ui32GeneralNon4KHeapPageSize; ++ ++ /* Get the page size for the dummy page from the NON4K heap apphint */ ++ OSCreateKMAppHintState(&pvAppHintState); ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, ++ GeneralNon4KHeapPageSize, &ui32AppHintDefault, ++ &ui32GeneralNon4KHeapPageSize); ++ *pui32Log2Non4KPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize); ++ OSFreeKMAppHintState(pvAppHintState); ++} ++ ++/* RGXRegisterDevice ++ * ++ * NOTE: No PDUMP statements are allowed in until Part 2 of the device initialisation ++ * is reached. 
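++ * (Presumably because the PDump register/memspace names and the
++ * pfnPDumpInitDevice callback are only wired up inside this function,
++ * so PDump output issued any earlier would have no device context.)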
++ */ ++PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ DEVICE_MEMORY_INFO *psDevMemoryInfo; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ void *pvAppHintState = NULL; ++ IMG_UINT32 ui32AppHintDefault = HWPERF_HOST_TL_STREAM_SIZE_DEFAULT, ui32HWPerfHostBufSizeKB; ++ ++ ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE; ++ OSCreateKMAppHintState(&pvAppHintState); ++ OSGetKMAppHintUINT32(APPHINT_NO_DEVICE, pvAppHintState, HWPerfHostBufSizeInKB, ++ &ui32AppHintDefault, &ui32HWPerfHostBufSizeKB); ++ OSFreeKMAppHintState(pvAppHintState); ++ pvAppHintState = NULL; ++ ++ /********************* ++ * Device node setup * ++ *********************/ ++ /* Setup static data and callbacks on the device agnostic device node */ ++#if defined(PDUMP) ++ psDeviceNode->sDevId.pszPDumpRegName = RGX_PDUMPREG_NAME; ++ psDeviceNode->sDevId.pszPDumpDevName = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]); ++ psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump; ++#endif /* PDUMP */ ++ ++ OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK); ++ OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE); ++ ++ /* Configure MMU specific stuff */ ++ RGXMMUInit_Register(psDeviceNode); ++ ++ psDeviceNode->pfnDevSLCFlushRange = NULL; ++ psDeviceNode->pfnInvalFBSCTable = NULL; ++ ++ psDeviceNode->pfnValidateOrTweakPhysAddrs = NULL; ++ ++ psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate; ++ ++ psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick; ++ ++ psDeviceNode->pfnInitDeviceCompatCheck = &RGXDevInitCompatCheck; ++ ++ /* Register callbacks for creation of device memory contexts */ ++ psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext; ++ psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext; ++ ++ /* Register callbacks for Unified Fence Objects */ ++ psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock; ++ psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock; ++ ++ /* Register callback for checking the device's health */ ++ psDeviceNode->pfnUpdateHealthStatus = PVRSRV_VZ_MODE_IS(GUEST) ? 
NULL : RGXUpdateHealthStatus; ++ ++#if defined(SUPPORT_AUTOVZ) ++ /* Register callback for updating the virtualization watchdog */ ++ psDeviceNode->pfnUpdateAutoVzWatchdog = RGXUpdateAutoVzWatchdog; ++#endif ++ ++ /* Register method to service the FW HWPerf buffer */ ++ psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB; ++ ++ /* Register callback for getting the device version information string */ ++ psDeviceNode->pfnDeviceVersionString = RGXDevVersionString; ++ ++ /* Register callback for getting the device clock speed */ ++ psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed; ++ ++ /* Register callback for soft resetting some device modules */ ++ psDeviceNode->pfnSoftReset = RGXSoftReset; ++ ++ /* Register callback for resetting the HWR logs */ ++ psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs; ++ ++ /* Register callback for resetting the HWR logs */ ++ psDeviceNode->pfnVerifyBVNC = RGXVerifyBVNC; ++ ++ /* Register callback for checking alignment of UM structures */ ++ psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck; ++ ++ /*Register callback for checking the supported features and getting the ++ * corresponding values */ ++ psDeviceNode->pfnCheckDeviceFeature = RGXBvncCheckFeatureSupported; ++ psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue; ++ ++ /* Callback for checking if system layer supports FBC 3.1 */ ++ psDeviceNode->pfnHasFBCDCVersion31 = RGXSystemHasFBCDCVersion31; ++ ++ /* Callback for getting the MMU device attributes */ ++ psDeviceNode->pfnGetMMUDeviceAttributes = RGXDevMMUAttributes; ++ ++ /* Register callback for initialising device-specific physical memory heaps */ ++ psDeviceNode->pfnPhysMemDeviceHeapsInit = RGXPhysMemDeviceHeapsInit; ++ ++ /* Set up required support for dummy page */ ++ OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0); ++ OSAtomicWrite(&(psDeviceNode->sDevZeroPage.atRefCounter), 0); ++ ++ /* Set the order to 0 */ ++ psDeviceNode->sDummyPage.sPageHandle.uiOrder = 0; ++ psDeviceNode->sDevZeroPage.sPageHandle.uiOrder = 0; ++ ++ /* Set the size of the Dummy page to zero */ ++ psDeviceNode->sDummyPage.ui32Log2PgSize = 0; ++ ++ /* Set the size of the Zero page to zero */ ++ psDeviceNode->sDevZeroPage.ui32Log2PgSize = 0; ++ ++ /* Set the Dummy page phys addr */ ++ psDeviceNode->sDummyPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; ++ ++ /* Set the Zero page phys addr */ ++ psDeviceNode->sDevZeroPage.ui64PgPhysAddr = MMU_BAD_PHYS_ADDR; ++ ++ /* The lock can be acquired from MISR (Z-buffer) path */ ++ eError = OSLockCreate(&psDeviceNode->sDummyPage.psPgLock); ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__)); ++ return eError; ++ } ++ ++ /* Create the lock for zero page */ ++ eError = OSLockCreate(&psDeviceNode->sDevZeroPage.psPgLock); ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create Zero page lock", __func__)); ++ goto free_dummy_page; ++ } ++#if defined(PDUMP) ++ psDeviceNode->sDummyPage.hPdumpPg = NULL; ++ psDeviceNode->sDevZeroPage.hPdumpPg = NULL; ++#endif ++ ++ /********************* ++ * Device info setup * ++ *********************/ ++ /* Allocate device control block */ ++ psDevInfo = OSAllocZMem(sizeof(*psDevInfo)); ++ if (psDevInfo == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "DevInitRGXPart1 : Failed to alloc memory for DevInfo")); ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ /* Default psTrampoline to point to null struct */ ++ psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline; ++ ++ /* create 
locks for the context lists stored in the DevInfo structure. ++ * these lists are modified on context create/destroy and read by the ++ * watchdog thread ++ */ ++ ++ eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock)); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__)); ++ goto e0; ++ } ++ ++ eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock)); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__)); ++ goto e1; ++ } ++ ++ eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock)); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__)); ++ goto e2; ++ } ++ ++ eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock)); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__)); ++ goto e3; ++ } ++ ++ eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock)); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__)); ++ goto e4; ++ } ++ ++ eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock)); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__)); ++ goto e5; ++ } ++ ++ eError = OSSpinLockCreate(&psDevInfo->hLockKCCBDeferredCommandsList); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to KCCB deferred commands list lock", __func__)); ++ goto e6; ++ } ++ dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead)); ++ ++ dllist_init(&(psDevInfo->sRenderCtxtListHead)); ++ dllist_init(&(psDevInfo->sComputeCtxtListHead)); ++ dllist_init(&(psDevInfo->sTransferCtxtListHead)); ++ dllist_init(&(psDevInfo->sTDMCtxtListHead)); ++ dllist_init(&(psDevInfo->sKickSyncCtxtListHead)); ++ ++ dllist_init(&(psDevInfo->sCommonCtxtListHead)); ++ psDevInfo->ui32CommonCtxtCurrentID = 1; ++ ++ ++ eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common context list lock", __func__)); ++ goto e7; ++ } ++ ++ eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__)); ++ goto e8; ++ } ++ ++ eError = OSLockCreate(&psDevInfo->hBPLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for break points", __func__)); ++ goto e9; ++ } ++ ++ eError = OSLockCreate(&psDevInfo->hRGXFWIfBufInitLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for trace buffers", __func__)); ++ goto e10; ++ } ++ ++ eError = OSLockCreate(&psDevInfo->hCCBStallCheckLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB checking lock", __func__)); ++ goto e11; ++ } ++ eError = OSLockCreate(&psDevInfo->hCCBRecoveryLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB recovery lock", __func__)); ++ goto e12; ++ } ++ ++ dllist_init(&psDevInfo->sMemoryContextList); ++ ++ /* initialise ui32SLRHoldoffCounter */ ++ if (RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS > DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT) ++ { ++ psDevInfo->ui32SLRHoldoffCounter = RGX_INITIAL_SLR_HOLDOFF_PERIOD_MS / DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT; ++ } ++ else ++ { ++ 
psDevInfo->ui32SLRHoldoffCounter = 0; ++ } ++ ++ /* Setup static data and callbacks on the device specific device info */ ++ psDevInfo->psDeviceNode = psDeviceNode; ++ ++ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo; ++ psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap; ++ ++ /* ++ * Map RGX Registers ++ */ ++ psDevInfo->ui32RegSize = psDeviceNode->psDevConfig->ui32RegsSize; ++ psDevInfo->sRegsPhysBase = psDeviceNode->psDevConfig->sRegsCpuPBase; ++ ++#if !defined(NO_HARDWARE) ++ psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase, ++ psDeviceNode->psDevConfig->ui32RegsSize, ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED); ++ ++ if (psDevInfo->pvRegsBaseKM == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to create RGX register mapping", ++ __func__)); ++ eError = PVRSRV_ERROR_BAD_MAPPING; ++ goto e13; ++ } ++#endif ++ ++ psDeviceNode->pvDevice = psDevInfo; ++ ++ eError = RGXBvncInitialiseConfiguration(psDeviceNode); ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Unsupported HW device detected by driver", ++ __func__)); ++ goto e14; ++ } ++ ++ _ReadNon4KHeapPageSize(&psDevInfo->ui32Log2Non4KPgSize); ++ ++ /*Set the zero & dummy page sizes as needed for the heap with largest page size */ ++ psDeviceNode->sDevZeroPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize; ++ psDeviceNode->sDummyPage.ui32Log2PgSize = psDevInfo->ui32Log2Non4KPgSize; ++ ++ eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo); ++ if (eError != PVRSRV_OK) ++ { ++ goto e14; ++ } ++ ++ eError = RGXHWPerfInit(psDevInfo); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfInit", e14); ++ ++ eError = RGXHWPerfHostInit(psDeviceNode->pvDevice, ui32HWPerfHostBufSizeKB); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXHWPerfHostInit", ErrorDeInitHWPerfFw); ++ ++#if defined(SUPPORT_VALIDATION) ++ /* This completion will be signaled by the ISR when processing ++ * the answer CCB command carrying an RGX Register read value */ ++ init_completion(&psDevInfo->sFwRegs.sRegComp); ++ psDevInfo->sFwRegs.ui64RegVal = 0; ++ ++#if defined(SUPPORT_SOC_TIMER) ++ { ++ IMG_BOOL ui32AppHintDefault = IMG_FALSE; ++ IMG_BOOL bInitSocTimer; ++ void *pvAppHintState = NULL; ++ ++ OSCreateKMAppHintState(&pvAppHintState); ++ OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, ValidateSOCUSCTimer, &ui32AppHintDefault, &bInitSocTimer); ++ OSFreeKMAppHintState(pvAppHintState); ++ ++ if (bInitSocTimer) ++ { ++ eError = RGXInitSOCUSCTimer(psDeviceNode); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXInitSOCUSCTimer", ErrorDeInitHWPerfHost); ++ } ++ } ++#endif ++#endif ++ ++ /* Register callback for dumping debug info */ ++ eError = RGXDebugInit(psDevInfo); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXDebugInit", ErrorDeInitHWPerfHost); ++ ++ /* Register callback for fw mmu init */ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ psDeviceNode->pfnFwMMUInit = RGXMipsMMUInit_Register; ++ } ++ ++ /* The device shared-virtual-memory heap address-space size is stored here for faster ++ look-up without having to walk the device heap configuration structures during ++ client device connection (i.e. 
this size is relative to a zero-based offset) */ ++#if defined(FIX_HW_BRN_65273_BIT_MASK) ++ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273)) ++ { ++ psDeviceNode->ui64GeneralSVMHeapTopVA = 0; ++ }else ++#endif ++ { ++ psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE; ++ } ++ ++ if (NULL != psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit) ++ { ++ psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit(psDeviceNode->psDevConfig, ++ psDevInfo->sDevFeatureCfg.ui64Features); ++ } ++ ++ psDeviceNode->bHasSystemDMA = psDeviceNode->psDevConfig->bHasDma; ++ ++ /* Initialise the device dependent bridges */ ++ eError = DeviceDepBridgeInit(psDevInfo); ++ PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit"); ++ ++#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) ++ eError = OSLockCreate(&psDevInfo->hCounterDumpingLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for counter sampling.", __func__)); ++ goto ErrorDeInitDeviceDepBridge; ++ } ++#endif ++ ++ /* Initialise error counters */ ++ memset(&psDevInfo->sErrorCounts, 0, sizeof(PVRSRV_RGXDEV_ERROR_COUNTS)); ++ ++ return PVRSRV_OK; ++ ++#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) ++ErrorDeInitDeviceDepBridge: ++ DeviceDepBridgeDeInit(psDevInfo); ++#endif ++ ++ErrorDeInitHWPerfHost: ++ RGXHWPerfHostDeInit(psDevInfo); ++ ++ErrorDeInitHWPerfFw: ++ RGXHWPerfDeinit(psDevInfo); ++ ++e14: ++#if !defined(NO_HARDWARE) ++ OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM, ++ psDevInfo->ui32RegSize); ++ ++e13: ++#endif /* !NO_HARDWARE */ ++ OSLockDestroy(psDevInfo->hCCBRecoveryLock); ++e12: ++ OSLockDestroy(psDevInfo->hCCBStallCheckLock); ++e11: ++ OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock); ++e10: ++ OSLockDestroy(psDevInfo->hBPLock); ++e9: ++ OSLockDestroy(psDevInfo->sRegCongfig.hLock); ++e8: ++ OSWRLockDestroy(psDevInfo->hCommonCtxtListLock); ++e7: ++ OSSpinLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList); ++e6: ++ OSWRLockDestroy(psDevInfo->hMemoryCtxListLock); ++e5: ++ OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock); ++e4: ++ OSWRLockDestroy(psDevInfo->hTDMCtxListLock); ++e3: ++ OSWRLockDestroy(psDevInfo->hTransferCtxListLock); ++e2: ++ OSWRLockDestroy(psDevInfo->hComputeCtxListLock); ++e1: ++ OSWRLockDestroy(psDevInfo->hRenderCtxListLock); ++e0: ++ OSFreeMem(psDevInfo); ++ ++ /* Destroy the zero page lock created above */ ++ OSLockDestroy(psDeviceNode->sDevZeroPage.psPgLock); ++ ++free_dummy_page: ++ /* Destroy the dummy page lock created above */ ++ OSLockDestroy(psDeviceNode->sDummyPage.psPgLock); ++ ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_PCHAR psz = psDevInfo->sDevFeatureCfg.pszBVNCString; ++ if (NULL == psz) ++ { ++ IMG_CHAR pszBVNCInfo[RGX_HWPERF_MAX_BVNC_LEN]; ++ size_t uiBVNCStringSize; ++ size_t uiStringLength; ++ ++ uiStringLength = OSSNPrintf(pszBVNCInfo, RGX_HWPERF_MAX_BVNC_LEN, "%d.%d.%d.%d", ++ psDevInfo->sDevFeatureCfg.ui32B, ++ psDevInfo->sDevFeatureCfg.ui32V, ++ psDevInfo->sDevFeatureCfg.ui32N, ++ psDevInfo->sDevFeatureCfg.ui32C); ++ PVR_ASSERT(uiStringLength < RGX_HWPERF_MAX_BVNC_LEN); ++ ++ uiBVNCStringSize = (uiStringLength + 1) * sizeof(IMG_CHAR); ++ psz = OSAllocMem(uiBVNCStringSize); ++ if (NULL != psz) ++ { ++ OSCachedMemCopy(psz, pszBVNCInfo, uiBVNCStringSize); ++ psDevInfo->sDevFeatureCfg.pszBVNCString = psz; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: Allocating memory for BVNC Info string failed", ++ __func__)); ++ } ++ } ++ ++ 
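++	/* At this point psz is either the cached BVNC string or NULL if the
++	 * allocation above failed; callers must treat NULL as "BVNC unavailable".
++	 * For example, a core reporting B.V.N.C 27.5.54.330 would be cached here
++	 * as the string "27.5.54.330" (illustrative values only).
++	 */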
++	return psz;
++}
++
++/*************************************************************************/ /*!
++@Function       RGXDevVersionString
++@Description    Gets the version string for the given device node and returns
++                a pointer to it in ppszVersionString. It is then the
++                responsibility of the caller to free this memory.
++@Input          psDeviceNode            Device node from which to obtain the
++                                        version string
++@Output         ppszVersionString       Contains the version string upon return
++@Return         PVRSRV_ERROR
++ */ /**************************************************************************/
++static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                        IMG_CHAR **ppszVersionString)
++{
++#if defined(NO_HARDWARE) || defined(EMULATOR)
++	const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (SW)";
++#else
++	const IMG_CHAR szFormatString[] = "GPU variant BVNC: %s (HW)";
++#endif
++	PVRSRV_RGXDEV_INFO *psDevInfo;
++	IMG_PCHAR pszBVNC;
++	size_t uiStringLength;
++
++	if (psDeviceNode == NULL || ppszVersionString == NULL)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
++	pszBVNC = RGXDevBVNCString(psDevInfo);
++
++	if (NULL == pszBVNC)
++	{
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++
++	uiStringLength = OSStringLength(pszBVNC);
++	uiStringLength += (sizeof(szFormatString) - 2); /* sizeof() includes the terminating NUL;
++	                                                   subtracting 2 drops the "%s" that the
++	                                                   BVNC string replaces, which leaves room
++	                                                   for the NUL in the result */
++	*ppszVersionString = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
++	if (*ppszVersionString == NULL)
++	{
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++
++	OSSNPrintf(*ppszVersionString, uiStringLength, szFormatString,
++	           pszBVNC);
++
++	return PVRSRV_OK;
++}
++
++/**************************************************************************/ /*!
++@Function       RGXDevClockSpeed
++@Description    Gets the clock speed for the given device node and returns
++                it in pui32RGXClockSpeed.
++@Input          psDeviceNode        Device node
++@Output         pui32RGXClockSpeed  Variable for storing the clock speed
++@Return         PVRSRV_ERROR
++ */ /***************************************************************************/
++static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                     IMG_PUINT32 pui32RGXClockSpeed)
++{
++	RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
++
++	/* get clock speed */
++	*pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
++
++	return PVRSRV_OK;
++}
++
++#if (RGX_NUM_OS_SUPPORTED > 1)
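++/* Guest firmware heaps are carved out of a single contiguous VA range, one
++ * RGX_FIRMWARE_RAW_HEAP_SIZE-sized slot per OSID, i.e. OSID n starts at
++ * RGX_FIRMWARE_RAW_HEAP_BASE + (n * RGX_FIRMWARE_RAW_HEAP_SIZE). As an
++ * illustrative sketch only (the real size comes from the build headers):
++ * with a 32MB raw heap, OSID 0 sits at +0MB, OSID 1 at +32MB and OSID 2 at
++ * +64MB, which is the offset computed in RGXInitFwRawHeap() below.
++ */
++/*!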
++ ******************************************************************************* ++ ++ @Function RGXInitFwRawHeap ++ ++ @Description Called to perform additional initialisation ++ ******************************************************************************/ ++static PVRSRV_ERROR RGXInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap, IMG_UINT32 ui32OSid) ++{ ++ IMG_UINT32 uiStringLength; ++ IMG_UINT32 uiStringLengthMax = 32; ++ ++ IMG_UINT32 ui32Log2RgxDefaultPageShift = RGXHeapDerivePageSize(OSGetPageShift()); ++ ++ uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1); ++ ++ /* Start by allocating memory for this OSID heap identification string */ ++ psDevMemHeap->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR)); ++ if (psDevMemHeap->pszName == NULL) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ /* Append the OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */ ++ OSSNPrintf((IMG_CHAR *)psDevMemHeap->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSid); ++ ++ /* Use the common blueprint template support function to initialise the heap */ ++ HeapCfgBlueprintInit(psDevMemHeap->pszName, ++ RGX_FIRMWARE_RAW_HEAP_BASE + (ui32OSid * RGX_FIRMWARE_RAW_HEAP_SIZE), ++ RGX_FIRMWARE_RAW_HEAP_SIZE, ++ 0, ++ ui32Log2RgxDefaultPageShift, ++ 0, ++ psDevMemHeap); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++ ******************************************************************************* ++ ++ @Function RGXDeInitFwRawHeap ++ ++ @Description Called to perform additional deinitialisation ++ ******************************************************************************/ ++static void RGXDeInitFwRawHeap(DEVMEM_HEAP_BLUEPRINT *psDevMemHeap) ++{ ++ IMG_UINT64 uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE; ++ IMG_UINT64 uiSpan = uiBase + ((RGX_NUM_OS_SUPPORTED - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE); ++ ++ /* Safe to do as the guest firmware heaps are last in the list */ ++ if (psDevMemHeap->sHeapBaseAddr.uiAddr >= uiBase && ++ psDevMemHeap->sHeapBaseAddr.uiAddr < uiSpan) ++ { ++ void *pszName = (void*)psDevMemHeap->pszName; ++ OSFreeMem(pszName); ++ } ++} ++#endif /* (RGX_NUM_OS_SUPPORTED > 1) */ ++ ++/****************************************************************************** ++ End of file (rgxinit.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxinit.h b/drivers/gpu/drm/img-rogue/rgxinit.h +new file mode 100644 +index 000000000000..6cc8c8b1c256 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxinit.h +@@ -0,0 +1,281 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX initialisation header file ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX initialisation ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXINIT_H) ++#define RGXINIT_H ++ ++#include "connection_server.h" ++#include "pvrsrv_error.h" ++#include "img_types.h" ++#include "device.h" ++#include "rgxdevice.h" ++#include "rgx_bridge.h" ++#include "fwload.h" ++ ++#if defined(__linux__) ++#define OS_FW_VERIFY_FUNCTION OSVerifyFirmware ++#else ++#define OS_FW_VERIFY_FUNCTION NULL ++#endif ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXInitDevPart2 ++ ++ @Description ++ ++ Second part of server-side RGX initialisation ++ ++ @Input psDeviceNode - device node ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXInitDevPart2 (PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32DeviceFlags, ++ IMG_UINT32 ui32HWPerfHostFilter, ++ RGX_ACTIVEPM_CONF eActivePMConf); ++ ++PVRSRV_ERROR RGXInitAllocFWImgMem(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEVMEM_SIZE_T ui32FWCodeLen, ++ IMG_DEVMEM_SIZE_T ui32FWDataLen, ++ IMG_DEVMEM_SIZE_T uiFWCorememCodeLen, ++ IMG_DEVMEM_SIZE_T uiFWCorememDataLen); ++ ++ ++/*! 
++******************************************************************************* ++ ++ @Function RGXInitFirmware ++ ++ @Description ++ ++ Server-side RGX firmware initialisation ++ ++ @Input psDeviceNode - device node ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR ++RGXInitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_BOOL bEnableSignatureChecks, ++ IMG_UINT32 ui32SignatureChecksBufSize, ++ IMG_UINT32 ui32HWPerfFWBufSizeKB, ++ IMG_UINT64 ui64HWPerfFilter, ++ IMG_UINT32 ui32ConfigFlags, ++ IMG_UINT32 ui32LogType, ++ IMG_UINT32 ui32FilterFlags, ++ IMG_UINT32 ui32JonesDisableMask, ++ IMG_UINT32 ui32HWRDebugDumpLimit, ++ IMG_UINT32 ui32HWPerfCountersDataSize, ++ IMG_UINT32 *pui32TPUTrilinearFracMask, ++ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf, ++ FW_PERF_CONF eFirmwarePerf, ++ IMG_UINT32 ui32KCCBSizeLog2, ++ IMG_UINT32 ui32ConfigFlagsExt, ++ IMG_UINT32 ui32FwOsCfgFlags); ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXLoadAndGetFWData ++ ++ @Description ++ ++ Load FW and return pointer to FW data. ++ ++ @Input psDeviceNode - device node ++ ++ @Input ppsRGXFW - fw pointer ++ ++ @Output ppbFWData - pointer to FW data (NULL if an error occurred) ++ ++ @Return PVRSRV_ERROR - PVRSRV_OK on success ++ PVRSRV_ERROR_NOT_READY if filesystem is not ready ++ PVRSRV_ERROR_NOT_FOUND if no suitable FW image found ++ PVRSRV_ERROR_OUT_OF_MEMORY if unable to alloc memory for FW image ++ PVRSRV_ERROR_NOT_AUTHENTICATED if FW image failed verification ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXLoadAndGetFWData(PVRSRV_DEVICE_NODE *psDeviceNode, ++ OS_FW_IMAGE **ppsRGXFW, ++ const IMG_BYTE **ppbFWData); ++ ++#if defined(PDUMP) ++/*! ++******************************************************************************* ++ ++ @Function RGXInitHWPerfCounters ++ ++ @Description ++ ++ Initialisation of the performance counters ++ ++ @Input psDeviceNode - device node ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXInitHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode); ++#endif ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXRegisterDevice ++ ++ @Description ++ ++ Registers the device with the system ++ ++ @Input: psDeviceNode - device node ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXDevBVNCString ++ ++ @Description ++ ++ Returns the Device BVNC string. It will allocate and fill it first, if necessary. ++ ++ @Input: psDevInfo - device info (must not be null) ++ ++ @Return IMG_PCHAR - pointer to BVNC string ++ ++******************************************************************************/ ++IMG_PCHAR RGXDevBVNCString(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/*! ++******************************************************************************* ++ ++ @Function DevDeInitRGX ++ ++ @Description ++ ++ Reset and deinitialise Chip ++ ++ @Input psDeviceNode - device info. 
structure ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++ ++#if !defined(NO_HARDWARE) ++ ++void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/*! ++******************************************************************************* ++ ++ @Function SORgxGpuUtilStatsRegister ++ ++ @Description SO Interface function called from the OS layer implementation. ++ Initialise data used to compute GPU utilisation statistics ++ for a particular user (identified by the handle passed as ++ argument). This function must be called only once for each ++ different user/handle. ++ ++ @Input phGpuUtilUser - Pointer to handle used to identify a user of ++ RGXGetGpuUtilStats ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser); ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function SORgxGpuUtilStatsUnregister ++ ++ @Description SO Interface function called from the OS layer implementation. ++ Free data previously used to compute GPU utilisation statistics ++ for a particular user (identified by the handle passed as ++ argument). ++ ++ @Input hGpuUtilUser - Handle used to identify a user of ++ RGXGetGpuUtilStats ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser); ++#endif /* !defined(NO_HARDWARE) */ ++ ++/*! ++ ******************************************************************************* ++ ++ @Function RGXInitCreateFWKernelMemoryContext ++ ++ @Description Called to perform initialisation during firmware kernel context ++ creation. ++ ++ @Input psDeviceNode device node ++ ******************************************************************************/ ++PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/*! ++ ******************************************************************************* ++ ++ @Function RGXDeInitDestroyFWKernelMemoryContext ++ ++ @Description Called to perform deinitialisation during firmware kernel ++ context destruction. ++ ++ @Input psDeviceNode device node ++ ******************************************************************************/ ++void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++#endif /* RGXINIT_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxkicksync.c b/drivers/gpu/drm/img-rogue/rgxkicksync.c +new file mode 100644 +index 000000000000..73f1b783f4d2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxkicksync.c +@@ -0,0 +1,794 @@ ++/*************************************************************************/ /*! ++@File rgxkicksync.c ++@Title Server side of the sync only kick API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "rgxkicksync.h" ++ ++#include "rgxdevice.h" ++#include "rgxmem.h" ++#include "rgxfwutils.h" ++#include "allocmem.h" ++#include "sync.h" ++#include "rgxhwperf.h" ++#include "ospvr_gputrace.h" ++ ++#include "sync_checkpoint.h" ++#include "sync_checkpoint_internal.h" ++ ++/* Enable this to dump the compiled list of UFOs prior to kick call */ ++#define ENABLE_KICKSYNC_UFO_DUMP 0 ++ ++//#define KICKSYNC_CHECKPOINT_DEBUG 1 ++ ++#if defined(KICKSYNC_CHECKPOINT_DEBUG) ++#define CHKPT_DBG(X) PVR_DPF(X) ++#else ++#define CHKPT_DBG(X) ++#endif ++ ++struct _RGX_SERVER_KICKSYNC_CONTEXT_ ++{ ++ PVRSRV_DEVICE_NODE * psDeviceNode; ++ RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; ++ DLLIST_NODE sListNode; ++ SYNC_ADDR_LIST sSyncAddrListFence; ++ SYNC_ADDR_LIST sSyncAddrListUpdate; ++ POS_LOCK hLock; ++}; ++ ++ ++PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32PackedCCBSizeU88, ++ IMG_UINT32 ui32ContextFlags, ++ RGX_SERVER_KICKSYNC_CONTEXT **ppsKickSyncContext) ++{ ++ PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice; ++ DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); ++ RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext; ++ RGX_COMMON_CONTEXT_INFO sInfo; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32CCBAllocSizeLog2, ui32CCBMaxAllocSizeLog2; ++ ++ memset(&sInfo, 0, sizeof(sInfo)); ++ ++ /* Prepare cleanup struct */ ++ * ppsKickSyncContext = NULL; ++ psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext)); ++ if (psKickSyncContext == NULL) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ eError = OSLockCreate(&psKickSyncContext->hLock); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto err_lockcreate; ++ } ++ ++ psKickSyncContext->psDeviceNode = psDeviceNode; ++ ++ ui32CCBAllocSizeLog2 = U32toU8_Unpack1(ui32PackedCCBSizeU88); ++ ui32CCBMaxAllocSizeLog2 = U32toU8_Unpack2(ui32PackedCCBSizeU88); ++ eError = FWCommonContextAllocate(psConnection, ++ psDeviceNode, ++ REQ_TYPE_KICKSYNC, ++ RGXFWIF_DM_GP, ++ hMemCtxPrivData, ++ NULL, ++ 0, ++ psFWMemContextMemDesc, ++ NULL, ++ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_KICKSYNC_CCB_SIZE_LOG2, ++ ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_KICKSYNC_CCB_MAX_SIZE_LOG2, ++ ui32ContextFlags, ++ 0, /* priority */ ++ 0, /* max deadline MS */ ++ 0, /* robustness address */ ++ & sInfo, ++ & psKickSyncContext->psServerCommonContext); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_contextalloc; ++ } ++ ++ OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock); ++ dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock); ++ ++ SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence); ++ SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate); ++ ++ * ppsKickSyncContext = psKickSyncContext; ++ return PVRSRV_OK; ++ ++fail_contextalloc: ++ OSLockDestroy(psKickSyncContext->hLock); ++err_lockcreate: ++ OSFreeMem(psKickSyncContext); ++ return eError; ++} ++ ++ ++PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext) ++{ ++ PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError; ++ ++ /* Check if the FW has finished with this resource ... */ ++ eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode, ++ psKickSyncContext->psServerCommonContext, ++ RGXFWIF_DM_GP, ++ PDUMP_FLAGS_NONE); ++ ++ if (eError == PVRSRV_ERROR_RETRY) ++ { ++ return eError; ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++ /* ... it has so we can free its resources */ ++ ++ OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock); ++ dllist_remove_node(&(psKickSyncContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock); ++ ++ FWCommonContextFree(psKickSyncContext->psServerCommonContext); ++ ++ SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence); ++ SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate); ++ ++ OSLockDestroy(psKickSyncContext->hLock); ++ ++ OSFreeMem(psKickSyncContext); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext, ++ RGX_CONTEXT_PROPERTY eContextProperty, ++ IMG_UINT64 ui64Input, ++ IMG_UINT64 *pui64Output) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ switch (eContextProperty) ++ { ++ case RGX_CONTEXT_PROPERTY_FLAGS: ++ { ++ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input; ++ ++ OSLockAcquire(psKickSyncContext->hLock); ++ eError = FWCommonContextSetFlags(psKickSyncContext->psServerCommonContext, ++ ui32ContextFlags); ++ ++ OSLockRelease(psKickSyncContext->hLock); ++ break; ++ } ++ ++ default: ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); ++ eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ } ++ } ++ ++ return eError; ++} ++ ++void DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock); ++ dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode); ++ ++ if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext) ++ { ++ DumpFWCommonContextInfo(psCurrentServerKickSyncCtx->psServerCommonContext, ++ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ } ++ } 
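++	/* The list read-lock is held across the whole walk so that contexts
++	 * cannot be created or destroyed underneath us (context create/destroy
++	 * take this same lock for write) while their common contexts are dumped.
++	 */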
++ OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock); ++} ++ ++IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ IMG_UINT32 ui32ContextBitMask = 0; ++ ++ OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock); ++ ++ dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode); ++ ++ if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext) ++ { ++ if (CheckStalledClientCommonContext(psCurrentServerKickSyncCtx->psServerCommonContext, RGX_KICK_TYPE_DM_GP) == PVRSRV_ERROR_CCCB_STALLED) ++ { ++ ui32ContextBitMask |= RGX_KICK_TYPE_DM_GP; ++ } ++ } ++ } ++ ++ OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock); ++ return ui32ContextBitMask; ++} ++ ++PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext, ++ IMG_UINT32 ui32ClientUpdateCount, ++ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, ++ IMG_UINT32 * paui32ClientUpdateOffset, ++ IMG_UINT32 * paui32ClientUpdateValue, ++ PVRSRV_FENCE iCheckFence, ++ PVRSRV_TIMELINE iUpdateTimeline, ++ PVRSRV_FENCE * piUpdateFence, ++ IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], ++ IMG_UINT32 ui32ExtJobRef) ++{ ++ RGXFWIF_KCCB_CMD sKickSyncKCCBCmd; ++ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1]; ++ PVRSRV_ERROR eError; ++ PVRSRV_ERROR eError2; ++ IMG_BOOL bCCBStateOpen = IMG_FALSE; ++ PRGXFWIF_UFO_ADDR *pauiClientFenceUFOAddress = NULL; ++ PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress = NULL; ++ IMG_UINT32 ui32ClientFenceCount = 0; ++ IMG_UINT32 *paui32ClientFenceValue = NULL; ++ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; ++ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr; ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psKickSyncContext->psServerCommonContext); ++ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext); ++ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); ++ IMG_UINT64 uiCheckFenceUID = 0; ++ IMG_UINT64 uiUpdateFenceUID = 0; ++ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; ++ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; ++ IMG_UINT32 ui32FenceSyncCheckpointCount = 0; ++ IMG_UINT32 ui32FenceTimelineUpdateValue = 0; ++ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; ++ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; ++ void *pvUpdateFenceFinaliseData = NULL; ++ ++ /* Ensure we haven't been given a null ptr to ++ * update values if we have been told we ++ * have dev var updates ++ */ ++ if (ui32ClientUpdateCount > 0) ++ { ++ PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, ++ "paui32ClientUpdateValue NULL but ui32ClientUpdateCount > 0", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ OSLockAcquire(psKickSyncContext->hLock); ++ eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate, ++ ui32ClientUpdateCount, ++ pauiClientUpdateUFODevVarBlock, ++ paui32ClientUpdateOffset); ++ ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_syncaddrlist; ++ } ++ ++ if (ui32ClientUpdateCount > 0) ++ { ++ pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs; ++ } ++ /* Ensure the string is null-terminated (Required for safety) */ ++ szUpdateFenceName[31] = '\0'; ++ ++ /* This will never be true if called from the bridge since piUpdateFence will always be valid */ ++ if (iUpdateTimeline >= 0 && 
!piUpdateFence)
++	{
++		eError = PVRSRV_ERROR_INVALID_PARAMS;
++		goto out_unlock;
++	}
++
++	CHKPT_DBG((PVR_DBG_ERROR,
++	           "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), "
++	           "psKickSyncContext->psDeviceNode->hSyncCheckpointContext=<%p>...",
++	           __func__, iCheckFence,
++	           (void*)psKickSyncContext->psDeviceNode->hSyncCheckpointContext));
++	/* Resolve the sync checkpoints that make up the input fence */
++	eError = SyncCheckpointResolveFence(psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
++	                                    iCheckFence,
++	                                    &ui32FenceSyncCheckpointCount,
++	                                    &apsFenceSyncCheckpoints,
++	                                    &uiCheckFenceUID,
++	                                    PDUMP_FLAGS_NONE);
++	if (eError != PVRSRV_OK)
++	{
++		goto fail_resolve_fence;
++	}
++
++	/* Create the output fence (if required) */
++	if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
++	{
++		CHKPT_DBG((PVR_DBG_ERROR,
++		           "%s: calling SyncCheckpointCreateFence (iUpdateTimeline=%d)...",
++		           __func__, iUpdateTimeline));
++		eError = SyncCheckpointCreateFence(psKickSyncContext->psDeviceNode,
++		                                   szUpdateFenceName,
++		                                   iUpdateTimeline,
++		                                   psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
++		                                   &iUpdateFence,
++		                                   &uiUpdateFenceUID,
++		                                   &pvUpdateFenceFinaliseData,
++		                                   &psUpdateSyncCheckpoint,
++		                                   (void*)&psFenceTimelineUpdateSync,
++		                                   &ui32FenceTimelineUpdateValue,
++		                                   PDUMP_FLAGS_NONE);
++		if (eError != PVRSRV_OK)
++		{
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)",
++			           __func__, eError));
++			goto fail_create_output_fence;
++		}
++		CHKPT_DBG((PVR_DBG_ERROR,
++		           "%s: ...returned from SyncCheckpointCreateFence "
++		           "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, "
++		           "ui32FenceTimelineUpdateValue=%u)",
++		           __func__, iUpdateFence, psFenceTimelineUpdateSync,
++		           ui32FenceTimelineUpdateValue));
++
++		/* Append the sync prim update for the timeline (if required) */
++		if (psFenceTimelineUpdateSync)
++		{
++			IMG_UINT32 *pui32TimelineUpdateWp = NULL;
++
++			/* Allocate memory to hold the list of update values (including our timeline update) */
++			pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*paui32ClientUpdateValue) * (ui32ClientUpdateCount+1));
++			if (!pui32IntAllocatedUpdateValues)
++			{
++				/* Failed to allocate memory */
++				eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++				goto fail_alloc_update_values_mem;
++			}
++			OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateCount+1));
++			/* Copy the update values into the new memory, then append our timeline update value */
++			OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32ClientUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32ClientUpdateCount);
++			/* Now set the additional update value */
++			pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32ClientUpdateCount;
++			*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
++			ui32ClientUpdateCount++;
++			/* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
++			paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
++#if defined(KICKSYNC_CHECKPOINT_DEBUG)
++			{
++				IMG_UINT32 iii;
++				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
++
++				for (iii=0; iii<ui32ClientUpdateCount; iii++)
++				{
++					CHKPT_DBG((PVR_DBG_ERROR,
++					           "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x",
++					           __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++					pui32Tmp++;
++				}
++			}
++#endif
++			/* Now append the timeline sync prim addr to the kicksync context update list */
++			SyncAddrListAppendSyncPrim(&psKickSyncContext->sSyncAddrListUpdate,
++			                           psFenceTimelineUpdateSync);
++		}
++	}
++
++	/* Reset number of fence syncs in kicksync context fence list to 0 */
++	SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence,
++	                     0, NULL, NULL);
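++	/* The fence list is rebuilt from scratch on every kick: the populate
++	 * call above empties it, and the append below re-adds only the
++	 * checkpoints resolved from this kick's input fence.
++	 */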
++
++	if (ui32FenceSyncCheckpointCount > 0)
++	{
++		/* Append the checks (from input fence) */
++		CHKPT_DBG((PVR_DBG_ERROR,
++		           "%s: Append %d sync checkpoints to KickSync Fence "
++		           "(&psKickSyncContext->sSyncAddrListFence=<%p>)...",
++		           __func__, ui32FenceSyncCheckpointCount,
++		           (void*)&psKickSyncContext->sSyncAddrListFence));
++		SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence,
++		                              ui32FenceSyncCheckpointCount,
++		                              apsFenceSyncCheckpoints);
++		if (!pauiClientFenceUFOAddress)
++		{
++			pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
++		}
++		ui32ClientFenceCount += ui32FenceSyncCheckpointCount;
++#if defined(KICKSYNC_CHECKPOINT_DEBUG)
++		{
++			IMG_UINT32 iii;
++			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientFenceUFOAddress;
++
++			for (iii=0; iii<ui32ClientFenceCount; iii++)
++			{
++				CHKPT_DBG((PVR_DBG_ERROR,
++				           "%s: pauiClientFenceUFOAddress[%d](<%p>) = 0x%x",
++				           __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++				pui32Tmp++;
++			}
++		}
++#endif
++	}
++
++	if (psUpdateSyncCheckpoint)
++	{
++		PVRSRV_ERROR eErr;
++
++		/* Append the update (from output fence) */
++		CHKPT_DBG((PVR_DBG_ERROR,
++		           "%s: Append 1 sync checkpoint to KickSync Update "
++		           "(&psKickSyncContext->sSyncAddrListUpdate=<%p>)...",
++		           __func__, (void*)&psKickSyncContext->sSyncAddrListUpdate));
++		eErr = SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListUpdate,
++		                                     1,
++		                                     &psUpdateSyncCheckpoint);
++		if (eErr != PVRSRV_OK)
++		{
++			CHKPT_DBG((PVR_DBG_ERROR,
++			           "%s: ...done. SyncAddrListAppendCheckpoints() returned error (%d)",
++			           __func__, eErr));
++		}
++		else
++		{
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done.", __func__));
++		}
++		if (!pauiClientUpdateUFOAddress)
++		{
++			pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
++		}
++		ui32ClientUpdateCount++;
++#if defined(KICKSYNC_CHECKPOINT_DEBUG)
++		{
++			IMG_UINT32 iii;
++			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientUpdateUFOAddress;
++
++			for (iii=0; iii<ui32ClientUpdateCount; iii++)
++			{
++				CHKPT_DBG((PVR_DBG_ERROR,
++				           "%s: pauiClientUpdateUFOAddress[%d](<%p>) = 0x%x",
++				           __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++				pui32Tmp++;
++			}
++		}
++#endif
++	}
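++	/* In the dump below, a UFO FW address with bit 0 set denotes a sync
++	 * checkpoint (checked/updated against the signalled state), while a
++	 * clear bit 0 denotes a sync prim with an explicit 32-bit value; this
++	 * is why the loops test ui32Addr & 0x1 before choosing what to print.
++	 */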
++#if (ENABLE_KICKSYNC_UFO_DUMP == 1)
++	PVR_DPF((PVR_DBG_ERROR, "%s: dumping KICKSYNC fence/updates syncs...",
++	         __func__));
++	{
++		IMG_UINT32 ii;
++		PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiClientFenceUFOAddress;
++		IMG_UINT32 *pui32TmpIntFenceValue = paui32ClientFenceValue;
++		PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiClientUpdateUFOAddress;
++		IMG_UINT32 *pui32TmpIntUpdateValue = paui32ClientUpdateValue;
++
++		/* Dump Fence syncs and Update syncs */
++		PVR_DPF((PVR_DBG_ERROR,
++		         "%s: Prepared %d KickSync fence syncs "
++		         "(&psKickSyncContext->sSyncAddrListFence=<%p>, "
++		         "pauiClientFenceUFOAddress=<%p>):",
++		         __func__, ui32ClientFenceCount,
++		         (void*)&psKickSyncContext->sSyncAddrListFence,
++		         (void*)pauiClientFenceUFOAddress));
++		for (ii=0; ii<ui32ClientFenceCount; ii++)
++		{
++			if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
++			{
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s: %d/%d<%p>. FWAddr=0x%x, "
++				         "CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
++				         __func__, ii + 1, ui32ClientFenceCount,
++				         (void*)psTmpIntFenceUFOAddress,
++				         psTmpIntFenceUFOAddress->ui32Addr));
++			}
++			else
++			{
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)",
++				         __func__, ii + 1, ui32ClientFenceCount,
++				         (void*)psTmpIntFenceUFOAddress,
++				         psTmpIntFenceUFOAddress->ui32Addr,
++				         *pui32TmpIntFenceValue,
++				         *pui32TmpIntFenceValue));
++				pui32TmpIntFenceValue++;
++			}
++			psTmpIntFenceUFOAddress++;
++		}
++		PVR_DPF((PVR_DBG_ERROR,
++		         "%s: Prepared %d KickSync update syncs "
++		         "(&psKickSyncContext->sSyncAddrListUpdate=<%p>, "
++		         "pauiClientUpdateUFOAddress=<%p>):",
++		         __func__, ui32ClientUpdateCount,
++		         (void*)&psKickSyncContext->sSyncAddrListUpdate,
++		         (void*)pauiClientUpdateUFOAddress));
++		for (ii=0; ii<ui32ClientUpdateCount; ii++)
++		{
++			CHKPT_DBG((PVR_DBG_ERROR,
++			           "%s: Line %d, psTmpIntUpdateUFOAddress=<%p>",
++			           __func__, __LINE__,
++			           (void*)psTmpIntUpdateUFOAddress));
++			CHKPT_DBG((PVR_DBG_ERROR,
++			           "%s: Line %d, pui32TmpIntUpdateValue=<%p>",
++			           __func__, __LINE__,
++			           (void*)pui32TmpIntUpdateValue));
++			if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
++			{
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s: %d/%d<%p>. FWAddr=0x%x, "
++				         "UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED",
++				         __func__, ii + 1, ui32ClientUpdateCount,
++				         (void*)psTmpIntUpdateUFOAddress,
++				         psTmpIntUpdateUFOAddress->ui32Addr));
++			}
++			else
++			{
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d",
++				         __func__, ii + 1, ui32ClientUpdateCount,
++				         (void*)psTmpIntUpdateUFOAddress,
++				         psTmpIntUpdateUFOAddress->ui32Addr,
++				         *pui32TmpIntUpdateValue));
++				pui32TmpIntUpdateValue++;
++			}
++			psTmpIntUpdateUFOAddress++;
++		}
++	}
++#endif
++
++	RGXCmdHelperInitCmdCCB(psDevInfo,
++	                       psClientCCB,
++	                       0, /* empty ui64FBSCEntryMask */
++	                       ui32ClientFenceCount,
++	                       pauiClientFenceUFOAddress,
++	                       paui32ClientFenceValue,
++	                       ui32ClientUpdateCount,
++	                       pauiClientUpdateUFOAddress,
++	                       paui32ClientUpdateValue,
++	                       0,
++	                       NULL,
++	                       NULL,
++	                       NULL,
++	                       NULL,
++	                       RGXFWIF_CCB_CMD_TYPE_NULL,
++	                       ui32ExtJobRef,
++	                       ui32IntJobRef,
++	                       PDUMP_FLAGS_NONE,
++	                       NULL,
++	                       "KickSync",
++	                       bCCBStateOpen,
++	                       asCmdHelperData);
++
++	eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
++	if (eError != PVRSRV_OK)
++	{
++		goto fail_cmdaquire;
++	}
++
++	/*
++	 * We should reserve space in the kernel CCB here and fill in the command
++	 * directly.
++	 * This is so if there isn't space in the kernel CCB we can return with
++	 * retry back to services client before we take any operations
++	 */
++
++	/*
++	 * We might only be kicking to flush out a padding packet, so only submit
++	 * the command if the create was successful
++	 */
++	if (eError == PVRSRV_OK)
++	{
++		/*
++		 * All the required resources are ready at this point, we can't fail so
++		 * take the required server sync operations and commit all the resources
++		 */
++		RGXCmdHelperReleaseCmdCCB(1,
++		                          asCmdHelperData,
++		                          "KickSync",
++		                          FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr);
++	}
++
++	/* Construct the kernel kicksync CCB command. */
++	sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
++	sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext);
++	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB);
++	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB);
++
++	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
++	sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
++
++	/*
++	 * Submit the kicksync command to the firmware.
++	 */
++	RGXSRV_HWPERF_ENQ(psKickSyncContext,
++	                  OSGetCurrentClientProcessIDKM(),
++	                  ui32FWCtx,
++	                  ui32ExtJobRef,
++	                  ui32IntJobRef,
++	                  RGX_HWPERF_KICK_TYPE_SYNC,
++	                  iCheckFence,
++	                  iUpdateFence,
++	                  iUpdateTimeline,
++	                  uiCheckFenceUID,
++	                  uiUpdateFenceUID,
++	                  NO_DEADLINE,
++	                  NO_CYCEST);
++
++	LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
++	{
++		eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice,
++		                             RGXFWIF_DM_GP,
++		                             & sKickSyncKCCBCmd,
++		                             PDUMP_FLAGS_NONE);
++		if (eError2 != PVRSRV_ERROR_RETRY)
++		{
++			break;
++		}
++		OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
++	} END_LOOP_UNTIL_TIMEOUT();
++
++	PVRGpuTraceEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice,
++	                        ui32FWCtx, ui32ExtJobRef, ui32IntJobRef,
++	                        RGX_HWPERF_KICK_TYPE_SYNC);
++
++	if (eError2 != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)",
++		         eError2));
++		if (eError == PVRSRV_OK)
++		{
++			eError = eError2;
++		}
++	}
++
++	/*
++	 * Now check eError (which may have returned an error from our earlier call
++	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
++	 * so we check it now...
++	 */
++	if (eError != PVRSRV_OK )
++	{
++		goto fail_cmdaquire;
++	}
++
++#if defined(NO_HARDWARE)
++	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
++	if (psUpdateSyncCheckpoint)
++	{
++		CHKPT_DBG((PVR_DBG_ERROR,
++		           "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x",
++		           __func__, (void*)psUpdateSyncCheckpoint,
++		           SyncCheckpointGetId(psUpdateSyncCheckpoint),
++		           SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
++		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
++	}
++	if (psFenceTimelineUpdateSync)
++	{
++		CHKPT_DBG((PVR_DBG_ERROR,
++		           "%s: Updating NOHW sync prim<%p> to %d",
++		           __func__, (void*)psFenceTimelineUpdateSync,
++		           ui32FenceTimelineUpdateValue));
++		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
++	}
++	SyncCheckpointNoHWUpdateTimelines(NULL);
++#endif
++	/* Drop the references taken on the sync checkpoints in the
++	 * resolved input fence */
++	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
++	                             apsFenceSyncCheckpoints);
++	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
++	if (apsFenceSyncCheckpoints)
++	{
++		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
++	}
++	/* Free memory allocated to hold the internal list of update values */
++	if (pui32IntAllocatedUpdateValues)
++	{
++		OSFreeMem(pui32IntAllocatedUpdateValues);
++		pui32IntAllocatedUpdateValues = NULL;
++	}
++
++	*piUpdateFence = iUpdateFence;
++	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
++	{
++		SyncCheckpointFinaliseFence(psKickSyncContext->psDeviceNode, iUpdateFence,
++		                            pvUpdateFenceFinaliseData,
++		                            psUpdateSyncCheckpoint, szUpdateFenceName);
++	}
++
++	OSLockRelease(psKickSyncContext->hLock);
++	return PVRSRV_OK;
++
++fail_cmdaquire:
++	SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListFence);
++	SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate);
++	if (iUpdateFence != PVRSRV_NO_FENCE)
++	{
++		SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
++	}
++
++	/* Free memory allocated to hold update values */
++	if (pui32IntAllocatedUpdateValues)
++	{
++		OSFreeMem(pui32IntAllocatedUpdateValues);
++	}
++fail_alloc_update_values_mem:
++fail_create_output_fence:
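++	/* From this point the unwind is shared with the success path: the
++	 * references taken on the resolved checkpoints are dropped and the
++	 * resolve buffers freed whether or not the kick was submitted.
++	 */
++	/* Drop the references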
taken on the sync checkpoints in the ++ * resolved input fence */ ++ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, ++ apsFenceSyncCheckpoints); ++ /* Free memory allocated to hold the resolved fence's checkpoints */ ++ if (apsFenceSyncCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); ++ } ++fail_resolve_fence: ++fail_syncaddrlist: ++out_unlock: ++ OSLockRelease(psKickSyncContext->hLock); ++ return eError; ++} ++ ++/**************************************************************************//** ++ End of file (rgxkicksync.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxkicksync.h b/drivers/gpu/drm/img-rogue/rgxkicksync.h +new file mode 100644 +index 000000000000..57b49a03da5c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxkicksync.h +@@ -0,0 +1,128 @@ ++/*************************************************************************/ /*! ++@File rgxkicksync.h ++@Title Server side of the sync only kick API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(RGXKICKSYNC_H) ++#define RGXKICKSYNC_H ++ ++#include "pvrsrv_error.h" ++#include "connection_server.h" ++#include "sync_server.h" ++#include "rgxdevice.h" ++ ++ ++typedef struct _RGX_SERVER_KICKSYNC_CONTEXT_ RGX_SERVER_KICKSYNC_CONTEXT; ++ ++/**************************************************************************/ /*! ++@Function DumpKickSyncCtxtsInfo ++@Description Function that dumps debug info of kick sync ctxs on this device ++@Return none ++*/ /**************************************************************************/ ++void ++DumpKickSyncCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel); ++ ++/**************************************************************************/ /*! ++@Function CheckForStalledClientKickSyncCtxt ++@Description Function that checks if a kick sync client is stalled ++@Return RGX_KICK_TYPE_DM_GP on stalled context. Otherwise, 0 ++*/ /**************************************************************************/ ++IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++/**************************************************************************/ /*! ++@Function PVRSRVRGXCreateKickSyncContextKM ++@Description Server-side implementation of RGXCreateKicksyncContext ++@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32PackedCCBSizeU88, ++ IMG_UINT32 ui32ContextFlags, ++ RGX_SERVER_KICKSYNC_CONTEXT ** ppsKicksyncContext); ++ ++ ++ ++/**************************************************************************/ /*! ++@Function PVRSRVRGXDestroyKickSyncContextKM ++@Description Server-side implementation of RGXDestroyKicksyncContext ++@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext); ++ ++/**************************************************************************/ /*! ++@Function PVRSRVRGXSetKickSyncContextPropertyKM ++@Description Server-side implementation of RGXSetKickSyncContextProperty ++@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code ++ */ /**************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXSetKickSyncContextPropertyKM(RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContext, ++ RGX_CONTEXT_PROPERTY eContextProperty, ++ IMG_UINT64 ui64Input, ++ IMG_UINT64 *pui64Output); ++ ++/**************************************************************************/ /*! ++@Function PVRSRVRGXKickSyncKM ++@Description Kicks a sync only command ++@Return PVRSRV_OK on success. 
Otherwise, a PVRSRV_ error code
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext,
++                    IMG_UINT32 ui32ClientUpdateCount,
++                    SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock,
++                    IMG_UINT32 * paui32ClientUpdateDevVarOffset,
++                    IMG_UINT32 * paui32ClientUpdateValue,
++                    PVRSRV_FENCE iCheckFence,
++                    PVRSRV_TIMELINE iUpdateTimeline,
++                    PVRSRV_FENCE * piUpdateFence,
++                    IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH],
++                    IMG_UINT32 ui32ExtJobRef);
++
++#endif /* RGXKICKSYNC_H */
++
++/**************************************************************************//**
++ End of file (rgxkicksync.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/rgxlayer.h b/drivers/gpu/drm/img-rogue/rgxlayer.h
+new file mode 100644
+index 000000000000..431a7b6896a6
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxlayer.h
+@@ -0,0 +1,812 @@
++/*************************************************************************/ /*!
++@File
++@Title          Header for Services abstraction layer
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Declaration of an interface layer used to abstract code that
++                can be compiled outside of the DDK, potentially in a
++                completely different OS.
++                All the headers included by this file must also be copied to
++                the alternative source tree.
++                All the functions declared here must have a DDK implementation
++                inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and
++                another different implementation in case they are used outside
++                of the DDK.
++                All of the functions accept as a first parameter a
++                "const void *hPrivate" argument. It should be used to pass
++                around any implementation specific data required.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXLAYER_H) ++#define RGXLAYER_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "img_elf.h" ++#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */ ++#include "pvrsrv_firmware_boot.h" ++#include "rgx_bvnc_defs_km.h" ++#include "rgx_fw_info.h" ++#include "rgx_fwif_shared.h" /* includes rgx_common.h and mem_types.h */ ++#include "rgx_meta.h" ++#include "rgx_mips.h" ++#include "rgx_riscv.h" ++ ++#include "rgxdefs_km.h" ++/* includes: ++ * rgx_cr_defs_km.h, ++ * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h), ++ * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h) ++ */ ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXMemCopy ++ ++ @Description MemCopy implementation ++ ++ @Input hPrivate : Implementation specific data ++ @Input pvDst : Pointer to the destination ++ @Input pvSrc : Pointer to the source location ++ @Input uiSize : The amount of memory to copy in bytes ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXMemCopy(const void *hPrivate, ++ void *pvDst, ++ void *pvSrc, ++ size_t uiSize); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXMemSet ++ ++ @Description MemSet implementation ++ ++ @Input hPrivate : Implementation specific data ++ @Input pvDst : Pointer to the start of the memory region ++ @Input ui8Value : The value to be written ++ @Input uiSize : The number of bytes to be set to ui8Value ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXMemSet(const void *hPrivate, ++ void *pvDst, ++ IMG_UINT8 ui8Value, ++ size_t uiSize); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXCommentLog ++ ++ @Description Generic log function used for debugging or other purposes ++ ++ @Input hPrivate : Implementation specific data ++ @Input pszString : Message to be printed ++ @Input ... : Variadic arguments ++ ++ @Return void ++ ++******************************************************************************/ ++__printf(2, 3) ++void RGXCommentLog(const void *hPrivate, ++ const IMG_CHAR *pszString, ++ ...); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXErrorLog ++ ++ @Description Generic error log function used for debugging or other purposes ++ ++ @Input hPrivate : Implementation specific data ++ @Input pszString : Message to be printed ++ @Input ... : Variadic arguments ++ ++ @Return void ++ ++******************************************************************************/ ++__printf(2, 3) ++void RGXErrorLog(const void *hPrivate, ++ const IMG_CHAR *pszString, ++ ...); ++ ++/*! 
++*******************************************************************************
++
++ @Function       RGXGetOSPageSize
++
++ @Description    Return the page size used by the OS
++
++ @Input          hPrivate : Implementation specific data
++
++ @Return         IMG_UINT32
++
++******************************************************************************/
++
++IMG_UINT32 RGXGetOSPageSize(const void *hPrivate);
++
++/* This is used to check if a device has a specific feature, using hPrivate.
++ * Should be used instead of calling RGXDeviceHasFeature. */
++#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \
++		RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK)
++
++/* This is used to check if a specific feature with value is enabled.
++ * Should be used instead of calling RGXDeviceGetFeatureValue. */
++#define RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, Feature) \
++		(RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX) >= 0)
++
++/* This is used to get the value of a specific feature from hPrivate.
++ * Should be used instead of calling RGXDeviceGetFeatureValue. */
++#define RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, Feature) \
++		RGXDeviceGetFeatureValue(hPrivate, RGX_FEATURE_##Feature##_IDX)
++
++/*!
++*******************************************************************************
++
++ @Function       RGXDeviceGetFeatureValue
++
++ @Description    Returns the value of a device feature that carries a value
++
++ @Input          hPrivate : Implementation specific data
++ @Input          ui64Feature : Feature with values to check
++
++ @Return         Value >= 0 if the given feature is available, -1 otherwise
++
++******************************************************************************/
++IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature);
++
++/*!
++*******************************************************************************
++
++ @Function       RGXDeviceHasFeature
++
++ @Description    Checks if a device has a particular feature
++
++ @Input          hPrivate : Implementation specific data
++ @Input          ui64Feature : Feature to check
++
++ @Return         IMG_TRUE if the given feature is available, IMG_FALSE otherwise
++
++******************************************************************************/
++IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature);
++
++/*!
++*******************************************************************************
++
++ @Function       RGXGetFWCorememSize
++
++ @Description    Get the FW coremem size
++
++ @Input          hPrivate : Implementation specific data
++
++ @Return         FW coremem size
++
++******************************************************************************/
++IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate);
++
++/*!
++*******************************************************************************
++
++ @Function       RGXWriteReg32/64
++
++ @Description    Write a value to a 32/64 bit RGX register
++
++ @Input          hPrivate : Implementation specific data
++ @Input          ui32RegAddr : Register offset inside the register bank
++ @Input          ui32/64RegValue : New register value
++
++ @Return         void
++
++******************************************************************************/
++void RGXWriteReg32(const void *hPrivate,
++                   IMG_UINT32 ui32RegAddr,
++                   IMG_UINT32 ui32RegValue);
++
++void RGXWriteReg64(const void *hPrivate,
++                   IMG_UINT32 ui32RegAddr,
++                   IMG_UINT64 ui64RegValue);
++
++/*!
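++*/
++
++/* Usage sketch for the feature-query macros above; the feature names are
++ * illustrative (any RGX_FEATURE_* name with a _BIT_MASK or _IDX define
++ * applies):
++ *
++ *   if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT))
++ *   {
++ *       // boolean feature present: take the VIVT SLC path
++ *   }
++ *
++ *   if (RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, PHYS_BUS_WIDTH))
++ *   {
++ *       IMG_INT32 i32BusWidth =
++ *           RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, PHYS_BUS_WIDTH);
++ *       // value-carrying feature present: i32BusWidth >= 0
++ *   }
++ */
++
++/*!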
++*******************************************************************************
++
++ @Function       RGXReadReg32/64
++
++ @Description    Read a 32/64 bit RGX register
++
++ @Input          hPrivate : Implementation specific data
++ @Input          ui32RegAddr : Register offset inside the register bank
++
++ @Return         Register value
++
++******************************************************************************/
++IMG_UINT32 RGXReadReg32(const void *hPrivate,
++                        IMG_UINT32 ui32RegAddr);
++
++IMG_UINT64 RGXReadReg64(const void *hPrivate,
++                        IMG_UINT32 ui32RegAddr);
++
++/*!
++*******************************************************************************
++
++ @Function       RGXReadModifyWriteReg64
++
++ @Description    Read-modify-write a 64 bit RGX register
++
++ @Input          hPrivate : Implementation specific data.
++ @Input          ui32RegAddr : Register offset inside the register bank.
++ @Input          ui64RegValue : New register value.
++ @Input          ui64RegKeepMask : Keep the bits set in the mask.
++
++ @Return         Always returns PVRSRV_OK
++
++******************************************************************************/
++IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
++                                   IMG_UINT32 ui32RegAddr,
++                                   IMG_UINT64 ui64RegValue,
++                                   IMG_UINT64 ui64RegKeepMask);
++
++/*!
++*******************************************************************************
++
++ @Function       RGXPollReg32/64
++
++ @Description    Poll on a 32/64 bit RGX register until some bits are set/unset
++
++ @Input          hPrivate : Implementation specific data
++ @Input          ui32RegAddr : Register offset inside the register bank
++ @Input          ui32/64RegValue : Value expected from the register
++ @Input          ui32/64RegMask : Only the bits set in this mask will be
++                                  checked against uiRegValue
++
++ @Return         PVRSRV_OK if the poll succeeds,
++                 PVRSRV_ERROR_TIMEOUT if the poll takes too long
++
++******************************************************************************/
++PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
++                          IMG_UINT32 ui32RegAddr,
++                          IMG_UINT32 ui32RegValue,
++                          IMG_UINT32 ui32RegMask);
++
++PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
++                          IMG_UINT32 ui32RegAddr,
++                          IMG_UINT64 ui64RegValue,
++                          IMG_UINT64 ui64RegMask);
++
++/*!
++*******************************************************************************
++
++ @Function       RGXWaitCycles
++
++ @Description    Wait for a number of GPU cycles and/or microseconds
++
++ @Input          hPrivate : Implementation specific data
++ @Input          ui32Cycles : Number of GPU cycles to wait for in pdumps,
++                              it can also be used when running driver-live
++                              if desired (ignoring the next parameter)
++ @Input          ui32WaitUs : Number of microseconds to wait for when running
++                              driver-live
++
++ @Return         void
++
++******************************************************************************/
++void RGXWaitCycles(const void *hPrivate,
++                   IMG_UINT32 ui32Cycles,
++                   IMG_UINT32 ui32WaitUs);
++
++/*!
++*******************************************************************************
++
++ @Function       RGXAcquireKernelMMUPC
++
++ @Description    Acquire the Kernel MMU Page Catalogue device physical address
++
++ @Input          hPrivate : Implementation specific data
++ @Input          psPCAddr : Returned page catalog address
++
++ @Return         void
++
++******************************************************************************/
++void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr);
++
++/*!
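++*/
++
++/* Semantics sketch for the read-modify-write helper above: bits set in
++ * ui64RegKeepMask keep their current register value, all other bits are
++ * taken from ui64RegValue. Roughly equivalent to:
++ *
++ *   IMG_UINT64 ui64Old = RGXReadReg64(hPrivate, ui32RegAddr);
++ *   IMG_UINT64 ui64New = (ui64Old & ui64RegKeepMask) |
++ *                        (ui64RegValue & ~ui64RegKeepMask);
++ *   RGXWriteReg64(hPrivate, ui32RegAddr, ui64New);
++ */
++
++/*!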
++******************************************************************************* ++ ++ @Function RGXWriteKernelMMUPC32/64 ++ ++ @Description Write the Kernel MMU Page Catalogue to the 32/64 bit ++ RGX register passed as argument. ++ In a driver-live scenario without PDump these functions ++ are the same as RGXWriteReg32/64 and they don't need ++ to be reimplemented. ++ ++ @Input hPrivate : Implementation specific data ++ @Input ui32PCReg : Register offset inside the register bank ++ @Input ui32AlignShift : PC register alignshift ++ @Input ui32Shift : PC register shift ++ @Input ui32/64PCVal : Page catalog value (aligned and shifted) ++ ++ @Return void ++ ++******************************************************************************/ ++#if defined(PDUMP) ++void RGXWriteKernelMMUPC64(const void *hPrivate, ++ IMG_UINT32 ui32PCReg, ++ IMG_UINT32 ui32PCRegAlignShift, ++ IMG_UINT32 ui32PCRegShift, ++ IMG_UINT64 ui64PCVal); ++ ++void RGXWriteKernelMMUPC32(const void *hPrivate, ++ IMG_UINT32 ui32PCReg, ++ IMG_UINT32 ui32PCRegAlignShift, ++ IMG_UINT32 ui32PCRegShift, ++ IMG_UINT32 ui32PCVal); ++#else /* defined(PDUMP) */ ++ ++#define RGXWriteKernelMMUPC64(priv, pcreg, alignshift, shift, pcval) \ ++ RGXWriteReg64(priv, pcreg, pcval) ++ ++#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \ ++ RGXWriteReg32(priv, pcreg, pcval) ++ ++#endif /* defined(PDUMP) */ ++ ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXAcquireGPURegsAddr ++ ++ @Description Acquire the GPU registers base device physical address ++ ++ @Input hPrivate : Implementation specific data ++ @Input psGPURegsAddr : Returned GPU registers base address ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXMIPSWrapperConfig ++ ++ @Description Write GPU register bank transaction ID and MIPS boot mode ++ to the MIPS wrapper config register (passed as argument). ++ In a driver-live scenario without PDump this is the same as ++ RGXWriteReg64 and it doesn't need to be reimplemented. ++ ++ @Input hPrivate : Implementation specific data ++ @Input ui32RegAddr : Register offset inside the register bank ++ @Input ui64GPURegsAddr : GPU registers base address ++ @Input ui32GPURegsAlign : Register bank transactions alignment ++ @Input ui32BootMode : Mips BOOT ISA mode ++ ++ @Return void ++ ++******************************************************************************/ ++#if defined(PDUMP) ++void RGXMIPSWrapperConfig(const void *hPrivate, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT64 ui64GPURegsAddr, ++ IMG_UINT32 ui32GPURegsAlign, ++ IMG_UINT32 ui32BootMode); ++#else ++#define RGXMIPSWrapperConfig(priv, regaddr, gpuregsaddr, gpuregsalign, bootmode) \ ++ RGXWriteReg64(priv, regaddr, ((gpuregsaddr) >> (gpuregsalign)) | (bootmode)) ++#endif ++ ++/*! 
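++*/
++
++/* Caller sketch for the page-catalogue helpers above; the register offset
++ * and shift variables are illustrative. As the ui32/64PCVal parameter notes,
++ * the caller supplies the address already aligned and shifted:
++ *
++ *   IMG_DEV_PHYADDR sPCAddr;
++ *   IMG_UINT64 ui64PCVal;
++ *
++ *   RGXAcquireKernelMMUPC(hPrivate, &sPCAddr);
++ *   ui64PCVal = (sPCAddr.uiAddr >> ui32AlignShift) << ui32Shift;
++ *   RGXWriteKernelMMUPC64(hPrivate, ui32PCReg, ui32AlignShift, ui32Shift,
++ *                         ui64PCVal);
++ */
++
++/*!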
++******************************************************************************* ++ ++ @Function RGXAcquireBootRemapAddr ++ ++ @Description Acquire the device physical address of the MIPS bootloader ++ accessed through remap region ++ ++ @Input hPrivate : Implementation specific data ++ @Output psBootRemapAddr : Base address of the remapped bootloader ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXBootRemapConfig ++ ++ @Description Configure the bootloader remap registers passed as arguments. ++ In a driver-live scenario without PDump this is the same as ++ two RGXWriteReg64 and it doesn't need to be reimplemented. ++ ++ @Input hPrivate : Implementation specific data ++ @Input ui32Config1RegAddr : Remap config1 register offset ++ @Input ui64Config1RegValue : Remap config1 register value ++ @Input ui32Config2RegAddr : Remap config2 register offset ++ @Input ui64Config2PhyAddr : Output remapped aligned physical address ++ @Input ui64Config2PhyMask : Mask for the output physical address ++ @Input ui64Config2Settings : Extra settings for this remap region ++ ++ @Return void ++ ++******************************************************************************/ ++#if defined(PDUMP) ++void RGXBootRemapConfig(const void *hPrivate, ++ IMG_UINT32 ui32Config1RegAddr, ++ IMG_UINT64 ui64Config1RegValue, ++ IMG_UINT32 ui32Config2RegAddr, ++ IMG_UINT64 ui64Config2PhyAddr, ++ IMG_UINT64 ui64Config2PhyMask, ++ IMG_UINT64 ui64Config2Settings); ++#else ++#define RGXBootRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ ++ RGXWriteReg64(priv, c1reg, (c1val)); \ ++ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ ++ } while (0) ++#endif ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXAcquireCodeRemapAddr ++ ++ @Description Acquire the device physical address of the MIPS code ++ accessed through remap region ++ ++ @Input hPrivate : Implementation specific data ++ @Output psCodeRemapAddr : Base address of the remapped code ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXCodeRemapConfig ++ ++ @Description Configure the code remap registers passed as arguments. ++ In a driver-live scenario without PDump this is the same as ++ two RGXWriteReg64 and it doesn't need to be reimplemented. 
++ ++ @Input hPrivate : Implementation specific data ++ @Input ui32Config1RegAddr : Remap config1 register offset ++ @Input ui64Config1RegValue : Remap config1 register value ++ @Input ui32Config2RegAddr : Remap config2 register offset ++ @Input ui64Config2PhyAddr : Output remapped aligned physical address ++ @Input ui64Config2PhyMask : Mask for the output physical address ++ @Input ui64Config2Settings : Extra settings for this remap region ++ ++ @Return void ++ ++******************************************************************************/ ++#if defined(PDUMP) ++void RGXCodeRemapConfig(const void *hPrivate, ++ IMG_UINT32 ui32Config1RegAddr, ++ IMG_UINT64 ui64Config1RegValue, ++ IMG_UINT32 ui32Config2RegAddr, ++ IMG_UINT64 ui64Config2PhyAddr, ++ IMG_UINT64 ui64Config2PhyMask, ++ IMG_UINT64 ui64Config2Settings); ++#else ++#define RGXCodeRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ ++ RGXWriteReg64(priv, c1reg, (c1val)); \ ++ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ ++ } while (0) ++#endif ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXAcquireDataRemapAddr ++ ++ @Description Acquire the device physical address of the MIPS data ++ accessed through remap region ++ ++ @Input hPrivate : Implementation specific data ++ @Output psDataRemapAddr : Base address of the remapped data ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXDataRemapConfig ++ ++ @Description Configure the data remap registers passed as arguments. ++ In a driver-live scenario without PDump this is the same as ++ two RGXWriteReg64 and it doesn't need to be reimplemented. ++ ++ @Input hPrivate : Implementation specific data ++ @Input ui32Config1RegAddr : Remap config1 register offset ++ @Input ui64Config1RegValue : Remap config1 register value ++ @Input ui32Config2RegAddr : Remap config2 register offset ++ @Input ui64Config2PhyAddr : Output remapped aligned physical address ++ @Input ui64Config2PhyMask : Mask for the output physical address ++ @Input ui64Config2Settings : Extra settings for this remap region ++ ++ @Return void ++ ++******************************************************************************/ ++#if defined(PDUMP) ++void RGXDataRemapConfig(const void *hPrivate, ++ IMG_UINT32 ui32Config1RegAddr, ++ IMG_UINT64 ui64Config1RegValue, ++ IMG_UINT32 ui32Config2RegAddr, ++ IMG_UINT64 ui64Config2PhyAddr, ++ IMG_UINT64 ui64Config2PhyMask, ++ IMG_UINT64 ui64Config2Settings); ++#else ++#define RGXDataRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \ ++ RGXWriteReg64(priv, c1reg, (c1val)); \ ++ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \ ++ } while (0) ++#endif ++ ++/*! 
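++*/
++
++/* Call sketch for the remap helpers above: without PDump each *RemapConfig
++ * expands to two 64-bit register writes (see the macros). The register
++ * offsets, mask and settings values below are illustrative placeholders:
++ *
++ *   IMG_DEV_PHYADDR sDataAddr;
++ *
++ *   RGXAcquireDataRemapAddr(hPrivate, &sDataAddr);
++ *   RGXDataRemapConfig(hPrivate,
++ *                      ui32DataRemapConfig1Reg, ui64Config1Value,
++ *                      ui32DataRemapConfig2Reg,
++ *                      sDataAddr.uiAddr, ui64PhyMask, ui64Settings);
++ */
++
++/*!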
++*******************************************************************************
++
++ @Function       RGXAcquireTrampolineRemapAddr
++
++ @Description    Acquire the device physical address of the MIPS trampoline
++                 accessed through remap region
++
++ @Input          hPrivate : Implementation specific data
++ @Output         psTrampolineRemapAddr: Base address of the remapped trampoline
++
++ @Return         void
++
++******************************************************************************/
++void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr);
++
++/*!
++*******************************************************************************
++
++ @Function       RGXTrampolineRemapConfig
++
++ @Description    Configure the trampoline remap registers passed as arguments.
++                 In a driver-live scenario without PDump this is the same as
++                 two RGXWriteReg64 and it doesn't need to be reimplemented.
++
++ @Input          hPrivate : Implementation specific data
++ @Input          ui32Config1RegAddr : Remap config1 register offset
++ @Input          ui64Config1RegValue : Remap config1 register value
++ @Input          ui32Config2RegAddr : Remap config2 register offset
++ @Input          ui64Config2PhyAddr : Output remapped aligned physical address
++ @Input          ui64Config2PhyMask : Mask for the output physical address
++ @Input          ui64Config2Settings : Extra settings for this remap region
++
++ @Return         void
++
++******************************************************************************/
++#if defined(PDUMP)
++void RGXTrampolineRemapConfig(const void *hPrivate,
++                              IMG_UINT32 ui32Config1RegAddr,
++                              IMG_UINT64 ui64Config1RegValue,
++                              IMG_UINT32 ui32Config2RegAddr,
++                              IMG_UINT64 ui64Config2PhyAddr,
++                              IMG_UINT64 ui64Config2PhyMask,
++                              IMG_UINT64 ui64Config2Settings);
++#else
++#define RGXTrampolineRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
++		RGXWriteReg64(priv, c1reg, (c1val)); \
++		RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
++	} while (0)
++#endif
++
++/*!
++*******************************************************************************
++
++ @Function       RGXDoFWSlaveBoot
++
++ @Description    Returns whether or not a FW Slave Boot is required
++                 while powering on
++
++ @Input          hPrivate : Implementation specific data
++
++ @Return         IMG_BOOL
++
++******************************************************************************/
++IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate);
++
++/*!
++*******************************************************************************
++
++ @Function       RGXFabricCoherencyTest
++
++ @Description    Performs a coherency test
++
++ @Input          hPrivate : Implementation specific data
++
++ @Return         PVRSRV_OK if the test succeeds,
++                 PVRSRV_ERROR_INIT_FAILURE if the test fails at some point
++
++******************************************************************************/
++PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate);
++
++/* This is used to check if a specific ERN/BRN is enabled from hPrivate.
++ * Should be used instead of calling RGXDeviceHasErnBrn. */
++#define RGX_DEVICE_HAS_ERN(hPrivate, ERN) \
++		RGXDeviceHasErnBrn(hPrivate, HW_ERN_##ERN##_BIT_MASK)
++
++#define RGX_DEVICE_HAS_BRN(hPrivate, BRN) \
++		RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_##BRN##_BIT_MASK)
++
++/*!
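++*/
++
++/* Usage sketch for the ERN/BRN macros above; the workaround numbers are
++ * illustrative (any HW_ERN_nnn or FIX_HW_BRN_nnn bit-mask define applies):
++ *
++ *   if (RGX_DEVICE_HAS_BRN(hPrivate, 12345))
++ *   {
++ *       // hardware bug present on this core: take the workaround path
++ *   }
++ *   if (RGX_DEVICE_HAS_ERN(hPrivate, 12345))
++ *   {
++ *       // enhancement present: the fast path can be used
++ *   }
++ */
++
++/*!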
++******************************************************************************* ++ ++ @Function RGXDeviceHasErnBrn ++ ++ @Description Checks if a device has a particular errata ++ ++ @Input hPrivate : Implementation specific data ++ @Input ui64ErnsBrns : Flags to check ++ ++ @Return IMG_TRUE if the given errata is available, IMG_FALSE otherwise ++ ++******************************************************************************/ ++IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXGetDeviceSLCBanks ++ ++ @Description Returns the number of SLC banks used by the device ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return Number of SLC banks ++ ++******************************************************************************/ ++IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXGetDeviceCacheLineSize ++ ++ @Description Returns the device cache line size ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return Cache line size ++ ++******************************************************************************/ ++IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXGetDevicePhysBusWidth ++ ++ @Description Returns the device physical bus width ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return Physical bus width ++ ++******************************************************************************/ ++IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXDevicePA0IsValid ++ ++ @Description Returns true if the device physical address 0x0 is a valid ++ address and can be accessed by the GPU. ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return IMG_TRUE if device physical address 0x0 is a valid address, ++ IMG_FALSE otherwise ++ ++******************************************************************************/ ++IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXAcquireBootCodeAddr ++ ++ @Description Acquire the device virtual address of the RISCV boot code ++ ++ @Input hPrivate : Implementation specific data ++ @Output psBootCodeAddr : Boot code base address ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr); ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXAcquireBootDataAddr ++ ++ @Description Acquire the device virtual address of the RISCV boot data ++ ++ @Input hPrivate : Implementation specific data ++ @Output psBootDataAddr : Boot data base address ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr); ++ ++/*! 
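++*/
++
++/* Unit note for the value queries above: RGXGetDeviceCacheLineSize is backed
++ * by the SLC_CACHE_LINE_SIZE_BITS feature value (see the implementation in
++ * rgxlayer_impl.c), so it reports bits, not bytes; a byte count needs a
++ * divide by 8:
++ *
++ *   IMG_UINT32 ui32LineBytes = RGXGetDeviceCacheLineSize(hPrivate) / 8;
++ */
++
++/*!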
++******************************************************************************* ++ ++ @Function RGXDeviceAckIrq ++ ++ @Description Checks the implementation specific IRQ status register, ++ clearing it if necessary and returning the IRQ status. ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return: IRQ status ++ ++******************************************************************************/ ++IMG_BOOL RGXDeviceAckIrq(const void *hPrivate); ++ ++#if defined(__cplusplus) ++} ++#endif ++ ++#endif /* RGXLAYER_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxlayer_impl.c b/drivers/gpu/drm/img-rogue/rgxlayer_impl.c +new file mode 100644 +index 000000000000..6c421badd8dd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxlayer_impl.c +@@ -0,0 +1,1318 @@ ++/*************************************************************************/ /*! ++@File ++@Title DDK implementation of the Services abstraction layer ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description DDK implementation of the Services abstraction layer ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include "rgxlayer_impl.h"
++#include "osfunc.h"
++#include "pdump_km.h"
++#include "rgxfwutils.h"
++#include "rgxfwimageutils.h"
++#include "devicemem.h"
++#include "cache_km.h"
++#include "pmr.h"
++
++#if defined(PDUMP)
++#include <stdarg.h>
++#endif
++
++void RGXMemCopy(const void *hPrivate,
++                void *pvDst,
++                void *pvSrc,
++                size_t uiSize)
++{
++	PVR_UNREFERENCED_PARAMETER(hPrivate);
++	OSDeviceMemCopy(pvDst, pvSrc, uiSize);
++}
++
++void RGXMemSet(const void *hPrivate,
++               void *pvDst,
++               IMG_UINT8 ui8Value,
++               size_t uiSize)
++{
++	PVR_UNREFERENCED_PARAMETER(hPrivate);
++	OSDeviceMemSet(pvDst, ui8Value, uiSize);
++}
++
++void RGXCommentLog(const void *hPrivate,
++                   const IMG_CHAR *pszString,
++                   ...)
++{
++#if defined(PDUMP)
++	RGX_LAYER_PARAMS *psParams;
++	PVRSRV_RGXDEV_INFO *psDevInfo;
++	va_list argList;
++	va_start(argList, pszString);
++
++	PVR_ASSERT(hPrivate != NULL);
++	psParams = (RGX_LAYER_PARAMS*)hPrivate;
++	psDevInfo = psParams->psDevInfo;
++
++	PDumpCommentWithFlagsVA(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, pszString, argList);
++	va_end(argList);
++#else
++	PVR_UNREFERENCED_PARAMETER(hPrivate);
++	PVR_UNREFERENCED_PARAMETER(pszString);
++#endif
++}
++
++void RGXErrorLog(const void *hPrivate,
++                 const IMG_CHAR *pszString,
++                 ...)
++{
++	IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
++	va_list argList;
++
++	PVR_UNREFERENCED_PARAMETER(hPrivate);
++
++	va_start(argList, pszString);
++	vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList);
++	va_end(argList);
++
++	PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer));
++}
++
++IMG_UINT32 RGXGetOSPageSize(const void *hPrivate)
++{
++	PVR_UNREFERENCED_PARAMETER(hPrivate);
++	return OSGetPageSize();
++}
++
++IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate)
++{
++#if defined(RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX)
++	RGX_LAYER_PARAMS *psParams;
++	PVRSRV_RGXDEV_INFO *psDevInfo;
++	IMG_UINT32 ui32CorememSize = 0;
++
++	PVR_ASSERT(hPrivate != NULL);
++
++	psParams = (RGX_LAYER_PARAMS*)hPrivate;
++	psDevInfo = psParams->psDevInfo;
++
++	if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
++	{
++		ui32CorememSize = RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE);
++	}
++
++	return ui32CorememSize;
++#else
++	PVR_UNREFERENCED_PARAMETER(hPrivate);
++
++	return 0U;
++#endif
++}
++
++void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
++{
++	RGX_LAYER_PARAMS *psParams;
++	PVRSRV_RGXDEV_INFO *psDevInfo;
++	void __iomem *pvRegsBase;
++
++	PVR_ASSERT(hPrivate != NULL);
++	psParams = (RGX_LAYER_PARAMS*)hPrivate;
++	psDevInfo = psParams->psDevInfo;
++	pvRegsBase = psDevInfo->pvRegsBaseKM;
++
++#if defined(PDUMP)
++	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
++#endif
++	{
++		OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue);
++	}
++
++	PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME,
++	           ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags);
++}
++
++void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue)
++{
++	RGX_LAYER_PARAMS *psParams;
++	PVRSRV_RGXDEV_INFO *psDevInfo;
++	void __iomem *pvRegsBase;
++
++	PVR_ASSERT(hPrivate != NULL);
++	psParams = (RGX_LAYER_PARAMS*)hPrivate;
++	psDevInfo = psParams->psDevInfo;
++	pvRegsBase = psDevInfo->pvRegsBaseKM;
++
++#if defined(PDUMP)
++	if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
++#endif
++	{
++		OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue);
++	}
++
++	PDUMPREG64(psDevInfo->psDeviceNode,
RGX_PDUMPREG_NAME, ++ ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags); ++} ++ ++IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ void __iomem *pvRegsBase; ++ IMG_UINT32 ui32RegValue; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ pvRegsBase = psDevInfo->pvRegsBaseKM; ++ ++#if defined(PDUMP) ++ if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) ++ { ++ ui32RegValue = IMG_UINT32_MAX; ++ } ++ else ++#endif ++ { ++ ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr); ++ } ++ ++ PDUMPREGREAD32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ++ ui32RegAddr, psParams->ui32PdumpFlags); ++ ++ return ui32RegValue; ++} ++ ++IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ void __iomem *pvRegsBase; ++ IMG_UINT64 ui64RegValue; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ pvRegsBase = psDevInfo->pvRegsBaseKM; ++ ++#if defined(PDUMP) ++ if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW) ++ { ++ ui64RegValue = IMG_UINT64_MAX; ++ } ++ else ++#endif ++ { ++ ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); ++ } ++ ++ PDUMPREGREAD64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ++ ui32RegAddr, PDUMP_FLAGS_CONTINUOUS); ++ ++ return ui64RegValue; ++} ++ ++IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT64 uiRegValueNew, ++ IMG_UINT64 uiRegKeepMask) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ void __iomem *pvRegsBase; ++#if defined(PDUMP) ++ PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; ++#endif ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ pvRegsBase = psDevInfo->pvRegsBaseKM; ++ ++ /* only use the new values for bits we update according to the keep mask */ ++ uiRegValueNew &= ~uiRegKeepMask; ++ ++#if defined(PDUMP) ++ ++ PDUMP_BLKSTART(ui32PDumpFlags); ++ ++ /* Store register offset to temp PDump variable */ ++ PDumpRegRead64ToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ++ ":SYSMEM:$1", ui32RegAddr, ui32PDumpFlags); ++ ++ /* Keep the bits set in the mask */ ++ PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", ++ uiRegKeepMask, ui32PDumpFlags); ++ ++ /* OR the new values */ ++ PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", ++ uiRegValueNew, ui32PDumpFlags); ++ ++ /* Do the actual register write */ ++ PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ++ ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); ++ ++ PDUMP_BLKEND(ui32PDumpFlags); ++ ++ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) ++#endif ++ ++ { ++ IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr); ++ uiRegValue &= uiRegKeepMask; ++ OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXPollReg32(const void *hPrivate, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT32 ui32RegValue, ++ IMG_UINT32 ui32RegMask) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ void __iomem *pvRegsBase; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ pvRegsBase = psDevInfo->pvRegsBaseKM; ++ ++#if defined(PDUMP) ++ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) ++#endif ++ 
{ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), ++ ui32RegValue, ++ ui32RegMask, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr)); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ } ++ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ ui32RegAddr, ++ ui32RegValue, ++ ui32RegMask, ++ psParams->ui32PdumpFlags, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXPollReg64(const void *hPrivate, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT64 ui64RegValue, ++ IMG_UINT64 ui64RegMask) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ void __iomem *pvRegsBase; ++ ++ /* Split lower and upper words */ ++ IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32); ++ IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue); ++ IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32); ++ IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask); ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ pvRegsBase = psDevInfo->pvRegsBaseKM; ++ ++#if defined(PDUMP) ++ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)) ++#endif ++ { ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4), ++ ui32UpperValue, ++ ui32UpperMask, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr)); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ if (PVRSRVPollForValueKM(psDevInfo->psDeviceNode, ++ (IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr), ++ ui32LowerValue, ++ ui32LowerMask, ++ POLL_FLAG_LOG_ERROR) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for lower part of Reg (0x%x) failed", ui32RegAddr)); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ } ++ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ ui32RegAddr + 4, ++ ui32UpperValue, ++ ui32UpperMask, ++ psParams->ui32PdumpFlags, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ ui32RegAddr, ++ ui32LowerValue, ++ ui32LowerMask, ++ psParams->ui32PdumpFlags, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ return PVRSRV_OK; ++} ++ ++void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32TimeUs) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ OSWaitus(ui32TimeUs); ++ PDUMPIDLWITHFLAGS(psDevInfo->psDeviceNode, ui32Cycles, PDUMP_FLAGS_CONTINUOUS); ++} ++ ++void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr) ++{ ++ PVR_ASSERT(hPrivate != NULL); ++ *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr; ++} ++ ++#if defined(PDUMP) ++void RGXWriteKernelMMUPC64(const void *hPrivate, ++ IMG_UINT32 ui32PCReg, ++ IMG_UINT32 ui32PCRegAlignShift, ++ IMG_UINT32 ui32PCRegShift, ++ IMG_UINT64 ui64PCVal) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ /* Write the cat-base address */ ++ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal); ++ ++ /* Pdump catbase address */ ++ MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, ++ RGX_PDUMPREG_NAME, ++ ui32PCReg, ++ 8, ++ ui32PCRegAlignShift, ++ ui32PCRegShift, ++ PDUMP_FLAGS_CONTINUOUS); ++} ++ ++void 
RGXWriteKernelMMUPC32(const void *hPrivate, ++ IMG_UINT32 ui32PCReg, ++ IMG_UINT32 ui32PCRegAlignShift, ++ IMG_UINT32 ui32PCRegShift, ++ IMG_UINT32 ui32PCVal) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ /* Write the cat-base address */ ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal); ++ ++ /* Pdump catbase address */ ++ MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx, ++ RGX_PDUMPREG_NAME, ++ ui32PCReg, ++ 4, ++ ui32PCRegAlignShift, ++ ui32PCRegShift, ++ PDUMP_FLAGS_CONTINUOUS); ++} ++#endif /* defined(PDUMP) */ ++ ++void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr) ++{ ++ PVR_ASSERT(hPrivate != NULL); ++ *psGPURegsAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sGPURegAddr; ++} ++ ++#if defined(PDUMP) ++void RGXMIPSWrapperConfig(const void *hPrivate, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT64 ui64GPURegsAddr, ++ IMG_UINT32 ui32GPURegsAlign, ++ IMG_UINT32 ui32BootMode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ++ ui32RegAddr, ++ (ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode); ++ ++ PDUMP_BLKSTART(ui32PDumpFlags); ++ ++ /* Store register offset to temp PDump variable */ ++ PDumpRegLabelToInternalVar(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ++ ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); ++ ++ /* Align register transactions identifier */ ++ PDumpWriteVarSHRValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", ++ ui32GPURegsAlign, ui32PDumpFlags); ++ ++ /* Enable micromips instruction encoding */ ++ PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", ++ ui32BootMode, ui32PDumpFlags); ++ ++ /* Do the actual register write */ ++ PDumpInternalVarToReg64(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ++ ui32RegAddr, ":SYSMEM:$1", ui32PDumpFlags); ++ ++ PDUMP_BLKEND(ui32PDumpFlags); ++} ++#endif ++ ++void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr) ++{ ++ PVR_ASSERT(hPrivate != NULL); ++ *psBootRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sBootRemapAddr; ++} ++ ++void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr) ++{ ++ PVR_ASSERT(hPrivate != NULL); ++ *psCodeRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr; ++} ++ ++void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr) ++{ ++ PVR_ASSERT(hPrivate != NULL); ++ *psDataRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sDataRemapAddr; ++} ++ ++void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr) ++{ ++ PVR_ASSERT(hPrivate != NULL); ++ *psTrampolineRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr; ++} ++ ++#if defined(PDUMP) ++static inline ++void RGXWriteRemapConfig2Reg(void __iomem *pvRegs, ++ PMR *psPMR, ++ IMG_DEVMEM_OFFSET_T uiLogicalOffset, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT64 ui64PhyAddr, ++ IMG_UINT64 ui64PhyMask, ++ IMG_UINT64 ui64Settings) ++{ ++ PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++ PVR_ASSERT(psPMR != NULL); ++ psDevNode = PMR_DeviceNode(psPMR); ++ ++ OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings); ++ ++ PDUMP_BLKSTART(ui32PDumpFlags); ++ ++ /* Store memory offset to temp PDump variable */ ++ PDumpMemLabelToInternalVar64(":SYSMEM:$1", psPMR, ++ uiLogicalOffset, 
ui32PDumpFlags); ++ ++ /* Keep only the relevant bits of the output physical address */ ++ PDumpWriteVarANDValueOp(psDevNode, ":SYSMEM:$1", ui64PhyMask, ui32PDumpFlags); ++ ++ /* Extra settings for this remapped region */ ++ PDumpWriteVarORValueOp(psDevNode, ":SYSMEM:$1", ui64Settings, ui32PDumpFlags); ++ ++ /* Do the actual register write */ ++ PDumpInternalVarToReg64(psDevNode, RGX_PDUMPREG_NAME, ui32RegAddr, ++ ":SYSMEM:$1", ui32PDumpFlags); ++ ++ PDUMP_BLKEND(ui32PDumpFlags); ++} ++ ++void RGXBootRemapConfig(const void *hPrivate, ++ IMG_UINT32 ui32Config1RegAddr, ++ IMG_UINT64 ui64Config1RegValue, ++ IMG_UINT32 ui32Config2RegAddr, ++ IMG_UINT64 ui64Config2PhyAddr, ++ IMG_UINT64 ui64Config2PhyMask, ++ IMG_UINT64 ui64Config2Settings) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ IMG_UINT32 ui32BootRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_CODE); ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ /* Write remap config1 register */ ++ RGXWriteReg64(hPrivate, ++ ui32Config1RegAddr, ++ ui64Config1RegValue); ++ ++ /* Write remap config2 register */ ++ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, ++ psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, ++ psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset, ++ ui32Config2RegAddr, ++ ui64Config2PhyAddr, ++ ui64Config2PhyMask, ++ ui64Config2Settings); ++} ++ ++void RGXCodeRemapConfig(const void *hPrivate, ++ IMG_UINT32 ui32Config1RegAddr, ++ IMG_UINT64 ui64Config1RegValue, ++ IMG_UINT32 ui32Config2RegAddr, ++ IMG_UINT64 ui64Config2PhyAddr, ++ IMG_UINT64 ui64Config2PhyMask, ++ IMG_UINT64 ui64Config2Settings) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ IMG_UINT32 ui32CodeRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_EXCEPTIONS_CODE); ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ /* Write remap config1 register */ ++ RGXWriteReg64(hPrivate, ++ ui32Config1RegAddr, ++ ui64Config1RegValue); ++ ++ /* Write remap config2 register */ ++ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, ++ psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR, ++ psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset, ++ ui32Config2RegAddr, ++ ui64Config2PhyAddr, ++ ui64Config2PhyMask, ++ ui64Config2Settings); ++} ++ ++void RGXDataRemapConfig(const void *hPrivate, ++ IMG_UINT32 ui32Config1RegAddr, ++ IMG_UINT64 ui64Config1RegValue, ++ IMG_UINT32 ui32Config2RegAddr, ++ IMG_UINT64 ui64Config2PhyAddr, ++ IMG_UINT64 ui64Config2PhyMask, ++ IMG_UINT64 ui64Config2Settings) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ IMG_UINT32 ui32DataRemapMemOffset = RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ /* Write remap config1 register */ ++ RGXWriteReg64(hPrivate, ++ ui32Config1RegAddr, ++ ui64Config1RegValue); ++ ++ /* Write remap config2 register */ ++ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM, ++ psDevInfo->psRGXFWDataMemDesc->psImport->hPMR, ++ psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset, ++ ui32Config2RegAddr, ++ ui64Config2PhyAddr, ++ ui64Config2PhyMask, ++ ui64Config2Settings); ++} ++ ++void RGXTrampolineRemapConfig(const void *hPrivate, ++ IMG_UINT32 ui32Config1RegAddr, ++ IMG_UINT64 ui64Config1RegValue, ++ IMG_UINT32 ui32Config2RegAddr, ++ IMG_UINT64 ui64Config2PhyAddr, ++ IMG_UINT64 ui64Config2PhyMask, ++ IMG_UINT64 ui64Config2Settings) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ PDUMP_FLAGS_T ui32PDumpFlags = 
PDUMP_FLAGS_CONTINUOUS; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ /* write the register for real, without PDump */ ++ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ++ ui32Config1RegAddr, ++ ui64Config1RegValue); ++ ++ PDUMP_BLKSTART(ui32PDumpFlags); ++ ++ /* Store the memory address in a PDump variable */ ++ PDumpPhysHandleToInternalVar64(psDevInfo->psDeviceNode, ":SYSMEM:$1", ++ psDevInfo->psTrampoline->hPdumpPages, ++ ui32PDumpFlags); ++ ++ /* Keep only the relevant bits of the input physical address */ ++ PDumpWriteVarANDValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", ++ ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK, ++ ui32PDumpFlags); ++ ++ /* Enable bit */ ++ PDumpWriteVarORValueOp(psDevInfo->psDeviceNode, ":SYSMEM:$1", ++ RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN, ++ ui32PDumpFlags); ++ ++ /* Do the PDump register write */ ++ PDumpInternalVarToReg64(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ ui32Config1RegAddr, ++ ":SYSMEM:$1", ++ ui32PDumpFlags); ++ ++ PDUMP_BLKEND(ui32PDumpFlags); ++ ++ /* this can be written directly */ ++ RGXWriteReg64(hPrivate, ++ ui32Config2RegAddr, ++ (ui64Config2PhyAddr & ui64Config2PhyMask) | ui64Config2Settings); ++} ++#endif ++ ++#define MAX_NUM_COHERENCY_TESTS (10) ++IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ PVRSRV_DEVICE_CONFIG *psDevConfig; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS) ++ { ++ return IMG_FALSE; ++ } ++ ++ psDevConfig = ((RGX_LAYER_PARAMS*)hPrivate)->psDevConfig; ++ ++ return PVRSRVSystemSnoopingOfCPUCache(psDevConfig); ++} ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ /* Wait for Slave Port to be Ready */ ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_META_SP_MSLVCTRL1, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); ++ if (eError != PVRSRV_OK) return eError; ++ ++ /* Issue a Write */ ++ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); ++ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); ++ ++ return eError; ++} ++#endif ++ ++/* ++ * The fabric coherency test is performed when platform supports fabric coherency ++ * either in the form of ACE-lite or Full-ACE. This test is done quite early ++ * with the firmware processor quiescent and makes exclusive use of the slave ++ * port interface for reading/writing through the device memory hierarchy. The ++ * rationale for the test is to ensure that what the CPU writes to its dcache ++ * is visible to the GPU via coherency snoop miss/hit and vice-versa without ++ * any intervening cache maintenance by the writing agent. 
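++ * Four sub-tests are exercised below (ui32TestType 0 to 3):
++ * CPU-write/GPU-read and GPU-write/CPU-read, each against both a snoop miss
++ * and a snoop hit; every sub-test is run twice with different seed values.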
++ */ ++PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate) ++{ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ IMG_UINT32 *pui32FabricCohTestBufferCpuVA; ++ DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc; ++ RGXFWIF_DEV_VIRTADDR sFabricCohTestBufferDevVA; ++ IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64); ++ IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64); ++ IMG_UINT32 ui32SLCCTRL = 0; ++ IMG_UINT32 ui32OddEven; ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ IMG_BOOL bFeatureS7 = RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE); ++#endif ++ IMG_UINT32 ui32TestType; ++ IMG_UINT32 ui32OddEvenSeed = 1; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_BOOL bFullTestPassed = IMG_TRUE; ++ IMG_BOOL bExit = IMG_FALSE; ++#if defined(DEBUG) ++ IMG_BOOL bSubTestPassed = IMG_FALSE; ++#endif ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ PVR_LOG(("Starting fabric coherency test .....")); ++ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (bFeatureS7) ++ { ++ IMG_UINT64 ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_UNCACHED(MMU_CONTEXT_MAPPING_FWIF); ++ ++ /* Configure META to use SLC force-linefill for the bootloader segment */ ++ RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), ++ (ui64SegOutAddrTopUncached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); ++ } ++ else ++#endif ++ { ++ /* Bypass the SLC when IO coherency is enabled */ ++ ui32SLCCTRL = RGXReadReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS); ++ RGXWriteReg32(hPrivate, ++ RGX_CR_SLC_CTRL_BYPASS, ++ ui32SLCCTRL | RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN); ++ } ++ ++ /* Size and align are 'expanded' because we request an export align allocation */ ++ eError = DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap), ++ &uiFabricCohTestBlockSize, ++ &uiFabricCohTestBlockAlign); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "DevmemExportalignAdjustSizeAndAlign() error: %s, exiting", ++ PVRSRVGetErrorString(eError))); ++ goto e0; ++ } ++ ++ /* Allocate, acquire cpu address and set firmware address */ ++ eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode, ++ uiFabricCohTestBlockSize, ++ uiFabricCohTestBlockAlign, ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | ++ PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), ++ "FwExFabricCoherencyTestBuffer", ++ &psFabricCohTestBufferMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "DevmemFwAllocateExportable() error: %s, exiting", ++ PVRSRVGetErrorString(eError))); ++ goto e0; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psFabricCohTestBufferMemDesc, (void **) &pui32FabricCohTestBufferCpuVA); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "DevmemAcquireCpuVirtAddr() error: %s, exiting", ++ PVRSRVGetErrorString(eError))); ++ goto e1; ++ } ++ ++ /* Create a FW address which is uncached in the Meta DCache and in the SLC ++ * using the Meta bootloader segment. ++ * This segment is the only one configured correctly out of reset ++ * (when this test is meant to be executed). 
++ */ ++ eError = RGXSetFirmwareAddress(&sFabricCohTestBufferDevVA, ++ psFabricCohTestBufferMemDesc, ++ 0, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", e2); ++ ++ /* Undo most of the FW mappings done by RGXSetFirmwareAddress */ ++ sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK; ++ sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK; ++ sFabricCohTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS; ++ ++ /* Map the buffer in the bootloader segment as uncached */ ++ sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR; ++ sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED; ++ ++ for (ui32TestType = 0; ui32TestType < 4 && bExit == IMG_FALSE; ui32TestType++) ++ { ++ IMG_CPU_PHYADDR sCpuPhyAddr; ++ IMG_BOOL bValid; ++ PMR *psPMR; ++ ++ /* Acquire underlying PMR CpuPA in preparation for cache maintenance */ ++ (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR); ++ eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid); ++ if (eError != PVRSRV_OK || bValid == IMG_FALSE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "PMR_CpuPhysAddr error: %s, exiting", ++ PVRSRVGetErrorString(eError))); ++ bExit = IMG_TRUE; ++ continue; ++ } ++ ++ /* Here we do two passes [runs] mostly to account for the effects of using ++ the different seed (i.e. ui32OddEvenSeed) value to read and write */ ++ for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++) ++ { ++ IMG_UINT32 i; ++ ++#if defined(DEBUG) ++ switch (ui32TestType) ++ { ++ case 0: ++ PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); ++ break; ++ case 1: ++ PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven)); ++ break; ++ case 2: ++ PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); ++ break; ++ case 3: ++ PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven)); ++ break; ++ default: ++ PVR_LOG(("Internal error, exiting test")); ++ eError = PVRSRV_ERROR_INIT_FAILURE; ++ bExit = IMG_TRUE; ++ continue; ++ } ++#endif ++ ++ for (i = 0; i < 2 && bExit == IMG_FALSE; i++) ++ { ++ IMG_UINT32 ui32FWAddr; ++ IMG_UINT32 ui32FWValue; ++ IMG_UINT32 ui32FWValue2; ++ IMG_CPU_PHYADDR sCpuPhyAddrStart; ++ IMG_CPU_PHYADDR sCpuPhyAddrEnd; ++ IMG_UINT32 ui32LastFWValue = ~0; ++ IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32); ++ ++ /* Calculate next address and seed value to write/read from slave-port */ ++ ui32FWAddr = sFabricCohTestBufferDevVA.ui32Addr + ui32Offset; ++ sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + ui32Offset; ++ sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr; ++ ui32OddEvenSeed += 1; ++ ++ if (ui32TestType & 0x1) ++ { ++ ui32FWValue = i + ui32OddEvenSeed; ++ ++ switch (ui32TestType) ++ { ++ case 1: ++ case 3: ++ /* Clean dcache to ensure there is no stale data in dcache that might over-write ++ what we are about to write via slave-port here because if it drains from the CPU ++ dcache before we read it, it would corrupt what we are going to read back via ++ the CPU */ ++ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); ++ CacheOpExec(psDevInfo->psDeviceNode, ++ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, ++ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), ++ sCpuPhyAddrStart, ++ sCpuPhyAddrEnd, ++ PVRSRV_CACHE_OP_CLEAN); ++ break; ++ } ++ ++ /* Write the value using the RGX slave-port interface */ ++ eError = 
RGXWriteFWModuleAddr(psDevInfo, ui32FWAddr, ui32FWValue); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXWriteFWModuleAddr error: %s, exiting", ++ PVRSRVGetErrorString(eError))); ++ bExit = IMG_TRUE; ++ continue; ++ } ++ ++ /* Read back value using RGX slave-port interface, this is used ++ as a sort of memory barrier for the above write */ ++ eError = RGXReadFWModuleAddr(psDevInfo, ui32FWAddr, &ui32FWValue2); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXReadFWModuleAddr error: %s, exiting", ++ PVRSRVGetErrorString(eError))); ++ bExit = IMG_TRUE; ++ continue; ++ } ++ else if (ui32FWValue != ui32FWValue2) ++ { ++ /* Fatal error, we should abort */ ++ PVR_DPF((PVR_DBG_ERROR, ++ "At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x", ++ i, ++ ui32FWValue, ++ ui32FWValue2)); ++ eError = PVRSRV_ERROR_INIT_FAILURE; ++ bExit = IMG_TRUE; ++ continue; ++ } ++ ++ if (! PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig)) ++ { ++ /* Invalidate dcache to ensure that any prefetched data by the CPU from this memory ++ region is discarded before we read (i.e. next read must trigger a cache miss). ++ If there is snooping of device cache, then any prefetching done by the CPU ++ will reflect the most up to date datum writing by GPU into said location, ++ that is to say prefetching must be coherent so CPU d-flush is not needed */ ++ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); ++ CacheOpExec(psDevInfo->psDeviceNode, ++ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, ++ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), ++ sCpuPhyAddrStart, ++ sCpuPhyAddrEnd, ++ PVRSRV_CACHE_OP_INVALIDATE); ++ } ++ } ++ else ++ { ++ IMG_UINT32 ui32RAWCpuValue; ++ ++ /* Ensures line is in dcache */ ++ ui32FWValue = IMG_UINT32_MAX; ++ ++ /* Dirty allocation in dcache */ ++ ui32RAWCpuValue = i + ui32OddEvenSeed; ++ pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed; ++ ++ /* Flush possible cpu store-buffer(ing) on LMA */ ++ OSWriteMemoryBarrier(&pui32FabricCohTestBufferCpuVA[i]); ++ ++ switch (ui32TestType) ++ { ++ case 0: ++ /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so ++ memory is coherent before the SlavePort reads */ ++ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); ++ CacheOpExec(psDevInfo->psDeviceNode, ++ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, ++ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), ++ sCpuPhyAddrStart, ++ sCpuPhyAddrEnd, ++ PVRSRV_CACHE_OP_FLUSH); ++ break; ++ } ++ ++ /* Read back value using RGX slave-port interface */ ++ eError = RGXReadFWModuleAddr(psDevInfo, ui32FWAddr, &ui32FWValue); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXReadFWModuleAddr error: %s, exiting", ++ PVRSRVGetErrorString(eError))); ++ bExit = IMG_TRUE; ++ continue; ++ } ++ ++ /* We are being mostly paranoid here, just to account for CPU RAW operations */ ++ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32); ++ CacheOpExec(psDevInfo->psDeviceNode, ++ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset, ++ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32), ++ sCpuPhyAddrStart, ++ sCpuPhyAddrEnd, ++ PVRSRV_CACHE_OP_FLUSH); ++ if (pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue) ++ { ++ /* Fatal error, we should abort */ ++ PVR_DPF((PVR_DBG_ERROR, ++ "At Offset: %d, RAW by CPU failed: expected: %x, got: %x", ++ i, ++ ui32RAWCpuValue, ++ pui32FabricCohTestBufferCpuVA[i])); ++ eError = PVRSRV_ERROR_INIT_FAILURE; ++ 
bExit = IMG_TRUE; ++ continue; ++ } ++ } ++ ++ /* Compare to see if sub-test passed */ ++ if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue) ++ { ++#if defined(DEBUG) ++ bSubTestPassed = IMG_TRUE; ++#endif ++ } ++ else ++ { ++ bFullTestPassed = IMG_FALSE; ++ eError = PVRSRV_ERROR_INIT_FAILURE; ++#if defined(DEBUG) ++ bSubTestPassed = IMG_FALSE; ++#endif ++ if (ui32LastFWValue != ui32FWValue) ++ { ++#if defined(DEBUG) ++ PVR_LOG(("At Offset: %d, Expected: %x, Got: %x", ++ i, ++ (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i], ++ (ui32TestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue)); ++#endif ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "test encountered unexpected error, exiting")); ++ eError = PVRSRV_ERROR_INIT_FAILURE; ++ bExit = IMG_TRUE; ++ continue; ++ } ++ } ++ ++ ui32LastFWValue = (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i]; ++ } ++ ++#if defined(DEBUG) ++ if (bExit) ++ { ++ continue; ++ } ++ ++ switch (ui32TestType) ++ { ++ case 0: ++ PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); ++ break; ++ case 1: ++ PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); ++ break; ++ case 2: ++ PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); ++ break; ++ case 3: ++ PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED")); ++ break; ++ default: ++ PVR_LOG(("Internal error, exiting test")); ++ bExit = IMG_TRUE; ++ continue; ++ } ++#endif ++ } ++ } ++ ++ RGXUnsetFirmwareAddress(psFabricCohTestBufferMemDesc); ++e2: ++ DevmemReleaseCpuVirtAddr(psFabricCohTestBufferMemDesc); ++e1: ++ DevmemFwUnmapAndFree(psDevInfo, psFabricCohTestBufferMemDesc); ++ ++e0: ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (bFeatureS7) ++ { ++ /* Restore bootloader segment settings */ ++ IMG_UINT64 ui64SegOutAddrTopCached = RGXFW_SEGMMU_OUTADDR_TOP_VIVT_SLC_CACHED(MMU_CONTEXT_MAPPING_FWIF); ++ RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6), ++ (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32); ++ } ++ else ++#endif ++ { ++ /* Restore SLC bypass settings */ ++ RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS, ui32SLCCTRL); ++ } ++ ++ bFullTestPassed = bExit ? 
IMG_FALSE: bFullTestPassed; ++ if (bFullTestPassed) ++ { ++ PVR_LOG(("fabric coherency test: PASSED")); ++ psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1; ++ } ++ else ++ { ++ PVR_LOG(("fabric coherency test: FAILED")); ++ psDevInfo->ui32CoherencyTestsDone++; ++ } ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(hPrivate); ++ ++ return PVRSRV_OK; ++#endif ++} ++ ++IMG_INT32 RGXDeviceGetFeatureValue(const void *hPrivate, IMG_UINT64 ui64Feature) ++{ ++ IMG_INT32 i32Ret = -1; ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ psDeviceNode = psDevInfo->psDeviceNode; ++ ++ if ((psDeviceNode->pfnGetDeviceFeatureValue)) ++ { ++ i32Ret = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ui64Feature); ++ } ++ ++ return i32Ret; ++} ++ ++IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ ++ return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0; ++} ++ ++IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ ++ return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0; ++} ++ ++IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ ++ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS)) ++ { ++ return 0; ++ } ++ return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS); ++} ++ ++IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ ++ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)) ++ { ++ return 0; ++ } ++ return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS); ++} ++ ++IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ ++ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH)) ++ { ++ return 0; ++ } ++ return RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH); ++} ++ ++IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ ++ return psDevInfo->sLayerParams.bDevicePA0IsValid; ++} ++ ++void RGXAcquireBootCodeAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootCodeAddr) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ *psBootCodeAddr = psDevInfo->sFWCodeDevVAddrBase; ++} ++ ++void RGXAcquireBootDataAddr(const void *hPrivate, IMG_DEV_VIRTADDR *psBootDataAddr) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ 
PVR_ASSERT(hPrivate != NULL); ++ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo; ++ ++ *psBootDataAddr = psDevInfo->sFWDataDevVAddrBase; ++} ++ ++IMG_BOOL RGXDeviceAckIrq(const void *hPrivate) ++{ ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ ++ return (psDevInfo->pfnRGXAckIrq != NULL) ? ++ psDevInfo->pfnRGXAckIrq(psDevInfo) : IMG_TRUE; ++} +diff --git a/drivers/gpu/drm/img-rogue/rgxlayer_impl.h b/drivers/gpu/drm/img-rogue/rgxlayer_impl.h +new file mode 100644 +index 000000000000..4d7c0f0c7798 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxlayer_impl.h +@@ -0,0 +1,67 @@ ++/*************************************************************************/ /*! ++@File ++@Title Header for DDK implementation of the Services abstraction layer ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for DDK implementation of the Services abstraction layer ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
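Every query helper above follows the same two-step shape: cast the opaque hPrivate handle back to RGX_LAYER_PARAMS to reach the device info, then either test a bit in a 64-bit feature/ERN-BRN mask or return a feature value behind a supported-check guard. A reduced sketch of that shape, with simplified, invented field names:

    #include <stdint.h>
    #include <stdbool.h>
    #include <assert.h>

    typedef struct {
        uint64_t features;      /* bitmask of supported features          */
        uint32_t slc_banks;     /* an example feature *value*             */
        bool     slc_banks_ok;  /* "value supported" guard for the above  */
    } dev_info_t;

    typedef struct { dev_info_t *dev; } layer_params_t; /* opaque handle payload */

    /* Pure bit test, as in RGXDeviceHasFeature / RGXDeviceHasErnBrn. */
    static bool dev_has_feature(const void *hPrivate, uint64_t mask)
    {
        assert(hPrivate != NULL);
        return (((const layer_params_t *)hPrivate)->dev->features & mask) != 0;
    }

    /* Guarded value query, as in RGXGetDeviceSLCBanks: an unsupported
     * value reports 0 rather than garbage. */
    static uint32_t dev_get_slc_banks(const void *hPrivate)
    {
        assert(hPrivate != NULL);
        const layer_params_t *p = hPrivate;
        return p->dev->slc_banks_ok ? p->dev->slc_banks : 0;
    }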
++*/ /**************************************************************************/ ++ ++#if !defined(RGXLAYER_IMPL_H) ++#define RGXLAYER_IMPL_H ++ ++#include "rgxlayer.h" ++#include "device_connection.h" ++ ++typedef struct _RGX_LAYER_PARAMS_ ++{ ++ void *psDevInfo; ++ void *psDevConfig; ++#if defined(PDUMP) ++ IMG_UINT32 ui32PdumpFlags; ++#endif ++ ++ IMG_DEV_PHYADDR sPCAddr; ++ IMG_DEV_PHYADDR sGPURegAddr; ++ IMG_DEV_PHYADDR sBootRemapAddr; ++ IMG_DEV_PHYADDR sCodeRemapAddr; ++ IMG_DEV_PHYADDR sDataRemapAddr; ++ IMG_DEV_PHYADDR sTrampolineRemapAddr; ++ IMG_BOOL bDevicePA0IsValid; ++} RGX_LAYER_PARAMS; ++ ++#endif /* RGXLAYER_IMPL_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxmem.c b/drivers/gpu/drm/img-rogue/rgxmem.c +new file mode 100644 +index 000000000000..de38b1cec33f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxmem.c +@@ -0,0 +1,947 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX memory context management ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX memory context management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "pvr_debug.h" ++#include "rgxmem.h" ++#include "allocmem.h" ++#include "devicemem.h" ++#include "devicemem_server_utils.h" ++#include "devicemem_pdump.h" ++#include "rgxdevice.h" ++#include "rgx_fwif_km.h" ++#include "rgxfwutils.h" ++#include "pdump_km.h" ++#include "pdump_physmem.h" ++#include "pvr_notifier.h" ++#include "pvrsrv.h" ++#include "sync_internal.h" ++#include "rgx_memallocflags.h" ++#include "rgx_bvnc_defs_km.h" ++#include "info_page.h" ++ ++#if defined(PDUMP) ++#include "sync.h" ++#endif ++ ++struct SERVER_MMU_CONTEXT_TAG ++{ ++ DEVMEM_MEMDESC *psFWMemContextMemDesc; ++ PRGXFWIF_FWMEMCONTEXT sFWMemContextDevVirtAddr; ++ MMU_CONTEXT *psMMUContext; ++ IMG_PID uiPID; ++ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; ++ IMG_UINT64 ui64FBSCEntryMask; ++ DLLIST_NODE sNode; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++}; /* SERVER_MMU_CONTEXT is typedef-ed in rgxmem.h */ ++ ++PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDeviceNode, ++ MMU_CONTEXT *psMMUContext, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiSize, ++ IMG_BOOL bInvalidate) ++{ ++ PVRSRV_ERROR eError; ++ DLLIST_NODE *psNode, *psNext; ++ RGXFWIF_KCCB_CMD sFlushInvalCmd; ++ SERVER_MMU_CONTEXT *psServerMMUContext = NULL; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ IMG_UINT32 ui32kCCBCommandSlot; ++ ++ OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); ++ ++ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) ++ { ++ SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); ++ if (psIter->psMMUContext == psMMUContext) ++ { ++ psServerMMUContext = psIter; ++ } ++ } ++ ++ OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); ++ ++ if (! 
psServerMMUContext) ++ { ++ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; ++ } ++ ++ /* Schedule the SLC flush command */ ++#if defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Submit SLC flush and invalidate"); ++#endif ++ sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL; ++ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = bInvalidate; ++ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE; ++ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Size = uiSize; ++ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.ui64Address = sDevVAddr.uiAddr; ++ eError = RGXGetFWCommonContextAddrFromServerMMUCtx(psDevInfo, ++ psServerMMUContext, ++ &sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ ++ eError = RGXSendCommandWithPowLockAndGetKCCBSlot(psDevInfo, ++ &sFlushInvalCmd, ++ PDUMP_FLAGS_CONTINUOUS, ++ &ui32kCCBCommandSlot); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXSLCFlush: Failed to schedule SLC flush command with error (%u)", ++ eError)); ++ } ++ else ++ { ++ /* Wait for the SLC flush to complete */ ++ eError = RGXWaitForKCCBSlotUpdate(psDevInfo, ui32kCCBCommandSlot, PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXSLCFlush: SLC flush and invalidate aborted with error (%u)", ++ eError)); ++ } ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, ++ MMU_CONTEXT *psMMUContext, ++ IMG_UINT64 ui64FBSCEntryMask) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ SERVER_MMU_CONTEXT *psServerMMUContext = NULL; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); ++ ++ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) ++ { ++ SERVER_MMU_CONTEXT *psIter = IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); ++ if (psIter->psMMUContext == psMMUContext) ++ { ++ psServerMMUContext = psIter; ++ } ++ } ++ ++ OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); ++ ++ if (! 
psServerMMUContext) ++ { ++ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; ++ } ++ ++ /* Accumulate the FBSC invalidate request */ ++ psServerMMUContext->ui64FBSCEntryMask |= ui64FBSCEntryMask; ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * RGXExtractFBSCEntryMaskFromMMUContext ++ * ++ */ ++PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode, ++ SERVER_MMU_CONTEXT *psServerMMUContext, ++ IMG_UINT64 *pui64FBSCEntryMask) ++{ ++ if (!psServerMMUContext) ++ { ++ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND; ++ } ++ ++ *pui64FBSCEntryMask = psServerMMUContext->ui64FBSCEntryMask; ++ psServerMMUContext->ui64FBSCEntryMask = 0; ++ ++ return PVRSRV_OK; ++} ++ ++void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode, ++ MMU_CONTEXT *psMMUContext, ++ MMU_LEVEL eMMULevel, ++ IMG_BOOL bUnmap) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++ IMG_UINT32 ui32NewCacheFlags; ++ ++ PVR_UNREFERENCED_PARAMETER(bUnmap); ++ ++ switch (eMMULevel) ++ { ++ case MMU_LEVEL_3: ++ ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PC; ++ ++ break; ++ case MMU_LEVEL_2: ++ ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PD; ++ ++ break; ++ case MMU_LEVEL_1: ++ ui32NewCacheFlags = RGXFWIF_MMUCACHEDATA_FLAGS_PT; ++ ++#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) ++ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))) ++#endif ++ { ++ ui32NewCacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB; ++ } ++ ++ break; ++ default: ++ ui32NewCacheFlags = 0; ++ PVR_ASSERT(0); ++ ++ break; ++ } ++ ++#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)) ++ { ++ MMU_AppendCacheFlags(psMMUContext, ui32NewCacheFlags); ++ } ++ else ++#endif ++ { ++ MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32NewCacheFlags); ++ } ++} ++ ++static inline void _GetAndResetCacheOpsPending(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 *pui32FWCacheFlags) ++{ ++ /* ++ * Atomically exchange flags and 0 to ensure we never accidentally read ++ * state inconsistently or overwrite valid cache flags with 0. 
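The comment here names the key invariant of the pending-flags scheme: producers OR flags into a shared word, and the kick path swaps the word with 0, so each flag is either consumed exactly once or stays pending, never lost. Assuming MMU_AppendCacheFlags and MMU_ExchangeCacheFlags behave as an atomic fetch-OR and exchange, the pattern reduces to a few lines of C11 atomics:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t pending_cache_flags;

    /* Producer (compare MMU_AppendCacheFlags): OR new work into the word. */
    static void append_cache_flags(uint32_t flags)
    {
        atomic_fetch_or(&pending_cache_flags, flags);
    }

    /* Consumer (compare _GetAndResetCacheOpsPending): swap with 0 so every
     * flag is observed exactly once and none can be lost to a racing OR. */
    static uint32_t drain_cache_flags(void)
    {
        return atomic_exchange(&pending_cache_flags, 0);
    }

On a failed submit, RGXMMUCacheInvalidateKick below simply ORs the drained flags back in, which this scheme tolerates without losing concurrent additions.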
++ */ ++ *pui32FWCacheFlags = MMU_ExchangeCacheFlags(psDevInfo->psKernelMMUCtx, 0); ++} ++ ++static ++PVRSRV_ERROR _PrepareAndSubmitCacheCommand(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGXFWIF_DM eDM, ++ IMG_UINT32 ui32CacheFlags, ++ IMG_BOOL bInterrupt, ++ IMG_UINT32 *pui32MMUInvalidateUpdate) ++{ ++ PVRSRV_ERROR eError; ++ RGXFWIF_KCCB_CMD sFlushCmd; ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++ ++ *pui32MMUInvalidateUpdate = psDeviceNode->ui32NextMMUInvalidateUpdate++; ++ ++ /* Setup cmd and add the device nodes sync object */ ++ sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE; ++ sFlushCmd.uCmdData.sMMUCacheData.ui32MMUCacheSyncUpdateValue = *pui32MMUInvalidateUpdate; ++ SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim, ++ &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr); ++ ++ /* Indicate the firmware should signal command completion to the host */ ++ if (bInterrupt) ++ { ++ ui32CacheFlags |= RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT; ++ } ++ ++ sFlushCmd.uCmdData.sMMUCacheData.ui32CacheFlags = ui32CacheFlags; ++ ++#if defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Submit MMU flush and invalidate (flags = 0x%08x)", ++ ui32CacheFlags); ++#endif ++ ++ /* Schedule MMU cache command */ ++ eError = RGXSendCommand(psDevInfo, ++ &sFlushCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to schedule MMU cache command to " ++ "DM=%d with error (%u)", ++ __func__, eDM, eError)); ++ psDeviceNode->ui32NextMMUInvalidateUpdate--; ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 *pui32MMUInvalidateUpdate) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32FWCacheFlags; ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++ eError = PVRSRVPowerLock(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ goto RGXMMUCacheInvalidateKick_exit; ++ } ++ ++ _GetAndResetCacheOpsPending(psDeviceNode->pvDevice, &ui32FWCacheFlags); ++ if (ui32FWCacheFlags == 0) ++ { ++ /* Nothing to do if no cache ops pending */ ++ eError = PVRSRV_OK; ++ goto _PowerUnlockAndReturnErr; ++ } ++ ++ /* Ensure device is powered up before sending cache command */ ++ PDUMPPOWCMDSTART(psDeviceNode); ++ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, ++ PVRSRV_DEV_POWER_STATE_ON, ++ PVRSRV_POWER_FLAGS_NONE); ++ PDUMPPOWCMDEND(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32FWCacheFlags); ++ goto _PowerUnlockAndReturnErr; ++ } ++ ++ eError = _PrepareAndSubmitCacheCommand(psDeviceNode, RGXFWIF_DM_GP, ui32FWCacheFlags, ++ IMG_TRUE, pui32MMUInvalidateUpdate); ++ if (eError != PVRSRV_OK) ++ { ++ /* failed to submit cache operations, return failure */ ++ PVR_DPF((PVR_DBG_WARNING, "%s: failed to submit cache command (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ MMU_AppendCacheFlags(psDevInfo->psKernelMMUCtx, ui32FWCacheFlags); ++ goto _PowerUnlockAndReturnErr; ++ } ++ ++_PowerUnlockAndReturnErr: ++ PVRSRVPowerUnlock(psDeviceNode); ++ ++RGXMMUCacheInvalidateKick_exit: ++ return eError; ++} ++ ++PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_DM eDM, ++ IMG_UINT32 *pui32MMUInvalidateUpdate) ++{ ++ 
PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode; ++ IMG_UINT32 ui32FWCacheFlags; ++ ++ /* Caller should ensure that power lock is held before calling this function */ ++ PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock)); ++ ++ _GetAndResetCacheOpsPending(psDeviceNode->pvDevice, &ui32FWCacheFlags); ++ if (ui32FWCacheFlags == 0) ++ { ++ /* Nothing to do if no cache ops pending */ ++ return PVRSRV_OK; ++ } ++ ++ return _PrepareAndSubmitCacheCommand(psDeviceNode, eDM, ui32FWCacheFlags, ++ IMG_FALSE, pui32MMUInvalidateUpdate); ++} ++ ++/* page fault debug is the only current use case for needing to find process info ++ * after that process device memory context has been destroyed ++ */ ++ ++typedef struct _UNREGISTERED_MEMORY_CONTEXT_ ++{ ++ IMG_PID uiPID; ++ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; ++ IMG_DEV_PHYADDR sPCDevPAddr; ++} UNREGISTERED_MEMORY_CONTEXT; ++ ++/* must be a power of two */ ++#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3) ++ ++static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE]; ++static IMG_UINT32 gui32UnregisteredMemCtxsHead; ++ ++/* record a device memory context being unregistered. ++ * the list of unregistered contexts can be used to find the PID and process name ++ * belonging to a memory context which has been destroyed ++ */ ++static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext) ++{ ++ UNREGISTERED_MEMORY_CONTEXT *psRecord; ++ ++ OSLockAcquire(psDevInfo->hMMUCtxUnregLock); ++ ++ psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead]; ++ ++ gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1) ++ & (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1); ++ ++ OSLockRelease(psDevInfo->hMMUCtxUnregLock); ++ ++ psRecord->uiPID = psServerMMUContext->uiPID; ++ if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK) ++ { ++ PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context")); ++ } ++ OSStringLCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName)); ++} ++ ++ ++void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData) ++{ ++ SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo; ++ ++#if defined(PDUMP) ++ { ++ RGXFWIF_DEV_VIRTADDR sFWAddr; ++ ++ RGXSetFirmwareAddress(&sFWAddr, ++ psServerMMUContext->psFWMemContextMemDesc, ++ 0, ++ RFW_FWADDR_NOREF_FLAG); ++ ++ /* ++ * MMU cache commands (always dumped) might have a pointer to this FW ++ * memory context, wait until the FW has caught-up to the latest command. ++ */ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, ++ "Ensure FW has executed all MMU invalidations on FW memory " ++ "context 0x%x before freeing it", sFWAddr.ui32Addr); ++ SyncPrimPDumpPol(psDevInfo->psDeviceNode->psMMUCacheSyncPrim, ++ psDevInfo->psDeviceNode->ui32NextMMUInvalidateUpdate - 1, ++ 0xFFFFFFFF, ++ PDUMP_POLL_OPERATOR_GREATEREQUAL, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++#endif ++ ++ OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock); ++ dllist_remove_node(&psServerMMUContext->sNode); ++ OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock); ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) ++ { ++ _RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext); ++ } ++ ++ /* ++ * Release the page catalogue address acquired in RGXRegisterMemoryContext(). 
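The unregistered-context history above is a fixed power-of-two ring: the head names the slot that will be overwritten next, advancing wraps with a mask instead of a modulo, and lookups walk from the newest record (just before the head) back to the oldest. A self-contained sketch of the same structure (locking omitted):

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    #define HISTORY_SIZE (1u << 3)           /* must stay a power of two */
    #define HISTORY_MASK (HISTORY_SIZE - 1u)

    typedef struct { uint32_t pid; char name[16]; } record_t;

    static record_t history[HISTORY_SIZE];
    static uint32_t head;                    /* next slot to overwrite */

    static void history_record(uint32_t pid, const char *name)
    {
        record_t *r = &history[head];
        head = (head + 1u) & HISTORY_MASK;   /* wrap with a mask, no '%' */
        r->pid = pid;
        strncpy(r->name, name, sizeof(r->name) - 1);
        r->name[sizeof(r->name) - 1] = '\0';
    }

    /* Newest-first scan: the newest record sits just before the head. */
    static bool history_find(uint32_t pid, record_t *out)
    {
        uint32_t i = (head - 1u) & HISTORY_MASK;
        uint32_t j;

        for (j = 0; j < HISTORY_SIZE; j++, i = (i - 1u) & HISTORY_MASK)
        {
            if (history[i].pid == pid)
            {
                *out = history[i];
                return true;
            }
        }
        return false;
    }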
++ */ ++ MMU_ReleaseBaseAddr(NULL); ++ ++ /* ++ * Free the firmware memory context. ++ */ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Free FW memory context"); ++ DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc); ++ ++ OSFreeMem(psServerMMUContext); ++} ++ ++/* ++ * RGXRegisterMemoryContext ++ */ ++PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode, ++ MMU_CONTEXT *psMMUContext, ++ IMG_HANDLE *hPrivData) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_MEMALLOCFLAGS_T uiFWMemContextMemAllocFlags; ++ RGXFWIF_FWMEMCONTEXT *psFWMemContext; ++ DEVMEM_MEMDESC *psFWMemContextMemDesc; ++ SERVER_MMU_CONTEXT *psServerMMUContext; ++ ++ if (psDevInfo->psKernelMMUCtx == NULL) ++ { ++ /* ++ * This must be the creation of the Kernel memory context. Take a copy ++ * of the MMU context for use when programming the BIF. ++ */ ++ psDevInfo->psKernelMMUCtx = psMMUContext; ++ ++#if defined(RGX_BRN71422_TARGET_HARDWARE_PHYSICAL_ADDR) ++ /* Setup the BRN71422 mapping in the FW memory context. */ ++ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 71422)) ++ { ++ RGXMapBRN71422TargetPhysicalAddress(psMMUContext); ++ } ++#endif ++ } ++ else ++ { ++ psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext)); ++ if (psServerMMUContext == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto fail_alloc_server_ctx; ++ } ++ ++ psServerMMUContext->psDevInfo = psDevInfo; ++ psServerMMUContext->ui64FBSCEntryMask = 0; ++ psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = 0; ++ ++ /* ++ * This FW MemContext is only mapped into kernel for initialisation purposes. ++ * Otherwise this allocation is only used by the FW. ++ * Therefore the GPU cache doesn't need coherency, and write-combine ++ * will suffice on the CPU side (WC buffer will be flushed at any kick) ++ */ ++ uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN); ++ ++ /* ++ Allocate device memory for the firmware memory context for the new ++ application. ++ */ ++ PDUMPCOMMENT(psDevInfo->psDeviceNode, "Allocate RGX firmware memory context"); ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(*psFWMemContext), ++ uiFWMemContextMemAllocFlags | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC, ++ "FwMemoryContext", ++ &psFWMemContextMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate firmware memory context (%u)", ++ __func__, ++ eError)); ++ goto fail_alloc_fw_ctx; ++ } ++ ++ /* ++ Temporarily map the firmware memory context to the kernel. ++ */ ++ eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc, ++ (void **)&psFWMemContext); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map firmware memory context (%u)", ++ __func__, ++ eError)); ++ goto fail_acquire_cpu_addr; ++ } ++ ++ /* ++ * Write the new memory context's page catalogue into the firmware memory ++ * context for the client. 
++ */ ++ eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to acquire Page Catalogue address (%u)", ++ __func__, ++ eError)); ++ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); ++ goto fail_acquire_base_addr; ++ } ++ ++ /* ++ * Set default values for the rest of the structure. ++ */ ++ psFWMemContext->uiPageCatBaseRegSet = RGXFW_BIF_INVALID_PCSET; ++ psFWMemContext->uiBreakpointAddr = 0; ++ psFWMemContext->uiBPHandlerAddr = 0; ++ psFWMemContext->uiBreakpointCtl = 0; ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++{ ++ IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; ++ IMG_BOOL bOSidAxiProt; ++ ++ MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt); ++ ++ psFWMemContext->ui32OSid = ui32OSidReg; ++ psFWMemContext->bOSidAxiProt = bOSidAxiProt; ++} ++#endif ++ ++#if defined(PDUMP) ++ { ++ IMG_CHAR aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH]; ++ IMG_DEVMEM_OFFSET_T uiOffset = 0; ++ ++ /* ++ * Dump the Mem context allocation ++ */ ++ DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS); ++ ++ ++ /* ++ * Obtain a symbolic addr of the mem context structure ++ */ ++ eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc, ++ &uiOffset, ++ aszName, ++ PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to generate a Dump Page Catalogue address (%u)", ++ __func__, ++ eError)); ++ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); ++ goto fail_pdump_cat_base_addr; ++ } ++ ++ /* ++ * Dump the Page Cat tag in the mem context (symbolic address) ++ */ ++ eError = MMU_PDumpWritePageCatBase(psMMUContext, ++ aszName, ++ uiOffset, ++ 8, /* 64-bit register write */ ++ 0, ++ 0, ++ 0); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to acquire Page Catalogue address (%u)", ++ __func__, ++ eError)); ++ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); ++ goto fail_pdump_cat_base; ++ } ++ } ++#endif ++ ++ /* ++ * Release kernel address acquired above. ++ */ ++ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc); ++ ++ /* ++ * Store the process information for this device memory context ++ * for use with the host page-fault analysis. 
++ */ ++ psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM(); ++ psServerMMUContext->psMMUContext = psMMUContext; ++ psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc; ++ OSStringLCopy(psServerMMUContext->szProcessName, ++ OSGetCurrentClientProcessNameKM(), ++ sizeof(psServerMMUContext->szProcessName)); ++ ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "New memory context: Process Name: %s PID: %u (0x%08X)", ++ psServerMMUContext->szProcessName, ++ psServerMMUContext->uiPID, ++ psServerMMUContext->uiPID); ++ ++ OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock); ++ dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode); ++ OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock); ++ ++ *hPrivData = psServerMMUContext; ++ } ++ ++ return PVRSRV_OK; ++ ++#if defined(PDUMP) ++fail_pdump_cat_base: ++fail_pdump_cat_base_addr: ++ MMU_ReleaseBaseAddr(NULL); ++#endif ++fail_acquire_base_addr: ++ /* Done before jumping to the fail point as the release is done before exit */ ++fail_acquire_cpu_addr: ++ DevmemFwUnmapAndFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc); ++fail_alloc_fw_ctx: ++ OSFreeMem(psServerMMUContext); ++fail_alloc_server_ctx: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv) ++{ ++ SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv; ++ ++ return psMMUContext->psFWMemContextMemDesc; ++} ++ ++void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext, ++ RGXFWIF_DEV_VIRTADDR sFWMemContextAddr) ++{ ++ psServerMMUContext->sFWMemContextDevVirtAddr.ui32Addr = sFWMemContextAddr.ui32Addr; ++} ++ ++void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_DEV_VIRTADDR *psDevVAddr, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ MMU_FAULT_DATA *psOutFaultData) ++{ ++ IMG_DEV_PHYADDR sPCDevPAddr; ++ DLLIST_NODE *psNode, *psNext; ++ ++ OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock); ++ ++ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) ++ { ++ SERVER_MMU_CONTEXT *psServerMMUContext = ++ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); ++ ++ if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK) ++ { ++ PVR_LOG(("Failed to get PC address for memory context")); ++ continue; ++ } ++ ++ if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) ++ { ++ MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr, psOutFaultData); ++ goto out_unlock; ++ } ++ } ++ ++ /* Lastly check for fault in the kernel allocated memory */ ++ if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK) ++ { ++ PVR_LOG(("Failed to get PC address for kernel memory context")); ++ } ++ ++ if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr) ++ { ++ MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr, psOutFaultData); ++ } ++ ++out_unlock: ++ OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock); ++} ++ ++/* given the physical address of a page catalogue, searches for a corresponding ++ * MMU context and if found, provides the caller details of the process. ++ * Returns IMG_TRUE if a process is found. 
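RGXRegisterMemoryContext above unwinds failures through the usual kernel-style label ladder: each fail_* label releases exactly what had been acquired before the jump to it, in reverse acquisition order. Its skeleton, with invented names and malloc standing in for OSAllocMem/DevmemFwAllocate:

    #include <stdlib.h>

    /* Skeleton only; each label undoes the acquisitions made before the
     * jump to it, newest first. */
    static int register_ctx(void **out_ctx, void **out_fw)
    {
        int err;
        void *ctx = malloc(64);                   /* step 1 */
        if (ctx == NULL) { err = -1; goto fail_alloc_ctx; }

        void *fw_mem = malloc(64);                /* step 2 */
        if (fw_mem == NULL) { err = -1; goto fail_alloc_fw; }

        /* ... later steps jump to correspondingly later labels ... */

        *out_ctx = ctx;
        *out_fw  = fw_mem;
        return 0;

    fail_alloc_fw:      /* step 2 failed: release step 1's allocation */
        free(ctx);
    fail_alloc_ctx:     /* step 1 failed: nothing to undo */
        return err;
    }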
++ */ ++IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress, ++ RGXMEM_PROCESS_INFO *psInfo) ++{ ++ IMG_BOOL bRet = IMG_FALSE; ++ DLLIST_NODE *psNode, *psNext; ++ SERVER_MMU_CONTEXT *psServerMMUContext = NULL; ++ ++ /* check if the input PC addr corresponds to an active memory context */ ++ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) ++ { ++ SERVER_MMU_CONTEXT *psThisMMUContext = ++ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); ++ IMG_DEV_PHYADDR sPCDevPAddr; ++ ++ if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK) ++ { ++ PVR_LOG(("Failed to get PC address for memory context")); ++ continue; ++ } ++ ++ if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr) ++ { ++ psServerMMUContext = psThisMMUContext; ++ break; ++ } ++ } ++ ++ if (psServerMMUContext != NULL) ++ { ++ psInfo->uiPID = psServerMMUContext->uiPID; ++ OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); ++ psInfo->bUnregistered = IMG_FALSE; ++ bRet = IMG_TRUE; ++ } ++ /* else check if the input PC addr corresponds to the firmware */ ++ else ++ { ++ IMG_DEV_PHYADDR sKernelPCDevPAddr; ++ PVRSRV_ERROR eError; ++ ++ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("Failed to get PC address for kernel memory context")); ++ } ++ else ++ { ++ if (sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr) ++ { ++ psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; ++ OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); ++ psInfo->bUnregistered = IMG_FALSE; ++ bRet = IMG_TRUE; ++ } ++ } ++ } ++ ++ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && ++ (bRet == IMG_FALSE)) ++ { ++ /* no active memory context found with the given PC address. ++ * Check the list of most recently freed memory contexts. ++ */ ++ IMG_UINT32 i; ++ ++ OSLockAcquire(psDevInfo->hMMUCtxUnregLock); ++ ++ /* iterate through the list of unregistered memory contexts ++ * from newest (one before the head) to the oldest (the current head) ++ */ ++ i = gui32UnregisteredMemCtxsHead; ++ ++ do ++ { ++ UNREGISTERED_MEMORY_CONTEXT *psRecord; ++ ++ i ? 
i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1)); ++ ++ psRecord = &gasUnregisteredMemCtxs[i]; ++ ++ if (psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr) ++ { ++ psInfo->uiPID = psRecord->uiPID; ++ OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); ++ psInfo->bUnregistered = IMG_TRUE; ++ bRet = IMG_TRUE; ++ break; ++ } ++ } while (i != gui32UnregisteredMemCtxsHead); ++ ++ OSLockRelease(psDevInfo->hMMUCtxUnregLock); ++ ++ } ++ ++ return bRet; ++} ++ ++IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, ++ RGXMEM_PROCESS_INFO *psInfo) ++{ ++ IMG_BOOL bRet = IMG_FALSE; ++ DLLIST_NODE *psNode, *psNext; ++ SERVER_MMU_CONTEXT *psServerMMUContext = NULL; ++ ++ /* check if the input PID corresponds to an active memory context */ ++ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext) ++ { ++ SERVER_MMU_CONTEXT *psThisMMUContext = ++ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode); ++ ++ if (psThisMMUContext->uiPID == uiPID) ++ { ++ psServerMMUContext = psThisMMUContext; ++ break; ++ } ++ } ++ ++ if (psServerMMUContext != NULL) ++ { ++ psInfo->uiPID = psServerMMUContext->uiPID; ++ OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName)); ++ psInfo->bUnregistered = IMG_FALSE; ++ bRet = IMG_TRUE; ++ } ++ /* else check if the input PID corresponds to the firmware */ ++ else if (uiPID == RGXMEM_SERVER_PID_FIRMWARE) ++ { ++ psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE; ++ OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName)); ++ psInfo->bUnregistered = IMG_FALSE; ++ bRet = IMG_TRUE; ++ } ++ ++ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_PAGE_FAULT_DEBUG_ENABLED) && ++ (bRet == IMG_FALSE)) ++ { ++ /* if the PID didn't correspond to an active context or the ++ * FW address then see if it matches a recently unregistered context ++ */ ++ const IMG_UINT32 ui32Mask = UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1; ++ IMG_UINT32 i, j; ++ ++ OSLockAcquire(psDevInfo->hMMUCtxUnregLock); ++ ++ for (i = (gui32UnregisteredMemCtxsHead - 1) & ui32Mask, j = 0; ++ j < UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE; ++ i = (i - 1) & ui32Mask, j++) ++ { ++ UNREGISTERED_MEMORY_CONTEXT *psRecord = &gasUnregisteredMemCtxs[i]; ++ ++ if (psRecord->uiPID == uiPID) ++ { ++ psInfo->uiPID = psRecord->uiPID; ++ OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName)); ++ psInfo->bUnregistered = IMG_TRUE; ++ bRet = IMG_TRUE; ++ break; ++ } ++ } ++ ++ OSLockRelease(psDevInfo->hMMUCtxUnregLock); ++ } ++ ++ return bRet; ++} ++ ++IMG_PID RGXGetPIDFromServerMMUContext(SERVER_MMU_CONTEXT *psServerMMUContext) ++{ ++ if (psServerMMUContext) ++ { ++ return psServerMMUContext->uiPID; ++ } ++ return 0; ++} ++ ++/****************************************************************************** ++ End of file (rgxmem.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxmem.h b/drivers/gpu/drm/img-rogue/rgxmem.h +new file mode 100644 +index 000000000000..cbcbed77f921 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxmem.h +@@ -0,0 +1,147 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX memory context management ++@Copyright Copyright (c) Imagination Technologies Ltd.
All Rights Reserved ++@Description Header for RGX memory context management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(RGXMEM_H) ++#define RGXMEM_H ++ ++#include "pvrsrv_error.h" ++#include "device.h" ++#include "mmu_common.h" ++#include "rgxdevice.h" ++ ++#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 16 ++ ++/* this PID denotes the firmware */ ++#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF ++ ++/* this PID denotes the PM */ ++#define RGXMEM_SERVER_PID_PM 0xEFFFFFFF ++ ++typedef struct _RGXMEM_PROCESS_INFO_ ++{ ++ IMG_PID uiPID; ++ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME]; ++ IMG_BOOL bUnregistered; ++} RGXMEM_PROCESS_INFO; ++ ++typedef struct SERVER_MMU_CONTEXT_TAG SERVER_MMU_CONTEXT; ++ ++IMG_DEV_PHYADDR GetPC(MMU_CONTEXT * psContext); ++ ++void RGXSetFWMemContextDevVirtAddr(SERVER_MMU_CONTEXT *psServerMMUContext, ++ RGXFWIF_DEV_VIRTADDR sFWMemContextAddr); ++ ++void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDevNode); ++void RGXMMUSyncPrimFree(void); ++ ++PVRSRV_ERROR RGXSLCFlushRange(PVRSRV_DEVICE_NODE *psDevNode, ++ MMU_CONTEXT *psMMUContext, ++ IMG_DEV_VIRTADDR sDevVAddr, ++ IMG_DEVMEM_SIZE_T uiLength, ++ IMG_BOOL bInvalidate); ++ ++PVRSRV_ERROR RGXInvalidateFBSCTable(PVRSRV_DEVICE_NODE *psDeviceNode, ++ MMU_CONTEXT *psMMUContext, ++ IMG_UINT64 ui64FBSCEntryMask); ++ ++PVRSRV_ERROR RGXExtractFBSCEntryMaskFromMMUContext(PVRSRV_DEVICE_NODE *psDeviceNode, ++ SERVER_MMU_CONTEXT *psServerMMUContext, ++ IMG_UINT64 *pui64FBSCEntryMask); ++ ++void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDevNode, ++ MMU_CONTEXT *psMMUContext, ++ MMU_LEVEL eMMULevel, ++ IMG_BOOL bUnmap); ++ ++/*************************************************************************/ /*! ++@Function RGXMMUCacheInvalidateKick ++ ++@Description Sends a flush command to a particular DM but first takes ++ the power lock. ++ ++@Input psDevNode Device Node pointer ++@Input pui32NextMMUInvalidateUpdate ++ ++@Return PVRSRV_ERROR ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 *pui32NextMMUInvalidateUpdate); ++ ++/*************************************************************************/ /*! ++@Function RGXPreKickCacheCommand ++ ++@Description Sends a cache flush command to a particular DM without ++ honouring the power lock. It's the caller's responsibility ++ to ensure power lock is held before calling this function. ++ ++@Input psDevInfo Device Info ++@Input eDM To which DM the cmd is sent. 
++@Input pui32MMUInvalidateUpdate ++ ++@Return PVRSRV_ERROR ++*/ /**************************************************************************/ ++PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo, ++ RGXFWIF_DM eDM, ++ IMG_UINT32 *pui32MMUInvalidateUpdate); ++ ++void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData); ++PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDevNode, ++ MMU_CONTEXT *psMMUContext, ++ IMG_HANDLE *hPrivData); ++ ++DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv); ++ ++void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_DEV_VIRTADDR *psDevVAddr, ++ IMG_DEV_PHYADDR *psDevPAddr, ++ MMU_FAULT_DATA *psOutFaultData); ++ ++IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress, ++ RGXMEM_PROCESS_INFO *psInfo); ++ ++IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID, ++ RGXMEM_PROCESS_INFO *psInfo); ++ ++IMG_PID RGXGetPIDFromServerMMUContext(SERVER_MMU_CONTEXT *psServerMMUContext); ++ ++#endif /* RGXMEM_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c b/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c +new file mode 100644 +index 000000000000..0e6c0ab05a46 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.c +@@ -0,0 +1,1045 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device specific initialisation routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific MMU initialisation ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
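The header above pairs two kick paths with opposite locking contracts: RGXMMUCacheInvalidateKick takes and releases the power lock itself, while RGXPreKickCacheCommand asserts the caller already holds it. A sketch of the intended caller shape for the pre-kick variant; the wrapper and its surrounding logic are invented, and this only builds inside the driver tree against its headers:

    /* Caller shape only: PVRSRVPowerLock/Unlock, RGXPreKickCacheCommand and
     * RGXFWIF_DM_GP are the driver's own symbols; the wrapper is invented. */
    static PVRSRV_ERROR kick_with_mmu_invalidate(PVRSRV_RGXDEV_INFO *psDevInfo)
    {
        IMG_UINT32 ui32MMUInvalidateUpdate = 0;
        PVRSRV_ERROR eError = PVRSRVPowerLock(psDevInfo->psDeviceNode);

        if (eError != PVRSRV_OK)
        {
            return eError;
        }

        /* Legal here and only here: the power lock is held, satisfying the
         * OSLockIsLocked assertion inside RGXPreKickCacheCommand. */
        eError = RGXPreKickCacheCommand(psDevInfo, RGXFWIF_DM_GP,
                                        &ui32MMUInvalidateUpdate);

        /* ... on success, submit the dependent kick that waits on
         * ui32MMUInvalidateUpdate ... */

        PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
        return eError;
    }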
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#include "rgxmipsmmuinit.h" ++ ++#include "device.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "mmu_common.h" ++#include "pdump_mmu.h" ++#include "rgxheapconfig.h" ++#include "pvr_debug.h" ++#include "pvrsrv_error.h" ++#include "rgx_memallocflags.h" ++#include "pdump_km.h" ++#include "rgxdevice.h" ++#include "log2.h" ++ ++/* ++ * Bits of PT, PD and PC not involving addresses ++ */ ++ ++/* Currently there is no page directory for MIPS MMU */ ++#define RGX_MIPS_MMUCTRL_PDE_PROTMASK 0 ++/* Currently there is no page catalog for MIPS MMU */ ++#define RGX_MIPS_MMUCTRL_PCE_PROTMASK 0 ++ ++ ++static MMU_PxE_CONFIG sRGXMMUPCEConfig; ++static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; ++ ++ ++/* ++ * ++ * Configuration for heaps with 4kB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 16kB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 64kB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 256kB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 1MB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 2MB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig2MB; ++ ++ ++/* Forward declaration of protection bits derivation functions, for ++ the following structure */ ++static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); ++static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); ++static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); ++static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); ++static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 
uiProtFlags, IMG_UINT32 uiLog2DataPageSize); ++static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); ++ ++static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, ++ const MMU_PxE_CONFIG **ppsMMUPDEConfig, ++ const MMU_PxE_CONFIG **ppsMMUPTEConfig, ++ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, ++ IMG_HANDLE *phPriv); ++ ++static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); ++ ++static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); ++static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); ++ ++static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; ++ ++/* Cached policy */ ++static IMG_UINT32 gui32CachedPolicy; ++ ++static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ MMU_DEVICEATTRIBS *psDevAttrs, ++ IMG_UINT64 *pui64Addr); ++ ++PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ IMG_BOOL bPhysBusAbove32Bit = 0; ++ ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH)) ++ { ++ bPhysBusAbove32Bit = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32; ++ } ++ ++ sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = ++ PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]); ++ ++ /* ++ * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently ++ */ ++ sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* 32 bit entries */ ++ sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */ ++ ++ sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */ ++ sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; /* Alignment of PD AND PC */ ++ ++ sRGXMMUPCEConfig.uiProtMask = RGX_MIPS_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits of the PC */ ++ sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */ ++ ++ sRGXMMUPCEConfig.uiValidEnMask = RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ ++ sRGXMMUPCEConfig.uiValidEnShift = RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */ ++ ++ /* ++ * Setup sRGXMMUTopLevelDevVAddrConfig ++ */ ++ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = 0; /* Get the PC address bits from a 40 bit virt. address (in a 64bit UINT) */ ++ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = 0; ++ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = 0; ++ ++ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = 0; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */ ++ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = 0; ++ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = 0; ++ ++ sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. address (in a 64bit UINT) */ ++ sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; ++ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; ++ ++/* ++ * ++ * Configuration for heaps with 4kB Data-Page size ++ * ++ */ ++ ++ /* ++ * Setup sRGXMMUPDEConfig_4KBDP. 
No PD in MIPS MMU currently ++ */ ++ sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0; ++ ++ /* No PD used for MIPS */ ++ sRGXMMUPDEConfig_4KBDP.uiAddrMask = 0; ++ sRGXMMUPDEConfig_4KBDP.uiAddrShift = 0; ++ sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; ++ ++ sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x0); ++ sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 0; ++ ++ sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PDE_PROTMASK; ++ sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN; ++ sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_4KBDP. ++ */ ++ sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; ++ ++ ++ if (bPhysBusAbove32Bit) ++ { ++ sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT; ++ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT; ++ } ++ else ++ { ++ sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK; ++ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY; ++ } ++ ++ sRGXMMUPTEConfig_4KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT; ++ sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; ++ ++ sRGXMMUPTEConfig_4KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK | ++ RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN; ++ sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN; ++ sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_4KBDP ++ */ ++ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = 0; ++ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = 0; ++ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = 0; ++ ++ ++ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = 0; ++ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = 0; ++ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = 0; ++ ++ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; ++ ++ ++ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); ++ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff); ++ ++ /* ++ * Setup gsPageSizeConfig4KB ++ */ ++ gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP; ++ gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; ++ gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; ++ gsPageSizeConfig4KB.uiRefCount = 0; ++ gsPageSizeConfig4KB.uiMaxRefCount = 0; ++ ++ ++/* ++ * ++ * Configuration for heaps with 16kB Data-Page size ++ * ++ */ ++ ++ /* ++ * Setup sRGXMMUPDEConfig_16KBDP ++ */ ++ sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0; ++ ++ sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0; ++ sRGXMMUPDEConfig_16KBDP.uiAddrShift = 0; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */ ++ sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the page tables NOT directories */ ++ ++ sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = 0; ++ sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 0; ++ ++ sRGXMMUPDEConfig_16KBDP.uiProtMask = 0; ++ sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; ++ ++ 
sRGXMMUPDEConfig_16KBDP.uiValidEnMask = 0; ++ sRGXMMUPDEConfig_16KBDP.uiValidEnShift = 0; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet ++ */ ++ sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0; ++ ++ sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0; ++ sRGXMMUPTEConfig_16KBDP.uiAddrShift = 0; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */ ++ sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the pages NOT tables */ ++ ++ sRGXMMUPTEConfig_16KBDP.uiProtMask = 0; ++ sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_16KBDP.uiValidEnMask = 0; ++ sRGXMMUPTEConfig_16KBDP.uiValidEnShift = 0; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_16KBDP ++ */ ++ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = 0; ++ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = 0; ++ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = 0; ++ ++ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = 0; ++ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = 0; ++ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = 0; ++ ++ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = 0; ++ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 0; ++ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = 0; ++ ++ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = 0; ++ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig16KB ++ */ ++ gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; ++ gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; ++ gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; ++ gsPageSizeConfig16KB.uiRefCount = 0; ++ gsPageSizeConfig16KB.uiMaxRefCount = 0; ++ ++ ++/* ++ * ++ * Configuration for heaps with 64kB Data-Page size. Not supported yet ++ * ++ */ ++ ++ /* ++ * Setup sRGXMMUPDEConfig_64KBDP ++ */ ++ sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0; ++ ++ sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0; ++ sRGXMMUPDEConfig_64KBDP.uiAddrShift = 0; ++ sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 0; ++ ++ sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = 0; ++ sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 0; ++ ++ sRGXMMUPDEConfig_64KBDP.uiProtMask = 0; ++ sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_64KBDP.uiValidEnMask = 0; ++ sRGXMMUPDEConfig_64KBDP.uiValidEnShift = 0; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_64KBDP. ++ * ++ */ ++ sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; ++ ++ if (bPhysBusAbove32Bit) ++ { ++ sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT; ++ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT; ++ } ++ else ++ { ++ sRGXMMUPTEConfig_64KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK; ++ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY; ++ } ++ ++ /* Even while using 64K pages, MIPS still aligns addresses to 4K */ ++ sRGXMMUPTEConfig_64KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT; ++ sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_4K; ++ ++ sRGXMMUPTEConfig_64KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK | ++ RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN | RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN; ++ sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN; ++ sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_64KBDP. 
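Each populated PTE configuration above is a recipe for packing a physical page address into an entry: strip the alignment bits (4 KB even for 64 KB data pages, per the comment above), shift the PFN into its field, clip with the field mask, then OR in protection and valid bits. A sketch with invented field constants standing in for the RGXMIPSFW_ENTRYLO_* values:

    #include <stdint.h>

    /* Field constants invented for illustration; the driver's real values
     * come from the RGXMIPSFW_ENTRYLO_* masks and shifts set up above. */
    #define LOG2_PAGE_4K   12u
    #define PFN_SHIFT      6u            /* uiAddrShift: where the PFN lands */
    #define PFN_MASK       0x03ffffc0u   /* uiAddrMask: clips the PFN field  */
    #define ENTRY_VALID    (1u << 1)     /* uiValidEnMask analogue           */

    static uint32_t make_pte(uint64_t phys_addr, uint32_t prot_bits)
    {
        /* Strip the 4 KB alignment (uiAddrLog2Align), place at the field's
         * shift, clip with the field's mask, then OR in prot + valid bits. */
        uint32_t pfn = (uint32_t)((phys_addr >> LOG2_PAGE_4K) << PFN_SHIFT)
                       & PFN_MASK;

        return pfn | prot_bits | ENTRY_VALID;
    }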
++ */ ++ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = 0; ++ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = 0; ++ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = 0; ++ ++ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = 0; ++ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = 0; ++ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = 0; ++ ++ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00ffff0000); ++ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE_64K; ++ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = (RGX_NUM_OS_SUPPORTED << RGXMIPSFW_LOG2_PAGETABLE_SIZE_64K) >> RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; ++ ++ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); ++ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff); ++ ++ /* ++ * Setup gsPageSizeConfig64KB. ++ */ ++ gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; ++ gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; ++ gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; ++ gsPageSizeConfig64KB.uiRefCount = 0; ++ gsPageSizeConfig64KB.uiMaxRefCount = 0; ++ ++ ++/* ++ * ++ * Configuration for heaps with 256kB Data-Page size. Not supported yet ++ * ++ */ ++ ++ /* ++ * Setup sRGXMMUPDEConfig_256KBDP ++ */ ++ sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0; ++ ++ sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0; ++ sRGXMMUPDEConfig_256KBDP.uiAddrShift = 0; ++ sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 0; ++ ++ sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = 0; ++ sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 0; ++ ++ sRGXMMUPDEConfig_256KBDP.uiProtMask = 0; ++ sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_256KBDP.uiValidEnMask = 0; ++ sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 0; ++ ++ /* ++ * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP ++ */ ++ sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0; ++ ++ sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0; ++ sRGXMMUPTEConfig_256KBDP.uiAddrShift = 0; ++ sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 0; ++ ++ sRGXMMUPTEConfig_256KBDP.uiProtMask = 0; ++ sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_256KBDP.uiValidEnMask = 0; ++ sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 0; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_256KBDP ++ */ ++ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = 0; ++ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = 0; ++ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = 0; ++ ++ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = 0; ++ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = 0; ++ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = 0; ++ ++ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 0; ++ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 0; ++ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = 0; ++ ++ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = 0; ++ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig256KB ++ */ ++ gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; ++ gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP; ++ gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; ++ gsPageSizeConfig256KB.uiRefCount = 0; ++ gsPageSizeConfig256KB.uiMaxRefCount = 0; ++ ++ /* ++ * Setup sRGXMMUPDEConfig_1MBDP. 
Not supported yet ++ */ ++ sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0; ++ ++ sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0; ++ sRGXMMUPDEConfig_1MBDP.uiAddrShift = 0; ++ sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 0; ++ ++ sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = 0; ++ sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 0; ++ ++ sRGXMMUPDEConfig_1MBDP.uiProtMask = 0; ++ sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_1MBDP.uiValidEnMask = 0; ++ sRGXMMUPDEConfig_1MBDP.uiValidEnShift = 0; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_1MBDP ++ */ ++ sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0; ++ sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0; ++ sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 0; ++ ++ sRGXMMUPTEConfig_1MBDP.uiProtMask = 0; ++ sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_1MBDP.uiValidEnMask = 0; ++ sRGXMMUPTEConfig_1MBDP.uiValidEnShift = 0; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_1MBDP ++ */ ++ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = 0; ++ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = 0; ++ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = 0; ++ ++ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = 0; ++ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = 0; ++ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = 0; ++ ++ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = 0; ++ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 0; ++ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = 0; ++ ++ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = 0; ++ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig1MB ++ */ ++ gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; ++ gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; ++ gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; ++ gsPageSizeConfig1MB.uiRefCount = 0; ++ gsPageSizeConfig1MB.uiMaxRefCount = 0; ++ ++ /* ++ * Setup sRGXMMUPDEConfig_2MBDP. 
Not supported yet ++ */ ++ sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0; ++ ++ sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0; ++ sRGXMMUPDEConfig_2MBDP.uiAddrShift = 0; ++ sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 0; ++ ++ sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = 0; ++ sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 0; ++ ++ sRGXMMUPDEConfig_2MBDP.uiProtMask = 0; ++ sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_2MBDP.uiValidEnMask = 0; ++ sRGXMMUPDEConfig_2MBDP.uiValidEnShift = 0; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_2MBDP ++ */ ++ sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0; ++ ++ sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0; ++ sRGXMMUPTEConfig_2MBDP.uiAddrShift = 0; ++ sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 0; ++ ++ sRGXMMUPTEConfig_2MBDP.uiProtMask = 0; ++ sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_2MBDP.uiValidEnMask = 0; ++ sRGXMMUPTEConfig_2MBDP.uiValidEnShift = 0; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_2MBDP ++ */ ++ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = 0; ++ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 0; ++ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = 0; ++ ++ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = 0; ++ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = 0; ++ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = 0; ++ ++ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = 0; ++ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 0; ++ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = 0; ++ ++ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = 0; ++ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig2MB ++ */ ++ gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; ++ gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; ++ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; ++ gsPageSizeConfig2MB.uiRefCount = 0; ++ gsPageSizeConfig2MB.uiMaxRefCount = 0; ++ ++ /* ++ * Setup sRGXMMUDeviceAttributes ++ */ ++ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV; ++ sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_1; ++ ++ /* ++ * The page table fits in one or more big physically adjacent pages, ++ * at most as big as the page table itself. ++ * To calculate its alignment/page size, calculate the log2 size of the page ++ * table taking into account all OSes, then round that down to a valid MIPS ++ * log2 page size (12, 14, 16 for a 4K, 16K, 64K page size). 
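++ *
++ * (Editorial worked example with hypothetical values, not taken from
++ * this patch: if RGX_NUM_OS_SUPPORTED were 8 and
++ * RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K were 12, then
++ * CeilLog2(8) + 12 = 15, which is odd; the "& ~1U" below clears
++ * bit 0 and rounds it down to 14, i.e. the 16K MIPS page size.)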
++ */ ++ sRGXMMUDeviceAttributes.ui32BaseAlign = ++ (CeilLog2(RGX_NUM_OS_SUPPORTED) + RGXMIPSFW_LOG2_PAGETABLE_SIZE_4K) & ~1U; ++ ++ /* 256K alignment might be too hard to achieve, fall back to 64K */ ++ sRGXMMUDeviceAttributes.ui32BaseAlign = ++ MIN(sRGXMMUDeviceAttributes.ui32BaseAlign, RGXMIPSFW_LOG2_PAGE_SIZE_64K); ++ ++ ++ ++ /* The base configuration is set to 4kB pages*/ ++ sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPTEConfig_4KBDP; ++ sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; ++ ++ /* Functions for deriving page table/dir/cat protection bits */ ++ sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; ++ sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; ++ sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; ++ sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; ++ sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; ++ sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; ++ ++ /* Functions for establishing configurations for PDE/PTE/DEVVADDR ++ on per-heap basis */ ++ sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB; ++ sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB; ++ ++ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4; ++ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8; ++ ++ psDeviceNode->psFirmwareMMUDevAttrs = &sRGXMMUDeviceAttributes; ++ ++ psDeviceNode->pfnValidateOrTweakPhysAddrs = RGXCheckTrampolineAddrs; ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR RGXCheckTrampolineAddrs(struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ MMU_DEVICEATTRIBS *psDevAttrs, ++ IMG_UINT64 *pui64Addr) ++{ ++ if (PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, MIPS)) ++ { ++ /* ++ * If mapping for the MIPS FW context, check for sensitive PAs ++ */ ++ if (psDevAttrs == psDevNode->psFirmwareMMUDevAttrs) ++ { ++ PVRSRV_RGXDEV_INFO *psDevice = (PVRSRV_RGXDEV_INFO *)psDevNode->pvDevice; ++ ++ if ((RGX_GET_FEATURE_VALUE(psDevice, PHYS_BUS_WIDTH) == 32) && ++ RGXMIPSFW_SENSITIVE_ADDR(*pui64Addr)) ++ { ++ *pui64Addr = psDevice->psTrampoline->sPhysAddr.uiAddr + RGXMIPSFW_TRAMPOLINE_OFFSET(*pui64Addr); ++ } ++ /* FIX_HW_BRN_63553 is mainlined for all MIPS cores */ ++ else if (*pui64Addr == 0x0 && !psDevice->sLayerParams.bDevicePA0IsValid) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s attempt to map addr 0x0 in the FW but 0x0 is not considered valid.", __func__)); ++ return PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE; ++ } ++ } ++ } ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = PVRSRV_OK; ++ ++#if defined(PDUMP) ++ psDeviceNode->pfnMMUGetContextID = NULL; ++#endif ++ ++ psDeviceNode->psFirmwareMMUDevAttrs = NULL; ++ ++#if defined(DEBUG) ++ PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:")); ++ PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d", ++ gsPageSizeConfig4KB.uiMaxRefCount)); ++ PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d", ++ gsPageSizeConfig4KB.uiRefCount)); ++ PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d", ++ gsPageSizeConfig16KB.uiMaxRefCount)); ++ PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d", ++ gsPageSizeConfig16KB.uiRefCount)); ++ PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d", ++ gsPageSizeConfig64KB.uiMaxRefCount)); ++ PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d", ++ gsPageSizeConfig64KB.uiRefCount)); ++ 
PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
++ gsPageSizeConfig256KB.uiMaxRefCount));
++ PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
++ gsPageSizeConfig256KB.uiRefCount));
++ PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
++ gsPageSizeConfig1MB.uiMaxRefCount));
++ PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
++ gsPageSizeConfig1MB.uiRefCount));
++ PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
++ gsPageSizeConfig2MB.uiMaxRefCount));
++ PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
++ gsPageSizeConfig2MB.uiRefCount));
++#endif
++ if (gsPageSizeConfig4KB.uiRefCount > 0 ||
++ gsPageSizeConfig16KB.uiRefCount > 0 ||
++ gsPageSizeConfig64KB.uiRefCount > 0 ||
++ gsPageSizeConfig256KB.uiRefCount > 0 ||
++ gsPageSizeConfig1MB.uiRefCount > 0 ||
++ gsPageSizeConfig2MB.uiRefCount > 0
++ )
++ {
++ PVR_DPF((PVR_DBG_ERROR, "RGXMipsMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
++ }
++
++ return eError;
++}
++
++/*************************************************************************/ /*!
++@Function RGXDerivePCEProt4
++@Description calculate the PCE protection flags based on a 4 byte entry
++@Return IMG_UINT32 The PCE protection flags
++*/ /**************************************************************************/
++static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
++{
++ PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
++ return 0;
++}
++
++
++/*************************************************************************/ /*!
++@Function RGXDerivePCEProt8
++@Description calculate the PCE protection flags based on an 8 byte entry
++@Return IMG_UINT64 The PCE protection flags
++*/ /**************************************************************************/
++static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
++{
++ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
++ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
++
++ PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
++ return 0;
++}
++
++
++/*************************************************************************/ /*!
++@Function RGXDerivePDEProt4
++@Description derive the PDE protection flags based on a 4 byte entry
++@Return IMG_UINT32 The PDE protection flags
++*/ /**************************************************************************/
++static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
++{
++ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
++ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
++ return 0;
++}
++
++
++/*************************************************************************/ /*!
++@Function RGXDerivePDEProt8
++@Description derive the PDE protection flags based on an 8 byte entry
++
++@Input uiLog2DataPageSize The log2 of the required page size.
++ E.g., for 4KiB pages, this parameter must be 12.
++ For 2MiB pages, it must be set to 21.
++
++@Return IMG_UINT64 The PDE protection flags
++*/ /**************************************************************************/
++static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
++{
++ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
++ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
++ return 0;
++}
++
++
++/*************************************************************************/ /*!
++@Function RGXDerivePTEProt4
++@Description calculate the PTE protection flags based on a 4 byte entry
++@Return IMG_UINT32 The PTE protection flags
++*/ /**************************************************************************/
++static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
++{
++ IMG_UINT32 ui32MMUFlags = 0;
++
++ if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
++ {
++ /* read/write */
++ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_DIRTY_EN;
++ }
++ else if (MMU_PROTFLAGS_READABLE & uiProtFlags)
++ {
++ /* read only */
++ }
++ else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
++ {
++ /* write only */
++ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN;
++ }
++ else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: neither read nor write specified..."));
++ }
++
++ /* cache coherency */
++ if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: cache coherency not supported for MIPS caches"));
++ }
++
++ /* cache setup */
++ if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
++ {
++ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_UNCACHED;
++ }
++ else
++ {
++ ui32MMUFlags |= gui32CachedPolicy <<
++ RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;
++ }
++
++ if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
++ {
++ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_VALID_EN;
++ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_GLOBAL_EN;
++ }
++
++ if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
++ {
++ /* PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt4: PMMETA Protect not existent for MIPS, option discarded")); */
++ }
++
++ return ui32MMUFlags;
++}
++
++/*************************************************************************/ /*!
++@Function RGXDerivePTEProt8
++@Description calculate the PTE protection flags based on an 8 byte entry
++@Return IMG_UINT64 The PTE protection flags
++*/ /**************************************************************************/
++static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
++{
++ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
++ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
++
++ PVR_DPF((PVR_DBG_ERROR, "8-byte PTE not supported on this device"));
++
++ return 0;
++}
++
++
++/*************************************************************************/ /*!
++@Function RGXGetPageSizeConfigCB
++@Description Set up configuration for variable sized data pages.
++ RGXPutPageSizeConfigCB has to be called to ensure correct
++ refcounting.
++@Return PVRSRV_ERROR
++*/ /**************************************************************************/
++static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
++ const MMU_PxE_CONFIG **ppsMMUPDEConfig,
++ const MMU_PxE_CONFIG **ppsMMUPTEConfig,
++ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
++ IMG_HANDLE *phPriv)
++{
++ MMU_PAGESIZECONFIG *psPageSizeConfig;
++
++ switch (uiLog2DataPageSize)
++ {
++ case RGXMIPSFW_LOG2_PAGE_SIZE_64K:
++ psPageSizeConfig = &gsPageSizeConfig64KB;
++ break;
++ case RGXMIPSFW_LOG2_PAGE_SIZE_4K:
++ psPageSizeConfig = &gsPageSizeConfig4KB;
++ break;
++ default:
++ PVR_DPF((PVR_DBG_ERROR,
++ "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
++ uiLog2DataPageSize));
++ *phPriv = NULL;
++ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
++ }
++
++ /* Point the caller's pointers at the config data */
++ *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
++ *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
++ *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
++
++#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
++ /* Increment the ref-count - we're not allocating anything here
++ (these are static structs), but one day we might, so we want
++ the Get/Put code to be balanced properly */
++ psPageSizeConfig->uiRefCount++;
++
++ /* This is purely for debug statistics */
++ psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
++ psPageSizeConfig->uiRefCount);
++#endif
++
++ *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
++ PVR_ASSERT(uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
++
++ return PVRSRV_OK;
++}
++
++/*************************************************************************/ /*!
++@Function RGXPutPageSizeConfigCB
++@Description Tells this code that the MMU module is done with the
++ configurations set up by RGXGetPageSizeConfigCB. This can
++ be a no-op.
++ Called after RGXGetPageSizeConfigCB.
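++ (Editorial sketch of the intended pairing, not part of the original
++ comment: each successful RGXGetPageSizeConfigCB call is matched by
++ one RGXPutPageSizeConfigCB call on the hPriv handle it returned, so
++ with SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT defined every per-size
++ uiRefCount has dropped back to zero by the time the unregister
++ path runs its unbalanced-usage check.)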
++@Return PVRSRV_ERROR ++*/ /**************************************************************************/ ++static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv) ++{ ++#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) ++ MMU_PAGESIZECONFIG *psPageSizeConfig; ++ IMG_UINT32 uiLog2DataPageSize; ++ ++ uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv; ++ ++ switch (uiLog2DataPageSize) ++ { ++ case RGXMIPSFW_LOG2_PAGE_SIZE_64K: ++ psPageSizeConfig = &gsPageSizeConfig64KB; ++ break; ++ case RGXMIPSFW_LOG2_PAGE_SIZE_4K: ++ psPageSizeConfig = &gsPageSizeConfig4KB; ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", ++ uiLog2DataPageSize)); ++ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; ++ } ++ ++ /* Ref-count here is not especially useful, but it's an extra ++ check that the API is being used correctly */ ++ psPageSizeConfig->uiRefCount--; ++#else ++ PVR_UNREFERENCED_PARAMETER(hPriv); ++#endif ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize) ++{ ++ PVR_UNREFERENCED_PARAMETER(ui32PDE); ++ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); ++ PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS")); ++ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; ++} ++ ++static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize) ++{ ++ PVR_UNREFERENCED_PARAMETER(ui64PDE); ++ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize); ++ PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS")); ++ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; ++} ++ ++void RGXMipsCheckFaultAddress(MMU_CONTEXT *psFwMMUCtx, ++ IMG_UINT32 ui32FwVA, ++ MMU_FAULT_DATA *psOutFaultData) ++{ ++ IMG_UINT32 *pui32PageTable = NULL; ++ PVRSRV_ERROR eError = MMU_AcquireCPUBaseAddr(psFwMMUCtx, (void**) &pui32PageTable); ++ MMU_LEVEL_DATA *psMMULevelData; ++ IMG_UINT32 ui32FwHeapBase = (IMG_UINT32) (RGX_FIRMWARE_RAW_HEAP_BASE & UINT_MAX); ++ IMG_UINT32 ui32PageSize = OSGetPageSize(); ++ ++ /* MIPS Firmware CPU must use the same page size as the Host */ ++ IMG_UINT32 ui32PTEIndex = ((ui32FwVA & ~(ui32PageSize - 1)) - ui32FwHeapBase) / ui32PageSize; ++ ++ psOutFaultData->eTopLevel = MMU_LEVEL_1; ++ psOutFaultData->eType = MMU_FAULT_TYPE_NON_PM; ++ ++ psMMULevelData = &psOutFaultData->sLevelData[MMU_LEVEL_1]; ++ psMMULevelData->uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; ++ psMMULevelData->ui32Index = ui32PTEIndex; ++ psMMULevelData->ui32NumOfEntries = RGX_FIRMWARE_RAW_HEAP_SIZE / ui32PageSize; ++ ++ if ((eError == PVRSRV_OK) && (pui32PageTable != NULL)) ++ { ++ psMMULevelData->ui64Address = pui32PageTable[ui32PTEIndex]; ++ } ++ else ++ { ++ psMMULevelData->ui64Address = 0U; ++ } ++ ++ psMMULevelData->psDebugStr = BITMASK_HAS(psMMULevelData->ui64Address, ++ RGXMIPSFW_TLB_VALID) ? ++ ("valid") : ("not valid"); ++} ++ ++ +diff --git a/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h b/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h +new file mode 100644 +index 000000000000..b2b39402cafe +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxmipsmmuinit.h +@@ -0,0 +1,97 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device specific initialisation routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific MMU initialisation for the MIPS firmware ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* NB: this file is not to be included arbitrarily. It exists solely ++ for the linkage between rgxinit.c and rgxmmuinit.c, the former ++ being otherwise cluttered by the contents of the latter */ ++ ++#ifndef SRVKM_RGXMIPSMMUINIT_H ++#define SRVKM_RGXMIPSMMUINIT_H ++ ++#include "device.h" ++#include "img_types.h" ++#include "mmu_common.h" ++#include "img_defs.h" ++#include "rgx_mips.h" ++ ++/* ++ ++ Labelling of fields within virtual address. 
No PD and PC are used currently for ++ the MIPS MMU ++*/ ++/* ++Page Table entry # ++*/ ++#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U) ++#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF)) ++ ++ ++/* PC entries related definitions */ ++/* No PC is currently used for MIPS MMU */ ++#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN (0U) ++#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT (0U) ++#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_CLRMSK (0U) ++ ++#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_SHIFT (0U) ++#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_CLRMSK (0U) ++#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_EN (0U) ++ ++/* PD entries related definitions */ ++/* No PD is currently used for MIPS MMU */ ++#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN (0U) ++#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT (0U) ++#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_CLRMSK (0U) ++ ++#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_SHIFT (0U) ++#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_CLRMSK (0U) ++#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_EN (0U) ++ ++ ++PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode); ++PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++void RGXMipsCheckFaultAddress(MMU_CONTEXT *psFwMMUCtx, ++ IMG_UINT32 ui32FwVA, ++ MMU_FAULT_DATA *psOutFaultData); ++ ++#endif /* #ifndef SRVKM_RGXMIPSMMUINIT_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxmmuinit.c b/drivers/gpu/drm/img-rogue/rgxmmuinit.c +new file mode 100644 +index 000000000000..629e7abdc370 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxmmuinit.c +@@ -0,0 +1,1079 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device specific initialisation routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific MMU initialisation ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ /**************************************************************************/ ++#include "rgxmmuinit.h" ++#include "rgxmmudefs_km.h" ++ ++#include "device.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "mmu_common.h" ++#include "pdump_mmu.h" ++ ++#include "pvr_debug.h" ++#include "pvrsrv_error.h" ++#include "rgx_memallocflags.h" ++#include "rgx_heaps.h" ++#include "pdump_km.h" ++ ++ ++/* useful macros */ ++/* units represented in a bitfield */ ++#define UNITS_IN_BITFIELD(Mask, Shift) ((Mask >> Shift) + 1) ++ ++ ++/* ++ * Bits of PT, PD and PC not involving addresses ++ */ ++ ++#define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \ ++ RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \ ++ RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \ ++ RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \ ++ RGX_MMUCTRL_PT_DATA_CC_EN | \ ++ RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \ ++ RGX_MMUCTRL_PT_DATA_VALID_EN) ++ ++#define RGX_MMUCTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \ ++ ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \ ++ RGX_MMUCTRL_PD_DATA_VALID_EN) ++ ++#define RGX_MMUCTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \ ++ RGX_MMUCTRL_PC_DATA_VALID_EN) ++ ++ ++ ++static MMU_PxE_CONFIG sRGXMMUPCEConfig; ++static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig; ++ ++ ++/* ++ * ++ * Configuration for heaps with 4kB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig4KB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 16kB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig16KB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 64kB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig64KB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 256kB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig256KB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 1MB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP; ++static MMU_PAGESIZECONFIG gsPageSizeConfig1MB; ++ ++ ++/* ++ * ++ * Configuration for heaps with 2MB Data-Page size ++ * ++ */ ++ ++static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP; ++static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP; ++static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP; ++static 
MMU_PAGESIZECONFIG gsPageSizeConfig2MB; ++ ++ ++/* Forward declaration of protection bits derivation functions, for ++ the following structure */ ++static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); ++static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags); ++static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); ++static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags); ++static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize); ++static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags); ++ ++static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, ++ const MMU_PxE_CONFIG **ppsMMUPDEConfig, ++ const MMU_PxE_CONFIG **ppsMMUPTEConfig, ++ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, ++ IMG_HANDLE *phPriv); ++ ++static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv); ++ ++static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize); ++static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize); ++ ++static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes; ++ ++PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ /* Setup of Px Entries: ++ * ++ * ++ * PAGE TABLE (8 Byte): ++ * ++ * | 62 | 61...40 | 39...12 (varies) | 11...6 | 5 | 4 | 3 | 2 | 1 | 0 | ++ * | PM/Meta protect | VP Page (39:18) | Physical Page | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid | ++ * ++ * ++ * PAGE DIRECTORY (8 Byte): ++ * ++ * | 40 | 39...5 (varies) | 4 | 3...1 | 0 | ++ * | Entry Pending | Page Table base address | (reserved) | Page Size | Valid | ++ * ++ * ++ * PAGE CATALOGUE (4 Byte): ++ * ++ * | 31...4 | 3...2 | 1 | 0 | ++ * | Page Directory base address | (reserved) | Entry Pending | Valid | ++ * ++ */ ++ ++ ++ /* Example how to get the PD address from a PC entry. ++ * The procedure is the same for PD and PT entries to retrieve PT and Page addresses: ++ * ++ * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&': ++ * | 31...4 | 3...2 | 1 | 0 | ++ * | PD Addr | 0 | 0 | 0 | ++ * ++ * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>': ++ * | 27...0 | ++ * | PD Addr | ++ * ++ * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<': ++ * | 39...0 | ++ * | PD Addr | ++ * ++ */ ++ ++ ++ sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName = ++ PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL]); ++ ++ /* ++ * Setup sRGXMMUPCEConfig ++ */ ++ sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */ ++ sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */ ++ ++ sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */ ++ sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. */ ++ ++ sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid)*/ ++ sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the status bits */ ++ ++ sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */ ++ sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */ ++ ++ /* ++ * Setup sRGXMMUTopLevelDevVAddrConfig ++ */ ++ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. 
device address */ ++ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PC index */ ++ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask, ++ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift)); ++ ++ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */ ++ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PD index */ ++ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask, ++ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift)); ++ ++ /* ++ * ++ * Configuration for heaps with 4kB Data-Page size ++ * ++ */ ++ ++ /* ++ * Setup sRGXMMUPDEConfig_4KBDP ++ */ ++ sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); ++ sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12; ++ sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12; ++ ++ sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); ++ sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1; ++ ++ sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; ++ sRGXMMUPDEConfig_4KBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; ++ sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_4KBDP ++ */ ++ sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000); ++ sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12; ++ sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */ ++ ++ sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; ++ sRGXMMUPTEConfig_4KBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; ++ sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_4KBDP ++ */ ++ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask, ++ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift)); ++ ++ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask, ++ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift)); ++ ++ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask, ++ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift)); ++ ++ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff); ++ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig4KB ++ */ ++ gsPageSizeConfig4KB.psPDEConfig = 
&sRGXMMUPDEConfig_4KBDP; ++ gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP; ++ gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP; ++ gsPageSizeConfig4KB.uiRefCount = 0; ++ gsPageSizeConfig4KB.uiMaxRefCount = 0; ++ ++ ++ /* ++ * ++ * Configuration for heaps with 16kB Data-Page size ++ * ++ */ ++ ++ /* ++ * Setup sRGXMMUPDEConfig_16KBDP ++ */ ++ sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); ++ sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10; ++ sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10; ++ ++ sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); ++ sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1; ++ ++ sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; ++ sRGXMMUPDEConfig_16KBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; ++ sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_16KBDP ++ */ ++ sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000); ++ sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14; ++ sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14; ++ ++ sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; ++ sRGXMMUPTEConfig_16KBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; ++ sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_16KBDP ++ */ ++ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask, ++ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask, ++ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000); ++ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14; ++ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask, ++ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift)); ++ ++ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff); ++ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig16KB ++ */ ++ gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP; ++ gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP; ++ gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP; ++ gsPageSizeConfig16KB.uiRefCount = 0; ++ gsPageSizeConfig16KB.uiMaxRefCount = 0; ++ ++ ++ /* ++ * ++ * Configuration for heaps with 64kB Data-Page size ++ * ++ */ ++ ++ /* ++ * Setup sRGXMMUPDEConfig_64KBDP ++ */ ++ sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); ++ sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8; ++ sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8; ++ ++ sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); ++ 
sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1; ++ ++ sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; ++ sRGXMMUPDEConfig_64KBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; ++ sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_64KBDP ++ */ ++ sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000); ++ sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16; ++ sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16; ++ ++ sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; ++ sRGXMMUPTEConfig_64KBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; ++ sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_64KBDP ++ */ ++ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask, ++ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask, ++ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000); ++ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16; ++ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask, ++ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff); ++ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig64KB ++ */ ++ gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP; ++ gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP; ++ gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP; ++ gsPageSizeConfig64KB.uiRefCount = 0; ++ gsPageSizeConfig64KB.uiMaxRefCount = 0; ++ ++ ++ /* ++ * ++ * Configuration for heaps with 256kB Data-Page size ++ * ++ */ ++ ++ /* ++ * Setup sRGXMMUPDEConfig_256KBDP ++ */ ++ sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); ++ sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6; ++ sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6; ++ ++ sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); ++ sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1; ++ ++ sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; ++ sRGXMMUPDEConfig_256KBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; ++ sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP ++ */ ++ sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000); ++ sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18; ++ sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18; ++ ++ sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; ++ 
sRGXMMUPTEConfig_256KBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; ++ sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_256KBDP ++ */ ++ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask, ++ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask, ++ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000); ++ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18; ++ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask, ++ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff); ++ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig256KB ++ */ ++ gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP; ++ gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP; ++ gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP; ++ gsPageSizeConfig256KB.uiRefCount = 0; ++ gsPageSizeConfig256KB.uiMaxRefCount = 0; ++ ++ /* ++ * Setup sRGXMMUPDEConfig_1MBDP ++ */ ++ sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); ++ /* ++ * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even ++ * if they contain fewer entries. 
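++ *
++ * (Editorial illustration, derived from the masks set up below: with
++ * 1MB data pages uiPTIndexMask is 0x0000100000, so a page table holds
++ * UNITS_IN_BITFIELD(0x0000100000, 20) = 2 entries of 8 bytes, i.e.
++ * 16 bytes, which is smaller than the 64-byte hardware minimum,
++ * hence the 1<<6 alignment.)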
++ */ ++ sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6; ++ sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6; ++ ++ sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); ++ sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1; ++ ++ sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; ++ sRGXMMUPDEConfig_1MBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; ++ sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_1MBDP ++ */ ++ sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000); ++ sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20; ++ sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20; ++ ++ sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; ++ sRGXMMUPTEConfig_1MBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; ++ sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_1MBDP ++ */ ++ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask, ++ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask, ++ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000); ++ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20; ++ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask, ++ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff); ++ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig1MB ++ */ ++ gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP; ++ gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP; ++ gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP; ++ gsPageSizeConfig1MB.uiRefCount = 0; ++ gsPageSizeConfig1MB.uiMaxRefCount = 0; ++ ++ /* ++ * Setup sRGXMMUPDEConfig_2MBDP ++ */ ++ sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0); ++ /* ++ * The hardware requires that PT tables need be 1<<6 = 64 byte aligned even ++ * if they contain fewer entries. 
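++ *
++ * (Editorial illustration, derived from the masks set up below: with
++ * 2MB data pages uiPTIndexMask is 0, so the page table degenerates to
++ * UNITS_IN_BITFIELD(0, 21) = 1 entry of 8 bytes; the 1<<6 alignment
++ * therefore comes entirely from the hardware requirement rather than
++ * from the table size.)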
++ */ ++ sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6; ++ sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6; ++ ++ sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e); ++ sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1; ++ ++ sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK; ++ sRGXMMUPDEConfig_2MBDP.uiProtShift = 0; ++ ++ sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN; ++ sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUPTEConfig_2MBDP ++ */ ++ sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8; ++ ++ sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000); ++ sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21; ++ sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21; ++ ++ sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK; ++ sRGXMMUPTEConfig_2MBDP.uiProtShift = 0; ++ ++ sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN; ++ sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT; ++ ++ /* ++ * Setup sRGXMMUDevVAddrConfig_2MBDP ++ */ ++ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask, ++ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; ++ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; ++ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask, ++ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000); ++ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21; ++ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask, ++ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift)); ++ ++ ++ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff); ++ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0; ++ sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0; ++ ++ /* ++ * Setup gsPageSizeConfig2MB ++ */ ++ gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP; ++ gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP; ++ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP; ++ gsPageSizeConfig2MB.uiRefCount = 0; ++ gsPageSizeConfig2MB.uiMaxRefCount = 0; ++ ++ /* ++ * Setup sRGXMMUDeviceAttributes ++ */ ++ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT; ++ sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3; ++ sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT; ++ sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig; ++ sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig; ++ ++ /* Functions for deriving page table/dir/cat protection bits */ ++ sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8; ++ sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4; ++ sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8; ++ sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4; ++ sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8; ++ sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4; ++ ++ /* Functions for establishing configurations for PDE/PTE/DEVVADDR ++ on per-heap basis */ ++ 
sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
++ sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
++
++ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
++ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
++ sRGXMMUDeviceAttributes.pfnGetPageSizeFromVirtAddr = NULL;
++
++ psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;
++
++ return PVRSRV_OK;
++}
++
++PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++ PVRSRV_ERROR eError;
++
++ eError = PVRSRV_OK;
++
++#if defined(PDUMP)
++ psDeviceNode->pfnMMUGetContextID = NULL;
++#endif
++
++ psDeviceNode->psMMUDevAttrs = NULL;
++
++#if defined(DEBUG)
++ PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
++ PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
++ gsPageSizeConfig4KB.uiMaxRefCount));
++ PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
++ gsPageSizeConfig4KB.uiRefCount));
++ PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
++ gsPageSizeConfig16KB.uiMaxRefCount));
++ PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
++ gsPageSizeConfig16KB.uiRefCount));
++ PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
++ gsPageSizeConfig64KB.uiMaxRefCount));
++ PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
++ gsPageSizeConfig64KB.uiRefCount));
++ PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
++ gsPageSizeConfig256KB.uiMaxRefCount));
++ PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
++ gsPageSizeConfig256KB.uiRefCount));
++ PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
++ gsPageSizeConfig1MB.uiMaxRefCount));
++ PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
++ gsPageSizeConfig1MB.uiRefCount));
++ PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
++ gsPageSizeConfig2MB.uiMaxRefCount));
++ PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
++ gsPageSizeConfig2MB.uiRefCount));
++#endif
++ if (gsPageSizeConfig4KB.uiRefCount > 0 ||
++ gsPageSizeConfig16KB.uiRefCount > 0 ||
++ gsPageSizeConfig64KB.uiRefCount > 0 ||
++ gsPageSizeConfig256KB.uiRefCount > 0 ||
++ gsPageSizeConfig1MB.uiRefCount > 0 ||
++ gsPageSizeConfig2MB.uiRefCount > 0
++ )
++ {
++ PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
++ }
++
++ return eError;
++}
++
++/*************************************************************************/ /*!
++@Function RGXDerivePCEProt4
++@Description calculate the PCE protection flags based on a 4 byte entry
++@Return IMG_UINT32 The PCE protection flags
++ */ /**************************************************************************/
++static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
++{
++ return (uiProtFlags & MMU_PROTFLAGS_INVALID)?0:RGX_MMUCTRL_PC_DATA_VALID_EN;
++}
++
++
++/*************************************************************************/ /*!
++@Function RGXDerivePCEProt8
++@Description calculate the PCE protection flags based on an 8 byte entry
++@Return IMG_UINT64 The PCE protection flags
++ */ /**************************************************************************/
++static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
++{
++ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
++ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
++
++ PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
++ return 0;
++}
++
++
++/*************************************************************************/ /*!
++/*************************************************************************/ /*!
++@Function       RGXDerivePDEProt4
++@Description    derive the PDE protection flags based on a 4 byte entry
++@Return         IMG_UINT32 Derived protection flags
++ */ /**************************************************************************/
++static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
++{
++	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
++	PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
++	return 0;
++}
++
++
++/*************************************************************************/ /*!
++@Function       RGXDerivePDEProt8
++@Description    derive the PDE protection flags based on an 8 byte entry
++
++@Input          uiLog2DataPageSize The log2 of the required page size.
++                E.g. for 4KiB pages, this parameter must be 12.
++                For 2MiB pages, it must be set to 21.
++
++@Return         IMG_UINT64 Derived protection flags
++ */ /**************************************************************************/
++static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
++{
++	IMG_UINT64 ret_value = 0; /* 0 means invalid */
++
++	if (!(uiProtFlags & MMU_PROTFLAGS_INVALID)) /* if not invalid */
++	{
++		switch (uiLog2DataPageSize)
++		{
++			case RGX_HEAP_4KB_PAGE_SHIFT:
++				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB;
++				break;
++			case RGX_HEAP_16KB_PAGE_SHIFT:
++				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB;
++				break;
++			case RGX_HEAP_64KB_PAGE_SHIFT:
++				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB;
++				break;
++			case RGX_HEAP_256KB_PAGE_SHIFT:
++				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB;
++				break;
++			case RGX_HEAP_1MB_PAGE_SHIFT:
++				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB;
++				break;
++			case RGX_HEAP_2MB_PAGE_SHIFT:
++				ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB;
++				break;
++			default:
++				PVR_DPF((PVR_DBG_ERROR,
++				         "%s:%d: in function<%s>: Invalid parameter log2_page_size. Expected {12, 14, 16, 18, 20, 21}. Got [%u]",
++				         __FILE__, __LINE__, __func__, uiLog2DataPageSize));
++		}
++	}
++	return ret_value;
++}
++
++
++/*************************************************************************/ /*!
++@Function       RGXDerivePTEProt4
++@Description    calculate the PTE protection flags based on a 4 byte entry
++@Return         IMG_UINT32 Derived protection flags
++ */ /**************************************************************************/
++static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
++{
++	PVR_UNREFERENCED_PARAMETER(uiProtFlags);
++	PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device"));
++
++	return 0;
++}
++
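++/*
++ * Usage sketch for RGXDerivePDEProt8 (illustration only): with no invalid
++ * flag set and a 64KiB data page, the helper above returns the valid bit
++ * together with the matching page-size field:
++ *
++ *   IMG_UINT64 ui64PDEProt = RGXDerivePDEProt8(0, RGX_HEAP_64KB_PAGE_SHIFT);
++ *   PVR_ASSERT(ui64PDEProt == (RGX_MMUCTRL_PD_DATA_VALID_EN |
++ *                              RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB));
++ */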
++/*************************************************************************/ /*!
++@Function       RGXDerivePTEProt8
++@Description    calculate the PTE protection flags based on an 8 byte entry
++@Return         IMG_UINT64 Derived protection flags
++ */ /**************************************************************************/
++static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
++{
++	IMG_UINT64 ui64MMUFlags = 0;
++
++	PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
++
++	if (((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
++	{
++		/* read/write */
++	}
++	else if (MMU_PROTFLAGS_READABLE & uiProtFlags)
++	{
++		/* read only */
++		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
++	}
++	else if (MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
++	{
++		/* write only */
++		PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt8: write-only is not possible on this device"));
++	}
++	else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified"));
++	}
++
++	/* cache coherency */
++	if (MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
++	{
++		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN;
++	}
++
++	/* cache setup */
++	if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
++	{
++		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN;
++	}
++
++	if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
++	{
++		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN;
++	}
++
++	if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
++	{
++		ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN;
++	}
++
++	return ui64MMUFlags;
++}
++
++
++/*************************************************************************/ /*!
++@Function       RGXGetPageSizeConfigCB
++@Description    Set up configuration for variable sized data pages.
++                RGXPutPageSizeConfigCB has to be called to ensure correct
++                refcounting.
++@Return PVRSRV_ERROR ++ */ /**************************************************************************/ ++static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize, ++ const MMU_PxE_CONFIG **ppsMMUPDEConfig, ++ const MMU_PxE_CONFIG **ppsMMUPTEConfig, ++ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig, ++ IMG_HANDLE *phPriv) ++{ ++ MMU_PAGESIZECONFIG *psPageSizeConfig; ++ ++ switch (uiLog2DataPageSize) ++ { ++ case RGX_HEAP_4KB_PAGE_SHIFT: ++ psPageSizeConfig = &gsPageSizeConfig4KB; ++ break; ++ case RGX_HEAP_16KB_PAGE_SHIFT: ++ psPageSizeConfig = &gsPageSizeConfig16KB; ++ break; ++ case RGX_HEAP_64KB_PAGE_SHIFT: ++ psPageSizeConfig = &gsPageSizeConfig64KB; ++ break; ++ case RGX_HEAP_256KB_PAGE_SHIFT: ++ psPageSizeConfig = &gsPageSizeConfig256KB; ++ break; ++ case RGX_HEAP_1MB_PAGE_SHIFT: ++ psPageSizeConfig = &gsPageSizeConfig1MB; ++ break; ++ case RGX_HEAP_2MB_PAGE_SHIFT: ++ psPageSizeConfig = &gsPageSizeConfig2MB; ++ break; ++ default: ++ PVR_DPF((PVR_DBG_ERROR, ++ "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x", ++ uiLog2DataPageSize)); ++ *phPriv = NULL; ++ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE; ++ } ++ ++ /* Refer caller's pointers to the data */ ++ *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig; ++ *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig; ++ *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig; ++ ++#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT) ++ /* Increment ref-count - not that we're allocating anything here ++ (I'm using static structs), but one day we might, so we want ++ the Get/Put code to be balanced properly */ ++ psPageSizeConfig->uiRefCount++; ++ ++ /* This is purely for debug statistics */ ++ psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount, ++ psPageSizeConfig->uiRefCount); ++#endif ++ ++ *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize; ++ PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv); ++ ++ return PVRSRV_OK; ++} ++ ++/*************************************************************************/ /*! ++@Function RGXPutPageSizeConfig ++@Description Tells this code that the mmu module is done with the ++ configurations set in RGXGetPageSizeConfig. This can ++ be a no-op. ++ Called after RGXGetPageSizeConfigCB. 
++@Return         PVRSRV_ERROR
++ */ /**************************************************************************/
++static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
++{
++#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
++	MMU_PAGESIZECONFIG *psPageSizeConfig;
++	IMG_UINT32 uiLog2DataPageSize;
++
++	uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
++
++	switch (uiLog2DataPageSize)
++	{
++		case RGX_HEAP_4KB_PAGE_SHIFT:
++			psPageSizeConfig = &gsPageSizeConfig4KB;
++			break;
++		case RGX_HEAP_16KB_PAGE_SHIFT:
++			psPageSizeConfig = &gsPageSizeConfig16KB;
++			break;
++		case RGX_HEAP_64KB_PAGE_SHIFT:
++			psPageSizeConfig = &gsPageSizeConfig64KB;
++			break;
++		case RGX_HEAP_256KB_PAGE_SHIFT:
++			psPageSizeConfig = &gsPageSizeConfig256KB;
++			break;
++		case RGX_HEAP_1MB_PAGE_SHIFT:
++			psPageSizeConfig = &gsPageSizeConfig1MB;
++			break;
++		case RGX_HEAP_2MB_PAGE_SHIFT:
++			psPageSizeConfig = &gsPageSizeConfig2MB;
++			break;
++		default:
++			PVR_DPF((PVR_DBG_ERROR,
++			         "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
++			         uiLog2DataPageSize));
++			return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
++	}
++
++	/* Ref-count here is not especially useful, but it's an extra
++	   check that the API is being used correctly */
++	psPageSizeConfig->uiRefCount--;
++#else
++	PVR_UNREFERENCED_PARAMETER(hPriv);
++#endif
++	return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
++{
++	PVR_UNREFERENCED_PARAMETER(ui32PDE);
++	PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
++	PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
++	return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
++}
++
++static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
++{
++	switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK))
++	{
++		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB:
++			*pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT;
++			break;
++		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB:
++			*pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT;
++			break;
++		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB:
++			*pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT;
++			break;
++		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB:
++			*pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT;
++			break;
++		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB:
++			*pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT;
++			break;
++		case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB:
++			*pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT;
++			break;
++		default:
++			return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
++	}
++	return PVRSRV_OK;
++}
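++/*
++ * Decode sketch (illustration only, using only the cases handled above):
++ * a PD entry carrying the 2MB page-size field maps back to
++ * RGX_HEAP_2MB_PAGE_SHIFT, i.e. a log2 page size of 21:
++ *
++ *   IMG_UINT32 ui32Log2PageSize;
++ *   if (RGXGetPageSizeFromPDE8(RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB,
++ *                              &ui32Log2PageSize) == PVRSRV_OK)
++ *   {
++ *       PVR_ASSERT(ui32Log2PageSize == RGX_HEAP_2MB_PAGE_SHIFT);
++ *   }
++ */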
+diff --git a/drivers/gpu/drm/img-rogue/rgxmmuinit.h b/drivers/gpu/drm/img-rogue/rgxmmuinit.h
+new file mode 100644
+index 000000000000..0591628d6ad3
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxmmuinit.h
+@@ -0,0 +1,60 @@
++/*************************************************************************/ /*!
++@File
++@Title          Device specific initialisation routines
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Device specific MMU initialisation
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++/* NB: this file is not to be included arbitrarily. It exists solely
++   for the linkage between rgxinit.c and rgxmmuinit.c, the former
++   being otherwise cluttered by the contents of the latter */
++
++#ifndef SRVKM_RGXMMUINIT_H
++#define SRVKM_RGXMMUINIT_H
++
++#include "device.h"
++#include "img_types.h"
++#include "mmu_common.h"
++#include "img_defs.h"
++
++PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
++PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++
++#endif /* #ifndef SRVKM_RGXMMUINIT_H */
+diff --git a/drivers/gpu/drm/img-rogue/rgxmulticore.c b/drivers/gpu/drm/img-rogue/rgxmulticore.c
+new file mode 100644
+index 000000000000..a888e70015db
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxmulticore.c
+@@ -0,0 +1,224 @@
++/*************************************************************************/ /*!
++@File           rgxmulticore.c
++@Title          Functions related to multicore devices
++@Codingstyle    IMG
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Kernel mode support functions for multicore RGX devices.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include "rgxdevice.h"
++#include "rgxdefs_km.h"
++#include "pdump_km.h"
++#include "rgxmulticore.h"
++#include "multicore_defs.h"
++#include "allocmem.h"
++#include "pvr_debug.h"
++
++/*
++ * check that register defines match our hardcoded definitions.
++ * Rogue has these, volcanic does not.
++ */
++#if ((RGX_MULTICORE_CAPABILITY_FRAGMENT_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_FRAGMENT_EN) || \
++     (RGX_MULTICORE_CAPABILITY_GEOMETRY_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_GEOMETRY_EN) || \
++     (RGX_MULTICORE_CAPABILITY_COMPUTE_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_COMPUTE_EN) || \
++     (RGX_MULTICORE_CAPABILITY_PRIMARY_EN != RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN) || \
++     (RGX_MULTICORE_ID_CLRMSK != RGX_CR_MULTICORE_GPU_ID_CLRMSK))
++#error "Rogue definitions for RGX_CR_MULTICORE_GPU register have changed"
++#endif
++
++
++static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                        IMG_UINT32 ui32CapsSize,
++                                        IMG_UINT32 *pui32NumCores,
++                                        IMG_UINT64 *pui64Caps);
++
++
++/*
++ * RGXGetMultiCoreInfo:
++ * Return multicore information to clients.
++ * Return not_supported on cores without multicore.
++ */
++static PVRSRV_ERROR RGXGetMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode,
++                                        IMG_UINT32 ui32CapsSize,
++                                        IMG_UINT32 *pui32NumCores,
++                                        IMG_UINT64 *pui64Caps)
++{
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	if (psDeviceNode->ui32MultiCoreNumCores == 0)
++	{
++		/* MULTICORE not supported on this device */
++		eError = PVRSRV_ERROR_NOT_SUPPORTED;
++	}
++	else
++	{
++		*pui32NumCores = psDeviceNode->ui32MultiCoreNumCores;
++		if (ui32CapsSize > 0)
++		{
++			if (ui32CapsSize < psDeviceNode->ui32MultiCoreNumCores)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "Multicore caps buffer too small"));
++				eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
++			}
++			else
++			{
++				IMG_UINT32 i;
++
++				for (i = 0; i < psDeviceNode->ui32MultiCoreNumCores; ++i)
++				{
++					pui64Caps[i] = psDeviceNode->pui64MultiCoreCapabilities[i];
++				}
++			}
++		}
++	}
++
++	return eError;
++}
++
++
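++/*
++ * Caller sketch (illustration only; clients reach this helper through the
++ * psDeviceNode->pfnGetMultiCoreInfo callback registered below): query the
++ * core count with a zero-sized caps buffer first, then fetch one 64-bit
++ * capability word per core:
++ *
++ *   IMG_UINT32 ui32NumCores;
++ *   IMG_UINT64 *paui64Caps;
++ *
++ *   if (RGXGetMultiCoreInfo(psDeviceNode, 0, &ui32NumCores, NULL) == PVRSRV_OK)
++ *   {
++ *       paui64Caps = OSAllocMem(ui32NumCores * sizeof(*paui64Caps));
++ *       if (paui64Caps != NULL)
++ *       {
++ *           RGXGetMultiCoreInfo(psDeviceNode, ui32NumCores,
++ *                               &ui32NumCores, paui64Caps);
++ *       }
++ *   }
++ */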
++/*
++ * RGXInitMultiCoreInfo:
++ * Read multicore HW registers and fill in data structure for clients.
++ * Return not supported on cores without multicore.
++ */
++PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	if (psDeviceNode->pfnGetMultiCoreInfo != NULL)
++	{
++		/* we only set this up once */
++		return PVRSRV_OK;
++	}
++
++	/* defaults for non-multicore devices */
++	psDeviceNode->ui32MultiCoreNumCores = 0;
++	psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1);
++	psDeviceNode->pui64MultiCoreCapabilities = NULL;
++	psDeviceNode->pfnGetMultiCoreInfo = NULL;
++
++	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
++	{
++		IMG_UINT32 ui32MulticoreRegBankOffset = (1 << RGX_GET_FEATURE_VALUE(psDevInfo, XPU_MAX_REGBANKS_ADDR_WIDTH));
++		IMG_UINT32 ui32MulticoreGPUReg = RGX_CR_MULTICORE_GPU;
++		IMG_UINT32 ui32NumCores;
++		IMG_UINT32 i;
++
++		ui32NumCores = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MULTICORE_SYSTEM);
++#if !defined(NO_HARDWARE)
++		/* check that the number of cores reported is in-bounds */
++		if (ui32NumCores > (RGX_CR_MULTICORE_SYSTEM_MASKFULL >> RGX_CR_MULTICORE_SYSTEM_GPU_COUNT_SHIFT))
++		{
++			PVR_DPF((PVR_DBG_ERROR, "invalid return (%u) read from MULTICORE_SYSTEM", ui32NumCores));
++			return PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
++		}
++#else
++		/* for nohw set to max so clients can allocate enough memory for all pdump runs on any config */
++		ui32NumCores = RGX_MULTICORE_MAX_NOHW_CORES;
++#endif
++		PVR_DPF((PVR_DBG_MESSAGE, "Multicore system has %u cores", ui32NumCores));
++		PDUMPCOMMENT(psDeviceNode, "RGX Multicore has %d cores\n", ui32NumCores);
++
++		/* allocate storage for capabilities */
++		psDeviceNode->pui64MultiCoreCapabilities = OSAllocMem(ui32NumCores * sizeof(psDeviceNode->pui64MultiCoreCapabilities[0]));
++		if (psDeviceNode->pui64MultiCoreCapabilities == NULL)
++		{
++			PVR_DPF((PVR_DBG_ERROR, "%s: Failed to alloc memory for Multicore info", __func__));
++			return PVRSRV_ERROR_OUT_OF_MEMORY;
++		}
++
++		psDeviceNode->ui32MultiCoreNumCores = ui32NumCores;
++
++		for (i = 0; i < ui32NumCores; ++i)
++		{
++			#if !defined(NO_HARDWARE)
++			psDeviceNode->pui64MultiCoreCapabilities[i] =
++				OSReadHWReg64(psDevInfo->pvRegsBaseKM, ui32MulticoreGPUReg) & RGX_CR_MULTICORE_GPU_MASKFULL;
++			#else
++			/* emulation for what we think caps are */
++			psDeviceNode->pui64MultiCoreCapabilities[i] =
++				i | ((i == 0) ? (RGX_MULTICORE_CAPABILITY_PRIMARY_EN
++				                 | RGX_MULTICORE_CAPABILITY_GEOMETRY_EN) : 0)
++				| RGX_MULTICORE_CAPABILITY_COMPUTE_EN
++				| RGX_MULTICORE_CAPABILITY_FRAGMENT_EN;
++			#endif
++			PVR_DPF((PVR_DBG_MESSAGE, "Core %d has capabilities value 0x%x", i, (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i]));
++			PDUMPCOMMENT(psDeviceNode, "\tCore %d has caps 0x%08x\n", i,
++			             (IMG_UINT32)psDeviceNode->pui64MultiCoreCapabilities[i]);
++
++			if (psDeviceNode->pui64MultiCoreCapabilities[i] & RGX_CR_MULTICORE_GPU_CAPABILITY_PRIMARY_EN)
++			{
++				psDeviceNode->ui32MultiCorePrimaryId = (psDeviceNode->pui64MultiCoreCapabilities[i]
++				                                        & ~RGX_CR_MULTICORE_GPU_ID_CLRMSK)
++				                                       >> RGX_CR_MULTICORE_GPU_ID_SHIFT;
++			}
++
++			ui32MulticoreGPUReg += ui32MulticoreRegBankOffset;
++		}
++
++		/* Register callback to return info about multicore setup to client bridge */
++		psDeviceNode->pfnGetMultiCoreInfo = RGXGetMultiCoreInfo;
++	}
++	else
++	{
++		/* MULTICORE not supported on this device */
++		eError = PVRSRV_ERROR_NOT_SUPPORTED;
++	}
++
++	return eError;
++}
++
++
++/*
++ * RGXDeInitMultiCoreInfo:
++ * Release resources and clear the MultiCore values in the DeviceNode.
++ */
++void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	if (psDeviceNode->pui64MultiCoreCapabilities != NULL)
++	{
++		OSFreeMem(psDeviceNode->pui64MultiCoreCapabilities);
++		psDeviceNode->pui64MultiCoreCapabilities = NULL;
++		psDeviceNode->ui32MultiCoreNumCores = 0;
++		psDeviceNode->ui32MultiCorePrimaryId = (IMG_UINT32)(-1);
++	}
++	psDeviceNode->pfnGetMultiCoreInfo = NULL;
++}
+diff --git a/drivers/gpu/drm/img-rogue/rgxmulticore.h b/drivers/gpu/drm/img-rogue/rgxmulticore.h
+new file mode 100644
+index 000000000000..b45a20ab5e14
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxmulticore.h
+@@ -0,0 +1,54 @@
++/*************************************************************************/ /*!
++@File           rgxmulticore.h
++@Title          Functions related to multicore devices
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Declarations of the kernel mode multicore device support
++                functions.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXMULTICORE_H ++#define RGXMULTICORE_H ++ ++#include "pvrsrv_error.h" ++#include "pvrsrv.h" ++ ++PVRSRV_ERROR RGXInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode); ++void RGXDeInitMultiCoreInfo(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++#endif /* RGXMULTICORE_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxpower.c b/drivers/gpu/drm/img-rogue/rgxpower.c +new file mode 100644 +index 000000000000..6cb9ae0ec748 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxpower.c +@@ -0,0 +1,1628 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device specific power routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if defined(__linux__) ++#include ++#else ++#include ++#endif ++ ++#include "rgxpower.h" ++#include "rgxinit.h" ++#include "rgx_fwif_km.h" ++#include "rgxfwutils.h" ++#include "pdump_km.h" ++#include "pvr_debug.h" ++#include "osfunc.h" ++#include "rgxdebug.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "rgxtimecorr.h" ++#include "devicemem_utils.h" ++#include "htbserver.h" ++#include "rgxstartstop.h" ++#include "rgxfwimageutils.h" ++#include "sync.h" ++#include "rgxdefs_km.h" ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++#include "process_stats.h" ++#endif ++#if defined(SUPPORT_LINUX_DVFS) ++#include "pvr_dvfs_device.h" ++#endif ++#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) ++#include "oskm_apphint.h" ++#endif ++ ++static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXFWIF_KCCB_CMD sCmd; ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32CmdKCCBSlot; ++ ++ /* Send the Timeout notification to the FW */ ++ sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; ++ sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ; ++ sCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT; ++ ++ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, ++ &sCmd, ++ PDUMP_FLAGS_NONE, ++ &ui32CmdKCCBSlot); ++ ++ return eError; ++} ++ ++static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb; ++ IMG_UINT64 *paui64StatsCounters; ++ IMG_UINT64 ui64LastPeriod; ++ IMG_UINT64 ui64LastState; ++ IMG_UINT64 ui64LastTime; ++ IMG_UINT64 ui64TimeNow; ++ ++ psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb; ++ paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0]; ++ ++ OSLockAcquire(psDevInfo->hGPUUtilLock); ++ ++ ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64(psDevInfo->psDeviceNode)); ++ ++ /* Update counters to account for the time since the last update */ ++ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord); ++ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord); ++ ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime); ++ paui64StatsCounters[ui64LastState] += ui64LastPeriod; ++ ++ /* Update state and time of the latest update */ ++ psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState); ++ ++ OSLockRelease(psDevInfo->hGPUUtilLock); ++} ++ ++static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ if (psDevConfig->pfnTDRGXStop == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!")); ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++ } ++ ++ eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData); ++#else ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ eError = RGXStop(&psDevInfo->sLayerParams); ++#endif ++ ++ return eError; ++} ++ ++/* ++ RGXPrePowerState ++*/ ++PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ ++ if ((eNewPowerState != 
eCurrentPowerState) && ++ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON)) ++ { ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGXFWIF_KCCB_CMD sPowCmd; ++ IMG_UINT32 ui32CmdKCCBSlot; ++ ++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; ++ ++ /* Send the Power off request to the FW */ ++ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW; ++ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ; ++ sPowCmd.uCmdData.sPowData.uPowerReqData.bForced = BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED); ++ ++ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", ++ __func__)); ++ return eError; ++ } ++ ++ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, ++ &sPowCmd, ++ PDUMP_FLAGS_NONE, ++ &ui32CmdKCCBSlot); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send Power off request", ++ __func__)); ++ return eError; ++ } ++ ++ /* Wait for the firmware to complete processing. It cannot use PVRSRVWaitForValueKM as it relies ++ on the EventObject which is signalled in this MISR */ ++ eError = RGXPollForGPCommandCompletion(psDeviceNode, ++ psDevInfo->psPowSyncPrim->pui32LinAddr, ++ 0x1, 0xFFFFFFFF); ++ ++ /* Check the Power state after the answer */ ++ if (eError == PVRSRV_OK) ++ { ++ /* Finally, de-initialise some registers. */ ++ if (psFwSysData->ePowState == RGXFWIF_POW_OFF) ++ { ++#if !defined(NO_HARDWARE) ++ IMG_UINT32 ui32idx; ++ ++ /* Driver takes the VZ Fw-KM connection down, preventing the ++ * firmware from submitting further interrupts */ ++ KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); ++ ++#if defined(RGX_FW_IRQ_OS_COUNTERS) ++ ui32idx = RGXFW_HOST_OS; ++#else ++ for_each_irq_cnt(ui32idx) ++#endif /* RGX_FW_IRQ_OS_COUNTERS */ ++ { ++ IMG_UINT32 ui32IrqCnt; ++ ++ get_irq_cnt_val(ui32IrqCnt, ui32idx, psDevInfo); ++ ++ /* Wait for the pending FW processor to host interrupts to come back. */ ++ eError = PVRSRVPollForValueKM(psDeviceNode, ++ (IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32idx], ++ ui32IrqCnt, ++ 0xffffffff, ++ POLL_FLAG_LOG_ERROR); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Wait for pending interrupts failed (DevID %u)." MSG_IRQ_CNT_TYPE " %u Host: %u, FW: %u", ++ __func__, ++ psDeviceNode->sDevId.ui32InternalID, ++ ui32idx, ++ psDevInfo->aui32SampleIRQCount[ui32idx], ++ ui32IrqCnt)); ++ ++ RGX_WaitForInterruptsTimeout(psDevInfo); ++ } ++ } ++#endif /* NO_HARDWARE */ ++ ++ /* Update GPU frequency and timer correlation related data */ ++ RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER); ++ ++ /* Update GPU state counters */ ++ _RGXUpdateGPUUtilStats(psDevInfo); ++ ++#if defined(SUPPORT_LINUX_DVFS) ++ eError = SuspendDVFS(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to suspend DVFS", __func__)); ++ return eError; ++ } ++#endif ++ ++ psDevInfo->bRGXPowered = IMG_FALSE; ++ ++ eError = RGXDoStop(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ /* Power down failures are treated as successful since the power was removed but logged. 
*/ ++ PVR_DPF((PVR_DBG_WARNING, "%s: RGXDoStop failed (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ psDevInfo->ui32ActivePMReqNonIdle++; ++ eError = PVRSRV_OK; ++ } ++ } ++ else ++ { ++ /* the sync was updated but the pow state isn't off -> the FW denied the transition */ ++ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; ++ ++ if (BITMASK_HAS(ePwrFlags, PVRSRV_POWER_FLAGS_FORCED)) ++ { /* It is an error for a forced request to be denied */ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failure to power off during a forced power off. FW: %d", ++ __func__, psFwSysData->ePowState)); ++ } ++ } ++ } ++ else if (eError == PVRSRV_ERROR_TIMEOUT) ++ { ++ /* timeout waiting for the FW to ack the request: return timeout */ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Timeout waiting for powoff ack from the FW", ++ __func__)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error waiting for powoff ack from the FW (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; ++ } ++ } ++ ++ return eError; ++} ++ ++#if defined(SUPPORT_AUTOVZ) ++static PVRSRV_ERROR _RGXWaitForGuestsToDisconnect(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError = PVRSRV_ERROR_TIMEOUT; ++ IMG_UINT32 ui32FwTimeout = (20 * SECONDS_TO_MICROSECONDS); ++ ++ LOOP_UNTIL_TIMEOUT(ui32FwTimeout) ++ { ++ IMG_UINT32 ui32OSid; ++ IMG_BOOL bGuestOnline = IMG_FALSE; ++ ++ for (ui32OSid = RGXFW_GUEST_OSID_START; ++ ui32OSid < RGX_NUM_OS_SUPPORTED; ui32OSid++) ++ { ++ RGXFWIF_CONNECTION_FW_STATE eGuestState = (RGXFWIF_CONNECTION_FW_STATE) ++ psDevInfo->psRGXFWIfFwSysData->asOsRuntimeFlagsMirror[ui32OSid].bfOsState; ++ ++ if ((eGuestState == RGXFW_CONNECTION_FW_ACTIVE) || ++ (eGuestState == RGXFW_CONNECTION_FW_OFFLOADING)) ++ { ++ bGuestOnline = IMG_TRUE; ++ PVR_DPF((PVR_DBG_WARNING, "%s: Guest OS %u still online.", __func__, ui32OSid)); ++ } ++ } ++ ++ if (!bGuestOnline) ++ { ++ /* Allow Guests to finish reading Connection state registers before disconnecting. */ ++ OSSleepms(100); ++ ++ PVR_DPF((PVR_DBG_WARNING, "%s: All Guest connections are down. " ++ "Host can power down the GPU.", __func__)); ++ eError = PVRSRV_OK; ++ break; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Waiting for Guests to disconnect " ++ "before powering down GPU.", __func__)); ++ ++ if (PVRSRVPwrLockIsLockedByMe(psDeviceNode)) ++ { ++ /* Don't wait with the power lock held as this prevents the vz ++ * watchdog thread from keeping the fw-km connection alive. */ ++ PVRSRVPowerUnlock(psDeviceNode); ++ } ++ } ++ ++ OSSleepms(10); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (!PVRSRVPwrLockIsLockedByMe(psDeviceNode)) ++ { ++ /* Take back power lock after waiting for Guests */ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ } ++ ++ return eError; ++} ++#endif /* defined(SUPPORT_AUTOVZ) */ ++ ++/* ++ RGXVzPrePowerState ++*/ ++PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ ++ PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); ++ ++ if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON) ++ { ++ /* powering down */ ++#if defined(SUPPORT_AUTOVZ) ++ if (PVRSRV_VZ_MODE_IS(HOST) && (!psDeviceNode->bAutoVzFwIsUp)) ++ { ++ /* The Host must ensure all Guest drivers have disconnected from the GPU before powering it down. 
++ * Guest drivers regularly access hardware registers during runtime. If an attempt is made to ++ * access a GPU register while the GPU is down, the SoC might lock up. */ ++ eError = _RGXWaitForGuestsToDisconnect(psDeviceNode); ++ PVR_LOG_RETURN_IF_ERROR(eError, "_RGXWaitForGuestsToDisconnect"); ++ ++ /* Temporarily restore all power callbacks used by the driver to fully power down the GPU. ++ * Under AutoVz, power transitions requests (e.g. on driver deinitialisation and unloading) ++ * are generally ignored and the GPU power state is unaffected. Special power requests like ++ * those triggered by Suspend/Resume calls must reinstate the callbacks when needed. */ ++ PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev, ++ &RGXVzPrePowerState, &RGXVzPostPowerState, ++ psDeviceNode->psDevConfig->pfnPrePowerState, ++ psDeviceNode->psDevConfig->pfnPostPowerState, ++ &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest); ++ } ++ else ++ { ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ if (KM_FW_CONNECTION_IS(ACTIVE, psDevInfo) && ++ KM_OS_CONNECTION_IS(ACTIVE, psDevInfo)) ++ { ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError = RGXFWSetFwOsState(psDevInfo, 0, RGXFWIF_OS_OFFLINE); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXFWSetFwOsState"); ++ } ++ } ++#endif ++ PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", ++ __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", ++ psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); ++ } ++ else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) ++ { ++ /* powering up */ ++ PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", ++ __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", ++ psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); ++ ++ } ++ ++ if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) ++ { ++ /* call regular device power function */ ++ eError = RGXPrePowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags); ++ } ++ ++ return eError; ++} ++ ++/* ++ RGXVzPostPowerState ++*/ ++PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ PVR_LOG_RETURN_IF_FALSE((eNewPowerState != eCurrentPowerState), "no power change", eError); ++ ++ if (!(PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp))) ++ { ++ /* call regular device power function */ ++ eError = RGXPostPowerState(hDevHandle, eNewPowerState, eCurrentPowerState, ePwrFlags); ++ } ++ ++ if (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON) ++ { ++ /* powering down */ ++ PVR_LOG_RETURN_IF_FALSE((!psDeviceNode->bAutoVzFwIsUp), "AutoVz Fw active, power not changed", eError); ++ PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering down: bAutoVzFwIsUp = %s", ++ __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", ++ psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); ++ ++#if !defined(SUPPORT_AUTOVZ_HW_REGS) ++ /* The connection states must be reset on a GPU power cycle. If the states are kept ++ * in hardware scratch registers, they will be cleared on power down. When using shared ++ * memory the connection data must be explicitly cleared by the driver. 
*/ ++ OSCachedMemSetWMB(psDevInfo->psRGXFWIfConnectionCtl, 0, sizeof(RGXFWIF_CONNECTION_CTL)); ++#endif /* defined(SUPPORT_AUTOVZ) && !defined(SUPPORT_AUTOVZ_HW_REGS) */ ++ ++ if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) ++ { ++#if defined(SUPPORT_AUTOVZ) ++ /* AutoVz Guests attempting to suspend have updated their connections earlier in RGXVzPrePowerState. ++ * Skip this redundant register write, as the Host could have powered down the GPU by now. */ ++ if (psDeviceNode->bAutoVzFwIsUp) ++#endif ++ { ++ /* Take the VZ connection down to prevent firmware from submitting further interrupts */ ++ KM_SET_OS_CONNECTION(OFFLINE, psDevInfo); ++ } ++ /* Power transition callbacks were not executed, update RGXPowered flag here */ ++ psDevInfo->bRGXPowered = IMG_FALSE; ++ } ++ } ++ else if (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON) ++ { ++ /* powering up */ ++ IMG_UINT32 ui32FwTimeout = (3 * SECONDS_TO_MICROSECONDS); ++ volatile IMG_BOOL *pbUpdatedFlag = &psDevInfo->psRGXFWIfOsInit->sRGXCompChecks.bUpdated; ++ ++ PVR_DPF((PVR_DBG_WARNING, "%s: %s driver powering up: bAutoVzFwIsUp = %s", ++ __func__, PVRSRV_VZ_MODE_IS(GUEST)? "GUEST" : "HOST", ++ psDeviceNode->bAutoVzFwIsUp ? "TRUE" : "FALSE")); ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ /* Guests don't execute the power transition callbacks, so update their RGXPowered flag here */ ++ psDevInfo->bRGXPowered = IMG_TRUE; ++ ++#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) ++ /* Guest drivers expect the firmware to have set its end of the ++ * connection to Ready state by now. Poll indefinitely otherwise. */ ++ if (!KM_FW_CONNECTION_IS(READY, psDevInfo)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Ready state. Waiting for Firmware ...", __func__)); ++ } ++ while (!KM_FW_CONNECTION_IS(READY, psDevInfo)) ++ { ++ OSSleepms(10); ++ } ++ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Ready. Initialisation proceeding.", __func__)); ++#endif /* RGX_VZ_STATIC_CARVEOUT_FW_HEAPS */ ++ ++ /* Guests can only access the register holding the connection states, ++ * after the GPU is confirmed to be powered up */ ++ KM_SET_OS_CONNECTION(READY, psDevInfo); ++ ++ OSWriteDeviceMem32WithWMB(pbUpdatedFlag, IMG_FALSE); ++ ++ /* Kick an initial dummy command to make the firmware initialise all ++ * its internal guest OS data structures and compatibility information. ++ * Use the lower-level RGXSendCommandAndGetKCCBSlot() for the job, to make ++ * sure only 1 KCCB command is issued to the firmware. ++ * The default RGXFWHealthCheckCmd() prefaces each HealthCheck command with ++ * a pre-kick cache command which can interfere with the FW-KM init handshake. */ ++ { ++ RGXFWIF_KCCB_CMD sCmpKCCBCmd; ++ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK; ++ ++ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo, &sCmpKCCBCmd, PDUMP_FLAGS_CONTINUOUS, NULL); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXSendCommandAndGetKCCBSlot()"); ++ } ++ } ++ else ++ { ++ KM_SET_OS_CONNECTION(READY, psDevInfo); ++ ++ /* Disable power callbacks that should not be run on virtualised drivers after the GPU ++ * is fully initialised: system layer pre/post functions and driver idle requests. ++ * The original device RGX Pre/Post functions are called from this Vz wrapper. 
*/ ++ PVRSRVSetPowerCallbacks(psDeviceNode, psDeviceNode->psPowerDev, ++ &RGXVzPrePowerState, &RGXVzPostPowerState, ++ NULL, NULL, NULL, NULL); ++ ++#if defined(SUPPORT_AUTOVZ) ++ /* During first-time boot the flag is set here, while subsequent reboots will already ++ * have set it earlier in RGXInit. Set to true from this point onwards in any case. */ ++ psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; ++#endif ++ } ++ ++ /* Wait for the firmware to accept and enable the connection with this OS by setting its state to Active */ ++ while (!KM_FW_CONNECTION_IS(ACTIVE, psDevInfo)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is not in Active state. Waiting for Firmware ...", __func__)); ++ OSSleepms(100); ++ } ++ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware Connection is Active. Initialisation proceeding.", __func__)); ++ ++ /* poll on the Firmware supplying the compatibility data */ ++ LOOP_UNTIL_TIMEOUT(ui32FwTimeout) ++ { ++ if (*pbUpdatedFlag) ++ { ++ break; ++ } ++ OSSleepms(10); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ PVR_LOG_RETURN_IF_FALSE(*pbUpdatedFlag, "Firmware does not respond with compatibility data. ", PVRSRV_ERROR_TIMEOUT); ++ ++ KM_SET_OS_CONNECTION(ACTIVE, psDevInfo); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++#if defined(TRACK_FW_BOOT) ++static INLINE void RGXCheckFWBootStage(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ FW_BOOT_STAGE eStage; ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ /* Boot stage temporarily stored to the register below */ ++ eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ++ RGX_FW_BOOT_STAGE_REGISTER); ++ } ++ else ++#endif ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR)) ++ { ++ eStage = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_SCRATCH14); ++ } ++ else ++#endif ++ { ++ IMG_BYTE *pbBootData; ++ ++ if (PVRSRV_OK != DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, ++ (void**)&pbBootData)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Could not acquire pointer to FW boot stage", __func__)); ++ eStage = FW_BOOT_STAGE_NOT_AVAILABLE; ++ } ++ else ++ { ++ pbBootData += RGXGetFWImageSectionOffset(NULL, MIPS_BOOT_DATA); ++ ++ eStage = *(FW_BOOT_STAGE*)&pbBootData[RGXMIPSFW_BOOT_STAGE_OFFSET]; ++ ++ if (eStage == FW_BOOT_STAGE_TLB_INIT_FAILURE) ++ { ++ RGXMIPSFW_BOOT_DATA *psBootData = ++ (RGXMIPSFW_BOOT_DATA*) (pbBootData + RGXMIPSFW_BOOTLDR_CONF_OFFSET); ++ ++ PVR_LOG(("MIPS TLB could not be initialised. 
Boot data info:" ++ " num PT pages %u, log2 PT page size %u, PT page addresses" ++ " %"IMG_UINT64_FMTSPECx " %"IMG_UINT64_FMTSPECx ++ " %"IMG_UINT64_FMTSPECx " %"IMG_UINT64_FMTSPECx, ++ psBootData->ui32PTNumPages, ++ psBootData->ui32PTLog2PageSize, ++ psBootData->aui64PTPhyAddr[0U], ++ psBootData->aui64PTPhyAddr[1U], ++ psBootData->aui64PTPhyAddr[2U], ++ psBootData->aui64PTPhyAddr[3U])); ++ } ++ ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); ++ } ++ } ++ ++ PVR_LOG(("%s: FW reached boot stage %i/%i.", ++ __func__, eStage, FW_BOOT_INIT_DONE)); ++} ++#endif ++ ++static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; ++ ++ if (psDevConfig->pfnTDRGXStart == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!")); ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++ } ++ ++ eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData); ++#else ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ eError = RGXStart(&psDevInfo->sLayerParams); ++#endif ++ ++ return eError; ++} ++ ++ ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ ++#if 0 ++#include "emu_cr_defs.h" ++#else ++#define EMU_CR_SYSTEM_IRQ_STATUS (0x00E0U) ++/* IRQ is officially defined [8 .. 0] but here we split out the old deprecated single irq. */ ++#define EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE01)) ++#define EMU_CR_SYSTEM_IRQ_STATUS_OLD_IRQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE)) ++#endif ++ ++static PVRSRV_ERROR ++_ValidateIrqs(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_UINT32 ui32OSid; ++ PDUMP_FLAGS_T ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS; ++ ++ /* Check if the Validation IRQ flag is set */ ++ if ((psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_IRQ) == 0) ++ { ++ return PVRSRV_OK; ++ } ++ ++ PDUMPIF(psDevInfo->psDeviceNode, "IMG_PVR_TESTBENCH", ui32PDumpFlags); ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, ++ "Poll for TB irq status to be set (irqs signalled)..."); ++ PDUMPREGPOL(psDevInfo->psDeviceNode, ++ RGX_TB_PDUMPREG_NAME, ++ EMU_CR_SYSTEM_IRQ_STATUS, ++ ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK, ++ ~EMU_CR_SYSTEM_IRQ_STATUS_IRQ_CLRMSK, ++ ui32PDumpFlags, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, ++ "... 
and then clear them"); ++ for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++) ++ { ++ PDUMPREG32(psDevInfo->psDeviceNode, ++ RGX_PDUMPREG_NAME, ++ RGX_CR_IRQ_OS0_EVENT_CLEAR + ui32OSid * 0x10000, ++ RGX_CR_IRQ_OS0_EVENT_CLEAR_MASKFULL, ++ ui32PDumpFlags); ++ } ++ ++ PDUMPFI(psDevInfo->psDeviceNode, "IMG_PVR_TESTBENCH", ui32PDumpFlags); ++ ++ /* Poll on all the interrupt status registers for all OSes */ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, ui32PDumpFlags, ++ "Validate Interrupt lines."); ++ ++ for (ui32OSid = 0; ui32OSid < RGXFW_MAX_NUM_OS; ui32OSid++) ++ { ++ PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, ++ RGX_CR_IRQ_OS0_EVENT_STATUS + ui32OSid * 0x10000, ++ 0x0, ++ 0xFFFFFFFF, ++ ui32PDumpFlags, ++ PDUMP_POLL_OPERATOR_EQUAL); ++ } ++ ++ return PVRSRV_OK; ++} ++#endif /* defined(NO_HARDWARE) && defined(PDUMP) */ ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) ++/* ++ * To validate the MTS unit we do the following: ++ * - Immediately after firmware loading for each OSID ++ * - Write the OSid to a memory location shared with FW ++ * - Kick the register of that OSid ++ * (Uncounted, DM 0) ++ * - FW clears the memory location if OSid matches ++ * - Host checks that memory location is cleared ++ * ++ * See firmware/devices/rgx/rgxfw_bg.c ++ */ ++static PVRSRV_ERROR RGXVirtualisationPowerupSidebandTest(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGXFWIF_SYSINIT *psFwSysInit, ++ PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ IMG_UINT32 ui32ScheduleRegister; ++ IMG_UINT32 ui32OSid; ++ IMG_UINT32 ui32KickType; ++ IMG_UINT32 ui32OsRegBanksMapped = (psDeviceNode->psDevConfig->ui32RegsSize / RGX_VIRTUALISATION_REG_SIZE_PER_OS); ++ ++ /* Nothing to do if the device does not support GPU_VIRTUALISATION */ ++ if (!PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, GPU_VIRTUALISATION)) ++ { ++ return PVRSRV_OK; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "Testing per-os kick registers:")); ++ ++ ui32OsRegBanksMapped = MIN(ui32OsRegBanksMapped, GPUVIRT_VALIDATION_NUM_OS); ++ ++ if (ui32OsRegBanksMapped != RGXFW_MAX_NUM_OS) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "The register bank mapped into kernel VA does not cover all OS' registers:")); ++ PVR_DPF((PVR_DBG_WARNING, "Maximum OS count = %d / Per-os register banks mapped = %d", RGXFW_MAX_NUM_OS, ui32OsRegBanksMapped)); ++ PVR_DPF((PVR_DBG_WARNING, "Only first %d MTS registers will be tested", ui32OsRegBanksMapped)); ++ } ++ ++ ui32KickType = RGX_CR_MTS_SCHEDULE_DM_DM0 | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED; ++ ++ for (ui32OSid = 0; ui32OSid < ui32OsRegBanksMapped; ui32OSid++) ++ { ++ /* set Test field */ ++ psFwSysInit->ui32OSKickTest = (ui32OSid << RGXFWIF_KICK_TEST_OSID_SHIFT) | RGXFWIF_KICK_TEST_ENABLED_BIT; ++ ++#if defined(PDUMP) ++ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfSysInitMemDesc, ++ offsetof(RGXFWIF_SYSINIT, ui32OSKickTest), ++ sizeof(psFwSysInit->ui32OSKickTest), ++ PDUMP_FLAGS_CONTINUOUS); ++#endif ++ ++ /* Force a read-back to memory to avoid posted writes on certain buses */ ++ OSWriteMemoryBarrier(&psFwSysInit->ui32OSKickTest); ++ ++ /* kick register */ ++ ui32ScheduleRegister = RGX_CR_MTS_SCHEDULE + (ui32OSid * RGX_VIRTUALISATION_REG_SIZE_PER_OS); ++ PVR_DPF((PVR_DBG_MESSAGE, " Testing OS: %u, Kick Reg: %X", ++ ui32OSid, ++ ui32ScheduleRegister)); ++ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32ScheduleRegister, ui32KickType); ++ OSMemoryBarrier((IMG_BYTE*) psDevInfo->pvRegsBaseKM + ui32ScheduleRegister); ++ ++#if defined(PDUMP) ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "VZ sideband test, 
kicking MTS register %u", ui32OSid); ++ ++ PDUMPREG32(psDeviceNode, RGX_PDUMPREG_NAME, ++ ui32ScheduleRegister, ui32KickType, PDUMP_FLAGS_CONTINUOUS); ++ ++ DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc, ++ offsetof(RGXFWIF_SYSINIT, ui32OSKickTest), ++ 0, ++ 0xFFFFFFFF, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ PDUMP_FLAGS_CONTINUOUS); ++#endif ++ ++ /* Wait test enable bit to be unset */ ++ if (PVRSRVPollForValueKM(psDeviceNode, ++ (IMG_UINT32 *)&psFwSysInit->ui32OSKickTest, ++ 0, ++ RGXFWIF_KICK_TEST_ENABLED_BIT, ++ POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware did not clear test location (contents: 0x%X)", ++ ui32OSid, ++ psFwSysInit->ui32OSKickTest)); ++ ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ /* Check that the value is what we expect */ ++ if (psFwSysInit->ui32OSKickTest != 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Testing OS %u kick register failed: firmware wrote 0x%X to test location", ++ ui32OSid, ++ psFwSysInit->ui32OSKickTest)); ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, " PASS")); ++ } ++ ++ PVR_LOG(("MTS passed sideband tests")); ++ return PVRSRV_OK; ++} ++#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE) */ ++ ++#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP) ++#define SCRATCH_VALUE (0x12345678U) ++ ++static void RGXRiscvDebugModuleTest(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ void *pvAppHintState = NULL; ++ IMG_UINT32 ui32AppHintDefault = 0; ++ IMG_BOOL bRunRiscvDmiTest; ++ ++ IMG_UINT32 *pui32FWCode = NULL; ++ PVRSRV_ERROR eError; ++ ++ OSCreateKMAppHintState(&pvAppHintState); ++ OSGetKMAppHintBOOL(APPHINT_NO_DEVICE, pvAppHintState, RiscvDmiTest, ++ &ui32AppHintDefault, &bRunRiscvDmiTest); ++ OSFreeKMAppHintState(pvAppHintState); ++ ++ if (bRunRiscvDmiTest == IMG_FALSE) ++ { ++ return; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, (void **)&pui32FWCode); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error acquiring FW code memory pointer (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ } ++ ++ PDumpIfKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST BEGIN"); ++ ++ RGXRiscvHalt(psDevInfo); ++ ++ /* ++ * Test RISC-V register reads/writes. ++ * RGXRiscv[Write/Poll]Reg are used to access internal RISC-V registers ++ * via debug module. ++ */ ++ ++ /* Write RISC-V mscratch register */ ++ RGXRiscvWriteReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); ++ /* Read RISC-V misa register (compare against default standard value) */ ++ RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MISA_ADDR, RGXRISCVFW_MISA_VALUE); ++ /* Read RISC-V mscratch register (compare against previously written value) */ ++ RGXRiscvPollReg(psDevInfo, RGXRISCVFW_MSCRATCH_ADDR, SCRATCH_VALUE); ++ ++ /* ++ * Test RISC-V memory reads/writes. ++ * RGXRiscv[Write/Poll]Mem are used to access system memory via debug module ++ * (from RISC-V point of view). 
++ */ ++ ++ if (pui32FWCode != NULL) ++ { ++ IMG_UINT32 ui32Tmp; ++ ++ /* Acquire pointer to FW code (bootloader) */ ++ pui32FWCode += RGXGetFWImageSectionOffset(NULL, RISCV_UNCACHED_CODE) / sizeof(IMG_UINT32); ++ /* Save FW code at address (bootloader) */ ++ ui32Tmp = *pui32FWCode; ++ ++ /* Write FW code at address (bootloader) */ ++ RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, SCRATCH_VALUE); ++ /* Read FW code at address (bootloader + 4) (compare against value read from Host) */ ++ RGXRiscvPollMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE + 4, *(pui32FWCode + 1)); ++ /* Read FW code at address (bootloader) (compare against previously written value) */ ++ RGXRiscvPollMem(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, SCRATCH_VALUE); ++ /* Restore FW code at address (bootloader) */ ++ RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_BOOTLDR_CODE_BASE, ui32Tmp); ++ ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); ++ } ++ ++ /* ++ * Test GPU register reads/writes. ++ * RGXRiscv[Write/Poll]Mem are used to access GPU registers via debug module ++ * (from RISC-V point of view). ++ * Note that system memory and GPU register accesses both use the same ++ * debug module interface, targeting different address ranges. ++ */ ++ ++ /* Write SCRATCH0 from the Host */ ++ PDUMPREG32(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, ++ SCRATCH_VALUE, PDUMP_FLAGS_CONTINUOUS); ++ /* Read SCRATCH0 */ ++ RGXRiscvPollMem(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, SCRATCH_VALUE); ++ /* Write SCRATCH0 */ ++ RGXWriteFWModuleAddr(psDevInfo, RGXRISCVFW_SOCIF_BASE | RGX_CR_SCRATCH0, ~SCRATCH_VALUE); ++ /* Read SCRATCH0 from the Host */ ++ PDUMPREGPOL(psDevInfo->psDeviceNode, RGX_PDUMPREG_NAME, RGX_CR_SCRATCH0, ++ ~SCRATCH_VALUE, 0xFFFFFFFFU, ++ PDUMP_FLAGS_CONTINUOUS, PDUMP_POLL_OPERATOR_EQUAL); ++ ++ RGXRiscvResume(psDevInfo); ++ ++ PDUMPCOMMENTWITHFLAGS(psDevInfo->psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "DMI_TEST END"); ++ PDumpFiKM(psDevInfo->psDeviceNode, "ENABLE_RISCV_DMI_TEST", PDUMP_FLAGS_CONTINUOUS); ++} ++#endif ++ ++/* ++ RGXPostPowerState ++*/ ++PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ if ((eNewPowerState != eCurrentPowerState) && ++ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON)) ++ { ++ PVRSRV_ERROR eError; ++ ++ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF) ++ { ++ /* Update timer correlation related data */ ++ RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER); ++ ++ /* Update GPU state counters */ ++ _RGXUpdateGPUUtilStats(psDevInfo); ++ ++ eError = RGXDoStart(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: RGXDoStart failed")); ++ return eError; ++ } ++ ++ OSMemoryBarrier(NULL); ++ ++ /* ++ * Check whether the FW has started by polling on bFirmwareStarted flag ++ */ ++ if (PVRSRVPollForValueKM(psDeviceNode, ++ (IMG_UINT32 __iomem *)&psDevInfo->psRGXFWIfSysInit->bFirmwareStarted, ++ IMG_TRUE, ++ 0xFFFFFFFF, ++ POLL_FLAG_LOG_ERROR | POLL_FLAG_DEBUG_DUMP) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed.")); ++ eError = PVRSRV_ERROR_TIMEOUT; ++ ++#if defined(TRACK_FW_BOOT) ++ RGXCheckFWBootStage(psDevInfo); ++#endif ++ ++ /* ++ * When bFirmwareStarted fails some info may be gained by doing the following ++ 
* debug dump, but unfortunately it could potentially be dangerous if the
++ * reason for not booting is that the GPU power is not ON. However, if we
++ * have reached this point the System Layer has returned without errors, so
++ * we assume the GPU power is indeed ON.
++ */
++ RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE);
++ RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice);
++
++ return eError;
++ }
++
++#if defined(PDUMP)
++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start.");
++ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfSysInitMemDesc,
++                                 offsetof(RGXFWIF_SYSINIT, bFirmwareStarted),
++                                 IMG_TRUE,
++                                 0xFFFFFFFFU,
++                                 PDUMP_POLL_OPERATOR_EQUAL,
++                                 PDUMP_FLAGS_CONTINUOUS);
++
++ if (eError != PVRSRV_OK)
++ {
++     PVR_DPF((PVR_DBG_ERROR,
++              "RGXPostPowerState: problem pdumping POL for psRGXFWIfSysInitMemDesc (%d)",
++              eError));
++     return eError;
++ }
++
++#if defined(NO_HARDWARE) && defined(PDUMP)
++ eError = _ValidateIrqs(psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++     return eError;
++ }
++#endif
++#endif
++
++#if defined(SUPPORT_GPUVIRT_VALIDATION) && !defined(NO_HARDWARE)
++ eError = RGXVirtualisationPowerupSidebandTest(psDeviceNode, psDevInfo->psRGXFWIfSysInit, psDevInfo);
++ if (eError != PVRSRV_OK)
++ {
++     return eError;
++ }
++#endif
++
++#if defined(SUPPORT_VALIDATION) && defined(NO_HARDWARE) && defined(PDUMP)
++ RGXRiscvDebugModuleTest(psDevInfo);
++#endif
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++ SetFirmwareStartTime(psDevInfo->psRGXFWIfSysInit->ui32FirmwareStartedTimeStamp);
++#endif
++
++ HTBSyncPartitionMarker(psDevInfo->psRGXFWIfSysInit->ui32MarkerVal);
++
++ psDevInfo->bRGXPowered = IMG_TRUE;
++
++#if defined(SUPPORT_LINUX_DVFS)
++ eError = ResumeDVFS(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++     PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Failed to resume DVFS"));
++     return eError;
++ }
++#endif
++ }
++ }
++
++ PDUMPCOMMENT(psDeviceNode,
++              "RGXPostPowerState: Current state: %d, New state: %d",
++              eCurrentPowerState, eNewPowerState);
++
++ return PVRSRV_OK;
++}
++
++/*
++ RGXPreClockSpeedChange
++*/
++PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle,
++                                    PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ const PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
++ PVR_UNREFERENCED_PARAMETER(psRGXData);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "RGXPreClockSpeedChange: RGX clock speed was %uHz",
++          psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
++
++ if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) &&
++     (psFwSysData->ePowState != RGXFWIF_POW_OFF))
++ {
++     /* Update GPU frequency and timer correlation related data */
++     RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
++ }
++
++ return eError;
++}
++
++/*
++ RGXPostClockSpeedChange
++*/
++PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle,
++                                     PVRSRV_DEV_POWER_STATE eCurrentPowerState)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ const RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ IMG_UINT32 ui32NewClockSpeed =
psRGXData->psRGXTimingInfo->ui32CoreClockSpeed; ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ /* Update runtime configuration with the new value */ ++ OSWriteDeviceMem32WithWMB(&psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed, ++ ui32NewClockSpeed); ++ ++ if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF) && ++ (psFwSysData->ePowState != RGXFWIF_POW_OFF)) ++ { ++ RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd; ++ IMG_UINT32 ui32CmdKCCBSlot; ++ ++ RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS); ++ ++ sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE; ++ sCOREClkSpeedChangeCmd.uCmdData.sCoreClkSpeedChangeData.ui32NewClockSpeed = ui32NewClockSpeed; ++ ++ PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command"); ++ ++ PDUMPPOWCMDSTART(psDeviceNode); ++ eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, ++ &sCOREClkSpeedChangeCmd, ++ PDUMP_FLAGS_NONE, ++ &ui32CmdKCCBSlot); ++ PDUMPPOWCMDEND(psDeviceNode); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PDUMPCOMMENT(psDeviceNode, "Scheduling CORE clock speed change command failed"); ++ PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError)); ++ return eError; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "RGXPostClockSpeedChange: RGX clock speed changed to %uHz", ++ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed)); ++ } ++ ++ return eError; ++} ++ ++/*! ++ ****************************************************************************** ++ ++ @Function RGXDustCountChange ++ ++ @Description ++ ++ Does change of number of DUSTs ++ ++ @Input hDevHandle : RGX Device Node ++ @Input ui32NumberOfDusts : Number of DUSTs to make transition to ++ ++ @Return PVRSRV_ERROR : ++ ++ ******************************************************************************/ ++PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, ++ IMG_UINT32 ui32NumberOfDusts) ++{ ++ ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError; ++ RGXFWIF_KCCB_CMD sDustCountChange; ++ IMG_UINT32 ui32MaxAvailableDusts = psDevInfo->sDevFeatureCfg.ui32MAXDustCount; ++ IMG_UINT32 ui32CmdKCCBSlot; ++ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ if (ui32NumberOfDusts > ui32MaxAvailableDusts) ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Invalid number of DUSTs (%u) while expecting value within <0,%u>. 
Error:%u", ++ __func__, ++ ui32NumberOfDusts, ++ ui32MaxAvailableDusts, ++ eError)); ++ return eError; ++ } ++ ++ psRuntimeCfg->ui32DefaultDustsNumInit = ui32NumberOfDusts; ++ OSWriteMemoryBarrier(&psRuntimeCfg->ui32DefaultDustsNumInit); ++ ++#if !defined(NO_HARDWARE) ++ { ++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData; ++ ++ if (psFwSysData->ePowState == RGXFWIF_POW_OFF) ++ { ++ return PVRSRV_OK; ++ } ++ ++ if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE) ++ { ++ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Attempt to change dust count when not IDLE", ++ __func__)); ++ return eError; ++ } ++ } ++#endif ++ ++ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim", ++ __func__)); ++ return eError; ++ } ++ ++ sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW; ++ sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUM_UNITS_CHANGE; ++ sDustCountChange.uCmdData.sPowData.uPowerReqData.ui32NumOfDusts = ui32NumberOfDusts; ++ ++ PDUMPCOMMENT(psDeviceNode, ++ "Scheduling command to change Dust Count to %u", ++ ui32NumberOfDusts); ++ eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice, ++ &sDustCountChange, ++ PDUMP_FLAGS_NONE, ++ &ui32CmdKCCBSlot); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PDUMPCOMMENT(psDeviceNode, ++ "Scheduling command to change Dust Count failed. Error:%u", ++ eError); ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Scheduling KCCB to change Dust Count failed. Error:%u", ++ __func__, eError)); ++ return eError; ++ } ++ ++ /* Wait for the firmware to answer. */ ++ eError = RGXPollForGPCommandCompletion(psDeviceNode, ++ psDevInfo->psPowSyncPrim->pui32LinAddr, ++ 0x1, 0xFFFFFFFF); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for idle request", __func__)); ++ return eError; ++ } ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDeviceNode, ++ "RGXDustCountChange: Poll for Kernel SyncPrim [0x%p] on DM %d", ++ psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP); ++ ++ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim, ++ 1, ++ 0xffffffff, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ 0); ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ @Function RGXAPMLatencyChange ++*/ ++PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, ++ IMG_UINT32 ui32ActivePMLatencyms, ++ IMG_BOOL bActivePMLatencyPersistant) ++{ ++ ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError; ++ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg; ++ IMG_UINT32 ui32CmdKCCBSlot; ++ PVRSRV_DEV_POWER_STATE ePowerState; ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK); ++ ++ eError = PVRSRVPowerLock(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Failed to acquire power lock")); ++ return eError; ++ } ++ ++ /* Update runtime configuration with the new values and ensure the ++ * new APM latency is written to memory before requesting the FW to ++ * read it ++ */ ++ psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms; ++ psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant; ++ OSWriteMemoryBarrier(&psRuntimeCfg->bActivePMLatencyPersistant); ++ ++ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); ++ ++ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF)) ++ { ++ RGXFWIF_KCCB_CMD sActivePMLatencyChange; ++ sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW; 
++ sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE;
++
++ PDUMPCOMMENT(psDeviceNode,
++              "Scheduling command to change APM latency to %u",
++              ui32ActivePMLatencyms);
++ eError = RGXSendCommandAndGetKCCBSlot(psDeviceNode->pvDevice,
++                                       &sActivePMLatencyChange,
++                                       PDUMP_FLAGS_NONE,
++                                       &ui32CmdKCCBSlot);
++
++ if (eError != PVRSRV_OK)
++ {
++     PDUMPCOMMENT(psDeviceNode,
++                  "Scheduling command to change APM latency failed. Error:%u",
++                  eError);
++     PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError));
++     goto ErrorExit;
++ }
++ }
++
++ErrorExit:
++ PVRSRVPowerUnlock(psDeviceNode);
++
++ return eError;
++}
++
++/*
++ RGXActivePowerRequest
++*/
++PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ const RGXFWIF_SYSDATA *psFwSysData = psDevInfo->psRGXFWIfFwSysData;
++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
++
++ psDevInfo->ui32ActivePMReqTotal++;
++
++ /* Take the power lock to stop further requests racing with the FW hand-shake
++  * from now on (previous kicks up to this point are detected by the FW).
++  * PVRSRVPowerLock is replaced with PVRSRVPowerTryLock to avoid a
++  * potential deadlock between PDumpWriteLock and PowerLock
++  * during 'DriverLive + PDUMP=1 + EnableAPM=1'.
++  */
++ eError = PVRSRVPowerTryLock(psDeviceNode);
++ if (eError != PVRSRV_OK)
++ {
++     if (eError != PVRSRV_ERROR_RETRY)
++     {
++         PVR_LOG_ERROR(eError, "PVRSRVPowerTryLock");
++     }
++     else
++     {
++         psDevInfo->ui32ActivePMReqRetry++;
++     }
++     goto _RGXActivePowerRequest_PowerLock_failed;
++ }
++
++ /* Check again for IDLE once we have the power lock */
++ if (psFwSysData->ePowState == RGXFWIF_POW_IDLE)
++ {
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++     SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFwSysData->ui64StartIdleTime);
++#endif
++
++     PDUMPPOWCMDSTART(psDeviceNode);
++     eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
++                                          PVRSRV_DEV_POWER_STATE_OFF,
++                                          PVRSRV_POWER_FLAGS_NONE);
++     PDUMPPOWCMDEND(psDeviceNode);
++
++     if (eError == PVRSRV_OK)
++     {
++         psDevInfo->ui32ActivePMReqOk++;
++     }
++     else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
++     {
++         psDevInfo->ui32ActivePMReqDenied++;
++     }
++ }
++ else
++ {
++     psDevInfo->ui32ActivePMReqNonIdle++;
++ }
++
++ PVRSRVPowerUnlock(psDeviceNode);
++
++_RGXActivePowerRequest_PowerLock_failed:
++
++ return eError;
++}
++/*
++ RGXForcedIdleRequest
++*/
++
++#define RGX_FORCED_IDLE_RETRY_COUNT 20
++
++PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ RGXFWIF_KCCB_CMD sPowCmd;
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32RetryCount = 0;
++ IMG_UINT32 ui32CmdKCCBSlot;
++#if !defined(NO_HARDWARE)
++ const RGXFWIF_SYSDATA *psFwSysData;
++#endif
++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
++
++#if !defined(NO_HARDWARE)
++ psFwSysData = psDevInfo->psRGXFWIfFwSysData;
++
++ /* Firmware already forced idle */
++ if (psFwSysData->ePowState == RGXFWIF_POW_FORCED_IDLE)
++ {
++     return PVRSRV_OK;
++ }
++
++ /* Firmware is not powered. Sometimes this is permitted, for instance if we were forcing idle to power down. */
++ if (psFwSysData->ePowState == RGXFWIF_POW_OFF)
++ {
++     return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
++ }
++#endif
++
++ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
++ if (eError != PVRSRV_OK)
++ {
++     PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
++              __func__));
++     return eError;
++ }
++ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
++ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
++ sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE;
++
++ PDUMPCOMMENT(psDeviceNode,
++              "RGXForcedIdleRequest: Sending forced idle command");
++
++ /* Send one forced IDLE command to GP */
++ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
++                                       &sPowCmd,
++                                       PDUMP_FLAGS_NONE,
++                                       &ui32CmdKCCBSlot);
++
++ if (eError != PVRSRV_OK)
++ {
++     PVR_DPF((PVR_DBG_ERROR, "%s: Failed to send idle request", __func__));
++     return eError;
++ }
++
++ /* Wait for GPU to finish current workload */
++ do {
++     eError = RGXPollForGPCommandCompletion(psDeviceNode,
++                                            psDevInfo->psPowSyncPrim->pui32LinAddr,
++                                            0x1, 0xFFFFFFFF);
++     if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT))
++     {
++         break;
++     }
++     ui32RetryCount++;
++     PVR_DPF((PVR_DBG_WARNING,
++              "%s: Request timeout. Retry %d of %d",
++              __func__, ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT));
++ } while (IMG_TRUE);
++
++ if (eError != PVRSRV_OK)
++ {
++     RGXFWNotifyHostTimeout(psDevInfo);
++     PVR_DPF((PVR_DBG_ERROR,
++              "%s: Idle request failed. Firmware potentially left in forced idle state",
++              __func__));
++     return eError;
++ }
++
++#if defined(PDUMP)
++ PDUMPCOMMENT(psDeviceNode,
++              "RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d",
++              psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
++
++ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
++                  1,
++                  0xffffffff,
++                  PDUMP_POLL_OPERATOR_EQUAL,
++                  0);
++#endif
++
++#if !defined(NO_HARDWARE)
++ /* Check the firmware state for idleness */
++ if (psFwSysData->ePowState != RGXFWIF_POW_FORCED_IDLE)
++ {
++     return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
++ }
++#endif
++
++ return PVRSRV_OK;
++}
++
++/*
++ RGXCancelForcedIdleRequest
++*/
++PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle)
++{
++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ RGXFWIF_KCCB_CMD sPowCmd;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ IMG_UINT32 ui32CmdKCCBSlot;
++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_OK);
++
++ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
++ if (eError != PVRSRV_OK)
++ {
++     PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set Power sync prim",
++              __func__));
++     goto ErrorExit;
++ }
++
++ /* Send the IDLE request to the FW */
++ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
++ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
++ sPowCmd.uCmdData.sPowData.uPowerReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE;
++
++ PDUMPCOMMENT(psDeviceNode,
++              "RGXCancelForcedIdleRequest: Sending cancel forced idle command");
++
++ /* Send cancel forced IDLE command to GP */
++ eError = RGXSendCommandAndGetKCCBSlot(psDevInfo,
++                                       &sPowCmd,
++                                       PDUMP_FLAGS_NONE,
++                                       &ui32CmdKCCBSlot);
++
++ if (eError != PVRSRV_OK)
++ {
++     PDUMPCOMMENT(psDeviceNode,
++                  "RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d",
++                  RGXFWIF_DM_GP);
++     goto ErrorExit;
++ }
++
++ /* Wait for the firmware to answer. */
++ eError = RGXPollForGPCommandCompletion(psDeviceNode,
++                                        psDevInfo->psPowSyncPrim->pui32LinAddr,
++                                        1, 0xFFFFFFFF);
++
++ if (eError != PVRSRV_OK)
++ {
++     PVR_DPF((PVR_DBG_ERROR, "%s: Timeout waiting for cancel idle request", __func__));
++     goto ErrorExit;
++ }
++
++#if defined(PDUMP)
++ PDUMPCOMMENT(psDeviceNode,
++              "RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d",
++              psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
++
++ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
++                  1,
++                  0xffffffff,
++                  PDUMP_POLL_OPERATOR_EQUAL,
++                  0);
++#endif
++
++ return eError;
++
++ErrorExit:
++ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware potentially left in forced idle state", __func__));
++ return eError;
++}
++
++/*!
++ ******************************************************************************
++
++ @Function RGXGetNextDustCount
++
++ @Description
++
++ Calculate a sequence of dust counts to achieve full transition coverage.
++ We increment two counts of dusts and switch up and down between them.
++ It does contain a few redundant transitions. If two dusts exist, the
++ output transitions should be as follows.
++
++ 0->1, 0<-1, 0->2, 0<-2, (0->1)
++ 1->1, 1->2, 1<-2, (1->2)
++ 2->2, (2->0),
++ 0->0. Repeat.
++
++ Redundant transitions in brackets.
++
++ @Input psDustReqState : Counter state used to calculate next dust count
++ @Input ui32DustCount : Number of dusts in the core
++
++ @Return IMG_UINT32 : Next dust count to transition to
++
++ ******************************************************************************/
++IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount)
++{
++ if (psDustReqState->bToggle)
++ {
++     psDustReqState->ui32DustCount2++;
++ }
++
++ if (psDustReqState->ui32DustCount2 > ui32DustCount)
++ {
++     psDustReqState->ui32DustCount1++;
++     psDustReqState->ui32DustCount2 = psDustReqState->ui32DustCount1;
++ }
++
++ if (psDustReqState->ui32DustCount1 > ui32DustCount)
++ {
++     psDustReqState->ui32DustCount1 = 0;
++     psDustReqState->ui32DustCount2 = 0;
++ }
++
++ psDustReqState->bToggle = !psDustReqState->bToggle;
++
++ return (psDustReqState->bToggle) ? psDustReqState->ui32DustCount1 : psDustReqState->ui32DustCount2;
++}
++
++/******************************************************************************
++ End of file (rgxpower.c)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/rgxpower.h b/drivers/gpu/drm/img-rogue/rgxpower.h
+new file mode 100644
+index 000000000000..a6cd3f2b2d10
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxpower.h
+@@ -0,0 +1,286 @@
++/*************************************************************************/ /*!
++@File
++@Title RGX power header file
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Header for the RGX power functions
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXPOWER_H) ++#define RGXPOWER_H ++ ++#include "pvrsrv_error.h" ++#include "img_types.h" ++#include "servicesext.h" ++#include "rgxdevice.h" ++ ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXPrePowerState ++ ++ @Description ++ ++ does necessary preparation before power state transition ++ ++ @Input hDevHandle : RGX Device Node ++ @Input eNewPowerState : New power state ++ @Input eCurrentPowerState : Current power state ++ ++ @Return PVRSRV_ERROR : ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXPostPowerState ++ ++ @Description ++ ++ does necessary preparation after power state transition ++ ++ @Input hDevHandle : RGX Device Node ++ @Input eNewPowerState : New power state ++ @Input eCurrentPowerState : Current power state ++ ++ @Return PVRSRV_ERROR : ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXVzPrePowerState ++ ++ @Description ++ ++ does necessary preparation before power state transition on a vz driver ++ ++ @Input hDevHandle : RGX Device Node ++ @Input eNewPowerState : New power state ++ @Input eCurrentPowerState : Current power state ++ ++ @Return PVRSRV_ERROR : ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXVzPrePowerState(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags); ++ ++/*! 
++****************************************************************************** ++ ++ @Function RGXVzPostPowerState ++ ++ @Description ++ ++ does necessary preparation after power state transition on a vz driver ++ ++ @Input hDevHandle : RGX Device Node ++ @Input eNewPowerState : New power state ++ @Input eCurrentPowerState : Current power state ++ ++ @Return PVRSRV_ERROR : ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXVzPostPowerState(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eNewPowerState, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXPreClockSpeedChange ++ ++ @Description ++ ++ Does processing required before an RGX clock speed change. ++ ++ @Input hDevHandle : RGX Device Node ++ @Input eCurrentPowerState : Power state of the device ++ ++ @Return PVRSRV_ERROR : ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXPostClockSpeedChange ++ ++ @Description ++ ++ Does processing required after an RGX clock speed change. ++ ++ @Input hDevHandle : RGX Device Node ++ @Input eCurrentPowerState : Power state of the device ++ ++ @Return PVRSRV_ERROR : ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle, ++ PVRSRV_DEV_POWER_STATE eCurrentPowerState); ++ ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXDustCountChange ++ ++ @Description Change of number of DUSTs ++ ++ @Input hDevHandle : RGX Device Node ++ @Input ui32NumberOfDusts : Number of DUSTs to make transition to ++ ++ @Return PVRSRV_ERROR : ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle, ++ IMG_UINT32 ui32NumberOfDusts); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXAPMLatencyChange ++ ++ @Description ++ ++ Changes the wait duration used before firmware indicates IDLE. ++ Reducing this value will cause the firmware to shut off faster and ++ more often but may increase bubbles in GPU scheduling due to the added ++ power management activity. If bPersistent is NOT set, APM latency will ++ return back to system default on power up. ++ ++ @Input hDevHandle : RGX Device Node ++ @Input ui32ActivePMLatencyms : Number of milliseconds to wait ++ @Input bActivePMLatencyPersistant : Set to ensure new value is not reset ++ ++ @Return PVRSRV_ERROR : ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle, ++ IMG_UINT32 ui32ActivePMLatencyms, ++ IMG_BOOL bActivePMLatencyPersistant); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXActivePowerRequest ++ ++ @Description Initiate a handshake with the FW to power off the GPU ++ ++ @Input hDevHandle : RGX Device Node ++ ++ @Return PVRSRV_ERROR : ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle); ++ ++/*! 
++******************************************************************************
++
++ @Function RGXForcedIdleRequest
++
++ @Description Initiate a handshake with the FW to idle the GPU
++
++ @Input hDevHandle : RGX Device Node
++
++ @Input bDeviceOffPermitted : Set to indicate device state being off is not
++                              erroneous.
++
++ @Return PVRSRV_ERROR :
++
++******************************************************************************/
++PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted);
++
++/*!
++******************************************************************************
++
++ @Function RGXCancelForcedIdleRequest
++
++ @Description Send a request to cancel idle to the firmware.
++
++ @Input hDevHandle : RGX Device Node
++
++ @Return PVRSRV_ERROR :
++
++******************************************************************************/
++PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle);
++
++/*!
++******************************************************************************
++
++ @Function RGXGetNextDustCount
++
++ @Description
++
++ Calculate a sequence of dust counts to achieve full transition coverage.
++ We increment two counts of dusts and switch up and down between them.
++ It does contain a few redundant transitions. If two dusts exist, the
++ output transitions should be as follows.
++
++ 0->1, 0<-1, 0->2, 0<-2, (0->1)
++ 1->1, 1->2, 1<-2, (1->2)
++ 2->2, (2->0),
++ 0->0. Repeat.
++
++ Redundant transitions in brackets.
++
++ @Input psDustReqState : Counter state used to calculate next dust count
++ @Input ui32DustCount : Number of dusts in the core
++
++ @Return IMG_UINT32 : Next dust count to transition to
++
++******************************************************************************/
++IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount);
++
++#endif /* RGXPOWER_H */
+diff --git a/drivers/gpu/drm/img-rogue/rgxregconfig.c b/drivers/gpu/drm/img-rogue/rgxregconfig.c
+new file mode 100644
+index 000000000000..ef39bea255eb
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxregconfig.c
+@@ -0,0 +1,319 @@
++/*************************************************************************/ /*!
++@File
++@Title RGX Register configuration
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description RGX Regconfig routines
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "rgxregconfig.h" ++#include "pvr_debug.h" ++#include "rgxutils.h" ++#include "rgxfwutils.h" ++#include "device.h" ++#include "sync_internal.h" ++#include "pdump_km.h" ++#include "pvrsrv.h" ++ ++PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT8 ui8RegCfgType) ++{ ++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; ++ RGXFWIF_REG_CFG_TYPE eRegCfgType = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType; ++ ++ PVR_UNREFERENCED_PARAMETER(psDevConnection); ++ ++ OSLockAcquire(psRegCfg->hLock); ++ ++ if (eRegCfgType < psRegCfg->eRegCfgTypeToPush) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Register configuration requested (%d) is not valid since it has to be at least %d." ++ " Configurations of different types need to go in order", ++ __func__, ++ eRegCfgType, ++ psRegCfg->eRegCfgTypeToPush)); ++ OSLockRelease(psRegCfg->hLock); ++ return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE; ++ } ++ ++ psRegCfg->eRegCfgTypeToPush = eRegCfgType; ++ ++ OSLockRelease(psRegCfg->hLock); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(psDevConnection); ++ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Feature disabled. 
Compile with SUPPORT_USER_REGISTER_CONFIGURATION", ++ __func__)); ++ return PVRSRV_ERROR_FEATURE_DISABLED; ++#endif ++} ++ ++PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT64 ui64RegValue, ++ IMG_UINT64 ui64RegMask) ++{ ++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sRegCfgCmd; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ OSLockAcquire(psRegCfg->hLock); ++ ++ if (psRegCfg->bEnabled) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Cannot add record whilst register configuration active.", ++ __func__)); ++ OSLockRelease(psRegCfg->hLock); ++ return PVRSRV_ERROR_REG_CONFIG_ENABLED; ++ } ++ if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Register configuration full.", ++ __func__)); ++ OSLockRelease(psRegCfg->hLock); ++ return PVRSRV_ERROR_REG_CONFIG_FULL; ++ } ++ ++ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; ++ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr; ++ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue; ++ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask; ++ sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush; ++ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD; ++ ++ eError = RGXScheduleCommand(psDeviceNode->pvDevice, ++ RGXFWIF_DM_GP, ++ &sRegCfgCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXScheduleCommand failed. Error:%u", ++ __func__, ++ eError)); ++ OSLockRelease(psRegCfg->hLock); ++ return eError; ++ } ++ ++ psRegCfg->ui32NumRegRecords++; ++ ++ OSLockRelease(psRegCfg->hLock); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", ++ __func__)); ++ return PVRSRV_ERROR_FEATURE_DISABLED; ++#endif ++} ++ ++PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sRegCfgCmd; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ OSLockAcquire(psRegCfg->hLock); ++ ++ if (psRegCfg->bEnabled) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Attempt to clear register configuration whilst active.", ++ __func__)); ++ OSLockRelease(psRegCfg->hLock); ++ return PVRSRV_ERROR_REG_CONFIG_ENABLED; ++ } ++ ++ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; ++ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR; ++ ++ eError = RGXScheduleCommand(psDeviceNode->pvDevice, ++ RGXFWIF_DM_GP, ++ &sRegCfgCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXScheduleCommand failed. 
Error:%u", ++ __func__, ++ eError)); ++ OSLockRelease(psRegCfg->hLock); ++ return eError; ++ } ++ ++ psRegCfg->ui32NumRegRecords = 0; ++ psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON; ++ ++ OSLockRelease(psRegCfg->hLock); ++ ++ return eError; ++#else ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", ++ __func__)); ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ return PVRSRV_ERROR_FEATURE_DISABLED; ++#endif ++} ++ ++PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sRegCfgCmd; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ OSLockAcquire(psRegCfg->hLock); ++ ++ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; ++ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE; ++ ++ eError = RGXScheduleCommand(psDeviceNode->pvDevice, ++ RGXFWIF_DM_GP, ++ &sRegCfgCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXScheduleCommand failed. Error:%u", ++ __func__, ++ eError)); ++ OSLockRelease(psRegCfg->hLock); ++ return eError; ++ } ++ ++ psRegCfg->bEnabled = IMG_TRUE; ++ ++ OSLockRelease(psRegCfg->hLock); ++ ++ return eError; ++#else ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", ++ __func__)); ++ return PVRSRV_ERROR_FEATURE_DISABLED; ++#endif ++} ++ ++PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++#if defined(SUPPORT_USER_REGISTER_CONFIGURATION) ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGXFWIF_KCCB_CMD sRegCfgCmd; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ PVRSRV_VZ_RET_IF_MODE(GUEST, PVRSRV_ERROR_NOT_SUPPORTED); ++ ++ OSLockAcquire(psRegCfg->hLock); ++ ++ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG; ++ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE; ++ ++ eError = RGXScheduleCommand(psDeviceNode->pvDevice, ++ RGXFWIF_DM_GP, ++ &sRegCfgCmd, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXScheduleCommand failed. Error:%u", ++ __func__, ++ eError)); ++ OSLockRelease(psRegCfg->hLock); ++ return eError; ++ } ++ ++ psRegCfg->bEnabled = IMG_FALSE; ++ ++ OSLockRelease(psRegCfg->hLock); ++ ++ return eError; ++#else ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION", ++ __func__)); ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ return PVRSRV_ERROR_FEATURE_DISABLED; ++#endif ++} ++ ++/****************************************************************************** ++ End of file (rgxregconfig.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxregconfig.h b/drivers/gpu/drm/img-rogue/rgxregconfig.h +new file mode 100644 +index 000000000000..b0921d98cb14 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxregconfig.h +@@ -0,0 +1,130 @@ ++/*************************************************************************/ /*! 
++@File
++@Title RGX register configuration functionality
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Header for the RGX register configuration functionality
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGXREGCONFIG_H)
++#define RGXREGCONFIG_H
++
++#include "pvr_debug.h"
++#include "rgxutils.h"
++#include "rgxfwutils.h"
++#include "rgx_fwif_km.h"
++
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXSetRegConfigTypeKM
++
++ @Description
++ Server-side implementation of RGXSetRegConfigType
++
++ @Input psDeviceNode - RGX Device node
++ @Input ui8RegCfgType - Register configuration type
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
++                                         PVRSRV_DEVICE_NODE *psDeviceNode,
++                                         IMG_UINT8 ui8RegCfgType);
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXAddRegConfigKM
++
++ @Description
++ Server-side implementation of RGXAddRegConfig
++
++ @Input psDeviceNode - RGX Device node
++ @Input ui32RegAddr - Register address
++ @Input ui64RegValue - Reg value
++ @Input ui64RegMask - Reg mask
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++
++PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
++                                     PVRSRV_DEVICE_NODE *psDeviceNode,
++                                     IMG_UINT32 ui32RegAddr,
++                                     IMG_UINT64 ui64RegValue,
++                                     IMG_UINT64 ui64RegMask);
++
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXClearRegConfigKM
++
++ @Description
++ Server-side implementation of RGXClearRegConfig
++
++ @Input psDeviceNode - RGX Device node
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
++                                       PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXEnableRegConfigKM
++
++ @Description
++ Server-side implementation of RGXEnableRegConfig
++
++ @Input psDeviceNode - RGX Device node
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
++                                        PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/*!
++*******************************************************************************
++ @Function PVRSRVRGXDisableRegConfigKM
++
++ @Description
++ Server-side implementation of RGXDisableRegConfig
++
++ @Input psDeviceNode - RGX Device node
++
++ @Return PVRSRV_ERROR
++******************************************************************************/
++PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
++                                         PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#endif /* RGXREGCONFIG_H */
+diff --git a/drivers/gpu/drm/img-rogue/rgxshader.c b/drivers/gpu/drm/img-rogue/rgxshader.c
+new file mode 100644
+index 000000000000..407c0fbd5939
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxshader.c
+@@ -0,0 +1,302 @@
++/*************************************************************************/ /*!
++@File rgxshader.c
++@Title TQ Shader Load
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Shader code and info are shared by all contexts on the device.
++             If allocation doesn't already exist, read shader data from file
++             and allocate PMR memory. PMR memory is not deallocated until
++             device deinit.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "rgxshader.h" ++#include "osfunc_common.h" ++#include "rgxdevice.h" ++#include "pdump_km.h" ++#include "physmem.h" ++#include "ri_server.h" ++#include "pvr_ricommon.h" ++ ++static void ++RGXShaderReadHeader(OS_FW_IMAGE *psShaderFW, RGX_SHADER_HEADER *psHeader) ++{ ++ const void * pvData; ++ ++ pvData = OSFirmwareData(psShaderFW); ++ ++ OSDeviceMemCopy(psHeader, pvData, sizeof(RGX_SHADER_HEADER)); ++} ++ ++static size_t ++RGXShaderCLIMemSize(OS_FW_IMAGE *psShaderFW) ++{ ++ RGX_SHADER_HEADER sHeader; ++ ++ RGXShaderReadHeader(psShaderFW, &sHeader); ++ ++ return sHeader.ui32SizeClientMem; ++} ++ ++static size_t ++RGXShaderUSCMemSize(OS_FW_IMAGE *psShaderFW) ++{ ++ RGX_SHADER_HEADER sHeader; ++ ++ RGXShaderReadHeader(psShaderFW, &sHeader); ++ ++ return sHeader.ui32SizeFragment; ++} ++ ++static void * ++RGXShaderCLIMem(OS_FW_IMAGE *psShaderFW) ++{ ++ return (void*)OSFirmwareData(psShaderFW); ++} ++ ++static void * ++RGXShaderUSCMem(OS_FW_IMAGE *psShaderFW) ++{ ++ IMG_PBYTE pui8Data; ++ ++ pui8Data = (IMG_PBYTE)OSFirmwareData(psShaderFW); ++ ++ pui8Data += RGXShaderCLIMemSize(psShaderFW); ++ ++ return (void*) pui8Data; ++} ++ ++#define RGX_SHADER_FILENAME_MAX_SIZE ((sizeof(RGX_SH_FILENAME)+ \ ++ RGX_BVNC_STR_SIZE_MAX)) ++ ++static void ++_GetShaderFileName(PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_CHAR * pszShaderFilenameStr, ++ IMG_CHAR * pszShaderpFilenameStr) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ OSSNPrintf(pszShaderFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, ++ "%s." RGX_BVNC_STR_FMTSPEC, ++ RGX_SH_FILENAME, ++ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, ++ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); ++ ++ OSSNPrintf(pszShaderpFilenameStr, RGX_SHADER_FILENAME_MAX_SIZE, ++ "%s." 
RGX_BVNC_STRP_FMTSPEC, ++ RGX_SH_FILENAME, ++ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V, ++ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C); ++} ++ ++PVRSRV_ERROR ++PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE * psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ OS_FW_IMAGE *psShaderFW; ++ RGX_SHADER_HEADER sHeader; ++ IMG_UINT32 ui32MappingTable = 0; ++ IMG_UINT32 ui32NumPages; ++ IMG_CHAR aszShaderFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE]; ++ IMG_CHAR aszShaderpFilenameStr[RGX_SHADER_FILENAME_MAX_SIZE]; ++ const IMG_CHAR *pszShaderFilenameStr = aszShaderFilenameStr; ++ size_t uiNumBytes; ++ PVRSRV_ERROR eError; ++ ++ _GetShaderFileName(psDeviceNode, aszShaderFilenameStr, aszShaderpFilenameStr); ++ ++ eError = OSLoadFirmware(psDeviceNode, aszShaderFilenameStr, NULL, &psShaderFW); ++ ++ if (eError != PVRSRV_OK) ++ { ++ eError = OSLoadFirmware(psDeviceNode, aszShaderpFilenameStr, ++ NULL, &psShaderFW); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to load shader binary file %s (%s)", ++ __func__, ++ aszShaderpFilenameStr, ++ PVRSRVGetErrorString(eError))); ++ eError = PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE; ++ goto failed_init; ++ } ++ ++ pszShaderFilenameStr = aszShaderpFilenameStr; ++ } ++ ++ PVR_LOG(("Shader binary image '%s' loaded", pszShaderFilenameStr)); ++ ++ RGXShaderReadHeader(psShaderFW, &sHeader); ++ ++ ui32NumPages = (sHeader.ui32SizeFragment / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; ++ ++ PDUMPCOMMENT(psDeviceNode, "Allocate TDM USC PMR Block (Pages %08X)", ui32NumPages); ++ ++ eError = PhysmemNewRamBackedPMR(NULL, ++ psDeviceNode, ++ (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, ++ (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, ++ 1, ++ 1, ++ &ui32MappingTable, ++ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE ++ | PVRSRV_MEMALLOCFLAG_GPU_READABLE ++ | PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT ++ | PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER, ++ sizeof("tquscpmr"), ++ "tquscpmr", ++ PVR_SYS_ALLOC_PID, ++ (PMR**)&psDevInfo->hTQUSCSharedMem, ++ PDUMP_NONE, ++ NULL); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Unexpected error from PhysmemNewRamBackedPMR (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto failed_firmware; ++ } ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQUSCSharedMem, PVR_SYS_ALLOC_PID); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto failed_uscpmr; ++ } ++#endif ++ ++ eError = PMR_WriteBytes(psDevInfo->hTQUSCSharedMem, 0, RGXShaderUSCMem(psShaderFW), RGXShaderUSCMemSize(psShaderFW), &uiNumBytes); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto failed_uscpmr; ++ } ++ ++ ui32NumPages = (sHeader.ui32SizeClientMem / RGX_BIF_PM_PHYSICAL_PAGE_SIZE) + 1; ++ ++ PDUMPCOMMENT(psDeviceNode, "Allocate TDM Client PMR Block (Pages %08X)", ui32NumPages); ++ ++ eError = PhysmemNewRamBackedPMR(NULL, ++ psDeviceNode, ++ (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, ++ (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE, ++ 1, ++ 1, ++ &ui32MappingTable, ++ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE ++ | PVRSRV_MEMALLOCFLAG_CPU_READABLE ++ | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT ++ 
| PVRSRV_MEMALLOCFLAG_VAL_SHARED_BUFFER,
++                                 sizeof("tqclipmr"),
++                                 "tqclipmr",
++                                 PVR_SYS_ALLOC_PID,
++                                 (PMR**)&psDevInfo->hTQCLISharedMem,
++                                 PDUMP_NONE,
++                                 NULL);
++ if (eError != PVRSRV_OK)
++ {
++     PVR_LOG(("%s: Unexpected error from PhysmemNewRamBackedPMR (%s)",
++              __func__,
++              PVRSRVGetErrorString(eError)));
++     goto failed_uscpmr;
++ }
++
++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO)
++ eError = RIWritePMREntryWithOwnerKM(psDevInfo->hTQCLISharedMem, PVR_SYS_ALLOC_PID);
++ if (eError != PVRSRV_OK)
++ {
++     PVR_LOG(("%s: Unexpected error from RIWritePMREntryWithOwnerKM (%s)",
++              __func__,
++              PVRSRVGetErrorString(eError)));
++     goto failed_clipmr;
++ }
++#endif
++
++ eError = PMR_WriteBytes(psDevInfo->hTQCLISharedMem, 0, RGXShaderCLIMem(psShaderFW), RGXShaderCLIMemSize(psShaderFW), &uiNumBytes);
++ if (eError != PVRSRV_OK)
++ {
++     PVR_LOG(("%s: Unexpected error from PMR_WriteBytes (%s)",
++              __func__,
++              PVRSRVGetErrorString(eError)));
++     goto failed_clipmr;
++ }
++
++ OSUnloadFirmware(psShaderFW);
++
++ PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL);
++ PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL);
++
++ return PVRSRV_OK;
++
++failed_clipmr:
++ PMRUnrefPMR(psDevInfo->hTQCLISharedMem);
++failed_uscpmr:
++ PMRUnrefPMR(psDevInfo->hTQUSCSharedMem);
++failed_firmware:
++ OSUnloadFirmware(psShaderFW);
++failed_init:
++ return eError;
++}
++
++void
++PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE * psDeviceNode,
++                       PMR ** ppsCLIPMRMem,
++                       PMR ** ppsUSCPMRMem)
++{
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ PVR_ASSERT(psDevInfo->hTQUSCSharedMem != NULL);
++ PVR_ASSERT(psDevInfo->hTQCLISharedMem != NULL);
++
++ *ppsUSCPMRMem = psDevInfo->hTQUSCSharedMem;
++ *ppsCLIPMRMem = psDevInfo->hTQCLISharedMem;
++}
++
++void PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++ (void) PMRUnrefPMR(psDevInfo->hTQUSCSharedMem);
++ (void) PMRUnrefPMR(psDevInfo->hTQCLISharedMem);
++}
+diff --git a/drivers/gpu/drm/img-rogue/rgxshader.h b/drivers/gpu/drm/img-rogue/rgxshader.h
+new file mode 100644
+index 000000000000..7676ede51b7f
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxshader.h
+@@ -0,0 +1,83 @@
++/*************************************************************************/ /*!
++@File rgxshader.h
++@Title TQ Shader Load
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Shader code and info are shared by all contexts on the device.
++             If allocation doesn't already exist, read shader data from file
++             and allocate PMR memory. PMR memory is not deallocated until
++             device deinit.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGXSHADER_H)
++#define RGXSHADER_H
++
++#include "fwload.h"
++#include "rgxtransfer_shader.h"
++#include "connection_server.h"
++
++/*************************************************************************/ /*!
++@Function PVRSRVTQLoadShaders
++@Description If PMR is not allocated, reads shader binary data from file
++             and allocates new PMR memory.
++@Input psDeviceNode Device node
++@Return PVRSRV_ERROR Returns PVRSRV_OK on success.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVTQLoadShaders(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++/*************************************************************************/ /*!
++@Function PVRSRVTQAcquireShaders
++@Description Get handles to the already-allocated shader PMR memory.
++@Input psDeviceNode Device node
++@Output ppsCLIPMRMem Shader data used by CPU client side.
++@Output ppsUSCPMRMem Shader USC code used by the GPU.
++*/ /**************************************************************************/
++void
++PVRSRVTQAcquireShaders(PVRSRV_DEVICE_NODE *psDeviceNode,
++                       PMR **ppsCLIPMRMem,
++                       PMR **ppsUSCPMRMem);
++
++/*************************************************************************/ /*!
++@Function PVRSRVTQUnloadShaders
++@Description Unref the shader PMR memory.
++@Input psDeviceNode Device node
++*/ /**************************************************************************/
++void PVRSRVTQUnloadShaders(PVRSRV_DEVICE_NODE *psDeviceNode);
++
++#endif /* RGXSHADER_H */
+diff --git a/drivers/gpu/drm/img-rogue/rgxsrvinit.c b/drivers/gpu/drm/img-rogue/rgxsrvinit.c
+new file mode 100644
+index 000000000000..851054a7c334
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxsrvinit.c
+@@ -0,0 +1,1657 @@
++/*************************************************************************/ /*!
++@File
++@Title Services initialisation routines
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Device specific functions
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "srvinit.h" ++#include "pvr_debug.h" ++#include "osfunc.h" ++#include "km_apphint_defs.h" ++#include "htbuffer_types.h" ++#include "htbuffer_init.h" ++ ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++ ++#include "rgx_fwif_km.h" ++#include "pdump_km.h" ++ ++#include "rgxinit.h" ++#include "rgxmulticore.h" ++ ++#include "rgx_compat_bvnc.h" ++ ++#include "osfunc.h" ++ ++#include "rgxdefs_km.h" ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#include "virt_validation_defs.h" ++#endif ++ ++#include "rgx_fwif_hwperf.h" ++#include "rgx_hwperf_table.h" ++ ++#include "fwload.h" ++#include "rgxlayer_impl.h" ++#include "rgxfwimageutils.h" ++#include "rgxfwutils.h" ++ ++#include "rgx_hwperf.h" ++#include "rgx_bvnc_defs_km.h" ++ ++#include "rgxdevice.h" ++ ++#include "pvrsrv.h" ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++#include "rgxdevice.h" ++#include "pvrsrv_device.h" ++#endif ++ ++#define DRIVER_MODE_HOST 0 /* AppHint value for host driver mode */ ++ ++#define HW_PERF_FILTER_DEFAULT 0x00000000 /* Default to no HWPerf */ ++#define HW_PERF_FILTER_DEFAULT_ALL_ON 0xFFFFFFFF /* All events */ ++ ++/* Kernel CCB size */ ++ ++#if !defined(PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE) ++#define PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE 4 ++#endif ++#if !defined(PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE) ++#define PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE 16 ++#endif ++ ++#if PVRSRV_APPHINT_KCCB_SIZE_LOG2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE ++#error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too low. ++#elif PVRSRV_APPHINT_KCCB_SIZE_LOG2 > PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE ++#error PVRSRV_APPHINT_KCCB_SIZE_LOG2 is too high. ++#endif ++ ++#if defined(SUPPORT_VALIDATION) ++#include "pvrsrv_apphint.h" ++#endif ++ ++#include "os_srvinit_param.h" ++#if !defined(__linux__) ++/*! ++******************************************************************************* ++ * AppHint mnemonic data type helper tables ++******************************************************************************/ ++/* apphint map of name vs. enable flag */ ++static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = { ++#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) }, ++ HTB_LOG_SFGROUPLIST ++#undef X ++}; ++/* apphint map of arg vs. 
OpMode */ ++static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = { ++ { "droplatest", HTB_OPMODE_DROPLATEST}, ++ { "dropoldest", HTB_OPMODE_DROPOLDEST}, ++ /* HTB should never be started in HTB_OPMODE_BLOCK ++ * as this can lead to deadlocks ++ */ ++}; ++ ++static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = { ++ { "trace", 0}, ++ { "none", 0} ++#if defined(SUPPORT_TBI_INTERFACE) ++ , { "tbi", 1} ++#endif ++}; ++ ++static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = { ++ { "mono", 0 }, ++ { "mono_raw", 1 }, ++ { "sched", 2 } ++}; ++ ++static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP }; ++ ++/* ++ * Services AppHints initialisation ++ */ ++#define X(a, b, c, d, e) SrvInitParamInit ## b(a, d, e) ++APPHINT_LIST_ALL ++#undef X ++#endif /* !defined(__linux__) */ ++ ++/* ++ * Container for all the apphints used by this module ++ */ ++typedef struct _RGX_SRVINIT_APPHINTS_ ++{ ++ IMG_UINT32 ui32DriverMode; ++ IMG_BOOL bGPUUnitsPowerChange; ++ IMG_BOOL bEnableSignatureChecks; ++ IMG_UINT32 ui32SignatureChecksBufSize; ++ ++ IMG_BOOL bAssertOnOutOfMem; ++#if defined(SUPPORT_VALIDATION) ++ IMG_BOOL bValidateIrq; ++ IMG_BOOL bValidateSOCUSCTimer; ++#endif ++ IMG_BOOL bAssertOnHWRTrigger; ++#if defined(SUPPORT_VALIDATION) ++ IMG_UINT32 aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_LAST]; ++ IMG_UINT32 ui32FBCDCVersionOverride; ++ IMG_UINT32 ui32TFBCCompressionControlGroup; ++ IMG_UINT32 ui32TFBCCompressionControlScheme; ++ IMG_BOOL bTFBCCompressionControlYUVFormat; ++#endif ++ IMG_BOOL bCheckMlist; ++ IMG_BOOL bDisableClockGating; ++ IMG_BOOL bDisableDMOverlap; ++ IMG_BOOL bDisableFEDLogging; ++ IMG_BOOL bDisablePDP; ++ IMG_BOOL bEnableCDMKillRand; ++ IMG_BOOL bEnableRandomCsw; ++ IMG_BOOL bEnableSoftResetCsw; ++ IMG_BOOL bFilteringMode; ++ IMG_BOOL bHWPerfDisableCustomCounterFilter; ++ IMG_BOOL bZeroFreelist; ++ IMG_UINT32 ui32EnableFWContextSwitch; ++ IMG_UINT32 ui32FWContextSwitchProfile; ++ ++ IMG_UINT32 ui32HWPerfFWBufSize; ++ IMG_UINT32 ui32HWPerfHostBufSize; ++ IMG_UINT32 ui32HWPerfFilter0; ++ IMG_UINT32 ui32HWPerfFilter1; ++ IMG_UINT32 ui32HWPerfHostFilter; ++ IMG_UINT32 ui32TimeCorrClock; ++ IMG_UINT32 ui32HWRDebugDumpLimit; ++ IMG_UINT32 ui32JonesDisableMask; ++ IMG_UINT32 ui32LogType; ++ IMG_UINT32 ui32TruncateMode; ++ IMG_UINT32 ui32KCCBSizeLog2; ++ FW_PERF_CONF eFirmwarePerf; ++ RGX_ACTIVEPM_CONF eRGXActivePMConf; ++ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf; ++ ++ IMG_BOOL bEnableTrustedDeviceAceConfig; ++ IMG_UINT32 ui32FWContextSwitchCrossDM; ++#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) ++ IMG_UINT32 ui32PhysMemTestPasses; ++#endif ++} RGX_SRVINIT_APPHINTS; ++ ++/*! 
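++ (A usage sketch: the container above is stack-allocated and zero-initialised ++ by the caller before being populated, as RGXInit() does later in this file: ++ RGX_SRVINIT_APPHINTS sApphints = {0}; ... GetApphints(psDevInfo, &sApphints);) ++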
++******************************************************************************* ++ ++ @Function GetApphints ++ ++ @Description Read init-time apphints and initialise internal variables ++ ++ @Input psDevInfo : Device info ++ @Output psHints : Pointer to apphints container to populate ++ ++ @Return void ++ ++******************************************************************************/ ++static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints) ++{ ++ void *pvParamState = SrvInitParamOpen(); ++ IMG_UINT32 ui32ParamTemp; ++ IMG_BOOL bS7TopInfra = IMG_FALSE, bE42290 = IMG_FALSE, bTPUFiltermodeCtrl = IMG_FALSE; ++ IMG_BOOL bE42606 = IMG_FALSE; ++#if defined(EMULATOR) ++ IMG_BOOL bAXIACELite = IMG_FALSE; ++#endif ++ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)) ++ { ++ bS7TopInfra = IMG_TRUE; ++ } ++#endif ++#if defined(RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TPU_FILTERING_MODE_CONTROL)) ++ { ++ bTPUFiltermodeCtrl = IMG_TRUE; ++ } ++#endif ++#if defined(HW_ERN_42290_BIT_MASK) ++ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42290)) ++ { ++ bE42290 = IMG_TRUE; ++ } ++#endif ++#if defined(HW_ERN_42606_BIT_MASK) ++ if (RGX_IS_ERN_SUPPORTED(psDevInfo, 42606)) ++ { ++ bE42606 = IMG_TRUE; ++ } ++#endif ++#if defined(HW_FEATURE_AXI_ACELITE_BIT_MASK) && defined(EMULATOR) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) ++ { ++ bAXIACELite = IMG_TRUE; ++ } ++#endif ++ ++ /* ++ * NB AppHints initialised to a default value via SrvInitParamInit* macros above ++ */ ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, DriverMode, psHints->ui32DriverMode); ++ SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, GPUUnitsPowerChange, psHints->bGPUUnitsPowerChange); ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableSignatureChecks, psHints->bEnableSignatureChecks); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize); ++ ++ SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, AssertOutOfMemory, psHints->bAssertOnOutOfMem); ++ SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger); ++ SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, CheckMList, psHints->bCheckMlist); ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, DisableClockGating, psHints->bDisableClockGating); ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, DisableDMOverlap, psHints->bDisableDMOverlap); ++ SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, DisableFEDLogging, psHints->bDisableFEDLogging); ++ SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, EnableAPM, ui32ParamTemp); ++ psHints->eRGXActivePMConf = ui32ParamTemp; ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableCDMKillingRandMode, psHints->bEnableCDMKillRand); ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableRandomContextSwitch, psHints->bEnableRandomCsw); ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableSoftResetContextSwitch, psHints->bEnableSoftResetCsw); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, EnableRDPowerIsland, ui32ParamTemp); ++ psHints->eRGXRDPowerIslandConf = ui32ParamTemp; ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FirmwarePerf, ui32ParamTemp); ++ 
psHints->eFirmwarePerf = ui32ParamTemp; ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile); ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, ++ HWPerfDisableCustomCounterFilter, psHints->bHWPerfDisableCustomCounterFilter); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize); ++ SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, KernelCCBSizeLog2, psHints->ui32KCCBSizeLog2); ++ ++ if (psHints->ui32KCCBSizeLog2 < PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "KCCB size %u is too low, setting to %u", ++ psHints->ui32KCCBSizeLog2, PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE)); ++ psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MIN_SIZE; ++ } ++ else if (psHints->ui32KCCBSizeLog2 > PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "KCCB size %u is too high, setting to %u", ++ psHints->ui32KCCBSizeLog2, PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE)); ++ psHints->ui32KCCBSizeLog2 = PVRSRV_RGX_LOG2_KERNEL_CCB_MAX_SIZE; ++ } ++ ++#if defined(SUPPORT_VALIDATION) ++ if (psHints->ui32KCCBSizeLog2 != PVRSRV_APPHINT_KCCB_SIZE_LOG2) ++ { ++ PVR_LOG(("KernelCCBSizeLog2 set to %u", psHints->ui32KCCBSizeLog2)); ++ } ++#endif ++ ++#if defined(__linux__) ++ /* name changes */ ++ { ++ IMG_UINT64 ui64Tmp; ++ SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, DisablePDumpPanic, psHints->bDisablePDP); ++ SrvInitParamGetUINT64(psDevInfo->psDeviceNode, pvParamState, HWPerfFWFilter, ui64Tmp); ++ psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu); ++ psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu); ++ } ++#else ++ SrvInitParamUnreferenced(DisablePDumpPanic); ++ SrvInitParamUnreferenced(HWPerfFWFilter); ++ SrvInitParamUnreferenced(RGXBVNC); ++#endif ++ SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, HWPerfHostFilter, psHints->ui32HWPerfHostFilter); ++ SrvInitParamGetUINT32List(psDevInfo->psDeviceNode, pvParamState, TimeCorrClock, psHints->ui32TimeCorrClock); ++ SrvInitParamGetUINT32(psDevInfo->psDeviceNode, pvParamState, HWRDebugDumpLimit, ui32ParamTemp); ++ psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL); ++ ++ if (bS7TopInfra) ++ { ++ #define RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK (0XFFFFFFCFU) ++ #define RGX_CR_JONES_FIX_MT_ORDER_ISP_EN (0X00000020U) ++ #define RGX_CR_JONES_FIX_MT_ORDER_TE_EN (0X00000010U) ++ ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, JonesDisableMask, ui32ParamTemp); ++ if (((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_ISP_EN) || ++ ((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_TE_EN)) ++ { ++ ui32ParamTemp |= (RGX_CR_JONES_FIX_MT_ORDER_TE_EN | ++ RGX_CR_JONES_FIX_MT_ORDER_ISP_EN); ++ PVR_DPF((PVR_DBG_WARNING, "Tile reordering mode requires both TE and ISP enabled. 
Forcing JonesDisableMask = %d", ++ ui32ParamTemp)); ++ } ++ psHints->ui32JonesDisableMask = ui32ParamTemp; ++ } ++ ++ if ((bE42290) && (bTPUFiltermodeCtrl)) ++ { ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, NewFilteringMode, psHints->bFilteringMode); ++ } ++ ++ if (bE42606) ++ { ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TruncateMode, psHints->ui32TruncateMode); ++ } ++#if defined(EMULATOR) ++ if (bAXIACELite) ++ { ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig); ++ } ++#endif ++ ++ SrvInitParamGetBOOL(psDevInfo->psDeviceNode, pvParamState, ZeroFreelist, psHints->bZeroFreelist); ++ ++#if defined(__linux__) ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM); ++#else ++ SrvInitParamUnreferenced(FWContextSwitchCrossDM); ++#endif ++ ++#if defined(SUPPORT_PHYSMEM_TEST) && !defined(INTEGRITY_OS) && !defined(__QNXNTO__) ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, PhysMemTestPasses, psHints->ui32PhysMemTestPasses); ++#endif ++ ++#if defined(SUPPORT_VALIDATION) ++ /* Apphints for TPU trilinear frac masking */ ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskPDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_PDM]); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskVDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_VDM]); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskCDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_CDM]); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TPUTrilinearFracMaskTDM, psHints->aui32TPUTrilinearFracMask[RGXFWIF_TPU_DM_TDM]); ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, ValidateIrq, psHints->bValidateIrq); ++ SrvInitParamGetBOOL(INITPARAM_NO_DEVICE, pvParamState, ValidateSOCUSCTimer, psHints->bValidateSOCUSCTimer); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, FBCDCVersionOverride, psHints->ui32FBCDCVersionOverride); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlGroup, psHints->ui32TFBCCompressionControlGroup); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlScheme, psHints->ui32TFBCCompressionControlScheme); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, TFBCCompressionControlYUVFormat, psHints->bTFBCCompressionControlYUVFormat); ++#endif ++ ++ /* ++ * FW logs apphints ++ */ ++ { ++ IMG_UINT32 ui32LogGroup, ui32TraceOrTBI; ++ ++ SrvInitParamGetUINT32BitField(psDevInfo->psDeviceNode, pvParamState, EnableLogGroup, ui32LogGroup); ++ SrvInitParamGetUINT32List(psDevInfo->psDeviceNode, pvParamState, FirmwareLogType, ui32TraceOrTBI); ++ ++ /* Defaulting to TRACE */ ++ BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); ++ ++#if defined(SUPPORT_TBI_INTERFACE) ++ if (ui32TraceOrTBI == 1 /* TBI */) ++ { ++ if ((ui32LogGroup & RGXFWIF_LOG_TYPE_GROUP_MASK) == 0) ++ { ++ /* No groups configured - defaulting to MAIN group */ ++ BITMASK_SET(ui32LogGroup, RGXFWIF_LOG_TYPE_GROUP_MAIN); ++ } ++ BITMASK_UNSET(ui32LogGroup, RGXFWIF_LOG_TYPE_TRACE); ++ } ++#endif ++ psHints->ui32LogType = ui32LogGroup; ++ } ++ ++ SrvInitParamClose(pvParamState); ++} ++ ++ ++/*! 
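++ (For illustration: on a non-GUEST configuration with only bCheckMlist and ++ bDisableDMOverlap set in the hints, the function below yields ++ ui32FWConfigFlags == (RGXFWIF_INICFG_CHECK_MLIST_EN | RGXFWIF_INICFG_DISABLE_DM_OVERLAP), ++ plus the context-switch profile and FBCDC v3.1 bits where applicable.) ++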
++******************************************************************************* ++ ++ @Function GetFWConfigFlags ++ ++ @Description Initialise and return FW config flags ++ ++ @Input psDeviceNode : Device node ++ @Input psHints : Apphints container ++ @Output pui32FWConfigFlags : Pointer to returned FW config flags ++ @Output pui32FWConfigFlagsExt : Pointer to returned extended FW config flags ++ @Output pui32FwOsCfgFlags : Pointer to returned FW OS config flags ++ ++ @Return void ++ ++******************************************************************************/ ++static INLINE void GetFWConfigFlags(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_SRVINIT_APPHINTS *psHints, ++ IMG_UINT32 *pui32FWConfigFlags, ++ IMG_UINT32 *pui32FWConfigFlagsExt, ++ IMG_UINT32 *pui32FwOsCfgFlags) ++{ ++#if defined(SUPPORT_VALIDATION) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++#endif ++ IMG_UINT32 ui32FWConfigFlags = 0; ++ IMG_UINT32 ui32FWConfigFlagsExt = 0; ++ ++ if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ ui32FWConfigFlags = 0; ++ ui32FWConfigFlagsExt = 0; ++ } ++ else ++ { ++ ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0; ++ ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0; ++ ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0; ++ ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0; ++ ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0; ++ ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_INICFG_DISABLE_PDP_EN : 0; ++ ui32FWConfigFlags |= psHints->bEnableCDMKillRand ? RGXFWIF_INICFG_DM_KILL_MODE_RAND_EN : 0; ++ ui32FWConfigFlags |= psHints->bEnableRandomCsw ? RGXFWIF_INICFG_CTXSWITCH_MODE_RAND : 0; ++ ui32FWConfigFlags |= psHints->bEnableSoftResetCsw ? RGXFWIF_INICFG_CTXSWITCH_SRESET_EN : 0; ++ ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0; ++ ui32FWConfigFlags |= psHints->bHWPerfDisableCustomCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0; ++ ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK; ++ ++#if defined(SUPPORT_VALIDATION) ++#if defined(NO_HARDWARE) && defined(PDUMP) ++ ui32FWConfigFlags |= psHints->bValidateIrq ? RGXFWIF_INICFG_VALIDATE_IRQ : 0; ++#endif ++ ++ if (psHints->ui32FBCDCVersionOverride > 0) ++ { ++ ui32FWConfigFlags |= (psHints->ui32FBCDCVersionOverride == 2) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; ++ } ++ else ++#endif /* defined(SUPPORT_VALIDATION) */ ++ { ++ ui32FWConfigFlags |= psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode) ? RGXFWIF_INICFG_FBCDC_V3_1_EN : 0; ++ } ++ ++#if defined(SUPPORT_VALIDATION) ++ ui32FWConfigFlags |= psHints->bValidateSOCUSCTimer ? 
RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER : 0; ++ ++ if ((ui32FWConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) && ++ ((psHints->eRGXActivePMConf != 0) || (psHints->eRGXRDPowerIslandConf != 0))) ++ { ++ psHints->eRGXActivePMConf = 0; ++ psHints->eRGXRDPowerIslandConf = 0; ++ PVR_DPF((PVR_DBG_WARNING, "SoC/USC Timer test needs to run with both EnableAPM and EnableRDPowerIsland disabled.\n" ++ "Overriding current value for both with new value 0.")); ++ } ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_LOSSY_37_PERCENT) || ++ RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_DELTA_CORRELATION) || ++ RGX_IS_FEATURE_SUPPORTED(psDevInfo, TFBC_NATIVE_YUV10)) ++ { ++ ui32FWConfigFlagsExt |= ++ ((((psHints->ui32TFBCCompressionControlGroup << RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_SHIFT) & ++ ~RGX_CR_TFBC_COMPRESSION_CONTROL_GROUP_CONTROL_CLRMSK) | ++ ((psHints->ui32TFBCCompressionControlScheme << RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_SHIFT) & ++ ~RGX_CR_TFBC_COMPRESSION_CONTROL_SCHEME_CLRMSK) | ++ ((psHints->bTFBCCompressionControlYUVFormat) ? RGX_CR_TFBC_COMPRESSION_CONTROL_YUV10_OVERRIDE_EN : 0)) ++ << RGXFWIF_INICFG_EXT_TFBC_CONTROL_SHIFT) & RGXFWIF_INICFG_EXT_TFBC_CONTROL_MASK; ++ } ++#endif ++ } ++ ++ *pui32FWConfigFlags = ui32FWConfigFlags; ++ *pui32FWConfigFlagsExt = ui32FWConfigFlagsExt; ++ *pui32FwOsCfgFlags = psHints->ui32FWContextSwitchCrossDM | ++ (psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_OS_CTXSWITCH_CLRMSK); ++} ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function GetFilterFlags ++ ++ @Description Initialise and return filter flags ++ ++ @Input psHints : Apphints container ++ ++ @Return IMG_UINT32 : Filter flags ++ ++******************************************************************************/ ++static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints) ++{ ++ IMG_UINT32 ui32FilterFlags = 0; ++ ++ ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0; ++ if (psHints->ui32TruncateMode == 2) ++ { ++ ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT; ++ } ++ else if (psHints->ui32TruncateMode == 3) ++ { ++ ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF; ++ } ++ ++ return ui32FilterFlags; ++} ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function InitDeviceFlags ++ ++ @Description Initialise and return device flags ++ ++ @Input psHints : Apphints container ++ @Output pui32DeviceFlags : Pointer to returned device flags ++ ++ @Return void ++ ++******************************************************************************/ ++static INLINE void InitDeviceFlags(RGX_SRVINIT_APPHINTS *psHints, ++ IMG_UINT32 *pui32DeviceFlags) ++{ ++ IMG_UINT32 ui32DeviceFlags = 0; ++ ++ ui32DeviceFlags |= psHints->bGPUUnitsPowerChange ? RGXKM_DEVICE_STATE_GPU_UNITS_POWER_CHANGE_EN : 0; ++ ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKM_DEVICE_STATE_ZERO_FREELIST : 0; ++ ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0; ++#if defined(PVRSRV_ENABLE_CCCB_GROW) ++ BITMASK_SET(ui32DeviceFlags, RGXKM_DEVICE_STATE_CCB_GROW_EN); ++#endif ++ ++ *pui32DeviceFlags = ui32DeviceFlags; ++} ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++/*! 
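++ (In outline, the function below wraps the firmware blob and the ++ processor-specific boot parameters in a PVRSRV_TD_FW_PARAMS structure and ++ hands it to the system layer through psDevConfig->pfnTDSendFWImage(), which ++ must be implemented when SUPPORT_TRUSTED_DEVICE is enabled.) ++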
++******************************************************************************* ++ ++ @Function RGXTDProcessFWImage ++ ++ @Description Fetch and send data used by the trusted device to complete ++ the FW image setup ++ ++ @Input psDeviceNode : Device node ++ @Input psRGXFW : Firmware blob ++ @Input puFWParams : Parameters used by the FW at boot time ++ ++ @Return PVRSRV_ERROR ++******************************************************************************/ ++static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode, ++ OS_FW_IMAGE *psRGXFW, ++ PVRSRV_FW_BOOT_PARAMS *puFWParams) ++{ ++ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_TD_FW_PARAMS sTDFWParams; ++ RGX_LAYER_PARAMS sLayerParams; ++ PVRSRV_ERROR eError; ++ ++ if (psDevConfig->pfnTDSendFWImage == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: TDSendFWImage not implemented!", __func__)); ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++ } ++ ++ sLayerParams.psDevInfo = psDevInfo; ++ ++ sTDFWParams.pvFirmware = OSFirmwareData(psRGXFW); ++ sTDFWParams.ui32FirmwareSize = OSFirmwareSize(psRGXFW); ++ ++#if defined(RGX_FEATURE_META_IDX) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ sTDFWParams.uFWP.sMeta = puFWParams->sMeta; ++ } ++ else ++#endif ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ sTDFWParams.uFWP.sMips = puFWParams->sMips; ++ ++ if (sTDFWParams.uFWP.sMips.ui32FWPageTableNumPages > TD_MAX_NUM_MIPS_PAGETABLE_PAGES) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Number of page table pages %u greater " ++ "than what is allowed by the TD interface (%u), FW might " ++ "not work properly!", __func__, ++ puFWParams->sMips.ui32FWPageTableNumPages, ++ TD_MAX_NUM_MIPS_PAGETABLE_PAGES)); ++ } ++ } ++ else ++ { ++ sTDFWParams.uFWP.sRISCV = puFWParams->sRISCV; ++ } ++ ++ eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams); ++ ++ return eError; ++} ++#endif ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXAcquireMipsBootldrData ++ ++ @Description Acquire MIPS bootloader data parameters ++ ++ @Input psDeviceNode : Device node ++ @Input puFWParams : FW boot parameters ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++static PVRSRV_ERROR RGXAcquireMipsBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_FW_BOOT_PARAMS *puFWParams) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice; ++ MMU_DEVICEATTRIBS *psFWMMUDevAttrs = psDevInfo->psDeviceNode->psFirmwareMMUDevAttrs; ++ IMG_DEV_PHYADDR sAddr; ++ IMG_UINT32 ui32PTSize, i; ++ PVRSRV_ERROR eError; ++ IMG_BOOL bValid; ++ ++ /* Rogue Registers physical address */ ++#if defined(SUPPORT_ALT_REGBASE) ++ puFWParams->sMips.sGPURegAddr = psDeviceNode->psDevConfig->sAltRegsGpuPBase; ++#else ++ PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_PHYS_HEAP_GPU_LOCAL], ++ 1, ++ &puFWParams->sMips.sGPURegAddr, ++ &(psDeviceNode->psDevConfig->sRegsCpuPBase)); ++#endif ++ ++ /* MIPS Page Table physical address */ ++ MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sAddr); ++ ++ /* MIPS Page Table allocation is contiguous. Pass one or more addresses ++ * to the FW depending on the Page Table size and alignment. 
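++ * For example, with a 4KB base alignment (ui32BaseAlign == 12) and a 16KB ++ * page table, the loop below passes four device physical addresses: ++ * sAddr.uiAddr + 0x0000, + 0x1000, + 0x2000 and + 0x3000.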
*/ ++ ++ ui32PTSize = (psFWMMUDevAttrs->psTopLevelDevVAddrConfig->uiNumEntriesPT) ++ << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE; ++ ui32PTSize = PVR_ALIGN(ui32PTSize, 1U << psFWMMUDevAttrs->ui32BaseAlign); ++ ++ puFWParams->sMips.ui32FWPageTableLog2PageSize = psFWMMUDevAttrs->ui32BaseAlign; ++ puFWParams->sMips.ui32FWPageTableNumPages = ui32PTSize >> psFWMMUDevAttrs->ui32BaseAlign; ++ ++ if (puFWParams->sMips.ui32FWPageTableNumPages > 4U) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Page table cannot be mapped by the FW " ++ "(size 0x%x, log2 page size %u, %u pages)", ++ __func__, ui32PTSize, puFWParams->sMips.ui32FWPageTableLog2PageSize, ++ puFWParams->sMips.ui32FWPageTableNumPages)); ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ ++ /* Confirm page alignment fits in 64-bits */ ++ if (psFWMMUDevAttrs->ui32BaseAlign > 63) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid page alignment " ++ "(psFWMMUDevAttrs->ui32BaseAlign = %u)", ++ __func__, psFWMMUDevAttrs->ui32BaseAlign)); ++ return PVRSRV_ERROR_INIT_FAILURE; ++ } ++ ++ for (i = 0; i < puFWParams->sMips.ui32FWPageTableNumPages; i++) ++ { ++ puFWParams->sMips.asFWPageTableAddr[i].uiAddr = ++ sAddr.uiAddr + i * (1ULL << psFWMMUDevAttrs->ui32BaseAlign); ++ } ++ ++ /* MIPS Stack Pointer Physical Address */ ++ eError = RGXGetPhyAddr(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR, ++ &puFWParams->sMips.sFWStackAddr, ++ RGXGetFWImageSectionOffset(NULL, MIPS_STACK), ++ OSGetPageShift(), ++ 1, ++ &bValid); ++ ++ return eError; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function InitFirmware ++ ++ @Description Allocate, initialise and pdump Firmware code and data memory ++ ++ @Input psDeviceNode : Device Node ++ @Input psHints : Apphints ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_SRVINIT_APPHINTS *psHints) ++{ ++ OS_FW_IMAGE *psRGXFW = NULL; ++ const IMG_BYTE *pbRGXFirmware = NULL; ++ ++ /* FW code memory */ ++ IMG_DEVMEM_SIZE_T uiFWCodeAllocSize; ++ void *pvFWCodeHostAddr; ++ ++ /* FW data memory */ ++ IMG_DEVMEM_SIZE_T uiFWDataAllocSize; ++ void *pvFWDataHostAddr; ++ ++ /* FW coremem code memory */ ++ IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize; ++ void *pvFWCorememCodeHostAddr = NULL; ++ ++ /* FW coremem data memory */ ++ IMG_DEVMEM_SIZE_T uiFWCorememDataAllocSize; ++ void *pvFWCorememDataHostAddr = NULL; ++ ++ PVRSRV_FW_BOOT_PARAMS uFWParams; ++ RGX_LAYER_PARAMS sLayerParams; ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ IMG_BOOL bUseSecureFWData = ++#if defined(RGX_FEATURE_META_IDX) ++ RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) || ++#endif ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ RGX_IS_FEATURE_SUPPORTED(psDevInfo, RISCV_FW_PROCESSOR) || ++#endif ++ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) && ++ RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32); ++#endif ++ ++ /* ++ * Get pointer to Firmware image ++ */ ++ eError = RGXLoadAndGetFWData(psDeviceNode, &psRGXFW, &pbRGXFirmware); ++ ++ if (eError != PVRSRV_OK) ++ { ++ /* Error or confirmation message generated in RGXLoadAndGetFWData */ ++ goto fw_load_fail; ++ } ++ ++ sLayerParams.psDevInfo = psDevInfo; ++ ++ /* ++ * Allocate Firmware memory ++ */ ++ ++ eError = RGXGetFWImageAllocSize(&sLayerParams, ++ pbRGXFirmware, ++ 
OSFirmwareSize(psRGXFW), ++ &uiFWCodeAllocSize, ++ &uiFWDataAllocSize, ++ &uiFWCorememCodeAllocSize, ++ &uiFWCorememDataAllocSize); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXGetFWImageAllocSize failed", ++ __func__)); ++ goto cleanup_initfw; ++ } ++ ++ psDevInfo->ui32FWCodeSizeInBytes = uiFWCodeAllocSize; ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_META_DMA_BIT_MASK) ++ /* Disable META core memory allocation unless the META DMA is available */ ++ if (!RGX_DEVICE_HAS_FEATURE(&sLayerParams, META_DMA)) ++ { ++ uiFWCorememCodeAllocSize = 0; ++ uiFWCorememDataAllocSize = 0; ++ } ++#endif ++ ++ psDevInfo->ui32FWCorememCodeSizeInBytes = uiFWCorememCodeAllocSize; ++ ++ eError = RGXInitAllocFWImgMem(psDeviceNode, ++ uiFWCodeAllocSize, ++ uiFWDataAllocSize, ++ uiFWCorememCodeAllocSize, ++ uiFWCorememDataAllocSize); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXInitAllocFWImgMem failed (%d)", ++ __func__, ++ eError)); ++ goto cleanup_initfw; ++ } ++ ++ /* ++ * Acquire pointers to Firmware allocations ++ */ ++ ++#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc, &pvFWCodeHostAddr); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", cleanup_initfw); ++ ++#else ++ /* We can't get a pointer to a secure FW allocation from within the DDK */ ++ pvFWCodeHostAddr = NULL; ++#endif ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ if (bUseSecureFWData) ++ { ++ /* We can't get a pointer to a secure FW allocation from within the DDK */ ++ pvFWDataHostAddr = NULL; ++ } ++ else ++#endif ++ { ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWDataHostAddr); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_code); ++ } ++ ++#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) ++ if (uiFWCorememCodeAllocSize) ++ { ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc, &pvFWCorememCodeHostAddr); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_data); ++ } ++#else ++ /* We can't get a pointer to a secure FW allocation from within the DDK */ ++ pvFWCorememCodeHostAddr = NULL; ++#endif ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ if (bUseSecureFWData) ++ { ++ pvFWCorememDataHostAddr = NULL; ++ } ++ else ++#endif ++ if (uiFWCorememDataAllocSize) ++ { ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, &pvFWCorememDataHostAddr); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", release_corememcode); ++ } ++ ++ /* ++ * Prepare FW boot parameters ++ */ ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)) ++ { ++ eError = RGXAcquireMipsBootldrData(psDeviceNode, &uFWParams); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXAcquireMipsBootldrData failed (%d)", ++ __func__, eError)); ++ goto release_fw_allocations; ++ } ++ } ++ else ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ++ { ++ uFWParams.sMeta.sFWCodeDevVAddr = psDevInfo->sFWCodeDevVAddrBase; ++ uFWParams.sMeta.sFWDataDevVAddr = psDevInfo->sFWDataDevVAddrBase; ++ uFWParams.sMeta.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; ++ uFWParams.sMeta.sFWCorememCodeFWAddr = 
psDevInfo->sFWCorememCodeFWAddr; ++ uFWParams.sMeta.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; ++ uFWParams.sMeta.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase; ++ uFWParams.sMeta.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; ++#if defined(RGXFW_META_SUPPORT_2ND_THREAD) ++ uFWParams.sMeta.ui32NumThreads = 2; ++#else ++ uFWParams.sMeta.ui32NumThreads = 1; ++#endif ++ } ++ else ++#endif ++ { ++ uFWParams.sRISCV.sFWCorememCodeDevVAddr = psDevInfo->sFWCorememCodeDevVAddrBase; ++ uFWParams.sRISCV.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr; ++ uFWParams.sRISCV.uiFWCorememCodeSize = uiFWCorememCodeAllocSize; ++ ++ uFWParams.sRISCV.sFWCorememDataDevVAddr = psDevInfo->sFWCorememDataStoreDevVAddrBase; ++ uFWParams.sRISCV.sFWCorememDataFWAddr = psDevInfo->sFWCorememDataStoreFWAddr; ++ uFWParams.sRISCV.uiFWCorememDataSize = uiFWCorememDataAllocSize; ++ } ++ ++ ++ /* ++ * Process the Firmware image and setup code and data segments. ++ * ++ * When the trusted device is enabled and the FW code lives ++ * in secure memory we will only setup the data segments here, ++ * while the code segments will be loaded to secure memory ++ * by the trusted device. ++ */ ++ if (!psDeviceNode->bAutoVzFwIsUp) ++ { ++ eError = RGXProcessFWImage(&sLayerParams, ++ pbRGXFirmware, ++ pvFWCodeHostAddr, ++ pvFWDataHostAddr, ++ pvFWCorememCodeHostAddr, ++ pvFWCorememDataHostAddr, ++ &uFWParams); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXProcessFWImage failed (%d)", ++ __func__, eError)); ++ goto release_fw_allocations; ++ } ++ } ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ RGXTDProcessFWImage(psDeviceNode, psRGXFW, &uFWParams); ++#endif ++ ++ ++ /* ++ * PDump Firmware allocations ++ */ ++ ++#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Dump firmware code image"); ++ DevmemPDumpLoadMem(psDevInfo->psRGXFWCodeMemDesc, ++ 0, ++ uiFWCodeAllocSize, ++ PDUMP_FLAGS_CONTINUOUS); ++#endif ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ if (!bUseSecureFWData) ++#endif ++ { ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Dump firmware data image"); ++ DevmemPDumpLoadMem(psDevInfo->psRGXFWDataMemDesc, ++ 0, ++ uiFWDataAllocSize, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ ++#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) ++ if (uiFWCorememCodeAllocSize) ++ { ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Dump firmware coremem code image"); ++ DevmemPDumpLoadMem(psDevInfo->psRGXFWCorememCodeMemDesc, ++ 0, ++ uiFWCorememCodeAllocSize, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++#endif ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ if (!bUseSecureFWData && uiFWCorememDataAllocSize) ++#else ++ if (uiFWCorememDataAllocSize) ++#endif ++ { ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Dump firmware coremem data store image"); ++ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfCorememDataStoreMemDesc, ++ 0, ++ uiFWCorememDataAllocSize, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ ++ /* ++ * Release Firmware allocations and clean up ++ */ ++ ++release_fw_allocations: ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ if 
(!bUseSecureFWData && uiFWCorememDataAllocSize) ++#else ++ if (uiFWCorememDataAllocSize) ++#endif ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfCorememDataStoreMemDesc); ++ } ++release_corememcode: ++#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) ++ if (uiFWCorememCodeAllocSize) ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCorememCodeMemDesc); ++ } ++#endif ++ ++#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) ++release_data: ++#endif ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ if (!bUseSecureFWData) ++#endif ++ { ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc); ++ } ++ ++release_code: ++#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE) || defined(SUPPORT_SECURITY_VALIDATION) ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWCodeMemDesc); ++#endif ++cleanup_initfw: ++ OSUnloadFirmware(psRGXFW); ++fw_load_fail: ++ ++ return eError; ++} ++ ++ ++#if defined(PDUMP) ++/*! ++******************************************************************************* ++ ++ @Function InitialiseHWPerfCounters ++ ++ @Description Initialisation of hardware performance counters and dumping ++ them out to pdump, so that they can be modified at a later ++ point. ++ ++ @Input pvDevice ++ @Input psHWPerfDataMemDesc ++ @Input psHWPerfInitDataInt ++ ++ @Return void ++ ++******************************************************************************/ ++ ++static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, ++ void *pvDevice, ++ DEVMEM_MEMDESC *psHWPerfDataMemDesc, ++ RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt) ++{ ++ RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData; ++ RGXFWIF_HWPERF_DA_BLK *psHWPerfInitDABlkData; ++ IMG_UINT32 ui32CntBlkModelLen; ++ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel; ++ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc; ++ IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx; ++ RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo; ++ ++ ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel); ++ ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "HWPerf Counter Config starts here."); ++ ++ for (ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++) ++ { ++ IMG_UINT32 uiUnit; ++ IMG_BOOL bDirect; ++ ++ /* Exit early if this core does not have any of these counter blocks ++ * due to core type/BVNC features. */ ++ psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx]; ++ if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDevice, &sCntBlkRtInfo) == IMG_FALSE) ++ { ++ continue; ++ } ++ ++ /* Program all counters in one block so those already on may ++ * be configured off and vice versa. */ ++ for (ui32BlockID = psBlkTypeDesc->ui32CntBlkIdBase; ++ ui32BlockID < psBlkTypeDesc->ui32CntBlkIdBase+sCntBlkRtInfo.ui32NumUnits; ++ ui32BlockID++) ++ { ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "Unit %d Block : %s", ++ ui32BlockID-psBlkTypeDesc->ui32CntBlkIdBase, ++ psBlkTypeDesc->pszBlockNameComment); ++ ++ /* Get the block configuration store to update from the global store of ++ * block configuration. This is used to remember the configuration ++ * between configurations and core power on in APM. ++ * For RGX_FEATURE_HWPERF_OCEANIC layout we have a different ++ * structure type to decode the HWPerf block. This is indicated by ++ * the RGX_CNTBLK_ID_DA_MASK bit being set in the block-ID value. 
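++ * Block IDs with the RGX_CNTBLK_ID_DA_MASK bit set are decoded through the ++ * RGXFWIF_HWPERF_DA_BLK branch below; all other blocks use the legacy ++ * RGXFWIF_HWPERF_CTL_BLK layout in the else branch.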
*/ ++ ++ bDirect = (psBlkTypeDesc->ui32IndirectReg == 0U); ++ uiUnit = ui32BlockID - psBlkTypeDesc->ui32CntBlkIdBase; ++ ++ if ((ui32BlockID & RGX_CNTBLK_ID_DA_MASK) == RGX_CNTBLK_ID_DA_MASK) ++ { ++ psHWPerfInitDABlkData = rgxfw_hwperf_get_da_block_ctl(ui32BlockID, psHWPerfInitDataInt); ++ ++ PVR_ASSERT(psHWPerfInitDABlkData); ++ ++ psHWPerfInitDABlkData->eBlockID = ui32BlockID; ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information."); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ (size_t)&(psHWPerfInitDABlkData->eBlockID) - (size_t)(psHWPerfInitDataInt), ++ psHWPerfInitDABlkData->eBlockID, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ psHWPerfInitDABlkData->uiEnabled = 0U; ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "uiEnabled: Set to 0x1 if the block needs to be enabled during playback."); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ (size_t)&(psHWPerfInitDABlkData->uiEnabled) - (size_t)(psHWPerfInitDataInt), ++ psHWPerfInitDABlkData->uiEnabled, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ psHWPerfInitDABlkData->uiNumCounters = 0U; ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "uiNumCounters (X): Specifies the number of valid counters" ++ " [0..%d] which follow.", RGX_CNTBLK_COUNTERS_MAX); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ (size_t)&(psHWPerfInitDABlkData->uiNumCounters) - (size_t)(psHWPerfInitDataInt), ++ psHWPerfInitDABlkData->uiNumCounters, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ for (ui32CounterIdx = 0; ui32CounterIdx < RGX_CNTBLK_COUNTERS_MAX; ui32CounterIdx++) ++ { ++ psHWPerfInitDABlkData->aui32Counters[ui32CounterIdx] = IMG_UINT32_C(0x00000000); ++ ++ if (bDirect) ++ { ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "%s_COUNTER_%d", ++ psBlkTypeDesc->pszBlockNameComment, ++ ui32CounterIdx); ++ } ++ else ++ { ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "%s%d_COUNTER_%d", ++ psBlkTypeDesc->pszBlockNameComment, ++ uiUnit, ui32CounterIdx); ++ } ++ ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ (size_t)&(psHWPerfInitDABlkData->aui32Counters[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt), ++ psHWPerfInitDABlkData->aui32Counters[ui32CounterIdx], ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ } ++ else ++ { ++ psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt); ++ /* Assert to check for HWPerf block mis-configuration */ ++ PVR_ASSERT(psHWPerfInitBlkData); ++ ++ psHWPerfInitBlkData->bValid = IMG_TRUE; ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "bValid: This specifies if the layout block is valid for the given BVNC."); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ (size_t)&(psHWPerfInitBlkData->bValid) - (size_t)(psHWPerfInitDataInt), ++ psHWPerfInitBlkData->bValid, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ psHWPerfInitBlkData->bEnabled = IMG_FALSE; ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "bEnabled: Set to 0x1 if the block needs to be enabled during playback."); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ (size_t)&(psHWPerfInitBlkData->bEnabled) - (size_t)(psHWPerfInitDataInt), ++ psHWPerfInitBlkData->bEnabled, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ psHWPerfInitBlkData->eBlockID = ui32BlockID; ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "eBlockID: The Block ID for the layout block. 
See RGX_HWPERF_CNTBLK_ID for further information."); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ (size_t)&(psHWPerfInitBlkData->eBlockID) - (size_t)(psHWPerfInitDataInt), ++ psHWPerfInitBlkData->eBlockID, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ psHWPerfInitBlkData->uiCounterMask = 0x00; ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "uiCounterMask: Bitmask for selecting the counters that need to be configured. (Bit 0 - counter0, bit 1 - counter1 and so on.)"); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ (size_t)&(psHWPerfInitBlkData->uiCounterMask) - (size_t)(psHWPerfInitDataInt), ++ psHWPerfInitBlkData->uiCounterMask, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ for (ui32CounterIdx = RGX_CNTBLK_COUNTER0_ID; ui32CounterIdx < psBlkTypeDesc->ui8NumCounters; ui32CounterIdx++) ++ { ++ psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx] = IMG_UINT64_C(0x0000000000000000); ++ ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "%s_COUNTER_%d", psBlkTypeDesc->pszBlockNameComment, ui32CounterIdx); ++ DevmemPDumpLoadMemValue64(psHWPerfDataMemDesc, ++ (size_t)&(psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt), ++ psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx], ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ } ++ } ++ } ++ } ++} ++/*! ++******************************************************************************* ++ ++ @Function InitialiseCustomCounters ++ ++ @Description Initialisation of custom counters and dumping them out to ++ pdump, so that they can be modified at a later point. ++ ++ @Input psHWPerfDataMemDesc ++ ++ @Return void ++ ++******************************************************************************/ ++ ++static void InitialiseCustomCounters(PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEM_MEMDESC *psHWPerfDataMemDesc) ++{ ++ IMG_UINT32 ui32CustomBlock, ui32CounterID; ++ ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "ui32SelectedCountersBlockMask - The Bitmask of the custom counters that are to be selected"); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ offsetof(RGXFWIF_HWPERF_CTL, ui32SelectedCountersBlockMask), ++ 0, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ for (ui32CustomBlock = 0; ui32CustomBlock < RGX_HWPERF_MAX_CUSTOM_BLKS; ui32CustomBlock++) ++ { ++ /* ++ * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of ++ * a non-const variable in the expression, which would need to be a compile-time constant. The typical compiler ++ * error produced is "expression must have a constant value". 
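++ * The cast-through-zero expressions below compute the same byte offsets by ++ * taking the address of the member within an RGXFWIF_HWPERF_CTL object ++ * placed at address zero and converting it via uintptr_t.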
++ */ ++ const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounters ++ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].ui32NumSelectedCounters); ++ ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "ui32NumSelectedCounters - The Number of counters selected for this Custom Block: %d",ui32CustomBlock ); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ uiOffsetOfCustomBlockSelectedCounters, ++ 0, ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ for (ui32CounterID = 0; ui32CounterID < RGX_HWPERF_MAX_CUSTOM_CNTRS; ui32CounterID++ ) ++ { ++ const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounterIDs ++ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].aui32SelectedCountersIDs[ui32CounterID]); ++ ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, PDUMP_FLAGS_CONTINUOUS, ++ "CUSTOMBLK_%d_COUNTERID_%d",ui32CustomBlock, ui32CounterID); ++ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc, ++ uiOffsetOfCustomBlockSelectedCounterIDs, ++ 0, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ } ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function InitialiseAllCounters ++ ++ @Description Initialise HWPerf and custom counters ++ ++ @Input psDeviceNode : Device Node ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++ RGXFWIF_HWPERF_CTL *psHWPerfInitData; ++ PVRSRV_ERROR eError; ++ ++ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void **)&psHWPerfInitData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt); ++ ++ InitialiseHWPerfCounters(psDeviceNode, psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc, psHWPerfInitData); ++ InitialiseCustomCounters(psDeviceNode, psDevInfo->psRGXFWIfHWPerfCountersMemDesc); ++ ++failHWPerfCountersMemDescAqCpuVirt: ++ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWPerfCountersMemDesc); ++ ++ return eError; ++} ++#endif /* PDUMP */ ++ ++/* ++ * _ParseHTBAppHints: ++ * ++ * Generate necessary references to the globally visible AppHints which are ++ * declared in the #include "km_apphint_defs.h" above. ++ * Without these local references some compiler tool-chains will treat ++ * unreferenced declarations as fatal errors. This function duplicates the ++ * HTB-specific apphint references which are made in htbserver.c:HTBInit(). ++ * However, it makes absolutely *NO* use of these hints. ++ */ ++static void ++_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ void *pvParamState = NULL; ++ IMG_UINT32 ui32LogType; ++ IMG_BOOL bAnyLogGroupConfigured; ++ IMG_UINT32 ui32BufferSize; ++ IMG_UINT32 ui32OpMode; ++ ++ /* Services initialisation parameters */ ++ pvParamState = SrvInitParamOpen(); ++ if (pvParamState == NULL) ++ return; ++ ++ SrvInitParamGetUINT32BitField(INITPARAM_NO_DEVICE, pvParamState, EnableHTBLogGroup, ui32LogType); ++ bAnyLogGroupConfigured = ui32LogType ? 
IMG_TRUE : IMG_FALSE; ++ SrvInitParamGetUINT32List(INITPARAM_NO_DEVICE, pvParamState, HTBOperationMode, ui32OpMode); ++ SrvInitParamGetUINT32(INITPARAM_NO_DEVICE, pvParamState, HTBufferSizeInKB, ui32BufferSize); ++ ++ SrvInitParamClose(pvParamState); ++} ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++static PVRSRV_ERROR RGXValidateTDHeap(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_PHYS_HEAP ePhysHeap, ++ PHYS_HEAP_USAGE_FLAGS ui32RequiredFlags) ++{ ++ PHYS_HEAP *psHeap = psDeviceNode->apsPhysHeap[ePhysHeap]; ++ PHYS_HEAP_USAGE_FLAGS ui32HeapFlags = PhysHeapGetFlags(psHeap); ++ PHYS_HEAP_USAGE_FLAGS ui32InvalidFlags = ~(PHYS_HEAP_USAGE_FW_PRIV_DATA | PHYS_HEAP_USAGE_FW_CODE ++ | PHYS_HEAP_USAGE_GPU_SECURE); ++ ++ PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32RequiredFlags) != 0, ++ PVRSRV_ERROR_NOT_SUPPORTED, ++ "TD heap is missing required flags. flags: 0x%x / required:0x%x", ++ ui32HeapFlags, ++ ui32RequiredFlags); ++ ++ PVR_LOG_RETURN_IF_FALSE_VA((ui32HeapFlags & ui32InvalidFlags) == 0, ++ PVRSRV_ERROR_NOT_SUPPORTED, ++ "TD heap uses invalid flags. flags: 0x%x / invalid:0x%x", ++ ui32HeapFlags, ++ ui32InvalidFlags); ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR RGXValidateTDHeaps(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_PRIV_DATA, PHYS_HEAP_USAGE_FW_PRIV_DATA); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_PRIV_DATA"); ++ ++ eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_FW_CODE, PHYS_HEAP_USAGE_FW_CODE); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:FW_CODE"); ++ ++ eError = RGXValidateTDHeap(psDeviceNode, PVRSRV_PHYS_HEAP_GPU_SECURE, PHYS_HEAP_USAGE_GPU_SECURE); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeap:GPU_SECURE"); ++ ++ return PVRSRV_OK; ++} ++#endif ++ ++/*! 
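++ (The initialisation below proceeds in order: register probe and multicore ++ setup, apphint parsing via GetApphints(), FW kernel memory context creation, ++ InitFirmware() to load and process the firmware image, RGXInitFirmware() with ++ the flags assembled by GetFWConfigFlags(), and finally RGXInitDevPart2().) ++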
++******************************************************************************* ++ ++ @Function RGXInit ++ ++ @Description RGX Initialisation ++ ++ @Input psDeviceNode ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Services initialisation parameters */ ++ RGX_SRVINIT_APPHINTS sApphints = {0}; ++ IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt, ui32FwOsCfgFlags; ++ IMG_UINT32 ui32DeviceFlags; ++ IMG_BOOL bPowerDown = (psDeviceNode->eCurrentSysPowerState == PVRSRV_SYS_POWER_STATE_OFF); ++ ++ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice; ++ RGX_LAYER_PARAMS sLayerParams; ++ ++ PDUMPCOMMENT(psDeviceNode, "RGX Initialisation Part 1"); ++ ++ PDUMPCOMMENT(psDeviceNode, "Device Name: %s", ++ psDeviceNode->psDevConfig->pszName); ++ PDUMPCOMMENT(psDeviceNode, "Device ID: %u (%d)", ++ psDeviceNode->sDevId.ui32InternalID, ++ psDeviceNode->sDevId.i32OsDeviceID); ++ ++ if (psDeviceNode->psDevConfig->pszVersion) ++ { ++ PDUMPCOMMENT(psDeviceNode, "Device Version: %s", ++ psDeviceNode->psDevConfig->pszVersion); ++ } ++ ++ /* pdump info about the core */ ++ PDUMPCOMMENT(psDeviceNode, ++ "RGX Version Information (KM): %d.%d.%d.%d", ++ psDevInfo->sDevFeatureCfg.ui32B, ++ psDevInfo->sDevFeatureCfg.ui32V, ++ psDevInfo->sDevFeatureCfg.ui32N, ++ psDevInfo->sDevFeatureCfg.ui32C); ++ ++ /* Power-up the device as required to read the registers */ ++ if (bPowerDown) ++ { ++ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_ON); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState ON"); ++ } ++ ++ RGXInitMultiCoreInfo(psDeviceNode); ++ ++ if (bPowerDown) ++ { ++ eError = PVRSRVSetSystemPowerState(psDeviceNode->psDevConfig, PVRSRV_SYS_POWER_STATE_OFF); ++ PVR_LOG_RETURN_IF_ERROR(eError, "PVRSRVSetSystemPowerState OFF"); ++ } ++ ++#if defined(PDUMP) ++ eError = DevmemIntAllocDefBackingPage(psDeviceNode, ++ &psDeviceNode->sDummyPage, ++ PVR_DUMMY_PAGE_INIT_VALUE, ++ DUMMY_PAGE, ++ IMG_TRUE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate dummy page.", __func__)); ++ goto cleanup; ++ } ++ ++ eError = DevmemIntAllocDefBackingPage(psDeviceNode, ++ &psDeviceNode->sDevZeroPage, ++ PVR_ZERO_PAGE_INIT_VALUE, ++ DEV_ZERO_PAGE, ++ IMG_TRUE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate Zero page.", __func__)); ++ goto cleanup; ++ } ++#endif /* defined(PDUMP) */ ++ ++ sLayerParams.psDevInfo = psDevInfo; ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ eError = RGXValidateTDHeaps(psDeviceNode); ++ PVR_LOG_RETURN_IF_ERROR(eError, "RGXValidateTDHeaps"); ++#endif ++ ++#if defined(SUPPORT_AUTOVZ) ++ if (PVRSRV_VZ_MODE_IS(HOST)) ++ { ++ /* The RGX_CR_MTS_DM0_INTERRUPT_ENABLE register is always set by the firmware during initialisation ++ * and it provides a good method of determining if the firmware has been booted previously */ ++ psDeviceNode->bAutoVzFwIsUp = (OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_DM0_INTERRUPT_ENABLE) != 0); ++ ++ PVR_LOG(("AutoVz startup check: firmware is %s;", ++ (psDeviceNode->bAutoVzFwIsUp) ? 
"already running" : "powered down")); ++ } ++ else if (PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ /* Guest assumes the firmware is always available */ ++ psDeviceNode->bAutoVzFwIsUp = IMG_TRUE; ++ } ++ else ++#endif ++ { ++ /* Firmware does not follow the AutoVz life-cycle */ ++ psDeviceNode->bAutoVzFwIsUp = IMG_FALSE; ++ } ++ ++ if (PVRSRV_VZ_MODE_IS(GUEST) || (psDeviceNode->bAutoVzFwIsUp)) ++ { ++ /* set the device power state here as the regular power ++ * callbacks will not be executed on this driver */ ++ psDevInfo->bRGXPowered = IMG_TRUE; ++ } ++ ++ /* Set which HW Safety Events will be handled by the driver */ ++#if defined(RGX_FEATURE_WATCHDOG_TIMER_BIT_MASK) ++ psDevInfo->ui32HostSafetyEventMask |= RGX_IS_FEATURE_SUPPORTED(psDevInfo, WATCHDOG_TIMER) ? ++ RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__WATCHDOG_TIMEOUT_EN : 0; ++#endif ++#if defined(RGX_FEATURE_ECC_RAMS_MAX_VALUE_IDX) ++ psDevInfo->ui32HostSafetyEventMask |= (RGX_DEVICE_HAS_FEATURE_VALUE(&sLayerParams, ECC_RAMS) ++ && (RGX_DEVICE_GET_FEATURE_VALUE(&sLayerParams, ECC_RAMS) > 0)) ? ++ RGX_CR_SAFETY_EVENT_STATUS__ROGUEXE__FAULT_FW_EN : 0; ++#endif ++ ++ /* Services initialisation parameters */ ++ _ParseHTBAppHints(psDeviceNode); ++ GetApphints(psDevInfo, &sApphints); ++ InitDeviceFlags(&sApphints, &ui32DeviceFlags); ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#if defined(EMULATOR) ++ if ((sApphints.bEnableTrustedDeviceAceConfig) && ++ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE))) ++ { ++ SetTrustedDeviceAceEnabled(); ++ } ++#endif ++#endif ++ ++ eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create FW kernel memory context (%u)", ++ __func__, eError)); ++ goto cleanup; ++ } ++ ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ eError = InitFirmware(psDeviceNode, &sApphints); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: InitFirmware failed (%d)", ++ __func__, eError)); ++ goto cleanup; ++ } ++ } ++ ++ /* ++ * Setup Firmware initialisation data ++ */ ++ ++ GetFWConfigFlags(psDeviceNode, &sApphints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt, &ui32FwOsCfgFlags); ++ ++ eError = RGXInitFirmware(psDeviceNode, ++ sApphints.bEnableSignatureChecks, ++ sApphints.ui32SignatureChecksBufSize, ++ sApphints.ui32HWPerfFWBufSize, ++ (IMG_UINT64)sApphints.ui32HWPerfFilter0 | ++ ((IMG_UINT64)sApphints.ui32HWPerfFilter1 << 32), ++ ui32FWConfigFlags, ++ sApphints.ui32LogType, ++ GetFilterFlags(&sApphints), ++ sApphints.ui32JonesDisableMask, ++ sApphints.ui32HWRDebugDumpLimit, ++ sizeof(RGXFWIF_HWPERF_CTL), ++#if defined(SUPPORT_VALIDATION) ++ &sApphints.aui32TPUTrilinearFracMask[0], ++#else ++ NULL, ++#endif ++ sApphints.eRGXRDPowerIslandConf, ++ sApphints.eFirmwarePerf, ++ sApphints.ui32KCCBSizeLog2, ++ ui32FWConfigFlagsExt, ++ ui32FwOsCfgFlags); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXInitFirmware failed (%d)", ++ __func__, ++ eError)); ++ goto cleanup; ++ } ++ ++#if defined(PDUMP) ++ if (!PVRSRV_VZ_MODE_IS(GUEST)) ++ { ++ eError = InitialiseAllCounters(psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: InitialiseAllCounters failed (%d)", ++ __func__, eError)); ++ goto cleanup; ++ } ++ } ++#endif ++ ++ /* ++ * Perform second stage of RGX initialisation ++ */ ++ eError = RGXInitDevPart2(psDeviceNode, ++ ui32DeviceFlags, ++ sApphints.ui32HWPerfHostFilter, ++ sApphints.eRGXActivePMConf); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: RGXInitDevPart2 failed 
(%d)", ++ __func__, eError)); ++ goto cleanup; ++ } ++ ++#if defined(SUPPORT_VALIDATION) ++ PVRSRVAppHintDumpState(psDeviceNode); ++#endif ++ ++ eError = PVRSRV_OK; ++ ++cleanup: ++ return eError; ++} ++ ++/****************************************************************************** ++ End of file (rgxsrvinit.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxstartstop.c b/drivers/gpu/drm/img-rogue/rgxstartstop.c +new file mode 100644 +index 000000000000..2d213e3540c6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxstartstop.c +@@ -0,0 +1,1331 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device specific start/stop routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific start/stop routines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* The routines implemented here are built on top of an abstraction layer to ++ * hide DDK/OS-specific details in case they are used outside of the DDK ++ * (e.g. when trusted device is enabled). ++ * Any new dependency should be added to rgxlayer.h. ++ * Any new code should be built on top of the existing abstraction layer, ++ * which should be extended when necessary. 
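++ *
++ * Editor's note (illustrative, not part of the original patch): one way to
++ * picture that abstraction is as a small table of register accessors which
++ * the start/stop code is written against. The names below are hypothetical
++ * and do not match rgxlayer.h; they only sketch the shape of such a layer.
++ */
++#if 0
++typedef struct EXAMPLE_REG_LAYER
++{
++	IMG_UINT32 (*pfnReadReg32)(const void *hPrivate, IMG_UINT32 ui32Offset);
++	void (*pfnWriteReg32)(const void *hPrivate, IMG_UINT32 ui32Offset,
++	                      IMG_UINT32 ui32Value);
++	void (*pfnCommentLog)(const void *hPrivate, const IMG_CHAR *pszFmt);
++} EXAMPLE_REG_LAYER;
++#endif
++/*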
*/ ++#include "rgxstartstop.h" ++ ++#if defined(SUPPORT_SHARED_SLC) ++#include "rgxapi_km.h" ++#endif ++ ++#include "rgxdevice.h" ++#include "km/rgxdefs_km.h" ++ ++#define SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXEnableClocks ++ ++ @Description Enable RGX Clocks ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return void ++ ++******************************************************************************/ ++static void RGXEnableClocks(const void *hPrivate) ++{ ++ RGXCommentLog(hPrivate, "RGX clock: use default (automatic clock gating)"); ++} ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ /* Wait for Slave Port to be Ready */ ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_META_SP_MSLVCTRL1, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); ++ if (eError != PVRSRV_OK) return eError; ++ ++ /* Issue a Write */ ++ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr); ++ (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ ++ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue); ++ (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT); /* Fence write */ ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate, ++ IMG_UINT32 ui32RegAddr, ++ IMG_UINT32* ui32RegValue) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ /* Wait for Slave Port to be Ready */ ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_META_SP_MSLVCTRL1, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); ++ if (eError != PVRSRV_OK) return eError; ++ ++ /* Issue a Read */ ++ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN); ++ (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0); /* Fence write */ ++ ++ /* Wait for Slave Port to be Ready */ ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_META_SP_MSLVCTRL1, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); ++ if (eError != PVRSRV_OK) return eError; ++ ++#if !defined(NO_HARDWARE) ++ *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX); ++#else ++ *ui32RegValue = 0xFFFFFFFF; ++#endif ++ ++ return eError; ++} ++ ++static PVRSRV_ERROR RGXWriteMetaCoreRegThoughSP(const void *hPrivate, ++ IMG_UINT32 ui32CoreReg, ++ IMG_UINT32 ui32Value) ++{ ++ IMG_UINT32 i = 0; ++ ++ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value); ++ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT); ++ ++ do ++ { ++ RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value); ++ } while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000)); ++ ++ if (i == 1000) ++ { ++ RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThoughSP: Timeout"); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Give privilege to debug and slave port */ ++ 
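++	/* Editor's sketch (illustrative, not part of the original patch): the
++	 * slave-port helpers above share one handshake: poll MSLVCTRL1 until
++	 * READY and GBLPORT_IDLE are both set, issue the access, then fence it
++	 * with a dummy read. A minimal, compiled-out model of such a bounded
++	 * poll, reusing the OSReadHWReg32() accessor seen earlier in this patch:
++	 */
++#if 0
++static PVRSRV_ERROR ExamplePollReg32(void *pvRegsBase,
++                                     IMG_UINT32 ui32Offset,
++                                     IMG_UINT32 ui32Mask,
++                                     IMG_UINT32 ui32Expected)
++{
++	IMG_UINT32 ui32Tries;
++
++	for (ui32Tries = 0; ui32Tries < 1000; ui32Tries++)
++	{
++		/* Succeed as soon as the masked bits read back as expected */
++		if ((OSReadHWReg32(pvRegsBase, ui32Offset) & ui32Mask) == ui32Expected)
++		{
++			return PVRSRV_OK;
++		}
++	}
++	return PVRSRV_ERROR_TIMEOUT; /* bounded wait, mirroring the helpers above */
++}
++#endif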
RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN); ++ ++ /* Point Meta to the bootloader address, global (uncached) range */ ++ eError = RGXWriteMetaCoreRegThoughSP(hPrivate, ++ PC_ACCESS(0), ++ RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT); ++ ++ if (eError != PVRSRV_OK) ++ { ++ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!"); ++ return eError; ++ } ++ ++ /* Enable minim encoding */ ++ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN); ++ ++ /* Enable Meta thread */ ++ RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXInitMetaProcWrapper ++ ++ @Description Configures the hardware wrapper of the META processor ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return void ++ ++******************************************************************************/ ++static void RGXInitMetaProcWrapper(const void *hPrivate) ++{ ++ IMG_UINT64 ui64GartenConfig; ++ ++ /* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address */ ++ ++ /* Garten IDLE bit controlled by META */ ++ ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META; ++ ++ /* The fence addr is set at the fw init sequence */ ++ ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) ++ { ++ /* Set PC = 0 for fences */ ++ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK; ++ ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV ++ << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT; ++ ++ } ++ else ++ { ++ /* Set PC = 0 for fences */ ++ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK; ++ ui64GartenConfig |= (IMG_UINT64)MMU_CONTEXT_MAPPING_FWPRIV ++ << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT; ++ ++ /* Set SLC DM=META */ ++ ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_BIFDM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT; ++ } ++ ++ RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper"); ++ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig); ++} ++#endif ++ ++/*! 
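++ Editor's note, placed between functions for readability (illustrative, not
++ part of the original patch): the Garten wrapper setup above relies on the
++ DDK's paired _CLRMSK/_SHIFT register macros: clear a field with its CLRMSK,
++ then OR in the new value shifted into place. A generic, compiled-out model
++ of the idiom with made-up names:
++*/
++#if 0
++#define EXAMPLE_FIELD_SHIFT  (12)
++#define EXAMPLE_FIELD_CLRMSK (~(IMG_UINT64_C(0xF) << EXAMPLE_FIELD_SHIFT))
++
++static IMG_UINT64 ExampleSetField(IMG_UINT64 ui64Reg, IMG_UINT64 ui64FieldVal)
++{
++	/* Clear the 4-bit field, then insert the shifted, masked new value */
++	return (ui64Reg & EXAMPLE_FIELD_CLRMSK) |
++	       ((ui64FieldVal << EXAMPLE_FIELD_SHIFT) & ~EXAMPLE_FIELD_CLRMSK);
++}
++#endif
++/*!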
++******************************************************************************* ++ ++ @Function RGXInitMipsProcWrapper ++ ++ @Description Configures the hardware wrapper of the MIPS processor ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return void ++ ++******************************************************************************/ ++static void RGXInitMipsProcWrapper(const void *hPrivate) ++{ ++ IMG_DEV_PHYADDR sPhyAddr; ++ IMG_UINT64 ui64RemapSettings = RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; /* Same for all remap registers */ ++ ++ RGXCommentLog(hPrivate, "RGXStart: Configure MIPS wrapper"); ++ ++ /* ++ * MIPS wrapper (registers transaction ID and ISA mode) setup ++ */ ++ ++ RGXCommentLog(hPrivate, "RGXStart: Write wrapper config register"); ++ ++ if (RGXGetDevicePhysBusWidth(hPrivate) > 32) ++ { ++ RGXWriteReg32(hPrivate, ++ RGX_CR_MIPS_WRAPPER_CONFIG, ++ (RGXMIPSFW_REGISTERS_VIRTUAL_BASE >> ++ RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) | ++ RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS); ++ } ++ else ++ { ++ RGXAcquireGPURegsAddr(hPrivate, &sPhyAddr); ++ ++ RGXMIPSWrapperConfig(hPrivate, ++ RGX_CR_MIPS_WRAPPER_CONFIG, ++ sPhyAddr.uiAddr, ++ RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN, ++ RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS); ++ } ++ ++ /* ++ * Boot remap setup ++ */ ++ ++ RGXAcquireBootRemapAddr(hPrivate, &sPhyAddr); ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ /* Do not mark accesses to a FW code remap region as DRM accesses */ ++ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; ++#endif ++ ++#if defined(MIPS_FW_CODE_OSID) ++ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; ++ ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; ++#endif ++ ++ RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers"); ++ RGXBootRemapConfig(hPrivate, ++ RGX_CR_MIPS_ADDR_REMAP1_CONFIG1, ++ RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN, ++ RGX_CR_MIPS_ADDR_REMAP1_CONFIG2, ++ sPhyAddr.uiAddr, ++ ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK, ++ ui64RemapSettings); ++ ++#if defined(FIX_HW_BRN_63553_BIT_MASK) ++ if (RGX_DEVICE_HAS_BRN(hPrivate, 63553)) ++ { ++ IMG_BOOL bPhysBusAbove32Bit = RGXGetDevicePhysBusWidth(hPrivate) > 32; ++ IMG_BOOL bDevicePA0IsValid = RGXDevicePA0IsValid(hPrivate); ++ ++ /* WA always required on 36 bit cores, to avoid continuous unmapped memory accesses to address 0x0 */ ++ if (bPhysBusAbove32Bit || !bDevicePA0IsValid) ++ { ++ RGXCodeRemapConfig(hPrivate, ++ RGX_CR_MIPS_ADDR_REMAP5_CONFIG1, ++ 0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN, ++ RGX_CR_MIPS_ADDR_REMAP5_CONFIG2, ++ sPhyAddr.uiAddr, ++ ~RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK, ++ ui64RemapSettings); ++ } ++ } ++#endif ++ ++ /* ++ * Data remap setup ++ */ ++ ++ RGXAcquireDataRemapAddr(hPrivate, &sPhyAddr); ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ if (RGXGetDevicePhysBusWidth(hPrivate) > 32) ++ { ++ /* Remapped private data in secure memory */ ++ ui64RemapSettings |= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN; ++ } ++ else ++ { ++ /* Remapped data in non-secure memory */ ++ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; ++ } ++#endif ++ ++#if defined(MIPS_FW_CODE_OSID) ++ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; ++#endif ++ ++ RGXCommentLog(hPrivate, "RGXStart: Write data remap registers"); ++ RGXDataRemapConfig(hPrivate, ++ RGX_CR_MIPS_ADDR_REMAP2_CONFIG1, ++ RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN | 
RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN, ++ RGX_CR_MIPS_ADDR_REMAP2_CONFIG2, ++ sPhyAddr.uiAddr, ++ ~RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK, ++ ui64RemapSettings); ++ ++ /* ++ * Code remap setup ++ */ ++ ++ RGXAcquireCodeRemapAddr(hPrivate, &sPhyAddr); ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ /* Do not mark accesses to a FW code remap region as DRM accesses */ ++ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; ++#endif ++ ++#if defined(MIPS_FW_CODE_OSID) ++ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; ++ ui64RemapSettings |= MIPS_FW_CODE_OSID << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT; ++#endif ++ ++ RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers"); ++ RGXCodeRemapConfig(hPrivate, ++ RGX_CR_MIPS_ADDR_REMAP3_CONFIG1, ++ RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN, ++ RGX_CR_MIPS_ADDR_REMAP3_CONFIG2, ++ sPhyAddr.uiAddr, ++ ~RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK, ++ ui64RemapSettings); ++ ++ if (RGXGetDevicePhysBusWidth(hPrivate) == 32) ++ { ++ /* ++ * Trampoline remap setup ++ */ ++ ++ RGXAcquireTrampolineRemapAddr(hPrivate, &sPhyAddr); ++ ui64RemapSettings = RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE; ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ /* Remapped data in non-secure memory */ ++ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK; ++#endif ++ ++#if defined(MIPS_FW_CODE_OSID) ++ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK; ++#endif ++ ++ RGXCommentLog(hPrivate, "RGXStart: Write trampoline remap registers"); ++ RGXTrampolineRemapConfig(hPrivate, ++ RGX_CR_MIPS_ADDR_REMAP4_CONFIG1, ++ sPhyAddr.uiAddr | RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN, ++ RGX_CR_MIPS_ADDR_REMAP4_CONFIG2, ++ RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR, ++ ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK, ++ ui64RemapSettings); ++ } ++ ++ /* Garten IDLE bit controlled by MIPS */ ++ RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to MIPS"); ++ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); ++ ++ /* Turn on the EJTAG probe (only useful driver live) */ ++ RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0); ++} ++ ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++/*! 
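++ Editor's note, placed between functions for readability (illustrative, not
++ part of the original patch): each MIPS remap configured above maps a fixed
++ input window onto the physical address of a firmware allocation, with the
++ segment size encoded as a log2 value in the remap settings. A toy,
++ compiled-out model of the translation a remap performs; all names are
++ hypothetical:
++*/
++#if 0
++static IMG_UINT64 ExampleRemapTranslate(IMG_UINT64 ui64AddrIn,
++                                        IMG_UINT64 ui64WindowBaseOut,
++                                        IMG_UINT32 ui32Log2SegSize)
++{
++	/* Offset within the 2^log2 segment, rebased onto the output window */
++	IMG_UINT64 ui64Offset = ui64AddrIn & ((IMG_UINT64_C(1) << ui32Log2SegSize) - 1);
++
++	return ui64WindowBaseOut + ui64Offset;
++}
++#endif
++/*!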
++******************************************************************************* ++ ++ @Function RGXInitRiscvProcWrapper ++ ++ @Description Configures the hardware wrapper of the RISCV processor ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return void ++ ++******************************************************************************/ ++static void RGXInitRiscvProcWrapper(const void *hPrivate) ++{ ++ IMG_DEV_VIRTADDR sTmp; ++ ++ RGXCommentLog(hPrivate, "RGXStart: Configure RISCV wrapper"); ++ ++ RGXCommentLog(hPrivate, "RGXStart: Write boot code remap"); ++ RGXAcquireBootCodeAddr(hPrivate, &sTmp); ++ RGXWriteReg64(hPrivate, ++ RGXRISCVFW_BOOTLDR_CODE_REMAP, ++ sTmp.uiAddr | ++ (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) ++ << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | ++ (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | ++ RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_FETCH_EN_EN); ++ ++ RGXCommentLog(hPrivate, "RGXStart: Write boot data remap"); ++ RGXAcquireBootDataAddr(hPrivate, &sTmp); ++ RGXWriteReg64(hPrivate, ++ RGXRISCVFW_BOOTLDR_DATA_REMAP, ++ sTmp.uiAddr | ++ (IMG_UINT64) (RGX_FIRMWARE_RAW_HEAP_SIZE >> FWCORE_ADDR_REMAP_CONFIG0_SIZE_ALIGNSHIFT) ++ << RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_SIZE_SHIFT | ++ (IMG_UINT64) MMU_CONTEXT_MAPPING_FWPRIV << FWCORE_ADDR_REMAP_CONFIG0_MMU_CONTEXT_SHIFT | ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_TRUSTED_EN | ++#endif ++ RGX_CR_FWCORE_ADDR_REMAP_CONFIG0_LOAD_STORE_EN_EN); ++ ++ /* Garten IDLE bit controlled by RISCV */ ++ RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to RISCV"); ++ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META); ++} ++#endif ++ ++ ++/*! 
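++ Editor's note, placed between functions for readability (illustrative, not
++ part of the original patch): the "(void) read-back after write" pattern
++ used throughout these routines exists because MMIO writes can be posted;
++ reading a register on the same interface forces the write to land before
++ execution continues. A compiled-out model, assuming a write accessor
++ symmetrical to the OSReadHWReg32() seen earlier:
++*/
++#if 0
++static void ExampleFencedWrite32(void *pvRegsBase, IMG_UINT32 ui32Offset,
++                                 IMG_UINT32 ui32Value)
++{
++	OSWriteHWReg32(pvRegsBase, ui32Offset, ui32Value); /* assumed counterpart */
++	(void) OSReadHWReg32(pvRegsBase, ui32Offset);      /* fence the posted write */
++}
++#endif
++/*!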
++******************************************************************************* ++ ++ @Function __RGXInitSLC ++ ++ @Description Initialise RGX SLC ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return void ++ ++******************************************************************************/ ++static void __RGXInitSLC(const void *hPrivate) ++{ ++#if defined(RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_CACHE_HIERARCHY)) ++ { ++ IMG_UINT32 ui32Reg; ++ IMG_UINT32 ui32RegVal; ++ ++ /* ++ * SLC control ++ */ ++ ui32Reg = RGX_CR_SLC3_CTRL_MISC; ++ ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH | ++ RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN; ++ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal); ++ ++ /* ++ * SLC scramble bits ++ */ ++ { ++ IMG_UINT32 i; ++ IMG_UINT32 ui32Count=0; ++ IMG_UINT32 ui32SLCBanks = RGXGetDeviceSLCBanks(hPrivate); ++ IMG_UINT64 aui64ScrambleValues[4]; ++ IMG_UINT32 aui32ScrambleRegs[] = { ++ RGX_CR_SLC3_SCRAMBLE, ++ RGX_CR_SLC3_SCRAMBLE2, ++ RGX_CR_SLC3_SCRAMBLE3, ++ RGX_CR_SLC3_SCRAMBLE4 ++ }; ++ ++ if (2 == ui32SLCBanks) ++ { ++ aui64ScrambleValues[0] = IMG_UINT64_C(0x6965a99a55696a6a); ++ aui64ScrambleValues[1] = IMG_UINT64_C(0x6aa9aa66959aaa9a); ++ aui64ScrambleValues[2] = IMG_UINT64_C(0x9a5665965a99a566); ++ aui64ScrambleValues[3] = IMG_UINT64_C(0x5aa69596aa66669a); ++ ui32Count = 4; ++ } ++ else if (4 == ui32SLCBanks) ++ { ++ aui64ScrambleValues[0] = IMG_UINT64_C(0xc6788d722dd29ce4); ++ aui64ScrambleValues[1] = IMG_UINT64_C(0x7272e4e11b279372); ++ aui64ScrambleValues[2] = IMG_UINT64_C(0x87d872d26c6c4be1); ++ aui64ScrambleValues[3] = IMG_UINT64_C(0xe1b4878d4b36e478); ++ ui32Count = 4; ++ ++ } ++ else if (8 == ui32SLCBanks) ++ { ++ aui64ScrambleValues[0] = IMG_UINT64_C(0x859d6569e8fac688); ++ aui64ScrambleValues[1] = IMG_UINT64_C(0xf285e1eae4299d33); ++ aui64ScrambleValues[2] = IMG_UINT64_C(0x1e1af2be3c0aa447); ++ ui32Count = 3; ++ } ++ ++ for (i = 0; i < ui32Count; i++) ++ { ++ IMG_UINT32 ui32Reg = aui32ScrambleRegs[i]; ++ IMG_UINT64 ui64Value = aui64ScrambleValues[i]; ++ RGXWriteReg64(hPrivate, ui32Reg, ui64Value); ++ } ++ } ++ ++ { ++ /* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */ ++ RGXCommentLog(hPrivate, "Disable forced SLC coherency"); ++ RGXWriteReg64(hPrivate, RGX_CR_GARTEN_SLC, 0); ++ } ++ } ++ else ++#endif ++ { ++ IMG_UINT32 ui32Reg; ++ IMG_UINT32 ui32RegVal; ++ IMG_UINT64 ui64RegVal; ++ ++ /* ++ * SLC Bypass control ++ */ ++ ui32Reg = RGX_CR_SLC_CTRL_BYPASS; ++ ui64RegVal = 0; ++ ++#if defined(RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN) ++ if ((RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, SLC_SIZE_IN_KILOBYTES) == 8) || ++ RGX_DEVICE_HAS_BRN(hPrivate, 61450)) ++ { ++ RGXCommentLog(hPrivate, "Bypass SLC for IPF_OBJ and IPF_CPF"); ++ ui64RegVal |= (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN | ++ (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN; ++ } ++#endif ++ ++ if (ui64RegVal != 0) ++ { ++ RGXReadModifyWriteReg64(hPrivate, ui32Reg, ui64RegVal, ~ui64RegVal); ++ } ++ ++ /* ++ * SLC Misc control. ++ * ++ * Note: This is a 64bit register and we set only the lower 32bits leaving the top ++ * 32bits (RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS) unchanged from the HW default. 
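++ *
++ * (Editor's note, illustrative and compiled out, not part of the original
++ * patch: the scramble programming earlier in this function selects a fixed
++ * table of 64-bit seeds keyed on the SLC bank count; the seed constants are
++ * elided below, see the tables above. A compact model of that selection:)
++ */
++#if 0
++static IMG_UINT32 ExampleScrambleCount(IMG_UINT32 ui32SLCBanks,
++                                       const IMG_UINT64 **ppui64Values)
++{
++	static const IMG_UINT64 aui64TwoBanks[4]   = { 0 }; /* 4 seeds for 2 banks */
++	static const IMG_UINT64 aui64FourBanks[4]  = { 0 }; /* 4 seeds for 4 banks */
++	static const IMG_UINT64 aui64EightBanks[3] = { 0 }; /* 3 seeds for 8 banks */
++
++	switch (ui32SLCBanks)
++	{
++		case 2: *ppui64Values = aui64TwoBanks;   return 4;
++		case 4: *ppui64Values = aui64FourBanks;  return 4;
++		case 8: *ppui64Values = aui64EightBanks; return 3;
++		default: *ppui64Values = NULL;           return 0;
++	}
++}
++#endif
++/*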
++ */ ++ ui32Reg = RGX_CR_SLC_CTRL_MISC; ++ ui32RegVal = RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1; ++ ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) ++ ui32RegVal |= RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN; ++#endif ++ ++#if defined(FIX_HW_BRN_60084_BIT_MASK) ++ if (RGX_DEVICE_HAS_BRN(hPrivate, 60084)) ++ { ++#if !defined(SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING) ++ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN; ++#else ++ if (RGX_DEVICE_HAS_ERN(hPrivate, 61389)) ++ { ++ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN; ++ } ++#endif ++ } ++#endif ++ ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) ++ /* Bypass burst combiner if SLC line size is smaller than 1024 bits */ ++ if (RGXGetDeviceCacheLineSize(hPrivate) < 1024) ++ { ++ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN; ++ } ++#endif ++ ++ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal); ++ } ++} ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXInitBIF ++ ++ @Description Initialise RGX BIF ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return void ++ ++******************************************************************************/ ++static void RGXInitBIF(const void *hPrivate) ++{ ++ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS)) ++ { ++ IMG_DEV_PHYADDR sPCAddr; ++ ++ /* ++ * Acquire the address of the Kernel Page Catalogue. ++ */ ++ RGXAcquireKernelMMUPC(hPrivate, &sPCAddr); ++ ++ /* ++ * Write the kernel catalogue base. ++ */ ++ RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue"); ++ ++#if defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) ++ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) ++ { ++ /* Write the cat-base address */ ++ RGXWriteKernelMMUPC64(hPrivate, ++ BIF_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), ++ RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT, ++ RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT, ++ ((sPCAddr.uiAddr ++ >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT) ++ << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT) ++ & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK); ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) ++ { ++ /* Keep catbase registers in sync */ ++ RGXWriteKernelMMUPC64(hPrivate, ++ FWCORE_MEM_CAT_BASEx(MMU_CONTEXT_MAPPING_FWPRIV), ++ RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT, ++ RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT, ++ ((sPCAddr.uiAddr ++ >> RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_ALIGNSHIFT) ++ << RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_SHIFT) ++ & ~RGX_CR_FWCORE_MEM_CAT_BASE0_ADDR_CLRMSK); ++ } ++#endif ++ ++ /* ++ * Trusted Firmware boot ++ */ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled"); ++ RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); ++#endif ++ } ++ else ++#endif /* defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) */ ++ { ++#if defined(RGX_CR_MMU_CBASE_MAPPING) // FIXME_OCEANIC ++ IMG_UINT32 uiPCAddr; ++ uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) ++ << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT) ++ & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK); ++ ++ /* Set the mapping context */ ++ RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWPRIV); ++ (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ ++ ++ /* Write the cat-base address */ ++ RGXWriteKernelMMUPC32(hPrivate, ++ RGX_CR_MMU_CBASE_MAPPING, ++ 
RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, ++ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, ++ uiPCAddr); ++ ++#if (MMU_CONTEXT_MAPPING_FWIF != MMU_CONTEXT_MAPPING_FWPRIV) ++ /* Set-up different MMU ID mapping to the same PC used above */ ++ RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, MMU_CONTEXT_MAPPING_FWIF); ++ (void)RGXReadReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT); /* Fence write */ ++ ++ RGXWriteKernelMMUPC32(hPrivate, ++ RGX_CR_MMU_CBASE_MAPPING, ++ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT, ++ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT, ++ uiPCAddr); ++#endif ++#endif ++ } ++ } ++ else ++ { ++ /* ++ * Trusted Firmware boot ++ */ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled"); ++ RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN); ++#endif ++ } ++} ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXAXIACELiteInit ++ ++ @Description Initialise AXI-ACE Lite interface ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return void ++ ++******************************************************************************/ ++static void RGXAXIACELiteInit(const void *hPrivate) ++{ ++ IMG_UINT32 ui32RegAddr; ++ IMG_UINT64 ui64RegVal; ++ ++ ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION; ++ ++ /* Setup AXI-ACE config. Set everything to outer cache */ ++ ui64RegVal = (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) | ++ (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) | ++ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) | ++ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) | ++ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) | ++ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) | ++ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) | ++ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT); ++ ++#if defined(FIX_HW_BRN_42321_BIT_MASK) ++ if (RGX_DEVICE_HAS_BRN(hPrivate, 42321)) ++ { ++ ui64RegVal |= (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT); ++ } ++#endif ++ ++#if defined(FIX_HW_BRN_68186_BIT_MASK) ++ if (RGX_DEVICE_HAS_BRN(hPrivate, 68186)) ++ { ++ /* default value for reg_enable_fence_out is zero. 
Force to 1 to allow core_clk < mem_clk */ ++ ui64RegVal |= (IMG_UINT64)1 << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ENABLE_FENCE_OUT_SHIFT; ++ } ++#endif ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && defined(RGX_FEATURE_SLC_VIVT_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT)) ++ { ++ RGXCommentLog(hPrivate, "OSID 0 and 1 are trusted"); ++ ui64RegVal |= IMG_UINT64_C(0xFC) ++ << RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT; ++ } ++#endif ++ ++ RGXCommentLog(hPrivate, "Init AXI-ACE interface"); ++ RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal); ++} ++ ++PVRSRV_ERROR RGXStart(const void *hPrivate) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_CHAR *pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS; ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ IMG_BOOL bDoFWSlaveBoot = IMG_FALSE; ++ IMG_BOOL bMetaFW = IMG_FALSE; ++#endif ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) ++ { ++ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_RISCV; ++ } ++ else ++#endif ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (RGX_DEVICE_HAS_FEATURE_VALUE(hPrivate, META)) ++ { ++ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META; ++ bMetaFW = IMG_TRUE; ++ bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate); ++ } ++#endif ++ ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, SYS_BUS_SECURE_RESET)) ++ { ++ /* Disable the default sys_bus_secure protection to perform minimal setup */ ++ RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure"); ++ RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0); ++ (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ ++ } ++ ++#if defined(SUPPORT_SHARED_SLC) ++ /* When the SLC is shared, the SLC reset is performed by the System layer when calling ++ * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid ++ * soft_resetting it here. 
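++ *
++ * (Editor's note, illustrative and compiled out, not part of the original
++ * patch: the XOR against MASKFULL below is how these routines drop a single
++ * unit out of an all-units mask; because the unit's bit is known to be set
++ * in MASKFULL, XOR clears exactly that bit. A one-line model with made-up
++ * masks:)
++ */
++#if 0
++#define EXAMPLE_RESET_MASKFULL     IMG_UINT64_C(0x00000000FFFFFFFF)
++#define EXAMPLE_RESET_UNIT_EN      IMG_UINT64_C(0x0000000000000100)
++/* Every unit except the excluded one: */
++#define EXAMPLE_RESET_ALL_BUT_UNIT (EXAMPLE_RESET_MASKFULL ^ EXAMPLE_RESET_UNIT_EN)
++#endif
++/*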
++ */ ++#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN) ++ RGXCommentLog(hPrivate, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)"); ++#else ++#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL) ++#endif ++ ++#if defined(RGX_S7_SOFT_RESET_DUSTS) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) ++ { ++ /* Set RGX in soft-reset */ ++ RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1"); ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS); ++ ++ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); ++ ++ RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2"); ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS); ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2); ++ ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); ++ ++ /* Take everything out of reset but the FW processor */ ++ RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR); ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN); ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, 0x0); ++ ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET2); ++ ++ RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR); ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); ++ ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); ++ } ++ else ++#endif ++ { ++ /* Set RGX in soft-reset */ ++ RGXCommentLog(hPrivate, "RGXStart: soft reset everything"); ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL); ++ ++ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); ++ ++ /* Take Rascal and Dust out of reset */ ++ RGXCommentLog(hPrivate, "RGXStart: Rascal and Dust out of reset"); ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN); ++ ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); ++ ++ /* Take everything out of reset but the FW processor */ ++ RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR); ++ ++#if defined(RGX_FEATURE_XE_ARCHITECTURE) && (RGX_FEATURE_XE_ARCHITECTURE > 1) ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_CPU_EN); ++#else ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN); ++#endif ++ ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); ++ } ++ ++ /* Enable clocks */ ++ RGXEnableClocks(hPrivate); ++ ++ /* ++ * Initialise SLC. 
++ */ ++#if !defined(SUPPORT_SHARED_SLC) ++ __RGXInitSLC(hPrivate); ++#endif ++ ++ if (RGX_DEVICE_GET_FEATURE_VALUE(hPrivate, ECC_RAMS) > 0) ++ { ++ RGXCommentLog(hPrivate, "RGXStart: Enable safety events"); ++ RGXWriteReg32(hPrivate, RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE, ++ RGX_CR_SAFETY_EVENT_ENABLE__ROGUEXE__MASKFULL); ++ } ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (bMetaFW) ++ { ++ if (bDoFWSlaveBoot) ++ { ++ /* Configure META to Slave boot */ ++ RGXCommentLog(hPrivate, "RGXStart: META Slave boot"); ++ RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0); ++ ++ } ++ else ++ { ++ /* Configure META to Master boot */ ++ RGXCommentLog(hPrivate, "RGXStart: META Master boot"); ++ RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN); ++ } ++ } ++#endif ++ ++ /* ++ * Initialise Firmware wrapper ++ */ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) ++ { ++ RGXInitRiscvProcWrapper(hPrivate); ++ } ++ else ++#endif ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (bMetaFW) ++ { ++ RGXInitMetaProcWrapper(hPrivate); ++ } ++ else ++#endif ++ { ++ RGXInitMipsProcWrapper(hPrivate); ++ } ++ ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, AXI_ACELITE)) ++ { ++ /* We must init the AXI-ACE interface before 1st BIF transaction */ ++ RGXAXIACELiteInit(hPrivate); ++ } ++ ++ /* ++ * Initialise BIF. ++ */ ++ RGXInitBIF(hPrivate); ++ ++ RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR); ++ ++ /* Need to wait for at least 16 cycles before taking the FW processor out of reset ... */ ++ RGXWaitCycles(hPrivate, 32, 3); ++ ++ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0); ++ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET); ++ ++ /* ... and afterwards */ ++ RGXWaitCycles(hPrivate, 32, 3); ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (bMetaFW && bDoFWSlaveBoot) ++ { ++ eError = RGXFabricCoherencyTest(hPrivate); ++ if (eError != PVRSRV_OK) return eError; ++ ++ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start"); ++ eError = RGXStartFirmware(hPrivate); ++ if (eError != PVRSRV_OK) return eError; ++ } ++ else ++#endif ++ { ++ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start"); ++ ++#if defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR)) ++ { ++ /* Bring Debug Module out of reset */ ++ RGXWriteReg32(hPrivate, RGX_CR_FWCORE_DMI_DMCONTROL, RGX_CR_FWCORE_DMI_DMCONTROL_DMACTIVE_EN); ++ ++ /* Boot the FW */ ++ RGXWriteReg32(hPrivate, RGX_CR_FWCORE_BOOT, 1); ++ RGXWaitCycles(hPrivate, 32, 3); ++ } ++#endif ++ } ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(SUPPORT_SECURITY_VALIDATION) ++ RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure"); ++ RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN); ++ (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */ ++#endif ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXStop(const void *hPrivate) ++{ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) || defined(RGX_FEATURE_RISCV_FW_PROCESSOR_BIT_MASK) ++ IMG_BOOL bMipsFW = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS); ++ IMG_BOOL bRiscvFW = RGX_DEVICE_HAS_FEATURE(hPrivate, RISCV_FW_PROCESSOR); ++ IMG_BOOL bMetaFW = !bMipsFW && !bRiscvFW; ++#endif ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ RGX_LAYER_PARAMS *psParams; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ PVR_ASSERT(hPrivate != NULL); ++ psParams = (RGX_LAYER_PARAMS*)hPrivate; ++ psDevInfo = psParams->psDevInfo; ++ ++ RGXDeviceAckIrq(hPrivate); 
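++
++	/* Editor's sketch (illustrative and compiled out, not part of the
++	 * original patch): the idle polls below compare a status register
++	 * against MASKFULL with a few busy-by-design bits, such as GARTEN,
++	 * SOCIF and HOSTIF, XOR-ed out of both the expected value and the
++	 * mask, so those units are ignored. Generically: */
++#if 0
++	{
++		IMG_UINT32 ui32Excluded = 0x7;            /* hypothetical always-busy bits */
++		IMG_UINT32 ui32Want     = 0xFFFFFFFF ^ ui32Excluded;
++
++		/* eError = RGXPollReg32(hPrivate, <idle register>, ui32Want, ui32Want); */
++	}
++#endif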
++ ++ /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper ++ * For LAYOUT_MARS = 1, SIDEKICK would have been powered down by FW ++ */ ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) ++ if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) ++ { ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_JONES_IDLE, ++ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN), ++ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN)); ++ } ++ else ++#endif ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_SIDEKICK_IDLE, ++ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN), ++ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN)); ++ } ++ ++ if (eError != PVRSRV_OK) return eError; ++ } ++#endif ++ ++ if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) ++ { ++#if !defined(SUPPORT_SHARED_SLC) ++ /* ++ * Wait for SLC to signal IDLE ++ * For LAYOUT_MARS = 1, SLC would have been powered down by FW ++ */ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_SLC3_IDLE, ++ RGX_CR_SLC3_IDLE_MASKFULL, ++ RGX_CR_SLC3_IDLE_MASKFULL); ++ } ++ else ++#endif ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_SLC_IDLE, ++ RGX_CR_SLC_IDLE_MASKFULL, ++ RGX_CR_SLC_IDLE_MASKFULL); ++ } ++#endif /* SUPPORT_SHARED_SLC */ ++ if (eError != PVRSRV_OK) return eError; ++ } ++ ++ /* Unset MTS DM association with threads */ ++ RGXWriteReg32(hPrivate, ++ RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC, ++ RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK ++ & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL); ++ RGXWriteReg32(hPrivate, ++ RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC, ++ RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK ++ & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL); ++#if defined(RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC) // FIXME_OCEANIC ++ RGXWriteReg32(hPrivate, ++ RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC, ++ RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK ++ & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL); ++ RGXWriteReg32(hPrivate, ++ RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC, ++ RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK ++ & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL); ++#endif ++ ++#if defined(PDUMP) && defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (bMetaFW) ++ { ++ /* Disabling threads is only required for pdumps to stop the fw gracefully */ ++ ++ /* Disable thread 0 */ ++ eError = RGXWriteMetaRegThroughSP(hPrivate, ++ META_CR_T0ENABLE_OFFSET, ++ ~META_CR_TXENABLE_ENABLE_BIT); ++ if (eError != PVRSRV_OK) return eError; ++ ++ /* Disable thread 1 */ ++ eError = RGXWriteMetaRegThroughSP(hPrivate, ++ META_CR_T1ENABLE_OFFSET, ++ ~META_CR_TXENABLE_ENABLE_BIT); ++ if (eError != PVRSRV_OK) return eError; ++ ++ /* Clear down any irq raised by META (done after disabling the FW ++ * threads to avoid a race condition). ++ * This is only really needed for PDumps but we do it anyway driver-live. 
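++ *
++ * (Editor's note, illustrative only, not part of the original patch: the
++ * ordering here matters. Both META threads are disabled first, the pending
++ * interrupt is cleared second, and only then is the slave port polled for
++ * idle, so no new interrupt can be raised between the clear and the poll.
++ * In outline, using the calls from this file:)
++ */
++#if 0
++	RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, ~META_CR_TXENABLE_ENABLE_BIT);
++	RGXWriteMetaRegThroughSP(hPrivate, META_CR_T1ENABLE_OFFSET, ~META_CR_TXENABLE_ENABLE_BIT);
++	RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0);
++	/* ...then poll RGX_CR_META_SP_MSLVCTRL1 for READY | GBLPORT_IDLE */
++#endif
++/*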
++ */ ++ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0); ++ (void)RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS); /* Fence write */ ++ ++ /* Wait for the Slave Port to finish all the transactions */ ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_META_SP_MSLVCTRL1, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ++ RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN); ++ if (eError != PVRSRV_OK) return eError; ++ } ++#endif ++ ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) ++ /* Extra Idle checks */ ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_BIF_STATUS_MMU, ++ 0, ++ RGX_CR_BIF_STATUS_MMU_MASKFULL); ++ if (eError != PVRSRV_OK) return eError; ++ ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_BIFPM_STATUS_MMU, ++ 0, ++ RGX_CR_BIFPM_STATUS_MMU_MASKFULL); ++ if (eError != PVRSRV_OK) return eError; ++#endif ++ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE) && ++ !RGX_DEVICE_HAS_FEATURE(hPrivate, XT_TOP_INFRASTRUCTURE)) ++#endif ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_BIF_READS_EXT_STATUS, ++ 0, ++ RGX_CR_BIF_READS_EXT_STATUS_MASKFULL); ++ if (eError != PVRSRV_OK) return eError; ++ } ++ ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_BIFPM_READS_EXT_STATUS, ++ 0, ++ RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL); ++ if (eError != PVRSRV_OK) return eError; ++#endif ++ ++ { ++ IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL; ++ eError = RGXPollReg64(hPrivate, ++ RGX_CR_SLC_STATUS1, ++ 0, ++ ui64SLCMask); ++ if (eError != PVRSRV_OK) return eError; ++ } ++ ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) ++ if (4 == RGXGetDeviceSLCBanks(hPrivate)) ++ { ++ eError = RGXPollReg64(hPrivate, ++ RGX_CR_SLC_STATUS2, ++ 0, ++ RGX_CR_SLC_STATUS2_MASKFULL); ++ if (eError != PVRSRV_OK) return eError; ++ } ++#endif ++ ++ if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) ++ { ++#if !defined(SUPPORT_SHARED_SLC) ++ /* ++ * Wait for SLC to signal IDLE ++ * For LAYOUT_MARS = 1, SLC would have been powered down by FW ++ */ ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_SLC3_IDLE, ++ RGX_CR_SLC3_IDLE_MASKFULL, ++ RGX_CR_SLC3_IDLE_MASKFULL); ++ } ++ else ++#endif ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_SLC_IDLE, ++ RGX_CR_SLC_IDLE_MASKFULL, ++ RGX_CR_SLC_IDLE_MASKFULL); ++ } ++#endif /* SUPPORT_SHARED_SLC */ ++ if (eError != PVRSRV_OK) return eError; ++ } ++ ++ /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper ++ * For LAYOUT_MARS = 1, SIDEKICK would have been powered down by FW ++ */ ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) ++ if (!(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0)) ++ { ++#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK) ++ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) ++ { ++#if defined(RGX_FEATURE_FASTRENDER_DM_BIT_MASK) ++ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, FASTRENDER_DM)) ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_JONES_IDLE, ++ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN), ++ 
RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN)); ++ } ++#endif ++ } ++ else ++#endif ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_SIDEKICK_IDLE, ++ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN), ++ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN)); ++ } ++ ++ if (eError != PVRSRV_OK) return eError; ++ } ++#endif ++ ++#if defined(RGX_FEATURE_META_MAX_VALUE_IDX) ++ if (bMetaFW) ++ { ++ IMG_UINT32 ui32RegValue; ++ ++ eError = RGXReadMetaRegThroughSP(hPrivate, ++ META_CR_TxVECINT_BHALT, ++ &ui32RegValue); ++ if (eError != PVRSRV_OK) return eError; ++ ++ if ((ui32RegValue & 0xFFFFFFFFU) == 0x0) ++ { ++ /* Wait for Sidekick/Jones to signal IDLE including ++ * the Garten Wrapper if there is no debugger attached ++ * (TxVECINT_BHALT = 0x0) */ ++ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE)) ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_SIDEKICK_IDLE, ++ RGX_CR_SIDEKICK_IDLE_GARTEN_EN, ++ RGX_CR_SIDEKICK_IDLE_GARTEN_EN); ++ if (eError != PVRSRV_OK) return eError; ++ } ++ else ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_JONES_IDLE, ++ RGX_CR_JONES_IDLE_GARTEN_EN, ++ RGX_CR_JONES_IDLE_GARTEN_EN); ++ if (eError != PVRSRV_OK) return eError; ++ } ++ } ++ } ++ else ++#endif ++ { ++ if (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevInfo->psDeviceNode, LAYOUT_MARS) > 0) ++ { ++ /* As FW core has been moved from SIDEKICK to the new MARS domain, checking ++ * idle bits for CPU & System Arbiter excluding SOCIF which will never be Idle ++ * if Host polling on this register ++ */ ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_MARS_IDLE, ++ RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN, ++ RGX_CR_MARS_IDLE_CPU_EN | RGX_CR_MARS_IDLE_MH_SYSARB0_EN); ++ if (eError != PVRSRV_OK) return eError; ++ } ++#if !defined(RGX_FEATURE_XE_ARCHITECTURE) || (RGX_FEATURE_XE_ARCHITECTURE == 1) ++ else ++ { ++ eError = RGXPollReg32(hPrivate, ++ RGX_CR_SIDEKICK_IDLE, ++ RGX_CR_SIDEKICK_IDLE_GARTEN_EN, ++ RGX_CR_SIDEKICK_IDLE_GARTEN_EN); ++ if (eError != PVRSRV_OK) return eError; ++ } ++#endif ++ } ++ ++ return eError; ++} ++ ++ ++/* ++ * RGXInitSLC ++ */ ++#if defined(SUPPORT_SHARED_SLC) ++PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ void *pvPowerParams; ++ ++ if (psDeviceNode == NULL) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ psDevInfo = psDeviceNode->pvDevice; ++ pvPowerParams = &psDevInfo->sLayerParams; ++ ++ /* reset the SLC */ ++ RGXCommentLog(pvPowerParams, "RGXInitSLC: soft reset SLC"); ++ RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN); ++ ++ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */ ++ (void) RGXReadReg64(pvPowerParams, RGX_CR_SOFT_RESET); ++ ++ /* Take everything out of reset */ ++ RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, 0x0); ++ ++ __RGXInitSLC(pvPowerParams); ++ ++ return PVRSRV_OK; ++} ++#endif +diff --git a/drivers/gpu/drm/img-rogue/rgxstartstop.h b/drivers/gpu/drm/img-rogue/rgxstartstop.h +new file mode 100644 +index 000000000000..178afe2849a0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxstartstop.h +@@ -0,0 +1,84 @@ ++/*************************************************************************/ /*! 
++@File ++@Title RGX start/stop header file ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX start/stop functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXSTARTSTOP_H) ++#define RGXSTARTSTOP_H ++ ++/* The routines declared here are built on top of an abstraction layer to ++ * hide DDK/OS-specific details in case they are used outside of the DDK ++ * (e.g. when DRM security is enabled). ++ * Any new dependency should be added to rgxlayer.h. ++ * Any new code should be built on top of the existing abstraction layer, ++ * which should be extended when necessary. ++ */ ++#include "rgxlayer.h" ++ ++/*! ++******************************************************************************* ++ ++ @Function RGXStart ++ ++ @Description Perform GPU reset and initialisation ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXStart(const void *hPrivate); ++ ++/*! 
++******************************************************************************* ++ ++ @Function RGXStop ++ ++ @Description Stop Rogue in preparation for power down ++ ++ @Input hPrivate : Implementation specific data ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXStop(const void *hPrivate); ++ ++#endif /* RGXSTARTSTOP_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxsyncutils.c b/drivers/gpu/drm/img-rogue/rgxsyncutils.c +new file mode 100644 +index 000000000000..cec0597d0301 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxsyncutils.c +@@ -0,0 +1,184 @@ ++/*************************************************************************/ /*! ++@File rgxsyncutils.c ++@Title RGX Sync Utilities ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX Sync helper functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++#include "rgxsyncutils.h" ++ ++#include "sync_server.h" ++#include "sync_internal.h" ++#include "sync.h" ++#include "allocmem.h" ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++#include "pvr_buffer_sync.h" ++#endif ++ ++#include "sync_checkpoint.h" ++#include "sync_checkpoint_internal.h" ++ ++//#define TA3D_CHECKPOINT_DEBUG ++ ++#if defined(TA3D_CHECKPOINT_DEBUG) ++#define CHKPT_DBG(X) PVR_DPF(X) ++static ++void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues, ++ IMG_UINT32 ui32Count) ++{ ++ IMG_UINT32 iii; ++ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues; ++ ++ for (iii = 0; iii < ui32Count; iii++) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp)); ++ pui32Tmp++; ++ } ++} ++#else ++#define CHKPT_DBG(X) ++#endif ++ ++ ++PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue, ++ SYNC_ADDR_LIST *psSyncList, ++ SYNC_ADDR_LIST *psPRSyncList, ++ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync, ++ RGX_SYNC_DATA *psSyncData, ++ IMG_BOOL bKick3D) ++{ ++ IMG_UINT32 *pui32TimelineUpdateWOff = NULL; ++ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; ++ ++ IMG_UINT32 ui32ClientUpdateValueCount = psSyncData->ui32ClientUpdateValueCount; ++ ++ /* Space for original client updates, and the one new update */ ++ size_t uiUpdateSize = sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateValueCount + 1); ++ ++ if (!bKick3D) ++ { ++ /* Additional space for one PR update, only the newest one */ ++ uiUpdateSize += sizeof(*pui32IntAllocatedUpdateValues) * 1; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: About to allocate memory to hold updates in pui32IntAllocatedUpdateValues(<%p>)", ++ __func__, ++ (void*)pui32IntAllocatedUpdateValues)); ++ ++ /* Allocate memory to hold the list of update values (including our timeline update) */ ++ pui32IntAllocatedUpdateValues = OSAllocMem(uiUpdateSize); ++ if (!pui32IntAllocatedUpdateValues) ++ { ++ /* Failed to allocate memory */ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, uiUpdateSize); ++ pui32TimelineUpdateWOff = pui32IntAllocatedUpdateValues; ++ ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Copying %d %s update values into pui32IntAllocatedUpdateValues(<%p>)", ++ __func__, ++ ui32ClientUpdateValueCount, ++ bKick3D ? "TA/3D" : "TA/PR", ++ (void*)pui32IntAllocatedUpdateValues)); ++ /* Copy the update values into the new memory, then append our timeline update value */ ++ OSCachedMemCopy(pui32TimelineUpdateWOff, psSyncData->paui32ClientUpdateValue, ui32ClientUpdateValueCount * sizeof(*psSyncData->paui32ClientUpdateValue)); ++ ++#if defined(TA3D_CHECKPOINT_DEBUG) ++ _DebugSyncValues(pui32TimelineUpdateWOff, ui32ClientUpdateValueCount); ++#endif ++ ++ pui32TimelineUpdateWOff += ui32ClientUpdateValueCount; ++ } ++ ++ /* Now set the additional update value and append the timeline sync prim addr to either the ++ * render context 3D (or TA) update list ++ */ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...", ++ __func__, ++ ui32FenceTimelineUpdateValue, ++ bKick3D ? 
"TA/3D" : "TA/PR")); ++ ++ /* Append the TA/3D update */ ++ { ++ *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; ++ psSyncData->ui32ClientUpdateValueCount++; ++ psSyncData->ui32ClientUpdateCount++; ++ SyncAddrListAppendSyncPrim(psSyncList, psFenceTimelineUpdateSync); ++ ++ if (!psSyncData->pauiClientUpdateUFOAddress) ++ { ++ psSyncData->pauiClientUpdateUFOAddress = psSyncList->pasFWAddrs; ++ } ++ /* Update paui32ClientUpdateValue to point to our new list of update values */ ++ psSyncData->paui32ClientUpdateValue = pui32IntAllocatedUpdateValues; ++ ++#if defined(TA3D_CHECKPOINT_DEBUG) ++ _DebugSyncValues(pui32IntAllocatedUpdateValues, psSyncData->ui32ClientUpdateValueCount); ++#endif ++ } ++ ++ if (!bKick3D) ++ { ++ /* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */ ++ *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue; ++ psSyncData->ui32ClientPRUpdateValueCount = 1; ++ psSyncData->ui32ClientPRUpdateCount = 1; ++ SyncAddrListAppendSyncPrim(psPRSyncList, psFenceTimelineUpdateSync); ++ ++ if (!psSyncData->pauiClientPRUpdateUFOAddress) ++ { ++ psSyncData->pauiClientPRUpdateUFOAddress = psPRSyncList->pasFWAddrs; ++ } ++ /* Update paui32ClientPRUpdateValue to point to our new list of update values */ ++ psSyncData->paui32ClientPRUpdateValue = &pui32IntAllocatedUpdateValues[psSyncData->ui32ClientUpdateValueCount]; ++ ++#if defined(TA3D_CHECKPOINT_DEBUG) ++ _DebugSyncValues(psSyncData->paui32ClientPRUpdateValue, psSyncData->ui32ClientPRUpdateValueCount); ++#endif ++ } ++ ++ /* Do not free the old psSyncData->ui32ClientUpdateValueCount, ++ * as it was constant data passed through the bridge down to PVRSRVRGXKickTA3DKM() */ ++ ++ return PVRSRV_OK; ++} +diff --git a/drivers/gpu/drm/img-rogue/rgxsyncutils.h b/drivers/gpu/drm/img-rogue/rgxsyncutils.h +new file mode 100644 +index 000000000000..2133da85e78a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxsyncutils.h +@@ -0,0 +1,76 @@ ++/*************************************************************************/ /*! ++@File rgxsyncutils.h ++@Title RGX Sync Utilities ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX Sync helper functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RGXSYNCUTILS_H ++#define RGXSYNCUTILS_H ++ ++#include "rgxdevice.h" ++#include "sync_server.h" ++#include "rgxdebug.h" ++#include "rgx_fwif_km.h" ++ ++typedef struct _RGX_SYNC_DATA_ ++{ ++ PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress; ++ IMG_UINT32 *paui32ClientUpdateValue; ++ IMG_UINT32 ui32ClientUpdateValueCount; ++ IMG_UINT32 ui32ClientUpdateCount; ++ ++ PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress; ++ IMG_UINT32 *paui32ClientPRUpdateValue; ++ IMG_UINT32 ui32ClientPRUpdateValueCount; ++ IMG_UINT32 ui32ClientPRUpdateCount; ++} RGX_SYNC_DATA; ++ ++PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue, ++ SYNC_ADDR_LIST *psSyncList, ++ SYNC_ADDR_LIST *psPRSyncList, ++ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync, ++ RGX_SYNC_DATA *psSyncData, ++ IMG_BOOL bKick3D); ++ ++#endif /* RGXSYNCUTILS_H */ ++ ++/****************************************************************************** ++ End of file (rgxsyncutils.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxta3d.c b/drivers/gpu/drm/img-rogue/rgxta3d.c +new file mode 100644 +index 000000000000..3b43babaf1df +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxta3d.c +@@ -0,0 +1,5426 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX TA/3D routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX TA/3D routines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++/* for the offsetof macro */
++#if defined(__linux__)
++#include <linux/stddef.h>
++#else
++#include <stddef.h>
++#endif
++
++#include "pdump_km.h"
++#include "pvr_debug.h"
++#include "rgxutils.h"
++#include "rgxfwutils.h"
++#include "rgxta3d.h"
++#include "rgxmem.h"
++#include "allocmem.h"
++#include "devicemem.h"
++#include "devicemem_pdump.h"
++#include "ri_server.h"
++#include "osfunc.h"
++#include "pvrsrv.h"
++#include "rgx_memallocflags.h"
++#include "rgxccb.h"
++#include "rgxhwperf.h"
++#include "ospvr_gputrace.h"
++#include "rgxsyncutils.h"
++#include "htbuffer.h"
++
++#include "rgxdefs_km.h"
++#include "rgx_fwif_km.h"
++#include "physmem.h"
++#include "sync_server.h"
++#include "sync_internal.h"
++#include "sync.h"
++#include "process_stats.h"
++
++#include "rgxtimerquery.h"
++
++#if defined(SUPPORT_BUFFER_SYNC)
++#include "pvr_buffer_sync.h"
++#endif
++
++#include "sync_checkpoint.h"
++#include "sync_checkpoint_internal.h"
++
++#if defined(SUPPORT_PDVFS)
++#include "rgxpdvfs.h"
++#endif
++
++#if defined(SUPPORT_WORKLOAD_ESTIMATION)
++#include "rgxworkest.h"
++
++#define HASH_CLEAN_LIMIT 6
++#endif
++
++/* Enable this to dump the compiled list of UFOs prior to kick call */
++#define ENABLE_TA3D_UFO_DUMP 0
++
++//#define TA3D_CHECKPOINT_DEBUG
++
++#if defined(TA3D_CHECKPOINT_DEBUG)
++#define CHKPT_DBG(X) PVR_DPF(X)
++static INLINE
++void _DebugSyncValues(const IMG_CHAR *pszFunction,
++                      const IMG_UINT32 *pui32UpdateValues,
++                      const IMG_UINT32 ui32Count)
++{
++	IMG_UINT32 i;
++	IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues;
++
++	for (i = 0; i < ui32Count; i++)
++	{
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", pszFunction, i, (void*)pui32Tmp, *pui32Tmp));
++		pui32Tmp++;
++	}
++}
++
++static INLINE
++void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction,
++                           const IMG_CHAR *pszDMName,
++                           const PSYNC_CHECKPOINT *apsSyncCheckpoints,
++                           const IMG_UINT32 ui32Count)
++{
++	IMG_UINT32 i;
++
++	for (i = 0; i < ui32Count; i++)
++	{
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFence%sSyncCheckpoints[%d]=<%p>", pszFunction, pszDMName, i, *(apsSyncCheckpoints + i)));
++	}
++}
++
++#else
++#define CHKPT_DBG(X)
++#endif
++
++/* define the number of commands required to be set up by the CCB helper */
++/* 1 command for the TA */
++#define 
CCB_CMD_HELPER_NUM_TA_COMMANDS 1 ++/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */ ++#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3 ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++#define WORKEST_CYCLES_PREDICTION_GET(x) ((x).ui32CyclesPrediction) ++#else ++#define WORKEST_CYCLES_PREDICTION_GET(x) (NO_CYCEST) ++#endif ++ ++typedef struct { ++ DEVMEM_MEMDESC *psContextStateMemDesc; ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; ++ IMG_UINT32 ui32Priority; ++} RGX_SERVER_RC_TA_DATA; ++ ++typedef struct { ++ DEVMEM_MEMDESC *psContextStateMemDesc; ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; ++ IMG_UINT32 ui32Priority; ++} RGX_SERVER_RC_3D_DATA; ++ ++struct _RGX_SERVER_RENDER_CONTEXT_ { ++ /* this lock protects usage of the render context. ++ * it ensures only one kick is being prepared and/or submitted on ++ * this render context at any time ++ */ ++ POS_LOCK hLock; ++ RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS]; ++ RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS]; ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ DEVMEM_MEMDESC *psFWRenderContextMemDesc; ++ DEVMEM_MEMDESC *psFWFrameworkMemDesc; ++ RGX_SERVER_RC_TA_DATA sTAData; ++ RGX_SERVER_RC_3D_DATA s3DData; ++ IMG_UINT32 ui32CleanupStatus; ++#define RC_CLEANUP_TA_COMPLETE (1 << 0) ++#define RC_CLEANUP_3D_COMPLETE (1 << 1) ++ DLLIST_NODE sListNode; ++ SYNC_ADDR_LIST sSyncAddrListTAFence; ++ SYNC_ADDR_LIST sSyncAddrListTAUpdate; ++ SYNC_ADDR_LIST sSyncAddrList3DFence; ++ SYNC_ADDR_LIST sSyncAddrList3DUpdate; ++ ATOMIC_T hIntJobRef; ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ WORKEST_HOST_DATA sWorkEstData; ++#endif ++#if defined(SUPPORT_BUFFER_SYNC) ++ struct pvr_buffer_sync_context *psBufferSyncContext; ++#endif ++}; ++ ++ ++/* ++ Static functions used by render context code ++*/ ++ ++static ++PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData, ++ PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Check if the FW has finished with this resource ... */ ++ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, ++ psTAData->psServerCommonContext, ++ RGXFWIF_DM_GEOM, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError == PVRSRV_ERROR_RETRY) ++ { ++ return eError; ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++ /* ... it has so we can free its resources */ ++#if defined(DEBUG) ++ /* Log the number of TA context stores which occurred */ ++ { ++ RGXFWIF_TACTX_STATE *psFWTAState; ++ ++ eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc, ++ (void**)&psFWTAState); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map firmware render context state (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ } ++ else ++ { ++ /* Release the CPU virt addr */ ++ DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc); ++ } ++ } ++#endif ++ FWCommonContextFree(psTAData->psServerCommonContext); ++ DevmemFwUnmapAndFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc); ++ psTAData->psServerCommonContext = NULL; ++ return PVRSRV_OK; ++} ++ ++static ++PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData, ++ PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Check if the FW has finished with this resource ... 
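++	 * (a PVRSRV_ERROR_RETRY return from the cleanup request below simply
++	 * means the FW has not finished with it yet; the destroy has to be
++	 * re-attempted later)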
*/ ++ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, ++ ps3DData->psServerCommonContext, ++ RGXFWIF_DM_3D, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError == PVRSRV_ERROR_RETRY) ++ { ++ return eError; ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++ /* ... it has so we can free its resources */ ++#if defined(DEBUG) ++ /* Log the number of 3D context stores which occurred */ ++ { ++ RGXFWIF_3DCTX_STATE *psFW3DState; ++ ++ eError = DevmemAcquireCpuVirtAddr(ps3DData->psContextStateMemDesc, ++ (void**)&psFW3DState); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map firmware render context state (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ } ++ else ++ { ++ /* Release the CPU virt addr */ ++ DevmemReleaseCpuVirtAddr(ps3DData->psContextStateMemDesc); ++ } ++ } ++#endif ++ ++ FWCommonContextFree(ps3DData->psServerCommonContext); ++ DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc); ++ ps3DData->psServerCommonContext = NULL; ++ return PVRSRV_OK; ++} ++ ++static void _RGXDumpPMRPageList(DLLIST_NODE *psNode) ++{ ++ RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); ++ PVRSRV_ERROR eError; ++ ++ eError = PMRDumpPageList(psPMRNode->psPMR, ++ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Error (%s) printing pmr %p", ++ PVRSRVGetErrorString(eError), ++ psPMRNode->psPMR)); ++ } ++} ++ ++IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ ++ PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx, ++ psFreeList->sFreeListFWDevVAddr.ui32Addr, ++ psFreeList->ui32FreelistID, ++ psFreeList->ui64FreelistChecksum)); ++ ++ /* Dump Init FreeList page list */ ++ PVR_LOG((" Initial Memory block")); ++ dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) ++ { ++ _RGXDumpPMRPageList(psNode); ++ } ++ ++ /* Dump Grow FreeList page list */ ++ PVR_LOG((" Grow Memory blocks")); ++ dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext) ++ { ++ _RGXDumpPMRPageList(psNode); ++ } ++ ++ return IMG_TRUE; ++} ++ ++static void _CheckFreelist(RGX_FREELIST *psFreeList, ++ IMG_UINT32 ui32NumOfPagesToCheck, ++ IMG_UINT64 ui64ExpectedCheckSum, ++ IMG_UINT64 *pui64CalculatedCheckSum) ++{ ++#if defined(NO_HARDWARE) ++ /* No checksum needed as we have all information in the pdumps */ ++ PVR_UNREFERENCED_PARAMETER(psFreeList); ++ PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck); ++ PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum); ++ *pui64CalculatedCheckSum = 0; ++#else ++ PVRSRV_ERROR eError; ++ size_t uiNumBytes; ++ IMG_UINT8* pui8Buffer; ++ IMG_UINT32* pui32Buffer; ++ IMG_UINT32 ui32CheckSumAdd = 0; ++ IMG_UINT32 ui32CheckSumXor = 0; ++ IMG_UINT32 ui32Entry; ++ IMG_UINT32 ui32Entry2; ++ IMG_BOOL bFreelistBad = IMG_FALSE; ++ ++ *pui64CalculatedCheckSum = 0; ++ ++ PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); ++ ++ /* Allocate Buffer of the size of the freelist */ ++ pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32)); ++ if (pui8Buffer == NULL) ++ { ++ PVR_LOG(("%s: Failed to allocate buffer to check freelist %p!", ++ __func__, psFreeList)); ++ PVR_ASSERT(0); ++ return; ++ } ++ ++ /* Copy freelist content into Buffer */ ++ eError = 
PMR_ReadBytes(psFreeList->psFreeListPMR, ++ psFreeList->uiFreeListPMROffset + ++ (((psFreeList->ui32MaxFLPages - ++ psFreeList->ui32CurrentFLPages - ++ psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) & ++ ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)), ++ pui8Buffer, ++ ui32NumOfPagesToCheck * sizeof(IMG_UINT32), ++ &uiNumBytes); ++ if (eError != PVRSRV_OK) ++ { ++ OSFreeMem(pui8Buffer); ++ PVR_LOG(("%s: Failed to get freelist data for freelist %p!", ++ __func__, psFreeList)); ++ PVR_ASSERT(0); ++ return; ++ } ++ ++ PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32)); ++ ++ /* Generate checksum (skipping the first page if not allocated) */ ++ pui32Buffer = (IMG_UINT32 *)pui8Buffer; ++ ui32Entry = ((psFreeList->ui32GrowFLPages == 0 && psFreeList->ui32CurrentFLPages > 1) ? 1 : 0); ++ for (/*ui32Entry*/ ; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++) ++ { ++ ui32CheckSumAdd += pui32Buffer[ui32Entry]; ++ ui32CheckSumXor ^= pui32Buffer[ui32Entry]; ++ ++ /* Check for double entries */ ++ for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++) ++ { ++ if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2]) ++ { ++ PVR_LOG(("%s: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d", ++ __func__, ++ psFreeList->sFreeListFWDevVAddr.ui32Addr, ++ pui32Buffer[ui32Entry2], ++ ui32Entry, ++ ui32Entry2, ++ psFreeList->ui32CurrentFLPages)); ++ bFreelistBad = IMG_TRUE; ++ break; ++ } ++ } ++ } ++ ++ OSFreeMem(pui8Buffer); ++ ++ /* Check the calculated checksum against the expected checksum... */ ++ *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd; ++ ++ if (ui64ExpectedCheckSum != 0 && ui64ExpectedCheckSum != *pui64CalculatedCheckSum) ++ { ++ PVR_LOG(("%s: Checksum mismatch for freelist %p! Expected 0x%016" IMG_UINT64_FMTSPECx " calculated 0x%016" IMG_UINT64_FMTSPECx, ++ __func__, psFreeList, ++ ui64ExpectedCheckSum, *pui64CalculatedCheckSum)); ++ bFreelistBad = IMG_TRUE; ++ } ++ ++ if (bFreelistBad) ++ { ++ PVR_LOG(("%s: Sleeping for ever!", __func__)); ++ PVR_ASSERT(!bFreelistBad); ++ } ++#endif ++} ++ ++ ++/* ++ * Function to work out the number of freelist pages to reserve for growing ++ * within the FW without having to wait for the host to progress a grow ++ * request. ++ * ++ * The number of pages must be a multiple of 4 to align the PM addresses ++ * for the initial freelist allocation and also be less than the grow size. ++ * ++ * If the threshold or grow size means less than 4 pages, then the feature ++ * is not used. ++ */ ++static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList, ++ IMG_UINT32 ui32FLPages) ++{ ++ IMG_UINT32 ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) & ++ ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1); ++ ++ if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages) ++ { ++ ui32ReadyFLPages = psFreeList->ui32GrowFLPages; ++ } ++ ++ return ui32ReadyFLPages; ++} ++ ++ ++PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, ++ IMG_UINT32 ui32NumPages, ++ PDLLIST_NODE pListHeader) ++{ ++ RGX_PMR_NODE *psPMRNode; ++ IMG_DEVMEM_SIZE_T uiSize; ++ IMG_UINT32 ui32MappingTable = 0; ++ IMG_DEVMEM_OFFSET_T uiOffset; ++ IMG_DEVMEM_SIZE_T uiLength; ++ IMG_DEVMEM_SIZE_T uistartPage; ++ PVRSRV_ERROR eError; ++ static const IMG_CHAR szAllocName[] = "Free List"; ++ ++ /* Are we allowed to grow ? 
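++	 * (i.e. do ui32NumPages more pages still fit between the pages the
++	 * freelist already owns (Current + Ready) and ui32MaxFLPages?)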
*/ ++ if (psFreeList->ui32MaxFLPages - (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) < ui32NumPages) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "Freelist [0x%p]: grow by %u pages denied. " ++ "Max PB size reached (current pages %u+%u/%u)", ++ psFreeList, ++ ui32NumPages, ++ psFreeList->ui32CurrentFLPages, ++ psFreeList->ui32ReadyFLPages, ++ psFreeList->ui32MaxFLPages)); ++ return PVRSRV_ERROR_PBSIZE_ALREADY_MAX; ++ } ++ ++ /* Allocate kernel memory block structure */ ++ psPMRNode = OSAllocMem(sizeof(*psPMRNode)); ++ if (psPMRNode == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to allocate host data structure", ++ __func__)); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto ErrorAllocHost; ++ } ++ ++ /* ++ * Lock protects simultaneous manipulation of: ++ * - the memory block list ++ * - the freelist's ui32CurrentFLPages ++ */ ++ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); ++ ++ ++ /* ++ * The PM never takes the last page in a freelist, so if this block ++ * of pages is the first one and there is no ability to grow, then ++ * we can skip allocating one 4K page for the lowest entry. ++ */ ++ if (OSGetPageSize() > RGX_BIF_PM_PHYSICAL_PAGE_SIZE) ++ { ++ /* ++ * Allocation size will be rounded up to the OS page size, ++ * any attempt to change it a bit now will be invalidated later. ++ */ ++ psPMRNode->bFirstPageMissing = IMG_FALSE; ++ } ++ else ++ { ++ psPMRNode->bFirstPageMissing = (psFreeList->ui32GrowFLPages == 0 && ui32NumPages > 1); ++ } ++ ++ psPMRNode->ui32NumPages = ui32NumPages; ++ psPMRNode->psFreeList = psFreeList; ++ ++ /* Allocate Memory Block */ ++ PDUMPCOMMENT(psFreeList->psDevInfo->psDeviceNode, "Allocate PB Block (Pages %08X)", ui32NumPages); ++ uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE; ++ if (psPMRNode->bFirstPageMissing) ++ { ++ uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE; ++ } ++ eError = PhysmemNewRamBackedPMR(psFreeList->psConnection, ++ psFreeList->psDevInfo->psDeviceNode, ++ uiSize, ++ uiSize, ++ 1, ++ 1, ++ &ui32MappingTable, ++ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE, ++ sizeof(szAllocName), ++ szAllocName, ++ psFreeList->ownerPid, ++ &psPMRNode->psPMR, ++ PDUMP_NONE, ++ NULL); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX, ++ __func__, ++ (IMG_UINT64)uiSize)); ++ goto ErrorBlockAlloc; ++ } ++ ++ /* Zeroing physical pages pointed by the PMR */ ++ if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) ++ { ++ eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to zero PMR %p of freelist %p (%s)", ++ __func__, ++ psPMRNode->psPMR, ++ psFreeList, ++ PVRSRVGetErrorString(eError))); ++ PVR_ASSERT(0); ++ } ++ } ++ ++ uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); ++ uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); ++ uiOffset = psFreeList->uiFreeListPMROffset + ((uistartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ ++ eError = RIWritePMREntryWithOwnerKM(psPMRNode->psPMR, ++ psFreeList->ownerPid); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: call to RIWritePMREntryWithOwnerKM failed (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ } ++ ++ /* Attach RI information */ ++ eError = 
RIWriteMEMDESCEntryKM(psPMRNode->psPMR,
++	                              OSStringNLength(szAllocName, DEVMEM_ANNOTATION_MAX_LEN),
++	                              szAllocName,
++	                              0,
++	                              uiSize,
++	                              IMG_FALSE,
++	                              IMG_FALSE,
++	                              &psPMRNode->hRIHandle);
++	PVR_LOG_IF_ERROR(eError, "RIWriteMEMDESCEntryKM");
++
++#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */
++
++	/* write Freelist with Memory Block physical addresses */
++	eError = PMRWritePMPageList(
++	                /* Target PMR, offset, and length */
++	                psFreeList->psFreeListPMR,
++	                (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset),
++	                (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength),
++	                /* Referenced PMR, and "page" granularity */
++	                psPMRNode->psPMR,
++	                RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
++	                &psPMRNode->psPageList);
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "%s: Failed to write pages of Node %p",
++		         __func__,
++		         psPMRNode));
++		goto ErrorPopulateFreelist;
++	}
++
++#if defined(SUPPORT_SHADOW_FREELISTS)
++	/* Copy freelist memory to shadow freelist */
++	{
++		const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof(IMG_UINT32);
++		const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2;
++		const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset;
++		IMG_BYTE *pFLMapAddr;
++		size_t uiNumBytes;
++		PVRSRV_ERROR res;
++		IMG_HANDLE hMapHandle;
++
++		/* Map both the FL and the shadow FL */
++		res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize,
++		                                  (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle);
++		if (res != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,
++			         "%s: Failed to map freelist (ID=%d)",
++			         __func__,
++			         psFreeList->ui32FreelistID));
++			goto ErrorPopulateFreelist;
++		}
++
++		/* Copy only the newly added memory */
++		OSCachedMemCopy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset , uiLength);
++		OSWriteMemoryBarrier(pFLMapAddr);
++
++#if defined(PDUMP)
++		PDUMPCOMMENT(psFreeList->psDevInfo->psDeviceNode, "Initialize shadow freelist");
++
++		/* Translate memcpy to pdump */
++		{
++			IMG_DEVMEM_OFFSET_T uiCurrOffset;
++
++			for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof(IMG_UINT32))
++			{
++				PMRPDumpCopyMem32(psFreeList->psFreeListPMR,
++				                  uiCurrOffset + ui32FLMaxSize,
++				                  psFreeList->psFreeListPMR,
++				                  uiCurrOffset,
++				                  ":SYSMEM:$1",
++				                  PDUMP_FLAGS_CONTINUOUS);
++			}
++		}
++#endif
++
++
++		res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle);
++
++		if (res != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,
++			         "%s: Failed to release freelist mapping (ID=%d)",
++			         __func__,
++			         psFreeList->ui32FreelistID));
++			goto ErrorPopulateFreelist;
++		}
++	}
++#endif
++
++	/* The block must be added at the head of the list: RGXShrinkFreeList
++	 * removes from the head, so the most recently grown block is the first
++	 * to be unwound. */
++	dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock);
++
++	/* Update number of available pages */
++	psFreeList->ui32CurrentFLPages += ui32NumPages;
++
++	/* Update statistics (needs to happen before the ReadyFL calculation to also count those pages) */
++	if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages)
++	{
++		psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages;
++	}
++
++	/* Reserve a number of ready pages to allow the FW to process OOM quickly and asynchronously request a grow. 
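++	 * As a worked example (assuming RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE
++	 * is 16 bytes, i.e. four 32-bit freelist entries): with a grow threshold
++	 * of 20%, a freelist holding 1000 pages keeps
++	 * (1000 * 20 / 100) & ~3 = 200 pages in the ready bank, capped at
++	 * ui32GrowFLPages by _CalculateFreelistReadyPages() above.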
*/ ++ psFreeList->ui32ReadyFLPages = _CalculateFreelistReadyPages(psFreeList, psFreeList->ui32CurrentFLPages); ++ psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; ++ ++ if (psFreeList->bCheckFreelist) ++ { ++ /* ++ * We can only calculate the freelist checksum when the list is full ++ * (e.g. at initial creation time). At other times the checksum cannot ++ * be calculated and has to be disabled for this freelist. ++ */ ++ if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages) ++ { ++ _CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum); ++ } ++ else ++ { ++ psFreeList->ui64FreelistChecksum = 0; ++ } ++ } ++ OSLockRelease(psFreeList->psDevInfo->hLockFreeList); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)", ++ psFreeList, ++ ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"), ++ ui32NumPages, ++ psFreeList->ui32CurrentFLPages, ++ psFreeList->ui32ReadyFLPages, ++ psFreeList->ui32MaxFLPages, ++ psFreeList->ui64FreelistChecksum, ++ (psPMRNode->bFirstPageMissing ? " - lowest page not allocated" : ""))); ++ ++ return PVRSRV_OK; ++ ++ /* Error handling */ ++ErrorPopulateFreelist: ++ PMRUnrefPMR(psPMRNode->psPMR); ++ ++ErrorBlockAlloc: ++ OSFreeMem(psPMRNode); ++ OSLockRelease(psFreeList->psDevInfo->hLockFreeList); ++ ++ErrorAllocHost: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++ ++} ++ ++static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader, ++ RGX_FREELIST *psFreeList) ++{ ++ DLLIST_NODE *psNode; ++ RGX_PMR_NODE *psPMRNode; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT32 ui32OldValue; ++ ++ /* ++ * Lock protects simultaneous manipulation of: ++ * - the memory block list ++ * - the freelist's ui32CurrentFLPages value ++ */ ++ PVR_ASSERT(pListHeader); ++ PVR_ASSERT(psFreeList); ++ PVR_ASSERT(psFreeList->psDevInfo); ++ PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList); ++ ++ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList); ++ ++ /* Get node from head of list and remove it */ ++ psNode = dllist_get_next_node(pListHeader); ++ if (psNode) ++ { ++ dllist_remove_node(psNode); ++ ++ psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); ++ PVR_ASSERT(psPMRNode); ++ PVR_ASSERT(psPMRNode->psPMR); ++ PVR_ASSERT(psPMRNode->psFreeList); ++ ++ /* remove block from freelist list */ ++ ++ /* Unwrite Freelist with Memory Block physical addresses */ ++ eError = PMRUnwritePMPageList(psPMRNode->psPageList); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to unwrite pages of Node %p", ++ __func__, ++ psPMRNode)); ++ PVR_ASSERT(IMG_FALSE); ++ } ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ ++ if (psPMRNode->hRIHandle) ++ { ++ PVRSRV_ERROR eError; ++ ++ eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle); ++ PVR_LOG_IF_ERROR(eError, "RIDeleteMEMDESCEntryKM"); ++ } ++ ++#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ ++ ++ /* Free PMR (We should be the only one that holds a ref on the PMR) */ ++ eError = PMRUnrefPMR(psPMRNode->psPMR); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to free PB block %p (%s)", ++ __func__, ++ psPMRNode->psPMR, ++ PVRSRVGetErrorString(eError))); ++ PVR_ASSERT(IMG_FALSE); ++ } ++ ++ /* update available pages in freelist */ ++ ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; ++ ++ /* ++ * Deallocated pages should first be deducted from ReadyPages bank, 
once ++ * there are no more left, start deducting them from CurrentPage bank. ++ */ ++ if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages) ++ { ++ psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages; ++ psFreeList->ui32ReadyFLPages = 0; ++ } ++ else ++ { ++ psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages; ++ } ++ ++ /* check underflow */ ++ PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages)); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)", ++ psFreeList, ++ psPMRNode->ui32NumPages, ++ psFreeList->ui32CurrentFLPages, ++ psFreeList->ui32MaxFLPages)); ++ ++ OSFreeMem(psPMRNode); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "Freelist [0x%p]: shrink denied. PB already at initial PB size (%u pages)", ++ psFreeList, ++ psFreeList->ui32InitFLPages)); ++ eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN; ++ } ++ ++ OSLockRelease(psFreeList->psDevInfo->hLockFreeList); ++ ++ return eError; ++} ++ ++static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ RGX_FREELIST *psFreeList = NULL; ++ ++ OSLockAcquire(psDevInfo->hLockFreeList); ++ ++ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) ++ { ++ RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); ++ ++ if (psThisFreeList->ui32FreelistID == ui32FreelistID) ++ { ++ psFreeList = psThisFreeList; ++ break; ++ } ++ } ++ ++ OSLockRelease(psDevInfo->hLockFreeList); ++ return psFreeList; ++} ++ ++void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32FreelistID) ++{ ++ RGX_FREELIST *psFreeList = NULL; ++ RGXFWIF_KCCB_CMD s3DCCBCmd; ++ IMG_UINT32 ui32GrowValue; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psDevInfo); ++ ++ psFreeList = FindFreeList(psDevInfo, ui32FreelistID); ++ ++ if (psFreeList) ++ { ++ /* Since the FW made the request, it has already consumed the ready pages, update the host struct */ ++ psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages; ++ psFreeList->ui32ReadyFLPages = 0; ++ ++ /* Try to grow the freelist */ ++ eError = RGXGrowFreeList(psFreeList, ++ psFreeList->ui32GrowFLPages, ++ &psFreeList->sMemoryBlockHead); ++ ++ if (eError == PVRSRV_OK) ++ { ++ /* Grow successful, return size of grow size */ ++ ui32GrowValue = psFreeList->ui32GrowFLPages; ++ ++ psFreeList->ui32NumGrowReqByFW++; ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ /* Update Stats */ ++ PVRSRVStatsUpdateFreelistStats(0, ++ 1, /* Add 1 to the appropriate counter (Requests by FW) */ ++ psFreeList->ui32InitFLPages, ++ psFreeList->ui32NumHighPages, ++ psFreeList->ownerPid); ++ ++#endif ++ ++ } ++ else ++ { ++ /* Grow failed */ ++ ui32GrowValue = 0; ++ PVR_DPF((PVR_DBG_ERROR, ++ "Grow for FreeList %p failed (%s)", ++ psFreeList, ++ PVRSRVGetErrorString(eError))); ++ } ++ ++ /* send feedback */ ++ s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE; ++ s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr; ++ s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue; ++ s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages; ++ s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages; ++ ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_3D, ++ &s3DCCBCmd, ++ PDUMP_FLAGS_NONE); ++ if (eError != 
PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ /* Kernel CCB should never fill up, as the FW is processing them right away */ ++ ++ PVR_ASSERT(eError == PVRSRV_OK); ++ } ++ else ++ { ++ /* Should never happen */ ++ PVR_DPF((PVR_DBG_ERROR, ++ "FreeList Lookup for FreeList ID 0x%08x failed (Populate)", ++ ui32FreelistID)); ++ PVR_ASSERT(IMG_FALSE); ++ } ++} ++ ++static void _RGXFreeListReconstruction(PDLLIST_NODE psNode) ++{ ++ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ RGX_FREELIST *psFreeList; ++ RGX_PMR_NODE *psPMRNode; ++ PVRSRV_ERROR eError; ++ IMG_DEVMEM_OFFSET_T uiOffset; ++ IMG_DEVMEM_SIZE_T uiLength; ++ IMG_UINT32 ui32StartPage; ++ ++ psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock); ++ psFreeList = psPMRNode->psFreeList; ++ PVR_ASSERT(psFreeList); ++ psDevInfo = psFreeList->psDevInfo; ++ PVR_ASSERT(psDevInfo); ++ ++ uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32); ++ ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages); ++ uiOffset = psFreeList->uiFreeListPMROffset + ((ui32StartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)); ++ ++ PMRUnwritePMPageList(psPMRNode->psPageList); ++ psPMRNode->psPageList = NULL; ++ eError = PMRWritePMPageList( ++ /* Target PMR, offset, and length */ ++ psFreeList->psFreeListPMR, ++ (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset), ++ (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength), ++ /* Referenced PMR, and "page" granularity */ ++ psPMRNode->psPMR, ++ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT, ++ &psPMRNode->psPageList); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Error (%s) writing FL 0x%08x", ++ __func__, ++ PVRSRVGetErrorString(eError), ++ (IMG_UINT32)psFreeList->ui32FreelistID)); ++ } ++ ++ /* Zeroing physical pages pointed by the reconstructed freelist */ ++ if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST) ++ { ++ eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to zero PMR %p of freelist %p (%s)", ++ __func__, ++ psPMRNode->psPMR, ++ psFreeList, ++ PVRSRVGetErrorString(eError))); ++ PVR_ASSERT(0); ++ } ++ } ++ ++ ++ psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages; ++} ++ ++ ++static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList) ++{ ++ IMG_UINT32 ui32OriginalFLPages; ++ DLLIST_NODE *psNode, *psNext; ++ RGXFWIF_FREELIST *psFWFreeList; ++ PVRSRV_ERROR eError; ++ ++ //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID)); ++ ++ /* Do the FreeList Reconstruction */ ++ ui32OriginalFLPages = psFreeList->ui32CurrentFLPages; ++ psFreeList->ui32CurrentFLPages = 0; ++ ++ /* Reconstructing Init FreeList pages */ ++ dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext) ++ { ++ _RGXFreeListReconstruction(psNode); ++ } ++ ++ /* Reconstructing Grow FreeList pages */ ++ dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext) ++ { ++ _RGXFreeListReconstruction(psNode); ++ } ++ ++ /* Ready pages are allocated but kept hidden until OOM occurs. 
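++	 * _RGXFreeListReconstruction() above re-counted every block (ready pages
++	 * included) into ui32CurrentFLPages, so the ready bank is subtracted
++	 * again here to restore the original split.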
*/ ++ psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages; ++ if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages) ++ { ++ PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages); ++ return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED; ++ } ++ ++ /* Reset the firmware freelist structure */ ++ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ ++ psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; ++ psFWFreeList->ui32AllocatedPageCount = 0; ++ psFWFreeList->ui32AllocatedMMUPageCount = 0; ++ ++ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc); ++ ++ /* Check the Freelist checksum if required (as the list is fully populated) */ ++ if (psFreeList->bCheckFreelist) ++ { ++ IMG_UINT64 ui64CheckSum; ++ ++ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum); ++ } ++ ++ return eError; ++} ++ ++ ++void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32FreelistsCount, ++ const IMG_UINT32 *paui32Freelists) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ DLLIST_NODE *psNode, *psNext; ++ IMG_UINT32 ui32Loop; ++ RGXFWIF_KCCB_CMD sTACCBCmd; ++#if !defined(SUPPORT_SHADOW_FREELISTS) ++ DLLIST_NODE *psNodeHWRTData, *psNextHWRTData; ++ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; ++ RGXFWIF_HWRTDATA *psHWRTData; ++#endif ++ IMG_UINT32 ui32FinalFreelistsCount = 0; ++ IMG_UINT32 aui32FinalFreelists[RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT * 2]; /* Worst-case is double what we are sent */ ++ ++ PVR_ASSERT(psDevInfo != NULL); ++ PVR_ASSERT(ui32FreelistsCount <= RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT); ++ if (ui32FreelistsCount > RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT) ++ { ++ ui32FreelistsCount = RGXFWIF_MAX_FREELISTS_TO_RECONSTRUCT; ++ } ++ ++ //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount)); ++ ++ /* ++ * Initialise the response command (in case we don't find a freelist ID). ++ * Also copy the list to the 'final' freelist array. ++ */ ++ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE; ++ sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount; ++ ++ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) ++ { ++ sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] | ++ RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; ++ aui32FinalFreelists[ui32Loop] = paui32Freelists[ui32Loop]; ++ } ++ ++ ui32FinalFreelistsCount = ui32FreelistsCount; ++ ++ /* ++ * The list of freelists we have been given for reconstruction will ++ * consist of local and global freelists (maybe MMU as well). Any ++ * local freelists should have their global list specified as well. ++ * There may be cases where the global freelist is not given (in ++ * cases of partial setups before a poll failure for example). To ++ * handle that we must first ensure every local freelist has a global ++ * freelist specified, otherwise we add that to the 'final' list. ++ * This final list of freelists is created in a first pass. ++ * ++ * Even with the global freelists listed, there may be other local ++ * freelists not listed, which are going to have their global freelist ++ * reconstructed. 
Therefore we have to find those freelists as well ++ * meaning we will have to iterate the entire list of freelists to ++ * find which must be reconstructed. This is the second pass. ++ */ ++ OSLockAcquire(psDevInfo->hLockFreeList); ++ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) ++ { ++ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); ++ IMG_BOOL bInList = IMG_FALSE; ++ IMG_BOOL bGlobalInList = IMG_FALSE; ++ ++ /* Check if this local freelist is in the list and ensure its global is too. */ ++ if (psFreeList->ui32FreelistGlobalID != 0) ++ { ++ for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) ++ { ++ if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID) ++ { ++ bInList = IMG_TRUE; ++ } ++ if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) ++ { ++ bGlobalInList = IMG_TRUE; ++ } ++ } ++ ++ if (bInList && !bGlobalInList) ++ { ++ aui32FinalFreelists[ui32FinalFreelistsCount] = psFreeList->ui32FreelistGlobalID; ++ ui32FinalFreelistsCount++; ++ } ++ } ++ } ++ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext) ++ { ++ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode); ++ IMG_BOOL bReconstruct = IMG_FALSE; ++ ++ /* ++ * Check if this freelist needs to be reconstructed (was it requested ++ * or is its global freelist going to be reconstructed)... ++ */ ++ for (ui32Loop = 0; ui32Loop < ui32FinalFreelistsCount; ui32Loop++) ++ { ++ if (aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistID || ++ aui32FinalFreelists[ui32Loop] == psFreeList->ui32FreelistGlobalID) ++ { ++ bReconstruct = IMG_TRUE; ++ break; ++ } ++ } ++ ++ if (bReconstruct) ++ { ++ eError = RGXReconstructFreeList(psFreeList); ++ if (eError == PVRSRV_OK) ++ { ++#if !defined(SUPPORT_SHADOW_FREELISTS) ++ /* Mark all HWRTData's of reconstructing local freelists as HWR (applies to TA/3D's not finished yet) */ ++ dllist_foreach_node(&psFreeList->sNodeHWRTDataHead, psNodeHWRTData, psNextHWRTData) ++ { ++ psKMHWRTDataSet = IMG_CONTAINER_OF(psNodeHWRTData, RGX_KM_HW_RT_DATASET, sNodeHWRTData); ++ eError = DevmemAcquireCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc, (void **)&psHWRTData); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Devmem AcquireCpuVirtAddr Failed during Reconstructing of FreeList, FwMemDesc(%p),psHWRTData(%p)", ++ psKMHWRTDataSet->psHWRTDataFwMemDesc, ++ psHWRTData)); ++ continue; ++ } ++ ++ psHWRTData->eState = RGXFWIF_RTDATA_STATE_HWR; ++ psHWRTData->ui32HWRTDataFlags &= ~HWRTDATA_HAS_LAST_TA; ++ ++ DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); ++ } ++#endif ++ ++ /* Update the response for this freelist if it was specifically requested for reconstruction. */ ++ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) ++ { ++ if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID) ++ { ++ /* Reconstruction of this requested freelist was successful... */ ++ sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG; ++ break; ++ } ++ } ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Reconstructing of FreeList %p failed (%s)", ++ psFreeList, ++ PVRSRVGetErrorString(eError))); ++ } ++ } ++ } ++ OSLockRelease(psDevInfo->hLockFreeList); ++ ++ /* Check that all freelists were found and reconstructed... 
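++	 * (any ID still carrying RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG at
++	 * this point was either never found on sFreeListHead or failed to
++	 * reconstruct)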
*/ ++ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++) ++ { ++ PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] & ++ RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0); ++ } ++ ++ /* send feedback */ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_GEOM, ++ &sTACCBCmd, ++ PDUMP_FLAGS_NONE); ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ /* Kernel CCB should never fill up, as the FW is processing them right away */ ++ PVR_ASSERT(eError == PVRSRV_OK); ++} ++ ++/* Create a single HWRTData instance */ ++static PVRSRV_ERROR RGXCreateHWRTData_aux( ++ CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEV_VIRTADDR psVHeapTableDevVAddr, ++ IMG_DEV_VIRTADDR psPMMListDevVAddr, /* per-HWRTData */ ++ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS], ++ IMG_DEV_VIRTADDR sTailPtrsDevVAddr, ++ IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr, /* per-HWRTData */ ++ IMG_DEV_VIRTADDR sRgnHeaderDevVAddr, /* per-HWRTData */ ++ IMG_DEV_VIRTADDR sRTCDevVAddr, ++ IMG_UINT16 ui16MaxRTs, ++ RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie, ++ RGX_KM_HW_RT_DATASET **ppsKMHWRTDataSet) /* per-HWRTData */ ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ IMG_UINT32 ui32Loop; ++ ++ /* KM cookie storing all the FW/HW data */ ++ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet; ++ ++ /* local pointers for memory descriptors of FW allocations */ ++ DEVMEM_MEMDESC *psHWRTDataFwMemDesc = NULL; ++ DEVMEM_MEMDESC *psRTArrayFwMemDesc = NULL; ++ DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc = NULL; ++ ++ /* local pointer for CPU-mapped [FW]HWRTData */ ++ RGXFWIF_HWRTDATA *psHWRTData = NULL; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ /* Prepare the HW RT DataSet struct */ ++ psKMHWRTDataSet = OSAllocZMem(sizeof(*psKMHWRTDataSet)); ++ if (psKMHWRTDataSet == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto AllocError; ++ } ++ ++ *ppsKMHWRTDataSet = psKMHWRTDataSet; ++ psKMHWRTDataSet->psDeviceNode = psDeviceNode; ++ ++ psKMHWRTDataSet->psHWRTDataCommonCookie = psHWRTDataCommonCookie; ++ ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ /* ++ * This FW RT-Data is only mapped into kernel for initialisation. ++ * Otherwise this allocation is only used by the FW. 
++ * Therefore the GPU cache doesn't need coherency, and write-combine will ++ * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) ++ */ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(RGXFWIF_HWRTDATA), ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwHWRTData", ++ &psHWRTDataFwMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: DevmemAllocate for RGX_FWIF_HWRTDATA failed", ++ __func__)); ++ goto FWRTDataAllocateError; ++ } ++ ++ psKMHWRTDataSet->psHWRTDataFwMemDesc = psHWRTDataFwMemDesc; ++ eError = RGXSetFirmwareAddress( &psKMHWRTDataSet->sHWRTDataFwAddr, ++ psHWRTDataFwMemDesc, ++ 0, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:1", FWRTDataFwAddrError); ++ ++ eError = DevmemAcquireCpuVirtAddr(psHWRTDataFwMemDesc, ++ (void **)&psHWRTData); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError); ++ ++ psHWRTData->psVHeapTableDevVAddr = psVHeapTableDevVAddr; ++ ++ psHWRTData->sHWRTDataCommonFwAddr = psHWRTDataCommonCookie->sHWRTDataCommonFwAddr; ++ ++ psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr; ++ ++ psHWRTData->sTailPtrsDevVAddr = sTailPtrsDevVAddr; ++ psHWRTData->sMacrotileArrayDevVAddr = sMacrotileArrayDevVAddr; ++ psHWRTData->sRgnHeaderDevVAddr = sRgnHeaderDevVAddr; ++ psHWRTData->sRTCDevVAddr = sRTCDevVAddr; ++ ++ OSLockAcquire(psDevInfo->hLockFreeList); ++ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) ++ { ++ psKMHWRTDataSet->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop]; ++ psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount++; ++ psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psKMHWRTDataSet->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr; ++ /* invalid initial snapshot value, the snapshot is always taken during first kick ++ * and hence the value get replaced during the first kick anyway. So it's safe to set it 0. 
++ */ ++ psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0; ++ } ++#if !defined(SUPPORT_SHADOW_FREELISTS) ++ dllist_add_to_tail(&apsFreeLists[RGXFW_LOCAL_FREELIST]->sNodeHWRTDataHead, &(psKMHWRTDataSet->sNodeHWRTData)); ++#endif ++ OSLockRelease(psDevInfo->hLockFreeList); ++ ++ { ++ RGXFWIF_RTA_CTL *psRTACtl = &psHWRTData->sRTACtl; ++ ++ psRTACtl->ui32RenderTargetIndex = 0; ++ psRTACtl->ui32ActiveRenderTargets = 0; ++ psRTACtl->sValidRenderTargets.ui32Addr = 0; ++ psRTACtl->sRTANumPartialRenders.ui32Addr = 0; ++ psRTACtl->ui32MaxRTs = (IMG_UINT32) ui16MaxRTs; ++ ++ if (ui16MaxRTs > 1) ++ { ++ PDUMPCOMMENT(psDeviceNode, "Allocate memory for shadow render target cache"); ++ eError = DevmemFwAllocate(psDevInfo, ++ ui16MaxRTs * sizeof(IMG_UINT32), ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), ++ "FwShadowRTCache", ++ &psRTArrayFwMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate %u bytes for render target array (%s)", ++ __func__, ++ ui16MaxRTs, PVRSRVGetErrorString(eError))); ++ goto FWAllocateRTArryError; ++ } ++ ++ psKMHWRTDataSet->psRTArrayFwMemDesc = psRTArrayFwMemDesc; ++ eError = RGXSetFirmwareAddress(&psRTACtl->sValidRenderTargets, ++ psRTArrayFwMemDesc, ++ 0, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:2", FWAllocateRTArryFwAddrError); ++ ++ PDUMPCOMMENT(psDeviceNode, "Allocate memory for tracking renders accumulation"); ++ eError = DevmemFwAllocate(psDevInfo, ++ ui16MaxRTs * sizeof(IMG_UINT32), ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), ++ "FwRendersAccumulation", ++ &psRendersAccArrayFwMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate %u bytes for render target array (%s) (renders accumulation)", ++ __func__, ++ ui16MaxRTs, PVRSRVGetErrorString(eError))); ++ goto FWAllocateRTAccArryError; ++ } ++ psKMHWRTDataSet->psRendersAccArrayFwMemDesc = psRendersAccArrayFwMemDesc; ++ eError = RGXSetFirmwareAddress(&psRTACtl->sRTANumPartialRenders, ++ psRendersAccArrayFwMemDesc, ++ 0, ++ RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress:3", FWAllocRTAccArryFwAddrError); ++ } ++ } ++ ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDeviceNode, "Dump HWRTData 0x%08X", psKMHWRTDataSet->sHWRTDataFwAddr.ui32Addr); ++ DevmemPDumpLoadMem(psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS); ++#endif ++ ++ DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); ++ return PVRSRV_OK; ++ ++FWAllocRTAccArryFwAddrError: ++ DevmemFwUnmapAndFree(psDevInfo, psRendersAccArrayFwMemDesc); ++FWAllocateRTAccArryError: ++ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc); ++FWAllocateRTArryFwAddrError: ++ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc); ++FWAllocateRTArryError: ++ OSLockAcquire(psDevInfo->hLockFreeList); ++ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) ++ { ++ PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); ++ 
psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; ++ } ++ OSLockRelease(psDevInfo->hLockFreeList); ++ DevmemReleaseCpuVirtAddr(psKMHWRTDataSet->psHWRTDataFwMemDesc); ++FWRTDataCpuMapError: ++ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); ++FWRTDataFwAddrError: ++ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); ++FWRTDataAllocateError: ++ *ppsKMHWRTDataSet = NULL; ++ OSFreeMem(psKMHWRTDataSet); ++ ++AllocError: ++ return eError; ++} ++ ++static void RGXDestroyHWRTData_aux(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ IMG_UINT32 ui32Loop; ++ ++ if (psKMHWRTDataSet == NULL) ++ { ++ return; ++ } ++ ++ psDevInfo = psKMHWRTDataSet->psDeviceNode->pvDevice; ++ ++ if (psKMHWRTDataSet->psRTArrayFwMemDesc) ++ { ++ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRTArrayFwMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRTArrayFwMemDesc); ++ } ++ ++ if (psKMHWRTDataSet->psRendersAccArrayFwMemDesc) ++ { ++ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psRendersAccArrayFwMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psRendersAccArrayFwMemDesc); ++ } ++ ++ /* Decrease freelist refcount */ ++ OSLockAcquire(psDevInfo->hLockFreeList); ++ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++) ++ { ++ PVR_ASSERT(psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount > 0); ++ psKMHWRTDataSet->apsFreeLists[ui32Loop]->ui32RefCount--; ++ } ++#if !defined(SUPPORT_SHADOW_FREELISTS) ++ dllist_remove_node(&psKMHWRTDataSet->sNodeHWRTData); ++#endif ++ OSLockRelease(psDevInfo->hLockFreeList); ++ ++ /* Freeing the memory has to happen _after_ removing the HWRTData from the freelist ++ * otherwise we risk traversing the freelist to find a pointer from a freed data structure */ ++ RGXUnsetFirmwareAddress(psKMHWRTDataSet->psHWRTDataFwMemDesc); ++ DevmemFwUnmapAndFree(psDevInfo, psKMHWRTDataSet->psHWRTDataFwMemDesc); ++ ++ OSFreeMem(psKMHWRTDataSet); ++} ++ ++/* Create set of HWRTData(s) and bind it with a shared FW HWRTDataCommon */ ++PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEV_VIRTADDR asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS], ++ IMG_DEV_VIRTADDR asPMMListDevVAddr[RGXMKIF_NUM_RTDATAS], ++ RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], ++ IMG_UINT32 ui32ScreenPixelMax, ++ IMG_UINT64 ui64MultiSampleCtl, ++ IMG_UINT64 ui64FlippedMultiSampleCtl, ++ IMG_UINT32 ui32TPCStride, ++ IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], ++ IMG_UINT32 ui32TPCSize, ++ IMG_UINT32 ui32TEScreen, ++ IMG_UINT32 ui32TEAA, ++ IMG_UINT32 ui32TEMTILE1, ++ IMG_UINT32 ui32TEMTILE2, ++ IMG_UINT32 ui32MTileStride, ++ IMG_UINT32 ui32ISPMergeLowerX, ++ IMG_UINT32 ui32ISPMergeLowerY, ++ IMG_UINT32 ui32ISPMergeUpperX, ++ IMG_UINT32 ui32ISPMergeUpperY, ++ IMG_UINT32 ui32ISPMergeScaleX, ++ IMG_UINT32 ui32ISPMergeScaleY, ++ IMG_DEV_VIRTADDR asMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS], ++ IMG_DEV_VIRTADDR asRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS], ++ IMG_DEV_VIRTADDR asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS], ++ IMG_UINT32 uiRgnHeaderSize, ++ IMG_UINT32 ui32ISPMtileSize, ++ IMG_UINT16 ui16MaxRTs, ++ RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 ui32RTDataID; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; ++ RGXFWIF_HWRTDATA_COMMON *psHWRTDataCommon; ++ DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; ++ RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; 
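++
++	/* Construction sequence (overview): allocate the shared cleanup cookie,
++	 * create and initialise the FW HWRTDataCommon object, then build
++	 * RGXMKIF_NUM_RTDATAS HWRTData instances via RGXCreateHWRTData_aux(),
++	 * each referencing the common object through the cookie. */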
++ ++ /* Prepare KM cleanup object for HWRTDataCommon FW object */ ++ psHWRTDataCommonCookie = OSAllocZMem(sizeof(*psHWRTDataCommonCookie)); ++ if (psHWRTDataCommonCookie == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto err_HWRTDataCommonCookieAlloc; ++ } ++ ++ /* ++ * This FW common context is only mapped into kernel for initialisation. ++ * Otherwise this allocation is only used by the FW. ++ * Therefore the GPU cache doesn't need coherency, and write-combine will ++ * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) ++ */ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(RGXFWIF_HWRTDATA_COMMON), ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwHWRTDataCommon", ++ &psHWRTDataCommonFwMemDesc); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: DevmemAllocate for FwHWRTDataCommon failed", __func__)); ++ goto err_HWRTDataCommonAlloc; ++ } ++ eError = RGXSetFirmwareAddress(&sHWRTDataCommonFwAddr, psHWRTDataCommonFwMemDesc, 0, RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", err_HWRTDataCommonFwAddr); ++ ++ eError = DevmemAcquireCpuVirtAddr(psHWRTDataCommonFwMemDesc, (void **)&psHWRTDataCommon); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", err_HWRTDataCommonVA); ++ ++ psHWRTDataCommon->bTACachesNeedZeroing = IMG_FALSE; ++ psHWRTDataCommon->ui32ScreenPixelMax = ui32ScreenPixelMax; ++ psHWRTDataCommon->ui64MultiSampleCtl = ui64MultiSampleCtl; ++ psHWRTDataCommon->ui64FlippedMultiSampleCtl = ui64FlippedMultiSampleCtl; ++ psHWRTDataCommon->ui32TPCStride = ui32TPCStride; ++ psHWRTDataCommon->ui32TPCSize = ui32TPCSize; ++ psHWRTDataCommon->ui32TEScreen = ui32TEScreen; ++ psHWRTDataCommon->ui32TEAA = ui32TEAA; ++ psHWRTDataCommon->ui32TEMTILE1 = ui32TEMTILE1; ++ psHWRTDataCommon->ui32TEMTILE2 = ui32TEMTILE2; ++ psHWRTDataCommon->ui32MTileStride = ui32MTileStride; ++ psHWRTDataCommon->ui32ISPMergeLowerX = ui32ISPMergeLowerX; ++ psHWRTDataCommon->ui32ISPMergeLowerY = ui32ISPMergeLowerY; ++ psHWRTDataCommon->ui32ISPMergeUpperX = ui32ISPMergeUpperX; ++ psHWRTDataCommon->ui32ISPMergeUpperY = ui32ISPMergeUpperY; ++ psHWRTDataCommon->ui32ISPMergeScaleX = ui32ISPMergeScaleX; ++ psHWRTDataCommon->ui32ISPMergeScaleY = ui32ISPMergeScaleY; ++ psHWRTDataCommon->uiRgnHeaderSize = uiRgnHeaderSize; ++ psHWRTDataCommon->ui32ISPMtileSize = ui32ISPMtileSize; ++#if defined(PDUMP) ++ PDUMPCOMMENT(psDeviceNode, "Dump HWRTDataCommon"); ++ DevmemPDumpLoadMem(psHWRTDataCommonFwMemDesc, 0, sizeof(*psHWRTDataCommon), PDUMP_FLAGS_CONTINUOUS); ++#endif ++ DevmemReleaseCpuVirtAddr(psHWRTDataCommonFwMemDesc); ++ ++ psHWRTDataCommonCookie->ui32RefCount = 0; ++ psHWRTDataCommonCookie->psHWRTDataCommonFwMemDesc = psHWRTDataCommonFwMemDesc; ++ psHWRTDataCommonCookie->sHWRTDataCommonFwAddr = sHWRTDataCommonFwAddr; ++ ++ /* Here we are creating a set of HWRTData(s) ++ the number of elements in the set equals RGXMKIF_NUM_RTDATAS. 
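++	   Note the indexing in the loop below: per-RTData arrays (PMMList,
++	   MacrotileArray, RgnHeader) are indexed by ui32RTDataID directly,
++	   while per-geometry-data arrays (VHeapTable, TailPtrs, RTC) are
++	   indexed modulo RGXMKIF_NUM_GEOMDATAS, so when RGXMKIF_NUM_RTDATAS
++	   exceeds RGXMKIF_NUM_GEOMDATAS several HWRTDatas share one set of
++	   geometry buffers.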
++ */ ++ ++ for (ui32RTDataID = 0; ui32RTDataID < RGXMKIF_NUM_RTDATAS; ui32RTDataID++) ++ { ++ eError = RGXCreateHWRTData_aux( ++ psConnection, ++ psDeviceNode, ++ asVHeapTableDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], ++ asPMMListDevVAddr[ui32RTDataID], ++ &apsFreeLists[(ui32RTDataID % RGXMKIF_NUM_GEOMDATAS) * RGXFW_MAX_FREELISTS], ++ asTailPtrsDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], ++ asMacrotileArrayDevVAddr[ui32RTDataID], ++ asRgnHeaderDevVAddr[ui32RTDataID], ++ asRTCDevVAddr[ui32RTDataID % RGXMKIF_NUM_GEOMDATAS], ++ ui16MaxRTs, ++ psHWRTDataCommonCookie, ++ &pasKMHWRTDataSet[ui32RTDataID]); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to create HWRTData [slot %u] (%s)", ++ __func__, ++ ui32RTDataID, ++ PVRSRVGetErrorString(eError))); ++ goto err_HWRTDataAlloc; ++ } ++ psHWRTDataCommonCookie->ui32RefCount += 1; ++ } ++ ++ return PVRSRV_OK; ++ ++err_HWRTDataAlloc: ++ PVR_DPF((PVR_DBG_WARNING, "%s: err_HWRTDataAlloc %u", ++ __func__, psHWRTDataCommonCookie->ui32RefCount)); ++ if (pasKMHWRTDataSet) ++ { ++ for (ui32RTDataID = psHWRTDataCommonCookie->ui32RefCount; ui32RTDataID > 0; ui32RTDataID--) ++ { ++ if (pasKMHWRTDataSet[ui32RTDataID-1] != NULL) ++ { ++ RGXDestroyHWRTData_aux(pasKMHWRTDataSet[ui32RTDataID-1]); ++ pasKMHWRTDataSet[ui32RTDataID-1] = NULL; ++ } ++ } ++ } ++err_HWRTDataCommonVA: ++ RGXUnsetFirmwareAddress(psHWRTDataCommonFwMemDesc); ++err_HWRTDataCommonFwAddr: ++ DevmemFwUnmapAndFree(psDevInfo, psHWRTDataCommonFwMemDesc); ++err_HWRTDataCommonAlloc: ++ OSFreeMem(psHWRTDataCommonCookie); ++err_HWRTDataCommonCookieAlloc: ++ ++ return eError; ++} ++ ++/* Destroy a single instance of HWRTData. ++ Additionally, destroy the HWRTDataCommon{Cookie} objects ++ when it is the last HWRTData within a corresponding set of HWRTDatas. ++*/ ++PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ PVRSRV_DEVICE_NODE *psDevNode; ++ PVRSRV_ERROR eError; ++ PRGXFWIF_HWRTDATA psHWRTData; ++ RGX_HWRTDATA_COMMON_COOKIE *psCommonCookie; ++ ++ PVR_ASSERT(psKMHWRTDataSet); ++ ++ psDevNode = psKMHWRTDataSet->psDeviceNode; ++ psDevInfo = psDevNode->pvDevice; ++ ++ eError = RGXSetFirmwareAddress(&psHWRTData, ++ psKMHWRTDataSet->psHWRTDataFwMemDesc, 0, ++ RFW_FWADDR_NOREF_FLAG); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ /* Cleanup HWRTData */ ++ eError = RGXFWRequestHWRTDataCleanUp(psDevNode, psHWRTData); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ ++ psCommonCookie = psKMHWRTDataSet->psHWRTDataCommonCookie; ++ ++ RGXDestroyHWRTData_aux(psKMHWRTDataSet); ++ ++ /* We've got past potential PVRSRV_ERROR_RETRY events, so we are sure ++ that the HWRTDATA instance will be destroyed during this call. ++ Consequently, we decrease the ref count for HWRTDataCommonCookie. ++ ++ NOTE: This ref count does not require locks or atomics. ++ ------------------------------------------------------- ++ HWRTDatas bound into one pair are always destroyed sequentially, ++ within a single loop on the Client side. ++ The Common/Cookie objects always belong to only one pair of ++ HWRTDatas, and ref count is used to ensure that the Common/Cookie ++ objects will be destroyed after destruction of all HWRTDatas ++ within a single pair. ++ */ ++ psCommonCookie->ui32RefCount--; ++ ++ /* When ref count for HWRTDataCommonCookie hits ZERO ++ * we have to destroy the HWRTDataCommon [FW object] and the cookie ++ * [KM object] afterwards. 
*/ ++ if (psCommonCookie->ui32RefCount == 0) ++ { ++ RGXUnsetFirmwareAddress(psCommonCookie->psHWRTDataCommonFwMemDesc); ++ ++ /* We don't need to flush the SLC before freeing. ++ * FW RequestCleanUp has already done that for HWRTData, so we're fine ++ * now. */ ++ ++ DevmemFwUnmapAndFree(psDevNode->pvDevice, ++ psCommonCookie->psHWRTDataCommonFwMemDesc); ++ OSFreeMem(psCommonCookie); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32MaxFLPages, ++ IMG_UINT32 ui32InitFLPages, ++ IMG_UINT32 ui32GrowFLPages, ++ IMG_UINT32 ui32GrowParamThreshold, ++ RGX_FREELIST *psGlobalFreeList, ++ IMG_BOOL bCheckFreelist, ++ IMG_DEV_VIRTADDR sFreeListDevVAddr, ++ PMR *psFreeListPMR, ++ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, ++ RGX_FREELIST **ppsFreeList) ++{ ++ PVRSRV_ERROR eError; ++ RGXFWIF_FREELIST *psFWFreeList; ++ DEVMEM_MEMDESC *psFWFreelistMemDesc; ++ RGX_FREELIST *psFreeList; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT) ++ { ++ IMG_UINT32 ui32Size, ui32NewInitFLPages, ui32NewMaxFLPages, ui32NewGrowFLPages; ++ ++ /* Round up number of FL pages to the next multiple of the OS page size */ ++ ++ ui32Size = ui32InitFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; ++ ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); ++ ui32NewInitFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; ++ ++ ui32Size = ui32GrowFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; ++ ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); ++ ui32NewGrowFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; ++ ++ ui32Size = ui32MaxFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; ++ ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize()); ++ ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT; ++ ++ PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u", ++ __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages)); ++ ++ ui32InitFLPages = ui32NewInitFLPages; ++ ui32GrowFLPages = ui32NewGrowFLPages; ++ ui32MaxFLPages = ui32NewMaxFLPages; ++ } ++ ++ /* Allocate kernel freelist struct */ ++ psFreeList = OSAllocZMem(sizeof(*psFreeList)); ++ if (psFreeList == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to allocate host data structure", ++ __func__)); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto ErrorAllocHost; ++ } ++ ++ /* ++ * This FW FreeList context is only mapped into kernel for initialisation ++ * and reconstruction (at other times it is not mapped and only used by the ++ * FW). 
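++ * The CPU mapping is transient in both cases: it is dropped again with
++ * DevmemReleaseCpuVirtAddr() once the structure has been written.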
++ * Therefore the GPU cache doesn't need coherency, and write-combine will ++ * suffice on the CPU side (WC buffer will be flushed at the first TA-kick) ++ */ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(*psFWFreeList), ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | ++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) | ++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN), ++ "FwFreeList", ++ &psFWFreelistMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: DevmemAllocate for RGXFWIF_FREELIST failed", ++ __func__)); ++ goto FWFreeListAlloc; ++ } ++ ++ /* Initialise host data structures */ ++ psFreeList->psDevInfo = psDevInfo; ++ psFreeList->psConnection = psConnection; ++ psFreeList->psFreeListPMR = psFreeListPMR; ++ psFreeList->uiFreeListPMROffset = uiFreeListPMROffset; ++ psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc; ++ eError = RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr); ++ ++ /* psFreeList->ui32FreelistID set below with lock... */ ++ psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? psGlobalFreeList->ui32FreelistID : 0); ++ psFreeList->ui32MaxFLPages = ui32MaxFLPages; ++ psFreeList->ui32InitFLPages = ui32InitFLPages; ++ psFreeList->ui32GrowFLPages = ui32GrowFLPages; ++ psFreeList->ui32CurrentFLPages = 0; ++ psFreeList->ui32ReadyFLPages = 0; ++ psFreeList->ui32GrowThreshold = ui32GrowParamThreshold; ++ psFreeList->ui64FreelistChecksum = 0; ++ psFreeList->ui32RefCount = 0; ++ psFreeList->bCheckFreelist = bCheckFreelist; ++ dllist_init(&psFreeList->sMemoryBlockHead); ++ dllist_init(&psFreeList->sMemoryBlockInitHead); ++#if !defined(SUPPORT_SHADOW_FREELISTS) ++ dllist_init(&psFreeList->sNodeHWRTDataHead); ++#endif ++ psFreeList->ownerPid = OSGetCurrentClientProcessIDKM(); ++ ++ ++ /* Add to list of freelists */ ++ OSLockAcquire(psDevInfo->hLockFreeList); ++ psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++; ++ dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode); ++ OSLockRelease(psDevInfo->hLockFreeList); ++ ++ ++ /* Initialise FW data structure */ ++ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList); ++ PVR_LOG_GOTO_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap); ++ ++ { ++ const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages); ++ ++ psFWFreeList->ui32MaxPages = ui32MaxFLPages; ++ psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages; ++ psFWFreeList->ui32GrowPages = ui32GrowFLPages; ++ psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1; ++ psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr; ++ psFWFreeList->ui64CurrentDevVAddr = (sFreeListDevVAddr.uiAddr + ++ ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) & ++ ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1); ++ psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID; ++ psFWFreeList->bGrowPending = IMG_FALSE; ++ psFWFreeList->ui32ReadyPages = ui32ReadyPages; ++ ++#if defined(SUPPORT_SHADOW_FREELISTS) ++ /* Get the FW Memory Context address... 
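++    (shadow freelists only: the FW address of the owning memory context is
++    written into psFWFreeList->psFWMemContext; RFW_FWADDR_NOREF_FLAG is
++    understood to mean that no additional reference is taken on it)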
*/
++ eError = RGXSetFirmwareAddress(&psFWFreeList->psFWMemContext,
++ RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData),
++ 0, RFW_FWADDR_NOREF_FLAG);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: RGXSetFirmwareAddress for RGXFWIF_FWMEMCONTEXT failed",
++ __func__));
++ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
++ goto FWFreeListCpuMap;
++ }
++#else
++ PVR_UNREFERENCED_PARAMETER(hMemCtxPrivData);
++#endif
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, "
++ "Max FL base address 0x%016" IMG_UINT64_FMTSPECx ", "
++ "Init FL base address 0x%016" IMG_UINT64_FMTSPECx,
++ psFreeList,
++ ui32MaxFLPages,
++ ui32InitFLPages,
++ sFreeListDevVAddr.uiAddr,
++ psFWFreeList->ui64CurrentDevVAddr));
++#if defined(PDUMP)
++ PDUMPCOMMENT(psDeviceNode, "Dump FW FreeList");
++ DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS);
++
++ /*
++ * Separate dump of the Freelist's number of Pages and stack pointer.
++ * This makes it easy to modify the PB size in the out2.txt files.
++ */
++ PDUMPCOMMENT(psDeviceNode, "FreeList TotalPages");
++ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
++ offsetof(RGXFWIF_FREELIST, ui32CurrentPages),
++ psFWFreeList->ui32CurrentPages,
++ PDUMP_FLAGS_CONTINUOUS);
++ PDUMPCOMMENT(psDeviceNode, "FreeList StackPointer");
++ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
++ offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop),
++ psFWFreeList->ui32CurrentStackTop,
++ PDUMP_FLAGS_CONTINUOUS);
++#endif
++
++ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
++
++
++ /* Add initial PB block */
++ eError = RGXGrowFreeList(psFreeList,
++ ui32InitFLPages,
++ &psFreeList->sMemoryBlockInitHead);
++
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (%d)",
++ __func__,
++ sFreeListDevVAddr.uiAddr,
++ eError));
++ goto FWFreeListCpuMap;
++ }
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++ /* Update Stats */
++ PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App) */
++ 0,
++ psFreeList->ui32InitFLPages,
++ psFreeList->ui32NumHighPages,
++ psFreeList->ownerPid);
++
++#endif
++
++ /* return values */
++ *ppsFreeList = psFreeList;
++
++ return PVRSRV_OK;
++
++ /* Error handling */
++
++FWFreeListCpuMap:
++ /* Remove freelists from list */
++ OSLockAcquire(psDevInfo->hLockFreeList);
++ dllist_remove_node(&psFreeList->sNode);
++ OSLockRelease(psDevInfo->hLockFreeList);
++ RGXUnsetFirmwareAddress(psFWFreelistMemDesc);
++
++ErrorSetFwAddr:
++ DevmemFwUnmapAndFree(psDevInfo, psFWFreelistMemDesc);
++
++FWFreeListAlloc:
++ OSFreeMem(psFreeList);
++
++ErrorAllocHost:
++ PVR_ASSERT(eError != PVRSRV_OK);
++ return eError;
++}
++
++
++/*
++ RGXDestroyFreeList
++ */
++PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList)
++{
++ PVRSRV_ERROR eError;
++ IMG_UINT32 ui32RefCount;
++
++ PVR_ASSERT(psFreeList);
++
++ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
++ ui32RefCount = psFreeList->ui32RefCount;
++ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
++
++ if (ui32RefCount != 0)
++ {
++ /* Freelist still busy */
++ return PVRSRV_ERROR_RETRY;
++ }
++
++ /* Freelist is not in use => start firmware cleanup */
++ eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo,
++ psFreeList->sFreeListFWDevVAddr);
++ if (eError != PVRSRV_OK)
++ {
++ /* Can happen if the firmware took too long to 
handle the cleanup request,
++ * or if SLC-flushes didn't go through (due to some GPU lockup) */
++ return eError;
++ }
++
++ /* Remove FreeList from linked list before we destroy it... */
++ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
++ dllist_remove_node(&psFreeList->sNode);
++#if !defined(SUPPORT_SHADOW_FREELISTS)
++ /* Confirm all HWRTData nodes are freed before releasing freelist */
++ PVR_ASSERT(dllist_is_empty(&psFreeList->sNodeHWRTDataHead));
++#endif
++ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
++
++ if (psFreeList->bCheckFreelist)
++ {
++ RGXFWIF_FREELIST *psFWFreeList;
++ IMG_UINT64 ui32CurrentStackTop;
++ IMG_UINT64 ui64CheckSum;
++
++ /* Get the current stack pointer for this free list */
++ DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
++ ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop;
++ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
++
++ if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1)
++ {
++ /* Do consistency tests (as the list is fully populated) */
++ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
++ }
++ else
++ {
++ /* Check for duplicate pages, but don't check the checksum as the list is not fully populated */
++ _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum);
++ }
++ }
++
++ /* Destroy FW structures */
++ RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
++ DevmemFwUnmapAndFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
++
++ /* Remove grow/shrink blocks */
++ while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
++ {
++ eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
++ PVR_ASSERT(eError == PVRSRV_OK);
++ }
++
++ /* Remove initial PB block */
++ eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList);
++ PVR_ASSERT(eError == PVRSRV_OK);
++
++ /* consistency checks */
++ PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead));
++ PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
++
++ /* free Freelist */
++ OSFreeMem(psFreeList);
++
++ return eError;
++}
++
++
++/*
++ RGXCreateZSBuffer
++ */
++PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ DEVMEMINT_RESERVATION *psReservation,
++ PMR *psPMR,
++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
++ RGX_ZSBUFFER_DATA **ppsZSBuffer)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ RGXFWIF_PRBUFFER *psFWZSBuffer;
++ RGX_ZSBUFFER_DATA *psZSBuffer;
++ DEVMEM_MEMDESC *psFWZSBufferMemDesc;
++ IMG_BOOL bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE;
++
++ /* Allocate host data structure */
++ psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer));
++ if (psZSBuffer == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Failed to allocate cleanup data structure for ZS-Buffer",
++ __func__));
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto ErrorAllocCleanup;
++ }
++
++ /* Populate Host data */
++ psZSBuffer->psDevInfo = psDevInfo;
++ psZSBuffer->psReservation = psReservation;
++ psZSBuffer->psPMR = psPMR;
++ psZSBuffer->uiMapFlags = uiMapFlags;
++ psZSBuffer->ui32RefCount = 0;
++ psZSBuffer->bOnDemand = bOnDemand;
++ if (bOnDemand)
++ {
++ /* psZSBuffer->ui32ZSBufferID set below with lock... 
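++    (hLockZSBuffer guards both the ID counter and the deferred-allocation
++    list sZSBufferHead, mirroring the hLockFreeList pattern used for
++    freelist IDs above)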
*/
++ psZSBuffer->psMapping = NULL;
++
++ OSLockAcquire(psDevInfo->hLockZSBuffer);
++ psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++;
++ dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode);
++ OSLockRelease(psDevInfo->hLockZSBuffer);
++ }
++
++ /* Allocate firmware memory for ZS-Buffer. */
++ PDUMPCOMMENT(psDeviceNode, "Allocate firmware ZS-Buffer data structure");
++ eError = DevmemFwAllocate(psDevInfo,
++ sizeof(*psFWZSBuffer),
++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
++ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
++ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
++ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
++ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
++ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
++ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC |
++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(FW_MAIN),
++ "FwZSBuffer",
++ &psFWZSBufferMemDesc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Failed to allocate firmware ZS-Buffer (%s)",
++ __func__,
++ PVRSRVGetErrorString(eError)));
++ goto ErrorAllocFWZSBuffer;
++ }
++ psZSBuffer->psFWZSBufferMemDesc = psFWZSBufferMemDesc;
++
++ /* Temporarily map the firmware ZS-Buffer to the kernel. */
++ eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc,
++ (void **)&psFWZSBuffer);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Failed to map firmware ZS-Buffer (%s)",
++ __func__,
++ PVRSRVGetErrorString(eError)));
++ goto ErrorAcquireFWZSBuffer;
++ }
++
++ /* Populate FW ZS-Buffer data structure */
++ psFWZSBuffer->bOnDemand = bOnDemand;
++ psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED;
++ psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID;
++
++ /* Get firmware address of ZS-Buffer. */
++ eError = RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE);
++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXSetFirmwareAddress", ErrorSetFwAddr);
++
++ /* Dump the ZS-Buffer and the memory content */
++#if defined(PDUMP)
++ PDUMPCOMMENT(psDeviceNode, "Dump firmware ZS-Buffer");
++ DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS);
++#endif
++
++ /* Release address acquired above. */
++ DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
++
++
++ /* define return value */
++ *ppsZSBuffer = psZSBuffer;
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)",
++ psZSBuffer,
++ (bOnDemand) ? "On-Demand": "Up-front"));
++
++ psZSBuffer->owner = OSGetCurrentClientProcessIDKM();
++
++ return PVRSRV_OK;
++
++ /* error handling */
++
++ErrorSetFwAddr:
++ DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
++ErrorAcquireFWZSBuffer:
++ DevmemFwUnmapAndFree(psDevInfo, psFWZSBufferMemDesc);
++
++ErrorAllocFWZSBuffer:
++ OSFreeMem(psZSBuffer);
++
++ErrorAllocCleanup:
++ PVR_ASSERT(eError != PVRSRV_OK);
++ return eError;
++}
++
++
++/*
++ RGXDestroyZSBuffer
++ */
++PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer)
++{
++ POS_LOCK hLockZSBuffer;
++ PVRSRV_ERROR eError;
++
++ PVR_ASSERT(psZSBuffer);
++ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
++
++ /* Request ZS Buffer cleanup */
++ eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo,
++ psZSBuffer->sZSBufferFWDevVAddr);
++ if (eError == PVRSRV_OK)
++ {
++ /* Free the firmware ZS-Buffer. 
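++ * As in the other teardown paths in this file, the FW address is
++ * unset before the memdesc is unmapped and freed.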
*/
++ RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc);
++ DevmemFwUnmapAndFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc);
++
++ /* Remove Deferred Allocation from list */
++ if (psZSBuffer->bOnDemand)
++ {
++ OSLockAcquire(hLockZSBuffer);
++ PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode));
++ dllist_remove_node(&psZSBuffer->sNode);
++ OSLockRelease(hLockZSBuffer);
++ }
++
++ PVR_ASSERT(psZSBuffer->ui32RefCount == 0);
++
++ PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] destroyed", psZSBuffer));
++
++ /* Free ZS-Buffer host data structure */
++ OSFreeMem(psZSBuffer);
++
++ }
++
++ return eError;
++}
++
++PVRSRV_ERROR
++RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
++{
++ POS_LOCK hLockZSBuffer;
++ PVRSRV_ERROR eError;
++
++ if (!psZSBuffer)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ if (!psZSBuffer->bOnDemand)
++ {
++ /* Only deferred allocations can be populated */
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "ZS Buffer [%p, ID=0x%08x]: Physical backing requested",
++ psZSBuffer,
++ psZSBuffer->ui32ZSBufferID));
++ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
++
++ OSLockAcquire(hLockZSBuffer);
++
++ if (psZSBuffer->ui32RefCount == 0)
++ {
++ if (psZSBuffer->bOnDemand)
++ {
++ IMG_HANDLE hDevmemHeap;
++
++ PVR_ASSERT(psZSBuffer->psMapping == NULL);
++
++ /* Get Heap */
++ eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
++ PVR_ASSERT(psZSBuffer->psMapping == NULL);
++ if (unlikely(hDevmemHeap == (IMG_HANDLE)NULL))
++ {
++ OSLockRelease(hLockZSBuffer);
++ return PVRSRV_ERROR_INVALID_HEAP;
++ }
++
++ eError = DevmemIntMapPMR(hDevmemHeap,
++ psZSBuffer->psReservation,
++ psZSBuffer->psPMR,
++ psZSBuffer->uiMapFlags,
++ &psZSBuffer->psMapping);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "Unable to populate ZS Buffer [%p, ID=0x%08x] (%s)",
++ psZSBuffer,
++ psZSBuffer->ui32ZSBufferID,
++ PVRSRVGetErrorString(eError)));
++ OSLockRelease(hLockZSBuffer);
++ return eError;
++
++ }
++ PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
++ psZSBuffer,
++ psZSBuffer->ui32ZSBufferID));
++ }
++ }
++
++ /* Increase refcount */
++ psZSBuffer->ui32RefCount++;
++
++ OSLockRelease(hLockZSBuffer);
++
++ return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
++ RGX_POPULATION **ppsPopulation)
++{
++ RGX_POPULATION *psPopulation;
++ PVRSRV_ERROR eError;
++
++ psZSBuffer->ui32NumReqByApp++;
++
++#if defined(PVRSRV_ENABLE_PROCESS_STATS)
++ PVRSRVStatsUpdateZSBufferStats(1, 0, psZSBuffer->owner);
++#endif
++
++ /* Do the backing */
++ eError = RGXBackingZSBuffer(psZSBuffer);
++ if (eError != PVRSRV_OK)
++ {
++ goto OnErrorBacking;
++ }
++
++ /* Create the handle to the backing */
++ psPopulation = OSAllocMem(sizeof(*psPopulation));
++ if (psPopulation == NULL)
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto OnErrorAlloc;
++ }
++
++ psPopulation->psZSBuffer = psZSBuffer;
++
++ /* return value */
++ *ppsPopulation = psPopulation;
++
++ return PVRSRV_OK;
++
++OnErrorAlloc:
++ RGXUnbackingZSBuffer(psZSBuffer);
++
++OnErrorBacking:
++ PVR_ASSERT(eError != PVRSRV_OK);
++ return eError;
++}
++
++PVRSRV_ERROR
++RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
++{
++ POS_LOCK hLockZSBuffer;
++ PVRSRV_ERROR eError;
++
++ if (!psZSBuffer)
++ {
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ PVR_ASSERT(psZSBuffer->ui32RefCount);
++
++ PVR_DPF((PVR_DBG_MESSAGE,
++ "ZS Buffer [%p, ID=0x%08x]: Physical backing removal 
requested", ++ psZSBuffer, ++ psZSBuffer->ui32ZSBufferID)); ++ ++ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer; ++ ++ OSLockAcquire(hLockZSBuffer); ++ ++ if (psZSBuffer->bOnDemand) ++ { ++ if (psZSBuffer->ui32RefCount == 1) ++ { ++ PVR_ASSERT(psZSBuffer->psMapping); ++ ++ eError = DevmemIntUnmapPMR(psZSBuffer->psMapping); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Unable to unpopulate ZS Buffer [%p, ID=0x%08x] (%s)", ++ psZSBuffer, ++ psZSBuffer->ui32ZSBufferID, ++ PVRSRVGetErrorString(eError))); ++ OSLockRelease(hLockZSBuffer); ++ return eError; ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed", ++ psZSBuffer, ++ psZSBuffer->ui32ZSBufferID)); ++ } ++ } ++ ++ /* Decrease refcount*/ ++ psZSBuffer->ui32RefCount--; ++ ++ OSLockRelease(hLockZSBuffer); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (!psPopulation) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer); ++ if (eError != PVRSRV_OK) ++ { ++ return eError; ++ } ++ ++ OSFreeMem(psPopulation); ++ ++ return PVRSRV_OK; ++} ++ ++static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ RGX_ZSBUFFER_DATA *psZSBuffer = NULL; ++ ++ OSLockAcquire(psDevInfo->hLockZSBuffer); ++ ++ dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext) ++ { ++ RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode); ++ ++ if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID) ++ { ++ psZSBuffer = psThisZSBuffer; ++ break; ++ } ++ } ++ ++ OSLockRelease(psDevInfo->hLockZSBuffer); ++ return psZSBuffer; ++} ++ ++void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32ZSBufferID) ++{ ++ RGX_ZSBUFFER_DATA *psZSBuffer; ++ RGXFWIF_KCCB_CMD sTACCBCmd; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psDevInfo); ++ ++ /* scan all deferred allocations */ ++ psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); ++ ++ if (psZSBuffer) ++ { ++ IMG_BOOL bBackingDone = IMG_TRUE; ++ ++ /* Populate ZLS */ ++ eError = RGXBackingZSBuffer(psZSBuffer); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "Populating ZS-Buffer (ID = 0x%08x) failed (%s)", ++ ui32ZSBufferID, ++ PVRSRVGetErrorString(eError))); ++ bBackingDone = IMG_FALSE; ++ } ++ ++ /* send confirmation */ ++ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE; ++ sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; ++ sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone; ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_GEOM, ++ &sTACCBCmd, ++ PDUMP_FLAGS_NONE); ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ /* Kernel CCB should never fill up, as the FW is processing them right away */ ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ psZSBuffer->ui32NumReqByFW++; ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ PVRSRVStatsUpdateZSBufferStats(0, 1, psZSBuffer->owner); ++#endif ++ ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", ++ ui32ZSBufferID)); ++ } ++} ++ ++void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32ZSBufferID) ++{ ++ RGX_ZSBUFFER_DATA 
*psZSBuffer; ++ RGXFWIF_KCCB_CMD sTACCBCmd; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psDevInfo); ++ ++ /* scan all deferred allocations */ ++ psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID); ++ ++ if (psZSBuffer) ++ { ++ /* Unpopulate ZLS */ ++ eError = RGXUnbackingZSBuffer(psZSBuffer); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "UnPopulating ZS-Buffer (ID = 0x%08x) failed (%s)", ++ ui32ZSBufferID, ++ PVRSRVGetErrorString(eError))); ++ PVR_ASSERT(IMG_FALSE); ++ } ++ ++ /* send confirmation */ ++ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE; ++ sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr; ++ sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE; ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_GEOM, ++ &sTACCBCmd, ++ PDUMP_FLAGS_NONE); ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ /* Kernel CCB should never fill up, as the FW is processing them right away */ ++ PVR_ASSERT(eError == PVRSRV_OK); ++ ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", ++ ui32ZSBufferID)); ++ } ++} ++ ++static ++PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEM_MEMDESC *psAllocatedMemDesc, ++ IMG_UINT32 ui32AllocatedOffset, ++ DEVMEM_MEMDESC *psFWMemContextMemDesc, ++ IMG_DEV_VIRTADDR sVDMCallStackAddr, ++ IMG_UINT32 ui32CallStackDepth, ++ IMG_UINT32 ui32Priority, ++ IMG_UINT32 ui32MaxDeadlineMS, ++ IMG_UINT64 ui64RobustnessAddress, ++ RGX_COMMON_CONTEXT_INFO *psInfo, ++ RGX_SERVER_RC_TA_DATA *psTAData, ++ IMG_UINT32 ui32CCBAllocSizeLog2, ++ IMG_UINT32 ui32CCBMaxAllocSizeLog2, ++ IMG_UINT32 ui32ContextFlags) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGXFWIF_TACTX_STATE *psContextState; ++ IMG_UINT32 uiCoreIdx; ++ PVRSRV_ERROR eError; ++ /* ++ Allocate device memory for the firmware GPU context suspend state. ++ Note: the FW reads/writes the state to memory by accessing the GPU register interface. ++ */ ++ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware TA context suspend state"); ++ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(RGXFWIF_TACTX_STATE), ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwTAContextState", ++ &psTAData->psContextStateMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate firmware GPU context suspend state (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_tacontextsuspendalloc; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc, ++ (void **)&psContextState); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map firmware render context state (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_suspendcpuvirtacquire; ++ } ++ ++ for (uiCoreIdx = 0; uiCoreIdx < RGX_NUM_GEOM_CORES; uiCoreIdx++) ++ { ++ psContextState->asGeomCore[uiCoreIdx].uTAReg_VDM_CALL_STACK_POINTER_Init = ++ sVDMCallStackAddr.uiAddr + (uiCoreIdx * ui32CallStackDepth * sizeof(IMG_UINT64)); ++ } ++ ++ DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc); ++ ++ eError = FWCommonContextAllocate(psConnection, ++ psDeviceNode, ++ REQ_TYPE_TA, ++ RGXFWIF_DM_GEOM, ++ NULL, ++ psAllocatedMemDesc, ++ ui32AllocatedOffset, ++ psFWMemContextMemDesc, ++ psTAData->psContextStateMemDesc, ++ ui32CCBAllocSizeLog2 ? 
ui32CCBAllocSizeLog2 : RGX_TA_CCB_SIZE_LOG2,
++ ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TA_CCB_MAX_SIZE_LOG2,
++ ui32ContextFlags,
++ ui32Priority,
++ ui32MaxDeadlineMS,
++ ui64RobustnessAddress,
++ psInfo,
++ &psTAData->psServerCommonContext);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Failed to init TA fw common context (%s)",
++ __func__,
++ PVRSRVGetErrorString(eError)));
++ goto fail_tacommoncontext;
++ }
++
++ /*
++ * Dump the FW TA context suspend state buffer
++ */
++#if defined(PDUMP)
++ PDUMPCOMMENT(psDeviceNode, "Dump the TA context suspend state buffer");
++ DevmemPDumpLoadMem(psTAData->psContextStateMemDesc,
++ 0,
++ sizeof(RGXFWIF_TACTX_STATE),
++ PDUMP_FLAGS_CONTINUOUS);
++#endif
++
++ psTAData->ui32Priority = ui32Priority;
++ return PVRSRV_OK;
++
++fail_tacommoncontext:
++fail_suspendcpuvirtacquire:
++ DevmemFwUnmapAndFree(psDevInfo, psTAData->psContextStateMemDesc);
++fail_tacontextsuspendalloc:
++ PVR_ASSERT(eError != PVRSRV_OK);
++
++ return eError;
++}
++
++static
++PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection,
++ PVRSRV_DEVICE_NODE *psDeviceNode,
++ DEVMEM_MEMDESC *psAllocatedMemDesc,
++ IMG_UINT32 ui32AllocatedOffset,
++ DEVMEM_MEMDESC *psFWMemContextMemDesc,
++ IMG_UINT32 ui32Priority,
++ IMG_UINT32 ui32MaxDeadlineMS,
++ IMG_UINT64 ui64RobustnessAddress,
++ RGX_COMMON_CONTEXT_INFO *psInfo,
++ RGX_SERVER_RC_3D_DATA *ps3DData,
++ IMG_UINT32 ui32CCBAllocSizeLog2,
++ IMG_UINT32 ui32CCBMaxAllocSizeLog2,
++ IMG_UINT32 ui32ContextFlags)
++{
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++ PVRSRV_ERROR eError;
++ IMG_UINT uiNumISPStoreRegs;
++ IMG_UINT ui3DRegISPStateStoreSize = 0;
++
++ /*
++ Allocate device memory for the firmware GPU context suspend state.
++ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
++ */
++ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware 3D context suspend state");
++
++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
++ {
++ uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
++ RGX_FEATURE_NUM_RASTER_PIPES_IDX);
++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_MULTICORE_SUPPORT))
++ {
++ uiNumISPStoreRegs *= (1U + psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
++ RGX_FEATURE_XPU_MAX_SLAVES_IDX));
++ }
++ }
++ else
++ {
++ uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
++ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX);
++ }
++
++ /* Size of the CS buffer */
++ /* Calculate the size of the 3DCTX ISP state */
++ ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) +
++ uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]);
++
++ eError = DevmemFwAllocate(psDevInfo,
++ ui3DRegISPStateStoreSize,
++ RGX_FWCOMCTX_ALLOCFLAGS,
++ "Fw3DContextState",
++ &ps3DData->psContextStateMemDesc);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Failed to allocate firmware GPU context suspend state (%s)",
++ __func__,
++ PVRSRVGetErrorString(eError)));
++ goto fail_3dcontextsuspendalloc;
++ }
++
++ eError = FWCommonContextAllocate(psConnection,
++ psDeviceNode,
++ REQ_TYPE_3D,
++ RGXFWIF_DM_3D,
++ NULL,
++ psAllocatedMemDesc,
++ ui32AllocatedOffset,
++ psFWMemContextMemDesc,
++ ps3DData->psContextStateMemDesc,
++ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_3D_CCB_SIZE_LOG2,
++ ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_3D_CCB_MAX_SIZE_LOG2, ++ ui32ContextFlags, ++ ui32Priority, ++ ui32MaxDeadlineMS, ++ ui64RobustnessAddress, ++ psInfo, ++ &ps3DData->psServerCommonContext); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to init 3D fw common context (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_3dcommoncontext; ++ } ++ ++ /* ++ * Dump the FW 3D context suspend state buffer ++ */ ++ PDUMPCOMMENT(psDeviceNode, "Dump the 3D context suspend state buffer"); ++ DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc, ++ 0, ++ sizeof(RGXFWIF_3DCTX_STATE), ++ PDUMP_FLAGS_CONTINUOUS); ++ ++ ps3DData->ui32Priority = ui32Priority; ++ return PVRSRV_OK; ++ ++fail_3dcommoncontext: ++ DevmemFwUnmapAndFree(psDevInfo, ps3DData->psContextStateMemDesc); ++fail_3dcontextsuspendalloc: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ ++ return eError; ++} ++ ++ ++/* ++ * PVRSRVRGXCreateRenderContextKM ++ */ ++PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Priority, ++ IMG_DEV_VIRTADDR sVDMCallStackAddr, ++ IMG_UINT32 ui32CallStackDepth, ++ IMG_UINT32 ui32FrameworkRegisterSize, ++ IMG_PBYTE pabyFrameworkRegisters, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32StaticRenderContextStateSize, ++ IMG_PBYTE pStaticRenderContextState, ++ IMG_UINT32 ui32PackedCCBSizeU8888, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress, ++ IMG_UINT32 ui32MaxTADeadlineMS, ++ IMG_UINT32 ui32Max3DDeadlineMS, ++ RGX_SERVER_RENDER_CONTEXT **ppsRenderContext) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_SERVER_RENDER_CONTEXT *psRenderContext; ++ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); ++ RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; ++ RGXFWIF_FWRENDERCONTEXT *psFWRenderContext; ++ ++ *ppsRenderContext = NULL; ++ ++ if (ui32StaticRenderContextStateSize > RGXFWIF_STATIC_RENDERCONTEXT_SIZE) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psRenderContext = OSAllocZMem(sizeof(*psRenderContext)); ++ if (psRenderContext == NULL) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ eError = OSLockCreate(&psRenderContext->hLock); ++ ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_lock; ++ } ++ ++ psRenderContext->psDeviceNode = psDeviceNode; ++ ++ /* ++ Create the FW render context, this has the TA and 3D FW common ++ contexts embedded within it ++ */ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(RGXFWIF_FWRENDERCONTEXT), ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwRenderContext", ++ &psRenderContext->psFWRenderContextMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_fwrendercontext; ++ } ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ WorkEstInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); ++#endif ++ ++ if (ui32FrameworkRegisterSize) ++ { ++ /* ++ * Create the FW framework buffer ++ */ ++ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, ++ &psRenderContext->psFWFrameworkMemDesc, ++ ui32FrameworkRegisterSize); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate firmware GPU framework state (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_frameworkcreate; ++ } ++ ++ /* Copy the Framework client data into the framework buffer */ ++ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode, ++ psRenderContext->psFWFrameworkMemDesc, ++ pabyFrameworkRegisters, ++ ui32FrameworkRegisterSize); ++ if (eError != PVRSRV_OK) ++ { ++ 
PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to populate the framework buffer (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_frameworkcopy; ++ } ++ ++ sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc; ++ } ++ ++ eError = _Create3DContext(psConnection, ++ psDeviceNode, ++ psRenderContext->psFWRenderContextMemDesc, ++ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), ++ psFWMemContextMemDesc, ++ ui32Priority, ++ ui32Max3DDeadlineMS, ++ ui64RobustnessAddress, ++ &sInfo, ++ &psRenderContext->s3DData, ++ U32toU8_Unpack3(ui32PackedCCBSizeU8888), ++ U32toU8_Unpack4(ui32PackedCCBSizeU8888), ++ ui32ContextFlags); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_3dcontext; ++ } ++ ++ eError = _CreateTAContext(psConnection, ++ psDeviceNode, ++ psRenderContext->psFWRenderContextMemDesc, ++ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), ++ psFWMemContextMemDesc, ++ sVDMCallStackAddr, ++ ui32CallStackDepth, ++ ui32Priority, ++ ui32MaxTADeadlineMS, ++ ui64RobustnessAddress, ++ &sInfo, ++ &psRenderContext->sTAData, ++ U32toU8_Unpack1(ui32PackedCCBSizeU8888), ++ U32toU8_Unpack2(ui32PackedCCBSizeU8888), ++ ui32ContextFlags); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_tacontext; ++ } ++ ++ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc, ++ (void **)&psFWRenderContext); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_acquire_cpu_mapping; ++ } ++ ++ /* Copy the static render context data */ ++ OSDeviceMemCopy(&psFWRenderContext->sStaticRenderContextState, pStaticRenderContextState, ui32StaticRenderContextStateSize); ++ DevmemPDumpLoadMem(psRenderContext->psFWRenderContextMemDesc, 0, sizeof(RGXFWIF_FWRENDERCONTEXT), PDUMP_FLAGS_CONTINUOUS); ++ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc); ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ psRenderContext->psBufferSyncContext = ++ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, ++ "rogue-ta3d"); ++ if (IS_ERR(psRenderContext->psBufferSyncContext)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to create buffer_sync context (err=%ld)", ++ __func__, PTR_ERR(psRenderContext->psBufferSyncContext))); ++ ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto fail_buffer_sync_context_create; ++ } ++#endif ++ ++ SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence); ++ SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate); ++ SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence); ++ SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate); ++ ++ { ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock); ++ dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock); ++ } ++ ++ *ppsRenderContext = psRenderContext; ++ return PVRSRV_OK; ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++fail_buffer_sync_context_create: ++#endif ++fail_acquire_cpu_mapping: ++ _DestroyTAContext(&psRenderContext->sTAData, ++ psDeviceNode); ++fail_tacontext: ++ _Destroy3DContext(&psRenderContext->s3DData, ++ psRenderContext->psDeviceNode); ++fail_3dcontext: ++fail_frameworkcopy: ++ if (psRenderContext->psFWFrameworkMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc); ++ } ++fail_frameworkcreate: ++ DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc); ++fail_fwrendercontext: ++ OSLockDestroy(psRenderContext->hLock); ++fail_lock: ++ OSFreeMem(psRenderContext); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ ++ return 
eError;
++}
++
++/*
++ * PVRSRVRGXDestroyRenderContextKM
++ */
++PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice;
++#if defined(SUPPORT_WORKLOAD_ESTIMATION)
++ RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
++ IMG_UINT32 ui32WorkEstCCBSubmitted;
++#endif
++
++ /* Remove the node from the list before calling destroy: a successful
++ * destroy invalidates the node, so it must be re-added if destroy
++ * fails.
++ */
++ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
++ dllist_remove_node(&(psRenderContext->sListNode));
++ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
++
++#if defined(SUPPORT_BUFFER_SYNC)
++ /* Check psBufferSyncContext has not been destroyed already (by a previous
++ * call to this function which then later returned PVRSRV_ERROR_RETRY)
++ */
++ if (psRenderContext->psBufferSyncContext != NULL)
++ {
++ pvr_buffer_sync_context_destroy(psRenderContext->psBufferSyncContext);
++ psRenderContext->psBufferSyncContext = NULL;
++ }
++#endif
++
++ /* Cleanup the TA if we haven't already */
++ if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
++ {
++ eError = _DestroyTAContext(&psRenderContext->sTAData,
++ psRenderContext->psDeviceNode);
++ if (eError == PVRSRV_OK)
++ {
++ psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
++ }
++ else
++ {
++ goto e0;
++ }
++ }
++
++ /* Cleanup the 3D if we haven't already */
++ if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
++ {
++ eError = _Destroy3DContext(&psRenderContext->s3DData,
++ psRenderContext->psDeviceNode);
++ if (eError == PVRSRV_OK)
++ {
++ psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
++ }
++ else
++ {
++ goto e0;
++ }
++ }
++
++#if defined(SUPPORT_WORKLOAD_ESTIMATION)
++ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
++ (void **)&psFWRenderContext);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Failed to map firmware render context (%s)",
++ __func__,
++ PVRSRVGetErrorString(eError)));
++ goto e0;
++ }
++
++ ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
++
++ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
++
++ /* Check if all of the workload estimation CCB commands for this workload are read */
++ if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
++ {
++
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch",
++ __func__, ui32WorkEstCCBSubmitted,
++ psRenderContext->sWorkEstData.ui32WorkEstCCBReceived));
++
++ eError = PVRSRV_ERROR_RETRY;
++ goto e0;
++ }
++#endif
++
++ /*
++ Only if both TA and 3D contexts have been cleaned up can we
++ free the shared resources
++ */
++ if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
++ {
++ if (psRenderContext->psFWFrameworkMemDesc)
++ {
++ /* Free the framework buffer */
++ DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
++ }
++
++ /* Free the firmware render context */
++ DevmemFwUnmapAndFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
++
++ SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence);
++ SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate);
++ SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence);
++ SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);
++
++#if 
defined(SUPPORT_WORKLOAD_ESTIMATION) ++ WorkEstDeInitTA3D(psDevInfo, &psRenderContext->sWorkEstData); ++#endif ++ ++ OSLockDestroy(psRenderContext->hLock); ++ ++ OSFreeMem(psRenderContext); ++ } ++ ++ return PVRSRV_OK; ++ ++e0: ++ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock); ++ dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock); ++ return eError; ++} ++ ++ ++ ++#if (ENABLE_TA3D_UFO_DUMP == 1) ++static void DumpUfoList(IMG_UINT32 ui32ClientTAFenceCount, ++ IMG_UINT32 ui32ClientTAUpdateCount, ++ IMG_UINT32 ui32Client3DFenceCount, ++ IMG_UINT32 ui32Client3DUpdateCount, ++ PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress, ++ IMG_UINT32 *paui32ClientTAFenceValue, ++ PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress, ++ IMG_UINT32 *paui32ClientTAUpdateValue, ++ PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress, ++ IMG_UINT32 *paui32Client3DFenceValue, ++ PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress, ++ IMG_UINT32 *paui32Client3DUpdateValue) ++{ ++ IMG_UINT32 i; ++ ++ PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~", ++ __func__)); ++ ++ /* Dump Fence syncs, Update syncs and PR Update syncs */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:", ++ __func__, ui32ClientTAFenceCount)); ++ for (i = 0; i < ui32ClientTAFenceCount; i++) ++ { ++ if (BITMASK_HAS(pauiClientTAFenceUFOAddress->ui32Addr, 1)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %d/%d<%p>. FWAddr=0x%x," ++ " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", ++ __func__, i + 1, ui32ClientTAFenceCount, ++ (void *) pauiClientTAFenceUFOAddress, ++ pauiClientTAFenceUFOAddress->ui32Addr)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", ++ __func__, i + 1, ui32ClientTAFenceCount, ++ (void *) pauiClientTAFenceUFOAddress, ++ pauiClientTAFenceUFOAddress->ui32Addr, ++ *paui32ClientTAFenceValue, ++ *paui32ClientTAFenceValue)); ++ paui32ClientTAFenceValue++; ++ } ++ pauiClientTAFenceUFOAddress++; ++ } ++ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:", ++ __func__, ui32ClientTAUpdateCount)); ++ for (i = 0; i < ui32ClientTAUpdateCount; i++) ++ { ++ if (BITMASK_HAS(pauiClientTAUpdateUFOAddress->ui32Addr, 1)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %d/%d<%p>. FWAddr=0x%x," ++ " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", ++ __func__, i + 1, ui32ClientTAUpdateCount, ++ (void *) pauiClientTAUpdateUFOAddress, ++ pauiClientTAUpdateUFOAddress->ui32Addr)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)", ++ __func__, i + 1, ui32ClientTAUpdateCount, ++ (void *) pauiClientTAUpdateUFOAddress, ++ pauiClientTAUpdateUFOAddress->ui32Addr, ++ *paui32ClientTAUpdateValue, ++ *paui32ClientTAUpdateValue)); ++ paui32ClientTAUpdateValue++; ++ } ++ pauiClientTAUpdateUFOAddress++; ++ } ++ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:", ++ __func__, ui32Client3DFenceCount)); ++ for (i = 0; i < ui32Client3DFenceCount; i++) ++ { ++ if (BITMASK_HAS(pauiClient3DFenceUFOAddress->ui32Addr, 1)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %d/%d<%p>. FWAddr=0x%x," ++ " CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", ++ __func__, i + 1, ui32Client3DFenceCount, ++ (void *) pauiClient3DFenceUFOAddress, ++ pauiClient3DFenceUFOAddress->ui32Addr)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %d/%d<%p>. 
FWAddr=0x%x, CheckValue=%d(0x%x)", ++ __func__, i + 1, ui32Client3DFenceCount, ++ (void *) pauiClient3DFenceUFOAddress, ++ pauiClient3DFenceUFOAddress->ui32Addr, ++ *paui32Client3DFenceValue, ++ *paui32Client3DFenceValue)); ++ paui32Client3DFenceValue++; ++ } ++ pauiClient3DFenceUFOAddress++; ++ } ++ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:", ++ __func__, ui32Client3DUpdateCount)); ++ for (i = 0; i < ui32Client3DUpdateCount; i++) ++ { ++ if (BITMASK_HAS(pauiClient3DUpdateUFOAddress->ui32Addr, 1)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %d/%d<%p>. FWAddr=0x%x," ++ " UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", ++ __func__, i + 1, ui32Client3DUpdateCount, ++ (void *) pauiClient3DUpdateUFOAddress, ++ pauiClient3DUpdateUFOAddress->ui32Addr)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)", ++ __func__, i + 1, ui32Client3DUpdateCount, ++ (void *) pauiClient3DUpdateUFOAddress, ++ pauiClient3DUpdateUFOAddress->ui32Addr, ++ *paui32Client3DUpdateValue, ++ *paui32Client3DUpdateValue)); ++ paui32Client3DUpdateValue++; ++ } ++ pauiClient3DUpdateUFOAddress++; ++ } ++} ++#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ ++ ++/* ++ * PVRSRVRGXKickTA3DKM ++ */ ++PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ++ IMG_UINT32 ui32ClientTAFenceCount, ++ SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, ++ IMG_UINT32 *paui32ClientTAFenceSyncOffset, ++ IMG_UINT32 *paui32ClientTAFenceValue, ++ IMG_UINT32 ui32ClientTAUpdateCount, ++ SYNC_PRIMITIVE_BLOCK **apsClientTAUpdateSyncPrimBlock, ++ IMG_UINT32 *paui32ClientTAUpdateSyncOffset, ++ IMG_UINT32 *paui32ClientTAUpdateValue, ++ IMG_UINT32 ui32Client3DUpdateCount, ++ SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, ++ IMG_UINT32 *paui32Client3DUpdateSyncOffset, ++ IMG_UINT32 *paui32Client3DUpdateValue, ++ SYNC_PRIMITIVE_BLOCK *psPRFenceSyncPrimBlock, ++ IMG_UINT32 ui32PRFenceSyncOffset, ++ IMG_UINT32 ui32PRFenceValue, ++ PVRSRV_FENCE iCheckTAFence, ++ PVRSRV_TIMELINE iUpdateTATimeline, ++ PVRSRV_FENCE *piUpdateTAFence, ++ IMG_CHAR szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH], ++ PVRSRV_FENCE iCheck3DFence, ++ PVRSRV_TIMELINE iUpdate3DTimeline, ++ PVRSRV_FENCE *piUpdate3DFence, ++ IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], ++ IMG_UINT32 ui32TACmdSize, ++ IMG_PBYTE pui8TADMCmd, ++ IMG_UINT32 ui323DPRCmdSize, ++ IMG_PBYTE pui83DPRDMCmd, ++ IMG_UINT32 ui323DCmdSize, ++ IMG_PBYTE pui83DDMCmd, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_BOOL bKickTA, ++ IMG_BOOL bKickPR, ++ IMG_BOOL bKick3D, ++ IMG_BOOL bAbort, ++ IMG_UINT32 ui32PDumpFlags, ++ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, ++ RGX_ZSBUFFER_DATA *psZSBuffer, ++ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, ++ IMG_UINT32 ui32SyncPMRCount, ++ IMG_UINT32 *paui32SyncPMRFlags, ++ PMR **ppsSyncPMRs, ++ IMG_UINT32 ui32RenderTargetSize, ++ IMG_UINT32 ui32NumberOfDrawCalls, ++ IMG_UINT32 ui32NumberOfIndices, ++ IMG_UINT32 ui32NumberOfMRTs, ++ IMG_UINT64 ui64DeadlineInus) ++{ ++ /* per-context helper structures */ ++ RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData; ++ RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData; ++ ++ IMG_UINT32 ui32TACmdCount=0; ++ IMG_UINT32 ui323DCmdCount=0; ++ IMG_UINT32 ui32TACmdOffset=0; ++ IMG_UINT32 ui323DCmdOffset=0; ++ RGXFWIF_UFO sPRUFO; ++ IMG_UINT32 i; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_ERROR eError2 = PVRSRV_OK; ++ ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psRenderContext->s3DData.psServerCommonContext); 
++ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); ++ IMG_BOOL bCCBStateOpen = IMG_FALSE; ++ ++ IMG_UINT32 ui32ClientPRUpdateCount = 0; ++ PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL; ++ IMG_UINT32 *paui32ClientPRUpdateValue = NULL; ++ ++ PRGXFWIF_TIMESTAMP_ADDR pPreAddr; ++ PRGXFWIF_TIMESTAMP_ADDR pPostAddr; ++ PRGXFWIF_UFO_ADDR pRMWUFOAddr; ++ ++ PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL; ++ PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress = NULL; ++ PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL; ++ PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress = NULL; ++ PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress; ++ ++ IMG_UINT64 uiCheckTAFenceUID = 0; ++ IMG_UINT64 uiCheck3DFenceUID = 0; ++ IMG_UINT64 uiUpdateTAFenceUID = 0; ++ IMG_UINT64 uiUpdate3DFenceUID = 0; ++ ++ IMG_BOOL bUseCombined3DAnd3DPR = bKickPR && bKick3D && !pui83DPRDMCmd; ++ ++ RGXFWIF_KCCB_CMD_KICK_DATA sTACmdKickData; ++ RGXFWIF_KCCB_CMD_KICK_DATA s3DCmdKickData; ++ IMG_BOOL bUseSingleFWCommand = bKickTA && (bKickPR || bKick3D); ++ ++ IMG_UINT32 ui32TACmdSizeTmp = 0, ui323DCmdSizeTmp = 0; ++ ++ IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE; ++ ++ PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE; ++ PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE; ++ ++ IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE; ++ IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0; ++ IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; ++ ++ /* ++ * Count of the number of TA and 3D update values (may differ from number of ++ * TA and 3D updates later, as sync checkpoints do not need to specify a value) ++ */ ++ IMG_UINT32 ui32ClientPRUpdateValueCount = 0; ++ IMG_UINT32 ui32ClientTAUpdateValueCount = ui32ClientTAUpdateCount; ++ IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount; ++ PSYNC_CHECKPOINT *apsFenceTASyncCheckpoints = NULL; /*!< TA fence checkpoints */ ++ PSYNC_CHECKPOINT *apsFence3DSyncCheckpoints = NULL; /*!< 3D fence checkpoints */ ++ IMG_UINT32 ui32FenceTASyncCheckpointCount = 0; ++ IMG_UINT32 ui32Fence3DSyncCheckpointCount = 0; ++ PSYNC_CHECKPOINT psUpdateTASyncCheckpoint = NULL; /*!< TA update checkpoint (output) */ ++ PSYNC_CHECKPOINT psUpdate3DSyncCheckpoint = NULL; /*!< 3D update checkpoint (output) */ ++ PVRSRV_CLIENT_SYNC_PRIM *psTAFenceTimelineUpdateSync = NULL; ++ PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL; ++ void *pvTAUpdateFenceFinaliseData = NULL; ++ void *pv3DUpdateFenceFinaliseData = NULL; ++ ++ RGX_SYNC_DATA sTASyncData = {NULL}; /*!< Contains internal update syncs for TA */ ++ RGX_SYNC_DATA s3DSyncData = {NULL}; /*!< Contains internal update syncs for 3D */ ++ ++ IMG_BOOL bTestSLRAdd3DCheck = IMG_FALSE; ++#if defined(SUPPORT_VALIDATION) ++ PVRSRV_FENCE hTestSLRTmpFence = PVRSRV_NO_FENCE; ++ PSYNC_CHECKPOINT psDummySyncCheckpoint = NULL; ++#endif ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; ++ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; ++ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; ++ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA = {0}; ++ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D = {0}; ++ IMG_UINT32 ui32TACommandOffset = 0; ++ IMG_UINT32 ui323DCommandOffset = 0; ++ IMG_UINT32 ui32TACmdHeaderOffset = 0; ++ IMG_UINT32 ui323DCmdHeaderOffset = 0; ++ IMG_UINT32 ui323DFullRenderCommandOffset = 0; ++ IMG_UINT32 
ui32TACmdOffsetWrapCheck = 0; ++ IMG_UINT32 ui323DCmdOffsetWrapCheck = 0; ++ RGX_WORKLOAD sWorkloadCharacteristics = {0}; ++#endif ++ ++ IMG_UINT32 ui32TAFenceCount, ui323DFenceCount; ++ IMG_UINT32 ui32TAUpdateCount, ui323DUpdateCount; ++ IMG_UINT32 ui32PRUpdateCount; ++ ++ IMG_PID uiCurrentProcess = OSGetCurrentClientProcessIDKM(); ++ ++ IMG_UINT32 ui32Client3DFenceCount = 0; ++ ++ /* Ensure we haven't been given a null ptr to ++ * TA fence values if we have been told we ++ * have TA sync prim fences ++ */ ++ if (ui32ClientTAFenceCount > 0) ++ { ++ PVR_LOG_RETURN_IF_FALSE(paui32ClientTAFenceValue != NULL, ++ "paui32ClientTAFenceValue NULL but " ++ "ui32ClientTAFenceCount > 0", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ /* Ensure we haven't been given a null ptr to ++ * TA update values if we have been told we ++ * have TA updates ++ */ ++ if (ui32ClientTAUpdateCount > 0) ++ { ++ PVR_LOG_RETURN_IF_FALSE(paui32ClientTAUpdateValue != NULL, ++ "paui32ClientTAUpdateValue NULL but " ++ "ui32ClientTAUpdateCount > 0", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ /* Ensure we haven't been given a null ptr to ++ * 3D update values if we have been told we ++ * have 3D updates ++ */ ++ if (ui32Client3DUpdateCount > 0) ++ { ++ PVR_LOG_RETURN_IF_FALSE(paui32Client3DUpdateValue != NULL, ++ "paui32Client3DUpdateValue NULL but " ++ "ui32Client3DUpdateCount > 0", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ /* Write FW addresses into CMD SHARED BLOCKs */ ++ { ++ CMDTA3D_SHARED *psGeomCmdShared = (CMDTA3D_SHARED *)pui8TADMCmd; ++ CMDTA3D_SHARED *ps3DCmdShared = (CMDTA3D_SHARED *)pui83DDMCmd; ++ CMDTA3D_SHARED *psPR3DCmdShared = (CMDTA3D_SHARED *)pui83DPRDMCmd; ++ ++ if (psKMHWRTDataSet == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "KMHWRTDataSet is a null-pointer")); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* Write FW address for TA CMD ++ */ ++ if (psGeomCmdShared != NULL) ++ { ++ psGeomCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; ++ ++ if (psZSBuffer != NULL) ++ { ++ psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; ++ } ++ if (psMSAAScratchBuffer != NULL) ++ { ++ psGeomCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; ++ } ++ } ++ ++ /* Write FW address for 3D CMD ++ */ ++ if (ps3DCmdShared != NULL) ++ { ++ ps3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; ++ ++ if (psZSBuffer != NULL) ++ { ++ ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; ++ } ++ if (psMSAAScratchBuffer != NULL) ++ { ++ ps3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; ++ } ++ } ++ ++ /* Write FW address for PR3D CMD ++ */ ++ if (psPR3DCmdShared != NULL) ++ { ++ psPR3DCmdShared->sHWRTData = psKMHWRTDataSet->sHWRTDataFwAddr; ++ ++ if (psZSBuffer != NULL) ++ { ++ psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_ZSBUFFER] = psZSBuffer->sZSBufferFWDevVAddr; ++ } ++ if (psMSAAScratchBuffer != NULL) ++ { ++ psPR3DCmdShared->asPRBuffer[RGXFWIF_PRBUFFER_MSAABUFFER] = psMSAAScratchBuffer->sZSBufferFWDevVAddr; ++ } ++ } ++ } ++ ++ if (unlikely(iUpdateTATimeline >= 0 && !piUpdateTAFence)) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ if (unlikely(iUpdate3DTimeline >= 0 && !piUpdate3DFence)) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, " ++ "ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d", ++ __func__, ++ ui32ClientTAFenceCount, 
ui32ClientTAUpdateCount, ++ ui32Client3DFenceCount, ui32Client3DUpdateCount)); ++ ++ ++ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice, ++ &pPreAddr, ++ &pPostAddr, ++ &pRMWUFOAddr); ++ ++ /* Double-check we have a PR kick if there are client fences */ ++ if (unlikely(!bKickPR && ui32Client3DFenceCount != 0)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence passed without a PR kick", ++ __func__)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* Ensure the string is null-terminated (Required for safety) */ ++ szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; ++ szFenceName3D[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; ++ ++ OSLockAcquire(psRenderContext->hLock); ++ ++ ui32TAFenceCount = ui32ClientTAFenceCount; ++ ui323DFenceCount = ui32Client3DFenceCount; ++ ui32TAUpdateCount = ui32ClientTAUpdateCount; ++ ui323DUpdateCount = ui32Client3DUpdateCount; ++ ui32PRUpdateCount = ui32ClientPRUpdateCount; ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ if (ui32SyncPMRCount) ++ { ++ int err; ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling" ++ " pvr_buffer_sync_resolve_and_create_fences", __func__)); ++ ++ err = pvr_buffer_sync_resolve_and_create_fences( ++ psRenderContext->psBufferSyncContext, ++ psRenderContext->psDeviceNode->hSyncCheckpointContext, ++ ui32SyncPMRCount, ++ ppsSyncPMRs, ++ paui32SyncPMRFlags, ++ &ui32BufferFenceSyncCheckpointCount, ++ &apsBufferFenceSyncCheckpoints, ++ &psBufferUpdateSyncCheckpoint, ++ &psBufferSyncData ++ ); ++ ++ if (unlikely(err)) ++ { ++ switch (err) ++ { ++ case -EINTR: ++ eError = PVRSRV_ERROR_RETRY; ++ break; ++ case -ENOMEM: ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ break; ++ default: ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ break; ++ } ++ ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: " ++ "pvr_buffer_sync_resolve_and_create_fences failed (%d)", ++ __func__, eError)); ++ } ++ OSLockRelease(psRenderContext->hLock); ++ ++ return eError; ++ } ++ ++#if !defined(SUPPORT_STRIP_RENDERING) ++ if (bKickTA) ++ { ++ ui32TAFenceCount += ui32BufferFenceSyncCheckpointCount; ++ } ++ else ++ { ++ ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; ++ } ++#else /* !defined(SUPPORT_STRIP_RENDERING) */ ++ ui323DFenceCount += ui32BufferFenceSyncCheckpointCount; ++ ++ PVR_UNREFERENCED_PARAMETER(bTAFenceOnSyncCheckpointsOnly); ++#endif /* !defined(SUPPORT_STRIP_RENDERING) */ ++ ++ if (psBufferUpdateSyncCheckpoint != NULL) ++ { ++ if (bKick3D) ++ { ++ ui323DUpdateCount++; ++ } ++ else ++ { ++ ui32PRUpdateCount++; ++ } ++ } ++ } ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ ++#if !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 ++#error "Invalid value for UPDATE_FENCE_CHECKPOINT_COUNT. Must be either 1 or 2." 
++#endif /* !defined(UPDATE_FENCE_CHECKPOINT_COUNT) || UPDATE_FENCE_CHECKPOINT_COUNT != 1 && UPDATE_FENCE_CHECKPOINT_COUNT != 2 */ ++ ++ if (iCheckTAFence != PVRSRV_NO_FENCE) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[TA]" ++ " (iCheckFence=%d)," ++ " psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", ++ __func__, iCheckTAFence, ++ (void *) psRenderContext->psDeviceNode->hSyncCheckpointContext)); ++ ++ /* Resolve the sync checkpoints that make up the input fence */ ++ eError = SyncCheckpointResolveFence( ++ psRenderContext->psDeviceNode->hSyncCheckpointContext, ++ iCheckTAFence, ++ &ui32FenceTASyncCheckpointCount, ++ &apsFenceTASyncCheckpoints, ++ &uiCheckTAFenceUID, ++ ui32PDumpFlags); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", ++ __func__, eError)); ++ goto fail_resolve_input_ta_fence; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " ++ "checkpoints (apsFenceSyncCheckpoints=<%p>)", ++ __func__, iCheckTAFence, ui32FenceTASyncCheckpointCount, ++ (void *) apsFenceTASyncCheckpoints)); ++ ++#if defined(TA3D_CHECKPOINT_DEBUG) ++ if (apsFenceTASyncCheckpoints) ++ { ++ _DebugSyncCheckpoints(__func__, "TA", apsFenceTASyncCheckpoints, ++ ui32FenceTASyncCheckpointCount); ++ } ++#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ ++ } ++ ++ if (iCheck3DFence != PVRSRV_NO_FENCE) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[3D]" ++ " (iCheckFence=%d), " ++ "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", ++ __func__, iCheck3DFence, ++ (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); ++ ++ /* Resolve the sync checkpoints that make up the input fence */ ++ eError = SyncCheckpointResolveFence( ++ psRenderContext->psDeviceNode->hSyncCheckpointContext, ++ iCheck3DFence, ++ &ui32Fence3DSyncCheckpointCount, ++ &apsFence3DSyncCheckpoints, ++ &uiCheck3DFenceUID, ++ ui32PDumpFlags); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", ++ __func__, eError)); ++ goto fail_resolve_input_3d_fence; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d " ++ "checkpoints (apsFenceSyncCheckpoints=<%p>)", ++ __func__, iCheck3DFence, ui32Fence3DSyncCheckpointCount, ++ (void*)apsFence3DSyncCheckpoints)); ++ ++#if defined(TA3D_CHECKPOINT_DEBUG) ++ if (apsFence3DSyncCheckpoints) ++ { ++ _DebugSyncCheckpoints(__func__, "3D", apsFence3DSyncCheckpoints, ++ ui32Fence3DSyncCheckpointCount); ++ } ++#endif /* defined(TA3D_CHECKPOINT_DEBUG) */ ++ } ++ ++ if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || ++ iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) ++ { ++ IMG_UINT32 i; ++ ++ if (bKickTA) ++ { ++ ui32TAFenceCount += ui32FenceTASyncCheckpointCount; ++ ++ for (i = 0; i < ui32Fence3DSyncCheckpointCount; i++) ++ { ++ if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != ++ uiCurrentProcess) ++ { ++ ui32TAFenceCount++; ++ } ++ } ++ } ++ ++ if (bKick3D) ++ { ++ ui323DFenceCount += ui32Fence3DSyncCheckpointCount; ++ } ++ ++ ui32TAUpdateCount += iUpdateTATimeline != PVRSRV_NO_TIMELINE ? ++ UPDATE_FENCE_CHECKPOINT_COUNT : 0; ++ ui323DUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE ? ++ UPDATE_FENCE_CHECKPOINT_COUNT : 0; ++ ui32PRUpdateCount += iUpdate3DTimeline != PVRSRV_NO_TIMELINE && !bKick3D ? 
++ UPDATE_FENCE_CHECKPOINT_COUNT : 0; ++ } ++ ++#if defined(SUPPORT_VALIDATION) ++ /* Check if TestingSLR is adding an extra sync checkpoint to the ++ * 3D fence check (which we won't signal) ++ */ ++ if ((psDevInfo->ui32TestSLRInterval > 0) && ++ (--psDevInfo->ui32TestSLRCount == 0)) ++ { ++ bTestSLRAdd3DCheck = IMG_TRUE; ++ psDevInfo->ui32TestSLRCount = psDevInfo->ui32TestSLRInterval; ++ } ++ ++ if ((bTestSLRAdd3DCheck) && (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)) ++ { ++ if (iUpdate3DTimeline == PVRSRV_NO_TIMELINE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Would append additional SLR checkpoint " ++ "to 3D fence but no update 3D timeline provided", __func__)); ++ } ++ else ++ { ++ SyncCheckpointAlloc(psRenderContext->psDeviceNode->hSyncCheckpointContext, ++ iUpdate3DTimeline, ++ hTestSLRTmpFence, ++ "TestSLRCheck", ++ &psDummySyncCheckpoint); ++ PVR_DPF((PVR_DBG_WARNING, "%s: Appending additional SLR checkpoint to 3D fence " ++ "checkpoints (psDummySyncCheckpoint=<%p>)", ++ __func__, (void*)psDummySyncCheckpoint)); ++ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, ++ 1, ++ &psDummySyncCheckpoint); ++ if (!pauiClient3DFenceUFOAddress) ++ { ++ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; ++ } ++ ++ if (ui32Client3DFenceCount == 0) ++ { ++ b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; ++ } ++ ui323DFenceCount++; ++ } ++ } ++#endif /* defined(SUPPORT_VALIDATION) */ ++ ++ if (bKickTA) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," ++ " ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", ++ __func__, ui32TAFenceCount, ui32TAUpdateCount)); ++ ++ RGXCmdHelperInitCmdCCB_CommandSize( ++ psDevInfo, ++ 0, ++ ui32TAFenceCount, ++ ui32TAUpdateCount, ++ ui32TACmdSize, ++ &pPreAddr, ++ (bKick3D ? NULL : &pPostAddr), ++ (bKick3D ? NULL : &pRMWUFOAddr), ++ pasTACmdHelperData ++ ); ++ } ++ ++ if (bKickPR) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," ++ " ui32Client3DFenceCount=%d", __func__, ++ ui323DFenceCount)); ++ ++ RGXCmdHelperInitCmdCCB_CommandSize( ++ psDevInfo, ++ 0, ++ ui323DFenceCount, ++ 0, ++ sizeof(sPRUFO), ++ NULL, ++ NULL, ++ NULL, ++ &pas3DCmdHelperData[ui323DCmdCount++] ++ ); ++ } ++ ++ if (bKickPR && !bUseCombined3DAnd3DPR) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," ++ " ui32PRUpdateCount=%d", __func__, ++ ui32PRUpdateCount)); ++ ++ RGXCmdHelperInitCmdCCB_CommandSize( ++ psDevInfo, ++ 0, ++ 0, ++ ui32PRUpdateCount, ++ /* if the client has not provided a 3DPR command, the regular 3D ++ * command should be used instead */ ++ pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, ++ NULL, ++ NULL, ++ NULL, ++ &pas3DCmdHelperData[ui323DCmdCount++] ++ ); ++ } ++ ++ if (bKick3D || bAbort) ++ { ++ if (!bKickTA) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB()," ++ " ui32Client3DFenceCount=%d", __func__, ++ ui323DFenceCount)); ++ } ++ ++ RGXCmdHelperInitCmdCCB_CommandSize( ++ psDevInfo, ++ 0, ++ bKickTA ? 0 : ui323DFenceCount, ++ ui323DUpdateCount, ++ ui323DCmdSize, ++ (bKickTA ? 
NULL : &pPreAddr), ++ &pPostAddr, ++ &pRMWUFOAddr, ++ &pas3DCmdHelperData[ui323DCmdCount++] ++ ); ++ } ++ ++ if (bKickTA) ++ { ++ ui32TACmdSizeTmp = RGXCmdHelperGetCommandSize(1, pasTACmdHelperData); ++ ++ eError = RGXCheckSpaceCCB( ++ FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext), ++ ui32TACmdSizeTmp ++ ); ++ if (eError != PVRSRV_OK) ++ { ++ goto err_not_enough_space; ++ } ++ } ++ ++ if (ui323DCmdCount > 0) ++ { ++ ui323DCmdSizeTmp = RGXCmdHelperGetCommandSize(ui323DCmdCount, pas3DCmdHelperData); ++ ++ eError = RGXCheckSpaceCCB( ++ FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext), ++ ui323DCmdSizeTmp ++ ); ++ if (eError != PVRSRV_OK) ++ { ++ goto err_not_enough_space; ++ } ++ } ++ ++ /* need to reset the counter here */ ++ ++ ui323DCmdCount = 0; ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...", ++ __func__, ui32ClientTAFenceCount)); ++ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence, ++ ui32ClientTAFenceCount, ++ apsClientTAFenceSyncPrimBlock, ++ paui32ClientTAFenceSyncOffset); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ goto err_populate_sync_addr_list_ta_fence; ++ } ++ ++ if (ui32ClientTAFenceCount) ++ { ++ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: pauiClientTAFenceUFOAddress=<%p> ", ++ __func__, (void*)pauiClientTAFenceUFOAddress)); ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...", ++ __func__, ui32ClientTAUpdateCount)); ++ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate, ++ ui32ClientTAUpdateCount, ++ apsClientTAUpdateSyncPrimBlock, ++ paui32ClientTAUpdateSyncOffset); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ goto err_populate_sync_addr_list_ta_update; ++ } ++ ++ if (ui32ClientTAUpdateCount) ++ { ++ pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: pauiClientTAUpdateUFOAddress=<%p> ", ++ __func__, (void*)pauiClientTAUpdateUFOAddress)); ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...", ++ __func__, ui32Client3DFenceCount)); ++ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence, ++ ui32Client3DFenceCount, ++ NULL, ++ NULL); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ goto err_populate_sync_addr_list_3d_fence; ++ } ++ ++ if (ui32Client3DFenceCount) ++ { ++ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DFenceUFOAddress=<%p> ", ++ __func__, (void*)pauiClient3DFenceUFOAddress)); ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...", ++ __func__, ui32Client3DUpdateCount)); ++ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate, ++ ui32Client3DUpdateCount, ++ apsClient3DUpdateSyncPrimBlock, ++ paui32Client3DUpdateSyncOffset); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ goto err_populate_sync_addr_list_3d_update; ++ } ++ ++ if (ui32Client3DUpdateCount || (iUpdate3DTimeline != PVRSRV_NO_TIMELINE && piUpdate3DFence && bKick3D)) ++ { ++ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ", ++ __func__, (void*)pauiClient3DUpdateUFOAddress)); ++ ++ eError = 
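++ /* Convert the PR fence sync prim block + offset into the FW virtual address that the PR fence command assembled below (sPRUFO) will check. */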
SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, ui32PRFenceSyncOffset, &uiPRFenceUFOAddress); ++ ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ goto err_pr_fence_address; ++ } ++ ++#if (ENABLE_TA3D_UFO_DUMP == 1) ++ DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ++ ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), ++ ui32Client3DUpdateCount, ++ pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, ++ pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, ++ pauiClient3DFenceUFOAddress, NULL, ++ pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); ++#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ ++ ++ if (ui32SyncPMRCount) ++ { ++#if defined(SUPPORT_BUFFER_SYNC) ++#if !defined(SUPPORT_STRIP_RENDERING) ++ /* Append buffer sync fences to TA fences */ ++ if (ui32BufferFenceSyncCheckpointCount > 0 && bKickTA) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append %d buffer sync checkpoints to TA Fence " ++ "(&psRenderContext->sSyncAddrListTAFence=<%p>, " ++ "pauiClientTAFenceUFOAddress=<%p>)...", ++ __func__, ++ ui32BufferFenceSyncCheckpointCount, ++ (void*)&psRenderContext->sSyncAddrListTAFence , ++ (void*)pauiClientTAFenceUFOAddress)); ++ SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence, ++ ui32BufferFenceSyncCheckpointCount, ++ apsBufferFenceSyncCheckpoints); ++ if (!pauiClientTAFenceUFOAddress) ++ { ++ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; ++ } ++ if (ui32ClientTAFenceCount == 0) ++ { ++ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; ++ } ++ ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount; ++ } ++ else ++#endif ++ /* Append buffer sync fences to 3D fences */ ++ if (ui32BufferFenceSyncCheckpointCount > 0) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append %d buffer sync checkpoints to 3D Fence " ++ "(&psRenderContext->sSyncAddrList3DFence=<%p>, " ++ "pauiClient3DFenceUFOAddress=<%p>)...", ++ __func__, ++ ui32BufferFenceSyncCheckpointCount, ++ (void*)&psRenderContext->sSyncAddrList3DFence, ++ (void*)pauiClient3DFenceUFOAddress)); ++ SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrList3DFence, ++ ui32BufferFenceSyncCheckpointCount, ++ apsBufferFenceSyncCheckpoints); ++ if (!pauiClient3DFenceUFOAddress) ++ { ++ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; ++ } ++ if (ui32Client3DFenceCount == 0) ++ { ++ b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; ++ } ++ ui32Client3DFenceCount += ui32BufferFenceSyncCheckpointCount; ++ } ++ ++ if (psBufferUpdateSyncCheckpoint) ++ { ++ /* If we have a 3D kick append update to the 3D updates else append to the PR update */ ++ if (bKick3D) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append 1 buffer sync checkpoint<%p> to 3D Update" ++ " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," ++ " pauiClient3DUpdateUFOAddress=<%p>)...", ++ __func__, ++ (void*)psBufferUpdateSyncCheckpoint, ++ (void*)&psRenderContext->sSyncAddrList3DUpdate, ++ (void*)pauiClient3DUpdateUFOAddress)); ++ /* Append buffer sync update to 3D updates */ ++ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, ++ 1, ++ &psBufferUpdateSyncCheckpoint); ++ if (!pauiClient3DUpdateUFOAddress) ++ { ++ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; ++ } ++ ui32Client3DUpdateCount++; ++ } ++ else ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append 1 buffer sync checkpoint<%p> to PR Update" ++ " (&psRenderContext->sSyncAddrList3DUpdate=<%p>," ++ " pauiClientPRUpdateUFOAddress=<%p>)...", ++ __func__, ++ 
(void*)psBufferUpdateSyncCheckpoint, ++ (void*)&psRenderContext->sSyncAddrList3DUpdate, ++ (void*)pauiClientPRUpdateUFOAddress)); ++ /* Attach update to the 3D (used for PR) Updates */ ++ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, ++ 1, ++ &psBufferUpdateSyncCheckpoint); ++ if (!pauiClientPRUpdateUFOAddress) ++ { ++ pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; ++ } ++ ui32ClientPRUpdateCount++; ++ } ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: (after buffer_sync) ui32ClientTAFenceCount=%d, " ++ "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, " ++ "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", ++ __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ++ ui32Client3DFenceCount, ui32Client3DUpdateCount, ++ ui32ClientPRUpdateCount)); ++ ++#else /* defined(SUPPORT_BUFFER_SYNC) */ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Buffer sync not supported but got %u buffers", ++ __func__, ui32SyncPMRCount)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto err_no_buffer_sync_invalid_params; ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ } ++ ++ /* ++ * The hardware requires a PR to be submitted if there is a TA (otherwise ++ * it can wedge if we run out of PB space with no PR to run) ++ * ++ * If we only have a TA, attach native checks to the TA and updates to the PR ++ * If we have a TA and 3D, attach checks to TA, updates to 3D ++ * If we only have a 3D, attach checks and updates to the 3D ++ * ++ * Note that 'updates' includes the cleanup syncs for 'check' fence FDs, in ++ * addition to the update fence FD (if supplied) ++ * ++ * Currently, the client driver never kicks only the 3D, so we only support ++ * that for the time being. ++ */ ++ if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 || ++ iCheck3DFence >= 0 || iUpdate3DTimeline >= 0) ++ { ++ PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL; ++ PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL; ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d", ++ __func__, iCheckTAFence, iUpdateTATimeline)); ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: [3D] iCheckFence = %d, iUpdateTimeline = %d", ++ __func__, iCheck3DFence, iUpdate3DTimeline)); ++ ++ { ++ /* Create the output fence for TA (if required) */ ++ if (iUpdateTATimeline != PVRSRV_NO_TIMELINE) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: calling SyncCheckpointCreateFence[TA] " ++ "(iUpdateFence=%d, iUpdateTimeline=%d, " ++ "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", ++ __func__, iUpdateTAFence, iUpdateTATimeline, ++ (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); ++ eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, ++ szFenceNameTA, ++ iUpdateTATimeline, ++ psRenderContext->psDeviceNode->hSyncCheckpointContext, ++ &iUpdateTAFence, ++ &uiUpdateTAFenceUID, ++ &pvTAUpdateFenceFinaliseData, ++ &psUpdateTASyncCheckpoint, ++ (void*)&psTAFenceTimelineUpdateSync, ++ &ui32TAFenceTimelineUpdateValue, ++ ui32PDumpFlags); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: SyncCheckpointCreateFence[TA] failed (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_create_ta_fence; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: returned from SyncCheckpointCreateFence[TA] " ++ "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " ++ "ui32FenceTimelineUpdateValue=0x%x)", ++ __func__, iUpdateTAFence, ++ (void*)psTAFenceTimelineUpdateSync, ++ ui32TAFenceTimelineUpdateValue)); ++ ++ /* Store the FW address of the 
update sync checkpoint in pauiClientTAIntUpdateUFOAddress */ ++ pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint); ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", ++ __func__, pauiClientTAIntUpdateUFOAddress->ui32Addr)); ++ } ++ ++ /* Append the sync prim update for the TA timeline (if required) */ ++ if (psTAFenceTimelineUpdateSync) ++ { ++ sTASyncData.ui32ClientUpdateCount = ui32ClientTAUpdateCount; ++ sTASyncData.ui32ClientUpdateValueCount = ui32ClientTAUpdateValueCount; ++ sTASyncData.ui32ClientPRUpdateValueCount = (bKick3D) ? 0 : ui32ClientPRUpdateValueCount; ++ sTASyncData.paui32ClientUpdateValue = paui32ClientTAUpdateValue; ++ ++ eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue, ++ &psRenderContext->sSyncAddrListTAUpdate, ++ (bKick3D) ? NULL : &psRenderContext->sSyncAddrList3DUpdate, ++ psTAFenceTimelineUpdateSync, ++ &sTASyncData, ++ bKick3D); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ goto fail_alloc_update_values_mem_TA; ++ } ++ ++ paui32ClientTAUpdateValue = sTASyncData.paui32ClientUpdateValue; ++ ui32ClientTAUpdateValueCount = sTASyncData.ui32ClientUpdateValueCount; ++ pauiClientTAUpdateUFOAddress = sTASyncData.pauiClientUpdateUFOAddress; ++ ui32ClientTAUpdateCount = sTASyncData.ui32ClientUpdateCount; ++ } ++ ++ /* Create the output fence for 3D (if required) */ ++ if (iUpdate3DTimeline != PVRSRV_NO_TIMELINE) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: calling SyncCheckpointCreateFence[3D] " ++ "(iUpdateFence=%d, iUpdateTimeline=%d, " ++ "psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", ++ __func__, iUpdate3DFence, iUpdate3DTimeline, ++ (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext)); ++ eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode, ++ szFenceName3D, ++ iUpdate3DTimeline, ++ psRenderContext->psDeviceNode->hSyncCheckpointContext, ++ &iUpdate3DFence, ++ &uiUpdate3DFenceUID, ++ &pv3DUpdateFenceFinaliseData, ++ &psUpdate3DSyncCheckpoint, ++ (void*)&ps3DFenceTimelineUpdateSync, ++ &ui323DFenceTimelineUpdateValue, ++ ui32PDumpFlags); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: SyncCheckpointCreateFence[3D] failed (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_create_3d_fence; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: returned from SyncCheckpointCreateFence[3D] " ++ "(iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, " ++ "ui32FenceTimelineUpdateValue=0x%x)", ++ __func__, iUpdate3DFence, ++ (void*)ps3DFenceTimelineUpdateSync, ++ ui323DFenceTimelineUpdateValue)); ++ ++ /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */ ++ pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint); ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", ++ __func__, pauiClient3DIntUpdateUFOAddress->ui32Addr)); ++ } ++ ++ /* Append the sync prim update for the 3D timeline (if required) */ ++ if (ps3DFenceTimelineUpdateSync) ++ { ++ s3DSyncData.ui32ClientUpdateCount = ui32Client3DUpdateCount; ++ s3DSyncData.ui32ClientUpdateValueCount = ui32Client3DUpdateValueCount; ++ s3DSyncData.ui32ClientPRUpdateValueCount = ui32ClientPRUpdateValueCount; ++ s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue; ++ ++ eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue, ++ &psRenderContext->sSyncAddrList3DUpdate, ++ &psRenderContext->sSyncAddrList3DUpdate, /*!< PR update: is this 
required? */ ++ ps3DFenceTimelineUpdateSync, ++ &s3DSyncData, ++ bKick3D); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ goto fail_alloc_update_values_mem_3D; ++ } ++ ++ paui32Client3DUpdateValue = s3DSyncData.paui32ClientUpdateValue; ++ ui32Client3DUpdateValueCount = s3DSyncData.ui32ClientUpdateValueCount; ++ pauiClient3DUpdateUFOAddress = s3DSyncData.pauiClientUpdateUFOAddress; ++ ui32Client3DUpdateCount = s3DSyncData.ui32ClientUpdateCount; ++ ++ if (!bKick3D) ++ { ++ paui32ClientPRUpdateValue = s3DSyncData.paui32ClientPRUpdateValue; ++ ui32ClientPRUpdateValueCount = s3DSyncData.ui32ClientPRUpdateValueCount; ++ pauiClientPRUpdateUFOAddress = s3DSyncData.pauiClientPRUpdateUFOAddress; ++ ui32ClientPRUpdateCount = s3DSyncData.ui32ClientPRUpdateCount; ++ } ++ } ++ ++ /* ++ * The hardware requires a PR to be submitted if there is a TA OOM. ++ * If we only have a TA, attach native checks and updates to the TA ++ * and 3D updates to the PR. ++ * If we have a TA and 3D, attach the native TA checks and updates ++ * to the TA and similarly for the 3D. ++ * Note that 'updates' includes the cleanup syncs for 'check' fence ++ * FDs, in addition to the update fence FD (if supplied). ++ * Currently, the client driver never kicks only the 3D, so we don't ++ * support that for the time being. ++ */ ++ ++ { ++ if (bKickTA) ++ { ++ /* Attach checks and updates to TA */ ++ ++ /* Checks (from input fence) */ ++ if (ui32FenceTASyncCheckpointCount > 0) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...", ++ __func__, ++ ui32FenceTASyncCheckpointCount, ++ (void*)apsFenceTASyncCheckpoints)); ++ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, ++ ui32FenceTASyncCheckpointCount, ++ apsFenceTASyncCheckpoints); ++ if (!pauiClientTAFenceUFOAddress) ++ { ++ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: {ui32ClientTAFenceCount was %d, now %d}", ++ __func__, ui32ClientTAFenceCount, ++ ui32ClientTAFenceCount + ui32FenceTASyncCheckpointCount)); ++ if (ui32ClientTAFenceCount == 0) ++ { ++ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; ++ } ++ ui32ClientTAFenceCount += ui32FenceTASyncCheckpointCount; ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: {ui32ClientTAFenceCount now %d}", ++ __func__, ui32ClientTAFenceCount)); ++ ++ if (psUpdateTASyncCheckpoint) ++ { ++ /* Update (from output fence) */ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append 1 sync checkpoint<%p> (ID=%d) to TA Update...", ++ __func__, (void*)psUpdateTASyncCheckpoint, ++ SyncCheckpointGetId(psUpdateTASyncCheckpoint))); ++ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate, ++ 1, ++ &psUpdateTASyncCheckpoint); ++ if (!pauiClientTAUpdateUFOAddress) ++ { ++ pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs; ++ } ++ ui32ClientTAUpdateCount++; ++ } ++ ++ if (!bKick3D && psUpdate3DSyncCheckpoint) ++ { ++ /* Attach update to the 3D (used for PR) Updates */ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D(PR) Update...", ++ __func__, (void*)psUpdate3DSyncCheckpoint, ++ SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); ++ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, ++ 1, ++ &psUpdate3DSyncCheckpoint); ++ if (!pauiClientPRUpdateUFOAddress) ++ { ++ pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; ++ } ++ ui32ClientPRUpdateCount++; ++ } ++ } ++ ++ if (bKick3D) ++ { ++ /* Attach checks and 
updates to the 3D */ ++ ++ /* Checks (from input fence) */ ++ if (ui32Fence3DSyncCheckpointCount > 0) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append %d sync checkpoints to 3D Fence...", ++ __func__, ui32Fence3DSyncCheckpointCount)); ++ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence, ++ ui32Fence3DSyncCheckpointCount, ++ apsFence3DSyncCheckpoints); ++ if (!pauiClient3DFenceUFOAddress) ++ { ++ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs; ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: {ui32Client3DFenceCount was %d, now %d}", ++ __func__, ui32Client3DFenceCount, ++ ui32Client3DFenceCount + ui32Fence3DSyncCheckpointCount)); ++ if (ui32Client3DFenceCount == 0) ++ { ++ b3DFenceOnSyncCheckpointsOnly = IMG_TRUE; ++ } ++ ui32Client3DFenceCount += ui32Fence3DSyncCheckpointCount; ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: {ui32Client3DFenceCount was %d}", ++ __func__, ui32Client3DFenceCount)); ++ ++ if (psUpdate3DSyncCheckpoint) ++ { ++ /* Update (from output fence) */ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", ++ __func__, (void*)psUpdate3DSyncCheckpoint, ++ SyncCheckpointGetId(psUpdate3DSyncCheckpoint))); ++ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate, ++ 1, ++ &psUpdate3DSyncCheckpoint); ++ if (!pauiClient3DUpdateUFOAddress) ++ { ++ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs; ++ } ++ ui32Client3DUpdateCount++; ++ } ++ } ++ ++ /* ++ * Relocate sync check points from the 3D fence that are ++ * external to the current process, to the TA fence. ++ * This avoids a sync lockup when dependent renders are ++ * submitted out-of-order and a PR must be scheduled. ++ */ ++ if (bKickTA) ++ { ++ /* Search for external timeline dependencies */ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Checking 3D fence for external sync points (%d)...", ++ __func__, ui32Fence3DSyncCheckpointCount)); ++ ++ for (i=0; i<ui32Fence3DSyncCheckpointCount; i++) ++ { ++ /* Check whether this sync checkpoint was created by another process */ ++ if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != uiCurrentProcess) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Append 1 sync checkpoint<%p> (ID=%d) to TA Fence...", ++ __func__, (void*)apsFence3DSyncCheckpoints[i], ++ SyncCheckpointGetId(apsFence3DSyncCheckpoints[i]))); ++ ++ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence, ++ 1, ++ &apsFence3DSyncCheckpoints[i]); ++ ++ if (!pauiClientTAFenceUFOAddress) ++ { ++ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: {ui32ClientTAFenceCount was %d, now %d}", ++ __func__, ++ ui32ClientTAFenceCount, ++ ui32ClientTAFenceCount + 1)); ++ ++ if (ui32ClientTAFenceCount == 0) ++ { ++ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE; ++ } ++ ++ ui32ClientTAFenceCount++; ++ } ++ } ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: (after pvr_sync) ui32ClientTAFenceCount=%d, " ++ "ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, " ++ "ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", ++ __func__, ++ ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ++ ui32Client3DFenceCount, ui32Client3DUpdateCount, ++ ui32ClientPRUpdateCount)); ++ } ++ } ++ ++ if (ui32ClientTAFenceCount) ++ { ++ PVR_ASSERT(pauiClientTAFenceUFOAddress); ++ if (!bTAFenceOnSyncCheckpointsOnly) ++ { ++ PVR_ASSERT(paui32ClientTAFenceValue); ++ } ++ } ++ if (ui32ClientTAUpdateCount) ++ { ++ PVR_ASSERT(pauiClientTAUpdateUFOAddress); ++ if (ui32ClientTAUpdateValueCount>0) ++ { ++ PVR_ASSERT(paui32ClientTAUpdateValue); ++ } ++ } ++ if (ui32Client3DFenceCount) ++ { ++ PVR_ASSERT(pauiClient3DFenceUFOAddress); ++ PVR_ASSERT(b3DFenceOnSyncCheckpointsOnly); ++ } ++ if (ui32Client3DUpdateCount) ++ { ++ 
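++ /* Sanity checks: as with the TA counts above, a non-zero 3D update count must be backed by a UFO address array and, where explicit update values are in use, by a value array. */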
PVR_ASSERT(pauiClient3DUpdateUFOAddress); ++ if (ui32Client3DUpdateValueCount>0) ++ { ++ PVR_ASSERT(paui32Client3DUpdateValue); ++ } ++ } ++ if (ui32ClientPRUpdateCount) ++ { ++ PVR_ASSERT(pauiClientPRUpdateUFOAddress); ++ if (ui32ClientPRUpdateValueCount>0) ++ { ++ PVR_ASSERT(paui32ClientPRUpdateValue); ++ } ++ } ++ ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p> Line ", ++ __func__, ++ ui32ClientTAFenceCount, ++ (void*)paui32ClientTAFenceValue)); ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p> Line ", ++ __func__, ++ ui32ClientTAUpdateCount, ++ (void*)pauiClientTAUpdateUFOAddress)); ++#if (ENABLE_TA3D_UFO_DUMP == 1) ++ DumpUfoList(ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ++ ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), ++ ui32Client3DUpdateCount, ++ pauiClientTAFenceUFOAddress, paui32ClientTAFenceValue, ++ pauiClientTAUpdateUFOAddress, paui32ClientTAUpdateValue, ++ pauiClient3DFenceUFOAddress, NULL, ++ pauiClient3DUpdateUFOAddress, paui32Client3DUpdateValue); ++#endif /* (ENABLE_TA3D_UFO_DUMP == 1) */ ++ ++ /* Command size check */ ++ if (ui32TAFenceCount != ui32ClientTAFenceCount) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of fences" ++ " is different than the actual number (%u != %u)", ++ ui32TAFenceCount, ui32ClientTAFenceCount)); ++ } ++ if (ui32TAUpdateCount != ui32ClientTAUpdateCount) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "TA pre-calculated number of updates" ++ " is different than the actual number (%u != %u)", ++ ui32TAUpdateCount, ui32ClientTAUpdateCount)); ++ } ++ if (!bTestSLRAdd3DCheck && (ui323DFenceCount != ui32Client3DFenceCount)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of fences" ++ " is different than the actual number (%u != %u)", ++ ui323DFenceCount, ui32Client3DFenceCount)); ++ } ++ if (ui323DUpdateCount != ui32Client3DUpdateCount) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "3D pre-calculated number of updates" ++ " is different than the actual number (%u != %u)", ++ ui323DUpdateCount, ui32Client3DUpdateCount)); ++ } ++ if (ui32PRUpdateCount != ui32ClientPRUpdateCount) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "PR pre-calculated number of updates" ++ " is different than the actual number (%u != %u)", ++ ui32PRUpdateCount, ui32ClientPRUpdateCount)); ++ } ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ if (bKickTA || bKick3D || bAbort) ++ { ++ sWorkloadCharacteristics.sTA3D.ui32RenderTargetSize = ui32RenderTargetSize; ++ sWorkloadCharacteristics.sTA3D.ui32NumberOfDrawCalls = ui32NumberOfDrawCalls; ++ sWorkloadCharacteristics.sTA3D.ui32NumberOfIndices = ui32NumberOfIndices; ++ sWorkloadCharacteristics.sTA3D.ui32NumberOfMRTs = ui32NumberOfMRTs; ++ } ++#endif ++ ++ /* Init and acquire to TA command if required */ ++ if (bKickTA) ++ { ++ RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData; ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Prepare workload estimation */ ++ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, ++ &psRenderContext->sWorkEstData, ++ &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sDataTA, ++ RGXFWIF_CCB_CMD_TYPE_GEOM, ++ &sWorkloadCharacteristics, ++ ui64DeadlineInus, ++ &sWorkloadKickDataTA); ++#endif ++ ++ /* Init the TA command helper */ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", ++ __func__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount)); ++ 
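++ /* Rough shape of the client CCB helper flow driven from here on (all of these calls appear in this function): ++ * RGXCmdHelperInitCmdCCB_CommandSize() - size each command from the pre-calculated fence/update counts ++ * RGXCheckSpaceCCB() - fail early if the client CCB cannot hold the commands ++ * RGXCmdHelperInitCmdCCB_OtherData() - fill in the fence/update/DM command data ++ * RGXCmdHelperAcquireCmdCCB() - reserve the CCB space ++ * RGXCmdHelperReleaseCmdCCB() - commit the commands so the FW can consume them ++ */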
RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(psTAData->psServerCommonContext), ++ ui32ClientTAFenceCount, ++ pauiClientTAFenceUFOAddress, ++ paui32ClientTAFenceValue, ++ ui32ClientTAUpdateCount, ++ pauiClientTAUpdateUFOAddress, ++ paui32ClientTAUpdateValue, ++ ui32TACmdSize, ++ pui8TADMCmd, ++ &pPreAddr, ++ (bKick3D ? NULL : &pPostAddr), ++ (bKick3D ? NULL : &pRMWUFOAddr), ++ RGXFWIF_CCB_CMD_TYPE_GEOM, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ ui32PDumpFlags, ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ &sWorkloadKickDataTA, ++#else ++ NULL, ++#endif ++ "TA", ++ bCCBStateOpen, ++ pasTACmdHelperData); ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* The following is used to determine the offset of the command header containing ++ the workload estimation data so that can be accessed when the KCCB is read */ ++ ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData); ++#endif ++ ++ eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", ++ __func__, eError)); ++ goto fail_taacquirecmd; ++ } ++ else ++ { ++ ui32TACmdCount++; ++ } ++ } ++ ++ /* Only kick the 3D if required */ ++ if (bKickPR) ++ { ++ RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; ++ ++ /* ++ The command helper doesn't know about the PR fence so create ++ the command with all the fences against it and later create ++ the PR command itself which _must_ come after the PR fence. ++ */ ++ sPRUFO.puiAddrUFO = uiPRFenceUFOAddress; ++ sPRUFO.ui32Value = ui32PRFenceValue; ++ ++ /* Init the PR fence command helper */ ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d", ++ __func__, ui32Client3DFenceCount)); ++ RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), ++ ui32Client3DFenceCount + (bTestSLRAdd3DCheck ? 1 : 0), ++ pauiClient3DFenceUFOAddress, ++ NULL, ++ 0, ++ NULL, ++ NULL, ++ sizeof(sPRUFO), ++ (IMG_UINT8*) &sPRUFO, ++ NULL, ++ NULL, ++ NULL, ++ RGXFWIF_CCB_CMD_TYPE_FENCE_PR, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ ui32PDumpFlags, ++ NULL, ++ "3D-PR-Fence", ++ bCCBStateOpen, ++ &pas3DCmdHelperData[ui323DCmdCount++]); ++ ++ /* Init the 3D PR command helper */ ++ /* ++ Updates for Android (fence sync and Timeline sync prim) are provided in the PR-update ++ if no 3D is present. This is so the timeline update cannot happen out of order with any ++ other 3D already in flight for the same timeline (PR-updates are done in the 3D cCCB). ++ This out of order timeline sync prim update could happen if we attach it to the TA update. ++ */ ++ if (ui32ClientPRUpdateCount) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Line %d, ui32ClientPRUpdateCount=%d, " ++ "pauiClientPRUpdateUFOAddress=0x%x, " ++ "ui32ClientPRUpdateValueCount=%d, " ++ "paui32ClientPRUpdateValue=0x%x", ++ __func__, __LINE__, ui32ClientPRUpdateCount, ++ pauiClientPRUpdateUFOAddress->ui32Addr, ++ ui32ClientPRUpdateValueCount, ++ (ui32ClientPRUpdateValueCount == 0) ? 
PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue)); ++ } ++ ++ if (!bUseCombined3DAnd3DPR) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d", ++ __func__, ui32ClientPRUpdateCount)); ++ RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), ++ 0, ++ NULL, ++ NULL, ++ ui32ClientPRUpdateCount, ++ pauiClientPRUpdateUFOAddress, ++ paui32ClientPRUpdateValue, ++ pui83DPRDMCmd ? ui323DPRCmdSize : ui323DCmdSize, // If the client has not provided a 3DPR command, the regular 3D command should be used instead ++ pui83DPRDMCmd ? pui83DPRDMCmd : pui83DDMCmd, ++ NULL, ++ NULL, ++ NULL, ++ RGXFWIF_CCB_CMD_TYPE_3D_PR, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ ui32PDumpFlags, ++ NULL, ++ "3D-PR", ++ bCCBStateOpen, ++ &pas3DCmdHelperData[ui323DCmdCount++]); ++ } ++ } ++ ++ if (bKick3D || bAbort) ++ { ++ RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData; ++ const RGXFWIF_CCB_CMD_TYPE e3DCmdType = bAbort ? RGXFWIF_CCB_CMD_TYPE_ABORT : RGXFWIF_CCB_CMD_TYPE_3D; ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Prepare workload estimation */ ++ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice, ++ &psRenderContext->sWorkEstData, ++ &psRenderContext->sWorkEstData.uWorkloadMatchingData.sTA3D.sData3D, ++ e3DCmdType, ++ &sWorkloadCharacteristics, ++ ui64DeadlineInus, ++ &sWorkloadKickData3D); ++#endif ++ ++ /* Init the 3D command helper */ ++ RGXCmdHelperInitCmdCCB_OtherData(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext), ++ bKickTA ? 0 : ui32Client3DFenceCount, /* For a kick with a TA, the 3D fences are added before the PR command instead */ ++ bKickTA ? NULL : pauiClient3DFenceUFOAddress, ++ NULL, ++ ui32Client3DUpdateCount, ++ pauiClient3DUpdateUFOAddress, ++ paui32Client3DUpdateValue, ++ ui323DCmdSize, ++ pui83DDMCmd, ++ (bKickTA ? NULL : &pPreAddr), ++ &pPostAddr, ++ &pRMWUFOAddr, ++ e3DCmdType, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ ui32PDumpFlags, ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ &sWorkloadKickData3D, ++#else ++ NULL, ++#endif ++ "3D", ++ bCCBStateOpen, ++ &pas3DCmdHelperData[ui323DCmdCount++]); ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* The following are used to determine the offset of the command header containing the workload estimation ++ data so that can be accessed when the KCCB is read */ ++ ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]); ++ ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1); ++#endif ++ } ++ ++ /* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */ ++ if (unlikely(ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS)) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); ++ goto fail_3dcmdinit; ++ } ++ ++ if (ui323DCmdCount) ++ { ++ PVR_ASSERT(bKickPR || bKick3D); ++ ++ /* Acquire space for all the 3D command(s) */ ++ eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, pas3DCmdHelperData); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ /* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling ++ * of a new TA command with the same Write offset in Kernel CCB. 
++ */ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", __func__, eError)); ++ goto fail_3dacquirecmd; ++ } ++ } ++ ++ /* ++ We should acquire the space in the kernel CCB here as after this point ++ we release the commands which will take operations on server syncs ++ which can't be undone ++ */ ++ ++ /* ++ Everything is ready to go now, release the commands ++ */ ++ if (ui32TACmdCount) ++ { ++ ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); ++ RGXCmdHelperReleaseCmdCCB(ui32TACmdCount, ++ pasTACmdHelperData, ++ "TA", ++ FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr); ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext)); ++ ++ /* This checks if the command would wrap around at the end of the CCB and therefore would start at an ++ offset of 0 rather than the current command offset */ ++ if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck) ++ { ++ ui32TACommandOffset = ui32TACmdOffset; ++ } ++ else ++ { ++ ui32TACommandOffset = 0; ++ } ++#endif ++ } ++ ++ if (ui323DCmdCount) ++ { ++ ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); ++ RGXCmdHelperReleaseCmdCCB(ui323DCmdCount, ++ pas3DCmdHelperData, ++ "3D", ++ FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr); ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext)); ++ ++ if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck) ++ { ++ ui323DCommandOffset = ui323DCmdOffset; ++ } ++ else ++ { ++ ui323DCommandOffset = 0; ++ } ++#endif ++ } ++ ++ if (ui32TACmdCount) ++ { ++ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr; ++ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext); ++ CMDTA3D_SHARED *psGeomCmdShared = IMG_OFFSET_ADDR(pui8TADMCmd, 0); ++ ++ sTACmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext); ++ sTACmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); ++ sTACmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); ++ ++ /* Add the Workload data into the KCCB kick */ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ ++ sTACmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset; ++#else ++ sTACmdKickData.ui32WorkEstCmdHeaderOffset = 0; ++#endif ++ ++ eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTACmdKickData.apsCleanupCtl, ++ &sTACmdKickData.ui32NumCleanupCtl, ++ RGXFWIF_DM_GEOM, ++ bKickTA, ++ psKMHWRTDataSet, ++ psZSBuffer, ++ psMSAAScratchBuffer); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", ++ __func__, eError)); ++ goto fail_taattachcleanupctls; ++ } ++ ++ if (psGeomCmdShared) ++ { ++ HTBLOGK(HTB_SF_MAIN_KICK_TA, ++ sTACmdKickData.psContext, ++ ui32TACmdOffset, ++ psGeomCmdShared->sCmn.ui32FrameNum, ++ ui32ExtJobRef, ++ ui32IntJobRef ++ ); ++ } ++ ++ RGXSRV_HWPERF_ENQ(psRenderContext, ++ OSGetCurrentClientProcessIDKM(), ++ ui32FWCtx, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ 
RGX_HWPERF_KICK_TYPE_TA, ++ iCheckTAFence, ++ iUpdateTAFence, ++ iUpdateTATimeline, ++ uiCheckTAFenceUID, ++ uiUpdateTAFenceUID, ++ ui64DeadlineInus, ++ WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickDataTA)); ++ ++ if (!bUseSingleFWCommand) ++ { ++ /* Construct the kernel TA CCB command. */ ++ RGXFWIF_KCCB_CMD sTAKCCBCmd; ++ sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; ++ sTAKCCBCmd.uCmdData.sCmdKickData = sTACmdKickData; ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, ++ RGXFWIF_DM_GEOM, ++ &sTAKCCBCmd, ++ ui32PDumpFlags); ++ if (eError2 != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ } ++ ++ if (eError2 != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKicKTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2)); ++ if (eError == PVRSRV_OK) ++ { ++ eError = eError2; ++ } ++ goto fail_taacquirecmd; ++ } ++ ++ PVRGpuTraceEnqueueEvent(psRenderContext->psDeviceNode->pvDevice, ++ ui32FWCtx, ui32ExtJobRef, ui32IntJobRef, ++ RGX_HWPERF_KICK_TYPE_TA3D); ++ } ++ ++ if (ui323DCmdCount) ++ { ++ RGXFWIF_KCCB_CMD s3DKCCBCmd = { 0 }; ++ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr; ++ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext); ++ CMDTA3D_SHARED *ps3DCmdShared = IMG_OFFSET_ADDR(pui83DDMCmd, 0); ++ ++ s3DCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext); ++ s3DCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); ++ s3DCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); ++ ++ /* Add the Workload data into the KCCB kick */ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */ ++ s3DCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset; ++#else ++ s3DCmdKickData.ui32WorkEstCmdHeaderOffset = 0; ++#endif ++ ++ eError = AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DCmdKickData.apsCleanupCtl, ++ &s3DCmdKickData.ui32NumCleanupCtl, ++ RGXFWIF_DM_3D, ++ bKick3D, ++ psKMHWRTDataSet, ++ psZSBuffer, ++ psMSAAScratchBuffer); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", ++ __func__, eError)); ++ goto fail_3dattachcleanupctls; ++ } ++ ++ if (ps3DCmdShared) ++ { ++ HTBLOGK(HTB_SF_MAIN_KICK_3D, ++ s3DCmdKickData.psContext, ++ ui323DCmdOffset, ++ ps3DCmdShared->sCmn.ui32FrameNum, ++ ui32ExtJobRef, ++ ui32IntJobRef ++ ); ++ } ++ ++ RGXSRV_HWPERF_ENQ(psRenderContext, ++ OSGetCurrentClientProcessIDKM(), ++ ui32FWCtx, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ RGX_HWPERF_KICK_TYPE_3D, ++ iCheck3DFence, ++ iUpdate3DFence, ++ iUpdate3DTimeline, ++ uiCheck3DFenceUID, ++ uiUpdate3DFenceUID, ++ ui64DeadlineInus, ++ WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickData3D)); ++ ++ if (bUseSingleFWCommand) ++ { ++ /* Construct the kernel TA/3D CCB command. */ ++ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_COMBINED_TA_3D_KICK; ++ s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.sTACmdKickData = sTACmdKickData; ++ s3DKCCBCmd.uCmdData.sCombinedTA3DCmdKickData.s3DCmdKickData = s3DCmdKickData; ++ } ++ else ++ { ++ /* Construct the kernel 3D CCB command. 
*/ ++ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; ++ s3DKCCBCmd.uCmdData.sCmdKickData = s3DCmdKickData; ++ } ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice, ++ RGXFWIF_DM_3D, ++ &s3DKCCBCmd, ++ ui32PDumpFlags); ++ if (eError2 != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ } ++ ++ if (eError2 != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKicKTA3DKM failed to schedule kernel CCB command. (0x%x)", eError2)); ++ if (eError == PVRSRV_OK) ++ { ++ eError = eError2; ++ } ++ } ++ ++ /* ++ * Now check eError (which may have returned an error from our earlier calls ++ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first ++ * so we check it now... ++ */ ++ if (unlikely(eError != PVRSRV_OK )) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d, Line", ++ __func__, eError)); ++ goto fail_3dacquirecmd; ++ } ++ ++#if defined(NO_HARDWARE) ++ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ ++ if (psUpdateTASyncCheckpoint) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Signalling NOHW sync checkpoint [TA] <%p>, ID:%d, FwAddr=0x%x", ++ __func__, (void*)psUpdateTASyncCheckpoint, ++ SyncCheckpointGetId(psUpdateTASyncCheckpoint), ++ SyncCheckpointGetFirmwareAddr(psUpdateTASyncCheckpoint))); ++ SyncCheckpointSignalNoHW(psUpdateTASyncCheckpoint); ++ } ++ if (psTAFenceTimelineUpdateSync) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Updating NOHW sync prim [TA] <%p> to %d", ++ __func__, (void*)psTAFenceTimelineUpdateSync, ++ ui32TAFenceTimelineUpdateValue)); ++ SyncPrimNoHwUpdate(psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue); ++ } ++ ++ if (psUpdate3DSyncCheckpoint) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Signalling NOHW sync checkpoint [3D] <%p>, ID:%d, FwAddr=0x%x", ++ __func__, (void*)psUpdate3DSyncCheckpoint, ++ SyncCheckpointGetId(psUpdate3DSyncCheckpoint), ++ SyncCheckpointGetFirmwareAddr(psUpdate3DSyncCheckpoint))); ++ SyncCheckpointSignalNoHW(psUpdate3DSyncCheckpoint); ++ } ++ if (ps3DFenceTimelineUpdateSync) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: Updating NOHW sync prim [3D] <%p> to %d", ++ __func__, (void*)ps3DFenceTimelineUpdateSync, ++ ui323DFenceTimelineUpdateValue)); ++ SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); ++ } ++ SyncCheckpointNoHWUpdateTimelines(NULL); ++ ++#endif /* defined(NO_HARDWARE) */ ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ if (psBufferSyncData) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, ++ "%s: calling pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...", ++ __func__, (void*)psBufferSyncData)); ++ pvr_buffer_sync_kick_succeeded(psBufferSyncData); ++ } ++ if (apsBufferFenceSyncCheckpoints) ++ { ++ kfree(apsBufferFenceSyncCheckpoints); ++ } ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ ++ if (piUpdateTAFence) ++ { ++ *piUpdateTAFence = iUpdateTAFence; ++ } ++ if (piUpdate3DFence) ++ { ++ *piUpdate3DFence = iUpdate3DFence; ++ } ++ ++ /* Drop the references taken on the sync checkpoints in the ++ * resolved input fence. ++ * NOTE: 3D fence is always submitted, either via 3D or TA(PR). 
++ */ ++ if (bKickTA) ++ { ++ SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); ++ } ++ SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); ++ ++ if (pvTAUpdateFenceFinaliseData && (iUpdateTAFence != PVRSRV_NO_FENCE)) ++ { ++ SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdateTAFence, ++ pvTAUpdateFenceFinaliseData, ++ psUpdateTASyncCheckpoint, szFenceNameTA); ++ } ++ if (pv3DUpdateFenceFinaliseData && (iUpdate3DFence != PVRSRV_NO_FENCE)) ++ { ++ SyncCheckpointFinaliseFence(psRenderContext->psDeviceNode, iUpdate3DFence, ++ pv3DUpdateFenceFinaliseData, ++ psUpdate3DSyncCheckpoint, szFenceName3D); ++ } ++ ++ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ ++ if (apsFenceTASyncCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); ++ } ++ if (apsFence3DSyncCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); ++ } ++ ++ if (sTASyncData.paui32ClientUpdateValue) ++ { ++ OSFreeMem(sTASyncData.paui32ClientUpdateValue); ++ } ++ if (s3DSyncData.paui32ClientUpdateValue) ++ { ++ OSFreeMem(s3DSyncData.paui32ClientUpdateValue); ++ } ++ ++#if defined(SUPPORT_VALIDATION) ++ if (bTestSLRAdd3DCheck) ++ { ++ SyncCheckpointFree(psDummySyncCheckpoint); ++ } ++#endif ++ OSLockRelease(psRenderContext->hLock); ++ ++ return PVRSRV_OK; ++ ++fail_3dattachcleanupctls: ++fail_taattachcleanupctls: ++fail_3dacquirecmd: ++fail_3dcmdinit: ++fail_taacquirecmd: ++ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence); ++ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAUpdate); ++ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence); ++ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate); ++ /* Where a TA-only kick (ie no 3D) is submitted, the PR update will make use of the unused 3DUpdate list. ++ * If this has happened, performing a rollback on pauiClientPRUpdateUFOAddress will simply repeat what ++ * has already been done for the sSyncAddrList3DUpdate above and result in a double decrement of the ++ * sync checkpoint's hEnqueuedCCBCount, so we need to check before rolling back the PRUpdate. ++ */ ++ if (pauiClientPRUpdateUFOAddress && (pauiClientPRUpdateUFOAddress != psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs)) ++ { ++ SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr); ++ } ++ ++fail_alloc_update_values_mem_3D: ++ if (iUpdate3DFence != PVRSRV_NO_FENCE) ++ { ++ SyncCheckpointRollbackFenceData(iUpdate3DFence, pv3DUpdateFenceFinaliseData); ++ } ++fail_create_3d_fence: ++fail_alloc_update_values_mem_TA: ++ if (iUpdateTAFence != PVRSRV_NO_FENCE) ++ { ++ SyncCheckpointRollbackFenceData(iUpdateTAFence, pvTAUpdateFenceFinaliseData); ++ } ++fail_create_ta_fence: ++#if !defined(SUPPORT_BUFFER_SYNC) ++err_no_buffer_sync_invalid_params: ++#endif /* !defined(SUPPORT_BUFFER_SYNC) */ ++err_pr_fence_address: ++err_populate_sync_addr_list_3d_update: ++err_populate_sync_addr_list_3d_fence: ++err_populate_sync_addr_list_ta_update: ++err_populate_sync_addr_list_ta_fence: ++err_not_enough_space: ++ /* Drop the references taken on the sync checkpoints in the ++ * resolved input fence. ++ * NOTE: 3D fence is always submitted, either via 3D or TA(PR). 
++ */ ++#if defined(SUPPORT_BUFFER_SYNC) ++ SyncAddrListDeRefCheckpoints(ui32BufferFenceSyncCheckpointCount, ++ apsBufferFenceSyncCheckpoints); ++#endif ++ SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints); ++fail_resolve_input_3d_fence: ++ if (bKickTA) ++ { ++ SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints); ++ } ++fail_resolve_input_ta_fence: ++ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ ++ if (apsFenceTASyncCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints); ++ } ++ if (apsFence3DSyncCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints); ++ } ++ if (sTASyncData.paui32ClientUpdateValue) ++ { ++ OSFreeMem(sTASyncData.paui32ClientUpdateValue); ++ } ++ if (s3DSyncData.paui32ClientUpdateValue) ++ { ++ OSFreeMem(s3DSyncData.paui32ClientUpdateValue); ++ } ++#if defined(SUPPORT_VALIDATION) ++ if (bTestSLRAdd3DCheck) ++ { ++ SyncCheckpointFree(psDummySyncCheckpoint); ++ } ++#endif ++#if defined(SUPPORT_BUFFER_SYNC) ++ if (psBufferSyncData) ++ { ++ pvr_buffer_sync_kick_failed(psBufferSyncData); ++ } ++ if (apsBufferFenceSyncCheckpoints) ++ { ++ kfree(apsBufferFenceSyncCheckpoints); ++ } ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ PVR_ASSERT(eError != PVRSRV_OK); ++ OSLockRelease(psRenderContext->hLock); ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ RGX_SERVER_RENDER_CONTEXT *psRenderContext, ++ IMG_UINT32 ui32Priority) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ ++ OSLockAcquire(psRenderContext->hLock); ++ ++ if (psRenderContext->sTAData.ui32Priority != ui32Priority) ++ { ++ eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext, ++ psConnection, ++ psRenderContext->psDeviceNode->pvDevice, ++ ui32Priority, ++ RGXFWIF_DM_GEOM); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to set the priority of the TA part of the rendercontext (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ goto fail_tacontext; ++ } ++ psRenderContext->sTAData.ui32Priority = ui32Priority; ++ } ++ ++ if (psRenderContext->s3DData.ui32Priority != ui32Priority) ++ { ++ eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext, ++ psConnection, ++ psRenderContext->psDeviceNode->pvDevice, ++ ui32Priority, ++ RGXFWIF_DM_3D); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to set the priority of the 3D part of the rendercontext (%s)", ++ __func__, PVRSRVGetErrorString(eError))); ++ goto fail_3dcontext; ++ } ++ psRenderContext->s3DData.ui32Priority = ui32Priority; ++ } ++ ++ OSLockRelease(psRenderContext->hLock); ++ return PVRSRV_OK; ++ ++fail_3dcontext: ++fail_tacontext: ++ OSLockRelease(psRenderContext->hLock); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++ ++PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ++ RGX_CONTEXT_PROPERTY eContextProperty, ++ IMG_UINT64 ui64Input, ++ IMG_UINT64 *pui64Output) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ switch (eContextProperty) ++ { ++ case RGX_CONTEXT_PROPERTY_FLAGS: ++ { ++ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input; ++ ++ OSLockAcquire(psRenderContext->hLock); ++ eError = FWCommonContextSetFlags(psRenderContext->sTAData.psServerCommonContext, ++ ui32ContextFlags); ++ if (eError == PVRSRV_OK) ++ 
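++ /* Only propagate the flags to the 3D common context if the TA context accepted them, so both halves of the render context stay consistent. */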
{ ++ eError = FWCommonContextSetFlags(psRenderContext->s3DData.psServerCommonContext, ++ ui32ContextFlags); ++ } ++ OSLockRelease(psRenderContext->hLock); ++ break; ++ } ++ ++ default: ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); ++ eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ } ++ } ++ ++ return eError; ++} ++ ++ ++void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock); ++ dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); ++ ++ DumpFWCommonContextInfo(psCurrentServerRenderCtx->sTAData.psServerCommonContext, ++ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ DumpFWCommonContextInfo(psCurrentServerRenderCtx->s3DData.psServerCommonContext, ++ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ } ++ OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock); ++} ++ ++IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ IMG_UINT32 ui32ContextBitMask = 0; ++ ++ OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock); ++ ++ dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode); ++ if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext) ++ { ++ if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED) ++ { ++ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA; ++ } ++ } ++ ++ if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext) ++ { ++ if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED) ++ { ++ ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D; ++ } ++ } ++ } ++ ++ OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock); ++ return ui32ContextBitMask; ++} ++ ++/* ++ * RGXRenderContextStalledKM ++ */ ++PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext) ++{ ++ RGXCheckForStalledClientContexts((PVRSRV_RGXDEV_INFO *) psRenderContext->psDeviceNode->pvDevice, IMG_TRUE); ++ return PVRSRV_OK; ++} ++ ++/****************************************************************************** ++ End of file (rgxta3d.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxta3d.h b/drivers/gpu/drm/img-rogue/rgxta3d.h +new file mode 100644 +index 000000000000..89a5b225a1ed +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxta3d.h +@@ -0,0 +1,502 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX TA and 3D Functionality ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX TA and 3D Functionality ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef RGXTA3D_H ++#define RGXTA3D_H ++ ++#include "devicemem.h" ++#include "devicemem_server.h" ++#include "device.h" ++#include "rgxdevice.h" ++#include "rgx_fwif_shared.h" ++#include "rgx_fwif_resetframework.h" ++#include "sync_server.h" ++#include "connection_server.h" ++#include "rgxdebug.h" ++#include "pvr_notifier.h" ++ ++typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT; ++typedef struct _RGX_FREELIST_ RGX_FREELIST; ++typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE; ++ ++/***************************************************************************** ++ * The Design of Data Storage System for Render Targets * ++ * ==================================================== * ++ * Relevant for * ++ * understanding RGXCreateHWRTDataSet & RGXDestroyHWRTDataSet * ++ * * ++ * * ++ * +=========================================+ * ++ * | RenderTargetDataSet | * ++ * +---------------|---------|---------------+ * ++ * | | * ++ * V V * ++ * +- - - - - - - - - - - - + +- - - - - - - - - - - - + * ++ * | KM_HW_RT_DATA_HANDLE_0 | | KM_HW_RT_DATA_HANDLE_1 | * ++ * +- - -|- - - - - - - - - + +- - - - - - - - - | - - + * ++ * | | * ++ * | | [UM]Client * ++ * ------|-----------------------------------------|----------------------- * ++ * | | Bridge * ++ * ------|-----------------------------------------|----------------------- * ++ * | | [KM]Server * ++ * | | * ++ * | KM-ptr | KM-ptr * ++ * V V * ++ * +====================+ +====================+ * ++ * | KM_HW_RT_DATA_0 | | KM_HW_RT_DATA_1 | * ++ * +-----|------------|-+ +-|------------|-----+ * ++ * | | | | * ++ * | | | | * ++ * | | | | * ++ * | | | | * ++ * | | KM-ptr | KM-ptr | * ++ * | V V | * ++ * | +==========================+ | * ++ * | | HW_RT_DATA_COMMON_COOKIE | | * ++ * | +--------------------------+ | * ++ * | | | * ++ * | | | * ++ * ------|-------------------|---------------------|----------------------- * ++ * | | | [FW]Firmware * ++ * | | | * ++ * | FW-addr | | FW-addr * ++ * V | V * ++ * +===============+ | +===============+ * ++ * | HW_RT_DATA_0 | | | HW_RT_DATA_1 | * ++ * +------------|--+ | +--|------------+ * ++ * | | | * ++ * | FW-addr | FW-addr | FW-addr * ++ * V V V * ++ * +=========================================+ * ++ * | HW_RT_DATA_COMMON | * ++ * +-----------------------------------------+ * ++ * * ++ *****************************************************************************/ ++ ++typedef struct _RGX_HWRTDATA_COMMON_COOKIE_ ++{ ++ DEVMEM_MEMDESC *psHWRTDataCommonFwMemDesc; ++ RGXFWIF_DEV_VIRTADDR sHWRTDataCommonFwAddr; ++ IMG_UINT32 ui32RefCount; ++ ++} RGX_HWRTDATA_COMMON_COOKIE; ++ ++typedef struct _RGX_KM_HW_RT_DATASET_ ++{ ++ RGX_HWRTDATA_COMMON_COOKIE *psHWRTDataCommonCookie; ++ ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ RGXFWIF_DEV_VIRTADDR sHWRTDataFwAddr; ++ ++ DEVMEM_MEMDESC *psHWRTDataFwMemDesc; ++ DEVMEM_MEMDESC *psRTArrayFwMemDesc; ++ DEVMEM_MEMDESC *psRendersAccArrayFwMemDesc; ++ ++ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS]; ++#if !defined(SUPPORT_SHADOW_FREELISTS) ++ DLLIST_NODE sNodeHWRTData; ++#endif ++ ++} RGX_KM_HW_RT_DATASET; ++ ++struct _RGX_FREELIST_ { ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ CONNECTION_DATA *psConnection; ++ ++ /* Free list PMR */ ++ PMR *psFreeListPMR; ++ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset; ++ ++ /* Freelist config */ ++ IMG_UINT32 ui32MaxFLPages; ++ IMG_UINT32 ui32InitFLPages; ++ IMG_UINT32 ui32CurrentFLPages; ++ IMG_UINT32 ui32GrowFLPages; ++ IMG_UINT32 ui32ReadyFLPages; ++ 
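/* Sizing policy (as implied by the fields and helpers in this header): the freelist starts at ui32InitFLPages and may be grown on demand in ui32GrowFLPages steps up to ui32MaxFLPages; ui32GrowThreshold below sets the usage percentage that triggers a grow request (see RGXGrowFreeList and RGXProcessRequestGrow) */
++ 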
IMG_UINT32 ui32GrowThreshold; /* Percentage of FL memory used that should trigger a new grow request */ ++ IMG_UINT32 ui32FreelistID; ++ IMG_UINT32 ui32FreelistGlobalID; /* related global freelist for this freelist */ ++ IMG_UINT64 ui64FreelistChecksum; /* checksum over freelist content */ ++ IMG_BOOL bCheckFreelist; /* freelist check enabled */ ++ IMG_UINT32 ui32RefCount; /* freelist reference counting */ ++ ++ IMG_UINT32 ui32NumGrowReqByApp; /* Total number of grow requests by Application */ ++ IMG_UINT32 ui32NumGrowReqByFW; /* Total Number of grow requests by Firmware */ ++ IMG_UINT32 ui32NumHighPages; /* High Mark of pages in the freelist */ ++ ++ IMG_PID ownerPid; /* Pid of the owner of the list */ ++ ++ /* Memory Blocks */ ++ DLLIST_NODE sMemoryBlockHead; ++ DLLIST_NODE sMemoryBlockInitHead; ++ DLLIST_NODE sNode; ++#if !defined(SUPPORT_SHADOW_FREELISTS) ++ /* HWRTData nodes linked to local freelist */ ++ DLLIST_NODE sNodeHWRTDataHead; ++#endif ++ ++ /* FW data structures */ ++ DEVMEM_MEMDESC *psFWFreelistMemDesc; ++ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; ++}; ++ ++struct _RGX_PMR_NODE_ { ++ RGX_FREELIST *psFreeList; ++ PMR *psPMR; ++ PMR_PAGELIST *psPageList; ++ DLLIST_NODE sMemoryBlock; ++ IMG_UINT32 ui32NumPages; ++ IMG_BOOL bFirstPageMissing; ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ RI_HANDLE hRIHandle; ++#endif ++}; ++ ++typedef struct { ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ DEVMEM_MEMDESC *psFWZSBufferMemDesc; ++ RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; ++ ++ DEVMEMINT_RESERVATION *psReservation; ++ PMR *psPMR; ++ DEVMEMINT_MAPPING *psMapping; ++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags; ++ IMG_UINT32 ui32ZSBufferID; ++ IMG_UINT32 ui32RefCount; ++ IMG_BOOL bOnDemand; ++ ++ IMG_BOOL ui32NumReqByApp; /* Number of Backing Requests from Application */ ++ IMG_BOOL ui32NumReqByFW; /* Number of Backing Requests from Firmware */ ++ ++ IMG_PID owner; ++ ++ DLLIST_NODE sNode; ++}RGX_ZSBUFFER_DATA; ++ ++typedef struct { ++ RGX_ZSBUFFER_DATA *psZSBuffer; ++} RGX_POPULATION; ++ ++/* Dump the physical pages of a freelist */ ++IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList); ++ ++ ++/* Create set of HWRTData(s) */ ++PVRSRV_ERROR RGXCreateHWRTDataSet(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_DEV_VIRTADDR asVHeapTableDevVAddr[RGXMKIF_NUM_GEOMDATAS], ++ IMG_DEV_VIRTADDR psPMMListDevVAddr[RGXMKIF_NUM_RTDATAS], ++ RGX_FREELIST *apsFreeLists[RGXMKIF_NUM_RTDATA_FREELISTS], ++ IMG_UINT32 ui32ScreenPixelMax, ++ IMG_UINT64 ui64MultiSampleCtl, ++ IMG_UINT64 ui64FlippedMultiSampleCtl, ++ IMG_UINT32 ui32TPCStride, ++ IMG_DEV_VIRTADDR asTailPtrsDevVAddr[RGXMKIF_NUM_GEOMDATAS], ++ IMG_UINT32 ui32TPCSize, ++ IMG_UINT32 ui32TEScreen, ++ IMG_UINT32 ui32TEAA, ++ IMG_UINT32 ui32TEMTILE1, ++ IMG_UINT32 ui32TEMTILE2, ++ IMG_UINT32 ui32MTileStride, ++ IMG_UINT32 ui32ISPMergeLowerX, ++ IMG_UINT32 ui32ISPMergeLowerY, ++ IMG_UINT32 ui32ISPMergeUpperX, ++ IMG_UINT32 ui32ISPMergeUpperY, ++ IMG_UINT32 ui32ISPMergeScaleX, ++ IMG_UINT32 ui32ISPMergeScaleY, ++ IMG_DEV_VIRTADDR sMacrotileArrayDevVAddr[RGXMKIF_NUM_RTDATAS], ++ IMG_DEV_VIRTADDR sRgnHeaderDevVAddr[RGXMKIF_NUM_RTDATAS], ++ IMG_DEV_VIRTADDR asRTCDevVAddr[RGXMKIF_NUM_GEOMDATAS], ++ IMG_UINT32 uiRgnHeaderSize, ++ IMG_UINT32 ui32ISPMtileSize, ++ IMG_UINT16 ui16MaxRTs, ++ RGX_KM_HW_RT_DATASET *pasKMHWRTDataSet[RGXMKIF_NUM_RTDATAS]); ++ ++/* Destroy HWRTDataSet */ ++PVRSRV_ERROR RGXDestroyHWRTDataSet(RGX_KM_HW_RT_DATASET *psKMHWRTDataSet); ++ ++/* ++ RGXCreateZSBufferKM ++*/ ++PVRSRV_ERROR 
RGXCreateZSBufferKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEMINT_RESERVATION *psReservation, ++ PMR *psPMR, ++ PVRSRV_MEMALLOCFLAGS_T uiMapFlags, ++ RGX_ZSBUFFER_DATA **ppsZSBuffer); ++ ++/* ++ RGXDestroyZSBufferKM ++*/ ++PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer); ++ ++ ++/* ++ * RGXBackingZSBuffer() ++ * ++ * Backs ZS-Buffer with physical pages ++ */ ++PVRSRV_ERROR ++RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); ++ ++/* ++ * RGXPopulateZSBufferKM() ++ * ++ * Backs ZS-Buffer with physical pages (called by Bridge calls) ++ */ ++PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer, ++ RGX_POPULATION **ppsPopulation); ++ ++/* ++ * RGXUnbackingZSBuffer() ++ * ++ * Frees ZS-Buffer's physical pages ++ */ ++PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer); ++ ++/* ++ * RGXUnpopulateZSBufferKM() ++ * ++ * Frees ZS-Buffer's physical pages (called by Bridge calls) ++ */ ++PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation); ++ ++/* ++ RGXProcessRequestZSBufferBacking ++*/ ++void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32ZSBufferID); ++ ++/* ++ RGXProcessRequestZSBufferUnbacking ++*/ ++void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32ZSBufferID); ++ ++/* ++ RGXGrowFreeList ++*/ ++PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList, ++ IMG_UINT32 ui32NumPages, ++ PDLLIST_NODE pListHeader); ++ ++/* Create free list */ ++PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32MaxFLPages, ++ IMG_UINT32 ui32InitFLPages, ++ IMG_UINT32 ui32GrowFLPages, ++ IMG_UINT32 ui32GrowParamThreshold, ++ RGX_FREELIST *psGlobalFreeList, ++ IMG_BOOL bCheckFreelist, ++ IMG_DEV_VIRTADDR sFreeListDevVAddr, ++ PMR *psFreeListPMR, ++ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset, ++ RGX_FREELIST **ppsFreeList); ++ ++/* Destroy free list */ ++PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList); ++ ++/* ++ RGXProcessRequestGrow ++*/ ++void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32FreelistID); ++ ++ ++/* Reconstruct free list after Hardware Recovery */ ++void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32FreelistsCount, ++ const IMG_UINT32 *paui32Freelists); ++ ++/*! 
++******************************************************************************* ++ ++ @Function PVRSRVRGXCreateRenderContextKM ++ ++ @Description ++ Server-side implementation of RGXCreateRenderContext ++ ++ @Input psConnection - ++ @Input psDeviceNode - device node ++ @Input ui32Priority - context priority ++ @Input sVDMCallStackAddr - VDM call stack device virtual address ++ @Input ui32CallStackDepth - VDM call stack depth ++ @Input ui32FrameworkCommandSize - framework command size ++ @Input pabyFrameworkCommand - ptr to framework command ++ @Input hMemCtxPrivData - memory context private data ++ @Input ui32StaticRenderContextStateSize - size of fixed render state ++ @Input pStaticRenderContextState - ptr to fixed render state buffer ++ @Input ui32PackedCCBSizeU8888 : ++ ui8TACCBAllocSizeLog2 - TA CCB size ++ ui8TACCBMaxAllocSizeLog2 - maximum size to which TA CCB can grow ++ ui83DCCBAllocSizeLog2 - 3D CCB size ++ ui83DCCBMaxAllocSizeLog2 - maximum size to which 3D CCB can grow ++ @Input ui32ContextFlags - flags which specify properties of the context ++ @Output ppsRenderContext - ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Priority, ++ IMG_DEV_VIRTADDR sVDMCallStackAddr, ++ IMG_UINT32 ui32CallStackDepth, ++ IMG_UINT32 ui32FrameworkCommandSize, ++ IMG_PBYTE pabyFrameworkCommand, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32StaticRenderContextStateSize, ++ IMG_PBYTE pStaticRenderContextState, ++ IMG_UINT32 ui32PackedCCBSizeU8888, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress, ++ IMG_UINT32 ui32MaxTADeadlineMS, ++ IMG_UINT32 ui32Max3DDeadlineMS, ++ RGX_SERVER_RENDER_CONTEXT **ppsRenderContext); ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function PVRSRVRGXDestroyRenderContextKM ++ ++ @Description ++ Server-side implementation of RGXDestroyRenderContext ++ ++ @Input psRenderContext - ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); ++ ++ ++/*! 
++******************************************************************************* ++ ++ @Function PVRSRVRGXKickTA3DKM ++ ++ @Description ++ Server-side implementation of RGXKickTA3D ++ ++ @Input psRTDataCleanup - RT data associated with the kick (or NULL) ++ @Input psZBuffer - Z-buffer associated with the kick (or NULL) ++ @Input psSBuffer - S-buffer associated with the kick (or NULL) ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ++ IMG_UINT32 ui32ClientTAFenceCount, ++ SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock, ++ IMG_UINT32 *paui32ClientTAFenceSyncOffset, ++ IMG_UINT32 *paui32ClientTAFenceValue, ++ IMG_UINT32 ui32ClientTAUpdateCount, ++ SYNC_PRIMITIVE_BLOCK **apsClientUpdateSyncPrimBlock, ++ IMG_UINT32 *paui32ClientUpdateSyncOffset, ++ IMG_UINT32 *paui32ClientTAUpdateValue, ++ IMG_UINT32 ui32Client3DUpdateCount, ++ SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock, ++ IMG_UINT32 *paui32Client3DUpdateSyncOffset, ++ IMG_UINT32 *paui32Client3DUpdateValue, ++ SYNC_PRIMITIVE_BLOCK *psPRSyncPrimBlock, ++ IMG_UINT32 ui32PRSyncOffset, ++ IMG_UINT32 ui32PRFenceValue, ++ PVRSRV_FENCE iCheckFence, ++ PVRSRV_TIMELINE iUpdateTimeline, ++ PVRSRV_FENCE *piUpdateFence, ++ IMG_CHAR szFenceName[PVRSRV_SYNC_NAME_LENGTH], ++ PVRSRV_FENCE iCheckFence3D, ++ PVRSRV_TIMELINE iUpdateTimeline3D, ++ PVRSRV_FENCE *piUpdateFence3D, ++ IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH], ++ IMG_UINT32 ui32TACmdSize, ++ IMG_PBYTE pui8TADMCmd, ++ IMG_UINT32 ui323DPRCmdSize, ++ IMG_PBYTE pui83DPRDMCmd, ++ IMG_UINT32 ui323DCmdSize, ++ IMG_PBYTE pui83DDMCmd, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_BOOL bKickTA, ++ IMG_BOOL bKickPR, ++ IMG_BOOL bKick3D, ++ IMG_BOOL bAbort, ++ IMG_UINT32 ui32PDumpFlags, ++ RGX_KM_HW_RT_DATASET *psKMHWRTDataSet, ++ RGX_ZSBUFFER_DATA *psZSBuffer, ++ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer, ++ IMG_UINT32 ui32SyncPMRCount, ++ IMG_UINT32 *paui32SyncPMRFlags, ++ PMR **ppsSyncPMRs, ++ IMG_UINT32 ui32RenderTargetSize, ++ IMG_UINT32 ui32NumberOfDrawCalls, ++ IMG_UINT32 ui32NumberOfIndices, ++ IMG_UINT32 ui32NumberOfMRTs, ++ IMG_UINT64 ui64DeadlineInus); ++ ++ ++PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDevNode, ++ RGX_SERVER_RENDER_CONTEXT *psRenderContext, ++ IMG_UINT32 ui32Priority); ++ ++PVRSRV_ERROR PVRSRVRGXSetRenderContextPropertyKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext, ++ RGX_CONTEXT_PROPERTY eContextProperty, ++ IMG_UINT64 ui64Input, ++ IMG_UINT64 *pui64Output); ++ ++/* Debug - Dump debug info of render contexts on this device */ ++void DumpRenderCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel); ++ ++/* Debug/Watchdog - check if client contexts are stalled */ ++IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext); ++ ++#endif /* RGXTA3D_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxtdmtransfer.c b/drivers/gpu/drm/img-rogue/rgxtdmtransfer.c +new file mode 100644 +index 000000000000..f341464e71b1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxtdmtransfer.c +@@ -0,0 +1,1329 @@ ++/*************************************************************************/ /*! 
++@File rgxtdmtransfer.c ++@Title Device specific TDM transfer queue routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "pdump_km.h" ++#include "rgxdevice.h" ++#include "rgxccb.h" ++#include "rgxutils.h" ++#include "rgxfwutils.h" ++#include "rgxtdmtransfer.h" ++#include "rgx_tq_shared.h" ++#include "rgxmem.h" ++#include "allocmem.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "osfunc.h" ++#include "pvr_debug.h" ++#include "pvrsrv.h" ++#include "rgx_fwif_resetframework.h" ++#include "rgx_memallocflags.h" ++#include "rgxhwperf.h" ++#include "ospvr_gputrace.h" ++#include "htbuffer.h" ++#include "rgxshader.h" ++ ++#include "pdump_km.h" ++ ++#include "sync_server.h" ++#include "sync_internal.h" ++#include "sync.h" ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++#include "pvr_buffer_sync.h" ++#endif ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++#include "rgxworkest.h" ++#endif ++ ++#include "rgxtimerquery.h" ++ ++/* Enable this to dump the compiled list of UFOs prior to kick call */ ++#define ENABLE_TDM_UFO_DUMP 0 ++ ++//#define TDM_CHECKPOINT_DEBUG 1 ++ ++#if defined(TDM_CHECKPOINT_DEBUG) ++#define CHKPT_DBG(X) PVR_DPF(X) ++#else ++#define CHKPT_DBG(X) ++#endif ++ ++typedef struct { ++ RGX_SERVER_COMMON_CONTEXT * psServerCommonContext; ++ IMG_UINT32 ui32Priority; ++#if defined(SUPPORT_BUFFER_SYNC) ++ struct pvr_buffer_sync_context *psBufferSyncContext; ++#endif ++} RGX_SERVER_TQ_TDM_DATA; ++ ++ ++struct _RGX_SERVER_TQ_TDM_CONTEXT_ { ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ DEVMEM_MEMDESC *psFWFrameworkMemDesc; ++ DEVMEM_MEMDESC *psFWTransferContextMemDesc; ++ IMG_UINT32 ui32Flags; ++ RGX_SERVER_TQ_TDM_DATA sTDMData; ++ DLLIST_NODE sListNode; ++ SYNC_ADDR_LIST sSyncAddrListFence; ++ SYNC_ADDR_LIST sSyncAddrListUpdate; ++ POS_LOCK hLock; ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ WORKEST_HOST_DATA sWorkEstData; ++#endif ++}; ++ ++static PVRSRV_ERROR _CreateTDMTransferContext( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ DEVMEM_MEMDESC * psAllocatedMemDesc, ++ IMG_UINT32 ui32AllocatedOffset, ++ DEVMEM_MEMDESC * psFWMemContextMemDesc, ++ IMG_UINT32 ui32Priority, ++ RGX_COMMON_CONTEXT_INFO * psInfo, ++ RGX_SERVER_TQ_TDM_DATA * psTDMData, ++ IMG_UINT32 ui32CCBAllocSizeLog2, ++ IMG_UINT32 ui32CCBMaxAllocSizeLog2, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress) ++{ ++ PVRSRV_ERROR eError; ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ psTDMData->psBufferSyncContext = ++ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, ++ "rogue-tdm"); ++ if (IS_ERR(psTDMData->psBufferSyncContext)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to create buffer_sync context (err=%ld)", ++ __func__, PTR_ERR(psTDMData->psBufferSyncContext))); ++ ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto fail_buffer_sync_context_create; ++ } ++#endif ++ ++ eError = FWCommonContextAllocate( ++ psConnection, ++ psDeviceNode, ++ REQ_TYPE_TQ_TDM, ++ RGXFWIF_DM_TDM, ++ NULL, ++ psAllocatedMemDesc, ++ ui32AllocatedOffset, ++ psFWMemContextMemDesc, ++ NULL, ++ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TDM_CCB_SIZE_LOG2, ++ ui32CCBMaxAllocSizeLog2 ? 
ui32CCBMaxAllocSizeLog2 : RGX_TDM_CCB_MAX_SIZE_LOG2,
++		ui32ContextFlags,
++		ui32Priority,
++		UINT_MAX, /* max deadline MS */
++		ui64RobustnessAddress,
++		psInfo,
++		&psTDMData->psServerCommonContext);
++	if (eError != PVRSRV_OK)
++	{
++		goto fail_contextalloc;
++	}
++
++	psTDMData->ui32Priority = ui32Priority;
++	return PVRSRV_OK;
++
++fail_contextalloc:
++#if defined(SUPPORT_BUFFER_SYNC)
++	pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
++	psTDMData->psBufferSyncContext = NULL;
++fail_buffer_sync_context_create:
++#endif
++	PVR_ASSERT(eError != PVRSRV_OK);
++	return eError;
++}
++
++
++static PVRSRV_ERROR _DestroyTDMTransferContext(
++	RGX_SERVER_TQ_TDM_DATA * psTDMData,
++	PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++	PVRSRV_ERROR eError;
++
++	/* Check if the FW has finished with this resource ... */
++	eError = RGXFWRequestCommonContextCleanUp(
++		psDeviceNode,
++		psTDMData->psServerCommonContext,
++		RGXFWIF_DM_TDM,
++		PDUMP_FLAGS_CONTINUOUS);
++	if (eError == PVRSRV_ERROR_RETRY)
++	{
++		return eError;
++	}
++	else if (eError != PVRSRV_OK)
++	{
++		PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
++		         __func__,
++		         PVRSRVGetErrorString(eError)));
++		return eError;
++	}
++
++	/* ... it has, so we can free its resources */
++	FWCommonContextFree(psTDMData->psServerCommonContext);
++
++#if defined(SUPPORT_BUFFER_SYNC)
++	pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
++	psTDMData->psBufferSyncContext = NULL;
++#endif
++
++	return PVRSRV_OK;
++}
++
++/*
++ * PVRSRVRGXTDMCreateTransferContextKM
++ */
++PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
++	CONNECTION_DATA * psConnection,
++	PVRSRV_DEVICE_NODE * psDeviceNode,
++	IMG_UINT32 ui32Priority,
++	IMG_UINT32 ui32FrameworkCommandSize,
++	IMG_PBYTE pabyFrameworkCommand,
++	IMG_HANDLE hMemCtxPrivData,
++	IMG_UINT32 ui32PackedCCBSizeU88,
++	IMG_UINT32 ui32ContextFlags,
++	IMG_UINT64 ui64RobustnessAddress,
++	RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext)
++{
++	RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext;
++
++	DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
++	PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice;
++	RGX_COMMON_CONTEXT_INFO sInfo = {NULL};
++	PVRSRV_ERROR eError = PVRSRV_OK;
++
++	/* Allocate the server side structure */
++	*ppsTransferContext = NULL;
++	psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
++	if (psTransferContext == NULL)
++	{
++		return PVRSRV_ERROR_OUT_OF_MEMORY;
++	}
++
++	/*
++	   Create the FW transfer context; this has the TDM common
++	   context embedded within it
++	 */
++	eError = DevmemFwAllocate(psDevInfo,
++	                          sizeof(RGXFWIF_FWTDMCONTEXT),
++	                          RGX_FWCOMCTX_ALLOCFLAGS,
++	                          "FwTransferContext",
++	                          &psTransferContext->psFWTransferContextMemDesc);
++	if (eError != PVRSRV_OK)
++	{
++		goto fail_fwtransfercontext;
++	}
++
++	eError = OSLockCreate(&psTransferContext->hLock);
++
++	if (eError != PVRSRV_OK)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
++		         __func__,
++		         PVRSRVGetErrorString(eError)));
++		goto fail_lockcreate;
++	}
++
++	psTransferContext->psDeviceNode = psDeviceNode;
++
++	if (ui32FrameworkCommandSize)
++	{
++		/*
++		 * Create the FW framework buffer
++		 */
++		eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
++		                                    &psTransferContext->psFWFrameworkMemDesc,
++		                                    ui32FrameworkCommandSize);
++		if (eError != PVRSRV_OK)
++		{
++			PVR_DPF((PVR_DBG_ERROR,
++			         "%s: Failed to allocate firmware GPU framework state (%s)",
++			         __func__,
++			         PVRSRVGetErrorString(eError)));
++			goto 
fail_frameworkcreate; ++ } ++ ++ /* Copy the Framework client data into the framework buffer */ ++ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode, ++ psTransferContext->psFWFrameworkMemDesc, ++ pabyFrameworkCommand, ++ ui32FrameworkCommandSize); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to populate the framework buffer (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_frameworkcopy; ++ } ++ ++ sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc; ++ } ++ ++ eError = _CreateTDMTransferContext(psConnection, ++ psDeviceNode, ++ psTransferContext->psFWTransferContextMemDesc, ++ offsetof(RGXFWIF_FWTDMCONTEXT, sTDMContext), ++ psFWMemContextMemDesc, ++ ui32Priority, ++ &sInfo, ++ &psTransferContext->sTDMData, ++ U32toU8_Unpack1(ui32PackedCCBSizeU88), ++ U32toU8_Unpack2(ui32PackedCCBSizeU88), ++ ui32ContextFlags, ++ ui64RobustnessAddress); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_tdmtransfercontext; ++ } ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) ++ { ++ WorkEstInitTDM(psDevInfo, &psTransferContext->sWorkEstData); ++ } ++#endif ++ ++ SyncAddrListInit(&psTransferContext->sSyncAddrListFence); ++ SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate); ++ ++ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); ++ dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); ++ *ppsTransferContext = psTransferContext; ++ ++ return PVRSRV_OK; ++ ++fail_tdmtransfercontext: ++fail_frameworkcopy: ++ if (psTransferContext->psFWFrameworkMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); ++ } ++fail_frameworkcreate: ++ OSLockDestroy(psTransferContext->hLock); ++fail_lockcreate: ++ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); ++fail_fwtransfercontext: ++ OSFreeMem(psTransferContext); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ *ppsTransferContext = NULL; ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ PMR ** ppsCLIPMRMem, ++ PMR ** ppsUSCPMRMem) ++{ ++ PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psPMRMem) ++{ ++ PVR_UNREFERENCED_PARAMETER(psPMRMem); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ RGXFWIF_FWTDMCONTEXT *psFWTransferContext; ++ IMG_UINT32 ui32WorkEstCCBSubmitted; ++ ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) ++ { ++ eError = DevmemAcquireCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc, ++ (void **)&psFWTransferContext); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to map firmware transfer context (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++ ui32WorkEstCCBSubmitted = psFWTransferContext->ui32WorkEstCCBSubmitted; ++ ++ DevmemReleaseCpuVirtAddr(psTransferContext->psFWTransferContextMemDesc); ++ ++ /* Check if all of the workload estimation CCB commands for this workload are read */ ++ if (ui32WorkEstCCBSubmitted != psTransferContext->sWorkEstData.ui32WorkEstCCBReceived) ++ { ++ 
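/* The FW has not yet consumed every WorkEst command submitted on this context; warn and return PVRSRV_ERROR_RETRY so the caller can attempt the destroy again later */
++ 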
PVR_DPF((PVR_DBG_WARNING, ++ "%s: WorkEst # cmds submitted (%u) and received (%u) mismatch", ++ __func__, ui32WorkEstCCBSubmitted, ++ psTransferContext->sWorkEstData.ui32WorkEstCCBReceived)); ++ ++ return PVRSRV_ERROR_RETRY; ++ } ++ } ++#endif ++ ++ ++ /* remove node from list before calling destroy - as destroy, if successful ++ * will invalidate the node ++ * must be re-added if destroy fails ++ */ ++ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); ++ dllist_remove_node(&(psTransferContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); ++ ++ ++ eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData, ++ psTransferContext->psDeviceNode); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_destroyTDM; ++ } ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) ++ { ++ WorkEstDeInitTDM(psDevInfo, &psTransferContext->sWorkEstData); ++ } ++#endif ++ ++ if (psTransferContext->psFWFrameworkMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); ++ } ++ ++ SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence); ++ SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate); ++ ++ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); ++ ++ OSLockDestroy(psTransferContext->hLock); ++ ++ OSFreeMem(psTransferContext); ++ ++ return PVRSRV_OK; ++ ++fail_destroyTDM: ++ ++ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock); ++ dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++ ++/* ++ * PVRSRVSubmitTQ3DKickKM ++ */ ++PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( ++ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, ++ IMG_UINT32 ui32PDumpFlags, ++ IMG_UINT32 ui32ClientUpdateCount, ++ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, ++ IMG_UINT32 * paui32ClientUpdateSyncOffset, ++ IMG_UINT32 * paui32ClientUpdateValue, ++ PVRSRV_FENCE iCheckFence, ++ PVRSRV_TIMELINE iUpdateTimeline, ++ PVRSRV_FENCE * piUpdateFence, ++ IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], ++ IMG_UINT32 ui32FWCommandSize, ++ IMG_UINT8 * pui8FWCommand, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32SyncPMRCount, ++ IMG_UINT32 * paui32SyncPMRFlags, ++ PMR ** ppsSyncPMRs, ++ IMG_UINT32 ui32TDMCharacteristic1, ++ IMG_UINT32 ui32TDMCharacteristic2, ++ IMG_UINT64 ui64DeadlineInus) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelper; ++ PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress = NULL; ++ PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress = NULL; ++ IMG_UINT32 ui32IntClientFenceCount = 0; ++ IMG_UINT32 * paui32IntUpdateValue = paui32ClientUpdateValue; ++ IMG_UINT32 ui32IntClientUpdateCount = ui32ClientUpdateCount; ++ PVRSRV_ERROR eError; ++ PVRSRV_ERROR eError2; ++ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE; ++ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psTransferContext->sTDMData.psServerCommonContext); ++ RGX_CLIENT_CCB *psClientCCB = FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext); ++ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); ++ ++ IMG_UINT32 ui32CmdOffset = 0; ++ IMG_BOOL bCCBStateOpen; ++ ++ PRGXFWIF_TIMESTAMP_ADDR pPreAddr; ++ PRGXFWIF_TIMESTAMP_ADDR pPostAddr; ++ PRGXFWIF_UFO_ADDR pRMWUFOAddr; ++ ++ IMG_UINT64 uiCheckFenceUID = 0; ++ IMG_UINT64 uiUpdateFenceUID = 0; ++#if 
defined(SUPPORT_WORKLOAD_ESTIMATION) ++ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTransfer = {0}; ++ IMG_UINT32 ui32TDMWorkloadDataRO = 0; ++ IMG_UINT32 ui32TDMCmdHeaderOffset = 0; ++ IMG_UINT32 ui32TDMCmdOffsetWrapCheck = 0; ++ RGX_WORKLOAD sWorkloadCharacteristics = {0}; ++#endif ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; ++ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; ++ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; ++ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; ++#endif ++ ++ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL; ++ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; ++ IMG_UINT32 ui32FenceSyncCheckpointCount = 0; ++ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL; ++ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL; ++ IMG_UINT32 ui32FenceTimelineUpdateValue = 0; ++ void *pvUpdateFenceFinaliseData = NULL; ++ ++ if (iUpdateTimeline >= 0 && !piUpdateFence) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++#if !defined(SUPPORT_WORKLOAD_ESTIMATION) ++ PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic1); ++ PVR_UNREFERENCED_PARAMETER(ui32TDMCharacteristic2); ++ PVR_UNREFERENCED_PARAMETER(ui64DeadlineInus); ++#endif ++ ++ /* Ensure we haven't been given a null ptr to ++ * update values if we have been told we ++ * have updates ++ */ ++ if (ui32ClientUpdateCount > 0) ++ { ++ PVR_LOG_RETURN_IF_FALSE(paui32ClientUpdateValue != NULL, ++ "paui32ClientUpdateValue NULL but " ++ "ui32ClientUpdateCount > 0", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ /* Ensure the string is null-terminated (Required for safety) */ ++ szUpdateFenceName[31] = '\0'; ++ ++ if (ui32SyncPMRCount != 0) ++ { ++ if (!ppsSyncPMRs) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ } ++ ++ OSLockAcquire(psTransferContext->hLock); ++ ++ /* We can't allocate the required amount of stack space on all consumer architectures */ ++ psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA)); ++ if (psCmdHelper == NULL) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto fail_allochelper; ++ } ++ ++ ++ /* ++ Init the command helper commands for all the prepares ++ */ ++ { ++ IMG_CHAR *pszCommandName; ++ RGXFWIF_CCB_CMD_TYPE eType; ++#if defined(SUPPORT_BUFFER_SYNC) ++ struct pvr_buffer_sync_context *psBufferSyncContext; ++#endif ++ ++ pszCommandName = "TQ-TDM"; ++ ++ if (ui32FWCommandSize == 0) ++ { ++ /* A NULL CMD for TDM is used to append updates to a non finished ++ * FW command. bCCBStateOpen is used in case capture range is ++ * entered on this command, to not drain CCB up to the Roff for this ++ * command, but the finished command prior to this. 
++ */ ++ bCCBStateOpen = IMG_TRUE; ++ eType = RGXFWIF_CCB_CMD_TYPE_NULL; ++ } ++ else ++ { ++ bCCBStateOpen = IMG_FALSE; ++ eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM; ++ } ++#if defined(SUPPORT_BUFFER_SYNC) ++ psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext; ++#endif ++ ++ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence, ++ 0, ++ NULL, ++ NULL); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_populate_sync_addr_list; ++ } ++ ++ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate, ++ ui32ClientUpdateCount, ++ pauiClientUpdateUFODevVarBlock, ++ paui32ClientUpdateSyncOffset); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_populate_sync_addr_list; ++ } ++ paui32IntUpdateValue = paui32ClientUpdateValue; ++ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; ++ ++ ++ if (ui32SyncPMRCount) ++ { ++#if defined(SUPPORT_BUFFER_SYNC) ++ int err; ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__)); ++ err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext, ++ psTransferContext->psDeviceNode->hSyncCheckpointContext, ++ ui32SyncPMRCount, ++ ppsSyncPMRs, ++ paui32SyncPMRFlags, ++ &ui32BufferFenceSyncCheckpointCount, ++ &apsBufferFenceSyncCheckpoints, ++ &psBufferUpdateSyncCheckpoint, ++ &psBufferSyncData); ++ if (err) ++ { ++ switch (err) ++ { ++ case -EINTR: ++ eError = PVRSRV_ERROR_RETRY; ++ break; ++ case -ENOMEM: ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ break; ++ default: ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ break; ++ } ++ ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError))); ++ } ++ goto fail_resolve_input_fence; ++ } ++ ++ /* Append buffer sync fences */ ++ if (ui32BufferFenceSyncCheckpointCount > 0) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence , (void*)pauiIntFenceUFOAddress)); ++ SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence, ++ ui32BufferFenceSyncCheckpointCount, ++ apsBufferFenceSyncCheckpoints); ++ if (!pauiIntFenceUFOAddress) ++ { ++ pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs; ++ } ++ ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; ++ } ++ ++ if (psBufferUpdateSyncCheckpoint) ++ { ++ /* Append the update (from output fence) */ ++ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate, ++ 1, ++ &psBufferUpdateSyncCheckpoint); ++ if (!pauiIntUpdateUFOAddress) ++ { ++ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs; ++ } ++ ui32IntClientUpdateCount++; ++ } ++#else /* defined(SUPPORT_BUFFER_SYNC) */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto fail_populate_sync_addr_list; ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ } ++ ++ /* Resolve the sync checkpoints that make up the input fence */ ++ eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext, ++ iCheckFence, ++ &ui32FenceSyncCheckpointCount, ++ &apsFenceSyncCheckpoints, ++ &uiCheckFenceUID, ++ ui32PDumpFlags); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_resolve_input_fence; ++ } ++#if 
defined(TDM_CHECKPOINT_DEBUG)
++		{
++			IMG_UINT32 ii;
++			for (ii=0; ii<32; ii++)
++			{
++				PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
++				CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii]));
++			}
++		}
++#endif
++		/* Create the output fence (if required) */
++		if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
++		{
++			eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
++					szUpdateFenceName,
++					iUpdateTimeline,
++					psTransferContext->psDeviceNode->hSyncCheckpointContext,
++					&iUpdateFence,
++					&uiUpdateFenceUID,
++					&pvUpdateFenceFinaliseData,
++					&psUpdateSyncCheckpoint,
++					(void*)&psFenceTimelineUpdateSync,
++					&ui32FenceTimelineUpdateValue,
++					ui32PDumpFlags);
++			if (eError != PVRSRV_OK)
++			{
++				goto fail_create_output_fence;
++			}
++
++			/* Append the sync prim update for the timeline (if required) */
++			if (psFenceTimelineUpdateSync)
++			{
++				IMG_UINT32 *pui32TimelineUpdateWp = NULL;
++
++				/* Allocate memory to hold the list of update values (including our timeline update) */
++				pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
++				if (!pui32IntAllocatedUpdateValues)
++				{
++					/* Failed to allocate memory */
++					eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++					goto fail_alloc_update_values_mem;
++				}
++				OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
++				/* Copy the update values into the new memory, then append our timeline update value */
++				if (paui32IntUpdateValue)
++				{
++					OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
++				}
++				/* Now set the additional update value */
++				pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
++				*pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
++				ui32IntClientUpdateCount++;
++#if defined(TDM_CHECKPOINT_DEBUG)
++				{
++					IMG_UINT32 iii;
++					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
++
++					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++					{
++						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++						pui32Tmp++;
++					}
++				}
++#endif
++				/* Now append the timeline sync prim addr to the transfer context update list */
++				SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate,
++				                           psFenceTimelineUpdateSync);
++#if defined(TDM_CHECKPOINT_DEBUG)
++				{
++					IMG_UINT32 iii;
++					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
++
++					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++					{
++						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++						pui32Tmp++;
++					}
++				}
++#endif
++				/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
++				paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
++			}
++		}
++
++		if (ui32FenceSyncCheckpointCount)
++		{
++			/* Append the checks (from input fence) */
++			if (ui32FenceSyncCheckpointCount > 0)
++			{
++				CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence));
++#if defined(TDM_CHECKPOINT_DEBUG)
++				{
++					IMG_UINT32 iii;
++					IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
++
++					for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++					{
++						CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++						pui32Tmp++;
++					}
++				}
++#endif
++				SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence,
++				                              ui32FenceSyncCheckpointCount,
++				                              apsFenceSyncCheckpoints);
++				if (!pauiIntFenceUFOAddress)
++				{
++					pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
++				}
++				ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
++			}
++#if defined(TDM_CHECKPOINT_DEBUG)
++			{
++				IMG_UINT32 iii;
++				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
++
++				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++				{
++					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++					pui32Tmp++;
++				}
++			}
++#endif
++		}
++		if (psUpdateSyncCheckpoint)
++		{
++			/* Append the update (from output fence) */
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
++			SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
++			                              1,
++			                              &psUpdateSyncCheckpoint);
++			if (!pauiIntUpdateUFOAddress)
++			{
++				pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
++			}
++			ui32IntClientUpdateCount++;
++#if defined(TDM_CHECKPOINT_DEBUG)
++			{
++				IMG_UINT32 iii;
++				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
++
++				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++				{
++					CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++					pui32Tmp++;
++				}
++			}
++#endif
++		}
++
++#if (ENABLE_TDM_UFO_DUMP == 1)
++		PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__));
++		{
++			IMG_UINT32 ii;
++			PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
++			PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
++			IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
++
++			/* Dump Fence syncs and Update syncs */
++			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
++			for (ii=0; ii<ui32IntClientFenceCount; ii++)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
++				psTmpIntFenceUFOAddress++;
++			}
++			PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
++			for (ii=0; ii<ui32IntClientUpdateCount; ii++)
++			{
++				if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
++				{
++					PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
++				}
++				else
++				{
++					PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. 
FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue)); ++ pui32TmpIntUpdateValue++; ++ } ++ psTmpIntUpdateUFOAddress++; ++ } ++ } ++#endif ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) ++ { ++ sWorkloadCharacteristics.sTransfer.ui32Characteristic1 = ui32TDMCharacteristic1; ++ sWorkloadCharacteristics.sTransfer.ui32Characteristic2 = ui32TDMCharacteristic2; ++ ++ /* Prepare workload estimation */ ++ WorkEstPrepare(psDeviceNode->pvDevice, ++ &psTransferContext->sWorkEstData, ++ &psTransferContext->sWorkEstData.uWorkloadMatchingData.sTransfer.sDataTDM, ++ eType, ++ &sWorkloadCharacteristics, ++ ui64DeadlineInus, ++ &sWorkloadKickDataTransfer); ++ } ++#endif ++ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice, ++ &pPreAddr, ++ &pPostAddr, ++ &pRMWUFOAddr); ++ /* ++ Create the command helper data for this command ++ */ ++ RGXCmdHelperInitCmdCCB(psDevInfo, ++ psClientCCB, ++ 0, ++ ui32IntClientFenceCount, ++ pauiIntFenceUFOAddress, ++ NULL, ++ ui32IntClientUpdateCount, ++ pauiIntUpdateUFOAddress, ++ paui32IntUpdateValue, ++ ui32FWCommandSize, ++ pui8FWCommand, ++ &pPreAddr, ++ &pPostAddr, ++ &pRMWUFOAddr, ++ eType, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ ui32PDumpFlags, ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ &sWorkloadKickDataTransfer, ++#else /* SUPPORT_WORKLOAD_ESTIMATION */ ++ NULL, ++#endif /* SUPPORT_WORKLOAD_ESTIMATION */ ++ pszCommandName, ++ bCCBStateOpen, ++ psCmdHelper); ++ } ++ ++ /* ++ Acquire space for all the commands in one go ++ */ ++ ++ eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_3dcmdacquire; ++ } ++ ++ ++ /* ++ We should acquire the kernel CCB(s) space here as the schedule could fail ++ and we would have to roll back all the syncs ++ */ ++ ++ /* ++ Only do the command helper release (which takes the server sync ++ operations if the acquire succeeded ++ */ ++ ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); ++ RGXCmdHelperReleaseCmdCCB(1, ++ psCmdHelper, ++ "TQ_TDM", ++ FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr); ++ ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) ++ { ++ /* The following is used to determine the offset of the command header containing ++ the workload estimation data so that can be accessed when the KCCB is read */ ++ ui32TDMCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(psCmdHelper); ++ ++ ui32TDMCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext)); ++ ++ /* This checks if the command would wrap around at the end of the CCB and ++ * therefore would start at an offset of 0 rather than the current command ++ * offset */ ++ if (ui32CmdOffset < ui32TDMCmdOffsetWrapCheck) ++ { ++ ui32TDMWorkloadDataRO = ui32CmdOffset; ++ } ++ else ++ { ++ ui32TDMWorkloadDataRO = 0; ++ } ++ } ++#endif ++ ++ /* ++ Even if we failed to acquire the client CCB space we might still need ++ to kick the HW to process a padding packet to release space for us next ++ time round ++ */ ++ { ++ RGXFWIF_KCCB_CMD sTDMKCCBCmd; ++ IMG_UINT32 ui32FWAddr = FWCommonContextGetFWAddress( ++ psTransferContext->sTDMData.psServerCommonContext).ui32Addr; ++ ++ /* Construct the kernel 3D 
CCB command. */ ++ sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; ++ sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext); ++ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(psClientCCB); ++ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(psClientCCB); ++ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; ++ ++ /* Add the Workload data into the KCCB kick */ ++ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)) ++ { ++ /* Store the offset to the CCCB command header so that it can be referenced ++ * when the KCCB command reaches the FW */ ++ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TDMWorkloadDataRO + ui32TDMCmdHeaderOffset; ++ } ++#endif ++ ++ /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */ ++ /* s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */ ++ /* ui323DCmdOffset); */ ++ RGXSRV_HWPERF_ENQ(psTransferContext, ++ OSGetCurrentClientProcessIDKM(), ++ FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ RGX_HWPERF_KICK_TYPE_TQTDM, ++ iCheckFence, ++ iUpdateFence, ++ iUpdateTimeline, ++ uiCheckFenceUID, ++ uiUpdateFenceUID, ++ NO_DEADLINE, ++ NO_CYCEST); ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError2 = RGXScheduleCommand(psDeviceNode->pvDevice, ++ RGXFWIF_DM_TDM, ++ & sTDMKCCBCmd, ++ ui32PDumpFlags); ++ if (eError2 != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (eError2 != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXTDMSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2)); ++ if (eError == PVRSRV_OK) ++ { ++ eError = eError2; ++ } ++ goto fail_2dcmdacquire; ++ } ++ ++ PVRGpuTraceEnqueueEvent(psDeviceNode->pvDevice, ui32FWAddr, ui32ExtJobRef, ++ ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQTDM); ++ } ++ ++ /* ++ * Now check eError (which may have returned an error from our earlier calls ++ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first ++ * so we check it now... 
++ */
++	if (eError != PVRSRV_OK)
++	{
++		goto fail_2dcmdacquire;
++	}
++
++#if defined(NO_HARDWARE)
++	/* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
++	if (psUpdateSyncCheckpoint)
++	{
++		SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
++	}
++	if (psFenceTimelineUpdateSync)
++	{
++		SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
++	}
++	SyncCheckpointNoHWUpdateTimelines(NULL);
++#endif /* defined(NO_HARDWARE) */
++
++#if defined(SUPPORT_BUFFER_SYNC)
++	if (psBufferSyncData)
++	{
++		pvr_buffer_sync_kick_succeeded(psBufferSyncData);
++	}
++	if (apsBufferFenceSyncCheckpoints)
++	{
++		kfree(apsBufferFenceSyncCheckpoints);
++	}
++#endif /* defined(SUPPORT_BUFFER_SYNC) */
++
++	*piUpdateFence = iUpdateFence;
++	if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
++	{
++		SyncCheckpointFinaliseFence(psDeviceNode, iUpdateFence, pvUpdateFenceFinaliseData,
++		                            psUpdateSyncCheckpoint, szUpdateFenceName);
++	}
++
++	OSFreeMem(psCmdHelper);
++
++	/* Drop the references taken on the sync checkpoints in the
++	 * resolved input fence */
++	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
++	                             apsFenceSyncCheckpoints);
++	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
++	if (apsFenceSyncCheckpoints)
++	{
++		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
++	}
++	/* Free memory allocated to hold the internal list of update values */
++	if (pui32IntAllocatedUpdateValues)
++	{
++		OSFreeMem(pui32IntAllocatedUpdateValues);
++		pui32IntAllocatedUpdateValues = NULL;
++	}
++
++	OSLockRelease(psTransferContext->hLock);
++	return PVRSRV_OK;
++
++/*
++ No resources are created in this function so there is nothing to free
++ unless we had to merge syncs.
++ If we fail after the client CCB acquire there is still nothing to do ++ as only the client CCB release will modify the client CCB ++*/ ++fail_2dcmdacquire: ++fail_3dcmdacquire: ++ ++ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence); ++ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate); ++fail_alloc_update_values_mem: ++ ++/* fail_pdumpcheck: */ ++/* fail_cmdtype: */ ++ ++ if (iUpdateFence != PVRSRV_NO_FENCE) ++ { ++ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData); ++ } ++fail_create_output_fence: ++ /* Drop the references taken on the sync checkpoints in the ++ * resolved input fence */ ++ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, ++ apsFenceSyncCheckpoints); ++ ++fail_resolve_input_fence: ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ if (psBufferSyncData) ++ { ++ pvr_buffer_sync_kick_failed(psBufferSyncData); ++ } ++ if (apsBufferFenceSyncCheckpoints) ++ { ++ kfree(apsBufferFenceSyncCheckpoints); ++ } ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ ++fail_populate_sync_addr_list: ++ PVR_ASSERT(eError != PVRSRV_OK); ++ OSFreeMem(psCmdHelper); ++fail_allochelper: ++ ++ if (apsFenceSyncCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); ++ } ++ OSLockRelease(psTransferContext->hLock); ++ return eError; ++} ++ ++ ++PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( ++ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ RGXFWIF_KCCB_CMD sKCCBCmd; ++ PVRSRV_ERROR eError; ++ ++ OSLockAcquire(psTransferContext->hLock); ++ ++ /* Schedule the firmware command */ ++ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE; ++ sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext); ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice, ++ RGXFWIF_DM_TDM, ++ &sKCCBCmd, ++ ui32PDumpFlags); ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to schedule the FW command %d (%s)", ++ __func__, eError, PVRSRVGETERRORSTRING(eError))); ++ } ++ ++ OSLockRelease(psTransferContext->hLock); ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, ++ IMG_UINT32 ui32Priority) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ ++ OSLockAcquire(psTransferContext->hLock); ++ ++ if (psTransferContext->sTDMData.ui32Priority != ui32Priority) ++ { ++ eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext, ++ psConnection, ++ psTransferContext->psDeviceNode->pvDevice, ++ ui32Priority, ++ RGXFWIF_DM_TDM); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorString(eError))); ++ ++ OSLockRelease(psTransferContext->hLock); ++ return eError; ++ } ++ } ++ ++ OSLockRelease(psTransferContext->hLock); ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, ++ RGX_CONTEXT_PROPERTY eContextProperty, ++ IMG_UINT64 ui64Input, ++ IMG_UINT64 *pui64Output) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; 
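++	/* Only the context-flags property is currently supported: the new flags
++	 * are applied to the TDM common context under the transfer context lock;
++	 * any other property is rejected with PVRSRV_ERROR_NOT_SUPPORTED. */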
++ ++ switch (eContextProperty) ++ { ++ case RGX_CONTEXT_PROPERTY_FLAGS: ++ { ++ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input; ++ ++ OSLockAcquire(psTransferContext->hLock); ++ eError = FWCommonContextSetFlags(psTransferContext->sTDMData.psServerCommonContext, ++ ui32ContextFlags); ++ OSLockRelease(psTransferContext->hLock); ++ break; ++ } ++ ++ default: ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); ++ eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ } ++ } ++ ++ return eError; ++} ++ ++void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ ++ OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock); ++ ++ dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode); ++ ++ DumpFWCommonContextInfo(psCurrentServerTransferCtx->sTDMData.psServerCommonContext, ++ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ } ++ ++ OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock); ++} ++ ++ ++IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ IMG_UINT32 ui32ContextBitMask = 0; ++ ++ OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock); ++ ++ dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode); ++ ++ if (CheckStalledClientCommonContext( ++ psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D) ++ == PVRSRV_ERROR_CCCB_STALLED) { ++ ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D; ++ } ++ } ++ ++ OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock); ++ return ui32ContextBitMask; ++} ++ ++/**************************************************************************//** ++ End of file (rgxtdmtransfer.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxtdmtransfer.h b/drivers/gpu/drm/img-rogue/rgxtdmtransfer.h +new file mode 100644 +index 000000000000..87ca2cf2c2f8 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxtdmtransfer.h +@@ -0,0 +1,132 @@ ++/*************************************************************************/ /*! ++@File rgxtdmtransfer.h ++@Title RGX Transfer queue 2 Functionality ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX Transfer queue Functionality ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXTDMTRANSFER_H) ++#define RGXTDMTRANSFER_H ++ ++#include "devicemem.h" ++#include "device.h" ++#include "rgxdevice.h" ++#include "rgxfwutils.h" ++#include "rgx_fwif_resetframework.h" ++#include "rgxdebug.h" ++#include "pvr_notifier.h" ++ ++#include "sync_server.h" ++#include "connection_server.h" ++ ++typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT; ++ ++ ++PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT32 ui32Priority, ++ IMG_UINT32 ui32FrameworkCommandSize, ++ IMG_PBYTE pabyFrameworkCommand, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32PackedCCBSizeU88, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress, ++ RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext); ++ ++ ++PVRSRV_ERROR PVRSRVRGXTDMGetSharedMemoryKM( ++ CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ PMR ** ppsCLIPMRMem, ++ PMR ** ppsUSCPMRMem); ++ ++ ++PVRSRV_ERROR PVRSRVRGXTDMReleaseSharedMemoryKM(PMR * psUSCPMRMem); ++ ++ ++PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext); ++ ++ ++PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM( ++ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext, ++ IMG_UINT32 ui32PDumpFlags, ++ IMG_UINT32 ui32ClientUpdateCount, ++ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFODevVarBlock, ++ IMG_UINT32 * paui32ClientUpdateSyncOffset, ++ IMG_UINT32 * paui32ClientUpdateValue, ++ PVRSRV_FENCE iCheckFence, ++ PVRSRV_TIMELINE iUpdateTimeline, ++ PVRSRV_FENCE * piUpdateFence, ++ IMG_CHAR szUpdateFenceName[PVRSRV_SYNC_NAME_LENGTH], ++ IMG_UINT32 ui32FWCommandSize, ++ IMG_UINT8 * pui8FWCommand, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32SyncPMRCount, ++ IMG_UINT32 * pui32SyncPMRFlags, ++ PMR ** ppsSyncPMRs, ++ IMG_UINT32 ui32TDMCharacteristic1, ++ IMG_UINT32 ui32TDMCharacteristic2, ++ IMG_UINT64 ui64DeadlineInus); ++ ++PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM( ++ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, ++ IMG_UINT32 ui32PDumpFlags); ++ ++PVRSRV_ERROR 
PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, ++ IMG_UINT32 ui32Priority); ++ ++PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPropertyKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext, ++ RGX_CONTEXT_PROPERTY eContextProperty, ++ IMG_UINT64 ui64Input, ++ IMG_UINT64 *pui64Output); ++ ++/* Debug - Dump debug info of TDM transfer contexts on this device */ ++void DumpTDMTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel); ++ ++/* Debug/Watchdog - check if client transfer contexts are stalled */ ++IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++ ++#endif /* RGXTDMTRANSFER_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxtimecorr.c b/drivers/gpu/drm/img-rogue/rgxtimecorr.c +new file mode 100644 +index 000000000000..584dbf1e3f64 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxtimecorr.c +@@ -0,0 +1,648 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device specific time correlation and calibration routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific time correlation and calibration routines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include "img_defs.h"
++#include "rgxtimecorr.h"
++#include "rgxfwutils.h"
++#include "htbserver.h"
++#include "pvrsrv_apphint.h"
++
++/******************************************************************************
++ *
++ * - A calibration period is started on power-on and after a DVFS transition,
++ *   and it's closed before a power-off and before a DVFS transition
++ *   (so power-on -> dvfs -> dvfs -> power-off, power-on -> dvfs -> dvfs...,
++ *   where each arrow is a calibration period).
++ *
++ * - The timers on the Host and on the FW are correlated at the beginning of
++ *   each period together with the current GPU frequency.
++ *
++ * - Correlation and calibration are also done at regular intervals using
++ *   a best-effort approach.
++ *
++ *****************************************************************************/
++
++/*
++	AppHint interfaces
++*/
++
++static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
++                              const void *psPrivate,
++                              IMG_UINT32 ui32Value)
++{
++	static __maybe_unused const char* const apszClocks[] = {
++		"mono", "mono_raw", "sched"
++	};
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++
++	PVR_ASSERT(psDeviceNode->pvDevice != NULL);
++
++	PVR_UNREFERENCED_PARAMETER(psPrivate);
++
++	if (ui32Value >= RGXTIMECORR_CLOCK_LAST)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "Invalid clock source type (%u)", ui32Value));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	RGXTimeCorrEnd((PVRSRV_DEVICE_NODE *) psDeviceNode,
++	               RGXTIMECORR_EVENT_CLOCK_CHANGE);
++
++	PVR_DPF((PVR_DBG_WARNING, "Setting time correlation clock from \"%s\" to \"%s\"",
++	        apszClocks[psDevInfo->ui32ClockSource],
++	        apszClocks[ui32Value]));
++
++	psDevInfo->ui32ClockSource = ui32Value;
++
++	RGXTimeCorrBegin((PVRSRV_DEVICE_NODE *) psDeviceNode,
++	                 RGXTIMECORR_EVENT_CLOCK_CHANGE);
++
++	return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
++                              const void *psPrivate,
++                              IMG_UINT32 *pui32Value)
++{
++	PVR_ASSERT(psDeviceNode->pvDevice != NULL);
++
++	*pui32Value =
++		((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource;
++
++	PVR_UNREFERENCED_PARAMETER(psPrivate);
++
++	return PVRSRV_OK;
++}
++
++void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock,
++	                                    _SetClock, psDeviceNode, NULL);
++}
++
++/*
++	End of AppHint interface
++*/
++
++IMG_UINT64 RGXTimeCorrGetClockns64(const PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	IMG_UINT64 ui64Clock;
++
++	switch (((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource) {
++		case RGXTIMECORR_CLOCK_MONO:
++			return ((void) OSClockMonotonicns64(&ui64Clock), ui64Clock);
++		case RGXTIMECORR_CLOCK_MONO_RAW:
++			return OSClockMonotonicRawns64();
++		case RGXTIMECORR_CLOCK_SCHED:
++			return OSClockns64();
++		default:
++			PVR_ASSERT(IMG_FALSE);
++			return 0;
++	}
++}
++
++IMG_UINT64 RGXTimeCorrGetClockus64(const PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	IMG_UINT32 rem;
++	return OSDivide64r64(RGXTimeCorrGetClockns64(psDeviceNode), 1000, &rem);
++}
++
++void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
++                        RGXFWIF_TIME_CORR *psTimeCorrs,
++                        IMG_UINT32 ui32NumOut)
++{
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
++	IMG_UINT32 ui32CurrentIndex = psGpuUtilFWCB->ui32TimeCorrSeqCount;
++
++	while (ui32NumOut--)
++	{
++		*(psTimeCorrs++) = psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)];
++		ui32CurrentIndex--;
++	}
++}
++
++static __maybe_unused const IMG_CHAR* _EventToString(RGXTIMECORR_EVENT eEvent)
++{
++	switch (eEvent)
++	{
++		case RGXTIMECORR_EVENT_POWER:
++			return "power";
++		case RGXTIMECORR_EVENT_DVFS:
++			return "dvfs";
++		case RGXTIMECORR_EVENT_PERIODIC:
++			return "periodic";
++		case RGXTIMECORR_EVENT_CLOCK_CHANGE:
++			return "clock source";
++		default:
++			return "n/a";
++	}
++}
++
++static inline IMG_UINT32 _RGXGetSystemLayerGPUClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode)
++{
++	RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
++
++	return psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
++}
++
++static inline IMG_UINT32 _RGXGetEstimatedGPUClockSpeed(PVRSRV_RGXDEV_INFO *psDevInfo)
++{
++	RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
++	GPU_FREQ_TRACKING_DATA *psTrackingData;
++
++	psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex];
++
++	return psTrackingData->ui32EstCoreClockSpeed;
++}
++
++#if defined(PVRSRV_TIMER_CORRELATION_HISTORY)
++static inline void _DumpTimerCorrelationHistory(PVRSRV_RGXDEV_INFO *psDevInfo)
++{
++	RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
++	IMG_UINT32 i = psGpuDVFSTable->ui32HistoryIndex;
++
++	PVR_DPF((PVR_DBG_ERROR, "Dumping history of timer correlation data (latest first):"));
++
++	do
++	{
++		PVR_DPF((PVR_DBG_ERROR,
++		         "  Begin times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", "
++		         "End times: OS %" IMG_UINT64_FMTSPEC ", CR %" IMG_UINT64_FMTSPEC ", "
++		         "Core clk %u, Estimated clk %u",
++		         psGpuDVFSTable->asTrackingHistory[i].ui64BeginOSTimestamp,
++		         psGpuDVFSTable->asTrackingHistory[i].ui64BeginCRTimestamp,
++		         psGpuDVFSTable->asTrackingHistory[i].ui64EndOSTimestamp,
++		         psGpuDVFSTable->asTrackingHistory[i].ui64EndCRTimestamp,
++		         psGpuDVFSTable->asTrackingHistory[i].ui32CoreClockSpeed,
++		         psGpuDVFSTable->asTrackingHistory[i].ui32EstCoreClockSpeed));
++
++		i = (i - 1) % RGX_GPU_FREQ_TRACKING_SIZE;
++
++	} while (i != psGpuDVFSTable->ui32HistoryIndex);
++}
++#endif
++
++static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_EVENT eEvent)
++{
++	PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
++	RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
++	IMG_UINT32 ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1;
++	RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)];
++
++	/*
++	 * The following reads must be done as close together as possible, because
++	 * they represent the same current time sampled from different clock sources.
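++	 * A sketch of how consumers later use the resulting (CR, OS) pair, as
++	 * done in _RGXCheckTimeCorrData below (illustrative only):
++	 *   ui64OSTimeNow ~= psTimeCorr->ui64OSTimeStamp +
++	 *       RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeNow - psTimeCorr->ui64CRTimeStamp,
++	 *                                   psTimeCorr->ui64CRDeltaToOSDeltaKNs);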
++ */ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ if (OSClockMonotonicns64(&psTimeCorr->ui64OSMonoTimeStamp) != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "_RGXMakeTimeCorrData: System Monotonic Clock not available.")); ++ PVR_ASSERT(0); ++ } ++#endif ++ psTimeCorr->ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); ++ psTimeCorr->ui64OSTimeStamp = RGXTimeCorrGetClockns64(psDeviceNode); ++ psTimeCorr->ui32CoreClockSpeed = _RGXGetEstimatedGPUClockSpeed(psDevInfo); ++ psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed); ++ ++ if (psTimeCorr->ui64CRDeltaToOSDeltaKNs == 0) ++ { ++#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) ++ _DumpTimerCorrelationHistory(psDevInfo); ++#endif ++ ++ /* Revert to original clock speed (error already printed) */ ++ psTimeCorr->ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); ++ psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXTimeCorrGetConversionFactor(psTimeCorr->ui32CoreClockSpeed); ++ } ++ ++ /* Make sure the values are written to memory before updating the index of the current entry */ ++ OSWriteMemoryBarrier(psTimeCorr); ++ ++ /* Update the index of the current entry in the timer correlation array */ ++ psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "Timer correlation data (post %s event): OS %" IMG_UINT64_FMTSPEC " ns, " ++ "CR %" IMG_UINT64_FMTSPEC ", GPU freq. %u Hz (given as %u Hz)", ++ _EventToString(eEvent), ++ psTimeCorr->ui64OSTimeStamp, ++ psTimeCorr->ui64CRTimeStamp, ++ RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed), ++ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode))); ++ ++ /* ++ * Don't log timing data to the HTB log after a power(-on) event. ++ * Otherwise this will be logged before the HTB partition marker, breaking ++ * the log sync grammar. This data will be automatically repeated when the ++ * partition marker is written. ++ */ ++ HTBSyncScale(eEvent != RGXTIMECORR_EVENT_POWER, ++ psTimeCorr->ui64OSTimeStamp, ++ psTimeCorr->ui64CRTimeStamp, ++ psTimeCorr->ui32CoreClockSpeed); ++} ++ ++static void _RGXCheckTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_GPU_DVFS_TABLE *psGpuDVFSTable) ++{ ++#if !defined(NO_HARDWARE) && !defined(VIRTUAL_PLATFORM) && defined(DEBUG) ++#define SCALING_FACTOR (10) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb; ++ IMG_UINT32 ui32Index = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount); ++ RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32Index]; ++ IMG_UINT64 ui64EstimatedTime, ui64CRTimeStamp, ui64OSTimeStamp; ++ IMG_UINT64 ui64CRTimeDiff, ui64OSTimeDiff; ++ IMG_INT64 i64Diff; ++ IMG_UINT32 ui32Ratio, ui32Remainder; ++ ++ /* ++ * The following reads must be done as close together as possible, because ++ * they represent the same current time sampled from different clock sources. ++ */ ++ ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo); ++ ui64OSTimeStamp = RGXTimeCorrGetClockns64(psDeviceNode); ++ ++ if ((ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) < (1 << SCALING_FACTOR)) ++ { ++ /* ++ * Less than ~1us has passed since the timer correlation data was generated. ++ * A time frame this short is probably not enough to get an estimate ++ * of how good the timer correlation data was. ++ * Skip calculations for the above reason and to avoid a division by 0 below. 
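++	 * (With SCALING_FACTOR = 10 this threshold works out as 1 << 10 = 1024 ns,
++	 * i.e. roughly 1us; the same factor scales the operands of the ratio
++	 * calculation below.)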
++ */ ++ return; ++ } ++ ++ ++ /* Calculate an estimated timestamp based on the latest timer correlation data */ ++ ui64CRTimeDiff = ui64CRTimeStamp - psTimeCorr->ui64CRTimeStamp; ++ ui64OSTimeDiff = RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeDiff, ++ psTimeCorr->ui64CRDeltaToOSDeltaKNs); ++ ui64EstimatedTime = psTimeCorr->ui64OSTimeStamp + ui64OSTimeDiff; ++ ++ /* Get difference between estimated timestamp and current timestamp, in ns */ ++ i64Diff = ui64EstimatedTime - ui64OSTimeStamp; ++ ++ /* ++ * Calculate ratio between estimated time diff and real time diff: ++ * ratio% : 100% = (OSestimate - OStimecorr) : (OSreal - OStimecorr) ++ * ++ * The operands are scaled down (approximately from ns to us) so at least ++ * the divisor fits on 32 bit. ++ */ ++ ui32Ratio = OSDivide64(((ui64EstimatedTime - psTimeCorr->ui64OSTimeStamp) * 100ULL) >> SCALING_FACTOR, ++ (ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) >> SCALING_FACTOR, ++ &ui32Remainder); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "Estimated timestamp check: diff %" IMG_INT64_FMTSPECd " ns over " ++ "period %" IMG_UINT64_FMTSPEC " ns, estimated timer speed %u%%", ++ i64Diff, ++ ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp, ++ ui32Ratio)); ++ ++ /* Warn if the estimated timestamp is not within +/- 1% of the current time */ ++ if (ui32Ratio < 99 || ui32Ratio > 101) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "Estimated timestamps generated in the last %" IMG_UINT64_FMTSPEC " ns " ++ "were %s the real time (increasing at %u%% speed)", ++ ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp, ++ i64Diff > 0 ? "ahead of" : "behind", ++ ui32Ratio)); ++ ++ /* Higher ratio == higher delta OS == higher delta CR == frequency higher than expected (and viceversa) */ ++ PVR_DPF((PVR_DBG_WARNING, ++ "Current GPU frequency %u Hz (given as %u Hz) is probably %s than expected", ++ RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed), ++ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), ++ i64Diff > 0 ? "lower" : "higher")); ++ } ++#else ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable); ++#endif ++} ++ ++static inline IMG_UINT32 _RGXGPUFreqGetIndex(RGX_GPU_DVFS_TABLE *psGpuDVFSTable, IMG_UINT32 ui32CoreClockSpeed) ++{ ++ IMG_UINT32 *paui32GPUFrequencies = psGpuDVFSTable->aui32GPUFrequency; ++ IMG_UINT32 i; ++ ++ for (i = 0; i < RGX_GPU_DVFS_TABLE_SIZE; i++) ++ { ++ if (paui32GPUFrequencies[i] == ui32CoreClockSpeed) ++ { ++ return i; ++ } ++ ++ if (paui32GPUFrequencies[i] == 0) ++ { ++ paui32GPUFrequencies[i] = ui32CoreClockSpeed; ++ return i; ++ } ++ } ++ ++ i--; ++ ++ PVR_DPF((PVR_DBG_ERROR, "GPU frequency table in the driver is full! " ++ "Table size should be increased! 
Overriding last entry (%u) with %u", ++ paui32GPUFrequencies[i], ui32CoreClockSpeed)); ++ ++ paui32GPUFrequencies[i] = ui32CoreClockSpeed; ++ ++ return i; ++} ++ ++static void _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ GPU_FREQ_TRACKING_DATA *psTrackingData; ++ IMG_UINT32 ui32CoreClockSpeed, ui32Index; ++ ++ IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo); ++ IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(psDeviceNode); ++ ++ psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp; ++ psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp; ++ ++ ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode); ++ ui32Index = _RGXGPUFreqGetIndex(psGpuDVFSTable, ui32CoreClockSpeed); ++ psTrackingData = &psGpuDVFSTable->asTrackingData[ui32Index]; ++ ++ /* Set the time needed to (re)calibrate the GPU frequency */ ++ if (psTrackingData->ui32CalibrationCount == 0) /* We never met this frequency */ ++ { ++ psTrackingData->ui32EstCoreClockSpeed = ui32CoreClockSpeed; ++ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US; ++ } ++ else if (psTrackingData->ui32CalibrationCount == 1) /* We calibrated this frequency only once */ ++ { ++ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US; ++ } ++ else ++ { ++ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US; ++ } ++ ++ /* Update the index to the DVFS table */ ++ psGpuDVFSTable->ui32FreqIndex = ui32Index; ++ ++#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) ++ /* Update tracking history */ ++ { ++ GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; ++ ++ psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; ++ psTrackingHistory->ui32CoreClockSpeed = ui32CoreClockSpeed; ++ psTrackingHistory->ui32EstCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed; ++ psTrackingHistory->ui64BeginCRTimestamp = ui64CRTimestamp; ++ psTrackingHistory->ui64BeginOSTimestamp = ui64OSTimestamp; ++ psTrackingHistory->ui64EndCRTimestamp = 0ULL; ++ psTrackingHistory->ui64EndOSTimestamp = 0ULL; ++ } ++#endif ++} ++ ++static void _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_GPU_DVFS_TABLE *psGpuDVFSTable) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo); ++ IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64(psDeviceNode); ++ ++ psGpuDVFSTable->ui64CalibrationCRTimediff = ++ ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp; ++ psGpuDVFSTable->ui64CalibrationOSTimediff = ++ ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp; ++ ++ /* Check if the current timer correlation data is good enough */ ++ _RGXCheckTimeCorrData(psDeviceNode, psGpuDVFSTable); ++ ++#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) ++ /* Update tracking history */ ++ { ++ GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; ++ ++ psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; ++ psTrackingHistory->ui64EndCRTimestamp = ui64CRTimestamp; ++ psTrackingHistory->ui64EndOSTimestamp = ui64OSTimestamp; ++ } ++#endif ++} ++ ++static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGX_GPU_DVFS_TABLE *psGpuDVFSTable, ++ RGXTIMECORR_EVENT eEvent) ++{ ++#if !defined(DISABLE_GPU_FREQUENCY_CALIBRATION) ++ GPU_FREQ_TRACKING_DATA *psTrackingData; ++ IMG_UINT32 
ui32EstCoreClockSpeed, ui32PrevCoreClockSpeed; ++ IMG_INT32 i32Diff; ++ IMG_UINT32 ui32Remainder; ++ ++ /* ++ * Find out what the GPU frequency was in the last period. ++ * This should return a value very close to the frequency passed by the system layer. ++ */ ++ ui32EstCoreClockSpeed = ++ RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff, ++ psGpuDVFSTable->ui64CalibrationOSTimediff, ++ ui32Remainder); ++ ++ /* Update GPU frequency used by the driver for a given system layer frequency */ ++ psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex]; ++ ++ ui32PrevCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed; ++ psTrackingData->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed; ++ psTrackingData->ui32CalibrationCount++; ++ ++ i32Diff = (IMG_INT32) (ui32EstCoreClockSpeed - ui32PrevCoreClockSpeed); ++ ++ if ((i32Diff < -1000000) || (i32Diff > 1000000)) ++ { ++ /* Warn if the frequency changed by more than 1 MHz between recalculations */ ++ PVR_DPF((PVR_DBG_WARNING, ++ "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " ++ "more than 1 MHz difference between old and new value " ++ "(%u Hz -> %u Hz over %" IMG_UINT64_FMTSPEC " us)", ++ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), ++ _EventToString(eEvent), ++ RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), ++ RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), ++ psGpuDVFSTable->ui64CalibrationOSTimediff)); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "GPU frequency calibration of system layer frequency %u Hz (pre %s event): " ++ "%u Hz -> %u Hz done over %" IMG_UINT64_FMTSPEC " us", ++ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode), ++ _EventToString(eEvent), ++ RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed), ++ RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed), ++ psGpuDVFSTable->ui64CalibrationOSTimediff)); ++ } ++ ++ /* Reset time deltas to avoid recalibrating the same frequency over and over again */ ++ psGpuDVFSTable->ui64CalibrationCRTimediff = 0; ++ psGpuDVFSTable->ui64CalibrationOSTimediff = 0; ++ ++#if defined(PVRSRV_TIMER_CORRELATION_HISTORY) ++ /* Update tracking history */ ++ { ++ GPU_FREQ_TRACKING_HISTORY *psTrackingHistory; ++ ++ psTrackingHistory = &psGpuDVFSTable->asTrackingHistory[psGpuDVFSTable->ui32HistoryIndex]; ++ psTrackingHistory->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed; ++ psGpuDVFSTable->ui32HistoryIndex = ++ (psGpuDVFSTable->ui32HistoryIndex + 1) % RGX_GPU_FREQ_TRACKING_SIZE; ++ } ++#endif ++ ++#else ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable); ++ PVR_UNREFERENCED_PARAMETER(eEvent); ++#endif ++} ++ ++void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; ++ PVRSRV_VZ_RETN_IF_MODE(GUEST); ++ ++ _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable); ++ _RGXMakeTimeCorrData(psDeviceNode, eEvent); ++} ++ ++void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; ++ PVRSRV_VZ_RETN_IF_MODE(GUEST); ++ ++ _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable); ++ ++ if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod) ++ { ++ _RGXGPUFreqCalibrationCalculate(psDeviceNode, 
psGpuDVFSTable, eEvent); ++ } ++} ++ ++void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable; ++ IMG_UINT64 ui64TimeNow = RGXTimeCorrGetClockus64(psDeviceNode); ++ PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_DEFAULT; ++ PVRSRV_VZ_RETN_IF_MODE(GUEST); ++ ++ if (psGpuDVFSTable == NULL) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Required data not initialised yet", __func__)); ++ return; ++ } ++ ++ /* Check if it's the right time to recalibrate the GPU clock frequency */ ++ if ((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod) return; ++ ++ /* Try to acquire the powerlock, if not possible then don't wait */ ++ if (PVRSRVPowerTryLock(psDeviceNode) != PVRSRV_OK) return; ++ ++ /* If the GPU is off then we can't do anything */ ++ PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState); ++ if (ePowerState != PVRSRV_DEV_POWER_STATE_ON) ++ { ++ PVRSRVPowerUnlock(psDeviceNode); ++ return; ++ } ++ ++ /* All checks passed, we can calibrate and correlate */ ++ RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC); ++ RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC); ++ ++ PVRSRVPowerUnlock(psDeviceNode); ++} ++ ++/* ++ RGXTimeCorrGetClockSource ++*/ ++RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(const PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++ return ((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice)->ui32ClockSource; ++} ++ ++/* ++ RGXTimeCorrSetClockSource ++*/ ++PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGXTIMECORR_CLOCK_TYPE eClockType) ++{ ++ return _SetClock(psDeviceNode, NULL, eClockType); ++} ++ ++PVRSRV_ERROR ++PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT64 * pui64Time) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ *pui64Time = RGXTimeCorrGetClockns64(psDeviceNode); ++ ++ return PVRSRV_OK; ++} ++ ++/****************************************************************************** ++ End of file (rgxtimecorr.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxtimecorr.h b/drivers/gpu/drm/img-rogue/rgxtimecorr.h +new file mode 100644 +index 000000000000..e1cfff9b7abc +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxtimecorr.h +@@ -0,0 +1,272 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX time correlation and calibration header file ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX time correlation and calibration routines ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGXTIMECORR_H)
++#define RGXTIMECORR_H
++
++#include "img_types.h"
++#include "device.h"
++#include "osfunc.h"
++#include "connection_server.h"
++
++typedef enum
++{
++	RGXTIMECORR_CLOCK_MONO,
++	RGXTIMECORR_CLOCK_MONO_RAW,
++	RGXTIMECORR_CLOCK_SCHED,
++
++	RGXTIMECORR_CLOCK_LAST
++} RGXTIMECORR_CLOCK_TYPE;
++
++typedef enum
++{
++	RGXTIMECORR_EVENT_POWER,
++	RGXTIMECORR_EVENT_DVFS,
++	RGXTIMECORR_EVENT_PERIODIC,
++	RGXTIMECORR_EVENT_CLOCK_CHANGE
++} RGXTIMECORR_EVENT;
++
++/*
++ * Calibrated GPU frequencies are rounded to the nearest multiple of 1 kHz
++ * before use, to reduce the noise introduced by calculations done with
++ * imperfect operands (correlated timers not sampled at exactly the same
++ * time, GPU CR timer incrementing only once every 256 GPU cycles).
++ * This also helps reduce the variation between consecutive calculations.
++ */
++#define RGXFWIF_CONVERT_TO_KHZ(freq) (((freq) + 500) / 1000)
++#define RGXFWIF_ROUND_TO_KHZ(freq)   ((((freq) + 500) / 1000) * 1000)
++
++/* Constants used in different calculations */
++#define SECONDS_TO_MICROSECONDS (1000000ULL)
++#define CRTIME_TO_CYCLES_WITH_US_SCALE (RGX_CRTIME_TICK_IN_CYCLES * SECONDS_TO_MICROSECONDS)
++
++/*
++ * Use this macro to get a more realistic GPU core clock speed than the one
++ * given by the upper layers (used when doing GPU frequency calibration)
++ */
++#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \
++	OSDivide64((deltacr_us) * CRTIME_TO_CYCLES_WITH_US_SCALE, (deltaos_us), &(remainder))
++
++
++/*!
++******************************************************************************
++
++ @Function    RGXTimeCorrGetConversionFactor
++
++ @Description Generate the constant used to convert a GPU time difference into
++              an OS time difference (for more info see rgx_fwif_km.h).
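++
++              Illustrative numbers only (assuming the CR timer ticks once
++              every 256 GPU cycles, as noted in the rounding comment above):
++              with ui32ClockSpeed at 500 MHz, RGXFWIF_CONVERT_TO_KHZ() yields
++              500000 and one CR tick lasts 512 ns, so the returned factor is
++              512 << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT, i.e. the
++              OS-nanoseconds per CR tick in fixed-point form.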
++ ++ @Input ui32ClockSpeed : GPU clock speed ++ ++ @Return 0 on failure, conversion factor otherwise ++ ++******************************************************************************/ ++static inline IMG_UINT64 RGXTimeCorrGetConversionFactor(IMG_UINT32 ui32ClockSpeed) ++{ ++ IMG_UINT32 ui32Remainder; ++ ++ if (RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed) == 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: GPU clock frequency %u is too low", ++ __func__, ui32ClockSpeed)); ++ ++ return 0; ++ } ++ ++ return OSDivide64r64(CRTIME_TO_CYCLES_WITH_US_SCALE << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT, ++ RGXFWIF_CONVERT_TO_KHZ(ui32ClockSpeed), &ui32Remainder); ++} ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXTimeCorrBegin ++ ++ @Description Generate new timer correlation data, and start tracking ++ the current GPU frequency. ++ ++ @Input hDevHandle : RGX Device Node ++ @Input eEvent : Event associated with the beginning of a timer ++ correlation period ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXTimeCorrEnd ++ ++ @Description Stop tracking the CPU and GPU timers, and if possible ++ recalculate the GPU frequency to a value which makes the timer ++ correlation data more accurate. ++ ++ @Input hDevHandle : RGX Device Node ++ @Input eEvent : Event associated with the end of a timer ++ correlation period ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXTimeCorrRestartPeriodic ++ ++ @Description Perform actions from RGXTimeCorrEnd and RGXTimeCorrBegin, ++ but only if enough time has passed since the last timer ++ correlation data was generated. ++ ++ @Input hDevHandle : RGX Device Node ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXTimeCorrGetClockns64 ++ ++ @Description Returns value of currently selected clock (in ns). ++ ++ @Input psDeviceNode : RGX Device Node ++ @Return clock value from currently selected clock source ++ ++******************************************************************************/ ++IMG_UINT64 RGXTimeCorrGetClockns64(const PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXTimeCorrGetClockus64 ++ ++ @Description Returns value of currently selected clock (in us). ++ ++ @Input psDeviceNode : RGX Device Node ++ @Return clock value from currently selected clock source ++ ++******************************************************************************/ ++IMG_UINT64 RGXTimeCorrGetClockus64(const PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/*! 
++****************************************************************************** ++ ++ @Function RGXTimeCorrGetClockSource ++ ++ @Description Returns currently selected clock source ++ ++ @Input psDeviceNode : RGX Device Node ++ @Return clock source type ++ ++******************************************************************************/ ++RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(const PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXTimeCorrSetClockSource ++ ++ @Description Sets clock source for correlation data. ++ ++ @Input psDeviceNode : RGX Device Node ++ @Input eClockType : clock source type ++ ++ @Return error code ++ ++******************************************************************************/ ++PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGXTIMECORR_CLOCK_TYPE eClockType); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXTimeCorrInitAppHintCallbacks ++ ++ @Description Initialise apphint callbacks for timer correlation ++ related apphints. ++ ++ @Input psDeviceNode : RGX Device Node ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++/*! ++****************************************************************************** ++ ++ @Function RGXGetTimeCorrData ++ ++ @Description Get a number of the most recent time correlation data points ++ ++ @Input psDeviceNode : RGX Device Node ++ @Output psTimeCorrs : Output array of RGXFWIF_TIME_CORR elements ++ for data to be written to ++ @Input ui32NumOut : Number of elements to be written out ++ ++ @Return void ++ ++******************************************************************************/ ++void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, ++ RGXFWIF_TIME_CORR *psTimeCorrs, ++ IMG_UINT32 ui32NumOut); ++ ++/**************************************************************************/ /*! ++@Function PVRSRVRGXCurrentTime ++@Description Returns the current state of the device timer ++@Input psDevData Device data. ++@Out pui64Time ++@Return PVRSRV_OK on success. ++*/ /***************************************************************************/ ++PVRSRV_ERROR ++PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT64 * pui64Time); ++ ++#endif /* RGXTIMECORR_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxtimerquery.c b/drivers/gpu/drm/img-rogue/rgxtimerquery.c +new file mode 100644 +index 000000000000..d5d11bff9129 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxtimerquery.c +@@ -0,0 +1,244 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Timer queries ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description RGX Timer queries ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include "rgxtimerquery.h"
++#include "rgxdevice.h"
++#include "rgxtimecorr.h"
++
++#include "rgxfwutils.h"
++#include "pdump_km.h"
++
++PVRSRV_ERROR
++PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA    * psConnection,
++                           PVRSRV_DEVICE_NODE * psDeviceNode,
++                           IMG_UINT32         ui32QueryId)
++{
++	PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
++
++	PVR_UNREFERENCED_PARAMETER(psConnection);
++
++	if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++#if !defined(PVRSRV_USE_BRIDGE_LOCK)
++	OSLockAcquire(psDevInfo->hTimerQueryLock);
++#endif
++
++	psDevInfo->bSaveStart = IMG_TRUE;
++	psDevInfo->bSaveEnd   = IMG_TRUE;
++
++	/* clear the stamps, in case there is no Kick */
++	psDevInfo->pui64StartTimeById[ui32QueryId] = 0UL;
++	psDevInfo->pui64EndTimeById[ui32QueryId]   = 0UL;
++	OSWriteMemoryBarrier(&psDevInfo->pui64EndTimeById[ui32QueryId]);
++
++	/* save the active query index */
++	psDevInfo->ui32ActiveQueryId = ui32QueryId;
++
++#if !defined(PVRSRV_USE_BRIDGE_LOCK)
++	OSLockRelease(psDevInfo->hTimerQueryLock);
++#endif
++
++	return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA    * psConnection,
++                         PVRSRV_DEVICE_NODE * psDeviceNode)
++{
++	PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
++
++	PVR_UNREFERENCED_PARAMETER(psConnection);
++
++#if !defined(PVRSRV_USE_BRIDGE_LOCK)
++	OSLockAcquire(psDevInfo->hTimerQueryLock);
++#endif
++
++	/* clear off the flags set by Begin(). Note that _START_TIME is
++	 * probably already cleared by Kick()
++	 */
++	psDevInfo->bSaveStart = IMG_FALSE;
++	psDevInfo->bSaveEnd   = IMG_FALSE;
++
++#if !defined(PVRSRV_USE_BRIDGE_LOCK)
++	OSLockRelease(psDevInfo->hTimerQueryLock);
++#endif
++
++	return PVRSRV_OK;
++}
++
++
++PVRSRV_ERROR
++PVRSRVRGXQueryTimerKM(CONNECTION_DATA    * psConnection,
++                      PVRSRV_DEVICE_NODE * psDeviceNode,
++                      IMG_UINT32         ui32QueryId,
++                      IMG_UINT64         * pui64StartTime,
++                      IMG_UINT64         * pui64EndTime)
++{
++	PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
++	IMG_UINT32 ui32Scheduled;
++	IMG_UINT32 ui32Completed;
++	PVRSRV_ERROR eError;
++
++	PVR_UNREFERENCED_PARAMETER(psConnection);
++
++	if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++#if !defined(PVRSRV_USE_BRIDGE_LOCK)
++	OSLockAcquire(psDevInfo->hTimerQueryLock);
++#endif
++
++	ui32Scheduled = psDevInfo->aui32ScheduledOnId[ui32QueryId];
++	ui32Completed = psDevInfo->pui32CompletedById[ui32QueryId];
++
++	/* If there has been no kick since the Begin() on this ID we return zeros,
++	 * as Begin() cleared the stamps. If Begin() was never called the returned
++	 * data is undefined - but still safe from the Services point of view.
++	 */
++	if (ui32Completed >= ui32Scheduled)
++	{
++		*pui64StartTime = psDevInfo->pui64StartTimeById[ui32QueryId];
++		*pui64EndTime   = psDevInfo->pui64EndTimeById[ui32QueryId];
++
++		eError = PVRSRV_OK;
++	}
++	else
++	{
++		eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
++	}
++
++#if !defined(PVRSRV_USE_BRIDGE_LOCK)
++	OSLockRelease(psDevInfo->hTimerQueryLock);
++#endif
++	return eError;
++}
++
++
++
++/******************************************************************************
++ NOT BRIDGED/EXPORTED FUNCS
++******************************************************************************/
++/* writes a timestamp command in the client CCB */
++void
++RGXWriteTimestampCommand(void ** ppvPtr,
++                         RGXFWIF_CCB_CMD_TYPE eCmdType,
++                         PRGXFWIF_TIMESTAMP_ADDR pAddr)
++{
++	RGXFWIF_CCB_CMD_HEADER * psHeader;
++	PRGXFWIF_TIMESTAMP_ADDR * psTimestampAddr;
++
++	psHeader = (RGXFWIF_CCB_CMD_HEADER *) (*ppvPtr);
++
++	PVR_ASSERT(eCmdType == RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP
++	           || eCmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP);
++
++	psHeader->eCmdType = eCmdType;
++	psHeader->ui32CmdSize = (sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1);
++
++	(*ppvPtr) = IMG_OFFSET_ADDR(*ppvPtr, sizeof(RGXFWIF_CCB_CMD_HEADER));
++
++	psTimestampAddr = (PRGXFWIF_TIMESTAMP_ADDR *) *ppvPtr;
++	psTimestampAddr->ui32Addr = pAddr.ui32Addr;
++
++	(*ppvPtr) = IMG_OFFSET_ADDR(*ppvPtr, psHeader->ui32CmdSize);
++}
++
++
++void
++RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO      * psDevInfo,
++                          PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
++                          PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
++                          PRGXFWIF_UFO_ADDR       * ppUpdate)
++{
++	if (ppPreAddr != NULL)
++	{
++		if (psDevInfo->bSaveStart)
++		{
++			/* drop the SaveStart on the first Kick */
++			psDevInfo->bSaveStart = IMG_FALSE;
++
++			RGXSetFirmwareAddress(ppPreAddr,
++			                      psDevInfo->psStartTimeMemDesc,
++			                      sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
++			                      RFW_FWADDR_NOREF_FLAG);
++		}
++		else
++		{
++			ppPreAddr->ui32Addr = 0;
++		}
++	}
++
++	if (ppPostAddr != NULL && ppUpdate != NULL)
++	{
++		if (psDevInfo->bSaveEnd)
++		{
++			RGXSetFirmwareAddress(ppPostAddr,
++			                      psDevInfo->psEndTimeMemDesc,
++			                      sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
++			                      RFW_FWADDR_NOREF_FLAG);
++
++			psDevInfo->aui32ScheduledOnId[psDevInfo->ui32ActiveQueryId]++;
++
++			RGXSetFirmwareAddress(ppUpdate,
++			                      psDevInfo->psCompletedMemDesc,
++			                      sizeof(IMG_UINT32) * psDevInfo->ui32ActiveQueryId,
++			                      RFW_FWADDR_NOREF_FLAG);
++		}
++		else
++		{
++			ppUpdate->ui32Addr   = 0;
++			ppPostAddr->ui32Addr = 0;
++		}
++	}
++}
++
++
++/******************************************************************************
++ End of file (rgxtimerquery.c)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/rgxtimerquery.h b/drivers/gpu/drm/img-rogue/rgxtimerquery.h
+new file mode 100644
+index 000000000000..81898860dc23
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxtimerquery.h
+@@ -0,0 +1,123 @@
++/*************************************************************************/ /*!
++@File
++@Title          RGX Timer queries
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Header for the RGX Timer queries functionality
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGX_TIMERQUERIES_H)
++#define RGX_TIMERQUERIES_H
++
++#include "pvrsrv_error.h"
++#include "img_types.h"
++#include "device.h"
++#include "rgxdevice.h"
++
++#include "connection_server.h"
++
++/*************************************************************************/ /*!
++@Function       PVRSRVRGXBeginTimerQueryKM
++@Description    Opens a new timer query.
++
++@Input          ui32QueryId an identifier between [ 0 and RGX_MAX_TIMER_QUERIES - 1 ]
++@Return         PVRSRV_OK on success.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA    * psConnection,
++                           PVRSRV_DEVICE_NODE * psDeviceNode,
++                           IMG_UINT32         ui32QueryId);
++
++
++/*************************************************************************/ /*!
++@Function       PVRSRVRGXEndTimerQueryKM
++@Description    Closes a timer query.
++
++                There is no ui32QueryId argument because queries cannot
++                overlap: only one query can be open at a time.
++@Return         PVRSRV_OK on success.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA    * psConnection,
++                         PVRSRV_DEVICE_NODE * psDeviceNode);
++
++
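++/*
++   Illustrative usage sketch only, not part of this header's API: error
++   handling and the actual GPU kick are elided, and query ID 0 is just one
++   valid slot in [ 0, RGX_MAX_TIMER_QUERIES - 1 ].
++
++       PVRSRVRGXBeginTimerQueryKM(psConnection, psDeviceNode, 0);
++       ... submit GPU work for this device ...
++       PVRSRVRGXEndTimerQueryKM(psConnection, psDeviceNode);
++
++       IMG_UINT64 ui64Start, ui64End;
++       if (PVRSRVRGXQueryTimerKM(psConnection, psDeviceNode, 0,
++                                 &ui64Start, &ui64End) == PVRSRV_OK)
++       {
++           ... ui64End - ui64Start covers the queried GPU work ...
++       }
++
++   A PVRSRV_ERROR_RESOURCE_UNAVAILABLE return from PVRSRVRGXQueryTimerKM
++   means the device is still busy with the queried work, so callers
++   typically retry the query later.
++*/
++
++/*************************************************************************/ /*!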
++@Function PVRSRVRGXQueryTimerKM ++@Description Queries the state of the specified timer ++ ++@Input ui32QueryId an identifier between [ 0 and RGX_MAX_TIMER_QUERIES - 1 ] ++@Out pui64StartTime ++@Out pui64EndTime ++@Return PVRSRV_OK on success. ++ PVRSRV_ERROR_RESOURCE_UNAVAILABLE if the device is still busy with ++ operations from the queried period ++ other error code otherwise ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT32 ui32QueryId, ++ IMG_UINT64 * pui64StartTime, ++ IMG_UINT64 * pui64EndTime); ++ ++ ++ ++/****************************************************************************** ++ NON BRIDGED/EXPORTED interface ++******************************************************************************/ ++ ++/* write the timestamp cmd from the helper*/ ++void ++RGXWriteTimestampCommand(void ** ppvCmd, ++ RGXFWIF_CCB_CMD_TYPE eCmdType, ++ PRGXFWIF_TIMESTAMP_ADDR pAddr); ++ ++/* get the relevant data from the Kick to the helper*/ ++void ++RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo, ++ PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr, ++ PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr, ++ PRGXFWIF_UFO_ADDR * ppUpdate); ++ ++#endif /* RGX_TIMERQUERIES_H */ ++ ++/****************************************************************************** ++ End of file (rgxtimerquery.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxtransfer.c b/drivers/gpu/drm/img-rogue/rgxtransfer.c +new file mode 100644 +index 000000000000..91b3b8d2830e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxtransfer.c +@@ -0,0 +1,1805 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device specific transfer queue routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Device specific functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "pdump_km.h" ++#include "rgxdevice.h" ++#include "rgxccb.h" ++#include "rgxutils.h" ++#include "rgxfwutils.h" ++#include "rgxtransfer.h" ++#include "rgx_tq_shared.h" ++#include "rgxmem.h" ++#include "allocmem.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "osfunc.h" ++#include "pvr_debug.h" ++#include "pvrsrv.h" ++#include "rgx_fwif_resetframework.h" ++#include "rgx_memallocflags.h" ++#include "rgxhwperf.h" ++#include "ospvr_gputrace.h" ++#include "htbuffer.h" ++#include "rgxshader.h" ++ ++#include "pdump_km.h" ++ ++#include "sync_server.h" ++#include "sync_internal.h" ++#include "sync.h" ++#include "rgx_bvnc_defs_km.h" ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++#include "pvr_buffer_sync.h" ++#endif ++ ++#include "sync_checkpoint.h" ++#include "sync_checkpoint_internal.h" ++ ++#include "rgxtimerquery.h" ++ ++/* Enable this to dump the compiled list of UFOs prior to kick call */ ++#define ENABLE_TQ_UFO_DUMP 0 ++ ++//#define TRANSFER_CHECKPOINT_DEBUG 1 ++ ++#if defined(TRANSFER_CHECKPOINT_DEBUG) ++#define CHKPT_DBG(X) PVR_DPF(X) ++#else ++#define CHKPT_DBG(X) ++#endif ++ ++typedef struct { ++ DEVMEM_MEMDESC *psFWContextStateMemDesc; ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; ++ IMG_UINT32 ui32Priority; ++#if defined(SUPPORT_BUFFER_SYNC) ++ struct pvr_buffer_sync_context *psBufferSyncContext; ++#endif ++} RGX_SERVER_TQ_3D_DATA; ++ ++typedef struct { ++ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; ++ IMG_UINT32 ui32Priority; ++#if defined(SUPPORT_BUFFER_SYNC) ++ struct pvr_buffer_sync_context *psBufferSyncContext; ++#endif ++} RGX_SERVER_TQ_2D_DATA; ++ ++struct _RGX_SERVER_TQ_CONTEXT_ { ++ PVRSRV_DEVICE_NODE *psDeviceNode; ++ DEVMEM_MEMDESC *psFWFrameworkMemDesc; ++ DEVMEM_MEMDESC *psFWTransferContextMemDesc; ++ IMG_UINT32 ui32Flags; ++#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D (1<<0) ++#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D (1<<1) ++ RGX_SERVER_TQ_3D_DATA s3DData; ++ RGX_SERVER_TQ_2D_DATA s2DData; ++ DLLIST_NODE sListNode; ++ ATOMIC_T hIntJobRef; ++ IMG_UINT32 ui32PDumpFlags; ++ /* per-prepare sync address lists */ ++ SYNC_ADDR_LIST asSyncAddrListFence[TQ_MAX_PREPARES_PER_SUBMIT]; ++ SYNC_ADDR_LIST asSyncAddrListUpdate[TQ_MAX_PREPARES_PER_SUBMIT]; ++ POS_LOCK hLock; ++}; ++ ++/* ++ Static functions used by transfer context code ++*/ ++static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEM_MEMDESC *psAllocatedMemDesc, ++ IMG_UINT32 ui32AllocatedOffset, ++ DEVMEM_MEMDESC *psFWMemContextMemDesc, ++ IMG_UINT32 ui32Priority, ++ RGX_COMMON_CONTEXT_INFO *psInfo, ++ RGX_SERVER_TQ_3D_DATA *ps3DData, ++ IMG_UINT32 ui32CCBAllocSizeLog2, ++ IMG_UINT32 ui32CCBMaxAllocSizeLog2, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ PVRSRV_ERROR eError; ++ IMG_UINT ui3DRegISPStateStoreSize = 0; ++ 
IMG_UINT uiNumISPStoreRegs = 1; /* default value 1 expected */ ++ /* ++ Allocate device memory for the firmware GPU context suspend state. ++ Note: the FW reads/writes the state to memory by accessing the GPU register interface. ++ */ ++ PDUMPCOMMENT(psDeviceNode, "Allocate RGX firmware TQ/3D context suspend state"); ++ ++ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY)) ++ { ++ uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode, ++ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX); ++ } ++ ++ /* Calculate the size of the 3DCTX ISP state */ ++ ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) + ++ uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]); ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ ps3DData->psBufferSyncContext = ++ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, ++ "rogue-tq3d"); ++ if (IS_ERR(ps3DData->psBufferSyncContext)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to create buffer_sync context (err=%ld)", ++ __func__, PTR_ERR(ps3DData->psBufferSyncContext))); ++ ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto fail_buffer_sync_context_create; ++ } ++#endif ++ ++ eError = DevmemFwAllocate(psDevInfo, ++ ui3DRegISPStateStoreSize, ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwTQ3DContext", ++ &ps3DData->psFWContextStateMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_contextswitchstate; ++ } ++ ++ eError = FWCommonContextAllocate(psConnection, ++ psDeviceNode, ++ REQ_TYPE_TQ_3D, ++ RGXFWIF_DM_3D, ++ NULL, ++ psAllocatedMemDesc, ++ ui32AllocatedOffset, ++ psFWMemContextMemDesc, ++ ps3DData->psFWContextStateMemDesc, ++ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ3D_CCB_SIZE_LOG2, ++ ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ3D_CCB_MAX_SIZE_LOG2, ++ ui32ContextFlags, ++ ui32Priority, ++ UINT_MAX, /* max deadline MS */ ++ ui64RobustnessAddress, ++ psInfo, ++ &ps3DData->psServerCommonContext); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_contextalloc; ++ } ++ ++ ++ PDUMPCOMMENT(psDeviceNode, "Dump 3D context suspend state buffer"); ++ DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS); ++ ++ ps3DData->ui32Priority = ui32Priority; ++ return PVRSRV_OK; ++ ++fail_contextalloc: ++ DevmemFwUnmapAndFree(psDevInfo, ps3DData->psFWContextStateMemDesc); ++fail_contextswitchstate: ++#if defined(SUPPORT_BUFFER_SYNC) ++ pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext); ++ ps3DData->psBufferSyncContext = NULL; ++fail_buffer_sync_context_create: ++#endif ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++ ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ DEVMEM_MEMDESC *psFWMemContextMemDesc, ++ IMG_UINT32 ui32Priority, ++ RGX_COMMON_CONTEXT_INFO *psInfo, ++ RGX_SERVER_TQ_2D_DATA *ps2DData, ++ IMG_UINT32 ui32CCBAllocSizeLog2, ++ IMG_UINT32 ui32CCBMaxAllocSizeLog2, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress) ++{ ++ PVRSRV_ERROR eError; ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ ps2DData->psBufferSyncContext = ++ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice, ++ "rogue-tqtla"); ++ if (IS_ERR(ps2DData->psBufferSyncContext)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: failed to create buffer_sync context (err=%ld)", ++ __func__, PTR_ERR(ps2DData->psBufferSyncContext))); ++ ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto fail_buffer_sync_context_create; ++ } ++#endif ++ ++ eError 
= FWCommonContextAllocate(psConnection, ++ psDeviceNode, ++ REQ_TYPE_TQ_2D, ++ RGXFWIF_DM_2D, ++ NULL, ++ NULL, ++ 0, ++ psFWMemContextMemDesc, ++ NULL, ++ ui32CCBAllocSizeLog2 ? ui32CCBAllocSizeLog2 : RGX_TQ2D_CCB_SIZE_LOG2, ++ ui32CCBMaxAllocSizeLog2 ? ui32CCBMaxAllocSizeLog2 : RGX_TQ2D_CCB_MAX_SIZE_LOG2, ++ ui32ContextFlags, ++ ui32Priority, ++ UINT_MAX, /* max deadline MS */ ++ ui64RobustnessAddress, ++ psInfo, ++ &ps2DData->psServerCommonContext); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_contextalloc; ++ } ++ ++ ps2DData->ui32Priority = ui32Priority; ++ return PVRSRV_OK; ++ ++fail_contextalloc: ++#if defined(SUPPORT_BUFFER_SYNC) ++ pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext); ++ ps2DData->psBufferSyncContext = NULL; ++fail_buffer_sync_context_create: ++#endif ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++ ++static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Check if the FW has finished with this resource ... */ ++ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, ++ ps2DData->psServerCommonContext, ++ RGXFWIF_DM_2D, ++ ui32PDumpFlags); ++ if (eError == PVRSRV_ERROR_RETRY) ++ { ++ return eError; ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++ /* ... it has so we can free its resources */ ++ FWCommonContextFree(ps2DData->psServerCommonContext); ++ ps2DData->psServerCommonContext = NULL; ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext); ++ ps2DData->psBufferSyncContext = NULL; ++#endif ++ ++ return PVRSRV_OK; ++} ++#endif /* #if defined(RGX_FEATURE_TLA_BIT_MASK) */ ++ ++static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError; ++ ++ /* Check if the FW has finished with this resource ... */ ++ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode, ++ ps3DData->psServerCommonContext, ++ RGXFWIF_DM_3D, ++ ui32PDumpFlags); ++ if (eError == PVRSRV_ERROR_RETRY) ++ { ++ return eError; ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ return eError; ++ } ++ ++ /* ... 
it has so we can free its resources */ ++ DevmemFwUnmapAndFree(psDeviceNode->pvDevice, ps3DData->psFWContextStateMemDesc); ++ FWCommonContextFree(ps3DData->psServerCommonContext); ++ ps3DData->psServerCommonContext = NULL; ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext); ++ ps3DData->psBufferSyncContext = NULL; ++#endif ++ ++ return PVRSRV_OK; ++} ++ ++ ++/* ++ * PVRSRVCreateTransferContextKM ++ */ ++PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Priority, ++ IMG_UINT32 ui32FrameworkCommandSize, ++ IMG_PBYTE pabyFrameworkCommand, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32PackedCCBSizeU8888, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress, ++ RGX_SERVER_TQ_CONTEXT **ppsTransferContext, ++ PMR **ppsCLIPMRMem, ++ PMR **ppsUSCPMRMem) ++{ ++ RGX_SERVER_TQ_CONTEXT *psTransferContext; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData); ++ RGX_COMMON_CONTEXT_INFO sInfo = {NULL}; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ /* Allocate the server side structure */ ++ *ppsTransferContext = NULL; ++ psTransferContext = OSAllocZMem(sizeof(*psTransferContext)); ++ if (psTransferContext == NULL) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ /* ++ Create the FW transfer context, this has the TQ common ++ context embedded within it ++ */ ++ eError = DevmemFwAllocate(psDevInfo, ++ sizeof(RGXFWIF_FWTRANSFERCONTEXT), ++ RGX_FWCOMCTX_ALLOCFLAGS, ++ "FwTransferContext", ++ &psTransferContext->psFWTransferContextMemDesc); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_fwtransfercontext; ++ } ++ ++ eError = OSLockCreate(&psTransferContext->hLock); ++ ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_createlock; ++ } ++ ++ psTransferContext->psDeviceNode = psDeviceNode; ++ ++ if (ui32FrameworkCommandSize) ++ { ++ /* ++ * Create the FW framework buffer ++ */ ++ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, ++ &psTransferContext->psFWFrameworkMemDesc, ++ ui32FrameworkCommandSize); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to allocate firmware GPU framework state (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_frameworkcreate; ++ } ++ ++ /* Copy the Framework client data into the framework buffer */ ++ eError = PVRSRVRGXFrameworkCopyCommand(psDeviceNode, ++ psTransferContext->psFWFrameworkMemDesc, ++ pabyFrameworkCommand, ++ ui32FrameworkCommandSize); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Failed to populate the framework buffer (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_frameworkcopy; ++ } ++ ++ sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc; ++ } ++ ++ eError = _Create3DTransferContext(psConnection, ++ psDeviceNode, ++ psTransferContext->psFWTransferContextMemDesc, ++ offsetof(RGXFWIF_FWTRANSFERCONTEXT, sTQContext), ++ psFWMemContextMemDesc, ++ ui32Priority, ++ &sInfo, ++ &psTransferContext->s3DData, ++ U32toU8_Unpack3(ui32PackedCCBSizeU8888), ++ U32toU8_Unpack4(ui32PackedCCBSizeU8888), ++ ui32ContextFlags, ++ ui64RobustnessAddress); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_3dtransfercontext; ++ } ++ psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D; ++ ++#if 
defined(RGX_FEATURE_TLA_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)) ++ { ++ eError = _Create2DTransferContext(psConnection, ++ psDeviceNode, ++ psFWMemContextMemDesc, ++ ui32Priority, ++ &sInfo, ++ &psTransferContext->s2DData, ++ U32toU8_Unpack1(ui32PackedCCBSizeU8888), ++ U32toU8_Unpack2(ui32PackedCCBSizeU8888), ++ ui32ContextFlags, ++ ui64RobustnessAddress); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_2dtransfercontext; ++ } ++ psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D; ++ } ++#endif ++ ++ PVRSRVTQAcquireShaders(psDeviceNode, ppsCLIPMRMem, ppsUSCPMRMem); ++ ++ { ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); ++ dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); ++ *ppsTransferContext = psTransferContext; ++ } ++ ++ return PVRSRV_OK; ++ ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++fail_2dtransfercontext: ++ _Destroy3DTransferContext(&psTransferContext->s3DData, ++ psTransferContext->psDeviceNode, ++ psTransferContext->ui32PDumpFlags); ++#endif ++fail_3dtransfercontext: ++fail_frameworkcopy: ++ if (psTransferContext->psFWFrameworkMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); ++ } ++fail_frameworkcreate: ++ OSLockDestroy(psTransferContext->hLock); ++fail_createlock: ++ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); ++fail_fwtransfercontext: ++ OSFreeMem(psTransferContext); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ *ppsTransferContext = NULL; ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext) ++{ ++ PVRSRV_ERROR eError; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice; ++ IMG_UINT32 i; ++ ++ /* remove node from list before calling destroy - as destroy, if successful ++ * will invalidate the node ++ * must be re-added if destroy fails ++ */ ++ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); ++ dllist_remove_node(&(psTransferContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); ++ ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ if ((psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && ++ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) ++ { ++ eError = _Destroy2DTransferContext(&psTransferContext->s2DData, ++ psTransferContext->psDeviceNode, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_destroy2d; ++ } ++ /* We've freed the 2D context, don't try to free it again */ ++ psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D; ++ } ++#endif ++ ++ if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) ++ { ++ eError = _Destroy3DTransferContext(&psTransferContext->s3DData, ++ psTransferContext->psDeviceNode, ++ PDUMP_FLAGS_CONTINUOUS); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_destroy3d; ++ } ++ /* We've freed the 3D context, don't try to free it again */ ++ psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D; ++ } ++ ++ /* free any resources within the per-prepare UFO address stores */ ++ for (i = 0; i < TQ_MAX_PREPARES_PER_SUBMIT; i++) ++ { ++ SyncAddrListDeinit(&psTransferContext->asSyncAddrListFence[i]); ++ SyncAddrListDeinit(&psTransferContext->asSyncAddrListUpdate[i]); ++ } ++ ++ if (psTransferContext->psFWFrameworkMemDesc) ++ { ++ DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc); ++ } ++ ++ 
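The create path above sizes each client CCB from a single packed word: U32toU8_Unpack1/2 feed the 2D context's log2 size and log2 max size, and U32toU8_Unpack3/4 the 3D context's, with a zero field meaning "use the RGX_TQ*_CCB_SIZE_LOG2 default". A minimal standalone sketch of that byte-packing scheme; the pack helper and the byte order are assumptions, only the Unpack1..4 names appear in the patch:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packer mirroring the U32toU8_Unpack1..4 helpers used above:
 * byte 1 is assumed to be bits 0..7, byte 2 bits 8..15, and so on. */
#define U8toU32_Pack(b1, b2, b3, b4) \
    (((uint32_t)(b1) & 0xFFU)         | (((uint32_t)(b2) & 0xFFU) << 8) | \
     (((uint32_t)(b3) & 0xFFU) << 16) | (((uint32_t)(b4) & 0xFFU) << 24))
#define U32toU8_Unpack1(v) ((v)         & 0xFFU)
#define U32toU8_Unpack2(v) (((v) >> 8)  & 0xFFU)
#define U32toU8_Unpack3(v) (((v) >> 16) & 0xFFU)
#define U32toU8_Unpack4(v) (((v) >> 24) & 0xFFU)

int main(void)
{
    /* 2D CCB log2 size/max size, then 3D CCB log2 size/max size; a zero
     * field would select the driver's built-in default above. */
    uint32_t ui32PackedCCBSizeU8888 = U8toU32_Pack(14, 16, 15, 17);

    printf("2D: size=2^%u max=2^%u, 3D: size=2^%u max=2^%u\n",
           U32toU8_Unpack1(ui32PackedCCBSizeU8888),
           U32toU8_Unpack2(ui32PackedCCBSizeU8888),
           U32toU8_Unpack3(ui32PackedCCBSizeU8888),
           U32toU8_Unpack4(ui32PackedCCBSizeU8888));
    return 0;
}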
DevmemFwUnmapAndFree(psDevInfo, psTransferContext->psFWTransferContextMemDesc); ++ ++ OSLockDestroy(psTransferContext->hLock); ++ ++ OSFreeMem(psTransferContext); ++ ++ return PVRSRV_OK; ++ ++fail_destroy3d: ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ ++fail_destroy2d: ++#endif ++ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock); ++ dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode)); ++ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++/* ++ * PVRSRVSubmitTQ3DKickKM ++ */ ++PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, ++ IMG_UINT32 ui32PrepareCount, ++ IMG_UINT32 *paui32ClientUpdateCount, ++ SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, ++ IMG_UINT32 **papaui32ClientUpdateSyncOffset, ++ IMG_UINT32 **papaui32ClientUpdateValue, ++ PVRSRV_FENCE iCheckFence, ++ PVRSRV_TIMELINE i2DUpdateTimeline, ++ PVRSRV_FENCE *pi2DUpdateFence, ++ PVRSRV_TIMELINE i3DUpdateTimeline, ++ PVRSRV_FENCE *pi3DUpdateFence, ++ IMG_CHAR szFenceName[32], ++ IMG_UINT32 *paui32FWCommandSize, ++ IMG_UINT8 **papaui8FWCommand, ++ IMG_UINT32 *pui32TQPrepareFlags, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32SyncPMRCount, ++ IMG_UINT32 *paui32SyncPMRFlags, ++ PMR **ppsSyncPMRs) ++{ ++ PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode; ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper; ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper; ++#endif ++ IMG_UINT32 ui323DCmdCount = 0; ++ IMG_UINT32 ui323DCmdLast = 0; ++ IMG_UINT32 ui323DCmdOffset = 0; ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ IMG_UINT32 ui322DCmdCount = 0; ++ IMG_UINT32 ui322DCmdLast = 0; ++ IMG_UINT32 ui322DCmdOffset = 0; ++#endif ++ IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE; ++ IMG_UINT32 i; ++ IMG_UINT64 uiCheckFenceUID = 0; ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ IMG_UINT64 ui2DUpdateFenceUID = 0; ++#endif ++ IMG_UINT64 ui3DUpdateFenceUID = 0; ++ ++ PSYNC_CHECKPOINT ps3DUpdateSyncCheckpoint = NULL; ++ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL; ++ IMG_UINT32 ui32FenceSyncCheckpointCount = 0; ++ IMG_UINT32 *pui323DIntAllocatedUpdateValues = NULL; ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ PSYNC_CHECKPOINT ps2DUpdateSyncCheckpoint = NULL; ++ IMG_UINT32 *pui322DIntAllocatedUpdateValues = NULL; ++ PVRSRV_CLIENT_SYNC_PRIM *ps2DFenceTimelineUpdateSync = NULL; ++ IMG_UINT32 ui322DFenceTimelineUpdateValue = 0; ++ void *pv2DUpdateFenceFinaliseData = NULL; ++#endif ++ PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL; ++ IMG_UINT32 ui323DFenceTimelineUpdateValue = 0; ++ void *pv3DUpdateFenceFinaliseData = NULL; ++#if defined(SUPPORT_BUFFER_SYNC) ++ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL; ++ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL; ++ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL; ++ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0; ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_ERROR eError2; ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ PVRSRV_FENCE i2DUpdateFence = PVRSRV_NO_FENCE; ++#endif ++ PVRSRV_FENCE i3DUpdateFence = PVRSRV_NO_FENCE; ++ IMG_UINT32 ui32IntJobRef = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal); ++ IMG_UINT32 ui32PreparesDone = 0; ++ ++ ++ PRGXFWIF_TIMESTAMP_ADDR pPreAddr; ++ PRGXFWIF_TIMESTAMP_ADDR pPostAddr; ++ PRGXFWIF_UFO_ADDR pRMWUFOAddr; ++ ++#if 
!defined(RGX_FEATURE_TLA_BIT_MASK)
++	PVR_UNREFERENCED_PARAMETER(i2DUpdateTimeline);
++	PVR_UNREFERENCED_PARAMETER(pi2DUpdateFence);
++#endif
++
++	RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice,
++	                          &pPreAddr,
++	                          &pPostAddr,
++	                          &pRMWUFOAddr);
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++	if (i2DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi2DUpdateFence)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++#endif
++	if (i3DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi3DUpdateFence)
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	/* Validate sync prim fence/update value ptrs
++	 * for each prepare.
++	 */
++	{
++		IMG_UINT32 ui32Prepare;
++		IMG_UINT32 *pui32UpdateCount = paui32ClientUpdateCount;
++		IMG_UINT32 **papui32UpdateValue = papaui32ClientUpdateValue;
++
++		/* Check that we have not been given a null ptr for
++		 * update count parameters.
++		 */
++		PVR_LOG_RETURN_IF_FALSE((paui32ClientUpdateCount != NULL),
++		                        "paui32ClientUpdateCount NULL",
++		                        PVRSRV_ERROR_INVALID_PARAMS);
++
++		for (ui32Prepare=0; ui32Prepare<ui32PrepareCount; ui32Prepare++)
++		{
++			/* Ensure we have not been given a null ptr to update
++			 * values for a prepare with a non-zero update count.
++			 */
++			if (*pui32UpdateCount > 0)
++			{
++				PVR_LOG_RETURN_IF_FALSE(*papui32UpdateValue != NULL,
++				                        "paui32ClientUpdateValue NULL but "
++				                        "ui32ClientUpdateCount > 0",
++				                        PVRSRV_ERROR_INVALID_PARAMS);
++			}
++			/* Advance local ptr to update values ptr for next prepare. */
++			papui32UpdateValue++;
++			/* Advance local ptr to update count for next prepare. */
++			pui32UpdateCount++;
++		}
++	}
++
++	/* Ensure the string is null-terminated (Required for safety) */
++	szFenceName[31] = '\0';
++
++	if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT))
++	{
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	if (ui32SyncPMRCount != 0)
++	{
++		if (!ppsSyncPMRs)
++		{
++			return PVRSRV_ERROR_INVALID_PARAMS;
++		}
++
++#if defined(SUPPORT_BUFFER_SYNC)
++		/* PMR sync is valid only when there is no batching */
++		if ((ui32PrepareCount != 1))
++#endif
++		{
++			return PVRSRV_ERROR_INVALID_PARAMS;
++		}
++	}
++
++	OSLockAcquire(psTransferContext->hLock);
++
++	/* We can't allocate the required amount of stack space on all consumer architectures */
++	pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount);
++	if (pas3DCmdHelper == NULL)
++	{
++		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++		goto fail_alloc3dhelper;
++	}
++
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++	pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount);
++	if (pas2DCmdHelper == NULL)
++	{
++		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++		goto fail_alloc2dhelper;
++	}
++#endif
++
++	if (iCheckFence != PVRSRV_NO_FENCE)
++	{
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psDeviceNode->hSyncCheckpointContext));
++		/* Resolve the sync checkpoints that make up the input fence */
++		eError = SyncCheckpointResolveFence(psDeviceNode->hSyncCheckpointContext,
++		                                    iCheckFence,
++		                                    &ui32FenceSyncCheckpointCount,
++		                                    &apsFenceSyncCheckpoints,
++		                                    &uiCheckFenceUID,
++		                                    ui32PDumpFlags);
++		if (eError != PVRSRV_OK)
++		{
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
++			goto fail_resolve_fencesync_input_fence;
++		}
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
++#if defined(TRANSFER_CHECKPOINT_DEBUG)
++		if (ui32FenceSyncCheckpointCount > 0)
++		{
++			IMG_UINT32 ii;
++			for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
++			{
++				PSYNC_CHECKPOINT psNextCheckpoint = apsFenceSyncCheckpoints[ii];
++				CHKPT_DBG((PVR_DBG_ERROR, "%s:   apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
++			}
++		}
++#endif
++	}
++
++	/*
++		Ensure we do the right thing for server syncs which cross call boundaries
++	*/
++	for (i=0;i<ui32PrepareCount;i++)
++	{
++		if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
++		{
++			ui323DCmdLast++;
++		}
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++		else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D))
++		{
++			ui322DCmdLast++;
++		}
++#endif
++	}
++
++	/*
++		Init the command helper commands for all the prepares
++	*/
++	for (i=0;i<ui32PrepareCount;i++)
++	{
++		RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
++		RGX_CLIENT_CCB *psClientCCB;
++		RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
++		RGXFWIF_CCB_CMD_TYPE eType;
++		IMG_CHAR *pszCommandName;
++		PRGXFWIF_UFO_ADDR pauiIntFenceUFOAddress = NULL;
++		PRGXFWIF_UFO_ADDR pauiIntUpdateUFOAddress = NULL;
++		SYNC_ADDR_LIST *psSyncAddrListFence = &psTransferContext->asSyncAddrListFence[i];
++		SYNC_ADDR_LIST *psSyncAddrListUpdate = &psTransferContext->asSyncAddrListUpdate[i];
++		IMG_UINT32 ui32IntClientFenceCount = 0U;
++		IMG_UINT32 ui32IntClientUpdateCount = paui32ClientUpdateCount[i];
++		IMG_UINT32 *paui32IntUpdateValue = papaui32ClientUpdateValue[i];
++#if defined(SUPPORT_BUFFER_SYNC)
++		struct pvr_buffer_sync_context *psBufferSyncContext;
++#endif
++
++		PVRSRV_FENCE *piUpdateFence = NULL;
++		PVRSRV_TIMELINE iUpdateTimeline = PVRSRV_NO_TIMELINE;
++		void **ppvUpdateFenceFinaliseData = NULL;
++		PSYNC_CHECKPOINT * ppsUpdateSyncCheckpoint = NULL;
++		PVRSRV_CLIENT_SYNC_PRIM **ppsFenceTimelineUpdateSync = NULL;
++		IMG_UINT32 *pui32FenceTimelineUpdateValue = NULL;
++		IMG_UINT32 **ppui32IntAllocatedUpdateValues = NULL;
++		IMG_BOOL bCheckFence = IMG_FALSE;
++		IMG_BOOL bUpdateFence = IMG_FALSE;
++		IMG_UINT64 *puiUpdateFenceUID = NULL;
++
++		IMG_BOOL bCCBStateOpen = IMG_FALSE;
++
++		if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
++		{
++			psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext;
++			psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
++			pszCommandName = "TQ-3D";
++			psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++];
++			eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D;
++#if defined(SUPPORT_BUFFER_SYNC)
++			psBufferSyncContext = psTransferContext->s3DData.psBufferSyncContext;
++#endif
++			bCheckFence = ui323DCmdCount == 1;
++			bUpdateFence = ui323DCmdCount == ui323DCmdLast
++				&& i3DUpdateTimeline != PVRSRV_NO_TIMELINE;
++
++			if (bUpdateFence)
++			{
++				piUpdateFence = &i3DUpdateFence;
++				iUpdateTimeline = i3DUpdateTimeline;
++				ppvUpdateFenceFinaliseData = &pv3DUpdateFenceFinaliseData;
++				ppsUpdateSyncCheckpoint = &ps3DUpdateSyncCheckpoint;
++				ppsFenceTimelineUpdateSync = &ps3DFenceTimelineUpdateSync;
++				pui32FenceTimelineUpdateValue = &ui323DFenceTimelineUpdateValue;
++				ppui32IntAllocatedUpdateValues = &pui323DIntAllocatedUpdateValues;
++				puiUpdateFenceUID = &ui3DUpdateFenceUID;
++			}
++		}
++		else
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++		if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) &&
++			(RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
++		{
++			psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext;
++			psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
++			pszCommandName = "TQ-2D";
++			psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++];
++			eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D;
++#if defined(SUPPORT_BUFFER_SYNC)
++			psBufferSyncContext = psTransferContext->s2DData.psBufferSyncContext;
++#endif
++			bCheckFence = ui322DCmdCount == 1;
++			bUpdateFence = ui322DCmdCount == ui322DCmdLast
++				&& i2DUpdateTimeline != PVRSRV_NO_TIMELINE;
++
++			if (bUpdateFence)
++			{
++				piUpdateFence = &i2DUpdateFence;
++				iUpdateTimeline = i2DUpdateTimeline;
++				ppvUpdateFenceFinaliseData = &pv2DUpdateFenceFinaliseData;
++				ppsUpdateSyncCheckpoint = &ps2DUpdateSyncCheckpoint;
++				ppsFenceTimelineUpdateSync = &ps2DFenceTimelineUpdateSync;
++				pui32FenceTimelineUpdateValue = &ui322DFenceTimelineUpdateValue;
++				ppui32IntAllocatedUpdateValues = &pui322DIntAllocatedUpdateValues;
++				puiUpdateFenceUID = &ui2DUpdateFenceUID;
++			}
++		}
++		else
++#endif
++		{
++			eError = PVRSRV_ERROR_INVALID_PARAMS;
++			goto fail_prepare_loop;
++		}
++
++		if (i == 0)
++		{
++			ui32PDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ?
PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE; ++ PDUMPCOMMENTWITHFLAGS(psDeviceNode, ui32PDumpFlags, ++ "%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr); ++ psTransferContext->ui32PDumpFlags |= ui32PDumpFlags; ++ } ++ else ++ { ++ IMG_UINT32 ui32NewPDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE; ++ if (ui32NewPDumpFlags != ui32PDumpFlags) ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous command in a batch is not permitted", __func__)); ++ goto fail_prepare_loop; ++ } ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->sSyncAddrListFence, %d fences)", __func__, ui32IntClientFenceCount)); ++ eError = SyncAddrListPopulate(psSyncAddrListFence, ++ 0, ++ NULL, ++ NULL); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_prepare_loop; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->asSyncAddrListUpdate[], %d updates)", __func__, ui32IntClientUpdateCount)); ++ eError = SyncAddrListPopulate(psSyncAddrListUpdate, ++ ui32IntClientUpdateCount, ++ papauiClientUpdateUFODevVarBlock[i], ++ papaui32ClientUpdateSyncOffset[i]); ++ if (eError != PVRSRV_OK) ++ { ++ goto fail_prepare_loop; ++ } ++ if (!pauiIntUpdateUFOAddress) ++ { ++ pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after sync prims) ui32IntClientUpdateCount=%d", __func__, ui32IntClientUpdateCount)); ++ if (ui32SyncPMRCount) ++ { ++#if defined(SUPPORT_BUFFER_SYNC) ++ int err; ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__)); ++ err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext, ++ psTransferContext->psDeviceNode->hSyncCheckpointContext, ++ ui32SyncPMRCount, ++ ppsSyncPMRs, ++ paui32SyncPMRFlags, ++ &ui32BufferFenceSyncCheckpointCount, ++ &apsBufferFenceSyncCheckpoints, ++ &psBufferUpdateSyncCheckpoint, ++ &psBufferSyncData); ++ if (err) ++ { ++ switch (err) ++ { ++ case -EINTR: ++ eError = PVRSRV_ERROR_RETRY; ++ break; ++ case -ENOMEM: ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ break; ++ default: ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ break; ++ } ++ ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorString(eError))); ++ } ++ goto fail_resolve_buffersync_input_fence; ++ } ++ ++ /* Append buffer sync fences */ ++ if (ui32BufferFenceSyncCheckpointCount > 0) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)psSyncAddrListFence , (void*)pauiIntFenceUFOAddress)); ++ SyncAddrListAppendAndDeRefCheckpoints(psSyncAddrListFence, ++ ui32BufferFenceSyncCheckpointCount, ++ apsBufferFenceSyncCheckpoints); ++ if (!pauiIntFenceUFOAddress) ++ { ++ pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs; ++ } ++ ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount; ++ } ++ ++ if (psBufferUpdateSyncCheckpoint) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 buffer sync checkpoint<%p> to TQ Update (&psTransferContext->asSyncAddrListUpdate[i]=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)psBufferUpdateSyncCheckpoint, (void*)psSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress)); ++ /* Append the update (from 
output fence) */ ++ SyncAddrListAppendCheckpoints(psSyncAddrListUpdate, ++ 1, ++ &psBufferUpdateSyncCheckpoint); ++ if (!pauiIntUpdateUFOAddress) ++ { ++ pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs; ++ } ++ ui32IntClientUpdateCount++; ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after buffer_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount)); ++#else /* defined(SUPPORT_BUFFER_SYNC) */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: <--EXIT(%d)", __func__, PVRSRV_ERROR_INVALID_PARAMS)); ++ OSLockRelease(psTransferContext->hLock); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ } ++ ++ /* Create the output fence (if required) */ ++ if (bUpdateFence) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (piUpdateFence=%p, iUpdateTimeline=%d, psTranserContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, piUpdateFence, iUpdateTimeline, (void*)psDeviceNode->hSyncCheckpointContext)); ++ eError = SyncCheckpointCreateFence(psDeviceNode, ++ szFenceName, ++ iUpdateTimeline, ++ psDeviceNode->hSyncCheckpointContext, ++ piUpdateFence, ++ puiUpdateFenceUID, ++ ppvUpdateFenceFinaliseData, ++ ppsUpdateSyncCheckpoint, ++ (void*)ppsFenceTimelineUpdateSync, ++ pui32FenceTimelineUpdateValue, ++ ui32PDumpFlags); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: SyncCheckpointCreateFence failed (%s)", ++ __func__, ++ PVRSRVGetErrorString(eError))); ++ goto fail_prepare_loop; ++ } ++ ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (piUpdateFence=%p)", __func__, piUpdateFence)); ++ ++ /* Append the sync prim update for the timeline (if required) */ ++ if (*ppsFenceTimelineUpdateSync) ++ { ++ IMG_UINT32 *pui32TimelineUpdateWp = NULL; ++ ++ /* Allocate memory to hold the list of update values (including our timeline update) */ ++ *ppui32IntAllocatedUpdateValues = OSAllocMem(sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); ++ if (!*ppui32IntAllocatedUpdateValues) ++ { ++ /* Failed to allocate memory */ ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto fail_prepare_loop; ++ } ++ OSCachedMemSet(*ppui32IntAllocatedUpdateValues, 0xbb, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1)); ++#if defined(SUPPORT_BUFFER_SYNC) ++ if (psBufferUpdateSyncCheckpoint) ++ { ++ /* Copy the update values into the new memory, then append our timeline update value */ ++ OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount-1)); ++ pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + (ui32IntClientUpdateCount-1); ++ } ++ else ++#endif ++ { ++ /* Copy the update values into the new memory, then append our timeline update value */ ++ OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount); ++ pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + ui32IntClientUpdateCount; ++ } ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value 0x%x)", __func__, *pui32FenceTimelineUpdateValue)); ++ /* Now set the additional update value */ ++ *pui32TimelineUpdateWp = *pui32FenceTimelineUpdateValue; ++#if defined(TRANSFER_CHECKPOINT_DEBUG) ++ if (ui32IntClientUpdateCount > 0) ++ { ++ IMG_UINT32 iii; ++ IMG_UINT32 *pui32Tmp = 
(IMG_UINT32*)*ppui32IntAllocatedUpdateValues;
++
++				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++				{
++					CHKPT_DBG((PVR_DBG_ERROR, "%s: *ppui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++					pui32Tmp++;
++				}
++			}
++#endif
++			/* Now append the timeline sync prim addr to the transfer context update list */
++			SyncAddrListAppendSyncPrim(psSyncAddrListUpdate,
++			                           *ppsFenceTimelineUpdateSync);
++			ui32IntClientUpdateCount++;
++#if defined(TRANSFER_CHECKPOINT_DEBUG)
++			if (ui32IntClientUpdateCount > 0)
++			{
++				IMG_UINT32 iii;
++				IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues;
++
++				for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++				{
++					CHKPT_DBG((PVR_DBG_ERROR, "%s: *ppui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++					pui32Tmp++;
++				}
++			}
++#endif
++			/* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: set paui32IntUpdateValue<%p> to point to *ppui32IntAllocatedUpdateValues<%p>", __func__, (void*)paui32IntUpdateValue, (void*)*ppui32IntAllocatedUpdateValues));
++			paui32IntUpdateValue = *ppui32IntAllocatedUpdateValues;
++		}
++	}
++
++	if (bCheckFence && ui32FenceSyncCheckpointCount)
++	{
++		/* Append the checks (from input fence) */
++		if (ui32FenceSyncCheckpointCount > 0)
++		{
++			CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence));
++			SyncAddrListAppendCheckpoints(psSyncAddrListFence,
++			                              ui32FenceSyncCheckpointCount,
++			                              apsFenceSyncCheckpoints);
++			if (!pauiIntFenceUFOAddress)
++			{
++				pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
++			}
++			ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
++		}
++#if defined(TRANSFER_CHECKPOINT_DEBUG)
++		if (ui32IntClientFenceCount > 0)
++		{
++			IMG_UINT32 iii;
++			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
++
++			for (iii=0; iii<ui32IntClientFenceCount; iii++)
++			{
++				CHKPT_DBG((PVR_DBG_ERROR, "%s: psSyncAddrListFence->pasFWAddrs[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++				pui32Tmp++;
++			}
++		}
++#endif
++	}
++	if (bUpdateFence && *ppsUpdateSyncCheckpoint)
++	{
++		/* Append the update (from output fence) */
++		CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (psSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->asSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
++		SyncAddrListAppendCheckpoints(psSyncAddrListUpdate,
++		                              1,
++		                              ppsUpdateSyncCheckpoint);
++		if (!pauiIntUpdateUFOAddress)
++		{
++			pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
++		}
++		ui32IntClientUpdateCount++;
++#if defined(TRANSFER_CHECKPOINT_DEBUG)
++		{
++			IMG_UINT32 iii;
++			IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
++
++			for (iii=0; iii<ui32IntClientUpdateCount; iii++)
++			{
++				CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
++				pui32Tmp++;
++			}
++		}
++#endif
++	}
++	CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
++
++#if (ENABLE_TQ_UFO_DUMP == 1)
++	PVR_DPF((PVR_DBG_ERROR, "%s: dumping TQ fence/updates syncs...", __func__));
++	{
++		IMG_UINT32 ii;
++		PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
++		PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
++		IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
++
++		/* Dump Fence syncs and Update syncs */
++		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ fence syncs (&psTransferContext->asSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->asSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
++		for (ii=0; ii<ui32IntClientFenceCount; ii++)
++		{
++			PVR_ASSERT(psTmpIntFenceUFOAddress->ui32Addr & 0x1);
++			PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
++
++			psTmpIntFenceUFOAddress++;
++		}
++		PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ update syncs (&psTransferContext->asSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->asSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
++		for (ii=0; ii<ui32IntClientUpdateCount; ii++)
++		{
++			if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
++			}
++			else
++			{
++				PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
++				pui32TmpIntUpdateValue++;
++			}
++			psTmpIntUpdateUFOAddress++;
++		}
++	}
++#endif
++
++		ui32PreparesDone++;
++
++		/*
++			Create the command helper data for this command
++		*/
++		RGXCmdHelperInitCmdCCB(psDevInfo,
++		                       psClientCCB,
++		                       0,
++		                       ui32IntClientFenceCount,
++		                       pauiIntFenceUFOAddress,
++		                       NULL, /* fence value */
++		                       ui32IntClientUpdateCount,
++		                       pauiIntUpdateUFOAddress,
++		                       paui32IntUpdateValue,
++		                       paui32FWCommandSize[i],
++		                       papaui8FWCommand[i],
++		                       &pPreAddr,
++		                       &pPostAddr,
++		                       &pRMWUFOAddr,
++		                       eType,
++		                       ui32ExtJobRef,
++		                       ui32IntJobRef,
++		                       ui32PDumpFlags,
++		                       NULL,
++		                       pszCommandName,
++		                       bCCBStateOpen,
++		                       psCmdHelper);
++	}
++
++	/*
++		Acquire space for all the commands in one go
++	*/
++	if (ui323DCmdCount)
++	{
++		eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
++		                                   &pas3DCmdHelper[0]);
++		if (eError != PVRSRV_OK)
++		{
++			goto fail_cmdacquire;
++		}
++	}
++
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++	if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
++	{
++		eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount,
++		                                   &pas2DCmdHelper[0]);
++		if (eError != PVRSRV_OK)
++		{
++			goto fail_cmdacquire;
++		}
++	}
++#endif
++
++	/*
++		We should acquire the kernel CCB(s) space here as the schedule could fail
++		and we would have to roll back all the syncs
++	*/
++
++	if (ui323DCmdCount)
++	{
++		ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
++		RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
++		                          &pas3DCmdHelper[0],
++		                          "TQ_3D",
++		                          FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr);
++	}
++
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++	if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
++	{
++		ui322DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
++		RGXCmdHelperReleaseCmdCCB(ui322DCmdCount,
++		                          &pas2DCmdHelper[0],
++		                          "TQ_2D",
++		                          FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr);
++	}
++#endif
++
++	if (ui323DCmdCount)
++	{
++		RGXFWIF_KCCB_CMD s3DKCCBCmd;
++		IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr;
++		RGX_CLIENT_CCB *ps3DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext);
++
++		/* Take one of the helper data structs and extract the common cmd struct,
++		 * this is used to obtain the frame num. Each command should share the same
++		 * frame number so we can just get the first.
++ */ ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas3DCmdHelper[0]; ++ CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0); ++ ++ /* Construct the kernel 3D CCB command. */ ++ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; ++ s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext); ++ s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps3DTQCCB); ++ s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps3DTQCCB); ++ s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; ++ s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0; ++ ++ HTBLOGK(HTB_SF_MAIN_KICK_3D, ++ s3DKCCBCmd.uCmdData.sCmdKickData.psContext, ++ ui323DCmdOffset, ++ psTransferCmdCmn->ui32FrameNum, ++ ui32ExtJobRef, ++ ui32IntJobRef ++ ); ++ ++ RGXSRV_HWPERF_ENQ(psTransferContext, ++ OSGetCurrentClientProcessIDKM(), ++ ui32FWCtx, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ RGX_HWPERF_KICK_TYPE_TQ3D, ++ iCheckFence, ++ i3DUpdateFence, ++ i3DUpdateTimeline, ++ uiCheckFenceUID, ++ ui3DUpdateFenceUID, ++ NO_DEADLINE, ++ NO_CYCEST); ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError2 = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_3D, ++ &s3DKCCBCmd, ++ ui32PDumpFlags); ++ if (eError2 != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (eError2 != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2)); ++ if (eError == PVRSRV_OK) ++ { ++ eError = eError2; ++ } ++ goto fail_cmdacquire; ++ } ++ ++ PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, ++ ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ3D); ++ } ++ ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) ++ { ++ RGXFWIF_KCCB_CMD s2DKCCBCmd; ++ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr; ++ RGX_CLIENT_CCB *ps2DTQCCB = FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext); ++ ++ /* Take one of the helper data structs and extract the common cmd struct, ++ * this is used to obtain the frame num. Each command should share the same ++ * frame number so we can just get the first. ++ */ ++ RGX_CCB_CMD_HELPER_DATA *psCmdHelper = &pas2DCmdHelper[0]; ++ CMD_COMMON *psTransferCmdCmn = IMG_OFFSET_ADDR(psCmdHelper->pui8DMCmd, 0); ++ ++ /* Construct the kernel 2D CCB command. 
*/ ++ s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK; ++ s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext); ++ s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(ps2DTQCCB); ++ s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWrapMaskUpdate = RGXGetWrapMaskCCB(ps2DTQCCB); ++ s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0; ++ ++ HTBLOGK(HTB_SF_MAIN_KICK_2D, ++ s2DKCCBCmd.uCmdData.sCmdKickData.psContext, ++ ui322DCmdOffset, ++ psTransferCmdCmn->ui32FrameNum, ++ ui32ExtJobRef, ++ ui32IntJobRef); ++ ++ RGXSRV_HWPERF_ENQ(psTransferContext, ++ OSGetCurrentClientProcessIDKM(), ++ ui32FWCtx, ++ ui32ExtJobRef, ++ ui32IntJobRef, ++ RGX_HWPERF_KICK_TYPE_TQ2D, ++ iCheckFence, ++ i2DUpdateFence, ++ i2DUpdateTimeline, ++ uiCheckFenceUID, ++ ui2DUpdateFenceUID, ++ NO_DEADLINE, ++ NO_CYCEST); ++ ++ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++ { ++ eError2 = RGXScheduleCommand(psDevInfo, ++ RGXFWIF_DM_2D, ++ &s2DKCCBCmd, ++ ui32PDumpFlags); ++ if (eError2 != PVRSRV_ERROR_RETRY) ++ { ++ break; ++ } ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (eError2 != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSubmitTransferKM failed to schedule kernel CCB command. (0x%x)", eError2)); ++ if (eError == PVRSRV_OK) ++ { ++ eError = eError2; ++ } ++ goto fail_cmdacquire; ++ } ++ ++ PVRGpuTraceEnqueueEvent(psDeviceNode, ui32FWCtx, ui32ExtJobRef, ++ ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ2D); ++ } ++#endif ++ ++ /* ++ * Now check eError (which may have returned an error from our earlier calls ++ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first ++ * so we check it now... ++ */ ++ if (eError != PVRSRV_OK ) ++ { ++ goto fail_cmdacquire; ++ } ++ ++#if defined(NO_HARDWARE) ++ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */ ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ if (ps2DUpdateSyncCheckpoint) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling TLA NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps2DUpdateSyncCheckpoint, SyncCheckpointGetId(ps2DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps2DUpdateSyncCheckpoint))); ++ SyncCheckpointSignalNoHW(ps2DUpdateSyncCheckpoint); ++ } ++ if (ps2DFenceTimelineUpdateSync) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating TLA NOHW sync prim<%p> to %d", __func__, (void*)ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue)); ++ SyncPrimNoHwUpdate(ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue); ++ } ++#endif ++ if (ps3DUpdateSyncCheckpoint) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling TQ3D NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps3DUpdateSyncCheckpoint, SyncCheckpointGetId(ps3DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps3DUpdateSyncCheckpoint))); ++ SyncCheckpointSignalNoHW(ps3DUpdateSyncCheckpoint); ++ } ++ if (ps3DFenceTimelineUpdateSync) ++ { ++ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating TQ3D NOHW sync prim<%p> to %d", __func__, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue)); ++ SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue); ++ } ++ SyncCheckpointNoHWUpdateTimelines(NULL); ++#endif /* defined(NO_HARDWARE) */ ++ ++#if defined(SUPPORT_BUFFER_SYNC) ++ if (psBufferSyncData) ++ { ++ pvr_buffer_sync_kick_succeeded(psBufferSyncData); ++ } ++ if (apsBufferFenceSyncCheckpoints) ++ { ++ kfree(apsBufferFenceSyncCheckpoints); ++ } ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ 
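Both kick paths above schedule their kernel CCB command with a bounded retry: RGXScheduleCommand returns PVRSRV_ERROR_RETRY while the firmware queue has no space, and the LOOP_UNTIL_TIMEOUT/END_LOOP_UNTIL_TIMEOUT pair keeps retrying, sleeping MAX_HW_TIME_US/WAIT_TRY_COUNT between attempts until the time budget runs out. A self-contained sketch of that idiom, with assumed constants and a stub standing in for RGXScheduleCommand:

#include <stdio.h>
#include <unistd.h>

#define MAX_HW_TIME_US 500000 /* assumed overall budget; per-platform in the driver */
#define WAIT_TRY_COUNT 10000  /* assumed number of retry slices */

typedef enum { ERR_OK, ERR_RETRY, ERR_FATAL } err_t;

/* Stand-in for RGXScheduleCommand: reports ERR_RETRY while the kernel CCB
 * is full, ERR_OK once the kick has been queued. */
static err_t schedule_command(int *retries_until_space)
{
    return (*retries_until_space)-- > 0 ? ERR_RETRY : ERR_OK;
}

int main(void)
{
    int ccb_full_for = 3;       /* pretend the CCB frees up after 3 polls */
    err_t err = ERR_RETRY;
    long waited_us;

    /* Equivalent of LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US): retry until the
     * command is accepted or the budget is exhausted. */
    for (waited_us = 0; waited_us < MAX_HW_TIME_US;
         waited_us += MAX_HW_TIME_US / WAIT_TRY_COUNT)
    {
        err = schedule_command(&ccb_full_for);
        if (err != ERR_RETRY)
            break;
        usleep(MAX_HW_TIME_US / WAIT_TRY_COUNT); /* OSWaitus in the driver */
    }

    printf("kick %s after ~%ld us\n",
           err == ERR_OK ? "queued" : "failed", waited_us);
    return err == ERR_OK ? 0 : 1;
}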
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++	if (pi2DUpdateFence)
++	{
++		*pi2DUpdateFence = i2DUpdateFence;
++	}
++#endif
++	if (pi3DUpdateFence)
++	{
++		*pi3DUpdateFence = i3DUpdateFence;
++	}
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++	if (pv2DUpdateFenceFinaliseData && (i2DUpdateFence != PVRSRV_NO_FENCE))
++	{
++		SyncCheckpointFinaliseFence(psDeviceNode, i2DUpdateFence, pv2DUpdateFenceFinaliseData,
++		                            ps2DUpdateSyncCheckpoint, szFenceName);
++	}
++#endif
++	if (pv3DUpdateFenceFinaliseData && (i3DUpdateFence != PVRSRV_NO_FENCE))
++	{
++		SyncCheckpointFinaliseFence(psDeviceNode, i3DUpdateFence, pv3DUpdateFenceFinaliseData,
++		                            ps3DUpdateSyncCheckpoint, szFenceName);
++	}
++
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++	OSFreeMem(pas2DCmdHelper);
++#endif
++	OSFreeMem(pas3DCmdHelper);
++
++	/* Drop the references taken on the sync checkpoints in the
++	 * resolved input fence */
++	SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
++	                             apsFenceSyncCheckpoints);
++	/* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
++	if (apsFenceSyncCheckpoints)
++	{
++		SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
++	}
++	/* Free memory allocated to hold the internal list of update values */
++#if defined(RGX_FEATURE_TLA_BIT_MASK)
++	if (pui322DIntAllocatedUpdateValues)
++	{
++		OSFreeMem(pui322DIntAllocatedUpdateValues);
++		pui322DIntAllocatedUpdateValues = NULL;
++	}
++#endif
++	if (pui323DIntAllocatedUpdateValues)
++	{
++		OSFreeMem(pui323DIntAllocatedUpdateValues);
++		pui323DIntAllocatedUpdateValues = NULL;
++	}
++
++	OSLockRelease(psTransferContext->hLock);
++	return PVRSRV_OK;
++
++/*
++	No resources are created in this function so there is nothing to free
++	unless we had to merge syncs.
++	If we fail after the client CCB acquire there is still nothing to do
++	as only the client CCB release will modify the client CCB
++*/
++fail_cmdacquire:
++fail_prepare_loop:
++
++	PVR_ASSERT(eError != PVRSRV_OK);
++
++	for (i=0;i<ui32PreparesDone;i++)
++	{
++		SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListFence[i]);
++		SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[i]);
++	}
++#if defined(SUPPORT_BUFFER_SYNC)
++	if (ui32PreparesDone > 0)
++	{
++		/* Prevent duplicate rollback in case of buffer sync.
*/ ++ psBufferUpdateSyncCheckpoint = NULL; ++ } ++#endif ++ ++ /* Free memory allocated to hold the internal list of update values */ ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ if (pui322DIntAllocatedUpdateValues) ++ { ++ OSFreeMem(pui322DIntAllocatedUpdateValues); ++ pui322DIntAllocatedUpdateValues = NULL; ++ } ++#endif ++ if (pui323DIntAllocatedUpdateValues) ++ { ++ OSFreeMem(pui323DIntAllocatedUpdateValues); ++ pui323DIntAllocatedUpdateValues = NULL; ++ } ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ if (i2DUpdateFence != PVRSRV_NO_FENCE) ++ { ++ SyncCheckpointRollbackFenceData(i2DUpdateFence, pv2DUpdateFenceFinaliseData); ++ } ++#endif ++ if (i3DUpdateFence != PVRSRV_NO_FENCE) ++ { ++ SyncCheckpointRollbackFenceData(i3DUpdateFence, pv3DUpdateFenceFinaliseData); ++ } ++#if defined(SUPPORT_BUFFER_SYNC) ++ if (psBufferUpdateSyncCheckpoint) ++ { ++ SyncAddrListRollbackCheckpoints(psDeviceNode, &psTransferContext->asSyncAddrListUpdate[0]); ++ } ++ if (psBufferSyncData) ++ { ++ pvr_buffer_sync_kick_failed(psBufferSyncData); ++ } ++ if (apsBufferFenceSyncCheckpoints) ++ { ++ kfree(apsBufferFenceSyncCheckpoints); ++ } ++fail_resolve_buffersync_input_fence: ++#endif /* defined(SUPPORT_BUFFER_SYNC) */ ++ ++ /* Drop the references taken on the sync checkpoints in the ++ * resolved input fence */ ++ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, ++ apsFenceSyncCheckpoints); ++ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */ ++ if (apsFenceSyncCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints); ++ } ++fail_resolve_fencesync_input_fence: ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ OSFreeMem(pas2DCmdHelper); ++fail_alloc2dhelper: ++#endif ++ OSFreeMem(pas3DCmdHelper); ++fail_alloc3dhelper: ++ ++ OSLockRelease(psTransferContext->hLock); ++ return eError; ++} ++ ++ ++PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDevNode, ++ RGX_SERVER_TQ_CONTEXT *psTransferContext, ++ IMG_UINT32 ui32Priority) ++{ ++ PVRSRV_ERROR eError; ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice; ++#endif ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ ++ OSLockAcquire(psTransferContext->hLock); ++ ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ if ((psTransferContext->s2DData.ui32Priority != ui32Priority) && ++ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) ++ { ++ eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext, ++ psConnection, ++ psTransferContext->psDeviceNode->pvDevice, ++ ui32Priority, ++ RGXFWIF_DM_2D); ++ if (eError != PVRSRV_OK) ++ { ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfercontext (%s)", __func__, PVRSRVGetErrorString(eError))); ++ } ++ goto fail_2dcontext; ++ } ++ psTransferContext->s2DData.ui32Priority = ui32Priority; ++ } ++#endif ++ ++ if (psTransferContext->s3DData.ui32Priority != ui32Priority) ++ { ++ eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext, ++ psConnection, ++ psTransferContext->psDeviceNode->pvDevice, ++ ui32Priority, ++ RGXFWIF_DM_3D); ++ if (eError != PVRSRV_OK) ++ { ++ if (eError != PVRSRV_ERROR_RETRY) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfercontext (%s)", __func__, PVRSRVGetErrorString(eError))); ++ } ++ goto fail_3dcontext; ++ } ++ psTransferContext->s3DData.ui32Priority = ui32Priority; ++ } ++ ++ 
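PVRSRVRGXSetTransferContextPriorityKM above takes the context lock, skips any sub-context already at the requested priority, and updates its cached ui32Priority only after ContextSetPriority succeeds, so a PVRSRV_ERROR_RETRY leaves the cached value honest for the next attempt. A simplified sketch of that change-only-then-commit idiom; the types and the stub firmware call are stand-ins, not driver API:

#include <stdio.h>

typedef enum { ERR_OK, ERR_RETRY } err_t;

struct sub_ctx {
    const char *name;
    unsigned    cached_priority; /* last value the firmware accepted */
};

/* Stand-in for ContextSetPriority: ask the firmware to reprioritise one
 * data master. Always succeeds in this sketch. */
static err_t fw_set_priority(const struct sub_ctx *ctx, unsigned prio)
{
    (void)ctx; (void)prio;
    return ERR_OK;
}

/* Update 2D then 3D, touching only sub-contexts whose cached priority
 * differs, and committing the cache only after the call succeeded. */
static err_t set_tq_priority(struct sub_ctx *s2d, struct sub_ctx *s3d,
                             unsigned prio)
{
    if (s2d && s2d->cached_priority != prio) {
        err_t e = fw_set_priority(s2d, prio);
        if (e != ERR_OK)
            return e;            /* cache untouched: safe to retry later */
        s2d->cached_priority = prio;
    }
    if (s3d->cached_priority != prio) {
        err_t e = fw_set_priority(s3d, prio);
        if (e != ERR_OK)
            return e;
        s3d->cached_priority = prio;
    }
    return ERR_OK;
}

int main(void)
{
    struct sub_ctx s2d = { "TQ-2D", 0 }, s3d = { "TQ-3D", 0 };

    set_tq_priority(&s2d, &s3d, 2);
    printf("%s=%u %s=%u\n", s2d.name, s2d.cached_priority,
           s3d.name, s3d.cached_priority);
    return 0;
}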
OSLockRelease(psTransferContext->hLock); ++ return PVRSRV_OK; ++ ++fail_3dcontext: ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ ++fail_2dcontext: ++#endif ++ OSLockRelease(psTransferContext->hLock); ++ PVR_ASSERT(eError != PVRSRV_OK); ++ return eError; ++} ++ ++PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, ++ RGX_CONTEXT_PROPERTY eContextProperty, ++ IMG_UINT64 ui64Input, ++ IMG_UINT64 *pui64Output) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ switch (eContextProperty) ++ { ++ case RGX_CONTEXT_PROPERTY_FLAGS: ++ { ++ IMG_UINT32 ui32ContextFlags = (IMG_UINT32)ui64Input; ++ ++ OSLockAcquire(psTransferContext->hLock); ++ eError = FWCommonContextSetFlags(psTransferContext->s2DData.psServerCommonContext, ++ ui32ContextFlags); ++ if (eError == PVRSRV_OK) ++ { ++ eError = FWCommonContextSetFlags(psTransferContext->s3DData.psServerCommonContext, ++ ui32ContextFlags); ++ } ++ OSLockRelease(psTransferContext->hLock); ++ break; ++ } ++ ++ default: ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_ERROR_NOT_SUPPORTED - asked to set unknown property (%d)", __func__, eContextProperty)); ++ eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ } ++ } ++ ++ return eError; ++} ++ ++void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ ++ OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock); ++ ++ dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode); ++ ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && ++ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) ++ { ++ DumpFWCommonContextInfo(psCurrentServerTransferCtx->s2DData.psServerCommonContext, ++ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ } ++#endif ++ ++ if (psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) ++ { ++ DumpFWCommonContextInfo(psCurrentServerTransferCtx->s3DData.psServerCommonContext, ++ pfnDumpDebugPrintf, pvDumpDebugFile, ui32VerbLevel); ++ } ++ } ++ ++ OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock); ++} ++ ++IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ IMG_UINT32 ui32ContextBitMask = 0; ++ ++ OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock); ++ ++ dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext) ++ { ++ RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx = ++ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode); ++ ++#if defined(RGX_FEATURE_TLA_BIT_MASK) ++ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && ++ (NULL != psCurrentServerTransferCtx->s2DData.psServerCommonContext) && ++ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))) ++ { ++ if (CheckStalledClientCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ2D) == PVRSRV_ERROR_CCCB_STALLED) ++ { ++ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ2D; ++ } ++ } ++#endif ++ ++ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) && (NULL != psCurrentServerTransferCtx->s3DData.psServerCommonContext)) ++ { ++ if ((CheckStalledClientCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ3D) == PVRSRV_ERROR_CCCB_STALLED)) ++ { ++ ui32ContextBitMask |= 
RGX_KICK_TYPE_DM_TQ3D; ++ } ++ } ++ } ++ ++ OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock); ++ return ui32ContextBitMask; ++} ++ ++/**************************************************************************//** ++ End of file (rgxtransfer.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxtransfer.h b/drivers/gpu/drm/img-rogue/rgxtransfer.h +new file mode 100644 +index 000000000000..cbc5b73f6bbe +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxtransfer.h +@@ -0,0 +1,153 @@ ++/*************************************************************************/ /*! ++@File ++@Title RGX Transfer queue Functionality ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header for the RGX Transfer queue Functionality ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(RGXTRANSFER_H) ++#define RGXTRANSFER_H ++ ++#include "devicemem.h" ++#include "device.h" ++#include "rgxdevice.h" ++#include "rgxfwutils.h" ++#include "rgx_fwif_resetframework.h" ++#include "rgxdebug.h" ++#include "pvr_notifier.h" ++ ++#include "sync_server.h" ++#include "connection_server.h" ++ ++typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT; ++ ++/*! 
++******************************************************************************* ++ ++ @Function PVRSRVRGXCreateTransferContextKM ++ ++ @Description ++ Server-side implementation of RGXCreateTransferContext ++ ++ @Input pvDeviceNode - device node ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32Priority, ++ IMG_UINT32 ui32FrameworkCommandSize, ++ IMG_PBYTE pabyFrameworkCommand, ++ IMG_HANDLE hMemCtxPrivData, ++ IMG_UINT32 ui32PackedCCBSizeU8888, ++ IMG_UINT32 ui32ContextFlags, ++ IMG_UINT64 ui64RobustnessAddress, ++ RGX_SERVER_TQ_CONTEXT **ppsTransferContext, ++ PMR **ppsCLIPMRMem, ++ PMR **ppsUSCPMRMem); ++ ++ ++/*! ++******************************************************************************* ++ ++ @Function PVRSRVRGXDestroyTransferContextKM ++ ++ @Description ++ Server-side implementation of RGXDestroyTransferContext ++ ++ @Input psTransferContext - Transfer context ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext); ++ ++/*! ++******************************************************************************* ++ ++ @Function PVRSRVSubmitTransferKM ++ ++ @Description ++ Schedules one or more 2D or 3D HW commands on the firmware ++ ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, ++ IMG_UINT32 ui32PrepareCount, ++ IMG_UINT32 *paui32ClientUpdateCount, ++ SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFODevVarBlock, ++ IMG_UINT32 **papaui32ClientUpdateSyncOffset, ++ IMG_UINT32 **papaui32ClientUpdateValue, ++ PVRSRV_FENCE iCheckFence, ++ PVRSRV_TIMELINE i2DUpdateTimeline, ++ PVRSRV_FENCE *pi2DUpdateFence, ++ PVRSRV_TIMELINE i3DUpdateTimeline, ++ PVRSRV_FENCE *pi3DUpdateFence, ++ IMG_CHAR szFenceName[32], ++ IMG_UINT32 *paui32FWCommandSize, ++ IMG_UINT8 **papaui8FWCommand, ++ IMG_UINT32 *pui32TQPrepareFlags, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32SyncPMRCount, ++ IMG_UINT32 *paui32SyncPMRFlags, ++ PMR **ppsSyncPMRs); ++ ++PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDevNode, ++ RGX_SERVER_TQ_CONTEXT *psTransferContext, ++ IMG_UINT32 ui32Priority); ++ ++PVRSRV_ERROR PVRSRVRGXSetTransferContextPropertyKM(RGX_SERVER_TQ_CONTEXT *psTransferContext, ++ RGX_CONTEXT_PROPERTY eContextProperty, ++ IMG_UINT64 ui64Input, ++ IMG_UINT64 *pui64Output); ++ ++/* Debug - Dump debug info of transfer contexts on this device */ ++void DumpTransferCtxtsInfo(PVRSRV_RGXDEV_INFO *psDevInfo, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile, ++ IMG_UINT32 ui32VerbLevel); ++ ++/* Debug/Watchdog - check if client transfer contexts are stalled */ ++IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo); ++ ++#endif /* RGXTRANSFER_H */ +diff --git a/drivers/gpu/drm/img-rogue/rgxtransfer_shader.h b/drivers/gpu/drm/img-rogue/rgxtransfer_shader.h +new file mode 100644 +index 000000000000..979f85bd4414 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxtransfer_shader.h +@@ -0,0 +1,61 @@ ++/*************************************************************************/ /*! 
++@File rgxtransfer_shader.h
++@Title TQ binary shader file info
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description This header holds info about the TQ binary shader file generated
++ by the TQ shader factory. This header is needed by the shader
++ factory when generating the file; by services KM when reading and
++ loading the file into memory; and by services UM when
++ constructing blits using the shaders.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(RGXSHADERHEADER_H)
++#define RGXSHADERHEADER_H
++
++typedef struct _RGX_SHADER_HEADER_
++{
++ IMG_UINT32 ui32Version;
++ IMG_UINT32 ui32NumFragment;
++ IMG_UINT32 ui32SizeFragment;
++ IMG_UINT32 ui32NumTDMFragment;
++ IMG_UINT32 ui32SizeTDMFragment;
++ IMG_UINT32 ui32SizeClientMem;
++} RGX_SHADER_HEADER;
++
++#endif /* RGXSHADERHEADER_H */
+diff --git a/drivers/gpu/drm/img-rogue/rgxutils.c b/drivers/gpu/drm/img-rogue/rgxutils.c
+new file mode 100644
+index 000000000000..866fd014a44d
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/rgxutils.c
+@@ -0,0 +1,221 @@
++/*************************************************************************/ /*!
++@File
++@Title Device specific utility routines
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Device specific functions
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "rgx_fwif_km.h" ++#include "pdump_km.h" ++#include "osfunc.h" ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "rgxutils.h" ++#include "power.h" ++#include "pvrsrv.h" ++#include "sync_internal.h" ++#include "rgxfwutils.h" ++ ++ ++PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *pvPrivateData, ++ IMG_UINT32 *pui32State) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_UNREFERENCED_PARAMETER(pvPrivateData); ++ ++ if (!psDeviceNode) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ psDevInfo = psDeviceNode->pvDevice; ++ *pui32State = psDevInfo->eActivePMConf; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *pvPrivateData, ++ IMG_UINT32 ui32State) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++#if !defined(NO_HARDWARE) ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++#endif ++ ++ PVR_UNREFERENCED_PARAMETER(pvPrivateData); ++ ++ if (!psDeviceNode || !psDeviceNode->pvDevice) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if (RGX_ACTIVEPM_FORCE_OFF != ui32State) ++ { ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++ } ++ ++#if !defined(NO_HARDWARE) ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ if (psDevInfo->pvAPMISRData) ++ { ++ psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF; ++ psDevInfo->pvAPMISRData = NULL; ++ eError = PVRSRVSetDeviceDefaultPowerState((PPVRSRV_DEVICE_NODE)psDeviceNode, ++ PVRSRV_DEV_POWER_STATE_ON); ++ } ++#endif ++ ++ return eError; ++} ++ ++PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *pvPrivateData, ++ IMG_BOOL *pbDisabled) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_UNREFERENCED_PARAMETER(pvPrivateData); ++ ++ if (!psDeviceNode || !psDeviceNode->pvDevice) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ *pbDisabled = !psDevInfo->bPDPEnabled; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode, ++ const void *pvPrivateData, ++ IMG_BOOL bDisable) ++{ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ PVR_UNREFERENCED_PARAMETER(pvPrivateData); ++ ++ if (!psDeviceNode || !psDeviceNode->pvDevice) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psDevInfo = psDeviceNode->pvDevice; ++ ++ psDevInfo->bPDPEnabled = !bDisable; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 *pui32DeviceFlags) ++{ ++ if (!pui32DeviceFlags || !psDevInfo) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ *pui32DeviceFlags = psDevInfo->ui32DeviceFlags; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo, ++ IMG_UINT32 ui32Config, ++ IMG_BOOL bSetNotClear) ++{ ++ if (!psDevInfo) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if ((ui32Config & ~RGXKM_DEVICE_STATE_MASK) != 0) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Bits outside of device state mask set (input: 0x%x, mask: 0x%x)", ++ __func__, ui32Config, RGXKM_DEVICE_STATE_MASK)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if (bSetNotClear) ++ { ++ psDevInfo->ui32DeviceFlags |= ui32Config; ++ } ++ else ++ { ++ psDevInfo->ui32DeviceFlags &= ~ui32Config; ++ } ++ ++ return PVRSRV_OK; ++} ++ ++inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM) ++{ ++ PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST); ++ ++ switch (eKickTypeDM) { ++ case RGX_KICK_TYPE_DM_GP: ++ return "GP "; ++ case 
RGX_KICK_TYPE_DM_TDM_2D: ++ return "TDM/2D "; ++ case RGX_KICK_TYPE_DM_TA: ++ return "TA "; ++ case RGX_KICK_TYPE_DM_3D: ++ return "3D "; ++ case RGX_KICK_TYPE_DM_CDM: ++ return "CDM "; ++ case RGX_KICK_TYPE_DM_RTU: ++ return "RTU "; ++ case RGX_KICK_TYPE_DM_SHG: ++ return "SHG "; ++ case RGX_KICK_TYPE_DM_TQ2D: ++ return "TQ2D "; ++ case RGX_KICK_TYPE_DM_TQ3D: ++ return "TQ3D "; ++ default: ++ return "Invalid DM "; ++ } ++} ++ ++/****************************************************************************** ++ End of file (rgxutils.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/rgxutils.h b/drivers/gpu/drm/img-rogue/rgxutils.h +new file mode 100644 +index 000000000000..670986323d2b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rgxutils.h +@@ -0,0 +1,185 @@ ++/*************************************************************************/ /*! ++@File ++@Title Device specific utility routines declarations ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Inline functions/structures specific to RGX ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "device.h" ++#include "rgxdevice.h" ++#include "rgxdebug.h" ++#include "pvr_notifier.h" ++#include "pvrsrv.h" ++ ++/*! 
++******************************************************************************
++
++ @Function RGXQueryAPMState
++
++ @Description Query the state of the APM configuration
++
++ @Input psDeviceNode : The device node
++
++ @Input pvPrivateData: Unused (required for AppHint callback)
++
++ @Output pui32State : The APM configuration state
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
++ const void *pvPrivateData,
++ IMG_UINT32 *pui32State);
++
++/*!
++******************************************************************************
++
++ @Function RGXSetAPMState
++
++ @Description Set the APM configuration state. Currently only 'OFF' is
++ supported
++
++ @Input psDeviceNode : The device node
++
++ @Input pvPrivateData: Unused (required for AppHint callback)
++
++ @Input ui32State : The requested APM configuration state
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
++ const void *pvPrivateData,
++ IMG_UINT32 ui32State);
++
++/*!
++******************************************************************************
++
++ @Function RGXQueryPdumpPanicDisable
++
++ @Description Get the PDump Panic Enable configuration state.
++
++ @Input psDeviceNode : The device node
++
++ @Input pvPrivateData: Unused (required for AppHint callback)
++
++ @Output pbDisabled : Set to IMG_TRUE if PDump Panic is disabled
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RGXQueryPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
++ const void *pvPrivateData,
++ IMG_BOOL *pbDisabled);
++
++/*!
++******************************************************************************
++
++ @Function RGXSetPdumpPanicDisable
++
++ @Description Set the PDump Panic Enable flag
++
++ @Input psDeviceNode : The device node
++
++ @Input pvPrivateData: Unused (required for AppHint callback)
++
++ @Input bDisable : The requested configuration state
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RGXSetPdumpPanicDisable(const PVRSRV_DEVICE_NODE *psDeviceNode,
++ const void *pvPrivateData,
++ IMG_BOOL bDisable);
++
++/*!
++******************************************************************************
++
++ @Function RGXGetDeviceFlags
++
++ @Description Get the device flags for a given device
++
++ @Input psDevInfo : The device descriptor to query
++
++ @Output pui32DeviceFlags : The current state of the device flags
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
++ IMG_UINT32 *pui32DeviceFlags);
++
++/*!
++******************************************************************************
++
++ @Function RGXSetDeviceFlags
++
++ @Description Set the device flags for a given device
++
++ @Input psDevInfo : The device descriptor to modify
++
++ @Input ui32Config : The device flags to modify
++
++ @Input bSetNotClear : Set or clear the specified flags
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
++ IMG_UINT32 ui32Config,
++ IMG_BOOL bSetNotClear);
++
++/*!
++******************************************************************************
++
++ @Function RGXStringifyKickTypeDM
++
++ @Description Returns the stringified name of the given kick type DM
++
++ @Input eKickTypeDM : Kick type DM
++
++ @Return Constant string containing the kick type DM name
++
++******************************************************************************/
++const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM);
++
++#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) (((bitmask) & (eKickTypeDM)) ? RGXStringifyKickTypeDM(eKickTypeDM) : "")
++/******************************************************************************
++ End of file (rgxutils.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/ri_server.c b/drivers/gpu/drm/img-rogue/ri_server.c
+new file mode 100644
+index 000000000000..d1fe2b2868cd
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/ri_server.c
+@@ -0,0 +1,2123 @@
++/*************************************************************************/ /*!
++@File ri_server.c
++@Title Resource Information (RI) server implementation
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Resource Information (RI) server functions
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++#if defined(__linux__) ++ #include ++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) ++ #include ++ #else ++ #include ++ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) */ ++#else ++ #include ++#endif /* __linux__ */ ++#include "img_defs.h" ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "pvrsrv_error.h" ++#include "osfunc.h" ++ ++#include "srvkm.h" ++#include "lock.h" ++ ++/* services/include */ ++#include "pvr_ricommon.h" ++ ++/* services/server/include/ */ ++#include "ri_server.h" ++ ++/* services/include/shared/ */ ++#include "hash.h" ++/* services/shared/include/ */ ++#include "dllist.h" ++ ++#include "pmr.h" ++ ++/* include/device.h */ ++#include "device.h" ++ ++#if !defined(RI_UNIT_TEST) ++#include "pvrsrv.h" ++#endif ++ ++ ++#if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) ++ ++#define USE_RI_LOCK 1 ++ ++/* ++ * Initial size use for Hash table. (Used to index the RI list entries). ++ */ ++#define _RI_INITIAL_HASH_TABLE_SIZE 64 ++ ++/* ++ * Values written to the 'valid' field of RI structures when created and ++ * cleared prior to being destroyed. The code can then check this value ++ * before accessing the provided pointer contents as a valid RI structure. ++ */ ++#define _VALID_RI_LIST_ENTRY 0x66bccb66 ++#define _VALID_RI_SUBLIST_ENTRY 0x77cddc77 ++#define _INVALID 0x00000000 ++ ++/* ++ * If this define is set to 1, details of the linked lists (addresses, ++ * prev/next ptrs, etc) are also output when function RIDumpList() is called. ++ */ ++#define _DUMP_LINKEDLIST_INFO 0 ++ ++ ++typedef IMG_UINT64 _RI_BASE_T; ++ ++ ++/* No +1 in SIZE macros since sizeof includes \0 byte in size */ ++ ++#define RI_PROC_BUF_SIZE 16 ++ ++#define RI_MEMDESC_SUM_FRMT "PID %d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\ ++ "Imported:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) = "\ ++ "Total:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K)\n" ++#define RI_MEMDESC_SUM_BUF_SIZE (sizeof(RI_MEMDESC_SUM_FRMT)+5+RI_PROC_BUF_SIZE+30+60) ++ ++ ++#define RI_PMR_SUM_FRMT "PID %d %s PMRs Alloc'd:0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K "\ ++ "[Physical: 0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K]\n" ++#define RI_PMR_SUM_BUF_SIZE (sizeof(RI_PMR_SUM_FRMT)+(20+40)) ++ ++#define RI_PMR_ENTRY_FRMT "%%sPID:%%-5d <%%p>\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c" ++#define RI_PMR_ENTRY_BUF_SIZE (sizeof(RI_PMR_ENTRY_FRMT)+(3+5+16+PVR_ANNOTATION_MAX_LEN+10+10)) ++#define RI_PMR_ENTRY_FRMT_SIZE (sizeof(RI_PMR_ENTRY_FRMT)) ++ ++/* Use %5d rather than %d so the output aligns in server/kernel.log, debugFS sees extra spaces */ ++#define RI_MEMDESC_ENTRY_PROC_FRMT "[%5d:%s]" ++#define RI_MEMDESC_ENTRY_PROC_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_PROC_FRMT)+5+16) ++ ++#define RI_SYS_ALLOC_IMPORT_FRMT "{Import from PID %d}" ++#define RI_SYS_ALLOC_IMPORT_FRMT_SIZE (sizeof(RI_SYS_ALLOC_IMPORT_FRMT)+5) ++static IMG_CHAR g_szSysAllocImport[RI_SYS_ALLOC_IMPORT_FRMT_SIZE]; ++ ++#define RI_MEMDESC_ENTRY_IMPORT_FRMT "{Import from PID %d}" ++#define RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_IMPORT_FRMT)+5) ++ ++#define RI_MEMDESC_ENTRY_UNPINNED_FRMT "{Unpinned}" ++#define RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_UNPINNED_FRMT)) ++ ++#define RI_MEMDESC_ENTRY_FRMT "%%sPID:%%-5d 0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%s%%c" ++#define 
RI_MEMDESC_ENTRY_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+5+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\ ++ RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE+RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE)) ++#define RI_MEMDESC_ENTRY_FRMT_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)) ++ ++ ++#define RI_FRMT_SIZE_MAX (MAX(RI_MEMDESC_ENTRY_BUF_SIZE,\ ++ MAX(RI_PMR_ENTRY_BUF_SIZE,\ ++ MAX(RI_MEMDESC_SUM_BUF_SIZE,\ ++ RI_PMR_SUM_BUF_SIZE)))) ++ ++ ++ ++ ++/* Structure used to make linked sublist of memory allocations (MEMDESC) */ ++struct _RI_SUBLIST_ENTRY_ ++{ ++ DLLIST_NODE sListNode; ++ struct _RI_LIST_ENTRY_ *psRI; ++ IMG_UINT32 valid; ++ IMG_BOOL bIsImport; ++ IMG_BOOL bIsSuballoc; ++ IMG_PID pid; ++ IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; ++ IMG_DEV_VIRTADDR sVAddr; ++ IMG_UINT64 ui64Offset; ++ IMG_UINT64 ui64Size; ++ IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN+1]; ++ DLLIST_NODE sProcListNode; ++}; ++ ++/* ++ * Structure used to make linked list of PMRs. Sublists of allocations ++ * (MEMDESCs) made from these PMRs are chained off these entries. ++ */ ++struct _RI_LIST_ENTRY_ ++{ ++ DLLIST_NODE sListNode; ++ DLLIST_NODE sSysAllocListNode; ++ DLLIST_NODE sSubListFirst; ++ IMG_UINT32 valid; ++ PMR *psPMR; ++ IMG_PID pid; ++ IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE]; ++ IMG_UINT16 ui16SubListCount; ++ IMG_UINT16 ui16MaxSubListCount; ++ IMG_UINT32 ui32RIPMRFlags; /* Flags used to indicate the type of allocation */ ++ IMG_UINT32 ui32Flags; /* Flags used to indicate if PMR appears in ri debugfs output */ ++}; ++ ++typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY; ++typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY; ++ ++static IMG_UINT16 g_ui16RICount; ++static HASH_TABLE *g_pRIHashTable; ++static IMG_UINT16 g_ui16ProcCount; ++static HASH_TABLE *g_pProcHashTable; ++ ++static POS_LOCK g_hRILock; ++ ++/* Linked list of PMR allocations made against the PVR_SYS_ALLOC_PID and lock ++ * to prevent concurrent access to it. ++ */ ++static POS_LOCK g_hSysAllocPidListLock; ++static DLLIST_NODE g_sSysAllocPidListHead; ++ ++/* ++ * Flag used to indicate if RILock should be destroyed when final PMR entry is ++ * deleted, i.e. if RIDeInitKM() has already been called before that point but ++ * the handle manager has deferred deletion of RI entries. 
++ */ ++static IMG_BOOL bRIDeInitDeferred = IMG_FALSE; ++ ++/* ++ * Used as head of linked-list of PMR RI entries - this is useful when we wish ++ * to iterate all PMR list entries (when we don't have a PMR ref) ++ */ ++static DLLIST_NODE sListFirst; ++ ++/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */ ++static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString); ++/* Function used to produce string containing info for PMR RI entries (used for both debugfs and kernel log output) */ ++static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString); ++ ++static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv); ++static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv); ++static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPriv); ++static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid); ++#define _RIOutput(x) PVR_LOG(x) ++ ++#define RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS 0x1 ++#define RI_FLAG_SYSALLOC_PMR 0x2 ++ ++static IMG_UINT32 ++_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen); ++ ++static IMG_UINT32 ++_ProcHashFunc(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen) ++{ ++ IMG_UINT32 *p = (IMG_UINT32 *)pKey; ++ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32); ++ IMG_UINT32 ui; ++ IMG_UINT32 uHashKey = 0; ++ ++ PVR_UNREFERENCED_PARAMETER(uHashTabLen); ++ ++ for (ui = 0; ui < uKeyLen; ui++) ++ { ++ IMG_UINT32 uHashPart = *p++; ++ ++ uHashPart += (uHashPart << 12); ++ uHashPart ^= (uHashPart >> 22); ++ uHashPart += (uHashPart << 4); ++ uHashPart ^= (uHashPart >> 9); ++ uHashPart += (uHashPart << 10); ++ uHashPart ^= (uHashPart >> 2); ++ uHashPart += (uHashPart << 7); ++ uHashPart ^= (uHashPart >> 12); ++ ++ uHashKey += uHashPart; ++ } ++ ++ return uHashKey; ++} ++ ++static IMG_BOOL ++_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2); ++ ++static IMG_BOOL ++_ProcHashComp(size_t uKeySize, void *pKey1, void *pKey2) ++{ ++ IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1; ++ IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2; ++ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32); ++ IMG_UINT32 ui; ++ ++ for (ui = 0; ui < uKeyLen; ui++) ++ { ++ if (*p1++ != *p2++) ++ return IMG_FALSE; ++ } ++ ++ return IMG_TRUE; ++} ++ ++static void _RILock(void) ++{ ++#if (USE_RI_LOCK == 1) ++ OSLockAcquire(g_hRILock); ++#endif ++} ++ ++static void _RIUnlock(void) ++{ ++#if (USE_RI_LOCK == 1) ++ OSLockRelease(g_hRILock); ++#endif ++} ++ ++/* This value maintains a count of the number of PMRs attributed to the ++ * PVR_SYS_ALLOC_PID. Access to this value is protected by g_hRILock, so it ++ * does not need to be an ATOMIC_T. 
++ */
++static IMG_UINT32 g_ui32SysAllocPMRCount;
++
++
++PVRSRV_ERROR RIInitKM(void)
++{
++ IMG_INT iCharsWritten;
++ PVRSRV_ERROR eError;
++
++ bRIDeInitDeferred = IMG_FALSE;
++
++ iCharsWritten = OSSNPrintf(g_szSysAllocImport,
++ RI_SYS_ALLOC_IMPORT_FRMT_SIZE,
++ RI_SYS_ALLOC_IMPORT_FRMT,
++ PVR_SYS_ALLOC_PID);
++ PVR_LOG_IF_FALSE((iCharsWritten>0 && iCharsWritten<(IMG_INT32)RI_SYS_ALLOC_IMPORT_FRMT_SIZE), \
++ "OSSNPrintf failed to initialise g_szSysAllocImport");
++
++ eError = OSLockCreate(&g_hSysAllocPidListLock);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: OSLockCreate (g_hSysAllocPidListLock) failed (returned %d)",
++ __func__,
++ eError));
++ return eError;
++ }
++ dllist_init(&(g_sSysAllocPidListHead));
++#if (USE_RI_LOCK == 1)
++ eError = OSLockCreate(&g_hRILock);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: OSLockCreate (g_hRILock) failed (returned %d)",
++ __func__,
++ eError));
++ }
++#endif
++ return eError;
++}
++
++void RIDeInitKM(void)
++{
++#if (USE_RI_LOCK == 1)
++ if (g_ui16RICount > 0)
++ {
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: called with %d entries remaining - deferring OSLockDestroy()",
++ __func__,
++ g_ui16RICount));
++ bRIDeInitDeferred = IMG_TRUE;
++ }
++ else
++ {
++ OSLockDestroy(g_hRILock);
++ OSLockDestroy(g_hSysAllocPidListLock);
++ }
++#endif
++}
++
++/*!
++*******************************************************************************
++
++ @Function RILockAcquireKM
++
++ @Description
++ Acquires the RI Lock (which protects the integrity of the RI
++ linked lists). Caller will be suspended until lock is acquired.
++
++ @Return None
++
++******************************************************************************/
++void RILockAcquireKM(void)
++{
++ _RILock();
++}
++
++/*!
++*******************************************************************************
++
++ @Function RILockReleaseKM
++
++ @Description
++ Releases the RI Lock (which protects the integrity of the RI
++ linked lists).
++
++ @Return None
++
++******************************************************************************/
++void RILockReleaseKM(void)
++{
++ _RIUnlock();
++}
++
++/*!
++*******************************************************************************
++
++ @Function RIWritePMREntryWithOwnerKM
++
++ @Description
++ Writes a new Resource Information list entry.
++ The new entry will be inserted at the head of the list of
++ PMR RI entries and assigned the values provided.
++
++ @input psPMR - Reference (handle) to the PMR to which this reference relates
++
++ @input ui32Owner - PID of the process which owns the allocation. This
++ may not be the current process (e.g.
a request to ++ grow a buffer may happen in the context of a kernel ++ thread, or we may import further resource for a ++ suballocation made from the FW heap which can then ++ also be utilized by other processes) ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, ++ IMG_PID ui32Owner) ++{ ++ PMR *pPMRHashKey = psPMR; ++ RI_LIST_ENTRY *psRIEntry; ++ uintptr_t hashData; ++ ++ /* if Hash table has not been created, create it now */ ++ if (!g_pRIHashTable) ++ { ++ g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); ++ g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); ++ } ++ PVR_RETURN_IF_NOMEM(g_pRIHashTable); ++ PVR_RETURN_IF_NOMEM(g_pProcHashTable); ++ ++ PVR_RETURN_IF_INVALID_PARAM(psPMR); ++ ++ /* Acquire RI Lock */ ++ _RILock(); ++ ++ /* Look-up psPMR in Hash Table */ ++ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); ++ psRIEntry = (RI_LIST_ENTRY *)hashData; ++ if (!psRIEntry) ++ { ++ /* ++ * If failed to find a matching existing entry, create a new one ++ */ ++ psRIEntry = (RI_LIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_LIST_ENTRY)); ++ if (!psRIEntry) ++ { ++ /* Release RI Lock */ ++ _RIUnlock(); ++ /* Error - no memory to allocate for new RI entry */ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ else ++ { ++ PMR_FLAGS_T uiPMRFlags = PMR_Flags(psPMR); ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR); ++ ++ /* ++ * Add new RI Entry ++ */ ++ if (g_ui16RICount == 0) ++ { ++ /* Initialise PMR entry linked-list head */ ++ dllist_init(&sListFirst); ++ } ++ g_ui16RICount++; ++ ++ dllist_init (&(psRIEntry->sSysAllocListNode)); ++ dllist_init (&(psRIEntry->sSubListFirst)); ++ psRIEntry->ui16SubListCount = 0; ++ psRIEntry->ui16MaxSubListCount = 0; ++ psRIEntry->valid = _VALID_RI_LIST_ENTRY; ++ ++ /* Check if this PMR should be accounted for under the ++ * PVR_SYS_ALLOC_PID debugFS entry. This should happen if ++ * we are in the driver init phase, the flags indicate ++ * this is a FW Main allocation (made from FW heap) ++ * or the owner PID is PVR_SYS_ALLOC_PID. ++ * Also record host dev node allocs on the system PID. 
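++ * For example (illustrative reading of the conditions below): a
++ * firmware-heap PMR created while the device is still in
++ * PVRSRV_DEVICE_STATE_INIT is charged to the "SysProc" entry under
++ * PVR_SYS_ALLOC_PID rather than to the calling process.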
++ */
++ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
++ PVRSRV_CHECK_FW_MAIN(uiPMRFlags) ||
++ ui32Owner == PVR_SYS_ALLOC_PID ||
++ psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode)
++ {
++ psRIEntry->ui32RIPMRFlags = RI_FLAG_SYSALLOC_PMR;
++ OSSNPrintf(psRIEntry->ai8ProcName,
++ RI_PROC_BUF_SIZE,
++ "SysProc");
++ psRIEntry->pid = PVR_SYS_ALLOC_PID;
++ OSLockAcquire(g_hSysAllocPidListLock);
++ /* Add this psRIEntry to the list of entries for PVR_SYS_ALLOC_PID */
++ dllist_add_to_tail(&g_sSysAllocPidListHead, (PDLLIST_NODE)&(psRIEntry->sSysAllocListNode));
++ OSLockRelease(g_hSysAllocPidListLock);
++ g_ui32SysAllocPMRCount++;
++ }
++ else
++ {
++ psRIEntry->ui32RIPMRFlags = 0;
++ psRIEntry->pid = ui32Owner;
++ /* Only name non-system entries after the current client
++ * process, so the "SysProc" name above is not overwritten.
++ */
++ OSSNPrintf(psRIEntry->ai8ProcName,
++ RI_PROC_BUF_SIZE,
++ "%s",
++ OSGetCurrentClientProcessNameKM());
++ }
++
++ /* Add PMR entry to linked-list of all PMR entries */
++ dllist_init (&(psRIEntry->sListNode));
++ dllist_add_to_tail(&sListFirst, (PDLLIST_NODE)&(psRIEntry->sListNode));
++ }
++
++ psRIEntry->psPMR = psPMR;
++ psRIEntry->ui32Flags = 0;
++
++ /* Create index entry in Hash Table */
++ HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry);
++
++ /* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */
++ PMRStoreRIHandle(psPMR, psRIEntry);
++ }
++ /* Release RI Lock */
++ _RIUnlock();
++
++ return PVRSRV_OK;
++}
++
++/*!
++*******************************************************************************
++
++ @Function RIWritePMREntryKM
++
++ @Description
++ Writes a new Resource Information list entry.
++ The new entry will be inserted at the head of the list of
++ PMR RI entries and assigned the values provided.
++
++ @input psPMR - Reference (handle) to the PMR to which this reference relates
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR)
++{
++ return RIWritePMREntryWithOwnerKM(psPMR,
++ OSGetCurrentClientProcessIDKM());
++}
++
++/*!
++*******************************************************************************
++
++ @Function RIWriteMEMDESCEntryKM
++
++ @Description
++ Writes a new Resource Information sublist entry.
++ The new entry will be inserted at the head of the sublist of
++ the indicated PMR list entry, and assigned the values provided.
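++
++ A minimal usage sketch (illustrative only - the variable names and
++ the annotation string below are hypothetical, and error handling is
++ omitted):
++
++ RI_HANDLE hRIHandle = NULL;
++ eError = RIWriteMEMDESCEntryKM(psPMR,
++ sizeof("ExampleBuf"), "ExampleBuf",
++ 0, ui64AllocSize,
++ IMG_FALSE, IMG_TRUE, &hRIHandle);
++
++ Here IMG_FALSE/IMG_TRUE mark the entry as a suballocation rather
++ than an import; the returned hRIHandle is later passed to
++ RIDeleteMEMDESCEntryKM().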
++ ++ @input psPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates ++ @input ui32TextBSize - Length of string provided in psz8TextB parameter ++ @input psz8TextB - String describing this secondary reference (may be null) ++ @input ui64Offset - Offset from the start of the PMR at which this allocation begins ++ @input ui64Size - Size of this allocation ++ @input bIsImport - Flag indicating if this is an allocation or an import ++ @input bIsSuballoc - Flag indicating if this is a sub-allocation ++ @output phRIHandle - Handle to the created RI entry ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, ++ IMG_UINT32 ui32TextBSize, ++ const IMG_CHAR *psz8TextB, ++ IMG_UINT64 ui64Offset, ++ IMG_UINT64 ui64Size, ++ IMG_BOOL bIsImport, ++ IMG_BOOL bIsSuballoc, ++ RI_HANDLE *phRIHandle) ++{ ++ RI_SUBLIST_ENTRY *psRISubEntry; ++ RI_LIST_ENTRY *psRIEntry; ++ PMR *pPMRHashKey = psPMR; ++ uintptr_t hashData; ++ IMG_PID pid; ++ ++ /* Check Hash tables have been created (meaning at least one PMR has been defined) */ ++ PVR_RETURN_IF_INVALID_PARAM(g_pRIHashTable); ++ PVR_RETURN_IF_INVALID_PARAM(g_pProcHashTable); ++ ++ PVR_RETURN_IF_INVALID_PARAM(psPMR); ++ PVR_RETURN_IF_INVALID_PARAM(phRIHandle); ++ ++ /* Acquire RI Lock */ ++ _RILock(); ++ ++ *phRIHandle = NULL; ++ ++ /* Look-up psPMR in Hash Table */ ++ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); ++ psRIEntry = (RI_LIST_ENTRY *)hashData; ++ if (!psRIEntry) ++ { ++ /* Release RI Lock */ ++ _RIUnlock(); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY)); ++ if (!psRISubEntry) ++ { ++ /* Release RI Lock */ ++ _RIUnlock(); ++ /* Error - no memory to allocate for new RI sublist entry */ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ else ++ { ++ /* ++ * Insert new entry in sublist ++ */ ++ PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst)); ++ ++ /* ++ * Insert new entry before currentNode ++ */ ++ if (!currentNode) ++ { ++ currentNode = &(psRIEntry->sSubListFirst); ++ } ++ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode)); ++ ++ psRISubEntry->psRI = psRIEntry; ++ ++ /* Increment number of entries in sublist */ ++ psRIEntry->ui16SubListCount++; ++ if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount) ++ { ++ psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount; ++ } ++ psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; ++ } ++ ++ /* If allocation is made during device or driver initialisation, ++ * track the MEMDESC entry under PVR_SYS_ALLOC_PID, otherwise use ++ * the current PID. ++ * Record host dev node allocations on the system PID. ++ */ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psRISubEntry->psRI->psPMR); ++ ++ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT || ++ psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode) ++ { ++ psRISubEntry->pid = psRISubEntry->psRI->pid; ++ } ++ else ++ { ++ psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); ++ } ++ } ++ ++ if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: TextBSize too long (%u). 
Text will be truncated " ++ "to %zu characters", __func__, ++ ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); ++ } ++ ++ /* copy ai8TextB field data */ ++ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); ++ ++ psRISubEntry->ui64Offset = ui64Offset; ++ psRISubEntry->ui64Size = ui64Size; ++ psRISubEntry->bIsImport = bIsImport; ++ psRISubEntry->bIsSuballoc = bIsSuballoc; ++ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); ++ dllist_init (&(psRISubEntry->sProcListNode)); ++ ++ /* ++ * Now insert this MEMDESC into the proc list ++ */ ++ /* look-up pid in Hash Table */ ++ pid = psRISubEntry->pid; ++ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); ++ if (!hashData) ++ { ++ /* ++ * No allocations for this pid yet ++ */ ++ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); ++ /* Increment number of entries in proc hash table */ ++ g_ui16ProcCount++; ++ } ++ else ++ { ++ /* ++ * Insert allocation into pid allocations linked list ++ */ ++ PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData; ++ ++ /* ++ * Insert new entry ++ */ ++ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode)); ++ } ++ *phRIHandle = (RI_HANDLE)psRISubEntry; ++ /* Release RI Lock */ ++ _RIUnlock(); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function RIWriteProcListEntryKM ++ ++ @Description ++ Write a new entry in the process list directly. We have to do this ++ because there might be no, multiple or changing PMR handles. ++ ++ In the common case we have a PMR that will be added to the PMR list ++ and one or several MemDescs that are associated to it in a sub-list. ++ Additionally these MemDescs will be inserted in the per-process list. ++ ++ There might be special descriptors from e.g. new user APIs that ++ are associated with no or multiple PMRs and not just one. ++ These can be now added to the per-process list (as RI_SUBLIST_ENTRY) ++ directly with this function and won't be listed in the PMR list (RIEntry) ++ because there might be no PMR. ++ ++ To remove entries from the per-process list, just use ++ RIDeleteMEMDESCEntryKM(). 
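++
++ A minimal usage sketch (illustrative only - names are hypothetical
++ and error handling is omitted):
++
++ RI_HANDLE hRIHandle = NULL;
++ eError = RIWriteProcListEntryKM(sizeof("SpecialRes"), "SpecialRes",
++ ui64Size, ui64DevVAddr, &hRIHandle);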
++ ++ @input psz8TextB - String describing this secondary reference (may be null) ++ @input ui64Size - Size of this allocation ++ @input ui64DevVAddr - Virtual address of this entry ++ @output phRIHandle - Handle to the created RI entry ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize, ++ const IMG_CHAR *psz8TextB, ++ IMG_UINT64 ui64Size, ++ IMG_UINT64 ui64DevVAddr, ++ RI_HANDLE *phRIHandle) ++{ ++ uintptr_t hashData = 0; ++ IMG_PID pid; ++ RI_SUBLIST_ENTRY *psRISubEntry = NULL; ++ ++ if (!g_pRIHashTable) ++ { ++ g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default); ++ g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp); ++ ++ if (!g_pRIHashTable || !g_pProcHashTable) ++ { ++ /* Error - no memory to allocate for Hash table(s) */ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ } ++ ++ /* Acquire RI Lock */ ++ _RILock(); ++ ++ *phRIHandle = NULL; ++ ++ psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY)); ++ if (!psRISubEntry) ++ { ++ /* Release RI Lock */ ++ _RIUnlock(); ++ /* Error - no memory to allocate for new RI sublist entry */ ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY; ++ ++ psRISubEntry->pid = OSGetCurrentClientProcessIDKM(); ++ ++ if (ui32TextBSize > sizeof(psRISubEntry->ai8TextB)-1) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: TextBSize too long (%u). Text will be truncated " ++ "to %zu characters", __func__, ++ ui32TextBSize, sizeof(psRISubEntry->ai8TextB)-1)); ++ } ++ ++ /* copy ai8TextB field data */ ++ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, sizeof(psRISubEntry->ai8TextB), "%s", psz8TextB); ++ ++ psRISubEntry->ui64Offset = 0; ++ psRISubEntry->ui64Size = ui64Size; ++ psRISubEntry->sVAddr.uiAddr = ui64DevVAddr; ++ psRISubEntry->bIsImport = IMG_FALSE; ++ psRISubEntry->bIsSuballoc = IMG_FALSE; ++ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM()); ++ dllist_init (&(psRISubEntry->sProcListNode)); ++ ++ /* ++ * Now insert this MEMDESC into the proc list ++ */ ++ /* look-up pid in Hash Table */ ++ pid = psRISubEntry->pid; ++ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); ++ if (!hashData) ++ { ++ /* ++ * No allocations for this pid yet ++ */ ++ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode)); ++ /* Increment number of entries in proc hash table */ ++ g_ui16ProcCount++; ++ } ++ else ++ { ++ /* ++ * Insert allocation into pid allocations linked list ++ */ ++ PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData; ++ ++ /* ++ * Insert new entry ++ */ ++ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode)); ++ } ++ *phRIHandle = (RI_HANDLE)psRISubEntry; ++ /* Release RI Lock */ ++ _RIUnlock(); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function RIUpdateMEMDESCAddrKM ++ ++ @Description ++ Update a Resource Information entry. 
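++ Typically called once the allocation has been mapped, so that the
++ entry records the device virtual address, e.g. (illustrative):
++ RIUpdateMEMDESCAddrKM(hRIHandle, sDevVAddr);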
++ ++ @input hRIHandle - Handle of object whose reference info is to be updated ++ @input sVAddr - New address for the RI entry ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, ++ IMG_DEV_VIRTADDR sVAddr) ++{ ++ RI_SUBLIST_ENTRY *psRISubEntry; ++ ++ PVR_RETURN_IF_INVALID_PARAM(hRIHandle); ++ ++ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; ++ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) ++ { ++ /* Pointer does not point to valid structure */ ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* Acquire RI lock*/ ++ _RILock(); ++ ++ psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr; ++ ++ /* Release RI lock */ ++ _RIUnlock(); ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function RIDeletePMREntryKM ++ ++ @Description ++ Delete a Resource Information entry. ++ ++ @input hRIHandle - Handle of object whose reference info is to be deleted ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle) ++{ ++ RI_LIST_ENTRY *psRIEntry; ++ PMR *pPMRHashKey; ++ PVRSRV_ERROR eResult = PVRSRV_OK; ++ ++ PVR_RETURN_IF_INVALID_PARAM(hRIHandle); ++ ++ psRIEntry = (RI_LIST_ENTRY *)hRIHandle; ++ ++ if (psRIEntry->valid != _VALID_RI_LIST_ENTRY) ++ { ++ /* Pointer does not point to valid structure */ ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ if (psRIEntry->ui16SubListCount == 0) ++ { ++ /* Acquire RI lock*/ ++ _RILock(); ++ ++ /* Remove the HASH table index entry */ ++ pPMRHashKey = psRIEntry->psPMR; ++ HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey); ++ ++ psRIEntry->valid = _INVALID; ++ ++ /* Remove PMR entry from linked-list of PMR entries */ ++ dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode)); ++ ++ if (psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) ++ { ++ dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sSysAllocListNode)); ++ g_ui32SysAllocPMRCount--; ++ } ++ ++ /* Now, free the memory used to store the RI entry */ ++ OSFreeMemNoStats(psRIEntry); ++ psRIEntry = NULL; ++ ++ /* ++ * Decrement number of RI entries - if this is now zero, ++ * we can delete the RI hash table ++ */ ++ if (--g_ui16RICount == 0) ++ { ++ HASH_Delete(g_pRIHashTable); ++ g_pRIHashTable = NULL; ++ ++ _RIUnlock(); ++ ++ /* If deInit has been deferred, we can now destroy the RI Lock */ ++ if (bRIDeInitDeferred) ++ { ++ OSLockDestroy(g_hRILock); ++ } ++ } ++ else ++ { ++ /* Release RI lock*/ ++ _RIUnlock(); ++ } ++ /* ++ * Make the handle NULL once PMR RI entry is deleted ++ */ ++ hRIHandle = NULL; ++ } ++ else ++ { ++ eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP; ++ } ++ ++ return eResult; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function RIDeleteMEMDESCEntryKM ++ ++ @Description ++ Delete a Resource Information entry. ++ Entry can be from RIEntry list or ProcList. 
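++ It is the matching teardown call for handles returned by either
++ RIWriteMEMDESCEntryKM() or RIWriteProcListEntryKM(); entries with
++ no parent PMR are simply unlinked from the per-process list.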
++ ++ @input hRIHandle - Handle of object whose reference info is to be deleted ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle) ++{ ++ RI_LIST_ENTRY *psRIEntry = NULL; ++ RI_SUBLIST_ENTRY *psRISubEntry; ++ uintptr_t hashData; ++ IMG_PID pid; ++ ++ PVR_RETURN_IF_INVALID_PARAM(hRIHandle); ++ ++ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle; ++ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY) ++ { ++ /* Pointer does not point to valid structure */ ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ /* Acquire RI lock*/ ++ _RILock(); ++ ++ /* For entries which do have a parent PMR remove the node from the sublist */ ++ if (psRISubEntry->psRI) ++ { ++ psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI; ++ ++ /* Now, remove entry from the sublist */ ++ dllist_remove_node(&(psRISubEntry->sListNode)); ++ } ++ ++ psRISubEntry->valid = _INVALID; ++ ++ /* Remove the entry from the proc allocations linked list */ ++ pid = psRISubEntry->pid; ++ /* If this is the only allocation for this pid, just remove it from the hash table */ ++ if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ++ { ++ HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); ++ /* Decrement number of entries in proc hash table, and delete the hash table if there are now none */ ++ if (--g_ui16ProcCount == 0) ++ { ++ HASH_Delete(g_pProcHashTable); ++ g_pProcHashTable = NULL; ++ } ++ } ++ else ++ { ++ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid); ++ if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode)) ++ { ++ HASH_Remove_Extended(g_pProcHashTable, (void *)&pid); ++ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode))); ++ } ++ } ++ dllist_remove_node(&(psRISubEntry->sProcListNode)); ++ ++ /* Now, free the memory used to store the sublist entry */ ++ OSFreeMemNoStats(psRISubEntry); ++ psRISubEntry = NULL; ++ ++ /* ++ * Decrement number of entries in sublist if this MemDesc had a parent entry. ++ */ ++ if (psRIEntry) ++ { ++ psRIEntry->ui16SubListCount--; ++ } ++ ++ /* Release RI lock*/ ++ _RIUnlock(); ++ ++ /* ++ * Make the handle NULL once MEMDESC RI entry is deleted ++ */ ++ hRIHandle = NULL; ++ ++ return PVRSRV_OK; ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function RIDeleteListKM ++ ++ @Description ++ Delete all Resource Information entries and free associated ++ memory. ++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RIDeleteListKM(void) ++{ ++ PVRSRV_ERROR eResult = PVRSRV_OK; ++ ++ _RILock(); ++ ++ if (g_pRIHashTable) ++ { ++ eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries, NULL); ++ if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE) ++ { ++ /* ++ * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when ++ * the hash table gets deleted as a result of deleting the final PMR entry, ++ * so this is not a real error condition... ++ */ ++ eResult = PVRSRV_OK; ++ } ++ } ++ ++ /* After the run through the RIHashTable that holds the PMR entries there might be ++ * still entries left in the per-process hash table because they were added with ++ * RIWriteProcListEntryKM() and have no PMR parent associated. 
++ */
++ if (g_pProcHashTable)
++ {
++ eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries, NULL);
++ if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
++ {
++ /*
++ * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
++ * the hash table gets deleted as a result of deleting the final PMR entry,
++ * so this is not a real error condition...
++ */
++ eResult = PVRSRV_OK;
++ }
++ }
++
++ _RIUnlock();
++
++ return eResult;
++}
++
++/*!
++*******************************************************************************
++
++ @Function RIDumpListKM
++
++ @Description
++ Dumps out the contents of the RI List entry for the
++ specified PMR, and all MEMDESC allocation entries
++ in the associated sub linked list.
++ At present, output is directed to Kernel log
++ via PVR_DPF.
++
++ @input psPMR - PMR for which RI entry details are to be output
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RIDumpListKM(PMR *psPMR)
++{
++ PVRSRV_ERROR eError;
++
++ /* Acquire RI lock*/
++ _RILock();
++
++ eError = _DumpList(psPMR, 0);
++
++ /* Release RI lock*/
++ _RIUnlock();
++
++ return eError;
++}
++
++/*!
++*******************************************************************************
++
++ @Function RIGetListEntryKM
++
++ @Description
++ Returns (via ppszEntryString) a formatted string with details of
++ the specified list entry. If no entry exists (e.g. it may have
++ been deleted since the previous call), *ppszEntryString is set
++ to NULL.
++
++ @input pid - pid for which RI entry details are to be output
++ @input ppHandle - handle to the entry; if NULL, the first entry will be
++ returned.
++ @output ppszEntryString - string to be output for the entry
++ @output ppHandle - on return, points to the next entry
++ (or NULL if there is no next entry)
++
++ @Return IMG_BOOL - IMG_TRUE if an entry was output, IMG_FALSE once the
++ end of the list has been reached
++
++******************************************************************************/
++IMG_BOOL RIGetListEntryKM(IMG_PID pid,
++ IMG_HANDLE **ppHandle,
++ IMG_CHAR **ppszEntryString)
++{
++ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
++ RI_LIST_ENTRY *psRIEntry = NULL;
++ uintptr_t hashData = 0;
++ IMG_PID hashKey = pid;
++
++ static IMG_CHAR acStringBuffer[RI_FRMT_SIZE_MAX];
++
++ static IMG_UINT64 ui64TotalMemdescAlloc;
++ static IMG_UINT64 ui64TotalImport;
++ static IMG_UINT64 ui64TotalPMRAlloc;
++ static IMG_UINT64 ui64TotalPMRBacked;
++ static enum {
++ RI_GET_STATE_MEMDESCS_LIST_START,
++ RI_GET_STATE_MEMDESCS_SUMMARY,
++ RI_GET_STATE_PMR_LIST,
++ RI_GET_STATE_PMR_SUMMARY,
++ RI_GET_STATE_END,
++ RI_GET_STATE_LAST
++ } g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START;
++
++ static DLLIST_NODE *psNode;
++ static DLLIST_NODE *psSysAllocNode;
++ static IMG_CHAR szProcName[RI_PROC_BUF_SIZE];
++ static IMG_UINT32 ui32ProcessedSysAllocPMRCount;
++
++ acStringBuffer[0] = '\0';
++
++ switch (g_bNextGetState)
++ {
++ case RI_GET_STATE_MEMDESCS_LIST_START:
++ /* look-up pid in Hash Table, to obtain first entry for pid */
++ hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey);
++ if (hashData)
++ {
++ if (*ppHandle)
++ {
++ psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle;
++ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
++ {
++ psRISubEntry = NULL;
++ }
++ }
++ else
++ {
++ psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
++ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
++ {
++ psRISubEntry = NULL;
++ }
++ }
++ }
++
++ if (psRISubEntry)
++ {
++ PDLLIST_NODE 
psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode); ++ ++ if (psRISubEntry->bIsImport) ++ { ++ ui64TotalImport += psRISubEntry->ui64Size; ++ } ++ else ++ { ++ ui64TotalMemdescAlloc += psRISubEntry->ui64Size; ++ } ++ ++ _GenerateMEMDESCEntryString(psRISubEntry, ++ IMG_TRUE, ++ RI_MEMDESC_ENTRY_BUF_SIZE, ++ acStringBuffer); ++ ++ if (szProcName[0] == '\0') ++ { ++ OSStringLCopy(szProcName, (pid == PVR_SYS_ALLOC_PID) ? ++ PVRSRV_MODNAME : psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE); ++ } ++ ++ ++ *ppszEntryString = acStringBuffer; ++ *ppHandle = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode); ++ ++ if (psNextProcListNode == NULL || ++ psNextProcListNode == (PDLLIST_NODE)hashData) ++ { ++ g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY; ++ } ++ /* else continue to list MEMDESCs */ ++ } ++ else ++ { ++ if (ui64TotalMemdescAlloc == 0) ++ { ++ acStringBuffer[0] = '\0'; ++ *ppszEntryString = acStringBuffer; ++ g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY; ++ } ++ /* else continue to list MEMDESCs */ ++ } ++ break; ++ ++ case RI_GET_STATE_MEMDESCS_SUMMARY: ++ OSSNPrintf(acStringBuffer, ++ RI_MEMDESC_SUM_BUF_SIZE, ++ RI_MEMDESC_SUM_FRMT, ++ pid, ++ szProcName, ++ ui64TotalMemdescAlloc, ++ ui64TotalMemdescAlloc >> 10, ++ ui64TotalImport, ++ ui64TotalImport >> 10, ++ (ui64TotalMemdescAlloc + ui64TotalImport), ++ (ui64TotalMemdescAlloc + ui64TotalImport) >> 10); ++ ++ *ppszEntryString = acStringBuffer; ++ ui64TotalMemdescAlloc = 0; ++ ui64TotalImport = 0; ++ szProcName[0] = '\0'; ++ ++ g_bNextGetState = RI_GET_STATE_PMR_LIST; ++ break; ++ ++ case RI_GET_STATE_PMR_LIST: ++ if (pid == PVR_SYS_ALLOC_PID) ++ { ++ OSLockAcquire(g_hSysAllocPidListLock); ++ acStringBuffer[0] = '\0'; ++ if (!psSysAllocNode) ++ { ++ psSysAllocNode = &g_sSysAllocPidListHead; ++ ui32ProcessedSysAllocPMRCount = 0; ++ } ++ psSysAllocNode = dllist_get_next_node(psSysAllocNode); ++ ++ if (szProcName[0] == '\0') ++ { ++ OSStringLCopy(szProcName, PVRSRV_MODNAME, RI_PROC_BUF_SIZE); ++ } ++ if (psSysAllocNode != NULL && psSysAllocNode != &g_sSysAllocPidListHead) ++ { ++ IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; ++ ++ psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); ++ _GeneratePMREntryString(psRIEntry, ++ IMG_TRUE, ++ RI_PMR_ENTRY_BUF_SIZE, ++ acStringBuffer); ++ PMR_LogicalSize(psRIEntry->psPMR, ++ &uiPMRLogicalSize); ++ ui64TotalPMRAlloc += uiPMRLogicalSize; ++ PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); ++ ui64TotalPMRBacked += uiPMRPhysicalBacking; ++ ++ ui32ProcessedSysAllocPMRCount++; ++ if (ui32ProcessedSysAllocPMRCount > g_ui32SysAllocPMRCount+1) ++ { ++ g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; ++ } ++ /* else continue to list PMRs */ ++ } ++ else ++ { ++ g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; ++ } ++ *ppszEntryString = (IMG_CHAR *)acStringBuffer; ++ OSLockRelease(g_hSysAllocPidListLock); ++ } ++ else ++ { ++ IMG_BOOL bPMRToDisplay = IMG_FALSE; ++ ++ /* Iterate through the 'touched' PMRs and display details */ ++ if (!psNode) ++ { ++ psNode = dllist_get_next_node(&sListFirst); ++ } ++ else ++ { ++ psNode = dllist_get_next_node(psNode); ++ } ++ ++ while ((psNode != NULL && psNode != &sListFirst) && ++ !bPMRToDisplay) ++ { ++ psRIEntry = IMG_CONTAINER_OF(psNode, RI_LIST_ENTRY, sListNode); ++ if (psRIEntry->pid == pid) ++ { ++ IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0; ++ ++ /* This PMR was 'touched', so display details and unflag it*/ ++ 
_GeneratePMREntryString(psRIEntry, ++ IMG_TRUE, ++ RI_PMR_ENTRY_BUF_SIZE, ++ acStringBuffer); ++ PMR_LogicalSize(psRIEntry->psPMR, &uiPMRLogicalSize); ++ ui64TotalPMRAlloc += uiPMRLogicalSize; ++ PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking); ++ ui64TotalPMRBacked += uiPMRPhysicalBacking; ++ ++ /* Remember the name of the process for 1 PMR for the summary */ ++ if (szProcName[0] == '\0') ++ { ++ OSStringLCopy(szProcName, psRIEntry->ai8ProcName, RI_PROC_BUF_SIZE); ++ } ++ bPMRToDisplay = IMG_TRUE; ++ } ++ else ++ { ++ psNode = dllist_get_next_node(psNode); ++ } ++ } ++ ++ if (psNode == NULL || (psNode == &sListFirst)) ++ { ++ g_bNextGetState = RI_GET_STATE_PMR_SUMMARY; ++ } ++ /* else continue listing PMRs */ ++ } ++ break; ++ ++ case RI_GET_STATE_PMR_SUMMARY: ++ OSSNPrintf(acStringBuffer, ++ RI_PMR_SUM_BUF_SIZE, ++ RI_PMR_SUM_FRMT, ++ pid, ++ szProcName, ++ ui64TotalPMRAlloc, ++ ui64TotalPMRAlloc >> 10, ++ ui64TotalPMRBacked, ++ ui64TotalPMRBacked >> 10); ++ ++ *ppszEntryString = acStringBuffer; ++ ui64TotalPMRAlloc = 0; ++ ui64TotalPMRBacked = 0; ++ szProcName[0] = '\0'; ++ psSysAllocNode = NULL; ++ ++ g_bNextGetState = RI_GET_STATE_END; ++ break; ++ ++ default: ++ PVR_DPF((PVR_DBG_ERROR, "%s: Bad %d)",__func__, g_bNextGetState)); ++ ++ __fallthrough; ++ case RI_GET_STATE_END: ++ /* Reset state ready for the next gpu_mem_area file to display */ ++ *ppszEntryString = NULL; ++ *ppHandle = NULL; ++ psNode = NULL; ++ szProcName[0] = '\0'; ++ ++ g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START; ++ return IMG_FALSE; ++ break; ++ } ++ ++ return IMG_TRUE; ++} ++ ++/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */ ++static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, ++ IMG_BOOL bDebugFs, ++ IMG_UINT16 ui16MaxStrLen, ++ IMG_CHAR *pszEntryString) ++{ ++ IMG_CHAR szProc[RI_MEMDESC_ENTRY_PROC_BUF_SIZE]; ++ IMG_CHAR szImport[RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE]; ++ IMG_CHAR szEntryFormat[RI_MEMDESC_ENTRY_FRMT_SIZE]; ++ const IMG_CHAR *pszAnnotationText; ++ IMG_PID uiRIPid = 0; ++ PMR* psRIPMR = NULL; ++ IMG_UINT32 ui32RIPMRFlags = 0; ++ ++ if (psRISubEntry->psRI != NULL) ++ { ++ uiRIPid = psRISubEntry->psRI->pid; ++ psRIPMR = psRISubEntry->psRI->psPMR; ++ ui32RIPMRFlags = psRISubEntry->psRI->ui32RIPMRFlags; ++ } ++ ++ OSSNPrintf(szEntryFormat, ++ RI_MEMDESC_ENTRY_FRMT_SIZE, ++ RI_MEMDESC_ENTRY_FRMT, ++ DEVMEM_ANNOTATION_MAX_LEN); ++ ++ if (!bDebugFs) ++ { ++ /* we don't include process ID info for debugfs output */ ++ OSSNPrintf(szProc, ++ RI_MEMDESC_ENTRY_PROC_BUF_SIZE, ++ RI_MEMDESC_ENTRY_PROC_FRMT, ++ psRISubEntry->pid, ++ psRISubEntry->ai8ProcName); ++ } ++ ++ if (psRISubEntry->bIsImport && psRIPMR) ++ { ++ OSSNPrintf((IMG_CHAR *)&szImport, ++ RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE, ++ RI_MEMDESC_ENTRY_IMPORT_FRMT, ++ uiRIPid); ++ /* Set pszAnnotationText to that of the 'parent' PMR RI entry */ ++ pszAnnotationText = PMR_GetAnnotation(psRIPMR); ++ } ++ else if (!psRISubEntry->bIsSuballoc && psRIPMR) ++ { ++ /* Set pszAnnotationText to that of the 'parent' PMR RI entry */ ++ pszAnnotationText = PMR_GetAnnotation(psRIPMR); ++ } ++ else ++ { ++ /* Set pszAnnotationText to that of the MEMDESC RI entry */ ++ pszAnnotationText = psRISubEntry->ai8TextB; ++ } ++ ++ /* Don't print memdescs if they are local imports ++ * (i.e. 
imported PMRs allocated by this process) ++ */ ++ if (bDebugFs && ++ ((psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset) == 0) && ++ (psRISubEntry->bIsImport && ((psRISubEntry->pid == uiRIPid) ++ || (psRISubEntry->pid == PVR_SYS_ALLOC_PID)))) ++ { ++ /* Don't print this entry */ ++ pszEntryString[0] = '\0'; ++ } ++ else ++ { ++ OSSNPrintf(pszEntryString, ++ ui16MaxStrLen, ++ szEntryFormat, ++ (bDebugFs ? "" : " "), ++ psRISubEntry->pid, ++ (psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset), ++ pszAnnotationText, ++ (bDebugFs ? "" : (char *)szProc), ++ psRISubEntry->ui64Size, ++ psRIPMR, ++ (psRISubEntry->bIsImport ? (char *)&szImport : ""), ++ (!psRISubEntry->bIsImport && (ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) && (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "", ++ (psRIPMR && PMR_IsUnpinned(psRIPMR)) ? RI_MEMDESC_ENTRY_UNPINNED_FRMT : "", ++ (bDebugFs ? '\n' : ' ')); ++ } ++} ++ ++/* Function used to produce string containing info for PMR RI entries (used for debugfs and kernel log output) */ ++static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, ++ IMG_BOOL bDebugFs, ++ IMG_UINT16 ui16MaxStrLen, ++ IMG_CHAR *pszEntryString) ++{ ++ const IMG_CHAR* pszAnnotationText; ++ IMG_DEVMEM_SIZE_T uiLogicalSize = 0; ++ IMG_DEVMEM_SIZE_T uiPhysicalSize = 0; ++ IMG_CHAR szEntryFormat[RI_PMR_ENTRY_FRMT_SIZE]; ++ ++ PMR_LogicalSize(psRIEntry->psPMR, &uiLogicalSize); ++ ++ PMR_PhysicalSize(psRIEntry->psPMR, &uiPhysicalSize); ++ ++ OSSNPrintf(szEntryFormat, ++ RI_PMR_ENTRY_FRMT_SIZE, ++ RI_PMR_ENTRY_FRMT, ++ DEVMEM_ANNOTATION_MAX_LEN); ++ ++ /* Set pszAnnotationText to that PMR RI entry */ ++ pszAnnotationText = (IMG_PCHAR) PMR_GetAnnotation(psRIEntry->psPMR); ++ ++ OSSNPrintf(pszEntryString, ++ ui16MaxStrLen, ++ szEntryFormat, ++ (bDebugFs ? "" : " "), ++ psRIEntry->pid, ++ (void*)psRIEntry->psPMR, ++ pszAnnotationText, ++ uiLogicalSize, ++ uiPhysicalSize, ++ (bDebugFs ? '\n' : ' ')); ++} ++ ++/*! ++******************************************************************************* ++ ++ @Function _DumpList ++ ++ @Description ++ Dumps out RI List entries according to parameters passed. ++ ++ @input psPMR - If not NULL, function will output the RI entries for ++ the specified PMR only ++ @input pid - If non-zero, the function will only output MEMDESC RI ++ entries made by the process with ID pid. ++ If zero, all MEMDESC RI entries will be output. 
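++
++ e.g. _DumpList(psPMR, 0) dumps the RI entry for psPMR together
++ with each MEMDESC sub-entry on its sublist, whereas
++ _DumpList(psPMR, pid) walks the per-process list for pid,
++ printing a fresh PMR header whenever the walk crosses into a
++ different parent PMR.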
++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid) ++{ ++ RI_LIST_ENTRY *psRIEntry = NULL; ++ RI_SUBLIST_ENTRY *psRISubEntry = NULL; ++ IMG_UINT16 ui16SubEntriesParsed = 0; ++ uintptr_t hashData = 0; ++ IMG_PID hashKey; ++ PMR *pPMRHashKey = psPMR; ++ IMG_BOOL bDisplayedThisPMR = IMG_FALSE; ++ IMG_UINT64 ui64LogicalSize = 0; ++ ++ PVR_RETURN_IF_INVALID_PARAM(psPMR); ++ ++ if (g_pRIHashTable && g_pProcHashTable) ++ { ++ if (pid != 0) ++ { ++ /* look-up pid in Hash Table */ ++ hashKey = pid; ++ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); ++ if (hashData) ++ { ++ psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); ++ if (psRISubEntry) ++ { ++ psRIEntry = psRISubEntry->psRI; ++ } ++ } ++ } ++ else ++ { ++ /* Look-up psPMR in Hash Table */ ++ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey); ++ psRIEntry = (RI_LIST_ENTRY *)hashData; ++ } ++ if (!psRIEntry) ++ { ++ /* No entry found in hash table */ ++ return PVRSRV_ERROR_NOT_FOUND; ++ } ++ while (psRIEntry) ++ { ++ bDisplayedThisPMR = IMG_FALSE; ++ /* Output details for RI entry */ ++ if (!pid) ++ { ++ PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); ++ ++ _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, ++ PMR_GetAnnotation(psRIEntry->psPMR), ++ psRIEntry->psPMR, ++ (IMG_UINT)psRIEntry->ui16SubListCount, ++ ui64LogicalSize)); ++ bDisplayedThisPMR = IMG_TRUE; ++ } ++ ui16SubEntriesParsed = 0; ++ if (psRIEntry->ui16SubListCount) ++ { ++#if _DUMP_LINKEDLIST_INFO ++ _RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%p}\n", ++ psRIEntry->sSubListFirst.psNextNode)); ++#endif /* _DUMP_LINKEDLIST_INFO */ ++ if (!pid) ++ { ++ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), ++ RI_SUBLIST_ENTRY, sListNode); ++ } ++ /* Traverse RI sublist and output details for each entry */ ++ while (psRISubEntry) ++ { ++ if (psRIEntry) ++ { ++ if ((ui16SubEntriesParsed >= psRIEntry->ui16SubListCount)) ++ { ++ break; ++ } ++ if (!bDisplayedThisPMR) ++ { ++ PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize); ++ ++ _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx, ++ PMR_GetAnnotation(psRIEntry->psPMR), ++ psRIEntry->psPMR, ++ (IMG_UINT)psRIEntry->ui16SubListCount, ++ ui64LogicalSize)); ++ bDisplayedThisPMR = IMG_TRUE; ++ } ++ } ++#if _DUMP_LINKEDLIST_INFO ++ _RIOutput (("RI LIST: [this subentry:0x%p]\n",psRISubEntry)); ++ _RIOutput (("RI LIST: psRI:0x%p\n",psRISubEntry->psRI)); ++#endif /* _DUMP_LINKEDLIST_INFO */ ++ ++ { ++ IMG_CHAR szEntryString[RI_MEMDESC_ENTRY_BUF_SIZE]; ++ ++ _GenerateMEMDESCEntryString(psRISubEntry, ++ IMG_FALSE, ++ RI_MEMDESC_ENTRY_BUF_SIZE, ++ szEntryString); ++ _RIOutput (("%s",szEntryString)); ++ } ++ ++ if (pid) ++ { ++ if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || ++ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) ++ { ++ psRISubEntry = NULL; ++ } ++ else ++ { ++ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), ++ RI_SUBLIST_ENTRY, sProcListNode); ++ if (psRISubEntry) ++ { ++ if (psRIEntry != psRISubEntry->psRI) ++ { ++ /* ++ * The next MEMDESC in the process linked list is in a different PMR ++ */ ++ psRIEntry = psRISubEntry->psRI; ++ bDisplayedThisPMR = IMG_FALSE; ++ } ++ } ++ } ++ } ++ else ++ { ++ ui16SubEntriesParsed++; ++ psRISubEntry = 
IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
++                                                   RI_SUBLIST_ENTRY, sListNode);
++                }
++            }
++        }
++        if (!pid && psRIEntry)
++        {
++            if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount)
++            {
++                /*
++                 * Output error message as sublist does not contain the
++                 * number of entries indicated by sublist count
++                 */
++                _RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries\n",
++                            ui16SubEntriesParsed, psRIEntry->ui16SubListCount));
++            }
++            else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst)))
++            {
++                /*
++                 * Output error message as sublist is empty but sublist count
++                 * is not zero
++                 */
++                _RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist\n",
++                            psRIEntry->ui16SubListCount));
++            }
++        }
++        psRIEntry = NULL;
++    }
++    }
++    return PVRSRV_OK;
++}
++
++/*!
++*******************************************************************************
++
++ @Function RIDumpAllKM
++
++ @Description
++ Dumps out the contents of all RI List entries (i.e. for all
++ MEMDESC allocations for each PMR).
++ At present, output is directed to Kernel log
++ via PVR_DPF.
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RIDumpAllKM(void)
++{
++    if (g_pRIHashTable)
++    {
++        return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries, NULL);
++    }
++    return PVRSRV_OK;
++}
++
++/*!
++*******************************************************************************
++
++ @Function RIDumpProcessKM
++
++ @Description
++ Dumps out the contents of all MEMDESC RI List entries (for every
++ PMR) which have been allocated by the specified process only.
++ At present, output is directed to Kernel log
++ via PVR_DPF.
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid)
++{
++    PVRSRV_ERROR eError;
++    IMG_UINT32 dummyPMR;
++
++    if (!g_pProcHashTable)
++    {
++        return PVRSRV_OK;
++    }
++
++    /* Acquire RI lock */
++    _RILock();
++
++    /* _DumpList() rejects a NULL PMR, so pass a dummy non-NULL
++     * pointer; the walk itself is driven by the pid hash table. */
++    eError = _DumpList((PMR *)&dummyPMR, pid);
++
++    /* Release RI lock */
++    _RIUnlock();
++
++    return eError;
++}
++
++/*!
++*******************************************************************************
++
++ @Function _TotalAllocsForProcess
++
++ @Description
++ Totals all PMR physical backing for the given process.
++
++ @input pid - ID of process.
++
++ @input ePhysHeapType - type of Physical Heap for which to total allocs
++
++ @Return Size of all physical backing for PID's PMRs allocated from the
++ specified heap type (in bytes).
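++
++ A PMR may back several MEMDESC sub-entries belonging to the same
++ process, so each PMR is tagged with
++ RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS when first counted, and a
++ second walk of the process list clears the flag again; this stops
++ shared physical backing being totalled more than once. The total
++ is accumulated in a signed 32-bit value, so a warning is output
++ if it would exceed 0x7fffffff bytes.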
++ ++******************************************************************************/ ++static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType) ++{ ++ RI_LIST_ENTRY *psRIEntry = NULL; ++ RI_SUBLIST_ENTRY *psInitialRISubEntry = NULL; ++ RI_SUBLIST_ENTRY *psRISubEntry = NULL; ++ uintptr_t hashData = 0; ++ IMG_PID hashKey; ++ IMG_INT32 i32TotalPhysical = 0; ++ ++ if (g_pRIHashTable && g_pProcHashTable) ++ { ++ if (pid == PVR_SYS_ALLOC_PID) ++ { ++ IMG_UINT32 ui32ProcessedSysAllocPMRCount = 0; ++ DLLIST_NODE *psSysAllocNode = NULL; ++ ++ OSLockAcquire(g_hSysAllocPidListLock); ++ psSysAllocNode = dllist_get_next_node(&g_sSysAllocPidListHead); ++ while (psSysAllocNode && psSysAllocNode != &g_sSysAllocPidListHead) ++ { ++ psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode); ++ ui32ProcessedSysAllocPMRCount++; ++ if (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType) ++ { ++ IMG_UINT64 ui64PhysicalSize; ++ ++ PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); ++ if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); ++ } ++ i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); ++ } ++ psSysAllocNode = dllist_get_next_node(psSysAllocNode); ++ } ++ OSLockRelease(g_hSysAllocPidListLock); ++ } ++ else ++ { ++ if (pid != 0) ++ { ++ /* look-up pid in Hash Table */ ++ hashKey = pid; ++ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey); ++ if (hashData) ++ { ++ psInitialRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode); ++ psRISubEntry = psInitialRISubEntry; ++ if (psRISubEntry) ++ { ++ psRIEntry = psRISubEntry->psRI; ++ } ++ } ++ } ++ ++ while (psRISubEntry && psRIEntry) ++ { ++ if (!psRISubEntry->bIsImport && !(psRIEntry->ui32RIPMRFlags & RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS) && ++ (pid == PVR_SYS_ALLOC_PID || !(psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR)) && ++ (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType)) ++ { ++ IMG_UINT64 ui64PhysicalSize; ++ ++ ++ PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize); ++ if (((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeding size for i32",__func__)); ++ } ++ i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff); ++ psRIEntry->ui32RIPMRFlags |= RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; ++ } ++ if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || ++ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) ++ { ++ psRISubEntry = NULL; ++ psRIEntry = NULL; ++ } ++ else ++ { ++ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), ++ RI_SUBLIST_ENTRY, sProcListNode); ++ if (psRISubEntry) ++ { ++ psRIEntry = psRISubEntry->psRI; ++ } ++ } ++ } ++ psRISubEntry = psInitialRISubEntry; ++ if (psRISubEntry) ++ { ++ psRIEntry = psRISubEntry->psRI; ++ } ++ while (psRISubEntry && psRIEntry) ++ { ++ psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS; ++ if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) || ++ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData)) ++ { ++ psRISubEntry = NULL; ++ psRIEntry = NULL; ++ } ++ else ++ { ++ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)), ++ 
RI_SUBLIST_ENTRY, sProcListNode);
++                    if (psRISubEntry)
++                    {
++                        psRIEntry = psRISubEntry->psRI;
++                    }
++                }
++            }
++        }
++    }
++    return i32TotalPhysical;
++}
++
++/*!
++*******************************************************************************
++
++ @Function RITotalAllocProcessKM
++
++ @Description
++ Returns the total of allocated GPU memory (backing for PMRs)
++ which has been allocated from the specified heap by the specified
++ process only.
++
++ @Return Amount of physical backing allocated (in bytes)
++
++******************************************************************************/
++IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType)
++{
++    IMG_INT32 i32BackingTotal = 0;
++
++    if (g_pProcHashTable)
++    {
++        /* Acquire RI lock */
++        _RILock();
++
++        i32BackingTotal = _TotalAllocsForProcess(pid, ePhysHeapType);
++
++        /* Release RI lock */
++        _RIUnlock();
++    }
++    return i32BackingTotal;
++}
++
++#if defined(DEBUG)
++/*!
++*******************************************************************************
++
++ @Function _DumpProcessList
++
++ @Description
++ Searches the RI sublist of the given PMR for a MEMDESC entry
++ created by the process with ID pid whose allocation covers
++ ui64Offset, and returns the device virtual address of that
++ allocation.
++
++ @input psPMR - PMR whose RI sublist is to be searched
++ @input pid - ID of the process which made the allocation
++ @input ui64Offset - offset within the PMR to look up
++ @output psDevVAddr - device virtual address of the matching
++ allocation (0 if no match is found)
++
++ @Return PVRSRV_ERROR
++
++******************************************************************************/
++static PVRSRV_ERROR _DumpProcessList(PMR *psPMR,
++                                     IMG_PID pid,
++                                     IMG_UINT64 ui64Offset,
++                                     IMG_DEV_VIRTADDR *psDevVAddr)
++{
++    RI_LIST_ENTRY *psRIEntry = NULL;
++    RI_SUBLIST_ENTRY *psRISubEntry = NULL;
++    IMG_UINT16 ui16SubEntriesParsed = 0;
++    uintptr_t hashData = 0;
++    PMR *pPMRHashKey = psPMR;
++
++    psDevVAddr->uiAddr = 0;
++
++    PVR_RETURN_IF_INVALID_PARAM(psPMR);
++
++    if (g_pRIHashTable && g_pProcHashTable)
++    {
++        PVR_ASSERT(psPMR && pid);
++
++        /* Look-up psPMR in Hash Table */
++        hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
++        psRIEntry = (RI_LIST_ENTRY *)hashData;
++
++        if (!psRIEntry)
++        {
++            /* No entry found in hash table */
++            return PVRSRV_ERROR_NOT_FOUND;
++        }
++
++        if (psRIEntry->ui16SubListCount)
++        {
++            psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
++                                            RI_SUBLIST_ENTRY, sListNode);
++
++            /* Traverse RI sublist looking for a matching entry */
++            while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
++            {
++                if (pid == psRISubEntry->pid)
++                {
++                    IMG_UINT64 ui64StartOffset = psRISubEntry->ui64Offset;
++                    IMG_UINT64 ui64EndOffset = psRISubEntry->ui64Offset + psRISubEntry->ui64Size;
++
++                    if (ui64Offset >= ui64StartOffset && ui64Offset < ui64EndOffset)
++                    {
++                        psDevVAddr->uiAddr = psRISubEntry->sVAddr.uiAddr;
++                        return PVRSRV_OK;
++                    }
++                }
++
++                ui16SubEntriesParsed++;
++                psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
++                                                RI_SUBLIST_ENTRY, sListNode);
++            }
++        }
++    }
++
++    return PVRSRV_ERROR_INVALID_PARAMS;
++}
++
++/*!
++*******************************************************************************
++
++ @Function RIDumpProcessListKM
++
++ @Description
++ Looks up the MEMDESC RI List entry for the given PMR which was
++ allocated by the specified process and covers the given offset,
++ and returns the device virtual address of that allocation.
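++
++ A minimal usage sketch (caller variables hypothetical):
++
++   IMG_DEV_VIRTADDR sDevVAddr = { 0 };
++   PVRSRV_ERROR eErr = RIDumpProcessListKM(psPMR, uiPID,
++                                           ui64Offset, &sDevVAddr);
++
++ On PVRSRV_OK, sDevVAddr.uiAddr holds the GPU virtual address
++ which maps ui64Offset within psPMR for process uiPID; if no
++ matching allocation exists, PVRSRV_ERROR_INVALID_PARAMS (or
++ PVRSRV_ERROR_NOT_FOUND when the PMR has no RI entry) is
++ returned instead.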
++ ++ @Return PVRSRV_ERROR ++ ++******************************************************************************/ ++PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, ++ IMG_PID pid, ++ IMG_UINT64 ui64Offset, ++ IMG_DEV_VIRTADDR *psDevVAddr) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (!g_pProcHashTable) ++ { ++ return PVRSRV_OK; ++ } ++ ++ /* Acquire RI lock*/ ++ _RILock(); ++ ++ eError = _DumpProcessList(psPMR, ++ pid, ++ ui64Offset, ++ psDevVAddr); ++ ++ /* Release RI lock*/ ++ _RIUnlock(); ++ ++ return eError; ++} ++#endif ++ ++static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) ++{ ++ RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v; ++ ++ PVR_UNREFERENCED_PARAMETER (k); ++ PVR_UNREFERENCED_PARAMETER (pvPriv); ++ ++ return RIDumpListKM(psRIEntry->psPMR); ++} ++ ++static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v, void* pvPriv) ++{ ++ RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v; ++ RI_SUBLIST_ENTRY *psRISubEntry; ++ PVRSRV_ERROR eResult = PVRSRV_OK; ++ ++ PVR_UNREFERENCED_PARAMETER (k); ++ PVR_UNREFERENCED_PARAMETER (pvPriv); ++ ++ while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0)) ++ { ++ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode); ++ eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry); ++ } ++ if (eResult == PVRSRV_OK) ++ { ++ eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry); ++ /* ++ * If we've deleted the Hash table, return ++ * an error to stop the iterator... ++ */ ++ if (!g_pRIHashTable) ++ { ++ eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; ++ } ++ } ++ return eResult; ++} ++ ++static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v, void* pvPriv) ++{ ++ RI_SUBLIST_ENTRY *psRISubEntry = (RI_SUBLIST_ENTRY *)v; ++ PVRSRV_ERROR eResult; ++ ++ PVR_UNREFERENCED_PARAMETER (k); ++ PVR_UNREFERENCED_PARAMETER (pvPriv); ++ ++ eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry); ++ if (eResult == PVRSRV_OK && !g_pProcHashTable) ++ { ++ /* ++ * If we've deleted the Hash table, return ++ * an error to stop the iterator... ++ */ ++ eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE; ++ } ++ ++ return eResult; ++} ++ ++#endif /* if defined(PVRSRV_ENABLE_GPU_MEMORY_INFO) */ +diff --git a/drivers/gpu/drm/img-rogue/ri_server.h b/drivers/gpu/drm/img-rogue/ri_server.h +new file mode 100644 +index 000000000000..f7467f800070 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/ri_server.h +@@ -0,0 +1,106 @@ ++/*************************************************************************/ /*! ++@File ri_server.h ++@Title Resource Information abstraction ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Resource Information (RI) functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RI_SERVER_H ++#define RI_SERVER_H ++ ++#include "img_defs.h" ++#include "ri_typedefs.h" ++#include "pmr.h" ++#include "pvrsrv_error.h" ++#include "physheap.h" ++ ++PVRSRV_ERROR RIInitKM(void); ++void RIDeInitKM(void); ++ ++void RILockAcquireKM(void); ++void RILockReleaseKM(void); ++ ++PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR); ++ ++PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR, ++ IMG_PID ui32Owner); ++ ++PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR, ++ IMG_UINT32 ui32TextBSize, ++ const IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN], ++ IMG_UINT64 uiOffset, ++ IMG_UINT64 uiSize, ++ IMG_BOOL bIsImport, ++ IMG_BOOL bIsSuballoc, ++ RI_HANDLE *phRIHandle); ++ ++PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize, ++ const IMG_CHAR *psz8TextB, ++ IMG_UINT64 ui64Size, ++ IMG_UINT64 ui64DevVAddr, ++ RI_HANDLE *phRIHandle); ++ ++PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle, ++ IMG_DEV_VIRTADDR sVAddr); ++ ++PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle); ++PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle); ++ ++PVRSRV_ERROR RIDeleteListKM(void); ++ ++PVRSRV_ERROR RIDumpListKM(PMR *psPMR); ++ ++PVRSRV_ERROR RIDumpAllKM(void); ++ ++PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid); ++ ++#if defined(DEBUG) ++PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR, ++ IMG_PID pid, ++ IMG_UINT64 ui64Offset, ++ IMG_DEV_VIRTADDR *psDevVAddr); ++#endif ++ ++IMG_BOOL RIGetListEntryKM(IMG_PID pid, ++ IMG_HANDLE **ppHandle, ++ IMG_CHAR **ppszEntryString); ++ ++IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType); ++ ++#endif /* RI_SERVER_H */ +diff --git a/drivers/gpu/drm/img-rogue/ri_typedefs.h b/drivers/gpu/drm/img-rogue/ri_typedefs.h +new file mode 100644 +index 000000000000..77be10e2ab03 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/ri_typedefs.h +@@ -0,0 +1,52 @@ ++/*************************************************************************/ /*! ++@File ++@Title Resource Information (RI) Management ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Client side part of RI management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef RI_TYPEDEFS_H ++#define RI_TYPEDEFS_H ++ ++#include "img_types.h" ++ ++typedef struct RI_SUBLIST_ENTRY RI_ENTRY; ++typedef RI_ENTRY* RI_HANDLE; ++ ++#endif /* #ifndef RI_TYPEDEFS_H */ +diff --git a/drivers/gpu/drm/img-rogue/rogue_trace_events.h b/drivers/gpu/drm/img-rogue/rogue_trace_events.h +new file mode 100644 +index 000000000000..e59230703f8e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/rogue_trace_events.h +@@ -0,0 +1,543 @@ ++/*************************************************************************/ /*! ++@File ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM rogue ++ ++#if !defined(ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) ++#define ROGUE_TRACE_EVENTS_H ++ ++#include ++#include ++#include ++#include ++ ++#define show_secs_from_ns(ns) \ ++ ({ \ ++ u64 t = ns + (NSEC_PER_USEC / 2); \ ++ do_div(t, NSEC_PER_SEC); \ ++ t; \ ++ }) ++ ++#define show_usecs_from_ns(ns) \ ++ ({ \ ++ u64 t = ns + (NSEC_PER_USEC / 2); \ ++ u32 rem; \ ++ do_div(t, NSEC_PER_USEC); \ ++ rem = do_div(t, USEC_PER_SEC); \ ++ }) ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++int trace_fence_update_enabled_callback(void); ++#else ++void trace_fence_update_enabled_callback(void); ++#endif ++void trace_fence_update_disabled_callback(void); ++ ++TRACE_EVENT_FN(rogue_fence_update, ++ ++ TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset, ++ u32 sync_fwaddr, u32 sync_value), ++ ++ TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value), ++ ++ TP_STRUCT__entry( ++ __string( comm, comm ) ++ __string( cmd, cmd ) ++ __string( dm, dm ) ++ __field( u32, ctx_id ) ++ __field( u32, offset ) ++ __field( u32, sync_fwaddr ) ++ __field( u32, sync_value ) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(comm, comm); ++ __assign_str(cmd, cmd); ++ __assign_str(dm, dm); ++ __entry->ctx_id = ctx_id; ++ __entry->offset = offset; ++ __entry->sync_fwaddr = sync_fwaddr; ++ __entry->sync_value = sync_value; ++ ), ++ ++ TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx", ++ __get_str(comm), ++ __get_str(cmd), ++ __get_str(dm), ++ (unsigned long)__entry->ctx_id, ++ (unsigned long)__entry->offset, ++ (unsigned long)__entry->sync_fwaddr, ++ (unsigned long)__entry->sync_value), ++ ++ trace_fence_update_enabled_callback, ++ trace_fence_update_disabled_callback ++); ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++int trace_fence_check_enabled_callback(void); ++#else ++void trace_fence_check_enabled_callback(void); ++#endif ++void trace_fence_check_disabled_callback(void); ++ ++TRACE_EVENT_FN(rogue_fence_check, ++ ++ TP_PROTO(const 
char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset, ++ u32 sync_fwaddr, u32 sync_value), ++ ++ TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value), ++ ++ TP_STRUCT__entry( ++ __string( comm, comm ) ++ __string( cmd, cmd ) ++ __string( dm, dm ) ++ __field( u32, ctx_id ) ++ __field( u32, offset ) ++ __field( u32, sync_fwaddr ) ++ __field( u32, sync_value ) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(comm, comm); ++ __assign_str(cmd, cmd); ++ __assign_str(dm, dm); ++ __entry->ctx_id = ctx_id; ++ __entry->offset = offset; ++ __entry->sync_fwaddr = sync_fwaddr; ++ __entry->sync_value = sync_value; ++ ), ++ ++ TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx", ++ __get_str(comm), ++ __get_str(cmd), ++ __get_str(dm), ++ (unsigned long)__entry->ctx_id, ++ (unsigned long)__entry->offset, ++ (unsigned long)__entry->sync_fwaddr, ++ (unsigned long)__entry->sync_value), ++ ++ trace_fence_check_enabled_callback, ++ trace_fence_check_disabled_callback ++); ++ ++TRACE_EVENT(rogue_job_enqueue, ++ ++ TP_PROTO(u32 ctx_id, u32 int_id, u32 ext_id, ++ const char *kick_type), ++ ++ TP_ARGS(ctx_id, int_id, ext_id, kick_type), ++ ++ TP_STRUCT__entry( ++ __field(u32, ctx_id) ++ __field(u32, int_id) ++ __field(u32, ext_id) ++ __string(kick_type, kick_type) ++ ), ++ ++ TP_fast_assign( ++ __entry->ctx_id = ctx_id; ++ __entry->int_id = int_id; ++ __entry->ext_id = ext_id; ++ __assign_str(kick_type, kick_type); ++ ), ++ ++ TP_printk("ctx_id=%lu int_id=%lu ext_id=%lu kick_type=%s", ++ (unsigned long) __entry->ctx_id, ++ (unsigned long) __entry->int_id, ++ (unsigned long) __entry->ext_id, ++ __get_str(kick_type) ++ ) ++); ++ ++TRACE_EVENT(rogue_sched_switch, ++ ++ TP_PROTO(const char *work_type, u32 switch_type, u64 timestamp, u32 next_ctx_id, ++ u32 next_prio, u32 next_int_id, u32 next_ext_id), ++ ++ TP_ARGS(work_type, switch_type, timestamp, next_ctx_id, next_prio, next_int_id, next_ext_id), ++ ++ TP_STRUCT__entry( ++ __string(work_type, work_type) ++ __field(u32, switch_type) ++ __field(u64, timestamp) ++ __field(u32, next_ctx_id) ++ __field(u32, next_prio) ++ __field(u32, next_int_id) ++ __field(u32, next_ext_id) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(work_type, work_type); ++ __entry->switch_type = switch_type; ++ __entry->timestamp = timestamp; ++ __entry->next_ctx_id = next_ctx_id; ++ __entry->next_prio = next_prio; ++ __entry->next_int_id = next_int_id; ++ __entry->next_ext_id = next_ext_id; ++ ), ++ ++ TP_printk("ts=%llu.%06lu next_ctx_id=%lu next_int_id=%lu next_ext_id=%lu" ++ " next_prio=%lu work_type=%s switch_type=%s", ++ (unsigned long long) show_secs_from_ns(__entry->timestamp), ++ (unsigned long) show_usecs_from_ns(__entry->timestamp), ++ (unsigned long) __entry->next_ctx_id, ++ (unsigned long) __entry->next_int_id, ++ (unsigned long) __entry->next_ext_id, ++ (unsigned long) __entry->next_prio, ++ __get_str(work_type), ++ __print_symbolic(__entry->switch_type, ++ /* These values are from ospvr_gputrace.h. 
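++	    (1 = "begin", 2 = "end"; the rogue_firmware_activity event
++	    below uses the same encoding.)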
*/ ++ { 1, "begin" }, ++ { 2, "end" }) ++ ) ++); ++ ++TRACE_EVENT(rogue_create_fw_context, ++ ++ TP_PROTO(const char *comm, const char *dm, u32 ctx_id), ++ ++ TP_ARGS(comm, dm, ctx_id), ++ ++ TP_STRUCT__entry( ++ __string( comm, comm ) ++ __string( dm, dm ) ++ __field( u32, ctx_id ) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(comm, comm); ++ __assign_str(dm, dm); ++ __entry->ctx_id = ctx_id; ++ ), ++ ++ TP_printk("comm=%s dm=%s ctx_id=%lu", ++ __get_str(comm), ++ __get_str(dm), ++ (unsigned long)__entry->ctx_id) ++); ++ ++void PVRGpuTraceEnableUfoCallback(void); ++void PVRGpuTraceDisableUfoCallback(void); ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++int PVRGpuTraceEnableUfoCallbackWrapper(void); ++#else ++#define PVRGpuTraceEnableUfoCallbackWrapper \ ++ PVRGpuTraceEnableUfoCallback ++#endif ++ ++TRACE_EVENT_FN(rogue_ufo_update, ++ ++ TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, ++ u32 fwaddr, u32 old_value, u32 new_value), ++ ++ TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, old_value, ++ new_value), ++ ++ TP_STRUCT__entry( ++ __field( u64, timestamp ) ++ __field( u32, ctx_id ) ++ __field( u32, int_id ) ++ __field( u32, ext_id ) ++ __field( u32, fwaddr ) ++ __field( u32, old_value ) ++ __field( u32, new_value ) ++ ), ++ ++ TP_fast_assign( ++ __entry->timestamp = timestamp; ++ __entry->ctx_id = ctx_id; ++ __entry->int_id = int_id; ++ __entry->ext_id = ext_id; ++ __entry->fwaddr = fwaddr; ++ __entry->old_value = old_value; ++ __entry->new_value = new_value; ++ ), ++ ++ TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" ++ " fwaddr=%#lx old_value=%#lx new_value=%#lx", ++ (unsigned long long)show_secs_from_ns(__entry->timestamp), ++ (unsigned long)show_usecs_from_ns(__entry->timestamp), ++ (unsigned long)__entry->ctx_id, ++ (unsigned long)__entry->int_id, ++ (unsigned long)__entry->ext_id, ++ (unsigned long)__entry->fwaddr, ++ (unsigned long)__entry->old_value, ++ (unsigned long)__entry->new_value), ++ PVRGpuTraceEnableUfoCallbackWrapper, ++ PVRGpuTraceDisableUfoCallback ++); ++ ++TRACE_EVENT_FN(rogue_ufo_check_fail, ++ ++ TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, ++ u32 fwaddr, u32 value, u32 required), ++ ++ TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required), ++ ++ TP_STRUCT__entry( ++ __field( u64, timestamp ) ++ __field( u32, ctx_id ) ++ __field( u32, int_id ) ++ __field( u32, ext_id ) ++ __field( u32, fwaddr ) ++ __field( u32, value ) ++ __field( u32, required ) ++ ), ++ ++ TP_fast_assign( ++ __entry->timestamp = timestamp; ++ __entry->ctx_id = ctx_id; ++ __entry->int_id = int_id; ++ __entry->ext_id = ext_id; ++ __entry->fwaddr = fwaddr; ++ __entry->value = value; ++ __entry->required = required; ++ ), ++ ++ TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" ++ " fwaddr=%#lx value=%#lx required=%#lx", ++ (unsigned long long)show_secs_from_ns(__entry->timestamp), ++ (unsigned long)show_usecs_from_ns(__entry->timestamp), ++ (unsigned long)__entry->ctx_id, ++ (unsigned long)__entry->int_id, ++ (unsigned long)__entry->ext_id, ++ (unsigned long)__entry->fwaddr, ++ (unsigned long)__entry->value, ++ (unsigned long)__entry->required), ++ PVRGpuTraceEnableUfoCallbackWrapper, ++ PVRGpuTraceDisableUfoCallback ++); ++ ++TRACE_EVENT_FN(rogue_ufo_pr_check_fail, ++ ++ TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, ++ u32 fwaddr, u32 value, u32 required), ++ ++ TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value, required), ++ ++ TP_STRUCT__entry( ++ __field( u64, timestamp ) ++ __field( u32, ctx_id ) ++ 
__field( u32, int_id ) ++ __field( u32, ext_id ) ++ __field( u32, fwaddr ) ++ __field( u32, value ) ++ __field( u32, required ) ++ ), ++ ++ TP_fast_assign( ++ __entry->timestamp = timestamp; ++ __entry->ctx_id = ctx_id; ++ __entry->int_id = int_id; ++ __entry->ext_id = ext_id; ++ __entry->fwaddr = fwaddr; ++ __entry->value = value; ++ __entry->required = required; ++ ), ++ ++ TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" ++ " fwaddr=%#lx value=%#lx required=%#lx", ++ (unsigned long long)show_secs_from_ns(__entry->timestamp), ++ (unsigned long)show_usecs_from_ns(__entry->timestamp), ++ (unsigned long)__entry->ctx_id, ++ (unsigned long)__entry->int_id, ++ (unsigned long)__entry->ext_id, ++ (unsigned long)__entry->fwaddr, ++ (unsigned long)__entry->value, ++ (unsigned long)__entry->required), ++ PVRGpuTraceEnableUfoCallbackWrapper, ++ PVRGpuTraceDisableUfoCallback ++); ++ ++TRACE_EVENT_FN(rogue_ufo_check_success, ++ ++ TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, ++ u32 fwaddr, u32 value), ++ ++ TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value), ++ ++ TP_STRUCT__entry( ++ __field( u64, timestamp ) ++ __field( u32, ctx_id ) ++ __field( u32, int_id ) ++ __field( u32, ext_id ) ++ __field( u32, fwaddr ) ++ __field( u32, value ) ++ ), ++ ++ TP_fast_assign( ++ __entry->timestamp = timestamp; ++ __entry->ctx_id = ctx_id; ++ __entry->int_id = int_id; ++ __entry->ext_id = ext_id; ++ __entry->fwaddr = fwaddr; ++ __entry->value = value; ++ ), ++ ++ TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" ++ " fwaddr=%#lx value=%#lx", ++ (unsigned long long)show_secs_from_ns(__entry->timestamp), ++ (unsigned long)show_usecs_from_ns(__entry->timestamp), ++ (unsigned long)__entry->ctx_id, ++ (unsigned long)__entry->int_id, ++ (unsigned long)__entry->ext_id, ++ (unsigned long)__entry->fwaddr, ++ (unsigned long)__entry->value), ++ PVRGpuTraceEnableUfoCallbackWrapper, ++ PVRGpuTraceDisableUfoCallback ++); ++ ++TRACE_EVENT_FN(rogue_ufo_pr_check_success, ++ ++ TP_PROTO(u64 timestamp, u32 ctx_id, u32 int_id, u32 ext_id, ++ u32 fwaddr, u32 value), ++ ++ TP_ARGS(timestamp, ctx_id, int_id, ext_id, fwaddr, value), ++ ++ TP_STRUCT__entry( ++ __field( u64, timestamp ) ++ __field( u32, ctx_id ) ++ __field( u32, int_id ) ++ __field( u32, ext_id ) ++ __field( u32, fwaddr ) ++ __field( u32, value ) ++ ), ++ ++ TP_fast_assign( ++ __entry->timestamp = timestamp; ++ __entry->ctx_id = ctx_id; ++ __entry->int_id = int_id; ++ __entry->ext_id = ext_id; ++ __entry->fwaddr = fwaddr; ++ __entry->value = value; ++ ), ++ ++ TP_printk("ts=%llu.%06lu ctx_id=%lu int_id=%lu ext_id=%lu" ++ " fwaddr=%#lx value=%#lx", ++ (unsigned long long)show_secs_from_ns(__entry->timestamp), ++ (unsigned long)show_usecs_from_ns(__entry->timestamp), ++ (unsigned long)__entry->ctx_id, ++ (unsigned long)__entry->int_id, ++ (unsigned long)__entry->ext_id, ++ (unsigned long)__entry->fwaddr, ++ (unsigned long)__entry->value), ++ PVRGpuTraceEnableUfoCallbackWrapper, ++ PVRGpuTraceDisableUfoCallback ++); ++ ++TRACE_EVENT(rogue_events_lost, ++ ++ TP_PROTO(u32 event_source, u32 last_ordinal, u32 curr_ordinal), ++ ++ TP_ARGS(event_source, last_ordinal, curr_ordinal), ++ ++ TP_STRUCT__entry( ++ __field( u32, event_source ) ++ __field( u32, last_ordinal ) ++ __field( u32, curr_ordinal ) ++ ), ++ ++ TP_fast_assign( ++ __entry->event_source = event_source; ++ __entry->last_ordinal = last_ordinal; ++ __entry->curr_ordinal = curr_ordinal; ++ ), ++ ++ TP_printk("event_source=%s last_ordinal=%u curr_ordinal=%u", ++ 
__print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}),
++		__entry->last_ordinal,
++		__entry->curr_ordinal)
++);
++
++void PVRGpuTraceEnableFirmwareActivityCallback(void);
++void PVRGpuTraceDisableFirmwareActivityCallback(void);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
++int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void);
++#else
++#define PVRGpuTraceEnableFirmwareActivityCallbackWrapper \
++	PVRGpuTraceEnableFirmwareActivityCallback
++#endif
++
++TRACE_EVENT_FN(rogue_firmware_activity,
++
++	TP_PROTO(u64 timestamp, const char *task, u32 fw_event),
++
++	TP_ARGS(timestamp, task, fw_event),
++
++	TP_STRUCT__entry(
++		__field( u64, timestamp )
++		__string( task, task )
++		__field( u32, fw_event )
++	),
++
++	TP_fast_assign(
++		__entry->timestamp = timestamp;
++		__assign_str(task, task);
++		__entry->fw_event = fw_event;
++	),
++
++	TP_printk("ts=%llu.%06lu task=%s event=%s",
++		(unsigned long long)show_secs_from_ns(__entry->timestamp),
++		(unsigned long)show_usecs_from_ns(__entry->timestamp),
++		__get_str(task),
++		__print_symbolic(__entry->fw_event,
++			/* These values are from ospvr_gputrace.h. */
++			{ 1, "begin" },
++			{ 2, "end" })),
++
++	PVRGpuTraceEnableFirmwareActivityCallbackWrapper,
++	PVRGpuTraceDisableFirmwareActivityCallback
++);
++
++#undef show_secs_from_ns
++#undef show_usecs_from_ns
++
++#endif /* ROGUE_TRACE_EVENTS_H */
++
++#undef TRACE_INCLUDE_PATH
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_PATH .
++
++/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
++#define TRACE_INCLUDE_FILE rogue_trace_events
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+diff --git a/drivers/gpu/drm/img-rogue/server_cache_bridge.c b/drivers/gpu/drm/img-rogue/server_cache_bridge.c
+new file mode 100644
+index 000000000000..18509ba08819
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/server_cache_bridge.c
+@@ -0,0 +1,457 @@
++/*******************************************************************************
++@File
++@Title Server bridge for cache
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Implements the server side of the bridge for cache
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution.
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#include ++ ++#include "img_defs.h" ++ ++#include "cache_km.h" ++ ++#include "common_cache_bridge.h" ++ ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "connection_server.h" ++#include "pvr_bridge.h" ++#if defined(SUPPORT_RGX) ++#include "rgx_bridge.h" ++#endif ++#include "srvcore.h" ++#include "handle.h" ++ ++#include ++ ++/* *************************************************************************** ++ * Server-side bridge entry points ++ */ ++ ++static_assert(CACHE_BATCH_MAX <= IMG_UINT32_MAX, ++ "CACHE_BATCH_MAX must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psCacheOpQueueIN_UI8, ++ IMG_UINT8 * psCacheOpQueueOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN = ++ (PVRSRV_BRIDGE_IN_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT = ++ (PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *) IMG_OFFSET_ADDR(psCacheOpQueueOUT_UI8, 0); ++ ++ PMR **psPMRInt = NULL; ++ IMG_HANDLE *hPMRInt2 = NULL; ++ IMG_UINT64 *ui64AddressInt = NULL; ++ IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL; ++ IMG_DEVMEM_SIZE_T *uiSizeInt = NULL; ++ PVRSRV_CACHE_OP *iuCacheOpInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) + ++ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) + ++ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) + ++ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) + ++ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) + ++ ((IMG_UINT64) psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) + 0; ++ ++ if (unlikely(psCacheOpQueueIN->ui32NumCacheOps > CACHE_BATCH_MAX)) ++ { ++ psCacheOpQueueOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto CacheOpQueue_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psCacheOpQueueOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto CacheOpQueue_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
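++	   The IN structure lives at the start of a bridge input
++	   buffer of PVRSRV_MAX_BRIDGE_IN_SIZE bytes, so any space
++	   left after it can hold the variable-length arrays without
++	   a separate allocation; when they do not fit,
++	   OSAllocMemNoStats() is used as a fallback and the buffer
++	   is freed again at CacheOpQueue_exit.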
*/ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psCacheOpQueueIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psCacheOpQueueOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto CacheOpQueue_exit; ++ } ++ } ++ } ++ ++ if (psCacheOpQueueIN->ui32NumCacheOps != 0) ++ { ++ psPMRInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psPMRInt, 0, psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)); ++ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *); ++ hPMRInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hPMRInt2, (const void __user *)psCacheOpQueueIN->phPMR, ++ psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto CacheOpQueue_exit; ++ } ++ } ++ if (psCacheOpQueueIN->ui32NumCacheOps != 0) ++ { ++ ui64AddressInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64); ++ } ++ ++ /* Copy the data over */ ++ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui64AddressInt, (const void __user *)psCacheOpQueueIN->pui64Address, ++ psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) != PVRSRV_OK) ++ { ++ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto CacheOpQueue_exit; ++ } ++ } ++ if (psCacheOpQueueIN->ui32NumCacheOps != 0) ++ { ++ uiOffsetInt = ++ (IMG_DEVMEM_OFFSET_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T); ++ } ++ ++ /* Copy the data over */ ++ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiOffsetInt, (const void __user *)psCacheOpQueueIN->puiOffset, ++ psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK) ++ { ++ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto CacheOpQueue_exit; ++ } ++ } ++ if (psCacheOpQueueIN->ui32NumCacheOps != 0) ++ { ++ uiSizeInt = (IMG_DEVMEM_SIZE_T *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T); ++ } ++ ++ /* Copy the data over */ ++ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiSizeInt, (const void __user *)psCacheOpQueueIN->puiSize, ++ psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK) ++ { ++ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto CacheOpQueue_exit; ++ } ++ } ++ if (psCacheOpQueueIN->ui32NumCacheOps != 0) ++ { ++ iuCacheOpInt = ++ (PVRSRV_CACHE_OP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps 
* sizeof(PVRSRV_CACHE_OP); ++ } ++ ++ /* Copy the data over */ ++ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, iuCacheOpInt, (const void __user *)psCacheOpQueueIN->piuCacheOp, ++ psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK) ++ { ++ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto CacheOpQueue_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) ++ { ++ /* Look up the address from the handle */ ++ psCacheOpQueueOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt[i], ++ hPMRInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psCacheOpQueueOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto CacheOpQueue_exit; ++ } ++ } ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psCacheOpQueueOUT->eError = ++ CacheOpQueue(psConnection, OSGetDevNode(psConnection), ++ psCacheOpQueueIN->ui32NumCacheOps, ++ psPMRInt, ++ ui64AddressInt, ++ uiOffsetInt, uiSizeInt, iuCacheOpInt, psCacheOpQueueIN->ui32OpTimeline); ++ ++CacheOpQueue_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ if (hPMRInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psCacheOpQueueIN->ui32NumCacheOps; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMRInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ } ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psCacheOpQueueOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psCacheOpExecIN_UI8, ++ IMG_UINT8 * psCacheOpExecOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN = ++ (PVRSRV_BRIDGE_IN_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT = ++ (PVRSRV_BRIDGE_OUT_CACHEOPEXEC *) IMG_OFFSET_ADDR(psCacheOpExecOUT_UI8, 0); ++ ++ IMG_HANDLE hPMR = psCacheOpExecIN->hPMR; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psCacheOpExecOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psCacheOpExecOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto CacheOpExec_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psCacheOpExecOUT->eError = ++ CacheOpValExec(psPMRInt, ++ psCacheOpExecIN->ui64Address, ++ psCacheOpExecIN->uiOffset, ++ psCacheOpExecIN->uiSize, psCacheOpExecIN->iuCacheOp); ++ ++CacheOpExec_exit: ++ ++ /* Lock over handle lookup cleanup. 
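++	   The earlier PVRSRVLookupHandleUnlocked() call was made with
++	   its final argument IMG_TRUE, taking a reference on the PMR
++	   handle, so the reference must be dropped on every exit path,
++	   including the error paths that jump here.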
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psCacheOpLogIN_UI8, ++ IMG_UINT8 * psCacheOpLogOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN = ++ (PVRSRV_BRIDGE_IN_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT = ++ (PVRSRV_BRIDGE_OUT_CACHEOPLOG *) IMG_OFFSET_ADDR(psCacheOpLogOUT_UI8, 0); ++ ++ IMG_HANDLE hPMR = psCacheOpLogIN->hPMR; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psCacheOpLogOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psCacheOpLogOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto CacheOpLog_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psCacheOpLogOUT->eError = ++ CacheOpLog(psPMRInt, ++ psCacheOpLogIN->ui64Address, ++ psCacheOpLogIN->uiOffset, ++ psCacheOpLogIN->uiSize, ++ psCacheOpLogIN->i64StartTime, ++ psCacheOpLogIN->i64EndTime, psCacheOpLogIN->iuCacheOp); ++ ++CacheOpLog_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitCACHEBridge(void); ++void DeinitCACHEBridge(void); ++ ++/* ++ * Register all CACHE functions with services ++ */ ++PVRSRV_ERROR InitCACHEBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, ++ PVRSRVBridgeCacheOpQueue, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, ++ PVRSRVBridgeCacheOpExec, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG, ++ PVRSRVBridgeCacheOpLog, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all cache functions with services ++ */ ++void DeinitCACHEBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_cmm_bridge.c b/drivers/gpu/drm/img-rogue/server_cmm_bridge.c +new file mode 100644 +index 000000000000..b95f8589ae99 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_cmm_bridge.c +@@ -0,0 +1,409 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for cmm ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Implements the server side of the bridge for cmm ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
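
Every PVRSRVBridge* entry point in these generated files follows the same shape: take the per-connection handle-base lock, resolve each userspace handle to a kernel object (taking a reference), drop the lock, call the real implementation, then re-take the lock on the exit path to drop the references. A distilled, self-contained model of that control flow (plain C with hypothetical names, a sketch rather than code from this patch):

    #include <stdio.h>

    typedef int ERR;               /* stands in for PVRSRV_ERROR */
    #define OK 0

    static int the_object;        /* stands in for a PMR / context */

    /* Stand-ins for LockHandle / PVRSRVLookupHandleUnlocked / etc. */
    static void lock(void)   { }
    static void unlock(void) { }
    static ERR  lookup(int h, void **obj) { (void)h; *obj = &the_object; return OK; }
    static void release(int h) { (void)h; }                 /* drop lookup ref */
    static ERR  do_op(void *obj) { (void)obj; return OK; }  /* the real KM call */

    /* One bridge entry point: errors land in *out, the function returns 0. */
    static int bridge_entry(int handle, ERR *out)
    {
        void *obj = NULL;

        lock();                    /* Lock over handle lookup. */
        *out = lookup(handle, &obj);
        unlock();
        if (*out != OK)
            goto exit;

        *out = do_op(obj);         /* implementation runs without the lock */

    exit:
        lock();                    /* Lock over handle lookup cleanup. */
        if (obj)
            release(handle);
        unlock();
        return 0;
    }

    int main(void)
    {
        ERR e;
        bridge_entry(1, &e);
        printf("eError = %d\n", e);
        return 0;
    }

Dropping the lock around the implementation call keeps long-running device operations from serializing all bridge traffic on one connection; the reference taken at lookup keeps the object alive in the meantime.
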
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "pmr.h"
++#include "devicemem_server.h"
++
++#include "common_cmm_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++#if !defined(EXCLUDE_CMM_BRIDGE)
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static PVRSRV_ERROR _DevmemIntExportCtxpsContextExportIntRelease(void *pvData)
++{
++ PVRSRV_ERROR eError;
++ eError = DevmemIntUnexportCtx((DEVMEMINT_CTX_EXPORT *) pvData);
++ return eError;
++}
++
++static IMG_INT
++PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psDevmemIntExportCtxIN_UI8,
++ IMG_UINT8 * psDevmemIntExportCtxOUT_UI8,
++ CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN =
++ (PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxIN_UI8, 0);
++ PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT =
++ (PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntExportCtxOUT_UI8,
++ 0);
++
++ IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext;
++ DEVMEMINT_CTX *psContextInt = NULL;
++ IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR;
++ PMR *psPMRInt = NULL;
++ DEVMEMINT_CTX_EXPORT *psContextExportInt = NULL;
++
++ /* Lock over handle lookup. */
++ LockHandle(psConnection->psHandleBase);
++
++ /* Look up the address from the handle */
++ psDevmemIntExportCtxOUT->eError =
++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
++ (void **)&psContextInt,
++ hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE);
++ if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
++ {
++ UnlockHandle(psConnection->psHandleBase);
++ goto DevmemIntExportCtx_exit;
++ }
++
++ /* Look up the address from the handle */
++ psDevmemIntExportCtxOUT->eError =
++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
++ (void **)&psPMRInt,
++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
++ if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
++ {
++ UnlockHandle(psConnection->psHandleBase);
++ goto DevmemIntExportCtx_exit;
++ }
++ /* Release now we have looked up handles. */
++ UnlockHandle(psConnection->psHandleBase);
++
++ psDevmemIntExportCtxOUT->eError =
++ DevmemIntExportCtx(psContextInt, psPMRInt, &psContextExportInt);
++ /* Exit early if bridged call fails */
++ if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
++ {
++ goto DevmemIntExportCtx_exit;
++ }
++
++ /* Lock over handle creation. */
++ LockHandle(psConnection->psHandleBase);
++
++ psDevmemIntExportCtxOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
++ &psDevmemIntExportCtxOUT->
++ hContextExport,
++ (void *)psContextExportInt,
++ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ (PFN_HANDLE_RELEASE) &
++ _DevmemIntExportCtxpsContextExportIntRelease);
++ if (unlikely(psDevmemIntExportCtxOUT->eError != PVRSRV_OK))
++ {
++ UnlockHandle(psConnection->psHandleBase);
++ goto DevmemIntExportCtx_exit;
++ }
++
++ /* Release now we have created handles. */
++ UnlockHandle(psConnection->psHandleBase);
++
++DevmemIntExportCtx_exit:
++
++ /* Lock over handle lookup cleanup.
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hContext, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK) ++ { ++ if (psContextExportInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ DevmemIntUnexportCtx(psContextExportInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntUnexportCtxIN_UI8, ++ IMG_UINT8 * psDevmemIntUnexportCtxOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *) IMG_OFFSET_ADDR(psDevmemIntUnexportCtxIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *) ++ IMG_OFFSET_ADDR(psDevmemIntUnexportCtxOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntUnexportCtxOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psDevmemIntUnexportCtxIN->hContextExport, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT); ++ if (unlikely((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) && ++ (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psDevmemIntUnexportCtxOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntUnexportCtx_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntUnexportCtx_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _DevmemIntAcquireRemoteCtxpsContextIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntAcquireRemoteCtxIN_UI8, ++ IMG_UINT8 * psDevmemIntAcquireRemoteCtxOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *) ++ IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *) ++ IMG_OFFSET_ADDR(psDevmemIntAcquireRemoteCtxOUT_UI8, 0); ++ ++ IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR; ++ PMR *psPMRInt = NULL; ++ DEVMEMINT_CTX *psContextInt = NULL; ++ IMG_HANDLE hPrivDataInt = NULL; ++ ++ psDevmemIntAcquireRemoteCtxOUT->hContext = NULL; ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntAcquireRemoteCtxOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntAcquireRemoteCtx_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntAcquireRemoteCtxOUT->eError = ++ DevmemIntAcquireRemoteCtx(psPMRInt, &psContextInt, &hPrivDataInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) ++ { ++ goto DevmemIntAcquireRemoteCtx_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntAcquireRemoteCtxOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psDevmemIntAcquireRemoteCtxOUT->hContext, ++ (void *)psContextInt, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, ++ PVRSRV_HANDLE_ALLOC_FLAG_NONE, ++ (PFN_HANDLE_RELEASE) & ++ _DevmemIntAcquireRemoteCtxpsContextIntRelease); ++ if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntAcquireRemoteCtx_exit; ++ } ++ ++ psDevmemIntAcquireRemoteCtxOUT->eError = ++ PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, ++ &psDevmemIntAcquireRemoteCtxOUT->hPrivData, ++ (void *)hPrivDataInt, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, ++ PVRSRV_HANDLE_ALLOC_FLAG_NONE, ++ psDevmemIntAcquireRemoteCtxOUT->hContext); ++ if (unlikely(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntAcquireRemoteCtx_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntAcquireRemoteCtx_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK) ++ { ++ if (psDevmemIntAcquireRemoteCtxOUT->hContext) ++ { ++ PVRSRV_ERROR eError; ++ ++ /* Lock over handle creation cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) ++ psDevmemIntAcquireRemoteCtxOUT-> ++ hContext, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(eError))); ++ } ++ /* Releasing the handle should free/destroy/release the resource. ++ * This should never fail... */ ++ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); ++ ++ /* Avoid freeing/destroying/releasing the resource a second time below */ ++ psContextInt = NULL; ++ /* Release now we have cleaned up creation handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ } ++ ++ if (psContextInt) ++ { ++ DevmemIntCtxDestroy(psContextInt); ++ } ++ } ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++#endif /* EXCLUDE_CMM_BRIDGE */ ++ ++#if !defined(EXCLUDE_CMM_BRIDGE) ++PVRSRV_ERROR InitCMMBridge(void); ++void DeinitCMMBridge(void); ++ ++/* ++ * Register all CMM functions with services ++ */ ++PVRSRV_ERROR InitCMMBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, ++ PVRSRVBridgeDevmemIntExportCtx, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, ++ PVRSRVBridgeDevmemIntUnexportCtx, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, ++ PVRSRVBridgeDevmemIntAcquireRemoteCtx, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all cmm functions with services ++ */ ++void DeinitCMMBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX); ++ ++} ++#else /* EXCLUDE_CMM_BRIDGE */ ++/* This bridge is conditional on EXCLUDE_CMM_BRIDGE - when defined, ++ * do not populate the dispatch table with its functions ++ */ ++#define InitCMMBridge() \ ++ PVRSRV_OK ++ ++#define DeinitCMMBridge() ++ ++#endif /* EXCLUDE_CMM_BRIDGE */ +diff --git a/drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c b/drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c +new file mode 100644 +index 000000000000..db440d051151 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_devicememhistory_bridge.c +@@ -0,0 +1,846 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for devicememhistory ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for devicememhistory ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "devicemem_history_server.h"
++
++#include "common_devicememhistory_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++#include "lock.h"
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
++
++static IMG_INT
++PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psDevicememHistoryMapIN_UI8,
++ IMG_UINT8 * psDevicememHistoryMapOUT_UI8,
++ CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN =
++ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapIN_UI8,
++ 0);
++ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT =
++ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *) IMG_OFFSET_ADDR(psDevicememHistoryMapOUT_UI8,
++ 0);
++
++ IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR;
++ PMR *psPMRInt = NULL;
++ IMG_CHAR *uiTextInt = NULL;
++
++ IMG_UINT32 ui32NextOffset = 0;
++ IMG_BYTE *pArrayArgsBuffer = NULL;
++#if !defined(INTEGRITY_OS)
++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
++#endif
++
++ IMG_UINT32 ui32BufferSize = 0;
++ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0;
++
++ if (ui64BufferSize > IMG_UINT32_MAX)
++ {
++ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
++ goto DevicememHistoryMap_exit;
++ }
++
++ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
++
++ if (ui32BufferSize != 0)
++ {
++#if !defined(INTEGRITY_OS)
++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
++ IMG_UINT32 ui32InBufferOffset =
++ PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long));
++ IMG_UINT32 ui32InBufferExcessSize =
++ ui32InBufferOffset >=
++ PVRSRV_MAX_BRIDGE_IN_SIZE ?
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto DevicememHistoryMap_exit; ++ } ++ } ++ } ++ ++ { ++ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapIN->puiText, ++ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DevicememHistoryMap_exit; ++ } ++ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevicememHistoryMapOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevicememHistoryMapOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevicememHistoryMap_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevicememHistoryMapOUT->eError = ++ DevicememHistoryMapKM(psPMRInt, ++ psDevicememHistoryMapIN->uiOffset, ++ psDevicememHistoryMapIN->sDevVAddr, ++ psDevicememHistoryMapIN->uiSize, ++ uiTextInt, ++ psDevicememHistoryMapIN->ui32Log2PageSize, ++ psDevicememHistoryMapIN->ui32AllocationIndex, ++ &psDevicememHistoryMapOUT->ui32AllocationIndexOut); ++ ++DevicememHistoryMap_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psDevicememHistoryMapOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevicememHistoryUnmapIN_UI8, ++ IMG_UINT8 * psDevicememHistoryUnmapOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN = ++ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *) ++ IMG_OFFSET_ADDR(psDevicememHistoryUnmapIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT = ++ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *) ++ IMG_OFFSET_ADDR(psDevicememHistoryUnmapOUT_UI8, 0); ++ ++ IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR; ++ PMR *psPMRInt = NULL; ++ IMG_CHAR *uiTextInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto DevicememHistoryUnmap_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryUnmapIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto DevicememHistoryUnmap_exit; ++ } ++ } ++ } ++ ++ { ++ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapIN->puiText, ++ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DevicememHistoryUnmap_exit; ++ } ++ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; ++ } ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevicememHistoryUnmapOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevicememHistoryUnmap_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevicememHistoryUnmapOUT->eError = ++ DevicememHistoryUnmapKM(psPMRInt, ++ psDevicememHistoryUnmapIN->uiOffset, ++ psDevicememHistoryUnmapIN->sDevVAddr, ++ psDevicememHistoryUnmapIN->uiSize, ++ uiTextInt, ++ psDevicememHistoryUnmapIN->ui32Log2PageSize, ++ psDevicememHistoryUnmapIN->ui32AllocationIndex, ++ &psDevicememHistoryUnmapOUT->ui32AllocationIndexOut); ++ ++DevicememHistoryUnmap_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psDevicememHistoryUnmapOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevicememHistoryMapVRangeIN_UI8, ++ IMG_UINT8 * psDevicememHistoryMapVRangeOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeIN = ++ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *) ++ IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeOUT = ++ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *) ++ IMG_OFFSET_ADDR(psDevicememHistoryMapVRangeOUT_UI8, 0); ++ ++ IMG_CHAR *uiTextInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto DevicememHistoryMapVRange_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDevicememHistoryMapVRangeIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto DevicememHistoryMapVRange_exit; ++ } ++ } ++ } ++ ++ { ++ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiTextInt, (const void __user *)psDevicememHistoryMapVRangeIN->puiText, ++ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DevicememHistoryMapVRange_exit; ++ } ++ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; ++ } ++ ++ psDevicememHistoryMapVRangeOUT->eError = ++ DevicememHistoryMapVRangeKM(psConnection, OSGetDevNode(psConnection), ++ psDevicememHistoryMapVRangeIN->sBaseDevVAddr, ++ psDevicememHistoryMapVRangeIN->ui32ui32StartPage, ++ psDevicememHistoryMapVRangeIN->ui32NumPages, ++ psDevicememHistoryMapVRangeIN->uiAllocSize, ++ uiTextInt, ++ psDevicememHistoryMapVRangeIN->ui32Log2PageSize, ++ psDevicememHistoryMapVRangeIN->ui32AllocationIndex, ++ &psDevicememHistoryMapVRangeOUT->ui32AllocationIndexOut); ++ ++DevicememHistoryMapVRange_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psDevicememHistoryMapVRangeOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevicememHistoryUnmapVRangeIN_UI8, ++ IMG_UINT8 * psDevicememHistoryUnmapVRangeOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeIN = ++ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *) ++ IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeOUT = ++ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *) ++ IMG_OFFSET_ADDR(psDevicememHistoryUnmapVRangeOUT_UI8, 0); ++ ++ IMG_CHAR *uiTextInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + 0; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto DevicememHistoryUnmapVRange_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of 
input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = ++ (IMG_BYTE *) (void *)psDevicememHistoryUnmapVRangeIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psDevicememHistoryUnmapVRangeOUT->eError = ++ PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto DevicememHistoryUnmapVRange_exit; ++ } ++ } ++ } ++ ++ { ++ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiTextInt, (const void __user *)psDevicememHistoryUnmapVRangeIN->puiText, ++ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DevicememHistoryUnmapVRange_exit; ++ } ++ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; ++ } ++ ++ psDevicememHistoryUnmapVRangeOUT->eError = ++ DevicememHistoryUnmapVRangeKM(psConnection, OSGetDevNode(psConnection), ++ psDevicememHistoryUnmapVRangeIN->sBaseDevVAddr, ++ psDevicememHistoryUnmapVRangeIN->ui32ui32StartPage, ++ psDevicememHistoryUnmapVRangeIN->ui32NumPages, ++ psDevicememHistoryUnmapVRangeIN->uiAllocSize, ++ uiTextInt, ++ psDevicememHistoryUnmapVRangeIN->ui32Log2PageSize, ++ psDevicememHistoryUnmapVRangeIN->ui32AllocationIndex, ++ &psDevicememHistoryUnmapVRangeOUT-> ++ ui32AllocationIndexOut); ++ ++DevicememHistoryUnmapVRange_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psDevicememHistoryUnmapVRangeOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevicememHistorySparseChangeIN_UI8, ++ IMG_UINT8 * psDevicememHistorySparseChangeOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeIN = ++ (PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *) ++ IMG_OFFSET_ADDR(psDevicememHistorySparseChangeIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeOUT = ++ (PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *) ++ IMG_OFFSET_ADDR(psDevicememHistorySparseChangeOUT_UI8, 0); ++ ++ IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR; ++ PMR *psPMRInt = NULL; ++ IMG_CHAR *uiTextInt = NULL; ++ IMG_UINT32 *ui32AllocPageIndicesInt = NULL; ++ IMG_UINT32 *ui32FreePageIndicesInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) 
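++ /* Note: ui64BufferSize below is deliberately summed in 64 bits and
++  * rejected if it exceeds IMG_UINT32_MAX before being narrowed to
++  * ui32BufferSize. Two of the three summed terms are caller-controlled:
++  * with ui32AllocPageCount == ui32FreePageCount == 0x40000000, each term
++  * is 0x100000000 bytes, so a 32-bit sum would wrap to a small value and
++  * under-allocate pArrayArgsBuffer; the 64-bit check catches this. */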
++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) + ++ ((IMG_UINT64) psDevicememHistorySparseChangeIN->ui32AllocPageCount * ++ sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psDevicememHistorySparseChangeIN->ui32FreePageCount * ++ sizeof(IMG_UINT32)) + 0; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto DevicememHistorySparseChange_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = ++ (IMG_BYTE *) (void *)psDevicememHistorySparseChangeIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psDevicememHistorySparseChangeOUT->eError = ++ PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto DevicememHistorySparseChange_exit; ++ } ++ } ++ } ++ ++ { ++ uiTextInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiTextInt, ++ (const void __user *)psDevicememHistorySparseChangeIN->puiText, ++ DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DevicememHistorySparseChange_exit; ++ } ++ ((IMG_CHAR *) uiTextInt)[(DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; ++ } ++ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0) ++ { ++ ui32AllocPageIndicesInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32AllocPageIndicesInt, ++ (const void __user *)psDevicememHistorySparseChangeIN->pui32AllocPageIndices, ++ psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != ++ PVRSRV_OK) ++ { ++ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DevicememHistorySparseChange_exit; ++ } ++ } ++ if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0) ++ { ++ ui32FreePageIndicesInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32FreePageIndicesInt, ++ (const void __user *)psDevicememHistorySparseChangeIN->pui32FreePageIndices, ++ psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) != 
++ PVRSRV_OK) ++ { ++ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DevicememHistorySparseChange_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevicememHistorySparseChangeOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevicememHistorySparseChange_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevicememHistorySparseChangeOUT->eError = ++ DevicememHistorySparseChangeKM(psPMRInt, ++ psDevicememHistorySparseChangeIN->uiOffset, ++ psDevicememHistorySparseChangeIN->sDevVAddr, ++ psDevicememHistorySparseChangeIN->uiSize, ++ uiTextInt, ++ psDevicememHistorySparseChangeIN->ui32Log2PageSize, ++ psDevicememHistorySparseChangeIN->ui32AllocPageCount, ++ ui32AllocPageIndicesInt, ++ psDevicememHistorySparseChangeIN->ui32FreePageCount, ++ ui32FreePageIndicesInt, ++ psDevicememHistorySparseChangeIN->ui32AllocationIndex, ++ &psDevicememHistorySparseChangeOUT-> ++ ui32AllocationIndexOut); ++ ++DevicememHistorySparseChange_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psDevicememHistorySparseChangeOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++static POS_LOCK pDEVICEMEMHISTORYBridgeLock; ++ ++PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void); ++void DeinitDEVICEMEMHISTORYBridge(void); ++ ++/* ++ * Register all DEVICEMEMHISTORY functions with services ++ */ ++PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void) ++{ ++ PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock), "OSLockCreate"); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, ++ PVRSRVBridgeDevicememHistoryMap, pDEVICEMEMHISTORYBridgeLock); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, ++ PVRSRVBridgeDevicememHistoryUnmap, pDEVICEMEMHISTORYBridgeLock); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, ++ PVRSRVBridgeDevicememHistoryMapVRange, pDEVICEMEMHISTORYBridgeLock); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, ++ PVRSRVBridgeDevicememHistoryUnmapVRange, pDEVICEMEMHISTORYBridgeLock); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, ++ 
PVRSRVBridgeDevicememHistorySparseChange, ++ pDEVICEMEMHISTORYBridgeLock); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all devicememhistory functions with services ++ */ ++void DeinitDEVICEMEMHISTORYBridge(void) ++{ ++ OSLockDestroy(pDEVICEMEMHISTORYBridgeLock); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, ++ PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_di_bridge.c b/drivers/gpu/drm/img-rogue/server_di_bridge.c +new file mode 100644 +index 000000000000..49a97a0515e5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_di_bridge.c +@@ -0,0 +1,639 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for di ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for di ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
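
Unlike the cache and cmm bridges above, which register their handlers with a NULL lock argument, the devicememhistory bridge creates pDEVICEMEMHISTORYBridgeLock and passes it to every SetDispatchTableEntry call, so the dispatcher serializes all five devicememhistory entry points against one another. A minimal model of that optional per-entry lock (plain C with pthreads, hypothetical names, a sketch rather than the actual dispatcher):

    #include <pthread.h>
    #include <stdio.h>

    /* A dispatch-table slot: handler plus an optional serialization lock
     * (mirrors the lock argument of SetDispatchTableEntry). */
    typedef int (*handler_fn)(void);

    struct dispatch_entry {
        handler_fn       pfn;
        pthread_mutex_t *lock;   /* NULL => entry may run concurrently */
    };

    static int history_map(void) { return 0; }

    static pthread_mutex_t history_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct dispatch_entry table[] = {
        { history_map, &history_lock },  /* all history entries share it */
    };

    /* The dispatcher takes the entry's lock, if any, around the handler. */
    static int dispatch(unsigned idx)
    {
        int ret;

        if (table[idx].lock)
            pthread_mutex_lock(table[idx].lock);
        ret = table[idx].pfn();
        if (table[idx].lock)
            pthread_mutex_unlock(table[idx].lock);
        return ret;
    }

    int main(void)
    {
        printf("ret = %d\n", dispatch(0));
        return 0;
    }

Sharing one lock across a bridge's entries is the coarse but safe choice when the underlying module (here, the device-memory history buffer) keeps global state that its KM functions do not lock internally.
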
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "di_impl_brg.h"
++
++#include "common_di_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static PVRSRV_ERROR _DICreateContextpsContextIntRelease(void *pvData)
++{
++ PVRSRV_ERROR eError;
++ eError = DIDestroyContextKM((DI_CONTEXT *) pvData);
++ return eError;
++}
++
++static IMG_INT
++PVRSRVBridgeDICreateContext(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psDICreateContextIN_UI8,
++ IMG_UINT8 * psDICreateContextOUT_UI8, CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_DICREATECONTEXT *psDICreateContextIN =
++ (PVRSRV_BRIDGE_IN_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextIN_UI8, 0);
++ PVRSRV_BRIDGE_OUT_DICREATECONTEXT *psDICreateContextOUT =
++ (PVRSRV_BRIDGE_OUT_DICREATECONTEXT *) IMG_OFFSET_ADDR(psDICreateContextOUT_UI8, 0);
++
++ IMG_CHAR *puiStreamNameInt = NULL;
++ DI_CONTEXT *psContextInt = NULL;
++
++ IMG_UINT32 ui32NextOffset = 0;
++ IMG_BYTE *pArrayArgsBuffer = NULL;
++#if !defined(INTEGRITY_OS)
++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
++#endif
++
++ IMG_UINT32 ui32BufferSize = 0;
++ IMG_UINT64 ui64BufferSize =
++ ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0;
++
++ PVR_UNREFERENCED_PARAMETER(psDICreateContextIN);
++
++ psDICreateContextOUT->puiStreamName = psDICreateContextIN->puiStreamName;
++
++ if (ui64BufferSize > IMG_UINT32_MAX)
++ {
++ psDICreateContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
++ goto DICreateContext_exit;
++ }
++
++ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
++
++ if (ui32BufferSize != 0)
++ {
++#if !defined(INTEGRITY_OS)
++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
++ IMG_UINT32 ui32InBufferOffset =
++ PVR_ALIGN(sizeof(*psDICreateContextIN), sizeof(unsigned long));
++ IMG_UINT32 ui32InBufferExcessSize =
++ ui32InBufferOffset >=
++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
++
++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
++ if (bHaveEnoughSpace)
++ {
++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDICreateContextIN;
++
++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
++ }
++ else
++#endif
++ {
++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
++
++ if (!pArrayArgsBuffer)
++ {
++ psDICreateContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ goto DICreateContext_exit;
++ }
++ }
++ }
++
++ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL)
++ {
++ puiStreamNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset);
++ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
++ }
++
++ psDICreateContextOUT->eError = DICreateContextKM(puiStreamNameInt, &psContextInt);
++ /* Exit early if bridged call fails */
++ if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK))
++ {
++ goto DICreateContext_exit;
++ }
++
++ /* Lock over handle creation.
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDICreateContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psDICreateContextOUT->hContext, ++ (void *)psContextInt, ++ PVRSRV_HANDLE_TYPE_DI_CONTEXT, ++ PVRSRV_HANDLE_ALLOC_FLAG_NONE, ++ (PFN_HANDLE_RELEASE) & ++ _DICreateContextpsContextIntRelease); ++ if (unlikely(psDICreateContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DICreateContext_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((puiStreamNameInt) && ((PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psDICreateContextOUT->puiStreamName, puiStreamNameInt, ++ (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR))) != PVRSRV_OK)) ++ { ++ psDICreateContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DICreateContext_exit; ++ } ++ } ++ ++DICreateContext_exit: ++ ++ if (psDICreateContextOUT->eError != PVRSRV_OK) ++ { ++ if (psContextInt) ++ { ++ DIDestroyContextKM(psContextInt); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psDICreateContextOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDIDestroyContext(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDIDestroyContextIN_UI8, ++ IMG_UINT8 * psDIDestroyContextOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *psDIDestroyContextIN = ++ (PVRSRV_BRIDGE_IN_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *psDIDestroyContextOUT = ++ (PVRSRV_BRIDGE_OUT_DIDESTROYCONTEXT *) IMG_OFFSET_ADDR(psDIDestroyContextOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDIDestroyContextOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psDIDestroyContextIN->hContext, ++ PVRSRV_HANDLE_TYPE_DI_CONTEXT); ++ if (unlikely((psDIDestroyContextOUT->eError != PVRSRV_OK) && ++ (psDIDestroyContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psDIDestroyContextOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psDIDestroyContextOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto DIDestroyContext_exit; ++ } ++ ++ /* Release now we have destroyed handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DIDestroyContext_exit: ++ ++ return 0; ++} ++ ++static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX, ++ "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeDIReadEntry(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDIReadEntryIN_UI8, ++ IMG_UINT8 * psDIReadEntryOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DIREADENTRY *psDIReadEntryIN = ++ (PVRSRV_BRIDGE_IN_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DIREADENTRY *psDIReadEntryOUT = ++ (PVRSRV_BRIDGE_OUT_DIREADENTRY *) IMG_OFFSET_ADDR(psDIReadEntryOUT_UI8, 0); ++ ++ IMG_HANDLE hContext = psDIReadEntryIN->hContext; ++ DI_CONTEXT *psContextInt = NULL; ++ IMG_CHAR *uiEntryPathInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + 0; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psDIReadEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto DIReadEntry_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psDIReadEntryIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIReadEntryIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psDIReadEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto DIReadEntry_exit; ++ } ++ } ++ } ++ ++ { ++ uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiEntryPathInt, (const void __user *)psDIReadEntryIN->puiEntryPath, ++ DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psDIReadEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DIReadEntry_exit; ++ } ++ ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDIReadEntryOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psContextInt, ++ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE); ++ if (unlikely(psDIReadEntryOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DIReadEntry_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDIReadEntryOUT->eError = ++ DIReadEntryKM(psContextInt, ++ uiEntryPathInt, psDIReadEntryIN->ui64Offset, psDIReadEntryIN->ui64Size); ++ ++DIReadEntry_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psDIReadEntryOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX, ++ "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX"); ++static_assert(DI_IMPL_BRG_PATH_LEN <= IMG_UINT32_MAX, ++ "DI_IMPL_BRG_PATH_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeDIWriteEntry(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDIWriteEntryIN_UI8, ++ IMG_UINT8 * psDIWriteEntryOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DIWRITEENTRY *psDIWriteEntryIN = ++ (PVRSRV_BRIDGE_IN_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DIWRITEENTRY *psDIWriteEntryOUT = ++ (PVRSRV_BRIDGE_OUT_DIWRITEENTRY *) IMG_OFFSET_ADDR(psDIWriteEntryOUT_UI8, 0); ++ ++ IMG_HANDLE hContext = psDIWriteEntryIN->hContext; ++ DI_CONTEXT *psContextInt = NULL; ++ IMG_CHAR *uiEntryPathInt = NULL; ++ IMG_CHAR *uiValueInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) + ++ ((IMG_UINT64) psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely(psDIWriteEntryIN->ui32ValueSize > DI_IMPL_BRG_PATH_LEN)) ++ { ++ psDIWriteEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto DIWriteEntry_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psDIWriteEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto DIWriteEntry_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psDIWriteEntryIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psDIWriteEntryIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psDIWriteEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto DIWriteEntry_exit; ++ } ++ } ++ } ++ ++ { ++ uiEntryPathInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiEntryPathInt, (const void __user *)psDIWriteEntryIN->puiEntryPath, ++ DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DIWriteEntry_exit; ++ } ++ ((IMG_CHAR *) uiEntryPathInt)[(DI_IMPL_BRG_PATH_LEN * sizeof(IMG_CHAR)) - 1] = '\0'; ++ } ++ if (psDIWriteEntryIN->ui32ValueSize != 0) ++ { ++ uiValueInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiValueInt, (const void __user *)psDIWriteEntryIN->puiValue, ++ psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psDIWriteEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto DIWriteEntry_exit; ++ } ++ ((IMG_CHAR *) uiValueInt)[(psDIWriteEntryIN->ui32ValueSize * sizeof(IMG_CHAR)) - ++ 1] = '\0'; ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDIWriteEntryOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psContextInt, ++ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE); ++ if (unlikely(psDIWriteEntryOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DIWriteEntry_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDIWriteEntryOUT->eError = ++ DIWriteEntryKM(psContextInt, ++ uiEntryPathInt, psDIWriteEntryIN->ui32ValueSize, uiValueInt); ++ ++DIWriteEntry_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. 
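++ * Note that both user-supplied strings were force-terminated after
++ * their OSCopyFromUser() above by writing '\0' into the final byte of
++ * the copied buffer, so DIWriteEntryKM() can never run off the end of
++ * an unterminated path or value, whatever userspace passed in.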
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psDIWriteEntryOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDIListAllEntries(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDIListAllEntriesIN_UI8, ++ IMG_UINT8 * psDIListAllEntriesOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DILISTALLENTRIES *psDIListAllEntriesIN = ++ (PVRSRV_BRIDGE_IN_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *psDIListAllEntriesOUT = ++ (PVRSRV_BRIDGE_OUT_DILISTALLENTRIES *) IMG_OFFSET_ADDR(psDIListAllEntriesOUT_UI8, 0); ++ ++ IMG_HANDLE hContext = psDIListAllEntriesIN->hContext; ++ DI_CONTEXT *psContextInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDIListAllEntriesOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psContextInt, ++ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT, IMG_TRUE); ++ if (unlikely(psDIListAllEntriesOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DIListAllEntries_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDIListAllEntriesOUT->eError = DIListAllEntriesKM(psContextInt); ++ ++DIListAllEntries_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hContext, PVRSRV_HANDLE_TYPE_DI_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. 
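++ * DIListAllEntries is the simplest bridge shape: one handle in, one
++ * error code out, no variable-length arguments, and therefore no
++ * marshalling buffer to size, fill, or free.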
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitDIBridge(void); ++void DeinitDIBridge(void); ++ ++/* ++ * Register all DI functions with services ++ */ ++PVRSRV_ERROR InitDIBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT, ++ PVRSRVBridgeDICreateContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT, ++ PVRSRVBridgeDIDestroyContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY, ++ PVRSRVBridgeDIReadEntry, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY, ++ PVRSRVBridgeDIWriteEntry, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES, ++ PVRSRVBridgeDIListAllEntries, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all di functions with services ++ */ ++void DeinitDIBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DICREATECONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIDESTROYCONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIREADENTRY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DIWRITEENTRY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DI, PVRSRV_BRIDGE_DI_DILISTALLENTRIES); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c b/drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c +new file mode 100644 +index 000000000000..07851ded9899 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_dmabuf_bridge.c +@@ -0,0 +1,694 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for dmabuf ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for dmabuf ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "physmem_dmabuf.h"
++#include "pmr.h"
++
++#include "common_dmabuf_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static PVRSRV_ERROR _PhysmemImportDmaBufpsPMRPtrIntRelease(void *pvData)
++{
++ PVRSRV_ERROR eError;
++ eError = PMRUnrefPMR((PMR *) pvData);
++ return eError;
++}
++
++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX,
++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX");
++
++static IMG_INT
++PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psPhysmemImportDmaBufIN_UI8,
++ IMG_UINT8 * psPhysmemImportDmaBufOUT_UI8,
++ CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN =
++ (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemImportDmaBufIN_UI8,
++ 0);
++ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT =
++ (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemImportDmaBufOUT_UI8,
++ 0);
++
++ IMG_CHAR *uiNameInt = NULL;
++ PMR *psPMRPtrInt = NULL;
++
++ IMG_UINT32 ui32NextOffset = 0;
++ IMG_BYTE *pArrayArgsBuffer = NULL;
++#if !defined(INTEGRITY_OS)
++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
++#endif
++
++ IMG_UINT32 ui32BufferSize = 0;
++ IMG_UINT64 ui64BufferSize =
++ ((IMG_UINT64) psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0;
++
++ if (unlikely(psPhysmemImportDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN))
++ {
++ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
++ goto PhysmemImportDmaBuf_exit;
++ }
++
++ if (ui64BufferSize > IMG_UINT32_MAX)
++ {
++ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
++ goto PhysmemImportDmaBuf_exit;
++ }
++
++ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
++
++ if (ui32BufferSize != 0)
++ {
++#if !defined(INTEGRITY_OS)
++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
++ IMG_UINT32 ui32InBufferOffset =
++ PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), sizeof(unsigned long));
++ IMG_UINT32 ui32InBufferExcessSize =
++ ui32InBufferOffset >=
++ PVRSRV_MAX_BRIDGE_IN_SIZE ?
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto PhysmemImportDmaBuf_exit; ++ } ++ } ++ } ++ ++ if (psPhysmemImportDmaBufIN->ui32NameSize != 0) ++ { ++ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufIN->puiName, ++ psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysmemImportDmaBuf_exit; ++ } ++ ((IMG_CHAR *) uiNameInt)[(psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) ++ - 1] = '\0'; ++ } ++ ++ psPhysmemImportDmaBufOUT->eError = ++ PhysmemImportDmaBuf(psConnection, OSGetDevNode(psConnection), ++ psPhysmemImportDmaBufIN->ifd, ++ psPhysmemImportDmaBufIN->uiFlags, ++ psPhysmemImportDmaBufIN->ui32NameSize, ++ uiNameInt, ++ &psPMRPtrInt, ++ &psPhysmemImportDmaBufOUT->uiSize, ++ &psPhysmemImportDmaBufOUT->uiAlign); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) ++ { ++ goto PhysmemImportDmaBuf_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psPhysmemImportDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psPhysmemImportDmaBufOUT-> ++ hPMRPtr, (void *)psPMRPtrInt, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _PhysmemImportDmaBufpsPMRPtrIntRelease); ++ if (unlikely(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PhysmemImportDmaBuf_exit; ++ } ++ ++ /* Release now we have created handles. 
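++ * On the sizing above: ui64BufferSize is computed in 64-bit arithmetic
++ * before being checked against IMG_UINT32_MAX, so a sum of array sizes
++ * cannot wrap a 32-bit multiply into a small allocation. Illustrative
++ * arithmetic (made-up values): two arrays of 0x90000000 bytes each sum
++ * to 0x120000000, which a 32-bit add would silently truncate to
++ * 0x20000000 but which the 64-bit check rejects with
++ * PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL.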
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++PhysmemImportDmaBuf_exit: ++ ++ if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK) ++ { ++ if (psPMRPtrInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ PMRUnrefPMR(psPMRPtrInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psPhysmemImportDmaBufOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _PhysmemImportDmaBufLockedpsPMRPtrIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PMRUnrefUnlockPMR((PMR *) pvData); ++ return eError; ++} ++ ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgePhysmemImportDmaBufLocked(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPhysmemImportDmaBufLockedIN_UI8, ++ IMG_UINT8 * psPhysmemImportDmaBufLockedOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedIN = ++ (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUFLOCKED *) ++ IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *psPhysmemImportDmaBufLockedOUT = ++ (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUFLOCKED *) ++ IMG_OFFSET_ADDR(psPhysmemImportDmaBufLockedOUT_UI8, 0); ++ ++ IMG_CHAR *uiNameInt = NULL; ++ PMR *psPMRPtrInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely(psPhysmemImportDmaBufLockedIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) ++ { ++ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto PhysmemImportDmaBufLocked_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto PhysmemImportDmaBufLocked_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psPhysmemImportDmaBufLockedIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportDmaBufLockedIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto PhysmemImportDmaBufLocked_exit; ++ } ++ } ++ } ++ ++ if (psPhysmemImportDmaBufLockedIN->ui32NameSize != 0) ++ { ++ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiNameInt, (const void __user *)psPhysmemImportDmaBufLockedIN->puiName, ++ psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psPhysmemImportDmaBufLockedOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysmemImportDmaBufLocked_exit; ++ } ++ ((IMG_CHAR *) ++ uiNameInt)[(psPhysmemImportDmaBufLockedIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] = ++ '\0'; ++ } ++ ++ psPhysmemImportDmaBufLockedOUT->eError = ++ PhysmemImportDmaBufLocked(psConnection, OSGetDevNode(psConnection), ++ psPhysmemImportDmaBufLockedIN->ifd, ++ psPhysmemImportDmaBufLockedIN->uiFlags, ++ psPhysmemImportDmaBufLockedIN->ui32NameSize, ++ uiNameInt, ++ &psPMRPtrInt, ++ &psPhysmemImportDmaBufLockedOUT->uiSize, ++ &psPhysmemImportDmaBufLockedOUT->uiAlign); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)) ++ { ++ goto PhysmemImportDmaBufLocked_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psPhysmemImportDmaBufLockedOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psPhysmemImportDmaBufLockedOUT->hPMRPtr, (void *)psPMRPtrInt, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _PhysmemImportDmaBufLockedpsPMRPtrIntRelease); ++ if (unlikely(psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PhysmemImportDmaBufLocked_exit; ++ } ++ ++ /* Release now we have created handles. 
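++ * The only difference from PhysmemImportDmaBuf above is the release
++ * path wired into the handle: the locked variant is torn down through
++ * PMRUnrefUnlockPMR() instead of PMRUnrefPMR(), which (as the name
++ * suggests) also undoes the locking performed by
++ * PhysmemImportDmaBufLocked() when the last reference goes.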
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++PhysmemImportDmaBufLocked_exit: ++ ++ if (psPhysmemImportDmaBufLockedOUT->eError != PVRSRV_OK) ++ { ++ if (psPMRPtrInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ PMRUnrefUnlockPMR(psPMRPtrInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psPhysmemImportDmaBufLockedOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPhysmemExportDmaBufIN_UI8, ++ IMG_UINT8 * psPhysmemExportDmaBufOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN = ++ (PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT = ++ (PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *) IMG_OFFSET_ADDR(psPhysmemExportDmaBufOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psPhysmemExportDmaBufOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PhysmemExportDmaBuf_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psPhysmemExportDmaBufOUT->eError = ++ PhysmemExportDmaBuf(psConnection, OSGetDevNode(psConnection), ++ psPMRInt, &psPhysmemExportDmaBufOUT->iFd); ++ ++PhysmemExportDmaBuf_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. 
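++ * Unlike the import paths, export hands back a plain file descriptor
++ * (iFd) rather than a services handle. A dma-buf fd follows normal
++ * POSIX fd semantics, so userspace can hand it to another process (for
++ * instance over a Unix-domain socket with SCM_RIGHTS) and the receiver
++ * can import it again via PVRSRVBridgePhysmemImportDmaBuf.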
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _PhysmemImportSparseDmaBufpsPMRPtrIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PMRUnrefPMR((PMR *) pvData); ++ return eError; ++} ++ ++static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX, ++ "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPhysmemImportSparseDmaBufIN_UI8, ++ IMG_UINT8 * psPhysmemImportSparseDmaBufOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufIN = ++ (PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *) ++ IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufOUT = ++ (PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *) ++ IMG_OFFSET_ADDR(psPhysmemImportSparseDmaBufOUT_UI8, 0); ++ ++ IMG_UINT32 *ui32MappingTableInt = NULL; ++ IMG_CHAR *uiNameInt = NULL; ++ PMR *psPMRPtrInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely ++ (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_PAGE_COUNT)) ++ { ++ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto PhysmemImportSparseDmaBuf_exit; ++ } ++ ++ if (unlikely(psPhysmemImportSparseDmaBufIN->ui32NameSize > DEVMEM_ANNOTATION_MAX_LEN)) ++ { ++ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto PhysmemImportSparseDmaBuf_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto PhysmemImportSparseDmaBuf_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemImportSparseDmaBufIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto PhysmemImportSparseDmaBuf_exit; ++ } ++ } ++ } ++ ++ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0) ++ { ++ ui32MappingTableInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32MappingTableInt, ++ (const void __user *)psPhysmemImportSparseDmaBufIN->pui32MappingTable, ++ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != ++ PVRSRV_OK) ++ { ++ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysmemImportSparseDmaBuf_exit; ++ } ++ } ++ if (psPhysmemImportSparseDmaBufIN->ui32NameSize != 0) ++ { ++ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiNameInt, (const void __user *)psPhysmemImportSparseDmaBufIN->puiName, ++ psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysmemImportSparseDmaBuf_exit; ++ } ++ ((IMG_CHAR *) ++ uiNameInt)[(psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) - 1] = ++ '\0'; ++ } ++ ++ psPhysmemImportSparseDmaBufOUT->eError = ++ PhysmemImportSparseDmaBuf(psConnection, OSGetDevNode(psConnection), ++ psPhysmemImportSparseDmaBufIN->ifd, ++ psPhysmemImportSparseDmaBufIN->uiFlags, ++ psPhysmemImportSparseDmaBufIN->uiChunkSize, ++ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks, ++ psPhysmemImportSparseDmaBufIN->ui32NumVirtChunks, ++ ui32MappingTableInt, ++ psPhysmemImportSparseDmaBufIN->ui32NameSize, ++ uiNameInt, ++ &psPMRPtrInt, ++ &psPhysmemImportSparseDmaBufOUT->uiSize, ++ &psPhysmemImportSparseDmaBufOUT->uiAlign); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) ++ { ++ goto PhysmemImportSparseDmaBuf_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psPhysmemImportSparseDmaBufOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psPhysmemImportSparseDmaBufOUT->hPMRPtr, (void *)psPMRPtrInt, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _PhysmemImportSparseDmaBufpsPMRPtrIntRelease); ++ if (unlikely(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PhysmemImportSparseDmaBuf_exit; ++ } ++ ++ /* Release now we have created handles. 
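++ * For the sparse import, ui32MappingTable carries ui32NumPhysChunks
++ * entries (bounded by PMR_MAX_SUPPORTED_PAGE_COUNT above); by the
++ * usual sparse-PMR convention each entry names which of the
++ * ui32NumVirtChunks virtual chunks the corresponding physical chunk
++ * backs. Both user copies land back-to-back in the one marshalling
++ * buffer via ui32NextOffset.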
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++PhysmemImportSparseDmaBuf_exit: ++ ++ if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK) ++ { ++ if (psPMRPtrInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ PMRUnrefPMR(psPMRPtrInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psPhysmemImportSparseDmaBufOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitDMABUFBridge(void); ++void DeinitDMABUFBridge(void); ++ ++/* ++ * Register all DMABUF functions with services ++ */ ++PVRSRV_ERROR InitDMABUFBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, ++ PVRSRVBridgePhysmemImportDmaBuf, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED, ++ PVRSRVBridgePhysmemImportDmaBufLocked, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, ++ PVRSRVBridgePhysmemExportDmaBuf, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, ++ PVRSRVBridgePhysmemImportSparseDmaBuf, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all dmabuf functions with services ++ */ ++void DeinitDMABUFBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, ++ PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUFLOCKED); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, ++ PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c b/drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c +new file mode 100644 +index 000000000000..dd81d914b2f2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_htbuffer_bridge.c +@@ -0,0 +1,351 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for htbuffer ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for htbuffer ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. 
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "htbserver.h"
++
++#include "common_htbuffer_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++#include "lock.h"
++
++#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static_assert(HTB_FLAG_NUM_EL <= IMG_UINT32_MAX,
++ "HTB_FLAG_NUM_EL must not be larger than IMG_UINT32_MAX");
++
++static IMG_INT
++PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psHTBControlIN_UI8,
++ IMG_UINT8 * psHTBControlOUT_UI8, CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN =
++ (PVRSRV_BRIDGE_IN_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlIN_UI8, 0);
++ PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT =
++ (PVRSRV_BRIDGE_OUT_HTBCONTROL *) IMG_OFFSET_ADDR(psHTBControlOUT_UI8, 0);
++
++ IMG_UINT32 *ui32GroupEnableInt = NULL;
++
++ IMG_UINT32 ui32NextOffset = 0;
++ IMG_BYTE *pArrayArgsBuffer = NULL;
++#if !defined(INTEGRITY_OS)
++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
++#endif
++
++ IMG_UINT32 ui32BufferSize = 0;
++ IMG_UINT64 ui64BufferSize =
++ ((IMG_UINT64) psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) + 0;
++
++ if (unlikely(psHTBControlIN->ui32NumGroups > HTB_FLAG_NUM_EL))
++ {
++ psHTBControlOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
++ goto HTBControl_exit;
++ }
++
++ PVR_UNREFERENCED_PARAMETER(psConnection);
++
++ if (ui64BufferSize > IMG_UINT32_MAX)
++ {
++ psHTBControlOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
++ goto HTBControl_exit;
++ }
++
++ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
++
++ if (ui32BufferSize != 0)
++ {
++#if !defined(INTEGRITY_OS)
++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety.
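++ * This pattern is shared by every generated bridge: the fixed-size IN
++ * structure sits at offset 0 of the bridge input buffer, so the space
++ * between its word-aligned end and PVRSRV_MAX_BRIDGE_IN_SIZE can carry
++ * the variable-length array arguments with no heap allocation at all.
++ * Illustrative arithmetic with made-up sizes: a 24-byte IN structure
++ * and a 256-byte bridge buffer leave 256 - PVR_ALIGN(24, 8) = 232
++ * bytes; copies that fit use that tail in place, anything larger falls
++ * back to OSAllocMemNoStats(), and only the fallback allocation
++ * (!bHaveEnoughSpace) is freed on exit.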
*/ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBControlIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psHTBControlOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto HTBControl_exit; ++ } ++ } ++ } ++ ++ if (psHTBControlIN->ui32NumGroups != 0) ++ { ++ ui32GroupEnableInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32GroupEnableInt, ++ (const void __user *)psHTBControlIN->pui32GroupEnable, ++ psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto HTBControl_exit; ++ } ++ } ++ ++ psHTBControlOUT->eError = ++ HTBControlKM(psHTBControlIN->ui32NumGroups, ++ ui32GroupEnableInt, ++ psHTBControlIN->ui32LogLevel, ++ psHTBControlIN->ui32EnablePID, ++ psHTBControlIN->ui32LogMode, psHTBControlIN->ui32OpMode); ++ ++HTBControl_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psHTBControlOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static_assert(HTB_LOG_MAX_PARAMS <= IMG_UINT32_MAX, ++ "HTB_LOG_MAX_PARAMS must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psHTBLogIN_UI8, ++ IMG_UINT8 * psHTBLogOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN = ++ (PVRSRV_BRIDGE_IN_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT = ++ (PVRSRV_BRIDGE_OUT_HTBLOG *) IMG_OFFSET_ADDR(psHTBLogOUT_UI8, 0); ++ ++ IMG_UINT32 *ui32ArgsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) + 0; ++ ++ if (unlikely(psHTBLogIN->ui32NumArgs > HTB_LOG_MAX_PARAMS)) ++ { ++ psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto HTBLog_exit; ++ } ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psHTBLogOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto HTBLog_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHTBLogIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psHTBLogOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto HTBLog_exit; ++ } ++ } ++ } ++ ++ if (psHTBLogIN->ui32NumArgs != 0) ++ { ++ ui32ArgsInt = (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32ArgsInt, (const void __user *)psHTBLogIN->pui32Args, ++ psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto HTBLog_exit; ++ } ++ } ++ ++ psHTBLogOUT->eError = ++ HTBLogKM(psHTBLogIN->ui32PID, ++ psHTBLogIN->ui32TID, ++ psHTBLogIN->ui64TimeStamp, ++ psHTBLogIN->ui32SF, psHTBLogIN->ui32NumArgs, ui32ArgsInt); ++ ++HTBLog_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psHTBLogOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++static POS_LOCK pHTBUFFERBridgeLock; ++ ++#endif /* EXCLUDE_HTBUFFER_BRIDGE */ ++ ++#if !defined(EXCLUDE_HTBUFFER_BRIDGE) ++PVRSRV_ERROR InitHTBUFFERBridge(void); ++void DeinitHTBUFFERBridge(void); ++ ++/* ++ * Register all HTBUFFER functions with services ++ */ ++PVRSRV_ERROR InitHTBUFFERBridge(void) ++{ ++ PVR_LOG_RETURN_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock), "OSLockCreate"); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, ++ PVRSRVBridgeHTBControl, pHTBUFFERBridgeLock); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG, ++ PVRSRVBridgeHTBLog, pHTBUFFERBridgeLock); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all htbuffer functions with services ++ */ ++void DeinitHTBUFFERBridge(void) ++{ ++ OSLockDestroy(pHTBUFFERBridgeLock); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG); ++ ++} ++#else /* EXCLUDE_HTBUFFER_BRIDGE */ ++/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when defined, ++ * do not populate the dispatch table with its functions ++ */ ++#define InitHTBUFFERBridge() \ ++ PVRSRV_OK ++ ++#define DeinitHTBUFFERBridge() ++ ++#endif /* EXCLUDE_HTBUFFER_BRIDGE */ +diff --git a/drivers/gpu/drm/img-rogue/server_mm_bridge.c b/drivers/gpu/drm/img-rogue/server_mm_bridge.c +new file mode 100644 +index 000000000000..7375eb61f809 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_mm_bridge.c +@@ -0,0 +1,3802 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for mm ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Implements the server side of the bridge for mm ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "pvrsrv_memalloc_physheap.h"
++#include "devicemem.h"
++#include "devicemem_server.h"
++#include "pmr.h"
++#include "devicemem_heapcfg.h"
++#include "physmem.h"
++#include "devicemem_utils.h"
++#include "process_stats.h"
++
++#include "common_mm_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++static PVRSRV_ERROR ReleasePMRExport(void *pvData)
++{
++ PVR_UNREFERENCED_PARAMETER(pvData);
++
++ return PVRSRV_OK;
++}
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static PVRSRV_ERROR _PMRExportPMRpsPMRExportIntRelease(void *pvData)
++{
++ PVRSRV_ERROR eError;
++ eError = PMRUnexportPMR((PMR_EXPORT *) pvData);
++ return eError;
++}
++
++static IMG_INT
++PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psPMRExportPMRIN_UI8,
++ IMG_UINT8 * psPMRExportPMROUT_UI8, CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN =
++ (PVRSRV_BRIDGE_IN_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMRIN_UI8, 0);
++ PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT =
++ (PVRSRV_BRIDGE_OUT_PMREXPORTPMR *) IMG_OFFSET_ADDR(psPMRExportPMROUT_UI8, 0);
++
++ IMG_HANDLE hPMR = psPMRExportPMRIN->hPMR;
++ PMR *psPMRInt = NULL;
++ PMR_EXPORT *psPMRExportInt = NULL;
++ IMG_HANDLE hPMRExportInt = NULL;
++
++ /* Lock over handle lookup. */
++ LockHandle(psConnection->psHandleBase);
++
++ /* Look up the address from the handle */
++ psPMRExportPMROUT->eError =
++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
++ (void **)&psPMRInt,
++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE);
++ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
++ {
++ UnlockHandle(psConnection->psHandleBase);
++ goto PMRExportPMR_exit;
++ }
++ /* Release now we have looked up handles. */
++ UnlockHandle(psConnection->psHandleBase);
++
++ psPMRExportPMROUT->eError =
++ PMRExportPMR(psPMRInt,
++ &psPMRExportInt,
++ &psPMRExportPMROUT->ui64Size,
++ &psPMRExportPMROUT->ui32Log2Contig, &psPMRExportPMROUT->ui64Password);
++ /* Exit early if bridged call fails */
++ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK))
++ {
++ goto PMRExportPMR_exit;
++ }
++
++ /*
++ * For cases where we need a cross process handle we actually allocate two.
++ *
++ * The first one is a connection specific handle and it gets given the real
++ * release function. This handle does *NOT* get returned to the caller. Its
++ * purpose is to release any leaked resources when we either have a bad or
++ * abnormally terminated client. If we didn't do this then the resource
++ * wouldn't be freed until driver unload. If the resource is freed normally,
++ * this handle can be looked up via the cross process handle and then
++ * released accordingly.
++ *
++ * The second one is a cross process handle and it gets given a no-op release
++ * function. This handle does get returned to the caller.
++ */
++
++ /* Lock over handle creation.
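++ *
++ * Concretely, psPMRExportInt gets registered twice: hPMRExportInt lives
++ * in the connection's handle base with the real destructor
++ * (_PMRExportPMRpsPMRExportIntRelease) and is the safety net that fires
++ * if the client dies; hPMRExport lives in KERNEL_HANDLE_BASE with the
++ * no-op ReleasePMRExport and is the token returned to userspace for
++ * another process to quote to PMRImportPMR. Only the connection-local
++ * handle ever frees the export.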
*/ ++ LockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ psPMRExportPMROUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, ++ &hPMRExportInt, (void *)psPMRExportInt, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & _PMRExportPMRpsPMRExportIntRelease); ++ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ goto PMRExportPMR_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ /* Lock over handle creation. */ ++ LockHandle(KERNEL_HANDLE_BASE); ++ psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE, ++ &psPMRExportPMROUT->hPMRExport, ++ (void *)psPMRExportInt, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ ReleasePMRExport); ++ if (unlikely(psPMRExportPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ goto PMRExportPMR_exit; ++ } ++ /* Release now we have created handles. */ ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ ++PMRExportPMR_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psPMRExportPMROUT->eError != PVRSRV_OK) ++ { ++ if (psPMRExportPMROUT->hPMRExport) ++ { ++ PVRSRV_ERROR eError; ++ ++ /* Lock over handle creation cleanup. */ ++ LockHandle(KERNEL_HANDLE_BASE); ++ ++ eError = PVRSRVDestroyHandleUnlocked(KERNEL_HANDLE_BASE, ++ (IMG_HANDLE) psPMRExportPMROUT-> ++ hPMRExport, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); ++ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(eError))); ++ } ++ /* Releasing the handle should free/destroy/release the resource. ++ * This should never fail... */ ++ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); ++ ++ /* Release now we have cleaned up creation handles. */ ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ ++ } ++ ++ if (hPMRExportInt) ++ { ++ PVRSRV_ERROR eError; ++ /* Lock over handle creation cleanup. */ ++ LockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ eError = ++ PVRSRVDestroyHandleUnlocked(psConnection->psProcessHandleBase-> ++ psHandleBase, hPMRExportInt, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); ++ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(eError))); ++ } ++ /* Releasing the handle should free/destroy/release the resource. ++ * This should never fail... */ ++ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); ++ ++ /* Avoid freeing/destroying/releasing the resource a second time below */ ++ psPMRExportInt = NULL; ++ /* Release now we have cleaned up creation handles. 
*/ ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ } ++ ++ if (psPMRExportInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ PMRUnexportPMR(psPMRExportInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPMRUnexportPMRIN_UI8, ++ IMG_UINT8 * psPMRUnexportPMROUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN = ++ (PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMRIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT = ++ (PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *) IMG_OFFSET_ADDR(psPMRUnexportPMROUT_UI8, 0); ++ ++ PMR_EXPORT *psPMRExportInt = NULL; ++ IMG_HANDLE hPMRExportInt = NULL; ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(KERNEL_HANDLE_BASE); ++ psPMRUnexportPMROUT->eError = ++ PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, ++ (void **)&psPMRExportInt, ++ (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_FALSE); ++ if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); ++ } ++ PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ /* ++ * Find the connection specific handle that represents the same data ++ * as the cross process handle as releasing it will actually call the ++ * data's real release function (see the function where the cross ++ * process handle is allocated for more details). ++ */ ++ psPMRUnexportPMROUT->eError = ++ PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, ++ &hPMRExportInt, ++ psPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); ++ if (unlikely(psPMRUnexportPMROUT->eError != PVRSRV_OK)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); ++ } ++ PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK); ++ ++ psPMRUnexportPMROUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase, ++ hPMRExportInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); ++ if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) && ++ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); ++ } ++ PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) || ++ (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY)); ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ /* Lock over handle destruction. 
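++ * Teardown mirrors the dual registration above: the cross-process
++ * handle was used to look up the PMR_EXPORT data,
++ * PVRSRVFindHandleUnlocked() then mapped that data pointer back to the
++ * connection-local handle so its real release function runs, and the
++ * kernel-base handle (whose release is a no-op) is destroyed last.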
*/ ++ LockHandle(KERNEL_HANDLE_BASE); ++ ++ psPMRUnexportPMROUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(KERNEL_HANDLE_BASE, ++ (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); ++ if (unlikely((psPMRUnexportPMROUT->eError != PVRSRV_OK) && ++ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnexportPMROUT->eError))); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ goto PMRUnexportPMR_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ ++PMRUnexportPMR_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPMRGetUIDIN_UI8, ++ IMG_UINT8 * psPMRGetUIDOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN = ++ (PVRSRV_BRIDGE_IN_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT = ++ (PVRSRV_BRIDGE_OUT_PMRGETUID *) IMG_OFFSET_ADDR(psPMRGetUIDOUT_UI8, 0); ++ ++ IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psPMRGetUIDOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psPMRGetUIDOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PMRGetUID_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psPMRGetUIDOUT->eError = PMRGetUID(psPMRInt, &psPMRGetUIDOUT->ui64UID); ++ ++PMRGetUID_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _PMRMakeLocalImportHandlepsExtMemIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PMRUnmakeLocalImportHandle((PMR *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPMRMakeLocalImportHandleIN_UI8, ++ IMG_UINT8 * psPMRMakeLocalImportHandleOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN = ++ (PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *) ++ IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleOUT = ++ (PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *) ++ IMG_OFFSET_ADDR(psPMRMakeLocalImportHandleOUT_UI8, 0); ++ ++ IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer; ++ PMR *psBufferInt = NULL; ++ PMR *psExtMemInt = NULL; ++ ++ /* Lock over handle lookup. 
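++ * "Local import" bridges two connections of the same process: a PMR
++ * exported under PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE is turned
++ * into a DEVMEM_MEM_IMPORT handle in psProcessHandleBase, the handle
++ * base shared by the process's connections, so another connection can
++ * feed it straight to PMRLocalImportPMR with no cross-process export.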
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psPMRMakeLocalImportHandleOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psBufferInt, ++ hBuffer, ++ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, IMG_TRUE); ++ if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PMRMakeLocalImportHandle_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psPMRMakeLocalImportHandleOUT->eError = PMRMakeLocalImportHandle(psBufferInt, &psExtMemInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) ++ { ++ goto PMRMakeLocalImportHandle_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ psPMRMakeLocalImportHandleOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, ++ &psPMRMakeLocalImportHandleOUT->hExtMem, (void *)psExtMemInt, ++ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _PMRMakeLocalImportHandlepsExtMemIntRelease); ++ if (unlikely(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ goto PMRMakeLocalImportHandle_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++PMRMakeLocalImportHandle_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psBufferInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hBuffer, PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK) ++ { ++ if (psExtMemInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ PMRUnmakeLocalImportHandle(psExtMemInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPMRUnmakeLocalImportHandleIN_UI8, ++ IMG_UINT8 * psPMRUnmakeLocalImportHandleOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleIN = ++ (PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *) ++ IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleOUT = ++ (PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *) ++ IMG_OFFSET_ADDR(psPMRUnmakeLocalImportHandleOUT_UI8, 0); ++ ++ /* Lock over handle destruction. 
*/ ++ LockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ psPMRUnmakeLocalImportHandleOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase, ++ (IMG_HANDLE) psPMRUnmakeLocalImportHandleIN->hExtMem, ++ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); ++ if (unlikely((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) && ++ (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psPMRUnmakeLocalImportHandleOUT->eError))); ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ goto PMRUnmakeLocalImportHandle_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++PMRUnmakeLocalImportHandle_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _PMRImportPMRpsPMRIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PMRUnrefPMR((PMR *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPMRImportPMRIN_UI8, ++ IMG_UINT8 * psPMRImportPMROUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN = ++ (PVRSRV_BRIDGE_IN_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMRIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT = ++ (PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *) IMG_OFFSET_ADDR(psPMRImportPMROUT_UI8, 0); ++ ++ IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport; ++ PMR_EXPORT *psPMRExportInt = NULL; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(KERNEL_HANDLE_BASE); ++ ++ /* Look up the address from the handle */ ++ psPMRImportPMROUT->eError = ++ PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE, ++ (void **)&psPMRExportInt, ++ hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT, IMG_TRUE); ++ if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ goto PMRImportPMR_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ ++ psPMRImportPMROUT->eError = ++ PhysmemImportPMR(psConnection, OSGetDevNode(psConnection), ++ psPMRExportInt, ++ psPMRImportPMRIN->ui64uiPassword, ++ psPMRImportPMRIN->ui64uiSize, ++ psPMRImportPMRIN->ui32uiLog2Contig, &psPMRInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) ++ { ++ goto PMRImportPMR_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psPMRImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psPMRImportPMROUT->hPMR, ++ (void *)psPMRInt, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _PMRImportPMRpsPMRIntRelease); ++ if (unlikely(psPMRImportPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PMRImportPMR_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++PMRImportPMR_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(KERNEL_HANDLE_BASE); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRExportInt) ++ { ++ PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE, ++ hPMRExport, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT); ++ } ++ /* Release now we have cleaned up look up handles. 
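++ * On failure the freshly imported PMR is unreferenced below, since no output handle was created to own it.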
*/ ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ ++ if (psPMRImportPMROUT->eError != PVRSRV_OK) ++ { ++ if (psPMRInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ PMRUnrefPMR(psPMRInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _PMRLocalImportPMRpsPMRIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PMRUnrefPMR((PMR *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPMRLocalImportPMRIN_UI8, ++ IMG_UINT8 * psPMRLocalImportPMROUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN = ++ (PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMRIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT = ++ (PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *) IMG_OFFSET_ADDR(psPMRLocalImportPMROUT_UI8, 0); ++ ++ IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle; ++ PMR *psExtHandleInt = NULL; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psPMRLocalImportPMROUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, ++ (void **)&psExtHandleInt, ++ hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, IMG_TRUE); ++ if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ goto PMRLocalImportPMR_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ psPMRLocalImportPMROUT->eError = ++ PMRLocalImportPMR(psExtHandleInt, ++ &psPMRInt, ++ &psPMRLocalImportPMROUT->uiSize, &psPMRLocalImportPMROUT->uiAlign); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) ++ { ++ goto PMRLocalImportPMR_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psPMRLocalImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psPMRLocalImportPMROUT->hPMR, ++ (void *)psPMRInt, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _PMRLocalImportPMRpsPMRIntRelease); ++ if (unlikely(psPMRLocalImportPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PMRLocalImportPMR_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++PMRLocalImportPMR_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psExtHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, ++ hExtHandle, PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ if (psPMRLocalImportPMROUT->eError != PVRSRV_OK) ++ { ++ if (psPMRInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ PMRUnrefPMR(psPMRInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPMRUnrefPMRIN_UI8, ++ IMG_UINT8 * psPMRUnrefPMROUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN = ++ (PVRSRV_BRIDGE_IN_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMRIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT = ++ (PVRSRV_BRIDGE_OUT_PMRUNREFPMR *) IMG_OFFSET_ADDR(psPMRUnrefPMROUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psPMRUnrefPMROUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psPMRUnrefPMRIN->hPMR, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ if (unlikely((psPMRUnrefPMROUT->eError != PVRSRV_OK) && ++ (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefPMROUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto PMRUnrefPMR_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++PMRUnrefPMR_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPMRUnrefUnlockPMRIN_UI8, ++ IMG_UINT8 * psPMRUnrefUnlockPMROUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN = ++ (PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMRIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT = ++ (PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *) IMG_OFFSET_ADDR(psPMRUnrefUnlockPMROUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psPMRUnrefUnlockPMROUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ if (unlikely((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) && ++ (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psPMRUnrefUnlockPMROUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto PMRUnrefUnlockPMR_exit; ++ } ++ ++ /* Release now we have destroyed handles. 
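++ * Destroying the handle invokes the release callback registered at allocation time, which is what actually drops the PMR reference.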
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++PMRUnrefUnlockPMR_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _PhysmemNewRamBackedPMRpsPMRPtrIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PMRUnrefPMR((PMR *) pvData); ++ return eError; ++} ++ ++static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX, ++ "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPhysmemNewRamBackedPMRIN_UI8, ++ IMG_UINT8 * psPhysmemNewRamBackedPMROUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN = ++ (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *) ++ IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMRIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT = ++ (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *) ++ IMG_OFFSET_ADDR(psPhysmemNewRamBackedPMROUT_UI8, 0); ++ ++ IMG_UINT32 *ui32MappingTableInt = NULL; ++ IMG_CHAR *uiAnnotationInt = NULL; ++ PMR *psPMRPtrInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely(psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks > PMR_MAX_SUPPORTED_PAGE_COUNT)) ++ { ++ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto PhysmemNewRamBackedPMR_exit; ++ } ++ ++ if (unlikely(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN)) ++ { ++ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto PhysmemNewRamBackedPMR_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto PhysmemNewRamBackedPMR_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysmemNewRamBackedPMRIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto PhysmemNewRamBackedPMR_exit; ++ } ++ } ++ } ++ ++ if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0) ++ { ++ ui32MappingTableInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32MappingTableInt, ++ (const void __user *)psPhysmemNewRamBackedPMRIN->pui32MappingTable, ++ psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != ++ PVRSRV_OK) ++ { ++ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysmemNewRamBackedPMR_exit; ++ } ++ } ++ if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0) ++ { ++ uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiAnnotationInt, ++ (const void __user *)psPhysmemNewRamBackedPMRIN->puiAnnotation, ++ psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != ++ PVRSRV_OK) ++ { ++ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysmemNewRamBackedPMR_exit; ++ } ++ ((IMG_CHAR *) ++ uiAnnotationInt)[(psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * ++ sizeof(IMG_CHAR)) - 1] = '\0'; ++ } ++ ++ psPhysmemNewRamBackedPMROUT->eError = ++ PhysmemNewRamBackedPMR(psConnection, OSGetDevNode(psConnection), ++ psPhysmemNewRamBackedPMRIN->uiSize, ++ psPhysmemNewRamBackedPMRIN->uiChunkSize, ++ psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks, ++ psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks, ++ ui32MappingTableInt, ++ psPhysmemNewRamBackedPMRIN->ui32Log2PageSize, ++ psPhysmemNewRamBackedPMRIN->uiFlags, ++ psPhysmemNewRamBackedPMRIN->ui32AnnotationLength, ++ uiAnnotationInt, ++ psPhysmemNewRamBackedPMRIN->ui32PID, ++ &psPMRPtrInt, ++ psPhysmemNewRamBackedPMRIN->ui32PDumpFlags, ++ &psPhysmemNewRamBackedPMROUT->uiOutFlags); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) ++ { ++ goto PhysmemNewRamBackedPMR_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psPhysmemNewRamBackedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psPhysmemNewRamBackedPMROUT-> ++ hPMRPtr, ++ (void *)psPMRPtrInt, ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _PhysmemNewRamBackedPMRpsPMRPtrIntRelease); ++ if (unlikely(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PhysmemNewRamBackedPMR_exit; ++ } ++ ++ /* Release now we have created handles. 
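++ * Any failure from here on is handled at the exit label: the PMR loses its reference and the argument staging buffer is freed if it was heap-allocated rather than carved out of the input buffer.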
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++PhysmemNewRamBackedPMR_exit: ++ ++ if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK) ++ { ++ if (psPMRPtrInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ PMRUnrefPMR(psPMRPtrInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psPhysmemNewRamBackedPMROUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PMRUnrefUnlockPMR((PMR *) pvData); ++ return eError; ++} ++ ++static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX, ++ "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPhysmemNewRamBackedLockedPMRIN_UI8, ++ IMG_UINT8 * psPhysmemNewRamBackedLockedPMROUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMRIN = ++ (PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) ++ IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMRIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMROUT = ++ (PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *) ++ IMG_OFFSET_ADDR(psPhysmemNewRamBackedLockedPMROUT_UI8, 0); ++ ++ IMG_UINT32 *ui32MappingTableInt = NULL; ++ IMG_CHAR *uiAnnotationInt = NULL; ++ PMR *psPMRPtrInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * ++ sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * ++ sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely ++ (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks > PMR_MAX_SUPPORTED_PAGE_COUNT)) ++ { ++ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto PhysmemNewRamBackedLockedPMR_exit; ++ } ++ ++ if (unlikely ++ (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength > DEVMEM_ANNOTATION_MAX_LEN)) ++ { ++ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto PhysmemNewRamBackedLockedPMR_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto PhysmemNewRamBackedLockedPMR_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = ++ (IMG_BYTE *) (void *)psPhysmemNewRamBackedLockedPMRIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psPhysmemNewRamBackedLockedPMROUT->eError = ++ PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto PhysmemNewRamBackedLockedPMR_exit; ++ } ++ } ++ } ++ ++ if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0) ++ { ++ ui32MappingTableInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32MappingTableInt, ++ (const void __user *)psPhysmemNewRamBackedLockedPMRIN->pui32MappingTable, ++ psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) != ++ PVRSRV_OK) ++ { ++ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysmemNewRamBackedLockedPMR_exit; ++ } ++ } ++ if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0) ++ { ++ uiAnnotationInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiAnnotationInt, ++ (const void __user *)psPhysmemNewRamBackedLockedPMRIN->puiAnnotation, ++ psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != ++ PVRSRV_OK) ++ { ++ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysmemNewRamBackedLockedPMR_exit; ++ } ++ ((IMG_CHAR *) ++ uiAnnotationInt)[(psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * ++ sizeof(IMG_CHAR)) - 1] = '\0'; ++ } ++ ++ psPhysmemNewRamBackedLockedPMROUT->eError = ++ PhysmemNewRamBackedLockedPMR(psConnection, OSGetDevNode(psConnection), ++ psPhysmemNewRamBackedLockedPMRIN->uiSize, ++ psPhysmemNewRamBackedLockedPMRIN->uiChunkSize, ++ psPhysmemNewRamBackedLockedPMRIN->ui32NumPhysChunks, ++ psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks, ++ ui32MappingTableInt, ++ psPhysmemNewRamBackedLockedPMRIN->ui32Log2PageSize, ++ psPhysmemNewRamBackedLockedPMRIN->uiFlags, ++ psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength, ++ uiAnnotationInt, ++ psPhysmemNewRamBackedLockedPMRIN->ui32PID, ++ &psPMRPtrInt, ++ psPhysmemNewRamBackedLockedPMRIN->ui32PDumpFlags, ++ &psPhysmemNewRamBackedLockedPMROUT->uiOutFlags); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) ++ { ++ goto PhysmemNewRamBackedLockedPMR_exit; ++ } ++ ++ /* Lock over handle creation. 
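++ * The new PMR is wrapped in a handle under the connection lock; releasing that handle later funnels through PMRUnrefUnlockPMR via the registered release callback.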
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ psPhysmemNewRamBackedLockedPMROUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psPhysmemNewRamBackedLockedPMROUT->hPMRPtr, ++ (void *)psPMRPtrInt, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _PhysmemNewRamBackedLockedPMRpsPMRPtrIntRelease); ++ if (unlikely(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto PhysmemNewRamBackedLockedPMR_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++PhysmemNewRamBackedLockedPMR_exit: ++ ++ if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK) ++ { ++ if (psPMRPtrInt) ++ { ++ LockHandle(KERNEL_HANDLE_BASE); ++ PMRUnrefUnlockPMR(psPMRPtrInt); ++ UnlockHandle(KERNEL_HANDLE_BASE); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psPhysmemNewRamBackedLockedPMROUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntPinIN_UI8, ++ IMG_UINT8 * psDevmemIntPinOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTPIN *psDevmemIntPinIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTPIN *) IMG_OFFSET_ADDR(psDevmemIntPinIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *psDevmemIntPinOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *) IMG_OFFSET_ADDR(psDevmemIntPinOUT_UI8, 0); ++ ++ IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntPinOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevmemIntPinOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntPin_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntPinOUT->eError = DevmemIntPin(psPMRInt); ++ ++DevmemIntPin_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntUnpinIN_UI8, ++ IMG_UINT8 * psDevmemIntUnpinOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *psDevmemIntUnpinIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *) IMG_OFFSET_ADDR(psDevmemIntUnpinIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *psDevmemIntUnpinOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *) IMG_OFFSET_ADDR(psDevmemIntUnpinOUT_UI8, 0); ++ ++ IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntUnpinOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevmemIntUnpinOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntUnpin_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntUnpinOUT->eError = DevmemIntUnpin(psPMRInt); ++ ++DevmemIntUnpin_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntPinValidateIN_UI8, ++ IMG_UINT8 * psDevmemIntPinValidateOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *) IMG_OFFSET_ADDR(psDevmemIntPinValidateIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *) ++ IMG_OFFSET_ADDR(psDevmemIntPinValidateOUT_UI8, 0); ++ ++ IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping; ++ DEVMEMINT_MAPPING *psMappingInt = NULL; ++ IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntPinValidateOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psMappingInt, ++ hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE); ++ if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntPinValidate_exit; ++ } ++ ++ /* Look up the address from the handle */ ++ psDevmemIntPinValidateOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntPinValidate_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntPinValidateOUT->eError = DevmemIntPinValidate(psMappingInt, psPMRInt); ++ ++DevmemIntPinValidate_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psMappingInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntUnpinInvalidateIN_UI8, ++ IMG_UINT8 * psDevmemIntUnpinInvalidateOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *) ++ IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *) ++ IMG_OFFSET_ADDR(psDevmemIntUnpinInvalidateOUT_UI8, 0); ++ ++ IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping; ++ DEVMEMINT_MAPPING *psMappingInt = NULL; ++ IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntUnpinInvalidateOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psMappingInt, ++ hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, IMG_TRUE); ++ if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntUnpinInvalidate_exit; ++ } ++ ++ /* Look up the address from the handle */ ++ psDevmemIntUnpinInvalidateOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntUnpinInvalidate_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntUnpinInvalidateOUT->eError = DevmemIntUnpinInvalidate(psMappingInt, psPMRInt); ++ ++DevmemIntUnpinInvalidate_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psMappingInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hMapping, PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _DevmemIntCtxCreatepsDevMemServerContextIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = DevmemIntCtxDestroy((DEVMEMINT_CTX *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntCtxCreateIN_UI8, ++ IMG_UINT8 * psDevmemIntCtxCreateOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *) IMG_OFFSET_ADDR(psDevmemIntCtxCreateOUT_UI8, ++ 0); ++ ++ DEVMEMINT_CTX *psDevMemServerContextInt = NULL; ++ IMG_HANDLE hPrivDataInt = NULL; ++ ++ psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL; ++ ++ psDevmemIntCtxCreateOUT->eError = ++ DevmemIntCtxCreate(psConnection, OSGetDevNode(psConnection), ++ psDevmemIntCtxCreateIN->bbKernelMemoryCtx, ++ &psDevMemServerContextInt, ++ &hPrivDataInt, &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) ++ { ++ goto DevmemIntCtxCreate_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psDevmemIntCtxCreateOUT-> ++ hDevMemServerContext, ++ (void *) ++ psDevMemServerContextInt, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _DevmemIntCtxCreatepsDevMemServerContextIntRelease); ++ if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntCtxCreate_exit; ++ } ++ ++ psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, ++ &psDevmemIntCtxCreateOUT-> ++ hPrivData, ++ (void *)hPrivDataInt, ++ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ psDevmemIntCtxCreateOUT-> ++ hDevMemServerContext); ++ if (unlikely(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntCtxCreate_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntCtxCreate_exit: ++ ++ if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK) ++ { ++ if (psDevmemIntCtxCreateOUT->hDevMemServerContext) ++ { ++ PVRSRV_ERROR eError; ++ ++ /* Lock over handle creation cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psDevmemIntCtxCreateOUT-> ++ hDevMemServerContext, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(eError))); ++ } ++ /* Releasing the handle should free/destroy/release the resource. ++ * This should never fail... */ ++ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); ++ ++ /* Avoid freeing/destroying/releasing the resource a second time below */ ++ psDevMemServerContextInt = NULL; ++ /* Release now we have cleaned up creation handles. 
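++ * Handle destruction above already released the context, which is why psDevMemServerContextInt was NULLed; only a context that never received a handle is destroyed directly below.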
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ } ++ ++ if (psDevMemServerContextInt) ++ { ++ DevmemIntCtxDestroy(psDevMemServerContextInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntCtxDestroyIN_UI8, ++ IMG_UINT8 * psDevmemIntCtxDestroyOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *) IMG_OFFSET_ADDR(psDevmemIntCtxDestroyOUT_UI8, ++ 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntCtxDestroyOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psDevmemIntCtxDestroyIN-> ++ hDevmemServerContext, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ if (unlikely ++ ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) ++ && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psDevmemIntCtxDestroyOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntCtxDestroy_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntCtxDestroy_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = DevmemIntHeapDestroy((DEVMEMINT_HEAP *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntHeapCreateIN_UI8, ++ IMG_UINT8 * psDevmemIntHeapCreateOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *) IMG_OFFSET_ADDR(psDevmemIntHeapCreateOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx; ++ DEVMEMINT_CTX *psDevmemCtxInt = NULL; ++ DEVMEMINT_HEAP *psDevmemHeapPtrInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntHeapCreateOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psDevmemCtxInt, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); ++ if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntHeapCreate_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntHeapCreateOUT->eError = ++ DevmemIntHeapCreate(psDevmemCtxInt, ++ psDevmemIntHeapCreateIN->sHeapBaseAddr, ++ psDevmemIntHeapCreateIN->uiHeapLength, ++ psDevmemIntHeapCreateIN->ui32Log2DataPageSize, &psDevmemHeapPtrInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) ++ { ++ goto DevmemIntHeapCreate_exit; ++ } ++ ++ /* Lock over handle creation. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psDevmemIntHeapCreateOUT-> ++ hDevmemHeapPtr, ++ (void *)psDevmemHeapPtrInt, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _DevmemIntHeapCreatepsDevmemHeapPtrIntRelease); ++ if (unlikely(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntHeapCreate_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntHeapCreate_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psDevmemCtxInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK) ++ { ++ if (psDevmemHeapPtrInt) ++ { ++ DevmemIntHeapDestroy(psDevmemHeapPtrInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntHeapDestroyIN_UI8, ++ IMG_UINT8 * psDevmemIntHeapDestroyOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *) IMG_OFFSET_ADDR(psDevmemIntHeapDestroyIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *) ++ IMG_OFFSET_ADDR(psDevmemIntHeapDestroyOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntHeapDestroyOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); ++ if (unlikely((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) && ++ (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psDevmemIntHeapDestroyOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntHeapDestroy_exit; ++ } ++ ++ /* Release now we have destroyed handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntHeapDestroy_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _DevmemIntMapPMRpsMappingIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = DevmemIntUnmapPMR((DEVMEMINT_MAPPING *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntMapPMRIN_UI8, ++ IMG_UINT8 * psDevmemIntMapPMROUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMRIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntMapPMROUT_UI8, 0); ++ ++ IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap; ++ DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; ++ IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation; ++ DEVMEMINT_RESERVATION *psReservationInt = NULL; ++ IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR; ++ PMR *psPMRInt = NULL; ++ DEVMEMINT_MAPPING *psMappingInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntMapPMROUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psDevmemServerHeapInt, ++ hDevmemServerHeap, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); ++ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntMapPMR_exit; ++ } ++ ++ /* Look up the address from the handle */ ++ psDevmemIntMapPMROUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psReservationInt, ++ hReservation, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); ++ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntMapPMR_exit; ++ } ++ ++ /* Look up the address from the handle */ ++ psDevmemIntMapPMROUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntMapPMR_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntMapPMROUT->eError = ++ DevmemIntMapPMR(psDevmemServerHeapInt, ++ psReservationInt, ++ psPMRInt, psDevmemIntMapPMRIN->uiMapFlags, &psMappingInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) ++ { ++ goto DevmemIntMapPMR_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psDevmemIntMapPMROUT->hMapping, ++ (void *)psMappingInt, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _DevmemIntMapPMRpsMappingIntRelease); ++ if (unlikely(psDevmemIntMapPMROUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntMapPMR_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntMapPMR_exit: ++ ++ /* Lock over handle lookup cleanup. 
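++ * All three looked-up handles (heap, reservation, PMR) are dropped here; a mapping created on a path that later failed never received a handle, so it is unmapped directly at the end.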
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psDevmemServerHeapInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (psReservationInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psDevmemIntMapPMROUT->eError != PVRSRV_OK) ++ { ++ if (psMappingInt) ++ { ++ DevmemIntUnmapPMR(psMappingInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntUnmapPMRIN_UI8, ++ IMG_UINT8 * psDevmemIntUnmapPMROUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMRIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *) IMG_OFFSET_ADDR(psDevmemIntUnmapPMROUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntUnmapPMROUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING); ++ if (unlikely((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) && ++ (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psDevmemIntUnmapPMROUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntUnmapPMR_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntUnmapPMR_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _DevmemIntReserveRangepsReservationIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = DevmemIntUnreserveRange((DEVMEMINT_RESERVATION *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntReserveRangeIN_UI8, ++ IMG_UINT8 * psDevmemIntReserveRangeOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *) ++ IMG_OFFSET_ADDR(psDevmemIntReserveRangeIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *) ++ IMG_OFFSET_ADDR(psDevmemIntReserveRangeOUT_UI8, 0); ++ ++ IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap; ++ DEVMEMINT_HEAP *psDevmemServerHeapInt = NULL; ++ DEVMEMINT_RESERVATION *psReservationInt = NULL; ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntReserveRangeOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psDevmemServerHeapInt, ++ hDevmemServerHeap, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); ++ if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntReserveRange_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntReserveRangeOUT->eError = ++ DevmemIntReserveRange(psDevmemServerHeapInt, ++ psDevmemIntReserveRangeIN->sAddress, ++ psDevmemIntReserveRangeIN->uiLength, &psReservationInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) ++ { ++ goto DevmemIntReserveRange_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psDevmemIntReserveRangeOUT-> ++ hReservation, ++ (void *)psReservationInt, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _DevmemIntReserveRangepsReservationIntRelease); ++ if (unlikely(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntReserveRange_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntReserveRange_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psDevmemServerHeapInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hDevmemServerHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK) ++ { ++ if (psReservationInt) ++ { ++ DevmemIntUnreserveRange(psReservationInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntUnreserveRangeIN_UI8, ++ IMG_UINT8 * psDevmemIntUnreserveRangeOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *) ++ IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *) ++ IMG_OFFSET_ADDR(psDevmemIntUnreserveRangeOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntUnreserveRangeOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psDevmemIntUnreserveRangeIN-> ++ hReservation, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); ++ if (unlikely ++ ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) ++ && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psDevmemIntUnreserveRangeOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntUnreserveRange_exit; ++ } ++ ++ /* Release now we have destroyed handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++DevmemIntUnreserveRange_exit: ++ ++ return 0; ++} ++ ++static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX, ++ "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); ++static_assert(PMR_MAX_SUPPORTED_PAGE_COUNT <= IMG_UINT32_MAX, ++ "PMR_MAX_SUPPORTED_PAGE_COUNT must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psChangeSparseMemIN_UI8, ++ IMG_UINT8 * psChangeSparseMemOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN = ++ (PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT = ++ (PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *) IMG_OFFSET_ADDR(psChangeSparseMemOUT_UI8, 0); ++ ++ IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap; ++ DEVMEMINT_HEAP *psSrvDevMemHeapInt = NULL; ++ IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR; ++ PMR *psPMRInt = NULL; ++ IMG_UINT32 *ui32AllocPageIndicesInt = NULL; ++ IMG_UINT32 *ui32FreePageIndicesInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) + 0; ++ ++ if (unlikely(psChangeSparseMemIN->ui32AllocPageCount > PMR_MAX_SUPPORTED_PAGE_COUNT)) ++ { ++ psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto ChangeSparseMem_exit; ++ } ++ ++ if (unlikely(psChangeSparseMemIN->ui32FreePageCount > PMR_MAX_SUPPORTED_PAGE_COUNT)) ++ { ++ psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto ChangeSparseMem_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psChangeSparseMemOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto ChangeSparseMem_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psChangeSparseMemIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psChangeSparseMemOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto ChangeSparseMem_exit; ++ } ++ } ++ } ++ ++ if (psChangeSparseMemIN->ui32AllocPageCount != 0) ++ { ++ ui32AllocPageIndicesInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32AllocPageIndicesInt, ++ (const void __user *)psChangeSparseMemIN->pui32AllocPageIndices, ++ psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto ChangeSparseMem_exit; ++ } ++ } ++ if (psChangeSparseMemIN->ui32FreePageCount != 0) ++ { ++ ui32FreePageIndicesInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32FreePageIndicesInt, ++ (const void __user *)psChangeSparseMemIN->pui32FreePageIndices, ++ psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto ChangeSparseMem_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psChangeSparseMemOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSrvDevMemHeapInt, ++ hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP, IMG_TRUE); ++ if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto ChangeSparseMem_exit; ++ } ++ ++ /* Look up the address from the handle */ ++ psChangeSparseMemOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psChangeSparseMemOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto ChangeSparseMem_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psChangeSparseMemOUT->eError = ++ DevmemIntChangeSparse(psSrvDevMemHeapInt, ++ psPMRInt, ++ psChangeSparseMemIN->ui32AllocPageCount, ++ ui32AllocPageIndicesInt, ++ psChangeSparseMemIN->ui32FreePageCount, ++ ui32FreePageIndicesInt, ++ psChangeSparseMemIN->ui32SparseFlags, ++ psChangeSparseMemIN->uiFlags, ++ psChangeSparseMemIN->sDevVAddr, ++ psChangeSparseMemIN->ui64CPUVAddr); ++ ++ChangeSparseMem_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSrvDevMemHeapInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSrvDevMemHeap, PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP); ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psChangeSparseMemOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntMapPagesIN_UI8, ++ IMG_UINT8 * psDevmemIntMapPagesOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntMapPagesOUT_UI8, 0); ++ ++ IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation; ++ DEVMEMINT_RESERVATION *psReservationInt = NULL; ++ IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR; ++ PMR *psPMRInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntMapPagesOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psReservationInt, ++ hReservation, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); ++ if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntMapPages_exit; ++ } ++ ++ /* Look up the address from the handle */ ++ psDevmemIntMapPagesOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntMapPages_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntMapPagesOUT->eError = ++ DevmemIntMapPages(psReservationInt, ++ psPMRInt, ++ psDevmemIntMapPagesIN->ui32PageCount, ++ psDevmemIntMapPagesIN->ui32PhysicalPgOffset, ++ psDevmemIntMapPagesIN->uiFlags, psDevmemIntMapPagesIN->sDevVAddr); ++ ++DevmemIntMapPages_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psReservationInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntUnmapPagesIN_UI8, ++ IMG_UINT8 * psDevmemIntUnmapPagesOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *) IMG_OFFSET_ADDR(psDevmemIntUnmapPagesOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation; ++ DEVMEMINT_RESERVATION *psReservationInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntUnmapPagesOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psReservationInt, ++ hReservation, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); ++ if (unlikely(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntUnmapPages_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntUnmapPagesOUT->eError = ++ DevmemIntUnmapPages(psReservationInt, ++ psDevmemIntUnmapPagesIN->sDevVAddr, ++ psDevmemIntUnmapPagesIN->ui32PageCount); ++ ++DevmemIntUnmapPages_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psReservationInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIsVDevAddrValidIN_UI8, ++ IMG_UINT8 * psDevmemIsVDevAddrValidOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *) ++ IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *) ++ IMG_OFFSET_ADDR(psDevmemIsVDevAddrValidOUT_UI8, 0); ++ ++ IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx; ++ DEVMEMINT_CTX *psDevmemCtxInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIsVDevAddrValidOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psDevmemCtxInt, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); ++ if (unlikely(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIsVDevAddrValid_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIsVDevAddrValidOUT->eError = ++ DevmemIntIsVDevAddrValid(psConnection, OSGetDevNode(psConnection), ++ psDevmemCtxInt, psDevmemIsVDevAddrValidIN->sAddress); ++ ++DevmemIsVDevAddrValid_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psDevmemCtxInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++#if defined(RGX_SRV_SLC_RANGEBASED_CFI_SUPPORTED) ++ ++static IMG_INT ++PVRSRVBridgeDevmemFlushDevSLCRange(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemFlushDevSLCRangeIN_UI8, ++ IMG_UINT8 * psDevmemFlushDevSLCRangeOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMFLUSHDEVSLCRANGE *) ++ IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *psDevmemFlushDevSLCRangeOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMFLUSHDEVSLCRANGE *) ++ IMG_OFFSET_ADDR(psDevmemFlushDevSLCRangeOUT_UI8, 0); ++ ++ IMG_HANDLE hDevmemCtx = psDevmemFlushDevSLCRangeIN->hDevmemCtx; ++ DEVMEMINT_CTX *psDevmemCtxInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemFlushDevSLCRangeOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psDevmemCtxInt, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); ++ if (unlikely(psDevmemFlushDevSLCRangeOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemFlushDevSLCRange_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemFlushDevSLCRangeOUT->eError = ++ DevmemIntFlushDevSLCRange(psDevmemCtxInt, ++ psDevmemFlushDevSLCRangeIN->sAddress, ++ psDevmemFlushDevSLCRangeIN->uiSize, ++ psDevmemFlushDevSLCRangeIN->bInvalidate); ++ ++DevmemFlushDevSLCRange_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psDevmemCtxInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++#else ++#define PVRSRVBridgeDevmemFlushDevSLCRange NULL ++#endif ++ ++#if defined(RGX_FEATURE_FBCDC) ++ ++static IMG_INT ++PVRSRVBridgeDevmemInvalidateFBSCTable(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemInvalidateFBSCTableIN_UI8, ++ IMG_UINT8 * psDevmemInvalidateFBSCTableOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINVALIDATEFBSCTABLE *) ++ IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *psDevmemInvalidateFBSCTableOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINVALIDATEFBSCTABLE *) ++ IMG_OFFSET_ADDR(psDevmemInvalidateFBSCTableOUT_UI8, 0); ++ ++ IMG_HANDLE hDevmemCtx = psDevmemInvalidateFBSCTableIN->hDevmemCtx; ++ DEVMEMINT_CTX *psDevmemCtxInt = NULL; ++ ++ /* Lock over handle lookup. 
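The user-visible handle is translated to its kernel object under the per-connection handle-base lock; the lookup takes a reference on the object, which the exit path releases.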
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemInvalidateFBSCTableOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psDevmemCtxInt, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); ++ if (unlikely(psDevmemInvalidateFBSCTableOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemInvalidateFBSCTable_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemInvalidateFBSCTableOUT->eError = ++ DevmemIntInvalidateFBSCTable(psDevmemCtxInt, ++ psDevmemInvalidateFBSCTableIN->ui64FBSCEntries); ++ ++DevmemInvalidateFBSCTable_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psDevmemCtxInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++#else ++#define PVRSRVBridgeDevmemInvalidateFBSCTable NULL ++#endif ++ ++static IMG_INT ++PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psHeapCfgHeapConfigCountIN_UI8, ++ IMG_UINT8 * psHeapCfgHeapConfigCountOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN = ++ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *) ++ IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT = ++ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *) ++ IMG_OFFSET_ADDR(psHeapCfgHeapConfigCountOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN); ++ ++ psHeapCfgHeapConfigCountOUT->eError = ++ HeapCfgHeapConfigCount(psConnection, OSGetDevNode(psConnection), ++ &psHeapCfgHeapConfigCountOUT->ui32NumHeapConfigs); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psHeapCfgHeapCountIN_UI8, ++ IMG_UINT8 * psHeapCfgHeapCountOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN = ++ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT = ++ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *) IMG_OFFSET_ADDR(psHeapCfgHeapCountOUT_UI8, 0); ++ ++ psHeapCfgHeapCountOUT->eError = ++ HeapCfgHeapCount(psConnection, OSGetDevNode(psConnection), ++ psHeapCfgHeapCountIN->ui32HeapConfigIndex, ++ &psHeapCfgHeapCountOUT->ui32NumHeaps); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psHeapCfgHeapConfigNameIN_UI8, ++ IMG_UINT8 * psHeapCfgHeapConfigNameOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN = ++ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *) ++ IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT = ++ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *) ++ IMG_OFFSET_ADDR(psHeapCfgHeapConfigNameOUT_UI8, 0); ++ ++ IMG_CHAR *puiHeapConfigNameInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; 
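++ /* The scratch size is computed in 64 bits first: the user-supplied buffer length multiplied by sizeof() could overflow 32 bits, so the total is checked against IMG_UINT32_MAX before being narrowed to ui32BufferSize below. */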
++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) + ++ 0; ++ ++ if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH) ++ { ++ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto HeapCfgHeapConfigName_exit; ++ } ++ ++ psHeapCfgHeapConfigNameOUT->puiHeapConfigName = ++ psHeapCfgHeapConfigNameIN->puiHeapConfigName; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto HeapCfgHeapConfigName_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapConfigNameIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto HeapCfgHeapConfigName_exit; ++ } ++ } ++ } ++ ++ if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0) ++ { ++ puiHeapConfigNameInt = ++ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR); ++ } ++ ++ psHeapCfgHeapConfigNameOUT->eError = ++ HeapCfgHeapConfigName(psConnection, OSGetDevNode(psConnection), ++ psHeapCfgHeapConfigNameIN->ui32HeapConfigIndex, ++ psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz, ++ puiHeapConfigNameInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psHeapCfgHeapConfigNameOUT->eError != PVRSRV_OK)) ++ { ++ goto HeapCfgHeapConfigName_exit; ++ } ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((puiHeapConfigNameInt) && ++ ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psHeapCfgHeapConfigNameOUT->puiHeapConfigName, ++ puiHeapConfigNameInt, ++ (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) != ++ PVRSRV_OK)) ++ { ++ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto HeapCfgHeapConfigName_exit; ++ } ++ } ++ ++HeapCfgHeapConfigName_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psHeapCfgHeapConfigNameOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psHeapCfgHeapDetailsIN_UI8, ++ IMG_UINT8 * psHeapCfgHeapDetailsOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN = ++ (PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *) 
IMG_OFFSET_ADDR(psHeapCfgHeapDetailsIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT = ++ (PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *) IMG_OFFSET_ADDR(psHeapCfgHeapDetailsOUT_UI8, ++ 0); ++ ++ IMG_CHAR *puiHeapNameOutInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) + 0; ++ ++ if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz > DEVMEM_HEAPNAME_MAXLENGTH) ++ { ++ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto HeapCfgHeapDetails_exit; ++ } ++ ++ psHeapCfgHeapDetailsOUT->puiHeapNameOut = psHeapCfgHeapDetailsIN->puiHeapNameOut; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto HeapCfgHeapDetails_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psHeapCfgHeapDetailsIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto HeapCfgHeapDetails_exit; ++ } ++ } ++ } ++ ++ if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0) ++ { ++ puiHeapNameOutInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR); ++ } ++ ++ psHeapCfgHeapDetailsOUT->eError = ++ HeapCfgHeapDetails(psConnection, OSGetDevNode(psConnection), ++ psHeapCfgHeapDetailsIN->ui32HeapConfigIndex, ++ psHeapCfgHeapDetailsIN->ui32HeapIndex, ++ psHeapCfgHeapDetailsIN->ui32HeapNameBufSz, ++ puiHeapNameOutInt, ++ &psHeapCfgHeapDetailsOUT->sDevVAddrBase, ++ &psHeapCfgHeapDetailsOUT->uiHeapLength, ++ &psHeapCfgHeapDetailsOUT->uiReservedRegionLength, ++ &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut, ++ &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psHeapCfgHeapDetailsOUT->eError != PVRSRV_OK)) ++ { ++ goto HeapCfgHeapDetails_exit; ++ } ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((puiHeapNameOutInt) && ++ ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psHeapCfgHeapDetailsOUT->puiHeapNameOut, ++ puiHeapNameOutInt, ++ (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK)) ++ { ++ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto HeapCfgHeapDetails_exit; ++ } ++ } ++ ++HeapCfgHeapDetails_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psHeapCfgHeapDetailsOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == 
ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemIntRegisterPFNotifyKMIN_UI8, ++ IMG_UINT8 * psDevmemIntRegisterPFNotifyKMOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *) ++ IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *) ++ IMG_OFFSET_ADDR(psDevmemIntRegisterPFNotifyKMOUT_UI8, 0); ++ ++ IMG_HANDLE hDevm = psDevmemIntRegisterPFNotifyKMIN->hDevm; ++ DEVMEMINT_CTX *psDevmInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemIntRegisterPFNotifyKMOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psDevmInt, ++ hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); ++ if (unlikely(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemIntRegisterPFNotifyKM_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemIntRegisterPFNotifyKMOUT->eError = ++ DevmemIntRegisterPFNotifyKM(psDevmInt, ++ psDevmemIntRegisterPFNotifyKMIN->ui32PID, ++ psDevmemIntRegisterPFNotifyKMIN->bRegister); ++ ++DevmemIntRegisterPFNotifyKM_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psDevmInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hDevm, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ } ++ /* Release now we have cleaned up look up handles. 
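Only pointers whose lookup succeeded are non-NULL at this point, so the conditional release above keeps the reference counts balanced on every path.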
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeGetMaxPhysHeapCount(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psGetMaxPhysHeapCountIN_UI8, ++ IMG_UINT8 * psGetMaxPhysHeapCountOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountIN = ++ (PVRSRV_BRIDGE_IN_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *psGetMaxPhysHeapCountOUT = ++ (PVRSRV_BRIDGE_OUT_GETMAXPHYSHEAPCOUNT *) IMG_OFFSET_ADDR(psGetMaxPhysHeapCountOUT_UI8, ++ 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psGetMaxPhysHeapCountIN); ++ ++ psGetMaxPhysHeapCountOUT->eError = ++ PVRSRVGetMaxPhysHeapCountKM(psConnection, OSGetDevNode(psConnection), ++ &psGetMaxPhysHeapCountOUT->ui32PhysHeapCount); ++ ++ return 0; ++} ++ ++static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX, ++ "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgePhysHeapGetMemInfo(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPhysHeapGetMemInfoIN_UI8, ++ IMG_UINT8 * psPhysHeapGetMemInfoOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoIN = ++ (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *psPhysHeapGetMemInfoOUT = ++ (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFO *) IMG_OFFSET_ADDR(psPhysHeapGetMemInfoOUT_UI8, ++ 0); ++ ++ PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL; ++ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStatsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) + ++ ((IMG_UINT64) psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) + ++ 0; ++ ++ if (unlikely(psPhysHeapGetMemInfoIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)) ++ { ++ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto PhysHeapGetMemInfo_exit; ++ } ++ ++ psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats = ++ psPhysHeapGetMemInfoIN->pasapPhysHeapMemStats; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto PhysHeapGetMemInfo_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto PhysHeapGetMemInfo_exit; ++ } ++ } ++ } ++ ++ if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) ++ { ++ eaPhysHeapIDInt = ++ (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, eaPhysHeapIDInt, ++ (const void __user *)psPhysHeapGetMemInfoIN->peaPhysHeapID, ++ psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) != ++ PVRSRV_OK) ++ { ++ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysHeapGetMemInfo_exit; ++ } ++ } ++ if (psPhysHeapGetMemInfoIN->ui32PhysHeapCount != 0) ++ { ++ pasapPhysHeapMemStatsInt = ++ (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS); ++ } ++ ++ psPhysHeapGetMemInfoOUT->eError = ++ PVRSRVPhysHeapGetMemInfoKM(psConnection, OSGetDevNode(psConnection), ++ psPhysHeapGetMemInfoIN->ui32PhysHeapCount, ++ eaPhysHeapIDInt, pasapPhysHeapMemStatsInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPhysHeapGetMemInfoOUT->eError != PVRSRV_OK)) ++ { ++ goto PhysHeapGetMemInfo_exit; ++ } ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((pasapPhysHeapMemStatsInt) && ++ ((psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psPhysHeapGetMemInfoOUT->pasapPhysHeapMemStats, ++ pasapPhysHeapMemStatsInt, ++ (psPhysHeapGetMemInfoIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) != ++ PVRSRV_OK)) ++ { ++ psPhysHeapGetMemInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysHeapGetMemInfo_exit; ++ } ++ } ++ ++PhysHeapGetMemInfo_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psPhysHeapGetMemInfoOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeGetDefaultPhysicalHeap(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psGetDefaultPhysicalHeapIN_UI8, ++ IMG_UINT8 * psGetDefaultPhysicalHeapOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapIN = ++ (PVRSRV_BRIDGE_IN_GETDEFAULTPHYSICALHEAP *) ++ IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *psGetDefaultPhysicalHeapOUT = ++ (PVRSRV_BRIDGE_OUT_GETDEFAULTPHYSICALHEAP *) ++ IMG_OFFSET_ADDR(psGetDefaultPhysicalHeapOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psGetDefaultPhysicalHeapIN); ++ ++ psGetDefaultPhysicalHeapOUT->eError = ++ PVRSRVGetDefaultPhysicalHeapKM(psConnection, 
OSGetDevNode(psConnection), ++ &psGetDefaultPhysicalHeapOUT->eHeap); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeGetHeapPhysMemUsage(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psGetHeapPhysMemUsageIN_UI8, ++ IMG_UINT8 * psGetHeapPhysMemUsageOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageIN = ++ (PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *psGetHeapPhysMemUsageOUT = ++ (PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGE *) IMG_OFFSET_ADDR(psGetHeapPhysMemUsageOUT_UI8, ++ 0); ++ ++ PHYS_HEAP_MEM_STATS *pasapPhysHeapMemStatsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psGetHeapPhysMemUsageIN->ui32PhysHeapCount * ++ sizeof(PHYS_HEAP_MEM_STATS)) + 0; ++ ++ if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST) ++ { ++ psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto GetHeapPhysMemUsage_exit; ++ } ++ ++ psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats = ++ psGetHeapPhysMemUsageIN->pasapPhysHeapMemStats; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto GetHeapPhysMemUsage_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psGetHeapPhysMemUsageIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetHeapPhysMemUsageIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto GetHeapPhysMemUsage_exit; ++ } ++ } ++ } ++ ++ if (psGetHeapPhysMemUsageIN->ui32PhysHeapCount != 0) ++ { ++ pasapPhysHeapMemStatsInt = ++ (PHYS_HEAP_MEM_STATS *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS); ++ } ++ ++ psGetHeapPhysMemUsageOUT->eError = ++ PVRSRVGetHeapPhysMemUsageKM(psConnection, OSGetDevNode(psConnection), ++ psGetHeapPhysMemUsageIN->ui32PhysHeapCount, ++ pasapPhysHeapMemStatsInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psGetHeapPhysMemUsageOUT->eError != PVRSRV_OK)) ++ { ++ goto GetHeapPhysMemUsage_exit; ++ } ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((pasapPhysHeapMemStatsInt) && ++ ((psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psGetHeapPhysMemUsageOUT->pasapPhysHeapMemStats, ++ pasapPhysHeapMemStatsInt, ++ (psGetHeapPhysMemUsageIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS))) != ++ PVRSRV_OK)) ++ { ++ psGetHeapPhysMemUsageOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto GetHeapPhysMemUsage_exit; ++ } ++ } ++ ++GetHeapPhysMemUsage_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psGetHeapPhysMemUsageOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDevmemGetFaultAddressIN_UI8, ++ IMG_UINT8 * psDevmemGetFaultAddressOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN = ++ (PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *) ++ IMG_OFFSET_ADDR(psDevmemGetFaultAddressIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT = ++ (PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *) ++ IMG_OFFSET_ADDR(psDevmemGetFaultAddressOUT_UI8, 0); ++ ++ IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx; ++ DEVMEMINT_CTX *psDevmemCtxInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psDevmemGetFaultAddressOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psDevmemCtxInt, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX, IMG_TRUE); ++ if (unlikely(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto DevmemGetFaultAddress_exit; ++ } ++ /* Release now we have looked up handles. 
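The reference taken by the lookup keeps the context alive, so the lock does not need to be held across the call into the server function.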
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psDevmemGetFaultAddressOUT->eError = ++ DevmemIntGetFaultAddress(psConnection, OSGetDevNode(psConnection), ++ psDevmemCtxInt, &psDevmemGetFaultAddressOUT->sFaultAddress); ++ ++DevmemGetFaultAddress_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psDevmemCtxInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hDevmemCtx, PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++#if defined(PVRSRV_ENABLE_PROCESS_STATS) ++ ++static IMG_INT ++PVRSRVBridgePVRSRVUpdateOOMStats(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPVRSRVUpdateOOMStatsIN_UI8, ++ IMG_UINT8 * psPVRSRVUpdateOOMStatsOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsIN = ++ (PVRSRV_BRIDGE_IN_PVRSRVUPDATEOOMSTATS *) IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *psPVRSRVUpdateOOMStatsOUT = ++ (PVRSRV_BRIDGE_OUT_PVRSRVUPDATEOOMSTATS *) ++ IMG_OFFSET_ADDR(psPVRSRVUpdateOOMStatsOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ psPVRSRVUpdateOOMStatsOUT->eError = ++ PVRSRVServerUpdateOOMStats(psPVRSRVUpdateOOMStatsIN->ui32ui32StatType, ++ psPVRSRVUpdateOOMStatsIN->ui32pid); ++ ++ return 0; ++} ++ ++#else ++#define PVRSRVBridgePVRSRVUpdateOOMStats NULL ++#endif ++ ++static_assert(PVRSRV_PHYS_HEAP_LAST <= IMG_UINT32_MAX, ++ "PVRSRV_PHYS_HEAP_LAST must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgePhysHeapGetMemInfoPkd(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psPhysHeapGetMemInfoPkdIN_UI8, ++ IMG_UINT8 * psPhysHeapGetMemInfoPkdOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD *psPhysHeapGetMemInfoPkdIN = ++ (PVRSRV_BRIDGE_IN_PHYSHEAPGETMEMINFOPKD *) ++ IMG_OFFSET_ADDR(psPhysHeapGetMemInfoPkdIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD *psPhysHeapGetMemInfoPkdOUT = ++ (PVRSRV_BRIDGE_OUT_PHYSHEAPGETMEMINFOPKD *) ++ IMG_OFFSET_ADDR(psPhysHeapGetMemInfoPkdOUT_UI8, 0); ++ ++ PVRSRV_PHYS_HEAP *eaPhysHeapIDInt = NULL; ++ PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStatsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) + ++ ((IMG_UINT64) psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * ++ sizeof(PHYS_HEAP_MEM_STATS_PKD)) + 0; ++ ++ if (unlikely(psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST)) ++ { ++ psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto PhysHeapGetMemInfoPkd_exit; ++ } ++ ++ psPhysHeapGetMemInfoPkdOUT->psapPhysHeapMemStats = ++ psPhysHeapGetMemInfoPkdIN->psapPhysHeapMemStats; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto PhysHeapGetMemInfoPkd_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
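The bridge input buffer is PVRSRV_MAX_BRIDGE_IN_SIZE bytes long, so any space beyond the word-aligned end of the input structure can double as scratch for the array copies; the common case then avoids a heap allocation entirely.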
*/ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psPhysHeapGetMemInfoPkdIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psPhysHeapGetMemInfoPkdIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto PhysHeapGetMemInfoPkd_exit; ++ } ++ } ++ } ++ ++ if (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount != 0) ++ { ++ eaPhysHeapIDInt = ++ (PVRSRV_PHYS_HEAP *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP); ++ } ++ ++ /* Copy the data over */ ++ if (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, eaPhysHeapIDInt, ++ (const void __user *)psPhysHeapGetMemInfoPkdIN->peaPhysHeapID, ++ psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PVRSRV_PHYS_HEAP)) != ++ PVRSRV_OK) ++ { ++ psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysHeapGetMemInfoPkd_exit; ++ } ++ } ++ if (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount != 0) ++ { ++ psapPhysHeapMemStatsInt = ++ (PHYS_HEAP_MEM_STATS_PKD *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD); ++ } ++ ++ psPhysHeapGetMemInfoPkdOUT->eError = ++ PVRSRVPhysHeapGetMemInfoPkdKM(psConnection, OSGetDevNode(psConnection), ++ psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount, ++ eaPhysHeapIDInt, psapPhysHeapMemStatsInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psPhysHeapGetMemInfoPkdOUT->eError != PVRSRV_OK)) ++ { ++ goto PhysHeapGetMemInfoPkd_exit; ++ } ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((psapPhysHeapMemStatsInt) && ++ ((psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psPhysHeapGetMemInfoPkdOUT->psapPhysHeapMemStats, ++ psapPhysHeapMemStatsInt, ++ (psPhysHeapGetMemInfoPkdIN->ui32PhysHeapCount * ++ sizeof(PHYS_HEAP_MEM_STATS_PKD))) != PVRSRV_OK)) ++ { ++ psPhysHeapGetMemInfoPkdOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto PhysHeapGetMemInfoPkd_exit; ++ } ++ } ++ ++PhysHeapGetMemInfoPkd_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psPhysHeapGetMemInfoPkdOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeGetHeapPhysMemUsagePkd(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psGetHeapPhysMemUsagePkdIN_UI8, ++ IMG_UINT8 * psGetHeapPhysMemUsagePkdOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD *psGetHeapPhysMemUsagePkdIN = ++ (PVRSRV_BRIDGE_IN_GETHEAPPHYSMEMUSAGEPKD *) ++ IMG_OFFSET_ADDR(psGetHeapPhysMemUsagePkdIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD 
*psGetHeapPhysMemUsagePkdOUT = ++ (PVRSRV_BRIDGE_OUT_GETHEAPPHYSMEMUSAGEPKD *) ++ IMG_OFFSET_ADDR(psGetHeapPhysMemUsagePkdOUT_UI8, 0); ++ ++ PHYS_HEAP_MEM_STATS_PKD *psapPhysHeapMemStatsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount * ++ sizeof(PHYS_HEAP_MEM_STATS_PKD)) + 0; ++ ++ if (psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount > PVRSRV_PHYS_HEAP_LAST) ++ { ++ psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto GetHeapPhysMemUsagePkd_exit; ++ } ++ ++ psGetHeapPhysMemUsagePkdOUT->psapPhysHeapMemStats = ++ psGetHeapPhysMemUsagePkdIN->psapPhysHeapMemStats; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto GetHeapPhysMemUsagePkd_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psGetHeapPhysMemUsagePkdIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetHeapPhysMemUsagePkdIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto GetHeapPhysMemUsagePkd_exit; ++ } ++ } ++ } ++ ++ if (psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount != 0) ++ { ++ psapPhysHeapMemStatsInt = ++ (PHYS_HEAP_MEM_STATS_PKD *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD); ++ } ++ ++ psGetHeapPhysMemUsagePkdOUT->eError = ++ PVRSRVGetHeapPhysMemUsagePkdKM(psConnection, OSGetDevNode(psConnection), ++ psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount, ++ psapPhysHeapMemStatsInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psGetHeapPhysMemUsagePkdOUT->eError != PVRSRV_OK)) ++ { ++ goto GetHeapPhysMemUsagePkd_exit; ++ } ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((psapPhysHeapMemStatsInt) && ++ ((psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount * sizeof(PHYS_HEAP_MEM_STATS_PKD)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psGetHeapPhysMemUsagePkdOUT->psapPhysHeapMemStats, ++ psapPhysHeapMemStatsInt, ++ (psGetHeapPhysMemUsagePkdIN->ui32PhysHeapCount * ++ sizeof(PHYS_HEAP_MEM_STATS_PKD))) != PVRSRV_OK)) ++ { ++ psGetHeapPhysMemUsagePkdOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto GetHeapPhysMemUsagePkd_exit; ++ } ++ } ++ ++GetHeapPhysMemUsagePkd_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psGetHeapPhysMemUsagePkdOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) 
++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitMMBridge(void); ++void DeinitMMBridge(void); ++ ++/* ++ * Register all MM functions with services ++ */ ++PVRSRV_ERROR InitMMBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, ++ PVRSRVBridgePMRExportPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, ++ PVRSRVBridgePMRUnexportPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID, ++ NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, ++ PVRSRVBridgePMRMakeLocalImportHandle, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, ++ PVRSRVBridgePMRUnmakeLocalImportHandle, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, ++ PVRSRVBridgePMRImportPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, ++ PVRSRVBridgePMRLocalImportPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, ++ PVRSRVBridgePMRUnrefPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, ++ PVRSRVBridgePMRUnrefUnlockPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, ++ PVRSRVBridgePhysmemNewRamBackedPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR, ++ PVRSRVBridgePhysmemNewRamBackedLockedPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN, ++ PVRSRVBridgeDevmemIntPin, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN, ++ PVRSRVBridgeDevmemIntUnpin, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE, ++ PVRSRVBridgeDevmemIntPinValidate, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE, ++ PVRSRVBridgeDevmemIntUnpinInvalidate, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, ++ PVRSRVBridgeDevmemIntCtxCreate, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, ++ PVRSRVBridgeDevmemIntCtxDestroy, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, ++ PVRSRVBridgeDevmemIntHeapCreate, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, ++ PVRSRVBridgeDevmemIntHeapDestroy, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, ++ PVRSRVBridgeDevmemIntMapPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, ++ PVRSRVBridgeDevmemIntUnmapPMR, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, ++ PVRSRVBridgeDevmemIntReserveRange, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, ++ PVRSRVBridgeDevmemIntUnreserveRange, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, ++ PVRSRVBridgeChangeSparseMem, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES, ++ PVRSRVBridgeDevmemIntMapPages, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, 
PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES, ++ PVRSRVBridgeDevmemIntUnmapPages, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, ++ PVRSRVBridgeDevmemIsVDevAddrValid, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE, ++ PVRSRVBridgeDevmemFlushDevSLCRange, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE, ++ PVRSRVBridgeDevmemInvalidateFBSCTable, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, ++ PVRSRVBridgeHeapCfgHeapConfigCount, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, ++ PVRSRVBridgeHeapCfgHeapCount, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, ++ PVRSRVBridgeHeapCfgHeapConfigName, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, ++ PVRSRVBridgeHeapCfgHeapDetails, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, ++ PVRSRVBridgeDevmemIntRegisterPFNotifyKM, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT, ++ PVRSRVBridgeGetMaxPhysHeapCount, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO, ++ PVRSRVBridgePhysHeapGetMemInfo, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP, ++ PVRSRVBridgeGetDefaultPhysicalHeap, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE, ++ PVRSRVBridgeGetHeapPhysMemUsage, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS, ++ PVRSRVBridgeDevmemGetFaultAddress, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS, ++ PVRSRVBridgePVRSRVUpdateOOMStats, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFOPKD, ++ PVRSRVBridgePhysHeapGetMemInfoPkd, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGEPKD, ++ PVRSRVBridgeGetHeapPhysMemUsagePkd, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all mm functions with services ++ */ ++void DeinitMMBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, 
PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMFLUSHDEVSLCRANGE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINVALIDATEFBSCTABLE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXPHYSHEAPCOUNT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFO); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETDEFAULTPHYSICALHEAP); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PVRSRVUPDATEOOMSTATS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSHEAPGETMEMINFOPKD); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETHEAPPHYSMEMUSAGEPKD); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c b/drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c +new file mode 100644 +index 000000000000..e25137ca76b0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_pvrtl_bridge.c +@@ -0,0 +1,836 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for pvrtl ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for pvrtl ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#include <linux/uaccess.h> ++ ++#include "img_defs.h" ++ ++#include "tlserver.h" ++ ++#include "common_pvrtl_bridge.h" ++ ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "connection_server.h" ++#include "pvr_bridge.h" ++#if defined(SUPPORT_RGX) ++#include "rgx_bridge.h" ++#endif ++#include "srvcore.h" ++#include "handle.h" ++ ++#include <linux/slab.h> ++ ++/* *************************************************************************** ++ * Server-side bridge entry points ++ */ ++ ++static PVRSRV_ERROR _TLOpenStreampsSDIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = TLServerCloseStreamKM((TL_STREAM_DESC *) pvData); ++ return eError; ++} ++ ++static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX, ++ "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psTLOpenStreamIN_UI8, ++ IMG_UINT8 * psTLOpenStreamOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN = ++ (PVRSRV_BRIDGE_IN_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT = ++ (PVRSRV_BRIDGE_OUT_TLOPENSTREAM *) IMG_OFFSET_ADDR(psTLOpenStreamOUT_UI8, 0); ++ ++ IMG_CHAR *uiNameInt = NULL; ++ TL_STREAM_DESC *psSDInt = NULL; ++ PMR *psTLPMRInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + 0; ++ ++ psTLOpenStreamOUT->hSD = NULL; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psTLOpenStreamOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto TLOpenStream_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLOpenStreamIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psTLOpenStreamOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto TLOpenStream_exit; ++ } ++ } ++ } ++ ++ { ++ uiNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiNameInt, (const void __user *)psTLOpenStreamIN->puiName, ++ PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto TLOpenStream_exit; ++ } ++ ((IMG_CHAR *) uiNameInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - 1] = ++ '\0'; ++ } ++ ++ psTLOpenStreamOUT->eError = ++ TLServerOpenStreamKM(uiNameInt, psTLOpenStreamIN->ui32Mode, &psSDInt, &psTLPMRInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) ++ { ++ goto TLOpenStream_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psTLOpenStreamOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psTLOpenStreamOUT->hSD, ++ (void *)psSDInt, ++ PVRSRV_HANDLE_TYPE_PVR_TL_SD, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _TLOpenStreampsSDIntRelease); ++ if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto TLOpenStream_exit; ++ } ++ ++ psTLOpenStreamOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, ++ &psTLOpenStreamOUT->hTLPMR, ++ (void *)psTLPMRInt, ++ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ psTLOpenStreamOUT->hSD); ++ if (unlikely(psTLOpenStreamOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto TLOpenStream_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++TLOpenStream_exit: ++ ++ if (psTLOpenStreamOUT->eError != PVRSRV_OK) ++ { ++ if (psTLOpenStreamOUT->hSD) ++ { ++ PVRSRV_ERROR eError; ++ ++ /* Lock over handle creation cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psTLOpenStreamOUT->hSD, ++ PVRSRV_HANDLE_TYPE_PVR_TL_SD); ++ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(eError))); ++ } ++ /* Releasing the handle should free/destroy/release the resource. ++ * This should never fail... */ ++ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); ++ ++ /* Avoid freeing/destroying/releasing the resource a second time below */ ++ psSDInt = NULL; ++ /* Release now we have cleaned up creation handles. 
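Destroying hSD ran the stream's release callback, which already closed it; psSDInt was NULLed above so the fallback TLServerCloseStreamKM() call below is skipped rather than closing the stream twice.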
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ } ++ ++ if (psSDInt) ++ { ++ TLServerCloseStreamKM(psSDInt); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psTLOpenStreamOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psTLCloseStreamIN_UI8, ++ IMG_UINT8 * psTLCloseStreamOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN = ++ (PVRSRV_BRIDGE_IN_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT = ++ (PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *) IMG_OFFSET_ADDR(psTLCloseStreamOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psTLCloseStreamOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psTLCloseStreamIN->hSD, ++ PVRSRV_HANDLE_TYPE_PVR_TL_SD); ++ if (unlikely((psTLCloseStreamOUT->eError != PVRSRV_OK) && ++ (psTLCloseStreamOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psTLCloseStreamOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto TLCloseStream_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++TLCloseStream_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psTLAcquireDataIN_UI8, ++ IMG_UINT8 * psTLAcquireDataOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN = ++ (PVRSRV_BRIDGE_IN_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT = ++ (PVRSRV_BRIDGE_OUT_TLACQUIREDATA *) IMG_OFFSET_ADDR(psTLAcquireDataOUT_UI8, 0); ++ ++ IMG_HANDLE hSD = psTLAcquireDataIN->hSD; ++ TL_STREAM_DESC *psSDInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psTLAcquireDataOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSDInt, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); ++ if (unlikely(psTLAcquireDataOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto TLAcquireData_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psTLAcquireDataOUT->eError = ++ TLServerAcquireDataKM(psSDInt, ++ &psTLAcquireDataOUT->ui32ReadOffset, ++ &psTLAcquireDataOUT->ui32ReadLen); ++ ++TLAcquireData_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSDInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psTLReleaseDataIN_UI8, ++ IMG_UINT8 * psTLReleaseDataOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN = ++ (PVRSRV_BRIDGE_IN_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT = ++ (PVRSRV_BRIDGE_OUT_TLRELEASEDATA *) IMG_OFFSET_ADDR(psTLReleaseDataOUT_UI8, 0); ++ ++ IMG_HANDLE hSD = psTLReleaseDataIN->hSD; ++ TL_STREAM_DESC *psSDInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psTLReleaseDataOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSDInt, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); ++ if (unlikely(psTLReleaseDataOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto TLReleaseData_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psTLReleaseDataOUT->eError = ++ TLServerReleaseDataKM(psSDInt, ++ psTLReleaseDataIN->ui32ReadOffset, ++ psTLReleaseDataIN->ui32ReadLen); ++ ++TLReleaseData_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSDInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static_assert(PRVSRVTL_MAX_STREAM_NAME_SIZE <= IMG_UINT32_MAX, ++ "PRVSRVTL_MAX_STREAM_NAME_SIZE must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psTLDiscoverStreamsIN_UI8, ++ IMG_UINT8 * psTLDiscoverStreamsOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN = ++ (PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT = ++ (PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *) IMG_OFFSET_ADDR(psTLDiscoverStreamsOUT_UI8, 0); ++ ++ IMG_CHAR *uiNamePatternInt = NULL; ++ IMG_CHAR *puiStreamsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) + ++ ((IMG_UINT64) psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) + 0; ++ ++ if (psTLDiscoverStreamsIN->ui32Size > PVRSRVTL_MAX_DISCOVERABLE_STREAMS_BUFFER) ++ { ++ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto TLDiscoverStreams_exit; ++ } ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto TLDiscoverStreams_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. 
*/ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLDiscoverStreamsIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto TLDiscoverStreams_exit; ++ } ++ } ++ } ++ ++ { ++ uiNamePatternInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiNamePatternInt, ++ (const void __user *)psTLDiscoverStreamsIN->puiNamePattern, ++ PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto TLDiscoverStreams_exit; ++ } ++ ((IMG_CHAR *) uiNamePatternInt)[(PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) - ++ 1] = '\0'; ++ } ++ if (psTLDiscoverStreamsIN->ui32Size != 0) ++ { ++ puiStreamsInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR); ++ } ++ ++ psTLDiscoverStreamsOUT->eError = ++ TLServerDiscoverStreamsKM(uiNamePatternInt, ++ psTLDiscoverStreamsIN->ui32Size, ++ puiStreamsInt, &psTLDiscoverStreamsOUT->ui32NumFound); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psTLDiscoverStreamsOUT->eError != PVRSRV_OK)) ++ { ++ goto TLDiscoverStreams_exit; ++ } ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((puiStreamsInt) && ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psTLDiscoverStreamsOUT->puiStreams, puiStreamsInt, ++ (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) != PVRSRV_OK)) ++ { ++ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto TLDiscoverStreams_exit; ++ } ++ } ++ ++TLDiscoverStreams_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psTLDiscoverStreamsOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psTLReserveStreamIN_UI8, ++ IMG_UINT8 * psTLReserveStreamOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN = ++ (PVRSRV_BRIDGE_IN_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT = ++ (PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *) IMG_OFFSET_ADDR(psTLReserveStreamOUT_UI8, 0); ++ ++ IMG_HANDLE hSD = psTLReserveStreamIN->hSD; ++ TL_STREAM_DESC *psSDInt = NULL; ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psTLReserveStreamOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSDInt, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); ++ if (unlikely(psTLReserveStreamOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto TLReserveStream_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psTLReserveStreamOUT->eError = ++ TLServerReserveStreamKM(psSDInt, ++ &psTLReserveStreamOUT->ui32BufferOffset, ++ psTLReserveStreamIN->ui32Size, ++ psTLReserveStreamIN->ui32SizeMin, ++ &psTLReserveStreamOUT->ui32Available); ++ ++TLReserveStream_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSDInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psTLCommitStreamIN_UI8, ++ IMG_UINT8 * psTLCommitStreamOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN = ++ (PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT = ++ (PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *) IMG_OFFSET_ADDR(psTLCommitStreamOUT_UI8, 0); ++ ++ IMG_HANDLE hSD = psTLCommitStreamIN->hSD; ++ TL_STREAM_DESC *psSDInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psTLCommitStreamOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSDInt, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); ++ if (unlikely(psTLCommitStreamOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto TLCommitStream_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psTLCommitStreamOUT->eError = ++ TLServerCommitStreamKM(psSDInt, psTLCommitStreamIN->ui32ReqSize); ++ ++TLCommitStream_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSDInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static_assert(PVRSRVTL_MAX_PACKET_SIZE <= IMG_UINT32_MAX, ++ "PVRSRVTL_MAX_PACKET_SIZE must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psTLWriteDataIN_UI8, ++ IMG_UINT8 * psTLWriteDataOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN = ++ (PVRSRV_BRIDGE_IN_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT = ++ (PVRSRV_BRIDGE_OUT_TLWRITEDATA *) IMG_OFFSET_ADDR(psTLWriteDataOUT_UI8, 0); ++ ++ IMG_HANDLE hSD = psTLWriteDataIN->hSD; ++ TL_STREAM_DESC *psSDInt = NULL; ++ IMG_BYTE *ui8DataInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ((IMG_UINT64) psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) + 0; ++ ++ if (unlikely(psTLWriteDataIN->ui32Size > PVRSRVTL_MAX_PACKET_SIZE)) ++ { ++ psTLWriteDataOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto TLWriteData_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psTLWriteDataOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto TLWriteData_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psTLWriteDataIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psTLWriteDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto TLWriteData_exit; ++ } ++ } ++ } ++ ++ if (psTLWriteDataIN->ui32Size != 0) ++ { ++ ui8DataInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8DataInt, (const void __user *)psTLWriteDataIN->pui8Data, ++ psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK) ++ { ++ psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto TLWriteData_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psTLWriteDataOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSDInt, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD, IMG_TRUE); ++ if (unlikely(psTLWriteDataOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto TLWriteData_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psTLWriteDataOUT->eError = ++ TLServerWriteDataKM(psSDInt, psTLWriteDataIN->ui32Size, ui8DataInt); ++ ++TLWriteData_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSDInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSD, PVRSRV_HANDLE_TYPE_PVR_TL_SD); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psTLWriteDataOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitPVRTLBridge(void); ++void DeinitPVRTLBridge(void); ++ ++/* ++ * Register all PVRTL functions with services ++ */ ++PVRSRV_ERROR InitPVRTLBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, ++ PVRSRVBridgeTLOpenStream, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, ++ PVRSRVBridgeTLCloseStream, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, ++ PVRSRVBridgeTLAcquireData, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, ++ PVRSRVBridgeTLReleaseData, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, ++ PVRSRVBridgeTLDiscoverStreams, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, ++ PVRSRVBridgeTLReserveStream, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, ++ PVRSRVBridgeTLCommitStream, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA, ++ PVRSRVBridgeTLWriteData, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all pvrtl functions with services ++ */ ++void DeinitPVRTLBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_rgxbreakpoint_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxbreakpoint_bridge.c +new file mode 100644 +index 000000000000..bb7d0120425e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_rgxbreakpoint_bridge.c +@@ -0,0 +1,370 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for rgxbreakpoint ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for rgxbreakpoint ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "rgxbreakpoint.h"
++
++#include "common_rgxbreakpoint_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE)
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static IMG_INT
++PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psRGXSetBreakpointIN_UI8,
++ IMG_UINT8 * psRGXSetBreakpointOUT_UI8, CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psRGXSetBreakpointIN =
++ (PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *) IMG_OFFSET_ADDR(psRGXSetBreakpointIN_UI8, 0);
++ PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *psRGXSetBreakpointOUT =
++ (PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *) IMG_OFFSET_ADDR(psRGXSetBreakpointOUT_UI8, 0);
++
++ IMG_HANDLE hPrivData = psRGXSetBreakpointIN->hPrivData;
++ IMG_HANDLE hPrivDataInt = NULL;
++
++ /* Lock over handle lookup.
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXSetBreakpointOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hPrivDataInt, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXSetBreakpointOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSetBreakpoint_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXSetBreakpointOUT->eError = ++ PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevNode(psConnection), ++ hPrivDataInt, ++ psRGXSetBreakpointIN->eFWDataMaster, ++ psRGXSetBreakpointIN->ui32BreakpointAddr, ++ psRGXSetBreakpointIN->ui32HandlerAddr, ++ psRGXSetBreakpointIN->ui32DM); ++ ++RGXSetBreakpoint_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXClearBreakpointIN_UI8, ++ IMG_UINT8 * psRGXClearBreakpointOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *psRGXClearBreakpointIN = ++ (PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *) IMG_OFFSET_ADDR(psRGXClearBreakpointIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *psRGXClearBreakpointOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *) IMG_OFFSET_ADDR(psRGXClearBreakpointOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hPrivData = psRGXClearBreakpointIN->hPrivData; ++ IMG_HANDLE hPrivDataInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXClearBreakpointOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hPrivDataInt, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXClearBreakpointOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXClearBreakpoint_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXClearBreakpointOUT->eError = ++ PVRSRVRGXClearBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt); ++ ++RGXClearBreakpoint_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXEnableBreakpointIN_UI8, ++ IMG_UINT8 * psRGXEnableBreakpointOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *psRGXEnableBreakpointIN = ++ (PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXEnableBreakpointIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *psRGXEnableBreakpointOUT = ++ (PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXEnableBreakpointOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hPrivData = psRGXEnableBreakpointIN->hPrivData; ++ IMG_HANDLE hPrivDataInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXEnableBreakpointOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hPrivDataInt, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXEnableBreakpointOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXEnableBreakpoint_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXEnableBreakpointOUT->eError = ++ PVRSRVRGXEnableBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt); ++ ++RGXEnableBreakpoint_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXDisableBreakpointIN_UI8, ++ IMG_UINT8 * psRGXDisableBreakpointOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointIN = ++ (PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *) IMG_OFFSET_ADDR(psRGXDisableBreakpointIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointOUT = ++ (PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *) ++ IMG_OFFSET_ADDR(psRGXDisableBreakpointOUT_UI8, 0); ++ ++ IMG_HANDLE hPrivData = psRGXDisableBreakpointIN->hPrivData; ++ IMG_HANDLE hPrivDataInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXDisableBreakpointOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hPrivDataInt, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXDisableBreakpointOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXDisableBreakpoint_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXDisableBreakpointOUT->eError = ++ PVRSRVRGXDisableBreakpointKM(psConnection, OSGetDevNode(psConnection), hPrivDataInt); ++ ++RGXDisableBreakpoint_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXOverallocateBPRegistersIN_UI8, ++ IMG_UINT8 * psRGXOverallocateBPRegistersOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersIN = ++ (PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *) ++ IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersOUT = ++ (PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *) ++ IMG_OFFSET_ADDR(psRGXOverallocateBPRegistersOUT_UI8, 0); ++ ++ psRGXOverallocateBPRegistersOUT->eError = ++ PVRSRVRGXOverallocateBPRegistersKM(psConnection, OSGetDevNode(psConnection), ++ psRGXOverallocateBPRegistersIN->ui32TempRegs, ++ psRGXOverallocateBPRegistersIN->ui32SharedRegs); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ ++ ++#if !defined(EXCLUDE_RGXBREAKPOINT_BRIDGE) ++PVRSRV_ERROR InitRGXBREAKPOINTBridge(void); ++void DeinitRGXBREAKPOINTBridge(void); ++ ++/* ++ * Register all RGXBREAKPOINT functions with services ++ */ ++PVRSRV_ERROR InitRGXBREAKPOINTBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT, ++ PVRSRVBridgeRGXSetBreakpoint, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT, ++ PVRSRVBridgeRGXClearBreakpoint, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT, ++ PVRSRVBridgeRGXEnableBreakpoint, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT, ++ PVRSRVBridgeRGXDisableBreakpoint, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS, ++ PVRSRVBridgeRGXOverallocateBPRegisters, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all rgxbreakpoint functions with services ++ */ ++void DeinitRGXBREAKPOINTBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXSETBREAKPOINT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXCLEARBREAKPOINT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXENABLEBREAKPOINT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXDISABLEBREAKPOINT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXBREAKPOINT, ++ PVRSRV_BRIDGE_RGXBREAKPOINT_RGXOVERALLOCATEBPREGISTERS); ++ ++} ++#else /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ ++/* This bridge is conditional on EXCLUDE_RGXBREAKPOINT_BRIDGE - when defined, ++ * do not populate the dispatch table with its functions ++ */ ++#define InitRGXBREAKPOINTBridge() \ ++ PVRSRV_OK ++ ++#define DeinitRGXBREAKPOINTBridge() ++ ++#endif /* EXCLUDE_RGXBREAKPOINT_BRIDGE */ +diff --git a/drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c +new file mode 100644 +index 000000000000..9b97e78e1590 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_rgxcmp_bridge.c +@@ -0,0 +1,1171 @@ ++/******************************************************************************* ++@File ++@Title Server 
bridge for rgxcmp ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for rgxcmp ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "rgxcompute.h"
++
++#include "common_rgxcmp_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++#include "rgx_bvnc_defs_km.h"
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static PVRSRV_ERROR _RGXCreateComputeContextpsComputeContextIntRelease(void *pvData)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVRGXDestroyComputeContextKM((RGX_SERVER_COMPUTE_CONTEXT *) pvData);
++ return eError;
++}
++
++static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
++ "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");
++static_assert(RGXFWIF_STATIC_COMPUTECONTEXT_SIZE <= IMG_UINT32_MAX,
++ "RGXFWIF_STATIC_COMPUTECONTEXT_SIZE must not be larger than IMG_UINT32_MAX");
++
++static IMG_INT
++PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psRGXCreateComputeContextIN_UI8,
++ IMG_UINT8 * psRGXCreateComputeContextOUT_UI8,
++ CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN =
++ (PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *)
++ IMG_OFFSET_ADDR(psRGXCreateComputeContextIN_UI8, 0);
++ PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT =
++ (PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *)
++ IMG_OFFSET_ADDR(psRGXCreateComputeContextOUT_UI8, 0);
++
++ IMG_BYTE *ui8FrameworkCmdInt = NULL;
++ IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData;
++ IMG_HANDLE hPrivDataInt = NULL;
++ IMG_BYTE *ui8StaticComputeContextStateInt = NULL;
++ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL;
++
++ IMG_UINT32 ui32NextOffset = 0;
++ IMG_BYTE *pArrayArgsBuffer = NULL;
++#if !defined(INTEGRITY_OS)
++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
++#endif
++
++ IMG_UINT32 ui32BufferSize = 0;
++ IMG_UINT64 ui64BufferSize =
++ ((IMG_UINT64) psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
++ ((IMG_UINT64) psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize *
++ sizeof(IMG_BYTE)) + 0;
++
++ if (unlikely(psRGXCreateComputeContextIN->ui32FrameworkCmdize > RGXFWIF_RF_CMD_SIZE))
++ {
++ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
++ goto RGXCreateComputeContext_exit;
++ }
++
++ if (unlikely
++ (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize >
++ RGXFWIF_STATIC_COMPUTECONTEXT_SIZE))
++ {
++ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
++ goto RGXCreateComputeContext_exit;
++ }
++
++ {
++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
++
++ /* Check that device supports the required feature */
++ if ((psDeviceNode->pfnCheckDeviceFeature) &&
++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
++ RGX_FEATURE_COMPUTE_BIT_MASK))
++ {
++ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++
++ goto RGXCreateComputeContext_exit;
++ }
++ }
++
++ if (ui64BufferSize > IMG_UINT32_MAX)
++ {
++ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
++ goto RGXCreateComputeContext_exit;
++ }
++
++ ui32BufferSize = (IMG_UINT32) ui64BufferSize;
++
++ if (ui32BufferSize != 0)
++ {
++#if !defined(INTEGRITY_OS)
++ /* Try to use remainder of input
buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateComputeContextIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXCreateComputeContext_exit; ++ } ++ } ++ } ++ ++ if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0) ++ { ++ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8FrameworkCmdInt, ++ (const void __user *)psRGXCreateComputeContextIN->pui8FrameworkCmd, ++ psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != ++ PVRSRV_OK) ++ { ++ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateComputeContext_exit; ++ } ++ } ++ if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize != 0) ++ { ++ ui8StaticComputeContextStateInt = ++ (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * ++ sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8StaticComputeContextStateInt, ++ (const void __user *)psRGXCreateComputeContextIN-> ++ pui8StaticComputeContextState, ++ psRGXCreateComputeContextIN->ui32StaticComputeContextStateSize * ++ sizeof(IMG_BYTE)) != PVRSRV_OK) ++ { ++ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateComputeContext_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXCreateComputeContextOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hPrivDataInt, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateComputeContext_exit; ++ } ++ /* Release now we have looked up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateComputeContextOUT->eError = ++ PVRSRVRGXCreateComputeContextKM(psConnection, OSGetDevNode(psConnection), ++ psRGXCreateComputeContextIN->ui32Priority, ++ psRGXCreateComputeContextIN->ui32FrameworkCmdize, ++ ui8FrameworkCmdInt, ++ hPrivDataInt, ++ psRGXCreateComputeContextIN-> ++ ui32StaticComputeContextStateSize, ++ ui8StaticComputeContextStateInt, ++ psRGXCreateComputeContextIN->ui32PackedCCBSizeU88, ++ psRGXCreateComputeContextIN->ui32ContextFlags, ++ psRGXCreateComputeContextIN->ui64RobustnessAddress, ++ psRGXCreateComputeContextIN->ui32MaxDeadlineMS, ++ &psComputeContextInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXCreateComputeContext_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXCreateComputeContextOUT-> ++ hComputeContext, ++ (void *) ++ psComputeContextInt, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXCreateComputeContextpsComputeContextIntRelease); ++ if (unlikely(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateComputeContext_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXCreateComputeContext_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK) ++ { ++ if (psComputeContextInt) ++ { ++ PVRSRVRGXDestroyComputeContextKM(psComputeContextInt); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXCreateComputeContextOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXDestroyComputeContextIN_UI8, ++ IMG_UINT8 * psRGXDestroyComputeContextOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN = ++ (PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *) ++ IMG_OFFSET_ADDR(psRGXDestroyComputeContextIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextOUT = ++ (PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *) ++ IMG_OFFSET_ADDR(psRGXDestroyComputeContextOUT_UI8, 0); ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_COMPUTE_BIT_MASK)) ++ { ++ psRGXDestroyComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXDestroyComputeContext_exit; ++ } ++ } ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXDestroyComputeContextOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXDestroyComputeContextIN-> ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); ++ if (unlikely ++ ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK) ++ && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRGXDestroyComputeContextOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXDestroyComputeContext_exit; ++ } ++ ++ /* Release now we have destroyed handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXDestroyComputeContext_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXFlushComputeDataIN_UI8, ++ IMG_UINT8 * psRGXFlushComputeDataOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN = ++ (PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT = ++ (PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *) IMG_OFFSET_ADDR(psRGXFlushComputeDataOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext; ++ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_COMPUTE_BIT_MASK)) ++ { ++ psRGXFlushComputeDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXFlushComputeData_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXFlushComputeDataOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psComputeContextInt, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXFlushComputeDataOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXFlushComputeData_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXFlushComputeDataOUT->eError = PVRSRVRGXFlushComputeDataKM(psComputeContextInt); ++ ++RGXFlushComputeData_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psComputeContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXSetComputeContextPriorityIN_UI8, ++ IMG_UINT8 * psRGXSetComputeContextPriorityOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityIN = ++ (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityOUT = ++ (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXSetComputeContextPriorityOUT_UI8, 0); ++ ++ IMG_HANDLE hComputeContext = psRGXSetComputeContextPriorityIN->hComputeContext; ++ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_COMPUTE_BIT_MASK)) ++ { ++ psRGXSetComputeContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXSetComputeContextPriority_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXSetComputeContextPriorityOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psComputeContextInt, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSetComputeContextPriority_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXSetComputeContextPriorityOUT->eError = ++ PVRSRVRGXSetComputeContextPriorityKM(psConnection, OSGetDevNode(psConnection), ++ psComputeContextInt, ++ psRGXSetComputeContextPriorityIN->ui32Priority); ++ ++RGXSetComputeContextPriority_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psComputeContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXNotifyComputeWriteOffsetUpdateIN_UI8, ++ IMG_UINT8 * ++ psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateIN = ++ (PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) ++ IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateOUT ++ = ++ (PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *) ++ IMG_OFFSET_ADDR(psRGXNotifyComputeWriteOffsetUpdateOUT_UI8, 0); ++ ++ IMG_HANDLE hComputeContext = psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext; ++ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_COMPUTE_BIT_MASK)) ++ { ++ psRGXNotifyComputeWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXNotifyComputeWriteOffsetUpdate_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXNotifyComputeWriteOffsetUpdateOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psComputeContextInt, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXNotifyComputeWriteOffsetUpdate_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXNotifyComputeWriteOffsetUpdateOUT->eError = ++ PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(psComputeContextInt); ++ ++RGXNotifyComputeWriteOffsetUpdate_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psComputeContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, ++ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, ++ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXKickCDM2(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXKickCDM2IN_UI8, ++ IMG_UINT8 * psRGXKickCDM2OUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXKICKCDM2 *psRGXKickCDM2IN = ++ (PVRSRV_BRIDGE_IN_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2IN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *psRGXKickCDM2OUT = ++ (PVRSRV_BRIDGE_OUT_RGXKICKCDM2 *) IMG_OFFSET_ADDR(psRGXKickCDM2OUT_UI8, 0); ++ ++ IMG_HANDLE hComputeContext = psRGXKickCDM2IN->hComputeContext; ++ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; ++ SYNC_PRIMITIVE_BLOCK **psClientUpdateUFOSyncPrimBlockInt = NULL; ++ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL; ++ IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL; ++ IMG_UINT32 *ui32ClientUpdateValueInt = NULL; ++ IMG_CHAR *uiUpdateFenceNameInt = NULL; ++ IMG_BYTE *ui8DMCmdInt = NULL; ++ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; ++ PMR **psSyncPMRsInt = NULL; ++ IMG_HANDLE *hSyncPMRsInt2 = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) + ++ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + ++ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + ++ ((IMG_UINT64) psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) + ++ ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)) + ++ ((IMG_UINT64) psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; ++ ++ if (unlikely(psRGXKickCDM2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickCDM2_exit; ++ } ++ ++ if (unlikely(psRGXKickCDM2IN->ui32CmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickCDM2_exit; ++ } ++ ++ if (unlikely(psRGXKickCDM2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickCDM2_exit; ++ } ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ 
!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_COMPUTE_BIT_MASK)) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXKickCDM2_exit; ++ } ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXKickCDM2_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXKickCDM2IN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickCDM2IN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXKickCDM2_exit; ++ } ++ } ++ } ++ ++ if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) ++ { ++ psClientUpdateUFOSyncPrimBlockInt = ++ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psClientUpdateUFOSyncPrimBlockInt, 0, ++ psRGXKickCDM2IN->ui32ClientUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)); ++ ui32NextOffset += ++ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); ++ hClientUpdateUFOSyncPrimBlockInt2 = ++ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hClientUpdateUFOSyncPrimBlockInt2, ++ (const void __user *)psRGXKickCDM2IN->phClientUpdateUFOSyncPrimBlock, ++ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickCDM2_exit; ++ } ++ } ++ if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) ++ { ++ ui32ClientUpdateOffsetInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32ClientUpdateOffsetInt, ++ (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateOffset, ++ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickCDM2_exit; ++ } ++ } ++ if (psRGXKickCDM2IN->ui32ClientUpdateCount != 0) ++ { ++ ui32ClientUpdateValueInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32ClientUpdateValueInt, ++ (const void __user *)psRGXKickCDM2IN->pui32ClientUpdateValue, ++ psRGXKickCDM2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto 
RGXKickCDM2_exit; ++ } ++ } ++ ++ { ++ uiUpdateFenceNameInt = ++ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiUpdateFenceNameInt, ++ (const void __user *)psRGXKickCDM2IN->puiUpdateFenceName, ++ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickCDM2_exit; ++ } ++ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - ++ 1] = '\0'; ++ } ++ if (psRGXKickCDM2IN->ui32CmdSize != 0) ++ { ++ ui8DMCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8DMCmdInt, (const void __user *)psRGXKickCDM2IN->pui8DMCmd, ++ psRGXKickCDM2IN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickCDM2_exit; ++ } ++ } ++ if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) ++ { ++ ui32SyncPMRFlagsInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32SyncPMRFlagsInt, ++ (const void __user *)psRGXKickCDM2IN->pui32SyncPMRFlags, ++ psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickCDM2_exit; ++ } ++ } ++ if (psRGXKickCDM2IN->ui32SyncPMRCount != 0) ++ { ++ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psSyncPMRsInt, 0, psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *)); ++ ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(PMR *); ++ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickCDM2IN->phSyncPMRs, ++ psRGXKickCDM2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXKickCDM2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickCDM2_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. 
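Handles are resolved to kernel pointers only while the connection's handle base is held; each successful lookup takes a reference that is dropped again at the RGXKickCDM2_exit label, so a failure part-way through the loops below unwinds cleanly.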
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXKickCDM2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psComputeContextInt, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickCDM2_exit; ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickCDM2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **) ++ &psClientUpdateUFOSyncPrimBlockInt[i], ++ hClientUpdateUFOSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, ++ IMG_TRUE); ++ if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickCDM2_exit; ++ } ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickCDM2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSyncPMRsInt[i], ++ hSyncPMRsInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRGXKickCDM2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickCDM2_exit; ++ } ++ } ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXKickCDM2OUT->eError = ++ PVRSRVRGXKickCDMKM(psComputeContextInt, ++ psRGXKickCDM2IN->ui32ClientUpdateCount, ++ psClientUpdateUFOSyncPrimBlockInt, ++ ui32ClientUpdateOffsetInt, ++ ui32ClientUpdateValueInt, ++ psRGXKickCDM2IN->hCheckFenceFd, ++ psRGXKickCDM2IN->hUpdateTimeline, ++ &psRGXKickCDM2OUT->hUpdateFence, ++ uiUpdateFenceNameInt, ++ psRGXKickCDM2IN->ui32CmdSize, ++ ui8DMCmdInt, ++ psRGXKickCDM2IN->ui32PDumpFlags, ++ psRGXKickCDM2IN->ui32ExtJobRef, ++ psRGXKickCDM2IN->ui32SyncPMRCount, ++ ui32SyncPMRFlagsInt, ++ psSyncPMRsInt, ++ psRGXKickCDM2IN->ui32NumOfWorkgroups, ++ psRGXKickCDM2IN->ui32NumOfWorkitems, ++ psRGXKickCDM2IN->ui64DeadlineInus); ++ ++RGXKickCDM2_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psComputeContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); ++ } ++ ++ if (hClientUpdateUFOSyncPrimBlockInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickCDM2IN->ui32ClientUpdateCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psClientUpdateUFOSyncPrimBlockInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hClientUpdateUFOSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ } ++ } ++ ++ if (hSyncPMRsInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickCDM2IN->ui32SyncPMRCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psSyncPMRsInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSyncPMRsInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ } ++ } ++ /* Release now we have cleaned up look up handles. 
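The release loops above mirror the lookup loops exactly, and each entry is NULL-checked first (the pointer arrays were zeroed at carve-out), so elements the lookup pass never reached are simply skipped.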
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXKickCDM2OUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXSetComputeContextProperty(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXSetComputeContextPropertyIN_UI8, ++ IMG_UINT8 * psRGXSetComputeContextPropertyOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyIN = ++ (PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *psRGXSetComputeContextPropertyOUT = ++ (PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXSetComputeContextPropertyOUT_UI8, 0); ++ ++ IMG_HANDLE hComputeContext = psRGXSetComputeContextPropertyIN->hComputeContext; ++ RGX_SERVER_COMPUTE_CONTEXT *psComputeContextInt = NULL; ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_COMPUTE_BIT_MASK)) ++ { ++ psRGXSetComputeContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXSetComputeContextProperty_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXSetComputeContextPropertyOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psComputeContextInt, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXSetComputeContextPropertyOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSetComputeContextProperty_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXSetComputeContextPropertyOUT->eError = ++ PVRSRVRGXSetComputeContextPropertyKM(psComputeContextInt, ++ psRGXSetComputeContextPropertyIN->ui32Property, ++ psRGXSetComputeContextPropertyIN->ui64Input, ++ &psRGXSetComputeContextPropertyOUT->ui64Output); ++ ++RGXSetComputeContextProperty_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psComputeContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hComputeContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. 
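Unlike the kick path above, this property setter touches a single handle, so no argument buffer is carved out and this one release is the only cleanup required.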
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXGetLastDeviceError(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXGetLastDeviceErrorIN_UI8, ++ IMG_UINT8 * psRGXGetLastDeviceErrorOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorIN = ++ (PVRSRV_BRIDGE_IN_RGXGETLASTDEVICEERROR *) ++ IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *psRGXGetLastDeviceErrorOUT = ++ (PVRSRV_BRIDGE_OUT_RGXGETLASTDEVICEERROR *) ++ IMG_OFFSET_ADDR(psRGXGetLastDeviceErrorOUT_UI8, 0); ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_COMPUTE_BIT_MASK)) ++ { ++ psRGXGetLastDeviceErrorOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXGetLastDeviceError_exit; ++ } ++ } ++ ++ PVR_UNREFERENCED_PARAMETER(psRGXGetLastDeviceErrorIN); ++ ++ psRGXGetLastDeviceErrorOUT->eError = ++ PVRSRVRGXGetLastDeviceErrorKM(psConnection, OSGetDevNode(psConnection), ++ &psRGXGetLastDeviceErrorOUT->ui32Error); ++ ++RGXGetLastDeviceError_exit: ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitRGXCMPBridge(void); ++void DeinitRGXCMPBridge(void); ++ ++/* ++ * Register all RGXCMP functions with services ++ */ ++PVRSRV_ERROR InitRGXCMPBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, ++ PVRSRVBridgeRGXCreateComputeContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, ++ PVRSRVBridgeRGXDestroyComputeContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, ++ PVRSRVBridgeRGXFlushComputeData, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, ++ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, ++ PVRSRVBridgeRGXSetComputeContextPriority, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, ++ PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, ++ PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2, ++ PVRSRVBridgeRGXKickCDM2, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, ++ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY, ++ PVRSRVBridgeRGXSetComputeContextProperty, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR, ++ PVRSRVBridgeRGXGetLastDeviceError, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all rgxcmp functions with services ++ */ ++void DeinitRGXCMPBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, ++ PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, ++ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, ++ PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM2); ++ ++ 
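++ /* The unregistration list mirrors InitRGXCMPBridge entry for entry;
++  * keeping the two in the same order makes omissions easy to spot. */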
UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, ++ PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPROPERTY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTDEVICEERROR); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_rgxfwdbg_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxfwdbg_bridge.c +new file mode 100644 +index 000000000000..e66ce89c6eb9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_rgxfwdbg_bridge.c +@@ -0,0 +1,305 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for rgxfwdbg ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for rgxfwdbg ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#include ++ ++#include "img_defs.h" ++ ++#include "devicemem_server.h" ++#include "rgxfwdbg.h" ++#include "pmr.h" ++#include "rgxtimecorr.h" ++ ++#include "common_rgxfwdbg_bridge.h" ++ ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "connection_server.h" ++#include "pvr_bridge.h" ++#if defined(SUPPORT_RGX) ++#include "rgx_bridge.h" ++#endif ++#include "srvcore.h" ++#include "handle.h" ++ ++#include ++ ++/* *************************************************************************** ++ * Server-side bridge entry points ++ */ ++ ++static IMG_INT ++PVRSRVBridgeRGXFWDebugSetFWLog(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXFWDebugSetFWLogIN_UI8, ++ IMG_UINT8 * psRGXFWDebugSetFWLogOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogIN = ++ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *psRGXFWDebugSetFWLogOUT = ++ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETFWLOG *) IMG_OFFSET_ADDR(psRGXFWDebugSetFWLogOUT_UI8, ++ 0); ++ ++ psRGXFWDebugSetFWLogOUT->eError = ++ PVRSRVRGXFWDebugSetFWLogKM(psConnection, OSGetDevNode(psConnection), ++ psRGXFWDebugSetFWLogIN->ui32RGXFWLogType); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXFWDebugDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXFWDebugDumpFreelistPageListIN_UI8, ++ IMG_UINT8 * psRGXFWDebugDumpFreelistPageListOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListIN = ++ (PVRSRV_BRIDGE_IN_RGXFWDEBUGDUMPFREELISTPAGELIST *) ++ IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *psRGXFWDebugDumpFreelistPageListOUT = ++ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGDUMPFREELISTPAGELIST *) ++ IMG_OFFSET_ADDR(psRGXFWDebugDumpFreelistPageListOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psRGXFWDebugDumpFreelistPageListIN); ++ ++ psRGXFWDebugDumpFreelistPageListOUT->eError = ++ PVRSRVRGXFWDebugDumpFreelistPageListKM(psConnection, OSGetDevNode(psConnection)); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXFWDebugSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXFWDebugSetHCSDeadlineIN_UI8, ++ IMG_UINT8 * psRGXFWDebugSetHCSDeadlineOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineIN = ++ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETHCSDEADLINE *) ++ IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *psRGXFWDebugSetHCSDeadlineOUT = ++ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETHCSDEADLINE *) ++ IMG_OFFSET_ADDR(psRGXFWDebugSetHCSDeadlineOUT_UI8, 0); ++ ++ psRGXFWDebugSetHCSDeadlineOUT->eError = ++ PVRSRVRGXFWDebugSetHCSDeadlineKM(psConnection, OSGetDevNode(psConnection), ++ psRGXFWDebugSetHCSDeadlineIN->ui32RGXHCSDeadline); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXFWDebugSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXFWDebugSetOSidPriorityIN_UI8, ++ IMG_UINT8 * psRGXFWDebugSetOSidPriorityOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityIN = ++ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSIDPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *psRGXFWDebugSetOSidPriorityOUT = ++ 
(PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSIDPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXFWDebugSetOSidPriorityOUT_UI8, 0); ++ ++ psRGXFWDebugSetOSidPriorityOUT->eError = ++ PVRSRVRGXFWDebugSetOSidPriorityKM(psConnection, OSGetDevNode(psConnection), ++ psRGXFWDebugSetOSidPriorityIN->ui32OSid, ++ psRGXFWDebugSetOSidPriorityIN->ui32Priority); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXFWDebugSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateIN_UI8, ++ IMG_UINT8 * psRGXFWDebugSetOSNewOnlineStateOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateIN = ++ (PVRSRV_BRIDGE_IN_RGXFWDEBUGSETOSNEWONLINESTATE *) ++ IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *psRGXFWDebugSetOSNewOnlineStateOUT = ++ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGSETOSNEWONLINESTATE *) ++ IMG_OFFSET_ADDR(psRGXFWDebugSetOSNewOnlineStateOUT_UI8, 0); ++ ++ psRGXFWDebugSetOSNewOnlineStateOUT->eError = ++ PVRSRVRGXFWDebugSetOSNewOnlineStateKM(psConnection, OSGetDevNode(psConnection), ++ psRGXFWDebugSetOSNewOnlineStateIN->ui32OSid, ++ psRGXFWDebugSetOSNewOnlineStateIN-> ++ ui32OSNewState); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXFWDebugPHRConfigure(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXFWDebugPHRConfigureIN_UI8, ++ IMG_UINT8 * psRGXFWDebugPHRConfigureOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureIN = ++ (PVRSRV_BRIDGE_IN_RGXFWDEBUGPHRCONFIGURE *) ++ IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *psRGXFWDebugPHRConfigureOUT = ++ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGPHRCONFIGURE *) ++ IMG_OFFSET_ADDR(psRGXFWDebugPHRConfigureOUT_UI8, 0); ++ ++ psRGXFWDebugPHRConfigureOUT->eError = ++ PVRSRVRGXFWDebugPHRConfigureKM(psConnection, OSGetDevNode(psConnection), ++ psRGXFWDebugPHRConfigureIN->ui32ui32PHRMode); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXFWDebugWdgConfigure(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXFWDebugWdgConfigureIN_UI8, ++ IMG_UINT8 * psRGXFWDebugWdgConfigureOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureIN = ++ (PVRSRV_BRIDGE_IN_RGXFWDEBUGWDGCONFIGURE *) ++ IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *psRGXFWDebugWdgConfigureOUT = ++ (PVRSRV_BRIDGE_OUT_RGXFWDEBUGWDGCONFIGURE *) ++ IMG_OFFSET_ADDR(psRGXFWDebugWdgConfigureOUT_UI8, 0); ++ ++ psRGXFWDebugWdgConfigureOUT->eError = ++ PVRSRVRGXFWDebugWdgConfigureKM(psConnection, OSGetDevNode(psConnection), ++ psRGXFWDebugWdgConfigureIN->ui32ui32WdgPeriodUs); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXCurrentTimeIN_UI8, ++ IMG_UINT8 * psRGXCurrentTimeOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN = ++ (PVRSRV_BRIDGE_IN_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *) IMG_OFFSET_ADDR(psRGXCurrentTimeOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN); ++ ++ psRGXCurrentTimeOUT->eError = ++ PVRSRVRGXCurrentTime(psConnection, OSGetDevNode(psConnection), ++ &psRGXCurrentTimeOUT->ui64Time); ++ ++ return 0; ++} ++ ++/* 
*************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitRGXFWDBGBridge(void); ++void DeinitRGXFWDBGBridge(void); ++ ++/* ++ * Register all RGXFWDBG functions with services ++ */ ++PVRSRV_ERROR InitRGXFWDBGBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG, ++ PVRSRVBridgeRGXFWDebugSetFWLog, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST, ++ PVRSRVBridgeRGXFWDebugDumpFreelistPageList, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE, ++ PVRSRVBridgeRGXFWDebugSetHCSDeadline, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY, ++ PVRSRVBridgeRGXFWDebugSetOSidPriority, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE, ++ PVRSRVBridgeRGXFWDebugSetOSNewOnlineState, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE, ++ PVRSRVBridgeRGXFWDebugPHRConfigure, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE, ++ PVRSRVBridgeRGXFWDebugWdgConfigure, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME, ++ PVRSRVBridgeRGXCurrentTime, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all rgxfwdbg functions with services ++ */ ++void DeinitRGXFWDBGBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETFWLOG); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGDUMPFREELISTPAGELIST); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETHCSDEADLINE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSIDPRIORITY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGSETOSNEWONLINESTATE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGPHRCONFIGURE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, ++ PVRSRV_BRIDGE_RGXFWDBG_RGXFWDEBUGWDGCONFIGURE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXFWDBG, PVRSRV_BRIDGE_RGXFWDBG_RGXCURRENTTIME); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c +new file mode 100644 +index 000000000000..cc22ee30ecf3 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_rgxhwperf_bridge.c +@@ -0,0 +1,651 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for rgxhwperf ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for rgxhwperf ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#include ++ ++#include "img_defs.h" ++ ++#include "rgxhwperf.h" ++#include "rgx_fwif_km.h" ++ ++#include "common_rgxhwperf_bridge.h" ++ ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "connection_server.h" ++#include "pvr_bridge.h" ++#if defined(SUPPORT_RGX) ++#include "rgx_bridge.h" ++#endif ++#include "srvcore.h" ++#include "handle.h" ++ ++#include ++ ++/* *************************************************************************** ++ * Server-side bridge entry points ++ */ ++ ++static IMG_INT ++PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXCtrlHWPerfIN_UI8, ++ IMG_UINT8 * psRGXCtrlHWPerfOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN = ++ (PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *) IMG_OFFSET_ADDR(psRGXCtrlHWPerfOUT_UI8, 0); ++ ++ psRGXCtrlHWPerfOUT->eError = ++ PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevNode(psConnection), ++ psRGXCtrlHWPerfIN->ui32StreamId, ++ psRGXCtrlHWPerfIN->bToggle, psRGXCtrlHWPerfIN->ui64Mask); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsIN_UI8, ++ IMG_UINT8 * psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsIN = ++ (PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *) ++ IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsOUT = ++ (PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *) ++ IMG_OFFSET_ADDR(psRGXGetHWPerfBvncFeatureFlagsOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfBvncFeatureFlagsIN); ++ ++ psRGXGetHWPerfBvncFeatureFlagsOUT->eError = ++ PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(psConnection, OSGetDevNode(psConnection), ++ &psRGXGetHWPerfBvncFeatureFlagsOUT->sBVNC); ++ ++ return 0; ++} ++ ++static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX, ++ "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXConfigMuxHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXConfigMuxHWPerfCountersIN_UI8, ++ IMG_UINT8 * psRGXConfigMuxHWPerfCountersOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersIN = ++ (PVRSRV_BRIDGE_IN_RGXCONFIGMUXHWPERFCOUNTERS *) ++ IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *psRGXConfigMuxHWPerfCountersOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCONFIGMUXHWPERFCOUNTERS *) ++ IMG_OFFSET_ADDR(psRGXConfigMuxHWPerfCountersOUT_UI8, 0); ++ ++ RGX_HWPERF_CONFIG_MUX_CNTBLK *psBlockConfigsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * ++ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) + 0; ++ ++ if (unlikely(psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) ++ { ++ psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto 
RGXConfigMuxHWPerfCounters_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXConfigMuxHWPerfCounters_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXConfigMuxHWPerfCountersIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = ++ (IMG_BYTE *) (void *)psRGXConfigMuxHWPerfCountersIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXConfigMuxHWPerfCountersOUT->eError = ++ PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXConfigMuxHWPerfCounters_exit; ++ } ++ } ++ } ++ ++ if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen != 0) ++ { ++ psBlockConfigsInt = ++ (RGX_HWPERF_CONFIG_MUX_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ++ ui32NextOffset); ++ ui32NextOffset += ++ psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * ++ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, psBlockConfigsInt, ++ (const void __user *)psRGXConfigMuxHWPerfCountersIN->psBlockConfigs, ++ psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen * ++ sizeof(RGX_HWPERF_CONFIG_MUX_CNTBLK)) != PVRSRV_OK) ++ { ++ psRGXConfigMuxHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXConfigMuxHWPerfCounters_exit; ++ } ++ } ++ ++ psRGXConfigMuxHWPerfCountersOUT->eError = ++ PVRSRVRGXConfigMuxHWPerfCountersKM(psConnection, OSGetDevNode(psConnection), ++ psRGXConfigMuxHWPerfCountersIN->ui32ArrayLen, ++ psBlockConfigsInt); ++ ++RGXConfigMuxHWPerfCounters_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXConfigMuxHWPerfCountersOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX, ++ "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXControlHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXControlHWPerfBlocksIN_UI8, ++ IMG_UINT8 * psRGXControlHWPerfBlocksOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksIN = ++ (PVRSRV_BRIDGE_IN_RGXCONTROLHWPERFBLOCKS *) ++ IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *psRGXControlHWPerfBlocksOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCONTROLHWPERFBLOCKS *) ++ IMG_OFFSET_ADDR(psRGXControlHWPerfBlocksOUT_UI8, 0); ++ ++ IMG_UINT16 *ui16BlockIDsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif 
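++ /* Sizing idiom shared by these bridge handlers: the byte total is
++  * accumulated in 64-bit arithmetic, the element count is bounded
++  * against its fixed maximum, and any total that cannot be represented
++  * in 32 bits is refused before the cast. A minimal standalone sketch
++  * of the same overflow check (names here are hypothetical, not part
++  * of the generated code):
++  *
++  *   IMG_UINT64 ui64Total = (IMG_UINT64) ui32Count * sizeof(IMG_UINT16);
++  *   if (ui64Total > IMG_UINT32_MAX)
++  *           return PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
++  */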
++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) + 0; ++ ++ if (unlikely(psRGXControlHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) ++ { ++ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXControlHWPerfBlocks_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXControlHWPerfBlocks_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXControlHWPerfBlocksIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXControlHWPerfBlocksIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXControlHWPerfBlocks_exit; ++ } ++ } ++ } ++ ++ if (psRGXControlHWPerfBlocksIN->ui32ArrayLen != 0) ++ { ++ ui16BlockIDsInt = (IMG_UINT16 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui16BlockIDsInt, ++ (const void __user *)psRGXControlHWPerfBlocksIN->pui16BlockIDs, ++ psRGXControlHWPerfBlocksIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK) ++ { ++ psRGXControlHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXControlHWPerfBlocks_exit; ++ } ++ } ++ ++ psRGXControlHWPerfBlocksOUT->eError = ++ PVRSRVRGXControlHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), ++ psRGXControlHWPerfBlocksIN->bEnable, ++ psRGXControlHWPerfBlocksIN->ui32ArrayLen, ++ ui16BlockIDsInt); ++ ++RGXControlHWPerfBlocks_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXControlHWPerfBlocksOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static_assert(RGX_HWPERF_MAX_CUSTOM_CNTRS <= IMG_UINT32_MAX, ++ "RGX_HWPERF_MAX_CUSTOM_CNTRS must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXConfigCustomCountersIN_UI8, ++ IMG_UINT8 * psRGXConfigCustomCountersOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersIN = ++ (PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *) ++ IMG_OFFSET_ADDR(psRGXConfigCustomCountersIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *) ++ 
IMG_OFFSET_ADDR(psRGXConfigCustomCountersOUT_UI8, 0); ++ ++ IMG_UINT32 *ui32CustomCounterIDsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) + ++ 0; ++ ++ if (unlikely ++ (psRGXConfigCustomCountersIN->ui16NumCustomCounters > RGX_HWPERF_MAX_CUSTOM_CNTRS)) ++ { ++ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXConfigCustomCounters_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXConfigCustomCounters_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigCustomCountersIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXConfigCustomCounters_exit; ++ } ++ } ++ } ++ ++ if (psRGXConfigCustomCountersIN->ui16NumCustomCounters != 0) ++ { ++ ui32CustomCounterIDsInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32CustomCounterIDsInt, ++ (const void __user *)psRGXConfigCustomCountersIN->pui32CustomCounterIDs, ++ psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) != ++ PVRSRV_OK) ++ { ++ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXConfigCustomCounters_exit; ++ } ++ } ++ ++ psRGXConfigCustomCountersOUT->eError = ++ PVRSRVRGXConfigCustomCountersKM(psConnection, OSGetDevNode(psConnection), ++ psRGXConfigCustomCountersIN->ui16CustomBlockID, ++ psRGXConfigCustomCountersIN->ui16NumCustomCounters, ++ ui32CustomCounterIDsInt); ++ ++RGXConfigCustomCounters_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXConfigCustomCountersOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static_assert(RGXFWIF_HWPERF_CTRL_BLKS_MAX <= IMG_UINT32_MAX, ++ "RGXFWIF_HWPERF_CTRL_BLKS_MAX must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXConfigureHWPerfBlocks(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXConfigureHWPerfBlocksIN_UI8, ++ IMG_UINT8 * 
psRGXConfigureHWPerfBlocksOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksIN = ++ (PVRSRV_BRIDGE_IN_RGXCONFIGUREHWPERFBLOCKS *) ++ IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *psRGXConfigureHWPerfBlocksOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCONFIGUREHWPERFBLOCKS *) ++ IMG_OFFSET_ADDR(psRGXConfigureHWPerfBlocksOUT_UI8, 0); ++ ++ RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * ++ sizeof(RGX_HWPERF_CONFIG_CNTBLK)) + 0; ++ ++ if (unlikely(psRGXConfigureHWPerfBlocksIN->ui32ArrayLen > RGXFWIF_HWPERF_CTRL_BLKS_MAX)) ++ { ++ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXConfigureHWPerfBlocks_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXConfigureHWPerfBlocks_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXConfigureHWPerfBlocksIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXConfigureHWPerfBlocksIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXConfigureHWPerfBlocks_exit; ++ } ++ } ++ } ++ ++ if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen != 0) ++ { ++ psBlockConfigsInt = ++ (RGX_HWPERF_CONFIG_CNTBLK *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, psBlockConfigsInt, ++ (const void __user *)psRGXConfigureHWPerfBlocksIN->psBlockConfigs, ++ psRGXConfigureHWPerfBlocksIN->ui32ArrayLen * ++ sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK) ++ { ++ psRGXConfigureHWPerfBlocksOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXConfigureHWPerfBlocks_exit; ++ } ++ } ++ ++ psRGXConfigureHWPerfBlocksOUT->eError = ++ PVRSRVRGXConfigureHWPerfBlocksKM(psConnection, OSGetDevNode(psConnection), ++ psRGXConfigureHWPerfBlocksIN->ui32CtrlWord, ++ psRGXConfigureHWPerfBlocksIN->ui32ArrayLen, ++ psBlockConfigsInt); ++ ++RGXConfigureHWPerfBlocks_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXConfigureHWPerfBlocksOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && 
pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitRGXHWPERFBridge(void); ++void DeinitRGXHWPERFBridge(void); ++ ++/* ++ * Register all RGXHWPERF functions with services ++ */ ++PVRSRV_ERROR InitRGXHWPERFBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, ++ PVRSRVBridgeRGXCtrlHWPerf, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS, ++ PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS, ++ PVRSRVBridgeRGXConfigMuxHWPerfCounters, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS, ++ PVRSRVBridgeRGXControlHWPerfBlocks, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS, ++ PVRSRVBridgeRGXConfigCustomCounters, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS, ++ PVRSRVBridgeRGXConfigureHWPerfBlocks, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all rgxhwperf functions with services ++ */ ++void DeinitRGXHWPERFBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGMUXHWPERFCOUNTERS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXCONTROLHWPERFBLOCKS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, ++ PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGUREHWPERFBLOCKS); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c +new file mode 100644 +index 000000000000..25f68f302280 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_rgxkicksync_bridge.c +@@ -0,0 +1,579 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for rgxkicksync ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for rgxkicksync ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*******************************************************************************/ ++ ++#include ++ ++#include "img_defs.h" ++ ++#include "rgxkicksync.h" ++ ++#include "common_rgxkicksync_bridge.h" ++ ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "connection_server.h" ++#include "pvr_bridge.h" ++#if defined(SUPPORT_RGX) ++#include "rgx_bridge.h" ++#endif ++#include "srvcore.h" ++#include "handle.h" ++ ++#include ++ ++/* *************************************************************************** ++ * Server-side bridge entry points ++ */ ++ ++static PVRSRV_ERROR _RGXCreateKickSyncContextpsKickSyncContextIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PVRSRVRGXDestroyKickSyncContextKM((RGX_SERVER_KICKSYNC_CONTEXT *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXCreateKickSyncContextIN_UI8, ++ IMG_UINT8 * psRGXCreateKickSyncContextOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN = ++ (PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXCreateKickSyncContextIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXCreateKickSyncContextOUT_UI8, 0); ++ ++ IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData; ++ IMG_HANDLE hPrivDataInt = NULL; ++ RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXCreateKickSyncContextOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hPrivDataInt, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateKickSyncContext_exit; ++ } ++ /* Release now we have looked up handles. 
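The reference taken on the private-data handle is held across the KM call and dropped on the exit path below, where the partially created context is also destroyed if the subsequent handle allocation fails.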
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateKickSyncContextOUT->eError = ++ PVRSRVRGXCreateKickSyncContextKM(psConnection, OSGetDevNode(psConnection), ++ hPrivDataInt, ++ psRGXCreateKickSyncContextIN->ui32PackedCCBSizeU88, ++ psRGXCreateKickSyncContextIN->ui32ContextFlags, ++ &psKickSyncContextInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXCreateKickSyncContext_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateKickSyncContextOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXCreateKickSyncContextOUT->hKickSyncContext, ++ (void *)psKickSyncContextInt, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXCreateKickSyncContextpsKickSyncContextIntRelease); ++ if (unlikely(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateKickSyncContext_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXCreateKickSyncContext_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK) ++ { ++ if (psKickSyncContextInt) ++ { ++ PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXDestroyKickSyncContextIN_UI8, ++ IMG_UINT8 * psRGXDestroyKickSyncContextOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextIN = ++ (PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextOUT = ++ (PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXDestroyKickSyncContextOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXDestroyKickSyncContextOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXDestroyKickSyncContextIN-> ++ hKickSyncContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); ++ if (unlikely ++ ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK) ++ && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRGXDestroyKickSyncContextOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXDestroyKickSyncContext_exit; ++ } ++ ++ /* Release now we have destroyed handles. 
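Note that PVRSRV_ERROR_KERNEL_CCB_FULL and PVRSRV_ERROR_RETRY deliberately bypass the diagnostic above, presumably so a transient condition can be retried by the caller of this staged destruction.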
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXDestroyKickSyncContext_exit: ++ ++ return 0; ++} ++ ++static_assert(PVRSRV_MAX_DEV_VARS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_DEV_VARS must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, ++ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXKickSync2(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXKickSync2IN_UI8, ++ IMG_UINT8 * psRGXKickSync2OUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *psRGXKickSync2IN = ++ (PVRSRV_BRIDGE_IN_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2IN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *psRGXKickSync2OUT = ++ (PVRSRV_BRIDGE_OUT_RGXKICKSYNC2 *) IMG_OFFSET_ADDR(psRGXKickSync2OUT_UI8, 0); ++ ++ IMG_HANDLE hKickSyncContext = psRGXKickSync2IN->hKickSyncContext; ++ RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; ++ SYNC_PRIMITIVE_BLOCK **psUpdateUFODevVarBlockInt = NULL; ++ IMG_HANDLE *hUpdateUFODevVarBlockInt2 = NULL; ++ IMG_UINT32 *ui32UpdateDevVarOffsetInt = NULL; ++ IMG_UINT32 *ui32UpdateValueInt = NULL; ++ IMG_CHAR *uiUpdateFenceNameInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)) + ++ ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + ++ ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely(psRGXKickSync2IN->ui32ClientUpdateCount > PVRSRV_MAX_DEV_VARS)) ++ { ++ psRGXKickSync2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickSync2_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXKickSync2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXKickSync2_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXKickSync2IN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickSync2IN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXKickSync2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXKickSync2_exit; ++ } ++ } ++ } ++ ++ if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) ++ { ++ psUpdateUFODevVarBlockInt = ++ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psUpdateUFODevVarBlockInt, 0, ++ psRGXKickSync2IN->ui32ClientUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)); ++ ui32NextOffset += ++ psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); ++ hUpdateUFODevVarBlockInt2 = ++ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hUpdateUFODevVarBlockInt2, ++ (const void __user *)psRGXKickSync2IN->phUpdateUFODevVarBlock, ++ psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickSync2_exit; ++ } ++ } ++ if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) ++ { ++ ui32UpdateDevVarOffsetInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32UpdateDevVarOffsetInt, ++ (const void __user *)psRGXKickSync2IN->pui32UpdateDevVarOffset, ++ psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickSync2_exit; ++ } ++ } ++ if (psRGXKickSync2IN->ui32ClientUpdateCount != 0) ++ { ++ ui32UpdateValueInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32UpdateValueInt, ++ (const void __user *)psRGXKickSync2IN->pui32UpdateValue, ++ psRGXKickSync2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickSync2_exit; ++ } ++ } ++ ++ { ++ uiUpdateFenceNameInt = ++ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiUpdateFenceNameInt, ++ (const void __user *)psRGXKickSync2IN->puiUpdateFenceName, ++ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psRGXKickSync2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickSync2_exit; ++ } ++ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - ++ 1] = '\0'; ++ } ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXKickSync2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psKickSyncContextInt, ++ hKickSyncContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickSync2_exit; ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickSync2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psUpdateUFODevVarBlockInt[i], ++ hUpdateUFODevVarBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, ++ IMG_TRUE); ++ if (unlikely(psRGXKickSync2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickSync2_exit; ++ } ++ } ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXKickSync2OUT->eError = ++ PVRSRVRGXKickSyncKM(psKickSyncContextInt, ++ psRGXKickSync2IN->ui32ClientUpdateCount, ++ psUpdateUFODevVarBlockInt, ++ ui32UpdateDevVarOffsetInt, ++ ui32UpdateValueInt, ++ psRGXKickSync2IN->hCheckFenceFD, ++ psRGXKickSync2IN->hTimelineFenceFD, ++ &psRGXKickSync2OUT->hUpdateFenceFD, ++ uiUpdateFenceNameInt, psRGXKickSync2IN->ui32ExtJobRef); ++ ++RGXKickSync2_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psKickSyncContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hKickSyncContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); ++ } ++ ++ if (hUpdateUFODevVarBlockInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickSync2IN->ui32ClientUpdateCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psUpdateUFODevVarBlockInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hUpdateUFODevVarBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ } ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXKickSync2OUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXSetKickSyncContextProperty(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXSetKickSyncContextPropertyIN_UI8, ++ IMG_UINT8 * psRGXSetKickSyncContextPropertyOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyIN = ++ (PVRSRV_BRIDGE_IN_RGXSETKICKSYNCCONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *psRGXSetKickSyncContextPropertyOUT = ++ (PVRSRV_BRIDGE_OUT_RGXSETKICKSYNCCONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXSetKickSyncContextPropertyOUT_UI8, 0); ++ ++ IMG_HANDLE hKickSyncContext = psRGXSetKickSyncContextPropertyIN->hKickSyncContext; ++ RGX_SERVER_KICKSYNC_CONTEXT *psKickSyncContextInt = NULL; ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXSetKickSyncContextPropertyOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psKickSyncContextInt, ++ hKickSyncContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXSetKickSyncContextPropertyOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSetKickSyncContextProperty_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXSetKickSyncContextPropertyOUT->eError = ++ PVRSRVRGXSetKickSyncContextPropertyKM(psKickSyncContextInt, ++ psRGXSetKickSyncContextPropertyIN->ui32Property, ++ psRGXSetKickSyncContextPropertyIN->ui64Input, ++ &psRGXSetKickSyncContextPropertyOUT->ui64Output); ++ ++RGXSetKickSyncContextProperty_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psKickSyncContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hKickSyncContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitRGXKICKSYNCBridge(void); ++void DeinitRGXKICKSYNCBridge(void); ++ ++/* ++ * Register all RGXKICKSYNC functions with services ++ */ ++PVRSRV_ERROR InitRGXKICKSYNCBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, ++ PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT, ++ PVRSRVBridgeRGXCreateKickSyncContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, ++ PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT, ++ PVRSRVBridgeRGXDestroyKickSyncContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2, ++ PVRSRVBridgeRGXKickSync2, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, ++ PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY, ++ PVRSRVBridgeRGXSetKickSyncContextProperty, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all rgxkicksync functions with services ++ */ ++void DeinitRGXKICKSYNCBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, ++ PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, ++ PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC2); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, ++ PVRSRV_BRIDGE_RGXKICKSYNC_RGXSETKICKSYNCCONTEXTPROPERTY); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_rgxregconfig_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxregconfig_bridge.c +new file mode 100644 +index 000000000000..4cdcb127299b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_rgxregconfig_bridge.c +@@ -0,0 +1,239 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for rgxregconfig ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for rgxregconfig ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#include <linux/uaccess.h> ++ ++#include "img_defs.h" ++ ++#include "rgxregconfig.h" ++ ++#include "common_rgxregconfig_bridge.h" ++ ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "connection_server.h" ++#include "pvr_bridge.h" ++#if defined(SUPPORT_RGX) ++#include "rgx_bridge.h" ++#endif ++#include "srvcore.h" ++#include "handle.h" ++ ++#include <linux/slab.h> ++ ++#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) ++ ++/* *************************************************************************** ++ * Server-side bridge entry points ++ */ ++ ++static IMG_INT ++PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXSetRegConfigTypeIN_UI8, ++ IMG_UINT8 * psRGXSetRegConfigTypeOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN = ++ (PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT = ++ (PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *) IMG_OFFSET_ADDR(psRGXSetRegConfigTypeOUT_UI8, ++ 0); ++ ++ psRGXSetRegConfigTypeOUT->eError = ++ PVRSRVRGXSetRegConfigTypeKM(psConnection, OSGetDevNode(psConnection), ++ psRGXSetRegConfigTypeIN->ui8RegPowerIsland); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXAddRegconfigIN_UI8, ++ IMG_UINT8 * psRGXAddRegconfigOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN = ++ (PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT = ++ (PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *) IMG_OFFSET_ADDR(psRGXAddRegconfigOUT_UI8, 0); ++ ++ psRGXAddRegconfigOUT->eError = ++ PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevNode(psConnection), ++ psRGXAddRegconfigIN->ui32RegAddr, ++ psRGXAddRegconfigIN->ui64RegValue, ++ psRGXAddRegconfigIN->ui64RegMask); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXClearRegConfigIN_UI8, ++ IMG_UINT8 * psRGXClearRegConfigOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN = ++ (PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *) IMG_OFFSET_ADDR(psRGXClearRegConfigOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN); ++ ++ psRGXClearRegConfigOUT->eError = ++ PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevNode(psConnection)); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXEnableRegConfigIN_UI8, ++ IMG_UINT8 * psRGXEnableRegConfigOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN = ++ (PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT = ++ (PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXEnableRegConfigOUT_UI8, ++ 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN); ++ ++ psRGXEnableRegConfigOUT->eError = ++ PVRSRVRGXEnableRegConfigKM(psConnection, OSGetDevNode(psConnection)); ++ ++ return 0; ++} ++ ++static IMG_INT
++PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXDisableRegConfigIN_UI8, ++ IMG_UINT8 * psRGXDisableRegConfigOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN = ++ (PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT = ++ (PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *) IMG_OFFSET_ADDR(psRGXDisableRegConfigOUT_UI8, ++ 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN); ++ ++ psRGXDisableRegConfigOUT->eError = ++ PVRSRVRGXDisableRegConfigKM(psConnection, OSGetDevNode(psConnection)); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */ ++ ++#if !defined(EXCLUDE_RGXREGCONFIG_BRIDGE) ++PVRSRV_ERROR InitRGXREGCONFIGBridge(void); ++void DeinitRGXREGCONFIGBridge(void); ++ ++/* ++ * Register all RGXREGCONFIG functions with services ++ */ ++PVRSRV_ERROR InitRGXREGCONFIGBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE, ++ PVRSRVBridgeRGXSetRegConfigType, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG, ++ PVRSRVBridgeRGXAddRegconfig, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG, ++ PVRSRVBridgeRGXClearRegConfig, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG, ++ PVRSRVBridgeRGXEnableRegConfig, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG, ++ PVRSRVBridgeRGXDisableRegConfig, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all rgxregconfig functions with services ++ */ ++void DeinitRGXREGCONFIGBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXSETREGCONFIGTYPE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXADDREGCONFIG); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXCLEARREGCONFIG); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXENABLEREGCONFIG); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXREGCONFIG, ++ PVRSRV_BRIDGE_RGXREGCONFIG_RGXDISABLEREGCONFIG); ++ ++} ++#else /* EXCLUDE_RGXREGCONFIG_BRIDGE */ ++/* This bridge is conditional on EXCLUDE_RGXREGCONFIG_BRIDGE - when defined, ++ * do not populate the dispatch table with its functions ++ */ ++#define InitRGXREGCONFIGBridge() \ ++ PVRSRV_OK ++ ++#define DeinitRGXREGCONFIGBridge() ++ ++#endif /* EXCLUDE_RGXREGCONFIG_BRIDGE */ +diff --git a/drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c +new file mode 100644 +index 000000000000..44300ec851ee +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_rgxta3d_bridge.c +@@ -0,0 +1,2406 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for rgxta3d ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for rgxta3d ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/ ++ ++#include <linux/uaccess.h> ++ ++#include "img_defs.h" ++ ++#include "rgxta3d.h" ++ ++#include "common_rgxta3d_bridge.h" ++ ++#include "allocmem.h" ++#include "pvr_debug.h" ++#include "connection_server.h" ++#include "pvr_bridge.h" ++#if defined(SUPPORT_RGX) ++#include "rgx_bridge.h" ++#endif ++#include "srvcore.h" ++#include "handle.h" ++ ++#include <linux/slab.h> ++ ++/* *************************************************************************** ++ * Server-side bridge entry points ++ */ ++ ++static PVRSRV_ERROR _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = RGXDestroyHWRTDataSet((RGX_KM_HW_RT_DATASET *) pvData); ++ return eError; ++} ++ ++static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, ++ "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, ++ "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXMKIF_NUM_RTDATA_FREELISTS <= IMG_UINT32_MAX, ++ "RGXMKIF_NUM_RTDATA_FREELISTS must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, ++ "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, ++ "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXMKIF_NUM_RTDATAS <= IMG_UINT32_MAX, ++ "RGXMKIF_NUM_RTDATAS must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXMKIF_NUM_GEOMDATAS <= IMG_UINT32_MAX, ++ "RGXMKIF_NUM_GEOMDATAS must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXCreateHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXCreateHWRTDataSetIN_UI8, ++ IMG_UINT8 * psRGXCreateHWRTDataSetOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetIN = ++ (PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATASET *) IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *psRGXCreateHWRTDataSetOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATASET *) ++ IMG_OFFSET_ADDR(psRGXCreateHWRTDataSetOUT_UI8, 0); ++ ++ IMG_DEV_VIRTADDR *sVHeapTableDevVAddrInt = NULL; ++ IMG_DEV_VIRTADDR *sPMMlistDevVAddrInt = NULL; ++ RGX_FREELIST **psapsFreeListsInt = NULL; ++ IMG_HANDLE *hapsFreeListsInt2 = NULL; ++ IMG_DEV_VIRTADDR *sTailPtrsDevVAddrInt = NULL; ++ IMG_DEV_VIRTADDR *sMacrotileArrayDevVAddrInt = NULL; ++ IMG_DEV_VIRTADDR *sRgnHeaderDevVAddrInt = NULL; ++ IMG_DEV_VIRTADDR *sRTCDevVAddrInt = NULL; ++ RGX_KM_HW_RT_DATASET **psKmHwRTDataSetInt = NULL; ++ IMG_HANDLE *hKmHwRTDataSetInt2 = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + ++ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + ++ ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)) + ++ ((IMG_UINT64) RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) + ++ ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + ++ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + ++ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) + ++ ((IMG_UINT64) RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) + ++ ((IMG_UINT64) RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) + ++ ((IMG_UINT64)
RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE)) + 0; ++ ++ psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet = psRGXCreateHWRTDataSetIN->phKmHwRTDataSet; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXCreateHWRTDataSetIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateHWRTDataSetIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ } ++ ++ { ++ sVHeapTableDevVAddrInt = ++ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); ++ } ++ ++ /* Copy the data over */ ++ if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, sVHeapTableDevVAddrInt, ++ (const void __user *)psRGXCreateHWRTDataSetIN->psVHeapTableDevVAddr, ++ RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ ++ { ++ sPMMlistDevVAddrInt = ++ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); ++ } ++ ++ /* Copy the data over */ ++ if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, sPMMlistDevVAddrInt, ++ (const void __user *)psRGXCreateHWRTDataSetIN->psPMMlistDevVAddr, ++ RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ ++ { ++ psapsFreeListsInt = ++ (RGX_FREELIST **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psapsFreeListsInt, 0, ++ RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *)); ++ ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(RGX_FREELIST *); ++ hapsFreeListsInt2 = ++ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hapsFreeListsInt2, ++ (const void __user *)psRGXCreateHWRTDataSetIN->phapsFreeLists, ++ RGXMKIF_NUM_RTDATA_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ ++ { ++ sTailPtrsDevVAddrInt = ++ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); ++ } ++ ++ /* Copy the data over */ ++ if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) ++ 
{ ++ if (OSCopyFromUser ++ (NULL, sTailPtrsDevVAddrInt, ++ (const void __user *)psRGXCreateHWRTDataSetIN->psTailPtrsDevVAddr, ++ RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ ++ { ++ sMacrotileArrayDevVAddrInt = ++ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); ++ } ++ ++ /* Copy the data over */ ++ if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, sMacrotileArrayDevVAddrInt, ++ (const void __user *)psRGXCreateHWRTDataSetIN->psMacrotileArrayDevVAddr, ++ RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ ++ { ++ sRgnHeaderDevVAddrInt = ++ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR); ++ } ++ ++ /* Copy the data over */ ++ if (RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, sRgnHeaderDevVAddrInt, ++ (const void __user *)psRGXCreateHWRTDataSetIN->psRgnHeaderDevVAddr, ++ RGXMKIF_NUM_RTDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ ++ { ++ sRTCDevVAddrInt = ++ (IMG_DEV_VIRTADDR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR); ++ } ++ ++ /* Copy the data over */ ++ if (RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, sRTCDevVAddrInt, ++ (const void __user *)psRGXCreateHWRTDataSetIN->psRTCDevVAddr, ++ RGXMKIF_NUM_GEOMDATAS * sizeof(IMG_DEV_VIRTADDR)) != PVRSRV_OK) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ if (IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset) != NULL) ++ { ++ psKmHwRTDataSetInt = ++ (RGX_KM_HW_RT_DATASET **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psKmHwRTDataSetInt, 0, ++ RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)); ++ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *); ++ hKmHwRTDataSetInt2 = ++ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += RGXMKIF_NUM_RTDATAS * sizeof(IMG_HANDLE); ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXCreateHWRTDataSetOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psapsFreeListsInt[i], ++ hapsFreeListsInt2[i], ++ PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); ++ if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ } ++ /* Release now we have looked up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateHWRTDataSetOUT->eError = ++ RGXCreateHWRTDataSet(psConnection, OSGetDevNode(psConnection), ++ sVHeapTableDevVAddrInt, ++ sPMMlistDevVAddrInt, ++ psapsFreeListsInt, ++ psRGXCreateHWRTDataSetIN->ui32PPPScreen, ++ psRGXCreateHWRTDataSetIN->ui64MultiSampleCtl, ++ psRGXCreateHWRTDataSetIN->ui64FlippedMultiSampleCtl, ++ psRGXCreateHWRTDataSetIN->ui32TPCStride, ++ sTailPtrsDevVAddrInt, ++ psRGXCreateHWRTDataSetIN->ui32TPCSize, ++ psRGXCreateHWRTDataSetIN->ui32TEScreen, ++ psRGXCreateHWRTDataSetIN->ui32TEAA, ++ psRGXCreateHWRTDataSetIN->ui32TEMTILE1, ++ psRGXCreateHWRTDataSetIN->ui32TEMTILE2, ++ psRGXCreateHWRTDataSetIN->ui32MTileStride, ++ psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerX, ++ psRGXCreateHWRTDataSetIN->ui32ISPMergeLowerY, ++ psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperX, ++ psRGXCreateHWRTDataSetIN->ui32ISPMergeUpperY, ++ psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleX, ++ psRGXCreateHWRTDataSetIN->ui32ISPMergeScaleY, ++ sMacrotileArrayDevVAddrInt, ++ sRgnHeaderDevVAddrInt, ++ sRTCDevVAddrInt, ++ psRGXCreateHWRTDataSetIN->ui32RgnHeaderSize, ++ psRGXCreateHWRTDataSetIN->ui32ISPMtileSize, ++ psRGXCreateHWRTDataSetIN->ui16MaxRTs, psKmHwRTDataSetInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ if (hKmHwRTDataSetInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) ++ { ++ ++ psRGXCreateHWRTDataSetOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &hKmHwRTDataSetInt2[i], ++ (void *)psKmHwRTDataSetInt[i], ++ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXCreateHWRTDataSetpsKmHwRTDataSetIntRelease); ++ if (unlikely(psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ ++ } ++ } ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((hKmHwRTDataSetInt2) && ((RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psRGXCreateHWRTDataSetOUT->phKmHwRTDataSet, ++ hKmHwRTDataSetInt2, ++ (RGXMKIF_NUM_RTDATAS * sizeof(RGX_KM_HW_RT_DATASET *))) != PVRSRV_OK)) ++ { ++ psRGXCreateHWRTDataSetOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateHWRTDataSet_exit; ++ } ++ } ++ ++RGXCreateHWRTDataSet_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ if (hapsFreeListsInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < RGXMKIF_NUM_RTDATA_FREELISTS; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psapsFreeListsInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hapsFreeListsInt2[i], ++ PVRSRV_HANDLE_TYPE_RGX_FREELIST); ++ } ++ } ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRGXCreateHWRTDataSetOUT->eError != PVRSRV_OK) ++ { ++ { ++ IMG_UINT32 i; ++ ++ if (hKmHwRTDataSetInt2) ++ { ++ for (i = 0; i < RGXMKIF_NUM_RTDATAS; i++) ++ { ++ if (hKmHwRTDataSetInt2[i]) ++ { ++ RGXDestroyHWRTDataSet(hKmHwRTDataSetInt2[i]); ++ } ++ } ++ } ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXCreateHWRTDataSetOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXDestroyHWRTDataSet(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXDestroyHWRTDataSetIN_UI8, ++ IMG_UINT8 * psRGXDestroyHWRTDataSetOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetIN = ++ (PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATASET *) ++ IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *psRGXDestroyHWRTDataSetOUT = ++ (PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATASET *) ++ IMG_OFFSET_ADDR(psRGXDestroyHWRTDataSetOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXDestroyHWRTDataSetOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXDestroyHWRTDataSetIN-> ++ hKmHwRTDataSet, ++ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); ++ if (unlikely ++ ((psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_OK) ++ && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psRGXDestroyHWRTDataSetOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRGXDestroyHWRTDataSetOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXDestroyHWRTDataSet_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXDestroyHWRTDataSet_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _RGXCreateZSBufferpssZSBufferKMIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = RGXDestroyZSBufferKM((RGX_ZSBUFFER_DATA *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXCreateZSBufferIN_UI8, ++ IMG_UINT8 * psRGXCreateZSBufferOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN = ++ (PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXCreateZSBufferOUT_UI8, 0); ++ ++ IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation; ++ DEVMEMINT_RESERVATION *psReservationInt = NULL; ++ IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR; ++ PMR *psPMRInt = NULL; ++ RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXCreateZSBufferOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psReservationInt, ++ hReservation, ++ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION, IMG_TRUE); ++ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateZSBuffer_exit; ++ } ++ ++ /* Look up the address from the handle */ ++ psRGXCreateZSBufferOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRInt, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateZSBuffer_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateZSBufferOUT->eError = ++ RGXCreateZSBufferKM(psConnection, OSGetDevNode(psConnection), ++ psReservationInt, ++ psPMRInt, psRGXCreateZSBufferIN->uiMapFlags, &pssZSBufferKMInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXCreateZSBuffer_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXCreateZSBufferOUT-> ++ hsZSBufferKM, ++ (void *)pssZSBufferKMInt, ++ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXCreateZSBufferpssZSBufferKMIntRelease); ++ if (unlikely(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateZSBuffer_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXCreateZSBuffer_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psReservationInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hReservation, PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION); ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK) ++ { ++ if (pssZSBufferKMInt) ++ { ++ RGXDestroyZSBufferKM(pssZSBufferKMInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXDestroyZSBufferIN_UI8, ++ IMG_UINT8 * psRGXDestroyZSBufferOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN = ++ (PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT = ++ (PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *) IMG_OFFSET_ADDR(psRGXDestroyZSBufferOUT_UI8, ++ 0); ++ ++ /* Lock over handle destruction. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXDestroyZSBufferOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXDestroyZSBufferIN-> ++ hsZSBufferMemDesc, ++ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); ++ if (unlikely ++ ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) ++ && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRGXDestroyZSBufferOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXDestroyZSBuffer_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXDestroyZSBuffer_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _RGXPopulateZSBufferpssPopulationIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = RGXUnpopulateZSBufferKM((RGX_POPULATION *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXPopulateZSBufferIN_UI8, ++ IMG_UINT8 * psRGXPopulateZSBufferOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN = ++ (PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT = ++ (PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *) IMG_OFFSET_ADDR(psRGXPopulateZSBufferOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM; ++ RGX_ZSBUFFER_DATA *pssZSBufferKMInt = NULL; ++ RGX_POPULATION *pssPopulationInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXPopulateZSBufferOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&pssZSBufferKMInt, ++ hsZSBufferKM, ++ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); ++ if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXPopulateZSBuffer_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXPopulateZSBufferOUT->eError = ++ RGXPopulateZSBufferKM(pssZSBufferKMInt, &pssPopulationInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXPopulateZSBuffer_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXPopulateZSBufferOUT-> ++ hsPopulation, ++ (void *)pssPopulationInt, ++ PVRSRV_HANDLE_TYPE_RGX_POPULATION, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXPopulateZSBufferpssPopulationIntRelease); ++ if (unlikely(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXPopulateZSBuffer_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXPopulateZSBuffer_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (pssZSBufferKMInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hsZSBufferKM, PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK) ++ { ++ if (pssPopulationInt) ++ { ++ RGXUnpopulateZSBufferKM(pssPopulationInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXUnpopulateZSBufferIN_UI8, ++ IMG_UINT8 * psRGXUnpopulateZSBufferOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN = ++ (PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *) ++ IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT = ++ (PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *) ++ IMG_OFFSET_ADDR(psRGXUnpopulateZSBufferOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXUnpopulateZSBufferOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation, ++ PVRSRV_HANDLE_TYPE_RGX_POPULATION); ++ if (unlikely((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) && ++ (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRGXUnpopulateZSBufferOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXUnpopulateZSBuffer_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXUnpopulateZSBuffer_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _RGXCreateFreeListpsCleanupCookieIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = RGXDestroyFreeList((RGX_FREELIST *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXCreateFreeListIN_UI8, ++ IMG_UINT8 * psRGXCreateFreeListOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN = ++ (PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *) IMG_OFFSET_ADDR(psRGXCreateFreeListOUT_UI8, 0); ++ ++ IMG_HANDLE hMemCtxPrivData = psRGXCreateFreeListIN->hMemCtxPrivData; ++ IMG_HANDLE hMemCtxPrivDataInt = NULL; ++ IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList; ++ RGX_FREELIST *pssGlobalFreeListInt = NULL; ++ IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR; ++ PMR *pssFreeListPMRInt = NULL; ++ RGX_FREELIST *psCleanupCookieInt = NULL; ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXCreateFreeListOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hMemCtxPrivDataInt, ++ hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateFreeList_exit; ++ } ++ ++ if (psRGXCreateFreeListIN->hsGlobalFreeList) ++ { ++ /* Look up the address from the handle */ ++ psRGXCreateFreeListOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&pssGlobalFreeListInt, ++ hsGlobalFreeList, ++ PVRSRV_HANDLE_TYPE_RGX_FREELIST, IMG_TRUE); ++ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateFreeList_exit; ++ } ++ } ++ ++ /* Look up the address from the handle */ ++ psRGXCreateFreeListOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&pssFreeListPMRInt, ++ hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateFreeList_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateFreeListOUT->eError = ++ RGXCreateFreeList(psConnection, OSGetDevNode(psConnection), ++ hMemCtxPrivDataInt, ++ psRGXCreateFreeListIN->ui32MaxFLPages, ++ psRGXCreateFreeListIN->ui32InitFLPages, ++ psRGXCreateFreeListIN->ui32GrowFLPages, ++ psRGXCreateFreeListIN->ui32GrowParamThreshold, ++ pssGlobalFreeListInt, ++ psRGXCreateFreeListIN->bbFreeListCheck, ++ psRGXCreateFreeListIN->spsFreeListDevVAddr, ++ pssFreeListPMRInt, ++ psRGXCreateFreeListIN->uiPMROffset, &psCleanupCookieInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXCreateFreeList_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXCreateFreeListOUT-> ++ hCleanupCookie, ++ (void *)psCleanupCookieInt, ++ PVRSRV_HANDLE_TYPE_RGX_FREELIST, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXCreateFreeListpsCleanupCookieIntRelease); ++ if (unlikely(psRGXCreateFreeListOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateFreeList_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXCreateFreeList_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hMemCtxPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hMemCtxPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ ++ if (psRGXCreateFreeListIN->hsGlobalFreeList) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (pssGlobalFreeListInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hsGlobalFreeList, ++ PVRSRV_HANDLE_TYPE_RGX_FREELIST); ++ } ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (pssFreeListPMRInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hsFreeListPMR, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRGXCreateFreeListOUT->eError != PVRSRV_OK) ++ { ++ if (psCleanupCookieInt) ++ { ++ RGXDestroyFreeList(psCleanupCookieInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXDestroyFreeListIN_UI8, ++ IMG_UINT8 * psRGXDestroyFreeListOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN = ++ (PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT = ++ (PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *) IMG_OFFSET_ADDR(psRGXDestroyFreeListOUT_UI8, ++ 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXDestroyFreeListOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie, ++ PVRSRV_HANDLE_TYPE_RGX_FREELIST); ++ if (unlikely((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) && ++ (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRGXDestroyFreeListOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXDestroyFreeList_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXDestroyFreeList_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _RGXCreateRenderContextpsRenderContextIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PVRSRVRGXDestroyRenderContextKM((RGX_SERVER_RENDER_CONTEXT *) pvData); ++ return eError; ++} ++ ++static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX, ++ "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXFWIF_STATIC_RENDERCONTEXT_SIZE <= IMG_UINT32_MAX, ++ "RGXFWIF_STATIC_RENDERCONTEXT_SIZE must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXCreateRenderContextIN_UI8, ++ IMG_UINT8 * psRGXCreateRenderContextOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN = ++ (PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXCreateRenderContextIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT = ++ (PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXCreateRenderContextOUT_UI8, 0); ++ ++ IMG_BYTE *ui8FrameworkCmdInt = NULL; ++ IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData; ++ IMG_HANDLE hPrivDataInt = NULL; ++ IMG_BYTE *ui8StaticRenderContextStateInt = NULL; ++ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) + ++ ((IMG_UINT64) psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * ++ sizeof(IMG_BYTE)) + 0; ++ ++ if (unlikely(psRGXCreateRenderContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE)) ++ { ++ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXCreateRenderContext_exit; ++ } ++ ++ if (unlikely 
++ (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize > ++ RGXFWIF_STATIC_RENDERCONTEXT_SIZE)) ++ { ++ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXCreateRenderContext_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXCreateRenderContext_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateRenderContextIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXCreateRenderContext_exit; ++ } ++ } ++ } ++ ++ if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize != 0) ++ { ++ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8FrameworkCmdInt, ++ (const void __user *)psRGXCreateRenderContextIN->pui8FrameworkCmd, ++ psRGXCreateRenderContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != ++ PVRSRV_OK) ++ { ++ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateRenderContext_exit; ++ } ++ } ++ if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize != 0) ++ { ++ ui8StaticRenderContextStateInt = ++ (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8StaticRenderContextStateInt, ++ (const void __user *)psRGXCreateRenderContextIN->pui8StaticRenderContextState, ++ psRGXCreateRenderContextIN->ui32StaticRenderContextStateSize * ++ sizeof(IMG_BYTE)) != PVRSRV_OK) ++ { ++ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateRenderContext_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXCreateRenderContextOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hPrivDataInt, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateRenderContext_exit; ++ } ++ /* Release now we have looked up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateRenderContextOUT->eError = ++ PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevNode(psConnection), ++ psRGXCreateRenderContextIN->ui32Priority, ++ psRGXCreateRenderContextIN->sVDMCallStackAddr, ++ psRGXCreateRenderContextIN->ui32ui32CallStackDepth, ++ psRGXCreateRenderContextIN->ui32FrameworkCmdSize, ++ ui8FrameworkCmdInt, ++ hPrivDataInt, ++ psRGXCreateRenderContextIN-> ++ ui32StaticRenderContextStateSize, ++ ui8StaticRenderContextStateInt, ++ psRGXCreateRenderContextIN->ui32PackedCCBSizeU8888, ++ psRGXCreateRenderContextIN->ui32ContextFlags, ++ psRGXCreateRenderContextIN->ui64RobustnessAddress, ++ psRGXCreateRenderContextIN->ui32MaxTADeadlineMS, ++ psRGXCreateRenderContextIN->ui32Max3DDeadlineMS, ++ &psRenderContextInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXCreateRenderContext_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXCreateRenderContextOUT-> ++ hRenderContext, ++ (void *)psRenderContextInt, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXCreateRenderContextpsRenderContextIntRelease); ++ if (unlikely(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateRenderContext_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXCreateRenderContext_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK) ++ { ++ if (psRenderContextInt) ++ { ++ PVRSRVRGXDestroyRenderContextKM(psRenderContextInt); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXCreateRenderContextOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXDestroyRenderContextIN_UI8, ++ IMG_UINT8 * psRGXDestroyRenderContextOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN = ++ (PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXDestroyRenderContextIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT = ++ (PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXDestroyRenderContextOUT_UI8, 0); ++ ++ /* Lock over handle destruction. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXDestroyRenderContextOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXDestroyRenderContextIN-> ++ hCleanupCookie, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); ++ if (unlikely ++ ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) ++ && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRGXDestroyRenderContextOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXDestroyRenderContext_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXDestroyRenderContext_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXSetRenderContextPriorityIN_UI8, ++ IMG_UINT8 * psRGXSetRenderContextPriorityOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN = ++ (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT = ++ (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXSetRenderContextPriorityOUT_UI8, 0); ++ ++ IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext; ++ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXSetRenderContextPriorityOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psRenderContextInt, ++ hRenderContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSetRenderContextPriority_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXSetRenderContextPriorityOUT->eError = ++ PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevNode(psConnection), ++ psRenderContextInt, ++ psRGXSetRenderContextPriorityIN->ui32Priority); ++ ++RGXSetRenderContextPriority_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psRenderContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hRenderContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXRenderContextStalledIN_UI8, ++ IMG_UINT8 * psRGXRenderContextStalledOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN = ++ (PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *) ++ IMG_OFFSET_ADDR(psRGXRenderContextStalledIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT = ++ (PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *) ++ IMG_OFFSET_ADDR(psRGXRenderContextStalledOUT_UI8, 0); ++ ++ IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext; ++ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXRenderContextStalledOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psRenderContextInt, ++ hRenderContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXRenderContextStalled_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXRenderContextStalledOUT->eError = RGXRenderContextStalledKM(psRenderContextInt); ++ ++RGXRenderContextStalled_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psRenderContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hRenderContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, ++ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, ++ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, ++ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, ++ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, ++ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXKickTA3D2(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXKickTA3D2IN_UI8, ++ IMG_UINT8 * psRGXKickTA3D2OUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *psRGXKickTA3D2IN = ++ (PVRSRV_BRIDGE_IN_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2IN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *psRGXKickTA3D2OUT = ++ (PVRSRV_BRIDGE_OUT_RGXKICKTA3D2 *) IMG_OFFSET_ADDR(psRGXKickTA3D2OUT_UI8, 0); ++ ++ IMG_HANDLE hRenderContext = psRGXKickTA3D2IN->hRenderContext; ++ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; ++ SYNC_PRIMITIVE_BLOCK **psClientTAFenceSyncPrimBlockInt = NULL; ++ IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL; ++ IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL; ++ IMG_UINT32 *ui32ClientTAFenceValueInt = NULL; ++ SYNC_PRIMITIVE_BLOCK **psClientTAUpdateSyncPrimBlockInt = NULL; ++ IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL; ++ IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL; ++ IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL; ++ SYNC_PRIMITIVE_BLOCK **psClient3DUpdateSyncPrimBlockInt = NULL; ++ IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL; ++ IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL; ++ IMG_UINT32 *ui32Client3DUpdateValueInt = NULL; ++ IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3D2IN->hPRFenceUFOSyncPrimBlock; ++ SYNC_PRIMITIVE_BLOCK *psPRFenceUFOSyncPrimBlockInt = NULL; ++ IMG_CHAR *uiUpdateFenceNameInt = NULL; ++ IMG_CHAR *uiUpdateFenceName3DInt = NULL; ++ IMG_BYTE *ui8TACmdInt = NULL; ++ IMG_BYTE *ui83DPRCmdInt = NULL; ++ IMG_BYTE *ui83DCmdInt = NULL; ++ IMG_HANDLE hKMHWRTDataSet = psRGXKickTA3D2IN->hKMHWRTDataSet; ++ RGX_KM_HW_RT_DATASET *psKMHWRTDataSetInt = NULL; ++ IMG_HANDLE hZSBuffer = psRGXKickTA3D2IN->hZSBuffer; ++ RGX_ZSBUFFER_DATA *psZSBufferInt = NULL; ++ IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3D2IN->hMSAAScratchBuffer; ++ RGX_ZSBUFFER_DATA *psMSAAScratchBufferInt = NULL; ++ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; ++ PMR **psSyncPMRsInt = NULL; ++ IMG_HANDLE *hSyncPMRsInt2 = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) 
psRGXKickTA3D2IN->ui32ClientTAFenceCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + ++ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)) + ++ ((IMG_UINT64) psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; ++ ++ if (unlikely(psRGXKickTA3D2IN->ui32ClientTAFenceCount > PVRSRV_MAX_SYNCS)) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickTA3D2_exit; ++ } ++ ++ if (unlikely(psRGXKickTA3D2IN->ui32ClientTAUpdateCount > PVRSRV_MAX_SYNCS)) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickTA3D2_exit; ++ } ++ ++ if (unlikely(psRGXKickTA3D2IN->ui32Client3DUpdateCount > PVRSRV_MAX_SYNCS)) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickTA3D2_exit; ++ } ++ ++ if (unlikely(psRGXKickTA3D2IN->ui32TACmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickTA3D2_exit; ++ } ++ ++ if (unlikely(psRGXKickTA3D2IN->ui323DPRCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickTA3D2_exit; ++ } ++ ++ if (unlikely(psRGXKickTA3D2IN->ui323DCmdSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickTA3D2_exit; ++ } ++ ++ if (unlikely(psRGXKickTA3D2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXKickTA3D2_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXKickTA3D2_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXKickTA3D2IN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
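++	/*
++	 * Zero-copy fast path shared by all array-carrying bridge calls:
++	 * the input message arrives in a bridge buffer of at most
++	 * PVRSRV_MAX_BRIDGE_IN_SIZE bytes, so any space left after the
++	 * message struct - rounded up to unsigned long alignment by
++	 * PVR_ALIGN() - can stage the array arguments without a heap
++	 * allocation. Only when they do not fit does the handler fall back
++	 * to OSAllocMemNoStats(), and the exit path frees the buffer only
++	 * in that fallback case (bHaveEnoughSpace == IMG_FALSE).
++	 * Illustrative sketch (hypothetical names):
++	 *
++	 *   size_t off = ALIGN(sizeof(*msg), sizeof(unsigned long));
++	 *   if (off < BUF_MAX && need <= BUF_MAX - off)
++	 *           staging = (uint8_t *)msg + off;  // reuse input buffer
++	 *   else
++	 *           staging = alloc(need);           // freed on exit
++	 */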
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXKickTA3D2IN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ } ++ ++ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) ++ { ++ psClientTAFenceSyncPrimBlockInt = ++ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psClientTAFenceSyncPrimBlockInt, 0, ++ psRGXKickTA3D2IN->ui32ClientTAFenceCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)); ++ ui32NextOffset += ++ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *); ++ hClientTAFenceSyncPrimBlockInt2 = ++ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hClientTAFenceSyncPrimBlockInt2, ++ (const void __user *)psRGXKickTA3D2IN->phClientTAFenceSyncPrimBlock, ++ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) ++ { ++ ui32ClientTAFenceSyncOffsetInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32ClientTAFenceSyncOffsetInt, ++ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceSyncOffset, ++ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount != 0) ++ { ++ ui32ClientTAFenceValueInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32ClientTAFenceValueInt, ++ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAFenceValue, ++ psRGXKickTA3D2IN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) ++ { ++ psClientTAUpdateSyncPrimBlockInt = ++ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psClientTAUpdateSyncPrimBlockInt, 0, ++ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)); ++ ui32NextOffset += ++ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); ++ hClientTAUpdateSyncPrimBlockInt2 = ++ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if 
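++	/*
++	 * Each array argument is carved sequentially out of
++	 * pArrayArgsBuffer at ui32NextOffset. Arrays that will hold
++	 * looked-up kernel pointers are zeroed with OSCachedMemSet() first,
++	 * so the cleanup path at RGXKickTA3D2_exit can release exactly the
++	 * handles that were successfully resolved; the raw user handles and
++	 * values are then pulled in with OSCopyFromUser(), and any failed
++	 * copy aborts the call with PVRSRV_ERROR_INVALID_PARAMS. The
++	 * recurring step, sketched with hypothetical helpers:
++	 *
++	 *   ptrs = carve(buf, &off, n * sizeof(void *));
++	 *   memset(ptrs, 0, n * sizeof(void *));
++	 *   raw  = carve(buf, &off, n * sizeof(IMG_HANDLE));
++	 *   if (copy_from_user(raw, uraw, n * sizeof(IMG_HANDLE)))
++	 *           goto fail;
++	 */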
(psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hClientTAUpdateSyncPrimBlockInt2, ++ (const void __user *)psRGXKickTA3D2IN->phClientTAUpdateSyncPrimBlock, ++ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) ++ { ++ ui32ClientTAUpdateSyncOffsetInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32ClientTAUpdateSyncOffsetInt, ++ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateSyncOffset, ++ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount != 0) ++ { ++ ui32ClientTAUpdateValueInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32ClientTAUpdateValueInt, ++ (const void __user *)psRGXKickTA3D2IN->pui32ClientTAUpdateValue, ++ psRGXKickTA3D2IN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) ++ { ++ psClient3DUpdateSyncPrimBlockInt = ++ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psClient3DUpdateSyncPrimBlockInt, 0, ++ psRGXKickTA3D2IN->ui32Client3DUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)); ++ ui32NextOffset += ++ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *); ++ hClient3DUpdateSyncPrimBlockInt2 = ++ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hClient3DUpdateSyncPrimBlockInt2, ++ (const void __user *)psRGXKickTA3D2IN->phClient3DUpdateSyncPrimBlock, ++ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount != 0) ++ { ++ ui32Client3DUpdateSyncOffsetInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32Client3DUpdateSyncOffsetInt, ++ (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateSyncOffset, ++ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount 
!= 0) ++ { ++ ui32Client3DUpdateValueInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32Client3DUpdateValueInt, ++ (const void __user *)psRGXKickTA3D2IN->pui32Client3DUpdateValue, ++ psRGXKickTA3D2IN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ ++ { ++ uiUpdateFenceNameInt = ++ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiUpdateFenceNameInt, ++ (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName, ++ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - ++ 1] = '\0'; ++ } ++ ++ { ++ uiUpdateFenceName3DInt = ++ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiUpdateFenceName3DInt, ++ (const void __user *)psRGXKickTA3D2IN->puiUpdateFenceName3D, ++ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ ((IMG_CHAR *) uiUpdateFenceName3DInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - ++ 1] = '\0'; ++ } ++ if (psRGXKickTA3D2IN->ui32TACmdSize != 0) ++ { ++ ui8TACmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8TACmdInt, (const void __user *)psRGXKickTA3D2IN->pui8TACmd, ++ psRGXKickTA3D2IN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui323DPRCmdSize != 0) ++ { ++ ui83DPRCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui83DPRCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DPRCmd, ++ psRGXKickTA3D2IN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui323DCmdSize != 0) ++ { ++ ui83DCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui83DCmdInt, (const void __user *)psRGXKickTA3D2IN->pui83DCmd, ++ psRGXKickTA3D2IN->ui323DCmdSize * sizeof(IMG_BYTE)) != 
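++	/*
++	 * Note how the two fence-name strings were handled earlier in this
++	 * handler: they are always copied at their full fixed length,
++	 * PVRSRV_SYNC_NAME_LENGTH, and the final byte is then overwritten
++	 * with '\0', so an unterminated user-supplied name can never run
++	 * past its buffer in later kernel-side consumers. Minimal form of
++	 * the idiom:
++	 *
++	 *   copy_from_user(name, uname, NAME_LEN);
++	 *   name[NAME_LEN - 1] = '\0';  // never trust user termination
++	 */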
PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) ++ { ++ ui32SyncPMRFlagsInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32SyncPMRFlagsInt, ++ (const void __user *)psRGXKickTA3D2IN->pui32SyncPMRFlags, ++ psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ if (psRGXKickTA3D2IN->ui32SyncPMRCount != 0) ++ { ++ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psSyncPMRsInt, 0, ++ psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *)); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(PMR *); ++ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hSyncPMRsInt2, (const void __user *)psRGXKickTA3D2IN->phSyncPMRs, ++ psRGXKickTA3D2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXKickTA3D2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psRenderContextInt, ++ hRenderContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickTA3D2_exit; ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psClientTAFenceSyncPrimBlockInt[i], ++ hClientTAFenceSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, ++ IMG_TRUE); ++ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **) ++ &psClientTAUpdateSyncPrimBlockInt[i], ++ hClientTAUpdateSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, ++ IMG_TRUE); ++ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **) ++ &psClient3DUpdateSyncPrimBlockInt[i], ++ hClient3DUpdateSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, ++ IMG_TRUE); ++ if (unlikely(psRGXKickTA3D2OUT->eError != 
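++	/*
++	 * All handle-to-pointer translations happen inside a single
++	 * LockHandle()/UnlockHandle() section; the per-element loops bail
++	 * out on the first failed lookup, and the common exit label
++	 * re-takes the lock and releases only the entries whose pointer
++	 * slots are non-NULL (which is why the pointer arrays were zeroed
++	 * at carve time). Shape of the pattern (hypothetical names):
++	 *
++	 *   lock();
++	 *   for (i = 0; i < n; i++)
++	 *           if (lookup(h[i], &p[i]) != PVRSRV_OK)
++	 *                   { unlock(); goto exit; }
++	 *   unlock();
++	 *   ...
++	 *   exit: lock();
++	 *   for (i = 0; i < n; i++)
++	 *           if (p[i]) release(h[i]);
++	 *   unlock();
++	 */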
PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ } ++ ++ /* Look up the address from the handle */ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPRFenceUFOSyncPrimBlockInt, ++ hPRFenceUFOSyncPrimBlock, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); ++ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickTA3D2_exit; ++ } ++ ++ if (psRGXKickTA3D2IN->hKMHWRTDataSet) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psKMHWRTDataSetInt, ++ hKMHWRTDataSet, ++ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET, IMG_TRUE); ++ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ ++ if (psRGXKickTA3D2IN->hZSBuffer) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psZSBufferInt, ++ hZSBuffer, ++ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); ++ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ ++ if (psRGXKickTA3D2IN->hMSAAScratchBuffer) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psMSAAScratchBufferInt, ++ hMSAAScratchBuffer, ++ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER, IMG_TRUE); ++ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSyncPMRsInt[i], ++ hSyncPMRsInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRGXKickTA3D2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXKickTA3D2_exit; ++ } ++ } ++ } ++ /* Release now we have looked up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXKickTA3D2OUT->eError = ++ PVRSRVRGXKickTA3DKM(psRenderContextInt, ++ psRGXKickTA3D2IN->ui32ClientTAFenceCount, ++ psClientTAFenceSyncPrimBlockInt, ++ ui32ClientTAFenceSyncOffsetInt, ++ ui32ClientTAFenceValueInt, ++ psRGXKickTA3D2IN->ui32ClientTAUpdateCount, ++ psClientTAUpdateSyncPrimBlockInt, ++ ui32ClientTAUpdateSyncOffsetInt, ++ ui32ClientTAUpdateValueInt, ++ psRGXKickTA3D2IN->ui32Client3DUpdateCount, ++ psClient3DUpdateSyncPrimBlockInt, ++ ui32Client3DUpdateSyncOffsetInt, ++ ui32Client3DUpdateValueInt, ++ psPRFenceUFOSyncPrimBlockInt, ++ psRGXKickTA3D2IN->ui32PRFenceUFOSyncOffset, ++ psRGXKickTA3D2IN->ui32PRFenceValue, ++ psRGXKickTA3D2IN->hCheckFence, ++ psRGXKickTA3D2IN->hUpdateTimeline, ++ &psRGXKickTA3D2OUT->hUpdateFence, ++ uiUpdateFenceNameInt, ++ psRGXKickTA3D2IN->hCheckFence3D, ++ psRGXKickTA3D2IN->hUpdateTimeline3D, ++ &psRGXKickTA3D2OUT->hUpdateFence3D, ++ uiUpdateFenceName3DInt, ++ psRGXKickTA3D2IN->ui32TACmdSize, ++ ui8TACmdInt, ++ psRGXKickTA3D2IN->ui323DPRCmdSize, ++ ui83DPRCmdInt, ++ psRGXKickTA3D2IN->ui323DCmdSize, ++ ui83DCmdInt, ++ psRGXKickTA3D2IN->ui32ExtJobRef, ++ psRGXKickTA3D2IN->bbKickTA, ++ psRGXKickTA3D2IN->bbKickPR, ++ psRGXKickTA3D2IN->bbKick3D, ++ psRGXKickTA3D2IN->bbAbort, ++ psRGXKickTA3D2IN->ui32PDumpFlags, ++ psKMHWRTDataSetInt, ++ psZSBufferInt, ++ psMSAAScratchBufferInt, ++ psRGXKickTA3D2IN->ui32SyncPMRCount, ++ ui32SyncPMRFlagsInt, ++ psSyncPMRsInt, ++ psRGXKickTA3D2IN->ui32RenderTargetSize, ++ psRGXKickTA3D2IN->ui32NumberOfDrawCalls, ++ psRGXKickTA3D2IN->ui32NumberOfIndices, ++ psRGXKickTA3D2IN->ui32NumberOfMRTs, psRGXKickTA3D2IN->ui64Deadline); ++ ++RGXKickTA3D2_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psRenderContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hRenderContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); ++ } ++ ++ if (hClientTAFenceSyncPrimBlockInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAFenceCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psClientTAFenceSyncPrimBlockInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hClientTAFenceSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ } ++ } ++ ++ if (hClientTAUpdateSyncPrimBlockInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickTA3D2IN->ui32ClientTAUpdateCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psClientTAUpdateSyncPrimBlockInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hClientTAUpdateSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ } ++ } ++ ++ if (hClient3DUpdateSyncPrimBlockInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickTA3D2IN->ui32Client3DUpdateCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psClient3DUpdateSyncPrimBlockInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hClient3DUpdateSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ } ++ } ++ ++ /* Unreference the previously looked up handle */ ++ if (psPRFenceUFOSyncPrimBlockInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPRFenceUFOSyncPrimBlock, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ ++ if (psRGXKickTA3D2IN->hKMHWRTDataSet) ++ { ++ ++ /* Unreference the previously looked up handle 
*/ ++ if (psKMHWRTDataSetInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hKMHWRTDataSet, ++ PVRSRV_HANDLE_TYPE_RGX_KM_HW_RT_DATASET); ++ } ++ } ++ ++ if (psRGXKickTA3D2IN->hZSBuffer) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psZSBufferInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hZSBuffer, ++ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); ++ } ++ } ++ ++ if (psRGXKickTA3D2IN->hMSAAScratchBuffer) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psMSAAScratchBufferInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hMSAAScratchBuffer, ++ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER); ++ } ++ } ++ ++ if (hSyncPMRsInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXKickTA3D2IN->ui32SyncPMRCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psSyncPMRsInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSyncPMRsInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ } ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXKickTA3D2OUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXSetRenderContextProperty(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXSetRenderContextPropertyIN_UI8, ++ IMG_UINT8 * psRGXSetRenderContextPropertyOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyIN = ++ (PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *psRGXSetRenderContextPropertyOUT = ++ (PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXSetRenderContextPropertyOUT_UI8, 0); ++ ++ IMG_HANDLE hRenderContext = psRGXSetRenderContextPropertyIN->hRenderContext; ++ RGX_SERVER_RENDER_CONTEXT *psRenderContextInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXSetRenderContextPropertyOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psRenderContextInt, ++ hRenderContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXSetRenderContextPropertyOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSetRenderContextProperty_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXSetRenderContextPropertyOUT->eError = ++ PVRSRVRGXSetRenderContextPropertyKM(psRenderContextInt, ++ psRGXSetRenderContextPropertyIN->ui32Property, ++ psRGXSetRenderContextPropertyIN->ui64Input, ++ &psRGXSetRenderContextPropertyOUT->ui64Output); ++ ++RGXSetRenderContextProperty_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psRenderContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hRenderContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitRGXTA3DBridge(void); ++void DeinitRGXTA3DBridge(void); ++ ++/* ++ * Register all RGXTA3D functions with services ++ */ ++PVRSRV_ERROR InitRGXTA3DBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET, ++ PVRSRVBridgeRGXCreateHWRTDataSet, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET, ++ PVRSRVBridgeRGXDestroyHWRTDataSet, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, ++ PVRSRVBridgeRGXCreateZSBuffer, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, ++ PVRSRVBridgeRGXDestroyZSBuffer, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, ++ PVRSRVBridgeRGXPopulateZSBuffer, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, ++ PVRSRVBridgeRGXUnpopulateZSBuffer, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, ++ PVRSRVBridgeRGXCreateFreeList, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, ++ PVRSRVBridgeRGXDestroyFreeList, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, ++ PVRSRVBridgeRGXCreateRenderContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, ++ PVRSRVBridgeRGXDestroyRenderContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, ++ PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, ++ PVRSRVBridgeRGXSetRenderContextPriority, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED, ++ PVRSRVBridgeRGXRenderContextStalled, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2, ++ PVRSRVBridgeRGXKickTA3D2, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, ++ PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY, ++ PVRSRVBridgeRGXSetRenderContextProperty, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all rgxta3d functions with services ++ */ ++void DeinitRGXTA3DBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATASET); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATASET); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, 
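++	/*
++	 * Init/Deinit must stay in lockstep: every
++	 * SetDispatchTableEntry(group, index, handler, ...) in
++	 * InitRGXTA3DBridge() above has a matching
++	 * UnsetDispatchTableEntry(group, index) here, so no stale handler
++	 * pointer survives driver teardown. Adding a bridge call therefore
++	 * always touches both functions, e.g. (hypothetical entry):
++	 *
++	 *   SetDispatchTableEntry(GROUP, IDX_NEW, PVRSRVBridgeNew, NULL);
++	 *   ...
++	 *   UnsetDispatchTableEntry(GROUP, IDX_NEW);
++	 */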
PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, ++ PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, ++ PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, ++ PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, ++ PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D2); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, ++ PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPROPERTY); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_rgxtimerquery_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxtimerquery_bridge.c +new file mode 100644 +index 000000000000..99e6239cf7a9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_rgxtimerquery_bridge.c +@@ -0,0 +1,167 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for rgxtimerquery ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for rgxtimerquery ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "rgxtimerquery.h"
++
++#include "common_rgxtimerquery_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static IMG_INT
++PVRSRVBridgeRGXBeginTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
++			       IMG_UINT8 * psRGXBeginTimerQueryIN_UI8,
++			       IMG_UINT8 * psRGXBeginTimerQueryOUT_UI8,
++			       CONNECTION_DATA * psConnection)
++{
++	PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryIN =
++	    (PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryIN_UI8, 0);
++	PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryOUT =
++	    (PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *) IMG_OFFSET_ADDR(psRGXBeginTimerQueryOUT_UI8,
++								     0);
++
++	psRGXBeginTimerQueryOUT->eError =
++	    PVRSRVRGXBeginTimerQueryKM(psConnection, OSGetDevNode(psConnection),
++				       psRGXBeginTimerQueryIN->ui32QueryId);
++
++	return 0;
++}
++
++static IMG_INT
++PVRSRVBridgeRGXEndTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
++			     IMG_UINT8 * psRGXEndTimerQueryIN_UI8,
++			     IMG_UINT8 * psRGXEndTimerQueryOUT_UI8, CONNECTION_DATA * psConnection)
++{
++	PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *psRGXEndTimerQueryIN =
++	    (PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryIN_UI8, 0);
++	PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *psRGXEndTimerQueryOUT =
++	    (PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *) IMG_OFFSET_ADDR(psRGXEndTimerQueryOUT_UI8, 0);
++
++	PVR_UNREFERENCED_PARAMETER(psRGXEndTimerQueryIN);
++
++	psRGXEndTimerQueryOUT->eError =
++	    PVRSRVRGXEndTimerQueryKM(psConnection, OSGetDevNode(psConnection));
++
++	return 0;
++}
++
++static IMG_INT
++PVRSRVBridgeRGXQueryTimer(IMG_UINT32 ui32DispatchTableEntry,
++			  IMG_UINT8 * psRGXQueryTimerIN_UI8,
++			  IMG_UINT8 * psRGXQueryTimerOUT_UI8, CONNECTION_DATA * psConnection)
++{
++	PVRSRV_BRIDGE_IN_RGXQUERYTIMER *psRGXQueryTimerIN =
++	    (PVRSRV_BRIDGE_IN_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerIN_UI8, 0);
++	PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psRGXQueryTimerOUT =
++	    (PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *) IMG_OFFSET_ADDR(psRGXQueryTimerOUT_UI8, 0);
++
++	psRGXQueryTimerOUT->eError =
++	    PVRSRVRGXQueryTimerKM(psConnection, OSGetDevNode(psConnection),
++				  psRGXQueryTimerIN->ui32QueryId,
++				  &psRGXQueryTimerOUT->ui64StartTime,
++				  &psRGXQueryTimerOUT->ui64EndTime);
++
++	return 0;
++}
++
++/* ***************************************************************************
++ * Server bridge dispatch related glue
++ */
++
++PVRSRV_ERROR InitRGXTIMERQUERYBridge(void);
++void DeinitRGXTIMERQUERYBridge(void);
++
++/*
++ * Register all RGXTIMERQUERY functions with services
++ */
++PVRSRV_ERROR InitRGXTIMERQUERYBridge(void)
++{
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
++			      PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY,
++			      PVRSRVBridgeRGXBeginTimerQuery, NULL);
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
++			      PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY,
++			      PVRSRVBridgeRGXEndTimerQuery, NULL);
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY,
++			      PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer,
++			      NULL);
++
++	return PVRSRV_OK;
++}
++
++/*
++ * Unregister all rgxtimerquery functions with services
++ */
++void
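++/*
++ * The three entry points above form a small query protocol: Begin tags
++ * subsequent GPU work on this connection with ui32QueryId, End closes the
++ * tagged window, and QueryTimer reports the 64-bit start/end timestamps
++ * for that id once they are available. A plausible caller-side sequence
++ * (illustrative only; the client API names are assumed, not defined in
++ * this patch):
++ *
++ *   Begin(conn, qid);
++ *   ...submit GPU work...
++ *   End(conn);
++ *   if (Query(conn, qid, &t0, &t1) == PVRSRV_OK)
++ *           elapsed = t1 - t0;
++ */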
DeinitRGXTIMERQUERYBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, ++ PVRSRV_BRIDGE_RGXTIMERQUERY_RGXBEGINTIMERQUERY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, ++ PVRSRV_BRIDGE_RGXTIMERQUERY_RGXENDTIMERQUERY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTIMERQUERY, ++ PVRSRV_BRIDGE_RGXTIMERQUERY_RGXQUERYTIMER); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c +new file mode 100644 +index 000000000000..f73bb906b2c7 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_rgxtq2_bridge.c +@@ -0,0 +1,1210 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for rgxtq2 ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for rgxtq2 ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "rgxtdmtransfer.h"
++
++#include "common_rgxtq2_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++#include "rgx_bvnc_defs_km.h"
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static PVRSRV_ERROR _RGXTDMCreateTransferContextpsTransferContextIntRelease(void *pvData)
++{
++	PVRSRV_ERROR eError;
++	eError = PVRSRVRGXTDMDestroyTransferContextKM((RGX_SERVER_TQ_TDM_CONTEXT *) pvData);
++	return eError;
++}
++
++static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
++	      "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");
++
++static IMG_INT
++PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
++					IMG_UINT8 * psRGXTDMCreateTransferContextIN_UI8,
++					IMG_UINT8 * psRGXTDMCreateTransferContextOUT_UI8,
++					CONNECTION_DATA * psConnection)
++{
++	PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextIN =
++	    (PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *)
++	    IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextIN_UI8, 0);
++	PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextOUT =
++	    (PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *)
++	    IMG_OFFSET_ADDR(psRGXTDMCreateTransferContextOUT_UI8, 0);
++
++	IMG_BYTE *ui8FrameworkCmdInt = NULL;
++	IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData;
++	IMG_HANDLE hPrivDataInt = NULL;
++	RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL;
++
++	IMG_UINT32 ui32NextOffset = 0;
++	IMG_BYTE *pArrayArgsBuffer = NULL;
++#if !defined(INTEGRITY_OS)
++	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
++#endif
++
++	IMG_UINT32 ui32BufferSize = 0;
++	IMG_UINT64 ui64BufferSize =
++	    ((IMG_UINT64) psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize *
++	     sizeof(IMG_BYTE)) + 0;
++
++	if (unlikely(psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize > RGXFWIF_RF_CMD_SIZE))
++	{
++		psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
++		goto RGXTDMCreateTransferContext_exit;
++	}
++
++	{
++		PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection);
++
++		/* Check that device supports the required feature */
++		if ((psDeviceNode->pfnCheckDeviceFeature) &&
++		    !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode,
++							 RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
++		{
++			psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
++
++			goto RGXTDMCreateTransferContext_exit;
++		}
++	}
++
++	if (ui64BufferSize > IMG_UINT32_MAX)
++	{
++		psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
++		goto RGXTDMCreateTransferContext_exit;
++	}
++
++	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
++
++	if (ui32BufferSize != 0)
++	{
++#if !defined(INTEGRITY_OS)
++		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
++		IMG_UINT32 ui32InBufferOffset =
++		    PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long));
++		IMG_UINT32 ui32InBufferExcessSize =
++		    ui32InBufferOffset >=
++		    PVRSRV_MAX_BRIDGE_IN_SIZE ?
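++		/*
++		 * Unlike the TA/3D bridge above, every rgxtq2 (TDM) entry
++		 * point is gated on the fast-render data master:
++		 * pfnCheckDeviceFeature() is consulted with
++		 * RGX_FEATURE_FASTRENDER_DM_BIT_MASK before any user data is
++		 * processed, and cores without the feature fail cleanly with
++		 * PVRSRV_ERROR_NOT_SUPPORTED. Shape of the gate, as used in
++		 * this file (FEATURE_BIT_MASK is a placeholder):
++		 *
++		 *   if (dev->pfnCheckDeviceFeature &&
++		 *       !dev->pfnCheckDeviceFeature(dev, FEATURE_BIT_MASK))
++		 *           return PVRSRV_ERROR_NOT_SUPPORTED;
++		 */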
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = ++ (IMG_BYTE *) (void *)psRGXTDMCreateTransferContextIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXTDMCreateTransferContextOUT->eError = ++ PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXTDMCreateTransferContext_exit; ++ } ++ } ++ } ++ ++ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize != 0) ++ { ++ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8FrameworkCmdInt, ++ (const void __user *)psRGXTDMCreateTransferContextIN->pui8FrameworkCmd, ++ psRGXTDMCreateTransferContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != ++ PVRSRV_OK) ++ { ++ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXTDMCreateTransferContext_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXTDMCreateTransferContextOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hPrivDataInt, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMCreateTransferContext_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXTDMCreateTransferContextOUT->eError = ++ PVRSRVRGXTDMCreateTransferContextKM(psConnection, OSGetDevNode(psConnection), ++ psRGXTDMCreateTransferContextIN->ui32Priority, ++ psRGXTDMCreateTransferContextIN-> ++ ui32FrameworkCmdSize, ui8FrameworkCmdInt, ++ hPrivDataInt, ++ psRGXTDMCreateTransferContextIN-> ++ ui32PackedCCBSizeU88, ++ psRGXTDMCreateTransferContextIN->ui32ContextFlags, ++ psRGXTDMCreateTransferContextIN-> ++ ui64RobustnessAddress, &psTransferContextInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXTDMCreateTransferContext_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXTDMCreateTransferContextOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXTDMCreateTransferContextOUT->hTransferContext, ++ (void *)psTransferContextInt, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXTDMCreateTransferContextpsTransferContextIntRelease); ++ if (unlikely(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMCreateTransferContext_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXTDMCreateTransferContext_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK) ++ { ++ if (psTransferContextInt) ++ { ++ PVRSRVRGXTDMDestroyTransferContextKM(psTransferContextInt); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXTDMCreateTransferContextOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXTDMDestroyTransferContextIN_UI8, ++ IMG_UINT8 * psRGXTDMDestroyTransferContextOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextIN = ++ (PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextOUT = ++ (PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXTDMDestroyTransferContextOUT_UI8, 0); ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) ++ { ++ psRGXTDMDestroyTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXTDMDestroyTransferContext_exit; ++ } ++ } ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXTDMDestroyTransferContextOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXTDMDestroyTransferContextIN-> ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); ++ if (unlikely ++ ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK) ++ && (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, ++ PVRSRVGetErrorString(psRGXTDMDestroyTransferContextOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMDestroyTransferContext_exit; ++ } ++ ++ /* Release now we have destroyed handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXTDMDestroyTransferContext_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXTDMSetTransferContextPriorityIN_UI8, ++ IMG_UINT8 * psRGXTDMSetTransferContextPriorityOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityIN = ++ (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityOUT = ++ (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPriorityOUT_UI8, 0); ++ ++ IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPriorityIN->hTransferContext; ++ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) ++ { ++ psRGXTDMSetTransferContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXTDMSetTransferContextPriority_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXTDMSetTransferContextPriorityOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psTransferContextInt, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMSetTransferContextPriority_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXTDMSetTransferContextPriorityOUT->eError = ++ PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection), ++ psTransferContextInt, ++ psRGXTDMSetTransferContextPriorityIN-> ++ ui32Priority); ++ ++RGXTDMSetTransferContextPriority_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psTransferContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateIN_UI8, ++ IMG_UINT8 * psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateIN = ++ (PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *) ++ IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateOUT = ++ (PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *) ++ IMG_OFFSET_ADDR(psRGXTDMNotifyWriteOffsetUpdateOUT_UI8, 0); ++ ++ IMG_HANDLE hTransferContext = psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext; ++ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) ++ { ++ psRGXTDMNotifyWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXTDMNotifyWriteOffsetUpdate_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXTDMNotifyWriteOffsetUpdateOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psTransferContextInt, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMNotifyWriteOffsetUpdate_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXTDMNotifyWriteOffsetUpdateOUT->eError = ++ PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(psTransferContextInt, ++ psRGXTDMNotifyWriteOffsetUpdateIN-> ++ ui32PDumpFlags); ++ ++RGXTDMNotifyWriteOffsetUpdate_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psTransferContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, ++ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, ++ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXTDMSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXTDMSubmitTransfer2IN_UI8, ++ IMG_UINT8 * psRGXTDMSubmitTransfer2OUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2IN = ++ (PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER2 *) ++ IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2IN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *psRGXTDMSubmitTransfer2OUT = ++ (PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER2 *) ++ IMG_OFFSET_ADDR(psRGXTDMSubmitTransfer2OUT_UI8, 0); ++ ++ IMG_HANDLE hTransferContext = psRGXTDMSubmitTransfer2IN->hTransferContext; ++ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; ++ SYNC_PRIMITIVE_BLOCK **psUpdateUFOSyncPrimBlockInt = NULL; ++ IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL; ++ IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL; ++ IMG_UINT32 *ui32UpdateValueInt = NULL; ++ IMG_CHAR *uiUpdateFenceNameInt = NULL; ++ IMG_UINT8 *ui8FWCommandInt = NULL; ++ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; ++ PMR **psSyncPMRsInt = NULL; ++ IMG_HANDLE *hSyncPMRsInt2 = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)) + ++ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) + ++ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + ++ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) + ++ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) + ++ ((IMG_UINT64) psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; ++ ++ if (unlikely(psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount > PVRSRV_MAX_SYNCS)) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ ++ if (unlikely ++ (psRGXTDMSubmitTransfer2IN->ui32CommandSize > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE)) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ ++ if (unlikely(psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ 
!psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXTDMSubmitTransfer2IN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXTDMSubmitTransfer2IN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ } ++ ++ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) ++ { ++ psUpdateUFOSyncPrimBlockInt = ++ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psUpdateUFOSyncPrimBlockInt, 0, ++ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)); ++ ui32NextOffset += ++ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * ++ sizeof(SYNC_PRIMITIVE_BLOCK *); ++ hUpdateUFOSyncPrimBlockInt2 = ++ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hUpdateUFOSyncPrimBlockInt2, ++ (const void __user *)psRGXTDMSubmitTransfer2IN->phUpdateUFOSyncPrimBlock, ++ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != ++ PVRSRV_OK) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) ++ { ++ ui32UpdateSyncOffsetInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32UpdateSyncOffsetInt, ++ (const void __user *)psRGXTDMSubmitTransfer2IN->pui32UpdateSyncOffset, ++ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != ++ PVRSRV_OK) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount != 0) ++ { ++ ui32UpdateValueInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ 
(NULL, ui32UpdateValueInt, ++ (const void __user *)psRGXTDMSubmitTransfer2IN->pui32UpdateValue, ++ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != ++ PVRSRV_OK) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ ++ { ++ uiUpdateFenceNameInt = ++ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiUpdateFenceNameInt, ++ (const void __user *)psRGXTDMSubmitTransfer2IN->puiUpdateFenceName, ++ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - ++ 1] = '\0'; ++ } ++ if (psRGXTDMSubmitTransfer2IN->ui32CommandSize != 0) ++ { ++ ui8FWCommandInt = (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8FWCommandInt, ++ (const void __user *)psRGXTDMSubmitTransfer2IN->pui8FWCommand, ++ psRGXTDMSubmitTransfer2IN->ui32CommandSize * sizeof(IMG_UINT8)) != PVRSRV_OK) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) ++ { ++ ui32SyncPMRFlagsInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32SyncPMRFlagsInt, ++ (const void __user *)psRGXTDMSubmitTransfer2IN->pui32SyncPMRFlags, ++ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount != 0) ++ { ++ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psSyncPMRsInt, 0, ++ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)); ++ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *); ++ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hSyncPMRsInt2, ++ (const void __user *)psRGXTDMSubmitTransfer2IN->phSyncPMRs, ++ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXTDMSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXTDMSubmitTransfer2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psTransferContextInt, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXTDMSubmitTransfer2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psUpdateUFOSyncPrimBlockInt[i], ++ hUpdateUFOSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, ++ IMG_TRUE); ++ if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXTDMSubmitTransfer2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSyncPMRsInt[i], ++ hSyncPMRsInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRGXTDMSubmitTransfer2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMSubmitTransfer2_exit; ++ } ++ } ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXTDMSubmitTransfer2OUT->eError = ++ PVRSRVRGXTDMSubmitTransferKM(psTransferContextInt, ++ psRGXTDMSubmitTransfer2IN->ui32PDumpFlags, ++ psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount, ++ psUpdateUFOSyncPrimBlockInt, ++ ui32UpdateSyncOffsetInt, ++ ui32UpdateValueInt, ++ psRGXTDMSubmitTransfer2IN->hCheckFenceFD, ++ psRGXTDMSubmitTransfer2IN->hUpdateTimeline, ++ &psRGXTDMSubmitTransfer2OUT->hUpdateFence, ++ uiUpdateFenceNameInt, ++ psRGXTDMSubmitTransfer2IN->ui32CommandSize, ++ ui8FWCommandInt, ++ psRGXTDMSubmitTransfer2IN->ui32ExternalJobReference, ++ psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount, ++ ui32SyncPMRFlagsInt, ++ psSyncPMRsInt, ++ psRGXTDMSubmitTransfer2IN->ui32Characteristic1, ++ psRGXTDMSubmitTransfer2IN->ui32Characteristic2, ++ psRGXTDMSubmitTransfer2IN->ui64DeadlineInus); ++ ++RGXTDMSubmitTransfer2_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psTransferContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); ++ } ++ ++ if (hUpdateUFOSyncPrimBlockInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32ClientUpdateCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psUpdateUFOSyncPrimBlockInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hUpdateUFOSyncPrimBlockInt2[i], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ } ++ } ++ ++ if (hSyncPMRsInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXTDMSubmitTransfer2IN->ui32SyncPMRCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psSyncPMRsInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSyncPMRsInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ } ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXTDMSubmitTransfer2OUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); ++ return eError; ++} ++ ++static PVRSRV_ERROR _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PVRSRVRGXTDMReleaseSharedMemoryKM((PMR *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXTDMGetSharedMemory(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXTDMGetSharedMemoryIN_UI8, ++ IMG_UINT8 * psRGXTDMGetSharedMemoryOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryIN = ++ (PVRSRV_BRIDGE_IN_RGXTDMGETSHAREDMEMORY *) ++ IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *psRGXTDMGetSharedMemoryOUT = ++ (PVRSRV_BRIDGE_OUT_RGXTDMGETSHAREDMEMORY *) ++ IMG_OFFSET_ADDR(psRGXTDMGetSharedMemoryOUT_UI8, 0); ++ ++ PMR *psCLIPMRMemInt = NULL; ++ PMR *psUSCPMRMemInt = NULL; ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) ++ { ++ psRGXTDMGetSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXTDMGetSharedMemory_exit; ++ } ++ } ++ ++ PVR_UNREFERENCED_PARAMETER(psRGXTDMGetSharedMemoryIN); ++ ++ psRGXTDMGetSharedMemoryOUT->eError = ++ PVRSRVRGXTDMGetSharedMemoryKM(psConnection, OSGetDevNode(psConnection), ++ &psCLIPMRMemInt, &psUSCPMRMemInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXTDMGetSharedMemory_exit; ++ } ++ ++ /* Lock over handle creation. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXTDMGetSharedMemoryOUT-> ++ hCLIPMRMem, ++ (void *)psCLIPMRMemInt, ++ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXTDMGetSharedMemorypsCLIPMRMemIntRelease); ++ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMGetSharedMemory_exit; ++ } ++ ++ psRGXTDMGetSharedMemoryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXTDMGetSharedMemoryOUT-> ++ hUSCPMRMem, ++ (void *)psUSCPMRMemInt, ++ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXTDMGetSharedMemorypsUSCPMRMemIntRelease); ++ if (unlikely(psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMGetSharedMemory_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXTDMGetSharedMemory_exit: ++ ++ if (psRGXTDMGetSharedMemoryOUT->eError != PVRSRV_OK) ++ { ++ if (psCLIPMRMemInt) ++ { ++ PVRSRVRGXTDMReleaseSharedMemoryKM(psCLIPMRMemInt); ++ } ++ if (psUSCPMRMemInt) ++ { ++ PVRSRVRGXTDMReleaseSharedMemoryKM(psUSCPMRMemInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXTDMReleaseSharedMemory(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXTDMReleaseSharedMemoryIN_UI8, ++ IMG_UINT8 * psRGXTDMReleaseSharedMemoryOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryIN = ++ (PVRSRV_BRIDGE_IN_RGXTDMRELEASESHAREDMEMORY *) ++ IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *psRGXTDMReleaseSharedMemoryOUT = ++ (PVRSRV_BRIDGE_OUT_RGXTDMRELEASESHAREDMEMORY *) ++ IMG_OFFSET_ADDR(psRGXTDMReleaseSharedMemoryOUT_UI8, 0); ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) ++ { ++ psRGXTDMReleaseSharedMemoryOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXTDMReleaseSharedMemory_exit; ++ } ++ } ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXTDMReleaseSharedMemoryOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXTDMReleaseSharedMemoryIN->hPMRMem, ++ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE); ++ if (unlikely((psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_OK) && ++ (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psRGXTDMReleaseSharedMemoryOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRGXTDMReleaseSharedMemoryOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMReleaseSharedMemory_exit; ++ } ++ ++ /* Release now we have destroyed handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXTDMReleaseSharedMemory_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXTDMSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXTDMSetTransferContextPropertyIN_UI8, ++ IMG_UINT8 * psRGXTDMSetTransferContextPropertyOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyIN = ++ (PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *psRGXTDMSetTransferContextPropertyOUT = ++ (PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXTDMSetTransferContextPropertyOUT_UI8, 0); ++ ++ IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPropertyIN->hTransferContext; ++ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContextInt = NULL; ++ ++ { ++ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevNode(psConnection); ++ ++ /* Check that device supports the required feature */ ++ if ((psDeviceNode->pfnCheckDeviceFeature) && ++ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, ++ RGX_FEATURE_FASTRENDER_DM_BIT_MASK)) ++ { ++ psRGXTDMSetTransferContextPropertyOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ ++ goto RGXTDMSetTransferContextProperty_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXTDMSetTransferContextPropertyOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psTransferContextInt, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXTDMSetTransferContextPropertyOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXTDMSetTransferContextProperty_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXTDMSetTransferContextPropertyOUT->eError = ++ PVRSRVRGXTDMSetTransferContextPropertyKM(psTransferContextInt, ++ psRGXTDMSetTransferContextPropertyIN-> ++ ui32Property, ++ psRGXTDMSetTransferContextPropertyIN-> ++ ui64Input, ++ &psRGXTDMSetTransferContextPropertyOUT-> ++ ui64Output); ++ ++RGXTDMSetTransferContextProperty_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psTransferContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. 
*/
++	UnlockHandle(psConnection->psHandleBase);
++
++	return 0;
++}
++
++/* ***************************************************************************
++ * Server bridge dispatch related glue
++ */
++
++PVRSRV_ERROR InitRGXTQ2Bridge(void);
++void DeinitRGXTQ2Bridge(void);
++
++/*
++ * Register all RGXTQ2 functions with services
++ */
++PVRSRV_ERROR InitRGXTQ2Bridge(void)
++{
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT,
++			      PVRSRVBridgeRGXTDMCreateTransferContext, NULL);
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT,
++			      PVRSRVBridgeRGXTDMDestroyTransferContext, NULL);
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY,
++			      PVRSRVBridgeRGXTDMSetTransferContextPriority, NULL);
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE,
++			      PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate, NULL);
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2,
++			      PVRSRVBridgeRGXTDMSubmitTransfer2, NULL);
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY,
++			      PVRSRVBridgeRGXTDMGetSharedMemory, NULL);
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY,
++			      PVRSRVBridgeRGXTDMReleaseSharedMemory, NULL);
++
++	SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++			      PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY,
++			      PVRSRVBridgeRGXTDMSetTransferContextProperty, NULL);
++
++	return PVRSRV_OK;
++}
++
++/*
++ * Unregister all rgxtq2 functions with services
++ */
++void DeinitRGXTQ2Bridge(void)
++{
++
++	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++				PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT);
++
++	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++				PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT);
++
++	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++				PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY);
++
++	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++				PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE);
++
++	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER2);
++
++	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMGETSHAREDMEMORY);
++
++	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++				PVRSRV_BRIDGE_RGXTQ2_RGXTDMRELEASESHAREDMEMORY);
++
++	UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2,
++				PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPROPERTY);
++
++}
+diff --git a/drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c b/drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c
+new file mode 100644
+index 000000000000..70415f5ffd82
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/server_rgxtq_bridge.c
+@@ -0,0 +1,1212 @@
++/*******************************************************************************
++@File
++@Title          Server bridge for rgxtq
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Implements the server side of the bridge for rgxtq
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "rgxtransfer.h"
++#include "rgx_tq_shared.h"
++
++#include "common_rgxtq_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++#if defined(SUPPORT_RGXTQ_BRIDGE)
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static PVRSRV_ERROR _RGXCreateTransferContextpsTransferContextIntRelease(void *pvData)
++{
++	PVRSRV_ERROR eError;
++	eError = PVRSRVRGXDestroyTransferContextKM((RGX_SERVER_TQ_CONTEXT *) pvData);
++	return eError;
++}
++
++static_assert(RGXFWIF_RF_CMD_SIZE <= IMG_UINT32_MAX,
++	      "RGXFWIF_RF_CMD_SIZE must not be larger than IMG_UINT32_MAX");
++
++static IMG_INT
++PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
++				     IMG_UINT8 * psRGXCreateTransferContextIN_UI8,
++				     IMG_UINT8 * psRGXCreateTransferContextOUT_UI8,
++				     CONNECTION_DATA * psConnection)
++{
++	PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextIN =
++	    (PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *)
++	    IMG_OFFSET_ADDR(psRGXCreateTransferContextIN_UI8, 0);
++	PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextOUT =
++	    (PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *)
++	    IMG_OFFSET_ADDR(psRGXCreateTransferContextOUT_UI8, 0);
++
++	IMG_BYTE *ui8FrameworkCmdInt = NULL;
++	IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData;
++	IMG_HANDLE hPrivDataInt = NULL;
++	RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL;
++	PMR *psCLIPMRMemInt = NULL;
++	PMR *psUSCPMRMemInt = NULL;
++
++	IMG_UINT32 ui32NextOffset = 0;
++	IMG_BYTE *pArrayArgsBuffer = NULL;
++#if !defined(INTEGRITY_OS)
++	IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
++#endif
++
++	IMG_UINT32 ui32BufferSize = 0;
++	IMG_UINT64 ui64BufferSize =
++	    ((IMG_UINT64) psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) + 0;
++
++	if (unlikely(psRGXCreateTransferContextIN->ui32FrameworkCmdize > RGXFWIF_RF_CMD_SIZE))
++	{
++		psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG;
++		goto RGXCreateTransferContext_exit;
++	}
++
++	psRGXCreateTransferContextOUT->hTransferContext = NULL;
++
++	if (ui64BufferSize > IMG_UINT32_MAX)
++	{
++		psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL;
++		goto RGXCreateTransferContext_exit;
++	}
++
++	ui32BufferSize = (IMG_UINT32) ui64BufferSize;
++
++	if (ui32BufferSize != 0)
++	{
++#if !defined(INTEGRITY_OS)
++		/* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
++		IMG_UINT32 ui32InBufferOffset =
++		    PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), sizeof(unsigned long));
++		IMG_UINT32 ui32InBufferExcessSize =
++		    ui32InBufferOffset >=
++		    PVRSRV_MAX_BRIDGE_IN_SIZE ?
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXCreateTransferContextIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXCreateTransferContext_exit; ++ } ++ } ++ } ++ ++ if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0) ++ { ++ ui8FrameworkCmdInt = (IMG_BYTE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui8FrameworkCmdInt, ++ (const void __user *)psRGXCreateTransferContextIN->pui8FrameworkCmd, ++ psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != ++ PVRSRV_OK) ++ { ++ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXCreateTransferContext_exit; ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXCreateTransferContextOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hPrivDataInt, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA, IMG_TRUE); ++ if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateTransferContext_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateTransferContextOUT->eError = ++ PVRSRVRGXCreateTransferContextKM(psConnection, OSGetDevNode(psConnection), ++ psRGXCreateTransferContextIN->ui32Priority, ++ psRGXCreateTransferContextIN->ui32FrameworkCmdize, ++ ui8FrameworkCmdInt, ++ hPrivDataInt, ++ psRGXCreateTransferContextIN->ui32PackedCCBSizeU8888, ++ psRGXCreateTransferContextIN->ui32ContextFlags, ++ psRGXCreateTransferContextIN->ui64RobustnessAddress, ++ &psTransferContextInt, ++ &psCLIPMRMemInt, &psUSCPMRMemInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) ++ { ++ goto RGXCreateTransferContext_exit; ++ } ++ ++ /* Lock over handle creation. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXCreateTransferContextOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRGXCreateTransferContextOUT->hTransferContext, ++ (void *)psTransferContextInt, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RGXCreateTransferContextpsTransferContextIntRelease); ++ if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateTransferContext_exit; ++ } ++ ++ psRGXCreateTransferContextOUT->eError = ++ PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, ++ &psRGXCreateTransferContextOUT->hCLIPMRMem, ++ (void *)psCLIPMRMemInt, ++ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ psRGXCreateTransferContextOUT->hTransferContext); ++ if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateTransferContext_exit; ++ } ++ ++ psRGXCreateTransferContextOUT->eError = ++ PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase, ++ &psRGXCreateTransferContextOUT->hUSCPMRMem, ++ (void *)psUSCPMRMemInt, ++ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ psRGXCreateTransferContextOUT->hTransferContext); ++ if (unlikely(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXCreateTransferContext_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXCreateTransferContext_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hPrivDataInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPrivData, PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK) ++ { ++ if (psRGXCreateTransferContextOUT->hTransferContext) ++ { ++ PVRSRV_ERROR eError; ++ ++ /* Lock over handle creation cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) ++ psRGXCreateTransferContextOUT-> ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); ++ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(eError))); ++ } ++ /* Releasing the handle should free/destroy/release the resource. ++ * This should never fail... */ ++ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); ++ ++ /* Avoid freeing/destroying/releasing the resource a second time below */ ++ psTransferContextInt = NULL; ++ /* Release now we have cleaned up creation handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ } ++ ++ if (psTransferContextInt) ++ { ++ PVRSRVRGXDestroyTransferContextKM(psTransferContextInt); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXCreateTransferContextOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXDestroyTransferContextIN_UI8, ++ IMG_UINT8 * psRGXDestroyTransferContextOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextIN = ++ (PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXDestroyTransferContextIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextOUT = ++ (PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *) ++ IMG_OFFSET_ADDR(psRGXDestroyTransferContextOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRGXDestroyTransferContextOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRGXDestroyTransferContextIN-> ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); ++ if (unlikely ++ ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK) ++ && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRGXDestroyTransferContextOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXDestroyTransferContext_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RGXDestroyTransferContext_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXSetTransferContextPriorityIN_UI8, ++ IMG_UINT8 * psRGXSetTransferContextPriorityOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityIN = ++ (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityOUT = ++ (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *) ++ IMG_OFFSET_ADDR(psRGXSetTransferContextPriorityOUT_UI8, 0); ++ ++ IMG_HANDLE hTransferContext = psRGXSetTransferContextPriorityIN->hTransferContext; ++ RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXSetTransferContextPriorityOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psTransferContextInt, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSetTransferContextPriority_exit; ++ } ++ /* Release now we have looked up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXSetTransferContextPriorityOUT->eError = ++ PVRSRVRGXSetTransferContextPriorityKM(psConnection, OSGetDevNode(psConnection), ++ psTransferContextInt, ++ psRGXSetTransferContextPriorityIN->ui32Priority); ++ ++RGXSetTransferContextPriority_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psTransferContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, ++ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); ++static_assert(RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE <= IMG_UINT32_MAX, ++ "RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE must not be larger than IMG_UINT32_MAX"); ++static_assert(PVRSRV_MAX_SYNCS <= IMG_UINT32_MAX, ++ "PVRSRV_MAX_SYNCS must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRGXSubmitTransfer2(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXSubmitTransfer2IN_UI8, ++ IMG_UINT8 * psRGXSubmitTransfer2OUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2IN = ++ (PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER2 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer2IN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *psRGXSubmitTransfer2OUT = ++ (PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER2 *) IMG_OFFSET_ADDR(psRGXSubmitTransfer2OUT_UI8, ++ 0); ++ ++ IMG_HANDLE hTransferContext = psRGXSubmitTransfer2IN->hTransferContext; ++ RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; ++ IMG_UINT32 *ui32ClientUpdateCountInt = NULL; ++ SYNC_PRIMITIVE_BLOCK ***psUpdateUFOSyncPrimBlockInt = NULL; ++ IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL; ++ IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL; ++ IMG_UINT32 **ui32UpdateValueInt = NULL; ++ IMG_CHAR *uiUpdateFenceNameInt = NULL; ++ IMG_UINT32 *ui32CommandSizeInt = NULL; ++ IMG_UINT8 **ui8FWCommandInt = NULL; ++ IMG_UINT32 *ui32TQPrepareFlagsInt = NULL; ++ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL; ++ PMR **psSyncPMRsInt = NULL; ++ IMG_HANDLE *hSyncPMRsInt2 = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++ IMG_BYTE *pArrayArgsBuffer2 = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) + ++ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) + ++ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)) + ++ ((IMG_UINT64) psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) + 0; ++ IMG_UINT32 ui32BufferSize2 = 0; ++ IMG_UINT32 ui32NextOffset2 = 0; ++ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ ++ ui64BufferSize += ++ psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **); ++ ui64BufferSize += 
psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE **); ++ ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); ++ ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); ++ ui64BufferSize += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *); ++ } ++ ++ if (unlikely(psRGXSubmitTransfer2IN->ui32SyncPMRCount > PVRSRV_MAX_SYNCS)) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXSubmitTransfer2_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXSubmitTransfer2_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRGXSubmitTransfer2IN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRGXSubmitTransfer2IN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ } ++ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ ui32ClientUpdateCountInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32ClientUpdateCountInt, ++ (const void __user *)psRGXSubmitTransfer2IN->pui32ClientUpdateCount, ++ psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ /* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */ ++ psUpdateUFOSyncPrimBlockInt = ++ (SYNC_PRIMITIVE_BLOCK ***) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += ++ psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **); ++ /* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */ ++ hUpdateUFOSyncPrimBlockInt2 = ++ (IMG_HANDLE **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_HANDLE); ++ } ++ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ /* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first dimension */ ++ ui32UpdateSyncOffsetInt = ++ (IMG_UINT32 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); ++ } ++ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ /* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */ ++ ui32UpdateValueInt = ++ (IMG_UINT32 **) 
IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32 *); ++ } ++ ++ { ++ uiUpdateFenceNameInt = ++ (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiUpdateFenceNameInt, ++ (const void __user *)psRGXSubmitTransfer2IN->puiUpdateFenceName, ++ PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ ((IMG_CHAR *) uiUpdateFenceNameInt)[(PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) - ++ 1] = '\0'; ++ } ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ ui32CommandSizeInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32CommandSizeInt, ++ (const void __user *)psRGXSubmitTransfer2IN->pui32CommandSize, ++ psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ /* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */ ++ ui8FWCommandInt = (IMG_UINT8 **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT8 *); ++ } ++ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ ui32TQPrepareFlagsInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32TQPrepareFlagsInt, ++ (const void __user *)psRGXSubmitTransfer2IN->pui32TQPrepareFlags, ++ psRGXSubmitTransfer2IN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0) ++ { ++ ui32SyncPMRFlagsInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32SyncPMRFlagsInt, ++ (const void __user *)psRGXSubmitTransfer2IN->pui32SyncPMRFlags, ++ psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ if (psRGXSubmitTransfer2IN->ui32SyncPMRCount != 0) ++ { ++ psSyncPMRsInt = (PMR **) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ OSCachedMemSet(psSyncPMRsInt, 0, ++ psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *)); ++ ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(PMR *); ++ hSyncPMRsInt2 = (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ 
ui32NextOffset += psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE); ++ } ++ ++ /* Copy the data over */ ++ if (psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, hSyncPMRsInt2, (const void __user *)psRGXSubmitTransfer2IN->phSyncPMRs, ++ psRGXSubmitTransfer2IN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ IMG_UINT32 i; ++ ui64BufferSize = 0; ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ ui64BufferSize += ++ ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *); ++ ui64BufferSize += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *); ++ ui64BufferSize += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); ++ ui64BufferSize += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); ++ ui64BufferSize += ui32CommandSizeInt[i] * sizeof(IMG_UINT8); ++ } ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RGXSubmitTransfer2_exit; ++ } ++ ui32BufferSize2 = (IMG_UINT32) ui64BufferSize; ++ } ++ ++ if (ui32BufferSize2 != 0) ++ { ++ pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2); ++ ++ if (!pArrayArgsBuffer2) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ IMG_UINT32 i; ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ if (ui32ClientUpdateCountInt[i] > PVRSRV_MAX_SYNCS) ++ { ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXSubmitTransfer2_exit; ++ } ++ ++ /* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */ ++ psUpdateUFOSyncPrimBlockInt[i] = ++ (SYNC_PRIMITIVE_BLOCK **) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ++ ui32NextOffset2); ++ OSCachedMemSet(psUpdateUFOSyncPrimBlockInt[i], 0, ++ ui32ClientUpdateCountInt[i] * ++ sizeof(SYNC_PRIMITIVE_BLOCK *)); ++ ui32NextOffset2 += ++ ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *); ++ /* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */ ++ hUpdateUFOSyncPrimBlockInt2[i] = ++ (IMG_HANDLE *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); ++ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE); ++ } ++ } ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ IMG_UINT32 i; ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ /* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */ ++ ui32UpdateSyncOffsetInt[i] = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); ++ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); ++ } ++ } ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ IMG_UINT32 i; ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ /* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */ ++ ui32UpdateValueInt[i] = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); ++ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32); ++ } ++ } ++ if (psRGXSubmitTransfer2IN->ui32PrepareCount != 0) ++ { ++ IMG_UINT32 i; ++ 
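/* Illustrative note, not part of the generated source: the loop below carves one firmware command buffer per prepare out of the second-level pool buffer (pArrayArgsBuffer2). Each client-supplied ui32CommandSizeInt[i] is checked against RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE before ui32NextOffset2 is advanced, mirroring the earlier sizing pass that accumulated the total in a 64-bit variable and rejected anything above IMG_UINT32_MAX. */ ++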
for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ if (ui32CommandSizeInt[i] > RGXFWIF_DM_INDEPENDENT_KICK_CMD_SIZE) ++ { ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RGXSubmitTransfer2_exit; ++ } ++ ++ /* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */ ++ ui8FWCommandInt[i] = ++ (IMG_UINT8 *) IMG_OFFSET_ADDR(pArrayArgsBuffer2, ui32NextOffset2); ++ ui32NextOffset2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8); ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ IMG_HANDLE **psPtr; ++ ++ /* Loop over all the pointers in the array copying the data into the kernel */ ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ /* Copy the pointer over from the client side */ ++ if (OSCopyFromUser ++ (NULL, &psPtr, ++ (const void __user *)&psRGXSubmitTransfer2IN-> ++ phUpdateUFOSyncPrimBlock[i], sizeof(IMG_HANDLE **)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ ++ /* Copy the data over */ ++ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, (hUpdateUFOSyncPrimBlockInt2[i]), ++ (const void __user *)psPtr, ++ (ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE))) != ++ PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ IMG_UINT32 **psPtr; ++ ++ /* Loop over all the pointers in the array copying the data into the kernel */ ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ /* Copy the pointer over from the client side */ ++ if (OSCopyFromUser ++ (NULL, &psPtr, ++ (const void __user *)&psRGXSubmitTransfer2IN->pui32UpdateSyncOffset[i], ++ sizeof(IMG_UINT32 **)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ ++ /* Copy the data over */ ++ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, (ui32UpdateSyncOffsetInt[i]), (const void __user *)psPtr, ++ (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != ++ PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ IMG_UINT32 **psPtr; ++ ++ /* Loop over all the pointers in the array copying the data into the kernel */ ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ /* Copy the pointer over from the client side */ ++ if (OSCopyFromUser ++ (NULL, &psPtr, ++ (const void __user *)&psRGXSubmitTransfer2IN->pui32UpdateValue[i], ++ sizeof(IMG_UINT32 **)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ ++ /* Copy the data over */ ++ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, (ui32UpdateValueInt[i]), (const void __user *)psPtr, ++ (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != ++ PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ IMG_UINT8 **psPtr; ++ ++ /* Loop over all the pointers in the array copying the data into the kernel */ ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ /* Copy the pointer over from the client side */ ++ if 
(OSCopyFromUser ++ (NULL, &psPtr, ++ (const void __user *)&psRGXSubmitTransfer2IN->pui8FWCommand[i], ++ sizeof(IMG_UINT8 **)) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ ++ /* Copy the data over */ ++ if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, (ui8FWCommandInt[i]), (const void __user *)psPtr, ++ (ui32CommandSizeInt[i] * sizeof(IMG_UINT8))) != PVRSRV_OK) ++ { ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ } ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psTransferContextInt, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSubmitTransfer2_exit; ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ IMG_UINT32 j; ++ for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) ++ { ++ /* Look up the address from the handle */ ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **) ++ &psUpdateUFOSyncPrimBlockInt[i][j], ++ hUpdateUFOSyncPrimBlockInt2[i][j], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, ++ IMG_TRUE); ++ if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ } ++ } ++ ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++) ++ { ++ /* Look up the address from the handle */ ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSyncPMRsInt[i], ++ hSyncPMRsInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRGXSubmitTransfer2OUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSubmitTransfer2_exit; ++ } ++ } ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXSubmitTransfer2OUT->eError = ++ PVRSRVRGXSubmitTransferKM(psTransferContextInt, ++ psRGXSubmitTransfer2IN->ui32PrepareCount, ++ ui32ClientUpdateCountInt, ++ psUpdateUFOSyncPrimBlockInt, ++ ui32UpdateSyncOffsetInt, ++ ui32UpdateValueInt, ++ psRGXSubmitTransfer2IN->hCheckFenceFD, ++ psRGXSubmitTransfer2IN->h2DUpdateTimeline, ++ &psRGXSubmitTransfer2OUT->h2DUpdateFence, ++ psRGXSubmitTransfer2IN->h3DUpdateTimeline, ++ &psRGXSubmitTransfer2OUT->h3DUpdateFence, ++ uiUpdateFenceNameInt, ++ ui32CommandSizeInt, ++ ui8FWCommandInt, ++ ui32TQPrepareFlagsInt, ++ psRGXSubmitTransfer2IN->ui32ExtJobRef, ++ psRGXSubmitTransfer2IN->ui32SyncPMRCount, ++ ui32SyncPMRFlagsInt, psSyncPMRsInt); ++ ++RGXSubmitTransfer2_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psTransferContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); ++ } ++ ++ if (hUpdateUFOSyncPrimBlockInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32PrepareCount; i++) ++ { ++ IMG_UINT32 j; ++ for (j = 0; j < ui32ClientUpdateCountInt[i]; j++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psUpdateUFOSyncPrimBlockInt[i][j]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hUpdateUFOSyncPrimBlockInt2[i] ++ [j], ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ } ++ } ++ } ++ ++ if (hSyncPMRsInt2) ++ { ++ IMG_UINT32 i; ++ ++ for (i = 0; i < psRGXSubmitTransfer2IN->ui32SyncPMRCount; i++) ++ { ++ ++ /* Unreference the previously looked up handle */ ++ if (psSyncPMRsInt[i]) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSyncPMRsInt2[i], ++ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ } ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXSubmitTransfer2OUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRGXSubmitTransfer2OUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++ if (pArrayArgsBuffer2) ++ OSFreeMemNoStats(pArrayArgsBuffer2); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRGXSetTransferContextProperty(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRGXSetTransferContextPropertyIN_UI8, ++ IMG_UINT8 * psRGXSetTransferContextPropertyOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY *psRGXSetTransferContextPropertyIN = ++ (PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY *psRGXSetTransferContextPropertyOUT = ++ (PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPROPERTY *) ++ IMG_OFFSET_ADDR(psRGXSetTransferContextPropertyOUT_UI8, 0); ++ ++ IMG_HANDLE hTransferContext = psRGXSetTransferContextPropertyIN->hTransferContext; ++ RGX_SERVER_TQ_CONTEXT *psTransferContextInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRGXSetTransferContextPropertyOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psTransferContextInt, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT, IMG_TRUE); ++ if (unlikely(psRGXSetTransferContextPropertyOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RGXSetTransferContextProperty_exit; ++ } ++ /* Release now we have looked up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRGXSetTransferContextPropertyOUT->eError = ++ PVRSRVRGXSetTransferContextPropertyKM(psTransferContextInt, ++ psRGXSetTransferContextPropertyIN->ui32Property, ++ psRGXSetTransferContextPropertyIN->ui64Input, ++ &psRGXSetTransferContextPropertyOUT->ui64Output); ++ ++RGXSetTransferContextProperty_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psTransferContextInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hTransferContext, ++ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++#endif /* SUPPORT_RGXTQ_BRIDGE */ ++ ++#if defined(SUPPORT_RGXTQ_BRIDGE) ++PVRSRV_ERROR InitRGXTQBridge(void); ++void DeinitRGXTQBridge(void); ++ ++/* ++ * Register all RGXTQ functions with services ++ */ ++PVRSRV_ERROR InitRGXTQBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, ++ PVRSRVBridgeRGXCreateTransferContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, ++ PVRSRVBridgeRGXDestroyTransferContext, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, ++ PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, ++ PVRSRVBridgeRGXSetTransferContextPriority, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2, ++ PVRSRVBridgeRGXSubmitTransfer2, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, ++ PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY, ++ PVRSRVBridgeRGXSetTransferContextProperty, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all rgxtq functions with services ++ */ ++void DeinitRGXTQBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, ++ PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER2); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, ++ PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPROPERTY); ++ ++} ++#else /* SUPPORT_RGXTQ_BRIDGE */ ++/* This bridge is conditional on SUPPORT_RGXTQ_BRIDGE - when not defined, ++ * do not populate the dispatch table with its functions ++ */ ++#define InitRGXTQBridge() \ ++ PVRSRV_OK ++ ++#define DeinitRGXTQBridge() ++ ++#endif /* SUPPORT_RGXTQ_BRIDGE */ +diff --git a/drivers/gpu/drm/img-rogue/server_ri_bridge.c b/drivers/gpu/drm/img-rogue/server_ri_bridge.c +new file mode 100644 +index 000000000000..80f246c58d9e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_ri_bridge.c +@@ -0,0 +1,760 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for ri ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for ri ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "ri_server.h"
++
++#include "common_ri_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static IMG_INT
++PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry,
++			    IMG_UINT8 * psRIWritePMREntryIN_UI8,
++			    IMG_UINT8 * psRIWritePMREntryOUT_UI8, CONNECTION_DATA * psConnection)
++{
++	PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN =
++	    (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryIN_UI8, 0);
++	PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT =
++	    (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *) IMG_OFFSET_ADDR(psRIWritePMREntryOUT_UI8, 0);
++
++	IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle;
++	PMR *psPMRHandleInt = NULL;
++
++	/* Lock over handle lookup.
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRIWritePMREntryOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRHandleInt, ++ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRIWritePMREntryOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RIWritePMREntry_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRIWritePMREntryOUT->eError = RIWritePMREntryKM(psPMRHandleInt); ++ ++RIWritePMREntry_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _RIWriteMEMDESCEntrypsRIHandleIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); ++ return eError; ++} ++ ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRIWriteMEMDESCEntryIN_UI8, ++ IMG_UINT8 * psRIWriteMEMDESCEntryOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN = ++ (PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT = ++ (PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIWriteMEMDESCEntryOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle; ++ PMR *psPMRHandleInt = NULL; ++ IMG_CHAR *uiTextBInt = NULL; ++ RI_HANDLE psRIHandleInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely(psRIWriteMEMDESCEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN)) ++ { ++ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RIWriteMEMDESCEntry_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RIWriteMEMDESCEntry_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteMEMDESCEntryIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RIWriteMEMDESCEntry_exit; ++ } ++ } ++ } ++ ++ if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0) ++ { ++ uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiTextBInt, (const void __user *)psRIWriteMEMDESCEntryIN->puiTextB, ++ psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RIWriteMEMDESCEntry_exit; ++ } ++ ((IMG_CHAR *) ++ uiTextBInt)[(psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] = ++ '\0'; ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRIWriteMEMDESCEntryOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRHandleInt, ++ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RIWriteMEMDESCEntry_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRIWriteMEMDESCEntryOUT->eError = ++ RIWriteMEMDESCEntryKM(psPMRHandleInt, ++ psRIWriteMEMDESCEntryIN->ui32TextBSize, ++ uiTextBInt, ++ psRIWriteMEMDESCEntryIN->ui64Offset, ++ psRIWriteMEMDESCEntryIN->ui64Size, ++ psRIWriteMEMDESCEntryIN->bIsImport, ++ psRIWriteMEMDESCEntryIN->bIsSuballoc, &psRIHandleInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) ++ { ++ goto RIWriteMEMDESCEntry_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRIWriteMEMDESCEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRIWriteMEMDESCEntryOUT-> ++ hRIHandle, ++ (void *)psRIHandleInt, ++ PVRSRV_HANDLE_TYPE_RI_HANDLE, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RIWriteMEMDESCEntrypsRIHandleIntRelease); ++ if (unlikely(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RIWriteMEMDESCEntry_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RIWriteMEMDESCEntry_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK) ++ { ++ if (psRIHandleInt) ++ { ++ RIDeleteMEMDESCEntryKM(psRIHandleInt); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRIWriteMEMDESCEntryOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _RIWriteProcListEntrypsRIHandleIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = RIDeleteMEMDESCEntryKM((RI_HANDLE) pvData); ++ return eError; ++} ++ ++static_assert(DEVMEM_ANNOTATION_MAX_LEN <= IMG_UINT32_MAX, ++ "DEVMEM_ANNOTATION_MAX_LEN must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRIWriteProcListEntryIN_UI8, ++ IMG_UINT8 * psRIWriteProcListEntryOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN = ++ (PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *) IMG_OFFSET_ADDR(psRIWriteProcListEntryIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT = ++ (PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *) ++ IMG_OFFSET_ADDR(psRIWriteProcListEntryOUT_UI8, 0); ++ ++ IMG_CHAR *uiTextBInt = NULL; ++ RI_HANDLE psRIHandleInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely(psRIWriteProcListEntryIN->ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN)) ++ { ++ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto RIWriteProcListEntry_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto RIWriteProcListEntry_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psRIWriteProcListEntryIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto RIWriteProcListEntry_exit; ++ } ++ } ++ } ++ ++ if (psRIWriteProcListEntryIN->ui32TextBSize != 0) ++ { ++ uiTextBInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiTextBInt, (const void __user *)psRIWriteProcListEntryIN->puiTextB, ++ psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto RIWriteProcListEntry_exit; ++ } ++ ((IMG_CHAR *) ++ uiTextBInt)[(psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) - 1] = ++ '\0'; ++ } ++ ++ psRIWriteProcListEntryOUT->eError = ++ RIWriteProcListEntryKM(psRIWriteProcListEntryIN->ui32TextBSize, ++ uiTextBInt, ++ psRIWriteProcListEntryIN->ui64Size, ++ psRIWriteProcListEntryIN->ui64DevVAddr, &psRIHandleInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) ++ { ++ goto RIWriteProcListEntry_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRIWriteProcListEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psRIWriteProcListEntryOUT-> ++ hRIHandle, ++ (void *)psRIHandleInt, ++ PVRSRV_HANDLE_TYPE_RI_HANDLE, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _RIWriteProcListEntrypsRIHandleIntRelease); ++ if (unlikely(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RIWriteProcListEntry_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RIWriteProcListEntry_exit: ++ ++ if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK) ++ { ++ if (psRIHandleInt) ++ { ++ RIDeleteMEMDESCEntryKM(psRIHandleInt); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psRIWriteProcListEntryOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRIUpdateMEMDESCAddrIN_UI8, ++ IMG_UINT8 * psRIUpdateMEMDESCAddrOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN = ++ (PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT = ++ (PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *) IMG_OFFSET_ADDR(psRIUpdateMEMDESCAddrOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle; ++ RI_HANDLE psRIHandleInt = NULL; ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRIUpdateMEMDESCAddrOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psRIHandleInt, ++ hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE, IMG_TRUE); ++ if (unlikely(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RIUpdateMEMDESCAddr_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRIUpdateMEMDESCAddrOUT->eError = ++ RIUpdateMEMDESCAddrKM(psRIHandleInt, psRIUpdateMEMDESCAddrIN->sAddr); ++ ++RIUpdateMEMDESCAddr_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psRIHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hRIHandle, PVRSRV_HANDLE_TYPE_RI_HANDLE); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRIDeleteMEMDESCEntryIN_UI8, ++ IMG_UINT8 * psRIDeleteMEMDESCEntryOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN = ++ (PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *) IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT = ++ (PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *) ++ IMG_OFFSET_ADDR(psRIDeleteMEMDESCEntryOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psRIDeleteMEMDESCEntryOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psRIDeleteMEMDESCEntryIN->hRIHandle, ++ PVRSRV_HANDLE_TYPE_RI_HANDLE); ++ if (unlikely((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) && ++ (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psRIDeleteMEMDESCEntryOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto RIDeleteMEMDESCEntry_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++RIDeleteMEMDESCEntry_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRIDumpListIN_UI8, ++ IMG_UINT8 * psRIDumpListOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN = ++ (PVRSRV_BRIDGE_IN_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT = ++ (PVRSRV_BRIDGE_OUT_RIDUMPLIST *) IMG_OFFSET_ADDR(psRIDumpListOUT_UI8, 0); ++ ++ IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle; ++ PMR *psPMRHandleInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRIDumpListOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRHandleInt, ++ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRIDumpListOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RIDumpList_exit; ++ } ++ /* Release now we have looked up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRIDumpListOUT->eError = RIDumpListKM(psPMRHandleInt); ++ ++RIDumpList_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRIDumpAllIN_UI8, ++ IMG_UINT8 * psRIDumpAllOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN = ++ (PVRSRV_BRIDGE_IN_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT = ++ (PVRSRV_BRIDGE_OUT_RIDUMPALL *) IMG_OFFSET_ADDR(psRIDumpAllOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN); ++ ++ psRIDumpAllOUT->eError = RIDumpAllKM(); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRIDumpProcessIN_UI8, ++ IMG_UINT8 * psRIDumpProcessOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN = ++ (PVRSRV_BRIDGE_IN_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT = ++ (PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *) IMG_OFFSET_ADDR(psRIDumpProcessOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ psRIDumpProcessOUT->eError = RIDumpProcessKM(psRIDumpProcessIN->ui32Pid); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeRIWritePMREntryWithOwner(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psRIWritePMREntryWithOwnerIN_UI8, ++ IMG_UINT8 * psRIWritePMREntryWithOwnerOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerIN = ++ (PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *) ++ IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerOUT = ++ (PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *) ++ IMG_OFFSET_ADDR(psRIWritePMREntryWithOwnerOUT_UI8, 0); ++ ++ IMG_HANDLE hPMRHandle = psRIWritePMREntryWithOwnerIN->hPMRHandle; ++ PMR *psPMRHandleInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psRIWritePMREntryWithOwnerOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psPMRHandleInt, ++ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_TRUE); ++ if (unlikely(psRIWritePMREntryWithOwnerOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto RIWritePMREntryWithOwner_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psRIWritePMREntryWithOwnerOUT->eError = ++ RIWritePMREntryWithOwnerKM(psPMRHandleInt, psRIWritePMREntryWithOwnerIN->ui32Owner); ++ ++RIWritePMREntryWithOwner_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psPMRHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hPMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitRIBridge(void); ++void DeinitRIBridge(void); ++ ++/* ++ * Register all RI functions with services ++ */ ++PVRSRV_ERROR InitRIBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, ++ PVRSRVBridgeRIWritePMREntry, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, ++ PVRSRVBridgeRIWriteMEMDESCEntry, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY, ++ PVRSRVBridgeRIWriteProcListEntry, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, ++ PVRSRVBridgeRIUpdateMEMDESCAddr, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, ++ PVRSRVBridgeRIDeleteMEMDESCEntry, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList, ++ NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll, ++ NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, ++ PVRSRVBridgeRIDumpProcess, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER, ++ PVRSRVBridgeRIWritePMREntryWithOwner, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all ri functions with services ++ */ ++void DeinitRIBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_srvcore_bridge.c b/drivers/gpu/drm/img-rogue/server_srvcore_bridge.c +new file mode 100644 +index 000000000000..6fda53ff8b31 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_srvcore_bridge.c +@@ -0,0 +1,1072 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for srvcore ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for srvcore ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "srvcore.h"
++#include "info_page.h"
++#include "proc_stats.h"
++#include "rgx_fwif_alignchecks.h"
++
++#include "common_srvcore_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static IMG_INT
++PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
++		    IMG_UINT8 * psConnectIN_UI8,
++		    IMG_UINT8 * psConnectOUT_UI8, CONNECTION_DATA * psConnection)
++{
++	PVRSRV_BRIDGE_IN_CONNECT *psConnectIN =
++	    (PVRSRV_BRIDGE_IN_CONNECT *) IMG_OFFSET_ADDR(psConnectIN_UI8, 0);
++	PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT =
++	    (PVRSRV_BRIDGE_OUT_CONNECT *) IMG_OFFSET_ADDR(psConnectOUT_UI8, 0);
++
++	psConnectOUT->eError =
++	    PVRSRVConnectKM(psConnection, OSGetDevNode(psConnection),
++			    psConnectIN->ui32Flags,
++			    psConnectIN->ui32ClientBuildOptions,
++			    psConnectIN->ui32ClientDDKVersion,
++			    psConnectIN->ui32ClientDDKBuild,
++			    &psConnectOUT->ui8KernelArch,
++			    &psConnectOUT->ui32CapabilityFlags, &psConnectOUT->ui64PackedBvnc);
++
++	return 0;
++}
++
++static IMG_INT
++PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
++		       IMG_UINT8 * psDisconnectIN_UI8,
++		       IMG_UINT8 * psDisconnectOUT_UI8, CONNECTION_DATA * psConnection)
++{
++	PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN =
++	    (PVRSRV_BRIDGE_IN_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectIN_UI8, 0);
++	PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT =
++	    (PVRSRV_BRIDGE_OUT_DISCONNECT *) IMG_OFFSET_ADDR(psDisconnectOUT_UI8, 0);
++
++	PVR_UNREFERENCED_PARAMETER(psConnection);
++	PVR_UNREFERENCED_PARAMETER(psDisconnectIN);
++
++	psDisconnectOUT->eError = PVRSRVDisconnectKM();
++
++	return 0;
++}
++
++static PVRSRV_ERROR _AcquireGlobalEventObjecthGlobalEventObjectIntRelease(void *pvData)
++{
++	PVRSRV_ERROR eError;
++	eError = PVRSRVReleaseGlobalEventObjectKM((IMG_HANDLE) pvData);
++	return eError;
++}
++
++static IMG_INT
++PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
++				     IMG_UINT8 * psAcquireGlobalEventObjectIN_UI8,
++				     IMG_UINT8 * psAcquireGlobalEventObjectOUT_UI8,
++				     CONNECTION_DATA * psConnection)
++{
++	PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN =
++	    (PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *)
++	    IMG_OFFSET_ADDR(psAcquireGlobalEventObjectIN_UI8, 0);
++	PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectOUT =
++	    (PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *)
++	    IMG_OFFSET_ADDR(psAcquireGlobalEventObjectOUT_UI8, 0);
++
++	IMG_HANDLE hGlobalEventObjectInt = NULL;
++
++	PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);
++
++	psAcquireGlobalEventObjectOUT->eError =
++	    PVRSRVAcquireGlobalEventObjectKM(&hGlobalEventObjectInt);
++	/* Exit early if bridged call fails */
++	if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK))
++	{
++		goto AcquireGlobalEventObject_exit;
++	}
++
++	/* Lock over handle creation.
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ psAcquireGlobalEventObjectOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psAcquireGlobalEventObjectOUT->hGlobalEventObject, ++ (void *)hGlobalEventObjectInt, ++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _AcquireGlobalEventObjecthGlobalEventObjectIntRelease); ++ if (unlikely(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto AcquireGlobalEventObject_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++AcquireGlobalEventObject_exit: ++ ++ if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK) ++ { ++ if (hGlobalEventObjectInt) ++ { ++ PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psReleaseGlobalEventObjectIN_UI8, ++ IMG_UINT8 * psReleaseGlobalEventObjectOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN = ++ (PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *) ++ IMG_OFFSET_ADDR(psReleaseGlobalEventObjectIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectOUT = ++ (PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *) ++ IMG_OFFSET_ADDR(psReleaseGlobalEventObjectOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psReleaseGlobalEventObjectOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psReleaseGlobalEventObjectIN-> ++ hGlobalEventObject, ++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); ++ if (unlikely ++ ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK) ++ && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) ++ && (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psReleaseGlobalEventObjectOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto ReleaseGlobalEventObject_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ReleaseGlobalEventObject_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _EventObjectOpenhOSEventIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = OSEventObjectClose((IMG_HANDLE) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psEventObjectOpenIN_UI8, ++ IMG_UINT8 * psEventObjectOpenOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN = ++ (PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT = ++ (PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *) IMG_OFFSET_ADDR(psEventObjectOpenOUT_UI8, 0); ++ ++ IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject; ++ IMG_HANDLE hEventObjectInt = NULL; ++ IMG_HANDLE hOSEventInt = NULL; ++ ++ /* Lock over handle lookup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psEventObjectOpenOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hEventObjectInt, ++ hEventObject, ++ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT, IMG_TRUE); ++ if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto EventObjectOpen_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psEventObjectOpenOUT->eError = OSEventObjectOpen(hEventObjectInt, &hOSEventInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK)) ++ { ++ goto EventObjectOpen_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psEventObjectOpenOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psEventObjectOpenOUT->hOSEvent, ++ (void *)hOSEventInt, ++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & ++ _EventObjectOpenhOSEventIntRelease); ++ if (unlikely(psEventObjectOpenOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto EventObjectOpen_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++EventObjectOpen_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hEventObjectInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hEventObject, PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psEventObjectOpenOUT->eError != PVRSRV_OK) ++ { ++ if (hOSEventInt) ++ { ++ OSEventObjectClose(hOSEventInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psEventObjectWaitIN_UI8, ++ IMG_UINT8 * psEventObjectWaitOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN = ++ (PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT = ++ (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *) IMG_OFFSET_ADDR(psEventObjectWaitOUT_UI8, 0); ++ ++ IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM; ++ IMG_HANDLE hOSEventKMInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psEventObjectWaitOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hOSEventKMInt, ++ hOSEventKM, ++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE); ++ if (unlikely(psEventObjectWaitOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto EventObjectWait_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psEventObjectWaitOUT->eError = OSEventObjectWait(hOSEventKMInt); ++ ++EventObjectWait_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hOSEventKMInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psEventObjectCloseIN_UI8, ++ IMG_UINT8 * psEventObjectCloseOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN = ++ (PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT = ++ (PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *) IMG_OFFSET_ADDR(psEventObjectCloseOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psEventObjectCloseOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psEventObjectCloseIN->hOSEventKM, ++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); ++ if (unlikely((psEventObjectCloseOUT->eError != PVRSRV_OK) && ++ (psEventObjectCloseOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: %s", __func__, PVRSRVGetErrorString(psEventObjectCloseOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto EventObjectClose_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++EventObjectClose_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psDumpDebugInfoIN_UI8, ++ IMG_UINT8 * psDumpDebugInfoOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN = ++ (PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT = ++ (PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *) IMG_OFFSET_ADDR(psDumpDebugInfoOUT_UI8, 0); ++ ++ psDumpDebugInfoOUT->eError = ++ PVRSRVDumpDebugInfoKM(psConnection, OSGetDevNode(psConnection), ++ psDumpDebugInfoIN->ui32VerbLevel); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psGetDevClockSpeedIN_UI8, ++ IMG_UINT8 * psGetDevClockSpeedOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN = ++ (PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT = ++ (PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *) IMG_OFFSET_ADDR(psGetDevClockSpeedOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN); ++ ++ psGetDevClockSpeedOUT->eError = ++ PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevNode(psConnection), ++ &psGetDevClockSpeedOUT->ui32ClockSpeed); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psHWOpTimeoutIN_UI8, ++ IMG_UINT8 * psHWOpTimeoutOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN = ++ (PVRSRV_BRIDGE_IN_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT = ++ (PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *) IMG_OFFSET_ADDR(psHWOpTimeoutOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN); ++ ++ psHWOpTimeoutOUT->eError = PVRSRVHWOpTimeoutKM(psConnection, OSGetDevNode(psConnection)); ++ ++ return 0; ++} ++ ++static_assert(RGXFW_ALIGN_CHECKS_UM_MAX <= IMG_UINT32_MAX, ++ "RGXFW_ALIGN_CHECKS_UM_MAX must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT 
++PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psAlignmentCheckIN_UI8, ++ IMG_UINT8 * psAlignmentCheckOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN = ++ (PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT = ++ (PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *) IMG_OFFSET_ADDR(psAlignmentCheckOUT_UI8, 0); ++ ++ IMG_UINT32 *ui32AlignChecksInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) + 0; ++ ++ if (unlikely(psAlignmentCheckIN->ui32AlignChecksSize > RGXFW_ALIGN_CHECKS_UM_MAX)) ++ { ++ psAlignmentCheckOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto AlignmentCheck_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psAlignmentCheckOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto AlignmentCheck_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psAlignmentCheckIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psAlignmentCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto AlignmentCheck_exit; ++ } ++ } ++ } ++ ++ if (psAlignmentCheckIN->ui32AlignChecksSize != 0) ++ { ++ ui32AlignChecksInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32); ++ } ++ ++ /* Copy the data over */ ++ if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, ui32AlignChecksInt, ++ (const void __user *)psAlignmentCheckIN->pui32AlignChecks, ++ psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) != PVRSRV_OK) ++ { ++ psAlignmentCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto AlignmentCheck_exit; ++ } ++ } ++ ++ psAlignmentCheckOUT->eError = ++ PVRSRVAlignmentCheckKM(psConnection, OSGetDevNode(psConnection), ++ psAlignmentCheckIN->ui32AlignChecksSize, ui32AlignChecksInt); ++ ++AlignmentCheck_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psAlignmentCheckOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psGetDeviceStatusIN_UI8, ++ IMG_UINT8 * psGetDeviceStatusOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ 
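/* A condensed, illustrative restatement of the array-marshalling pattern
++ * used by PVRSRVBridgeAlignmentCheck above (hypothetical names, not part of
++ * the generated code); every generated entry point that carries a
++ * variable-length array follows the same steps:
++ *
++ *   IMG_UINT64 ui64Size = (IMG_UINT64) ui32Count * sizeof(IMG_UINT32);
++ *   if (ui32Count > TABLE_MAX)      -> PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG
++ *   if (ui64Size > IMG_UINT32_MAX)  -> PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL
++ *   pBuf = fits after the IN struct in the bridge buffer ? in-buffer tail
++ *                                   : OSAllocMemNoStats((IMG_UINT32) ui64Size);
++ *   OSCopyFromUser(...); call the KM function; OSCopyToUser(...) for OUT
++ *   arrays; OSFreeMemNoStats(pBuf) only on the heap-allocated path.
++ */
++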
PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psGetDeviceStatusIN = ++ (PVRSRV_BRIDGE_IN_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *psGetDeviceStatusOUT = ++ (PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *) IMG_OFFSET_ADDR(psGetDeviceStatusOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN); ++ ++ psGetDeviceStatusOUT->eError = ++ PVRSRVGetDeviceStatusKM(psConnection, OSGetDevNode(psConnection), ++ &psGetDeviceStatusOUT->ui32DeviceSatus); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeGetMultiCoreInfo(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psGetMultiCoreInfoIN_UI8, ++ IMG_UINT8 * psGetMultiCoreInfoOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_GETMULTICOREINFO *psGetMultiCoreInfoIN = ++ (PVRSRV_BRIDGE_IN_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *psGetMultiCoreInfoOUT = ++ (PVRSRV_BRIDGE_OUT_GETMULTICOREINFO *) IMG_OFFSET_ADDR(psGetMultiCoreInfoOUT_UI8, 0); ++ ++ IMG_UINT64 *pui64CapsInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) + 0; ++ ++ if (psGetMultiCoreInfoIN->ui32CapsSize > 8) ++ { ++ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto GetMultiCoreInfo_exit; ++ } ++ ++ psGetMultiCoreInfoOUT->pui64Caps = psGetMultiCoreInfoIN->pui64Caps; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto GetMultiCoreInfo_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psGetMultiCoreInfoIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psGetMultiCoreInfoIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto GetMultiCoreInfo_exit; ++ } ++ } ++ } ++ ++ if (psGetMultiCoreInfoIN->ui32CapsSize != 0) ++ { ++ pui64CapsInt = (IMG_UINT64 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64); ++ } ++ ++ psGetMultiCoreInfoOUT->eError = ++ PVRSRVGetMultiCoreInfoKM(psConnection, OSGetDevNode(psConnection), ++ psGetMultiCoreInfoIN->ui32CapsSize, ++ &psGetMultiCoreInfoOUT->ui32NumCores, pui64CapsInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psGetMultiCoreInfoOUT->eError != PVRSRV_OK)) ++ { ++ goto GetMultiCoreInfo_exit; ++ } ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((pui64CapsInt) && ((psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psGetMultiCoreInfoOUT->pui64Caps, pui64CapsInt, ++ (psGetMultiCoreInfoIN->ui32CapsSize * sizeof(IMG_UINT64))) != PVRSRV_OK)) ++ { ++ psGetMultiCoreInfoOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto GetMultiCoreInfo_exit; ++ } ++ } ++ ++GetMultiCoreInfo_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psGetMultiCoreInfoOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psEventObjectWaitTimeoutIN_UI8, ++ IMG_UINT8 * psEventObjectWaitTimeoutOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN = ++ (PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *) ++ IMG_OFFSET_ADDR(psEventObjectWaitTimeoutIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT = ++ (PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *) ++ IMG_OFFSET_ADDR(psEventObjectWaitTimeoutOUT_UI8, 0); ++ ++ IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM; ++ IMG_HANDLE hOSEventKMInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psEventObjectWaitTimeoutOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&hOSEventKMInt, ++ hOSEventKM, ++ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT, IMG_TRUE); ++ if (unlikely(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto EventObjectWaitTimeout_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psEventObjectWaitTimeoutOUT->eError = ++ OSEventObjectWaitTimeout(hOSEventKMInt, psEventObjectWaitTimeoutIN->ui64uiTimeoutus); ++ ++EventObjectWaitTimeout_exit: ++ ++ /* Lock over handle lookup cleanup. 
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (hOSEventKMInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hOSEventKM, PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psFindProcessMemStatsIN_UI8, ++ IMG_UINT8 * psFindProcessMemStatsOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *psFindProcessMemStatsIN = ++ (PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsIN_UI8, ++ 0); ++ PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *psFindProcessMemStatsOUT = ++ (PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *) IMG_OFFSET_ADDR(psFindProcessMemStatsOUT_UI8, ++ 0); ++ ++ IMG_UINT32 *pui32MemStatsArrayInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) + 0; ++ ++ if (psFindProcessMemStatsIN->ui32ArrSize > PVRSRV_PROCESS_STAT_TYPE_COUNT) ++ { ++ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto FindProcessMemStats_exit; ++ } ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ psFindProcessMemStatsOUT->pui32MemStatsArray = psFindProcessMemStatsIN->pui32MemStatsArray; ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto FindProcessMemStats_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psFindProcessMemStatsIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto FindProcessMemStats_exit; ++ } ++ } ++ } ++ ++ if (psFindProcessMemStatsIN->ui32ArrSize != 0) ++ { ++ pui32MemStatsArrayInt = ++ (IMG_UINT32 *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32); ++ } ++ ++ psFindProcessMemStatsOUT->eError = ++ PVRSRVFindProcessMemStatsKM(psFindProcessMemStatsIN->ui32PID, ++ psFindProcessMemStatsIN->ui32ArrSize, ++ psFindProcessMemStatsIN->bbAllProcessStats, ++ pui32MemStatsArrayInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psFindProcessMemStatsOUT->eError != PVRSRV_OK)) ++ { ++ goto FindProcessMemStats_exit; ++ } ++ ++ /* If dest ptr is non-null and we have data to copy */ ++ if ((pui32MemStatsArrayInt) && ++ ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0)) ++ { ++ if (unlikely ++ (OSCopyToUser ++ (NULL, (void __user *)psFindProcessMemStatsOUT->pui32MemStatsArray, ++ pui32MemStatsArrayInt, ++ (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32))) != PVRSRV_OK)) ++ { ++ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto FindProcessMemStats_exit; ++ } ++ } ++ ++FindProcessMemStats_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psFindProcessMemStatsOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _AcquireInfoPagepsPMRIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PVRSRVReleaseInfoPageKM((PMR *) pvData); ++ return eError; ++} ++ ++static IMG_INT ++PVRSRVBridgeAcquireInfoPage(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psAcquireInfoPageIN_UI8, ++ IMG_UINT8 * psAcquireInfoPageOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *psAcquireInfoPageIN = ++ (PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *psAcquireInfoPageOUT = ++ (PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *) IMG_OFFSET_ADDR(psAcquireInfoPageOUT_UI8, 0); ++ ++ PMR *psPMRInt = NULL; ++ ++ PVR_UNREFERENCED_PARAMETER(psAcquireInfoPageIN); ++ ++ psAcquireInfoPageOUT->eError = PVRSRVAcquireInfoPageKM(&psPMRInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK)) ++ { ++ goto AcquireInfoPage_exit; ++ } ++ ++ /* Lock over handle creation. 
*/ ++ LockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ psAcquireInfoPageOUT->eError = ++ PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase, ++ &psAcquireInfoPageOUT->hPMR, (void *)psPMRInt, ++ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT, ++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI, ++ (PFN_HANDLE_RELEASE) & _AcquireInfoPagepsPMRIntRelease); ++ if (unlikely(psAcquireInfoPageOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ goto AcquireInfoPage_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++AcquireInfoPage_exit: ++ ++ if (psAcquireInfoPageOUT->eError != PVRSRV_OK) ++ { ++ if (psPMRInt) ++ { ++ PVRSRVReleaseInfoPageKM(psPMRInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psReleaseInfoPageIN_UI8, ++ IMG_UINT8 * psReleaseInfoPageOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *psReleaseInfoPageIN = ++ (PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *psReleaseInfoPageOUT = ++ (PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *) IMG_OFFSET_ADDR(psReleaseInfoPageOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ psReleaseInfoPageOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psProcessHandleBase->psHandleBase, ++ (IMG_HANDLE) psReleaseInfoPageIN->hPMR, ++ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT); ++ if (unlikely((psReleaseInfoPageOUT->eError != PVRSRV_OK) && ++ (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(psReleaseInfoPageOUT->eError))); ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ goto ReleaseInfoPage_exit; ++ } ++ ++ /* Release now we have destroyed handles. 
*/ ++ UnlockHandle(psConnection->psProcessHandleBase->psHandleBase); ++ ++ReleaseInfoPage_exit: ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitSRVCOREBridge(void); ++void DeinitSRVCOREBridge(void); ++ ++/* ++ * Register all SRVCORE functions with services ++ */ ++PVRSRV_ERROR InitSRVCOREBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT, ++ PVRSRVBridgeConnect, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT, ++ PVRSRVBridgeDisconnect, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, ++ PVRSRVBridgeAcquireGlobalEventObject, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, ++ PVRSRVBridgeReleaseGlobalEventObject, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, ++ PVRSRVBridgeEventObjectOpen, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, ++ PVRSRVBridgeEventObjectWait, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, ++ PVRSRVBridgeEventObjectClose, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, ++ PVRSRVBridgeDumpDebugInfo, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, ++ PVRSRVBridgeGetDevClockSpeed, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, ++ PVRSRVBridgeHWOpTimeout, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, ++ PVRSRVBridgeAlignmentCheck, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, ++ PVRSRVBridgeGetDeviceStatus, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO, ++ PVRSRVBridgeGetMultiCoreInfo, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, ++ PVRSRVBridgeEventObjectWaitTimeout, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS, ++ PVRSRVBridgeFindProcessMemStats, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE, ++ PVRSRVBridgeAcquireInfoPage, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE, ++ PVRSRVBridgeReleaseInfoPage, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all srvcore functions with services ++ */ ++void DeinitSRVCOREBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, ++ PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, ++ PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO); ++ ++ 
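/* Every UnsetDispatchTableEntry below mirrors a SetDispatchTableEntry in
++ * InitSRVCOREBridge above; entries are keyed by (bridge group, function ID)
++ * so the ioctl dispatcher can route a call to its handler, roughly (a
++ * hypothetical sketch, not the services implementation):
++ *
++ *   pfnBridge = apsDispatchTable[ui32GroupBase + ui32FunctionID];
++ *   iRet = pfnBridge(ui32FunctionID, psInBuf, psOutBuf, psConnection);
++ *
++ * Leaving an entry registered past module teardown would leave a stale
++ * function pointer in the table, hence the strict pairing.
++ */
++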
UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETMULTICOREINFO); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, ++ PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_sync_bridge.c b/drivers/gpu/drm/img-rogue/server_sync_bridge.c +new file mode 100644 +index 000000000000..4788fc1fb723 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_sync_bridge.c +@@ -0,0 +1,746 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for sync ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for sync ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "sync.h"
++#include "sync_server.h"
++#include "pdump.h"
++#include "pvrsrv_sync_km.h"
++#include "sync_fallback_server.h"
++#include "sync_checkpoint.h"
++
++#include "common_sync_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static PVRSRV_ERROR _AllocSyncPrimitiveBlockpsSyncHandleIntRelease(void *pvData)
++{
++ PVRSRV_ERROR eError;
++ eError = PVRSRVFreeSyncPrimitiveBlockKM((SYNC_PRIMITIVE_BLOCK *) pvData);
++ return eError;
++}
++
++static IMG_INT
++PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psAllocSyncPrimitiveBlockIN_UI8,
++ IMG_UINT8 * psAllocSyncPrimitiveBlockOUT_UI8,
++ CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN =
++ (PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *)
++ IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockIN_UI8, 0);
++ PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT =
++ (PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *)
++ IMG_OFFSET_ADDR(psAllocSyncPrimitiveBlockOUT_UI8, 0);
++
++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL;
++ PMR *pshSyncPMRInt = NULL;
++
++ PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN);
++
++ psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL;
++
++ psAllocSyncPrimitiveBlockOUT->eError =
++ PVRSRVAllocSyncPrimitiveBlockKM(psConnection, OSGetDevNode(psConnection),
++ &psSyncHandleInt,
++ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimVAddr,
++ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimBlockSize,
++ &pshSyncPMRInt);
++ /* Exit early if bridged call fails */
++ if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
++ {
++ goto AllocSyncPrimitiveBlock_exit;
++ }
++
++ /* Lock over handle creation. */
++ LockHandle(psConnection->psHandleBase);
++
++ psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
++ &psAllocSyncPrimitiveBlockOUT->
++ hSyncHandle,
++ (void *)psSyncHandleInt,
++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
++ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
++ (PFN_HANDLE_RELEASE) &
++ _AllocSyncPrimitiveBlockpsSyncHandleIntRelease);
++ if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
++ {
++ UnlockHandle(psConnection->psHandleBase);
++ goto AllocSyncPrimitiveBlock_exit;
++ }
++
++ psAllocSyncPrimitiveBlockOUT->eError =
++ PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
++ &psAllocSyncPrimitiveBlockOUT->hhSyncPMR,
++ (void *)pshSyncPMRInt,
++ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
++ PVRSRV_HANDLE_ALLOC_FLAG_NONE,
++ psAllocSyncPrimitiveBlockOUT->hSyncHandle);
++ if (unlikely(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK))
++ {
++ UnlockHandle(psConnection->psHandleBase);
++ goto AllocSyncPrimitiveBlock_exit;
++ }
++
++ /* Release now we have created handles. */
++ UnlockHandle(psConnection->psHandleBase);
++
++AllocSyncPrimitiveBlock_exit:
++
++ if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
++ {
++ if (psAllocSyncPrimitiveBlockOUT->hSyncHandle)
++ {
++ PVRSRV_ERROR eError;
++
++ /* Lock over handle creation cleanup.
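++ * On failure after the handle was created, destroying the handle invokes
++ * the PFN_HANDLE_RELEASE callback registered above, which frees the sync
++ * primitive block; psSyncHandleInt is then set to NULL below so the direct
++ * PVRSRVFreeSyncPrimitiveBlockKM fallback cannot free it a second time.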
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ eError = PVRSRVDestroyHandleUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) ++ psAllocSyncPrimitiveBlockOUT-> ++ hSyncHandle, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ if (unlikely((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", __func__, PVRSRVGetErrorString(eError))); ++ } ++ /* Releasing the handle should free/destroy/release the resource. ++ * This should never fail... */ ++ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY)); ++ ++ /* Avoid freeing/destroying/releasing the resource a second time below */ ++ psSyncHandleInt = NULL; ++ /* Release now we have cleaned up creation handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ } ++ ++ if (psSyncHandleInt) ++ { ++ PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt); ++ } ++ } ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psFreeSyncPrimitiveBlockIN_UI8, ++ IMG_UINT8 * psFreeSyncPrimitiveBlockOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN = ++ (PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *) ++ IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT = ++ (PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *) ++ IMG_OFFSET_ADDR(psFreeSyncPrimitiveBlockOUT_UI8, 0); ++ ++ /* Lock over handle destruction. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psFreeSyncPrimitiveBlockOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psFreeSyncPrimitiveBlockIN->hSyncHandle, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ if (unlikely((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) && ++ (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psFreeSyncPrimitiveBlockOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto FreeSyncPrimitiveBlock_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++FreeSyncPrimitiveBlock_exit: ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psSyncPrimSetIN_UI8, ++ IMG_UINT8 * psSyncPrimSetOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN = ++ (PVRSRV_BRIDGE_IN_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT = ++ (PVRSRV_BRIDGE_OUT_SYNCPRIMSET *) IMG_OFFSET_ADDR(psSyncPrimSetOUT_UI8, 0); ++ ++ IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psSyncPrimSetOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSyncHandleInt, ++ hSyncHandle, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); ++ if (unlikely(psSyncPrimSetOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto SyncPrimSet_exit; ++ } ++ /* Release now we have looked up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psSyncPrimSetOUT->eError = ++ PVRSRVSyncPrimSetKM(psSyncHandleInt, ++ psSyncPrimSetIN->ui32Index, psSyncPrimSetIN->ui32Value); ++ ++SyncPrimSet_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSyncHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++#if defined(PDUMP) ++ ++static IMG_INT ++PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psSyncPrimPDumpIN_UI8, ++ IMG_UINT8 * psSyncPrimPDumpOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN = ++ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT = ++ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *) IMG_OFFSET_ADDR(psSyncPrimPDumpOUT_UI8, 0); ++ ++ IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psSyncPrimPDumpOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSyncHandleInt, ++ hSyncHandle, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); ++ if (unlikely(psSyncPrimPDumpOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto SyncPrimPDump_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psSyncPrimPDumpOUT->eError = ++ PVRSRVSyncPrimPDumpKM(psSyncHandleInt, psSyncPrimPDumpIN->ui32Offset); ++ ++SyncPrimPDump_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSyncHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++#else ++#define PVRSRVBridgeSyncPrimPDump NULL ++#endif ++ ++#if defined(PDUMP) ++ ++static IMG_INT ++PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psSyncPrimPDumpValueIN_UI8, ++ IMG_UINT8 * psSyncPrimPDumpValueOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN = ++ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT = ++ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *) IMG_OFFSET_ADDR(psSyncPrimPDumpValueOUT_UI8, ++ 0); ++ ++ IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; ++ ++ /* Lock over handle lookup. 
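++ * As in every lookup in these generated bridges, the per-connection handle
++ * base is locked only around the table access; the IMG_TRUE argument takes
++ * a reference on the looked-up object, which the _exit path drops via
++ * PVRSRVReleaseHandleUnlocked, so the object stays alive while the KM call
++ * runs outside the lock.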
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psSyncPrimPDumpValueOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSyncHandleInt, ++ hSyncHandle, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); ++ if (unlikely(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto SyncPrimPDumpValue_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psSyncPrimPDumpValueOUT->eError = ++ PVRSRVSyncPrimPDumpValueKM(psSyncHandleInt, ++ psSyncPrimPDumpValueIN->ui32Offset, ++ psSyncPrimPDumpValueIN->ui32Value); ++ ++SyncPrimPDumpValue_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSyncHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ /* Release now we have cleaned up look up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++#else ++#define PVRSRVBridgeSyncPrimPDumpValue NULL ++#endif ++ ++#if defined(PDUMP) ++ ++static IMG_INT ++PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psSyncPrimPDumpPolIN_UI8, ++ IMG_UINT8 * psSyncPrimPDumpPolOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN = ++ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT = ++ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *) IMG_OFFSET_ADDR(psSyncPrimPDumpPolOUT_UI8, 0); ++ ++ IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psSyncPrimPDumpPolOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSyncHandleInt, ++ hSyncHandle, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); ++ if (unlikely(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto SyncPrimPDumpPol_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psSyncPrimPDumpPolOUT->eError = ++ PVRSRVSyncPrimPDumpPolKM(psSyncHandleInt, ++ psSyncPrimPDumpPolIN->ui32Offset, ++ psSyncPrimPDumpPolIN->ui32Value, ++ psSyncPrimPDumpPolIN->ui32Mask, ++ psSyncPrimPDumpPolIN->eOperator, ++ psSyncPrimPDumpPolIN->uiPDumpFlags); ++ ++SyncPrimPDumpPol_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSyncHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++#else ++#define PVRSRVBridgeSyncPrimPDumpPol NULL ++#endif ++ ++#if defined(PDUMP) ++ ++static IMG_INT ++PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psSyncPrimPDumpCBPIN_UI8, ++ IMG_UINT8 * psSyncPrimPDumpCBPOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN = ++ (PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT = ++ (PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *) IMG_OFFSET_ADDR(psSyncPrimPDumpCBPOUT_UI8, 0); ++ ++ IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle; ++ SYNC_PRIMITIVE_BLOCK *psSyncHandleInt = NULL; ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psSyncPrimPDumpCBPOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&psSyncHandleInt, ++ hSyncHandle, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); ++ if (unlikely(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto SyncPrimPDumpCBP_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psSyncPrimPDumpCBPOUT->eError = ++ PVRSRVSyncPrimPDumpCBPKM(psSyncHandleInt, ++ psSyncPrimPDumpCBPIN->ui32Offset, ++ psSyncPrimPDumpCBPIN->uiWriteOffset, ++ psSyncPrimPDumpCBPIN->uiPacketSize, ++ psSyncPrimPDumpCBPIN->uiBufferSize); ++ ++SyncPrimPDumpCBP_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (psSyncHandleInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hSyncHandle, PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ return 0; ++} ++ ++#else ++#define PVRSRVBridgeSyncPrimPDumpCBP NULL ++#endif ++ ++static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, ++ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psSyncAllocEventIN_UI8, ++ IMG_UINT8 * psSyncAllocEventOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN = ++ (PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT = ++ (PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *) IMG_OFFSET_ADDR(psSyncAllocEventOUT_UI8, 0); ++ ++ IMG_CHAR *uiClassNameInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely(psSyncAllocEventIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) ++ { ++ psSyncAllocEventOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto SyncAllocEvent_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psSyncAllocEventOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto SyncAllocEvent_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncAllocEventIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psSyncAllocEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto SyncAllocEvent_exit; ++ } ++ } ++ } ++ ++ if (psSyncAllocEventIN->ui32ClassNameSize != 0) ++ { ++ uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiClassNameInt, (const void __user *)psSyncAllocEventIN->puiClassName, ++ psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psSyncAllocEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto SyncAllocEvent_exit; ++ } ++ ((IMG_CHAR *) ++ uiClassNameInt)[(psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] = ++ '\0'; ++ } ++ ++ psSyncAllocEventOUT->eError = ++ PVRSRVSyncAllocEventKM(psConnection, OSGetDevNode(psConnection), ++ psSyncAllocEventIN->bServerSync, ++ psSyncAllocEventIN->ui32FWAddr, ++ psSyncAllocEventIN->ui32ClassNameSize, uiClassNameInt); ++ ++SyncAllocEvent_exit: ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psSyncAllocEventOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++static IMG_INT ++PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psSyncFreeEventIN_UI8, ++ IMG_UINT8 * psSyncFreeEventOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psSyncFreeEventIN = ++ (PVRSRV_BRIDGE_IN_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psSyncFreeEventOUT = ++ (PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *) IMG_OFFSET_ADDR(psSyncFreeEventOUT_UI8, 0); ++ ++ psSyncFreeEventOUT->eError = ++ PVRSRVSyncFreeEventKM(psConnection, OSGetDevNode(psConnection), ++ psSyncFreeEventIN->ui32FWAddr); ++ ++ return 0; ++} ++ ++#if defined(PDUMP) ++ ++static IMG_INT ++PVRSRVBridgeSyncCheckpointSignalledPDumpPol(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psSyncCheckpointSignalledPDumpPolIN_UI8, ++ IMG_UINT8 * psSyncCheckpointSignalledPDumpPolOUT_UI8, ++ CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psSyncCheckpointSignalledPDumpPolIN = ++ (PVRSRV_BRIDGE_IN_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) ++ IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *psSyncCheckpointSignalledPDumpPolOUT = ++ (PVRSRV_BRIDGE_OUT_SYNCCHECKPOINTSIGNALLEDPDUMPPOL *) ++ IMG_OFFSET_ADDR(psSyncCheckpointSignalledPDumpPolOUT_UI8, 0); ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ psSyncCheckpointSignalledPDumpPolOUT->eError = ++ PVRSRVSyncCheckpointSignalledPDumpPolKM(psSyncCheckpointSignalledPDumpPolIN->hFence); ++ ++ return 0; ++} ++ ++#else ++#define PVRSRVBridgeSyncCheckpointSignalledPDumpPol NULL ++#endif ++ ++/* 
*************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitSYNCBridge(void); ++void DeinitSYNCBridge(void); ++ ++/* ++ * Register all SYNC functions with services ++ */ ++PVRSRV_ERROR InitSYNCBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, ++ PVRSRVBridgeAllocSyncPrimitiveBlock, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, ++ PVRSRVBridgeFreeSyncPrimitiveBlock, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, ++ PVRSRVBridgeSyncPrimSet, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, ++ PVRSRVBridgeSyncPrimPDump, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, ++ PVRSRVBridgeSyncPrimPDumpValue, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, ++ PVRSRVBridgeSyncPrimPDumpPol, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, ++ PVRSRVBridgeSyncPrimPDumpCBP, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, ++ PVRSRVBridgeSyncAllocEvent, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, ++ PVRSRVBridgeSyncFreeEvent, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, ++ PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL, ++ PVRSRVBridgeSyncCheckpointSignalledPDumpPol, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all sync functions with services ++ */ ++void DeinitSYNCBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, ++ PVRSRV_BRIDGE_SYNC_SYNCCHECKPOINTSIGNALLEDPDUMPPOL); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/server_synctracking_bridge.c b/drivers/gpu/drm/img-rogue/server_synctracking_bridge.c +new file mode 100644 +index 000000000000..adc8ab487a12 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/server_synctracking_bridge.c +@@ -0,0 +1,333 @@ ++/******************************************************************************* ++@File ++@Title Server bridge for synctracking ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements the server side of the bridge for synctracking ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*******************************************************************************/
++
++#include <linux/uaccess.h>
++
++#include "img_defs.h"
++
++#include "sync.h"
++#include "sync_server.h"
++
++#include "common_synctracking_bridge.h"
++
++#include "allocmem.h"
++#include "pvr_debug.h"
++#include "connection_server.h"
++#include "pvr_bridge.h"
++#if defined(SUPPORT_RGX)
++#include "rgx_bridge.h"
++#endif
++#include "srvcore.h"
++#include "handle.h"
++
++#include <linux/slab.h>
++
++/* ***************************************************************************
++ * Server-side bridge entry points
++ */
++
++static IMG_INT
++PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry,
++ IMG_UINT8 * psSyncRecordRemoveByHandleIN_UI8,
++ IMG_UINT8 * psSyncRecordRemoveByHandleOUT_UI8,
++ CONNECTION_DATA * psConnection)
++{
++ PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN =
++ (PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *)
++ IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleIN_UI8, 0);
++ PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleOUT =
++ (PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *)
++ IMG_OFFSET_ADDR(psSyncRecordRemoveByHandleOUT_UI8, 0);
++
++ /* Lock over handle destruction.
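++ * Destruction is staged rather than performed inline; the error filter
++ * below only logs hard failures, while PVRSRV_ERROR_RETRY and
++ * PVRSRV_ERROR_KERNEL_CCB_FULL are passed back to the caller as transient,
++ * retryable conditions.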
*/ ++ LockHandle(psConnection->psHandleBase); ++ ++ psSyncRecordRemoveByHandleOUT->eError = ++ PVRSRVDestroyHandleStagedUnlocked(psConnection->psHandleBase, ++ (IMG_HANDLE) psSyncRecordRemoveByHandleIN->hhRecord, ++ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE); ++ if (unlikely((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) && ++ (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_KERNEL_CCB_FULL) && ++ (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: %s", ++ __func__, PVRSRVGetErrorString(psSyncRecordRemoveByHandleOUT->eError))); ++ UnlockHandle(psConnection->psHandleBase); ++ goto SyncRecordRemoveByHandle_exit; ++ } ++ ++ /* Release now we have destroyed handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++SyncRecordRemoveByHandle_exit: ++ ++ return 0; ++} ++ ++static PVRSRV_ERROR _SyncRecordAddpshRecordIntRelease(void *pvData) ++{ ++ PVRSRV_ERROR eError; ++ eError = PVRSRVSyncRecordRemoveByHandleKM((SYNC_RECORD_HANDLE) pvData); ++ return eError; ++} ++ ++static_assert(PVRSRV_SYNC_NAME_LENGTH <= IMG_UINT32_MAX, ++ "PVRSRV_SYNC_NAME_LENGTH must not be larger than IMG_UINT32_MAX"); ++ ++static IMG_INT ++PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 * psSyncRecordAddIN_UI8, ++ IMG_UINT8 * psSyncRecordAddOUT_UI8, CONNECTION_DATA * psConnection) ++{ ++ PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN = ++ (PVRSRV_BRIDGE_IN_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddIN_UI8, 0); ++ PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT = ++ (PVRSRV_BRIDGE_OUT_SYNCRECORDADD *) IMG_OFFSET_ADDR(psSyncRecordAddOUT_UI8, 0); ++ ++ SYNC_RECORD_HANDLE pshRecordInt = NULL; ++ IMG_HANDLE hhServerSyncPrimBlock = psSyncRecordAddIN->hhServerSyncPrimBlock; ++ SYNC_PRIMITIVE_BLOCK *pshServerSyncPrimBlockInt = NULL; ++ IMG_CHAR *uiClassNameInt = NULL; ++ ++ IMG_UINT32 ui32NextOffset = 0; ++ IMG_BYTE *pArrayArgsBuffer = NULL; ++#if !defined(INTEGRITY_OS) ++ IMG_BOOL bHaveEnoughSpace = IMG_FALSE; ++#endif ++ ++ IMG_UINT32 ui32BufferSize = 0; ++ IMG_UINT64 ui64BufferSize = ++ ((IMG_UINT64) psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) + 0; ++ ++ if (unlikely(psSyncRecordAddIN->ui32ClassNameSize > PVRSRV_SYNC_NAME_LENGTH)) ++ { ++ psSyncRecordAddOUT->eError = PVRSRV_ERROR_BRIDGE_ARRAY_SIZE_TOO_BIG; ++ goto SyncRecordAdd_exit; ++ } ++ ++ if (ui64BufferSize > IMG_UINT32_MAX) ++ { ++ psSyncRecordAddOUT->eError = PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL; ++ goto SyncRecordAdd_exit; ++ } ++ ++ ui32BufferSize = (IMG_UINT32) ui64BufferSize; ++ ++ if (ui32BufferSize != 0) ++ { ++#if !defined(INTEGRITY_OS) ++ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */ ++ IMG_UINT32 ui32InBufferOffset = ++ PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long)); ++ IMG_UINT32 ui32InBufferExcessSize = ++ ui32InBufferOffset >= ++ PVRSRV_MAX_BRIDGE_IN_SIZE ? 
0 : PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset; ++ ++ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize; ++ if (bHaveEnoughSpace) ++ { ++ IMG_BYTE *pInputBuffer = (IMG_BYTE *) (void *)psSyncRecordAddIN; ++ ++ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset]; ++ } ++ else ++#endif ++ { ++ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize); ++ ++ if (!pArrayArgsBuffer) ++ { ++ psSyncRecordAddOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto SyncRecordAdd_exit; ++ } ++ } ++ } ++ ++ if (psSyncRecordAddIN->ui32ClassNameSize != 0) ++ { ++ uiClassNameInt = (IMG_CHAR *) IMG_OFFSET_ADDR(pArrayArgsBuffer, ui32NextOffset); ++ ui32NextOffset += psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR); ++ } ++ ++ /* Copy the data over */ ++ if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0) ++ { ++ if (OSCopyFromUser ++ (NULL, uiClassNameInt, (const void __user *)psSyncRecordAddIN->puiClassName, ++ psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK) ++ { ++ psSyncRecordAddOUT->eError = PVRSRV_ERROR_INVALID_PARAMS; ++ ++ goto SyncRecordAdd_exit; ++ } ++ ((IMG_CHAR *) ++ uiClassNameInt)[(psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) - 1] = ++ '\0'; ++ } ++ ++ /* Lock over handle lookup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Look up the address from the handle */ ++ psSyncRecordAddOUT->eError = ++ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase, ++ (void **)&pshServerSyncPrimBlockInt, ++ hhServerSyncPrimBlock, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK, IMG_TRUE); ++ if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto SyncRecordAdd_exit; ++ } ++ /* Release now we have looked up handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ psSyncRecordAddOUT->eError = ++ PVRSRVSyncRecordAddKM(psConnection, OSGetDevNode(psConnection), ++ &pshRecordInt, ++ pshServerSyncPrimBlockInt, ++ psSyncRecordAddIN->ui32ui32FwBlockAddr, ++ psSyncRecordAddIN->ui32ui32SyncOffset, ++ psSyncRecordAddIN->bbServerSync, ++ psSyncRecordAddIN->ui32ClassNameSize, uiClassNameInt); ++ /* Exit early if bridged call fails */ ++ if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) ++ { ++ goto SyncRecordAdd_exit; ++ } ++ ++ /* Lock over handle creation. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ psSyncRecordAddOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase, ++ &psSyncRecordAddOUT->hhRecord, ++ (void *)pshRecordInt, ++ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE, ++ PVRSRV_HANDLE_ALLOC_FLAG_NONE, ++ (PFN_HANDLE_RELEASE) & ++ _SyncRecordAddpshRecordIntRelease); ++ if (unlikely(psSyncRecordAddOUT->eError != PVRSRV_OK)) ++ { ++ UnlockHandle(psConnection->psHandleBase); ++ goto SyncRecordAdd_exit; ++ } ++ ++ /* Release now we have created handles. */ ++ UnlockHandle(psConnection->psHandleBase); ++ ++SyncRecordAdd_exit: ++ ++ /* Lock over handle lookup cleanup. */ ++ LockHandle(psConnection->psHandleBase); ++ ++ /* Unreference the previously looked up handle */ ++ if (pshServerSyncPrimBlockInt) ++ { ++ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase, ++ hhServerSyncPrimBlock, ++ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK); ++ } ++ /* Release now we have cleaned up look up handles. 
*/ ++ UnlockHandle(psConnection->psHandleBase); ++ ++ if (psSyncRecordAddOUT->eError != PVRSRV_OK) ++ { ++ if (pshRecordInt) ++ { ++ PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt); ++ } ++ } ++ ++ /* Allocated space should be equal to the last updated offset */ ++#ifdef PVRSRV_NEED_PVR_ASSERT ++ if (psSyncRecordAddOUT->eError == PVRSRV_OK) ++ PVR_ASSERT(ui32BufferSize == ui32NextOffset); ++#endif /* PVRSRV_NEED_PVR_ASSERT */ ++ ++#if defined(INTEGRITY_OS) ++ if (pArrayArgsBuffer) ++#else ++ if (!bHaveEnoughSpace && pArrayArgsBuffer) ++#endif ++ OSFreeMemNoStats(pArrayArgsBuffer); ++ ++ return 0; ++} ++ ++/* *************************************************************************** ++ * Server bridge dispatch related glue ++ */ ++ ++PVRSRV_ERROR InitSYNCTRACKINGBridge(void); ++void DeinitSYNCTRACKINGBridge(void); ++ ++/* ++ * Register all SYNCTRACKING functions with services ++ */ ++PVRSRV_ERROR InitSYNCTRACKINGBridge(void) ++{ ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, ++ PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, ++ PVRSRVBridgeSyncRecordRemoveByHandle, NULL); ++ ++ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, ++ PVRSRVBridgeSyncRecordAdd, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ * Unregister all synctracking functions with services ++ */ ++void DeinitSYNCTRACKINGBridge(void) ++{ ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, ++ PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE); ++ ++ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, ++ PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD); ++ ++} +diff --git a/drivers/gpu/drm/img-rogue/services_kernel_client.h b/drivers/gpu/drm/img-rogue/services_kernel_client.h +new file mode 100644 +index 000000000000..aaca47f1e6d6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/services_kernel_client.h +@@ -0,0 +1,291 @@ ++/*************************************************************************/ /*! ++@File services_kernel_client.h ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. 
+diff --git a/drivers/gpu/drm/img-rogue/services_kernel_client.h b/drivers/gpu/drm/img-rogue/services_kernel_client.h +new file mode 100644 +index 000000000000..aaca47f1e6d6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/services_kernel_client.h +@@ -0,0 +1,291 @@ ++/*************************************************************************/ /*! ++@File services_kernel_client.h ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++/* This file contains a partial redefinition of the PowerVR Services 5 ++ * interface for use by components which are checkpatch clean. This ++ * header is included by the unrefined, non-checkpatch clean headers ++ * to ensure that prototype/typedef/macro changes break the build. ++ */ ++ ++#ifndef __SERVICES_KERNEL_CLIENT__ ++#define __SERVICES_KERNEL_CLIENT__ ++ ++#include "pvrsrv_error.h" ++ ++#include <linux/types.h> ++ ++#include "pvrsrv_sync_km.h" ++#include "sync_checkpoint_external.h" ++ ++/* included for the define PVRSRV_LINUX_DEV_INIT_ON_PROBE */ ++#include "pvr_drm.h" ++ ++#ifndef __pvrsrv_defined_struct_enum__ ++ ++/* sync_external.h */ ++ ++struct PVRSRV_CLIENT_SYNC_PRIM_TAG { ++ volatile __u32 *pui32LinAddr; ++}; ++ ++struct PVRSRV_CLIENT_SYNC_PRIM_OP { ++ __u32 ui32Flags; ++ struct pvrsrv_sync_prim *psSync; ++ __u32 ui32FenceValue; ++ __u32 ui32UpdateValue; ++}; ++ ++#else /* __pvrsrv_defined_struct_enum__ */ ++ ++struct PVRSRV_CLIENT_SYNC_PRIM_TAG; ++struct PVRSRV_CLIENT_SYNC_PRIM_OP; ++ ++enum tag_img_bool; ++ ++#endif /* __pvrsrv_defined_struct_enum__ */ ++ ++struct _PMR_; ++struct _PVRSRV_DEVICE_NODE_; ++struct dma_buf; ++struct SYNC_PRIM_CONTEXT_TAG; ++ ++/* pvr_notifier.h */ ++ ++#ifndef CMDCOMPNOTIFY_PFN ++typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle); ++#define CMDCOMPNOTIFY_PFN ++#endif ++enum PVRSRV_ERROR_TAG PVRSRVRegisterCmdCompleteNotify(void **phNotify, ++ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData); ++enum PVRSRV_ERROR_TAG PVRSRVUnregisterCmdCompleteNotify(void *hNotify); ++void PVRSRVCheckStatus(void *hCmdCompCallerHandle); ++ ++#define DEBUG_REQUEST_DC 0 ++#define DEBUG_REQUEST_SYNCTRACKING 1 ++#define DEBUG_REQUEST_SRV 2 ++#define DEBUG_REQUEST_SYS 3 ++#define DEBUG_REQUEST_RGX 4 ++#define DEBUG_REQUEST_ANDROIDSYNC 5 ++#define DEBUG_REQUEST_LINUXFENCE 6 ++#define DEBUG_REQUEST_SYNCCHECKPOINT 7 ++#define DEBUG_REQUEST_HTB 8 ++#define DEBUG_REQUEST_APPHINT 9 ++#define DEBUG_REQUEST_FALLBACKSYNC 10 ++ ++#define DEBUG_REQUEST_VERBOSITY_LOW 0 ++#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1 ++#define DEBUG_REQUEST_VERBOSITY_HIGH 2 ++#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH ++ ++#define DD_VERB_LVL_ENABLED(_verbLvl, _verbLvlChk) ((_verbLvl) >= (_verbLvlChk)) ++ ++#ifndef DBGNOTIFY_PFNS ++typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile, ++ const char *fmt, ...)
__printf(2, 3); ++typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle, ++ __u32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++#define DBGNOTIFY_PFNS ++#endif ++enum PVRSRV_ERROR_TAG PVRSRVRegisterDeviceDbgRequestNotify(void **phNotify, ++ struct _PVRSRV_DEVICE_NODE_ *psDevNode, ++ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, ++ __u32 ui32RequesterID, ++ void *hDbgRequestHandle); ++enum PVRSRV_ERROR_TAG PVRSRVUnregisterDeviceDbgRequestNotify(void *hNotify); ++enum PVRSRV_ERROR_TAG PVRSRVRegisterDriverDbgRequestNotify(void **phNotify, ++ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify, ++ __u32 ui32RequesterID, ++ void *hDbgRequestHandle); ++enum PVRSRV_ERROR_TAG PVRSRVUnregisterDriverDbgRequestNotify(void *hNotify); ++ ++/* physmem_dmabuf.h */ ++ ++struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR); ++ ++/* pvrsrv.h */ ++ ++enum PVRSRV_ERROR_TAG PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject); ++enum PVRSRV_ERROR_TAG PVRSRVReleaseGlobalEventObjectKM(void *hGlobalEventObject); ++ ++/* sync.h */ ++ ++enum PVRSRV_ERROR_TAG SyncPrimContextCreate( ++ struct _PVRSRV_DEVICE_NODE_ *psDevConnection, ++ struct SYNC_PRIM_CONTEXT_TAG **phSyncPrimContext); ++void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT_TAG *hSyncPrimContext); ++ ++enum PVRSRV_ERROR_TAG SyncPrimAlloc(struct SYNC_PRIM_CONTEXT_TAG *hSyncPrimContext, ++ struct PVRSRV_CLIENT_SYNC_PRIM_TAG **ppsSync, const char *pszClassName); ++enum PVRSRV_ERROR_TAG SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM_TAG *psSync); ++enum PVRSRV_ERROR_TAG SyncPrimGetFirmwareAddr( ++ struct PVRSRV_CLIENT_SYNC_PRIM_TAG *psSync, ++ __u32 *sync_addr); ++ ++/* osfunc.h */ ++enum PVRSRV_ERROR_TAG OSEventObjectWait(void *hOSEventKM); ++enum PVRSRV_ERROR_TAG OSEventObjectOpen(void *hEventObject, void **phOSEventKM); ++enum PVRSRV_ERROR_TAG OSEventObjectClose(void *hOSEventKM); ++__u32 OSGetCurrentClientProcessIDKM(void); ++__u32 OSStringUINT32ToStr(char *pszBuf, size_t uSize, __u32 ui32Num); ++ ++/* srvkm.h */ ++ ++enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceCreate(void *pvOSDevice, ++ int i32OsDeviceID, ++ struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); ++enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceDestroy( ++ struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++const char *PVRSRVGetErrorString(enum PVRSRV_ERROR_TAG eError); ++#if (PVRSRV_DEVICE_INIT_MODE == PVRSRV_LINUX_DEV_INIT_ON_PROBE) ++enum PVRSRV_ERROR_TAG PVRSRVCommonDeviceInitialise( ++ struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++#endif ++ ++#ifndef CHECKPOINT_PFNS ++typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence, u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid); ++ ++#ifndef CHECKPOINT_PFNS ++typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)( ++ struct _PVRSRV_DEVICE_NODE_ *device, ++ const char *fence_name, ++ PVRSRV_TIMELINE timeline, ++ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, ++ PVRSRV_FENCE *new_fence, ++ u64 *fence_uid, ++ void **fence_finalise_data, ++ PSYNC_CHECKPOINT *new_checkpoint_handle, ++ void **timeline_update_sync, ++ __u32 *timeline_update_value); ++#endif ++ ++#ifndef CHECKPOINT_PFNS ++typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data); ++#endif ++ ++#ifndef CHECKPOINT_PFNS ++typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data); ++#endif ++ ++#ifndef CHECKPOINT_PFNS ++typedef __u32 
(*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(__u32 num_ufos, __u32 *vaddrs); ++#endif ++ ++#ifndef CHECKPOINT_PFNS ++typedef enum tag_img_bool (*PFN_SYNC_CHECKPOINT_UFO_HAS_SIGNALLED_FN)( ++ __u32 ui32FwAddr, __u32 ui32Value); ++typedef enum PVRSRV_ERROR_TAG (*PFN_SYNC_CHECKPOINT_SIGNAL_WAITERS_FN)(void); ++typedef void(*PFN_SYNC_CHECKPOINT_CHECK_STATE_FN)(void); ++#if defined(PDUMP) ++typedef PVRSRV_ERROR(*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence, ++ IMG_UINT32 *puiNumCheckpoints, ++ PSYNC_CHECKPOINT **papsCheckpoints); ++#endif ++#endif ++ ++/* This is the function that kick code will call in a NO_HARDWARE build only after ++ * sync checkpoints have been manually signalled, to allow the OS native sync ++ * implementation to update its timelines (as the usual callback notification ++ * of signalled checkpoints is not supported for NO_HARDWARE). ++ */ ++#ifndef CHECKPOINT_PFNS ++typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); ++typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); ++ ++#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 ++ ++typedef struct { ++ PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; ++ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; ++ PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; ++ PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; ++ PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; ++ PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; ++ PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; ++ char pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; ++#if defined(PDUMP) ++ PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; ++#endif ++} PFN_SYNC_CHECKPOINT_STRUCT; ++ ++enum PVRSRV_ERROR_TAG SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); ++ ++#define CHECKPOINT_PFNS ++#endif ++ ++/* sync_checkpoint.h */ ++enum PVRSRV_ERROR_TAG SyncCheckpointContextCreate(struct _PVRSRV_DEVICE_NODE_ *psDevConnection, PSYNC_CHECKPOINT_CONTEXT *phSyncCheckpointContext); ++enum PVRSRV_ERROR_TAG SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext); ++void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext); ++void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext); ++enum PVRSRV_ERROR_TAG SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, PVRSRV_TIMELINE timeline, PVRSRV_FENCE fence, const char *pszCheckpointName, PSYNC_CHECKPOINT *ppsSyncCheckpoint); ++void SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); ++void SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); ++enum tag_img_bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); ++enum tag_img_bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags); ++enum PVRSRV_ERROR_TAG SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint); ++enum PVRSRV_ERROR_TAG SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint); ++void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint); ++__u32 SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint); ++void SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint); ++__u32 SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint); ++__u32 SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint); ++__u32 SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint); ++PVRSRV_TIMELINE SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint); ++const char *SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint); ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) ++struct _PVRSRV_DEVICE_NODE_ *SyncCheckpointGetAssociatedDevice(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext); ++#endif ++ ++#endif
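Taken together, the sync_checkpoint.h prototypes above form the checkpoint life cycle: create a context, allocate checkpoints against it, signal or error them, and release everything. As an illustration only (error handling elided; PVRSRV_NO_TIMELINE and PVRSRV_NO_FENCE are assumed to be the usual "none" constants from pvrsrv_sync_km.h, and psDevNode is assumed to be a valid device node pointer):

/* Illustrative sketch, not part of this header: allocate, signal and
 * release a sync checkpoint.
 */
PSYNC_CHECKPOINT_CONTEXT hCtx;
PSYNC_CHECKPOINT psCheckpoint;

SyncCheckpointContextCreate(psDevNode, &hCtx);
SyncCheckpointAlloc(hCtx, PVRSRV_NO_TIMELINE, PVRSRV_NO_FENCE,
                    "example", &psCheckpoint);
SyncCheckpointCCBEnqueued(psCheckpoint);   /* mark it as enqueued once */
SyncCheckpointSignal(psCheckpoint, 0);     /* fence_sync_flags: none */
SyncCheckpointFree(psCheckpoint);          /* drops the allocation ref */
SyncCheckpointContextDestroy(hCtx);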
++ ++#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined(SUPPORT_BUFFER_SYNC) ++/*************************************************************************/ /*! ++@Function NativeSyncGetFenceStatusWq ++@Description Called to get the Foreign Fence status workqueue used in ++ Fence sync and Buffer sync. ++@Return struct workqueue_struct ptr on success, NULL otherwise. ++*/ /**************************************************************************/ ++struct workqueue_struct *NativeSyncGetFenceStatusWq(void); ++#endif ++ ++#endif /* __SERVICES_KERNEL_CLIENT__ */ +diff --git a/drivers/gpu/drm/img-rogue/services_km.h b/drivers/gpu/drm/img-rogue/services_km.h +new file mode 100644 +index 000000000000..91ee3b2f0976 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/services_km.h +@@ -0,0 +1,180 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services API Kernel mode Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Exported services API details ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++#ifndef SERVICES_KM_H ++#define SERVICES_KM_H ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#include "virt_validation_defs.h" ++#endif ++ ++/*! 4k page size definition */ ++#define PVRSRV_4K_PAGE_SIZE 4096UL /*!< Size of a 4K Page */ ++#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT 12 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++/*! 16k page size definition */ ++#define PVRSRV_16K_PAGE_SIZE 16384UL /*!< Size of a 16K Page */ ++#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT 14 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++/*! 64k page size definition */ ++#define PVRSRV_64K_PAGE_SIZE 65536UL /*!< Size of a 64K Page */ ++#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT 16 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++/*! 256k page size definition */ ++#define PVRSRV_256K_PAGE_SIZE 262144UL /*!< Size of a 256K Page */ ++#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT 18 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++/*! 1MB page size definition */ ++#define PVRSRV_1M_PAGE_SIZE 1048576UL /*!< Size of a 1M Page */ ++#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT 20 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++/*! 2MB page size definition */ ++#define PVRSRV_2M_PAGE_SIZE 2097152UL /*!< Size of a 2M Page */ ++#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT 21 /*!< Amount to shift an address by so that ++ it is always page-aligned */ ++ ++/*! ++ * @AddToGroup SRVConnectInterfaces ++ * @{ ++ */ ++ ++#ifndef PVRSRV_DEV_CONNECTION_TYPEDEF ++#define PVRSRV_DEV_CONNECTION_TYPEDEF ++/*! ++ * Forward declaration (look on connection.h) ++ */ ++typedef struct PVRSRV_DEV_CONNECTION_TAG PVRSRV_DEV_CONNECTION; ++#endif ++ ++/*! ++ * @Anchor SRV_FLAGS ++ * @Name SRV_FLAGS: Services connection flags ++ * Allows to define per-client policy for Services. ++ * @{ ++ */ ++ ++/* ++ * Use of the 32-bit connection flags mask ++ * ( X = taken/in use, - = available/unused ) ++ * ++ * 31 27 20 6 4 0 ++ * | | | | | | ++ * X---XXXXXXXX-------------XXX---- ++ */ ++ ++#define SRV_NO_HWPERF_CLIENT_STREAM (1UL << 4) /*!< Don't create HWPerf for this connection */ ++#define SRV_FLAGS_CLIENT_64BIT_COMPAT (1UL << 5) /*!< This flags gets set if the client is 64 Bit compatible. */ ++#define SRV_FLAGS_CLIENT_SLR_DISABLED (1UL << 6) /*!< This flag is set if the client does not want Sync Lockup Recovery (SLR) enabled. */ ++#define SRV_FLAGS_PDUMPCTRL (1UL << 31) /*!< PDump Ctrl client flag */ ++ ++/*! @} SRV_FLAGS */ ++ ++/*! @} End of SRVConnectInterfaces */ ++ ++/* ++ * Bits 20 - 27 are used to pass information needed for validation ++ * of the GPU Virtualisation Validation mechanism. In particular: ++ * ++ * Bits: ++ * [20 - 22]: OSid of the memory region that will be used for allocations ++ * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses ++ * regarding that memory context. 
++ ++ ++/* Size of pointer on a 64 bit machine */ ++#define POINTER_SIZE_64BIT (8U) ++ ++ ++/* ++ Pdump flags which are accessible to Services clients ++*/ ++#define PDUMP_NONE 0x00000000U /*pszIOCName, ++ (psEntry->pfFunction != NULL) ? psEntry->pszFunctionName : "(null)", ++ psEntry->ui32CallCount, ++ psEntry->ui32CopyFromUserTotalBytes, ++ psEntry->ui32CopyToUserTotalBytes, ++ (unsigned long long) OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder), ++ (unsigned long long) OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder)); ++ ++ ++ } ++} ++#endif ++ ++PVRSRV_ERROR ++CopyFromUserWrapper(CONNECTION_DATA *psConnection, ++ IMG_UINT32 ui32DispatchTableEntry, ++ void *pvDest, ++ void __user *pvSrc, ++ IMG_UINT32 ui32Size) ++{ ++ g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size; ++ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size; ++ return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); ++} ++PVRSRV_ERROR ++CopyToUserWrapper(CONNECTION_DATA *psConnection, ++ IMG_UINT32 ui32DispatchTableEntry, ++ void __user *pvDest, ++ void *pvSrc, ++ IMG_UINT32 ui32Size) ++{ ++ g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size; ++ g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size; ++ return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); ++} ++#else ++INLINE PVRSRV_ERROR ++CopyFromUserWrapper(CONNECTION_DATA *psConnection, ++ IMG_UINT32 ui32DispatchTableEntry, ++ void *pvDest, ++ void __user *pvSrc, ++ IMG_UINT32 ui32Size) ++{ ++ PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); ++ return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size); ++} ++INLINE PVRSRV_ERROR ++CopyToUserWrapper(CONNECTION_DATA *psConnection, ++ IMG_UINT32 ui32DispatchTableEntry, ++ void __user *pvDest, ++ void *pvSrc, ++ IMG_UINT32 ui32Size) ++{ ++ PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry); ++ return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size); ++} ++#endif ++ ++PVRSRV_ERROR ++PVRSRVConnectKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT32 ui32Flags, ++ IMG_UINT32 ui32ClientBuildOptions, ++ IMG_UINT32 ui32ClientDDKVersion, ++ IMG_UINT32 ui32ClientDDKBuild, ++ IMG_UINT8 *pui8KernelArch, ++ IMG_UINT32 *pui32CapabilityFlags, ++ IMG_UINT64 *ui64PackedBvnc) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch; ++ IMG_UINT32 ui32DDKVersion, ui32DDKBuild; ++ PVRSRV_DATA *psSRVData = NULL; ++ IMG_UINT64 ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize(); ++ static IMG_BOOL bIsFirstConnection=IMG_FALSE; ++ ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice; ++ ++ /* Gather BVNC information to output to UM */ ++ ++ *ui64PackedBvnc = rgx_bvnc_pack(psDevInfo->sDevFeatureCfg.ui32B, ++
psDevInfo->sDevFeatureCfg.ui32V, ++ psDevInfo->sDevFeatureCfg.ui32N, ++ psDevInfo->sDevFeatureCfg.ui32C); ++#else ++ *ui64PackedBvnc = 0; ++#endif /* defined(SUPPORT_RGX) */ ++ ++ /* Clear the flags */ ++ *pui32CapabilityFlags = 0; ++ ++ psSRVData = PVRSRVGetPVRSRVData(); ++ ++ psConnection->ui32ClientFlags = ui32Flags; ++ ++ /* Set flags to pass back to the client showing which cache coherency is available. */ ++ /* Is the system snooping of caches emulated in software? */ ++ if (PVRSRVSystemSnoopingIsEmulated(psDeviceNode->psDevConfig)) ++ { ++ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_EMULATE_FLAG; ++ } ++ else ++ { ++ /* Is the system CPU cache coherent? */ ++ if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig)) ++ { ++ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG; ++ } ++ /* Is the system device cache coherent? */ ++ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) ++ { ++ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG; ++ } ++ } ++ ++ /* Does the system device have non-mappable local memory? */ ++ if (PVRSRVSystemHasNonMappableLocalMemory(psDeviceNode->psDevConfig)) ++ { ++ *pui32CapabilityFlags |= PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG; ++ } ++ ++ /* Is system using FBCDC v31? */ ++ if (psDeviceNode->pfnHasFBCDCVersion31(psDeviceNode)) ++ { ++ *pui32CapabilityFlags |= PVRSRV_FBCDC_V3_1_USED; ++ } ++ ++ /* Set flags to indicate shared-virtual-memory (SVM) allocation availability */ ++ if (! psDeviceNode->ui64GeneralSVMHeapTopVA || ! ui64ProcessVASpaceSize) ++ { ++ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED; ++ } ++ else ++ { ++ if (ui64ProcessVASpaceSize <= psDeviceNode->ui64GeneralSVMHeapTopVA) ++ { ++ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED; ++ } ++ else ++ { ++ /* This can happen when the processor has more virtual address bits ++ than the device (i.e. an alloc is not always guaranteed to succeed) */ ++ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL; ++ } ++ } ++ ++ /* Is the system DMA capable? */ ++ if (psDeviceNode->bHasSystemDMA) ++ { ++ *pui32CapabilityFlags |= PVRSRV_SYSTEM_DMA_USED; ++ } ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++{ ++ IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0; ++ IMG_BOOL bOSidAxiProtReg = IMG_FALSE; ++ ++ ui32OSid = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK) >> (VIRTVAL_FLAG_OSID_SHIFT); ++ ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK) >> (VIRTVAL_FLAG_OSIDREG_SHIFT); ++ ++#if defined(EMULATOR) ++{ ++ /* AXI_ACELITE is only supported on rogue cores - volcanic cores all support full ACE, ++ * so we do not want to compile the code below for them (RGX_FEATURE_AXI_ACELITE_BIT_MASK is not ++ * defined for volcanic cores).
++ */ ++ ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice; ++ ++#if defined(RGX_FEATURE_AXI_ACELITE_BIT_MASK) ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)) ++#else ++ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACE)) ++#endif ++ { ++ IMG_UINT32 ui32OSidAxiProtReg = 0, ui32OSidAxiProtTD = 0; ++ ++ ui32OSidAxiProtReg = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPREG_MASK) >> (VIRTVAL_FLAG_AXIPREG_SHIFT); ++ ui32OSidAxiProtTD = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPTD_MASK) >> (VIRTVAL_FLAG_AXIPTD_SHIFT); ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "[AxiProt & Virt]: Setting bOSidAxiProt of Emulator's Trusted Device for Catbase %d to %s", ++ ui32OSidReg, ++ (ui32OSidAxiProtTD == 1)?"TRUE":"FALSE")); ++ ++ bOSidAxiProtReg = ui32OSidAxiProtReg == 1; ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "[AxiProt & Virt]: Setting bOSidAxiProt of FW's Register for Catbase %d to %s", ++ ui32OSidReg, ++ bOSidAxiProtReg?"TRUE":"FALSE")); ++ ++ SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD); ++ } ++} ++#endif /* defined(EMULATOR) */ ++ ++ /* We now know the OSid, OSidReg and bOSidAxiProtReg setting for this ++ * connection. We can access these from wherever we have a connection ++ * reference and do not need to traverse an arbitrary linked-list to ++ * obtain them. The settings are process-specific. ++ */ ++ psConnection->ui32OSid = ui32OSid; ++ psConnection->ui32OSidReg = ui32OSidReg; ++ psConnection->bOSidAxiProtReg = bOSidAxiProtReg; ++ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "[GPU Virtualization Validation]: OSIDs: %d, %d", ++ ui32OSid, ++ ui32OSidReg)); ++} ++#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ ++ ++#if defined(SUPPORT_WORKLOAD_ESTIMATION) ++ /* Only enabled if enabled in the UM */ ++ if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_MASK)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Workload Estimation disabled. Not enabled in UM", ++ __func__)); ++ } ++#endif ++ ++#if defined(SUPPORT_PDVFS) ++ /* Only enabled if enabled in the UM */ ++ if (!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_MASK)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Proactive DVFS disabled. Not enabled in UM", ++ __func__)); ++ } ++#endif ++ ++ ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN); ++ ui32DDKBuild = PVRVERSION_BUILD; ++ ++ if (ui32Flags & SRV_FLAGS_CLIENT_64BIT_COMPAT) ++ { ++ psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_64BIT; ++ } ++ else ++ { ++ psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_32BIT; ++ } ++ ++ if (IMG_FALSE == bIsFirstConnection) ++ { ++ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions = (RGX_BUILD_OPTIONS_KM); ++ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions = ui32ClientBuildOptions; ++ ++ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildVersion = ui32DDKVersion; ++ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildVersion = ui32ClientDDKVersion; ++ ++ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision = ui32DDKBuild; ++ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild; ++ ++ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType = ++ ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_MASK) ? BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE; ++ ++ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType = ++ (ui32ClientBuildOptions & OPTIONS_DEBUG_MASK) ? 
BUILD_TYPE_DEBUG : BUILD_TYPE_RELEASE; ++ ++ if (sizeof(void *) == POINTER_SIZE_64BIT) ++ { ++ psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_64BIT; ++ } ++ else ++ { ++ psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_32BIT; ++ } ++ } ++ ++ /* Masking out every option that is not kernel specific*/ ++ ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM; ++ ++ /* ++ * Validate the build options ++ */ ++ ui32BuildOptions = (RGX_BUILD_OPTIONS_KM); ++ if (ui32BuildOptions != ui32ClientBuildOptions) ++ { ++ ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions; ++#if !defined(PVRSRV_STRICT_COMPAT_CHECK) ++ /*Mask the debug flag option out as we do support combinations of debug vs release in um & km*/ ++ ui32BuildOptionsMismatch &= OPTIONS_STRICT; ++#endif ++ if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0) ++ { ++ PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; " ++ "extra options present in client-side driver: (0x%x). Please check rgx_options.h", ++ __func__, ++ ui32ClientBuildOptions & ui32BuildOptionsMismatch )); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit); ++ } ++ ++ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0) ++ { ++ PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; " ++ "extra options present in KM driver: (0x%x). Please check rgx_options.h", ++ __func__, ++ ui32BuildOptions & ui32BuildOptionsMismatch )); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH, chk_exit); ++ } ++ if (IMG_FALSE == bIsFirstConnection) ++ { ++ PVR_LOG(("%s: COMPAT_TEST: Client-side (0x%04x) (%s) and KM driver (0x%04x) (%s) build options differ.", ++ __func__, ++ ui32ClientBuildOptions, ++ (psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug", ++ ui32BuildOptions, ++ (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug")); ++ }else{ ++ PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.", ++ __func__, ++ ui32ClientBuildOptions, ++ ui32BuildOptions)); ++ ++ } ++ if (!psSRVData->sDriverInfo.bIsNoMatch) ++ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __func__)); ++ } ++ ++ /* ++ * Validate DDK version ++ */ ++ if (ui32ClientDDKVersion != ui32DDKVersion) ++ { ++ if (!psSRVData->sDriverInfo.bIsNoMatch) ++ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; ++ PVR_LOG(("(FAIL) %s: Incompatible driver DDK version (%u.%u) / client DDK version (%u.%u).", ++ __func__, ++ PVRVERSION_MAJ, PVRVERSION_MIN, ++ PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion), ++ PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion))); ++ PVR_DBG_BREAK; ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_VERSION_MISMATCH, chk_exit); ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK version (%u.%u) and client DDK version (%u.%u) match. [ OK ]", ++ __func__, ++ PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN)); ++ } ++ ++ /* Create stream for every connection except for the special clients ++ * that don't need it e.g.: recipients of HWPerf data. 
*/ ++ if (!(psConnection->ui32ClientFlags & SRV_NO_HWPERF_CLIENT_STREAM)) ++ { ++ IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; ++ OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE, ++ PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC, ++ psDeviceNode->sDevId.i32OsDeviceID, ++ psConnection->pid); ++ ++ eError = TLStreamCreate(&psConnection->hClientTLStream, ++ acStreamName, ++ PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT, ++ TL_OPMODE_DROP_NEWER | ++ TL_FLAG_ALLOCATE_ON_FIRST_OPEN, ++ NULL, NULL, NULL, NULL); ++ if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS) ++ { ++ PVR_LOG_ERROR(eError, "TLStreamCreate"); ++ psConnection->hClientTLStream = NULL; ++ } ++ else if (eError == PVRSRV_OK) ++ { ++ /* Set "tlctrl" stream as a notification channel. This channel ++ * is used to notify recipients about stream open/close (by writer) ++ * actions (and possibly other actions in the future). */ ++ eError = TLStreamSetNotifStream(psConnection->hClientTLStream, ++ psSRVData->hTLCtrlStream); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "TLStreamSetNotifStream"); ++ TLStreamClose(psConnection->hClientTLStream); ++ psConnection->hClientTLStream = NULL; ++ } ++ } ++ ++ /* Reset error status. We don't want to propagate any errors from here. */ ++ eError = PVRSRV_OK; ++ PVR_DPF((PVR_DBG_MESSAGE, "Created stream \"%s\".", acStreamName)); ++ } ++ ++ /* ++ * Validate DDK build ++ */ ++ if (ui32ClientDDKBuild != ui32DDKBuild) ++ { ++ if (!psSRVData->sDriverInfo.bIsNoMatch) ++ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE; ++ PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch in driver DDK revision (%d) / client DDK revision (%d).", ++ __func__, ui32DDKBuild, ui32ClientDDKBuild)); ++#if defined(PVRSRV_STRICT_COMPAT_CHECK) ++ PVR_DBG_BREAK; ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_DDK_BUILD_MISMATCH, chk_exit); ++#endif ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%d) and client DDK revision (%d) match. [ OK ]", ++ __func__, ui32DDKBuild, ui32ClientDDKBuild)); ++ } ++ ++#if defined(PDUMP) ++ /* Success so far, so is it the PDump client that is connecting? */ ++ if (ui32Flags & SRV_FLAGS_PDUMPCTRL) ++ { ++ if (psDeviceNode->sDevId.ui32InternalID == psSRVData->ui32PDumpBoundDevice) ++ { ++ PDumpConnectionNotify(psDeviceNode); ++ } ++ else ++ { ++ eError = PVRSRV_ERROR_PDUMP_CAPTURE_BOUND_TO_ANOTHER_DEVICE; ++ PVR_DPF((PVR_DBG_ERROR, "%s: PDump requested for device %u but only permitted for device %u", ++ __func__, psDeviceNode->sDevId.ui32InternalID, psSRVData->ui32PDumpBoundDevice)); ++ goto chk_exit; ++ } ++ } ++ else ++ { ++ /* Warn if the app is connecting to a device PDump won't be able to capture */ ++ if (psDeviceNode->sDevId.ui32InternalID != psSRVData->ui32PDumpBoundDevice) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s: NB.
App running on device %d won't be captured by PDump (must be on device %u)", ++ __func__, psDeviceNode->sDevId.ui32InternalID, psSRVData->ui32PDumpBoundDevice)); ++ } ++ } ++#endif ++ ++ PVR_ASSERT(pui8KernelArch != NULL); ++ ++ if (psSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT) ++ { ++ *pui8KernelArch = 64; ++ } ++ else ++ { ++ *pui8KernelArch = 32; ++ } ++ ++ bIsFirstConnection = IMG_TRUE; ++ ++#if defined(DEBUG_BRIDGE_KM) ++ { ++ int ii; ++ ++ /* dump dispatch table offset lookup table */ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __func__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1)); ++ for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii][PVR_DISPATCH_OFFSET_FIRST_FUNC])); ++ } ++ } ++#endif ++ ++#if defined(PDUMP) ++ if (!(ui32Flags & SRV_FLAGS_PDUMPCTRL)) ++ { ++ IMG_UINT64 ui64PDumpState = 0; ++ ++ PDumpGetStateKM(&ui64PDumpState); ++ if (ui64PDumpState & PDUMP_STATE_CONNECTED) ++ { ++ *pui32CapabilityFlags |= PVRSRV_PDUMP_IS_RECORDING; ++ } ++ } ++#endif ++ ++chk_exit: ++ return eError; ++} ++ ++PVRSRV_ERROR ++PVRSRVDisconnectKM(void) ++{ ++#if defined(INTEGRITY_OS) && defined(DEBUG_BRIDGE_KM) ++ PVRSRVPrintBridgeStats(); ++#endif ++ /* just return OK, per-process data is cleaned up by resmgr */ ++ ++ return PVRSRV_OK; ++} ++ ++/**************************************************************************/ /*! ++@Function PVRSRVAcquireGlobalEventObjectKM ++@Description Acquire the global event object. ++@Output phGlobalEventObject On success, points to the global event ++ object handle ++@Return PVRSRV_ERROR PVRSRV_OK on success or an error ++ otherwise ++*/ /***************************************************************************/ ++PVRSRV_ERROR ++PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ ++ *phGlobalEventObject = psPVRSRVData->hGlobalEventObject; ++ ++ return PVRSRV_OK; ++} ++ ++/**************************************************************************/ /*! ++@Function PVRSRVReleaseGlobalEventObjectKM ++@Description Release the global event object. 
++@Output hGlobalEventObject Global event object handle ++@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise ++*/ /***************************************************************************/ ++PVRSRV_ERROR ++PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject) ++{ ++ PVR_ASSERT(PVRSRVGetPVRSRVData()->hGlobalEventObject == hGlobalEventObject); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ PVRSRVDumpDebugInfoKM ++*/ ++PVRSRV_ERROR ++PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32VerbLevel) ++{ ++ if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX) ++ { ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ PVR_LOG(("User requested PVR debug info")); ++ ++ PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, NULL, NULL); ++ ++ return PVRSRV_OK; ++} ++ ++/* ++ PVRSRVGetDevClockSpeedKM ++*/ ++PVRSRV_ERROR ++PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_PUINT32 pui32RGXClockSpeed) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != NULL); ++ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed); ++ PVR_WARN_IF_ERROR(eError, "pfnDeviceClockSpeed"); ++ ++ return eError; ++} ++ ++ ++/* ++ PVRSRVHWOpTimeoutKM ++*/ ++PVRSRV_ERROR ++PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode) ++{ ++#if defined(PVRSRV_RESET_ON_HWTIMEOUT) ++ PVR_LOG(("User requested OS reset")); ++ OSPanic(); ++#endif ++ PVR_LOG(("HW operation timeout, dump server info")); ++ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL); ++ return PVRSRV_OK; ++} ++ ++ ++IMG_INT ++DummyBW(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 *psBridgeIn, ++ IMG_UINT8 *psBridgeOut, ++ CONNECTION_DATA *psConnection) ++{ ++ PVR_UNREFERENCED_PARAMETER(psBridgeIn); ++ PVR_UNREFERENCED_PARAMETER(psBridgeOut); ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++#if defined(DEBUG_BRIDGE_KM) ++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to " ++ "Dummy Wrapper (probably not what you want!)", ++ __func__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName)); ++#else ++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to " ++ "Dummy Wrapper (probably not what you want!)", ++ __func__, ui32DispatchTableEntry)); ++#endif ++ return PVRSRV_ERROR_BRIDGE_ENOTTY; ++} ++ ++PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32AlignChecksSize, ++ IMG_UINT32 aui32AlignChecks[]) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++#if !defined(NO_HARDWARE) ++ ++ PVR_ASSERT(psDeviceNode->pfnAlignmentCheck != NULL); ++ return psDeviceNode->pfnAlignmentCheck(psDeviceNode, ui32AlignChecksSize, ++ aui32AlignChecks); ++ ++#else ++ ++ PVR_UNREFERENCED_PARAMETER(psDeviceNode); ++ PVR_UNREFERENCED_PARAMETER(ui32AlignChecksSize); ++ PVR_UNREFERENCED_PARAMETER(aui32AlignChecks); ++ ++ return PVRSRV_OK; ++ ++#endif /* !defined(NO_HARDWARE) */ ++ ++} ++ ++PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 *pui32DeviceStatus) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ /* First try to update the status. 
*/ ++ if (psDeviceNode->pfnUpdateHealthStatus != NULL) ++ { ++ PVRSRV_ERROR eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, ++ IMG_FALSE); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDeviceStatusKM: Failed to " ++ "check the device status (%d)", eError)); ++ ++ /* Return unknown status and error because we don't know what ++ * happened and if the status is valid. */ ++ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN; ++ return eError; ++ } ++ } ++ ++ switch (OSAtomicRead(&psDeviceNode->eHealthStatus)) ++ { ++ case PVRSRV_DEVICE_HEALTH_STATUS_OK: ++ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_OK; ++ return PVRSRV_OK; ++ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: ++ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_NOT_RESPONDING; ++ return PVRSRV_OK; ++ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: ++ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: ++ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: ++ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_DEVICE_ERROR; ++ return PVRSRV_OK; ++ default: ++ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN; ++ return PVRSRV_ERROR_INTERNAL_ERROR; ++ } ++} ++ ++PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32CapsSize, ++ IMG_UINT32 *pui32NumCores, ++ IMG_UINT64 *pui64Caps) ++{ ++ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_SUPPORTED; ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ ++ if (ui32CapsSize > 0) ++ { ++ /* Clear the buffer to ensure no uninitialised data is returned to UM ++ * if the pfn call below does not write to the whole array, or is null. ++ */ ++ memset(pui64Caps, 0x00, (ui32CapsSize * sizeof(IMG_UINT64))); ++ } ++ ++ if (psDeviceNode->pfnGetMultiCoreInfo != NULL) ++ { ++ eError = psDeviceNode->pfnGetMultiCoreInfo(psDeviceNode, ui32CapsSize, pui32NumCores, pui64Caps); ++ } ++ return eError; ++} ++ ++ ++/*! ++ * ***************************************************************************** ++ * @brief A wrapper for removing entries in the g_BridgeDispatchTable array. ++ * All this does is zero the entry to allow for a full table re-population ++ * later. ++ * ++ * @param ui32BridgeGroup ++ * @param ui32Index ++ * ++ * @return ++ ********************************************************************************/ ++void ++UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, IMG_UINT32 ui32Index) ++{ ++ ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]; ++ ++ g_BridgeDispatchTable[ui32Index].pfFunction = NULL; ++ g_BridgeDispatchTable[ui32Index].hBridgeLock = NULL; ++#if defined(DEBUG_BRIDGE_KM) ++ g_BridgeDispatchTable[ui32Index].pszIOCName = NULL; ++ g_BridgeDispatchTable[ui32Index].pszFunctionName = NULL; ++ g_BridgeDispatchTable[ui32Index].pszBridgeLockName = NULL; ++ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; ++ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; ++ g_BridgeDispatchTable[ui32Index].ui32CopyToUserTotalBytes = 0; ++ g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0; ++ g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0; ++#endif ++}
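UnsetDispatchTableEntry above and _SetDispatchTableEntry below both flatten a (bridge group, function) pair into one index of g_BridgeDispatchTable, and BridgedDispatchKM repeats the same arithmetic at dispatch time. A sketch of that rule, using the SYNCTRACKING group registered earlier in this patch:

/* Sketch: the flat dispatch-table index for a (group, function) pair.
 * PVR_DISPATCH_OFFSET_FIRST_FUNC selects the group's first slot;
 * BridgedDispatchKM bounds-checks the result against the group's
 * PVR_DISPATCH_OFFSET_LAST_FUNC boundary before using it.
 */
IMG_UINT32 ui32FlatIndex =
	g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING]
	                                 [PVR_DISPATCH_OFFSET_FIRST_FUNC] +
	PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD;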
++ ++/*! ++ * ***************************************************************************** ++ * @brief A wrapper for filling in the g_BridgeDispatchTable array that does ++ * error checking. ++ * ++ * @param ui32BridgeGroup ++ * @param ui32Index ++ * @param pszIOCName ++ * @param pfFunction ++ * @param pszFunctionName ++ * @param hBridgeLock ++ * @param pszBridgeLockName ++ * ++ * @return ++ ********************************************************************************/ ++void ++_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, ++ IMG_UINT32 ui32Index, ++ const IMG_CHAR *pszIOCName, ++ BridgeWrapperFunction pfFunction, ++ const IMG_CHAR *pszFunctionName, ++ POS_LOCK hBridgeLock, ++ const IMG_CHAR *pszBridgeLockName) ++{ ++ static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX; /* -1 */ ++ ++#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM) ++ PVR_UNREFERENCED_PARAMETER(pszFunctionName); ++ PVR_UNREFERENCED_PARAMETER(pszBridgeLockName); ++#endif ++ ++ ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]; ++ ++#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) ++ /* Enable this to dump out the dispatch table entries */ ++ PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __func__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC])); ++ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __func__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName)); ++#endif ++ ++ /* Any gaps are sub-optimal in terms of memory usage, but we are mainly ++ * interested in spotting any large gap of wasted memory that could be ++ * accidentally introduced. ++ * ++ * This will currently flag up any gaps > 5 entries. ++ * ++ * NOTE: This shouldn't be debug only since switching from debug->release ++ * etc is likely to modify the available ioctls and thus be a point where ++ * mistakes are exposed. This isn't run at a performance critical time. ++ */ ++ if ((ui32PrevIndex != IMG_UINT32_MAX) && ++ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) || ++ (ui32Index <= ui32PrevIndex))) ++ { ++#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)", ++ __func__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName, ++ ui32Index, pszIOCName)); ++#else ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: There is a gap in the dispatch table between indices %u and %u (%s)", ++ __func__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName)); ++#endif ++ } ++ ++ if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range", ++ __func__, (IMG_UINT)ui32Index, pszIOCName)); ++ ++#if defined(DEBUG_BRIDGE_KM) ++ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu", ++ __func__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)); ++#if defined(SUPPORT_RGX) ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGXREGCONFIG_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGXFWDBG_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGXBREAKPOINT_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST =
%lu", ++ __func__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGXTIMERQUERY_DISPATCH_LAST)); ++ ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_RGX_LAST)); ++#endif ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST)); ++ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu", ++ __func__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)); ++#endif ++ ++ OSPanic(); ++ } ++ ++ /* Panic if the previous entry has been overwritten as this is not allowed! ++ * NOTE: This shouldn't be debug only since switching from debug->release ++ * etc is likely to modify the available ioctls and thus be a point where ++ * mistakes are exposed. This isn't run at a performance critical time. ++ */ ++ if (g_BridgeDispatchTable[ui32Index].pfFunction) ++ { ++ if (g_BridgeDispatchTable[ui32Index].pfFunction != pfFunction) ++ { ++#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)", ++ __func__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName), ++ (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction)); ++#else ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Adding dispatch table entry for %s clobbers an existing entry (index=%u). 
(current pfn=<%p>, new pfn=<%p>)", ++ __func__, pszIOCName, ui32Index, ++ (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction)); ++ PVR_DPF((PVR_DBG_WARNING, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue.")); ++#endif ++ OSPanic(); ++ } ++ } ++ else ++ { ++ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction; ++ g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock; ++#if defined(DEBUG_BRIDGE_KM) ++ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName; ++ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName; ++ g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName; ++ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0; ++ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0; ++ g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0; ++ g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0; ++#endif ++ } ++ ++ ui32PrevIndex = ui32Index; ++} ++ ++static PVRSRV_ERROR _BridgeBufferAlloc(void *pvPrivData, void **pvOut) ++{ ++ PVR_UNREFERENCED_PARAMETER(pvPrivData); ++ ++ *pvOut = OSAllocZMem(PVRSRV_MAX_BRIDGE_IN_SIZE + ++ PVRSRV_MAX_BRIDGE_OUT_SIZE); ++ PVR_RETURN_IF_NOMEM(*pvOut); ++ ++ return PVRSRV_OK; ++} ++ ++static void _BridgeBufferFree(void *pvPrivData, void *pvFreeData) ++{ ++ PVR_UNREFERENCED_PARAMETER(pvPrivData); ++ ++ OSFreeMem(pvFreeData); ++} ++ ++PVRSRV_ERROR BridgeDispatcherInit(void) ++{ ++ PVRSRV_ERROR eError; ++ ++#if defined(DEBUG_BRIDGE_KM) ++ eError = OSLockCreate(&g_hStatsLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", errorLockCreateFailed); ++#endif ++ ++ eError = PVRSRVPoolCreate(_BridgeBufferAlloc, ++ _BridgeBufferFree, ++ PVRSRV_MAX_POOLED_BRIDGE_BUFFERS, ++ "Bridge buffer pool", ++ NULL, ++ &g_psBridgeBufferPool); ++ PVR_LOG_GOTO_IF_ERROR(eError, "PVRSRVPoolCreate", errorPoolCreateFailed); ++ ++ return PVRSRV_OK; ++ ++errorPoolCreateFailed: ++#if defined(DEBUG_BRIDGE_KM) ++ OSLockDestroy(g_hStatsLock); ++ g_hStatsLock = NULL; ++errorLockCreateFailed: ++#endif ++ return eError; ++} ++ ++void BridgeDispatcherDeinit(void) ++{ ++ if (g_psBridgeBufferPool) ++ { ++ PVRSRVPoolDestroy(g_psBridgeBufferPool); ++ g_psBridgeBufferPool = NULL; ++ } ++ ++#if defined(DEBUG_BRIDGE_KM) ++ if (g_hStatsLock) ++ { ++ OSLockDestroy(g_hStatsLock); ++ g_hStatsLock = NULL; ++ } ++#endif ++} ++ ++PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection, ++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM) ++{ ++ ++ void * psBridgeIn=NULL; ++ void * psBridgeOut=NULL; ++ BridgeWrapperFunction pfBridgeHandler; ++ IMG_UINT32 ui32DispatchTableEntry, ui32GroupBoundary; ++ PVRSRV_ERROR err = PVRSRV_OK; ++#if !defined(INTEGRITY_OS) ++ PVRSRV_POOL_TOKEN hBridgeBufferPoolToken = NULL; ++#endif ++ IMG_UINT32 ui32Timestamp = OSClockus(); ++#if defined(DEBUG_BRIDGE_KM) ++ IMG_UINT64 ui64TimeStart; ++ IMG_UINT64 ui64TimeEnd; ++ IMG_UINT64 ui64TimeDiff; ++#endif ++ IMG_UINT32 ui32DispatchTableIndex, ui32DispatchTableEntryIndex; ++ ++#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH) ++ PVR_DBG_BREAK; ++#endif ++ ++ if (psBridgePackageKM->ui32BridgeID >= BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d", ++ __func__, psBridgePackageKM->ui32BridgeID)); ++ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); ++ } ++ ++ ui32DispatchTableIndex = OSConfineArrayIndexNoSpeculation(psBridgePackageKM->ui32BridgeID, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT); ++ ++ ui32DispatchTableEntry =
g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_FIRST_FUNC]; ++ ui32GroupBoundary = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_LAST_FUNC]; ++ ++ /* bridge function is not implemented in this build */ ++ if (0 == ui32DispatchTableEntry) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)", ++ __func__, ++ ui32DispatchTableEntry, ++ ui32GroupBoundary, ++ psBridgePackageKM->ui32BridgeID, ++ psBridgePackageKM->ui32FunctionID)); ++ /* this points to DummyBW() which returns PVRSRV_ERROR_BRIDGE_ENOTTY */ ++ err = g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction(ui32DispatchTableEntry, ++ psBridgeIn, ++ psBridgeOut, ++ psConnection); ++ goto return_error; ++ } ++ if ((ui32DispatchTableEntry + psBridgePackageKM->ui32FunctionID) > ui32GroupBoundary) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)", ++ __func__, ++ ui32DispatchTableEntry, ++ ui32GroupBoundary, ++ psBridgePackageKM->ui32BridgeID, ++ psBridgePackageKM->ui32FunctionID)); ++ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); ++ } ++ ui32DispatchTableEntry += psBridgePackageKM->ui32FunctionID; ++ ui32DispatchTableEntryIndex = OSConfineArrayIndexNoSpeculation(ui32DispatchTableEntry, ui32GroupBoundary+1); ++ if (BRIDGE_DISPATCH_TABLE_ENTRY_COUNT <= ui32DispatchTableEntry) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, entry count = %lu," ++ " (bridge module %d, function %d)", __func__, ++ ui32DispatchTableEntry, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT, ++ psBridgePackageKM->ui32BridgeID, ++ psBridgePackageKM->ui32FunctionID)); ++ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EINVAL, return_error); ++ } ++#if defined(DEBUG_BRIDGE_KM) ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry index=%d, (bridge module %d, function %d)", ++ __func__, ++ ui32DispatchTableEntryIndex, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID)); ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: %s", ++ __func__, ++ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName)); ++ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32CallCount++; ++ g_BridgeGlobalStats.ui32IOCTLCount++; ++#endif ++ ++ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL) ++ { ++ OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock); ++ } ++#if !defined(INTEGRITY_OS) ++ /* try to acquire a bridge buffer from the pool */ ++ ++ err = PVRSRVPoolGet(g_psBridgeBufferPool, ++ &hBridgeBufferPoolToken, ++ &psBridgeIn); ++ PVR_LOG_GOTO_IF_ERROR(err, "PVRSRVPoolGet", unlock_and_return_error); ++ ++ psBridgeOut = ((IMG_BYTE *) psBridgeIn) + PVRSRV_MAX_BRIDGE_IN_SIZE; ++#endif ++ ++#if defined(DEBUG_BRIDGE_KM) ++ ui64TimeStart = OSClockns64(); ++#endif ++ ++ if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small " ++ "(data size %u, buffer size %u)!", __func__, ++ psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE)); ++ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); ++ } ++ ++#if !defined(INTEGRITY_OS) ++ if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small " ++ "(data size %u, buffer size %u)!", __func__, ++ psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE)); ++ PVR_GOTO_WITH_ERROR(err,
PVRSRV_ERROR_BRIDGE_ERANGE, unlock_and_return_error); ++ } ++ ++ if ((CopyFromUserWrapper (psConnection, ++ ui32DispatchTableEntryIndex, ++ psBridgeIn, ++ psBridgePackageKM->pvParamIn, ++ psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK) ++#if defined(__QNXNTO__) ++/* For Neutrino, the output bridge buffer acts as an input as well */ ++ || (CopyFromUserWrapper(psConnection, ++ ui32DispatchTableEntryIndex, ++ psBridgeOut, ++ (void *)((uintptr_t)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize), ++ psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK) ++#endif ++ ) /* end of if-condition */ ++ { ++ PVR_LOG_GOTO_WITH_ERROR("CopyFromUserWrapper", err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); ++ } ++#else ++ psBridgeIn = psBridgePackageKM->pvParamIn; ++ psBridgeOut = psBridgePackageKM->pvParamOut; ++#endif ++ ++ pfBridgeHandler = ++ (BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pfFunction; ++ ++ if (pfBridgeHandler == NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!", ++ __func__, ui32DispatchTableEntry)); ++ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); ++ } ++ ++ /* pfBridgeHandler functions return an IMG_INT but, at present, never ++ * fail: the only value they return is 0 (PVRSRV_OK). ++ * Should that change, an error may be +ve or -ve, ++ * so map any non-zero result to a consistent error here. ++ */ ++ if (0 != pfBridgeHandler(ui32DispatchTableEntryIndex, ++ psBridgeIn, ++ psBridgeOut, ++ psConnection) ++ ) ++ { ++ PVR_LOG_GOTO_WITH_ERROR("pfBridgeHandler", err, PVRSRV_ERROR_BRIDGE_EPERM, unlock_and_return_error); ++ } ++ ++ /* ++ At the moment all bridge calls return an error code in their output ++ buffer, so this should always be true, but this could change so we ++ do this check to be safe.
++ */ ++#if !defined(INTEGRITY_OS) ++ if (psBridgePackageKM->ui32OutBufferSize > 0) ++ { ++ if (CopyToUserWrapper (psConnection, ++ ui32DispatchTableEntryIndex, ++ psBridgePackageKM->pvParamOut, ++ psBridgeOut, ++ psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK) ++ { ++ PVR_GOTO_WITH_ERROR(err, PVRSRV_ERROR_BRIDGE_EFAULT, unlock_and_return_error); ++ } ++ } ++#endif ++ ++#if defined(DEBUG_BRIDGE_KM) ++ ui64TimeEnd = OSClockns64(); ++ ++ ui64TimeDiff = ui64TimeEnd - ui64TimeStart; ++ ++ /* if there is no lock held then acquire the stats lock to ++ * ensure the calculations are done safely ++ */ ++ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL) ++ { ++ BridgeGlobalStatsLock(); ++ } ++ ++ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64TotalTimeNS += ui64TimeDiff; ++ ++ if (ui64TimeDiff > g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS) ++ { ++ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS = ui64TimeDiff; ++ } ++ ++ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL) ++ { ++ BridgeGlobalStatsUnlock(); ++ } ++#endif ++ ++unlock_and_return_error: ++ ++ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL) ++ { ++ OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock); ++ } ++ ++#if !defined(INTEGRITY_OS) ++ if (hBridgeBufferPoolToken != NULL) ++ { ++ err = PVRSRVPoolPut(g_psBridgeBufferPool, ++ hBridgeBufferPoolToken); ++ PVR_LOG_IF_ERROR(err, "PVRSRVPoolPut"); ++ } ++#endif ++ ++return_error: ++ if (err) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __func__, err)); ++ } ++ /* ignore transport layer bridge to avoid HTB flooding */ ++ if (psBridgePackageKM->ui32BridgeID != PVRSRV_BRIDGE_PVRTL) ++ { ++ if (err) ++ { ++ HTBLOGK(HTB_SF_BRG_BRIDGE_CALL_ERR, ui32Timestamp, ++ psBridgePackageKM->ui32BridgeID, ++ psBridgePackageKM->ui32FunctionID, err); ++ } ++ else ++ { ++ HTBLOGK(HTB_SF_BRG_BRIDGE_CALL, ui32Timestamp, ++ psBridgePackageKM->ui32BridgeID, ++ psBridgePackageKM->ui32FunctionID); ++ } ++ } ++ ++ return err; ++} ++ ++PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemStatArray) ++{ ++#if !defined(__QNXNTO__) ++ return PVRSRVFindProcessMemStats(pid, ++ ui32ArrSize, ++ bAllProcessStats, ++ pui32MemStatArray); ++#else ++ PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform")); ++ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++#endif ++ ++}
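That completes the srvcore.c dispatcher. The srvcore.h header that follows declares the BridgeWrapperFunction type every g_BridgeDispatchTable entry must satisfy; as a sketch (a made-up, do-nothing bridge function, purely for illustration and not part of this patch), a minimal conforming wrapper would look like this:

/* Hypothetical wrapper: validates nothing, reports success through the
 * output buffer, and returns 0, as the real generated PVRSRVBridge*
 * wrappers do (status travels in psBridgeOut, not the return value).
 */
static IMG_INT
PVRSRVBridgeExampleNoop(IMG_UINT32 ui32DispatchTableEntry,
                        IMG_UINT8 *psBridgeIn,
                        IMG_UINT8 *psBridgeOut,
                        CONNECTION_DATA *psConnection)
{
	PVRSRV_ERROR *peError = (PVRSRV_ERROR *)psBridgeOut;

	PVR_UNREFERENCED_PARAMETER(ui32DispatchTableEntry);
	PVR_UNREFERENCED_PARAMETER(psBridgeIn);
	PVR_UNREFERENCED_PARAMETER(psConnection);

	*peError = PVRSRV_OK;

	return 0;
}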
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef SRVCORE_H ++#define SRVCORE_H ++ ++#include "lock_types.h" ++#include "connection_server.h" ++#include "pvr_debug.h" ++ ++#include "pvr_bridge.h" ++#if defined(SUPPORT_RGX) ++#include "rgx_bridge.h" ++#endif ++ ++PVRSRV_ERROR ++CopyFromUserWrapper(CONNECTION_DATA *psConnection, ++ IMG_UINT32 ui32DispatchTableEntry, ++ void *pvDest, ++ void __user *pvSrc, ++ IMG_UINT32 ui32Size); ++PVRSRV_ERROR ++CopyToUserWrapper(CONNECTION_DATA *psConnection, ++ IMG_UINT32 ui32DispatchTableEntry, ++ void __user *pvDest, ++ void *pvSrc, ++ IMG_UINT32 ui32Size); ++ ++IMG_INT ++DummyBW(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 *psBridgeIn, ++ IMG_UINT8 *psBridgeOut, ++ CONNECTION_DATA *psConnection); ++ ++typedef PVRSRV_ERROR (*ServerResourceDestroyFunction)(IMG_HANDLE, IMG_HANDLE); ++ ++typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry, ++ IMG_UINT8 *psBridgeIn, ++ IMG_UINT8 *psBridgeOut, ++ CONNECTION_DATA *psConnection); ++ ++typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY ++{ ++ BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl ++ arguments before calling into srvkm proper */ ++ POS_LOCK hBridgeLock; /*!< The bridge lock which needs to be acquired ++ before calling the above wrapper */ ++#if defined(DEBUG_BRIDGE_KM) ++ const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */ ++ const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. 
"PVRSRVConnectBW" */ ++ const IMG_CHAR *pszBridgeLockName; /*!< Name of bridge lock which will be acquired */ ++ IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */ ++ IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from ++ userspace within this ioctl */ ++ IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied from ++ userspace within this ioctl */ ++ IMG_UINT64 ui64TotalTimeNS; /*!< The total amount of time spent in this bridge function */ ++ IMG_UINT64 ui64MaxTimeNS; /*!< The maximum amount of time for a single call to this bridge function */ ++#endif ++}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY; ++ ++#if defined(SUPPORT_RGX) ++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1) ++ #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_LAST+1) ++#else ++ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_DISPATCH_LAST+1) ++ #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_LAST+1) ++#endif ++ ++extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT]; ++ ++void BridgeDispatchTableStartOffsetsInit(void); ++ ++void ++_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, ++ IMG_UINT32 ui32Index, ++ const IMG_CHAR *pszIOCName, ++ BridgeWrapperFunction pfFunction, ++ const IMG_CHAR *pszFunctionName, ++ POS_LOCK hBridgeLock, ++ const IMG_CHAR* pszBridgeLockName); ++void ++UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup, ++ IMG_UINT32 ui32Index); ++ ++ ++/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */ ++#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\ ++ hBridgeLock) \ ++ _SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\ ++ (POS_LOCK)hBridgeLock, #hBridgeLock) ++ ++#define DISPATCH_TABLE_GAP_THRESHOLD 5 ++ ++ ++#if defined(DEBUG_BRIDGE_KM) ++typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS ++{ ++ IMG_UINT32 ui32IOCTLCount; ++ IMG_UINT32 ui32TotalCopyFromUserBytes; ++ IMG_UINT32 ui32TotalCopyToUserBytes; ++} PVRSRV_BRIDGE_GLOBAL_STATS; ++ ++void BridgeGlobalStatsLock(void); ++void BridgeGlobalStatsUnlock(void); ++ ++/* OS specific code may want to report the stats held here and within the ++ * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. 
on Linux we report these via a ++ * debugfs entry /(sys/kernel/debug|proc)/pvr/bridge_stats) */ ++extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats; ++#endif ++ ++PVRSRV_ERROR BridgeDispatcherInit(void); ++void BridgeDispatcherDeinit(void); ++ ++PVRSRV_ERROR ++BridgedDispatchKM(CONNECTION_DATA * psConnection, ++ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM); ++ ++PVRSRV_ERROR ++PVRSRVConnectKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT32 ui32Flags, ++ IMG_UINT32 ui32ClientBuildOptions, ++ IMG_UINT32 ui32ClientDDKVersion, ++ IMG_UINT32 ui32ClientDDKBuild, ++ IMG_UINT8 *pui8KernelArch, ++ IMG_UINT32 *ui32CapabilityFlags, ++ IMG_UINT64 *ui64PackedBvnc); ++ ++PVRSRV_ERROR ++PVRSRVDisconnectKM(void); ++ ++PVRSRV_ERROR ++PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject); ++ ++PVRSRV_ERROR ++PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject); ++ ++PVRSRV_ERROR ++PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32VerbLevel); ++ ++PVRSRV_ERROR ++PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_PUINT32 pui32RGXClockSpeed); ++ ++PVRSRV_ERROR ++PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDeviceNode, ++ IMG_UINT32 ui32FWAlignChecksSize, ++ IMG_UINT32 aui32FWAlignChecks[]); ++ ++PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 *pui32DeviceStatus); ++ ++PVRSRV_ERROR PVRSRVGetMultiCoreInfoKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ IMG_UINT32 ui32CapsSize, ++ IMG_UINT32 *pui32NumCores, ++ IMG_UINT64 *pui64Caps); ++ ++PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, ++ IMG_UINT32 ui32ArrSize, ++ IMG_BOOL bAllProcessStats, ++ IMG_UINT32 *ui32MemoryStats); ++ ++static INLINE ++PVRSRV_ERROR DestroyServerResource(const SHARED_DEV_CONNECTION hConnection, ++ IMG_HANDLE hEvent, ++ ServerResourceDestroyFunction pfnDestroyCall, ++ IMG_HANDLE hResource) ++{ ++ PVR_UNREFERENCED_PARAMETER(hEvent); ++ ++ return pfnDestroyCall(GetBridgeHandle(hConnection), hResource); ++} ++ ++#endif /* SRVCORE_H */ ++ ++/****************************************************************************** ++ End of file (srvcore.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/srvinit.h b/drivers/gpu/drm/img-rogue/srvinit.h +new file mode 100644 +index 000000000000..48e6863eae1f +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/srvinit.h +@@ -0,0 +1,68 @@ ++/*************************************************************************/ /*! ++@File ++@Title Initialisation server internal header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines the connections between the various parts of the ++ initialisation server. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
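The SetDispatchTableEntry() macro above leans on the preprocessor's # operator to capture the entry index, wrapper function and lock as strings for the DEBUG_BRIDGE_KM name fields. A tiny self-contained illustration of that stringification pattern; register_entry(), REGISTER_ENTRY and MY_BRIDGE_ID are hypothetical stand-ins:

#include <stdio.h>

static void register_entry(int idx, const char *idx_name,
                           void (*fn)(void), const char *fn_name)
{
    /* A real dispatch table would store fn; here we just show the names. */
    printf("entry %d (%s) -> %s\n", idx, idx_name, fn_name);
    (void)fn;
}

/* # stringises the argument exactly as written at the call site, so the
 * debug table can show "MY_BRIDGE_ID" and "my_handler" rather than raw values. */
#define REGISTER_ENTRY(idx, fn) register_entry((idx), #idx, (fn), #fn)

static void my_handler(void) {}

#define MY_BRIDGE_ID 3

int main(void)
{
    REGISTER_ENTRY(MY_BRIDGE_ID, my_handler); /* prints: entry 3 (MY_BRIDGE_ID) -> my_handler */
    return 0;
}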
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef SRVINIT_H ++#define SRVINIT_H ++ ++#if defined(__cplusplus) ++extern "C" { ++#endif ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "device_connection.h" ++#include "device.h" ++ ++#if defined(SUPPORT_RGX) ++PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode); ++#endif ++ ++#if defined(__cplusplus) ++} ++#endif ++#endif /* SRVINIT_H */ ++ ++/****************************************************************************** ++ End of file (srvinit.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/srvkm.h b/drivers/gpu/drm/img-rogue/srvkm.h +new file mode 100644 +index 000000000000..1ca4ee807a0a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/srvkm.h +@@ -0,0 +1,145 @@ ++/**************************************************************************/ /*! ++@File ++@Title Services kernel module internal header file ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef SRVKM_H ++#define SRVKM_H ++ ++#include "servicesext.h" ++ ++#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) ++#define __pvrsrv_defined_struct_enum__ ++#include <services_kernel_client.h> ++#endif ++ ++struct _PVRSRV_DEVICE_NODE_; ++ ++/*************************************************************************/ /*! ++@Function PVRSRVCommonDriverInit ++@Description Performs one time driver initialisation of Services Common and ++ Device layers. ++@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise ++*/ /**************************************************************************/ ++PVRSRV_ERROR PVRSRVCommonDriverInit(void); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVCommonDriverDeInit ++@Description Performs one time driver de-initialisation of Services. ++@Return void ++*/ /**************************************************************************/ ++void PVRSRVCommonDriverDeInit(void); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVCommonDeviceCreate ++@Description Creates and initialises a common layer Services device node ++ for an OS native device. First stage device discovery. ++@Input pvOSDevice OS native device ++@Input i32OsDeviceID A unique identifier which helps recognise this ++ Device in the UM space provided by the OS.
++@Output ppsDeviceNode Points to the new device node on success ++@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PVRSRVCommonDeviceCreate(void *pvOSDevice, IMG_INT32 i32OsDeviceID, ++ struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVCommonDeviceInitialise ++@Description Initialises the device layer specifics (e.g. boot FW etc) ++ for the supplied device node, created previously by ++ PVRSRVCommonDeviceCreate. The device is ready for use when this ++ second stage device initialisation returns successfully. ++@Input psDeviceNode Device node of the device to be initialised ++@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise ++*/ /**************************************************************************/ ++PVRSRV_ERROR PVRSRVCommonDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++ ++/*************************************************************************/ /*! ++@Function PVRSRVCommonDeviceDestroy ++@Description Destroys a PVR Services device node. ++@Input psDeviceNode Device node to destroy ++@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++PVRSRVCommonDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode); ++ ++/****************** ++HIGHER LEVEL MACROS ++*******************/ ++ ++/*---------------------------------------------------------------------------- ++Repeats the body of the loop for a certain minimum time, or until the body ++exits by its own means (break, return, goto, etc.) ++ ++Example of usage: ++ ++LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US) ++{ ++ if (psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset) ++ { ++ bTimeout = IMG_FALSE; ++ break; ++ } ++ ++ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT); ++} END_LOOP_UNTIL_TIMEOUT(); ++ ++-----------------------------------------------------------------------------*/ ++ ++/* iNotLastLoop will remain at 1 until the timeout has expired, at which time ++ * it will be decremented and the loop executed one final time. This is ++ * necessary when preemption is enabled. ++ */ ++/* PRQA S 3411,3431 12 */ /* critical format, leave alone */ ++#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \ ++{\ ++ IMG_UINT32 uiOffset, uiStart, uiCurrent; \ ++ IMG_INT32 iNotLastLoop; \ ++ for (uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\ ++ ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \ ++ uiCurrent = OSClockus(), \ ++ uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \ ++ uiStart = uiCurrent < uiStart ? 0 : uiStart) ++ ++#define END_LOOP_UNTIL_TIMEOUT() \ ++} ++ ++#endif /* SRVKM_H */ +diff --git a/drivers/gpu/drm/img-rogue/sync.c b/drivers/gpu/drm/img-rogue/sync.c +new file mode 100644 +index 000000000000..36234ae5ee57 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sync.c +@@ -0,0 +1,907 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services synchronisation interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implements client side code for services synchronisation ++ interface ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below.
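The LOOP_UNTIL_TIMEOUT()/END_LOOP_UNTIL_TIMEOUT() pair defined just above guarantees one final pass after the deadline (via iNotLastLoop) and compensates for the microsecond counter wrapping past zero. A small sketch of the same polling contract, using the standard unsigned-wraparound idiom in place of the macro's explicit uiOffset bookkeeping; poll_us() and device_ready() are assumed stand-ins for OSClockus() and the real exit condition:

#include <stdint.h>
#include <stdbool.h>

extern uint32_t poll_us(void);   /* stand-in for OSClockus() */
extern bool device_ready(void);  /* stand-in for the condition being polled */

static bool wait_ready(uint32_t timeout_us)
{
    uint32_t start = poll_us();
    int last = 1;                /* one extra pass after expiry, like iNotLastLoop */

    for (;;)
    {
        if (device_ready())
            return true;

        /* Unsigned subtraction yields the correct elapsed time even
         * after the 32-bit counter wraps past zero. */
        if ((uint32_t)(poll_us() - start) >= timeout_us && last-- == 0)
            return false;
    }
}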
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ /**************************************************************************/ ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "client_sync_bridge.h" ++#include "client_synctracking_bridge.h" ++#include "info_page_client.h" ++#include "pvr_bridge.h" ++#include "allocmem.h" ++#include "osfunc.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "pvr_debug.h" ++#include "dllist.h" ++#include "sync.h" ++#include "sync_internal.h" ++#include "lock.h" ++#include "log2.h" ++#if defined(__KERNEL__) ++#include "pvrsrv.h" ++#include "srvcore.h" ++#else ++#include "srvcore_intern.h" ++#endif ++ ++ ++#define SYNC_BLOCK_LIST_CHUNCK_SIZE 10 ++ ++/* ++ This defines the maximum amount of synchronisation memory ++ that can be allocated per SyncPrim context. ++ In reality this number is meaningless as we would run out ++ of synchronisation memory before we reach this limit, but ++ we need to provide a size to the span RA. 
++ */ ++#define MAX_SYNC_MEM (4 * 1024 * 1024) ++ ++/* forward declaration */ ++static PVRSRV_ERROR ++_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value); ++ ++/* ++ Internal interfaces for management of SYNC_PRIM_CONTEXT ++ */ ++static void ++_SyncPrimContextUnref(SYNC_PRIM_CONTEXT *psContext) ++{ ++ if (!OSAtomicRead(&psContext->hRefCount)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: context already freed", __func__)); ++ } ++ else if (0 == OSAtomicDecrement(&psContext->hRefCount)) ++ { ++ /* SyncPrimContextDestroy only when no longer referenced */ ++ RA_Delete(psContext->psSpanRA); ++ RA_Delete(psContext->psSubAllocRA); ++ OSFreeMem(psContext); ++ } ++} ++ ++static void ++_SyncPrimContextRef(SYNC_PRIM_CONTEXT *psContext) ++{ ++ if (!OSAtomicRead(&psContext->hRefCount)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: context use after free", __func__)); ++ } ++ else ++ { ++ OSAtomicIncrement(&psContext->hRefCount); ++ } ++} ++ ++/* ++ Internal interfaces for management of synchronisation block memory ++ */ ++static PVRSRV_ERROR ++AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext, ++ SYNC_PRIM_BLOCK **ppsSyncBlock) ++{ ++ SYNC_PRIM_BLOCK *psSyncBlk; ++ IMG_HANDLE hSyncPMR; ++ IMG_HANDLE hSyncImportHandle; ++ IMG_DEVMEM_SIZE_T uiImportSize; ++ PVRSRV_ERROR eError; ++ ++ psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK)); ++ PVR_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc); ++ ++ psSyncBlk->psContext = psContext; ++ ++ /* Allocate sync prim block */ ++ eError = BridgeAllocSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), ++ &psSyncBlk->hServerSyncPrimBlock, ++ &psSyncBlk->ui32FirmwareAddr, ++ &psSyncBlk->ui32SyncBlockSize, ++ &hSyncPMR); ++ PVR_GOTO_IF_ERROR(eError, fail_blockalloc); ++ ++ /* Make it mappable by the client */ ++ eError = DevmemMakeLocalImportHandle(psContext->hDevConnection, ++ hSyncPMR, ++ &hSyncImportHandle); ++ PVR_GOTO_IF_ERROR(eError, fail_export); ++ ++ /* Get CPU mapping of the memory block */ ++ eError = DevmemLocalImport(psContext->hDevConnection, ++ hSyncImportHandle, ++ PVRSRV_MEMALLOCFLAG_CPU_READABLE, ++ &psSyncBlk->hMemDesc, ++ &uiImportSize, ++ "SyncPrimitiveBlock"); ++ ++ /* ++ Regardless of success or failure we "undo" the export ++ */ ++ DevmemUnmakeLocalImportHandle(psContext->hDevConnection, ++ hSyncImportHandle); ++ ++ PVR_GOTO_IF_ERROR(eError, fail_import); ++ ++ eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc, ++ (void **) &psSyncBlk->pui32LinAddr); ++ PVR_GOTO_IF_ERROR(eError, fail_cpuvaddr); ++ ++ *ppsSyncBlock = psSyncBlk; ++ return PVRSRV_OK; ++ ++fail_cpuvaddr: ++ DevmemFree(psSyncBlk->hMemDesc); ++fail_import: ++fail_export: ++ BridgeFreeSyncPrimitiveBlock(GetBridgeHandle(psContext->hDevConnection), ++ psSyncBlk->hServerSyncPrimBlock); ++fail_blockalloc: ++ OSFreeMem(psSyncBlk); ++fail_alloc: ++ return eError; ++} ++ ++static void ++FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk) ++{ ++ SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext; ++ ++ DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc); ++ DevmemFree(psSyncBlk->hMemDesc); ++ (void) DestroyServerResource(psContext->hDevConnection, ++ NULL, ++ BridgeFreeSyncPrimitiveBlock, ++ psSyncBlk->hServerSyncPrimBlock); ++ OSFreeMem(psSyncBlk); ++} ++ ++static PVRSRV_ERROR ++SyncPrimBlockImport(RA_PERARENA_HANDLE hArena, ++ RA_LENGTH_T uSize, ++ RA_FLAGS_T uFlags, ++ const IMG_CHAR *pszAnnotation, ++ RA_BASE_T *puiBase, ++ RA_LENGTH_T *puiActualSize, ++ RA_PERISPAN_HANDLE *phImport) ++{ ++ SYNC_PRIM_CONTEXT *psContext = hArena; ++ SYNC_PRIM_BLOCK *psSyncBlock = NULL; ++ RA_LENGTH_T 
uiSpanSize; ++ PVRSRV_ERROR eError; ++ PVR_UNREFERENCED_PARAMETER(uFlags); ++ ++ /* Check we've not been called with an unexpected size */ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(hArena, eError, e0); ++ PVR_LOG_GOTO_IF_INVALID_PARAM(uSize == sizeof(IMG_UINT32), eError, e0); ++ ++ /* ++ Ensure the syncprim context doesn't go away while we have sync blocks ++ attached to it ++ */ ++ _SyncPrimContextRef(psContext); ++ ++ /* Allocate the block of memory */ ++ eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "AllocSyncPrimitiveBlock", fail_syncblockalloc); ++ ++ /* Allocate a span for it */ ++ eError = RA_Alloc(psContext->psSpanRA, ++ psSyncBlock->ui32SyncBlockSize, ++ RA_NO_IMPORT_MULTIPLIER, ++ 0, ++ psSyncBlock->ui32SyncBlockSize, ++ pszAnnotation, ++ &psSyncBlock->uiSpanBase, ++ &uiSpanSize, ++ NULL); ++ PVR_GOTO_IF_ERROR(eError, fail_spanalloc); ++ ++ /* ++ There is no reason the span RA should return an allocation larger ++ than we request ++ */ ++ PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize); ++ ++ *puiBase = psSyncBlock->uiSpanBase; ++ *puiActualSize = psSyncBlock->ui32SyncBlockSize; ++ *phImport = psSyncBlock; ++ return PVRSRV_OK; ++ ++fail_spanalloc: ++ FreeSyncPrimitiveBlock(psSyncBlock); ++fail_syncblockalloc: ++ _SyncPrimContextUnref(psContext); ++e0: ++ return eError; ++} ++ ++static void ++SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena, ++ RA_BASE_T uiBase, ++ RA_PERISPAN_HANDLE hImport) ++{ ++ SYNC_PRIM_CONTEXT *psContext = hArena; ++ SYNC_PRIM_BLOCK *psSyncBlock = hImport; ++ ++ if (!psContext || !psSyncBlock || uiBase != psSyncBlock->uiSpanBase) ++ { ++ /* Invalid input params */ ++ return; ++ } ++ ++ /* Free the span this import is using */ ++ RA_Free(psContext->psSpanRA, uiBase); ++ ++ /* Free the syncprim block */ ++ FreeSyncPrimitiveBlock(psSyncBlock); ++ ++ /* Drop our reference to the syncprim context */ ++ _SyncPrimContextUnref(psContext); ++} ++ ++static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt) ++{ ++ IMG_UINT64 ui64Temp; ++ ++ PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL); ++ ++ ui64Temp = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase; ++ PVR_ASSERT(ui64Temp < IMG_UINT32_MAX); ++ return TRUNCATE_64BITS_TO_32BITS(ui64Temp); ++} ++ ++static void SyncPrimGetCPULinAddr(SYNC_PRIM *psSyncInt) ++{ ++ SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; ++ ++ psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr + ++ (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32)); ++} ++ ++static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt, IMG_BOOL bFreeFirstSyncPrim) ++{ ++ SYNC_PRIM_BLOCK *psSyncBlock; ++ SYNC_PRIM_CONTEXT *psContext; ++ ++ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; ++ psContext = psSyncBlock->psContext; ++ ++#if !defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) ++ PVR_UNREFERENCED_PARAMETER(bFreeFirstSyncPrim); ++#else ++ /* Defer freeing the first allocated sync prim in the sync context */ ++ if (psSyncInt != psContext->hFirstSyncPrim || (psSyncInt == psContext->hFirstSyncPrim && bFreeFirstSyncPrim)) ++#endif ++ { ++ PVRSRV_ERROR eError; ++ SHARED_DEV_CONNECTION hDevConnection = ++ psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection; ++ ++ if (GetInfoPageDebugFlags(hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ { ++ if (psSyncInt->u.sLocal.hRecord) ++ { ++ /* remove this sync record */ ++ eError = DestroyServerResource(hDevConnection, ++ NULL, ++ BridgeSyncRecordRemoveByHandle, ++ psSyncInt->u.sLocal.hRecord); ++ PVR_LOG_IF_ERROR(eError, "BridgeSyncRecordRemoveByHandle"); ++ } ++ } ++ else ++ { ++ IMG_UINT32 ui32FWAddr = psSyncBlock->ui32FirmwareAddr + ++ SyncPrimGetOffset(psSyncInt); ++ ++ eError =
BridgeSyncFreeEvent(GetBridgeHandle(hDevConnection), ui32FWAddr); ++ PVR_LOG_IF_ERROR(eError, "BridgeSyncFreeEvent"); ++ } ++#if defined(PVRSRV_ENABLE_SYNC_POISONING) ++ (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_POISON_VALUE); ++#else ++ /* reset the sync prim value as it is freed. ++ * this guarantees the client sync allocated to the client will ++ * have a value of zero and the client does not need to ++ * explicitly initialise the sync value to zero. ++ * the allocation of the backing memory for the sync prim block ++ * is done with ZERO_ON_ALLOC so the memory is initially all zero. ++ */ ++ (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE); ++#endif ++ ++ RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr); ++ OSFreeMem(psSyncInt); ++ _SyncPrimContextUnref(psContext); ++ } ++} ++ ++static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt) ++{ ++ if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed")); ++ } ++ else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount)) ++ { ++ SyncPrimLocalFree(psSyncInt, IMG_FALSE); ++ } ++} ++ ++static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt) ++{ ++ SYNC_PRIM_BLOCK *psSyncBlock; ++ ++ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; ++ return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt); ++} ++ ++static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align) ++{ ++ PVR_ASSERT(IsPower2(ui32Align)); ++ return ExactLog2(ui32Align); ++} ++ ++/* ++ External interfaces ++ */ ++ ++IMG_INTERNAL PVRSRV_ERROR ++SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, ++ PSYNC_PRIM_CONTEXT *phSyncPrimContext) ++{ ++ SYNC_PRIM_CONTEXT *psContext; ++ PVRSRV_ERROR eError; ++ ++ psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT)); ++ PVR_GOTO_IF_NOMEM(psContext, eError, fail_alloc); ++ ++ psContext->hDevConnection = hDevConnection; ++ ++ OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext); ++ OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext); ++ ++ /* ++ Create the RA for sub-allocations of the SyncPrims ++ ++ Note: ++ The import size doesn't matter here as the server will pass ++ back the blocksize when it does the import, which overrides ++ what we specify here. ++ */ ++ ++ psContext->psSubAllocRA = RA_Create(psContext->azName, ++ /* Params for imports */ ++ _Log2(sizeof(IMG_UINT32)), ++ RA_LOCKCLASS_2, ++ SyncPrimBlockImport, ++ SyncPrimBlockUnimport, ++ psContext, ++ RA_POLICY_DEFAULT); ++ PVR_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc); ++ ++ /* ++ Create the span-management RA ++ ++ The RA requires that we work with linear spans. For our use ++ here we don't require this behaviour as we're always working ++ within offsets of blocks (imports).
However, we need to keep ++ the RA happy so we create the "span" management RA which ++ ensures that all our imports are added to the RA in a linear ++ fashion ++ */ ++ psContext->psSpanRA = RA_Create(psContext->azSpanName, ++ /* Params for imports */ ++ 0, ++ RA_LOCKCLASS_1, ++ NULL, ++ NULL, ++ NULL, ++ RA_POLICY_DEFAULT); ++ PVR_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span); ++ ++ if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, NULL)) ++ { ++ RA_Delete(psContext->psSpanRA); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_OUT_OF_MEMORY, fail_span); ++ } ++ ++#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) ++ psContext->hFirstSyncPrim = NULL; ++#endif ++ ++ OSAtomicWrite(&psContext->hRefCount, 1); ++ ++ *phSyncPrimContext = psContext; ++ return PVRSRV_OK; ++fail_span: ++ RA_Delete(psContext->psSubAllocRA); ++fail_suballoc: ++ OSFreeMem(psContext); ++fail_alloc: ++ return eError; ++} ++ ++IMG_INTERNAL void SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext) ++{ ++ SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext; ++ ++#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) ++ /* Free the first sync prim that was allocated as part of this context */ ++ if (psContext->hFirstSyncPrim) ++ { ++ SyncPrimLocalFree((SYNC_PRIM *)psContext->hFirstSyncPrim, IMG_TRUE); ++ psContext->hFirstSyncPrim = NULL; ++ } ++#endif ++ ++ if (1 != OSAtomicRead(&psContext->hRefCount)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s attempted with active references, may be the result of a race", __func__)); ++ } ++#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE) ++#if defined(__KERNEL__) ++ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Forcing context destruction due to bad driver state", __func__)); ++ OSAtomicWrite(&psContext->hRefCount, 1); ++ } ++#endif ++#endif ++ _SyncPrimContextUnref(psContext); ++} ++ ++static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, ++ PVRSRV_CLIENT_SYNC_PRIM **ppsSync, ++ const IMG_CHAR *pszClassName, ++ IMG_BOOL bServerSync) ++{ ++ SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext; ++ SYNC_PRIM_BLOCK *psSyncBlock; ++ SYNC_PRIM *psNewSync; ++ PVRSRV_ERROR eError; ++ RA_BASE_T uiSpanAddr; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(hSyncPrimContext, "hSyncPrimContext"); ++ ++ psNewSync = OSAllocMem(sizeof(SYNC_PRIM)); ++ PVR_GOTO_IF_NOMEM(psNewSync, eError, fail_alloc); ++ ++ eError = RA_Alloc(psContext->psSubAllocRA, ++ sizeof(IMG_UINT32), ++ RA_NO_IMPORT_MULTIPLIER, ++ 0, ++ sizeof(IMG_UINT32), ++ "Sync_Prim", ++ &uiSpanAddr, ++ NULL, ++ (RA_PERISPAN_HANDLE *) &psSyncBlock); ++ PVR_GOTO_IF_ERROR(eError, fail_raalloc); ++ ++ psNewSync->eType = SYNC_PRIM_TYPE_LOCAL; ++ OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1); ++ psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr; ++ psNewSync->u.sLocal.psSyncBlock = psSyncBlock; ++ SyncPrimGetCPULinAddr(psNewSync); ++ *ppsSync = &psNewSync->sCommon; ++ _SyncPrimContextRef(psContext); ++#if defined(PVRSRV_ENABLE_SYNC_POISONING) ++ (void) _SyncPrimSetValue(psNewSync, LOCAL_SYNC_PRIM_RESET_VALUE); ++#endif ++ ++#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST) ++ /* If this is the first sync prim allocated in the context, keep a handle to it */ ++ if (psSyncBlock->uiSpanBase == 0 && psNewSync->u.sLocal.uiSpanAddr == 0) ++ { ++ psContext->hFirstSyncPrim = psNewSync; ++ } ++#endif ++ ++ if (GetInfoPageDebugFlags(psSyncBlock->psContext->hDevConnection) & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ { ++ IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; ++ size_t uiSize; ++ ++ if (pszClassName) ++ { ++ uiSize =
OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH); ++ /* Copy the class name annotation into a fixed-size array */ ++ OSCachedMemCopy(szClassName, pszClassName, uiSize); ++ if (uiSize == PVRSRV_SYNC_NAME_LENGTH) ++ szClassName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0'; ++ else ++ szClassName[uiSize++] = '\0'; ++ } ++ else ++ { ++ /* No class name annotation */ ++ uiSize = 0; ++ szClassName[0] = '\0'; ++ } ++ ++ /* record this sync */ ++ eError = BridgeSyncRecordAdd( ++ GetBridgeHandle(psSyncBlock->psContext->hDevConnection), ++ &psNewSync->u.sLocal.hRecord, ++ psSyncBlock->hServerSyncPrimBlock, ++ psSyncBlock->ui32FirmwareAddr, ++ SyncPrimGetOffset(psNewSync), ++ bServerSync, ++ uiSize, ++ szClassName); ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: failed to add SyncRecord \"%s\" (%s)", ++ __func__, ++ szClassName, ++ PVRSRVGETERRORSTRING(eError))); ++ psNewSync->u.sLocal.hRecord = NULL; ++ } ++ } ++ else ++ { ++ size_t uiSize; ++ ++ uiSize = OSStringNLength(pszClassName, PVRSRV_SYNC_NAME_LENGTH); ++ ++ if (uiSize < PVRSRV_SYNC_NAME_LENGTH) ++ uiSize++; ++ /* uiSize now reflects size used for pszClassName + NUL byte */ ++ ++ eError = BridgeSyncAllocEvent(GetBridgeHandle(hSyncPrimContext->hDevConnection), ++ bServerSync, ++ psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psNewSync), ++ uiSize, ++ pszClassName); ++ PVR_LOG_IF_ERROR(eError, "BridgeSyncAllocEvent"); ++ } ++ ++ return PVRSRV_OK; ++ ++fail_raalloc: ++ OSFreeMem(psNewSync); ++fail_alloc: ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, ++ PVRSRV_CLIENT_SYNC_PRIM **ppsSync, ++ const IMG_CHAR *pszClassName) ++{ ++ return _SyncPrimAlloc(hSyncPrimContext, ++ ppsSync, ++ pszClassName, ++ IMG_FALSE); ++} ++ ++static PVRSRV_ERROR ++_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) ++ { ++ SYNC_PRIM_BLOCK *psSyncBlock; ++ SYNC_PRIM_CONTEXT *psContext; ++ ++ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; ++ psContext = psSyncBlock->psContext; ++ ++ eError = BridgeSyncPrimSet(GetBridgeHandle(psContext->hDevConnection), ++ psSyncBlock->hServerSyncPrimBlock, ++ SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32), ++ ui32Value); ++ } ++ else ++ { ++ /* Server sync not supported, attempted use of server sync */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++ } ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ SYNC_PRIM *psSyncInt; ++ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); ++ ++ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); ++ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) ++ { ++ SyncPrimLocalUnref(psSyncInt); ++ } ++ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER) ++ { ++ /* Server sync not supported, attempted use of server sync */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++ } ++ else ++ { ++ /* ++ Either the client has given us a bad pointer or there is an ++ error in this module ++ */ ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); ++ } ++ ++err_out: ++ return eError; ++} ++ ++#if defined(NO_HARDWARE) ++IMG_INTERNAL PVRSRV_ERROR ++SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ SYNC_PRIM *psSyncInt; ++ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); ++ ++ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); ++ ++ /* There is no check for the psSyncInt to be 
LOCAL as this call ++ substitutes the Firmware updating a sync and that sync could ++ be a server one */ ++ ++ eError = _SyncPrimSetValue(psSyncInt, ui32Value); ++ ++err_out: ++ return eError; ++} ++#endif ++ ++IMG_INTERNAL PVRSRV_ERROR ++SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ SYNC_PRIM *psSyncInt; ++ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); ++ ++ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); ++ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) ++ { ++ /* Invalid sync type */ ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); ++ } ++ ++ eError = _SyncPrimSetValue(psSyncInt, ui32Value); ++ ++#if defined(PDUMP) ++ SyncPrimPDump(psSync); ++#endif ++err_out: ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync, ++ IMG_HANDLE *phBlock, ++ IMG_UINT32 *pui32Offset) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ SYNC_PRIM *psSyncInt; ++ ++ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); ++ PVR_LOG_GOTO_IF_INVALID_PARAM(phBlock, eError, err_out); ++ PVR_LOG_GOTO_IF_INVALID_PARAM(pui32Offset, eError, err_out); ++ ++ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); ++ ++ if (likely(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)) ++ { ++ *phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock; ++ *pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)", ++ __func__, psSyncInt->eType)); ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_PARAMS, err_out); ++ } ++ ++err_out: ++ return eError; ++} ++ ++IMG_INTERNAL PVRSRV_ERROR ++SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ SYNC_PRIM *psSyncInt; ++ ++ *pui32FwAddr = 0; ++ PVR_LOG_GOTO_IF_INVALID_PARAM(psSync, eError, err_out); ++ ++ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); ++ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL) ++ { ++ *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt); ++ } ++ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER) ++ { ++ /* Server sync not supported, attempted use of server sync */ ++ return PVRSRV_ERROR_NOT_SUPPORTED; ++ } ++ else ++ { ++ /* Either the client has given us a bad pointer or there is an ++ * error in this module ++ */ ++ PVR_GOTO_WITH_ERROR(eError, PVRSRV_ERROR_INVALID_SYNC_PRIM, err_out); ++ } ++ ++err_out: ++ return eError; ++} ++ ++#if defined(PDUMP) ++IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync) ++{ ++ SYNC_PRIM *psSyncInt; ++ SYNC_PRIM_BLOCK *psSyncBlock; ++ SYNC_PRIM_CONTEXT *psContext; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psSync != NULL); ++ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); ++ ++ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) ++ { ++ /* Invalid sync type */ ++ PVR_ASSERT(IMG_FALSE); ++ return; ++ } ++ ++ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; ++ psContext = psSyncBlock->psContext; ++ ++ eError = BridgeSyncPrimPDump(GetBridgeHandle(psContext->hDevConnection), ++ psSyncBlock->hServerSyncPrimBlock, ++ SyncPrimGetOffset(psSyncInt)); ++ PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDump"); ++ PVR_ASSERT(eError == PVRSRV_OK); ++} ++ ++IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) ++{ ++ SYNC_PRIM *psSyncInt; ++ SYNC_PRIM_BLOCK *psSyncBlock; ++ SYNC_PRIM_CONTEXT *psContext; ++ PVRSRV_ERROR eError; ++ ++ 
PVR_ASSERT(psSync != NULL); ++ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); ++ ++ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) ++ { ++ /* Invalid sync type */ ++ PVR_ASSERT(IMG_FALSE); ++ return; ++ } ++ ++ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; ++ psContext = psSyncBlock->psContext; ++ ++ eError = BridgeSyncPrimPDumpValue(GetBridgeHandle(psContext->hDevConnection), ++ psSyncBlock->hServerSyncPrimBlock, ++ SyncPrimGetOffset(psSyncInt), ++ ui32Value); ++ PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpValue"); ++ PVR_ASSERT(eError == PVRSRV_OK); ++} ++ ++IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ SYNC_PRIM *psSyncInt; ++ SYNC_PRIM_BLOCK *psSyncBlock; ++ SYNC_PRIM_CONTEXT *psContext; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psSync != NULL); ++ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); ++ ++ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) ++ { ++ /* Invalid sync type */ ++ PVR_ASSERT(IMG_FALSE); ++ return; ++ } ++ ++ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; ++ psContext = psSyncBlock->psContext; ++ ++ eError = BridgeSyncPrimPDumpPol(GetBridgeHandle(psContext->hDevConnection), ++ psSyncBlock->hServerSyncPrimBlock, ++ SyncPrimGetOffset(psSyncInt), ++ ui32Value, ++ ui32Mask, ++ eOperator, ++ ui32PDumpFlags); ++ PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpPol"); ++ PVR_ASSERT(eError == PVRSRV_OK); ++} ++ ++IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, ++ IMG_UINT64 uiWriteOffset, ++ IMG_UINT64 uiPacketSize, ++ IMG_UINT64 uiBufferSize) ++{ ++ SYNC_PRIM *psSyncInt; ++ SYNC_PRIM_BLOCK *psSyncBlock; ++ SYNC_PRIM_CONTEXT *psContext; ++ PVRSRV_ERROR eError; ++ ++ PVR_ASSERT(psSync != NULL); ++ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon); ++ ++ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL) ++ { ++ /* Invalid sync type */ ++ PVR_ASSERT(IMG_FALSE); ++ return; ++ } ++ ++ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock; ++ psContext = psSyncBlock->psContext; ++ ++#if defined(__linux__) && defined(__i386__) ++ PVR_ASSERT(uiWriteOffset < IMG_UINT32_MAX); ++ PVR_ASSERT(uiPacketSize < IMG_UINT32_MAX); ++ PVR_ASSERT(uiBufferSize < IMG_UINT32_MAX); ++#endif ++ ++ eError = BridgeSyncPrimPDumpCBP(GetBridgeHandle(psContext->hDevConnection), ++ psSyncBlock->hServerSyncPrimBlock, ++ SyncPrimGetOffset(psSyncInt), ++ TRUNCATE_64BITS_TO_32BITS(uiWriteOffset), ++ TRUNCATE_64BITS_TO_32BITS(uiPacketSize), ++ TRUNCATE_64BITS_TO_32BITS(uiBufferSize)); ++ PVR_LOG_IF_ERROR(eError, "BridgeSyncPrimPDumpCBP"); ++ PVR_ASSERT(eError == PVRSRV_OK); ++} ++ ++#endif +diff --git a/drivers/gpu/drm/img-rogue/sync.h b/drivers/gpu/drm/img-rogue/sync.h +new file mode 100644 +index 000000000000..f126915060f4 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sync.h +@@ -0,0 +1,292 @@ ++/*************************************************************************/ /*! ++@File ++@Title Synchronisation interface header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines the client side interface for synchronisation ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below.
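sync.c above and the sync.h declarations that follow give clients a simple lifecycle: create a context (which sets up the sub-allocation and span RAs), allocate and initialise primitives, then release them and destroy the context. A hedged usage sketch built only from the functions shown in this section; hDevConnection is assumed valid, "DemoSync" is an arbitrary annotation, and error cleanup is abbreviated:

static PVRSRV_ERROR DemoSyncPrimUsage(SHARED_DEV_CONNECTION hDevConnection)
{
	PSYNC_PRIM_CONTEXT hCtx;
	PVRSRV_CLIENT_SYNC_PRIM *psSync;
	PVRSRV_ERROR eError;

	eError = SyncPrimContextCreate(hDevConnection, &hCtx);
	if (eError != PVRSRV_OK)
		return eError;

	/* Sub-allocates one 4-byte sync from a pooled block; "DemoSync" is
	 * the class-name annotation used by the sync-tracking debug path. */
	eError = SyncPrimAlloc(hCtx, &psSync, "DemoSync");
	if (eError == PVRSRV_OK)
	{
		eError = SyncPrimSet(psSync, 0);   /* set the initial value */
		(void) SyncPrimFree(psSync);       /* drops the local refcount */
	}

	SyncPrimContextDestroy(hCtx);
	return eError;
}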
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef SYNC_H ++#define SYNC_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "sync_prim_internal.h" ++#include "pdumpdefs.h" ++#include "dllist.h" ++#include "pvr_debug.h" ++ ++#include "device_connection.h" ++ ++#if defined(__KERNEL__) && defined(__linux__) && !defined(__GENKSYMS__) ++#define __pvrsrv_defined_struct_enum__ ++#include <services_kernel_client.h> ++#endif ++ ++/*************************************************************************/ /*! ++@Function SyncPrimContextCreate ++ ++@Description Create a new synchronisation context ++ ++@Input hDevConnection Device connection handle ++ ++@Output hSyncPrimContext Handle to the created synchronisation ++ primitive context ++ ++@Return PVRSRV_OK if the synchronisation primitive context was ++ successfully created ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection, ++ PSYNC_PRIM_CONTEXT *hSyncPrimContext); ++ ++/*************************************************************************/ /*!
++@Function SyncPrimContextDestroy ++ ++@Description Destroy a synchronisation context ++ ++@Input hSyncPrimContext Handle to the synchronisation ++ primitive context to destroy ++ ++@Return None ++*/ ++/*****************************************************************************/ ++void ++SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext); ++ ++/*************************************************************************/ /*! ++@Function SyncPrimAlloc ++ ++@Description Allocate a new synchronisation primitive on the specified ++ synchronisation context ++ ++@Input hSyncPrimContext Handle to the synchronisation ++ primitive context ++ ++@Output ppsSync Created synchronisation primitive ++ ++@Input pszClassName Sync source annotation ++ ++@Return PVRSRV_OK if the synchronisation primitive was ++ successfully created ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext, ++ PVRSRV_CLIENT_SYNC_PRIM **ppsSync, ++ const IMG_CHAR *pszClassName); ++ ++/*************************************************************************/ /*! ++@Function SyncPrimFree ++ ++@Description Free a synchronisation primitive ++ ++@Input psSync The synchronisation primitive to free ++ ++@Return PVRSRV_OK if the synchronisation primitive was ++ successfully freed ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync); ++ ++/*************************************************************************/ /*! ++@Function SyncPrimSet ++ ++@Description Set the synchronisation primitive to a value ++ ++@Input psSync The synchronisation primitive to set ++ ++@Input ui32Value Value to set it to ++ ++@Return PVRSRV_OK on success ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); ++ ++#if defined(NO_HARDWARE) ++ ++/*************************************************************************/ /*! ++@Function SyncPrimNoHwUpdate ++ ++@Description Updates the synchronisation primitive value (in NoHardware drivers) ++ ++@Input psSync The synchronisation primitive to update ++ ++@Input ui32Value Value to update it to ++ ++@Return PVRSRV_OK on success ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); ++#endif ++ ++#if defined(PDUMP) ++/*************************************************************************/ /*! ++@Function SyncPrimPDump ++ ++@Description PDump the current value of the synchronisation primitive ++ ++@Input psSync The synchronisation primitive to PDump ++ ++@Return None ++*/ ++/*****************************************************************************/ ++void ++SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync); ++ ++/*************************************************************************/ /*! ++@Function SyncPrimPDumpValue ++ ++@Description PDump the ui32Value as the value of the synchronisation ++ primitive (regardless of the current value). 
++ ++@Input psSync The synchronisation primitive to PDump ++@Input ui32Value Value to give to the sync prim on the pdump ++ ++@Return None ++*/ ++/*****************************************************************************/ ++void ++SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value); ++ ++/*************************************************************************/ /*! ++@Function SyncPrimPDumpPol ++ ++@Description Do a PDump poll of the synchronisation primitive ++ ++@Input psSync The synchronisation primitive to PDump ++ ++@Input ui32Value Value to poll for ++ ++@Input ui32Mask PDump mask operator ++ ++@Input ui32PDumpFlags PDump flags ++ ++@Return None ++*/ ++/*****************************************************************************/ ++void ++SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ IMG_UINT32 ui32PDumpFlags); ++ ++/*************************************************************************/ /*! ++@Function SyncPrimPDumpCBP ++ ++@Description Do a PDump CB poll using the synchronisation primitive ++ ++@Input psSync The synchronisation primitive to PDump ++ ++@Input uiWriteOffset Current write offset of buffer ++ ++@Input uiPacketSize Size of the packet to write into CB ++ ++@Input uiBufferSize Size of the CB ++ ++@Return None ++*/ ++/*****************************************************************************/ ++void ++SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, ++ IMG_UINT64 uiWriteOffset, ++ IMG_UINT64 uiPacketSize, ++ IMG_UINT64 uiBufferSize); ++ ++#else ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(SyncPrimPDumpValue) ++#endif ++static INLINE void ++SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value) ++{ ++ PVR_UNREFERENCED_PARAMETER(psSync); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(SyncPrimPDump) ++#endif ++static INLINE void ++SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync) ++{ ++ PVR_UNREFERENCED_PARAMETER(psSync); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(SyncPrimPDumpPol) ++#endif ++static INLINE void ++SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync, ++ IMG_UINT32 ui32Value, ++ IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ IMG_UINT32 ui32PDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psSync); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ PVR_UNREFERENCED_PARAMETER(eOperator); ++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags); ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(SyncPrimPDumpCBP) ++#endif ++static INLINE void ++SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync, ++ IMG_UINT64 uiWriteOffset, ++ IMG_UINT64 uiPacketSize, ++ IMG_UINT64 uiBufferSize) ++{ ++ PVR_UNREFERENCED_PARAMETER(psSync); ++ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); ++ PVR_UNREFERENCED_PARAMETER(uiPacketSize); ++ PVR_UNREFERENCED_PARAMETER(uiBufferSize); ++} ++#endif /* PDUMP */ ++#endif /* SYNC_H */ +diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint.c b/drivers/gpu/drm/img-rogue/sync_checkpoint.c +new file mode 100644 +index 000000000000..1bab1afee87b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sync_checkpoint.c +@@ -0,0 +1,2981 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services synchronisation checkpoint interface ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description Server side code for services synchronisation interface ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++ */ /**************************************************************************/ ++ ++#include "img_defs.h" ++#include "img_types.h" ++#include "allocmem.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "pvr_debug.h" ++#include "pvr_notifier.h" ++#include "osfunc.h" ++#include "dllist.h" ++#include "sync.h" ++#include "sync_checkpoint_external.h" ++#include "sync_checkpoint.h" ++#include "sync_checkpoint_internal.h" ++#include "sync_checkpoint_init.h" ++#include "lock.h" ++#include "log2.h" ++#include "pvrsrv.h" ++#include "pdump_km.h" ++#include "info_page.h" ++ ++#include "pvrsrv_sync_km.h" ++#include "rgxhwperf.h" ++ ++#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) ++#include "rgxsoctimer.h" ++#endif ++ ++#if defined(PVRSRV_NEED_PVR_DPF) ++ ++/* Enable this to turn on debug relating to the creation and ++ resolution of contexts */ ++#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0 ++ ++/* Enable this to turn on debug relating to the creation and ++ resolution of fences */ ++#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0 ++ ++/* Enable this to turn on debug relating to the sync checkpoint ++ allocation and freeing */ ++#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0 ++ ++/* Enable this to turn on debug relating to the sync checkpoint ++ enqueuing and signalling */ ++#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0 ++ ++/* Enable this to turn on debug relating to the sync checkpoint pool */ ++#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0 ++ ++/* Enable this to turn on debug relating to sync checkpoint UFO ++ lookup */ ++#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0 ++ ++/* Enable this to turn on sync checkpoint deferred cleanup debug ++ * (for syncs we have been told to free but which have some ++ * outstanding FW operations remaining (enqueued in CCBs) ++ */ ++#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0 ++ ++#else ++ ++#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0 ++#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0 ++#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0 ++#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0 ++#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0 ++#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0 ++#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0 ++ ++#endif ++ ++/* Maximum number of deferred sync checkpoint signal/error received for atomic context */ ++#define SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL 500 ++ ++/* Set the size of the sync checkpoint pool (not used if 0). ++ * A pool will be maintained for each sync checkpoint context. ++ */ ++#if defined(PDUMP) ++#define SYNC_CHECKPOINT_POOL_SIZE 0 ++#else ++#define SYNC_CHECKPOINT_POOL_SIZE 128 ++#define SYNC_CHECKPOINT_POOL_MASK (SYNC_CHECKPOINT_POOL_SIZE - 1) ++#endif ++ ++/* The 'sediment' value represents the minimum number of ++ * sync checkpoints which must be in the pool before one ++ * will be allocated from the pool rather than from memory. ++ * This effectively helps avoid re-use of a sync checkpoint ++ * just after it has been returned to the pool, making ++ * debugging somewhat easier to understand. ++ */ ++#define SYNC_CHECKPOINT_POOL_SEDIMENT 20 ++ ++#if (SYNC_CHECKPOINT_POOL_SIZE & (SYNC_CHECKPOINT_POOL_SIZE - 1)) != 0 ++#error "SYNC_CHECKPOINT_POOL_SIZE must be power of 2." ++#endif ++ ++#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE 10 ++ ++/* ++ This defines the maximum amount of synchronisation memory ++ that can be allocated per sync checkpoint context. 
++ In reality this number is meaningless as we would run out
++ of synchronisation memory before we reach this limit, but
++ we need to provide a size to the span RA.
++ */
++#define MAX_SYNC_CHECKPOINT_MEM (4 * 1024 * 1024)
++
++
++typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_
++{
++ IMG_UINT32 ui32BlockCount; /*!< Number of blocks in the list */
++ IMG_UINT32 ui32BlockListSize; /*!< Size of the block array */
++ SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */
++} SYNC_CHECKPOINT_BLOCK_LIST;
++
++struct _SYNC_CHECKPOINT_CONTEXT_CTL_
++{
++ SHARED_DEV_CONNECTION psDeviceNode;
++ PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve;
++ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate;
++ /*
++ * Used as head of linked-list of sync checkpoints for which
++ * SyncCheckpointFree() has been called, but which have outstanding
++ * FW operations (enqueued in CCBs).
++ * This list will be checked whenever a SyncCheckpointFree() is
++ * called, and when SyncCheckpointContextDestroy() is called.
++ */
++ DLLIST_NODE sDeferredCleanupListHead;
++ /* Lock to protect the deferred cleanup list */
++ POS_SPINLOCK hDeferredCleanupListLock;
++
++#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
++ SYNC_CHECKPOINT *psSyncCheckpointPool[SYNC_CHECKPOINT_POOL_SIZE];
++ IMG_BOOL bSyncCheckpointPoolFull;
++ IMG_BOOL bSyncCheckpointPoolValid;
++ IMG_UINT32 ui32SyncCheckpointPoolCount;
++ IMG_UINT32 ui32SyncCheckpointPoolWp;
++ IMG_UINT32 ui32SyncCheckpointPoolRp;
++ POS_SPINLOCK hSyncCheckpointPoolLock; /*! Protects access to the checkpoint pool control data. */
++#endif
++}; /*_SYNC_CHECKPOINT_CONTEXT_CTL is already typedef-ed in sync_checkpoint_internal.h */
++
++/* This is the max number of sync checkpoint records we will search or dump
++ * at any time.
++ */
++#define SYNC_CHECKPOINT_RECORD_LIMIT 20000
++
++#define DECREMENT_WITH_WRAP(value, sz) ((value) ?
((value) - 1) : ((sz) - 1)) ++ ++struct SYNC_CHECKPOINT_RECORD ++{ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< handle to SYNC_CHECKPOINT_BLOCK */ ++ IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */ ++ IMG_UINT32 ui32FwBlockAddr; ++ IMG_PID uiPID; ++ IMG_UINT32 ui32UID; ++ IMG_UINT64 ui64OSTime; ++ DLLIST_NODE sNode; ++ IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; ++ PSYNC_CHECKPOINT pSyncCheckpt; ++}; ++ ++static PFN_SYNC_CHECKPOINT_STRUCT *g_psSyncCheckpointPfnStruct = NULL; ++ ++#if (SYNC_CHECKPOINT_POOL_SIZE > 0) ++static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext); ++static IMG_BOOL _PutCheckpointInPool(SYNC_CHECKPOINT *psSyncCheckpoint); ++static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext); ++#endif ++ ++#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) ++static IMG_UINT32 gui32NumSyncCheckpointContexts = 0; ++#endif ++ ++/* Defined values to indicate status of sync checkpoint, which is ++ * stored in the memory of the structure */ ++#define SYNC_CHECKPOINT_PATTERN_IN_USE 0x1a1aa ++#define SYNC_CHECKPOINT_PATTERN_IN_POOL 0x2b2bb ++#define SYNC_CHECKPOINT_PATTERN_FREED 0x3c3cc ++ ++#if defined(SUPPORT_RGX) ++static inline void RGXSRVHWPerfSyncCheckpointUFOIsSignalled(PVRSRV_RGXDEV_INFO *psDevInfo, ++ SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags) ++{ ++ if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO) ++ && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) ++ { ++ RGX_HWPERF_UFO_EV eEv; ++ RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; ++ ++ if (psSyncCheckpointInt) ++ { ++ if ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || ++ (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)) ++ { ++ sSyncData.sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); ++ sSyncData.sCheckSuccess.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; ++ eEv = RGX_HWPERF_UFO_EV_CHECK_SUCCESS; ++ } ++ else ++ { ++ sSyncData.sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); ++ sSyncData.sCheckFail.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; ++ sSyncData.sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; ++ eEv = RGX_HWPERF_UFO_EV_CHECK_FAIL; ++ } ++ RGXHWPerfHostPostUfoEvent(psDevInfo, eEv, &sSyncData, ++ (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE); ++ } ++ } ++} ++ ++static inline void RGXSRVHWPerfSyncCheckpointUFOUpdate(PVRSRV_RGXDEV_INFO *psDevInfo, ++ SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags) ++{ ++ if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO) ++ && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) ++ { ++ RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; ++ ++ if (psSyncCheckpointInt) ++ { ++ sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt); ++ sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; ++ sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; ++ RGXHWPerfHostPostUfoEvent(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData, ++ (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? 
IMG_FALSE : IMG_TRUE); ++ } ++ } ++} ++#endif ++ ++static PVRSRV_ERROR ++_SyncCheckpointRecordAdd(PSYNC_CHECKPOINT_RECORD_HANDLE *phRecord, ++ SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock, ++ IMG_UINT32 ui32FwBlockAddr, ++ IMG_UINT32 ui32SyncOffset, ++ IMG_UINT32 ui32UID, ++ IMG_UINT32 ui32ClassNameSize, ++ const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt); ++static PVRSRV_ERROR ++_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord); ++static void _SyncCheckpointState(PDLLIST_NODE psNode, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode); ++static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode); ++ ++#if defined(PDUMP) ++static void ++MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData); ++static PVRSRV_ERROR _SyncCheckpointAllocPDump(PVRSRV_DEVICE_NODE *psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint); ++static PVRSRV_ERROR _SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags); ++static PVRSRV_ERROR _SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent); ++#endif ++ ++/* Unique incremental ID assigned to sync checkpoints when allocated */ ++static IMG_UINT32 g_SyncCheckpointUID; ++ ++static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext); ++ ++void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext) ++{ ++ _SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *) psContext; ++ _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContextInt->psContextCtl; ++ IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount); ++ ++ if (ui32RefCt == 0) ++ { ++ PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, ++ "SyncCheckpointContextUnref context already freed"); ++ } ++ else if (OSAtomicDecrement(&psContextInt->hRefCount) == 0) ++ { ++ /* SyncCheckpointContextDestroy only when no longer referenced */ ++ OSSpinLockDestroy(psCtxCtl->hDeferredCleanupListLock); ++ psCtxCtl->hDeferredCleanupListLock = NULL; ++#if (SYNC_CHECKPOINT_POOL_SIZE > 0) ++ if (psCtxCtl->ui32SyncCheckpointPoolCount) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s called for context<%p> with %d sync checkpoints still" ++ " in the pool", ++ __func__, ++ (void *) psContext, ++ psCtxCtl->ui32SyncCheckpointPoolCount)); ++ } ++ psCtxCtl->bSyncCheckpointPoolValid = IMG_FALSE; ++ OSSpinLockDestroy(psCtxCtl->hSyncCheckpointPoolLock); ++ psCtxCtl->hSyncCheckpointPoolLock = NULL; ++#endif ++ OSFreeMem(psContextInt->psContextCtl); ++ RA_Delete(psContextInt->psSpanRA); ++ RA_Delete(psContextInt->psSubAllocRA); ++ OSLockDestroy(psContextInt->hLock); ++ psContextInt->hLock = NULL; ++ OSFreeMem(psContext); ++ } ++} ++ ++void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext) ++{ ++ _SYNC_CHECKPOINT_CONTEXT *psContextInt = (_SYNC_CHECKPOINT_CONTEXT *)psContext; ++ IMG_UINT32 ui32RefCt = OSAtomicRead(&psContextInt->hRefCount); ++ ++ if (ui32RefCt == 0) ++ { ++ PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT, ++ "SyncCheckpointContextRef context use after free"); ++ } ++ else ++ { ++ OSAtomicIncrement(&psContextInt->hRefCount); ++ } ++} ++ ++/* ++ Internal interfaces for management of synchronisation block memory ++ */ ++static PVRSRV_ERROR 
++_AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext,
++ SYNC_CHECKPOINT_BLOCK **ppsSyncBlock)
++{
++ PVRSRV_DEVICE_NODE *psDevNode;
++ SYNC_CHECKPOINT_BLOCK *psSyncBlk;
++ PVRSRV_ERROR eError;
++
++ psSyncBlk = OSAllocMem(sizeof(*psSyncBlk));
++ PVR_LOG_GOTO_IF_NOMEM(psSyncBlk, eError, fail_alloc);
++
++ psSyncBlk->psContext = psContext;
++
++ /* Allocate sync checkpoint block */
++ psDevNode = psContext->psDevNode;
++ PVR_LOG_GOTO_IF_INVALID_PARAM(psDevNode, eError, fail_alloc_ufo_block);
++
++ psSyncBlk->psDevNode = psDevNode;
++
++ eError = psDevNode->pfnAllocUFOBlock(psDevNode,
++ &psSyncBlk->hMemDesc,
++ &psSyncBlk->ui32FirmwareAddr,
++ &psSyncBlk->ui32SyncBlockSize);
++ PVR_LOG_GOTO_IF_ERROR(eError, "pfnAllocUFOBlock", fail_alloc_ufo_block);
++
++ eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
++ (void **) &psSyncBlk->pui32LinAddr);
++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail_devmem_acquire);
++
++ OSAtomicWrite(&psSyncBlk->hRefCount, 1);
++
++ OSLockCreate(&psSyncBlk->hLock);
++
++ PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS,
++ "Allocated Sync Checkpoint UFO block (FirmwareVAddr = 0x%08x)",
++ psSyncBlk->ui32FirmwareAddr);
++#if defined(PDUMP)
++ OSLockAcquire(psContext->hSyncCheckpointBlockListLock);
++ dllist_add_to_tail(&psContext->sSyncCheckpointBlockListHead, &psSyncBlk->sListNode);
++ OSLockRelease(psContext->hSyncCheckpointBlockListLock);
++#endif
++
++ *ppsSyncBlock = psSyncBlk;
++ return PVRSRV_OK;
++
++fail_devmem_acquire:
++ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
++fail_alloc_ufo_block:
++ OSFreeMem(psSyncBlk);
++fail_alloc:
++ return eError;
++}
++
++static void
++_FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk)
++{
++ OSLockAcquire(psSyncBlk->hLock);
++ if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount))
++ {
++ PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
++
++#if defined(PDUMP)
++ OSLockAcquire(psSyncBlk->psContext->hSyncCheckpointBlockListLock);
++ dllist_remove_node(&psSyncBlk->sListNode);
++ OSLockRelease(psSyncBlk->psContext->hSyncCheckpointBlockListLock);
++#endif
++ DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
++ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
++ OSLockRelease(psSyncBlk->hLock);
++ OSLockDestroy(psSyncBlk->hLock);
++ psSyncBlk->hLock = NULL;
++ OSFreeMem(psSyncBlk);
++ }
++ else
++ {
++ OSLockRelease(psSyncBlk->hLock);
++ }
++}
++
++static PVRSRV_ERROR
++_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena,
++ RA_LENGTH_T uSize,
++ RA_FLAGS_T uFlags,
++ const IMG_CHAR *pszAnnotation,
++ RA_BASE_T *puiBase,
++ RA_LENGTH_T *puiActualSize,
++ RA_PERISPAN_HANDLE *phImport)
++{
++ _SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
++ SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL;
++ RA_LENGTH_T uiSpanSize;
++ PVRSRV_ERROR eError;
++ PVR_UNREFERENCED_PARAMETER(uFlags);
++
++ PVR_LOG_RETURN_IF_INVALID_PARAM((hArena != NULL), "hArena");
++
++ /* Check we've not been called with an unexpected size */
++ PVR_LOG_RETURN_IF_INVALID_PARAM((uSize == sizeof(SYNC_CHECKPOINT_FW_OBJ)), "uSize");
++
++ /*
++ Ensure the sync checkpoint context doesn't go away while we have
++ sync blocks attached to it.
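++ A matching SyncCheckpointContextUnref() is made in
++ _SyncCheckpointBlockUnimport() once the block has been freed.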
++ */
++ SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext);
++
++ /* Allocate the block of memory */
++ eError = _AllocSyncCheckpointBlock(psContext, &psSyncBlock);
++ PVR_GOTO_IF_ERROR(eError, fail_syncblockalloc);
++
++ /* Allocate a span for it */
++ eError = RA_Alloc(psContext->psSpanRA,
++ psSyncBlock->ui32SyncBlockSize,
++ RA_NO_IMPORT_MULTIPLIER,
++ 0,
++ psSyncBlock->ui32SyncBlockSize,
++ pszAnnotation,
++ &psSyncBlock->uiSpanBase,
++ &uiSpanSize,
++ NULL);
++ PVR_GOTO_IF_ERROR(eError, fail_spanalloc);
++
++ /*
++ There is no reason the span RA should return an allocation larger
++ than we request
++ */
++ PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize),
++ "uiSpanSize invalid");
++
++ *puiBase = psSyncBlock->uiSpanBase;
++ *puiActualSize = psSyncBlock->ui32SyncBlockSize;
++ *phImport = psSyncBlock;
++ return PVRSRV_OK;
++
++fail_spanalloc:
++ _FreeSyncCheckpointBlock(psSyncBlock);
++fail_syncblockalloc:
++ SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext);
++
++ return eError;
++}
++
++static void
++_SyncCheckpointBlockUnimport(RA_PERARENA_HANDLE hArena,
++ RA_BASE_T uiBase,
++ RA_PERISPAN_HANDLE hImport)
++{
++ _SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
++ SYNC_CHECKPOINT_BLOCK *psSyncBlock = hImport;
++
++ PVR_LOG_RETURN_VOID_IF_FALSE((psContext != NULL), "hArena invalid");
++ PVR_LOG_RETURN_VOID_IF_FALSE((psSyncBlock != NULL), "hImport invalid");
++ PVR_LOG_RETURN_VOID_IF_FALSE((uiBase == psSyncBlock->uiSpanBase), "uiBase invalid");
++
++ /* Free the span this import is using */
++ RA_Free(psContext->psSpanRA, uiBase);
++
++ /* Free the sync checkpoint block */
++ _FreeSyncCheckpointBlock(psSyncBlock);
++
++ /* Drop our reference to the sync checkpoint context */
++ SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext);
++}
++
++static INLINE IMG_UINT32 _SyncCheckpointGetOffset(SYNC_CHECKPOINT *psSyncInt)
++{
++ IMG_UINT64 ui64Temp;
++
++ ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase;
++ PVR_ASSERT(ui64Temp < IMG_UINT32_MAX);
++ return (IMG_UINT32)ui64Temp;
++}
++
++PVRSRV_ERROR
++SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
++ PVRSRV_FENCE hFence,
++ IMG_UINT32 *pui32NumSyncCheckpoints,
++ PSYNC_CHECKPOINT **papsSyncCheckpoints,
++ IMG_UINT64 *pui64FenceUID,
++ PDUMP_FLAGS_T ui32PDumpFlags)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ IMG_UINT32 i;
++#if defined(PDUMP)
++ SYNC_CHECKPOINT *psSyncCheckpoint;
++#else
++ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
++#endif
++
++ if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceResolve))
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
++ __func__));
++ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
++ PVR_LOG_ERROR(eError, "g_pfnFenceResolve is NULL");
++ return eError;
++ }
++
++ if (papsSyncCheckpoints)
++ {
++ eError = g_psSyncCheckpointPfnStruct->pfnFenceResolve(
++ psSyncCheckpointContext,
++ hFence,
++ pui32NumSyncCheckpoints,
++ papsSyncCheckpoints,
++ pui64FenceUID);
++ }
++ else
++ {
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ PVR_LOG_RETURN_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceResolve");
++
++#if defined(PDUMP)
++ if (*papsSyncCheckpoints)
++ {
++ for (i = 0; i < *pui32NumSyncCheckpoints; i++)
++ {
++ psSyncCheckpoint = (SYNC_CHECKPOINT *)(*papsSyncCheckpoints)[i];
++ psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags;
++ }
++ }
++#endif
++
++ if (*pui32NumSyncCheckpoints > MAX_SYNC_CHECKPOINTS_PER_FENCE)
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() returned too many checkpoints (%u > MAX_SYNC_CHECKPOINTS_PER_FENCE=%u)",
++ __func__, *pui32NumSyncCheckpoints, MAX_SYNC_CHECKPOINTS_PER_FENCE));
++
++ /* Free resources after error */
++ if (*papsSyncCheckpoints)
++ {
++ for (i = 0; i < *pui32NumSyncCheckpoints; i++)
++ {
++ SyncCheckpointDropRef((*papsSyncCheckpoints)[i]);
++ }
++
++ SyncCheckpointFreeCheckpointListMem(*papsSyncCheckpoints);
++ }
++
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#if 
(ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) ++ { ++ IMG_UINT32 ii; ++ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: g_psSyncCheckpointPfnStruct->pfnFenceResolve() for fence %d returned the following %d checkpoints:", ++ __func__, ++ hFence, ++ *pui32NumSyncCheckpoints)); ++ ++ for (ii=0; ii<*pui32NumSyncCheckpoints; ii++) ++ { ++ PSYNC_CHECKPOINT psNextCheckpoint = *(*papsSyncCheckpoints + ii); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: *papsSyncCheckpoints[%d]:<%p>", ++ __func__, ++ ii, ++ (void*)psNextCheckpoint)); ++ } ++ } ++#endif ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode, ++ const IMG_CHAR *pszFenceName, ++ PVRSRV_TIMELINE hTimeline, ++ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, ++ PVRSRV_FENCE *phNewFence, ++ IMG_UINT64 *puiUpdateFenceUID, ++ void **ppvFenceFinaliseData, ++ PSYNC_CHECKPOINT *psNewSyncCheckpoint, ++ void **ppvTimelineUpdateSyncPrim, ++ IMG_UINT32 *pui32TimelineUpdateValue, ++ PDUMP_FLAGS_T ui32PDumpFlags) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_UNREFERENCED_PARAMETER(psDevNode); ++ ++ if (unlikely(!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceCreate)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", ++ __func__)); ++ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; ++ PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceCreate is NULL"); ++ } ++ else ++ { ++ eError = g_psSyncCheckpointPfnStruct->pfnFenceCreate( ++ psDevNode, ++ pszFenceName, ++ hTimeline, ++ psSyncCheckpointContext, ++ phNewFence, ++ puiUpdateFenceUID, ++ ppvFenceFinaliseData, ++ psNewSyncCheckpoint, ++ ppvTimelineUpdateSyncPrim, ++ pui32TimelineUpdateValue); ++ if (unlikely(eError != PVRSRV_OK)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s failed to create new fence<%p> for timeline<%d> using " ++ "sync checkpoint context<%p>, psNewSyncCheckpoint=<%p>, eError=%s", ++ __func__, ++ (void*)phNewFence, ++ hTimeline, ++ (void*)psSyncCheckpointContext, ++ (void*)psNewSyncCheckpoint, ++ PVRSRVGetErrorString(eError))); ++ } ++#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s created new fence<%d> for timeline<%d> using " ++ "sync checkpoint context<%p>, new sync_checkpoint=<%p>", ++ __func__, ++ *phNewFence, ++ hTimeline, ++ (void*)psSyncCheckpointContext, ++ (void*)*psNewSyncCheckpoint)); ++ } ++#endif ++ ++#if defined(PDUMP) ++ if (eError == PVRSRV_OK) ++ { ++ SYNC_CHECKPOINT *psSyncCheckpoint = (SYNC_CHECKPOINT*)(*psNewSyncCheckpoint); ++ if (psSyncCheckpoint) ++ { ++ psSyncCheckpoint->ui32PDumpFlags = ui32PDumpFlags; ++ } ++ } ++#endif ++ } ++ return eError; ++} ++ ++PVRSRV_ERROR ++SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceDataRollback) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", ++ __func__)); ++ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; ++ PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback is NULL"); ++ } ++ else ++ { ++#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: called to rollback fence data <%p>", ++ __func__, ++ pvFinaliseData)); ++#endif ++ eError = g_psSyncCheckpointPfnStruct->pfnFenceDataRollback( ++ hFence, pvFinaliseData); ++ PVR_LOG_IF_ERROR(eError, ++ "g_psSyncCheckpointPfnStruct->pfnFenceDataRollback returned error"); ++ } ++ 
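/* eError is PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED when no native
++ * sync implementation has registered a rollback handler. */
++ 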
return eError; ++} ++ ++PVRSRV_ERROR ++SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode, ++ PVRSRV_FENCE hFence, ++ void *pvFinaliseData, ++ PSYNC_CHECKPOINT psSyncCheckpoint, ++ const IMG_CHAR *pszName) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnFenceFinalise) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: Warning (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) (this is permitted)", ++ __func__)); ++ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; ++ } ++ else ++ { ++#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: called to finalise fence <%d>", ++ __func__, ++ hFence)); ++#endif ++ eError = g_psSyncCheckpointPfnStruct->pfnFenceFinalise(hFence, pvFinaliseData); ++ PVR_LOG_IF_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnFenceFinalise returned error"); ++ ++ RGXSRV_HWPERF_ALLOC_FENCE(psDevNode, OSGetCurrentClientProcessIDKM(), hFence, ++ SyncCheckpointGetFirmwareAddr(psSyncCheckpoint), ++ pszName, OSStringLength(pszName)); ++ } ++ return eError; ++} ++ ++void ++SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem) ++{ ++ if (g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem) ++ { ++ g_psSyncCheckpointPfnStruct->pfnFreeCheckpointListMem(pvCheckpointListMem); ++ } ++} ++ ++PVRSRV_ERROR ++SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", ++ __func__)); ++ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; ++ PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines is NULL"); ++ } ++ else ++ { ++ g_psSyncCheckpointPfnStruct->pfnNoHWUpdateTimelines(pvPrivateData); ++ } ++ return eError; ++ ++} ++ ++PVRSRV_ERROR ++SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, IMG_UINT32 *pui32Vaddrs, IMG_UINT32 *pui32NumSyncOwnedUFOs) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_LOG_RETURN_IF_FALSE((pui32NumSyncOwnedUFOs != NULL), "pui32NumSyncOwnedUFOs invalid", PVRSRV_ERROR_INVALID_PARAMS); ++ ++ if (!g_psSyncCheckpointPfnStruct || !g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs) ++ { ++ *pui32NumSyncOwnedUFOs = 0; ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)", ++ __func__)); ++ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED; ++ PVR_LOG_ERROR(eError, "g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs is NULL"); ++ } ++ else ++ { ++ *pui32NumSyncOwnedUFOs = g_psSyncCheckpointPfnStruct->pfnDumpInfoOnStalledUFOs(ui32NumUFOs, pui32Vaddrs); ++ PVR_LOG(("%d sync checkpoint%s owned by %s in stalled context", ++ *pui32NumSyncOwnedUFOs, *pui32NumSyncOwnedUFOs==1 ? 
"" : "s", ++ g_psSyncCheckpointPfnStruct->pszImplName)); ++ } ++ return eError; ++} ++ ++PVRSRV_ERROR ++SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, ++ PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext) ++{ ++ _SYNC_CHECKPOINT_CONTEXT *psContext = NULL; ++ _SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpointContext != NULL), ++ "ppsSyncCheckpointContext invalid", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ psContext = OSAllocMem(sizeof(*psContext)); ++ PVR_LOG_GOTO_IF_NOMEM(psContext, eError, fail_alloc); /* Sets OOM error code */ ++ ++ psContextCtl = OSAllocMem(sizeof(*psContextCtl)); ++ PVR_LOG_GOTO_IF_NOMEM(psContextCtl, eError, fail_alloc2); /* Sets OOM error code */ ++ ++ eError = OSLockCreate(&psContext->hLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate:1", fail_create_context_lock); ++ ++ eError = OSSpinLockCreate(&psContextCtl->hDeferredCleanupListLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:1", fail_create_deferred_cleanup_lock); ++ ++#if (SYNC_CHECKPOINT_POOL_SIZE > 0) ++ eError = OSSpinLockCreate(&psContextCtl->hSyncCheckpointPoolLock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "OSSpinLockCreate:2", fail_create_pool_lock); ++#endif ++ ++ dllist_init(&psContextCtl->sDeferredCleanupListHead); ++#if (SYNC_CHECKPOINT_POOL_SIZE > 0) ++ psContextCtl->ui32SyncCheckpointPoolCount = 0; ++ psContextCtl->ui32SyncCheckpointPoolWp = 0; ++ psContextCtl->ui32SyncCheckpointPoolRp = 0; ++ psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE; ++ psContextCtl->bSyncCheckpointPoolValid = IMG_TRUE; ++#endif ++ psContext->psDevNode = psDevNode; ++ ++ OSSNPrintf(psContext->azName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim RA-%p", psContext); ++ OSSNPrintf(psContext->azSpanName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext); ++ ++ /* ++ Create the RA for sub-allocations of the sync checkpoints ++ ++ Note: ++ The import size doesn't matter here as the server will pass ++ back the blocksize when it does the import which overrides ++ what we specify here. ++ */ ++ psContext->psSubAllocRA = RA_Create(psContext->azName, ++ /* Params for imports */ ++ _Log2(sizeof(IMG_UINT32)), ++ RA_LOCKCLASS_2, ++ _SyncCheckpointBlockImport, ++ _SyncCheckpointBlockUnimport, ++ psContext, ++ RA_POLICY_DEFAULT); ++ PVR_LOG_GOTO_IF_NOMEM(psContext->psSubAllocRA, eError, fail_suballoc); ++ ++ /* ++ Create the span-management RA ++ ++ The RA requires that we work with linear spans. For our use ++ here we don't require this behaviour as we're always working ++ within offsets of blocks (imports). 
However, we need to keep
++ the RA happy so we create the "span" management RA which
++ ensures that all our imports are added to the RA in a linear
++ fashion.
++ */
++ psContext->psSpanRA = RA_Create(psContext->azSpanName,
++ /* Params for imports */
++ 0,
++ RA_LOCKCLASS_1,
++ NULL,
++ NULL,
++ NULL,
++ RA_POLICY_DEFAULT);
++ PVR_LOG_GOTO_IF_NOMEM(psContext->psSpanRA, eError, fail_span);
++
++ if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_CHECKPOINT_MEM, 0, NULL))
++ {
++ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++ PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Add(span) failed");
++ goto fail_span_add;
++ }
++
++ OSAtomicWrite(&psContext->hRefCount, 1);
++ OSAtomicWrite(&psContext->hCheckpointCount, 0);
++
++ psContext->psContextCtl = psContextCtl;
++
++ *ppsSyncCheckpointContext = (PSYNC_CHECKPOINT_CONTEXT)psContext;
++#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
++ PVR_DPF((PVR_DBG_WARNING,
++ "%s: created psSyncCheckpointContext=<%p> (%d contexts exist)",
++ __func__,
++ (void*)*ppsSyncCheckpointContext,
++ ++gui32NumSyncCheckpointContexts));
++#endif
++
++#if defined(PDUMP)
++ dllist_init(&psContext->sSyncCheckpointBlockListHead);
++
++ eError = OSLockCreate(&psContext->hSyncCheckpointBlockListLock);
++ PVR_GOTO_IF_ERROR(eError, fail_span_add);
++
++ OSLockAcquire(psDevNode->hSyncCheckpointContextListLock);
++ dllist_add_to_tail(&psDevNode->sSyncCheckpointContextListHead, &psContext->sListNode);
++ OSLockRelease(psDevNode->hSyncCheckpointContextListLock);
++
++#endif
++
++ return PVRSRV_OK;
++
++fail_span_add:
++ RA_Delete(psContext->psSpanRA);
++fail_span:
++ RA_Delete(psContext->psSubAllocRA);
++fail_suballoc:
++#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
++ OSSpinLockDestroy(psContextCtl->hSyncCheckpointPoolLock);
++ psContextCtl->hSyncCheckpointPoolLock = NULL;
++fail_create_pool_lock:
++#endif
++ OSSpinLockDestroy(psContextCtl->hDeferredCleanupListLock);
++ psContextCtl->hDeferredCleanupListLock = NULL;
++fail_create_deferred_cleanup_lock:
++ OSLockDestroy(psContext->hLock);
++ psContext->hLock = NULL;
++fail_create_context_lock:
++ OSFreeMem(psContextCtl);
++fail_alloc2:
++ OSFreeMem(psContext);
++fail_alloc:
++ return eError;
++}
++
++/* Poisons and frees the checkpoint
++ * Decrements context refcount.
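++ * The checkpoint memory is poisoned with SYNC_CHECKPOINT_PATTERN_FREED
++ * before being returned to the sub-alloc RA, so stale references can be
++ * detected.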
*/ ++static void _FreeSyncCheckpoint(SYNC_CHECKPOINT *psSyncCheckpoint) ++{ ++ _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext; ++ ++ psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0; ++ psSyncCheckpoint->psSyncCheckpointFwObj = NULL; ++ psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED; ++ ++ RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, ++ psSyncCheckpoint->uiSpanAddr); ++ psSyncCheckpoint->psSyncCheckpointBlock = NULL; ++ ++ OSFreeMem(psSyncCheckpoint); ++ ++ OSAtomicDecrement(&psContext->hCheckpointCount); ++} ++ ++PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointContext; ++ PVRSRV_DEVICE_NODE *psDevNode; ++ IMG_INT iRf = 0; ++ ++ PVR_LOG_RETURN_IF_FALSE((psSyncCheckpointContext != NULL), ++ "psSyncCheckpointContext invalid", ++ PVRSRV_ERROR_INVALID_PARAMS); ++ ++ psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode; ++ ++#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s: destroying psSyncCheckpointContext=<%p> (now have %d contexts)", ++ __func__, ++ (void*)psSyncCheckpointContext, ++ --gui32NumSyncCheckpointContexts)); ++#endif ++ ++ _CheckDeferredCleanupList(psContext); ++ ++#if (SYNC_CHECKPOINT_POOL_SIZE > 0) ++ if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) ++ { ++ IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext); ++ ++#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s freed %d sync checkpoints that were still in the pool for context<%p>", ++ __func__, ++ ui32NumFreedFromPool, ++ (void*)psContext)); ++#else ++ PVR_UNREFERENCED_PARAMETER(ui32NumFreedFromPool); ++#endif ++ } ++#endif ++ ++ iRf = OSAtomicRead(&psContext->hCheckpointCount); ++ ++ if (iRf != 0) ++ { ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++ /* Note, this is not a permanent error as the caller may retry later */ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s <%p> attempted with active references (iRf=%d), " ++ "may be the result of a race", ++ __func__, ++ (void*)psContext, ++ iRf)); ++ ++ eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT; ++ ++ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); ++ { ++ DLLIST_NODE *psNode, *psNext; ++ ++ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) ++ { ++ SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); ++ bool bDeferredFree = dllist_node_is_in_list(&psSyncCheckpoint->sDeferredFreeListNode); ++ ++ /* Line below avoids build error in release builds (where PVR_DPF is not defined) */ ++ PVR_UNREFERENCED_PARAMETER(bDeferredFree); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s syncCheckpoint<%p> ID=%d, %s, refs=%d, state=%s, fwaddr=%#08x, enqCount:%d, FWCount:%d %s", ++ __func__, ++ (void*)psSyncCheckpoint, ++ psSyncCheckpoint->ui32UID, ++ psSyncCheckpoint->azName, ++ OSAtomicRead(&psSyncCheckpoint->hRefCount), ++ psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ? ++ "PVRSRV_SYNC_CHECKPOINT_SIGNALLED" : ++ psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE ? ++ "PVRSRV_SYNC_CHECKPOINT_ACTIVE" : "PVRSRV_SYNC_CHECKPOINT_ERRORED", ++ psSyncCheckpoint->ui32FWAddr, ++ OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), ++ psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, ++ bDeferredFree ? 
"(deferred free)" : "")); ++ ++#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1) ++ gui32NumSyncCheckpointContexts++; ++#endif ++ } ++ } ++ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); ++ } ++ else ++ { ++ IMG_INT iRf2 = 0; ++ ++ iRf2 = OSAtomicRead(&psContext->hRefCount); ++ SyncCheckpointContextUnref(psSyncCheckpointContext); ++ } ++ ++#if defined(PDUMP) ++ if (dllist_is_empty(&psContext->sSyncCheckpointBlockListHead)) ++ { ++ OSLockDestroy(psContext->hSyncCheckpointBlockListLock); ++ psContext->hSyncCheckpointBlockListLock = NULL; ++ ++ OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); ++ dllist_remove_node(&psContext->sListNode); ++ OSLockRelease(psDevNode->hSyncCheckpointContextListLock); ++ } ++#endif ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, ++ PVRSRV_TIMELINE hTimeline, ++ PVRSRV_FENCE hFence, ++ const IMG_CHAR *pszCheckpointName, ++ PSYNC_CHECKPOINT *ppsSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL; ++ _SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext; ++ PVRSRV_DEVICE_NODE *psDevNode; ++ PVRSRV_ERROR eError; ++ ++ PVR_LOG_RETURN_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS); ++ PVR_LOG_RETURN_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS); ++ ++ psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psDevNode; ++ ++#if (SYNC_CHECKPOINT_POOL_SIZE > 0) ++#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) ++ PVR_DPF((PVR_DBG_WARNING, "%s Entry, Getting checkpoint from pool", ++ __func__)); ++#endif ++ psNewSyncCheckpoint = _GetCheckpointFromPool(psSyncContextInt); ++ if (!psNewSyncCheckpoint) ++ { ++#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s checkpoint pool empty - will have to allocate", ++ __func__)); ++#endif ++ } ++#endif ++ /* If pool is empty (or not defined) alloc the new sync checkpoint */ ++ if (!psNewSyncCheckpoint) ++ { ++ psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint)); ++ PVR_LOG_GOTO_IF_NOMEM(psNewSyncCheckpoint, eError, fail_alloc); /* Sets OOM error code */ ++ ++ eError = RA_Alloc(psSyncContextInt->psSubAllocRA, ++ sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj), ++ RA_NO_IMPORT_MULTIPLIER, ++ 0, ++ sizeof(IMG_UINT32), ++ (IMG_CHAR*)pszCheckpointName, ++ &psNewSyncCheckpoint->uiSpanAddr, ++ NULL, ++ (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RA_Alloc", fail_raalloc); ++ ++#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", ++ __func__, ++ (void*)psSyncContextInt->psSubAllocRA, ++ psNewSyncCheckpoint->uiSpanAddr)); ++#endif ++ psNewSyncCheckpoint->psSyncCheckpointFwObj = ++ (volatile SYNC_CHECKPOINT_FW_OBJ*)(void *)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr + ++ (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32))); ++ psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + ++ _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1; ++ OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount); ++ psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE; ++#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) 
++ PVR_DPF((PVR_DBG_WARNING, ++ "%s called to allocate new sync checkpoint<%p> for context<%p>", ++ __func__, (void*)psNewSyncCheckpoint, (void*)psSyncContext)); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpointFwObj<%p>", ++ __func__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj)); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpoint FwAddr=0x%x", ++ __func__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint))); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s pszCheckpointName = %s", ++ __func__, pszCheckpointName)); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpoint Timeline=%d", ++ __func__, hTimeline)); ++#endif ++ } ++ ++ psNewSyncCheckpoint->hTimeline = hTimeline; ++ OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1); ++ OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0); ++ psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount = 0; ++ psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ACTIVE; ++ psNewSyncCheckpoint->uiProcess = OSGetCurrentClientProcessIDKM(); ++ OSCachedMemSet(&psNewSyncCheckpoint->sDeferredFreeListNode, 0, sizeof(psNewSyncCheckpoint->sDeferredFreeListNode)); ++ ++ if (pszCheckpointName) ++ { ++ /* Copy over the checkpoint name annotation */ ++ OSStringLCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); ++ } ++ else ++ { ++ /* No sync checkpoint name annotation */ ++ psNewSyncCheckpoint->azName[0] = '\0'; ++ } ++ ++ /* Store sync checkpoint FW address in PRGXFWIF_UFO_ADDR struct */ ++ psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint); ++ ++ /* Assign unique ID to this sync checkpoint */ ++ psNewSyncCheckpoint->ui32UID = g_SyncCheckpointUID++; ++ ++#if defined(PDUMP) ++ /* Flushing deferred fence signals to pdump */ ++ MISRHandler_PdumpDeferredSyncSignalPoster(psDevNode); ++ ++ _SyncCheckpointAllocPDump(psDevNode, psNewSyncCheckpoint); ++#endif ++ ++ RGXSRV_HWPERF_ALLOC_SYNC_CP(psDevNode, psNewSyncCheckpoint->hTimeline, ++ OSGetCurrentClientProcessIDKM(), ++ hFence, ++ psNewSyncCheckpoint->ui32FWAddr, ++ psNewSyncCheckpoint->azName, ++ sizeof(psNewSyncCheckpoint->azName)); ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ { ++ IMG_CHAR szChkptName[PVRSRV_SYNC_NAME_LENGTH]; ++ ++ if (pszCheckpointName) ++ { ++ /* Copy the checkpoint name annotation into a fixed-size array */ ++ OSStringLCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH); ++ } ++ else ++ { ++ /* No checkpoint name annotation */ ++ szChkptName[0] = 0; ++ } ++ /* record this sync */ ++ eError = _SyncCheckpointRecordAdd(&psNewSyncCheckpoint->hRecord, ++ psNewSyncCheckpoint->psSyncCheckpointBlock, ++ psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr, ++ _SyncCheckpointGetOffset(psNewSyncCheckpoint), ++ psNewSyncCheckpoint->ui32UID, ++ OSStringNLength(szChkptName, PVRSRV_SYNC_NAME_LENGTH), ++ szChkptName, (PSYNC_CHECKPOINT)psNewSyncCheckpoint); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\" (%s)", ++ __func__, ++ szChkptName, ++ PVRSRVGetErrorString(eError))); ++ psNewSyncCheckpoint->hRecord = NULL; ++ /* note the error but continue without affecting driver operation */ ++ } ++ } ++ ++ { ++ OS_SPINLOCK_FLAGS uiFlags; ++ /* Add the sync checkpoint to the device list */ ++ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); ++ dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList, ++ &psNewSyncCheckpoint->sListNode); ++ 
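/* Once on this device-wide list the checkpoint can be found by the UFO
++ * lookup helpers (e.g. SyncCheckpointStateFromUFO()). */
++ 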
OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); ++ } ++ ++ *ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint; ++ ++#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s Exit(Ok), psNewSyncCheckpoint->ui32UID=%d <%p>", ++ __func__, ++ psNewSyncCheckpoint->ui32UID, ++ (void*)psNewSyncCheckpoint)); ++#endif ++ return PVRSRV_OK; ++ ++fail_raalloc: ++ OSFreeMem(psNewSyncCheckpoint); ++fail_alloc: ++ return eError; ++} ++ ++static void SyncCheckpointUnref(SYNC_CHECKPOINT *psSyncCheckpointInt) ++{ ++ _SYNC_CHECKPOINT_CONTEXT *psContext; ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++ psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext; ++ psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode; ++ ++ /* ++ * Without this reference, the context may be destroyed as soon ++ * as _FreeSyncCheckpoint is called, but the context is still ++ * needed when _CheckDeferredCleanupList is called at the end ++ * of this function. ++ */ ++ SyncCheckpointContextRef((PSYNC_CHECKPOINT_CONTEXT)psContext); ++ ++ PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE); ++ if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount)) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed")); ++ } ++ else if (0 == OSAtomicDecrement(&psSyncCheckpointInt->hRefCount)) ++ { ++ /* If the firmware has serviced all enqueued references to the sync checkpoint, free it */ ++ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == ++ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount))) ++ { ++#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..", ++ __func__)); ++#endif ++ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ && psSyncCheckpointInt->hRecord) ++ { ++ PVRSRV_ERROR eError; ++ /* remove this sync record */ ++ eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); ++ PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); ++ } ++ ++ { ++ OS_SPINLOCK_FLAGS uiFlags; ++ /* Remove the sync checkpoint from the global list */ ++ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); ++ dllist_remove_node(&psSyncCheckpointInt->sListNode); ++ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); ++ } ++ ++ RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr); ++ ++#if (SYNC_CHECKPOINT_POOL_SIZE > 0) ++#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s attempting to return sync checkpoint to the pool", ++ __func__)); ++#endif ++ if (!_PutCheckpointInPool(psSyncCheckpointInt)) ++#endif ++ { ++#if (SYNC_CHECKPOINT_POOL_SIZE > 0) ++#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s pool is full, so just free it", ++ __func__)); ++#endif ++#endif ++#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", ++ __func__, ++ psSyncCheckpointInt->ui32UID, ++ (void*)psSyncCheckpointInt, ++ (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA, ++ psSyncCheckpointInt->uiSpanAddr)); ++#endif ++ _FreeSyncCheckpoint(psSyncCheckpointInt); ++ } ++ } ++ else ++ { ++ OS_SPINLOCK_FLAGS uiFlags; ++#if 
((ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s Outstanding FW ops hEnqueuedCCBCount=%d != FwObj->ui32FwRefCount=%d " ++ "- DEFERRING CLEANUP psSyncCheckpoint(ID:%d)<%p>", ++ __func__, ++ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount, ++ psSyncCheckpointInt->ui32UID, ++ (void*)psSyncCheckpointInt)); ++#endif ++ /* Add the sync checkpoint to the deferred free list */ ++ OSSpinLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags); ++ dllist_add_to_tail(&psContext->psContextCtl->sDeferredCleanupListHead, ++ &psSyncCheckpointInt->sDeferredFreeListNode); ++ OSSpinLockRelease(psContext->psContextCtl->hDeferredCleanupListLock, uiFlags); ++ } ++ } ++ else ++ { ++#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpoint(ID:%d)<%p>, hRefCount decremented to %d", ++ __func__, ++ psSyncCheckpointInt->ui32UID, ++ (void*)psSyncCheckpointInt, ++ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)))); ++#endif ++ } ++ ++ /* See if any sync checkpoints in the deferred cleanup list can be freed */ ++ _CheckDeferredCleanupList(psContext); ++ ++ SyncCheckpointContextUnref((PSYNC_CHECKPOINT_CONTEXT)psContext); ++} ++ ++void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_RETURN_VOID_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); ++ ++#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x", ++ __func__, ++ psSyncCheckpointInt->ui32UID, ++ (void*)psSyncCheckpoint, ++ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)), ++ psSyncCheckpointInt->ui32ValidationCheck)); ++#endif ++ SyncCheckpointUnref(psSyncCheckpointInt); ++} ++ ++void ++SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); ++ ++ if (psSyncCheckpointInt) ++ { ++ PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), ++ "psSyncCheckpoint already signalled"); ++ ++ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) ++ { ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; ++ ++ RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); ++#endif ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; ++ ++#if defined(PDUMP) ++ _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ui32FenceSyncFlags); ++#endif ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), " ++ "when value is already %d", ++ __func__, ++ PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ++ psSyncCheckpointInt->ui32UID, ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)); ++ } ++ } ++} ++ ++void ++SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = 
(SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); ++ ++ if (psSyncCheckpointInt) ++ { ++ PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), ++ "psSyncCheckpoint already signalled"); ++ ++ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) ++ { ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; ++ ++ RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE); ++#endif ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED; ++ } ++ else ++ { ++#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), " ++ "when value is already %d", ++ __func__, ++ PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ++ psSyncCheckpointInt->ui32UID, ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)); ++#endif ++ } ++ } ++} ++ ++void ++SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); ++ ++ if (psSyncCheckpointInt) ++ { ++ PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE), ++ "psSyncCheckpoint already signalled"); ++ ++ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) ++ { ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; ++ if (!(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT)) ++ { ++ RGX_HWPERF_UFO_DATA_ELEMENT sSyncData; ++ ++ sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint); ++ sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; ++ sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_ERRORED; ++ ++ RGXSRV_HWPERF_UFO(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData, ++ (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? 
IMG_FALSE : IMG_TRUE); ++ } ++#endif ++ ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED; ++ ++#if defined(PDUMP) ++ _SyncCheckpointUpdatePDump(psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode, psSyncCheckpointInt, PVRSRV_SYNC_CHECKPOINT_ERRORED, ui32FenceSyncFlags); ++#endif ++ } ++ } ++} ++ ++IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) ++{ ++ IMG_BOOL bRet = IMG_FALSE; ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); ++ ++ if (psSyncCheckpointInt) ++ { ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; ++ ++ RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); ++#endif ++ bRet = ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) || ++ (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED)); ++ ++#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s called for psSyncCheckpoint<%p>, returning %d", ++ __func__, ++ (void*)psSyncCheckpoint, ++ bRet)); ++#endif ++ } ++ return bRet; ++} ++ ++IMG_BOOL ++SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags) ++{ ++ IMG_BOOL bRet = IMG_FALSE; ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid"); ++ ++ if (psSyncCheckpointInt) ++ { ++#if defined(SUPPORT_RGX) ++ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice; ++ ++ RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags); ++#endif ++ bRet = (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED); ++ ++#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s called for psSyncCheckpoint<%p>, returning %d", ++ __func__, ++ (void*)psSyncCheckpoint, ++ bRet)); ++#endif ++ } ++ return bRet; ++} ++ ++const IMG_CHAR * ++SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_RETURN_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", "Null"); ++ ++ switch (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State) ++ { ++ case PVRSRV_SYNC_CHECKPOINT_SIGNALLED: ++ return "Signalled"; ++ case PVRSRV_SYNC_CHECKPOINT_ACTIVE: ++ return "Active"; ++ case PVRSRV_SYNC_CHECKPOINT_ERRORED: ++ return "Errored"; ++ case PVRSRV_SYNC_CHECKPOINT_UNDEF: ++ return "Undefined"; ++ default: ++ return "Unknown"; ++ } ++} ++ ++PVRSRV_ERROR ++SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ PVRSRV_ERROR eRet = PVRSRV_OK; ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint"); ++ ++#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", ++ __func__, ++ psSyncCheckpointInt, ++ OSAtomicRead(&psSyncCheckpointInt->hRefCount), ++ OSAtomicRead(&psSyncCheckpointInt->hRefCount)+1, ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); ++#endif ++ OSAtomicIncrement(&psSyncCheckpointInt->hRefCount); ++ ++ 
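/* Only the host-side refcount is incremented here; enqueued CCB references
++ * are tracked separately via SyncCheckpointCCBEnqueued(). */
++ 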
return eRet; ++} ++ ++PVRSRV_ERROR ++SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ PVRSRV_ERROR eRet = PVRSRV_OK; ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_RETURN_IF_INVALID_PARAM(psSyncCheckpoint, "psSyncCheckpoint"); ++ ++#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", ++ __func__, ++ psSyncCheckpointInt, ++ OSAtomicRead(&psSyncCheckpointInt->hRefCount), ++ OSAtomicRead(&psSyncCheckpointInt->hRefCount)-1, ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); ++#endif ++ SyncCheckpointUnref(psSyncCheckpointInt); ++ ++ return eRet; ++} ++ ++void ++SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_RETURN_VOID_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint"); ++ ++ if (psSyncCheckpointInt) ++ { ++#if !defined(NO_HARDWARE) ++#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)", ++ __func__, ++ (void*)psSyncCheckpoint, ++ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), ++ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1, ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); ++#endif ++ OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount); ++#endif ++ } ++} ++ ++PRGXFWIF_UFO_ADDR* ++SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ ++ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); ++ ++ if (psSyncCheckpointInt) ++ { ++ if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) ++ { ++ return &psSyncCheckpointInt->sCheckpointUFOAddr; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x", ++ __func__, ++ (void*)psSyncCheckpoint, ++ psSyncCheckpointInt->ui32ValidationCheck)); ++ } ++ } ++ ++invalid_chkpt: ++ return NULL; ++} ++ ++IMG_UINT32 ++SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ IMG_UINT32 ui32Ret = 0; ++ ++ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); ++ ++ if (psSyncCheckpointInt) ++ { ++ if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE) ++ { ++ ui32Ret = psSyncCheckpointInt->ui32FWAddr; ++ } ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x", ++ __func__, ++ (void*)psSyncCheckpoint, ++ psSyncCheckpointInt->ui32ValidationCheck)); ++ } ++ } ++ ++invalid_chkpt: ++ return ui32Ret; ++} ++ ++IMG_UINT32 ++SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ IMG_UINT32 ui32Ret = 0; ++ ++ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); ++ ++ if (psSyncCheckpointInt) ++ { ++#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s returning ID for sync checkpoint<%p>", ++ __func__, ++ (void*)psSyncCheckpointInt)); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s (validationCheck=0x%x)", ++ __func__, ++ psSyncCheckpointInt->ui32ValidationCheck)); ++#endif ++ ui32Ret = psSyncCheckpointInt->ui32UID; 
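++ /* ui32UID is the unique value assigned from g_SyncCheckpointUID when the
++ * checkpoint was allocated. */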
++#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s (ui32UID=0x%x)", ++ __func__, ++ psSyncCheckpointInt->ui32UID)); ++#endif ++ } ++ return ui32Ret; ++ ++invalid_chkpt: ++ return 0; ++} ++ ++PVRSRV_TIMELINE ++SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ PVRSRV_TIMELINE i32Ret = PVRSRV_NO_TIMELINE; ++ ++ PVR_LOG_GOTO_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt); ++ ++ if (psSyncCheckpointInt) ++ { ++ i32Ret = psSyncCheckpointInt->hTimeline; ++ } ++ return i32Ret; ++ ++invalid_chkpt: ++ return 0; ++} ++ ++ ++IMG_UINT32 ++SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); ++ ++ return OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount); ++} ++ ++IMG_UINT32 ++SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); ++ ++ return OSAtomicRead(&psSyncCheckpointInt->hRefCount); ++} ++ ++IMG_PID ++SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt = (SYNC_CHECKPOINT*)psSyncCheckpoint; ++ PVR_LOG_RETURN_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0); ++ ++ return psSyncCheckpointInt->uiProcess; ++} ++ ++IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode, ++ IMG_UINT32 ui32FwAddr) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt; ++ PDLLIST_NODE psNode, psNext; ++ IMG_UINT32 ui32State = 0; ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); ++ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) ++ { ++ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); ++ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) ++ { ++ ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State; ++ break; ++ } ++ } ++ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); ++ return ui32State; ++} ++ ++void SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, ++ IMG_UINT32 ui32FwAddr) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpointInt; ++ PDLLIST_NODE psNode, psNext; ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s called to error UFO with ui32FWAddr=%d", ++ __func__, ++ ui32FwAddr)); ++#endif ++ ++ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); ++ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) ++ { ++ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); ++ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) ++ { ++#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s calling SyncCheckpointError for sync checkpoint <%p>", ++ __func__, ++ (void*)psSyncCheckpointInt)); ++#endif ++ /* Mark as errored */ ++ SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt, IMG_TRUE); ++ break; ++ } ++ } ++ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); ++} ++ ++void SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr) ++{ ++#if 
(ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s called to rollback UFO with ui32FWAddr=0x%x", ++ __func__, ++ ui32FwAddr)); ++#endif ++#if !defined(NO_HARDWARE) ++ { ++ SYNC_CHECKPOINT *psSyncCheckpointInt = NULL; ++ PDLLIST_NODE psNode = NULL, psNext = NULL; ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); ++ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) ++ { ++ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); ++ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt)) ++ { ++#if ((ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)) || (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s called for psSyncCheckpointInt<%p> %d->%d", ++ __func__, ++ (void *) psSyncCheckpointInt, ++ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount), ++ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount) - 1)); ++#endif ++ OSAtomicDecrement(&psSyncCheckpointInt->hEnqueuedCCBCount); ++ break; ++ } ++ } ++ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); ++ } ++#endif ++} ++ ++static void _SyncCheckpointState(PDLLIST_NODE psNode, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sListNode); ++ ++ if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ACTIVE) ++ { ++ PVR_DUMPDEBUG_LOG("\t- ID = %d, FWAddr = 0x%08x, r%d:e%d:f%d: %s", ++ psSyncCheckpoint->ui32UID, ++ psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + ++ _SyncCheckpointGetOffset(psSyncCheckpoint), ++ OSAtomicRead(&psSyncCheckpoint->hRefCount), ++ OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), ++ psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, ++ psSyncCheckpoint->azName); ++ } ++} ++ ++static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; ++ DLLIST_NODE *psNode, *psNext; ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) ++ { ++ PVR_DUMPDEBUG_LOG("------[ Active Sync Checkpoints ]------"); ++ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); ++ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext) ++ { ++ _SyncCheckpointState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); ++ } ++} ++ ++PVRSRV_ERROR ++SyncCheckpointInit(PPVRSRV_DEVICE_NODE psDevNode) ++{ ++ PVRSRV_ERROR eError; ++#if defined(PDUMP) ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ psDevInfo = psDevNode->pvDevice; ++#endif ++ ++ eError = OSSpinLockCreate(&psDevNode->hSyncCheckpointListLock); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ dllist_init(&psDevNode->sSyncCheckpointSyncsList); ++ ++ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hSyncCheckpointNotify, ++ psDevNode, ++ _SyncCheckpointDebugRequest, ++ DEBUG_REQUEST_SYNCCHECKPOINT, ++ (PVRSRV_DBGREQ_HANDLE)psDevNode); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ { ++ _SyncCheckpointRecordListInit(psDevNode); ++ } ++ ++#if defined(PDUMP) ++ eError = OSSpinLockCreate(&psDevInfo->hSyncCheckpointSignalSpinLock); ++ if (eError != PVRSRV_OK) ++ { 
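++        /* Sketch of the unwind idiom used by this function's failure paths
++         * (the e0..e6 labels below): each label undoes the steps that
++         * succeeded before the failing one, in reverse order, falling
++         * through to the labels beneath it. StepA()/StepB()/UndoA() are
++         * placeholders:
++         *
++         *     eError = StepA(); if (eError != PVRSRV_OK) goto e0;
++         *     eError = StepB(); if (eError != PVRSRV_OK) goto e1;  // undo StepA
++         *     return PVRSRV_OK;
++         * e1: UndoA();
++         * e0: return eError;
++         */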
++ psDevInfo->hSyncCheckpointSignalSpinLock = NULL; ++ goto e1; ++ } ++ ++ eError = OSLockCreate(&psDevNode->hSyncCheckpointSignalLock); ++ if (eError != PVRSRV_OK) ++ { ++ psDevNode->hSyncCheckpointSignalLock = NULL; ++ goto e2; ++ } ++ ++ psDevNode->pui8DeferredSyncCPSignal = OSAllocMem(SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL ++ * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL)); ++ PVR_GOTO_IF_NOMEM(psDevNode->pui8DeferredSyncCPSignal, eError, e3); ++ ++ psDevNode->ui16SyncCPWriteIdx = 0; ++ psDevNode->ui16SyncCPReadIdx = 0; ++ ++ eError = OSInstallMISR(&psDevNode->pvSyncCPMISR, ++ MISRHandler_PdumpDeferredSyncSignalPoster, ++ psDevNode, ++ "RGX_PdumpDeferredSyncSignalPoster"); ++ PVR_GOTO_IF_ERROR(eError, e4); ++ ++ eError = OSLockCreate(&psDevNode->hSyncCheckpointContextListLock); ++ if (eError != PVRSRV_OK) ++ { ++ psDevNode->hSyncCheckpointContextListLock = NULL; ++ goto e5; ++ } ++ ++ ++ dllist_init(&psDevNode->sSyncCheckpointContextListHead); ++ ++ eError = PDumpRegisterTransitionCallbackFenceSync(psDevNode, ++ _SyncCheckpointPDumpTransition, ++ &psDevNode->hTransition); ++ if (eError != PVRSRV_OK) ++ { ++ psDevNode->hTransition = NULL; ++ goto e6; ++ } ++#endif ++ ++ return PVRSRV_OK; ++ ++#if defined(PDUMP) ++e6: ++ OSLockDestroy(psDevNode->hSyncCheckpointContextListLock); ++ psDevNode->hSyncCheckpointContextListLock = NULL; ++e5: ++ (void) OSUninstallMISR(psDevNode->pvSyncCPMISR); ++ psDevNode->pvSyncCPMISR = NULL; ++e4: ++ if (psDevNode->pui8DeferredSyncCPSignal) ++ { ++ OSFreeMem(psDevNode->pui8DeferredSyncCPSignal); ++ psDevNode->pui8DeferredSyncCPSignal = NULL; ++ } ++e3: ++ OSLockDestroy(psDevNode->hSyncCheckpointSignalLock); ++ psDevNode->hSyncCheckpointSignalLock = NULL; ++e2: ++ OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock); ++ psDevInfo->hSyncCheckpointSignalSpinLock = NULL; ++e1: ++ _SyncCheckpointRecordListDeinit(psDevNode); ++#endif ++e0: ++ OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock); ++ psDevNode->hSyncCheckpointListLock = NULL; ++ ++ return eError; ++} ++ ++void SyncCheckpointDeinit(PPVRSRV_DEVICE_NODE psDevNode) ++{ ++#if defined(PDUMP) ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ psDevInfo = psDevNode->pvDevice; ++ PDumpUnregisterTransitionCallbackFenceSync(psDevNode->hTransition); ++ psDevNode->hTransition = NULL; ++ ++ if (psDevNode->hSyncCheckpointContextListLock) ++ { ++ OSLockDestroy(psDevNode->hSyncCheckpointContextListLock); ++ psDevNode->hSyncCheckpointContextListLock = NULL; ++ } ++ ++ if (psDevNode->pvSyncCPMISR) ++ { ++ (void) OSUninstallMISR(psDevNode->pvSyncCPMISR); ++ psDevNode->pvSyncCPMISR = NULL; ++ } ++ ++ if (psDevNode->pui8DeferredSyncCPSignal) ++ { ++ OSFreeMem(psDevNode->pui8DeferredSyncCPSignal); ++ psDevNode->pui8DeferredSyncCPSignal = NULL; ++ } ++ if (psDevNode->hSyncCheckpointSignalLock) ++ { ++ OSLockDestroy(psDevNode->hSyncCheckpointSignalLock); ++ psDevNode->hSyncCheckpointSignalLock = NULL; ++ } ++ if (psDevInfo->hSyncCheckpointSignalSpinLock) ++ { ++ OSSpinLockDestroy(psDevInfo->hSyncCheckpointSignalSpinLock); ++ psDevInfo->hSyncCheckpointSignalSpinLock = NULL; ++ } ++#endif ++ ++ PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hSyncCheckpointNotify); ++ psDevNode->hSyncCheckpointNotify = NULL; ++ OSSpinLockDestroy(psDevNode->hSyncCheckpointListLock); ++ psDevNode->hSyncCheckpointListLock = NULL; ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ { ++ _SyncCheckpointRecordListDeinit(psDevNode); ++ } ++} ++ ++void SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 
ui32FwAddr, ++ IMG_CHAR * pszSyncInfo, size_t len) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ IMG_BOOL bFound = IMG_FALSE; ++ ++ if (!pszSyncInfo) ++ { ++ return; ++ } ++ ++ pszSyncInfo[0] = '\0'; ++ ++ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); ++ dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) ++ { ++ struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec = ++ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); ++ if ((psSyncCheckpointRec->ui32FwBlockAddr + psSyncCheckpointRec->ui32SyncOffset + 1) == ui32FwAddr) ++ { ++ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock; ++ if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr) ++ { ++ void *pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr, ++ psSyncCheckpointRec->ui32SyncOffset); ++ OSSNPrintf(pszSyncInfo, len, "%s Checkpoint:%05u (%s)", ++ (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ? ++ "SIGNALLED" : ++ ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ? ++ "ERRORED" : "ACTIVE"), ++ psSyncCheckpointRec->uiPID, ++ psSyncCheckpointRec->szClassName); ++ } ++ else ++ { ++ OSSNPrintf(pszSyncInfo, len, "Checkpoint:%05u (%s)", ++ psSyncCheckpointRec->uiPID, ++ psSyncCheckpointRec->szClassName); ++ } ++ ++ bFound = IMG_TRUE; ++ break; ++ } ++ } ++ OSLockRelease(psDevNode->hSyncCheckpointRecordLock); ++ ++ if (!bFound && (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT)) ++ { ++ OSSNPrintf(pszSyncInfo, len, "(Record may be lost)"); ++ } ++} ++ ++static PVRSRV_ERROR ++_SyncCheckpointRecordAdd( ++ PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord, ++ SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock, ++ IMG_UINT32 ui32FwBlockAddr, ++ IMG_UINT32 ui32SyncOffset, ++ IMG_UINT32 ui32UID, ++ IMG_UINT32 ui32ClassNameSize, ++ const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt) ++{ ++ struct SYNC_CHECKPOINT_RECORD * psSyncRec; ++ _SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext; ++ PVRSRV_DEVICE_NODE *psDevNode = psContext->psDevNode; ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_RETURN_IF_INVALID_PARAM(phRecord); ++ ++ *phRecord = NULL; ++ ++ psSyncRec = OSAllocMem(sizeof(*psSyncRec)); ++ PVR_LOG_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc); /* Sets OOM error code */ ++ ++ psSyncRec->psDevNode = psDevNode; ++ psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock; ++ psSyncRec->ui32SyncOffset = ui32SyncOffset; ++ psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr; ++ psSyncRec->ui64OSTime = OSClockns64(); ++ psSyncRec->uiPID = OSGetCurrentProcessID(); ++ psSyncRec->ui32UID = ui32UID; ++ psSyncRec->pSyncCheckpt = pSyncCheckpt; ++ if (pszClassName) ++ { ++ if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH) ++ ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH; ++ /* Copy over the class name annotation */ ++ OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize); ++ } ++ else ++ { ++ /* No class name annotation */ ++ psSyncRec->szClassName[0] = 0; ++ } ++ ++ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); ++ if (psDevNode->ui32SyncCheckpointRecordCount < SYNC_CHECKPOINT_RECORD_LIMIT) ++ { ++ dllist_add_to_head(&psDevNode->sSyncCheckpointRecordList, &psSyncRec->sNode); ++ psDevNode->ui32SyncCheckpointRecordCount++; ++ ++ if (psDevNode->ui32SyncCheckpointRecordCount > psDevNode->ui32SyncCheckpointRecordCountHighWatermark) ++ { ++ psDevNode->ui32SyncCheckpointRecordCountHighWatermark = psDevNode->ui32SyncCheckpointRecordCount; ++ } ++ 
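++        /* Worked illustration of the bookkeeping above: the record list is
++         * bounded by SYNC_CHECKPOINT_RECORD_LIMIT and the high watermark only
++         * ever grows, e.g.
++         *
++         *     count:      3 -> 4 -> 3 -> 5      (records added and removed)
++         *     watermark:  3    4    4    5      (maximum count seen so far)
++         *
++         * SyncCheckpointRecordLookup() reports "(Record may be lost)" only
++         * when a record is not found and the watermark has reached the limit.
++         */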
} ++ else ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\". %u records already exist.", ++ __func__, ++ pszClassName, ++ psDevNode->ui32SyncCheckpointRecordCount)); ++ OSFreeMem(psSyncRec); ++ psSyncRec = NULL; ++ eError = PVRSRV_ERROR_TOOMANYBUFFERS; ++ } ++ OSLockRelease(psDevNode->hSyncCheckpointRecordLock); ++ ++ *phRecord = (PSYNC_CHECKPOINT_RECORD_HANDLE)psSyncRec; ++ ++fail_alloc: ++ return eError; ++} ++ ++static PVRSRV_ERROR ++_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord) ++{ ++ struct SYNC_CHECKPOINT_RECORD **ppFreedSync; ++ struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord; ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++ PVR_RETURN_IF_INVALID_PARAM(hRecord); ++ ++ psDevNode = pSync->psDevNode; ++ ++ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); ++ ++ dllist_remove_node(&pSync->sNode); ++ ++ if (psDevNode->uiSyncCheckpointRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: psDevNode->uiSyncCheckpointRecordFreeIdx out of range", ++ __func__)); ++ psDevNode->uiSyncCheckpointRecordFreeIdx = 0; ++ } ++ ppFreedSync = &psDevNode->apsSyncCheckpointRecordsFreed[psDevNode->uiSyncCheckpointRecordFreeIdx]; ++ psDevNode->uiSyncCheckpointRecordFreeIdx = ++ (psDevNode->uiSyncCheckpointRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; ++ ++ if (*ppFreedSync) ++ { ++ OSFreeMem(*ppFreedSync); ++ } ++ pSync->psSyncCheckpointBlock = NULL; ++ pSync->ui64OSTime = OSClockns64(); ++ *ppFreedSync = pSync; ++ ++ psDevNode->ui32SyncCheckpointRecordCount--; ++ ++ OSLockRelease(psDevNode->hSyncCheckpointRecordLock); ++ ++ return PVRSRV_OK; ++} ++ ++#define NS_IN_S (1000000000UL) ++static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec, ++ IMG_UINT64 ui64TimeNow, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ SYNC_CHECKPOINT *psSyncCheckpoint = (SYNC_CHECKPOINT *)psSyncCheckpointRec->pSyncCheckpt; ++ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock; ++ IMG_UINT64 ui64DeltaS; ++ IMG_UINT32 ui32DeltaF; ++ IMG_UINT64 ui64Delta = ui64TimeNow - psSyncCheckpointRec->ui64OSTime; ++ ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF); ++ ++ if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr) ++ { ++ void *pSyncCheckpointAddr; ++ pSyncCheckpointAddr = IMG_OFFSET_ADDR(psSyncCheckpointBlock->pui32LinAddr, ++ psSyncCheckpointRec->ui32SyncOffset); ++ ++ PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x (r%d:e%d:f%d) State=%s (%s)", ++ psSyncCheckpointRec->uiPID, ++ ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID, ++ (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset), ++ OSAtomicRead(&psSyncCheckpoint->hRefCount), ++ OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount), ++ psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount, ++ (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ? ++ "SIGNALLED" : ++ ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ? 
++ "ERRORED" : "ACTIVE"), ++ psSyncCheckpointRec->szClassName); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("\t%05u %05" IMG_UINT64_FMTSPEC ".%09u %010u FWAddr=0x%08x State= (%s)", ++ psSyncCheckpointRec->uiPID, ++ ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID, ++ (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset), ++ psSyncCheckpointRec->szClassName ++ ); ++ } ++} ++ ++static void _SyncCheckpointRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; ++ IMG_UINT64 ui64TimeNowS; ++ IMG_UINT32 ui32TimeNowF; ++ IMG_UINT64 ui64TimeNow = OSClockns64(); ++ DLLIST_NODE *psNode, *psNext; ++ ++ ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF); ++ ++ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) ++ { ++ IMG_UINT32 i; ++ ++ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock); ++ ++ PVR_DUMPDEBUG_LOG("Dumping allocated sync checkpoints. Allocated: %u High watermark: %u (time ref %05" IMG_UINT64_FMTSPEC ".%09u)", ++ psDevNode->ui32SyncCheckpointRecordCount, ++ psDevNode->ui32SyncCheckpointRecordCountHighWatermark, ++ ui64TimeNowS, ++ ui32TimeNowF); ++ if (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT) ++ { ++ PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.", ++ SYNC_CHECKPOINT_RECORD_LIMIT); ++ } ++ PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)", ++ "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation"); ++ ++ dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext) ++ { ++ struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec = ++ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode); ++ _SyncCheckpointRecordPrint(psSyncCheckpointRec, ui64TimeNow, ++ pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++ ++ PVR_DUMPDEBUG_LOG("Dumping all recently freed sync checkpoints @ %05" IMG_UINT64_FMTSPEC ".%09u", ++ ui64TimeNowS, ++ ui32TimeNowF); ++ PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)", ++ "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation"); ++ for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncCheckpointRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN); ++ i != psDevNode->uiSyncCheckpointRecordFreeIdx; ++ i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)) ++ { ++ if (psDevNode->apsSyncCheckpointRecordsFreed[i]) ++ { ++ _SyncCheckpointRecordPrint(psDevNode->apsSyncCheckpointRecordsFreed[i], ++ ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++ else ++ { ++ break; ++ } ++ } ++ OSLockRelease(psDevNode->hSyncCheckpointRecordLock); ++ } ++} ++#undef NS_IN_S ++static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = OSLockCreate(&psDevNode->hSyncCheckpointRecordLock); ++ PVR_GOTO_IF_ERROR(eError, fail_lock_create); ++ dllist_init(&psDevNode->sSyncCheckpointRecordList); ++ ++ psDevNode->ui32SyncCheckpointRecordCount = 0; ++ psDevNode->ui32SyncCheckpointRecordCountHighWatermark = 0; ++ ++ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hSyncCheckpointRecordNotify, ++ psDevNode, ++ _SyncCheckpointRecordRequest, ++ DEBUG_REQUEST_SYNCCHECKPOINT, ++ (PVRSRV_DBGREQ_HANDLE)psDevNode); ++ PVR_GOTO_IF_ERROR(eError, fail_dbg_register); ++ ++ return PVRSRV_OK; ++ ++fail_dbg_register: ++ 
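++    /* Worked example for the timestamp formatting used by the record-dump
++     * functions above: OSDivide64() splits a nanosecond delta into whole
++     * seconds plus a nanosecond remainder. For ui64Delta = 3250000000 ns:
++     *
++     *     ui64DeltaS = OSDivide64(3250000000, NS_IN_S, &ui32DeltaF);
++     *     // ui64DeltaS == 3, ui32DeltaF == 250000000
++     *     // rendered as "00003.250000000" by the "%05llu.%09u"-style format
++     */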
++    OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
++fail_lock_create:
++    return eError;
++}
++
++static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
++{
++    DLLIST_NODE *psNode, *psNext;
++    int i;
++
++    OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
++    dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
++    {
++        struct SYNC_CHECKPOINT_RECORD *pSyncCheckpointRec =
++            IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
++
++        dllist_remove_node(psNode);
++        OSFreeMem(pSyncCheckpointRec);
++    }
++
++    for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
++    {
++        if (psDevNode->apsSyncCheckpointRecordsFreed[i])
++        {
++            OSFreeMem(psDevNode->apsSyncCheckpointRecordsFreed[i]);
++            psDevNode->apsSyncCheckpointRecordsFreed[i] = NULL;
++        }
++    }
++    OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
++
++    if (psDevNode->hSyncCheckpointRecordNotify)
++    {
++        PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hSyncCheckpointRecordNotify);
++    }
++    OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
++}
++
++#if defined(PDUMP)
++
++static PVRSRV_ERROR
++_SyncCheckpointAllocPDump(PVRSRV_DEVICE_NODE *psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint)
++{
++    PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS,
++                          "Allocated Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
++                          psSyncCheckpoint->azName,
++                          psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline,
++                          psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr);
++
++    DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
++                              _SyncCheckpointGetOffset(psSyncCheckpoint),
++                              PVRSRV_SYNC_CHECKPOINT_ACTIVE,
++                              PDUMP_FLAGS_CONTINUOUS);
++
++    return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR
++_SyncCheckpointUpdatePDump(PPVRSRV_DEVICE_NODE psDevNode, SYNC_CHECKPOINT *psSyncCheckpoint, IMG_UINT32 ui32Status, IMG_UINT32 ui32FenceSyncFlags)
++{
++    IMG_BOOL bSleepAllowed = (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE;
++    PVRSRV_RGXDEV_INFO *psDevInfo;
++
++    psDevInfo = psDevNode->pvDevice;
++    /*
++       We might be asked to PDump sync state outside of the capture range
++       (e.g. texture uploads) so make this continuous.
++ */ ++ if (bSleepAllowed) ++ { ++ if (ui32Status == PVRSRV_SYNC_CHECKPOINT_ERRORED) ++ { ++ PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, ++ "Errored Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", ++ psSyncCheckpoint->azName, ++ psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, ++ (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + ++ _SyncCheckpointGetOffset(psSyncCheckpoint))); ++ } ++ else ++ { ++ PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, ++ "Signalled Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)", ++ psSyncCheckpoint->azName, ++ psSyncCheckpoint->ui32UID, psSyncCheckpoint->hTimeline, ++ (psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr + ++ _SyncCheckpointGetOffset(psSyncCheckpoint))); ++ } ++ ++ DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, ++ _SyncCheckpointGetOffset(psSyncCheckpoint), ++ ui32Status, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ else ++ { ++ _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData; ++ OS_SPINLOCK_FLAGS uiFlags; ++ IMG_UINT16 ui16NewWriteIdx; ++ ++ OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); ++ ++ ui16NewWriteIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPWriteIdx); ++ if (ui16NewWriteIdx == psDevNode->ui16SyncCPReadIdx) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: ERROR Deferred SyncCheckpointSignal CB is full)", ++ __func__)); ++ } ++ else ++ { ++ psSyncData = GET_CP_CB_BASE(psDevNode->ui16SyncCPWriteIdx); ++ psSyncData->asSyncCheckpoint = *psSyncCheckpoint; ++ psSyncData->ui32Status = ui32Status; ++ psDevNode->ui16SyncCPWriteIdx = ui16NewWriteIdx; ++ } ++ ++ OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); ++ ++ OSScheduleMISR(psDevNode->pvSyncCPMISR); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static void ++MISRHandler_PdumpDeferredSyncSignalPoster(void *pvData) ++{ ++ PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData; ++ OS_SPINLOCK_FLAGS uiFlags; ++ IMG_UINT16 ui16ReadIdx, ui16WriteIdx; ++ _SYNC_CHECKPOINT_DEFERRED_SIGNAL *psSyncData; ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++ ++ psDevInfo = psDevNode->pvDevice; ++ ++ OSLockAcquire(psDevNode->hSyncCheckpointSignalLock); ++ ++ OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); ++ /* Snapshot current write and read offset of CB */ ++ ui16WriteIdx = psDevNode->ui16SyncCPWriteIdx; ++ ui16ReadIdx = psDevNode->ui16SyncCPReadIdx; ++ ++ OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); ++ /* CB is empty */ ++ if (ui16WriteIdx == ui16ReadIdx) ++ { ++ OSLockRelease(psDevNode->hSyncCheckpointSignalLock); ++ return; ++ } ++ do ++ { ++ /* Read item in the CB and flush it to pdump */ ++ psSyncData = GET_CP_CB_BASE(ui16ReadIdx); ++ _SyncCheckpointUpdatePDump(psDevNode, &psSyncData->asSyncCheckpoint, psSyncData->ui32Status, PVRSRV_FENCE_FLAG_NONE); ++ ui16ReadIdx = GET_CP_CB_NEXT_IDX(psDevNode->ui16SyncCPReadIdx); ++ /* Increment read offset in CB as one item is flushed to pdump */ ++ OSSpinLockAcquire(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); ++ psDevNode->ui16SyncCPReadIdx = ui16ReadIdx; ++ OSSpinLockRelease(psDevInfo->hSyncCheckpointSignalSpinLock, uiFlags); ++ /* Call to this function will flush all the items present in CB ++ * when this function is called i.e. 
use snapshot of WriteOffset ++ * taken at the beginning in this function and iterate till Write != Read */ ++ } while (ui16WriteIdx != ui16ReadIdx); ++ ++ OSLockRelease(psDevNode->hSyncCheckpointSignalLock); ++} ++ ++PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence) ++{ ++ PVRSRV_ERROR eError; ++ PSYNC_CHECKPOINT *apsCheckpoints = NULL; ++ SYNC_CHECKPOINT *psSyncCheckpoint = NULL; ++ IMG_UINT32 i, uiNumCheckpoints = 0; ++#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) ++ PVRSRV_RGXDEV_INFO *psDevInfo; ++#endif ++ ++ if (hFence != PVRSRV_NO_FENCE) ++ { ++ eError = g_psSyncCheckpointPfnStruct->pfnSyncFenceGetCheckpoints(hFence, &uiNumCheckpoints, &apsCheckpoints); ++ } ++ else ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ PVR_LOG_RETURN_IF_ERROR(eError, "g_pfnFenceGetCheckpoints"); ++ ++ if (uiNumCheckpoints) ++ { ++ /* Flushing deferred fence signals to pdump */ ++ psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0]; ++ MISRHandler_PdumpDeferredSyncSignalPoster(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode); ++ } ++ ++ for (i=0; i < uiNumCheckpoints; i++) ++ { ++ psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[i]; ++ if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ++ { ++ PDUMPCOMMENTWITHFLAGS(psSyncCheckpoint->psSyncCheckpointBlock->psDevNode, ++ psSyncCheckpoint->ui32PDumpFlags, ++ "Wait for Fence %s (ID:%d)", ++ psSyncCheckpoint->azName, ++ psSyncCheckpoint->ui32UID); ++ ++ eError = DevmemPDumpDevmemPol32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc, ++ _SyncCheckpointGetOffset(psSyncCheckpoint), ++ PVRSRV_SYNC_CHECKPOINT_SIGNALLED, ++ 0xFFFFFFFF, ++ PDUMP_POLL_OPERATOR_EQUAL, ++ psSyncCheckpoint->ui32PDumpFlags); ++ PVR_LOG_IF_ERROR(eError, "DevmemPDumpDevmemPol32"); ++ } ++ } ++ ++#if defined(SUPPORT_VALIDATION) && defined(SUPPORT_SOC_TIMER) && defined(NO_HARDWARE) && defined(PDUMP) ++ /* Sampling of USC timers can only be done after synchronisation for a 3D kick is over */ ++ if (uiNumCheckpoints) ++ { ++ psSyncCheckpoint = (SYNC_CHECKPOINT *)apsCheckpoints[0]; ++ psDevInfo = psSyncCheckpoint->psSyncCheckpointBlock->psDevNode->pvDevice; ++ if (psDevInfo->psRGXFWIfFwSysData->ui32ConfigFlags & RGXFWIF_INICFG_VALIDATE_SOCUSC_TIMER) ++ { ++ RGXValidateSOCUSCTimer(psDevInfo, PDUMP_CONT, 0, 0, NULL); ++ } ++ } ++#endif ++ ++ /* Free the memory that was allocated for the sync checkpoint list returned */ ++ if (apsCheckpoints) ++ { ++ SyncCheckpointFreeCheckpointListMem(apsCheckpoints); ++ } ++ ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR ++_SyncCheckpointPDumpTransition(void *pvData, PDUMP_TRANSITION_EVENT eEvent) ++{ ++ _SYNC_CHECKPOINT_CONTEXT *psContext; ++ DLLIST_NODE *psNode, *psNext; ++ DLLIST_NODE *psNode1, *psNext1; ++ PPVRSRV_DEVICE_NODE psDevNode = (PPVRSRV_DEVICE_NODE) pvData; ++ ++ if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == PDUMP_TRANSITION_EVENT_BLOCK_STARTED)) ++ { ++ OSLockAcquire(psDevNode->hSyncCheckpointContextListLock); ++ dllist_foreach_node(&psDevNode->sSyncCheckpointContextListHead, psNode, psNext) ++ { ++ psContext = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT_CONTEXT, sListNode); ++ ++ OSLockAcquire(psContext->hSyncCheckpointBlockListLock); ++ dllist_foreach_node(&psContext->sSyncCheckpointBlockListHead, psNode1, psNext1) ++ { ++ SYNC_CHECKPOINT_BLOCK *psSyncBlk = ++ IMG_CONTAINER_OF(psNode1, SYNC_CHECKPOINT_BLOCK, sListNode); ++ DevmemPDumpLoadMem(psSyncBlk->hMemDesc, ++ 0, ++ 
psSyncBlk->ui32SyncBlockSize, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ OSLockRelease(psContext->hSyncCheckpointBlockListLock); ++ } ++ OSLockRelease(psDevNode->hSyncCheckpointContextListLock); ++ } ++ ++ return PVRSRV_OK; ++} ++#endif ++ ++static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext) ++{ ++ _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; ++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psContext->psDevNode; ++ DECLARE_DLLIST(sCleanupList); ++ DLLIST_NODE *psNode, *psNext; ++ OS_SPINLOCK_FLAGS uiFlags; ++ ++#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, "%s called", __func__)); ++#endif ++ ++ /* Check the deferred cleanup list and free any sync checkpoints we can */ ++ OSSpinLockAcquire(psCtxCtl->hDeferredCleanupListLock, uiFlags); ++ ++ if (dllist_is_empty(&psCtxCtl->sDeferredCleanupListHead)) ++ { ++ OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags); ++#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, "%s: Defer free list is empty", __func__)); ++#endif ++ /* if list is empty then we have nothing to do here */ ++ return; ++ } ++ ++ dllist_foreach_node(&psCtxCtl->sDeferredCleanupListHead, psNode, psNext) ++ { ++ SYNC_CHECKPOINT *psSyncCheckpointInt = ++ IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sDeferredFreeListNode); ++ ++ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount == ++ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount))) ++ { ++ if ((GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ && psSyncCheckpointInt->hRecord) ++ { ++ PVRSRV_ERROR eError; ++ /* remove this sync record */ ++ eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord); ++ PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove"); ++ } ++ ++ /* Move the sync checkpoint from the deferred free list to local list */ ++ dllist_remove_node(&psSyncCheckpointInt->sDeferredFreeListNode); ++ /* It's not an ideal solution to traverse list of checkpoints-to-free ++ * twice but it allows us to avoid holding the lock for too long */ ++ dllist_add_to_tail(&sCleanupList, &psSyncCheckpointInt->sDeferredFreeListNode); ++ } ++#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) ++ else ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint '%s'' (ID:%d)<%p>), " ++ "still pending (enq=%d,FWRef=%d)", __func__, ++ psSyncCheckpointInt->azName, psSyncCheckpointInt->ui32UID, ++ (void*)psSyncCheckpointInt, ++ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)), ++ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount)); ++ } ++#endif ++ } ++ ++ OSSpinLockRelease(psCtxCtl->hDeferredCleanupListLock, uiFlags); ++ ++ dllist_foreach_node(&sCleanupList, psNode, psNext) { ++ SYNC_CHECKPOINT *psSyncCheckpointInt = ++ IMG_CONTAINER_OF(psNode, SYNC_CHECKPOINT, sDeferredFreeListNode); ++ ++ /* Remove the sync checkpoint from the global list */ ++ OSSpinLockAcquire(psDevNode->hSyncCheckpointListLock, uiFlags); ++ dllist_remove_node(&psSyncCheckpointInt->sListNode); ++ OSSpinLockRelease(psDevNode->hSyncCheckpointListLock, uiFlags); ++ ++ RGXSRV_HWPERF_FREE(psDevNode, SYNC_CP, psSyncCheckpointInt->ui32FWAddr); ++ ++#if (SYNC_CHECKPOINT_POOL_SIZE > 0) ++#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s attempting to return sync(ID:%d),%p> to pool", ++ __func__, ++ psSyncCheckpointInt->ui32UID, ++ (void *) psSyncCheckpointInt)); ++#endif ++ if (!_PutCheckpointInPool(psSyncCheckpointInt)) 
++#endif
++        {
++#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
++#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
++            PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it",
++                     __func__));
++#endif
++#endif
++            _FreeSyncCheckpoint(psSyncCheckpointInt);
++        }
++    }
++}
++
++#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
++static SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
++{
++    _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl;
++    SYNC_CHECKPOINT *psSyncCheckpoint = NULL;
++    OS_SPINLOCK_FLAGS uiFlags;
++
++    /* Acquire sync checkpoint pool lock */
++    OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
++
++    /* Check if we can allocate from the pool */
++    if (psCtxCtl->bSyncCheckpointPoolValid &&
++        (psCtxCtl->ui32SyncCheckpointPoolCount > SYNC_CHECKPOINT_POOL_SEDIMENT) &&
++        (psCtxCtl->ui32SyncCheckpointPoolWp != psCtxCtl->ui32SyncCheckpointPoolRp))
++    {
++        /* Get the next sync checkpoint from the pool */
++        psSyncCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp];
++        psCtxCtl->ui32SyncCheckpointPoolRp =
++            (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK;
++        psCtxCtl->ui32SyncCheckpointPoolCount--;
++        psCtxCtl->bSyncCheckpointPoolFull = IMG_FALSE;
++        psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
++#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
++        PVR_DPF((PVR_DBG_WARNING,
++                "%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, "
++                "poolRp=%d, poolWp=%d",
++                __func__,
++                psSyncCheckpoint->ui32UID,
++                psCtxCtl->ui32SyncCheckpointPoolCount,
++                SYNC_CHECKPOINT_POOL_SIZE,
++                (void *) psContext,
++                psCtxCtl->ui32SyncCheckpointPoolRp,
++                psCtxCtl->ui32SyncCheckpointPoolWp));
++#endif
++    }
++    /* Release sync checkpoint pool lock */
++    OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
++
++    return psSyncCheckpoint;
++}
++
++static IMG_BOOL _PutCheckpointInPool(SYNC_CHECKPOINT *psSyncCheckpoint)
++{
++    _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
++    _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl;
++    IMG_BOOL bReturnedToPool = IMG_FALSE;
++    OS_SPINLOCK_FLAGS uiFlags;
++
++    /* Acquire sync checkpoint pool lock */
++    OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags);
++
++    /* Check if pool has space */
++    if (psCtxCtl->bSyncCheckpointPoolValid && !psCtxCtl->bSyncCheckpointPoolFull)
++    {
++        /* Put the sync checkpoint into the next write slot in the pool */
++        psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolWp] = psSyncCheckpoint;
++        psCtxCtl->ui32SyncCheckpointPoolWp =
++            (psCtxCtl->ui32SyncCheckpointPoolWp + 1) & SYNC_CHECKPOINT_POOL_MASK;
++        psCtxCtl->ui32SyncCheckpointPoolCount++;
++        psCtxCtl->bSyncCheckpointPoolFull =
++            ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) &&
++             (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp));
++        bReturnedToPool = IMG_TRUE;
++        psSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_UNDEF;
++        psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL;
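++        /* Sketch of the ring arithmetic used by the pool (assuming, purely
++         * for illustration, SYNC_CHECKPOINT_POOL_SIZE == 4, so the mask is 3):
++         *
++         *     Wp: 0 -> 1 -> 2 -> 3 -> 0    // (Wp + 1) & SYNC_CHECKPOINT_POOL_MASK
++         *
++         * Because Wp == Rp also describes an empty pool, "full" additionally
++         * requires ui32SyncCheckpointPoolCount > 0, as computed above.
++         */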
++#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d", ++ __func__, ++ psSyncCheckpoint->ui32UID, ++ psCtxCtl->ui32SyncCheckpointPoolCount, ++ SYNC_CHECKPOINT_POOL_SIZE, ++ psCtxCtl->ui32SyncCheckpointPoolRp, ++ psCtxCtl->ui32SyncCheckpointPoolWp)); ++#endif ++ } ++ /* Release sync checkpoint pool lock */ ++ OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); ++ ++ return bReturnedToPool; ++} ++ ++static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext) ++{ ++ _SYNC_CHECKPOINT_CONTEXT_CTL *const psCtxCtl = psContext->psContextCtl; ++ SYNC_CHECKPOINT *psCheckpoint = NULL; ++ DECLARE_DLLIST(sCleanupList); ++ DLLIST_NODE *psThis, *psNext; ++ OS_SPINLOCK_FLAGS uiFlags; ++ IMG_UINT32 ui32ItemsFreed = 0, ui32NullScpCount = 0, ui32PoolCount; ++ IMG_BOOL bPoolValid; ++ ++ /* Acquire sync checkpoint pool lock */ ++ OSSpinLockAcquire(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); ++ ++ bPoolValid = psCtxCtl->bSyncCheckpointPoolValid; ++ ui32PoolCount = psCtxCtl->ui32SyncCheckpointPoolCount; ++ ++ /* While the pool still contains sync checkpoints, free them */ ++ while (bPoolValid && psCtxCtl->ui32SyncCheckpointPoolCount > 0) ++ { ++ /* Get the sync checkpoint from the next read slot in the pool */ ++ psCheckpoint = psCtxCtl->psSyncCheckpointPool[psCtxCtl->ui32SyncCheckpointPoolRp]; ++ psCtxCtl->ui32SyncCheckpointPoolRp = ++ (psCtxCtl->ui32SyncCheckpointPoolRp + 1) & SYNC_CHECKPOINT_POOL_MASK; ++ psCtxCtl->ui32SyncCheckpointPoolCount--; ++ psCtxCtl->bSyncCheckpointPoolFull = ++ ((psCtxCtl->ui32SyncCheckpointPoolCount > 0) && ++ (psCtxCtl->ui32SyncCheckpointPoolWp == psCtxCtl->ui32SyncCheckpointPoolRp)); ++ ++ if (psCheckpoint) ++ { ++ PVR_ASSERT(!dllist_node_is_in_list(&psCheckpoint->sListNode)); ++ /* before checkpoints are added to the pool they are removed ++ * from the list so it's safe to use sListNode here */ ++ dllist_add_to_head(&sCleanupList, &psCheckpoint->sListNode); ++ } ++ else ++ { ++ ui32NullScpCount++; ++ } ++ } ++ ++ /* Release sync checkpoint pool lock */ ++ OSSpinLockRelease(psCtxCtl->hSyncCheckpointPoolLock, uiFlags); ++ ++ /* go through the local list and free all of the sync checkpoints */ ++ ++#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) ++ PVR_DPF((PVR_DBG_WARNING, "%s psContext=<%p>, bSyncCheckpointPoolValid=%d, " ++ "uiSyncCheckpointPoolCount=%d", __func__, (void *) psContext, ++ bPoolValid, ui32PoolCount)); ++ ++ if (ui32NullScpCount > 0) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s pool contained %u NULL entries", __func__, ++ ui32NullScpCount)); ++ } ++#endif ++ ++ dllist_foreach_node(&sCleanupList, psThis, psNext) ++ { ++ psCheckpoint = IMG_CONTAINER_OF(psThis, SYNC_CHECKPOINT, sListNode); ++ ++#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) ++ if (psCheckpoint->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "%s pool contains invalid entry " ++ "(ui32ValidationCheck=0x%x)", __func__, ++ psCheckpoint->ui32ValidationCheck)); ++ } ++ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpoint(ID:%d)", ++ __func__, psCheckpoint->ui32UID)); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpoint->ui32ValidationCheck=0x%x", ++ __func__, psCheckpoint->ui32ValidationCheck)); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpoint->uiSpanAddr=0x%llx", ++ __func__, psCheckpoint->uiSpanAddr)); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>", ++ __func__, (void *) psCheckpoint->psSyncCheckpointBlock)); ++ 
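++        /* Note on the structure of _CleanCheckpointPool() (a sketch of what
++         * the code around this point already does, not extra behaviour):
++         * entries are detached from the pool while the spinlock is held,
++         * collected on the local sCleanupList, and only freed afterwards:
++         *
++         *     lock();  move checkpoints pool -> sCleanupList;  unlock();
++         *     for each checkpoint on sCleanupList: _FreeSyncCheckpoint();
++         *
++         * keeping _FreeSyncCheckpoint() out of the spinlock hold time.
++         */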
PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext=<%p>", ++ __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext)); ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA=<%p>", ++ __func__, (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA)); ++ ++ PVR_DPF((PVR_DBG_WARNING, ++ "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), " ++ "psSubAllocRA=<%p>, ui32SpanAddr=0x%llx", ++ __func__, ++ psCheckpoint->ui32UID, ++ (void *) psCheckpoint, ++ (void *) psCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA, ++ psCheckpoint->uiSpanAddr)); ++#endif ++ ++ dllist_remove_node(psThis); ++ ++ _FreeSyncCheckpoint(psCheckpoint); ++ ui32ItemsFreed++; ++ } ++ ++ return ui32ItemsFreed; ++} ++#endif /* (SYNC_CHECKPOINT_POOL_SIZE > 0) */ +diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint.h b/drivers/gpu/drm/img-rogue/sync_checkpoint.h +new file mode 100644 +index 000000000000..33c26f420862 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sync_checkpoint.h +@@ -0,0 +1,666 @@ ++/*************************************************************************/ /*! ++@File ++@Title Synchronisation checkpoint interface header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines the client side interface for synchronisation ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef SYNC_CHECKPOINT_H ++#define SYNC_CHECKPOINT_H ++ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_sync_km.h" ++#include "pdumpdefs.h" ++#include "pdump.h" ++#include "dllist.h" ++#include "pvr_debug.h" ++#include "device_connection.h" ++#include "opaque_types.h" ++ ++#ifndef CHECKPOINT_TYPES ++#define CHECKPOINT_TYPES ++typedef struct SYNC_CHECKPOINT_CONTEXT_TAG *PSYNC_CHECKPOINT_CONTEXT; ++ ++typedef struct SYNC_CHECKPOINT_TAG *PSYNC_CHECKPOINT; ++#endif ++ ++/* definitions for functions to be implemented by OS-specific sync - the OS-specific sync code ++ will call SyncCheckpointRegisterFunctions() when initialised, in order to register functions ++ we can then call */ ++#ifndef CHECKPOINT_PFNS ++#define CHECKPOINT_PFNS ++typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, ++ PVRSRV_FENCE fence, ++ IMG_UINT32 *nr_checkpoints, ++ PSYNC_CHECKPOINT **checkpoint_handles, ++ IMG_UINT64 *pui64FenceUID); ++typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(PPVRSRV_DEVICE_NODE device, ++ const IMG_CHAR *fence_name, ++ PVRSRV_TIMELINE timeline, ++ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, ++ PVRSRV_FENCE *new_fence, ++ IMG_UINT64 *pui64FenceUID, ++ void **ppvFenceFinaliseData, ++ PSYNC_CHECKPOINT *new_checkpoint_handle, ++ IMG_HANDLE *timeline_update_sync, ++ IMG_UINT32 *timeline_update_value); ++typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data); ++typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data); ++typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data); ++typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr); ++typedef IMG_UINT32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(IMG_UINT32 num_ufos, IMG_UINT32 *vaddrs); ++#if defined(PDUMP) ++typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN)(PVRSRV_FENCE iFence, ++ IMG_UINT32 *puiNumCheckpoints, ++ PSYNC_CHECKPOINT **papsCheckpoints); ++#endif ++ ++#define SYNC_CHECKPOINT_IMPL_MAX_STRLEN 20 ++ ++typedef struct ++{ ++ PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve; ++ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate; ++ PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback; ++ PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise; ++ PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines; ++ PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem; ++ PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs; ++ IMG_CHAR pszImplName[SYNC_CHECKPOINT_IMPL_MAX_STRLEN]; ++#if defined(PDUMP) ++ PFN_SYNC_CHECKPOINT_FENCE_GETCHECKPOINTS_FN pfnSyncFenceGetCheckpoints; ++#endif ++} PFN_SYNC_CHECKPOINT_STRUCT; ++ ++PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_STRUCT *psSyncCheckpointPfns); ++ ++#endif /* ifndef CHECKPOINT_PFNS */ ++ ++/*************************************************************************/ /*! 
++@Function SyncCheckpointContextCreate ++ ++@Description Create a new synchronisation checkpoint context ++ ++@Input psDevNode Device node ++ ++@Output ppsSyncCheckpointContext Handle to the created synchronisation ++ checkpoint context ++ ++@Return PVRSRV_OK if the synchronisation checkpoint context was ++ successfully created ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode, ++ PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext); ++ ++/*************************************************************************/ /*! ++@Function SyncCheckpointContextDestroy ++ ++@Description Destroy a synchronisation checkpoint context ++ ++@Input psSyncCheckpointContext Handle to the synchronisation ++ checkpoint context to destroy ++ ++@Return PVRSRV_OK if the synchronisation checkpoint context was ++ successfully destroyed. ++ PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT if the context still ++ has sync checkpoints defined ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext); ++ ++/*************************************************************************/ /*! ++@Function SyncCheckpointContextRef ++ ++@Description Takes a reference on a synchronisation checkpoint context ++ ++@Input psContext Handle to the synchronisation checkpoint context ++ on which a ref is to be taken ++ ++@Return None ++*/ ++/*****************************************************************************/ ++void SyncCheckpointContextRef(PSYNC_CHECKPOINT_CONTEXT psContext); ++ ++/*************************************************************************/ /*! ++@Function SyncCheckpointContextUnref ++ ++@Description Drops a reference taken on a synchronisation checkpoint ++ context ++ ++@Input psContext Handle to the synchronisation checkpoint context ++ on which the ref is to be dropped ++ ++@Return None ++*/ ++/*****************************************************************************/ ++void SyncCheckpointContextUnref(PSYNC_CHECKPOINT_CONTEXT psContext); ++ ++/*************************************************************************/ /*! ++@Function SyncCheckpointAlloc ++ ++@Description Allocate a new synchronisation checkpoint on the specified ++ synchronisation checkpoint context ++ ++@Input hSyncCheckpointContext Handle to the synchronisation ++ checkpoint context ++ ++@Input hTimeline Timeline on which this sync ++ checkpoint is being created ++ ++@Input hFence Fence as passed into pfnFenceResolve ++ API, when the API encounters a non-PVR ++ fence as part of its input fence. From ++ all other places this argument must be ++ PVRSRV_NO_FENCE. ++ ++@Input pszClassName Sync checkpoint source annotation ++ (will be truncated to at most ++ PVRSRV_SYNC_NAME_LENGTH chars) ++ ++@Output ppsSyncCheckpoint Created synchronisation checkpoint ++ ++@Return PVRSRV_OK if the synchronisation checkpoint was ++ successfully created ++*/ ++/*****************************************************************************/ ++PVRSRV_ERROR ++SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, ++ PVRSRV_TIMELINE hTimeline, ++ PVRSRV_FENCE hFence, ++ const IMG_CHAR *pszCheckpointName, ++ PSYNC_CHECKPOINT *ppsSyncCheckpoint); ++ ++/*************************************************************************/ /*! 
++@Function       SyncCheckpointFree
++
++@Description    Free a synchronisation checkpoint.
++                The reference count held for the synchronisation checkpoint
++                is decremented - if it becomes zero, it is also freed.
++
++@Input          psSyncCheckpoint     The synchronisation checkpoint to free
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointSignal
++
++@Description    Signal the synchronisation checkpoint
++
++@Input          psSyncCheckpoint     The synchronisation checkpoint to signal
++
++@Input          ui32FenceSyncFlags   Flags used for controlling HWPerf behavior
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointSignalNoHW
++
++@Description    Signal the synchronisation checkpoint in a NO_HARDWARE build
++
++@Input          psSyncCheckpoint     The synchronisation checkpoint to signal
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointError
++
++@Description    Error the synchronisation checkpoint
++
++@Input          psSyncCheckpoint     The synchronisation checkpoint to error
++
++@Input          ui32FenceSyncFlags   Flags used for controlling HWPerf behavior
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointStateFromUFO
++
++@Description    Returns the current state of the synchronisation checkpoint
++                which has the given UFO firmware address
++
++@Input          psDevNode            The device owning the sync
++                                     checkpoint
++
++@Input          ui32FwAddr           The firmware address of the sync
++                                     checkpoint
++
++@Return         The current state (32-bit value) of the sync checkpoint
++*/
++/*****************************************************************************/
++IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
++                                      IMG_UINT32 ui32FwAddr);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointErrorFromUFO
++
++@Description    Error the synchronisation checkpoint which has the
++                given UFO firmware address
++
++@Input          psDevNode            The device owning the sync
++                                     checkpoint to be errored
++
++@Input          ui32FwAddr           The firmware address of the sync
++                                     checkpoint to be errored
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointRollbackFromUFO
++
++@Description    Drop the enqueued count reference taken on the synchronisation
++                checkpoint on behalf of the firmware.
++                Called in the event of a DM Kick failing.
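++
++                An illustrative failure path (SubmitKickToFW() is a
++                hypothetical kick helper, not part of this interface):
++
++                    SyncCheckpointCCBEnqueued(psCheckpoint);
++                    eError = SubmitKickToFW(psDevNode, psCheckpoint);
++                    if (eError != PVRSRV_OK)
++                    {
++                        SyncCheckpointRollbackFromUFO(psDevNode,
++                            SyncCheckpointGetFirmwareAddr(psCheckpoint));
++                    }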
++
++@Input          psDevNode            The device owning the sync
++                                     checkpoint to be rolled back
++
++@Input          ui32FwAddr           The firmware address of the sync
++                                     checkpoint to be rolled back
++
++@Return         None
++*/
++/*****************************************************************************/
++void
++SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointIsSignalled
++
++@Description    Returns IMG_TRUE if the synchronisation checkpoint is
++                signalled or errored
++
++@Input          psSyncCheckpoint     The synchronisation checkpoint to test
++
++@Input          ui32FenceSyncFlags   Flags used for controlling HWPerf behavior
++
++@Return         IMG_TRUE if the synchronisation checkpoint is signalled
++                or errored, IMG_FALSE otherwise
++*/
++/*****************************************************************************/
++IMG_BOOL
++SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint,
++                          IMG_UINT32 ui32FenceSyncFlags);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointIsErrored
++
++@Description    Returns IMG_TRUE if the synchronisation checkpoint is
++                errored
++
++@Input          psSyncCheckpoint     The synchronisation checkpoint to test
++
++@Input          ui32FenceSyncFlags   Flags used for controlling HWPerf behavior
++
++@Return         IMG_TRUE if the synchronisation checkpoint is errored,
++                IMG_FALSE otherwise
++*/
++/*****************************************************************************/
++IMG_BOOL
++SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint,
++                        IMG_UINT32 ui32FenceSyncFlags);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointTakeRef
++
++@Description    Take a reference on a synchronisation checkpoint
++
++@Input          psSyncCheckpoint     Synchronisation checkpoint to take a
++                                     reference on
++
++@Return         PVRSRV_OK if a reference was taken on the synchronisation
++                checkpoint
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointDropRef
++
++@Description    Drop a reference on a synchronisation checkpoint
++
++@Input          psSyncCheckpoint     Synchronisation checkpoint to drop a
++                                     reference on
++
++@Return         PVRSRV_OK if a reference was dropped on the synchronisation
++                checkpoint
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function       SyncCheckpointResolveFence
++
++@Description    Resolve a fence, returning a list of the sync checkpoints
++                that fence contains.
++                This function in turn calls a function provided by the
++                OS native sync implementation.
++
++@Input          psSyncCheckpointContext The sync checkpoint context
++                                     on which checkpoints should be
++                                     created (in the event of the fence
++                                     having a native sync pt with no
++                                     associated sync checkpoint)
++
++@Input          hFence               The fence to be resolved
++
++@Output         pui32NumSyncCheckpoints The number of sync checkpoints the
++                                     fence contains. Can return 0 if
++                                     passed a null (-1) fence.
++
++@Output         papsSyncCheckpoints  List of sync checkpoints the fence
++                                     contains
++
++@Output         puiFenceUID          Unique ID of the resolved fence
++
++@Return         PVRSRV_OK if a valid fence was provided.
++                PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
++                sync has not registered a callback function.
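++
++                Usage sketch (error handling elided; the local names are
++                illustrative only):
++
++                    IMG_UINT32 ui32NumCheckpoints;
++                    PSYNC_CHECKPOINT *apsCheckpoints;
++                    IMG_UINT64 ui64FenceUID;
++
++                    eError = SyncCheckpointResolveFence(psContext, hFence,
++                                                        &ui32NumCheckpoints,
++                                                        &apsCheckpoints,
++                                                        &ui64FenceUID,
++                                                        PDUMP_FLAGS_NONE);
++                    /* ... use the ui32NumCheckpoints checkpoints ... */
++                    SyncCheckpointFreeCheckpointListMem(apsCheckpoints);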
++/*************************************************************************/ /*!
++@Function SyncCheckpointResolveFence
++
++@Description Resolve a fence, returning a list of the sync checkpoints
++ that fence contains.
++ This function in turn calls a function provided by the
++ OS native sync implementation.
++
++@Input psSyncCheckpointContext The sync checkpoint context
++ on which checkpoints should be
++ created (in the event of the fence
++ having a native sync pt with no
++ associated sync checkpoint)
++
++@Input hFence The fence to be resolved
++
++@Output pui32NumSyncCheckpoints The number of sync checkpoints the
++ fence contains. Can return 0 if
++ passed a null (-1) fence.
++
++@Output papsSyncCheckpoints List of sync checkpoints the fence
++ contains
++
++@Output puiFenceUID Unique ID of the resolved fence
++
++@Input ui32PDumpFlags PDump flags
++
++@Return PVRSRV_OK if a valid fence was provided.
++ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
++ sync has not registered a callback function.
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
++ PVRSRV_FENCE hFence,
++ IMG_UINT32 *pui32NumSyncCheckpoints,
++ PSYNC_CHECKPOINT **papsSyncCheckpoints,
++ IMG_UINT64 *puiFenceUID,
++ PDUMP_FLAGS_T ui32PDumpFlags);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointCreateFence
++
++@Description Create a fence containing a single sync checkpoint.
++ Return the fence and a ptr to sync checkpoint it contains.
++ This function in turn calls a function provided by the
++ OS native sync implementation.
++
++@Input psDeviceNode Device node on which to create
++ the fence
++
++@Input pszFenceName String to assign to the new fence
++ (for debugging purposes)
++
++@Input hTimeline Timeline on which the new fence is
++ to be created
++
++@Input psSyncCheckpointContext Sync checkpoint context to be used
++ when creating the new fence
++
++@Output phNewFence The newly created fence
++
++@Output pui64FenceUID Unique ID of the created fence
++
++@Output ppvFenceFinaliseData Any data needed to finalise the fence
++ in a later call to the function
++ SyncCheckpointFinaliseFence()
++
++@Output psNewSyncCheckpoint The sync checkpoint contained in
++ the new fence
++
++@Output ppvTimelineUpdateSyncPrim Any timeline update sync prim
++ returned by the OS native sync
++ implementation (used for timeline
++ debug)
++
++@Output pui32TimelineUpdateValue The value to which that timeline
++ update sync prim should be updated
++
++@Input ui32PDumpFlags PDump flags
++
++@Return PVRSRV_OK if a valid fence was provided.
++ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
++ sync has not registered a callback function.
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++SyncCheckpointCreateFence(PPVRSRV_DEVICE_NODE psDeviceNode,
++ const IMG_CHAR *pszFenceName,
++ PVRSRV_TIMELINE hTimeline,
++ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
++ PVRSRV_FENCE *phNewFence,
++ IMG_UINT64 *pui64FenceUID,
++ void **ppvFenceFinaliseData,
++ PSYNC_CHECKPOINT *psNewSyncCheckpoint,
++ void **ppvTimelineUpdateSyncPrim,
++ IMG_UINT32 *pui32TimelineUpdateValue,
++ PDUMP_FLAGS_T ui32PDumpFlags);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointRollbackFenceData
++
++@Description 'Rolls back' the fence specified (destroys the fence and
++ takes any other required actions to undo the fence
++ creation, e.g. if the implementation wishes to revert the
++ incrementing of the fence's timeline).
++ This function in turn calls a function provided by the
++ OS native sync implementation.
++
++@Input hFence Fence to be 'rolled back'
++
++@Input pvFinaliseData Data needed to finalise the
++ fence
++
++@Return PVRSRV_OK if a valid fence was provided.
++ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
++ sync has not registered a callback function.
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData);
++
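++/* Typical create/rollback/finalise pattern implied by these functions and by
++ * SyncCheckpointFinaliseFence() below (sketch only; eKickError and the
++ * device/timeline/context/flag values are assumed to come from the caller):
++ *
++ *     eError = SyncCheckpointCreateFence(psDevNode, "kick", hTimeline,
++ *                                        psSyncContext, &hFence, &uiFenceUID,
++ *                                        &pvFinaliseData, &psCheckpoint,
++ *                                        &pvTlUpdateSync, &ui32TlUpdateValue,
++ *                                        ui32PDumpFlags);
++ *     ... submit the DM kick referencing psCheckpoint ...
++ *     if (eKickError != PVRSRV_OK)
++ *     {
++ *         SyncCheckpointRollbackFenceData(hFence, pvFinaliseData);
++ *     }
++ *     else
++ *     {
++ *         SyncCheckpointFinaliseFence(psDevNode, hFence, pvFinaliseData,
++ *                                     psCheckpoint, "kick");
++ *     }
++ */
++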
++/*************************************************************************/ /*!
++@Function SyncCheckpointFinaliseFence
++
++@Description 'Finalise' the fence specified (performs any actions the
++ underlying implementation may need to perform just prior
++ to the fence being returned to the client).
++ This function in turn calls a function provided by the
++ OS native sync implementation - if the native sync
++ implementation does not need to perform any actions at
++ this time, this function does not need to be registered.
++
++@Input psDevNode Device node
++
++@Input hFence Fence to be 'finalised'
++
++@Input pvFinaliseData Data needed to finalise the fence
++
++@Input psSyncCheckpoint Base sync checkpoint that this fence
++ is formed of
++
++@Input pszName Fence annotation
++
++@Return PVRSRV_OK if a valid fence and finalise data were provided.
++ PVRSRV_ERROR_INVALID_PARAMS if an invalid fence or finalise
++ data were provided.
++ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
++ sync has not registered a callback function (permitted).
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++SyncCheckpointFinaliseFence(PPVRSRV_DEVICE_NODE psDevNode,
++ PVRSRV_FENCE hFence,
++ void *pvFinaliseData,
++ PSYNC_CHECKPOINT psSyncCheckpoint,
++ const IMG_CHAR *pszName);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointFreeCheckpointListMem
++
++@Description Free the memory which was allocated by the sync
++ implementation and used to return the list of sync
++ checkpoints when resolving a fence.
++ This function in turn calls a free function registered by
++ the sync implementation (if a function has been registered).
++
++@Input pvCheckpointListMem Pointer to the memory to be freed
++
++@Return None
++*/
++/*****************************************************************************/
++void
++SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointNoHWUpdateTimelines
++
++@Description Called by the DDK in a NO_HARDWARE build only.
++ After syncs have been manually signalled by the DDK, this
++ function is called to allow the OS native sync implementation
++ to update its timelines (as the usual callback notification
++ of signalled checkpoints is not supported for NO_HARDWARE).
++ This function in turn calls a function provided by the
++ OS native sync implementation.
++
++@Input pvPrivateData Any data the OS native sync
++ implementation might require.
++
++@Return PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
++ sync has not registered a callback function, otherwise
++ PVRSRV_OK.
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData);
++
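++/* The checkpoint list returned by SyncCheckpointResolveFence() is allocated
++ * by the sync implementation and must be released with
++ * SyncCheckpointFreeCheckpointListMem() (sketch; error handling omitted):
++ *
++ *     PSYNC_CHECKPOINT *papsCheckpoints = NULL;
++ *     IMG_UINT32 ui32NumCheckpoints = 0;
++ *     IMG_UINT64 uiFenceUID;
++ *
++ *     eError = SyncCheckpointResolveFence(psSyncContext, hFence,
++ *                                         &ui32NumCheckpoints,
++ *                                         &papsCheckpoints, &uiFenceUID,
++ *                                         ui32PDumpFlags);
++ *     ... enqueue the ui32NumCheckpoints checkpoints as UFO checks ...
++ *     SyncCheckpointFreeCheckpointListMem(papsCheckpoints);
++ */
++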
++/*************************************************************************/ /*!
++@Function SyncCheckpointDumpInfoOnStalledUFOs
++
++@Description Called by the DDK in the event of the health check watchdog
++ examining the CCBs and determining that one has failed to
++ progress after 10 seconds when the GPU is idle due to waiting
++ on one or more UFO fences.
++ The DDK will pass a list of UFOs on which the CCB is waiting
++ and the sync implementation will check them to see if any
++ relate to sync points it has created. If so, the
++ implementation should dump debug information on those sync
++ points to the kernel log or other suitable output (which will
++ allow the unsignalled syncs to be identified).
++ The function shall return the number of syncs in the provided
++ array that were syncs which it had created.
++
++@Input ui32NumUFOs The number of UFOs in the array passed
++ in the pui32Vaddrs parameter.
++
++@Input pui32Vaddrs The array of UFOs the CCB is waiting on.
++
++@Output pui32NumSyncOwnedUFOs The number of UFOs in pui32Vaddrs which
++ relate to syncs created by the sync
++ implementation.
++
++@Return PVRSRV_OK if a valid pointer is provided in pui32NumSyncOwnedUFOs.
++ PVRSRV_ERROR_INVALID_PARAMS if a NULL value is provided in
++ pui32NumSyncOwnedUFOs.
++ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
++ sync has not registered a callback function.
++
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs,
++ IMG_UINT32 *pui32Vaddrs,
++ IMG_UINT32 *pui32NumSyncOwnedUFOs);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointGetStateString
++
++@Description Called to get a string representing the current state of a
++ sync checkpoint.
++
++@Input psSyncCheckpoint Synchronisation checkpoint to get the
++ state for.
++
++@Return The string representing the current state of this checkpoint
++*/
++/*****************************************************************************/
++const IMG_CHAR *
++SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointRecordLookup
++
++@Description Returns a debug string with information about the
++ sync checkpoint.
++
++@Input psDevNode The device owning the sync
++ checkpoint to lookup
++
++@Input ui32FwAddr The firmware address of the sync
++ checkpoint to lookup
++
++@Input pszSyncInfo Character array to write to
++
++@Input len Length of the character array
++
++@Return None
++*/
++/*****************************************************************************/
++void
++SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode,
++ IMG_UINT32 ui32FwAddr,
++ IMG_CHAR * pszSyncInfo, size_t len);
++
++#if defined(PDUMP)
++/*************************************************************************/ /*!
++@Function PVRSRVSyncCheckpointSignalledPDumpPolKM
++
++@Description Called to insert a poll into the PDump script on a given
++ Fence being signalled or errored.
++
++@Input hFence Fence for PDump to poll on
++
++@Return PVRSRV_OK if a valid sync checkpoint was provided.
++*/
++/*****************************************************************************/
++
++PVRSRV_ERROR PVRSRVSyncCheckpointSignalledPDumpPolKM(PVRSRV_FENCE hFence);
++
++#endif
++
++#endif /* SYNC_CHECKPOINT_H */
+diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint_external.h b/drivers/gpu/drm/img-rogue/sync_checkpoint_external.h
+new file mode 100644
+index 000000000000..19b5011aa8b7
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/sync_checkpoint_external.h
+@@ -0,0 +1,83 @@
++/*************************************************************************/ /*!
++@File
++@Title Services external synchronisation checkpoint interface header
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Defines synchronisation checkpoint structures that are visible
++ internally and externally
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef SYNC_CHECKPOINT_EXTERNAL_H ++#define SYNC_CHECKPOINT_EXTERNAL_H ++ ++#include "img_types.h" ++ ++#ifndef CHECKPOINT_TYPES ++#define CHECKPOINT_TYPES ++typedef struct SYNC_CHECKPOINT_CONTEXT_TAG *PSYNC_CHECKPOINT_CONTEXT; ++ ++typedef struct SYNC_CHECKPOINT_TAG *PSYNC_CHECKPOINT; ++#endif ++ ++/* PVRSRV_SYNC_CHECKPOINT states. ++ * The OS native sync implementation should call pfnIsSignalled() to determine if a ++ * PVRSRV_SYNC_CHECKPOINT has signalled (which will return an IMG_BOOL), but can set the ++ * state for a PVRSRV_SYNC_CHECKPOINT (which is currently in the NOT_SIGNALLED state) ++ * where that PVRSRV_SYNC_CHECKPOINT is representing a foreign sync. ++ */ ++typedef IMG_UINT32 PVRSRV_SYNC_CHECKPOINT_STATE; ++ ++#define PVRSRV_SYNC_CHECKPOINT_UNDEF 0x000U ++#define PVRSRV_SYNC_CHECKPOINT_ACTIVE 0xac1U /*!< checkpoint has not signalled */ ++#define PVRSRV_SYNC_CHECKPOINT_SIGNALLED 0x519U /*!< checkpoint has signalled */ ++#define PVRSRV_SYNC_CHECKPOINT_ERRORED 0xeffU /*!< checkpoint has been errored */ ++ ++ ++#define PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(fwaddr) (((fwaddr) & 0x1U) != 0U) ++#define PVRSRV_UFO_IS_SYNC_CHECKPOINT(ufoptr) (PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR((ufoptr)->puiAddrUFO.ui32Addr)) ++ ++/* Maximum number of sync checkpoints the firmware supports in one fence */ ++#define MAX_SYNC_CHECKPOINTS_PER_FENCE 32U ++ ++/*! 
++ * Define to be used with SyncCheckpointAlloc() to indicate a checkpoint which ++ * represents a foreign sync point or collection of foreign sync points. ++ */ ++#define SYNC_CHECKPOINT_FOREIGN_CHECKPOINT ((PVRSRV_TIMELINE) - 2U) ++ ++#endif /* SYNC_CHECKPOINT_EXTERNAL_H */ +diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint_init.h b/drivers/gpu/drm/img-rogue/sync_checkpoint_init.h +new file mode 100644 +index 000000000000..94f2e000ded9 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sync_checkpoint_init.h +@@ -0,0 +1,82 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services synchronisation checkpoint initialisation interface ++ header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines synchronisation checkpoint structures that are visible ++ internally and externally ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef SYNC_CHECKPOINT_INIT_H ++#define SYNC_CHECKPOINT_INIT_H ++ ++#include "device.h" ++ ++/*************************************************************************/ /*! ++@Function SyncCheckpointInit ++ ++@Description Initialise the sync checkpoint driver by giving it the ++ device node (needed to determine the pfnUFOAlloc function ++ to call in order to allocate sync block memory). 
++
++@Input psDevNode Device for which sync checkpoints
++ are being initialised
++
++@Return PVRSRV_OK if initialised successfully,
++ PVRSRV_ERROR_ otherwise
++*/
++/*****************************************************************************/
++PVRSRV_ERROR
++SyncCheckpointInit(PVRSRV_DEVICE_NODE *psDevNode);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointDeinit
++
++@Description Deinitialise the sync checkpoint driver.
++ Frees resources allocated during initialisation.
++
++@Input psDevNode Device for which sync checkpoints
++ are being de-initialised
++
++@Return None
++*/
++/*****************************************************************************/
++void SyncCheckpointDeinit(PVRSRV_DEVICE_NODE *psDevNode);
++
++#endif /* SYNC_CHECKPOINT_INIT_H */
+diff --git a/drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h b/drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h
+new file mode 100644
+index 000000000000..ce178474112c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/sync_checkpoint_internal.h
+@@ -0,0 +1,288 @@
++/*************************************************************************/ /*!
++@File
++@Title Services internal synchronisation checkpoint interface header
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Defines the internal server interface for services
++ synchronisation checkpoints.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/ ++ ++#ifndef SYNC_CHECKPOINT_INTERNAL_H ++#define SYNC_CHECKPOINT_INTERNAL_H ++ ++#include "img_types.h" ++#include "opaque_types.h" ++#include "sync_checkpoint_external.h" ++#include "sync_checkpoint.h" ++#include "ra.h" ++#include "dllist.h" ++#include "lock.h" ++#include "devicemem.h" ++#include "rgx_fwif_shared.h" ++#include "rgx_fwif_km.h" ++ ++struct SYNC_CHECKPOINT_RECORD; ++ ++/* ++ Private structures ++*/ ++ ++typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ _SYNC_CHECKPOINT_CONTEXT_CTL, *_PSYNC_CHECKPOINT_CONTEXT_CTL; ++ ++typedef struct SYNC_CHECKPOINT_CONTEXT_TAG ++{ ++ PPVRSRV_DEVICE_NODE psDevNode; ++ IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the RA */ ++ RA_ARENA *psSubAllocRA; /*!< RA context */ ++ IMG_CHAR azSpanName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the span RA */ ++ RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */ ++ ATOMIC_T hRefCount; /*!< Ref count for this context */ ++ ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */ ++ POS_LOCK hLock; ++ _PSYNC_CHECKPOINT_CONTEXT_CTL psContextCtl; ++#if defined(PDUMP) ++ DLLIST_NODE sSyncCheckpointBlockListHead; /*!< List head for the sync chkpt blocks in this context*/ ++ POS_LOCK hSyncCheckpointBlockListLock; /*!< sync chkpt blocks list lock*/ ++ DLLIST_NODE sListNode; /*!< List node for the sync chkpt context list*/ ++#endif ++} _SYNC_CHECKPOINT_CONTEXT; ++ ++typedef struct _SYNC_CHECKPOINT_BLOCK_ ++{ ++ ATOMIC_T hRefCount; /*!< Ref count for this sync block */ ++ POS_LOCK hLock; ++ _SYNC_CHECKPOINT_CONTEXT *psContext; /*!< Our copy of the services connection */ ++ PPVRSRV_DEVICE_NODE psDevNode; ++ IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync checkpoint block */ ++ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */ ++ DEVMEM_MEMDESC *hMemDesc; /*!< DevMem allocation for block */ ++ volatile IMG_UINT32 *pui32LinAddr; /*!< Server-code CPU mapping */ ++ IMG_UINT64 uiSpanBase; /*!< Base of this import (FW DevMem) in the span RA */ ++#if defined(PDUMP) ++ DLLIST_NODE sListNode; /*!< List node for the sync chkpt blocks */ ++#endif ++} SYNC_CHECKPOINT_BLOCK; ++ ++typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE; ++ ++typedef struct SYNC_CHECKPOINT_TAG ++{ ++ //_SYNC_CHECKPOINT_CONTEXT *psContext; /*!< pointer to the parent context of this checkpoint */ ++ /* A sync checkpoint is assigned a unique ID, to avoid any confusion should ++ * the same memory be re-used later for a different checkpoint ++ */ ++ IMG_UINT32 ui32UID; /*!< Unique ID assigned to sync checkpoint (to distinguish checkpoints if memory is re-used)*/ ++ ATOMIC_T hRefCount; /*!< Ref count for this sync */ ++ ATOMIC_T hEnqueuedCCBCount; /*!< Num times sync has been put in CCBs */ ++ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */ ++ IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */ ++ volatile SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */ ++ PRGXFWIF_UFO_ADDR sCheckpointUFOAddr; /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */ ++ IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */ ++ PVRSRV_TIMELINE hTimeline; /*!< Timeline on which this sync checkpoint was created */ ++ IMG_UINT32 ui32ValidationCheck; ++ IMG_PID uiProcess; /*!< The Process ID of the process which created this sync checkpoint */ ++ PSYNC_CHECKPOINT_RECORD_HANDLE hRecord; /*!< Sync 
record handle */
++ DLLIST_NODE sListNode; /*!< List node for the global sync chkpt list */
++ DLLIST_NODE sDeferredFreeListNode; /*!< List node for the deferred free sync chkpt list */
++ IMG_UINT32 ui32FWAddr; /*!< FWAddr stored at sync checkpoint alloc time */
++ PDUMP_FLAGS_T ui32PDumpFlags; /*!< Pdump Capture mode to be used for POL*/
++} SYNC_CHECKPOINT;
++
++
++typedef struct _SYNC_CHECKPOINT_SIGNAL_
++{
++ SYNC_CHECKPOINT asSyncCheckpoint; /*!< Store sync checkpt for deferred signal */
++ IMG_UINT32 ui32Status; /*!< sync checkpt status signal/errored */
++} _SYNC_CHECKPOINT_DEFERRED_SIGNAL;
++
++#define GET_CP_CB_NEXT_IDX(_curridx) (((_curridx) + 1) % SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL)
++#define GET_CP_CB_BASE(_idx) (IMG_OFFSET_ADDR(psDevNode->pui8DeferredSyncCPSignal, \
++ ((_idx) * sizeof(_SYNC_CHECKPOINT_DEFERRED_SIGNAL))))
++
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointGetFirmwareAddr
++
++@Description Returns the firmware address of the sync checkpoint.
++
++@Input psSyncCheckpoint Synchronisation checkpoint to get
++ the firmware address of
++
++@Return The firmware address of the sync checkpoint
++
++*/
++/*****************************************************************************/
++IMG_UINT32
++SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointCCBEnqueued
++
++@Description Increment the CCB enqueued reference count for a
++ synchronisation checkpoint. This indicates how many FW
++ operations (checks/updates) have been placed into CCBs for the
++ sync checkpoint.
++ When the FW services these operations, it increments its own
++ reference count. When these two values are equal, we know
++ there are no outstanding FW operations for the checkpoint
++ in any CCB.
++
++@Input psSyncCheckpoint Synchronisation checkpoint for which
++ to increment the enqueued reference
++ count
++
++@Return None
++
++*/
++/*****************************************************************************/
++void
++SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointGetEnqueuedCount
++
++@Description Returns the CCB enqueued count of the sync checkpoint.
++
++@Input psSyncCheckpoint Synchronisation checkpoint to get
++ the enqueued count of
++
++@Return The enqueued count of the sync checkpoint
++ (i.e. the number of FW operations (checks or updates)
++ currently enqueued in CCBs for the sync checkpoint)
++
++*/
++/*****************************************************************************/
++IMG_UINT32
++SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointGetReferenceCount
++
++@Description Returns the current host reference count of the sync
++ checkpoint.
++
++@Input psSyncCheckpoint Synchronisation checkpoint to get
++ the reference count of
++
++@Return The host reference count of the sync checkpoint
++
++*/
++/*****************************************************************************/
++IMG_UINT32
++SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint);
++
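++/* The GET_CP_CB_* macros above treat psDevNode->pui8DeferredSyncCPSignal as
++ * a circular buffer of _SYNC_CHECKPOINT_DEFERRED_SIGNAL slots. For example,
++ * if SYNC_CHECKPOINT_MAX_DEFERRED_SIGNAL were 4 (its actual value is defined
++ * elsewhere), a write index would advance 0, 1, 2, 3 and then wrap:
++ *
++ *     idx = GET_CP_CB_NEXT_IDX(3);  // (3 + 1) % 4 == 0
++ *     psSlot = (_SYNC_CHECKPOINT_DEFERRED_SIGNAL *)GET_CP_CB_BASE(idx);
++ */
++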
++/*************************************************************************/ /*!
++@Function SyncCheckpointGetCreator
++
++@Description Returns the ID of the process which created the sync
++ checkpoint.
++
++@Input psSyncCheckpoint Synchronisation checkpoint to get
++ the creating process of
++
++@Return The process id of the process which created this sync checkpoint.
++
++*/
++/*****************************************************************************/
++IMG_PID
++SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointGetId
++
++@Description Returns the unique ID assigned to the sync checkpoint.
++
++@Input psSyncCheckpoint Synchronisation checkpoint to get
++ the unique Id of
++
++@Return The unique Id of the sync checkpoint
++
++*/
++/*****************************************************************************/
++IMG_UINT32
++SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointGetTimeline
++
++@Description Returns the timeline on which the sync checkpoint was
++ created.
++
++@Input psSyncCheckpoint Synchronisation checkpoint to get
++ the parent timeline of
++
++@Return The parent timeline of the sync checkpoint
++
++*/
++/*****************************************************************************/
++PVRSRV_TIMELINE
++SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++/*************************************************************************/ /*!
++@Function SyncCheckpointGetRGXFWIFUFOAddr
++
++@Description Returns a pointer to the PRGXFWIF_UFO_ADDR of the sync
++ checkpoint.
++
++@Input psSyncCheckpoint Synchronisation checkpoint to get
++ the PRGXFWIF_UFO_ADDR of
++
++@Return The PRGXFWIF_UFO_ADDR of the sync checkpoint, used when
++ providing the update in server kick code.
++
++*/
++/*****************************************************************************/
++PRGXFWIF_UFO_ADDR*
++SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
++
++#if !defined(SUPPORT_NATIVE_FENCE_SYNC)
++/*************************************************************************/ /*!
++@Function SyncCheckpointGetAssociatedDevice
++
++@Description Returns the device node of the device on which the sync
++ checkpoint context was created.
++
++@Input psSyncCheckpointContext Synchronisation Checkpoint context
++ to get the device node of
++
++@Return The PVRSRV_DEVICE_NODE of the device on which the sync
++ checkpoint context was created.
++
++*/
++/*****************************************************************************/
++PPVRSRV_DEVICE_NODE
++SyncCheckpointGetAssociatedDevice(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext);
++#endif /* !defined(SUPPORT_NATIVE_FENCE_SYNC) */
++
++#endif /* SYNC_CHECKPOINT_INTERNAL_H */
+diff --git a/drivers/gpu/drm/img-rogue/sync_fallback_server.h b/drivers/gpu/drm/img-rogue/sync_fallback_server.h
+new file mode 100644
+index 000000000000..ac6bd4755b0e
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/sync_fallback_server.h
+@@ -0,0 +1,204 @@
++/**************************************************************************/ /*!
++@File
++@Title Fallback sync interface
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#ifndef SYNC_FALLBACK_SERVER_H ++#define SYNC_FALLBACK_SERVER_H ++ ++#include "img_types.h" ++#include "sync_checkpoint.h" ++#include "device.h" ++#include "connection_server.h" ++ ++ ++typedef struct _PVRSRV_TIMELINE_SERVER_ PVRSRV_TIMELINE_SERVER; ++typedef struct _PVRSRV_FENCE_SERVER_ PVRSRV_FENCE_SERVER; ++typedef struct _PVRSRV_FENCE_EXPORT_ PVRSRV_FENCE_EXPORT; ++ ++typedef struct _PVRSRV_SYNC_PT_ PVRSRV_SYNC_PT; ++ ++#define SYNC_FB_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH ++#define SYNC_FB_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH ++ ++/*****************************************************************************/ ++/* */ ++/* SW SPECIFIC FUNCTIONS */ ++/* */ ++/*****************************************************************************/ ++ ++PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize, ++ const IMG_CHAR *pszTimelineName, ++ PVRSRV_TIMELINE_SERVER **ppsTimeline); ++ ++PVRSRV_ERROR SyncFbFenceCreateSW(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_TIMELINE_SERVER *psTimeline, ++ IMG_UINT32 uiFenceNameSize, ++ const IMG_CHAR *pszFenceName, ++ PVRSRV_FENCE_SERVER **ppsOutputFence, ++ IMG_UINT64 *pui64SyncPtIdx); ++PVRSRV_ERROR SyncFbSWTimelineFenceCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode, ++ PVRSRV_TIMELINE iSWTimeline, ++ const IMG_CHAR *pszFenceName, ++ PVRSRV_FENCE *piOutputFence, ++ IMG_UINT64* pui64SyncPtIdx); ++ ++PVRSRV_ERROR SyncFbTimelineAdvanceSW(PVRSRV_TIMELINE_SERVER *psTimeline, ++ IMG_UINT64 *pui64SyncPtIdx); ++PVRSRV_ERROR SyncFbSWTimelineAdvanceKM(void *pvSWTimelineObj, ++ IMG_UINT64* pui64SyncPtIdx); ++ ++/*****************************************************************************/ ++/* */ ++/* PVR SPECIFIC FUNCTIONS */ ++/* */ ++/*****************************************************************************/ ++ ++PVRSRV_ERROR SyncFbTimelineCreatePVR(IMG_UINT32 uiTimelineNameSize, ++ const IMG_CHAR *pszTimelineName, ++ PVRSRV_TIMELINE_SERVER **ppsTimeline); ++ ++PVRSRV_ERROR SyncFbFenceCreatePVR(PPVRSRV_DEVICE_NODE psDeviceNode, ++ const IMG_CHAR *pszName, ++ PVRSRV_TIMELINE 
iTl, ++ PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext, ++ PVRSRV_FENCE *piOutFence, ++ IMG_UINT64 *puiFenceUID, ++ void **ppvFenceFinaliseData, ++ PSYNC_CHECKPOINT *ppsOutCheckpoint, ++ void **ppvTimelineUpdateSync, ++ IMG_UINT32 *puiTimelineUpdateValue); ++ ++PVRSRV_ERROR SyncFbFenceResolvePVR(PSYNC_CHECKPOINT_CONTEXT psContext, ++ PVRSRV_FENCE iFence, ++ IMG_UINT32 *puiNumCheckpoints, ++ PSYNC_CHECKPOINT **papsCheckpoints, ++ IMG_UINT64 *puiFenceUID); ++ ++/*****************************************************************************/ ++/* */ ++/* GENERIC FUNCTIONS */ ++/* */ ++/*****************************************************************************/ ++ ++PVRSRV_ERROR SyncFbGetFenceObj(PVRSRV_FENCE iFence, ++ void **ppvFenceObj); ++ ++PVRSRV_ERROR SyncFbSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline, ++ void **ppvSWTimelineObj); ++ ++PVRSRV_ERROR SyncFbTimelineRelease(PVRSRV_TIMELINE_SERVER *psTl); ++ ++PVRSRV_ERROR SyncFbFenceRelease(PVRSRV_FENCE_SERVER *psFence); ++PVRSRV_ERROR SyncFbFenceReleaseKM(void *pvFenceObj); ++ ++PVRSRV_ERROR SyncFbFenceDup(PVRSRV_FENCE_SERVER *psInFence, ++ PVRSRV_FENCE_SERVER **ppsOutFence); ++ ++PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1, ++ PVRSRV_FENCE_SERVER *psInFence2, ++ IMG_UINT32 uiFenceNameSize, ++ const IMG_CHAR *pszFenceName, ++ PVRSRV_FENCE_SERVER **ppsOutFence); ++ ++PVRSRV_ERROR SyncFbFenceWait(PVRSRV_FENCE_SERVER *psFence, ++ IMG_UINT32 uiTimeout); ++ ++PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence, ++ IMG_UINT32 uiLine, ++ IMG_UINT32 uiFileNameLength, ++ const IMG_CHAR *pszFile, ++ IMG_UINT32 uiModuleLength, ++ const IMG_CHAR *pszModule, ++ IMG_UINT32 uiDescLength, ++ const IMG_CHAR *pszDesc); ++ ++PVRSRV_ERROR SyncFbDumpFenceKM(void *pvSWFenceObj, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++ ++PVRSRV_ERROR SyncFbSWDumpTimelineKM(void *pvSWTimelineObj, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++ ++PVRSRV_ERROR SyncFbRegisterSyncFunctions(void); ++ ++PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++PVRSRV_ERROR SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++IMG_UINT32 SyncFbDumpInfoOnStalledUFOs(IMG_UINT32 nr_ufos, IMG_UINT32 *vaddrs); ++ ++IMG_BOOL SyncFbCheckpointHasSignalled(IMG_UINT32 ui32FwAddr, IMG_UINT32 ui32Value); ++ ++/*****************************************************************************/ ++/* */ ++/* IMPORT/EXPORT FUNCTIONS */ ++/* */ ++/*****************************************************************************/ ++ ++#if defined(SUPPORT_INSECURE_EXPORT) ++PVRSRV_ERROR SyncFbFenceExportInsecure(PVRSRV_FENCE_SERVER *psFence, ++ PVRSRV_FENCE_EXPORT **ppExport); ++ ++PVRSRV_ERROR SyncFbFenceExportDestroyInsecure(PVRSRV_FENCE_EXPORT *psExport); ++ ++PVRSRV_ERROR SyncFbFenceImportInsecure(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevice, ++ PVRSRV_FENCE_EXPORT *psImport, ++ PVRSRV_FENCE_SERVER **psFence); ++#endif /* defined(SUPPORT_INSECURE_EXPORT) */ ++ ++PVRSRV_ERROR SyncFbFenceExportSecure(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDevNode, ++ PVRSRV_FENCE_SERVER *psFence, ++ IMG_SECURE_TYPE *phSecure, ++ PVRSRV_FENCE_EXPORT **ppsExport, ++ CONNECTION_DATA **ppsSecureConnection); ++ ++PVRSRV_ERROR SyncFbFenceExportDestroySecure(PVRSRV_FENCE_EXPORT *psExport); ++ ++PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevice, ++ IMG_SECURE_TYPE hSecure, ++ PVRSRV_FENCE_SERVER **psFence); ++ ++#endif /* 
SYNC_FALLBACK_SERVER_H */ +diff --git a/drivers/gpu/drm/img-rogue/sync_internal.h b/drivers/gpu/drm/img-rogue/sync_internal.h +new file mode 100644 +index 000000000000..29c836054cae +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sync_internal.h +@@ -0,0 +1,127 @@ ++/*************************************************************************/ /*! ++@File ++@Title Services internal synchronisation interface header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Defines the internal client side interface for services ++ synchronisation ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef SYNC_INTERNAL ++#define SYNC_INTERNAL ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "ra.h" ++#include "dllist.h" ++#include "lock.h" ++#include "devicemem.h" ++#include "sync_prim_internal.h" ++ ++#define LOCAL_SYNC_PRIM_RESET_VALUE 0 ++#define LOCAL_SYNC_PRIM_POISON_VALUE 0xa5a5a5a5u ++ ++/* ++ Debug feature to protect against GP DM page faults when ++ sync prims are freed by client before work is completed. 
++*/
++#define LOCAL_SYNC_BLOCK_RETAIN_FIRST
++
++/*
++ Private structures
++*/
++#define SYNC_PRIM_NAME_SIZE 50
++typedef struct SYNC_PRIM_CONTEXT_TAG
++{
++ SHARED_DEV_CONNECTION hDevConnection;
++ IMG_CHAR azName[SYNC_PRIM_NAME_SIZE]; /*!< Name of the RA */
++ RA_ARENA *psSubAllocRA; /*!< RA context */
++ IMG_CHAR azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */
++ RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */
++ ATOMIC_T hRefCount; /*!< Ref count for this context */
++#if defined(LOCAL_SYNC_BLOCK_RETAIN_FIRST)
++ IMG_HANDLE hFirstSyncPrim; /*!< Handle to the first allocated sync prim */
++#endif
++} SYNC_PRIM_CONTEXT;
++
++typedef struct SYNC_PRIM_BLOCK_TAG
++{
++ SYNC_PRIM_CONTEXT *psContext; /*!< Our copy of the services connection */
++ IMG_HANDLE hServerSyncPrimBlock; /*!< Server handle for this block */
++ IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync prim block */
++ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */
++ DEVMEM_MEMDESC *hMemDesc; /*!< Host mapping handle */
++ IMG_UINT32 __iomem *pui32LinAddr; /*!< User CPU mapping */
++ IMG_UINT64 uiSpanBase; /*!< Base of this import in the span RA */
++ DLLIST_NODE sListNode; /*!< List node for the sync block list */
++} SYNC_PRIM_BLOCK;
++
++typedef enum SYNC_PRIM_TYPE_TAG
++{
++ SYNC_PRIM_TYPE_UNKNOWN = 0,
++ SYNC_PRIM_TYPE_LOCAL,
++ SYNC_PRIM_TYPE_SERVER,
++} SYNC_PRIM_TYPE;
++
++typedef struct SYNC_PRIM_LOCAL_TAG
++{
++ ATOMIC_T hRefCount; /*!< Ref count for this sync */
++ SYNC_PRIM_BLOCK *psSyncBlock; /*!< Synchronisation block this primitive is allocated on */
++ IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */
++ IMG_HANDLE hRecord; /*!< Sync record handle */
++} SYNC_PRIM_LOCAL;
++
++typedef struct SYNC_PRIM_TAG
++{
++ PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync prim */
++ SYNC_PRIM_TYPE eType; /*!< Sync primitive type */
++ union {
++ SYNC_PRIM_LOCAL sLocal; /*!< Local sync primitive data */
++ } u;
++} SYNC_PRIM;
++
++
++IMG_INTERNAL PVRSRV_ERROR
++SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr);
++
++IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
++ IMG_HANDLE *phBlock,
++ IMG_UINT32 *pui32Offset);
++
++
++#endif /* SYNC_INTERNAL */
+diff --git a/drivers/gpu/drm/img-rogue/sync_prim_internal.h b/drivers/gpu/drm/img-rogue/sync_prim_internal.h
+new file mode 100644
+index 000000000000..77164c2356cd
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/sync_prim_internal.h
+@@ -0,0 +1,84 @@
++/*************************************************************************/ /*!
++@File
++@Title Services internal synchronisation typedef header
++@Description Defines synchronisation types that are used internally
++ only
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef SYNC_INTERNAL_H
++#define SYNC_INTERNAL_H
++
++#if defined(__cplusplus)
++extern "C" {
++#endif
++
++#include <stdint.h>
++
++/* These are included here as the typedefs are required
++ * internally.
++ */
++
++typedef struct SYNC_PRIM_CONTEXT_TAG *PSYNC_PRIM_CONTEXT;
++typedef struct PVRSRV_CLIENT_SYNC_PRIM_TAG
++{
++ volatile uint32_t __iomem *pui32LinAddr; /*!< User pointer to the primitive */
++} PVRSRV_CLIENT_SYNC_PRIM;
++
++/*!
++ * Bundled information for a sync prim operation
++ *
++ * Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP
++ * Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP
++ */
++typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP_TAG
++{
++ #define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK (1U << 0)
++ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE (1U << 1)
++ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1U<<2))
++ uint32_t ui32Flags; /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */
++ PVRSRV_CLIENT_SYNC_PRIM *psSync; /*!< Pointer to the client sync primitive */
++ uint32_t ui32FenceValue; /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */
++ uint32_t ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */
++} PVRSRV_CLIENT_SYNC_PRIM_OP;
++
++#if defined(__cplusplus)
++}
++#endif
++#endif /* SYNC_INTERNAL_H */
+diff --git a/drivers/gpu/drm/img-rogue/sync_server.c b/drivers/gpu/drm/img-rogue/sync_server.c
+new file mode 100644
+index 000000000000..7398f4417d5e
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/sync_server.c
+@@ -0,0 +1,1223 @@
++/*************************************************************************/ /*!
++@File sync_server.c
++@Title Server side synchronisation functions
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Implements the server side functions for synchronisation
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#include "img_types.h" ++#include "img_defs.h" ++#include "sync_server.h" ++#include "allocmem.h" ++#include "device.h" ++#include "devicemem.h" ++#include "devicemem_pdump.h" ++#include "osfunc.h" ++#include "pdump.h" ++#include "pvr_debug.h" ++#include "pvr_notifier.h" ++#include "pdump_km.h" ++#include "sync.h" ++#include "sync_internal.h" ++#include "connection_server.h" ++#include "htbuffer.h" ++#include "rgxhwperf.h" ++#include "info_page.h" ++ ++#include "sync_checkpoint_internal.h" ++#include "sync_checkpoint.h" ++ ++/* Include this to obtain MAX_SYNC_CHECKPOINTS_PER_FENCE */ ++#include "sync_checkpoint_external.h" ++ ++/* Include this to obtain PVRSRV_MAX_DEV_VARS */ ++#include "pvrsrv_devvar.h" ++ ++#if defined(SUPPORT_SECURE_EXPORT) ++#include "ossecure_export.h" ++#endif ++ ++/* Set this to enable debug relating to the construction and maintenance of the sync address list */ ++#define SYNC_ADDR_LIST_DEBUG 0 ++ ++/* Set maximum number of FWAddrs that can be accommodated in a SYNC_ADDR_LIST. ++ * This should allow for PVRSRV_MAX_DEV_VARS dev vars plus ++ * MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoints for check fences. ++ * The same SYNC_ADDR_LIST is also used to hold UFOs for updates. While this ++ * may need to accommodate the additional sync prim update returned by Native ++ * sync implementation (used for timeline debug), the size calculated from ++ * PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE should be ample. 
++ */ ++#define PVRSRV_MAX_SYNC_ADDR_LIST_SIZE (PVRSRV_MAX_DEV_VARS+MAX_SYNC_CHECKPOINTS_PER_FENCE) ++/* Check that helper functions will not be preparing longer lists of ++ * UFOs than the FW can handle. ++ */ ++static_assert(PVRSRV_MAX_SYNC_ADDR_LIST_SIZE <= RGXFWIF_CCB_CMD_MAX_UFOS, ++ "PVRSRV_MAX_SYNC_ADDR_LIST_SIZE > RGXFWIF_CCB_CMD_MAX_UFOS."); ++ ++/* Max number of syncs allowed in a sync prim op */ ++#define SYNC_PRIM_OP_MAX_SYNCS 1024 ++ ++struct _SYNC_PRIMITIVE_BLOCK_ ++{ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ DEVMEM_MEMDESC *psMemDesc; ++ IMG_UINT32 *pui32LinAddr; ++ IMG_UINT32 ui32BlockSize; /*!< Size of the Sync Primitive Block */ ++ ATOMIC_T sRefCount; ++ DLLIST_NODE sConnectionNode; ++ SYNC_CONNECTION_DATA *psSyncConnectionData; /*!< Link back to the sync connection data if there is one */ ++ PRGXFWIF_UFO_ADDR uiFWAddr; /*!< The firmware address of the sync prim block */ ++}; ++ ++struct _SYNC_CONNECTION_DATA_ ++{ ++ DLLIST_NODE sListHead; /*!< list of sync block associated with / created against this connection */ ++ ATOMIC_T sRefCount; /*!< number of references to this object */ ++ POS_LOCK hLock; /*!< lock protecting the list of sync blocks */ ++}; ++ ++#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1)) ++ ++/* this is the max number of syncs we will search or dump ++ * at any time. ++ */ ++#define SYNC_RECORD_LIMIT 20000 ++ ++enum SYNC_RECORD_TYPE ++{ ++ SYNC_RECORD_TYPE_UNKNOWN = 0, ++ SYNC_RECORD_TYPE_CLIENT, ++ SYNC_RECORD_TYPE_SERVER, ++}; ++ ++struct SYNC_RECORD ++{ ++ PVRSRV_DEVICE_NODE *psDevNode; ++ SYNC_PRIMITIVE_BLOCK *psServerSyncPrimBlock; /*!< handle to _SYNC_PRIMITIVE_BLOCK_ */ ++ IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */ ++ IMG_UINT32 ui32FwBlockAddr; ++ IMG_PID uiPID; ++ IMG_UINT64 ui64OSTime; ++ enum SYNC_RECORD_TYPE eRecordType; ++ DLLIST_NODE sNode; ++ IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH]; ++}; ++ ++#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG) ++#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__) ++#else ++#define SYNC_REFCOUNT_PRINT(fmt, ...) ++#endif ++ ++#if defined(SYNC_DEBUG) ++#define SYNC_UPDATES_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__) ++#else ++#define SYNC_UPDATES_PRINT(fmt, ...) ++#endif ++ ++/*! ++***************************************************************************** ++ @Function : SyncPrimitiveBlockToFWAddr ++ ++ @Description : Given a pointer to a sync primitive block and an offset, ++ returns the firmware address of the sync. ++ ++ @Input psSyncPrimBlock : Sync primitive block which contains the sync ++ @Input ui32Offset : Offset of sync within the sync primitive block ++ @Output psAddrOut : Absolute FW address of the sync is written out through ++ this pointer ++ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input ++ parameters are invalid. ++*****************************************************************************/ ++ ++PVRSRV_ERROR ++SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock, ++ IMG_UINT32 ui32Offset, ++ PRGXFWIF_UFO_ADDR *psAddrOut) ++{ ++ /* check offset is legal */ ++ if (unlikely((ui32Offset >= psSyncPrimBlock->ui32BlockSize) || ++ (ui32Offset % sizeof(IMG_UINT32)))) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "SyncPrimitiveBlockToFWAddr: parameters check failed")); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ psAddrOut->ui32Addr = psSyncPrimBlock->uiFWAddr.ui32Addr + ui32Offset; ++ return PVRSRV_OK; ++} ++ ++/*! 
++*****************************************************************************
++ @Function : SyncAddrListGrow
++
++ @Description : Grow the SYNC_ADDR_LIST so it can accommodate the given
++ number of syncs, up to a maximum of PVRSRV_MAX_SYNC_ADDR_LIST_SIZE.
++
++ @Input psList : The SYNC_ADDR_LIST to grow
++ @Input ui32NumSyncs : The number of sync addresses to be able to hold
++ @Return : PVRSRV_OK on success
++*****************************************************************************/
++
++static PVRSRV_ERROR SyncAddrListGrow(SYNC_ADDR_LIST *psList, IMG_UINT32 ui32NumSyncs)
++{
++ if (unlikely(ui32NumSyncs > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: ui32NumSyncs=%u > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE=%u", __func__, ui32NumSyncs, PVRSRV_MAX_SYNC_ADDR_LIST_SIZE));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++ PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
++#endif
++ if (ui32NumSyncs > psList->ui32NumSyncs)
++ {
++ if (psList->pasFWAddrs == NULL)
++ {
++ psList->pasFWAddrs = OSAllocMem(sizeof(PRGXFWIF_UFO_ADDR) * PVRSRV_MAX_SYNC_ADDR_LIST_SIZE);
++ PVR_RETURN_IF_NOMEM(psList->pasFWAddrs);
++ }
++
++ psList->ui32NumSyncs = ui32NumSyncs;
++ }
++
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++ PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
++#endif
++ return PVRSRV_OK;
++}
++
++/*!
++*****************************************************************************
++ @Function : SyncAddrListInit
++
++ @Description : Initialise a SYNC_ADDR_LIST structure ready for use
++
++ @Input psList : The SYNC_ADDR_LIST structure to initialise
++ @Return : None
++*****************************************************************************/
++
++void
++SyncAddrListInit(SYNC_ADDR_LIST *psList)
++{
++ psList->ui32NumSyncs = 0;
++ psList->pasFWAddrs = NULL;
++}
++
++/*!
++*****************************************************************************
++ @Function : SyncAddrListDeinit
++
++ @Description : Frees any resources associated with the given SYNC_ADDR_LIST
++
++ @Input psList : The SYNC_ADDR_LIST structure to deinitialise
++ @Return : None
++*****************************************************************************/
++
++void
++SyncAddrListDeinit(SYNC_ADDR_LIST *psList)
++{
++ if (psList->pasFWAddrs != NULL)
++ {
++ OSFreeMem(psList->pasFWAddrs);
++ }
++}
++
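++/* Expected lifecycle of a SYNC_ADDR_LIST (sketch; the block and offset
++ * arrays are assumed to be supplied by the caller):
++ *
++ *     SYNC_ADDR_LIST sList;
++ *
++ *     SyncAddrListInit(&sList);
++ *     eError = SyncAddrListPopulate(&sList, ui32NumSyncs,
++ *                                   apsSyncPrimBlock, paui32SyncOffset);
++ *     ... hand sList.pasFWAddrs / sList.ui32NumSyncs to the FW command ...
++ *     SyncAddrListDeinit(&sList);
++ */
++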
++
++/*!
++*****************************************************************************
++ @Function : SyncAddrListPopulate
++
++ @Description : Populate the given SYNC_ADDR_LIST with the FW addresses
++ of the syncs given by the SYNC_PRIMITIVE_BLOCKs and sync offsets
++
++ @Input ui32NumSyncs : The number of syncs being passed in
++ @Input apsSyncPrimBlock: Array of pointers to SYNC_PRIMITIVE_BLOCK structures
++ in which the syncs are based
++ @Input paui32SyncOffset: Array of offsets within each of the sync primitive blocks
++ where the syncs are located
++ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
++ parameters are invalid.
++*****************************************************************************/
++
++PVRSRV_ERROR
++SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
++                     IMG_UINT32 ui32NumSyncs,
++                     SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
++                     IMG_UINT32 *paui32SyncOffset)
++{
++    IMG_UINT32 i;
++    PVRSRV_ERROR eError;
++
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
++#endif
++    if (ui32NumSyncs > psList->ui32NumSyncs)
++    {
++        eError = SyncAddrListGrow(psList, ui32NumSyncs);
++
++        PVR_RETURN_IF_ERROR(eError);
++    }
++
++    psList->ui32NumSyncs = ui32NumSyncs;
++
++    for (i = 0; i < ui32NumSyncs; i++)
++    {
++        eError = SyncPrimitiveBlockToFWAddr(apsSyncPrimBlock[i],
++                                            paui32SyncOffset[i],
++                                            &psList->pasFWAddrs[i]);
++
++        PVR_RETURN_IF_ERROR(eError);
++    }
++
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
++#endif
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList,
++                           PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim)
++{
++    PVRSRV_ERROR eError = PVRSRV_OK;
++    IMG_UINT32 ui32FwAddr = 0;
++
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d)", __func__, (void*)psList, psList->ui32NumSyncs));
++#endif
++    /* Ensure there's room in psList for the additional sync prim update */
++    eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + 1);
++    PVR_GOTO_IF_ERROR(eError, e0);
++
++    SyncPrimGetFirmwareAddr(psSyncPrim, &ui32FwAddr);
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    PVR_DPF((PVR_DBG_ERROR, "%s: Appending sync prim <%p> UFO addr (0x%x) to psList->pasFWAddrs[%d]", __func__, (void*)psSyncPrim, ui32FwAddr, psList->ui32NumSyncs-1));
++#endif
++    psList->pasFWAddrs[psList->ui32NumSyncs-1].ui32Addr = ui32FwAddr;
++
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    {
++        IMG_UINT32 iii;
++
++        PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs));
++        for (iii=0; iii<psList->ui32NumSyncs; iii++)
++        {
++            PVR_DPF((PVR_DBG_ERROR, "%s: psList->pasFWAddrs[%d].ui32Addr=0x%x", __func__, iii, psList->pasFWAddrs[iii].ui32Addr));
++        }
++    }
++#endif
++e0:
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d", __func__, (void*)psList, psList->ui32NumSyncs));
++#endif
++    return eError;
++}
++
++
++static PVRSRV_ERROR
++_AppendCheckpoints(SYNC_ADDR_LIST *psList,
++                   IMG_UINT32 ui32NumCheckpoints,
++                   PSYNC_CHECKPOINT *apsSyncCheckpoint,
++                   IMG_BOOL bDeRefCheckpoints)
++{
++    PVRSRV_ERROR eError = PVRSRV_OK;
++    IMG_UINT32 ui32SyncCheckpointIndex;
++    IMG_UINT32 ui32RollbackSize = psList->ui32NumSyncs;
++
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
++#endif
++    /* Ensure there's room in psList for the sync checkpoints */
++    eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + ui32NumCheckpoints);
++    if (unlikely(eError != PVRSRV_OK))
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: * * * * ERROR * * * * Trying to SyncAddrListGrow(psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
++        goto e0;
++    }
++
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    PVR_DPF((PVR_DBG_ERROR, "%s: (ui32NumCheckpoints=%d) (psList->ui32NumSyncs is now %d) array already contains %d FWAddrs:", __func__, ui32NumCheckpoints, psList->ui32NumSyncs, ui32RollbackSize));
++    if (ui32RollbackSize > 0)
++    {
++        {
++            IMG_UINT32 kk;
++            for (kk=0; kk<ui32RollbackSize; kk++)
++            {
++                PVR_DPF((PVR_DBG_ERROR, "%s: <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__,
++                         (void*)&psList->pasFWAddrs[kk], kk,
++                         psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
++            }
++        }
++    }
++    PVR_DPF((PVR_DBG_ERROR, "%s: apsSyncCheckpoint=<%p>, apsSyncCheckpoint[0] = <%p>", __func__, (void*)apsSyncCheckpoint, (void*)apsSyncCheckpoint[0]));
++#endif
++    for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
++    {
++        psList->pasFWAddrs[ui32RollbackSize + ui32SyncCheckpointIndex].ui32Addr = SyncCheckpointGetFirmwareAddr(apsSyncCheckpoint[ui32SyncCheckpointIndex]);
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++        PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCCBEnqueued(<%p>)", __func__, (void*)apsSyncCheckpoint[ui32SyncCheckpointIndex]));
++        PVR_DPF((PVR_DBG_ERROR, "%s: ID:%d", __func__, SyncCheckpointGetId((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex])));
++#endif
++        SyncCheckpointCCBEnqueued((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
++        if (bDeRefCheckpoints)
++        {
++            /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
++            SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
++        }
++    }
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    if (psList->ui32NumSyncs > 0)
++    {
++        IMG_UINT32 kk;
++        for (kk=0; kk<psList->ui32NumSyncs; kk++)
++        {
++            PVR_DPF((PVR_DBG_ERROR, "%s: <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __func__,
++                     (void*)&psList->pasFWAddrs[kk], kk,
++                     psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
++        }
++    }
++#endif
++    return eError;
++
++e0:
++    for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
++    {
++        if (bDeRefCheckpoints)
++        {
++            SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
++        }
++    }
++    psList->ui32NumSyncs = ui32RollbackSize;
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __func__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
++#endif
++    return eError;
++}
++
++/*!
++*****************************************************************************
++ @Function : SyncAddrListAppendCheckpoints
++
++ @Description : Append the FW addresses of the sync checkpoints given in
++ the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST
++
++ @Input ui32NumSyncCheckpoints : The number of sync checkpoints
++ being passed in
++ @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
++ are to be appended to the SYNC_ADDR_LIST
++ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
++ parameters are invalid.
++*****************************************************************************/
++PVRSRV_ERROR
++SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList,
++                              IMG_UINT32 ui32NumCheckpoints,
++                              PSYNC_CHECKPOINT *apsSyncCheckpoint)
++{
++    return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_FALSE);
++}
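++
++/* Illustrative sketch (editor's addition, not part of the original source):
++ * how a kick path is expected to combine append and rollback. The
++ * checkpoints resolved from a fence are appended to the update list, and
++ * if the subsequent command submission fails, their CCB-enqueued state
++ * must be unwound. psDevNode, apsCheckpoints, ui32Count and
++ * SubmitCommandFoo() are hypothetical:
++ *
++ *     eErr = SyncAddrListAppendCheckpoints(&sUpdateList, ui32Count,
++ *                                          apsCheckpoints);
++ *     if (eErr != PVRSRV_OK) { return eErr; }
++ *
++ *     eErr = SubmitCommandFoo(&sUpdateList);
++ *     if (eErr != PVRSRV_OK)
++ *     {
++ *         // Checkpoint FW addresses carry bit 0 set, which is how the
++ *         // rollback below tells them apart from plain sync prims.
++ *         SyncAddrListRollbackCheckpoints(psDevNode, &sUpdateList);
++ *     }
++ */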
++
++/*!
++*****************************************************************************
++ @Function : SyncAddrListAppendAndDeRefCheckpoints
++
++ @Description : Append the FW addresses of the sync checkpoints given in
++ the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST.
++ A reference is dropped for each of the checkpoints.
++
++ @Input ui32NumSyncCheckpoints : The number of sync checkpoints
++ being passed in
++ @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
++ are to be appended to the SYNC_ADDR_LIST
++ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
++ parameters are invalid.
++*****************************************************************************/
++PVRSRV_ERROR
++SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList,
++                                      IMG_UINT32 ui32NumCheckpoints,
++                                      PSYNC_CHECKPOINT *apsSyncCheckpoint)
++{
++    return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_TRUE);
++}
++
++void
++SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints,
++                             PSYNC_CHECKPOINT *apsSyncCheckpoint)
++{
++    IMG_UINT32 ui32SyncCheckpointIndex;
++
++    for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
++    {
++        /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
++        SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
++    }
++}
++
++PVRSRV_ERROR
++SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList)
++{
++    PVRSRV_ERROR eError = PVRSRV_OK;
++    IMG_UINT32 ui32SyncIndex;
++
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++    PVR_DPF((PVR_DBG_ERROR, "%s: called (psList=<%p>)", __func__, (void*)psList));
++#endif
++    if (psList)
++    {
++#if (SYNC_ADDR_LIST_DEBUG == 1)
++        PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __func__, psList->ui32NumSyncs));
++#endif
++        for (ui32SyncIndex=0; ui32SyncIndex<psList->ui32NumSyncs; ui32SyncIndex++)
++        {
++            if (psList->pasFWAddrs[ui32SyncIndex].ui32Addr & 0x1)
++            {
++                SyncCheckpointRollbackFromUFO(psDevNode, psList->pasFWAddrs[ui32SyncIndex].ui32Addr);
++            }
++        }
++    }
++    return eError;
++}
++
++PVRSRV_ERROR
++PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
++                      PVRSRV_DEVICE_NODE *psDevNode,
++                      SYNC_RECORD_HANDLE *phRecord,
++                      SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
++                      IMG_UINT32 ui32FwBlockAddr,
++                      IMG_UINT32 ui32SyncOffset,
++                      IMG_BOOL bServerSync,
++                      IMG_UINT32 ui32ClassNameSize,
++                      const IMG_CHAR *pszClassName)
++{
++    struct SYNC_RECORD * psSyncRec;
++    PVRSRV_ERROR eError = PVRSRV_OK;
++
++    PVR_UNREFERENCED_PARAMETER(psConnection);
++
++    RGXSRV_HWPERF_ALLOC(psDevNode, SYNC,
++                        ui32FwBlockAddr + ui32SyncOffset,
++                        pszClassName,
++                        ui32ClassNameSize);
++
++    PVR_RETURN_IF_INVALID_PARAM(phRecord);
++
++    *phRecord = NULL;
++
++    psSyncRec = OSAllocMem(sizeof(*psSyncRec));
++    PVR_GOTO_IF_NOMEM(psSyncRec, eError, fail_alloc);
++
++    psSyncRec->psDevNode = psDevNode;
++    psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock;
++    psSyncRec->ui32SyncOffset = ui32SyncOffset;
++    psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
++    psSyncRec->ui64OSTime = OSClockns64();
++    psSyncRec->uiPID = OSGetCurrentProcessID();
++    psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT;
++
++    if (pszClassName)
++    {
++        if (ui32ClassNameSize >= PVRSRV_SYNC_NAME_LENGTH)
++            ui32ClassNameSize = PVRSRV_SYNC_NAME_LENGTH;
++        /* Copy over the class name annotation */
++        OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
++    }
++    else
++    {
++        /* No class name annotation */
++        psSyncRec->szClassName[0] = 0;
++    }
++
++    OSLockAcquire(psDevNode->hSyncServerRecordLock);
++    if (psDevNode->ui32SyncServerRecordCount < SYNC_RECORD_LIMIT)
++    {
++        dllist_add_to_head(&psDevNode->sSyncServerRecordList, &psSyncRec->sNode);
++        psDevNode->ui32SyncServerRecordCount++;
++
++        if (psDevNode->ui32SyncServerRecordCount > psDevNode->ui32SyncServerRecordCountHighWatermark)
++        {
++            psDevNode->ui32SyncServerRecordCountHighWatermark = psDevNode->ui32SyncServerRecordCount;
++        }
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync record \"%s\". 
%u records already exist.", ++ __func__, ++ pszClassName, ++ psDevNode->ui32SyncServerRecordCount)); ++ OSFreeMem(psSyncRec); ++ psSyncRec = NULL; ++ eError = PVRSRV_ERROR_TOOMANYBUFFERS; ++ } ++ OSLockRelease(psDevNode->hSyncServerRecordLock); ++ ++ *phRecord = (SYNC_RECORD_HANDLE)psSyncRec; ++ ++fail_alloc: ++ return eError; ++} ++ ++PVRSRV_ERROR ++PVRSRVSyncRecordRemoveByHandleKM( ++ SYNC_RECORD_HANDLE hRecord) ++{ ++ struct SYNC_RECORD **ppFreedSync; ++ struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord; ++ PVRSRV_DEVICE_NODE *psDevNode; ++ ++ PVR_RETURN_IF_INVALID_PARAM(hRecord); ++ ++ psDevNode = pSync->psDevNode; ++ ++ OSLockAcquire(psDevNode->hSyncServerRecordLock); ++ ++ RGXSRV_HWPERF_FREE(psDevNode, SYNC, pSync->ui32FwBlockAddr + pSync->ui32SyncOffset); ++ ++ dllist_remove_node(&pSync->sNode); ++ ++ if (psDevNode->uiSyncServerRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range", ++ __func__)); ++ psDevNode->uiSyncServerRecordFreeIdx = 0; ++ } ++ ppFreedSync = &psDevNode->apsSyncServerRecordsFreed[psDevNode->uiSyncServerRecordFreeIdx]; ++ psDevNode->uiSyncServerRecordFreeIdx = ++ (psDevNode->uiSyncServerRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; ++ ++ if (*ppFreedSync) ++ { ++ OSFreeMem(*ppFreedSync); ++ } ++ pSync->psServerSyncPrimBlock = NULL; ++ pSync->ui64OSTime = OSClockns64(); ++ *ppFreedSync = pSync; ++ ++ psDevNode->ui32SyncServerRecordCount--; ++ ++ OSLockRelease(psDevNode->hSyncServerRecordLock); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_BOOL bServerSync, ++ IMG_UINT32 ui32FWAddr, ++ IMG_UINT32 ui32ClassNameSize, ++ const IMG_CHAR *pszClassName) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ RGXSRV_HWPERF_ALLOC(psDevNode, SYNC, ui32FWAddr, pszClassName, ui32ClassNameSize); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32FWAddr) ++{ ++ PVR_UNREFERENCED_PARAMETER(psConnection); ++ RGXSRV_HWPERF_FREE(psDevNode, SYNC, ui32FWAddr); ++ ++ return PVRSRV_OK; ++} ++ ++static ++void _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData) ++{ ++ IMG_INT iRefCount = OSAtomicIncrement(&psSyncConnectionData->sRefCount); ++ ++ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", ++ __func__, psSyncConnectionData, iRefCount); ++ PVR_UNREFERENCED_PARAMETER(iRefCount); ++} ++ ++static ++void _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData) ++{ ++ IMG_INT iRefCount = OSAtomicDecrement(&psSyncConnectionData->sRefCount); ++ if (iRefCount == 0) ++ { ++ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", ++ __func__, psSyncConnectionData, iRefCount); ++ ++ PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead)); ++ OSLockDestroy(psSyncConnectionData->hLock); ++ OSFreeMem(psSyncConnectionData); ++ } ++ else ++ { ++ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d", ++ __func__, psSyncConnectionData, iRefCount); ++ PVR_ASSERT(iRefCount > 0); ++ } ++} ++ ++static ++void _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock) ++{ ++ if (psConnection) ++ { ++ SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData; ++ ++ /* ++ Make sure the connection doesn't go away. It doesn't matter that we will release ++ the lock between as the refcount and list don't have to be atomic w.r.t. 
to each other
++    */
++    _SyncConnectionRef(psSyncConnectionData);
++
++    OSLockAcquire(psSyncConnectionData->hLock);
++    if (psConnection != NULL)
++    {
++        dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode);
++    }
++    OSLockRelease(psSyncConnectionData->hLock);
++    psBlock->psSyncConnectionData = psSyncConnectionData;
++    }
++    else
++    {
++        psBlock->psSyncConnectionData = NULL;
++    }
++}
++
++static
++void _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock)
++{
++    SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData;
++
++    if (psBlock->psSyncConnectionData)
++    {
++        OSLockAcquire(psSyncConnectionData->hLock);
++        dllist_remove_node(&psBlock->sConnectionNode);
++        OSLockRelease(psSyncConnectionData->hLock);
++
++        _SyncConnectionUnref(psBlock->psSyncConnectionData);
++    }
++}
++
++static inline
++void _DoPrimBlockFree(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
++{
++    PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
++
++    SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)",
++                        __func__, psSyncBlk, OSAtomicRead(&psSyncBlk->sRefCount));
++
++    PVR_ASSERT(OSAtomicRead(&psSyncBlk->sRefCount) == 1);
++
++    _SyncConnectionRemoveBlock(psSyncBlk);
++    DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
++    psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
++    OSFreeMem(psSyncBlk);
++}
++
++PVRSRV_ERROR
++PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
++                                PVRSRV_DEVICE_NODE * psDevNode,
++                                SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
++                                IMG_UINT32 *puiSyncPrimVAddr,
++                                IMG_UINT32 *puiSyncPrimBlockSize,
++                                PMR **ppsSyncPMR)
++{
++    SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
++    PVRSRV_ERROR eError;
++
++    psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
++    PVR_GOTO_IF_NOMEM(psNewSyncBlk, eError, e0);
++
++    psNewSyncBlk->psDevNode = psDevNode;
++
++    PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");
++
++    eError = psDevNode->pfnAllocUFOBlock(psDevNode,
++                                         &psNewSyncBlk->psMemDesc,
++                                         &psNewSyncBlk->uiFWAddr.ui32Addr,
++                                         &psNewSyncBlk->ui32BlockSize);
++    PVR_GOTO_IF_ERROR(eError, e1);
++
++    *puiSyncPrimVAddr = psNewSyncBlk->uiFWAddr.ui32Addr;
++
++    eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
++                                      (void **) &psNewSyncBlk->pui32LinAddr);
++    PVR_GOTO_IF_ERROR(eError, e2);
++
++    eError = DevmemLocalGetImportHandle(psNewSyncBlk->psMemDesc, (void **) ppsSyncPMR);
++
++    PVR_GOTO_IF_ERROR(eError, e3);
++
++    OSAtomicWrite(&psNewSyncBlk->sRefCount, 1);
++
++    /* If there is a connection pointer then add the new block onto its list */
++    _SyncConnectionAddBlock(psConnection, psNewSyncBlk);
++
++    *ppsSyncBlk = psNewSyncBlk;
++    *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;
++
++    PDUMPCOMMENTWITHFLAGS(psDevNode, PDUMP_FLAGS_CONTINUOUS,
++                          "Allocated UFO block (FirmwareVAddr = 0x%08x)",
++                          *puiSyncPrimVAddr);
++
++    return PVRSRV_OK;
++
++e3:
++    DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
++e2:
++    psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
++e1:
++    OSFreeMem(psNewSyncBlk);
++e0:
++    return eError;
++}
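++
++/* Illustrative sketch (editor's addition, not part of the original source):
++ * a server-side caller allocates a UFO block, receives its FW base address,
++ * size and backing PMR, and later releases it with the free call below.
++ * psConnection and psDevNode are assumed to be valid connection and device
++ * handles:
++ *
++ *     SYNC_PRIMITIVE_BLOCK *psBlk;
++ *     IMG_UINT32 ui32FWAddr, ui32BlkSize;
++ *     PMR *psPMR;
++ *
++ *     eErr = PVRSRVAllocSyncPrimitiveBlockKM(psConnection, psDevNode,
++ *                                            &psBlk, &ui32FWAddr,
++ *                                            &ui32BlkSize, &psPMR);
++ *     if (eErr == PVRSRV_OK)
++ *     {
++ *         // e.g. initialise the first sync in the block to zero
++ *         PVRSRVSyncPrimSetKM(psBlk, 0, 0);
++ *         ...
++ *         PVRSRVFreeSyncPrimitiveBlockKM(psBlk);
++ *     }
++ */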
++
++PVRSRV_ERROR
++PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
++{
++
++    /* This function is an alternative to the above without reference counting.
++     * With the removal of sync prim ops for server syncs we no longer have to
++     * reference count prim blocks as the reference will never be incremented /
++     * decremented by a prim op */
++    _DoPrimBlockFree(psSyncBlk);
++    return PVRSRV_OK;
++}
++
++static INLINE IMG_BOOL _CheckSyncIndex(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
++                                       IMG_UINT32 ui32Index)
++{
++    return ((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize);
++}
++
++PVRSRV_ERROR
++PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
++                    IMG_UINT32 ui32Value)
++{
++    if (_CheckSyncIndex(psSyncBlk, ui32Index))
++    {
++        psSyncBlk->pui32LinAddr[ui32Index] = ui32Value;
++        return PVRSRV_OK;
++    }
++    else
++    {
++        PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for "
++                 "0x%08X byte sync block (value 0x%08X)",
++                 ui32Index,
++                 psSyncBlk->ui32BlockSize,
++                 ui32Value));
++        return PVRSRV_ERROR_INVALID_PARAMS;
++    }
++}
++
++#if defined(PDUMP)
++PVRSRV_ERROR
++PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
++{
++    /*
++      We might be asked to PDump sync state outside of capture range
++      (e.g. texture uploads) so make this continuous.
++    */
++    DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc,
++                              ui32Offset,
++                              ui32Value,
++                              PDUMP_FLAGS_CONTINUOUS);
++
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
++{
++    /*
++      We might be asked to PDump sync state outside of capture range
++      (e.g. texture uploads) so make this continuous.
++    */
++    DevmemPDumpLoadMem(psSyncBlk->psMemDesc,
++                       ui32Offset,
++                       sizeof(IMG_UINT32),
++                       PDUMP_FLAGS_CONTINUOUS);
++
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
++                         IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
++                         PDUMP_POLL_OPERATOR eOperator,
++                         PDUMP_FLAGS_T ui32PDumpFlags)
++{
++    DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc,
++                           ui32Offset,
++                           ui32Value,
++                           ui32Mask,
++                           eOperator,
++                           ui32PDumpFlags);
++
++    return PVRSRV_OK;
++}
++
++PVRSRV_ERROR
++PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
++                         IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
++                         IMG_UINT64 uiBufferSize)
++{
++    DevmemPDumpCBP(psSyncBlk->psMemDesc,
++                   ui32Offset,
++                   uiWriteOffset,
++                   uiPacketSize,
++                   uiBufferSize);
++    return PVRSRV_OK;
++}
++#endif
++
++/* SyncRegisterConnection */
++PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData)
++{
++    SYNC_CONNECTION_DATA *psSyncConnectionData;
++    PVRSRV_ERROR eError;
++
++    psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA));
++    if (psSyncConnectionData == NULL)
++    {
++        eError = PVRSRV_ERROR_OUT_OF_MEMORY;
++        goto fail_alloc;
++    }
++
++    eError = OSLockCreate(&psSyncConnectionData->hLock);
++    PVR_GOTO_IF_ERROR(eError, fail_lockcreate);
++    dllist_init(&psSyncConnectionData->sListHead);
++    OSAtomicWrite(&psSyncConnectionData->sRefCount, 1);
++
++    *ppsSyncConnectionData = psSyncConnectionData;
++    return PVRSRV_OK;
++
++fail_lockcreate:
++    OSFreeMem(psSyncConnectionData);
++fail_alloc:
++    PVR_ASSERT(eError != PVRSRV_OK);
++    return eError;
++}
++
++/* SyncUnregisterConnection */
++void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData)
++{
++    _SyncConnectionUnref(psSyncConnectionData);
++}
++
++void SyncConnectionPDumpSyncBlocks(PVRSRV_DEVICE_NODE *psDevNode, void *hSyncPrivData, PDUMP_TRANSITION_EVENT eEvent)
++{
++    if ((eEvent == PDUMP_TRANSITION_EVENT_RANGE_ENTERED) || (eEvent == 
PDUMP_TRANSITION_EVENT_BLOCK_STARTED)) ++ { ++ SYNC_CONNECTION_DATA *psSyncConnectionData = hSyncPrivData; ++ DLLIST_NODE *psNode, *psNext; ++ ++ OSLockAcquire(psSyncConnectionData->hLock); ++ ++ PDUMPCOMMENT(psDevNode, "Dump client Sync Prim state"); ++ dllist_foreach_node(&psSyncConnectionData->sListHead, psNode, psNext) ++ { ++ SYNC_PRIMITIVE_BLOCK *psSyncBlock = ++ IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode); ++ ++ DevmemPDumpLoadMem(psSyncBlock->psMemDesc, ++ 0, ++ psSyncBlock->ui32BlockSize, ++ PDUMP_FLAGS_CONTINUOUS); ++ } ++ ++ OSLockRelease(psSyncConnectionData->hLock); ++ } ++} ++ ++void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr, ++ IMG_CHAR * pszSyncInfo, size_t len) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ IMG_INT iEnd; ++ IMG_BOOL bFound = IMG_FALSE; ++ ++ if (!pszSyncInfo) ++ { ++ return; ++ } ++ ++ OSLockAcquire(psDevNode->hSyncServerRecordLock); ++ pszSyncInfo[0] = '\0'; ++ ++ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) ++ { ++ struct SYNC_RECORD *psSyncRec = ++ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); ++ if ((psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset) == ui32FwAddr ++ && SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType ++ && psSyncRec->psServerSyncPrimBlock ++ && psSyncRec->psServerSyncPrimBlock->pui32LinAddr ++ ) ++ { ++ IMG_UINT32 *pui32SyncAddr; ++ pui32SyncAddr = psSyncRec->psServerSyncPrimBlock->pui32LinAddr ++ + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32)); ++ iEnd = OSSNPrintf(pszSyncInfo, len, "Cur=0x%08x %s:%05u (%s)", ++ *pui32SyncAddr, ++ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), ++ psSyncRec->uiPID, ++ psSyncRec->szClassName ++ ); ++ if (iEnd >= 0 && iEnd < len) ++ { ++ pszSyncInfo[iEnd] = '\0'; ++ } ++ bFound = IMG_TRUE; ++ break; ++ } ++ } ++ ++ OSLockRelease(psDevNode->hSyncServerRecordLock); ++ ++ if (!bFound && (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT)) ++ { ++ OSSNPrintf(pszSyncInfo, len, "(Record may be lost)"); ++ } ++} ++ ++#define NS_IN_S (1000000000UL) ++static void _SyncRecordPrint(struct SYNC_RECORD *psSyncRec, ++ IMG_UINT64 ui64TimeNow, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock; ++ ++ if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType) ++ { ++ IMG_UINT64 ui64DeltaS; ++ IMG_UINT32 ui32DeltaF; ++ IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime; ++ ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF); ++ ++ if (psSyncBlock && psSyncBlock->pui32LinAddr) ++ { ++ IMG_UINT32 *pui32SyncAddr; ++ pui32SyncAddr = psSyncBlock->pui32LinAddr ++ + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32)); ++ ++ PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val=0x%08x (%s)", ++ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), ++ psSyncRec->uiPID, ++ ui64DeltaS, ui32DeltaF, ++ (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset), ++ *pui32SyncAddr, ++ psSyncRec->szClassName ++ ); ++ } ++ else ++ { ++ PVR_DUMPDEBUG_LOG("\t%s %05u %05" IMG_UINT64_FMTSPEC ".%09u FWAddr=0x%08x Val= (%s)", ++ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"), ++ psSyncRec->uiPID, ++ ui64DeltaS, ui32DeltaF, ++ (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset), ++ psSyncRec->szClassName ++ ); ++ } ++ } ++} ++ ++static void _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle, ++ IMG_UINT32 ui32VerbLevel, ++ DUMPDEBUG_PRINTF_FUNC 
*pfnDumpDebugPrintf, ++ void *pvDumpDebugFile) ++{ ++ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle; ++ IMG_UINT64 ui64TimeNowS; ++ IMG_UINT32 ui32TimeNowF; ++ IMG_UINT64 ui64TimeNow = OSClockns64(); ++ DLLIST_NODE *psNode, *psNext; ++ ++ ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF); ++ ++ if (DD_VERB_LVL_ENABLED(ui32VerbLevel, DEBUG_REQUEST_VERBOSITY_MEDIUM)) ++ { ++ IMG_UINT32 i; ++ OSLockAcquire(psDevNode->hSyncServerRecordLock); ++ ++ PVR_DUMPDEBUG_LOG("Dumping all allocated syncs. Allocated: %u High watermark: %u @ %05" IMG_UINT64_FMTSPEC ".%09u", ++ psDevNode->ui32SyncServerRecordCount, ++ psDevNode->ui32SyncServerRecordCountHighWatermark, ++ ui64TimeNowS, ++ ui32TimeNowF); ++ if (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT) ++ { ++ PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.", ++ SYNC_RECORD_LIMIT); ++ } ++ ++ PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)", ++ "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"); ++ ++ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) ++ { ++ struct SYNC_RECORD *psSyncRec = ++ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); ++ _SyncRecordPrint(psSyncRec, ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++ ++ PVR_DUMPDEBUG_LOG("Dumping all recently freed syncs @ %05" IMG_UINT64_FMTSPEC ".%09u", ++ ui64TimeNowS, ui32TimeNowF); ++ PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)", ++ "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation"); ++ for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncServerRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN); ++ i != psDevNode->uiSyncServerRecordFreeIdx; ++ i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)) ++ { ++ if (psDevNode->apsSyncServerRecordsFreed[i]) ++ { ++ _SyncRecordPrint(psDevNode->apsSyncServerRecordsFreed[i], ++ ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile); ++ } ++ else ++ { ++ break; ++ } ++ } ++ ++ OSLockRelease(psDevNode->hSyncServerRecordLock); ++ } ++} ++#undef NS_IN_S ++ ++static PVRSRV_ERROR SyncRecordListInit(PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ psDevNode->ui32SyncServerRecordCount = 0; ++ psDevNode->ui32SyncServerRecordCountHighWatermark = 0; ++ ++ eError = OSLockCreate(&psDevNode->hSyncServerRecordLock); ++ PVR_GOTO_IF_ERROR(eError, fail_lock_create); ++ dllist_init(&psDevNode->sSyncServerRecordList); ++ ++ eError = PVRSRVRegisterDeviceDbgRequestNotify(&psDevNode->hSyncServerRecordNotify, ++ psDevNode, ++ _SyncRecordRequest, ++ DEBUG_REQUEST_SYNCTRACKING, ++ psDevNode); ++ ++ PVR_GOTO_IF_ERROR(eError, fail_dbg_register); ++ ++ return PVRSRV_OK; ++ ++fail_dbg_register: ++ OSLockDestroy(psDevNode->hSyncServerRecordLock); ++fail_lock_create: ++ return eError; ++} ++ ++static void SyncRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ DLLIST_NODE *psNode, *psNext; ++ int i; ++ ++ OSLockAcquire(psDevNode->hSyncServerRecordLock); ++ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext) ++ { ++ struct SYNC_RECORD *pSyncRec = ++ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode); ++ ++ dllist_remove_node(psNode); ++ OSFreeMem(pSyncRec); ++ } ++ ++ for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++) ++ { ++ if (psDevNode->apsSyncServerRecordsFreed[i]) ++ { ++ OSFreeMem(psDevNode->apsSyncServerRecordsFreed[i]); ++ psDevNode->apsSyncServerRecordsFreed[i] = NULL; ++ } ++ } ++ 
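++    /* Editor's note (illustrative, not part of the original source): the
++     * apsSyncServerRecordsFreed[] array just cleared above is a ring of the
++     * PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN most recently freed records.
++     * uiSyncServerRecordFreeIdx is the next write slot, so
++     * _SyncRecordRequest() above walks backwards from it with
++     * DECREMENT_WITH_WRAP to print newest-first. E.g. with a hypothetical
++     * history length of 4 and free index 1, the visit order is:
++     *
++     *     i = DECREMENT_WITH_WRAP(1, 4)  ->  0   (newest entry)
++     *     i = DECREMENT_WITH_WRAP(0, 4)  ->  3
++     *     i = DECREMENT_WITH_WRAP(3, 4)  ->  2   (loop stops on reaching 1)
++     */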
OSLockRelease(psDevNode->hSyncServerRecordLock); ++ ++ if (psDevNode->hSyncServerRecordNotify) ++ { ++ PVRSRVUnregisterDeviceDbgRequestNotify(psDevNode->hSyncServerRecordNotify); ++ } ++ OSLockDestroy(psDevNode->hSyncServerRecordLock); ++} ++ ++PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ PVRSRV_ERROR eError; ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ { ++ eError = SyncRecordListInit(psDevNode); ++ PVR_GOTO_IF_ERROR(eError, fail_record_list); ++ } ++ ++ return PVRSRV_OK; ++ ++fail_record_list: ++ return eError; ++} ++ ++void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode) ++{ ++ ++ if (GetInfoPageDebugFlagsKM() & DEBUG_FEATURE_FULL_SYNC_TRACKING_ENABLED) ++ { ++ SyncRecordListDeinit(psDevNode); ++ } ++} +diff --git a/drivers/gpu/drm/img-rogue/sync_server.h b/drivers/gpu/drm/img-rogue/sync_server.h +new file mode 100644 +index 000000000000..e35682970af6 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sync_server.h +@@ -0,0 +1,249 @@ ++/**************************************************************************/ /*! ++@File ++@Title Server side synchronisation interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Describes the server side synchronisation functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /***************************************************************************/ ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv.h" ++#include "device.h" ++#include "devicemem.h" ++#include "pdump.h" ++#include "pvrsrv_error.h" ++#include "connection_server.h" ++#include "pdump_km.h" ++ ++#ifndef SYNC_SERVER_H ++#define SYNC_SERVER_H ++ ++typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK; ++typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA; ++typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE; ++ ++typedef struct _SYNC_ADDR_LIST_ ++{ ++ IMG_UINT32 ui32NumSyncs; ++ PRGXFWIF_UFO_ADDR *pasFWAddrs; ++} SYNC_ADDR_LIST; ++ ++PVRSRV_ERROR ++SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock, ++ IMG_UINT32 ui32Offset, ++ PRGXFWIF_UFO_ADDR *psAddrOut); ++ ++void ++SyncAddrListInit(SYNC_ADDR_LIST *psList); ++ ++void ++SyncAddrListDeinit(SYNC_ADDR_LIST *psList); ++ ++PVRSRV_ERROR ++SyncAddrListPopulate(SYNC_ADDR_LIST *psList, ++ IMG_UINT32 ui32NumSyncs, ++ SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock, ++ IMG_UINT32 *paui32SyncOffset); ++ ++PVRSRV_ERROR ++SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList, ++ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim); ++PVRSRV_ERROR ++SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList, ++ IMG_UINT32 ui32NumCheckpoints, ++ PSYNC_CHECKPOINT *apsSyncCheckpoint); ++ ++PVRSRV_ERROR ++SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList, ++ IMG_UINT32 ui32NumCheckpoints, ++ PSYNC_CHECKPOINT *apsSyncCheckpoint); ++ ++void ++SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints, ++ PSYNC_CHECKPOINT *apsSyncCheckpoint); ++ ++PVRSRV_ERROR ++SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList); ++ ++PVRSRV_ERROR ++PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE * psDevNode, ++ SYNC_PRIMITIVE_BLOCK **ppsSyncBlk, ++ IMG_UINT32 *puiSyncPrimVAddr, ++ IMG_UINT32 *puiSyncPrimBlockSize, ++ PMR **ppsSyncPMR); ++ ++PVRSRV_ERROR ++PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, ++ DEVMEM_EXPORTCOOKIE **psExportCookie); ++ ++PVRSRV_ERROR ++PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk); ++ ++PVRSRV_ERROR ++PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *ppsSyncBlk); ++ ++PVRSRV_ERROR ++PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index, ++ IMG_UINT32 ui32Value); ++ ++PVRSRV_ERROR ++PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_BOOL bServerSync, ++ IMG_UINT32 ui32FWAddr, ++ IMG_UINT32 ui32ClassNameSize, ++ const IMG_CHAR *pszClassName); ++ ++PVRSRV_ERROR ++PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ IMG_UINT32 ui32FWAddr); ++ ++PVRSRV_ERROR ++PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection, ++ PVRSRV_DEVICE_NODE *psDevNode, ++ SYNC_RECORD_HANDLE *phRecord, ++ SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock, ++ IMG_UINT32 ui32FwBlockAddr, ++ IMG_UINT32 ui32SyncOffset, ++ IMG_BOOL bServerSync, ++ IMG_UINT32 ui32ClassNameSize, ++ const IMG_CHAR *pszClassName); ++ ++PVRSRV_ERROR ++PVRSRVSyncRecordRemoveByHandleKM( ++ SYNC_RECORD_HANDLE hRecord); ++void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr, ++ IMG_CHAR * pszSyncInfo, size_t len); ++ ++PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData); ++void SyncUnregisterConnection(SYNC_CONNECTION_DATA *ppsSyncConnectionData); ++void SyncConnectionPDumpSyncBlocks(PVRSRV_DEVICE_NODE *psDevNode, void *hSyncPrivData, 
PDUMP_TRANSITION_EVENT eEvent); ++ ++/*! ++****************************************************************************** ++@Function SyncServerInit ++ ++@Description Per-device initialisation for the ServerSync module ++******************************************************************************/ ++PVRSRV_ERROR SyncServerInit(PVRSRV_DEVICE_NODE *psDevNode); ++void SyncServerDeinit(PVRSRV_DEVICE_NODE *psDevNode); ++ ++ ++#if defined(PDUMP) ++PVRSRV_ERROR ++PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset); ++ ++PVRSRV_ERROR ++PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, ++ IMG_UINT32 ui32Value); ++ ++PVRSRV_ERROR ++PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, ++ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiDumpFlags); ++ ++PVRSRV_ERROR ++PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, ++ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, ++ IMG_UINT64 uiBufferSize); ++ ++#else /* PDUMP */ ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVSyncPrimPDumpKM) ++#endif ++static INLINE PVRSRV_ERROR ++PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset) ++{ ++ PVR_UNREFERENCED_PARAMETER(psSyncBlk); ++ PVR_UNREFERENCED_PARAMETER(ui32Offset); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVSyncPrimPDumpValueKM) ++#endif ++static INLINE PVRSRV_ERROR ++PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, ++ IMG_UINT32 ui32Value) ++{ ++ PVR_UNREFERENCED_PARAMETER(psSyncBlk); ++ PVR_UNREFERENCED_PARAMETER(ui32Offset); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVSyncPrimPDumpPolKM) ++#endif ++static INLINE PVRSRV_ERROR ++PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, ++ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask, ++ PDUMP_POLL_OPERATOR eOperator, ++ PDUMP_FLAGS_T uiDumpFlags) ++{ ++ PVR_UNREFERENCED_PARAMETER(psSyncBlk); ++ PVR_UNREFERENCED_PARAMETER(ui32Offset); ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++ PVR_UNREFERENCED_PARAMETER(ui32Mask); ++ PVR_UNREFERENCED_PARAMETER(eOperator); ++ PVR_UNREFERENCED_PARAMETER(uiDumpFlags); ++ return PVRSRV_OK; ++} ++ ++#ifdef INLINE_IS_PRAGMA ++#pragma inline(PVRSRVSyncPrimPDumpCBPKM) ++#endif ++static INLINE PVRSRV_ERROR ++PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset, ++ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize, ++ IMG_UINT64 uiBufferSize) ++{ ++ PVR_UNREFERENCED_PARAMETER(psSyncBlk); ++ PVR_UNREFERENCED_PARAMETER(ui32Offset); ++ PVR_UNREFERENCED_PARAMETER(uiWriteOffset); ++ PVR_UNREFERENCED_PARAMETER(uiPacketSize); ++ PVR_UNREFERENCED_PARAMETER(uiBufferSize); ++ return PVRSRV_OK; ++} ++#endif /* PDUMP */ ++#endif /*SYNC_SERVER_H */ +diff --git a/drivers/gpu/drm/img-rogue/syscommon.h b/drivers/gpu/drm/img-rogue/syscommon.h +new file mode 100644 +index 000000000000..934974834e50 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/syscommon.h +@@ -0,0 +1,146 @@ ++/**************************************************************************/ /*! ++@File ++@Title Common System APIs and structures ++@Copyright Copyright (c) Imagination Technologies Ltd. 
All Rights Reserved ++@Description This header provides common system-specific declarations and ++ macros that are supported by all systems ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /***************************************************************************/ ++ ++#if !defined(SYSCOMMON_H) ++#define SYSCOMMON_H ++ ++#include "img_types.h" ++#include "pvr_notifier.h" ++#include "pvrsrv_device.h" ++#include "pvrsrv_error.h" ++ ++/*************************************************************************/ /*! ++@Description Pointer to a Low-level Interrupt Service Routine (LISR). ++@Input pvData Private data provided to the LISR. ++@Return True if interrupt handled, false otherwise. ++*/ /**************************************************************************/ ++typedef IMG_BOOL (*PFN_LISR)(void *pvData); ++ ++/**************************************************************************/ /*! ++@Function SysDevInit ++@Description System specific device initialisation function. ++@Input pvOSDevice pointer to the OS device reference ++@Input ppsDevConfig returned device configuration info ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /***************************************************************************/ ++PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig); ++ ++/**************************************************************************/ /*! ++@Function SysDevDeInit ++@Description System specific device deinitialisation function. 
++@Input psDevConfig device configuration info of the device to be ++ deinitialised ++@Return None. ++*/ /***************************************************************************/ ++void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++/**************************************************************************/ /*! ++@Function SysDebugInfo ++@Description Dump system specific device debug information. ++@Input psDevConfig pointer to device configuration info ++@Input pfnDumpDebugPrintf the 'printf' function to be called to ++ display the debug info ++@Input pvDumpDebugFile optional file identifier to be passed to ++ the 'printf' function if required ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /***************************************************************************/ ++PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig, ++ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, ++ void *pvDumpDebugFile); ++ ++/**************************************************************************/ /*! ++@Function SysInstallDeviceLISR ++@Description Installs the system Low-level Interrupt Service Routine (LISR) ++ which handles low-level processing of interrupts from the device ++ (GPU). ++ The LISR will be invoked when the device raises an interrupt. An ++ LISR may not be descheduled, so code which needs to do so should ++ be placed in an MISR. ++ The installed LISR will schedule any MISRs once it has completed ++ its interrupt processing, by calling OSScheduleMISR(). ++@Input hSysData pointer to the system data of the device ++@Input ui32IRQ the IRQ on which the LISR is to be installed ++@Input pszName name of the module installing the LISR ++@Input pfnLISR pointer to the function to be installed as the ++ LISR ++@Input pvData private data provided to the LISR ++@Output phLISRData handle to the installed LISR (to be used for a ++ subsequent uninstall) ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /***************************************************************************/ ++PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData, ++ IMG_UINT32 ui32IRQ, ++ const IMG_CHAR *pszName, ++ PFN_LISR pfnLISR, ++ void *pvData, ++ IMG_HANDLE *phLISRData); ++ ++/**************************************************************************/ /*! ++@Function SysUninstallDeviceLISR ++@Description Uninstalls the system Low-level Interrupt Service Routine (LISR) ++ which handles low-level processing of interrupts from the device ++ (GPU). ++@Input hLISRData handle of the LISR to be uninstalled ++@Return PVRSRV_OK on success, a failure code otherwise. ++*/ /***************************************************************************/ ++PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData); ++ ++/**************************************************************************/ /*! ++@Function SysRGXErrorNotify ++@Description Error reporting callback function, registered as the ++ pfnSysDevErrorNotify member of the PVRSRV_DEVICE_CONFIG ++ struct. System layer will be notified of device errors and ++ resets via this callback. ++ NB. implementers should ensure that the minimal amount of ++ work is done in this callback function, as it will be ++ executed in the main RGX MISR. (e.g. any blocking or lengthy ++ work should be performed by a worker queue/thread instead). ++@Input hSysData pointer to the system data of the device ++@Output psErrorData structure containing details of the reported error ++@Return None. 
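++
++ Editor's illustrative sketch (not part of the original header): a
++ minimal implementation that keeps MISR-context work small by copying
++ the error data and deferring all processing, e.g. via the MISR
++ mechanism described above. SYS_DATA_FOO, hErrorMisrFoo and the
++ sLastError field are hypothetical:
++
++ void SysRGXErrorNotify(IMG_HANDLE hSysData,
++ PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData)
++ {
++ SYS_DATA_FOO *psSysData = (SYS_DATA_FOO *)hSysData;
++
++ psSysData->sLastError = *psErrorData; /* copy, then defer */
++ OSScheduleMISR(psSysData->hErrorMisrFoo);
++ }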
++*/ /***************************************************************************/ ++void SysRGXErrorNotify(IMG_HANDLE hSysData, ++ PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData); ++ ++#endif /* !defined(SYSCOMMON_H) */ +diff --git a/drivers/gpu/drm/img-rogue/sysconfig.c b/drivers/gpu/drm/img-rogue/sysconfig.c +new file mode 100644 +index 000000000000..aedd2f8c9dd2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sysconfig.c +@@ -0,0 +1,462 @@ ++/*************************************************************************/ /*! ++@File ++@Title System Configuration ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description System Configuration functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include <linux/clk.h>
++#include <linux/mutex.h>
++#include "interrupt_support.h"
++#include "pvrsrv_device.h"
++#include "syscommon.h"
++#include "vz_vmm_pvz.h"
++#include "allocmem.h"
++#include "sysinfo.h"
++#include "sysconfig.h"
++#include "physheap.h"
++#include "pvr_debug.h"
++#if defined(SUPPORT_ION)
++#include "ion_support.h"
++#endif
++#if defined(__linux__)
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#endif
++#include "rgx_bvnc_defs_km.h"
++#include "xuantie_sys.h"
++/*
++ * In systems that support trusted device address protection, there are three
++ * physical heaps from which pages should be allocated:
++ * - one heap for normal allocations
++ * - one heap for allocations holding META code memory
++ * - one heap for allocations holding secured DRM data
++ */
++
++#define PHYS_HEAP_IDX_GENERAL 0
++#define PHYS_HEAP_IDX_FW 1
++
++#if defined(SUPPORT_TRUSTED_DEVICE)
++#define PHYS_HEAP_IDX_TDFWMEM 2
++#define PHYS_HEAP_IDX_TDSECUREBUF 3
++#elif defined(SUPPORT_DEDICATED_FW_MEMORY)
++#define PHYS_HEAP_IDX_FW_MEMORY 2
++#endif
++
++#define UMA_HEAP_USAGE_FLAG PHYS_HEAP_USAGE_GPU_LOCAL
++
++#define UMA_DEFAULT_HEAP PVRSRV_PHYS_HEAP_GPU_LOCAL
++
++#if defined(SUPPORT_LINUX_DVFS)
++static struct clk *xuantie_gpu_core_clk = NULL;
++
++static void SetFrequency(IMG_UINT32 ui32Frequency)
++{
++    if (!xuantie_gpu_core_clk)
++    {
++        PVR_DPF((PVR_DBG_ERROR, "xuantie_gpu_core_clk is NULL"));
++        return;
++    }
++    clk_set_rate(xuantie_gpu_core_clk, ui32Frequency);
++}
++
++static void SetVoltage(IMG_UINT32 ui32Voltage)
++{
++
++}
++#endif
++
++/*
++  CPU to Device physical address translation
++*/
++static
++void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
++                                   IMG_UINT32 ui32NumOfAddr,
++                                   IMG_DEV_PHYADDR *psDevPAddr,
++                                   IMG_CPU_PHYADDR *psCpuPAddr)
++{
++    PVR_UNREFERENCED_PARAMETER(hPrivData);
++
++    /* Optimise common case */
++    psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
++    if (ui32NumOfAddr > 1)
++    {
++        IMG_UINT32 ui32Idx;
++        for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
++        {
++            psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
++        }
++    }
++}
++
++/*
++  Device to CPU physical address translation
++*/
++static
++void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
++                                   IMG_UINT32 ui32NumOfAddr,
++                                   IMG_CPU_PHYADDR *psCpuPAddr,
++                                   IMG_DEV_PHYADDR *psDevPAddr)
++{
++    PVR_UNREFERENCED_PARAMETER(hPrivData);
++
++    /* Optimise common case */
++    psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
++    if (ui32NumOfAddr > 1)
++    {
++        IMG_UINT32 ui32Idx;
++        for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
++        {
++            psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
++        }
++    }
++}
++
++static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs =
++{
++    /* pfnCpuPAddrToDevPAddr */
++    UMAPhysHeapCpuPAddrToDevPAddr,
++    /* pfnDevPAddrToCpuPAddr */
++    UMAPhysHeapDevPAddrToCpuPAddr,
++};
++
++static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut,
++                                    IMG_UINT32 *puiPhysHeapCountOut)
++{
++    /*
++     * This function is called during device initialisation, which on Linux,
++     * means it won't be called concurrently. As such, there's no need to
++     * protect it with a lock or use an atomic variable.
++ */ ++ PHYS_HEAP_CONFIG *pasPhysHeaps; ++ IMG_UINT32 uiHeapCount = 2; ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ uiHeapCount += 2; ++#elif defined(SUPPORT_DEDICATED_FW_MEMORY) ++ uiHeapCount += 1; ++#endif ++ ++ pasPhysHeaps = OSAllocZMem(sizeof(*pasPhysHeaps) * uiHeapCount); ++ if (!pasPhysHeaps) ++ { ++ return PVRSRV_ERROR_OUT_OF_MEMORY; ++ } ++ ++ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].pszPDumpMemspaceName = "SYSMEM"; ++ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].eType = PHYS_HEAP_TYPE_UMA; ++ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].psMemFuncs = &gsPhysHeapFuncs; ++ pasPhysHeaps[PHYS_HEAP_IDX_GENERAL].ui32UsageFlags = UMA_HEAP_USAGE_FLAG; ++ ++ pasPhysHeaps[PHYS_HEAP_IDX_FW].pszPDumpMemspaceName = "SYSMEM_FW"; ++ pasPhysHeaps[PHYS_HEAP_IDX_FW].eType = PHYS_HEAP_TYPE_UMA; ++ pasPhysHeaps[PHYS_HEAP_IDX_FW].psMemFuncs = &gsPhysHeapFuncs; ++ pasPhysHeaps[PHYS_HEAP_IDX_FW].ui32UsageFlags = PHYS_HEAP_USAGE_FW_MAIN; ++ ++#if defined(SUPPORT_TRUSTED_DEVICE) ++ pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].pszPDumpMemspaceName = "TDFWMEM"; ++ pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].eType = PHYS_HEAP_TYPE_UMA; ++ pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].psMemFuncs = &gsPhysHeapFuncs; ++ pasPhysHeaps[PHYS_HEAP_IDX_TDFWMEM].ui32UsageFlags = ++ PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA; ++ ++ pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].pszPDumpMemspaceName = "TDSECBUFMEM"; ++ pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].eType = PHYS_HEAP_TYPE_UMA; ++ pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].psMemFuncs = &gsPhysHeapFuncs; ++ pasPhysHeaps[PHYS_HEAP_IDX_TDSECUREBUF].ui32UsageFlags = ++ PHYS_HEAP_USAGE_GPU_SECURE; ++ ++#elif defined(SUPPORT_DEDICATED_FW_MEMORY) ++ pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].pszPDumpMemspaceName = "DEDICATEDFWMEM"; ++ pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].eType = PHYS_HEAP_TYPE_UMA; ++ pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].psMemFuncs = &gsPhysHeapFuncs; ++ pasPhysHeaps[PHYS_HEAP_IDX_FW_MEMORY].ui32UsageFlags = ++ PHYS_HEAP_USAGE_FW_CODE | PHYS_HEAP_USAGE_FW_PRIV_DATA; ++#endif ++ ++ *ppasPhysHeapsOut = pasPhysHeaps; ++ *puiPhysHeapCountOut = uiHeapCount; ++ ++ return PVRSRV_OK; ++} ++ ++static void PhysHeapsDestroy(PHYS_HEAP_CONFIG *pasPhysHeaps) ++{ ++ OSFreeMem(pasPhysHeaps); ++} ++ ++static PVRSRV_ERROR SysDevPrePowerState( ++ IMG_HANDLE hSysData, ++ PVRSRV_SYS_POWER_STATE eNewPowerState, ++ PVRSRV_SYS_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ struct gpu_plat_if *mfg = hSysData; ++ ++ xuantie_debug("SysDevPrePowerState (%d->%d), bPwrFlags = 0x%08x\n", ++ eCurrentPowerState, eNewPowerState, ePwrFlags); ++ ++ mutex_lock(&mfg->set_power_state); ++ ++ if ((PVRSRV_SYS_POWER_STATE_OFF == eNewPowerState) && ++ (PVRSRV_SYS_POWER_STATE_ON == eCurrentPowerState)) ++ xuantie_mfg_disable(mfg); ++ ++ mutex_unlock(&mfg->set_power_state); ++ return PVRSRV_OK; ++} ++ ++static PVRSRV_ERROR SysDevPostPowerState( ++ IMG_HANDLE hSysData, ++ PVRSRV_SYS_POWER_STATE eNewPowerState, ++ PVRSRV_SYS_POWER_STATE eCurrentPowerState, ++ PVRSRV_POWER_FLAGS ePwrFlags) ++{ ++ struct gpu_plat_if *mfg = hSysData; ++ PVRSRV_ERROR ret; ++ ++ xuantie_debug("SysDevPostPowerState (%d->%d)\n", ++ eCurrentPowerState, eNewPowerState); ++ ++ mutex_lock(&mfg->set_power_state); ++ ++ if ((PVRSRV_SYS_POWER_STATE_ON == eNewPowerState) && ++ (PVRSRV_SYS_POWER_STATE_OFF == eCurrentPowerState)) { ++ if (xuantie_mfg_enable(mfg)) { ++ ret = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE; ++ goto done; ++ } ++ } ++ ++ ret = PVRSRV_OK; ++done: ++ mutex_unlock(&mfg->set_power_state); ++ ++ return ret; ++} ++ ++ ++static void 
SysDevFeatureDepInit(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT64 ui64Features)
++{
++#if defined(SUPPORT_AXI_ACE_TEST)
++    if (ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
++    {
++        psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_CPU_ONLY;
++    } else
++#endif
++    {
++        psDevConfig->eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE;
++    }
++}
++
++PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
++{
++    PVRSRV_DEVICE_CONFIG *psDevConfig;
++    RGX_DATA *psRGXData;
++    RGX_TIMING_INFORMATION *psRGXTimingInfo;
++    PHYS_HEAP_CONFIG *pasPhysHeaps;
++    IMG_UINT32 uiPhysHeapCount;
++    PVRSRV_ERROR eError;
++    struct gpu_plat_if *mfg;
++
++#if defined(__linux__)
++    int iIrq;
++    struct resource *psDevMemRes = NULL;
++    struct platform_device *psDev;
++
++    psDev = to_platform_device((struct device *)pvOSDevice);
++#endif
++
++#if defined(__linux__)
++    dma_set_mask(pvOSDevice, DMA_BIT_MASK(40));
++#endif
++
++    xuantie_sysfs_init(pvOSDevice);
++
++    mfg = dt_hw_init(pvOSDevice);
++    if (IS_ERR(mfg)) {
++        if (PTR_ERR(mfg) == -EPROBE_DEFER)
++            return PVRSRV_ERROR_PROBE_DEFER;
++        else
++            return PVRSRV_ERROR_INIT_FAILURE;
++    }
++
++    psDevConfig = OSAllocZMem(sizeof(*psDevConfig) +
++                              sizeof(*psRGXData) +
++                              sizeof(*psRGXTimingInfo));
++    if (!psDevConfig)
++    {
++        return PVRSRV_ERROR_OUT_OF_MEMORY;
++    }
++
++    psRGXData = (RGX_DATA *)((IMG_CHAR *)psDevConfig + sizeof(*psDevConfig));
++    psRGXTimingInfo = (RGX_TIMING_INFORMATION *)((IMG_CHAR *)psRGXData + sizeof(*psRGXData));
++
++    eError = PhysHeapsCreate(&pasPhysHeaps, &uiPhysHeapCount);
++    if (eError)
++    {
++        goto ErrorFreeDevConfig;
++    }
++
++    /* Setup RGX specific timing data */
++    psRGXTimingInfo->ui32CoreClockSpeed = RGX_XUANTIE_CORE_CLOCK_SPEED;
++    psRGXTimingInfo->bEnableActivePM = IMG_TRUE;
++    psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
++    psRGXTimingInfo->ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS;
++
++    /* Set up the RGX data */
++    psRGXData->psRGXTimingInfo = psRGXTimingInfo;
++
++    /* Setup the device config */
++    psDevConfig->pvOSDevice = pvOSDevice;
++    psDevConfig->pszName = "xuantie";
++    psDevConfig->pszVersion = NULL;
++    psDevConfig->pfnSysDevFeatureDepInit = SysDevFeatureDepInit;
++
++    /* Device setup information */
++#if defined(__linux__)
++    psDevMemRes = platform_get_resource(psDev, IORESOURCE_MEM, 0);
++    if (psDevMemRes)
++    {
++        psDevConfig->sRegsCpuPBase.uiAddr = psDevMemRes->start;
++        psDevConfig->ui32RegsSize = (unsigned int)(psDevMemRes->end - psDevMemRes->start);
++    }
++    iIrq = platform_get_irq(psDev, 0);
++    if (iIrq >= 0)
++    {
++        psDevConfig->ui32IRQ = (IMG_UINT32) iIrq;
++    }
++#else
++    /* Device setup information */
++    psDevConfig->sRegsCpuPBase.uiAddr = 0xFFEF400000;
++    psDevConfig->ui32RegsSize = 0x100000;
++    psDevConfig->ui32IRQ = 102;
++#endif
++
++    PVR_LOG(("*****enter sys dev init %llx %d\r\n", psDevConfig->sRegsCpuPBase.uiAddr, psDevConfig->ui32IRQ));
++    psDevConfig->pasPhysHeaps = pasPhysHeaps;
++    psDevConfig->ui32PhysHeapCount = uiPhysHeapCount;
++    psDevConfig->eDefaultHeap = UMA_DEFAULT_HEAP;
++
++    /* Power management callbacks (the GPU is gated via the platform hooks above) */
++    psDevConfig->pfnPrePowerState = SysDevPrePowerState;
++    psDevConfig->pfnPostPowerState = SysDevPostPowerState;
++
++    psDevConfig->bHasFBCDCVersion31 = IMG_FALSE;
++
++    /* No clock frequency query callback */
++    psDevConfig->pfnClockFreqGet = NULL;
++
++    psDevConfig->hDevData = psRGXData;
++    psDevConfig->hSysData = mfg;
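++
++    /* Editor's note (illustrative, not part of the original source): with
++     * the simple-ondemand style thresholds configured below,
++     * ui32UpThreshold=50 and ui32DownDifferential=10 mean the governor
++     * raises the OPP when sampled utilisation exceeds 50%, and only drops
++     * it again once utilisation falls below 50% - 10% = 40%; the 10% gap is
++     * hysteresis that stops the clock bouncing between two OPPs at ~50%
++     * load. Samples are taken every ui32PollMs (50 ms here).
++     */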
++	psDevConfig->sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE;
++	psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency;
++	psDevConfig->sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage;
++	psDevConfig->sDVFS.sDVFSDeviceCfg.ui32PollMs = 50;
++	psDevConfig->sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 50;
++	psDevConfig->sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10;
++#endif
++
++	/* Setup other system specific stuff */
++#if defined(SUPPORT_ION)
++	IonInit(NULL);
++#endif
++
++	/* Set psDevConfig->pfnSysDevErrorNotify callback */
++	psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify;
++
++	*ppsDevConfig = psDevConfig;
++
++	return PVRSRV_OK;
++
++ErrorFreeDevConfig:
++	OSFreeMem(psDevConfig);
++	dt_hw_uninit(mfg);
++	return eError;
++}
++
++void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
++{
++	struct gpu_plat_if *mfg = (struct gpu_plat_if*)psDevConfig->hSysData;
++
++#if defined(SUPPORT_ION)
++	IonDeinit();
++#endif
++
++	PhysHeapsDestroy(psDevConfig->pasPhysHeaps);
++	xuantie_sysfs_uninit(psDevConfig->pvOSDevice);
++	OSFreeMem(psDevConfig);
++	dt_hw_uninit(mfg);
++}
++
++PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
++                                  IMG_UINT32 ui32IRQ,
++                                  const IMG_CHAR *pszName,
++                                  PFN_LISR pfnLISR,
++                                  void *pvData,
++                                  IMG_HANDLE *phLISRData)
++{
++	PVR_LOG(("Installing device LISR %s on IRQ %u", pszName, ui32IRQ));
++	PVR_UNREFERENCED_PARAMETER(hSysData);
++	return OSInstallSystemLISR(phLISRData, ui32IRQ, pszName, pfnLISR, pvData,
++	                           SYS_IRQ_FLAG_TRIGGER_DEFAULT);
++}
++
++PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
++{
++	return OSUninstallSystemLISR(hLISRData);
++}
++
++PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
++                          DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
++                          void *pvDumpDebugFile)
++{
++	PVR_UNREFERENCED_PARAMETER(psDevConfig);
++	PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
++	PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
++	return PVRSRV_OK;
++}
++
++/******************************************************************************
++ End of file (sysconfig.c)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/sysconfig.h b/drivers/gpu/drm/img-rogue/sysconfig.h
+new file mode 100644
+index 000000000000..adf39e20216a
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/sysconfig.h
+@@ -0,0 +1,59 @@
++/*************************************************************************/ /*!
++@File
++@Title          System Description Header
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    This header provides system-specific declarations and macros
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#if !defined(SYSCONFIG_H)
++#define SYSCONFIG_H
++
++#include "pvrsrv_device.h"
++#include "rgxdevice.h"
++
++
++#define RGX_XUANTIE_CORE_CLOCK_SPEED	792000000
++//#define RGX_XUANTIE_CORE_CLOCK_SPEED	18000000
++#define SYS_RGX_ACTIVE_POWER_LATENCY_MS (10)
++
++/*****************************************************************************
++ * system specific data structures
++ *****************************************************************************/
++
++#endif /* SYSCONFIG_H */
+diff --git a/drivers/gpu/drm/img-rogue/sysconfig_cmn.c b/drivers/gpu/drm/img-rogue/sysconfig_cmn.c
+new file mode 100644
+index 000000000000..ac878dd8f709
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/sysconfig_cmn.c
+@@ -0,0 +1,132 @@
++/*************************************************************************/ /*!
++@File
++@Title          Sysconfig layer common to all platforms
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Implements system layer functions common to all platforms
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include "img_types.h"
++#include "img_defs.h"
++#include "pvrsrv.h"
++#include "pvrsrv_device.h"
++#include "syscommon.h"
++#include "pvr_debug.h"
++
++void SysRGXErrorNotify(IMG_HANDLE hSysData,
++                       PVRSRV_ROBUSTNESS_NOTIFY_DATA *psErrorData)
++{
++	PVR_UNREFERENCED_PARAMETER(hSysData);
++
++#if defined(PVRSRV_NEED_PVR_DPF)
++	{
++		IMG_UINT32 ui32DbgLvl;
++
++		switch (psErrorData->eResetReason)
++		{
++			case RGX_CONTEXT_RESET_REASON_NONE:
++			case RGX_CONTEXT_RESET_REASON_GUILTY_LOCKUP:
++			case RGX_CONTEXT_RESET_REASON_INNOCENT_LOCKUP:
++			case RGX_CONTEXT_RESET_REASON_GUILTY_OVERRUNING:
++			case RGX_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING:
++			case RGX_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH:
++			case RGX_CONTEXT_RESET_REASON_GPU_ECC_OK:
++			case RGX_CONTEXT_RESET_REASON_FW_ECC_OK:
++			{
++				ui32DbgLvl = PVR_DBG_MESSAGE;
++				break;
++			}
++			case RGX_CONTEXT_RESET_REASON_GPU_ECC_HWR:
++			case RGX_CONTEXT_RESET_REASON_FW_EXEC_ERR:
++			{
++				ui32DbgLvl = PVR_DBG_WARNING;
++				break;
++			}
++			case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM:
++			case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM:
++			case RGX_CONTEXT_RESET_REASON_FW_ECC_ERR:
++			case RGX_CONTEXT_RESET_REASON_FW_WATCHDOG:
++			case RGX_CONTEXT_RESET_REASON_FW_PAGEFAULT:
++			case RGX_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR:
++			{
++				ui32DbgLvl = PVR_DBG_ERROR;
++				break;
++			}
++			default:
++			{
++				PVR_ASSERT(false && "Unhandled reset reason");
++				ui32DbgLvl = PVR_DBG_ERROR;
++				break;
++			}
++		}
++
++		if (psErrorData->pid > 0)
++		{
++			PVRSRVDebugPrintf(ui32DbgLvl, __FILE__, __LINE__, " PID %d experienced error %d",
++			                  psErrorData->pid, psErrorData->eResetReason);
++		}
++		else
++		{
++			PVRSRVDebugPrintf(ui32DbgLvl, __FILE__, __LINE__, " Device experienced error %d",
++			                  psErrorData->eResetReason);
++		}
++
++		switch (psErrorData->eResetReason)
++		{
++			case RGX_CONTEXT_RESET_REASON_WGP_CHECKSUM:
++			case RGX_CONTEXT_RESET_REASON_TRP_CHECKSUM:
++			{
++				PVRSRVDebugPrintf(ui32DbgLvl, __FILE__, __LINE__, " ExtJobRef 0x%x, DM %d",
++				                  psErrorData->uErrData.sChecksumErrData.ui32ExtJobRef,
++				                  psErrorData->uErrData.sChecksumErrData.eDM);
++				break;
++			}
++			default:
++			{
++				break;
++			}
++		}
++	}
++#else
++	PVR_UNREFERENCED_PARAMETER(psErrorData);
++#endif /* PVRSRV_NEED_PVR_DPF */
++}
++
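++/* Note (illustrative, not from the original sources): the switch above
++ * implements a three-tier severity policy. Expected or recovered events
++ * (lockups, overruns, hard context switches, corrected ECC) log at
++ * PVR_DBG_MESSAGE, corrected GPU/FW errors at PVR_DBG_WARNING, and
++ * unrecoverable faults (checksum mismatches, FW page faults, watchdog
++ * hits) at PVR_DBG_ERROR. A system layer opts in simply by assigning the
++ * callback in its device config, as sysconfig.c does:
++ *
++ *     psDevConfig->pfnSysDevErrorNotify = SysRGXErrorNotify;
++ */
++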
++/****************************************************************************** ++ End of file (sysconfig_cmn.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/sysinfo.h b/drivers/gpu/drm/img-rogue/sysinfo.h +new file mode 100644 +index 000000000000..a02d17434083 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sysinfo.h +@@ -0,0 +1,58 @@ ++/*************************************************************************/ /*! ++@File ++@Title System Description Header ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This header provides system-specific declarations and macros ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(__SYSINFO_H__) ++#define __SYSINFO_H__ ++ ++/*!< System specific poll/timeout details */ ++#define MAX_HW_TIME_US (500000) ++#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (1500)//(10000) ++#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000) ++#define WAIT_TRY_COUNT (10000) ++ ++#if defined(__linux__) ++#define SYS_RGX_DEV_NAME "rgx,xuantie" ++#endif ++ ++#define SYS_RGX_OF_COMPATIBLE "img,gpu" ++#endif /* !defined(__SYSINFO_H__) */ +diff --git a/drivers/gpu/drm/img-rogue/sysvalidation.h b/drivers/gpu/drm/img-rogue/sysvalidation.h +new file mode 100644 +index 000000000000..5f6d5f9c67dc +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/sysvalidation.h +@@ -0,0 +1,62 @@ ++/*************************************************************************/ /*! ++@File ++@Title Validation System APIs and structures ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This header provides system-specific declarations and macros ++ needed for hardware validation ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#if !defined(SYSVALIDATION_H) ++#define SYSVALIDATION_H ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) ++#include "img_types.h" ++#include "rgxdefs_km.h" ++#include "virt_validation_defs.h" ++ ++void SysInitVirtInitialization(IMG_UINT64 aui64OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], ++ IMG_UINT64 aui64OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]); ++ ++#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR) ++void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState); ++void SysSetTrustedDeviceAceEnabled(void); ++#endif ++#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */ ++ ++#endif /* !defined(SYSVALIDATION_H) */ +diff --git a/drivers/gpu/drm/img-rogue/tlclient.c b/drivers/gpu/drm/img-rogue/tlclient.c +new file mode 100644 +index 000000000000..dc3f17a46812 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/tlclient.c +@@ -0,0 +1,500 @@ ++/*************************************************************************/ /*! ++@File tlclient.c ++@Title Services Transport Layer shared API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Transport layer common API used in both clients and server ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++/* DESIGN NOTE ++ * This transport layer consumer-role API was created as a shared API when a ++ * client wanted to read the data of a TL stream from within the KM server ++ * driver. This was in addition to the existing clients supported externally ++ * by the UM client library component via PVR API layer. ++ * This shared API is thus used by the PVR TL API in the client library and ++ * by clients internal to the server driver module. It depends on ++ * client entry points of the TL and DEVMEM bridge modules. These entry points ++ * encapsulate from the TL shared API whether a direct bridge or an indirect ++ * (ioctl) bridge is used. ++ * One reason for needing this layer centres around the fact that some of the ++ * API functions make multiple bridge calls and the logic that glues these ++ * together is common regardless of client location. Further this layer has ++ * allowed the defensive coding that checks parameters to move into the PVR ++ * API layer where untrusted clients enter giving a more efficient KM code path. ++ */ ++ ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "pvr_debug.h" ++#include "osfunc.h" ++ ++#include "allocmem.h" ++#include "devicemem.h" ++ ++#include "tlclient.h" ++#include "pvrsrv_tlcommon.h" ++#include "client_pvrtl_bridge.h" ++ ++#if defined(__KERNEL__) ++#include "srvcore.h" ++#else ++#include "srvcore_intern.h" ++#endif ++ ++/* Defines/Constants ++ */ ++ ++#define NO_ACQUIRE 0xffffffffU ++ ++/* User-side stream descriptor structure. ++ */ ++typedef struct _TL_STREAM_DESC_ ++{ ++ /* Handle on kernel-side stream descriptor*/ ++ IMG_HANDLE hServerSD; ++ ++ /* Stream data buffer variables */ ++ DEVMEM_MEMDESC* psUMmemDesc; ++ IMG_PBYTE pBaseAddr; ++ ++ /* Offset in bytes into the circular buffer and valid only after ++ * an Acquire call and undefined after a release. */ ++ IMG_UINT32 uiReadOffset; ++ ++ /* Always a positive integer when the Acquire call returns and a release ++ * is outstanding. Undefined at all other times. */ ++ IMG_UINT32 uiReadLen; ++ ++ /* Counter indicating how many writes to a stream failed. ++ * It's used to reduce number of errors in output log. */ ++ IMG_UINT32 ui32WritesFailed; ++ ++ /* Name of the stream. */ ++ IMG_CHAR szName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; ++} TL_STREAM_DESC, *PTL_STREAM_DESC; ++ ++ ++IMG_INTERNAL ++PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection, ++ const IMG_CHAR* pszName, ++ IMG_UINT32 ui32Mode, ++ IMG_HANDLE* phSD) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ TL_STREAM_DESC *psSD = NULL; ++ IMG_HANDLE hTLPMR; ++ IMG_HANDLE hTLImportHandle; ++ IMG_DEVMEM_SIZE_T uiImportSize; ++ PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE; ++ ++ PVR_ASSERT(hDevConnection); ++ PVR_ASSERT(pszName); ++ PVR_ASSERT(phSD); ++ *phSD = NULL; ++ ++ /* Allocate memory for the stream descriptor object, initialise with ++ * "no data read" yet. */ ++ psSD = OSAllocZMem(sizeof(TL_STREAM_DESC)); ++ PVR_LOG_GOTO_IF_NOMEM(psSD, eError, e0); ++ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE; ++ ++ /* Send open stream request to kernel server to get stream handle and ++ * buffer cookie so we can get access to the buffer in this process. 
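++	 * The three devmem steps that follow then make that buffer visible
++	 * here: DevmemMakeLocalImportHandle() converts the server's export
++	 * cookie, DevmemLocalImport() turns it into a client handle on the
++	 * buffer's physical memory, and DevmemAcquireCpuVirtAddr() maps it
++	 * into this address space.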
*/
++	eError = BridgeTLOpenStream(GetBridgeHandle(hDevConnection), pszName,
++	                            ui32Mode, &psSD->hServerSD, &hTLPMR);
++	if (eError != PVRSRV_OK)
++	{
++		if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) &&
++		    (eError == PVRSRV_ERROR_TIMEOUT))
++		{
++			goto e1;
++		}
++		PVR_LOG_GOTO_IF_ERROR(eError, "BridgeTLOpenStream", e1);
++	}
++
++	/* Convert server export cookie into a cookie for use by this client */
++	eError = DevmemMakeLocalImportHandle(hDevConnection,
++	                                     hTLPMR, &hTLImportHandle);
++	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemMakeLocalImportHandle", e2);
++
++	uiMemFlags |= ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ?
++	              PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0ULL;
++	/* Now convert client cookie into a client handle on the buffer's
++	 * physical memory region */
++	eError = DevmemLocalImport(hDevConnection,
++	                           hTLImportHandle,
++	                           uiMemFlags,
++	                           &psSD->psUMmemDesc,
++	                           &uiImportSize,
++	                           "TLBuffer");
++	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalImport", e3);
++
++	/* Now map the memory into the virtual address space of this process. */
++	eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc, (void **)
++	                                  &psSD->pBaseAddr);
++	PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4);
++
++	/* Ignore error, not much that can be done */
++	(void) DevmemUnmakeLocalImportHandle(hDevConnection,
++	                                     hTLImportHandle);
++
++	/* Copy stream name */
++	OSStringLCopy(psSD->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE);
++
++	/* Return client descriptor handle to caller */
++	*phSD = psSD;
++	return PVRSRV_OK;
++
++/* Clean up post buffer setup */
++e4:
++	DevmemFree(psSD->psUMmemDesc);
++e3:
++	/* Pass the handle itself, not its address, to match the call made on
++	 * the success path above. */
++	(void) DevmemUnmakeLocalImportHandle(hDevConnection,
++	                                     hTLImportHandle);
++/* Clean up post stream open */
++e2:
++	BridgeTLCloseStream(GetBridgeHandle(hDevConnection), psSD->hServerSD);
++
++/* Clean up post allocation of the descriptor object */
++e1:
++	OSFreeMem(psSD);
++
++e0:
++	return eError;
++}
++
++IMG_INTERNAL
++PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD)
++{
++	PVRSRV_ERROR eError;
++	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
++
++	PVR_ASSERT(hDevConnection);
++	PVR_ASSERT(hSD);
++
++	/* Check the caller provided connection is valid */
++	if (!psSD->hServerSD)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: descriptor already "
++				"closed/not open", __func__));
++		return PVRSRV_ERROR_HANDLE_NOT_FOUND;
++	}
++
++	/* Check if acquire is outstanding, perform release if it is, ignore result
++	 * as there is not much we can do if it is an error other than close */
++	if (psSD->uiReadLen != NO_ACQUIRE)
++	{
++		(void) BridgeTLReleaseData(GetBridgeHandle(hDevConnection),
++		                           psSD->hServerSD, psSD->uiReadOffset, psSD->uiReadLen);
++		psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
++	}
++
++	/* Clean up DevMem resources used for this stream in this client */
++	DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc);
++
++	DevmemFree(psSD->psUMmemDesc);
++
++	/* Send close to server to clean up kernel mode resources for this
++	 * handle and release the memory. */
++	eError = DestroyServerResource(hDevConnection,
++	                               NULL,
++	                               BridgeTLCloseStream,
++	                               psSD->hServerSD);
++	PVR_LOG_IF_ERROR(eError, "BridgeTLCloseStream");
++
++	if (psSD->ui32WritesFailed != 0)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s() %u writes failed to stream %s (%c)",
++		        __func__, psSD->ui32WritesFailed, psSD->szName,
++		        psSD->ui32WritesFailed == IMG_UINT32_MAX ?
'T' : 'F')); ++ } ++ ++ OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC)); ++ OSFreeMem(psSD); ++ ++ return eError; ++} ++ ++IMG_INTERNAL ++PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection, ++ const IMG_CHAR *pszNamePattern, ++ IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], ++ IMG_UINT32 *pui32NumFound) ++{ ++ PVR_ASSERT(hDevConnection); ++ PVR_ASSERT(pszNamePattern); ++ PVR_ASSERT(pui32NumFound); ++ ++ return BridgeTLDiscoverStreams(GetBridgeHandle(hDevConnection), ++ pszNamePattern, ++ /* we need to treat this as one dimensional array */ ++ *pui32NumFound * PRVSRVTL_MAX_STREAM_NAME_SIZE, ++ (IMG_CHAR *) aszStreams, ++ pui32NumFound); ++} ++ ++IMG_INTERNAL ++PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_HANDLE hSD, ++ IMG_UINT8 **ppui8Data, ++ IMG_UINT32 ui32Size) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; ++ IMG_UINT32 ui32BufferOffset, ui32Unused; ++ ++ PVR_ASSERT(hDevConnection); ++ PVR_ASSERT(hSD); ++ PVR_ASSERT(ppui8Data); ++ PVR_ASSERT(ui32Size); ++ ++ eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), ++ psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32Size, &ui32Unused); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ *ppui8Data = psSD->pBaseAddr + ui32BufferOffset; ++ ++ return PVRSRV_OK; ++} ++ ++IMG_INTERNAL ++PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_HANDLE hSD, ++ IMG_UINT8 **ppui8Data, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32SizeMin, ++ IMG_UINT32 *pui32Available) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; ++ IMG_UINT32 ui32BufferOffset; ++ ++ PVR_ASSERT(hDevConnection); ++ PVR_ASSERT(hSD); ++ PVR_ASSERT(ppui8Data); ++ PVR_ASSERT(ui32Size); ++ ++ eError = BridgeTLReserveStream(GetBridgeHandle(hDevConnection), ++ psSD->hServerSD, &ui32BufferOffset, ui32Size, ui32SizeMin, ++ pui32Available); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ *ppui8Data = psSD->pBaseAddr + ui32BufferOffset; ++ ++ return PVRSRV_OK; ++} ++ ++IMG_INTERNAL ++PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_HANDLE hSD, ++ IMG_UINT32 ui32Size) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; ++ ++ PVR_ASSERT(hDevConnection); ++ PVR_ASSERT(hSD); ++ PVR_ASSERT(ui32Size); ++ ++ eError = BridgeTLCommitStream(GetBridgeHandle(hDevConnection), ++ psSD->hServerSD, ui32Size); ++ PVR_RETURN_IF_ERROR(eError); ++ ++ return PVRSRV_OK; ++} ++ ++IMG_INTERNAL ++PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection, ++ IMG_HANDLE hSD, ++ IMG_PBYTE* ppPacketBuf, ++ IMG_UINT32* pui32BufLen) ++{ ++ PVRSRV_ERROR eError; ++ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD; ++ ++ PVR_ASSERT(hDevConnection); ++ PVR_ASSERT(hSD); ++ PVR_ASSERT(ppPacketBuf); ++ PVR_ASSERT(pui32BufLen); ++ ++ /* In case of non-blocking acquires, which can return no data, and ++ * error paths ensure we clear the output parameters first. 
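++	 * A NULL buffer with a zero length is therefore a valid "no data"
++	 * outcome which non-blocking callers must be prepared to handle.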
*/
++	*ppPacketBuf = NULL;
++	*pui32BufLen = 0;
++
++	/* Check Acquire has not been called twice in a row without a release */
++	if (psSD->uiReadOffset != NO_ACQUIRE)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: acquire already "
++				"outstanding, ReadOffset(%u), ReadLength(%u)",
++				__func__, psSD->uiReadOffset, psSD->uiReadLen));
++		return PVRSRV_ERROR_RETRY;
++	}
++
++	/* Ask the kernel server for the next chunk of data to read */
++	eError = BridgeTLAcquireData(GetBridgeHandle(hDevConnection),
++	                             psSD->hServerSD, &psSD->uiReadOffset, &psSD->uiReadLen);
++	if (eError != PVRSRV_OK)
++	{
++		/* Mask reporting of the errors seen under normal operation */
++		if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) &&
++		    (eError != PVRSRV_ERROR_TIMEOUT) &&
++		    (eError != PVRSRV_ERROR_STREAM_READLIMIT_REACHED))
++		{
++			PVR_LOG_ERROR(eError, "BridgeTLAcquireData");
++		}
++		psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE;
++		return eError;
++	}
++	/* else PVRSRV_OK */
++
++	/* Return the data offset and length to the caller if bytes are available
++	 * to be read. Could be zero for non-blocking mode so pass back cleared
++	 * values above */
++	if (psSD->uiReadLen)
++	{
++		*ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset;
++		*pui32BufLen = psSD->uiReadLen;
++	}
++
++	return PVRSRV_OK;
++}
++
++static PVRSRV_ERROR _TLClientReleaseDataLen(
++		SHARED_DEV_CONNECTION hDevConnection,
++		TL_STREAM_DESC* psSD,
++		IMG_UINT32 uiReadLen)
++{
++	PVRSRV_ERROR eError;
++
++	/* The previous acquire did not return any data; this is a no-operation */
++	if (psSD->uiReadLen == 0)
++	{
++		return PVRSRV_OK;
++	}
++
++	/* Check release has not been called twice in a row without an acquire */
++	if (psSD->uiReadOffset == NO_ACQUIRE)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: no acquire to release", __func__));
++		return PVRSRV_ERROR_RETRY;
++	}
++
++	/* Inform the kernel to release the data from the buffer */
++	eError = BridgeTLReleaseData(GetBridgeHandle(hDevConnection),
++	                             psSD->hServerSD,
++	                             psSD->uiReadOffset, uiReadLen);
++	PVR_LOG_IF_ERROR(eError, "BridgeTLReleaseData");
++
++	/* Reset state to indicate no outstanding acquire */
++	psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
++
++	return eError;
++}
++
++IMG_INTERNAL
++PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD)
++{
++	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
++
++	PVR_ASSERT(hDevConnection);
++	PVR_ASSERT(hSD);
++
++	return _TLClientReleaseDataLen(hDevConnection, psSD, psSD->uiReadLen);
++}
++
++IMG_INTERNAL
++PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen)
++{
++	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
++
++	PVR_ASSERT(hDevConnection);
++	PVR_ASSERT(hSD);
++
++	/* Check the specified size is within the size returned by Acquire */
++	if (uiActualReadLen > psSD->uiReadLen)
++	{
++		PVR_DPF((PVR_DBG_ERROR, "%s: release length %u exceeds the acquired length %u",
++		        __func__, uiActualReadLen, psSD->uiReadLen));
++		return PVRSRV_ERROR_INVALID_PARAMS;
++	}
++
++	return _TLClientReleaseDataLen(hDevConnection, psSD, uiActualReadLen);
++}
++
++IMG_INTERNAL
++PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD,
++		IMG_UINT32 ui32Size,
++		IMG_BYTE *pui8Data)
++{
++	PVRSRV_ERROR eError;
++	TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
++
++	PVR_ASSERT(hDevConnection);
++	PVR_ASSERT(hSD);
++	PVR_ASSERT(ui32Size);
++	PVR_ASSERT(pui8Data);
++
++	eError = BridgeTLWriteData(GetBridgeHandle(hDevConnection),
++	                           psSD->hServerSD, ui32Size, pui8Data);
++
++	if (eError ==
PVRSRV_ERROR_STREAM_FULL) ++ { ++ if (psSD->ui32WritesFailed == 0) ++ { ++ PVR_LOG_ERROR(eError, "BridgeTLWriteData"); ++ } ++ if (psSD->ui32WritesFailed != IMG_UINT32_MAX) ++ { ++ psSD->ui32WritesFailed++; ++ } ++ } ++ else if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "BridgeTLWriteData"); ++ } ++ ++ return eError; ++} ++ ++/****************************************************************************** ++ End of file (tlclient.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/tlclient.h b/drivers/gpu/drm/img-rogue/tlclient.h +new file mode 100644 +index 000000000000..00f7aa8fc043 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/tlclient.h +@@ -0,0 +1,257 @@ ++/*************************************************************************/ /*! ++@File tlclient.h ++@Title Services Transport Layer shared API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Transport layer common API used in both clients and server ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef TLCLIENT_H ++#define TLCLIENT_H ++ ++ ++#include "img_defs.h" ++#include "pvrsrv_tlcommon.h" ++#include "pvrsrv_error.h" ++ ++ ++/* This value is used for the hSrvHandle argument in the client API when ++ * called directly from the kernel which will lead to a direct bridge access. 
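++ * As a purely illustrative sketch (stream name and mode flags are
++ * hypothetical, not taken from these sources), a kernel-side consumer
++ * follows the open/acquire/release/close pattern:
++ *
++ *     IMG_HANDLE hSD;
++ *     IMG_PBYTE  pbData;
++ *     IMG_UINT32 ui32Len;
++ *
++ *     if (TLClientOpenStream(DIRECT_BRIDGE_HANDLE, "example_stream", 0,
++ *                            &hSD) == PVRSRV_OK)
++ *     {
++ *         if (TLClientAcquireData(DIRECT_BRIDGE_HANDLE, hSD,
++ *                                 &pbData, &ui32Len) == PVRSRV_OK)
++ *         {
++ *             ... consume ui32Len bytes at pbData ...
++ *             TLClientReleaseData(DIRECT_BRIDGE_HANDLE, hSD);
++ *         }
++ *         TLClientCloseStream(DIRECT_BRIDGE_HANDLE, hSD);
++ *     }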
++ */
++#define DIRECT_BRIDGE_HANDLE	((IMG_HANDLE)0xDEADBEEFU)
++
++
++/*************************************************************************/ /*!
++ @Function      TLClientOpenStream
++ @Description   Open a descriptor onto an existing kernel transport stream.
++ @Input         hDevConnection  Address of a pointer to a connection object
++ @Input         pszName         Address of the stream name string, no longer
++                                than PRVSRVTL_MAX_STREAM_NAME_SIZE.
++ @Input         ui32Mode        Stream open mode flags (PVRSRV_STREAM_FLAG_*)
++ @Output        phSD            Address of a pointer to a stream object
++ @Return        PVRSRV_ERROR_NOT_FOUND        when named stream not found
++ @Return        PVRSRV_ERROR_ALREADY_OPEN     stream already open by another
++ @Return        PVRSRV_ERROR_STREAM_ERROR     internal driver state error
++ @Return        PVRSRV_ERROR_TIMEOUT          timed out, stream not found
++ @Return        PVRSRV_ERROR                  for other system codes
++*/ /**************************************************************************/
++
++IMG_INTERNAL
++PVRSRV_ERROR TLClientOpenStream(SHARED_DEV_CONNECTION hDevConnection,
++		const IMG_CHAR* pszName,
++		IMG_UINT32 ui32Mode,
++		IMG_HANDLE* phSD);
++
++
++/*************************************************************************/ /*!
++ @Function      TLClientCloseStream
++ @Description   Close and release the stream connection to Services kernel
++                server transport layer. Any outstanding Acquire will be
++                released.
++ @Input         hDevConnection  Address of a pointer to a connection object
++ @Input         hSD             Handle of the stream object to close
++ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle is not known
++ @Return        PVRSRV_ERROR_STREAM_ERROR     internal driver state error
++ @Return        PVRSRV_ERROR                  for system codes
++*/ /**************************************************************************/
++IMG_INTERNAL
++PVRSRV_ERROR TLClientCloseStream(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD);
++
++/*************************************************************************/ /*!
++ @Function      TLClientDiscoverStreams
++ @Description   Finds all streams whose names start with pszNamePattern and
++                end with a number.
++ @Input         hDevConnection Address of a pointer to a connection object
++ @Input         pszNamePattern Name pattern. Must be beginning of a string.
++ @Output        aszStreams     Array of the names of the discovered
++                               streams.
++ @inOut         pui32NumFound  When input, max number that can fit into
++                               aszStreams. When output, number of
++                               discovered streams.
++ @Return        PVRSRV_ERROR   for system codes
++*/ /**************************************************************************/
++IMG_INTERNAL
++PVRSRV_ERROR TLClientDiscoverStreams(SHARED_DEV_CONNECTION hDevConnection,
++		const IMG_CHAR *pszNamePattern,
++		IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
++		IMG_UINT32 *pui32NumFound);
++
++/*************************************************************************/ /*!
++ @Function      TLClientReserveStream
++ @Description   Reserves a region with given size in the stream. If the stream
++                is already reserved the function will return an error.
++ @Input         hDevConnection Address of a pointer to a connection object
++ @Input         hSD            Handle of the stream object to reserve in
++ @Output        ppui8Data      Pointer to the buffer
++ @Input         ui32Size       Size of the data
++ @Return        PVRSRV_OK or a PVRSRV_ERROR error code on failure
++*/ /**************************************************************************/
++IMG_INTERNAL
++PVRSRV_ERROR TLClientReserveStream(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD,
++		IMG_UINT8 **ppui8Data,
++		IMG_UINT32 ui32Size);
++
++/*************************************************************************/ /*!
++ @Function      TLClientReserveStream2
++ @Description   Reserves a region with given size in the stream. If the stream
++                is already reserved the function will return an error.
++ @Input         hDevConnection Address of a pointer to a connection object
++ @Input         hSD            Handle of the stream object to reserve in
++ @Output        ppui8Data      Pointer to the buffer
++ @Input         ui32Size       Size of the data
++ @Input         ui32SizeMin    Minimum size of the data
++ @Output        pui32Available Available space in the buffer
++ @Return        PVRSRV_OK or a PVRSRV_ERROR error code on failure
++*/ /**************************************************************************/
++IMG_INTERNAL
++PVRSRV_ERROR TLClientReserveStream2(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD,
++		IMG_UINT8 **ppui8Data,
++		IMG_UINT32 ui32Size,
++		IMG_UINT32 ui32SizeMin,
++		IMG_UINT32 *pui32Available);
++
++/*************************************************************************/ /*!
++ @Function      TLClientCommitStream
++ @Description   Commits previously reserved region in the stream and therefore
++                allows next reserves.
++                This function call has to be preceded by the call to
++                TLClientReserveStream or TLClientReserveStream2.
++ @Input         hDevConnection Address of a pointer to a connection object
++ @Input         hSD            Handle of the stream object to commit to
++ @Input         ui32Size       Size of the data
++ @Return        PVRSRV_OK or a PVRSRV_ERROR error code on failure
++*/ /**************************************************************************/
++IMG_INTERNAL
++PVRSRV_ERROR TLClientCommitStream(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD,
++		IMG_UINT32 ui32Size);
++
++/*************************************************************************/ /*!
++ @Function      TLClientAcquireData
++ @Description   When there is data available in the stream buffer this call
++                returns with the address and length of the data buffer the
++                client can safely read. This buffer may contain one or more
++                packets of data.
++                If no data is available then this call blocks until it becomes
++                available. However if the stream has been destroyed while
++                waiting then a resource unavailable error will be returned to
++                the caller. Clients must pair this call with a ReleaseData
++                call.
++ @Input         hDevConnection  Address of a pointer to a connection object
++ @Input         hSD             Handle of the stream object to read
++ @Output        ppPacketBuf     Address of a pointer to a byte buffer. On exit
++                                pointer contains address of buffer to read from
++ @Output        puiBufLen       Pointer to an integer. On exit it is the size
++                                of the data to read from the packet buffer
++ @Return        PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists
++ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known
++ @Return        PVRSRV_ERROR_STREAM_ERROR     internal driver state error
++ @Return        PVRSRV_ERROR_RETRY            release not called beforehand
++ @Return        PVRSRV_ERROR_TIMEOUT          block timed out, no data
++ @Return        PVRSRV_ERROR                  for other system codes
++*/ /**************************************************************************/
++IMG_INTERNAL
++PVRSRV_ERROR TLClientAcquireData(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD,
++		IMG_PBYTE* ppPacketBuf,
++		IMG_UINT32* puiBufLen);
++
++
++/*************************************************************************/ /*!
++ @Function      TLClientReleaseData
++ @Description   Called after client has read the stream data out of the buffer.
++                The data is subsequently flushed from the stream buffer to make
++                room for more data packets from the stream source.
++ @Input         hDevConnection  Address of a pointer to a connection object
++ @Input         hSD             Handle of the stream object to read
++ @Return        PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists
++ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known to TL
++ @Return        PVRSRV_ERROR_STREAM_ERROR     internal driver state error
++ @Return        PVRSRV_ERROR_RETRY            acquire not called beforehand
++ @Return        PVRSRV_ERROR                  for system codes
++*/ /**************************************************************************/
++IMG_INTERNAL
++PVRSRV_ERROR TLClientReleaseData(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD);
++
++/*************************************************************************/ /*!
++ @Function      TLClientReleaseDataLess
++ @Description   Called after client has read only some data out of the buffer
++                and wishes to complete the read early i.e. does not want to
++                read the full data that the acquire call returned e.g. read
++                just one packet from the stream.
++                The data is subsequently flushed from the stream buffer to make
++                room for more data packets from the stream source.
++ @Input         hDevConnection  Address of a pointer to a connection object
++ @Input         hSD             Handle of the stream object to read
++ @Input         uiActualReadLen Size of data read, in bytes. Must be on a TL
++                                packet boundary.
++ @Return        PVRSRV_ERROR_INVALID_PARAMS   when read length too big
++ @Return        PVRSRV_ERROR_RESOURCE_UNAVAILABLE when stream no longer exists
++ @Return        PVRSRV_ERROR_HANDLE_NOT_FOUND when SD handle not known to TL
++ @Return        PVRSRV_ERROR_STREAM_ERROR     internal driver state error
++ @Return        PVRSRV_ERROR_RETRY            acquire not called beforehand
++ @Return        PVRSRV_ERROR                  for system codes
++*/ /**************************************************************************/
++IMG_INTERNAL
++PVRSRV_ERROR TLClientReleaseDataLess(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen);
++
++/*************************************************************************/ /*!
++ @Function      TLClientWriteData
++ @Description   Writes data to the stream.
++ @Input         hDevConnection  Address of a pointer to a connection object
++ @Input         hSD             Handle of the stream object to write to
++ @Input         ui32Size        Size of the data
++ @Input         pui8Data        Pointer to data
++ @Return        PVRSRV_ERROR_STREAM_FULL      stream has insufficient space
++ @Return        PVRSRV_ERROR                  for other system codes
++*/ /**************************************************************************/
++IMG_INTERNAL
++PVRSRV_ERROR TLClientWriteData(SHARED_DEV_CONNECTION hDevConnection,
++		IMG_HANDLE hSD,
++		IMG_UINT32 ui32Size,
++		IMG_BYTE *pui8Data);
++
++
++#endif /* TLCLIENT_H */
++
++/******************************************************************************
++ End of file (tlclient.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/tlintern.c b/drivers/gpu/drm/img-rogue/tlintern.c
+new file mode 100644
+index 000000000000..70d8b09d6e40
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/tlintern.c
+@@ -0,0 +1,473 @@
++/*************************************************************************/ /*!
++@File
++@Title          Transport Layer kernel side API implementation.
++@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description    Transport Layer functions available to driver components in
++                the driver.
++@License        Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++//#define PVR_DPF_FUNCTION_TRACE_ON 1
++#undef PVR_DPF_FUNCTION_TRACE_ON
++#include "pvr_debug.h"
++
++#include "allocmem.h"
++#include "pvrsrv_error.h"
++#include "osfunc.h"
++#include "devicemem.h"
++
++#include "pvrsrv_tlcommon.h"
++#include "tlintern.h"
++
++/*
++ * Make functions
++ */
++PTL_STREAM_DESC
++TLMakeStreamDesc(PTL_SNODE psNode, IMG_UINT32 ui32Flags, IMG_HANDLE hReadEvent)
++{
++	PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC));
++	if (ps == NULL)
++	{
++		return NULL;
++	}
++	ps->psNode = psNode;
++	ps->ui32Flags = ui32Flags;
++	ps->hReadEvent = hReadEvent;
++	ps->uiRefCount = 1;
++
++	if (ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT)
++	{
++		ps->ui32ReadLimit = psNode->psStream->ui32Write;
++	}
++	return ps;
++}
++
++PTL_SNODE
++TLMakeSNode(IMG_HANDLE hReadEventObj, TL_STREAM *psStream, TL_STREAM_DESC *psRDesc)
++{
++	PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE));
++	if (ps == NULL)
++	{
++		return NULL;
++	}
++	ps->hReadEventObj = hReadEventObj;
++	ps->psStream = psStream;
++	ps->psRDesc = psRDesc;
++	psStream->psNode = ps;
++	return ps;
++}
++
++/*
++ * Transport Layer Global top variables and functions
++ */
++static TL_GLOBAL_DATA sTLGlobalData;
++
++TL_GLOBAL_DATA *TLGGD(void) /* TLGetGlobalData() */
++{
++	return &sTLGlobalData;
++}
++
++/* TLInit must only be called once at driver initialisation.
++ * An assert is provided to check this condition on debug builds.
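++ * It creates the global-data lock and the global TL event object; if the
++ * event object cannot be created the lock is destroyed again and the error
++ * is propagated so the driver does not start.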
++ */ ++PVRSRV_ERROR ++TLInit(void) ++{ ++ PVRSRV_ERROR eError; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(sTLGlobalData.hTLGDLock == NULL && sTLGlobalData.hTLEventObj == NULL); ++ ++ /* Allocate a lock for TL global data, to be used while updating the TL data. ++ * This is for making TL global data multi-thread safe */ ++ eError = OSLockCreate(&sTLGlobalData.hTLGDLock); ++ PVR_GOTO_IF_ERROR(eError, e0); ++ ++ /* Allocate the event object used to signal global TL events such as ++ * a new stream created */ ++ eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj); ++ PVR_GOTO_IF_ERROR(eError, e1); ++ ++ PVR_DPF_RETURN_OK; ++ ++/* Don't allow the driver to start up on error */ ++e1: ++ OSLockDestroy (sTLGlobalData.hTLGDLock); ++ sTLGlobalData.hTLGDLock = NULL; ++e0: ++ PVR_DPF_RETURN_RC (eError); ++} ++ ++static void RemoveAndFreeStreamNode(PTL_SNODE psRemove) ++{ ++ TL_GLOBAL_DATA* psGD = TLGGD(); ++ PTL_SNODE* last; ++ PTL_SNODE psn; ++ PVRSRV_ERROR eError; ++ ++ PVR_DPF_ENTERED; ++ ++ /* Unlink the stream node from the master list */ ++ PVR_ASSERT(psGD->psHead); ++ last = &psGD->psHead; ++ for (psn = psGD->psHead; psn; psn=psn->psNext) ++ { ++ if (psn == psRemove) ++ { ++ /* Other calling code may have freed and zeroed the pointers */ ++ if (psn->psRDesc) ++ { ++ OSFreeMem(psn->psRDesc); ++ psn->psRDesc = NULL; ++ } ++ if (psn->psStream) ++ { ++ OSFreeMem(psn->psStream); ++ psn->psStream = NULL; ++ } ++ *last = psn->psNext; ++ break; ++ } ++ last = &psn->psNext; ++ } ++ ++ /* Release the event list object owned by the stream node */ ++ if (psRemove->hReadEventObj) ++ { ++ eError = OSEventObjectDestroy(psRemove->hReadEventObj); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); ++ ++ psRemove->hReadEventObj = NULL; ++ } ++ ++ /* Release the memory of the stream node */ ++ OSFreeMem(psRemove); ++ ++ PVR_DPF_RETURN; ++} ++ ++static void FreeGlobalData(void) ++{ ++ PTL_SNODE psCurrent = sTLGlobalData.psHead; ++ PTL_SNODE psNext; ++ PVRSRV_ERROR eError; ++ ++ PVR_DPF_ENTERED; ++ ++ /* Clean up the SNODE list */ ++ if (psCurrent) ++ { ++ while (psCurrent) ++ { ++ psNext = psCurrent->psNext; ++ ++ /* Other calling code may have freed and zeroed the pointers */ ++ if (psCurrent->psRDesc) ++ { ++ OSFreeMem(psCurrent->psRDesc); ++ psCurrent->psRDesc = NULL; ++ } ++ if (psCurrent->psStream) ++ { ++ OSFreeMem(psCurrent->psStream); ++ psCurrent->psStream = NULL; ++ } ++ ++ /* Release the event list object owned by the stream node */ ++ if (psCurrent->hReadEventObj) ++ { ++ eError = OSEventObjectDestroy(psCurrent->hReadEventObj); ++ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy"); ++ ++ psCurrent->hReadEventObj = NULL; ++ } ++ ++ OSFreeMem(psCurrent); ++ psCurrent = psNext; ++ } ++ ++ sTLGlobalData.psHead = NULL; ++ } ++ ++ PVR_DPF_RETURN; ++} ++ ++void ++TLDeInit(void) ++{ ++ PVR_DPF_ENTERED; ++ ++ if (sTLGlobalData.uiClientCnt) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "TLDeInit transport layer but %d client streams are still connected", sTLGlobalData.uiClientCnt)); ++ sTLGlobalData.uiClientCnt = 0; ++ } ++ ++ FreeGlobalData(); ++ ++ /* Clean up the TL global event object */ ++ if (sTLGlobalData.hTLEventObj) ++ { ++ OSEventObjectDestroy(sTLGlobalData.hTLEventObj); ++ sTLGlobalData.hTLEventObj = NULL; ++ } ++ ++ /* Destroy the TL global data lock */ ++ if (sTLGlobalData.hTLGDLock) ++ { ++ OSLockDestroy (sTLGlobalData.hTLGDLock); ++ sTLGlobalData.hTLGDLock = NULL; ++ } ++ ++ PVR_DPF_RETURN; ++} ++ ++void TLAddStreamNode(PTL_SNODE psAdd) ++{ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psAdd); ++ 
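++	/* Push the new node onto the head of the global singly-linked list. */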
psAdd->psNext = TLGGD()->psHead; ++ TLGGD()->psHead = psAdd; ++ ++ PVR_DPF_RETURN; ++} ++ ++PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName) ++{ ++ TL_GLOBAL_DATA* psGD = TLGGD(); ++ PTL_SNODE psn; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(pszName); ++ ++ for (psn = psGD->psHead; psn; psn=psn->psNext) ++ { ++ if (psn->psStream && OSStringNCompare(psn->psStream->szName, pszName, PRVSRVTL_MAX_STREAM_NAME_SIZE)==0) ++ { ++ PVR_DPF_RETURN_VAL(psn); ++ } ++ } ++ ++ PVR_DPF_RETURN_VAL(NULL); ++} ++ ++PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc) ++{ ++ TL_GLOBAL_DATA* psGD = TLGGD(); ++ PTL_SNODE psn; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psDesc); ++ ++ for (psn = psGD->psHead; psn; psn=psn->psNext) ++ { ++ if (psn->psRDesc == psDesc || psn->psWDesc == psDesc) ++ { ++ PVR_DPF_RETURN_VAL(psn); ++ } ++ } ++ PVR_DPF_RETURN_VAL(NULL); ++} ++ ++static inline IMG_BOOL IsDigit(IMG_CHAR c) ++{ ++ return c >= '0' && c <= '9'; ++} ++ ++static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer, ++ IMG_UINT32 *pui32Number) ++{ ++ IMG_CHAR acTmp[11] = {0}; /* max 10 digits */ ++ IMG_UINT32 ui32Result; ++ IMG_UINT i; ++ ++ for (i = 0; i < sizeof(acTmp) - 1; i++) ++ { ++ if (!IsDigit(*pszBuffer)) ++ break; ++ acTmp[i] = *pszBuffer++; ++ } ++ ++ /* if there are no digits or there is something after the number */ ++ if (i == 0 || *pszBuffer != '\0') ++ return IMG_FALSE; ++ ++ if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK) ++ return IMG_FALSE; ++ ++ *pui32Number = ui32Result; ++ ++ return IMG_TRUE; ++} ++ ++IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, ++ IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], ++ IMG_UINT32 ui32Max) ++{ ++ TL_GLOBAL_DATA *psGD = TLGGD(); ++ PTL_SNODE psn; ++ IMG_UINT32 ui32Count = 0; ++ size_t uiLen; ++ ++ PVR_ASSERT(pszNamePattern); ++ ++ if ((uiLen = OSStringLength(pszNamePattern)) == 0) ++ return 0; ++ ++ for (psn = psGD->psHead; psn; psn = psn->psNext) ++ { ++ if (OSStringNCompare(pszNamePattern, psn->psStream->szName, uiLen) != 0) ++ continue; ++ ++ /* If aaszStreams is NULL we only count how many string match ++ * the given pattern. If it's a valid pointer we also return ++ * the names. 
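++	 * Callers can therefore invoke this twice: once with a NULL array to
++	 * learn the count, then again with storage for that many names.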
*/ ++ if (aaszStreams != NULL) ++ { ++ if (ui32Count >= ui32Max) ++ break; ++ ++ /* all of names are shorter than MAX and null terminated */ ++ OSStringLCopy(aaszStreams[ui32Count], psn->psStream->szName, ++ PRVSRVTL_MAX_STREAM_NAME_SIZE); ++ } ++ ++ ui32Count++; ++ } ++ ++ return ui32Count; ++} ++ ++PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc) ++{ ++ PTL_SNODE psn; ++ ++ PVR_DPF_ENTERED; ++ ++ psn = TLFindStreamNodeByDesc(psDesc); ++ if (psn == NULL) ++ PVR_DPF_RETURN_VAL(NULL); ++ ++ PVR_ASSERT(psDesc == psn->psWDesc); ++ ++ psn->uiWRefCount++; ++ psDesc->uiRefCount++; ++ ++ PVR_DPF_RETURN_VAL(psn); ++} ++ ++void TLReturnStreamNode(PTL_SNODE psNode) ++{ ++ psNode->uiWRefCount--; ++ psNode->psWDesc->uiRefCount--; ++ ++ PVR_ASSERT(psNode->uiWRefCount > 0); ++ PVR_ASSERT(psNode->psWDesc->uiRefCount > 0); ++} ++ ++IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove) ++{ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psRemove); ++ ++ /* If there is a client connected to this stream, defer stream's deletion */ ++ if (psRemove->psRDesc != NULL || psRemove->psWDesc != NULL) ++ { ++ PVR_DPF_RETURN_VAL(IMG_FALSE); ++ } ++ ++ /* Remove stream from TL_GLOBAL_DATA's list and free stream node */ ++ psRemove->psStream = NULL; ++ RemoveAndFreeStreamNode(psRemove); ++ ++ PVR_DPF_RETURN_VAL(IMG_TRUE); ++} ++ ++IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psNodeToRemove, ++ PTL_STREAM_DESC psSD) ++{ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psNodeToRemove); ++ PVR_ASSERT(psSD); ++ ++ /* Decrement reference count. For descriptor obtained by reader it must ++ * reach 0 (only single reader allowed) and for descriptors obtained by ++ * writers it must reach value greater or equal to 0 (multiple writers ++ * model). */ ++ psSD->uiRefCount--; ++ ++ if (psSD == psNodeToRemove->psRDesc) ++ { ++ PVR_ASSERT(0 == psSD->uiRefCount); ++ /* Remove stream descriptor (i.e. stream reader context) */ ++ psNodeToRemove->psRDesc = NULL; ++ } ++ else if (psSD == psNodeToRemove->psWDesc) ++ { ++ PVR_ASSERT(0 <= psSD->uiRefCount); ++ ++ psNodeToRemove->uiWRefCount--; ++ ++ /* Remove stream descriptor if reference == 0 */ ++ if (0 == psSD->uiRefCount) ++ { ++ psNodeToRemove->psWDesc = NULL; ++ } ++ } ++ ++ /* Do not Free Stream Node if there is a write reference (a producer ++ * context) to the stream */ ++ if (NULL != psNodeToRemove->psRDesc || NULL != psNodeToRemove->psWDesc || ++ 0 != psNodeToRemove->uiWRefCount) ++ { ++ PVR_DPF_RETURN_VAL(IMG_FALSE); ++ } ++ ++ /* Make stream pointer NULL to prevent it from being destroyed in ++ * RemoveAndFreeStreamNode. Cleanup of stream should be done by the ++ * calling context */ ++ psNodeToRemove->psStream = NULL; ++ RemoveAndFreeStreamNode(psNodeToRemove); ++ ++ PVR_DPF_RETURN_VAL(IMG_TRUE); ++} +diff --git a/drivers/gpu/drm/img-rogue/tlintern.h b/drivers/gpu/drm/img-rogue/tlintern.h +new file mode 100644 +index 000000000000..c3edce6b8cd1 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/tlintern.h +@@ -0,0 +1,345 @@ ++/*************************************************************************/ /*! ++@File ++@Title Transport Layer internals ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Transport Layer header used by TL internally ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef TLINTERN_H ++#define TLINTERN_H ++ ++ ++#include "devicemem_typedefs.h" ++#include "pvrsrv_tlcommon.h" ++#include "lock.h" ++#include "tlstream.h" ++ ++/* Forward declarations */ ++typedef struct _TL_SNODE_* PTL_SNODE; ++ ++/* To debug buffer utilisation enable this macro here and define ++ * PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c ++ * before the inclusion of pvr_debug.h. ++ * Issue pvrtutils 6 on target to see stream buffer utilisation. */ ++//#define TL_BUFFER_STATS 1 ++ ++/*! TL stream structure container. ++ * pbyBuffer holds the circular buffer. ++ * ui32Read points to the beginning of the buffer, ie to where data to ++ * Read begin. ++ * ui32Write points to the end of data that have been committed, ie this is ++ * where new data will be written. ++ * ui32Pending number of bytes reserved in last reserve call which have not ++ * yet been submitted. Therefore these data are not ready to ++ * be transported. ++ * hStreamWLock - provides atomic protection for the ui32Pending & ui32Write ++ * members of the structure for when they are checked and/or ++ * updated in the context of a stream writer (producer) ++ * calling DoTLStreamReserve() & TLStreamCommit(). ++ * - Reader context is not multi-threaded, only one client per ++ * stream is allowed. Also note the read context may be in an ++ * ISR which prevents a design where locks can be held in the ++ * AcquireData/ReleaseData() calls. 
Thus this lock only ++ * protects the stream members from simultaneous writers. ++ * ++ * ui32Read < ui32Write <= ui32Pending ++ * where < and <= operators are overloaded to make sense in a circular way. ++ */ ++typedef struct _TL_STREAM_ ++{ ++ IMG_CHAR szName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; /*!< String name identifier */ ++ TL_OPMODE eOpMode; /*!< Mode of Operation of TL Buffer */ ++ ++ IMG_BOOL bWaitForEmptyOnDestroy; /*!< Flag: On destroying a non-empty stream block until ++ * stream is drained. */ ++ IMG_BOOL bNoSignalOnCommit; /*!< Flag: Used to avoid the TL signalling waiting consumers ++ * that new data is available on every commit. Producers ++ * using this flag will need to manually signal when ++ * appropriate using the TLStreamSync() API */ ++ ++ void (*pfOnReaderOpenCallback)(void *pvArg); /*!< Optional on reader connect callback */ ++ void *pvOnReaderOpenUserData; /*!< On reader connect user data */ ++ void (*pfProducerCallback)(void); /*!< Optional producer callback of type TL_STREAM_SOURCECB */ ++ void *pvProducerUserData; /*!< Producer callback user data */ ++ ++ struct _TL_STREAM_ *psNotifStream; /*!< Pointer to the stream to which notification will be sent */ ++ ++ volatile IMG_UINT32 ui32Read; /*!< Pointer to the beginning of available data */ ++ volatile IMG_UINT32 ui32Write; /*!< Pointer to already committed data which are ready to be ++ * copied to user space */ ++ IMG_UINT32 ui32Pending; /*!< Count pending bytes reserved in buffer */ ++ IMG_UINT32 ui32Size; /*!< Buffer size */ ++ IMG_UINT32 ui32ThresholdUsageForSignal; /*!< Buffer usage threshold at which a TL writer signals a blocked/ ++ * waiting reader when transitioning from empty->non-empty */ ++ IMG_UINT32 ui32MaxPacketSize; /*! Max TL packet size */ ++ IMG_BYTE *pbyBuffer; /*!< Actual data buffer */ ++ ++ PTL_SNODE psNode; /*!< Ptr to parent stream node */ ++ DEVMEM_MEMDESC *psStreamMemDesc; /*!< MemDescriptor used to allocate buffer space through PMR */ ++ ++ IMG_HANDLE hProducerEvent; /*!< Handle to wait on if there is not enough space */ ++ IMG_HANDLE hProducerEventObj; /*!< Handle to signal blocked reserve calls */ ++ IMG_BOOL bSignalPending; /*!< Tracks if a "signal" is pending to be sent to a blocked/ ++ * waiting reader */ ++ ++ POS_LOCK hStreamWLock; /*!< Writers Lock for ui32Pending & ui32Write*/ ++ POS_LOCK hReadLock; /*!< Readers Lock for bReadPending & ui32Read*/ ++ IMG_BOOL bReadPending; /*!< Tracks if a read operation is pending or not*/ ++ IMG_BOOL bNoWrapPermanent; /*!< Flag: Prevents buffer wrap and subsequent data loss ++ * as well as resetting the read position on close. 
*/ ++ ++#if defined(TL_BUFFER_STATS) ++ IMG_UINT32 ui32CntReadFails; /*!< Tracks how many times reader failed to acquire read lock */ ++ IMG_UINT32 ui32CntReadSuccesses; /*!< Tracks how many times reader acquires read lock successfully */ ++ IMG_UINT32 ui32CntWriteSuccesses; /*!< Tracks how many times writer acquires read lock successfully */ ++ IMG_UINT32 ui32CntWriteWaits; /*!< Tracks how many times writer had to wait to acquire read lock */ ++ IMG_UINT32 ui32CntNumWriteSuccess; /*!< Tracks how many write operations were successful */ ++ IMG_UINT32 ui32BufferUt; /*!< Buffer utilisation high watermark, see TL_BUFFER_STATS above */ ++ IMG_UINT32 ui32MaxReserveWatermark; /*!< Max stream reserve size that was ever requested by a writer */ ++ IMG_UINT32 ui32SignalsSent; /*!< Number of signals that were actually sent by the write API */ ++ ATOMIC_T bNoReaderSinceFirstReserve; /*!< Tracks if a read has been done since the buffer was last found empty */ ++ IMG_UINT32 ui32TimeStart; /*!< Time at which a write (Reserve call) was done into an empty buffer. ++ * Guarded by hStreamWLock. */ ++ IMG_UINT32 ui32MinTimeToFullInUs; /*!< Minimum time taken to (nearly) fully fill an empty buffer. Guarded ++ * by hStreamWLock. */ ++ /* Behaviour counters, protected by hStreamWLock in case of ++ * multi-threaded access */ ++ IMG_UINT32 ui32NumCommits; /*!< Counters used for analysing stream performance, see ++ loc */ ++ IMG_UINT32 ui32SignalNotSent; /*!< Counters used for analysing stream performance, see ++ loc */ ++ IMG_UINT32 ui32ManSyncs; /*!< Counters used for analysing stream performance, see ++ loc */ ++ IMG_UINT32 ui32ProducerByteCount; /*!< Counters used for analysing stream performance, see ++ loc */ ++ ++ /* Not protected by the lock, incremented in the reader thread which is currently singular */ ++ IMG_UINT32 ui32AcquireRead1; /*!< Counters used for analysing stream performance, see ++ loc */ ++ IMG_UINT32 ui32AcquireRead2; /*!< Counters used for analysing stream performance, see ++ loc */ ++#endif ++ ++} TL_STREAM, *PTL_STREAM; ++ ++/* There needs to be enough space reserved in the buffer for 2 minimal packets ++ * and it needs to be aligned the same way the buffer is or there will be a ++ * compile error.*/ ++#define BUFFER_RESERVED_SPACE (2 * PVRSRVTL_PACKET_ALIGNMENT) ++ ++/* ensure the space reserved follows the buffer's alignment */ ++static_assert(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)), ++ "BUFFER_RESERVED_SPACE must be a multiple of PVRSRVTL_PACKET_ALIGNMENT"); ++ ++/* Define the largest value that a uint matching the ++ * PVRSRVTL_PACKET_ALIGNMENT size can hold */ ++#define MAX_UINT 0xffffFFFF ++ ++/*! Defines the value used for TL_STREAM.ui32Pending when no reserve is ++ * outstanding on the stream.
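As a standalone illustration of the ui32Read/ui32Write arithmetic documented for TL_STREAM above, here is a sketch of the committed-bytes computation, assuming both offsets stay within [0, ui32Size); it mirrors what TLStreamGetUT() in tlstream.c computes:

static IMG_UINT32 ExampleBytesCommitted(IMG_UINT32 ui32Read,
                                        IMG_UINT32 ui32Write,
                                        IMG_UINT32 ui32Size)
{
    if (ui32Write >= ui32Read)
    {
        return ui32Write - ui32Read;             /* linear, no wrap */
    }
    /* Writer wrapped: data spans [ui32Read, ui32Size) plus [0, ui32Write) */
    return (ui32Size - ui32Read) + ui32Write;
}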
*/ ++#define NOTHING_PENDING IMG_UINT32_MAX ++ ++ ++/* ++ * Transport Layer Stream Descriptor types/defs ++ */ ++typedef struct _TL_STREAM_DESC_ ++{ ++ PTL_SNODE psNode; /*!< Ptr to parent stream node */ ++ IMG_UINT32 ui32Flags; /*!< Flags supplied by client on stream open */ ++ IMG_HANDLE hReadEvent; /*!< For wait call (only used/set in reader descriptors) */ ++ IMG_INT uiRefCount; /*!< Reference count to the SD */ ++ ++#if defined(TL_BUFFER_STATS) ++ /* Behaviour counters, no multi-threading protection needed as they are ++ * incremented in a single thread due to only supporting one reader ++ * at present */ ++ IMG_UINT32 ui32AcquireCount; /*!< Counters used for analysing stream performance, see ++ loc */ ++ IMG_UINT32 ui32NoData; /*!< Counters used for analysing stream performance, see ++ loc */ ++ IMG_UINT32 ui32NoDataSleep; /*!< Counters used for analysing stream performance, see ++ loc */ ++ IMG_UINT32 ui32Signalled; /*!< Counters used for analysing stream performance, see ++ loc */ ++ IMG_UINT32 ui32TimeoutEmpty; /*!< Counters used for analysing stream performance, see ++ loc */ ++ IMG_UINT32 ui32TimeoutData; /*!< Counters used for analysing stream performance, see ++ loc */ ++#endif ++ IMG_UINT32 ui32ReadLimit; /*!< Limit buffer reads to data present in the ++ buffer at the time of stream open. */ ++ IMG_UINT32 ui32ReadLen; /*!< Size of data returned by initial Acquire */ ++} TL_STREAM_DESC, *PTL_STREAM_DESC; ++ ++PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3); ++ ++#define TL_STREAM_KM_FLAG_MASK 0xFFFF0000 ++#define TL_STREAM_FLAG_TEST 0x10000000 ++#define TL_STREAM_FLAG_WRAPREAD 0x00010000 ++ ++#define TL_STREAM_UM_FLAG_MASK 0x0000FFFF ++ ++#if defined(TL_BUFFER_STATS) ++# define TL_COUNTER_INC(a) ((a)++) ++# define TL_COUNTER_ADD(a,b) ((a) += (b)) ++#else ++# define TL_COUNTER_INC(a) (void)(0) ++# define TL_COUNTER_ADD(a,b) (void)(0) ++#endif ++/* ++ * Transport Layer stream list node ++ */ ++typedef struct _TL_SNODE_ ++{ ++ struct _TL_SNODE_* psNext; /*!< Linked list next element */ ++ IMG_HANDLE hReadEventObj; /*!< Readers 'wait for data' event */ ++ PTL_STREAM psStream; /*!< TL Stream object */ ++ IMG_INT uiWRefCount; /*!< Stream writer reference count */ ++ PTL_STREAM_DESC psRDesc; /*!< Stream reader 0 or ptr only */ ++ PTL_STREAM_DESC psWDesc; /*!< Stream writer 0 or ptr only */ ++} TL_SNODE; ++ ++PTL_SNODE TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4); ++ ++/* ++ * Transport Layer global top types and variables ++ * Use access function to obtain pointer. ++ * ++ * hTLGDLock - provides atomicity over read/check/write operations and ++ * sequence of operations on uiClientCnt, psHead list of SNODEs and ++ * the immediate members in a list element SNODE structure. ++ * - This larger scope of responsibility for this lock helps avoid ++ * the need for a lock in the SNODE structure. ++ * - Lock held in the client (reader) context when streams are ++ * opened/closed and in the server (writer) context when streams ++ * are created/opened/closed. ++ */ ++typedef struct _TL_GDATA_ ++{ ++ IMG_HANDLE hTLEventObj; /* Global TL signal object, new streams, etc */ ++ ++ IMG_UINT uiClientCnt; /* Counter to track the number of client stream connections.
*/ ++ PTL_SNODE psHead; /* List of TL streams and associated client handle */ ++ ++ POS_LOCK hTLGDLock; /* Lock for structure AND psHead SNODE list */ ++} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA; ++ ++/* ++ * Transport Layer Internal Kernel-Mode Server API ++ */ ++TL_GLOBAL_DATA* TLGGD(void); /* TLGetGlobalData() */ ++ ++PVRSRV_ERROR TLInit(void); ++void TLDeInit(void); ++ ++void TLAddStreamNode(PTL_SNODE psAdd); ++PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName); ++PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc); ++IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern, ++ IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE], ++ IMG_UINT32 ui32Max); ++PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc); ++void TLReturnStreamNode(PTL_SNODE psNode); ++ ++/****************************************************************************** ++ Function Name : TLTryRemoveStreamAndFreeStreamNode ++ ++ Inputs : PTL_SNODE Pointer to the TL_SNODE whose stream is requested ++ to be removed from TL_GLOBAL_DATA's list ++ ++ Return Value : IMG_TRUE - If the stream was made NULL and this ++ TL_SNODE was removed from the ++ TL_GLOBAL_DATA's list ++ ++ IMG_FALSE - If the stream wasn't made NULL as there ++ is a client connected to this stream ++ ++ Description : If there is no client currently connected to this stream then, ++ This function removes this TL_SNODE from the ++ TL_GLOBAL_DATA's list. The caller is responsible for the ++ cleanup of the TL_STREAM whose TL_SNODE may be removed ++ ++ Otherwise, this function does nothing ++******************************************************************************/ ++IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove); ++ ++/****************************************************************************** ++ Function Name : TLUnrefDescAndTryFreeStreamNode ++ ++ Inputs : PTL_SNODE Pointer to the TL_SNODE whose descriptor is ++ requested to be removed ++ : PTL_STREAM_DESC Pointer to the STREAM_DESC ++ ++ Return Value : IMG_TRUE - If this TL_SNODE was removed from the ++ TL_GLOBAL_DATA's list ++ ++ IMG_FALSE - Otherwise ++ ++ Description : This function removes the stream descriptor from this TL_SNODE ++ and, if there is no writer (producer context) currently bound to this ++ stream, this function removes this TL_SNODE from the TL_GLOBAL_DATA's ++ list. The caller is responsible for the cleanup of the TL_STREAM ++ whose TL_SNODE may be removed ++******************************************************************************/ ++IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psRemove, PTL_STREAM_DESC psSD); ++ ++/* ++ * Transport Layer stream interface to server part declared here to avoid ++ * circular dependency. 
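TLDiscoverStreamNodes(), declared above, supports a two-pass calling pattern: pass a NULL array first to size the result, then call again with storage for the names. A hypothetical caller sketch (the "hwperf" prefix and the OSAllocMem allocation are illustrative assumptions; the TL server performs these calls under hTLGDLock):

static IMG_UINT32 ExampleDiscoverByPrefix(void)
{
    IMG_UINT32 ui32Found;
    IMG_CHAR (*aaszNames)[PRVSRVTL_MAX_STREAM_NAME_SIZE];

    /* Pass 1: a NULL array only counts the matching streams. */
    ui32Found = TLDiscoverStreamNodes("hwperf", NULL, 0);
    if (ui32Found == 0)
    {
        return 0;
    }

    aaszNames = OSAllocMem(ui32Found * PRVSRVTL_MAX_STREAM_NAME_SIZE);
    if (aaszNames == NULL)
    {
        return 0;
    }

    /* Pass 2: copy out up to ui32Found NUL-terminated names. */
    ui32Found = TLDiscoverStreamNodes("hwperf", aaszNames, ui32Found);

    /* ... consume the names ... */
    OSFreeMem(aaszNames);
    return ui32Found;
}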
++ */ ++IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream, ++ IMG_BOOL bDisableCallback, ++ IMG_UINT32* puiReadOffset); ++PVRSRV_ERROR TLStreamAdvanceReadPos(PTL_STREAM psStream, ++ IMG_UINT32 uiReadLen, ++ IMG_UINT32 uiOrigReadLen); ++void TLStreamResetReadPos(PTL_STREAM psStream); ++ ++DEVMEM_MEMDESC* TLStreamGetBufferPointer(PTL_STREAM psStream); ++IMG_BOOL TLStreamOutOfData(IMG_HANDLE psStream); ++ ++/****************************************************************************** ++ Function Name : TLStreamDestroy ++ ++ Inputs : PTL_STREAM Pointer to the TL_STREAM to be destroyed ++ ++ Description : This function performs all the clean-up operations required for ++ destruction of this stream ++******************************************************************************/ ++void TLStreamDestroy(PTL_STREAM psStream); ++ ++/* ++ * Test related functions ++ */ ++PVRSRV_ERROR TUtilsInit(PVRSRV_DEVICE_NODE *psDeviceNode); ++PVRSRV_ERROR TUtilsDeinit(PVRSRV_DEVICE_NODE *psDeviceNode); ++ ++ ++#endif /* TLINTERN_H */ ++/****************************************************************************** ++ End of file (tlintern.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/tlserver.c b/drivers/gpu/drm/img-rogue/tlserver.c +new file mode 100644 +index 000000000000..c250dd3dc618 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/tlserver.c +@@ -0,0 +1,747 @@ ++/*************************************************************************/ /*! ++@File ++@Title KM server Transport Layer implementation ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Main bridge APIs for Transport Layer client functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#include "img_defs.h" ++ ++/*#define PVR_DPF_FUNCTION_TRACE_ON 1*/ ++#undef PVR_DPF_FUNCTION_TRACE_ON ++#include "pvr_debug.h" ++ ++#include "connection_server.h" ++#include "allocmem.h" ++#include "devicemem.h" ++ ++#include "tlintern.h" ++#include "tlstream.h" ++#include "tlserver.h" ++ ++#include "pvrsrv_tlstreams.h" ++#define NO_STREAM_WAIT_PERIOD_US 2000000ULL ++#define NO_DATA_WAIT_PERIOD_US 500000ULL ++#define NO_ACQUIRE 0xffffffffU ++ ++ ++/* ++ * Transport Layer Client API Kernel-Mode bridge implementation ++ */ ++PVRSRV_ERROR ++TLServerOpenStreamKM(const IMG_CHAR* pszName, ++ IMG_UINT32 ui32Mode, ++ PTL_STREAM_DESC* ppsSD, ++ PMR** ppsTLPMR) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PVRSRV_ERROR eErrorEO = PVRSRV_OK; ++ PTL_SNODE psNode; ++ PTL_STREAM psStream; ++ TL_STREAM_DESC *psNewSD = NULL; ++ IMG_HANDLE hEvent; ++ IMG_BOOL bIsWriteOnly = ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? ++ IMG_TRUE : IMG_FALSE; ++ IMG_BOOL bResetOnOpen = ui32Mode & PVRSRV_STREAM_FLAG_RESET_ON_OPEN ? ++ IMG_TRUE : IMG_FALSE; ++ IMG_BOOL bNoOpenCB = ui32Mode & PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK ? ++ IMG_TRUE : IMG_FALSE; ++ PTL_GLOBAL_DATA psGD = TLGGD(); ++ ++#if defined(PVR_DPF_FUNCTION_TRACE_ON) ++ PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode)); ++#endif ++ ++ PVR_ASSERT(pszName); ++ ++ /* Acquire TL_GLOBAL_DATA lock here, as if the following TLFindStreamNodeByName ++ * returns NON NULL PTL_SNODE, we try updating the global data client count and ++ * PTL_SNODE's psRDesc and we want to make sure the TL_SNODE is valid (eg. 
has ++ * not been deleted) while we are updating it ++ */ ++ OSLockAcquire (psGD->hTLGDLock); ++ ++ psNode = TLFindStreamNodeByName(pszName); ++ if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT)) ++ { /* Blocking code to wait for stream to be created if it does not exist */ ++ eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent); ++ PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectOpen", e0); ++ ++ do ++ { ++ if ((psNode = TLFindStreamNodeByName(pszName)) == NULL) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName)); ++ ++ /* Release TL_GLOBAL_DATA lock before sleeping */ ++ OSLockRelease (psGD->hTLGDLock); ++ ++ /* Will exit OK or with timeout, both cases safe to ignore */ ++ eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD_US); ++ ++ /* Acquire lock after waking up */ ++ OSLockAcquire (psGD->hTLGDLock); ++ } ++ } ++ while ((psNode == NULL) && (eErrorEO == PVRSRV_OK)); ++ ++ eError = OSEventObjectClose(hEvent); ++ PVR_LOG_GOTO_IF_ERROR (eError, "OSEventObjectClose", e0); ++ } ++ ++ /* Make sure we have found a stream node after wait/search */ ++ if (psNode == NULL) ++ { ++ /* Did we exit the wait with timeout, inform caller */ ++ if (eErrorEO == PVRSRV_ERROR_TIMEOUT) ++ { ++ eError = eErrorEO; ++ } ++ else ++ { ++ eError = PVRSRV_ERROR_NOT_FOUND; ++ PVR_DPF((PVR_DBG_ERROR, "Stream \"%s\" does not exist", pszName)); ++ } ++ goto e0; ++ } ++ ++ psStream = psNode->psStream; ++ ++ /* Allocate memory for the stream. The memory will be allocated with the ++ * first call. */ ++ eError = TLAllocSharedMemIfNull(psStream); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate memory for stream" ++ " \"%s\"", pszName)); ++ goto e0; ++ } ++ ++ if (bIsWriteOnly) ++ { ++ ++ /* If psWDesc == NULL it means that this is the first attempt ++ * to open stream for write. If yes create the descriptor or increment ++ * reference count otherwise. */ ++ if (psNode->psWDesc == NULL) ++ { ++ psNewSD = TLMakeStreamDesc(psNode, ui32Mode, NULL); ++ psNode->psWDesc = psNewSD; ++ } ++ else ++ { ++ psNewSD = psNode->psWDesc; ++ psNode->psWDesc->uiRefCount++; ++ } ++ ++ PVR_LOG_GOTO_IF_NOMEM(psNewSD, eError, e0); ++ ++ psNode->uiWRefCount++; ++ } ++ else ++ { ++ /* Only one reader per stream supported */ ++ if (psNode->psRDesc != NULL) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Cannot open \"%s\" stream, stream already" ++ " opened", pszName)); ++ eError = PVRSRV_ERROR_ALREADY_OPEN; ++ goto e0; ++ } ++ ++ /* Create an event handle for this client to wait on when no data in ++ * stream buffer. */ ++ eError = OSEventObjectOpen(psNode->hReadEventObj, &hEvent); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_LOG_ERROR(eError, "OSEventObjectOpen"); ++ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT; ++ goto e0; ++ } ++ ++ psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent); ++ psNode->psRDesc = psNewSD; ++ ++ if (!psNewSD) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor")); ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e1; ++ } ++ ++ PVR_DPF((PVR_DBG_VERBOSE, ++ "TLServerOpenStreamKM evList=%p, evObj=%p", ++ psNode->hReadEventObj, ++ psNode->psRDesc->hReadEvent)); ++ } ++ ++ /* Copy the import handle back to the user mode API to enable access to ++ * the stream buffer from user-mode process. */ ++ eError = DevmemLocalGetImportHandle(TLStreamGetBufferPointer(psStream), ++ (void**) ppsTLPMR); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemLocalGetImportHandle", e2); ++ ++ psGD->uiClientCnt++; ++ ++ /* Global data updated. 
Now release global lock */ ++ OSLockRelease (psGD->hTLGDLock); ++ ++ *ppsSD = psNewSD; ++ ++ if (bResetOnOpen) ++ { ++ TLStreamReset(psStream); ++ } ++ ++ /* This callback is executed only on reader open. There are some actions ++ * executed on reader open that don't make much sense for writers e.g. ++ * injection on time synchronisation packet into the stream. */ ++ if (!bIsWriteOnly && psStream->pfOnReaderOpenCallback != NULL && !bNoOpenCB) ++ { ++ psStream->pfOnReaderOpenCallback(psStream->pvOnReaderOpenUserData); ++ } ++ ++ /* psNode->uiWRefCount is set to '1' on stream create so the first open ++ * is '2'. */ ++ if (bIsWriteOnly && psStream->psNotifStream != NULL && ++ psNode->uiWRefCount == 2) ++ { ++ TLStreamMarkStreamOpen(psStream); ++ } ++ ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Stream %s opened for %s", __func__, pszName, ++ ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? "write" : "read")); ++ ++ PVR_DPF_RETURN_OK; ++ ++e2: ++ OSFreeMem(psNewSD); ++e1: ++ if (!bIsWriteOnly) ++ OSEventObjectClose(hEvent); ++e0: ++ OSLockRelease (psGD->hTLGDLock); ++ PVR_DPF_RETURN_RC (eError); ++} ++ ++PVRSRV_ERROR ++TLServerCloseStreamKM(PTL_STREAM_DESC psSD) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PTL_GLOBAL_DATA psGD = TLGGD(); ++ PTL_SNODE psNode; ++ PTL_STREAM psStream; ++ IMG_BOOL bDestroyStream; ++ IMG_BOOL bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ? ++ IMG_TRUE : IMG_FALSE; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psSD); ++ ++ /* Quick exit if there are no streams */ ++ if (psGD->psHead == NULL) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); ++ } ++ ++ /* Check stream still valid */ ++ psNode = TLFindStreamNodeByDesc(psSD); ++ if ((psNode == NULL) || (psNode != psSD->psNode)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); ++ } ++ ++ /* Since the descriptor is valid, the stream should not have been made NULL */ ++ PVR_ASSERT (psNode->psStream); ++ ++ /* Save the stream's reference in-case its destruction is required after this ++ * client is removed */ ++ psStream = psNode->psStream; ++ ++ /* Acquire TL_GLOBAL_DATA lock as the following TLRemoveDescAndTryFreeStreamNode ++ * call will update the TL_SNODE's descriptor value */ ++ OSLockAcquire (psGD->hTLGDLock); ++ ++ /* Close event handle because event object list might be destroyed in ++ * TLUnrefDescAndTryFreeStreamNode(). */ ++ if (!bIsWriteOnly) ++ { ++ /* Reset the read position on close if the stream requires it. */ ++ TLStreamResetReadPos(psStream); ++ ++ /* Close and free the event handle resource used by this descriptor */ ++ eError = OSEventObjectClose(psSD->hReadEvent); ++ if (eError != PVRSRV_OK) ++ { ++ /* Log error but continue as it seems best */ ++ PVR_LOG_ERROR(eError, "OSEventObjectClose"); ++ eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT; ++ } ++ } ++ else if (psNode->uiWRefCount == 2 && psStream->psNotifStream != NULL) ++ { ++ /* psNode->uiWRefCount is set to '1' on stream create so the last close ++ * before destruction is '2'. */ ++ TLStreamMarkStreamClose(psStream); ++ } ++ ++ /* Remove descriptor from stream object/list */ ++ bDestroyStream = TLUnrefDescAndTryFreeStreamNode (psNode, psSD); ++ ++ /* Check the counter is sensible after input data validated. 
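For orientation, a hypothetical writer-side caller of the bridge function above; the stream name is made up, and the two flags shown are ones TLServerOpenStreamKM() decodes (write-only open plus a blocking wait for the stream to appear):

static PVRSRV_ERROR ExampleOpenWriter(PTL_STREAM_DESC *ppsSD, PMR **ppsTLPMR)
{
    /* OPEN_WO selects the multiple-writer path; OPEN_WAIT sleeps (with a
     * timeout) until a producer has created the stream. */
    return TLServerOpenStreamKM("example_stream",
                                PVRSRV_STREAM_FLAG_OPEN_WO |
                                PVRSRV_STREAM_FLAG_OPEN_WAIT,
                                ppsSD, ppsTLPMR);
}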
*/ ++ PVR_ASSERT(psGD->uiClientCnt > 0); ++ psGD->uiClientCnt--; ++ ++ OSLockRelease (psGD->hTLGDLock); ++ ++ /* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */ ++ if (bDestroyStream) ++ { ++ TLStreamDestroy (psStream); ++ psStream = NULL; ++ } ++ ++ PVR_DPF((PVR_DBG_VERBOSE, "%s: Stream closed", __func__)); ++ ++ /* Free the descriptor if ref count reaches 0. */ ++ if (psSD->uiRefCount == 0) ++ { ++ /* Free the stream descriptor object */ ++ OSFreeMem(psSD); ++ } ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++PVRSRV_ERROR ++TLServerReserveStreamKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32* ui32BufferOffset, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32SizeMin, ++ IMG_UINT32* pui32Available) ++{ ++ TL_GLOBAL_DATA* psGD = TLGGD(); ++ PTL_SNODE psNode; ++ IMG_UINT8* pui8Buffer = NULL; ++ PVRSRV_ERROR eError; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psSD); ++ ++ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ /* Quick exit if there are no streams */ ++ if (psGD->psHead == NULL) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); ++ } ++ ++ /* Acquire the global lock. We have to be sure that no one modifies ++ * the list while we are looking for our stream. */ ++ OSLockAcquire(psGD->hTLGDLock); ++ /* Check stream still valid */ ++ psNode = TLFindAndGetStreamNodeByDesc(psSD); ++ OSLockRelease(psGD->hTLGDLock); ++ ++ if ((psNode == NULL) || (psNode != psSD->psNode)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); ++ } ++ ++ ++ /* Since we have a valid stream descriptor, the stream should not have been ++ * made NULL by any producer context. */ ++ PVR_ASSERT (psNode->psStream); ++ ++ /* The TL writers that currently land here are at a very low to none risk ++ * to breach max TL packet size constraint (even if there is no reader ++ * connected to the TL stream and hence eventually will cause the TL stream ++ * to be full). Hence no need to know the status of TL stream reader ++ * connection. ++ */ ++ eError = TLStreamReserve2(psNode->psStream, &pui8Buffer, ui32Size, ++ ui32SizeMin, pui32Available, NULL); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "Failed to reserve %u (%u, %u) bytes in the stream, error %s.", ++ ui32Size, ui32SizeMin, *pui32Available, PVRSRVGETERRORSTRING(eError))); ++ } ++ else if (pui8Buffer == NULL) ++ { ++ PVR_DPF((PVR_DBG_WARNING, "Not enough space in the stream.")); ++ eError = PVRSRV_ERROR_STREAM_FULL; ++ } ++ else ++ { ++ *ui32BufferOffset = pui8Buffer - psNode->psStream->pbyBuffer; ++ PVR_ASSERT(*ui32BufferOffset < psNode->psStream->ui32Size); ++ } ++ ++ OSLockAcquire(psGD->hTLGDLock); ++ TLReturnStreamNode(psNode); ++ OSLockRelease(psGD->hTLGDLock); ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++PVRSRV_ERROR ++TLServerCommitStreamKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32 ui32Size) ++{ ++ TL_GLOBAL_DATA* psGD = TLGGD(); ++ PTL_SNODE psNode; ++ PVRSRV_ERROR eError; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psSD); ++ ++ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ /* Quick exit if there are no streams */ ++ if (psGD->psHead == NULL) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); ++ } ++ ++ /* Acquire the global lock. We have to be sure that no one modifies ++ * the list while we are looking for our stream. 
*/ ++ OSLockAcquire(psGD->hTLGDLock); ++ /* Check stream still valid */ ++ psNode = TLFindAndGetStreamNodeByDesc(psSD); ++ OSLockRelease(psGD->hTLGDLock); ++ ++ if ((psNode == NULL) || (psNode != psSD->psNode)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); ++ } ++ ++ /* Since we have a valid stream descriptor, the stream should not have been ++ * made NULL by any producer context. */ ++ PVR_ASSERT (psNode->psStream); ++ ++ eError = TLStreamCommit(psNode->psStream, ui32Size); ++ PVR_LOG_IF_ERROR(eError, "TLStreamCommit"); ++ ++ OSLockAcquire(psGD->hTLGDLock); ++ TLReturnStreamNode(psNode); ++ OSLockRelease(psGD->hTLGDLock); ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++PVRSRV_ERROR ++TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern, ++ IMG_UINT32 ui32Size, ++ IMG_CHAR *pszStreams, ++ IMG_UINT32 *pui32NumFound) ++{ ++ PTL_SNODE psNode = NULL; ++ IMG_CHAR (*paszStreams)[PRVSRVTL_MAX_STREAM_NAME_SIZE] = ++ (IMG_CHAR (*)[PRVSRVTL_MAX_STREAM_NAME_SIZE]) (void *)pszStreams; ++ ++ if (*pszNamePattern == '\0') ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ if (ui32Size % PRVSRVTL_MAX_STREAM_NAME_SIZE != 0) ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ ++ /* Quick exit if there are no streams */ ++ if (TLGGD()->psHead == NULL) ++ { ++ *pui32NumFound = 0; ++ return PVRSRV_OK; ++ } ++ ++ OSLockAcquire(TLGGD()->hTLGDLock); ++ ++ *pui32NumFound = TLDiscoverStreamNodes(pszNamePattern, paszStreams, ++ ui32Size / PRVSRVTL_MAX_STREAM_NAME_SIZE); ++ ++ /* Find "tlctrl" stream and reset it */ ++ psNode = TLFindStreamNodeByName(PVRSRV_TL_CTLR_STREAM); ++ if (psNode != NULL) ++ TLStreamReset(psNode->psStream); ++ ++ OSLockRelease(TLGGD()->hTLGDLock); ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++TLServerAcquireDataKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32* puiReadOffset, ++ IMG_UINT32* puiReadLen) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ TL_GLOBAL_DATA* psGD = TLGGD(); ++ IMG_UINT32 uiTmpOffset; ++ IMG_UINT32 uiTmpLen = 0; ++ PTL_SNODE psNode; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psSD); ++ ++ TL_COUNTER_INC(psSD->ui32AcquireCount); ++ ++ /* Quick exit if there are no streams */ ++ if (psGD->psHead == NULL) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); ++ } ++ ++ /* Check stream still valid */ ++ psNode = TLFindStreamNodeByDesc(psSD); ++ if ((psNode == NULL) || (psNode != psSD->psNode)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); ++ } ++ ++ /* If we are here, the stream will never be made NULL until this context itself ++ * calls TLRemoveDescAndTryFreeStreamNode(). This is because the producer will ++ * fail to make the stream NULL (by calling TLTryRemoveStreamAndFreeStreamNode) ++ * when a valid stream descriptor is present (i.e. a client is connected). ++ * Hence, no checks for stream being NON NULL are required after this. 
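Reserve and commit are meant to be used as a pair: reserve hands back an offset into the shared stream buffer, the caller copies its payload to that offset, and commit publishes the bytes. A sketch under stated assumptions: pbyMapping is the caller's CPU mapping of the stream-buffer PMR, and OSDeviceMemCopy is assumed to be the usual osfunc.h copy helper.

static PVRSRV_ERROR ExampleReserveCommit(PTL_STREAM_DESC psSD,
                                         IMG_BYTE *pbyMapping,
                                         const IMG_BYTE *pbyData,
                                         IMG_UINT32 ui32Len)
{
    IMG_UINT32 ui32Offset, ui32Available;
    PVRSRV_ERROR eError;

    eError = TLServerReserveStreamKM(psSD, &ui32Offset, ui32Len, ui32Len,
                                     &ui32Available);
    if (eError != PVRSRV_OK)
    {
        /* e.g. PVRSRV_ERROR_STREAM_FULL; ui32Available hints at how much
         * could have been reserved instead. */
        return eError;
    }

    OSDeviceMemCopy(pbyMapping + ui32Offset, pbyData, ui32Len);

    /* Publish the reserved bytes to the reader. */
    return TLServerCommitStreamKM(psSD, ui32Len);
}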
*/ ++ PVR_ASSERT (psNode->psStream); ++ ++ psSD->ui32ReadLen = 0; /* Handle NULL read returns */ ++ ++ do ++ { ++ uiTmpLen = TLStreamAcquireReadPos(psNode->psStream, psSD->ui32Flags & PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK, &uiTmpOffset); ++ ++ /* Check we have not already exceeded read limit with just offset ++ * regardless of data length to ensure the client sees the RC */ ++ if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT) ++ { ++ /* Check to see if we are reading beyond the read limit */ ++ if (uiTmpOffset >= psSD->ui32ReadLimit) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_READLIMIT_REACHED); ++ } ++ } ++ ++ if (uiTmpLen > 0) ++ { /* Data found */ ++ ++ /* Check we have not already exceeded read limit offset+len */ ++ if (psSD->ui32Flags & PVRSRV_STREAM_FLAG_READ_LIMIT) ++ { ++ /* Adjust the read length if it goes beyond the read limit ++ * limit always guaranteed to be on packet */ ++ if ((uiTmpOffset + uiTmpLen) >= psSD->ui32ReadLimit) ++ { ++ uiTmpLen = psSD->ui32ReadLimit - uiTmpOffset; ++ } ++ } ++ ++ *puiReadOffset = uiTmpOffset; ++ *puiReadLen = uiTmpLen; ++ psSD->ui32ReadLen = uiTmpLen; /* Save the original data length in the stream desc */ ++ PVR_DPF_RETURN_OK; ++ } ++ else if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) ++ { /* No data found blocking */ ++ ++ /* Instead of doing a complete sleep for `NO_DATA_WAIT_PERIOD_US` us, we sleep in chunks ++ * of 168 ms. In a "deferred" signal scenario from writer, this gives us a chance to ++ * wake-up (timeout) early and continue reading in-case some data is available */ ++ IMG_UINT64 ui64WaitInChunksUs = MIN(NO_DATA_WAIT_PERIOD_US, 168000ULL); ++ IMG_BOOL bDataFound = IMG_FALSE; ++ ++ TL_COUNTER_INC(psSD->ui32NoDataSleep); ++ ++ LOOP_UNTIL_TIMEOUT(NO_DATA_WAIT_PERIOD_US) ++ { ++ eError = OSEventObjectWaitTimeout(psSD->hReadEvent, ui64WaitInChunksUs); ++ if (eError == PVRSRV_OK) ++ { ++ bDataFound = IMG_TRUE; ++ TL_COUNTER_INC(psSD->ui32Signalled); ++ break; ++ } ++ else if (eError == PVRSRV_ERROR_TIMEOUT) ++ { ++ if (TLStreamOutOfData(psNode->psStream)) ++ { ++ /* Return on timeout if stream empty, else let while exit and return data */ ++ continue; ++ } ++ else ++ { ++ bDataFound = IMG_TRUE; ++ TL_COUNTER_INC(psSD->ui32TimeoutData); ++ PVR_DPF((PVR_DBG_MESSAGE, "%s: Data found at timeout. 
Current BuffUt = %u", ++ __func__, TLStreamGetUT(psNode->psStream))); ++ break; ++ } ++ } ++ else ++ { /* Some other system error with event objects */ ++ PVR_DPF_RETURN_RC(eError); ++ } ++ } END_LOOP_UNTIL_TIMEOUT(); ++ ++ if (bDataFound) ++ { ++ continue; ++ } ++ else ++ { ++ TL_COUNTER_INC(psSD->ui32TimeoutEmpty); ++ return PVRSRV_ERROR_TIMEOUT; ++ } ++ } ++ else ++ { /* No data non-blocking */ ++ TL_COUNTER_INC(psSD->ui32NoData); ++ ++ /* When no-data in non-blocking mode, uiReadOffset should be set to NO_ACQUIRE ++ * signifying there's no need of Release call */ ++ *puiReadOffset = NO_ACQUIRE; ++ *puiReadLen = 0; ++ PVR_DPF_RETURN_OK; ++ } ++ } ++ while (1); ++} ++ ++PVRSRV_ERROR ++TLServerReleaseDataKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32 uiReadOffset, ++ IMG_UINT32 uiReadLen) ++{ ++ TL_GLOBAL_DATA* psGD = TLGGD(); ++ PTL_SNODE psNode; ++ ++ PVR_DPF_ENTERED; ++ ++ /* Unreferenced in release builds */ ++ PVR_UNREFERENCED_PARAMETER(uiReadOffset); ++ ++ PVR_ASSERT(psSD); ++ ++ /* Quick exit if there are no streams */ ++ if (psGD->psHead == NULL) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); ++ } ++ ++ if ((uiReadLen % PVRSRVTL_PACKET_ALIGNMENT != 0)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ /* Check stream still valid */ ++ psNode = TLFindStreamNodeByDesc(psSD); ++ if ((psNode == NULL) || (psNode != psSD->psNode)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); ++ } ++ ++ /* Since we have a valid stream descriptor, the stream should not have been ++ * made NULL by any producer context. */ ++ PVR_ASSERT (psNode->psStream); ++ ++ PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%d, uiReadLen=%d", uiReadOffset, uiReadLen)); ++ ++ /* Move read position on to free up space in stream buffer */ ++ PVR_DPF_RETURN_RC(TLStreamAdvanceReadPos(psNode->psStream, uiReadLen, psSD->ui32ReadLen)); ++} ++ ++PVRSRV_ERROR ++TLServerWriteDataKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32 ui32Size, ++ IMG_BYTE* pui8Data) ++{ ++ TL_GLOBAL_DATA* psGD = TLGGD(); ++ PTL_SNODE psNode; ++ PVRSRV_ERROR eError; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psSD); ++ ++ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ /* Quick exit if there are no streams */ ++ if (psGD->psHead == NULL) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); ++ } ++ ++ OSLockAcquire(psGD->hTLGDLock); ++ /* Check stream still valid */ ++ psNode = TLFindAndGetStreamNodeByDesc(psSD); ++ OSLockRelease(psGD->hTLGDLock); ++ ++ if ((psNode == NULL) || (psNode != psSD->psNode)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND); ++ } ++ ++ /* Since we have a valid stream descriptor, the stream should not have been ++ * made NULL by any producer context. 
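The acquire/release pair above implies the following consumer loop. This is a sketch assuming a reader descriptor and a CPU mapping of the stream buffer; ProcessPackets() is a placeholder, and NO_ACQUIRE is the sentinel defined at the top of this file:

static void ExampleReaderLoop(PTL_STREAM_DESC psSD, IMG_BYTE *pbyMapping)
{
    IMG_UINT32 ui32Offset, ui32Len;

    for (;;)
    {
        if (TLServerAcquireDataKM(psSD, &ui32Offset, &ui32Len) != PVRSRV_OK)
        {
            break;                /* timeout, read limit or stream error */
        }
        if (ui32Offset == NO_ACQUIRE)
        {
            break;                /* non-blocking open and no data */
        }

        /* ProcessPackets(pbyMapping + ui32Offset, ui32Len); */

        /* Release with the acquired (packet-aligned) length so the read
         * position advances and buffer space is reclaimed. */
        if (TLServerReleaseDataKM(psSD, ui32Offset, ui32Len) != PVRSRV_OK)
        {
            break;
        }
    }
}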
*/ ++ PVR_ASSERT (psNode->psStream); ++ ++ eError = TLStreamWrite(psNode->psStream, pui8Data, ui32Size); ++ /* propagate error up but don't print anything here */ ++ ++ OSLockAcquire(psGD->hTLGDLock); ++ TLReturnStreamNode(psNode); ++ OSLockRelease(psGD->hTLGDLock); ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++/****************************************************************************** ++ End of file (tlserver.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/tlserver.h b/drivers/gpu/drm/img-rogue/tlserver.h +new file mode 100644 +index 000000000000..7ac2958eac1e +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/tlserver.h +@@ -0,0 +1,97 @@ ++/*************************************************************************/ /*! ++@File ++@Title KM server Transport Layer implementation ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Main bridge APIs for Transport Layer client functions ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef TLSERVER_H ++#define TLSERVER_H ++ ++#include "img_defs.h" ++#include "pvr_debug.h" ++#include "connection_server.h" ++ ++#include "tlintern.h" ++ ++/* ++ * Transport Layer Client API Kernel-Mode bridge implementation ++ */ ++ ++PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection); ++PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection); ++ ++PVRSRV_ERROR TLServerOpenStreamKM(const IMG_CHAR* pszName, ++ IMG_UINT32 ui32Mode, ++ PTL_STREAM_DESC* ppsSD, ++ PMR** ppsTLPMR); ++ ++PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD); ++ ++PVRSRV_ERROR TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern, ++ IMG_UINT32 ui32Max, ++ IMG_CHAR *pszStreams, ++ IMG_UINT32 *pui32NumFound); ++ ++PVRSRV_ERROR TLServerReserveStreamKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32* ui32BufferOffset, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32SizeMin, ++ IMG_UINT32* pui32Available); ++ ++PVRSRV_ERROR TLServerCommitStreamKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32 ui32Size); ++ ++PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32* puiReadOffset, ++ IMG_UINT32* puiReadLen); ++ ++PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32 uiReadOffset, ++ IMG_UINT32 uiReadLen); ++ ++PVRSRV_ERROR TLServerWriteDataKM(PTL_STREAM_DESC psSD, ++ IMG_UINT32 ui32Size, ++ IMG_BYTE *pui8Data); ++ ++#endif /* TLSERVER_H */ ++ ++/****************************************************************************** ++ End of file (tlserver.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/tlstream.c b/drivers/gpu/drm/img-rogue/tlstream.c +new file mode 100644 +index 000000000000..a80792e7bffe +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/tlstream.c +@@ -0,0 +1,1625 @@ ++/*************************************************************************/ /*! ++@File ++@Title Transport Layer kernel side API implementation. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Transport Layer API implementation. ++ These functions are provided to driver components. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. 
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++//#define PVR_DPF_FUNCTION_TRACE_ON 1 ++#undef PVR_DPF_FUNCTION_TRACE_ON ++#include "pvr_debug.h" ++ ++#include "allocmem.h" ++#include "devicemem.h" ++#include "pvrsrv_error.h" ++#include "osfunc.h" ++#include "log2.h" ++ ++#include "tlintern.h" ++#include "tlstream.h" ++ ++#include "pvrsrv.h" ++ ++#define EVENT_OBJECT_TIMEOUT_US 1000000ULL ++#define READ_PENDING_TIMEOUT_US 100000ULL ++ ++/*! Compute maximum TL packet size for this stream. Max packet size will be ++ * minimum of PVRSRVTL_MAX_PACKET_SIZE and (BufferSize / 2.5). This computation ++ * is required to avoid a corner case that was observed when the TL buffer size is ++ * smaller than twice the TL max packet size and the read and write indices are ++ * positioned in such a way that the TL packet (write packet + padding packet) size may ++ * be bigger than the buffer size itself. ++ */ ++#define GET_TL_MAX_PACKET_SIZE( bufSize ) PVRSRVTL_ALIGN( MIN( PVRSRVTL_MAX_PACKET_SIZE, ( 2 * bufSize ) / 5 ) ) ++ ++/* Given the state of the buffer it returns a number of bytes that the client ++ * can use for a successful allocation. */ ++static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead, ++ IMG_UINT32 ui32LWrite, ++ IMG_UINT32 ui32CBSize, ++ IMG_UINT32 ui32ReqSizeMin, ++ IMG_UINT32 ui32MaxPacketSize) ++{ ++ IMG_UINT32 ui32AvSpace = 0; ++ ++ /* This could be written in fewer lines using the ? operator but it ++ would not be kind to potential readers of this source at all. */ ++ if (ui32LRead > ui32LWrite) /* Buffer WRAPPED */ ++ { ++ if ((ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) ++ { ++ ui32AvSpace = ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; ++ } ++ } ++ else /* Normal, no wrap */ ++ { ++ if ((ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) ++ { ++ ui32AvSpace = ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; ++ } ++ else if ((ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE)) ++ { ++ ui32AvSpace = ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE; ++ } ++ } ++ /* The max size of a TL packet is currently UINT16; adjust accordingly */ ++ return MIN(ui32AvSpace, ui32MaxPacketSize); ++} ++ ++/* Returns bytes left in the buffer. Negative if there is none. ++ * Two 8b aligned values are reserved, one for the write failed buffer flag ++ * and one to be able to distinguish the buffer full state from the buffer ++ * empty state.
++ * Always returns free space -8 even when the "write failed" packet may be ++ * already in the stream before this write. */ ++static INLINE IMG_INT ++circbufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size) ++{ ++ /* We need to reserve 8b (one packet) in the buffer to be able to tell empty ++ * buffers from full buffers and one more for packet write fail packet */ ++ if (ui32Read > ui32Write) ++ { ++ return (IMG_INT)ui32Read - (IMG_INT)ui32Write - (IMG_INT)BUFFER_RESERVED_SPACE; ++ } ++ else ++ { ++ return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT)BUFFER_RESERVED_SPACE; ++ } ++} ++ ++IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream) ++{ ++ PTL_STREAM psStream = (PTL_STREAM) hStream; ++ IMG_UINT32 ui32LRead = psStream->ui32Read, ui32LWrite = psStream->ui32Write; ++ ++ if (ui32LWrite >= ui32LRead) ++ { ++ return (ui32LWrite-ui32LRead); ++ } ++ else ++ { ++ return (psStream->ui32Size-ui32LRead+ui32LWrite); ++ } ++} ++ ++PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream) ++{ ++ PTL_STREAM psStream = (PTL_STREAM) hStream; ++ PVRSRV_ERROR eError; ++ ++ /* CPU Local memory used as these buffers are not accessed by the device. ++ * CPU Uncached write combine memory used to improve write performance, ++ * memory barrier added in TLStreamCommit to ensure data written to memory ++ * before CB write point is updated before consumption by the reader. ++ */ ++ IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE + 20]; ++ PVRSRV_MEMALLOCFLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | ++ PVRSRV_MEMALLOCFLAG_GPU_READABLE | ++ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED_WC | ++ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | ++ PVRSRV_MEMALLOCFLAG_PHYS_HEAP_HINT(CPU_LOCAL); /* TL for now is only used by host driver, so cpulocal mem suffices */ ++ ++ /* Exit if memory has already been allocated. */ ++ if (psStream->pbyBuffer != NULL) ++ return PVRSRV_OK; ++ ++ OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s", ++ psStream->szName); ++ ++ ++ /* Use HostMemDeviceNode instead of psStream->psDevNode to benefit from faster ++ * accesses to CPU local memory. 
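A sanity-check sketch tying TLStreamGetUT() and circbufSpaceLeft() together: assuming consistent in-range offsets and no reserve in flight, the bytes in use plus the writable bytes account for the whole buffer minus the reserved gap, in both the wrapped and non-wrapped cases:

static void ExampleBufferAccounting(IMG_UINT32 ui32Read,
                                    IMG_UINT32 ui32Write,
                                    IMG_UINT32 ui32Size)
{
    IMG_INT iFree = circbufSpaceLeft(ui32Read, ui32Write, ui32Size);
    IMG_UINT32 ui32Used = (ui32Write >= ui32Read) ?
            (ui32Write - ui32Read) : (ui32Size - ui32Read + ui32Write);

    /* used + free + BUFFER_RESERVED_SPACE == size in both branches. */
    PVR_ASSERT(ui32Used + (IMG_UINT32)iFree + BUFFER_RESERVED_SPACE == ui32Size);
}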
When the framework to access CPU_LOCAL device ++ * memory from GPU is fixed, we'll switch back to use psStream->psDevNode for ++ * TL buffers */ ++ eError = DevmemAllocateExportable((IMG_HANDLE)PVRSRVGetPVRSRVData()->psHostMemDeviceNode, ++ (IMG_DEVMEM_SIZE_T) psStream->ui32Size, ++ (IMG_DEVMEM_ALIGN_T) OSGetPageSize(), ++ ExactLog2(OSGetPageSize()), ++ uiMemFlags, ++ pszBufferLabel, ++ &psStream->psStreamMemDesc); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAllocateExportable", e0); ++ ++ eError = DevmemAcquireCpuVirtAddr(psStream->psStreamMemDesc, ++ (void**) &psStream->pbyBuffer); ++ PVR_LOG_GOTO_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e1); ++ ++ return PVRSRV_OK; ++ ++e1: ++ DevmemFree(psStream->psStreamMemDesc); ++e0: ++ return eError; ++} ++ ++void TLFreeSharedMem(IMG_HANDLE hStream) ++{ ++ PTL_STREAM psStream = (PTL_STREAM) hStream; ++ ++ if (psStream->pbyBuffer != NULL) ++ { ++ DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc); ++ psStream->pbyBuffer = NULL; ++ } ++ if (psStream->psStreamMemDesc != NULL) ++ { ++ DevmemFree(psStream->psStreamMemDesc); ++ psStream->psStreamMemDesc = NULL; ++ } ++} ++ ++/* Special space left routine for TL_FLAG_PERMANENT_NO_WRAP streams */ ++static INLINE IMG_UINT ++bufSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size) ++{ ++ /* buffers from full buffers and one more for packet write fail packet */ ++ PVR_ASSERT(ui32Read<=ui32Write); ++ return ui32size - ui32Write; ++} ++ ++/******************************************************************************* ++ * TL Server public API implementation. ++ ******************************************************************************/ ++PVRSRV_ERROR ++TLStreamCreate(IMG_HANDLE *phStream, ++ const IMG_CHAR *szStreamName, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32StreamFlags, ++ TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, ++ void *pvOnReaderOpenUD, ++ TL_STREAM_SOURCECB pfProducerCB, ++ void *pvProducerUD) ++{ ++ PTL_STREAM psTmp; ++ PVRSRV_ERROR eError; ++ IMG_HANDLE hEventList; ++ PTL_SNODE psn; ++ TL_OPMODE eOpMode; ++ ++ PVR_DPF_ENTERED; ++ /* Parameter checks: non NULL handler required */ ++ if (NULL == phStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ if (szStreamName == NULL || *szStreamName == '\0' || ++ OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ eOpMode = ui32StreamFlags & TL_OPMODE_MASK; ++ if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST )) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid")); ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ /* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName() ++ * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */ ++ OSLockAcquire (TLGGD()->hTLGDLock); ++ ++ /* Check if there already exists a stream with this name. */ ++ psn = TLFindStreamNodeByName( szStreamName ); ++ if (NULL != psn) ++ { ++ eError = PVRSRV_ERROR_ALREADY_EXISTS; ++ goto e0; ++ } ++ ++ /* Allocate stream structure container (stream struct) for the new stream */ ++ psTmp = OSAllocZMem(sizeof(TL_STREAM)); ++ if (NULL == psTmp) ++ { ++ eError = PVRSRV_ERROR_OUT_OF_MEMORY; ++ goto e0; ++ } ++ ++ OSStringLCopy(psTmp->szName, szStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE); ++ ++ if (ui32StreamFlags & TL_FLAG_FORCE_FLUSH) ++ { ++ psTmp->bWaitForEmptyOnDestroy = IMG_TRUE; ++ } ++ ++ psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ? 
IMG_TRUE : IMG_FALSE; ++ psTmp->bNoWrapPermanent = (ui32StreamFlags&TL_FLAG_PERMANENT_NO_WRAP) ? IMG_TRUE : IMG_FALSE; ++ ++ psTmp->eOpMode = eOpMode; ++ if (psTmp->eOpMode == TL_OPMODE_BLOCK) ++ { ++ /* Only allow drop properties to be mixed with no-wrap type streams ++ * since space does not become available when reads take place hence ++ * no point blocking. ++ */ ++ if (psTmp->bNoWrapPermanent) ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto e1; ++ } ++ } ++ ++ /* Additional synchronisation object required for some streams e.g. blocking */ ++ eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj); ++ PVR_GOTO_IF_ERROR(eError, e1); ++ /* Create an event handle for this kind of stream */ ++ eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent); ++ PVR_GOTO_IF_ERROR(eError, e2); ++ ++ psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB; ++ psTmp->pvOnReaderOpenUserData = pvOnReaderOpenUD; ++ /* Remember producer supplied CB and data for later */ ++ psTmp->pfProducerCallback = (void(*)(void))pfProducerCB; ++ psTmp->pvProducerUserData = pvProducerUD; ++ ++ psTmp->psNotifStream = NULL; ++ ++ /* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */ ++ psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size); ++ ++ /* Signalling from TLStreamCommit is deferred until buffer is slightly (~12%) filled */ ++ psTmp->ui32ThresholdUsageForSignal = psTmp->ui32Size >> 3; ++ psTmp->ui32MaxPacketSize = GET_TL_MAX_PACKET_SIZE(psTmp->ui32Size); ++ psTmp->ui32Read = 0; ++ psTmp->ui32Write = 0; ++ psTmp->ui32Pending = NOTHING_PENDING; ++ psTmp->bReadPending = IMG_FALSE; ++ psTmp->bSignalPending = IMG_FALSE; ++ ++#if defined(TL_BUFFER_STATS) ++ OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0); ++ /* Setting MAX possible value for "minimum" time to full, ++ * helps in the logic which calculates this time */ ++ psTmp->ui32MinTimeToFullInUs = IMG_UINT32_MAX; ++#endif ++ ++ /* Memory will be allocated on first connect to the stream */ ++ if (!(ui32StreamFlags & TL_FLAG_ALLOCATE_ON_FIRST_OPEN)) ++ { ++ /* Allocate memory for the circular buffer and export it to user space. */ ++ eError = TLAllocSharedMemIfNull(psTmp); ++ PVR_LOG_GOTO_IF_ERROR(eError, "TLAllocSharedMem", e3); ++ } ++ ++ /* Synchronisation object to synchronise with user side data transfers. */ ++ eError = OSEventObjectCreate(psTmp->szName, &hEventList); ++ PVR_GOTO_IF_ERROR(eError, e4); ++ ++ eError = OSLockCreate (&psTmp->hStreamWLock); ++ PVR_GOTO_IF_ERROR(eError, e5); ++ ++ eError = OSLockCreate (&psTmp->hReadLock); ++ PVR_GOTO_IF_ERROR(eError, e6); ++ ++ /* Now remember the stream in the global TL structures */ ++ psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, NULL); ++ PVR_GOTO_IF_NOMEM(psn, eError, e7); ++ ++ /* Stream node created, now reset the write reference count to 1 ++ * (i.e. 
this context's reference) */ ++ psn->uiWRefCount = 1; ++ ++ TLAddStreamNode(psn); ++ ++ /* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */ ++ OSLockRelease (TLGGD()->hTLGDLock); ++ ++ /* Best effort signal; if this fails, the client wait timeout will ++ * ultimately let it find the new stream. Acceptable, as clean-up would ++ * be tricky at this point */ ++ (void) OSEventObjectSignal(TLGGD()->hTLEventObj); ++ ++ /* Pass the newly created stream handle back to caller */ ++ *phStream = (IMG_HANDLE)psTmp; ++ PVR_DPF_RETURN_OK; ++ ++e7: ++ OSLockDestroy(psTmp->hReadLock); ++e6: ++ OSLockDestroy(psTmp->hStreamWLock); ++e5: ++ OSEventObjectDestroy(hEventList); ++e4: ++ TLFreeSharedMem(psTmp); ++e3: ++ OSEventObjectClose(psTmp->hProducerEvent); ++e2: ++ OSEventObjectDestroy(psTmp->hProducerEventObj); ++e1: ++ OSFreeMem(psTmp); ++e0: ++ OSLockRelease (TLGGD()->hTLGDLock); ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++void TLStreamReset(IMG_HANDLE hStream) ++{ ++ PTL_STREAM psStream = (PTL_STREAM) hStream; ++ ++ PVR_ASSERT(psStream != NULL); ++ ++ OSLockAcquire(psStream->hStreamWLock); ++ ++ while (psStream->ui32Pending != NOTHING_PENDING) ++ { ++ PVRSRV_ERROR eError; ++ ++ /* We're in the middle of a write so we cannot reset the stream. ++ * We are going to wait until the data is committed. Release lock while ++ * we're here. */ ++ OSLockRelease(psStream->hStreamWLock); ++ ++ /* Even when psStream->bNoSignalOnCommit is set we can still use ++ * the timeout capability of the event object API (time in us). */ ++ eError = OSEventObjectWaitTimeout(psStream->psNode->hReadEventObj, 100); ++ if (eError != PVRSRV_ERROR_TIMEOUT && eError != PVRSRV_OK) ++ { ++ PVR_LOG_RETURN_VOID_IF_ERROR(eError, "OSEventObjectWaitTimeout"); ++ } ++ ++ OSLockAcquire(psStream->hStreamWLock); ++ ++ /* Either a timeout occurred or the stream has been signalled. ++ * If the former, we have to check whether the data was committed; if ++ * the latter, whether the stream hasn't been re-reserved. Either way ++ * we have to go back to the condition. ++ * If the stream has been released we'll exit with the lock held so ++ * we can finally go and reset the stream.
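A hypothetical producer-side use of TLStreamCreate() as implemented above. The name and size are illustrative; TL_OPMODE_BLOCK and TL_FLAG_ALLOCATE_ON_FIRST_OPEN are flags this file already handles (deferred buffer allocation plus a blocking writer, valid here because no no-wrap flag is set):

static PVRSRV_ERROR ExampleCreateStream(IMG_HANDLE *phStream)
{
    return TLStreamCreate(phStream,
                          "example_stream",
                          16 * 1024,            /* rounded by PVRSRVTL_ALIGN */
                          TL_OPMODE_BLOCK |
                          TL_FLAG_ALLOCATE_ON_FIRST_OPEN,
                          NULL, NULL,           /* no on-reader-open callback */
                          NULL, NULL);          /* no producer callback */
}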
*/ ++ } ++ ++ psStream->ui32Read = 0; ++ psStream->ui32Write = 0; ++ /* we know that ui32Pending already has correct value (no need to set) */ ++ ++ OSLockRelease(psStream->hStreamWLock); ++} ++ ++PVRSRV_ERROR ++TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream) ++{ ++ PTL_STREAM psStream = (PTL_STREAM) hStream; ++ ++ if (hStream == NULL || hNotifStream == NULL) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ psStream->psNotifStream = (PTL_STREAM) hNotifStream; ++ ++ return PVRSRV_OK; ++} ++ ++PVRSRV_ERROR ++TLStreamReconfigure( ++ IMG_HANDLE hStream, ++ IMG_UINT32 ui32StreamFlags) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PTL_STREAM psTmp; ++ TL_OPMODE eOpMode; ++ ++ PVR_DPF_ENTERED; ++ ++ if (NULL == hStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ eOpMode = ui32StreamFlags & TL_OPMODE_MASK; ++ if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST )) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid")); ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ psTmp = (PTL_STREAM)hStream; ++ ++ /* Prevent the TL Stream buffer from being written to ++ * while its mode is being reconfigured ++ */ ++ OSLockAcquire (psTmp->hStreamWLock); ++ if (NOTHING_PENDING != psTmp->ui32Pending) ++ { ++ OSLockRelease (psTmp->hStreamWLock); ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY); ++ } ++ psTmp->ui32Pending = 0; ++ OSLockRelease (psTmp->hStreamWLock); ++ ++ psTmp->eOpMode = eOpMode; ++ if (psTmp->eOpMode == TL_OPMODE_BLOCK) ++ { ++ /* Only allow drop properties to be mixed with no-wrap type streams ++ * since space does not become available when reads take place hence ++ * no point blocking. ++ */ ++ if (psTmp->bNoWrapPermanent) ++ { ++ eError = PVRSRV_ERROR_INVALID_PARAMS; ++ goto e1; ++ } ++ } ++ ++ OSLockAcquire (psTmp->hStreamWLock); ++ psTmp->ui32Pending = NOTHING_PENDING; ++ OSLockRelease (psTmp->hStreamWLock); ++e1: ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++PVRSRV_ERROR ++TLStreamOpen(IMG_HANDLE *phStream, ++ const IMG_CHAR *szStreamName) ++{ ++ PTL_SNODE psTmpSNode; ++ ++ PVR_DPF_ENTERED; ++ ++ if (NULL == phStream || NULL == szStreamName) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ /* Acquire the TL_GLOBAL_DATA lock first to ensure, ++ * the TL_STREAM while returned and being modified, ++ * is not deleted by some other context */ ++ OSLockAcquire (TLGGD()->hTLGDLock); ++ ++ /* Search for a stream node with a matching stream name */ ++ psTmpSNode = TLFindStreamNodeByName(szStreamName); ++ ++ if (NULL == psTmpSNode) ++ { ++ OSLockRelease (TLGGD()->hTLGDLock); ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND); ++ } ++ ++ if (psTmpSNode->psStream->psNotifStream != NULL && ++ psTmpSNode->uiWRefCount == 1) ++ { ++ TLStreamMarkStreamOpen(psTmpSNode->psStream); ++ } ++ ++ /* The TL_SNODE->uiWRefCount governs the presence of this node in the ++ * TL_GLOBAL_DATA list i.e. when uiWRefCount falls to zero we try removing ++ * this node from the TL_GLOBAL_DATA list. 
Hence, it is protected using the
++ * TL_GLOBAL_DATA lock and not the TL_STREAM lock */
++	psTmpSNode->uiWRefCount++;
++
++	OSLockRelease (TLGGD()->hTLGDLock);
++
++	/* Return the stream handle to the caller */
++	*phStream = (IMG_HANDLE)psTmpSNode->psStream;
++
++	PVR_DPF_RETURN_VAL(PVRSRV_OK);
++}
++
++void
++TLStreamClose(IMG_HANDLE hStream)
++{
++	PTL_STREAM psTmp;
++	IMG_BOOL bDestroyStream;
++
++	PVR_DPF_ENTERED;
++
++	if (NULL == hStream)
++	{
++		PVR_DPF((PVR_DBG_WARNING,
++		         "TLStreamClose failed as NULL stream handle passed, nothing done."));
++		PVR_DPF_RETURN;
++	}
++
++	psTmp = (PTL_STREAM)hStream;
++
++	/* Acquire TL_GLOBAL_DATA lock for updating the reference count as this will be required
++	 * in case this TL_STREAM node is to be deleted */
++	OSLockAcquire (TLGGD()->hTLGDLock);
++
++	/* Decrement write reference counter of the stream */
++	psTmp->psNode->uiWRefCount--;
++
++	if (0 != psTmp->psNode->uiWRefCount)
++	{
++		/* The stream is still being used in other context(s), do not destroy
++		 * anything */
++
++		/* uiWRefCount == 1 means that the stream has been closed for write.
++		 * The next close pairs with TLStreamCreate(). Send a notification to
++		 * indicate that no writers are connected to the stream any more. */
++		if (psTmp->psNotifStream != NULL && psTmp->psNode->uiWRefCount == 1)
++		{
++			TLStreamMarkStreamClose(psTmp);
++		}
++
++		OSLockRelease (TLGGD()->hTLGDLock);
++		PVR_DPF_RETURN;
++	}
++	else
++	{
++		/* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */
++
++		if (psTmp->bWaitForEmptyOnDestroy)
++		{
++			/* We won't require the TL_STREAM lock to be acquired here for accessing its read
++			 * and write offsets. REASON: We are here because there is no producer context
++			 * referencing this TL_STREAM, hence its ui32Write offset won't be changed now.
++			 * Also, the update of the ui32Read offset is not protected by locks */
++			while (psTmp->ui32Read != psTmp->ui32Write)
++			{
++				/* Release lock before sleeping */
++				OSLockRelease (TLGGD()->hTLGDLock);
++
++				OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_US);
++
++				OSLockAcquire (TLGGD()->hTLGDLock);
++
++				/* Ensure destruction of the stream is still required */
++				if (0 != psTmp->psNode->uiWRefCount)
++				{
++					OSLockRelease (TLGGD()->hTLGDLock);
++					PVR_DPF_RETURN;
++				}
++			}
++		}
++
++		/* Try removing the stream from TL_GLOBAL_DATA */
++		bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode);
++
++		OSLockRelease (TLGGD()->hTLGDLock);
++
++		if (bDestroyStream)
++		{
++			/* Destroy the stream if it was removed from TL_GLOBAL_DATA */
++			TLStreamDestroy (psTmp);
++			psTmp = NULL;
++		}
++		PVR_DPF_RETURN;
++	}
++}
++
++/*
++ * DoTLSetPacketHeader
++ *
++ * Ensure that whenever we update a header we always add the RESERVED field
++ */
++static inline void DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR, IMG_UINT32);
++static inline void
++DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR pHdr,
++                    IMG_UINT32 ui32Val)
++{
++	PVR_ASSERT(((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) == 0);
++
++	/* Check that this is a correctly aligned packet header.
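++	 * As a worked illustration (the alignment value 8 is an assumption of
++	 * the example; the real value is PVRSRVTL_PACKET_ALIGNMENT): a header
++	 * at address 0x1000 gives (0x1000 & 0x7) == 0 and is accepted, while
++	 * one at 0x1004 gives (0x1004 & 0x7) == 4 and takes the misaligned
++	 * path below.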
*/ ++ if (((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) != 0) ++ { ++ /* Should return an error because the header is misaligned */ ++ PVR_DPF((PVR_DBG_ERROR, "%s: Misaligned header @ %p", __func__, pHdr)); ++ pHdr->uiTypeSize = ui32Val; ++ } ++ else ++ { ++ pHdr->uiTypeSize = ui32Val; ++ pHdr->uiReserved = PVRSRVTL_PACKETHDR_RESERVED; ++ } ++} ++ ++static PVRSRV_ERROR ++DoTLStreamReserve(IMG_HANDLE hStream, ++ IMG_UINT8 **ppui8Data, ++ IMG_UINT32 ui32ReqSize, ++ IMG_UINT32 ui32ReqSizeMin, ++ PVRSRVTL_PACKETTYPE ePacketType, ++ IMG_UINT32* pui32AvSpace, ++ IMG_UINT32* pui32Flags) ++{ ++ PTL_STREAM psTmp; ++ IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual, ui32CreateFreeSpace; ++ IMG_UINT32 ui32InputFlags = 0; ++ IMG_INT pad, iFreeSpace; ++ IMG_UINT8 *pui8IncrRead = NULL; ++ PVRSRVTL_PPACKETHDR pHdr; ++ ++ PVR_DPF_ENTERED; ++ if (pui32AvSpace) *pui32AvSpace = 0; ++ if (pui32Flags) ++ { ++ ui32InputFlags = *pui32Flags; ++ *pui32Flags = 0; ++ } ++ ++ if (NULL == hStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ psTmp = (PTL_STREAM)hStream; ++ ++ /* Assert used as the packet type parameter is currently only provided ++ * by the TL APIs, not the calling client */ ++ PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType)); ++ ++ /* The buffer is only used in "rounded" (aligned) chunks */ ++ lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize); ++ ++ /* Lock the stream before reading it's pending value, because if pending is set ++ * to NOTHING_PENDING, we update the pending value such that subsequent calls to ++ * this function from other context(s) fail with PVRSRV_ERROR_NOT_READY */ ++ OSLockAcquire (psTmp->hStreamWLock); ++ ++#if defined(TL_BUFFER_STATS) ++ /* If writing into an empty buffer, start recording time-to-full */ ++ if (psTmp->ui32Read == psTmp->ui32Write) ++ { ++ OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 1); ++ psTmp->ui32TimeStart = OSClockus(); ++ } ++ ++ if (ui32ReqSize > psTmp->ui32MaxReserveWatermark) ++ { ++ psTmp->ui32MaxReserveWatermark = ui32ReqSize; ++ } ++#endif ++ ++ /* Get a local copy of the stream buffer parameters */ ++ ui32LRead = psTmp->ui32Read; ++ ui32LWrite = psTmp->ui32Write; ++ ui32LPending = psTmp->ui32Pending; ++ ++ /* Multiple pending reserves are not supported. */ ++ if (NOTHING_PENDING != ui32LPending) ++ { ++ OSLockRelease (psTmp->hStreamWLock); ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY); ++ } ++ ++ if (psTmp->ui32MaxPacketSize < lReqSizeAligned) ++ { ++ PVR_DPF((PVR_DBG_ERROR, "Requested Size: %u > TL Max Packet size: %u", lReqSizeAligned, psTmp->ui32MaxPacketSize)); ++ psTmp->ui32Pending = NOTHING_PENDING; ++ if (pui32AvSpace) ++ { ++ *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize); ++ if (*pui32AvSpace == 0 && psTmp->eOpMode == TL_OPMODE_DROP_OLDEST) ++ { ++ *pui32AvSpace = psTmp->ui32MaxPacketSize; ++ PVR_DPF((PVR_DBG_MESSAGE, "Opmode is Drop_Oldest, so Available Space changed to: %u", *pui32AvSpace)); ++ } ++ } ++ OSLockRelease (psTmp->hStreamWLock); ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED); ++ } ++ ++ /* Prevent other threads from entering this region before we are done ++ * updating the pending value and write offset (in case of padding). 
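++	 * (Aside, an illustrative interleaving rather than driver code: writer
++	 * A takes hStreamWLock and moves ui32Pending from NOTHING_PENDING to 0;
++	 * a second writer B calling DoTLStreamReserve on the same stream now
++	 * sees ui32Pending != NOTHING_PENDING and returns
++	 * PVRSRV_ERROR_NOT_READY, retrying only after A's TLStreamCommit has
++	 * restored NOTHING_PENDING.)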
This ++ * is not exactly a lock but a signal for other contexts that there is a ++ * TLStreamCommit operation pending on this stream */ ++ psTmp->ui32Pending = 0; ++ ++ OSLockRelease (psTmp->hStreamWLock); ++ ++ /* If there is enough contiguous space following the current Write ++ * position then no padding is required */ ++ if ( psTmp->ui32Size ++ < ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) ) ++ { ++ pad = psTmp->ui32Size - ui32LWrite; ++ } ++ else ++ { ++ pad = 0; ++ } ++ ++ lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad; ++ if (psTmp->bNoWrapPermanent) ++ { ++ iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); ++ } ++ else ++ { ++ iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); ++ } ++ ++ if (iFreeSpace < (IMG_INT) lReqSizeActual) ++ { ++ /* If this is a blocking reserve and there is not enough space then wait. */ ++ if (psTmp->eOpMode == TL_OPMODE_BLOCK) ++ { ++ /* Stream create should stop us entering here when ++ * psTmp->bNoWrapPermanent is true as it does not make sense to ++ * block on permanent data streams. */ ++ PVR_ASSERT(psTmp->bNoWrapPermanent == IMG_FALSE); ++ while ( ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size) ++ <(IMG_INT) lReqSizeActual ) ) ++ { ++ /* The TL bridge is lockless now, so changing to OSEventObjectWait() */ ++ OSEventObjectWait(psTmp->hProducerEvent); ++ // update local copies. ++ ui32LRead = psTmp->ui32Read; ++ ui32LWrite = psTmp->ui32Write; ++ } ++ } ++ /* Data overwriting, also insert PACKETS_DROPPED flag into existing packet */ ++ else if (psTmp->eOpMode == TL_OPMODE_DROP_OLDEST) ++ { ++ OSLockAcquire(psTmp->hReadLock); ++ ++ while (psTmp->bReadPending) ++ { ++ PVR_DPF((PVR_DBG_MESSAGE, "Waiting for the pending read operation to complete.")); ++ OSLockRelease(psTmp->hReadLock); ++#if defined(TL_BUFFER_STATS) ++ TL_COUNTER_INC(psTmp->ui32CntWriteWaits); ++#endif ++ (void) OSEventObjectWaitTimeout(psTmp->hProducerEvent, READ_PENDING_TIMEOUT_US); ++ OSLockAcquire(psTmp->hReadLock); ++ } ++ ++#if defined(TL_BUFFER_STATS) ++ TL_COUNTER_INC(psTmp->ui32CntWriteSuccesses); ++#endif ++ ui32LRead = psTmp->ui32Read; ++ ++ if ( circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size) ++ < (IMG_INT) lReqSizeActual ) ++ { ++ ui32CreateFreeSpace = 5 * (psTmp->ui32Size / 100); ++ if (ui32CreateFreeSpace < lReqSizeActual) ++ { ++ ui32CreateFreeSpace = lReqSizeActual; ++ } ++ ++ while (ui32CreateFreeSpace > (IMG_UINT32)circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)) ++ { ++ pui8IncrRead = &psTmp->pbyBuffer[ui32LRead]; ++ ui32LRead += (sizeof(PVRSRVTL_PACKETHDR) + PVRSRVTL_ALIGN( GET_PACKET_DATA_LEN(pui8IncrRead) )); ++ ++ /* Check if buffer needs to wrap */ ++ if (ui32LRead >= psTmp->ui32Size) ++ { ++ ui32LRead = 0; ++ } ++ } ++ psTmp->ui32Read = ui32LRead; ++ pui8IncrRead = &psTmp->pbyBuffer[psTmp->ui32Read]; ++ ++ pHdr = GET_PACKET_HDR(pui8IncrRead); ++ DoTLSetPacketHeader(pHdr, SET_PACKETS_DROPPED(pHdr)); ++ } ++ /* else fall through as there is enough space now to write the data */ ++ ++ OSLockRelease(psTmp->hReadLock); ++ /* If we accepted a flag var set the OVERWRITE bit*/ ++ if (pui32Flags) *pui32Flags |= TL_FLAG_OVERWRITE_DETECTED; ++ } ++ /* No data overwriting, insert write_failed flag and return */ ++ else if (psTmp->eOpMode == TL_OPMODE_DROP_NEWER) ++ { ++ /* Caller should not try to use ppui8Data, ++ * NULLify to give user a chance of avoiding memory corruption */ ++ *ppui8Data = NULL; ++ ++ /* This flag should not be inserted two consecutive times, so ++ * 
check the last ui32 in case it was a packet drop packet. */ ++ pui32Buf = ui32LWrite ++ ? ++ (void *)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)] ++ : // Previous four bytes are not guaranteed to be a packet header... ++ (void *)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT]; ++ ++ pHdr = GET_PACKET_HDR(pui32Buf); ++ if ( PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED ++ != ++ GET_PACKET_TYPE( pHdr ) && (ui32InputFlags & TL_FLAG_NO_WRITE_FAILED) == 0) ++ { ++ /* Insert size-stamped packet header */ ++ pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite]; ++ pHdr = GET_PACKET_HDR(pui32Buf); ++ DoTLSetPacketHeader(pHdr, PVRSRVTL_SET_PACKET_WRITE_FAILED); ++ ui32LWrite += sizeof(PVRSRVTL_PACKETHDR); ++ ui32LWrite %= psTmp->ui32Size; ++ iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR); ++ } ++ ++ OSLockAcquire (psTmp->hStreamWLock); ++ psTmp->ui32Write = ui32LWrite; ++ psTmp->ui32Pending = NOTHING_PENDING; ++ OSLockRelease (psTmp->hStreamWLock); ++ ++ if (pui32AvSpace) ++ { ++ *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize); ++ } ++ ++ /* Inform call of permanent stream misuse, no space left, ++ * the size of the stream will need to be increased. */ ++ if (psTmp->bNoWrapPermanent) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE); ++ } ++ ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_FULL); ++ } ++ } ++ ++ /* The easy case: buffer has enough space to hold the requested packet (data + header) */ ++ ++ /* Should we treat the buffer as non-circular buffer? */ ++ if (psTmp->bNoWrapPermanent) ++ { ++ iFreeSpace = bufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); ++ } ++ else ++ { ++ iFreeSpace = circbufSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size); ++ } ++ ++ if (iFreeSpace >= (IMG_INT) lReqSizeActual) ++ { ++ if (pad) ++ { ++ /* Inserting padding packet. 
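++		 * Worked numbers (illustrative, assuming an 8-byte
++		 * PVRSRVTL_PACKETHDR of two 32-bit fields): ui32Size = 4096 and
++		 * ui32LWrite = 4064 leave only 32 contiguous bytes, too few for an
++		 * aligned 64-byte payload plus header, so pad = 4096 - 4064 = 32.
++		 * The padding packet carries pad - sizeof(PVRSRVTL_PACKETHDR) = 24
++		 * bytes and ui32LWrite wraps to 0 for the real packet.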
*/ ++ pui32Buf = (void *)&psTmp->pbyBuffer[ui32LWrite]; ++ pHdr = GET_PACKET_HDR(pui32Buf); ++ DoTLSetPacketHeader(pHdr, ++ PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR))); ++ ++ /* CAUTION: the used pad value should always result in a properly ++ * aligned ui32LWrite pointer, which in this case is 0 */ ++ ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size; ++ /* Detect unaligned pad value */ ++ PVR_ASSERT(ui32LWrite == 0); ++ } ++ /* Insert size-stamped packet header */ ++ pui32Buf = (void *) &psTmp->pbyBuffer[ui32LWrite]; ++ ++ pHdr = GET_PACKET_HDR(pui32Buf); ++ DoTLSetPacketHeader(pHdr, ++ PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType)); ++ ++ /* return the next position in the buffer to the user */ ++ *ppui8Data = &psTmp->pbyBuffer[ ui32LWrite+sizeof(PVRSRVTL_PACKETHDR) ]; ++ ++ /* update pending offset: size stamp + data */ ++ ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR); ++ } ++ else ++ { ++ OSLockAcquire (psTmp->hStreamWLock); ++ psTmp->ui32Pending = NOTHING_PENDING; ++ OSLockRelease (psTmp->hStreamWLock); ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR); ++ } ++ ++ /* Acquire stream lock for updating stream parameters */ ++ OSLockAcquire (psTmp->hStreamWLock); ++ psTmp->ui32Write = ui32LWrite; ++ psTmp->ui32Pending = ui32LPending; ++ OSLockRelease (psTmp->hStreamWLock); ++ ++#if defined(TL_BUFFER_STATS) ++ TL_COUNTER_INC(psTmp->ui32CntNumWriteSuccess); ++#endif ++ ++ PVR_DPF_RETURN_OK; ++} ++ ++PVRSRV_ERROR ++TLStreamReserve(IMG_HANDLE hStream, ++ IMG_UINT8 **ppui8Data, ++ IMG_UINT32 ui32Size) ++{ ++ return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, NULL); ++} ++ ++PVRSRV_ERROR ++TLStreamReserve2(IMG_HANDLE hStream, ++ IMG_UINT8 **ppui8Data, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32SizeMin, ++ IMG_UINT32* pui32Available, ++ IMG_BOOL* pbIsReaderConnected) ++{ ++ PVRSRV_ERROR eError; ++ ++ eError = DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available, NULL); ++ if (eError != PVRSRV_OK && pbIsReaderConnected != NULL) ++ { ++ *pbIsReaderConnected = TLStreamIsOpenForReading(hStream); ++ } ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++TLStreamReserveReturnFlags(IMG_HANDLE hStream, ++ IMG_UINT8 **ppui8Data, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32* pui32Flags) ++{ ++ return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL, pui32Flags); ++} ++ ++PVRSRV_ERROR ++TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize) ++{ ++ PTL_STREAM psTmp; ++ IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending; ++ PVRSRV_ERROR eError; ++ ++#if defined(TL_BUFFER_STATS) ++ IMG_UINT32 ui32UnreadBytes; ++#endif ++ ++ PVR_DPF_ENTERED; ++ ++ if (NULL == hStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ psTmp = (PTL_STREAM)hStream; ++ ++ /* Get a local copy of the stream buffer parameters */ ++ ui32LRead = psTmp->ui32Read; ++ ui32LWrite = psTmp->ui32Write; ++ ui32LPending = psTmp->ui32Pending; ++ ++ ui32OldWrite = ui32LWrite; ++ ++ // Space in buffer is aligned ++ ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize) + sizeof(PVRSRVTL_PACKETHDR); ++ ++ /* Check pending reserver and ReqSize + packet header size. */ ++ if ((ui32LPending == NOTHING_PENDING) || (ui32ReqSize > ui32LPending)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE); ++ } ++ ++ /* Update pointer to written data. 
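++	 * E.g. (illustrative numbers): with ui32Size = 4096, ui32LWrite = 4080
++	 * and an 8-byte payload committed together with its 8-byte header
++	 * (assuming sizeof(PVRSRVTL_PACKETHDR) == 8), (4080 + 16) % 4096 == 0,
++	 * so a packet ending exactly at the buffer end wraps the write pointer
++	 * back to the start.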
*/ ++ ui32LWrite = (ui32LWrite + ui32ReqSize) % psTmp->ui32Size; ++ ++ /* and reset LPending to 0 since data are now submitted */ ++ ui32LPending = NOTHING_PENDING; ++ ++#if defined(TL_BUFFER_STATS) ++ /* Calculate new number of bytes unread */ ++ if (ui32LWrite > ui32LRead) ++ { ++ ui32UnreadBytes = (ui32LWrite-ui32LRead); ++ } ++ else if (ui32LWrite < ui32LRead) ++ { ++ ui32UnreadBytes = (psTmp->ui32Size-ui32LRead+ui32LWrite); ++ } ++ else ++ { /* else equal, ignore */ ++ ui32UnreadBytes = 0; ++ } ++ ++ /* Calculate high water mark for debug purposes */ ++ if (ui32UnreadBytes > psTmp->ui32BufferUt) ++ { ++ psTmp->ui32BufferUt = ui32UnreadBytes; ++ } ++#endif ++ ++ /* Memory barrier required to ensure prior data written by writer is ++ * flushed from WC buffer to main memory. */ ++ OSWriteMemoryBarrier(NULL); ++ ++ /* Acquire stream lock to ensure other context(s) (if any) ++ * wait on the lock (in DoTLStreamReserve) for consistent values ++ * of write offset and pending value */ ++ OSLockAcquire (psTmp->hStreamWLock); ++ ++ /* Update stream buffer parameters to match local copies */ ++ psTmp->ui32Write = ui32LWrite; ++ psTmp->ui32Pending = ui32LPending; ++ ++ /* Ensure write pointer is flushed */ ++ OSWriteMemoryBarrier(&psTmp->ui32Write); ++ ++ TL_COUNTER_ADD(psTmp->ui32ProducerByteCount, ui32ReqSize); ++ TL_COUNTER_INC(psTmp->ui32NumCommits); ++ ++#if defined(TL_BUFFER_STATS) ++ /* IF there has been no-reader since first reserve on an empty-buffer, ++ * AND current utilisation is considerably high (90%), calculate the ++ * time taken to fill up the buffer */ ++ if ((OSAtomicRead(&psTmp->bNoReaderSinceFirstReserve) == 1) && ++ (TLStreamGetUT(psTmp) >= 90 * psTmp->ui32Size/100)) ++ { ++ IMG_UINT32 ui32TimeToFullInUs = OSClockus() - psTmp->ui32TimeStart; ++ if (psTmp->ui32MinTimeToFullInUs > ui32TimeToFullInUs) ++ { ++ psTmp->ui32MinTimeToFullInUs = ui32TimeToFullInUs; ++ } ++ /* Following write ensures ui32MinTimeToFullInUs doesn't lose its ++ * real (expected) value in case there is no reader until next Commit call */ ++ OSAtomicWrite(&psTmp->bNoReaderSinceFirstReserve, 0); ++ } ++#endif ++ ++ if (!psTmp->bNoSignalOnCommit) ++ { ++ /* If we have transitioned from an empty buffer to a non-empty buffer, we ++ * must signal possibly waiting consumer. BUT, let the signal be "deferred" ++ * until buffer is at least 'ui32ThresholdUsageForSignal' bytes full. 
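++	 * (With the threshold set at create time to ui32Size >> 3, an
++	 * 8192-byte stream defers the first signal until roughly 1024 bytes,
++	 * i.e. ~12.5% of the buffer, are outstanding.)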
This ++ * avoids a race between OSEventObjectSignal and OSEventObjectWaitTimeout ++ * (in TLServerAcquireDataKM), where a "signal" might happen before "wait", ++ * resulting into signal being lost and stream-reader waiting even though ++ * buffer is no-more empty */ ++ if (ui32OldWrite == ui32LRead) ++ { ++ psTmp->bSignalPending = IMG_TRUE; ++ } ++ ++ if (psTmp->bSignalPending && (TLStreamGetUT(psTmp) >= psTmp->ui32ThresholdUsageForSignal)) ++ { ++ TL_COUNTER_INC(psTmp->ui32SignalsSent); ++ psTmp->bSignalPending = IMG_FALSE; ++ ++ /* Signal consumers that may be waiting */ ++ eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj); ++ if (eError != PVRSRV_OK) ++ { ++ OSLockRelease (psTmp->hStreamWLock); ++ PVR_DPF_RETURN_RC(eError); ++ } ++ } ++ else ++ { ++ TL_COUNTER_INC(psTmp->ui32SignalNotSent); ++ } ++ } ++ OSLockRelease (psTmp->hStreamWLock); ++ ++ PVR_DPF_RETURN_OK; ++} ++ ++PVRSRV_ERROR ++TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size) ++{ ++ IMG_BYTE *pbyDest = NULL; ++ PVRSRV_ERROR eError; ++ ++ PVR_DPF_ENTERED; ++ ++ if (NULL == hStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ eError = TLStreamReserve(hStream, &pbyDest, ui32Size); ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ else ++ { ++ OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size); ++ eError = TLStreamCommit(hStream, ui32Size); ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ } ++ ++ PVR_DPF_RETURN_OK; ++} ++ ++PVRSRV_ERROR ++TLStreamWriteRetFlags(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size, IMG_UINT32 *pui32Flags){ ++ IMG_BYTE *pbyDest = NULL; ++ PVRSRV_ERROR eError; ++ ++ PVR_DPF_ENTERED; ++ ++ if (NULL == hStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ eError = TLStreamReserveReturnFlags(hStream, &pbyDest, ui32Size, pui32Flags); ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ else ++ { ++ OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size); ++ eError = TLStreamCommit(hStream, ui32Size); ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ } ++ ++ PVR_DPF_RETURN_OK; ++} ++ ++void TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo) ++{ ++ IMG_DEVMEM_SIZE_T actual_req_size; ++ IMG_DEVMEM_ALIGN_T align = 4; /* Low fake value so the real value can be obtained */ ++ ++ actual_req_size = 2; ++ /* ignore error as OSGetPageShift() should always return correct value */ ++ (void) DevmemExportalignAdjustSizeAndAlign(OSGetPageShift(), &actual_req_size, &align); ++ ++ psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR); ++ psInfo->minReservationSize = sizeof(IMG_UINT32); ++ psInfo->pageSize = (IMG_UINT32)(actual_req_size); ++ psInfo->pageAlign = (IMG_UINT32)(align); ++ psInfo->maxTLpacketSize = ((PTL_STREAM)hStream)->ui32MaxPacketSize; ++} ++ ++PVRSRV_ERROR ++TLStreamMarkEOS(IMG_HANDLE psStream, IMG_BOOL bRemoveOld) ++{ ++ PTL_STREAM psTmp; ++ PVRSRV_ERROR eError; ++ IMG_UINT8* pData; ++ ++ PVR_DPF_ENTERED; ++ ++ if (NULL == psStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ psTmp = (PTL_STREAM)psStream; ++ ++ /* Do not support EOS packets on permanent stream buffers at present, ++ * EOS is best used with streams where data is consumed. 
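++	 * (Usage sketch, based on the bRemoveOld semantics documented in
++	 * tlstream.h: a producer splitting a capture into files may call
++	 * TLStreamMarkEOS(hStream, IMG_TRUE) to ask for the old record file
++	 * to be removed before the new one starts, or IMG_FALSE to keep it.)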
*/ ++ if (psTmp->bNoWrapPermanent) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE); ++ } ++ ++ if (bRemoveOld) ++ { ++ eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS_REMOVEOLD, NULL, NULL); ++ } ++ else ++ { ++ eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL, NULL); ++ } ++ ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ ++ PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0)); ++} ++ ++ ++static PVRSRV_ERROR ++_TLStreamMarkOC(IMG_HANDLE hStream, PVRSRVTL_PACKETTYPE ePacketType) ++{ ++ PVRSRV_ERROR eError; ++ PTL_STREAM psStream = hStream; ++ IMG_UINT32 ui32Size; ++ IMG_UINT8 *pData; ++ ++ PVR_DPF_ENTERED; ++ ++ if (NULL == psStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ ++ if (NULL == psStream->psNotifStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_NOTIF_STREAM); ++ } ++ ++ ui32Size = OSStringLength(psStream->szName) + 1; ++ ++ eError = DoTLStreamReserve(psStream->psNotifStream, &pData, ui32Size, ++ ui32Size, ePacketType, NULL, NULL); ++ if (PVRSRV_OK != eError) ++ { ++ PVR_DPF_RETURN_RC(eError); ++ } ++ ++ OSDeviceMemCopy(pData, psStream->szName, ui32Size); ++ ++ PVR_DPF_RETURN_RC(TLStreamCommit(psStream->psNotifStream, ui32Size)); ++} ++ ++PVRSRV_ERROR ++TLStreamMarkStreamOpen(IMG_HANDLE psStream) ++{ ++ return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE); ++} ++ ++PVRSRV_ERROR ++TLStreamMarkStreamClose(IMG_HANDLE psStream) ++{ ++ return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE); ++} ++ ++PVRSRV_ERROR ++TLStreamSync(IMG_HANDLE psStream) ++{ ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ PTL_STREAM psTmp; ++ ++ PVR_DPF_ENTERED; ++ ++ if (NULL == psStream) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS); ++ } ++ psTmp = (PTL_STREAM)psStream; ++ ++ /* If read client exists and has opened stream in blocking mode, ++ * signal when data is available to read. */ ++ if (psTmp->psNode->psRDesc && ++ (!(psTmp->psNode->psRDesc->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) && ++ psTmp->ui32Read != psTmp->ui32Write) ++ { ++ TL_COUNTER_INC(psTmp->ui32ManSyncs); ++ eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj); ++ } ++ ++ PVR_DPF_RETURN_RC(eError); ++} ++ ++IMG_BOOL ++TLStreamIsOpenForReading(IMG_HANDLE hStream) ++{ ++ PTL_STREAM psTmp; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(hStream); ++ psTmp = (PTL_STREAM)hStream; ++ ++ PVR_DPF_RETURN_VAL(psTmp->psNode->psRDesc != NULL); ++} ++ ++IMG_BOOL ++TLStreamOutOfData(IMG_HANDLE hStream) ++{ ++ PTL_STREAM psTmp; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(hStream); ++ psTmp = (PTL_STREAM)hStream; ++ ++ /* If both pointers are equal then the buffer is empty */ ++ PVR_DPF_RETURN_VAL(psTmp->ui32Read == psTmp->ui32Write); ++} ++ ++ ++PVRSRV_ERROR ++TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value) ++{ ++ PTL_STREAM psTmp; ++ IMG_UINT32 ui32LRead, ui32LWrite; ++ PVRSRV_ERROR eErr = PVRSRV_OK; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(hStream); ++ psTmp = (PTL_STREAM)hStream; ++ ui32LRead = psTmp->ui32Read; ++ ui32LWrite = psTmp->ui32Write; ++ ++ if (ui32LRead != ui32LWrite) ++ { ++ eErr = PVRSRV_ERROR_STREAM_MISUSE; ++ } ++#if defined(TL_BUFFER_STATS) ++ psTmp->ui32ProducerByteCount = ui32Value; ++#else ++ PVR_UNREFERENCED_PARAMETER(ui32Value); ++#endif ++ PVR_DPF_RETURN_RC(eErr); ++} ++/* ++ * Internal stream APIs to server part of Transport Layer, declared in ++ * header tlintern.h. 
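++ * (An illustrative consumer sequence built from the two calls below;
++ * CopyToClient is a placeholder name, not a real TL helper:
++ *
++ *     IMG_UINT32 uiOff = 0;
++ *     IMG_UINT32 uiLen = TLStreamAcquireReadPos(psStream, IMG_FALSE, &uiOff);
++ *     if (uiLen != 0)
++ *     {
++ *         CopyToClient(&psStream->pbyBuffer[uiOff], uiLen);
++ *         TLStreamAdvanceReadPos(psStream, uiLen, uiLen);
++ *     }
++ * )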
Direct pointers to stream objects are used here as ++ * these functions are internal. ++ */ ++IMG_UINT32 ++TLStreamAcquireReadPos(PTL_STREAM psStream, ++ IMG_BOOL bDisableCallback, ++ IMG_UINT32* puiReadOffset) ++{ ++ IMG_UINT32 uiReadLen = 0; ++ IMG_UINT32 ui32LRead, ui32LWrite; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psStream); ++ PVR_ASSERT(puiReadOffset); ++ ++ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) ++ { ++ if (!OSTryLockAcquire(psStream->hReadLock)) ++ { ++ /* ++ * This is a normal event when the system is under load. ++ * An example of how to produce this is to run testrunner / ++ * regression/ddk_test_seq2_host_fw_mem.conf with HTB / pvrhtbd ++ * configured as ++ * ++ * # pvrdebug -log trace -loggroups main,pow,debug \ ++ * -hostloggroups main,ctrl,sync,brg -hostlogtype dropoldest ++ * ++ * # pvrhtbd -hostloggroups main,ctrl,sync,brg ++ * ++ * We will see a small number of these collisions but as this is ++ * an expected calling path, and an expected return code, we drop ++ * the severity to just be a debug MESSAGE instead of WARNING ++ */ ++ PVR_DPF((PVR_DBG_MESSAGE, ++ "%s: Read lock on stream '%s' is acquired by some writer, " ++ "hence reader failed to acquire read lock.", __func__, ++ psStream->szName)); ++#if defined(TL_BUFFER_STATS) ++ TL_COUNTER_INC(psStream->ui32CntReadFails); ++#endif ++ PVR_DPF_RETURN_VAL(0); ++ } ++ } ++ ++#if defined(TL_BUFFER_STATS) ++ TL_COUNTER_INC(psStream->ui32CntReadSuccesses); ++#endif ++ ++ /* Grab a local copy */ ++ ui32LRead = psStream->ui32Read; ++ ui32LWrite = psStream->ui32Write; ++ ++ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) ++ { ++ psStream->bReadPending = IMG_TRUE; ++ OSLockRelease(psStream->hReadLock); ++ } ++ ++ /* No data available and CB defined - try and get data */ ++ if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback && !bDisableCallback) ++ { ++ PVRSRV_ERROR eRc; ++ IMG_UINT32 ui32Resp = 0; ++ ++ eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS, ++ &ui32Resp, psStream->pvProducerUserData); ++ PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback"); ++ ++ ui32LWrite = psStream->ui32Write; ++ } ++ ++ /* No data available... */ ++ if (ui32LRead == ui32LWrite) ++ { ++ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) ++ { ++ psStream->bReadPending = IMG_FALSE; ++ } ++ PVR_DPF_RETURN_VAL(0); ++ } ++ ++#if defined(TL_BUFFER_STATS) ++ /* The moment reader knows it will see a non-zero data, it marks its presence in writer's eyes */ ++ OSAtomicWrite (&psStream->bNoReaderSinceFirstReserve, 0); ++#endif ++ ++ /* Data is available to read... */ ++ *puiReadOffset = ui32LRead; ++ ++ /*PVR_DPF((PVR_DBG_VERBOSE, ++ * "TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d", ++ * ui32LWrite, ui32LRead, psStream->ui32Size)); ++ */ ++ ++ if (ui32LRead > ui32LWrite) ++ { /* CB has wrapped around. 
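++	 * E.g. (illustrative): ui32Size = 4096, ui32LRead = 3000 and
++	 * ui32LWrite = 100 yield uiReadLen = 4096 - 3000 = 1096, the
++	 * contiguous tail of the buffer; the 100 bytes wrapped to the front
++	 * are returned by the next acquire call.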
*/ ++ PVR_ASSERT(!psStream->bNoWrapPermanent); ++ /* Return the first contiguous piece of memory, ie [ReadLen,EndOfBuffer] ++ * and let a subsequent AcquireReadPos read the rest of the Buffer */ ++ /*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/ ++ uiReadLen = psStream->ui32Size - ui32LRead; ++ TL_COUNTER_INC(psStream->ui32AcquireRead2); ++ } ++ else ++ { /* CB has not wrapped */ ++ uiReadLen = ui32LWrite - ui32LRead; ++ TL_COUNTER_INC(psStream->ui32AcquireRead1); ++ } ++ ++ PVR_DPF_RETURN_VAL(uiReadLen); ++} ++ ++PVRSRV_ERROR ++TLStreamAdvanceReadPos(PTL_STREAM psStream, ++ IMG_UINT32 uiReadLen, ++ IMG_UINT32 uiOrigReadLen) ++{ ++ IMG_UINT32 uiNewReadPos; ++ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psStream); ++ ++ /* ++ * This API does not use Read lock as 'bReadPending' is sufficient ++ * to keep Read index safe by preventing a write from updating the ++ * index and 'bReadPending' itself is safe as it can only be modified ++ * by readers and there can be only one reader in action at a time. ++ */ ++ ++ /* Update the read offset by the length provided in a circular manner. ++ * Assuming the update to be atomic hence, avoiding use of locks ++ */ ++ uiNewReadPos = (psStream->ui32Read + uiReadLen) % psStream->ui32Size; ++ ++ /* Must validate length is on a packet boundary, for ++ * TLReleaseDataLess calls. ++ */ ++ if (uiReadLen != uiOrigReadLen) /* buffer not empty */ ++ { ++ PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(psStream->pbyBuffer+uiNewReadPos); ++ PVRSRVTL_PACKETTYPE eType = GET_PACKET_TYPE(psHdr); ++ ++ if ((psHdr->uiReserved != PVRSRVTL_PACKETHDR_RESERVED) || ++ (eType == PVRSRVTL_PACKETTYPE_UNDEF) || ++ (eType >= PVRSRVTL_PACKETTYPE_LAST)) ++ { ++ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_ALIGNMENT); ++ } ++ /* else OK, on a packet boundary */ ++ } ++ /* else no check needed */ ++ ++ psStream->ui32Read = uiNewReadPos; ++ ++ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST) ++ { ++ psStream->bReadPending = IMG_FALSE; ++ } ++ ++ /* notify reserves that may be pending */ ++ /* The producer event object is used to signal the StreamReserve if the TL ++ * Buffer is in blocking mode and is full. ++ * Previously this event was only signalled if the buffer was created in ++ * blocking mode. Since the buffer mode can now change dynamically the event ++ * is signalled every time to avoid any potential race where the signal is ++ * required, but not produced. ++ */ ++ { ++ PVRSRV_ERROR eError; ++ eError = OSEventObjectSignal(psStream->hProducerEventObj); ++ if (eError != PVRSRV_OK) ++ { ++ PVR_DPF((PVR_DBG_WARNING, ++ "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u", ++ eError)); ++ /* We've failed to notify the producer event. This means there may ++ * be a delay in generating more data to be consumed until the next ++ * Write() generating action occurs. ++ */ ++ } ++ } ++ ++ PVR_DPF((PVR_DBG_VERBOSE, ++ "TLStreamAdvanceReadPos Read now at: %d", ++ psStream->ui32Read)); ++ PVR_DPF_RETURN_OK; ++} ++ ++void ++TLStreamResetReadPos(PTL_STREAM psStream) ++{ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psStream); ++ ++ if (psStream->bNoWrapPermanent) ++ { ++ ++ /* Update the read offset by the length provided in a circular manner. 
++ * Assuming the update to be atomic hence, avoiding use of locks */ ++ psStream->ui32Read = 0; ++ ++ PVR_DPF((PVR_DBG_VERBOSE, ++ "TLStreamResetReadPos Read now at: %d", ++ psStream->ui32Read)); ++ } ++ else ++ { ++ /* else for other stream types this is a no-op */ ++ PVR_DPF((PVR_DBG_VERBOSE, ++ "No need to reset read position of circular tlstream")); ++ } ++ ++ PVR_DPF_RETURN; ++} ++ ++void ++TLStreamDestroy (PTL_STREAM psStream) ++{ ++ PVR_ASSERT (psStream); ++ ++ OSLockDestroy (psStream->hStreamWLock); ++ OSLockDestroy (psStream->hReadLock); ++ ++ OSEventObjectClose(psStream->hProducerEvent); ++ OSEventObjectDestroy(psStream->hProducerEventObj); ++ ++ TLFreeSharedMem(psStream); ++ OSFreeMem(psStream); ++} ++ ++DEVMEM_MEMDESC* ++TLStreamGetBufferPointer(PTL_STREAM psStream) ++{ ++ PVR_DPF_ENTERED; ++ ++ PVR_ASSERT(psStream); ++ ++ PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc); ++} +diff --git a/drivers/gpu/drm/img-rogue/tlstream.h b/drivers/gpu/drm/img-rogue/tlstream.h +new file mode 100644 +index 000000000000..911e720e7cdd +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/tlstream.h +@@ -0,0 +1,600 @@ ++/*************************************************************************/ /*! ++@File ++@Title Transport Layer kernel side API. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description TL provides driver components with a way to copy data from kernel ++ space to user space (e.g. screen/file). ++ ++ Data can be passed to the Transport Layer through the ++ TL Stream (kernel space) API interface. ++ ++ The buffer provided to every stream is a modified version of a ++ circular buffer. Which CB version is created is specified by ++ relevant flags when creating a stream. Currently two types ++ of buffer are available: ++ - TL_OPMODE_DROP_NEWER: ++ When the buffer is full, incoming data are dropped ++ (instead of overwriting older data) and a marker is set ++ to let the user know that data have been lost. ++ - TL_OPMODE_BLOCK: ++ When the circular buffer is full, reserve/write calls block ++ until enough space is freed. ++ - TL_OPMODE_DROP_OLDEST: ++ When the circular buffer is full, the oldest packets in the ++ buffer are dropped and a flag is set in header of next packet ++ to let the user know that data have been lost. ++ ++ All size/space requests are in bytes. However, the actual ++ implementation uses native word sizes (i.e. 4 byte aligned). ++ ++ The user does not need to provide space for the stream buffer ++ as the TL handles memory allocations and usage. ++ ++ Inserting data to a stream's buffer can be done either: ++ - by using TLReserve/TLCommit: User is provided with a buffer ++ to write data to. ++ - or by using TLWrite: User provides a buffer with ++ data to be committed. The TL ++ copies the data from the ++ buffer into the stream buffer ++ and returns. ++ Users should be aware that there are implementation overheads ++ associated with every stream buffer. If you find that less ++ data are captured than expected then try increasing the ++ stream buffer size or use TLInfo to obtain buffer parameters ++ and calculate optimum required values at run time. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#ifndef TLSTREAM_H ++#define TLSTREAM_H ++ ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "pvrsrv_tlcommon.h" ++#include "device.h" ++ ++/*! Extract TL stream opmode from the given stream create flags. ++ * Last 3 bits of streamFlag is used for storing opmode, hence ++ * opmode mask is set as following. */ ++#define TL_OPMODE_MASK 0x7 ++ ++/* ++ * NOTE: This enum is used to directly access the HTB_OPMODE_xxx values ++ * within htbserver.c. ++ * As such we *MUST* keep the values matching in order of declaration. ++ */ ++/*! Opmode specifying circular buffer behaviour */ ++typedef enum ++{ ++ /*! Undefined operation mode */ ++ TL_OPMODE_UNDEF = 0, ++ ++ /*! Reject new data if the buffer is full, producer may then decide to ++ * drop the data or retry after some time. */ ++ TL_OPMODE_DROP_NEWER, ++ ++ /*! When buffer is full, advance the tail/read position to accept the new ++ * reserve call (size permitting), effectively overwriting the oldest ++ * data in the circular buffer. Not supported yet. */ ++ TL_OPMODE_DROP_OLDEST, ++ ++ /*! Block Reserve (subsequently Write) calls if there is not enough space ++ * until some space is freed via a client read operation. 
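++	 * (Aside: the chosen mode lives in the low bits of the stream create
++	 * flags and is recovered as ui32StreamFlags & TL_OPMODE_MASK, so it is
++	 * combined with the behaviour flags defined further below, e.g.
++	 * TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT. Note that
++	 * TL_OPMODE_BLOCK cannot be combined with TL_FLAG_PERMANENT_NO_WRAP;
++	 * stream create rejects that pairing.)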
*/
++	TL_OPMODE_BLOCK,
++
++	/*!< For error checking */
++	TL_OPMODE_LAST
++
++} TL_OPMODE;
++
++typedef enum {
++	/* Enum to be used in conjunction with the new Flags feature */
++
++	/* Flag set when Drop Oldest is set and packets have been dropped */
++	TL_FLAG_OVERWRITE_DETECTED = (1 << 0),
++	/* Prevents DoTLStreamReserve() from injecting
++	 * PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED packets */
++	TL_FLAG_NO_WRITE_FAILED = (1 << 1),
++} TL_Flags;
++
++static_assert(TL_OPMODE_LAST <= TL_OPMODE_MASK,
++	      "TL_OPMODE_LAST must not exceed TL_OPMODE_MASK");
++
++/*! Flags specifying stream behaviour */
++/*! Do not destroy the stream if there are still data that have not been
++ * copied to user space. Block until the stream is emptied. */
++#define TL_FLAG_FORCE_FLUSH (1U<<8)
++/*! Do not signal consumers on commit automatically when the stream buffer
++ * transitions from empty to non-empty. The producer is responsible for
++ * signalling when it chooses. */
++#define TL_FLAG_NO_SIGNAL_ON_COMMIT (1U<<9)
++
++/*! When a stream has this property it never wraps around and
++ * overwrites existing data, hence it is a fixed size persistent
++ * buffer; data written are permanent. Producers need to ensure
++ * the buffer is big enough for their needs.
++ * When a stream is opened for reading the client will always
++ * find the read position at the start of the buffer/data. */
++#define TL_FLAG_PERMANENT_NO_WRAP (1U<<10)
++
++/*! Defer allocation of the stream's shared memory until first open. */
++#define TL_FLAG_ALLOCATE_ON_FIRST_OPEN (1U<<11)
++
++/*! Structure used to pass internal TL stream size information to users. */
++typedef struct _TL_STREAM_INFO_
++{
++	IMG_UINT32 headerSize;          /*!< Packet header size in bytes */
++	IMG_UINT32 minReservationSize;  /*!< Minimum data size reserved in bytes */
++	IMG_UINT32 pageSize;            /*!< Page size in bytes */
++	IMG_UINT32 pageAlign;           /*!< Page alignment in bytes */
++	IMG_UINT32 maxTLpacketSize;     /*!< Max allowed TL packet size */
++} TL_STREAM_INFO, *PTL_STREAM_INFO;
++
++/*! Callback operations or notifications that a stream producer may handle
++ * when requested by the Transport Layer.
++ */
++#define TL_SOURCECB_OP_CLIENT_EOS 0x01 /*!< Client has reached end of stream,
++                                        * can any more data be supplied?
++                                        * ui32Resp ignored in this operation */
++
++/*! Function pointer type for the callback handler into the "producer" code
++ * that writes data to the TL stream. The producer should handle the
++ * notification or operation supplied in ui32ReqOp on stream hStream. The
++ * operations and notifications are defined above in TL_SOURCECB_OP */
++typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream,
++		IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser);
++
++typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg);
++
++/*************************************************************************/ /*!
++ @Function      TLAllocSharedMemIfNull
++ @Description   Allocates shared memory for the stream.
++ @Input         hStream     Stream handle.
++ @Return        eError      Internal services call returned eError error
++                            number.
++ @Return        PVRSRV_OK
++*/ /**************************************************************************/
++PVRSRV_ERROR
++TLAllocSharedMemIfNull(IMG_HANDLE hStream);
++
++/*************************************************************************/ /*!
++ @Function      TLFreeSharedMem
++ @Description   Frees the stream's shared memory.
++ @Input         hStream     Stream handle.
++*/ /**************************************************************************/ ++void ++TLFreeSharedMem(IMG_HANDLE hStream); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamCreate ++ @Description Request the creation of a new stream and open a handle. ++ If creating a stream which should continue to exist after the ++ current context is finished, then TLStreamCreate must be ++ followed by a TLStreamOpen call. On any case, the number of ++ create/open calls must balance with the number of close calls ++ used. This ensures the resources of a stream are released when ++ it is no longer required. ++ @Output phStream Pointer to handle to store the new stream. ++ @Input szStreamName Name of stream, maximum length: ++ PRVSRVTL_MAX_STREAM_NAME_SIZE. ++ If a longer string is provided,creation fails. ++ @Input ui32Size Desired buffer size in bytes. ++ @Input ui32StreamFlags Used to configure buffer behaviour. See above. ++ @Input pfOnReaderOpenCB Optional callback called when a client ++ opens this stream, may be null. ++ @Input pvOnReaderOpenUD Optional user data for pfOnReaderOpenCB, ++ may be null. ++ @Input pfProducerCB Optional callback, may be null. ++ @Input pvProducerUD Optional user data for callback, may be null. ++ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or string name ++ exceeded MAX_STREAM_NAME_SIZE ++ @Return PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate space for ++ stream handle. ++ @Return PVRSRV_ERROR_DUPLICATE_VALUE There already exists a stream with ++ the same stream name string. ++ @Return eError Internal services call returned ++ eError error number. ++ @Return PVRSRV_OK ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++TLStreamCreate(IMG_HANDLE *phStream, ++ const IMG_CHAR *szStreamName, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32StreamFlags, ++ TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB, ++ void *pvOnReaderOpenUD, ++ TL_STREAM_SOURCECB pfProducerCB, ++ void *pvProducerUD); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamOpen ++ @Description Attach to existing stream that has already been created by a ++ TLStreamCreate call. A handle is returned to the stream. ++ @Output phStream Pointer to handle to store the stream. ++ @Input szStreamName Name of stream, should match an already ++ existing stream name ++ @Return PVRSRV_ERROR_NOT_FOUND None of the streams matched the ++ requested stream name. ++ PVRSRV_ERROR_INVALID_PARAMS Non-NULL pointer to stream ++ handler is required. ++ @Return PVRSRV_OK Success. ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++TLStreamOpen(IMG_HANDLE *phStream, ++ const IMG_CHAR *szStreamName); ++ ++ ++/*************************************************************************/ /*! ++ @Function TLStreamReset ++ @Description Resets read and write pointers and pending flag. ++ @Output phStream Pointer to stream's handle ++*/ /**************************************************************************/ ++void TLStreamReset(IMG_HANDLE hStream); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamSetNotifStream ++ @Description Registers a "notification stream" which will be used to ++ publish information about state change of the "hStream" ++ stream. Notification can inform about events such as stream ++ open/close, etc. ++ @Input hStream Handle to stream to update. 
++ @Input         hNotifStream  Handle to the stream which will be used for
++                              publishing notifications.
++ @Return        PVRSRV_ERROR_INVALID_PARAMS If either of the parameters is
++                                            NULL.
++ @Return        PVRSRV_OK                   Success.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream);
++
++/*************************************************************************/ /*!
++ @Function      TLStreamReconfigure
++ @Description   Request the stream flags controlling buffer behaviour to
++                be updated.
++                In the case where TL_OPMODE_BLOCK is to be used,
++                TLStreamCreate should be called without that flag and this
++                function used to change the stream mode once a consumer
++                process has been started. This avoids a deadlock scenario
++                where the TLStreamWrite/TLStreamReserve call would hold the
++                Bridge Lock while blocking if the TL buffer is full.
++                TL_OPMODE_BLOCK must never drop the Bridge Lock as this
++                leads to another deadlock scenario where the caller of
++                TLStreamWrite/TLStreamReserve has already acquired another
++                lock (e.g. gHandleLock) which is not dropped. This then
++                leads to that thread acquiring locks out of order.
++ @Input         hStream          Handle to stream to update.
++ @Input         ui32StreamFlags  Flags that configure buffer behaviour. See
++                                 above.
++ @Return        PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or
++                                            inconsistent stream flags.
++ @Return        PVRSRV_ERROR_NOT_READY      Stream is currently being written
++                                            to, try again later.
++ @Return        eError                      Internal services call returned
++                                            eError error number.
++ @Return        PVRSRV_OK
++*/ /**************************************************************************/
++PVRSRV_ERROR
++TLStreamReconfigure(IMG_HANDLE hStream,
++                    IMG_UINT32 ui32StreamFlags);
++
++/*************************************************************************/ /*!
++ @Function      TLStreamClose
++ @Description   Detach from the stream associated with the given handle. If
++                the current handle is the last one accessing the stream
++                (i.e. the number of TLStreamCreate+TLStreamOpen calls matches
++                the number of TLStreamClose calls) then the stream is also
++                deleted.
++                On return the handle is no longer valid.
++ @Input         hStream     Handle to stream that will be closed.
++ @Return        None.
++*/ /**************************************************************************/
++void
++TLStreamClose(IMG_HANDLE hStream);
++
++/*************************************************************************/ /*!
++ @Function      TLStreamReserve
++ @Description   Reserve space in the stream buffer. When successful every
++                TLStreamReserve call must be followed by a matching
++                TLStreamCommit call. While a TLStreamCommit call is pending
++                for a stream, subsequent TLStreamReserve calls for this
++                stream will fail.
++ @Input         hStream     Stream handle.
++ @Output        ppui8Data   Pointer to a pointer to a location in the
++                            buffer. The caller can then use this address
++                            in writing data into the stream.
++ @Input         ui32Size    Number of bytes to reserve in buffer.
++ @Return        PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
++ @Return        PVRSRV_ERROR_NOT_READY      There are data previously reserved
++                                            that are pending to be committed.
++ @Return        PVRSRV_ERROR_STREAM_MISUSE  Misusing the stream by trying to
++                                            reserve more space than the
++                                            buffer size.
++ @Return        PVRSRV_ERROR_STREAM_FULL    The reserve size requested
++                                            is larger than the free
++                                            space.
++ @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size ++ requested is larger ++ than max TL packet size ++ @Return PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer ++ does not have enough space ++ for the reserve. ++ @Return PVRSRV_OK Success, output arguments valid. ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++TLStreamReserve(IMG_HANDLE hStream, ++ IMG_UINT8 **ppui8Data, ++ IMG_UINT32 ui32Size); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamReserve2 ++ @Description Reserve space in stream buffer. When successful every ++ TLStreamReserve call must be followed by a matching ++ TLStreamCommit call. While a TLStreamCommit call is pending ++ for a stream, subsequent TLStreamReserve calls for this ++ stream will fail. ++ @Input hStream Stream handle. ++ @Output ppui8Data Pointer to a pointer to a location in the ++ buffer. The caller can then use this address ++ in writing data into the stream. ++ @Input ui32Size Ideal number of bytes to reserve in buffer. ++ @Input ui32SizeMin Minimum number of bytes to reserve in buffer. ++ @Input pui32Available Optional, but when present and the ++ RESERVE_TOO_BIG error is returned, a size ++ suggestion is returned in this argument which ++ the caller can attempt to reserve again for a ++ successful allocation. ++ @Output pbIsReaderConnected Let writing clients know if reader is ++ connected or not, in case of error. ++ @Return PVRSRV_INVALID_PARAMS NULL stream handler. ++ @Return PVRSRV_ERROR_NOT_READY There are data previously reserved ++ that are pending to be committed. ++ @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to ++ reserve more space than the ++ buffer size. ++ @Return PVRSRV_ERROR_STREAM_FULL The reserve size requested ++ is larger than the free ++ space. ++ Check the pui32Available ++ value for the correct ++ reserve size to use. ++ @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size ++ requested is larger ++ than max TL packet size ++ @Return PVRSRV_ERROR_STREAM_NOT_ENOUGH_SPACE Permanent stream buffer ++ does not have enough space ++ for the reserve. ++ @Return PVRSRV_OK Success, output arguments valid. ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++TLStreamReserve2(IMG_HANDLE hStream, ++ IMG_UINT8 **ppui8Data, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 ui32SizeMin, ++ IMG_UINT32* pui32Available, ++ IMG_BOOL* pbIsReaderConnected); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamReserveReturnFlags ++ @Description Reserve space in stream buffer. When successful every ++ TLStreamReserve call must be followed by a matching ++ TLStreamCommit call. While a TLStreamCommit call is pending ++ for a stream, subsequent TLStreamReserve calls for this ++ stream will fail. ++ @Input hStream Stream handle. ++ @Output ppui8Data Pointer to a pointer to a location in the ++ buffer. The caller can then use this address ++ in writing data into the stream. ++ @Input ui32Size Ideal number of bytes to reserve in buffer. ++ @Output pui32Flags Output parameter to return flags generated within ++ the reserve function. 
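++
++                A usage sketch (illustrative: pui8Payload and ui32Len are
++                assumptions of the example, not TL names):
++
++                    IMG_UINT8 *pui8Dest;
++                    IMG_UINT32 ui32Flags = 0;
++                    if (TLStreamReserveReturnFlags(hStream, &pui8Dest,
++                                       ui32Len, &ui32Flags) == PVRSRV_OK)
++                    {
++                        OSDeviceMemCopy(pui8Dest, pui8Payload, ui32Len);
++                        TLStreamCommit(hStream, ui32Len);
++                        if (ui32Flags & TL_FLAG_OVERWRITE_DETECTED)
++                        {
++                            /* drop-oldest reclaimed space; oldest packets lost */
++                        }
++                    }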
++*/ /**************************************************************************/ ++PVRSRV_ERROR ++TLStreamReserveReturnFlags(IMG_HANDLE hStream, ++ IMG_UINT8 **ppui8Data, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32* pui32Flags); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamGetUT ++ @Description Returns the current stream utilisation in bytes ++ @Input hStream Stream handle. ++ @Return IMG_UINT32 Stream utilisation ++*/ /**************************************************************************/ ++IMG_UINT32 TLStreamGetUT(IMG_HANDLE hStream); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamCommit ++ @Description Notify TL that data have been written in the stream buffer. ++ Should always follow and match TLStreamReserve call. ++ @Input hStream Stream handle. ++ @Input ui32Size Number of bytes that have been added to the ++ stream. ++ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle. ++ @Return PVRSRV_ERROR_STREAM_MISUSE Commit results in more data ++ committed than the buffer size, ++ the stream is misused. ++ @Return eError Commit was successful but ++ internal services call returned ++ eError error number. ++ @Return PVRSRV_OK ++*/ /**************************************************************************/ ++PVRSRV_ERROR ++TLStreamCommit(IMG_HANDLE hStream, ++ IMG_UINT32 ui32Size); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamWrite ++ @Description Combined Reserve/Commit call. This function Reserves space in ++ the specified stream buffer, copies ui32Size bytes of data ++ from the array pui8Src points to and Commits in an "atomic" ++ style operation. ++ @Input hStream Stream handle. ++ @Input pui8Src Source to read data from. ++ @Input ui32Size Number of bytes to copy and commit. ++ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. ++ @Return eError Error codes returned by either ++ Reserve or Commit. ++ @Return PVRSRV_OK ++ */ /**************************************************************************/ ++PVRSRV_ERROR ++TLStreamWrite(IMG_HANDLE hStream, ++ IMG_UINT8 *pui8Src, ++ IMG_UINT32 ui32Size); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamWriteRetFlags ++ @Description Combined Reserve/Commit call. This function Reserves space in ++ the specified stream buffer, copies ui32Size bytes of data ++ from the array pui8Src points to and Commits in an "atomic" ++ style operation. Also accepts a pointer to a bit flag value ++ for returning write status flags. ++ @Input hStream Stream handle. ++ @Input pui8Src Source to read data from. ++ @Input ui32Size Number of bytes to copy and commit. ++ @Output pui32Flags Output parameter for write status info ++ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handler. ++ @Return eError Error codes returned by either ++ Reserve or Commit. ++ @Return PVRSRV_OK ++ */ /**************************************************************************/ ++PVRSRV_ERROR ++TLStreamWriteRetFlags(IMG_HANDLE hStream, ++ IMG_UINT8 *pui8Src, ++ IMG_UINT32 ui32Size, ++ IMG_UINT32 *pui32Flags); ++ ++/*************************************************************************/ /*! ++ @Function TLStreamSync ++ @Description Signal the consumer to start acquiring data from the stream ++ buffer. Called by producers that use the flag ++ TL_FLAG_NO_SIGNAL_ON_COMMIT to manually control when ++ consumers starting reading the stream. 
++                Used when multiple small writes need to be batched.
++ @Input         hStream            Stream handle.
++ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
++ @Return eError                    Error codes returned by either Reserve or
++                                   Commit.
++ @Return PVRSRV_OK
++ */ /**************************************************************************/
++PVRSRV_ERROR
++TLStreamSync(IMG_HANDLE hStream);
++
++
++/*************************************************************************/ /*!
++ @Function      TLStreamMarkEOS
++ @Description   Insert an EOS marker packet into the given stream.
++ @Input         hStream            Stream handle.
++ @Input         bRemoveOld         If TRUE, remove the old stream record file
++                                   before splitting to a new file.
++ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
++ @Return eError                    Error codes returned by either Reserve or
++                                   Commit.
++ @Return PVRSRV_OK                 Success.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++TLStreamMarkEOS(IMG_HANDLE hStream, IMG_BOOL bRemoveOld);
++
++/*************************************************************************/ /*!
++@Function       TLStreamMarkStreamOpen
++@Description    Puts an *open* stream packet into hStream's notification
++                stream, if one is set; returns an error otherwise.
++@Input          hStream            Stream handle.
++@Return         PVRSRV_OK on success and an error code on failure.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++TLStreamMarkStreamOpen(IMG_HANDLE hStream);
++
++/*************************************************************************/ /*!
++@Function       TLStreamMarkStreamClose
++@Description    Puts a *close* stream packet into hStream's notification
++                stream, if one is set; returns an error otherwise.
++@Input          hStream            Stream handle.
++@Return         PVRSRV_OK on success and an error code on failure.
++*/ /**************************************************************************/
++PVRSRV_ERROR
++TLStreamMarkStreamClose(IMG_HANDLE hStream);
++
++/*************************************************************************/ /*!
++ @Function      TLStreamInfo
++ @Description   Run-time information about buffer elemental sizes.
++                It sets psInfo members accordingly. Users can use those
++                values to calculate the parameters they use in
++                TLStreamCreate and TLStreamReserve.
++ @Input         hStream            Stream handle.
++ @Output        psInfo             Pointer to stream info structure.
++ @Return        None.
++*/ /**************************************************************************/
++void
++TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo);
++
++/*************************************************************************/ /*!
++ @Function      TLStreamIsOpenForReading
++ @Description   Query if a stream has any readers connected.
++ @Input         hStream            Stream handle.
++ @Return        IMG_BOOL           True if at least one reader is connected,
++                                   false otherwise.
++*/ /**************************************************************************/
++IMG_BOOL
++TLStreamIsOpenForReading(IMG_HANDLE hStream);
++
++/*************************************************************************/ /*!
++ @Function      TLStreamOutOfData
++ @Description   Query if the stream is empty (no data waiting to be read).
++ @Input         hStream            Stream handle.
++ @Return        IMG_BOOL           True if read==write, i.e. no data waiting;
++                                   false otherwise.
++*/ /**************************************************************************/
++IMG_BOOL TLStreamOutOfData(IMG_HANDLE hStream);
++
++/*************************************************************************/ /*!
++ @Function      TLStreamResetProducerByteCount
++ @Description   Reset the producer byte counter on the specified stream.
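++                The stream is expected to have been fully drained by the
++                reader beforehand; a reset on a non-empty stream still takes
++                effect but reports PVRSRV_ERROR_STREAM_MISUSE, as noted
++                below.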
++ @Input hStream Stream handle. ++ @Input IMG_UINT32 Value to reset counter to, often 0. ++ @Return PVRSRV_OK Success. ++ @Return PVRSRV_ERROR_STREAM_MISUSE Success but the read and write ++ positions did not match, ++ stream not empty. ++*/ /**************************************************************************/ ++ ++PVRSRV_ERROR ++TLStreamResetProducerByteCount(IMG_HANDLE hStream, IMG_UINT32 ui32Value); ++ ++#endif /* TLSTREAM_H */ ++/***************************************************************************** ++ End of file (tlstream.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/trace_events.c b/drivers/gpu/drm/img-rogue/trace_events.c +new file mode 100644 +index 000000000000..39242ed2b95c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/trace_events.c +@@ -0,0 +1,265 @@ ++/*************************************************************************/ /*! ++@Title Linux trace event helper functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/
++
++#include <linux/version.h>
++#include <linux/sched.h>
++
++#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT)
++#if !defined(CONFIG_TRACE_GPU_MEM)
++#define CREATE_TRACE_POINTS
++#include <trace/events/gpu_mem.h>
++#undef CREATE_TRACE_POINTS
++#else /* !defined(CONFIG_TRACE_GPU_MEM) */
++#include <trace/events/gpu_mem.h>
++#endif /* !defined(CONFIG_TRACE_GPU_MEM) */
++#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */
++
++#include "img_types.h"
++#include "trace_events.h"
++#include "rogue_trace_events.h"
++#include "sync_checkpoint_external.h"
++
++static bool fence_update_event_enabled, fence_check_event_enabled;
++
++bool trace_rogue_are_fence_updates_traced(void)
++{
++	return fence_update_event_enabled;
++}
++
++bool trace_rogue_are_fence_checks_traced(void)
++{
++	return fence_check_event_enabled;
++}
++
++/*
++ * Callbacks referenced from rogue_trace_events.h. Note that these are not
++ * thread-safe; however, since running trace code when tracing is not enabled
++ * is simply a no-op, there is no harm in it.
++ */
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
++int trace_fence_update_enabled_callback(void)
++#else
++void trace_fence_update_enabled_callback(void)
++#endif
++{
++	fence_update_event_enabled = true;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
++	return 0;
++#endif
++}
++
++void trace_fence_update_disabled_callback(void)
++{
++	fence_update_event_enabled = false;
++}
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
++int trace_fence_check_enabled_callback(void)
++#else
++void trace_fence_check_enabled_callback(void)
++#endif
++{
++	fence_check_event_enabled = true;
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
++	return 0;
++#endif
++}
++
++void trace_fence_check_disabled_callback(void)
++{
++	fence_check_event_enabled = false;
++}
++
++#if defined(SUPPORT_RGX)
++/* This is a helper that calls trace_rogue_fence_update for each fence in an
++ * array.
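++ *
++ * Each emitted event is tagged with the calling process name (current->comm)
++ * and reports the checkpoint state as signalled; the paui32Values argument
++ * is accepted but not read by this helper.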
++ */ ++void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext, ++ IMG_UINT32 ui32Offset, ++ IMG_UINT uCount, ++ PRGXFWIF_UFO_ADDR *pauiAddresses, ++ IMG_UINT32 *paui32Values) ++{ ++ IMG_UINT i; ++ for (i = 0; i < uCount; i++) ++ { ++ trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset, ++ pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED); ++ } ++} ++ ++void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext, ++ IMG_UINT32 ui32Offset, ++ IMG_UINT uCount, ++ PRGXFWIF_UFO_ADDR *pauiAddresses, ++ IMG_UINT32 *paui32Values) ++{ ++ IMG_UINT i; ++ for (i = 0; i < uCount; i++) ++ { ++ trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset, ++ pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED); ++ } ++} ++ ++void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, ++ IMG_UINT32 ui32FWCtx, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) ++{ ++ IMG_UINT i; ++ for (i = 0; i < ui32UFOCount; i++) ++ { ++ trace_rogue_ufo_update(ui64OSTimestamp, ui32FWCtx, ++ ui32IntJobRef, ++ ui32ExtJobRef, ++ puData->sUpdate.ui32FWAddr, ++ puData->sUpdate.ui32OldValue, ++ puData->sUpdate.ui32NewValue); ++ puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sUpdate)); ++ } ++} ++ ++void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, ++ IMG_UINT32 ui32FWCtx, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_BOOL bPrEvent, ++ IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) ++{ ++ IMG_UINT i; ++ for (i = 0; i < ui32UFOCount; i++) ++ { ++ if (bPrEvent) ++ { ++ trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32FWCtx, ++ ui32IntJobRef, ui32ExtJobRef, ++ puData->sCheckSuccess.ui32FWAddr, ++ puData->sCheckSuccess.ui32Value); ++ } ++ else ++ { ++ trace_rogue_ufo_check_success(ui64OSTimestamp, ui32FWCtx, ++ ui32IntJobRef, ui32ExtJobRef, ++ puData->sCheckSuccess.ui32FWAddr, ++ puData->sCheckSuccess.ui32Value); ++ } ++ puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckSuccess)); ++ } ++} ++ ++void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, ++ IMG_UINT32 ui32FWCtx, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_BOOL bPrEvent, ++ IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) ++{ ++ IMG_UINT i; ++ for (i = 0; i < ui32UFOCount; i++) ++ { ++ if (bPrEvent) ++ { ++ trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32FWCtx, ++ ui32IntJobRef, ui32ExtJobRef, ++ puData->sCheckFail.ui32FWAddr, ++ puData->sCheckFail.ui32Value, ++ puData->sCheckFail.ui32Required); ++ } ++ else ++ { ++ trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32FWCtx, ++ ui32IntJobRef, ui32ExtJobRef, ++ puData->sCheckFail.ui32FWAddr, ++ puData->sCheckFail.ui32Value, ++ puData->sCheckFail.ui32Required); ++ } ++ puData = IMG_OFFSET_ADDR(puData, sizeof(puData->sCheckFail)); ++ } ++} ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) ++ ++int PVRGpuTraceEnableUfoCallbackWrapper(void) ++{ ++ ++#if defined(SUPPORT_RGX) ++ PVRGpuTraceEnableUfoCallback(); ++#endif ++ ++ return 0; ++} ++ ++int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void) ++{ ++ ++#if defined(SUPPORT_RGX) ++ PVRGpuTraceEnableFirmwareActivityCallback(); ++#endif ++ ++ return 0; ++} ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */ ++ ++void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, ++ IMG_UINT64 ui64Size) ++{ ++#if defined(CONFIG_TRACE_GPU_MEM) || 
defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) ++ trace_gpu_mem_total(ui8GPUId, 0, ui64Size); ++#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ ++} ++ ++void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, ++ IMG_UINT32 ui32Pid, ++ IMG_UINT64 ui64Size) ++{ ++#if defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) ++ trace_gpu_mem_total(ui8GPUId, ui32Pid, ui64Size); ++#endif /* defined(CONFIG_TRACE_GPU_MEM) || defined(PVRSRV_ENABLE_GPU_MEM_TRACEPOINT) */ ++} +diff --git a/drivers/gpu/drm/img-rogue/trace_events.h b/drivers/gpu/drm/img-rogue/trace_events.h +new file mode 100644 +index 000000000000..0a8fffd5bc37 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/trace_events.h +@@ -0,0 +1,198 @@ ++/*************************************************************************/ /*! ++@Title Linux trace events and event helper functions ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#if !defined(TRACE_EVENTS_H) ++#define TRACE_EVENTS_H ++ ++#include "rgx_fwif_km.h" ++#include "rgx_hwperf.h" ++ ++/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't ++ * enabled, just like the actual trace event functions that the kernel ++ * defines for us. 
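++ *
++ * The no-op variants below are empty static inlines, so call sites can use
++ * these helpers unconditionally without per-call #ifdef guards.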
++ */ ++#ifdef CONFIG_EVENT_TRACING ++bool trace_rogue_are_fence_checks_traced(void); ++ ++bool trace_rogue_are_fence_updates_traced(void); ++ ++void trace_job_enqueue(IMG_UINT32 ui32FWContext, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ const char *pszKickType); ++ ++#if defined(SUPPORT_RGX) ++void trace_rogue_fence_updates(const char *cmd, const char *dm, ++ IMG_UINT32 ui32FWContext, ++ IMG_UINT32 ui32Offset, ++ IMG_UINT uCount, ++ PRGXFWIF_UFO_ADDR *pauiAddresses, ++ IMG_UINT32 *paui32Values); ++ ++void trace_rogue_fence_checks(const char *cmd, const char *dm, ++ IMG_UINT32 ui32FWContext, ++ IMG_UINT32 ui32Offset, ++ IMG_UINT uCount, ++ PRGXFWIF_UFO_ADDR *pauiAddresses, ++ IMG_UINT32 *paui32Values); ++ ++void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, ++ IMG_UINT32 ui32FWCtx, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData); ++ ++void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, ++ IMG_UINT32 ui32FWCtx, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_BOOL bPrEvent, ++ IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData); ++ ++void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, ++ IMG_UINT32 ui32FWCtx, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_BOOL bPrEvent, ++ IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData); ++#endif /* if defined(SUPPORT_RGX) */ ++ ++void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, ++ IMG_UINT64 ui64Size); ++ ++void TracepointUpdateGPUMemPerProcess(IMG_UINT8 ui8GPUId, ++ IMG_UINT32 ui32Pid, ++ IMG_UINT64 ui64Size); ++ ++#else /* CONFIG_TRACE_EVENTS */ ++static inline ++bool trace_rogue_are_fence_checks_traced(void) ++{ ++ return false; ++} ++ ++static inline ++bool trace_rogue_are_fence_updates_traced(void) ++{ ++ return false; ++} ++ ++static inline ++void trace_job_enqueue(IMG_UINT32 ui32FWContext, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ const char *pszKickType) ++{ ++} ++ ++#if defined(SUPPORT_RGX) ++static inline ++void trace_rogue_fence_updates(const char *cmd, const char *dm, ++ IMG_UINT32 ui32FWContext, ++ IMG_UINT32 ui32Offset, ++ IMG_UINT uCount, ++ PRGXFWIF_UFO_ADDR *pauiAddresses, ++ IMG_UINT32 *paui32Values) ++{ ++} ++ ++static inline ++void trace_rogue_fence_checks(const char *cmd, const char *dm, ++ IMG_UINT32 ui32FWContext, ++ IMG_UINT32 ui32Offset, ++ IMG_UINT uCount, ++ PRGXFWIF_UFO_ADDR *pauiAddresses, ++ IMG_UINT32 *paui32Values) ++{ ++} ++ ++static inline ++void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp, ++ IMG_UINT32 ui32FWCtx, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) ++{ ++} ++ ++static inline ++void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp, ++ IMG_UINT32 ui32FWCtx, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_BOOL bPrEvent, ++ IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) ++{ ++} ++ ++static inline ++void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp, ++ IMG_UINT32 ui32FWCtx, ++ IMG_UINT32 ui32ExtJobRef, ++ IMG_UINT32 ui32IntJobRef, ++ IMG_BOOL bPrEvent, ++ IMG_UINT32 ui32UFOCount, ++ const RGX_HWPERF_UFO_DATA_ELEMENT *puData) ++{ ++} ++#endif /* if defined(SUPPORT_RGX)*/ ++ ++static inline ++void TracepointUpdateGPUMemGlobal(IMG_UINT8 ui8GPUId, ++ IMG_UINT64 ui64Size) ++{ ++} ++ ++static inline ++void TracepointUpdateGPUMemPerProcess(IMG_UINT8 
ui8GPUId, ++ IMG_UINT32 ui32Pid, ++ IMG_UINT64 ui64Size) ++{ ++} ++ ++#endif /* CONFIG_TRACE_EVENTS */ ++ ++#endif /* TRACE_EVENTS_H */ +diff --git a/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c b/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c +new file mode 100644 +index 000000000000..8adf200cdcef +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.c +@@ -0,0 +1,280 @@ ++/*************************************************************************/ /*! ++@File ++@Title Provides splay-trees. ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Implementation of splay-trees. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ /**************************************************************************/ ++ ++#include "allocmem.h" /* for OSMemAlloc / OSMemFree */ ++#include "osfunc.h" /* for OSMemFree */ ++#include "pvr_debug.h" ++#include "uniq_key_splay_tree.h" ++ ++/** ++ * This function performs a simple top down splay ++ * ++ * @param uiFlags the flags that must splayed to the root (if possible). ++ * @param psTree The tree to splay. ++ * @return the resulting tree after the splay operation. 
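++ *
++ * The returned pointer must replace the caller's root, since splaying
++ * reorders the tree. A typical lookup, sketched after the pattern that
++ * PVRSRVInsert uses below:
++ *
++ *   psTree = PVRSRVSplay(uiFlags, psTree);
++ *   if (psTree != NULL && psTree->uiFlags == uiFlags)
++ *   {
++ *       ... the key is present and is now at the root ...
++ *   }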
++ */ ++IMG_INTERNAL ++IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) ++{ ++ IMG_SPLAY_TREE sTmp1; ++ IMG_PSPLAY_TREE psLeft; ++ IMG_PSPLAY_TREE psRight; ++ IMG_PSPLAY_TREE psTmp2; ++ ++ if (psTree == NULL) ++ { ++ return NULL; ++ } ++ ++ sTmp1.psLeft = NULL; ++ sTmp1.psRight = NULL; ++ ++ psLeft = &sTmp1; ++ psRight = &sTmp1; ++ ++ for (;;) ++ { ++ if (uiFlags < psTree->uiFlags) ++ { ++ if (psTree->psLeft == NULL) ++ { ++ break; ++ } ++ ++ if (uiFlags < psTree->psLeft->uiFlags) ++ { ++ /* if we get to this point, we need to rotate right the tree */ ++ psTmp2 = psTree->psLeft; ++ psTree->psLeft = psTmp2->psRight; ++ psTmp2->psRight = psTree; ++ psTree = psTmp2; ++ if (psTree->psLeft == NULL) ++ { ++ break; ++ } ++ } ++ ++ /* if we get to this point, we need to link right */ ++ psRight->psLeft = psTree; ++ psRight = psTree; ++ psTree = psTree->psLeft; ++ } ++ else ++ { ++ if (uiFlags > psTree->uiFlags) ++ { ++ if (psTree->psRight == NULL) ++ { ++ break; ++ } ++ ++ if (uiFlags > psTree->psRight->uiFlags) ++ { ++ /* if we get to this point, we need to rotate left the tree */ ++ psTmp2 = psTree->psRight; ++ psTree->psRight = psTmp2->psLeft; ++ psTmp2->psLeft = psTree; ++ psTree = psTmp2; ++ if (psTree->psRight == NULL) ++ { ++ break; ++ } ++ } ++ ++ /* if we get to this point, we need to link left */ ++ psLeft->psRight = psTree; ++ psLeft = psTree; ++ psTree = psTree->psRight; ++ } ++ else ++ { ++ break; ++ } ++ } ++ } ++ ++ /* at this point re-assemble the tree */ ++ psLeft->psRight = psTree->psLeft; ++ psRight->psLeft = psTree->psRight; ++ psTree->psLeft = sTmp1.psRight; ++ psTree->psRight = sTmp1.psLeft; ++ return psTree; ++} ++ ++ ++/** ++ * This function inserts a node into the Tree (unless it is already present, in ++ * which case it is equivalent to performing only a splay operation ++ * ++ * @param uiFlags the key of the new node ++ * @param psTree The tree into which one wants to add a new node ++ * @return The resulting with the node in it ++ */ ++IMG_INTERNAL ++IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) ++{ ++ IMG_PSPLAY_TREE psNew; ++ ++ if (psTree != NULL) ++ { ++ psTree = PVRSRVSplay(uiFlags, psTree); ++ if (psTree->uiFlags == uiFlags) ++ { ++ return psTree; ++ } ++ } ++ ++ psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE)); ++ if (psNew == NULL) ++ { ++ PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree.")); ++ return NULL; ++ } ++ ++ psNew->uiFlags = uiFlags; ++ OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets)); ++ ++#if defined(PVR_CTZLL) ++ psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1); ++#endif ++ ++ if (psTree == NULL) ++ { ++ psNew->psLeft = NULL; ++ psNew->psRight = NULL; ++ return psNew; ++ } ++ ++ if (uiFlags < psTree->uiFlags) ++ { ++ psNew->psLeft = psTree->psLeft; ++ psNew->psRight = psTree; ++ psTree->psLeft = NULL; ++ } ++ else ++ { ++ psNew->psRight = psTree->psRight; ++ psNew->psLeft = psTree; ++ psTree->psRight = NULL; ++ } ++ ++ return psNew; ++} ++ ++ ++/** ++ * Deletes a node from the tree (unless it is not there, in which case it is ++ * equivalent to a splay operation) ++ * ++ * @param uiFlags the value of the node to remove ++ * @param psTree the tree into which the node must be removed ++ * @return the resulting tree ++ */ ++IMG_INTERNAL ++IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) ++{ ++ IMG_PSPLAY_TREE psTmp; ++ if 
(psTree == NULL) ++ { ++ return NULL; ++ } ++ ++ psTree = PVRSRVSplay(uiFlags, psTree); ++ if (uiFlags == psTree->uiFlags) ++ { ++ /* The value was present in the tree */ ++ if (psTree->psLeft == NULL) ++ { ++ psTmp = psTree->psRight; ++ } ++ else ++ { ++ psTmp = PVRSRVSplay(uiFlags, psTree->psLeft); ++ psTmp->psRight = psTree->psRight; ++ } ++ OSFreeMem(psTree); ++ return psTmp; ++ } ++ ++ /* The value was not present in the tree, so just return it as is ++ * (after the splay) */ ++ return psTree; ++} ++ ++/** ++ * This function picks up the appropriate node for the given flags ++ * ++ * @param uiFlags the flags that must associated with the node. ++ * @param psTree current splay tree node. ++ * @return the resulting tree node after the search operation. ++ */ ++IMG_INTERNAL ++IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree) ++{ ++ if (psTree == NULL) ++ { ++ return NULL; ++ } ++ ++ while (psTree) ++ { ++ if (uiFlags == psTree->uiFlags) ++ { ++ return psTree; ++ } ++ ++ if (uiFlags < psTree->uiFlags) ++ { ++ psTree = psTree->psLeft; ++ continue; ++ } ++ ++ if (uiFlags > psTree->uiFlags) ++ { ++ psTree = psTree->psRight; ++ continue; ++ } ++ } ++ ++ return NULL; ++} +diff --git a/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h b/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h +new file mode 100644 +index 000000000000..75ec9297e9c2 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/uniq_key_splay_tree.h +@@ -0,0 +1,90 @@ ++/*************************************************************************/ /*! ++@File ++@Title Splay trees interface ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Provides debug functionality ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". 
++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef UNIQ_KEY_SPLAY_TREE_H_ ++#define UNIQ_KEY_SPLAY_TREE_H_ ++ ++#include "img_types.h" ++#include "pvr_intrinsics.h" ++ ++#if defined(PVR_CTZLL) ++ /* map the is_bucket_n_free to an int. ++ * This way, the driver can find the first non empty without loop ++ */ ++ typedef IMG_UINT64 IMG_ELTS_MAPPINGS; ++#endif ++ ++typedef IMG_UINT64 IMG_PSPLAY_FLAGS_T; ++ ++/* head of list of free boundary tags for indexed by pvr_log2 of the ++ boundary tag size */ ++ ++#define FREE_TABLE_LIMIT 40 ++ ++struct _BT_; ++ ++typedef struct img_splay_tree ++{ ++ /* left child/subtree */ ++ struct img_splay_tree * psLeft; ++ ++ /* right child/subtree */ ++ struct img_splay_tree * psRight; ++ ++ /* Flags to match on this span, used as the key. */ ++ IMG_PSPLAY_FLAGS_T uiFlags; ++#if defined(PVR_CTZLL) ++ /* each bit of this int is a boolean telling if the corresponding ++ bucket is empty or not */ ++ IMG_ELTS_MAPPINGS bHasEltsMapping; ++#endif ++ struct _BT_ * buckets[FREE_TABLE_LIMIT]; ++} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE; ++ ++IMG_PSPLAY_TREE PVRSRVSplay (IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); ++IMG_PSPLAY_TREE PVRSRVInsert(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); ++IMG_PSPLAY_TREE PVRSRVDelete(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); ++IMG_PSPLAY_TREE PVRSRVFindNode(IMG_PSPLAY_FLAGS_T uiFlags, IMG_PSPLAY_TREE psTree); ++ ++ ++#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */ +diff --git a/drivers/gpu/drm/img-rogue/vmm_impl.h b/drivers/gpu/drm/img-rogue/vmm_impl.h +new file mode 100644 +index 000000000000..9ad5adedaef0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/vmm_impl.h +@@ -0,0 +1,186 @@ ++/*************************************************************************/ /*! ++@File vmm_impl.h ++@Title Common VM manager API ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This header provides common VM manager definitions that need to ++ be shared by system virtualization layer itself and modules that ++ implement the actual VM manager types. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. 
++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef VMM_IMPL_H ++#define VMM_IMPL_H ++ ++#include "img_types.h" ++#include "pvrsrv_error.h" ++ ++typedef enum _VMM_CONF_PARAM_ ++{ ++ VMM_CONF_PRIO_OSID0 = 0, ++ VMM_CONF_PRIO_OSID1 = 1, ++ VMM_CONF_PRIO_OSID2 = 2, ++ VMM_CONF_PRIO_OSID3 = 3, ++ VMM_CONF_PRIO_OSID4 = 4, ++ VMM_CONF_PRIO_OSID5 = 5, ++ VMM_CONF_PRIO_OSID6 = 6, ++ VMM_CONF_PRIO_OSID7 = 7, ++ VMM_CONF_HCS_DEADLINE = 8 ++} VMM_CONF_PARAM; ++ ++/* ++ Virtual machine manager (hypervisor) para-virtualization (PVZ) connection: ++ - Type is implemented by host and guest drivers ++ - Assumes synchronous function call semantics ++ - Unidirectional semantics ++ - For Host (vmm -> host) ++ - For Guest (guest -> vmm) ++ - Parameters can be IN/OUT/INOUT ++ ++ - Host pvz entries are pre-implemented by IMG ++ - For host implementation, see vmm_pvz_server.c ++ - Called by host side hypercall handler or VMM ++ ++ - Guest pvz entries are supplied by 3rd-party ++ - These are specific to hypervisor (VMM) type ++ - These implement the actual hypercalls mechanism ++ ++ Para-virtualization (PVZ) call runtime sequence: ++ 1 - Guest driver in guest VM calls PVZ function ++ 1.1 - Guest PVZ connection calls ++ 1.2 - Guest VM Manager type which ++ 1.2.1 - Performs any pre-processing like parameter packing, etc. ++ 1.2.2 - Issues hypercall (blocking synchronous call) ++ ++ 2 - VM Manager (hypervisor) receives hypercall ++ 2.1 - Hypercall handler: ++ 2.1.1 - Performs any pre-processing ++ 2.1.2 - If call terminates in VM Manager: perform action and return from hypercall ++ 2.1.3 - Otherwise forward to host driver (implementation specific call) ++ ++ 3 - Host driver receives call from VM Manager ++ 3.1 - Host VM manager type: ++ 3.1.1 - Performs any pre-processing like parameter unpacking, etc. ++ 3.1.2 - Acquires host driver PVZ handler and calls the appropriate entry ++ 3.2 - Host PVZ connection calls corresponding host system virtualisation layer ++ 3.3 - Host driver system virtualisation layer: ++ 3.3.1 - Perform action requested by guest driver ++ 3.3.2 - Return to host VM Manager type ++ 3.4 - Host VM Manager type: ++ 3.4.1 - Prepare to return from hypercall ++ 3.4.2 - Perform any post-processing like result packing, etc. 
++ 3.4.3 - Issue return from hypercall ++ ++ 4 - VM Manager (hypervisor) ++ 4.1 - Perform any post-processing ++ 4.2 - Return control to guest driver ++ ++ 5 - Guest driver in guest VM ++ 5.1 - Perform any post-processing like parameter unpacking, etc. ++ 5.2 - Continue execution in guest VM ++ */ ++typedef struct _VMM_PVZ_CONNECTION_ ++{ ++ struct { ++ /* ++ This pair must be implemented if the guest is responsible ++ for allocating the physical heap that backs its firmware ++ allocations, this is the default configuration. The physical ++ heap is allocated within the guest VM IPA space and this ++ IPA Addr/Size must be translated into the host's IPA space ++ by the VM manager before forwarding request to host. ++ If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED. ++ */ ++ PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32FuncID, ++ IMG_UINT32 ui32DevID, ++ IMG_UINT64 ui64Size, ++ IMG_UINT64 ui64PAddr); ++ ++ PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32FuncID, ++ IMG_UINT32 ui32DevID); ++ } sClientFuncTab; ++ ++ struct { ++ /* ++ Corresponding server side entries to handle guest PVZ calls ++ NOTE: ++ - Additional PVZ function ui32OSID parameter ++ - OSID determination is responsibility of VM manager ++ - Actual OSID value must be supplied by VM manager ++ - This can be done either in client/VMM/host side ++ - Must be done before host pvz function(s) are called ++ - Host pvz function validates incoming OSID values ++ */ ++ PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32OSID, ++ IMG_UINT32 ui32FuncID, ++ IMG_UINT32 ui32DevID, ++ IMG_UINT64 ui64Size, ++ IMG_UINT64 ui64PAddr); ++ ++ PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32OSID, ++ IMG_UINT32 ui32FuncID, ++ IMG_UINT32 ui32DevID); ++ } sServerFuncTab; ++ ++ struct { ++ /* ++ This is used by the VM manager to report pertinent runtime guest VM ++ information to the host; these events may in turn be forwarded to ++ the firmware ++ */ ++ PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32OSID); ++ ++ PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32OSID); ++ ++ PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue); ++ ++ } sVmmFuncTab; ++} VMM_PVZ_CONNECTION; ++ ++/*! ++******************************************************************************* ++ @Function VMMCreatePvzConnection() and VMMDestroyPvzConnection() ++ @Description Both the guest and VM manager call this in order to obtain a ++ PVZ connection to the VM and host respectively; that is, guest ++ calls it to obtain connection to VM, VM calls it to obtain a ++ connection to the host. ++ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code ++******************************************************************************/ ++PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection); ++void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection); ++ ++#endif /* VMM_IMPL_H */ +diff --git a/drivers/gpu/drm/img-rogue/vmm_pvz_client.c b/drivers/gpu/drm/img-rogue/vmm_pvz_client.c +new file mode 100644 +index 000000000000..427811a7b3a5 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/vmm_pvz_client.c +@@ -0,0 +1,138 @@ ++/*************************************************************************/ /*! ++@File vmm_pvz_client.c ++@Title VM manager client para-virtualization ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header provides VMM client para-virtualization APIs ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "pvrsrv.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++ ++#include "vmm_impl.h" ++#include "vz_vmm_pvz.h" ++#include "vmm_pvz_client.h" ++ ++ ++static inline void ++PvzClientLockAcquire(void) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ OSLockAcquire(psPVRSRVData->hPvzConnectionLock); ++} ++ ++static inline void ++PvzClientLockRelease(void) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ OSLockRelease(psPVRSRVData->hPvzConnectionLock); ++} ++ ++/* ++ * =========================================================== ++ * The following client para-virtualization (pvz) functions ++ * are exclusively called by guests to initiate a pvz call ++ * to the host via hypervisor (guest -> vm manager -> host) ++ * =========================================================== ++ */ ++ ++PVRSRV_ERROR ++PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ PVRSRV_ERROR eError; ++ IMG_DEV_PHYADDR sDevPAddr; ++ VMM_PVZ_CONNECTION *psVmmPvz; ++ IMG_UINT32 uiFuncID = PVZ_BRIDGE_MAPDEVICEPHYSHEAP; ++ PHYS_HEAP *psFwPhysHeap = psDevConfig->psDevNode->apsPhysHeap[PVRSRV_PHYS_HEAP_FW_MAIN]; ++ ++ eError = PhysHeapGetDevPAddr(psFwPhysHeap, &sDevPAddr); ++ ++#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES) ++{ ++ /* Host expects PA rather than IPA address, so on the platforms where ++ * IPA-PA translation is not done in hw, performs a software translation */ ++ ++ IMG_DEV_PHYADDR sDevPAddrTranslated; ++ ++ PhysHeapCpuPAddrToDevPAddr(psFwPhysHeap, 1, &sDevPAddrTranslated, (IMG_CPU_PHYADDR *)&sDevPAddr); ++ sDevPAddr.uiAddr = sDevPAddrTranslated.uiAddr; ++} ++#endif ++ ++ PVR_LOG_RETURN_IF_ERROR(eError, "PhysHeapGetDevPAddr"); ++ PVR_LOG_RETURN_IF_FALSE((sDevPAddr.uiAddr != 0), "PhysHeapGetDevPAddr", PVRSRV_ERROR_INVALID_PARAMS); ++ ++ psVmmPvz = PvzConnectionAcquire(); ++ PvzClientLockAcquire(); ++ ++ eError = psVmmPvz->sClientFuncTab.pfnMapDevPhysHeap(uiFuncID, ++ 0, ++ RGX_FIRMWARE_RAW_HEAP_SIZE, ++ sDevPAddr.uiAddr); ++ ++ PvzClientLockRelease(); ++ PvzConnectionRelease(psVmmPvz); ++ ++ return eError; ++} ++ ++PVRSRV_ERROR ++PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig) ++{ ++ PVRSRV_ERROR eError; ++ IMG_UINT32 uiFuncID = PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP; ++ VMM_PVZ_CONNECTION *psVmmPvz = PvzConnectionAcquire(); ++ PVR_ASSERT(psVmmPvz); ++ ++ PvzClientLockAcquire(); ++ ++ PVR_ASSERT(psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap); ++ ++ eError = psVmmPvz->sClientFuncTab.pfnUnmapDevPhysHeap(uiFuncID, 0); ++ ++ PvzClientLockRelease(); ++ PvzConnectionRelease(psVmmPvz); ++ ++ return eError; ++} ++ ++/****************************************************************************** ++ End of file (vmm_pvz_client.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/vmm_pvz_client.h b/drivers/gpu/drm/img-rogue/vmm_pvz_client.h +new file mode 100644 +index 000000000000..688e9f36c98c +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/vmm_pvz_client.h +@@ -0,0 +1,77 @@ ++/*************************************************************************/ /*! ++@File vmm_pvz_client.h ++@Title Guest VM manager client para-virtualization routines ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header provides guest VMM client para-virtualization APIs ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef VMM_PVZ_CLIENT_H ++#define VMM_PVZ_CLIENT_H ++ ++#include "pvrsrv.h" ++#include "img_types.h" ++#include "pvrsrv_error.h" ++#include "vmm_pvz_common.h" ++#include "vmm_impl.h" ++ ++/*! ++******************************************************************************* ++ @Function PvzClientMapDevPhysHeap ++ @Description The guest front-end to initiate a pfnMapDevPhysHeap PVZ call ++ to the host. ++ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code ++******************************************************************************/ ++PVRSRV_ERROR ++PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++/*! ++******************************************************************************* ++ @Function PvzClientUnmapDevPhysHeap ++ @Description The guest front-end to initiate a pfnUnmapDevPhysHeap PVZ call ++ to the host. ++ @Return PVRSRV_OK on success. 
Otherwise, a PVRSRV error code ++******************************************************************************/ ++PVRSRV_ERROR ++PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig); ++ ++#endif /* VMM_PVZ_CLIENT_H */ ++ ++/****************************************************************************** ++ End of file (vmm_pvz_client.h) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/vmm_pvz_common.h b/drivers/gpu/drm/img-rogue/vmm_pvz_common.h +new file mode 100644 +index 000000000000..82ab50d6fa30 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/vmm_pvz_common.h +@@ -0,0 +1,65 @@ ++/*************************************************************************/ /*! ++@File vmm_pvz_common.h ++@Title Common VM manager function IDs ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header provides VM manager para-virtualization function IDs and ++ definitions of their payload structures, if appropriate. ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#ifndef VMM_PVZ_COMMON_H ++#define VMM_PVZ_COMMON_H ++ ++#include "img_types.h" ++ ++#define PVZ_BRIDGE_DEFAULT 0UL ++#define PVZ_BRIDGE_MAPDEVICEPHYSHEAP (PVZ_BRIDGE_DEFAULT + 1) ++#define PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP (PVZ_BRIDGE_MAPDEVICEPHYSHEAP + 1) ++#define PVZ_BRIDGE_LAST (PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP + 1) ++ ++typedef struct _PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP ++{ ++ IMG_UINT64 ui64MemBase; ++ IMG_UINT32 ui32OSID; ++}PVZ_BRIDGEPARA_MAPDEVICEPHYSHEAP; ++ ++#endif /* VMM_PVZ_COMMON_H */ ++ ++/***************************************************************************** ++ End of file (vmm_pvz_common.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/vmm_pvz_server.c b/drivers/gpu/drm/img-rogue/vmm_pvz_server.c +new file mode 100644 +index 000000000000..f2c77e86cc3a +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/vmm_pvz_server.c +@@ -0,0 +1,245 @@ ++/*************************************************************************/ /*! ++@File vmm_pvz_server.c ++@Title VM manager server para-virtualization handlers ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description Header provides VMM server para-virtz handler APIs ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. ++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
++*/ /**************************************************************************/ ++ ++#include "pvrsrv.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "rgxfwutils.h" ++ ++#include "vz_vm.h" ++#include "vmm_impl.h" ++#include "vz_vmm_pvz.h" ++#include "vmm_pvz_server.h" ++ ++static inline void ++PvzServerLockAcquire(void) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ OSLockAcquire(psPVRSRVData->hPvzConnectionLock); ++} ++ ++static inline void ++PvzServerLockRelease(void) ++{ ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ OSLockRelease(psPVRSRVData->hPvzConnectionLock); ++} ++ ++ ++/* ++ * =========================================================== ++ * The following server para-virtualization (pvz) functions ++ * are exclusively called by the VM manager (hypervisor) on ++ * behalf of guests to complete guest pvz calls ++ * (guest -> vm manager -> host) ++ * =========================================================== ++ */ ++ ++PVRSRV_ERROR ++PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID, ++ IMG_UINT32 ui32FuncID, ++ IMG_UINT32 ui32DevID, ++ IMG_UINT64 ui64Size, ++ IMG_UINT64 ui64PAddr) ++{ ++#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) ++ /* ++ * Reject hypercall if called on a system configured at build time to ++ * preallocate the Guest's firmware heaps from static carveout memory. ++ */ ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Host PVZ config: Does not match with Guest PVZ config\n" ++ " Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__)); ++ return PVRSRV_ERROR_INVALID_PVZ_CONFIG; ++#else ++ PVRSRV_ERROR eError = PVRSRV_OK; ++ ++ PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS); ++ ++ if (ui32FuncID != PVZ_BRIDGE_MAPDEVICEPHYSHEAP) ++ { ++ PVR_DPF((PVR_DBG_ERROR, ++ "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d", ++ __func__, ++ ui32OSID, ++ (IMG_UINT32)PVZ_BRIDGE_MAPDEVICEPHYSHEAP, ++ ui32FuncID)); ++ return PVRSRV_ERROR_INVALID_PARAMS; ++ } ++ ++ PvzServerLockAcquire(); ++ ++#if defined(SUPPORT_RGX) ++ if (IsVmOnline(ui32OSID)) ++ { ++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData(); ++ PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList; ++ IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr}; ++ IMG_UINT32 sync; ++ ++ eError = RGXFwRawHeapAllocMap(psDeviceNode, ui32OSID, sDevPAddr, ui64Size); ++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXFwRawHeapAllocMap", e0); ++ ++ /* Invalidate MMU cache in preparation for a kick from this Guest */ ++ eError = psDeviceNode->pfnMMUCacheInvalidateKick(psDeviceNode, &sync); ++ PVR_LOG_GOTO_IF_ERROR(eError, "MMUCacheInvalidateKick", e0); ++ ++ /* Everything is ready for the firmware to start interacting with this OS */ ++ eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_ONLINE); ++ } ++e0: ++#endif /* defined(SUPPORT_RGX) */ ++ PvzServerLockRelease(); ++ ++ return eError; ++#endif ++} ++ ++PVRSRV_ERROR ++PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID, ++ IMG_UINT32 ui32FuncID, ++ IMG_UINT32 ui32DevID) ++{ ++#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS) ++ /* ++ * Reject hypercall if called on a system configured at built time to ++ * preallocate the Guest's firmware heaps from static carveout memory. 
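++	 * A guest built with RGX_VZ_STATIC_CARVEOUT_FW_HEAPS should never
++	 * issue this hypercall, so reaching this path points at mismatched
++	 * host/guest build configurations rather than a runtime fault.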
++ */
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Host PVZ config: Does not match the Guest PVZ config\n"
++ " Host preallocates the Guest's FW physheap from static memory carveouts at startup.\n", __func__));
++ return PVRSRV_ERROR_INVALID_PVZ_CONFIG;
++#else
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVR_LOG_RETURN_IF_FALSE((ui32DevID == 0), "Invalid Device ID", PVRSRV_ERROR_INVALID_PARAMS);
++
++ if (ui32FuncID != PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: Host PVZ call: OSID: %d: Invalid function ID: expected %d, got %d",
++ __func__,
++ ui32OSID,
++ (IMG_UINT32)PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP,
++ ui32FuncID));
++ return PVRSRV_ERROR_INVALID_PARAMS;
++ }
++
++ PvzServerLockAcquire();
++
++#if defined(SUPPORT_RGX)
++ if (IsVmOnline(ui32OSID))
++ {
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++ PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psDeviceNodeList;
++
++ /* Order firmware to offload this OS' data and stop accepting commands from it */
++ eError = RGXFWSetFwOsState(psDeviceNode->pvDevice, ui32OSID, RGXFWIF_OS_OFFLINE);
++ PVR_LOG_GOTO_IF_ERROR(eError, "RGXFWSetFwOsState", e0);
++
++ /* It is now safe to remove the Guest's memory mappings */
++ RGXFwRawHeapUnmapFree(psDeviceNode, ui32OSID);
++ }
++e0:
++#endif
++
++ PvzServerLockRelease();
++
++ return eError;
++#endif
++}
++
++/*
++ * ============================================================
++ * The following server para-virtualization (pvz) functions
++ * are exclusively called by the VM manager (hypervisor) to
++ * pass side band information to the host (vm manager -> host)
++ * ============================================================
++ */
++
++PVRSRV_ERROR
++PvzServerOnVmOnline(IMG_UINT32 ui32OSID)
++{
++ PVRSRV_ERROR eError;
++
++ PvzServerLockAcquire();
++
++ eError = PvzOnVmOnline(ui32OSID);
++
++ PvzServerLockRelease();
++
++ return eError;
++}
++
++PVRSRV_ERROR
++PvzServerOnVmOffline(IMG_UINT32 ui32OSID)
++{
++ PVRSRV_ERROR eError;
++
++ PvzServerLockAcquire();
++
++ eError = PvzOnVmOffline(ui32OSID);
++
++ PvzServerLockRelease();
++
++ return eError;
++}
++
++PVRSRV_ERROR
++PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
++{
++ PVRSRV_ERROR eError;
++
++ PvzServerLockAcquire();
++
++ eError = PvzVMMConfigure(eVMMParamType, ui32ParamValue);
++
++ PvzServerLockRelease();
++
++ return eError;
++}
++
++/******************************************************************************
++ End of file (vmm_pvz_server.c)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/vmm_pvz_server.h b/drivers/gpu/drm/img-rogue/vmm_pvz_server.h
+new file mode 100644
+index 000000000000..58223a0032e9
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/vmm_pvz_server.h
+@@ -0,0 +1,121 @@
++/*************************************************************************/ /*!
++@File vmm_pvz_server.h
++@Title VM manager para-virtualization interface helper routines
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Header provides APIs available to the VM manager; these must
++ be called to close the loop during guest para-virtualization
++ calls.
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef VMM_PVZ_SERVER_H
++#define VMM_PVZ_SERVER_H
++
++#include "vmm_impl.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++#include "vmm_pvz_common.h"
++
++/*!
++*******************************************************************************
++ @Function PvzServerMapDevPhysHeap
++ @Description The VM manager calls this in response to the guest PVZ
++ interface call pfnMapDevPhysHeap.
++ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
++******************************************************************************/
++PVRSRV_ERROR
++PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
++ IMG_UINT32 ui32FuncID,
++ IMG_UINT32 ui32DevID,
++ IMG_UINT64 ui64Size,
++ IMG_UINT64 ui64PAddr);
++
++/*!
++*******************************************************************************
++ @Function PvzServerUnmapDevPhysHeap
++ @Description The VM manager calls this in response to the guest PVZ
++ interface call pfnUnmapDevPhysHeap.
++ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
++******************************************************************************/
++PVRSRV_ERROR
++PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
++ IMG_UINT32 ui32FuncID,
++ IMG_UINT32 ui32DevID);
++
++/*!
++*******************************************************************************
++ @Function PvzServerOnVmOnline
++ @Description The VM manager calls this when a guest VM comes online.
++ The host driver will initialize the FW if it has not done so
++ already.
++ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
++******************************************************************************/
++PVRSRV_ERROR
++PvzServerOnVmOnline(IMG_UINT32 ui32OSID);
++
++/*!
++*******************************************************************************
++ @Function PvzServerOnVmOffline
++ @Description The VM manager calls this when a guest VM is about to go
++ offline. The VM manager may already have unmapped the GPU kick
++ register for that VM, but must keep its GPU memory mapped until
++ this call returns. Once the function returns, the FW holds no
++ references for that VM and none of its workloads are running on
++ the GPU, so it is safe to remove the VM's memory.
++ @Return PVRSRV_OK on success. PVRSRV_ERROR_TIMEOUT if for some reason
++ the FW is taking too long to clean up the resources of the
++ OSID. Otherwise, a PVRSRV_ERROR code.
++******************************************************************************/
++PVRSRV_ERROR
++PvzServerOnVmOffline(IMG_UINT32 ui32OSID);
++
++/*!
++*******************************************************************************
++ @Function PvzServerVMMConfigure
++ @Description The VM manager calls this to configure parameters such as
++ HCS or isolation.
++ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
++******************************************************************************/
++PVRSRV_ERROR
++PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType,
++ IMG_UINT32 ui32ParamValue);
++
++#endif /* VMM_PVZ_SERVER_H */
++
++/******************************************************************************
++ End of file (vmm_pvz_server.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/vmm_type_stub.c b/drivers/gpu/drm/img-rogue/vmm_type_stub.c
+new file mode 100644
+index 000000000000..747bf4a8e1f1
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/vmm_type_stub.c
+@@ -0,0 +1,119 @@
++/*************************************************************************/ /*!
++@File vmm_type_stub.c
++@Title Stub VM manager type
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description Sample stub (no-operation) VM manager implementation
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution.
If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++#include "pvrsrv.h" ++#include "img_types.h" ++#include "img_defs.h" ++#include "pvrsrv_error.h" ++#include "rgxheapconfig.h" ++ ++#include "vmm_impl.h" ++#include "vmm_pvz_server.h" ++ ++static PVRSRV_ERROR ++StubVMMMapDevPhysHeap(IMG_UINT32 ui32FuncID, ++ IMG_UINT32 ui32DevID, ++ IMG_UINT64 ui64Size, ++ IMG_UINT64 ui64Addr) ++{ ++ PVR_UNREFERENCED_PARAMETER(ui32FuncID); ++ PVR_UNREFERENCED_PARAMETER(ui32DevID); ++ PVR_UNREFERENCED_PARAMETER(ui64Size); ++ PVR_UNREFERENCED_PARAMETER(ui64Addr); ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++} ++ ++static PVRSRV_ERROR ++StubVMMUnmapDevPhysHeap(IMG_UINT32 ui32FuncID, ++ IMG_UINT32 ui32DevID) ++{ ++ PVR_UNREFERENCED_PARAMETER(ui32FuncID); ++ PVR_UNREFERENCED_PARAMETER(ui32DevID); ++ return PVRSRV_ERROR_NOT_IMPLEMENTED; ++} ++ ++static VMM_PVZ_CONNECTION gsStubVmmPvz = ++{ ++ .sClientFuncTab = { ++ /* pfnMapDevPhysHeap */ ++ &StubVMMMapDevPhysHeap, ++ ++ /* pfnUnmapDevPhysHeap */ ++ &StubVMMUnmapDevPhysHeap ++ }, ++ ++ .sServerFuncTab = { ++ /* pfnMapDevPhysHeap */ ++ &PvzServerMapDevPhysHeap, ++ ++ /* pfnUnmapDevPhysHeap */ ++ &PvzServerUnmapDevPhysHeap ++ }, ++ ++ .sVmmFuncTab = { ++ /* pfnOnVmOnline */ ++ &PvzServerOnVmOnline, ++ ++ /* pfnOnVmOffline */ ++ &PvzServerOnVmOffline, ++ ++ /* pfnVMMConfigure */ ++ &PvzServerVMMConfigure ++ } ++}; ++ ++PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection) ++{ ++ PVR_LOG_RETURN_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS); ++ *psPvzConnection = &gsStubVmmPvz; ++ PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type, no runtime VZ support")); ++ return PVRSRV_OK; ++} ++ ++void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection) ++{ ++ PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection"); ++} ++ ++/****************************************************************************** ++ End of file (vmm_type_stub.c) ++******************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/vz_vm.h b/drivers/gpu/drm/img-rogue/vz_vm.h +new file mode 100644 +index 000000000000..3a8042c7668b +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/vz_vm.h +@@ -0,0 +1,61 @@ ++/*************************************************************************/ /*! ++@File vz_vm.h ++@Title System virtualization VM support APIs ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description This header provides VM management support APIs ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++ ++Permission is hereby granted, free of charge, to any person obtaining a copy ++of this software and associated documentation files (the "Software"), to deal ++in the Software without restriction, including without limitation the rights ++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++copies of the Software, and to permit persons to whom the Software is ++furnished to do so, subject to the following conditions: ++ ++The above copyright notice and this permission notice shall be included in ++all copies or substantial portions of the Software. ++ ++Alternatively, the contents of this file may be used under the terms of ++the GNU General Public License Version 2 ("GPL") in which case the provisions ++of GPL are applicable instead of those above. ++ ++If you wish to allow use of your version of this file only under the terms of ++GPL, and not to allow others to use your version of this file under the terms ++of the MIT license, indicate your decision by deleting the provisions above ++and replace them with the notice and other provisions required by GPL as set ++out in the file called "GPL-COPYING" included in this distribution. If you do ++not delete the provisions above, a recipient may use your version of this file ++under the terms of either the MIT license or GPL. ++ ++This License is also included in this distribution in the file called ++"MIT-COPYING". ++ ++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS ++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING ++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR ++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR ++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER ++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN ++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++*/ /**************************************************************************/ ++ ++#ifndef VZ_VM_H ++#define VZ_VM_H ++ ++#include "vmm_impl.h" ++ ++bool IsVmOnline(IMG_UINT32 ui32OSID); ++ ++PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid); ++ ++PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid); ++ ++PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue); ++ ++#endif /* VZ_VM_H */ ++ ++/***************************************************************************** ++ End of file (vz_vm.h) ++*****************************************************************************/ +diff --git a/drivers/gpu/drm/img-rogue/vz_vmm_pvz.c b/drivers/gpu/drm/img-rogue/vz_vmm_pvz.c +new file mode 100644 +index 000000000000..39a52b5fadc0 +--- /dev/null ++++ b/drivers/gpu/drm/img-rogue/vz_vmm_pvz.c +@@ -0,0 +1,183 @@ ++/*************************************************************************/ /*! ++@File vz_vmm_pvz.c ++@Title VM manager para-virtualization APIs ++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved ++@Description VM manager para-virtualization management ++@License Dual MIT/GPLv2 ++ ++The contents of this file are subject to the MIT license as set out below. 
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include "pvrsrv.h"
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++#include "allocmem.h"
++#include "vz_vmm_pvz.h"
++
++#if (RGX_NUM_OS_SUPPORTED > 1)
++static PVRSRV_ERROR
++PvzConnectionValidate(PVRSRV_DEVICE_CONFIG *psDevConfig)
++{
++ VMM_PVZ_CONNECTION *psVmmPvz;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ /*
++ * Acquire the underlying VM manager PVZ connection & validate it.
++ */
++ psVmmPvz = PvzConnectionAcquire();
++ if (psVmmPvz == NULL)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: %s PVZ config: Unable to acquire PVZ connection",
++ __func__, PVRSRV_VZ_MODE_IS(GUEST) ? "Guest" : "Host"));
++ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
++ goto e0;
++ }
++
++ /* Log which PVZ setup type is being used by driver */
++#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
++ /*
++ * Static PVZ bootstrap setup
++ *
++ * This setup uses carve-out memory, has no hypercall mechanism & does not support
++ * out-of-order initialisation of host/guest VMs/drivers. The host driver has all
++ * the information needed to initialize all OSIDs' firmware state when it's loaded
++ * and its PVZ layer must mark all guest OSIDs as being online as part of its PVZ
++ * initialisation. Having no out-of-order initialisation support, the guest driver
++ * can only submit a workload to the device after the host driver has completely
++ * initialized the firmware; the VZ hypervisor/VM setup must guarantee this.
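++ *
++ * Editor's note (illustrative only, not part of the original patch): under
++ * this static setup the implied bring-up order is therefore:
++ *
++ *   host:   load driver -> initialise FW state for all OSIDs
++ *           -> mark each guest OSID online (see PvzOnVmOnline)
++ *   guests: load driver -> submit workloads (only after the host is done)
++ *
++ * whereas the dynamic setup described below lets host and guest drivers
++ * initialise out of order, synchronised via hypercalls.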
++ */
++ PVR_LOG(("Using static PVZ bootstrap setup"));
++#else
++ /*
++ * Dynamic PVZ bootstrap setup
++ *
++ * This setup uses guest memory, has a PVZ hypercall mechanism & supports out-of-order
++ * initialisation of host/guest VMs/drivers. The host driver initializes only its
++ * own OSID-0 firmware state when it's loaded, and each guest driver uses its PVZ
++ * interface to hypercall into the host driver, both to synchronise its initialisation
++ * (so it does not submit any workload to the firmware before the host driver has
++ * had a chance to initialize the firmware) and to initialize its own OSID-x
++ * firmware state.
++ */
++ PVR_LOG(("Using dynamic PVZ bootstrap setup"));
++
++ if (!PVRSRV_VZ_MODE_IS(GUEST) &&
++ (psVmmPvz->sServerFuncTab.pfnMapDevPhysHeap == NULL ||
++ psVmmPvz->sServerFuncTab.pfnUnmapDevPhysHeap == NULL))
++ {
++ PVR_DPF((PVR_DBG_ERROR, "%s: Host PVZ config: Functions for mapping a Guest's heaps not implemented\n", __func__));
++ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
++ }
++#endif
++
++ PvzConnectionRelease(psVmmPvz);
++e0:
++ return eError;
++}
++#endif /* (RGX_NUM_OS_SUPPORTED > 1) */
++
++PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
++{
++ PVRSRV_ERROR eError;
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++
++#if (RGX_NUM_OS_SUPPORTED == 1)
++# if !defined(PVRSRV_NEED_PVR_DPF)
++ PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
++# endif
++ PVR_DPF((PVR_DBG_ERROR, "This kernel driver does not support virtualization. Please rebuild with RGX_NUM_OS_SUPPORTED > 1"));
++ PVR_DPF((PVR_DBG_ERROR, "Halting initialisation, cannot transition to %s mode",
++ psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? "host" : "guest"));
++ eError = PVRSRV_ERROR_NOT_SUPPORTED;
++ goto e0;
++#else
++
++ /* Create para-virtualization connection lock */
++ eError = OSLockCreate(&psPVRSRVData->hPvzConnectionLock);
++ PVR_LOG_GOTO_IF_ERROR(eError, "OSLockCreate", e0);
++
++ /* Create VM manager para-virtualization connection */
++ eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection);
++ if (eError != PVRSRV_OK)
++ {
++ OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
++ psPVRSRVData->hPvzConnectionLock = NULL;
++
++ PVR_LOG_ERROR(eError, "VMMCreatePvzConnection");
++ goto e0;
++ }
++
++ /* Ensure the pvz connection is configured correctly */
++ eError = PvzConnectionValidate(psDevConfig);
++ PVR_LOG_RETURN_IF_ERROR(eError, "PvzConnectionValidate");
++
++ psPVRSRVData->abVmOnline[RGXFW_HOST_OS] = IMG_TRUE;
++#endif
++e0:
++ return eError;
++}
++
++void PvzConnectionDeInit(void)
++{
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++
++ VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection);
++ psPVRSRVData->hPvzConnection = NULL;
++
++ OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
++ psPVRSRVData->hPvzConnectionLock = NULL;
++}
++
++VMM_PVZ_CONNECTION* PvzConnectionAcquire(void)
++{
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++ PVR_ASSERT(psPVRSRVData->hPvzConnection != NULL);
++ return psPVRSRVData->hPvzConnection;
++}
++
++void PvzConnectionRelease(VMM_PVZ_CONNECTION *psParaVz)
++{
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++ /* Nothing to do, just validate the pointer we're passed back */
++ PVR_ASSERT(psParaVz == psPVRSRVData->hPvzConnection);
++}
++
++/******************************************************************************
++ End of file (vz_vmm_pvz.c)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/vz_vmm_pvz.h b/drivers/gpu/drm/img-rogue/vz_vmm_pvz.h
+new file mode 100644
+index 000000000000..abc6470ebd9c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/vz_vmm_pvz.h
+@@ -0,0 +1,79 @@
++/*************************************************************************/ /*!
++@File vz_vmm_pvz.h
++@Title System virtualization VM manager management APIs
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description This header provides VM manager para-virtualization management
++ APIs
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef VZ_VMM_PVZ_H
++#define VZ_VMM_PVZ_H
++
++#include "img_types.h"
++#include "vmm_impl.h"
++
++/*!
++*******************************************************************************
++ @Function PvzConnectionInit() and PvzConnectionDeInit()
++ @Description PvzConnectionInit initializes the VM manager para-virtualization
++ connection, which is subsequently used for communication between
++ guest and host; depending on the underlying VM setup, this could
++ be either a hyper-call or a cross-VM call
++ @Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
++******************************************************************************/
++PVRSRV_ERROR PvzConnectionInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
++void PvzConnectionDeInit(void);
++
++/*!
++*******************************************************************************
++ @Function PvzConnectionAcquire() and PvzConnectionRelease()
++ @Description These are used to acquire/release a handle to the VM manager
++ para-virtualization connection for making a pvz call; on the
++ client, use it to make the actual pvz call, and on the server
++ handler / VM manager, use it to complete the processing for the
++ pvz call or to make a VM manager to host pvz bridge call
++ @Return VMM_PVZ_CONNECTION* on success. Otherwise NULL
++******************************************************************************/
++VMM_PVZ_CONNECTION* PvzConnectionAcquire(void);
++void PvzConnectionRelease(VMM_PVZ_CONNECTION *psPvzConnection);
++
++#endif /* VZ_VMM_PVZ_H */
++
++/******************************************************************************
++ End of file (vz_vmm_pvz.h)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/vz_vmm_vm.c b/drivers/gpu/drm/img-rogue/vz_vmm_vm.c
+new file mode 100644
+index 000000000000..488c8b48a35c
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/vz_vmm_vm.c
+@@ -0,0 +1,221 @@
++/*************************************************************************/ /*!
++@File vz_vmm_vm.c
++@Title System virtualization VM support APIs
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description System virtualization VM support functions
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++#include "osfunc.h"
++#include "pvrsrv.h"
++#include "img_defs.h"
++#include "img_types.h"
++#include "pvrsrv_error.h"
++#include "vz_vm.h"
++#include "rgxfwutils.h"
++
++bool IsVmOnline(IMG_UINT32 ui32OSID)
++{
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++
++ return (ui32OSID >= RGX_NUM_OS_SUPPORTED) ? (false) : (psPVRSRVData->abVmOnline[ui32OSID]);
++}
++
++PVRSRV_ERROR PvzOnVmOnline(IMG_UINT32 ui32OSid)
++{
++#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)
++ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
++#else
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++ PVRSRV_DEVICE_NODE *psDevNode;
++
++ if (ui32OSid == 0 || ui32OSid >= RGX_NUM_OS_SUPPORTED)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: invalid OSID (%d)",
++ __func__, ui32OSid));
++
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto e0;
++ }
++
++ if (psPVRSRVData->abVmOnline[ui32OSid])
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: OSID %d is already enabled.",
++ __func__, ui32OSid));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto e0;
++ }
++
++ /* For now, limit support to single device setups */
++ psDevNode = psPVRSRVData->psDeviceNodeList;
++
++ if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
++ {
++ /* Firmware not initialized yet, do it here */
++ eError = PVRSRVCommonDeviceInitialise(psDevNode);
++ if (eError != PVRSRV_OK)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: failed to initialize firmware (%s)",
++ __func__, PVRSRVGetErrorString(eError)));
++ goto e0;
++ }
++ }
++
++ eError = RGXFWHealthCheckCmd(psDevNode->pvDevice);
++ if (eError != PVRSRV_OK)
++ {
++ goto e0;
++ }
++
++ psPVRSRVData->abVmOnline[ui32OSid] = IMG_TRUE;
++
++#if defined(RGX_VZ_STATIC_CARVEOUT_FW_HEAPS)
++ /* Everything is ready for the firmware to start interacting with this OS */
++ eError = RGXFWSetFwOsState(psDevNode->pvDevice, ui32OSid, RGXFWIF_OS_ONLINE);
++#endif
++
++e0:
++#endif
++ return eError;
++}
++
++PVRSRV_ERROR PvzOnVmOffline(IMG_UINT32 ui32OSid)
++{
++#if defined(RGX_NUM_OS_SUPPORTED) && (RGX_NUM_OS_SUPPORTED == 1)
++ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
++#else
++ PVRSRV_ERROR eError = PVRSRV_OK;
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++ PVRSRV_DEVICE_NODE *psDevNode;
++ PVRSRV_RGXDEV_INFO *psDevInfo;
++
++ if (ui32OSid == 0 || ui32OSid >= RGX_NUM_OS_SUPPORTED)
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: invalid OSID (%d)",
++ __func__, ui32OSid));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto e0;
++ }
++
++ if (!psPVRSRVData->abVmOnline[ui32OSid])
++ {
++ PVR_DPF((PVR_DBG_ERROR,
++ "%s: OSID %d is already disabled.",
++ __func__, ui32OSid));
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ goto e0;
++ }
++
++ /* For now, limit support to single device setups */
++ psDevNode = psPVRSRVData->psDeviceNodeList;
++ psDevInfo = psDevNode->pvDevice;
++
++ eError = RGXFWSetFwOsState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE);
++ if (eError == PVRSRV_OK)
++ {
++ psPVRSRVData->abVmOnline[ui32OSid] = IMG_FALSE;
++ }
++
++e0:
++#endif
++ return eError;
++}
++
++PVRSRV_ERROR PvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
++{
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++ PVRSRV_DEVICE_NODE *psDeviceNode;
++ PVRSRV_RGXDEV_INFO *psDevInfo;
++
++ psDeviceNode = psPVRSRVData->psDeviceNodeList;
++ psDevInfo = psDeviceNode->pvDevice;
++
++ switch (eVMMParamType)
++ {
++#if defined(SUPPORT_RGX)
++ case VMM_CONF_PRIO_OSID0:
++ case VMM_CONF_PRIO_OSID1:
++ case VMM_CONF_PRIO_OSID2:
++ case VMM_CONF_PRIO_OSID3:
++ case VMM_CONF_PRIO_OSID4:
++ case VMM_CONF_PRIO_OSID5:
++ case VMM_CONF_PRIO_OSID6:
++ case VMM_CONF_PRIO_OSID7:
++ {
++ /* Relies on VMM_CONF_PRIO_OSID0..7 being contiguous and starting at 0 */
++ IMG_UINT32 ui32OSid = eVMMParamType;
++ IMG_UINT32 ui32Prio = ui32ParamValue;
++
++ if (ui32OSid < RGX_NUM_OS_SUPPORTED)
++ {
++ eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Prio);
++ }
++ else
++ {
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ break;
++ }
++ case VMM_CONF_HCS_DEADLINE:
++ {
++ IMG_UINT32 ui32HCSDeadline = ui32ParamValue;
++ eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline);
++ break;
++ }
++#endif
++ default:
++ {
++#if !defined(SUPPORT_RGX)
++ PVR_UNREFERENCED_PARAMETER(ui32ParamValue);
++#endif
++ eError = PVRSRV_ERROR_INVALID_PARAMS;
++ }
++ }
++
++ return eError;
++}
++
++/******************************************************************************
++ End of file (vz_vmm_vm.c)
++******************************************************************************/
+diff --git a/drivers/gpu/drm/img-rogue/xuantie_sys.c b/drivers/gpu/drm/img-rogue/xuantie_sys.c
+new file mode 100644
+index 000000000000..1c8f090e87a7
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/xuantie_sys.c
+@@ -0,0 +1,521 @@
++/*************************************************************************/ /*!
++@File
++@Title System Configuration
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description System Configuration functions
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "pvrsrv.h"
++#include "pvr_drv.h"
++#include "proc_stats.h"
++#include "pvrversion.h"
++#include "rgxhwperf.h"
++#include "rgxinit.h"
++#include "process_stats.h"
++#include "xuantie_sys.h"
++
++#ifdef SUPPORT_RGX
++static IMG_HANDLE ghGpuUtilSysFS;
++#endif
++static int xuantie_gpu_period_ms = -1;
++static int xuantie_gpu_loading_max_percent = -1;
++static int xuantie_gpu_last_server_error = 0;
++static int xuantie_gpu_last_rgx_error = 0;
++
++struct gpu_sysfs_private_data {
++ struct device *dev;
++ struct timer_list timer;
++ struct workqueue_struct *workqueue;
++ struct work_struct work;
++};
++static struct gpu_sysfs_private_data xuantie_gpu_sysfs_private_data;
++
++/****************** Read/write attributes for the "log" node *****************/
++static ssize_t xuantie_gpu_log_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
++{
++ ssize_t len = 0;
++ PDLLIST_NODE pNext, pNode;
++ struct device *dev = kobj_to_dev(kobj);
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct pvr_drm_private *priv = drm_dev->dev_private;
++ PVRSRV_DEVICE_NODE *psDevNode = priv->dev_node;
++ PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus = OSAtomicRead(&psDevNode->eHealthStatus);
++ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
++
++ // Driver information
++ int dev_id = (int)psDevNode->sDevId.ui32InternalID;
++ int dev_connection_num = 0;
++ int dev_loading_percent = -1;
++
++ // Memory information
++ IMG_UINT32 dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_COUNT];
++
++ // Instance/session/connection information
++ int instance_id = 0;
++
++ // Exception information
++ char* server_state[3] = {"UNDEFINED", "OK", "BAD"};
++ char* rgx_state[5] = {"UNDEFINED", "OK", "NOT RESPONDING", "DEAD", "FAULT"};
++ int rgx_err = 0;
++
++ if (psDevNode->pvDevice != NULL)
++ {
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
++#ifdef SUPPORT_RGX
++ if (!PVRSRV_VZ_MODE_IS(GUEST) &&
++ psDevInfo->pfnGetGpuUtilStats &&
++ eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK)
++ {
++ RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ eError = psDevInfo->pfnGetGpuUtilStats(psDevNode,
++ ghGpuUtilSysFS,
++ &sGpuUtilStats);
++
++ if ((eError == PVRSRV_OK) &&
++ ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative))
++ {
++ IMG_UINT64 util;
++ IMG_UINT32 rem;
++
++ util = 100 * sGpuUtilStats.ui64GpuStatActive;
++ util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem);
++
++ dev_loading_percent = (int)util;
++ }
++ }
++#endif
++ rgx_err = psDevInfo->sErrorCounts.ui32WGPErrorCount + psDevInfo->sErrorCounts.ui32TRPErrorCount;
++ }
++
++ if (dev_loading_percent > xuantie_gpu_loading_max_percent)
++ xuantie_gpu_loading_max_percent = dev_loading_percent;
++
++ PVRSRVFindProcessMemStats(0, PVRSRV_DRIVER_STAT_TYPE_COUNT, IMG_TRUE, dev_mem_state);
++
++ if (!psDevNode->hConnectionsLock)
++ {
++ len += scnprintf(buf + len, PAGE_SIZE - len,
++ "[GPU] Version: %s\n"
++ "Build Info: %s %s %s %s\n"
++ "----------------------------------------MODULE PARAM-----------------------------------------\n"
++ "updatePeriod_ms\n"
++ "%d\n"
++ "----------------------------------------MODULE STATUS----------------------------------------\n"
++ "DevId DevInstanceNum DevLoading_%% DevLoadingMax_%%\n"
++ "%-10d%-20d%-15d%-15d\n"
++ "----------------------------------------MEM INFO(KB)-----------------------------------------\n"
++ "KMalloc VMalloc PTMemoryUMA VMapPTUMA\n"
++ "%-18d%-18d%-18d%-18d\n"
++ "PTMemoryLMA IORemapPTLMA GPUMemLMA GPUMemUMA\n"
++ "%-18d%-18d%-18d%-18d\n"
++ "GPUMemUMAPool MappedGPUMemUMA/LMA DmaBufImport\n"
++ "%-18d%-36d%-18d\n"
++ "----------------------------------------INSTANCE INFO----------------------------------------\n"
++ "Id ProName ProId ThdId\n"
++ "---------------------------------------EXCEPTION INFO----------------------------------------\n"
++ "Server_State Server_Error RGX_State RGX_Error\n"
++ "%-18s%-18d%-18s%-18d\n",
++ PVRVERSION_STRING, utsname()->sysname, utsname()->release, utsname()->version, utsname()->machine, xuantie_gpu_period_ms,
++ dev_id, 0, dev_loading_percent, xuantie_gpu_loading_max_percent,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_KMALLOC] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_VMALLOC] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT] >> 10,
++ server_state[psPVRSRVData->eServicesState], PVRSRV_KM_ERRORS - xuantie_gpu_last_server_error,
++ rgx_state[eHealthStatus], rgx_err - xuantie_gpu_last_rgx_error);
++ return len;
++ }
++
++ OSLockAcquire(psDevNode->hConnectionsLock);
++ dllist_foreach_node(&psDevNode->sConnections, pNode, pNext)
++ {
++ dev_connection_num++;
++ }
++
++ // Format the output
++ len += scnprintf(buf + len, PAGE_SIZE - len,
++ "[GPU] Version: %s\n"
++ "Build Info: %s %s %s %s\n"
++ "----------------------------------------MODULE PARAM-----------------------------------------\n"
++ "updatePeriod_ms\n"
++ "%d\n"
++ "----------------------------------------MODULE STATUS----------------------------------------\n"
++ "DevId DevInstanceNum DevLoading_%% DevLoadingMax_%%\n"
++ "%-10d%-20d%-15d%-15d\n"
++ "----------------------------------------MEM INFO(KB)-----------------------------------------\n"
++ "KMalloc VMalloc PTMemoryUMA VMapPTUMA\n"
++ "%-18d%-18d%-18d%-18d\n"
++ "PTMemoryLMA IORemapPTLMA GPUMemLMA GPUMemUMA\n"
++ "%-18d%-18d%-18d%-18d\n"
++ "GPUMemUMAPool MappedGPUMemUMA/LMA DmaBufImport\n"
++ "%-18d%-36d%-18d\n"
++ "----------------------------------------INSTANCE INFO----------------------------------------\n"
++ "Id ProName ProId ThdId\n",
++ PVRVERSION_STRING, utsname()->sysname, utsname()->release, utsname()->version, utsname()->machine, xuantie_gpu_period_ms,
++ dev_id, dev_connection_num, dev_loading_percent, xuantie_gpu_loading_max_percent,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_KMALLOC] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_VMALLOC] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL] >> 10, dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA] >> 10,
++ dev_mem_state[PVRSRV_DRIVER_STAT_TYPE_DMA_BUF_IMPORT] >> 10);
++
++ dllist_foreach_node(&psDevNode->sConnections, pNode, pNext)
++ {
++ CONNECTION_DATA *sData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode);
++ len += scnprintf(buf + len, PAGE_SIZE - len,
++ "%-5d%-20s%-10d%-10d\n",
++ instance_id, sData->pszProcName, sData->pid, sData->tid);
++ instance_id++;
++ }
++
++ len += scnprintf(buf + len, PAGE_SIZE - len,
++ "---------------------------------------EXCEPTION INFO----------------------------------------\n"
++ "Server_State Server_Error RGX_State RGX_Error\n"
++ "%-18s%-18d%-18s%-18d\n",
++ server_state[psPVRSRVData->eServicesState], PVRSRV_KM_ERRORS - xuantie_gpu_last_server_error,
++ rgx_state[eHealthStatus], rgx_err - xuantie_gpu_last_rgx_error);
++
++ OSLockRelease(psDevNode->hConnectionsLock);
++ return len;
++}
++
++static ssize_t xuantie_gpu_log_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ struct device *dev = kobj_to_dev(kobj);
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct pvr_drm_private *priv = drm_dev->dev_private;
++ PVRSRV_DEVICE_NODE *psDevNode = priv->dev_node;
++
++ xuantie_gpu_loading_max_percent = -1;
++ xuantie_gpu_last_server_error = PVRSRV_KM_ERRORS;
++ if (psDevNode->pvDevice != NULL)
++ {
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
++ xuantie_gpu_last_rgx_error = psDevInfo->sErrorCounts.ui32WGPErrorCount + psDevInfo->sErrorCounts.ui32TRPErrorCount;
++ }
++ return count;
++}
++
++static struct kobj_attribute sxuantie_gpu_log_attr = __ATTR(log, 0664, xuantie_gpu_log_show, xuantie_gpu_log_store);
++
++/****************** Read/write attributes for the "updatePeriod_ms" node *****/
++static ssize_t xuantie_gpu_updatePeriod_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n(set 50~10000 to enable update period, set other value to disable)\n",
++ xuantie_gpu_period_ms);
++}
++
++static ssize_t xuantie_gpu_updatePeriod_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
++{
++ char *start = (char *)buf;
++ int temp_period_ms = simple_strtoul(start, &start, 0);
++ if (temp_period_ms >= 50 && temp_period_ms <= 10000) {
++ xuantie_gpu_period_ms = temp_period_ms;
++ mod_timer(&xuantie_gpu_sysfs_private_data.timer, jiffies + msecs_to_jiffies(xuantie_gpu_period_ms));
++ } else {
++ xuantie_gpu_period_ms = -1;
++ del_timer(&xuantie_gpu_sysfs_private_data.timer);
++ }
++ return count;
++}
++
++static struct kobj_attribute sxuantie_gpu_updateperiod_attr = __ATTR(updatePeriod_ms, 0664, xuantie_gpu_updatePeriod_show, xuantie_gpu_updatePeriod_store);
++
++/****************** sysfs "info" attribute group *****************************/
++static struct attribute *pxuantie_gpu_attrs[] = {
++ &sxuantie_gpu_log_attr.attr,
++ &sxuantie_gpu_updateperiod_attr.attr,
++ NULL, // must be NULL
++};
++
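++/*
++ * Editor's illustrative sketch (not part of the original patch): with the
++ * "info" group below, the two attributes surface as .../info/log and
++ * .../info/updatePeriod_ms under the device's sysfs directory (the exact
++ * parent path depends on where the DRM device registers). A minimal
++ * userspace reader, with the attribute path passed as an argument:
++ *
++ *     #include <stdio.h>
++ *
++ *     int main(int argc, char **argv)
++ *     {
++ *         char line[512];
++ *         FILE *f = (argc > 1) ? fopen(argv[1], "r") : NULL;
++ *
++ *         if (!f)
++ *             return 1;
++ *         while (fgets(line, sizeof(line), f)) /* dump the whole report */
++ *             fputs(line, stdout);
++ *         fclose(f);
++ *         return 0;
++ *     }
++ *
++ * Writing a value in [50, 10000] to updatePeriod_ms arms the sampling timer
++ * (see xuantie_gpu_updatePeriod_store above); any other value disables it.
++ * Writing to log resets the max-loading and error-count baselines.
++ */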
++static struct attribute_group sxuantie_gpu_attr_group = {
++ .name = "info", // name of the directory created under the device node
++ .attrs = pxuantie_gpu_attrs,
++};
++
++static void xuantie_gpu_work_func(struct work_struct *w)
++{
++ struct gpu_sysfs_private_data *data = container_of(w, struct gpu_sysfs_private_data, work);
++ struct device *dev = data->dev;
++ struct drm_device *drm_dev = dev_get_drvdata(dev);
++ struct pvr_drm_private *priv = drm_dev->dev_private;
++ PVRSRV_DEVICE_NODE *psDevNode = priv->dev_node;
++ PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus = OSAtomicRead(&psDevNode->eHealthStatus);
++ int current_loading_percent = -1;
++ if (psDevNode->pvDevice != NULL)
++ {
++ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
++#ifdef SUPPORT_RGX
++ if (!PVRSRV_VZ_MODE_IS(GUEST) &&
++ psDevInfo->pfnGetGpuUtilStats &&
++ eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK)
++ {
++ RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
++ PVRSRV_ERROR eError = PVRSRV_OK;
++
++ eError = psDevInfo->pfnGetGpuUtilStats(psDevNode,
++ ghGpuUtilSysFS,
++ &sGpuUtilStats);
++
++ if ((eError == PVRSRV_OK) &&
++ ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative))
++ {
++ IMG_UINT64 util;
++ IMG_UINT32 rem;
++
++ util = 100 * sGpuUtilStats.ui64GpuStatActive;
++ util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem);
++
++ current_loading_percent = (int)util;
++ }
++ }
++#endif
++ }
++ if (current_loading_percent > xuantie_gpu_loading_max_percent)
++ xuantie_gpu_loading_max_percent = current_loading_percent;
++ mod_timer(&data->timer, jiffies + msecs_to_jiffies(xuantie_gpu_period_ms));
++}
++
++static void xuantie_gpu_timer_callback(struct timer_list *t)
++{
++ struct gpu_sysfs_private_data *data = container_of(t, struct gpu_sysfs_private_data, timer);
++ queue_work(data->workqueue, &data->work);
++}
++
++int xuantie_sysfs_init(struct device *dev)
++{
++ int ret;
++ ret = sysfs_create_group(&dev->kobj, &sxuantie_gpu_attr_group);
++ if (ret) {
++ dev_err(dev, "Failed to create gpu dev sysfs.\n");
++ return ret;
++ }
++
++#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
++ if (SORgxGpuUtilStatsRegister(&ghGpuUtilSysFS) != PVRSRV_OK)
++ {
++ dev_err(dev, "Failed to register GpuUtil for sysfs.\n");
++ return -ENOMEM;
++ }
++#endif
++
++ xuantie_gpu_sysfs_private_data.workqueue = create_workqueue("gpu_sysfs_workqueue");
++ if (!xuantie_gpu_sysfs_private_data.workqueue)
++ return -ENOMEM;
++ INIT_WORK(&xuantie_gpu_sysfs_private_data.work, xuantie_gpu_work_func);
++
++ xuantie_gpu_sysfs_private_data.dev = dev;
++ timer_setup(&xuantie_gpu_sysfs_private_data.timer, xuantie_gpu_timer_callback, 0);
++ return ret;
++}
++
++void xuantie_sysfs_uninit(struct device *dev)
++{
++ if (xuantie_gpu_sysfs_private_data.dev == dev)
++ del_timer(&xuantie_gpu_sysfs_private_data.timer);
++
++ if (xuantie_gpu_sysfs_private_data.workqueue)
++ destroy_workqueue(xuantie_gpu_sysfs_private_data.workqueue);
++
++#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
++ if (SORgxGpuUtilStatsUnregister(ghGpuUtilSysFS) != PVRSRV_OK)
++ {
++ dev_err(dev, "Failed to unregister GpuUtil for sysfs.\n");
++ }
++#endif
++
++ sysfs_remove_group(&dev->kobj, &sxuantie_gpu_attr_group);
++}
++
++int xuantie_mfg_enable(struct gpu_plat_if *mfg)
++{
++ int ret;
++ int val;
++ ret = pm_runtime_get_sync(mfg->dev);
++ /* don't check ret > 0 here for pm status maybe ACTIVE */
++ if (ret < 0)
++ return ret;
++
++ xuantie_debug("xuantie_mfg_enable aclk\n");
++ if (mfg->gpu_aclk) {
++ ret = clk_prepare_enable(mfg->gpu_aclk);
++ if (ret) {
++ xuantie_debug("xuantie_mfg_enable aclk failed\n");
++ goto err_pm_runtime_put;
++ }
++ }
++ if (mfg->gpu_cclk) {
++ ret = clk_prepare_enable(mfg->gpu_cclk);
++ if (ret) {
++ xuantie_debug("xuantie_mfg_enable cclk failed\n");
++ clk_disable_unprepare(mfg->gpu_aclk);
++ goto err_pm_runtime_put;
++ }
++ }
++
++ regmap_read(mfg->vosys_regmap, 0x0, &val);
++ if (val)
++ {
++ regmap_update_bits(mfg->vosys_regmap, 0x0, 3, 0);
++ regmap_read(mfg->vosys_regmap, 0x0, &val);
++ if (val) {
++ pr_info("[GPU_RST]" "val is %x\r\n", val);
++ clk_disable_unprepare(mfg->gpu_cclk);
++ clk_disable_unprepare(mfg->gpu_aclk);
++ goto err_pm_runtime_put;
++ }
++ udelay(1);
++ }
++ /* rst gpu clkgen */
++ regmap_update_bits(mfg->vosys_regmap, 0x0, 2, 2);
++ regmap_read(mfg->vosys_regmap, 0x0, &val);
++ if (!(val & 0x2)) {
++ pr_info("[GPU_CLK_RST]" "val is %x\r\n", val);
++ clk_disable_unprepare(mfg->gpu_cclk);
++ clk_disable_unprepare(mfg->gpu_aclk);
++ goto err_pm_runtime_put;
++ }
++ udelay(1);
++ /* rst gpu */
++ regmap_update_bits(mfg->vosys_regmap, 0x0, 1, 1);
++ regmap_read(mfg->vosys_regmap, 0x0, &val);
++ if (!(val & 0x1)) {
++ pr_info("[GPU_RST]" "val is %x\r\n", val);
++ clk_disable_unprepare(mfg->gpu_cclk);
++ clk_disable_unprepare(mfg->gpu_aclk);
++ goto err_pm_runtime_put;
++ }
++ return 0;
++err_pm_runtime_put:
++ pm_runtime_put_sync(mfg->dev);
++ return ret;
++}
++
++void xuantie_mfg_disable(struct gpu_plat_if *mfg)
++{
++ int val;
++ regmap_update_bits(mfg->vosys_regmap, 0x0, 3, 0);
++ regmap_read(mfg->vosys_regmap, 0x0, &val);
++ if (val) {
++ pr_info("[GPU_RST]" "val is %x\r\n", val);
++ return;
++ }
++ if (mfg->gpu_aclk) {
++ clk_disable_unprepare(mfg->gpu_aclk);
++ xuantie_debug("xuantie_mfg_disable aclk\n");
++ }
++ if (mfg->gpu_cclk) {
++ clk_disable_unprepare(mfg->gpu_cclk);
++ xuantie_debug("xuantie_mfg_disable cclk\n");
++ }
++
++ pm_runtime_put_sync(mfg->dev);
++}
++
++struct gpu_plat_if *dt_hw_init(struct device *dev)
++{
++ struct gpu_plat_if *mfg;
++
++ xuantie_debug("gpu_plat_if_create Begin\n");
++
++ mfg = devm_kzalloc(dev, sizeof(*mfg), GFP_KERNEL);
++ if (!mfg)
++ return ERR_PTR(-ENOMEM);
++ mfg->dev = dev;
++
++ mfg->gpu_cclk = devm_clk_get(dev, "cclk");
++ if (IS_ERR(mfg->gpu_cclk)) {
++ dev_err(dev, "devm_clk_get cclk failed !!!\n");
++ pm_runtime_disable(dev);
++ return ERR_PTR(PTR_ERR(mfg->gpu_cclk));
++ }
++
++ mfg->gpu_aclk = devm_clk_get(dev, "aclk");
++ if (IS_ERR(mfg->gpu_aclk)) {
++ dev_err(dev, "devm_clk_get aclk failed !!!\n");
++ pm_runtime_disable(dev);
++ return ERR_PTR(PTR_ERR(mfg->gpu_aclk));
++ }
++
++ mfg->vosys_regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "vosys-regmap");
++ if (IS_ERR(mfg->vosys_regmap)) {
++ dev_err(dev, "syscon_regmap_lookup_by_phandle vosys-regmap failed !!!\n");
++ pm_runtime_disable(dev);
++ return ERR_PTR(PTR_ERR(mfg->vosys_regmap));
++ }
++
++ mutex_init(&mfg->set_power_state);
++
++ pm_runtime_enable(dev);
++
++ xuantie_debug("gpu_plat_if_create End\n");
++
++ return mfg;
++}
++
++void dt_hw_uninit(struct gpu_plat_if *mfg)
++{
++ pm_runtime_disable(mfg->dev);
++}
+diff --git a/drivers/gpu/drm/img-rogue/xuantie_sys.h b/drivers/gpu/drm/img-rogue/xuantie_sys.h
+new file mode 100644
+index 000000000000..06af8d97ede4
+--- /dev/null
++++ b/drivers/gpu/drm/img-rogue/xuantie_sys.h
+@@ -0,0 +1,75 @@
++/*************************************************************************/ /*!
++@File
++@Title System Description Header
++@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
++@Description This header provides system-specific declarations and macros
++@License Dual MIT/GPLv2
++
++The contents of this file are subject to the MIT license as set out below.
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to deal
++in the Software without restriction, including without limitation the rights
++to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++Alternatively, the contents of this file may be used under the terms of
++the GNU General Public License Version 2 ("GPL") in which case the provisions
++of GPL are applicable instead of those above.
++
++If you wish to allow use of your version of this file only under the terms of
++GPL, and not to allow others to use your version of this file under the terms
++of the MIT license, indicate your decision by deleting the provisions above
++and replace them with the notice and other provisions required by GPL as set
++out in the file called "GPL-COPYING" included in this distribution. If you do
++not delete the provisions above, a recipient may use your version of this file
++under the terms of either the MIT license or GPL.
++
++This License is also included in this distribution in the file called
++"MIT-COPYING".
++
++EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
++PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
++BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
++PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
++COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
++IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++*/ /**************************************************************************/
++
++#ifndef XUANTIE_SYS_H
++#define XUANTIE_SYS_H
++
++#include
++
++#define ENABLE_XUANTIE_DEBUG 0
++
++#if ENABLE_XUANTIE_DEBUG
++#define xuantie_debug(fmt, args...) pr_info("[XUANTIE_CLK]" fmt, ##args)
++#else
++#define xuantie_debug(fmt, args...) do { } while (0)
++#endif
++
++struct gpu_plat_if {
++ struct device *dev;
++ /* mutex protecting power state changes */
++ struct mutex set_power_state;
++ struct clk *gpu_cclk;
++ struct clk *gpu_aclk;
++ struct regmap *vosys_regmap;
++};
++
++int xuantie_sysfs_init(struct device *dev);
++void xuantie_sysfs_uninit(struct device *dev);
++
++struct gpu_plat_if *dt_hw_init(struct device *dev);
++void dt_hw_uninit(struct gpu_plat_if *mfg);
++
++int xuantie_mfg_enable(struct gpu_plat_if *mfg);
++void xuantie_mfg_disable(struct gpu_plat_if *mfg);
++
++#endif /* XUANTIE_SYS_H */
+diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
+index 3a2f4a9f1d46..974b66e92fdb 100644
+--- a/drivers/gpu/drm/panel/Kconfig
++++ b/drivers/gpu/drm/panel/Kconfig
+@@ -835,4 +835,13 @@ config DRM_PANEL_XINPENG_XPP055C272
+ Say Y here if you want to enable support for the Xinpeng
+ XPP055C272 controller for 720x1280 LCD panels with MIPI/RGB/SPI
+ system interfaces.
++ ++config DRM_PANEL_HX8279 ++ tristate "HX8279-based panels" ++ depends on OF ++ depends on DRM_MIPI_DSI ++ depends on BACKLIGHT_CLASS_DEVICE ++ help ++ Say Y if you want to enable support for panels based on the ++ HX8279 controller. + endmenu +diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile +index 433e93d57949..e9b6550e9e26 100644 +--- a/drivers/gpu/drm/panel/Makefile ++++ b/drivers/gpu/drm/panel/Makefile +@@ -20,7 +20,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o + obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o + obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o + obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o +-obj-$(CONFIG_DRM_PANEL_JADARD_JD9365DA_H3) += panel-jadard-jd9365da-h3.o ++obj-$(CONFIG_DRM_PANEL_JADARD_JD9365DA_H3) += panel-jadard-jd9365da.o + obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o + obj-$(CONFIG_DRM_PANEL_JDI_R63452) += panel-jdi-fhd-r63452.o + obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o +@@ -85,3 +85,4 @@ obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o + obj-$(CONFIG_DRM_PANEL_VISIONOX_R66451) += panel-visionox-r66451.o + obj-$(CONFIG_DRM_PANEL_WIDECHIPS_WS2401) += panel-widechips-ws2401.o + obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o ++obj-$(CONFIG_DRM_PANEL_HX8279) += panel-himax-hx8279.o +diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c +new file mode 100644 +index 000000000000..8c52de95fea7 +--- /dev/null ++++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c +@@ -0,0 +1,326 @@ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include